
Usage of the org.znerd.xmlenc.XMLOutputter Class, with Code Examples

Reposted. Author: 知者. Updated: 2024-03-24 05:21:05

This article collects code examples for the Java class org.znerd.xmlenc.XMLOutputter and shows how the class is used in practice. The examples were extracted from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should be useful as references. Details of the XMLOutputter class follow:
Package: org.znerd.xmlenc
Class: XMLOutputter

About XMLOutputter

The original page gives no description. Judging from the examples below, XMLOutputter is a streaming XML writer: it wraps a java.io.Writer together with a character encoding, emits the XML declaration via declaration(), and then writes the document sequentially with startTag(), attribute(), endTag(), and endDocument().
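
Here is a minimal, self-contained sketch of that call pattern, assuming only the xmlenc calls that appear in the snippets below; the class name XmlencSketch and the document being written are invented for illustration:

import java.io.IOException;
import java.io.PrintWriter;

import org.znerd.xmlenc.XMLOutputter;

public class XmlencSketch {
  public static void main(String[] args) throws IOException {
    final PrintWriter out = new PrintWriter(System.out);
    // Wrap a Writer and name the output encoding, as every snippet below does.
    final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
    doc.declaration();                // <?xml version="1.0" encoding="UTF-8"?>
    doc.startTag("cluster");          // opens <cluster
    doc.attribute("clusterId", "c1"); //   ... clusterId="c1"
    doc.startTag("item");             // closes the parent's bracket, opens <item
    doc.attribute("label", "key");
    doc.attribute("value", "value");
    doc.endTag();                     // closes <item .../>
    doc.endTag();                     // closes </cluster>
    doc.endDocument();                // finishes the document
    out.flush();
  }
}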

Code Examples

Code example source: ch.cern.hadoop/hadoop-hdfs (the same snippet appears verbatim in io.prestosql.hadoop/hadoop-apache)

/**
 * Generate an XML block such as {@code <item label="key" value="value"/>}.
 */
private static void toXmlItemBlock(XMLOutputter doc, String key, String value)
  throws IOException {
 doc.startTag("item");
 doc.attribute("label", key);
 doc.attribute("value", value);
 doc.endTag();
}
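
For reference, a call such as toXmlItemBlock(doc, "foo", "bar") should therefore emit the following (xmlenc quotes attribute values; the Javadoc's label=key shorthand omits the quotes):

 <item label="foo" value="bar"/>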

Code example source: ch.cern.hadoop/hadoop-hdfs (the same snippet appears verbatim in io.prestosql.hadoop/hadoop-apache)

@Override
 public Void run() throws Exception {
  final String path = ServletUtil.getDecodedPath(request, "/contentSummary");
  final PrintWriter out = response.getWriter();
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();
  try {
   //get content summary
   final ClientProtocol nnproxy = createNameNodeProxy();
   final ContentSummary cs = nnproxy.getContentSummary(path);
   //write xml
   xml.startTag(ContentSummary.class.getName());
   if (cs != null) {
    xml.attribute("length"        , "" + cs.getLength());
    xml.attribute("fileCount"     , "" + cs.getFileCount());
    xml.attribute("directoryCount", "" + cs.getDirectoryCount());
    xml.attribute("quota"         , "" + cs.getQuota());
    xml.attribute("spaceConsumed" , "" + cs.getSpaceConsumed());
    xml.attribute("spaceQuota"    , "" + cs.getSpaceQuota());
   }
   xml.endTag();
  } catch(IOException ioe) {
   writeXml(ioe, path, xml);
  }
  xml.endDocument();
  return null;
 }
});
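
When the summary is non-null, the response body therefore looks roughly like this; the element name comes from ContentSummary.class.getName(), and the attribute values depend on the path queried:

 <?xml version="1.0" encoding="UTF-8"?>
 <org.apache.hadoop.fs.ContentSummary length="..." fileCount="..." directoryCount="..."
     quota="..." spaceConsumed="..." spaceQuota="..."/>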

Code example source: ch.cern.hadoop/hadoop-hdfs (the same snippet appears verbatim in io.prestosql.hadoop/hadoop-apache)

// Fragment: the snippet extractor dropped the enclosing guards and loop
// bodies; they are restored below as elision comments, not original code.
if (error != null) {
  // A general error: report only the exception message and stop.
  createGeneralException(doc, clusterid,
      StringUtils.stringifyException(error));
  doc.getWriter().flush();
  return;
}
if (/* no datanodes reported; condition elided in the snippet */) {
  doc.startTag("cluster");
  createNamenodeExceptionMsg(doc, exceptions);
  doc.endTag();
  doc.getWriter().flush();
  return;
}
doc.startTag("cluster");
doc.attribute("clusterId", clusterid);
doc.startTag("decommissioningReport");
countDecommissionDatanodes();
toXmlItemBlock(doc, DecommissionStates.DECOMMISSIONED.toString(),
    /* per-state count; second argument truncated in the snippet */);
// ... one <item> per decommission state ...
doc.endTag(); // decommissioningReport
doc.startTag("datanodes");
Set<String> dnSet = statusMap.keySet();
for (String dnhost : dnSet) {
  // Only datanodes whose overall status is a decommissioning state or
  // UNKNOWN are written; the condition was truncated in the snippet:
  //   ... || overallStatus.equals(DecommissionStates.UNKNOWN.toString())
  doc.startTag("node");
  // ... per-datanode status items ...
  doc.endTag(); // node
}

Code example source: ch.cern.hadoop/hadoop-hdfs (the same snippet appears verbatim in io.prestosql.hadoop/hadoop-apache)

@Override
public Void run() throws IOException {
  ClientProtocol nn = createNameNodeProxy();
  doc.declaration();
  doc.startTag("listing");
  for (Map.Entry<String, String> m : root.entrySet()) {
    doc.attribute(m.getKey(), m.getValue());
  }
  // ... (rest of the directory listing truncated in the original snippet)
}

Code example source: org.jvnet.hudson.hadoop/hadoop-core

/** {@inheritDoc} */
 public void doGet(HttpServletRequest request, HttpServletResponse response
   ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();
  final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
  final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
    UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ClientProtocol nnproxy = DFSClient.createNamenode(conf);
  try {
   final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
     filename, nnproxy, socketFactory, socketTimeout);
   MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
   new RemoteException(ioe.getClass().getName(), ioe.getMessage()
     ).writeXml(filename, xml);
  }
  xml.endDocument();
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs (the same snippet appears verbatim in io.prestosql.hadoop/hadoop-apache)

public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException { // signature assumed from the sibling servlet examples
  final PrintWriter out = response.getWriter();
  final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
  try {
    // ... (document body truncated in the original snippet)
  } finally {
    if (doc != null) {
      doc.endDocument();
    }
  }
}

Code example source: com.facebook.hadoop/hadoop-core

/** {@inheritDoc} */
 public void doGet(HttpServletRequest request, HttpServletResponse response
   ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();
  Configuration daemonConf = (Configuration) getServletContext()
   .getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE);
  final Configuration conf = (daemonConf == null) ? new Configuration()
   : new Configuration(daemonConf);
  final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
    UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ProtocolProxy<ClientProtocol> nnproxy =
   DFSClient.createRPCNamenode(conf);
  try {
   final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
     DataTransferProtocol.DATA_TRANSFER_VERSION,
     filename, nnproxy.getProxy(), nnproxy, socketFactory, socketTimeout);
   MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
   new RemoteException(ioe.getClass().getName(), ioe.getMessage()
     ).writeXml(filename, xml);
  }
  xml.endDocument();
 }
}

Code example source: org.jvnet.hudson.hadoop/hadoop-core

final UnixUserGroupInformation ugi = getUGI(request);
final PrintWriter out = response.getWriter();
final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
try {
  final Map<String, String> root = buildRoot(request, doc);
  ClientProtocol nnproxy = createNameNodeProxy(ugi);
  doc.declaration();
  doc.startTag("listing");
  for (Map.Entry<String, String> m : root.entrySet()) {
    doc.attribute(m.getKey(), m.getValue());
  }
  // ... (directory listing body truncated in the original snippet)
} finally {
  if (doc != null) {
    doc.endDocument();
  }
}

Code example source: ch.cern.hadoop/hadoop-hdfs (the same snippet appears verbatim in io.prestosql.hadoop/hadoop-apache)

// Fragment: guards and the loop header were dropped by the snippet
// extractor; they are restored below as elision comments.
if (/* error to report; condition elided in the snippet */) {
  // ...
  doc.getWriter().flush();
  return;
}
doc.startTag("cluster");
doc.attribute("clusterId", clusterid);
doc.startTag("storage");
// ... cluster-wide storage items ...
doc.endTag(); // storage
doc.startTag("namenodes");
// (loop over per-namenode statuses; the loop variable nn below is implied
// by the snippet's nn.host / nn.httpAddress / nn.softwareVersion)
for (/* each namenode status nn */) {
  doc.startTag("node");
  toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
  toXmlItemBlock(doc, "Blockpool Used",
      /* value truncated in the snippet */);
  // ... more items, including "Dead Datanode (Decommissioned)" counts ...
  toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
  doc.endTag(); // node
}
doc.endTag(); // namenodes
doc.endTag(); // cluster
doc.getWriter().flush();
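
Reading off the startTag/endTag pairs, the fragment emits a report shaped roughly like this (the elided sections cannot be recovered from the snippet):

 <cluster clusterId="...">
   <storage> ... </storage>
   <namenodes>
     <node>
       <item label="NameNode" value="host" link="http://host:http-port"/>
       ...
     </node>
   </namenodes>
 </cluster>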

Code example source: ch.cern.hadoop/hadoop-hdfs (the same snippet appears verbatim in io.prestosql.hadoop/hadoop-apache)

@Override
 public void doGet(HttpServletRequest request, HttpServletResponse response
   ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();
  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = 
   new HdfsConfiguration(datanode.getConf());
  
  try {
   final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
     datanode, conf, getUGI(request, conf));
   final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
   MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
   writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
   writeXml(e, path, xml);
  }
  xml.endDocument();
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Create an XML block describing a general exception.
 */
private static void createGeneralException(XMLOutputter doc,
  String clusterid, String eMsg) throws IOException {
 doc.startTag("cluster");
 doc.attribute("clusterId", clusterid);
 doc.startTag("message");
 doc.startTag("item");
 doc.attribute("msg", eMsg);
 doc.endTag(); // item
 doc.endTag(); // message
 doc.endTag(); // cluster
}
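
A general error is therefore reported to the client as:

 <cluster clusterId="...">
   <message>
     <item msg="stringified exception text"/>
   </message>
 </cluster>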

Code example source: com.facebook.hadoop/hadoop-core

final UnixUserGroupInformation ugi = getUGI(request);
final PrintWriter out = response.getWriter();
final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
try {
  final Map<String, String> root = buildRoot(request, doc);
  ClientProtocol nnproxy = createNameNodeProxy(ugi);
  doc.declaration();
  doc.startTag("listing");
  for (Map.Entry<String, String> m : root.entrySet()) {
    doc.attribute(m.getKey(), m.getValue());
  }
  // ... (listing body truncated in the original snippet)
} finally {
  doc.endDocument();
}

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
 * Generate an XML block such as {@code <item label="Node" value="hostname"
 * link="http://hostname:50070"/>}.
 */
private static void toXmlItemBlockWithLink(XMLOutputter doc, String value,
  URL url, String label) throws IOException {
 doc.startTag("item");
 doc.attribute("label", label);
 doc.attribute("value", value);
 doc.attribute("link", url.toString());
 doc.endTag(); // item
}
