
Usage of org.apache.hadoop.fs.XAttr.getValue() with code examples

Repost · Author: 知者 · Updated: 2024-03-19 02:54:40

This article collects Java code examples of the org.apache.hadoop.fs.XAttr.getValue() method and shows how XAttr.getValue() is used in practice. The examples come mainly from GitHub, Stack Overflow, Maven, and similar platforms and were extracted from selected projects, so they should serve as useful references. Details of the XAttr.getValue() method:
Package: org.apache.hadoop.fs
Class: XAttr
Method: getValue

About XAttr.getValue

The upstream Javadoc provides no description. Judging from the examples below, getValue() returns the extended attribute's raw byte[] value, which may be null when the xattr carries no value.
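As a starting point, here is a minimal sketch of typical usage: build an XAttr with XAttr.Builder and read the value back with getValue(). The attribute name and value bytes are made up for illustration, and the sketch assumes the hadoop-hdfs-client artifact is on the classpath.

import org.apache.hadoop.fs.XAttr;

import java.nio.charset.StandardCharsets;

public class XAttrGetValueDemo {
 public static void main(String[] args) {
  // Build an xattr in the USER namespace; name and value are hypothetical.
  XAttr xAttr = new XAttr.Builder()
    .setNameSpace(XAttr.NameSpace.USER)
    .setName("checksum")
    .setValue("abc123".getBytes(StandardCharsets.UTF_8))
    .build();

  // getValue() returns the raw byte[] value; it may be null when the
  // xattr was created without a value, so callers should check for null.
  byte[] value = xAttr.getValue();
  if (value != null) {
   System.out.println(new String(value, StandardCharsets.UTF_8));
  }
 }
}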

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

private static Map<String, Object> toJsonMap(final XAttr xAttr,
  final XAttrCodec encoding) throws IOException {
 if (xAttr == null) {
  return null;
 }
 final Map<String, Object> m = new TreeMap<String, Object>();
 m.put("name", XAttrHelper.getPrefixedName(xAttr));
 m.put("value", xAttr.getValue() != null ?
   XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
 return m;
}

Code example source: org.apache.hadoop/hadoop-hdfs

public XAttrFeature(List<XAttr> xAttrs) {
 if (xAttrs != null && !xAttrs.isEmpty()) {
  List<XAttr> toPack = new ArrayList<XAttr>();
  ImmutableList.Builder<XAttr> b = null;
  for (XAttr attr : xAttrs) {
   if (attr.getValue() == null ||
     attr.getValue().length <= PACK_THRESHOLD) {
    toPack.add(attr);
   } else {
    if (b == null) {
     b = ImmutableList.builder();
    }
    b.add(attr);
   }
  }
  this.attrs = XAttrFormat.toBytes(toPack);
  if (b != null) {
   this.xAttrs = b.build();
  }
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: write the value length as a single byte, then the value bytes, if any.
int vlen = a.getValue() == null ? 0 : a.getValue().length;
Preconditions.checkArgument(vlen < XATTR_VALUE_LEN_MAX,
  "The length of xAttr values is too long.");
out.write((byte)(vlen));
if (vlen > 0) {
 out.write(a.getValue());
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
public byte getLocalStoragePolicyID() {
 XAttrFeature f = getXAttrFeature();
 XAttr xattr = f == null ? null : f.getXAttr(
   BlockStoragePolicySuite.getStoragePolicyXAttrPrefixedName());
 if (xattr != null) {
  return (xattr.getValue())[0];
 }
 return BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static void appendXAttrsToXml(ContentHandler contentHandler,
  List<XAttr> xAttrs) throws SAXException {
 for (XAttr xAttr: xAttrs) {
  contentHandler.startElement("", "", "XATTR", new AttributesImpl());
  XMLUtils.addSaxString(contentHandler, "NAMESPACE",
    xAttr.getNameSpace().toString());
  XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName());
  if (xAttr.getValue() != null) {
   try {
    XMLUtils.addSaxString(contentHandler, "VALUE",
      XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX));
   } catch (IOException e) {
    throw new SAXException(e);
   }
  }
  contentHandler.endElement("", "", "XATTR");
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Verifies that the combined size of the name and value of an xattr is within
 * the configured limit. Setting a limit of zero disables this check.
 */
private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) {
 int size = DFSUtil.string2Bytes(xAttr.getName()).length;
 if (xAttr.getValue() != null) {
  size += xAttr.getValue().length;
 }
 if (size > fsd.getXattrMaxSize()) {
  throw new HadoopIllegalArgumentException(
    "The XAttr is too big. The maximum combined size of the"
    + " name and value is " + fsd.getXattrMaxSize()
    + ", but the total size is " + size);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static ZoneEncryptionInfoProto getZoneEncryptionInfoProto(
  final INodesInPath iip) throws IOException {
 final XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByPrefixedName(
   iip.getLastINode(), iip.getPathSnapshotId(),
   CRYPTO_XATTR_ENCRYPTION_ZONE);
 if (fileXAttr == null) {
  throw new IOException(
    "Could not find reencryption XAttr for file " + iip.getPath());
 }
 try {
  return ZoneEncryptionInfoProto.parseFrom(fileXAttr.getValue());
 } catch (InvalidProtocolBufferException e) {
  throw new IOException(
    "Could not parse file encryption info for " + "inode " + iip
      .getPath(), e);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f) {
 XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
 for (XAttr a : f.getXAttrs()) {
  XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
    newBuilder();
  int v = XAttrFormat.toInt(a);
  xAttrCompactBuilder.setName(v);
  if (a.getValue() != null) {
   xAttrCompactBuilder.setValue(PBHelperClient.getByteString(a.getValue()));
  }
  b.addXAttrs(xAttrCompactBuilder.build());
 }
 
 return b;
}

Code example source: org.apache.hadoop/hadoop-hdfs

static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr,
  boolean isRawPath)
  throws AccessControlException {
 final boolean isSuperUser = pc.isSuperUser();
 if (xAttr.getNameSpace() == XAttr.NameSpace.USER || 
   (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
  return;
 }
 if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) {
  return;
 }
 if (XAttrHelper.getPrefixedName(xAttr).
   equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
  if (xAttr.getValue() != null) {
   throw new AccessControlException("Attempt to set a value for '" +
     SECURITY_XATTR_UNREADABLE_BY_SUPERUSER +
     "'. Values are not allowed for this xattr.");
  }
  return;
 }
 throw new AccessControlException("User doesn't have permission for xattr: "
   + XAttrHelper.getPrefixedName(xAttr));
}

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: parse the per-file encryption info serialized into the xattr value.
HdfsProtos.PerFileEncryptionInfoProto fileProto =
   HdfsProtos.PerFileEncryptionInfoProto.parseFrom(
     fileXAttr.getValue());
 return PBHelperClient.convert(fileProto, suite, version, keyName);
} catch (InvalidProtocolBufferException e) {

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: decode the erasure coding policy name stored in the xattr value.
XAttr xattr = xaf.getXAttr(XATTR_ERASURECODING_POLICY);
if (xattr != null) {
 ByteArrayInputStream bIn = new ByteArrayInputStream(xattr.getValue());
 DataInputStream dIn = new DataInputStream(bIn);
 String ecPolicyName = WritableUtils.readString(dIn);

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: rebuild the encryption zone info from the protobuf bytes in the xattr value.
try {
 final HdfsProtos.ZoneEncryptionInfoProto ezProto =
   HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xAttr.getValue());
 return new EncryptionZoneInt(
   inode.getId(), PBHelperClient.convert(ezProto.getSuite()),

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: parse the zone encryption info from the xattr value and register the encryption zone.
HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue());
fsd.ezManager.addEncryptionZone(inode.getId(),
  PBHelperClient.convert(ezProto.getSuite()),

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt: read the erasure coding policy name from the xattr value.
if (xattr != null) {
 ByteArrayInputStream bins =
   new ByteArrayInputStream(xattr.getValue());
 DataInputStream din = new DataInputStream(bins);
 String ecPolicyName = WritableUtils.readString(din);

Code example source: org.apache.hadoop/hadoop-hdfs

private void addEncryptionZone(INodeWithAdditionalFields inode,
  XAttrFeature xaf) {
 if (xaf == null) {
  return;
 }
 XAttr xattr = xaf.getXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE);
 if (xattr == null) {
  return;
 }
 try {
  final HdfsProtos.ZoneEncryptionInfoProto ezProto =
    HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue());
  ezManager.unprotectedAddEncryptionZone(inode.getId(),
    PBHelperClient.convert(ezProto.getSuite()),
    PBHelperClient.convert(ezProto.getCryptoProtocolVersion()),
    ezProto.getKeyName());
  if (ezProto.hasReencryptionProto()) {
   final ReencryptionInfoProto reProto = ezProto.getReencryptionProto();
   // inodes parents may not be loaded if this is done during fsimage
   // loading so cannot set full path now. Pass in null to indicate that.
   ezManager.getReencryptionStatus()
     .updateZoneStatus(inode.getId(), null, reProto);
  }
 } catch (InvalidProtocolBufferException e) {
  NameNode.LOG.warn("Error parsing protocol buffer of " +
    "EZ XAttr " + xattr.getName() + " dir:" + inode.getFullPathName());
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

private static Map<String, Object> toJsonMap(final XAttr xAttr,
  final XAttrCodec encoding) throws IOException {
 if (xAttr == null) {
  return null;
 }
 final Map<String, Object> m = new TreeMap<String, Object>();
 m.put("name", XAttrHelper.getPrefixName(xAttr));
 m.put("value", xAttr.getValue() != null ? 
   XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
 return m;
}

Code example source: io.prestosql.hadoop/hadoop-apache

@Override
public byte getLocalStoragePolicyID() {
 XAttrFeature f = getXAttrFeature();
 ImmutableList<XAttr> xattrs = f == null ? ImmutableList.<XAttr> of() : f
   .getXAttrs();
 for (XAttr xattr : xattrs) {
  if (BlockStoragePolicySuite.isStoragePolicyXAttr(xattr)) {
   return (xattr.getValue())[0];
  }
 }
 return ID_UNSPECIFIED;
}

Code example source: ch.cern.hadoop/hadoop-hdfs

public static XAttrProto convertXAttrProto(XAttr a) {
 XAttrProto.Builder builder = XAttrProto.newBuilder();
 builder.setNamespace(convert(a.getNameSpace()));
 if (a.getName() != null) {
  builder.setName(a.getName());
 }
 if (a.getValue() != null) {
  builder.setValue(getByteString(a.getValue()));
 }
 return builder.build();
}

Code example source: org.apache.hadoop/hadoop-hdfs-client

public static XAttrProto convertXAttrProto(XAttr a) {
 XAttrProto.Builder builder = XAttrProto.newBuilder();
 builder.setNamespace(convert(a.getNameSpace()));
 if (a.getName() != null) {
  builder.setName(a.getName());
 }
 if (a.getValue() != null) {
  builder.setValue(getByteString(a.getValue()));
 }
 return builder.build();
}

Code example source: io.prestosql.hadoop/hadoop-apache

public static XAttrProto convertXAttrProto(XAttr a) {
 XAttrProto.Builder builder = XAttrProto.newBuilder();
 builder.setNamespace(convert(a.getNameSpace()));
 if (a.getName() != null) {
  builder.setName(a.getName());
 }
 if (a.getValue() != null) {
  builder.setValue(getByteString(a.getValue()));
 }
 return builder.build();
}
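A pattern that recurs throughout the examples above is a null check on getValue() before the bytes are encoded or measured, since an xattr may legitimately carry no value. The helper below is a minimal sketch of that pattern, not part of Hadoop itself; the class and method names are made up, while XAttrCodec.encodeValue and XAttrCodec.HEX are the same calls used in the examples above and are assumed to be on the classpath via the HDFS client libraries.

import java.io.IOException;

import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrCodec;

public final class XAttrValueUtil {
 private XAttrValueUtil() {
 }

 /** Encode the xattr value as HEX, or return null when no value is set. */
 public static String encodeValueOrNull(XAttr xAttr) throws IOException {
  byte[] value = xAttr.getValue();
  return value != null ? XAttrCodec.encodeValue(value, XAttrCodec.HEX) : null;
 }

 /** Length of the value in bytes, treating a missing value as zero. */
 public static int valueLength(XAttr xAttr) {
  return xAttr.getValue() == null ? 0 : xAttr.getValue().length;
 }
}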
