gpt4 book ai didi

org.apache.spark.sql.catalyst.expressions.XXH64类的使用及代码示例

转载 作者:知者 更新时间:2024-03-19 14:08:40 28 4
gpt4 key购买 nike

本文整理了Java中org.apache.spark.sql.catalyst.expressions.XXH64类的一些代码示例,展示了XXH64类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。XXH64类的具体详情如下:
包路径:org.apache.spark.sql.catalyst.expressions.XXH64
类名称:XXH64

XXH64介绍

[英]xxHash64. A high quality and fast 64 bit hash code by Yann Collet and Mathias Westerdahl. The class below is modelled like its Murmur3_x86_32 cousin.

This was largely based on the following (original) C and Java implementations: https://github.com/Cyan4973/xxHash/blob/master/xxhash.c https://github.com/OpenHFT/Zero-Allocation-Hashing/blob/master/src/main/java/net/openhft/hashing/XxHash_r39.java https://github.com/airlift/slice/blob/master/src/main/java/io/airlift/slice/XxHash64.java
[中]xxHash64。由Yann Collet和Mathias Westerdahl编写的高质量、快速的64位哈希算法。下面这个类的建模方式与其Murmur3_x86_32表亲类似。
这主要基于以下(原始)C和Java实现:https://github.com/Cyan4973/xxHash/blob/master/xxhash.c https://github.com/OpenHFT/Zero-Allocation-Hashing/blob/master/src/main/java/net/openhft/hashing/XxHash_r39.java https://github.com/airlift/slice/blob/master/src/main/java/io/airlift/slice/XxHash64.java

代码示例

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

public long hashInt(int input) {
 // Instance-level convenience overload: hashes a 32-bit value using this
 // hasher's configured seed by delegating to the static implementation.
 final long result = hashInt(input, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

public long hashLong(long input) {
 // Instance-level convenience overload: hashes a 64-bit value using this
 // hasher's configured seed by delegating to the static implementation.
 final long result = hashLong(input, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

public long hashUnsafeBytes(Object base, long offset, int length) {
 // Instance-level convenience overload: hashes an arbitrary byte region
 // (addressed via base object + offset) with this hasher's configured seed.
 final long result = hashUnsafeBytes(base, offset, length, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

public static long hashUnsafeWords(Object base, long offset, int length, long seed) {
 // Word-aligned fast path: the region length must be an exact multiple of
 // eight bytes so the input can be consumed as whole 64-bit words.
 assert (length % 8 == 0) : "lengthInBytes must be a multiple of 8 (word-aligned)";
 // Fold the words into the running hash, then apply the final avalanche mix.
 return fmix(hashBytesByWords(base, offset, length, seed));
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.10

@Test
public void randomizedStressTest() {
 // Hash a large batch of random ints/longs: each input must hash
 // deterministically, and the long hashes must rarely collide.
 final int iterations = 65536;
 Random random = new Random();
 // Tracks distinct long hashes so the collision rate can be bounded.
 Set<Long> seen = new HashSet<>();
 for (int i = 0; i < iterations; i++) {
  int intValue = random.nextInt();
  long longValue = random.nextLong();
  // The same input must always produce the same hash.
  Assert.assertEquals(hasher.hashInt(intValue), hasher.hashInt(intValue));
  Assert.assertEquals(hasher.hashLong(longValue), hasher.hashLong(longValue));
  seen.add(hasher.hashLong(longValue));
 }
 // A very loose bound: at least 95% of the random longs hash distinctly.
 Assert.assertTrue(seen.size() > iterations * 0.95d);
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

public long hashUnsafeWords(Object base, long offset, int length) {
 // Instance-level convenience overload: hashes a word-aligned byte region
 // (addressed via base object + offset) with this hasher's configured seed.
 final long result = hashUnsafeWords(base, offset, length, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

public static long hashInt(int input, long seed) {
 // xxHash64 shortcut for a single 4-byte input: seed the accumulator with
 // the input length (4 bytes), fold in the zero-extended value, then
 // rotate/multiply before the final avalanche mix.
 long acc = seed + PRIME64_5 + 4L;
 acc ^= (input & 0xFFFFFFFFL) * PRIME64_1;
 acc = Long.rotateLeft(acc, 23) * PRIME64_2 + PRIME64_3;
 return fmix(acc);
}

代码示例来源:origin: org.apache.spark/spark-catalyst

@Test
public void randomizedStressTest() {
 // Hash a large batch of random ints/longs: each input must hash
 // deterministically, and the long hashes must rarely collide.
 final int iterations = 65536;
 Random random = new Random();
 // Tracks distinct long hashes so the collision rate can be bounded.
 Set<Long> seen = new HashSet<>();
 for (int i = 0; i < iterations; i++) {
  int intValue = random.nextInt();
  long longValue = random.nextLong();
  // The same input must always produce the same hash.
  Assert.assertEquals(hasher.hashInt(intValue), hasher.hashInt(intValue));
  Assert.assertEquals(hasher.hashLong(longValue), hasher.hashLong(longValue));
  seen.add(hasher.hashLong(longValue));
 }
 // A very loose bound: at least 95% of the random longs hash distinctly.
 Assert.assertTrue(seen.size() > iterations * 0.95d);
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.10

public long hashUnsafeWords(Object base, long offset, int length) {
 // Instance-level convenience overload: hashes a word-aligned byte region
 // (addressed via base object + offset) with this hasher's configured seed.
 final long result = hashUnsafeWords(base, offset, length, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst

public static long hashUnsafeWords(Object base, long offset, int length, long seed) {
 // Word-aligned fast path: the region length must be an exact multiple of
 // eight bytes so the input can be consumed as whole 64-bit words.
 assert (length % 8 == 0) : "lengthInBytes must be a multiple of 8 (word-aligned)";
 // Fold the words into the running hash, then apply the final avalanche mix.
 return fmix(hashBytesByWords(base, offset, length, seed));
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.10

public static long hashInt(int input, long seed) {
 // xxHash64 shortcut for a single 4-byte input: seed the accumulator with
 // the input length (4 bytes), fold in the zero-extended value, then
 // rotate/multiply before the final avalanche mix.
 long acc = seed + PRIME64_5 + 4L;
 acc ^= (input & 0xFFFFFFFFL) * PRIME64_1;
 acc = Long.rotateLeft(acc, 23) * PRIME64_2 + PRIME64_3;
 return fmix(acc);
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

@Test
public void randomizedStressTest() {
 // Hash a large batch of random ints/longs: each input must hash
 // deterministically, and the long hashes must rarely collide.
 final int iterations = 65536;
 Random random = new Random();
 // Tracks distinct long hashes so the collision rate can be bounded.
 Set<Long> seen = new HashSet<>();
 for (int i = 0; i < iterations; i++) {
  int intValue = random.nextInt();
  long longValue = random.nextLong();
  // The same input must always produce the same hash.
  Assert.assertEquals(hasher.hashInt(intValue), hasher.hashInt(intValue));
  Assert.assertEquals(hasher.hashLong(longValue), hasher.hashLong(longValue));
  seen.add(hasher.hashLong(longValue));
 }
 // A very loose bound: at least 95% of the random longs hash distinctly.
 Assert.assertTrue(seen.size() > iterations * 0.95d);
}

代码示例来源:origin: org.apache.spark/spark-catalyst

public long hashUnsafeWords(Object base, long offset, int length) {
 // Instance-level convenience overload: hashes a word-aligned byte region
 // (addressed via base object + offset) with this hasher's configured seed.
 final long result = hashUnsafeWords(base, offset, length, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.10

public long hashUnsafeBytes(Object base, long offset, int length) {
 // Instance-level convenience overload: hashes an arbitrary byte region
 // (addressed via base object + offset) with this hasher's configured seed.
 final long result = hashUnsafeBytes(base, offset, length, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.10

public long hashInt(int input) {
 // Instance-level convenience overload: hashes a 32-bit value using this
 // hasher's configured seed by delegating to the static implementation.
 final long result = hashInt(input, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.10

public long hashLong(long input) {
 // Instance-level convenience overload: hashes a 64-bit value using this
 // hasher's configured seed by delegating to the static implementation.
 final long result = hashLong(input, seed);
 return result;
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.10

public static long hashUnsafeWords(Object base, long offset, int length, long seed) {
 // Word-aligned fast path: the region length must be an exact multiple of
 // eight bytes so the input can be consumed as whole 64-bit words.
 assert (length % 8 == 0) : "lengthInBytes must be a multiple of 8 (word-aligned)";
 // Fold the words into the running hash, then apply the final avalanche mix.
 return fmix(hashBytesByWords(base, offset, length, seed));
}

代码示例来源:origin: org.apache.spark/spark-catalyst

public static long hashInt(int input, long seed) {
 // xxHash64 shortcut for a single 4-byte input: seed the accumulator with
 // the input length (4 bytes), fold in the zero-extended value, then
 // rotate/multiply before the final avalanche mix.
 long acc = seed + PRIME64_5 + 4L;
 acc ^= (input & 0xFFFFFFFFL) * PRIME64_1;
 acc = Long.rotateLeft(acc, 23) * PRIME64_2 + PRIME64_3;
 return fmix(acc);
}

代码示例来源:origin: org.apache.spark/spark-catalyst_2.11

@Test
public void randomizedStressTestBytes() {
 // Hash random word-aligned byte arrays: each array must hash
 // deterministically, and the hashes must rarely collide.
 final int iterations = 65536;
 Random random = new Random();
 // Tracks distinct hashes so the collision rate can be bounded.
 Set<Long> seen = new HashSet<>();
 for (int i = 0; i < iterations; i++) {
  // Lengths are multiples of 8 so hashUnsafeWords' alignment assert holds.
  int numBytes = random.nextInt(100) * 8;
  byte[] data = new byte[numBytes];
  random.nextBytes(data);
  // The same input must always produce the same hash.
  Assert.assertEquals(
      hasher.hashUnsafeWords(data, Platform.BYTE_ARRAY_OFFSET, numBytes),
      hasher.hashUnsafeWords(data, Platform.BYTE_ARRAY_OFFSET, numBytes));
  seen.add(hasher.hashUnsafeWords(data, Platform.BYTE_ARRAY_OFFSET, numBytes));
 }
 // A very loose bound: at least 95% of the random arrays hash distinctly.
 Assert.assertTrue(seen.size() > iterations * 0.95d);
}

代码示例来源:origin: org.apache.spark/spark-catalyst

public long hashUnsafeBytes(Object base, long offset, int length) {
 // Instance-level convenience overload: hashes an arbitrary byte region
 // (addressed via base object + offset) with this hasher's configured seed.
 final long result = hashUnsafeBytes(base, offset, length, seed);
 return result;
}

28 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com