gpt4 book ai didi

c - 如何制作灵活大小的哈希表

转载 作者:行者123 更新时间:2023-12-01 02:46:57 26 4
gpt4 key购买 nike

我想使用这段过滤大文件的代码。目前我正在硬编码哈希表的大小,假设输入有 5000 万行。我希望总行数为哈希表大小的 37%。这是目前实现的,因为 0x8000000 的 37% 大约是 5000 万。但是,实际上在开始处理之前我并不知道输入的大小。如何修改代码以自动调整哈希表大小,使其大小合适?速度也很重要,因为过滤的目的是节省时间。

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

// Should be 37% occupied with 50m entries
// NOTE(review): sized for ~50M input rows; must remain a power of two
// so that the MASK-based wrap-around of the probe sequence is valid.
#define TABLE_SIZE 0x8000000
// Index mask; correct only because TABLE_SIZE is a power of two.
#define MASK (TABLE_SIZE - 1)
// Bytes requested from the input file per fread() call.
#define BUFFER_SIZE 16384
// Sentinel returned by readChar() when the input is exhausted.
#define END_OF_FILE (-1)
// Marks an empty table slot: memset(table, 255, ...) sets every
// int32_t field to -1, so an unused slot has a == DEFAULT_VALUE.
#define DEFAULT_VALUE (-1)

// One input record: the integer triple "a b t" parsed from one line.
typedef struct Row {
int32_t a; // first field; the hash key used by insert()
int32_t b; // second field; matched against other rows' `a` in main()
int32_t t; // third field; rows match when 0 <= that.t - this.t < 100
} Row;

// Multiplicative hash of the key `a`.
//
// BUG FIX: the original computed `a * 428916315` in signed int32_t
// arithmetic, which overflows for most inputs — signed overflow is
// undefined behavior in C (CERT INT32-C). Unsigned multiplication
// wraps modulo 2^32 by definition, which is exactly the mixing the
// multiplicative hash relies on, so do the multiply unsigned and
// convert back.
int32_t hash(int32_t a) {
    return (int32_t)((uint32_t)a * 428916315u);
}

// Insert `row` into the open-addressed hash table, keyed on row.a.
// Probing uses increasing (triangular) steps; because TABLE_SIZE is a
// power of two this sequence eventually reaches every slot. If no
// empty slot is found the row is silently dropped.
void insert(Row * table, Row row) {
    long slot = hash(row.a) & MASK; // Entries are hashed on a
    for (long step = 0; step <= TABLE_SIZE; step++) {
        slot = (slot + step) & MASK;
        if (table[slot].a == DEFAULT_VALUE) {
            table[slot] = row;
            return;
        }
    }
}

// Return the next byte of `input`, refilling `buffer` from the file
// whenever it has been drained. Despite the names, `*limit` is the
// read cursor within the buffer and `*pos` is the count of valid
// bytes in it (the naming is historical; kept for the callers).
// Returns END_OF_FILE once no more data can be read.
int readChar(FILE * input, char * buffer, int * pos, int * limit) {
    if (*limit >= *pos) {
        // Buffer exhausted: reset the cursor and refill from the file.
        *limit = 0;
        *pos = fread(buffer, sizeof(char), BUFFER_SIZE, input);
        if (*pos <= *limit) {
            return END_OF_FILE; // fread produced nothing: end of input
        }
    }
    return buffer[(*limit)++];
}

// Parse space/newline-delimited integer triples "a b t\n" from
// `fileName` and insert each completed row into the hash table.
// Parsing is a tiny state machine: `currentElement` points at the
// field currently being accumulated digit by digit.
void readAll(char * fileName, Row * table) {
    char * buffer = malloc(BUFFER_SIZE);
    if (buffer == NULL) {
        fprintf(stderr, "readAll: out of memory\n");
        return;
    }

    FILE * input = fopen(fileName, "rb");
    if (input == NULL) {
        // BUG FIX: the original dereferenced a NULL FILE* when the
        // file could not be opened.
        fprintf(stderr, "readAll: cannot open %s\n", fileName);
        free(buffer);
        return;
    }

    int limit = 0;
    int pos = 0;
    int lastRead;

    // BUG FIX: the row must start zeroed — the original accumulated
    // digits onto an uninitialized value (undefined behavior) for the
    // very first row of the file.
    Row currentRow = {0, 0, 0};
    // Accumulation is done through uint32_t so digit accumulation
    // wraps instead of hitting signed overflow; the explicit casts fix
    // the original's implicit int32_t* -> uint32_t* conversion.
    uint32_t * currentElement = (uint32_t *) &(currentRow.a);

    // As with the Scala version, we read rows with an FSM. We can
    // roll up some of the code using the `currentElement` pointer
    while (1) {
        switch (lastRead = readChar(input, buffer, &pos, &limit)) {
        case END_OF_FILE:
            fclose(input);
            free(buffer); // BUG FIX: buffer was leaked on return
            return;
        case ' ':
            // Advance to the next field of the triple.
            if (currentElement == (uint32_t *) &(currentRow.a))
                currentElement = (uint32_t *) &(currentRow.b);
            else
                currentElement = (uint32_t *) &(currentRow.t);
            break;
        case '\n':
            // Row complete: store it and reset the accumulator state.
            insert(table, currentRow);
            currentRow.a = 0;
            currentRow.b = 0;
            currentRow.t = 0;
            currentElement = (uint32_t *) &(currentRow.a);
            break;
        default:
            // Accumulate one decimal digit onto the current field.
            *currentElement = *currentElement * 10 + (uint32_t)(lastRead - '0');
            break;
        }
    }
}

int main() {
Row* table = (Row*) malloc(sizeof(Row) * TABLE_SIZE);
memset(table, 255, sizeof(Row) * TABLE_SIZE);

readAll("test.file", table);

// We'll iterate through our hash table inline - passing a callback
// is trickier in C than in Scala, so we just don't bother
for (size_t i = 0; i < TABLE_SIZE; i++) {
Row * this = table + i;
if (this->a != DEFAULT_VALUE) {
// Lookup entries `that`, where `that.a == this.b`
long loc = hash(this->b) & MASK;
long inc = 0;
while (inc <= TABLE_SIZE) {
loc = (loc + inc) & MASK;
inc++;
Row * that = table + loc;
if ((this->b == that->a) && (0 <= that->t - this->t) && (that->t - this->t < 100)) {
// Conditions are symmetric, so we output both rows
printf("%d %d %d\n", this->a, this->b, this->t);
printf("%d %d %d\n", that->a, that->b, that->t);
}
else if (that->b == DEFAULT_VALUE) break;
}
}
}

free(table);
return 0;
}

最佳答案

读取文件的开头一段(比如前 100 KB),统计其中的换行符数量。由于文件总大小可以 O(1) 得到,据此按比例外推,即可估计整个文件的总行数。如果输入文件相当规则,这个估计就足够准确,可以用来确定哈希表的大小。

关于c - 如何制作灵活大小的哈希表,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/25685708/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com