diff --git a/diffcore-delta.c b/diffcore-delta.c
index 70bacff83..e670f8512 100644
--- a/diffcore-delta.c
+++ b/diffcore-delta.c
@@ -5,79 +5,220 @@
 /*
  * Idea here is very simple.
  *
- * We have total of (sz-N+1) N-byte overlapping sequences in buf whose
- * size is sz.  If the same N-byte sequence appears in both source and
- * destination, we say the byte that starts that sequence is shared
- * between them (i.e. copied from source to destination).
+ * Almost all data we are interested in is text, but sometimes we have
+ * to deal with binary data.  So we cut the data into chunks delimited
+ * by an LF byte, or a 64-byte run, whichever comes first, and hash
+ * them.
  *
- * For each possible N-byte sequence, if the source buffer has more
- * instances of it than the destination buffer, that means the
- * difference are the number of bytes not copied from source to
- * destination.  If the counts are the same, everything was copied
- * from source to destination.  If the destination has more,
- * everything was copied, and destination added more.
+ * For each such chunk, if the source buffer has more instances of it
+ * than the destination buffer, the difference is the number of bytes
+ * not copied from source to destination.  If the counts are the same,
+ * everything was copied from source to destination.  If the
+ * destination has more, everything was copied, and the destination
+ * added more.
  *
  * We are doing an approximation so we do not really have to waste
  * memory by actually storing the sequence.  We just hash them into
  * somewhere around 2^16 hashbuckets and count the occurrences.
- *
- * The length of the sequence is arbitrarily set to 8 for now.
  */
 
-#define HASHBASE 65537 /* next_prime(2^16) */
+/* Wild guess at the initial hash size */
+#define INITIAL_HASH_SIZE 9
+
+/* We leave more room in a smaller hash, but do not let it
+ * grow to leave too large an unused hole.
+ */
+#define INITIAL_FREE(sz_log2) ((1<<(sz_log2))*(sz_log2-3)/(sz_log2))
+
+/* A prime rather carefully chosen between 2^16..2^17, so that
+ * HASHBASE < INITIAL_FREE(17).  We want to keep the maximum hashtable
+ * size under the current 2<<17 maximum, which can hold this many
+ * different values before overflowing to a hashtable of size 2<<18.
+ */
+#define HASHBASE 107927
+
+struct spanhash {
+	unsigned int hashval;
+	unsigned int cnt;
+};
+struct spanhash_top {
+	int alloc_log2;
+	int free;
+	struct spanhash data[FLEX_ARRAY];
+};
+
+static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
+{
+	struct spanhash_top *new;
+	int i;
+	int osz = 1 << orig->alloc_log2;
+	int sz = osz << 1;
+
+	new = xmalloc(sizeof(*orig) + sizeof(struct spanhash) * sz);
+	new->alloc_log2 = orig->alloc_log2 + 1;
+	new->free = INITIAL_FREE(new->alloc_log2);
+	memset(new->data, 0, sizeof(struct spanhash) * sz);
+	for (i = 0; i < osz; i++) {
+		struct spanhash *o = &(orig->data[i]);
+		int bucket;
+		if (!o->cnt)
+			continue;
+		bucket = o->hashval & (sz - 1);
+		while (1) {
+			struct spanhash *h = &(new->data[bucket++]);
+			if (!h->cnt) {
+				h->hashval = o->hashval;
+				h->cnt = o->cnt;
+				new->free--;
+				break;
+			}
+			if (sz <= bucket)
+				bucket = 0;
+		}
+	}
+	free(orig);
+	return new;
+}
 
-static void hash_chars(unsigned char *buf, unsigned long sz, int *count)
+static struct spanhash_top *add_spanhash(struct spanhash_top *top,
+					 unsigned int hashval, int cnt)
 {
-	unsigned int accum1, accum2, i;
-
-	/* an 8-byte shift register made of accum1 and accum2.  New
-	 * bytes come at LSB of accum2, and shifted up to accum1
-	 */
-	for (i = accum1 = accum2 = 0; i < 7; i++, sz--) {
-		accum1 = (accum1 << 8) | (accum2 >> 24);
-		accum2 = (accum2 << 8) | *buf++;
+	int bucket, lim;
+	struct spanhash *h;
+
+	lim = (1 << top->alloc_log2);
+	bucket = hashval & (lim - 1);
+	while (1) {
+		h = &(top->data[bucket++]);
+		if (!h->cnt) {
+			h->hashval = hashval;
+			h->cnt = cnt;
+			top->free--;
+			if (top->free < 0)
+				return spanhash_rehash(top);
+			return top;
+		}
+		if (h->hashval == hashval) {
+			h->cnt += cnt;
+			return top;
+		}
+		if (lim <= bucket)
+			bucket = 0;
 	}
+}
+
+static int spanhash_cmp(const void *a_, const void *b_)
+{
+	const struct spanhash *a = a_;
+	const struct spanhash *b = b_;
+
+	/* A count of zero compares at the end. */
+	if (!a->cnt)
+		return !b->cnt ? 0 : 1;
+	if (!b->cnt)
+		return -1;
+	return a->hashval < b->hashval ? -1 :
+		a->hashval > b->hashval ? 1 : 0;
+}
+
+static struct spanhash_top *hash_chars(struct diff_filespec *one)
+{
+	int i, n;
+	unsigned int accum1, accum2, hashval;
+	struct spanhash_top *hash;
+	unsigned char *buf = one->data;
+	unsigned int sz = one->size;
+	int is_text = !diff_filespec_is_binary(one);
+
+	i = INITIAL_HASH_SIZE;
+	hash = xmalloc(sizeof(*hash) + sizeof(struct spanhash) * (1<<i));
+	hash->alloc_log2 = i;
+	hash->free = INITIAL_FREE(i);
+	memset(hash->data, 0, sizeof(struct spanhash) * (1<<i));
+
+	n = 0;
+	accum1 = accum2 = 0;
 	while (sz) {
-		accum1 = (accum1 << 8) | (accum2 >> 24);
-		accum2 = (accum2 << 8) | *buf++;
-		/* We want something that hashes permuted byte
-		 * sequences nicely; simpler hash like (accum1 ^
-		 * accum2) does not perform as well.
-		 */
-		i = (accum1 + accum2 * 0x61) % HASHBASE;
-		count[i]++;
+		unsigned int c = *buf++;
+		unsigned int old_1 = accum1;
 		sz--;
+
+		/* Ignore CR in CRLF sequence if text */
+		if (is_text && c == '\r' && sz && *buf == '\n')
+			continue;
+
+		accum1 = (accum1 << 7) ^ (accum2 >> 25);
+		accum2 = (accum2 << 7) ^ (old_1 >> 25);
+		accum1 += c;
+		if (++n < 64 && c != '\n')
+			continue;
+		hashval = (accum1 + accum2 * 0x61) % HASHBASE;
+		hash = add_spanhash(hash, hashval, n);
+		n = 0;
+		accum1 = accum2 = 0;
 	}
+	qsort(hash->data,
+	      1ul << hash->alloc_log2,
+	      sizeof(hash->data[0]),
+	      spanhash_cmp);
+	return hash;
 }
 
-int diffcore_count_changes(void *src, unsigned long src_size,
-			   void *dst, unsigned long dst_size,
+int diffcore_count_changes(struct diff_filespec *src,
+			   struct diff_filespec *dst,
+			   void **src_count_p,
+			   void **dst_count_p,
 			   unsigned long delta_limit,
 			   unsigned long *src_copied,
 			   unsigned long *literal_added)
 {
-	int *src_count, *dst_count, i;
+	struct spanhash *s, *d;
+	struct spanhash_top *src_count, *dst_count;
 	unsigned long sc, la;
 
-	if (src_size < 8 || dst_size < 8)
-		return -1;
-
-	src_count = xcalloc(HASHBASE * 2, sizeof(int));
-	dst_count = src_count + HASHBASE;
-	hash_chars(src, src_size, src_count);
-	hash_chars(dst, dst_size, dst_count);
-
+	src_count = dst_count = NULL;
+	if (src_count_p)
+		src_count = *src_count_p;
+	if (!src_count) {
+		src_count = hash_chars(src);
+		if (src_count_p)
+			*src_count_p = src_count;
+	}
+	if (dst_count_p)
+		dst_count = *dst_count_p;
+	if (!dst_count) {
+		dst_count = hash_chars(dst);
+		if (dst_count_p)
+			*dst_count_p = dst_count;
+	}
 	sc = la = 0;
-	for (i = 0; i < HASHBASE; i++) {
-		if (src_count[i] < dst_count[i]) {
-			la += dst_count[i] - src_count[i];
-			sc += src_count[i];
+
+	s = src_count->data;
+	d = dst_count->data;
+	for (;;) {
+		unsigned dst_cnt, src_cnt;
+		if (!s->cnt)
+			break; /* we checked all in src */
+		while (d->cnt) {
+			if (d->hashval >= s->hashval)
+				break;
+			d++;
 		}
-		else /* i.e. if (dst_count[i] <= src_count[i]) */
-			sc += dst_count[i];
+		src_cnt = s->cnt;
+		dst_cnt = d->hashval == s->hashval ? d->cnt : 0;
+		if (src_cnt < dst_cnt) {
+			la += dst_cnt - src_cnt;
+			sc += src_cnt;
+		}
+		else
+			sc += dst_cnt;
+		s++;
 	}
+
+	if (!src_count_p)
+		free(src_count);
+	if (!dst_count_p)
+		free(dst_count);
 	*src_copied = sc;
 	*literal_added = la;
-	free(src_count);
 	return 0;
 }
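
The scheme above is easy to experiment with outside of git. The following standalone C program is a minimal sketch of the same chunk-hash-count idea, not git's code: the growable spanhash_top is replaced by a fixed-size table with linear probing (it assumes the table never fills up), the CRLF skipping and the qsort-and-merge walk are left out in favor of direct probing, and every name in it (struct span, find_slot, hash_chunks) is invented for this example.

#include <stdio.h>
#include <string.h>

#define HASHBASE 107927
#define TABLE_SZ (1u << 12)	/* fixed size; the real code grows its table */

struct span {
	unsigned hashval;
	unsigned cnt;		/* bytes seen in chunks with this hash */
};

/* Linear probe: return the slot holding hv, or the empty slot where
 * it would go.  Assumes the table never fills up. */
static unsigned find_slot(const struct span *tab, unsigned hv)
{
	unsigned b = hv & (TABLE_SZ - 1);
	while (tab[b].cnt && tab[b].hashval != hv)
		b = (b + 1) & (TABLE_SZ - 1);
	return b;
}

/* Cut buf into chunks ending at LF or at 64 bytes, whichever comes
 * first, hash each chunk with the same mixing step as the patch, and
 * credit the chunk's byte count to its hash bucket. */
static void hash_chunks(const unsigned char *buf, size_t sz, struct span *tab)
{
	unsigned accum1 = 0, accum2 = 0, n = 0;

	while (sz--) {
		unsigned c = *buf++, old_1 = accum1;

		accum1 = (accum1 << 7) ^ (accum2 >> 25);
		accum2 = (accum2 << 7) ^ (old_1 >> 25);
		accum1 += c;
		if (++n < 64 && c != '\n')
			continue;	/* chunk not finished yet */

		unsigned hv = (accum1 + accum2 * 0x61) % HASHBASE;
		unsigned b = find_slot(tab, hv);
		tab[b].hashval = hv;
		tab[b].cnt += n;	/* count bytes, not chunks */
		n = accum1 = accum2 = 0;
	}
}

int main(void)
{
	static struct span s[TABLE_SZ], d[TABLE_SZ];
	const char *src = "one\ntwo\nthree\n";
	const char *dst = "one\none\ntwo\nTHREE\n";
	unsigned long copied = 0, added = 0;

	hash_chunks((const unsigned char *)src, strlen(src), s);
	hash_chunks((const unsigned char *)dst, strlen(dst), d);

	/* Same accounting as diffcore_count_changes: walk the source's
	 * chunks and compare byte counts per hash. */
	for (unsigned i = 0; i < TABLE_SZ; i++) {
		if (!s[i].cnt)
			continue;
		unsigned dst_cnt = d[find_slot(d, s[i].hashval)].cnt;
		if (s[i].cnt < dst_cnt) {
			added += dst_cnt - s[i].cnt;
			copied += s[i].cnt;
		} else
			copied += dst_cnt;
	}
	printf("src_copied=%lu literal_added=%lu\n", copied, added);
	return 0;
}

Compiled with cc -std=c99, this should print src_copied=8 literal_added=4: the "one\n" and "two\n" chunks account for 8 copied bytes, and the second "one\n" in the destination shows up as 4 literally added bytes. Note that, exactly as in the patch's loop, chunks that exist only in the destination ("THREE\n" here) never reach literal_added, because the walk is driven by the source table.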