fscache: fscache takes an initial size
Update enable_fscache() to take an optional initial size parameter which is
used to initialize the hashmap so that it can avoid having to rehash as
additional entries are added.

Add a separate disable_fscache() macro to make the code clearer and easier
to read.

Signed-off-by: Ben Peart <benpeart@microsoft.com>
committed by Johannes Schindelin
parent d569fecba1
commit be35dedc84
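As a reading aid, here is a minimal sketch of the call pattern this change moves callers to. It assumes git's internal headers (git-compat-util.h and, on Windows, compat/win32/fscache.h); refresh_one_entry() is a hypothetical placeholder for the lstat()-heavy work, not code from this commit.

    /* Pre-size the cache when the caller knows roughly how many paths
     * it is about to stat, e.g. one per index entry: */
    enable_fscache(istate->cache_nr);

    for (i = 0; i < istate->cache_nr; i++)
        refresh_one_entry(istate, i);   /* hypothetical lstat()-heavy work */

    /* Tear the cache down again. Note that enable_fscache(0) no longer
     * means "disable"; it now means "enable with no size hint". */
    disable_fscache();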
builtin/add.c
@@ -461,7 +461,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
 
 	die_path_inside_submodule(&the_index, &pathspec);
 
-	enable_fscache(1);
+	enable_fscache(0);
 	/* We do not really re-read the index but update the up-to-date flags */
 	preload_index(&the_index, &pathspec);
 
builtin/checkout.c
@@ -360,7 +360,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 	state.istate = &the_index;
 
 	enable_delayed_checkout(&state);
-	enable_fscache(1);
+	enable_fscache(active_nr);
 	for (pos = 0; pos < active_nr; pos++) {
 		struct cache_entry *ce = active_cache[pos];
 		if (ce->ce_flags & CE_MATCHED) {
@@ -375,7 +375,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 			pos = skip_same_name(ce, pos) - 1;
 		}
 	}
-	enable_fscache(0);
+	disable_fscache();
 	errs |= finish_delayed_checkout(&state);
 
 	if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
builtin/commit.c
@@ -1376,7 +1376,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
 		       PATHSPEC_PREFER_FULL,
 		       prefix, argv);
 
-	enable_fscache(1);
+	enable_fscache(0);
 	read_cache_preload(&s.pathspec);
 	refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL);
 
@@ -1410,7 +1410,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
 	s.prefix = prefix;
 
 	wt_status_print(&s);
-	enable_fscache(0);
+	disable_fscache();
 	return 0;
 }
compat/win32/fscache.c
@@ -401,7 +401,7 @@ static struct fsentry *fscache_get(struct fsentry *key)
  * Enables or disables the cache. Note that the cache is read-only, changes to
  * the working directory are NOT reflected in the cache while enabled.
  */
-int fscache_enable(int enable)
+int fscache_enable(int enable, size_t initial_size)
 {
 	int result;
 
@@ -417,7 +417,11 @@ int fscache_enable(int enable)
 		InitializeCriticalSection(&mutex);
 		lstat_requests = opendir_requests = 0;
 		fscache_misses = fscache_requests = 0;
-		hashmap_init(&map, (hashmap_cmp_fn) fsentry_cmp, NULL, 0);
+		/*
+		 * avoid having to rehash by leaving room for the parent dirs.
+		 * '4' was determined empirically by testing several repos
+		 */
+		hashmap_init(&map, (hashmap_cmp_fn) fsentry_cmp, NULL, initial_size * 4);
 		initialized = 1;
 	}
 
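A rough illustration of the pre-sizing; the repository size below is made up, only the '* 4' factor comes from the hunk above.

    /*
     * With a working tree of, say, 50,000 index entries:
     *   enable_fscache(50000)  ->  hashmap_init(..., 50000 * 4)
     * The extra headroom is for the parent-directory entries the cache
     * also stores, so the table should not need to grow (and rehash)
     * while it is being populated.
     */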
compat/win32/fscache.h
@@ -1,8 +1,9 @@
 #ifndef FSCACHE_H
 #define FSCACHE_H
 
-int fscache_enable(int enable);
-#define enable_fscache(x) fscache_enable(x)
+int fscache_enable(int enable, size_t initial_size);
+#define enable_fscache(initial_size) fscache_enable(1, initial_size)
+#define disable_fscache() fscache_enable(0, 0)
 
 int fscache_enabled(const char *path);
 #define is_fscache_enabled(path) fscache_enabled(path)
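Spelled out, the reworked macros expand as follows at a call site (taken directly from the header above):

    enable_fscache(index->cache_nr);   /* fscache_enable(1, index->cache_nr) */
    enable_fscache(0);                 /* fscache_enable(1, 0): enabled, no size hint */
    disable_fscache();                 /* fscache_enable(0, 0) */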
fetch-pack.c
@@ -678,7 +678,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
 
 	save_commit_buffer = 0;
 
-	enable_fscache(1);
+	enable_fscache(0);
 	for (ref = *refs; ref; ref = ref->next) {
 		struct object *o;
 		unsigned int flags = OBJECT_INFO_QUICK;
@@ -708,7 +708,7 @@ static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
 				cutoff = commit->date;
 			}
 		}
-	enable_fscache(0);
+	disable_fscache();
 
 	oidset_clear(&loose_oid_set);
git-compat-util.h
@@ -1259,6 +1259,10 @@ static inline int is_missing_file_error(int errno_)
 #define enable_fscache(x) /* noop */
 #endif
 
+#ifndef disable_fscache
+#define disable_fscache() /* noop */
+#endif
+
 #ifndef is_fscache_enabled
 #define is_fscache_enabled(path) (0)
 #endif
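With these no-op fallbacks, call sites such as preload-index.c and read-cache.c can use disable_fscache() unconditionally; on platforms that do not build the Win32 fscache the macro compiles away, just like the existing enable_fscache() stub.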
preload-index.c
@@ -91,7 +91,7 @@ void preload_index(struct index_state *index, const struct pathspec *pathspec)
 	offset = 0;
 	work = DIV_ROUND_UP(index->cache_nr, threads);
 	memset(&data, 0, sizeof(data));
-	enable_fscache(1);
+	enable_fscache(index->cache_nr);
 	for (i = 0; i < threads; i++) {
 		struct thread_data *p = data+i;
 		p->index = index;
@@ -109,7 +109,7 @@ void preload_index(struct index_state *index, const struct pathspec *pathspec)
 			die("unable to join threaded lstat");
 	}
 	trace_performance_since(start, "preload index");
-	enable_fscache(0);
+	disable_fscache();
 }
 #endif
read-cache.c
@@ -1483,7 +1483,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 	typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
 	added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
 	unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
-	enable_fscache(1);
+	enable_fscache(0);
 	for (i = 0; i < istate->cache_nr; i++) {
 		struct cache_entry *ce, *new_entry;
 		int cache_errno = 0;
@@ -1548,7 +1548,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 
 		replace_index_entry(istate, i, new_entry);
 	}
-	enable_fscache(0);
+	disable_fscache();
 	trace_performance_since(start, "refresh index");
 	return has_errors;
 }