Merge branch 'ps/object-read-stream'

The "git_istream" abstraction has been revamped to make it easier
to interface with the pluggable object database design.

* ps/object-read-stream:
  streaming: drop redundant type and size pointers
  streaming: move into object database subsystem
  streaming: refactor interface to be object-database-centric
  streaming: move logic to read packed objects streams into backend
  streaming: move logic to read loose objects streams into backend
  streaming: make the `odb_read_stream` definition public
  streaming: get rid of `the_repository`
  streaming: rely on object sources to create object stream
  packfile: introduce function to read object info from a store
  streaming: move zlib stream into backends
  streaming: create structure for filtered object streams
  streaming: create structure for packed object streams
  streaming: create structure for loose object streams
  streaming: create structure for in-core object streams
  streaming: allocate stream inside the backend-specific logic
  streaming: explicitly pass packfile info when streaming a packed object
  streaming: propagate final object type via the stream
  streaming: drop the `open()` callback function
  streaming: rename `git_istream` into `odb_read_stream`
This commit is contained in:
Junio C Hamano
2025-12-16 11:08:34 +09:00
20 changed files with 779 additions and 729 deletions

View File

@@ -20,6 +20,7 @@
#include "tree.h"
#include "object-file.h"
#include "odb.h"
#include "odb/streaming.h"
#include "midx.h"
#include "commit-graph.h"
#include "pack-revindex.h"
@@ -885,22 +886,6 @@ struct packed_git *packfile_store_load_pack(struct packfile_store *store,
return p;
}
/*
 * Bump the mtime of the pack that contains `oid` so the object counts
 * as recently used (presumably so pruning logic treats it as fresh —
 * TODO confirm against the callers).
 *
 * Returns 1 when the object is (now) freshened, 0 when it was not
 * found in any pack, lives in a cruft pack, or utime(2) failed.
 */
int packfile_store_freshen_object(struct packfile_store *store,
const struct object_id *oid)
{
struct pack_entry e;
if (!find_pack_entry(store->odb->repo, oid, &e))
return 0;
/* Cruft packs are deliberately excluded from freshening. */
if (e.p->is_cruft)
return 0;
/* Pack already marked freshened; skip the redundant utime(2) call. */
if (e.p->freshened)
return 1;
if (utime(e.p->pack_name, NULL))
return 0;
/* Cache the result so subsequent calls short-circuit above. */
e.p->freshened = 1;
return 1;
}
void (*report_garbage)(unsigned seen_bits, const char *path);
static void report_helper(const struct string_list *list,
@@ -2105,7 +2090,9 @@ static int fill_pack_entry(const struct object_id *oid,
return 1;
}
int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e)
static int find_pack_entry(struct repository *r,
const struct object_id *oid,
struct pack_entry *e)
{
struct packfile_list_entry *l;
@@ -2130,6 +2117,57 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
return 0;
}
int packfile_store_freshen_object(struct packfile_store *store,
const struct object_id *oid)
{
struct pack_entry e;
if (!find_pack_entry(store->odb->repo, oid, &e))
return 0;
if (e.p->is_cruft)
return 0;
if (e.p->freshened)
return 1;
if (utime(e.p->pack_name, NULL))
return 0;
e.p->freshened = 1;
return 1;
}
/*
 * Look up `oid` in the packfile store and fill in the requested fields
 * of `oi` (e.g. the size requested via `oi->sizep`, plus the packed
 * location details when `oi->whence` ends up as OI_PACKED).
 *
 * Returns 0 on success, 1 when the object was not found in any pack,
 * and -1 when the packed representation could not be parsed (the
 * entry is then marked bad so later lookups skip it).
 */
int packfile_store_read_object_info(struct packfile_store *store,
const struct object_id *oid,
struct object_info *oi,
unsigned flags UNUSED)
{
/*
 * Sentinel for callers that only care about existence.
 * NOTE(review): this static is function-local, so an external caller
 * cannot normally pass its address; confirm how `oi == &blank_oi`
 * can ever be true at the call sites.
 */
static struct object_info blank_oi = OBJECT_INFO_INIT;
struct pack_entry e;
int rtype;
if (!find_pack_entry(store->odb->repo, oid, &e))
return 1;
/*
 * We know that the caller doesn't actually need the
 * information below, so return early.
 */
if (oi == &blank_oi)
return 0;
/* Parse the object header; negative means corrupt/unreadable data. */
rtype = packed_object_info(store->odb->repo, e.p, e.offset, oi);
if (rtype < 0) {
mark_bad_packed_object(e.p, oid);
return -1;
}
if (oi->whence == OI_PACKED) {
oi->u.packed.offset = e.offset;
oi->u.packed.pack = e.p;
/* Record whether the on-disk representation is delta-encoded. */
oi->u.packed.is_delta = (rtype == OBJ_REF_DELTA ||
rtype == OBJ_OFS_DELTA);
}
return 0;
}
static void maybe_invalidate_kept_pack_cache(struct repository *r,
unsigned flags)
{
@@ -2400,3 +2438,130 @@ void packfile_store_close(struct packfile_store *store)
close_pack(e->pack);
}
}
/*
 * Backend-specific read stream for a non-delta object stored in a
 * packfile. Embeds the generic `odb_read_stream` as its first member
 * so pointers to the two types can be converted back and forth.
 */
struct odb_packed_read_stream {
struct odb_read_stream base;
struct packed_git *pack; /* pack that contains the object */
git_zstream z; /* zlib state; only valid while INUSE */
enum {
ODB_PACKED_READ_STREAM_UNINITIALIZED, /* inflate not yet set up */
ODB_PACKED_READ_STREAM_INUSE, /* inflate in progress */
ODB_PACKED_READ_STREAM_DONE, /* hit Z_STREAM_END; fully drained */
ODB_PACKED_READ_STREAM_ERROR, /* inflate failed; stream unusable */
} z_state;
off_t pos; /* offset of the next compressed byte in the pack */
};
/*
 * Read up to `sz` bytes of inflated object data into `buf`.
 *
 * The zlib stream is initialized lazily on the first call. Each
 * iteration maps a window of compressed pack data via use_pack(),
 * inflates into the caller's buffer, and advances `st->pos` by the
 * number of input bytes zlib consumed.
 *
 * Returns the number of bytes produced (0 once the stream has been
 * fully drained), or -1 on inflation failure.
 */
static ssize_t read_istream_pack_non_delta(struct odb_read_stream *_st, char *buf,
size_t sz)
{
struct odb_packed_read_stream *st = (struct odb_packed_read_stream *)_st;
size_t total_read = 0;
switch (st->z_state) {
case ODB_PACKED_READ_STREAM_UNINITIALIZED:
/* First read: set up the inflate state. */
memset(&st->z, 0, sizeof(st->z));
git_inflate_init(&st->z);
st->z_state = ODB_PACKED_READ_STREAM_INUSE;
break;
case ODB_PACKED_READ_STREAM_DONE:
return 0;
case ODB_PACKED_READ_STREAM_ERROR:
return -1;
case ODB_PACKED_READ_STREAM_INUSE:
break;
}
while (total_read < sz) {
int status;
struct pack_window *window = NULL;
unsigned char *mapped;
/* Map the next chunk of compressed bytes starting at st->pos. */
mapped = use_pack(st->pack, &window,
st->pos, &st->z.avail_in);
st->z.next_out = (unsigned char *)buf + total_read;
st->z.avail_out = sz - total_read;
st->z.next_in = mapped;
status = git_inflate(&st->z, Z_FINISH);
/* Advance by however much input zlib actually consumed. */
st->pos += st->z.next_in - mapped;
total_read = st->z.next_out - (unsigned char *)buf;
unuse_pack(&window);
if (status == Z_STREAM_END) {
git_inflate_end(&st->z);
st->z_state = ODB_PACKED_READ_STREAM_DONE;
break;
}
/*
 * Unlike the loose object case, we do not have to worry here
 * about running out of input bytes and spinning infinitely. If
 * we get Z_BUF_ERROR due to too few input bytes, then we'll
 * replenish them in the next use_pack() call when we loop. If
 * we truly hit the end of the pack (i.e., because it's corrupt
 * or truncated), then use_pack() catches that and will die().
 */
if (status != Z_OK && status != Z_BUF_ERROR) {
git_inflate_end(&st->z);
st->z_state = ODB_PACKED_READ_STREAM_ERROR;
return -1;
}
}
return total_read;
}
/*
 * Tear down a packed-object read stream. The zlib state only holds
 * resources while inflation is in progress; in every other state it
 * was either never initialized or has already been ended.
 */
static int close_istream_pack_non_delta(struct odb_read_stream *_st)
{
	struct odb_packed_read_stream *stream =
		(struct odb_packed_read_stream *)_st;

	if (stream->z_state != ODB_PACKED_READ_STREAM_INUSE)
		return 0;
	git_inflate_end(&stream->z);
	return 0;
}
int packfile_store_read_object_stream(struct odb_read_stream **out,
struct packfile_store *store,
const struct object_id *oid)
{
struct odb_packed_read_stream *stream;
struct pack_window *window = NULL;
struct object_info oi = OBJECT_INFO_INIT;
enum object_type in_pack_type;
unsigned long size;
oi.sizep = &size;
if (packfile_store_read_object_info(store, oid, &oi, 0) ||
oi.u.packed.is_delta ||
repo_settings_get_big_file_threshold(store->odb->repo) >= size)
return -1;
in_pack_type = unpack_object_header(oi.u.packed.pack,
&window,
&oi.u.packed.offset,
&size);
unuse_pack(&window);
switch (in_pack_type) {
default:
return -1; /* we do not do deltas for now */
case OBJ_COMMIT:
case OBJ_TREE:
case OBJ_BLOB:
case OBJ_TAG:
break;
}
CALLOC_ARRAY(stream, 1);
stream->base.close = close_istream_pack_non_delta;
stream->base.read = read_istream_pack_non_delta;
stream->base.type = in_pack_type;
stream->base.size = size;
stream->z_state = ODB_PACKED_READ_STREAM_UNINITIALIZED;
stream->pack = oi.u.packed.pack;
stream->pos = oi.u.packed.offset;
*out = &stream->base;
return 0;
}