mirror of
https://github.com/git/git.git
synced 2026-04-01 12:30:09 +02:00
Merge commit 'v1.7.0.2' into work/1.7.0.2
Conflicts: builtin-grep.c Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
This commit is contained in:
35
Documentation/RelNotes-1.7.0.1.txt
Normal file
35
Documentation/RelNotes-1.7.0.1.txt
Normal file
@@ -0,0 +1,35 @@
|
||||
Git v1.7.0.1 Release Notes
|
||||
==========================
|
||||
|
||||
Fixes since v1.7.0
|
||||
------------------
|
||||
|
||||
* In a freshly created repository "rev-parse HEAD^0" complained that
|
||||
it is a dangling symref, even though "rev-parse HEAD" didn't.
|
||||
|
||||
* "git show :no-such-name" tried to access the index without bounds
|
||||
check, leading to a potential segfault.
|
||||
|
||||
* Message from "git cherry-pick" was harder to read and use than necessary
|
||||
when it stopped due to conflicting changes.
|
||||
|
||||
* We referred to ".git/refs/" throughout the documentation when we
|
||||
meant to talk about abstract notion of "ref namespace". Because
|
||||
people's repositories often have packed refs these days, this was
|
||||
confusing.
|
||||
|
||||
* "git diff --output=/path/that/cannot/be/written" did not correctly
|
||||
error out.
|
||||
|
||||
* "git grep -e -pattern-that-begin-with-dash paths..." could not be
|
||||
spelled as "git grep -- -pattern-that-begin-with-dash paths..." which
|
||||
would be a GNU way to use "--" as "end of options".
|
||||
|
||||
* "git grep" compiled with threading support tried to access an
|
||||
uninitialized mutex on boxes with a single CPU.
|
||||
|
||||
* "git stash pop -q --index" failed because the unnecessary --index
|
||||
option was propagated to "git stash drop" that is internally run at the
|
||||
end.
|
||||
|
||||
And other minor fixes and documentation updates.
|
||||
40
Documentation/RelNotes-1.7.0.2.txt
Normal file
40
Documentation/RelNotes-1.7.0.2.txt
Normal file
@@ -0,0 +1,40 @@
|
||||
Git v1.7.0.2 Release Notes
|
||||
==========================
|
||||
|
||||
Fixes since v1.7.0.1
|
||||
--------------------
|
||||
|
||||
* GIT_PAGER was not honored consistently by some scripted Porcelains, most
|
||||
notably "git am".
|
||||
|
||||
* updating working tree files after telling git to add them to the
|
||||
index and while it is still working created garbage object files in
|
||||
the repository without diagnosing it as an error.
|
||||
|
||||
* "git bisect -- pathspec..." did not diagnose an error condition properly when
|
||||
the simplification with given pathspec made the history empty.
|
||||
|
||||
* "git rev-list --cherry-pick A...B" now has an obvious optimization when the
|
||||
histories haven't diverged (i.e. when one end is an ancestor of the other).
|
||||
|
||||
* "git diff --quiet -w" did not work as expected.
|
||||
|
||||
* "git fast-import" didn't work with a large input, as it lacked support
|
||||
for producing the pack index in v2 format.
|
||||
|
||||
* "git imap-send" didn't use CRLF line endings over the imap protocol
|
||||
when storing its payload to the draft box, violating RFC 3501.
|
||||
|
||||
* "git log --format='%w(x,y,z)%b'" and friends that rewrap message
|
||||
has been optimized for utf-8 payload.
|
||||
|
||||
* Error messages generated on the receiving end did not come back to "git
|
||||
push".
|
||||
|
||||
* "git status" in 1.7.0 lacked the optimization we used to have in 1.6.X series
|
||||
to speed up scanning of large working tree.
|
||||
|
||||
* "gitweb" did not diagnose parsing errors properly while reading its configuration
|
||||
file.
|
||||
|
||||
And other minor fixes and documentation updates.
|
||||
@@ -686,9 +686,7 @@ color.grep::
|
||||
|
||||
color.grep.match::
|
||||
Use customized color for matches. The value of this variable
|
||||
may be specified as in color.branch.<slot>. It is passed using
|
||||
the environment variables 'GREP_COLOR' and 'GREP_COLORS' when
|
||||
calling an external 'grep'.
|
||||
may be specified as in color.branch.<slot>.
|
||||
|
||||
color.interactive::
|
||||
When set to `always`, always use colors for interactive prompts
|
||||
|
||||
@@ -44,7 +44,7 @@ OPTIONS
|
||||
Remove everything in body before a scissors line (see
|
||||
linkgit:git-mailinfo[1]).
|
||||
|
||||
---no-scissors::
|
||||
--no-scissors::
|
||||
Ignore scissors lines (see linkgit:git-mailinfo[1]).
|
||||
|
||||
-q::
|
||||
|
||||
@@ -19,8 +19,9 @@ status if it is not.
|
||||
|
||||
A reference is used in git to specify branches and tags. A
|
||||
branch head is stored under the `$GIT_DIR/refs/heads` directory, and
|
||||
a tag is stored under the `$GIT_DIR/refs/tags` directory. git
|
||||
imposes the following rules on how references are named:
|
||||
a tag is stored under the `$GIT_DIR/refs/tags` directory (or, if refs
|
||||
are packed by `git gc`, as entries in the `$GIT_DIR/packed-refs` file).
|
||||
git imposes the following rules on how references are named:
|
||||
|
||||
. They can include slash `/` for hierarchical (directory)
|
||||
grouping, but no slash-separated component can begin with a
|
||||
|
||||
@@ -29,7 +29,7 @@ arguments will in addition merge the remote master branch into the
|
||||
current master branch, if any.
|
||||
|
||||
This default configuration is achieved by creating references to
|
||||
the remote branch heads under `$GIT_DIR/refs/remotes/origin` and
|
||||
the remote branch heads under `refs/remotes/origin` and
|
||||
by initializing `remote.origin.url` and `remote.origin.fetch`
|
||||
configuration variables.
|
||||
|
||||
|
||||
@@ -197,13 +197,13 @@ FROM UPSTREAM REBASE" section in linkgit:git-rebase[1].)
|
||||
Show untracked files (Default: 'all').
|
||||
+
|
||||
The mode parameter is optional, and is used to specify
|
||||
the handling of untracked files. The possible options are:
|
||||
the handling of untracked files.
|
||||
+
|
||||
The possible options are:
|
||||
+
|
||||
--
|
||||
- 'no' - Show no untracked files
|
||||
- 'normal' - Shows untracked files and directories
|
||||
- 'all' - Also shows individual files in untracked directories.
|
||||
--
|
||||
+
|
||||
See linkgit:git-config[1] for configuration variable
|
||||
used to change the default for when the option is not
|
||||
|
||||
@@ -45,10 +45,7 @@ OPTIONS
|
||||
|
||||
--max-pack-size=<n>::
|
||||
Maximum size of each output packfile.
|
||||
The default is 4 GiB as that is the maximum allowed
|
||||
packfile size (due to file format limitations). Some
|
||||
importers may wish to lower this, such as to ensure the
|
||||
resulting packfiles fit on CDs.
|
||||
The default is unlimited.
|
||||
|
||||
--big-file-threshold=<n>::
|
||||
Maximum size of a blob that fast-import will attempt to
|
||||
|
||||
@@ -18,7 +18,7 @@ higher level wrapper of this command, instead.
|
||||
Invokes 'git-upload-pack' on a possibly remote repository
|
||||
and asks it to send objects missing from this repository, to
|
||||
update the named heads. The list of commits available locally
|
||||
is found out by scanning local $GIT_DIR/refs/ and sent to
|
||||
is found out by scanning the local refs/ hierarchy and sent to
|
||||
'git-upload-pack' running on the other end.
|
||||
|
||||
This command degenerates to download everything to complete the
|
||||
@@ -44,8 +44,8 @@ OPTIONS
|
||||
locked against repacking.
|
||||
|
||||
--thin::
|
||||
Spend extra cycles to minimize the number of objects to be sent.
|
||||
Use it on slower connection.
|
||||
Fetch a "thin" pack, which records objects in deltified form based
|
||||
on objects not included in the pack to reduce network traffic.
|
||||
|
||||
--include-tag::
|
||||
If the remote side supports it, annotated tags objects will
|
||||
|
||||
@@ -22,12 +22,12 @@ SYNOPSIS
|
||||
[-A <post-context>] [-B <pre-context>] [-C <context>]
|
||||
[-f <file>] [-e] <pattern>
|
||||
[--and|--or|--not|(|)|-e <pattern>...] [<tree>...]
|
||||
[--] [<path>...]
|
||||
[--] [<pathspec>...]
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
Look for specified patterns in the working tree files, blobs
|
||||
registered in the index file, or given tree objects.
|
||||
Look for specified patterns in the tracked files in the work tree, blobs
|
||||
registered in the index file, or blobs in given tree objects.
|
||||
|
||||
|
||||
OPTIONS
|
||||
@@ -49,7 +49,7 @@ OPTIONS
|
||||
Don't match the pattern in binary files.
|
||||
|
||||
--max-depth <depth>::
|
||||
For each pathspec given on command line, descend at most <depth>
|
||||
For each <pathspec> given on command line, descend at most <depth>
|
||||
levels of directories. A negative value means no limit.
|
||||
|
||||
-w::
|
||||
@@ -168,12 +168,19 @@ OPTIONS
|
||||
|
||||
\--::
|
||||
Signals the end of options; the rest of the parameters
|
||||
are <path> limiters.
|
||||
are <pathspec> limiters.
|
||||
|
||||
<pathspec>...::
|
||||
If given, limit the search to paths matching at least one pattern.
|
||||
Both leading paths match and glob(7) patterns are supported.
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
git grep 'time_t' -- '*.[ch]'::
|
||||
Looks for `time_t` in all tracked .c and .h files in the working
|
||||
directory and its subdirectories.
|
||||
|
||||
git grep -e \'#define\' --and \( -e MAX_PATH -e PATH_MAX \)::
|
||||
Looks for a line that has `#define` and either `MAX_PATH` or
|
||||
`PATH_MAX`.
|
||||
|
||||
@@ -46,14 +46,10 @@ OPTIONS
|
||||
'git repack'.
|
||||
|
||||
--fix-thin::
|
||||
It is possible for 'git pack-objects' to build
|
||||
"thin" pack, which records objects in deltified form based on
|
||||
objects not included in the pack to reduce network traffic.
|
||||
Those objects are expected to be present on the receiving end
|
||||
and they must be included in the pack for that pack to be self
|
||||
contained and indexable. Without this option any attempt to
|
||||
index a thin pack will fail. This option only makes sense in
|
||||
conjunction with --stdin.
|
||||
Fix a "thin" pack produced by `git pack-objects --thin` (see
|
||||
linkgit:git-pack-objects[1] for details) by adding the
|
||||
excluded objects the deltified objects are based on to the
|
||||
pack. This option only makes sense in conjunction with --stdin.
|
||||
|
||||
--keep::
|
||||
Before moving the index into its final destination
|
||||
|
||||
@@ -21,16 +21,21 @@ DESCRIPTION
|
||||
Reads list of objects from the standard input, and writes a packed
|
||||
archive with specified base-name, or to the standard output.
|
||||
|
||||
A packed archive is an efficient way to transfer set of objects
|
||||
between two repositories, and also is an archival format which
|
||||
is efficient to access. The packed archive format (.pack) is
|
||||
designed to be self contained so that it can be unpacked without
|
||||
any further information, but for fast, random access to the objects
|
||||
in the pack, a pack index file (.idx) will be generated.
|
||||
A packed archive is an efficient way to transfer a set of objects
|
||||
between two repositories as well as an access efficient archival
|
||||
format. In a packed archive, an object is either stored as a
|
||||
compressed whole or as a difference from some other object.
|
||||
The latter is often called a delta.
|
||||
|
||||
Placing both in the pack/ subdirectory of $GIT_OBJECT_DIRECTORY (or
|
||||
The packed archive format (.pack) is designed to be self-contained
|
||||
so that it can be unpacked without any further information. Therefore,
|
||||
each object that a delta depends upon must be present within the pack.
|
||||
|
||||
A pack index file (.idx) is generated for fast, random access to the
|
||||
objects in the pack. Placing both the index file (.idx) and the packed
|
||||
archive (.pack) in the pack/ subdirectory of $GIT_OBJECT_DIRECTORY (or
|
||||
any of the directories on $GIT_ALTERNATE_OBJECT_DIRECTORIES)
|
||||
enables git to read from such an archive.
|
||||
enables git to read from the pack archive.
|
||||
|
||||
The 'git unpack-objects' command can read the packed archive and
|
||||
expand the objects contained in the pack into "one-file
|
||||
@@ -38,10 +43,6 @@ one-object" format; this is typically done by the smart-pull
|
||||
commands when a pack is created on-the-fly for efficient network
|
||||
transport by their peers.
|
||||
|
||||
In a packed archive, an object is either stored as a compressed
|
||||
whole, or as a difference from some other object. The latter is
|
||||
often called a delta.
|
||||
|
||||
|
||||
OPTIONS
|
||||
-------
|
||||
@@ -73,7 +74,7 @@ base-name::
|
||||
--all::
|
||||
This implies `--revs`. In addition to the list of
|
||||
revision arguments read from the standard input, pretend
|
||||
as if all refs under `$GIT_DIR/refs` are specified to be
|
||||
as if all refs under `refs/` are specified to be
|
||||
included.
|
||||
|
||||
--include-tag::
|
||||
@@ -179,6 +180,16 @@ base-name::
|
||||
Add --no-reuse-object if you want to force a uniform compression
|
||||
level on all data no matter the source.
|
||||
|
||||
--thin::
|
||||
Create a "thin" pack by omitting the common objects between a
|
||||
sender and a receiver in order to reduce network transfer. This
|
||||
option only makes sense in conjunction with --stdout.
|
||||
+
|
||||
Note: A thin pack violates the packed archive format by omitting
|
||||
required objects and is thus unusable by git without making it
|
||||
self-contained. Use `git index-pack --fix-thin`
|
||||
(see linkgit:git-index-pack[1]) to restore the self-contained property.
|
||||
|
||||
--delta-base-offset::
|
||||
A packed archive can express base object of a delta as
|
||||
either 20-byte object name or as an offset in the
|
||||
|
||||
@@ -17,7 +17,7 @@ NOTE: In most cases, users should run 'git gc', which calls
|
||||
'git prune'. See the section "NOTES", below.
|
||||
|
||||
This runs 'git fsck --unreachable' using all the refs
|
||||
available in `$GIT_DIR/refs`, optionally with additional set of
|
||||
available in `refs/`, optionally with additional set of
|
||||
objects specified on the command line, and prunes all unpacked
|
||||
objects unreachable from any of these head objects from the object database.
|
||||
In addition, it
|
||||
|
||||
@@ -69,11 +69,11 @@ nor in any Push line of the corresponding remotes file---see below).
|
||||
|
||||
--all::
|
||||
Instead of naming each ref to push, specifies that all
|
||||
refs under `$GIT_DIR/refs/heads/` be pushed.
|
||||
refs under `refs/heads/` be pushed.
|
||||
|
||||
--mirror::
|
||||
Instead of naming each ref to push, specifies that all
|
||||
refs under `$GIT_DIR/refs/` (which includes but is not
|
||||
refs under `refs/` (which includes but is not
|
||||
limited to `refs/heads/`, `refs/remotes/`, and `refs/tags/`)
|
||||
be mirrored to the remote repository. Newly created local
|
||||
refs will be pushed to the remote end, locally updated refs
|
||||
@@ -96,7 +96,7 @@ nor in any Push line of the corresponding remotes file---see below).
|
||||
the same as prefixing all refs with a colon.
|
||||
|
||||
--tags::
|
||||
All refs under `$GIT_DIR/refs/tags` are pushed, in
|
||||
All refs under `refs/tags` are pushed, in
|
||||
addition to refspecs explicitly listed on the command
|
||||
line.
|
||||
|
||||
@@ -141,9 +141,10 @@ useful if you write an alias or script around 'git push'.
|
||||
|
||||
--thin::
|
||||
--no-thin::
|
||||
These options are passed to 'git send-pack'. Thin
|
||||
transfer spends extra cycles to minimize the number of
|
||||
objects to be sent and meant to be used on slower connection.
|
||||
These options are passed to linkgit:git-send-pack[1]. A thin transfer
|
||||
significantly reduces the amount of sent data when the sender and
|
||||
receiver share many of the same objects in common. The default is
|
||||
\--thin.
|
||||
|
||||
-v::
|
||||
--verbose::
|
||||
|
||||
@@ -101,15 +101,14 @@ OPTIONS
|
||||
abbreviation mode.
|
||||
|
||||
--all::
|
||||
Show all refs found in `$GIT_DIR/refs`.
|
||||
Show all refs found in `refs/`.
|
||||
|
||||
--branches[=pattern]::
|
||||
--tags[=pattern]::
|
||||
--remotes[=pattern]::
|
||||
Show all branches, tags, or remote-tracking branches,
|
||||
respectively (i.e., refs found in `$GIT_DIR/refs/heads`,
|
||||
`$GIT_DIR/refs/tags`, or `$GIT_DIR/refs/remotes`,
|
||||
respectively).
|
||||
respectively (i.e., refs found in `refs/heads`,
|
||||
`refs/tags`, or `refs/remotes`, respectively).
|
||||
+
|
||||
If a `pattern` is given, only refs matching the given shell glob are
|
||||
shown. If the pattern does not contain a globbing character (`?`,
|
||||
@@ -189,7 +188,7 @@ blobs contained in a commit.
|
||||
`g`, and an abbreviated object name.
|
||||
|
||||
* A symbolic ref name. E.g. 'master' typically means the commit
|
||||
object referenced by $GIT_DIR/refs/heads/master. If you
|
||||
object referenced by refs/heads/master. If you
|
||||
happen to have both heads/master and tags/master, you can
|
||||
explicitly say 'heads/master' to tell git which one you mean.
|
||||
When ambiguous, a `<name>` is disambiguated by taking the
|
||||
@@ -198,15 +197,15 @@ blobs contained in a commit.
|
||||
. if `$GIT_DIR/<name>` exists, that is what you mean (this is usually
|
||||
useful only for `HEAD`, `FETCH_HEAD`, `ORIG_HEAD` and `MERGE_HEAD`);
|
||||
|
||||
. otherwise, `$GIT_DIR/refs/<name>` if exists;
|
||||
. otherwise, `refs/<name>` if exists;
|
||||
|
||||
. otherwise, `$GIT_DIR/refs/tags/<name>` if exists;
|
||||
. otherwise, `refs/tags/<name>` if exists;
|
||||
|
||||
. otherwise, `$GIT_DIR/refs/heads/<name>` if exists;
|
||||
. otherwise, `refs/heads/<name>` if exists;
|
||||
|
||||
. otherwise, `$GIT_DIR/refs/remotes/<name>` if exists;
|
||||
. otherwise, `refs/remotes/<name>` if exists;
|
||||
|
||||
. otherwise, `$GIT_DIR/refs/remotes/<name>/HEAD` if exists.
|
||||
. otherwise, `refs/remotes/<name>/HEAD` if exists.
|
||||
+
|
||||
HEAD names the commit your changes in the working tree is based on.
|
||||
FETCH_HEAD records the branch you fetched from a remote repository
|
||||
@@ -217,6 +216,9 @@ you can change the tip of the branch back to the state before you ran
|
||||
them easily.
|
||||
MERGE_HEAD records the commit(s) you are merging into your branch
|
||||
when you run 'git merge'.
|
||||
+
|
||||
Note that any of the `refs/*` cases above may come either from
|
||||
the `$GIT_DIR/refs` directory or from the `$GIT_DIR/packed-refs` file.
|
||||
|
||||
* A ref followed by the suffix '@' with a date specification
|
||||
enclosed in a brace
|
||||
|
||||
@@ -48,8 +48,8 @@ OPTIONS
|
||||
Run verbosely.
|
||||
|
||||
--thin::
|
||||
Spend extra cycles to minimize the number of objects to be sent.
|
||||
Use it on slower connection.
|
||||
Send a "thin" pack, which records objects in deltified form based
|
||||
on objects not included in the pack to reduce network traffic.
|
||||
|
||||
<host>::
|
||||
A remote host to house the repository. When this
|
||||
|
||||
@@ -20,8 +20,8 @@ DESCRIPTION
|
||||
-----------
|
||||
|
||||
Shows the commit ancestry graph starting from the commits named
|
||||
with <rev>s or <globs>s (or all refs under $GIT_DIR/refs/heads
|
||||
and/or $GIT_DIR/refs/tags) semi-visually.
|
||||
with <rev>s or <globs>s (or all refs under refs/heads
|
||||
and/or refs/tags) semi-visually.
|
||||
|
||||
It cannot show more than 29 branches and commits at a time.
|
||||
|
||||
@@ -37,8 +37,8 @@ OPTIONS
|
||||
|
||||
<glob>::
|
||||
A glob pattern that matches branch or tag names under
|
||||
$GIT_DIR/refs. For example, if you have many topic
|
||||
branches under $GIT_DIR/refs/heads/topic, giving
|
||||
refs/. For example, if you have many topic
|
||||
branches under refs/heads/topic, giving
|
||||
`topic/*` would show all of them.
|
||||
|
||||
-r::
|
||||
@@ -176,7 +176,7 @@ EXAMPLE
|
||||
-------
|
||||
|
||||
If you keep your primary branches immediately under
|
||||
`$GIT_DIR/refs/heads`, and topic branches in subdirectories of
|
||||
`refs/heads`, and topic branches in subdirectories of
|
||||
it, having the following in the configuration file may help:
|
||||
|
||||
------------
|
||||
|
||||
@@ -33,7 +33,7 @@ A stash is by default listed as "WIP on 'branchname' ...", but
|
||||
you can give a more descriptive message on the command line when
|
||||
you create one.
|
||||
|
||||
The latest stash you created is stored in `$GIT_DIR/refs/stash`; older
|
||||
The latest stash you created is stored in `refs/stash`; older
|
||||
stashes are found in the reflog of this reference and can be named using
|
||||
the usual reflog syntax (e.g. `stash@\{0}` is the most recently
|
||||
created stash, `stash@\{1}` is the one before it, `stash@\{2.hours.ago}`
|
||||
|
||||
@@ -8,7 +8,7 @@ git-var - Show a git logical variable
|
||||
|
||||
SYNOPSIS
|
||||
--------
|
||||
'git var' [ -l | <variable> ]
|
||||
'git var' ( -l | <variable> )
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
|
||||
@@ -43,9 +43,11 @@ unreleased) version of git, that is available from 'master'
|
||||
branch of the `git.git` repository.
|
||||
Documentation for older releases are available here:
|
||||
|
||||
* link:v1.7.0/git.html[documentation for release 1.7.0]
|
||||
* link:v1.7.0.2/git.html[documentation for release 1.7.0.2]
|
||||
|
||||
* release notes for
|
||||
link:RelNotes-1.7.0.2.txt[1.7.0.2],
|
||||
link:RelNotes-1.7.0.1.txt[1.7.0.1],
|
||||
link:RelNotes-1.7.0.txt[1.7.0].
|
||||
|
||||
* link:v1.6.6.2/git.html[documentation for release 1.6.6.2]
|
||||
@@ -229,7 +231,10 @@ help ...`.
|
||||
|
||||
-p::
|
||||
--paginate::
|
||||
Pipe all output into 'less' (or if set, $PAGER).
|
||||
Pipe all output into 'less' (or if set, $PAGER) if standard
|
||||
output is a terminal. This overrides the `pager.<cmd>`
|
||||
configuration options (see the "Configuration Mechanism" section
|
||||
below).
|
||||
|
||||
--no-pager::
|
||||
Do not pipe git output into a pager.
|
||||
@@ -401,7 +406,8 @@ people. Here is an example:
|
||||
------------
|
||||
|
||||
Various commands read from the configuration file and adjust
|
||||
their operation accordingly.
|
||||
their operation accordingly. See linkgit:git-config[1] for a
|
||||
list.
|
||||
|
||||
|
||||
Identifier Terminology
|
||||
|
||||
@@ -511,7 +511,8 @@ command to run to merge ancestor's version (`%O`), current
|
||||
version (`%A`) and the other branches' version (`%B`). These
|
||||
three tokens are replaced with the names of temporary files that
|
||||
hold the contents of these versions when the command line is
|
||||
built.
|
||||
built. Additionally, %L will be replaced with the conflict marker
|
||||
size (see below).
|
||||
|
||||
The merge driver is expected to leave the result of the merge in
|
||||
the file named with `%A` by overwriting it, and exit with zero
|
||||
|
||||
@@ -225,26 +225,26 @@ endif::git-rev-list[]
|
||||
|
||||
--all::
|
||||
|
||||
Pretend as if all the refs in `$GIT_DIR/refs/` are listed on the
|
||||
Pretend as if all the refs in `refs/` are listed on the
|
||||
command line as '<commit>'.
|
||||
|
||||
--branches[=pattern]::
|
||||
|
||||
Pretend as if all the refs in `$GIT_DIR/refs/heads` are listed
|
||||
Pretend as if all the refs in `refs/heads` are listed
|
||||
on the command line as '<commit>'. If `pattern` is given, limit
|
||||
branches to ones matching given shell glob. If pattern lacks '?',
|
||||
'*', or '[', '/*' at the end is implied.
|
||||
|
||||
--tags[=pattern]::
|
||||
|
||||
Pretend as if all the refs in `$GIT_DIR/refs/tags` are listed
|
||||
Pretend as if all the refs in `refs/tags` are listed
|
||||
on the command line as '<commit>'. If `pattern` is given, limit
|
||||
tags to ones matching given shell glob. If pattern lacks '?', '*',
|
||||
or '[', '/*' at the end is implied.
|
||||
|
||||
--remotes[=pattern]::
|
||||
|
||||
Pretend as if all the refs in `$GIT_DIR/refs/remotes` are listed
|
||||
Pretend as if all the refs in `refs/remotes` are listed
|
||||
on the command line as '<commit>'. If `pattern` is given, limit
|
||||
remote tracking branches to ones matching given shell glob.
|
||||
If pattern lacks '?', '*', or '[', '/*' at the end is implied.
|
||||
@@ -259,9 +259,9 @@ endif::git-rev-list[]
|
||||
ifndef::git-rev-list[]
|
||||
--bisect::
|
||||
|
||||
Pretend as if the bad bisection ref `$GIT_DIR/refs/bisect/bad`
|
||||
Pretend as if the bad bisection ref `refs/bisect/bad`
|
||||
was listed and as if it was followed by `--not` and the good
|
||||
bisection refs `$GIT_DIR/refs/bisect/good-*` on the command
|
||||
bisection refs `refs/bisect/good-*` on the command
|
||||
line.
|
||||
endif::git-rev-list[]
|
||||
|
||||
@@ -561,10 +561,10 @@ Bisection Helpers
|
||||
|
||||
Limit output to the one commit object which is roughly halfway between
|
||||
included and excluded commits. Note that the bad bisection ref
|
||||
`$GIT_DIR/refs/bisect/bad` is added to the included commits (if it
|
||||
exists) and the good bisection refs `$GIT_DIR/refs/bisect/good-*` are
|
||||
`refs/bisect/bad` is added to the included commits (if it
|
||||
exists) and the good bisection refs `refs/bisect/good-*` are
|
||||
added to the excluded commits (if they exist). Thus, supposing there
|
||||
are no refs in `$GIT_DIR/refs/bisect/`, if
|
||||
are no refs in `refs/bisect/`, if
|
||||
|
||||
-----------------------------------------------------------------------
|
||||
$ git rev-list --bisect foo ^bar ^baz
|
||||
@@ -585,7 +585,7 @@ one.
|
||||
--bisect-vars::
|
||||
|
||||
This calculates the same as `--bisect`, except that refs in
|
||||
`$GIT_DIR/refs/bisect/` are not used, and except that this outputs
|
||||
`refs/bisect/` are not used, and except that this outputs
|
||||
text ready to be eval'ed by the shell. These lines will assign the
|
||||
name of the midpoint revision to the variable `bisect_rev`, and the
|
||||
expected number of commits to be tested after `bisect_rev` is tested
|
||||
@@ -599,7 +599,7 @@ number of commits to be tested if `bisect_rev` turns out to be bad to
|
||||
|
||||
This outputs all the commit objects between the included and excluded
|
||||
commits, ordered by their distance to the included and excluded
|
||||
commits. Refs in `$GIT_DIR/refs/bisect/` are not used. The farthest
|
||||
commits. Refs in `refs/bisect/` are not used. The farthest
|
||||
from them is displayed first. (This is the only one displayed by
|
||||
`--bisect`.)
|
||||
+
|
||||
|
||||
@@ -64,8 +64,8 @@ The functions above do the following:
|
||||
`start_async`::
|
||||
|
||||
Run a function asynchronously. Takes a pointer to a `struct
|
||||
async` that specifies the details and returns a pipe FD
|
||||
from which the caller reads. See below for details.
|
||||
async` that specifies the details and returns a set of pipe FDs
|
||||
for communication with the function. See below for details.
|
||||
|
||||
`finish_async`::
|
||||
|
||||
@@ -135,7 +135,7 @@ stderr as follows:
|
||||
|
||||
.in: The FD must be readable; it becomes child's stdin.
|
||||
.out: The FD must be writable; it becomes child's stdout.
|
||||
.err > 0 is not supported.
|
||||
.err: The FD must be writable; it becomes child's stderr.
|
||||
|
||||
The specified FD is closed by start_command(), even if it fails to
|
||||
run the sub-process!
|
||||
@@ -180,17 +180,47 @@ The caller:
|
||||
struct async variable;
|
||||
2. initializes .proc and .data;
|
||||
3. calls start_async();
|
||||
4. processes the data by reading from the fd in .out;
|
||||
5. closes .out;
|
||||
4. processes communicates with proc through .in and .out;
|
||||
5. closes .in and .out;
|
||||
6. calls finish_async().
|
||||
|
||||
The members .in, .out are used to provide a set of fd's for
|
||||
communication between the caller and the callee as follows:
|
||||
|
||||
. Specify 0 to have no file descriptor passed. The callee will
|
||||
receive -1 in the corresponding argument.
|
||||
|
||||
. Specify < 0 to have a pipe allocated; start_async() replaces
|
||||
with the pipe FD in the following way:
|
||||
|
||||
.in: Returns the writable pipe end into which the caller
|
||||
writes; the readable end of the pipe becomes the function's
|
||||
in argument.
|
||||
|
||||
.out: Returns the readable pipe end from which the caller
|
||||
reads; the writable end of the pipe becomes the function's
|
||||
out argument.
|
||||
|
||||
The caller of start_async() must close the returned FDs after it
|
||||
has completed reading from/writing from them.
|
||||
|
||||
. Specify a file descriptor > 0 to be used by the function:
|
||||
|
||||
.in: The FD must be readable; it becomes the function's in.
|
||||
.out: The FD must be writable; it becomes the function's out.
|
||||
|
||||
The specified FD is closed by start_async(), even if it fails to
|
||||
run the function.
|
||||
|
||||
The function pointer in .proc has the following signature:
|
||||
|
||||
int proc(int fd, void *data);
|
||||
int proc(int in, int out, void *data);
|
||||
|
||||
. fd specifies a writable file descriptor to which the function must
|
||||
write the data that it produces. The function *must* close this
|
||||
descriptor before it returns.
|
||||
. in, out specifies a set of file descriptors to which the function
|
||||
must read/write the data that it needs/produces. The function
|
||||
*must* close these descriptors before it returns. A descriptor
|
||||
may be -1 if the caller did not configure a descriptor for that
|
||||
direction.
|
||||
|
||||
. data is the value that the caller has specified in the .data member
|
||||
of struct async.
|
||||
@@ -205,8 +235,8 @@ because this facility is implemented by a pipe to a forked process on
|
||||
UNIX, but by a thread in the same address space on Windows:
|
||||
|
||||
. It cannot change the program's state (global variables, environment,
|
||||
etc.) in a way that the caller notices; in other words, .out is the
|
||||
only communication channel to the caller.
|
||||
etc.) in a way that the caller notices; in other words, .in and .out
|
||||
are the only communication channels to the caller.
|
||||
|
||||
. It must not change the program's state that the caller of the
|
||||
facility also uses.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
GVF=GIT-VERSION-FILE
|
||||
DEF_VER=v1.7.0
|
||||
DEF_VER=v1.7.0.2
|
||||
|
||||
LF='
|
||||
'
|
||||
|
||||
2
RelNotes
2
RelNotes
@@ -1 +1 @@
|
||||
Documentation/RelNotes-1.7.0.txt
|
||||
Documentation/RelNotes-1.7.0.2.txt
|
||||
6
bisect.c
6
bisect.c
@@ -986,6 +986,12 @@ int bisect_next_all(const char *prefix)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (!all) {
|
||||
fprintf(stderr, "No testable commit found.\n"
|
||||
"Maybe you started with bad path parameters?\n");
|
||||
exit(4);
|
||||
}
|
||||
|
||||
bisect_rev = revs.commits->item->object.sha1;
|
||||
memcpy(bisect_rev_hex, sha1_to_hex(bisect_rev), 41);
|
||||
|
||||
|
||||
@@ -2006,7 +2006,7 @@ static int find_pos(struct image *img,
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* If match_begining or match_end is specified, there is no
|
||||
* If match_beginning or match_end is specified, there is no
|
||||
* point starting from a wrong line that will never match and
|
||||
* wander around and wait for a match at the specified end.
|
||||
*/
|
||||
|
||||
@@ -219,9 +219,10 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
|
||||
"exit with zero when there's no error", 'e'),
|
||||
OPT_SET_INT('p', NULL, &opt, "pretty-print object's content", 'p'),
|
||||
OPT_SET_INT(0, "batch", &batch,
|
||||
"show info and content of objects feeded on stdin", BATCH),
|
||||
"show info and content of objects fed from the standard input",
|
||||
BATCH),
|
||||
OPT_SET_INT(0, "batch-check", &batch,
|
||||
"show info about objects feeded on stdin",
|
||||
"show info about objects fed from the standard input",
|
||||
BATCH_CHECK),
|
||||
OPT_END()
|
||||
};
|
||||
|
||||
@@ -41,7 +41,7 @@ static const char implicit_ident_advice[] =
|
||||
"on your username and hostname. Please check that they are accurate.\n"
|
||||
"You can suppress this message by setting them explicitly:\n"
|
||||
"\n"
|
||||
" git config --global user.name Your Name\n"
|
||||
" git config --global user.name \"Your Name\"\n"
|
||||
" git config --global user.email you@example.com\n"
|
||||
"\n"
|
||||
"If the identity used for this commit is wrong, you can fix it with:\n"
|
||||
@@ -1046,7 +1046,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
|
||||
if (*argv)
|
||||
s.pathspec = get_pathspec(prefix, argv);
|
||||
|
||||
read_cache();
|
||||
read_cache_preload(s.pathspec);
|
||||
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, s.pathspec, NULL, NULL);
|
||||
s.is_initial = get_sha1(s.reference, sha1) ? 1 : 0;
|
||||
s.in_merge = in_merge;
|
||||
|
||||
@@ -586,12 +586,12 @@ static int everything_local(struct ref **refs, int nr_match, char **match)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int sideband_demux(int fd, void *data)
|
||||
static int sideband_demux(int in, int out, void *data)
|
||||
{
|
||||
int *xd = data;
|
||||
|
||||
int ret = recv_sideband("fetch-pack", xd[0], fd);
|
||||
close(fd);
|
||||
int ret = recv_sideband("fetch-pack", xd[0], out);
|
||||
close(out);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -613,6 +613,7 @@ static int get_pack(int xd[2], char **pack_lockfile)
|
||||
*/
|
||||
demux.proc = sideband_demux;
|
||||
demux.data = xd;
|
||||
demux.out = -1;
|
||||
if (start_async(&demux))
|
||||
die("fetch-pack: unable to fork off sideband"
|
||||
" demultiplexer");
|
||||
|
||||
@@ -408,7 +408,7 @@ static int pathspec_matches(const char **paths, const char *name, int max_depth)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *read_sha1_file_locked(const unsigned char *sha1, enum object_type *type, unsigned long *size)
|
||||
static void *lock_and_read_sha1_file(const unsigned char *sha1, enum object_type *type, unsigned long *size)
|
||||
{
|
||||
void *data;
|
||||
|
||||
@@ -426,7 +426,7 @@ static void *load_sha1(const unsigned char *sha1, unsigned long *size,
|
||||
const char *name)
|
||||
{
|
||||
enum object_type type;
|
||||
void *data = read_sha1_file_locked(sha1, &type, size);
|
||||
void *data = lock_and_read_sha1_file(sha1, &type, size);
|
||||
|
||||
if (!data)
|
||||
error("'%s': unable to read %s", name, sha1_to_hex(sha1));
|
||||
@@ -615,7 +615,7 @@ static int grep_tree(struct grep_opt *opt, const char **paths,
|
||||
void *data;
|
||||
unsigned long size;
|
||||
|
||||
data = read_sha1_file_locked(entry.sha1, &type, &size);
|
||||
data = lock_and_read_sha1_file(entry.sha1, &type, &size);
|
||||
if (!data)
|
||||
die("unable to read tree (%s)",
|
||||
sha1_to_hex(entry.sha1));
|
||||
@@ -868,6 +868,16 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
|
||||
PARSE_OPT_STOP_AT_NON_OPTION |
|
||||
PARSE_OPT_NO_INTERNAL_HELP);
|
||||
|
||||
/*
|
||||
* skip a -- separator; we know it cannot be
|
||||
* separating revisions from pathnames if
|
||||
* we haven't even had any patterns yet
|
||||
*/
|
||||
if (argc > 0 && !opt.pattern_list && !strcmp(argv[0], "--")) {
|
||||
argv++;
|
||||
argc--;
|
||||
}
|
||||
|
||||
/* First unrecognized non-option token */
|
||||
if (argc > 0 && !opt.pattern_list) {
|
||||
append_grep_pattern(&opt, argv[0], "command line", 0,
|
||||
|
||||
@@ -1089,7 +1089,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
|
||||
|
||||
/*
|
||||
* We cannot move this anywhere earlier because we do want to
|
||||
* know if --root was given explicitly from the comand line.
|
||||
* know if --root was given explicitly from the command line.
|
||||
*/
|
||||
rev.show_root_diff = 1;
|
||||
|
||||
|
||||
@@ -106,7 +106,7 @@ static void prune_object_dir(const char *path)
|
||||
/*
|
||||
* Write errors (particularly out of space) can result in
|
||||
* failed temporary packs (and more rarely indexes and other
|
||||
* files begining with "tmp_") accumulating in the object
|
||||
* files beginning with "tmp_") accumulating in the object
|
||||
* and the pack directories.
|
||||
*/
|
||||
static void remove_temporary_files(const char *path)
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
#include "pack.h"
|
||||
#include "refs.h"
|
||||
#include "pkt-line.h"
|
||||
#include "sideband.h"
|
||||
#include "run-command.h"
|
||||
#include "exec_cmd.h"
|
||||
#include "commit.h"
|
||||
@@ -27,11 +28,12 @@ static int receive_unpack_limit = -1;
|
||||
static int transfer_unpack_limit = -1;
|
||||
static int unpack_limit = 100;
|
||||
static int report_status;
|
||||
static int use_sideband;
|
||||
static int prefer_ofs_delta = 1;
|
||||
static int auto_update_server_info;
|
||||
static int auto_gc = 1;
|
||||
static const char *head_name;
|
||||
static char *capabilities_to_send;
|
||||
static int sent_capabilities;
|
||||
|
||||
static enum deny_action parse_deny_action(const char *var, const char *value)
|
||||
{
|
||||
@@ -105,19 +107,21 @@ static int receive_pack_config(const char *var, const char *value, void *cb)
|
||||
|
||||
static int show_ref(const char *path, const unsigned char *sha1, int flag, void *cb_data)
|
||||
{
|
||||
if (!capabilities_to_send)
|
||||
if (sent_capabilities)
|
||||
packet_write(1, "%s %s\n", sha1_to_hex(sha1), path);
|
||||
else
|
||||
packet_write(1, "%s %s%c%s\n",
|
||||
sha1_to_hex(sha1), path, 0, capabilities_to_send);
|
||||
capabilities_to_send = NULL;
|
||||
packet_write(1, "%s %s%c%s%s\n",
|
||||
sha1_to_hex(sha1), path, 0,
|
||||
" report-status delete-refs side-band-64k",
|
||||
prefer_ofs_delta ? " ofs-delta" : "");
|
||||
sent_capabilities = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void write_head_info(void)
|
||||
{
|
||||
for_each_ref(show_ref, NULL);
|
||||
if (capabilities_to_send)
|
||||
if (!sent_capabilities)
|
||||
show_ref("capabilities^{}", null_sha1, 0, NULL);
|
||||
|
||||
}
|
||||
@@ -135,11 +139,61 @@ static struct command *commands;
|
||||
static const char pre_receive_hook[] = "hooks/pre-receive";
|
||||
static const char post_receive_hook[] = "hooks/post-receive";
|
||||
|
||||
static void rp_error(const char *err, ...) __attribute__((format (printf, 1, 2)));
|
||||
static void rp_warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
|
||||
|
||||
static void report_message(const char *prefix, const char *err, va_list params)
|
||||
{
|
||||
int sz = strlen(prefix);
|
||||
char msg[4096];
|
||||
|
||||
strncpy(msg, prefix, sz);
|
||||
sz += vsnprintf(msg + sz, sizeof(msg) - sz, err, params);
|
||||
if (sz > (sizeof(msg) - 1))
|
||||
sz = sizeof(msg) - 1;
|
||||
msg[sz++] = '\n';
|
||||
|
||||
if (use_sideband)
|
||||
send_sideband(1, 2, msg, sz, use_sideband);
|
||||
else
|
||||
xwrite(2, msg, sz);
|
||||
}
|
||||
|
||||
static void rp_warning(const char *err, ...)
|
||||
{
|
||||
va_list params;
|
||||
va_start(params, err);
|
||||
report_message("warning: ", err, params);
|
||||
va_end(params);
|
||||
}
|
||||
|
||||
static void rp_error(const char *err, ...)
|
||||
{
|
||||
va_list params;
|
||||
va_start(params, err);
|
||||
report_message("error: ", err, params);
|
||||
va_end(params);
|
||||
}
|
||||
|
||||
static int copy_to_sideband(int in, int out, void *arg)
|
||||
{
|
||||
char data[128];
|
||||
while (1) {
|
||||
ssize_t sz = xread(in, data, sizeof(data));
|
||||
if (sz <= 0)
|
||||
break;
|
||||
send_sideband(1, 2, data, sz, use_sideband);
|
||||
}
|
||||
close(in);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int run_receive_hook(const char *hook_name)
|
||||
{
|
||||
static char buf[sizeof(commands->old_sha1) * 2 + PATH_MAX + 4];
|
||||
struct command *cmd;
|
||||
struct child_process proc;
|
||||
struct async muxer;
|
||||
const char *argv[2];
|
||||
int have_input = 0, code;
|
||||
|
||||
@@ -159,9 +213,23 @@ static int run_receive_hook(const char *hook_name)
|
||||
proc.in = -1;
|
||||
proc.stdout_to_stderr = 1;
|
||||
|
||||
if (use_sideband) {
|
||||
memset(&muxer, 0, sizeof(muxer));
|
||||
muxer.proc = copy_to_sideband;
|
||||
muxer.in = -1;
|
||||
code = start_async(&muxer);
|
||||
if (code)
|
||||
return code;
|
||||
proc.err = muxer.in;
|
||||
}
|
||||
|
||||
code = start_command(&proc);
|
||||
if (code)
|
||||
if (code) {
|
||||
if (use_sideband)
|
||||
finish_async(&muxer);
|
||||
return code;
|
||||
}
|
||||
|
||||
for (cmd = commands; cmd; cmd = cmd->next) {
|
||||
if (!cmd->error_string) {
|
||||
size_t n = snprintf(buf, sizeof(buf), "%s %s %s\n",
|
||||
@@ -173,6 +241,8 @@ static int run_receive_hook(const char *hook_name)
|
||||
}
|
||||
}
|
||||
close(proc.in);
|
||||
if (use_sideband)
|
||||
finish_async(&muxer);
|
||||
return finish_command(&proc);
|
||||
}
|
||||
|
||||
@@ -180,6 +250,8 @@ static int run_update_hook(struct command *cmd)
|
||||
{
|
||||
static const char update_hook[] = "hooks/update";
|
||||
const char *argv[5];
|
||||
struct child_process proc;
|
||||
int code;
|
||||
|
||||
if (access(update_hook, X_OK) < 0)
|
||||
return 0;
|
||||
@@ -190,8 +262,18 @@ static int run_update_hook(struct command *cmd)
|
||||
argv[3] = sha1_to_hex(cmd->new_sha1);
|
||||
argv[4] = NULL;
|
||||
|
||||
return run_command_v_opt(argv, RUN_COMMAND_NO_STDIN |
|
||||
RUN_COMMAND_STDOUT_TO_STDERR);
|
||||
memset(&proc, 0, sizeof(proc));
|
||||
proc.no_stdin = 1;
|
||||
proc.stdout_to_stderr = 1;
|
||||
proc.err = use_sideband ? -1 : 0;
|
||||
proc.argv = argv;
|
||||
|
||||
code = start_command(&proc);
|
||||
if (code)
|
||||
return code;
|
||||
if (use_sideband)
|
||||
copy_to_sideband(proc.err, -1, NULL);
|
||||
return finish_command(&proc);
|
||||
}
|
||||
|
||||
static int is_ref_checked_out(const char *ref)
|
||||
@@ -224,7 +306,7 @@ static void refuse_unconfigured_deny(void)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < ARRAY_SIZE(refuse_unconfigured_deny_msg); i++)
|
||||
error("%s", refuse_unconfigured_deny_msg[i]);
|
||||
rp_error("%s", refuse_unconfigured_deny_msg[i]);
|
||||
}
|
||||
|
||||
static char *refuse_unconfigured_deny_delete_current_msg[] = {
|
||||
@@ -244,7 +326,7 @@ static void refuse_unconfigured_deny_delete_current(void)
|
||||
for (i = 0;
|
||||
i < ARRAY_SIZE(refuse_unconfigured_deny_delete_current_msg);
|
||||
i++)
|
||||
error("%s", refuse_unconfigured_deny_delete_current_msg[i]);
|
||||
rp_error("%s", refuse_unconfigured_deny_delete_current_msg[i]);
|
||||
}
|
||||
|
||||
static const char *update(struct command *cmd)
|
||||
@@ -256,7 +338,7 @@ static const char *update(struct command *cmd)
|
||||
|
||||
/* only refs/... are allowed */
|
||||
if (prefixcmp(name, "refs/") || check_ref_format(name + 5)) {
|
||||
error("refusing to create funny ref '%s' remotely", name);
|
||||
rp_error("refusing to create funny ref '%s' remotely", name);
|
||||
return "funny refname";
|
||||
}
|
||||
|
||||
@@ -265,11 +347,11 @@ static const char *update(struct command *cmd)
|
||||
case DENY_IGNORE:
|
||||
break;
|
||||
case DENY_WARN:
|
||||
warning("updating the current branch");
|
||||
rp_warning("updating the current branch");
|
||||
break;
|
||||
case DENY_REFUSE:
|
||||
case DENY_UNCONFIGURED:
|
||||
error("refusing to update checked out branch: %s", name);
|
||||
rp_error("refusing to update checked out branch: %s", name);
|
||||
if (deny_current_branch == DENY_UNCONFIGURED)
|
||||
refuse_unconfigured_deny();
|
||||
return "branch is currently checked out";
|
||||
@@ -284,7 +366,7 @@ static const char *update(struct command *cmd)
|
||||
|
||||
if (!is_null_sha1(old_sha1) && is_null_sha1(new_sha1)) {
|
||||
if (deny_deletes && !prefixcmp(name, "refs/heads/")) {
|
||||
error("denying ref deletion for %s", name);
|
||||
rp_error("denying ref deletion for %s", name);
|
||||
return "deletion prohibited";
|
||||
}
|
||||
|
||||
@@ -293,13 +375,13 @@ static const char *update(struct command *cmd)
|
||||
case DENY_IGNORE:
|
||||
break;
|
||||
case DENY_WARN:
|
||||
warning("deleting the current branch");
|
||||
rp_warning("deleting the current branch");
|
||||
break;
|
||||
case DENY_REFUSE:
|
||||
case DENY_UNCONFIGURED:
|
||||
if (deny_delete_current == DENY_UNCONFIGURED)
|
||||
refuse_unconfigured_deny_delete_current();
|
||||
error("refusing to delete the current branch: %s", name);
|
||||
rp_error("refusing to delete the current branch: %s", name);
|
||||
return "deletion of the current branch prohibited";
|
||||
}
|
||||
}
|
||||
@@ -329,23 +411,23 @@ static const char *update(struct command *cmd)
|
||||
break;
|
||||
free_commit_list(bases);
|
||||
if (!ent) {
|
||||
error("denying non-fast-forward %s"
|
||||
" (you should pull first)", name);
|
||||
rp_error("denying non-fast-forward %s"
|
||||
" (you should pull first)", name);
|
||||
return "non-fast-forward";
|
||||
}
|
||||
}
|
||||
if (run_update_hook(cmd)) {
|
||||
error("hook declined to update %s", name);
|
||||
rp_error("hook declined to update %s", name);
|
||||
return "hook declined";
|
||||
}
|
||||
|
||||
if (is_null_sha1(new_sha1)) {
|
||||
if (!parse_object(old_sha1)) {
|
||||
warning ("Allowing deletion of corrupt ref.");
|
||||
rp_warning("Allowing deletion of corrupt ref.");
|
||||
old_sha1 = NULL;
|
||||
}
|
||||
if (delete_ref(name, old_sha1, 0)) {
|
||||
error("failed to delete %s", name);
|
||||
rp_error("failed to delete %s", name);
|
||||
return "failed to delete";
|
||||
}
|
||||
return NULL; /* good */
|
||||
@@ -353,7 +435,7 @@ static const char *update(struct command *cmd)
|
||||
else {
|
||||
lock = lock_any_ref_for_update(name, old_sha1, 0);
|
||||
if (!lock) {
|
||||
error("failed to lock %s", name);
|
||||
rp_error("failed to lock %s", name);
|
||||
return "failed to lock";
|
||||
}
|
||||
if (write_ref_sha1(lock, new_sha1, "push")) {
|
||||
@@ -368,8 +450,9 @@ static char update_post_hook[] = "hooks/post-update";
|
||||
static void run_update_post_hook(struct command *cmd)
|
||||
{
|
||||
struct command *cmd_p;
|
||||
int argc, status;
|
||||
int argc;
|
||||
const char **argv;
|
||||
struct child_process proc;
|
||||
|
||||
for (argc = 0, cmd_p = cmd; cmd_p; cmd_p = cmd_p->next) {
|
||||
if (cmd_p->error_string)
|
||||
@@ -391,8 +474,18 @@ static void run_update_post_hook(struct command *cmd)
|
||||
argc++;
|
||||
}
|
||||
argv[argc] = NULL;
|
||||
status = run_command_v_opt(argv, RUN_COMMAND_NO_STDIN
|
||||
| RUN_COMMAND_STDOUT_TO_STDERR);
|
||||
|
||||
memset(&proc, 0, sizeof(proc));
|
||||
proc.no_stdin = 1;
|
||||
proc.stdout_to_stderr = 1;
|
||||
proc.err = use_sideband ? -1 : 0;
|
||||
proc.argv = argv;
|
||||
|
||||
if (!start_command(&proc)) {
|
||||
if (use_sideband)
|
||||
copy_to_sideband(proc.err, -1, NULL);
|
||||
finish_command(&proc);
|
||||
}
|
||||
}
|
||||
|
||||
static void execute_commands(const char *unpacker_error)
|
||||
@@ -452,6 +545,8 @@ static void read_head_info(void)
|
||||
if (reflen + 82 < len) {
|
||||
if (strstr(refname + reflen + 1, "report-status"))
|
||||
report_status = 1;
|
||||
if (strstr(refname + reflen + 1, "side-band-64k"))
|
||||
use_sideband = LARGE_PACKET_MAX;
|
||||
}
|
||||
cmd = xmalloc(sizeof(struct command) + len - 80);
|
||||
hashcpy(cmd->old_sha1, old_sha1);
|
||||
@@ -551,17 +646,25 @@ static const char *unpack(void)
|
||||
static void report(const char *unpack_status)
|
||||
{
|
||||
struct command *cmd;
|
||||
packet_write(1, "unpack %s\n",
|
||||
unpack_status ? unpack_status : "ok");
|
||||
struct strbuf buf = STRBUF_INIT;
|
||||
|
||||
packet_buf_write(&buf, "unpack %s\n",
|
||||
unpack_status ? unpack_status : "ok");
|
||||
for (cmd = commands; cmd; cmd = cmd->next) {
|
||||
if (!cmd->error_string)
|
||||
packet_write(1, "ok %s\n",
|
||||
cmd->ref_name);
|
||||
packet_buf_write(&buf, "ok %s\n",
|
||||
cmd->ref_name);
|
||||
else
|
||||
packet_write(1, "ng %s %s\n",
|
||||
cmd->ref_name, cmd->error_string);
|
||||
packet_buf_write(&buf, "ng %s %s\n",
|
||||
cmd->ref_name, cmd->error_string);
|
||||
}
|
||||
packet_flush(1);
|
||||
packet_buf_flush(&buf);
|
||||
|
||||
if (use_sideband)
|
||||
send_sideband(1, 1, buf.buf, buf.len, use_sideband);
|
||||
else
|
||||
safe_write(1, buf.buf, buf.len);
|
||||
strbuf_release(&buf);
|
||||
}
|
||||
|
||||
static int delete_only(struct command *cmd)
|
||||
@@ -658,10 +761,6 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
|
||||
else if (0 <= receive_unpack_limit)
|
||||
unpack_limit = receive_unpack_limit;
|
||||
|
||||
capabilities_to_send = (prefer_ofs_delta) ?
|
||||
" report-status delete-refs ofs-delta " :
|
||||
" report-status delete-refs ";
|
||||
|
||||
if (advertise_refs || !stateless_rpc) {
|
||||
add_alternate_refs();
|
||||
write_head_info();
|
||||
@@ -695,5 +794,7 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
|
||||
if (auto_update_server_info)
|
||||
update_server_info(0);
|
||||
}
|
||||
if (use_sideband)
|
||||
packet_flush(1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ static const char * const cherry_pick_usage[] = {
|
||||
static int edit, no_replay, no_commit, mainline, signoff;
|
||||
static enum { REVERT, CHERRY_PICK } action;
|
||||
static struct commit *commit;
|
||||
static const char *commit_name;
|
||||
static int allow_rerere_auto;
|
||||
|
||||
static const char *me;
|
||||
@@ -49,7 +50,6 @@ static void parse_args(int argc, const char **argv)
|
||||
const char * const * usage_str =
|
||||
action == REVERT ? revert_usage : cherry_pick_usage;
|
||||
unsigned char sha1[20];
|
||||
const char *arg;
|
||||
int noop;
|
||||
struct option options[] = {
|
||||
OPT_BOOLEAN('n', "no-commit", &no_commit, "don't automatically commit"),
|
||||
@@ -64,19 +64,13 @@ static void parse_args(int argc, const char **argv)
|
||||
|
||||
if (parse_options(argc, argv, NULL, options, usage_str, 0) != 1)
|
||||
usage_with_options(usage_str, options);
|
||||
arg = argv[0];
|
||||
|
||||
if (get_sha1(arg, sha1))
|
||||
die ("Cannot find '%s'", arg);
|
||||
commit = (struct commit *)parse_object(sha1);
|
||||
commit_name = argv[0];
|
||||
if (get_sha1(commit_name, sha1))
|
||||
die ("Cannot find '%s'", commit_name);
|
||||
commit = lookup_commit_reference(sha1);
|
||||
if (!commit)
|
||||
die ("Could not find %s", sha1_to_hex(sha1));
|
||||
if (commit->object.type == OBJ_TAG) {
|
||||
commit = (struct commit *)
|
||||
deref_tag((struct object *)commit, arg, strlen(arg));
|
||||
}
|
||||
if (commit->object.type != OBJ_COMMIT)
|
||||
die ("'%s' does not point to a commit", arg);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
static char *get_oneline(const char *message)
|
||||
@@ -204,25 +198,27 @@ static void set_author_ident_env(const char *message)
|
||||
sha1_to_hex(commit->object.sha1));
|
||||
}
|
||||
|
||||
static char *help_msg(const unsigned char *sha1)
|
||||
static char *help_msg(const char *name)
|
||||
{
|
||||
static char helpbuf[1024];
|
||||
struct strbuf helpbuf = STRBUF_INIT;
|
||||
char *msg = getenv("GIT_CHERRY_PICK_HELP");
|
||||
|
||||
if (msg)
|
||||
return msg;
|
||||
|
||||
strcpy(helpbuf, " After resolving the conflicts,\n"
|
||||
"mark the corrected paths with 'git add <paths>' "
|
||||
"or 'git rm <paths>' and commit the result.");
|
||||
strbuf_addstr(&helpbuf, " After resolving the conflicts,\n"
|
||||
"mark the corrected paths with 'git add <paths>' or 'git rm <paths>'\n"
|
||||
"and commit the result");
|
||||
|
||||
if (action == CHERRY_PICK) {
|
||||
sprintf(helpbuf + strlen(helpbuf),
|
||||
"\nWhen commiting, use the option "
|
||||
"'-c %s' to retain authorship and message.",
|
||||
find_unique_abbrev(sha1, DEFAULT_ABBREV));
|
||||
strbuf_addf(&helpbuf, " with: \n"
|
||||
"\n"
|
||||
" git commit -c %s\n",
|
||||
name);
|
||||
}
|
||||
return helpbuf;
|
||||
else
|
||||
strbuf_addch(&helpbuf, '.');
|
||||
return strbuf_detach(&helpbuf, NULL);
|
||||
}
|
||||
|
||||
static struct tree *empty_tree(void)
|
||||
@@ -409,7 +405,7 @@ static int revert_or_cherry_pick(int argc, const char **argv)
|
||||
if (commit_lock_file(&msg_file) < 0)
|
||||
die ("Error wrapping up %s", defmsg);
|
||||
fprintf(stderr, "Automatic %s failed.%s\n",
|
||||
me, help_msg(commit->object.sha1));
|
||||
me, help_msg(commit_name));
|
||||
rerere(allow_rerere_auto);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -372,6 +372,14 @@ static void print_helper_status(struct ref *ref)
|
||||
strbuf_release(&buf);
|
||||
}
|
||||
|
||||
static int sideband_demux(int in, int out, void *data)
|
||||
{
|
||||
int *fd = data;
|
||||
int ret = recv_sideband("send-pack", fd[0], out);
|
||||
close(out);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int send_pack(struct send_pack_args *args,
|
||||
int fd[], struct child_process *conn,
|
||||
struct ref *remote_refs,
|
||||
@@ -382,18 +390,22 @@ int send_pack(struct send_pack_args *args,
|
||||
struct strbuf req_buf = STRBUF_INIT;
|
||||
struct ref *ref;
|
||||
int new_refs;
|
||||
int ask_for_status_report = 0;
|
||||
int allow_deleting_refs = 0;
|
||||
int expect_status_report = 0;
|
||||
int status_report = 0;
|
||||
int use_sideband = 0;
|
||||
unsigned cmds_sent = 0;
|
||||
int ret;
|
||||
struct async demux;
|
||||
|
||||
/* Does the other end support the reporting? */
|
||||
if (server_supports("report-status"))
|
||||
ask_for_status_report = 1;
|
||||
status_report = 1;
|
||||
if (server_supports("delete-refs"))
|
||||
allow_deleting_refs = 1;
|
||||
if (server_supports("ofs-delta"))
|
||||
args->use_ofs_delta = 1;
|
||||
if (server_supports("side-band-64k"))
|
||||
use_sideband = 1;
|
||||
|
||||
if (!remote_refs) {
|
||||
fprintf(stderr, "No refs in common and none specified; doing nothing.\n"
|
||||
@@ -426,28 +438,30 @@ int send_pack(struct send_pack_args *args,
|
||||
if (!ref->deletion)
|
||||
new_refs++;
|
||||
|
||||
if (!args->dry_run) {
|
||||
if (args->dry_run) {
|
||||
ref->status = REF_STATUS_OK;
|
||||
} else {
|
||||
char *old_hex = sha1_to_hex(ref->old_sha1);
|
||||
char *new_hex = sha1_to_hex(ref->new_sha1);
|
||||
|
||||
if (ask_for_status_report) {
|
||||
packet_buf_write(&req_buf, "%s %s %s%c%s",
|
||||
if (!cmds_sent && (status_report || use_sideband)) {
|
||||
packet_buf_write(&req_buf, "%s %s %s%c%s%s",
|
||||
old_hex, new_hex, ref->name, 0,
|
||||
"report-status");
|
||||
ask_for_status_report = 0;
|
||||
expect_status_report = 1;
|
||||
status_report ? " report-status" : "",
|
||||
use_sideband ? " side-band-64k" : "");
|
||||
}
|
||||
else
|
||||
packet_buf_write(&req_buf, "%s %s %s",
|
||||
old_hex, new_hex, ref->name);
|
||||
ref->status = status_report ?
|
||||
REF_STATUS_EXPECTING_REPORT :
|
||||
REF_STATUS_OK;
|
||||
cmds_sent++;
|
||||
}
|
||||
ref->status = expect_status_report ?
|
||||
REF_STATUS_EXPECTING_REPORT :
|
||||
REF_STATUS_OK;
|
||||
}
|
||||
|
||||
if (args->stateless_rpc) {
|
||||
if (!args->dry_run) {
|
||||
if (!args->dry_run && cmds_sent) {
|
||||
packet_buf_flush(&req_buf);
|
||||
send_sideband(out, -1, req_buf.buf, req_buf.len, LARGE_PACKET_MAX);
|
||||
}
|
||||
@@ -457,23 +471,43 @@ int send_pack(struct send_pack_args *args,
|
||||
}
|
||||
strbuf_release(&req_buf);
|
||||
|
||||
if (new_refs && !args->dry_run) {
|
||||
if (use_sideband && cmds_sent) {
|
||||
memset(&demux, 0, sizeof(demux));
|
||||
demux.proc = sideband_demux;
|
||||
demux.data = fd;
|
||||
demux.out = -1;
|
||||
if (start_async(&demux))
|
||||
die("receive-pack: unable to fork off sideband demultiplexer");
|
||||
in = demux.out;
|
||||
}
|
||||
|
||||
if (new_refs && cmds_sent) {
|
||||
if (pack_objects(out, remote_refs, extra_have, args) < 0) {
|
||||
for (ref = remote_refs; ref; ref = ref->next)
|
||||
ref->status = REF_STATUS_NONE;
|
||||
if (use_sideband)
|
||||
finish_async(&demux);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if (args->stateless_rpc && !args->dry_run)
|
||||
if (args->stateless_rpc && cmds_sent)
|
||||
packet_flush(out);
|
||||
|
||||
if (expect_status_report)
|
||||
if (status_report && cmds_sent)
|
||||
ret = receive_status(in, remote_refs);
|
||||
else
|
||||
ret = 0;
|
||||
if (args->stateless_rpc)
|
||||
packet_flush(out);
|
||||
|
||||
if (use_sideband && cmds_sent) {
|
||||
if (finish_async(&demux)) {
|
||||
error("error in sideband demultiplexer");
|
||||
ret = -1;
|
||||
}
|
||||
close(demux.out);
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
for (ref = remote_refs; ref; ref = ref->next) {
|
||||
|
||||
@@ -304,9 +304,19 @@ parse_done:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void add_wrapped_shortlog_msg(struct strbuf *sb, const char *s,
|
||||
const struct shortlog *log)
|
||||
{
|
||||
int col = strbuf_add_wrapped_text(sb, s, log->in1, log->in2, log->wrap);
|
||||
if (col != log->wrap)
|
||||
strbuf_addch(sb, '\n');
|
||||
}
|
||||
|
||||
void shortlog_output(struct shortlog *log)
|
||||
{
|
||||
int i, j;
|
||||
struct strbuf sb = STRBUF_INIT;
|
||||
|
||||
if (log->sort_by_number)
|
||||
qsort(log->list.items, log->list.nr, sizeof(struct string_list_item),
|
||||
compare_by_number);
|
||||
@@ -321,9 +331,9 @@ void shortlog_output(struct shortlog *log)
|
||||
const char *msg = onelines->items[j].string;
|
||||
|
||||
if (log->wrap_lines) {
|
||||
int col = print_wrapped_text(msg, log->in1, log->in2, log->wrap);
|
||||
if (col != log->wrap)
|
||||
putchar('\n');
|
||||
strbuf_reset(&sb);
|
||||
add_wrapped_shortlog_msg(&sb, msg, log);
|
||||
fwrite(sb.buf, sb.len, 1, stdout);
|
||||
}
|
||||
else
|
||||
printf(" %s\n", msg);
|
||||
@@ -337,6 +347,7 @@ void shortlog_output(struct shortlog *log)
|
||||
log->list.items[i].util = NULL;
|
||||
}
|
||||
|
||||
strbuf_release(&sb);
|
||||
log->list.strdup_strings = 1;
|
||||
string_list_clear(&log->list, 1);
|
||||
clear_mailmap(&log->mailmap);
|
||||
|
||||
@@ -567,7 +567,7 @@ static int git_show_branch_config(const char *var, const char *value, void *cb)
|
||||
return config_error_nonbool(var);
|
||||
/*
|
||||
* default_arg is now passed to parse_options(), so we need to
|
||||
* mimick the real argv a bit better.
|
||||
* mimic the real argv a bit better.
|
||||
*/
|
||||
if (!default_num) {
|
||||
default_alloc = 20;
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
#include "cache.h"
|
||||
#include "exec_cmd.h"
|
||||
|
||||
static const char var_usage[] = "git var [-l | <variable>]";
|
||||
static const char var_usage[] = "git var (-l | <variable>)";
|
||||
|
||||
static const char *editor(int flag)
|
||||
{
|
||||
@@ -20,7 +20,7 @@ static const char *editor(int flag)
|
||||
|
||||
static const char *pager(int flag)
|
||||
{
|
||||
const char *pgm = git_pager();
|
||||
const char *pgm = git_pager(1);
|
||||
|
||||
if (!pgm)
|
||||
pgm = "cat";
|
||||
|
||||
2
cache.h
2
cache.h
@@ -782,7 +782,7 @@ extern const char *git_committer_info(int);
|
||||
extern const char *fmt_ident(const char *name, const char *email, const char *date_str, int);
|
||||
extern const char *fmt_name(const char *name, const char *email);
|
||||
extern const char *git_editor(void);
|
||||
extern const char *git_pager(void);
|
||||
extern const char *git_pager(int stdout_is_tty);
|
||||
|
||||
struct checkout {
|
||||
const char *base_dir;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Copyright (C) 2009 Andrzej K. Haczewski <ahaczewski@gmail.com>
|
||||
*
|
||||
* DISCLAMER: The implementation is Git-specific, it is subset of original
|
||||
* DISCLAIMER: The implementation is Git-specific, it is subset of original
|
||||
* Pthreads API, without lots of other features that Git doesn't use.
|
||||
* Git also makes sure that the passed arguments are valid, so there's
|
||||
* no need for double-checking.
|
||||
|
||||
@@ -504,7 +504,7 @@ struct child_process *git_connect(int fd[2], const char *url_orig,
|
||||
|
||||
/*
|
||||
* Don't do destructive transforms with git:// as that
|
||||
* protocol code does '[]' dewrapping of its own.
|
||||
* protocol code does '[]' unwrapping of its own.
|
||||
*/
|
||||
if (host[0] == '[') {
|
||||
end = strchr(host + 1, ']');
|
||||
|
||||
@@ -967,9 +967,8 @@ class P4Sync(Command):
|
||||
elif file["type"] == "symlink":
|
||||
mode = "120000"
|
||||
# p4 print on a symlink contains "target\n", so strip it off
|
||||
last = contents.pop()
|
||||
last = last[:-1]
|
||||
contents.append(last)
|
||||
data = ''.join(contents)
|
||||
contents = [data[:-1]]
|
||||
|
||||
if self.isWindows and file["type"].endswith("text"):
|
||||
mangled = []
|
||||
|
||||
@@ -344,7 +344,7 @@ sub parsekeyvaluepair
|
||||
|
||||
Key and value strings may be enclosed in quotes, in which case
|
||||
whitespace inside the quotes is preserved. Additionally, an equal
|
||||
sign may be included in the key by preceeding it with a backslash.
|
||||
sign may be included in the key by preceding it with a backslash.
|
||||
For example:
|
||||
|
||||
"key1 "=value1
|
||||
|
||||
@@ -241,7 +241,7 @@ struct filter_params {
|
||||
const char *cmd;
|
||||
};
|
||||
|
||||
static int filter_buffer(int fd, void *data)
|
||||
static int filter_buffer(int in, int out, void *data)
|
||||
{
|
||||
/*
|
||||
* Spawn cmd and feed the buffer contents through its stdin.
|
||||
@@ -255,7 +255,7 @@ static int filter_buffer(int fd, void *data)
|
||||
child_process.argv = argv;
|
||||
child_process.use_shell = 1;
|
||||
child_process.in = -1;
|
||||
child_process.out = fd;
|
||||
child_process.out = out;
|
||||
|
||||
if (start_command(&child_process))
|
||||
return error("cannot fork to run external filter %s", params->cmd);
|
||||
@@ -292,6 +292,7 @@ static int apply_filter(const char *path, const char *src, size_t len,
|
||||
memset(&async, 0, sizeof(async));
|
||||
async.proc = filter_buffer;
|
||||
async.data = ¶ms;
|
||||
async.out = -1;
|
||||
params.src = src;
|
||||
params.size = len;
|
||||
params.cmd = cmd;
|
||||
|
||||
2
daemon.c
2
daemon.c
@@ -407,7 +407,7 @@ static void parse_host_and_port(char *hostport, char **host,
|
||||
|
||||
end = strchr(hostport, ']');
|
||||
if (!end)
|
||||
die("Invalid reqeuest ('[' without ']')");
|
||||
die("Invalid request ('[' without ']')");
|
||||
*end = '\0';
|
||||
*host = hostport + 1;
|
||||
if (!end[1])
|
||||
|
||||
27
diff.c
27
diff.c
@@ -2893,6 +2893,8 @@ int diff_opt_parse(struct diff_options *options, const char **av, int ac)
|
||||
;
|
||||
else if (!prefixcmp(arg, "--output=")) {
|
||||
options->file = fopen(arg + strlen("--output="), "w");
|
||||
if (!options->file)
|
||||
die_errno("Could not open '%s'", arg + strlen("--output="));
|
||||
options->close_file = 1;
|
||||
} else
|
||||
return 0;
|
||||
@@ -3520,6 +3522,29 @@ void diff_flush(struct diff_options *options)
|
||||
separator++;
|
||||
}
|
||||
|
||||
if (output_format & DIFF_FORMAT_NO_OUTPUT &&
|
||||
DIFF_OPT_TST(options, EXIT_WITH_STATUS) &&
|
||||
DIFF_OPT_TST(options, DIFF_FROM_CONTENTS)) {
|
||||
/*
|
||||
* run diff_flush_patch for the exit status. setting
|
||||
* options->file to /dev/null should be safe, becaue we
|
||||
* aren't supposed to produce any output anyway.
|
||||
*/
|
||||
if (options->close_file)
|
||||
fclose(options->file);
|
||||
options->file = fopen("/dev/null", "w");
|
||||
if (!options->file)
|
||||
die_errno("Could not open /dev/null");
|
||||
options->close_file = 1;
|
||||
for (i = 0; i < q->nr; i++) {
|
||||
struct diff_filepair *p = q->queue[i];
|
||||
if (check_pair_status(p))
|
||||
diff_flush_patch(p, options);
|
||||
if (options->found_changes)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (output_format & DIFF_FORMAT_PATCH) {
|
||||
if (separator) {
|
||||
putc(options->line_termination, options->file);
|
||||
@@ -3642,7 +3667,7 @@ static void diffcore_skip_stat_unmatch(struct diff_options *diffopt)
|
||||
struct diff_filepair *p = q->queue[i];
|
||||
|
||||
/*
|
||||
* 1. Entries that come from stat info dirtyness
|
||||
* 1. Entries that come from stat info dirtiness
|
||||
* always have both sides (iow, not create/delete),
|
||||
* one side of the object name is unknown, with
|
||||
* the same mode and size. Keep the ones that
|
||||
|
||||
2
dir.c
2
dir.c
@@ -1044,7 +1044,7 @@ int remove_path(const char *name)
|
||||
slash = dirs + (slash - name);
|
||||
do {
|
||||
*slash = '\0';
|
||||
} while (rmdir(dirs) && (slash = strrchr(dirs, '/')));
|
||||
} while (rmdir(dirs) == 0 && (slash = strrchr(dirs, '/')));
|
||||
free(dirs);
|
||||
}
|
||||
return 0;
|
||||
|
||||
174
fast-import.c
174
fast-import.c
@@ -164,12 +164,11 @@ Format of STDIN stream:
|
||||
|
||||
struct object_entry
|
||||
{
|
||||
struct pack_idx_entry idx;
|
||||
struct object_entry *next;
|
||||
uint32_t offset;
|
||||
uint32_t type : TYPE_BITS,
|
||||
pack_id : PACK_ID_BITS,
|
||||
depth : DEPTH_BITS;
|
||||
unsigned char sha1[20];
|
||||
};
|
||||
|
||||
struct object_entry_pool
|
||||
@@ -192,7 +191,7 @@ struct mark_set
|
||||
struct last_object
|
||||
{
|
||||
struct strbuf data;
|
||||
uint32_t offset;
|
||||
off_t offset;
|
||||
unsigned int depth;
|
||||
unsigned no_swap : 1;
|
||||
};
|
||||
@@ -280,7 +279,7 @@ struct recent_command
|
||||
|
||||
/* Configured limits on output */
|
||||
static unsigned long max_depth = 10;
|
||||
static off_t max_packsize = (1LL << 32) - 1;
|
||||
static off_t max_packsize;
|
||||
static uintmax_t big_file_threshold = 512 * 1024 * 1024;
|
||||
static int force_update;
|
||||
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
|
||||
@@ -313,9 +312,10 @@ static struct atom_str **atom_table;
|
||||
|
||||
/* The .pack file being generated */
|
||||
static unsigned int pack_id;
|
||||
static struct sha1file *pack_file;
|
||||
static struct packed_git *pack_data;
|
||||
static struct packed_git **all_packs;
|
||||
static unsigned long pack_size;
|
||||
static off_t pack_size;
|
||||
|
||||
/* Table of objects we've written. */
|
||||
static unsigned int object_entry_alloc = 5000;
|
||||
@@ -521,7 +521,7 @@ static struct object_entry *new_object(unsigned char *sha1)
|
||||
alloc_objects(object_entry_alloc);
|
||||
|
||||
e = blocks->next_free++;
|
||||
hashcpy(e->sha1, sha1);
|
||||
hashcpy(e->idx.sha1, sha1);
|
||||
return e;
|
||||
}
|
||||
|
||||
@@ -530,7 +530,7 @@ static struct object_entry *find_object(unsigned char *sha1)
|
||||
unsigned int h = sha1[0] << 8 | sha1[1];
|
||||
struct object_entry *e;
|
||||
for (e = object_table[h]; e; e = e->next)
|
||||
if (!hashcmp(sha1, e->sha1))
|
||||
if (!hashcmp(sha1, e->idx.sha1))
|
||||
return e;
|
||||
return NULL;
|
||||
}
|
||||
@@ -542,7 +542,7 @@ static struct object_entry *insert_object(unsigned char *sha1)
|
||||
struct object_entry *p = NULL;
|
||||
|
||||
while (e) {
|
||||
if (!hashcmp(sha1, e->sha1))
|
||||
if (!hashcmp(sha1, e->idx.sha1))
|
||||
return e;
|
||||
p = e;
|
||||
e = e->next;
|
||||
@@ -550,7 +550,7 @@ static struct object_entry *insert_object(unsigned char *sha1)
|
||||
|
||||
e = new_object(sha1);
|
||||
e->next = NULL;
|
||||
e->offset = 0;
|
||||
e->idx.offset = 0;
|
||||
if (p)
|
||||
p->next = e;
|
||||
else
|
||||
@@ -839,11 +839,12 @@ static void start_packfile(void)
|
||||
p = xcalloc(1, sizeof(*p) + strlen(tmpfile) + 2);
|
||||
strcpy(p->pack_name, tmpfile);
|
||||
p->pack_fd = pack_fd;
|
||||
pack_file = sha1fd(pack_fd, p->pack_name);
|
||||
|
||||
hdr.hdr_signature = htonl(PACK_SIGNATURE);
|
||||
hdr.hdr_version = htonl(2);
|
||||
hdr.hdr_entries = 0;
|
||||
write_or_die(p->pack_fd, &hdr, sizeof(hdr));
|
||||
sha1write(pack_file, &hdr, sizeof(hdr));
|
||||
|
||||
pack_data = p;
|
||||
pack_size = sizeof(hdr);
|
||||
@@ -853,67 +854,30 @@ static void start_packfile(void)
|
||||
all_packs[pack_id] = p;
|
||||
}
|
||||
|
||||
static int oecmp (const void *a_, const void *b_)
|
||||
static const char *create_index(void)
|
||||
{
|
||||
struct object_entry *a = *((struct object_entry**)a_);
|
||||
struct object_entry *b = *((struct object_entry**)b_);
|
||||
return hashcmp(a->sha1, b->sha1);
|
||||
}
|
||||
|
||||
static char *create_index(void)
|
||||
{
|
||||
static char tmpfile[PATH_MAX];
|
||||
git_SHA_CTX ctx;
|
||||
struct sha1file *f;
|
||||
struct object_entry **idx, **c, **last, *e;
|
||||
const char *tmpfile;
|
||||
struct pack_idx_entry **idx, **c, **last;
|
||||
struct object_entry *e;
|
||||
struct object_entry_pool *o;
|
||||
uint32_t array[256];
|
||||
int i, idx_fd;
|
||||
|
||||
/* Build the sorted table of object IDs. */
|
||||
idx = xmalloc(object_count * sizeof(struct object_entry*));
|
||||
/* Build the table of object IDs. */
|
||||
idx = xmalloc(object_count * sizeof(*idx));
|
||||
c = idx;
|
||||
for (o = blocks; o; o = o->next_pool)
|
||||
for (e = o->next_free; e-- != o->entries;)
|
||||
if (pack_id == e->pack_id)
|
||||
*c++ = e;
|
||||
*c++ = &e->idx;
|
||||
last = idx + object_count;
|
||||
if (c != last)
|
||||
die("internal consistency error creating the index");
|
||||
qsort(idx, object_count, sizeof(struct object_entry*), oecmp);
|
||||
|
||||
/* Generate the fan-out array. */
|
||||
c = idx;
|
||||
for (i = 0; i < 256; i++) {
|
||||
struct object_entry **next = c;
|
||||
while (next < last) {
|
||||
if ((*next)->sha1[0] != i)
|
||||
break;
|
||||
next++;
|
||||
}
|
||||
array[i] = htonl(next - idx);
|
||||
c = next;
|
||||
}
|
||||
|
||||
idx_fd = odb_mkstemp(tmpfile, sizeof(tmpfile),
|
||||
"pack/tmp_idx_XXXXXX");
|
||||
f = sha1fd(idx_fd, tmpfile);
|
||||
sha1write(f, array, 256 * sizeof(int));
|
||||
git_SHA1_Init(&ctx);
|
||||
for (c = idx; c != last; c++) {
|
||||
uint32_t offset = htonl((*c)->offset);
|
||||
sha1write(f, &offset, 4);
|
||||
sha1write(f, (*c)->sha1, sizeof((*c)->sha1));
|
||||
git_SHA1_Update(&ctx, (*c)->sha1, 20);
|
||||
}
|
||||
sha1write(f, pack_data->sha1, sizeof(pack_data->sha1));
|
||||
sha1close(f, NULL, CSUM_FSYNC);
|
||||
tmpfile = write_idx_file(NULL, idx, object_count, pack_data->sha1);
|
||||
free(idx);
|
||||
git_SHA1_Final(pack_data->sha1, &ctx);
|
||||
return tmpfile;
|
||||
}
|
||||
|
||||
static char *keep_pack(char *curr_index_name)
|
||||
static char *keep_pack(const char *curr_index_name)
|
||||
{
|
||||
static char name[PATH_MAX];
|
||||
static const char *keep_msg = "fast-import";
|
||||
@@ -935,6 +899,7 @@ static char *keep_pack(char *curr_index_name)
|
||||
get_object_directory(), sha1_to_hex(pack_data->sha1));
|
||||
if (move_temp_to_file(curr_index_name, name))
|
||||
die("cannot store index file");
|
||||
free((void *)curr_index_name);
|
||||
return name;
|
||||
}
|
||||
|
||||
@@ -957,15 +922,17 @@ static void end_packfile(void)
|
||||
|
||||
clear_delta_base_cache();
|
||||
if (object_count) {
|
||||
unsigned char cur_pack_sha1[20];
|
||||
char *idx_name;
|
||||
int i;
|
||||
struct branch *b;
|
||||
struct tag *t;
|
||||
|
||||
close_pack_windows(pack_data);
|
||||
sha1close(pack_file, cur_pack_sha1, 0);
|
||||
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
|
||||
pack_data->pack_name, object_count,
|
||||
NULL, 0);
|
||||
cur_pack_sha1, pack_size);
|
||||
close(pack_data->pack_fd);
|
||||
idx_name = keep_pack(create_index());
|
||||
|
||||
@@ -1063,25 +1030,21 @@ static int store_object(
|
||||
e = insert_object(sha1);
|
||||
if (mark)
|
||||
insert_mark(mark, e);
|
||||
if (e->offset) {
|
||||
if (e->idx.offset) {
|
||||
duplicate_count_by_type[type]++;
|
||||
return 1;
|
||||
} else if (find_sha1_pack(sha1, packed_git)) {
|
||||
e->type = type;
|
||||
e->pack_id = MAX_PACK_ID;
|
||||
e->offset = 1; /* just not zero! */
|
||||
e->idx.offset = 1; /* just not zero! */
|
||||
duplicate_count_by_type[type]++;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (last && last->data.buf && last->depth < max_depth) {
|
||||
if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
|
||||
delta = diff_delta(last->data.buf, last->data.len,
|
||||
dat->buf, dat->len,
|
||||
&deltalen, 0);
|
||||
if (delta && deltalen >= dat->len) {
|
||||
free(delta);
|
||||
delta = NULL;
|
||||
}
|
||||
&deltalen, dat->len - 20);
|
||||
} else
|
||||
delta = NULL;
|
||||
|
||||
@@ -1101,7 +1064,7 @@ static int store_object(
|
||||
deflateEnd(&s);
|
||||
|
||||
/* Determine if we should auto-checkpoint. */
|
||||
if ((pack_size + 60 + s.total_out) > max_packsize
|
||||
if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
|
||||
|| (pack_size + 60 + s.total_out) < pack_size) {
|
||||
|
||||
/* This new object needs to *not* have the current pack_id. */
|
||||
@@ -1127,36 +1090,40 @@ static int store_object(
|
||||
|
||||
e->type = type;
|
||||
e->pack_id = pack_id;
|
||||
e->offset = pack_size;
|
||||
e->idx.offset = pack_size;
|
||||
object_count++;
|
||||
object_count_by_type[type]++;
|
||||
|
||||
crc32_begin(pack_file);
|
||||
|
||||
if (delta) {
|
||||
unsigned long ofs = e->offset - last->offset;
|
||||
off_t ofs = e->idx.offset - last->offset;
|
||||
unsigned pos = sizeof(hdr) - 1;
|
||||
|
||||
delta_count_by_type[type]++;
|
||||
e->depth = last->depth + 1;
|
||||
|
||||
hdrlen = encode_header(OBJ_OFS_DELTA, deltalen, hdr);
|
||||
write_or_die(pack_data->pack_fd, hdr, hdrlen);
|
||||
sha1write(pack_file, hdr, hdrlen);
|
||||
pack_size += hdrlen;
|
||||
|
||||
hdr[pos] = ofs & 127;
|
||||
while (ofs >>= 7)
|
||||
hdr[--pos] = 128 | (--ofs & 127);
|
||||
write_or_die(pack_data->pack_fd, hdr + pos, sizeof(hdr) - pos);
|
||||
sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
|
||||
pack_size += sizeof(hdr) - pos;
|
||||
} else {
|
||||
e->depth = 0;
|
||||
hdrlen = encode_header(type, dat->len, hdr);
|
||||
write_or_die(pack_data->pack_fd, hdr, hdrlen);
|
||||
sha1write(pack_file, hdr, hdrlen);
|
||||
pack_size += hdrlen;
|
||||
}
|
||||
|
||||
write_or_die(pack_data->pack_fd, out, s.total_out);
|
||||
sha1write(pack_file, out, s.total_out);
|
||||
pack_size += s.total_out;
|
||||
|
||||
e->idx.crc32 = crc32_end(pack_file);
|
||||
|
||||
free(out);
|
||||
free(delta);
|
||||
if (last) {
|
||||
@@ -1165,18 +1132,23 @@ static int store_object(
|
||||
} else {
|
||||
strbuf_swap(&last->data, dat);
|
||||
}
|
||||
last->offset = e->offset;
|
||||
last->offset = e->idx.offset;
|
||||
last->depth = e->depth;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void truncate_pack(off_t to)
|
||||
static void truncate_pack(off_t to, git_SHA_CTX *ctx)
|
||||
{
|
||||
if (ftruncate(pack_data->pack_fd, to)
|
||||
|| lseek(pack_data->pack_fd, to, SEEK_SET) != to)
|
||||
die_errno("cannot truncate pack to skip duplicate");
|
||||
pack_size = to;
|
||||
|
||||
/* yes this is a layering violation */
|
||||
pack_file->total = to;
|
||||
pack_file->offset = 0;
|
||||
pack_file->ctx = *ctx;
|
||||
}
|
||||
|
||||
static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
|
||||
@@ -1189,16 +1161,21 @@ static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
|
||||
unsigned long hdrlen;
|
||||
off_t offset;
|
||||
git_SHA_CTX c;
|
||||
git_SHA_CTX pack_file_ctx;
|
||||
z_stream s;
|
||||
int status = Z_OK;
|
||||
|
||||
/* Determine if we should auto-checkpoint. */
|
||||
if ((pack_size + 60 + len) > max_packsize
|
||||
if ((max_packsize && (pack_size + 60 + len) > max_packsize)
|
||||
|| (pack_size + 60 + len) < pack_size)
|
||||
cycle_packfile();
|
||||
|
||||
offset = pack_size;
|
||||
|
||||
/* preserve the pack_file SHA1 ctx in case we have to truncate later */
|
||||
sha1flush(pack_file);
|
||||
pack_file_ctx = pack_file->ctx;
|
||||
|
||||
hdrlen = snprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
|
||||
if (out_sz <= hdrlen)
|
||||
die("impossibly large object header");
|
||||
@@ -1206,6 +1183,8 @@ static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
|
||||
git_SHA1_Init(&c);
|
||||
git_SHA1_Update(&c, out_buf, hdrlen);
|
||||
|
||||
crc32_begin(pack_file);
|
||||
|
||||
memset(&s, 0, sizeof(s));
|
||||
deflateInit(&s, pack_compression_level);
|
||||
|
||||
@@ -1233,7 +1212,7 @@ static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
|
||||
|
||||
if (!s.avail_out || status == Z_STREAM_END) {
|
||||
size_t n = s.next_out - out_buf;
|
||||
write_or_die(pack_data->pack_fd, out_buf, n);
|
||||
sha1write(pack_file, out_buf, n);
|
||||
pack_size += n;
|
||||
s.next_out = out_buf;
|
||||
s.avail_out = out_sz;
|
||||
@@ -1259,22 +1238,23 @@ static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
|
||||
if (mark)
|
||||
insert_mark(mark, e);
|
||||
|
||||
if (e->offset) {
|
||||
if (e->idx.offset) {
|
||||
duplicate_count_by_type[OBJ_BLOB]++;
|
||||
truncate_pack(offset);
|
||||
truncate_pack(offset, &pack_file_ctx);
|
||||
|
||||
} else if (find_sha1_pack(sha1, packed_git)) {
|
||||
e->type = OBJ_BLOB;
|
||||
e->pack_id = MAX_PACK_ID;
|
||||
e->offset = 1; /* just not zero! */
|
||||
e->idx.offset = 1; /* just not zero! */
|
||||
duplicate_count_by_type[OBJ_BLOB]++;
|
||||
truncate_pack(offset);
|
||||
truncate_pack(offset, &pack_file_ctx);
|
||||
|
||||
} else {
|
||||
e->depth = 0;
|
||||
e->type = OBJ_BLOB;
|
||||
e->pack_id = pack_id;
|
||||
e->offset = offset;
|
||||
e->idx.offset = offset;
|
||||
e->idx.crc32 = crc32_end(pack_file);
|
||||
object_count++;
|
||||
object_count_by_type[OBJ_BLOB]++;
|
||||
}
|
||||
@@ -1317,6 +1297,7 @@ static void *gfi_unpack_entry(
|
||||
* the newly written data.
|
||||
*/
|
||||
close_pack_windows(p);
|
||||
sha1flush(pack_file);
|
||||
|
||||
/* We have to offer 20 bytes additional on the end of
|
||||
* the packfile as the core unpacker code assumes the
|
||||
@@ -1326,7 +1307,7 @@ static void *gfi_unpack_entry(
|
||||
*/
|
||||
p->pack_size = pack_size + 20;
|
||||
}
|
||||
return unpack_entry(p, oe->offset, &type, sizep);
|
||||
return unpack_entry(p, oe->idx.offset, &type, sizep);
|
||||
}
|
||||
|
||||
static const char *get_mode(const char *str, uint16_t *modep)
|
||||
@@ -1457,7 +1438,7 @@ static void store_tree(struct tree_entry *root)
|
||||
if (S_ISDIR(root->versions[0].mode) && le && le->pack_id == pack_id) {
|
||||
mktree(t, 0, &old_tree);
|
||||
lo.data = old_tree;
|
||||
lo.offset = le->offset;
|
||||
lo.offset = le->idx.offset;
|
||||
lo.depth = t->delta_depth;
|
||||
}
|
||||
|
||||
@@ -1715,7 +1696,7 @@ static void dump_marks_helper(FILE *f,
|
||||
for (k = 0; k < 1024; k++) {
|
||||
if (m->data.marked[k])
|
||||
fprintf(f, ":%" PRIuMAX " %s\n", base + k,
|
||||
sha1_to_hex(m->data.marked[k]->sha1));
|
||||
sha1_to_hex(m->data.marked[k]->idx.sha1));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1798,7 +1779,7 @@ static void read_marks(void)
|
||||
e = insert_object(sha1);
|
||||
e->type = type;
|
||||
e->pack_id = MAX_PACK_ID;
|
||||
e->offset = 1; /* just not zero! */
|
||||
e->idx.offset = 1; /* just not zero! */
|
||||
}
|
||||
insert_mark(mark, e);
|
||||
}
|
||||
@@ -2183,7 +2164,7 @@ static void file_change_m(struct branch *b)
|
||||
if (*p == ':') {
|
||||
char *x;
|
||||
oe = find_mark(strtoumax(p + 1, &x, 10));
|
||||
hashcpy(sha1, oe->sha1);
|
||||
hashcpy(sha1, oe->idx.sha1);
|
||||
p = x;
|
||||
} else if (!prefixcmp(p, "inline")) {
|
||||
inline_data = 1;
|
||||
@@ -2316,7 +2297,7 @@ static void note_change_n(struct branch *b, unsigned char old_fanout)
|
||||
if (*p == ':') {
|
||||
char *x;
|
||||
oe = find_mark(strtoumax(p + 1, &x, 10));
|
||||
hashcpy(sha1, oe->sha1);
|
||||
hashcpy(sha1, oe->idx.sha1);
|
||||
p = x;
|
||||
} else if (!prefixcmp(p, "inline")) {
|
||||
inline_data = 1;
|
||||
@@ -2339,7 +2320,7 @@ static void note_change_n(struct branch *b, unsigned char old_fanout)
|
||||
struct object_entry *commit_oe = find_mark(commit_mark);
|
||||
if (commit_oe->type != OBJ_COMMIT)
|
||||
die("Mark :%" PRIuMAX " not a commit", commit_mark);
|
||||
hashcpy(commit_sha1, commit_oe->sha1);
|
||||
hashcpy(commit_sha1, commit_oe->idx.sha1);
|
||||
} else if (!get_sha1(p, commit_sha1)) {
|
||||
unsigned long size;
|
||||
char *buf = read_object_with_reference(commit_sha1,
|
||||
@@ -2446,7 +2427,7 @@ static int parse_from(struct branch *b)
|
||||
struct object_entry *oe = find_mark(idnum);
|
||||
if (oe->type != OBJ_COMMIT)
|
||||
die("Mark :%" PRIuMAX " not a commit", idnum);
|
||||
hashcpy(b->sha1, oe->sha1);
|
||||
hashcpy(b->sha1, oe->idx.sha1);
|
||||
if (oe->pack_id != MAX_PACK_ID) {
|
||||
unsigned long size;
|
||||
char *buf = gfi_unpack_entry(oe, &size);
|
||||
@@ -2481,7 +2462,7 @@ static struct hash_list *parse_merge(unsigned int *count)
|
||||
struct object_entry *oe = find_mark(idnum);
|
||||
if (oe->type != OBJ_COMMIT)
|
||||
die("Mark :%" PRIuMAX " not a commit", idnum);
|
||||
hashcpy(n->sha1, oe->sha1);
|
||||
hashcpy(n->sha1, oe->idx.sha1);
|
||||
} else if (!get_sha1(from, n->sha1)) {
|
||||
unsigned long size;
|
||||
char *buf = read_object_with_reference(n->sha1,
|
||||
@@ -2639,7 +2620,7 @@ static void parse_new_tag(void)
|
||||
from_mark = strtoumax(from + 1, NULL, 10);
|
||||
oe = find_mark(from_mark);
|
||||
type = oe->type;
|
||||
hashcpy(sha1, oe->sha1);
|
||||
hashcpy(sha1, oe->idx.sha1);
|
||||
} else if (!get_sha1(from, sha1)) {
|
||||
unsigned long size;
|
||||
char *buf;
|
||||
@@ -2891,6 +2872,17 @@ static int git_pack_config(const char *k, const char *v, void *cb)
|
||||
pack_compression_seen = 1;
|
||||
return 0;
|
||||
}
|
||||
if (!strcmp(k, "pack.indexversion")) {
|
||||
pack_idx_default_version = git_config_int(k, v);
|
||||
if (pack_idx_default_version > 2)
|
||||
die("bad pack.indexversion=%"PRIu32,
|
||||
pack_idx_default_version);
|
||||
return 0;
|
||||
}
|
||||
if (!strcmp(k, "pack.packsizelimit")) {
|
||||
max_packsize = git_config_ulong(k, v);
|
||||
return 0;
|
||||
}
|
||||
if (!strcmp(k, "core.bigfilethreshold")) {
|
||||
long n = git_config_int(k, v);
|
||||
big_file_threshold = 0 < n ? n : 0;
|
||||
|
||||
@@ -663,10 +663,7 @@ do
|
||||
[eE]*) git_editor "$dotest/final-commit"
|
||||
action=again ;;
|
||||
[vV]*) action=again
|
||||
: ${GIT_PAGER=$(git var GIT_PAGER)}
|
||||
: ${LESS=-FRSX}
|
||||
export LESS
|
||||
$GIT_PAGER "$dotest/patch" ;;
|
||||
git_pager "$dotest/patch" ;;
|
||||
*) action=again ;;
|
||||
esac
|
||||
done
|
||||
@@ -776,6 +773,5 @@ do
|
||||
go_next
|
||||
done
|
||||
|
||||
git gc --auto
|
||||
|
||||
rm -fr "$dotest"
|
||||
git gc --auto
|
||||
|
||||
@@ -107,6 +107,19 @@ git_editor() {
|
||||
eval "$GIT_EDITOR" '"$@"'
|
||||
}
|
||||
|
||||
git_pager() {
|
||||
if test -t 1
|
||||
then
|
||||
GIT_PAGER=$(git var GIT_PAGER)
|
||||
else
|
||||
GIT_PAGER=cat
|
||||
fi
|
||||
: ${LESS=-FRSX}
|
||||
export LESS
|
||||
|
||||
eval "$GIT_PAGER" '"$@"'
|
||||
}
|
||||
|
||||
sane_grep () {
|
||||
GREP_OPTIONS= LC_ALL=C grep "$@"
|
||||
}
|
||||
@@ -128,7 +141,7 @@ cd_to_toplevel () {
|
||||
}
|
||||
|
||||
require_work_tree () {
|
||||
test $(git rev-parse --is-inside-work-tree) = true ||
|
||||
test "$(git rev-parse --is-inside-work-tree 2>/dev/null)" = true ||
|
||||
die "fatal: $0 cannot be used without a working tree."
|
||||
}
|
||||
|
||||
|
||||
@@ -151,6 +151,7 @@ save_stash () {
|
||||
;;
|
||||
-*)
|
||||
echo "error: unknown option for 'stash save': $1"
|
||||
echo " To provide a message, use git stash save -- '$1'"
|
||||
usage
|
||||
;;
|
||||
*)
|
||||
@@ -221,6 +222,7 @@ show_stash () {
|
||||
}
|
||||
|
||||
apply_stash () {
|
||||
applied_stash=
|
||||
unstash_index=
|
||||
|
||||
while test $# != 0
|
||||
@@ -242,6 +244,9 @@ apply_stash () {
|
||||
if test $# = 0
|
||||
then
|
||||
have_stash || die 'Nothing to apply'
|
||||
applied_stash="$ref_stash@{0}"
|
||||
else
|
||||
applied_stash="$*"
|
||||
fi
|
||||
|
||||
# stash records the work tree, and is a merge between the
|
||||
@@ -415,8 +420,7 @@ pop)
|
||||
shift
|
||||
if apply_stash "$@"
|
||||
then
|
||||
test -z "$unstash_index" || shift
|
||||
drop_stash "$@"
|
||||
drop_stash "$applied_stash"
|
||||
fi
|
||||
;;
|
||||
branch)
|
||||
|
||||
@@ -5459,7 +5459,12 @@ sub git_svn_log_cmd {
|
||||
|
||||
# adapted from pager.c
|
||||
sub config_pager {
|
||||
chomp(my $pager = command_oneline(qw(var GIT_PAGER)));
|
||||
if (! -t *STDOUT) {
|
||||
$ENV{GIT_PAGER_IN_USE} = 'false';
|
||||
$pager = undef;
|
||||
return;
|
||||
}
|
||||
chomp($pager = command_oneline(qw(var GIT_PAGER)));
|
||||
if ($pager eq 'cat') {
|
||||
$pager = undef;
|
||||
}
|
||||
@@ -5467,7 +5472,7 @@ sub config_pager {
|
||||
}
|
||||
|
||||
sub run_pager {
|
||||
return unless -t *STDOUT && defined $pager;
|
||||
return unless defined $pager;
|
||||
pipe my ($rfd, $wfd) or return;
|
||||
defined(my $pid = fork) or ::fatal "Can't fork: $!";
|
||||
if (!$pid) {
|
||||
|
||||
2
git.c
2
git.c
@@ -527,7 +527,7 @@ int main(int argc, const char **argv)
|
||||
break;
|
||||
if (was_alias) {
|
||||
fprintf(stderr, "Expansion of alias '%s' failed; "
|
||||
"'%s' is not a git-command\n",
|
||||
"'%s' is not a git command\n",
|
||||
cmd, argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -312,12 +312,16 @@ If you want to have one URL for both gitweb and your http://
|
||||
repositories, you can configure apache like this:
|
||||
|
||||
<VirtualHost *:80>
|
||||
ServerName git.example.org
|
||||
DocumentRoot /pub/git
|
||||
SetEnv GITWEB_CONFIG /etc/gitweb.conf
|
||||
ServerName git.example.org
|
||||
DocumentRoot /pub/git
|
||||
SetEnv GITWEB_CONFIG /etc/gitweb.conf
|
||||
|
||||
# turning on mod rewrite
|
||||
RewriteEngine on
|
||||
|
||||
# make the front page an internal rewrite to the gitweb script
|
||||
RewriteRule ^/$ /cgi-bin/gitweb.cgi
|
||||
|
||||
# make access for "dumb clients" work
|
||||
RewriteRule ^/(.*\.git/(?!/?(HEAD|info|objects|refs)).*)?$ /cgi-bin/gitweb.cgi%{REQUEST_URI} [L,PT]
|
||||
</VirtualHost>
|
||||
@@ -343,6 +347,63 @@ something like the following in your gitweb.conf (or gitweb_config.perl) file:
|
||||
$home_link = "/";
|
||||
|
||||
|
||||
Webserver configuration with multiple projects' root
|
||||
----------------------------------------------------
|
||||
|
||||
If you want to use gitweb with several project roots you can edit your apache
|
||||
virtual host and gitweb.conf configuration files like this :
|
||||
|
||||
virtual host configuration :
|
||||
|
||||
<VirtualHost *:80>
|
||||
ServerName git.example.org
|
||||
DocumentRoot /pub/git
|
||||
SetEnv GITWEB_CONFIG /etc/gitweb.conf
|
||||
|
||||
# turning on mod rewrite
|
||||
RewriteEngine on
|
||||
|
||||
# make the front page an internal rewrite to the gitweb script
|
||||
RewriteRule ^/$ /cgi-bin/gitweb.cgi [QSA,L,PT]
|
||||
|
||||
# look for a public_git folder in unix users' home
|
||||
# http://git.example.org/~<user>/
|
||||
RewriteRule ^/\~([^\/]+)(/|/gitweb.cgi)?$ /cgi-bin/gitweb.cgi [QSA,E=GITWEB_PROJECTROOT:/home/$1/public_git/,L,PT]
|
||||
|
||||
# http://git.example.org/+<user>/
|
||||
#RewriteRule ^/\+([^\/]+)(/|/gitweb.cgi)?$ /cgi-bin/gitweb.cgi [QSA,E=GITWEB_PROJECTROOT:/home/$1/public_git/,L,PT]
|
||||
|
||||
# http://git.example.org/user/<user>/
|
||||
#RewriteRule ^/user/([^\/]+)/(gitweb.cgi)?$ /cgi-bin/gitweb.cgi [QSA,E=GITWEB_PROJECTROOT:/home/$1/public_git/,L,PT]
|
||||
|
||||
# defined list of project roots
|
||||
RewriteRule ^/scm(/|/gitweb.cgi)?$ /cgi-bin/gitweb.cgi [QSA,E=GITWEB_PROJECTROOT:/pub/scm/,L,PT]
|
||||
RewriteRule ^/var(/|/gitweb.cgi)?$ /cgi-bin/gitweb.cgi [QSA,E=GITWEB_PROJECTROOT:/var/git/,L,PT]
|
||||
|
||||
# make access for "dumb clients" work
|
||||
RewriteRule ^/(.*\.git/(?!/?(HEAD|info|objects|refs)).*)?$ /cgi-bin/gitweb.cgi%{REQUEST_URI} [L,PT]
|
||||
</VirtualHost>
|
||||
|
||||
gitweb.conf configuration :
|
||||
|
||||
$projectroot = $ENV{'GITWEB_PROJECTROOT'} || "/pub/git";
|
||||
|
||||
These configurations enable two things. First, each unix user (<user>) of the
|
||||
server will be able to browse through gitweb git repositories found in
|
||||
~/public_git/ with the following url : http://git.example.org/~<user>/
|
||||
|
||||
If you do not want this feature on your server just remove the second rewrite rule.
|
||||
|
||||
If you already use mod_userdir in your virtual host or you don't want to use
|
||||
the '~' as first character just comment or remove the second rewrite rule and
|
||||
uncomment one of the following according to what you want.
|
||||
|
||||
Second, repositories found in /pub/scm/ and /var/git/ will be accesible
|
||||
through http://git.example.org/scm/ and http://git.example.org/var/.
|
||||
You can add as many project roots as you want by adding rewrite rules like the
|
||||
third and the fourth.
|
||||
|
||||
|
||||
PATH_INFO usage
|
||||
-----------------------
|
||||
If you enable PATH_INFO usage in gitweb by putting
|
||||
|
||||
@@ -454,7 +454,11 @@ sub gitweb_get_feature {
|
||||
$feature{$name}{'sub'},
|
||||
$feature{$name}{'override'},
|
||||
@{$feature{$name}{'default'}});
|
||||
if (!$override) { return @defaults; }
|
||||
# project specific override is possible only if we have project
|
||||
our $git_dir; # global variable, declared later
|
||||
if (!$override || !defined $git_dir) {
|
||||
return @defaults;
|
||||
}
|
||||
if (!defined $sub) {
|
||||
warn "feature $name is not overridable";
|
||||
return @defaults;
|
||||
@@ -550,11 +554,14 @@ sub filter_snapshot_fmts {
|
||||
}
|
||||
|
||||
our $GITWEB_CONFIG = $ENV{'GITWEB_CONFIG'} || "++GITWEB_CONFIG++";
|
||||
our $GITWEB_CONFIG_SYSTEM = $ENV{'GITWEB_CONFIG_SYSTEM'} || "++GITWEB_CONFIG_SYSTEM++";
|
||||
# die if there are errors parsing config file
|
||||
if (-e $GITWEB_CONFIG) {
|
||||
do $GITWEB_CONFIG;
|
||||
} else {
|
||||
our $GITWEB_CONFIG_SYSTEM = $ENV{'GITWEB_CONFIG_SYSTEM'} || "++GITWEB_CONFIG_SYSTEM++";
|
||||
do $GITWEB_CONFIG_SYSTEM if -e $GITWEB_CONFIG_SYSTEM;
|
||||
die $@ if $@;
|
||||
} elsif (-e $GITWEB_CONFIG_SYSTEM) {
|
||||
do $GITWEB_CONFIG_SYSTEM;
|
||||
die $@ if $@;
|
||||
}
|
||||
|
||||
# Get loadavg of system, to compare against $maxload.
|
||||
@@ -2202,6 +2209,9 @@ sub config_to_multi {
|
||||
sub git_get_project_config {
|
||||
my ($key, $type) = @_;
|
||||
|
||||
# do we have project
|
||||
return unless (defined $project && defined $git_dir);
|
||||
|
||||
# key sanity check
|
||||
return unless ($key);
|
||||
$key =~ s/^gitweb\.//;
|
||||
|
||||
2
help.c
2
help.c
@@ -350,7 +350,7 @@ const char *help_unknown_cmd(const char *cmd)
|
||||
return assumed;
|
||||
}
|
||||
|
||||
fprintf(stderr, "git: '%s' is not a git-command. See 'git --help'.\n", cmd);
|
||||
fprintf(stderr, "git: '%s' is not a git command. See 'git --help'.\n", cmd);
|
||||
|
||||
if (SIMILAR_ENOUGH(best_similarity)) {
|
||||
fprintf(stderr, "\nDid you mean %s?\n",
|
||||
|
||||
40
imap-send.c
40
imap-send.c
@@ -91,7 +91,6 @@ struct msg_data {
|
||||
char *data;
|
||||
int len;
|
||||
unsigned char flags;
|
||||
unsigned int crlf:1;
|
||||
};
|
||||
|
||||
static const char imap_send_usage[] = "git imap-send < <mbox>";
|
||||
@@ -1162,6 +1161,44 @@ static int imap_make_flags(int flags, char *buf)
|
||||
return d;
|
||||
}
|
||||
|
||||
static void lf_to_crlf(struct msg_data *msg)
|
||||
{
|
||||
char *new;
|
||||
int i, j, lfnum = 0;
|
||||
|
||||
if (msg->data[0] == '\n')
|
||||
lfnum++;
|
||||
for (i = 1; i < msg->len; i++) {
|
||||
if (msg->data[i - 1] != '\r' && msg->data[i] == '\n')
|
||||
lfnum++;
|
||||
}
|
||||
|
||||
new = xmalloc(msg->len + lfnum);
|
||||
if (msg->data[0] == '\n') {
|
||||
new[0] = '\r';
|
||||
new[1] = '\n';
|
||||
i = 1;
|
||||
j = 2;
|
||||
} else {
|
||||
new[0] = msg->data[0];
|
||||
i = 1;
|
||||
j = 1;
|
||||
}
|
||||
for ( ; i < msg->len; i++) {
|
||||
if (msg->data[i] != '\n') {
|
||||
new[j++] = msg->data[i];
|
||||
continue;
|
||||
}
|
||||
if (msg->data[i - 1] != '\r')
|
||||
new[j++] = '\r';
|
||||
/* otherwise it already had CR before */
|
||||
new[j++] = '\n';
|
||||
}
|
||||
msg->len += lfnum;
|
||||
free(msg->data);
|
||||
msg->data = new;
|
||||
}
|
||||
|
||||
static int imap_store_msg(struct store *gctx, struct msg_data *data)
|
||||
{
|
||||
struct imap_store *ctx = (struct imap_store *)gctx;
|
||||
@@ -1171,6 +1208,7 @@ static int imap_store_msg(struct store *gctx, struct msg_data *data)
|
||||
int ret, d;
|
||||
char flagstr[128];
|
||||
|
||||
lf_to_crlf(data);
|
||||
memset(&cb, 0, sizeof(cb));
|
||||
|
||||
cb.dlen = data->len;
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
#define LEVENSHTEIN_H
|
||||
|
||||
int levenshtein(const char *string1, const char *string2,
|
||||
int swap_penalty, int substition_penalty,
|
||||
int swap_penalty, int substitution_penalty,
|
||||
int insertion_penalty, int deletion_penalty);
|
||||
|
||||
#endif
|
||||
|
||||
6
pager.c
6
pager.c
@@ -48,11 +48,11 @@ static void wait_for_pager_signal(int signo)
|
||||
raise(signo);
|
||||
}
|
||||
|
||||
const char *git_pager(void)
|
||||
const char *git_pager(int stdout_is_tty)
|
||||
{
|
||||
const char *pager;
|
||||
|
||||
if (!isatty(1))
|
||||
if (!stdout_is_tty)
|
||||
return NULL;
|
||||
|
||||
pager = getenv("GIT_PAGER");
|
||||
@@ -73,7 +73,7 @@ const char *git_pager(void)
|
||||
|
||||
void setup_pager(void)
|
||||
{
|
||||
const char *pager = git_pager();
|
||||
const char *pager = git_pager(isatty(1));
|
||||
|
||||
if (!pager)
|
||||
return;
|
||||
|
||||
2
path.c
2
path.c
@@ -610,7 +610,7 @@ int daemon_avoid_alias(const char *p)
|
||||
/*
|
||||
* This resurrects the belts and suspenders paranoia check by HPA
|
||||
* done in <435560F7.4080006@zytor.com> thread, now enter_repo()
|
||||
* does not do getcwd() based path canonicalizations.
|
||||
* does not do getcwd() based path canonicalization.
|
||||
*
|
||||
* sl becomes true immediately after seeing '/' and continues to
|
||||
* be true as long as dots continue after that without intervening
|
||||
|
||||
@@ -204,14 +204,14 @@ sub repository {
|
||||
$dir = $opts{Directory};
|
||||
|
||||
unless (-d "$dir/refs" and -d "$dir/objects" and -e "$dir/HEAD") {
|
||||
# Mimick git-rev-parse --git-dir error message:
|
||||
# Mimic git-rev-parse --git-dir error message:
|
||||
throw Error::Simple("fatal: Not a git repository: $dir");
|
||||
}
|
||||
my $search = Git->repository(Repository => $dir);
|
||||
try {
|
||||
$search->command('symbolic-ref', 'HEAD');
|
||||
} catch Git::Error::Command with {
|
||||
# Mimick git-rev-parse --git-dir error message:
|
||||
# Mimic git-rev-parse --git-dir error message:
|
||||
throw Error::Simple("fatal: Not a git repository: $dir");
|
||||
}
|
||||
|
||||
|
||||
2
refs.c
2
refs.c
@@ -706,7 +706,7 @@ int for_each_glob_ref_in(each_ref_fn fn, const char *pattern,
|
||||
|
||||
has_glob_specials = strpbrk(pattern, "?*[");
|
||||
if (!has_glob_specials) {
|
||||
/* Append impiled '/' '*' if not present. */
|
||||
/* Append implied '/' '*' if not present. */
|
||||
if (real_pattern.buf[real_pattern.len - 1] != '/')
|
||||
strbuf_addch(&real_pattern, '/');
|
||||
/* No need to check for '*', there is none. */
|
||||
|
||||
@@ -184,13 +184,13 @@ static struct discovery* discover_refs(const char *service)
|
||||
return last;
|
||||
}
|
||||
|
||||
static int write_discovery(int fd, void *data)
|
||||
static int write_discovery(int in, int out, void *data)
|
||||
{
|
||||
struct discovery *heads = data;
|
||||
int err = 0;
|
||||
if (write_in_full(fd, heads->buf, heads->len) != heads->len)
|
||||
if (write_in_full(out, heads->buf, heads->len) != heads->len)
|
||||
err = 1;
|
||||
close(fd);
|
||||
close(out);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -202,6 +202,7 @@ static struct ref *parse_git_refs(struct discovery *heads)
|
||||
memset(&async, 0, sizeof(async));
|
||||
async.proc = write_discovery;
|
||||
async.data = heads;
|
||||
async.out = -1;
|
||||
|
||||
if (start_async(&async))
|
||||
die("cannot start thread to parse advertised refs");
|
||||
|
||||
9
rerere.c
9
rerere.c
@@ -364,7 +364,7 @@ static int find_conflict(struct string_list *conflict)
|
||||
static int merge(const char *name, const char *path)
|
||||
{
|
||||
int ret;
|
||||
mmfile_t cur, base, other;
|
||||
mmfile_t cur = {NULL, 0}, base = {NULL, 0}, other = {NULL, 0};
|
||||
mmbuffer_t result = {NULL, 0};
|
||||
|
||||
if (handle_file(path, NULL, rerere_path(name, "thisimage")) < 0)
|
||||
@@ -372,8 +372,10 @@ static int merge(const char *name, const char *path)
|
||||
|
||||
if (read_mmfile(&cur, rerere_path(name, "thisimage")) ||
|
||||
read_mmfile(&base, rerere_path(name, "preimage")) ||
|
||||
read_mmfile(&other, rerere_path(name, "postimage")))
|
||||
return 1;
|
||||
read_mmfile(&other, rerere_path(name, "postimage"))) {
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
ret = ll_merge(&result, path, &base, &cur, "", &other, "", 0);
|
||||
if (!ret) {
|
||||
FILE *f = fopen(path, "w");
|
||||
@@ -387,6 +389,7 @@ static int merge(const char *name, const char *path)
|
||||
strerror(errno));
|
||||
}
|
||||
|
||||
out:
|
||||
free(cur.ptr);
|
||||
free(base.ptr);
|
||||
free(other.ptr);
|
||||
|
||||
@@ -547,6 +547,9 @@ static void cherry_pick_list(struct commit_list *list, struct rev_info *revs)
|
||||
right_count++;
|
||||
}
|
||||
|
||||
if (!left_count || !right_count)
|
||||
return;
|
||||
|
||||
left_first = left_count < right_count;
|
||||
init_patch_ids(&ids);
|
||||
if (revs->diffopt.nr_paths) {
|
||||
|
||||
@@ -233,6 +233,9 @@ fail_pipe:
|
||||
else if (need_err) {
|
||||
dup2(fderr[1], 2);
|
||||
close_pair(fderr);
|
||||
} else if (cmd->err > 1) {
|
||||
dup2(cmd->err, 2);
|
||||
close(cmd->err);
|
||||
}
|
||||
|
||||
if (cmd->no_stdout)
|
||||
@@ -325,6 +328,8 @@ fail_pipe:
|
||||
fherr = open("/dev/null", O_RDWR);
|
||||
else if (need_err)
|
||||
fherr = dup(fderr[1]);
|
||||
else if (cmd->err > 2)
|
||||
fherr = dup(cmd->err);
|
||||
|
||||
if (cmd->no_stdout)
|
||||
fhout = open("/dev/null", O_RDWR);
|
||||
@@ -394,6 +399,8 @@ fail_pipe:
|
||||
|
||||
if (need_err)
|
||||
close(fderr[1]);
|
||||
else if (cmd->err)
|
||||
close(cmd->err);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -444,17 +451,51 @@ int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const
|
||||
static unsigned __stdcall run_thread(void *data)
|
||||
{
|
||||
struct async *async = data;
|
||||
return async->proc(async->fd_for_proc, async->data);
|
||||
return async->proc(async->proc_in, async->proc_out, async->data);
|
||||
}
|
||||
#endif
|
||||
|
||||
int start_async(struct async *async)
|
||||
{
|
||||
int pipe_out[2];
|
||||
int need_in, need_out;
|
||||
int fdin[2], fdout[2];
|
||||
int proc_in, proc_out;
|
||||
|
||||
if (pipe(pipe_out) < 0)
|
||||
return error("cannot create pipe: %s", strerror(errno));
|
||||
async->out = pipe_out[0];
|
||||
need_in = async->in < 0;
|
||||
if (need_in) {
|
||||
if (pipe(fdin) < 0) {
|
||||
if (async->out > 0)
|
||||
close(async->out);
|
||||
return error("cannot create pipe: %s", strerror(errno));
|
||||
}
|
||||
async->in = fdin[1];
|
||||
}
|
||||
|
||||
need_out = async->out < 0;
|
||||
if (need_out) {
|
||||
if (pipe(fdout) < 0) {
|
||||
if (need_in)
|
||||
close_pair(fdin);
|
||||
else if (async->in)
|
||||
close(async->in);
|
||||
return error("cannot create pipe: %s", strerror(errno));
|
||||
}
|
||||
async->out = fdout[0];
|
||||
}
|
||||
|
||||
if (need_in)
|
||||
proc_in = fdin[0];
|
||||
else if (async->in)
|
||||
proc_in = async->in;
|
||||
else
|
||||
proc_in = -1;
|
||||
|
||||
if (need_out)
|
||||
proc_out = fdout[1];
|
||||
else if (async->out)
|
||||
proc_out = async->out;
|
||||
else
|
||||
proc_out = -1;
|
||||
|
||||
#ifndef WIN32
|
||||
/* Flush stdio before fork() to avoid cloning buffers */
|
||||
@@ -463,24 +504,47 @@ int start_async(struct async *async)
|
||||
async->pid = fork();
|
||||
if (async->pid < 0) {
|
||||
error("fork (async) failed: %s", strerror(errno));
|
||||
close_pair(pipe_out);
|
||||
return -1;
|
||||
goto error;
|
||||
}
|
||||
if (!async->pid) {
|
||||
close(pipe_out[0]);
|
||||
exit(!!async->proc(pipe_out[1], async->data));
|
||||
if (need_in)
|
||||
close(fdin[1]);
|
||||
if (need_out)
|
||||
close(fdout[0]);
|
||||
exit(!!async->proc(proc_in, proc_out, async->data));
|
||||
}
|
||||
close(pipe_out[1]);
|
||||
|
||||
if (need_in)
|
||||
close(fdin[0]);
|
||||
else if (async->in)
|
||||
close(async->in);
|
||||
|
||||
if (need_out)
|
||||
close(fdout[1]);
|
||||
else if (async->out)
|
||||
close(async->out);
|
||||
#else
|
||||
async->fd_for_proc = pipe_out[1];
|
||||
async->proc_in = proc_in;
|
||||
async->proc_out = proc_out;
|
||||
async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL);
|
||||
if (!async->tid) {
|
||||
error("cannot create thread: %s", strerror(errno));
|
||||
close_pair(pipe_out);
|
||||
return -1;
|
||||
goto error;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
|
||||
error:
|
||||
if (need_in)
|
||||
close_pair(fdin);
|
||||
else if (async->in)
|
||||
close(async->in);
|
||||
|
||||
if (need_out)
|
||||
close_pair(fdout);
|
||||
else if (async->out)
|
||||
close(async->out);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int finish_async(struct async *async)
|
||||
|
||||
@@ -18,7 +18,7 @@ struct child_process {
|
||||
* - Specify > 0 to set a channel to a particular FD as follows:
|
||||
* .in: a readable FD, becomes child's stdin
|
||||
* .out: a writable FD, becomes child's stdout/stderr
|
||||
* .err > 0 not supported
|
||||
* .err: a writable FD, becomes child's stderr
|
||||
* The specified FD is closed by start_command(), even in case
|
||||
* of errors!
|
||||
*/
|
||||
@@ -66,17 +66,20 @@ int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const
|
||||
*/
|
||||
struct async {
|
||||
/*
|
||||
* proc writes to fd and closes it;
|
||||
* proc reads from in; closes it before return
|
||||
* proc writes to out; closes it before return
|
||||
* returns 0 on success, non-zero on failure
|
||||
*/
|
||||
int (*proc)(int fd, void *data);
|
||||
int (*proc)(int in, int out, void *data);
|
||||
void *data;
|
||||
int in; /* caller writes here and closes it */
|
||||
int out; /* caller reads from here and closes it */
|
||||
#ifndef WIN32
|
||||
pid_t pid;
|
||||
#else
|
||||
HANDLE tid;
|
||||
int fd_for_proc;
|
||||
int proc_in;
|
||||
int proc_out;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
2
setup.c
2
setup.c
@@ -206,7 +206,7 @@ int is_inside_work_tree(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* set_work_tree() is only ever called if you set GIT_DIR explicitely.
|
||||
* set_work_tree() is only ever called if you set GIT_DIR explicitly.
|
||||
* The old behaviour (which we retain here) is to set the work tree root
|
||||
* to the cwd, unless overridden by the config, the command line, or
|
||||
* GIT_WORK_TREE.
|
||||
|
||||
43
sha1_file.c
43
sha1_file.c
@@ -2281,9 +2281,10 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
|
||||
void *buf, unsigned long len, time_t mtime)
|
||||
{
|
||||
int fd, ret;
|
||||
size_t size;
|
||||
unsigned char *compressed;
|
||||
unsigned char compressed[4096];
|
||||
z_stream stream;
|
||||
git_SHA_CTX c;
|
||||
unsigned char parano_sha1[20];
|
||||
char *filename;
|
||||
static char tmpfile[PATH_MAX];
|
||||
|
||||
@@ -2301,36 +2302,40 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
|
||||
/* Set it up */
|
||||
memset(&stream, 0, sizeof(stream));
|
||||
deflateInit(&stream, zlib_compression_level);
|
||||
size = 8 + deflateBound(&stream, len+hdrlen);
|
||||
compressed = xmalloc(size);
|
||||
|
||||
/* Compress it */
|
||||
stream.next_out = compressed;
|
||||
stream.avail_out = size;
|
||||
stream.avail_out = sizeof(compressed);
|
||||
git_SHA1_Init(&c);
|
||||
|
||||
/* First header.. */
|
||||
stream.next_in = (unsigned char *)hdr;
|
||||
stream.avail_in = hdrlen;
|
||||
while (deflate(&stream, 0) == Z_OK)
|
||||
/* nothing */;
|
||||
git_SHA1_Update(&c, hdr, hdrlen);
|
||||
|
||||
/* Then the data itself.. */
|
||||
stream.next_in = buf;
|
||||
stream.avail_in = len;
|
||||
ret = deflate(&stream, Z_FINISH);
|
||||
do {
|
||||
unsigned char *in0 = stream.next_in;
|
||||
ret = deflate(&stream, Z_FINISH);
|
||||
git_SHA1_Update(&c, in0, stream.next_in - in0);
|
||||
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
|
||||
die("unable to write sha1 file");
|
||||
stream.next_out = compressed;
|
||||
stream.avail_out = sizeof(compressed);
|
||||
} while (ret == Z_OK);
|
||||
|
||||
if (ret != Z_STREAM_END)
|
||||
die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
|
||||
|
||||
ret = deflateEnd(&stream);
|
||||
if (ret != Z_OK)
|
||||
die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
|
||||
git_SHA1_Final(parano_sha1, &c);
|
||||
if (hashcmp(sha1, parano_sha1) != 0)
|
||||
die("confused by unstable object source data for %s", sha1_to_hex(sha1));
|
||||
|
||||
size = stream.total_out;
|
||||
|
||||
if (write_buffer(fd, compressed, size) < 0)
|
||||
die("unable to write sha1 file");
|
||||
close_sha1_file(fd);
|
||||
free(compressed);
|
||||
|
||||
if (mtime) {
|
||||
struct utimbuf utb;
|
||||
@@ -2434,6 +2439,8 @@ static int index_mem(unsigned char *sha1, void *buf, size_t size,
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define SMALL_FILE_SIZE (32*1024)
|
||||
|
||||
int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
|
||||
enum object_type type, const char *path)
|
||||
{
|
||||
@@ -2448,6 +2455,14 @@ int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
|
||||
else
|
||||
ret = -1;
|
||||
strbuf_release(&sbuf);
|
||||
} else if (size <= SMALL_FILE_SIZE) {
|
||||
char *buf = xmalloc(size);
|
||||
if (size == read_in_full(fd, buf, size))
|
||||
ret = index_mem(sha1, buf, size, write_object, type,
|
||||
path);
|
||||
else
|
||||
ret = error("short read %s", strerror(errno));
|
||||
free(buf);
|
||||
} else if (size) {
|
||||
void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
|
||||
ret = index_mem(sha1, buf, size, write_object, type, path);
|
||||
|
||||
35
sha1_name.c
35
sha1_name.c
@@ -280,8 +280,7 @@ int dwim_ref(const char *str, int len, unsigned char *sha1, char **ref)
|
||||
*ref = xstrdup(r);
|
||||
if (!warn_ambiguous_refs)
|
||||
break;
|
||||
} else if ((flag & REF_ISSYMREF) &&
|
||||
(len != 4 || strcmp(str, "HEAD")))
|
||||
} else if ((flag & REF_ISSYMREF) && strcmp(fullref, "HEAD"))
|
||||
warning("ignoring dangling symref %s.", fullref);
|
||||
}
|
||||
free(last_branch);
|
||||
@@ -993,13 +992,15 @@ static void diagnose_invalid_index_path(int stage,
|
||||
pos = cache_name_pos(filename, namelen);
|
||||
if (pos < 0)
|
||||
pos = -pos - 1;
|
||||
ce = active_cache[pos];
|
||||
if (ce_namelen(ce) == namelen &&
|
||||
!memcmp(ce->name, filename, namelen))
|
||||
die("Path '%s' is in the index, but not at stage %d.\n"
|
||||
"Did you mean ':%d:%s'?",
|
||||
filename, stage,
|
||||
ce_stage(ce), filename);
|
||||
if (pos < active_nr) {
|
||||
ce = active_cache[pos];
|
||||
if (ce_namelen(ce) == namelen &&
|
||||
!memcmp(ce->name, filename, namelen))
|
||||
die("Path '%s' is in the index, but not at stage %d.\n"
|
||||
"Did you mean ':%d:%s'?",
|
||||
filename, stage,
|
||||
ce_stage(ce), filename);
|
||||
}
|
||||
|
||||
/* Confusion between relative and absolute filenames? */
|
||||
fullnamelen = namelen + strlen(prefix);
|
||||
@@ -1009,13 +1010,15 @@ static void diagnose_invalid_index_path(int stage,
|
||||
pos = cache_name_pos(fullname, fullnamelen);
|
||||
if (pos < 0)
|
||||
pos = -pos - 1;
|
||||
ce = active_cache[pos];
|
||||
if (ce_namelen(ce) == fullnamelen &&
|
||||
!memcmp(ce->name, fullname, fullnamelen))
|
||||
die("Path '%s' is in the index, but not '%s'.\n"
|
||||
"Did you mean ':%d:%s'?",
|
||||
fullname, filename,
|
||||
ce_stage(ce), fullname);
|
||||
if (pos < active_nr) {
|
||||
ce = active_cache[pos];
|
||||
if (ce_namelen(ce) == fullnamelen &&
|
||||
!memcmp(ce->name, fullname, fullnamelen))
|
||||
die("Path '%s' is in the index, but not '%s'.\n"
|
||||
"Did you mean ':%d:%s'?",
|
||||
fullname, filename,
|
||||
ce_stage(ce), fullname);
|
||||
}
|
||||
|
||||
if (!lstat(filename, &st))
|
||||
die("Path '%s' exists on disk, but not in the index.", filename);
|
||||
|
||||
2
t/lib-patch-mode.sh
Executable file → Normal file
2
t/lib-patch-mode.sh
Executable file → Normal file
@@ -1,3 +1,5 @@
|
||||
: included from t2016 and others
|
||||
|
||||
. ./test-lib.sh
|
||||
|
||||
if ! test_have_prereq PERL; then
|
||||
|
||||
@@ -66,12 +66,12 @@ tagger T A Gger <tagger@example.com> 1234567890 -0000
|
||||
This is an invalid tag.
|
||||
EOF
|
||||
|
||||
test_expect_failure 'tag pointing to nonexistent' '
|
||||
tag=$(git hash-object -w --stdin < invalid-tag) &&
|
||||
test_expect_success 'tag pointing to nonexistent' '
|
||||
tag=$(git hash-object -t tag -w --stdin < invalid-tag) &&
|
||||
echo $tag > .git/refs/tags/invalid &&
|
||||
git fsck --tags 2>out &&
|
||||
test_must_fail git fsck --tags >out &&
|
||||
cat out &&
|
||||
grep "could not load tagged object" out &&
|
||||
grep "broken link" out &&
|
||||
rm .git/refs/tags/invalid
|
||||
'
|
||||
|
||||
@@ -84,12 +84,12 @@ tagger T A Gger <tagger@example.com> 1234567890 -0000
|
||||
This is an invalid tag.
|
||||
EOF
|
||||
|
||||
test_expect_failure 'tag pointing to something else than its type' '
|
||||
tag=$(git hash-object -w --stdin < wrong-tag) &&
|
||||
test_expect_success 'tag pointing to something else than its type' '
|
||||
tag=$(git hash-object -t tag -w --stdin < wrong-tag) &&
|
||||
echo $tag > .git/refs/tags/wrong &&
|
||||
git fsck --tags 2>out &&
|
||||
test_must_fail git fsck --tags 2>out &&
|
||||
cat out &&
|
||||
grep "some sane error message" out &&
|
||||
grep "error in tag.*broken links" out &&
|
||||
rm .git/refs/tags/wrong
|
||||
'
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ test_description='Test commit notes'
|
||||
. ./test-lib.sh
|
||||
|
||||
cat > fake_editor.sh << \EOF
|
||||
#!/bin/sh
|
||||
echo "$MSG" > "$1"
|
||||
echo "$MSG" >& 2
|
||||
EOF
|
||||
|
||||
@@ -271,4 +271,12 @@ test_expect_success 'choking "git rm" should not let it die with cruft' '
|
||||
test "$status" != 0
|
||||
'
|
||||
|
||||
test_expect_success 'rm removes subdirectories recursively' '
|
||||
mkdir -p dir/subdir/subsubdir &&
|
||||
echo content >dir/subdir/subsubdir/file &&
|
||||
git add dir/subdir/subsubdir/file &&
|
||||
git rm -f dir/subdir/subsubdir/file &&
|
||||
! test -d dir
|
||||
'
|
||||
|
||||
test_done
|
||||
|
||||
@@ -195,6 +195,15 @@ test_expect_success 'pop -q is quiet' '
|
||||
test ! -s output.out
|
||||
'
|
||||
|
||||
test_expect_success 'pop -q --index works and is quiet' '
|
||||
echo foo > file &&
|
||||
git add file &&
|
||||
git stash save --quiet &&
|
||||
git stash pop -q --index > output.out 2>&1 &&
|
||||
test foo = "$(git show :file)" &&
|
||||
test ! -s output.out
|
||||
'
|
||||
|
||||
test_expect_success 'drop -q is quiet' '
|
||||
git stash &&
|
||||
git stash drop -q > output.out 2>&1 &&
|
||||
|
||||
@@ -5,6 +5,9 @@ test_description='Return value of diffs'
|
||||
. ./test-lib.sh
|
||||
|
||||
test_expect_success 'setup' '
|
||||
echo "1 " >a &&
|
||||
git add . &&
|
||||
git commit -m zeroth &&
|
||||
echo 1 >a &&
|
||||
git add . &&
|
||||
git commit -m first &&
|
||||
@@ -13,6 +16,18 @@ test_expect_success 'setup' '
|
||||
git commit -a -m second
|
||||
'
|
||||
|
||||
test_expect_success 'git diff --quiet -w HEAD^^ HEAD^' '
|
||||
git diff --quiet -w HEAD^^ HEAD^
|
||||
'
|
||||
|
||||
test_expect_success 'git diff --quiet HEAD^^ HEAD^' '
|
||||
test_must_fail git diff --quiet HEAD^^ HEAD^
|
||||
'
|
||||
|
||||
test_expect_success 'git diff --quiet -w HEAD^ HEAD' '
|
||||
test_must_fail git diff --quiet -w HEAD^ HEAD
|
||||
'
|
||||
|
||||
test_expect_success 'git diff-tree HEAD^ HEAD' '
|
||||
git diff-tree --exit-code HEAD^ HEAD
|
||||
test $? = 1
|
||||
|
||||
@@ -17,23 +17,22 @@ test_expect_success setup '
|
||||
commit1=$(echo modify | git commit-tree $tree1 -p $commit0) &&
|
||||
git update-ref refs/heads/master $commit0 &&
|
||||
git update-ref refs/heads/tofail $commit1 &&
|
||||
git clone ./. victim &&
|
||||
GIT_DIR=victim/.git git config receive.denyCurrentBranch warn &&
|
||||
GIT_DIR=victim/.git git update-ref refs/heads/tofail $commit1 &&
|
||||
git clone --bare ./. victim.git &&
|
||||
GIT_DIR=victim.git git update-ref refs/heads/tofail $commit1 &&
|
||||
git update-ref refs/heads/master $commit1 &&
|
||||
git update-ref refs/heads/tofail $commit0
|
||||
'
|
||||
|
||||
cat >victim/.git/hooks/pre-receive <<'EOF'
|
||||
cat >victim.git/hooks/pre-receive <<'EOF'
|
||||
#!/bin/sh
|
||||
printf %s "$@" >>$GIT_DIR/pre-receive.args
|
||||
cat - >$GIT_DIR/pre-receive.stdin
|
||||
echo STDOUT pre-receive
|
||||
echo STDERR pre-receive >&2
|
||||
EOF
|
||||
chmod u+x victim/.git/hooks/pre-receive
|
||||
chmod u+x victim.git/hooks/pre-receive
|
||||
|
||||
cat >victim/.git/hooks/update <<'EOF'
|
||||
cat >victim.git/hooks/update <<'EOF'
|
||||
#!/bin/sh
|
||||
echo "$@" >>$GIT_DIR/update.args
|
||||
read x; printf %s "$x" >$GIT_DIR/update.stdin
|
||||
@@ -41,77 +40,77 @@ echo STDOUT update $1
|
||||
echo STDERR update $1 >&2
|
||||
test "$1" = refs/heads/master || exit
|
||||
EOF
|
||||
chmod u+x victim/.git/hooks/update
|
||||
chmod u+x victim.git/hooks/update
|
||||
|
||||
cat >victim/.git/hooks/post-receive <<'EOF'
|
||||
cat >victim.git/hooks/post-receive <<'EOF'
|
||||
#!/bin/sh
|
||||
printf %s "$@" >>$GIT_DIR/post-receive.args
|
||||
cat - >$GIT_DIR/post-receive.stdin
|
||||
echo STDOUT post-receive
|
||||
echo STDERR post-receive >&2
|
||||
EOF
|
||||
chmod u+x victim/.git/hooks/post-receive
|
||||
chmod u+x victim.git/hooks/post-receive
|
||||
|
||||
cat >victim/.git/hooks/post-update <<'EOF'
|
||||
cat >victim.git/hooks/post-update <<'EOF'
|
||||
#!/bin/sh
|
||||
echo "$@" >>$GIT_DIR/post-update.args
|
||||
read x; printf %s "$x" >$GIT_DIR/post-update.stdin
|
||||
echo STDOUT post-update
|
||||
echo STDERR post-update >&2
|
||||
EOF
|
||||
chmod u+x victim/.git/hooks/post-update
|
||||
chmod u+x victim.git/hooks/post-update
|
||||
|
||||
test_expect_success push '
|
||||
test_must_fail git send-pack --force ./victim/.git \
|
||||
test_must_fail git send-pack --force ./victim.git \
|
||||
master tofail >send.out 2>send.err
|
||||
'
|
||||
|
||||
test_expect_success 'updated as expected' '
|
||||
test $(GIT_DIR=victim/.git git rev-parse master) = $commit1 &&
|
||||
test $(GIT_DIR=victim/.git git rev-parse tofail) = $commit1
|
||||
test $(GIT_DIR=victim.git git rev-parse master) = $commit1 &&
|
||||
test $(GIT_DIR=victim.git git rev-parse tofail) = $commit1
|
||||
'
|
||||
|
||||
test_expect_success 'hooks ran' '
|
||||
test -f victim/.git/pre-receive.args &&
|
||||
test -f victim/.git/pre-receive.stdin &&
|
||||
test -f victim/.git/update.args &&
|
||||
test -f victim/.git/update.stdin &&
|
||||
test -f victim/.git/post-receive.args &&
|
||||
test -f victim/.git/post-receive.stdin &&
|
||||
test -f victim/.git/post-update.args &&
|
||||
test -f victim/.git/post-update.stdin
|
||||
test -f victim.git/pre-receive.args &&
|
||||
test -f victim.git/pre-receive.stdin &&
|
||||
test -f victim.git/update.args &&
|
||||
test -f victim.git/update.stdin &&
|
||||
test -f victim.git/post-receive.args &&
|
||||
test -f victim.git/post-receive.stdin &&
|
||||
test -f victim.git/post-update.args &&
|
||||
test -f victim.git/post-update.stdin
|
||||
'
|
||||
|
||||
test_expect_success 'pre-receive hook input' '
|
||||
(echo $commit0 $commit1 refs/heads/master;
|
||||
echo $commit1 $commit0 refs/heads/tofail
|
||||
) | test_cmp - victim/.git/pre-receive.stdin
|
||||
) | test_cmp - victim.git/pre-receive.stdin
|
||||
'
|
||||
|
||||
test_expect_success 'update hook arguments' '
|
||||
(echo refs/heads/master $commit0 $commit1;
|
||||
echo refs/heads/tofail $commit1 $commit0
|
||||
) | test_cmp - victim/.git/update.args
|
||||
) | test_cmp - victim.git/update.args
|
||||
'
|
||||
|
||||
test_expect_success 'post-receive hook input' '
|
||||
echo $commit0 $commit1 refs/heads/master |
|
||||
test_cmp - victim/.git/post-receive.stdin
|
||||
test_cmp - victim.git/post-receive.stdin
|
||||
'
|
||||
|
||||
test_expect_success 'post-update hook arguments' '
|
||||
echo refs/heads/master |
|
||||
test_cmp - victim/.git/post-update.args
|
||||
test_cmp - victim.git/post-update.args
|
||||
'
|
||||
|
||||
test_expect_success 'all hook stdin is /dev/null' '
|
||||
! test -s victim/.git/update.stdin &&
|
||||
! test -s victim/.git/post-update.stdin
|
||||
! test -s victim.git/update.stdin &&
|
||||
! test -s victim.git/post-update.stdin
|
||||
'
|
||||
|
||||
test_expect_success 'all *-receive hook args are empty' '
|
||||
! test -s victim/.git/pre-receive.args &&
|
||||
! test -s victim/.git/post-receive.args
|
||||
! test -s victim.git/pre-receive.args &&
|
||||
! test -s victim.git/post-receive.args
|
||||
'
|
||||
|
||||
test_expect_success 'send-pack produced no output' '
|
||||
@@ -119,20 +118,21 @@ test_expect_success 'send-pack produced no output' '
|
||||
'
|
||||
|
||||
cat <<EOF >expect
|
||||
STDOUT pre-receive
|
||||
STDERR pre-receive
|
||||
STDOUT update refs/heads/master
|
||||
STDERR update refs/heads/master
|
||||
STDOUT update refs/heads/tofail
|
||||
STDERR update refs/heads/tofail
|
||||
STDOUT post-receive
|
||||
STDERR post-receive
|
||||
STDOUT post-update
|
||||
STDERR post-update
|
||||
remote: STDOUT pre-receive
|
||||
remote: STDERR pre-receive
|
||||
remote: STDOUT update refs/heads/master
|
||||
remote: STDERR update refs/heads/master
|
||||
remote: STDOUT update refs/heads/tofail
|
||||
remote: STDERR update refs/heads/tofail
|
||||
remote: error: hook declined to update refs/heads/tofail
|
||||
remote: STDOUT post-receive
|
||||
remote: STDERR post-receive
|
||||
remote: STDOUT post-update
|
||||
remote: STDERR post-update
|
||||
EOF
|
||||
test_expect_success 'send-pack stderr contains hook messages' '
|
||||
grep ^STD send.err >actual &&
|
||||
test_cmp - actual <expect
|
||||
grep ^remote: send.err | sed "s/ *\$//" >actual &&
|
||||
test_cmp expect actual
|
||||
'
|
||||
|
||||
test_done
|
||||
|
||||
2
t/t6000lib.sh
Executable file → Normal file
2
t/t6000lib.sh
Executable file → Normal file
@@ -1,3 +1,5 @@
|
||||
: included from 6002 and others
|
||||
|
||||
[ -d .git/refs/tags ] || mkdir -p .git/refs/tags
|
||||
|
||||
:> sed.script
|
||||
|
||||
@@ -567,6 +567,11 @@ test_expect_success 'skipping away from skipped commit' '
|
||||
test "$para3" = "$PARA_HASH3"
|
||||
'
|
||||
|
||||
test_expect_success 'erroring out when using bad path parameters' '
|
||||
test_must_fail git bisect start $PARA_HASH7 $HASH1 -- foobar 2> error.txt &&
|
||||
grep "bad path parameters" error.txt
|
||||
'
|
||||
|
||||
#
|
||||
#
|
||||
test_done
|
||||
|
||||
@@ -434,4 +434,37 @@ test_expect_success 'grep -Fi' '
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
test_expect_success 'setup double-dash tests' '
|
||||
cat >double-dash <<EOF &&
|
||||
--
|
||||
->
|
||||
other
|
||||
EOF
|
||||
git add double-dash
|
||||
'
|
||||
|
||||
cat >expected <<EOF
|
||||
double-dash:->
|
||||
EOF
|
||||
test_expect_success 'grep -- pattern' '
|
||||
git grep -- "->" >actual &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
test_expect_success 'grep -- pattern -- pathspec' '
|
||||
git grep -- "->" -- double-dash >actual &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
test_expect_success 'grep -e pattern -- path' '
|
||||
git grep -e "->" -- double-dash >actual &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
cat >expected <<EOF
|
||||
double-dash:--
|
||||
EOF
|
||||
test_expect_success 'grep -e -- -- path' '
|
||||
git grep -e -- -- double-dash >actual &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
test_done
|
||||
|
||||
176
t/t7006-pager.sh
Executable file
176
t/t7006-pager.sh
Executable file
@@ -0,0 +1,176 @@
|
||||
#!/bin/sh
|
||||
|
||||
test_description='Test automatic use of a pager.'
|
||||
|
||||
. ./test-lib.sh
|
||||
|
||||
rm -f stdout_is_tty
|
||||
test_expect_success 'set up terminal for tests' '
|
||||
if test -t 1
|
||||
then
|
||||
: > stdout_is_tty
|
||||
elif
|
||||
test_have_prereq PERL &&
|
||||
"$PERL_PATH" "$TEST_DIRECTORY"/t7006/test-terminal.perl \
|
||||
sh -c "test -t 1"
|
||||
then
|
||||
: > test_terminal_works
|
||||
fi
|
||||
'
|
||||
|
||||
if test -e stdout_is_tty
|
||||
then
|
||||
test_terminal() { "$@"; }
|
||||
test_set_prereq TTY
|
||||
elif test -e test_terminal_works
|
||||
then
|
||||
test_terminal() {
|
||||
"$PERL_PATH" "$TEST_DIRECTORY"/t7006/test-terminal.perl "$@"
|
||||
}
|
||||
test_set_prereq TTY
|
||||
else
|
||||
say no usable terminal, so skipping some tests
|
||||
fi
|
||||
|
||||
unset GIT_PAGER GIT_PAGER_IN_USE
|
||||
git config --unset core.pager
|
||||
PAGER='cat > paginated.out'
|
||||
export PAGER
|
||||
|
||||
test_expect_success 'setup' '
|
||||
test_commit initial
|
||||
'
|
||||
|
||||
rm -f paginated.out
|
||||
test_expect_success TTY 'some commands use a pager' '
|
||||
test_terminal git log &&
|
||||
test -e paginated.out
|
||||
'
|
||||
|
||||
rm -f paginated.out
|
||||
test_expect_success TTY 'some commands do not use a pager' '
|
||||
test_terminal git rev-list HEAD &&
|
||||
! test -e paginated.out
|
||||
'
|
||||
|
||||
rm -f paginated.out
|
||||
test_expect_success 'no pager when stdout is a pipe' '
|
||||
git log | cat &&
|
||||
! test -e paginated.out
|
||||
'
|
||||
|
||||
rm -f paginated.out
|
||||
test_expect_success 'no pager when stdout is a regular file' '
|
||||
git log > file &&
|
||||
! test -e paginated.out
|
||||
'
|
||||
|
||||
rm -f paginated.out
|
||||
test_expect_success TTY 'git --paginate rev-list uses a pager' '
|
||||
test_terminal git --paginate rev-list HEAD &&
|
||||
test -e paginated.out
|
||||
'
|
||||
|
||||
rm -f file paginated.out
|
||||
test_expect_success 'no pager even with --paginate when stdout is a pipe' '
|
||||
git --paginate log | cat &&
|
||||
! test -e paginated.out
|
||||
'
|
||||
|
||||
rm -f paginated.out
|
||||
test_expect_success TTY 'no pager with --no-pager' '
|
||||
test_terminal git --no-pager log &&
|
||||
! test -e paginated.out
|
||||
'
|
||||
|
||||
# A colored commit log will begin with an appropriate ANSI escape
|
||||
# for the first color; the text "commit" comes later.
|
||||
colorful() {
|
||||
read firstline < $1
|
||||
! expr "$firstline" : "^[a-zA-Z]" >/dev/null
|
||||
}
|
||||
|
||||
rm -f colorful.log colorless.log
|
||||
test_expect_success 'tests can detect color' '
|
||||
git log --no-color > colorless.log &&
|
||||
git log --color > colorful.log &&
|
||||
! colorful colorless.log &&
|
||||
colorful colorful.log
|
||||
'
|
||||
|
||||
rm -f colorless.log
|
||||
git config color.ui auto
|
||||
test_expect_success 'no color when stdout is a regular file' '
|
||||
git log > colorless.log &&
|
||||
! colorful colorless.log
|
||||
'
|
||||
|
||||
rm -f paginated.out
|
||||
git config color.ui auto
|
||||
test_expect_success TTY 'color when writing to a pager' '
|
||||
TERM=vt100 test_terminal git log &&
|
||||
colorful paginated.out
|
||||
'
|
||||
|
||||
rm -f colorful.log
|
||||
git config color.ui auto
|
||||
test_expect_success 'color when writing to a file intended for a pager' '
|
||||
TERM=vt100 GIT_PAGER_IN_USE=true git log > colorful.log &&
|
||||
colorful colorful.log
|
||||
'
|
||||
|
||||
unset PAGER GIT_PAGER
|
||||
git config --unset core.pager
|
||||
test_expect_success 'determine default pager' '
|
||||
less=$(git var GIT_PAGER) &&
|
||||
test -n "$less"
|
||||
'
|
||||
|
||||
if expr "$less" : '^[a-z]*$' > /dev/null && test_have_prereq TTY
|
||||
then
|
||||
test_set_prereq SIMPLEPAGER
|
||||
fi
|
||||
|
||||
unset PAGER GIT_PAGER
|
||||
git config --unset core.pager
|
||||
rm -f default_pager_used
|
||||
test_expect_success SIMPLEPAGER 'default pager is used by default' '
|
||||
cat > $less <<-EOF &&
|
||||
#!$SHELL_PATH
|
||||
wc > default_pager_used
|
||||
EOF
|
||||
chmod +x $less &&
|
||||
PATH=.:$PATH test_terminal git log &&
|
||||
test -e default_pager_used
|
||||
'
|
||||
|
||||
unset GIT_PAGER
|
||||
git config --unset core.pager
|
||||
rm -f PAGER_used
|
||||
test_expect_success TTY 'PAGER overrides default pager' '
|
||||
PAGER="wc > PAGER_used" &&
|
||||
export PAGER &&
|
||||
test_terminal git log &&
|
||||
test -e PAGER_used
|
||||
'
|
||||
|
||||
unset GIT_PAGER
|
||||
rm -f core.pager_used
|
||||
test_expect_success TTY 'core.pager overrides PAGER' '
|
||||
PAGER=wc &&
|
||||
export PAGER &&
|
||||
git config core.pager "wc > core.pager_used" &&
|
||||
test_terminal git log &&
|
||||
test -e core.pager_used
|
||||
'
|
||||
|
||||
rm -f GIT_PAGER_used
|
||||
test_expect_success TTY 'GIT_PAGER overrides core.pager' '
|
||||
git config core.pager wc &&
|
||||
GIT_PAGER="wc > GIT_PAGER_used" &&
|
||||
export GIT_PAGER &&
|
||||
test_terminal git log &&
|
||||
test -e GIT_PAGER_used
|
||||
'
|
||||
|
||||
test_done
|
||||
58
t/t7006/test-terminal.perl
Executable file
58
t/t7006/test-terminal.perl
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/usr/bin/perl
|
||||
use strict;
|
||||
use warnings;
|
||||
use IO::Pty;
|
||||
use File::Copy;
|
||||
|
||||
# Run @$argv in the background with stdout redirected to $out.
|
||||
sub start_child {
|
||||
my ($argv, $out) = @_;
|
||||
my $pid = fork;
|
||||
if (not defined $pid) {
|
||||
die "fork failed: $!"
|
||||
} elsif ($pid == 0) {
|
||||
open STDOUT, ">&", $out;
|
||||
close $out;
|
||||
exec(@$argv) or die "cannot exec '$argv->[0]': $!"
|
||||
}
|
||||
return $pid;
|
||||
}
|
||||
|
||||
# Wait for $pid to finish.
|
||||
sub finish_child {
|
||||
# Simplified from wait_or_whine() in run-command.c.
|
||||
my ($pid) = @_;
|
||||
|
||||
my $waiting = waitpid($pid, 0);
|
||||
if ($waiting < 0) {
|
||||
die "waitpid failed: $!";
|
||||
} elsif ($? & 127) {
|
||||
my $code = $? & 127;
|
||||
warn "died of signal $code";
|
||||
return $code - 128;
|
||||
} else {
|
||||
return $? >> 8;
|
||||
}
|
||||
}
|
||||
|
||||
sub xsendfile {
|
||||
my ($out, $in) = @_;
|
||||
|
||||
# Note: the real sendfile() cannot read from a terminal.
|
||||
|
||||
# It is unspecified by POSIX whether reads
|
||||
# from a disconnected terminal will return
|
||||
# EIO (as in AIX 4.x, IRIX, and Linux) or
|
||||
# end-of-file. Either is fine.
|
||||
copy($in, $out, 4096) or $!{EIO} or die "cannot copy from child: $!";
|
||||
}
|
||||
|
||||
if ($#ARGV < 1) {
|
||||
die "usage: test-terminal program args";
|
||||
}
|
||||
my $master = new IO::Pty;
|
||||
my $slave = $master->slave;
|
||||
my $pid = start_child(\@ARGV, $slave);
|
||||
close $slave;
|
||||
xsendfile(\*STDOUT, $master);
|
||||
exit(finish_child($pid));
|
||||
@@ -28,6 +28,8 @@ test_expect_success 'setup a submodule tree' '
|
||||
git commit -m upstream
|
||||
git clone . super &&
|
||||
git clone super submodule &&
|
||||
git clone super rebasing &&
|
||||
git clone super merging &&
|
||||
(cd super &&
|
||||
git submodule add ../submodule submodule &&
|
||||
test_tick &&
|
||||
@@ -45,6 +47,16 @@ test_expect_success 'setup a submodule tree' '
|
||||
) &&
|
||||
git add submodule &&
|
||||
git commit -m "submodule update"
|
||||
) &&
|
||||
(cd super &&
|
||||
git submodule add ../rebasing rebasing &&
|
||||
test_tick &&
|
||||
git commit -m "rebasing"
|
||||
) &&
|
||||
(cd super &&
|
||||
git submodule add ../merging merging &&
|
||||
test_tick &&
|
||||
git commit -m "rebasing"
|
||||
)
|
||||
'
|
||||
|
||||
@@ -177,21 +189,17 @@ test_expect_success 'submodule update - checkout in .git/config' '
|
||||
|
||||
test_expect_success 'submodule init picks up rebase' '
|
||||
(cd super &&
|
||||
git config submodule.rebasing.url git://non-existing/git &&
|
||||
git config submodule.rebasing.path does-not-matter &&
|
||||
git config submodule.rebasing.update rebase &&
|
||||
git config -f .gitmodules submodule.rebasing.update rebase &&
|
||||
git submodule init rebasing &&
|
||||
test "rebase" = $(git config submodule.rebasing.update)
|
||||
test "rebase" = "$(git config submodule.rebasing.update)"
|
||||
)
|
||||
'
|
||||
|
||||
test_expect_success 'submodule init picks up merge' '
|
||||
(cd super &&
|
||||
git config submodule.merging.url git://non-existing/git &&
|
||||
git config submodule.merging.path does-not-matter &&
|
||||
git config submodule.merging.update merge &&
|
||||
git config -f .gitmodules submodule.merging.update merge &&
|
||||
git submodule init merging &&
|
||||
test "merge" = $(git config submodule.merging.update)
|
||||
test "merge" = "$(git config submodule.merging.update)"
|
||||
)
|
||||
'
|
||||
|
||||
|
||||
@@ -591,13 +591,21 @@ test_debug 'cat gitweb.log'
|
||||
# ----------------------------------------------------------------------
|
||||
# gitweb config and repo config
|
||||
|
||||
cat >>gitweb_config.perl <<EOF
|
||||
cat >>gitweb_config.perl <<\EOF
|
||||
|
||||
\$feature{'blame'}{'override'} = 1;
|
||||
\$feature{'snapshot'}{'override'} = 1;
|
||||
\$feature{'avatar'}{'override'} = 1;
|
||||
# turn on override for each overridable feature
|
||||
foreach my $key (keys %feature) {
|
||||
if ($feature{$key}{'sub'}) {
|
||||
$feature{$key}{'override'} = 1;
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
test_expect_success \
|
||||
'config override: projects list (implicit)' \
|
||||
'gitweb_run'
|
||||
test_debug 'cat gitweb.log'
|
||||
|
||||
test_expect_success \
|
||||
'config override: tree view, features not overridden in repo config' \
|
||||
'gitweb_run "p=.git;a=tree"'
|
||||
|
||||
@@ -65,6 +65,8 @@ GIT_TEST_CMP=${GIT_TEST_CMP:-diff -u}
|
||||
# CDPATH into the environment
|
||||
unset CDPATH
|
||||
|
||||
unset GREP_OPTIONS
|
||||
|
||||
case $(echo $GIT_TRACE |tr "[A-Z]" "[a-z]") in
|
||||
1|2|true)
|
||||
echo "* warning: Some tests will not work if GIT_TRACE" \
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* This program can either change modification time of the given
|
||||
* file(s) or just print it. The program does not change atime nor
|
||||
* ctime (their values are explicitely preserved).
|
||||
* ctime (their values are explicitly preserved).
|
||||
*
|
||||
* The mtime can be changed to an absolute value:
|
||||
*
|
||||
|
||||
@@ -171,7 +171,7 @@ static struct child_process *get_helper(struct transport *transport)
|
||||
} else if (!strcmp(capname, "connect")) {
|
||||
data->connect = 1;
|
||||
} else if (mandatory) {
|
||||
die("Unknown madatory capability %s. This remote "
|
||||
die("Unknown mandatory capability %s. This remote "
|
||||
"helper probably needs newer version of Git.\n",
|
||||
capname);
|
||||
}
|
||||
|
||||
@@ -918,6 +918,7 @@ struct transport *transport_get(struct remote *remote, const char *url)
|
||||
if (!remote)
|
||||
die("No remote provided to transport_get()");
|
||||
|
||||
ret->got_remote_refs = 0;
|
||||
ret->remote = remote;
|
||||
helper = remote->foreign_vcs;
|
||||
|
||||
@@ -1079,8 +1080,10 @@ int transport_push(struct transport *transport,
|
||||
|
||||
const struct ref *transport_get_remote_refs(struct transport *transport)
|
||||
{
|
||||
if (!transport->remote_refs)
|
||||
if (!transport->got_remote_refs) {
|
||||
transport->remote_refs = transport->get_refs_list(transport, 0);
|
||||
transport->got_remote_refs = 1;
|
||||
}
|
||||
|
||||
return transport->remote_refs;
|
||||
}
|
||||
|
||||
@@ -19,6 +19,12 @@ struct transport {
|
||||
void *data;
|
||||
const struct ref *remote_refs;
|
||||
|
||||
/**
|
||||
* Indicates whether we already called get_refs_list(); set by
|
||||
* transport.c::transport_get_remote_refs().
|
||||
*/
|
||||
unsigned got_remote_refs : 1;
|
||||
|
||||
/**
|
||||
* Returns 0 if successful, positive if the option is not
|
||||
* recognized or is inapplicable, and negative if the option
|
||||
|
||||
@@ -441,6 +441,7 @@ int get_tree_entry(const unsigned char *tree_sha1, const char *name, unsigned ch
|
||||
|
||||
if (name[0] == '\0') {
|
||||
hashcpy(sha1, root);
|
||||
free(tree);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -105,12 +105,12 @@ static void show_edge(struct commit *commit)
|
||||
fprintf(pack_pipe, "-%s\n", sha1_to_hex(commit->object.sha1));
|
||||
}
|
||||
|
||||
static int do_rev_list(int fd, void *create_full_pack)
|
||||
static int do_rev_list(int in, int out, void *create_full_pack)
|
||||
{
|
||||
int i;
|
||||
struct rev_info revs;
|
||||
|
||||
pack_pipe = xfdopen(fd, "w");
|
||||
pack_pipe = xfdopen(out, "w");
|
||||
init_revisions(&revs, NULL);
|
||||
revs.tag_objects = 1;
|
||||
revs.tree_objects = 1;
|
||||
@@ -162,8 +162,9 @@ static void create_pack_file(void)
|
||||
int arg = 0;
|
||||
|
||||
if (shallow_nr) {
|
||||
memset(&rev_list, 0, sizeof(rev_list));
|
||||
rev_list.proc = do_rev_list;
|
||||
rev_list.data = 0;
|
||||
rev_list.out = -1;
|
||||
if (start_async(&rev_list))
|
||||
die("git upload-pack: unable to fork git-rev-list");
|
||||
argv[arg++] = "pack-objects";
|
||||
|
||||
61
utf8.c
61
utf8.c
@@ -280,22 +280,11 @@ int is_utf8(const char *text)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline void strbuf_write(struct strbuf *sb, const char *buf, int len)
|
||||
static void strbuf_addchars(struct strbuf *sb, int c, size_t n)
|
||||
{
|
||||
if (sb)
|
||||
strbuf_insert(sb, sb->len, buf, len);
|
||||
else
|
||||
fwrite(buf, len, 1, stdout);
|
||||
}
|
||||
|
||||
static void print_spaces(struct strbuf *buf, int count)
|
||||
{
|
||||
static const char s[] = " ";
|
||||
while (count >= sizeof(s)) {
|
||||
strbuf_write(buf, s, sizeof(s) - 1);
|
||||
count -= sizeof(s) - 1;
|
||||
}
|
||||
strbuf_write(buf, s, count);
|
||||
strbuf_grow(sb, n);
|
||||
memset(sb->buf + sb->len, c, n);
|
||||
strbuf_setlen(sb, sb->len + n);
|
||||
}
|
||||
|
||||
static void strbuf_add_indented_text(struct strbuf *buf, const char *text,
|
||||
@@ -307,8 +296,8 @@ static void strbuf_add_indented_text(struct strbuf *buf, const char *text,
|
||||
const char *eol = strchrnul(text, '\n');
|
||||
if (*eol == '\n')
|
||||
eol++;
|
||||
print_spaces(buf, indent);
|
||||
strbuf_write(buf, text, eol - text);
|
||||
strbuf_addchars(buf, ' ', indent);
|
||||
strbuf_add(buf, text, eol - text);
|
||||
text = eol;
|
||||
indent = indent2;
|
||||
}
|
||||
@@ -335,16 +324,21 @@ static size_t display_mode_esc_sequence_len(const char *s)
|
||||
* consumed (and no extra indent is necessary for the first line).
|
||||
*/
|
||||
int strbuf_add_wrapped_text(struct strbuf *buf,
|
||||
const char *text, int indent, int indent2, int width)
|
||||
const char *text, int indent1, int indent2, int width)
|
||||
{
|
||||
int w = indent, assume_utf8 = is_utf8(text);
|
||||
const char *bol = text, *space = NULL;
|
||||
int indent, w, assume_utf8 = 1;
|
||||
const char *bol, *space, *start = text;
|
||||
size_t orig_len = buf->len;
|
||||
|
||||
if (width <= 0) {
|
||||
strbuf_add_indented_text(buf, text, indent, indent2);
|
||||
strbuf_add_indented_text(buf, text, indent1, indent2);
|
||||
return 1;
|
||||
}
|
||||
|
||||
retry:
|
||||
bol = text;
|
||||
w = indent = indent1;
|
||||
space = NULL;
|
||||
if (indent < 0) {
|
||||
w = -indent;
|
||||
space = text;
|
||||
@@ -366,8 +360,8 @@ int strbuf_add_wrapped_text(struct strbuf *buf,
|
||||
if (space)
|
||||
start = space;
|
||||
else
|
||||
print_spaces(buf, indent);
|
||||
strbuf_write(buf, start, text - start);
|
||||
strbuf_addchars(buf, ' ', indent);
|
||||
strbuf_add(buf, start, text - start);
|
||||
if (!c)
|
||||
return w;
|
||||
space = text;
|
||||
@@ -376,40 +370,41 @@ int strbuf_add_wrapped_text(struct strbuf *buf,
|
||||
else if (c == '\n') {
|
||||
space++;
|
||||
if (*space == '\n') {
|
||||
strbuf_write(buf, "\n", 1);
|
||||
strbuf_addch(buf, '\n');
|
||||
goto new_line;
|
||||
}
|
||||
else if (!isalnum(*space))
|
||||
goto new_line;
|
||||
else
|
||||
strbuf_write(buf, " ", 1);
|
||||
strbuf_addch(buf, ' ');
|
||||
}
|
||||
w++;
|
||||
text++;
|
||||
}
|
||||
else {
|
||||
new_line:
|
||||
strbuf_write(buf, "\n", 1);
|
||||
strbuf_addch(buf, '\n');
|
||||
text = bol = space + isspace(*space);
|
||||
space = NULL;
|
||||
w = indent = indent2;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (assume_utf8)
|
||||
if (assume_utf8) {
|
||||
w += utf8_width(&text, NULL);
|
||||
else {
|
||||
if (!text) {
|
||||
assume_utf8 = 0;
|
||||
text = start;
|
||||
strbuf_setlen(buf, orig_len);
|
||||
goto retry;
|
||||
}
|
||||
} else {
|
||||
w++;
|
||||
text++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int print_wrapped_text(const char *text, int indent, int indent2, int width)
|
||||
{
|
||||
return strbuf_add_wrapped_text(NULL, text, indent, indent2, width);
|
||||
}
|
||||
|
||||
int is_encoding_utf8(const char *name)
|
||||
{
|
||||
if (!name)
|
||||
|
||||
1
utf8.h
1
utf8.h
@@ -8,7 +8,6 @@ int utf8_strwidth(const char *string);
|
||||
int is_utf8(const char *text);
|
||||
int is_encoding_utf8(const char *name);
|
||||
|
||||
int print_wrapped_text(const char *text, int indent, int indent2, int len);
|
||||
int strbuf_add_wrapped_text(struct strbuf *buf,
|
||||
const char *text, int indent, int indent2, int width);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user