Dataset schema (from the dataset viewer): _id (string, 2 to 7 chars), title (string, 1 to 88 chars), partition (string, 3 classes), text (string, 75 to 19.8k chars), language (string, 1 class), meta_information (dict).
q22000
Connection.executescript
train
async def executescript(self, sql_script: str) -> Cursor: """Helper to create a cursor and execute a user script.""" cursor = await self._execute(self._conn.executescript, sql_script) return Cursor(self, cursor)
python
{ "resource": "" }
q22001
_log_multivariate_normal_density_diag
train
def _log_multivariate_normal_density_diag(X, means, covars): """Compute Gaussian log-density at X for a diagonal model.""" n_samples, n_dim = X.shape lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1) + np.sum((means ** 2) / covars, 1) - 2 * np.dot(X, (means / covars).T) + np.dot(X ** 2, (1.0 / covars).T)) return lpr
python
{ "resource": "" }
q22002
_log_multivariate_normal_density_spherical
train
def _log_multivariate_normal_density_spherical(X, means, covars): """Compute Gaussian log-density at X for a spherical model.""" cv = covars.copy() if covars.ndim == 1: cv = cv[:, np.newaxis] if cv.shape[1] == 1: cv = np.tile(cv, (1, X.shape[-1])) return _log_multivariate_normal_density_diag(X, means, cv)
python
{ "resource": "" }
q22003
_log_multivariate_normal_density_tied
train
def _log_multivariate_normal_density_tied(X, means, covars): """Compute Gaussian log-density at X for a tied model.""" cv = np.tile(covars, (means.shape[0], 1, 1)) return _log_multivariate_normal_density_full(X, means, cv)
python
{ "resource": "" }
q22004
_log_multivariate_normal_density_full
train
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7): """Log probability for full covariance matrices.""" n_samples, n_dim = X.shape nmix = len(means) log_prob = np.empty((n_samples, nmix)) for c, (mu, cv) in enumerate(zip(means, covars)): try: cv_chol = linalg.cholesky(cv, lower=True) except linalg.LinAlgError: # The model is most probably stuck in a component with too # few observations, so we reinitialize this component try: cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim), lower=True) except linalg.LinAlgError: raise ValueError("'covars' must be symmetric, " "positive-definite") cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol))) cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + n_dim * np.log(2 * np.pi) + cv_log_det) return log_prob
python
{ "resource": "" }
q22005
ConvergenceMonitor.converged
train
def converged(self): """``True`` if the EM algorithm converged and ``False`` otherwise.""" # XXX we might want to check that ``logprob`` is non-decreasing. return (self.iter == self.n_iter or (len(self.history) == 2 and self.history[1] - self.history[0] < self.tol))
python
{ "resource": "" }
q22006
_BaseHMM.get_stationary_distribution
train
def get_stationary_distribution(self): """Compute the stationary distribution of states. """ # The stationary distribution is proportional to the left-eigenvector # associated with the largest eigenvalue (i.e., 1) of the transition # matrix. check_is_fitted(self, "transmat_") eigvals, eigvecs = np.linalg.eig(self.transmat_.T) eigvec = np.real_if_close(eigvecs[:, np.argmax(eigvals)]) return eigvec / eigvec.sum()
python
{ "resource": "" }
q22007
_BaseHMM.score_samples
train
def score_samples(self, X, lengths=None): """Compute the log probability under the model and compute posteriors. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. Returns ------- logprob : float Log likelihood of ``X``. posteriors : array, shape (n_samples, n_components) State-membership probabilities for each sample in ``X``. See Also -------- score : Compute the log probability under the model. decode : Find most likely state sequence corresponding to ``X``. """ check_is_fitted(self, "startprob_") self._check() X = check_array(X) n_samples = X.shape[0] logprob = 0 posteriors = np.zeros((n_samples, self.n_components)) for i, j in iter_from_X_lengths(X, lengths): framelogprob = self._compute_log_likelihood(X[i:j]) logprobij, fwdlattice = self._do_forward_pass(framelogprob) logprob += logprobij bwdlattice = self._do_backward_pass(framelogprob) posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice) return logprob, posteriors
python
{ "resource": "" }
q22008
_BaseHMM.predict_proba
train
def predict_proba(self, X, lengths=None): """Compute the posterior probability for each state in the model. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. Returns ------- posteriors : array, shape (n_samples, n_components) State-membership probabilities for each sample from ``X``. """ _, posteriors = self.score_samples(X, lengths) return posteriors
python
{ "resource": "" }
q22009
_BaseHMM._init
train
def _init(self, X, lengths): """Initializes model parameters prior to fitting. Parameters ---------- X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ) Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. """ init = 1. / self.n_components if 's' in self.init_params or not hasattr(self, "startprob_"): self.startprob_ = np.full(self.n_components, init) if 't' in self.init_params or not hasattr(self, "transmat_"): self.transmat_ = np.full((self.n_components, self.n_components), init)
python
{ "resource": "" }
q22010
_BaseHMM._check
train
def _check(self): """Validates model parameters prior to fitting. Raises ------ ValueError If any of the parameters are invalid, e.g. if :attr:`startprob_` don't sum to 1. """ self.startprob_ = np.asarray(self.startprob_) if len(self.startprob_) != self.n_components: raise ValueError("startprob_ must have length n_components") if not np.allclose(self.startprob_.sum(), 1.0): raise ValueError("startprob_ must sum to 1.0 (got {:.4f})" .format(self.startprob_.sum())) self.transmat_ = np.asarray(self.transmat_) if self.transmat_.shape != (self.n_components, self.n_components): raise ValueError( "transmat_ must have shape (n_components, n_components)") if not np.allclose(self.transmat_.sum(axis=1), 1.0): raise ValueError("rows of transmat_ must sum to 1.0 (got {})" .format(self.transmat_.sum(axis=1)))
python
{ "resource": "" }
q22011
_BaseHMM._initialize_sufficient_statistics
train
def _initialize_sufficient_statistics(self): """Initializes sufficient statistics required for M-step. The method is *pure*, meaning that it doesn't change the state of the instance. For extensibility computed statistics are stored in a dictionary. Returns ------- nobs : int Number of samples in the data. start : array, shape (n_components, ) An array where the i-th element corresponds to the posterior probability of the first sample being generated by the i-th state. trans : array, shape (n_components, n_components) An array where the (i, j)-th element corresponds to the posterior probability of transitioning between the i-th to j-th states. """ stats = {'nobs': 0, 'start': np.zeros(self.n_components), 'trans': np.zeros((self.n_components, self.n_components))} return stats
python
{ "resource": "" }
q22012
_BaseHMM._accumulate_sufficient_statistics
train
def _accumulate_sufficient_statistics(self, stats, X, framelogprob, posteriors, fwdlattice, bwdlattice): """Updates sufficient statistics from a given sample. Parameters ---------- stats : dict Sufficient statistics as returned by :meth:`~base._BaseHMM._initialize_sufficient_statistics`. X : array, shape (n_samples, n_features) Sample sequence. framelogprob : array, shape (n_samples, n_components) Log-probabilities of each sample under each of the model states. posteriors : array, shape (n_samples, n_components) Posterior probabilities of each sample being generated by each of the model states. fwdlattice, bwdlattice : array, shape (n_samples, n_components) Log-forward and log-backward probabilities. """ stats['nobs'] += 1 if 's' in self.params: stats['start'] += posteriors[0] if 't' in self.params: n_samples, n_components = framelogprob.shape # when the sample is of length 1, it contains no transitions # so there is no reason to update our trans. matrix estimate if n_samples <= 1: return log_xi_sum = np.full((n_components, n_components), -np.inf) _hmmc._compute_log_xi_sum(n_samples, n_components, fwdlattice, log_mask_zero(self.transmat_), bwdlattice, framelogprob, log_xi_sum) with np.errstate(under="ignore"): stats['trans'] += np.exp(log_xi_sum)
python
{ "resource": "" }
q22013
_BaseHMM._do_mstep
train
def _do_mstep(self, stats): """Performs the M-step of EM algorithm. Parameters ---------- stats : dict Sufficient statistics updated from all available samples. """ # The ``np.where`` calls guard against updating forbidden states # or transitions in e.g. a left-right HMM. if 's' in self.params: startprob_ = self.startprob_prior - 1.0 + stats['start'] self.startprob_ = np.where(self.startprob_ == 0.0, self.startprob_, startprob_) normalize(self.startprob_) if 't' in self.params: transmat_ = self.transmat_prior - 1.0 + stats['trans'] self.transmat_ = np.where(self.transmat_ == 0.0, self.transmat_, transmat_) normalize(self.transmat_, axis=1)
python
{ "resource": "" }
q22014
_validate_covars
train
def _validate_covars(covars, covariance_type, n_components): """Do basic checks on matrix covariance sizes and values.""" from scipy import linalg if covariance_type == 'spherical': if len(covars) != n_components: raise ValueError("'spherical' covars must have length n_components") elif np.any(covars <= 0): raise ValueError("'spherical' covars must be positive") elif covariance_type == 'tied': if covars.shape[0] != covars.shape[1]: raise ValueError("'tied' covars must have shape (n_dim, n_dim)") elif (not np.allclose(covars, covars.T) or np.any(linalg.eigvalsh(covars) <= 0)): raise ValueError("'tied' covars must be symmetric, " "positive-definite") elif covariance_type == 'diag': if len(covars.shape) != 2: raise ValueError("'diag' covars must have shape " "(n_components, n_dim)") elif np.any(covars <= 0): raise ValueError("'diag' covars must be positive") elif covariance_type == 'full': if len(covars.shape) != 3: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") elif covars.shape[1] != covars.shape[2]: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") for n, cv in enumerate(covars): if (not np.allclose(cv, cv.T) or np.any(linalg.eigvalsh(cv) <= 0)): raise ValueError("component %d of 'full' covars must be " "symmetric, positive-definite" % n) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'")
python
{ "resource": "" }
q22015
distribute_covar_matrix_to_match_covariance_type
train
def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components): """Create all the covariance matrices from a given template.""" if covariance_type == 'spherical': cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]), (n_components, 1)) elif covariance_type == 'tied': cv = tied_cv elif covariance_type == 'diag': cv = np.tile(np.diag(tied_cv), (n_components, 1)) elif covariance_type == 'full': cv = np.tile(tied_cv, (n_components, 1, 1)) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'") return cv
python
{ "resource": "" }
q22016
normalize
train
def normalize(a, axis=None): """Normalizes the input array so that it sums to 1. Parameters ---------- a : array Non-normalized input data. axis : int Dimension along which normalization is performed. Notes ----- Modifies the input **inplace**. """ a_sum = a.sum(axis) if axis and a.ndim > 1: # Make sure we don't divide by zero. a_sum[a_sum == 0] = 1 shape = list(a.shape) shape[axis] = 1 a_sum.shape = shape a /= a_sum
python
{ "resource": "" }
q22017
log_normalize
train
def log_normalize(a, axis=None): """Normalizes the input array so that the exponent of the sum is 1. Parameters ---------- a : array Non-normalized input data. axis : int Dimension along which normalization is performed. Notes ----- Modifies the input **inplace**. """ with np.errstate(under="ignore"): a_lse = logsumexp(a, axis) a -= a_lse[:, np.newaxis]
python
{ "resource": "" }
q22018
log_mask_zero
train
def log_mask_zero(a): """Computes the log of input probabilities masking divide by zero in log. Notes ----- During the M-step of EM-algorithm, very small intermediate start or transition probabilities could be normalized to zero, causing a *RuntimeWarning: divide by zero encountered in log*. This function silences this harmless warning. """ a = np.asarray(a) with np.errstate(divide="ignore"): return np.log(a)
python
{ "resource": "" }
q22019
GaussianHMM.covars_
train
def covars_(self): """Return covars as a full matrix.""" return fill_covars(self._covars_, self.covariance_type, self.n_components, self.n_features)
python
{ "resource": "" }
q22020
MultinomialHMM._check_input_symbols
train
def _check_input_symbols(self, X): """Check if ``X`` is a sample from a Multinomial distribution. That is ``X`` should be an array of non-negative integers from range ``[min(X), max(X)]``, such that each integer from the range occurs in ``X`` at least once. For example ``[0, 0, 2, 1, 3, 1, 1]`` is a valid sample from a Multinomial distribution, while ``[0, 0, 3, 5, 10]`` is not. """ symbols = np.concatenate(X) if (len(symbols) == 1 # not enough data or not np.issubdtype(symbols.dtype, np.integer) # not an integer or (symbols < 0).any()): # contains negative values return False u = np.unique(symbols) return u[0] == 0 and u[-1] == len(u) - 1
python
{ "resource": "" }
q22021
Object._set_cache_
train
def _set_cache_(self, attr): """Retrieve object information""" if attr == "size": oinfo = self.repo.odb.info(self.binsha) self.size = oinfo.size # assert oinfo.type == self.type, _assertion_msg_format % (self.binsha, oinfo.type, self.type) else: super(Object, self)._set_cache_(attr)
python
{ "resource": "" }
q22022
RemoteReference.iter_items
train
def iter_items(cls, repo, common_path=None, remote=None): """Iterate remote references, and if given, constrain them to the given remote""" common_path = common_path or cls._common_path_default if remote is not None: common_path = join_path(common_path, str(remote)) # END handle remote constraint return super(RemoteReference, cls).iter_items(repo, common_path)
python
{ "resource": "" }
q22023
RemoteReference.delete
train
def delete(cls, repo, *refs, **kwargs): """Delete the given remote references :note: kwargs are given for compatibility with the base class method as we should not narrow the signature.""" repo.git.branch("-d", "-r", *refs) # the official deletion method will ignore remote symbolic refs - these # are generally ignored in the refs/ folder. We don't though # and delete remainders manually for ref in refs: try: os.remove(osp.join(repo.common_dir, ref.path)) except OSError: pass try: os.remove(osp.join(repo.git_dir, ref.path)) except OSError: pass
python
{ "resource": "" }
q22024
_init_externals
train
def _init_externals(): """Initialize external projects by putting them into the path""" if __version__ == 'git': sys.path.insert(0, osp.join(osp.dirname(__file__), 'ext', 'gitdb')) try: import gitdb except ImportError: raise ImportError("'gitdb' could not be found in your PYTHONPATH")
python
{ "resource": "" }
q22025
refresh
train
def refresh(path=None): """Convenience method for setting the git executable path.""" global GIT_OK GIT_OK = False if not Git.refresh(path=path): return if not FetchInfo.refresh(): return GIT_OK = True
python
{ "resource": "" }
q22026
_git_dir
train
def _git_dir(repo, path): """ Find the git dir that's appropriate for the path""" name = "%s" % (path,) if name in ['HEAD', 'ORIG_HEAD', 'FETCH_HEAD', 'index', 'logs']: return repo.git_dir return repo.common_dir
python
{ "resource": "" }
q22027
SymbolicReference.set_commit
train
def set_commit(self, commit, logmsg=None): """As set_object, but restricts the type of object to be a Commit :raise ValueError: If commit is not a Commit object or doesn't point to a commit :return: self""" # check the type - assume the best if it is a base-string invalid_type = False if isinstance(commit, Object): invalid_type = commit.type != Commit.type elif isinstance(commit, SymbolicReference): invalid_type = commit.object.type != Commit.type else: try: invalid_type = self.repo.rev_parse(commit).type != Commit.type except (BadObject, BadName): raise ValueError("Invalid object: %s" % commit) # END handle exception # END verify type if invalid_type: raise ValueError("Need commit, got %r" % commit) # END handle raise # we leave strings to the rev-parse method below self.set_object(commit, logmsg) return self
python
{ "resource": "" }
q22028
SymbolicReference.set_object
train
def set_object(self, object, logmsg=None): # @ReservedAssignment """Set the object we point to, possibly dereference our symbolic reference first. If the reference does not exist, it will be created :param object: a refspec, a SymbolicReference or an Object instance. SymbolicReferences will be dereferenced beforehand to obtain the object they point to :param logmsg: If not None, the message will be used in the reflog entry to be written. Otherwise the reflog is not altered :note: plain SymbolicReferences may not actually point to objects by convention :return: self""" if isinstance(object, SymbolicReference): object = object.object # @ReservedAssignment # END resolve references is_detached = True try: is_detached = self.is_detached except ValueError: pass # END handle non-existing ones if is_detached: return self.set_reference(object, logmsg) # set the commit on our reference return self._get_reference().set_object(object, logmsg)
python
{ "resource": "" }
q22029
SymbolicReference.set_reference
train
def set_reference(self, ref, logmsg=None): """Set ourselves to the given ref. It will stay a symbol if the ref is a Reference. Otherwise an Object, given as Object instance or refspec, is assumed and if valid, will be set which effectively detaches the reference if it was a purely symbolic one. :param ref: SymbolicReference instance, Object instance or refspec string Only if the ref is a SymbolicRef instance, we will point to it. Everything else is dereferenced to obtain the actual object. :param logmsg: If set to a string, the message will be used in the reflog. Otherwise, a reflog entry is not written for the changed reference. The previous commit of the entry will be the commit we point to now. See also: log_append() :return: self :note: This symbolic reference will not be dereferenced. For that, see ``set_object(...)``""" write_value = None obj = None if isinstance(ref, SymbolicReference): write_value = "ref: %s" % ref.path elif isinstance(ref, Object): obj = ref write_value = ref.hexsha elif isinstance(ref, string_types): try: obj = self.repo.rev_parse(ref + "^{}") # optionally deref tags write_value = obj.hexsha except (BadObject, BadName): raise ValueError("Could not extract object from %s" % ref) # END end try string else: raise ValueError("Unrecognized Value: %r" % ref) # END try commit attribute # typecheck if obj is not None and self._points_to_commits_only and obj.type != Commit.type: raise TypeError("Require commit, got %r" % obj) # END verify type oldbinsha = None if logmsg is not None: try: oldbinsha = self.commit.binsha except ValueError: oldbinsha = Commit.NULL_BIN_SHA # END handle non-existing # END retrieve old hexsha fpath = self.abspath assure_directory_exists(fpath, is_file=True) lfd = LockedFD(fpath) fd = lfd.open(write=True, stream=True) ok = False try: fd.write(write_value.encode('ascii') + b'\n') lfd.commit() ok = True finally: if not ok: lfd.rollback() # Adjust the reflog if logmsg is not None: self.log_append(oldbinsha, logmsg) return self
python
{ "resource": "" }
q22030
SymbolicReference.log_append
train
def log_append(self, oldbinsha, message, newbinsha=None): """Append a logentry to the logfile of this ref :param oldbinsha: binary sha this ref used to point to :param message: A message describing the change :param newbinsha: The sha the ref points to now. If None, our current commit sha will be used :return: added RefLogEntry instance""" # NOTE: we use the committer of the currently active commit - this should be # correct to allow overriding the committer on a per-commit level. # See https://github.com/gitpython-developers/GitPython/pull/146 try: committer_or_reader = self.commit.committer except ValueError: committer_or_reader = self.repo.config_reader() # end handle newly cloned repositories return RefLog.append_entry(committer_or_reader, RefLog.path(self), oldbinsha, (newbinsha is None and self.commit.binsha) or newbinsha, message)
python
{ "resource": "" }
q22031
SymbolicReference.delete
train
def delete(cls, repo, path): """Delete the reference at the given path :param repo: Repository to delete the reference from :param path: Short or full path pointing to the reference, i.e. refs/myreference or just "myreference", hence 'refs/' is implied. Alternatively the symbolic reference to be deleted""" full_ref_path = cls.to_full_path(path) abs_path = osp.join(repo.common_dir, full_ref_path) if osp.exists(abs_path): os.remove(abs_path) else: # check packed refs pack_file_path = cls._get_packed_refs_path(repo) try: with open(pack_file_path, 'rb') as reader: new_lines = [] made_change = False dropped_last_line = False for line in reader: # keep line if it is a comment or if the ref to delete is not # in the line # If we deleted the last line and this one is a tag-reference object, # we drop it as well line = line.decode(defenc) if (line.startswith('#') or full_ref_path not in line) and \ (not dropped_last_line or dropped_last_line and not line.startswith('^')): new_lines.append(line) dropped_last_line = False continue # END skip comments and lines without our path # drop this line made_change = True dropped_last_line = True # write the new lines if made_change: # write-binary is required, otherwise windows will # open the file in text mode and change LF to CRLF ! with open(pack_file_path, 'wb') as fd: fd.writelines(l.encode(defenc) for l in new_lines) except (OSError, IOError): pass # it didn't exist at all # delete the reflog reflog_path = RefLog.path(cls(repo, full_ref_path)) if osp.isfile(reflog_path): os.remove(reflog_path)
python
{ "resource": "" }
q22032
SymbolicReference._create
train
def _create(cls, repo, path, resolve, reference, force, logmsg=None): """Internal method used to create a new symbolic reference. If resolve is False, the reference will be taken as is, creating a proper symbolic reference. Otherwise it will be resolved to the corresponding object and a detached symbolic reference will be created instead""" git_dir = _git_dir(repo, path) full_ref_path = cls.to_full_path(path) abs_ref_path = osp.join(git_dir, full_ref_path) # figure out target data target = reference if resolve: target = repo.rev_parse(str(reference)) if not force and osp.isfile(abs_ref_path): target_data = str(target) if isinstance(target, SymbolicReference): target_data = target.path if not resolve: target_data = "ref: " + target_data with open(abs_ref_path, 'rb') as fd: existing_data = fd.read().decode(defenc).strip() if existing_data != target_data: raise OSError("Reference at %r already exists, pointing to %r, requested was %r" % (full_ref_path, existing_data, target_data)) # END no force handling ref = cls(repo, full_ref_path) ref.set_reference(target, logmsg) return ref
python
{ "resource": "" }
q22033
SymbolicReference.create
train
def create(cls, repo, path, reference='HEAD', force=False, logmsg=None): """Create a new symbolic reference, hence a reference pointing to another reference. :param repo: Repository to create the reference in :param path: full path at which the new symbolic reference is supposed to be created at, i.e. "NEW_HEAD" or "symrefs/my_new_symref" :param reference: The reference to which the new symbolic reference should point to. If it is a commit'ish, the symbolic ref will be detached. :param force: if True, force creation even if a symbolic reference with that name already exists. Raise OSError otherwise :param logmsg: If not None, the message to append to the reflog. Otherwise no reflog entry is written. :return: Newly created symbolic Reference :raise OSError: If a (Symbolic)Reference with the same name but different contents already exists. :note: This does not alter the current HEAD, index or Working Tree""" return cls._create(repo, path, cls._resolve_ref_on_create, reference, force, logmsg)
python
{ "resource": "" }
q22034
SymbolicReference.iter_items
train
def iter_items(cls, repo, common_path=None): """Find all refs in the repository :param repo: is the Repo :param common_path: Optional keyword argument to the path which is to be shared by all returned Ref objects. Defaults to class specific portion if None assuring that only refs suitable for the actual class are returned. :return: git.SymbolicReference[], each of them is guaranteed to be a symbolic ref which is not detached and pointing to a valid ref List is lexicographically sorted The returned objects represent actual subclasses, such as Head or TagReference""" return (r for r in cls._iter_items(repo, common_path) if r.__class__ == SymbolicReference or not r.is_detached)
python
{ "resource": "" }
q22035
HEAD.reset
train
def reset(self, commit='HEAD', index=True, working_tree=False, paths=None, **kwargs): """Reset our HEAD to the given commit optionally synchronizing the index and working tree. The reference we refer to will be set to commit as well. :param commit: Commit object, Reference Object or string identifying a revision we should reset HEAD to. :param index: If True, the index will be set to match the given commit. Otherwise it will not be touched. :param working_tree: If True, the working tree will be forcefully adjusted to match the given commit, possibly overwriting uncommitted changes without warning. If working_tree is True, index must be true as well :param paths: Single path or list of paths relative to the git root directory that are to be reset. This allows to partially reset individual files. :param kwargs: Additional arguments passed to git-reset. :return: self""" mode = "--soft" if index: mode = "--mixed" # it appears, some git-versions declare mixed and paths deprecated # see http://github.com/Byron/GitPython/issues#issue/2 if paths: mode = None # END special case # END handle index if working_tree: mode = "--hard" if not index: raise ValueError("Cannot reset the working tree if the index is not reset as well") # END working tree handling try: self.repo.git.reset(mode, commit, '--', paths, **kwargs) except GitCommandError as e: # git nowadays may use 1 as status to indicate there are still unstaged # modifications after the reset if e.status != 1: raise # END handle exception return self
python
{ "resource": "" }
q22036
Head.delete
train
def delete(cls, repo, *heads, **kwargs): """Delete the given heads :param force: If True, the heads will be deleted even if they are not yet merged into the main development stream. Default False""" force = kwargs.get("force", False) flag = "-d" if force: flag = "-D" repo.git.branch(flag, *heads)
python
{ "resource": "" }
q22037
Head.set_tracking_branch
train
def set_tracking_branch(self, remote_reference): """ Configure this branch to track the given remote reference. This will alter this branch's configuration accordingly. :param remote_reference: The remote reference to track or None to untrack any references :return: self""" from .remote import RemoteReference if remote_reference is not None and not isinstance(remote_reference, RemoteReference): raise ValueError("Incorrect parameter type: %r" % remote_reference) # END handle type with self.config_writer() as writer: if remote_reference is None: writer.remove_option(self.k_config_remote) writer.remove_option(self.k_config_remote_ref) if len(writer.options()) == 0: writer.remove_section() else: writer.set_value(self.k_config_remote, remote_reference.remote_name) writer.set_value(self.k_config_remote_ref, Head.to_full_path(remote_reference.remote_head)) return self
python
{ "resource": "" }
q22038
Head.checkout
train
def checkout(self, force=False, **kwargs): """Checkout this head by setting the HEAD to this reference, by updating the index to reflect the tree we point to and by updating the working tree to reflect the latest index. The command will fail if changed working tree files would be overwritten. :param force: If True, changes to the index and the working tree will be discarded. If False, GitCommandError will be raised in that situation. :param kwargs: Additional keyword arguments to be passed to git checkout, i.e. b='new_branch' to create a new branch at the given spot. :return: The active branch after the checkout operation, usually self unless a new branch has been created. If there is no active branch, as the HEAD is now detached, the HEAD reference will be returned instead. :note: By default it is only allowed to checkout heads - everything else will leave the HEAD detached which is allowed and possible, but remains a special state that some tools might not be able to handle.""" kwargs['f'] = force if kwargs['f'] is False: kwargs.pop('f') self.repo.git.checkout(self, **kwargs) if self.repo.head.is_detached: return self.repo.head else: return self.repo.active_branch
python
{ "resource": "" }
q22039
IndexFile._deserialize
train
def _deserialize(self, stream): """Initialize this instance with index values read from the given stream""" self.version, self.entries, self._extension_data, content_sha = read_cache(stream) # @UnusedVariable return self
python
{ "resource": "" }
q22040
IndexFile.write
train
def write(self, file_path=None, ignore_extension_data=False): """Write the current state to our file path or to the given one :param file_path: If None, we will write to our stored file path from which we have been initialized. Otherwise we write to the given file path. Please note that this will change the file_path of this index to the one you gave. :param ignore_extension_data: If True, the TREE type extension data read in the index will not be written to disk. NOTE that no extension data is actually written. Use this if you have altered the index and would like to use git-write-tree afterwards to create a tree representing your written changes. If this data is present in the written index, git-write-tree will instead write the stored/cached tree. Alternatively, use IndexFile.write_tree() to handle this case automatically :return: self""" # make sure we have our entries read before getting a write lock # else it would be done when streaming. This can happen # if one doesn't change the index, but writes it right away self.entries lfd = LockedFD(file_path or self._file_path) stream = lfd.open(write=True, stream=True) ok = False try: self._serialize(stream, ignore_extension_data) ok = True finally: if not ok: lfd.rollback() lfd.commit() # make sure we represent what we have written if file_path is not None: self._file_path = file_path
python
{ "resource": "" }
q22041
IndexFile.merge_tree
train
def merge_tree(self, rhs, base=None): """Merge the given rhs treeish into the current index, possibly taking a common base treeish into account. As opposed to the from_tree_ method, this allows you to use an already existing tree as the left side of the merge :param rhs: treeish reference pointing to the 'other' side of the merge. :param base: optional treeish reference pointing to the common base of 'rhs' and this index which equals lhs :return: self ( containing the merge and possibly unmerged entries in case of conflicts ) :raise GitCommandError: If there is a merge conflict. The error will be raised at the first conflicting path. If you want to have proper merge resolution to be done by yourself, you have to commit the changed index ( or make a valid tree from it ) and retry with a three-way index.from_tree call. """ # -i : ignore working tree status # --aggressive : handle more merge cases # -m : do an actual merge args = ["--aggressive", "-i", "-m"] if base is not None: args.append(base) args.append(rhs) self.repo.git.read_tree(args) return self
python
{ "resource": "" }
q22042
IndexFile.from_tree
train
def from_tree(cls, repo, *treeish, **kwargs): """Merge the given treeish revisions into a new index which is returned. The original index will remain unaltered :param repo: The repository treeish are located in. :param treeish: One, two or three Tree Objects, Commits or 40 byte hexshas. The result changes according to the amount of trees. If 1 Tree is given, it will just be read into a new index If 2 Trees are given, they will be merged into a new index using a two way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other' one. It behaves like a fast-forward. If 3 Trees are given, a 3-way merge will be performed with the first tree being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current' tree, tree 3 is the 'other' one :param kwargs: Additional arguments passed to git-read-tree :return: New IndexFile instance. It will point to a temporary index location which does not exist anymore. If you intend to write such a merged Index, supply an alternate file_path to its 'write' method. :note: In the three-way merge case, --aggressive will be specified to automatically resolve more cases in a commonly correct manner. Specify trivial=True as kwarg to override that. As the underlying git-read-tree command takes into account the current index, it will be temporarily moved out of the way to assure there are no unsuspected interferences.""" if len(treeish) == 0 or len(treeish) > 3: raise ValueError("Please specify between 1 and 3 treeish, got %i" % len(treeish)) arg_list = [] # ignore that working tree and index possibly are out of date if len(treeish) > 1: # drop unmerged entries when reading our index and merging arg_list.append("--reset") # handle non-trivial cases the way a real merge does arg_list.append("--aggressive") # END merge handling # tmp file created in git home directory to be sure renaming # works - /tmp/ dirs could be on another device tmp_index = tempfile.mktemp('', '', repo.git_dir) arg_list.append("--index-output=%s" % tmp_index) arg_list.extend(treeish) # move current index out of the way - otherwise the merge may fail # as it considers existing entries. moving it essentially clears the index. # Unfortunately there is no 'soft' way to do it. # The TemporaryFileSwap assures the original file gets put back index_handler = TemporaryFileSwap(join_path_native(repo.git_dir, 'index')) try: repo.git.read_tree(*arg_list, **kwargs) index = cls(repo, tmp_index) index.entries # force it to read the file as we will delete the temp-file del(index_handler) # release as soon as possible finally: if osp.exists(tmp_index): os.remove(tmp_index) # END index merge handling return index
python
{ "resource": "" }
q22043
IndexFile._iter_expand_paths
train
def _iter_expand_paths(self, paths): """Expand the directories in the list of paths to the corresponding paths accordingly. Note: git will add items multiple times even if a glob overlapped with manually specified paths, or if paths were specified multiple times - we respect that and do not prune""" def raise_exc(e): raise e r = self.repo.working_tree_dir rs = r + os.sep for path in paths: abs_path = path if not osp.isabs(abs_path): abs_path = osp.join(r, path) # END make absolute path try: st = os.lstat(abs_path) # handles non-symlinks as well except OSError: # the lstat call may fail as the path may contain globs as well pass else: if S_ISLNK(st.st_mode): yield abs_path.replace(rs, '') continue # end check symlink # resolve globs if possible if '?' in path or '*' in path or '[' in path: resolved_paths = glob.glob(abs_path) # not abs_path in resolved_paths: # a glob() resolving to the same path we are feeding it with # is a glob() that failed to resolve. If we continued calling # ourselves we'd endlessly recurse. If the condition below # evaluates to true then we are likely dealing with a file # whose name contains wildcard characters. if abs_path not in resolved_paths: for f in self._iter_expand_paths(glob.glob(abs_path)): yield f.replace(rs, '') continue # END glob handling try: for root, dirs, files in os.walk(abs_path, onerror=raise_exc): # @UnusedVariable for rela_file in files: # add relative paths only yield osp.join(root.replace(rs, ''), rela_file) # END for each file in subdir # END for each subdirectory except OSError: # was a file or something that could not be iterated yield path.replace(rs, '')
python
{ "resource": "" }
q22044
IndexFile._write_path_to_stdin
train
def _write_path_to_stdin(self, proc, filepath, item, fmakeexc, fprogress, read_from_stdout=True): """Write path to proc.stdin and make sure it processes the item, including progress. :return: stdout string :param read_from_stdout: if False, proc.stdout will not be read after the item was sent to stdin; in that case, the method returns None :note: There is a bug in git-update-index that prevents it from sending reports just in time. This is why we have a version that tries to read stdout and one which doesn't. In fact, the stdout is not important as the piped-in files are processed anyway and just in time :note: Newlines are essential here, git's behaviour is somewhat inconsistent on this depending on the version, hence we try our best to deal with newlines carefully. Usually the last newline will not be sent, instead we will close stdin to break the pipe.""" fprogress(filepath, False, item) rval = None try: proc.stdin.write(("%s\n" % filepath).encode(defenc)) except IOError: # pipe broke, usually because some error happened raise fmakeexc() # END write exception handling proc.stdin.flush() if read_from_stdout: rval = proc.stdout.readline().strip() fprogress(filepath, True, item) return rval
python
{ "resource": "" }
q22045
IndexFile.resolve_blobs
train
def resolve_blobs(self, iter_blobs): """Resolve the blobs given in blob iterator. This will effectively remove the index entries of the respective path at all non-null stages and add the given blob as new stage null blob. For each path there may only be one blob, otherwise a ValueError will be raised claiming the path is already at stage 0. :raise ValueError: if one of the blobs already existed at stage 0 :return: self :note: You will have to write the index manually once you are done, i.e. index.resolve_blobs(blobs).write() """ for blob in iter_blobs: stage_null_key = (blob.path, 0) if stage_null_key in self.entries: raise ValueError("Path %r already exists at stage 0" % blob.path) # END assert blob is not stage 0 already # delete all possible stages for stage in (1, 2, 3): try: del(self.entries[(blob.path, stage)]) except KeyError: pass # END ignore key errors # END for each possible stage self.entries[stage_null_key] = IndexEntry.from_blob(blob) # END for each blob return self
python
{ "resource": "" }
q22046
IndexFile.write_tree
train
def write_tree(self): """Writes this index to a corresponding Tree object into the repository's object database and return it. :return: Tree object representing this index :note: The tree will be written even if one or more objects the tree refers to does not yet exist in the object database. This could happen if you added Entries to the index directly. :raise ValueError: if there are no entries in the cache :raise UnmergedEntriesError: """ # we obtain no lock as we just flush our contents to disk as tree # If we are a new index, the entries access will load our data accordingly mdb = MemoryDB() entries = self._entries_sorted() binsha, tree_items = write_tree_from_cache(entries, mdb, slice(0, len(entries))) # copy changed trees only mdb.stream_copy(mdb.sha_iter(), self.repo.odb) # note: additional deserialization could be saved if write_tree_from_cache # would return sorted tree entries root_tree = Tree(self.repo, binsha, path='') root_tree._cache = tree_items return root_tree
python
{ "resource": "" }
q22047
IndexFile._preprocess_add_items
train
def _preprocess_add_items(self, items): """ Split the items into two lists of path strings and BaseEntries. """ paths = [] entries = [] for item in items: if isinstance(item, string_types): paths.append(self._to_relative_path(item)) elif isinstance(item, (Blob, Submodule)): entries.append(BaseIndexEntry.from_blob(item)) elif isinstance(item, BaseIndexEntry): entries.append(item) else: raise TypeError("Invalid Type: %r" % item) # END for each item return (paths, entries)
python
{ "resource": "" }
q22048
IndexFile._store_path
train
def _store_path(self, filepath, fprogress): """Store file at filepath in the database and return the base index entry Needs the git_working_dir decorator active ! This must be assured in the calling code""" st = os.lstat(filepath) # handles non-symlinks as well if S_ISLNK(st.st_mode): # in PY3, readlink is string, but we need bytes. In PY2, it's just OS encoded bytes, we assume UTF-8 open_stream = lambda: BytesIO(force_bytes(os.readlink(filepath), encoding=defenc)) else: open_stream = lambda: open(filepath, 'rb') with open_stream() as stream: fprogress(filepath, False, filepath) istream = self.repo.odb.store(IStream(Blob.type, st.st_size, stream)) fprogress(filepath, True, filepath) return BaseIndexEntry((stat_mode_to_index_mode(st.st_mode), istream.binsha, 0, to_native_path_linux(filepath)))
python
{ "resource": "" }
q22049
IndexFile.add
train
def add(self, items, force=True, fprogress=lambda *args: None, path_rewriter=None, write=True, write_extension_data=False): """Add files from the working tree, specific blobs or BaseIndexEntries to the index. :param items: Multiple types of items are supported, types can be mixed within one call. Different types imply a different handling. File paths may generally be relative or absolute. - path string strings denote a relative or absolute path into the repository pointing to an existing file, i.e. CHANGES, lib/myfile.ext, '/home/gitrepo/lib/myfile.ext'. Absolute paths must start with the working tree directory of this index's repository to be considered valid. For example, if it was initialized with a non-normalized path, like `/root/repo/../repo`, absolute paths to be added must start with `/root/repo/../repo`. Paths provided like this must exist. When added, they will be written into the object database. PathStrings may contain globs, such as 'lib/__init__*' or can be directories like 'lib', the latter ones will add all the files within the directory and subdirectories. This equals a straight git-add. They are added at stage 0 - Blob or Submodule object Blobs are added as they are assuming a valid mode is set. The file they refer to may or may not exist in the file system, but must be a path relative to our repository. If their sha is null ( 40*0 ), their path must exist in the file system relative to the git repository as an object will be created from the data at the path. The handling now very much equals the way string paths are processed, except that the mode you have set will be kept. This allows you to create symlinks by setting the mode respectively and writing the target of the symlink directly into the file. This equals a default Linux-Symlink which is not dereferenced automatically, except that it can be created on filesystems not supporting it as well. Please note that globs or directories are not allowed in Blob objects. They are added at stage 0 - BaseIndexEntry or type Handling equals the one of Blob objects, but the stage may be explicitly set. Please note that Index Entries require binary sha's. :param force: **CURRENTLY INEFFECTIVE** If True, otherwise ignored or excluded files will be added anyway. As opposed to the git-add command, we enable this flag by default as the API user usually wants the item to be added even though they might be excluded. :param fprogress: Function with signature f(path, done=False, item=item) called for each path to be added, one time once it is about to be added where done==False and once after it was added where done=True. item is set to the actual item we handle, either a Path or a BaseIndexEntry. Please note that the processed path is not guaranteed to be present in the index already as the index is currently being processed. :param path_rewriter: Function with signature (string) func(BaseIndexEntry) function returning a path for each passed entry which is the path to be actually recorded for the object created from entry.path. This allows you to write an index which is not identical to the layout of the actual files on your hard-disk. If not None and ``items`` contain plain paths, these paths will be converted to Entries beforehand and passed to the path_rewriter. Please note that entry.path is relative to the git repository. :param write: If True, the index will be written once it was altered. Otherwise the changes only exist in memory and are not available to git commands.
:param write_extension_data: If True, extension data will be written back to the index. This can lead to issues in case it contains the 'TREE' extension, which will cause the `git commit` command to write an old tree, instead of a new one representing the now changed index. This doesn't matter if you use `IndexFile.commit()`, which ignores the `TREE` extension altogether. You should set it to True if you intend to use `IndexFile.commit()` exclusively while maintaining support for third-party extensions. Besides that, you can usually safely ignore the built-in extensions when using GitPython on repositories that are not handled manually at all. All current built-in extensions are listed here: http://opensource.apple.com/source/Git/Git-26/src/git-htmldocs/technical/index-format.txt :return: List(BaseIndexEntries) representing the entries just actually added. :raise OSError: if a supplied Path did not exist. Please note that BaseIndexEntry Objects that do not have a null sha will be added even if their paths do not exist. """ # sort the entries into strings and Entries, Blobs are converted to entries # automatically # paths can be git-added, for everything else we use git-update-index paths, entries = self._preprocess_add_items(items) entries_added = [] # This code needs a working tree, therefore we try not to run it unless required. # That way, we are OK on a bare repository as well. # If there are no paths, the rewriter has nothing to do either if paths: entries_added.extend(self._entries_for_paths(paths, path_rewriter, fprogress, entries)) # HANDLE ENTRIES if entries: null_mode_entries = [e for e in entries if e.mode == 0] if null_mode_entries: raise ValueError( "At least one Entry has a null-mode - please use index.remove to remove files for clarity") # END null mode check # HANDLE ENTRY OBJECT CREATION # create objects if required, otherwise go with the existing shas null_entries_indices = [i for i, e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA] if null_entries_indices: @git_working_dir def handle_null_entries(self): for ei in null_entries_indices: null_entry = entries[ei] new_entry = self._store_path(null_entry.path, fprogress) # update null entry entries[ei] = BaseIndexEntry( (null_entry.mode, new_entry.binsha, null_entry.stage, null_entry.path)) # END for each entry index # end closure handle_null_entries(self) # END null_entry handling # REWRITE PATHS # If we have to rewrite the entries, do so now, after we have generated # all object sha's if path_rewriter: for i, e in enumerate(entries): entries[i] = BaseIndexEntry((e.mode, e.binsha, e.stage, path_rewriter(e))) # END for each entry # END handle path rewriting # just go through the remaining entries and provide progress info for i, entry in enumerate(entries): progress_sent = i in null_entries_indices if not progress_sent: fprogress(entry.path, False, entry) fprogress(entry.path, True, entry) # END handle progress # END for each entry entries_added.extend(entries) # END if there are base entries # FINALIZE # add the new entries to this instance for entry in entries_added: self.entries[(entry.path, 0)] = IndexEntry.from_base(entry) if write: self.write(ignore_extension_data=not write_extension_data) # END handle write return entries_added
python
{ "resource": "" }
q22050
IndexFile._items_to_rela_paths
train
def _items_to_rela_paths(self, items): """Returns a list of repo-relative paths from the given items which may be absolute or relative paths, entries or blobs""" paths = [] for item in items: if isinstance(item, (BaseIndexEntry, Blob, Submodule)): paths.append(self._to_relative_path(item.path)) elif isinstance(item, string_types): paths.append(self._to_relative_path(item)) else: raise TypeError("Invalid item type: %r" % item) # END for each item return paths
python
{ "resource": "" }
q22051
IndexFile.remove
train
def remove(self, items, working_tree=False, **kwargs): """Remove the given items from the index and optionally from the working tree as well. :param items: Multiple types of items are supported which may be freely mixed. - path string Remove the given path at all stages. If it is a directory, you must specify the r=True keyword argument to remove all file entries below it. If absolute paths are given, they will be converted to a path relative to the git repository directory containing the working tree. The path string may include globs, such as *.c. - Blob Object Only the path portion is used in this case. - BaseIndexEntry or compatible type The only relevant information here is the path. The stage is ignored. :param working_tree: If True, the entry will also be removed from the working tree, physically removing the respective file. This may fail if there are uncommitted changes in it. :param kwargs: Additional keyword arguments to be passed to git-rm, such as 'r' to allow recursive removal of directories :return: List(path_string, ...) list of repository relative paths that have been removed effectively. This is interesting to know in case you have provided a directory or globs. Paths are relative to the repository. """ args = [] if not working_tree: args.append("--cached") args.append("--") # preprocess paths paths = self._items_to_rela_paths(items) removed_paths = self.repo.git.rm(args, paths, **kwargs).splitlines() # process output to gain proper paths # rm 'path' return [p[4:-1] for p in removed_paths]
python
{ "resource": "" }
q22052
IndexFile.reset
train
def reset(self, commit='HEAD', working_tree=False, paths=None, head=False, **kwargs): """Reset the index to reflect the tree at the given commit. This will not adjust our HEAD reference as opposed to HEAD.reset by default. :param commit: Revision, Reference or Commit specifying the commit we should represent. If you want to specify a tree only, use IndexFile.from_tree and overwrite the default index. :param working_tree: If True, the files in the working tree will reflect the changed index. If False, the working tree will not be touched. Please note that changes to the working copy will be discarded without warning ! :param head: If True, the head will be set to the given commit. This is False by default, but if True, this method behaves like HEAD.reset. :param paths: if given as an iterable of absolute or repository-relative paths, only these will be reset to their state at the given commit'ish. The paths need to exist at the commit, otherwise an exception will be raised. :param kwargs: Additional keyword arguments passed to git-reset .. note:: IndexFile.reset, as opposed to HEAD.reset, will not delete any files in order to maintain a consistent working tree. Instead, it will just checkout the files according to their state in the index. If you want git-reset like behaviour, use *HEAD.reset* instead. :return: self """ # what we actually want to do is to merge the tree into our existing # index, which is what git-read-tree does new_inst = type(self).from_tree(self.repo, commit) if not paths: self.entries = new_inst.entries else: nie = new_inst.entries for path in paths: path = self._to_relative_path(path) try: key = entry_key(path, 0) self.entries[key] = nie[key] except KeyError: # if key is not in theirs, it mustn't be in ours try: del(self.entries[key]) except KeyError: pass # END handle deletion keyerror # END handle keyerror # END for each path # END handle paths self.write() if working_tree: self.checkout(paths=paths, force=True) # END handle working tree if head: self.repo.head.set_commit(self.repo.commit(commit), logmsg="%s: Updating HEAD" % commit) # END handle head change return self
python
{ "resource": "" }
q22053
IndexFile.diff
train
def diff(self, other=diff.Diffable.Index, paths=None, create_patch=False, **kwargs): """Diff this index against the working copy or a Tree or Commit object For a documentation of the parameters and return values, see Diffable.diff :note: Will only work with indices that represent the default git index as they have not been initialized with a stream. """ # index against index is always empty if other is self.Index: return diff.DiffIndex() # index against anything but None is a reverse diff with the respective # item. Handle existing -R flags properly. Transform strings to the object # so that we can call diff on it if isinstance(other, string_types): other = self.repo.rev_parse(other) # END object conversion if isinstance(other, Object): # invert the existing R flag cur_val = kwargs.get('R', False) kwargs['R'] = not cur_val return other.diff(self.Index, paths, create_patch, **kwargs) # END diff against other item handling # if other is not None here, something is wrong if other is not None: raise ValueError("other must be None, Diffable.Index, a Tree or Commit, was %r" % other) # diff against working copy - can be handled by superclass natively return super(IndexFile, self).diff(other, paths, create_patch, **kwargs)
python
{ "resource": "" }
q22054
TagReference.create
train
def create(cls, repo, path, ref='HEAD', message=None, force=False, **kwargs): """Create a new tag reference. :param path: The name of the tag, i.e. 1.0 or releases/1.0. The prefix refs/tags is implied :param ref: A reference to the object you want to tag. It can be a commit, tree or blob. :param message: If not None, the message will be used in your tag object. This will also create an additional tag object that allows you to obtain that information, i.e.:: tagref.tag.message :param force: If True, force creation of the tag even if a tag with that name already exists. :param kwargs: Additional keyword arguments to be passed to git-tag :return: A new TagReference""" args = (path, ref) if message: kwargs['m'] = message if force: kwargs['f'] = True repo.git.tag(*args, **kwargs) return TagReference(repo, "%s/%s" % (cls._common_path_default, path))
python
{ "resource": "" }
q22055
_find_by_name
train
def _find_by_name(tree_data, name, is_dir, start_at): """return data entry matching the given name and tree mode or None. Before the item is returned, the respective data item is set None in the tree_data list to mark it done""" try: item = tree_data[start_at] if item and item[2] == name and S_ISDIR(item[1]) == is_dir: tree_data[start_at] = None return item except IndexError: pass # END exception handling for index, item in enumerate(tree_data): if item and item[2] == name and S_ISDIR(item[1]) == is_dir: tree_data[index] = None return item # END if item matches # END for each item return None
python
{ "resource": "" }
q22056
set_dirty_and_flush_changes
train
def set_dirty_and_flush_changes(non_const_func): """Return a method wrapping the given non-const function: after the call succeeds, the instance is marked dirty and the changes are flushed right to disk""" def flush_changes(self, *args, **kwargs): rval = non_const_func(self, *args, **kwargs) self._dirty = True self.write() return rval # END wrapper method flush_changes.__name__ = non_const_func.__name__ return flush_changes
python
{ "resource": "" }
q22057
SectionConstraint._call_config
train
def _call_config(self, method, *args, **kwargs): """Call the configuration at the given method which must take a section name as first argument""" return getattr(self._config, method)(self._section_name, *args, **kwargs)
python
{ "resource": "" }
q22058
GitConfigParser._read
train
def _read(self, fp, fpname): """A direct copy of the py2.4 version of the super class's _read method to assure it uses ordered dicts. Had to change one line to make it work. Future versions have this fixed, but in fact it's quite embarrassing for the guys not to have done it right in the first place ! Removed big comments to make it more compact. Made sure it ignores initial whitespace as git uses tabs""" cursect = None # None, or a dictionary optname = None lineno = 0 is_multi_line = False e = None # None, or an exception def string_decode(v): if v[-1] == '\\': v = v[:-1] # end cut trailing escapes to prevent decode error if PY3: return v.encode(defenc).decode('unicode_escape') else: return v.decode('string_escape') # end # end while True: # we assume to read binary ! line = fp.readline().decode(defenc) if not line: break lineno = lineno + 1 # comment or blank line? if line.strip() == '' or self.re_comment.match(line): continue if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR": # no leading whitespace continue # is it a section header? mo = self.SECTCRE.match(line.strip()) if not is_multi_line and mo: sectname = mo.group('header').strip() if sectname in self._sections: cursect = self._sections[sectname] elif sectname == cp.DEFAULTSECT: cursect = self._defaults else: cursect = self._dict((('__name__', sectname),)) self._sections[sectname] = cursect self._proxies[sectname] = None # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: raise cp.MissingSectionHeaderError(fpname, lineno, line) # an option line? elif not is_multi_line: mo = self.OPTCRE.match(line) if mo: # We might just have handled the last line, which could contain a quotation we want to remove optname, vi, optval = mo.group('option', 'vi', 'value') if vi in ('=', ':') and ';' in optval and not optval.strip().startswith('"'): pos = optval.find(';') if pos != -1 and optval[pos - 1].isspace(): optval = optval[:pos] optval = optval.strip() if optval == '""': optval = '' # end handle empty string optname = self.optionxform(optname.rstrip()) if len(optval) > 1 and optval[0] == '"' and optval[-1] != '"': is_multi_line = True optval = string_decode(optval[1:]) # end handle multi-line cursect[optname] = optval else: # check if it's an option with no value - it's just ignored by git if not self.OPTVALUEONLY.match(line): if not e: e = cp.ParsingError(fpname) e.append(lineno, repr(line)) continue else: line = line.rstrip() if line.endswith('"'): is_multi_line = False line = line[:-1] # end handle quotations cursect[optname] += string_decode(line) # END parse section or option # END while reading # if any parsing errors occurred, raise an exception if e: raise e
python
{ "resource": "" }
q22059
GitConfigParser.read
train
def read(self):
    """Reads the data stored in the files we have been initialized with. It will
    ignore files that cannot be read, possibly leaving an empty configuration

    :return: Nothing
    :raise IOError: if a file cannot be handled"""
    if self._is_initialized:
        return
    self._is_initialized = True

    if not isinstance(self._file_or_files, (tuple, list)):
        files_to_read = [self._file_or_files]
    else:
        files_to_read = list(self._file_or_files)
    # end assure we have a copy of the paths to handle

    seen = set(files_to_read)
    num_read_include_files = 0
    while files_to_read:
        file_path = files_to_read.pop(0)
        fp = file_path
        file_ok = False

        if hasattr(fp, "seek"):
            self._read(fp, fp.name)
        else:
            # assume a path if it is not a file-object
            try:
                with open(file_path, 'rb') as fp:
                    file_ok = True
                    self._read(fp, fp.name)
            except IOError:
                continue

        # Read includes and append those that we didn't handle yet
        # We expect all paths to be normalized and absolute (and will assure that is the case)
        if self._has_includes():
            for _, include_path in self.items('include'):
                if include_path.startswith('~'):
                    include_path = osp.expanduser(include_path)
                if not osp.isabs(include_path):
                    if not file_ok:
                        continue
                    # end ignore relative paths if we don't know the configuration file path
                    assert osp.isabs(file_path), "Need absolute paths to be sure our cycle checks will work"
                    include_path = osp.join(osp.dirname(file_path), include_path)
                # end make include path absolute
                include_path = osp.normpath(include_path)
                if include_path in seen or not os.access(include_path, os.R_OK):
                    continue
                seen.add(include_path)
                # insert included file to the top to be considered first
                files_to_read.insert(0, include_path)
                num_read_include_files += 1
            # each include path in configuration file
        # end handle includes
    # END for each file object to read

    # If there was no file included, we can safely write back (potentially) the
    # configuration file without altering its meaning
    if num_read_include_files == 0:
        self._merge_includes = False
python
{ "resource": "" }
q22060
GitConfigParser._write
train
def _write(self, fp):
    """Write an .ini-format representation of the configuration state in
    git compatible format"""
    def write_section(name, section_dict):
        fp.write(("[%s]\n" % name).encode(defenc))
        for (key, value) in section_dict.items():
            if key != "__name__":
                fp.write(("\t%s = %s\n" % (key, self._value_to_string(value).replace('\n', '\n\t'))).encode(defenc))
            # END if key is not __name__
    # END section writing

    if self._defaults:
        write_section(cp.DEFAULTSECT, self._defaults)
    for name, value in self._sections.items():
        write_section(name, value)
python
{ "resource": "" }
q22061
GitConfigParser.write
train
def write(self):
    """Write changes to our file, if there are changes at all

    :raise IOError: if this is a read-only writer instance or if we could not obtain
        a file lock"""
    self._assure_writable("write")
    if not self._dirty:
        return

    if isinstance(self._file_or_files, (list, tuple)):
        raise AssertionError("Cannot write back if there is not exactly a single file to write to, have %i files"
                             % len(self._file_or_files))
    # end assert multiple files

    if self._has_includes():
        log.debug("Skipping write-back of configuration file as include files were merged in. "
                  "Set merge_includes=False to prevent this.")
        return
    # end

    fp = self._file_or_files

    # we have a physical file on disk, so get a lock
    is_file_lock = isinstance(fp, string_types + (FileType, ))
    if is_file_lock:
        self._lock._obtain_lock()
    if not hasattr(fp, "seek"):
        with open(self._file_or_files, "wb") as fp:
            self._write(fp)
    else:
        fp.seek(0)
        # make sure we do not overwrite into an existing file
        if hasattr(fp, 'truncate'):
            fp.truncate()
        self._write(fp)
python
{ "resource": "" }
q22062
GitConfigParser.set_value
train
def set_value(self, section, option, value):
    """Sets the given option in section to the given value.
    It will create the section if required, and will not throw as opposed to the default
    ConfigParser 'set' method.

    :param section: Name of the section in which the option resides or should reside
    :param option: Name of the option whose value to set
    :param value: Value to set the option to. It must be a string or convertible
        to a string
    :return: this instance"""
    if not self.has_section(section):
        self.add_section(section)
    self.set(section, option, self._value_to_string(value))
    return self
python
{ "resource": "" }
q22063
stat_mode_to_index_mode
train
def stat_mode_to_index_mode(mode):
    """Convert the given mode from a stat call to the corresponding index mode
    and return it"""
    if S_ISLNK(mode):                                   # symlinks
        return S_IFLNK
    if S_ISDIR(mode) or S_IFMT(mode) == S_IFGITLINK:    # submodules
        return S_IFGITLINK
    return S_IFREG | 0o644 | (mode & 0o111)
python
{ "resource": "" }
q22064
write_cache
train
def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1Writer):
    """Write the cache represented by entries to a stream

    :param entries: **sorted** list of entries
    :param stream: stream to wrap into the AdapterStreamCls - it is used for
        final output.
    :param ShaStreamCls: Type to use when writing to the stream. It produces a sha
        while writing to it, before the data is passed on to the wrapped stream
    :param extension_data: any kind of data to write as a trailer, it must begin
        with a 4 byte identifier, followed by its size ( 4 bytes )"""
    # wrap the stream into a compatible writer
    stream = ShaStreamCls(stream)

    tell = stream.tell
    write = stream.write

    # header
    version = 2
    write(b"DIRC")
    write(pack(">LL", version, len(entries)))

    # body
    for entry in entries:
        beginoffset = tell()
        write(entry[4])         # ctime
        write(entry[5])         # mtime
        path = entry[3]
        path = force_bytes(path, encoding=defenc)
        plen = len(path) & CE_NAMEMASK                  # path length
        assert plen == len(path), "Path %s too long to fit into index" % entry[3]
        flags = plen | (entry[2] & CE_NAMEMASK_INV)     # clear possible previous values
        write(pack(">LLLLLL20sH", entry[6], entry[7], entry[0],
                   entry[8], entry[9], entry[10], entry[1], flags))
        write(path)
        real_size = ((tell() - beginoffset + 8) & ~7)
        write(b"\0" * ((beginoffset + real_size) - tell()))
    # END for each entry

    # write previously cached extensions data
    if extension_data is not None:
        stream.write(extension_data)

    # write the sha over the content
    stream.write_sha()
python
{ "resource": "" }
q22065
write_tree_from_cache
train
def write_tree_from_cache(entries, odb, sl, si=0):
    """Create a tree from the given sorted list of entries and put the respective
    trees into the given object database

    :param entries: **sorted** list of IndexEntries
    :param odb: object database to store the trees in
    :param si: start index at which we should start creating subtrees
    :param sl: slice indicating the range we should process on the entries list
    :return: tuple(binsha, list(tree_entry, ...)) a tuple of a sha and a list of
        tree entries being a tuple of hexsha, mode, name"""
    tree_items = []
    tree_items_append = tree_items.append
    ci = sl.start
    end = sl.stop
    while ci < end:
        entry = entries[ci]
        if entry.stage != 0:
            raise UnmergedEntriesError(entry)
        # END abort on unmerged
        ci += 1
        rbound = entry.path.find('/', si)
        if rbound == -1:
            # its not a tree
            tree_items_append((entry.binsha, entry.mode, entry.path[si:]))
        else:
            # find common base range
            base = entry.path[si:rbound]
            xi = ci
            while xi < end:
                oentry = entries[xi]
                orbound = oentry.path.find('/', si)
                if orbound == -1 or oentry.path[si:orbound] != base:
                    break
                # END abort on base mismatch
                xi += 1
            # END find common base

            # enter recursion
            # ci - 1 as we want to count our current item as well
            sha, tree_entry_list = write_tree_from_cache(entries, odb, slice(ci - 1, xi), rbound + 1)  # @UnusedVariable
            tree_items_append((sha, S_IFDIR, base))

            # skip ahead
            ci = xi
        # END handle bounds
    # END for each entry

    # finally create the tree
    sio = BytesIO()
    tree_to_stream(tree_items, sio.write)
    sio.seek(0)

    istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio))
    return (istream.binsha, tree_items)
python
{ "resource": "" }
q22066
TreeModifier.add
train
def add(self, sha, mode, name, force=False):
    """Add the given item to the tree. If an item with the given name already
    exists, nothing will be done, but a ValueError will be raised if the
    sha and mode of the existing item do not match the one you add, unless
    force is True

    :param sha: The 20 or 40 byte sha of the item to add
    :param mode: int representing the stat compatible mode of the item
    :param force: If True, an item with your name and information will overwrite
        any existing item with the same name, no matter which information it has
    :return: self"""
    if '/' in name:
        raise ValueError("Name must not contain '/' characters")
    if (mode >> 12) not in Tree._map_id_to_type:
        raise ValueError("Invalid object type according to mode %o" % mode)

    sha = to_bin_sha(sha)
    index = self._index_by_name(name)

    item = (sha, mode, name)
    if index == -1:
        self._cache.append(item)
    else:
        if force:
            self._cache[index] = item
        else:
            ex_item = self._cache[index]
            if ex_item[0] != sha or ex_item[1] != mode:
                raise ValueError("Item %r existed with different properties" % name)
            # END handle mismatch
        # END handle force
    # END handle name exists
    return self
python
{ "resource": "" }
q22067
Tree.traverse
train
def traverse(self, predicate=lambda i, d: True,
             prune=lambda i, d: False, depth=-1, branch_first=True,
             visit_once=False, ignore_self=1):
    """For documentation, see util.Traversable.traverse
    Trees are set to visit_once = False to gain more performance in the traversal"""
    return super(Tree, self).traverse(predicate, prune, depth, branch_first, visit_once, ignore_self)
python
{ "resource": "" }
q22068
GitCmdObjectDB.stream
train
def stream(self, sha):
    """For now, all lookup is done by git itself"""
    hexsha, typename, size, stream = self._git.stream_object_data(bin_to_hex(sha))
    return OStream(hex_to_bin(hexsha), typename, size, stream)
python
{ "resource": "" }
q22069
altz_to_utctz_str
train
def altz_to_utctz_str(altz):
    """As above, but inverses the operation, returning a string that can be used
    in commit objects"""
    utci = -1 * int((float(altz) / 3600) * 100)
    utcs = str(abs(utci))
    utcs = "0" * (4 - len(utcs)) + utcs
    prefix = (utci < 0 and '-') or '+'
    return prefix + utcs
python
{ "resource": "" }
q22070
from_timestamp
train
def from_timestamp(timestamp, tz_offset):
    """Converts a timestamp + tz_offset into an aware datetime instance."""
    utc_dt = datetime.fromtimestamp(timestamp, utc)
    try:
        local_dt = utc_dt.astimezone(tzoffset(tz_offset))
        return local_dt
    except ValueError:
        return utc_dt
python
{ "resource": "" }
q22071
parse_date
train
def parse_date(string_date):
    """
    Parse the given date as one of the following

        * Git internal format: timestamp offset
        * RFC 2822: Thu, 07 Apr 2005 22:13:13 +0200.
        * ISO 8601 2005-04-07T22:13:13
            The T can be a space as well

    :return: Tuple(int(timestamp_UTC), int(offset)), both in seconds since epoch
    :raise ValueError: If the format could not be understood
    :note: Date can also be YYYY.MM.DD, MM/DD/YYYY and DD.MM.YYYY.
    """
    # git time
    try:
        if string_date.count(' ') == 1 and string_date.rfind(':') == -1:
            timestamp, offset = string_date.split()
            timestamp = int(timestamp)
            return timestamp, utctz_to_altz(verify_utctz(offset))
        else:
            offset = "+0000"                    # local time by default
            if string_date[-5] in '-+':
                offset = verify_utctz(string_date[-5:])
                string_date = string_date[:-6]  # skip space as well
            # END split timezone info
            offset = utctz_to_altz(offset)

            # now figure out the date and time portion - split time
            date_formats = []
            splitter = -1
            if ',' in string_date:
                date_formats.append("%a, %d %b %Y")
                splitter = string_date.rfind(' ')
            else:
                # iso plus additional
                date_formats.append("%Y-%m-%d")
                date_formats.append("%Y.%m.%d")
                date_formats.append("%m/%d/%Y")
                date_formats.append("%d.%m.%Y")

                splitter = string_date.rfind('T')
                if splitter == -1:
                    splitter = string_date.rfind(' ')
                # END handle 'T' and ' '
            # END handle rfc or iso

            assert splitter > -1

            # split date and time
            time_part = string_date[splitter + 1:]    # skip space
            date_part = string_date[:splitter]

            # parse time
            tstruct = time.strptime(time_part, "%H:%M:%S")

            for fmt in date_formats:
                try:
                    dtstruct = time.strptime(date_part, fmt)
                    utctime = calendar.timegm((dtstruct.tm_year, dtstruct.tm_mon, dtstruct.tm_mday,
                                               tstruct.tm_hour, tstruct.tm_min, tstruct.tm_sec,
                                               dtstruct.tm_wday, dtstruct.tm_yday, tstruct.tm_isdst))
                    return int(utctime), offset
                except ValueError:
                    continue
                # END exception handling
            # END for each fmt

            # still here ? fail
            raise ValueError("no format matched")
        # END handle format
    except Exception:
        raise ValueError("Unsupported date format: %s" % string_date)
python
{ "resource": "" }
q22072
handle_process_output
train
def handle_process_output(process, stdout_handler, stderr_handler, finalizer=None, decode_streams=True):
    """Registers for notifications to learn that process output is ready to read, and dispatches lines to
    the respective line handlers.
    This function returns once the finalizer returns

    :return: result of finalizer
    :param process: subprocess.Popen instance
    :param stdout_handler: f(stdout_line_string), or None
    :param stderr_handler: f(stderr_line_string), or None
    :param finalizer: f(proc) - wait for proc to finish
    :param decode_streams:
        Assume stdout/stderr streams are binary and decode them before pushing \
        their contents to handlers.
        Set it to False if `universal_newline == True` (then streams are in text-mode)
        or if decoding must happen later (i.e. for Diffs).
    """
    # Use 2 "pump" threads and wait for both to finish.
    def pump_stream(cmdline, name, stream, is_decode, handler):
        try:
            for line in stream:
                if handler:
                    if is_decode:
                        line = line.decode(defenc)
                    handler(line)
        except Exception as ex:
            log.error("Pumping %r of cmd(%s) failed due to: %r", name, cmdline, ex)
            raise CommandError(['<%s-pump>' % name] + cmdline, ex)
        finally:
            stream.close()

    cmdline = getattr(process, 'args', '')  # PY3+ only
    if not isinstance(cmdline, (tuple, list)):
        cmdline = cmdline.split()

    pumps = []
    if process.stdout:
        pumps.append(('stdout', process.stdout, stdout_handler))
    if process.stderr:
        pumps.append(('stderr', process.stderr, stderr_handler))

    threads = []

    for name, stream, handler in pumps:
        t = threading.Thread(target=pump_stream,
                             args=(cmdline, name, stream, decode_streams, handler))
        t.setDaemon(True)
        t.start()
        threads.append(t)

    ## FIXME: Why Join??  Will block if `stdin` needs feeding...
    #
    for t in threads:
        t.join()

    if finalizer:
        return finalizer(process)
python
{ "resource": "" }
q22073
Git.set_persistent_git_options
train
def set_persistent_git_options(self, **kwargs):
    """Specify command line options to the git executable
    for subsequent subcommand calls

    :param kwargs:
        is a dict of keyword arguments.
        these arguments are passed as in _call_process
        but will be passed to the git command rather than
        the subcommand.
    """
    self._persistent_git_options = self.transform_kwargs(
        split_single_char_options=True, **kwargs)
python
{ "resource": "" }
q22074
Git.custom_environment
train
def custom_environment(self, **kwargs):
    """
    A context manager around the above ``update_environment`` method to restore the
    environment back to its previous state after operation.

    ``Examples``::

        with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'):
            repo.remotes.origin.fetch()

    :param kwargs: see update_environment
    """
    old_env = self.update_environment(**kwargs)
    try:
        yield
    finally:
        self.update_environment(**old_env)
python
{ "resource": "" }
q22075
Git.get_object_header
train
def get_object_header(self, ref):
    """
    Use this method to quickly examine the type and size of the object behind
    the given ref.

    :note: The method will only suffer from the costs of command invocation
        once and reuses the command in subsequent calls.

    :return: (hexsha, type_string, size_as_int)"""
    cmd = self._get_persistent_cmd("cat_file_header", "cat_file", batch_check=True)
    return self.__get_object_header(cmd, ref)
python
{ "resource": "" }
q22076
Git.clear_cache
train
def clear_cache(self):
    """Clear all kinds of internal caches to release resources.

    Currently persistent commands will be interrupted.

    :return: self"""
    for cmd in (self.cat_file_all, self.cat_file_header):
        if cmd:
            cmd.__del__()

    self.cat_file_all = None
    self.cat_file_header = None
    return self
python
{ "resource": "" }
q22077
Repo.create_head
train
def create_head(self, path, commit='HEAD', force=False, logmsg=None):
    """Create a new head within the repository.
    For more documentation, please see the Head.create method.

    :return: newly created Head Reference"""
    return Head.create(self, path, commit, force, logmsg)
python
{ "resource": "" }
q22078
Repo.create_tag
train
def create_tag(self, path, ref='HEAD', message=None, force=False, **kwargs):
    """Create a new tag reference.
    For more documentation, please see the TagReference.create method.

    :return: TagReference object"""
    return TagReference.create(self, path, ref, message, force, **kwargs)
python
{ "resource": "" }
q22079
Repo.create_remote
train
def create_remote(self, name, url, **kwargs):
    """Create a new remote.

    For more information, please see the documentation of the Remote.create
    methods

    :return: Remote reference"""
    return Remote.create(self, name, url, **kwargs)
python
{ "resource": "" }
q22080
Repo.is_ancestor
train
def is_ancestor(self, ancestor_rev, rev):
    """Check if a commit is an ancestor of another

    :param ancestor_rev: Rev which should be an ancestor
    :param rev: Rev to test against ancestor_rev
    :return: ``True`` if ancestor_rev is an ancestor of rev.
    """
    try:
        self.git.merge_base(ancestor_rev, rev, is_ancestor=True)
    except GitCommandError as err:
        if err.status == 1:
            return False
        raise
    return True
python
{ "resource": "" }
q22081
Repo._get_alternates
train
def _get_alternates(self):
    """The list of alternates for this repo from which objects can be retrieved

    :return: list of strings being pathnames of alternates"""
    alternates_path = osp.join(self.git_dir, 'objects', 'info', 'alternates')

    if osp.exists(alternates_path):
        with open(alternates_path, 'rb') as f:
            alts = f.read().decode(defenc)
        return alts.strip().splitlines()
    else:
        return []
python
{ "resource": "" }
q22082
Repo._set_alternates
train
def _set_alternates(self, alts):
    """Sets the alternates

    :param alts:
        is the array of string paths representing the alternates at which
        git should look for objects, i.e. /home/user/repo/.git/objects

    :raise NoSuchPathError:
    :note:
        The method does not check for the existence of the paths in alts
        as the caller is responsible."""
    alternates_path = osp.join(self.common_dir, 'objects', 'info', 'alternates')
    if not alts:
        if osp.isfile(alternates_path):
            os.remove(alternates_path)
    else:
        with open(alternates_path, 'wb') as f:
            f.write("\n".join(alts).encode(defenc))
python
{ "resource": "" }
q22083
Repo.blame_incremental
train
def blame_incremental(self, rev, file, **kwargs):
    """Iterator for blame information for the given file at the given revision.

    Unlike .blame(), this does not return the actual file's contents, only
    a stream of BlameEntry tuples.

    :param rev: revision specifier, see git-rev-parse for viable options.
    :return: lazy iterator of BlameEntry tuples, where the commit
             indicates the commit to blame for the line, and range
             indicates a span of line numbers in the resulting file.

    If you combine all line number ranges outputted by this command, you
    should get a continuous range spanning all line numbers in the file.
    """
    data = self.git.blame(rev, '--', file, p=True, incremental=True, stdout_as_string=False, **kwargs)
    commits = {}

    stream = (line for line in data.split(b'\n') if line)
    while True:
        try:
            line = next(stream)  # when exhausted, causes a StopIteration, terminating this function
        except StopIteration:
            return
        hexsha, orig_lineno, lineno, num_lines = line.split()
        lineno = int(lineno)
        num_lines = int(num_lines)
        orig_lineno = int(orig_lineno)
        if hexsha not in commits:
            # Now read the next few lines and build up a dict of properties
            # for this commit
            props = {}
            while True:
                try:
                    line = next(stream)
                except StopIteration:
                    return
                if line == b'boundary':
                    # "boundary" indicates a root commit and occurs
                    # instead of the "previous" tag
                    continue

                tag, value = line.split(b' ', 1)
                props[tag] = value
                if tag == b'filename':
                    # "filename" formally terminates the entry for --incremental
                    orig_filename = value
                    break

            c = Commit(self, hex_to_bin(hexsha),
                       author=Actor(safe_decode(props[b'author']),
                                    safe_decode(props[b'author-mail'].lstrip(b'<').rstrip(b'>'))),
                       authored_date=int(props[b'author-time']),
                       committer=Actor(safe_decode(props[b'committer']),
                                       safe_decode(props[b'committer-mail'].lstrip(b'<').rstrip(b'>'))),
                       committed_date=int(props[b'committer-time']))
            commits[hexsha] = c
        else:
            # Discard all lines until we find "filename" which is
            # guaranteed to be the last line
            while True:
                try:
                    line = next(stream)  # will fail if we reach the EOF unexpectedly
                except StopIteration:
                    return
                tag, value = line.split(b' ', 1)
                if tag == b'filename':
                    orig_filename = value
                    break

        yield BlameEntry(commits[hexsha],
                         range(lineno, lineno + num_lines),
                         safe_decode(orig_filename),
                         range(orig_lineno, orig_lineno + num_lines))
python
{ "resource": "" }
q22084
Repo.blame
train
def blame(self, rev, file, incremental=False, **kwargs):
    """The blame information for the given file at the given revision.

    :param rev: revision specifier, see git-rev-parse for viable options.
    :return:
        list: [git.Commit, list: [<line>]]
        A list of tuples associating a Commit object with a list of lines that
        changed within the given commit. The Commit objects will be given in order
        of appearance."""
    if incremental:
        return self.blame_incremental(rev, file, **kwargs)

    data = self.git.blame(rev, '--', file, p=True, stdout_as_string=False, **kwargs)
    commits = {}
    blames = []
    info = None

    keepends = True
    for line in data.splitlines(keepends):
        try:
            line = line.rstrip().decode(defenc)
        except UnicodeDecodeError:
            firstpart = ''
            is_binary = True
        else:
            # We don't know when the binary data ends, as it could contain multiple newlines.
            # So we rely on being able to decode to tell us what it is.
            # This can absolutely fail even on text files, but even if it does, we should be fine treating it
            # as binary instead
            parts = self.re_whitespace.split(line, 1)
            firstpart = parts[0]
            is_binary = False
        # end handle decode of line

        if self.re_hexsha_only.search(firstpart):
            # handles
            # 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7        - indicates blame-data start
            # 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2          - indicates
            # another line of blame with the same data
            digits = parts[-1].split(" ")
            if len(digits) == 3:
                info = {'id': firstpart}
                blames.append([None, []])
            elif info['id'] != firstpart:
                info = {'id': firstpart}
                blames.append([commits.get(firstpart), []])
            # END blame data initialization
        else:
            m = self.re_author_committer_start.search(firstpart)
            if m:
                # handles:
                # author Tom Preston-Werner
                # author-mail <tom@mojombo.com>
                # author-time 1192271832
                # author-tz -0700
                # committer Tom Preston-Werner
                # committer-mail <tom@mojombo.com>
                # committer-time 1192271832
                # committer-tz -0700  - IGNORED BY US
                role = m.group(0)
                if firstpart.endswith('-mail'):
                    info["%s_email" % role] = parts[-1]
                elif firstpart.endswith('-time'):
                    info["%s_date" % role] = int(parts[-1])
                elif role == firstpart:
                    info[role] = parts[-1]
                # END distinguish mail,time,name
            else:
                # handle
                # filename lib/grit.rb
                # summary add Blob
                # <and rest>
                if firstpart.startswith('filename'):
                    info['filename'] = parts[-1]
                elif firstpart.startswith('summary'):
                    info['summary'] = parts[-1]
                elif firstpart == '':
                    if info:
                        sha = info['id']
                        c = commits.get(sha)
                        if c is None:
                            c = Commit(self, hex_to_bin(sha),
                                       author=Actor._from_string(info['author'] + ' ' + info['author_email']),
                                       authored_date=info['author_date'],
                                       committer=Actor._from_string(
                                           info['committer'] + ' ' + info['committer_email']),
                                       committed_date=info['committer_date'])
                            commits[sha] = c
                        # END if commit objects needs initial creation
                        if not is_binary:
                            if line and line[0] == '\t':
                                line = line[1:]
                        else:
                            # NOTE: We are actually parsing lines out of binary data, which can lead to the
                            # binary being split up along the newline separator. We will append this to the blame
                            # we are currently looking at, even though it should be concatenated with the last line
                            # we have seen.
                            pass
                        # end handle line contents
                        blames[-1][0] = c
                        blames[-1][1].append(line)
                        info = {'id': sha}
                    # END if we collected commit info
                # END distinguish filename,summary,rest
            # END distinguish author|committer vs filename,summary,rest
        # END distinguish hexsha vs other information
    return blames
python
{ "resource": "" }
q22085
Repo.init
train
def init(cls, path=None, mkdir=True, odbt=GitCmdObjectDB, expand_vars=True, **kwargs):
    """Initialize a git repository at the given path if specified

    :param path:
        is the full path to the repo (traditionally ends with /<name>.git)
        or None in which case the repository will be created in the current
        working directory

    :param mkdir:
        if specified will create the repository directory if it doesn't
        already exist. Creates the directory with a mode=0755.
        Only effective if a path is explicitly given

    :param odbt:
        Object DataBase type - a type which is constructed by providing
        the directory containing the database objects, i.e. .git/objects.
        It will be used to access all object data

    :param expand_vars:
        if specified, environment variables will not be escaped. This
        can lead to information disclosure, allowing attackers to
        access the contents of environment variables

    :param kwargs:
        keyword arguments serving as additional options to the git-init command

    :return: ``git.Repo`` (the newly created repo)"""
    if path:
        path = expand_path(path, expand_vars)
    if mkdir and path and not osp.exists(path):
        os.makedirs(path, 0o755)

    # git command automatically chdir into the directory
    git = Git(path)
    git.init(**kwargs)
    return cls(path, odbt=odbt)
python
{ "resource": "" }
q22086
Repo.clone
train
def clone(self, path, progress=None, **kwargs):
    """Create a clone from this repository.

    :param path: is the full path of the new repo (traditionally ends with ./<name>.git).
    :param progress: See 'git.remote.Remote.push'.
    :param kwargs:
        * odbt = ObjectDatabase Type, allowing to determine the object database
          implementation used by the returned Repo instance
        * All remaining keyword arguments are given to the git-clone command

    :return: ``git.Repo`` (the newly cloned repo)"""
    return self._clone(self.git, self.common_dir, path, type(self.odb), progress, **kwargs)
python
{ "resource": "" }
q22087
Repo.clone_from
train
def clone_from(cls, url, to_path, progress=None, env=None, **kwargs):
    """Create a clone from the given URL

    :param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
    :param to_path: Path to which the repository should be cloned to
    :param progress: See 'git.remote.Remote.push'.
    :param env: Optional dictionary containing the desired environment variables.
    :param kwargs: see the ``clone`` method
    :return: Repo instance pointing to the cloned directory"""
    git = Git(os.getcwd())
    if env is not None:
        git.update_environment(**env)
    return cls._clone(git, url, to_path, GitCmdObjectDB, progress, **kwargs)
python
{ "resource": "" }
q22088
Repo.archive
train
def archive(self, ostream, treeish=None, prefix=None, **kwargs):
    """Archive the tree at the given revision.

    :param ostream: file compatible stream object to which the archive will be written as bytes
    :param treeish: is the treeish name/id, defaults to active branch
    :param prefix: is the optional prefix to prepend to each filename in the archive
    :param kwargs: Additional arguments passed to git-archive

        * Use the 'format' argument to define the kind of format. Use
          specialized ostreams to write any format supported by python.
        * You may specify the special **path** keyword, which may either be a repository-relative
          path to a directory or file to place into the archive, or a list or tuple of multiple paths.

    :raise GitCommandError: in case something went wrong
    :return: self"""
    if treeish is None:
        treeish = self.head.commit
    if prefix and 'prefix' not in kwargs:
        kwargs['prefix'] = prefix
    kwargs['output_stream'] = ostream
    path = kwargs.pop('path', [])
    if not isinstance(path, (tuple, list)):
        path = [path]
    # end assure paths is list

    self.git.archive(treeish, *path, **kwargs)
    return self
python
{ "resource": "" }
q22089
win_encode
train
def win_encode(s):
    """Encode unicodes for process arguments on Windows."""
    if isinstance(s, unicode):
        return s.encode(locale.getpreferredencoding(False))
    elif isinstance(s, bytes):
        return s
    elif s is not None:
        raise TypeError('Expected bytes or text, but got %r' % (s,))
python
{ "resource": "" }
q22090
Commit.count
train
def count(self, paths='', **kwargs):
    """Count the number of commits reachable from this commit

    :param paths:
        is an optional path or a list of paths restricting the return value
        to commits actually containing the paths

    :param kwargs:
        Additional options to be passed to git-rev-list. They must not alter
        the output style of the command, or parsing will yield incorrect results

    :return: int defining the number of reachable commits"""
    # yes, it makes a difference whether empty paths are given or not in our case
    # as the empty paths version will ignore merge commits for some reason.
    if paths:
        return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
    else:
        return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
python
{ "resource": "" }
q22091
Commit.iter_items
train
def iter_items(cls, repo, rev, paths='', **kwargs):
    """Find all commits matching the given criteria.

    :param repo: is the Repo
    :param rev: revision specifier, see git-rev-parse for viable options
    :param paths:
        is an optional path or list of paths, if set only Commits that include the path
        or paths will be considered
    :param kwargs:
        optional keyword arguments to git rev-list where
        ``max_count`` is the maximum number of commits to fetch
        ``skip`` is the number of commits to skip
        ``since`` all commits since i.e. '1970-01-01'
    :return: iterator yielding Commit items"""
    if 'pretty' in kwargs:
        raise ValueError("--pretty cannot be used as parsing expects single sha's only")
    # END handle pretty

    # use -- in any case, to prevent possibility of ambiguous arguments
    # see https://github.com/gitpython-developers/GitPython/issues/264
    args = ['--']
    if paths:
        args.extend((paths, ))
    # END if paths

    proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
    return cls._iter_from_process_or_stream(repo, proc)
python
{ "resource": "" }
q22092
Commit.iter_parents
train
def iter_parents(self, paths='', **kwargs):
    """Iterate _all_ parents of this commit.

    :param paths:
        Optional path or list of paths limiting the Commits to those that
        contain at least one of the paths
    :param kwargs: All arguments allowed by git-rev-list
    :return: Iterator yielding Commit objects which are parents of self"""
    # skip ourselves
    skip = kwargs.get("skip", 1)
    if skip == 0:   # skip ourselves
        skip = 1
    kwargs['skip'] = skip

    return self.iter_items(self.repo, self, paths, **kwargs)
python
{ "resource": "" }
q22093
Commit.stats
train
def stats(self):
    """Create a git stat from changes between this commit and its first parent
    or from all changes done if this is the very first commit.

    :return: git.Stats"""
    if not self.parents:
        text = self.repo.git.diff_tree(self.hexsha, '--', numstat=True, root=True)
        text2 = ""
        for line in text.splitlines()[1:]:
            (insertions, deletions, filename) = line.split("\t")
            text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
        text = text2
    else:
        text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, '--', numstat=True)
    return Stats._list_from_string(self.repo, text)
python
{ "resource": "" }
q22094
Commit._iter_from_process_or_stream
train
def _iter_from_process_or_stream(cls, repo, proc_or_stream):
    """Parse out commit information into a list of Commit objects
    We expect one line per commit, and parse the actual commit information directly
    from our lightning fast object database

    :param proc_or_stream: git-rev-list process instance - one sha per line
    :return: iterator returning Commit objects"""
    stream = proc_or_stream
    if not hasattr(stream, 'readline'):
        stream = proc_or_stream.stdout

    readline = stream.readline
    while True:
        line = readline()
        if not line:
            break
        hexsha = line.strip()
        if len(hexsha) > 40:
            # split additional information, as returned by bisect for instance
            hexsha, _ = line.split(None, 1)
        # END handle extra info

        assert len(hexsha) == 40, "Invalid line: %s" % hexsha
        yield Commit(repo, hex_to_bin(hexsha))
    # END for each line in stream

    # TODO: Review this - it seems process handling got a bit out of control
    # due to many developers trying to fix the open file handles issue
    if hasattr(proc_or_stream, 'wait'):
        finalize_process(proc_or_stream)
python
{ "resource": "" }
q22095
Commit.create_from_tree
train
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None,
                     author_date=None, commit_date=None):
    """Commit the given tree, creating a commit object.

    :param repo: Repo object the commit should be part of
    :param tree: Tree object or hex or bin sha
        the tree of the new commit
    :param message: Commit message. It may be an empty string if no message is provided.
        It will be converted to a string in any case.
    :param parent_commits:
        Optional Commit objects to use as parents for the new commit.
        If empty list, the commit will have no parents at all and become
        a root commit.
        If None, the current head commit will be the parent of the
        new commit object
    :param head:
        If True, the HEAD will be advanced to the new commit automatically.
        Else the HEAD will remain pointing on the previous commit. This could
        lead to undesired results when diffing files.
    :param author: The name of the author, optional. If unset, the repository
        configuration is used to obtain this value.
    :param committer: The name of the committer, optional. If unset, the
        repository configuration is used to obtain this value.
    :param author_date: The timestamp for the author field
    :param commit_date: The timestamp for the committer field

    :return: Commit object representing the new commit

    :note:
        Additional information about the committer and Author are taken from the
        environment or from the git configuration, see git-commit-tree for
        more information"""
    if parent_commits is None:
        try:
            parent_commits = [repo.head.commit]
        except ValueError:
            # empty repositories have no head commit
            parent_commits = []
        # END handle parent commits
    else:
        for p in parent_commits:
            if not isinstance(p, cls):
                raise ValueError("Parent commit '%r' must be of type %s" % (p, cls))
        # end check parent commit types
    # END if parent commits are unset

    # retrieve all additional information, create a commit object, and
    # serialize it
    # Generally:
    # * Environment variables override configuration values
    # * Sensible defaults are set according to the git documentation

    # COMMITTER AND AUTHOR INFO
    cr = repo.config_reader()
    env = os.environ

    committer = committer or Actor.committer(cr)
    author = author or Actor.author(cr)

    # PARSE THE DATES
    unix_time = int(time())
    is_dst = daylight and localtime().tm_isdst > 0
    offset = altzone if is_dst else timezone

    author_date_str = env.get(cls.env_author_date, '')
    if author_date:
        author_time, author_offset = parse_date(author_date)
    elif author_date_str:
        author_time, author_offset = parse_date(author_date_str)
    else:
        author_time, author_offset = unix_time, offset
    # END set author time

    committer_date_str = env.get(cls.env_committer_date, '')
    if commit_date:
        committer_time, committer_offset = parse_date(commit_date)
    elif committer_date_str:
        committer_time, committer_offset = parse_date(committer_date_str)
    else:
        committer_time, committer_offset = unix_time, offset
    # END set committer time

    # assume utf8 encoding
    enc_section, enc_option = cls.conf_encoding.split('.')
    conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)

    # if the tree is no object, make sure we create one - otherwise
    # the created commit object is invalid
    if isinstance(tree, str):
        tree = repo.tree(tree)
    # END tree conversion

    # CREATE NEW COMMIT
    new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
                     author, author_time, author_offset,
                     committer, committer_time, committer_offset,
                     message, parent_commits, conf_encoding)

    stream = BytesIO()
    new_commit._serialize(stream)
    streamlen = stream.tell()
    stream.seek(0)

    istream = repo.odb.store(IStream(cls.type, streamlen, stream))
    new_commit.binsha = istream.binsha

    if head:
        # need late import here, importing git at the very beginning throws
        # as well ...
        import git.refs
        try:
            repo.head.set_commit(new_commit, logmsg=message)
        except ValueError:
            # head is not yet set to the ref our HEAD points to
            # Happens on first commit
            master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
            repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
        # END handle empty repositories
    # END advance head handling

    return new_commit
python
{ "resource": "" }
q22096
find_worktree_git_dir
train
def find_worktree_git_dir(dotgit):
    """Search for a gitdir for this worktree."""
    try:
        statbuf = os.stat(dotgit)
    except OSError:
        return None
    if not stat.S_ISREG(statbuf.st_mode):
        return None

    try:
        lines = open(dotgit, 'r').readlines()
        for key, value in [line.strip().split(': ') for line in lines]:
            if key == 'gitdir':
                return value
    except ValueError:
        pass
    return None
python
{ "resource": "" }
q22097
find_submodule_git_dir
train
def find_submodule_git_dir(d):
    """Search for a submodule repo."""
    if is_git_dir(d):
        return d

    try:
        with open(d) as fp:
            content = fp.read().rstrip()
    except (IOError, OSError):
        # it's probably not a file
        pass
    else:
        if content.startswith('gitdir: '):
            path = content[8:]

            if Git.is_cygwin():
                ## Cygwin creates submodules prefixed with `/cygdrive/...` suffixes.
                path = decygpath(path)
            if not osp.isabs(path):
                path = osp.join(osp.dirname(d), path)
            return find_submodule_git_dir(path)
    # end handle exception
    return None
python
{ "resource": "" }
q22098
to_commit
train
def to_commit(obj):
    """Convert the given object to a commit if possible and return it"""
    if obj.type == 'tag':
        obj = deref_tag(obj)

    if obj.type != "commit":
        raise ValueError("Cannot convert object %r to type commit" % obj)
    # END verify type
    return obj
python
{ "resource": "" }
q22099
FetchInfo._from_line
train
def _from_line(cls, repo, line, fetch_line):
    """Parse information from the given line as returned by git-fetch -v
    and return a new FetchInfo object representing this information.

    We can handle a line as follows
    "%c %-*s %-*s -> %s%s"

    Where c is either ' ', !, +, -, *, or =
    ! means error
    + means success forcing update
    - means a tag was updated
    * means birth of new branch or tag
    = means the head was up to date ( and not moved )
    ' ' means a fast-forward

    fetch line is the corresponding line from FETCH_HEAD, like
    acb0fa8b94ef421ad60c8507b634759a472cd56c    not-for-merge   branch '0.1.7RC' of /tmp/tmpya0vairemote_repo"""
    match = cls._re_fetch_result.match(line)
    if match is None:
        raise ValueError("Failed to parse line: %r" % line)

    # parse lines
    control_character, operation, local_remote_ref, remote_local_ref, note = match.groups()
    try:
        new_hex_sha, fetch_operation, fetch_note = fetch_line.split("\t")   # @UnusedVariable
        ref_type_name, fetch_note = fetch_note.split(' ', 1)
    except ValueError:  # unpack error
        raise ValueError("Failed to parse FETCH_HEAD line: %r" % fetch_line)

    # parse flags from control_character
    flags = 0
    try:
        flags |= cls._flag_map[control_character]
    except KeyError:
        raise ValueError("Control character %r unknown as parsed from line %r" % (control_character, line))
    # END control char exception handling

    # parse operation string for more info - makes no sense for symbolic refs, but we parse it anyway
    old_commit = None
    is_tag_operation = False
    if 'rejected' in operation:
        flags |= cls.REJECTED
    if 'new tag' in operation:
        flags |= cls.NEW_TAG
        is_tag_operation = True
    if 'tag update' in operation:
        flags |= cls.TAG_UPDATE
        is_tag_operation = True
    if 'new branch' in operation:
        flags |= cls.NEW_HEAD
    if '...' in operation or '..' in operation:
        split_token = '...'
        if control_character == ' ':
            split_token = split_token[:-1]
        old_commit = repo.rev_parse(operation.split(split_token)[0])
    # END handle refspec

    # handle FETCH_HEAD and figure out ref type
    # If we do not specify a target branch like master:refs/remotes/origin/master,
    # the fetch result is stored in FETCH_HEAD which destroys the rule we usually
    # have. In that case we use a symbolic reference which is detached
    ref_type = None
    if remote_local_ref == "FETCH_HEAD":
        ref_type = SymbolicReference
    elif ref_type_name == "tag" or is_tag_operation:
        # the ref_type_name can be branch, whereas we are still seeing a tag operation.
        # It happens during testing, which is based on actual git operations
        ref_type = TagReference
    elif ref_type_name in ("remote-tracking", "branch"):
        # note: remote-tracking is just the first part of the 'remote-tracking branch' token.
        # We don't parse it correctly, but its enough to know what to do, and its new in git 1.7something
        ref_type = RemoteReference
    elif '/' in ref_type_name:
        # If the fetch spec looks something like this '+refs/pull/*:refs/heads/pull/*', and is thus pretty
        # much anything the user wants, we will have trouble to determine what's going on
        # For now, we assume the local ref is a Head
        ref_type = Head
    else:
        raise TypeError("Cannot handle reference type: %r" % ref_type_name)
    # END handle ref type

    # create ref instance
    if ref_type is SymbolicReference:
        remote_local_ref = ref_type(repo, "FETCH_HEAD")
    else:
        # determine prefix. Tags are usually pulled into refs/tags, they may have subdirectories.
        # It is not clear sometimes where exactly the item is, unless we have an absolute path as indicated
        # by the 'ref/' prefix. Otherwise even a tag could be in refs/remotes, which is when it will have the
        # 'tags/' subdirectory in its path.
        # We don't want to test for actual existence, but try to figure everything out analytically.
        ref_path = None
        remote_local_ref = remote_local_ref.strip()
        if remote_local_ref.startswith(Reference._common_path_default + "/"):
            # always use actual type if we get absolute paths
            # Will always be the case if something is fetched outside of refs/remotes (if its not a tag)
            ref_path = remote_local_ref
            if ref_type is not TagReference and not \
                    remote_local_ref.startswith(RemoteReference._common_path_default + "/"):
                ref_type = Reference
            # END downgrade remote reference
        elif ref_type is TagReference and 'tags/' in remote_local_ref:
            # even though its a tag, it is located in refs/remotes
            ref_path = join_path(RemoteReference._common_path_default, remote_local_ref)
        else:
            ref_path = join_path(ref_type._common_path_default, remote_local_ref)
        # END obtain refpath

        # even though the path could be within the git conventions, we make
        # sure we respect whatever the user wanted, and disabled path checking
        remote_local_ref = ref_type(repo, ref_path, check_path=False)
    # END create ref instance

    note = (note and note.strip()) or ''

    return cls(remote_local_ref, flags, note, old_commit, local_remote_ref)
python
{ "resource": "" }