_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q42800
connect
train
def connect(slug, config_loader):
    """
    Ensure .cs50.yaml and tool key exists, raises Error otherwise
    Check that all required files as per .cs50.yaml are present
    Returns tool specific portion of .cs50.yaml
    """
    with ProgressBar(_("Connecting")):
        # Parse slug
        slug = Slug(slug)

        # Get .cs50.yaml
        try:
            config = config_loader.load(_get_content(slug.org, slug.repo,
                                                     slug.branch, slug.problem / ".cs50.yaml"))
        except InvalidConfigError:
            raise InvalidSlugError(_("Invalid slug for {}. Did you mean something else?").format(config_loader.tool))

        # (removed stray debug print of the parsed config)
        if not config:
            raise InvalidSlugError(_("Invalid slug for {}. Did you mean something else?").format(config_loader.tool))

        # If config of tool is just a truthy value, config should be empty
        if not isinstance(config, dict):
            config = {}

        # Destination org defaults to the tool's name
        org = config.get("org", config_loader.tool)
        included, excluded = files(config.get("files"))

        # Check that at least 1 file is staged
        if not included:
            raise Error(_("No files in this directory are expected for submission."))

        return org, (included, excluded)
python
{ "resource": "" }
q42801
authenticate
train
def authenticate(org):
    """
    Authenticate with GitHub via SSH if possible
    Otherwise authenticate via HTTPS
    Returns an authenticated User (via yield; this is a generator-style
    context helper)
    """
    with ProgressBar(_("Authenticating")) as progress_bar:
        # SSH first; _authenticate_ssh returns None when SSH auth fails
        user = _authenticate_ssh(org)
        progress_bar.stop()
        if user is None:
            # SSH auth failed, fallback to HTTPS.
            # _authenticate_https is entered as a context manager so its
            # cleanup (credential logout on error) runs around the yield.
            with _authenticate_https(org) as user:
                yield user
        else:
            yield user
python
{ "resource": "" }
q42802
prepare
train
def prepare(tool, branch, user, included):
    """
    Prepare git for pushing
    Check that there are no permission errors
    Add necessities to git config
    Stage files
    Stage files via lfs if necessary
    Check that at least one file is staged
    """
    with ProgressBar(_("Preparing")) as progress_bar, working_area(included) as area:
        # All subsequent git invocations operate inside the temp working area
        Git.working_area = f"-C {area}"
        git = Git(Git.working_area)

        # Clone just .git folder
        try:
            _run(git.set(Git.cache)(f"clone --bare {user.repo} .git"))
        except Error:
            # NOTE(review): .format(tool) is applied to the inner literal
            # before _() translates — the translated template never sees the
            # placeholder; confirm this is intended.
            raise Error(_("Looks like {} isn't enabled for your account yet. "
                          "Go to https://cs50.me/authorize and make sure you accept any pending invitations!".format(tool)))

        # Turn the bare clone into a normal repo whose worktree is `area`
        _run(git("config --bool core.bare false"))
        _run(git(f"config --path core.worktree {area}"))

        # Restore .gitattributes from the branch if it exists; a failure
        # simply means the branch has none yet
        try:
            _run(git("checkout --force {} .gitattributes".format(branch)))
        except Error:
            pass

        # Set user name/email in repo config
        _run(git(f"config user.email {shlex.quote(user.email)}"))
        _run(git(f"config user.name {shlex.quote(user.name)}"))

        # Switch to branch without checkout
        _run(git(f"symbolic-ref HEAD refs/heads/{branch}"))

        # Git add all included files
        for f in included:
            _run(git(f"add {f}"))

        # Remove gitattributes from included
        if Path(".gitattributes").exists() and ".gitattributes" in included:
            included.remove(".gitattributes")

        # Add any oversized files through git-lfs
        _lfs_add(included, git)

        progress_bar.stop()
        yield
python
{ "resource": "" }
q42803
upload
train
def upload(branch, user, tool):
    """Commit all staged work and push it to *branch*.

    Returns a (username, commit hash) tuple.
    """
    with ProgressBar(_("Uploading")):
        # Base commit message; if the LANGUAGE environment variable is set we
        # append "[LANGUAGE]" so any remote tool can pick up the locale.
        message = _("automated commit by {}").format(tool)
        language = os.environ.get("LANGUAGE")
        if language:
            message = f"{message} [{language}]"

        # Commit + push
        git = Git(Git.working_area)
        _run(git(f"commit -m {shlex.quote(message)} --allow-empty"))
        _run(git.set(Git.cache)(f"push origin {branch}"))
        return user.name, _run(git("rev-parse HEAD"))
python
{ "resource": "" }
q42804
_run
train
def _run(command, quiet=False, timeout=None):
    """Execute *command*, returning its output with CRLF normalized to LF.

    Raises Error when the command does not finish within *timeout*.
    """
    try:
        with _spawn(command, quiet, timeout) as child:
            raw_output = child.read()
    except pexpect.TIMEOUT:
        logger.info(f"command {command} timed out")
        raise Error()
    return raw_output.strip().replace("\r\n", "\n")
python
{ "resource": "" }
q42805
_glob
train
def _glob(pattern, skip_dirs=False): """Glob pattern, expand directories, return all files that matched.""" # Implicit recursive iff no / in pattern and starts with * if "/" not in pattern and pattern.startswith("*"): files = glob.glob(f"**/{pattern}", recursive=True) else: files = glob.glob(pattern, recursive=True) # Expand dirs all_files = set() for file in files: if os.path.isdir(file) and not skip_dirs: all_files.update(set(f for f in _glob(f"{file}/**/*", skip_dirs=True) if not os.path.isdir(f))) else: all_files.add(file) # Normalize all files return {str(Path(f)) for f in all_files}
python
{ "resource": "" }
q42806
_lfs_add
train
def _lfs_add(files, git):
    """
    Add any oversized files with lfs.
    Throws error if a file is bigger than 2GB or git-lfs is not installed.
    """
    # Check for large files > 100 MB (and huge files > 2 GB)
    # https://help.github.com/articles/conditions-for-large-files/
    # https://help.github.com/articles/about-git-large-file-storage/
    larges, huges = [], []
    for file in files:
        size = os.path.getsize(file)
        # BUGFIX: test the 2 GB bound first — a >2GB file also exceeds
        # 100 MB, so the previous ordering made the "huge" branch
        # unreachable and silently routed >2GB files through git-lfs.
        if size > (2 * 1024 * 1024 * 1024):
            huges.append(file)
        elif size > (100 * 1024 * 1024):
            larges.append(file)

    # Raise Error if a file is >2GB (GitHub rejects these even via LFS)
    if huges:
        # BUGFIX: the second placeholder formerly referenced an undefined
        # name `org` (NameError); message now mirrors the larges message.
        raise Error(_("These files are too large to be submitted:\n{}\n"
                      "Remove these files from your directory "
                      "and then re-run!").format("\n".join(huges)))

    # Add large files (>100MB) with git-lfs
    if larges:
        # Raise Error if git-lfs not installed
        if not shutil.which("git-lfs"):
            raise Error(_("These files are too large to be submitted:\n{}\n"
                          "Install git-lfs (or remove these files from your directory) "
                          "and then re-run!").format("\n".join(larges)))

        # Install git-lfs for this repo
        _run(git("lfs install --local"))

        # For pre-push hook
        _run(git("config credential.helper cache"))

        # Rm previously added file, have lfs track file, add file again
        for large in larges:
            _run(git("rm --cached {}".format(shlex.quote(large))))
            _run(git("lfs track {}".format(shlex.quote(large))))
            _run(git("add {}".format(shlex.quote(large))))
        _run(git("add --force .gitattributes"))
python
{ "resource": "" }
q42807
_authenticate_ssh
train
def _authenticate_ssh(org):
    """Try authenticating via SSH.

    Returns a User on success, or None when SSH authentication is
    unavailable or denied (so the caller can fall back to HTTPS).
    """
    # Try to get username from the environment (e.g. SUBMIT50_USERNAME)
    username = os.environ.get(f"{org.upper()}_USERNAME")
    # Require ssh-agent
    child = pexpect.spawn("ssh -T git@github.com", encoding="utf8")
    # GitHub prints 'Hi {username}!...' when attempting to get shell access
    i = child.expect(["Hi (.+)! You've successfully authenticated",
                      "Enter passphrase for key",
                      "Permission denied",
                      "Are you sure you want to continue connecting"])
    child.close()
    if i == 0:
        if username is None:
            # Fall back to the username GitHub reported in its greeting
            username = child.match.groups()[0]
    else:
        # Passphrase prompt, denial, or unknown-host prompt: give up on SSH
        return None
    return User(name=username, repo=f"git@github.com:{org}/{username}")
python
{ "resource": "" }
q42808
_authenticate_https
train
def _authenticate_https(org):
    """Try authenticating via HTTPS, if succesful yields User, otherwise raises Error.

    Credentials come from git's credential cache when possible, otherwise the
    user is prompted; on any error the cached credentials are forgotten.
    """
    _CREDENTIAL_SOCKET.parent.mkdir(mode=0o700, exist_ok=True)
    try:
        # Route all git credential traffic through our private cache socket
        Git.cache = f"-c credential.helper= -c credential.helper='cache --socket {_CREDENTIAL_SOCKET}'"
        git = Git(Git.cache)

        # Get credentials from cache if possible
        with _spawn(git("credential fill"), quiet=True) as child:
            child.sendline("protocol=https")
            child.sendline("host=github.com")
            child.sendline("")
            i = child.expect(["Username for '.+'", "Password for '.+'",
                              "username=([^\r]+)\r\npassword=([^\r]+)\r\n"])
            if i == 2:
                # Cache hit: both values parsed from git's reply
                username, password = child.match.groups()
            else:
                # Cache miss: git started prompting interactively
                username = password = None
            child.close()
            child.exitstatus = 0

        # No credentials found, need to ask user
        if password is None:
            username = _prompt_username(_("GitHub username: "))
            password = _prompt_password(_("GitHub password: "))

        # Check if credentials are correct
        res = requests.get("https://api.github.com/user", auth=(username, password))

        # Check for 2-factor authentication https://developer.github.com/v3/auth/#working-with-two-factor-authentication
        if "X-GitHub-OTP" in res.headers:
            raise Error("Looks like you have two-factor authentication enabled!"
                        " Please generate a personal access token and use it as your password."
                        " See https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line for more info.")

        if res.status_code != 200:
            logger.info(res.headers)
            logger.info(res.text)
            raise Error(_("Invalid username and/or password.") if res.status_code == 401
                        else _("Could not authenticate user."))

        # Canonicalize (capitalization of) username,
        # Especially if user logged in via email address
        username = res.json()["login"]

        # Credentials are correct, best cache them
        with _spawn(git("-c credentialcache.ignoresighup=true credential approve"), quiet=True) as child:
            child.sendline("protocol=https")
            child.sendline("host=github.com")
            child.sendline(f"path={org}/{username}")
            child.sendline(f"username={username}")
            child.sendline(f"password={password}")
            child.sendline("")

        yield User(name=username, repo=f"https://{username}@github.com/{org}/{username}")
    except BaseException:
        # Some error occured while this context manager is active, best forget credentials.
        logout()
        raise
python
{ "resource": "" }
q42809
_prompt_username
train
def _prompt_username(prompt="Username: ", prefill=None):
    """Prompt the user for username.

    When *prefill* is given it is pre-typed into the input line via readline.
    Returns the stripped input, or None if the user sends EOF (ctrl-d).
    """
    if prefill:
        # Inject the prefill text into the buffer just before input() reads
        readline.set_startup_hook(lambda: readline.insert_text(prefill))
    try:
        return input(prompt).strip()
    except EOFError:
        # Move to a fresh line; falls through, implicitly returning None
        print()
    finally:
        # Always clear the startup hook so later input() calls are unaffected
        readline.set_startup_hook()
python
{ "resource": "" }
q42810
_prompt_password
train
def _prompt_password(prompt="Password: "):
    """Prompt the user for password, printing asterisks for each character"""
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    # Raw mode: read one keypress at a time, with no terminal echo
    tty.setraw(fd)

    print(prompt, end="", flush=True)
    password = []  # collected as integer byte values
    try:
        while True:
            ch = sys.stdin.buffer.read(1)[0]
            if ch in (ord("\r"), ord("\n"), 4):  # If user presses Enter or ctrl-d
                print("\r")
                break
            elif ch == 127:  # DEL (backspace)
                try:
                    password.pop()
                except IndexError:
                    # nothing typed yet, nothing to erase
                    pass
                else:
                    # erase one asterisk from the display
                    print("\b \b", end="", flush=True)
            elif ch == 3:  # ctrl-c
                print("^C", end="", flush=True)
                raise KeyboardInterrupt
            else:
                password.append(ch)
                print("*", end="", flush=True)
    finally:
        # Restore the terminal settings no matter how we exit
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

    return bytes(password).decode()
python
{ "resource": "" }
q42811
ProgressBar.stop
train
def stop(self):
    """Halt the progress bar and wait for its worker thread to finish."""
    if not self._progressing:
        return
    self._progressing = False
    self._thread.join()
python
{ "resource": "" }
q42812
make_url
train
def make_url(path, protocol=None, hosts=None):
    """Make an URL given a path, and optionally, a protocol and set of hosts
    to select from randomly.

    :param path: The Archive.org path.
    :type path: str
    :param protocol: (optional) The HTTP protocol to use; "https://" by default.
    :type protocol: str
    :param hosts: (optional) A set of hosts; one is chosen at random.
        The default host is "archive.org".
    :type hosts: iterable
    :rtype: str
    :returns: An Absolute URI.
    """
    scheme = protocol if protocol else 'https://'
    host = random.choice(hosts) if hosts else 'archive.org'
    return scheme + host + path.strip()
python
{ "resource": "" }
q42813
metadata_urls
train
def metadata_urls(identifiers, protocol=None, hosts=None):
    """An Archive.org metadata URL generator.

    :param identifiers: Archive.org identifiers to make metadata URLs for.
    :type identifiers: iterable
    :param protocol: (optional) protocol forwarded to make_url.
    :type protocol: str
    :param hosts: (optional) candidate hosts forwarded to make_url.
    :type hosts: iterable
    :returns: A generator yielding Archive.org metadata URLs.
    """
    for identifier in identifiers:
        yield make_url('/metadata/{}'.format(identifier), protocol, hosts)
python
{ "resource": "" }
q42814
lang_direction
train
def lang_direction(request):
    """
    Sets lang_direction context variable to whether the language is RTL or LTR
    """
    # Lazily cache the configured RTL language set on the function itself
    if lang_direction.rtl_langs is None:
        lang_direction.rtl_langs = getattr(settings, "RTL_LANGUAGES", set())
    direction = "ltr"
    if request.LANGUAGE_CODE in lang_direction.rtl_langs:
        direction = "rtl"
    return {"lang_direction": direction}
python
{ "resource": "" }
q42815
Lists.lists
train
def lists(self, **kwargs):
    """Gets the top-level lists available from the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('lists'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
{ "resource": "" }
q42816
Lists.movie_lists
train
def movie_lists(self, **kwargs):
    """Gets the movie lists available from the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('movie_lists'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
{ "resource": "" }
q42817
Lists.movies_box_office
train
def movies_box_office(self, **kwargs):
    """Gets the top box office earning movies from the API.

    Sorted by most recent weekend gross ticket sales.

    Args:
        limit (optional): limits the number of movies returned, default=10
        country (optional): localized data for selected country, default="us"

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('movies_box_office'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
{ "resource": "" }
q42818
Lists.movies_in_theaters
train
def movies_in_theaters(self, **kwargs):
    """Gets the movies currently in theaters from the API.

    Args:
        page_limit (optional): number of movies per page, default=16
        page (optional): results page number, default=1
        country (optional): localized data for selected country, default="us"

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('movies_in_theaters'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
{ "resource": "" }
q42819
Lists.dvd_lists
train
def dvd_lists(self, **kwargs):
    """Gets the dvd lists available from the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('dvd_lists'), kwargs)
    self._set_attrs_to_values(response)
    return response
python
{ "resource": "" }
q42820
create_regex_patterns
train
def create_regex_patterns(symbols):
    u"""create regex patterns for text, google, docomo, kddi and softbank via `symbols`

    create regex patterns for finding emoji character from text.
    the pattern character use `unicode` formatted character so you have to
    decode text which is not decoded.
    """
    # NOTE(review): Python 2 code — relies on the builtin `unicode()`.
    # One alternation pattern is built per carrier from the code points
    # present in `symbols`.
    pattern_unicode = []
    pattern_google = []
    pattern_docomo = []
    pattern_kddi = []
    pattern_softbank = []
    for x in symbols:
        if x.unicode.code:
            pattern_unicode.append(re.escape(unicode(x.unicode)))
        if x.google.code:
            pattern_google.append(re.escape(unicode(x.google)))
        if x.docomo.code:
            pattern_docomo.append(re.escape(unicode(x.docomo)))
        if x.kddi.code:
            pattern_kddi.append(re.escape(unicode(x.kddi)))
        if x.softbank.code:
            pattern_softbank.append(re.escape(unicode(x.softbank)))

    # Earlier character-class variant, kept for reference:
    # pattern_unicode = re.compile(u"[%s]" % u''.join(pattern_unicode))
    # pattern_google = re.compile(u"[%s]" % u''.join(pattern_google))
    # pattern_docomo = re.compile(u"[%s]" % u''.join(pattern_docomo))
    # pattern_kddi = re.compile(u"[%s]" % u''.join(pattern_kddi))
    # pattern_softbank = re.compile(u"[%s]" % u''.join(pattern_softbank))

    # Alternation ("a|b|c") is used so multi-character sequences match too
    pattern_unicode = re.compile(u"%s" % u'|'.join(pattern_unicode))
    pattern_google = re.compile(u"%s" % u'|'.join(pattern_google))
    pattern_docomo = re.compile(u"%s" % u'|'.join(pattern_docomo))
    pattern_kddi = re.compile(u"%s" % u'|'.join(pattern_kddi))
    pattern_softbank = re.compile(u"%s" % u'|'.join(pattern_softbank))

    return {
        # forward              reverse
        'text': (None, pattern_unicode),
        'docomo_img': (None, pattern_unicode),
        'kddi_img': (None, pattern_unicode),
        'softbank_img': (None, pattern_unicode),
        'google': (pattern_google, pattern_unicode),
        'docomo': (pattern_docomo, pattern_unicode),
        'kddi': (pattern_kddi, pattern_unicode),
        'softbank': (pattern_softbank, pattern_unicode),
    }
python
{ "resource": "" }
q42821
vtquery
train
def vtquery(apikey, checksums):
    """Performs the query dealing with errors and throttling requests.

    *checksums* may be a single string or an iterable of hashes (joined
    with ", " for a batched lookup). Returns the decoded JSON report.
    """
    data = {'apikey': apikey,
            'resource': isinstance(checksums, str) and checksums or ', '.join(checksums)}
    while 1:
        response = requests.post(VT_REPORT_URL, data=data)
        # Raises for 4xx/5xx; 200 and 204 fall through
        response.raise_for_status()
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 204:
            # 204 == request-rate limit hit: wait and retry
            logging.debug("API key request rate limit reached, throttling.")
            time.sleep(VT_THROTTLE)
        else:
            raise RuntimeError("Response status code %s" % response.status_code)
python
{ "resource": "" }
q42822
chunks
train
def chunks(iterable, size=1):
    """Lazily split *iterable* into chunk iterators of at most *size* items.

    Each yielded chunk shares the underlying iterator, so it must be
    consumed before the next chunk is requested.
    """
    source = iter(iterable)
    for head in source:
        # Re-attach the element we already pulled, then lazily take the rest
        yield chain([head], islice(source, size - 1))
python
{ "resource": "" }
q42823
VTScanner.scan
train
def scan(self, filetypes=None):
    """Iterates over the content of the disk and queries VirusTotal
    to determine whether it's malicious or not.

    filetypes is a list containing regular expression patterns.
    If given, only the files which type will match with one or more
    of the given patterns will be queried against VirusTotal.

    For each file which is unknown by VT or positive to any of its engines,
    the method yields a namedtuple:

    VTReport(path -> C:\\Windows\\System32\\infected.dll
             hash -> ab231...
             detections) -> dictionary
                            engine -> detection

    Files unknown by VirusTotal will contain the string 'unknown'
    in the detection field.
    """
    self.logger.debug("Scanning FS content.")
    checksums = self.filetype_filter(self._filesystem.checksums('/'),
                                     filetypes=filetypes)
    self.logger.debug("Querying %d objects to VTotal.", len(checksums))
    # Query VT in batches of self.batchsize entries
    for files in chunks(checksums, size=self.batchsize):
        # Invert each pair — presumably (path, hash) — into {hash: path}
        # so the hashes become the VT query resources; TODO confirm order.
        files = dict((reversed(e) for e in files))
        response = vtquery(self._apikey, files.keys())
        yield from self.parse_response(files, response)
python
{ "resource": "" }
q42824
compute_training_sizes
train
def compute_training_sizes(train_perc, class_sizes, stratified=True):
    """Compute per-class training sizes and the total test-set size.

    With stratification, every class is capped at the smallest class's
    training size so all classes contribute equally.
    """
    per_class = np.int64(np.around(train_perc * class_sizes))

    if stratified:
        print("Different classes in training set are stratified to match smallest class!")
        # cap every class at the smallest class's training size
        smallest = np.min(per_class)
        per_class = np.minimum(smallest, per_class)

        # sanity check: after capping, exactly one distinct size must remain
        if len(np.unique(per_class)) != 1:
            raise ValueError("Error in stratification of training set based on "
                             "smallest class!")

    test_total = np.int64(np.sum(class_sizes) - sum(per_class))
    return per_class, test_total
python
{ "resource": "" }
q42825
MultiDataset._load
train
def _load(self, dataset_spec): """Actual loading of datasets""" for idx, ds in enumerate(dataset_spec): self.append(ds, idx)
python
{ "resource": "" }
q42826
MultiDataset.append
train
def append(self, dataset, identifier):
    """
    Adds a dataset, if compatible with the existing ones.

    Parameters
    ----------
    dataset : MLDataset or compatible
    identifier : hashable
        String or integer or another hashable to uniquely identify this dataset
    """
    # Coerce anything MLDataset-compatible into an MLDataset
    dataset = dataset if isinstance(dataset, MLDataset) else MLDataset(dataset)

    if not self._is_init:
        # First dataset added: record the reference IDs/classes that every
        # later addition must match.
        self._ids = set(dataset.keys)
        self._classes = dataset.classes
        self._class_sizes = dataset.class_sizes
        self._num_samples = len(self._ids)
        self._modalities[identifier] = dataset.data
        self._num_features.append(dataset.num_features)

        # maintaining a no-data MLDataset internally for reuse its methods
        self._dataset = copy(dataset)
        # replacing its data with zeros
        self._dataset.data = {id_: np.zeros(1) for id_ in self._ids}

        self._is_init = True
    else:
        # this also checks for the size (num_samples)
        if set(dataset.keys) != self._ids:
            raise ValueError('Differing set of IDs in two datasets.'
                             'Unable to add this dataset to the MultiDataset.')

        if dataset.classes != self._classes:
            raise ValueError('Classes for IDs differ in the two datasets.')

        if identifier not in self._modalities:
            self._modalities[identifier] = dataset.data
            self._num_features.append(dataset.num_features)
        else:
            raise KeyError('{} already exists in MultiDataset'.format(identifier))

    # each addition should be counted, if successful
    self._modality_count += 1
python
{ "resource": "" }
q42827
MultiDataset.holdout
train
def holdout(self, train_perc=0.7, num_rep=50, stratified=True,
            return_ids_only=False, format='MLDataset'):
    """
    Builds a generator for train and test sets for cross-validation.

    Yields num_rep (train, test) splits, either as ID tuples
    (return_ids_only=True) or as per-modality feature containers.
    """
    ids_in_class = {cid: self._dataset.sample_ids_in_class(cid)
                    for cid in self._class_sizes.keys()}
    sizes_numeric = np.array([len(ids_in_class[cid])
                              for cid in ids_in_class.keys()])
    size_per_class, total_test_count = compute_training_sizes(
        train_perc, sizes_numeric, stratified=stratified)

    if len(self._class_sizes) != len(size_per_class):
        raise ValueError('size spec differs in num elements with class sizes!')

    for rep in range(num_rep):
        print('rep {}'.format(rep))
        train_set = list()
        for index, (cls_id, class_size) in enumerate(self._class_sizes.items()):
            # shuffling the IDs each time
            random.shuffle(ids_in_class[cls_id])

            subset_size = max(0, min(class_size, size_per_class[index]))
            if subset_size < 1 or class_size < 1:
                warnings.warn('No subjects from class {} were selected.'
                              ''.format(cls_id))
            else:
                # NOTE(review): slices by size_per_class[index] rather than
                # the clamped subset_size computed above — confirm intended.
                subsets_this_class = ids_in_class[cls_id][0:size_per_class[index]]
                train_set.extend(subsets_this_class)

        # this ensures both are mutually exclusive!
        test_set = list(self._ids - set(train_set))

        if return_ids_only:
            # when only IDs are required, without associated features
            # returning tuples to prevent accidental changes
            yield tuple(train_set), tuple(test_set)
        else:
            yield self._get_data(train_set, format), self._get_data(test_set, format)
python
{ "resource": "" }
q42828
MultiDataset._get_data
train
def _get_data(self, id_list, format='MLDataset'): """Returns the data, from all modalities, for a given list of IDs""" format = format.lower() features = list() # returning a dict would be better if AutoMKL() can handle it for modality, data in self._modalities.items(): if format in ('ndarray', 'data_matrix'): # turning dict of arrays into a data matrix # this is arguably worse, as labels are difficult to pass subset = np.array(itemgetter(*id_list)(data)) elif format in ('mldataset', 'pyradigm'): # getting container with fake data subset = self._dataset.get_subset(id_list) # injecting actual features subset.data = { id_: data[id_] for id_ in id_list } else: raise ValueError('Invalid output format - choose only one of ' 'MLDataset or data_matrix') features.append(subset) return features
python
{ "resource": "" }
q42829
ListCommand.can_be_updated
train
def can_be_updated(cls, dist, latest_version):
    """Determine whether package can be updated or not.

    Returns False when any installed dependant pins *dist* with a version
    spec that *latest_version* does not satisfy; True otherwise.
    """
    scheme = get_scheme('default')
    name = dist.project_name
    dependants = cls.get_dependants(name)
    for dependant in dependants:
        requires = dependant.requires()
        for requirement in cls.get_requirement(name, requires):
            req = parse_requirement(requirement)
            # Ignore error if version in requirement spec can't be parsed
            try:
                matcher = scheme.matcher(req.requirement)
            except UnsupportedVersionError:
                continue
            if not matcher.match(str(latest_version)):
                # a dependant pins this package below latest_version
                return False
    return True
python
{ "resource": "" }
q42830
ListCommand.get_dependants
train
def get_dependants(cls, dist):
    """Yield installed packages that declare a requirement on *dist*
    (a project name); matching is case-insensitive."""
    wanted = dist.lower()
    for package in cls.installed_distributions:
        for requirement in package.requires():
            if requirement.project_name.lower() == wanted:
                yield package
python
{ "resource": "" }
q42831
ListCommand.get_requirement
train
def get_requirement(name, requires):
    """Yield matching requirement strings for *name*.

    The strings use the format demanded by
    pip._vendor.distlib.util.parse_requirement. Normally exactly one
    requirement matches, but all matches are yielded in case a package
    splits its requirements over multiple entries.
    """
    target = name.lower()
    for require in requires:
        # only versioned (non-empty spec) requirements are interesting
        if require.project_name.lower() == target and require.specs:
            yield '%s (%s)' % (require.project_name.replace('-', '_'),
                               require.specifier)
python
{ "resource": "" }
q42832
ListCommand.output_package
train
def output_package(dist):
    """Return a display string "name (version[, location])" for *dist*;
    editable installs also show their location."""
    details = [str(dist.version)]
    if dist_is_editable(dist):
        details.append(str(dist.location))
    return '%s (%s)' % (dist.project_name, ', '.join(details))
python
{ "resource": "" }
q42833
ListCommand.run_outdated
train
def run_outdated(cls, options):
    """Print outdated user packages, optionally updating them."""
    latest_versions = sorted(
        cls.find_packages_latest_versions(cls.options),
        key=lambda p: p[0].project_name.lower())
    for dist, latest_version, typ in latest_versions:
        if latest_version > dist.parsed_version:
            if options.all:
                # show everything outdated, no filtering
                pass
            elif options.pinned:
                # show only packages held back by a dependant's pin
                if cls.can_be_updated(dist, latest_version):
                    continue
            elif not options.pinned:
                # default: show only packages that can actually be updated
                if not cls.can_be_updated(dist, latest_version):
                    continue
            elif options.update:
                # NOTE(review): unreachable — the two branches above cover
                # both truth values of options.pinned; confirm intent.
                print(dist.project_name if options.brief else
                      'Updating %s to Latest: %s [%s]' %
                      (cls.output_package(dist), latest_version, typ))
                main(['install', '--upgrade'] +
                     (['--user'] if ENABLE_USER_SITE else []) + [dist.key])
                continue
            print(dist.project_name if options.brief else
                  '%s - Latest: %s [%s]' %
                  (cls.output_package(dist), latest_version, typ))
python
{ "resource": "" }
q42834
softmax
train
def softmax(x):
    """Row-wise softmax of a 2-D array.

    Can be replaced once scipy 1.3 is released, although numeric stability
    should be checked. The global max is subtracted before exponentiating
    to avoid overflow; the result is unchanged since softmax is
    shift-invariant per row.
    """
    shifted = np.exp(x - np.max(x))
    row_totals = shifted.sum(axis=1)
    return shifted / row_totals[:, None]
python
{ "resource": "" }
q42835
BaseBoosting.iter_predict
train
def iter_predict(self, X, include_init=False):
    """Returns the predictions for ``X`` at every stage of the boosting procedure.

    Args:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples. Sparse matrices are accepted only if they are
            supported by the weak model.
        include_init (bool, default=False): If ``True`` then the prediction
            from ``init_estimator`` will also be returned.

    Returns:
        iterator of arrays of shape (n_samples,) containing the predicted
        values at each stage
    """
    utils.validation.check_is_fitted(self, 'init_estimator_')
    X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None,
                          force_all_finite=False)

    y_pred = self.init_estimator_.predict(X)

    # The user decides if the initial prediction should be included or not
    if include_init:
        yield y_pred

    for estimators, line_searchers, cols in itertools.zip_longest(
            self.estimators_, self.line_searchers_, self.columns_):

        for i, (estimator, line_searcher) in enumerate(
                itertools.zip_longest(estimators, line_searchers or [])):

            # If we used column sampling then we have to make sure the
            # columns of X are arranged in the correct order
            if cols is None:
                direction = estimator.predict(X)
            else:
                direction = estimator.predict(X[:, cols])

            if line_searcher:
                direction = line_searcher.update(direction)

            # accumulate this stage's contribution into output column i
            y_pred[:, i] += self.learning_rate * direction

        # NOTE(review): y_pred is mutated in place and re-yielded each stage;
        # callers must copy if they keep references across iterations.
        yield y_pred
python
{ "resource": "" }
q42836
BaseBoosting.predict
train
def predict(self, X):
    """Returns the predictions for ``X``.

    Simply exhausts ``iter_predict`` and returns the final output.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples. Sparse matrices are accepted only if they
            are supported by the weak model.

    Returns:
        array of shape (n_samples,) containing the predicted values.
    """
    # A maxlen-1 deque keeps only the last yielded prediction
    tail = collections.deque(self.iter_predict(X), maxlen=1)
    return tail.pop()
python
{ "resource": "" }
q42837
BoostingClassifier.iter_predict_proba
train
def iter_predict_proba(self, X, include_init=False):
    """Returns the predicted probabilities for ``X`` at every stage of the
    boosting procedure.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples. Sparse matrices are accepted only if they are
            supported by the weak model.
        include_init (bool, default=False): If ``True`` then the prediction
            from ``init_estimator`` will also be returned.

    Returns:
        iterator of arrays of shape (n_samples, n_classes) containing the
        predicted probabilities at each stage
    """
    utils.validation.check_is_fitted(self, 'init_estimator_')
    X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None,
                          force_all_finite=False)

    # Single buffer reused across stages — callers must copy if they keep
    # a reference between iterations.
    probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)

    for y_pred in super().iter_predict(X, include_init=include_init):
        if len(self.classes_) == 2:
            # binary case: one raw-score column, squashed through a sigmoid
            probas[:, 1] = sigmoid(y_pred[:, 0])
            probas[:, 0] = 1. - probas[:, 1]
        else:
            # multiclass: one column per class, normalized via softmax
            probas[:] = softmax(y_pred)
        yield probas
python
{ "resource": "" }
q42838
BoostingClassifier.iter_predict
train
def iter_predict(self, X, include_init=False):
    """Yields the predicted classes for ``X`` at every stage of the
    boosting procedure.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples. Sparse matrices are accepted only if they
            are supported by the weak model.
        include_init (bool, default=False): If ``True`` then the prediction
            from ``init_estimator`` will also be returned.

    Returns:
        iterator of arrays of shape (n_samples, n_classes) containing the
        predicted classes at each stage.
    """
    proba_stream = self.iter_predict_proba(X, include_init=include_init)
    for stage_probas in proba_stream:
        # pick the most probable class, then map back to original labels
        most_likely = np.argmax(stage_probas, axis=1)
        yield self.encoder_.inverse_transform(most_likely)
python
{ "resource": "" }
q42839
BoostingClassifier.predict_proba
train
def predict_proba(self, X):
    """Returns the predicted probabilities for ``X``.

    Exhausts ``iter_predict_proba`` and returns its final output.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples. Sparse matrices are accepted only if they
            are supported by the weak model.

    Returns:
        array of shape (n_samples, n_classes) with predicted probabilities.
    """
    # A maxlen-1 deque keeps only the last yielded array
    tail = collections.deque(self.iter_predict_proba(X), maxlen=1)
    return tail.pop()
python
{ "resource": "" }
q42840
PandocAttributes.parse_pandoc
train
def parse_pandoc(self, attrs):
    """Unpack a pandoc attribute triple into (id, classes, kvs)."""
    ident = attrs[0]
    classes = attrs[1]
    # key-value pairs become an ordered mapping
    return ident, classes, OrderedDict(attrs[2])
python
{ "resource": "" }
q42841
PandocAttributes.parse_markdown
train
def parse_markdown(self, attr_string):
    """Read markdown attributes.

    Splits the brace-delimited attribute string into tokens, then sorts
    them into an id ("#..."), classes ("...."), the special "-" marker
    (mapped to class "unnumbered") and key=value pairs.
    """
    attr_string = attr_string.strip('{}')
    splitter = re.compile(self.split_regex(separator=self.spnl))
    # odd-indexed elements of the split are the tokens themselves
    attrs = splitter.split(attr_string)[1::2]

    # match single word attributes e.g. ```python
    if len(attrs) == 1 \
            and not attr_string.startswith(('#', '.')) \
            and '=' not in attr_string:
        return '', [attr_string], OrderedDict()

    try:
        # first "#..." token wins as the id
        id = [a[1:] for a in attrs if a.startswith('#')][0]
    except IndexError:
        id = ''

    classes = [a[1:] for a in attrs if a.startswith('.')]
    special = ['unnumbered' for a in attrs if a == '-']
    classes.extend(special)

    kvs = OrderedDict(a.split('=', 1) for a in attrs if '=' in a)

    return id, classes, kvs
python
{ "resource": "" }
q42842
PandocAttributes.parse_html
train
def parse_html(self, attr_string):
    """Read a html string to attributes.

    Parses tokens such as ``id="x"``, ``class="a b"`` and ``key=value``
    into the ``(id, classes, kvs)`` triple.
    """
    # NOTE(review): split_regex/spnl are defined on the class (not visible
    # here); the [1::2] slice keeps captured tokens, drops separators.
    splitter = re.compile(self.split_regex(separator=self.spnl))
    attrs = splitter.split(attr_string)[1::2]

    idre = re.compile(r'''id=["']?([\w ]*)['"]?''')
    clsre = re.compile(r'''class=["']?([\w ]*)['"]?''')

    id_matches = [idre.search(a) for a in attrs]
    cls_matches = [clsre.search(a) for a in attrs]

    # First id=... token wins; missing id falls back to ''.
    try:
        id = [m.groups()[0] for m in id_matches if m][0]
    except IndexError:
        id = ''

    # NOTE(review): unlike the id lookup above, this [0] raises IndexError
    # when no class attribute is present — confirm callers guarantee one.
    classes = [m.groups()[0] for m in cls_matches if m][0].split()

    # Any token containing '-' marks the attributes as 'unnumbered'.
    special = ['unnumbered' for a in attrs if '-' in a]
    classes.extend(special)

    kvs = [a.split('=', 1) for a in attrs if '=' in a]
    kvs = OrderedDict((k, v) for k, v in kvs if k not in ('id', 'class'))

    return id, classes, kvs
python
{ "resource": "" }
q42843
PandocAttributes.parse_dict
train
def parse_dict(self, attrs):
    """Read a dict to attributes.

    Accepts ``None`` or a mapping with optional 'id' and 'classes' keys;
    every other key becomes a key/value attribute.
    """
    data = attrs if attrs else {}
    extras = OrderedDict()
    for key, value in data.items():
        if key != "id" and key != "classes":
            extras[key] = value
    return data.get("id", ""), data.get("classes", []), extras
python
{ "resource": "" }
q42844
PandocAttributes.to_markdown
train
def to_markdown(self, format='{id} {classes} {kvs}', surround=True):
    """Render the attributes as a markdown attribute string.

    The ``format`` argument controls the ordering of the three parts;
    ``surround`` wraps the result in braces.
    """
    ident = '#' + self.id if self.id else ''
    classes = ' '.join('.' + cls for cls in self.classes)
    kvs = ' '.join('{}={}'.format(key, val) for key, val in self.kvs.items())
    attrs = format.format(id=ident, classes=classes, kvs=kvs).strip()
    return '{' + attrs + '}' if surround else attrs
python
{ "resource": "" }
q42845
PandocAttributes.to_html
train
def to_html(self):
    """Render the attributes as an html attribute string."""
    ident, classes, kvs = self.id, self.classes, self.kvs
    id_part = 'id="{}"'.format(ident) if ident else ''
    class_part = 'class="{}"'.format(' '.join(classes)) if classes else ''
    kv_part = ' '.join('{}={}'.format(key, val) for key, val in kvs.items())
    # Join all three parts, then strip the spaces left by empty parts.
    return ' '.join((id_part, class_part, kv_part)).strip()
python
{ "resource": "" }
q42846
PandocAttributes.to_dict
train
def to_dict(self):
    """Render the attributes as a plain dictionary.

    Key/value attributes are merged on top of 'id' and 'classes', so a
    kv entry named 'id' or 'classes' takes precedence.
    """
    result = {'id': self.id, 'classes': self.classes}
    for key, value in self.kvs.items():
        result[key] = value
    return result
python
{ "resource": "" }
q42847
Rocket.from_socket
train
def from_socket(controller, host=None, port=None, track_path=None, log_level=logging.ERROR):
    """Create rocket instance using socket connector.

    Args:
        controller: playback controller driving the sync tracks.
        host: host of the Rocket editor (None -> connector default).
        port: port of the Rocket editor (None -> connector default).
        track_path: optional directory used by Rocket for track files.
        log_level: logging level for the new instance.

    Returns:
        Rocket: instance whose ``connector`` talks to the editor over a socket.
    """
    rocket = Rocket(controller, track_path=track_path, log_level=log_level)
    # The connector shares the instance's tracks container, so data received
    # over the socket lands in the same tracks the caller reads from.
    rocket.connector = SocketConnector(controller=controller, tracks=rocket.tracks, host=host, port=port)
    return rocket
python
{ "resource": "" }
q42848
Rocket.value
train
def value(self, name):
    """Return the value of the named track at the controller's current row."""
    track = self.tracks.get(name)
    return track.row_value(self.controller.row)
python
{ "resource": "" }
q42849
compare_filesystems
train
def compare_filesystems(fs0, fs1, concurrent=False):
    """Compares the two given filesystems.

    fs0 and fs1 are two mounted GuestFS instances containing the two
    disks to be compared. When ``concurrent`` is True the two disks are
    hashed by two separate workers, speeding things up on multiple CPUs.

    Returns a dictionary containing files created, removed and modified.

    {'created_files': [<files in fs1 and not in fs0>],
     'deleted_files': [<files in fs0 and not in fs1>],
     'modified_files': [<files in both fs0 and fs1 but different>]}
    """
    if concurrent:
        # Launch both hashing jobs before waiting on either result.
        pending = [concurrent_hash_filesystem(fs0), concurrent_hash_filesystem(fs1)]
        files0, files1 = pending[0].result(), pending[1].result()
    else:
        files0, files1 = hash_filesystem(fs0), hash_filesystem(fs1)

    return file_comparison(files0, files1)
python
{ "resource": "" }
q42850
file_comparison
train
def file_comparison(files0, files1):
    """Compares two dictionaries of files returning their difference.

    {'created_files': [<files in files1 and not in files0>],
     'deleted_files': [<files in files0 and not in files1>],
     'modified_files': [<files in both files0 and files1 but different>]}
    """
    created = []
    modified = []

    for path, digest in files1.items():
        if path not in files0:
            created.append({'path': path, 'sha1': digest})
        elif digest != files0[path]:
            modified.append({'path': path,
                             'original_sha1': files0[path],
                             'sha1': digest})

    deleted = [{'path': path, 'original_sha1': digest}
               for path, digest in files0.items() if path not in files1]

    return {'created_files': created,
            'deleted_files': deleted,
            'modified_files': modified}
python
{ "resource": "" }
q42851
extract_files
train
def extract_files(filesystem, files, path):
    """Extracts requested files.

    files must be a list of files in the format
    {"C:\\Windows\\System32\\NTUSER.DAT": "sha1_hash"} for windows
    {"/home/user/text.txt": "sha1_hash"} for other FS.

    files will be extracted into path which must exist beforehand.

    Returns two dictionaries:
    {"sha1": "/local/path/sha1"} files successfully extracted
    {"sha1": "C:\\..\\text.txt"} files which could not be extracted windows
    {"sha1": "/../text.txt"} files which could not be extracted linux
    """
    extracted_files = {}
    failed_extractions = {}

    for file_to_extract in files:
        source = file_to_extract['path']
        sha1 = file_to_extract['sha1']
        destination = Path(path, sha1)
        # Fix: always store plain strings; previously already-extracted
        # files were returned as Path objects while fresh ones were str.
        destination_str = str(destination)

        if destination.exists():
            # Already extracted in a previous run: reuse it.
            extracted_files[sha1] = destination_str
            continue

        try:
            filesystem.download(source, destination_str)
        except RuntimeError:
            failed_extractions[sha1] = source
        else:
            extracted_files[sha1] = destination_str

    return extracted_files, failed_extractions
python
{ "resource": "" }
q42852
registry_comparison
train
def registry_comparison(registry0, registry1):
    """Compares two dictionaries of registry keys returning their difference.

    Each input maps a key path to ``(timestamp, values)`` where ``values``
    is a sequence of value tuples (as built by parse_registries).

    Returns:
        dict: with 'created_keys', 'deleted_keys', 'created_values',
        'deleted_values' and 'modified_values' entries; the value-level
        entries map key path -> (timestamp, diffed values).
    """
    comparison = {'created_keys': {},
                  'deleted_keys': [],
                  'created_values': {},
                  'deleted_values': {},
                  'modified_values': {}}

    for key, info in registry1.items():
        if key in registry0:
            # Key on both sides: diff its values only if they changed.
            if info[1] != registry0[key][1]:
                created, deleted, modified = compare_values(
                    registry0[key][1], info[1])
                if created:
                    comparison['created_values'][key] = (info[0], created)
                if deleted:
                    comparison['deleted_values'][key] = (info[0], deleted)
                if modified:
                    comparison['modified_values'][key] = (info[0], modified)
        else:
            comparison['created_keys'][key] = info

    # Keys only present in the first registry were deleted.
    for key in registry0.keys():
        if key not in registry1:
            comparison['deleted_keys'].append(key)

    return comparison
python
{ "resource": "" }
q42853
compare_values
train
def compare_values(values0, values1):
    """Compares all the values of a single registry key.

    Args:
        values0: iterable of (name, type, value) tuples from the first key.
        values1: iterable of (name, type, value) tuples from the second key.

    Returns:
        tuple: (created, deleted, modified) lists of (name, type, value)
        tuples; ``modified`` reports the original (first key) tuple.
    """
    values0 = {v[0]: v[1:] for v in values0}
    values1 = {v[0]: v[1:] for v in values1}

    created = [(k, v[0], v[1]) for k, v in values1.items() if k not in values0]
    deleted = [(k, v[0], v[1]) for k, v in values0.items() if k not in values1]
    # Fix: only values present on BOTH sides count as modified. Previously
    # `v != values1.get(k, None)` was also true for every deleted value,
    # so deleted entries were double-reported as modified.
    modified = [(k, v[0], v[1]) for k, v in values0.items()
                if k in values1 and v != values1[k]]

    return created, deleted, modified
python
{ "resource": "" }
q42854
compare_hives
train
def compare_hives(fs0, fs1):
    """Compares all the windows registry hive files returning those
    which differ.
    """
    all_hives = chain(registries_path(fs0.fsroot), user_registries(fs0, fs1))
    # A hive changed iff its checksum differs between the two disks.
    return [path for path in all_hives
            if fs0.checksum(path) != fs1.checksum(path)]
python
{ "resource": "" }
q42855
user_registries
train
def user_registries(fs0, fs1):
    """Yield the user registry hives present on both FileSystems."""
    users_dir = '{}Users'.format(fs0.fsroot)
    for user in fs0.ls(users_dir):
        for hive_path in user_registries_path(fs0.fsroot, user):
            # Only hives that also exist on the second disk are comparable.
            if fs1.exists(hive_path):
                yield hive_path
python
{ "resource": "" }
q42856
files_type
train
def files_type(fs0, fs1, files):
    """Annotate the comparison report with each file's type.

    Deleted files are inspected on the first disk, created and modified
    ones on the second.
    """
    for meta in files['deleted_files']:
        meta['type'] = fs0.file(meta['path'])

    new_or_changed = files['created_files'] + files['modified_files']
    for meta in new_or_changed:
        meta['type'] = fs1.file(meta['path'])

    return files
python
{ "resource": "" }
q42857
files_size
train
def files_size(fs0, fs1, files):
    """Annotate the comparison report with each file's size.

    Deleted files are measured on the first disk, created and modified
    ones on the second.
    """
    for meta in files['deleted_files']:
        meta['size'] = fs0.stat(meta['path'])['size']

    new_or_changed = files['created_files'] + files['modified_files']
    for meta in new_or_changed:
        meta['size'] = fs1.stat(meta['path'])['size']

    return files
python
{ "resource": "" }
q42858
parse_registries
train
def parse_registries(filesystem, registries):
    """Returns a dictionary with the content of the given registry hives.

    {"\\Registry\\Key\\", (("ValueKey", "ValueType", ValueValue))}

    Args:
        filesystem: mounted filesystem wrapper used to download the hives.
        registries: iterable of hive paths inside the guest filesystem.

    Returns:
        dict: key path -> (timestamp, values).
    """
    results = {}

    for path in registries:
        # buffering=0 so downloaded bytes hit the file without an explicit
        # flush before RegistryHive opens it; the temp file is removed on
        # exiting the with block.
        with NamedTemporaryFile(buffering=0) as tempfile:
            filesystem.download(path, tempfile.name)

            registry = RegistryHive(tempfile.name)
            # Re-root the parsed keys under the hive's registry mount point
            # (registry_root is a helper defined elsewhere in this module).
            registry.rootkey = registry_root(path)

            results.update({k.path: (k.timestamp, k.values)
                            for k in registry.keys()})

    return results
python
{ "resource": "" }
q42859
makedirs
train
def makedirs(path):
    """Creates the directory tree if non existing.

    Args:
        path (str or Path): directory to create, parents included.
    """
    # exist_ok makes the call race-free: the previous exists()/mkdir()
    # pair could raise if the directory appeared in between the two calls.
    Path(path).mkdir(parents=True, exist_ok=True)
python
{ "resource": "" }
q42860
DiskComparator.compare
train
def compare(self, concurrent=False, identify=False, size=False):
    """Compares the two disks according to flags.

    Generates the following report:

    ::

        {'created_files': [{'path': '/file/in/disk1/not/in/disk0',
                            'sha1': 'sha1_of_the_file'}],
         'deleted_files': [{'path': '/file/in/disk0/not/in/disk1',
                            'original_sha1': 'sha1_of_the_file'}],
         'modified_files': [{'path': '/file/both/disks/but/different',
                             'sha1': 'sha1_of_the_file_on_disk0',
                             'original_sha1': 'sha1_of_the_file_on_disk0'}]}

    If concurrent is set to True, the logic will use multiple CPUs to
    speed up the process.

    The identify and size keywords will add respectively the type
    and the size of the files to the results.
    """
    self.logger.debug("Comparing FS contents.")
    results = compare_filesystems(self.filesystems[0], self.filesystems[1],
                                  concurrent=concurrent)

    if identify:
        # Fixed log typo: previously read "Gatering".
        self.logger.debug("Gathering file types.")
        results = files_type(self.filesystems[0], self.filesystems[1], results)

    if size:
        self.logger.debug("Gathering file sizes.")
        results = files_size(self.filesystems[0], self.filesystems[1], results)

    return results
python
{ "resource": "" }
q42861
DiskComparator.extract
train
def extract(self, disk, files, path='.'):
    """Extracts the given files from the given disk.

    Disk must be an integer (1 or 2) indicating from which of the two
    disks to extract. Files must be a list of dictionaries containing
    the keys 'path' and 'sha1'. Files are extracted into path and named
    after their sha1.

    Returns a dictionary.

    {'extracted_files': [<sha1>, <sha1>],
     'extraction_errors': [<sha1>, <sha1>]}
    """
    self.logger.debug("Extracting files.")
    extracted, failed = self._extract_files(disk, files, path)
    return {'extracted_files': list(extracted),
            'extraction_errors': list(failed)}
python
{ "resource": "" }
q42862
ComodoTLSService._create_error
train
def _create_error(self, status_code):
    """Construct an error message in jsend format.

    :param int status_code: The status code to translate into an error message
    :return: A dictionary in jsend format with the error and the code
    :rtype: dict
    """
    message = ComodoCA.status_code[status_code]
    return jsend.error(message=message, code=status_code)
python
{ "resource": "" }
q42863
ComodoTLSService.get_cert_types
train
def get_cert_types(self):
    """Collect the certificate types that are available to the customer.

    :return: A list of dictionaries of certificate types
    :rtype: list
    """
    response = self.client.service.getCustomerCertTypes(authData=self.auth)

    # statusCode 0 means success; anything else is mapped to a jsend error.
    if response.statusCode != 0:
        return self._create_error(response.statusCode)

    return jsend.success({'cert_types': response.types})
python
{ "resource": "" }
q42864
ComodoTLSService.collect
train
def collect(self, cert_id, format_type):
    """Poll for certificate availability after submission.

    :param int cert_id: The certificate ID
    :param str format_type: The format type to use (example: 'X509 PEM Certificate only')
    :return: The certificate_id or the certificate depending on whether the
        certificate is ready (check status code)
    :rtype: dict
    """
    response = self.client.service.collect(
        authData=self.auth, id=cert_id,
        formatType=ComodoCA.format_type[format_type])

    # statusCode 2: certificate issued and ready for collection.
    if response.statusCode == 2:
        return jsend.success({'certificate': response.SSL.certificate,
                              'certificate_status': 'issued',
                              'certificate_id': cert_id})

    # statusCode 0: request accepted but the certificate is not ready yet.
    if response.statusCode == 0:
        return jsend.fail({'certificate_id': cert_id, 'certificate': '',
                           'certificate_status': 'pending'})

    # Any other status code is an error.
    return self._create_error(response.statusCode)
python
{ "resource": "" }
q42865
ComodoTLSService.submit
train
def submit(self, cert_type_name, csr, revoke_password, term, subject_alt_names='', server_type='OTHER'):
    """Submit a certificate request to Comodo.

    :param string cert_type_name: The full cert type name (Example: 'PlatinumSSL Certificate')
                                  The supported certificate types for your account can be
                                  obtained with the get_cert_types() method.
    :param string csr: The Certificate Signing Request (CSR)
    :param string revoke_password: A password for certificate revocation
    :param int term: The length, in years, for the certificate to be issued
    :param string subject_alt_names: Subject Alternative Names separated by a ",".
    :param string server_type: The type of server for the TLS certificate e.g 'Apache/ModSSL'
                               full list available in ComodoCA.server_type (Default: OTHER)
    :return: The certificate_id and the normal status messages for errors.
    :rtype: dict
    """
    cert_types = self.get_cert_types()

    # If collection of cert types fails we simply pass the error back.
    if cert_types['status'] == 'error':
        return cert_types

    # We need to pass the entire cert type definition back to Comodo,
    # not just the name.
    cert_type_def = None
    for cert_type in cert_types['data']['cert_types']:
        if cert_type.name == cert_type_name:
            cert_type_def = cert_type
            break

    # Fix: an unknown name previously fell through to enroll() and raised
    # UnboundLocalError; report a proper jsend error instead.
    if cert_type_def is None:
        return jsend.error(message='Unknown certificate type: {}'.format(cert_type_name))

    result = self.client.service.enroll(authData=self.auth, orgId=self.org_id,
                                        secretKey=self.secret_key, csr=csr,
                                        phrase=revoke_password,
                                        subjAltNames=subject_alt_names,
                                        certType=cert_type_def, numberServers=1,
                                        serverType=ComodoCA.formats[server_type],
                                        term=term, comments='')

    # Anything greater than 0 is the certificate ID.
    if result > 0:
        return jsend.success({'certificate_id': result})

    # Anything else is an error code.
    return self._create_error(result)
python
{ "resource": "" }
q42866
L1Loss.gradient
train
def gradient(self, y_true, y_pred):
    """Returns the gradient of the L1 loss with respect to each prediction.

    Example:

        >>> import starboost as sb
        >>> y_true = [0, 0, 1]
        >>> y_pred = [0.3, 0, 0.8]
        >>> sb.losses.L1Loss().gradient(y_true, y_pred)
        array([ 1.,  0., -1.])
    """
    residuals = np.subtract(y_pred, y_true)
    return np.sign(residuals)
python
{ "resource": "" }
q42867
get_parents
train
def get_parents():
    """Return sorted list of names of packages without dependants."""
    # All installed distributions (optionally restricted to the user site).
    distributions = get_installed_distributions(user_only=ENABLE_USER_SITE)
    # Lower-cased names of everything installed...
    remaining = {d.project_name.lower() for d in distributions}
    # ...and of everything some other package requires.
    requirements = {r.project_name.lower() for d in distributions for r in d.requires()}
    # Packages nothing depends on are the top-level ("parent") installs.
    return get_realnames(remaining - requirements)
python
{ "resource": "" }
q42868
get_realnames
train
def get_realnames(packages):
    """Return list of unique case-correct package names.

    Packages are listed in a case-insensitive sorted order.
    """
    canonical = {get_distribution(name).project_name for name in packages}
    return sorted(canonical, key=str.lower)
python
{ "resource": "" }
q42869
OpenIdMixin.authenticate_redirect
train
def authenticate_redirect(self, callback_uri=None, ask_for=["name", "email", "language", "username"]):
    """
    Performs a redirect to the authentication URL for this service.
    After authentication, the service will redirect back to the given
    callback URI.

    We request the given attributes for the authenticated user by
    default (name, email, language, and username). If you don't need
    all those attributes for your app, you can request fewer with
    the |ask_for| keyword argument.
    """
    # NOTE(review): mutable default argument; harmless while it is only
    # read here, but a tuple default would be safer.
    callback_uri = callback_uri or request.url
    args = self._openid_args(callback_uri, ax_attrs=ask_for)
    # The endpoint may already carry a query string; append accordingly.
    return redirect(self._OPENID_ENDPOINT + ("&" if "?" in self._OPENID_ENDPOINT else "?") + urllib.urlencode(args))
python
{ "resource": "" }
q42870
GoogleAuth._on_auth
train
def _on_auth(self, user):
    """
    This is called when login with OpenID succeeded and it's not
    necessary to figure out if this is the users's first login or not.
    """
    app = current_app._get_current_object()
    if not user:
        # Google auth failed: notify listeners then abort with 403.
        login_error.send(app, user=None)
        abort(403)
    # Persist the authenticated user and notify login listeners.
    session["openid"] = user
    login.send(app, user=user)
    # Prefer the explicit ?next= target, then the referrer, then root.
    return redirect(request.args.get("next", None) or request.referrer or "/")
python
{ "resource": "" }
q42871
GoogleAuth.required
train
def required(self, fn):
    """Request decorator. Forces authentication.

    Unauthenticated requests are redirected to this blueprint's login
    view with ``next`` pointing back at the original URL.
    """
    @functools.wraps(fn)
    def decorated(*args, **kwargs):
        if (not self._check_auth()
                # Don't try to force authentication if the request is part
                # of the authentication process - otherwise we end up in a
                # loop.
                and request.blueprint != self.blueprint.name):
            return redirect(url_for("%s.login" % self.blueprint.name,
                                    next=request.url))

        return fn(*args, **kwargs)
    return decorated
python
{ "resource": "" }
q42872
parse_journal_file
train
def parse_journal_file(journal_file):
    """Iterates over the journal's file taking care of paddings.

    Yields one UsnRecord per journal entry; a CorruptedUsnRecord
    placeholder (carrying the record index) is yielded for entries that
    fail to parse.
    """
    counter = count()

    for block in read_next_block(journal_file):
        # Journal blocks are zero-padded; strip NULs keeping alignment.
        block = remove_nullchars(block)

        while len(block) > MIN_RECORD_SIZE:
            header = RECORD_HEADER.unpack_from(block)
            size = header[0]

            try:
                yield parse_record(header, block[:size])
                next(counter)
            except RuntimeError:
                # Record the index of the corrupted entry.
                yield CorruptedUsnRecord(next(counter))
            finally:
                # Advance past this record whether or not it parsed.
                block = remove_nullchars(block[size:])

        # Rewind the unconsumed tail so the next read picks it up again.
        journal_file.seek(- len(block), 1)
python
{ "resource": "" }
q42873
parse_record
train
def parse_record(header, record):
    """Parses a record according to its version.

    Args:
        header: (length, major_version, minor_version) tuple.
        record: raw bytes of the record.

    Raises:
        RuntimeError: when the version is unknown (KeyError on the
        RECORD_PARSER dispatch table) or the payload is truncated
        (struct.error while unpacking).
    """
    major_version = header[1]

    try:
        return RECORD_PARSER[major_version](header, record)
    except (KeyError, struct.error) as error:
        raise RuntimeError("Corrupted USN Record") from error
python
{ "resource": "" }
q42874
usn_v2_record
train
def usn_v2_record(header, record):
    """Extracts USN V2 record information.

    Args:
        header: (length, major_version, minor_version) from RECORD_HEADER.
        record: raw bytes of the full record, header included.

    Returns:
        UsnRecord with the decoded fields.
    """
    length, major_version, minor_version = header
    fields = V2_RECORD.unpack_from(record, RECORD_HEADER.size)

    return UsnRecord(length,
                     float('{}.{}'.format(major_version, minor_version)),
                     fields[0] | fields[1] << 16,  # 6 bytes little endian mft
                     fields[2],  # 2 bytes little endian mft sequence
                     fields[3] | fields[4] << 16,  # 6 bytes little endian mft
                     fields[5],  # 2 bytes little endian mft sequence
                     fields[6],
                     # FILETIME: 100ns ticks since 1601-01-01 -> ISO string.
                     (datetime(1601, 1, 1) +
                      timedelta(microseconds=(fields[7] / 10))).isoformat(' '),
                     unpack_flags(fields[8], REASONS),
                     unpack_flags(fields[9], SOURCEINFO),
                     fields[10],
                     unpack_flags(fields[11], ATTRIBUTES),
                     # File name: UTF-16, length and offset come from the record.
                     str(struct.unpack_from('{}s'.format(fields[12]).encode(),
                                            record, fields[13])[0], 'utf16'))
python
{ "resource": "" }
q42875
usn_v4_record
train
def usn_v4_record(header, record):
    """Extracts USN V4 record information.

    Not implemented yet: the record is unpacked but never decoded.
    """
    length, major_version, minor_version = header
    # NOTE(review): header parts and fields are unpacked but unused until
    # the actual V4 decoding below is written.
    fields = V4_RECORD.unpack_from(record, RECORD_HEADER.size)

    raise NotImplementedError('Not implemented')
python
{ "resource": "" }
q42876
unpack_flags
train
def unpack_flags(value, flags):
    """Multiple flags might be packed in the same field.

    Returns the single flag name when ``value`` is an exact key,
    otherwise the names of all flag bits set in ``value`` (key order).
    """
    if value in flags:
        return [flags[value]]
    return [name for bit, name in sorted(flags.items()) if bit & value > 0]
python
{ "resource": "" }
q42877
read_next_block
train
def read_next_block(infile, block_size=io.DEFAULT_BUFFER_SIZE):
    """Iterates over the file in blocks.

    Stops when read() returns an empty chunk (end of file).
    """
    while True:
        chunk = infile.read(block_size)
        if not chunk:
            break
        yield chunk
python
{ "resource": "" }
q42878
remove_nullchars
train
def remove_nullchars(block):
    """Strips NULL chars taking care of bytes alignment.

    Leading NULs are removed, then just enough are re-added so the total
    amount stripped stays a multiple of 8 (records are 8-byte aligned).
    """
    stripped = block.lstrip(b'\00')
    stripped_count = len(block) - len(stripped)
    return b'\00' * (stripped_count % 8) + stripped
python
{ "resource": "" }
q42879
timetopythonvalue
train
def timetopythonvalue(time_val):
    "Convert a time or time range from ArcGIS REST server format to Python"
    if isinstance(time_val, sequence):
        # A pair/list of timestamps: convert each element.
        return map(timetopythonvalue, time_val)
    elif isinstance(time_val, numeric):
        # Epoch seconds -> naive UTC datetime.
        return datetime.datetime(*(time.gmtime(time_val))[:6])
    elif isinstance(time_val, str):
        # Fix: this branch previously re-tested `numeric`, which made it
        # unreachable; a comma-separated string of timestamps is meant here.
        values = []
        try:
            values = map(long, time_val.split(","))
        except ValueError:
            pass
        if values:
            return map(timetopythonvalue, values)
    raise ValueError(repr(time_val))
python
{ "resource": "" }
q42880
pythonvaluetotime
train
def pythonvaluetotime(time_val):
    "Convert a time or time range from Python datetime to ArcGIS REST server"
    if time_val is None:
        return None
    elif isinstance(time_val, numeric):
        # Plain numbers are presumably seconds, converted to a string of
        # milliseconds — TODO confirm against callers.
        return str(long(time_val * 1000.0))
    elif isinstance(time_val, date):
        dtlist = [time_val.year, time_val.month, time_val.day]
        if isinstance(time_val, datetime.datetime):
            dtlist += [time_val.hour, time_val.minute, time_val.second]
        else:
            # Plain dates map to midnight.
            dtlist += [0, 0, 0]
        # NOTE(review): this branch returns a long while the numeric branch
        # returns a str — confirm callers accept both types.
        return long(calendar.timegm(dtlist) * 1000.0)
    elif (isinstance(time_val, sequence) and len(time_val) == 2):
        # Two-element ranges: both numeric or both date-like.
        if all(isinstance(x, numeric) for x in time_val):
            return ",".join(pythonvaluetotime(x) for x in time_val)
        elif all(isinstance(x, date) for x in time_val):
            # NOTE(review): the date branch above yields longs, so this
            # join would raise TypeError — confirm / fix upstream.
            return ",".join(pythonvaluetotime(x) for x in time_val)
    raise ValueError(repr(time_val))
python
{ "resource": "" }
q42881
AnsibleInventory.get_hosts
train
def get_hosts(self, group=None):
    ''' Get the hosts.

    With a group name: returns a one-element list holding that group's
    host names, or None when the group is unknown. Without a group:
    returns one dict per inventory group, each with 'group' and
    'hostlist' entries.
    '''
    hostlist = []
    if group:
        groupobj = self.inventory.groups.get(group)
        if not groupobj:
            # Unknown group: report it and return None (not an empty list).
            print "Group [%s] not found in inventory" % group
            return None
        groupdict = {}
        groupdict['hostlist'] = []
        for host in groupobj.get_hosts():
            groupdict['hostlist'].append(host.name)
        hostlist.append(groupdict)
    else:
        # NOTE(review): the loop variable shadows the 'group' parameter;
        # harmless here since the parameter is falsy in this branch.
        for group in self.inventory.groups:
            groupdict = {}
            groupdict['group'] = group
            groupdict['hostlist'] = []
            groupobj = self.inventory.groups.get(group)
            for host in groupobj.get_hosts():
                groupdict['hostlist'].append(host.name)
            hostlist.append(groupdict)
    return hostlist
python
{ "resource": "" }
q42882
make_random_MLdataset
train
def make_random_MLdataset(max_num_classes=20, min_class_size=20,
                          max_class_size=50, max_dim=100, stratified=True):
    """Generates a random MLDataset for use in testing.

    Args:
        max_num_classes (int): upper bound (exclusive) on the number of classes.
        min_class_size (int): smallest class size.
        max_class_size (int): largest class size.
        max_dim (int): upper bound on the feature dimensionality.
        stratified (bool): when True all classes share one random size.

    Returns:
        MLDataset: dataset with random features and class assignments.
    """
    smallest = min(min_class_size, max_class_size)
    max_class_size = max(min_class_size, max_class_size)
    largest = max(50, max_class_size)
    largest = max(smallest + 3, largest)

    if max_num_classes != 2:
        # randint's high bound is exclusive: picks from [2, max_num_classes).
        num_classes = int(np.random.randint(2, max_num_classes))
    else:
        num_classes = 2

    if not stratified:
        # np.random.random_integers is deprecated/removed; randint with an
        # inclusive upper bound (largest + 1) is the exact replacement.
        class_sizes = np.random.randint(smallest, largest + 1, size=num_classes)
    else:
        class_sizes = np.repeat(np.random.randint(smallest, largest), num_classes)

    # Guarantee high > low so randint never raises (the previous code
    # crashed for max_dim == 3).
    low_dim = min(3, max_dim)
    high_dim = max(low_dim + 1, max_dim)
    num_features = int(np.random.randint(low_dim, high_dim))

    ds = MLDataset()
    for class_index in range(num_classes):
        class_id = 'class-{}'.format(class_index)
        subject_ids = ['s{}-c{}'.format(ix, class_index)
                       for ix in range(class_sizes[class_index])]
        for subject_id in subject_ids:
            ds.add_sample(subject_id, feat_generator(num_features),
                          int(class_index), class_id)

    return ds
python
{ "resource": "" }
q42883
Observer.bindToEndPoint
train
def bindToEndPoint(self,bindingEndpoint):
    """
    2-way binds the target endpoint to all other registered endpoints.
    """
    # Keyed by instanceId so the originating endpoint can be skipped when
    # a change is propagated (see _updateEndpoints).
    self.bindings[bindingEndpoint.instanceId] = bindingEndpoint
    bindingEndpoint.valueChangedSignal.connect(self._updateEndpoints)
python
{ "resource": "" }
q42884
Observer._updateEndpoints
train
def _updateEndpoints(self,*args,**kwargs):
    """
    Updates all endpoints except the one from which this slot was called.

    Note: this method is probably not complete threadsafe. Maybe a lock
    is needed when setter self.ignoreEvents
    """
    sender = self.sender()
    if not self.ignoreEvents:
        # Guard against signal feedback loops while pushing the new value
        # to the other endpoints.
        self.ignoreEvents = True
        for binding in self.bindings.values():
            # NOTE(review): comparing instanceId to id(sender) and looking
            # up self.bindings[id(sender)] assumes instanceId == id(obj) —
            # confirm how instanceId is assigned.
            if binding.instanceId == id(sender):
                continue
            if args:
                binding.setter(*args,**kwargs)
            else:
                # No payload on the signal: pull the value from the sender.
                binding.setter(self.bindings[id(sender)].getter())
        self.ignoreEvents = False
python
{ "resource": "" }
q42885
HyperTransformer._anonymize_table
train
def _anonymize_table(cls, table_data, pii_fields):
    """Anonymize in `table_data` the fields in `pii_fields`.

    Args:
        table_data (pandas.DataFrame): Original dataframe/table.
        pii_fields (list[dict]): Metadata for the fields to transform.

    Result:
        pandas.DataFrame: Anonymized table.
    """
    for pii_field in pii_fields:
        field_name = pii_field['name']
        # A categorical transformer configured from the field's metadata
        # replaces the PII column in place.
        transformer = cls.get_class(TRANSFORMERS['categorical'])(pii_field)
        table_data[field_name] = transformer.anonymize_column(table_data)

    return table_data
python
{ "resource": "" }
q42886
HyperTransformer._get_tables
train
def _get_tables(self, base_dir):
    """Load the contents of meta_file and the corresponding data.

    If fields containing Personally Identifiable Information are detected
    in the metadata they are anonymized before assigning them into `table_dict`.

    Args:
        base_dir(str): Root folder of the dataset files.

    Returns:
        dict: Mapping str -> tuple(pandas.DataFrame, dict)
    """
    table_dict = {}

    for table in self.metadata['tables']:
        # Tables flagged with 'use': False are skipped entirely.
        if table['use']:
            relative_path = os.path.join(base_dir, self.metadata['path'], table['path'])
            data_table = pd.read_csv(relative_path)
            pii_fields = self._get_pii_fields(table)
            data_table = self._anonymize_table(data_table, pii_fields)
            table_dict[table['name']] = (data_table, table)

    return table_dict
python
{ "resource": "" }
q42887
HyperTransformer._get_transformers
train
def _get_transformers(self): """Load the contents of meta_file and extract information about the transformers. Returns: dict: tuple(str, str) -> Transformer. """ transformer_dict = {} for table in self.metadata['tables']: table_name = table['name'] for field in table['fields']: transformer_type = field.get('type') if transformer_type: col_name = field['name'] transformer_dict[(table_name, col_name)] = transformer_type return transformer_dict
python
{ "resource": "" }
q42888
HyperTransformer._fit_transform_column
train
def _fit_transform_column(self, table, metadata, transformer_name, table_name):
    """Transform a column from table using transformer and given parameters.

    Args:
        table (pandas.DataFrame): Dataframe containing column to transform.
        metadata (dict): Metadata for given column.
        transformer_name (str): Name of transformer to use on column.
        table_name (str): Name of table in original dataset.

    Returns:
        pandas.DataFrame: Dataframe containing the transformed column.
        If self.missing=True, it will contain a second column containing
        0 and 1 marking if that value was originally null or not.
    """
    column_name = metadata['name']
    content = {}
    columns = []

    if self.missing and table[column_name].isnull().any():
        # Split the column into a cleaned value column plus a '?'-prefixed
        # null-indicator column.
        null_transformer = transformers.NullTransformer(metadata)
        clean_column = null_transformer.fit_transform(table[column_name])
        null_name = '?' + column_name
        columns.append(null_name)
        content[null_name] = clean_column[null_name].values
        table[column_name] = clean_column[column_name]

    transformer_class = self.get_class(transformer_name)
    transformer = transformer_class(metadata)
    # Keep the fitted transformer so transform/reverse_transform can reuse it.
    self.transformers[(table_name, column_name)] = transformer
    content[column_name] = transformer.fit_transform(table)[column_name].values

    # Transformed column first, null indicator (if any) after.
    columns = [column_name] + columns
    return pd.DataFrame(content, columns=columns)
python
{ "resource": "" }
q42889
HyperTransformer._reverse_transform_column
train
def _reverse_transform_column(self, table, metadata, table_name):
    """Reverses the transformation on a column from table using the given parameters.

    Args:
        table (pandas.DataFrame): Dataframe containing column to transform.
        metadata (dict): Metadata for given column.
        table_name (str): Name of table in original dataset.

    Returns:
        pandas.DataFrame: Dataframe containing the transformed column.
        If self.missing=True, it will contain a second column containing
        0 and 1 marking if that value was originally null or not.
        It will return None in the case the column is not in the table.
    """
    column_name = metadata['name']
    if column_name not in table:
        return

    null_name = '?' + column_name
    content = pd.DataFrame(columns=[column_name], index=table.index)
    transformer = self.transformers[(table_name, column_name)]
    content[column_name] = transformer.reverse_transform(table[column_name].to_frame())

    # NOTE(review): this tests membership in the column's values/index,
    # not in the frame's columns — confirm the '?...' lookup is intended.
    if self.missing and null_name in table[column_name]:
        content[null_name] = table.pop(null_name)
        null_transformer = transformers.NullTransformer(metadata)
        content[column_name] = null_transformer.reverse_transform(content)

    return content
python
{ "resource": "" }
q42890
HyperTransformer.fit_transform_table
train
def fit_transform_table(self, table, table_meta, transformer_dict=None,
                        transformer_list=None, missing=None):
    """Create, apply and store the specified transformers for `table`.

    Args:
        table(pandas.DataFrame): Contents of the table to be transformed.
        table_meta(dict): Metadata for the given table.
        transformer_dict(dict): Mapping  `tuple(str, str)` -> `str` where the tuple
                                in the keys represent the (table_name, column_name)
                                and the value the name of the assigned transformer.
        transformer_list(list): List of transformers to use. Overrides the transformers
                                in the meta_file.
        missing(bool): Wheter or not use NullTransformer to handle missing values.

    Returns:
        pandas.DataFrame: Transformed table.
    """
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing

    warnings.warn(DEPRECATION_MESSAGE.format('fit_transform_table'), DeprecationWarning)

    result = pd.DataFrame()
    table_name = table_meta['name']

    for field in table_meta['fields']:
        col_name = field['name']

        if transformer_list:
            # transformer_list takes precedence: apply every listed
            # transformer whose declared type matches the field's type.
            for transformer_name in transformer_list:
                if field['type'] == self.get_class(transformer_name).type:
                    transformed = self._fit_transform_column(
                        table, field, transformer_name, table_name)

                    result = pd.concat([result, transformed], axis=1)

        elif (table_name, col_name) in transformer_dict:
            # Otherwise look the column up in the (table, column) mapping.
            transformer_name = TRANSFORMERS[transformer_dict[(table_name, col_name)]]
            transformed = self._fit_transform_column(
                table, field, transformer_name, table_name)

            result = pd.concat([result, transformed], axis=1)

    return result
python
{ "resource": "" }
q42891
HyperTransformer.transform_table
train
def transform_table(self, table, table_meta, missing=None):
    """Apply the stored transformers to `table`.

    Args:
        table(pandas.DataFrame): Contents of the table to be transformed.
        table_meta(dict): Metadata for the given table.
        missing(bool): Wheter or not use NullTransformer to handle missing values.

    Returns:
        pandas.DataFrame: Transformed table.
    """
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing

    warnings.warn(DEPRECATION_MESSAGE.format('transform_table'), DeprecationWarning)

    content = {}
    columns = []
    table_name = table_meta['name']

    for field in table_meta['fields']:
        column_name = field['name']

        if missing and table[column_name].isnull().any():
            # Columns with nulls get a '?'-prefixed indicator column plus
            # a cleaned value column.
            null_transformer = transformers.NullTransformer(field)
            clean_column = null_transformer.fit_transform(table[column_name])
            null_name = '?' + column_name
            columns.append(null_name)
            content[null_name] = clean_column[null_name].values
            # NOTE(review): this branch passes a Series to transform()
            # while the else branch passes a DataFrame — confirm the
            # transformers accept both.
            column = clean_column[column_name]

        else:
            column = table[column_name].to_frame()

        # Reuse the transformer fitted by _fit_transform_column.
        transformer = self.transformers[(table_name, column_name)]
        content[column_name] = transformer.transform(column)[column_name].values
        columns.append(column_name)

    return pd.DataFrame(content, columns=columns)
python
{ "resource": "" }
q42892
HyperTransformer.reverse_transform_table
train
def reverse_transform_table(self, table, table_meta, missing=None):
    """Map a transformed `table` back to its original format.

    Args:
        table(pandas.DataFrame): Contents of the table to be transformed.
        table_meta(dict): Metadata for the given table.
        missing(bool): Whether or not to use NullTransformer to handle missing
            values.

    Returns:
        pandas.DataFrame: Table in original format.
    """
    # Resolve the `missing` flag against (and update) the instance default.
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing

    warnings.warn(
        DEPRECATION_MESSAGE.format('reverse_transform_table'), DeprecationWarning)

    table_name = table_meta['name']
    reversed_table = pd.DataFrame(index=table.index)

    for field in table_meta['fields']:
        # Columns whose reversal yields None are dropped from the output.
        column = self._reverse_transform_column(table, field, table_name)
        if column is not None:
            reversed_table[field['name']] = column

    return reversed_table
python
{ "resource": "" }
q42893
HyperTransformer.fit_transform
train
def fit_transform(
        self, tables=None, transformer_dict=None, transformer_list=None,
        missing=None):
    """Create, apply and store the specified transformers for the given tables.

    Args:
        tables(dict): Mapping of table names to `tuple` where each tuple is
            on the form (`pandas.DataFrame`, `dict`). The `DataFrame` contains
            the table data and the `dict` the corresponding meta information.
            If not specified, the tables will be retrieved using the meta_file.
        transformer_dict(dict): Mapping  `tuple(str, str)` -> `str` where the
            tuple is (table_name, column_name).
        transformer_list(list): List of transformers to use. Overrides the
            transformers in the meta_file.
        missing(bool): Whether or not to use NullTransformer to handle missing
            values.

    Returns:
        dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).
    """
    # Resolve the `missing` flag against (and update) the instance default.
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing

    warnings.warn(DEPRECATION_MESSAGE.format('fit_transform'), DeprecationWarning)

    if tables is None:
        tables = self.table_dict

    # Fall back to the metadata-derived mapping only when the caller gave
    # neither an explicit dict nor an explicit list.
    if transformer_dict is None and transformer_list is None:
        transformer_dict = self.transformer_dict

    return {
        name: self.fit_transform_table(data, meta, transformer_dict, transformer_list)
        for name, (data, meta) in tables.items()
    }
python
{ "resource": "" }
q42894
HyperTransformer.transform
train
def transform(self, tables, table_metas=None, missing=None):
    """Apply all the saved transformers to `tables`.

    Args:
        tables(dict): mapping of table names to `tuple` where each tuple is
            on the form (`pandas.DataFrame`, `dict`). The `DataFrame` contains
            the table data and the `dict` the corresponding meta information.
            If not specified, the tables will be retrieved using the meta_file.
        table_metas(dict): Full metadata file for the dataset.
        missing(bool): Whether or not to use NullTransformer to handle missing
            values.

    Returns:
        dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).
    """
    # Resolve the `missing` flag against (and update) the instance default.
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing

    warnings.warn(DEPRECATION_MESSAGE.format('transform'), DeprecationWarning)

    result = {}
    for name, data in tables.items():
        # Prefer the caller-supplied metadata; otherwise use the stored one.
        meta = self.table_dict[name][1] if table_metas is None else table_metas[name]
        result[name] = self.transform_table(data, meta)

    return result
python
{ "resource": "" }
q42895
HyperTransformer.reverse_transform
train
def reverse_transform(self, tables, table_metas=None, missing=None):
    """Transform data back to its original format.

    Args:
        tables(dict): mapping of table names to `tuple` where each tuple is
            on the form (`pandas.DataFrame`, `dict`). The `DataFrame` contains
            the transformed data and the `dict` the corresponding meta information.
            If not specified, the tables will be retrieved using the meta_file.
        table_metas(dict): Full metadata file for the dataset.
        missing(bool): Whether or not to use NullTransformer to handle missing
            values.

    Returns:
        dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).
    """
    # Resolve the `missing` flag against (and update) the instance default.
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing

    warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform'), DeprecationWarning)

    reversed_tables = {}
    for name, data in tables.items():
        # Prefer the caller-supplied metadata; otherwise use the stored one.
        meta = self.table_dict[name][1] if table_metas is None else table_metas[name]
        reversed_tables[name] = self.reverse_transform_table(data, meta)

    return reversed_tables
python
{ "resource": "" }
q42896
echo
train
def echo(msg, *args, **kwargs):
    '''Wraps click.echo, handles formatting and check encoding'''
    # Pull out the options click.echo understands; everything left in
    # kwargs is treated as str.format() arguments for the message.
    passthrough = {
        'file': kwargs.pop('file', None),
        'nl': kwargs.pop('nl', True),
        'err': kwargs.pop('err', False),
        'color': kwargs.pop('color', None),
    }
    text = safe_unicode(msg).format(*args, **kwargs)
    click.echo(text, **passthrough)
python
{ "resource": "" }
q42897
warning
train
def warning(msg, *args, **kwargs):
    '''Display a warning message'''
    # Prefix the message with the colored warning marker before echoing.
    prefixed = '{0} {1}'.format(yellow(WARNING), msg)
    echo(prefixed, *args, **kwargs)
python
{ "resource": "" }
q42898
error
train
def error(msg, details=None, *args, **kwargs):
    '''Display an error message with optionnal details'''
    # Colored KO marker followed by the message; details, when present,
    # are appended on the following lines.
    text = '{0} {1}'.format(red(KO), white(msg))
    if details:
        text = '\n'.join((text, safe_unicode(details)))
    echo(format_multiline(text), *args, **kwargs)
python
{ "resource": "" }
q42899
load
train
def load(patterns, full_reindex):
    ''' Load one or more CADA CSV files matching patterns '''
    header('Loading CSV files')
    for pattern in patterns:
        for filename in iglob(pattern):
            echo('Loading {}'.format(white(filename)))
            with open(filename) as f:
                reader = csv.reader(f)
                # Skip header.
                # Fixed: was `reader.next()`, which only exists on Python 2;
                # the builtin next() works on both Python 2 and 3.
                next(reader)
                # Pre-initialize so an empty file (no data rows) no longer
                # raises NameError on the summary lines below.
                idx = 0
                skipped = False
                for idx, row in enumerate(reader, 1):
                    try:
                        advice = csv.from_row(row)
                        skipped = False
                        if not full_reindex:
                            index(advice)
                        # Progress marker: a dot per row, the row count
                        # every 50 rows.
                        echo('.' if idx % 50 else white(idx), nl=False)
                    except Exception:
                        # Best-effort load: a bad row is marked 's' and
                        # skipped rather than aborting the whole file.
                        echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
                        skipped = True
                if skipped:
                    echo(white('{}(s)'.format(idx)) if idx % 50 else '')
                else:
                    echo(white(idx) if idx % 50 else '')
            success('Processed {0} rows'.format(idx))
    if full_reindex:
        reindex()
python
{ "resource": "" }