content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import six
def to_bytes(s):
    """Convert the string s to a UTF-8 encoded byte string.

    Byte strings are returned unchanged, text strings are encoded, and
    any other value passes through as-is.

    Returns:
        The converted byte string
    """
    if isinstance(s, bytes):
        return s
    if isinstance(s, six.text_type):
        return s.encode('utf-8')
    return s
|
38523fd9a414903f1b6e561eec365eb48153f291
| 25,535
|
def parse_article(article):
    """
    Parse an article preview on the news page and pull out its title,
    link and time.

    Parameters
    ----------
    article : bs4.tag
        article tag to be parsed

    Returns
    -------
    title, link, time : Tuple
        a tuple containing title, link and time of the article
    """
    anchor = article.a
    title = anchor.text
    link = anchor.get("href")
    spans = article.find_all("span")
    time = " ".join(span.text for span in spans)
    return title, link, time
|
cb00db924478e6e916caf5f6420ff7f742246737
| 25,536
|
def validate_comma_seperated_list(input):
    """Split the comma-separated input string into a list of fields."""
    fields = input.split(',')
    return fields
|
bcc524d78eeebd47739dc29a8b0ed3300309be31
| 25,537
|
def diffraction_scale_biconvex(paraxial_focus, lens, wavelength, rmax):
    """
    Calculate the diffraction scale for a biconvex lens.

    The effective focal distance is measured from the lens centre
    (first surface position plus half the surface separation).
    """
    focal_distance = paraxial_focus - lens._z1 + lens._sep / 2
    # wavelength is converted by 1e-6 to the unit system of the distances.
    return focal_distance * (wavelength * 1e-6) / (2 * rmax)
|
225a254cb78fdd5604eef618627612f7355aaf24
| 25,538
|
import ast
def get_function_class(module):
    """
    Collect every function defined directly inside a class of *module*.

    Returns a list of (ast.FunctionDef node, class name) tuples.
    """
    return [
        (node, cls.name)
        # only top-level class definitions are inspected
        for cls in module.body
        if isinstance(cls, ast.ClassDef)
        for node in cls.body
        if isinstance(node, ast.FunctionDef)
    ]
|
c6e3a8b415051e0ade661b4a55befa64f5770c95
| 25,539
|
def join_provenances(provenance1, provenance2):
    """
    Join two provenances (lists of id strings), dropping duplicates.

    Order is preserved: ids from provenance1 first, then any new ids
    from provenance2 (dict keys keep insertion order).
    """
    merged = dict.fromkeys(provenance1)
    merged.update(dict.fromkeys(provenance2))
    return list(merged)
|
3736c809f0cb76e5c31b8082f8bb9c3b9f594857
| 25,540
|
import typing
def mark(foo=None, *, markers: typing.Iterable[str]):
    """
    Set each name in *markers* as a True attribute on the decorated object.

    :param foo: object to mark; when None, a decorator is returned instead
    :param markers: iterable of attribute names to set to True
    :return: the marked object, or a decorator when foo is None
    :raises AttributeError: if the target does not accept new attributes

    Fixes: the docstring documented a non-existent ``args`` parameter, and
    the old ``try/except AttributeError: raise`` was a no-op re-raise.
    """
    def deco(_foo):
        for name in markers:
            # AttributeError (e.g. slotted objects) simply propagates.
            setattr(_foo, name, True)
        return _foo
    return deco(foo) if foo is not None else deco
|
1825118a74994bfb11e42ebab1b95815514c4269
| 25,541
|
def get_workspace() -> str:
    """Return the TorchX notebook workspace fsspec path."""
    workspace = "memory://torchx-workspace/"
    return workspace
|
396b3f3444357d8beb2bad0e1462bee9e663cd8d
| 25,543
|
from typing import Dict
from typing import Any
def valid_config(config: Dict[str, Any]) -> bool:
    """Check that the given configuration fits the batching requirements.

    Mirrors the constraints on num_minibatch, rollout_length and
    num_processes enforced at the start of each generator definition in
    meta/storage.py (where violating them raises an error).
    """
    recurrent = config["architecture_config"]["recurrent"]
    if recurrent:
        # Recurrent nets need at least one process per minibatch.
        return config["num_processes"] >= config["num_minibatch"]
    # Feed-forward nets need enough total steps to fill the minibatches.
    total_steps = config["rollout_length"] * config["num_processes"]
    return total_steps >= config["num_minibatch"]
|
c7262fe497af00be79ccfa3c152f4d418c92d7ec
| 25,544
|
def impurity_decrease(y, membership, membership_true, membership_false, criterion):
    """
    Calculate the decrease in impurity from a split.

    Parameters
    ----------
    y : array-like of shape (n_samples,)
        An array of labels.
    membership : array-like of shape (n_samples,)
        The old membership of each label.
    membership_true : array-like of shape (n_samples,)
        The new membership of each label.
    membership_false : array-like of shape (n_samples,)
        The complement of new membership of each label.
    criterion : callable
        The impurity function.

    Returns
    -------
    float : decrease of impurity measured by the given criterion
    """
    parent_impurity = criterion(y, membership)
    total_weight = membership.sum()
    # Each child's impurity is weighted by its share of the parent mass.
    true_term = (membership_true.sum() / total_weight) * criterion(y, membership_true)
    false_term = (membership_false.sum() / total_weight) * criterion(y, membership_false)
    return parent_impurity - true_term - false_term
|
3f01757bbd32b7c711ba0ed11e0824620f71b055
| 25,545
|
def invalid_refresh_token(refresh_token):
    """
    Build an invalid refresh token from a valid one by dropping its
    last character.
    """
    truncated = refresh_token[:len(refresh_token) - 1]
    return truncated
|
912a60758b46c4f892042a9cf2697e329cbc19f6
| 25,546
|
def search_by_language(s, language=None):
    """
    Filter results by language
    Options: "_EN" : English, "_ES" : Spanish etc.

    :param s: session-like object exposing a selenium-style ``driver``
        (presumably requestium -- TODO confirm against callers)
    :param language: code typed into the search-criteria field; None
        leaves the language filter untouched
    :return: the same session object, allowing call chaining
    """
    # Recorder search page (criteria form, English UI).
    url = 'https://enregistreur.prosodie.com/odigo4isRecorder/' \
        'EntryPoint?serviceName=CriteresMessagesHandler&lang=en'
    s.driver.get(url)
    if language:
        s.driver.ensure_element_by_name('criteres').send_keys(language)
    # Submit the search form.
    s.driver.ensure_element_by_id('button-1009').click()
    return s
|
5ee369f819780938c0ad4805b45f443c78473b75
| 25,547
|
def get_household_income_columns():
    """Return a mapping of SAIPE field names to their descriptions."""
    return dict(
        COUNTY='County FIPS Code',
        GEOCAT='Summary Level',
        GEOID='State+County FIPS Code',
        SAEMHI_LB90='Median Household Income Lower Bound for 90% Confidence Interval',
        SAEMHI_MOE='Median Household Income Margin of Error',
        SAEMHI_PT='Median Household Income Estimate',
        SAEMHI_UB90='Median Household Income Upper Bound for 90% Confidence Interval',
        SAEPOVALL_LB90='All ages in Poverty, Count Lower Bound for 90% Confidence Interval',
        SAEPOVALL_MOE='All ages in Poverty, Count Margin of Error',
        SAEPOVALL_PT='All ages in Poverty, Count Estimate',
        SAEPOVALL_UB90='All ages in Poverty, Count Upper Bound for 90% Confidence Interval',
        SAEPOVRTALL_LB90='All ages in Poverty, Rate Lower Bound for 90% Confidence Interval',
        SAEPOVRTALL_MOE='All ages in Poverty, Rate Margin of Error',
        SAEPOVRTALL_PT='All ages in Poverty, Rate Estimate',
        SAEPOVRTALL_UB90='All ages in Poverty, Rate Upper Bound for 90% Confidence Interval',
        SAEPOVU_ALL='All Ages in Poverty Universe',
        STABREV='Two-letter State Postal abbreviation',
        STATE='FIPS State Code',
        YEAR='Estimate Year',
    )
|
eccfbed620678f2d9bf97a9caad3e2211c3bfc07
| 25,548
|
import os
def newer(src, dst):
    """Return True if *src* was modified more recently than *dst*.

    Also returns True when *dst* does not exist yet (e.g. a build product
    that still has to be generated).

    Fix: compares modification time (st_mtime). The previous st_ctime is
    the inode-change time on Unix, which flips on metadata changes
    (chmod/chown) and so is not "newer" in the intended sense.
    """
    if not os.path.exists(dst):
        return True
    return os.stat(src).st_mtime > os.stat(dst).st_mtime
|
5324847852454d2759f869a64e3d7fe3cc727f9d
| 25,549
|
def tamiz3(m):
    """Alternative sieve algorithm: return all primes <= m, ascending.

    Fix: the composite record is now a set, so each membership test is
    O(1); the original stored composites in a list, making the whole
    sieve quadratic in practice.
    """
    found = []
    composites = set()
    i = 2
    while i <= m:
        if i not in composites:
            found.append(i)
            # Mark all multiples of the new prime (including itself).
            for j in range(i, m + 1, i):
                composites.add(j)
        i += 1
    return found
|
00cc77fddaef6885787c0ff2702a2e4291f96b32
| 25,550
|
def down_sample(x, sample_rate, k=2):
    """ Performs down sampling on the audio signal. It takes
    every kth sample of the signal and returns the resulting
    audio signal and the resulting sample rate.

    :param x: the audio signal of shape N x C, where N
        is the number of samples, and C is the number of
        channels; a 1-D signal of shape (N,) is also accepted
    :param sample_rate: the original sample rate
    :param k: the number of every k samples to return
    :return: a tuple of sample rate and the audio signal
        down-sampled to include every kth sample.

    Fix: the rank test was ``len(x.shape[0])`` -- len() of an int, which
    raises TypeError; it must inspect the number of dimensions.
    """
    if len(x.shape) < 2:
        # mono / 1-D signal: stride over the only axis
        return sample_rate / k, x[::k]
    return sample_rate / k, x[:, ::k]
|
c688acabd77289f074a0a95eb1a315edb58568d0
| 25,551
|
def _check_for_setattr(instance):
    """
    Check if there's any setattr method inside an instance. If so, return True.

    Walks up to the instance's enclosing module and scans its
    ``used_names`` index for 'setattr' occurrences that fall lexically
    inside the instance's source span. (Operates on jedi/parser-style
    nodes -- presumably; confirm against callers.)
    """
    module = instance.get_parent_until()
    try:
        # used_names maps a name string to every statement mentioning it.
        stmts = module.used_names['setattr']
    except KeyError:
        # 'setattr' never appears anywhere in the module.
        return False
    # Only count occurrences positioned strictly within the instance body.
    return any(instance.start_pos < stmt.start_pos < instance.end_pos
               for stmt in stmts)
|
9675a9236ff3de158e0f0149981bcd63f9beedd8
| 25,552
|
def blob_exists(storage_client, bucket_name, filename):
    """Return True if *filename* exists in the given bucket."""
    bucket = storage_client.get_bucket(bucket_name)
    return bucket.blob(filename).exists()
|
4f5fa78328401930ce6399a5cea6cdcecc10a173
| 25,553
|
def check_legal_input(user_input, input_type):
    """
    Validate *user_input* against the allowed values for *input_type*.

    :param user_input: Self explanatory
    :param input_type: Either 'lang' for language, 'semester' etc
    :return: The user input is returned as is; on an illegal value the
        process exits with 'Illegal input!'
    """
    allowed = {
        'lang': ['he', 'en'],
        'semester': [str(num) for num in range(1, 4)],
        'urls': ['y', 'n'],
    }[input_type]
    if user_input in allowed:
        return user_input
    exit('Illegal input!')
|
ebeddecb2b2393116978e5bc2db6c9ebc35ef0b0
| 25,554
|
def get_int_from_prompt(msg, default):
    """
    Return integer from prompt input
    Args:
        :msg: (str) Message to print
        :default: (int) Default value used when the user just presses Enter
    Returns:
        :value: (int) Integer from prompt
    """
    # Re-prompt until we get either an empty line (-> default) or text
    # that parses as an integer.
    while True:
        value = input(msg)
        if not value:
            return default
        else:
            try:
                value = int(value)
                return value
            except ValueError:
                # Non-numeric text: tell the user and loop again.
                print("Invalid input, try again...")
|
ea5c9988a25e646e81e966a10d07dffa6cd93eb2
| 25,555
|
def extract_actions_from_category(json_object):
    """
    Return the category's actions as a comma-separated string.

    Args:
        @param json_object: dict parsed from a JSON representation
    Returns:
        "a,b,c"-style string of actions, "" when the list is empty,
        or None when no 'actions' key exists.

    Fixes the Python-2-only ``dict.has_key`` call (AttributeError on
    Python 3) and replaces quadratic string concatenation with
    ``str.join``.
    """
    if 'actions' not in json_object:
        return None
    return ",".join(json_object['actions'])
|
f71a150e2e89f3e15abfef61b541f460f479228b
| 25,556
|
import torch
def a2c_policy_loss(logps: torch.Tensor, advs: torch.Tensor) -> torch.Tensor:
    """
    Loss function for an A2C policy: $-(logp(\pi(a|s)) * A_t)$ averaged
    over the batch.

    Args:
    - logps (torch.Tensor): Log-probabilities of selected actions.
    - advs (torch.Tensor): Advantage estimates of selected actions.

    Returns:
    - a2c_loss (torch.Tensor): A2C loss term.
    """
    weighted_logps = logps * advs
    return -weighted_logps.mean()
|
294ae812a3f1d0363fb0ac4f292113e9db521c51
| 25,557
|
import random
def augment_volume(wav, rate_lower=0.7, rate_upper=1.3):
    """
    Scale a waveform's volume by a gain drawn uniformly at random.

    :param wav: a waveform.
    :param rate_lower: lower bound of the gain rate
    :param rate_upper: upper bound of the gain rate
    :return: the scaled waveform
    """
    gain = random.uniform(rate_lower, rate_upper)
    return wav * gain
|
5b0934a20423d744a8a5d63ad46846c0e14444d9
| 25,558
|
def do_something(x):
    """
    Do something so we have something to test.
    >>> do_something(3)
    16
    >>> do_something(7)
    24
    """
    shifted = x + 5
    return 2 * shifted
|
d3185823bb098929f1330e7f477beff04dc2eced
| 25,559
|
import os
def extract_labels(files):
    """Extract the labels as the directory in which the files reside."""
    # The label is the last path component of each file's directory.
    return [os.path.basename(os.path.dirname(file)) for file in files]
|
0fb2dca0314d80e3e6f67694343a22f7377218ff
| 25,560
|
import requests
import json
import logging
def claim_intersight_device(AUTH, CLAIM_CODES, CLAIM_CONFIG):
    """ Claim Device to Intersight

    Args:
        AUTH: auth object passed straight to requests.post
        CLAIM_CODES: payload serialized as the DeviceClaims request body
        CLAIM_CONFIG: dict holding 'intersight_base_url'
    Returns:
        The Moid of the claimed device from the API response.
    Raises:
        KeyError: if the response JSON lacks the Device/Moid fields.
    """
    # POST the claim codes to the asset/DeviceClaims endpoint.
    response = requests.post(
        CLAIM_CONFIG['intersight_base_url'] + 'asset/DeviceClaims',
        data=json.dumps(CLAIM_CODES),
        auth=AUTH
    )
    logging.info(response.text)
    # NOTE(review): no raise_for_status() here -- an HTTP failure will
    # surface as a KeyError below instead; confirm that is intended.
    response_json = response.json()
    logging.info(response_json["Device"]["Moid"])
    return response_json["Device"]["Moid"]
|
135036a3da7b606da50b47d436604d2ac68b36e2
| 25,561
|
import imp
def LoadExtraSrc(path_to_file):
    """Attempts to load an extra source file, and overrides global values.
    If the extra source file is loaded successfully, then it will use the new
    module to override some global values, such as gclient spec data.
    Args:
        path_to_file: File path.
    Returns:
        The loaded module object, or None if none was imported.
    """
    try:
        global GCLIENT_SPEC_DATA
        global GCLIENT_SPEC_ANDROID
        # imp.load_source executes the file as a module named 'data'.
        # NOTE(review): imp is deprecated since Python 3.4 (use importlib).
        extra_src = imp.load_source('data', path_to_file)
        # Override the module-level gclient specs with the values the
        # extra source exposes.
        GCLIENT_SPEC_DATA = extra_src.GetGClientSpec()
        GCLIENT_SPEC_ANDROID = extra_src.GetGClientSpecExtraParams()
        return extra_src
    except ImportError:
        # Missing/broken file: fall back to the built-in spec values.
        return None
|
32ec066cd79d928528385aae3cc88272f4ccec32
| 25,563
|
from typing import OrderedDict
def polymorphic_child_forms_factory(formset_children, **kwargs):
    """
    Construct the forms for the formset children, keyed by child model.

    This is mostly used internally, and rarely needs to be used by
    external projects. When using the factory methods
    (:func:`polymorphic_inlineformset_factory`), this feature is called
    already for you.
    """
    return OrderedDict(
        (child.model, child.get_form(**kwargs)) for child in formset_children
    )
|
b647d0936b7c9d06bdb1bd6aadf767c0881f2e41
| 25,566
|
def get_top_k_results(sorted_data):
    """
    Compute the top-K precisions.

    Args:
        sorted_data: A numpy array of sorted data.
    Returns:
        A list of "P@k: value" strings for k in 10, 20, 50, 100, 200.
    """
    return [
        "P@{}: {}".format(k, sorted_data[:k][:, -1].sum())
        for k in (10, 20, 50, 100, 200)
    ]
|
700b8fff9ded8b46bb45e3054714a7422b03bba6
| 25,567
|
def get_slurm_script_gpu(train_dir, command, time):
    """Returns contents of SLURM script for a gpu job.

    Args:
        train_dir: directory that receives slurm_%j.out
        command: shell command executed inside the job
        time: SLURM time limit string (e.g. "24:00:00")
    """
    return f"""#!/bin/bash
#SBATCH -N 1
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-socket=1
#SBATCH --gres=gpu:tesla_p100:1
#SBATCH --cpus-per-task=8
#SBATCH --mem=128000
#SBATCH --output={train_dir}/slurm_%j.out
#SBATCH -t {time}
module load anaconda3 cudatoolkit/10.0 cudnn/cuda-10.0/7.3.1
source activate yumi
{command}
"""
|
85bce0f8f0ba1e3613b1656492e8c12c82e7b2e4
| 25,568
|
def _name_or_asname(alias):
"""Take alias node from AST and return the name bound to import"""
return alias.asname if alias.asname else alias.name
|
fd4a9e4ab4cf543f91ec452e7b6280a4592cd86d
| 25,569
|
import os
from pathlib import Path
def expand_files(sources, record_type):
    """
    Return a list of all the files from a potentially mixed list of files
    and directories. Directories are expanded (their matching files are
    appended after the directly-listed files).
    """
    matching_files = []
    directories = []
    # Partition the sources: matching plain files vs. directories.
    for source in sources:
        if os.path.isfile(source) and record_type in source:
            matching_files.append(Path(source))
        elif os.path.isdir(source):
            directories.append(Path(source))
    # Expand each directory and append its matching entries.
    for directory in directories:
        matching_files.extend(directory.glob(f"*{record_type}*"))
    return matching_files
|
9c7f1ac6a5e75429cc398c9d5a9ab4819a3ffb25
| 25,570
|
def cleanup_content(sourcepage, remove_emails, codify_paths, clean_code_tags):
    """Run the enabled cleaners on the content we get from perl files.

    Each boolean argument toggles the module-level cleaner function of
    the same name. Pages generated from .pan files are left untouched.

    Fixes a bug where the flags were ignored: the loop tested the
    truthiness of the cleaner *name strings* (always true), so every
    cleaner always ran regardless of the arguments.
    """
    content = sourcepage.rstcontent
    if not sourcepage.path.endswith('.pan'):
        cleaners = {
            'remove_emails': remove_emails,
            'codify_paths': codify_paths,
            'clean_code_tags': clean_code_tags,
        }
        for func_name, enabled in cleaners.items():
            if enabled:
                # Cleaner functions are defined at module level.
                content = globals()[func_name](content)
    sourcepage.rstcontent = content
    return sourcepage
|
7ac12f6e499fab2f90a4842fad4db96e7f3788c7
| 25,572
|
import os
def get_root_directory():
    """
    Gets the root directory of the project.
    Source: https://www.kite.com/python/answers/how-to-get-the-path-of-the-root-project-structure-in-python

    NOTE(review): the relative path below is resolved against the
    *current working directory*, not this file's location, so the result
    depends on where the process was launched -- confirm callers always
    run from one level below the project root.
    """
    root_level_file = "../requirements.txt"
    # abspath('../...') anchors at os.getcwd(); dirname then strips the filename.
    root_dir = os.path.dirname(os.path.abspath(root_level_file))
    return root_dir
|
a21429a7ad422dd68dcb351683fed918a16989bc
| 25,573
|
def _ylab(to_plot):
"""Returns the y-label for the plot given the type of plot.
Parameters
----------
to_plot : string
Type of thing to plot. Can be 'pmf', 'cdf', 'fid', or 'wern'.
Returns
-------
string
The y-label for the plot.
"""
labels_dict = {
'pmf' : "$\\Pr(T_n = t)$",
'cdf' : "$\\Pr(T_n \\leq t)$",
'fid' : "$F_n(t)$",
'wern' : "$W_n(t)$"
}
return labels_dict[to_plot]
|
58b7217269bbf2f75cd0c378896ead0cb3bcc1be
| 25,574
|
import yaml
def _ordereddict_representer(dumper, data):
    """
    Generate a YAML representation for Python
    ``collections.OrderedDict`` objects, converting the ordered
    dictionary into a YAML mapping node while preserving key order.

    :param dumper: A YAML dumper.
    :type dumper: ``yaml.Dumper``
    :param data: The data to represent.
    :type data: ``collections.OrderedDictionary``
    :returns: A mapping node, with keys in the specified order.
    :rtype: ``yaml.MappingNode``
    """
    pairs = []
    for key, value in data.items():
        pairs.append((dumper.represent_data(key), dumper.represent_data(value)))
    return yaml.MappingNode(u'tag:yaml.org,2002:map', pairs)
|
32c77b72e9610bb8688e7690997b181a90003461
| 25,575
|
def total_aircraft(state,settings,geometry):
    """ This computes the total drag of an aircraft and stores
    that data in the conditions structure.
    Assumptions:
    None
    Source:
    N/A
    Inputs:
    settings.
      drag_coefficient_increment                    [Unitless]
      lift_to_drag_adjustment                       [Unitless] (.1 is 10% increase in L/D)
    state.conditions.aerodynamics.drag_breakdown.
      trim_corrected_drag                           [Unitless]
      spoiler_drag                                  [Unitless]
    Outputs:
    aircraft_total_drag (drag coefficient)          [Unitless]
    Properties Used:
    N/A
    """
    # Unpack inputs
    conditions = state.conditions
    configuration = settings
    drag_coefficient_increment = configuration.drag_coefficient_increment
    trim_corrected_drag = conditions.aerodynamics.drag_breakdown.trim_corrected_drag
    spoiler_drag = conditions.aerodynamics.drag_breakdown.spoiler_drag
    aircraft_total_drag = 0.0
    # Add drag_coefficient_increment
    aircraft_total_drag += trim_corrected_drag + drag_coefficient_increment + spoiler_drag
    conditions.aerodynamics.drag_breakdown.drag_coefficient_increment = drag_coefficient_increment
    # Add L/D correction
    # Dividing by (1 + adjustment) shrinks drag, i.e. raises L/D by that fraction.
    aircraft_total_drag = aircraft_total_drag/(1.+configuration.lift_to_drag_adjustment)
    # Store to results
    conditions.aerodynamics.drag_breakdown.total = aircraft_total_drag
    conditions.aerodynamics.drag_coefficient = aircraft_total_drag
    return aircraft_total_drag
|
d5daa779a6ee7875af6101f5ad20bedfeb4fac15
| 25,576
|
def credentials_from_config(path):
    """
    Retrieve the username and password from a config file for the Data API.
    DOES NOT raise an EnvironmentError if path is invalid.

    Currently a stub: always returns (None, None) regardless of *path*.
    See also: credential_prompt
    """
    return (None, None)
|
24e984bed51fd0061ed289e503978c91c18d6954
| 25,577
|
def trim_img(img, calc):
    """
    Crop *img* using the bounds computed by trim_calc.

    Parameters
    ----------
    img : OpenCV image array
        OpenCV image array (rows x cols x channels)
    calc : sequence of 4 ints
        (row_start, row_stop, col_start, col_stop) crop bounds

    Returns
    -------
    result : OpenCV image array
        The cropped OpenCV image array.
    """
    rows = slice(calc[0], calc[1])
    cols = slice(calc[2], calc[3])
    return img[rows, cols, :]
|
ec2008c4760aecdbc3a91e7ef2e483d2fdd0295a
| 25,578
|
def get_in_shape(in_data):
    """Get shapes of input datas.

    Parameters
    ----------
    in_data: Tensor
        input datas.

    Returns
    -------
    list of shape
        The shapes of input datas.
    """
    shapes = []
    for tensor in in_data:
        shapes.append(tensor.shape)
    return shapes
|
ae54409d425189c33fe9fe1bdb0487cc854f9510
| 25,579
|
def _normalize(options):
"""Return correct kwargs for setup() from provided options-dict.
"""
retval = {
key.replace("-", "_"): value for key, value in options.items()
}
# Classifiers
value = retval.pop("classifiers", None)
if value and isinstance(value, str):
classifiers = value.splitlines()
while "" in classifiers:
classifiers.remove("")
retval["classifiers"] = classifiers
# Long description from file
description_file = retval.pop("long_description_file", None)
if description_file:
try:
with open(description_file) as fdesc:
retval["long_description"] = fdesc.read()
except IOError:
retval["long_description"] = "Read the accompanying {}".format(
description_file
)
return retval
|
174bf546559d38606e2e55270398c28b52218f7d
| 25,580
|
import subprocess
from pathlib import Path
def upload_server_image():
    """
    Build the upload server once for the test run. The image doesn't change.

    Returns:
        str: the docker tag of the built image.
    Raises:
        subprocess.CalledProcessError: if the docker build fails (check=True).
    """
    # Build context is the sibling upload_server/ directory of this file.
    subprocess.run(
        ["docker", "build", "-t", "cog-test-upload-server", "."],
        cwd=Path(__file__).parent.parent / "upload_server",
        check=True,
    )
    return "cog-test-upload-server"
|
41a805bf9ce59af783e32fffa86215d7167bce0e
| 25,583
|
def sanitize_for_path(value, replace=' '):
    """Replace characters that are illegal in file paths with *replace*.

    :param value: candidate path component
    :param replace: replacement string for each illegal character
    :return: the sanitized string

    Fixes the previous implementation: its two character tables had
    different lengths, so ``zip`` silently misaligned the mapping
    (e.g. ``\\`` became ``/`` and ``/`` became ``*``), and the
    *replace* parameter was never used at all.
    """
    illegal_chars = '<>"?\\/*:|'
    for ch in illegal_chars:
        value = value.replace(ch, replace)
    return value
|
a885cf2b801ab182033a2a436ea2069ab03e754d
| 25,584
|
def getCheckBoxState_ihe(request, form, number_of_ihe):
    """
    Update *form* with the current state of the IHE prediction filter
    (support function for the IHE web-page): the option matching the
    ``prediction`` GET parameter becomes "selected", all others
    "unselected".
    """
    chosen = request.GET.get('prediction')
    for i in range(1, number_of_ihe + 1):
        key = str(i)
        form[key] = "selected" if chosen == key else "unselected"
    return form
|
17380c1fbdf98c8f1fbc16df3b19ef18ad0d7687
| 25,585
|
import random
def translate_point_cloud(batch_data):
    """ Randomly translate the point clouds to augment the dataset.

        A single random offset (magnitude in (0, 5), random sign) is
        added to every coordinate of every point.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, translated batch of point clouds

        NOTE(review): the previous docstring described per-shape rotation,
        which this function does not do. Also, ``+=`` modifies the input
        array in place (assuming a numpy array -- confirm callers expect
        this).
    """
    # Pick the sign of the offset with probability 1/2 each.
    negative = random.random()
    neg = 1
    if negative > 0.5:
        neg = -1
    # Magnitude: uniform fraction times a random integer scale in [1, 5].
    translation = random.random() * random.randint(1, 5) * neg
    batch_data += translation
    return batch_data
|
8ad2e07ba4c4b72b840ef671541eb5b57d6df93f
| 25,586
|
def process_i_sco(input_value):
    """Return the good type (i.sco) from the input_value string.

    Currently an identity pass-through."""
    result = input_value
    return result
|
f65b2f1dbc8b43043169ca5b68fae8550ba4a90b
| 25,588
|
import sys
def tc(val_str, bytes):
    """Reinterpret a binary-digit string as a signed (two's complement) int.

    :param val_str: binary digits, e.g. "11111111"
    :param bytes: width of the value in bytes
    :return: the signed integer value
    """
    unsigned = int(val_str, 2)
    raw = unsigned.to_bytes(bytes, byteorder=sys.byteorder, signed=False)
    return int.from_bytes(raw, byteorder=sys.byteorder, signed=True)
|
42b7ada900b084e55352406dc9f8908e3a636679
| 25,589
|
import requests
import io
import csv
def get(url, **kwargs):
    """
    Read data from the requested resource.

    :param url: URL of a resource returning CSV text
    :param kwargs: forwarded as query-string parameters
    :return: csv.DictReader over the response body
    :raises requests.HTTPError: on a non-2xx status (raise_for_status)
    """
    params = {}
    params.update(kwargs)
    response = requests.get(url, params=params, stream=True)
    response.raise_for_status()
    # Wrap the full decoded body in a StringIO so csv can iterate lines.
    csv_stream = io.StringIO(response.text)
    return csv.DictReader(csv_stream)
|
5790dcc15d8ba028da046b0c882f23d6f99e3d94
| 25,591
|
def findSortList(item, mylist, num):
    """Remove the first occurrence of *item* in *mylist* at index >= num.

    Mutates *mylist* in place. Returns the removed element's index minus
    one, or None when the item is not found.
    Helper function for PossCompl.

    Fix: the unreachable ``break`` that followed the ``return`` was
    removed (dead code).
    """
    for i in range(num, len(mylist)):
        if mylist[i] == item:
            mylist.pop(i)
            return i - 1
#
|
13c808fe4dde6c52658769a66933497c68ad1f99
| 25,592
|
def _remove_links(row):
""" Takes a row of the dataframe and returns True if the
link is within the country.
"""
r = row.split("-")
if r[0].split("_")[1].strip() == r[1].split("_")[1].strip():
return False
else:
return True
|
28447e84230a42d329fa340b72d8a7a503826ede
| 25,593
|
def normalize_z_score(data):
    """Standardize *data* around 0 (zero mean, unit std).

    Returns the normalized array along with the mean and std used.
    """
    center = data.mean()
    scale = data.std()
    normalized = (data - center) / scale
    return normalized, center, scale
|
e61f6bc47693a5cfb2052ea613283d739da6621e
| 25,594
|
import os
def get_kernel_path():
    """ Return the location of the CUDA kernels on disk.

    :returns: path to km3net/kernels/ one level above this file's directory
    :rtype: string
    """
    here = os.path.dirname(os.path.realpath(__file__))
    parent = "/".join(here.split('/')[:-1])
    return parent + '/km3net/kernels/'
|
b8060d1f7e67e1fc01df381c84c3fa107279ddb5
| 25,595
|
import time
def timeit(function):
    """
    Decorator to measure the time taken to execute a function.

    Prints the elapsed CPU time and returns the wrapped function's result.

    Improvements: the wrapper now forwards keyword arguments (the old one
    silently dropped them) and preserves the wrapped function's metadata
    via functools.wraps.
    """
    import functools

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        start_time = time.process_time()
        output = function(*args, **kwargs)
        print(
            "Module: {} took: {}s".format(
                function.__name__,
                time.process_time() - start_time))
        return output
    return wrapper
|
c099166f12536bd253fcaed9a14a6f14f0825661
| 25,596
|
def quoted_object(schema, rest):
    """ All objects that pgbedrock works with will be double-quoted after
    the schema; anything in the test suite needs to behave similarly. """
    return f'{schema}."{rest}"'
|
3acf69512c7b164319fd8c8b1054787cc1187d75
| 25,597
|
def CMDdisconnected(parser, args):
    """Lists disconnected slaves.

    Prints the name of every known slave that is not currently connected
    and returns 0 as the command's exit code. Extra positional arguments
    are rejected via parser.error.
    """
    _, args, buildbot = parser.parse_args(args)
    if args:
        parser.error('Unrecognized parameters: %s' % ' '.join(args))
    for slave in buildbot.slaves:
        if not slave.connected:
            print(slave.name)
    return 0
|
9466d40e2cfb59c7a916978f3f296fb6b6734bda
| 25,598
|
def bottomFraction(image, fraction=0.05):
    """
    Return the bottom fraction of an image (rows only).

    Args:
        image: image (first axis = rows)
        fraction: float (default: 0.05)
    """
    start_row = int((1 - fraction) * image.shape[0])
    return image[start_row:]
|
f650966040b90d90a86fe42e172cc137d8c5eae4
| 25,599
|
def normalize_data(data, maxv, minv):
    """
    Normalize the data given the maximum and minimum values of each variable.

    Parameters
    ----------
    data: numpy n x m
        Numpy array comprising n realisations (instances) of m variables.
    maxv, minv: numpy m
        The maximum and minimum values of each variable. One and zero,
        resp. if the variable is categorical.

    Returns
    -------
    data: numpy n x m
        The normalized data (the input array is not modified).
    """
    result = data.copy()
    for idx in range(result.shape[1]):
        # Skip constant (or categorical) columns to avoid division by zero.
        if maxv[idx] != minv[idx]:
            result[:, idx] = (result[:, idx] - minv[idx]) / (maxv[idx] - minv[idx])
    return result
|
41873142cd9ba0d25d12c2e89783d8097b2fd9e0
| 25,600
|
def _checkCatMatch(msgCat, filtCat):
"""
"""
return ( msgCat.__name__ == filtCat.__name__ )
|
522f0ae9e3df47a7396005927ff421f59d75df1a
| 25,601
|
from typing import List
import random
def generate_random_sequences(length: int, number: int, alphabet: str) -> List[str]:
    """Generate *number* random sequences of *length* characters drawn
    uniformly from *alphabet*."""
    sequences = []
    for _ in range(number):
        sequences.append("".join(random.choice(alphabet) for _ in range(length)))
    return sequences
|
eec4be8e90441e7476f0eed8162759cf8aeea843
| 25,603
|
def rev_query(users, project, args):
    """ Get revision length, user, and page.

    Stub: not implemented yet; always returns an empty list. """
    results = []
    return results
|
99d5ac7e7a8b058c221d52f417b954545a3868c1
| 25,606
|
def load_graph_from_file(path):
    """
    Open the file with the given path and return all of its lines.
    The file must contain all of the graph's edges separated by newlines.
    Edge format is: vertex1,vertex2,weight

    Fix: the file handle is now closed via a context manager; the
    original leaked the open handle.
    """
    with open(path) as file:
        return file.read().splitlines()
|
f0e23b67a3ab7979a94a53e928d9b7cc58a5b9fd
| 25,607
|
def set_element_type(entity, dtype, shape=None):
    """Indicates that the entity is expected to hold items of the
    specified type.

    This function is a no-op at runtime. Its presence merely marks the
    data type of its argument; the staged TensorFlow ops will reflect
    and assert this data type.

    Args:
        entity: A Tensor or TensorArray.
        dtype: TensorFlow dtype value to assert for entity.
        shape: Optional shape to assert for entity.
    Returns:
        The value of entity, unchanged.
    """
    # The annotations are consumed by tooling, not at runtime.
    del dtype, shape
    return entity
|
95b4476240a384eb08467e29f5013ac4cf9b8ed6
| 25,609
|
def geom(x, p):
    """
    Probability mass function of the geometric distribution.

    :param x: (int) trial index on which the first success occurs
    :param p: (float) probability of success on each trial
    :return: (float) probability mass at x
    """
    failures = x - 1
    return p * (1 - p) ** failures
|
341aa227584fe38d8200558d4effc74274446055
| 25,610
|
import os
import base64
def generate_password():
    """ Generate a password with 80 bits of entropy.

    Ten random bytes (80 bits) are base32-encoded into 16 characters,
    any '=' padding is stripped, and the result is lower-cased.
    """
    raw = os.urandom(10)
    encoded = base64.b32encode(raw).decode('ascii')
    return encoded.rstrip('=').lower()
|
96fe46086f41293125b1fb20ccf355ffb841d446
| 25,611
|
def toobject(thrift, tobj, cls=None):
    """Convert thrift object `tobj` to Python object (with optional type
    `cls`).

    Resolution order:
      1. tobj already an instance of cls -> returned unchanged;
      2. a module-level class named like tobj's type -> its .toobject();
      3. cls provided -> cls(tobj);
      4. otherwise tobj itself.
    """
    if cls is not None and isinstance(tobj, cls):
        return tobj
    # Look up a wrapper class of the same name in this module's namespace.
    tcls = globals().get(type(tobj).__name__)
    if tcls is not None:
        return tcls.toobject(thrift, tobj, cls=cls)
    if cls is not None:
        return cls(tobj)
    return tobj
|
b13d58ad32f388f35d0763a82c6052a0c21b680e
| 25,614
|
def verify_workflow(workflow):
    """Verify whether the workflow can be executed.

    Delegates to ``workflow.verify()``, whose result specifies where any
    error occurs and why.
    """
    return workflow.verify()
|
d2158949f9088692aed85fc52725bd9938f1130b
| 25,615
|
def is_unique_bis(x):
    """Do exactly what is_unique() does.

    Args:
        x ([list]): The list you want to test
    Returns:
        [bool]: True if every element in the list occurs only once
    """
    distinct = set(x)
    return len(distinct) == len(x)
|
a4b70463e89fcd95cf6d2562f41c1b6c9db304c9
| 25,616
|
def filter_deployments_using_secret(secret_names, deployments_as_yaml):
    """Return a dictionary (name -> deployment) of deployments that
    reference any of *secret_names* through a secretKeyRef env var."""
    def _uses_watched_secret(deployment):
        # True when any container env var pulls its value from one of
        # the watched secrets.
        for container in deployment['spec']['template']['spec']['containers']:
            for env_var in container.get('env', []):
                value_from = env_var.get('valueFrom', {})
                if 'secretKeyRef' not in value_from:
                    continue
                if value_from['secretKeyRef']['name'] in secret_names:
                    return True
        return False

    return {
        deployment['metadata']['name']: deployment
        for deployment in deployments_as_yaml
        if _uses_watched_secret(deployment)
    }
|
6fbf70689862269710039344531a45d866fb54f6
| 25,617
|
from typing import Counter
def count_chars(molecule):
    """Find the counts of the most and least common characters."""
    tallies = Counter(molecule).most_common()
    # most_common() sorts descending, so the extremes sit at both ends.
    return tallies[0][1], tallies[-1][1]
|
8c9c8a0eea03b108b9aa8e1c6503de565f1c6437
| 25,618
|
def oui_ou_non(question):
    """Ask *question* until the player answers O (oui/yes) or N (non/no).

    Returns True if the answer was yes (O).

    NOTE(review): ``reponse in "ON"`` also matches the empty string
    (``"" in "ON"`` is True), so just pressing Enter returns False --
    confirm an empty answer is meant to count as "no".
    """
    # Infinite loop; we only leave via `return ...` once we get an
    # answer that suits us:
    while True:
        reponse = input(question).upper()
        if reponse in "ON":
            return reponse == 'O'
|
30d88d363062d2b6454dda2f526b94bc813f297b
| 25,619
|
def _is_namespace_param(namespace: str) -> bool:
"""Returns whether a dataset namespace is a parameter"""
return namespace.lower().startswith("param")
|
c6e4a847d0d5d60bd670cd2a1004f83d4b89324d
| 25,620
|
from pathlib import Path
import os
def parent_dir(fn):
""" Get parent directory of existed filename """
return str(Path(fn).parent.absolute()) + os.sep
|
112577e355fa84485e65f44665a9e405fa252b28
| 25,621
|
def common_filenames(file_list1, file_list2):
    """
    Find the elements common to two lists.

    :param file_list1: a list to compare
    :param file_list2: a list to compare
    :return: set of common items
    """
    return set(file_list1) & set(file_list2)
|
29ecd33dd09a33ec42bcad796a96e184377273ef
| 25,622
|
def insertion_sort_recursive(integers):
    """Return a sorted copy of *integers* using recursive insertion sort.

    The input list is not modified.

    Fixes the inner-loop condition order: ``arr[n] < arr[n-1]`` was
    evaluated before the ``n > 0`` guard, so at n == 0 the comparison
    wrapped around to arr[-1] (the last element), and an empty input
    raised IndexError.
    """
    integers_clone = list(integers)
    def helper(arr, n):
        # Recursively sort the first n elements, then sift arr[n] down
        # into its place.
        if n > 0:
            helper(arr, n - 1)
            while n > 0 and arr[n] < arr[n - 1]:
                arr[n], arr[n - 1] = arr[n - 1], arr[n]
                n -= 1
    helper(integers_clone, len(integers_clone) - 1)
    return integers_clone
|
7045b07c93a00970d9df10880e4460a0ecc8118b
| 25,623
|
import os
def check_previous_sim(name, output_path):
    """ Makes sure that a previous simulation exists.

    Keeps prompting the user for a simulation name until the directory
    ``output_path + name`` exists; typing "exit" terminates the process.

    :param name: initial simulation name to try
    :param output_path: directory prefix joined to the name by plain string
        concatenation (so it should end with a path separator -- confirm)
    :return: a simulation name whose directory exists
    """
    while True:
        # break the loop if the simulation exists, otherwise try to get correct name
        if os.path.isdir(output_path + name):
            break
        else:
            print("No directory exists with name/path: " + output_path + name)
            name = input("\nPlease type the correct name of the simulation or type \"exit\" to exit: ")
            print()
            if name == "exit":
                exit()
    return name
|
21483663ff93b4a659b4dc7a03de35588a49cc18
| 25,624
|
import subprocess
def has_changes() -> bool:
    """
    Invoke git in a subprocess to check if we have
    any uncommitted changes in the local repo.
    Returns:
        bool: True if uncommitted changes, else False.
    Raises:
        subprocess.CalledProcessError: if git exits non-zero (check=True).
    """
    # --porcelain prints one line per changed file and nothing when the
    # tree is clean, so any non-empty output means uncommitted changes.
    status = (
        subprocess.run(
            "git status --porcelain",
            shell=True,
            check=True,
            stdout=subprocess.PIPE,
        )
        .stdout.decode()
        .strip()
    )
    return len(status) > 0
|
b36ec35bb7d44c2e7e3ce6fa3bf096c751389fd5
| 25,626
|
import torch
def gaussian_similarity_penalty(x_hat, context, eps=1e-4):
    """
    Penalizes generators which can be approximated well by a Gaussian
    Parameters
    ----------
    x_hat: torch.tensor
        generated data
    context: torch.tensor
        context data
    eps: float
        scale of the random diagonal jitter added to the covariance
        estimate for numerical stability
    Returns
    -------
    torch.tensor
        mean log-likelihood of the concatenated data under a Gaussian
        fitted (with detached parameters) to that same data.
        NOTE(review): higher = more Gaussian-like; the caller must negate
        or weight this to use it as a penalty -- confirm sign convention.
    """
    # Concatenate generated and context features along dim=1.
    x = torch.cat([x_hat, context], dim=1)
    mean = x.mean(0, keepdim=True)
    # Biased covariance estimate plus random diagonal jitter
    # (rand_like makes this term stochastic -- confirm intended vs. eps*I).
    cov = x.t().mm(x) / x.size(0) - mean.t().mm(mean) + eps * torch.rand_like(x[0]).diag()
    # detach(): gradients flow only through log_prob's input x,
    # not through the fitted Gaussian parameters.
    gaussian = torch.distributions.MultivariateNormal(mean.detach(), cov.detach())
    loglik = gaussian.log_prob(x).mean()
    return loglik
|
fa898f45ba59c856a9f50dd459b3453ad424c090
| 25,628
|
import requests
def _test_webservice(route_root='http://0.0.0.0:5000/'):
    """Test the webservice.
    Note: Of course, first launch it!

    Fires a fixed set of GET/POST requests at *route_root* and asserts on
    the JSON responses; raises AssertionError on any mismatch.
    """
    def get_json_response_for(url_suffix='', json_payload=None):
        # POST when a JSON body is supplied, plain GET otherwise.
        method = 'POST' if json_payload else 'GET'
        r = requests.request(method, url=route_root + url_suffix, json=json_payload)
        try:
            return r.json()
        except:
            # HACK: bare except -- a non-JSON response falls back to the raw
            # Response object so the failing assert shows what came back.
            return r
    assert get_json_response_for('?attr=pong') == {'_result': 'pong'}
    assert get_json_response_for('?attr=pong&x=10') == {'number': 10, 'thing': 'pongs'}
    # can also put x in the json:
    assert get_json_response_for('?attr=pong', json_payload={'x': 10}) == {'number': 10, 'thing': 'pongs'}
    assert get_json_response_for('?attr=pong&x=10', json_payload={'arr': [1, 2, 3]}) == {
        'something': {'0': 'boo', '1': 'boo', '2': 'boo'},
        'vm': {'0': 11, '1': 12, '2': 13}}
    assert get_json_response_for('?attr=pong&x=10&arr=1,2,3') == {'something': {'0': 'boo', '1': 'boo', '2': 'boo'},
                                                                  'vm': {'0': 11.0, '1': 12.0, '2': 13.0}}
|
f1b91266b5c5763295f141185d00e5d95565f759
| 25,629
|
def inject(*components):
    """Injects web components.
    >>> inject(
    >>>    '<snap>a</snap>',
    >>>    '<snap>b</snap>'
    >>> )
    >>> <snap>a</snap><snap>b</snap>
    Args:
        components (WebComponents): The web components to inject.
    Returns:
        str: The string with injected web components.
    """
    # Falsy components (None, '') are dropped before rendering.
    rendered = [str(component) for component in components if component]
    return ''.join(rendered)
|
5052fcf9dd6b161a7bd0d473c997b8315ed31ba5
| 25,630
|
def split_str(single_str: str) -> list:
    """Split the combined property-data string (no. of bedrooms,
    bathrooms, etc.) on its '·' separator into a list of parts."""
    separator = "·"
    return single_str.split(separator)
|
8576c4b047c9319effc66ed744143433b06787e4
| 25,632
|
import uuid
import os
def gen_random_name():
    """Return a random 32-character hex name for a temp file.

    Built from 16 bytes of OS entropy run through a version-4 UUID.
    """
    entropy = os.urandom(16)
    return uuid.UUID(bytes=entropy, version=4).hex
|
b3981ede7f8d7bd87904f17b6cb5bd3bb40d6f8c
| 25,635
|
import json
def generate_device_update_payload(input_record):
    """Dynamically build the device update payload.

    Looks at the device record read from the input file and keeps only
    populated keys — the JSON payload must not include empty keys.
    The serial_number, blueprint_name and username keys are always
    dropped: they identify the device / are resolved separately, and
    sending them empty or NULL makes Kandji return an error.
    """
    excluded_keys = ("serial_number", "blueprint_name", "username")
    payload = {
        key: value
        for key, value in input_record.items()
        if value != "" and key not in excluded_keys
    }
    return json.dumps(payload)
|
0e00572153059e14fc6d0435f8d9f2e38e5d6c00
| 25,636
|
import random
def spread(topic):
"""
Return a fictional spread in bps, tight triangular
distribution in most cases, except for Fabrikam where
the spreads are more scattered, higher, and with a longer tail.
"""
if topic.startswith("Fabrikam"):
if " 1Y CDS Spread" in topic:
return random.triangular(140, 280, 180)
elif " 3Y CDS Spread" in topic:
return random.triangular(200, 400, 300)
else:
assert False
else:
if " 1Y CDS Spread" in topic:
return random.triangular(140, 150)
elif " 3Y CDS Spread" in topic:
return random.triangular(150, 160)
else:
assert False
|
7c1f559c516396564ac618767f79630f6ce515b8
| 25,637
|
def create_dim(a, dim=''):
    """create dimension array for n-nested array

    Mirrors the input's nesting structure, replacing each scalar with a
    '-'-joined string of the indices that address it.

    example:
    >>> create_dim([[1,2],[3,4],[5,6,[7,8],]])
    [['0-0', '0-1'], ['1-0', '1-1'], ['2-0', '2-1', ['2-2-0', '2-2-1']]]
    >>> create_dim(5)
    ''
    >>> create_dim([5,5])
    ['0', '1']
    """
    if not isinstance(a, list):
        # Leaf: the accumulated index path labels this position.
        return dim
    prefix = dim + '-' if dim else ''
    return [create_dim(item, prefix + str(idx)) for idx, item in enumerate(a)]
|
2a5fbea0ad0a26c81d90551a1e907a31c6362192
| 25,638
|
import logging
def get_meas_func(self, meas_func):
    """
    Factory that builds a property getter for the measurement function
    *meas_func* (one of the keys of ``self.MEAS_FUNCS``, defined on the
    instrument class, e.g. hp4284A).

    Parameters
    ----------
    meas_func : str
        Name of measurement function; all keys of self.MEAS_FUNCS are valid

    Returns
    -------
    callable
        A getter (taking the instrument instance) that returns the
        primary/secondary measurement tuple for *meas_func*.
    """
    def property_getter(self):
        """
        Query the instrument and return the measured quantities.

        Returns
        -------
        tuple
            Tuple of floats; primary and secondary measurement quantities
            according to the respective *meas_func*

        Raises
        ------
        KeyError
            *meas_func* is unknown, i.e. not a key of self.MEAS_FUNCS
        RuntimeError
            The measurement status returned by the instrument indicates
            an error
        """
        # Check if *meas_func* is valid
        if meas_func not in self.MEAS_FUNCS:
            raise KeyError(f"Unknown measurment function {meas_func}")
        # Check current function; if needed change functions.
        # NOTE(review): `self.get_meas_func()` here takes no argument, so it
        # appears to resolve to a different instrument-query method than this
        # factory (e.g. shadowed on the instance) — confirm against the class.
        if meas_func != self.get_meas_func().strip():
            logging.info(f"Setting measurement function to {meas_func}")
            self.set_meas_func(meas_func)
        # Check if a manual trigger is needed and trigger if so
        self._check_trigger()
        # Get primary and secondary measurement quantities as well as the
        # measurement status; the device replies as a comma-separated string.
        primary_meas, secondary_meas, meas_status = self.get_value().strip().split(',')
        # '+0' is the success status; anything else is mapped through
        # self.ERROR_STATES (or reported as unknown) and raised.
        if meas_status != '+0':
            if meas_status not in self.ERROR_STATES:
                err_msg = f"Unknown measurement status {meas_status} retrieved"
            else:
                err_msg = self.ERROR_STATES[meas_status]
            raise RuntimeError(err_msg)
        return (float(primary_meas), float(secondary_meas))
    return property_getter
|
1e106589ff1a8b72870065a38659d5e4653da931
| 25,639
|
import copy
def dict_path(from_d,to_d={},l=[]):
"""
Returns a dictionary with the path in which each of the keys is found
Parameters:
from_d : dict
Dictionary that contains all the keys, values
to_d : dict
Dictionary to which the results will be appended
Example:
dict_path({'level1':{'level2':{'level3':'value'}}})
Returns
{'level1': [],
'level2': ['level1'],
'level3': ['level1', 'level2']
}
"""
for k,v in list(from_d.items()):
if isinstance(v,dict):
to_d[k]=l
_l=copy.deepcopy(l)
_l.append(k)
to_d=dict_path(from_d[k],to_d,_l)
else:
to_d[k]=l
_to_d=to_d.copy()
to_d={}
return _to_d
|
a80363e99deb199111c9e4b3d4bcd9d3c65d4a67
| 25,640
|
def get_article_templates(article, user):
    """
    example of functions to get custom templates
    It may depend on article or user
    """
    filenames = ('standard.html', 'homepage.html', 'blog.html', 'standard_en.html')
    labels = ('Standard', 'Homepage', 'Blog', 'English')
    return tuple(zip(filenames, labels))
|
3a6d27e6046b81407d66de9ed5bc76cdf2f8ec20
| 25,641
|
def filter_param_cd(df, code):
    """Mask parameter values whose status code does not match *code*.

    For every column pair ``<param>`` / ``<param>_cd`` in *df*, values of
    ``<param>`` are replaced with NaN wherever ``<param>_cd`` does not
    contain *code* (e.g. ``'A'`` for approved records).

    Parameters
    ----------
    df : pandas.DataFrame
        Frame with value columns and matching ``_cd`` status columns.
    code : str
        Pattern passed to ``Series.str.contains`` (regex semantics).

    Returns
    -------
    pandas.DataFrame
        A masked copy of *df*; the ``_cd`` columns themselves are untouched.
    """
    approved_df = df.copy()
    # Slice off the exact suffix: str.strip('_cd') strips a *character set*
    # and would also eat leading/trailing 'c'/'d'/'_' from the parameter
    # name (e.g. 'discharge_cd' -> 'ischarge' -> KeyError).
    suffix = '_cd'
    params = [col[:-len(suffix)] for col in df.columns if col.endswith(suffix)]
    for param in params:
        # Keep values only where the status code matches; others become NaN.
        approved = approved_df[param + suffix].str.contains(code)
        approved_df[param] = approved_df[param].where(approved)
    return approved_df
|
7fe027e36e244442d35cc3ee314dc85d23807b4d
| 25,642
|
def keep_intersection_of_columns(train, test):
    """ Remove the columns from test and train set that are not in
    both datasets.

    Columns are returned in *train*'s column order, which makes the
    output deterministic — ``list(set(...))`` ordering depends on the
    per-process string hash seed.

    params
    ------
    train: pd.DataFrame containing the train set.
    test: pd.DataFrame containing the test set.

    return
    ------
    train and test where train.columns==test.columns by
    keeping only columns that were present in both datasets.
    """
    test_cols = set(test.columns)
    shared_cols = [col for col in train.columns if col in test_cols]
    return train[shared_cols], test[shared_cols]
|
750c17d874b7cfad3eb7e48b84323aa25fd251da
| 25,643
|
def rst_heading(value, arg):
    """Provides an underline for restructured text heading.

    Syntax::

        {{ value|rst_heading:"=" }}

    Results in:

    ``value``
    ``=====``
    """
    underline = arg * len(value)
    return f"{value}\n{underline}"
|
5e6379dccd5c15b24e0688b8f95d181a7e8b1783
| 25,644
|
def isdigit(uni_ch):
    """Return True if the unicode character is a decimal digit ('0'-'9')."""
    return u'\u0030' <= uni_ch <= u'\u0039'
|
014141dd4e86af89cd8c07f5c49a4b00d45a238b
| 25,645
|
def summary_candidate_ranking_info(top_ks, found_info, data_size):
    """Get a string for summarizing the candidate ranking results

    Parameters
    ----------
    top_ks : list of int
        Options for top-k evaluation, e.g. [1, 3, ...].
    found_info : dict
        Storing the count of correct predictions
    data_size : int
        Size for the dataset

    Returns
    -------
    string : str
        String summarizing the evaluation results
    """
    strict_line = '[strict]'
    for k in top_ks:
        strict_line += f" acc@{k:d}: {found_info[f'top_{k:d}'] / data_size:.4f}"
    strict_line += f" gfound {found_info['ground'] / data_size:.4f}\n"
    molvs_line = '[molvs]'
    for k in top_ks:
        molvs_line += f" acc@{k:d}: {found_info[f'top_{k:d}_sanitized'] / data_size:.4f}"
    molvs_line += f" gfound {found_info['ground_sanitized'] / data_size:.4f}\n"
    return strict_line + molvs_line
|
877cbeaf6be01a0be8daf54ccdba88338c08343b
| 25,651
|
import typing
import os
def rec_join_path(path_list: typing.List[str]) -> str:
    """
    Join components into a path.

    Much like ``os.path.join(*path_list)``, but an empty list yields ''.
    Unlike the previous implementation, the caller's list is NOT mutated
    (it used to ``pop`` every element off *path_list* as a side effect).
    """
    if not path_list:
        return ''
    return os.path.join(*path_list)
|
7424edf6b8fb93dcdee9946147aa00f807701c3b
| 25,652
|
import os
def is_newer(file1, file2, strict=True):
"""
Determine if file1 has been modified after file2
Parameters
----------
file1 : str
File path. May not exist, in which case False is returned.
file1 : str
File path. Must exist.
strict : bool
Use strict inequality test (>). If False, then returns True for files
with the same modified time.
Returns
-------
newer : bool
True if file1 is strictly newer than file 2
"""
try:
t1 = os.path.getmtime(file1)
t2 = os.path.getmtime(file2)
except FileNotFoundError:
return False
if strict:
return t1 > t2
return t1 >= t2
|
23653223d132facfdb4f51ebe9c21a2bc8f54ffc
| 25,654
|
async def eval_issue_1(serialized_citation):
    """
    Return 1 if the `npl_publn_id` is in the `when` field, else None.

    See: https://github.com/cverluise/SciCit/issues/1

    :param serialized_citation: dict
    :return: int or None
    """
    if "when" not in serialized_citation:
        return None
    matched = str(serialized_citation["npl_publn_id"]) in serialized_citation["when"]
    return 1 if matched else None
|
2bf50fe83c59a99181dcbbf9a16854d93cd0210f
| 25,655
|
def predict_churn_single(self, data):
    """
    Essentially the run method of PredictTask: predict on a single
    record and return the probability of the positive (churn) class.
    """
    prediction = self.model.predict([data])
    # The last entry of the first (and only) row is the positive-class score.
    return prediction[0][-1]
|
de3c4ebbd6719a83305755300f895941bd1df515
| 25,656
|
def _require_positive_y(y):
"""Make targets strictly positive"""
offset = abs(y.min()) + 1
y = y + offset
return y
|
fa38ed8cc729e185ce97a6f63abd3a39bebcf6d9
| 25,657
|
def warmup():
    """
    Handle a warmup request so the hosting platform does not log a warning.
    """
    response_body = 'OK'
    return response_body
|
e64007ac6656d771b34b9489fa7505de7d010fce
| 25,658
|
def list_to_ranges(s):
    """
    Collapse an iterable of ints into a compact comma-separated
    range string.

    >>> list_to_ranges([])
    ''
    >>> list_to_ranges([1])
    '1'
    >>> list_to_ranges([1,2])
    '1-2'
    >>> list_to_ranges([1,2,3])
    '1-3'
    >>> list_to_ranges([1,2,3,5])
    '1-3,5'
    >>> list_to_ranges([1,2,3,5,6,7])
    '1-3,5-7'
    >>> list_to_ranges(range(1,4001))
    '1-4000'
    """
    def render(start, end):
        # Single values print bare, runs print as 'start-end'.
        return str(start) if start == end else "%d-%d" % (start, end)

    pieces = []
    run_start = run_end = None
    for value in sorted(s):
        if run_end is not None and value == run_end + 1:
            # Extends the current consecutive run.
            run_end = value
        else:
            if run_start is not None:
                pieces.append(render(run_start, run_end))
            run_start = run_end = value
    if run_start is not None:
        pieces.append(render(run_start, run_end))
    return ",".join(pieces)
|
1adccba785970a025e6474631a4b8d4f21dd35ca
| 25,659
|
from typing import List
def compute_grade(homework_scores: List[float], exam_scores: List[float]) -> float:
"""
pre: homework_scores or exam_scores
pre: all(0 <= s <= 1.0 for s in homework_scores + exam_scores)
post: 0 <= __return__ <= 1.0
"""
# make exams matter more by counting them twice:
all_scores = homework_scores + exam_scores + exam_scores
return sum(all_scores) / len(all_scores)
|
f922b92473b42b244753685e6a348406bffd6b9e
| 25,660
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.