| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def _get_client(app):
"""Returns a client instance for an App.
If the App already has a client associated with it, simply returns
it. Otherwise creates a new client, and adds it to the App before
returning it.
Args:
app: A Firebase App instance (or ``None`` to use the default App).
Returns:
Client: A client for the specified App instance.
Raises:
ValueError: If the app argument is invalid.
"""
return _utils.get_app_service(app, _AUTH_ATTRIBUTE, Client)
| 9,000
|
def get_job_config_build_for_branch(**kwargs):
"""pass kwargs to JobConfig constructor"""
return JobConfig(
type=JobType.copr_build,
trigger=JobConfigTriggerType.commit,
branch="build-branch",
scratch=True,
**kwargs,
)
| 9,001
|
def get_connection_string_from_config_file(cfg_src, db_cfg_key):
"""
    Builds a database connection string from the named connection entry in a
    YAML configuration file. Returns None if a required parameter is missing.
"""
# reading complete configuration
with open(cfg_src, 'r') as yml_file:
cfg = yaml.safe_load(yml_file)
# looking for specified connection name
for connection_cfg in cfg['connections']:
if db_cfg_key in connection_cfg:
db_cfg = connection_cfg[db_cfg_key]
# reading distinct configuration parameters
try:
db_engine = db_cfg['db_engine']
user = db_cfg['user']
password = db_cfg['password']
host = db_cfg['host']
port = db_cfg['port']
database = db_cfg['database']
except KeyError as e:
print(
"Unable to retrieve parameter '%s' "
"from configuration file." % e.args[0])
return
except Exception:
print("Unable to read configuration file")
return
# setting up connection string
conn_string = "{0}://{1}:{2}@{3}:{4}/{5}".format(
db_engine, user, password, host, port, database)
return conn_string
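# Illustrative usage sketch (not part of the original module): write a minimal,
# hypothetical YAML config and build a connection string from it. Assumes PyYAML
# is imported at module level, as the function above already requires.
yaml_text = (
    "connections:\n"
    "  - my_db:\n"
    "      db_engine: postgresql\n"
    "      user: alice\n"
    "      password: secret\n"
    "      host: localhost\n"
    "      port: 5432\n"
    "      database: sales\n"
)
with open("db_config.yml", "w") as f:
    f.write(yaml_text)
print(get_connection_string_from_config_file("db_config.yml", "my_db"))
# -> postgresql://alice:secret@localhost:5432/sales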
| 9,002
|
def _decomp_0_matrices(
kak: 'cirq.KakDecomposition',
atol: float = 1e-8,
) -> Tuple[Sequence[Tuple[np.ndarray, np.ndarray]], complex]:
"""Returns the single-qubit matrices for the 0-SQRT_ISWAP decomposition.
Assumes canonical x, y, z and (x, y, z) = (0, 0, 0) within tolerance.
"""
# Pairs of single-qubit unitaries, SQRT_ISWAP between each is implied
# Only a single pair of single-qubit unitaries is returned here so
# _decomp_to_operations will not insert any sqrt-iSWAP gates in between
return [
(
kak.single_qubit_operations_after[0] @ kak.single_qubit_operations_before[0],
kak.single_qubit_operations_after[1] @ kak.single_qubit_operations_before[1],
)
], kak.global_phase
| 9,003
|
def repo_crc32c():
"""
Link crc32c library.
"""
maybe(
http_archive,
name = "com_google_crc32c", # 1.1.2
build_file = "@toolbase//third_party/crc32c:crc32c.BUILD",
sha256 = "a40244a21b9ea50808b214e127e53500f3ef50defca2db2f7125cf95008431bd",
strip_prefix = "crc32c-02e65f4fd3065d27b2e29324800ca6d04df16126",
urls = [
"https://github.com/google/crc32c/archive/02e65f4fd3065d27b2e29324800ca6d04df16126.zip",
],
)
| 9,004
|
def test_ranking(preds, target):
""" test that ranking function works as expected """
for p, t in zip(preds, target):
scipy_ranking = [rankdata(p.numpy()), rankdata(t.numpy())]
tm_ranking = [_rank_data(p), _rank_data(t)]
assert (torch.tensor(scipy_ranking[0]) == tm_ranking[0]).all()
assert (torch.tensor(scipy_ranking[1]) == tm_ranking[1]).all()
| 9,005
|
def test_get_parent_process(requests_mock):
"""
Given - connection id and ptid to get its parent.
When -
Running get_parent_process function.
Then -
The process parent should be returned.
"""
api_raw_response = util_load_json('test_files/get_parent_process.json')
requests_mock.post(BASE_URL + '/api/v2/session/login', json={'data': {'session': 'session-id'}})
requests_mock.get(BASE_URL + '/plugin/products/threat-response/api/v1/conns/remote:host:123:/processtrees/2',
json=api_raw_response)
args = {'connection_id': 'remote:host:123:',
'ptid': '2'}
human_readable, outputs, _ = TaniumThreatResponseV2.get_parent_process(MOCK_CLIENT, args)
assert 'Parent process for process with PTID 2' in human_readable
assert outputs.get('Tanium.ProcessParent(val.id === obj.id)', [{}])[0].get('id') == "1"
| 9,006
|
def concave(x, m):
"""Shape function."""
assert shape_args_ok(x, m)
result = 1.0
for i in range(1, len(x) - m + 1):
result *= math.sin(x[i - 1] * math.pi / 2.0)
if m != 1:
result *= math.cos(x[len(x) - m] * math.pi / 2.0)
return correct_to_01(result)
| 9,007
|
def get_filenames(is_training, data_dir):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, 'train-%05d-of-01024' % i)
for i in range(_NUM_TRAIN_FILES)]
else:
return [
os.path.join(data_dir, 'validation-%05d-of-00128' % i)
for i in range(_NUM_VAL_FILES)]
| 9,008
|
def norm_img(img):
"""
    Normalize an image per channel (zero mean, unit variance).
    :param img: (H, W, C)
    :return:
    norm_img: (H, W, C)
"""
height, width, channel = img.shape
img = np.reshape(img, (height * width, channel)) # (height * width, channel)
mean = np.mean(img, axis=0, keepdims=True) # (1, channel)
center = img - mean # (height * width, channel)
var = np.sum(np.power(center, 2), axis=0, keepdims=True) / (height * width) # (1, channel)
std = np.sqrt(var) # (1, channel)
_norm_img = center / std # (height * width, channel)
_norm_img = np.reshape(_norm_img, (height, width, channel))
return _norm_img
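# Illustrative usage sketch (not part of the original module): per-channel
# standardization of a random 32x32 RGB image; each channel of the result has
# zero mean and unit variance.
import numpy as np
img = np.random.rand(32, 32, 3)
out = norm_img(img)
print(out.shape, out.mean(axis=(0, 1)), out.std(axis=(0, 1)))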
| 9,009
|
def test_positional_only_and_arg_invalid_calls(a, b, /, c):
"""
>>> test_positional_only_and_arg_invalid_calls(1, 2, 3)
>>> test_positional_only_and_arg_invalid_calls(1, 2, c=3)
>>> test_positional_only_and_arg_invalid_calls(1, 2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_positional_only_and_arg_invalid_calls() ... positional argument...
>>> test_positional_only_and_arg_invalid_calls(1) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_positional_only_and_arg_invalid_calls() ... positional arguments...
>>> test_positional_only_and_arg_invalid_calls(1,2,3,4) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_positional_only_and_arg_invalid_calls() takes ... positional arguments ...4 ...given...
"""
| 9,010
|
def propagator_radial_diffusion(n,dim_rad,rate,wrad,lagtime,
lmax,bessel0_zeros,bessels,):
"""calculate propagator for radial diffusion as matrix exponential
n -- dim_trans, dimension transition matrix, usually number of bins in z-direction
    dim_rad -- number of radial bins, always equal to len(redges)
rate -- rate matrix for 1-D diffusion in z-direction, in [1/dt]
wrad -- ln Drad, radial diffusion coefficient, dimension n
Drad = exp(wrad), in [dr**2/dt]
lagtime -- should be in units [dt]
    bessel0_zeros -- first lmax zeros, no unit
bessels -- dimension lmax x dim_rad, no unit, in unit 'per r-bin'
rate_l -- rate matrix including sink equation, in [1/dt]
propagator -- no unit, is per r-bin per z-bin"""
rmax = np.float64(dim_rad) # in units [dr]
# initialize arrays
rate_l = np.zeros((n,n),dtype=np.float64) # N x N
propagator = np.zeros((dim_rad,n,n),dtype=np.float64) # dim_rad x N x N
# set up sink
sink = np.zeros((n),dtype=np.float64) # N
# loop over l (index of Bessel function)
for l in range(lmax):
sink = np.exp(wrad)*bessel0_zeros[l]**2/rmax**2 # sink term D_par(i) * (b_l)**2
# in units np.exp(wrad) [dr**2/dt] / rmax**2 [dr**2], so in units [1/dt]
rate_l[:,:] = rate[:,:] # take rate matrix for 1-D diffusion
rate_l.ravel()[::n+1] -= sink # and add sink term
mat_exp = scipy.linalg.expm(lagtime*rate_l) # matrix exponential, no unit
# increment propagator by solution of sink equation for each l
# propagator to arrive in radial bin k, contribution from Bessel function l
# bessels is 'per r-bin', no unit
# mat_exp is 'per z-bin', no unit
# so propagator is 'per r-bin per z-bin', no unit
for k in range(dim_rad):
propagator[k,:,:] += bessels[l,k] * mat_exp[:,:] # no unit
# TODO normalize? some probability might flow away after long times
#propagator /= np.sum(np.sum(propagator,axis=0),axis=0)
return propagator
| 9,011
|
def convert_examples_to_features(examples, use_label):
"""Loads a data file into a list of `InputBatch`s."""
features = []
line_tags = []
for (ex_index, example) in enumerate(examples):
if use_label:
labels = example.labels
else:
labels = ['O'] * len(example.units)
samples = []
context, tokens, predict_mask, label_ids = [], [], [], []
for i, w in enumerate(example.units):
if w == '[MASK]':
sub_words = ['[MASK]']
else:
sub_words = tokenizer.tokenize(w)
if not sub_words:
sub_words = ['[UNK]']
tokens.extend(sub_words)
predict_mask.append(1)
predict_mask.extend([0] * (len(sub_words) - 1))
label_ids.append(label_map[labels[i]])
label_ids.extend([0] * (len(sub_words) - 1))
while len(context) + len(tokens) >= max_seq_length - 2:
l = max_seq_length - len(context) - 2
samples.append(
[['[CLS]'] + context + tokens[:l] + ['[SEP]'], [0] * (len(context) + 1) + predict_mask[:l] + [0],
[0] * (len(context) + 1) + label_ids[:l] + [0]])
if not context:
line_tags.append(1)
else:
line_tags.append(0)
context = tokens[max(0, l - max_seq_length // 2):l]
            tokens, predict_mask, label_ids = tokens[l:], predict_mask[l:], label_ids[l:]
if sum(predict_mask):
samples.append([['[CLS]'] + context + tokens + ['[SEP]'], [0] * (len(
context) + 1) + predict_mask + [0], [0] * (len(context) + 1) + label_ids + [0]])
if not context:
line_tags.append(1)
else:
line_tags.append(0)
for s in samples:
input_ids = tokenizer.convert_tokens_to_ids(s[0])
input_mask = [1] * len(input_ids)
padding_length = max_seq_length - len(input_ids)
zero_padding = [0] * padding_length
input_ids += zero_padding
input_mask += zero_padding
predict_mask = s[1] + zero_padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(predict_mask) == max_seq_length
if use_label:
label_ids = s[2] + zero_padding
assert len(label_ids) == max_seq_length
one_hot_labels = np.eye(
len(label_map), dtype=np.float32)[label_ids]
else:
one_hot_labels = None
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask,
predict_mask=predict_mask, one_hot_labels=one_hot_labels))
assert len(examples) == sum(line_tags), logger.error(
'{} != {}'.format(len(examples), sum(line_tags)))
return features, line_tags
| 9,012
|
def broadcast_to_rank(t, rank, axis = -1):
"""Appends dimensions to tf.Tensor `t` at axis `axis` to match rank `rank`."""
rank_t = t.shape.rank # Assumes ranks are known at compile time (static).
for _ in range(rank - rank_t):
t = tf.expand_dims(t, axis=axis)
return t
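# Illustrative usage sketch (not part of the original module): append trailing
# singleton dimensions to a (2, 3) tensor until it reaches rank 4.
import tensorflow as tf
t = tf.zeros([2, 3])
print(broadcast_to_rank(t, rank=4).shape)  # (2, 3, 1, 1)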
| 9,013
|
def operator(func):
"""
Help decorator to rewrite a function so that
it returns another function from it.
"""
@wraps(func)
def wrapper(*args, **kwargs):
def operator(stream):
return func(stream, *args, **kwargs)
return operator
return wrapper
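# Illustrative usage sketch (not part of the original module): the decorated
# function is first called with its extra arguments and returns a callable that
# is then applied to the stream. The `take` operator below is hypothetical.
@operator
def take(stream, n):
    return stream[:n]

first_three = take(3)                 # returns an operator expecting a stream
print(first_three([1, 2, 3, 4, 5]))   # [1, 2, 3]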
| 9,014
|
def json(body, charset="utf-8", **kwargs):
"""Takes JSON formatted data, converting it into native Python objects"""
return json_converter.loads(text(body, charset=charset))
| 9,015
|
def download_edictos(
data_dir=f"{os.environ['HOME']}/data/corteconstitucional/edictos",
):
"""
    may need to be run several times; sometimes it claims that it cannot find downloaded PDFs
:param data_dir:
:return:
"""
url = "https://www.corteconstitucional.gov.co/secretaria/edictos/"
download_dir = f"{data_dir}/downloads"
os.makedirs(download_dir, exist_ok=True)
wd = build_chrome_driver(download_dir, headless=True)
hrefs = get_hrefs(url, wd)
old_file = f"{data_dir}/documents.jsonl"
found_existing_documents = os.path.isfile(old_file)
if found_existing_documents:
new_file = old_file.split(".jsonl")[0] + "_updated.jsonl"
old_docs = list(data_io.read_jsonl(old_file))
else:
old_docs = []
new_file = old_file
try:
data_io.write_jsonl(
new_file, generate_raw_docs(old_docs, hrefs, wd, download_dir)
)
except Exception as e:
traceback.print_exc()
print("shit happened")
finally:
if found_existing_documents:
shutil.move(new_file, old_file)
| 9,016
|
def read_corpus(file_path, encoding=ENCODING, **kwargs):
"""
Create a Linguistica object with a corpus data file.
:param file_path: path of input corpus file
:param encoding: encoding of the file at *file_path*. Default: ``'utf8'``
:param kwargs: keyword arguments for parameters and their values.
"""
return Lexicon(file_path=file_path, wordlist_file=False, encoding=encoding,
**kwargs)
| 9,017
|
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
pageRanks = {page: 0 for page in corpus}
# Randomly select a page to start
currPage = random.choice(list(corpus.keys()))
for _ in range(n):
pageRanks[currPage] += 1
model = transition_model(corpus, currPage, damping_factor)
        # Sample the next page according to the transition-model probabilities
        currPage = random.choices(list(model.keys()), weights=list(model.values()), k=1)[0]
return {page: rank / n for page, rank in pageRanks.items()}
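# Illustrative usage sketch (not part of the original module). It assumes no
# transition_model is defined elsewhere; a minimal, hypothetical version is
# sketched here so the snippet runs on its own with a tiny three-page corpus.
import random

def transition_model(corpus, page, damping_factor):
    links = corpus[page] or set(corpus.keys())
    probs = {p: (1 - damping_factor) / len(corpus) for p in corpus}
    for p in links:
        probs[p] += damping_factor / len(links)
    return probs

corpus = {"1.html": {"2.html"}, "2.html": {"1.html", "3.html"}, "3.html": {"2.html"}}
ranks = sample_pagerank(corpus, damping_factor=0.85, n=10_000)
print(ranks)  # estimated PageRank values, summing to 1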
| 9,018
|
def parse_args() -> argparse.Namespace:
"""
Parse program arguments
:return: Parser values
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument("-a", action="store_true")
parser.add_argument("-c", action="store_true")
parser.add_argument("-x", action="store_true")
parser.add_argument("-z", action="store_true")
parser.add_argument("-s", metavar="SET", nargs="*", type=str)
parser.add_argument("--skip-keys", action="store_true")
parser.add_argument("--skip-sets", metavar="SET", nargs="*", type=str)
parser.add_argument("--skip-cache", action="store_true")
# Ensure there are args
if len(sys.argv) < 2:
parser.print_usage()
sys.exit(1)
return parser.parse_args()
| 9,019
|
def compute_participants(matches, challonge_data):
"""Compute series participants.
Iterate all matches and players to create a graph.
Apply connected components algorithm to resolve distinct
participant groups over all matches.
Sort participant groups by number of wins to correlate
with Challonge participant data (which also includes number
of wins).
Note that edge cases exist that are not covered. For example,
teams sometimes field a 1v1 player for a single match. If neither
player in the 1v1 match takes part in any other matches,
the players can't be placed in a participant group and their win
is not counted. There are two consequences:
1. Not counting a win may make the number of wins between
participants even, in which case we don't know which
participant group won the series.
2. Not grouping a player means the participant player list
will be incomplete.
"""
graph = networkx.DiGraph()
win_id = 0
platform_ids = []
name_to_user = {}
for match in matches:
# Record a win
win_id += 1
graph.add_node(win_id, type='win')
# Record platform ID
platform_ids.append(match['platform_id'])
# Add node for each player
for player in match['players']:
name_to_user[player['name']] = player['user_id']
graph.add_node(player['name'], type='player')
# Can happen for incomplete matches
if match['winning_team'] is None:
continue
# Connect winning players to recorded win
for player in match['winning_team']['players']:
graph.add_edge(player['name'], win_id)
# Connect all players on the same team
for team in match['teams']:
for i in team['players']:
for j in team['players']:
graph.add_edge(i['name'], j['name'])
mgz_data = [{
'wins': len([node for node in g if graph.nodes[node]['type'] == 'win']),
'players': [node for node in g if graph.nodes[node]['type'] == 'player']
} for g in networkx.weakly_connected_components(graph)]
return [{
'user_ids': [name_to_user[n] for n in mgz['players']],
'winner': challonge['winner'],
'name': challonge['name'],
'score': challonge['score'],
'platform_id': platform_ids[0]
} for mgz, challonge in zip(
sorted(mgz_data, key=lambda k: -1 * k['wins']),
sorted(challonge_data, key=lambda k: -1 * k['score'] if k['score'] else 0)
)]
| 9,020
|
def write_haiku(word_array, is_ipv6):
"""Return the beautiful haiku"""
# String to place in schema to show word slot.
octct = 'OCTET'
schema = get_schema(is_ipv6, octct)
# Replace each instance of 'octet' in the schema with a word from
# the encoded word array.
for i in range(len(word_array)):
for j in range(len(schema)):
if schema[j] == octct:
schema[j] = word_array[i]
break
# Capitalize appropriate words.
schema = capitalize_haiku(schema)
haiku = ''.join(schema)
return haiku
| 9,021
|
def import_data():
"""
    Utility function to import summary tsv ready for usage in PyMol
"""
col_types = {
'sift_score': float, 'sift_median': float, 'total_energy': float,
'interaction_energy': float, 'diff_interaction_energy': float,
'diff_interface_residues': float, 'freq': float
}
return pd.read_csv('data/output/summary.tsv', sep='\t', index_col=False,
dtype=col_types, low_memory=False)
| 9,022
|
def test_b_traversal_from_neighborless_node_gets_one_node_list(full_graph_1):
"""Test that traversing from neightborless node gets one node list."""
assert full_graph_1.breadth_first_traversal(21) == [21]
| 9,023
|
def get_current_datetime():
"""
Get the current datetime.
Note: This function is intended to be mocked in testing
Return:
time(datetime.datetime): current datetime
"""
return datetime.datetime.now(current_app.config['TIMEZONE'])
| 9,024
|
def launch_task(task):
"""
Use this function to launch a task, by passing a common.Task
instance
Arguments:
- task <common.Task>
"""
content_type = ContentType.objects.get_for_model(task._meta.model)
celery_id = _launch_task.delay(content_type.id, task.id)
task.queue_id = celery_id
task.save()
| 9,025
|
def _delete_vsx_interface_vlan_v1(vlan_id, **kwargs):
"""
Perform PUT calls on a VLAN interface to remove VSX IPv4 settings
:param vlan_id: Numeric ID of VLAN to that will be configured
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Nothing
"""
ports_list = port.get_all_ports(**kwargs)
vlan_name = "vlan" + str(vlan_id)
if "/rest/v1/system/ports/%s" % vlan_name not in ports_list:
print("FAIL: Deleting VSX information from VLAN Interface '%d' failed "
"because VLAN Interface doesn't exist" % vlan_id)
else:
port_data = port.get_port(vlan_name, depth=0, selector="configuration", **kwargs)
port_data["vsx_active_forwarding_enable"] = False
port_data["vsx_sync"] = []
port_data["vsx_virtual_ip4"] = []
port_data.pop('vsx_virtual_gw_mac_v4', None)
port_data.pop('name', None) # must remove this item from the json since name can't be modified
port_data.pop('origin', None) # must remove this item from the json since origin can't be modified
target_url = kwargs["url"] + "system/ports/%s" % vlan_name
put_data = json.dumps(port_data, sort_keys=True, indent=4)
response = kwargs["s"].put(target_url, data=put_data, verify=False)
if not common_ops._response_ok(response, "PUT"):
print("FAIL: Deleting VSX information from VLAN Interface '%d' failed with status code %d"
% (vlan_id, response.status_code))
else:
print("SUCCESS: Deleting VSX information from VLAN Interface '%d' succeeded" % vlan_id)
| 9,026
|
def adapt_all(iterable, to_cls):
"""
Returns a list of items from adapting each item in iterable to `cls`
If `iterable` is `None`, an empty list will be returned.
"""
if iterable is None:
return []
return [adapt(obj, to_cls) for obj in iterable]
| 9,027
|
def get_config(seed, shot):
"""
Uses a given base 1-shot config to replicate it for 'shot' and 'seed'.
Changes dataset training split, cfg.OUTPUT_DIR and iteration number and steps accordingly.
"""
base_config_path: str = args.base_config
assert '1shot' in base_config_path
dataset_mode = 'novel' if '_novel' in base_config_path else 'all'
dataset_config = DatasetConfigs('coco' if args.coco else None, dataset_mode, args.method,
args.num_gpus, args.imgs_per_gpu, is_correct_train_iters='correct' in base_config_path)
seed_str = f'seed{seed}'
dataset_split = re.findall('split.', base_config_path)
assert len(dataset_split) <= 1
dataset_split = dataset_split[0] if dataset_split else ''
output_cfg_name = get_output_name_from_base(base_config_path, shot)
model_output_root = os.path.join(args.root, dataset_config.checkpoint_dir, dataset_split, seed_str)
os.makedirs(model_output_root, exist_ok=True)
output_dir = os.path.join(model_output_root,
os.path.splitext(output_cfg_name)[0])
result_config = load_yaml_file(base_config_path)
result_config = _fill_config(result_config, shot, dataset_split, seed, dataset_config, output_dir)
print(yaml.dump(result_config))
dry_run_config = not args.no_dry_run or args.skip_config_write
output_cfg_fullpath = _save_config(dataset_config.config_dir, dataset_split,
seed_str, output_cfg_name, result_config, dry_run_config)
return output_cfg_fullpath, result_config
| 9,028
|
def copy_code(outdir):
"""Copies files to the outdir to store complete script with each experiment"""
code = []
exclude = set([])
for root, _, files in os.walk("./code", topdown=True):
for f in files:
if not f.endswith('.py'):
continue
code += [(root,f)]
for r, f in code:
codedir = os.path.join(outdir,r)
if not os.path.exists(codedir):
os.mkdir(codedir)
shutil.copy2(os.path.join(r,f), os.path.join(codedir,f))
print("Code copied to '{}'".format(outdir))
| 9,029
|
def log_sum_exp_vb(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
    _, idx = torch.max(vec, 1)  # B * M
    max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size)  # B * 1 * M
    return max_score.view(-1, m_size) + torch.log(
        torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)
    ).view(-1, m_size)
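# Illustrative check (not part of the original module): for a random tensor the
# result should match torch.logsumexp over the "vanishing" dimension.
import torch
vec = torch.randn(4, 7, 5)  # (batch_size, vanishing_dim, hidden_dim)
out = log_sum_exp_vb(vec, m_size=5)
assert torch.allclose(out, torch.logsumexp(vec, dim=1), atol=1e-6)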
| 9,030
|
def C_fun_gen(fractions, speciesindices, y, time):
"""
Calculate the distribution of carbon functional groups as a percent of
total carbon.
Parameters
----------
fractions : list
The lumped phases that you want to include (as specified
in MW['species'][1], options are any subset of
['g','s','lt','t','char','H20','CO','CO2'] or ['initial']
for the case when you want to determine the initial
distribution before pyrolysis)
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
y : numpy array
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
time : int
the index of the timepoint that you want the results for
Returns
-------
C_fun : numpy array
the distribution of carbon functional groups as a percent of total
carbon. The order of the elements in the array is:
carbonyl, aromatic C-O, aromatic C-C, aromatic C-H, aliphatic C-O,
aromatic methoxyl, aliphatic C-C
"""
C_fun = np.zeros(7)
ind = speciesindices
for species in MW:
if fractions == ['initial']:
time = 0
if y[time, speciesindices[species]] != 0:
# moles of functional group/L (order from Return docstring)
C_fun[0] += y[time, ind[species]] * MW[species][4][0]
C_fun[1] += y[time, ind[species]] * MW[species][4][1]
C_fun[2] += y[time, ind[species]] * MW[species][4][2]
C_fun[3] += y[time, ind[species]] * MW[species][4][3]
C_fun[4] += y[time, ind[species]] * MW[species][4][4]
C_fun[5] += y[time, ind[species]] * MW[species][4][5]
C_fun[6] += y[time, ind[species]] * MW[species][4][6]
else:
if MW[species][1] in set(fractions):
C_fun[0] += y[time, ind[species]] * MW[species][4][0]
C_fun[1] += y[time, ind[species]] * MW[species][4][1]
C_fun[2] += y[time, ind[species]] * MW[species][4][2]
C_fun[3] += y[time, ind[species]] * MW[species][4][3]
C_fun[4] += y[time, ind[species]] * MW[species][4][4]
C_fun[5] += y[time, ind[species]] * MW[species][4][5]
C_fun[6] += y[time, ind[species]] * MW[species][4][6]
C_fun /= C_fun.sum()
return C_fun
| 9,031
|
def get_branch(repo):
""" Retrieve the current branch of a dulwich repository
"""
refnames, sha = repo.refs.follow(b"HEAD")
if len(refnames) != 2:
LOGGER.debug("Got more than two refnames for HEAD!")
for ref in refnames:
if ref != b"HEAD":
return to_utf8(ref)
| 9,032
|
def test_empty_collection():
"""Test empty collection"""
with AnkiEmpty() as a:
assert a.col.cardCount() == 0
assert len(a.model_names) == 5
| 9,033
|
def print_grid(grid_in):
"""
Prints the elements of grid_in with a space between each element along the
columns and a new line for each row. Assumes that grid_in is a list of
lists.
"""
#iterate through grid
for row in grid_in:
for elem in row:
# print element followed by space
print(elem, end=' ')
print()
| 9,034
|
def _sigmoid(x):
"""
Sigmoid function that smoothly limits values between 0.0 and 1.0
:param x: Numpy array with float values that are to be limited.
:return: Numpy array with float values between 0.0 and 1.0
"""
return 1.0 / (1.0 + np.exp(-x))
| 9,035
|
def inherit_n_genes_prob(n, n_father, n_mother, mutation_prob) -> float:
    """Returns the conditional probability that a child inherits `n` copies of
    the gene given that the father has n_father copies and the mother has
    n_mother copies, taking the probability of mutation into account."""
# Probabily distributions:
# key 0 or False: probability of not inheriting the gene from parent
# key 1 or True: probability of inheriting the gene from parent
probs_f: Dict[bool, float] = p_not_p(prob_inherit(n_father, mutation_prob))
probs_m: Dict[bool, float] = p_not_p(prob_inherit(n_mother, mutation_prob))
return (
# Prob to not inherit at all
probs_f[0] * probs_m[0] if n == 0
# Prob to inherit from one parent only
else probs_f[1] * probs_m[0] + probs_f[0] * probs_m[1] if n == 1
# Prob to inherit from both parents
else probs_f[1] * probs_m[1]
)
| 9,036
|
def battle(player, npc):
"""
    :param player: Player
    :param npc: Monster
:return: None
"""
for item in inventory.inv.equipment.values():
if item.item_type == 'Weapon':
weapon_attack = item.attack
break
else:
weapon_attack = 0
for item in inventory.inv.equipment.values():
if item.item_type == 'Armour':
armour_defence = item.armour
break
else:
armour_defence = 0
effective_player_defence = player.defence + armour_defence
effective_player_attack = player.attack + weapon_attack
npc.health = npc.max_health
print(f"""
______ {player.name} Vs {npc.name} ______
- {player.name} HP: {player.health}/{player.max_health}
- {player.name} Attack: {player.attack} + {weapon_attack} from Weapon ({effective_player_attack})
- {player.name} Defence: {player.defence} + {armour_defence} from Armour ({effective_player_defence})
-----------------------------------------
- {npc.name} HP: {npc.health}/{npc.max_health}
- {npc.name} Attack: {npc.attack}
- {npc.name} Defence: {npc.defence}
__________________________________________
""")
prompt()
while True:
player_hit = random.randint(0, effective_player_attack) - random.randint(0, npc.defence)
npc_hit = random.randint(0, npc.attack) - random.randint(0, effective_player_defence)
if player_hit < 0:
player_hit = 0
if npc_hit < 0:
npc_hit = 0
player.health -= npc_hit
npc.health -= player_hit
print(f"""
______ {player.name} Vs {npc.name} ______
{player.name} hit {player_hit}.
{npc.name} hit {npc_hit}.
-----------------------------------------
- {player.name} HP: {player.health}/{player.max_health}
- {npc.name} HP: {npc.health}/{npc.max_health}
__________________________________________""")
if player.health <= 0:
player.death()
if npc.health <= 0:
break
print(f"""
[ Enter ] Attack
[ 1 ] Attack
[ 2 ] Drink HP Potion
[ Q ] Run Away!
Current Run chance: {player.luck} roll(s) at 1/5 Chance.""")
answer = prompt("\n>> ", '', '1', '2', 'q', 'Q')
        if answer == '1':
pass
        elif answer == '2':
for item in inventory.inv.items.values():
                if item.item_type == 'Health_Potion':
inventory.inv.drink_potion(item, player)
else:
print("You don't have any Health Potions.")
prompt()
        if answer == 'q' or answer == 'Q':
for number in range(player.luck):
                if random.randint(0, 5) == 5:
clear_screen()
print("You ran away successfully. What a coward!")
prompt()
return
clear_screen()
print("You couldn't get away! Tough luck.")
prompt()
gold_reward = random.randint(0, npc.gold_drops)
print(f"""
[ {player.name} beat {npc.name} successfully. ]
[ HP: {player.health}/{player.max_health} ]
________________________________________________
[ REWARDS ]
[ Gold: {gold_reward} ]
""")
for number in range(player.luck):
if random.randint(0, npc.extra_drop_rate) == npc.extra_drop_rate:
try:
print(f"[ Other: {npc.extra_drop.name} ]")
inventory.inv.add_item(npc.extra_drop)
break
except AttributeError:
pass
else:
print("[ Other: None ]")
player.level_up(skill='attack', chance=7, increase=5)
player.level_up(skill='defence', chance=25, increase=3)
player.level_up(skill='health', chance=15, increase=5)
player.level_up(skill='luck', chance=5, increase=2)
player.gold += gold_reward
prompt()
| 9,037
|
async def get_metrics_address_counts_summary():
"""
Latest summary of address counts.
"""
qry = f"""
select col
, latest
, diff_1d
, diff_1w
, diff_4w
, diff_6m
, diff_1y
from mtr.address_counts_by_minimal_balance_change_summary;
"""
async with CONNECTION_POOL.acquire() as conn:
rows = await conn.fetch(qry)
return [dict(r) for r in rows]
| 9,038
|
async def processor(db, document: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a history document before it is returned to the client.
    :param db: the application database object
:param document: the document to process
:return: the processed document
"""
return await apply_transforms(
virtool.utils.base_processor(document),
[AttachUserTransform(db, ignore_errors=True)],
)
| 9,039
|
def _unit_scale_traindata(X, xmins, xmaxs):
"""If xmax > xmin, unit-scale the training data, else do nothing
Parameters
----------
    X : ndarray of shape (m, n)
xmins : ndarray of shape (n, )
xmaxs : ndarray of shape (n, )
Returns
-------
result : ndarray of shape (m, n)
Notes
-----
Training data must fit inside a rectangular box aligned with each dimension
"""
X = jnp.atleast_2d(X)
xmins = jnp.atleast_1d(xmins)
xmaxs = jnp.atleast_1d(xmaxs)
msk = xmins == xmaxs
norm = jnp.where(msk, 1.0, xmaxs - xmins)
offset = jnp.where(msk, 0.0, xmins)
return (X - offset) / norm
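# Illustrative usage sketch (not part of the original module): scale two
# features onto the unit interval given their training-box bounds.
import jax.numpy as jnp
X = jnp.array([[1.0, 10.0], [3.0, 30.0]])
xmins = jnp.array([1.0, 10.0])
xmaxs = jnp.array([3.0, 30.0])
print(_unit_scale_traindata(X, xmins, xmaxs))  # [[0. 0.] [1. 1.]]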
| 9,040
|
def script() -> None:
"""Function for running LADiM as a command line application"""
parser = argparse.ArgumentParser(
description="LADiM 2.0 — Lagrangian Advection and Diffusion Model"
)
parser.add_argument(
"-d",
"--debug",
help="Show debug information",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.INFO,
)
parser.add_argument(
"-s",
"--silent",
help="Show less information",
action="store_const",
dest="loglevel",
const=logging.WARNING,
default=logging.INFO,
)
parser.add_argument(
"-v", "--version", help="Configuration format version", type=int, default=2
)
parser.add_argument(
"config_file", nargs="?", help="Configuration file", default="ladim2.yaml"
)
args = parser.parse_args()
# Start up message
print("")
print(" ========================================================")
print(" === LADiM – Lagrangian Advection and Diffusion Model ===")
print(" ========================================================")
print("")
main(args.config_file, loglevel=args.loglevel, config_version=args.version)
| 9,041
|
def get_port_properties(port):
"""Retrieves common port properties from its package.sh file.
Returns:
dict: keys are values from PORT_PROPERTIES, values are from the package.sh file
"""
props = {}
for prop in PORT_PROPERTIES:
res = subprocess.run(f"cd {port}; exec ./package.sh showproperty {prop}", shell=True, capture_output=True)
if res.returncode == 0:
props[prop] = res.stdout.decode('utf-8').strip()
else:
print((
f'Executing "./package.sh showproperty {prop}" script for port {port} failed with '
f'exit code {res.returncode}, output from stderr:\n{res.stderr.decode("utf-8").strip()}'
))
props[prop] = ''
return props
| 9,042
|
def test_compute_reproject_roi_issue647():
"""In some scenarios non-overlapping geoboxes will result in non-empty
`roi_dst` even though `roi_src` is empty.
Test this case separately.
"""
src = GeoBox(
10980, 10980, Affine(10, 0, 300000, 0, -10, 5900020), CRS("epsg:32756")
)
dst = GeoBox(976, 976, Affine(10, 0, 1730240, 0, -10, -4170240), CRS("EPSG:3577"))
assert src.extent.overlaps(dst.extent.to_crs(src.crs)) is False
rr = compute_reproject_roi(src, dst)
assert roi_is_empty(rr.roi_src)
assert roi_is_empty(rr.roi_dst)
| 9,043
|
def run_HDBSCAN_subclustering(df=None, target=None, cluster_col="Cluster", soft_clustering=True,
min_cluster_size=100, min_samples=10,
cluster_selection_epsilon=0.0, cluster_selection_method='eom',
draw_condensed_tree=True, core_dist_n_jobs=None):
"""An implement of HDBSCAN (CPU version) for further clustering of a subcluster.
Parameters
----------
df: pd.DataFrame
A DataFrame with columns X, Y, and clusters.
soft_clustering: boolean
Use soft clustering or not. Default=True.
min_cluster_size: int
min_cluster_size in HDBSCAN.
min_samples: int
min_samples in HDBSCAN
cluster_selection_epsilon: float
cluster_selection_epsilon in HDBSCAN
cluster_selection_method: str
cluster_selection_method in HDBSCAN. Should be "eom" or "leaf".
draw_condensed_tree: boolean
Draw the condensed tree of HDBSCAN or not.
core_dist_n_jobs:
core_dist_n_jobs in HDBSCAN.
Returns
-------
    df: pd.DataFrame
        A copy of the input DataFrame with subcluster labels written back into `cluster_col`.
    model: hdbscan.HDBSCAN
        The fitted HDBSCAN model.
"""
import numpy as np
import hdbscan
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
df = df.copy()
max_cluster_id = df[cluster_col].max()
df1 = df[df[cluster_col]==target].copy()
X = np.stack([df1["X"], df1["Y"]], axis=1)
model = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, cluster_selection_method=cluster_selection_method, cluster_selection_epsilon=cluster_selection_epsilon, core_dist_n_jobs=core_dist_n_jobs, prediction_data=True)
yhat = model.fit(X)
soft_clusters = hdbscan.all_points_membership_vectors(yhat)
labels = [np.argmax(x) for x in soft_clusters]
df1[cluster_col] = [max_cluster_id + i + 1 for i in labels ] # re-number lables to make it human-readable
df.loc[df1.index, cluster_col] = df1[cluster_col].tolist()
print("HDBSCAN cluster number: {}".format(df["Cluster"].max()-1))
print(df.groupby(cluster_col)[cluster_col].count())
if draw_condensed_tree == True:
fig, ax = plt.subplots()
model.condensed_tree_.plot(select_clusters=True, selection_palette=sns.color_palette())
plt.savefig("Condensed_tree_subcluster.pdf")
return df, model
| 9,044
|
def extractive_explanations(
dataset,
prefix='explain sentiment',
input_feature='review',
output_classes=('negative', 'positive'),
drop_explanations=False
):
"""Preprocessor to handle extractive rationale prediction datasets.
The preprocessor expects a dataset with the provided 'input_feature', a label,
and a list of evidences. E.g. the movie rationale dataset consists of the
following features.
{
review: 'This is a bad movie. Skip it.'
label: 0,
evidences: ['bad movie', 'Skip it']
}
The example will be transformed to the following format by the preprocessor:
{
inputs: 'explain sentiment review: This is a bad movie. Skip it.'
targets: 'NEG because bad movie explanation: Skip it'
}
Args:
dataset: a tf.data.Dataset to process.
prefix: str, prefix to prepend to the inputs.
input_feature: str, feature name in input dataset.
output_classes: list of output classes in the input dataset. Defaults to
['negative', 'positive'] for the movie reviews dataset.
drop_explanations: bool, whether or not to drop explanations.
Returns:
a tf.data.Dataset
"""
if output_classes is None:
output_classes = ['negative', 'positive']
def my_fn(x):
"""Helper function to transform a rationale dataset to inputs/targets."""
input_label = tf.strings.join([input_feature, ':'], separator='')
inputs = tf.strings.join(
[prefix, input_label, x[input_feature]], separator=' ')
class_label = tf.gather(output_classes, x['label'])
if drop_explanations:
targets = class_label
else:
targets = _explanation_targets(class_label, x['evidences'])
return {'inputs': inputs, 'targets': targets}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
| 9,045
|
async def test_signal_repetitions_cancelling(hass, monkeypatch):
"""Cancel outstanding repetitions when state changed."""
config = {
"rflink": {"port": "/dev/ttyABC0"},
DOMAIN: {
"platform": "rflink",
"devices": {"protocol_0_0": {"name": "test", "signal_repetitions": 3}},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: DOMAIN + ".test"}
)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: DOMAIN + ".test"}, blocking=True
)
assert [call[0][1] for call in protocol.send_command_ack.call_args_list] == [
"off",
"on",
"on",
"on",
]
| 9,046
|
def download_eval_public(dataset_folder):
""" Download the public eval part of desed dataset from Zenodo.
Args:
dataset_folder: str, the path to the root of the dataset where to download the evaluation files (this folder
contains audio and metadata folders).
Returns:
"""
create_folder(dataset_folder)
url_public_eval = (
f"https://zenodo.org/record/4560759/files/DESED_public_eval.tar.gz?download=1"
)
download_and_unpack_archive(url_public_eval, dataset_folder)
| 9,047
|
def _extractFile(configuration, zipFile, zipPath, zipInfo, targetDir, absTargetDir, onlyNewer):
"""Extract the ZipInfo object to a physical file at targetDir.
"""
engine = configuration.engine
targetFile = os.path.join(targetDir, zipInfo.filename)
absTargetFile = os.path.join(absTargetDir, zipInfo.filename)
if cake.zipping.isDirectoryInfo(zipInfo):
# The zip info corresponds to a directory.
cake.filesys.makeDirs(absTargetFile)
else:
# The zip info corresponds to a file.
year, month, day, hour, minute, second = zipInfo.date_time
zipTime = calendar.timegm((year, month, day, hour, minute, second, 0, 0, 0))
buildArgs = []
_, reasonToBuild = configuration.checkDependencyInfo(targetFile, buildArgs)
if reasonToBuild is None:
reasonToBuild = _shouldExtractFile(engine, absTargetFile, zipTime, onlyNewer)
if reasonToBuild is None:
return # Target is up to date
engine.logger.outputDebug(
"reason",
"Extracting '" + targetFile + "' because " + reasonToBuild + ".\n",
)
engine.logger.outputInfo("Extracting %s\n" % targetFile)
try:
cake.filesys.writeFile(absTargetFile, zipFile.read(zipInfo.filename))
    except Exception as e:
engine.raiseError(
"Failed to extract file %s from zip %s: %s\n" % (
zipInfo.filename,
zipPath,
str(e),
),
targets=[targetFile],
)
# Now that the file has been written successfully, save the new dependency file
newDependencyInfo = configuration.createDependencyInfo(
targets=[targetFile],
args=buildArgs,
dependencies=[],
)
configuration.storeDependencyInfo(newDependencyInfo)
# Set the file modification time to match the zip time
os.utime(absTargetFile, (zipTime, zipTime))
| 9,048
|
def corr_weighted_kendalltau(top_list_prev, top_list, use_fast=True):
"""Compute weighted Kendall's Tau correlation (based on custom implementation!).
    NOTE: the lists are DataFrame columns and must be sorted by their values."""
# it is irrelevant whether we compute kendall for ranks or scores.
list_a, list_b = proc_corr(top_list_prev, top_list)
if len(list_a) != len(list_b):
raise RuntimeError("The length of 'list_a' and 'list_b' must be the same!")
if use_fast:
return [fast_weighted_kendall(list_a, list_b)[1]]
else:
rank_list_a = tiedrank(list_a)
rank_list_b = tiedrank(list_b)
return [computeWKendall(rank_list_a,rank_list_b,ranked_input=True)[1]]
| 9,049
|
def test_Order_PUT_request(app):
"""with app.test_client() as client:
response = client.put(
'/orders/3',
data=json.dumps(dict(
status='Pedido Concluído',
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert data == {'data':
{'date': '2021-01-18T13:23:40', 'id': 3, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido Concluído', 'total_price': 0.0}
} """
| 9,050
|
def generate_processes_by_exposure(exposure):
""" Creates a simulated process based on an exposure.
Arguments:
exposure {object} -- Exposure model
Raises:
        ValueError -- raised when there is no process
        with the respective exposure flavor.
Returns:
object -- Process model
"""
flavor = exposure.flavor
process = qlf_models.get_last_process_by_flavor(
flavor, jobs_isnull=False)
if not process:
raise ValueError(
'There is no process with {} flavor.'.format(flavor)
)
process.exposure_id = exposure.exposure_id
process.id = None
tdate = datetime.datetime.now()
tdate += datetime.timedelta(minutes=random.randint(1, 5))
process.end = tdate
process.save()
return process
| 9,051
|
def cross_val_confusion(classifier, X, y, cv=None):
"""
Evaluate confusion matrix and score from each fold of cross validation
Parameters:
----------
classifier: classifier object
The object used to fit the data.
X[ndarray]: shape=(n_sample, n_feature)
y[ndarray]: shape=(n_sample,)
cv[int]: the number of folds of the cross validation
Returns:
-------
conf_ms[list]: confusion matrices of the folds
accuracies[list]: accuracies of the folds
"""
assert getattr(classifier, "_estimator_type", None) == "classifier", \
"Estimator must be a classifier!"
# calculate CV metrics
conf_ms = []
accuracies = []
classifier = copy.deepcopy(classifier)
skf = StratifiedKFold(n_splits=cv)
for train_indices, test_indices in skf.split(X, y):
# fit and prediction
classifier.fit(X[train_indices], y[train_indices])
y_preds = classifier.predict(X[test_indices])
# calculate confusion matrix and accuracy
conf_m = confusion_matrix(y[test_indices], y_preds)
acc = np.sum(conf_m.diagonal()) / np.sum(conf_m)
# collection
conf_ms.append(conf_m)
accuracies.append(acc)
return conf_ms, accuracies
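# Illustrative usage sketch (not part of the original module): 5-fold
# cross-validated confusion matrices for a logistic regression on the iris data.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
X, y = load_iris(return_X_y=True)
conf_ms, accuracies = cross_val_confusion(LogisticRegression(max_iter=1000), X, y, cv=5)
print(len(conf_ms), accuracies)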
| 9,052
|
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload Synology DSM sensors."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
entry_data = hass.data[DOMAIN][entry.unique_id]
entry_data[UNDO_UPDATE_LISTENER]()
await entry_data[SYNO_API].async_unload()
hass.data[DOMAIN].pop(entry.unique_id)
return unload_ok
| 9,053
|
def transfer_segm_labels(verts_before, mesh, dir_path, name):
"""
Save segmentation labels for mesh after scan imitation
"""
verts_after = utils.get_vertices_np(mesh)
verts_mapping = utils.match_vert_lists(verts_after, verts_before)
# print(os.path.join(dir_path, name + '_sim_segmentation.txt'))
with open(os.path.join(dir_path, name + '_sim_segmentation.txt'), 'r') as f:
vert_labels = [line.rstrip() for line in f] # remove \n
scan_labels = [vert_labels[i] for i in verts_mapping]
filepath = os.path.join(dir_path, name + '_scan_imitation_segmentation.txt')
with open(filepath, 'w') as f:
for panel_name in scan_labels:
f.write("%s\n" % panel_name)
return 0
| 9,054
|
def confirm_control_contains(trestle_dir: pathlib.Path, control_id: str, part_label: str, seek_str: str) -> bool:
"""Confirm the text is present in the control markdown in the correct part."""
control_dir = trestle_dir / ssp_name / control_id.split('-')[0]
md_file = control_dir / f'{control_id}.md'
responses, _ = ControlIOReader.read_all_implementation_prose_and_header(md_file)
if part_label not in responses:
return False
prose = '\n'.join(responses[part_label])
return seek_str in prose
| 9,055
|
def describe_current_subtask(subtask, prefix=True):
"""
Make a 'natural' language description of subtask name
"""
to_verb = {"AnswerQuestion": "answering a question",
"ArmGoal": "moving my arm",
"DemoPresentation": "giving a demo",
"Find": "finding",
"Follow": "following",
"Guide": "guiding",
"GripperGoal": "moving my gripper",
"HandOver": "handing something over",
"Inspect": "inspecting",
"LookAt": "looking",
"NavigateTo": "navigating",
"PickUp": "picking up",
"Place": "placing",
"ResetWM": "resetting my world model",
"Say": "speaking",
"SendPicture": "sending a picture",
"TurnTowardSound": "turning towards a sound"}
description = to_verb.get(subtask, subtask + "ing")
if prefix:
description = random.choice(["I'm busy", "I'm"]) + " " + description
return description
| 9,056
|
def setup_base_repo(ctx, config):
"""
Setup repo based on redhat nodes
redhat:
base-repo-url: base url that provides Mon, OSD, Tools etc
installer-repo-url: Installer url that provides Agent, Installer
deb-repo-url: debian repo url
deb-gpg-key: gpg key used for signing the build
"""
rh_config = ctx.config.get('redhat')
if not rh_config.get('base-repo-url'):
# no repo defined
yield
if rh_config.get('set-cdn-repo'):
log.info("CDN repo already set, skipping rh repo")
yield
else:
_setup_latest_repo(ctx, rh_config)
try:
yield
finally:
log.info("Cleaning up repo's")
for remote in ctx.cluster.remotes.keys():
if remote.os.package_type == 'rpm':
remote.run(args=['sudo', 'rm',
run.Raw('/etc/yum.repos.d/rh*.repo'),
], check_status=False)
| 9,057
|
def addprint(x: int, y: int):
"""Print and "added" representation of `x` and `y`."""
expr = x + y
return "base addprint(x=%r, y=%r): %r" % (x, y, expr)
| 9,058
|
def parse_date(deadline_date):
"""
Given a date in the form MM/DD/YY or MM/DD/YYYY, returns
the integers MM, DD, and YYYY (or YY) in this order.
"""
deadline_split = re.split('\\/|\\-', deadline_date)
return int(deadline_split[0]), int(deadline_split[1]), int(deadline_split[2])
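# Illustrative usage sketch (not part of the original module):
print(parse_date("02/24/2020"))  # (2, 24, 2020)
print(parse_date("2-24-20"))     # (2, 24, 20)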
| 9,059
|
def print_distribution(distribution, out=sys.stdout):
""" Prints distribution data
"""
total = reduce(lambda x, y: x + y,
[group[1] for group in distribution])
output = ""
for group in distribution:
output += " %s: %.2f%% (%d instance%s)\n" % ( \
group[0],
round(group[1] * 1.0 / total, 4) * 100,
group[1],
"" if group[1] == 1 else "s")
out.write(output)
out.flush()
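# Illustrative usage sketch (not part of the original module): the expected
# input is a list of (group label, instance count) pairs.
print_distribution([("Iris-setosa", 50), ("Iris-versicolor", 50), ("Iris-virginica", 49)])
#  Iris-setosa: 33.56% (50 instances)
#  Iris-versicolor: 33.56% (50 instances)
#  Iris-virginica: 32.89% (49 instances)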
| 9,060
|
def jyfm_tools_position_fund_direction(
trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
"""
    Jiaoyifamen (交易法门) tools: position analysis, futures fund flow
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param trade_date: the trading day to query
    :type trade_date: str
    :param indicator: "期货品种资金流向排名" (fund flow ranking by futures variety) or "期货主力合约资金流向排名" (fund flow ranking by dominant contract)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: fund flow data for the specified trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种资金流向排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["flowCategory"]),
data_json["flowCategory"],
data_json["flowValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantFlowCategory"]),
data_json["dominantFlowCategory"],
data_json["dominantFlowValue"],
],
index=["date", "symbol", "fund"],
).T
| 9,061
|
def CreateConferenceRoomRunConfig(run_id):
"""Creates and stores a conference room run config entity.
Args:
run_id: str unique id to be associated with this run config.
"""
config_value = {'run_id': run_id,
'run_start_time': datetime.datetime.now()}
config_value_str = pickle.dumps(config_value)
run_entity = models.Configuration(key_name=_CONFERENCE_ROOMS_RUN_KEY_NAME,
config_key=_CONFERENCE_ROOMS_RUN_KEY_NAME,
config_value=config_value_str)
run_entity.put()
| 9,062
|
def generate_points_in_areas(gdf, values, points_per_unit=1, seed=None):
"""
Create a GeoSeries of random points in polygons.
Parameters
----------
gdf : GeoDataFrame
The areas in which to create points
values : str or Series
The [possibly scaled] number of points to create in each area
points_per_unit : numeric, optional
The rate to scale the values in point generation.
seed : int, optional
A random seed
Returns
-------
GeoSeries
"""
geometry = gdf.geometry
if isinstance(values, str) and values in gdf.columns:
values = gdf[values]
new_values = (values / points_per_unit).astype(int)
g = gpd.GeoDataFrame(data={'vals': new_values}, geometry=geometry)
a = g.apply(lambda row: tuple(generate_random_points_in_polygon(row['geometry'], row['vals'], seed)), 1)
b = gpd.GeoSeries(a.apply(pd.Series).stack(), crs=geometry.crs)
b.name = 'geometry'
return b
| 9,063
|
def _log_parameter_search_results_as_artifact(cv_results_df, run_id):
"""
Records a collection of parameter search results as an MLflow artifact
for the specified run.
:param cv_results_df: A Pandas DataFrame containing the results of a parameter search
training session, which may be obtained by parsing the `cv_results_`
attribute of a trained parameter search estimator such as
`GridSearchCV`.
:param run_id: The ID of the MLflow Run to which the artifact should be recorded.
"""
with TempDir() as t:
results_path = t.path("cv_results.csv")
cv_results_df.to_csv(results_path, index=False)
try_mlflow_log(MlflowClient().log_artifact, run_id, results_path)
| 9,064
|
def validate_kata():
"""
Validate Kata
"""
wait_for_installation()
here = os.path.dirname(os.path.abspath(__file__))
manifest = os.path.join(here, "templates", "nginx-kata.yaml")
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("", "default", "running", label="app=kata")
kubectl("delete -f {}".format(manifest))
| 9,065
|
def montager(xi, col=None, row=None, aspect=1.4, transpose=False, isRGB=False,
flipx=False, flipy=False, flipz=False, output_grid_size=False):
""" tile a 3D or 4D image into a single 2D montage
Parameters
----------
xi : ndarray
image data to montage
col : int, optional
number of columns in the montage
row : int, optional
number of rows in the montage
aspect : float, optional
desired aspect ratio of the montage
transpose : bool, optional
transpose each image slice in the montage? (transposes first two
dimensions of the input)
isRGB : bool, optional
set True if the input is RGB
flipx : bool, optional
reverse x-axis indices?
flipy : bool, optional
reverse y-axis indices?
flipz : bool, optional
reverse z-axis indices?
output_grid_size : bool, optional
if true, the number of rows and columns will also be returned
Returns
-------
xo : ndarray
2D ndarray containing the montage
Notes
-----
Any axis flips are applied prior to transposition
added RGB support, aspect ratio, transpose flag and axis flip flags
adapted from: montager.m (Jeff Fessler's IRT toolbox)
"""
# TODO?: also allow RGBA axis to be the first rather than last
# TODO: add option to add a border between the cells
# TODO: allow >4D by stacking all remaining dimensions along the 4th
if isRGB: # call montager for R,G,B channels separately
if xi.shape[-1] < 3 or xi.shape[-1] > 4:
raise Exception(
"if isRGB=True, the last dimension must be size 3 or 4")
if xi.shape[-1] == 4:
has_alpha = True
else:
has_alpha = False
xiR = xi[..., 0]
xiG = xi[..., 1]
xiB = xi[..., 2]
xoR, row, col = montager(xiR, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False,
flipx=flipx, flipy=flipy, flipz=flipz,
output_grid_size=True)
xoR = xoR[:, :, None]
xoG = montager(xiG, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False, flipx=flipx,
flipy=flipy, flipz=flipz,
output_grid_size=False)
xoG = xoG[:, :, None]
xoB = montager(xiB, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False, flipx=flipx,
flipy=flipy, flipz=flipz, output_grid_size=False)
xoB = xoB[:, :, None]
if has_alpha:
xiA = xi[..., 3]
xoA = montager(xiA, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False, flipx=flipx,
flipy=flipy, flipz=flipz, output_grid_size=False)
xoA = xoA[:, :, None]
xo = np.concatenate((xoR, xoG, xoB, xoA), axis=2)
else:
xo = np.concatenate((xoR, xoG, xoB), axis=2)
if output_grid_size:
return (xo, row, col)
else:
return xo
if xi.ndim > 4:
print('ERROR in %s: >4D not done' % __name__)
if xi.ndim == 4:
if flipx:
xi = xi[::-1, :, :, :]
if flipy:
xi = xi[:, ::-1, :, :]
if flipz:
xi = xi[:, :, ::-1, :]
if not transpose:
xi = np.transpose(xi, axes=(1, 0, 2, 3))
        (nx, ny, n3, n4) = xi.shape
        nz = n3 * n4
        xi = np.reshape(xi, (nx, ny, nz), order='F')
        if col is None:
            col = n3  # default for 4D input: one column per element of the 3rd dimension
elif xi.ndim == 3:
if flipx:
xi = xi[::-1, :, :]
if flipy:
xi = xi[:, ::-1, :]
if flipz:
xi = xi[:, :, ::-1]
if not transpose:
xi = np.transpose(xi, axes=(1, 0, 2))
(nx, ny, nz) = xi.shape
else: # for 1D or 2D case, just return the input, unchanged
if flipx:
xi = xi[::-1, :]
if flipy:
xi = xi[:, ::-1]
if not transpose:
xi = xi.T
if output_grid_size:
return xi, 1, 1
else:
return xi
row, col = _calc_rows(nx, ny, nz, row=row, col=col, aspect=aspect)
xo = np.zeros((ny * row, nx * col))
for iz in range(nz):
iy = int(np.floor(iz / col))
ix = iz - iy * col
xo[iy * ny:(iy + 1) * ny, ix * nx:(ix + 1) * nx] = xi[:, :, iz].T
if output_grid_size:
return (xo, row, col)
else:
return xo
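# Illustrative usage sketch (not part of the original module): tile a stack of
# 12 random 64x48 slices into one 2D montage. Assumes the module-level helper
# _calc_rows used by montager is available.
import numpy as np
vol = np.random.rand(64, 48, 12)
montage, n_rows, n_cols = montager(vol, aspect=1.4, output_grid_size=True)
print(montage.shape, n_rows, n_cols)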
| 9,066
|
def template_dict(input_dict_arg, params_dict_arg):
"""function to enable templating a dictionary"""
output_dict = input_dict_arg
for key, value in output_dict.items():
if isinstance(value, str):
output_dict[key] = params_re_str(value, params_dict_arg)
elif isinstance(value, dict):
output_dict[key] = template_dict(value, params_dict_arg)
elif isinstance(value, list):
output_dict[key] = template_list(value, params_dict_arg)
return output_dict
| 9,067
|
def test_publishlib_name_from_metadata_problem(store_mock, config):
"""The metadata wasn't there to get the name."""
args = Namespace(library="charms.testcharm.v0.testlib")
with patch("charmcraft.commands.store.get_name_from_metadata") as mock:
mock.return_value = None
with pytest.raises(CraftError) as cm:
PublishLibCommand(config).run(args)
assert str(cm.value) == (
"Can't access name in 'metadata.yaml' file. The 'publish-lib' command needs to "
"be executed in a valid project's directory."
)
| 9,068
|
def _traverse_dictionaries(instance, parent="spin_systems"):
"""Parses through the instance object contained within the parent object and return
a list of attributes that are populated.
Args:
instance: An instance object from the parent object.
parent: a string object used to create the addresses of the SpinSystem
attributes.
Returns:
List Object.
"""
if isinstance(instance, list):
return [
value
for i, obj in enumerate(instance)
for value in _traverse_dictionaries(obj, _str_encode(f"{parent}[{i}]"))
]
if isinstance(instance, dict):
return [
item
for key, value in instance.items()
if key not in EXCLUDE and value is not None
for item in (
_traverse_dictionaries(value, _str_encode(f"{parent}.{key}"))
if isinstance(value, (dict, list))
else [_str_encode(f"{parent}.{key}")]
)
]
return []
| 9,069
|
def getSingleChildTextByName(rootNode, name):
"""Returns the text of a child node found by name.
Only one such named child is expected.
"""
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]
if len(nodeList) > 0:
return nodeList[0]
else:
return None
except AttributeError:
return None
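# Illustrative usage sketch (not part of the original module), using a tiny
# hypothetical XML document parsed with xml.dom.minidom.
from xml.dom import minidom
doc = minidom.parseString("<book><title>Dune</title><year>1965</year></book>")
print(getSingleChildTextByName(doc.documentElement, "title"))   # Dune
print(getSingleChildTextByName(doc.documentElement, "author"))  # None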
| 9,070
|
def test_aggregate_stores_output_in_record(configured_test_manager):
"""An aggregate output should exist in the record state."""
@aggregate(["output"])
def small_aggregate(record, records):
return "hello world"
record = Record(configured_test_manager, None)
small_aggregate(record, [record]) # TODO: blank records array crashes??
assert record.state["output"] == "hello world"
| 9,071
|
def _get_energy_ratio_single_wd_bin_bootstrapping(
df_binned,
df_freq,
N=1,
percentiles=[5.0, 95.0],
return_detailed_output=False,
):
"""Get the energy ratio for one particular wind direction bin and
an array of wind speed bins. This function also includes bootstrapping
functionality by increasing the number of bootstrap evaluations (N) to
larger than 1. The bootstrap percentiles default to 5 % and 95 %.
"""
# Get results excluding uncertainty
if return_detailed_output:
energy_ratio_nominal, dict_info = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_binned,
df_freq=df_freq,
return_detailed_output=return_detailed_output,
)
else:
energy_ratio_nominal = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_binned,
df_freq=df_freq,
return_detailed_output=return_detailed_output,
)
# Add bootstrapping results, if necessary
if N <= 1:
results_array = np.array([energy_ratio_nominal] * 3, dtype=float)
else:
# Get a bootstrap sample of range
bootstrap_results = np.zeros(N)
bootstrap_results[0] = energy_ratio_nominal
for i in range(1, N):
df_randomized = df_binned.sample(frac=1, replace=True).copy()
bootstrap_results[i] = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_randomized,
df_freq=df_freq,
return_detailed_output=False,
)
# Return the results in the order used in previous versions
results_array = np.array(
[
energy_ratio_nominal,
np.nanpercentile(bootstrap_results, percentiles)[0],
np.nanpercentile(bootstrap_results, percentiles)[1],
]
)
if return_detailed_output:
return results_array, dict_info
else:
return results_array
| 9,072
|
def save_tasks(
grid,
year,
temporal_range,
frequency,
output,
products,
dataset_filter,
env,
complevel,
overwrite=False,
tiles=None,
debug=False,
gqa=None,
):
"""
Prepare tasks for processing (query db).
<todo more help goes here>
\b
Not yet implemented features:
- output product config
- multi-product inputs
"""
from datacube import Datacube
from .tasks import SaveTasks
from .model import DateTimeRange
filter = {}
if dataset_filter:
filter = json.loads(dataset_filter)
if temporal_range is not None and year is not None:
print("Can only supply one of --year or --temporal_range", file=sys.stderr)
sys.exit(1)
if temporal_range is not None:
try:
temporal_range = DateTimeRange(temporal_range)
except ValueError:
print(f"Failed to parse supplied temporal_range: '{temporal_range}'")
sys.exit(1)
if year is not None:
temporal_range = DateTimeRange.year(year)
if frequency is not None:
if frequency not in ("annual", "annual-fy", "semiannual", "seasonal", "all"):
print(f"Frequency must be one of annual|annual-fy|semiannual|seasonal|all and not '{frequency}'")
sys.exit(1)
dc = Datacube(env=env)
products = products.split("+")
if len(products) == 1:
product = products[0]
dss = None
n_dss = None
else:
dss, n_dss, product, error_logger = _parse_products(dc, products, filter, temporal_range)
if output == "":
if temporal_range is not None:
output = f"{product}_{temporal_range.short}.db"
else:
output = f"{product}_all.db"
try:
tasks = SaveTasks(
output, grid, frequency=frequency, overwrite=overwrite, complevel=complevel
)
except ValueError as e:
print(str(e))
sys.exit(1)
def on_message(msg):
print(msg)
def gqa_predicate(ds):
return ds.metadata.gqa_iterative_mean_xy <= gqa
predicate = None
if gqa is not None:
predicate = gqa_predicate
try:
ok = tasks.save(
dc,
product,
dataset_filter=filter,
temporal_range=temporal_range,
tiles=tiles,
predicate=predicate,
debug=debug,
msg=on_message,
dss=dss,
n_dss=n_dss,
)
except ValueError as e:
print(str(e))
sys.exit(2)
if len(products) != 1:
for product, count in error_logger.missing_counts.items():
print(f"Product {product} has {count} missing datasets.")
if not ok:
# exit with error code, failure message was already printed
sys.exit(3)
| 9,073
|
def get_file_name(part):
"""get file name using regex from fragment ID"""
return re.findall(r"='(.*\-[a-z]+).*", part)[0]
| 9,074
|
def main():
"""Main entrance for training"""
args = parser.parse_args()
print(sys.argv)
#context.set_context(mode=context.GRAPH_MODE)
context.set_context(mode=context.PYNATIVE_MODE)
if args.GPU:
context.set_context(device_target='GPU')
# parse model argument
    assert args.model.startswith(
        "hournas"), "Only HourNAS models are supported."
#_, sub_name = args.model.split("_")
net = hournasnet(args.model,
num_classes=args.num_classes,
drop_rate=0.0,
drop_connect_rate=0.0,
global_pool="avg",
bn_tf=False,
bn_momentum=None,
bn_eps=None)
print(net)
print("Total number of parameters:", count_params(net))
cfg = edict({'image_height': args.image_size, 'image_width': args.image_size,})
cfg.batch_size = args.batch_size
print(cfg)
#input_size = net.default_cfg['input_size'][1]
val_data_url = args.data_path #os.path.join(args.data_path, 'val')
val_dataset = create_dataset_cifar10(val_data_url, repeat_num=1, training=False, cifar_cfg=cfg)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
eval_metrics = {'Validation-Loss': Loss(),
'Top1-Acc': Top1CategoricalAccuracy(),
'Top5-Acc': Top5CategoricalAccuracy()}
ckpt = load_checkpoint(args.ckpt)
load_param_into_net(net, ckpt)
net.set_train(False)
model = Model(net, loss, metrics=eval_metrics)
metrics = model.eval(val_dataset, dataset_sink_mode=False)
print(metrics)
| 9,075
|
def get_helping_materials(project_id, limit=100, offset=0, last_id=None):
"""Return a list of helping materials for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
    :param limit: Number of returned items, default 100
    :type limit: integer
    :param offset: Offset for the query, default 0
    :type offset: integer
    :param last_id: id of the last helping material, used for pagination. If provided, offset is ignored
    :type last_id: integer
    :returns: A list of HelpingMaterial objects for the project, or the raw error response
    """
if last_id is not None:
params = dict(limit=limit, last_id=last_id)
else:
params = dict(limit=limit, offset=offset)
print(OFFSET_WARNING)
params['project_id'] = project_id
try:
res = _pybossa_req('get', 'helpingmaterial',
params=params)
if type(res).__name__ == 'list':
return [HelpingMaterial(helping) for helping in res]
else:
return res
except: # pragma: no cover
raise
| 9,076
|
def generate_sphere_points(n):
"""
Returns list of 3d coordinates of points on a sphere using the
Golden Section Spiral algorithm.
"""
points = []
inc = math.pi * (3 - math.sqrt(5))
offset = 2 / float(n)
for k in range(int(n)):
y = k * offset - 1 + (offset / 2)
r = math.sqrt(1 - y*y)
phi = k * inc
points.append([math.cos(phi)*r, y, math.sin(phi)*r])
return points
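# Quick sanity check (illustrative, not from the original source): every point
# returned by generate_sphere_points should lie on the unit sphere.
_pts = generate_sphere_points(100)
assert len(_pts) == 100
assert all(abs(x*x + y*y + z*z - 1.0) < 1e-9 for x, y, z in _pts)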
| 9,077
|
def svn_repos_dir_delta2(*args):
"""
svn_repos_dir_delta2(svn_fs_root_t src_root, char src_parent_dir, char src_entry,
svn_fs_root_t tgt_root, char tgt_path,
svn_delta_editor_t editor, void edit_baton,
svn_repos_authz_func_t authz_read_func, svn_boolean_t text_deltas,
svn_depth_t depth, svn_boolean_t entry_props,
svn_boolean_t ignore_ancestry,
apr_pool_t pool) -> svn_error_t
"""
    return _repos.svn_repos_dir_delta2(*args)
| 9,078
|
def log_error(e):
"""
Print any errors.
"""
print(e)
| 9,079
|
def linear_r2_points(points: np.ndarray, coef: tuple, r2: R2 = R2.classic) -> float:
"""Computes the coefficient of determination (R2).
Args:
points (np.ndarray): numpy array with the points (x, y)
coef (tuple): the coefficients from the linear fit
r2 (R2): select the type of coefficient of determination
Returns:
float: coefficient of determination (R2)
"""
x = points[:, 0]
y = points[:, 1]
return linear_r2(x, y, coef, r2)
| 9,080
|
def _show_tournament_list() -> List:
"""
Функция возвращает список предстоящих турниров
"""
tournaments = []
for tournament in loop.run_until_complete(get_request('https://codeforces.com/api/contest.list?gym=false')):
if tournament['phase'] != 'BEFORE':
break
tournaments.append(tournament)
for tournament in range(len(tournaments)):
tournaments[tournament]['durationSeconds'] = datetime.utcfromtimestamp(tournaments[tournament]['durationSeconds']).strftime("%H:%M:%S")
tournaments[tournament]['startTimeSeconds'] = datetime.utcfromtimestamp(tournaments[tournament]['startTimeSeconds']).strftime("%d.%m.%Y %H:%M:%S")
return tournaments
| 9,081
|
def init_tof_1(xshut):
"""XSHUT port HIGH enables the device."""
rpi_gpio.setup_output(xshut)
rpi_gpio.write_output(xshut, 1)
| 9,082
|
def less_goals_scored():
"""
returns the lowest number of goals scored during one week
"""
return goals_scored('min')
| 9,083
|
def compute_avgpool_output_shape(input_shape:Sequence[Union[int, None]],
kernel_size:Union[Sequence[int], int]=1,
stride:Union[Sequence[int], int]=1,
padding:Union[Sequence[int], int]=0,
channel_last:bool=False) -> Tuple[Union[int, None]]:
""" finished, cheched,
compute the output shape of a avgpool layer
input_shape: sequence of int or None,
shape of an input Tensor,
the first dimension is the batch dimension, which is allowed to be `None`
kernel_size: int, or sequence of int, default 1,
kernel size (filter size) of the layer, should be compatible with `input_shape`
stride: int, or sequence of int, default 1,
stride (down-sampling length) of the layer, should be compatible with `input_shape`
padding: int, or sequence of int, default 0,
padding length(s) of the layer, should be compatible with `input_shape`
channel_last: bool, default False,
channel dimension is the last dimension,
or the second dimension (the first is the batch dimension by convention)
Returns:
--------
output_shape: tuple,
shape of the output Tensor
"""
output_shape = compute_output_shape(
'avgpool',
input_shape, 1, kernel_size, stride, padding, 0, 1,
channel_last,
)
return output_shape
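# Illustrative check (an assumption, since `compute_output_shape` is not shown here):
# with the usual pooling arithmetic out = floor((n + 2*padding - kernel) / stride) + 1,
# a (None, 64, 224) input with kernel_size=2, stride=2 would give (None, 64, 112).
shape = compute_avgpool_output_shape((None, 64, 224), kernel_size=2, stride=2)
print(shape)   # expected: (None, 64, 112) under the assumption above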
| 9,084
|
def notebuild():
"""
build tool
"""
args = command_line_parser()
package = PackageBuild()
if args.command == 'pull':
package.git_pull()
elif args.command == 'push':
package.git_push()
elif args.command == 'install':
package.git_install()
elif args.command == 'build':
package.git_build()
elif args.command == 'clean':
package.git_clean()
elif args.command == 'clean_history':
package.git_clean_history()
elif args.command == 'tags':
package.git_tags()
elif args.command == 'help':
info = """
build
pull
push
install
clean
clean_history
help
tags
"""
print(info)
| 9,085
|
def _obs_intersect(line1, line2):
    """Check if two line segments intersect. The boundaries don't count as
    intersection."""
    (x0, y0), (x1, y1) = line1
    (x2, y2), (x3, y3) = line2
base1 = (x0, y0)
base2 = (x2, y2)
dir1 = (x1-x0, y1-y0)
dir2 = (x3-x2, y3-y2)
t1, t2 = _intersect(base1, dir1, base2, dir2)
eps = 0.00001
    return -eps < t1 < 1.0 + eps and -eps < t2 < 1.0 + eps
| 9,086
|
def project_configure(request, project_name):
"""
get configuration
:param request: request object
:param project_name: project name
:return: json
"""
# get configuration
if request.method == 'GET':
project = Project.objects.get(name=project_name)
project = model_to_dict(project)
project['configuration'] = json.loads(project['configuration']) if project['configuration'] else None
return JsonResponse(project)
# update configuration
elif request.method == 'POST':
project = Project.objects.filter(name=project_name)
data = json.loads(request.body)
configuration = json.dumps(data.get('configuration'), ensure_ascii=False)
project.update(**{'configuration': configuration})
# for safe protection
        project_name = re.sub(r'[!@#$;&*~"\'{}\[\]\-+%^]+', '', project_name)
# execute generate cmd
cmd = ' '.join(['gerapy', 'generate', project_name])
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = bytes2str(p.stdout.read()), bytes2str(p.stderr.read())
if not stderr:
return JsonResponse({'status': '1'})
else:
return JsonResponse({'status': '0', 'message': stderr})
| 9,087
|
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, h in enumerate(model.initial_state):
feed_dict[h] = state[i]
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
| 9,088
|
def unemployment(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="UNRATE",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
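# Hedged usage sketch: "YOUR_IEX_TOKEN" is a placeholder, not a real credential.
# With the default format="json" the call returns the raw UNRATE time-series payload.
unrate = unemployment(token="YOUR_IEX_TOKEN")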
| 9,089
|
def print_dict(d):
"""按照键值打印一个字典"""
for key,value in d.items():
print( key + ':' + str(value))
| 9,090
|
def test_crd_nof_shots(crd_file):
"""Return number of shots."""
hdr, _, _, fname = crd_file
crd = CRDReader(Path(fname))
assert crd.nof_shots == hdr["nofShots"]
| 9,091
|
def splitunc(p):
"""Deprecated since Python 3.1. Please use splitdrive() instead;
it now handles UNC paths.
Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
import warnings
warnings.warn("ntpath.splitunc is deprecated, use ntpath.splitdrive instead",
DeprecationWarning, 2)
drive, path = splitdrive(p)
if len(drive) == 2:
# Drive letter present
return p[:0], p
return drive, path
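# Illustrative usage of the recommended replacement (ntpath.splitdrive handles
# UNC paths directly, so splitunc is no longer needed):
import ntpath
assert ntpath.splitdrive('//host/mount/dir/file') == ('//host/mount', '/dir/file')
assert ntpath.splitdrive('C:/dir/file') == ('C:', '/dir/file')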
| 9,092
|
def expand_locations(ctx, input, targets = []):
"""Expand location templates.
Expands all `$(execpath ...)`, `$(rootpath ...)` and deprecated `$(location ...)` templates in the
given string by replacing with the expanded path. Expansion only works for labels that point to direct dependencies
of this rule or that are explicitly listed in the optional argument targets.
See https://docs.bazel.build/versions/main/be/make-variables.html#predefined_label_variables.
Use `$(rootpath)` and `$(rootpaths)` to expand labels to the runfiles path that a built binary can use
to find its dependencies. This path is of the format:
- `./file`
- `path/to/file`
- `../external_repo/path/to/file`
Use `$(execpath)` and `$(execpaths)` to expand labels to the execroot (where Bazel runs build actions).
This is of the format:
- `./file`
- `path/to/file`
- `external/external_repo/path/to/file`
- `<bin_dir>/path/to/file`
- `<bin_dir>/external/external_repo/path/to/file`
    The deprecated `$(location)` and `$(locations)` expansions return either the execpath or rootpath depending on the context.
Args:
ctx: context
input: String to be expanded
targets: List of targets for additional lookup information.
Returns:
The expanded path or the original path
"""
return ctx.expand_location(input, targets = targets)
| 9,093
|
def scrape_db(test=False, write_file=True):
"""
    Function to scrape the bodybuilding.com recipe database and save results as json.
    Parameters:
    -----------
    test : bool
        If True, only the single-recipe probe request is used instead of a full scrape.
    write_file : bool
        If True, dump the scraped recipe list to 'bodybuilding_recipes.json' in the data folder.
    """
# Hacky way to get all recipes - you have to request the number. Luckily,
# this is listed at the beginning of any result you pull from DB.
# We want all of the recipes, so we'll do a quick request of one recipe to
# get the 'total' number in the DB
url_request = 'https://cms-api.bodybuilding.com/BbcomRecipe'
url_parameters = {'sort': 'publishDate', 'order': 'desc', 'limit': '1'}
fake_recipes_list = requests.get(url_request, params=url_parameters)
fake_recipes = bs4.BeautifulSoup(fake_recipes_list.content, features='html.parser')
fake = json.loads(str(fake_recipes))
# Get the total number of recipes in the db
total_recipes = fake['total']
    if test:
all_recipes = fake_recipes
else:
# Change the 'limit' on the url to the total number of recipes
url_parameters['limit'] = str(total_recipes)
all_recipes_list = requests.get(url_request, params=url_parameters)
all_recipes = bs4.BeautifulSoup(all_recipes_list.content, features='html.parser')
# Just get search results and get rid of data before.
all_recipes_list = json.loads(str(all_recipes))['_embedded']['bb-cms:search-results']
# Dump to json file - results will always be saved in 'data' folder
if write_file:
        save_path = _DATA_DIR.joinpath('bodybuilding_recipes.json')
        with open(save_path, 'w') as rf:
            json.dump(all_recipes_list, rf)
return all_recipes_list
| 9,094
|
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator
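# Hedged usage sketch (the `state` dict and `handler` below are illustrative only):
# the decorator inspects the first positional argument for the required keys.
@require(['user', 'token'])
def handler(state):
    return state['user']
handler({'user': 'alice', 'token': 'x'})      # OK
# handler({'user': 'alice'})                  # raises KeyError: 'token' not passed to handler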
| 9,095
|
def set_list(event):
"""
insert an edited line from the entry widget
back into the listbox
"""
try:
index = listbox1.curselection()[0]
# delete old listbox line
listbox1.delete(index)
except IndexError:
index = tk.END
# insert edited item back into listbox1 at index
listbox1.insert(index, enter1.get())
| 9,096
|
def simplify_unicode(sentence):
"""
Most accented Latin characters are pronounced just the same as the base character.
Shrink as many extended Unicode repertoire into the Estonian alphabet as possible.
It is GOOD for machine learning to have smaller ortographic repertoire.
It is a BAD idea if we start using any proper name dictionaries for morph analysis
or pronunciations later on. You are warned.
:param sentence:
:return: str
"""
sentence = sentence.replace("Ð", "D").replace("Þ", "Th")
sentence = sentence.replace("ð", "d").replace("þ", "th")
sentence = sentence.replace("ø", "ö").replace("Ø", "Ö")
sentence = sentence.replace("ß", "ss").replace("ẞ", "Ss")
sentence = re.sub(r'S(c|C)(h|H)', r'Š', sentence)
sentence = re.sub(r'sch', r'š', sentence)
sentence = re.sub(r'[ĆČ]', r'Tš', sentence)
sentence = re.sub(r'[ćč]', r'tš', sentence)
sentence = re.sub(r'[^A-ZÄÖÜÕŽŠa-zäöüõšž ,]+', lambda m: r'{}'.format( strip_combining(m.group(0)) ), sentence)
return sentence
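# Illustrative examples (hedged: behaviour for characters outside the Estonian
# repertoire also depends on the strip_combining helper, which is not shown here;
# the inputs below avoid that path):
assert simplify_unicode("Straße") == "Strasse"
assert simplify_unicode("Schmidt") == "Šmidt"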
| 9,097
|
def read_file(pickle_file_name):
"""Reads composite or non-composite novelty results from Pickle file.
:param pickle_file_name: Path to input file (created by
`write_standard_file` or `write_pmm_file`).
:return: novelty_dict: Has the following keys if not a composite...
novelty_dict['denorm_radar_matrix_baseline']: See doc for
`write_standard_file`.
novelty_dict['denorm_radar_matrix_trial']: Same.
novelty_dict['novel_indices']: Same.
novelty_dict['denorm_radar_matrix_upconv']: Same.
novelty_dict['denorm_radar_matrix_upconv_svd']: Same.
novelty_dict['percent_variance_to_keep']: Same.
novelty_dict['cnn_feature_layer_name']: Same.
novelty_dict['multipass']: Same.
novelty_dict['baseline_full_id_strings']: Same.
novelty_dict['baseline_times_unix_sec']: Same.
novelty_dict['trial_full_id_strings']: Same.
novelty_dict['trial_times_unix_sec']: Same.
novelty_dict['cnn_file_name']: Same.
novelty_dict['upconvnet_file_name']: Same.
...or the following keys if composite...
novelty_dict['mean_denorm_radar_matrix_baseline']:
See doc for `write_pmm_file`.
novelty_dict['mean_denorm_radar_matrix_novel']: Same.
novelty_dict['mean_denorm_radar_matrix_upconv']: Same.
novelty_dict['mean_denorm_radar_matrix_upconv_svd']: Same.
novelty_dict['cnn_file_name']: Same.
novelty_dict['non_pmm_file_name']: Same.
novelty_dict['pmm_max_percentile_level']: Same.
:return: pmm_flag: Boolean flag. True if `novelty_dict` contains composite,
False otherwise.
:raises: ValueError: if dictionary does not contain expected keys.
"""
    with open(pickle_file_name, 'rb') as pickle_file_handle:
        novelty_dict = pickle.load(pickle_file_handle)
pmm_flag = MEAN_BASELINE_MATRIX_KEY in novelty_dict
if pmm_flag:
missing_keys = list(
set(PMM_FILE_KEYS) - set(novelty_dict.keys())
)
else:
missing_keys = list(
set(STANDARD_FILE_KEYS) - set(novelty_dict.keys())
)
if len(missing_keys) == 0:
return novelty_dict, pmm_flag
error_string = (
'\n{0:s}\nKeys listed above were expected, but not found, in file '
'"{1:s}".'
).format(str(missing_keys), pickle_file_name)
raise ValueError(error_string)
| 9,098
|
def AutoscalersForMigs(migs, autoscalers, project):
"""Finds Autoscalers with target amongst given IGMs.
Args:
migs: List of triples (IGM name, scope type, scope name).
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
Returns:
    A list of all Autoscalers whose target is among the given MIGs.
"""
igm_url_regexes = []
for (name, scope_type, scope_name) in migs:
igm_url_regexes.append(
'/projects/{project}/{scopeType}/{scopeName}/'
'instanceGroupManagers/{name}$'
.format(project=project,
scopeType=(scope_type + 's'),
scopeName=scope_name,
name=name))
igm_url_regex = re.compile('(' + ')|('.join(igm_url_regexes) + ')')
result = [
autoscaler for autoscaler in autoscalers
if igm_url_regex.search(autoscaler.target)
]
return result
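# Hedged usage sketch: the namedtuple below is a stand-in for the real Autoscaler
# message; any object exposing a `.target` self-link URL works with the regex above.
from collections import namedtuple
_FakeAutoscaler = namedtuple('FakeAutoscaler', ['target'])
_autoscalers = [_FakeAutoscaler(
    'https://compute.googleapis.com/compute/v1/projects/my-proj/'
    'zones/us-central1-a/instanceGroupManagers/my-mig')]
_migs = [('my-mig', 'zone', 'us-central1-a')]
assert AutoscalersForMigs(_migs, _autoscalers, 'my-proj') == _autoscalers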
| 9,099
|