content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import numpy as np
import torch
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs | 9d8de5d5bd337720f386a45ad40f9e901a999b52 | 21,100 |
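A minimal usage sketch for the helper above (the batch shape below is illustrative, not from the original):

mm_inputs = _demo_mm_inputs(input_shape=(2, 3, 32, 32), num_classes=5)
assert mm_inputs['imgs'].shape == (2, 3, 32, 32)             # float inputs with gradients enabled
assert mm_inputs['gt_semantic_seg'].shape == (2, 1, 32, 32)  # integer class labels
assert len(mm_inputs['img_metas']) == 2                      # one meta dict per image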
import socket
def get_ephemeral_port(sock_family=socket.AF_INET, sock_type=socket.SOCK_STREAM):
"""Return an ostensibly available ephemeral port number."""
# We expect that the operating system is polite enough to not hand out the
# same ephemeral port before we can explicitly bind it a second time.
s = socket.socket(sock_family, sock_type)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port | 37287b70e35b8aa7fbdb01ced1882fb3bbf38543 | 21,101 |
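A usage sketch; as the comment above notes, the returned port can still be grabbed by another process before it is re-bound, so treat it as a best-effort hint:

import socket

port = get_ephemeral_port()
# Re-bind immediately to shrink (but not eliminate) the race window.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('', port))
server.listen(1)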
from typing import Optional

from lmfit import Model, Parameters, models  # ConvSpectrum is assumed to come from the surrounding package
def IR_guess_model(spectrum: ConvSpectrum, peak_args: Optional[dict] = None) -> tuple[Model, dict]:
"""
Guess a fit for the IR spectrum based on its peaks.
:param spectrum: the ConvSpectrum to be fit
:param peak_args: arguments for finding peaks
:return: Model, parameters
"""
min_intensity, max_intensity = spectrum.range
range_intensities = max_intensity - min_intensity
IR_peak_defaults = {
"prominence": 0.1 * range_intensities,
}
peak_args = IR_peak_defaults if peak_args is None else {**IR_peak_defaults, **peak_args}
peak_indices, peak_properties = spectrum.peaks(**peak_args, indices=True)
params = Parameters()
composite_model = None
# Fit the peaks
for i, peak_idx in enumerate(peak_indices):
prefix = f"a{i}_"
model = models.GaussianModel(prefix=prefix)
center = spectrum.energies[peak_idx]
height = spectrum.intensities[peak_idx]
model.set_param_hint("amplitude", min=0.05 * height)
model.set_param_hint("center", min=center - 10, max=center + 10)
model.set_param_hint("sigma", min=0.1, max=100)
peak_params = {
f"{prefix}amplitude": height * 0.8,
f"{prefix}center": center,
f"{prefix}sigma": 10,
}
        params.update(model.make_params(**peak_params))  # update in place; reassigning the return value could bind None
composite_model = model if composite_model is None else composite_model + model
return composite_model, params | fa56e3c183ef08b35f177df1d727ff134c964eaf | 21,102 |
import numpy as np

def virus_monte_carlo(initial_infected, population, k):
""" Generates a list of points to which some is infected
at a given value k starting with initial_infected infected.
There is no mechanism to stop the infection from reaching
the entire population.
:param initial_infected: The amount of people whom are infected at the
start.
:type initial_infected: int
:param population: The total population sample.
:type population: int
:param k: The rate of infection.
:type k: float
:return: An array of the amount of people per time infected.
:rtype: tuple(time, infected)
"""
people_array = np.arange(1, population+1, dtype=int)
current_infected = initial_infected
people_infected = np.array([current_infected])
time_array = np.array([0])
# Array math.
counter = 0
for _ in people_array:
        probability = k * current_infected / population
random_array = np.random.uniform(0, 1, size=people_array.size)
        random_bool = random_array <= probability
        people_array = people_array[~random_bool]  # keep only the people not yet infected
if people_array.size != population:
current_infected = (population-people_array.size)
people_infected = np.append(people_infected, current_infected)
            counter += 1
            time_array = np.append(time_array, counter)
            if current_infected == population:  # everyone is infected; stop early
break
return (time_array, people_infected) | 856af13a8a7fdbb931ba32b97ff7bd5207e9ca49 | 21,103 |
def threadsafe_generator(f):
"""
A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g | 013e0df91f70da8c8f4f501bc31d8bddcf378787 | 21,104 |
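The decorator presupposes a threadsafe_iter wrapper that is not shown; a common sketch of it (an assumption, not the original author's code) is:

import threading

class threadsafe_iter:
    """Serialize access to an iterator so multiple threads can safely share it."""

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:  # only one thread may advance the iterator at a time
            return next(self.it)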
def lastmsg(self):
"""
        Return the last logged message if the **_last_message** attribute is available.
        Returns:
            last message or empty str
"""
return getattr(self, '_last_message', '') | ad080c05caadbb644914344145460db0164f017c | 21,105 |
def _callback_on_all_dict_keys(dt, callback_fn):
"""
    Apply callback_fn to all dictionary keys recursively.
"""
result = {}
for (key, val) in dt.items():
        if isinstance(val, dict):
val = _callback_on_all_dict_keys(val, callback_fn)
result[callback_fn(key)] = val
return result | 3cab018413a7ba8a0e5bbae8574025253a2ea885 | 21,106 |
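A small usage sketch, normalizing the keys of a nested dict:

nested = {"First Name": "Ada", "Address": {"Street Name": "Main", "City": "London"}}
snake_cased = _callback_on_all_dict_keys(nested, lambda k: k.lower().replace(" ", "_"))
# {'first_name': 'Ada', 'address': {'street_name': 'Main', 'city': 'London'}}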
import sys

import numpy as np
import pandas as pd
import statsmodels.api as sm

# sensitivity_statistics and label_maker are assumed to come from the surrounding sensemakr package.
def ovb_partial_r2_bound(model=None, treatment=None, r2dxj_x=None, r2yxj_dx=None,
benchmark_covariates=None, kd=1, ky=None):
"""
Provide a Pandas DataFrame with the bounds on the strength of the unobserved confounder.
Adjusted estimates, standard errors and t-values (among other quantities) need to be computed
manually by the user using those bounds with the functions adjusted_estimate, adjusted_se and adjusted_t.
:Required parameters: (model and treatment) or (r2dxj_x and r2yxj_dx).
Parameters
----------
model : statsmodels OLSResults object
a fitted statsmodels OLSResults object for the restricted regression model you have provided.
treatment : string
a string with the name of the "treatment" variable, e.g. the independent variable of interest.
r2dxj_x : float
float with the partial R2 of covariate Xj with the treatment D (after partialling out the effect of the remaining covariates X, excluding Xj).
r2yxj_dx : float
float with the partial R2 of covariate Xj with the outcome Y (after partialling out the effect of the remaining covariates X, excluding Xj).
benchmark_covariates : string or list of strings
a string or list of strings with names of the variables to use for benchmark bounding.
kd : float or list of floats
a float or list of floats with each being a multiple of the strength of association between a
benchmark variable and the treatment variable to test with benchmark bounding (Default value = 1).
ky : float or list of floats
same as kd except measured in terms of strength of association with the outcome variable (Default value = None).
Returns
-------
Pandas DataFrame
A Pandas DataFrame containing the following variables:
**bound_label** : a string created by label_maker to serve as a label for the bound for printing & plotting purposes.
**r2dz_x** : a float or list of floats with the partial R^2 of a putative unobserved confounder "z"
with the treatment variable "d", with observed covariates "x" partialed out, as implied by z being kd-times
as strong as the benchmark_covariates.
**r2yz_dx** : a float or list of floats with the partial R^2 of a putative unobserved confounder "z"
with the outcome variable "y", with observed covariates "x" and the treatment variable "d" partialed out,
as implied by z being ky-times as strong as the benchmark_covariates.
Examples
---------
Let's construct bounds from summary statistics only. Suppose you didn't have access to the data, but only to the treatment and outcome regression tables.
You can still compute the bounds.
>>> # First import the necessary libraries.
>>> import sensemakr as smkr
>>> # Use the t statistic of female in the outcome regression to compute the partial R2 of female with the outcome.
>>> r2yxj_dx = smkr.partial_r2(t_statistic = -9.789, dof = 783)
>>> # Use the t-value of female in the *treatment* regression to compute the partial R2 of female with the treatment.
>>> r2dxj_x = smkr.partial_r2(t_statistic = -2.680, dof = 783)
>>> # Compute manually bounds on the strength of confounders 1, 2, or 3 times as strong as female.
>>> bounds = smkr.ovb_partial_r2_bound(r2dxj_x = r2dxj_x, r2yxj_dx = r2yxj_dx,kd = [1, 2, 3], ky = [1, 2, 3])
>>> # Compute manually adjusted estimates.
>>> bound_values = smkr.adjusted_estimate(estimate = 0.0973, se = 0.0232, dof = 783, r2dz_x = bounds['r2dz_x'], r2yz_dx = bounds['r2yz_dx'])
>>> # Plot contours and bounds.
>>> smkr.ovb_contour_plot(estimate = 0.0973, se = 0.0232, dof = 783)
>>> smkr.add_bound_to_contour(bounds=bounds, bound_value = bound_values)
"""
if (model is None or treatment is None) and (r2dxj_x is None or r2yxj_dx is None):
        sys.exit('Error: ovb_partial_r2_bound requires either a statsmodels OLSResults object and a treatment name '
                 'or the partial R^2 values with the benchmark covariate, r2dxj_x and r2yxj_dx.')
if (treatment is not None and type(treatment) is not str):
sys.exit('Error: treatment must be a single string.')
if ((benchmark_covariates is None) and (r2dxj_x is not None)) :
#return None
benchmark_covariates=['manual']
elif(benchmark_covariates is None):
return None
elif type(benchmark_covariates) is str:
benchmark_covariates = [benchmark_covariates]
else:
if ((type(benchmark_covariates) is not list) and (type(benchmark_covariates) is not dict)):
sys.exit('Benchmark covariates must be a string, list of strings, 2d list containing only strings or dict containing only strings and list of strings.')
if (type(benchmark_covariates) is list):
for i in benchmark_covariates:
if type(i) is not str and (type(i) is not list or any(type(j) is not str for j in i)):
sys.exit('Benchmark covariates must be a string, list of strings, 2d list containing only strings or dict containing only strings and list of strings.')
else: #benchmark_covariates is a dict
for i in benchmark_covariates:
if(type(benchmark_covariates[i]) is not str and (type(benchmark_covariates[i]) is not list or any(type(j) is not str for j in benchmark_covariates[i]))):
sys.exit('Benchmark covariates must be a string, list of strings, 2d list containing only strings or dict containing only strings and list of strings.')
if model is not None:
m = pd.DataFrame(model.model.exog, columns=model.model.exog_names)
d = np.array(m[treatment])
non_treatment = m.drop(columns=treatment) # all columns except treatment
non_treatment.insert(0, 0, 1) # add constant term for regression
treatment_model = sm.OLS(d, non_treatment)
treatment_results = treatment_model.fit()
if type(benchmark_covariates) is str:
# r2yxj_dx = partial R^2 with outcome; r2dxj_x = partial R^2 with treatment
r2yxj_dx = [sensitivity_statistics.partial_r2(model, covariates=benchmark_covariates)]
r2dxj_x = [sensitivity_statistics.partial_r2(treatment_results, covariates=benchmark_covariates)]
elif(type(benchmark_covariates) is list):
r2yxj_dx, r2dxj_x = [], []
for b in benchmark_covariates:
r2yxj_dx.append(sensitivity_statistics.group_partial_r2(model, covariates=b))
r2dxj_x.append(sensitivity_statistics.group_partial_r2(treatment_results, covariates=b))
# Group Benchmark
elif(type(benchmark_covariates) is dict):
r2yxj_dx, r2dxj_x = [], []
for b in benchmark_covariates:
r2yxj_dx.append(sensitivity_statistics.group_partial_r2(model, benchmark_covariates[b]))
r2dxj_x.append(sensitivity_statistics.group_partial_r2(treatment_results, benchmark_covariates[b]))
elif r2dxj_x is not None:
if np.isscalar(r2dxj_x):
r2dxj_x = [r2dxj_x]
if np.isscalar(r2yxj_dx):
r2yxj_dx = [r2yxj_dx]
bounds = pd.DataFrame()
for i in range(len(benchmark_covariates)):
r2dxj_x[i], r2yxj_dx[i] = sensitivity_statistics.check_r2(r2dxj_x[i], r2yxj_dx[i])
if type(kd) is list:
kd = np.array(kd)
if ky is None:
ky=kd
r2dz_x = kd * (r2dxj_x[i] / (1 - r2dxj_x[i]))
if (np.isscalar(r2dz_x) and r2dz_x >= 1) or (not np.isscalar(r2dz_x) and any(i >= 1 for i in r2dz_x)):
sys.exit("Implied bound on r2dz.x >= 1. Impossible kd value. Try a lower kd.")
r2zxj_xd = kd * (r2dxj_x[i] ** 2) / ((1 - kd * r2dxj_x[i]) * (1 - r2dxj_x[i]))
if (np.isscalar(r2zxj_xd) and r2zxj_xd >= 1) or (not np.isscalar(r2zxj_xd) and any(i >= 1 for i in r2zxj_xd)):
sys.exit("Impossible kd value. Try a lower kd.")
r2yz_dx = ((np.sqrt(ky) + np.sqrt(r2zxj_xd)) / np.sqrt(1 - r2zxj_xd)) ** 2 * (r2yxj_dx[i] / (1 - r2yxj_dx[i]))
        if (np.isscalar(r2yz_dx) and r2yz_dx > 1) or (not np.isscalar(r2yz_dx) and any(i > 1 for i in r2yz_dx)):
            print('Warning: Implied bound on r2yz.dx greater than 1, try lower kd and/or ky. Setting r2yz.dx to 1.')
            r2yz_dx = np.minimum(r2yz_dx, 1)  # works for both scalars and arrays
        if type(benchmark_covariates) is not dict:
            benchmark_name = benchmark_covariates[i]
        else:
            benchmark_name = list(benchmark_covariates)[i]
        if np.isscalar(kd):
            new_rows = [{'bound_label': label_maker(benchmark_covariate=benchmark_name, kd=kd, ky=ky),
                         'r2dz_x': r2dz_x, 'r2yz_dx': r2yz_dx}]
        else:
            new_rows = [{'bound_label': label_maker(benchmark_covariate=benchmark_name, kd=kd[j], ky=ky[j]),
                         'r2dz_x': r2dz_x[j], 'r2yz_dx': r2yz_dx[j]}
                        for j in range(len(kd))]
        # DataFrame.append was removed in pandas 2.0; build the rows and concat instead.
        bounds = pd.concat([bounds, pd.DataFrame(new_rows)], ignore_index=True)
return bounds | a94aed31bd53caf0457b3aedf339572d2a56a8a1 | 21,107 |
import nltk
import pandas as pd

# preprocess is assumed to be a tokenizing helper defined elsewhere in the project.
def top_ngrams(df, n=2, ngrams=10):
"""
* Not generalizable in this form *
* This works well, but is very inefficient and should be optimized or rewritten *
    Takes a preprocessed, tokenized column and creates a large word list.
    Returns the most frequent ngrams.
    Arguments:
    df = name of DataFrame with a 'lemma' column (this will be generalizable in a future commit)
    n = number of words per grouping, e.g. 1, 2 or 3
    ngrams = number of ngrams to return
"""
word_list = preprocess(''.join(str(df['lemma'].tolist())))
return (pd.Series(nltk.ngrams(word_list, n)).value_counts())[:ngrams] | a6c540a30a288a8d26bf6f966b44b9f080db0026 | 21,108 |
def install_openvpn(instance, arg, verbose=True):
""" """
install(instance, {"module":"openvpn"}, verbose=True)
generate_dh_key(instance, {"dh_name":"openvpn", "key_size":"2048"})
server_conf = open("simulation/workstations/"+instance.name+"/server_openvpn.conf", "w")
server_conf.write("port 1197\n")
server_conf.write("proto udp\n")
server_conf.write("dev tun\n")
server_conf.write("ca /certs/"+arg["private_key_certificate_name"]+"/"+arg["private_key_middle_certificate_name"]+".cert\n")
server_conf.write("cert /certs/"+arg["private_key_certificate_name"]+"/"+arg["private_key_certificate_name"]+".cert\n")
server_conf.write("key /certs/"+arg["private_key_certificate_name"]+"/"+arg["private_key_certificate_name"]+".key\n")
server_conf.write("dh /certs/dh/openvpn-2048.key\n")
server_conf.write("server 10.122.0.0 255.255.255.0 \n")
server_conf.write("push \"10.122.1.0 255.255.255.0\"\n")
server_conf.write("keepalive \n")
server_conf.write("cipher AES-128-CBC \n")
server_conf.write("comp-lzo \n")
server_conf.write("max-clients "+arg["max_client"]+"\n")
if arg["user"] == "":
server_conf.write("user nobody\n")
else:
server_conf.write("user "+arg["user"]+"\n")
if arg["group"] == "":
server_conf.write("group nobody\n")
else:
server_conf.write("group "+arg["group"]+"\n")
server_conf.write("persist-key\n")
server_conf.write("persist-tun\n")
server_conf.write("status openvpn-status.log\n")
server_conf.write("log openvpn.log\n")
server_conf.write("verb 9\n")
server_conf.close()
if upload_file(instance, {"instance_path":"/etc/openvpn/server.conf", "host_manager_path": "simulation/workstations/"+instance.name+"/server_openvpn.conf"}, verbose=False) == 1:
return 1
    if restart_service(instance, {"service": "openvpn"}) == 1:
        return 1
    return 0
def rossoporn_parse(driver: webdriver.Firefox) -> tuple[list[str], int, str]:
"""Read the html for rossoporn.com"""
#Parses the html of the site
soup = soupify(driver)
dir_name = soup.find("div", class_="content_right").find("h1").text
dir_name = clean_dir_name(dir_name)
images = soup.find_all("div", class_="wrapper_g")
images = ["".join([PROTOCOL, img.get("src").replace("tn_", "")]) for tag_list in images for img in tag_list.find_all("img")]
num_files = len(images)
driver.quit()
return (images, num_files, dir_name) | 21aad0798bc3e13badb1076ec40c36c56f47ebf7 | 21,110 |
def pid_from_context(_, context, **kwargs):
"""Get PID from marshmallow context."""
pid = (context or {}).get('pid')
return pid.pid_value if pid else missing | 350fd4c915e186dd41575c5842e47beb7d055fb5 | 21,111 |
import numpy as np

def score_text(text, tokenizer, preset_model, finetuned_model):
""" Uses rule-based rankings. Higher is better, but different features have different scales.
Args:
text (str/ List[str]): one story to rank.
        tokenizer (PyTorch tokenizer): GPT2 byte tokenizer.
        preset_model (PyTorch model): preset GPT2 model of the same/different size as the finetuned model.
        finetuned_model (PyTorch model): fine-tuned GPT2 model.
Returns a scores np.array of corresponding to text.
"""
assert isinstance(
text, (str, list)), f"score_text accepts type(text) = str/list, but got {type(text)}"
if isinstance(text, list):
text = ' '.join(text)
# Keep same order as in constants.FEATURES
scores = [0 for _ in range(len(constants.FEATURES))]
texts_sentences = split_to_sentences(text)
# scores[0] = _coherency(texts_sentences, lsa_embedder)
scores[1] = _readabilty(text, texts_sentences)
# Set of text words without punctuation and stop words.
filtered_words = list(filter(
lambda word: word not in constants.STOP_WORDS, split_words(text.lower().strip())))
filtered_words_set = set(filtered_words)
# Sentiment.
scores[2] = _sentiment_polarity(filtered_words)
# Set based measures.
scores[3], scores[4] = _simplicity(filtered_words_set), _diversity(
filtered_words, filtered_words_set)
    # The bigger the difference, the more tale-like (i.e. similar to the fine-tuned model) the text is.
scores[5] = KLDIV_error_per_text(
tokenizer, preset_model, finetuned_model, text)
# print(" | ".join(f'{key}: {score:.2f}' for key,
# score in zip(constants.FEATURES, scores)))
return np.array(scores) | e304975b55c44e78f6ce92f4df9d1ba563389b8b | 21,112 |
import sys
import subprocess
def get_bot_list(swarming_server, dimensions, dead_only):
"""Returns a list of swarming bots."""
cmd = [
sys.executable, 'swarming.py', 'bots',
'--swarming', swarming_server,
'--bare',
]
    for k, v in sorted(dimensions.items()):  # .iteritems() is Python 2 only
cmd.extend(('--dimension', k, v))
if dead_only:
cmd.append('--dead-only')
return subprocess.check_output(cmd, cwd=ROOT_DIR).splitlines() | b34d8499ba94a0cf924b8dd446df6b797b39c35c | 21,113 |
import os
def running_on_kaggle() -> bool:
"""Detect if the current environment is running on Kaggle.
Returns:
bool:
True if the current environment is on Kaggle, False
otherwise.
"""
return os.environ.get("KAGGLE_KERNEL_RUN_TYPE") == "Interactive" | ac1432666ccc8ca8e9d1d73938c5a1212f4fc429 | 21,114 |
import subprocess
def _get_picture_from_attachments(path):
"""Get picture bytes from telegram server"""
url = 'https://api.telegram.org/file/bot' + API_TOKEN + '/' + path
pic_path = './photos/pic.jpg'
curl_command = f'curl {url} > {pic_path}'
data = subprocess.run(curl_command, shell=True)
if data.returncode != 0:
raise InvalidAttachments('Something evil has happened :c')
with open(pic_path, 'rb') as f:
data = f.read()
return data | d3ed1a38d8646cc506b70b4bcc4f8c597b99f5f4 | 21,115 |
import bs4

def parse_cards(account_page_content):
"""
Parse card metadata and product balances from /ClipperCard/dashboard.jsf
"""
begin = account_page_content.index(b'<!--YOUR CLIPPER CARDS-->')
end = account_page_content.index(b'<!--END YOUR CLIPPER CARDS-->')
card_soup = bs4.BeautifulSoup(account_page_content[begin:end], "html.parser")
serial_numbers = find_values(card_soup, 'Serial Number:', get_next_sibling_text)
nicknames = find_values(card_soup, 'Card Nickname:', get_inner_display_text)
types = find_values(card_soup, 'Type:', get_next_sibling_text)
statuses = find_values(card_soup, 'Status:', get_next_sibling_text)
products = parse_card_products(card_soup)
cards = []
for sn, nn, tp, st, pd in zip(serial_numbers, nicknames, types, statuses, products):
cards.append(Card(serial_number=sn, nickname=nn, type=tp, status=st, products=pd))
return cards | 6ec10941aebe88af27a75c407e6805698d5cf31c | 21,116 |
import matplotlib.pyplot as plt
import numpy as np

def interaction_time_data_string(logs, title):
    """
    Render a grouped bar chart of interaction counts by time of day,
    split by interaction medium, and return it as a base64-encoded image.
    """
interaction_map = dict(models.LogEntry.MEDIUM_CHOICES)
time_map = dict(models.LogEntry.TIME_CHOICES)
first_agg = recommender.group_list_by_sel(logs, lambda l: interaction_map[l.interaction_medium])
plt.clf()
keys = sorted(first_agg.keys())
sub_keys = sorted(list(time_map.keys()))
xs = np.arange(len(sub_keys)) * 2
width = .35
colors = np.array([
[205,224,241],
[190,26,9],
[0,105,253],
[255,114,0],
]) / 255.0
for i, reacc in enumerate( keys ):
sub_logs = first_agg[reacc]
counts = _counts_by_getter(sub_logs, lambda l: l.time_of_day)
ys = [counts.get(cont, 0) for cont in sub_keys]
plt.bar(xs + i * width, ys, width, label=reacc, color=colors[i])
ax = plt.gca()
ax.set_xticks(xs + width * (len(keys) // 2))
ax.set_xticklabels([time_map[k] for k in sub_keys])
plt.title(title)
plt.xlabel("Social Context")
plt.ylabel('Num Interactions')
plt.legend()
ax.xaxis.grid(False)
plt.tight_layout()
return pyplot_to_base64() | fc6f6a32d39f3bd87c3b7b816e333aef462fb0f3 | 21,117 |
import math
def _label_boost(boost_form, label):
"""Returns the label boost.
Args:
boost_form: Either NDCG or PRECISION.
label: The example label.
Returns:
        The boost value for the given label.
"""
boost = {
'NDCG': math.pow(2.0, label) - 1.0,
'PRECISION': 1.0 if label >= 1.0 else 0.0,
}
return boost[boost_form] | 811e87949b0bbe7dc98f63814b343ffd90fe129a | 21,118 |
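A quick worked example: for label 3, the NDCG boost is 2^3 - 1 = 7, while PRECISION only checks whether the example is relevant:

assert _label_boost('NDCG', 3.0) == 7.0
assert _label_boost('PRECISION', 3.0) == 1.0
assert _label_boost('PRECISION', 0.0) == 0.0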
def has_matching_ts_templates(reactant, bond_rearr):
"""
See if there are any templates suitable to get a TS guess from a template
Arguments:
reactant (autode.complex.ReactantComplex):
bond_rearr (autode.bond_rearrangement.BondRearrangement):
Returns:
bool:
"""
mol_graph = get_truncated_active_mol_graph(graph=reactant.graph,
active_bonds=bond_rearr.all)
ts_guess_templates = get_ts_templates()
for ts_template in ts_guess_templates:
if template_matches(reactant=reactant, ts_template=ts_template,
truncated_graph=mol_graph):
return True
return False | 10061734d2831668099f3e85d99366dda9f51157 | 21,119 |
from lxml import objectify

def get_commands(xml: objectify.ObjectifiedElement):
"""
    Returns the action attribute from the body element of the xml message.
:param xml:
:return:
"""
return xml.body.attrib["action"] | 3724e00c626814e792911ae094a5b200d8593f4c | 21,120 |
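A usage sketch with a hypothetical message layout that matches the accessors above:

from lxml import objectify

msg = objectify.fromstring('<message><body action="move" room="hall"/></message>')
assert get_commands(msg) == "move"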
def compression_point(w_db, slope = 1, compression = 1,
extrapolation_point = None, axis = -1):
"""Return input referred compression point"""
interpol_line = calc_extrapolation_line(w_db, slope, extrapolation_point,
axis)
return cross(interpol_line - w_db, compression) | 4c8793c5796d1359aa1fc00f226ecafda98c3f61 | 21,121 |
from typing import List
import logging

from immutablecollections import ImmutableSet, immutableset
from networkx import DiGraph, number_weakly_connected_components, subgraph, weakly_connected_components

# The predicate classes and REFERENCE_OBJECT_* labels are assumed to come from the surrounding package.
def pattern_remove_incomplete_region_or_spatial_path(
perception_graph: PerceptionGraphPattern
) -> PerceptionGraphPattern:
"""
Helper function to return a `PerceptionGraphPattern` verifying
that region and spatial path perceptions contain a reference object.
"""
graph = perception_graph.copy_as_digraph()
region_and_path_nodes: ImmutableSet[NodePredicate] = immutableset(
node
for node in graph.nodes
if isinstance(node, IsPathPredicate) or isinstance(node, RegionPredicate)
)
nodes_without_reference: List[NodePredicate] = []
for node in region_and_path_nodes:
has_reference_edge: bool = False
for successor in graph.successors(node):
predicate = graph.edges[node, successor]["predicate"]
if isinstance(predicate, RelationTypeIsPredicate):
if predicate.relation_type in [
REFERENCE_OBJECT_LABEL,
REFERENCE_OBJECT_DESTINATION_LABEL,
REFERENCE_OBJECT_SOURCE_LABEL,
]:
has_reference_edge = True
break
if not has_reference_edge:
nodes_without_reference.append(node)
logging.info(
f"Removing incomplete regions and paths. "
f"Removing nodes: {nodes_without_reference}"
)
graph.remove_nodes_from(nodes_without_reference)
def sort_by_num_nodes(g: DiGraph) -> int:
return len(g.nodes)
# We should maybe consider doing this a different way
# As this approach just brute force solves the problem rather than being methodical about it
if number_weakly_connected_components(graph) > 1:
        components = [
            subgraph(graph, component_nodes)
            for component_nodes in weakly_connected_components(graph)
        ]
        components.sort(key=sort_by_num_nodes, reverse=True)
computed_graph = subgraph(graph, components[0].nodes)
removed_nodes: List[NodePredicate] = []
for i in range(1, len(components)):
removed_nodes.extend(components[i].nodes)
logging.info(f"Cleanup disconnected elements. Removing: {removed_nodes}")
else:
computed_graph = graph
return PerceptionGraphPattern(computed_graph, dynamic=perception_graph.dynamic) | cbcc79602bf87e1ea88f8a0027d6cd19b74fb81c | 21,122 |
def other_shifted_bottleneck_distance(A, B, fudge=default_fudge, analysis=False):
"""Compute the shifted bottleneck distance between two diagrams, A and B (multisets)"""
A = pu.SaneCounter(A)
B = pu.SaneCounter(B)
if not A and not B:
return 0
radius = fudge(upper_bound_on_radius(A, B))
events = event_queue.EventQueue(A, B)
matching = GeometricBipartiteMatching(A, B)
# these counters are for performance monitoring only - they don't affect the logic
ctr, R_ctr, L_ctr, fail_ctr, win_ctr = 0, 0, 0, 0, 0
while events and radius > epsilon:
ctr += 1
event = events.next_event(radius)
if isinstance(event, event_queue.ExitEvent):
R_ctr += 1
matching.remove_all(event.edge)
else:
L_ctr += 1
if birth(event.edge, radius) >= death(event.edge, radius):
win_ctr += 1
continue # relies on ties being broken with the highest-radius edge
# assert not matching.diagonal_perfect()
if matching.diagonal_perfect():
fail_ctr += 1
radius = fudge(max(
events.next_diagonal_height(),
radius - (events.next_exit_shift(radius)
- birth(event.edge, radius)) / 2))
events.push(event)
continue
matching.maximize_matching(
shift=event.shift_to_check,
radius=radius)
if matching.diagonal_perfect():
# radius = fudge(matching.value())
events.push(event)
if analysis:
print("other:", len(A) + len(B), "total", ctr, "R", R_ctr, "L", L_ctr, "fail", fail_ctr, "win", win_ctr)
return radius | 51455945743bfc5f262711e826d1097122309f83 | 21,123 |
def getCountdown(c):
"""
Parse into a Friendly Readable format for Humans
"""
days = c.days
c = c.total_seconds()
hours = round(c//3600)
minutes = round(c // 60 - hours * 60)
seconds = round(c - hours * 3600 - minutes * 60)
return days, hours, minutes, seconds | f49225ae2680192340720c8958aa19b9e9369f5f | 21,124 |
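A quick check of the split:

import datetime

delta = datetime.timedelta(days=2, hours=3, minutes=4, seconds=5)
assert getCountdown(delta) == (2, 3, 4, 5)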
def fromPSK(valstr):
"""A special version of fromStr that assumes the user is trying to set a PSK.
In that case we also allow "none", "default" or "random" (to have python generate one), or simpleN
"""
if valstr == "random":
return genPSK256()
elif valstr == "none":
return bytes([0]) # Use the 'no encryption' PSK
elif valstr == "default":
return bytes([1]) # Use default channel psk
elif valstr.startswith("simple"):
# Use one of the single byte encodings
return bytes([int(valstr[6:]) + 1])
else:
return fromStr(valstr) | 73fa661458601ec33d2b839aeea060f7a26b530f | 21,125 |
import os
import os.path as osp
def get_subpackages(name):
"""Return subpackages of package *name*"""
splist = []
for dirpath, _dirnames, _filenames in os.walk(name):
if osp.isfile(osp.join(dirpath, "__init__.py")):
splist.append(".".join(dirpath.split(os.sep)))
return splist | e18de0b0e76841f89b95bef30d2ba473422902ec | 21,126 |
def list_hierarchy(class_name, bases):
"""
Creates a list of the class hierarchy
Args:
-----
class_name: name of the current class
bases: list/tuple of bases for the current class
"""
class_list = [Uri(class_name)]
for base in bases:
if base.__name__ not in IGNORE_CLASSES:
class_list.append(Uri(base.__name__))
    return list(set(class_list))
import logging

import pyautogui
from promise import Promise  # assumption: Promise.resolve from the 'promise' package

LOGGER = logging.getLogger(__name__)

def move_mouse_to_specific_location(x_coordinate, y_coordinate):
"""Moves the mouse to a specific point"""
LOGGER.debug("Moving mouse to (%d,%d)", x_coordinate, y_coordinate)
pyautogui.moveTo(x_coordinate, y_coordinate)
return Promise.resolve((x_coordinate, y_coordinate)) | eae5f50486bd1f2d127a2d94f87be586e697abcd | 21,128 |
def rss():
"""Return ps -o rss (resident) memory in kB."""
return float(mem("rss")) / 1024 | 92580a4873f2afca3f419a7f661e5cd39ec28b96 | 21,129 |
from operator import itemgetter

def compare_words(
word1_features,
word2_features,
count=10,
exclude=set(),
similarity_degree=0.5,
separate=False,
min_feature_value=0.3
):
"""
Сравнение двух слов на основе списка похожих (или вообще каких-либо фич слова).
Возвращает 3 списка: характерные для первого слова, второго и общие
:param dict[int, float] word1_features: фичи первого слова: словарь {feature: value}
:param dict[int, float] word2_features: фичи второго слова: словарь {feature: value}
:param in count: число слов в результах
:param float similarity_degree: число 0..1. 1 — полное разделение слов, 0 — максимальный поиск сходства
:param bool separate: «срогое разделение» — запрет попадания одного слова в несколько колонок
:param float min_feature_value: минимальное значение
"""
    diff1, diff2, common = {}, {}, {}  # Characteristic of the first word, of the second, and common to both
features = set(word1_features.keys()).union(word2_features.keys())
for feature in features:
if feature in exclude:
continue
feature1 = word1_features.get(feature, 0)
feature2 = word2_features.get(feature, 0)
if feature1 < min_feature_value and feature2 < min_feature_value:
continue
diff1_value = feature1 * (1 - feature2)
diff2_value = feature2 * (1 - feature1)
common_value = (feature1 * feature2) ** similarity_degree
max_value = max(diff1_value, diff2_value, common_value)
if diff1_value == max_value or not separate:
diff1[feature] = diff1_value
if diff2_value == max_value or not separate:
diff2[feature] = diff2_value
if common_value == max_value or not separate:
common[feature] = common_value
return (
sorted(diff1.items(), key=itemgetter(1), reverse=True)[:count],
sorted(diff2.items(), key=itemgetter(1), reverse=True)[:count],
sorted(common.items(), key=itemgetter(1), reverse=True)[:count],
) | 4a04292e48911e6a4152cb03c19cda8de51802fb | 21,130 |
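A usage sketch with toy feature dicts (integer feature ids, as the type hints suggest):

cat_features = {1: 0.9, 2: 0.8, 3: 0.1}
dog_features = {1: 0.85, 2: 0.2, 4: 0.7}
distinct1, distinct2, shared = compare_words(cat_features, dog_features, count=3)
# `shared` ranks feature 1 highest, since both words score it strongly.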
def dispatch_for_binary_elementwise_apis(x_type, y_type):
"""Decorator to override default implementation for binary elementwise APIs.
The decorated function (known as the "elementwise api handler") overrides
the default implementation for any binary elementwise API whenever the value
for the first two arguments (typically named `x` and `y`) match the specified
type annotations. The elementwise api handler is called with two arguments:
`elementwise_api_handler(api_func, x, y)`
Where `x` and `y` are the first two arguments to the elementwise api, and
`api_func` is a TensorFlow function that takes two parameters and performs the
elementwise operation (e.g., `tf.add`).
The following example shows how this decorator can be used to update all
binary elementwise operations to handle a `MaskedTensor` type:
>>> class MaskedTensor(tf.experimental.ExtensionType):
... values: tf.Tensor
... mask: tf.Tensor
>>> @dispatch_for_binary_elementwise_apis(MaskedTensor, MaskedTensor)
... def binary_elementwise_api_handler(api_func, x, y):
... return MaskedTensor(api_func(x.values, y.values), x.mask & y.mask)
>>> a = MaskedTensor([1, 2, 3, 4, 5], [True, True, True, True, False])
>>> b = MaskedTensor([2, 4, 6, 8, 0], [True, True, True, False, True])
>>> c = tf.add(a, b)
>>> print(f"values={c.values.numpy()}, mask={c.mask.numpy()}")
values=[ 3 6 9 12 5], mask=[ True True True False False]
Args:
x_type: A type annotation indicating when the api handler should be called.
y_type: A type annotation indicating when the api handler should be called.
Returns:
A decorator.
#### Registered APIs
The binary elementwise APIs are:
<<API_LIST>>
"""
def decorator(handler):
if (x_type, y_type) in _ELEMENTWISE_API_HANDLERS:
raise ValueError("A binary elementwise dispatch handler "
f"({_ELEMENTWISE_API_HANDLERS[x_type, y_type]}) "
f"has already been registered for ({x_type}, {y_type}).")
_ELEMENTWISE_API_HANDLERS[x_type, y_type] = handler
for api in _BINARY_ELEMENTWISE_APIS:
_add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler)
return handler
return decorator | 743d6f85b843f6200cf8b6c6361fc81154c37936 | 21,131 |
def grid(mat, i, j, k):
"""Returns true if the specified grid contains k"""
return lookup(k, [ mat[i + p][j + q] for p in range(3) for q in range(3) ]) | b2df3a905ada922011fc344f555a908aa03d5f64 | 21,132 |
def get_list_channels(sc):
"""Get list of channels."""
# https://api.slack.com/methods/channels.list
response = sc.api_call(
"channels.list",
)
return response['channels'] | d31271bcc065b4a212e298c6283c4d658e5547da | 21,133 |
def error_handler(error):
"""エラーメッセージを生成するハンドラ"""
response = jsonify({ 'cause': error.description['cause'] })
return response, error.code | 282b1a11d8e7326be1fa2d0b1b2457dc5d5d5ca1 | 21,134 |
def search_records(
name: str,
search: TextClassificationSearchRequest = None,
common_params: CommonTaskQueryParams = Depends(),
include_metrics: bool = Query(
False, description="If enabled, return related record metrics"
),
pagination: PaginationParams = Depends(),
service: TextClassificationService = Depends(
TextClassificationService.get_instance
),
datasets: DatasetsService = Depends(DatasetsService.get_instance),
current_user: User = Security(auth.get_user, scopes=[]),
) -> TextClassificationSearchResults:
"""
Searches data from dataset
Parameters
----------
name:
The dataset name
search:
The search query request
common_params:
Common query params
include_metrics:
Flag to enable include metrics
pagination:
The pagination params
service:
The dataset records service
datasets:
The dataset service
current_user:
The current request user
Returns
-------
The search results data
"""
search = search or TextClassificationSearchRequest()
query = search.query or TextClassificationQuery()
dataset = datasets.find_by_name(
user=current_user, name=name, task=TASK_TYPE, workspace=common_params.workspace
)
result = service.search(
dataset=dataset,
query=query,
sort_by=search.sort,
record_from=pagination.from_,
size=pagination.limit,
exclude_metrics=not include_metrics,
)
return result | 7dd932131f5fda1680fd419697df9c0a04d19fa5 | 21,135 |
from warnings import warn

# fileiobase is assumed to come from the surrounding nmrglue package.

def guess_udic(dic, data):
"""
Guess parameters of universal dictionary from dic, data pair.
Parameters
----------
dic : dict
Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
data : ndarray
Array of NMR data.
Returns
-------
udic : dict
Universal dictionary of spectral parameters.
"""
# Create an empty universal dictionary
udic = fileiobase.create_blank_udic(1)
    # Update default parameters; acqu parameters in dic are tried first, then JCAMP-DX header parameters
# size
if data is not None:
udic[0]["size"] = len(data)
else:
warn('No data, cannot set udic size')
# sw
try:
udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
except KeyError:
try:
udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
except KeyError:
try:
if dic["spectrum"]["freqdata"]:
udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
elif data is not None:
udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
else:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
except KeyError:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
# obs
try:
udic[0]['obs'] = float(dic['acqu']['b1Freq'])
except KeyError:
try:
udic[0]['obs'] = float(dic['dx']['$BF1'][0])
except KeyError:
warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
# car
try:
udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
except KeyError:
warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
# label
try:
udic[0]['label'] = dic['acqu']['rxChannel']
except KeyError:
try:
label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
udic[0]["label"] = label_value
except KeyError:
warn("Cannot set observed nucleus label")
    # keys left at their defaults:
# udic[0]['complex']
# udic[0]['encoding']
# udic[0]['time'] = True
# udic[0]['freq'] = False
return udic | a8d79255b34f407ea54766ec2e4aedaf2ae42df9 | 21,136 |
def matchPosAny (msg, pos, rules, subrules):
"""Indicates whether or not `msg` matches any (i.e. a single) `subrule`
in `rules`, starting at position `pos`.
Returns the position in `msg` just after a successful match, or -1
if no match was found.
"""
index = -1
for rule in subrules:
if (index := matchPos(msg, pos, rules, rule)) != -1:
break
return index | 6ad053cdb61d7cc917e3acb896ea5d23cc042de9 | 21,137 |
def compute_accuracy(model, loader):
"""
:param model: a model which returns classifier_output and segmentator_output
:param loader: data loader
"""
model.eval() # enter evaluation mode
score_accum = 0
count = 0
for x, y, _, _ in loader:
classifier_output, _ = model(x)
score_accum += accuracy(classifier_output.data.cpu().numpy(), y.data.cpu().numpy()) * y.shape[0]
count += y.shape[0]
return float(score_accum / count) | ecc86c3c9c2429843bdd25023b3c6f0393c83db9 | 21,138 |
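The accuracy helper used above is not shown; a plausible sketch (an assumption, not the original) is:

import numpy as np

def accuracy(outputs, labels):
    """Fraction of argmax class predictions that match the integer labels."""
    predictions = np.argmax(outputs, axis=1)
    return float(np.mean(predictions == labels))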
from sqlalchemy import and_, func  # operator.and_ would fail here: it accepts exactly two arguments
def create_base_query_grouped_fifo(rse_id, filter_by_rse='destination', session=None):
"""
Build the sqlalchemy queries to filter relevant requests and to group them in datasets.
Group requests either by same destination RSE or source RSE.
:param rse_id: The RSE id.
:param filter_by_rse: Decide whether to filter by transfer destination or source RSE (`destination`, `source`).
:param session: The database session.
"""
    # query DIDs that are attached to a collection and add a column indicating the order of attachment in case of multiple attachments
attachment_order_subquery = session.query(models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.scope,
func.row_number().over(partition_by=(models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_scope),
order_by=models.DataIdentifierAssociation.created_at).label('order_of_attachment'))\
.subquery()
# query transfer requests and join with according datasets
filtered_requests_subquery = None
grouped_requests_subquery = None
dialect = session.bind.dialect.name
if dialect == 'mysql' or dialect == 'sqlite':
filtered_requests_subquery = session.query(models.Request.id.label('id'),
func.ifnull(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
func.ifnull(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
combined_attached_unattached_requests = session.query(func.ifnull(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
func.ifnull(attachment_order_subquery.c.name, models.Request.name).label('name'),
models.Request.bytes,
models.Request.requested_at)
elif dialect == 'postgresql':
filtered_requests_subquery = session.query(models.Request.id.label('id'),
func.coalesce(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
func.coalesce(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
combined_attached_unattached_requests = session.query(func.coalesce(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
func.coalesce(attachment_order_subquery.c.name, models.Request.name).label('name'),
models.Request.bytes,
models.Request.requested_at)
elif dialect == 'oracle':
filtered_requests_subquery = session.query(models.Request.id.label('id'),
func.nvl(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
func.nvl(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
combined_attached_unattached_requests = session.query(func.nvl(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
func.nvl(attachment_order_subquery.c.name, models.Request.name).label('name'),
models.Request.bytes,
models.Request.requested_at)
filtered_requests_subquery = filtered_requests_subquery.join(attachment_order_subquery, and_(models.Request.name == attachment_order_subquery.c.child_name,
models.Request.scope == attachment_order_subquery.c.child_scope,
attachment_order_subquery.c.order_of_attachment == 1), isouter=True)
combined_attached_unattached_requests = combined_attached_unattached_requests.join(attachment_order_subquery, and_(models.Request.name == attachment_order_subquery.c.child_name,
models.Request.scope == attachment_order_subquery.c.child_scope,
attachment_order_subquery.c.order_of_attachment == 1), isouter=True)
# depending if throttler is used for reading or writing
if filter_by_rse == 'source':
filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.source_rse_id == rse_id)
combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.source_rse_id == rse_id)
elif filter_by_rse == 'destination':
filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.dest_rse_id == rse_id)
combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.dest_rse_id == rse_id)
filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.state == RequestState.WAITING).subquery()
combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.state == RequestState.WAITING).subquery()
# group requests and calculate properties like oldest requested_at, amount of children, volume
grouped_requests_subquery = session.query(func.sum(combined_attached_unattached_requests.c.bytes).label('volume'),
func.min(combined_attached_unattached_requests.c.requested_at).label('oldest_requested_at'),
func.count().label('amount_childs'),
combined_attached_unattached_requests.c.name,
combined_attached_unattached_requests.c.scope)\
.group_by(combined_attached_unattached_requests.c.scope, combined_attached_unattached_requests.c.name)\
.subquery()
return grouped_requests_subquery, filtered_requests_subquery | e4399a447e767610c7451f61ad543553168de1d6 | 21,139 |
def then(state1, state2):
"""
    Like ``bind``, but instead of a function that returns a stateful action,
just bind a new stateful action.
Equivalent to bind(state1, lambda _: state2)
"""
return bind(state1, lambda _: state2) | ef6200f8776b84a5a9893b894b3d7cd406598f7d | 21,140 |
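For context, `bind` is assumed to have the usual state-monad shape, where a stateful action maps a state to a (value, new_state) pair; a minimal sketch:

def unit(value):
    """Stateful action that returns `value` and leaves the state unchanged."""
    return lambda state: (value, state)

def bind(action, make_next_action):
    """Run `action`, feed its value to `make_next_action`, then run the result."""
    def combined(state):
        value, new_state = action(state)
        return make_next_action(value)(new_state)
    return combined

# `then` simply discards the first action's value:
increment = lambda state: (None, state + 1)
get = lambda state: (state, state)
assert then(increment, get)(0) == (1, 1)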
from typing import Optional
from typing import Dict
from typing import Any
def get_skyregions_collection(run_id: Optional[int]=None) -> Dict[str, Any]:
"""
Produce Sky region geometry shapes JSON object for d3-celestial.
Args:
run_id (int, optional): Run ID to filter on if not None.
Returns:
        skyregions_collection (dict): Dictionary representing a JSON object
containing the sky regions.
"""
skyregions = SkyRegion.objects.all()
if run_id is not None:
skyregions = skyregions.filter(run=run_id)
features = []
for skr in skyregions:
ra_fix = 360. if skr.centre_ra > 180. else 0.
ra = skr.centre_ra - ra_fix
dec = skr.centre_dec
width_ra = skr.width_ra / 2.
width_dec = skr.width_dec / 2.
id = skr.id
features.append(
{
"type": "Feature",
"id": f"SkyRegion{id}",
"properties": {
"n": f"{id:02d}",
"loc": [ra, dec]
},
"geometry": {
"type": "MultiLineString",
"coordinates": [[
[ra+width_ra, dec+width_dec],
[ra+width_ra, dec-width_dec],
[ra-width_ra, dec-width_dec],
[ra-width_ra, dec+width_dec],
[ra+width_ra, dec+width_dec]
]]
}
}
)
skyregions_collection = {
"type": "FeatureCollection",
"features" : features
}
return skyregions_collection | 8d8fe2e46a9d37e774dbdab506f012a0560796e1 | 21,141 |
def construct_sru_query(keyword, keyword_type=None, mat_type=None, cat_source=None):
"""
Creates readable SRU/CQL query, does not encode white spaces or parenthesis -
this is handled by the session obj.
"""
query_elems = []
if keyword is None:
raise TypeError("query argument cannot be None.")
if keyword_type is None:
# take as straight sru query and pass to sru_query method
query_elems.append(keyword.strip())
elif keyword_type == "ISBN":
query_elems.append('srw.bn = "{}"'.format(keyword))
elif keyword_type == "UPC":
query_elems.append('srw.sn = "{}"'.format(keyword))
elif keyword_type == "ISSN":
query_elems.append('srw.in = "{}"'.format(keyword))
elif keyword_type == "OCLC #":
query_elems.append('srw.no = "{}"'.format(keyword))
elif keyword_type == "LCCN":
query_elems.append('srw.dn = "{}"'.format(keyword))
if mat_type is None or mat_type == "any":
pass
elif mat_type == "print":
query_elems.append('srw.mt = "bks"')
elif mat_type == "large print":
query_elems.append('srw.mt = "lpt"')
elif mat_type == "dvd":
query_elems.append('srw.mt = "dvv"')
elif mat_type == "bluray":
query_elems.append('srw.mt = "bta"')
if cat_source is None or cat_source == "any":
pass
elif cat_source == "DLC":
query_elems.append('srw.pc = "dlc"')
return " AND ".join(query_elems) | fbe28156beca73339fa88d200777e25172796864 | 21,142 |
def sitemap_xml():
"""Default Sitemap XML"""
show_years = retrieve_show_years(reverse_order=False)
sitemap = render_template("sitemaps/sitemap.xml",
show_years=show_years)
return Response(sitemap, mimetype="text/xml") | e6be9c98d1a1cd4bbfb04e9ad9676cc4b8521d79 | 21,143 |
def remove_property(product_id, property_id):
"""
Remove the property
"""
    type_property = db.db.session.query(TypeProperty).\
        filter_by(product_id=product_id, product_property_id=property_id).first()
    try:
        db.db.session.delete(type_property)
        db.db.session.commit()
    except Exception as e:
        db.db.session.rollback()
        print(e)
        raise TypePropertyException("Could not remove type property")
    tmp = type_property.to_dict()
return(json_util.dumps(tmp)) | cca8822d5b7de3ca8ee9b451fe21d4ddeb20e736 | 21,144 |
def format_solution_table_calc(solution, node_ids_to_nodes):
"""
:type solution: dict[int, list[int]]
:type node_ids_to_nodes: dict[int, int]
:rtype: dict[int, str]
"""
new_solution = {}
for (color, path) in solution.items():
new_path = []
for p in path:
back_p = node_ids_to_nodes[p]
new_p = "{0}{1}".format(
chr(back_p % width + ord("A")),
chr(back_p // width + ord("0")),
)
new_path.append(new_p)
new_solution[color] = " ".join(new_path)
return new_solution | 335bc0e90860a9181d5b819a5bb9e44cd44f750d | 21,145 |
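A usage sketch; `width` is a module-level global the function reads, set here only for illustration:

width = 3  # grid is 3 columns wide
node_ids_to_nodes = {0: 0, 1: 1, 2: 4}
solution = {7: [0, 1, 2]}
# Node 4 in a width-3 grid is column 1, row 1, i.e. "B1".
assert format_solution_table_calc(solution, node_ids_to_nodes) == {7: 'A0 B0 B1'}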
import functools

import numpy as np
def hashable(func):
"""Decorator for functions with numpy arrays as input arguments that will benefit from caching
Example:
from midgard.math import nputil
from functools import lru_cache
@nputil.hashable
@lru_cache()
def test_func(a: np.ndarray, b: np.ndarray = None)
do_something
return something
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
new_args_list = list()
for arg in args:
if isinstance(arg, np.ndarray):
arg = HashArray(arg)
new_args_list.append(arg)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = HashArray(v)
return func(*new_args_list, **kwargs)
return wrapper | db23cc12f9a322aaae6a585068a5c30194d7be7b | 21,146 |
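HashArray is assumed to be defined nearby; a minimal sketch of what it must provide (hashability, so lru_cache can use it as a key) could look like:

import numpy as np

class HashArray(np.ndarray):
    """ndarray subclass that is hashable, so it can serve as a cache key (sketch)."""

    def __new__(cls, values):
        obj = np.asarray(values).view(cls)
        obj.flags.writeable = False  # freeze contents so the hash stays valid
        return obj

    def __hash__(self):
        return hash(self.tobytes())

    def __eq__(self, other):
        # Cache-key equality: compare whole arrays, not elementwise.
        return isinstance(other, np.ndarray) and np.array_equal(self, other)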
import argparse
def parse_args():
"""
Parse input arguments. Helps with the command line argument input.
"""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
default='VGGnet_test')
parser.add_argument('--model', dest='model', help='Model path',
default=' ')
args = parser.parse_args()
return args | b52492a92b59b22443b0c3f0ec388d0c9f184b8f | 21,147 |
def object_hash(fd, fmt, repo=None):
""" Function to read the content of a open file, create appropiate object
and write the object to vcs directory and return the hash of the file"""
data = fd.read()
# choosing constructor on the basis of the object type found in header
if fmt == b'commit' : obj = vcsCommit(repo, data)
elif fmt == b'tree' : obj = vcsTree(repo, data)
elif fmt == b'tag' : obj = vcsTag(repo, data)
elif fmt == b'blob' : obj = vcsBlob(repo, data)
else:
raise Exception('Unknown type %s!' % fmt)
return object_write(obj, repo) | b55a4da36934843c111e4d66dc552c556c8d0ba4 | 21,148 |
import io

# The classic pdfminer API is assumed here; recent pdfminer.six releases replaced process_pdf.
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, process_pdf
def read_from_pdf(pdf_file):
"""
读取PDF文件内容,并做处理
:param pdf_file: PDF 文件
:return: pdf文件内容
"""
    # Read the PDF file contents in binary mode
with open(pdf_file, 'rb') as file:
resource_manage = PDFResourceManager()
return_str = io.StringIO()
lap_params = LAParams()
        # Convert the PDF content to text
        device = TextConverter(
            resource_manage,
            return_str,
            laparams=lap_params
        )
process_pdf(resource_manage, device, file)
device.close()
        # Get the converted PDF text
pdf_content = return_str.getvalue()
print(pdf_content)
return pdf_content | 140d4545f952983017d175303397c494457b4628 | 21,149 |
import dbus

def _descending(dbus_object):
"""
Verify levels of variant values always descend by one.
:param object dbus_object: a dbus object
:returns: None if there was a failure of the property, otherwise the level
:rtype: int or NoneType
None is a better choice than False, for 0, a valid variant level, is always
interpreted as False.
"""
# pylint: disable=too-many-return-statements
if isinstance(dbus_object, dbus.Dictionary):
key_levels = [_descending(x) for x in dbus_object.keys()]
value_levels = [_descending(x) for x in dbus_object.values()]
if any(k is None for k in key_levels) or \
any(v is None for v in value_levels):
return None
max_key_level = max(key_levels) if key_levels != [] else 0
max_value_level = max(value_levels) if value_levels != [] else 0
max_level = max(max_key_level, max_value_level)
variant_level = dbus_object.variant_level
if variant_level == 0:
return max_level
if variant_level != max_level + 1:
return None
else:
return variant_level
elif isinstance(dbus_object, (dbus.Array, dbus.Struct)):
levels = [_descending(x) for x in dbus_object]
if any(l is None for l in levels):
return None
max_level = max(levels) if levels != [] else 0
variant_level = dbus_object.variant_level
if variant_level == 0:
return max_level
if variant_level != max_level + 1:
return None
else:
return variant_level
else:
variant_level = dbus_object.variant_level
return variant_level if variant_level in (0, 1) else None | 55de473807c22c50d8f65597cde390a56dcb9cd6 | 21,150 |
def _is_avconv():
"""
Returns `True` if the `ffmpeg` binary is really `avconv`.
"""
out = _run_command(['ffmpeg', '-version'])
return out and isinstance(out, strtype) and 'DEPRECATED' in out | dc9003623b4497b75d37f4e759f31401ad6261e1 | 21,151 |
import django_countries
from django.http import JsonResponse

def countries(request):
"""
Returns all valid countries and their country codes
"""
return JsonResponse({
"countries": [{
"id": unicode(code),
"name": unicode(name)
} for code, name in list(django_countries.countries)]
}) | 20296279dea898741950715a41b4188f7f5e6724 | 21,152 |
import os
def get_reads(file, reads=None):
"""
Get the read counts from the file
"""
if not reads:
reads={}
# get the sample name from the file
sample=os.path.basename(file).split(".")[0]
reads[sample]={}
with open(file) as file_handle:
for line in file_handle:
if READ_COUNT_IDENTIFIER in line:
count, type = get_read_count_type(line)
reads[sample][type]=count
return reads | 6951e5d401743c6a10e13d0d5bd0e28497a7ea5a | 21,153 |
from pysb import Monomer, Observable

def generate_monomer(species, monomerdict, initlen, initnames, tbobs):
"""
generate a PySB monomer based on species
:param species: a Species object
:param monomerdict: a dictionary with all monomers linked to their species id
:param initlen: number of the initial species
    :param initnames: names of the initial species
    :param tbobs: ids of species for which to declare observables
    :return: the updated monomerdict
"""
if species.id <= initlen:
name = initnames[species.id - 1]
else:
name = 'sp_' + str(species.id)
sites = ['init']
m = Monomer(name, sites)
monomerdict[species.id] = m
if species.id in tbobs:
Observable('obs' + name, m(init=None))
return monomerdict | 2966473ef084991d0c589c16a8479f6395702b43 | 21,154 |
import logging
from tqdm import tqdm
def convert_bert_tokens(outputs):
"""
Converts BERT tokens into a readable format for the parser, i.e. using Penn Treebank tokenization scheme.
Does the heavy lifting for this script.
"""
logging.info("Adjusting BERT indices to align with Penn Treebank.")
mapped_outputs = [] # Will hold the final results: sentences and mapped span indices
for output in tqdm(outputs):
comb_text = [word for sentence in output['sentences'] for word in sentence]
sentence_start_idx = 0
sent_so_far = []
word_so_far = []
sentence_map = output['sentence_map']
subtoken_map = output['subtoken_map']
clusters = output['clusters']
# preds = output['predicted_clusters']
# top_mentions = output['top_spans']
for i, subword in enumerate(comb_text):
if i != 0 and sentence_map[i - 1] != sentence_map[i]: # New sentence
sent_so_far.append(convert_bert_word(''.join(word_so_far)))
word_so_far = []
mapped_outputs.append({'doc_key': output['doc_key'],
'num_speakers': num_speakers(output['speakers']),
'words': sent_so_far,
'clusters': adjust_cluster_indices(clusters, subtoken_map, sentence_start_idx, i - 1)
# 'predicted_clusters': adjust_cluster_indices(preds, subtoken_map, sentence_start_idx, i - 1),
# 'top_mentions': adjust_top_mentions(top_mentions, subtoken_map, sentence_start_idx, i - 1)
})
sent_so_far = []
sentence_start_idx = i
elif i != 0 and subtoken_map[i - 1] != subtoken_map[i]: # New word
fullword = ''.join(word_so_far)
if fullword != '[SEP][CLS]': # Need this because sentences indices increment at SEP and CLS tokens
sent_so_far.append(convert_bert_word(fullword))
else:
sentence_start_idx += 2 # The sentence actually starts two tokens later due to [SEP] and [CLS]
word_so_far = []
word_so_far.append(subword)
return mapped_outputs | 27cef75fc48fb87e20f77af95e265406c6b8c520 | 21,155 |
from typing import Sequence
import pathlib
import logging
def load_proto_message(
config_path: AnyPath,
overrides: Sequence[str] = tuple(),
*,
msg_class=None,
extra_include_dirs: Sequence[pathlib.Path] = tuple(),
) -> ProtoMessage:
"""Loads message from the file and applies overrides.
If message type is not give, will try to guess message type.
All includes in the loaded messages (root message and includes) will be
recursively included.
Composition order:
* Create empty message of type msg_class
* Merge includes within the message in config_path
    * Merge the content of config_path
* Merge includes in overrides
* Merge scalars in overrides
Returns the message.
"""
config_path = pathlib.Path(config_path)
if msg_class is None:
msg_class = _guess_message_type(config_path)
elif hasattr(msg_class, "get_proto_class"):
msg_class = msg_class.get_proto_class()
def _resolve_mount(mount):
if msg_class is ROOT_CFG_CLASS:
            # For convenience, top-level includes do not include the task name, i.e.,
# `lr=XXX` vs `train_sl.lr=XXX` where train_sl is a name of task. We
# manually add it.
return (_get_task_type(config_path) + "." + mount).strip(".")
else:
return mount
include_dirs = []
include_dirs.append(config_path.resolve().parent)
include_dirs.append(CONF_ROOT / "common")
include_dirs.extend(extra_include_dirs)
    include_overrides, scalar_overrides = _parse_overrides(overrides, include_dirs=include_dirs)
logging.debug(
"Constructing %s from %s with include overrides %s and scalar overrides %s",
msg_class.__name__,
config_path,
        include_overrides,
        scalar_overrides,
)
msg = msg_class()
# Step 1: Populate message with includes.
default_includes = _get_config_includes(config_path, msg_class)
logging.debug("%s defaults %s", msg_class, default_includes)
for mount, include_msg_path in default_includes.items():
_apply_include(msg, _resolve_mount(mount), include_msg_path, include_dirs)
# Step 2: Override the includes with the config content.
_parse_text_proto_into(config_path, msg)
if hasattr(msg, INCLUDES_FIELD):
msg.ClearField(INCLUDES_FIELD)
# Step 3: Override with extra includes.
    for mount, include_msg_path in include_overrides.items():
_apply_include(msg, _resolve_mount(mount), include_msg_path, include_dirs)
# Step 4: Apply scalar overrides.
    for mount, value in scalar_overrides.items():
logging.debug(
"Constructing %s. Applying scalar: mount=%r value=%r",
msg_class.__name__,
_resolve_mount(mount),
value,
)
_apply_scalar_override(msg, _resolve_mount(mount), value)
return msg | 253874fc5a41a51d84c4745ce61c415b13404f75 | 21,156 |
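# Hedged usage sketch for the loader above; the config path and override
# string below are hypothetical placeholders, not taken from the source
# project, and the call is left commented since the module helpers
# (_guess_message_type, _parse_overrides, ...) live elsewhere.
# msg = load_proto_message(
#     "configs/train.pbtxt",
#     overrides=["train_sl.lr=0.001"],
# )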
import numpy as np
import rasterio
def calculate_iou(ground_truth_path, prediction_path):
""" Calculate the intersection over union of two raster images.
Args:
ground_truth_path (str): Path to the ground truth raster image.
prediction_path (str): Path to the prediction raster image.
Returns:
float: The intersection over union of the two raster datasets.
"""
with rasterio.open(ground_truth_path) as ground_truth_dataset:
with rasterio.open(prediction_path) as prediction_dataset:
ground_truth_array = ground_truth_dataset.read(1)
prediction_array = prediction_dataset.read(1)
intersection = np.logical_and(ground_truth_array, prediction_array)
union = np.logical_or(ground_truth_array, prediction_array)
iou = np.sum(intersection) / np.sum(union)
return iou | 70e49e787fe57f5c4d94a043d41b96de1b14fd39 | 21,157 |
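# Hedged sketch: the same IoU computation on in-memory arrays, skipping the
# rasterio I/O. The 2x2 masks are made-up illustration data.
import numpy as np

gt = np.array([[1, 1], [0, 0]])
pred = np.array([[1, 0], [0, 0]])
iou = np.sum(np.logical_and(gt, pred)) / np.sum(np.logical_or(gt, pred))
assert iou == 0.5  # one overlapping pixel out of two in the union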
from bokeh.models import ColumnDataSource
import pandas as pd
import warnings
def bokeh_scatter(x,
y=None,
*,
xlabel='x',
ylabel='y',
title='',
figure=None,
data=None,
saveas='scatter',
copy_data=False,
**kwargs):
"""
Create an interactive scatter plot with bokeh
:param x: arraylike or key for data for the x-axis
:param y: arraylike or key for data for the y-axis
:param data: source for the data of the plot (pandas Dataframe for example)
:param xlabel: label for the x-axis
:param ylabel: label for the y-axis
:param title: title of the figure
:param figure: bokeh figure (optional), if provided the plot will be added to this figure
    :param saveas: filename under which the plot is saved
:param copy_data: bool, if True the data argument will be copied
Kwargs will be passed on to :py:class:`masci_tools.vis.bokeh_plotter.BokehPlotter`.
If the arguments are not recognized they are passed on to the bokeh function `scatter`
"""
if isinstance(x, (dict, pd.DataFrame, ColumnDataSource)) or x is None:
warnings.warn(
'Passing the source as first argument is deprecated. Please pass in source by the keyword data'
'and xdata and ydata as the first arguments', DeprecationWarning)
data = x
x = kwargs.pop('xdata', 'x')
y = kwargs.pop('ydata', 'y')
plot_data = process_data_arguments(data=data,
x=x,
y=y,
copy_data=copy_data,
single_plot=True,
same_length=True,
use_column_source=True)
entry, source = plot_data.items(first=True)
plot_params.set_defaults(default_type='function', name=entry.y)
kwargs = plot_params.set_parameters(continue_on_error=True, **kwargs)
p = plot_params.prepare_figure(title, xlabel, ylabel, figure=figure)
plot_kwargs = plot_params.plot_kwargs(plot_type='scatter')
res = p.scatter(x=entry.x, y=entry.y, source=source, **plot_kwargs, **kwargs)
plot_params.add_tooltips(p, res, entry)
if plot_params['level'] is not None:
res.level = plot_params['level']
plot_params.draw_straight_lines(p)
plot_params.set_limits(p)
plot_params.save_plot(p, saveas)
return p | d2bf64efcd751f3dea0d63c1c02af14952684bd7 | 21,158 |
from core.exceptions import RuleArgumentsError
from utils.format import format_output
from utils.profile import clean_profiles
from utils.rule import (add_rule, check_rule, get_all_rules, get_rules,
                        link_rule, move_rule, remove_rule, unlink_rule,
                        update_rule)
from utils.type import check_type, update_type
def rulesActionsHandler(args):
""" Check rule action and execute associates functions.
:param args: Rule action
:return: Return result from the executed functions.
"""
if 'get' == args.action:
# get rule arguments :
# - id:
# type: int
# args number : 1 or more
# required: False
# - type:
# type: str
# args number: 1 or more
# required: False
if not args.id and not args.type:
rules = get_all_rules()
else:
if args.type:
check_type(type=args.type)
rules = get_rules(type=args.type, id=args.id)
return format_output(rules)
if 'add' == args.action:
# add rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - desc:
# type: str
# args number: 1
# required: True
# - auditcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with auditscript
# - remedcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with remedscript
# - auditscript:
# type: str
# args number: 1
# required: False
# note: can't be set with auditcmd
# - remedscript:
# type: str
# args number: 1
# required: False
# note: can't be set with remedcmd
try:
if args.audit_cmd and args.audit_script:
                raise RuleArgumentsError("Rule can't have auditscript AND auditcmd at the same time")
            # elif args.remed_cmd and args.remed_script:
            #     raise RuleArgumentsError("Rule can't have remedscript AND remedcmd at the same time")
            elif not (args.audit_cmd or args.audit_script):
                raise RuleArgumentsError('Rule must have at least one auditcmd OR one auditscript')
            # elif not (args.remed_cmd or args.remed_script):
            #     raise RuleArgumentsError('Rule must have at least one remedcmd OR one remedscript')
        except RuleArgumentsError as rvd:
            print(rvd)
            exit(rvd.code)
check_type(type=args.type)
updated_type = add_rule(desc=args.desc, type=args.type, audit_cmd=args.audit_cmd,
audit_script=args.audit_script, remed_cmd=args.remed_cmd,
remed_script=args.remed_script)
return update_type(type=updated_type)
if 'update' == args.action:
# update rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number : 1
# required: True
# - desc:
# type: str
# args number: 1
# required: False
# - auditcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with auditscript
# - remedcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with remedscript
# - auditscript:
# type: str
# args number: 1
# required: False
# note: can't be set with auditcmd
# - remedscript:
# type: str
# args number: 1
# required: False
# note: can't be set with remedcmd
try:
            # if args.audit_cmd and args.audit_script:
            #     raise RuleArgumentsError("Rule can't have auditscript AND auditcmd at the same time")
            # elif args.remed_cmd and args.remed_script:
            #     raise RuleArgumentsError("Rule can't have remedscript AND remedcmd at the same time")
            if not (args.audit_cmd or args.audit_script):
                raise RuleArgumentsError('Rule must have at least one auditcmd OR one auditscript')
        except RuleArgumentsError as rvd:
            print(rvd)
            exit(rvd.code)
check_rule(type=args.type, id=args.id)
updated_type = update_rule(desc=args.desc, type=args.type, audit_cmd=args.audit_cmd,
audit_script=args.audit_script, remed_cmd=args.remed_cmd,
remed_script=args.remed_script, id=args.id)
return update_type(updated_type)
if 'remove' == args.action:
# remove rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number : 1
# required: True
# - all
try:
if args.id:
if args.all:
raise RuleArgumentsError("--all option doesn't need an id (all rules will be deleted)")
else:
check_rule(type=args.type, id=args.id)
clean_profiles(type=args.type, id=args.id)
updated_type = remove_rule(type=args.type, id=args.id)
return update_type(updated_type)
else:
if args.all:
clean_profiles(type=args.type)
updated_type = remove_rule(type=args.type)
return update_type(updated_type)
else:
raise RuleArgumentsError("For removing one rule, id must be set !")
except RuleArgumentsError as rae:
            print(rae)
exit(rae.code)
if 'link' == args.action:
# link rule arguments :
# - profile:
# type: str
# args number : 1 or more
# required: True
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number: 1 or more
# required: False
# - all
try:
if args.all and not args.type:
raise RuleArgumentsError("--all options can't be used without rule type")
if args.id:
if args.all:
raise RuleArgumentsError("--all option doesn't need an id (all rules will be added)")
check_rule(type=args.type, id=args.id)
return link_rule(profile=args.profile, type=args.type, id=args.id)
else:
return link_rule(profile=args.profile, type=args.type, id=-1)
except RuleArgumentsError as rae:
            print(rae)
exit(rae.code)
if 'unlink' == args.action:
# unlink rule arguments :
# - profile:
# type: str
# args number : 1 or more
# required: True
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number: 1 or more
# required: False
# - all
try:
if args.id:
if args.all:
raise RuleArgumentsError("--all option doesn't need an id (all rules will be added)")
else:
check_rule(type=args.type, id=args.id)
return unlink_rule(profile=args.profile, type=args.type, id=args.id)
else:
return unlink_rule(profile=args.profile, type=args.type, id=-1)
except RuleArgumentsError as rae:
            print(rae)
            exit(rae.code)
if 'move' == args.action:
# move rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number: 1 or more
# required: True
# - newtype:
# type: str
# args number : 1
# required: True
# - all
check_type(args.type)
check_type(args.newtype)
check_rule(type=args.type, id=args.id)
updated_oldtype, updated_newtype = move_rule(oldtype=args.type, id=args.id, newtype=args.newtype)
update_type(updated_oldtype)
return update_type(updated_newtype)
return | 991873d489486b5e6ffc9676a2d9a6e5af9e944b | 21,159 |
def superposition_training_mnist(model, X_train, y_train, X_test, y_test, num_of_epochs, num_of_tasks, context_matrices, nn_cnn, batch_size=32):
"""
Train model for 'num_of_tasks' tasks, each task is a different permutation of input images.
Check how accuracy for original images is changing through tasks using superposition training.
:param model: Keras model instance
:param X_train: train input data
:param y_train: train output labels
:param X_test: test input data
:param y_test: test output labels
:param num_of_epochs: number of epochs to train the model
:param num_of_tasks: number of different tasks (permutations of original images)
:param context_matrices: multidimensional numpy array with random context (binary superposition)
:param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
:param batch_size: batch size - number of samples per gradient update (default = 32)
:return: list of test accuracies for 10 epochs for each task
"""
original_accuracies = []
# context_multiplication(model, context_matrices, 0)
# first training task - original MNIST images
history, _, accuracies = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=0)
original_accuracies.extend(accuracies)
print_validation_acc(history, 0)
# other training tasks - permuted MNIST data
for i in range(num_of_tasks - 1):
print("\n\n Task: %d \n" % (i + 1))
# multiply current weights with context matrices for each layer (without changing weights from bias node)
if nn_cnn == 'nn':
context_multiplication(model, context_matrices, i + 1)
elif nn_cnn == 'cnn':
context_multiplication_CNN(model, context_matrices, i + 1)
permuted_X_train = permute_images(X_train)
history, _, accuracies = train_model(model, permuted_X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=i + 1)
original_accuracies.extend(accuracies)
print_validation_acc(history, i + 1)
return original_accuracies | e0e837c3a92e047ce894c166d09e2ce6a58b3035 | 21,160 |
import json
def json2dict(astr: str) -> dict:
"""将json字符串转为dict类型的数据对象
Args:
astr: json字符串转为dict类型的数据对象
Returns:
返回dict类型数据对象
"""
return json.loads(astr) | f13b698dcf7dda253fd872bb464594901280f03b | 21,161 |
# Note: `Any` below refers to the matcher class defined alongside this
# function (as in mockito's matchers module), not typing.Any.
def any(wanted_type=None):
"""Matches against type of argument (`isinstance`).
If you want to match *any* type, use either `ANY` or `ANY()`.
Examples::
when(mock).foo(any).thenReturn(1)
verify(mock).foo(any(int))
"""
return Any(wanted_type) | 4c92d19a2168f815a88f2fa8aa56f0d656a5a534 | 21,162 |
def list_top_level_blob_folders(container_client):
"""
List all top-level folders in the ContainerClient object *container_client*
"""
    top_level_folders, _ = walk_container(container_client, max_depth=1, store_blobs=False)
return top_level_folders | baf41750aae23df6d051986f24814d0f286afb6b | 21,163 |
import string
def keyword_encipher(message, keyword, wrap_alphabet=KeywordWrapAlphabet.from_a):
"""Enciphers a message with a keyword substitution cipher.
wrap_alphabet controls how the rest of the alphabet is added
after the keyword.
0 : from 'a'
1 : from the last letter in the sanitised keyword
2 : from the largest letter in the sanitised keyword
>>> keyword_encipher('test message', 'bayes')
'rsqr ksqqbds'
>>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_a)
'rsqr ksqqbds'
>>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_last)
'lskl dskkbus'
>>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_largest)
'qspq jsppbcs'
"""
cipher_alphabet = keyword_cipher_alphabet_of(keyword, wrap_alphabet)
cipher_translation = ''.maketrans(string.ascii_lowercase, cipher_alphabet)
return unaccent(message).lower().translate(cipher_translation) | 155e997e1199f4adb25e20ad2c6e0047ffc7f7fd | 21,164 |
import PIL
from typing import Any
def plt_to_img(dummy: Any = None, **kwargs) -> PIL.Image.Image:
"""
Render the current figure as a (PIL) image
- Take dummy arg to support expression usage `plt_to_img(...)` as well as statement usage `...; plt_to_img()`
"""
return PIL.Image.open(plot_to_file(**kwargs)) | d07be803a2f3c71fa62b920c0a72954578d24f59 | 21,165 |
def _escape_char(c, escape_char=ESCAPE_CHAR):
"""Escape a single character"""
buf = []
for byte in c.encode('utf8'):
buf.append(escape_char)
buf.append('%X' % _ord(byte))
return ''.join(buf) | a4f4c69eb51a338d54b685336c036d991c295666 | 21,166 |
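# Hedged self-contained sketch of the byte-wise escaping above, assuming '_'
# as the escape character and ord() as _ord.
def escape_char_demo(c, escape_char='_'):
    # each UTF-8 byte becomes escape_char followed by its hex value
    return ''.join('%s%X' % (escape_char, byte) for byte in c.encode('utf8'))

assert escape_char_demo('é') == '_C3_A9'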
def error_log_to_html(error_log):
"""Convert an error log into an HTML representation"""
doc = etree.Element('ul')
for l in error_log:
if l.message.startswith('<runtrace '):
continue
el = etree.Element('li')
el.attrib['class'] = 'domain_{domain_name} level_{level_name} type_{type_name}'.format( # NOQA: E501
domain_name=l.domain_name,
level_name=l.level_name,
type_name=l.type_name,
)
el.text = '{msg:s} [{line:d}:{column:d}]'.format(
msg=l.message,
line=l.line,
column=l.column,
)
doc.append(el)
return doc | d2df223a0be82c5f58cf57be504833061d1afd40 | 21,167 |
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == "linear":
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == "step":
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == "plateau":
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == "cosine":
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
        raise NotImplementedError("learning rate policy [%s] is not implemented" % opt.lr_policy)
return scheduler | b8996d9963533249f1b387a36bac3f209e70daff | 21,168 |
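# Hedged sketch of the 'linear' policy above: the multiplier stays at 1.0 for
# the first n_epochs, then decays linearly toward zero. The epoch counts are
# illustrative assumptions, not values from the source options.
n_epochs, n_epochs_decay, epoch_count = 5, 5, 1

def linear_lr_multiplier(epoch):
    return 1.0 - max(0, epoch + epoch_count - n_epochs) / float(n_epochs_decay + 1)

# epochs 0-4 keep the full rate; epochs 5-9 shrink it step by step
print([round(linear_lr_multiplier(e), 2) for e in range(10)])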
import matplotlib.pyplot as plt
def gen_colors(count, drop_high=True):
"""Generate spread of `count` colors from matplotlib inferno colormap"""
# drop the top end of the color range by defining the norm with one
# element too many
cvals = range(0, count + drop_high)
# and dropping the last element when calculating the normed values
cvals = plt.Normalize(min(cvals), max(cvals))(cvals[0 : len(cvals) - drop_high])
# we're using the 'inferno' colormap from matplotlib
colors = plt.cm.inferno(cvals)
return colors | 14f53d46d4b7370603624caa84b7bf4731e5e73d | 21,169 |
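# Hedged usage check: five normalized positions on the inferno colormap give
# five RGBA rows.
colors = gen_colors(5)
assert colors.shape == (5, 4)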
import subprocess
def validate_branch():
"""Checks the branch passed in against the branches available on remote.
    Returns true if branch exists on remote. This may be subject to false
    positives, but that should not be an issue"""
output = subprocess.run(["/usr/bin/git", "ls-remote",
CURRENT_CONFIG['URL']],
stdout=subprocess.PIPE)
ls_remote_output = output.stdout.decode("utf-8")
return CURRENT_CONFIG['RELEASE'] in ls_remote_output | daf96e3f1c072890295cd796658c5d3445c5956c | 21,170 |
import numpy as np
def train_PCA(data, num_components):
"""
Normalize the face by subtracting the mean image
Calculate the eigenValue and eigenVector of the training face, in descending order
Keep only num_components eigenvectors (corresponding to the num_components largest eigenvalues)
Each training face is represented in this basis by a vector
Calculate the weight vectors for training images
Normalized training face = F - mean = w1*u1 + w2*u2 + ... + wk*uk => w = u.T * face
    :param data: M * N^2, each row corresponding to each image, which is reshaped into 1-D vector
:param num_components: The number of the largest eigenVector to be kept
:return:
mean_image: 1 * N^2
eigenVectors: num_components * N^2 matrix, each row represents each eigenface, in descending order
weiVec_train: M * K matrix, each row is the weight vectors used to represent the training face
"""
mean_image = np.mean(data, axis=0)
data = data - mean_image
eigenValues, eigenVectors = eigen(data)
eigenVectors = eigenVectors[:num_components]
weiVec_train = np.dot(data, eigenVectors.T)
return mean_image, eigenVectors, weiVec_train | 2404fca9fe053c275b187e2435b497166ed7f4d8 | 21,171 |
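# Hedged sketch of reconstruction from the quantities train_PCA returns: an
# image's approximation is the mean plus its weight vector applied to the
# eigenface basis. Shapes below are illustrative placeholders, not real faces.
import numpy as np

K, N2 = 5, 64                               # num_components, flattened image size
mean_image = np.zeros(N2)
eigen_vectors = np.eye(K, N2)               # stand-in K x N^2 basis
weight_vector = np.arange(K, dtype=float)   # stand-in weights for one face
reconstruction = mean_image + weight_vector @ eigen_vectors
assert reconstruction.shape == (N2,)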
def count_revoked_tickets_for_party(party_id: PartyID) -> int:
"""Return the number of revoked tickets for that party."""
return db.session \
.query(DbTicket) \
.filter_by(party_id=party_id) \
.filter_by(revoked=True) \
.count() | 6ad857a4630d2add7d2d46b9e930178a18f89e29 | 21,172 |
import os
def add_service_context(_logger, _method, event_dict):
"""
Function intended as a processor for structlog. It adds information
about the service environment and reasonable defaults when not running in Lambda.
"""
event_dict['region'] = os.environ.get('REGION', os.uname().nodename)
event_dict['service'] = os.environ.get('SERVICE', os.path.abspath(__file__))
event_dict['stage'] = os.environ.get('STAGE', 'dev')
return event_dict | f5ea74b09ddd7024a04bc4f42bbd339b82adc9e3 | 21,173 |
def get_player_stats():
"""
Get all the player stats
returns dict of dicts: ->
{ player_id: {
name -> str gamertag,
discord -> str discord,
rank -> int rank,
wins -> int wins,
losses -> int losses
is_challenged
avg_goals_per_challenge -> float goals
},
...
}
"""
players = {}
logger.info("Retrieving player stats")
ids = get_all_player_ids_ordered()
for player in ids:
# Get the basic info
# TODO: We shouldn't mix up name and gamertag here, needs a refactor
with Player(player) as p:
players[p.id] = {
"name": p.gamertag,
"discord": p.discord,
"rank": p.rank,
"wins": p.wins,
"losses": p.losses,
"is_challenged": p.challenged,
}
# Now get the average goals per challenge
players[player]["avg_goals_per_challenge"] = get_average_goals_per_challenge(player)
return players | 32ec1a926fb5008f596a63d68c3c19627af916fb | 21,174 |
from sklearn.model_selection import train_test_split
def get_data(data_x, data_y):
"""
split data from loaded data
:param data_x:
:param data_y:
:return: Arrays
"""
print('Data X Length', len(data_x), 'Data Y Length', len(data_y))
print('Data X Example', data_x[0])
print('Data Y Example', data_y[0])
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.4, random_state=40)
    dev_x, test_x, dev_y, test_y = train_test_split(test_x, test_y, test_size=0.5, random_state=40)
    print('Train X Shape', train_x.shape, 'Train Y Shape', train_y.shape)
    print('Dev X Shape', dev_x.shape, 'Dev Y Shape', dev_y.shape)
    print('Test X Shape', test_x.shape, 'Test Y Shape', test_y.shape)
return train_x, train_y, dev_x, dev_y, test_x, test_y | a40406da641b36784719da3c3e375130e013e889 | 21,175 |
async def api_download_profile() -> str:
"""Downloads required files for the current profile."""
global download_status
assert core is not None
download_status = {}
def update_status(url, path, file_key, done, bytes_downloaded, bytes_expected):
bytes_percent = 100
if (bytes_expected is not None) and (bytes_expected > 0):
bytes_percent = int(bytes_downloaded / bytes_expected * 100)
download_status[file_key] = {"done": done, "bytes_percent": bytes_percent}
await rhasspyprofile.download_files(
core.profile,
status_fun=update_status,
session=get_http_session(),
ssl_context=ssl_context,
)
download_status = {}
return "OK" | 0dd6aaf17b49f8e48eb72c8adf726f2852937f18 | 21,176 |
def cumulative_segment_wrapper(fun):
"""Wrap a cumulative function such that it can be applied to segments.
Args:
fun: The cumulative function
Returns:
Wrapped function.
"""
def wrapped_segment_op(x, segment_ids, **kwargs):
with tf.compat.v1.name_scope(
None, default_name=fun.__name__+'_segment_wrapper', values=[x]):
segments, _ = tf.unique(segment_ids)
n_segments = tf.shape(segments)[0]
output_array = tf.TensorArray(
x.dtype, size=n_segments, infer_shape=False)
def loop_cond(i, out):
return i < n_segments
def execute_cumulative_op_on_segment(i, out):
segment_indices = tf.where(tf.equal(segment_ids, segments[i]))
seg_begin = tf.reduce_min(segment_indices)
seg_end = tf.reduce_max(segment_indices)
segment_data = x[seg_begin:seg_end+1]
out = out.write(i, fun(segment_data, **kwargs))
return i+1, out
i_end, filled_array = tf.while_loop(
loop_cond,
execute_cumulative_op_on_segment,
loop_vars=(tf.constant(0), output_array),
parallel_iterations=10,
swap_memory=True
)
output_tensor = filled_array.concat()
output_tensor.set_shape(x.get_shape())
return output_tensor
return wrapped_segment_op | 5471e525ab73855927fe04530b8ec6e14a4436d9 | 21,177 |
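# Hedged usage sketch: wrapping tf.cumsum so it restarts at each segment
# boundary. Written against the tf.compat.v1 style used above; under eager
# TF 2.x the same call should also work.
segment_cumsum = cumulative_segment_wrapper(tf.cumsum)
x = tf.constant([1., 2., 3., 4.])
segment_ids = tf.constant([0, 0, 1, 1])
result = segment_cumsum(x, segment_ids)  # expected values: [1., 3., 3., 7.]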
from typing import Any
def read_pet_types(
skip: int = 0,
limit: int = 100,
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_active_superuser)
) -> Any:
"""
Read pet types
:return:
"""
if not crud.user.is_superuser(current_user):
raise HTTPException(status_code=403, detail="Not enough permissions")
return crud.pettype.get_all(db=db, skip=skip, limit=limit) | 60de7c25b305bbb1a628db27559bcdb3abc5fb24 | 21,178 |
def get_approves_ag_request():
"""Creates the prerequisites for - and then creates and returns an instance of - ApprovesAgRequest."""
# Creates an access group request and an approver (required to create an instance of ApprovesAgRequest).
agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING)
approver = Approver(email="peter@example.org", password="abc123", name="Peter", surname="Parker")
db.session.add(agr)
db.session.add(approver)
# Returns a ApprovesAgRequest object.
return ApprovesAgRequest(ag_request=agr, approver=approver) | 139900d7948b4bd836410be09ba35a21954f2dc4 | 21,179 |
import pandas as pd
import requests
def currency_history(
base: str = "USD", date: str = "2020-02-03", api_key: str = ""
) -> pd.DataFrame:
"""
Latest data from currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param date: Specific date, e.g., "2020-02-03"
:type date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: Latest data of base currency
:rtype: pandas.DataFrame
"""
payload = {"base": base, "date": date, "api_key": api_key}
url = "https://api.currencyscoop.com/v1/historical"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"])
temp_df["date"] = pd.to_datetime(temp_df["date"])
return temp_df | ed8c547e433a7f08e67863aca86b991bd746ccbb | 21,180 |
import os
def get_service_legacy(default=None):
"""Helper to get the old {DD,DATADOG}_SERVICE_NAME environment variables
and output a deprecation warning if they are defined.
Note that this helper should only be used for migrating integrations which
use the {DD,DATADOG}_SERVICE_NAME variables to the new DD_SERVICE variable.
If the environment variables are not in use, no deprecation warning is
produced and `default` is returned.
"""
for old_env_key in ["DD_SERVICE_NAME", "DATADOG_SERVICE_NAME"]:
if old_env_key in os.environ:
debtcollector.deprecate(
(
"'{}' is deprecated and will be removed in a future version. Please use DD_SERVICE instead. "
"Refer to our release notes on Github: https://github.com/DataDog/dd-trace-py/releases/tag/v0.36.0 "
"for the improvements being made for service names."
).format(old_env_key)
)
return os.getenv(old_env_key)
return default | 37ccd178f40af393028cc8ed88590c5d30f06855 | 21,181 |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
def get_met_data():
"""
Taken from Tensorflow tutorial on time series forecasting:
https://www.tensorflow.org/tutorials/structured_data/time_series
"""
zip_path = tf.keras.utils.get_file(
origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
fname='jena_climate_2009_2016.csv.zip',
extract=True)
csv_path, _ = os.path.splitext(zip_path)
df = pd.read_csv(csv_path)
# Slice [start:stop:step], starting from index 5 take every 6th record.
df = df[5::6]
date_time = pd.to_datetime(df.pop('Date Time'), format='%d.%m.%Y %H:%M:%S')
# Convert wind from direction/velocity to (x,y) components
wv = df.pop('wv (m/s)')
max_wv = df.pop('max. wv (m/s)')
# Convert to radians.
wd_rad = df.pop('wd (deg)')*np.pi / 180
# Calculate the wind x and y components.
df['Wx'] = wv*np.cos(wd_rad)
df['Wy'] = wv*np.sin(wd_rad)
# Calculate the max wind x and y components.
df['max Wx'] = max_wv*np.cos(wd_rad)
df['max Wy'] = max_wv*np.sin(wd_rad)
# Represent time with periodic signals
timestamp_s = date_time.map(pd.Timestamp.timestamp)
day = 24*60*60
year = (365.2425)*day
df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day))
df['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day))
df['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year))
df['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year))
return df | 17558ee12c0627f242746ef6afb9f45e9541d533 | 21,182 |
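# Hedged sketch of the periodic time encoding above: one day maps onto a full
# sine/cosine cycle, so 00:00 and 24:00 land on the same phase.
import numpy as np

day = 24 * 60 * 60
t = np.array([0.0, day / 4, day / 2, day])
day_sin = np.sin(t * (2 * np.pi / day))
assert np.allclose(day_sin[[0, -1]], 0.0, atol=1e-9)  # same phase at 0 h and 24 h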
import os
def get_drives():
"""A list of accessible drives"""
if os.name == "nt":
return _get_win_drives()
else:
return [] | f370697170d27600322d6b1eae1c6028e73dc5a6 | 21,183 |
from typing import Dict, Type
def _ref_tier_copy(source_eaf: Type[Eaf] = None,
target_eaf: Type[Eaf] = None,
source_tier_name: str = "",
target_tier_name: str = "",
target_parent_tier_name: str = "",
override_params: Dict[str, str] = {}):
"""
Copy annotations from a ref tier in one EAF to a new ref tier in another EAF
:param source_eaf: The Eaf object to copy from
:param target_eaf: The Eaf object to write to
:param source_tier_name: Name of the tier to get
:param target_tier_name: The name to call this tier in the destination
:param target_parent_tier_name: The name of the parent for the ref tier in the destination object
:param override_params: Use this to change tier params from what the tier has in the source file
:return:
"""
params = override_params if override_params else source_eaf.get_parameters_for_tier(source_tier_name)
target_eaf.add_tier(target_tier_name, ling=params["LINGUISTIC_TYPE_REF"], parent=target_parent_tier_name, tier_dict=params)
annotations = source_eaf.get_ref_annotation_data_for_tier(source_tier_name)
for annotation in annotations:
target_eaf.add_ref_annotation(id_tier=target_tier_name,
tier2=target_parent_tier_name,
time=annotation[0]+1,
value=annotation[2])
return target_eaf | f0c2fe27446d4a1f992f33c7610bc177d0e2c896 | 21,184 |
def fibonacci(length=10):
"""Get fibonacci sequence given it length.
Parameters
----------
length : int
The length of the desired sequence.
Returns
-------
sequence : list of int
The desired Fibonacci sequence
"""
if length < 1:
raise ValueError("Sequence length must be > 0")
sequence = [0] * (length + 2)
sequence[0] = 0
sequence[1] = 1
for i in range(2, len(sequence)):
sequence[i] = sequence[i - 1] + sequence[i - 2]
return sequence[: -2] | afa3ef63a663b4e89e5c4a694315083debdbab59 | 21,185 |
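# Hedged example: the first eight Fibonacci numbers, starting from 0.
assert fibonacci(8) == [0, 1, 1, 2, 3, 5, 8, 13]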
def readForecast(config, stid, model, date, hour_start=6, hour_padding=6, no_hourly_ok=False):
"""
Return a Forecast object from the main theta-e database for a given model and date. This is specifically designed
to return a Forecast for a single model and a single day.
hour_start is the starting hour for the 24-hour forecast period.
hour_padding is the number of hours on either side of the forecast period to include in the timeseries.
:param config:
:param stid: str: station ID
:param model: str: model name
:param date: datetime or str: date to retrieve
:param hour_start: int: starting hour of the day in UTC
:param hour_padding: int: added hours around the 24-hour TimeSeries
:param no_hourly_ok: bool: if True, does not raise an error if the hourly timeseries is empty
:return: Forecast
"""
# Basic sanity check for hour parameters
if hour_start < 0 or hour_start > 23:
raise ValueError('db.readForecast error: hour_start must be between 0 and 23.')
if hour_padding < 0 or hour_padding > 24:
raise ValueError('db.readForecast error: hour_padding must be between 0 and 24.')
# Set the default database configuration; create Forecast
data_binding = 'forecast'
if config['debug'] > 9:
print("db.readForecast: reading forecast from '%s' data binding" % data_binding)
forecast = Forecast(stid, model, date)
# The daily forecast part
table_type = 'DAILY_FORECAST'
daily = readDaily(config, stid, data_binding, table_type, model, start_date=date, end_date=date)
# The hourly forecast part
table_type = 'HOURLY_FORECAST'
date = date_to_datetime(date)
start_date = date + timedelta(hours=hour_start - hour_padding)
end_date = date + timedelta(hours=hour_start + 24 + hour_padding)
try:
timeseries = readTimeSeries(config, stid, data_binding, table_type, model, start_date, end_date)
except MissingDataError:
if no_hourly_ok:
timeseries = TimeSeries(stid)
else:
raise
# Assign and return
forecast.timeseries = timeseries
forecast.daily = daily
return forecast | 57260de3d8866219bcdca003b9829f9e9caf0f86 | 21,186 |
def get_direct_hit_response(request, query, snuba_params, referrer):
"""
Checks whether a query is a direct hit for an event, and if so returns
a response. Otherwise returns None
"""
event_id = normalize_event_id(query)
if event_id:
snuba_args = get_snuba_query_args(
query=u'id:{}'.format(event_id),
params=snuba_params)
results = raw_query(
selected_columns=SnubaEvent.selected_columns,
referrer=referrer,
**snuba_args
)['data']
if len(results) == 1:
response = Response(
serialize([SnubaEvent(row) for row in results], request.user)
)
response['X-Sentry-Direct-Hit'] = '1'
return response | 4ffc0dcd5dbac56fc60e2414c1952e629f1fc951 | 21,187 |
from typing import Any
import dotenv
import os
def get_env_var(var_name: str) -> Any:
"""Get envronment var or raise helpful exception.
:param var_name: Name of environment variable to get.
:raises: ImproperlyConfigured if environment variable not found.
"""
dotenv.load_dotenv()
try:
return os.environ[var_name]
except KeyError:
error_msg = f"Environment variable {var_name} not set"
raise ImproperlyConfigured(error_msg) | 0771f1bba146af22b695d46bb99ee2edfec13b64 | 21,188 |
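# Hedged usage sketch; "DEMO_VAR" is an illustrative variable name, not from
# the source project. Assumes python-dotenv is installed for load_dotenv().
import os

os.environ["DEMO_VAR"] = "demo-value"
assert get_env_var("DEMO_VAR") == "demo-value"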
from typing import List, Set, Tuple
def _canonicalize_clusters(clusters: List[List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]:
"""
The data might include 2 annotated spans which are identical,
but have different ids. This checks all clusters for spans which are
identical, and if it finds any, merges the clusters containing the
identical spans.
"""
merged_clusters: List[Set[Tuple[int, int]]] = []
for cluster in clusters:
cluster_with_overlapping_mention = None
for mention in cluster:
# Look at clusters we have already processed to
# see if they contain a mention in the current
# cluster for comparison.
for cluster2 in merged_clusters:
if mention in cluster2:
# first cluster in merged clusters
# which contains this mention.
cluster_with_overlapping_mention = cluster2
break
# Already encountered overlap - no need to keep looking.
if cluster_with_overlapping_mention is not None:
break
if cluster_with_overlapping_mention is not None:
# Merge cluster we are currently processing into
# the cluster in the processed list.
cluster_with_overlapping_mention.update(cluster)
else:
merged_clusters.append(set(cluster))
return [list(c) for c in merged_clusters] | d8435e6859e1f9720d7a6f1ec7dd1e2d51df5502 | 21,189 |
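# Hedged example: two clusters sharing the span (0, 1) are merged; the
# disjoint cluster stays separate.
clusters = [[(0, 1), (5, 6)], [(0, 1), (9, 9)], [(2, 3)]]
merged = _canonicalize_clusters(clusters)
assert sorted(len(c) for c in merged) == [1, 3]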
import cv2
import numpy as np
def remove_outliers(matches, keypoints):
"""
Calculate fundamental matrix between 2 images to remove incorrect matches.
Return matches with outlier removed. Rejects matches between images if there are < 20
:param matches: List of lists of lists where matches[i][j][k] is the kth cv2.Dmatch object for images i and j
:param keypoints: List of lists of cv2.Keypoint objects. keypoints[i] is list for image i.
"""
for i in range(len(matches)):
for j in range(len(matches[i])):
if j <= i: continue
if len(matches[i][j]) < 20:
matches[i][j] = []
continue
kpts_i = []
kpts_j = []
for k in range(len(matches[i][j])):
kpts_i.append(keypoints[i][matches[i][j][k].queryIdx].pt)
kpts_j.append(keypoints[j][matches[i][j][k].trainIdx].pt)
kpts_i = np.int32(kpts_i)
kpts_j = np.int32(kpts_j)
F, mask = cv2.findFundamentalMat(kpts_i, kpts_j, cv2.FM_RANSAC, ransacReprojThreshold=3)
if np.linalg.det(F) > 1e-7: raise ValueError(f"Bad F_mat between images: {i}, {j}. Determinant: {np.linalg.det(F)}")
matches[i][j] = np.array(matches[i][j])
if mask is None:
matches[i][j] = []
continue
matches[i][j] = matches[i][j][mask.ravel() == 1]
matches[i][j] = list(matches[i][j])
if len(matches[i][j]) < 20:
matches[i][j] = []
continue
return matches | 53b70f98389a33ba6a28c65fab8862bc629d2f0d | 21,190 |
import numpy as np
def err_comp(uh, snap, times_offline, times_online):
"""
Computes the absolute l2 error norm and the rms error
norm between the true solution and the nirom solution projected
on to the full dimensional space
"""
err = {}
w_rms = {}
soln_names = uh.keys()
# ky = list(uh.keys())[0]
N = snap[list(uh.keys())[0]].shape[0]
tstep = np.searchsorted(times_offline, times_online)
for key in soln_names:
interp = uh[key]
true = snap[key][:, tstep]
err[key] = np.linalg.norm(true - interp, axis=0)
w_rms[key] = err[key]/(np.sqrt(N))
return w_rms | d64b061ec1cb3f8d7e9247cc5a74c4b6b852bc3b | 21,191 |
from datetime import datetime
def calc_stock_state(portfolio, code: int, date: datetime, stocks, used_days: int):
    """
    Compute the state:
    - time series of stock prices, technical indicators, and trading volume
    - total assets and number of shares held
    Args:
        stocks: dict keyed by stock code, containing the unit share count and
            the open, close, high, low, and volume series
        used_days: number of days of history to use
    """
    stock_df = stocks[code]['prices']
    date = datetime(date.year, date.month, date.day)  # convert to datetime
    try:
        time_series_array = stock_df[stock_df.index <= date][-used_days:].values
    except Exception as e:
        logger.error("datetime comparison error")
        logger.error(e)
        raise  # time_series_array would be unbound below otherwise
    time_series_array = time_series_array / time_series_array[0]  # normalization
    time_series_list = list(time_series_array.flatten())
    s1 = portfolio.initial_deposit
    s2 = portfolio.stocks[code].total_cost  # total cost of acquisition
    s3 = portfolio.stocks[code].current_count  # number of shares currently held
    s4 = portfolio.stocks[code].average_cost  # average acquisition price
return time_series_list+[s1,s2,s3,s4] | 7aec335e15d5c169bfbaf7995614c532c31bd353 | 21,192 |
def lowercase_words(words):
"""
Lowercases a list of words
Parameters
-----------
words: list of words to process
Returns
-------
Processed list of words where words are now all lowercase
"""
return [word.lower() for word in words] | b6e8658f35743f6729a9f8df229b382797b770f6 | 21,193 |
import numpy as np
from PIL import Image
def convert_images_to_arrays_train(file_path, df):
"""
Converts each image to an array, and appends each array to a new NumPy
array, based on the image column equaling the image file name.
INPUT
file_path: Specified file path for resized test and train images.
df: Pandas DataFrame being used to assist file imports.
OUTPUT
NumPy array of image arrays.
"""
lst_imgs = [l for l in df['train_image_name']]
return np.array([np.array(Image.open(file_path + img)) for img in lst_imgs]) | bab9ccc350c891d8c8dc634a431309490533f8ad | 21,194 |
def get_projection_matrix(X_src, X_trg, orthogonal, direction='forward', out=None):
"""
X_src: ndarray
X_trg: ndarray
orthogonal: bool
direction: str
returns W_src if 'forward', W_trg otherwise
"""
xp = get_array_module(X_src, X_trg)
if orthogonal:
if direction == 'forward':
u, s, vt = xp.linalg.svd(xp.dot(X_trg.T, X_src))
W = xp.dot(vt.T, u.T, out=out)
elif direction == 'backward':
u, s, vt = xp.linalg.svd(xp.dot(X_src.T, X_trg))
W = xp.dot(vt.T, u.T, out=out)
else:
if direction == 'forward':
W = xp.dot(xp.linalg.pinv(X_src), X_trg, out=out)
elif direction == 'backward':
W = xp.dot(xp.linalg.pinv(X_trg), X_src, out=out)
return W | ef7f722e6beeb652069270afd81315a951d2a925 | 21,195 |
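# Hedged self-contained check of the orthogonal ('forward') branch above: it
# solves the Procrustes problem, so a known rotation is recovered exactly.
import numpy as np

rng = np.random.RandomState(0)
X_src = rng.rand(100, 3)
theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0, 0.0, 1.0]])
X_trg = X_src @ R
u, s, vt = np.linalg.svd(X_trg.T @ X_src)   # same SVD as the 'forward' branch
W = vt.T @ u.T
assert np.allclose(X_src @ W, X_trg)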
def _standardize_df(data_frame):
"""
Helper function which divides df by std and extracts mean.
:param data_frame: (pd.DataFrame): to standardize
:return: (pd.DataFrame): standardized data frame
"""
return data_frame.sub(data_frame.mean(), axis=1).div(data_frame.std(), axis=1) | cbe0e1f5c507181a63193a4e08f4ed8139d9e129 | 21,196 |
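# Hedged check: after standardizing, every column has mean ~0 and std ~1.
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
z = _standardize_df(df)
assert z.mean().abs().max() < 1e-12
assert (z.std() - 1).abs().max() < 1e-12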
def has_edit_metadata_permission(user, record):
"""Return boolean whether user can update record."""
return EditMetadataPermission(user, record).can() | fd2a60d27151181c02d5a0fb6548f28beaa5b2b3 | 21,197 |
def truncate_chars_middle(text, limit, sep="..."):
"""
    Truncates a given string **text** in the middle, keeping **limit** characters of it (plus **sep**)
    if its length exceeds **limit**, or returning it unchanged otherwise.
Since this is a template filter, no exceptions are raised when they would normally do.
:param text: the text to truncate.
:param limit: the maximum length of **text**.
:param sep: the separator to display in place of the (**len(text) - limit**) truncated characters.
:return: a truncated version of **text**.
"""
if not text or limit < 0:
return ""
length = len(text)
if length < limit:
return text
else:
first_half = ceil(limit / 2)
second_half = length - floor(limit / 2)
return text[:first_half] + sep + text[second_half:] | e08e6ec0b3522104d54e6690361d6ecf297f5566 | 21,198 |
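# Hedged example: a 26-character string keeps 5 + 5 characters around the
# separator when truncated to a limit of 10.
assert truncate_chars_middle("abcdefghijklmnopqrstuvwxyz", 10) == "abcde...vwxyz"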
import re
def parse_block(block, site_name, site_num, year):
"""Parse a main data block from a BBC file"""
# Cleanup difficult issues manually
# Combination of difficult \n's and OCR mistakes
replacements = {'Cemus': 'Census',
'Description of plot': 'Description of Plot',
'Description Oi Plot': 'Description of Plot',
'Acknowledgmentsz': 'Acknowledgments: ',
'Other Observers:]': 'Other Observers: ',
'Other 0berservers': 'Other Observers: ',
'0ther Observerers': 'Other Observers: ',
'Other 0bservers': 'Other Observers: ',
'Other Observers.': 'Other Observers:',
'Other Observers]': 'Other Observers:',
'Continnity': 'Continuity',
'lViagnolia': 'Magnolia',
'lVildlife': 'Wildlife',
'Mallard ): American Black Duck hybrid': 'Mallard x American Black Duck hybrid',
'Observerszj': 'Observers',
'Bobolink; 9.0 territories': 'Bobolink, 9.0 territories',
"37°38'N, 121°46lW": "37°38'N, 121°46'W",
'Common Yellowthroat, 4.5, Northern Flicker, 3.0': 'Common Yellowthroat, 4.5; Northern Flicker, 3.0',
'Red-bellied Woodpecker, 2.0, Carolina Chickadee, 2.0': 'Red-bellied Woodpecker, 2.0; Carolina Chickadee, 2.0',
'Winter 1992': ' ', #One header line in one file got OCR'd for some reason
'nuLquu “1:10': ' ',
'nululuu 1:1:1.)': ' ',
'20.9 h; 8 Visits (8 sunrise), 8, 15, 22, 29 April; 6, 13, 20, 27 May.': '20.9 h; 8 Visits (8 sunrise); 8, 15, 22, 29 April; 6, 13, 20, 27 May.',
'19.3 h; 11 visits (11 sunrise;': '19.3 h; 11 visits (11 sunrise);',
'Foster Plantation; 42"7’N': 'Foster Plantation; 42°7’N',
'Hermit Thrush, 4.5 (18), Black-throatcd Green Warbler': 'Hermit Thrush, 4.5 (18); Black-throated Green Warbler', # Fixes both delimiter and selling of throated
'39"] 2‘N, 76°54’W': '39°12‘N, 76°54’W',
"42°“7'N, 77°45’W": "42°7'N, 77°45’W",
'41°4\'N, 76"7’W': "41°4'N, 76°7’W",
'w‘sits': 'visits',
'79513’W': '79°13’W',
'Continuity.': 'Continuity:',
'Continuity"': 'Continuity:',
"40°44'N, 7 D50’W": "40°44'N, 75°50’W",
"41350'N, 71°33'W": "41°50'N, 71°33'W",
'44°57’N, 68D41’W': '44°57’N, 68°41’W',
'18.8 11; 11 Visits': '18.8 h; 11 Visits',
"Descripn'on of Plot": "Description of Plot",
'41 c’42’N, 73°13’VV': "41°42'N, 73°13'W",
'Northern Rough-winged Swallow. 0.5': 'Northern Rough-winged Swallow, 0.5',
'Warbling Vireo, 1.0, Northern Cardinal, 1.0': 'Warbling Vireo, 1.0; Northern Cardinal, 1.0',
'Wood Thrush, 3.0 (18), American Redstart, 3.0': 'Wood Thrush, 3.0; American Redstart, 3.0',
'study-hrs': 'study-hours',
'studyhours': 'study-hours',
'Nuttall’s Woodpecker, 3 (9; 2N),':'Nuttall’s Woodpecker, 3 (9; 2N);',
'38°35’45”N\', 76°45’46"W': '38°35’45”N, 76°45’46"W',
'Northern Parula 8': 'Northern Parula, 8',
'47°08’N, 99°] 5’ W': '47°08’N, 99°15’ W',
'Yellow Warbler, 1,’ Clay-colored Sparrow, 1,Savannah Sparrow, 1;': 'Yellow Warbler, 1; Clay-colored Sparrow, 1; Savannah Sparrow, 1;',
'Established 1993; 2 )n‘.': 'Established 1993; 2.',
'Established l983': 'Established 1983',
'Established 1978; 18 you': 'Established 1978; 18 yr.',
'This plot is part of a larger plot that was first censused in 1981.': '',
'Ruby-throatcd Hummingbird': 'Ruby-throated Hummingbird',
'RuHed Grouse': 'Ruffed Grouse',
'\Varbler': "Warbler",
'VVarbler': "Warbler",
'Common Yellowthroat 3': 'Common Yellowthroat, 3',
'all known to breed in immediate vicinity': '',
'and a number of vagrants': '',
"Utner Ubservers": "Other Observers",
'Dovmy': 'Downy',
"W'oodpecker": "Woodpecker",
"\700d Thrush": "Wood Thrush",
"\form-eating Warbler": "Worm-eating Warbler",
"Clifl' Swallow": "Cliff Swallow",
'Clifl\ Swallow"': 'Cliff Swallow',
'Downy Woodpecknululuu I JHJ er': 'Downy Woodpecker',
'unidentified Accipiter': 'Accipiter sp.',
"Traill’s Flycatcher": "Willow Flycatcher",
'Eastern Titmouse': 'Tufted Titmouse',
'Common Barn Owl': 'Barn Owl',
'Common Bushtit': 'Bushtit',
'Yellow-shafted Flicker': 'Northern Flicker',
'Yellowshafted Flicker': 'Northern Flicker',
'Common Barn-Owl': 'Barn Owl',
'Northern Parula Warbler': 'Northern Parula',
'Yellow-rumped,': 'Yellow-rumped Warbler,',
'Common Crow': 'American Crow',
', Raven,': ', Common Raven,',
'; Raven,': '; Common Raven,',
'+_': '+',
'chickadee sp.;': 'chickadee sp.,',
'Yellow Warbler, 0.5, Common Yellowthroat, 0.5.': 'Yellow Warbler, 0.5; Common Yellowthroat, 0.5.',
'Whip-poor-will, 1.0, European Starling, 1.0': 'Whip-poor-will, 1.0; European Starling, 1.0',
'80(9\'45"': '80°9\'45"',
'American Crow; 1.0;': 'American Crow, 1.0;',
"47°08'N7 99°15'W;": "47°08'N 99°15'W;",
"', 7'6°45": ", 76°45",
"43°] 6’N": "43°16'N",
"121°461W": "121°46'W",
"39.] h;": "39.1 h;",
"74°ll": "74°11",
"40°] 1": "40°11",
"Estao lished": "Established",
"Estabo lished": "Established",
"Estab lished": "Established",
"79°O": "79°0",
"79°]": "79°1",
"12.] h;": "12.1 h;",
"terfitories": "territories"
}
block = get_cleaned_string(block)
for replacement in replacements:
if replacement in block:
print("Replacing {} with {}".format(replacement, replacements[replacement]))
block = block.replace(replacement, replacements[replacement])
block = get_clean_block(block)
p = re.compile(r'((?:Site Number|Location|Continuity|Previously called|Size|Description of Plot|Edge|Topography and Elevation|Weather|Coverage|Census|Fledglings|Nests and Fledglings|Fledglings Seen|Fledglings Noted|Total|Visitors|Nests Found|Remarks|Observers|Other Observers|Other Observer|Acknowledgments)):')
split_block = p.split(block)[1:] #discard first value; an empty string
block_dict = {split_block[i]: split_block[i+1] for i in range(0, len(split_block), 2)}
block_dict['SiteName'] = site_name
block_dict['SiteNumInCensus'] = site_num * 10000 + year
return block_dict | 0a367e9163d1136ec725560156d67c05ca1c1d38 | 21,199 |