| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
def aes_block(ciphertext, key):
"""Uses the AES algorithm in ECB mode to decrypt a 16-byte ciphertext
block with a given key of the same length.
Keyword arguments:
ciphertext -- the byte string to be decrypted
key -- the byte string key
"""
if len(ciphertext) != 16:
raise ValueError("The ciphertext can only be one block (16 bytes).")
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=backend)
decryptor = cipher.decryptor()
return decryptor.update(ciphertext) + decryptor.finalize()
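# Illustrative usage sketch; the key and ciphertext below are placeholder
# values, not anything from the original code.
def _demo_aes_block():
    key = bytes(range(16))     # a 16-byte AES-128 key
    ciphertext = bytes(16)     # a single all-zero block
    plaintext = aes_block(ciphertext, key)
    assert len(plaintext) == 16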
|
b0b894bcf860c92b46235ce45b8fd6c8c045b1ca
| 3,643,300
|
import logging
def getLogger(*args, **kwargs):
"""
Wrapper around ``logging.getLogger`` that respects `overrideLogLevel <#setOverrideLogLevel>`_.
"""
logger = logging.getLogger(*args, **kwargs)
if _overrideLogLevel is not None:
        # NOTSET makes this logger defer to its ancestors' effective level.
        logger.setLevel(logging.NOTSET)
return logger
|
f4ae90925e8bd20a63997e2e5e04924aeeafbcaa
| 3,643,301
|
def split_metadata_string(text, chunk_length=None):
"""Split string by length.
Split text to chunks by entered length.
Example:
```python
text = "ABCDEFGHIJKLM"
result = split_metadata_string(text, 3)
print(result)
    >>> ['ABC', 'DEF', 'GHI', 'JKL', 'M']
```
Args:
text (str): Text that will be split into chunks.
chunk_length (int): Single chunk size. Default chunk_length is
set to global variable `TVPAINT_CHUNK_LENGTH`.
Returns:
list: List of strings with at least one item.
"""
if chunk_length is None:
chunk_length = TVPAINT_CHUNK_LENGTH
chunks = []
for idx in range(chunk_length, len(text) + chunk_length, chunk_length):
start_idx = idx - chunk_length
chunks.append(text[start_idx:idx])
return chunks
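# Minimal round-trip sketch (chunk length passed explicitly, so the module's
# TVPAINT_CHUNK_LENGTH default is not needed): joining the chunks restores
# the original string.
def _demo_split_metadata_string():
    chunks = split_metadata_string("ABCDEFGHIJKLM", 3)
    assert chunks == ['ABC', 'DEF', 'GHI', 'JKL', 'M']
    assert "".join(chunks) == "ABCDEFGHIJKLM"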
|
c2e97aa768f64f02ef1a691dfadce3dd9fe5538a
| 3,643,302
|
import scanpy as sc
from anndata import read_h5ad
def load_kimmel_data(root_data_path,
                     flag_size_factor=True, total_ct_per_cell=1e4,
                     flag_log1p=True):
    """Load normalized data from Kimmel et al, GR, 2019
    1. Size factor normalization to total_ct_per_cell counts per cell (1e4 by default)
    2. log(x+1) transform
    Args:
        root_data_path (str): data path. Should contain ./Kimmel_GR_2019_data
    Returns:
        adata (AnnData): Combined data for kidney, lung, and spleen
    """
# Load filtered data
file_path=root_data_path+'/Kimmel_GR_2019_data'
adata_kidney = read_h5ad(file_path + '/kidney.h5ad')
adata_lung = read_h5ad(file_path + '/lung.h5ad')
adata_spleen = read_h5ad(file_path + '/spleen.h5ad')
# Size factor normalization
    if flag_size_factor:
sc.pp.normalize_per_cell(adata_kidney, counts_per_cell_after=total_ct_per_cell)
sc.pp.normalize_per_cell(adata_lung, counts_per_cell_after=total_ct_per_cell)
sc.pp.normalize_per_cell(adata_spleen, counts_per_cell_after=total_ct_per_cell)
# log(x+1) transform
    if flag_log1p:
sc.pp.log1p(adata_kidney)
sc.pp.log1p(adata_lung)
sc.pp.log1p(adata_spleen)
# Combine data
adata = adata_kidney.concatenate(adata_lung, adata_spleen,
batch_key='batch_combine', join='inner')
adata.obs['tissue'] = ''
adata.obs.loc[adata.obs['batch_combine']=='0', 'tissue'] = 'Kidney'
adata.obs.loc[adata.obs['batch_combine']=='1', 'tissue'] = 'Lung'
adata.obs.loc[adata.obs['batch_combine']=='2', 'tissue'] = 'Spleen'
adata.obs['sex'] = 'male'
adata.obs['age_old'] = adata.obs['age'].values.copy()
adata.obs['age'] = ['7m' if x=='young' else '22m' for x in adata.obs['age_old']]
adata.obs['age_num'] = [7 if x=='young' else 22 for x in adata.obs['age_old']]
return adata
|
324f5779c180811db0b9316125553f7089d5a34b
| 3,643,303
|
import requests
from requests.exceptions import RequestException
from typing import Optional
def get_coupon_page() -> Optional[bytes]:
"""
Gets the coupon page HTML
"""
try:
response = requests.get(COUPONESE_DOMINOS_URL)
return response.content
except RequestException as e:
bot.logger.error(e.response.content)
return None
|
919cd65ec9e4f0af7b06a79c8aa962f164fb7af6
| 3,643,304
|
def get_program_similarity(fingerprint_a, fingerprint_b):
"""Find similarity between fingerprint of two programs.
A fingerprint is a subset of k-gram hashes generated from program. Each of
the k-gram hashes is formed by hashing a substring of length K and hence
fingerprint is indirectly based on substrings of a program. Fingerprint acts
as identity of the program and can be used to compare two programs.
Args:
fingerprint_a: list((int, int)). Fingerprint of first program. First
integer stores the fingerprint hash value and 2nd integer stores
location in the program where fingerprint is present.
fingerprint_b: list((int, int)). Fingerprint of second program.
Returns:
float. Similarity between first and second program.
"""
multiset_a = [h for (h, _) in fingerprint_a]
multiset_b = [h for (h, _) in fingerprint_b]
return calc_jaccard_index(multiset_a, multiset_b)
|
c3cc3def2d17657c266e09ce5b05da773e1f6f1a
| 3,643,305
|
import os
from shapely.geometry import shape
def load_regions_with_bounding_boxes():
"""Loads bounding boxes as shapely objects.
Returns:
list: list of shapely objects containing regional geometries
"""
print(
"loading region bounding boxes for computing carbon emissions region, this may take a moment..."
)
dir_path = os.path.dirname(os.path.realpath(__file__))
    all_geoms = read_terrible_json(os.path.join(dir_path, "data/zonegeometries.json"))
for i, geom in enumerate(all_geoms):
all_geoms[i]["geometry"] = shape(geom["geometry"])
print("Done!")
return all_geoms
|
3ac80793312423df28ed9b6bafc3dfe542319e84
| 3,643,306
|
def create_variables(name, shape, initializer=tf.contrib.layers.xavier_initializer(), is_fc_layer=False):
"""
:param name: A string. The name of the new variable
:param shape: A list of dimensions
    :param initializer: Use Xavier as default.
:param is_fc_layer: Want to create fc layer variable? May use different weight_decay for fc
layers.
:return: The created variable
"""
# TODO: to allow different weight decay to fully connected layer and conv layer
if is_fc_layer is True:
regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.weight_decay)
else:
regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.weight_decay)
new_variables = tf.get_variable(name, shape=shape, initializer=initializer, regularizer=regularizer)
return new_variables
|
4197b189c54075a1ce41e6fec85445b86ea26e92
| 3,643,307
|
import numpy as np
def linear_transformation(x, y_min, y_max):
"""
x : the range to be transformed
y_min, y_max : lower and upper boundaries for the range into which x
is transformed to
Returns y = f(x), f(x) = m * x + b
"""
x_min = np.min(x)
x_max = np.max(x)
if x_min == x_max:
x_max = x_min * 1.0001
return (y_min + (y_max - y_min) / (x_max - x_min) * (x - x_min))
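# Worked example (numpy only): map the range [0, 10] onto [0, 1].
def _demo_linear_transformation():
    x = np.array([0.0, 5.0, 10.0])
    y = linear_transformation(x, 0.0, 1.0)
    assert np.allclose(y, [0.0, 0.5, 1.0])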
|
ddd6da6b006888a43711dc391948ffce96bd0a81
| 3,643,308
|
import random
from torchvision.transforms import functional
def resize_image(image, desired_width=768, desired_height=384, random_pad=False):
"""Resizes an image keeping the aspect ratio mostly unchanged.
Returns:
image: the resized image
        window: (x1, y1, x2, y2). Padding may be inserted in the returned
            image. If so, this window is the coordinates of the image part
            of the full image (excluding the padding). The x2, y2 pixels
            are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [left, top, right, bottom]
"""
# Default window (x1, y1, x2, y2) and default scale == 1.
w, h = image.size
width_scale = desired_width / w
height_scale = desired_height / h
scale = min(width_scale, height_scale)
# Resize image using bilinear interpolation
if scale != 1:
image = functional.resize(image, (round(h * scale), round(w * scale)))
w, h = image.size
y_pad = desired_height - h
x_pad = desired_width - w
top_pad = random.randint(0, y_pad) if random_pad else y_pad // 2
left_pad = random.randint(0, x_pad) if random_pad else x_pad // 2
padding = (left_pad, top_pad, x_pad - left_pad, y_pad - top_pad)
assert all([x >= 0 for x in padding])
image = functional.pad(image, padding)
window = [left_pad, top_pad, w + left_pad, h + top_pad]
return image, window, scale, padding
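# Usage sketch (assumes Pillow and torchvision.transforms.functional are
# available, as the function itself expects): a 1024x512 image scales by 0.75
# to exactly 768x384, so no padding is needed.
def _demo_resize_image():
    from PIL import Image
    img = Image.new("RGB", (1024, 512))
    resized, window, scale, padding = resize_image(img, desired_width=768,
                                                   desired_height=384)
    assert resized.size == (768, 384)
    assert scale == 0.75
    assert padding == (0, 0, 0, 0)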
|
9744bb52c58e1049c8cbd6ce9e1f1864f64ac3c5
| 3,643,309
|
def get_state(*names):
"""
Return a list of the values of the given state keys
    Parameters
    ----------
    *names : str
        Names of the state values to retrieve
Returns
-------
[any, ...]
List of value matching the requested state property names
"""
_app = get_app_instance()
results = []
for name in names:
results.append(_app.get(name))
return results
|
8f863bdb9f578eb0e12731d1b752f197d4476a2c
| 3,643,310
|
def ver_datos_basicos(request, anexo_id):
"""
    Display the basic data (datos básicos) of an anexo.
"""
anexo = __get_anexo(request, anexo_id)
parts = anexo.get_cue_parts()
return my_render(request, 'registro/anexo/ver_datos.html', {
'template': 'registro/anexo/ver_datos_basicos.html',
'anexo': anexo,
'page_title': 'Datos básicos',
'actual_page': 'datos_basicos',
'configuracion_solapas': ConfiguracionSolapasAnexo.get_instance(),
'datos_verificados': anexo.get_verificacion_datos().get_datos_verificados()
})
|
63cb5222cad1fa702dd5bd2fc7a14c38f4b71d65
| 3,643,311
|
def fill_sections(source, sections):
"""
>>> fill_sections(\
' /* Begin User Code Section: foobar *//* End User Code Section: foobar */', {'foobar': 'barbaz'})
' /* Begin User Code Section: foobar */\\n barbaz\\n /* End User Code Section: foobar */'
"""
def repl(matches):
indent_amt = len(matches[1])
secname = matches[2]
return indent(create_section(secname, sections.get(secname, '') + '\n'), indent_amt)
return fill_section.sub(repl, source)
|
6a76826f45aa0880039e70ad6bb41aa93442976b
| 3,643,312
|
def CNOT(n):
"""CNOT gate on 2-Qubit system with control qubit = 0 and target qubit = 1"""
    x = np.copy(I4)
    # Swap rows |10> and |11> of the 4x4 identity: the target qubit is flipped
    # whenever the control qubit is 1.
    t = np.copy(x[2, ])
    x[2, ] = x[3, ]
    x[3, ] = t
return x.dot(n)
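# Illustrative check (assumes ``import numpy as np`` and ``I4 = np.eye(4)`` at
# module level, as the function expects): CNOT maps |10> to |11>.
def _demo_cnot():
    ket_10 = np.array([0.0, 0.0, 1.0, 0.0])
    assert np.allclose(CNOT(ket_10), [0.0, 0.0, 0.0, 1.0])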
|
af72004c9dd6f4a970e95d1da48a9d3776bd730b
| 3,643,313
|
import tokenize
def parse(s):
"""Parse a single string. This is just a convenience function."""
return pogo(parseSingleExpression(tokenize(s),
identity_cont))
|
9a7a2f4b2afd1daf22e6d2258e13ac9d13d380b3
| 3,643,314
|
def link_match_check(row):
    """
    Return True if the row's link is already in the database.
    """
    all_objects = Post.objects.all()
    try:
        row_link = row.a["href"]
        # Compare against every stored post, not just the first one.
        return any(obj.link == row_link for obj in all_objects)
    except TypeError:
        return False
|
2d55554248791b8edb5ec6080bc4c4f152a6a23a
| 3,643,315
|
def merge_s2_threshold(log_area, gap_thresholds):
"""Return gap threshold for log_area of the merged S2
with linear interpolation given the points in gap_thresholds
:param log_area: Log 10 area of the merged S2
:param gap_thresholds: tuple (n, 2) of fix points for interpolation
"""
for i, (a1, g1) in enumerate(gap_thresholds):
if log_area < a1:
if i == 0:
return g1
a0, g0 = gap_thresholds[i - 1]
return (log_area - a0) * (g1 - g0) / (a1 - a0) + g0
return gap_thresholds[-1][1]
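# Small sketch of the interpolation; the fix points below are invented for
# illustration, not taken from any real configuration.
def _demo_merge_s2_threshold():
    gap_thresholds = ((2.0, 5000.0), (4.0, 15000.0))
    assert merge_s2_threshold(1.0, gap_thresholds) == 5000.0   # below the first point
    assert merge_s2_threshold(3.0, gap_thresholds) == 10000.0  # linear interpolation
    assert merge_s2_threshold(5.0, gap_thresholds) == 15000.0  # above the last point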
|
36dd06c8af828e3dc2ef5f1048046039feaa6c21
| 3,643,316
|
def rename_indatabet_cols(df_orig):
"""
"""
df = df_orig.copy(deep=True)
odds_cols = {'odds_awin_pinn': 'awinOddsPinnIndatabet',
'odds_draw_pinn': 'drawOddsPinnIndatabet',
'odds_hwin_pinn': 'hwinOddsPinnIndatabet',
'odds_awin_bet365': 'awinOddsBet365Indatabet',
'odds_draw_bet365': 'drawOddsBet365Indatabet',
'odds_hwin_bet365': 'hwinOddsBet365Indatabet',
'odds_ftgoalso2.5_bet365': 'ftGoalsO2.5OddsBet365Indatabet',
'odds_ftgoalsu2.5_bet365': 'ftGoalsU2.5OddsBet365Indatabet',
'odds_ftgoalso2.5_pinn': 'ftGoalsO2.5OddsPinnIndatabet',
'odds_ftgoalsu2.5_pinn': 'ftGoalsU2.5OddsPinnIndatabet'}
df.rename(columns=odds_cols, inplace=True)
return df
|
a07e7c9757e1b207528f7b7fda63e06a1dced47a
| 3,643,317
|
import urllib.request
import numpy as np
import pandas as pd
def get_market_updates(symbols, special_tags):
"""
Get current yahoo quote.
'special_tags' is a list of tags. More info about tags can be found at
http://www.gummy-stuff.org/Yahoo-data.htm
Returns a DataFrame
"""
if isinstance(symbols, str):
sym_list = symbols
elif not isinstance(symbols, pd.Series):
symbols = pd.Series(symbols)
sym_list = str.join('+', symbols)
else:
sym_list = str.join('+', symbols)
# Symbol must be in the special_tags for now
    if 's' not in special_tags:
special_tags.insert(0, 's')
request = ''.join(special_tags) # code request string
special_tag_names = [settings.YAHOO_SYMBOL_TAGS[x] for x in special_tags]
header = special_tag_names
data = dict(list(zip(
list(special_tag_names), [[] for i in range(len(special_tags))]
)))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (
sym_list, request)
try:
lines = urllib.request.urlopen(urlStr).readlines()
except Exception as e:
s = "Failed to download:\n{0}".format(e)
print(s)
return None
for line in lines:
fields = line.decode('utf-8').strip().split(',')
for i, field in enumerate(fields):
if field[-2:] == '%"':
data[header[i]].append(float(field.strip('"%')))
elif field[0] == '"':
data[header[i]].append(field.strip('"'))
else:
try:
data[header[i]].append(float(field))
except ValueError:
data[header[i]].append(np.nan)
idx = data.pop('Symbol')
return pd.DataFrame(data, index=idx)
|
d3dd970ef513a131147cc687cb9ad2076ee0b0ff
| 3,643,318
|
def HLRBRep_SurfaceTool_Torus(*args):
"""
:param S:
:type S: Standard_Address
:rtype: gp_Torus
"""
return _HLRBRep.HLRBRep_SurfaceTool_Torus(*args)
|
46aa63882557b1a2e13cb245f81fcf9871903a18
| 3,643,319
|
def load_augmentation_class():
"""
Loads the user augmentation class.
Similar in spirit to django.contrib.auth.load_backend
"""
try:
class_name = AUTH.USER_AUGMENTOR.get()
i = class_name.rfind('.')
module, attr = class_name[:i], class_name[i + 1:]
mod = import_module(module)
klass = getattr(mod, attr)
LOG.info("Augmenting users with class: %s" % (klass,))
return klass
    except Exception:
LOG.exception('failed to augment class')
raise ImproperlyConfigured("Could not find user_augmentation_class: %s" % (class_name,))
|
16f737a2687e0b2e5002982adcafef9c32c82e36
| 3,643,320
|
def FieldTypeFor(descriptor, field_desc, nullable):
"""Returns the Javascript type for a given field descriptor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: A field descriptor for a particular field in a message.
nullable: Whether or not the value may be null.
Returns:
The Javascript type for the given field descriptor.
"""
element_type = {
descriptor.FieldDescriptor.TYPE_DOUBLE: lambda: 'number',
descriptor.FieldDescriptor.TYPE_INT32: lambda: 'number',
descriptor.FieldDescriptor.TYPE_BOOL: lambda: 'boolean',
descriptor.FieldDescriptor.TYPE_STRING: lambda: 'string',
descriptor.FieldDescriptor.TYPE_ENUM: (
lambda: field_desc.enum_type.full_name),
descriptor.FieldDescriptor.TYPE_MESSAGE: (
lambda: field_desc.message_type.full_name),
}[field_desc.type]()
# However, if the field is actually a reference to a tagspec name (string),
# make it a number instead as we'll be replacing this with the tagspec id.
if field_desc.full_name in TAG_NAME_REFERENCE_FIELD:
element_type = 'number'
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if nullable:
return 'Array<!%s>' % element_type
return '!Array<!%s>' % element_type
if nullable:
return '?%s' % element_type
return '%s' % element_type
|
0e2c2d48dc22d209053d06fda354e4df9912144a
| 3,643,321
|
def unadmin(bot, input):
"""Removes person from admins list, owner only"""
if not input.owner: return False
bot.config.set_del('admins',input.group(2).lower())
bot.reply("Unadmin'd {0}".format(input.group(2)))
|
1a74ab0a3d3d1b41dd6d1f065b71a48557af84ed
| 3,643,322
|
def sim_beta_ratio(table, threshold, prior_strength, hyperparam, N,
return_bayes=False):
"""
    Calculates simulated ratios of match probabilities using a beta
distribution and returns corresponding means and 95% credible
intervals, posterior parameters, Bayes factor
Parameters
------------
table : 2x2 numpy array
corresponds to contingency table,
for example,
False True
GroupA 5 4
GroupB 3 4
contains frequency counts: [[5, 4], [3, 4]]
threshold : float
value to split continuous variable on
    prior_strength : string from {'weak', 'strong', 'uniform'}
        prior distribution to be 'informative'/'noninformative'/'uniform';
        any other value falls back to the user-specified `hyperparam`
    hyperparam : tuple (alpha1, beta1, alpha2, beta2)
        prior hyperparameters used when `prior_strength` is not one of the
        presets above
    N : int
        number of posterior samples to draw for each simulation
    return_bayes : bool
        if True, also append a Bayes factor for the ratio lying in [0.8, 1.25]
Returns
------------
list : means and 95% credible intervals, posterior parameters, Bayes factor
"""
n_sim = N
# store array of total counts in table by category
category_counts = table.sum(axis=1, dtype=float)
# store array of number of matches by categories
match_counts = table[:, 1]
# set hyperparameters according to threshold and sample size
if prior_strength == 'weak':
# weakly informative prior, has standard deviation
# of 0.1 at alpha / (alpha + beta) = 0.5
# coefficient 24 is empirically derived for best smoothing at small N
alpha1, beta1 = (1 - threshold) * 24., threshold * 24.
alpha2, beta2 = (1 - threshold) * 24., threshold * 24.
elif prior_strength == 'strong':
# observing 'idealized' dataset of size n
alpha1 = round((1 - threshold) * category_counts[0])
beta1 = round(threshold * category_counts[0])
alpha2 = round((1 - threshold) * category_counts[1])
beta2 = round(threshold * category_counts[1])
elif prior_strength == 'uniform':
# uniform prior
alpha1, beta1 = 1, 1
alpha2, beta2 = 1, 1
else:
# user specified, defaults to uniform
alpha1, beta1, alpha2, beta2 = hyperparam
# draw posterior sample of matching probabilities
post_alpha1 = alpha1 + match_counts[0]
post_beta1 = beta1 + category_counts[0] - match_counts[0]
post_alpha2 = alpha2 + match_counts[1]
post_beta2 = beta2 + category_counts[1] - match_counts[1]
p1 = np.random.beta(post_alpha1, post_beta1, n_sim)
p2 = np.random.beta(post_alpha2, post_beta2, n_sim)
# posterior draw of ratios
p1p2 = p1 / p2
p2p1 = p2 / p1
sim_beta_ratio_metrics = [np.mean(p1p2), np.mean(p2p1),
np.std(p1p2), np.std(p2p1),
np.percentile(p1p2, 2.5),
np.percentile(p2p1, 2.5),
np.percentile(p1p2, 97.5),
np.percentile(p2p1, 97.5),
(post_alpha1, post_beta1),
(post_alpha2, post_beta2)]
if return_bayes:
# Return bayes factor for % of posterior ratios in range [.8, 1.25]
post_prob_null = np.sum((p1p2 >= 0.8) & (p1p2 <= 1.25)) / float(n_sim)
bayes_factor = post_prob_null / (1 - post_prob_null)
sim_beta_ratio_metrics.append(bayes_factor)
return sim_beta_ratio_metrics
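# Hypothetical call (the table values are invented for illustration); requires
# numpy as ``np``, as the function itself does.
def _demo_sim_beta_ratio():
    table = np.array([[5, 4], [3, 4]])
    metrics = sim_beta_ratio(table, threshold=0.5, prior_strength='uniform',
                             hyperparam=None, N=10000)
    mean_ratio_ab = metrics[0]   # posterior mean of p1 / p2
    post_params_a = metrics[8]   # (alpha, beta) of the group A posterior
    return mean_ratio_ab, post_params_a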
|
01e61719dbecb89e40bdf2688578d493c951591c
| 3,643,323
|
import mmcv
import os.path as osp
def dump_yaml_and_check_difference(obj, filename, sort_keys=False):
"""Dump object to a yaml file, and check if the file content is different
from the original.
Args:
obj (any): The python object to be dumped.
filename (str): YAML filename to dump the object to.
        sort_keys (bool): Whether to sort keys in dictionary order.
    Returns:
        bool: True if the target YAML file is different from the original
            (and was therefore rewritten).
"""
str_dump = mmcv.dump(obj, None, file_format='yaml', sort_keys=sort_keys)
if osp.isfile(filename):
file_exists = True
with open(filename, 'r', encoding='utf-8') as f:
str_orig = f.read()
else:
file_exists = False
str_orig = None
if file_exists and str_orig == str_dump:
is_different = False
else:
is_different = True
with open(filename, 'w', encoding='utf-8') as f:
f.write(str_dump)
return is_different
|
47a271a34b0a1774188a725eddf0d6698f76e04c
| 3,643,324
|
from typing import Optional, TypeVar
# ``T`` is the generic parameter of ``Artefact``/``Result``; a TypeVar is
# assumed here (the original ``from re import T`` import was spurious).
T = TypeVar("T")
def get_data(
db: Redis[bytes],
store: StorageEngine,
source: Artefact[T],
carry_error: Optional[hash_t] = None,
do_resolve_link: bool = True,
) -> Result[T]:
"""Retrieve data corresponding to an artefact."""
stream = get_stream(db, store, source.hash, carry_error, do_resolve_link)
if isinstance(stream, Error):
return stream
else:
raw = stream.read()
stream.close()
return _serdes.decode(source.kind, raw, carry_error=carry_error)
|
1bb07e01ae151f985fcd30e8cca0da1b11213459
| 3,643,325
|
def record_edit(request, pk):
"""拜访记录修改"""
user = request.session.get('user_id')
record = get_object_or_404(Record, pk=pk, user=user, is_valid=True)
if request.method == 'POST':
form = RecordForm(data=request.POST, instance=record)
if form.is_valid():
form.save()
return redirect('record')
else:
            print(form.errors.as_json())
else:
form = RecordForm(instance=record)
return render(request, 'record_edit.html', {
'form': form,
'pk': pk
})
|
d2d610e53641962e913849b4b643f38898b72a3f
| 3,643,326
|
import re
def remove_body_footer(raw):
    """
    Remove a specific body footer starting with the delimiter: -=-=-=-=-=-=-=-=-=-=-=-
    """
    body = raw[MELUSINE_COLS[0]]
    # str.replace does not interpret regular expressions; use re.sub so the
    # delimiter and everything after it are actually stripped.
    return re.sub(r'-=-=-=-=.*$', '', body, flags=re.DOTALL)
|
60161b06fe80fd526f66c796657bd9a77cc1bfb9
| 3,643,327
|
def get_strategy_name():
"""Return strategy module name."""
return 'store_type'
|
bbf1ed9f43f492561ee5c595061f74bea0f5e464
| 3,643,328
|
def pyccel_to_sympy(expr, symbol_map, used_names):
"""
Convert a pyccel expression to a sympy expression saving any pyccel objects
converted to sympy symbols in a dictionary to allow the reverse conversion
to be carried out later
Parameters
----------
expr : PyccelAstNode
The pyccel node to be translated
symbol_map : dict
Dictionary containing any pyccel objects converted to sympy symbols
used_names : Set
A set of all the names which already exist and therefore cannot
be used to create new symbols
Returns
----------
expr : sympy Object
"""
#Constants
if isinstance(expr, LiteralInteger):
return sp.Integer(expr.p)
elif isinstance(expr, LiteralFloat):
return sp.Float(expr)
#Operators
elif isinstance(expr, PyccelDiv):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return args[0] / args[1]
elif isinstance(expr, PyccelMul):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return sp.Mul(*args)
elif isinstance(expr, PyccelMinus):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return args[0] - args[1]
elif isinstance(expr, PyccelUnarySub):
arg = pyccel_to_sympy(expr.args[0], symbol_map, used_names)
return -arg
elif isinstance(expr, PyccelAdd):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return sp.Add(*args)
elif isinstance(expr, PyccelPow):
args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
return sp.Pow(*args)
elif isinstance(expr, PyccelAssociativeParenthesis):
return pyccel_to_sympy(expr.args[0], symbol_map, used_names)
elif isinstance(expr, MathCeil):
return sp.ceiling(pyccel_to_sympy(expr.args[0], symbol_map, used_names))
elif expr in symbol_map.values():
return list(symbol_map.keys())[list(symbol_map.values()).index(expr)]
elif isinstance(expr, Variable):
sym = sp.Symbol(expr.name)
symbol_map[sym] = expr
return sym
elif isinstance(expr, PyccelArraySize):
sym_name,_ = create_incremented_string(used_names, prefix = 'tmp_size')
sym = sp.Symbol(sym_name)
symbol_map[sym] = expr
return sym
elif isinstance(expr, CodeBlock):
body = (pyccel_to_sympy(b, symbol_map, used_names) for b in expr.body)
return CodeBlock(body)
elif isinstance(expr, (Comment)):
return Comment('')
elif isinstance(expr, For):
target = pyccel_to_sympy(expr.target, symbol_map, used_names)
iter_obj = pyccel_to_sympy(expr.iterable, symbol_map, used_names)
body = pyccel_to_sympy(expr.body, symbol_map, used_names)
return For(target, iter_obj, body)
elif isinstance(expr, PythonRange):
start = pyccel_to_sympy(expr.start, symbol_map, used_names)
stop = pyccel_to_sympy(expr.stop , symbol_map, used_names)
step = pyccel_to_sympy(expr.step , symbol_map, used_names)
return sp.Range(start, stop, step)
elif isinstance(expr, Assign):
lhs = pyccel_to_sympy(expr.lhs, symbol_map, used_names)
rhs = pyccel_to_sympy(expr.rhs, symbol_map, used_names)
return Assign(lhs, rhs)
elif isinstance(expr, (sp.core.basic.Atom, sp.core.operations.AssocOp, sp.Set)):
# Already translated
return expr
else:
raise TypeError(str(type(expr)))
|
1800a41d1d06fbbfd212b3b7b48ddc9f4ae07508
| 3,643,329
|
from pathlib import Path
def get_lockfile_path(repo_name: str) -> Path:
"""Get a lockfile to lock a git repo."""
if not _lockfile_path.is_dir():
_lockfile_path.mkdir()
return _lockfile_path / f"{repo_name}_lock_file.lock"
|
5f043b6976921d487054d5c9171c91eb6def19ee
| 3,643,330
|
def path_to_graph(hypernym_list, initialnoun):
"""Make a hypernym chain into a graph.
:param hypernym_list: list of hypernyms for a word as obtained from wordnet
:type hypernym_list: [str]
:param initialnoun: the initial noun (we need this to mark it as leaf in the tree)
:type initialnoun: str
:return: the linear directed graph of the chain
:rtype: :class:`networkx.DiGraph`
"""
graph = nx.DiGraph()
# mark the original word as 'seed' so we can track 'importance' later
graph.add_node(initialnoun, seed=True)
previous = initialnoun
for hypernym in reversed(hypernym_list):
# we'll take care of the distances later
graph.add_edge(previous, hypernym.name(), similarity=1.0, distance=1.0)
graph.nodes[hypernym.name()]["seed"] = False
previous = hypernym.name()
return graph
|
e80f90490e6376403d511f37a4703a7b867d2738
| 3,643,331
|
def make_3d_grid():
"""Generate a 3d grid of evenly spaced points"""
return np.mgrid[0:21, 0:21, 0:5]
|
0eccd9b2320ed28f0d08d40c9d59c22e77b607f4
| 3,643,332
|
def rho(flag, F, K, t, r, sigma):
"""Returns the Black rho of an option.
:param flag: 'c' or 'p' for call or put.
:type flag: str
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
:param r: annual risk-free interest rate
:type r: float
:param sigma: volatility
:type sigma: float
:returns: float
::
==========================================================
The text book analytical formula does not multiply by .01,
but in practice rho is defined as the change in price
for each 1 percent change in r, hence we multiply by 0.01.
==========================================================
>>> F = 49
>>> K = 50
>>> r = .05
>>> t = 0.3846
>>> sigma = 0.2
>>> flag = 'c'
>>> v1 = rho(flag, F, K, t, r, sigma)
>>> v2 = -0.0074705380059582258
>>> abs(v1-v2) < .000001
True
>>> flag = 'p'
>>> v1 = rho(flag, F, K, t, r, sigma)
>>> v2 = -0.011243286001308292
>>> abs(v1-v2) < .000001
True
"""
return -t * black(flag, F, K, t, r, sigma) * .01
|
62bd0fdfe76319261c89bfa33b02b57fcdafb8df
| 3,643,333
|
async def novel_series(id: int, endpoint: PixivEndpoints = Depends(request_client)):
"""
## Name: `novel_series`
    > Get information about a novel series
---
### Required:
- ***int*** **`id`**
        - Description: novel series ID
"""
return await endpoint.novel_series(id=id)
|
94859a313c823d3fdcf055390473b116ea1229e0
| 3,643,334
|
def to_raw(
y: np.ndarray,
low: np.ndarray,
high: np.ndarray,
eps: float = 1e-4
) -> np.ndarray:
"""Scale the input y in [-1, 1] to [low, high]"""
    # Warn the user if the argument is out of bounds; this shouldn't happen.
if not (np.all(y >= -np.ones_like(y) - eps) and np.all(y <= np.ones_like(y) + eps)):
logger.warning(f"argument out of bounds, {y}, {low}, {high}")
# Clip the values (in case the above warning is ignored).
y = np.clip(y, -np.ones_like(y), np.ones_like(y))
# Transform the input to [low, high].
return (y * (high - low) + (high + low)) / 2.
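# Worked example (assumes numpy as ``np`` and the module's ``logger``, as the
# function itself does): y = 0 maps to the midpoint of [low, high].
def _demo_to_raw():
    low = np.array([0.0, -2.0])
    high = np.array([10.0, 2.0])
    y = np.array([0.0, 1.0])
    assert np.allclose(to_raw(y, low, high), [5.0, 2.0])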
|
61e916f9f46582fc6b9c135ac53fff3a3939d710
| 3,643,335
|
def etched_lines(image):
"""
    Filters the given image into a drawing-like representation using an
    adaptive Gaussian threshold.
"""
block_size = 61
c = 41
blur = 7
max_value = 255
# image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
img_blur = cv2.GaussianBlur(image, (21, 21), 0, 0)
img_blend = cv2.divide(image, img_blur, scale=256)
blurred = cv2.medianBlur(img_blend, blur)
threshold_image = cv2.adaptiveThreshold(blurred, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, block_size, c)
return threshold_image
|
33858c8ee50cd6977f81cc64f55967ecd8849369
| 3,643,336
|
def get_last_position(fit, warmup=False):
"""Parse last position from fit object
Parameters
----------
fit : StanFit4Model
warmup : bool
If True, returns the last warmup position, when warmup has been done.
Otherwise function returns the first sample position.
Returns
-------
list
list contains a dictionary of last draw from each chain.
"""
fit._verify_has_samples()
positions = []
extracted = fit.extract(permuted=False, pars=fit.model_pars, inc_warmup=warmup)
draw_location = -1
if warmup:
draw_location += max(1, fit.sim["warmup"])
chains = fit.sim["chains"]
for i in range(chains):
extract_pos = {key : values[draw_location, i] for key, values in extracted.items()}
positions.append(extract_pos)
return positions
|
28ec10c4f90ac786053334f593ffd3ade27b1fc5
| 3,643,337
|
def find_fast_route(objective, init, alpha=1, threshold=1e-3, max_iters=1e3):
"""
Optimizes FastRoute objective using Newton’s method optimizer to
find a fast route between the starting point and finish point.
Arguments:
objective : an initialized FastRoute object with preset start and finish points,
velocities and initialization vector.
init : (N-1,) numpy array, initial guess for the crossing points
alpha : step size for the NewtonOptimizer
threshold : stopping criteria |x_(k+1)- x_k |<threshold
max_iters : maximal number of iterations (stopping criteria)
Return:
route_time : scalar
route : (N-1,) numpy array with x coordinate of the optimal route,
i.e., a vector of x-coordinates of crossing points (not including start and finish point)
num_iters : number of iteration
"""
opt = NewtonOptimizer(func=objective, alpha=alpha, init=init)
route_time, route, num_iters = opt.optimize(threshold=threshold, max_iters=max_iters)
return route_time, route, num_iters
|
ab0d8364a7aab80a735b2b468a45abb5e30b396b
| 3,643,338
|
import os
import re
def get_version(filename="telepresence/__init__.py"):
"""Parse out version info"""
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, filename)) as initfile:
for line in initfile.readlines():
match = re.match("__version__ *= *['\"](.*)['\"]", line)
if match:
return match.group(1)
|
829c2aad31bffc820a204110306cbbe92feb017b
| 3,643,339
|
import os
def find_child_files(path, searchRecursively=False, wildCardPattern="."):
"""在当前目录中查找文件,若选择searchRecursively则代表着搜索包含子目录, wildCardPattern意思是只搜索扩展名为".xxx"的文件,也可留空代表搜索全部文件. """
all_search_list = ['.','.*','*','']
tmp = list()
if not exists_as_dir(path):
return tmp
for fpath, _, fnames in os.walk(get_full_path_with_ext(path)):
        if fpath != get_full_path_with_ext(path) and not searchRecursively:
break
for filename in fnames:
if wildCardPattern in all_search_list:
pass
else:
if wildCardPattern[0] != '.':
wildCardPattern = '.' + wildCardPattern
                if not filename.endswith(wildCardPattern) and wildCardPattern != '.':
continue
tmp.append( os.path.join(fpath,filename) )
return tmp
|
b6fda66cf95978f778844d7511317658b73d8193
| 3,643,340
|
def check_tx_success(result):
"""
Checks if function :meth:`UcanServer.write_can_msg_ex` successfully wrote all CAN message(s).
:param ReturnCode result: Error code of the function.
:return: True if CAN message(s) was(were) written successfully, otherwise False.
:rtype: bool
"""
return result.value == ReturnCode.SUCCESSFUL
|
815293aafa42b7323414e1cb96d6d150ef16bb48
| 3,643,341
|
from typing import Iterable
from typing import Optional
def cache_contains_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable,
query_id: Optional[int] = None) -> 'APIResult':
"""
Returns a value indicating whether all given keys are present in cache.
:param connection: connection to Ignite server,
:param cache_info: cache meta info,
:param keys: a list of keys or (key, type hint) tuples,
:param query_id: a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status and a bool value
retrieved on success: `True` when all keys are present, `False` otherwise,
non-zero status and an error description on failure.
"""
return __cache_contains_keys(connection, cache_info, keys, query_id)
|
48fffa703d7cd120d0faa898e7e94355ec663a84
| 3,643,342
|
def discount_cumsum_trun(x, discount, length):
"""
compute discounted cumulative sums of vectors.
truncate x in length array
:param x:
vector x,
[x0,
x1,
x2,
x3,
x4]
    :param discount:
        discount factor
    :param length:
vector length,
[3,
2]
:return:
truncated by the vector length
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2,
x3 + discount * x4,
x4]
"""
ret_arr = x.copy()
total_len = 0
    for seg_len in length:
        tmp_list = ret_arr[total_len:total_len + seg_len]
        ret_arr[total_len:total_len + seg_len] = discount_cumsum(tmp_list, discount)
        total_len += seg_len
return ret_arr
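# Usage sketch matching the docstring example. The module's ``discount_cumsum``
# helper is not shown in this snippet; the stand-in below implements the usual
# discounted cumulative sum and is only an assumption about its behaviour.
def discount_cumsum(x, discount):
    out = np.zeros(len(x))
    acc = 0.0
    for i in reversed(range(len(x))):
        acc = x[i] + discount * acc
        out[i] = acc
    return out
def _demo_discount_cumsum_trun():
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    result = discount_cumsum_trun(x, discount=0.5, length=[3, 2])
    # [1 + 0.5*2 + 0.25*3, 2 + 0.5*3, 3, 4 + 0.5*5, 5]
    assert np.allclose(result, [2.75, 3.5, 3.0, 6.5, 5.0])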
|
589ac22b19705a7881f91cffe78bed5accafc661
| 3,643,343
|
def get_canonical(flop):
"""
Returns the canonical version of the given flop.
Canonical flops are sorted. The first suit is 'c' and, if applicable,
the second is 'd' and the third is 'h'.
Args:
flop (tuple): three pokertools.Card objects
Returns
A tuple of three pokertools.Card objects which represent
the canonical version of the given flop.
>>> flop = (CARDS['Ks'], CARDS['2c'], CARDS['3s'])
>>> get_canonical(flop)
(<Card: 2c>, <Card: 3d>, <Card: Kd>)
"""
card1, card2, card3 = sorted(flop)
A, B, C = "cdh"
if card1.suit == card2.suit == card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + A],
)
elif card1.suit == card2.suit != card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + B],
)
elif card1.suit == card3.suit != card2.suit:
# Special case: if the 2nd and 3rd cards are a pair e.g. the flop is
# [Jc, Qd, Qc], then our suit changes have resulted in an
# unsorted flop! The correct canonical form is [Jc, Qc, Qd].
return tuple(sorted([
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + A],
]))
elif card1.suit != card2.suit == card3.suit:
# Special case: if the 1st and 2nd cards are a pair e.g. flop is
# [2c, 2d, 8d], that is isomorphic with those cards being switched
# e.g. [2d, 2c, 8d] -- which forms the suit pattern already
# covered above: 'ABA'. Thus, it can be transformed to [2c, 2d, 8c].
# This version has higher priority lexicographically -- it has more
# clubs! To make this change we can simply change the suit of the
# third card to 'c'.
if card1.rank == card2.rank:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + A],
)
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + B],
)
elif card1.suit != card2.suit != card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + C],
)
|
4a797c27e8c32dff18412128d2823a1592c2468e
| 3,643,344
|
import os
def normalize_path(path):
"""Normalize and return absolute path.
Expand user symbols like ~ and resolve relative paths.
"""
return os.path.abspath(os.path.expanduser(os.path.normpath(path)))
|
108a820df621cca2238feadf8a45eef59e9aa883
| 3,643,345
|
import importlib
def _version(lib_name):
"""
Returns the version of a package.
If version cannot be determined returns "available"
"""
lib = importlib.import_module(lib_name)
if hasattr(lib, "__version__"):
return lib.__version__
else:
return "available"
|
cec49d2de66d2fc3a7ed3c89259711bdf40bbe8e
| 3,643,346
|
def DeltaDeltaP(y, treatment, left_mask):
"""Absolute difference between ATEs of two groups."""
return np.abs(
ATE(y[left_mask], treatment[left_mask])
- ATE(y[~left_mask], treatment[~left_mask])
)
|
cd7816d2aa02cfb72dccf364cc73e07d596cc6ec
| 3,643,347
|
def start(isdsAppliance, serverID='directoryserver', check_mode=False, force=False):
"""
Restart the specified appliance server
"""
if force is True or _check(isdsAppliance, serverID, action='start') is True:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_post("Restarting the service " + serverID,
"/widgets/server/start/" + serverID,
{})
return isdsAppliance.create_return_object()
|
b59941eafff24d9389f91edaa38de7b35eb48660
| 3,643,348
|
import sys
from aiida.common.exceptions import NotExistent
from aiida.orm import Code
from aiida.orm.querybuilder import QueryBuilder
def test_and_get_codenode(codenode, expected_code_type, use_exceptions=False):
"""
Pass a code node and an expected code (plugin) type. Check that the
code exists, is unique, and return the Code object.
:param codenode: the name of the code to load (in the form label@machine)
:param expected_code_type: a string with the plugin that is expected to
be loaded. In case no plugins exist with the given name, show all existing
plugins of that type
:param use_exceptions: if True, raise a ValueError exception instead of
calling sys.exit(1)
:return: a Code object
"""
try:
if codenode is None:
raise ValueError
code = codenode
if code.get_input_plugin_name() != expected_code_type:
raise ValueError
except (NotExistent, ValueError):
qb = QueryBuilder()
qb.append(Code,
filters={'attributes.input_plugin':
{'==': expected_code_type}},
project='*')
valid_code_labels = ["{}@{}".format(c.label, c.get_computer().name)
for [c] in qb.all()]
if valid_code_labels:
msg = ("Pass as further parameter a valid code label.\n"
"Valid labels with a {} executable are:\n".format(
expected_code_type))
msg += "\n".join("* {}".format(l) for l in valid_code_labels)
if use_exceptions:
raise ValueError(msg)
else:
print(msg)#, file=sys.stderr)
sys.exit(1)
else:
msg = ("Code not valid, and no valid codes for {}.\n"
"Configure at least one first using\n"
" verdi code setup".format(
expected_code_type))
if use_exceptions:
raise ValueError(msg)
else:
print(msg)#, file=sys.stderr)
sys.exit(1)
return code
|
edebd5acb110058b147604c10ddb6bcdb035c714
| 3,643,349
|
from typing import Dict
from typing import Tuple
def reverse_crop(
    im_arr: np.ndarray, crop_details: dict
) -> Dict[str, Tuple[Image.Image, int]]:
"""Return the recovered image and the number of annotated pixels per
lat_view. If the lat_view annotation has no annotations, nothing is added
for that image."""
width = 360
height = 720 # TODO: add this variable to crop details and read it from there
recovered_images = {}
for lat_view, details in crop_details.items():
lat_view_arr = slice_arr_by_lat_view(im_arr, lat_view, width)
lat_view_arr[lat_view_arr < 0] = 255
annotated_pixels = np.count_nonzero(lat_view_arr)
if annotated_pixels == 0:
continue # no annotations on this lat_view -> skip
# reverse flip
if details["flip"]:
lat_view_arr = np.flip(lat_view_arr, axis=0)
# reverse rot90
if details["rotation"] != 0:
lat_view_arr = np.rot90(lat_view_arr, k=-details["rotation"])
new_arr = np.zeros(shape=[height, width], dtype=np.uint8)
new_arr[details["crop_start"] : details["crop_end"], :] = lat_view_arr
new_im = Image.fromarray(new_arr)
recovered_images[lat_view] = (new_im, annotated_pixels)
return recovered_images
|
45e373ae8fd0100191796f29f536ab15481a64d4
| 3,643,350
|
def get_dates_keyboard(dates):
"""
    Build an inline keyboard from the given dates.
"""
buttons = []
for date in dates:
button = InlineKeyboardButton(
text=date['entry_date'],
callback_data=date_callback.new(date_str=date['entry_date'], entry_date=date['entry_date'])
)
buttons.append(button)
keyboard = InlineKeyboardMarkup(inline_keyboard=[
buttons[:3],
buttons[3:],
])
return keyboard
|
41a87c64e603d6b19921c3a960743d3d27f2e373
| 3,643,351
|
import logging
import arrow
import requests
def fetch_price(zone_key='FR', session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""Requests the last known power price of a given country
Arguments:
----------
zone_key: used in case a parser is able to fetch multiple countries
session: request session passed in order to re-use an existing session
target_datetime: the datetime for which we want production data. If not
provided, we should default it to now. If past data is not available,
raise a NotImplementedError. Beware that the provided target_datetime is
UTC. To convert to local timezone, you can use
`target_datetime = arrow.get(target_datetime).to('America/New_York')`.
Note that `arrow.get(None)` returns UTC now.
logger: an instance of a `logging.Logger` that will be passed by the
backend. Information logged will be publicly available so that correct
execution of the logger can be checked. All Exceptions will automatically
be logged, so when something's wrong, simply raise an Exception (with an
explicit text). Use `logger.warning` or `logger.info` for information
that can useful to check if the parser is working correctly. A default
logger is used so that logger output can be seen when coding / debugging.
Returns:
--------
If no data can be fetched, any falsy value (None, [], False) will be
ignored by the backend. If there is no data because the source may have
changed or is not available, raise an Exception.
A dictionary in the form:
{
'zoneKey': 'FR',
'currency': EUR,
'datetime': '2017-01-01T00:00:00Z',
'price': 0.0,
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError(
'This parser is not yet able to parse past dates')
    r = session or requests.session()
    url = 'https://api.someservice.com/v1/price/latest'
    response = r.get(url)
    assert response.status_code == 200
obj = response.json()
data = {
'zoneKey': zone_key,
'currency': 'EUR',
'price': obj['price'],
'source': 'someservice.com',
}
# Parse the datetime and return a python datetime object
data['datetime'] = arrow.get(obj['datetime']).datetime
return data
|
fa056b8186451cd8d3ab07357faa69fe7a55ab82
| 3,643,352
|
import logging
from pathlib import Path
import os
import requests
import base64
import json
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, padding
def cmd_asydns(url, generate, revoke, verbose):
"""Requests a DNS domain name based on public and private
RSA keys using the AsyDNS protocol https://github.com/portantier/asydns
Example:
\b
$ habu.asydns -v
Generating RSA key ...
Loading RSA key ...
{
"ip": "181.31.41.231",
"name": "07286e90fd6e7e6be61d6a7919967c7cf3bbfb23a36edbc72b6d7c53.a.asydns.org"
}
\b
$ dig +short 07286e90fd6e7e6be61d6a7919967c7cf3bbfb23a36edbc72b6d7c53.a.asydns.org
181.31.41.231
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
#homedir = Path(pwd.getpwuid(os.getuid()).pw_dir)
homedir = Path(os.path.expanduser('~'))
dotdir = homedir / '.asydns'
dotdir.mkdir(exist_ok=True)
pub_file = dotdir / 'rsa.pub'
key_file = dotdir / 'rsa.key'
if generate or not key_file.is_file():
logging.info('Generating RSA key ...')
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
pub = key.public_key()
key_pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
pub_key = pub.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with key_file.open('w') as k:
k.write(key_pem.decode())
with pub_file.open('w') as p:
p.write(pub_key.decode())
logging.info('Loading RSA key ...')
with key_file.open("rb") as key_file:
key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend=default_backend()
)
with pub_file.open("r") as pub_file:
pub_pem = pub_file.read()
r = requests.get(url + '/api')
if r.status_code != 200:
logging.error('Error')
logging.error(r.content.decode())
return False
j = r.json()
challenge = base64.b64decode(j['challenge'])
response = key.sign(
challenge,
padding.PKCS1v15(
),
hashes.SHA224()
)
response = base64.b64encode(response).decode()
if revoke:
r = requests.delete(url + '/api', json={'pub': pub_pem, 'challenge' : j['challenge'], 'response': response})
else:
r = requests.post(url + '/api', json={'pub': pub_pem, 'challenge' : j['challenge'], 'response': response})
if r.status_code != 200:
logging.error('Error')
logging.error(r.content.decode())
return False
print(json.dumps(r.json(), indent=4))
return True
|
b8513e29ea7e97478e6ca157e18e5cf816e9e85a
| 3,643,353
|
import os
import pickle
def get_stats(method='histogram', save=True, train=True):
"""
Computes statistics, histogram, dumps the object to file and returns it
"""
if os.path.exists(FILE_STATS):
return pickle.load(open(os.path.join(FILE_STATS), "rb"))
elif train:
dataset = _get_dataset()
print("Computing statistics")
stats = Stats()
stats.fill_stats(dataset)
return _train_model(stats, method, save)
else:
return None
|
71e3920ed177494ea03da00a4b3373ad30c8215a
| 3,643,354
|
def merge_synset(wn, synsets, reason, lexfile, ssid=None, change_list=None):
"""Create a new synset merging all the facts from other synsets"""
pos = synsets[0].part_of_speech.value
if not ssid:
ssid = new_id(wn, pos, synsets[0].definitions[0].text)
ss = Synset(ssid, "in",
PartOfSpeech(pos), lexfile)
ss.definitions = [d for s in synsets for d in s.definitions]
ss.examples = [x for s in synsets for x in s.examples]
members = {}
wn.add_synset(ss)
for s in synsets:
# Add all relations
for r in s.synset_relations:
if not any(r == r2 for r2 in ss.synset_relations):
add_relation(
wn, ss, wn.synset_by_id(
r.target), r.rel_type, change_list)
# Add members
for m in wn.members_by_id(s.id):
if m not in members:
members[m] = add_entry(wn, ss, m, change_list)
add_entry(wn, ss, m, change_list)
e = [e for e in [wn.entry_by_id(e2) for e2 in wn.entry_by_lemma(m)]
if e.lemma.part_of_speech.value == pos][0]
for f in e.forms:
            if not any(f2 == f for f2 in members[m].forms):
members[m].add_form(f)
# syn behaviours - probably fix manually for the moment
if change_list:
change_list.change_synset(ss)
return ss
|
d1d7af2a83d6b7deb506fb69c7cbdb2770735f4f
| 3,643,355
|
def __correlate_uniform(im, size, output):
"""
Uses repeated scipy.ndimage.filters.correlate1d() calls to compute a uniform filter. Unlike
scipy.ndimage.filters.uniform_filter() this just uses ones(size) instead of ones(size)/size.
"""
# TODO: smarter handling of in-place convolutions?
ndi = get_ndimage_module(im)
weights = get_array_module(im).ones(size)
for axis in range(im.ndim):
ndi.correlate1d(im, weights, axis, output)
im = output
return output
|
3571d0cb6dbfbca45f4453077cb3ad789464f919
| 3,643,356
|
def clean_all(record):
""" A really messy function to make sure that the citeproc data
are indeed in the citeproc format. Basically a long list of if/...
conditions to catch all errors I have noticed.
"""
record = clean_fields(record)
for arrayed in ['ISSN']:
if arrayed in record:
record = clean_arrayed(record, arrayed)
return record
|
28ba59e808e88058c5745c444f1e58cd564c726d
| 3,643,357
|
def _create_model() -> Model:
"""Setup code: Load a program minimally"""
model = Model(initial_program, [], load=False)
engine = ApproximateEngine(model, 1, geometric_mean)
model.set_engine(engine)
return model
|
71fa7c000e6ed0cd8ad14bb0be3bb617337e7631
| 3,643,358
|
def candidate_elimination(trainingset):
"""Computes the version space containig all hypothesis
from H that are consistent with the examples in the training set"""
G = set()#set of maximally general h in H
S = set()#set of maximally specific h in H
G.add(("?","?","?","?","?","?"))
S.add(("0","0","0","0","0","0"))
for e in trainingset:
update_vs(G,S,e)
# print "-----------------"
# print "S:",S
# print "G:",G
return G,S
|
b368cea3b058cc667c41725b0fa6a6b4a51f418b
| 3,643,359
|
from pathlib import Path
def mkdir(path_str):
"""
Method to create a new directory or directories recursively.
"""
return Path(path_str).mkdir(parents=True, exist_ok=True)
|
1621fd5f4d74b739de0b17933c1804faabf44a2f
| 3,643,360
|
def get_image_with_projected_bbox3d(img, proj_bbox3d_pts=[], width=0, color=Color.White):
"""
Draw the outline of a 3D bbox on the image.
Input:
proj_bbox3d_pts: (8,2) array of projected vertices
"""
v = proj_bbox3d_pts
    if len(proj_bbox3d_pts) > 0:
draw = ImageDraw.Draw(img)
for k in range(0,4):
#http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i,j = k,(k+1)%4
draw.line([(v[i,0],v[i,1]), (v[j,0],v[j,1])], fill=color, width=width)
i,j = k+4,(k+1)%4 + 4
draw.line([(v[i,0],v[i,1]), (v[j,0],v[j,1])], fill=color, width=width)
i,j = k,k+4
draw.line([(v[i,0],v[i,1]), (v[j,0],v[j,1])], fill=color, width=width)
return img
|
2ec900c055635adbc6619f8e786e52bd820c6930
| 3,643,361
|
def process_spectrogram_params(fs, nfft, frequency_range, window_start, datawin_size):
""" Helper function to create frequency vector and window indices
Arguments:
fs (float): sampling frequency in Hz -- required
nfft (int): length of signal to calculate fft on -- required
frequency_range (list): 1x2 list - [<min frequency>, <max frequency>] -- required
window_start (1xm np.array): array of timestamps representing the beginning time for each
window -- required
datawin_size (float): seconds in one window -- required
Returns:
window_idxs (nxm np array): indices of timestamps for each window
(nxm where n=number of windows and m=datawin_size)
stimes (1xt np array): array of times for the center of the spectral bins
sfreqs (1xf np array): array of frequency bins for the spectrogram
freq_inds (1d np array): boolean array of which frequencies are being analyzed in
an array of frequencies from 0 to fs with steps of fs/nfft
"""
# create frequency vector
df = fs / nfft
sfreqs = np.arange(0, fs, df)
# Get frequencies for given frequency range
freq_inds = (sfreqs >= frequency_range[0]) & (sfreqs <= frequency_range[1])
sfreqs = sfreqs[freq_inds]
# Compute times in the middle of each spectrum
window_middle_samples = window_start + round(datawin_size / 2)
stimes = window_middle_samples / fs
# Get indexes for each window
window_idxs = np.atleast_2d(window_start).T + np.arange(0, datawin_size, 1)
window_idxs = window_idxs.astype(int)
return [window_idxs, stimes, sfreqs, freq_inds]
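# Quick sketch (assumes numpy as ``np``, as the function itself does):
# 2-second windows starting every second on a 10 Hz signal, keeping 0-5 Hz.
def _demo_process_spectrogram_params():
    fs, nfft = 10.0, 16
    window_start = np.array([0, 10, 20])   # in samples
    window_idxs, stimes, sfreqs, freq_inds = process_spectrogram_params(
        fs, nfft, [0, 5], window_start, datawin_size=20)
    assert window_idxs.shape == (3, 20)
    assert np.allclose(stimes, [1.0, 2.0, 3.0])   # window centres in seconds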
|
0e8563051a5ee4b48f7e635126ed4e6639e47bdd
| 3,643,362
|
from typing import Union
def Hellwig2022_to_XYZ(
specification: CAM_Specification_Hellwig2022,
XYZ_w: ArrayLike,
L_A: FloatingOrArrayLike,
Y_b: FloatingOrArrayLike,
surround: Union[
InductionFactors_CIECAM02, InductionFactors_Hellwig2022
] = VIEWING_CONDITIONS_HELLWIG2022["Average"],
discount_illuminant: Boolean = False,
) -> NDArray:
"""
Convert from *Hellwig and Fairchild (2022)* specification to *CIE XYZ*
tristimulus values.
Parameters
----------
specification : CAM_Specification_Hellwig2022
*Hellwig and Fairchild (2022)* colour appearance model specification.
Correlate of *Lightness* :math:`J`, correlate of *chroma* :math:`C` or
correlate of *colourfulness* :math:`M` and *hue* angle :math:`h` in
degrees must be specified, e.g. :math:`JCh` or :math:`JMh`.
XYZ_w
*CIE XYZ* tristimulus values of reference white.
L_A
Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
to be 20% of the luminance of a white object in the scene).
Y_b
Luminous factor of background :math:`Y_b` such as
:math:`Y_b = 100 x L_b / L_w` where :math:`L_w` is the luminance of the
light source and :math:`L_b` is the luminance of the background. For
viewing images, :math:`Y_b` can be the average :math:`Y` value for the
pixels in the entire image, or frequently, a :math:`Y` value of 20,
approximate an :math:`L^*` of 50 is used.
surround
Surround viewing conditions.
discount_illuminant
Discount the illuminant.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ* tristimulus values.
Raises
------
ValueError
If neither *C* or *M* correlates have been defined in the
``CAM_Specification_Hellwig2022`` argument.
Notes
-----
    +-------------------------------------+-----------------------+---------------+
    | **Domain**                          | **Scale - Reference** | **Scale - 1** |
    +=====================================+=======================+===============+
    | ``CAM_Specification_Hellwig2022.J`` | [0, 100]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Hellwig2022.C`` | [0, 100]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Hellwig2022.h`` | [0, 360]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Hellwig2022.s`` | [0, 100]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Hellwig2022.Q`` | [0, 100]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Hellwig2022.M`` | [0, 100]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
    | ``CAM_Specification_Hellwig2022.H`` | [0, 360]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
    | ``XYZ_w``                           | [0, 100]              | [0, 1]        |
    +-------------------------------------+-----------------------+---------------+
+-----------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+===========+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+-----------+-----------------------+---------------+
References
----------
:cite:`Fairchild2022`, :cite:`Hellwig2022`
Examples
--------
>>> specification = CAM_Specification_Hellwig2022(J=41.731207905126638,
... C=0.025763615829912909,
... h=217.06795976739301)
>>> XYZ_w = np.array([95.05, 100.00, 108.88])
>>> L_A = 318.31
>>> Y_b = 20.0
>>> Hellwig2022_to_XYZ(specification, XYZ_w, L_A, Y_b)
... # doctest: +ELLIPSIS
array([ 19.01..., 20... , 21.78...])
"""
J, C, h, _s, _Q, M, _H, _HC = astuple(specification)
J = to_domain_100(J)
C = to_domain_100(C)
h = to_domain_degrees(h)
M = to_domain_100(M)
L_A = as_float_array(L_A)
XYZ_w = to_domain_100(XYZ_w)
_X_w, Y_w, _Z_w = tsplit(XYZ_w)
# Step 0
# Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
RGB_w = vector_dot(MATRIX_16, XYZ_w)
# Computing degree of adaptation :math:`D`.
D = (
np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
if not discount_illuminant
else ones(L_A.shape)
)
F_L, z = viewing_conditions_dependent_parameters(Y_b, Y_w, L_A)
D_RGB = (
D[..., np.newaxis] * Y_w[..., np.newaxis] / RGB_w
+ 1
- D[..., np.newaxis]
)
RGB_wc = D_RGB * RGB_w
# Applying forward post-adaptation non-linear response compression.
RGB_aw = post_adaptation_non_linear_response_compression_forward(
RGB_wc, F_L
)
# Computing achromatic responses for the whitepoint.
A_w = achromatic_response_forward(RGB_aw)
# Step 1
if has_only_nan(M) and not has_only_nan(C):
M = (C * A_w) / 35
elif has_only_nan(M):
raise ValueError(
'Either "C" or "M" correlate must be defined in '
'the "CAM_Specification_Hellwig2022" argument!'
)
# Step 2
# Computing eccentricity factor *e_t*.
e_t = eccentricity_factor(h)
# Computing achromatic response :math:`A` for the stimulus.
A = achromatic_response_inverse(A_w, J, surround.c, z)
# Computing *P_p_1* to *P_p_2*.
P_p_n = P_p(surround.N_c, e_t, A)
P_p_1, P_p_2 = tsplit(P_p_n)
# Step 3
# Computing opponent colour dimensions :math:`a` and :math:`b`.
ab = opponent_colour_dimensions_inverse(P_p_1, h, M)
a, b = tsplit(ab)
# Step 4
# Applying post-adaptation non-linear response compression matrix.
RGB_a = matrix_post_adaptation_non_linear_response_compression(P_p_2, a, b)
# Step 5
# Applying inverse post-adaptation non-linear response compression.
RGB_c = post_adaptation_non_linear_response_compression_inverse(
RGB_a + 0.1, F_L
)
# Step 6
RGB = RGB_c / D_RGB
# Step 7
XYZ = vector_dot(MATRIX_INVERSE_16, RGB)
return from_range_100(XYZ)
|
ef5f05f32f6871eaa67bb554a23595cedf2a97b1
| 3,643,363
|
def build_exec_file_name(graph: str,
strt: str,
nagts: int,
exec_id: int,
soc_name: str = None):
"""Builds the execution file name of id `exec_id` for the given patrolling
scenario `{graph, nagts, strt}` .
Args:
        graph: name of the patrolling graph
        strt: name of the strategy
        nagts: number of agents
        exec_id: id of the execution
        soc_name: name of the society; built from `strt` and `nagts` when empty
"""
if soc_name is None or soc_name == '':
soc_name = misc.build_soc_name(strategy=strt, nagts=nagts)
return regularise_path("{}-{}-{}-{}-{}.json".format(strt,
graph,
soc_name,
str(nagts),
str(exec_id)))
|
143731bee19ad8e4b925f07d5449baff83994059
| 3,643,364
|
def set_ticks(ax, tick_locs, tick_labels=None, axis='y'):
"""Sets ticks at standard numerical locations"""
if tick_labels is None:
tick_labels = tick_locs
ax_transformer = AxTransformer()
ax_transformer.fit(ax, axis=axis)
getattr(ax, f'set_{axis}ticks')(ax_transformer.transform(tick_locs))
getattr(ax, f'set_{axis}ticklabels')(tick_labels)
ax.tick_params(axis=axis, which='both', bottom=True, top=False, labelbottom=True)
return ax
|
690179bcb2d2ca4f3b1e5b8cb03f68627168b73a
| 3,643,365
|
from typing import List
import re
def extract_discovery(value:str) -> List[dict]:
"""处理show discovery/show onu discovered得到的信息
Args:
value (str): show discovery/show onu discovered命令返回的字符串
Returns:
List[dict]: 包含字典的列表
"""
# ====================================================================================
# ----- ONU Unauth Table, SLOT = 4, PON = 8, ITEM = 1 -----
# No OnuType PhyId PhyPwd LogicId LogicPwd Why
# --- -------------- ------------ ---------- ------------------------ ------------ ---
# 1 HG6243C FHTT91fbc5e8 fiberhome fiberhome fiberhome 1
# Command executes success.
# ====================================================================================
# ----- ONU Unauth Table, SLOT = 4, PON = 8, ITEM = 1 -----
# No OnuType PhyId PhyPwd LogicId LogicPwd Why
# --- -------------- ------------ ---------- ------------------------ ------------ ---
# 1 HG6243C FHTT91fbc5e8 fiberhome fiberhome fiberhome 1
#
# ====================================================================================
# ----- ONU Unauth Table, SLOT = 4, PON = 8, ITEM = 6 -----
# No OnuType PhyId PhyPwd LogicId LogicPwd Why
# --- -------------- ------------ ---------- ------------------------ ------------ ---
# 1 5506-04-F1 FHTT033178b0 fiberhome fiberhome fiberhome 1
# 2 HG6243C FHTT92f445c8 fiberhome fiberhome fiberhome 1
# 3 5506-10-A1 FHTT00010104 fiberhome fiberhome fiberhome 1
# 4 5506-10-A1 FHTT000aae64 fiberhome fiberhome 1
# 5 HG6243C FHTT91fbc5e8 fiberhome fiberhome fiberhome 1
# 6 5506-02-F FHTT0274ab18 wangran3 12345678 1
# ====================================================================================
    slotPortExp = re.compile(r'SLOT = (\d+), PON = (\d+)')
    titleExp = re.compile(r'(No)\s+(OnuType)\s+(PhyId)\s+(PhyPwd)\s+(LogicId)\s+(LogicPwd)\s+(Why)\s*')
    valueExp = re.compile(r'([\d\s]{3,3})\s([\w\s-]{14,14})\s([\w\s]{12,12})\s([\w\s]{10,10})\s([\w\s]{24,24})\s([\w\s]{12,12})\s([\d\s]{1,3})')
lines = value.splitlines()
ret = [ ]
titles = None
slot, port = None, None
for line in lines:
match = slotPortExp.search(line)
if match:
slot, port = match.groups()
        if titles is None:
match = titleExp.match(line)
if match:
titles = match.groups()
continue
else:
match = valueExp.match(line)
if match:
values = match.groups()
                ret.append({})
                for k, v in zip(titles, values):
                    ret[-1][k.strip()] = v.strip()
                ret[-1]['SLOT'] = slot
                ret[-1]['PON'] = port
continue
return ret
|
6107d194d10e6b7c1c6e33f7151214152e5bff7d
| 3,643,366
|
import networkx as nx
def dict_to_networkx(data):
"""
Convert data into networkx graph
Args:
data: data in dictionary type
Returns: networkx graph
"""
data_checker(data)
G = nx.Graph(data)
return G
|
0a3c670d3bad87bb18212dc6d2e47ac5a1ccc413
| 3,643,367
|
import urllib
def to_url_slug(string):
"""Transforms string into URL-safe slug."""
slug = urllib.parse.quote_plus(string)
return slug
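A minimal usage sketch (added for illustration, not part of the original snippet); `quote_plus` encodes spaces as '+' and reserved characters as percent-escapes:
slug = to_url_slug("hello world & more")
assert slug == "hello+world+%26+more"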
|
0976e3d1568f793fa946be9fa67b40cc82e6f4f5
| 3,643,368
|
def is_wrapping(wrapper):
"""Determines if the given callable is a wrapper for another callable"""
return hasattr(wrapper, __WRAPPED)
|
16dcff38253424f6b93cee2a887aa7d91afd4f44
| 3,643,369
|
from conekt.models.relationships.sequence_go import SequenceGOAssociation
from flask import render_template
# NOTE: `Sequence` below is the project's SQLAlchemy sequence model (it exposes `.query`),
# not `typing.Sequence`; its project-specific import is not shown in this snippet.
def sequence_view(sequence_id):
"""
Get a sequence based on the ID and show the details for this sequence
:param sequence_id: ID of the sequence
"""
current_sequence = Sequence.query.get_or_404(sequence_id)
go_associations = current_sequence.go_associations.group_by(SequenceGOAssociation.go_id,
SequenceGOAssociation.evidence,
SequenceGOAssociation.source).all()
# to avoid running long count queries, fetch relations here and pass to template
return render_template('sequence.html',
sequence=current_sequence,
go_associations=go_associations,
interpro_associations=current_sequence.interpro_associations.all(),
families=current_sequence.families.all(),
expression_profiles=current_sequence.expression_profiles.all(),
network_nodes=current_sequence.network_nodes.all(),
coexpression_clusters=current_sequence.coexpression_clusters.all(),
ecc_query_associations=current_sequence.ecc_query_associations.all()
)
|
c9493376b8df2b9dc7585d8b380e54ce4d20f473
| 3,643,370
|
def horner(n,c,x0):
"""
Parameters
----------
n : integer
degree of the polynomial.
    c : list of float
        coefficients of the polynomial, ordered from the constant term c[0] up to c[n].
x0 : float
where we are evaluating the polynomial.
Returns
-------
y : float
the value of the function evaluated at x0.
z : float
the value of the derivative evaluated at x0.
"""
y=c[n]
z=c[n]
for i in range(n-1,0,-1):
y= x0*y+c[i]
z=x0*z+y
y=x0*y+c[0] #this computes the b0
return y,z
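An illustrative check (added): for p(x) = 2x^2 + 3x + 1, coefficients are passed low-to-high, so p(2) = 15 and p'(2) = 11:
value, derivative = horner(2, [1, 3, 2], 2.0)
assert (value, derivative) == (15.0, 11.0)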
|
adf3f3772d12d5bed0158045ad480cee8454cb5c
| 3,643,371
|
def linlin(x, smi, sma, dmi, dma):
"""TODO
Arguments:
x {float} -- [description]
smi {float} -- [description]
sma {float} -- [description]
dmi {float} -- [description]
dma {float} -- [description]
Returns:
float -- [description]
"""
return (x-smi)/(sma-smi)*(dma-dmi) + dmi
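A quick illustrative check (added): mapping the midpoint of [0, 1] into [100, 200] gives 150:
assert linlin(0.5, 0.0, 1.0, 100.0, 200.0) == 150.0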
|
10f375e961fa79b3b8bee5eb46f9f3af6663d893
| 3,643,372
|
import gzip
def _compression_safe_opener(fname):
"""Determine whether to use *open* or *gzip.open* to read
the input file, depending on whether or not the file is compressed.
"""
f = gzip.open(fname, "r")
try:
f.read(1)
opener = gzip.open
except IOError:
opener = open
finally:
f.close()
return opener
|
4c44da2ae15c63ccd6467e6e893a3c590c20a7e9
| 3,643,373
|
import sys
def gen_headers(value_type, value, header_type="PacketFilter", direction=None, notFilter=False):
"""
helper function constructs json header format
value: a STRING corresponding to value_type
direction: "src" or "dst"
Parameters
----------
value_type : string
        a header field name; commonly used values include:
        ipv4_src | ipv4_dst | ipv6_src | ipv6_dst | mac_src | mac_dst | tp_src | tp_dst | eth_type | vlan_vid | ip_proto
value : string
the value of the corresponding value_type.
header_type : string, optional
DESCRIPTION. The default is "PacketFilter". "PacketAliasFilter" needs corresponding alias set
direction : string, optional
DESCRIPTION. Either "src" or "dst"
notFilter : boolean, optional
DESCRIPTION. The default is False. If set to True negates the header value_type and value.
Returns
-------
dict
constructed header dict usable for fwdApi.
"""
header={}
header['type'] = header_type
if header_type == "PacketFilter":
header['values'] = {str(value_type): [str(value)]}
elif header_type == "PacketAliasFilter":
header['value'] = value
else:
sys.exit("header_type is either 'PacketFilter' or 'PacketAliasFilter'")
if direction:
header['direction'] = direction
if notFilter == True:
        notHeader = {}
notHeader['type'] = "NotFilter"
notHeader['clause'] = header
return notHeader
return header
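An illustrative call (added); it only exercises the dict construction above, and the address used is a placeholder:
hdr = gen_headers("ipv4_src", "10.0.0.1", direction="src")
# hdr == {'type': 'PacketFilter', 'values': {'ipv4_src': ['10.0.0.1']}, 'direction': 'src'}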
|
292d34bc44d51685633d7772439fc90d5e92edaf
| 3,643,374
|
from typing import List
import json  # note: `json.parse_json` below is not a stdlib function; the original project presumably supplies its own json helper that returns a `OneOf`
def read_payload(payload: str) -> OneOf[Issue, List[FileReport]]:
"""Transform an eslint payload to a list of `FileReport` instances.
Args:
payload: The raw payload from eslint.
Returns:
A `OneOf` containing an `Issue` or a list of `FileReport` instances.
"""
return one_of(lambda: [
[
FileReport(
file_path=error['filePath'],
violations=[
Violation(
msg['ruleId'],
msg['message'],
msg['line'],
msg['column'],
error['filePath'],
)
for msg in error['messages']
],
)
for error in json_payload
]
for json_payload in json.parse_json(payload)
])
|
809e4db54cb8d4c737d9eea7f77f1a1846f24589
| 3,643,375
|
def _ssim_per_channel(img1, img2, img3, max_val=1.0, mode='test',compensation=1):
"""Computes SSIM index between img1 and img2 per color channel.
This function matches the standard SSIM implementation from:
Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image
quality assessment: from error visibility to structural similarity. IEEE
transactions on image processing.
Details:
- 11x11 Gaussian filter of width 1.5 is used.
- k1 = 0.01, k2 = 0.03 as in the original paper.
  Args:
    img1: First image batch.
    img2: Second image batch.
    img3: Third image batch, used by the custom SSIM helper.
    max_val: The dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
    mode: 'train', 'test' (default) or 'debug'; in 'debug' mode the standard SSIM
      terms are also computed and five tensors are returned for comparison.
    compensation: compensation factor forwarded to the SSIM helper.
  Returns:
    A pair of tensors containing channel-wise SSIM and contrast-structure
    values. The shape is [..., channels].
"""
filter_size = constant_op.constant(11, dtype=dtypes.int32)
filter_sigma = constant_op.constant(1.5, dtype=img1.dtype)
  shape1, shape2, shape3 = array_ops.shape_n([img1, img2, img3])
checks = [
control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8),
control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8),
      control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
          shape3[-3:-1], filter_size)), [shape3, filter_size], summarize=8)]
# Enforce the check to run before computation.
with ops.control_dependencies(checks):
img1 = array_ops.identity(img1)
# TODO(sjhwang): Try to cache kernels and compensation factor.
kernel = _fspecial_gauss(filter_size, filter_sigma)
kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1])
# The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`,
# but to match MATLAB implementation of MS-SSIM, we use 1.0 instead.
#compensation = 1.0
# TODO(sjhwang): Try FFT.
# TODO(sjhwang): Gaussian kernel is separable in space. Consider applying
  # 1-by-n and n-by-1 Gaussian filters instead of an n-by-n filter.
def reducer(x):
shape = array_ops.shape(x)
x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0))
y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
return array_ops.reshape(y, array_ops.concat([shape[:-3],
array_ops.shape(y)[1:]], 0))
#luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation)
if mode == 'debug':
luminance_gt, cs_gt = _ssim_helper(img1, img2, reducer, max_val, compensation)
luminance, cs = _my_ssim_helper(img1, img1-img2, img1-img3, reducer, max_val, compensation, mode='train')
else:
luminance, cs = _my_ssim_helper(img1, img2, img3, reducer, max_val, compensation, mode)
if mode == 'debug':
axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
ssim_val_gt = math_ops.reduce_mean(luminance_gt * cs_gt, axes)
lm_gt = math_ops.reduce_mean(luminance_gt, axes)
cs_gt = math_ops.reduce_mean(cs_gt, axes)
lm = math_ops.reduce_mean(luminance, axes)
cs = math_ops.reduce_mean(cs, axes)
return lm_gt, cs_gt, lm, cs, ssim_val_gt
else:
# Average over the second and the third from the last: height, width.
axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
#ssim_val = math_ops.reduce_mean(luminance * cs, axes)
ssim_val = math_ops.reduce_mean(luminance + cs, axes)
print('ssim_shape',ssim_val.shape)
cs = math_ops.reduce_mean(cs, axes)
return ssim_val, cs
|
04982dbac0d4d50e0ccaa20733b90ef56cd4084d
| 3,643,376
|
from typing import Iterable
from typing import Any
from typing import Iterator
import itertools
def prepend(
iterable: Iterable[Any],
value: Any,
*,
times: int = 1,
) -> Iterator[Any]:
"""Return an iterator with a specified value prepended.
Arguments:
iterable: the iterable to which the value is to be prepended
value: the value to prepend to the iterable
Keyword Arguments:
times: number of times to prepend the value
(optional; default is 1)
Returns:
iterator prepending the specified value(s) to the items of the iterable
Examples:
>>> list(prepend(range(5), -1))
[-1, 0, 1, 2, 3, 4]
>>> list(prepend(['off to work we go'], 'hi ho', times=2))
['hi ho', 'hi ho', 'off to work we go']
"""
return itertools.chain([value] * times, iterable)
|
659bc3616238f5e40865505c006c1369f20e33d3
| 3,643,377
|
import numpy as _np
from skimage.transform import warp
def apply_transform(transform, source, target,
fill_value=None, propagate_mask=False):
"""Applies the transformation ``transform`` to ``source``.
The output image will have the same shape as ``target``.
Args:
transform: A scikit-image ``SimilarityTransform`` object.
source (numpy array): A 2D numpy array of the source image to be
transformed.
target (numpy array): A 2D numpy array of the target image. Only used
to set the output image shape.
fill_value (float): A value to fill in the areas of aligned_image
where footprint == True.
        propagate_mask (bool): Whether to propagate the mask in source.mask
            onto footprint.
Return:
A tuple (aligned_image, footprint).
aligned_image is a numpy 2D array of the transformed source
footprint is a mask 2D array with True on the regions
with no pixel information.
"""
if hasattr(source, 'data') and isinstance(source.data, _np.ndarray):
source_data = source.data
else:
source_data = source
if hasattr(target, 'data') and isinstance(target.data, _np.ndarray):
target_data = target.data
else:
target_data = target
aligned_image = warp(source_data, inverse_map=transform.inverse,
output_shape=target_data.shape, order=3, mode='constant',
cval=_np.median(source_data), clip=False,
preserve_range=True)
footprint = warp(_np.zeros(source_data.shape, dtype='float32'),
inverse_map=transform.inverse,
output_shape=target_data.shape,
cval=1.0)
footprint = footprint > 0.4
if hasattr(source, 'mask') and propagate_mask:
source_mask = _np.array(source.mask)
if source_mask.shape == source_data.shape:
source_mask_rot = warp(source_mask.astype('float32'),
inverse_map=transform.inverse,
output_shape=target_data.shape,
cval=1.0)
source_mask_rot = source_mask_rot > 0.4
footprint = footprint | source_mask_rot
if fill_value is not None:
aligned_image[footprint] = fill_value
return aligned_image, footprint
|
97843939a6e03389d8c4741a04cea77ac7e1e0c4
| 3,643,378
|
def _with_extension(base: str, extension: str) -> str:
"""
Adds an extension to a base name
"""
if "sus" in base:
return f"{extension}{base}"
else:
return f"{base}{extension}"
|
5a1253763808127f296c3bcb04c07562346dea2d
| 3,643,379
|
def putin_rfid_no_order_api():
"""
无订单的情况下入库, 自动创建订单(类型为生产入库), 订单行入库
post req: withlock
{
lines: [{line_id:~, qty, location, lpn='', sku,
rfid_list[rfid1, rfid2, rfid3...],
rfid_details[{rfid1, weight, gross_weight, qty_inner}, {rfid2}, {rfid3}...}],
}...]
w_user_code,
w_user_name
}
sample:{
lines: [
{qty, sku, location:~, rfid_details[{rfid1, weight, gross_weight, qty_inner}, ], }
]
}
"""
if request.method == 'POST':
is_overcharge = ('overcharge' in request.path) or g.owner.is_overcharge
is_enable_fast_stockin_qty_inner = g.owner.is_enable_fast_stockin_qty_inner
data = request.json.pop('lines', [])# [{line_id, qty, location, lpn=''}...]
w_user_code = request.json.pop('w_user_code', None)
w_user_name = request.json.pop('w_user_name', None)
        # When only a single RFID is being stocked in, check whether it was already put into stock.
if len(data) == 1:
r_details = data[0].get('rfid_details', [])
if len(r_details) == 1:
rfid0 = r_details[0]['rfid']
inv0 = InvRfid.query.t_query.filter_by(rfid=rfid0).first()
if inv0 and inv0.qty == 1:
                    return json_response({'status': 'fail', 'msg': u'Already stocked in'})
ok, order = StockinAction.create_stockin({'xtype': 'produce'}, g)
db.session.add(order)
db.session.flush()
action = StockinAction(order)
for xd in data:
d = DictNone(xd)
if d.get('qty', 0) <= 0:
continue
            # Populate rfid_list when only rfid_details was provided.
rfid_details = {}
if not d.get('rfid_list', None) and d.get('rfid_details', None):
r_details = d.get('rfid_details', [])
rfid_list = [r['rfid'] for r in r_details]
d['rfid_list'] = rfid_list
rfid_details = {r['rfid']:r for r in r_details}
# ('spec','brand','unit','style','color','size','level')
ld = DictNone()
ld.sku = d.sku
ld.qty = 1 if is_enable_fast_stockin_qty_inner else (d.qty or 1)
ld.location_code = d.location or ''
ld.batch_code = d.batch_code or ''
ld.spec = d.spec or ''
ld.style = d.style or ''
ld.color = d.color or ''
ld.size = d.size or ''
ld.level = d.level or ''
ld.twisted = d.twisted or ''
line = StockinAction.create_stockin_line(ld, order, poplist=None, is_add=True)
db.session.add(line)
db.session.flush()
# line_id, qty, location, lpn='', line=None
is_overcharge, qty_off, qty_real = action.putin(line_id=None, line=line, qty=ld.qty, location=(ld.location_code or 'STAGE'), \
rfid_list=d['rfid_list'], rfid_details=rfid_details, \
w_user_code=w_user_code, w_user_name=w_user_name, is_overcharge=is_overcharge)
d['qty_real'] = qty_real
order.state = 'all'
db.session.flush()
finish = True
for line in order.lines:
            if line.qty_real < line.qty:  # if any line received less than expected, the order is not finished
finish = False
            # touch the computed weight / inner-quantity properties so they are evaluated
            _ = line.weight, line.gross_weight, line.qty_inner
        # same for the order-level totals
        _ = order.weight, order.gross_weight, order.qty_inner
order.state = 'all' if finish else 'part'
if order.state == 'all':
order.finish()
db.session.commit()
return json_response({'status': 'success', 'msg': u'ok', 'data':data})
|
6637fba766e86bc25dae733d7ddc102114e79e27
| 3,643,380
|
def GuessLanguage(filename):
""" Attempts to Guess Langauge of `filename`. Essentially, we do a
filename.rsplit('.', 1), and a lookup into a dictionary of extensions."""
try:
(_, extension) = filename.rsplit('.', 1)
except ValueError:
raise ValueError("Could not guess language as '%s' does not have an \
extension"%filename)
return {'c' : 'c'
,'py' : 'python'}[extension]
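An illustrative call (added):
assert GuessLanguage("script.py") == "python"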
|
3cd1289ab3140256dfbeb3718f30a3ac3ffca6f2
| 3,643,381
|
import numpy
def extract_data_size(series, *names):
"""
Determines series data size from the first available property, which
provides direct values as list, tuple or NumPy array.
Args:
series: perrot.Series
Series from which to extract data size.
names: (str,)
Sequence of property names to check.
Returns:
int or None
Determined data size.
"""
# get size
for name in names:
# check property
if not series.has_property(name):
continue
# get property
prop = series.get_property(name, native=True)
# get size
if isinstance(prop, (list, tuple, numpy.ndarray)):
return len(prop)
# no data
return None
|
39d503b359318d9dc118481baa7f99a43b926711
| 3,643,382
|
def uintToQuint (v, length=2):
""" Turn any integer into a proquint with fixed length """
assert 0 <= v < 2**(length*16)
return '-'.join (reversed ([u16ToQuint ((v>>(x*16))&0xffff) for x in range (length)]))
|
96f707ed527e1063d055ab1b6d1f8a17308ed772
| 3,643,383
|
import hashlib
import base64
def alphanumeric_hash(s: str, size=5):
"""Short alphanumeric string derived from hash of given string"""
hash_object = hashlib.md5(s.encode('ascii'))
s = base64.b32encode(hash_object.digest())
result = s[:size].decode('ascii').lower()
return result
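A small illustrative property check (added); the exact value depends on the MD5 digest, but the result is deterministic and five characters long:
tag = alphanumeric_hash("example-key")
assert len(tag) == 5 and tag == alphanumeric_hash("example-key")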
|
915159aa2242eedfe8dcba682ae4bcf4fdebc3c4
| 3,643,384
|
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
"""A reply handler for commands that haven't been added to the reply list.
Returns empty strings for stdout and stderr.
"""
return '', ''
|
e73bd970030c4f78aebf2913b1540fc1b370d906
| 3,643,385
|
from collections import defaultdict
from typing import Dict, List
from pathlib import Path
def require(section: str = "install") -> List[str]:
""" Requirements txt parser. """
require_txt = Path(".").parent / "requirements.txt"
if not Path(require_txt).is_file():
return []
requires = defaultdict(list) # type: Dict[str, List[str]]
with open(str(require_txt), "rb") as fh:
key = "" # type: str
for line in fh.read().decode("utf-8").split("\n"):
            if not line.strip():
                # empty line
                continue
            if line[0] == "#":
                # section key
                key = line[2:]
                continue
# actual package
requires[key].append(line.strip())
return requires[section]
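For illustration (added), the parser expects a requirements.txt whose sections are introduced by '# <section>' comment lines, for example:
#     # install
#     requests>=2.0
#     # dev
#     pytest
# With that file, require("install") returns ["requests>=2.0"] and require("dev") returns ["pytest"].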
|
efda45491798e5b7b66e0f2d6a4ac7b9fc3324d0
| 3,643,386
|
import ast
def find_pkgutil_ns_hints(tree):
"""
Analyze an AST for hints that we're dealing with a Python module that defines a pkgutil-style namespace package.
:param tree:
The result of :func:`ast.parse()` when run on a Python module (which is
assumed to be an ``__init__.py`` file).
:returns:
A :class:`set` of strings where each string represents a hint (an
indication) that we're dealing with a pkgutil-style namespace module. No
single hint can definitely tell us, but a couple of unique hints taken
together should provide a reasonable amount of confidence (at least this
is the idea, how well this works in practice remains to be seen).
"""
hints = set()
for node in ast.walk(tree):
if isinstance(node, ast.Attribute):
if node.attr == "extend_path":
logger.debug("Found hint! ('extend_path' reference)")
hints.add("extend_path")
elif isinstance(node, ast.Import) and any(alias.name == "pkgutil" for alias in node.names):
logger.debug("Found hint! (import pkg_util)")
hints.update(("import", "pkgutil"))
elif (
isinstance(node, ast.ImportFrom)
and node.module == "pkgutil"
and any(alias.name == "extend_path" for alias in node.names)
):
logger.debug("Found hint! (from pkg_util import extend_path)")
hints.update(("import", "pkgutil", "extend_path"))
elif isinstance(node, ast.Name):
if node.id == "extend_path":
logger.debug("Found hint! ('extend_path' reference)")
hints.add("extend_path")
elif node.id == "pkgutil":
logger.debug("Found hint! ('pkgutil' reference)")
hints.add("pkgutil")
elif node.id == "__import__":
logger.debug("Found hint! ('__import__' reference)")
hints.add("import")
elif node.id == "__name__":
logger.debug("Found hint! ('__name__' reference)")
hints.add("__name__")
elif node.id == "__path__":
logger.debug("Found hint! ('__path__' reference)")
hints.add("__path__")
elif isinstance(node, ast.Str) and node.s in ("pkgutil", "extend_path"):
logger.debug("Found hint! ('%s' string literal)", node.s)
hints.add(node.s)
return hints
|
c342bc4f663e52359b420491a8b21fa79cec201a
| 3,643,387
|
def string_in_list_of_dicts(key, search_value, list_of_dicts):
"""
    Returns True if search_value is found in the list of dictionaries at the specified key.
Case insensitive and without leading or trailing whitespaces.
:return: True if found, else False
"""
for item in list_of_dicts:
if equals(item[key], search_value):
return True
return False
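Illustrative usage (added), assuming the module-level `equals` helper performs a trimmed, case-insensitive comparison:
people = [{"name": "Alice"}, {"name": "Bob"}]
string_in_list_of_dicts("name", "  alice ", people)  # -> True under that assumption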
|
a761e3b44efc6e584c8f9045be307837daad49c4
| 3,643,388
|
import itertools
import pandas
def get_data(station_id, elements=None, update=True, as_dataframe=False):
"""Retrieves data for a given station.
Parameters
----------
station_id : str
Station ID to retrieve data for.
elements : ``None``, str, or list of str
If specified, limits the query to given element code(s).
update : bool
If ``True`` (default), new data files will be downloaded if they are
newer than any previously cached files. If ``False``, then previously
downloaded files will be used and new files will only be downloaded if
there is not a previously downloaded file for a given station.
as_dataframe : bool
If ``False`` (default), a dict with element codes mapped to value dicts
is returned. If ``True``, a dict with element codes mapped to equivalent
pandas.DataFrame objects will be returned. The pandas dataframe is used
internally, so setting this to ``True`` is a little bit faster as it
skips a serialization step.
Returns
-------
site_dict : dict
A dict with element codes as keys, mapped to collections of values. See
the ``as_dataframe`` parameter for more.
"""
    if isinstance(elements, str):
elements = [elements]
start_columns = [
('year', 11, 15, int),
('month', 15, 17, int),
('element', 17, 21, str),
]
value_columns = [
('value', 0, 5, float),
('mflag', 5, 6, str),
('qflag', 6, 7, str),
('sflag', 7, 8, str),
]
columns = list(itertools.chain(start_columns, *[
[(name + str(n), start + 13 + (8 * n), end + 13 + (8 * n), converter)
for name, start, end, converter in value_columns]
        for n in range(1, 32)
]))
station_file_path = _get_ghcn_file(
station_id + '.dly', check_modified=update)
station_data = util.parse_fwf(station_file_path, columns, na_values=[-9999])
dataframes = {}
for element_name, element_df in station_data.groupby('element'):
        if elements is not None and element_name not in elements:
continue
element_df['month_period'] = element_df.apply(
lambda x: pandas.Period('%s-%s' % (x['year'], x['month'])),
axis=1)
element_df = element_df.set_index('month_period')
monthly_index = element_df.index
# here we're just using pandas' builtin resample logic to construct a daily
# index for the timespan
daily_index = element_df.resample('D').index.copy()
# XXX: hackish; pandas support for this sort of thing will probably be
# added soon
month_starts = (monthly_index - 1).asfreq('D') + 1
dataframe = pandas.DataFrame(
columns=['value', 'mflag', 'qflag', 'sflag'], index=daily_index)
for day_of_month in range(1, 32):
dates = [date for date in (month_starts + day_of_month - 1)
if date.day == day_of_month]
if not len(dates):
continue
months = pandas.PeriodIndex([pandas.Period(date, 'M') for date in dates])
for column_name in dataframe.columns:
col = column_name + str(day_of_month)
dataframe[column_name][dates] = element_df[col][months]
dataframes[element_name] = dataframe
if as_dataframe:
return dataframes
else:
return dict([
(key, util.dict_from_dataframe(dataframe))
            for key, dataframe in dataframes.items()
])
|
7eaa0d152a8f76fa7bfc4109fb4e0a5c3d90e318
| 3,643,389
|
def Find_Peaks(profile, scale, **kwargs):
"""
Pulls out the peaks from a radial profile
Inputs:
profile : dictionary, contains intensity profile and pixel scale of
diffraction pattern
calibration : dictionary, contains camera parameters to scale data
properly in two theta space
is_profile : boolean, changes processing for profiles vs 2D patterns
scale_bar : string, determines which conversions need to be run
to convert to two theta
display_type: string, determines which plots to show
Outputs:
peak_locs : dictionary, contains two_theta, d_spacings, and input_vector arrays
peaks locations found in the profile
"""
max_numpeaks = kwargs.get('max_numpeaks', 75)
scale_range = kwargs.get('dspace_range',[0.5, 6])
squished_scale = [True if x<scale_range[1] and x >scale_range[0] else False for x in scale]
print(squished_scale)
filter_size_default=max(int(scale[squished_scale].shape[0]/50),3)
print(filter_size_default)
kwargs['filter_size'] = kwargs.get('filter_size',filter_size_default)
print('filter size')
print(kwargs['filter_size'])
# find the location of the peaks in pixel space
peaks = pfnd.vote_peaks(profile[squished_scale], **kwargs)
peaks_d = scale[squished_scale][peaks>0]
scale_d = scale
thresh = 0
orig_length = len(peaks_d)
if len(peaks_d) > max_numpeaks:
print(len(peaks_d))
print("WARNING: {} peaks were detected," +
" some of the peaks will be trimmed."+
"\nFor best results. Please check calibration or run manual peak detection.".format(len(peaks_d)))
srt_peaks = np.sort(peaks[peaks>0])
thresh = srt_peaks[len(peaks_d)-max_numpeaks]
if len(scale[squished_scale][peaks>thresh]) ==0 and thresh>0:
thresh -=1
peaks_d = scale[squished_scale][peaks>thresh]
print(len(peaks_d))
print(thresh)
print(srt_peaks)
if len(peaks_d) == orig_length:
print("WARNING: reduction based on votes unsuccessful. try other parameters")
elif len(peaks_d)> max_numpeaks:
print("WARNING: partial reduction to {} peaks.".format(len(peaks_d)))
peak_locs = {"d_spacing":scale[squished_scale][peaks>thresh],
"vec":[int(round((x-.5)*164))-1 for x in peaks_d]
}
# Display the data
peaks_h = pfnd.plot_peaks(profile[squished_scale], scale[squished_scale], peaks, thresh, **kwargs)
if len(peak_locs['vec']) <= 4:
print("WARNING: only {} peaks were detected," +
" this is lower than the recommended 4+ peaks needed"+
"\nFor best results. Please check calibration.".format(len(peaks_d)))
return peak_locs, peaks_h
|
3d5cf4a5d559d54aa061d4abd9a02efb96c03d05
| 3,643,390
|
def empty_items(item_list, total):
"""
Returns a list of null objects. Useful when you want to always show n
results and you have a list of < n.
"""
list_length = len(item_list)
expected_total = int(total)
if list_length != expected_total:
return range(0, expected_total-list_length)
return ''
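Illustrative (added): a two-item list padded to five results yields three filler slots:
list(empty_items(['a', 'b'], 5))  # -> [0, 1, 2]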
|
12848fe61457b2d138a2fcd074fb6ec6d09cbaf5
| 3,643,391
|
import struct
def _read_string(fp):
"""Read the next sigproc-format string in the file.
Parameters
----------
fp : file
file object to read from.
Returns
-------
str
read value from the file
"""
strlen = struct.unpack("I", fp.read(struct.calcsize("I")))[0]
return fp.read(strlen).decode()
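A self-contained illustration (added) of the layout the reader expects, a native-endian 4-byte length followed by that many bytes:
import io, struct
buf = io.BytesIO(struct.pack("I", 6) + b"HEADER")
assert _read_string(buf) == "HEADER"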
|
346a65e6be15f593c91dde34cb45c53cb5731877
| 3,643,392
|
def add_optional_parameters(detail_json, detail, rating, rating_n, popularity, current_popularity, time_spent, detailFromGoogle={}):
"""
check for optional return parameters and add them to the result json
    :param detail_json: result dict that the optional fields are added to
    :param detail: place-details dict (e.g. from the Google Places API)
    :param rating: overall rating, if available
    :param rating_n: number of ratings, if available
    :param popularity: raw popular-times data, if available
    :param current_popularity: current popularity value, if available
    :param time_spent: typical time spent, if available
    :param detailFromGoogle: extra details merged in when it contains a "name" key
    :return: the updated detail_json
    """
if rating:
detail_json["rating"] = rating
elif "rating" in detail:
detail_json["rating"] = detail["rating"]
if rating_n:
detail_json["rating_n"] = rating_n
if "international_phone_number" in detail:
detail_json["international_phone_number"] = detail["international_phone_number"]
if current_popularity:
detail_json["current_popularity"] = current_popularity
if popularity:
popularity, wait_times = get_popularity_for_day(popularity)
detail_json["populartimes"] = popularity
if wait_times:
detail_json["time_wait"] = wait_times
if time_spent:
detail_json["time_spent"] = time_spent
if ("name" in detailFromGoogle):
detail_json.update(detailFromGoogle)
return detail_json
|
176fab2255f9302c945cb29ac5f9513da368a57e
| 3,643,393
|
def build_get_string_with_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get string dictionary value {"0": "foo", "1": null, "2": "foo2"}.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"str": "str" # Optional.
}
"""
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/dictionary/prim/string/foo.null.foo2')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
|
976b20770b74b4cf8504f673e66aec94fbf55c2b
| 3,643,394
|
from urllib import parse
def get_db_url(db_host, db_name, db_user, db_pass):
"""
Helper function for creating the "pyodbc" connection string.
@see /etc/freetds.conf
@see http://docs.sqlalchemy.org/en/latest/dialects/mssql.html
@see https://code.google.com/p/pyodbc/wiki/ConnectionStrings
"""
params = parse.quote(
"Driver={{FreeTDS}};Server={};Port=1433;"
"Database={};UID={};PWD={};"
.format(db_host, db_name, db_user, db_pass))
return 'mssql+pyodbc:///?odbc_connect={}'.format(params)
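Illustrative usage (added); host and credentials below are placeholders:
url = get_db_url("db.example.com", "mydb", "user", "secret")
# url starts with 'mssql+pyodbc:///?odbc_connect=' followed by the percent-encoded FreeTDS connection string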
|
f0ed18ac321fcc9e93b038dc2f3905af52191c7b
| 3,643,395
|
import torch
def boxes_iou3d_cpu(boxes_a, boxes_b, box_mode='wlh', rect=False, need_bev=False):
"""
Input (torch):
        boxes_a: (N, 7) [x, y, z, dim, dim, dim, ry] torch float32 tensor; the order of the three
            size dimensions is given by `box_mode` (default 'wlh')
        boxes_b: (M, 7) torch float32 tensor with the same layout as boxes_a
rect: True/False means boxes in camera/velodyne coord system.
Output:
iou_3d: (N, M)
"""
w_index, l_index, h_index = box_mode.index('w') + 3, box_mode.index('l') + 3, box_mode.index('h') + 3
boxes_a_bev = utils.boxes3d_to_bev_torch(boxes_a, box_mode, rect)
boxes_b_bev = utils.boxes3d_to_bev_torch(boxes_b, box_mode, rect)
overlaps_bev = torch.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M)
iou3d_cuda.boxes_overlap_bev_cpu(boxes_a_bev.contiguous(), boxes_b_bev.contiguous(), overlaps_bev)
# bev iou
area_a = (boxes_a[:, w_index] * boxes_a[:, l_index]).view(-1, 1) # (N, 1)
area_b = (boxes_b[:, w_index] * boxes_b[:, l_index]).view(1, -1) # (1, M) -> broadcast (N, M)
iou_bev = overlaps_bev / torch.clamp(area_a + area_b - overlaps_bev, min=1e-7)
# height overlap
if rect:
boxes_a_height_min = (boxes_a[:, 1] - boxes_a[:, h_index]).view(-1, 1) # y - h
boxes_a_height_max = boxes_a[:, 1].view(-1, 1) # y
boxes_b_height_min = (boxes_b[:, 1] - boxes_b[:, h_index]).view(1, -1)
boxes_b_height_max = boxes_b[:, 1].view(1, -1)
else:
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, h_index]).view(-1, 1) # z - h, (N, 1)
boxes_a_height_max = boxes_a[:, 2].view(-1, 1) # z
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, h_index]).view(1, -1) # (1, M)
boxes_b_height_max = boxes_b[:, 2].view(1, -1)
max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) # (N, 1)
min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max) # (1, M)
overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) # (N, M)
# 3d iou
overlaps_3d = overlaps_bev * overlaps_h # broadcast: (N, M)
vol_a = (boxes_a[:, h_index] * boxes_a[:, w_index] * boxes_a[:, l_index]).view(-1, 1) # (N, 1)
vol_b = (boxes_b[:, h_index] * boxes_b[:, w_index] * boxes_b[:, l_index]).view(1, -1) # (1, M) -> broadcast (N, M)
iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-7)
if need_bev:
return iou3d, iou_bev
return iou3d
|
e3b40e2c4c35a7f423739791cc9268ecd22cdf42
| 3,643,396
|
def make_attrstring(attr):
"""Returns an attribute string in the form key="val" """
attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()])
return '%s%s' % (' ' if attrstring != '' else '', attrstring)
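Two quick illustrative calls (added):
make_attrstring({'class': 'big', 'id': 'x1'})  # -> ' class="big" id="x1"' (dict insertion order)
make_attrstring({})                            # -> ''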
|
fbaf2b763b4b1f4399c45c3a19698d0602f0b224
| 3,643,397
|
from typing import Iterable
from typing import Callable
import random
def distribute(
computation_graph: ComputationGraph,
agentsdef: Iterable[AgentDef],
hints=None,
computation_memory: Callable[[ComputationNode], float] = None,
communication_load: Callable[[ComputationNode, str], float] = None,
) -> Distribution:
"""
gh-cgdp distribution method.
    Heuristic distribution based on communication and hosting costs, while respecting
    agents' capacities.
Parameters
----------
computation_graph
agentsdef
hints
computation_memory
communication_load
Returns
-------
Distribution:
The distribution for the computation graph.
"""
# Place computations with hosting costs == 0
# For SECP, this assign actuators var and factor to the right device.
fixed_mapping = {}
for comp in computation_graph.node_names():
for agent in agentsdef:
if agent.hosting_cost(comp) == 0:
fixed_mapping[comp] = (
agent.name,
computation_memory(computation_graph.computation(comp)),
)
break
# Sort computation by footprint, but add a random element to avoid sorting on names
computations = [
(computation_memory(n), n, None, random.random())
for n in computation_graph.nodes
if n.name not in fixed_mapping
]
computations = sorted(computations, key=lambda o: (o[0], o[3]), reverse=True)
computations = [t[:-1] for t in computations]
logger.info("placing computations %s", [(f, c.name) for f, c, _ in computations])
current_mapping = {} # Type: Dict[str, str]
i = 0
while len(current_mapping) != len(computations):
footprint, computation, candidates = computations[i]
logger.debug(
"Trying to place computation %s with footprint %s",
computation.name,
footprint,
)
        # look for candidate agents for computation c
# TODO: keep a list of remaining capacities for agents ?
if candidates is None:
candidates = candidate_hosts(
computation,
footprint,
computations,
agentsdef,
communication_load,
current_mapping,
fixed_mapping,
)
computations[i] = footprint, computation, candidates
logger.debug("Candidates for computation %s : %s", computation.name, candidates)
if not candidates:
if i == 0:
logger.error(
f"Cannot find a distribution, no candidate for computation {computation}\n"
f" current mapping: {current_mapping}"
)
raise ImpossibleDistributionException(
f"Impossible Distribution, no candidate for {computation}"
)
# no candidate : backtrack !
i -= 1
logger.info(
"No candidate for %s, backtrack placement "
"of computation %s (was on %s",
computation.name,
computations[i][1].name,
current_mapping[computations[i][1].name],
)
current_mapping.pop(computations[i][1].name)
# FIXME : eliminate selected agent for previous computation
else:
_, selected = candidates.pop()
current_mapping[computation.name] = selected.name
computations[i] = footprint, computation, candidates
logger.debug(
"Place computation %s on agent %s", computation.name, selected.name
)
i += 1
# Build the distribution for the mapping
agt_mapping = defaultdict(lambda: [])
for c, a in current_mapping.items():
agt_mapping[a].append(c)
for c, (a, _) in fixed_mapping.items():
agt_mapping[a].append(c)
dist = Distribution(agt_mapping)
return dist
|
21a0c240e7dab8240269d5bdeadd4fcef49c1a3c
| 3,643,398
|
import requests
from bs4 import BeautifulSoup
from datetime import datetime
def depreciated_get_paste(paste_tup):
"""
    This takes a tuple consisting of an href from a paste link and a name that identify a Pastebin paste.
    It scrapes the page for the paste's content.
:param paste_tup: (string, string)
:return: Paste if successful or False
"""
href, name = paste_tup
# Form the url from the href and perform GET request
paste_url = 'http://pastebin.com' + href
paste_page = requests.get(paste_url)
# Collect the paste details from paste page
if paste_page.status_code == 200:
text = paste_page.text
soup = BeautifulSoup(text, 'html.parser')
# soup.textarea.get_text() return the paste content
paste = Paste(url="http://www.pastebin.com"+href, name=name, content=soup.textarea.get_text(), datetime=datetime.now())
return paste
# Return False if the scrape failed
return False
|
6f3620354827998eade57b989c503be4f093b6d8
| 3,643,399
|