def test_role_present_absent_dry_run(vmware_datacenter, service_instance, dry_run):
"""
Test scenarios for vmware_esxi.role_present state run with test=True
"""
role_name = "A{}".format(uuid.uuid4())
random_role = "Random{}".format(uuid.uuid4())
# create a new role
ret = esxi.role_present(name=role_name, privilege_ids=["Folder.Create"])
assert ret["result"] is None
assert not ret["changes"]
assert "Role {} will be created.".format(role_name) == ret["comment"]
# create the role using exec mod
ret = esxi_mod.add_role(
role_name=role_name, privilege_ids=["Folder.Create"], service_instance=service_instance
)
# update the role
ret = esxi.role_present(name=role_name, privilege_ids=["Folder.Delete"])
assert ret["result"] is None
assert not ret["changes"]
assert "Folder.Delete privileges will be added" in ret["comment"]
assert "Folder.Create privileges will be removed" in ret["comment"]
ret = esxi.role_absent(name=role_name)
assert ret["result"] is None
assert not ret["changes"]
assert "Role {} will be deleted.".format(role_name) == ret["comment"]
    # Remove a non-existent role
ret = esxi.role_absent(name=random_role)
assert ret["result"] is None
assert not ret["changes"]
assert "Role {} is not present.".format(random_role) in ret["comment"]
| 5,338,000
|
def extract_filter(filter_path):
"""Given a path to the weka's filter file,
return a list of selected features."""
with open(filepath) as f:
lnum = 0
for line in f:
lnum += 1 #pointer to the next line to read
if line.strip().startswith('Selected attributes:'):
print "next line to read: ",lnum
break
features = []
for line in f: #keep reading from where we stopped (from 'break' point)
# if len(line.strip()) != 0:
features.append(line.strip())
return features
| 5,338,001
|
def pull(local_dir, remote_host, remote_dir, delete=False, verbose=0):
"""
Get the remote files to the local directory.
"""
ignore_file_pth = join(local_dir, ignore_file_name)
transmit_dir(source_host=remote_host, source_dir=remote_dir, target_host=None, target_dir=local_dir,
delete=delete, ignore_file_pth=ignore_file_pth, verbose=verbose)
| 5,338,002
|
def convert_to_roman_numeral(number_to_convert):
"""
    Converts Hindu-Arabic (decimal) integers to Roman numerals.
    Args:
        number_to_convert: Hindu-Arabic (decimal) integer.
    Returns:
        Roman numeral, or an empty string for zero.
"""
arabic_numbers = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
roman_numerals = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
result = ""
for index, arabic_number in enumerate(arabic_numbers):
count = int(number_to_convert / arabic_number)
result += roman_numerals[index] * count
number_to_convert -= arabic_numbers[index] * count
return result
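# Illustrative usage (an addition, not part of the original snippet); exercises the
# function defined above:
assert convert_to_roman_numeral(1994) == "MCMXCIV"
assert convert_to_roman_numeral(0) == ""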
| 5,338,003
|
def plot_with_pandas():
"""Generate plot, using pandas."""
    df = pd.read_csv('inputs/run_times.csv')  # CHANGE THIS LINE TO READ FILE WITH SPEEDS, NOT JUST RUN TIMES
df.plot(x="trial", y="distance") # CHANGE THIS LINE TO PLOT TRIALS ON X-AXIS AND SPEEDS ON Y-AXIS
plt.show(block=False)
# ADD LABELS FOR X AND Y AXIS, ADD TITLE, ADD GRID
| 5,338,004
|
def rotate_xyz(vector: Vector, angle_x: float = 0., angle_y: float = 0., angle_z: float = 0.):
"""
    Rotate a 3d-vector around the x, y, and z axes (in that order)
:param vector: Vector to rotate
:param angle_x: Rotation angle around x-axis (in degrees)
:param angle_y: Rotation angle around y-axis (in degrees)
:param angle_z: Rotation angle around z-axis (in degrees)
:return: Rotated 3d vector
Dimension of vector is not checked for faster execution
"""
vec = rotate_x(vector, angle_x)
vec = rotate_y(vec, angle_y)
vec = rotate_z(vec, angle_z)
return vec
| 5,338,005
|
def get_project_from_data(data):
"""
Get a project from json data posted to the API
"""
if 'project_id' in data:
return get_project_by_id(data['project_id'])
if 'project_slug' in data:
return get_project_by_slug(data['project_slug'])
if 'project_name' in data:
return get_project_by_name(data['project_name'])
return None
| 5,338,006
|
def transform_bundle(bundle_uuid: str,
bundle_version: str,
bundle_path: str,
bundle_manifest_path: str,
extractor: DSSExtractor):
"""
Per bundle callback passed to DSSExtractor.extract.
Generates cell and expression table rows from a downloaded DSS bundle.
:param bundle_uuid: Downloaded bundle UUID
:param bundle_version: Downloaded bundle version
:param bundle_manifest_path: Local path to downloaded bundle dir
:param extractor: ETL extractor object
"""
logger.info(f"ETL: Downloaded bundle {bundle_uuid}.{bundle_version}. Transforming to PSV.")
transformer = CellExpressionTransformer(extractor.sd)
try:
transformer.transform_bundle(bundle_path, bundle_manifest_path)
except Exception as ex:
_log_error(f"{bundle_uuid}.{bundle_version}", ex, traceback.format_exc(), extractor)
| 5,338,007
|
def _from_atoms_and_bonds(atm_dct, bnd_dct):
""" Construct a molecular graph from atom and bond dictionaries.
format:
gra = (atm_dct, bnd_dct)
:param atm_dct: atom dictionary
:type atm_dct: dict
:param bnd_dct: bond dictionary
:type bnd_dct: dict
:rtype: (dict, dict)
"""
atm_dct = dict(atm_dct)
bnd_dct = dict(bnd_dct)
atm_keys = set(atm_dct.keys())
bnd_keys = set(bnd_dct.keys())
assert all(bnd_key <= atm_keys for bnd_key in bnd_keys)
return (atm_dct, bnd_dct)
| 5,338,008
|
def __getattr__(item):
"""Ping the func map, if an attrib is not registered, fallback to the dll"""
try:
res = func_map[item]
except KeyError:
return dll.__getattr__(item)
else:
if callable(res):
return res # Return methods from interface.
else:
return dll_func(*res)
| 5,338,009
|
def moves(possibleMoves):
"""shows all the possible moves available"""
Game()
for i in range(8):
for j in range(8):
if possibleMoves[i][j] != "":
pg.draw.circle(window, green, (j * 100 + 50, (i * 100 + 50)), 10)
pg.display.update()
| 5,338,010
|
def seed_random_state(seed):
"""
Turn seed into np.random.RandomState instance
"""
if (seed is None) or (isinstance(seed, int)):
return np.random.RandomState(seed)
elif isinstance(seed, np.random.RandomState):
return seed
raise ValueError("%r cannot be used to generate numpy.random.RandomState"
"instance" % seed)
| 5,338,011
|
def test_input_redundancy_RULE110():
"""Test Input Redundancy - RULE110"""
n = RULE110()
k_r, true_k_r = n.input_redundancy(mode='node',bound='upper',norm=False) , 7/8
assert (k_r == true_k_r) , ('Input Redundancy (node,upper bound) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
k_r, true_k_r = n.input_redundancy(mode='node',bound='lower',norm=False) , 7/8
assert (k_r == true_k_r) , ('Input Redundancy (node,lower bound) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
k_r, true_k_r = n.input_redundancy(mode='node',bound='upper',norm=True) , (7/8)/3
assert (k_r == true_k_r) , ('Input Redundancy (node,upper bound,normed) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
k_r, true_k_r = n.input_redundancy(mode='node',bound='lower',norm=True) , (7/8)/3
assert (k_r == true_k_r) , ('Input Redundancy (node,lower bound,normed) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
k_r, true_k_r = n.input_redundancy(mode='input',bound='upper',norm=False) , [6/8,2/8,2/8]
assert (k_r == true_k_r) , ('Input Redundancy (input,upper bound) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
k_r, true_k_r = n.input_redundancy(mode='input',bound='mean',norm=False) , [5/8,1/8,1/8]
assert (k_r == true_k_r) , ('Input Redundancy (input,mean) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
k_r, true_k_r = n.input_redundancy(mode='input',bound='lower',norm=False) , [4/8.,0/8,0/8]
assert (k_r == true_k_r) , ('Input Redundancy (input,lower bound) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
k_r, true_k_r = n.input_redundancy(mode='input',bound='tuple',norm=False) , [(0.5, 0.75), (0.0, 0.25), (0.0, 0.25)]
assert (k_r == true_k_r) , ('Input Redundancy (input,tuples) for RULE110 node does not match. %s != %s' % (k_r,true_k_r))
| 5,338,012
|
def _normalize_dashboard_link(link, request):
"""
Given a dashboard link, make sure it conforms to what we expect.
"""
if not link.startswith("http"):
# If a local url is given, assume it is using the same host
# as the application, and prepend that.
link = url_path_join(f"{request.protocol}://{request.host}", link)
if link.endswith("/status"):
# If the default "status" dashboard is given, strip it.
link = link[: -len("/status")]
return link
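# Illustrative usage (an addition, not part of the original snippet); the SimpleNamespace
# stands in for the Tornado-style request object (with `protocol` and `host`) that the
# function expects, and `url_path_join` is assumed to be importable in this module:
from types import SimpleNamespace
req = SimpleNamespace(protocol="https", host="hub.example.org")
link = _normalize_dashboard_link("/user/alice/status", req)
# -> "https://hub.example.org/user/alice" (local path prefixed, trailing "/status" stripped)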
| 5,338,013
|
def _plot_func_posterior_pdf_node_nn(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.2,
plot_likelihood_raw=False,
**kwargs
):
"""Calculate posterior predictives from raw likelihood values and plot it on top of a histogram of the real data.
The function does not define a figure, but manipulates an axis object.
Arguments:
bottom_node : pymc.stochastic
Bottom node to compute posterior over.
axis : matplotlib.axis
Axis to plot into.
value_range : numpy.ndarray
Range over which to evaluate the likelihood.
Optional:
model : str <default='ddm_vanilla'>
str that defines the generative model underlying the kabuki model from which the bottom_node
argument derives.
samples : int <default=10>
Number of posterior samples to use.
bin_size: float <default=0.2>
Size of bins for the data histogram.
plot_likelihood_raw : bool <default=False>
Whether or not to plot likelihoods sample wise.
add_legend : bool <default=True>
Whether or not to add a legend to the plot
linewidth : float <default=0.5>
Linewidth of histogram outlines.
"""
# Setup -----
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
model_ = kwargs.pop("model_", "ddm_vanilla")
add_legend = kwargs.pop("add_legend", True)
alpha_line = kwargs.pop("alpha", 0.05)
lw_ = kwargs.pop("linewidth", 0.5)
choices = model_config[model_]["choices"]
n_choices = model_config[model_]["n_choices"]
    if value_range is None:
        # Infer from data by finding the min and max from the nodes
        raise NotImplementedError("value_range keyword argument must be supplied.")
    bins = np.arange(value_range[0], value_range[-1], bin_size)
if n_choices == 2:
like = np.empty((samples, len(value_range)), dtype=np.float32)
pdf_in = value_range
else:
like = np.empty((samples, len(value_range), n_choices), dtype=np.float32)
pdf_in = np.zeros((len(value_range), 2))
pdf_in[:, 0] = value_range
# -----
# Get posterior parameters and plot corresponding likelihoods (if desired) ---
for sample in range(samples):
# Get random posterior sample
_parents_to_random_posterior_sample(bottom_node)
# Generate likelihood for parents parameters
if n_choices == 2:
like[sample, :] = bottom_node.pdf(pdf_in)
if plot_likelihood_raw:
axis.plot(
value_range,
like[sample, :],
color="black",
lw=1.0,
alpha=alpha_line,
)
else:
c_cnt = 0
for choice in choices:
pdf_in[:, 1] = choice
like[sample, :, c_cnt] = bottom_node.pdf(pdf_in)
if plot_likelihood_raw:
like[sample, :, c_cnt] = bottom_node.pdf(pdf_in)
axis.plot(
pdf_in[:, 0],
like[sample, :, c_cnt],
color=color_dict[choice],
lw=1.0,
alpha=alpha_line,
)
c_cnt += 1
# -------
# If we don't plot raw likelihoods, we generate a mean likelihood from the samples above
# and plot it as a line with uncertainty bars
if not plot_likelihood_raw:
y = like.mean(axis=0)
try:
y_std = like.std(axis=0)
except FloatingPointError:
print(
"WARNING! %s threw FloatingPointError over std computation. Setting to 0 and continuing."
% bottom_node.__name__
)
y_std = np.zeros_like(y)
if n_choices == 2:
axis.plot(value_range, y, label="post pred", color="black")
axis.fill_between(
value_range, y - y_std, y + y_std, color="black", alpha=0.5
)
else:
c_cnt = 0
for choice in choices:
axis.plot(
value_range,
y[:, c_cnt],
label="post pred",
color=color_dict[choice],
)
axis.fill_between(
value_range,
y[:, c_cnt] - y_std[:, c_cnt],
y[:, c_cnt] + y_std[:, c_cnt],
color=color_dict[choice],
alpha=0.5,
)
c_cnt += 1
# Plot data
if len(bottom_node.value) != 0:
if n_choices == 2:
rt_dat = bottom_node.value.copy()
if np.sum(rt_dat.rt < 0) == 0:
rt_dat.loc[rt_dat.response != 1, "rt"] = (-1) * rt_dat.rt[
rt_dat.response != 1
].values
axis.hist(
rt_dat.rt.values,
density=True,
color="blue",
label="data",
bins=bins,
linestyle="-",
histtype="step",
lw=lw_,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / bottom_node.value.shape[0],
reps=bottom_node.value[bottom_node.value.response == choice].shape[
0
],
)
if np.sum(bottom_node.value.response == choice) > 0:
axis.hist(
bottom_node.value.rt[bottom_node.value.response == choice],
bins=np.arange(value_range[0], value_range[-1], bin_size),
weights=weights,
color=color_dict[choice],
label="data",
linestyle="dashed",
histtype="step",
lw=lw_,
)
axis.set_ylim(bottom=0) # Likelihood and histogram can only be positive
# Add a custom legend
if add_legend:
# If two choices only --> show data in blue, posterior samples in black
if n_choices == 2:
custom_elems = []
custom_titles = []
custom_elems.append(Line2D([0], [0], color="blue", lw=1.0, linestyle="-"))
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
# If more than two choices --> more styling
else:
custom_elems = [
Line2D([0], [0], color=color_dict[choice], lw=1) for choice in choices
]
custom_titles = ["response: " + str(choice) for choice in choices]
custom_elems.append(
Line2D([0], [0], color="black", lw=1.0, linestyle="dashed")
)
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
axis.legend(custom_elems, custom_titles, loc="upper right")
| 5,338,014
|
async def delete(category_id: int):
"""Delete category with set id."""
apm.capture_message(param_message={'message': 'Category with %s id deleted.', 'params': category_id})
return await db.delete(category_id)
| 5,338,015
|
def find_files(fields):
""" Finds all FLT files from given fields and places them with metadata
into OrderedDicts.
Parameters
----------
fields : list of strings
The CLEAR fields; retain ability to specify the individual pointings
so that can easily re-run single ones if find an issue.
Returns
-------
visits : OrderedDict
Keys of 'files' and 'products'; values of list of FLT files and product name.
filters : OrderedDict
Keys of filters; values of OrderedDicts with keys of orient and values of
lists of FLT files.
"""
files = glob.glob(os.path.join(PATH_RAW, '*flt.fits'))
info = grizli.utils.get_flt_info(files)
# 'info' is an astropy table.
# Creating a new table and inserting only the rows I want is quite annoying
# Just convert 'info' into an ordered dictionary
info_dict = bypass_table.decompose_table(info, return_type=dict, include_meta=True)
new_info_list = []
# Convert table to ordered dictionary, put it in a list, convert to numpy array
# to convert it back into a table. awesome
for row in range(len(info_dict['TARGNAME'])):
if info_dict['TARGNAME'][row] in fields:
new_info_list.append([info_dict[key][row] for key in info_dict.keys() if key != 'meta'])
# Now, if one of the fields is GOODS-N, need be sure all the Barro programs
# are also retained.
for field in [field for field in fields if 'N' in field or 'ERSPRIME' in field]:
if info_dict['TARGNAME'][row] in overlapping_fields[field]:
new_info_list.append([info_dict[key][row] for key in info_dict.keys() if key != 'meta'])
# Break so the Barro programs are added only once.
break
# Convert 'info' back into a table
# I couldn't simply do dtype=info.dtype, so just hard code it
new_info_tab = Table(np.array(new_info_list), names=info.colnames, meta=info.meta,
dtype=['S18', 'S5', 'S8', 'S10', 'S8', '<f8', '<f8', '<f8', '<f8',
'<f8', '<f8', '<f8'])
visits, filters = grizli.utils.parse_flt_files(info=new_info_tab, uniquename=True)
return visits, filters
| 5,338,016
|
def test_orderstorage__Orderstorage__isLast__1(storage):
"""It raises a `KeyError` if the item is not in the list."""
with pytest.raises(KeyError):
storage.isLast('foo', 'fuz')
| 5,338,017
|
def tag_boundaries(htmlifiers):
"""Return a sequence of (offset, is_start, Region/Ref/Line) tuples.
Basically, split the atomic tags that come out of plugins into separate
start and end points, which can then be thrown together in a bag and sorted
as the first step in the tag-balancing process.
Like in Python slice notation, the offset of a tag refers to the index of
the source code char it comes before.
"""
for h in htmlifiers:
for intervals, cls in [(h.regions(), Region), (h.refs(), Ref)]:
for start, end, data in intervals:
tag = cls(data)
# Filter out zero-length spans which don't do any good and
# which can cause starts to sort after ends, crashing the tag
# balancer. Incidentally filter out spans where start tags come
# after end tags, though that should never happen.
#
# Also filter out None starts and ends. I don't know where they
# come from. That shouldn't happen and should be fixed in the
# plugins.
if (start is not None and start != -1 and
end is not None and end != -1 and
start < end):
yield start, True, tag
yield end, False, tag
| 5,338,018
|
def to_canonical_name(resource_name: str) -> str:
"""Parse a resource name and return the canonical version."""
return str(ResourceName.from_string(resource_name))
| 5,338,019
|
def apply_query_filters(query, model, **kwargs):
"""Parses through a list of kwargs to determine which exist on the model,
which should be filtered as ==, and which should be filtered as LIKE
"""
for k, v in six.iteritems(kwargs):
if v and hasattr(model, k):
column = getattr(model, k)
if column.is_attribute:
if isinstance(v, list):
# List() style parameters receive WHERE IN logic.
query = query.filter(column.in_(v))
elif isinstance(column.type, sqltypes.String):
# Filter strings with LIKE
query = query.filter(column.like("%" + v + "%"))
else:
# Everything else is a strict equal
query = query.filter(column == v)
return query
| 5,338,020
|
def bytes_to_b64_str(bytestring: bytes) -> str:
"""Converts random bytes into a utf-8 encoded string"""
return b64encode(bytestring).decode(config.security.ENCODING)
| 5,338,021
|
def read_event_analysis(s:Session, eventId:int) -> AnalysisData:
"""read the analysis data by its eventId"""
res = s.query(AnalysisData).filter_by(eventId=eventId).first()
return res
| 5,338,022
|
def eval_semeval2012_analogies(
vectors, weight_direct, weight_transpose, subset, subclass
):
"""
For a set of test pairs:
* Compute a Spearman correlation coefficient between the ranks produced by vectors and
gold ranks.
* Compute an accuracy score of answering MaxDiff questions.
"""
train_pairs = read_train_pairs_semeval2012(subset, subclass)
test_questions = read_test_questions_semeval2012(subset, subclass)
pairqnum2least, pairqnum2most = read_turk_answers_semeval2012(
subset, subclass, test_questions
)
turk_rank = read_turk_ranks_semeval2012(subset, subclass)
pairs_to_rank = [pair for pair, score in turk_rank]
# Assign a score to each pair, according to pairwise_analogy_func
our_pair_scores = {}
for pair in pairs_to_rank:
rank_pair_scores = []
for train_pair in train_pairs:
pair_to_rank = pair.strip().replace('"', '').split(':')
score = pairwise_analogy_func(
vectors,
standardized_uri('en', train_pair[0]),
standardized_uri('en', train_pair[1]),
standardized_uri('en', pair_to_rank[0]),
standardized_uri('en', pair_to_rank[1]),
weight_direct,
weight_transpose,
)
rank_pair_scores.append(score)
our_pair_scores[pair] = np.mean(rank_pair_scores)
# Answer MaxDiff questions using the ranks from the previous step
correct_most = 0
correct_least = 0
total = 0
for i, question in enumerate(test_questions):
question_pairs_scores = []
for question_pair in question:
score = our_pair_scores[question_pair]
question_pairs_scores.append(score)
our_answer_most = question[np.argmax(question_pairs_scores)]
our_answer_least = question[np.argmin(question_pairs_scores)]
votes_guess_least = pairqnum2least[(i, our_answer_least)]
votes_guess_most = pairqnum2most[(i, our_answer_most)]
max_votes_least = 0
max_votes_most = 0
for question_pair in question:
num_votes_least = pairqnum2least[(i, question_pair)]
num_votes_most = pairqnum2most[(i, question_pair)]
if num_votes_least > max_votes_least:
max_votes_least = num_votes_least
if num_votes_most > max_votes_most:
max_votes_most = num_votes_most
# a guess is correct if it got the same number of votes as the most frequent turkers' answer
if votes_guess_least == max_votes_least:
correct_least += 1
if votes_guess_most == max_votes_most:
correct_most += 1
total += 1
# Compute Spearman correlation of our ranks and MT ranks
our_semeval_scores = [score for pair, score in sorted(our_pair_scores.items())]
turk_semeval_scores = [score for pair, score in turk_rank]
spearman = spearmanr(our_semeval_scores, turk_semeval_scores)[0]
spearman_results = confidence_interval(spearman, total)
# Compute an accuracy score on MaxDiff questions
maxdiff = (correct_least + correct_most) / (2 * total)
low_maxdiff, high_maxdiff = proportion_confint(
(correct_least + correct_most), (2 * total)
)
maxdiff_results = pd.Series(
[maxdiff, low_maxdiff, high_maxdiff], index=['acc', 'low', 'high']
)
return [maxdiff_results, spearman_results]
| 5,338,023
|
def form_examples(request, step_variables):
"""
extract the examples from the request data, if possible
@param request: http request object
@type request rest_framework.request.Request
@param step_variables: set of variable names from the bdd test
@type step_variables: set(basestring)
@return: none if no examples or failed, or formed examples and an error msg,
if applicable
@rtype: (basestring, basestring)
"""
if u'examples' not in request.DATA:
return None, None
examples = request.DATA[u'examples']
log.debug(u'request has examples:\n{}'.format(examples))
# examples should be an array of json objects, each object being an
# example row
if not isinstance(examples, list):
return None, u'examples payload was not an array'
if not examples:
return None, u'examples array was empty'
# form the actual gherkin example text (sans "Examples:", engine adds it)
text = [u'|' + u'|'.join(step_variables) + u'|']
for ex in examples:
# verify the example obj has all the expected headers/fields
ex_field_diffs = step_variables.difference(ex.keys())
if ex_field_diffs:
return None, u'an example object was missing some fields: {} given: {}'.format(ex_field_diffs, ex)
vals = [unicode(ex[key]) for key in step_variables]
text.append(u'|' + u'|'.join(vals) + u'|')
text = u'\n'.join(text)
log.debug(u'resulting example text\n{}'.format(text))
return text, None
| 5,338,024
|
def get_typefromSelection(objectType="Edge", info=0):
""" """
m_num_obj, m_selEx, m_objs, m_objNames = get_InfoObjects(info=0, printError=False)
m_found = False
for m_i_o in range(m_num_obj):
if m_found:
break
Sel_i_Object = m_selEx[m_i_o]
Obj_i_Object = m_objs[m_i_o]
Name_i_Object = m_objNames[m_i_o]
if info != 0:
print("Sel_i_Object = " + str(Sel_i_Object))
print("Obj_i_Object = " + str(Obj_i_Object))
print("Name_i_Object = " + str(Name_i_Object))
SubObjects_Inside = Sel_i_Object.SubObjects
for n in range(len(SubObjects_Inside)):
SubObject = SubObjects_Inside[n]
if info != 0:
print("SubObject = " + str(SubObject))
print("SubObject.ShapeType = " + str(SubObject.ShapeType))
if SubObject.ShapeType == objectType:
m_found = True
break
if m_found:
return Sel_i_Object, Obj_i_Object, Name_i_Object
else:
return None, None, None
| 5,338,025
|
def load_file(file_path, mode='rb', encoder='utf-8'):
"""
Loads the content of a given filename
:param file_path: The file path to load
:param mode: optional mode options
:param encoder: the encoder
:return: The content of the file
"""
with xbmcvfs.File(xbmcvfs.translatePath(file_path), mode) as file_handle:
return file_handle.readBytes().decode(encoder)
| 5,338,026
|
def centcalc_by_weight(data):
"""
    Determines the center (of gravity) of a neutron beam on a 2D detector by weighting each pixel with its count
    --------------------------------------------------
    Arguments:
    ----------
    data : ndarray : l x m x n array with 'pixel' - data to weight over m and n
    Return:
    ----------
    centers : ndarray : l x 2 array with all the centers (cx, cy)
    INFO:
    ----------
    1. Method implemented by C. Herb
    2. CHECK the order of cx, cy if it fits to all other interpretations of 2d dimensions
    """
    centerdata = zeros((data.shape[0], 2))
    for idx, frame in enumerate(data):
        # integrate each l-slice separately along its two detector axes
        x_int = sum(frame, axis=0)
        y_int = sum(frame, axis=1)
        centerdata[idx, 0] = sum([i * xval for i, xval in enumerate(x_int)]) / sum(x_int)
        centerdata[idx, 1] = sum([j * yval for j, yval in enumerate(y_int)]) / sum(y_int)
return centerdata
| 5,338,027
|
def sum(a, axis=None, dtype=None, out=None, keepdims=False):
"""Returns the sum of an array along given axes.
Args:
a (cupy.ndarray): Array to take sum.
axis (int or sequence of ints): Axes along which the sum is taken.
dtype: Data type specifier.
out (cupy.ndarray): Output array.
keepdims (bool): If ``True``, the specified axes are remained as axes
of length one.
Returns:
cupy.ndarray: The result array.
.. seealso:: :func:`numpy.sum`
"""
if fusion._is_fusing():
if keepdims:
raise NotImplementedError(
'cupy.sum does not support `keepdims` in fusion yet.')
return fusion._call_reduction(_math.sum_auto_dtype,
a, axis=axis, dtype=dtype, out=out)
# TODO(okuta): check type
return a.sum(axis, dtype, out, keepdims)
| 5,338,028
|
def to_bool(env, default="false"):
"""
Convert a string to a bool.
"""
return bool(util.strtobool(os.getenv(env, default)))
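# Illustrative usage (an addition, not part of the original snippet); assumes `os` is
# imported and `util` refers to distutils.util, as the function above expects:
os.environ["FEATURE_ENABLED"] = "yes"
assert to_bool("FEATURE_ENABLED") is True
assert to_bool("MISSING_FLAG") is False  # falls back to the "false" default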
| 5,338,029
|
def public_key():
""" returns public key """
return textwrap.dedent('''
-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAwBLTc+75h13ZyLWlvup0OmbhZWxohLMMFCUBClSMxZxZdMvyzBnW
+JpOQuvnasAeTLLtEDWSID0AB/EG68Sesr58Js88ORUw3VrjObiG15/iLtAm6hiN
BboTqd8jgWr1yC3LfNSKJk82qQzHJPlCO9Gc5HcqvWrIrqrJL2kwjOU66U/iRxJu
dyOrz0sBkVgfwDBqNS96L0zFQCqk70w9KyOJqe4JNJUtBas6lbwgChDU4/B3BDW5
PYJy2Pp8MSs2n1lhrUkXxRnj+Vl5wLQLdwog1XAGu2J8pIckPg/aB7mB/fSlFihU
bnFlRlgHrlh8gyNYztbGWKMrQ4Bz2831PQIDAQAB
-----END RSA PUBLIC KEY-----
''')
| 5,338,030
|
def register_blueprints(current_app) -> None:
"""Register app blueprints."""
current_app.register_blueprint(product_blueprint)
current_app.register_blueprint(cart_blueprint)
current_app.register_blueprint(customer_blueprint)
current_app.register_blueprint(restaurant_blueprint)
current_app.register_blueprint(auth_blueprint)
| 5,338,031
|
def fetch_WIPOgamma(subset, classification_level, data_home, extracted_path, text_fields = ['abstract', 'description'], limit_description=300):
"""
    Fetches the WIPO-gamma dataset
:param subset: 'train' or 'test' split
:param classification_level: the classification level, either 'subclass' or 'maingroup'
:param data_home: directory containing the original 11 English zips
:param extracted_path: directory used to extract and process the original files
:param text_fields: indicates the fields to extract, in 'abstract', 'description', 'claims'
:param limit_description: the maximum number of words to take from the description field (default 300); set to -1 for all
:return:
"""
assert subset in {"train", "test"}, 'unknown target request (valid ones are "train" or "test")'
assert len(text_fields)>0, 'at least some text field should be indicated'
if not exists(data_home):
        raise ValueError(f'{data_home} does not exist, and the dataset cannot be automatically downloaded, '
                         f'since you need to request permission. Please refer to {WIPO_URL}')
create_if_not_exist(extracted_path)
config = f'{"-".join(text_fields)}'
    if 'description' in text_fields: config += f'-{limit_description}'
pickle_path=join(extracted_path, f'wipo-{subset}-{classification_level}-{config}.pickle')
if exists(pickle_path):
print(f'loading pickled file in {pickle_path}')
return pickle.load(open(pickle_path,'rb'))
print('pickle file not found, processing...(this will take some minutes)')
extracted = sum([exists(f'{extracted_path}/EnglishWipoGamma{(i+1)}-{config}.txt') for i in range(11)])==11
if not extracted:
print(f'extraction files not found, extracting files in {data_home}... (this will take some additional minutes)')
Parallel(n_jobs=-1)(
delayed(extract)(
join(data_home, file), join(extracted_path, file.replace('.zip', f'-{config}.txt')), text_fields, limit_description
)
for file in list_files(data_home)
)
doc_labels, train_ids, test_ids = read_classification_file(data_home, classification_level=classification_level) # or maingroup
print(f'{len(doc_labels)} documents classified split in {len(train_ids)} train and {len(test_ids)} test documents')
train_request = []
test_request = []
pbar = tqdm([filename for filename in list_files(extracted_path) if filename.endswith(f'-{config}.txt')])
labelcut = LabelCut(classification_level)
errors=0
for proc_file in pbar:
pbar.set_description(f'processing {proc_file} [errors={errors}]')
if not proc_file.endswith(f'-{config}.txt'): continue
lines = open(f'{extracted_path}/{proc_file}', 'rt').readlines()
for lineno,line in enumerate(lines):
parts = line.split('\t')
assert len(parts)==4, f'wrong format in {extracted_path}/{proc_file} line {lineno}'
id,mainlabel,alllabels,text=parts
mainlabel = labelcut.trim(mainlabel)
alllabels = labelcut.trim(alllabels.split())
# assert id in train_ids or id in test_ids, f'id {id} out of scope'
if id not in train_ids and id not in test_ids:
errors+=1
else:
# assert mainlabel == doc_labels[id][0], 'main label not consistent'
request = train_request if id in train_ids else test_request
request.append(WipoGammaDocument(id, text, mainlabel, alllabels))
print('pickling requests for faster subsequent runs')
    pickle.dump(train_request, open(join(extracted_path, f'wipo-train-{classification_level}-{config}.pickle'), 'wb'), pickle.HIGHEST_PROTOCOL)
    pickle.dump(test_request, open(join(extracted_path, f'wipo-test-{classification_level}-{config}.pickle'), 'wb'), pickle.HIGHEST_PROTOCOL)
if subset== 'train':
return train_request
else:
return test_request
| 5,338,032
|
def latest_maven_version(group_id: str, artifact_id: str) -> Optional[str]:
"""Helper function to find the latest released version of a Maven artifact.
Fetches metadata from Maven Central and parses out the latest released
version.
Args:
group_id (str): The groupId of the Maven artifact
artifact_id (str): The artifactId of the Maven artifact
Returns:
        The latest version of the artifact as a string, or "0.0.0" if the metadata request fails
"""
group_path = "/".join(group_id.split("."))
url = (
f"https://repo1.maven.org/maven2/{group_path}/{artifact_id}/maven-metadata.xml"
)
response = requests.get(url)
if response.status_code >= 400:
return "0.0.0"
return version_from_maven_metadata(response.text)
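# Illustrative usage (an addition, not part of the original snippet); performs a real
# request against Maven Central and relies on the version_from_maven_metadata helper
# referenced above, so the returned version string will vary over time:
print(latest_maven_version("com.google.guava", "guava"))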
| 5,338,033
|
def zeropoint(info_dict):
"""
Computes the zero point of a particular system configuration
(filter, atmospheric conditions,optics,camera).
The zeropoint is the magnitude which will lead to one count per second.
By definition ZP = -2.5*log10( Flux_1e-_per_s / Flux_zeromag ),
where Flux_1e-_per_s = 1 e-/s and Flux_zeromag =
sum_over_passband ( zero_flux * system_response * A_tel ) in e-/s
Hence:
ZP = 2.5*log10( sum_over_passband ( zero_flux * system_response * A_tel ))
Parameters
----------
info_dict: dictionary
wavelength: array
wavelength in angstrom
Returns
    -------
zeropoint: float
zeropoint in magnitude
"""
# Integrate over the wavelengths
# Flux_zero = np.trapz(zeromag_to_flux(info_dict,wavelength,unit='ph')
# * precomp.system_response(info_dict,wavelength),
# wavelength)*precomp.A_tel(info_dict)
# Flux_zero = np.trapz(zeromag_to_flux(info_dict,unit='ph')
# * info_dict['system_response'],
# info_dict['wavelength_ang'])
# * info_dict['A_tel']
Flux_zero = (
np.trapz(
utils.flambda_to_fph(
info_dict["wavelength_ang"],
utils.fJy_to_flambda(
info_dict["wavelength_ang"], info_dict["Flux_zero_Jy"]
),
)
* info_dict["system_response"]
* info_dict["Trans_atmosphere"],
info_dict["wavelength_ang"],
)
* info_dict["A_tel"]
)
ZP = 2.5 * np.log10(Flux_zero)
info_dict["zeropoint"] = ZP
return info_dict
| 5,338,034
|
def logUncaughtExceptions(*exc_info):
"""
Set sys.excepthook to this if you want to make sure a script-ending
exception gets logged. Otherwise that info tends to disappear.
I got this script here:
http://blog.tplus1.com/index.php/2012/08/05/python-log-uncaught-exceptions-with-sys-excepthook/
I also have some stuffed tagged in bookmarks for getting pdb started when an
exception like this happens.
"""
logging.critical('Unhandled exception:', exc_info=exc_info)
| 5,338,035
|
def extract_binary_function_range(ida64_path, script_path, binary_paths):
"""this will generate a .json file for each binary file in same path indicating the range of function"""
# binary_paths = use_ida_scripts_get_function_starts_and_ends.read_binary_list(binary_path_list)
# print(binary_paths)
for binary_path in binary_paths:
if os.path.exists(binary_path + ".json"):
continue
print("processing path: {}".format(binary_path))
use_ida_scripts_get_function_starts_and_ends.execute_ida_scripts_get_functions_starts_and_ends(binary_path,
ida64_path,
script_path)
| 5,338,036
|
def model_comp(real_data, deltaT, binSize, maxTimeLag, abc_results1, final_step1, abc_results2, final_step2,\
model1, model2, distFunc, summStat_metric, ifNorm,\
numSamplesModelComp, eval_start = 3, disp1 = None, disp2 = None):
"""Perform Baysian model comparison with ABC fits from model1 and model2.
Parameters
-----------
real_data : nd array
        time-series of continuous data, e.g., OU process, (numTrials * numTimePoints)
or binned spike counts (numTrials * numBin).
deltaT : float
temporal resolution of data (or binSize of spike counts).
binSize : float
bin-size for computing the autocorrelation.
maxTimeLag : float
maximum time-lag for computing the autocorrelation.
abc_results1: object
output of fitting model1 with aABC algorithm.
final_step1 : int
final step of aABC fitting for model1.
abc_results2: object
output of fitting model2 with aABC algorithm.
final_step2 : int
final step of aABC fitting for model2.
model1: string
selected generative model for model1 (from generative models list).
model2: string
selected generative model for model2 (from generative models list).
distFunc: string
'linear_distance' or 'logarithmic_distance'.
summStat_metric : string
        metric for computing summary statistics ('comp_cc', 'comp_ac_fft', 'comp_psd').
ifNorm : string
if normalize the autocorrelation or PSD.
numSamplesModelComp: int
number of samples from posterior distributions to compute the Bayes factor.
eval_start : int, default 3
defines the number of smallest errors we ignore before starting CDF computation.
disp1 : float, default None
The value of dispersion parameter if computed with the grid search method for model1.
disp2 : float, default None
The value of dispersion parameter if computed with the grid search method for model2.
Returns
-------
d1 : 1d array
distribution of errors (distances) for model1.
d2 : 1d array
distribution of errors (distances) for model2.
cdf1 : 1d array
CDF of errors for model1.
cdf2 : 1d array
CDF of errors for model2.
err_threshs : 1d array
error thresholds for which CDFs are computed
bf : 1d array
Bayes factors for each error threshold in "err_threshs" (CDF_M2/CDF_M1).
"""
# extract abc fits
theta_accepted1 = abc_results1[final_step1 - 1]['theta accepted']
theta_accepted2 = abc_results2[final_step2 - 1]['theta accepted']
# extract real data statistics
data_sumStat, data_mean, data_var, T, numTrials = extract_stats(real_data, deltaT, binSize,\
summStat_metric, ifNorm, maxTimeLag)
# compute distances
numSamplesPosterior1 = len(theta_accepted1[0])
numSamplesPosterior2 = len(theta_accepted2[0])
print('Computing distances for model1:')
d1 = gen_model_dist(data_sumStat, theta_accepted1, numSamplesModelComp, numSamplesPosterior1, model1, distFunc,\
summStat_metric, ifNorm, deltaT, binSize, T, numTrials, data_mean, data_var, maxTimeLag, disp1)
print('Computing distances for model2:')
d2 = gen_model_dist(data_sumStat, theta_accepted2, numSamplesModelComp, numSamplesPosterior2, model2, distFunc,\
summStat_metric, ifNorm, deltaT, binSize, T, numTrials, data_mean, data_var, maxTimeLag, disp2)
# compute CDFs and Bayes factors
cdf1, cdf2, eval_points, bf = comp_cdf(d1, d2, numSamplesModelComp, eval_start)
err_threshs = eval_points
return d1, d2, cdf1, cdf2, err_threshs, bf
| 5,338,037
|
def unwrap(*args, **kwargs):
"""
    This is an alias for unwrap_array, which you should use now.
"""
if deprecation_warnings:
        print('Use of "unwrap" function is deprecated, the new name '
              'is "unwrap_array".')
return unwrap_array(*args, **kwargs)
| 5,338,038
|
def solve():
"""solve form"""
if request.method == 'GET':
sql="SELECT g.class, p.origin, p.no, p.title, p.address FROM GIVE g, PROBLEM p WHERE g.origin=p.origin AND g.no=p.no AND class = 1"
cursor.execute(sql)
week1=[]
for result in cursor.fetchall():
week1.append({
"class":result['class'],
"origin":result['origin'],
"no":result['no'],
"title":result['title'],
"address":result['address']
})
sql="SELECT g.class, p.origin, p.no, p.title, p.address FROM GIVE g, PROBLEM p WHERE g.origin=p.origin AND g.no=p.no AND class = 2"
cursor.execute(sql)
week2=[]
for result in cursor.fetchall():
week2.append({
"class":result['class'],
"origin":result['origin'],
"no":result['no'],
"title":result['title'],
"address":result['address']
})
return render_template('week.html',week1 = week1, week2 = week2)
| 5,338,039
|
def build_path(entities, path_patterns, strict=False):
"""
Constructs a path given a set of entities and a list of potential
filename patterns to use.
Args:
entities (dict): A dictionary mapping entity names to entity values.
path_patterns (str, list): One or more filename patterns to write
the file to. Entities should be represented by the name
surrounded by curly braces. Optional portions of the patterns
should be denoted by square brackets. Entities that require a
specific value for the pattern to match can pass them inside
carets. Default values can be assigned by specifying a string after
the pipe operator. E.g., (e.g., {type<image>|bold} would only match
the pattern if the entity 'type' was passed and its value is
"image", otherwise the default value "bold" will be used).
            Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'
            Result 1: 'sub-01/var-SES/1045.csv'
strict (bool): If True, all passed entities must be matched inside a
pattern in order to be a valid match. If False, extra entities will
be ignored so long as all mandatory entities are found.
Returns:
A constructed path for this file based on the provided patterns.
"""
if isinstance(path_patterns, string_types):
path_patterns = [path_patterns]
    # Loop over available patterns, return the first one that matches all
for pattern in path_patterns:
# If strict, all entities must be contained in the pattern
if strict:
            defined = re.findall(r'\{(.*?)(?:<[^>]+>)?\}', pattern)
if set(entities.keys()) - set(defined):
continue
# Iterate through the provided path patterns
new_path = pattern
        optional_patterns = re.findall(r'\[(.*?)\]', pattern)
# First build from optional patterns if possible
for optional_pattern in optional_patterns:
optional_chunk = replace_entities(entities, optional_pattern) or ''
new_path = new_path.replace('[%s]' % optional_pattern,
optional_chunk)
# Replace remaining entities
new_path = replace_entities(entities, new_path)
if new_path:
return new_path
return None
| 5,338,040
|
def schedule_compensate():
"""
swagger-doc: 'schedule'
required: []
    req:
      course_schedule_id:
        description: 'course id'
        type: 'string'
      start:
        description: 'course start time, format YYYY-mm-dd HH:MM:ss.SSS'
        type: 'string'
      end:
        description: 'course end time, in sql format YYYY-mm-dd HH:MM:ss.SSS'
        type: 'string'
      schedule_type:
        description: 'lesson type'
        type: 'string'
    res:
      verify_code:
        description: 'id'
        type: ''
"""
course_schedule_id = request.json['course_schedule_id']
start = request.json['start'].replace('T', ' ').replace('Z', '')
end = request.json['end'].replace('T', ' ').replace('Z', '')
schedule_type = request.json['schedule_type']
with session_scope(db) as session:
courseSchedule = session.query(CourseSchedule).filter_by(id=course_schedule_id).one_or_none()
if courseSchedule is None:
return jsonify({
"error": "not found course_schedule: {0}".format(
course_schedule_id)
}), 500
courseschedule = CourseSchedule(
start = start,
end = end,
name = courseSchedule.name,
state = 98,
override_course_type=courseSchedule.override_course_type,
course_id = courseSchedule.course_id,
schedule_type = schedule_type,
delete_flag = 'IN_FORCE',
updated_by=getattr(g, current_app.config['CUR_USER'])['username']
)
session.add(courseschedule)
session.flush()
course = session.query(Course).filter_by(id=courseSchedule.course_id).one_or_none()
class_type =ClassroomTypeEnum.ONE_VS_ONE.name
if course.class_type != 1:
class_type = ClassroomTypeEnum.ONE_VS_MANY.name
live_service.create_room(getattr(g, current_app.config['CUR_USER'])['username'], courseschedule.id,courseSchedule.name, getTimeDiff(start,end),class_type,request.json['start'],0,'en')
studyschedules = session.query(StudySchedule).filter_by(course_schedule_id=course_schedule_id).all()
for studyschedule in studyschedules:
sudyschedule = StudySchedule(
actual_start = start,
actual_end = end,
name = courseSchedule.name,
study_state = 1,
order_id = studyschedule.order_id,
course_schedule_id = courseschedule.id,
student_id = studyschedule.student_id,
schedule_type = schedule_type,
delete_flag = 'IN_FORCE',
updated_by=getattr(g, current_app.config['CUR_USER'])['username']
)
session.add(sudyschedule)
session.flush()
return jsonify({'id':courseschedule.id })
| 5,338,041
|
def parse(xml):
"""
Parse headerdoc XML into a dictionary format.
Extract classes, functions, and global variables from the given XML output
by headerdoc. Some formatting and text manipulation takes place while
parsing. For example, the `@example` is no longer recognized by headerdoc.
`parse()` will extract examples separately from the given description.
[Admonitions](https://python-markdown.github.io/extensions/admonition/)
are also not kept in the correct format by headerdoc. Admonitions text must
be indented to the same level as the admonition title, but headerdoc strips
leading whitespace. The dictionary returned from `parse` will have the
correct indentation restored.
Args:
xml (ElementTree): An `ElementTree` read from a headerdoc XML file. The
root must be the `<header>` element.
Returns:
Dict
"""
return _parse_script(xml)
| 5,338,042
|
def ReadKeywordValueInFile(filename,keyword):
""" Get value in the expression of keyword=vlaue in file
:param str filenname: file name
:param str keywors: keyword string
:return: value(str) - value string
"""
    value = None
    if not os.path.exists(filename): return value
    found = False
    f = open(filename)
    lenkey = len(keyword)
for s in f.readlines():
cm=s.find('#')
if cm > 0: s=s[:cm]
s=s.strip()
if len(s) == 0: continue
items=s.split()
for item in items:
if item[:lenkey] == keyword:
found=True; value=item.split('=')[1]; value=value.strip()
break
f.close()
if not found: value=None
return value
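# Illustrative usage (an addition, not part of the original snippet); writes a
# hypothetical config file and reads a keyword=value entry back with the function above:
import os
with open("example.cfg", "w") as cfg:
    cfg.write("mode=fast  # trailing comment is stripped\n")
print(ReadKeywordValueInFile("example.cfg", "mode"))  # -> 'fast'
os.remove("example.cfg")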
| 5,338,043
|
def deprecation_warning(
old: str,
new: Optional[str] = None,
error: Optional[Union[bool, Exception]] = None) -> None:
"""Warns (via the `logger` object) or throws a deprecation warning/error.
Args:
old (str): A description of the "thing" that is to be deprecated.
new (Optional[str]): A description of the new "thing" that replaces it.
error (Optional[Union[bool, Exception]]): Whether or which exception to
throw. If True, throw ValueError. If False, just warn.
If Exception, throw that Exception.
"""
msg = "`{}` has been deprecated.{}".format(
old, (" Use `{}` instead.".format(new) if new else ""))
if error is True:
raise ValueError(msg)
elif error and issubclass(error, Exception):
raise error(msg)
else:
logger.warning("DeprecationWarning: " + msg +
" This will raise an error in the future!")
| 5,338,044
|
def getSolventList():
"""
Return list of solvent molecules for initializing solvation search form.
If any of the Mintz parameters are None, that solvent is not shown in the list since it will cause error.
"""
database.load('solvation', '')
solvent_list = []
for index, entry in database.solvation.libraries['solvent'].entries.items():
mintz_parameter_list = [entry.data.s_h, entry.data.b_h, entry.data.e_h, entry.data.l_h, entry.data.a_h,
entry.data.c_h]
if not any(h is None for h in mintz_parameter_list):
solvent_list.append((entry.label, index))
return solvent_list
| 5,338,045
|
def subset_samples(md_fp: str, factor: str, unstacked_md: pd.DataFrame,
number_of_samples: int, logs:list) -> pd.DataFrame:
"""
Subset the metadata to a maximum set of 100 samples.
! ATTENTION ! In case there are many columns with np.nan,
these should be selected to select samples
that do have actual numerical values...
Parameters
----------
md_fp : str
Metadata file path.
factor : str
Stratification factor.
unstacked_md : pd.DataFrame
Metadata table subset for the current
stratification and numerical variables.
number_of_samples : int
Number of samples to randomly select to
compute the distributions.
logs : list
List of lists: each nested list is:
[variable, metadata file path, warning message, a number]
Returns
-------
figure_tab : pd.DataFrame
re-stacked metadata table
"""
# get the unique samples names
samples = set(list(unstacked_md.sample_name.tolist()))
# take either 100 or if less, all the samples as data for the figure
if len(samples) < number_of_samples:
logs.append([factor, md_fp, 'not enough samples', len(samples)])
figure_tab = unstacked_md.copy()
else:
random_samples = random.sample(samples, number_of_samples)
figure_tab = unstacked_md.loc[unstacked_md.sample_name.isin(random_samples),:].copy()
return figure_tab
| 5,338,046
|
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.GFile(filename, "rb") as f:
image_data = f.read()
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image, height, width
| 5,338,047
|
def broadcast(msg):
"""
Send msg to every client.
"""
for client in CLIENT_LIST:
if type(msg) is list:
for m in msg:
client.send(m + '\n')
else:
client.send(msg + '\n')
| 5,338,048
|
def to_rna_sequences(model):
"""
Convert all the sequences present in the model to RNA.
:args dict model: Description model.
"""
for seq, path in yield_sub_model(model, ["sequence"]):
set_by_path(model, path, str(Seq(seq).transcribe().lower()))
return model
| 5,338,049
|
async def test_fetch_convbot_with_exc():
"""Test fetch_convbot_with_exc."""
my_route = respx.post("https://convbot-yucongo.koyeb.app/text/").mock(
return_value=Response(204)
)
await fetch_convbot("Hello")
assert my_route.called
| 5,338,050
|
def rest_get_repositories(script, project=None, start=0, limit=50):
"""
Gets a list of repositories via REST
:param script: A TestScript instance
:type script: TestScript
:param project: An optional project
:type project: str
:param start: The offset to start from
:type start: int
:param limit: The max number of items to return
:type limit: int
:return: repositories or None
:rtype: list(Repository)
"""
if project:
project_filter = "projects/%s/" % project
else:
project_filter = ""
j = script.rest("GET", "/rest/api/latest/%srepos" % project_filter, {
"start": str(start),
"limit": str(limit)
})
if is_http_ok():
return map(lambda repo: Repository(repo["project"]["key"], repo["slug"]), j["values"])
| 5,338,051
|
def clean_float(input_float):
"""
Return float in seconds (even if it was a timestamp originally)
"""
return (timestamp_to_seconds(input_float)
if ":" in str(input_float) else std_float(input_float))
| 5,338,052
|
def resize(
image,
output_shape,
order=1,
mode="constant",
cval=0,
clip=True,
preserve_range=False,
anti_aliasing=False,
anti_aliasing_sigma=None,
):
"""A wrapper for Scikit-Image resize().
Scikit-Image generates warnings on every call to resize() if it doesn't
receive the right parameters. The right parameters depend on the version
of skimage. This solves the problem by using different parameters per
version. And it provides a central place to control resizing defaults.
"""
if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
# New in 0.14: anti_aliasing. Default it to False for backward
# compatibility with skimage 0.13.
return skimage.transform.resize(
image,
output_shape,
order=order,
mode=mode,
cval=cval,
clip=clip,
preserve_range=preserve_range,
anti_aliasing=anti_aliasing,
anti_aliasing_sigma=anti_aliasing_sigma,
)
else:
return skimage.transform.resize(
image,
output_shape,
order=order,
mode=mode,
cval=cval,
clip=clip,
preserve_range=preserve_range,
)
| 5,338,053
|
def get_subsystem_fidelity(statevector, trace_systems, subsystem_state):
"""
Compute the fidelity of the quantum subsystem.
Args:
statevector (list|array): The state vector of the complete system
trace_systems (list|range): The indices of the qubits to be traced.
to trace qubits 0 and 4 trace_systems = [0,4]
subsystem_state (list|array): The ground-truth state vector of the subsystem
Returns:
The subsystem fidelity
"""
rho = np.outer(np.conj(statevector), statevector)
rho_sub = partial_trace(rho, trace_systems)
rho_sub_in = np.outer(np.conj(subsystem_state), subsystem_state)
fidelity = np.trace(
sqrtm(
np.dot(
np.dot(sqrtm(rho_sub), rho_sub_in),
sqrtm(rho_sub)
)
)
) ** 2
return fidelity
| 5,338,054
|
def jyfm_data_coke(indicator="焦炭总库存", headers=""):
"""
    jyfm (交易法门) data - ferrous series - coke
:param indicator: ["焦企产能利用率-100家独立焦企产能利用率", "焦企产能利用率-230家独立焦企产能利用率",
"焦炭日均产量-100家独立焦企焦炭日均产量", "焦炭日均产量-230家独立焦企焦炭日均产量", "焦炭总库存",
"焦炭焦企库存-100家独立焦企焦炭库存", "焦炭焦企库存-230家独立焦企焦炭库存", "焦炭钢厂库存", "焦炭港口库存", "焦企焦化利润"]
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: result
:rtype: pandas.DataFrame
"""
res = requests.get(jyfm_data_coke_url_dict[indicator], headers=headers,)
    # The returned data have inconsistent lengths, so pad them first and then transpose
return pd.read_json(res.text, orient="index").T
| 5,338,055
|
def reset_s3_client():
"""Clear the S3 client, to free memory."""
global S3_CLIENT
S3_CLIENT = None
| 5,338,056
|
def test_wrong_data():
"""
Feature: Fault injector
Description: Test fault injector
Expectation: Throw TypeError exception
"""
# load model
ckpt_path = '../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
net = Net()
param_dict = load_checkpoint(ckpt_path)
load_param_into_net(net, param_dict)
model = Model(net)
ds_data = ds.GeneratorDataset(dataset_generator, ['image', 'label'])
ds_label = ds.GeneratorDataset(dataset_generator, ['image', 'label'])
fi_type = ['bitflips_random', 'bitflips_designated', 'random', 'zeros',
'nan', 'inf', 'anti_activation', 'precision_loss']
fi_mode = ['single_layer', 'all_layer']
fi_size = [1]
# Fault injection
with pytest.raises(TypeError) as exc_info:
fi = FaultInjector(model, fi_type, fi_mode, fi_size)
_ = fi.kick_off(ds_data, ds_label, iter_times=100)
_ = fi.metrics()
assert exc_info.type is TypeError
| 5,338,057
|
def generate_index_distribution(
numTrain: int, numTest: int, numValidation: int, params: UQDistDict
) -> Tuple[Any, ...]:
"""
Generates a vector of indices to partition the data for training. NO
CHECKING IS DONE: it is assumed that the data could be partitioned in the
specified blocks and that the block indices describe a coherent partition.
:param int numTrain: Number of training data points
:param int numTest: Number of testing data points
:param int numValidation: Number of validation data points (may be zero)
:param Dict params: Contains the keywords that control the behavior of the function \
(uq_train_fr, uq_valid_fr, uq_test_fr for fraction specification, \
uq_train_vec, uq_valid_vec, uq_test_vec for block list specification, and \
uq_train_bks, uq_valid_bks, uq_test_bks for block number specification)
:return: Tuple of numpy arrays
- indexTrain (int numpy array): Indices for data in training
- indexValidation (int numpy array): Indices for data in validation (if any)
- indexTest (int numpy array): Indices for data in testing (if merging)
"""
if all(k in params for k in ("uq_train_fr", "uq_valid_fr", "uq_test_fr")):
# specification by fraction
print("Computing UQ cross-validation - Distributing by FRACTION")
return generate_index_distribution_from_fraction(
numTrain, numTest, numValidation, params
)
elif all(k in params for k in ("uq_train_vec", "uq_valid_vec", "uq_test_vec")):
# specification by block list
print("Computing UQ cross-validation - Distributing by BLOCK LIST")
return generate_index_distribution_from_block_list(
numTrain, numTest, numValidation, params
)
elif all(k in params for k in ("uq_train_bks", "uq_valid_bks", "uq_test_bks")):
# specification by block size
print("Computing UQ cross-validation - Distributing by BLOCK NUMBER")
return generate_index_distribution_from_blocks(
numTrain, numTest, numValidation, params
)
else:
print("ERROR !! No consistent UQ parameter specification found !! ... exiting ")
raise KeyError(
"No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)"
)
| 5,338,058
|
def generacionT (v, m):
"""
    Generates the transformation matrix T used when signing a hashed message with the private key.
    Parameters:
        v (int): Number of vinegar variables
        m (int): Number of oil variables
    Returns:
        T (matrix): Matrix with dimension nxn, where n = v + m
"""
#Matriz de distorsion
T = []
n = v + m
for i in range(n):
row = []
if i < v:
for k in range(n):
                if k < v:  # identity matrix of dimension v
if i == k:
row += [1]
else:
row += [0]
                else:  # random v x m matrix
if randint(0,2) == 1:
row += [1]
else:
row += [0]
else:
for k in range(n):
                if k < v:  # zero matrix of dimension v
row += [0]
                else:  # identity matrix of dimension m
if i == k:
row += [1]
else:
row += [0]
T += [row]
return T
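# Illustrative usage (an addition, not part of the original snippet); assumes `randint`
# is imported from `random`, as the function above requires:
T = generacionT(3, 2)  # v = 3 vinegar, m = 2 oil -> 5x5 matrix
assert len(T) == 5 and all(len(row) == 5 for row in T)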
| 5,338,059
|
def isotherm_from_bel(path):
"""
Get the isotherm and sample data from a BEL Japan .dat file.
Parameters
----------
path : str
Path to the file to be read.
Returns
-------
dataDF
"""
with open(path) as file:
line = file.readline().rstrip()
meta = {}
data = StringIO()
while line != '':
values = line.split(sep='\t')
line = file.readline().rstrip()
if len(values) < 2: # If "title" section
# read adsorption section
if values[0].strip().lower().startswith('adsorption data'):
line = file.readline().rstrip() # header
file_headers = line.replace('"', '').split('\t')
new_headers = ['branch']
for h in file_headers:
txt = next((
_FIELDS['isotherm_data'][a]
for a in _FIELDS['isotherm_data']
if h.lower().startswith(a)
), h)
new_headers.append(txt)
if txt == 'loading':
meta['loading_basis'] = 'molar'
for (u, c) in (
('/mmol', 'mmol'),
('/mol', 'mol'),
('/ml(STP)', 'cm3(STP)'),
('/cm3(STP)', 'cm3(STP)'),
):
if u in h:
meta['loading_unit'] = c
meta['material_basis'] = 'mass'
for (u, c) in (
('g-1', 'g'),
('kg-1', 'kg'),
):
if u in h:
meta['material_unit'] = c
if txt == 'pressure':
meta['pressure_mode'] = 'absolute'
for (u, c) in (
('/mmHg', 'torr'),
('/torr', 'torr'),
('/kPa', 'kPa'),
('/bar', 'bar'),
):
if u in h:
meta['pressure_unit'] = c
data.write('\t'.join(new_headers) + '\n')
line = file.readline() # firstline
while not line.startswith('0'):
data.write('False\t' + line)
line = file.readline()
# read desorption section
elif values[0].strip().lower().startswith('desorption data'):
file.readline() # header - discard
line = file.readline() # firstline
while not line.startswith('0'):
data.write('True\t' + line)
line = file.readline()
else:
continue
else:
values = [v.strip('"') for v in values]
key = values[0].lower()
try:
field = next(
v for k, v in _FIELDS.items()
if any([key.startswith(n) for n in v.get('text', [])])
)
except StopIteration:
continue
meta[field['name']] = values[1]
# Read prepared table
data.seek(0) # Reset string buffer to 0
data_df = pandas.read_csv(data, sep='\t')
data_df.dropna(inplace=True, how='all', axis='columns')
# Set extra metadata
meta['date'] = datetime.strptime(meta['date'], r'%y/%m/%d').isoformat()
meta['apparatus'] = 'BEL ' + meta["serialnumber"]
meta['loading_key'] = 'loading'
meta['pressure_key'] = 'pressure'
meta['other_keys'] = sorted([
a for a in data_df.columns
if a not in ['loading', 'pressure', 'measurement', 'branch']
])
return PointIsotherm(isotherm_data=data_df, **meta)
| 5,338,060
|
def image(height, width, image_dir):
"""
Create a background with a image
"""
images = [xx for xx in os.listdir(image_dir) \
if xx.endswith(".jpeg") or xx.endswith(".jpg") or xx.endswith(".png")]
if len(images) > 0:
image_name = images[random.randint(0, len(images) - 1)]
pic = Image.open(os.path.join(image_dir, image_name))
pic_original_width = pic.size[0]
pic_original_height = pic.size[1]
if pic.size[0] < width:
pic = pic.resize([width, int(pic.size[1] * (width / pic.size[0]))], Image.ANTIALIAS)
if pic.size[1] < height:
pic = pic.resize([int(pic.size[0] * (height / pic.size[1])), height], Image.ANTIALIAS)
pic_final_width = pic.size[0]
pic_final_height = pic.size[1]
if pic.size[0] == width:
x = 0
else:
x = random.randint(0, pic.size[0] - width)
if pic.size[1] == height:
y = 0
else:
y = random.randint(0, pic.size[1] - height)
return pic.crop((x, y, x + width, y + height)), (image_name, pic_original_width, pic_original_height,
pic_final_width, pic_final_height, x, y, x + width, y + height)
else:
raise Exception("No images where found in the images folder!")
| 5,338,061
|
def model(x, a, b, c):
"""
Compute
.. math::
y = A + Be^{Cx}
Parameters
----------
x : array-like
The value of the model will be the same shape as the input.
a : float
The additive bias.
b : float
The multiplicative bias.
c : float
The exponent.
Return
------
y : array-like
An array of the same shape as ``x``, containing the model
computed for the given parameters.
"""
return a + b * exp(c * x)
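# A quick usage sketch of the model above, assuming `exp` resolves to numpy's
# (the array-like docstring suggests it does). Parameter values are hypothetical.
import numpy as np

x = np.linspace(0.0, 2.0, 5)
y = model(x, a=1.0, b=0.5, c=-2.0)   # y = 1 + 0.5 * exp(-2x)
print(y)                             # decays from 1.5 toward 1.0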
| 5,338,062
|
def test_export_slow_mo_unadjusted():
"""test VideoAsset.export for slow mo video"""
test_dict = UUID_DICT["slow_mo"]
uuid = test_dict["uuid"]
lib = PhotoLibrary()
photo = lib.fetch_uuid(uuid)
with tempfile.TemporaryDirectory(prefix="photokit_test") as tempdir:
export_path = photo.export(tempdir, version=PHOTOS_VERSION_UNADJUSTED)
export_path = pathlib.Path(export_path[0])
assert export_path.is_file()
filename = test_dict["filename"]
assert export_path.stem == pathlib.Path(filename).stem
| 5,338,063
|
def add_abspath(dirs: List):
"""Recursively append the absolute path to the paths in a nested list
If not a list, returns the string with absolute path.
"""
if isinstance(dirs, list):
for i, elem in enumerate(dirs):
if isinstance(elem, str):
dirs[i] = os.path.abspath(elem)
else:
dirs[i] = add_abspath(elem)
return dirs
else:
return os.path.abspath(dirs)
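# A short usage sketch with hypothetical relative paths; the nested list is
# modified in place and also returned.
nested_dirs = ["data", ["models", ["checkpoints"]]]
print(add_abspath(nested_dirs))
# e.g. ['/home/user/proj/data', ['/home/user/proj/models', ['/home/user/proj/checkpoints']]]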
| 5,338,064
|
def download(new_file_name, file_id):
"""
A function that accesses and downloads files from Google Drive.
Parameters
----------
new_file_name: string
The file name you want a google doc to have after download.
file_id: string
The file id of your desired file on google drive.
Returns
-------
N/A:
The file is downloaded to the local machine
under the name new_file_name.
https://developers.google.com/drive/api/v3/quickstart/python?source=post_page
"""
SCOPES = 'https://www.googleapis.com/auth/drive.readonly' # noqa: F841
store = file.Storage('token.json')
creds = store.get()
DRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))
# if you get the shareable link, the link contains this id,
# replace the file_id below
request = DRIVE.files().get_media(fileId=file_id)
# replace the filename and extension in the first field below
fh = io.FileIO(new_file_name, mode='w')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
return
| 5,338,065
|
def equal_matches(
matches_a: kapture.Matches,
matches_b: kapture.Matches) -> bool:
"""
Compare two instances of kapture.Matches.
:param matches_a: first set of matches
:param matches_b: second set of matches
:return: True if they are identical, False otherwise.
"""
assert isinstance(matches_a, kapture.Matches)
assert isinstance(matches_b, kapture.Matches)
current_function_name = inspect.getframeinfo(inspect.currentframe()).function
return equal_sets(matches_a, matches_b, current_function_name)
| 5,338,066
|
def convert_to_example(img_data, target_data, img_shape, target_shape, dltile):
""" Converts image and target data into TFRecords example.
Parameters
----------
img_data: ndarray
Image data
target_data: ndarray
Target data
img_shape: tuple
Shape of the image data (h, w, c)
target_shape: tuple
Shape of the target data (h, w, c)
dltile: str
DLTile key
Returns
-------
Example: TFRecords example
TFRecords example
"""
if len(target_shape) == 2:
target_shape = (*target_shape, 1)
features = {
"image/image_data": _float64_feature(img_data),
"image/height": _int64_feature(img_shape[0]),
"image/width": _int64_feature(img_shape[1]),
"image/channels": _int64_feature(img_shape[2]),
"target/target_data": _float64_feature(target_data),
"target/height": _int64_feature(target_shape[0]),
"target/width": _int64_feature(target_shape[1]),
"target/channels": _int64_feature(target_shape[2]),
"dltile": _bytes_feature(tf.compat.as_bytes(dltile)),
}
return tf.train.Example(features=tf.train.Features(feature=features))
| 5,338,067
|
def linear(args, output_size, bias, bias_start=0.0, scope=None, var_on_cpu=True, wd=0.0):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
var_on_cpu: if True, put the variables on /cpu:0.
wd: weight decay factor; if nonzero, an L2 weight-decay loss on the weight matrix is added to the 'losses' collection.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
assert args
if not isinstance(args, (list, tuple)):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
if var_on_cpu:
with tf.device("/cpu:0"):
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
else:
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
if wd:
weight_decay = tf.mul(tf.nn.l2_loss(matrix), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(1, args), matrix)
if not bias:
return res
if var_on_cpu:
with tf.device("/cpu:0"):
bias_term = tf.get_variable(
"Bias", [output_size],
initializer=tf.constant_initializer(bias_start))
else:
bias_term = tf.get_variable(
"Bias", [output_size],
initializer=tf.constant_initializer(bias_start))
return res + bias_term
| 5,338,068
|
def _start_job(rule, settings, urls=None):
"""Start a new job for an InfernoRule
Note that the output of this function is a tuple of (InfernoJob, DiscoJob)
If the InfernoJob fails to start for some reason (e.g. not enough blobs),
the DiscoJob will be None.
"""
job = InfernoJob(rule, settings, urls)
return job, job.start()
| 5,338,069
|
def set_driver(driver):
"""
Sets the Selenium WebDriver used to execute Helium commands. See also
:py:func:`get_driver`.
"""
_get_api_impl().set_driver_impl(driver)
| 5,338,070
|
def test_code_search_single_page(mocker, responses):
"""Tests ls.ls for a single page of responses"""
response_content = {
'items': [{
'repository': {
'full_name': 'repo/repo1',
},
}, {
'repository': {
'full_name': 'repo/repo2',
},
}],
}
responses.add(responses.GET, 'https://api.github.com/search/code',
status=requests.codes.ok, json=response_content)
query = 'user:user {} in:path "template_path" in:file'.format(constants.TEMPLE_CONFIG_FILE)
repos = ls._code_search(query)
assert repos == {
'git@github.com:repo/repo1.git': {'full_name': 'repo/repo1'},
'git@github.com:repo/repo2.git': {'full_name': 'repo/repo2'},
}
assert len(responses.calls) == 1
url = urllib.parse.urlparse(responses.calls[0].request.url)
parsed_query = urllib.parse.parse_qs(url.query)
assert parsed_query == {
'per_page': ['100'],
'q': [query],
}
| 5,338,071
|
def mult(dic,data,r=1.0,i=1.0,c=1.0,inv=False,hdr=False,x1=1.0,xn='default'):
"""
Multiply by a Constant
Parameter c is used even when r and i are defined. NMRPipe ignores c when
r or i are defined.
Parameters:
* dic Dictionary of NMRPipe parameters.
* data array of spectral data.
* r Constant to multiply real data by.
* i Constant to multiply imaginary data by.
* c Constant to multiply both real and imaginary data by.
* inv Multiply by inverse of Constant (both real and imaginary)
* hdr Use constant value from header.
* x1 First point of region to multiply constant by.
* xn Last point of region to multiply constant by. 'default' specifies
the end of the vector.
"""
mn = x1 - 1
if xn == 'default':
mx = data.shape[-1]
else:
mx = xn
if hdr: # read in C from header
fn = "FDF"+str(int(dic["FDDIMORDER"][0])) # F1, F2, etc
c = dic[fn+"C1"]
r = 1.0
i = 1.0
rf = (r*c) # real factor
cf = (i*c) # complex factor
if inv:
rf = 1/rf
cf = 1/cf
data[...,mn:mx] = p.mult(data[...,mn:mx],r=rf,i=cf,c=1.0)
dic = update_minmax(dic,data)
return dic,data
| 5,338,072
|
def generate_gps_photon(stream, source, focus, angle_gantry, angle_couch, angle_coll, beamletsize, sad, sfd, energy_mev, desc='Diverging Square field', gps_template=None):
"""Generate the gps input file using a template
Args:
stream: writable text stream the gps macro is written to
source (x, y, z, u): source coordinates
focus (x, y, z, u): focus coordinates
angle_gantry (float): gantry angle
angle_couch (float): couch angle
angle_coll (float): collimator angle
beamletsize (x, z, u)
sad (float): sad (units must match beamletsize units)
sfd (float): src-focus-distance (units must match beamletsize units)
energy_mev (float): monoenergetic photon energy in MeV; if None or non-numeric, the 6MV spectrum template is used
desc (str): description string written into the template
gps_template (str): optional template filename overriding the default
"""
extra_kwargs = {}
# try to match requested template
if gps_template is not None:
fullpath = pjoin(TEMPLATES, gps_template)
if not os.path.isfile(fullpath):
raise FileNotFoundError('GPS template "{}" doesn\'t exist'.format(fullpath))
else:
if energy_mev is not None and is_numeric(energy_mev):
gps_template = 'gps_photon_mono.mac.tpl'
extra_kwargs['energy'] = float(energy_mev)
else:
gps_template = 'gps_photon_6MV.mac.tpl'
xp, yp = calculate_plane_rotation(angle_gantry, angle_couch, angle_coll)
adj_fsize = [0.5*sfd/sad*beamletsize[ii] for ii in range(2)]
with open(pjoin(TEMPLATES, gps_template), 'r') as fd:
stream.write(
fd.read().format(
description=desc,
cx=source[0],
cy=source[1],
cz=source[2],
cu=source[3],
rot1x=xp[0],
rot1y=xp[1],
rot1z=xp[2],
rot2x=yp[0],
rot2y=yp[1],
rot2z=yp[2],
fsx=adj_fsize[0],
fsy=adj_fsize[1],
fsu=beamletsize[2],
fx=focus[0],
fy=focus[1],
fz=focus[2],
fu=focus[3],
**extra_kwargs,
)
)
return stream
| 5,338,073
|
def navigation_target(m) -> re.Pattern:
"""A target to navigate to. Returns a regular expression."""
if hasattr(m, 'any_alphanumeric_key'):
return re.compile(re.escape(m.any_alphanumeric_key), re.IGNORECASE)
if hasattr(m, 'navigation_target_name'):
return re.compile(m.navigation_target_name)
return re.compile(re.escape(m.text), re.IGNORECASE)
| 5,338,074
|
def hammingDistance(strA, strB):
""" Determines the bitwise Hamming Distance between two strings. Used to
determine the fitness of a mutating string against the input.
Example:
bin(ord('a')) == '0b1100001'
bin(ord('9')) == '0b0111001'
bin(ord('a') ^ ord('9')) == '0b1011000'
bin(ord('a') ^ ord('9')).count('1') == 3
hammingDistance('a', '9') == 3
hammingDistance('a', '9') * 4 == 12
hammingDistance('aaaa', '9999') == 12
Args:
strA: A string
strB: A string
Returns:
Returns an integer that represents the Hamming Distance from a to b.
Raises:
ValueError: If the two strings are unequal in length or if one input is
not a string.
"""
if (not isinstance(strA, basestring) or not isinstance(strB, basestring)):
raise ValueError('Input is not a string', strA, strB)
if len(strA) != len(strB):
raise ValueError('The two strings are unequal in length', strA, strB)
# base case, hamming distance of nothing and nothing is 0
if (len(strA) == 0) and (len(strB) == 0):
return 0
# XOR both first characters, count the 1s, remaining is recursive case
return (
bin(ord(strA[0]) ^ ord(strB[0])).count('1') +
hammingDistance(strA[1:], strB[1:])
)
| 5,338,075
|
def dump_tree(ifaces):
"""
Yields all the interfaces transitively implemented by the set in
reverse-depth-first order
"""
for i in ifaces:
yield from dump_tree(i.ifaces)
yield i
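# A minimal sketch of the traversal order, using a hypothetical stand-in type
# that only provides the `ifaces` attribute the generator relies on.
from dataclasses import dataclass, field
from typing import List

@dataclass
class _Iface:
    name: str
    ifaces: List["_Iface"] = field(default_factory=list)

base = _Iface("Base")
mid = _Iface("Mid", [base])
leaf = _Iface("Leaf", [mid])
print([i.name for i in dump_tree([leaf])])   # ['Base', 'Mid', 'Leaf'] - deepest ancestors first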
| 5,338,076
|
def json_parser(path):
"""
Generator that parse a JSON file which contains an array of hashes.
INPUT -> string, path to JSON file
OUTPUT -> dict, yield a media
"""
with open(path) as json_file:
json_data = json.load(json_file)
for i in range(len(json_data)):
yield json_data[i]
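# A self-contained usage sketch: write a small JSON array of hypothetical
# media records to a temporary file and stream them back with json_parser.
import json, os, tempfile

records = [{"title": "clip-1"}, {"title": "clip-2"}]
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(records, tmp)
for media in json_parser(tmp.name):
    print(media["title"])
os.remove(tmp.name)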
| 5,338,077
|
def _change_draft_metadata_file_names(bumped_data_structures: dict, new_version: str) -> None:
"""
Change metadata file names of data structures that were RELEASED
from <dataset>__DRAFT.json to <dataset>__<new_version>.json.
"""
for data_structure in bumped_data_structures:
if data_structure["releaseStatus"] == "RELEASED":
datastore.change_draft_metadata_file_name(
data_structure["name"], new_version
)
| 5,338,078
|
def convert(s, syntax=None):
"""Convert a regex regular expression to re syntax.
The first argument is the regular expression, as a string object,
just like it would be passed to regex.compile(). (I.e., pass the
actual string object -- string quotes must already have been
removed and the standard escape processing has already been done,
e.g. by eval().)
The optional second argument is the regex syntax variant to be
used. This is an integer mask as passed to regex.set_syntax();
the flag bits are defined in regex_syntax. When not specified, or
when None is given, the current regex syntax mask (as retrieved by
regex.get_syntax()) is used -- which is 0 by default.
The return value is a regular expression, as a string object that
could be passed to re.compile(). (I.e., no string quotes have
been added -- use quote() below, or repr().)
The conversion is not always guaranteed to be correct. More
syntactical analysis should be performed to detect borderline
cases and decide what to do with them. For example, 'x*?' is not
translated correctly.
"""
table = mastertable.copy()
if syntax is None:
syntax = regex.get_syntax()
if syntax & RE_NO_BK_PARENS:
del table[r'\('], table[r'\)']
del table['('], table[')']
if syntax & RE_NO_BK_VBAR:
del table[r'\|']
del table['|']
if syntax & RE_BK_PLUS_QM:
table['+'] = r'\+'
table['?'] = r'\?'
table[r'\+'] = '+'
table[r'\?'] = '?'
if syntax & RE_NEWLINE_OR:
table['\n'] = '|'
res = ""
i = 0
end = len(s)
while i < end:
c = s[i]
i = i+1
if c == '\\':
c = s[i]
i = i+1
key = '\\' + c
key = table.get(key, key)
res = res + key
else:
c = table.get(c, c)
res = res + c
return res
| 5,338,079
|
def current_user(request):
"""Return the list of all the users with their ids.
"""
query = select([
User.id.label('PK_id'),
User.Login.label('fullname')
]).where(User.id == request.authenticated_userid)
return dict(DBSession.execute(query).fetchone())
| 5,338,080
|
def test_auth_decorator_no_permissions():
"""Test auth decorator when no permissions are supplied"""
with pytest.raises(AuthError) as err:
get_access("", "latte")()
assert err.value.code == 401
assert err.value.description == "User don't have access to resource."
| 5,338,081
|
async def test_allowlist(hass, mock_client):
"""Test an allowlist only config."""
await _setup(
hass,
{
"include_domains": ["light"],
"include_entity_globs": ["sensor.included_*"],
"include_entities": ["binary_sensor.included"],
},
)
tests = [
FilterTest("climate.excluded", False),
FilterTest("light.included", True),
FilterTest("sensor.excluded_test", False),
FilterTest("sensor.included_test", True),
FilterTest("binary_sensor.included", True),
FilterTest("binary_sensor.excluded", False),
]
await _run_filter_tests(hass, tests, mock_client)
| 5,338,082
|
def get_test_examples_labels(dev_example_list, batch_size):
"""
:param dev_example_list: list of filenames containing dev examples
:param batch_size: int
:return: list of nlplingo dev examples, dev labels
"""
dev_chunk_generator = divide_chunks(dev_example_list, NUM_BIG_CHUNKS)
test_examples = []
# dev_chunk_generator yields lists, each of len == NUM_BIG_CHUNKS
for big_chunk in dev_chunk_generator:
chunk_lst = load_big_chunk(big_chunk)  # big_chunk is a list of .npz filepaths
example_lst = []
for chunk in chunk_lst:
example_lst.extend(chunk)
example_generator = divide_chunks(example_lst, batch_size)
for example_chunk in example_generator:
test_examples.extend(example_chunk)
labels = [example.label for example in test_examples]
test_label = np.asarray(labels)
return test_examples, test_label
| 5,338,083
|
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop("traverse", True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ in (dict, OrderedDict):
tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])
elif is_dataclass(expr) and not isinstance(expr, type):
tsk = (
apply,
typ,
(),
(
dict,
[
[f.name, _unpack(getattr(expr, f.name))]
for f in fields(expr)
],
),
)
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
| 5,338,084
|
def get_usernames(joomlasession):
"""Get list of usernames on the homepage."""
users = joomlasession.query(Jos_Users).all()
return [user.username for user in users]
| 5,338,085
|
def minor_block_encoder(block, include_transactions=False, extra_info=None):
"""Encode a block as JSON object.
:param block: the minor block to encode
:param include_transactions: if true transaction details are included, otherwise
only their hashes
:param extra_info: MinorBlockExtraInfo
:returns: a json encodable dictionary
"""
header = block.header
meta = block.meta
header_info = minor_block_header_encoder(header)
d = {
**header_info,
"hashMerkleRoot": data_encoder(meta.hash_merkle_root),
"hashEvmStateRoot": data_encoder(meta.hash_evm_state_root),
"gasUsed": quantity_encoder(meta.evm_gas_used),
"size": quantity_encoder(len(block.serialize())),
}
if include_transactions:
d["transactions"] = []
for i, _ in enumerate(block.tx_list):
d["transactions"].append(tx_encoder(block, i))
else:
d["transactions"] = [
id_encoder(tx.get_hash(), block.header.branch.get_full_shard_id())
for tx in block.tx_list
]
if extra_info:
_add_posw_info_to_resp(d, header.difficulty, extra_info)
return d
| 5,338,086
|
def test_interpretation(capsys, inputs, kgos, options):
"""Test metadata interpretation. Four tests are run:
- A single compliant file
- A single compliant file with verbose output
- Multiple files, the first of which is non-compliant
- Using the --failures-only option to only print output for non-compliant files.
capsys is a pytest fixture that captures standard output/error for testing.
"""
kgo_dir = acc.kgo_root() / "interpret_metadata"
input_path = [kgo_dir / input for input in inputs]
args = [*input_path, *options]
if "non_compliant_weather_codes.nc" in inputs:
with pytest.raises(ValueError, match=".*not metadata compliant.*"):
run_cli(args)
else:
run_cli(args)
captured = capsys.readouterr()
for kgo in kgos:
assert kgo in captured.out
excluded_kgos = list(set(ALL_KGOS) ^ set(kgos))
for kgo in excluded_kgos:
assert kgo not in captured.out
| 5,338,087
|
def heaviside(x):
"""Implementation of the Heaviside step function (https://en.wikipedia.org/wiki/Heaviside_step_function)
Args:
x: a single scalar value (this implementation is not element-wise; see the vectorized sketch below for arrays)
Returns:
0 if x <= 0, otherwise 1
"""
if x <= 0:
return 0
else:
return 1
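# A vectorized sketch for array inputs, assuming numpy is available; the
# scalar implementation above cannot be applied element-wise directly.
import numpy as np

def heaviside_vectorized(x):
    """Element-wise step: 0 where x <= 0, 1 otherwise (same convention as above)."""
    return np.where(np.asarray(x) <= 0, 0, 1)

print(heaviside_vectorized([-2.0, 0.0, 3.5]))   # [0 0 1]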
| 5,338,088
|
def FallbackReader(fname):
"""Guess the encoding of a file by brute force by trying one
encoding after the next until something succeeds.
@param fname: file path to read from
"""
txt = None
for enc in GetEncodings():
try:
handle = open(fname, 'rb')
reader = codecs.getreader(enc)(handle)
txt = reader.read()
reader.close()
except Exception, msg:
handle.close()
continue
else:
return (enc, txt)
return (None, None)
| 5,338,089
|
def graph_3D(data, col="category", list_=[None], game=None, extents=None):
"""
3D t-sne graph data output
:param data: a pandas df generated from app_wrangling.call_boardgame_data()
:param col: string indicating which column (default 'category')
:param list_: list of elements in column (default [None])
:param game: string of board game name (default None)
:param extents: dict with keys 'min_x'/'max_x', 'min_y'/'max_y', 'min_z'/'max_z' giving axis ranges (default None)
:return fig_out: 3D plotly figure
"""
# layout for the 3D plot:
axis_x = dict(
title="",
showgrid=True,
zeroline=False,
showticklabels=False,
showspikes=False,
range=[extents["min_x"], extents["max_x"]],
)
axis_y = axis_x.copy()
axis_y["range"] = [extents["min_y"], extents["max_y"]]
axis_z = axis_x.copy()
axis_z["range"] = [extents["min_z"], extents["max_z"]]
layout_out = go.Layout(
margin=dict(l=0, r=0, b=0, t=0),
scene=dict(xaxis=axis_x, yaxis=axis_y, zaxis=axis_z),
legend=dict(yanchor="top", y=0.93, xanchor="right", x=0.99),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
)
# plotting data:
if (list_ == [None]) or (not list_):
set_data = data.copy(deep=True)
set_data["group"] = "none"
else:
set_data = app_wr.call_boardgame_radio(data, col, list_).explode("group")
data_out = []
# corresponds with dark2 palette:
# had trouble manually setting color palette for graph_object:
color_list = [
"#1b9e77",
"#d95f02",
"#7570b3",
"#e7298a",
"#66a61e",
"#e6ab02",
"#a6761d",
"#666666",
]
i = 0
for idx, val in set_data.groupby(set_data.group):
if idx == "none":
marker_style = dict(
size=val["average_rating"] * 1.6,
symbol="circle",
opacity=0.1,
color="grey",
)
legend_show = False
else:
marker_style = dict(
size=val["average_rating"] * 1.6,
symbol="circle",
opacity=0.4,
color=color_list[i],
)
legend_show = True
i += 1
scatter = go.Scatter3d(
name=idx,
x=val["x"],
y=val["y"],
z=val["z"],
mode="markers",
marker=marker_style,
text=val["name"],
hoverinfo="text+name",
showlegend=legend_show,
)
data_out.append(scatter)
if game:
game_data = data[data["name"] == game]
marker_style = dict(
size=game_data["average_rating"] * 1.6,
symbol="circle",
opacity=1.0,
color="purple",
)
scatter = go.Scatter3d(
name=game,
x=game_data["x"],
y=game_data["y"],
z=game_data["z"],
mode="markers",
marker=marker_style,
text=game_data["name"],
hoverinfo="text",
)
data_out.append(scatter)
fig_out = {"data": data_out, "layout": layout_out}
return fig_out
| 5,338,090
|
def creation_validation(ctx, **kwargs):
"""
Check that the private key path given in the node properties
points to an existing file.
"""
# combine properties
obj = combine_properties(ctx, kwargs=kwargs,
names=[PRIVATE_KEY, PUBLIC_KEY],
properties=['auto_generate'])
# get key
key_path = obj.get(PRIVATE_KEY, {}).get(PATH)
if key_path:
key_path = os.path.expanduser(key_path)
if not os.path.isfile(key_path):
raise cfy_exc.NonRecoverableError(
"Private key file {0} is absent".format(key_path))
| 5,338,091
|
def get_selector(info, mode="advanced"):
"""
The selector that decides the scope of the dashboard. It MUST have the keywords
?work and ?author.
You can override everything here by adapting the query on WDQS:
https://w.wiki/3Cmd
Args:
info: either a dict containing complex information for the selector or a list of QIDs
mode: a string representing the mode. If "advanced", a config dict is expected for the
info parameter. If "basic", a list of QIDs is expected. Defaults to "advanced".
"""
if mode == "advanced":
fields_of_work = info["restriction"]["author_area"]
if fields_of_work is not None:
field_of_work_selector = (
"""
VALUES ?field_of_work """
+ format_with_prefix(fields_of_work)
+ """
?author wdt:P101 ?field_of_work.
"""
)
else:
field_of_work_selector = ""
topic_of_work = info["restriction"]["topic_of_work"]
if topic_of_work is not None:
topic_of_work_selector = (
"""
VALUES ?topics """
+ format_with_prefix(topic_of_work)
+ """
?work wdt:P921/wdt:P279* ?topics.
"""
)
else:
topic_of_work_selector = ""
region = info["restriction"]["institution_region"]
if region is not None:
region_selector = (
"""
VALUES ?regions """
+ format_with_prefix(region)
+ """
?country wdt:P361* ?regions.
?author ( wdt:P108 | wdt:P463 | wdt:P1416 ) / wdt:P361* ?organization .
?organization wdt:P17 ?country.
"""
)
else:
region_selector = ""
gender = info["restriction"]["gender"]
if gender is not None:
gender_selector = (
"""
VALUES ?gender """
+ format_with_prefix(gender)
+ """
?author wdt:P21 ?gender.
"""
)
else:
gender_selector = ""
event = info["restriction"]["event"]
if event is not None:
# P823 - speaker
# P664 - organizer
# P1334 - has participant
# ^P710 - inverse of (participated in)
event_selector = (
"""
VALUES ?event """
+ format_with_prefix(event)
+ """
?event wdt:P823 | wdt:P664 | wdt:P1344 | ^wdt:P710 ?author.
"""
)
else:
event_selector = ""
author_is_topic_of = info["restriction"]["author_is_topic_of"]
if author_is_topic_of is not None:
author_is_topic_of_selector = (
"""
VALUES ?biographical_work """
+ format_with_prefix(author_is_topic_of)
+ """
?biographical_work wdt:P921 ?author.
"""
)
else:
author_is_topic_of_selector = ""
selector = (
field_of_work_selector
+ topic_of_work_selector
+ region_selector
+ gender_selector
+ event_selector
+ author_is_topic_of_selector
+ """
?work wdt:P50 ?author.
"""
)
else:
selector = f"""
VALUES ?work {format_with_prefix(info)} .
?work wdt:P50 ?author .
"""
return selector
| 5,338,092
|
def set_row_csr(csr, rows, value=0):
"""Set all nonzero elements to the given value. Useful to set to 0 mostly."""
for row in rows:
start = csr.indptr[row]
end = csr.indptr[row + 1]
csr.data[start:end] = value
if value == 0:
csr.eliminate_zeros()
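# A short usage sketch, assuming scipy is available: blank out the stored
# entries of rows 0 and 2; with value=0 the zeros are then pruned from the
# sparsity structure by eliminate_zeros().
import numpy as np
from scipy.sparse import csr_matrix

mat = csr_matrix(np.array([[1, 0, 2],
                           [0, 3, 0],
                           [4, 5, 0]]))
set_row_csr(mat, rows=[0, 2])
print(mat.toarray())   # rows 0 and 2 are now all zeros; mat.nnz == 1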
| 5,338,093
|
def get_table_names(self, connection, schema=None, **kw):
"""
Get table names
Args:
connection: active database connection
schema: optional schema name to look up table names in
**kw: additional keyword arguments passed through
Returns:
list of table names
"""
return self._get_table_or_view_names(
["r", "e"], connection, schema, **kw
)
| 5,338,094
|
def get_general(prefix, generator, pars, **kwargs):
""" A general getter function that either gets the asked-for data
from a file or generates it with the given generator function. """
pars = get_pars(pars, **kwargs)
id_pars, pars = get_id_pars_and_set_default_pars(pars)
try:
result = read_tensor_file(prefix=prefix, pars=id_pars,
filename=filename)
except RuntimeError:
result = generator(pars, id_pars)
return result
| 5,338,095
|
def gen_case(test):
"""Generates an OK test case for a test
Args:
test (``Test``): OK test for this test case
Returns:
``dict``: the OK test case
"""
code_lines = str_to_doctest(test.input.split('\n'), [])
for i in range(len(code_lines) - 1):
if code_lines[i+1].startswith('>>>') and len(code_lines[i].strip()) > 3 and not code_lines[i].strip().endswith("\\"):
code_lines[i] += ';'
code_lines.append(test.output)
return {
'code': '\n'.join(code_lines),
'hidden': test.hidden,
'locked': False
}
| 5,338,096
|
def run_count1(known_args, options):
"""Runs the first example pipeline."""
logging.info('Running first pipeline')
p = beam.Pipeline(options=options)
(p | beam.io.ReadFromText(known_args.input)
| Count1()
| beam.io.WriteToText(known_args.output))
p.run().wait_until_finish()
| 5,338,097
|
def get_user_list_view(request):
"""
render user admin view
Arguments:
request {object} -- wsgi http request object
Returns:
html -- render html template
"""
if request.user.has_perm('auth.view_user'):
user_list = User.objects.all()
temp_name = 'admin/list_users.html'
context = {
'user_url_path': '用户',
'obj': user_list
}
else:
temp_name = 'admin/error.html'
context = {}
return render(
request,
temp_name,
context=context
)
| 5,338,098
|
def to_odds(p):
"""
Converts a probability to odds
"""
with np.errstate(divide='ignore'):
return p / (1 - p)
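# A quick numeric check, assuming numpy is imported as np in this module
# (the np.errstate call above implies it). With an array input, p == 1
# maps to inf instead of raising.
import numpy as np

print(to_odds(np.array([0.5, 0.75, 1.0])))   # [ 1.  3. inf]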
| 5,338,099
|