def capitalize_title(title):
    """Convert the first letter of each word in the title to uppercase if needed.
    :param title: str - title string that needs title casing.
    :return: str - title string in title case (first letters capitalized).
    """
    # Capitalize the first letter of each word, leaving the rest of the word unchanged.
    return " ".join(word[:1].upper() + word[1:] for word in title.split(" "))
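# Quick checks for the sketch above (illustrative only): only the first letter of
# each word is upper-cased, the rest of the word is left untouched.
assert capitalize_title("my hobbies") == "My Hobbies"
assert capitalize_title("wAr and peace") == "WAr And Peace"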
| 5,335,700
|
def persist_bra(con: Connection, bra: List[Dict]):
"""
This function is dumb, because it does not take care of the case when bra already exist
"""
with con.begin():
for entities in bra:
for e, data in entities.items():
# https://docs.sqlalchemy.org/en/13/core/tutorial.html#executing-multiple-statements
                if isinstance(data, Generator):
                    # execute() cannot consume a generator, but it does accept a list;
                    # behind the scenes the DBAPI `executemany` is called.
                    data = [x for x in data if x]
                # data can be empty (the generator may yield only None); only execute when there is something to insert
if data:
con.execute(insert(e), data)
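# Hedged usage sketch (not part of the original source): it assumes `bra` is a list of
# {Table: rows} mappings, as the loop above implies; the in-memory SQLite engine and the
# `zones` table are invented purely for illustration.
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

engine = create_engine("sqlite://")
metadata = MetaData()
zones = Table(
    "zones", metadata,
    Column("id", Integer, primary_key=True),
    Column("risk", String),
)
metadata.create_all(engine)
with engine.connect() as con:
    persist_bra(con, [{zones: [{"id": 1, "risk": "low"}, {"id": 2, "risk": "high"}]}])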
| 5,335,701
|
def compare_dicts(cloud1, cloud2):
"""
Compare the dicts containing cloud images or flavours
"""
if len(cloud1) != len(cloud2):
return False
for item in cloud1:
if item in cloud2:
if cloud1[item] != cloud2[item]:
return False
else:
return False
return True
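# Small illustration (not from the original source) with toy image dictionaries:
images_a = {"img-1": "ami-111", "img-2": "ami-222"}
images_b = {"img-1": "ami-111", "img-2": "ami-999"}
assert compare_dicts(images_a, dict(images_a)) is True
assert compare_dicts(images_a, images_b) is False              # value differs
assert compare_dicts(images_a, {"img-1": "ami-111"}) is False  # length differs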
| 5,335,702
|
def ajax_stats():
"""
获取客户统计
:return:
"""
time_based = request.args.get('time_based', 'hour')
result_customer_middleman = customer_middleman_stats(time_based)
result_customer_end_user = customer_end_user_stats(time_based)
line_chart_data = {
'labels': [label for label, _ in result_customer_middleman],
'datasets': [
{
'label': '同行',
'backgroundColor': 'rgba(220,220,220,0.5)',
'borderColor': 'rgba(220,220,220,1)',
'pointBackgroundColor': 'rgba(220,220,220,1)',
'pointBorderColor': '#fff',
'pointBorderWidth': 2,
'data': [data for _, data in result_customer_middleman]
},
{
'label': '终端',
'backgroundColor': 'rgba(151,187,205,0.5)',
'borderColor': 'rgba(151,187,205,1)',
'pointBackgroundColor': 'rgba(151,187,205,1)',
'pointBorderColor': '#fff',
'pointBorderWidth': 2,
'data': [data for _, data in result_customer_end_user]
}
]
}
return json.dumps(line_chart_data, default=json_default)
| 5,335,703
|
def check_output(dut, trace):
"""Check data written to the output FIFO for correctness.
The coroutine monitors the data written to the FIFO and checks whether it
matches the data of the input trace file.
"""
# get trace size
trace_size = trace.size()
# check data written to fifo for correctness
    for i in range(trace_size * 8 // AXI_BIT_WIDTH):
# wait for fifo wr enable
while True:
yield RisingEdge(dut.clk)
# make sure module active status signal is high
check_value("status_active_o", int(dut.status_active_o), 1)
if int(dut.fifo_wr_en_o):
# output fifo data is valid
break
# the order of the 8 byte words is reversed in the 64 byte output word
output = (int(dut.fifo_din_o) & 2**64-1) << 448
output |= ((int(dut.fifo_din_o) >> 64) & 2**64-1) << 384
output |= ((int(dut.fifo_din_o) >> 128) & 2**64-1) << 320
output |= ((int(dut.fifo_din_o) >> 192) & 2**64-1) << 256
output |= ((int(dut.fifo_din_o) >> 256) & 2**64-1) << 192
output |= ((int(dut.fifo_din_o) >> 320) & 2**64-1) << 128
output |= ((int(dut.fifo_din_o) >> 384) & 2**64-1) << 64
output |= (int(dut.fifo_din_o) >> 448) & 2**64-1
        # get expected output
        output_ref = trace.read_reverse_byte_order(i * AXI_BIT_WIDTH // 8,
                                                   AXI_BIT_WIDTH // 8)
# make sure values match
check_value("fifo_din_o", output_ref, output)
# wait one clock cycle and make sure active signal is low then
yield RisingEdge(dut.clk)
check_value("status_active_o", int(dut.status_active_o), 0)
| 5,335,704
|
def str2int(string_with_int):
""" Collect digits from a string """
return int("".join([char for char in string_with_int if char in string.digits]) or 0)
| 5,335,705
|
def grid_to_3d(reward: np.ndarray) -> np.ndarray:
"""Convert gridworld state-only reward R[i,j] to 3D reward R[s,a,s']."""
assert reward.ndim == 2
reward = reward.flatten()
ns = reward.shape[0]
return state_to_3d(reward, ns, 5)
| 5,335,706
|
def find_routes(paths) -> list:
    """Return the unique consecutive-pair routes from a list of paths,
    e.g. [1, 2, 3] --> [(1, 2), (2, 3)]."""
    routes = []
    for path in paths:
        for i in range(len(path) - 1):
            route = (path[i], path[i + 1])
            if route not in routes:
                routes.append(route)
    return routes
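# Example (illustrative): consecutive pairs are collected once across all paths.
assert find_routes([[1, 2, 3], [2, 3, 4]]) == [(1, 2), (2, 3), (3, 4)]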
| 5,335,707
|
def plot_class_distributions(
training_data_filepath, test_data_filepath, figures_folderpath
):
"""Plots the training set and test set class distributions"""
# Load the training and test data
project_dir = Path(__file__).resolve().parents[2]
train_set_path = str(project_dir) + training_data_filepath
_, train_labels = torch.load(train_set_path)
test_set_path = str(project_dir) + test_data_filepath
_, test_labels = torch.load(test_set_path)
# Plot the data distribution of the fish train and test sets
names = ["Training", "Test"]
labels = [train_labels, test_labels]
for i in range(2):
f = plt.figure(figsize=(12, 8))
plt.hist(labels[i].numpy(), density=False, bins=30)
plt.ylabel("Count")
plt.xlabel("Class ID")
f.savefig(
project_dir.joinpath(figures_folderpath).joinpath(
names[i] + "_Class_Distribution.pdf"
),
bbox_inches="tight",
)
| 5,335,708
|
def z_step_ncg_hess_(Z, v, Y, F, phi, C_Z, eta_Z):
"""A wrapper of the hess-vector product for ncg calls."""
return z_step_tron_hess(v, Y, F, phi, C_Z, eta_Z)
| 5,335,709
|
def test_table_no_envvar():
"""Tests that `table()` raises an exception in the absence of the table name env var."""
assert 'DYNAMODB_TABLE' not in os.environ
dynamodb._table = None
with pytest.raises(RuntimeError):
dynamodb.table()
| 5,335,710
|
def list_servers(**kwargs) -> "list[NovaServer]":
"""List all servers under the current project.
Args:
kwargs: Keyword arguments, which will be passed to
:func:`novaclient.v2.servers.list`. For example, to filter by
instance name, provide ``search_opts={'name': 'my-instance'}``
Returns:
All servers associated with the current project.
"""
return nova().servers.list(**kwargs)
| 5,335,711
|
def resolve_xref(
app: Sphinx,
env: BuildEnvironment,
node: nodes.Node,
contnode: nodes.Node,
) -> Optional[nodes.reference]:
"""
Resolve as-yet-unresolved XRefs for :rst:role:`tconf` roles.
:param app: The Sphinx application.
:param env: The Sphinx build environment.
    :param node: The cross reference node which has not yet been resolved.
:param contnode: The child node of the reference node, which provides the formatted text.
"""
if not isinstance(node, nodes.Element): # pragma: no cover
return None
if node.get("refdomain", None) != "std": # pragma: no cover
return None
elif node.get("reftype", None) != "tconf": # pragma: no cover
return None
elif not node.get("reftarget"): # pragma: no cover
return None
std_domain = cast(StandardDomain, env.get_domain("std"))
objtypes = std_domain.objtypes_for_role("tconf") or []
reftarget = node["reftarget"]
candidates = []
for (obj_type, obj_name), (docname, labelid) in std_domain.objects.items():
if not docname: # pragma: no cover
continue
if obj_type in objtypes:
if obj_name.endswith(f".{reftarget}"):
candidates.append((docname, labelid, obj_name))
if not candidates:
return None # pragma: no cover
elif len(candidates) > 1:
logger.warning(
__("more than one target found for cross-reference %r: %s"),
reftarget,
", ".join(c[2] for c in candidates),
type="ref",
subtype="tconf",
location=node,
)
return make_refnode(
app.builder,
env.docname,
candidates[0][0], # docname
candidates[0][1], # labelid
contnode,
)
| 5,335,712
|
def deploy_tester_contract(
web3,
contracts_manager,
deploy_contract,
contract_deployer_address,
get_random_address,
):
"""Returns a function that can be used to deploy a named contract,
    using the contract manager to compile the bytecode and get the ABI"""
def f(contract_name, libs=None, args=None):
json_contract = contracts_manager.get_contract(contract_name)
contract = deploy_contract(
web3,
contract_deployer_address,
json_contract['abi'],
json_contract['bin'],
args,
)
return contract
return f
| 5,335,713
|
def ccsValidator(results=None):
"""
Persist standard file patterns, e.g., '*.fits', 'pd-values*.txt', 'Photo*.txt',
using lcatr.schema.
"""
if results is None:
results = []
files = glob.glob('*/*.fits')
files += glob.glob('*/*.txt')
files += glob.glob('pd-values*.txt')
files += glob.glob('*.png')
files += glob.glob('*.seq')
unique_files = set(files)
results.extend([lcatr.schema.fileref.make(item) for item in unique_files])
results.extend(siteUtils.jobInfo())
results = siteUtils.persist_ccs_versions(results)
#hn results = siteUtils.persist_reb_info(results)
lcatr.schema.write_file(results)
lcatr.schema.validate_file()
| 5,335,714
|
def make_hashable_params(params):
"""
    Checks that the submitted parameters are hashable and returns a hashable form.
    Args:
        params(dict): keyword parameters to convert.
    Returns:
        tuple: a hashable tuple representation of ``params``.
    """
tuple_params = []
for key, value in params.items():
if isinstance(value, dict):
dict_tuple = tuple([(key2, value2) for key2, value2 in value.items()])
tuple_params.append(dict_tuple)
else:
if isinstance(value, (list, set)):
tuple_params.append((key, tuple(value)))
else:
tuple_params.append((key, value))
tuple_params = tuple(tuple_params)
try:
hash(tuple_params)
except TypeError:
raise TypeError('The values of keywords given to this class must be hashable.')
return tuple_params
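# Illustration (not from the original source): the returned tuple can be used wherever a
# hashable key is needed, e.g. as a dict key for caching; list/set values are frozen into tuples.
params = {"ids": [1, 2, 3], "verbose": True, "opts": {"retries": 3}}
key = make_hashable_params(params)
cache = {key: "cached result"}
assert cache[make_hashable_params(params)] == "cached result"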
| 5,335,715
|
def check_member_role(member: discord.Member, role_id: int) -> bool:
"""
Checks if the Member has the Role
"""
return any(role.id == role_id for role in member.roles)
| 5,335,716
|
def test_to_graph_should_return_dct_identifier_as_graph() -> None:
"""It returns a dct_identifier graph isomorphic to spec."""
simpletype = SimpleType()
simpletype.identifier = "http://example.com/simpletypes/1"
simpletype.dct_identifier = "123456789"
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
<http://example.com/simpletypes/1> a modelldcatno:SimpleType ;
dct:identifier "123456789";
.
"""
g1 = Graph().parse(data=simpletype.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
| 5,335,717
|
def tag_bedpe(b, beds, verbose=False):
"""
Tag each end of a BEDPE with a set of (possibly many) query BED files.
For example, given a BEDPE of interacting fragments from a Hi-C experiment,
identify the contacts between promoters and ChIP-seq peaks. In this case,
promoters and ChIP-seq peaks of interest would be provided as BED files.
The strategy is to split the BEDPE into two separate files. Each file is
intersected independently with the set of queries. The results are then
iterated through in parallel to tie the ends back together. It is this
iterator that is returned (see example below).
Parameters
----------
    b : str
        BEDPE-format file. Must be name-sorted.
    beds : dict
Dictionary of BED/GFF/GTF/VCF files to use. After splitting the BEDPE,
these query files (values in the dictionary) will be passed as the `-b`
arg to `bedtools intersect`. The keys are passed as the `names`
argument for `bedtools intersect`.
Returns
-------
Tuple of (iterator, n, extra).
`iterator` is described below. `n` is the total number of lines in the
BEDPE file, which is useful for calculating percentage complete for
downstream work. `extra` is the number of extra fields found in the BEDPE
(also useful for downstream processing).
`iterator` yields tuples of (label, end1_hits, end2_hits) where `label` is
the name field of one line of the original BEDPE file. `end1_hits` and
`end2_hits` are each iterators of BED-like lines representing all
identified intersections across all query BED files for end1 and end2 for
this pair.
Recall that BEDPE format defines a single name and a single score for each
pair. For each item in `end1_hits`, the fields are::
chrom1
start1
end1
name
score
strand1
[extra fields]
query_label
fields_from_query_intersecting_end1
where `[extra fields]` are any additional fields from the original BEDPE,
`query_label` is one of the keys in the `beds` input dictionary, and the
remaining fields in the line are the intersecting line from the
corresponding BED file in the `beds` input dictionary.
Similarly, each item in `end2_hits` consists of:
chrom2
start2
end2
name
score
strand2
[extra fields]
query_label
fields_from_query_intersecting_end2
At least one line is reported for every line in the BEDPE file. If there
was no intersection, the standard BEDTools null fields will be shown. In
`end1_hits` and `end2_hits`, a line will be reported for each hit in each
query.
Example
-------
    Consider the following BEDPE (where "x1" is an arbitrary extra field).
>>> bedpe = pybedtools.example_bedtool('test_bedpe.bed')
>>> print(bedpe) # doctest: +NORMALIZE_WHITESPACE
chr1 1 10 chr1 50 90 pair1 5 + - x1
chr1 2 15 chr1 200 210 pair2 1 + + y1
<BLANKLINE>
And the following transcription start sites (TSSes) in BED4 format:
>>> tsses = pybedtools.example_bedtool('test_tsses.bed')
>>> print(tsses) # doctest: +NORMALIZE_WHITESPACE
chr1 5 6 gene1
chr1 60 61 gene2
chr1 88 89 gene3
<BLANKLINE>
And the following called peaks as BED6:
>>> peaks = pybedtools.example_bedtool('test_peaks.bed')
>>> print(peaks) # doctest: +NORMALIZE_WHITESPACE
chr1 3 4 peak1 50 .
<BLANKLINE>
Then we can get the following iterator, n, and extra:
>>> from pybedtools.contrib.long_range_interaction import tag_bedpe
>>> iterator, n, extra = tag_bedpe(bedpe, {'tss': tsses, 'pk': peaks})
>>> print(n)
2
>>> print(extra)
1
The following illustrates that each item in the iterator represents one
pair, and each item in each group represents an intersection with one end:
>>> for (label, end1_hits, end2_hits) in iterator:
... print('PAIR = {}'.format(label))
... print('end1_hits:')
... for i in end1_hits:
... print(i, end='')
... print('end2_hits:')
... for i in end2_hits:
... print(i, end='') # doctest: +NORMALIZE_WHITESPACE
PAIR = pair1
end1_hits:
chr1 1 10 pair1 5 + x1 pk chr1 3 4 peak1 50 . 1
chr1 1 10 pair1 5 + x1 tss chr1 5 6 gene1 1
end2_hits:
chr1 50 90 pair1 5 - x1 tss chr1 60 61 gene2 1
chr1 50 90 pair1 5 - x1 tss chr1 88 89 gene3 1
PAIR = pair2
end1_hits:
chr1 2 15 pair2 1 + y1 pk chr1 3 4 peak1 50 . 1
chr1 2 15 pair2 1 + y1 tss chr1 5 6 gene1 1
end2_hits:
chr1 200 210 pair2 1 + y1 . -1 -1 . -1 . 0
See the `cis_trans_interactions()` function for one way of summarizing
these data.
"""
b = pybedtools.BedTool(b)
# Figure out if the supplied bedpe had any extra fields. If so, the fields
# are repeated in each of the split output files.
observed = b.field_count()
extra = observed - 10
extra_inds = [10 + i for i in range(extra)]
end1_fn = pybedtools.BedTool._tmp()
end2_fn = pybedtools.BedTool._tmp()
# Performance notes:
# We don't need the overhead of converting every line into
# a pybedtools.Interval object just so we can grab the fields. Doing so
# takes 3.5x more time than simply splitting each line on a tab.
if verbose:
print('splitting BEDPE into separate files')
n = 0
with open(end1_fn, 'w') as end1_out, open(end2_fn, 'w') as end2_out:
for line in open(b.fn):
n += 1
f = line.strip().split('\t')
end1_out.write(
'\t'.join(
(f[i] for i in [0, 1, 2, 6, 7, 8] + extra_inds)) + '\n')
end2_out.write(
'\t'.join(
(f[i] for i in [3, 4, 5, 6, 7, 9] + extra_inds)) + '\n')
# Performance notes:
#
# For small BEDPE and large set of query files, it would be faster to sort
# these independently, intersect with sorted=True, and then re-sort by name
# for the grouping. For large BEDPE, I don't think the sorted=True
# performance gain outweighs the hit from sorting twice.
#
# On the other hand, if BEDPE was coord-sorted in the first place, only
# end2 would need to be sorted and re-sorted. On the other (third!?) hand,
# BEDPE creation from BAM implies name-sorting, so it's probably not
# reasonable to assume coord-sorted.
#
# In the end: don't do any sorting.
end1_bt = pybedtools.BedTool(end1_fn)
end2_bt = pybedtools.BedTool(end2_fn)
names, fns = [], []
for name, fn in beds.items():
names.append(name)
if isinstance(fn, pybedtools.BedTool):
fns.append(fn.fn)
else:
fns.append(fn)
if verbose:
print('intersecting end 1')
end1_hits = end1_bt.intersect(list(fns), names=names, wao=True)
if verbose:
print('intersecting end 2')
end2_hits = end2_bt.intersect(list(fns), names=names, wao=True)
grouped_end1 = itertools.groupby(end1_hits, lambda f: f[3])
grouped_end2 = itertools.groupby(end2_hits, lambda f: f[3])
def gen():
        for (label1, group1), (label2, group2) \
                in zip(grouped_end1, grouped_end2):
assert label1 == label2
yield label1, group1, group2
return gen(), n, extra
| 5,335,718
|
def my_example_serial(datadir="./data", firstfile=0, lastfile=11):
"""
Calculates the mean for numbers across different data files. Is only run
on one process to validate the results of `my_example()`.
If the data files do not exist, creates files containing random numbers.
Parameters
----------
datadir: String. Default: './data'.
The directory the data files are located in.
firstfile, lastfile: Integers. Default: 0, 11.
The range of file numbers that are being read.
Returns
----------
None.
"""
print("Running my example in serial.")
# Check to see if the data directory exists.
if not os.path.exists(datadir) and rank == 0:
os.makedirs(datadir)
# If there aren't any data files, create some data.
fname = "{0}/data_{1}.txt".format(datadir, firstfile)
if not os.path.isfile(fname):
create_data(datadir=datadir, firstfile=firstfile, lastfile=lastfile)
sum_local = 0 # Initialize.
N_local = 0
    # Read every file in the requested range; in this serial version a single
    # process handles all of the files, each read exactly once.
for filenr in range(firstfile, lastfile + 1):
fname = "{0}/data_{1}.txt".format(datadir, filenr)
data_thisfile = np.loadtxt(fname)
# Sum up the data from this file.
sum_local += sum(data_thisfile)
N_local += len(data_thisfile)
print("There were {0} values processed with a sum of {1} and mean of {2}"
.format(N_local, sum_local, sum_local / N_local))
| 5,335,719
|
def point(x: float, y: float, z: float) -> Tuple:
"""Create a point."""
return Tuple(x, y, z, 1.0)
| 5,335,720
|
def partial_pipeline_data(backend, user=None, *args, **kwargs): # pragma: no cover
"""
Add the session key to a signed base64 encoded signature on the email request.
"""
data = backend.strategy.request_data()
if 'signature' in data:
try:
signed_details = signing.loads(data['signature'], key=settings.SECRET_KEY)
session = Session.objects.get(pk=signed_details['session_key'])
except (BadSignature, Session.DoesNotExist) as e:
raise InvalidEmail(backend)
session_details = session.get_decoded()
backend.strategy.session_set('email_validation_address', session_details['email_validation_address'])
backend.strategy.session_set('next', session_details.get('next'))
backend.strategy.session_set('partial_pipeline', session_details['partial_pipeline'])
backend.strategy.session_set(backend.name + '_state', session_details.get(backend.name + '_state'))
backend.strategy.session_set(backend.name + 'unauthorized_token_name',
session_details.get(backend.name + 'unauthorized_token_name'))
partial = backend.strategy.session_get('partial_pipeline', None)
if partial:
idx, backend_name, xargs, xkwargs = \
backend.strategy.partial_from_session(partial)
if backend_name == backend.name:
kwargs.setdefault('pipeline_index', idx)
if user: # don't update user if it's None
kwargs.setdefault('user', user)
kwargs.setdefault('request', backend.strategy.request_data())
xkwargs.update(kwargs)
return xargs, xkwargs
else:
backend.strategy.clean_partial_pipeline()
| 5,335,721
|
def arrayinv(F, Fx):
"""
    Args:
        F: (dx, ds) array of function values at x
        Fx: (dx, dx, ds) array of derivatives of the function at x
    Returns:
        An array with the same shape as F, where column s is the solution of the
        linear system formed from the corresponding slice of Fx and column s of F.
    """
return np.array([np.linalg.solve(a, b) for a, b in zip(Fx.swapaxes(0,2), F.T)]).T
| 5,335,722
|
def schedule_list(req):
"""List scheduled jobs
"""
schedule = []
if os.path.exists(SCHEDULE):
with open(SCHEDULE) as f:
for n, a, t in csv.reader(f):
schedule.append({'Name': n, 'Timer': t, 'Action': a})
return {'Err': '', 'Schedule': schedule}
| 5,335,723
|
def mil(val):
"""convert mil to mm"""
return float(val) * 0.0254
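# Example (illustrative): 100 mil = 0.1 inch = 2.54 mm.
assert abs(mil(100) - 2.54) < 1e-9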
| 5,335,724
|
def ask_number(question, low, high):
"""Poproś o podanie liczby z określonego zakresu."""
response = None
while type(response) != int:
try:
response = int(input(question))
while response not in range(low, high):
response = int(input(question))
except ValueError:
print("Value must be a number")
return response
| 5,335,725
|
def CheckGypFile(gypfile):
"""Check |gypfile| for common mistakes."""
if not os.path.exists(gypfile):
# The file has been deleted.
return
with open(gypfile) as fp:
return CheckGypData(gypfile, fp.read())
| 5,335,726
|
def _beams_longitude_latitude(
ping_header: PingHeader, along_track: numpy.ndarray, across_track: numpy.ndarray
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Calculate the longitude and latitude for each beam.
https://en.wikipedia.org/wiki/Geographic_coordinate_system
    For longitude and latitude calculations:
* lat_m_sf = A - B * cos(2 * lat) + C * cos(4 * lat) - D * cos(6 * lat)
* lon_m_sf = E * cos(lat) - F * cos(3 * lat) + G * cos(5 * lat)
"""
# see https://math.stackexchange.com/questions/389942/why-is-it-necessary-to-use-sin-or-cos-to-determine-heading-dead-reckoning # noqa: E501
lat_radians = math.radians(ping_header.latitude)
coef_a = WGS84Coefficients.A.value
coef_b = WGS84Coefficients.B.value
coef_c = WGS84Coefficients.C.value
coef_d = WGS84Coefficients.D.value
coef_e = WGS84Coefficients.E.value
coef_f = WGS84Coefficients.F.value
coef_g = WGS84Coefficients.G.value
lat_mtr_sf = (
coef_a
- coef_b * math.cos(2 * lat_radians)
+ coef_c * math.cos(4 * lat_radians)
- coef_d * math.cos(6 * lat_radians)
)
lon_mtr_sf = (
coef_e * math.cos(lat_radians)
- coef_f * math.cos(3 * lat_radians)
+ coef_g * math.cos(5 * lat_radians)
)
delta_x = math.sin(math.radians(ping_header.heading))
delta_y = math.cos(math.radians(ping_header.heading))
lon2 = (
ping_header.longitude
+ delta_y / lon_mtr_sf * across_track
+ delta_x / lon_mtr_sf * along_track
)
lat2 = (
ping_header.latitude
- delta_x / lat_mtr_sf * across_track
+ delta_y / lat_mtr_sf * along_track
)
return lon2, lat2
| 5,335,727
|
def true_range_nb(high: tp.Array2d, low: tp.Array2d, close: tp.Array2d) -> tp.Array2d:
"""Calculate true range."""
prev_close = generic_nb.fshift_nb(close, 1)
tr1 = high - low
tr2 = np.abs(high - prev_close)
tr3 = np.abs(low - prev_close)
tr = np.empty(prev_close.shape, dtype=np.float_)
for col in range(tr.shape[1]):
for i in range(tr.shape[0]):
tr[i, col] = max(tr1[i, col], tr2[i, col], tr3[i, col])
return tr
| 5,335,728
|
def laplacian_operator(data):
"""
apply laplacian operator on data
"""
lap = []
lap.append(0.0)
for index in range(1, len(data) - 1):
lap.append((data[index + 1] + data[index - 1]) / 2.0 - data[index])
lap.append(0.0)
return lap
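# Example (illustrative): for a quadratic sequence the interior values are a constant
# half second-difference, and the endpoints are padded with zeros.
assert laplacian_operator([0, 1, 4, 9, 16]) == [0.0, 1.0, 1.0, 1.0, 0.0]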
| 5,335,729
|
def _stringmatcher(pattern):
"""
accepts a string, possibly starting with 're:' or 'literal:' prefix.
returns the matcher name, pattern, and matcher function.
missing or unknown prefixes are treated as literal matches.
helper for tests:
>>> def test(pattern, *tests):
... kind, pattern, matcher = _stringmatcher(pattern)
... return (kind, pattern, [bool(matcher(t)) for t in tests])
exact matching (no prefix):
>>> test('abcdefg', 'abc', 'def', 'abcdefg')
('literal', 'abcdefg', [False, False, True])
regex matching ('re:' prefix)
>>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
('re', 'a.+b', [False, False, True])
force exact matches ('literal:' prefix)
>>> test('literal:re:foobar', 'foobar', 're:foobar')
('literal', 're:foobar', [False, True])
unknown prefixes are ignored and treated as literals
>>> test('foo:bar', 'foo', 'bar', 'foo:bar')
('literal', 'foo:bar', [False, False, True])
"""
if pattern.startswith('re:'):
pattern = pattern[3:]
try:
regex = re.compile(pattern)
        except re.error as e:
raise error.ParseError(_('invalid regular expression: %s')
% e)
return 're', pattern, regex.search
elif pattern.startswith('literal:'):
pattern = pattern[8:]
return 'literal', pattern, pattern.__eq__
| 5,335,730
|
def enu_to_ecef(ref_lat_rad, ref_lon_rad, ref_alt_m, e_m, n_m, u_m):
"""Convert ENU coordinates relative to reference location to ECEF coordinates.
This converts local east-north-up (ENU) coordinates relative to a given
reference position to earth-centered, earth-fixed (ECEF) cartesian
coordinates. The reference position is specified by its geodetic latitude,
longitude and altitude.
Parameters
----------
ref_lat_rad, ref_lon_rad : float or array
Geodetic latitude and longitude of reference position, in radians
ref_alt_m : float or array
Geodetic altitude of reference position, in metres above WGS84 ellipsoid
e_m, n_m, u_m : float or array
East, North, Up coordinates, in metres
Returns
-------
x_m, y_m, z_m : float or array
X, Y, Z coordinates, in metres
"""
# ECEF coordinates of reference point
ref_x_m, ref_y_m, ref_z_m = lla_to_ecef(ref_lat_rad, ref_lon_rad, ref_alt_m)
sin_lat, cos_lat = np.sin(ref_lat_rad), np.cos(ref_lat_rad)
sin_lon, cos_lon = np.sin(ref_lon_rad), np.cos(ref_lon_rad)
x_m = ref_x_m - sin_lon*e_m - sin_lat*cos_lon*n_m + cos_lat*cos_lon*u_m
y_m = ref_y_m + cos_lon*e_m - sin_lat*sin_lon*n_m + cos_lat*sin_lon*u_m
z_m = ref_z_m + cos_lat*n_m + sin_lat*u_m
return x_m, y_m, z_m
| 5,335,731
|
def yield_nodes(sitemap):
"""Generator for all node specifications in a sitemap. It yields tuples
`(code, depth)` whereas `code` is a string representation of the node
and `depth` is a number corresponding to the depth the node corresponds
to.
"""
max_headline_depth = 6
headline_re = r"(={1,%s})(.*)\1" % max_headline_depth
list_re = r"([*]+)(.*)"
for line in sitemap.splitlines():
for regex, depth_start in ((headline_re, 0),
(list_re, max_headline_depth)):
match = re.match(regex, line)
if match:
code = match.group(2).strip()
depth = depth_start + len(match.group(1))
yield create_empty_node(code, depth)
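# Hedged example (assumes create_empty_node(code, depth) just returns the (code, depth)
# pair described in the docstring): headlines yield depths 1-6, list items continue below.
sitemap = "== Chapter ==\n* Section A\n** Subsection A.1"
# Expected roughly: [('Chapter', 2), ('Section A', 7), ('Subsection A.1', 8)]
print(list(yield_nodes(sitemap)))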
| 5,335,732
|
def is_file_url(share_url: str) -> bool:
"""判断是否为文件的分享链接"""
base_pat = r'https?://[a-zA-Z0-9-]*?\.?lanzou[a-z].com/.+' # 子域名可个性化设置或者不存在
user_pat = r'https?://[a-zA-Z0-9-]*?\.?lanzou[a-z].com/i[a-zA-Z0-9]{5,}/?' # 普通用户 URL 规则
if not re.fullmatch(base_pat, share_url):
return False
elif re.fullmatch(user_pat, share_url):
return True
    else:  # VIP users' URLs can be arbitrary
try:
html = requests.get(share_url, headers=headers).text
html = remove_notes(html)
return True if re.search(r'class="fileinfo"|id="file"|文件描述', html) else False
except (requests.RequestException, Exception):
return False
| 5,335,733
|
def get_compton_fraction_artis(energy):
"""Gets the Compton scattering/absorption fraction
and angle following the scheme in ARTIS
Parameters
----------
energy : float
Energy of the gamma-ray
Returns
-------
float
Scattering angle
float
Compton scattering fraction
"""
energy_norm = kappa_calculation(energy)
fraction_max = 1.0 + 2.0 * energy_norm
fraction_min = 1.0
normalization = np.random.random() * compton_opacity_partial(
energy_norm, fraction_max
)
epsilon = 1.0e20
count = 0
while epsilon > 1.0e-4:
fraction_try = (fraction_max + fraction_min) / 2.0
sigma_try = compton_opacity_partial(energy_norm, fraction_try)
if sigma_try > normalization:
fraction_max = fraction_try
epsilon = (sigma_try - normalization) / normalization
else:
fraction_min = fraction_try
epsilon = (normalization - sigma_try) / normalization
count += 1
if count > 1000:
print("Error, failure to get a Compton fraction")
break
angle = np.arccos(1.0 - ((fraction_try - 1) / energy_norm))
return angle, fraction_try
| 5,335,734
|
def ConfigureNoOpAuthIfNeeded():
"""Sets up no-op auth handler if no boto credentials are configured."""
if not HasConfiguredCredentials():
if (config.has_option('Credentials', 'gs_service_client_id') and
not HAS_CRYPTO):
if system_util.InvokedViaCloudSdk():
raise CommandException('\n'.join(
textwrap.wrap(
'Your gsutil is configured with an OAuth2 service account, but '
'you do not have PyOpenSSL or PyCrypto 2.6 or later installed. '
'Service account authentication requires one of these libraries; '
'please reactivate your service account via the gcloud auth '
'command and ensure any gcloud packages necessary for '
'service accounts are present.')))
else:
raise CommandException('\n'.join(
textwrap.wrap(
'Your gsutil is configured with an OAuth2 service account, but '
'you do not have PyOpenSSL or PyCrypto 2.6 or later installed. '
'Service account authentication requires one of these libraries; '
'please install either of them to proceed, or configure a '
'different type of credentials with "gsutil config".')))
else:
# With no boto config file the user can still access publicly readable
# buckets and objects.
from gslib import no_op_auth_plugin # pylint: disable=unused-variable
| 5,335,735
|
def is_argspec_compatible_with_types(argspec, *args, **kwargs):
"""Determines if functions matching 'argspec' accept given 'args'/'kwargs'.
Args:
    argspec: An instance of inspect.ArgSpec to verify against the arguments.
*args: Zero or more positional arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
**kwargs: Zero or more keyword arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
Returns:
True or false, depending on the outcome of the test.
Raises:
TypeError: if the arguments are of the wrong computation_types.
"""
try:
callargs = get_callargs_for_argspec(argspec, *args, **kwargs)
if not argspec.defaults:
return True
except TypeError:
return False
# As long as we have been able to construct 'callargs', and there are no
# default values to verify against the given types, there is nothing more
# to do here, otherwise we have to verify the types of defaults against
# the types we've been given as parameters to this function.
num_specargs_without_defaults = len(argspec.args) - len(argspec.defaults)
for idx, default_value in enumerate(argspec.defaults):
if default_value is not None:
arg_name = argspec.args[num_specargs_without_defaults + idx]
call_arg = callargs[arg_name]
if call_arg is not default_value:
arg_type = computation_types.to_type(call_arg)
default_type = type_utils.infer_type(default_value)
if not type_utils.is_assignable_from(arg_type, default_type):
return False
return True
| 5,335,736
|
def get_span_feats_stopwords(stopwords):
"""Get a span dependency tree unary function"""
return partial(get_span_feats, stopwords=stopwords)
| 5,335,737
|
def get_argument_parser(argparser):
"""Augments the given ArgumentParser for use with the Bonobo ETL framework."""
return bonobo.get_argument_parser(parser=argparser)
| 5,335,738
|
def input_file_path(directory: str, file_name: str) -> Path:
"""Given the string paths to the result directory, and the input file
return the path to the file.
1. check if the input_file is an absolute path, and if so, return that.
2. if the input_file is a relative path, combine it with the result_directory
and return that.
    The resultant path must exist and be a file, otherwise a FileNotFoundError is raised.
"""
path_to_file = Path(file_name)
if path_to_file.is_absolute() and path_to_file.is_file():
return path_to_file
input_directory_path = Path(directory)
path_to_file = input_directory_path / path_to_file
if path_to_file.is_file():
return path_to_file.resolve()
else:
raise FileNotFoundError(
'did not find the input file using result_directory={directory}, input_file={input_file}'.format(
directory=directory, input_file=file_name
)
)
| 5,335,739
|
def identify_in_large_person_group(subscription_key):
"""IdentifyInLargePersonGroup.
This will identify faces in a large person group.
"""
face_base_url = "https://{}.api.cognitive.microsoft.com".format(FACE_LOCATION)
face_client = FaceClient(endpoint=face_base_url, credentials=CognitiveServicesCredentials(subscription_key))
image_url_prefix = "https://csdx.blob.core.windows.net/resources/Face/Images/"
target_image_file_dictionary = {
"Family1-Dad": ["Family1-Dad1.jpg", "Family1-Dad2.jpg"],
"Family1-Mom": ["Family1-Mom1.jpg", "Family1-Mom2.jpg"],
"Family1-Son": ["Family1-Son1.jpg", "Family1-Son2.jpg"],
"Family1-Daughter": ["Family1-Daughter1.jpg", "Family1-Daughter2.jpg"],
"Family2-Lady": ["Family2-Lady1.jpg", "Family2-Lady2.jpg"],
"Family2-Man": ["Family2-Man1.jpg", "Family2-Man2.jpg"]
}
source_image_file_name = "identification1.jpg"
# Create a large person group.
large_person_group_id = str(uuid.uuid4())
print("Create a large person group {}.".format(large_person_group_id))
face_client.large_person_group.create(large_person_group_id=large_person_group_id, name=large_person_group_id)
for target_image_file_dictionary_name in target_image_file_dictionary.keys():
person_id = face_client.large_person_group_person.create(large_person_group_id=large_person_group_id, name=target_image_file_dictionary_name).person_id
# Create a person group person.
person = Person(name=target_image_file_dictionary_name, user_data="Person for sample", person_id=person_id)
print("Create a large person group person {}.".format(person.name))
for target_image_file_name in target_image_file_dictionary[target_image_file_dictionary_name]:
# Add face to the person group person
print("Add face to the large person group person {} from image.".format(target_image_file_dictionary_name, target_image_file_name))
face = face_client.large_person_group_person.add_face_from_url(
large_person_group_id=large_person_group_id,
person_id=person.person_id,
url=image_url_prefix + target_image_file_name,
user_data=target_image_file_name
)
if not face:
raise Exception("No persisted face from image {}".format(target_image_file_name))
# Start to train the large person group.
print("Train large person group {}.".format(large_person_group_id))
face_client.large_person_group.train(large_person_group_id=large_person_group_id)
training_status = face_client.large_person_group.get_training_status(large_person_group_id=large_person_group_id)
print("Training status is {}".format(training_status.status))
if training_status.status == TrainingStatusType.failed:
raise Exception("Training failed with message {}.".format(training_status.message))
# Detect faces from source image url and add detected face ids to source_face_ids
source_face_ids = [detected_face.face_id for detected_face in _detect_faces_helper(face_client=face_client, image_url=image_url_prefix + source_image_file_name)]
# Identify example of identifying faces towards large person group.
identify_results = face_client.face.identify(face_ids=source_face_ids, large_person_group_id=large_person_group_id)
if not identify_results:
print("No person identified in the large person group for faces from the {}.".format(source_image_file_name))
return
for identify_result in identify_results:
person = face_client.large_person_group_person.get(large_person_group_id=large_person_group_id, person_id=identify_result.candidates[0].person_id)
print("Person {} is identified for face: {} - {}, confidence: {}.".format(
person.name,
source_image_file_name,
identify_result.face_id,
identify_result.candidates[0].confidence)
)
# Delete the person group.
face_client.large_person_group.delete(large_person_group_id=large_person_group_id)
print("Delete the large person group {}.\n".format(large_person_group_id))
| 5,335,740
|
def loss(S, K, n_samples=None):
"""Loss function for time-varying graphical lasso."""
if n_samples is None:
n_samples = np.ones(S.shape[0])
return sum(
-ni * logl(emp_cov, precision)
for emp_cov, precision, ni in zip(S, K, n_samples))
| 5,335,741
|
def test_depth():
"""Node.depth."""
root = Node("root")
s0 = Node("sub0", parent=root)
s0b = Node("sub0B", parent=s0)
s0a = Node("sub0A", parent=s0)
s1 = Node("sub1", parent=root)
s1c = Node("sub1C", parent=s1)
s1ca = Node("sub1Ca", parent=s1c)
eq_(root.depth, 0)
eq_(s0.depth, 1)
eq_(s0b.depth, 2)
eq_(s0a.depth, 2)
eq_(s1.depth, 1)
eq_(s1c.depth, 2)
eq_(s1ca.depth, 3)
| 5,335,742
|
def flat_dict(df):
"""
Add each key-value of a nested dictionary that is saved in a dataframe, as a new column
"""
for col in df.columns:
if type(df[col][0]) == dict:
df = pd.concat(
[df.drop([col], axis=1), df[col].apply(pd.Series)], axis=1)
# sometimes a column is dropped but column 0 stays
df = df.drop([0], axis=1, errors='ignore')
return df
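# Example (illustrative): a dict-valued column is expanded into one column per key.
import pandas as pd

df_example = pd.DataFrame({"name": ["a", "b"],
                           "meta": [{"x": 1, "y": 2}, {"x": 3, "y": 4}]})
assert list(flat_dict(df_example).columns) == ["name", "x", "y"]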
| 5,335,743
|
def train_agent(agent, environment, plot_flag=True, *args, **kwargs):
"""Train an agent in an environment.
Parameters
----------
agent: AbstractAgent
environment: AbstractEnvironment
plot_flag: bool, optional.
Other Parameters
----------------
See rollout_agent.
"""
agent.train()
rollout_agent(environment, agent, *args, **kwargs)
if plot_flag:
for key in agent.logger.keys:
plt.plot(agent.logger.get(key))
plt.xlabel("Episode")
plt.ylabel(" ".join(key.split("_")).title())
plt.title(f"{agent.name} in {environment.name}")
plt.show()
print(agent)
| 5,335,744
|
async def run_setup_pys(
targets_with_origins: TargetsWithOrigins,
setup_py_subsystem: SetupPySubsystem,
console: Console,
python_setup: PythonSetup,
distdir: DistDir,
workspace: Workspace,
union_membership: UnionMembership,
) -> SetupPy:
"""Run setup.py commands on all exported targets addressed."""
validate_args(setup_py_subsystem.args)
# Get all exported targets, ignoring any non-exported targets that happened to be
# globbed over, but erroring on any explicitly-requested non-exported targets.
exported_targets: List[ExportedTarget] = []
explicit_nonexported_targets: List[Target] = []
for target_with_origin in targets_with_origins:
tgt = target_with_origin.target
if tgt.has_field(PythonProvidesField):
exported_targets.append(ExportedTarget(tgt))
elif isinstance(target_with_origin.origin, AddressLiteralSpec):
explicit_nonexported_targets.append(tgt)
if explicit_nonexported_targets:
raise TargetNotExported(
"Cannot run setup.py on these targets, because they have no `provides=` clause: "
f'{", ".join(so.address.spec for so in explicit_nonexported_targets)}'
)
if setup_py_subsystem.transitive:
# Expand out to all owners of the entire dep closure.
transitive_targets = await Get(
TransitiveTargets, Addresses(et.target.address for et in exported_targets)
)
owners = await MultiGet(
Get(ExportedTarget, OwnedDependency(tgt))
for tgt in transitive_targets.closure
if is_ownable_target(tgt, union_membership)
)
exported_targets = list(FrozenOrderedSet(owners))
py2 = is_python2(
python_setup.compatibilities_or_constraints(
target_with_origin.target.get(PythonInterpreterCompatibility).value
for target_with_origin in targets_with_origins
)
)
chroots = await MultiGet(
Get(SetupPyChroot, SetupPyChrootRequest(exported_target, py2))
for exported_target in exported_targets
)
# If args were provided, run setup.py with them; Otherwise just dump chroots.
if setup_py_subsystem.args:
setup_py_results = await MultiGet(
Get(
RunSetupPyResult,
RunSetupPyRequest(exported_target, chroot, setup_py_subsystem.args),
)
for exported_target, chroot in zip(exported_targets, chroots)
)
for exported_target, setup_py_result in zip(exported_targets, setup_py_results):
addr = exported_target.target.address.spec
console.print_stderr(f"Writing dist for {addr} under {distdir.relpath}/.")
workspace.write_digest(setup_py_result.output, path_prefix=str(distdir.relpath))
else:
# Just dump the chroot.
for exported_target, chroot in zip(exported_targets, chroots):
addr = exported_target.target.address.spec
provides = exported_target.provides
setup_py_dir = distdir.relpath / f"{provides.name}-{provides.version}"
console.print_stderr(f"Writing setup.py chroot for {addr} to {setup_py_dir}")
workspace.write_digest(chroot.digest, path_prefix=str(setup_py_dir))
return SetupPy(0)
| 5,335,745
|
def test_safety_check(mocker, url, safety_check, should_error):
"""
Test kf_utils.dataservice.delete.safe_delete
"""
# Setup mocks
mock_session = mocker.patch("kf_utils.dataservice.delete.Session")()
mock_resp = MagicMock()
mock_session.delete.return_value = mock_resp
kfids = [f"PT_{i}" for i in range(2)]
if should_error:
with pytest.raises(Exception) as e:
delete_kfids(url, kfids, safety_check=safety_check)
assert "safety_check is ENABLED" in str(e)
else:
delete_kfids(url, kfids, safety_check=safety_check)
| 5,335,746
|
def list_tasks():
"""
显示所有任务列表,方便管理任务
:return:
"""
try:
task_id = request.args.get("task_id")
task_status = request.args.get('status')
        # Build the conditional query tuple
task_info_list = list()
tasks = TaskService.get_tasks_url_num(task_id=task_id, task_status=task_status)
for task in tasks:
hook_rule = task.hook_rule
# RedisService.get_task(task.id)["hook_rule"]
unscaned_urls_num = task.unscaned_urls_num
scaned_urls_num = task.scaned_urls_num
total_url_num = unscaned_urls_num + scaned_urls_num
if task.task_status in [TaskStatus.KILLED, TaskStatus.DONE]:
percent = 100
else:
percent = 0 if total_url_num == 0 else int((scaned_urls_num / total_url_num) * 100)
task_info_list.append({'receiver_emails': task.receivers_email, 'task_name': task.task_name,
'create_time': task.created_time.strftime("%Y-%m-%d %H:%M"), 'percent': percent,
'unscaned_url_num': unscaned_urls_num, 'scaned_url_num': scaned_urls_num,
'total_url_num': total_url_num, 'hook_rule': hook_rule, 'task_id': task.id,
'task_access_key': task.access_key, 'task_status': task.task_status,
"create_user_name": task.create_user_name})
task_info_list.reverse()
response = jsonify(status=200, message="查询成功", data=task_info_list)
return response
except Exception as e:
logger.exception("show_current_tasks rasie error")
if isinstance(e, BaseHunterException):
return jsonify(status=400, message=str(e), data={"extra_info": "查询任务时传入非法的task_id"})
return jsonify(status=500, message="未知异常", data={"extra_info": "查询任务时出现未知异常,请联系管理员查看异常日志"})
| 5,335,747
|
def CRUD_remote_followers(author: Author, follower_dict_list: list):
"""
This will create, update or delete followers based on the remote responses
args:
author - The author to update the followers on
follower_dict_list - The list of followers in private dict form to add to the author's list of followers
return:
None
"""
try:
for author_dict in follower_dict_list:
Author.objects.update_or_create(id=author_dict['id'], defaults=author_dict)
ids = [author_dict['id'] for author_dict in follower_dict_list]
followers = Author.objects.filter(id__in=ids)
author.followers.set(followers)
except Exception as e:
print("CRUD_remote_followers exception : {}\n\n{}".format(type(e), str(e)))
| 5,335,748
|
def make_fixed_size(
protein,
shape_schema,
msa_cluster_size,
extra_msa_size,
num_res=0,
num_templates=0,
):
"""Guess at the MSA and sequence dimension to make fixed size."""
pad_size_map = {
NUM_RES: num_res,
NUM_MSA_SEQ: msa_cluster_size,
NUM_EXTRA_SEQ: extra_msa_size,
NUM_TEMPLATES: num_templates,
}
for k, v in protein.items():
# Don't transfer this to the accelerator.
if k == "extra_cluster_assignment":
continue
shape = list(v.shape)
schema = shape_schema[k]
msg = "Rank mismatch between shape and shape schema for"
assert len(shape) == len(schema), f"{msg} {k}: {shape} vs {schema}"
pad_size = [
pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)
]
padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
padding.reverse()
padding = list(itertools.chain(*padding))
if padding:
protein[k] = torch.nn.functional.pad(v, padding)
protein[k] = torch.reshape(protein[k], pad_size)
return protein
| 5,335,749
|
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_urlhaus package"""
reload_params = {"package": u"fn_urlhaus",
"incident_fields": [],
"action_fields": [],
"function_params": [u"urlhaus_artifact_type", u"urlhaus_artifact_value"],
"datatables": [],
"message_destinations": [u"fn_urlhaus"],
"functions": [u"fn_urlhaus", u"fn_urlhaus_submission"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_urlhaus_lookup", u"example_urlhaus_url_submission"],
"actions": [u"Example: URLhaus Lookup", u"Example: URLhaus URL Submission"]
}
return reload_params
| 5,335,750
|
def urls(page, baseurl=auto, direct=True, prev=True, next=True):
"""
Return a list of pagination URLs extracted form the page.
When baseurl is None relative URLs are returned; pass baseurl
to get absolute URLs.
``prev``, ``next`` and ``direct`` arguments control whether to return
'next page', 'previous page' links and links to specific pages.
By default, all link types are returned.
"""
return get_shared_autopager().urls(page, baseurl, direct, prev, next)
| 5,335,751
|
def sinc_filter(audio: tf.Tensor,
cutoff_frequency: tf.Tensor,
window_size: int = 512,
sample_rate: int = None,
padding: Text = 'same') -> tf.Tensor:
"""Filter audio with sinc low-pass filter.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
cutoff_frequency: Frequency cutoff for low-pass sinc filter. If the
sample_rate is given, cutoff_frequency is in Hertz. If sample_rate is
None, cutoff_frequency is normalized ratio (frequency/nyquist) in the
range [0, 1.0]. Shape [batch_size, n_time, 1].
window_size: Size of the Hamming window to apply to the impulse.
sample_rate: Optionally provide the sample rate.
padding: Either 'valid' or 'same'. For 'same' the final output to be the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
window_size - 1).
Returns:
Filtered audio. Tensor of shape
[batch, audio_timesteps + window_size - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
"""
impulse_response = sinc_impulse_response(cutoff_frequency,
window_size=window_size,
sample_rate=sample_rate)
return fft_convolve(audio, impulse_response, padding=padding)
| 5,335,752
|
def _getDataFlows(blocks):
"""
    Given a block dictionary from bifrost.proclog.load_by_pid(), return a list
of chains that give the data flow.
"""
# Find out what rings we have to work with and which blocks are sources
# or sinks
rings = []
sources, sourceRings = [], []
sinks, sinkRings = [], []
for block in blocks.keys():
rins, routs = [], []
rFound = False
for log in blocks[block].keys():
if log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
rFound = True
value = blocks[block][log][key]
if value not in rings:
rings.append( value )
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
if rFound:
if len(rins) == 0:
sources.append( block )
sourceRings.extend( routs )
if len(routs) == 0:
sinks.append( block )
sinkRings.extend( rins )
# Find out the chains
chains = []
for refRing in rings:
for block in blocks.keys():
rins, routs = [], []
for log in blocks[block].keys():
if log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
value = blocks[block][log][key]
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
if refRing in routs:
refBlock = block
refROuts = routs
for block in blocks.keys():
rins, routs = [], []
dtype = None
for log in blocks[block].keys():
if log.startswith('sequence'):
try:
bits = blocks[block][log]['nbit']
if blocks[block][log]['complex']:
bits *= 2
name = 'cplx' if blocks[block][log]['complex'] else 'real'
dtype = '%s%i' % (name, bits)
except KeyError:
pass
elif log not in ('in', 'out'):
continue
for key in blocks[block][log]:
if key[:4] == 'ring':
value = blocks[block][log][key]
if log == 'in':
if value not in rins:
rins.append( value )
else:
if value not in routs:
routs.append( value )
for ring in rins:
if ring in refROuts:
#print refRing, rins, block
chains.append( {'link':(refBlock,block), 'dtype':dtype} )
# Find out the associations (based on core binding)
associations = []
for block in blocks:
refBlock = block
refCores = []
        for i in range(32):
try:
refCores.append( blocks[block]['bind']['core%i' % i] )
except KeyError:
break
if len(refCores) == 0:
continue
for block in blocks:
if block == refBlock:
continue
cores = []
            for i in range(32):
try:
cores.append( blocks[block]['bind']['core%i' % i] )
except KeyError:
break
if len(cores) == 0:
continue
for core in cores:
if core in refCores:
if (refBlock,block) not in associations:
if (block,refBlock) not in associations:
associations.append( (refBlock, block) )
return sources, sinks, chains, associations
| 5,335,753
|
def find_lowest_cost_node(costs: dict, processed: list):
    """Return the key of the unprocessed node with the lowest cost (None if all are processed)."""
lowest_cost = float("inf") # Infinity
lowest_cost_node = None
for node in costs:
cost = costs[node]
if cost < lowest_cost and node not in processed:
lowest_cost = cost
lowest_cost_node = node
return lowest_cost_node
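# Example (illustrative Dijkstra-helper usage): pick the cheapest node not yet processed.
costs_example = {"a": 2, "b": 5, "c": 1}
assert find_lowest_cost_node(costs_example, processed=["c"]) == "a"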
| 5,335,754
|
def main(options):
"""Parse options and find out what to do"""
if options.graph=='numsites':
tms=[]
sites=[]
import pylab
for sitenum in range(1,10):
options.kin_sites=sitenum
options.plot=False
xs,ys,Tm=kinetic(options)
pylab.plot(xs,ys,'o-',label='%d sites. Tm: %5.1f' %(sitenum,Tm),linewidth=2)
sites.append(sitenum)
tms.append(Tm)
pylab.xlabel('Temperature (C)')
pylab.ylabel('Residual activity (%)')
pylab.ylim([0.0,100.0])
pylab.legend()
pylab.title('Number of protease sites')
pylab.show()
#
pylab.clf()
pylab.plot(sites,tms,'ro-',linewidth=2)
pylab.xlabel('Number of sites')
pylab.ylabel('T50')
pylab.show()
return
elif options.graph=='twosites':
# fast / slow site, + stabilization
import pylab
std_reactions=[[1E10,1E5,5E4,1E10],
[1E10,9E4,5E4,1E10]]
for stab1,stab2 in [[0,0],[options.stab,0],[0,options.stab],[options.stab,options.stab]]:
import string,copy
options.reactions=copy.deepcopy(std_reactions)
options.reactions[0][1]=options.reactions[0][1]+stab1*1000.0
options.reactions[1][1]=options.reactions[1][1]+stab2*1000.0
#options.reactions[0]=string.join(options.reactions[0],',')
#options.reactions[1]=string.join(options.reactions[1],',')
#
options.plot=False
#print options.reactions
#print std_reactions,'R'
xs,ys,Tm=kinetic(options)
pylab.plot(xs,ys,'o-',label='%5.1f,%5.1f, T50: %5.1f' %(stab1,stab2,Tm))
pylab.legend()
pylab.suptitle('Ea1: %3.2e kJ/mol, Ea2 %3.2e kJ/mol stab: %5.1f kJ/mol' %(std_reactions[0][1],std_reactions[1][1],options.stab))
pylab.title('Two sites')
pylab.show()
if options.unfoldtype=='kinetic':
kinetic(options)
elif options.unfoldtype=='equilibrium':
equilibrium(options)
else:
raise Exception('unknown unfolding type: %s' %options.unfoldtype)
#
return
| 5,335,755
|
def scale_img(image, random_coordinate=False):
"""
对原图大小进行处理,
:param image:
:param random_coordinate:
:return:
"""
h, w, c = image.shape
if max(h, w) > 640:
f_scale = min(640./h, 640./w) # scale factor
image = cv2.resize(src=image, dsize=None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_CUBIC)
else:
f_scale = 1.
h_s, w_s, c_s = image.shape # h scaled
    image_full = np.zeros((640, 640, c), dtype=np.uint8)  # blank 640x640 canvas
if random_coordinate: # random coordinate
        h_random = np.random.randint(0, 640 - h_s + 1)
        w_random = np.random.randint(0, 640 - w_s + 1)
image_full[h_random:h_random + h_s, w_random:w_random + w_s, :] = image.astype(np.uint8)
else:
image_full[0:h_s, 0:w_s, :] = image.astype(np.uint8)
return image_full / 255., f_scale
| 5,335,756
|
def magnitude(v: Vector) -> float:
"""computes the magnitude (length) of a vector"""
return math.sqrt(sum_of_squares(v))
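# Quick check (assumes sum_of_squares(v) == sum(x * x for x in v), as in the accompanying
# vector helpers): a 3-4 right triangle has hypotenuse 5.
assert magnitude([3, 4]) == 5.0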
| 5,335,757
|
def pew(text):
"""PEW -- Percentage of Echomimetic (onomatopoeic) Words."""
pew = None
onomatopoeic_words_num = 0
path = '/tmp/onomatopoeic_words_en-1.0.txt'
if not os.path.exists(path):
url = 'https://raw.githubusercontent.com/korniichuk/phd/master/resources/onomatopoeic_words_en-1.0.txt' # noqa: E501
r = requests.get(url)
with open(path, 'w') as f:
f.write(r.text)
with open(path, 'r') as f:
onomatopoeic_words = f.read().splitlines()
words_num, words = word_counter(text, 'en')
for word in words:
word_lower = word.lower().strip()
if word_lower in onomatopoeic_words:
onomatopoeic_words_num += 1
if words_num != 0:
pew = onomatopoeic_words_num / words_num
return pew
| 5,335,758
|
def topngbytes(name, rows, x, y, **k):
"""
Convenience function for creating a PNG file "in memory" as
a string. Creates a :class:`Writer` instance using the keyword
arguments, then passes `rows` to its :meth:`Writer.write` method.
The resulting PNG file is returned as bytes. `name` is used
to identify the file for debugging.
"""
import os
if os.environ.get('PYPNG_TEST_FILENAME'):
print(name, file=sys.stderr)
f = BytesIO()
w = png.Writer(x, y, **k)
w.write(f, rows)
if os.environ.get('PYPNG_TEST_TMP'):
w = open(name, 'wb')
w.write(f.getvalue())
w.close()
return f.getvalue()
| 5,335,759
|
def supply_domes1finesk():
"""
Real Name: b'"Supply Domes-1Finesk"'
Original Eqn: b'MIN("Domes-1 Demad finesk" (Time), (outflow Finesk) )'
Units: b'MCM/Month'
Limits: (None, None)
Type: component
b''
"""
return np.minimum(domes1_demad_finesk(time()), (outflow_finesk()))
| 5,335,760
|
def poll(handle):
"""
    Polls a push_pull handle to determine whether the underlying
asynchronous operation has completed. After `poll()` returns `True`, `synchronize()`
will return without blocking.
Arguments:
handle: A handle returned by an push_pull asynchronous
operation.
Returns:
A flag indicating whether the operation has completed.
"""
return c_lib.byteps_torch_poll(handle) != 0
| 5,335,761
|
def _increase_explicit_hydrogen_for_bond_atom(
rwmol: Chem.rdchem.RWMol,
remove_bidx: bool,
bidx: int,
remove_eidx: bool,
eidx: int,
ai_to_remove: list,
) -> Tuple[Chem.rdchem.RWMol, list]:
"""Increase number of explicit hydrogens for atom in a bond.
    Args:
        rwmol: An RDKit RWmolecule (rdkit.Chem.rdchem.RWMol)
        remove_bidx: Begin atom in bond will increase explicit hydrogens (bool)
        bidx: Index of the begin atom of the bond (int)
        remove_eidx: End atom in bond will increase explicit hydrogens (bool)
        eidx: Index of the end atom of the bond (int)
        ai_to_remove: Running list of atom indices scheduled for removal (list)
Returns:
Tuple with an RDKit RWmolecule and an updated list to remove
(rdkit.Chem.rdchem.RWMol, list).
"""
if remove_bidx or remove_eidx:
if remove_bidx:
ai_to_remove.append(bidx)
_increase_explicit_hydrogens(rwmol, eidx)
if remove_eidx:
ai_to_remove.append(eidx)
_increase_explicit_hydrogens(rwmol, bidx)
rwmol.RemoveBond(bidx, eidx)
return rwmol, ai_to_remove
| 5,335,762
|
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
with tempfile.NamedTemporaryFile() as tmpfile:
temp_file_name = tmpfile.name
urllib.request.urlretrieve(source_url, temp_file_name)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.Size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
| 5,335,763
|
def step_i_get_the_entity_with_params_in_filename(
context, service_name, filename
):
"""
:type context: behave.runner.Context
:type service_name: str
:type filename: str
"""
location = context.json_location
data = read_json_from_file(filename, location)
keys = data.keys()
suffix = "?"
for key in keys:
suffix = "{0}{1}={2}&".format(suffix, key, data[key])
# trim trailing &
suffix = suffix.rstrip("&")
context.services[service_name]["param_data"] = data
service_client = context.services[service_name]["client"]
context.services[service_name]["resp"] = service_client.get(
resource_id=context.services[service_name]["id"], url_suffix=suffix
)
| 5,335,764
|
def plot_historical_actuals_forecast(e, title=None, ylabel='',
include_pred_int=False,
years_prior_include=2,
forecast_display_start=None,
e2=None):
"""Produce a plot of the ensemble forecasts
Returns
----------
plt object
"""
if e.forecast['consensus'] is None:
raise Exception('No forecast found.')
if title is None and e.validation['consensus'] is not None:
title = 'Training, forecast and actuals'
if title is None and e.validation['consensus'] is None:
title = 'Training and forecast'
fig, ax = plt.subplots(figsize=(13, 11))
fig.suptitle(title, fontsize=24)
plt.ylabel(ylabel, fontsize=20)
plt.rc('legend', fontsize=18)
plt.rc('ytick', labelsize=18)
plt.rc('xtick', labelsize=18)
plt.xticks(rotation = 30)
ax.xaxis.set_major_locator(mdates.AutoDateLocator(maxticks=12))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d-%y'))
ax.yaxis.set_major_formatter(FuncFormatter(human_format))
if forecast_display_start is None:
forecast_display_start = min(e.forecast['consensus'].dt)
forecast_mask = (e.forecast['consensus'].dt >= forecast_display_start)
forecast_len = forecast_mask.sum()
max_vals = []
for yp in list(range(1, years_prior_include + 1)):
if len(e.periods_agg) > 0 and max(e.periods_agg) > 1:
agg_str = 'period' + str(max(e.periods_agg))
range_train_yp = {'min':(forecast_display_start -
_datetime_delta(yp, 'Y') +
_datetime_delta(yp, 'D')),
'max':(max(e.forecast['consensus'].dt) -
_datetime_delta(yp, 'Y') +
_datetime_delta(yp, 'D'))}
training_mask = (
(e.training['aggregated'][agg_str].dt >= range_train_yp['min']) &
(e.training['aggregated'][agg_str].dt <= range_train_yp['max']))
train_len = training_mask.sum()
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask][:train_len],
e.training['aggregated'][agg_str].actual.loc[
training_mask][:forecast_len],
label='actuals ' + str(int(yp)) + 'YA',
linewidth=4)
history_len = e.training['aggregated'][agg_str].shape[0]
max_vals = max_vals + [max(
e.training['aggregated'][agg_str].actual.loc[
training_mask][:forecast_len])]
else:
range_train_yp = {'min':(forecast_display_start -
_datetime_delta(yp, 'Y')),
'max':(max(e.forecast['consensus'].dt) -
_datetime_delta(yp, 'Y'))}
training_mask = (
(e.training['history'].dt >= range_train_yp['min']) &
(e.training['history'].dt <= range_train_yp['max']))
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask],
e.training['history'].actual.loc[training_mask],
label='actuals ' + str(int(yp)) + 'YA', linewidth=2)
history_len = e.training['history'].shape[0]
max_vals = max_vals + [max(
e.training['history'].actual.loc[training_mask])]
total_len = history_len + e.forecast['consensus'].shape[0]
fp = plt.plot(e.forecast['consensus'].dt.loc[forecast_mask],
e.forecast['consensus'].forecast.loc[forecast_mask],
label='forecast',
linewidth=2 + 2 * int(total_len < 400),
c='indianred')
max_vals = max_vals + [max(
e.forecast['consensus'].forecast.loc[forecast_mask])]
if include_pred_int:
fp = plt.fill_between(e.forecast['consensus'].dt.loc[forecast_mask],
e.forecast['consensus'].forecast_lower.loc[
forecast_mask],
e.forecast['consensus'].forecast_upper.loc[
forecast_mask],
color='indianred', alpha=0.3,
label=str(round(
e.pred_level * 100)) + '% prediction band')
max_vals = max_vals + [max(e.forecast['consensus'].forecast_upper.loc[
forecast_mask])]
if (e.validation['consensus'] is not None and
len(e.validation['consensus']) > 0):
fp = plt.plot(e.validation['consensus'].dt.loc[forecast_mask],
e.validation['consensus'].actual.loc[forecast_mask],
label='actuals', c='mediumseagreen',
linewidth=2 + 2 * int(total_len < 400))
max_vals = max_vals + [max(
e.validation['consensus'].actual.loc[forecast_mask])]
if (e2 is not None and
len(e.forecast['consensus'].dt) > 0):
forecast_mask2 = (e2.forecast['consensus'].dt >= forecast_display_start)
fp = plt.plot(e2.forecast['consensus'].dt.loc[forecast_mask2],
e2.forecast['consensus'].forecast.loc[forecast_mask2],
label='latest forecast',
linewidth=2 + 2 * int(total_len < 400),
c='purple')
max_vals = max_vals + [max(
e2.forecast['consensus'].forecast.loc[forecast_mask2])]
plt.ylim([0, 1.05 * max(max_vals)])
plt.legend(loc='lower center', ncol=3, framealpha=0.05)
plt.grid()
return fp
| 5,335,765
|
def f2(a, b):
"""
    concurrent_num = 600 is nothing to worry about: this is a smart thread pool, and if the function is fast it will not actually start that many threads.
    This example tests a function whose run time changes dynamically, so it is impossible to cheat by pre-setting parameters for a fixed, known run time. It checks whether stable QPS and automatic growing/shrinking of the thread pool can be achieved.
    Note that the printed thread count also includes a few other threads started by the framework itself, so the number is not exactly equal to the computed number of worker threads.
    ## You can search the console output for the keyword 新启动线程 ("new thread started") to see when the pool decides to grow.
    ## You can search the console output for the keyword 停止线程 ("thread stopped") to see when the pool decides to shrink.
"""
result = a + b
sleep_time = 0.01
    if time.time() - t_start > 60:  # First make the function gradually slower and check whether the framework grows the thread pool on demand
sleep_time = 7
if time.time() - t_start > 120:
sleep_time = 31
if time.time() - t_start > 200:
sleep_time = 79
    if time.time() - t_start > 400:  # Finally make the function fast again to check whether the framework shrinks the thread pool automatically.
sleep_time = 0.8
if time.time() - t_start > 500:
sleep_time = None
    print(f'{time.strftime("%H:%M:%S")}, current thread count is {threading.active_count()}, {a} + {b} = {result}, sleep {sleep_time} seconds')
if sleep_time is not None:
        time.sleep(sleep_time)  # Simulate a task that blocks for n seconds; concurrency is needed to get around this blocking.
return result
| 5,335,766
|
def minimize_loss_single_machine_manual(loss,
accuracy,
layer_collection,
device=None,
session_config=None):
"""Minimize loss with K-FAC on a single machine(Illustrative purpose only).
This function does inverse and covariance computation manually
for illustrative pupose. Check `minimize_loss_single_machine` for
automatic inverse and covariance op placement and execution.
A single Session is responsible for running all of K-FAC's ops. The covariance
and inverse update ops are placed on `device`. All model variables are on CPU.
Args:
loss: 0-D Tensor. Loss to be minimized.
accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
layer_collection: LayerCollection instance describing model architecture.
Used by K-FAC to construct preconditioner.
device: string or None. The covariance and inverse update ops are run on
this device. If empty or None, the default device will be used.
(Default: None)
session_config: None or tf.ConfigProto. Configuration for tf.Session().
Returns:
final value for 'accuracy'.
"""
device_list = [] if not device else [device]
# Train with K-FAC.
g_step = tf.train.get_or_create_global_step()
optimizer = kfac.KfacOptimizer(
learning_rate=0.0001,
cov_ema_decay=0.95,
damping=0.001,
layer_collection=layer_collection,
placement_strategy="round_robin",
cov_devices=device_list,
inv_devices=device_list,
trans_devices=device_list,
momentum=0.9)
(cov_update_thunks,
inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()
def make_update_op(update_thunks):
update_ops = [thunk() for thunk in update_thunks]
return tf.group(*update_ops)
cov_update_op = make_update_op(cov_update_thunks)
with tf.control_dependencies([cov_update_op]):
inverse_op = tf.cond(
tf.equal(tf.mod(g_step, _INVERT_EVERY), 0),
lambda: make_update_op(inv_update_thunks), tf.no_op)
with tf.control_dependencies([inverse_op]):
with tf.device(device):
train_op = optimizer.minimize(loss, global_step=g_step)
tf.logging.info("Starting training.")
with tf.train.MonitoredTrainingSession(config=session_config) as sess:
while not sess.should_stop():
global_step_, loss_, accuracy_, _ = sess.run(
[g_step, loss, accuracy, train_op])
if global_step_ % _REPORT_EVERY == 0:
tf.logging.info("global_step: %d | loss: %f | accuracy: %s",
global_step_, loss_, accuracy_)
return accuracy_
| 5,335,767
|
def downgrade():
""""Downgrade database schema and/or data back to the previous revision."""
op.drop_column('workflows', 'repeat_multiplier')
op.drop_column('workflows', 'unit')
op.drop_column('workflows', 'repeat_every')
| 5,335,768
|
def bass_call_0(function, *args):
"""Makes a call to bass and raises an exception if it fails. Does not consider 0 an error."""
res = function(*args)
if res == -1:
code = BASS_ErrorGetCode()
raise BassError(code, get_error_description(code))
return res
| 5,335,769
|
def saveFigures(folder, name, summaryDict):
"""
:param folder: the folder where we want to save it
:param name: the name of the figures
:param summaryDict: the data of the training we want to plot
Save the plot of the evolution of the training loss and the testing loss through the epochs
    Save the plot of the evolution of the training accuracy and the testing accuracy through the epochs
"""
loss_train = summaryDict['loss_train']
loss_test = summaryDict['loss_test']
acc_train = summaryDict['acc_train']
acc_test = summaryDict['acc_test']
nb_epochs = summaryDict['nb_epochs']
best_epoch = summaryDict['best_model']['epoch']
best_loss_train = summaryDict['best_model']['loss_train']
best_acc_train = summaryDict['best_model']['acc_train']
best_loss_test = summaryDict['best_model']['loss_test']
best_acc_test = summaryDict['best_model']['acc_test']
min_loss = min(min(loss_train), min(loss_test))
max_loss = max(max(loss_train), max(loss_test))
min_acc = min(min(acc_train), min(acc_test))
max_acc = max(max(acc_train), max(acc_test))
x = np.arange(1, nb_epochs + 1)
# Save of the loss
plt.figure()
plt.plot(x, loss_train, 'steelblue', label='Training Loss')
plt.plot(x, loss_test, 'darkorange', label='Testing Loss')
plt.title('Variation of the Loss through the epochs\n' + name)
plt.xlabel('Epoch')
plt.ylabel('Loss value')
plt.plot([1, nb_epochs], [best_loss_train, best_loss_train], 'steelblue', linestyle='--',
label='Model training loss : {0}'.format(round(best_loss_train, 4)))
plt.plot([1, nb_epochs], [best_loss_test, best_loss_test], color='darkorange', linestyle='--',
label='Model testing loss : {0}'.format(round(best_loss_test, 4)))
plt.plot([best_epoch, best_epoch], [min_loss, max_loss], color='dimgray', linestyle='--',
label='Best Epoch : {0}'.format(best_epoch))
plt.legend()
plt.grid()
plt.savefig(os.path.join(folder, 'LossFigure_' + name + '.png'))
# Save the accuracy
plt.figure()
plt.plot(x, acc_train, 'steelblue', label='Training Accuracy')
plt.plot(x, acc_test, 'darkorange', label='Testing Accuracy')
plt.title('Variation of the Accuracy through the epochs\n' + name)
plt.xlabel('Epoch')
plt.ylabel('Accuracy value (%)')
plt.plot([1, nb_epochs], [best_acc_train, best_acc_train], color='steelblue', linestyle='--',
label='Model train accuracy : {0}'.format(round(best_acc_train, 2)))
plt.plot([1, nb_epochs], [best_acc_test, best_acc_test], color='darkorange', linestyle='--',
label='Model test accuracy : {0}'.format(round(best_acc_test, 2)))
plt.plot([best_epoch, best_epoch], [min_acc, max_acc], color='dimgray', linestyle='--',
label='Best Epoch : {0}'.format(best_epoch))
plt.legend()
plt.grid()
plt.savefig(os.path.join(folder, 'AccuracyFigure_' + name + '.png'))
| 5,335,770
|
def port_speed(value: str | None = None) -> int | None:
    """Port speed -> Mb/s parser."""
if value is None:
return None
elif value == "X":
return 0
elif value == "M":
return 100
elif value == "G":
return 1000
elif value == "Q":
return 2500
else:
        raise AsusRouterNotImplementedError(value)
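# Hedged usage sketch (illustration only, not part of the original module):
assert port_speed("M") == 100    # Fast Ethernet
assert port_speed("G") == 1000   # Gigabit
assert port_speed("Q") == 2500   # 2.5 Gbit/s
assert port_speed() is None      # no link information
# Any other letter, e.g. "Z", raises AsusRouterNotImplementedError.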
| 5,335,771
|
def convert_flag_frame_to_strings(flag_frame, sep=', ', empty='OK'):
"""
Convert the `flag_frame` output of :py:func:`~convert_mask_into_dataframe`
into a pandas.Series of strings which are the active flag names separated
by `sep`. Any row where all columns are false will have a value of `empty`.
Parameters
----------
flag_frame : pandas.DataFrame
Boolean DataFrame with descriptive column names
sep : str
String to separate column names by
empty : str
String to replace rows where no columns are True
Returns
-------
pandas.Series
Of joined column names from `flag_frame` separated by `sep` if True.
Has the same index as `flag_frame`.
"""
return np.logical_and(flag_frame, flag_frame.columns + sep).replace(
False, '').sum(axis=1).str.rstrip(sep).replace('', empty)
| 5,335,772
|
def pp_file_to_dataframe(pp_filename):
""" read a pilot point file to a pandas Dataframe
Parameters
----------
pp_filename : str
pilot point file
Returns
-------
df : pandas.DataFrame
a dataframe with pp_utils.PP_NAMES for columns
"""
df = pd.read_csv(pp_filename, delim_whitespace=True,
                     header=None, names=PP_NAMES, usecols=[0, 1, 2, 3, 4])
    df.loc[:, "name"] = df.name.apply(str).apply(str.lower)
return df
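# Hedged illustration of the expected pilot point file layout. It assumes
# PP_NAMES is the usual pyemu column set ["name", "x", "y", "zone", "parval1"];
# the rows below are made up:
#
#   pp_0001   5000.0   9500.0   1   1.0
#   pp_0002   5500.0   9500.0   1   1.0
#
# pp_file_to_dataframe("pp_file.dat") would then return a DataFrame with
# those five columns and lower-cased pilot point names.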
| 5,335,773
|
def get_ps_lib_dirs():
"""
Add directory to list as required
"""
polysync_install = os.path.join('/', 'usr', 'local', 'polysync')
polysync_lib = os.path.join(polysync_install, 'lib')
polysync_vendor = os.path.join(polysync_install, 'vendor', 'lib')
return [
polysync_lib,
polysync_vendor, ]
| 5,335,774
|
def edit_product(request, product_id):
""" Edit a product in the store """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only store owners can do that.')
return redirect(reverse('home'))
product = get_object_or_404(Product, pk=product_id)
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES, instance=product)
if form.is_valid():
form.save()
messages.success(request, 'Successfully updated product!')
return redirect(reverse('individual_product', args=[product.id]))
else:
messages.error(
request, 'Failed to update product. '
'Please ensure the form is valid.')
else:
form = ProductForm(instance=product)
messages.info(request, f'You are editing {product.name}')
template = 'products/edit_product.html'
context = {
'form': form,
'product': product,
}
return render(request, template, context)
| 5,335,775
|
def _resolve_target(target, target_frame='icrs'):
"""Return an `astropy.coordinates.SkyCoord` form `target` and its frame."""
if target_frame == 'icrs':
return parse_coordinates(target)
return SkyCoord(target, frame=target_frame)
| 5,335,776
|
def iter_fragments(fragiter, start_frag_id = None, stop_frag_id = None):
"""Given a fragment iterator and a start and end fragment id,
return an iterator which yields only fragments within the range.
"""
if start_frag_id and stop_frag_id:
dpred = lambda f: fragment_id_lt(f.fragment_id, start_frag_id)
tpred = lambda f: fragment_id_le(f.fragment_id, stop_frag_id)
return itertools.takewhile(tpred, itertools.dropwhile(dpred, fragiter))
elif start_frag_id and not stop_frag_id:
dpred = lambda f: fragment_id_lt(f.fragment_id, start_frag_id)
return itertools.dropwhile(dpred, fragiter)
elif not start_frag_id and stop_frag_id:
tpred = lambda f: fragment_id_le(f.fragment_id, stop_frag_id)
return itertools.takewhile(tpred, fragiter)
return fragiter
| 5,335,777
|
def _remove_candidate(cells, candidate: int, SUDOKU_SIZE: int) -> None:
"""
Remove the candidate from the cells in place
cells: an object supporting __len__ and __getitem__ (np.ndarray or np.flatiter)
"""
for i in range(len(cells)):
if cells[i] == -1:
continue
# Example of the following code:
# if we have SUDOKU_SIZE = 9 and candidate = 4,
# mask = 0b111111111
# bit = 0b000001000
# mask ^ bit = 0b111110111
        # and so the cell will have the corresponding bit set to zero
mask = (1 << SUDOKU_SIZE) - 1
bit = 1 << (candidate - 1)
cells[i] &= mask ^ bit
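# Worked example (illustration only): with SUDOKU_SIZE = 9 each cell is a
# 9-bit candidate mask and -1 marks a cell that must be left untouched.
import numpy as np
_cells = np.array([0b111111111, -1, 0b000011000])
_remove_candidate(_cells, candidate=4, SUDOKU_SIZE=9)
assert _cells[0] == 0b111110111  # bit for candidate 4 cleared
assert _cells[1] == -1           # skipped cell unchanged
assert _cells[2] == 0b000010000  # only candidate 5 remains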
| 5,335,778
|
def evaluate_gpt_with_distgen(settings,
archive_path=None,
merit_f=None,
gpt_input_file=None,
distgen_input_file=None,
workdir=None,
use_tempdir=True,
gpt_bin='$GPT_BIN',
timeout=2500,
auto_phase=False,
verbose=False,
gpt_verbose=False,
asci2gdf_bin='$ASCI2GDF_BIN',
kill_msgs=DEFAULT_KILL_MSGS
):
"""
    Simple GPT evaluation.
Similar to run_astra_with_distgen, but returns a flat dict of outputs.
Will raise an exception if there is an error.
"""
G = run_gpt_with_distgen(settings=settings,
gpt_input_file=gpt_input_file,
distgen_input_file=distgen_input_file,
workdir=workdir,
use_tempdir=use_tempdir,
gpt_bin=gpt_bin,
timeout=timeout,
auto_phase=auto_phase,
verbose=verbose,
gpt_verbose=gpt_verbose,
asci2gdf_bin=asci2gdf_bin,
kill_msgs=kill_msgs)
if merit_f:
merit_f = tools.get_function(merit_f)
output = merit_f(G)
else:
output = default_gpt_merit(G)
if output['error']:
        raise ValueError('error occurred!')
#Recreate Generator object for fingerprint, proper archiving
# TODO: make this cleaner
gen = Generator()
gen.input = G.distgen_input
fingerprint = fingerprint_gpt_with_distgen(G, gen)
output['fingerprint'] = fingerprint
if archive_path:
path = tools.full_path(archive_path)
assert os.path.exists(path), f'archive path does not exist: {path}'
archive_file = os.path.join(path, fingerprint+'.h5')
output['archive'] = archive_file
# Call the composite archive method
archive_gpt_with_distgen(G, gen, archive_file=archive_file)
return output
| 5,335,779
|
def bst_right_imbalanced():
"""Bst that extends right."""
from bst import BST
test_bst = BST((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
return test_bst
| 5,335,780
|
def retry(exception_to_check, tries=4, delay=0.5, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
Args:
exception_to_check (Exception): the exception to check.
may be a tuple of exceptions to check
tries (int): number of times to try (not retry) before giving up
delay (float, int): initial delay between retries in seconds
backoff (int): backoff multiplier e.g. value of 2 will double the delay
each retry
logger (logging.Logger): logger to use. If None, print
"""
def deco_retry(func):
@wraps(func)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return func(*args, **kwargs)
except exception_to_check as exc:
msg = "%s, Retrying in %s seconds..." % (str(exc), mdelay)
if logger:
logger.warning(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return func(*args, **kwargs)
return f_retry # true decorator
return deco_retry
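# Hedged usage sketch (illustration only; `flaky_fetch` is a made-up name):
# retry up to 3 times on ConnectionError, sleeping 0.5 s and then 1.0 s
# between attempts before letting the final exception propagate.
@retry(ConnectionError, tries=3, delay=0.5, backoff=2)
def flaky_fetch(url):
    raise ConnectionError("pretend the network is down")
# Calling flaky_fetch("http://example.com") would sleep ~1.5 s in total and
# then re-raise ConnectionError from the last attempt.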
| 5,335,781
|
def create_wham_whamr_csv(
datapath,
savepath,
fs,
version="min",
savename="whamr_",
set_types=["tr", "cv", "tt"],
add_reverb=True,
):
"""
This function creates the csv files to get the speechbrain data loaders for the whamr dataset.
Arguments:
datapath (str) : path for the wsj0-mix dataset.
savepath (str) : path where we save the csv file
fs (int) : the sampling rate
version (str) : min or max
savename (str) : the prefix to use for the .csv files
        set_types (list) : the sets to create
        add_reverb (bool) : whether to use the reverberant mixtures and anechoic sources
"""
if fs == 8000:
sample_rate = "8k"
elif fs == 16000:
sample_rate = "16k"
else:
raise ValueError("Unsupported sampling rate")
if add_reverb:
mix_both = "mix_both_reverb/"
s1 = "s1_anechoic/"
s2 = "s2_anechoic/"
else:
mix_both = "mix_both/"
s1 = "s1/"
s2 = "s2/"
for set_type in set_types:
mix_path = os.path.join(
datapath, "wav{}".format(sample_rate), version, set_type, mix_both,
)
s1_path = os.path.join(
datapath, "wav{}".format(sample_rate), version, set_type, s1,
)
s2_path = os.path.join(
datapath, "wav{}".format(sample_rate), version, set_type, s2,
)
noise_path = os.path.join(
datapath, "wav{}".format(sample_rate), version, set_type, "noise/"
)
# rir_path = os.path.join(
# datapath, "wav{}".format(sample_rate), version, set_type, "rirs/"
# )
files = os.listdir(mix_path)
mix_fl_paths = [mix_path + fl for fl in files]
s1_fl_paths = [s1_path + fl for fl in files]
s2_fl_paths = [s2_path + fl for fl in files]
noise_fl_paths = [noise_path + fl for fl in files]
# rir_fl_paths = [rir_path + fl + ".t" for fl in files]
csv_columns = [
"ID",
"duration",
"mix_wav",
"mix_wav_format",
"mix_wav_opts",
"s1_wav",
"s1_wav_format",
"s1_wav_opts",
"s2_wav",
"s2_wav_format",
"s2_wav_opts",
"noise_wav",
"noise_wav_format",
"noise_wav_opts",
# "rir_t",
# "rir_format",
# "rir_opts",
]
with open(
os.path.join(savepath, savename + set_type + ".csv"), "w"
) as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for (i, (mix_path, s1_path, s2_path, noise_path),) in enumerate(
zip(
mix_fl_paths,
s1_fl_paths,
s2_fl_paths,
noise_fl_paths,
# rir_fl_paths,
)
):
row = {
"ID": i,
"duration": 1.0,
"mix_wav": mix_path,
"mix_wav_format": "wav",
"mix_wav_opts": None,
"s1_wav": s1_path,
"s1_wav_format": "wav",
"s1_wav_opts": None,
"s2_wav": s2_path,
"s2_wav_format": "wav",
"s2_wav_opts": None,
"noise_wav": noise_path,
"noise_wav_format": "wav",
"noise_wav_opts": None,
# "rir_t": rir_path,
# "rir_format": ".t",
# "rir_opts": None,
}
writer.writerow(row)
| 5,335,782
|
def test_FUNC_call_WITH_not_existing_endpoint_EXPECT_not_found() -> None:
"""
Check whether the call failed.
:return: No return.
"""
with pook.use():
pook.get("https://example.com/does-not-exist", status=404)
with pytest.raises(InternalError, match="Http call failed with status: 404."):
HttpCall.call('GET', "https://example.com/does-not-exist")
| 5,335,783
|
def bio_ner_to_tsv(dataDir, readFile, wrtDir, transParamDict, isTrainFile=False):
"""
This function transforms the BIO style data and transforms into the tsv format required
for NER. Following transformed files are written at wrtDir,
- NER transformed tsv file.
- NER label map joblib file.
For using this transform function, set ``transform_func`` : **bio_ner_to_tsv** in transform file.
Args:
        dataDir (:obj:`str`) : Path to the directory where the raw data files to be read are present.
readFile (:obj:`str`) : This is the file which is currently being read and transformed by the function.
wrtDir (:obj:`str`) : Path to the directory where to save the transformed tsv files.
transParamDict (:obj:`dict`, defaults to :obj:`None`): Dictionary requiring the following parameters as key-value
- ``save_prefix`` (defaults to 'bio_ner') : save file name prefix.
- ``col_sep`` : (defaults to " ") : separator for columns
- ``tag_col`` (defaults to 1) : column number where label NER tag is present for each row. Counting starts from 0.
        - ``sen_sep`` (defaults to "\n") : end of sentence separator.
"""
transParamDict.setdefault("save_prefix", "bio_ner")
transParamDict.setdefault("tag_col", 1)
transParamDict.setdefault("col_sep", " ")
transParamDict.setdefault("sen_sep", "\n")
f = open(os.path.join(dataDir,readFile))
nerW = open(os.path.join(wrtDir, '{}_{}.tsv'.format(transParamDict["save_prefix"],
readFile.split('.')[0])), 'w')
labelMapNer = {}
sentence = []
senLens = []
labelNer = []
uid = 0
print("Making data from file {} ...".format(readFile))
for i, line in enumerate(f):
if i%5000 == 0:
print("Processing {} rows...".format(i))
        line = line.strip(' ')  # don't use bare strip() here as it would also remove \n
wordSplit = line.rstrip('\n').split(transParamDict["col_sep"])
if len(line)==0 or line[0]==transParamDict["sen_sep"]:
if len(sentence) > 0:
nerW.write("{}\t{}\t{}\n".format(uid, labelNer, sentence))
senLens.append(len(sentence))
#print("len of sentence :", len(sentence))
sentence = []
labelNer = []
uid += 1
continue
sentence.append(wordSplit[0])
labelNer.append(wordSplit[int(transParamDict["tag_col"])])
if isTrainFile:
if wordSplit[int(transParamDict["tag_col"])] not in labelMapNer:
# ONLY TRAIN FILE SHOULD BE USED TO CREATE LABEL MAP FILE.
                labelMapNer[wordSplit[int(transParamDict["tag_col"])]] = len(labelMapNer)
print("NER File Written at {}".format(wrtDir))
#writing label map
if labelMapNer != {} and isTrainFile:
print("Created NER label map from train file {}".format(readFile))
print(labelMapNer)
labelMapNerPath = os.path.join(wrtDir, "{}_{}_label_map.joblib".format(transParamDict["save_prefix"], readFile.split('.')[0]) )
joblib.dump(labelMapNer, labelMapNerPath)
print("label Map NER written at {}".format(labelMapNerPath))
f.close()
nerW.close()
print('Max len of sentence: ', max(senLens))
print('Mean len of sentences: ', sum(senLens)/len(senLens))
print('Median len of sentences: ', median(senLens))
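# Hedged illustration of the expected BIO input (contents made up): one token
# and its tag per line separated by `col_sep`, with a blank line (the default
# `sen_sep`) ending each sentence, e.g.
#
#   John B-PER
#   lives O
#   in O
#   Paris B-LOC
#
# Each sentence becomes one tsv row of the form "<uid>\t<tag list>\t<token list>".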
| 5,335,784
|
def read_key_value_pairs_from_file(*path):
"""
Read key value pairs from a file (each pair on a separate line).
Key and value are separated by ' ' as often used by the kernel.
@return a generator of tuples
"""
with open(os.path.join(*path)) as f:
for line in f:
yield line.split(' ', 1)
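# Hedged usage sketch (illustration only): kernel files such as cgroup's
# memory.stat use "key value" lines; here we fake one with a temp file.
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".stat", delete=False) as _tmp:
    _tmp.write("cache 4096\nrss 8192\n")
_pairs = {k: v.strip() for k, v in read_key_value_pairs_from_file(_tmp.name)}
assert _pairs == {"cache": "4096", "rss": "8192"}
os.remove(_tmp.name)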
| 5,335,785
|
def HESSIAN_DIAG(fn):
"""Generates a function which computes per-argument partial Hessians."""
def h_fn(*args, **kwargs):
args = (args,) if not isinstance(args, (tuple, list)) else tuple(args)
ret = [
jaxm.hessian(
lambda arg: fn(*args[:i], arg, *args[i + 1 :], **kwargs)
)(arg)
for (i, arg) in enumerate(args)
]
return ret
return h_fn
| 5,335,786
|
def match_xy(x1, y1, x2, y2, neighbors=1):
"""Match x1 & y1 to x2 & y2, neighbors nearest neighbors.
Finds the neighbors nearest neighbors to each point in x2, y2 among
all x1, y1."""
from scipy.spatial import cKDTree
vec1 = numpy.array([x1, y1]).T
vec2 = numpy.array([x2, y2]).T
kdt = cKDTree(vec1)
dist, idx = kdt.query(vec2, neighbors)
m1 = idx.ravel()
m2 = numpy.repeat(numpy.arange(len(vec2), dtype='i4'), neighbors)
    dist = dist.ravel()
m = m1 < len(x1) # possible if fewer than neighbors elements in x1.
return m1[m], m2[m], dist[m]
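# Hedged usage sketch (illustration only): match each point of the second set
# to its single nearest neighbour in the first set.
import numpy
_x1, _y1 = numpy.array([0.0, 1.0, 5.0]), numpy.array([0.0, 1.0, 5.0])
_x2, _y2 = numpy.array([0.9, 5.2]), numpy.array([1.1, 4.8])
_m1, _m2, _d = match_xy(_x1, _y1, _x2, _y2, neighbors=1)
# _m1 indexes (x1, y1) and comes out as [1, 2]; _m2 indexes (x2, y2) as [0, 1].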
| 5,335,787
|
def make_doc():
""" Only used for sphinx documentation """
doc_app = Flask(__name__)
doc_app.register_blueprint(blueprint())
return doc_app
| 5,335,788
|
def test_class_named_argument_default_value():
"""Allow classes as default argument values if argument name ends with `_class`."""
class Foo:
pass
class Bar:
def __init__(self, foo_class=Foo):
self.foo_class = foo_class
class Container(Injector):
bar = Bar
assert Container.bar.foo_class is Foo
| 5,335,789
|
def get_participants(reaction):
"""
get iterator for the reaction participants (reactants + products)
@type reaction: libsbml.Reaction
@rtype: Iterator
"""
for s in reaction.getListOfReactants():
yield s
for s in reaction.getListOfProducts():
yield s
| 5,335,790
|
def logout(request):
"""Logs out the user"""
user_logout(request)
return redirect(auth_views.login)
| 5,335,791
|
def test_cannot_start_in_midworkflow(sample_data1):
"""Ensures that intermediate fates do not create labors when no labor
exists.
Given a Fate C -> D, and intermediate Fate D -> E,
Throw event D and ensure Labor D is not created since Labor C does not exist.
"""
labors = sample_data1.query(Labor).all()
assert len(labors) == 0
event_type_d = sample_data1.query(EventType).get(4)
host = sample_data1.query(Host).get(1)
Event.create(sample_data1, host, "system", event_type_d)
event = (
sample_data1.query(Event)
.order_by(desc(Event.id)).first()
)
assert event.host == host
assert event.event_type == event_type_d
labors = Labor.get_open_unacknowledged(sample_data1)
assert len(labors) == 0
| 5,335,792
|
def test_invalid_not_square():
"""
Tests that an error is raised when constructing an operator with a matrix
which is not square.
"""
with pytest.raises(ValueError):
sr.RealSpaceOperator([[0, 1]])
| 5,335,793
|
def to_pydot(obj):
"""Specify either of the following options: a dot string (filename or text),
a networkx graph, a pydot graph, an igraph graph, or a callable function.
    The function will be called with a filename to write its dot output to."""
if isinstance(obj, pydot.Graph):
return obj
elif isinstance(obj, str):
if os.path.isfile(obj):
return pydot.graph_from_dot_file(obj)[0]
else:
return pydot.graph_from_dot_data(obj)[0]
elif is_networkx(obj):
return nx_pydot.to_pydot(obj)
elif is_igraph(obj):
with tempfile.NamedTemporaryFile(mode='w+') as f:
obj.write_dot(f.name)
return pydot.graph_from_dot_file(f.name)[0]
elif callable(obj):
with tempfile.NamedTemporaryFile(mode='w+') as f:
obj(f.name)
return pydot.graph_from_dot_file(f.name)[0]
elif hasattr(obj, 'to_dot') and callable(obj.to_dot):
return to_pydot(obj.to_dot())
else:
raise TypeError("Can't convert to pydot")
| 5,335,794
|
def workout_train_chunk_length(inp_len: int,
resampling_factor: int = 1,
num_encoders: int = 5,
kernel: int = 8,
stride: int = 2) -> int:
"""
Given inp_len, return the chunk size for training
"""
out_len = inp_len * resampling_factor
for _ in range(num_encoders):
out_len = math.ceil((out_len - kernel) / stride) + 1
for _ in range(num_encoders):
out_len = (out_len - 1) * stride + kernel
return math.ceil(out_len / resampling_factor)
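# Worked example (illustration only, using the defaults above): 16000 input
# samples pass through five kernel-8/stride-2 encoder stages and the matching
# inverse arithmetic, giving the next compatible chunk length.
assert workout_train_chunk_length(16000) == 16026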
| 5,335,795
|
def asin(e):
"""
:rtype: Column
"""
return col(Asin(parse(e)))
| 5,335,796
|
def display_summary(codebase, scan_names, processes, errors, echo_func=echo_stderr):
"""
Display a scan summary.
"""
error_messages, summary_messages = get_displayable_summary(
codebase, scan_names, processes, errors)
for msg in error_messages:
echo_func(msg, fg='red')
for msg in summary_messages:
echo_func(msg)
| 5,335,797
|
def gs_exists(gs_url):
"""Check if gs_url points to a valid file we can access"""
# If gs_url is not accessible, the response could be one of:
# 1. "You aren't authorized to read ..."
# 2. "No URLs matched: ..."
# and it would have a non-0 status, which would be raised.
#
# Otherwise, it would return a bunch of information about the file,
# one of them being "Creation time".
try:
res = run_cmd(f"gsutil stat {gs_url}")
return "Creation time:" in res.stdout.decode("utf-8")
except subprocess.CalledProcessError:
return False
| 5,335,798
|
def unblock_expired_blacklistings():
"""
    Some blacklist entries are only blocked until their `blocked_until`
    timestamp is hit. This task unblocks those whose timestamp has passed;
    run it at whatever interval you choose.
"""
blacklisted_emails = BlacklistEmail.objects.filter(
        is_blocked=True, blocked_until__isnull=False, blocked_until__lte=datetime.now()
)
blacklisted_domains = BlacklistDomain.objects.filter(
        is_blocked=True, blocked_until__isnull=False, blocked_until__lte=datetime.now()
)
if blacklisted_emails:
blacklisted_emails.update(is_blocked=False)
if blacklisted_domains:
blacklisted_domains.update(is_blocked=False)
| 5,335,799
|