content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def train_callbacks(loop:Loop)->"A cluster of callback function":
    """
    call backs allow optimizing model weights

    Registers the training-phase hooks on ``loop``:
    - attaches a fresh MetricTab to the loop core,
    - switches the model to train mode at the start of each forward phase,
    - zeroes optimizer gradients in the data-processing phase,
    - runs loss backward + optimizer step in the backward phase.

    :param loop: the training Loop to register callbacks on.
    """
    # Metric accumulator for this training run.
    loop.core.metric_tab = MetricTab()
    @loop.every_start_FORWARD
    def switch_model_to_train(loop:Loop): loop.model("train")()
    @loop.on_DATA_PROCESS
    def opt_zero_grad(loop:Loop):loop.opt("zero_grad")()
    @loop.on_BACKWARD
    def opt_move(loop:Loop):
        # Backpropagate the loss, then apply the optimizer update.
        loop.loss("backward")()
        loop.opt("step")()
def pb_set_defaults():
    """Set board defaults. Must be called before using any other board functions.

    Thin wrapper around the spinapi library; returns whatever status the
    underlying ``spinapi.pb_set_defaults()`` call returns.
    """
    return spinapi.pb_set_defaults()
def count_routes_graph(graph, source_node, dest_node):
    """Count distinct routes from ``source_node`` to ``dest_node``.

    Classic tree-like traversal: reaching the destination itself, or the
    node whose index immediately precedes it, counts as one route;
    otherwise routes are summed over the node's children.

    :param graph: mapping from node to a list of its child nodes.
    :param source_node: node the count starts from.
    :param dest_node: target node.
    :return: number of distinct routes.
    """
    # Base case: arrived at (or immediately before) the destination.
    if dest_node == source_node or dest_node - source_node == 1:
        return 1
    # Recursive case: total routes through every child.
    return sum(
        count_routes_graph(graph, child, dest_node)
        for child in graph[source_node]
    )
def pluck_state(obj: Dict) -> str:
    """Extract the state string from a parsed JSON dictionary.

    A wrapper illustrating composition: pipes ``get_metadata`` into
    ``get_state_from_meta``.

    Args:
        obj: The dictionary created from the json string.
    """
    # Compose the two extractors and apply them in one step.
    return pipe(get_metadata, get_state_from_meta)(obj)
def value(
    parser: Callable[[str, Mapping[str, str]], Any] = nop,
    tag_: Optional[str] = None,
    var: Optional[str] = None,
) -> Parser:
    """Return a parser to parse a simple value assignment XML tag.

    :param parser:
        The text parser to use for the contents of the given `tag_`. It will
        also be given the attributes mapping.
    :param tag_:
        The name of the tag to parse. The default is to consume any tag.
    :param var:
        Override the name the value is to be assigned to. The default is the
        tag name.

    .. note::
        Use of this will break the AST's ability to make suggestions when
        attempting to assign to an invalid variable as that feature
        requires the tag and variable to have the same name.

    :return:
        A parser that consumes the given XML `tag_` and produces a
        :class:`rads.config.ast.Assignment` AST node.
    :raises rads.config.xml_parsers.TerminalXMLParseError:
        Raised by the returned parser if the consumed tag is empty or the given
        text `parser` produces a :class:`rads.config.text_parsers.TextParseError`.
    """
    def process(element: Element) -> Assignment:
        # Assignment target defaults to the tag name unless overridden.
        var_ = var if var else element.tag
        condition = parse_condition(element.attributes)
        action = parse_action(element)
        # Empty tags contribute an empty string rather than None.
        text = element.text if element.text else ""
        source = source_from_element(element)
        try:
            value = parser(text, element.attributes)
        except TextParseError as err:
            # Re-raise as a terminal parse error anchored at this element.
            raise error_at(element)(str(err)) from err
        return Assignment(
            name=var_, value=value, condition=condition, action=action, source=source
        )
    # With no tag name given, match any tag; otherwise only the named one.
    if tag_:
        return tag(tag_) ^ process
    return any() ^ process
def list_locations_command():
    """List Azure Security Center locations and return them to Demisto.

    Builds a human-readable markdown table and entry context from the
    ``list_locations`` API response; outputs "No locations found" when the
    response contains no locations.
    """
    locations = list_locations().get("value")
    outputs = list()
    if locations:
        for location in locations:
            # homeRegionName lives under "properties" and may be absent.
            if location.get("properties") and location.get("properties").get(
                "homeRegionName"
            ):
                home_region_name = location.get("properties").get("homeRegionName")
            else:
                home_region_name = None
            outputs.append(
                {
                    "HomeRegionName": home_region_name,
                    "Name": location.get("name"),
                    "ID": location.get("id"),
                }
            )
        md = tableToMarkdown(
            "Azure Security Center - List Locations",
            outputs,
            ["HomeRegionName", "Name", "ID"],
            removeNull=True,
        )
        # Context key dedupes entries by location ID.
        ec = {"AzureSecurityCenter.Location(val.ID && val.ID === obj.ID)": outputs}
        entry = {
            "Type": entryTypes["note"],
            "Contents": locations,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": ec,
        }
        demisto.results(entry)
    else:
        demisto.results("No locations found")
def compute_Rnorm(image, mask_field, cen, R=12, wid=1, mask_cross=True, display=False):
    """ Compute (3 sigma-clipped) normalization using an annulus.

    Note the output values of normalization contain background.

    Parameters
    ----------
    image : input image for measurement
    mask_field : mask map with nearby sources masked as 1.
    cen : center of target
    R : radius of annulus
    wid : half-width of annulus
    mask_cross : whether to additionally mask the +-shaped bands through the
        center (important when R is small)
    display : if True, plot the annulus mask and the clipped histogram

    Returns
    -------
    I_mean: mean value in the annulus
    I_med : median value in the annulus
    I_std : std value in the annulus
    I_flag : 0 good / 1 bad (available pixles < 5)
    """
    annulus_ma = CircularAnnulus([cen], R-wid, R+wid).to_mask()[0]
    mask_ring = annulus_ma.to_image(image.shape) > 0.5    # sky ring (R-wid, R+wid)
    mask_clean = mask_ring & (~mask_field)       # sky ring with other sources masked
    # Whether to mask the cross regions, important if R is small
    if mask_cross:
        yy, xx = np.indices(image.shape)
        # NOTE(review): rr is computed but never used below.
        rr = np.sqrt((xx-cen[0])**2+(yy-cen[1])**2)
        # 4-pixel-wide horizontal and vertical bands through the center
        cross = ((abs(xx-cen[0])<4)|(abs(yy-cen[1])<4))
        mask_clean = mask_clean * (~cross)
    # Too few usable pixels -> flag as bad.
    if len(image[mask_clean]) < 5:
        return [np.nan] * 3 + [1]
    # Clip outliers in log space, then transform statistics back.
    z = sigma_clip(np.log10(image[mask_clean]), sigma=2, maxiters=5)
    I_mean, I_med, I_std = 10**np.mean(z), 10**np.median(z.compressed()), np.std(10**z)
    if display:
        z = 10**z
        fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9,4))
        ax1.imshow(mask_clean, cmap="gray", alpha=0.7)
        ax1.imshow(image, vmin=image.min(), vmax=I_med+50*I_std,
                   cmap='viridis', norm=AsinhNorm(), alpha=0.7)
        ax1.plot(cen[0], cen[1], 'r*', ms=10)
        ax2.hist(sigma_clip(z),alpha=0.7)
        # Label mean value
        plt.axvline(I_mean, color='k')
        plt.text(0.5, 0.9, "%.1f"%I_mean, color='darkorange', ha='center', transform=ax2.transAxes)
        # Label 20% / 80% quantiles
        I_20 = np.quantile(z.compressed(), 0.2)
        I_80 = np.quantile(z.compressed(), 0.8)
        for I, x_txt in zip([I_20, I_80], [0.2, 0.8]):
            plt.axvline(I, color='k', ls="--")
            plt.text(x_txt, 0.9, "%.1f"%I, color='orange',
                     ha='center', transform=ax2.transAxes)
    return I_mean, I_med, I_std, 0
def add_filter(
    self, gene_filter=None, transcript_filter=None, ref_transcript_filter=None
):
    """Defines and assigns filter flags, which can be used by iter_transcripts.

    Filters are defined as dict, where the key is a filter identifier, and the value is an expression,
    which gets evaluated on the gene/transcript. For examples, see the default filter definitions
    isotools.DEFAULT_GENE_FILTER, isotools.DEFAULT_TRANSCRIPT_FILTER and isotools.DEFAULT_REF_TRANSCRIPT_FILTER.

    :param gene_filter: dict of gene filters. If omitted the default gene filters apply.
    :param transcript_filter: dict of transcript filters. If omitted the default transcript filters apply.
    :param ref_transcript_filter: dict of reference transcript filters. If omitted the default reference filters apply.
    """
    # Collect attribute names the filter expressions may reference; only
    # valid Python identifiers are usable inside compiled expressions.
    gene_attributes = {k for g in self for k in g.data.keys() if k.isidentifier()}
    tr_attributes = {
        k for g in self for tr in g.transcripts for k in tr.keys() if k.isidentifier()
    }
    ref_tr_attributes = {
        k
        for g in self
        if g.is_annotated
        for tr in g.ref_transcripts
        for k in tr.keys()
        if k.isidentifier()
    }
    # "filter" is always available on (reference) transcripts.
    tr_attributes.add("filter")
    ref_tr_attributes.add("filter")
    if gene_filter is None:
        gene_filter = DEFAULT_GENE_FILTER
    if transcript_filter is None:
        transcript_filter = DEFAULT_TRANSCRIPT_FILTER
    if ref_transcript_filter is None:
        ref_transcript_filter = DEFAULT_REF_TRANSCRIPT_FILTER
    # Compile each expression into a callable over the known attributes.
    gene_ffun = {
        label: _filter_function(gene_attributes, fun)
        for label, fun in gene_filter.items()
    }
    tr_ffun = {
        label: _filter_function(tr_attributes, fun)
        for label, fun in transcript_filter.items()
    }
    reftr_ffun = {
        label: _filter_function(ref_tr_attributes, fun)
        for label, fun in ref_transcript_filter.items()
    }
    # Apply the compiled filters to every gene (tqdm shows progress).
    for g in tqdm(self):
        g.add_filter(gene_ffun, tr_ffun, reftr_ffun)
    # Keep the raw definitions so they can be inspected/serialized later.
    self.infos["filter"] = {
        "gene_filter": gene_filter,
        "transcript_filter": transcript_filter,
        "ref_transcript_filter": ref_transcript_filter,
    }
def inject_timeout(func):
    """Decorator which injects ``timeout`` parameter into request.

    On client initiation, default timeout is set. This timeout will be
    injected into any request if no explicit parameter is set.

    :return: Value of decorated function.
    """
    @six.wraps(func)
    def decorator(self, *args, **kwargs):
        # Only fill in the client default when the caller did not pass one.
        kwargs.setdefault("timeout", self._timeout)
        return func(self, *args, **kwargs)
    return decorator
def chunkify(lst, n):
    """Yield successive n-sized chunks from lst.

    The final chunk is shorter when len(lst) is not a multiple of n.
    """
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
def test_match_partial(values):
    """@match_partial allows not covering all the cases."""
    v, v2 = values
    # Dispatcher that only handles MyConstructor; other constructors of
    # MyType are deliberately left uncovered.
    @match_partial(MyType)
    class get_partial_value(object):
        def MyConstructor(x):
            return x
    # presumably the fixture builds v as MyConstructor(3) -- TODO confirm
    assert get_partial_value(v) == 3
def assert_sim_of_model_with_itself_is_approx_one(mdl: nn.Module, X: Tensor,
                                                  layer_name: str,
                                                  metric_comparison_type: str = 'pwcca',
                                                  metric_as_sim_or_dist: str = 'dist') -> bool:
    """
    Assert that comparing a model with itself gives (approximately) zero distance.

    Returns True if the model is ok. If not it asserts against you (never
    returns False).

    :param mdl: model compared against itself.
    :param X: input batch fed to both copies of the model.
    :param layer_name: layer at which representations are compared.
    :param metric_comparison_type: similarity metric to use (e.g. 'pwcca').
    :param metric_as_sim_or_dist: report the metric as a similarity or a
        distance; with the default 'dist', a perfect match yields 0.0.
    """
    dist: float = get_metric(mdl, mdl, X, X, layer_name, metric_comparison_type=metric_comparison_type,
                             metric_as_sim_or_dist=metric_as_sim_or_dist)
    print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
    # Bug fix: the old failure message claimed "Sim should be close to 1.0"
    # while the assertion actually checks for a *distance* of 0.0; the
    # message now matches the check.
    assert approx_equal(dist, 0.0), f'Distance should be close to 0.0 but got: {dist=}'
    return True
def cver(verstr):
    """Converts a version string into a number.

    Beta versions ("b<NUM>") sort below every release version by
    subtracting a large constant offset.
    """
    is_beta = verstr.startswith("b")
    if is_beta:
        return float(verstr[1:]) - 100000
    return float(verstr)
def test_if_supported_tags_are_valid(client):
    """
    GIVEN a Request object
    WHEN validating the tags property of the object
    THEN is allows only valid and up-to-date choices
    """
    r = Request()
    actual = r.fields["tag"].choices
    # The expected choices come from the live list of supported tags.
    expected = [entry["tag"]
                for entry in client.retrieve_supported_tags().json()["data"]]
    # Compare as sets: ordering is irrelevant.
    assert set(actual) == set(expected)
def log_and_raise_exception(error_message):
    """Log ``error_message`` at ERROR level, then raise it as an Exception.

    :param error_message: error message string
    :raises Exception: always, carrying ``error_message``
    """
    logger.error(error_message)
    raise Exception(error_message)
def test_float_const(message_type):
    """
    message Message {
        float value = 1 [(validate.rules).float.const = 4.2];
    }
    """
    # The exact constant passes validation.
    validate(message_type(value=4.2))
    # Any other value must fail with the "not equal to" error.
    with pytest.raises(ValidationError, match="value not equal to"):
        validate(message_type(value=2.4))
def _GetGaeCookie(host, service, auth_token, secure):
    """This function creates a login cookie using the authentication token
    obtained after logging in successfully in the Google account.

    Note: this is Python 2 code (urllib/urllib2).

    Args:
        host: Host where the user wants to login.
        service: Service code where the user wants to login.
        auth_token: Authentication token obtained from ClientLogin.
        secure: True if we want a secure cookie, false if not.
    Returns:
        A cookie for the specifed service.
    Raises:
        urllib2.HTTPError: This exception is raised when the cookie cannot be
            obtained and the user is redirected to another place.
    """
    # Create a request for Google's service with the authentication token.
    continue_location = 'http://localhost/'
    cookie_request_data_map = {
        'continue' : continue_location,
        'auth' : auth_token,
    }
    cookie_request_data = urllib.urlencode(cookie_request_data_map)
    cookie_url = '{protocol}://{host}/_{service}/login?{data}'.format(
        protocol=('https' if secure else 'http'), host=host, service=service,
        data=cookie_request_data)
    cookie_request = urllib2.Request(cookie_url)
    try:
        # Create a custom opener, make the request and extract the body.
        http_opener = _GetHTTPOpener()
        cookie_response = http_opener.open(cookie_request)
    except urllib2.HTTPError as e:
        # Keep the error as the cookie response.
        cookie_response = e
    # Check that a redirection was made to the required continue location.
    # Otherwise, return an HTTP error.
    response_code = cookie_response.code
    if (response_code != 302 or
        cookie_response.info()['location'] != continue_location):
        raise urllib2.HTTPError(cookie_request.get_full_url(), response_code,
                                cookie_response.msg, cookie_response.headers,
                                cookie_response.fp)
    # Extract the cookie from the headers and remove 'HttpOnly' from it.
    cookie = cookie_response.headers.get('Set-Cookie')
    return cookie.replace('; HttpOnly', '')
async def ping_handler() -> data.PingResponse:
    """
    Check server status.

    Health-check endpoint: always responds with status "ok".
    """
    return data.PingResponse(status="ok")
def add_era5_global_attributes(ds, creation_datetime):
    """Adds the standard ERA5 global attributes to the dataset ``ds``.

    :param ds: dataset whose ``attrs`` mapping is updated in place.
    :param creation_datetime: datetime stored (as ISO 8601) in "created".
    """
    ds.attrs["conventions"] = "CF-1.7"
    ds.attrs["contact"] = (
        "l.c.denby[at]leeds[dot]ac[dot again]uk s.boeing[at]leeds[dot]ac[dot again]uk"
    )
    ds.attrs["era5_reference"] = (
        "Hersbach, H., Bell, B., Berrisford, P., Hirahara, S., Horányi, A., Muñoz‐Sabater, J., ... & Simmons, A. (2020). The ERA5 global reanalysis. Quarterly Journal of the Royal Meteorological Society."
    )
    ds.attrs["created"] = creation_datetime.isoformat()
    ds.attrs["created_with"] = "https://github.com/EUREC4A-UK/lagtraj"
    ds.attrs["note"] = "Contains modified Copernicus Service information "
def test_degree(poly_equation):
    """
    The degree is correct.

    Estimates the degree numerically from the flux ratio at a large
    amplitude: log(flux(A)/flux(1)) / log(A) approaches the degree for A >> 1.
    """
    equation = poly_equation
    A = 1e10
    degree = np.log(equation.flux(A)/equation.flux(1))/np.log(A)
    npt.assert_allclose(equation.degree(), degree)
def main():
    """Start a child process, output status, and monitor exit.

    Runs the docopt-parsed <command> in a shell with a wall-clock timeout,
    killing the child if the deadline passes, and returns the child's exit
    code so it can be propagated.
    """
    args = docopt.docopt(__doc__, options_first=True, version=__version__)
    command = " ".join(args["<command>"])
    timeout = parse_time(args["--timeout"])
    # Calculate the time at which we will kill the child process.
    now = now_no_us()
    killtime = now + timeout
    # Log some startup information for the user.
    cprint(f"Running: {command}")
    cprint(f"Max runtime {timeout}")
    cprint(f"Will kill at {killtime} UTC")
    # Start the child process.
    child = subprocess.Popen(command, shell=True)  # nosec
    # Loop until it is time to kill the child process.
    while now < killtime:
        # Log how much time is remaining.
        remaining_delta = killtime - now
        cprint(f"{remaining_delta} remaining", severity=Severity.WARNING)
        try:
            sleep_time = calculate_sleep_time(remaining_delta)
            # Sleep while waiting for the child to exit.
            child.wait(sleep_time)
            # The child has exited before the timeout
            break
        except subprocess.TimeoutExpired:
            # The child did not exit. Not a problem.
            pass
        now = now_no_us()
    else:
        # We've reached the killtime (loop exhausted without break).
        cprint("Timeout reached... killing child.", severity=Severity.FAIL)
        child.kill()
    # Wait for the child to exit if it hasn't already.
    return_code = child.wait()
    # Log the return code of the child.
    if return_code == 0:
        cprint(f"Child has exited with: {return_code}", severity=Severity.GOOD)
    else:
        cprint(f"Child has exited with: {return_code}", severity=Severity.FAIL)
    # Return the child's return code as our own so that it can be acted upon.
    return return_code
def duplicate_keypair_name():
    """
    Duplicate key pair name.

    Always raises an EC2-style 400 error signalling that the requested
    keypair name already exists.

    @raise Ec2stackError: Defining a bad request and message.
    """
    raise Ec2stackError(
        '400',
        'InvalidKeyPair.Duplicate',
        'The keypair already exists.'
    )
def get_columns_sql(table):
    """Construct SQL component specifying table columns.

    Reads the CSV template named after ``table`` from the directory given
    by the MYSQL_TABLE_TEMPLATES_DIR environment variable; the first two
    cells of each row are the column name and its SQL data type.

    :param table: template/table name (without the ``.csv`` suffix).
    :return: comma-separated "name type" pairs, e.g. "id INT, name TEXT".
    """
    template_path = os.path.join(os.environ['MYSQL_TABLE_TEMPLATES_DIR'], f'{table}.csv')
    with open(template_path, newline='') as handle:
        reader = csv.reader(handle, delimiter=',')
        # One "name type" fragment per template row; joined while the
        # file is still open since the reader is lazy.
        return ', '.join(' '.join(row[:2]) for row in reader)
def publish_alert_to_sns(binary: BinaryInfo, topic_arn: str) -> None:
    """Publish a JSON SNS alert: a binary has matched one or more YARA rules.

    Args:
        binary: Instance containing information about the binary.
        topic_arn: Publish to this SNS topic ARN.
    """
    subject = '[BinaryAlert] {} matches a YARA rule'.format(
        binary.filepath or binary.computed_sha)
    # SNS subjects have a hard size limit, so elide the middle if needed.
    SNS.Topic(topic_arn).publish(
        Subject=_elide_string_middle(subject, SNS_PUBLISH_SUBJECT_MAX_SIZE),
        Message=(json.dumps(binary.summary(), indent=4, sort_keys=True))
    )
def create_session_cookie():
    """
    Creates a cookie containing a session for a user.

    A fresh test user is created and an authenticated Django session is
    built for it, so the returned cookie can be installed in a browser to
    skip the login flow.
    Stolen from https://stackoverflow.com/questions/22494583/login-with-code-when-using-liveservertestcase-with-django

    :return: dict mapping the session cookie name to the session key
    """
    # First, create a new test user
    user = AuthUserFactory()
    # Then create the authenticated session using the new user credentials
    session = SessionStore()
    session[SESSION_KEY] = user.pk
    session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
    session[HASH_SESSION_KEY] = user.get_session_auth_hash()
    session.save()
    # Finally, create the cookie dictionary
    cookie = {settings.SESSION_COOKIE_NAME: session.session_key}
    return cookie
def is_excluded(src_path: Path, globs: Optional[List[str]] = None) -> bool:
    """
    Determine if a src_path should be excluded.

    Supports globs (e.g. folder/* or *.md); a glob matches either the full
    path or the bare file name.

    Credits: code inspired by / adapted from
    https://github.com/apenwarr/mkdocs-exclude/blob/master/mkdocs_exclude/plugin.py

    Args:
        src_path (Path): Path of file
        globs (list): list of globs

    Returns:
        (bool): whether src_path should be excluded
    """
    if globs is None or len(globs) == 0:
        return False
    assert isinstance(src_path, Path)
    assert hasattr(globs, "__iter__")  # list or tuple
    # Windows reports filenames as e.g. a\\b\\c instead of a/b/c, so
    # normalise separators so the same globs match on every OS. Unix
    # filenames may legitimately contain literal backslashes, so only
    # convert when os.sep is actually unusual.
    if os.sep == "/":
        candidate = str(src_path)
    else:
        candidate = str(src_path).replace(os.sep, "/")
    for pattern in globs:
        matched = (
            fnmatch.fnmatchcase(candidate, pattern)
            or src_path.name == pattern
        )
        if matched:
            return True
    return False
def get_seattle_streets(filename=None, folder="."):
    """
    Retrieves processed data from
    `Seattle Streets <https://data.seattle.gov/dataset/Street-Network-Database/
    afip-2mzr/data)>`_.

    @param  filename    local filename; if None, the archive is downloaded
                        and the contained ``.shp`` file is used
    @param  folder      temporary folder where to download files
    @return             shapes, records

    The function returns a filename.
    @raise FileNotFoundError: if the download does not yield exactly one
        ``.shp`` file, or if the given ``filename`` does not exist.
    """
    if filename is None:
        names = download_data("WGS84_seattle_street.zip", whereTo=folder)
        shp = [n for n in names if n.endswith('.shp')]
        if len(shp) != 1:
            # Retry with logging captured so the error message can include it.
            from pyquickhelper.loghelper import BufferedPrint
            buf = BufferedPrint()
            names = download_data("WGS84_seattle_street.zip",
                                  whereTo=folder, fLOG=buf.fprint)
            raise FileNotFoundError(
                "Unable to download data 'WGS84_seattle_street.zip' to '{0}', log={1}\nnames={2}.".format(
                    filename, str(buf), "\n".join(names)))
        filename = shp[0]
    elif not os.path.exists(filename):
        raise FileNotFoundError(filename)
    return filename
def _replace_oov(original_vocab, line):
"""Replace out-of-vocab words with "UNK".
This maintains compatibility with published results.
Args:
original_vocab: a set of strings (The standard vocabulary for the dataset)
line: a unicode string - a space-delimited sequence of words.
Returns:
a unicode string - a space-delimited sequence of words.
"""
return u" ".join(
[word if word in original_vocab else u"UNK" for word in line.split()]) | 5,327,027 |
def linear_CMD_fit(x,y,xerr,yerr):
    """
    Does a linear fit to CMD data where x is color and y is amplitude, returning some fit
    statistics

    Parameters
    ----------
    x : array-like
        color
    y : array-like
        magnitude
    xerr : array-like
        color errors
    yerr : array-like
        magnitude errors

    Returns
    -------
    slope : float
        slope of best-fit line
    r_squared : float
        Correlation coefficient (R^2)
    """
    # Orthogonal distance regression accounts for errors on both axes.
    data = RealData(x, y, sx=xerr, sy=yerr)
    mod = Model(line)
    # Initial guess: shallow negative slope, intercept at the mean magnitude.
    odr = ODR(data, mod, beta0=[-0.1, np.mean(y)])
    out = odr.run()
    slope = out.beta[0]
    # Goodness of fit of the ODR solution, scored as ordinary R^2 on y.
    r_squared = r2_score(y, line(out.beta, x))
    return slope, r_squared
def save_vehicle(vehicle):
    """
    Insert a new vehicle record.

    :parameter: vehicle - vehicle information tuple (
        customer ID, plate number, model, registration date, mileage,
        transfer count, loan product, loan installments, loan term (years),
        loan amount, loan submission date, loan approval date, disbursement date,
        insurer company ID, insurance type, insurance start date,
        insurance end date, remarks)
    """
    # Database handle
    db = sqlite.Database()
    # INSERT statement; the trailing hard-coded 0 is a generated column
    # (its meaning is not visible here -- TODO confirm), followed by the operator.
    sql = "INSERT INTO T_VEHICLE VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 0, ?);"
    # Parameter tuple: generated UUID + caller fields + timestamp and current user.
    data = (get_uuid(),) + vehicle + (get_now(), auth.Auth.logon_user[0])
    # Execute the insert.
    db.execute_update(sql, data)
def find_uts_hlines(ndvar):
    """Find horizontal lines for uts plots (based on contours)

    Parameters
    ----------
    ndvar : NDVar
        Data to be plotted.

    Yields
    ------
    h_lines : tuple
        (y, kwargs) pairs, in ascending order of the contour level.
    """
    contours = ndvar.info.get('contours', None)
    if not contours:
        return
    for level in sorted(contours):
        spec = contours[level]
        if isinstance(spec, dict):
            # Already a matplotlib kwargs dict; yield a copy so callers
            # can mutate it safely.
            yield level, spec.copy()
        else:
            # Bare value is interpreted as a color.
            yield level, {'color': spec}
def _verify_path_value(value, is_str, is_kind=False):
    """Verify a key path value: one of a kind, string ID or integer ID.

    Args:
        value (Union[str, int]): The value to verify
        is_str (bool): Flag indicating if the ``value`` is a string. If
            :data:`False`, then the ``value`` is assumed to be an integer.
        is_kind (Optional[bool]): Flag indicating if the value is meant to
            be a kind. Defaults to :data:`False`.

    Returns:
        Union[str, int]: The ``value`` passed in, if it passed verification
        checks.

    Raises:
        ValueError: If the ``value`` is a ``str`` (kind or name) whose UTF-8
            encoding is outside the range ``[1, 1500]`` bytes.
        ValueError: If the ``value`` is an integer but lies outside of the
            range ``[1, 2^63 - 1]``.
    """
    if is_str:
        # String IDs/kinds are limited by their encoded byte length.
        byte_length = len(value.encode("utf-8"))
        if 1 <= byte_length <= _MAX_KEYPART_BYTES:
            return value
        template = _BAD_KIND if is_kind else _BAD_STRING_ID
        raise ValueError(template.format(_MAX_KEYPART_BYTES, value))
    # Integer IDs must be positive and fit in a signed 64-bit integer.
    if 1 <= value <= _MAX_INTEGER_ID:
        return value
    raise ValueError(_BAD_INTEGER_ID.format(value))
def test_count(client, index):
    """ count

    Indexes three documents, then exercises the count API: a plain query,
    a query with the full set of optional parameters, and two invalid
    parameter values that must raise.
    """
    yield from client.index(index, 'testdoc',
                            MESSAGES[0], '1',
                            refresh=True)
    yield from client.index(index, 'testdoc',
                            MESSAGES[1], '2',
                            refresh=True)
    yield from client.index(index, 'testdoc',
                            MESSAGES[2], '3',
                            refresh=True)
    # presumably two of the three fixture messages contain skills:Python -- confirm
    data = yield from client.count(
        index, 'testdoc', q='skills:Python')
    assert data['count'] == 2
    # min_score=1 filters everything out.
    data = yield from client.count(
        index, 'testdoc', q='skills:Python',
        ignore_unavailable=True,
        expand_wildcards='open',
        allow_no_indices=False,
        min_score=1,
        preference='random')
    assert data['count'] == 0
    # Wrong parameter types/values must be rejected client-side.
    with pytest.raises(TypeError):
        yield from client.count(
            index, 'testdoc',
            expand_wildcards=1)
    with pytest.raises(ValueError):
        yield from client.count(
            index, 'testdoc', q='skills:Python',
            expand_wildcards='1',
            routing='Sidor',
            source='Query DSL')
def abort(*args, **kwargs) -> None:
    """
    Abort execution without an additional error

    Logs the message at INFO level; arguments are forwarded to
    ``logging.info`` unchanged.
    """
    logging.info(*args, **kwargs)
    # NOTE(review): counted_error_at_exit presumably records the failure
    # and terminates the process -- confirm against its definition.
    counted_error_at_exit()
def _is_tipologia_header(row):
    """Check whether the current row is the header of a new item
    category ("Personale", "Noli", etc.) rather than a regular entry.
    """
    # Column 1 must hold the category name (a string).
    if type(row.iloc[1]) is not str:
        return False
    # Column 2 must be either the units header string or empty (NaN).
    if type(row.iloc[2]) is str:
        if row.iloc[2] != HEADERS["units"]:
            return False
    else:
        if not np.isnan(row.iloc[2]):
            return False
    return True
def validateFloat(
    value,
    blank=False,
    strip=None,
    allowRegexes=None,
    blockRegexes=None,
    min=None,
    max=None,
    lessThan=None,
    greaterThan=None,
    excMsg=None,
):
    # type: (str, bool, Union[None, str, bool], Union[None, Sequence[Union[Pattern, str]]], Union[None, Sequence[Union[Pattern, str, Sequence[Union[Pattern, str]]]]], Optional[int], Optional[int], Optional[int], Optional[int], Optional[str]) -> Union[float, str]
    """Raises ValidationException if value is not a float.

    Returns value, so it can be used inline in an expression:
        print(2 + validateFloat(your_number))

    Note that since float() ignore leading or trailing whitespace
    when converting a string to a number, so does this validateNum().

    * value (str): The value being validated as an int or float.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
    * blockRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * min (int, float): The (inclusive) minimum value for the value to pass validation.
    * max (int, float): The (inclusive) maximum value for the value to pass validation.
    * lessThan (int, float): The (exclusive) minimum value for the value to pass validation.
    * greaterThan (int, float): The (exclusive) maximum value for the value to pass validation.
    * excMsg (str): A custom message to use in the raised ValidationException.

    If you specify min or max, you cannot also respectively specify lessThan
    or greaterThan. Doing so will raise PySimpleValidateException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateFloat('3.14')
    3.14
    >>> pysv.validateFloat('pi')
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: 'pi' is not a float.
    >>> pysv.validateFloat('3')
    3.0
    >>> pysv.validateFloat('3', min=3)
    3.0
    >>> pysv.validateFloat('3', greaterThan=3)
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: Number must be greater than 3.
    """
    # Delegate to the generic number validator with _numType pinned to
    # 'float'. Even though validateNum *could* return an int, it won't when
    # _numType is 'float', so ignore mypy's complaint.
    return validateNum(
        value=value,
        blank=blank,
        strip=strip,
        allowRegexes=allowRegexes,
        blockRegexes=blockRegexes,
        _numType="float",
        min=min,
        max=max,
        lessThan=lessThan,
        greaterThan=greaterThan,
    )
def send_sms(mobile: str, sms_code: str) -> Dict[str, Any]:
    """Send an SMS verification code to ``mobile`` via the provider SDK.

    On success the code is cached in redis (with an expiry) together with a
    resend cool-down marker. On any failure the error is logged and the
    last provider response (or an empty dict) is returned.

    :param mobile: recipient phone number.
    :param sms_code: the verification code to deliver.
    :return: the provider's response parsed as a dict; ``{}`` if the
        request failed before a response was parsed.
    """
    sdk: SmsSDK = SmsSDK(
        celery.app.config.get("SMS_ACCOUNT_ID"),
        celery.app.config.get("SMS_ACCOUNT_TOKEN"),
        celery.app.config.get("SMS_APP_ID")
    )
    # Bug fix: ``result`` must exist before the try block -- previously an
    # exception raised by sendMessage()/loads() made the handler's
    # ``return result`` fail with UnboundLocalError.
    result: Dict[str, Any] = {}
    try:
        ret: str = sdk.sendMessage(
            celery.app.config.get("SMS_TEMPLATE_ID"),  # template ID
            mobile,  # recipient phone number
            (sms_code, celery.app.config.get("SMS_EXPIRE_TIME") // 60)  # template variables
        )
        # The provider returns a JSON string; parse it into a dict.
        result = orjson.loads(ret)
        # A status code of six zeros means success; cache the code in redis.
        if result["statusCode"] == "000000":
            pipe: Pipeline = redis.pipeline()
            pipe.multi()  # start a transaction
            # store the SMS code with its expiry
            pipe.setex("sms_%s" % mobile, celery.app.config.get("SMS_EXPIRE_TIME"), sms_code)
            # start the resend cool-down timer
            pipe.setex("int_%s" % mobile, celery.app.config.get("SMS_INTERVAL_TIME"), "_")
            pipe.execute()  # commit the transaction
            return result
        else:
            raise Exception
    except Exception as exc:
        celery.app.logger.error("短信发送失败!\r\n%s" % exc)
        return result
def from_dataframe(df, name='df', client=None):
    """
    convenience function to construct an ibis table
    from a DataFrame

    EXPERIMENTAL API

    Parameters
    ----------
    df : DataFrame
    name : str, default 'df'
    client : Client, default new PandasClient
        client dictionary will be mutated with the
        name of the DataFrame

    Returns
    -------
    Table
    """
    # No client given: build a fresh client holding just this frame.
    if client is None:
        return connect({name: df}).table(name)
    # Existing client: register (or overwrite) the frame under `name`.
    client.dictionary[name] = df
    return client.table(name)
def make_parser() -> argparse.ArgumentParser:
    """Make parser for CLI arguments.

    One positional argument (the site to fetch) plus options controlling
    caching, redownload, network-size filtering and output naming.
    """
    parser = argparse.ArgumentParser()
    argument_specs = [
        (("site_name",),
         dict(help="name of the site you want to get data for")),
        (("--no-expand-meta",),
         dict(action="store_true",
              help="don't include links that use the old domain name structure")),
        (("-d", "--download"),
         dict(action="store_true",
              help="redownload data, even if it exists in the cache")),
        (("--min",),
         dict(type=int, default=0,
              help="minimum sized networks to include in output")),
        (("--max",),
         dict(type=int, default=float("inf"),
              help="maximum sized networks to include in output")),
        (("-o", "--output"),
         dict(default="{site_name}", help="output file name")),
        (("--cache-dir",),
         dict(default=".cache/", help="cache directory")),
    ]
    for flags, options in argument_specs:
        parser.add_argument(*flags, **options)
    return parser
def get_files_to_parse(relative_path):
    """Walks through given directory and returns all files ending
    with an accepted file extension.

    Arguments:
        relative_path {string} -- path to pull files from recursively

    Returns:
        List<String> -- list of filenames with fullpath
    """
    files = []
    filepath = os.path.realpath(relative_path)
    if os.path.isfile(filepath):
        # A single file is returned as-is (no extension check applied,
        # matching the original behavior).
        files.append(filepath)
    else:
        for root, _dirs, names in os.walk(filepath):
            for name in names:
                # Extension check; files with no "." keep their whole name,
                # which simply won't match the accepted extensions.
                if not name.split(".")[-1] in ACCEPTED_FILE_EXTENSIONS:
                    continue
                # Bug fix: the old code joined ``root`` with an already
                # absolute path (os.path.join(r, os.path.join(r, file)));
                # that only worked by accident of join() discarding the
                # first part when the second is absolute.
                files.append(os.path.join(root, name))
    return files
async def stat_data(full_path: str, isFolder=False) -> dict:
    """
    Collect basic filesystem metadata for a path.

    only call this on a validated full path

    :param full_path: absolute path to the file or folder.
    :param isFolder: passed through into the result unchanged.
    :return: dict with name, path, mtime (ms), size and isFolder.
    """
    stats = os.stat(full_path)
    return {
        'name': os.path.basename(full_path),
        'path': full_path,
        'mtime': int(stats.st_mtime * 1000),  # stat gives seconds, we want ms
        'size': stats.st_size,
        'isFolder': isFolder,
    }
def start_browser(cfg):
    """
    Start browser with disabled "Save PDF" dialog.
    Download files to data folder.

    :param cfg: configuration object with ``headless`` and ``user_agent``
        attributes
    :return: configured selenium Firefox webdriver
    """
    my_options = Options()
    if cfg.headless:
        my_options.headless = True
        my_options.add_argument('--window-size=1920,1200')
    my_profile = webdriver.FirefoxProfile()
    my_profile.set_preference('general.useragent.override', cfg.user_agent)
    # folderList=2: save downloads to the custom browser.download.dir below.
    my_profile.set_preference('browser.download.folderList', 2)
    my_profile.set_preference('browser.download.manager.showWhenStarting', False)
    my_profile.set_preference('browser.download.manager.useWindow', False)
    # Disable the built-in PDF viewer so PDFs are downloaded, not rendered.
    my_profile.set_preference('pdfjs.disabled', True)
    my_profile.set_preference('browser.download.dir',
                              os.path.join(os.getcwd(), 'data'))
    # Never prompt for these MIME types; open/save silently.
    my_profile.set_preference('browser.helperApps.neverAsk.openFile',
                              'application/octet-stream, application/pdf, application/x-www-form-urlencoded')
    my_profile.set_preference('browser.helperApps.neverAsk.saveToDisk',
                              'application/octet-stream, application/pdf, application/x-www-form-urlencoded')
    return webdriver.Firefox(executable_path=gecko_path(), options=my_options, firefox_profile=my_profile)
def filter_list(prev_list, current_list, zeta):
    """
    Apply the moving-average filter element-wise over ``current_list``,
    pairing each element with its predecessor from ``prev_list``.

    :param prev_list: previous filtered values (indexed in lockstep).
    :param current_list: new raw values.
    :param zeta: filter coefficient forwarded to moving_average_filter.
    :return: list of filtered values, one per element of current_list.
    """
    return [
        moving_average_filter(current_val, prev_list[index], zeta)
        for index, current_val in enumerate(current_list)
    ]
def CleanRules(nodes, marker=IPTABLES_COMMENT_MARKER):
    """Removes all QA `iptables` rules matching a given marker from a given node.

    If no marker is given, the global default is used, which clean all custom
    markers.
    """
    # Accept a single node as well as an iterable of nodes.
    # NOTE(review): hasattr(..., '__iter__') treats strings as iterable on
    # Python 3 -- confirm node objects/names behave as intended here.
    if not hasattr(nodes, '__iter__'):
        nodes = [nodes]
    for node in nodes:
        # Drop every saved rule containing the marker, then reload the rest.
        AssertCommand(("iptables-save | grep -v '%s' | iptables-restore" %
                       (marker, )),
                      node=node)
def has_prefix(sub_s):
    """
    :param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
    :return: (bool) If there is any words with prefix stored in sub_s
    """
    # Short-circuits on the first dictionary word starting with sub_s.
    return any(word.startswith(sub_s) for word in dict_list)
def get_dir_size_recursive(directoryPath):
    """
    Returns the size of a directory's contents (recursive) in bytes.

    :param directoryPath: string, path of directory to be analyzed
    :return: int, size of sum of files in directory in bytes
    """
    # Sum the size of every file found while walking the tree.
    return sum(
        path.getsize(path.join(dirpath, filename))
        for dirpath, _dirnames, filenames in walk(directoryPath)
        for filename in filenames
    )
def test_pause(
    decoy: Decoy,
    engine_client: SyncClient,
    subject: ProtocolContext,
) -> None:
    """It should be able to issue a Pause command through the client."""
    # Pausing without a message forwards message=None to the engine.
    subject.pause()
    decoy.verify(engine_client.pause(message=None), times=1)
    # The legacy `msg` argument is forwarded as the engine `message`.
    subject.pause(msg="hello world")
    decoy.verify(engine_client.pause(message="hello world"), times=1)
def test_as_custom_details_ignores_custom_fields():
    """Publishers - PagerDuty - as_custom_details - Ignore Magic Keys

    Composes an alert through the full publisher chain and asserts that
    PagerDuty "magic" keys (title/summary) are excluded from the nested
    details payload.
    """
    alert = get_alert(context={'context': 'value'})
    alert.created = datetime(2019, 1, 1)
    # Run the default publisher plus the two PagerDuty layout publishers.
    alert.publishers = {
        'pagerduty': [
            'stream_alert.shared.publisher.DefaultPublisher',
            'publishers.community.pagerduty.pagerduty_layout.ShortenTitle',
            'publishers.community.pagerduty.pagerduty_layout.as_custom_details',
        ]
    }
    output = MagicMock(spec=OutputDispatcher)
    output.__service__ = 'pagerduty'
    descriptor = 'unit_test_channel'
    publication = compose_alert(alert, output, descriptor)
    # We don't care about the entire payload; let's check a few top-level keys we know
    # are supposed to be here..
    assert_true(publication['source_entity'])
    assert_true(publication['outputs'])
    assert_true(publication['log_source'])
    # Check that the title keys exists
    assert_true(publication['@pagerduty.description'])
    # now check that the details key exists
    assert_true(publication['@pagerduty.details'])
    # And check that it has no magic keys
    assert_false('@pagerduty.description' in publication['@pagerduty.details'])
    assert_false('@pagerduty-v2.summary' in publication['@pagerduty.details'])
def main():
    """Load the extra-parameters YAML file and validate it.

    Returns the result of ``_check_param`` on the parsed mapping; any
    failure is logged via ``_logger`` and re-raised.
    """
    try:
        # read parameters configuration file yaml
        with open(setupcfg.extraParam, "r") as stream:
            try:
                param = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                # NOTE(review): on a YAML error `param` is never bound, so the
                # `_check_param(param)` call below raises NameError, which the
                # outer handler then logs and re-raises — confirm intended.
                print(exc)
        # check parameters file
        return _check_param(param)
    except Exception:
        _logger.exception(
            f"Something goes wrong when loading extra parameters file -{setupcfg.extraParam}-."
        )
        raise
def MSXopen(nomeinp):
    """Opens the MSX Toolkit to analyze a particular distribution system.

    Arguments:
        nomeinp: name (path) of the msx input file; encoded to bytes for
            the underlying C library call.

    Raises:
        MSXtoolkitError: if the library returns a non-zero error code.
    """
    ierr= _lib.MSXopen(ctypes.c_char_p(nomeinp.encode()))
    if ierr!=0: raise MSXtoolkitError(ierr)
def primary_key(field_type):
    """Return the field to be treated as the "primary key" for this type.

    The primary key is determined as the first of:
      - non-null ID field
      - ID field
      - first non-null field
      - first field

    :param field_type: object type definition to inspect
    :return: the field definition chosen as primary key (may be falsy if
        the type has no fields at all)
    """
    # Each helper returns a falsy value when it finds nothing, so the
    # original if-not cascade is exactly an `or` chain.
    return (first_non_null_and_id_field(field_type)
            or first_id_field(field_type)
            or first_non_null_field(field_type)
            or first_field(field_type))
def get_discussion_data_list_with_percentage(session: Session, doi, limit: int = 20, min_percentage: float = 1,
                                             dd_type="lang"):
    """ get discussion types with count an percentage from postgresql

    :param session: SQLAlchemy session used to run the raw SQL.
    :param doi: if truthy, restrict the counts to this publication DOI;
        otherwise use the precomputed global `counted_discussion_data`.
    :param limit: maximum number of value rows (a 'total' row is appended
        via UNION regardless of the limit).
    :param min_percentage: rows with percentage below this are dropped.
    :param dd_type: discussion-data type to aggregate (e.g. "lang").
    :return: list of (value, count, p) rows.
    """
    # Global variant: per-value counts are already materialised in
    # counted_discussion_data; percentage is count over the window total.
    query = """
        WITH result AS
        (
            (
                SELECT "value",
                       count as c,
                       ROUND(count / CAST(SUM(count) OVER () AS FLOAT) * 1000) / 10 as p
                FROM counted_discussion_data
                JOIN discussion_data as dd ON (discussion_data_point_id = dd.id)
                WHERE type = :type and value != 'und' and value != 'unknown'
                ORDER BY c DESC
                LIMIT :limit
            )
            UNION
            (
                SELECT 'total' as "value", SUM(count) as c, 100 as p
                FROM counted_discussion_data
                JOIN discussion_data as dd ON (discussion_data_point_id = dd.id)
                WHERE type = :type and value != 'und' and value != 'unknown'
            )
        )
        SELECT "value", c as count, p
        FROM result
        WHERE result.p >= :mp
        ORDER BY count DESC;
    """
    params = {
        'type': dd_type,
        'limit': limit,
        'mp': min_percentage
    }
    if doi:
        # Per-publication variant: aggregate the raw discussion_data_point
        # rows for the given DOI instead of the precomputed counts.
        query = """
            WITH result AS
            (
                (
                    SELECT "value",
                           SUM(count) as c,
                           ROUND(SUM(count) / CAST(SUM(SUM(count)) OVER () AS FLOAT) * 1000) / 10 as p
                    FROM (SELECT "value", "count"
                          FROM discussion_data_point as ddp
                          JOIN discussion_data as dd ON (ddp.discussion_data_point_id = dd.id)
                          WHERE type = :type and value != 'und' and value != 'unknown'
                            AND publication_doi=:doi
                         ) temp
                    GROUP BY "value"
                    ORDER BY c DESC
                    LIMIT :limit
                )
                UNION
                (
                    SELECT 'total' as "value", SUM(count) as c, 100 as p
                    FROM discussion_data_point as ddp
                    JOIN discussion_data as dd ON (ddp.discussion_data_point_id = dd.id)
                    WHERE type = :type and value != 'und' and value != 'unknown'
                      AND publication_doi=:doi
                )
            )
            SELECT "value", c as count, p
            FROM result
            WHERE result.p >= :mp
            ORDER BY count DESC;
        """
        params['doi'] = doi
    s = text(query)
    # Bind only the parameters the chosen query actually references.
    if 'doi' in params:
        s = s.bindparams(bindparam('type'), bindparam('limit'), bindparam('mp'), bindparam('doi'))
    else:
        s = s.bindparams(bindparam('type'), bindparam('limit'), bindparam('mp'))
    return session.execute(s, params).fetchall()
def validate(config, model, val_iterator, criterion, scheduler=None):
    """Runs one standard validation pass over the val_iterator.

    This function automatically measures timing for various operations such
    as host to device transfer and processing time for the batch.
    It also automatically detects and places the data on the given GPU device
    if available.

    Raises:
        ValueError if multiple models/schedulers are provided. You
        are expected to have a custom validation function if you wish
        to use multiple models/schedulers.

    Args:
        config: (dict): A user configuration provided into the Trainer
            constructor.
        model: The model as created by the model_creator.
        val_iterator: An iterator created from the DataLoader which
            wraps the provided Dataset.
        criterion: The loss object created by the loss_creator.
        scheduler (optional): The torch.optim.lr_scheduler object
            as created by the scheduler_creator. By default,
            this is not used in this function.

    Returns:
        A dict of metrics from the evaluation.
    """
    # collections.Iterable was removed in Python 3.10; the abc module is the
    # supported home of the ABCs.
    from collections.abc import Iterable
    if isinstance(model, Iterable) or isinstance(scheduler, Iterable):
        raise ValueError(
            "Need to provide custom validation function if using multi-model "
            "or multi-scheduler training.")
    batch_time = AverageMeter()
    losses = AverageMeter()
    # switch to evaluate mode
    model.eval()
    correct = 0
    total = 0
    batch_idx = 0
    with torch.no_grad():
        end = time.time()
        for batch_idx, (features, target) in enumerate(val_iterator):
            if torch.cuda.is_available():
                features = features.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            # compute output
            output = model(features)
            loss = criterion(output, target)
            _, predicted = torch.max(output.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
            # measure accuracy and record loss
            losses.update(loss.item(), features.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            # In test mode only a single batch is evaluated.
            if config.get(TEST_MODE) and batch_idx == 0:
                break
    stats = {
        BATCH_COUNT: batch_idx + 1,
        "batch_time": batch_time.avg,
        "validation_loss": losses.avg,
        "mean_accuracy": correct / total,
        "mean_loss": losses.sum / total,
    }
    return stats
def fix_path(file_path):
"""fixes a path so project files can be located via a relative path"""
script_path = os.path.dirname(__file__)
return os.path.normpath(os.path.join(script_path, file_path)) | 5,327,053 |
def cmd(cmd_name, source, args=None, version=None, params=None):
    """Wrap command interaction for easier use with python objects.

    Runs ``/opt/resource/<cmd_name>`` with the given extra CLI ``args``,
    feeding it a JSON document built from ``source``/``version``/``params``
    on stdin, and returns the decoded JSON output.

    Mutable default arguments ([] / {}) were replaced with None sentinels;
    the effective defaults are unchanged.
    """
    in_json = json.dumps({
        "source": source,
        "version": version if version is not None else {},
        "params": params if params is not None else {},
    })
    command = ['/opt/resource/' + cmd_name] + (args if args is not None else [])
    output = subprocess.check_output(command,
                                     stderr=sys.stderr, input=bytes(in_json, 'utf-8'))
    return json.loads(output.decode())
def update_gateway_software_now(GatewayARN=None):
    """
    Updates the gateway virtual machine (VM) software. The request
    immediately triggers the software update.

    :type GatewayARN: string
    :param GatewayARN: [REQUIRED] The Amazon Resource Name (ARN) of the
        gateway. Use the ListGateways operation to return a list of
        gateways for your account and AWS Region.

    :rtype: dict
    :return: {'GatewayARN': 'string'} — a JSON object containing the ARN
        of the gateway that was updated.

    Exceptions:
        StorageGateway.Client.exceptions.InvalidGatewayRequestException
        StorageGateway.Client.exceptions.InternalServerError

    Example:
        response = client.update_gateway_software_now(
            GatewayARN='arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B',
        )
    """
    pass
def geq_indicate(var, indicator, var_max, thr):
    """Generates constraints that make indicator 1 iff var >= thr, else 0.

    Parameters
    ----------
    var : str
        Variable on which thresholding is performed.
    indicator : str
        Identifier of the indicator variable.
    var_max : int
        An upper bound on var.
    thr : int
        Comparison threshold.

    Returns
    -------
    List[str]
        A list holding the two constraints (lower bound, upper bound).
    """
    # indicator = 1 forces var >= thr; indicator = 0 forces var <= thr - 1.
    lb = "- %s + %d %s <= 0" % (var, thr, indicator)
    ub = "- %s + %d %s >= -%d" % (var, var_max - thr + 1, indicator, thr - 1)
    return [lb, ub]
def parse_manpage_number(path):
    """
    Parse the section number of a man page group from its path.

    :param path: path containing a ``man<N>`` directory component.
    :return: the section number as a one-character string, or None when
        the path contains no ``man<N>`` component.
    """
    # Removed the dead `only_number` local; behavior is unchanged.
    match = re.compile(r".*/man(\d).*").search(path)
    return match.group(1) if match is not None else None
def sample_coordinates_from_coupling(c, row_points, column_points, num_samples=None, return_all = False, thr = 10**(-6)):
    """
    Generates [x, y] samples from the coupling c.

    If return_all is True, returns [x, y] coordinates of every pair with
    coupling value > thr.
    """
    index_pairs = sample_indices_from_coupling(
        c, num_samples=num_samples, return_all=return_all, thr=thr)
    coords = []
    for pair in index_pairs:
        coords.append([row_points[pair[0], :], column_points[pair[1], :]])
    return np.array(coords)
def delete(path):
    """
    Send a DELETE request to *path* via the module-level ``client`` and
    echo the formatted response to the console.
    """
    response = client.delete(url=path)
    click.echo(format_response(response))
def is_suppress_importerror(node: ast.With):
    """
    Returns whether the given ``with`` block contains a
    :func:`contextlib.suppress(ImportError) <contextlib.suppress>` contextmanager.

    .. versionadded:: 0.5.0  (private)

    :param node:
    """  # noqa: D400
    suppress_names = {"suppress", "contextlib.suppress", "contextlib2.suppress"}
    import_error_names = {"ImportError", "ModuleNotFoundError"}
    for withitem in node.items:
        context = withitem.context_expr
        if not isinstance(context, ast.Call):
            continue
        try:
            func_name = '.'.join(get_attribute_name(context.func))
        except NotImplementedError:  # pragma: no cover
            continue
        if func_name not in suppress_names:
            continue
        for call_arg in context.args:
            try:
                arg_name = '.'.join(get_attribute_name(call_arg))
            except NotImplementedError:  # pragma: no cover
                continue
            if arg_name in import_error_names:
                return True
    return False
def random_flip_left_right(data):
    """ Randomly flip an image or batch of image left/right uniformly

    Args:
        data: tensor of shape (H, W, C) or (N, H, W, C)

    Returns:
        Randomly flipped data
    """
    # _concat_batch flattens a possible batch dimension so the whole batch
    # receives the same flip decision; _unconcat_batch restores the shape.
    data_con, C, N = _concat_batch(data)
    data_con = tf.image.random_flip_left_right(data_con)
    return _unconcat_batch(data_con, C, N)
def copyfileobj_example(source, dest, buffer_size=1024*1024*1024):
    """
    Copy a file from source to dest. source and dest
    must be file-like objects, i.e. any object with a read or
    write method, like for example StringIO.
    """
    # Primed-read loop: stop as soon as read() yields an empty chunk.
    chunk = source.read(buffer_size)
    while chunk:
        dest.write(chunk)
        chunk = source.read(buffer_size)
def test_triangle_circumradius(point1, point2, point3, expected_radius):
    """
    Verify that the circumradius function returns expected values.

    Parametrized with three point coordinates and the expected radius;
    floating-point comparison uses pytest.approx.
    """
    triangle = decide.Triangle(
        decide.Point(point1), decide.Point(point2), decide.Point(point3)
    )
    assert triangle.circumradius() == pytest.approx(expected_radius)
def run_cnfs(fets, args, sims):
    """ Trains a model for each provided configuration. """
    # Assemble one configuration per feature list, each with its own
    # output/tmp subdirectory derived from the feature names.
    cnfs = []
    for fets_ in fets:
        subdir = ",".join(
            str(fet).replace(" ", "_").replace("/", "p") for fet in fets_)
        cnfs.append({
            **vars(args), "features": fets_, "sims": sims, "sync": True,
            "out_dir": path.join(args.out_dir, subdir),
            "tmp_dir": path.join("/tmp", subdir)})
    # Train configurations, either serially or on a small process pool.
    if defaults.SYNC:
        res = [train.run_trials(cnf) for cnf in cnfs]
    else:
        with multiprocessing.Pool(processes=4) as pool:
            res = pool.map(train.run_trials, cnfs)
    # Remove temporary subdirs.
    for cnf in cnfs:
        try:
            shutil.rmtree(cnf["tmp_dir"])
        except FileNotFoundError:
            pass
    # Note that accuracy = 1 - loss.
    return dict(zip(
        [tuple(cnf["features"]) for cnf in cnfs], 1 - np.array(res)))
def ParseArgs(argv):
    """Parses command line arguments.

    :param argv: argument list (excluding the program name).
    :return: the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required application bundle identifier.
    parser.add_argument(
        '-b', '--bundle-identifier', required=True,
        help='bundle identifier for the application')
    # Result destination; '-' means stdout.
    parser.add_argument(
        '-o', '--output', default='-',
        help='path to the result; - means stdout')
    return parser.parse_args(argv)
def GetCurrentBaselinePath():
    """Returns path of folder containing baseline file corresponding to the current test."""
    # PYTEST_CURRENT_TEST looks like "dir/test_file.py::test_name (phase)";
    # the part before the first ':' is the test file's path.
    currentTestPath = os.path.dirname(os.getenv('PYTEST_CURRENT_TEST').split(":")[0])
    # baselinePath is a module-level root directory for all baselines.
    currentBaselinePath = baselinePath + "/" + currentTestPath + "/"
    return currentBaselinePath
def get_all_lobbyists(official_id, cycle=None, api_key=None):
    """Query the OpenSecrets API for a candidate's contributions.

    NOTE(review): despite the function name, the API method used is
    'candContrib' (candidate contributions), not a lobbyist endpoint —
    confirm whether the name or the method is the intended one.

    Example request:
    https://www.opensecrets.org/api/?method=candContrib&cid=N00007360&cycle=2020&apikey=__apikey__

    :param official_id: OpenSecrets candidate id (``cid``).
    :param cycle: election cycle year; defaults to 2020.
    :param api_key: OpenSecrets API key handed to ``Wrapper``.
    """
    if cycle is None:
        cycle = 2020  # I don't actually know how the cycles work; I assume you can't just take the current year?
    # if API key none, get it from some sort of appwide config defined above
    w = Wrapper(api_key)
    return w.get({'method':'candContrib', 'cid': official_id, 'cycle': cycle})
def get_sale(this_line):
    """Convert the input into a dictionary, with keys matching
    the CSV column headers in the scrape_util module.

    Consumes fields from the front of ``this_line`` (the list is mutated).
    The third field is either "<head> <description>" or a bare description;
    commas are stripped from the price.
    """
    sale = {}
    sale['consignor_name'] = this_line.pop(0)
    sale['consignor_city'] = this_line.pop(0).title()
    try:
        maybe_head = this_line[0].split()
        # Raises ValueError when the first token is not a head count, or
        # IndexError when the field is empty — the bare except was narrowed
        # to exactly those two cases so real bugs are no longer swallowed.
        int(maybe_head[0])
        sale['cattle_head'] = maybe_head[0]
        sale['cattle_cattle'] = ' '.join(maybe_head[1:])
        this_line.pop(0)
    except (ValueError, IndexError):
        sale['cattle_cattle'] = this_line.pop(0)
    sale['cattle_avg_weight'] = this_line.pop(0)
    price_string = this_line.pop(0)
    sale['cattle_price_cwt'] = price_string.replace(',', '')
    return sale
def validate_besseli(nu, z, n):
    """
    Compares the results of besseli function with scipy.special. If the return
    is zero, the result matches with scipy.special.

    :param nu: order of the Bessel function.
    :param z: real or complex argument.
    :param n: derivative order (0 means the function value itself).
    :return: True when a genuine mismatch against scipy was detected,
        False otherwise.

    .. note::

        Scipy cannot compute this special case: ``scipy.special.iv(nu, 0)``,
        where nu is negative and non-integer. The correct answer is -inf, but
        scipy's result is +inf. This issue also affects derivatives of the
        iv function at ``z = 0``. For example, ``scipy.special.ivp(nu, 0, n)``.
        However, the results for *complex* argument ``z = 0j`` is correctly
        returned by scipy (which is ``nan``).
    """
    # Compute using special_functions package
    i_specf = besseli(nu, z, n)
    # Compute using scipy.special package; i0/i1 are the specialized real
    # order-0/1 routines, iv the general case, ivp the n-th derivative.
    if n == 0:
        if not isinstance(z, complex) and nu == 0:
            i_scipy = i0(z)
        elif not isinstance(z, complex) and nu == 1:
            i_scipy = i1(z)
        else:
            i_scipy = iv(nu, z)
    else:
        i_scipy = ivp(nu, z, n)
    # Whitelist false scipy results. See note in docstring above.
    ignore_scipy = False
    if (nu < 0) and (round(nu) != nu) and (z.real == 0) and (z.imag == 0):
        ignore_scipy = True
    if (round(nu) != nu) and (z.real == 0) and (z.imag == 0) and (n > 0):
        ignore_scipy = True
    # Compare; matching infinities/NaNs count as agreement, everything else
    # must fall inside the absolute tolerance band.
    error = i_specf - i_scipy
    tolerance = 1e-14
    if ignore_scipy:
        error_detected = False
    elif isinstance(error, float) and isinf(i_specf) and isinf(i_scipy) \
            and (copysign(1, i_specf) == copysign(1, i_scipy)):
        error_detected = False
    elif isinstance(error, complex) and isinf(i_specf.real) and \
            isinf(i_scipy.real) and \
            (copysign(1, i_specf.real) == copysign(1, i_scipy.real)):
        error_detected = False
    elif isinstance(error, float) and isnan(i_specf) and isnan(i_scipy):
        error_detected = False
    elif isinstance(error, complex) and isnan(i_specf.real) and \
            isnan(i_scipy.real):
        error_detected = False
    elif error.real < tolerance and error.real > -tolerance and \
            error.imag < tolerance and error.imag > -tolerance:
        error_detected = False
    else:
        error_detected = True
    # Report the mismatch in a fixed-width diagnostic line.
    if error_detected:
        if isinstance(z, complex):
            print('ERROR: nu: %+0.2f, z: (%+0.2f,%+0.2f), n: %d, '
                  % (nu, z.real, z.imag, n), end=" ")
        else:
            print('ERROR: nu: %+0.2f, z: (%+0.2f,.....), n: %d, '
                  % (nu, z.real, n), end=" ")
        if isinstance(i_specf, complex):
            print('i_nu: (%+0.3f,%+0.3f) '
                  % (i_specf.real, i_specf.imag), end=" ")
        else:
            print('i_nu: (%+0.3f,......) ' % (i_specf), end=" ")
        if isinstance(i_scipy, complex):
            print('!= (%+0.3f,%+0.3f), '
                  % (i_scipy.real, i_scipy.imag), end=" ")
        else:
            print('!= (%+0.3f,......), ' % (i_scipy), end=" ")
        if isinstance(error, complex):
            print('error: (%+0.3e,%+0.3e)'
                  % (error.real, error.imag))
        else:
            print('error: (%+0.3e,..........)' % (error))
    return error_detected
def delete_by_ip(*ip_address: Any) -> List:
    """
    Remove the rules connected to specific ip_address.

    :param ip_address: one or more source addresses whose rules are removed.
    :return: the list of removed rule objects.
    """
    removed_rules = []
    counter = 1
    for rule in rules():
        if rule.src in ip_address:
            removed_rules.append(rule)
            # After a delete the remaining rules renumber, so `counter` is
            # intentionally NOT advanced: the next rule now holds this slot.
            execute("delete", counter, force=True)
        else:
            counter += 1
    return removed_rules
def findMaxWindow(a, w):
    """
    Compute the maximum value of every length-``w`` window of ``a``.

    :param a: input array of integers
    :param w: window size
    :return: list where element i is max(a[i:i+w])

    The previous implementation never let the running maximum decrease
    once the maximal element slid out of the window (e.g. [5, 3, 1] with
    w=2 yielded [5, 5] instead of [5, 3]) and it initialized maxima to 0,
    which is wrong for all-negative inputs. This version uses the classic
    monotonic-deque algorithm, which is O(n) and correct.
    """
    from collections import deque
    result = []
    candidates = deque()  # indices of window candidates, values decreasing
    for i, val in enumerate(a):
        # Drop candidates that can never be a window max again.
        while candidates and a[candidates[-1]] <= val:
            candidates.pop()
        candidates.append(i)
        # Evict the front candidate once it falls out of the window.
        if candidates[0] <= i - w:
            candidates.popleft()
        # The first full window ends at index w - 1.
        if i >= w - 1:
            result.append(a[candidates[0]])
    return result
def filtering_news(news: list, filtered_news: list):
    """
    Filters news to remove unwanted removed articles.

    Args:
        news (list): List of articles to remove from
        filtered_news (list): List of titles to filter the unwanted news with

    Returns:
        news (list): List of articles with undesired articles removed
            (the input list is mutated in place and returned)
    """
    for x in filtered_news:
        for y in news:  # Nested loop to loop through the titles since it is a list of dictionaries
            if y["title"] == x["title"]:
                # Only the first matching article is removed; the break makes
                # the remove-during-iteration safe.
                news.remove(y)
                # Lazy %-style args avoid formatting when the level is off.
                logging.info("News filtered, removed %s", x["title"])
                break
    return news
def update_persona_use_counts_file(
    fptah: str, counts: Dict[str, int], sorted_order=True
):
    """
    Writes the persona use counts to file.

    This is to keep track of use counts for the next time that the task was
    restarted. See `load_previously_used_personas_counts` function above.
    Entries with a zero count are skipped.
    """
    logging.info(f'Writting new persona counts to {fptah}')
    entries = counts.items()
    if sorted_order:
        entries = sorted(entries, key=lambda kv: kv[1], reverse=True)
    saved_count = 0
    with open(fptah, 'w') as out_file:
        for persona, use_count in entries:
            if use_count > 0:
                saved_count += 1
                out_file.write(f'{persona} ; {use_count}\n')
    logging.info(f'Saved {saved_count} recent persona counts successfully.')
def extract_subsequence(sequence, start_time, end_time):
    """Extracts a subsequence from a NoteSequence.

    Notes starting before `start_time` are not included. Notes ending after
    `end_time` are truncated.

    Args:
      sequence: The NoteSequence to extract a subsequence from.
      start_time: The float time in seconds to start the subsequence.
      end_time: The float time in seconds to end the subsequence.

    Returns:
      A new NoteSequence that is a subsequence of `sequence` in the specified
      time range.
    """
    result = music_pb2.NoteSequence()
    result.CopyFrom(sequence)
    del result.notes[:]
    # Keep only notes whose onset lies in [start_time, end_time).
    kept = [note for note in sequence.notes
            if start_time <= note.start_time < end_time]
    for source_note in kept:
        copied = result.notes.add()
        copied.CopyFrom(source_note)
        # Truncate notes that ring past the window end.
        copied.end_time = min(source_note.end_time, end_time)
    result.total_time = min(sequence.total_time, end_time)
    return result
def read_data(filename):
    """Read the raw tweet data from a file. Replace Emails etc with special tokens.

    :param filename: path to the raw tweet file (one tweet per line).
    :return: the preprocessed corpus as a flat list of tokens.
    """
    with open(filename, 'r') as f:
        all_lines=f.readlines()
    padded_lines=[]
    for line in all_lines:
        # Substitute emoticons with placeholder tokens via the module-level
        # `rep` table (presumably emoticon -> token; verify).
        line = emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], line.lower().strip())
        # Replace @mentions, e-mail addresses, URLs, numbers and punctuation
        # with their respective special tokens.
        line = userMentionsRegex.sub(' USER ', line )
        line = emailsRegex.sub(' EMAIL ', line )
        line=urlsRegex.sub(' URL ', line)
        line=numsRegex.sub(' NUM ',line)
        line=punctuationNotEmoticonsRegex.sub(' PUN ',line)
        # Collapse 3+ repeated characters to two (e.g. "soooo" -> "soo").
        line=re.sub(r'(.)\1{2,}', r'\1\1',line)
        words_tokens=[token for token in TweetTokenizer().tokenize(line)]
        line= ' '.join(token for token in words_tokens )
        padded_lines.append(line)
    padded_data=' '.join(line for line in padded_lines)
    encoded_data=tf.compat.as_str(padded_data).split()
    return encoded_data
def client():
    """AlgodClient for testing.

    Returns a client configured with a flat fee of 1000 microAlgos.
    """
    client = _algod_client()
    client.flat_fee = True
    client.fee = 1000
    print("fee ", client.fee)
    return client
def GRU_sent_encoder(batch_size, max_len, vocab_size, hidden_dim, wordembed_dim,
                     dropout=0.0, is_train=True, n_gpus=1):
    """
    Implementing the GRU of skip-thought vectors.

    Use masks so that sentences at different lengths can be put into the same
    batch.

    sent_seq: sequence of tokens consisting a sentence, shape: batch_size x max_len
    mask: 1 indicating valid, 0 invalid, shape: batch_size x max_len
    embed_weight: word embedding, shape: vocab_size x wordembed_dim
        (presumably — inferred from the Embedding call below; verify)

    :return: the symbol produced by unrolling the GRU over the embedded
        sentence sequence.
    """
    sent_seq = mx.sym.Variable('sent_seq')
    mask = mx.sym.Variable('mask')
    embed_weight = mx.sym.Variable('embed_weight')
    # Look up word embeddings for each token id.
    embeded_seq = mx.sym.Embedding(data=sent_seq, input_dim=vocab_size, weight=embed_weight,
                                   output_dim=wordembed_dim, name='sent_embedding')
    # Unroll the GRU across max_len time steps; the mask zeroes out padding.
    sent_vec = GRU_unroll(batch_size, embeded_seq, mask=mask,
                          in_dim=wordembed_dim, seq_len=max_len,
                          num_hidden=hidden_dim, dropout=dropout,
                          prefix='sent', n_gpus=n_gpus)
    return sent_vec
def process_contours(frame_resized):
    """Get contours of the object detected.

    Segments the configured blue color range (constants.blueLower/Upper)
    in the given BGR frame and returns the external contours of the mask.
    """
    # Blur to suppress high-frequency noise before color thresholding.
    blurred = cv2.GaussianBlur(frame_resized, (11, 9), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # Threshold, then erode/dilate to remove small blobs in the mask.
    mask = cv2.inRange(hsv, constants.blueLower, constants.blueUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    return contours
def add_ignore_file_arguments(files: Optional[List[str]] = None) -> List[str]:
    """Build the ``--ignore-file`` CLI arguments for the deployment.

    :param files: extra file names to ignore, appended after the defaults
        (``config.json``, ``Dockerfile``, ``.dockerignore``).
    :return: a flat list of alternating ``--ignore-file`` flags and names.
    """
    default_ignores = ["config.json", "Dockerfile", ".dockerignore"]
    # Combine default files and user-provided files (typo `ingore` fixed).
    ignore_files = default_ignores + (files or [])
    return list(
        itertools.chain.from_iterable(
            ["--ignore-file", filename] for filename in ignore_files
        )
    )
def compute_accuracy(logits, targets):
    """Compute classification accuracy.

    Takes the per-row argmax of ``logits`` (dim=1) and returns the fraction
    of rows matching ``targets`` as a Python float.
    """
    with torch.no_grad():
        _, predicted = torch.max(logits, dim=1)
        hits = (predicted == targets).float()
        return hits.mean().item()
def division_by_zero(number: int):
    """Divide by zero. Should raise exception.

    Try requesting http://your-app/_divide_by_zero/7

    The division always raises ZeroDivisionError, which is logged; the
    sentinel result of -1 is then reported in the returned message.
    """
    result = -1
    try:
        result = number / 0
    except ZeroDivisionError:
        logger.exception("Failed to divide by zero", exc_info=True)
    # Typo fix: the user-visible message previously said "zeor".
    return f"{number} divided by zero is {result}"
def additional_setup_linear(args: Namespace):
    """Provides final setup for linear evaluation to non-user given parameters by changing args.

    Parses arguments to extract the number of classes of a dataset, correctly
    parse gpus, identify if a cifar dataset is being used and adjust the lr.

    Args:
        args: Namespace object that needs to contain, at least:
            - dataset: dataset name.
            - optimizer: optimizer name being used.
            - gpus: list of gpus to use.
            - lr: learning rate.
    """
    if args.dataset in N_CLASSES_PER_DATASET:
        args.num_classes = N_CLASSES_PER_DATASET[args.dataset]
    else:
        # hack to maintain the current pipeline
        # even if the custom dataset doesn't have any labels
        dir_path = args.data_dir / args.train_dir
        # BUG FIX: `entry.is_dir` is a method; without the call it is always
        # truthy, so files were also counted as classes. Now only
        # subdirectories are counted.
        args.num_classes = max(
            1,
            len([entry.name for entry in os.scandir(dir_path) if entry.is_dir()]),
        )
    # create backbone-specific arguments
    args.backbone_args = {"cifar": args.dataset in ["cifar10", "cifar100"]}
    if "resnet" not in args.backbone and "convnext" not in args.backbone:
        # dataset related for all transformers
        crop_size = args.crop_size[0]
        args.backbone_args["img_size"] = crop_size
        if "vit" in args.backbone:
            args.backbone_args["patch_size"] = args.patch_size
    # patch_size has been folded into backbone_args; drop it if present.
    with suppress(AttributeError):
        del args.patch_size
    if args.dali:
        assert args.dataset in ["imagenet100", "imagenet", "custom"]
    args.extra_optimizer_args = {}
    if args.optimizer == "sgd":
        args.extra_optimizer_args["momentum"] = 0.9
    # Normalize gpus into a list of ints regardless of input form.
    if isinstance(args.gpus, int):
        args.gpus = [args.gpus]
    elif isinstance(args.gpus, str):
        args.gpus = [int(gpu) for gpu in args.gpus.split(",") if gpu]
def is_ELF_got_pointer_to_external(ea):
    """Similar to `is_ELF_got_pointer`, but requires that the eventual target
    of the pointer is an external.

    :param ea: effective address of the candidate GOT entry.
    :return: True iff `ea` is a GOT pointer whose referenced target lies in
        an external segment.
    """
    if not is_ELF_got_pointer(ea):
        return False
    target_ea = get_reference_target(ea)
    return is_external_segment(target_ea)
def _check_same_nobs(*argv):
"""Raise an arror if elements in argv have different number of obs."""
n_obs = set(obj.n_obs for obj in argv)
if len(n_obs) > 1:
raise ValueError("Elements do not have the same number"
" of observations.") | 5,327,084 |
def adv_search_product_of_two_seq(seq: str):
    """
    Check If the Sequence is The Product of Two Sequences.

    seq: A string that contains a comma seperated numbers
    returns: nothing (matching pairs are printed to stdout)

    NOTE(review): relies on module-level globals `seq_list` and
    `seq_list_numeric` (the catalogue of known sequences) plus the `utils`
    and `utils_calc` helper modules — confirm they are initialised before
    this is called.
    """
    numeric_seq = utils.convert_str_to_list(seq, True, False)
    result_list = []
    # Try every unordered pair of known sequences and test whether the
    # input occurs inside their element-wise product.
    for i in range(0, len(seq_list_numeric)):
        for n in range(i + 1, len(seq_list_numeric)):
            seq_result = utils_calc.multiply_sequences(list(seq_list_numeric[i]), list(seq_list_numeric[n]))
            if utils.list_a_in_b(numeric_seq, seq_result):
                result_list.append([get_sequence_name(seq_list[i]), get_sequence_name(seq_list[n])])
        # Progress ...
        utils.waiting(i, len(seq_list_numeric))
    if len(result_list) == 0:
        print("\n[#] Nothing Found")
    else:
        print("\n[#]")
        for i in range(len(result_list)):
            print(result_list[i][0] + " <--> " + result_list[i][1])
def _normalise_dataset_path(input_path: Path) -> Path:
    """
    Dataset path should be either the direct imagery folder (mtl+bands) or a tar path.

    Translate other inputs (example: the MTL path) to one of the two.

    >>> tmppath = Path(tempfile.mkdtemp())
    >>> ds_path = tmppath.joinpath('LE07_L1GT_104078_20131209_20161119_01_T1')
    >>> ds_path.mkdir()
    >>> mtl_path = ds_path / 'LC08_L1TP_090084_20160121_20170405_01_T1_MTL.txt'
    >>> mtl_path.write_text('<mtl content>')
    13
    >>> _normalise_dataset_path(ds_path).relative_to(tmppath).as_posix()
    'LE07_L1GT_104078_20131209_20161119_01_T1'
    >>> _normalise_dataset_path(mtl_path).relative_to(tmppath).as_posix()
    'LE07_L1GT_104078_20131209_20161119_01_T1'
    >>> tar_path = tmppath / 'LS_L1GT.tar.gz'
    >>> tar_path.write_text('fake tar')
    8
    >>> _normalise_dataset_path(tar_path).relative_to(tmppath).as_posix()
    'LS_L1GT.tar.gz'
    >>> _normalise_dataset_path(Path(tempfile.mkdtemp()))
    Traceback (most recent call last):
    ...
    ValueError: No MTL files within input path .... Not a dataset?
    """
    input_path = normalise_nci_symlinks(input_path)
    if input_path.is_file():
        # A tarball is already a valid dataset path; any other file (e.g.
        # the MTL itself) is replaced by its containing folder.
        if ".tar" in input_path.suffixes:
            return input_path
        input_path = input_path.parent
    # A dataset folder must contain exactly one MTL file somewhere below it.
    mtl_files = list(input_path.rglob("*_MTL.txt"))
    if not mtl_files:
        raise ValueError(
            "No MTL files within input path '{}'. Not a dataset?".format(input_path)
        )
    if len(mtl_files) > 1:
        raise ValueError(
            "Multiple MTL files in a single dataset (got path: {})".format(input_path)
        )
    return input_path
def callLater(delay, func, *args, **kwargs):
    """
    Call a function on the Main thread after a delay (async).

    :param delay: seconds to wait before invoking *func*.
    :param func: callable to execute later with *args*/*kwargs*.
    """
    # An autorelease pool scopes the Objective-C allocations made below.
    pool = NSAutoreleasePool.alloc().init()
    runner = PyObjCMessageRunner.alloc().initWithPayload_((func, args, kwargs))
    runner.callLater_(delay)
    # Explicit dels drop our references before the pool is drained.
    del runner
    del pool
def get_customers():
    """returns an array of dicts with the customers

    Returns:
        Array[Dict]: returns an array of dicts of the customers
    """
    try:
        # NOTE(review): `openConnection` is referenced but never called —
        # this line is a no-op. It probably should establish `conn`
        # (e.g. `conn = openConnection()`); confirm against the rest of
        # the module, where `conn` must come from.
        openConnection
        with conn.cursor() as cur:
            result = cur.run_query('SELECT * FROM customer')
            cur.close()
            conn.close()
    except:
        # NOTE(review): this returns the Exception *class*, not an
        # instance, and the bare `except:` hides the real failure —
        # consider re-raising or raising a domain error instead.
        return Exception
    customers = []
    for row in result:
        # Skip the record with id 1 — presumably a reserved/system
        # customer; TODO confirm.
        if row[0] == 1:
            continue
        customer = {'id': row[0], 'name':row[1], 'credit': 0, 'rfid': row[2]}
        customers.append(customer)
    return customers
def fixtureid_es_server(fixture_value):
    """
    Return a fixture ID to be used by pytest for fixture `es_server()`.

    Parameters:

      fixture_value (:class:`~easy_server.Server`):
        The server the test runs against.
    """
    es_obj = fixture_value
    # Guard against misuse: the id function only makes sense for Server.
    assert isinstance(es_obj, easy_server.Server)
    return "es_server={0}".format(es_obj.nickname)
def topn_vocabulary(document, TFIDF_model, topn=100):
    """
    Find the top n most important words in a document.

    Parameters
    ----------
    `document` : The document to find important words in (string, or a list
        of tokens which is joined with spaces).
    `TFIDF_model` : The TF-IDF model that will be used.
    `topn`: Default = 100. Amount of top words.

    Returns
    -------
    `dictionary` : A dictionary containing words and their importance as a `float`.
    """
    # NOTE(review): `import custom_logic.src.utils` binds the name
    # `custom_logic`, not `utils`; the `utils.` call below resolves to a
    # module-level `utils` if one exists — confirm which module is intended.
    import custom_logic.src.utils
    # isinstance is the correct list check (was `type(document) == list`).
    if isinstance(document, list):
        document = " ".join(document)
    weight_list = TFIDF_list_of_weigths(TFIDF_model=TFIDF_model, abstract=document)
    return utils.tuples_to_dict(weight_list[:topn])
def embedding_table(inputs, vocab_size, embed_size, zero_pad=False,
                    trainable=True, scope="embedding", reuse=None):
    """ Generating Embedding Table with given parameters

    :param inputs: A 'Tensor' with type 'int8' or 'int16' or 'int32' or 'int64'
        containing the ids to be looked up in 'lookup table'.
    :param vocab_size: An int. Vocabulary size.
    :param embed_size: An int. Number of size of embedding vector.
    :param zero_pad: A boolean. If True, all the values of the first row (id 0)
        should be constant zeros.
    :param trainable: A boolean. Whether freeze the embedding matrix or not.
    :param scope: A str, Optional scope for 'variable_scope'.
    :param reuse: A boolean. Whether to reuse the weights of a previous layer
        by the same name.
    :return: The embedded tensor produced by looking `inputs` up in the table.
    """
    with tf.variable_scope(scope, reuse=reuse):
        embed_table = tf.get_variable('embedding_table',
                                      shape=[vocab_size, embed_size],
                                      initializer=_init,
                                      trainable=trainable,
                                      dtype=tf.float32)
        if zero_pad:
            # Replace row 0 with zeros so padding ids embed to the zero vector.
            embed_table = tf.concat((tf.zeros(shape=[1, embed_size]), embed_table[1:, :]),
                                    axis=0)
        return tf.nn.embedding_lookup(embed_table, inputs)
def get_trading_dates(start_date, end_date):
    """
    Get the list of trading days of a country's market (both start and end
    dates are included in the check). Currently only the Chinese market is
    supported.

    :param start_date: start date
    :type start_date: `str` | `date` | `datetime` | `pandas.Timestamp`
    :param end_date: end date
    :type end_date: `str` | `date` | `datetime` | `pandas.Timestamp`
    :return: list[`datetime.date`]

    :example:

    ..  code-block:: python3
        :linenos:

        [In]get_trading_dates(start_date='2016-05-05', end_date='20160505')
        [Out]
        [datetime.date(2016, 5, 5)]
    """
    return DataProxy.get_instance().get_trading_dates(start_date, end_date)
def event_loop():
    """Create an instance of the default event loop for all test cases.

    Yields the loop to the tests and guarantees it is closed afterwards —
    including when a test using the fixture raises, which previously
    leaked the loop.
    """
    loop = asyncio.get_event_loop_policy().new_event_loop()
    try:
        yield loop
    finally:
        loop.close()
def drawFigure7():
    """Draws Figure 7 (impact of the format combination).

    Uses the module-level color constants and MorphStore dataframes; saves
    both the figure and a separate legend image.
    """
    colors = [colorRed, colorGray, colorBlue, colorGreen]
    # "{}" placeholders are presumably filled in by _drawDia — verify.
    order = ["ActualWorst{}", "Uncompr", "StaticBP32", "ActualBest{}"]
    labels = ["worst combination", "uncompressed", "Static-BP-32", "best combination"]
    filename = "figure07_ssb_formats"
    _drawDia("cs", order, colors, dfMemMorphStore, dfPerfMorphStore)
    utils.saveFig(filename)
    utils.drawLegendRect(labels, colors)
    utils.saveFig(filename + "_legend")
def gm_put(state, b1, b2):
    """
    Goal method for ('pos', b1, b2) while b1 is being held.

    Emits a putdown subtask when b2 is the table, a stack subtask when b2
    is a clear block, and nothing (None) otherwise. b2 is b1's destination:
    either the table or another block.
    """
    # Guard: only applicable when the destination is not the hand itself
    # and the block in question is currently held.
    if b2 == 'hand' or state.pos[b1] != 'hand':
        return None
    if b2 == 'table':
        return [('a_putdown', b1)]
    if state.clear[b2]:
        return [('a_stack', b1, b2)]
    return None
def cs_management_client(context):
    """Build a Cloud Services mgmt client, cache it on *context*, and return it."""
    # Credentials come from the environment; a missing variable raises KeyError.
    username = os.environ['F5_CS_USER']
    secret = os.environ['F5_CS_PWD']
    context.cs_mgmt_client = CSManagementClient(user=username, password=secret)
    return context.cs_mgmt_client
def createTemporaryDirectory():
    """Create a uniquely named temporary directory and chdir into it.

    The directory is created under the module-level ``tmp_path`` with a name
    of the form ``YYYY-MM-DD_HH-MM_<letter>``; an uppercase letter suffix is
    advanced past any already-existing directory for the same minute. The
    resulting path is stored in the module-level ``tmp_dir`` global and the
    process working directory is changed into it.

    NOTE(review): only 26 suffixes exist, so a 27th collision within one
    minute raises IndexError — same as the original behavior.
    """
    global tmp_dir
    # strftime gives the zero-padded "%Y-%m-%d_%H-%M_" stamp directly,
    # replacing the original "%4i..." + replace(" ", "0") hack.
    current_time = time.strftime("%Y-%m-%d_%H-%M_", time.localtime())
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    tmp_dir_root = os.path.join(tmp_path, current_time)
    letter_index = 0
    # Advance the suffix letter until we find a directory name that is free.
    while os.path.isdir(tmp_dir_root + letters[letter_index]):
        letter_index += 1
    tmp_dir = tmp_dir_root + letters[letter_index]
    os.mkdir(tmp_dir)
    os.chdir(tmp_dir)
def pad_to_shape_label(label, shape):
    """
    Pad the label array to the given shape along the two spatial axes.

    The background channel (index 0) is padded with ones and every other
    class channel with zeros, so padded voxels remain valid one-hot
    background. Padding is centered; any odd remainder goes to the
    high-index side.

    :param label: The label for padding, of shape [n_batch, *vol_shape, n_class].
    :param shape: The target shape, of value [n_batch, *vol_shape, n_class].
    :return: The padded label array of the target shape.
    """
    # Elementwise comparison: a plain `label.shape <= shape` compares a tuple
    # against a list/tuple lexicographically (or raises TypeError), which is
    # wrong per-dimension — use np.less_equal for the intended check.
    assert np.all(np.less_equal(label.shape, shape)), \
        "The shape of array to be padded is larger than the target shape."
    offset1 = (shape[1] - label.shape[1]) // 2
    offset2 = (shape[2] - label.shape[2]) // 2
    remainder1 = (shape[1] - label.shape[1]) % 2
    remainder2 = (shape[2] - label.shape[2]) % 2
    pad_width = ((0, 0),
                 (offset1, offset1 + remainder1),
                 (offset2, offset2 + remainder2))
    # Single loop instead of two duplicated np.pad calls; only the pad value
    # differs between the background channel and the class channels.
    class_pred = [np.pad(label[..., k], pad_width, 'constant',
                         constant_values=1 if k == 0 else 0)
                  for k in range(label.shape[-1])]
    return np.stack(class_pred, axis=-1)
def download_report(
    bucket_name: str, client: BaseClient, report: str, location: str
) -> bool:
    """
    Fetch the original *report* from *bucket_name* into the
    temporary work area at *location* and return the client's response.
    """
    # NOTE(review): the -> bool annotation reflects what download_file
    # returns; confirm against the client implementation.
    result = client.download_file(
        Bucket=bucket_name,
        FileName=report,
        Location=location,
    )
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.