content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def generate_bit_byte_overview(inputstring, number_of_indent_spaces=4, show_reverse_bitnumbering=False):
    """Render a 64-character bit string as a byte/bit overview table.

    Args:
        inputstring (str): String that should be printed. Should be 64 characters long.
        number_of_indent_spaces (int): Size of indentation
        show_reverse_bitnumbering (bool): Also show the bit numbers counted
            from the opposite end.

    Raises:
        ValueError when *inputstring* has wrong length.

    Returns:
        A multi-line string.
    """
    if len(inputstring) != constants.BITS_IN_FULL_DATA:
        raise ValueError("The inputstring is wrong length: {}. {!r}".format(len(inputstring), inputstring))

    # Group the 64 bits into 8 space-separated bytes.
    byte_groups = [inputstring[pos:pos + 8] for pos in range(0, 64, 8)]
    rows = [
        " 111111 22221111 33222222 33333333 44444444 55555544 66665555",
        "76543210 54321098 32109876 10987654 98765432 76543210 54321098 32109876",
        "Byte0 Byte1 Byte2 Byte3 Byte4 Byte5 Byte6 Byte7",
        " ".join(byte_groups),
    ]
    if show_reverse_bitnumbering:
        rows.append("66665555 55555544 44444444 33333333 33222222 22221111 111111")
        rows.append("32109876 54321098 76543210 98765432 10987654 32109876 54321098 76543210")

    indent = " " * number_of_indent_spaces
    return "".join(indent + row + "\n" for row in rows)
def setup_env(deployment_name: str):
    """Ensures the environment is set up appropriately for interacting
    with Local Grapl (running inside a Docker Compose network locally)
    from *outside* that network (i.e., from your workstation).
    """
    if deployment_name != "local-grapl":
        # Only Local Grapl requires these overrides.
        return
    # NOTE: These values are copied from local-grapl.env. It's
    # unfortunate, yes, but in the interests of a decent
    # user-experience, we'll eat that pain for now. In the near term,
    # we should pull this functionality into something like graplctl
    # with a more formalized way of pointing to a specific Grapl
    # instance.
    local_grapl_env = {
        "AWS_REGION": "us-east-1",
        "S3_ENDPOINT": "http://localhost:4566",
        "S3_ACCESS_KEY_ID": "test",
        "S3_ACCESS_KEY_SECRET": "test",
        "SQS_ENDPOINT": "http://localhost:4566",
        "SQS_ACCESS_KEY_ID": "test",
        "SQS_ACCESS_KEY_SECRET": "test",
    }
    # fun fact: os.putenv is bad; assigning through os.environ is preferred
    os.environ.update(local_grapl_env)
def return_list_of_file_paths(folder_path):
    """Return the file names and full file paths inside a folder.

    Args:
        folder_path: The folder path where the files are in.

    Returns:
        file_info: Two-element list: [list of file names, list of full
            file paths]. Subdirectories are skipped.
    """
    # Scan the directory once. The original listed (and stat-ed) the
    # directory twice, which doubled the I/O and could return inconsistent
    # name/path lists if the directory changed between the two scans.
    file_names = [entry for entry in listdir(folder_path) if isfile(join(folder_path, entry))]
    file_paths = [join(folder_path, name) for name in file_names]
    return [file_names, file_paths]
def artificial_signal( frequencys, sampling_frequency=16000, duration=0.025 ):
    """
    Build a signal by concatenating one sinusoid per frequency in *frequencys*.
    """
    waves = [sinusoid(freq, sampling_frequency, duration) for freq in frequencys]
    return numpy.concatenate(tuple(waves))
def _sources():
    """Return the subdir name and extension of each of the contact prediction types.

    Each value is a list of [contact file subdir, contact file suffix,
    conkit format name, score threshold].

    :return: Contact prediction types and location.
    :rtype: dict [list [str]]
    """
    sources = _sourcenames()
    confiledir = ["deepmetapsicov", "deepmetapsicov", "deepmetapsicov"]
    confilesuffix = ["psicov", "ccmpred", "deepmetapsicov.con"]
    conkittype = ["psicov", "ccmpred", "psicov"]
    threshold = [0.2, 0.1, 0.1]
    # zip the parallel lists instead of indexing with range(len(...))
    return {
        name: [subdir, suffix, fmt, thr]
        for name, subdir, suffix, fmt, thr in zip(sources, confiledir, confilesuffix, conkittype, threshold)
    }
def pdns_forward(hostname):
    """Get the IP addresses to which the given host has resolved."""
    endpoint = BASE_API_URL + "pdns/forward/{}".format(hostname)
    return get(endpoint)
def make_conv(in_channels, out_channels, conv_type="normal", kernel_size=3, mask_activation=None, version=2, mask_init_bias=0, depth_multiplier=1, **kwargs):
    """Create a convolution block. Options: deformable, separable, or normal convolution.

    Args:
        in_channels: number of input channels
        out_channels: number of output channels
        conv_type: one of "deformable", "separable", "normal"
        kernel_size: convolution kernel size; padding preserves spatial size
        mask_activation, version, mask_init_bias: forwarded to DeformableConv2dBlock
        depth_multiplier: channel expansion of the depthwise stage ("separable" only)

    Returns:
        nn.Sequential of conv + batch norm + activation (ReLU6 for separable).
    """
    assert conv_type in ("deformable", "separable", "normal")
    padding = (kernel_size-1)//2
    if conv_type == "deformable":
        conv_layer = nn.Sequential(
            DeformableConv2dBlock(
                in_channels, out_channels, kernel_size, padding=padding, bias=False,
                mask_activation=mask_activation, version=version, mask_init_bias=mask_init_bias
            ),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )
    elif conv_type == "separable":
        hidden_channels = in_channels * depth_multiplier
        conv_layer = nn.Sequential(
            # dw: depthwise conv expands in_channels -> hidden_channels
            nn.Conv2d(in_channels, hidden_channels, kernel_size, padding=padding, groups=in_channels, bias=False),
            # BUG FIX: the batch norm must match the depthwise output width
            # (hidden_channels); BatchNorm2d(in_channels) crashed whenever
            # depth_multiplier != 1.
            nn.BatchNorm2d(hidden_channels),
            nn.ReLU6(inplace=True),
            # pw: pointwise 1x1 conv projects hidden_channels -> out_channels
            nn.Conv2d(hidden_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU6(inplace=True)
        )
        nn.init.kaiming_normal_(conv_layer[0].weight, mode="fan_out", nonlinearity="relu")
        nn.init.kaiming_normal_(conv_layer[3].weight, mode="fan_out", nonlinearity="relu")
    else:  # normal convolution
        conv_layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )
        nn.init.kaiming_normal_(conv_layer[0].weight, mode="fan_out", nonlinearity="relu")
    return conv_layer
def url_to_html_func(kind="requests") -> Callable:
    """Get a url_to_html function of a given kind.

    Args:
        kind: "requests" (plain HTTP GET) or "chrome" (render the page with a
            Selenium Chrome driver before grabbing the source).

    Returns:
        A callable mapping a URL to its HTML.

    Raises:
        ValueError: if *kind* is not recognized.
    """
    url_to_html = None
    if kind == "requests":
        import requests

        def url_to_html(url):
            r = requests.get(url)
            if r.status_code != 200:
                print(
                    f"An error occured. Returning the response object for you to analyze: {r}"
                )
                return r
            return r.content

    elif kind == "chrome":
        from selenium import webdriver
        from time import sleep

        def url_to_html(url, wait=2):
            # A fresh browser per call keeps calls independent (but is slow).
            b = webdriver.Chrome()
            b.get(url)
            if isinstance(wait, (int, float)):
                sleep(wait)
            html = b.page_source
            b.close()
            return html

    else:
        # BUG FIX: the message previously interpolated the (still-None)
        # local variable `url_to_html` instead of the offending `kind`.
        raise ValueError(f"Unknown url_to_html value: {kind}")
    assert callable(url_to_html), "Couldn't make a url_to_html function"
    return url_to_html
def get_payload_command(job):
    """
    Return the full command for executing the payload, including the sourcing of all setup files and setting of
    environment variables.

    The command is assembled in stages: resource-specific setup command,
    optional remote-file open verification, the normal/generic payload
    command, payload environment variables, and finally ATLAS-specific
    patching (PandaID export, TURL substitution, ATHENA_PROC_NUMBER).

    :param job: job object.
    :raises PilotException: TrfDownloadFailure.
    :return: command (string).
    """
    show_memory_usage()
    # Should the pilot do the setup or does jobPars already contain the information?
    preparesetup = should_pilot_prepare_setup(job.noexecstrcnv, job.jobparams)
    # Get the platform value
    # platform = job.infosys.queuedata.platform
    # Is it a user job or not?
    userjob = job.is_analysis()
    logger.info('pilot is running a user analysis job') if userjob else logger.info('pilot is running a production job')
    # Dynamically import the resource module matching the current resource
    resource_name = get_resource_name()  # 'grid' if no hpc_resource is set
    resource = __import__('pilot.user.atlas.resource.%s' % resource_name, globals(), locals(), [resource_name], 0)  # Python 3, -1 -> 0
    # get the general setup command and then verify it if required
    cmd = resource.get_setup_command(job, preparesetup)
    if cmd:
        ec, diagnostics = resource.verify_setup_command(cmd)
        if ec != 0:
            job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(ec)
            raise PilotException(diagnostics, code=ec)
    # make sure that remote file can be opened before executing payload
    catchall = job.infosys.queuedata.catchall.lower() if job.infosys.queuedata.catchall else ''
    if config.Pilot.remotefileverification_log and 'remoteio_test=false' not in catchall:
        ec = 0
        diagnostics = ""
        not_opened_turls = ""
        try:
            ec, diagnostics, not_opened_turls = open_remote_files(job.indata, job.workdir)
        except Exception as e:
            # best effort: a failed verification attempt is only logged
            logger.warning('caught exception: %s' % e)
        else:
            # read back the base trace report
            path = os.path.join(job.workdir, config.Pilot.base_trace_report)
            if not os.path.exists(path):
                logger.warning('base trace report does not exist (%s) - input file traces should already have been sent' % path)
            else:
                process_remote_file_traces(path, job, not_opened_turls)
            # fail the job if the remote files could not be verified
            if ec != 0:
                job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(ec)
                raise PilotException(diagnostics, code=ec)
    else:
        logger.debug('no remote file open verification')
    if is_standard_atlas_job(job.swrelease):
        # Normal setup (production and user jobs)
        logger.info("preparing normal production/analysis job setup command")
        cmd = get_normal_payload_command(cmd, job, preparesetup, userjob)
    else:  # Generic, non-ATLAS specific jobs, or at least a job with undefined swRelease
        logger.info("generic job (non-ATLAS specific or with undefined swRelease)")
        cmd = get_generic_payload_command(cmd, job, preparesetup, userjob)
    # add any missing trailing ;
    if not cmd.endswith(';'):
        cmd += '; '
    # only if not using a user container
    if not job.imagename:
        site = os.environ.get('PILOT_SITENAME', '')
        variables = get_payload_environment_variables(cmd, job.jobid, job.taskid, job.attemptnr, job.processingtype, site, userjob)
        cmd = ''.join(variables) + cmd
    # prepend PanDA job id in case it is not there already (e.g. runcontainer jobs)
    if 'export PandaID' not in cmd:
        cmd = "export PandaID=%s;" % job.jobid + cmd
    cmd = cmd.replace(';;', ';')
    # For direct access in prod jobs, we need to substitute the input file names with the corresponding TURLs
    # get relevant file transfer info
    #use_copy_tool, use_direct_access, use_pfc_turl = get_file_transfer_info(job)
    #if not userjob and use_direct_access and job.transfertype == 'direct':
    if not userjob and not job.is_build_job() and job.has_remoteio():  ## ported from old logic
        ## ported from old logic but still it looks strange (anisyonk)
        ## the "PoolFileCatalog.xml" should already contains proper TURLs values as it created by create_input_file_metadata()
        ## if the case is just to patch `writetofile` file, than logic should be cleaned and decoupled
        ## anyway, instead of parsing the file, it's much more easy to generate properly `writetofile` content from the beginning with TURL data
        lfns = job.get_lfns_and_guids()[0]
        cmd = replace_lfns_with_turls(cmd, job.workdir, "PoolFileCatalog.xml", lfns, writetofile=job.writetofile)
    # Explicitly add the ATHENA_PROC_NUMBER (or JOB value)
    cmd = add_athena_proc_number(cmd)
    show_memory_usage()
    logger.info('payload run command: %s' % cmd)
    return cmd
def assert_equal(
    actual: statsmodels.tsa.statespace._kalman_filter.dKalmanFilter,
    desired: statsmodels.tsa.statespace._kalman_filter.dKalmanFilter,
):
    """Compare two double-precision Kalman filter objects for equality.

    NOTE(review): appears to be an auto-generated API-usage stub (the body is
    empty); the trailing line records observed call sites — confirm against
    the stub generator before relying on it.

    usage.statsmodels: 2
    """
    ...
def parseExonBounds(start, end, n, sizes, offsets):
    """
    Parse the last 2 columns of a BED12 file and return a list of tuples with
    (exon start, exon end) entries.

    If the line is malformed (non-numeric values or fewer than *n*
    offsets/sizes), issue a warning on stderr and return [(start, end)].

    Args:
        start, end: entry bounds of the BED12 line.
        n: declared number of exons (blockCount column).
        sizes: comma-separated blockSizes column.
        offsets: comma-separated blockStarts column.
    """
    offsets = offsets.strip(",").split(",")
    sizes = sizes.strip(",").split(",")
    offsets = offsets[0:n]
    sizes = sizes[0:n]
    try:
        starts = [start + int(x) for x in offsets]
        ends = [start + int(x) + int(y) for x, y in zip(offsets, sizes)]
    except (ValueError, TypeError):
        # Narrowed from a bare `except`, which also swallowed SystemExit and
        # KeyboardInterrupt; int() failures raise ValueError (TypeError for
        # non-string inputs).
        sys.stderr.write("Warning: Received an invalid exon offset ({0}) or size ({1}), using the entry bounds instead ({2}-{3})\n".format(offsets, sizes, start, end))
        return [(start, end)]
    if len(offsets) < n or len(sizes) < n:
        sys.stderr.write("Warning: There were too few exon start/end offsets ({0}) or sizes ({1}), using the entry bounds instead ({2}-{3})\n".format(offsets, sizes, start, end))
        return [(start, end)]
    return [(x, y) for x, y in zip(starts, ends)]
def head(filename, format=None, **kwargs):
    """
    Return the header of a file without loading its data.

    The result is either a Header instance or an Archive, depending on
    whether the file holds a single object or is an archive.

    Parameters
    ----------
    filename: str, file-like object
        The filename of the data file to read. It can also be a file-like object.
    format: str, Format
        One of the implemented formats. See documentation for more details.
    kwargs: dict
        Additional options for performing the reading. The list of options depends
        on the format.
    """
    resolved = find_file(filename)
    handler = formats.get_format(format, filename=resolved)
    return handler.head(resolved, **kwargs)
def write_gpt(beam,outfile,verbose=0,params=None,asci2gdf_bin=None):
    """ Writes particles to file in GPT format.

    :param beam: particle beam object; NOTE: its units are converted to GPT
        units in place as a side effect.
    :param outfile: output file name; '.txt' is treated as ASCII, '.gdf'
        triggers a companion '.txt' + conversion target name.
    :param verbose: verbosity level for progress printing.
    :param params: unused here; kept for interface compatibility — confirm.
    :param asci2gdf_bin: optional path to the asci2gdf binary; when given,
        the ASCII output is converted to a GDF file.
    """
    watch = StopWatch()
    # Format particles
    # Target units per column expected by GPT ("GB" = gamma*beta, i.e. px/mc)
    gpt_units={"x":"m", "y":"m", "z":"m","px":"GB","py":"GB","pz":"GB","t":"s"}
    qspecies = get_species_charge(beam.species)
    qspecies.ito("coulomb")
    # per-particle charge column (same species charge for every particle)
    qs = np.full((beam['n_particle'],),1.0)*qspecies
    qbunch = beam.q.to("coulomb")
    watch.start()
    assert beam.species == 'electron' # TODO: add more species
    # number of real particles represented by the whole bunch
    nspecies = np.abs(qbunch.magnitude/qspecies.magnitude)
    # macro-particle weights scaled to real particle counts
    nmacro = nspecies*np.abs(beam["w"]) #np.full((beam.n,),1)*np.abs( (beam.q.to("coulomb")).magnitude/beam.n/qspecies.magnitude)
    vprint(f'Printing {(beam["n_particle"])} particles to "{outfile}": ',verbose>0, 0, False)
    # Scale parameters to GPT units
    for var in gpt_units:
        beam[var].ito(gpt_units[var])
    # Map beam attribute names to GPT column headers, in output order
    headers = odict( {'x':'x', 'y':'y', 'z':'z', 'px':'GBx', 'py':'GBy', 'pz':'GBz', 't':'t', 'q':'q', 'nmacro':'nmacro'} )
    header = ' '.join(headers.values())
    data = np.zeros( (len(beam["x"]),len(headers)) )
    # Fill one column per header; q and nmacro come from the derived arrays
    for index, var in enumerate(headers):
        if(var=="q"):
            data[:,index]=qs.magnitude
        elif(var=="nmacro"):
            data[:,index]=nmacro.magnitude
        else:
            data[:,index] = beam[var].magnitude
    # Derive the ASCII/GDF file name pair from the requested extension
    if(".txt"==outfile[-4:]):
        gdffile = outfile[:-4]+".gdf"
    elif('.gdf'==outfile[-4:]):
        gdffile = outfile
        outfile = outfile+'.txt'
    else:
        gdffile = outfile+".gdf"
    np.savetxt(outfile ,data, header=header ,comments='')
    if(asci2gdf_bin):
        gdfwatch = StopWatch()
        gdfwatch.start()
        vprint('Converting file to GDF: ',verbose>0,1,False)
        try:
            asci2gdf(gdffile, outfile, asci2gdf_bin)
            gdfwatch.stop()
        except Exception as ex:
            # conversion failure is reported but does not abort the write
            print('Error occured while converting ascii to gdf file: ')
            print(str(ex))
            gdfwatch.stop()
        vprint(f'done. Time ellapsed: {gdfwatch.print()}.', verbose>0, 0, True)
    watch.stop()
    # exactly one of the two closing messages prints, depending on conversion
    vprint(f'...done. Time ellapsed: {watch.print()}.', verbose>0 and asci2gdf_bin, 0, True)
    vprint(f'done. Time ellapsed: {watch.print()}.', verbose>0 and not asci2gdf_bin, 0, True)
def uncomment_magic(
    source, language="python", global_escape_flag=True, explicitly_code=True
):
    """Unescape Jupyter magics in *source* (a list of lines, edited in place),
    honoring quoted blocks and Python line continuations."""
    parser = StringParser(language)
    continued_magic = False
    for index, line in enumerate(source):
        if not parser.is_quoted() and (
            continued_magic
            or is_magic(line, language, global_escape_flag, explicitly_code)
        ):
            source[index] = unesc(line, language)
        # a magic line ending in a continuation keeps the next line magic too
        continued_magic = language == "python" and _LINE_CONTINUATION_RE.match(line)
        parser.read_line(line)
    return source
def infer_dtype_from_object(dtype: Literal["float64"]):
    """Infer a dtype from the given object.

    NOTE(review): appears to be an auto-generated API-usage stub (the body is
    empty); the annotation and count record the values observed at call
    sites — confirm against the stub generator before relying on it.

    usage.koalas: 3
    """
    ...
def clean_nice_ionice_parameters(value):
    """Verify that the passed nice/ionice parameters are not exploits.

    Returns a (error_message, value) pair: (None, value) when the
    parameters are acceptable, (message, None) when they are rejected.
    """
    if not value:
        return None, value
    parser = ErrorCatchingArgumentParser()
    # Options accepted for nice
    parser.add_argument("-n", "--adjustment", type=int)
    # Options accepted for ionice; -p is deliberately unsupported
    parser.add_argument("--classdata", type=int)
    parser.add_argument("-c", "--class", type=int)
    parser.add_argument("-t", "--ignore", action="store_true")
    try:
        parser.parse_args(value.split())
    except ValueError:
        # Also log at start-up if invalid parameter was set in the ini
        message = "%s: %s" % (T("Incorrect parameter"), value)
        logging.error(message)
        return message, None
    return None, value
def get_interest_data(src, targ, tuples: dict, case: str):
    """Extract the interesting slice of each .npy volume and save it.

    Args:
        src (str): source directory template for the main .npy files
            (formatted with *case*).
        targ (str): target directory template for the extracted .npy files
            (formatted with *case*; expected to end with a path separator).
        tuples (dict): maps the integer index found in each file name to an
            inclusive (first, last) slice range along axis 0.
        case (str): 'AD'/'CN'/'MCI'
    """
    # NOTE: changes the process working directory as a side effect.
    os.chdir(src.format(case))
    for file in os.listdir():
        data = np.load(file, allow_pickle=True)
        # FIX: raw-string regex — the old '\d+' literal emits a
        # DeprecationWarning (invalid escape sequence) on modern Pythons.
        index = int(re.findall(r'\d+', file)[0])
        tup = tuples[index]
        # inclusive upper bound, hence the +1
        i_data = data[tup[0] : tup[1] + 1, :, :]
        print(f'{case} {file} with interest space {tup} loaded', end=' - ')
        output = targ.format(case) + f'{file}'
        np.save(output, i_data)
        print('saved')
    return
def rand_alnum(length=0):
    """
    Create a random alphanumeric string.

    :param length: Desired length; if 0 (the default), a random length in
        [10, 30] is used.
    :return: A random string of ASCII letters and digits.
    """
    # Python 3 fix: `string.letters` and `xrange` no longer exist (the rest
    # of this file uses f-strings, so it is Python 3); use
    # string.ascii_letters and range instead.
    from random import choice, randint
    from string import ascii_letters, digits
    jibber = ascii_letters + digits
    return ''.join(choice(jibber) for _ in range(length or randint(10, 30)))
def _GenerateGstorageLink(c, p, b):
"""Generate Google storage link given channel, platform, and build."""
return 'gs://chromeos-releases/%s-channel/%s/%s/' % (c, p, b) | 32,018 |
def parse_decl(inputtype, flags):
    """
    Parse type declaration

    @param inputtype: file name or C declarations (depending on the flags)
    @param flags: combination of PT_... constants or 0
    @return: None on failure or (name, type, fields) tuple
    """
    decl = inputtype
    # idc_parse_decl expects a trailing semicolon; append one when missing
    if decl and not decl.endswith(';'):
        decl = decl + ';'
    return ida_typeinf.idc_parse_decl(None, decl, flags)
def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training,
                   kernel_initializer, residual=True):
    """Post-attention processing: project the attention output back to
    `d_model`, apply dropout, then (optionally residual) layer norm."""
    monitor_dict = {}
    # post-attention projection kernel (back to `d_model`)
    proj_kernel = tf.get_variable("o/kernel", [d_model, n_head, d_head],
                                  dtype=h.dtype, initializer=kernel_initializer)
    # contract the (head, head-dim) axes of the attention vector
    prefix = get_einsum_prefix(attn_vec.shape.ndims - 2)
    attn_out = tf.einsum("{0}nd,hnd->{0}h".format(prefix), attn_vec, proj_kernel)
    proj_bias = tf.get_variable("o/bias",
                                [d_model], dtype=h.dtype,
                                initializer=tf.zeros_initializer())
    attn_out = attn_out + proj_bias
    attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
    output, res_lnorm_dict = residual_and_layer_norm(
        h, attn_out, use_residual=residual)
    monitor_dict = update_monitor_dict(monitor_dict, res_lnorm_dict)
    return output, monitor_dict
def get_recursively(in_dict, search_pattern):
    """
    Take a dict with nested lists and dicts and collect the values of every
    key equal to *search_pattern*, at any depth.

    Args:
        in_dict: dictionary to search.
        search_pattern: key to look for.

    Returns:
        List of matching values in traversal order.
    """
    fields_found = []
    for key, value in in_dict.items():
        if key == search_pattern:
            fields_found.append(value)
        elif isinstance(value, dict):
            fields_found.extend(get_recursively(value, search_pattern))
        elif isinstance(value, list):
            # BUG FIX: the docstring promised nested *lists* were searched,
            # but list values were previously skipped entirely.
            for item in value:
                if isinstance(item, dict):
                    fields_found.extend(get_recursively(item, search_pattern))
    return fields_found
def _send_pubsub_message(project_id, reporting_topic, pubsub_payload):
    """Sends a pubsub message.

    Args:
        project_id: ID of the Google Cloud Project where the solution is deployed.
        reporting_topic: Pub/Sub topic to use in the message to be sent.
        pubsub_payload: Payload of the Pub/Sub message to be sent.

    Returns:
        None.
    """
    client = pubsub_v1.PublisherClient()
    topic_path = client.topic_path(project_id, reporting_topic)
    encoded_payload = bytes(json.dumps(pubsub_payload), 'utf-8')
    # .result() blocks until the publish is acknowledged (or raises)
    future = client.publish(topic_path, data=encoded_payload)
    future.result()
def neals_funnel(ndims = 10,
                 name = 'neals_funnel'):
    """Creates a funnel-shaped distribution.

    This distribution was first described in [1]. The distribution is constructed
    by transforming a N-D gaussian with scale [3, 1, ...] by scaling all but the
    first dimensions by `exp(x0 / 2)` where `x0` is the value of the first
    dimension.

    This distribution is notable for having a relatively very narrow "neck" region
    which is challenging for HMC to explore. This distribution resembles the
    posteriors of centrally parameterized hierarchical models.

    Args:
        ndims: Dimensionality of the distribution. Must be at least 2.
        name: Name to prepend to ops created in this function, as well as to the
            `code_name` in the returned `TargetDensity`.

    Returns:
        target: `TargetDensity` specifying the funnel distribution. The
            `distribution` attribute is an instance of `TransformedDistribution`.

    Raises:
        ValueError: If ndims < 2.

    #### References
    1. Neal, R. M. (2003). Slice sampling. Annals of Statistics, 31(3), 705-767.
    """
    if ndims < 2:
        raise ValueError(f'ndims must be at least 2, saw: {ndims}')
    with tf.name_scope(name):

        def bijector_fn(x):
            """Funnel transform: scale dims 1..n-1 by exp(x0 / 2)."""
            batch_shape = tf.shape(x)[:-1]
            # first dimension keeps scale 1; the rest scale with exp(x0 / 2)
            scale = tf.concat(
                [
                    tf.ones(tf.concat([batch_shape, [1]], axis=0)),
                    tf.exp(x[Ellipsis, :1] / 2) *
                    tf.ones(tf.concat([batch_shape, [ndims - 1]], axis=0)),
                ],
                axis=-1,
            )
            return tfb.Scale(scale)

        # base gaussian with scale [3, 1, ..., 1], then the funnel transform
        mg = tfd.MultivariateNormalDiag(
            loc=tf.zeros(ndims), scale_diag=[3.] + [1.] * (ndims - 1))
        dist = tfd.TransformedDistribution(
            mg, bijector=tfb.MaskedAutoregressiveFlow(bijector_fn=bijector_fn))
        return target_spec.TargetDensity.from_distribution(
            distribution=dist,
            constraining_bijectors=tfb.Identity(),
            expectations=dict(
                params=target_spec.expectation(
                    fn=tf.identity,
                    human_name='Parameters',
                    # The trailing dimensions come from a product distribution of
                    # independent standard normal and a log-normal with a scale of
                    # 3 / 2.
                    # See https://en.wikipedia.org/wiki/Product_distribution for the
                    # formulas.
                    # For the mean, the formulas yield zero.
                    ground_truth_mean=np.zeros(ndims),
                    # For the standard deviation, all means are zero and standard
                    # deivations of the normals are 1, so the formula reduces to
                    # `sqrt((sigma_log_normal + mean_log_normal**2))` which reduces
                    # to `exp((sigma_log_normal)**2)`.
                    ground_truth_standard_deviation=np.array([3.] +
                                                             [np.exp((3. / 2)**2)] *
                                                             (ndims - 1)),
                ),),
            code_name=f'{name}_ndims_{ndims}',
            human_name='Neal\'s Funnel',
        )
def _has_desired_permit(permits, acategory, astatus):
"""
return True if permits has one whose
category_code and status_code match with the given ones
"""
if permits is None:
return False
for permit in permits:
if permit.category_code == acategory and\
permit.status_code == astatus:
return True
return False | 32,024 |
def colon(mac):
    """Format *mac* as colon-separated pairs, e.g. ``aa:aa:aa:aa:aa:aa``."""
    return _reformat(mac, digit_grouping=2, separator=':')
def greet(info: Info, string: str, repeat: int, out: TextIO):
    """
    Print 'Hello <string>!' <repeat> times.
    """
    if info.verbose:
        click.echo("'greet' is running in verbose mode.")
        click.echo('Home directory is {}'.format(info.home_directory))
    greeting = 'Hello {}!'.format(string)
    for _ in range(repeat):
        click.echo(greeting, file=out)
def create_strings_from_wikipedia(minimum_length, count, lang):
    """
    Create all string by randomly picking Wikipedia articles and taking sentences from them.

    Args:
        minimum_length: minimum number of words a kept line must exceed.
        count: number of sentences to return.
        lang: Wikipedia language code (e.g. "en").

    Returns:
        List of exactly *count* cleaned sentence strings.
    """
    sentences = []
    while len(sentences) < count:
        # We fetch a random page
        page_url = "https://{}.wikipedia.org/wiki/Special:Random".format(lang)
        try:
            page = requests.get(page_url, timeout=3.0)  # take into account timeouts
        except requests.exceptions.RequestException:
            # Narrowed from a bare `except`: retry only on network/HTTP
            # failures instead of swallowing every exception (including
            # KeyboardInterrupt and SystemExit).
            continue
        soup = BeautifulSoup(page.text, "html.parser")
        for script in soup(["script", "style"]):
            script.extract()
        # Only take a certain length
        lines = list(
            filter(
                lambda s: len(s.split(" ")) > minimum_length
                and "Wikipedia" not in s
                and "wikipedia" not in s,
                [
                    " ".join(re.findall(r"[\w']+", s.strip()))[0:200]
                    for s in soup.get_text().splitlines()
                ],
            )
        )
        # Remove the last lines that talks about contributing
        sentences.extend(lines[0: max([1, len(lines) - 5])])
    return sentences[0:count]
def test_get_post_stac_search__pagination__page_1__limit_2():
    """[GET|POST] /stac/search

    Page 1 with limit 2: verifies the `context` pagination metadata
    (matched/returned counts per collection) and the two returned CBERS4A
    MUX features. The same expected body is asserted for both HTTP
    methods; only the `links` section differs between GET and POST.
    """
    service = STAC(url)
    params = {
        "bbox": [ -68.0273437, -25.0059726, -34.9365234, 0.3515602 ],
        "datetime": "2019-12-22T00:00:00/2020-01-22T23:59:00",
        "page": 1,
        "limit": 2
    }
    # Expected response fixture (captured from the local INPE STAC service).
    expected = {
        "stac_version": "0.9.0",
        "stac_extensions": ["context"],
        "context": {
            "page": 1,
            "limit": 2,
            "matched": 1859,
            "returned": 2,
            "meta": [
                {
                    "name": "CBERS4A_MUX_L2_DN",
                    "context": {
                        "page": 1,
                        "limit": 2,
                        "matched": 759,
                        "returned": 2
                    }
                },
                {
                    "name": "CBERS4A_MUX_L4_DN",
                    "context": {
                        "page": 1,
                        "limit": 2,
                        "matched": 242,
                        "returned": 0
                    }
                },
                {
                    "name": "CBERS4A_WFI_L2_DN",
                    "context": {
                        "page": 1,
                        "limit": 2,
                        "matched": 94,
                        "returned": 0
                    }
                },
                {
                    "name": "CBERS4A_WFI_L4_DN",
                    "context": {
                        "page": 1,
                        "limit": 2,
                        "matched": 35,
                        "returned": 0
                    }
                },
                {
                    "name": "CBERS4A_WPM_L2_DN",
                    "context": {
                        "page": 1,
                        "limit": 2,
                        "matched": 606,
                        "returned": 0
                    }
                },
                {
                    "name": "CBERS4A_WPM_L4_DN",
                    "context": {
                        "page": 1,
                        "limit": 2,
                        "matched": 123,
                        "returned": 0
                    }
                }
            ]
        },
        "type": "FeatureCollection",
        "features": [
            {
                "stac_version": "0.9.0",
                "stac_extensions": [
                    "eo",
                    "query"
                ],
                "type": "Feature",
                "id": "CBERS4A_MUX21614520200122",
                "collection": "CBERS4A_MUX_L2_DN",
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [
                            [
                                -58.3382,
                                -24.8867
                            ],
                            [
                                -58.3382,
                                -25.9742
                            ],
                            [
                                -57.1717,
                                -25.9742
                            ],
                            [
                                -57.1717,
                                -24.8867
                            ],
                            [
                                -58.3382,
                                -24.8867
                            ]
                        ]
                    ]
                },
                "bbox": [
                    -58.3382,
                    -25.9742,
                    -57.1717,
                    -24.8867
                ],
                "properties": {
                    "datetime": "2020-01-22T14:08:20",
                    "path": 216,
                    "row": 145,
                    "satellite": "CBERS4A",
                    "sensor": "MUX",
                    "cloud_cover": 90,
                    "sync_loss": None,
                    "eo:gsd": -1,
                    "eo:bands": [
                        {
                            "name": "blue",
                            "common_name": "blue"
                        },
                        {
                            "name": "green",
                            "common_name": "green"
                        },
                        {
                            "name": "red",
                            "common_name": "red"
                        },
                        {
                            "name": "nir",
                            "common_name": "nir"
                        }
                    ]
                },
                "assets": {
                    "blue": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND5.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            0
                        ]
                    },
                    "blue_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND5.xml",
                        "type": "application/xml"
                    },
                    "green": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND6.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            1
                        ]
                    },
                    "green_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND6.xml",
                        "type": "application/xml"
                    },
                    "red": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND7.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            2
                        ]
                    },
                    "red_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND7.xml",
                        "type": "application/xml"
                    },
                    "nir": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND8.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            3
                        ]
                    },
                    "nir_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145_L2_BAND8.xml",
                        "type": "application/xml"
                    },
                    "thumbnail": {
                        "href": "http://localhost:8089/datastore/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_145_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_145.png",
                        "type": "image/png"
                    }
                },
                "links": [
                    {
                        "href": "http://localhost:8089/inpe-stac/collections/CBERS4A_MUX_L2_DN/items/CBERS4A_MUX21614520200122",
                        "rel": "self"
                    },
                    {
                        "href": "http://localhost:8089/inpe-stac/collections/CBERS4A_MUX_L2_DN",
                        "rel": "parent"
                    },
                    {
                        "href": "http://localhost:8089/inpe-stac/collections/CBERS4A_MUX_L2_DN",
                        "rel": "collection"
                    },
                    {
                        "href": "http://localhost:8089/inpe-stac/stac",
                        "rel": "root"
                    }
                ]
            },
            {
                "stac_version": "0.9.0",
                "stac_extensions": [
                    "eo",
                    "query"
                ],
                "type": "Feature",
                "id": "CBERS4A_MUX21614420200122",
                "collection": "CBERS4A_MUX_L2_DN",
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [
                            [
                                -58.1453,
                                -24.0965
                            ],
                            [
                                -58.1453,
                                -25.1831
                            ],
                            [
                                -56.9876,
                                -25.1831
                            ],
                            [
                                -56.9876,
                                -24.0965
                            ],
                            [
                                -58.1453,
                                -24.0965
                            ]
                        ]
                    ]
                },
                "bbox": [
                    -58.1453,
                    -25.1831,
                    -56.9876,
                    -24.0965
                ],
                "properties": {
                    "datetime": "2020-01-22T14:08:07",
                    "path": 216,
                    "row": 144,
                    "satellite": "CBERS4A",
                    "sensor": "MUX",
                    "cloud_cover": 90,
                    "sync_loss": None,
                    "eo:gsd": -1,
                    "eo:bands": [
                        {
                            "name": "blue",
                            "common_name": "blue"
                        },
                        {
                            "name": "green",
                            "common_name": "green"
                        },
                        {
                            "name": "red",
                            "common_name": "red"
                        },
                        {
                            "name": "nir",
                            "common_name": "nir"
                        }
                    ]
                },
                "assets": {
                    "blue": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND5.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            0
                        ]
                    },
                    "blue_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND5.xml",
                        "type": "application/xml"
                    },
                    "green": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND6.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            1
                        ]
                    },
                    "green_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND6.xml",
                        "type": "application/xml"
                    },
                    "red": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND7.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            2
                        ]
                    },
                    "red_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND7.xml",
                        "type": "application/xml"
                    },
                    "nir": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND8.tif",
                        "type": "image/tiff; application=geotiff",
                        "eo:bands": [
                            3
                        ]
                    },
                    "nir_xml": {
                        "href": "http://localhost:8089/api/download/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144_L2_BAND8.xml",
                        "type": "application/xml"
                    },
                    "thumbnail": {
                        "href": "http://localhost:8089/datastore/TIFF/CBERS4A/2020_01/CBERS_4A_MUX_RAW_2020_01_22.14_00_00_ETC2/216_144_0/2_BC_UTM_WGS84/CBERS_4A_MUX_20200122_216_144.png",
                        "type": "image/png"
                    }
                },
                "links": [
                    {
                        "href": "http://localhost:8089/inpe-stac/collections/CBERS4A_MUX_L2_DN/items/CBERS4A_MUX21614420200122",
                        "rel": "self"
                    },
                    {
                        "href": "http://localhost:8089/inpe-stac/collections/CBERS4A_MUX_L2_DN",
                        "rel": "parent"
                    },
                    {
                        "href": "http://localhost:8089/inpe-stac/collections/CBERS4A_MUX_L2_DN",
                        "rel": "collection"
                    },
                    {
                        "href": "http://localhost:8089/inpe-stac/stac",
                        "rel": "root"
                    }
                ]
            }
        ]
    }
    # GET and POST responses share the body above; only `links` differs.
    links_GET_method, links_POST_method = stac_search__get_expected_links_to_GET_method(params)
    expected['links'] = links_GET_method
    response = service.search(params=params)
    assert 200 == response.status_code
    assert response.headers.get('content-type') in ('application/json', 'application/geo+json')
    assert expected == response.json()
    expected['links'] = links_POST_method
    response = service.search(params=params, method="POST")
    assert 200 == response.status_code
    assert response.headers.get('content-type') in ('application/json', 'application/geo+json')
    assert expected == response.json()
def computeHashCheck(ringInputString, ringSize):
    """Calculate the knot hash check (Advent of Code 2017, day 10, part 1).

    Args:
        ringInputString (str): The list of lengths to be hashed, as a comma-separated list.
        ringSize (int): The size of the ring to be "knotted".

    Returns:
        int: Product of the first two elements of the knotted ring.
    """
    lengths = [int(i) for i in ringInputString.split(',')]
    ringContents = list(range(ringSize))
    cursorPosition = 0
    skipSize = 0
    # Hashing algorithm as defined in the AoC Day 10 instructions
    for length in lengths:
        # Duplicate the ring so a reversal may wrap past the end of the list
        doubled = ringContents + ringContents
        # Reverse `length` elements starting at the current position
        segment = doubled[cursorPosition:cursorPosition + length]
        segment.reverse()
        doubled[cursorPosition:cursorPosition + length] = segment
        if cursorPosition + length > ringSize:
            # Reassemble: the wrapped-around portion landed in the second copy
            ringContents = doubled[ringSize:cursorPosition + ringSize] + doubled[cursorPosition:ringSize]
        else:
            ringContents = doubled[:ringSize]
        # BUG FIX: advance the cursor modulo the ring size. The original
        # subtracted ringSize at most once (and only when strictly greater),
        # so once `length + skipSize` exceeded the ring the cursor could
        # point past the duplicated list and corrupt later reversals.
        cursorPosition = (cursorPosition + length + skipSize) % ringSize
        # Increase the skip size by one
        skipSize += 1
    # The hash is the product of the first two elements of the final ring
    return ringContents[0] * ringContents[1]
def emPerformance(filesAndDirectories=None, resultsFileName=None, iterationCount=3, modes=None, testTypes=None, viewports=None, verbose=False):
    """
    Same as emPerformanceTest but the options separated into different
    arguments. Legacy support.

    filesAndDirectories : List of locations in which to find Maya files to test
    resultsFileName     : Location of results. Default is stdout.
                          Also correctly interprets the names 'stderr',
                          'cout', and 'cerr', plus if you use the
                          destination 'csv' it will return a Python list
                          of the CSV data.

    See the emPerformanceOptions class for a description of the other args.
    """
    # NOTE(review): stub body — this legacy wrapper currently does nothing and
    # returns None.  Presumably the real work happens in emPerformanceTest;
    # confirm before relying on this entry point.
    pass
def delete_tag(xmltree: Union[etree._Element, etree._ElementTree],
               schema_dict: 'fleur_schema.SchemaDict',
               tag_name: str,
               complex_xpath: 'etree._xpath' = None,
               occurrences: Union[int, Iterable[int], None] = None,
               **kwargs: Any) -> Union[etree._Element, etree._ElementTree]:
    """
    This method deletes a tag with a uniquely identified xpath.

    :param xmltree: an xmltree that represents inp.xml
    :param schema_dict: InputSchemaDict containing all information about the structure of the input
    :param tag_name: str of the tag to delete
    :param complex_xpath: an optional xpath to use instead of the simple xpath for the evaluation
                          (may be None, in which case the schema-derived xpath is used)
    :param occurrences: int or list of int. Which occurrence of the parent nodes to delete a tag.
                        By default all nodes are used.

    Kwargs:
        :param contains: str, this string has to be in the final path
        :param not_contains: str, this string has to NOT be in the final path

    :returns: xmltree with deleted tags
    """
    from masci_tools.util.xml.xml_setters_basic import xml_delete_tag
    from masci_tools.util.xml.common_functions import check_complex_xpath
    # Resolve the unique simple xpath for the tag from the schema; kwargs
    # (contains/not_contains) disambiguate when several paths match.
    base_xpath = schema_dict.tag_xpath(tag_name, **kwargs)
    if complex_xpath is None:
        complex_xpath = base_xpath
    # Ensure the user-supplied complex xpath is compatible with the base path.
    check_complex_xpath(xmltree, base_xpath, complex_xpath)
    xmltree = xml_delete_tag(xmltree, complex_xpath, occurrences=occurrences)
    return xmltree
def cart2pol_vectorised(x, y):
    """Convert Cartesian coordinates to polar form, element-wise.

    :param x: x-coordinate(s), scalar or array-like
    :param y: y-coordinate(s), scalar or array-like
    :return: tuple ``(r, theta)`` of radii and angles (radians)
    """
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return radius, angle
def encryption(text):
    """
    encryption function for saving ideas

    :param text: plaintext to encrypt.  ``text * 16`` repeats the input 16
        times, which guarantees a length that is a multiple of the 16-byte AES
        block size but is NOT standard (e.g. PKCS#7) padding — the decrypting
        side must know to strip the repetition.  TODO confirm this is
        intentional and matches the decryptor.
    :return: AES-CBC ciphertext of the repeated input
    """
    # SECURITY NOTE(review): a fixed key (cipher_key) and a fixed IV
    # (cipher_IV456) are reused for every message; CBC with a static IV leaks
    # equality of plaintext prefixes across messages.  Consider a random
    # per-message IV prepended to the ciphertext.
    return AES.new(cipher_key, AES.MODE_CBC, cipher_IV456).encrypt(text * 16)
def add_common_arguments(parser):
    """Register the command-line options shared by the train and test scripts.

    The parser is mutated in place; nothing is returned.
    """
    # --config is special-cased by configargparse: it names a YAML file whose
    # keys are read as if they were command-line options.
    parser.add_argument(
        "--config",
        is_config_file=True,
        dest="config",
        required=False,
        help="config in yaml format",
    )
    # Remaining options take no special configargparse handling; register
    # them data-driven so flag and kwargs stay visually paired.
    plain_options = (
        ("--pretrain_url",
         {"help": "URL to pre-trained model (default downloads SwAV-ImageNet)"}),
        ("--pretrain_fpath",
         {"help": "File path to trained model"}),
        ("--test_ds_names",
         {"nargs": "+",
          "help": "Dataset names (directories in ../datasets/emotions/test)"}),
        ("--device",
         {"help": "Device number used if cuda is detected"}),
        ("--model_pretrain_method",
         {"help": "Which model to train, try Swav, SimCLR or SimSiam"}),
    )
    for flag, options in plain_options:
        parser.add_argument(flag, **options)
def alteryx_job_path_is_file(job_dict: dict) -> bool:
    """Validate that the job's ``path`` entry points at an existing file.

    :param job_dict: job description holding the Alteryx workflow under "path"
    :return: True when the path is an existing file
    :raises InvalidFilePathError: when the path does not name a file
    """
    workflow_path = job_dict["path"]
    if os.path.isfile(workflow_path):
        return True
    raise InvalidFilePathError("Alteryx path file not exists")
def _concat(to_stack):
""" function to stack (or concatentate) depending on dimensions """
if np.asarray(to_stack[0]).ndim >= 2:
return np.concatenate(to_stack)
else:
return np.hstack(to_stack) | 32,036 |
def make_orthonormal_matrix(n):
    """
    Makes a square matrix which is orthonormal by concatenating
    random Householder transformations

    :param n: size of the square matrix
    :returns: an n x n orthogonal numpy array

    Note: May not distribute uniformly in the O(n) manifold.
    Note: Naively using ortho_group, special_ortho_group in scipy will result in unbearable computing time! Not useful
    """
    A = np.identity(n)
    # d accumulates the +/-1 sign corrections applied to the rows at the end.
    d = np.zeros(n)
    d[n-1] = np.random.choice([-1.0, 1.0])
    # Build up the matrix by applying one Householder reflection per row,
    # from the bottom up.
    for k in range(n-2, -1, -1):
        # generate random Householder transformation
        x = np.random.randn(n-k)
        s = np.sqrt((x**2).sum()) # norm(x)
        # copysign keeps x[0] += s numerically stable (avoids cancellation).
        sign = math.copysign(1.0, x[0])
        s *= sign
        d[k] = -sign
        x[0] += s
        beta = s * x[0]
        # apply the transformation
        y = np.dot(x,A[k:n,:]) / beta
        A[k:n, :] -= np.outer(x,y)
    # change sign of rows
    A *= d.reshape(n,1)
    return A
def device_to_target(device: Device):
    """Map a Netbox Device to a Prometheus target.

    Copies the device's status, tenant, primary IP, role, type, platform,
    site and attached service names into target labels.
    """
    target = Target(device.name)
    target.add_label("type", TargetType.DEVICE.value)
    target.add_label("status", device.status)
    extract_tenant(device, target)
    # The following attributes are optional in Netbox, so every access is
    # guarded against both absence and None.
    if hasattr(device, "primary_ip") and device.primary_ip is not None:
        # primary_ip.address is in CIDR notation; keep only the host address.
        target.add_label("ip", str(IPNetwork(device.primary_ip.address).ip))
    if hasattr(device, "device_role") and device.device_role is not None:
        target.add_label("role", device.device_role.name)
        target.add_label("role_slug", device.device_role.slug)
    if hasattr(device, "device_type") and device.device_type is not None:
        target.add_label("device_type", device.device_type.model)
        target.add_label("device_type_slug", device.device_type.slug)
    if hasattr(device, "platform") and device.platform is not None:
        target.add_label("platform", device.platform.name)
        target.add_label("platform_slug", device.platform.slug)
    if hasattr(device, "site") and device.site is not None:
        target.add_label("site", device.site.name)
        target.add_label("site_slug", device.site.slug)
    services = []
    for service in Service.objects.filter(device__id=device.id).all():
        services.append(service.name)
    if len(services) > 0:
        # Leading/trailing commas presumably let consumers match ',name,'
        # without substring collisions — confirm with the label's consumers.
        target.add_label("services", ',{},'.format(','.join(services)))
    # todo: Add more fields
    # tags
    return target
def build_synthetic_dataset_cae(window_size:int, **kwargs:Dict)->Tuple[SingleGapWindowsSequence, SingleGapWindowsSequence]:
    """Return SingleGapWindowsSequence for training and testing.

    Parameters
    --------------------------
    window_size: int,
        Windows size to use for rendering the synthetic datasets.
    kwargs: Dict,
        Additional keyword arguments forwarded verbatim to
        build_synthetic_dataset (presumably dataset rendering options —
        confirm against that function's signature).

    Returns
    --------------------------
    Tuple of (training, testing) SingleGapWindowsSequence objects.
    """
    return build_synthetic_dataset(window_size, SingleGapWindowsSequence, **kwargs)
def montecarlo_2048(game, simulations_per_move, steps, count_zeros=False, print_averages=True, return_scores=False):
    """
    Test each possible move, run montecarlo simulations and return a dictionary of average scores,
    one score for each possible move

    game: a 2048 game object exposing calculate_score(), check_allowed_moves()
        and make_move() — confirm exact interface with the game class.
    simulations_per_move (int): number of random rollouts per candidate move
    steps (int): maximum number of random moves per rollout
    count_zeros (bool): NOTE(review) — both branches below call
        calculate_score(score_type="simple_sum"), so this flag currently has
        no effect; confirm the intended alternative scoring.
    print_averages (bool): print the per-move averages
    return_scores (bool): return raw per-move score lists instead of averages
    """
    # Retrieve game score at the current state
    # NOTE(review): game_score is never used below.
    game_score = game.calculate_score()
    # Retrieve list of possible moves
    allowed_moves = game.check_allowed_moves()
    # Create a dictionary to store average scores per allowable move
    average_scores = np.zeros(4)
    # Will contain 4 lists of scores, one list for each starting move (LEFT, DOWN, RIGHT, UP)
    # NOTE(review): [[0]] * 4 repeats a reference to the SAME inner list; this
    # is only safe because each slot is reassigned (never mutated) below.
    scores_per_move = [[0]] * 4
    for move in allowed_moves:
        score_list = []
        for simulation in range(simulations_per_move):
            # Create a a copy of the game at the current state
            game_copy = deepcopy(game)
            game_copy.make_move(move)
            for i in range(steps):
                # Check if there is any move allowed
                if len(game_copy.check_allowed_moves()) > 0:
                    # Pick a random move within the allowed ones
                    random_move = random.choice(game_copy.check_allowed_moves())
                    game_copy.make_move(random_move)
            # append simulation result
            if count_zeros == True:
                score_list.append(game_copy.calculate_score(score_type="simple_sum"))
            else:
                score_list.append(game_copy.calculate_score(score_type="simple_sum"))
        # Moves are numbered 1..4, hence the -1 when indexing.
        scores_per_move[move-1] = score_list
        average_scores[move-1] = np.average(score_list)
    if print_averages:
        print("[1] LEFT score: ", average_scores[0])
        print("[2] DOWN score: ", average_scores[1])
        print("[3] RIGHT score: ", average_scores[2])
        print("[4] UP score: ", average_scores[3])
        print("average_scores: ", average_scores)
    # NOTE(review): choice is computed but never used.
    choice = np.argmax(average_scores) + 1
    # If every average is zero (no rollout scored), fall back to a random
    # one-hot score vector so the caller still gets a usable preference.
    steal = 0
    for value in average_scores:
        if value > 0:
            steal = 1
    if steal == 0:
        random_scores = np.zeros(4)
        random_scores[np.random.choice([0,1,2,3])] = 1
        return random_scores
    if return_scores:
        return scores_per_move
    else:
        return average_scores
def getAggregation(name, local=False, minOnly=False, maxOnly=False):
    """Return an aggregated statistic by name.

    :param name: key into the global STATISTICS registry
    :param local: return only the node-local value
    :param minOnly: return just the "min" entry when present
    :param maxOnly: return just the "max" entry when present
    :return: the full statistic dict, or the selected scalar
    """
    stat = STATISTICS[name]
    full_result = stat.getStatistic()
    if local:
        return stat.getLocalValue()
    if minOnly and "min" in full_result:
        return full_result["min"]
    if maxOnly and "max" in full_result:
        return full_result["max"]
    return full_result
def get_gene_symbol(row):
    """Extracts gene name from annotation

    Args:
        row (pandas.Series): annotation info (str) at 'annotation' index
    Returns:
        gene_symbol (str): comma-joined unique gene name(s), in first-occurrence order
    """
    # NOTE: mutates a global pandas option as a side effect; kept for
    # backward compatibility with existing callers.
    pd.options.mode.chained_assignment = None
    tokens = row["annotation"].split(",")
    genes = [token.split("|")[0] for token in tokens]
    # dict.fromkeys de-duplicates while preserving first-occurrence order;
    # the original ",".join(set(genes)) produced a nondeterministic ordering
    # because set iteration order is unspecified.
    gene_symbol = ",".join(dict.fromkeys(genes))
    return gene_symbol
def session_login():
    """
    Session login

    Exchanges a Firebase ID token (read from the request's ``idToken`` value)
    for a 5-day session cookie set on the JSON response.

    :return: JSON ``{'status': 'success'}`` with the ``session`` cookie set,
        or a 401 abort if cookie creation fails.
    """
    print("Session Login")
    # Get the ID token sent by the client
    # id_token = request.headers.get('csfToken')
    id_token = request.values.get('idToken')
    # Set session expiration to 5 days.
    expires_in = datetime.timedelta(days=5)
    try:
        # Create the session cookie. This will also verify the ID token in the process.
        # The session cookie will have the same claims as the ID token.
        session_cookie = auth.create_session_cookie(id_token, expires_in=expires_in)
        response = jsonify({'status': 'success'})
        # Set cookie policy for session cookie.
        # httponly keeps the cookie away from JS; secure restricts it to HTTPS.
        expires = datetime.datetime.now() + expires_in
        response.set_cookie('session', session_cookie, expires=expires, httponly=True, secure=True)
        return response
    except exceptions.FirebaseError:
        return abort(401, 'Failed to create a session cookie')
def get_cache_factory(cache_type):
    """Return a memoized Django cache instance for the given cache name.

    Helper to only return a single instance of a cache.
    As of django 1.7, may not be needed.

    :param cache_type: cache alias, or None for the 'default' cache
    """
    from django.core.cache import get_cache
    cache_key = 'default' if cache_type is None else cache_type
    if cache_key not in cache_factory:
        cache_factory[cache_key] = get_cache(cache_key)
    return cache_factory[cache_key]
def ruleset_from_pickle(file):
    """ Read a pickled ruleset from disk

    This can be either pickled Rules or Ryu Rules.

    file: The readable binary file-like object, or the name of the input file
    return: A ruleset, a list of Rules

    SECURITY NOTE: pickle.load executes arbitrary code embedded in the
    stream; only ever call this on trusted files.
    """
    if six.PY3:
        # latin1 lets Python 3 load byte-string payloads pickled by Python 2.
        ruleset = pickle.load(file, encoding='latin1')
    else:
        ruleset = pickle.load(file)
    # Did we load a list of Rules()?
    if isinstance(ruleset, list) and ruleset and isinstance(ruleset[0], Rule):
        return ruleset
    # Must be Ryu rules
    if isinstance(ruleset, dict):
        # A Ryu stats dump wraps the rules under "flow_stats".
        ruleset = ruleset["flow_stats"]
        ruleset = [rule_from_ryu(r) for r in ruleset]
    return ruleset
def gpu_memory_usage():
    """ return gpu memory usage for current process in MB """
    try:
        smi_output = nvidia_smi(robust=False)
    except Exception:
        # nvidia-smi unavailable (no GPU/driver): report zero usage.
        return 0
    current_pid = os.getpid()
    total_mb = 0
    # Each entry is (gpu_idx, pid, type, process_name, memory_usage_mb);
    # accumulate the memory of every entry belonging to this process.
    for _gpu_idx, pid, _proc_type, _proc_name, memory_mb in _nvidia_smi_parse_processes(smi_output):
        if pid == current_pid:
            total_mb += memory_mb
    return total_mb
def get_templates_for_angular(path, all_html_files, appname, underscore_js_compile, comment):
    """
    Creates templates.js files for AngularJs

    path: output directory for the generated templates.js
    all_html_files: iterable of HTML file paths to inline as templates
    appname: name of the Angular module to register the run block on
    underscore_js_compile: when non-empty, each template is pre-compiled with
        _.template() using this value as the settings literal
    comment: optional banner comment embedded in the generated file header
    """
    f = []
    # Optional per-file override: an HTML comment <!--template="name"-->
    # names the template explicitly; otherwise the file's basename is used.
    pat = r"<!--template=\"([a-zA-Z0-9\\-]+)\"-->"
    f.append("//")
    f.append("//Knight generated templates file.")
    if comment is not None:
        f.append("// * ")
        f.append("// * " + comment)
        f.append("// * ")
    f.append("//")
    f.append("(function () {")
    f.append(" var o = {")
    # k/i track position so the trailing comma is omitted on the last entry.
    k = len(all_html_files)
    i = 0
    for h in all_html_files:
        i += 1
        # get file content
        txt = Scribe.read(h)
        # check if the rx matches the contents
        m = re.search(pat, txt)
        if m:
            # get the template name from the group
            name = m.group(1)
            # remove the template name comment
            txt = re.sub(pat, "", txt)
        else:
            # get the filename with extension
            name = os.path.basename(h)
            # remove extension
            name = os.path.splitext(name)[0]
        # escape single quotes
        txt = re.sub("'", "\\'", txt)
        # condensate
        txt = Text.condensate(txt)
        f.append(" \'{0}\': \'{1}\'{2}".format(name, txt, "," if i < k else ""))
    f.append(" };")
    f.append(" var f = function(a) {")
    if underscore_js_compile is None or underscore_js_compile == "":
        #plain templates: put raw strings into Angular's $templateCache
        f.append(" var x;")
        f.append(" for (x in o) {")
        f.append(" a.put(x, o[x]);")
        f.append(" }")
    else:
        #templates run into UnderscoreJs template function
        f.append(" var ctx = {};".format(underscore_js_compile))
        f.append(" _.each(o, function (v, k) {")
        f.append(" a.put(k, _.template(v, ctx));")
        f.append(" });")
    f.append(" };")
    f.append(" f.$inject = ['$templateCache'];")
    f.append(" {0}.run(f);".format(appname))
    f.append("})();")
    code = "\n".join(f)
    # save templates.js
    outputPath = os.path.join(path, "templates.js")
    print("...saving file {}".format(outputPath))
    Scribe.write(code, outputPath)
def print_words(text, disable_tokenizers=None, delimiter=" ",
                additional_tokenizers=None):
    """
    Print the word segmentation of *text* for each available tokenizer,
    one line per tokenizer, so the results can be compared side by side.

    Parameters
    ----------
    text : str
        An input text
    disable_tokenizers : list
        A list of non-comparative tokenizers
    delimiter : str
        A delimiter of words
    additional_tokenizers: dict
        A list of tokenizers to be added to the comparison is
        in dictionary form,
        where the keys are names and the values are functions.

    Examples
    --------
    >>> text = "単語分割の結果を比較します"
    >>> tokenizers.print_words(text, delimiter="|")
           nagisa: 単語|分割|の|結果|を|比較|し|ます
           janome: 単語|分割|の|結果|を|比較|し|ます
    mecab_python3: 単語|分割|の|結果|を|比較|し|ます
    """
    active_tokenizers = get_avaiable_tokenizers(disable_tokenizers=disable_tokenizers)
    if additional_tokenizers:
        active_tokenizers.update(additional_tokenizers)
    for name, tokenize in active_tokenizers.items():
        segmented = delimiter.join(tokenize(text))
        print(f"{name:>14}: {segmented}")
def delayed_read_band_data(fpar_dataset_name, qc_dataset_name):
    """Read band data from a HDF4 file.

    Assumes the first dimensions have a size 1.

    FparLai_QC.

    Bit no. 5-7 3-4 2 1 0

    Acceptable values:

    000 00 0 0 0
    001 01 0 0 0

    Unacceptable mask:

    110 10 1 1 1

    :param fpar_dataset_name: rasterio-openable path to the FPAR band
    :param qc_dataset_name: rasterio-openable path to the matching QC band
    :returns: 2D FPAR array with invalid or poor-quality pixels replaced by
        the module-level ``fill_value``
    """
    with rasterio.open(fpar_dataset_name) as dataset:
        fpar_data = dataset.read()[0]
    with rasterio.open(qc_dataset_name) as dataset:
        qc_data = dataset.read()[0]
    # Both bands must match the expected tile geometry (module-level tile_shape).
    assert fpar_data.shape == tile_shape
    assert qc_data.shape == tile_shape
    # Ignore invalid and poor quality data.
    # 0b11010111 is the "unacceptable" QC bit mask from the docstring: any
    # overlap with those bits marks the pixel as poor quality.
    fpar_data[
        np.logical_or(fpar_data > max_valid, np.bitwise_and(qc_data, 0b11010111))
    ] = fill_value
    return fpar_data
def _aggregate_pop_simplified_comix(
pop: pd.Series, target: pd.DataFrame
) -> pd.DataFrame:
"""
Aggregates the population matrix based on the CoMix table.
:param pop: 1-year based population
:param target: target dataframe we will want to multiply or divide with
:return: Retuns a dataframe that can be multiplied with the comix matrix to get a table of contacts or it can be
used to divide the contacts table to get the CoMix back
"""
agg = pd.DataFrame(
{
"[0,17)": [pop[:17].sum()],
"[17,70)": [pop[17:69].sum()],
"70+": [pop[70:].sum()],
}
)
return pd.concat([agg] * len(target.columns)).set_index(target.index).T | 32,050 |
def questions_test():
    """
    method to execute tests on questions route

    Delegates to get_questions() with the module-level test client.
    """
    get_questions(client)
def authenticate():
    """
    Uses HTTP basic authentication to generate an authentication token.
    Any resource that requires authentication can use either basic auth or this token.

    :return: JSON response ``{'token': <ascii token string>}``
    """
    # serialize_token presumably returns bytes — decoded here for JSON
    # transport; confirm against its implementation.
    token = serialize_token(basic_auth.current_user())
    response = {'token': token.decode('ascii')}
    return jsonify(response)
def split_image(image, N):
    """Split a batched image tensor into N-by-N spatial tiles.

    image: (B, C, W, H) tensor; the last two dims are chunked into blocks of
    size N (trailing blocks may be smaller when the dim is not divisible).
    Returns the tiles as a list, in row-major order of the spatial chunks.
    """
    tiles = []
    for row_band in torch.split(image, N, dim=2):
        for tile in torch.split(row_band, N, dim=3):
            tiles.append(tile)
    return tiles
def _upsample_add(x, y):
"""Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
"""
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y | 32,054 |
def get_eta_and_mu(alpha):
    """Get the value of eta and mu. See (4.46) of the PhD thesis of J.-M. Battini.

    Parameters
    ----------
    alpha: float
        the angle of the rotation.

    Returns
    -------
    The first coefficient eta: float.
    The second coefficient mu: float.
    """
    # At alpha == 0 the general formulas are 0/0; use their analytic limits.
    if alpha == 0.:
        return 1 / 12, 1 / 360
    eta = (2 * sin(alpha) - alpha * (1 + cos(alpha))) / (2 * alpha ** 2 * sin(alpha))
    mu = (alpha * (alpha + sin(alpha)) - 8 * sin(alpha / 2) ** 2) / (4 * alpha ** 4 * sin(alpha / 2) ** 2)
    return eta, mu
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df_all - Pandas DataFrame containing city data with no filters
        df - Pandas DataFrame containing city data filtered by month and day
    """
    print('Loading city data...')
    # Load DataFrame for city
    df = pd.read_csv(CITY_DATA[city])
    # Convert start and end times to datetime type
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['End Time'] = pd.to_datetime(df['End Time'])
    # Create multiple new DataFrame Time Series
    df['month'] = df['Start Time'].dt.month
    # Series.dt.weekday_name was removed in pandas 0.25; day_name() is the
    # replacement (available since pandas 0.23) and returns the same
    # capitalized day names (e.g. 'Monday').
    df['day_str'] = df['Start Time'].dt.day_name()
    df['day_int'] = df['Start Time'].dt.weekday
    df['hour'] = df['Start Time'].dt.hour
    # Create side copy of df without filters
    df_all = df.copy()
    # Filter DataFrame by month; assumes month is always present in
    # month_list (presumably validated by the caller's input prompt).
    month_idx = month_list.index(month)
    if month != 'All':
        df = df[df['month'] == month_idx]
    # Filter DataFrame by day of week
    if day != 'All':
        df = df[df['day_str'] == day]
    print('-'*40)
    return df_all, df
def install_hooks(context):
    """Installs pre-commit hooks.

    :param context: task context used to run shell commands (presumably an
        invoke Context — confirm with the task runner in use).
    """
    print('Installing pre-commit hook')
    # Registers the hooks from .pre-commit-config.yaml with git.
    context.run('pre-commit install')
def play(session, bpm=125, shuffle=0.0, forever=False):
    """
    Play back a session's command timeline in real time.

    Positions are (bar, tick)-style tuples; commands scheduled before
    position (0, 0) are processed immediately, after which playback is
    clock-synchronized to the configured BPM.

    :type session: sails.session.Session
    :param bpm: tempo in beats per minute
    :param shuffle: swing/shuffle amount forwarded to the clock
    :type forever: bool
    :param forever: keep playing past the last scheduled command
    """
    playback = BasicPlayback()
    clock = BasicClock(bpm=bpm, shuffle=shuffle)
    pos = (-1, 0)
    # Entering the last control-timeline context so the full command timeline
    # (including generated commands) is visible when computing the end position.
    last_ctl_pos = max(session._ctl_timelines)
    with session[last_ctl_pos]:
        last_cmd_pos = max(session.cmd_timeline)
    start_time = None
    while forever or pos <= last_cmd_pos:
        with session[pos] as cpos:
            if pos == (0, 0):
                start_time = time.time()
            if pos >= (0, 0):
                clock.advance()
                # Sleep most of the way (75%) then busy-wait the remainder
                # for tighter timing than sleep alone provides.
                wait_until = start_time + clock.last_tick_frame / clock.freq
                first_wait = max(0, (wait_until - time.time()) * 0.75)
                time.sleep(first_wait)
                while time.time() < wait_until:
                    time.sleep(0)
            # Process commands repeatedly: advancing generators may schedule
            # new commands at the current position, so loop until quiescent.
            keep_processing = True
            while keep_processing:
                cmds = session.cmd_timeline.get(pos, [])
                processed = 0
                for cmd in cmds:
                    if not cmd.processed:
                        print('pos={!r} cmd={!r}'.format(pos, cmd))
                        playback.process(cmd)
                        processed += 1
                advanced = list(playback.advance_generators(session, pos))
                keep_processing = len(advanced) > 0
            # Final sweep for any commands added by the last generator pass.
            cmds = session.cmd_timeline.get(pos, [])
            for cmd in cmds:
                if not cmd.processed:
                    print('pos={!r} cmd={!r}'.format(pos, cmd))
                    playback.process(cmd)
                    processed += 1
            if pos[0] >= 0 and processed == 0:
                print('pos={!r}'.format(pos), end='\r')
            pos = (cpos + 1).pos
    clock.stop()
def donot_servicegroup_modify_labonly(cc, args):
    """LAB ONLY Update a servicegroup.

    :param cc: client container exposing ``smapiClient``
    :param args: parsed CLI args with ``iservicegroup`` (id/name) and
        ``attributes`` (list of "key=value" strings to patch)
    """
    # JKUNG comment this out prior to delivery
    # Build a JSON-patch style "replace" document from the CLI attributes.
    patch = utils.args_array_to_patch("replace", args.attributes[0])
    try:
        iservicegroup = cc.smapiClient.iservicegroup.update(args.iservicegroup, patch)
    except exc.HTTPNotFound:
        raise exc.CommandError('Service Group not found: %s' % args.iservicegroup)
    _print_iservicegroup_show(iservicegroup)
def predict_model(predictor: str, params: Params, archive_dir: str,
                  input_file: str, output_file: str, batch_size: int = 1):
    """
    Predict output annotations from the given model and input file and produce an output file.

    :param predictor: the type of predictor to use, e.g., "udify_predictor"
    :param params: the Params of the model
    :param archive_dir: the saved model archive
    :param input_file: the input file to predict
    :param output_file: the output file to save
    :param batch_size: the batch size, set this higher to speed up GPU inference
    """
    # The archive is always stored under a fixed name inside the directory.
    archive_path = os.path.join(archive_dir, "model.tar.gz")
    predict_model_with_archive(predictor, params, archive_path,
                               input_file, output_file, batch_size)
def write_error_row(rowNum, errInfo):
    """Google Sheets API Code.

    Writes all team news link data from RSS feed to the NFL Team Articles spreadsheet.
    https://docs.google.com/spreadsheets/d/1XiOZWw3S__3l20Fo0LzpMmnro9NYDulJtMko09KsZJQ/edit#gid=0

    :param rowNum: row number in the ERROR sheet to write to (used as "ERROR!A<rowNum>")
    :param errInfo: list of row values to store (one cell per element)
    :returns: the Sheets API update result dict
    """
    credentials = get_credentials()
    http = credentials.authorize(mgs.httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = mgs.discovery.build('sheets', 'v4', http=http,
                                  discoveryServiceUrl=discoveryUrl)
    spreadsheet_id = '1XiOZWw3S__3l20Fo0LzpMmnro9NYDulJtMko09KsZJQ'
    # RAW stores the values verbatim (no spreadsheet-style parsing).
    value_input_option = 'RAW'
    rangeName = 'ERROR!A' + str(rowNum)
    values = errInfo
    body = {
        'values': values
    }
    result = service.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=rangeName,
                                                    valueInputOption=value_input_option, body=body).execute()
    return result
def ghr():
    """
    >>> import os
    >>> os.environ.update(dict(OS="ubuntu-latest"))
    >>> ghr() # doctest: +ELLIPSIS
    ::set-output name=GHR_BINARY_PATH::...ghr_v0.13.0_linux_amd64...ghr
    >>> os.environ["OS"] = "macos-latest"
    >>> ghr() # doctest: +ELLIPSIS
    ::set-output name=GHR_BINARY_PATH::...ghr_v0.13.0_darwin_amd64...ghr
    >>> os.environ["OS"] = "windows-latest"
    >>> ghr() # doctest: +ELLIPSIS
    ::set-output name=GHR_BINARY_PATH::...ghr_v0.13.0_windows_amd64...ghr.exe
    """
    running_os = get_running_os()
    # Map GitHub runner OS names to the platform names used by ghr releases.
    os_map = {"ubuntu": "linux", "macos": "darwin"}
    ghr_os = os_map.get(running_os, running_os)
    # Linux releases ship as tarballs; macOS/Windows releases as zip archives.
    ghr_extension = "tar.gz" if running_os == "ubuntu" else "zip"
    gh_binary_suffix = ".exe" if running_os == "windows" else ""
    ghr_folder = f"ghr_{GHR_VERSION}_{ghr_os}_amd64"
    ghr_file = f"{ghr_folder}.{ghr_extension}"
    ghr_file_destination = os.path.join(TEMP, ghr_file)
    # NOTE(review): creating "../" looks like a leftover — it is unrelated to
    # TEMP, where the archive is actually downloaded and extracted. Confirm.
    os.makedirs("../", exist_ok=True)
    ghr_path = os.path.join(TEMP, ghr_folder, f"ghr{gh_binary_suffix}")
    download(GHR_URL.format(version=GHR_VERSION, file=ghr_file), ghr_file_destination)
    # Refuse to proceed if the download does not match the pinned checksum.
    validate_checksum(ghr_file_destination, GHR_TRUSTED_CHECKSUMS[running_os])
    if running_os == "ubuntu":
        extract_command = ["tar", "xvf", ghr_file_destination, "-C", TEMP]
    else:
        extract_command = ["7z", "x", f"-o{TEMP}", ghr_file_destination]
    check_call(extract_command)
    # Expose the extracted binary path to subsequent workflow steps.
    output_to_actions("GHR_BINARY_PATH", ghr_path)
def generate_submit(tab):
    """Write the predictions to "answer.txt", one value per line.

    tab : list of predictions, written in the given order (the grader
    expects this ordering).
    """
    with open("answer.txt", 'w') as out:
        out.writelines(str(prediction) + '\n' for prediction in tab)
def get_last_transaction():
    """ Return the first transaction of the latest block from the blockchain.

    :returns: JSON string of the transaction on success, or a plain dict
        ``{'error': ...}`` on failure. NOTE(review): success and failure
        return types differ (str vs dict) — callers must handle both.
    """
    try:
        # Transaction index 0 of the newest block.
        transaction = w3.eth.get_transaction_by_block(w3.eth.blockNumber, 0)
        tx_dict = dict(transaction)
        # HexJsonEncoder presumably serialises HexBytes values — confirm.
        tx_json = json.dumps(tx_dict, cls=HexJsonEncoder)
        return tx_json
    except Exception as err:
        # Broad catch: any RPC/serialisation failure is reported uniformly.
        print("Error '{0}' occurred.".format(err))
        return {'error':'Error while fetching transaction'}
def test_vectorizers_n_jobs(Estimator):
    """Check that parallel feature ingestion works"""
    # Estimator is presumably a parametrized fixture supplying each
    # vectorizer class — confirm with the test module's conftest.
    # A single short document is enough to exercise the n_jobs code path.
    text = ["Εν οίδα ότι ουδέν οίδα"]
    vect = Estimator(n_jobs=2)
    vect.fit(text)
    vect.transform(text)
    # n_jobs must be a positive integer; 0 is rejected with this exact message.
    with pytest.raises(ValueError, match="n_jobs=0 must be a integer >= 1"):
        Estimator(n_jobs=0).fit(text)
def get_tp_model() -> TargetPlatformModel:
    """
    A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
    bits configuration list for mixed-precision quantization.
    NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
    (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.

    Returns: A TargetPlatformModel object.
    """
    # The base config doubles as the default config; the list carries the
    # mixed-precision bit-width candidates.
    base_config, mixed_precision_cfg_list = get_op_quantization_configs()
    return generate_tp_model(default_config=base_config,
                             base_config=base_config,
                             mixed_precision_cfg_list=mixed_precision_cfg_list,
                             name='qnnpack_tp_model')
def IsARepoRoot(directory):
    """Returns True if directory is the root of a repo checkout.

    A repo root is identified by the presence of a '.repo' entry after
    expanding '~' and resolving symlinks.
    """
    resolved = os.path.realpath(os.path.expanduser(directory))
    return os.path.exists(os.path.join(resolved, '.repo'))
def check_sparv_version() -> Optional[bool]:
    """Check if the Sparv data dir is outdated.

    Returns:
        True if up to date, False if outdated, None if version file is missing.
    """
    version_file = paths.get_data_path() / VERSION_FILE
    if not version_file.is_file():
        return None
    return version_file.read_text() == __version__
def grav_n(expt_name, num_samples, num_particles, T_max, dt, srate, noise_std, seed):
    """2-body gravitational problem.

    Generates a dataset of near-circular 2-body orbits: canonical coordinates,
    their time derivatives, per-state total energies, and constant per-sample
    spring/mass vectors, sub-sampled at ``srate`` and perturbed with Gaussian
    noise of standard deviation ``noise_std``.

    expt_name: experiment label (currently unused inside this function)
    num_samples: number of independent orbit trials to simulate
    num_particles: only sizes the constant ``ks``/``mass`` vectors per sample
    T_max: total integration time per trial
    dt: integrator output step
    srate: sampling interval; every ``srate/dt``-th integrator step is kept
    noise_std: std of the noise added to coordinates and derivatives
    seed: numpy RNG seed for reproducibility
    """
    ##### ENERGY #####
    def potential_energy(state):
        '''U=sum_i,j>i G m_i m_j / r_ij'''
        tot_energy = np.zeros((1, 1, state.shape[2]))
        for i in range(state.shape[0]):
            for j in range(i + 1, state.shape[0]):
                r_ij = ((state[i:i + 1, 1:3] - state[j:j + 1, 1:3]) ** 2).sum(1, keepdims=True) ** .5
                m_i = state[i:i + 1, 0:1]
                m_j = state[j:j + 1, 0:1]
                tot_energy += m_i * m_j / r_ij
        U = -tot_energy.sum(0).squeeze()
        return U
    def kinetic_energy(state):
        '''T=sum_i .5*m*v^2'''
        energies = .5 * state[:, 0:1] * (state[:, 3:5] ** 2).sum(1, keepdims=True)
        T = energies.sum(0).squeeze()
        return T
    def total_energy(state):
        return potential_energy(state) + kinetic_energy(state)
    ##### DYNAMICS #####
    def get_accelerations(state, epsilon=0):
        # shape of state is [bodies x properties]
        # epsilon softens the 1/r^3 singularity when bodies get close.
        net_accs = []  # [nbodies x 2]
        for i in range(state.shape[0]):  # number of bodies
            other_bodies = np.concatenate([state[:i, :], state[i + 1:, :]], axis=0)
            displacements = other_bodies[:, 1:3] - state[i, 1:3]  # indexes 1:3 -> pxs, pys
            distances = (displacements ** 2).sum(1, keepdims=True) ** 0.5
            masses = other_bodies[:, 0:1]  # index 0 -> mass
            pointwise_accs = masses * displacements / (distances ** 3 + epsilon)  # G=1
            net_acc = pointwise_accs.sum(0, keepdims=True)
            net_accs.append(net_acc)
        net_accs = np.concatenate(net_accs, axis=0)
        return net_accs
    def update(t, state):
        # Right-hand side for solve_ivp: flattened [bodies x 5] state in/out.
        state = state.reshape(-1, 5)  # [bodies, properties]
        # print(state.shape)
        deriv = np.zeros_like(state)
        deriv[:, 1:3] = state[:, 3:5]  # dx, dy = vx, vy
        deriv[:, 3:5] = get_accelerations(state)
        return deriv.reshape(-1)
    ##### INTEGRATION SETTINGS #####
    def get_orbit(state, update_fn=update, t_points=100, t_span=[0, 2], **kwargs):
        if not 'rtol' in kwargs.keys():
            kwargs['rtol'] = 1e-12
            # kwargs['atol'] = 1e-12
            # kwargs['atol'] = 1e-9
        orbit_settings = locals()
        nbodies = state.shape[0]
        t_eval = np.arange(t_span[0], t_span[1], dt)
        # np.arange can produce one extra point due to float rounding; trim.
        if len(t_eval) != t_points:
            t_eval = t_eval[:-1]
        orbit_settings['t_eval'] = t_eval
        path = solve_ivp(fun=update_fn, t_span=t_span, y0=state.flatten(),
                         t_eval=t_eval,method='DOP853', **kwargs)
        orbit = path['y'].reshape(nbodies, 5, t_points)
        return orbit, orbit_settings
        # NOTE(review): everything below is unreachable historical code.
        # spring_ivp = rk(update_fn, t_eval, state.reshape(-1), dt)
        # spring_ivp = np.array(spring_ivp)
        # print(spring_ivp.shape)
        # q, p = spring_ivp[:, 0], spring_ivp[:, 1]
        # dydt = [dynamics_fn(y, None) for y in spring_ivp]
        # dydt = np.stack(dydt).T
        # dqdt, dpdt = np.split(dydt, 2)
        # return spring_ivp.reshape(nbodies,5,t_points), 33
    ##### INITIALIZE THE TWO BODIES #####
    def random_config(orbit_noise=5e-2, min_radius=0.5, max_radius=1.5):
        # Two unit masses placed symmetrically about the origin with
        # (noisily) circular velocities.
        state = np.zeros((2, 5))
        state[:, 0] = 1
        pos = np.random.rand(2) * (max_radius - min_radius) + min_radius
        r = np.sqrt(np.sum((pos ** 2)))
        # velocity that yields a circular orbit
        vel = np.flipud(pos) / (2 * r ** 1.5)
        vel[0] *= -1
        vel *= 1 + orbit_noise * np.random.randn()
        # make the circular orbits SLIGHTLY elliptical
        state[:, 1:3] = pos
        state[:, 3:5] = vel
        state[1, 1:] *= -1
        return state
    ##### HELPER FUNCTION #####
    def coords2state(coords, nbodies=2, mass=1):
        # NOTE(review): defined but unused within this function.
        timesteps = coords.shape[0]
        state = coords.T
        state = state.reshape(-1, nbodies, timesteps).transpose(1, 0, 2)
        mass_vec = mass * np.ones((nbodies, 1, timesteps))
        state = np.concatenate([mass_vec, state], axis=1)
        return state
    ##### INTEGRATE AN ORBIT OR TWO #####
    def sample_orbits(timesteps=50, trials=1000, nbodies=2, orbit_noise=5e-2,
                      min_radius=0.5, max_radius=1.5, t_span=[0, 20], verbose=False, **kwargs):
        orbit_settings = locals()
        if verbose:
            print("Making a dataset of near-circular 2-body orbits:")
        x, dx, e, ks, ms = [], [], [], [], []
        # samps_per_trial = np.ceil((T_max / srate))
        # N = samps_per_trial * trials
        np.random.seed(seed)
        for _ in range(trials):
            state = random_config(orbit_noise, min_radius, max_radius)
            orbit, _ = get_orbit(state, t_points=timesteps, t_span=t_span, **kwargs)
            print(orbit.shape)
            batch = orbit.transpose(2, 0, 1).reshape(-1, 10)
            # Keep every ssr-th integrator step to match the sampling rate.
            ssr = int(srate / dt)
            # (batch.shape)
            batch = batch[::ssr]
            # print('ssr')
            # print(batch.shape)
            sbx, sbdx, sbe = [], [], []
            for state in batch:
                dstate = update(None, state)
                # reshape from [nbodies, state] where state=[m, qx, qy, px, py]
                # to [canonical_coords] = [qx1, qx2, qy1, qy2, px1,px2,....]
                coords = state.reshape(nbodies, 5).T[1:].flatten()
                dcoords = dstate.reshape(nbodies, 5).T[1:].flatten()
                # print(coords.shape)
                coords += np.random.randn(*coords.shape) * noise_std
                dcoords += np.random.randn(*dcoords.shape) * noise_std
                x.append(coords)
                dx.append(dcoords)
                shaped_state = state.copy().reshape(2, 5, 1)
                e.append(total_energy(shaped_state))
                ks.append(np.ones(num_particles))
                ms.append(np.ones(num_particles))
        # print(len(x))
        # The [0, 2, 1, 3, 4, 6, 5, 7] reindex presumably regroups the
        # flattened coordinates from per-axis to per-body pairs
        # (qx1, qy1, qx2, qy2, px1, py1, px2, py2) — confirm with consumers.
        data = {'x': np.stack(x)[:, [0, 2, 1, 3, 4, 6, 5, 7]],
                'dx': np.stack(dx)[:, [0, 2, 1, 3, 4, 6, 5, 7]],
                'energy': np.stack(e),
                'ks': np.stack(ks),
                'mass': np.stack(ms)}
        return data
    return sample_orbits(timesteps=int(np.ceil(T_max / dt)), trials=num_samples, nbodies=2, orbit_noise=5e-2,
                         min_radius=0.5, max_radius=1.5, t_span=[0, T_max], verbose=False)
def fit_spline_linear_extrapolation(cumul_observations, smoothing_fun=simple_mirroring, smoothed_dat=[],
                                    plotf=False, smoothep=True, smooth=0.5, ns=3, H=7):
    """
    Linear extrapolation by splines on log daily cases

    Input:
        cumul_observations: cumulative observations,
        smoothing_fun: function used to produce the trend when smoothed_dat
            is not supplied,
        smoothed_dat: list of trends of incremental history
            (NOTE: the mutable default [] is safe here because the argument
            is only read/reassigned, never mutated in place),
        plotf: NOTE(review) — currently unused,
        smoothep: extrapolate from the smoothed (True) or raw (False)
            log-growth endpoint,
        smooth: spline smoothing parameter,
        ns: optional smoothing window parameter,
        H: forecasting horizon
    Output: forecast on horizon H in terms of cumulative numbers
        starting from the last observation
    """
    if len(smoothed_dat) == 0:
        smoothed_dat = smoothing_fun(cumul_observations, Ws=ns)
    val_start = smoothed_dat[-1]
    # Work on log(1 + trend) so growth becomes approximately linear.
    dat = np.log(list(smoothed_dat + 1))
    spl = csaps.UnivariateCubicSmoothingSpline(range(len(dat)), dat, smooth=smooth)
    # First differences of the smoothed log-curve = daily log-growth rates.
    dat_diff = np.diff(spl(np.arange(len(dat))))
    x = np.arange(len(dat_diff))
    spl = csaps.UnivariateCubicSmoothingSpline(x, dat_diff, smooth=smooth)
    dat_diff_sm = spl(x)
    # Linear extrapolation: continue the last slope of the smoothed growth.
    step = dat_diff_sm[-1] - dat_diff_sm[-2]
    if smoothep:
        dat_forecast = dat_diff_sm[-1] + step * np.arange(1, H + 1)  # + seasonality
    else:
        dat_forecast = dat_diff[-1] + step * np.arange(1, H + 1)  # + seasonality
    # Undo the log/diff transforms and anchor at the last observed level.
    forecast = np.insert(np.exp(np.cumsum(dat_forecast)) * val_start, 0, val_start)
    return forecast
def container():
    """The container subcommand.

    The `container` command interacts with containerised virtual environments,
    such as Docker and Singularity containers. This provides a uniform
    interface for creating either Docker or Singularity containers from the
    same directories, as long as the corresponding `Dockerfile` or
    `Singularity.def` definition files are present to define the environments.
    """
def test_incompatible_expected_units():
    """Test error is raised if value and expected_units are not equivalent."""
    # A length (3 m) cannot be converted to a flux density (Jy), so
    # UnitParameter must reject the combination with ValueError.
    pytest.raises(
        ValueError,
        unp.UnitParameter,
        name="unp1",
        value=3 * units.m,
        tols=(1e-9, 1 * units.m),
        expected_units=units.Jy,
    )
def import_chrome(profile, bookmark_types, output_format):
    """Import bookmarks and search keywords from Chrome-type profiles.
    On Chrome, keywords and search engines are the same thing and handled in
    their own database table; bookmarks cannot have associated keywords. This
    is why the dictionary lookups here are much simpler.

    Args:
        profile: path to the Chrome profile directory (contains 'Web Data'
            and 'Bookmarks').
        bookmark_types: collection of requested types; only membership of
            'search' is checked here.
        output_format: one of the out_template keys below.

    Results are written to stdout, one line per item.
    """
    # Output line formats, keyed by the requested output format.
    out_template = {
        'bookmark': '{url} {name}',
        'quickmark': '{name} {url}',
        'search': "c.url.searchengines['{keyword}'] = '{url}'",
        'oldsearch': '{keyword} {url}'
    }
    if 'search' in bookmark_types:
        # Search keywords live in the 'Web Data' SQLite database.
        webdata = sqlite3.connect(os.path.join(profile, 'Web Data'))
        c = webdata.cursor()
        c.execute('SELECT keyword,url FROM keywords;')
        for keyword, url in c:
            try:
                # May raise KeyError for URL parameters we cannot translate.
                url = opensearch_convert(url)
                print(out_template[output_format].format(
                    keyword=keyword, url=url))
            except KeyError:
                print('# Unsupported parameter in url for {}; skipping....'.
                      format(keyword))
    else:
        # Bookmarks are stored as a JSON tree of 'url' and 'folder' nodes.
        with open(os.path.join(profile, 'Bookmarks'), encoding='utf-8') as f:
            bookmarks = json.load(f)
        def bm_tree_walk(bm, template):
            """Recursive function to walk through bookmarks."""
            assert 'type' in bm, bm
            if bm['type'] == 'url':
                # Skip chrome:// internal pages.
                if urllib.parse.urlparse(bm['url']).scheme != 'chrome':
                    print(template.format(**bm))
            elif bm['type'] == 'folder':
                for child in bm['children']:
                    bm_tree_walk(child, template)
        for root in bookmarks['roots'].values():
            bm_tree_walk(root, out_template[output_format])
def test_time():
    """Check that a Time built from step/duration reports consistent values."""
    t = Time(step=1e-3, duration=1e-1)
    assert t.step == 1e-3
    assert t.duration == 1e-1
    assert t.nsteps == 100
    assert t.nsteps_detected == 100
    assert len(t.values) == t.nsteps
def auto_discover():
    """Automatically discover and import the built-in pinyin style
    implementations that live next to this module."""
    pattern = current_dir + os.path.sep + '*.py'
    for path in glob.glob(pattern):
        # Module name is the basename without its extension(s).
        module_name, _, _ = os.path.basename(path).partition('.')
        # Skip private modules and anything without a usable name.
        if not module_name or module_name.startswith('_'):
            continue
        __import__('pypinyin.style.{0}'.format(module_name))
def compute_dl_target(location):
    """
    Resolve the download target directory.

    Falls back to /usr/sys/inst.images when *location* is empty or blank;
    otherwise trailing slashes of *location* are stripped.

    return:
        return code : 0 - OK
                      1 - if error
        dl_target value or msg in case of error
    """
    if location and location.strip():
        dl_target = location.rstrip('/')
    else:
        dl_target = "/usr/sys/inst.images"
    return 0, dl_target
def hz_to_angstrom(frequency):
    """Convert a frequency in Hz into the corresponding wavelength in Angstroms.

    Parameters
    ----------
    frequency: float
        The frequency in Hz.

    Returns
    -------
    The wavelength in Angstroms.
    """
    wavelength = C / frequency
    return wavelength / ANGSTROM
def main():
    """Program entry point: hands control to ``app.run`` with ``proc_argv``."""
    app.run(proc_argv)
def sync(
    query: List[str],
    downloader: Downloader,
    m3u_file: Optional[str] = None,
) -> None:
    """
    Removes the songs that are no longer present in the list and downloads the new ones.
    ### Arguments
    - query: list of strings to search for.
    - downloader: Already initialized downloader instance.
    - m3u_file: Path to the file to save the metadata to.
    ### Notes
    - Songs already on disk are kept and skipped, unless
      ``downloader.overwrite == "force"``, in which case they are re-downloaded.
    - Files in the output directory that do not match any queried song are deleted.
    """
    # Parse the query
    songs_list = parse_query(query, downloader.threads)
    if m3u_file:
        create_m3u_file(
            m3u_file, songs_list, downloader.output, downloader.output_format, False
        )
    # Get all files that are in the output directory
    parent_dir = Path(downloader.output).parent
    old_files = [
        Path(file) for file in glob.glob(f"{parent_dir}/*.{downloader.output_format}")
    ]
    # Get all output file names
    new_files = [
        create_file_name(song, downloader.output, downloader.output_format)
        for song in songs_list
    ]
    # Get all files that are no longer in the query
    to_delete = set(old_files) - set(new_files)
    # Delete all files that are no longer in the query
    for file in to_delete:
        file.unlink()
    # Download the rest of the songs
    try:
        to_download = []
        for song in songs_list:
            song_path = create_file_name(
                song, downloader.output, downloader.output_format
            )
            if Path(song_path).exists():
                # Existing files are only re-downloaded in "force" mode.
                if downloader.overwrite == "force":
                    downloader.progress_handler.log(f"Overwriting {song.display_name}")
                    to_download.append(song)
            else:
                to_download.append(song)
        if len(to_download) == 0:
            downloader.progress_handler.log("Nothing to do...")
            return
        downloader.download_multiple_songs(to_download)
    except Exception as exception:
        # Report the failure through the progress handler instead of crashing.
        downloader.progress_handler.debug(traceback.format_exc())
        downloader.progress_handler.error(str(exception))
def is_literal(token):
    """Return True when *token* is a literal (string or number)."""
    ttype = token.ttype
    return ttype in T.Literal
def test_non_batching_collated_task_dataset_getitem_bad_index(
        non_batching_collated_task_dataset, index):
    """NonBatchingCollatedTaskDataset.__getitem__ must reject any index != 0."""
    with pytest.raises(IndexError, match='.*must be 0.*'):
        _ = non_batching_collated_task_dataset[index]
def PutObject(*, session, bucket, key, content, type_="application/octet-stream"):
    """Saves data to S3 under specified filename and bucketname

    :param session: The session to use for AWS connection
    :type session: boto3.session.Session
    :param bucket: Name of bucket
    :type bucket: str
    :param key: Name of file
    :type key: str
    :param content: Data to save; ``str`` content is encoded as UTF-8
    :type content: bytes | str
    :param type_: Content type of the data to put
    :type type_: str
    :return: The new S3 object
    :rtype: boto3.core.resource.S3Object
    """
    s3conn = session.connect_to("s3")
    # Best-effort: make sure the bucket exists before adding the object.
    try:
        GetOrCreateBuckets(session, bucket)
    except Exception:
        # The caller may lack Create/List Bucket permissions, in which case
        # the call above raises.  We still attempt the upload and assume the
        # bucket already exists.
        pass
    S3Objects = session.get_collection("s3", "S3ObjectCollection")
    s3objects = S3Objects(connection=s3conn, bucket=bucket, key=key)
    # The S3 body must be bytes; transparently encode str content as UTF-8.
    bindata = content.encode("utf-8") if isinstance(content, str) else content
    # Now we create the object
    return s3objects.create(key=key, acl="private", content_type=type_, body=bindata)
def insert_sequences_into_tree(aln, moltype, params={},
                               write_log=True):
    """Returns a tree from Alignment object aln, built with pplacer/guppy.

    aln: an xxx.Alignment object, or data that can be used to build one.
    moltype: cogent.core.moltype.MolType object
    params: dict of parameters to pass in to the pplacer app controller;
        must contain '--out-dir' (used for the log file and guppy output).
        NOTE(review): mutable default argument -- harmless here because this
        function only reads the dict, but callers must never mutate it.
    write_log: when True, save pplacer's stdout to a log file in '--out-dir'.

    The result will be an xxx.Alignment object, or None if tree fails.
    """
    # convert aln to phy since seq_names need fixed to run through pplacer
    new_aln=get_align_for_phylip(StringIO(aln))
    # convert aln to fasta in case it is not already a fasta file
    aln2 = Alignment(new_aln)
    seqs = aln2.toFasta()
    # feed the sequences to pplacer as one multiline string
    ih = '_input_as_multiline_string'
    pplacer_app = Pplacer(params=params,
                          InputHandler=ih,
                          WorkingDir=None,
                          SuppressStderr=False,
                          SuppressStdout=False)
    pplacer_result = pplacer_app(seqs)
    # write a log file
    if write_log:
        log_fp = join(params["--out-dir"],'log_pplacer_' + \
                 split(get_tmp_filename())[-1])
        log_file=open(log_fp,'w')
        log_file.write(pplacer_result['StdOut'].read())
        log_file.close()
    # use guppy to convert json file into a placement tree
    guppy_params={'tog':None}
    new_tree=build_tree_from_json_using_params(pplacer_result['json'].name, \
                                               output_dir=params['--out-dir'], \
                                               params=guppy_params)
    # remove pplacer's temporary files before returning
    pplacer_result.cleanUp()
    return new_tree
def billing_invoice_download(client,
                             account_name=None,
                             invoice_name=None,
                             download_token=None,
                             download_urls=None):
    """
    Get URL to download invoice(s), dispatching on the supplied arguments.

    :param account_name: The ID that uniquely identifies a billing account.
    :param invoice_name: The ID that uniquely identifies an invoice.
    :param download_token: The download token with document source and document ID.
    :param download_urls: An array of download urls for individual.
    """
    if account_name:
        # Account-scoped downloads: a single invoice, or a batch of urls.
        if invoice_name and download_token:
            return client.download_invoice(account_name, invoice_name, download_token)
        if download_urls:
            return client.download_multiple_modern_invoice(account_name, download_urls)
    if download_urls:
        return client.download_multiple_billing_subscription_invoice(download_urls)
    if invoice_name and download_token:
        return client.download_billing_subscription_invoice(
            invoice_name, download_token
        )
    from azure.cli.core.azclierror import CLIInternalError
    raise CLIInternalError(
        "Uncaught argument combinations for Azure CLI to handle. Please submit an issue"
    )
def find_classes(text):
    """Extract C++ class/struct names declared in *text*.

    Scans each line for declarations of the form ``class Name {`` or
    ``class Name : public Base[, public Base2...] {`` (same for ``struct``)
    and collects the declared names in order of appearance.

    NOTE(review): the original code also tracked brace nesting to restrict
    matches to the top level, but the guard was disabled (``if True:#...``),
    so every line was scanned regardless of depth; the dead bookkeeping has
    been removed without changing behavior.
    """
    classname_re = r"[\w\<\>\:]+"
    # Raw strings avoid invalid-escape-sequence warnings on '\{', '\:', etc.
    class_re = re.compile(
        r"(?:class|struct)\s*(\w+)\s*(?:\:\s*public\s*"
        + classname_re + r"(?:,\s*public\s*" + classname_re + r")*)?\s*\{")
    classes = []
    for line in text.split("\n"):
        classes.extend(class_re.findall(line))
    return classes
def kineticEnergyCOM(robot : object, symbolic = False):
    """Total kinetic energy of the system, taken about each center of mass,
    from the joint velocities and the COM inertia matrix.

    Args:
        robot (object): serial robot (this won't work with other type of robots)
        symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.

    Returns:
        K (SymPy Matrix): kinetic matrix (symbolical)
    """
    # Inertia matrix expressed about the centers of mass.
    D = inertiaMatrixCOM(robot, symbolic)
    if symbolic:
        return 0.5 * (robot.qdSymbolic.T * D * robot.qdSymbolic)
    return 0.5 * (robot.jointsVelocities.T.dot(D).dot(robot.jointsVelocities))
def get_input(request) -> str:
    """Extract the 'input' song field from the submitted request form."""
    form_data = request.form
    return form_data.get('input')
def _unit_circle_positions(item_counts: dict[Hashable, tuple[int, int]], radius=0.45, center_x=0.5,
center_y=0.5) -> dict[Hashable, tuple[float, float]]:
"""
computes equally spaced points on a circle based on the radius and center positions
:param item_counts: item dict LinkedNetwork.get_item_link_count_dict()
:param radius: radius of the circle
:param center_x: x center position
:param center_y: y center position
:return: dict of items and their corresponding positions
"""
r = radius
cx, cy = center_x, center_y
a = math.radians(360) / len(item_counts)
points = {}
i = 0
for key, _ in item_counts.items():
points[key] = (math.cos(a * i) * r + cx, math.sin(a * i) * r + cy)
i += 1
return points | 32,088 |
def times(*args):
    """Select whole time steps; *args are joined and parsed as one expression."""
    # NOTE: Python 2 code (``except Class, name`` syntax and print statements);
    # kept byte-identical -- do not run under Python 3.
    # Parse errors from the selection parser are re-raised as BlotishError.
    try: state.time_selection.parse_times(" ".join(args))
    except timestep_selection.Error, err: raise BlotishError(err)
    print " Select specified whole times"
    print " Number of selected times = %d" % len(state.time_selection.selected_indices)
    print
def rate_table_download(request, table_id):
    """
    Download a calcification rate table as CSV.

    URL args:
        table_id: primary key of the CalcifyRateTable to export.
    GET params:
        source_id (optional): restrict the CSV rows to the labelset of this
            source instead of exporting the whole table.

    Returns a streaming CSV response, or a permission-denied page when the
    table (or the requested source) is missing or not visible to the user.
    """
    # Local helper so every permission failure renders the same page.
    def render_permission_error(request, message):
        return render(request, 'permission_denied.html', dict(error=message))
    table_permission_error_message = \
        f"You don't have permission to download table of ID {table_id}."
    try:
        rate_table = CalcifyRateTable.objects.get(pk=table_id)
    except CalcifyRateTable.DoesNotExist:
        # Technically the error message isn't accurate here, since it
        # implies the table ID exists. But users don't really have any
        # business knowing which table IDs exist or not outside their source.
        # So this obfuscation makes sense.
        return render_permission_error(request, table_permission_error_message)
    if rate_table.source:
        if not rate_table.source.visible_to_user(request.user):
            # Table belongs to a source, and the user doesn't have access to
            # that source.
            return render_permission_error(
                request, table_permission_error_message)
    # The source_id parameter tells us to limit the downloaded CSV to the
    # entries in the specified source's labelset, rather than including all
    # the rows of the rate table. This is particularly useful when downloading
    # a default rate table.
    if 'source_id' in request.GET:
        source_id = request.GET['source_id']
        source_permission_error_message = \
            f"You don't have permission to access source of ID {source_id}."
        try:
            source = Source.objects.get(pk=source_id)
        except Source.DoesNotExist:
            return render_permission_error(
                request, source_permission_error_message)
        if not source.visible_to_user(request.user):
            return render_permission_error(
                request, source_permission_error_message)
    else:
        source = None
    # At this point we do have permission, so proceed.
    # Convert the rate table's name to a valid filename in Windows and
    # Linux/Mac (or at least make a reasonable effort to).
    # Convert chars that are problematic in either OS to underscores.
    #
    # Linux only disallows / (and the null char, but we'll ignore that case).
    # Windows:
    # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
    non_filename_chars_regex = re.compile(r'[<>:"/\\|?*]')
    csv_filename = non_filename_chars_regex.sub('_', rate_table.name)
    # Make a CSV stream response and write the data to it.
    response = create_csv_stream_response('{}.csv'.format(csv_filename))
    rate_table_json_to_csv(response, rate_table, source=source)
    return response
def gui():
    """
    Launch the igel gui application.
    PS: you need to have nodejs on your machine
    """
    # Clone the UI repository into the current working directory if absent.
    igel_ui_path = Path(os.getcwd()) / "igel-ui"
    if not Path.exists(igel_ui_path):
        subprocess.check_call(
            ["git"] + ["clone", "https://github.com/nidhaloff/igel-ui.git"]
        )
        logger.info(f"igel UI cloned successfully")
    os.chdir(igel_ui_path)
    logger.info(f"switching to -> {igel_ui_path}")
    logger.info(f"current dir: {os.getcwd()}")
    logger.info(f"make sure you have nodejs installed!!")
    # NOTE(review): combining a list argument with shell=True means only the
    # first element ("node") is treated as the shell command and the rest
    # become shell positional parameters -- these two Popen calls likely do
    # not install anything.  Verify the intended npm invocations.
    # Also note they are fire-and-forget (no wait()).
    subprocess.Popen(["node", "npm", "install", "open"], shell=True)
    subprocess.Popen(["node", "npm", "install electron", "open"], shell=True)
    logger.info("installing dependencies ...")
    logger.info(f"dependencies installed successfully")
    logger.info(f"node version:")
    subprocess.check_call("node -v", shell=True)
    logger.info(f"npm version:")
    subprocess.check_call("npm -v", shell=True)
    # electron is (re)installed synchronously here before starting the UI.
    subprocess.check_call("npm i electron", shell=True)
    logger.info("running igel UI...")
    subprocess.check_call("npm start", shell=True)
def normalize_path(path):
    """ Normalize a pathname by collapsing redundant separators and up-level references """
    from os.path import normpath, sep
    normalized = normpath(path)
    # Force both separator styles to this platform's separator.
    for alien_sep in ("/", "\\"):
        normalized = normalized.replace(alien_sep, sep)
    return adapt_path(normalized)
def rgb_to_RGB255(rgb: RGBTuple) -> RGB255Tuple:
    """
    Map a Color.rgb triple (components in the 0-1 range) onto the
    ANSI RGB (0-255) range.

    >>> rgb_to_RGB255((1, 0.5, 0))
    (255, 128, 0)
    """
    scaled = (map_interval(0, 1, 0, 255, channel) for channel in rgb)
    return tuple(int(round(value)) for value in scaled)
def test_torch_rnn_classifier(X_sequence):
    """Smoke test: verifies the classifier runs end to end; it does not
    check model quality."""
    train, test, vocab = X_sequence
    model = torch_rnn_classifier.TorchRNNClassifier(
        vocab=vocab, max_iter=100)
    X_train, y_train = zip(*train)
    X_test, _ = zip(*test)
    model.fit(X_train, y_train)
    model.predict(X_test)
    model.predict_proba(X_test)
def setup_logging(log_level):
    """Configure logging for running functions locally.

    If the root logger already has handlers (e.g. set up by a host
    framework), only its level is adjusted; otherwise a basic file logger
    writing to 'script.log' is configured.

    :param log_level: logging level constant, e.g. ``logging.INFO``.
    """
    root = logging.getLogger()
    if root.handlers:  # idiomatic truthiness instead of len(...) > 0
        root.setLevel(log_level)
    else:
        # Alternative format kept from the original for reference:
        # format='%(asctime)s %(message)s'
        logging.basicConfig(filename='script.log',
                            format="%(name)s - %(module)s - %(message)s",
                            level=log_level)
def check_blinka_python_version():
    """
    Check the Python 3 version for Blinka (which may be a later version than we're running this script with)
    """
    print("Making sure the required version of Python is installed")
    # Compare the system python3 version (not necessarily this interpreter)
    # against the minimum Blinka requires; abort the install if it is too old.
    if get_python3_version() < blinka_minimum_python_version:
        shell.bail("Blinka requires a minimum of Python version {} to install. Please update your OS!".format(blinka_minimum_python_version))
def check_rest_version(host="http://www.compbio.dundee.ac.uk/jpred4/cgi-bin/rest", suffix="version", silent=False):
    """Check version of JPred REST interface.

    :param str host: JPred host address.
    :param str suffix: Host address suffix.
    :param silent: Should the work be done silently?
    :type silent: :py:obj:`True` or :py:obj:`False`
    :return: Version of JPred REST API.
    :rtype: :py:class:`str`
    """
    response = requests.get("{}/{}".format(host, suffix))
    match = re.search(r"VERSION=(v\.[0-9]*.[0-9]*)", response.text)
    version = match.group(1)
    if not silent:
        print(version)
    return version
def test_car_move():
    """Verify that the vehicle moves correctly in both directions."""
    car = Car(60, 60, BLACK)
    car.move((80, 80))
    assert (car.xpos, car.ypos) == (80, 80)
    assert (car.xv, car.yv) == (20, 20)
    car.move((60, 60))
    assert (car.xpos, car.ypos) == (60, 60)
    assert (car.xv, car.yv) == (-20, -20)
def quads(minlon, minlat, maxlon, maxlat):
    """ Generate a list of southwest (lon, lat) for 1-degree quads of SRTM1 data.
    """
    # Integer 1-degree steps from floor(min) up to and including the last
    # value that does not exceed max -- equivalent to the while-loop form,
    # since floor() yields an int and the step is 1.
    for lon in range(floor(minlon), floor(maxlon) + 1):
        for lat in range(floor(minlat), floor(maxlat) + 1):
            yield lon, lat
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.