content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def reshape(spectra):
    """Rearrange a compressed 1d array of spherical harmonics to 2d

    Args:
        spectra (np.ndarray): 1 dimensional storage of 2d spherical modes.
            A 2d input is interpreted as (real, imag) component pairs and
            combined into a complex 1d array first.

    Returns:
        np.ndarray:
            2-dimensional array of the reshaped input with zonal and meridional
            wavenumber coordinates

    Raises:
        ValueError: If the input is not 1-dimensional (after the optional
            complex combination step).
    """
    # Account for complex inputs as two dimensions
    if spectra.ndim == 2:
        spectra = spectra[:, 0] + spectra[:, 1]*1j
    if spectra.ndim != 1:
        raise ValueError('Spectra must be a 1-dimensional array')
    # Deduce truncation from shape
    trunc = find_trunc(len(spectra))
    # Allocate with the input dtype: np.zeros defaults to float64, which
    # would silently discard the imaginary part of complex coefficients
    # on assignment below.
    spectra_2d = np.zeros((trunc, trunc), dtype=spectra.dtype)
    idx0 = 0
    idx1 = trunc
    for i in range(trunc):
        # Row i holds the modes for zonal wavenumber i, upper-triangular layout
        spectra_2d[i, i:trunc] = spectra[idx0:idx1]
        idx0 += trunc - i
        idx1 += trunc - i - 1
    return spectra_2d
def dict_factory(cursor, row):
    """
    Convert a tuple-shaped SQL result row into a dict keyed by column name.

    :param cursor: cursor whose ``description`` attribute lists column metadata
    :param row: the result tuple produced by the query
    :return: dict mapping each column name to its value in ``row``
    """
    return {column[0]: row[index] for index, column in enumerate(cursor.description)}
import torch
def predict_batch(model, x_batch, dynamics, fast_init):
    """
    Compute the softmax prediction probabilities for a given data batch.

    Args:
        model: EnergyBasedModel
        x_batch: Batch of input tensors
        dynamics: Dictionary containing the keyword arguments
            for the relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward initilization
            is used for the prediction

    Returns:
        Softmax classification probabilities for the given data batch
    """
    # Start from a clean neural state
    model.reset_state()
    # Clamp the input layer to the test sample and disable output nudging
    flattened = x_batch.view(-1, model.dimensions[0])
    model.clamp_layer(0, flattened)
    model.set_C_target(None)
    # Produce the prediction, either via fast feedforward initialization
    # or by relaxing the state variables u
    if fast_init:
        model.fast_init()
    else:
        model.u_relax(**dynamics)
    # Softmax over the (detached) output layer activities
    return torch.nn.functional.softmax(model.u[-1].detach(), dim=1)
def render_openapi(api, request):
    """Prepare openapi specs.

    Builds an APISpec document from the API's configured options, security
    schema and registered routes, and returns it as a plain dict.
    """
    # Setup Specs
    # Copy so the pops below don't mutate the API's stored options.
    options = dict(api.openapi_options)
    options.setdefault('servers', [{
        'url': str(request.url.with_query('').with_path(api.prefix))
    }])
    # Title/version are popped out of options['info'] so they are not passed
    # twice; remaining options are forwarded as keyword arguments.
    spec = APISpec(
        options['info'].pop('title', f"{ api.app.cfg.name.title() } API"),
        options['info'].pop('version', '1.0.0'),
        options.pop('openapi_version', '3.0.0'),
        **options, plugins=[MarshmallowPlugin()])
    spec.tags = {}
    # Setup Authorization
    if api.authorize:
        # parse_docs returns (summary, description, schema); only the schema
        # (security scheme definitions) is needed here.
        _, _, schema = parse_docs(api.authorize)
        spec.options['security'] = []
        for key, value in schema.items():
            spec.components.security_scheme(key, value)
            spec.options['security'].append({key: []})
    # Setup Paths
    routes = api.router.routes()
    for route in routes:
        if route.path in SKIP_PATH:
            continue
        spec.path(route.path, **route_to_spec(route, spec))
    return spec.to_dict()
def apply_transformations(initial_representation: list, events: list) -> float:
    """Apply the transformations in the events list to the initial representation"""
    # Accumulate all event transformations before touching the geometry.
    total_scale = 1
    total_angle = 0
    offset = [0, 0]
    for item in events:
        for event in item["events"]:
            kind = event["type"]
            change = event["trigger"]["transformation"]
            if kind == "TRANSLATION":
                offset[X_COORDINATE] += change[X_COORDINATE]
                offset[Y_COORDINATE] += change[Y_COORDINATE]
            elif kind == "ROTATION":
                total_angle += change
            elif kind == "UNIFORM_SCALE":
                total_scale *= change
    # Compose the accumulated transformations: scale, then rotate, then
    # translate, all about the origin.
    shape = geometry.Polygon(initial_representation)
    shape = affinity.scale(shape, xfact=total_scale, yfact=total_scale, origin=(0, 0))
    shape = affinity.rotate(shape, total_angle, origin=(0, 0))
    shape = affinity.translate(shape, xoff=offset[0], yoff=offset[1])
    return polygon_to_vertices_list(shape)
def affine_transform(transform, points):
    """
    Transforms a set of N x 2 points using the given Affine object.
    """
    # Append a homogeneous coordinate column, then put points into columns.
    homogeneous = np.hstack([points, np.ones((points.shape[0], 1))]).T
    moved = np.dot(affine_to_matrix(transform), homogeneous)
    # Back to N x 2, dropping the homogeneous row.
    return moved.T[:, :2]
from datetime import datetime
def get_current_time_in_millisecs():
    """Returns time in milliseconds since the Epoch.

    Delegates the conversion to ``get_time_in_millisecs``.
    """
    # With `from datetime import datetime` in scope, the name `datetime`
    # is the class itself, so the original `datetime.datetime.utcnow()`
    # raised AttributeError; call utcnow() on the class directly.
    return get_time_in_millisecs(datetime.utcnow())
def _convert_named_signatures_to_signature_def(signatures):
    """Convert named signatures to object of type SignatureDef.

    Args:
        signatures: object of type manifest_pb2.Signatures()

    Returns:
        object of type SignatureDef which contains a converted version of named
        signatures from input signatures object

    Raises:
        RuntimeError: if input and output named signatures are not of type
            GenericSignature
    """
    signature_def = meta_graph_pb2.SignatureDef()
    # Only the predict input/output named signatures are converted; any other
    # named signatures present in `signatures` are ignored (see TODO below).
    input_signature = signatures.named_signatures[
        signature_constants.PREDICT_INPUTS]
    output_signature = signatures.named_signatures[
        signature_constants.PREDICT_OUTPUTS]
    # TODO(pdudnik): what if there are other signatures? Mimic cr/140900781 once
    # it is submitted.
    if (input_signature.WhichOneof("type") != "generic_signature" or
        output_signature.WhichOneof("type") != "generic_signature"):
        raise RuntimeError("Named input and output signatures can only be "
                           "up-converted if they are generic signature. "
                           "Input signature type is %s, output signature type is "
                           "%s" % (input_signature.WhichOneof("type"),
                                   output_signature.WhichOneof("type")))
    signature_def.method_name = signature_constants.PREDICT_METHOD_NAME
    # Copy every named tensor from the generic signatures into the
    # SignatureDef's input/output maps.
    for key, val in input_signature.generic_signature.map.items():
        _add_input_to_signature_def(val.tensor_name, key, signature_def)
    for key, val in output_signature.generic_signature.map.items():
        _add_output_to_signature_def(val.tensor_name, key, signature_def)
    return signature_def
import math
def get_test_paths(paths, snaps):
    """
    Return $snaps paths to be tested on GLUE

    A value of -1 for ``snaps`` means "test every path".
    """
    if snaps == -1:
        return paths
    # Pick `snaps` evenly spaced checkpoints, rounding indices upward.
    step = len(paths) * 1. / snaps
    return [paths[int(math.ceil(step * k)) - 1] for k in range(1, snaps + 1)]
def is_success(code):
    """Return that the client's request was successfully received, understood, and accepted."""
    # 2xx range check written as guard clauses.
    if code < 200:
        return False
    return code <= 299
def is_palindrome_v3(s):
    """ (str) -> bool

    Return True if and only if s is a palindrome.

    >>> is_palindrome_v3('noon')
    True
    >>> is_palindrome_v3('racecar')
    True
    >>> is_palindrome_v3('dented')
    False
    >>> is_palindrome_v3('')
    True
    >>> is_palindrome_v3(' ')
    True
    """
    # A string is a palindrome exactly when it equals its reverse.
    return s == s[::-1]
from typing import BinaryIO
def read(fd: BinaryIO) -> Entity:
    """Read mug scene from `fd` file object.

    Args:
        fd: File object to read from.

    Returns:
        Root entity.

    Raises:
        ValueError: If the stream does not start with the 'MUGS' magic bytes.
    """
    magic = fd.read(4)
    if magic != b'MUGS':
        raise ValueError("not a valid mug file format")
    # Magic checked; the rest of the stream is the recursive entity tree.
    return read_recursive(fd)
def transform(x, y):
    """
    Take a vector of x values and a vector of y values and return the y
    values in a linearized format, assuming an nlogn function was used to
    create y from x.
    """
    # Exponentiate each y value; x is kept in the signature for callers
    # but is not used by the current formula.
    return [2 ** value for value in y]
def _GRIsAreEnabled():
    """Returns True if GRIs are enabled."""
    # Either the explicit flag or the 'gri' completion style enables GRIs.
    if properties.VALUES.core.enable_gri.GetBool():
        return True
    return properties.VALUES.core.resource_completion_style.Get() == 'gri'
def fatorial(n, show=False):
    """
    -> Compute the factorial of a number.

    :param n: The number whose factorial is computed.
    :param show: (optional) When True, also print the expanded product.
    :return: The factorial of n (n!).
    """
    # The original accumulated `n * (n - 1)` once per loop iteration, which
    # yields (n + 1) * n * (n - 1) — not n! (it was only correct for n = 5
    # by coincidence). Compute the real product instead.
    print('-' * 35)
    resultado = 1
    for contador in range(n, 0, -1):
        resultado *= contador
    if show:
        # Print the expanded product, e.g. "5 x 4 x 3 x 2 x 1 = "
        for contando in range(n, 0, -1):
            print(contando, end='')
            if contando > 1:
                print(' x ', end='')
        print(' = ', end='')
    return resultado
def start_job(job, hal_id, refGenome, opts):
    """Set up the structure of the pipeline.

    Builds a Toil job graph mirroring the HAL species tree (rerooted on
    refGenome): one merge job per tree node, with lift jobs moving blocks
    between parent/child genomes, and a final MAF export job.

    Returns the promise (.rv()) of the MAF export job.
    """
    hal = hal_id
    # Newick representation of the HAL species tree.
    newick_string = get_hal_tree(hal)
    job.fileStore.logToMaster("Newick string: %s" % (newick_string))
    tree = newick.loads(newick_string)[0]
    # Reroot on the reference genome so lifting directions are well-defined.
    rerooted = reroot_tree(tree, refGenome)
    job.fileStore.logToMaster("Rerooted newick string: %s" % (newick.dumps([rerooted])))
    if opts.targetGenomes is not None:
        # We don't need the alignment to all genomes, just a subset.
        prune_tree(rerooted, opts.targetGenomes)
        job.fileStore.logToMaster("Pruned newick string: %s" % newick.dumps(rerooted))
    def setup_jobs(node):
        """Recursively set up jobs for this node and its children."""
        # Each child yields a (job, lifted-blocks promise) pair.
        prev_data = [setup_jobs(child) for child in node.descendants]
        # At this point all of the jobs for the lower parts of the tree have been set up.
        lifted_data = [prev_lifted for _, prev_lifted in prev_data]
        merge_job = job.wrapJobFn(merge_blocks_job, node.name, [n.name for n in node.descendants], lifted_data, hal_id, opts)
        for prev_job, _ in prev_data:
            prev_job.addFollowOn(merge_job)
        if node.is_leaf:
            # Leaves have no child jobs, so attach them directly to the root job.
            job.addChild(merge_job)
        if node.ancestor is None:
            # Root of the rerooted tree: its merge result is the final answer.
            return merge_job.rv()
        else:
            # Find out whether we have to lift up or down
            # (compare against the *original* tree's parent to detect edges
            # whose direction was flipped by the rerooting).
            original_node = find_node_by_name(tree, node.name)
            if original_node.ancestor is None or node.ancestor.name != original_node.ancestor.name:
                lift_down_job = merge_job.addFollowOnJobFn(lift_job, 'down', node.name, node.ancestor.name, merge_job.rv(), hal_id, opts)
                return lift_down_job, lift_down_job.rv()
            else:
                lift_up_job = merge_job.addFollowOnJobFn(lift_job, 'up', node.name, node.ancestor.name, merge_job.rv(), hal_id, opts)
                return lift_up_job, lift_up_job.rv()
    blocks_on_ref = setup_jobs(rerooted)
    # NOTE(review): genome names are taken from the *unpruned* tree — confirm
    # the export job tolerates genomes that were pruned out above.
    all_genomes = [node.name for node in tree.walk()]
    return job.addFollowOnJobFn(maf_export_job, hal, all_genomes, blocks_on_ref, opts).rv()
import os
def user_base(username):
    """Base path of user files"""
    # All user files live under the configured user root directory.
    root = BASE['user']
    return os.path.join(root, username)
import argparse
def get_args(arg_input):
    """Takes args input and returns them as a argparse parser

    Parameters
    -------------
    arg_input : list, shape (n_nargs,)
        contains list of arguments passed to function

    Returns
    -------------
    args : namespace
        contains namespace with keys and values for each parser argument
    """
    parser = argparse.ArgumentParser(description='tpu creation script')
    parser.add_argument(
        '--name',
        type=str,
        default='tpu',
        help='Name to use for tpu vm',
    )
    parser.add_argument(
        '--zone',
        type=str,
        default='europe-west4-a',
        help='zone',
    )
    parser.add_argument(
        '--version',
        type=str,
        default='tpu-vm-pt-1.11',
        help='software version to load',
    )
    parser.add_argument(
        '--accelerator-type',
        type=str,
        default='v3-8',
        help='accelerator type. Eg v3-8, v2-8',
    )
    parser.add_argument(
        '--project',
        type=str,
        default='trc-generative',
        help='gcloud project name',
    )
    parser.add_argument(
        '-n',
        '--number_of_tpus',
        type=int,
        default=1,
        # Original help text ("Minimum number of atleast_tags required.") was
        # a copy-paste error from an unrelated script.
        help='Number of TPU VMs to create.',
    )
    args = parser.parse_args(arg_input)
    return args
def cleanup_databases():
    """Tell the admin_client fixture how to treat pre-existing databases.

    Returns:
        bool: False — the fixture should ignore any existing databases at
            the start of the test and clean them up afterwards.
    """
    return False
import sys
def _round_down(rough_value, increment, minimum=None, maximum=None):
"""Utility method for rounding a value down to an increment.
Args:
rough_value: A float. The initial value to be rounded.
increment: The increment to round down to.
minimum: Optional minimum value, default is increment.
maximum: Optional maximum value, default is the max int.
Returns:
An integer formed by rounding rough_value down to the nearest positive
number of increments while staying between minimum and maximum.
"""
if not minimum:
minimum = increment
if not maximum:
maximum = sys.maxint
rounded_value = rough_value - (rough_value % increment)
return int(min(maximum, max(minimum, rounded_value))) | 2246258fee06817f84647eb8a404a4230c89e8d3 | 25,319 |
def document_index_list(request, document_id):
    """
    Show a list of indexes where the current document can be found
    """
    document = get_object_or_404(Document, pk=document_id)
    object_list = []
    # All index nodes referencing this document; may be narrowed below by ACLs.
    queryset = document.indexinstancenode_set.all()
    try:
        # TODO: should be AND not OR
        Permission.objects.check_permissions(request.user, [PERMISSION_DOCUMENT_VIEW, PERMISSION_DOCUMENT_INDEXING_VIEW])
    except PermissionDenied:
        # No global permission: fall back to per-object access filtering.
        queryset = AccessEntry.objects.filter_objects_by_access(PERMISSION_DOCUMENT_INDEXING_VIEW, request.user, queryset)
    for index_instance in queryset:
        # Each entry is rendered as a breadcrumb trail to the index node.
        object_list.append(get_breadcrumbs(index_instance, single_link=True, include_count=True))
    return render_to_response('generic_list.html', {
        'title': _(u'indexes containing: %s') % document,
        'object_list': object_list,
        'hide_link': True,
        'object': document
    }, context_instance=RequestContext(request))
def spot_centroid(regions):
    """Returns centroids for a list of regionprops.

    Args:
        regions (regionprops): List of region proposals (skimage.measure).

    Returns:
        list: Centroids of regionprops.
    """
    centroids = []
    for region in regions:
        centroids.append(region.centroid)
    return centroids
import os
def FindChromeSrcFromFilename(filename):
    """Searches for the root of the Chromium checkout.

    Simply checks parent directories until it finds .gclient and src/.

    Args:
        filename: (String) Path to source file being edited.

    Returns:
        (String) Path of 'src/', or None if unable to find.
    """
    current = os.path.normpath(os.path.dirname(filename))
    while True:
        # A Chromium 'src' root is named src, has a DEPS file, and sits
        # inside a gclient checkout or is itself a git checkout.
        looks_like_src = (
            os.path.basename(os.path.realpath(current)) == 'src'
            and PathExists(current, 'DEPS')
            and (PathExists(current, '..', '.gclient')
                 or PathExists(current, '.git')))
        if looks_like_src:
            return current
        parent = os.path.normpath(os.path.join(current, '..'))
        if parent == current:
            # Reached the filesystem root without finding a checkout.
            return None
        current = parent
def one_election_set_reg(request, election):
    """
    Set whether this is open registration or not
    """
    # only allow this for public elections
    if not election.private_p:
        # 'open_p' arrives as "0"/"1" in the query string.
        open_p = bool(int(request.GET['open_p']))
        election.openreg = open_p
        election.save()
    # Redirect back to the pretty voters list regardless of whether a
    # change was made.
    return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))
def overrides(conf, var):
    """Merge the entries of the named sub-dict(s) into the top level of conf.

    For each name in ``var`` (a single key or a list of keys) that exists in
    ``conf``, every key/value pair of ``conf[name]`` is copied onto ``conf``
    itself, overriding any same-named top-level keys. The sub-dict entry is
    left in place. Returns the (mutated) ``conf``.
    """
    names = var if isinstance(var, list) else [var]
    for name in names:
        if name in conf:
            conf.update(conf[name])
    return conf
def annual_to_daily_rate(rate, trading_days_in_year=TRADING_DAYS_IN_YEAR):
    """
    Infer daily rate from annual rate

    :param rate: the annual rate of return
    :param trading_days_in_year: optional, trading days in year (default = 252)
    :return: the daily rate
    """
    # Thin wrapper: the actual conversion math lives in subdivide_rate.
    # NOTE(review): the "default = 252" in the docstring assumes the
    # module-level TRADING_DAYS_IN_YEAR constant is 252 — confirm.
    return subdivide_rate(rate, trading_days_in_year)
def make_hashable(data):
    """Make the given object hashable.

    It makes it ready to use in a `hash()` call, making sure that
    it's always the same for lists and dictionaries if they have the same items.

    :param object data: the object to hash
    :return: a hashable object
    :rtype: object
    """
    # Dicts become sorted (key, hashable-value) tuples so equal dicts
    # always produce the same result.
    if isinstance(data, dict):
        return tuple(
            (key, make_hashable(value)) for key, value in sorted(data.items())
        )
    # Sequences become tuples of recursively-hashable elements.
    if isinstance(data, (list, tuple)):
        return tuple(make_hashable(item) for item in data)
    # Anything else is assumed to already be hashable.
    return data
from typing import Dict
from typing import Tuple
def prepare_model_parameters(
    parameters: Dict[str, FloatOrDistVar], data: DataFrame,
    beta_fun, splines, spline_power
) -> Tuple[Dict[str, FloatLike], Dict[str, NormalDistVar]]:
    """Prepares model input parameters and returns independent and dependent parameters

    Also shifts back simulation to start with only exposed people.

    Returns:
        (xx, pp): ``xx`` holds fixed/independent meta parameters, ``pp`` holds
        the distribution-valued (GVar) parameters to be fitted.
    """
    # Set up fit parameters
    ## Dependent parameters which will be fitted
    pp = {key: val for key, val in parameters.items() if isinstance(val, GVar)}
    ## Independent model meta parameters
    xx = {key: val for key, val in parameters.items() if key not in pp}
    # This part ensures that the simulation starts with only exposed persons
    ## E.g., we shift the simulation backwards such that exposed people start to
    ## become infected
    # NOTE(review): ppf(0.99) is the 99th percentile, though the comment
    # below says 95% — confirm which was intended.
    xx["offset"] = int(
        expon.ppf(0.99, 1 / pp["incubation_days"].mean)
    ) # Enough time for 95% of exposed to become infected
    # pp["logistic_x0"] += xx["offset"]
    xx['beta_fun'] = beta_fun
    xx['knots'] = splines
    xx['spline_power'] = spline_power
    ## Store the actual first day and the actual last day
    xx["day0"] = data.index.min()
    xx["day-1"] = data.index.max()
    ## And start earlier in time
    xx["dates"] = date_range(
        xx["day0"] - timedelta(xx["offset"]), freq="D", periods=xx["offset"]
    ).union(data.index)
    # initialize the spline parameters on the flexible beta
    # (the logistic parameterization is dropped in that case)
    if xx['beta_fun'] == "flexible_beta":
        pp['beta_splines'] = gvar([pp['pen_beta'].mean for i in range(len(xx['knots']))],
                                  [pp['pen_beta'].sdev for i in range(len(xx['knots']))])
        pp.pop("pen_beta")
        pp.pop('logistic_k')
        pp.pop('logistic_x0')
        pp.pop('logistic_L')
    ## Thus, all compartment but exposed and susceptible are 0
    for key in ["infected", "recovered", "icu", "vent", "hospital"]:
        xx[f"initial_{key}"] = 0
    # Initial exposed derived from observed hospitalizations scaled by
    # market share and hospitalization probability.
    pp["initial_exposed"] = (
        xx["n_hosp"] / xx["market_share"] / pp["hospital_probability"]
    )
    xx["initial_susceptible"] -= pp["initial_exposed"].mean
    return xx, pp
import logging
def paper_features_to_author_features(
    author_paper_index, paper_features):
    """Averages paper features to authors.

    Args:
        author_paper_index: row-indexable sparse structure (one row per
            author) whose rows expose an ``indices`` attribute listing that
            author's paper ids — presumably a scipy CSR matrix; confirm.
        paper_features: dense array of shape [NUM_PAPERS, feature_dim].

    Returns:
        Array of shape [NUM_AUTHORS, feature_dim]: each author's features are
        the mean of their papers' features.
    """
    assert paper_features.shape[0] == NUM_PAPERS
    assert author_paper_index.shape[0] == NUM_AUTHORS
    author_features = np.zeros(
        [NUM_AUTHORS, paper_features.shape[1]], dtype=paper_features.dtype)
    for author_i in range(NUM_AUTHORS):
        paper_indices = author_paper_index[author_i].indices
        # Mean in float32 for numerical stability, then cast back on store.
        author_features[author_i] = paper_features[paper_indices].mean(
            axis=0, dtype=np.float32)
        # Progress logging every 10k authors.
        if author_i % 10000 == 0:
            logging.info("%d/%d", author_i, NUM_AUTHORS)
    return author_features
def set_chart_time_horizon(request) -> JsonResponse:
    """
    Set the x-axis (time horizon) of a chart.

    API Call:
        /set_chart_time_horizon?
            monitor_name=<monitor name>&
            value=<time horizon to set>

    :param request: HTTP request that expects a 'monitor_name' and 'value' argument. 'value' represents the new time horizon to be set. Valid time horizons are: 'day', 'week', 'month', 'year', or an integer representing a number of most recent hours to display.
    :return: The new value after being set or the old value if it was not set.
    """
    # Pull the expected query parameters out of the request.
    kwargs = _parse_args(request, 'monitor_name', 'value')
    # Delegate the actual update to the monitor service manager.
    rv = MonitorServiceManager().set_value(kwargs.get('monitor_name'), 'charting_time_horizon', kwargs.get('value'))
    # safe=False because rv may not be a dict.
    return JsonResponse(rv, safe=False)
def get_single_io_arg(info):
    """
    Get single input/output arg from io info

    :param info: dict describing one input/output; must contain a 'valid' key
    :return: the cleaned arg dict, or None when the entry is not valid
    """
    if 'valid' not in info:
        raise ValueError("Json string Errors, key:valid not found.")
    if not info['valid']:
        return None
    check_arg_info(info)
    # Strip bookkeeping keys before handing the arg back.
    del info['valid']
    del info['name']
    # A range bound of -1 means "unbounded"; represent it as None.
    for bound in info.get('range', []):
        if bound[1] == -1:
            bound[1] = None
    return info
from typing import Dict
from typing import Any
def new_credentials(
    client_id: str, consumer_secret: str, data: Dict[str, Any]
) -> Credentials:
    """Create Credentials from config and json.

    ``data`` is the parsed token-endpoint response; the *_or_raise helpers
    reject missing fields.
    """
    return Credentials(
        access_token=str_or_raise(data.get("access_token")),
        # NOTE(review): assumes arrow's `.timestamp` is a numeric property
        # (older arrow); in arrow >= 1.0 it is a method — confirm pinned
        # version. Also raises TypeError if 'expires_in' is absent.
        token_expiry=arrow.utcnow().timestamp + data.get("expires_in"),
        token_type=str_or_raise(data.get("token_type")),
        refresh_token=str_or_raise(data.get("refresh_token")),
        userid=int_or_raise(data.get("userid")),
        client_id=str_or_raise(client_id),
        consumer_secret=str_or_raise(consumer_secret),
    )
def get_chains(table, ipv6=False):
    """ Return the existing chains of a table """
    table_obj = _iptc_gettable(table, ipv6)
    names = []
    for chain in table_obj.chains:
        names.append(chain.name)
    return names
import os
import glob
def check_file(file):
    """
    Check whether the file exists locally; search the project tree if needed
    and return the resolved file name.
    """
    # An existing path (or the empty string) is returned unchanged.
    if file == '' or os.path.isfile(file):
        return file
    # Otherwise search the whole project tree for a matching file name and
    # return the first (and only) match.
    matches = glob.glob('./**/' + file, recursive=True)
    # The file must exist somewhere in the tree...
    assert len(matches), 'File Not Found: %s' % file
    # ...and the name must be unambiguous.
    assert len(matches) == 1, "Multiple files match '%s', specify exact path: %s" % (file, matches)
    return matches[0]
def unscale_fundamental_matrix(fundamental_matrix, M):
    """
    Undo the coordinate scaling applied before estimating the fundamental matrix.

    :param fundamental_matrix: 3x3 fundamental matrix in scaled coordinates
    :param M: scaling factor that was applied to the image coordinates
    :return: fundamental matrix in the original (unscaled) coordinates
    """
    # Scaling both sets of homogeneous image coordinates by 1/M is undone
    # by conjugating F with the diagonal scaling transform T: F' = T^T F T.
    scale_transform = np.diag([1 / M, 1 / M, 1])
    return scale_transform.T @ fundamental_matrix @ scale_transform
def initialize_server_request(request):
    """Shortcut for initialization.

    Builds an OAuthRequest from the incoming Django request and, if
    successful, an OAuthServer configured with the PLAINTEXT and HMAC-SHA1
    signature methods.

    Returns:
        (oauth_server, oauth_request) — oauth_server is None when the
        request could not be parsed into an OAuthRequest.
    """
    # Django converts Authorization header in HTTP_AUTHORIZATION
    # Warning: it doesn't happen in tests but it's useful, do not remove!
    auth_header = {}
    if 'Authorization' in request.META:
        auth_header = {'Authorization': request.META['Authorization']}
    elif 'HTTP_AUTHORIZATION' in request.META:
        auth_header = {'Authorization': request.META['HTTP_AUTHORIZATION']}
    # NOTE(review): request.REQUEST (combined GET/POST) was removed in
    # Django 1.9 — this code targets an older Django; confirm before upgrade.
    oauth_request = OAuthRequest.from_request(request.method,
                                              request.build_absolute_uri(),
                                              headers=auth_header,
                                              parameters=dict(request.REQUEST.items()),
                                              query_string=request.environ.get('QUERY_STRING', ''))
    if oauth_request:
        oauth_server = OAuthServer(DataStore(oauth_request))
        oauth_server.add_signature_method(OAuthSignatureMethod_PLAINTEXT())
        oauth_server.add_signature_method(OAuthSignatureMethod_HMAC_SHA1())
    else:
        oauth_server = None
    return oauth_server, oauth_request
from .sky_coordinate import SkyCoord
from typing import Sequence
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
    """
    Single unnamed arg supplied.  This must be:
    - Coordinate frame with data
    - Representation
    - SkyCoord
    - List or tuple of:
      - String which splits into two values
      - Iterable with two values
      - SkyCoord, frame, or representation objects.

    Returns a dict mapping coordinate attribute names to values (or lists of
    values)
    """
    is_scalar = False  # Differentiate between scalar and list input
    # valid_kwargs = {}  # Returned dict of lon, lat, and distance (optional)
    components = {}
    skycoord_kwargs = {}
    # NOTE(review): this function mixes `frame.representation` and
    # `frame.representation_type` in different branches — confirm both
    # resolve to the same class on the astropy version in use.
    frame_attr_names = list(frame.representation_component_names.keys())
    repr_attr_names = list(frame.representation_component_names.values())
    repr_attr_classes = list(frame.representation.attr_classes.values())
    n_attr_names = len(repr_attr_names)
    # Turn a single string into a list of strings for convenience
    if isinstance(coords, str):
        is_scalar = True
        coords = [coords]
    if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
        # Note that during parsing of `frame` it is checked that any coordinate
        # args have the same frame as explicitly supplied, so don't worry here.
        if not coords.has_data:
            raise ValueError('Cannot initialize from a frame without coordinate data')
        data = coords.data.represent_as(frame.representation_type)
        values = []  # List of values corresponding to representation attrs
        repr_attr_name_to_drop = []
        for repr_attr_name in repr_attr_names:
            # If coords did not have an explicit distance then don't include in initializers.
            if (isinstance(coords.data, UnitSphericalRepresentation) and
                    repr_attr_name == 'distance'):
                repr_attr_name_to_drop.append(repr_attr_name)
                continue
            # Get the value from `data` in the eventual representation
            values.append(getattr(data, repr_attr_name))
        # drop the ones that were skipped because they were distances
        for nametodrop in repr_attr_name_to_drop:
            nameidx = repr_attr_names.index(nametodrop)
            del repr_attr_names[nameidx]
            del units[nameidx]
            del frame_attr_names[nameidx]
            del repr_attr_classes[nameidx]
        # Carry over any velocity (differential) components as well.
        if coords.data.differentials and 's' in coords.data.differentials:
            orig_vel = coords.data.differentials['s']
            vel = coords.data.represent_as(frame.representation, frame.get_representation_cls('s')).differentials['s']
            for frname, reprname in frame.get_representation_component_names('s').items():
                if (reprname == 'd_distance' and not hasattr(orig_vel, reprname) and
                        'unit' in orig_vel.get_name()):
                    continue
                values.append(getattr(vel, reprname))
                units.append(None)
                frame_attr_names.append(frname)
                repr_attr_names.append(reprname)
                repr_attr_classes.append(vel.attr_classes[reprname])
        # Frame attributes (e.g. obstime, equinox) are forwarded so the new
        # SkyCoord preserves them.
        for attr in frame_transform_graph.frame_attributes:
            value = getattr(coords, attr, None)
            use_value = (isinstance(coords, SkyCoord)
                         or attr not in coords._attr_names_with_defaults)
            if use_value and value is not None:
                skycoord_kwargs[attr] = value
    elif isinstance(coords, BaseRepresentation):
        if coords.differentials and 's' in coords.differentials:
            diffs = frame.get_representation_cls('s')
            data = coords.represent_as(frame.representation_type, diffs)
            values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
            for frname, reprname in frame.get_representation_component_names('s').items():
                values.append(getattr(data.differentials['s'], reprname))
                units.append(None)
                frame_attr_names.append(frname)
                repr_attr_names.append(reprname)
                repr_attr_classes.append(data.differentials['s'].attr_classes[reprname])
        else:
            data = coords.represent_as(frame.representation)
            values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
    elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if'
          and coords.ndim == 2 and coords.shape[1] <= 3):
        # 2-d array of coordinate values.  Handle specially for efficiency.
        values = coords.transpose()  # Iterates over repr attrs
    elif isinstance(coords, (Sequence, np.ndarray)):
        # Handles list-like input.
        vals = []
        is_ra_dec_representation = ('ra' in frame.representation_component_names and
                                    'dec' in frame.representation_component_names)
        coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
        if any(isinstance(coord, coord_types) for coord in coords):
            # this parsing path is used when there are coordinate-like objects
            # in the list - instead of creating lists of values, we create
            # SkyCoords from the list elements and then combine them.
            scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
            # Check that all frames are equivalent
            for sc in scs[1:]:
                if not sc.is_equivalent_frame(scs[0]):
                    raise ValueError("List of inputs don't have equivalent "
                                     "frames: {0} != {1}".format(sc, scs[0]))
            # Now use the first to determine if they are all UnitSpherical
            allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
            # get the frame attributes from the first coord in the list, because
            # from the above we know it matches all the others.  First copy over
            # the attributes that are in the frame itself, then copy over any
            # extras in the SkyCoord
            for fattrnm in scs[0].frame.frame_attributes:
                skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
            for fattrnm in scs[0]._extra_frameattr_names:
                skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)
            # Now combine the values, to be used below
            values = []
            for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names):
                if allunitsphrepr and repr_attr_name == 'distance':
                    # if they are *all* UnitSpherical, don't give a distance
                    continue
                data_vals = []
                for sc in scs:
                    data_val = getattr(sc, data_attr_name)
                    data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val)
                concat_vals = np.concatenate(data_vals)
                # Hack because np.concatenate doesn't fully work with Quantity
                if isinstance(concat_vals, u.Quantity):
                    concat_vals._unit = data_val.unit
                values.append(concat_vals)
        else:
            # none of the elements are "frame-like"
            # turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
            for coord in coords:
                if isinstance(coord, str):
                    coord1 = coord.split()
                    if len(coord1) == 6:
                        # Six tokens: treat as "h m s d m s" sexagesimal pair.
                        coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
                    elif is_ra_dec_representation:
                        coord = _parse_ra_dec(coord)
                    else:
                        coord = coord1
                vals.append(coord)  # Assumes coord is a sequence at this point
            # Do some basic validation of the list elements: all have a length and all
            # lengths the same
            try:
                n_coords = sorted(set(len(x) for x in vals))
            except Exception:
                raise ValueError('One or more elements of input sequence does not have a length')
            if len(n_coords) > 1:
                raise ValueError('Input coordinate values must have same number of elements, found {0}'
                                 .format(n_coords))
            n_coords = n_coords[0]
            # Must have no more coord inputs than representation attributes
            if n_coords > n_attr_names:
                raise ValueError('Input coordinates have {0} values but '
                                 'representation {1} only accepts {2}'
                                 .format(n_coords,
                                         frame.representation_type.get_name(),
                                         n_attr_names))
            # Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
            # (ok since we know it is exactly rectangular).  (Note: can't just use zip(*values)
            # because Longitude et al distinguishes list from tuple so [a1, a2, ..] is needed
            # while (a1, a2, ..) doesn't work.
            values = [list(x) for x in zip(*vals)]
            if is_scalar:
                values = [x[0] for x in values]
    else:
        raise ValueError('Cannot parse coordinates from first argument')
    # Finally we have a list of values from which to create the keyword args
    # for the frame initialization.  Validate by running through the appropriate
    # class initializer and supply units (which might be None).
    try:
        for frame_attr_name, repr_attr_class, value, unit in zip(
                frame_attr_names, repr_attr_classes, values, units):
            components[frame_attr_name] = repr_attr_class(value, unit=unit,
                                                          copy=False)
    except Exception as err:
        raise ValueError('Cannot parse first argument data "{0}" for attribute '
                         '{1}'.format(value, frame_attr_name), err)
    return skycoord_kwargs, components
from typing import Tuple
def build_names(dependency: Dependency, version_in_url: bool = True) -> Tuple[RemoteResolver, str, str]:
    """
    Build directory and file names based on the given dependency.

    :param dependency: the dependency to create the file container for.
    :param version_in_url: a flag noting whether the dependency version should be included
        in the URL we build.
    :return: a tuple containing an appropriate remote resolver, a classified base file name
        and a base file name.
    """
    url_version = dependency.version if version_in_url else None
    resolver = create_remote_resolver(dependency.group, dependency.name, url_version)
    # Base name is always <name>-<version>; the classifier, when present,
    # is appended as an extra suffix.
    base_name = f'{dependency.name}-{dependency.version}'
    if dependency.classifier:
        classified_name = f'{base_name}-{dependency.classifier}'
    else:
        classified_name = base_name
    return resolver, classified_name, base_name
def convert_apc_examples_to_features(examples, label_list, max_seq_len, tokenizer, opt=None):
    """Convert ATEPC examples into a list of ``InputFeatures`` for model input.

    Each example's sentence and aspect tokens are wrapped with the
    tokenizer's BOS/EOS tokens, sub-word tokenized, IOB-labelled,
    LCF-vectorized and padded/truncated to ``max_seq_len``.

    Args:
        examples: iterable of examples exposing ``text_a`` (sentence tokens),
            ``text_b`` (aspect tokens), ``IOB_label`` and ``polarity``.
        label_list: IOB label strings; indices are assigned starting at 1
            so index 0 is implicitly reserved for padding.
        max_seq_len: maximum sequence length after sub-word tokenization.
        tokenizer: sub-word tokenizer providing ``bos_token``/``eos_token``.
        opt: option namespace; receives ``IOB_label_to_index`` as a side
            effect and is forwarded to the LCF input preparation.

    Returns:
        list of ``InputFeatures``.
    """
    configure_spacy_model(opt)
    bos_token = tokenizer.bos_token
    eos_token = tokenizer.eos_token
    # Labels are numbered from 1; 0 serves as the padding label id.
    label_map = {label: i for i, label in enumerate(label_list, 1)}
    opt.IOB_label_to_index = label_map
    features = []
    for (ex_index, example) in enumerate(examples):
        text_tokens = example.text_a[:]
        aspect_tokens = example.text_b[:]
        IOB_label = example.IOB_label
        # aspect_label = example.aspect_label
        # Every aspect token is tagged as the beginning of an aspect span.
        aspect_label = ['B-ASP'] * len(aspect_tokens)
        # -999 is a sentinel marking BOS/EOS positions as "no polarity".
        polarity = [-999] + example.polarity + [-999]
        positions = np.where(np.array(polarity) > 0)[0].tolist()
        tokens = []
        labels = []
        valid = []
        label_mask = []
        # SPC-style input layout: <bos> sentence <eos> aspect <eos>
        enum_tokens = [bos_token] + text_tokens + [eos_token] + aspect_tokens + [eos_token]
        IOB_label = [bos_token] + IOB_label + [eos_token] + aspect_label + [eos_token]
        enum_tokens = enum_tokens[:max_seq_len]
        IOB_label = IOB_label[:max_seq_len]
        aspect = ' '.join(example.text_b)
        try:
            # Split the sentence around the aspect term to build LCF inputs.
            text_left, _, text_right = [s.strip() for s in ' '.join(example.text_a).partition(aspect)]
        except:
            # NOTE(review): bare except silently degrades to an empty aspect;
            # consider narrowing to the exception actually expected here.
            text_left = ' '.join(example.text_a)
            text_right = ''
            aspect = ''
        text_raw = text_left + ' ' + aspect + ' ' + text_right
        validate_example(text_raw, aspect, '')
        prepared_inputs = prepare_input_for_atepc(opt, tokenizer, text_left, text_right, aspect)
        lcf_cdm_vec = prepared_inputs['lcf_cdm_vec']
        lcf_cdw_vec = prepared_inputs['lcf_cdw_vec']
        for i, word in enumerate(enum_tokens):
            token = tokenizer.tokenize(word)
            tokens.extend(token)
            cur_iob = IOB_label[i]
            # Only the first sub-token of each word is "valid" and labelled;
            # continuation sub-tokens get valid=0 and no label.
            for m in range(len(token)):
                if m == 0:
                    label_mask.append(1)
                    labels.append(cur_iob)
                    valid.append(1)
                else:
                    valid.append(0)
        # Reserve room for special tokens by truncating to max_seq_len - 2.
        tokens = tokens[0:min(len(tokens), max_seq_len - 2)]
        labels = labels[0:min(len(labels), max_seq_len - 2)]
        valid = valid[0:min(len(valid), max_seq_len - 2)]
        # Segment 0 covers the sentence tokens, segment 1 everything after.
        segment_ids = [0] * len(example.text_a[:]) + [1] * (max_seq_len - len([0] * len(example.text_a[:])))
        segment_ids = segment_ids[:max_seq_len]
        label_ids = []
        for i, token in enumerate(tokens):
            if len(labels) > i:
                label_ids.append(label_map[labels[i]])
        input_ids_spc = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids_spc)
        label_mask = [1] * len(label_ids)
        # Pad all sequences up to max_seq_len (0 = padding id / masked out).
        while len(input_ids_spc) < max_seq_len:
            input_ids_spc.append(0)
            input_mask.append(0)
            label_ids.append(0)
            label_mask.append(0)
        # NOTE(review): `valid` is padded with 1 (not 0); this looks deliberate
        # for downstream gather ops on valid_ids -- confirm against the model.
        while len(valid) < max_seq_len:
            valid.append(1)
        while len(label_ids) < max_seq_len:
            label_ids.append(0)
            label_mask.append(0)
        assert len(input_ids_spc) == max_seq_len
        assert len(input_mask) == max_seq_len
        assert len(segment_ids) == max_seq_len
        assert len(label_ids) == max_seq_len
        assert len(valid) == max_seq_len
        assert len(label_mask) == max_seq_len
        features.append(
            InputFeatures(input_ids_spc=input_ids_spc,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_ids,
                          polarity=polarity,
                          valid_ids=valid,
                          label_mask=label_mask,
                          tokens=example.text_a,
                          lcf_cdm_vec=lcf_cdm_vec,
                          lcf_cdw_vec=lcf_cdw_vec,
                          aspect=aspect,
                          positions=positions
                          )
        )
    return features
from datetime import datetime
from pathlib import Path
def write_output(opts: "AppOptions", out_lines):
    """
    Write the modified document lines to a new file whose name carries a
    "MODIFIED" marker and a date_time tag, next to the source document.

    :param opts: application options; only ``opts.doc_path`` (a Path to the
        source document) is read.
    :param out_lines: iterable of strings, written one per line.
    :return: the output file path as a string.
    :raises FileExistsError: if the target file already exists (the
        timestamped name makes this effectively impossible in practice).
    """
    ds = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_name = f"{opts.doc_path.stem}_MODIFIED_{ds}{opts.doc_path.suffix}"
    out_path = Path(opts.doc_path).parent.joinpath(out_name)
    # Guard explicitly rather than with `assert`, which is stripped under -O.
    if out_path.exists():
        raise FileExistsError(f"Output file already exists: {out_path}")
    print(f"\nSaving '{out_path}'")
    # Pin the encoding so the output does not depend on the platform default.
    with open(out_path, "w", encoding="utf-8") as out_file:
        out_file.writelines(f"{s}\n" for s in out_lines)
    return str(out_path)
def ssh_pub_key(key_file):
    """Build the OpenSSH public-key line for the RSA private key in *key_file*."""
    key = paramiko.RSAKey(filename=key_file)
    # Format: "<key-type> <base64-blob> <comment>"
    return "{0} {1} autogenerated by polyphemus".format(key.get_name(), key.get_base64())
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True, use_zeros=False, init=None):
    """Helper to create an initialized Variable with weight decay.

    The Variable is initialized with a truncated normal distribution unless
    ``use_xavier`` or ``use_zeros`` is set (``use_xavier`` wins when both are
    True, since it is checked first).
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.
      use_xavier: bool, whether to use xavier (Glorot normal) initializer
      use_zeros: bool, whether to use zero initializer
      init: currently unused -- presumably kept for signature compatibility
        with callers; TODO confirm and either wire it up or remove it.

    Returns:
      Variable Tensor
    """
    if use_xavier:
        #initializer = tf.contrib.layers.xavier_initializer()
        initializer = tf.initializers.glorot_normal()
    elif use_zeros:
        initializer = tf.constant_initializer(0.0)
    else:
        initializer = tf.truncated_normal_initializer(stddev=stddev)
    var = _variable_on_cpu(name, shape, initializer)
    if wd is not None:
        # Register the decay term in the 'losses' collection so the training
        # loop can sum it into the total loss.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def build_mobile_vit(config):
    """Construct a MobileViT model from the settings in a config object.

    Args:
        config: config instance; the ``MODEL`` section supplies the channel
            count, block dims, transformer hidden dims and class count.
    Returns:
        model: MobileViT model
    """
    model_cfg = config.MODEL
    return MobileViT(in_channels=model_cfg.IN_CHANNELS,
                     dims=model_cfg.DIMS,
                     hidden_dims=model_cfg.HIDDEN_DIMS,
                     num_classes=model_cfg.NUM_CLASSES)
def _openSerialPort(comport):
    """Open the serial port named by ``comport`` at 115200 8N1.

    Returns the open ``serial.Serial`` stream on success, or None if the
    port could not be opened.  (This matches the previous de-facto
    behaviour: the old ``return`` inside ``finally`` suppressed the
    re-raised ``SerialException``, so callers always received None.)

    :param comport: serial port name, e.g. "COM3" or "/dev/ttyUSB0".
    :return: an open ``serial.Serial`` instance, or None on failure.
    """
    try:
        return serial.Serial(
            port=comport,
            baudrate=115200,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            bytesize=serial.EIGHTBITS,
            timeout=1
        )
    except serial.SerialException as ex:
        # The original `raise` here was dead code: a bare `return` in a
        # `finally` block swallows any in-flight exception.  Keep the
        # None-on-failure contract but stop pretending to raise.
        print(f"Failed to capture serial port: {ex}")
        return None
from functools import reduce
from bs4 import BeautifulSoup
def search_youtube(query, retries = 4, max_num_results = -1):
    """ Unlimited youtube search by web scrapping

    Args:
        query: sequence of search terms; joined with '+' to build the
            request query string (empty sequence -> empty query).
        retries: maximum number of page fetches attempted until a complete
            result list ("</ol>") appears in the response.
        max_num_results: cap on the number of results returned;
            non-positive means "return everything".

    Returns:
        list of scrapped result dicts (one per parsable search item).

    Raises:
        Exception: if the fetched page contains no result list.
    """
    # str.join replaces the old reduce()-based concatenation and handles the
    # empty-query case for free.
    transformed_query = '+'.join(query)
    scrapped_data = []
    num_of_requests = 0
    for i in range(retries):
        page = get_html(transformed_query)
        num_of_requests += 1
        if "</ol>" in page.text:
            break
    logger.info(f" Number of requests : {num_of_requests}")
    soup = BeautifulSoup(page.content, 'html.parser')
    item_list = soup.find('ol', class_='item-section')
    if item_list is None:
        raise Exception(" Html without list of results ")
    items = item_list.find_all('li')
    # Keep only the items extract_data could actually parse.
    scrapped_data = [x for x in map(extract_data, items) if x is not None]
    return scrapped_data if max_num_results <= 0 else scrapped_data[:max_num_results]
import sys
def Pull( display_sentinel,
          json_filename,
          result_filename,
          first=False,
          output_stream=sys.stdout,
        ):
    """Hook entry point for the pull path; thin forwarder to ``_Impl``.

    display_sentinel -- marker string used to delimit displayable output
    json_filename    -- path of the JSON file describing the change
    result_filename  -- path where the hook result should be written
    first            -- flag forwarded to _Impl (presumably True on the
                        first invocation of the hook chain -- TODO confirm)
    output_stream    -- stream for user-visible output (default: stdout)

    NOTE(review): the original docstring said "prior to committing a change
    pushed from a client", and the payload is parsed with Pushed_FromJson
    while the method name constant is the PULL one -- confirm the intended
    pull/push semantics against _Impl.
    """
    return _Impl( display_sentinel,
                  json_filename,
                  result_filename,
                  first,
                  output_stream,
                  Constants.HOOK_ENVIRONMENT_PULL_METHOD_NAME,
                  HooksImplParser.Pushed_FromJson,
                )
from typing import List
from typing import Tuple
from typing import OrderedDict
def define_empty_source_parallel_buckets(max_seq_len_target: int,
                                         bucket_width: int = 10) -> List[Tuple[int, int]]:
    """
    Returns (source, target) buckets up to (None, max_seq_len_target). The
    source side is always empty (length 0) because it holds no bucketizable
    data; only the target length drives the bucketing.

    :param max_seq_len_target: Maximum target bucket size.
    :param bucket_width: Width of buckets on the longer (target) side.
    :return: Sorted, de-duplicated list of (0, target_len) pairs, with every
        target length at least 2.
    """
    step = max(1, bucket_width)
    raw_targets = define_buckets(max_seq_len_target, step=step)
    # Source buckets are always 0 (no text); target buckets are clamped to >= 2.
    pairs = [(0, max(2, b)) for b in raw_targets]
    # De-duplicate while preserving hashable tuples, then sort ascending.
    deduped = list(OrderedDict.fromkeys(pairs))
    deduped.sort()
    return deduped
return buckets | 3e5261191c5a55f4d82ee84b9000e0996e06c9f9 | 25,346 |
def _hue_scaling(args):
"""return scaled hue values as described in
http://dlmf.nist.gov/help/vrml/aboutcolor
args : ndarray of args / angle of complex numbers between in the open
interval [0, 2*pi)
q : scaled values returned in the interval [0, 1)
"""
q = 4.0*_np.mod((args/(2*_np.pi) + 1), 1)
mask1 = (q >= 0) * (q < 1)
mask2 = (q >= 1) * (q < 2)
mask3 = (q >= 2) * (q < 3)
mask4 = (q >= 3) * (q < 4)
q[mask1] = (60.0/360)*q[mask1]
q[mask2] = (60.0/360)*(2.0*q[mask2] - 1)
q[mask3] = (60.0/360)*(q[mask3] + 1)
q[mask4] = (60.0/360)*2.0*(q[mask4] - 1)
return q | 42fa445d5e790eed13d692613771d02cc55fad94 | 25,347 |
from qharv.inspect import axes_pos
def get_orbs(fp, orbs, truncate=False, tol=1e-8):
    """ return the list of requested Kohn-Sham orbitals
    Args:
        fp (h5py.File): wf h5 file
        orbs (list): a list of 3-tuples, each tuple specifies the KS state
            by (kpoint/twist, spin, band) i.e. (ik, ispin, ib)
        truncate (bool, optional): remove PWs with ``small'' coefficient
        tol (float, optional): define ``small'' as |ck|^2 < tol
    Returns:
        (list, list): (kvecsl, psigl) -- per-orbital Cartesian plane-wave
            k-vectors and the matching PW coefficients, in the order of
            ``orbs``
    """
    gvecs = get(fp, 'gvectors')
    qvecs = get_twists(fp)
    axes = get(fp, 'axes')
    # reciprocal-lattice vectors derived from the real-space cell
    raxes = axes_pos.raxes(axes)
    kvecsl = []
    psigl = []
    for orb in orbs:
        ik, ispin, ib = orb
        # PW basis: Cartesian k-vectors from (G + twist) in the reciprocal cell
        kvecs = np.dot(gvecs+qvecs[ik], raxes)
        npw = len(kvecs)
        # PW coefficients
        psig = get_orb_in_pw(fp, ik, ispin, ib)
        # default: keep every plane wave
        sel = np.ones(npw, dtype=bool)
        if truncate:  # cut down on the # of PWs
            # keep only plane waves whose weight |ck|^2 exceeds tol
            pg2 = (psig.conj()*psig).real
            sel = pg2 > tol
        kvecsl.append(kvecs[sel])
        psigl.append(psig[sel])
    return kvecsl, psigl
def geturlcgivars(baseurl, port):
    """
    Extract CGI variables from baseurl

    >>> geturlcgivars("http://host.org/base", "80")
    ('host.org', '80', '/base')
    >>> geturlcgivars("http://host.org:8000/base", "80")
    ('host.org', '8000', '/base')
    >>> geturlcgivars('/base', 8000)
    ('', '8000', '/base')
    >>> geturlcgivars("base", '8000')
    ('', '8000', '/base')
    >>> geturlcgivars("http://host", '8000')
    ('host', '8000', '/')
    >>> geturlcgivars("http://host/", '8000')
    ('host', '8000', '/')
    """
    parsed = util.url(baseurl)
    host = parsed.host if parsed.host else ''
    # A port embedded in the URL overrides the caller-supplied one.
    effective_port = parsed.port if parsed.port else port
    path = parsed.path if parsed.path else ""
    if not path.startswith('/'):
        path = '/' + path
    return host, str(effective_port), path
import json
def insert_rails(file_path, half_gauge, drive_right = True, mr = 12.5, copy = True): # TODO - print 'progress messages' for done steps
"""
Deduces all rails' vertices from graph data and adds them to the json blueprint
half_gauge represents distance between center of the road and of the vehicle,
drive_right characterizes whether cars in network drive right (eg. Germany) or left (eg. UK)
mr stands for 'minimal radius' - minimal turning radius of the network in meters
(...default - 12.5 meters, German car standard)
Function's present inner structure is:
(0 - define nested strip())
1 - validate input JSON dict (must contain valid Vertices and Edges)
2 - iterate through all intersections (graph vertices)
* load and order all its neighbours (establish edges)
* double each edge into 'intersection_rails' to find new rails' points
* find 'intersection_rails' crossings - Rails: Vertices xys
3 - add found Rails' Vertices into emerging pyJSON dict
4 - deduce rails (edges) from vertices, add into pyJSON dict
5 - add Shapes of complex rails (Rails' Edges) derived from complex graph edges
* divide Shape by type into polyline and bezier graph segments
* double shape segments one by one, then glue them back together in correct driving order
* Chop off multi-segment Shapes' inner overcrossings and insert Shapes to rails_dict
6 - Fix Shapes' incorrect, ignored 1st points and overflowing end bits
* Correct Shape's first, incorrect, ignored point
* Chop off Shapes' overflowing last points
7 - Recompute beziers to fit inside Offsets
8 - Smoothen out all corners and insert crossings
9 - Write finished "Rails" pyJSON into the source .json file
* (OPTIONAL) Create legible .txt copy
Includes: strip()
Uses: abstract_geometry.offset_polyline(), .offset_bezier(), .intersection_point(), .bezier_crossing(), .orient_line(), .bezier_intersection()
read_json.get_dict(), .order_neighbours(), .smoothen_rails()
.replicate_json
Input: json file (path str), half_gauge (int) IN METERS, drive_right (bool), step (float), mr (False or float - meters) copy (bool)
Output: modifies json file - adds "Rails" dictionary
"""
""" ~~~~~~~~~~ (0 - define nested strip()) ~~~~~~~~~~ """
# strip():
# Strips vertice indexes from label ("ACE41" -> "ACE")
#
# Input: label (str)
# Output: stripped label (str)
def strip(label):
label = list(label)
ciphers = set([str(num) for num in range(10)])
for char in label[::-1]:
if char in ciphers:
label.pop()
return "".join(label)
""" ~~~~~~~~~~ 1 - validate input JSON dict (must contain valid Vertices and Edges) ~~~~~~~~~~ """
try:
json_data = get_dict(file_path)
vertices = json_data["Vertices"]
edges = json_data["Edges"]
assert vertices and edges, "JSON invalid (\"Vertices\" or \"Edges\" could not be loaded). ~ write_json.insert_rails()"
except KeyError:
print("JSON invalid (\"Vertices\" or \"Edges\" could not be loaded). ~ write_json.insert_rails()")
assert 1 < half_gauge < 10, "given road width (half_gauge) is out of this world! ~ write_json.insert_rails()"
""" ~~~~~~~~~~ 2 - iterate through all intersections (graph vertices) ~~~~~~~~~~ """
# 2.1 # finding rail points of all intersections:
rails_dict = {"Vertices":{}, "Edges": []} # will be written inside JSON
for vertice_key in vertices:
vertice_points = [] # cross points of intersection' rails
vertice = vertices[vertice_key]
# edges stemming from vertice must be in counter-clockwise order...
intersection_rails = [] # list of [xy1, xy2] pairs (1 pair for each rail)
# 2.2 # doubling edges into rails, ALWAYS: 1, right-of-edge 2, left-of-edge:
# orders neighbour vertices counter-clockwise (clockwise == False)
neighbours = order_neighbours(vertice_key, vertices, False)
for neighbour in neighbours: # go through all neighbours
try:
doubled_edge = next(edge
for edge in edges
if edge["Vertices"] == [vertice_key, neighbour]
or edge["Vertices"] == [neighbour, vertice_key]
)
except StopIteration:
print("Could not find edge for", [vertice_key, neighbour], "- meaning:")
print("Invalid entry data (Vertices' Neighbours don't cross-match). ~ write_json.insert_rails()")
return False
xy1 = vertice["Coordinates"]
neighbour_index = 1 if doubled_edge["Vertices"].index(neighbour) == 1 else -2 # second or second last (is edge oriented vertice -> neighbour?)
if "Shape" not in doubled_edge.keys(): # simple line
xy2 = vertices[neighbour]["Coordinates"]
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, True)) # First append right rail
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, False)) # Then append left rail
elif not isinstance(doubled_edge["Shape"][1-abs(neighbour_index)][0], list): # 1st (or -1st for inverted) segment is a line (Ie. is not Bezier)
xy2 = doubled_edge["Shape"][neighbour_index] # 2nd polyline control point
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, True)) # First append right rail
intersection_rails.append(offset_polyline([xy1, xy2], half_gauge, False)) # Then append left rail
else: # edge is bezier, append doubled control points as a rail
points = doubled_edge["Shape"][1-abs(neighbour_index)][::3-abs(neighbour_index)*2] # sliced either ::1 (no change) or ::-1 (reversed)
assert len(points) == 3, "Only quadratic (3-control-points) beziers are allowed. ~ write_json.insert_rails()"
# only append first offset subbeziers:
if mr:
iterations = int((distance(*points[:2]) + distance(*points[1:])) // (mr * 3)) # adjust subdivision to the length of bezier's control polygon
else:
iterations = 2
iterations = 2 if iterations > 2 else iterations
intersection_rails.append(offset_bezier(points, half_gauge, True, split_iterations=iterations)) # First append right rail
intersection_rails.append(offset_bezier(points, half_gauge, False, split_iterations=iterations)) # Then append left rail
# shuffle first rail to the end to change order to L, R-L, R-L, (...) , R-L, R
first_vertice = intersection_rails.pop(0)
intersection_rails.append(first_vertice)
# 2.3 # find 'intersection_rails' crossings - Rails: Vertices xys
# ...first found intersection point (or intersection line) is always between: 1st vertice's left rail and 2nd vertice's right rail
for i in range(len(intersection_rails) // 2):
if len(neighbours) == 1: # this vertice is a dead-end, no intersection point
# find out which end of edge is further from graph:
for rail in intersection_rails:
distances = []
ref_point = vertices[neighbours[0]]["Coordinates"]
if isinstance(rail[0][0], list): # dead-end is bezier
end_points = [rail[0][0], rail[0][-1]]
else: # dead-end is line (edge itself or 1st polyline section)
end_points = rail
for end_point in end_points:
distances.append(distance(end_point, ref_point))
dead_point = rail[0] if distances[0] > distances[1] else rail[1]
vertice_points.append(dead_point)
if isinstance(intersection_rails[2*i][0][0], list) or isinstance(intersection_rails[2*i+1][0][0], list): # at least one bezier in currently computed rail pair
if isinstance(intersection_rails[2*i][0][0], list):
vertice_point = beziers_crossing(intersection_rails[2*i], intersection_rails[2*i+1])[0]
else:
vertice_point = beziers_crossing(intersection_rails[2*i+1], intersection_rails[2*i])[0]
else:
vertice_dict = intersection_point(intersection_rails[2*i], intersection_rails[2*i+1], cut_lines=True)
if vertice_dict:
intersection_rails[2*i] = vertice_dict["line1"]
intersection_rails[2*i+1] = vertice_dict["line2"]
vertice_point = vertice_dict["point"]
else:
vertice_point = False
if vertice_point != False: # point found
vertice_points.append(vertice_point)
else: # lines don't cross
vertice_line = []
for rail in [intersection_rails[2*i], intersection_rails[2*i+1]]:
if isinstance(rail[0][0], list): # line is bezier
rail = orient_line(vertice["Coordinates"], [rail[0][0], rail[0][-1]]) # transforms bezier into properly oriented line
vertice_line.append(rail[0]) # beginning of properly ordered line abstracted from bezier
else:
rail = orient_line(vertice["Coordinates"], rail) # Order lines' points by proximity to vertice point:
vertice_line.append(rail[0]) # beginning of properly ordered line
# Insert beginnings of a rail - [[xy], [xy]] - Vertice Coordinates are a line!
vertice_line = vertice_line[::(drive_right*2)-1] # vertice line counter-clockwise, flip it if drive_right == False (0)
vertice_points.append(vertice_line)
if len(neighbours) == 1: # parallel lines - skip crossing process in step 3 (right below)
for i in range(2):
if isinstance(vertice_points[i][0], list): # vertice_point is bezier
rails_dict["Vertices"][vertice_key + str(i+1)] = {"Coordinates" : vertice_points[i][0]}
else: # vertice_point is part of line
rails_dict["Vertices"][vertice_key + str(i+1)] = {"Coordinates" : vertice_points[i]}
# adhere to "NeighbourVertices" naming convention:
# [first's left rail, counter-clockwise second's right rail] (from present vertice's perspective)
if i == 0:
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"] = [neighbours[0], " "]
else:
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"] = [" ", neighbours[0]]
continue
""" ~~~~~~~~~~ 3 - write Rails' Vertices into emerging pyJSON dict ~~~~~~~~~~ """
# Write JSON vertices:
for i in range(len(vertice_points)):
rails_dict["Vertices"][vertice_key + str(i+1)] = {"Coordinates" : vertice_points[i]}
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"] = [neighbours[i]] # making use of the prior counter-clockwise ordering
try:
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"].append(neighbours[i+1]) # Neighbours: [left rail's, right rails's]
except IndexError: # last intersection - 2nd neighbour is right rail of first neighbour's edge
rails_dict["Vertices"][vertice_key + str(i+1)]["NeighbourVertices"].append(neighbours[0])
""" ~~~~~~~~~~ 4 - deduce rails (edges) from vertices, add into pyJSON dict ~~~~~~~~~~ """
# deduce rails from vertices, thanks to naming convention:
for key in rails_dict["Vertices"].keys(): # add Neighbours list to vertices
rails_dict["Vertices"][key]["Neighbours"] = []
for vertice_label, vertice_data in rails_dict["Vertices"].items():
neighbours = []
label = strip(vertice_label)
# inserting Neighbours in vertices:
searched_neighbours = vertice_data["NeighbourVertices"]
for neighbour_label, neighbour_data in rails_dict["Vertices"].items():
if neighbour_label == vertice_label:
continue
# insert "Rails": "Edges"
if strip(neighbour_label) == searched_neighbours[0] and neighbour_data["NeighbourVertices"][1] == label:
rails_dict["Vertices"][vertice_label]["Neighbours"].insert(0, neighbour_label)
if drive_right == False: # rail Edges format: [start, end]
rails_dict["Edges"].append({"Vertices": [vertice_label, neighbour_label]})
elif strip(neighbour_label) == searched_neighbours[1] and neighbour_data["NeighbourVertices"][0] == label:
rails_dict["Vertices"][vertice_label]["Neighbours"].append(neighbour_label)
if drive_right:
rails_dict["Edges"].append({"Vertices": [vertice_label, neighbour_label]})
""" ~~~~~~~~~~ 5 - add Shapes of complex rails (Rails' Edges) derived from complex graph edges ~~~~~~~~~~ """
# modify the shapes of those Rails edges based on complex graph edges:
# note for direction - first load and compute, eventually reverse order only at the end
complex_edges = {} # format: set(vertice1, vertice2) : [shape]
for edge in json_data["Edges"]: # find complex edges
if "Shape" in edge.keys():
complex_edges[tuple(edge["Vertices"])] = edge["Shape"]
# complex rails' last part wasn't chopped off in step 2, it can only be done ex-post
unchopped_shapes = {} # dict, fotmat: {rail_index : complex rail, ...}
# insert "Shape" into Rails' Edges
for rail_index in range(len(rails_dict["Edges"])): # iterate through all rails
label1 = rails_dict["Edges"][rail_index]["Vertices"][0]
label2 = rails_dict["Edges"][rail_index]["Vertices"][1]
original_labels = [strip(label1), strip(label2)]
if tuple(original_labels) in complex_edges.keys() or tuple(original_labels[::-1]) in complex_edges.keys(): # rail should have complex Shape
original_shape = complex_edges[tuple(original_labels)] if tuple(original_labels) in complex_edges.keys() else complex_edges[tuple(original_labels[::-1])]
# 5.1 # divide doubled Shape into individual polyline and bezier sublines:
shape_segments = [] # Bezier distinguished by virtue of being a nested list, as customary
segment_index = 0
polyline_started = False # algorithm assumes we begin on bezier
for shape_index in range(len(original_shape)): # going through the entire "Shape"
if isinstance(original_shape[shape_index][0], list): # bezier encountered
if polyline_started:
segment_index += 1 # move from polyline to this new segment
polyline_started = False
shape_segments.append([original_shape[shape_index]]) # bezier distinguished by being nested list, as is conventional
segment_index += 1 # move to next segment
else: # polyline point encountered
if polyline_started == False:
shape_segments.append([]) # new segment buffer
polyline_started = True
shape_segments[segment_index].append(original_shape[shape_index])
# 5.2 # double shape segments one by one, then glue them back together in correct driving order (into doubled_segments):
doubled_segments = []
for segment in shape_segments:
if isinstance(segment[0][0], list): # Bezier curve, add shifted control points
if tuple(original_labels) in complex_edges.keys(): # control points are in the right direction
if mr:
iterations = int((distance(*segment[0][:2]) + distance(*segment[0][1:])) // (mr * 3)) # adjust subdivision to the length of bezier's control polygon
else:
iterations = 2
iterations = 2 if iterations > 2 else iterations
subbeziers = offset_bezier(segment[0], half_gauge, drive_right, split_iterations=iterations)
for subbez in subbeziers:
doubled_segments.append([subbez]) # drive_right True -> we want right rail
elif tuple(original_labels[::-1]) in complex_edges.keys(): # control points are in opposite direction - reverse control points, append to start
if mr:
iterations = int((distance(*segment[0][:2]) + distance(*segment[0][1:])) // (mr * 3)) # adjust subdivision to the length of bezier's control polygon
else:
iterations = 2
iterations = 2 if iterations > 2 else iterations
subbeziers = offset_bezier(segment[0][::-1], half_gauge, drive_right, split_iterations=iterations)
wrapped_subbeziers = []
for subbez in subbeziers:
wrapped_subbeziers.append([subbez])
doubled_segments = wrapped_subbeziers + doubled_segments
else: # polyline, add shifted points
if tuple(original_labels) in complex_edges.keys(): # polyline is in right direction
doubled_segments.append(offset_polyline(segment, half_gauge, drive_right)) # drive_right True -> we want right rail
elif tuple(original_labels[::-1]) in complex_edges.keys(): # polyline is in opposite direction - reverse rail points, append to start
doubled_segments = [offset_polyline(segment, half_gauge, 1-drive_right)[::-1]] + doubled_segments # append to front
# 5.4 # Chop off multi-segment Shapes' inner overcrossings and insert Shapes to rails_dict:
if len(doubled_segments) == 1: # just 1 segment
rails_dict["Edges"][rail_index]["Shape"] = doubled_segments[0]
else:
# solve inner crossings, only then insert:
doubled_index = 0
while doubled_index < len(doubled_segments) - 1: # list may dynamically expand, this prevents overflow
segment = doubled_segments[doubled_index]
next_segment = doubled_segments[doubled_index+1]
if isinstance(segment[0][0], list) and isinstance(next_segment[0][0], list): # segments: bezier ; bezier
chop_point = bezier_intersection(segment, next_segment)
last_dict = int(isinstance(segment[0][-1], dict)) # segment ends on dict -> 1 / doesn't -> 0
if chop_point: # interection exists
if chop_point != segment[0][-1]: # interection exists, it's not just a touch
if isinstance(segment[0][-1], dict):
segment[0][-1]["Offsets"][1] = chop_point
else:
segment[0].append({"Offsets":[False, chop_point]})
if chop_point != next_segment[0][-1]: # next bezier needs to be chopped, it's not just a touch
next_segment[0].append({"Offsets":[chop_point, False]})
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
elif segment[0][-1-last_dict] != next_segment[0][0]: # beziers don't touch, insert connecting line (complex_vert alternative)
doubled_segments.insert(doubled_index + 1, [segment[0][-1-last_dict], next_segment[0][0]]) # insert new "poly"line between beziers
doubled_index += 1 # move on to next segment
elif isinstance(segment[0][0], list): # segments: bezier ; line
offsets_dict = segment[0].pop() if isinstance(segment[0][-1], dict) else False
chop_point = bezier_intersection(segment, [next_segment[0], next_segment[1]])
if offsets_dict: # offsets were lost in bezier_intersection()
segment[0].append(offsets_dict)
if chop_point:
if isinstance(segment[0][-1], dict):
segment[0][-1]["Offsets"][1] = chop_point
else:
segment[0].append({"Offsets":[False, chop_point]})
next_segment[0] = chop_point
else:
last_dict = int(isinstance(segment[0][-1], dict)) # segment ends on dict -> 1 / doesn't -> 0
next_segment = [segment[0][-1-last_dict]] + next_segment # insert point at the beginning of next polyline
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
elif isinstance(next_segment[0][0], list): # segments: line ; bezier
chop_point = bezier_intersection(next_segment, [segment[-1], segment[-2]])
if chop_point:
segment[-1] = chop_point
next_segment[0].append({"Offsets":[chop_point, False]})
else:
segment.append(next_segment[0][0]) # append bezier's point to this polyline
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
else: # segments: line ; line
chop_point = intersection_point([segment[-1], segment[-2]], [next_segment[0], next_segment[1]])
if chop_point:
segment[-1] = chop_point
next_segment[0] = chop_point
else:
segment.append(next_segment[0]) # append point to this polyline
doubled_segments[doubled_index] = segment
doubled_segments[doubled_index+1] = next_segment
doubled_index += 1
rails_dict["Edges"][rail_index]["Shape"] = []
for doubled_segment in doubled_segments: # finally insert Shape to rails_dict
if isinstance(doubled_segment[0][0], list): # appending bezier
rails_dict["Edges"][rail_index]["Shape"].append(doubled_segment[0])
else: # appending polyline
rails_dict["Edges"][rail_index]["Shape"] += doubled_segment
unchopped_shapes[rail_index] = rails_dict["Edges"][rail_index] # add for multi-edge corrections in step 6
""" ~~~~~~~~~~ 6 - Fix Shapes' incorrect, ignored 1st points and overflowing end bits ~~~~~~~~~~ """
# chop off last part of complex edges, that ignore intersection point (bit of line or bit of curve):
for rail_index, unchopped_rail in unchopped_shapes.items():
# 6.1 # Correct Shape's first, incorrect, ignored point
vert_label = unchopped_rail["Vertices"][0] # current rail's start vertice label
# load standardized (vertice) xy
if isinstance(rails_dict["Vertices"][vert_label]["Coordinates"][0], list): # Complex vertice [[xy], [xy]]
ignored_start = rails_dict["Vertices"][vert_label]["Coordinates"][1]
else: # Simple point
ignored_start = rails_dict["Vertices"][vert_label]["Coordinates"]
if isinstance(rails_dict["Edges"][rail_index]["Shape"][0][0], list): # correcting 1st control point of bezier
suspect_subbeziers = [] # ignored_start may cut some of those
subbez_index = 0
while isinstance(rails_dict["Edges"][rail_index]["Shape"][subbez_index][0], list): # scans through subbeziers, will run at least once
# create a protected copy to prevent bezier_crossing from overriding Offsets:
if not isinstance(rails_dict["Edges"][rail_index]["Shape"][subbez_index][-1], dict):
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][subbez_index]]
else: # omit offset from creating subbez_copy
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][subbez_index][:-1]]
suspect_subbeziers.append(subbez_copy)
subbez_index += 1
# suspecing last subbez, while condition would overflow
if len(rails_dict["Edges"][rail_index]["Shape"]) == subbez_index:
break
(crossed_start, crossed_index) = beziers_crossing(suspect_subbeziers, ignored_start)
if crossed_start:
# chop off omitted beginning:
rails_dict["Edges"][rail_index]["Shape"] = rails_dict["Edges"][rail_index]["Shape"][crossed_index:] # cut off entirely omitted subbeziers, perhaps TEMP?
offsets = rails_dict["Edges"][rail_index]["Shape"][0][-1] # protect offsets from bezier_intersection()
offsets_defined = isinstance(rails_dict["Edges"][rail_index]["Shape"][0][-1], dict) # Were offsets already inserted?
if offsets_defined:
rails_dict["Edges"][rail_index]["Shape"][0][-1]["Offsets"][0] = ignored_start
else:
rails_dict["Edges"][rail_index]["Shape"][0].append({"Offsets":[ignored_start, False]})
else: # ignored point is not part of bezier
if isinstance(rails_dict["Vertices"][vert_label]["Coordinates"][0], list): # Coordinates already a complex vertice [[xy], [xy]]
rails_dict["Vertices"][vert_label]["Coordinates"][1] = rails_dict["Edges"][rail_index]["Shape"][0][0]
else:
rails_dict["Vertices"][vert_label]["Coordinates"] = [rails_dict["Vertices"][vert_label]["Coordinates"], rails_dict["Edges"][rail_index]["Shape"][0][0]] # Make coordinates a complex vertice
else: # correcting 1st point of polyline
rails_dict["Edges"][rail_index]["Shape"][0] = ignored_start # rewrite 1st polyline point
# 6.2 # Chop off Shapes' overflowing last point
end_vertice = next(vert # finding the vertice at the end of doubled complex edge
for vert in rails_dict["Vertices"]
if vert == unchopped_rail["Vertices"][1]
)
sibling_vertice = next(vert # finding sibling graph vertice (needed to fing its crossed copy)
for vert in rails_dict["Vertices"][end_vertice]["NeighbourVertices"]
if vert != strip(vert_label) # target's NeighbourVertices are *this one* (vert_label) and *the other one*. We want the other one
)
if sibling_vertice != " ": # ignore dead-end vertices, no cutting required there...
try:
ignored_label = next(rail["Vertices"][0] # finding crossed intersection
for rail in rails_dict["Edges"]
if rail["Vertices"][0] == end_vertice
and sibling_vertice in rail["Vertices"][1]
)
except StopIteration:
print("JSON doesn't have properly linked Vertices' Neighbours ~ write_json.insert_rails()")
return False
if isinstance(rails_dict["Vertices"][ignored_label]["Coordinates"][0], list): # is intersection a complex vertice?
ignored_end = rails_dict["Vertices"][ignored_label]["Coordinates"][0]
else:
ignored_end = rails_dict["Vertices"][ignored_label]["Coordinates"]
if type(rails_dict["Edges"][rail_index]["Shape"][-1][-1]) in [list, dict]: # unchopped_rail ends on a bezier
last_dict = isinstance(rails_dict["Edges"][rail_index]["Shape"][-1][-1], dict) # unchopped_rail ends on a bezier with defined Offsets
if last_dict:
offsets = rails_dict["Edges"][rail_index]["Shape"][-1].pop(-1)["Offsets"]
if ignored_end == rails_dict["Edges"][rail_index]["Shape"][-1][-1]: # does part of bezier need to be chopped off at all?
continue
else: # check for beziers' crossing:
end_subbeziers = [] # ignored_start may cut some of those, in end-start order! (respective to vertice)
negative_index = -1
while isinstance(rails_dict["Edges"][rail_index]["Shape"][negative_index][0], list): # iterate through ending subbeziers
# create a protected copy to prevent bezier_crossing from overriding Offsets:
if not isinstance(rails_dict["Edges"][rail_index]["Shape"][negative_index][-1], dict):
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][negative_index]]
else: # omit offset from creating subbez_copy
subbez_copy = [xy.copy() for xy in rails_dict["Edges"][rail_index]["Shape"][negative_index][:-1]]
end_subbeziers.append(subbez_copy)
negative_index -= 1
# suspecing first subbez, while condition would underflow
if negative_index == -len(rails_dict["Edges"][rail_index]["Shape"]) - 1:
break
(crossed_end, crossed_index) = beziers_crossing(end_subbeziers, ignored_end)
if crossed_end:
# chop off omitted end subbeziers:
if crossed_index != 0: # [:-0] slice would delete whole Shape
rails_dict["Edges"][rail_index]["Shape"] = rails_dict["Edges"][rail_index]["Shape"][:-crossed_index]
if last_dict and crossed_index == 0:
rails_dict["Edges"][rail_index]["Shape"][-1].append({"Offsets":[offsets[0], ignored_end]})
else:
rails_dict["Edges"][rail_index]["Shape"][-1].append({"Offsets":[False, ignored_end]})
elif isinstance(rails_dict["Vertices"][ignored_label]["Coordinates"][0], list): # no intersection, modify complex vertice
rails_dict["Vertices"][ignored_label]["Coordinates"][0] = rails_dict["Edges"][rail_index]["Shape"][-1][-1]
else: # no intersection, complexify vertice
rails_dict["Vertices"][ignored_label]["Coordinates"] = [rails_dict["Edges"][rail_index]["Shape"][-1][-1], rails_dict["Vertices"][ignored_label]["Coordinates"]]
else: # unchopped_rail ends on a polyline:
rails_dict["Edges"][rail_index]["Shape"][-1] = ignored_end
""" ~~~~~~~~~~ 7 - Recompute beziers to fit inside Offsets ~~~~~~~~~~ """
rails_dict["Edges"] = evaluate_offsets(rails_dict["Edges"])
""" ~~~~~~~~~~ 8 - Smoothen out all corners and insert crossings ~~~~~~~~~~ """
rails_dict = smoothen_rails(rails_dict, mr)
json_data["Rails"] = rails_dict
json_data["Vertices"] = vertices
json_data["Edges"] = edges
json_data = add_crossings(json_data, mr, drive_right)
""" ~~~~~~~~~~ 9 - Insert finished "Rails" pyJSON into the source .json file ~~~~~~~~~~ """
with open(file_path, "w", encoding="utf-8") as json_file:
json.dump(json_data, json_file)
print("Added Rails to " + file_path)
# 9.1 #: OPTIONAL 9 - create legible .txt copy)
if copy:
replicate_json(file_path) | 90973eb4a9ff6560d5865f1d3e513f9af2fdd804 | 25,350 |
def prepare_log_for_upload(symbolized_output, return_code):
  """Prepare log for upload.

  Prepends a component-revision header (for the build under test) and the
  process return code to the already-symbolized output.
  """
  # Add revision information to the logs.
  app_revision = environment.get_value('APP_REVISION')
  job_name = environment.get_value('JOB_NAME')
  components = revisions.get_component_list(app_revision, job_name)
  # Fall back to a readable placeholder when no component list is available.
  component_revisions = (
      revisions.format_revision_list(components, use_html=False) or
      'Not available.\n')
  revisions_header = (
      'Component revisions (build r{app_revision}):\n{component_revisions}\n'.
      format(
          app_revision=app_revision, component_revisions=component_revisions))
  return_code_header = 'Return code: %s\n\n' % return_code
  return revisions_header + return_code_header + symbolized_output
import math
def bucketize(point, bucket_size):
    """Floor *point* down to the next lower multiple of *bucket_size*."""
    bucket_index = math.floor(point / bucket_size)
    return bucket_size * bucket_index
def find_unit(df):
    """find unit in the df, add column to df indicating which token contains unit
    and return the unit as string."""
    unit_patterns = {"thousand": THOUSAND, "million": MILLION, "billion": BILLION}
    doc_unit = ""
    for unit_name, pattern in unit_patterns.items():
        matched = df.apply(
            lambda row: row.str.contains(pattern, case=True).any(), axis=1
        ).any(axis=None)
        if matched:
            if doc_unit:
                # A second unit matched as well -> ambiguous, mark undefined.
                doc_unit = "1"
                break
            doc_unit = unit_name
    # Flag every token that contains a unit string.
    df.loc[:, "unit"] = False
    for pattern in unit_patterns.values():
        df.loc[df["text"].str.contains(pattern, case=True), "unit"] = True
    # Default unit is "1" when nothing (or something ambiguous) matched.
    return doc_unit or "1"
import torch
def regular_channels(audio, new_channels):
    """
    torchaudio-file([tensor,sample_rate])+target_channel -> new_tensor
    """
    signal, sample_rate = audio
    if signal.shape[0] == new_channels:
        # Already has the requested channel count; return unchanged.
        return audio
    if new_channels == 1:
        # Downmix: keep only the first channel's frames.
        new_signal = signal[:1, :]
    else:
        # Upmix: duplicate the existing channel(s) along the channel axis
        # (c*f -> 2c*f).
        new_signal = torch.cat([signal, signal], dim=0)
    return [new_signal, sample_rate]
from typing import Union
from typing import Callable
def is_less_than(maximum: Union[int, float, Decimal]) -> Callable[[Union[int, float, Decimal]], bool]:
    """
    :param maximum: A number
    :return: A predicate that checks if a value is less than the given number
    """
    def check(i: Union[int, float, Decimal]):
        """
        :param i: A number
        :return: Whether the number is less than the maximum
        """
        return i < maximum

    # Give the predicate a descriptive name for debugging/introspection.
    check.__name__ = f'_{is_less_than.__name__}_{maximum}'
    return check
def a_function(my_arg, another):
    """
    This is the brief description of my function.

    This is a more complete example of my function. It can include doctest,
    code blocks or any other reST structure.

    >>> a_function(10, [MyClass('a'), MyClass('b')])
    20

    :param int my_arg: The first argument of the function. Just a number.
    :param another: The other argument of the important function.
    :type another: A list of :class:`MyClass`
    :return: The length of the second argument times the first argument.
    :rtype: int
    """
    return len(another) * my_arg
import os
def substitute_vars(oldList, runSet=None, task_file=None):
    """
    This method replaces special substrings from a list of string
    and return a new list.
    """
    keyValueList = []
    if runSet:
        benchmark = runSet.benchmark
        run_set_name = runSet.real_name if runSet.real_name else ""
        # list with tuples (key, value): 'key' is replaced by 'value'
        keyValueList = [
            ("benchmark_name", benchmark.name),
            ("benchmark_date", benchmark.instance),
            ("benchmark_path", benchmark.base_dir or "."),
            ("benchmark_path_abs", os.path.abspath(benchmark.base_dir)),
            ("benchmark_file", os.path.basename(benchmark.benchmark_file)),
            (
                "benchmark_file_abs",
                os.path.abspath(os.path.basename(benchmark.benchmark_file)),
            ),
            ("logfile_path", os.path.dirname(runSet.log_folder) or "."),
            ("logfile_path_abs", os.path.abspath(runSet.log_folder)),
            ("rundefinition_name", run_set_name),
            ("test_name", run_set_name),
        ]
    if task_file:
        # Task definitions (*.yml) get a different variable prefix.
        var_prefix = "taskdef_" if task_file.endswith(".yml") else "inputfile_"
        keyValueList += [
            (var_prefix + "name", os.path.basename(task_file)),
            (var_prefix + "path", os.path.dirname(task_file) or "."),
            (var_prefix + "path_abs", os.path.dirname(os.path.abspath(task_file))),
        ]
    # do not use keys twice
    assert len({key for (key, value) in keyValueList}) == len(keyValueList)
    return [util.substitute_vars(s, keyValueList) for s in oldList]
from datetime import datetime
def _safe_filename(filename):
    """
    Generates a safe filename that is unlikely to collide with existing objects
    in Google Cloud Storage.

    ``filename.ext`` is transformed into ``filename-YYYY-MM-DD-HHMMSS.ext``
    """
    filename = secure_filename(filename)
    # FIX: the module does ``from datetime import datetime``, so the class
    # itself is bound to the name -- ``datetime.datetime.utcnow()`` would
    # raise AttributeError.
    date = datetime.utcnow().strftime("%Y-%m-%d-%H%M%S")
    basename, extension = filename.rsplit('.', 1)
    return "{0}-{1}.{2}".format(basename, date, extension)
import os
def load_model(model_name, data_dir=''):
    """
    Load and return a trained model.

    @param model_name: base name for saved files
    @param data_dir: directory containing trained model
    """
    # load json and create model; 'with' guarantees the file is closed even
    # if reading raises.
    with open(os.path.join(data_dir, '%s.json' % model_name), 'r') as json_file:
        loaded_model_json = json_file.read()
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights(os.path.join(data_dir, '%s.h5' % model_name))
    return model
import collections
def _make_with_custom_variables(func, variables):
  """Calls func and replaces any trainable variables.

  This returns the output of func, but whenever `get_variable` is called it
  will replace any trainable variables with the tensors in `variables`, in the
  same order. Non-trainable variables will re-use any variables already
  created.

  Args:
    func: Function to be called.
    variables: A list of tensors replacing the trainable variables.

  Returns:
    The return value of func is returned.
  """
  remaining = collections.deque(variables)

  def custom_getter(getter, name, **kwargs):
    # Non-trainable variables fall through to the normal getter with reuse;
    # trainable ones are substituted, in order, by the supplied tensors.
    if not kwargs["trainable"]:
      kwargs["reuse"] = True
      return getter(name, **kwargs)
    return remaining.popleft()

  return _wrap_variable_creation(func, custom_getter)
def create_loss_and_learner(
        model, labels, learning_rate,
        momentum_coef=0.0, wdecay=0.0, nesterov=False,
        gradient_clip_norm=None, gradient_clip_value=None):
    """
    Auxiliary function to create loss function (cross entropy and softmax)
    and trainer using stochastic gradient descent with momentum.

    Arguments:
        model - imported model
        labels - placeholder for one-hot labels array
        learning_rate - learning rate for trainer
        momentum_coef - coefficient of momentum (deafult 0.0)
        wdecay - amount of weight decay (default 0.0)
        nesterov - use nesterov accelerated gradient (dafault False)
        gradient_clip_norm - target gradient norm (default None)
        gradient_clip_value - value to element-wise clip gradients (default None)

    Returns:
        Loss function (mean for batch)
    """
    # Align the label axes with the model output before comparing.
    if model.axes.lengths != labels.axes.lengths:
        labels = ng.Transpose(labels)
    assert model.axes.lengths == labels.axes.lengths
    aligned_model = ng.cast_axes(model, axes=labels.axes)

    loss = ng.cross_entropy_multi(ng.softmax(aligned_model), labels)
    optimizer = GradientDescentMomentum(
        learning_rate, momentum_coef, wdecay,
        gradient_clip_norm, gradient_clip_value, nesterov)
    return ng.sequential([optimizer(loss), ng.mean(loss, out_axes=())])
import os
import sys
def get_opentsdb_config():
    """Read and parse Open TSDB config from config.ini"""
    config_path = os.path.abspath(os.path.join(__file__, os.pardir, "config.ini"))
    if not os.path.exists(config_path):
        # No config file: fall back to a local default server.
        logger.warning("No config file found. Using defaults.")
        return {
            "OPENTSDB_URL": "http://127.0.0.1:4242",
            "OPENTSDB_METRICS": "",
            "OPENTSDB_TOKEN": ""
        }
    config_parser = ConfigParser.SafeConfigParser()
    config_parser.read(config_path)
    try:
        opentsdb_url = config_parser.get('opentsdb', 'opentsdb_server_url')
        opentsdb_token = config_parser.get('opentsdb', 'token')
        opentsdb_metrics = config_parser.get('opentsdb', 'metrics')
    except ConfigParser.NoOptionError:
        logger.error(
            "Agent not correctly configured. Check config file.")
        sys.exit(1)
    if not opentsdb_url:
        logger.warning(
            "Agent not correctly configured(OPENTSDB_URL). Check config file. Using \"127.0.0.1:4242\" as default.")
        opentsdb_url = "http://127.0.0.1:4242"
    # The metrics option is a comma-separated list; empty -> no metrics.
    opentsdb_metrics = opentsdb_metrics.split(",") if opentsdb_metrics else []
    return {
        "OPENTSDB_URL": opentsdb_url,
        "OPENTSDB_METRICS": opentsdb_metrics,
        "OPENTSDB_TOKEN": opentsdb_token
    }
def mesh_subdivide_tri(mesh, k=1):
    """Subdivide a mesh using simple insertion of vertices.

    Parameters
    ----------
    mesh : Mesh
        The mesh object that will be subdivided.
    k : int
        Optional. The number of levels of subdivision. Default is ``1``.

    Returns
    -------
    Mesh
        A new subdivided mesh.

    Examples
    --------
    >>> box = Box.from_corner_corner_height([0.0, 0.0, 0.0], [1.0, 1.0, 0.0], 1.0)
    >>> mesh = Mesh.from_shape(box)
    >>> k = 2
    >>> subd = mesh_subdivide_tri(mesh, k=k)
    >>> mesh is subd
    False
    >>> type(mesh) is type(subd)
    True
    >>> k1 = sum(len(mesh.face_vertices(fkey)) for fkey in mesh.faces())
    >>> subd.number_of_faces() == (k1 if k == 1 else k1 * 3 ** (k - 1))
    True

    """
    subd = mesh_fast_copy(mesh)
    for _ in range(k):
        # Snapshot the face keys: inserting vertices adds new faces.
        for face in list(subd.faces()):
            subd.insert_vertex(face)
    # Return an instance of the same class as the input mesh.
    return type(mesh).from_data(subd.data)
import cartopy.crs as ccrs
def make_projection(proj_params):
    """
    turn a set of proj4 parameters into a cartopy laea projection
    introduced in read_resample.ipynb

    Parameters
    ----------
    proj_params: dict
        dictionary with parameters lat_0, lon_0 datum and ellps

    Returns
    -------
    cartopy projection object
    """
    globe = ccrs.Globe(datum=proj_params["datum"], ellipse=proj_params['ellps'])
    return ccrs.LambertAzimuthalEqualArea(
        central_latitude=float(proj_params['lat_0']),
        central_longitude=float(proj_params['lon_0']),
        globe=globe)
import pandas
import logging
def _shape(df):
""" Return DataFrame shape even if is not a Pandas dataframe."""
if type(df) == pandas.DataFrame or type(df) == pandas.Series:
return df.shape
try:
shape = (len(df), len(df.columns))
except Exception as e:
logging.error(e)
raise e
return shape | d5af0e3f92ee649091d9fc8b904e60931fb0f2f7 | 25,365 |
from typing import List
from pathlib import Path
def hvplot_line(
    df, title, x, y: List[str], output_dir: Path, vlines=None, save_figure=True, **kwargs
):
    """Draw line splot with optional vertical lines.

    Example:
        hvplot_line(
            df,
            title=col,
            x="time",  # This is index name
            y=col_name,
            vlines=outliers,
            output_dir=args.output_dir / "single",
            save_figure=True,
            width=1500,
            height=500,
        )

    Args:
        df ([type]): Input dataframe
        title ([type]): Graph title
        x ([type]): Column name for x-axis, can be index's name
        y (List[str]): Column name for y-axis
        output_dir (Path): Output dir for html files
        vlines ([type], optional): Vertiline of interest. Defaults to None.
        save_figure (bool, optional): True to save html file. Defaults to True.

    Returns:
        [type]: [description]
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    p = df.hvplot(
        x=x,
        y=y,
        title=title,
        kind="line",
        xlabel="Time",
        ylabel="Value",
        size=10,
        grid=True,
        legend=True,
        fontsize=15,
        rot=45,
        **kwargs,
    )
    if vlines is not None:
        # FIX: loop variable renamed from `x`, which shadowed the `x`
        # parameter above (no functional change, but error-prone).
        for vline_pos in vlines:
            p = p * hv.VLine(pd.to_datetime(vline_pos)).opts(color="red", alpha=0.3)
    if save_figure:
        hvplot.save(p, output_dir / f"{title}.html")
    return p
def load_zstack(fn):
    """
    Returns zstack, [zmin, zmax]
    """
    with open(fn, "rb") as fh:
        # Fixed-size header record: (version, shape, zrange).
        header = np.fromfile(fh, dtype=header_dtype, count=1, sep="")
        version, shape, zrange = header[0]
        # Remaining bytes are little-endian float32 samples, shaped per header.
        zstack = np.fromfile(fh, dtype='<f4', sep="").reshape(shape)
    return zstack, zrange
def parseFile(path):
    """
    Read sections headed by :SectionName into lists by section name in a dictionary
    blank lines, line preceding and ending whitespace and #Comments are stripped
    """
    d = {}
    currentList = None
    # FIX: 'with' guarantees the file is closed (the original leaked the
    # file handle).
    with open(pathPrefix() + path, 'r') as f:
        for t in f:
            # Remove comments (everything after '#')
            i = t.find('#')
            if i != -1:
                t = t[:i]
            # Strip excess whitespace
            t = t.strip()
            if not t:
                continue
            if t[0] == ':':
                # Section header: start a new list under the section name.
                currentList = []
                d[t[1:]] = currentList
            elif currentList is not None:
                # Lines before the first section header are dropped.
                currentList.append(t)
    return d
def setName(name):
    """
    Sets the name of the robot.

    This is cleared with a power cycle and displayed on the robot screen during idle times
    Name will be shortened to 11 characters

    Args:
        name (any): Name to set for the robot. Will be cast to a string

    Returns:
        None
    """
    truncated = str(name)[:11]
    # Payload is the encoded name followed by a terminating 0 byte.
    return _rc.writeAttribute(OPTYPE.ROBOT_NAME, stringToBytes(truncated) + [0])
import subprocess
def get_cmd_output(cmd):
    """Run a command in shell, and return the Unicode output."""
    try:
        raw = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        # Non-zero exit: keep whatever the command printed.
        raw = exc.output
    try:
        return raw.decode("utf-8")
    except UnicodeDecodeError:
        # Fall back to latin1, which can decode any byte sequence.
        return raw.decode("latin1")
def twoBodyCMmom(m_0, m_1, m_2):
    """relative momentum for 0 -> 1 + 2"""
    mass_sum = m_1 + m_2
    mass_diff = m_1 - m_2
    if hasattr(mass_sum, "dtype"):
        m_0 = tf.convert_to_tensor(m_0, dtype=mass_sum.dtype)
    # Kallen-style product; may be negative for bad data, in which case the
    # returned momentum is 0.
    q2 = (m_0 - mass_sum) * (m_0 + mass_sum) * (m_0 - mass_diff) * (m_0 + mass_diff)
    zeros = tf.zeros_like(m_0)
    return tf.where(q2 > 0, tf.sqrt(q2) / (2 * m_0), zeros)
def svn_repos_post_commit_hook(*args):
    """svn_repos_post_commit_hook(svn_repos_t repos, apr_pool_t pool) -> char

    SWIG-generated thin wrapper: delegates directly to the C binding in
    ``_repos``. Per the signature it presumably returns the repository's
    post-commit hook path -- confirm against the Subversion C API docs.
    """
    return _repos.svn_repos_post_commit_hook(*args)
import os
def get_window_size():
    """Return the window width and height.

    Parses ``xrandr --current`` for the active resolution(s). With several
    active outputs, returns (list_of_widths, list_of_heights); otherwise
    (width, height) as strings.
    """
    def _resolution_field(field):
        # field 1 = width, field 2 = height of each '<w>x<h>' resolution line.
        cmd = ("xrandr --current | grep '*' | uniq | awk '{print $1}'"
               " | cut -d 'x' -f" + str(field))
        return os.popen(cmd).read().strip("\n")

    width = _resolution_field(1)
    height = _resolution_field(2)
    if '\n' in width:
        # Multiple active outputs -> one entry per monitor.
        return width.split('\n'), height.split('\n')
    return width, height
def slot_selection_is_free(effect):
    """
    all slots ar selected when participant applies
    """
    return effect.instance.activity.slot_selection == 'free'
def get_consumption_tax(amount, tax_rate, decimal_type):
    """Return the consumption tax for *amount* at *tax_rate*.

    :param amount: taxable amount (falsy -> no tax)
    :param tax_rate: tax rate to apply
    :param decimal_type: rounding/conversion mode passed to get_integer
    :return: tax as an integer per *decimal_type*
    """
    if not amount:
        # No amount (None/0/empty) -> no tax.
        return 0
    tax = float(amount) * float(tax_rate)
    return get_integer(decimal_type, tax)
def homology(long_sequence, short_sequence):
    """
    Cross-compare to find the strand of long sequence with the highest similarity
    with the short sequence.

    :param long_sequence: str
    :param short_sequence: str
    :return: str, the strand of long sequence with the highest similarity with
        the short sequence (empty string when nothing matches)
    """
    window = len(short_sequence)
    best_score = 0
    best_strand = ''
    # Slide the short sequence across every alignment of the long one.
    for start in range(len(long_sequence) - window + 1):
        score = sum(
            1 for offset in range(window)
            if short_sequence[offset] == long_sequence[start + offset]
        )
        # Strictly greater: keep the earliest window on ties.
        if score > best_score:
            best_score = score
            best_strand = long_sequence[start:start + window]
    return best_strand
def oneliner_to_phylip(line):
    """Convert one-liner to phylip format.

    The input is ``label,seq,label,seq,...;`` -- alternating taxon labels and
    aligned sequences, comma separated, terminated by ';'. Returns the
    phylip-formatted alignment as a string.
    """
    seqs = line.strip(";\n").split(',')
    # FIX: zip() is lazy on Python 3; materialize so len() and indexing work.
    label_seqs = list(zip(seqs[:-1:2], seqs[1::2]))
    taxa_count = len(label_seqs)
    seq_length = len(label_seqs[0][1])
    # pad all names to length of longest name + 1 space
    max_name_length = max(len(name) for name in seqs[:-1:2]) + 1
    # add header
    header = "%s %s\n" % (taxa_count, seq_length)
    alignment = '\n'.join('%s%s' % (label.ljust(max_name_length), seq)
                          for label, seq in label_seqs)
    return header + alignment
def get_final_histogram(n_states, logfile, temp):
    """
    This function analyzes the log file and performs the following tasks:
    1. Output the counts of each lambda state at the last time frame (for
       plotting histogram)
    2. Estimate the uncertainty of free energy difference from the final
       histogram

    Parameters
    ----------
    n_states : int
        Number of lambda states
    logfile : str
        The filename of the log file
    temp : float or None
        Temperature in Kelvin. If None, the error is printed in kT and then
        converted assuming 298.15 K for the kcal/mol printout.

    Returns
    -------
    counts : np.array
        The counts of each lambda state
    """
    # FIX: use a context manager so the file is closed even on error.
    with open(logfile, 'r') as f:
        lines = f.readlines()
    # Search from the end of the file: we want the *last* MC-lambda table.
    lines.reverse()

    line_n = 0
    counts = np.zeros(n_states)
    for l in lines:
        line_n += 1
        if 'MC-lambda information' in l:
            # In file order, state rows start 2 lines below the header;
            # column index 5 holds the visit count of each state.
            for i in range(n_states):
                # start from lines[line_n - 3]
                counts[i] = float(lines[line_n - 3 - i].split()[5])
            break

    kb = 1.38064852E-23   # Boltzmann constant (J/K)
    Na = 6.0221409E23     # Avogadro's number (1/mol)
    error = np.abs(np.log(counts[0] / counts[-1]))  # dimensionless error (kT)
    if temp is None:
        print('The uncertainty of the free energy difference is %5.3f kT.' % error)
        temp = 298.15  # default
        # kT -> kJ/mol -> kcal/mol (factor 0.239...).
        error *= (kb * Na * temp / 1000) * 0.23900573613
        print('Or at 298.15K, the uncertainty is %5.3f kcal/mol' % error)
    else:
        error *= (kb * Na * float(temp) / 1000) * \
            0.23900573613  # unit: kcal/mol
        print('The uncertainty of the free energy difference is %5.3f kcal/mol.' % error)
    return counts
import logging
def create_sql_delete_stmt(del_list, name):
"""
:param del_list: list of records that need to be formatted in SQL delete statement.
:param name: the name of the table
:return: SQL statement for deleting the specific records
"""
sql_list = ", ".join(del_list)
sql_stmt = f"DELETE FROM method_usage.pandas_{name} WHERE {name}_id IN ({sql_list})"
logging.info(f"{len(del_list)} {name} in delete statement")
return sql_stmt | aec744198f1b0dd30836f431ac51a4080911f8ae | 25,379 |
def parse_track(trackelement):
    """Extract info from every track entry and output to list.

    Returns [date, trackname, artistname, albumname, trackmbid, artistmbid,
    albummbid], with any missing (None) field normalized to ''.
    """
    artist = trackelement.find('artist')
    # FIX: Element.getchildren() was removed in Python 3.9; len() on an
    # Element counts its children and works on all versions.
    if len(artist):
        # artist info is nested in loved/banned tracks xml
        artistname = artist.find('name').text
        artistmbid = artist.find('mbid').text
    else:
        artistname = artist.text
        artistmbid = artist.get('mbid')

    album = trackelement.find('album')
    if album is None:
        # no album info for loved/banned tracks
        albumname = ''
        albummbid = ''
    else:
        albumname = album.text
        albummbid = album.get('mbid')

    trackname = trackelement.find('name').text
    trackmbid = trackelement.find('mbid').text
    date = trackelement.find('date').get('uts')

    output = [date, trackname, artistname, albumname, trackmbid, artistmbid, albummbid]
    # Normalize missing values so callers always get strings.
    return ['' if v is None else v for v in output]
from typing import Mapping
def _coord_matrix(model, pos, noutp):
"""
Create an array representing inputs and outputs of a simple model.
The array has a shape (noutp, model.n_inputs).
Parameters
----------
model : `astropy.modeling.Model`
model
pos : str
Position of this model in the expression tree.
One of ['left', 'right'].
noutp : int
Number of outputs of the compound model of which the input model
is a left or right child.
"""
if isinstance(model, Mapping):
axes = []
for i in model.mapping:
axis = np.zeros((model.n_inputs,))
axis[i] = 1
axes.append(axis)
m = np.vstack(axes)
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[: model.n_outputs, :model.n_inputs] = m
else:
mat[-model.n_outputs:, -model.n_inputs:] = m
return mat
if not model.separable:
# this does not work for more than 2 coordinates
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[:model.n_outputs, : model.n_inputs] = 1
else:
mat[-model.n_outputs:, -model.n_inputs:] = 1
else:
mat = np.zeros((noutp, model.n_inputs))
for i in range(model.n_inputs):
mat[i, i] = 1
if pos == 'right':
mat = np.roll(mat, (noutp - model.n_outputs))
return mat | 841b2ba8df26e424f2fcafc0d3180c3409078896 | 25,381 |
def showp2rev(context, mapping):
    """Integer. The repository-local revision number of the changeset's
    second parent, or -1 if the changeset has no second parent."""
    changectx = context.resource(mapping, 'ctx')
    return changectx.p2().rev()
from typing import Optional
def puan_kam(text: str = 'สวัสดี',
             first: Optional[bool] = None,
             keep_tone: Optional[bool] = None,
             all: Optional[bool] = False,
             skip_tokenize: Optional[bool] = None):
    """Puan kum (ผวนคำ) is a Thai toung twister, This API convert string into kampuan
    Play around with the options to see different results.

    -Args:
    - **text** (str): Defaults to 'สวัสดี'.
        - input string 'ไปเที่ยว' -> auto tokenize will apply and split to ไป and เที่ยว
        - list of string which accepted 3 formats: ['ไป','กิน','ข้าว'] | 'ไป','กิน','ข้าว' | ไป,กิน,ข้าว, the list input will also neglect auto tokenization.
    - **first** (bool, optional): if True use the first word to puan together with the last word otherwise will select second word and last word
    (None will let us decide). Defaults to None.
    - **keep_tone** (bool, optional): force whether to keep the tone when doing the puan (None will let us decide). Defaults to None.
    - **all** (bool, optional): if True will provide all 4 puan results. Defaults to False.
    - **skip_tokenize** (bool, optional): if True will skip tokenzation and use user provided list of words (input pure string will force to False or dont skip tokenization). Defaults to None.

    -Returns:
    - **results**: List of คำผวน
    """
    if not check_thai_ch(text):
        raise HTTPException(400, detail=f'Input contains non Thai')
    text = process_text_2_list(text)
    try:
        split_words = kp.puan_kam_preprocess(text, skip_tokenize=skip_tokenize)
    except ValueError:
        # Retry with the opposite tokenization mode before giving up.
        try:
            split_words = kp.puan_kam_preprocess(
                text, skip_tokenize=not(skip_tokenize))
        except ValueError as e:
            raise HTTPException(422, detail=f'Input error: {e}')
    # Pick the puan variant requested by the query options.
    if all is not None and all:
        results = kp.puan_kam_all(text=split_words)
    elif first is None and keep_tone is None:
        results = kp.puan_kam(text=split_words)
    else:
        results = kp.puan_kam_base(text=split_words, keep_tone=keep_tone, use_first=first)
    return {'input': text, 'results': results}
import sys
import importlib
def _flask_app_from_location(module_name: str) -> flask.app.Flask:
    """
    :param module_name: String specifying path and module name as well as
        actual flask app attribute. e.g., /path/to/module:flask_app
    """
    parts = module_name.split('/')
    # Last path segment is "<module>:<app attribute>".
    module_file = parts[-1].split(':')[0]
    flask_app_obj = parts[-1].split(':')[1]
    # Make the containing directory importable before loading the module.
    sys.path.append('/'.join(parts[:-1]))
    flask_app_module = importlib.import_module(module_file)
    return getattr(flask_app_module, flask_app_obj)
import wx
def get_app_wx(*args, **kwargs):
    """Create a new wx app or return an exiting one."""
    app = wx.GetApp()
    if app is not None:
        return app
    # Do not redirect stdout/stderr unless the caller asked for it.
    kwargs.setdefault('redirect', False)
    return wx.PySimpleApp(*args, **kwargs)
def do_part_1():
    """
    Solve the puzzle.
    """
    total = 0
    # Each line yields a value and an operation to fold into the total.
    for line in input_lines(1):
        value, operation = interpret_line(line)
        total = operation(total, value)
    print(total)
    return total
def handle_rpc_errors(fnc):
    """Decorator to add more context to RPC errors.

    Known gRPC status codes are reported on stdout; some are re-raised,
    others fall through and the exception object itself is returned.
    """
    @wraps(fnc)
    def wrapper(*args, **kwargs):
        try:
            return fnc(*args, **kwargs)
        except grpc.RpcError as exc:
            # lnd might be active, but not possible to contact
            # using RPC if the wallet is encrypted. If we get
            # an rpc error code Unimplemented, it means that lnd is
            # running, but the RPC server is not active yet (only
            # WalletUnlocker server active) and most likely this
            # is because of an encrypted wallet.
            # (FIX: removed two no-op statements that evaluated
            # exc.code().value and exc.details() and discarded the results.)
            if exc.code() == grpc.StatusCode.UNIMPLEMENTED:
                # raise WalletEncryptedError from None
                print("unimplemented")
                raise exc
            elif exc.code() == grpc.StatusCode.UNAVAILABLE:
                print("UNAVAILABLE")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.UNKNOWN and exc.details() == "wallet locked, unlock it to enable full RPC access":
                print("WALLET IS LOCKED!")
                raise exc
            elif exc.code() == grpc.StatusCode.UNKNOWN:
                print("unknown")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.NOT_FOUND:
                print("NOT FOUND")
                print(f"ERROR MESSAGE: {exc.details()}")
            elif exc.code() == grpc.StatusCode.PERMISSION_DENIED:
                print("PERMISSION_DENIED")
                print(f"ERROR MESSAGE: {exc.details()}")
            else:
                raise exc
            # Unhandled-but-known codes return the exception object itself.
            return exc
        except Exception as exc:
            # NOTE(review): non-gRPC errors are printed and swallowed
            # (wrapper returns None) -- presumably intentional, but verify.
            print("unknown exception")
            print(exc)
    return wrapper
import ast
def _check_BoolOp_expr(boolop, t, env):
    """Boolean Operations."""
    assert boolop.__class__ is ast.BoolOp
    operator = boolop.op
    operands = boolop.values
    assert operator.__class__ in bool_ops, "%s not in bool ops" % cname(operator)
    # (BoolOp) assignment rule: every operand must check against type t.
    return all(check_expr(operand, t, env) for operand in operands)
def get_matproj(dbpath, cutoff, api_key, dataset_properties):
    """
    Build a MaterialsProject dataset backed by a local database.

    Args:
        dbpath (str): path to the local database
        cutoff (float): cutoff radius
        api_key (str): personal api_key for materialsproject.org
        dataset_properties (list): properties of the dataset

    Returns:
        AtomsData object
    """
    return MaterialsProject(dbpath, cutoff, api_key,
                            properties=dataset_properties)
def parse_change_values_from_opts(opts):
    """
    Convert optparse style options into a dictionary for changing.

    :param opts: optparse style options
    :returns: a dictionary with change values to filter devices,
              supported parameters are ip, port, replication_ip,
              replication_port, device and meta
    """
    change_values = {}
    for key in ('change_ip', 'change_port', 'change_replication_ip',
                'change_replication_port', 'change_device', 'change_meta'):
        value = getattr(opts, key, None)
        # Falsy values (None, '', 0) mean "not requested" and are skipped.
        if value:
            # Normalize IP-type values (e.g. IPv6 bracketing) so later
            # comparisons against ring entries are consistent.
            if key in ('change_ip', 'change_replication_ip'):
                value = validate_and_normalize_address(value)
            change_values[key.replace('change_', '')] = value
    return change_values
def chooseMove(board,gameState):
    """called once per turn. Calls either escapeTrail or approachOpponent to determine move choice"""
    def escapeTrail():
        """returns a command to move to the next space if we are in danger of an explosion Trail, or None if we are safe"""
        # if we are not currently on a space that is slated to contain a trail, we don't need to do anything
        if (not board[int(gameState['player']['x'])][int(gameState['player']['y'])].containsUpcomingTrail):
            return None
        escapePath = util.findPath(gameState,board,board[int(gameState['player']['x'])][int(gameState['player']['y'])],"containsUpcomingTrail",False,allowSoftBlocks=False,allowOpponent=False)
        if (escapePath == None): # todo: we should probably do something here even though we couldn't find a path to escape
            return ''
        # BUGFIX: log only after the None check; indexing a None path with
        # [-1] raised TypeError before the guarded fallback could run.
        print("escape path: {0}\nnext block is: {1}".format(escapePath,escapePath[-1]))
        if (not escapePath[-1].containsTrail):
            if (escapePath[-1].type == SpaceType.softBlock):
                # todo: we should probably do something here even though the next space in our path is currently a soft block
                return ''
            return util.moveTo(gameState,board,escapePath[-1])
        else:
            # todo: we should probably do something here even though the next space in our path is currently lethal
            return ''
    def approachOpponent():
        """returns a command to move to the next space in order to approach the opponent, or a bomb command if in range to hit opponent"""
        approachPath = util.findPath(gameState,board,board[int(gameState['player']['x'])][int(gameState['player']['y'])],"containsOpponent")
        if (approachPath == None): # todo: we should probably do something here even though we couldn't find a path to approach (this state may be unreachable though depending on implementation)
            return ''
        # BUGFIX: same None-indexing hazard as in escapeTrail — print moved
        # below the guard.
        print("approach path: {0}\nnext block is: {1}".format(approachPath,approachPath[-1]))
        if (not (approachPath[-1].containsTrail or approachPath[-1].containsUpcomingTrail)): #don't approach into a trail OR an upcoming trail todo: check number of ticks on upcoming trail instead
            if (approachPath[-1].type == SpaceType.softBlock or approachPath[-1].containsOpponent): # place a bomb if we are right next to a soft block or the opponent
                return "b" # todo: this assumes that we currently have a bomb available. Account for case when we do not have any bombs available to use
            return util.moveTo(gameState,board,approachPath[-1])
        else:
            # todo: we should probably do something here even though the next space in our path is currently lethal
            return ''
    def tryPurchaseUpgrade():
        # attempt to select an upgrade to purchase
        # we only buy pierce up til 3 (max pierce for range 3)
        if(gameState['player']['bombPierce'] < 3):
            return "buy_pierce"
        return ''
    # Priority order: survive first, then attack, then spend spare turns
    # on upgrades.
    move = escapeTrail()
    if (move == None):
        move = approachOpponent()
    if (move == None or move == ""):
        move = tryPurchaseUpgrade()
    return move
def xr_linear_trends_2D(da, dim_names, with_nans=False):
    """ calculate linear trend of 2D field in time
    ! slow, use xr_2D_trends instead
    input:
    da        .. 3D xr DataArray with time and (dim_names) dimensions
    dim_names .. tuple of 2 strings: e.g. lat, lon dimension names
    with_nans .. if truthy, drop NaNs per point before fitting
    output:
    da_trend  .. slope of linear regression
    """
    # Convert datetime-like time coordinates to floats so polyfit can
    # handle them.
    if type(da.time.values[0]) in [np.datetime64, cftime._cftime.Datetime360Day]:
        x, time_ = datetime_to_float(da)
    def xr_linear_trend_with_nans(x):
        """ function to compute a linear trend coefficient of a timeseries """
        if np.isnan(x).any():
            x = x.dropna(dim='time')
            if x.size > 1:
                pf = np.polynomial.polynomial.polyfit(x.time, x, 1)
            else:
                # not enough valid samples left to fit a line
                pf = np.array([np.nan, np.nan])
        else:
            pf = np.polynomial.polynomial.polyfit(x.time, x, 1)
        return xr.DataArray(pf[1])
    (dim1, dim2) = dim_names
    # stack lat and lon into a single dimension called allpoints
    stacked = da.stack(allpoints=[dim1, dim2])
    # apply the trend function over allpoints to calculate the trend at
    # each point; the two with_nans branches previously duplicated this
    # code and left da_trend unbound for non-literal-bool arguments.
    fit = xr_linear_trend_with_nans if with_nans else xr_linear_trend
    trend = stacked.groupby('allpoints').apply(fit)
    # unstack back to lat lon coordinates
    da_trend = trend.unstack('allpoints')
    if 'allpoints_level_0' in da_trend.coords.keys():
        da_trend = da_trend.rename({'allpoints_level_0': dim1,
                                    'allpoints_level_1': dim2})
    return da_trend
def sanitize_email(email):
    """
    Normalize an e-mail address: trim surrounding whitespace and
    lower-case the result.

    >>> sanitize_email(' MyEmailAddress@example.com ')
    'myemailaddress@example.com'
    """
    return email.strip().lower()
import numba
def draw_perm_reps(data_1, data_2, func, size=1, args=()):
    """
    Generate permutation replicates of `func` from `data_1` and
    `data_2`
    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.
    func : function
        Function, with call signature `func(x, y, *args)` to compute
        replicate statistic from permutation sample. It must return
        a single, scalar value.
    size : int, default 1
        Number of pairs bootstrap replicates to draw.
    args : tuple, default ()
        Arguments to be passed to `func`.
    Returns
    -------
    output : ndarray
        Permutation replicates.
    """
    # Convert to Numpy arrays
    data_1 = utils._convert_data(data_1)
    data_2 = utils._convert_data(data_2)
    # Fast paths: dedicated pre-compiled samplers for the two built-in
    # statistics, taken only when no extra args are supplied.
    if args == ():
        if func == diff_of_means:
            return _draw_perm_reps_diff_of_means(data_1, data_2, size=size)
        elif func == studentized_diff_of_means:
            # The studentized statistic divides by a sample variance, which
            # is undefined for a single observation.
            if len(data_1) == 1 or len(data_2) == 1:
                raise RuntimeError("Data sets must have at least two entries")
            return _draw_perm_reps_studentized_diff_of_means(data_1, data_2, size=size)
    # Make a Numba'd function for drawing reps.
    f, numba_success = utils._make_two_arg_numba_func(func, args)
    # If Numba compilation of `func` failed, fall back to a no-op "jit"
    # decorator so the sampler below still runs as pure Python.
    if numba_success:
        jit = numba.jit
    else:
        jit = utils._dummy_jit
    @jit(nopython=True)
    def _draw_perm_reps(data_1, data_2):
        # Pool the two samples, shuffle the pool, and split it back at the
        # original boundary; each shuffle yields one permutation replicate.
        n1 = len(data_1)
        x = np.concatenate((data_1, data_2))
        perm_reps = np.empty(size)
        for i in range(size):
            np.random.shuffle(x)
            perm_reps[i] = f(x[:n1], x[n1:], args)
        return perm_reps
    return _draw_perm_reps(data_1, data_2)
import os
from bs4 import BeautifulSoup
def construct(template_name, parameter_dict, path=""):
    """Construct an HTML file using a given template and parameters.
    Handles all necessary tasks for generating finished HTML files in output directory.
    Likely the tuscon function that the user will call most often in their code.
    :param template_name: Path and name of the template file
    :type template_name: str
    :param parameter_dict: Dictionary of parameters and their values which will be used for filling the template
    :type parameter_dict: dict
    :param path: Path and name of the newly constructed HTML file (If path == "", no file is output)
    :type path: str
    :return: Final HTML string that would go into the newly constructed HTML file
    :rtype: str
    :raises Exception: If tuscon_params HTML tag is absent in template or if a parameter demanded by tuscon_params is
        not found in dictionary"""
    template_name = check_path(os.path.join(templates_dir, template_name))
    with open(template_name) as template_file:
        template = BeautifulSoup(template_file, "lxml")
        if not template.find_all("tuscon_params"):
            raise Exception(
                "<tuscon_params> tag must be present in a template. If generation is not needed, then serve() "
                "as a static file instead.")
        # Normalize the declared parameter names (remove spaces) and verify
        # each one is present in the dictionary before substituting anything.
        parameter_names = [name.replace(" ", "")
                           for name in str(template.tuscon_params.string).split(",")]
        for name in parameter_names:
            if name not in parameter_dict:
                raise Exception("Parameter \"" + name + "\" demanded by template not found in dictionary")
        parse_children(template, parameter_dict, template)
        cleanup(template)
        final_html = template.prettify()
    if path != "":
        # NOTE(review): os.path.join with a single concatenated argument is a
        # no-op; presumably os.path.join(output_dir, path) was intended — confirm.
        path = check_path(os.path.join(output_dir + path), True)
        with open(path, "w") as output_file:
            output_file.write(final_html)
        print("Successfully generated " + path + " using template " + template_name)
    return final_html
def get_attachment_form(parser, token):
    """
    Get a (new) form object to upload a new attachment.

    Syntax::

        {% get_attachment_form for [object] as [varname] %}
        {% get_attachment_for for [app].[model] [object_id] as [varname] %}
    """
    node = AttachmentFormNode.handle_token(parser, token)
    return node
def greet(name):
    """Return a greeting; Johnny gets a special message."""
    if name != "Johnny":
        return f"Hello, {name}!"
    return "Hello, my love!"
import numpy
def get_naca_points(naca_digits, number_of_points=100,
                    sharp_trailing_edge=True,
                    abscissa_map=lambda x: 0.03*x+0.97*x**2,
                    verbose=False):
    """
    Return a list of coordinates of NACA 4-digit and 5-digit series
    airfoils.

    :param naca_digits: airfoil designation as a digit string, e.g. "0012"
        or "23012"
    :param number_of_points: number of chordwise sample abscissae
    :param sharp_trailing_edge: selects the trailing-edge thickness
        coefficient (sharp/closed vs. blunt/open)
    :param abscissa_map: map applied to the uniform samples in [0, 1]; the
        default has slope 0.03 at x=0, so samples cluster near the leading edge
    :param verbose: if True, print diagnostic information while decoding
    :raises NotImplementedError: for unsupported digit combinations
    """
    # `explain` is a diagnostic printer; it is a no-op unless verbose was set.
    if verbose:
        def explain(*s):
            print(" ".join(str(s_i) for s_i in s))
    else:
        def explain(*s):
            pass
    explain("Airfoil: NACA-%s" % naca_digits)
    # Coefficient of the last term in the NACA thickness polynomial; the two
    # values correspond to a closed (sharp) vs. open (blunt) trailing edge.
    if sharp_trailing_edge:
        explain("Sharp trailing edge")
        edge_coeff = 0.1036
    else:
        explain("Blunt trailing edge")
        edge_coeff = 0.1015
    raw_abscissae = numpy.linspace(0, 1, number_of_points, endpoint=True)
    abscissae = numpy.empty_like(raw_abscissae)
    for i in range(number_of_points):
        abscissae[i] = abscissa_map(raw_abscissae[i])
    digits_int = int(naca_digits)
    if len(naca_digits) == 4:
        # Decode MPTT: TT = thickness in % chord, P = position of max camber
        # in tenths of chord, M = max camber in % chord.
        thickness = (digits_int % 100)
        max_camber_pos = (digits_int % 1000) - thickness
        max_camber = (digits_int % 10000) - max_camber_pos - thickness
        thickness = thickness / 1e2
        max_camber_pos = max_camber_pos / 1e3
        max_camber = max_camber / 1e5
        explain("Thickness:", thickness)
        explain("Position of maximum camber:", max_camber_pos)
        explain("Maximum camber:", max_camber)
        if max_camber == 0 and max_camber_pos == 0:
            explain("Symmetric 4-digit airfoil")
            points = FourDigitsSymmetric(thickness, edge_coeff)
        elif max_camber != 0 and max_camber_pos != 0:
            explain("Cambered 4-digit airfoil")
            points = FourDigitsCambered(thickness, max_camber,
                    max_camber_pos, edge_coeff)
        else:
            # One of camber/camber-position is zero while the other is not:
            # that is not a valid 4-digit designation.
            raise NotImplementedError(
                "You must decide whether your airfoil shall be cambered or not!")
    elif len(naca_digits) == 5:
        # Decode LPPTT: TT = thickness in % chord; the leading three digits
        # (the "identifier") select the standard mean line below.
        thickness = (digits_int % 100)
        max_camber_pos = (digits_int % 10000) - thickness
        thickness = thickness / 1e2
        max_camber_pos = max_camber_pos / 2e4
        explain("Thickness:", thickness)
        explain("Position of maximum camber:", max_camber_pos)
        identifier = digits_int // 100
        # Tabulated (m, k1) constants for the standard 210-250 camber lines.
        if identifier == 210:
            m = 0.058
            k1 = 361.4
        elif identifier == 220:
            m = 0.126
            k1 = 51.64
        elif identifier == 230:
            m = 0.2025
            k1 = 15.957
        elif identifier == 240:
            m = 0.29
            k1 = 6.643
        elif identifier == 250:
            m = 0.391
            k1 = 3.23
        else:
            raise NotImplementedError("5-digit series only implemented for "
                    "the first three digits in 210, 220, 230, 240, 250!")
        explain("5-digit airfoil")
        points = FiveDigits(thickness, m, k1, edge_coeff)
    else:
        raise NotImplementedError(
                "Only the 4-digit and 5-digit series are implemented!")
    # Evaluate the upper and lower surface at every abscissa; `points`
    # returns an (x, y) pair per call.
    points_upper = numpy.zeros((len(abscissae), 2))
    points_lower = numpy.zeros((len(abscissae), 2))
    for i in range(len(abscissae)):
        points_upper[i] = points(abscissae[i], "upper")
        points_lower[i] = points(abscissae[i], "lower")
    # Concatenate upper surface (leading to trailing edge) with the reversed
    # lower surface; the sharp case drops the first/last upper points,
    # presumably to avoid duplicated edge points — TODO confirm.
    if sharp_trailing_edge:
        return list(points_upper)[1:-1] + list(points_lower[::-1])
    else:
        return list(points_upper)[1:] + list(points_lower[::-1])
def get_request_now():
    """
    Return the current local time as a naive datetime.

    The SOAP endpoint expects timestamps that carry local-time values but no
    tzinfo: e.g. at utc+1, utc 2016/03/30 0:00 must be sent as 2016/03/30
    1:00 without tzinfo. That's pretty ugly, but so it goes. We therefore
    take utc now, convert it to local time, and strip the tzinfo to make it
    naive.
    """
    local_now = timezone.localtime(timezone.now())
    return timezone.make_naive(local_now)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.