| content | sha1 | id |
|---|---|---|
from mypy.nodes import FuncDef
from mypy.plugin import SemanticAnalyzerPluginInterface
from mypy.types import AnyType, CallableType, Type, TypeOfAny, UnboundType
def _get_func_def_ret_type(semanal: SemanticAnalyzerPluginInterface, funcdef: FuncDef) -> Type:
"""
Given a `FuncDef`, return its return-type (or `Any`)
"""
ret_type = None
type_ = funcdef.type
if isinstance(type_, CallableType):
ret_type = type_.ret_type
if isinstance(ret_type, UnboundType):
ret_type = semanal.anal_type(ret_type)
return ret_type or AnyType(TypeOfAny.unannotated) | cb5d22aa5ebf5988c24f322281c3bd8e3ddb757c | 3,633,200 |
def setup(hass, config):
"""Mock a successful setup."""
return True | fd2977534aa8a165b49c4fbddc513c8f77b0588d | 3,633,201 |
from django.urls import NoReverseMatch, reverse
def generate_breadcrumbs(text):
    """
    Expects a string with format `name=url, name=url, ...` and converts it into a
    dictionary `{name: url, name: url, ...}`, resolving each URL name via Django's
    `reverse()` and falling back to '#no-url' when resolution fails.
    """
entries = text.split(',')
values = {}
for entry in entries:
entry = entry.split('=')
breadcrumb = entry[0]
try:
url = entry[1]
url = reverse(url)
except (NoReverseMatch, IndexError):
url = '#no-url'
values.update({breadcrumb: url})
return values | 4b647e14ba28c25769d6c2d288cc115558ccc331 | 3,633,202 |
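# Usage sketch (assumes a Django URLconf where the name 'home' resolves; the
# names here are illustrative, not from the source):
#   generate_breadcrumbs("Home=home,About=no-such-name")
#   -> {'Home': '/', 'About': '#no-url'}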
def _compile_docker_commands(app_name, assembled_specs, port_spec):
""" This is used to compile the command that will be run when the docker container starts
up. This command has to install any libs that the app uses, run the `always` command, and
run the `once` command if the container is being launched for the first time """
app_spec = assembled_specs['apps'][app_name]
commands = ['set -e']
commands += _lib_install_commands_for_app(app_name, assembled_specs)
if app_spec['mount']:
commands.append("cd {}".format(container_code_path(app_spec)))
commands.append("export PATH=$PATH:{}".format(container_code_path(app_spec)))
commands += _copy_assets_commands_for_app(app_spec, assembled_specs)
commands += _get_once_commands(app_spec, port_spec)
commands += _get_always_commands(app_spec)
return commands | 6b3e359e0b773acfbee7250e5bc1933a42eefdd2 | 3,633,203 |
from numpy import exp
def equation_expow3(x, a, b, c, d, e):
    """Equation form for expow3 (note: `d` and `e` are unused in this form)."""
    # exp() is assumed to be NumPy's, so array-valued x works too
    return exp(a + b*x + x*c/2) | 0e77c3537c37ce22d6cea00b373e98523335c205 | 3,633,204 |
import tensorflow as tf
def _process_cosmos_qa(example):
"""Process cosmos_qa dataset example."""
label = tf.cast(example['label'], tf.int32)
one_hot = tf.one_hot(label, 4)
options = tf.stack([
example['answer0'],
example['answer1'],
example['answer2'],
example['answer3'],
])
return {
'context': example['context'],
'question': example['question'],
'options': options,
'label': label,
'answer': tf.boolean_mask(options, one_hot)[0],
} | 42776f9958c57eb332f3130db996d1943094e6ab | 3,633,205 |
def resolve_collisions(rlist, newr, collision):
"""Adjust region list rlist to avoid overlaps with newr"""
updated = []
for i in range(len(rlist)):
if collision[i]:
updated.extend(split(rlist[i], newr))
else:
updated.append(rlist[i])
return updated | e9ecebc31348eb00bfa543890870b0155b050da9 | 3,633,206 |
from typing import Sequence
from typing import Optional
from typing import List
import os
# Note: this helper comes from the mypy codebase; Options, FileSystemCache,
# SourceFinder, BuildSource, InvalidSourceList and PY_EXTENSIONS are defined
# in mypy's own modules and are assumed importable alongside it.
def create_source_list(paths: Sequence[str], options: Options,
fscache: Optional[FileSystemCache] = None,
allow_empty_dir: bool = False) -> List[BuildSource]:
"""From a list of source files/directories, makes a list of BuildSources.
Raises InvalidSourceList on errors.
"""
fscache = fscache or FileSystemCache()
finder = SourceFinder(fscache)
sources = []
for path in paths:
path = os.path.normpath(path)
if path.endswith(PY_EXTENSIONS):
# Can raise InvalidSourceList if a directory doesn't have a valid module name.
name, base_dir = finder.crawl_up(path)
sources.append(BuildSource(path, name, None, base_dir))
elif fscache.isdir(path):
sub_sources = finder.find_sources_in_dir(path, explicit_package_roots=None)
if not sub_sources and not allow_empty_dir:
raise InvalidSourceList(
"There are no .py[i] files in directory '{}'".format(path)
)
sources.extend(sub_sources)
else:
mod = os.path.basename(path) if options.scripts_are_modules else None
sources.append(BuildSource(path, mod, None))
return sources | e34b37abfe82b1f25a493b0e6fa773eae9b59eef | 3,633,207 |
import numpy as np
def beale(xy):
    """
    The Beale function, as a set of residuals (cost = sum(residuals**2)).
    The standard Beale function uses the data [1.5, 2.25, 2.625]
    and has a global minimum at (3, 0.5). Beale's function is a coupled
    model, linear in x and quartic in y. Its data-space dimension (3) is
    greater than its model-space dimension (2).
Parameters
----------
- xy : 2-element list-like
The x,y parameters of the model.
Returns
-------
3-element list-like
The residuals of the model.
"""
x, y = xy
r1 = x - x*y
r2 = x - x*y*y
r3 = x - x*y*y*y
return np.array([r1, r2, r3]) | 222600a1ff08791e07d85e04cc481b879fa73d42 | 3,633,208 |
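# Illustrative fit with SciPy (a sketch, not from the source): the residuals
# above return x*(1 - y**k), so subtracting the standard data vector
# [1.5, 2.25, 2.625] yields a least-squares problem whose solution is the
# known global minimum (3, 0.5).
from scipy.optimize import least_squares
_data = np.array([1.5, 2.25, 2.625])
_fit = least_squares(lambda xy: beale(xy) - _data, x0=[1.0, 1.0])
print(_fit.x)  # -> approximately [3.0, 0.5]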
import numpy as np
import numpy.ma as ma
from scipy import special
def ks_twosamp(data1, data2, alternative="two-sided"):
    """
    Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
Returns
-------
    d : float
        Value of the Kolmogorov-Smirnov test statistic.
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = special.kolmogorov(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob) | c2fbca3ed32e666407f20976cffe56e4457c0e39 | 3,633,209 |
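# Quick usage sketch (the data here are illustrative): compare two samples,
# one shifted, and expect a small p-value for the two-sided alternative.
rng = np.random.default_rng(0)
d_stat, p_val = ks_twosamp(rng.normal(0, 1, 200), rng.normal(0.5, 1, 200))
print(d_stat, p_val)  # a large d and small p indicate different distributions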
def getIdFromVpcArn(resources):
    """Given a list of resource ARNs, strip off all but the id for each VPC ARN."""
vpcStr = 'vpc/'
ids = []
for resource in resources:
if vpcStr in resource:
index = resource.rfind(vpcStr)
id = resource[index+len(vpcStr):]
ids.append(id)
return ids | c1c5e5aef145ee8d3a2072604a1f531ad9198385 | 3,633,210 |
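# Usage sketch with a made-up ARN (the account id and vpc id are illustrative):
print(getIdFromVpcArn(["arn:aws:ec2:us-east-1:123456789012:vpc/vpc-0abc123"]))
# -> ['vpc-0abc123']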
def input_output_mapping():
"""Build a mapping dictionary from pfb input to output numbers."""
# the polyphase filter bank maps inputs to outputs, which the MWA
# correlator then records as the antenna indices.
# the following is taken from mwa_build_lfiles/mwac_utils.c
# inputs are mapped to outputs via pfb_mapper as follows
# (from mwa_build_lfiles/antenna_mapping.h):
# floor(index/4) + index%4 * 16 = input
# for the first 64 outputs, pfb_mapper[output] = input
pfb_mapper = [0, 16, 32, 48, 1, 17, 33, 49, 2, 18, 34, 50, 3, 19, 35, 51,
4, 20, 36, 52, 5, 21, 37, 53, 6, 22, 38, 54, 7, 23, 39, 55,
8, 24, 40, 56, 9, 25, 41, 57, 10, 26, 42, 58, 11, 27, 43, 59,
12, 28, 44, 60, 13, 29, 45, 61, 14, 30, 46, 62, 15, 31, 47,
63]
# build a mapper for all 256 inputs
pfb_inputs_to_outputs = {}
for p in range(4):
for i in range(64):
pfb_inputs_to_outputs[pfb_mapper[i] + p * 64] = p * 64 + i
return pfb_inputs_to_outputs | 1c88c7aba95218a4ce4ae792b2001e4f912da178 | 3,633,211 |
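# Sanity-check sketch: the mapping should be a 256-entry permutation of
# correlator inputs to output indices.
mapping = input_output_mapping()
assert len(mapping) == 256 and sorted(mapping.values()) == list(range(256))
print(mapping[0], mapping[16])  # input 0 -> output 0, input 16 -> output 1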
def get_testmeta_min_max_count(leaf, x_axis):
"""
    This function expects all x label titles to be numeric values
    so we can calculate their minimum and maximum.
    """
    testcases = get_testcases(leaf)
    # the x axis may be logarithmic, so we calculate the min/max positions ourselves
minval = None
maxval = None
for title, testcase_folder in testcases:
x = float(title)
if minval is None or x < minval:
minval = x
if maxval is None or x > maxval:
maxval = x
return minval, maxval, len(testcases) | 4a84a5ccdf9aa61cfee9c0f37fe5bdb56e3d32a2 | 3,633,212 |
import numpy as np
def xplus(x, v):
    """
    Adds two 3DOF states with an arbitrary number of parameters, normalizing
    the heading component (index 2) via `unwrap`.
    """
xv = np.add(x, v)
xv[2] = unwrap(xv[2])
return xv | 2861807c7288b4a7b1abf58098160215f4cd8e2c | 3,633,213 |
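# Usage sketch, assuming `unwrap` normalizes an angle to (-pi, pi]; here it
# is stubbed with a standard formulation just for the demo.
unwrap = lambda a: np.arctan2(np.sin(a), np.cos(a))
pose = xplus(np.array([1.0, 2.0, 3.0]), np.array([0.5, -0.5, 1.0]))
print(pose)  # the heading 4.0 rad wraps to about -2.283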
import subprocess
def rexec(cmd):
""" executes shell command cmd with the output
returned as a string when the command has
finished.
"""
try:
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
return exc.output[:-1]
else:
return output[:-1] | 79a7441c9798c53c2a9f064cec15d38adbe1dd6b | 3,633,214 |
import requests
import json
import time
import sys
def get_value(symbol, amount, compare=args.c):
"""
Convert some amount of some coin, into the currency specified by -c
"""
if symbol.upper() == compare.upper():
return amount
    # sometimes the API does not respond, so this loop retries a few times
num_attempts = 10
for attempt in range(num_attempts):
url = "https://api.cryptonator.com/api/ticker/{}-{}".format(symbol.lower(), compare.lower())
raw_response = requests.get(url).text
try:
response = json.loads(raw_response)
if(response["success"]):
price = response["ticker"]["price"]
value = float(price) * float(amount)
print("Successfully obtained data for " +symbol)
return value
else:
print(symbol + " is not on ticker website")
return 0 #returns zero if the coin is not on the ticker website
except json.decoder.JSONDecodeError:
print("JSON decode error from ticker " + symbol + " in attempt #" + str(attempt + 1))
time.sleep(10)
print("Trying again...")
sys.exit("couldn't get response from website")
return 0 | 90958c6e65e684ddeeff91c99af5f014d8575d27 | 3,633,215 |
def list_times(service):
"""
Lists the times from the selected calendar in ascending order.
:param service: a google 'service' object
:return: busy is a sorted list of busy times and free is a sorted list of
free times for the selected calendar(s)
"""
app.logger.debug('Entering list_times')
print('begin {}'.format(flask.session['begin_date']))
print('end {}'.format(flask.session['end_date']))
events = get_events(service)
busy = get_busy_times(events)
free = get_free_times(busy, flask.session["begin_date"],
flask.session['end_date'])
return busy, free | 7d87714bb29cc2e298a64a285b2ef72205cb6e4a | 3,633,216 |
import logging
def process(utim, data):
"""
Run process
:param Utim utim: Utim instance
:param list data: Data to process [source, destination, status, body]
:return list: [from, to, status, body]
"""
source = data[SubprocessorIndex.source.value]
destination = data[SubprocessorIndex.destination.value]
status = data[SubprocessorIndex.status.value]
body = data[SubprocessorIndex.body.value]
if (source == Address.ADDRESS_DEVICE and destination == Address.ADDRESS_UTIM and
status == Status.STATUS_PROCESS):
tag = body[0:1]
# length_bytes = body[1:3]
# length = int.from_bytes(length_bytes, byteorder='big', signed=False)
# value = body[3:3 + length]
if tag == Tag.INBOUND.NETWORK_READY:
# Get SRP step
srp_step = utim.get_srp_step()
if srp_step is None:
# Get SRP client
srp_client = utim.get_srp_client()
if srp_client is not None:
# Init srp session
uname, a = srp_client.start_authentication()
command = Tag.UCOMMAND.assemble_hello(a)
# Set new SRP step value
utim.set_srp_step(1)
# Set output parameters
source = Address.ADDRESS_UTIM
destination = Address.ADDRESS_UHOST
status = Status.STATUS_PROCESS
body = command
print('Starting SRP sequence...')
# Return STATUS_TO_SEND result
return [source, destination, status, body]
else:
logging.error("SRP client is None")
else:
logging.error("Invalid SRP step: %s", str(srp_step))
else:
logging.error("Invalid tag: %s", str(tag))
else:
logging.error("Invalid metadata: source=%s, destination=%s, status=%s", source, destination,
status)
# Return STATUS_FINALIZED result
status = Status.STATUS_FINALIZED
return [source, destination, status, body] | 5774ecf399274fa19513d866cf8a29d6d698a70d | 3,633,217 |
import pandas as pd
def get_event_list_current_file(df, fname):
Get list of events for a given filename
:param df: pd.DataFrame, the dataframe to search on
:param fname: the filename to extract the value from the dataframe
:return: list of events (dictionaries) for the given filename
"""
event_file = df[df["filename"] == fname]
if len(event_file) == 1:
if pd.isna(event_file["event_label"].iloc[0]):
event_list_for_current_file = [{"filename": fname}]
else:
event_list_for_current_file = event_file.to_dict('records')
else:
event_list_for_current_file = event_file.to_dict('records')
return event_list_for_current_file | 4fc56e23e57f021a5c84d5650d2c9586ed86b19e | 3,633,218 |
def get_dealer_reviews_from_cf(url, **kwargs):
""" Get Reviews"""
results = []
json_result = get_request(url)
if json_result:
reviews = json_result["entries"]
for review in reviews:
dealer_review = DealerReview(id=review["id"],
name=review["name"],
dealership=review["dealership"],
review=review["review"],
purchase=review["purchase"],
purchase_date=review["purchase_date"],
car_make=review["car_make"],
car_model=review["car_model"],
car_year=review["car_year"],
sentiment=analyze_review_sentiments(review.get("review", "")))
results.append(dealer_review)
return results | 08f16863e1f05d5fe45d7e6bec0cceded41994af | 3,633,219 |
import torch as th
def fused_normalize(x: th.Tensor, mean: th.Tensor, std: th.Tensor, eps: float = 1e-8):
    """Normalize or standardize `x` with the given mean/std; `eps` guards against division by zero."""
return (x - mean) / (std + eps) | 971e061da3d55642fc32729132a647cdf63d6d42 | 3,633,220 |
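# Usage sketch: standardize a batch feature-wise.
feats = th.randn(8, 4)
standardized = fused_normalize(feats, feats.mean(dim=0), feats.std(dim=0))
print(standardized.mean(dim=0))  # approximately zero per feature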
def get_attachment_file_upload_to(instance, filename):
""" Returns a valid upload path for the file of an attachment. """
return instance.get_file_upload_to(filename) | e38c51a2ca947bebe1ed274c4265081c6b9e7c41 | 3,633,221 |
import numpy as np
def relu(x):
    """Elementwise rectified linear unit (helper assumed by temp_ann)."""
    return np.maximum(0, x)
def temp_ann(S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3, I_SHSTA_0, I_SHSTA_1,
             I_SHSTA_2, I_SHSTA_3, C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3):
"""
Notes
-----
Where t = 0, provide the current time step. Where t = 1, provide the
1-month prior time step value. Repeat this pattern for all other time
steps.
Parameters
----------
S_SHSTA_t : units TAF
Volume of Shasta Reservoir in CalSim3.
I_SHSTA_t : units CFS
Flow of Shasta Inflow in CalSim3.
C_KSWCK_t : units CFS
Flow of Keswick Release in CalSim3.
Returns
-------
y : units Degrees Fahrenheit
Channel temperature of the Sacramento River Below Clear Creek.
"""
# Construct input array.
x = np.array([S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3,
I_SHSTA_0, I_SHSTA_1, I_SHSTA_2, I_SHSTA_3,
C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3])
# Pass through hidden layer 1.
W1 = np.loadtxt('W1.txt')
B1 = np.loadtxt('B1.txt')
h1 = relu(np.dot(W1, x) + B1)
# Pass through hidden layer 2.
W2 = np.loadtxt('W2.txt')
B2 = np.loadtxt('B2.txt')
h2 = relu(np.dot(W2, h1) + B2)
# Pass through hidden layer 3.
W3 = np.loadtxt('W3.txt')
B3 = np.loadtxt('B3.txt')
h3 = relu(np.dot(W3, h2) + B3)
# Pass through hidden layer 4.
W4 = np.loadtxt('W4.txt')
B4 = np.loadtxt('B4.txt')
h4 = relu(np.dot(W4, h3) + B4)
# Pass through output layer.
WO = np.loadtxt('WO.txt')
BO = np.loadtxt('BO.txt')
y = relu(np.dot(WO, h4) + BO)
print(y)
# Return result.
return y | cddc9ac237fcc446daff7f2f4ffd99783a9faab5 | 3,633,222 |
import numpy as np
def slice(Matrix, a, b):  # note: shadows the built-in `slice`
    """Slice a matrix properly - like Octave.
    Addresses the confounding inconsistency that `M[a, b]` acts differently when
    `a` and `b` have the same length than when their lengths differ.
Parameters
----------
Matrix : float array
Arbitrary array
a, b : int lists or arrays
list of rows and columns to be selected from `Matrix`
Returns
-------
Matrix : float array
Properly sliced matrix- no casting allowed.
"""
# a = a.reshape(-1)
# b = b.reshape(-1)
return (Matrix[np.array(a).reshape(-1, 1), b]
.reshape(np.array(a).shape[0], np.array(b).shape[0])) | a66dbdca7bbaf1ecf556e4cdd340d10dca28be02 | 3,633,223 |
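# Usage sketch: unlike M[rows, cols] (which pairs indices elementwise when the
# lists have equal length), this always returns the full len(a) x len(b) block.
M = np.arange(16).reshape(4, 4)
print(slice(M, [0, 2], [1, 3]))  # -> [[ 1  3] [ 9 11]]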
import functools as _functools
def identity(x):
    """Return `x` unchanged (default completion function, assumed by `completing`)."""
    return x
def completing(rf, cf=identity):
"""Returns a wrapper around `rf` that calls `cf` when invoked with one argument.
Args:
rf: A :any:`reducing function`.
cf: An optional function that accepts a single argument. Used as the
completion arity for the returned :any:`reducing function`.
Returns:
A :any:`reducing function` that dispatches to `cf` when called with a single
argument or `rf` when called with any other number of arguments.
"""
@_functools.wraps(rf)
def wrapper(*args):
if len(args) == 1:
return cf(*args)
return rf(*args)
return wrapper | 9efc81357d65871871a335d1e66fe687127568aa | 3,633,224 |
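# Usage sketch in reduce-style code: accumulate with the two-argument arity,
# then finish with the one-argument completion step (names are illustrative).
import operator
step = completing(operator.add, cf=lambda total: {"total": total})
acc = 0
for item in [1, 2, 3]:
    acc = step(acc, item)
print(step(acc))  # -> {'total': 6}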
def build_feet(
filter1, # type: pymunk.ShapeFilter
normal_rect, # type: pygame.Rect
pymunk_objects, # type: List[Any]
body_body, # type: pymunk.Body
seat_body, # type: pymunk.Body
):
# type: (...) -> Tuple[pymunk.Body, pygame.Sprite]
"""
Builds our unicycle cat's feet.
"""
radius = normal_rect.width * 0.55
feet_body = pymunk.Body()
feet_shape = pymunk.Circle(feet_body, radius, (0, 0))
feet_shape.mass = 1
feet_shape.elasticity = 0
feet_shape.friction = 100
feet_shape.filter = filter1
feet_sprite = ShapeSprite(
resources.gfx('wheel.png', convert_alpha=True), feet_shape
)
feet_sprite.layer = 0
pymunk_objects.append(feet_body)
pymunk_objects.append(feet_shape)
# adjust the position of the feet and body
feet_body.position = normal_rect.midbottom
# motor and joints for feet
joint = pymunk.PivotJoint(
body_body, feet_body, (normal_rect.centerx, normal_rect.bottom - 10), (0, 0)
)
pymunk_objects.append(joint)
joint = pymunk.PivotJoint(
seat_body, feet_body, (normal_rect.centerx, normal_rect.bottom - 10), (0, 0)
)
pymunk_objects.append(joint)
return feet_body, feet_sprite | f903b89feedab4bf80cf34b2b6c0fd6e296330d3 | 3,633,225 |
import pandas as pd
from sklearn.metrics import pairwise_distances
def zone_distances(zones):
:param zones
GeoDataFrame [*index, zone, geometry]
Must be in a CRS of unit: metre
"""
for ax in zones.crs.axis_info:
assert ax.unit_name == 'metre'
print("Calculating distances between zones...")
distances_meters = pairwise_distances(
list(zip(
zones.geometry.centroid.x.to_list(),
zones.geometry.centroid.y.to_list(),
))
)
distances = pd.DataFrame(
distances_meters / 1000,
columns=zones.zone,
index=zones.zone,
).stack()
return distances | 74538d679e7efa3e4a2dfa031548e7e2062053cb | 3,633,226 |
import re
import base64
def decode_base64(data, altchars=b'+/'):
    """Decode base64, padding being optional.
    :param data: Base64 data as an ASCII string; characters outside the base64
        alphabet are stripped and padding is restored before decoding.
    :returns: The decoded byte string.
    """
data = re.sub(rb'[^a-zA-Z0-9%s]+' % altchars,
b'', data.encode())
missing_padding = len(data) % 4
if missing_padding:
data += b'=' * (4 - missing_padding)
return base64.b64decode(data, altchars) | c99f4c832e8e990611ad8413d4b508a697504218 | 3,633,227 |
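# Usage sketch: decodes even when the '=' padding was stripped.
print(decode_base64("aGVsbG8"))  # -> b'hello'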
# `ge` here is TensorFlow 1.x's graph editor, not operator.ge
from tensorflow.contrib import graph_editor as ge
def _run_after(a, b):
    """Force operation a to run after b. Do not add control dependencies
    to ops that already run after. Returns 0 if no dependencies were added,
    1 otherwise."""
already_after = (b in a.control_inputs) or (b in [i.op for i in a.inputs])
if already_after:
return 0
ge.reroute.add_control_inputs(a, [b])
return 1 | 36489af41d4671a952dd62c4dd5d68618606a361 | 3,633,228 |
def handle_negations(tweet_tokens, lexicon_scores):
    """
    Handling of negations occurring in tweets -> shifts meaning of words
    -> if a negation is found, the polarity of the following word is flipped
Parameters
----------
tweet_tokens : List
list of tweet tokens that were already prepocessed (cleaning, etc.)
lexicon_scores : List
list of assigned sentiment scores per token (decoded as Integers)
Returns
-------
new_scores : List
list of sentiment scores (as Integers) after negiation handling
"""
    # new score list (a copy, so the caller's list is left untouched)
    new_scores = list(lexicon_scores)
# Words defining negations
# taken from https://github.com/gkotsis/negation-detection/blob/master/negation_detection.py
# and Kolchyna et al. (2015)
negations_adverbs = ["no", "without", "nil","not", "n't", "never", "none", "neith", "nor", "non",
"seldom", "rarely", "scarcely", "barely", "hardly", "lack", "lacking", "lacks",
"neither", "cannot", "can't", "daren't", "doesn't", "didn't", "hadn't",
"wasn't", "won't", "without", "hadnt", "haven't", "weren't"]
negations_verbs = ["deny", "reject", "refuse", "subside", "retract", "non"]
    # find negations in the tweet_tokens list and change the scores of negative and positive tokens
    # immediately following the negation
    index = 0
    for ii in range(len(tweet_tokens)):
        token = tweet_tokens[index]
        if (token in negations_adverbs or token in negations_verbs):
            # make sure that the end of the tweet isn't reached yet
if (index < len(tweet_tokens)-1):
# if the sentiment of the next token is positive change it to negative
if (lexicon_scores[index + 1] == 1):
new_scores[index + 1] = -1
index += 2
# else change it to positive if it is negative
elif (lexicon_scores[index + 1] == -1):
new_scores[index + 1] = 1
index += 2
else:
index += 1
# endif
else:
break
# endif
        # if neutral, leave it neutral -> go to the next token
else:
index +=1
# endif
# exit the loop when all tokens have been checked
if (index >= len(tweet_tokens)-1):
break
# endif
# endfor
# return the new scores
return new_scores | cca56e5fa1b611aa6adb2e74ab580fed49b923ee | 3,633,229 |
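# Usage sketch with toy tokens and scores (1 = positive, -1 = negative, 0 = neutral):
print(handle_negations(["not", "good", "movie"], [0, 1, 0]))  # -> [0, -1, 0]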
def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update,
num_predictions_per_location_list, box_predictor_config,
is_training, num_classes, add_background_class=True):
"""Builds a Keras-based box predictor based on the configuration.
Builds Keras-based box predictor based on the configuration.
See box_predictor.proto for configurable options. Also, see box_predictor.py
for more details.
Args:
hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams
proto and returns a `hyperparams_builder.KerasLayerHyperparams`
for Conv or FC hyperparameters.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
configuration.
    is_training: Whether the model is in training mode.
num_classes: Number of classes to predict.
add_background_class: Whether to add an implicit background class.
Returns:
box_predictor: box_predictor.KerasBoxPredictor object.
Raises:
ValueError: On unknown box predictor, or one with no Keras box predictor.
"""
if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
raise ValueError('box_predictor_config not of type '
'box_predictor_pb2.BoxPredictor.')
box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
if box_predictor_oneof == 'convolutional_box_predictor':
config_box_predictor = box_predictor_config.convolutional_box_predictor
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
return build_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
min_depth=config_box_predictor.min_depth,
max_depth=config_box_predictor.max_depth,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_depthwise=config_box_predictor.use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
config_box_predictor = (
box_predictor_config.weight_shared_convolutional_box_predictor)
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
'batch_norm')
# During training phase, logits are used to compute the loss. Only apply
# sigmoid at inference to make the inference graph TPU friendly. This is
# required because during TPU inference, model.postprocess is not called.
score_converter_fn = build_score_converter(
config_box_predictor.score_converter, is_training)
# Optionally apply clipping to box encodings, when box_encodings_clip_range
# is set.
box_encodings_clip_range = None
if config_box_predictor.HasField('box_encodings_clip_range'):
box_encodings_clip_range = BoxEncodingsClipRange(
min=config_box_predictor.box_encodings_clip_range.min,
max=config_box_predictor.box_encodings_clip_range.max)
keyword_args = None
return build_weight_shared_convolutional_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
num_predictions_per_location_list=num_predictions_per_location_list,
depth=config_box_predictor.depth,
num_layers_before_predictor=(
config_box_predictor.num_layers_before_predictor),
box_code_size=config_box_predictor.box_code_size,
kernel_size=config_box_predictor.kernel_size,
add_background_class=add_background_class,
class_prediction_bias_init=(
config_box_predictor.class_prediction_bias_init),
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
share_prediction_tower=config_box_predictor.share_prediction_tower,
apply_batch_norm=apply_batch_norm,
use_depthwise=config_box_predictor.use_depthwise,
apply_conv_hyperparams_to_heads=(
config_box_predictor.apply_conv_hyperparams_to_heads),
apply_conv_hyperparams_pointwise=(
config_box_predictor.apply_conv_hyperparams_pointwise),
score_converter_fn=score_converter_fn,
box_encodings_clip_range=box_encodings_clip_range,
keyword_args=keyword_args)
if box_predictor_oneof == 'mask_rcnn_box_predictor':
config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams)
conv_hyperparams = None
if config_box_predictor.HasField('conv_hyperparams'):
conv_hyperparams = hyperparams_fn(
config_box_predictor.conv_hyperparams)
return build_mask_rcnn_keras_box_predictor(
is_training=is_training,
num_classes=num_classes,
add_background_class=add_background_class,
fc_hyperparams=fc_hyperparams,
freeze_batchnorm=freeze_batchnorm,
use_dropout=config_box_predictor.use_dropout,
dropout_keep_prob=config_box_predictor.dropout_keep_probability,
box_code_size=config_box_predictor.box_code_size,
share_box_across_classes=(
config_box_predictor.share_box_across_classes),
predict_instance_masks=config_box_predictor.predict_instance_masks,
conv_hyperparams=conv_hyperparams,
mask_height=config_box_predictor.mask_height,
mask_width=config_box_predictor.mask_width,
mask_prediction_num_conv_layers=(
config_box_predictor.mask_prediction_num_conv_layers),
mask_prediction_conv_depth=(
config_box_predictor.mask_prediction_conv_depth),
masks_are_class_agnostic=(
config_box_predictor.masks_are_class_agnostic),
convolve_then_upsample_masks=(
config_box_predictor.convolve_then_upsample_masks))
if box_predictor_oneof == 'rfcn_box_predictor':
config_box_predictor = box_predictor_config.rfcn_box_predictor
conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
is_training=is_training,
num_classes=num_classes,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
crop_size=[config_box_predictor.crop_height,
config_box_predictor.crop_width],
num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
config_box_predictor.num_spatial_bins_width],
depth=config_box_predictor.depth,
box_code_size=config_box_predictor.box_code_size)
return box_predictor_object
raise ValueError(
'Unknown box predictor for Keras: {}'.format(box_predictor_oneof)) | 9b5247042bb1a47715c0f64f19011c5634c4aff3 | 3,633,230 |
def log_level(non_prod_value: str, prod_value: str) -> str:
    """
    Helper function for setting an appropriate log level in prod.
    Note that this variant always returns `prod_value`; `non_prod_value`
    appears to exist only to keep the call signature uniform.
    """
    return prod_value | 86f098cfe9137519da1d160c22dfc1f43303c546 | 3,633,231 |
import os
def partitioning_df(stats_df, plus_and_minus, tmp_dir, chunk_size=1000):
    """For large files the first state is very large, so we split it into a
    separate file; all the other states are then split across several files.
    """
# stats_df.to_csv('stats_df.csv', index=False, header=True, float_format='%.15f', compression='gzip',
# encoding='utf-8')
stats_df.sort_values(['prev_state','concept:name','state'], ascending=True, inplace=True)
plus_and_minus.sort_values(['prev_state','concept:name','state'], ascending=True, inplace=True)
# unique_states = stats_df.state.unique()
unique_states = stats_df.groupby(['prev_state','concept:name','state']).size().reset_index().rename(columns={0:'count'}).drop('count',axis=1)
large_states=stats_df.groupby(['prev_state','concept:name','state']).relative_time.count()
#separating large states from the others
large_states=large_states[large_states>1000].reset_index()
#['prev_state', 'concept:name', 'state']
curr_dir = os.getcwd()
idx=0
"""large state separately"""
for index,row in large_states.iterrows():
res = stats_df.loc[(stats_df.state==row['state']) & (stats_df.prev_state==row['prev_state']) & (stats_df['concept:name']==row['concept:name']), :]
res.to_pickle(os.path.join( tmp_dir, 'stats_df_%s' % (idx)))
plus_and_minus.loc[ (plus_and_minus.state==row['state']) & (plus_and_minus.prev_state==row['prev_state']) & (plus_and_minus['concept:name']==row['concept:name']), :] \
.to_pickle(os.path.join( tmp_dir, 'plus_and_minus_%s' % (idx)))
# unique_states=unique_states[unique_states!=current_state]
row_id=unique_states.index[ (unique_states.state==row['state'] )& (unique_states.prev_state==row['prev_state']) & (unique_states['concept:name']==row['concept:name'])].tolist()[0]
unique_states.drop(row_id, axis=0,inplace=True)
idx += 1
""" splitting other states regularly"""
max_index_of_large_states=idx
# print("partition of large states is %s"%(max_index_of_large_states-1))
for i in range(0, unique_states.shape[0], chunk_size):
# print("Current Chunck is: %s" % (i))
current_states = unique_states[i:i + chunk_size]
# res = stats_df.loc[stats_df.state.isin(current_states), :]
res = stats_df.iloc[current_states.index]
res.to_pickle(os.path.join( tmp_dir,'stats_df_%s'%(idx)))
# plus_and_minus.loc[plus_and_minus.state.isin(current_states), :]\
# .to_pickle(os.path.join( tmp_dir,'plus_and_minus_%s'%(idx)))
plus_and_minus.iloc[current_states.index] \
.to_pickle(os.path.join( tmp_dir, 'plus_and_minus_%s' % (idx)))
idx+=1
# return len(list(range(0, unique_states.shape[0], chunk_size))) #number of chunks
return idx , max_index_of_large_states | 3a62c7abe0ac67b095351ea69bad642b3479a36e | 3,633,232 |
def add(x, y):
""" Add two numbers and return their sum"""
return x+y | 5afb9194e696fe87f9f29f8893fd2f0e90673a1b | 3,633,233 |
from inspect import isawaitable
from typing import Any
from typing import Optional
from typing import Union
# GraphQLResolveInfo, GraphQLAbstractType, GraphQLObjectType and MaybeAwaitable
# come from graphql-core's own modules and are assumed importable alongside this.
def default_resolve_type_fn(
value: Any,
info: GraphQLResolveInfo,
abstract_type: GraphQLAbstractType
) -> MaybeAwaitable[Optional[Union[GraphQLObjectType, str]]]:
"""Default type resolver function.
If a resolveType function is not given, then a default resolve behavior is
used which attempts two strategies:
First, See if the provided value has a `__typename` field defined, if so,
use that value as name of the resolved type.
Otherwise, test each possible type for the abstract type by calling
is_type_of for the object being coerced, returning the first type that
matches.
"""
# First, look for `__typename`.
if isinstance(value, dict) and isinstance(value.get('__typename'), str):
return value['__typename']
# Otherwise, test each possible type.
possible_types = info.schema.get_possible_types(abstract_type)
is_type_of_results_async = []
for type_ in possible_types:
if type_.is_type_of:
is_type_of_result = type_.is_type_of(value, info)
if isawaitable(is_type_of_result):
is_type_of_results_async.append((is_type_of_result, type_))
elif is_type_of_result:
return type_
if is_type_of_results_async:
# noinspection PyShadowingNames
async def get_type():
is_type_of_results = [
(await is_type_of_result, type_)
for is_type_of_result, type_ in is_type_of_results_async]
for is_type_of_result, type_ in is_type_of_results:
if is_type_of_result:
return type_
return get_type()
return None | d1266fc358bbce50c226d7e40f95f6930261a121 | 3,633,234 |
def svn_repos_parse_fns2_invoke_close_node(*args):
"""svn_repos_parse_fns2_invoke_close_node(svn_repos_parse_fns2_t _obj, void * node_baton) -> svn_error_t"""
return _repos.svn_repos_parse_fns2_invoke_close_node(*args) | 996ebf6c153a659361d429ae6cbb977dc4202f3a | 3,633,235 |
import numpy as np
def sample_dball(dimension: int, amount: int, radius: float = 1) -> np.ndarray:
    """
    **Sample from a d-ball by drop of coordinates.**
    Similar to the sphere method, each coordinate is drawn from a normal
    distribution and the point is normalized, which yields samples evenly
    distributed inside the ball. Since the radius can be determined via the
    norm of the boundary points, it is also the parameter for the maximum
    radius. Note that no points will be sampled on the boundary itself.
    The computation time for this algorithm is `O(n * d)`, with `n` being the number of samples
    and `d` the number of dimensions.
+ param **dimension**: as dimension of the embedding space, type `int`.
+ param **amount**: amount of sample points, type `float`.
+ param **radius**: radius of the d-sphere, type `float`.
+ return **np.ndarray**: data points, type `np.ndarray`.
"""
# An array of d (dimension) normally distributed random variables.
# In this case, the dimension is an integer.
# This method is a result from https://www.sciencedirect.com/science/article/pii/S0047259X10001211.
# The result has been proven by http://compneuro.uwaterloo.ca/files/publications/voelker.2017.pdf.
ball = np.zeros(shape=(amount, dimension))
for i in range(0, amount):
# Radius indicates the size of the ball.
        # drawing with std `radius` alone would cancel out under the
        # normalization below, so the point is rescaled explicitly
        u = np.random.normal(0, 1, dimension + 2)
        norm = np.sum(u ** 2) ** (0.5)
        u = u / norm
        x = radius * u[0:dimension]
for j in range(0, dimension):
ball[i][j] = x[j]
return ball | a1bbf552ea05dad5b1d11fa397ae8c2ce98c77a5 | 3,633,236 |
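# Sanity-check sketch: all samples should lie strictly inside the given radius.
pts = sample_dball(dimension=3, amount=1000, radius=2.0)
print(pts.shape, np.linalg.norm(pts, axis=1).max() < 2.0)  # (1000, 3) True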
def distinguishable_paths(path1, path2):
"""
Checks if two model paths are distinguishable in a deterministic way, without looking forward
or backtracking. The arguments are lists containing paths from the base group of the model to
a couple of leaf elements. Returns `True` if there is a deterministic separation between paths,
`False` if the paths are ambiguous.
"""
e1, e2 = path1[-1], path2[-1]
for k, e in enumerate(path1):
if e not in path2:
depth = k - 1
break
else:
depth = 0
if path1[depth].max_occurs == 0:
return True
univocal1 = univocal2 = True
if path1[depth].model == 'sequence':
idx1 = path1[depth].index(path1[depth + 1])
idx2 = path2[depth].index(path2[depth + 1])
before1 = any(not e.is_emptiable() for e in path1[depth][:idx1])
after1 = before2 = any(not e.is_emptiable() for e in path1[depth][idx1 + 1:idx2])
after2 = any(not e.is_emptiable() for e in path1[depth][idx2 + 1:])
else:
before1 = after1 = before2 = after2 = False
for k in range(depth + 1, len(path1) - 1):
univocal1 &= path1[k].is_univocal()
idx = path1[k].index(path1[k + 1])
if path1[k].model == 'sequence':
before1 |= any(not e.is_emptiable() for e in path1[k][:idx])
after1 |= any(not e.is_emptiable() for e in path1[k][idx + 1:])
elif path1[k].model in ('all', 'choice'):
if any(e.is_emptiable() for e in path1[k] if e is not path1[k][idx]):
univocal1 = before1 = after1 = False
        else:
            # this loop inspects path1, so path1[k] is used here (the original
            # read path2[k], which looks like a copy-paste slip)
            if len(path1[k]) > 1 and all(e.is_emptiable() for e in path1[k]
                                         if e is not path1[k][idx]):
                univocal1 = before1 = after1 = False
for k in range(depth + 1, len(path2) - 1):
univocal2 &= path2[k].is_univocal()
idx = path2[k].index(path2[k + 1])
if path2[k].model == 'sequence':
before2 |= any(not e.is_emptiable() for e in path2[k][:idx])
after2 |= any(not e.is_emptiable() for e in path2[k][idx + 1:])
elif path2[k].model in ('all', 'choice'):
if any(e.is_emptiable() for e in path2[k] if e is not path2[k][idx]):
univocal2 = before2 = after2 = False
else:
if len(path2[k]) > 1 and all(e.is_emptiable() for e in path2[k]
if e is not path2[k][idx]):
univocal2 = before2 = after2 = False
if path1[depth].model != 'sequence':
if before1 and before2:
return True
elif before1:
return univocal1 and e1.is_univocal() or after1 or path1[depth].max_occurs == 1
elif before2:
return univocal2 and e2.is_univocal() or after2 or path2[depth].max_occurs == 1
else:
return False
elif path1[depth].max_occurs == 1:
return before2 or (before1 or univocal1) and (e1.is_univocal() or after1)
else:
return (before2 or (before1 or univocal1) and (e1.is_univocal() or after1)) and \
(before1 or (before2 or univocal2) and (e2.is_univocal() or after2)) | 140f8f18f030df233490ef242504649f175f62c7 | 3,633,237 |
import cv2
import numpy as np
def _correct_rotation(img: np.ndarray) -> np.ndarray:
    """If the image is rotated, correct for this and return it; on failure, return it unchanged."""
edges = cv2.Canny(img, 50, 150, apertureSize=3)
lines = cv2.HoughLinesP(
edges, 1, np.pi / 180, 100, minLineLength=100, maxLineGap=10
)
avg_slope = 0
cnt = 0
try:
for line in lines:
rise = line[0][3] - line[0][1]
run = line[0][2] - line[0][0]
if run != 0:
if rise / run < 0.5:
avg_slope += rise / run
cnt += 1
avg_slope = avg_slope / cnt
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(
image_center, np.arctan(avg_slope) * 180 / np.pi, 1.0
)
result = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
    except (TypeError, ZeroDivisionError):
        # no lines detected (`lines` is None) or no near-horizontal line found (cnt == 0)
        return img | d2a3760ceb4a73260f8013a9aeafcfd21cfd6a07 | 3,633,238 |
def get_xyz_coords(illuminant, observer):
"""Get the XYZ coordinates of the given illuminant and observer [1]_.
Parameters
----------
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
(x, y, z) : tuple
A tuple with 3 elements containing the XYZ coordinates of the given
illuminant.
Raises
------
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
"""
illuminant = illuminant.upper()
try:
return illuminants[illuminant][observer]
except KeyError:
raise ValueError("Unknown illuminant/observer combination\
(\'{0}\', \'{1}\')".format(illuminant, observer)) | bca3f1e4a195fc2dcfc0dd9d440a84336573d34f | 3,633,239 |
import sys
from pathlib import PurePath
def get_progname():
"""Get program name."""
return PurePath(sys.argv[0]).name | 50d49770479f4fb72711745a22ba2fbf3b0b0cf6 | 3,633,240 |
async def document_analyze(*, db:AsyncIOMotorClient = Depends(get_database), payload: NoteSchema):
"""[summary]
View inserts item in the document string.
[description]
Endpoint to retrieve an specific item.
"""
analyze = await gensim.result(db, payload)
return fix_item_id(analyze) | a7dc75306a40c2cc89016f7cf632e558e50fef7e | 3,633,241 |
import os
import shutil
import tempfile
import re
def run_gfail(args):
"""Runs ground failure.
Args:
args: dictionary or argument parser Namespace output by bin/gfail
program.
Returns:
list: Names of created files.
"""
# TODO: ADD CONFIG VALIDATION STEP THAT MAKES SURE ALL THE FILES EXIST
filenames = []
# If args is a dictionary, convert to a Namespace
if isinstance(args, dict):
args = Namespace(**args)
if args.set_default_paths:
set_default_paths(args)
print('default paths set, continuing...\n')
if args.list_default_paths:
list_default_paths()
return
if args.reset_default_paths:
reset_default_paths()
return
if args.make_webpage:
# Turn on GIS and HDF5 flags
gis = True
hdf5 = True
else:
gis = args.gis
hdf5 = args.hdf5
# Figure out what models will be run
if args.shakefile is not None: # user intends to actually run some models
shakefile = args.shakefile
# make output location for things
if args.output_filepath is None:
outdir = os.getcwd()
else:
outdir = args.output_filepath
if (hdf5 or args.make_static_pngs or
args.make_static_pdfs or
args.make_interactive_plots or
gis):
if not os.path.exists(outdir):
os.makedirs(outdir)
# download if is url
# cleanup = False
if not os.path.isfile(shakefile):
if isURL(shakefile):
# getGridURL returns a named temporary file object
shakefile = getGridURL(shakefile)
# cleanup = True # Be sure to delete it after
else:
raise NameError('Could not find "%s" as a file or a valid url'
% (shakefile))
return
eventid = getHeaderData(shakefile)[0]['event_id']
# Get entire path so won't break if running gfail with relative path
shakefile = os.path.abspath(shakefile)
if args.nest_folder:
outfolder = os.path.join(outdir, eventid)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
else:
outfolder = outdir
# Copy shake grid into output directory
        # --- this is based on advice from Mike that when running in production
        # the shake grids are not archived and so if we need/want to have
        # the exact grid used for the calculation later, if there's ever a
        # question about how the calculation was done, the safest thing is
        # to store a copy of it here.
shake_copy = os.path.join(outfolder, "grid.xml")
shutil.copyfile(shakefile, shake_copy)
# Write shakefile to a file for use later
shakename = os.path.join(outfolder, "shakefile.txt")
shake_file = open(shakename, "wt")
shake_file.write(shake_copy)
shake_file.close()
filenames.append(shakename)
config = args.config
if args.config_filepath is not None:
# only add config_filepath if full filepath not given and file
# ext is .ini
if (not os.path.isabs(config) and
os.path.splitext(config)[-1] == '.ini'):
config = os.path.join(args.config_filepath, config)
if os.path.splitext(config)[-1] == '.ini':
temp = ConfigObj(config)
if len(temp) == 0:
raise Exception(
'Could not find specified .ini file: %s' % config)
if args.data_path is not None:
temp = correct_config_filepaths(args.data_path, temp)
configs = [temp]
conffail = []
else:
# input is a list of config files
f = open(config, 'r')
configlist = f.readlines()
configs = []
conffail = []
for conf in configlist:
conf = conf.strip()
if not os.path.isabs(conf):
# only add config_filepath if full filepath not given
conf = os.path.join(args.config_filepath, conf)
try:
temp = ConfigObj(conf)
if temp:
if args.data_path is not None:
temp = correct_config_filepaths(
args.data_path, temp)
configs.append(temp)
else:
conffail.append(conf)
            except Exception:
conffail.append(conf)
print('\nRunning the following models:')
for conf in configs:
print('\t%s' % conf.keys()[0])
if len(conffail) > 0:
print('Could not find or read in the following config files:\n')
for conf in conffail:
print('\t%s' % conf)
print('\nContinuing...\n')
if args.set_bounds is not None:
if 'zoom' in args.set_bounds:
temp = args.set_bounds.split(',')
print('Using %s threshold of %1.1f to cut model bounds'
% (temp[1].strip(), float(temp[2].strip())))
bounds = get_bounds(shakefile, temp[1].strip(),
float(temp[2].strip()))
else:
temp = eval(args.set_bounds)
latmin = temp[0]
latmax = temp[1]
lonmin = temp[2]
lonmax = temp[3]
bounds = {'xmin': lonmin, 'xmax': lonmax,
'ymin': latmin, 'ymax': latmax}
print('Applying bounds of lonmin %1.2f, lonmax %1.2f, '
'latmin %1.2f, latmax %1.2f'
% (bounds['xmin'], bounds['xmax'],
bounds['ymin'], bounds['ymax']))
else:
bounds = None
if args.make_webpage or args.make_summary:
results = []
# pre-read in ocean trimming file polygons so only do this step once
if args.trimfile is not None:
if not os.path.exists(args.trimfile):
print('trimfile defined does not exist: %s\n'
'Ocean will not be trimmed.' % args.trimfile)
trimfile = None
elif os.path.splitext(args.trimfile)[1] != '.shp':
print('trimfile must be a shapefile, '
'ocean will not be trimmed')
trimfile = None
else:
trimfile = args.trimfile
else:
trimfile = None
# Get finite fault ready, if exists
ffault = None
point = True
if args.finite_fault is not None:
point = False
try:
if os.path.splitext(args.finite_fault)[-1] == '.txt':
ffault = text_to_json(args.finite_fault)
elif os.path.splitext(args.finite_fault)[-1] == '.json':
ffault = args.finite_fault
else:
print('Could not read in finite fault, will '
'try to download from comcat')
ffault = None
        except Exception:
print('Could not read in finite fault, will try to '
'download from comcat')
ffault = None
if ffault is None:
# Try to get finite fault file, if it exists
try:
testjd, detail, temp = get_event_comcat(shakefile)
if 'faultfiles' in testjd['input']['event_information']:
ffilename = testjd['input']['event_information']['faultfiles']
if len(ffilename) > 0:
# Download the file
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
temp.getContent(ffilename, filename=f.name)
ffault = text_to_json(f.name)
os.remove(f.name)
point = False
else:
point = True
except Exception as e:
print(e)
print('Unable to determine source type, unknown if finite'
' fault or point source')
ffault = None
point = False
# Loop over config files
for conf in configs:
modelname = conf.keys()[0]
print('\nNow running %s:' % modelname)
modelfunc = conf[modelname]['funcname']
if modelfunc == 'LogisticModel':
lm = LM.LogisticModel(shakefile, conf,
uncertfile=args.uncertfile,
saveinputs=args.save_inputs,
bounds=bounds,
numstd=float(args.std),
trimfile=trimfile)
maplayers = lm.calculate()
elif modelfunc == 'godt2008':
maplayers = godt2008(shakefile, conf,
uncertfile=args.uncertfile,
saveinputs=args.save_inputs,
bounds=bounds,
numstd=float(args.std),
trimfile=trimfile)
else:
print('Unknown model function specified in config for %s '
'model, skipping to next config' % modelfunc)
continue
# time1 = datetime.datetime.utcnow().strftime('%d%b%Y_%H%M')
# filename = ('%s_%s_%s' % (eventid, modelname, time1))
if args.appendname is not None:
filename = ('%s_%s_%s' % (eventid, modelname, args.appendname))
else:
filename = ('%s_%s' % (eventid, modelname))
if hdf5:
filenameh = filename + '.hdf5'
if os.path.exists(filenameh):
os.remove(filenameh)
savelayers(maplayers, os.path.join(outfolder, filenameh))
filenames.append(filenameh)
if args.make_static_pdfs or args.make_static_pngs:
plotorder, logscale, lims, colormaps, maskthreshes = \
parseConfigLayers(maplayers, conf)
mapconfig = ConfigObj(args.mapconfig)
kwargs = parseMapConfig(
mapconfig, fileext=args.mapdata_filepath)
junk, filenames1 = modelMap(
maplayers, shakefile,
suptitle=conf[modelname]['shortref'],
boundaries=None,
zthresh=0.,
lims=lims,
plotorder=plotorder,
maskthreshes=maskthreshes,
maproads=False,
mapcities=True,
colormaps=colormaps,
savepdf=args.make_static_pdfs,
savepng=args.make_static_pngs,
printparam=True,
inventory_shapefile=None,
outputdir=outfolder,
outfilename=filename,
scaletype='continuous',
logscale=logscale, **kwargs)
for filen in filenames1:
filenames.append(filen)
# make model only plots too
if len(maplayers) > 1:
plotorder, logscale, lims, colormaps, maskthreshes = \
parseConfigLayers(maplayers, conf, keys=['model'])
junk, filenames1 = modelMap(
maplayers, shakefile,
suptitle=conf[modelname]['shortref'], boundaries=None,
zthresh=0., lims=lims, plotorder=plotorder,
maskthreshes=maskthreshes, maproads=False,
mapcities=True, savepdf=args.make_static_pdfs,
savepng=args.make_static_pngs, printparam=True,
inventory_shapefile=None, outputdir=outfolder,
outfilename=filename + '-just_model',
colormaps=colormaps, scaletype='continuous',
logscale=logscale, **kwargs)
for filen in filenames1:
filenames.append(filen)
if args.make_interactive_plots:
plotorder, logscale, lims, colormaps, maskthreshes = \
parseConfigLayers(maplayers, conf)
junk, filenames1 = interactiveMap(
maplayers, plotorder=plotorder, shakefile=shakefile,
inventory_shapefile=None, maskthreshes=maskthreshes,
colormaps=colormaps, isScenario=False,
scaletype='continuous', lims=lims, logscale=logscale,
ALPHA=0.7, outputdir=outfolder, outfilename=filename,
tiletype='Stamen Terrain', separate=True,
faultfile=ffault)
for filen in filenames1:
filenames.append(filen)
if gis:
for key in maplayers:
# Get simplified name of key for file naming
                    RIDOF = r'[+-]?(?=\d*[.eE])(?=\.?\d)'\
                            r'\d*\.?\d*(?:[eE][+-]?\d+)?'
                    OPERATORPAT = r'[\+\-\*\/]*'
keyS = re.sub(OPERATORPAT, '', key)
# remove floating point numbers
keyS = re.sub(RIDOF, '', keyS)
# remove parentheses
keyS = re.sub('[()]*', '', keyS)
# remove any blank spaces
keyS = keyS.replace(' ', '')
filen = os.path.join(outfolder, '%s_%s.bil'
% (filename, keyS))
fileh = os.path.join(outfolder, '%s_%s.hdr'
% (filename, keyS))
fileg = os.path.join(outfolder, '%s_%s.tif'
% (filename, keyS))
GDALGrid.copyFromGrid(maplayers[key]['grid']).save(filen)
cmd = 'gdal_translate -a_srs EPSG:4326 -of GTiff %s %s' % (
filen, fileg)
rc, so, se = get_command_output(cmd)
# Delete bil file and its header
os.remove(filen)
os.remove(fileh)
filenames.append(fileg)
if args.make_webpage:
# Compile into list of results for later
results.append(maplayers)
# Make binary output for ShakeCast
filef = os.path.join(outfolder, '%s_model.flt'
% filename)
# And get name of header
filefh = os.path.join(outfolder, '%s_model.hdr'
% filename)
# Make file
write_floats(filef, maplayers['model']['grid'])
filenames.append(filef)
filenames.append(filefh)
if args.make_summary and not args.make_webpage:
# Compile into list of results for later
results.append(maplayers)
if args.make_webpage:
outputs = hazdev(results, configs,
shakefile, outfolder=outfolder,
pop_file=args.popfile)
filenames = filenames + outputs
if args.make_summary:
outputs = GFSummary(results, configs, args.web_template,
shakefile, outfolder=outfolder, cleanup=True,
faultfile=ffault, point=point, pop_file=args.popfile)
filenames = filenames + outputs
print('\nFiles created:\n')
for filen in filenames:
print('%s' % filen)
return filenames | 9ec18779d7742d08702a441ff192c51c899774af | 3,633,242 |
def create_example_concept_description() -> model.ConceptDescription:
"""
Creates an example :class:`~aas.model.concept.ConceptDescription`
:return: example concept description
"""
concept_description = model.ConceptDescription(
identification=model.Identifier(id_='https://acplt.org/Test_ConceptDescription',
id_type=model.IdentifierType.IRI),
is_case_of={model.Reference((model.Key(type_=model.KeyElements.GLOBAL_REFERENCE,
local=False,
value='http://acplt.org/DataSpecifications/'
'ConceptDescriptions/TestConceptDescription',
id_type=model.KeyType.IRI),))},
id_short='TestConceptDescription',
category=None,
description={'en-us': 'An example concept description for the test application',
'de': 'Ein Beispiel-ConceptDescription für eine Test-Anwendung'},
parent=None,
administration=model.AdministrativeInformation(version='0.9',
revision='0'))
return concept_description | 72c60aca03bd85fc66a5600675888a10542c1c96 | 3,633,243 |
import numpy as np
def to_int(x):
    """
    Try to convert a string to int.
    :param x: str
    :return: int or np.nan
    """
    try:
        return int(x)
    except (ValueError, TypeError):
        return np.nan | 6d488258d49fc0cb396d48e1dd74c8bd9ed9fbea | 3,633,244 |
import progressbar
import gc
def train_single_batch_agent(agent, train_batch, val_batch, acc_tolerance=1.0, train_loss_tolerance=0.01):
"""
    Train until the accuracy on the specified batch reaches perfect interpolation in loss and accuracy.
    It also prints and logs to TensorBoard every iteration.
todo - compare with train one batch
:param acc_tolerance:
:param train_loss_tolerance:
:return:
"""
set_system_wide_force_flush2()
# train_batch = next(iter(agent.dataloaders['train']))
# val_batch = next(iter(agent.dataloaders['val']))
def log_train_stats(it: int, train_loss: float, acc: float):
val_loss, val_acc = agent.forward_one_batch(val_batch, training=False)
agent.log_tb(it=it, tag1='train loss', loss=float(train_loss), tag2='train acc', acc=float(acc))
agent.log_tb(it=it, tag1='val loss', loss=float(val_loss), tag2='val acc', acc=float(val_acc))
agent.log(f"\n{it=}: {train_loss=} {acc=}")
agent.log(f"{it=}: {val_loss=} {val_acc=}")
# first batch
avg_loss = AverageMeter('train loss')
avg_acc = AverageMeter('train accuracy')
agent.args.it = 0
bar = uutils.get_good_progressbar(max_value=progressbar.UnknownLength)
while True:
train_loss, train_acc = agent.forward_one_batch(train_batch, training=True)
agent.optimizer.zero_grad()
train_loss.backward() # each process synchronizes it's gradients in the backward pass
agent.optimizer.step() # the right update is done since all procs have the right synced grads
# if agent.agent.is_lead_worker() and agent.args.it % 10 == 0:
if agent.args.it % 10 == 0:
bar.update(agent.args.it)
log_train_stats(agent.args.it, train_loss, train_acc)
            agent.save(agent.args.it)  # very expensive! since you're only fitting one batch it's ok to save every time you log - but you might want to do this less often.
agent.args.it += 1
gc.collect()
# if train_acc >= acc_tolerance and train_loss <= train_loss_tolerance:
if train_acc >= acc_tolerance:
log_train_stats(agent.args.it, train_loss, train_acc)
            agent.save(agent.args.it)  # very expensive! since you're only fitting one batch it's ok to save every time you log - but you might want to do this less often.
bar.update(agent.args.it)
break # halt once performance is good enough
return avg_loss.item(), avg_acc.item() | f35d143f7d39802e35246705ddf317b7f066986a | 3,633,245 |
def residuals(pars, data):
"""Returns data - model for given values of parameters
Parameters
----------
pars : array-like
[alpha, beta, gamma] parameters.
    data : Data object
        Data to be compared with the model.
    """
alpha, beta, gamma = pars
mod = model(data.x, alpha, beta, gamma)
res = data.y - mod
return res | ee69bb681c003041de62ea9bb7774b51f9de5600 | 3,633,246 |
def plot_psth_photostim_effect(units, condition_name_kw=['both_alm'], axs=None):
"""
For the specified `units`, plot PSTH comparison between stim vs. no-stim with left/right trial instruction
The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: bilateral ALM)
"""
units = units.proj()
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
hemi = _get_units_hemisphere(units)
# no photostim:
psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]
psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]
psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
# get photostim duration
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials(stim_trial_cond_name)
& units).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
# add shaded bar for photostim
stim_time = period_starts[np.where(period_names == 'delay')[0][0]]
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
return fig | d416207f19cf640faac1da6ab2e97c6a3fad251e | 3,633,247 |
import configparser
def load_versions(versions):
"""
parses 'jc221,jc221' etc.
returns the supported versions and orders them
from newest to oldest
"""
props = configparser.ConfigParser()
props.read(LIB_DIR / "sdkversions.properties")
known = list(props["SUPPORTED_VERSIONS"])
filtered = []
for version in versions:
if version in known:
filtered.append(version)
# sort the values based on the order of JC versions in sdkversions.properties
filtered.sort(key=known.index)
return filtered[::-1] | 28bd0e7f080fb75707f716117c0c6b9ed6d05d77 | 3,633,248 |
import click
from tabulate import tabulate
from textwrap import wrap
def market_search_formatter(search_results):
""" Formats the search results into a tabular paginated format
Args:
search_results (list): a list of results in dict format returned from the REST API
    Returns:
str: formatted results in tabular format
"""
headers = ["id", "Details", "Creator", "Price", "Category", "Rating"]
rows = []
    for item in search_results:
if item["min_price"] != item["max_price"]:
price_range = click.style("Variable", fg="blue")
else:
price_range = click.style("{} Satoshis".format(item["min_price"]),
fg="blue")
category = click.style("{}".format(item["category"]), fg="blue")
creator = click.style("{}".format(item["username"]), fg="blue")
title = click.style(item["title"], fg="blue")
rating = "Not yet rated"
if item["rating_count"] != 0:
rating = "{:.1f} ({} rating".format(item["average_rating"], int(item["rating_count"]))
if item["rating_count"] > 1:
rating += "s"
rating += ")"
rating = click.style(rating, fg="blue")
rows.append([item["id"], title, creator, price_range, category, rating])
        for line in wrap(item["description"]):
            rows.append(["", line, "", "", "", ""])
rows.append(["", "", "", "", "", ""])
return tabulate(rows, headers=headers, tablefmt="simple") | 24942024599ddd5117a604cbd4e4890a8ccb5bf6 | 3,633,249 |
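# --- Usage sketch: one fabricated search result showing the dict keys the
# formatter expects (the REST API's exact shape is an assumption here).
sample_results = [{
    "id": 1, "title": "Weather oracle", "username": "alice",
    "min_price": 1000, "max_price": 1000, "category": "data",
    "average_rating": 4.5, "rating_count": 2,
    "description": "Hourly forecasts for any lat/lon, delivered over HTTP.",
}]
print(market_search_formatter(sample_results))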
from typing import Optional
from pathlib import Path
def get_run_dir(run_number: Optional[int] = None) -> Path:
"""
Returns the directory corresponding to a given run number as a Path.
If no run number is provided, return the current run directory.
"""
if run_number is None:
run_number = options.get('current run number')
return runsdir.joinpath(f"run{run_number}") | 513c913effeadbf1267e63e1948e7ff9cb2fe753 | 3,633,250 |
def read_to_ulens_in_intvls(read, intvls):
"""Extract units within `intvls` from `read.units`."""
return [unit.length for unit in read.units
if unit.length in intvls] | 11159bea8bbf0cb68f0e9a7355c82e93b430065d | 3,633,251 |
def org_identities_edit(self) -> bool:
"""
### NOT IMPLEMENTED ###
Edit an existing Organizational Identity.
:param self:
:return
501 Server Error: Not Implemented for url: mock://not_implemented_501.local:
"""
url = self._MOCK_501_URL
resp = self._mock_session.get(
url=url
)
if resp.status_code == 200:
return True
else:
resp.raise_for_status() | bb08be8788812b396776c41f195ccce91c36a071 | 3,633,252 |
import secrets
def makePrimes(bits):
"""
Generates the prime numbers p and q.
Param bits: int -- the bit length of each prime
    Returns a tuple (p, q) of primes, or None if no pair is found within 500 attempts.
"""
p = None
q = None
for _ in range(500):
p = secrets.randbits(bits)
q = secrets.randbits(bits)
        if p != q and p > 3 and q > 3 and isPrime(p) and isPrime(q):
return (p, q) | f14c31c1ef6bbf151a743b2468db7d545ea7feb3 | 3,633,253 |
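# --- Sketch of the intended use (`isPrime` is assumed to be defined elsewhere
# in the module). Note the function returns None if no prime pair is found in
# 500 draws, so callers should handle that case.
pair = makePrimes(512)
if pair is not None:
    p, q = pair
    n = p * q  # e.g. an RSA-style modulus
    print(n.bit_length())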
def _in_splice_compatible_env():
    """
    Determines if a user is using the Splice Machine managed notebooks or not
    :return: Boolean if the user is using the Splice Environment
    """
    try:
        # these imports only resolve inside the managed notebook environment
        from beakerx import TableDisplay
        import ipywidgets
        from IPython import get_ipython
    except ImportError:
        return False
    return get_ipython() is not None | 8088e1526daa33c86a88acd08a14c77b3de5fc8d | 3,633,254 |
import torch
def make_split(dataset, holdout_fraction, seed=0, sort=False):
""" Split a Torch TensorDataset into (1-holdout_fraction) / holdout_fraction.
Args:
dataset (TensorDataset): Tensor dataset that has 2 tensors -> data, targets
        holdout_fraction (float): Fraction of the dataset that will be placed in the validation set
        seed (int, optional): seed used for shuffling the data before splitting. Defaults to 0.
        sort (bool, optional): If ``True`` the dataset will be sorted after splitting. Defaults to False.
    Returns:
        TensorDataset: 1-holdout_fraction part of the split
        TensorDataset: holdout_fraction part of the split
"""
in_keys, out_keys = get_split(dataset, holdout_fraction, seed=seed, sort=sort)
in_split = dataset[in_keys]
out_split = dataset[out_keys]
return torch.utils.data.TensorDataset(*in_split), torch.utils.data.TensorDataset(*out_split) | 148f0569320329c1737d2d585d9502db2215e9a4 | 3,633,255 |
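# --- Usage sketch with a toy dataset (relies on the external `get_split`
# helper being importable alongside `make_split`).
import torch

ds = torch.utils.data.TensorDataset(torch.randn(100, 3),
                                    torch.randint(0, 2, (100,)))
train_ds, val_ds = make_split(ds, holdout_fraction=0.2, seed=42)
print(len(train_ds), len(val_ds))  # 80 20, given get_split's contract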
from functools import wraps
from typing import Callable
def singleton(instance: str = 'name') -> Callable:
"""
Wrap injector decorator.
:param instance: name of instance to inject
:type instance: str
:return: injector decorator
:rtype: Callable
"""
def save_to_storage(factory_method):
"""
Decorate factory function with dependency storage.
:param factory_method: method that supplies specific instance
:type factory_method: Callable
:return: wrapped function
:rtype: Callable
"""
@wraps(factory_method)
def wrapper(*args, **kwargs):
returned_instance = factory_method(*args, **kwargs)
Injector.inject(returned_instance, to=instance)
return returned_instance
return wrapper
return save_to_storage | d8e9b63d81f95c18663edf00d0fb462a44805c0e | 3,633,256 |
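# --- Usage sketch: registering a factory's product under the name "db".
# `Injector` is assumed from the surrounding project; the factory body here
# is a stand-in for a real connection object.
@singleton(instance="db")
def make_db_connection(url):
    return {"url": url}

conn = make_db_connection("sqlite:///:memory:")
# Injector.inject(conn, to="db") has now run behind the scenes.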
def fillNaToNone(data):
"""Iterates through NA values and changes them to None
Parameters:
dataset (pd.Dataset): Both datasets
Returns:
data (pd.Dataset): Dataset with any NA values in the columns listed changed to None
"""
columns = ["PoolQC", "MiscFeature", "Alley", "Fence", "FireplaceQu", "GarageType", "GarageFinish",
"GarageQual", "GarageCond", "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2",
"MasVnrType"]
for column in columns:
data[column] = data[column].fillna("None")
return data | 2a6fc8008447abefd9f993b01606c1afc5aa5a8a | 3,633,257 |
import re
def is_ld_block_defn_line(mdfl):
"""
Parse GFM link definition lines of the form...
[10]: https://www.google.com
[11]: https://www.google.com "Title Info"
[1a]: https://www.google.com "Title Info {}"
[2b]: https://www.google.com "Title Info {biblio info}"
Returns footnote id, url, title, biblio-info as a list
"""
    retval = re.findall(r"^\[([a-zA-Z0-9_-]*)\]:\s*(https?://\S*)\s*\"?([^{]*)([^\"]*)\"?$", mdfl)
if not retval:
return None
ref_hdl = retval[0][0]
ref_url = retval[0][1].strip()
ref_tit = retval[0][2].strip().strip('"')
ref_bib = retval[0][3].strip().strip('"{}')
return [ref_hdl, ref_url, ref_tit, ref_bib] | 0ebd01c0c05634ee33a320fa4c280ad575ee9b25 | 3,633,258 |
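# --- Quick check against the line shapes listed in the docstring.
print(is_ld_block_defn_line('[10]: https://www.google.com'))
# -> ['10', 'https://www.google.com', '', '']
print(is_ld_block_defn_line('[2b]: https://example.com "Title {biblio info}"'))
# -> ['2b', 'https://example.com', 'Title', 'biblio info']
print(is_ld_block_defn_line('not a definition'))  # -> None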
import pandas as pd

def return_data_frame_deaths_vs_cases(dict_corona_virus: dict, data_frame_countries: pd.DataFrame) -> pd.DataFrame:
"""
This method will return a data_coronavirus frame with the fields we need to create our map with circles:
In this map, the circles will represent the ratio between deaths and number of cases
:param dict_corona_virus: This is a dictionary that contains the information about corona virus spread for
country, they key is the country and value is a list with 3 elements, cases, deaths and recovered in that order.
This is the result of a web scraper
:param data_frame_countries: Data frame with columns Country, Lat and Lon (where Lat y Lon is the latitude
and longitude of that country's capital
    :return: A data_coronavirus frame with the following columns: Country, Popup_Text (the text we will display when
    clicking on the circle), Deaths (in that country), Deaths_Rate (deaths/cases), Lat (latitude of the capital
    of that country), Lon (longitude of the capital of that country)
"""
# We create a list that contains all the column for the data_coronavirus frame result
columns_list = ["Country", "Popup_Text", "Recovered", "Recovered_Rate", "Lat", "Lon"]
# We create a new empty data_coronavirus frame with those columns
result_data_frame = pd.DataFrame(columns=columns_list)
# We get every country that has people affected by the virus
for country in dict_corona_virus.keys():
# We check in that country is in the dictionary for populations and the data_coronavirus frame for
# countries and capitals
if country in data_frame_countries["Country"].values:
            # We get the number of deaths due to corona virus in that country
            deaths = int(dict_corona_virus[country][1])
            # We get the number of cases of corona virus in that country
            cases = int(dict_corona_virus[country][0])
            # We get the ratio between deaths and cases in that country
            if cases > 0:
                rate = deaths / cases
            else:
                rate = 0
# We get the row in the data_coronavirus frame that contains the countries and longitude and latitude of the
# capital
df_aux = data_frame_countries.loc[data_frame_countries['Country'] == country]
# We get the longitude of the capital of the country for this iteration
lon = df_aux["Lon"].values
# We get the latitude of the capital of the country for this iteration
lat = df_aux["Lat"].values
# We set the text for the popup
popup_text = "<h3>{0}</h3>\n<h5>Deaths: {1}</h5>\n<h5>Cases: {2}</h5>".format(country, deaths, cases)
# We create the now row for this country
df2 = pd.DataFrame([[country, popup_text, deaths, rate, lat[0], lon[0]]], columns=columns_list)
# We add the row to the data_coronavirus frame that we will return
            result_data_frame = pd.concat([result_data_frame, df2], ignore_index=True)
# We return the data_coronavirus frame
return result_data_frame | c7b1cf4456f2495de1302eb75c0617cef1c965ae | 3,633,259 |
def vn_islowercase(char):
"""Check is lowercase for a vn character
:param char: a unicode character
:return:
"""
if char in _DIGIT or char in _ADDITIONAL_CHARACTERS:
return True
return char in VN_LOWERCASE | 2393de33155a940f260d91f7304f7f3c35ce3e4e | 3,633,260 |
def shared_template(testconfig):
"""Shared template for hyperfoil test"""
shared_template = testconfig.get('hyperfoil', {}).get('shared_template', {})
return shared_template.to_dict() | 160daa08699ae973d5cbbfe28b75f08ff3eb2f52 | 3,633,261 |
import numpy as np

def doubleSlit_interaction(psi, j0, j1, i0, i1, i2, i3):
"""
Function responsible of the interaction of the psi wave function with the
double slit in the case of rigid walls.
    The indices j0, j1, i0, i1, i2, i3 define the extent of the double slit.
Input parameters:
psi -> Numpy array with the values of the wave function at each point
in 2D space.
Indices that parameterize the double slit in the space of
points:
Horizontal axis.
j0 -> Left edge.
j1 -> Right edge.
Vertical axis.
i0 -> Lower edge of the lower slit.
i1 -> Upper edge of the lower slit.
i2 -> Lower edge of upper slit.
i3 -> Upper edge of upper slit.
Returns the array with the wave function values at each point in 2D space
updated with the interaction with the double slit of rigid walls.
"""
psi = np.asarray(psi) # Ensures that psi is a numpy array.
# We cancel the wave function inside the walls of the double slit.
psi[0:i3, j0:j1] = 0
psi[i2:i1,j0:j1] = 0
psi[i0:, j0:j1] = 0
return psi | 99fe70f564a72ff84d2de09bc93b3c9dada3c4a0 | 3,633,262 |
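# --- Usage sketch: mask a plane wave on a 256x256 grid. With rows indexed
# top-down, "upper" edges have smaller indices, so i3 < i2 < i1 < i0; the
# concrete index values below are arbitrary.
import numpy as np

psi = np.ones((256, 256), dtype=complex)
psi = doubleSlit_interaction(psi, j0=120, j1=136, i0=160, i1=140, i2=116, i3=96)
# Only the two slit openings remain non-zero within the barrier columns.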
import random
import numpy as np
import networkx as nx
from scipy import stats

def gnp_from_data(sizes, densities, directed = True):#, p_disconnect_node = None):
"""
Given a set of graph sizes (number of nodes) and densities, generate a new gnp (Bernoulli/Erdos-Renyi) random graph with size selected from the given graph sizes. Density is estimated based on a linear model of the observed sizes and densities. Currently defaults to directed graph.
"""
sizes = np.array(sizes)
densities = np.array(densities)
slope, intercept, r_value, p_value, std_err = stats.linregress(x = sizes, y = densities)
dhats = sizes*slope + intercept
resid = densities - dhats
resid_std = np.std(resid)
size = random.choice(sizes)
dhat = size*slope + intercept
err = stats.norm(0, resid_std).rvs()
d = dhat + err
rg = nx.gnp_random_graph(size, d, directed = directed)
return rg | e63cea7d705eacb77b6fdc97654d049967a4de4a | 3,633,263 |
def greedy_tsp(G, weight="weight", source=None):
"""Return a low cost cycle starting at `source` and its cost.
This approximates a solution to the traveling salesman problem.
It finds a cycle of all the nodes that a salesman can visit in order
to visit many nodes while minimizing total distance.
It uses a simple greedy algorithm.
In essence, this function returns a large cycle given a source point
for which the total cost of the cycle is minimized.
Parameters
----------
G : Graph
The Graph should be a complete weighted undirected graph.
The distance between all pairs of nodes should be included.
weight : string, optional (default="weight")
Edge data key corresponding to the edge weight.
If any edge does not have this attribute the weight is set to 1.
source : node, optional (default: first node in list(G))
Starting node. If None, defaults to ``next(iter(G))``
Returns
-------
cycle : list of nodes
Returns the cycle (list of nodes) that a salesman
can follow to minimize total weight of the trip.
Raises
------
NetworkXError
If `G` is not complete, the algorithm raises an exception.
Examples
--------
>>> from networkx.algorithms import approximation as approx
>>> G = nx.DiGraph()
>>> G.add_weighted_edges_from({
... ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3),
... ("B", "C", 12), ("B", "D", 16), ("C", "A", 13),("C", "B", 12),
... ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2)
... })
>>> cycle = approx.greedy_tsp(G, source="D")
>>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
>>> cycle
['D', 'C', 'B', 'A', 'D']
>>> cost
31
Notes
-----
This implementation of a greedy algorithm is based on the following:
- The algorithm adds a node to the solution at every iteration.
- The algorithm selects a node not already in the cycle whose connection
to the previous node adds the least cost to the cycle.
A greedy algorithm does not always give the best solution.
However, it can construct a first feasible solution which can
be passed as a parameter to an iterative improvement algorithm such
as Simulated Annealing, or Threshold Accepting.
Time complexity: It has a running time $O(|V|^2)$
"""
# Check that G is a complete graph
N = len(G) - 1
# This check ignores selfloops which is what we want here.
if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
raise nx.NetworkXError("G must be a complete graph.")
if source is None:
source = nx.utils.arbitrary_element(G)
if G.number_of_nodes() == 2:
neighbor = next(G.neighbors(source))
return [source, neighbor, source]
nodeset = set(G)
nodeset.remove(source)
cycle = [source]
next_node = source
while nodeset:
nbrdict = G[next_node]
next_node = min(nodeset, key=lambda n: nbrdict[n].get(weight, 1))
cycle.append(next_node)
nodeset.remove(next_node)
cycle.append(cycle[0])
return cycle | e9dbb0c2bb4b1b41545fd5e47d03e022bdfd5ca9 | 3,633,264 |
def get_sector(sector_id):
"""
GET: Gets all Entities on the required sector.
https://meinformoapi.herokuapp.com/entities/sectors/Ejecutivo
"""
sector_id = str(sector_id)
output = tools.filter_dict(current_entities,"sector", [sector_id])
return Response(dumps(output), mimetype='application/json') | 4df0d454e1f592173e8c43d26254a00e054fd721 | 3,633,265 |
def map_get_by_key_range(bin_name, key_range_start,
key_range_end, return_type, inverted=False):
"""Creates a map_get_by_key_range operation to be used with operate or operate_ordered
The operation returns items with keys between key_range_start(inclusive) and
key_range_end(exclusive) from the map
Args:
bin_name (str): The name of the bin containing the map.
key_range_start: The start of the range of keys to be returned. (Inclusive)
key_range_end: The end of the range of keys to be returned. (Exclusive)
return_type (int): Value specifying what should be returned from the operation.
This should be one of the aerospike.MAP_RETURN_* values.
inverted (bool): If True, values outside of the specified range will be returned, and
values inside of the range will be ignored. Default: False
Returns:
A dictionary usable in operate or operate_ordered. The format of the dictionary
should be considered an internal detail, and subject to change.
"""
op_dict = {
OP_KEY: aerospike.OP_MAP_GET_BY_KEY_RANGE,
BIN_KEY: bin_name,
KEY_KEY: key_range_start,
RANGE_KEY: key_range_end,
RETURN_TYPE_KEY: return_type,
INVERTED_KEY: inverted
}
return op_dict | 4a2ffac60203e88520a46fb28d4fc31715c16369 | 3,633,266 |
def output(input_text : str = ""):
"""
    Take a mathematics problem as a string and return its computed result.
"""
cal_object = Calculator(input_text)
return cal_object.result | 3393216343c4c2aa7a4b5e8fd73494b65f2c652f | 3,633,267 |
from functools import partial
from typing import Sequence

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
def to_tensor(X, use_cuda):
"""Turn to torch Variable.
Handles the cases:
* Variable
* PackedSequence
* numpy array
* torch Tensor
* list or tuple of one of the former
* dict of one of the former
"""
to_tensor_ = partial(to_tensor, use_cuda=use_cuda)
if isinstance(X, (Variable, nn.utils.rnn.PackedSequence)):
return X
if isinstance(X, dict):
return {key: to_tensor_(val) for key, val in X.items()}
if isinstance(X, (list, tuple)):
return [to_tensor_(x) for x in X]
if isinstance(X, np.ndarray):
X = torch.from_numpy(X)
if isinstance(X, Sequence):
X = torch.from_numpy(np.array(X))
elif np.isscalar(X):
X = torch.from_numpy(np.array([X]))
if not is_torch_data_type(X):
raise TypeError("Cannot convert this data type to a torch tensor.")
if use_cuda:
X = X.cuda()
return X | 51eaec2cdd4b64ca2a1922f36221305992d78d2b | 3,633,268 |
def get_regularization_losses(scope=None):
"""Gets the list of regularization losses.
Args:
scope: An optional scope name for filtering the losses to return.
Returns:
A list of regularization losses as Tensors.
"""
return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope) | 442d47f32e1d4be11072d0731c58bac795cce1ff | 3,633,269 |
def get_output_tracking_error_message(ulog: ULog) -> str:
"""
return the name of the message containing the output_tracking_error
:param ulog:
:return: str
"""
for elem in ulog.data_list:
if elem.name == "ekf2_innovations":
return "ekf2_innovations"
if elem.name == "estimator_innovations":
return "estimator_status"
raise PreconditionError("Could not detect the message containing the output tracking error") | 55445033308ca476b31e06a4374ad098e74f0c92 | 3,633,270 |
def getBpms():
"""
return a list of bpms object.
this calls :func:`~aphla.lattice.Lattice.getGroupMembers` of current
lattice and take a "union".
"""
return machine._lat.getGroupMembers('BPM', op='union') | 44ad3256074be5f5d521892ed48379cc6539f9d3 | 3,633,271 |
import os
import subprocess
def get_test_disk(node=1):
"""获取可测试使用的磁盘
Args:
node (int, optional): 节点号. Defaults to 1.
Returns:
[str]: 磁盘名称
"""
if os.environ.get("NODE" + str(node) + "LOCALTION") == "local":
used_disk = subprocess.getoutput(
"lsblk -l | grep -e '/.*\|\[.*\]' | awk '{print $1}' | tr -d '[0-9]' | uniq | sed -e ':a;N;$!ba;s/\\n/ /g'"
)
test_disk = subprocess.getoutput(
"lsblk -n | grep -v '└─.*\|"
+ used_disk.replace(" ", "\|")
+ "' | awk '{print $1}' | sed -e ':a;N;$!ba;s/\\n/ /g'"
)
else:
conn = ssh_cmd.pssh_conn(
os.environ.get("NODE" + str(node) + "_IPV4"),
os.environ.get("NODE" + str(node) + "_PASSWORD"),
os.environ.get("NODE" + str(node) + "_SSH_PORT"),
os.environ.get("NODE" + str(node) + "_USER"),
)
used_disk = ssh_cmd.pssh_cmd(
conn,
"lsblk -l | grep -e '/.*\|\[.*\]' | awk '{print $1}' | tr -d '[0-9]' | uniq | sed -e ':a;N;$!ba;s/\\n/ /g'",
)[1]
test_disk = ssh_cmd.pssh_cmd(
conn,
"lsblk -n | grep -v '└─.*\|"
+ used_disk.replace(" ", "\|")
+ "' | awk '{print $1}' | sed -e ':a;N;$!ba;s/\\n/ /g'",
)[1]
return test_disk | c7e7e40e0ea8a6ca939cdd0b81b426475d9ebbdc | 3,633,272 |
def get_single_label(label_id):
"""Get an ID as a single element.
Args:
label_id: Single ID or sequence of IDs.
Returns:
The first elements if ``label_id`` is a sequence, or the
``label_id`` itself if not.
"""
if libmag.is_seq(label_id) and len(label_id) > 0:
return label_id[0]
return label_id | b97b6dbaf5fbb56204acef637f358ec35a331db7 | 3,633,273 |
from sentinelsat import SentinelAPI
import pyproj
import numpy as np
import shapely.geometry as sg
from shapely.wkt import loads
from astropy.time import Time, TimeDelta
from tqdm import tqdm
import sys
def search_sentinels(platform_name, df, aoi, dt=2, user=None, pwd=None,
proj_string='+init=EPSG:3995', product_type=None,
min_cloud_cover=0, max_cloud_cover=100,
swath_type=None, f_out=None):
"""
Search Sentinel-1/2 images overlapping ICESat-2 data within +- dt
Parameters:
-----------
platform_name : str ['Sentinel-1 | Sentinel-2']
name of the platform for which images will be searched
df : panda dataframe
ICESat-2 data
aoi: str, list
area of interest as WKT string or bounding box[lllon, lllat, urlon, urlat]
dt: int, float
difference in hours between CS2 and S2
user : str
username to connect to the Copernicus Scientific Hub
pwd : str
password to connect to the Copernicus Scientific Hub
proj_string: str
projection string to be used with the pyproj module
product_type : str
name of the type of product to be searched (more info at https://scihub.copernicus.eu/userguide/)
swath_type : str
name of the type of swath to be searched (Sentinel-1 only, more info at https://scihub.copernicus.eu/userguide/)
min_cloud_cover: int, float
Minimum cloud coverage in percentage (Sentinel-2 only)
max_cloud_cover: int, float
Maximum cloud coverage in percentage (Sentinel-2 only)
f_out : str
path to file where to write results
Returns: (to be finished!)
--------
"""
#==========================================================================
# Pre-processing
#==========================================================================
### Convert aoi to shapely polygon in projected CRS
# define projection
print("Creating AOI polygon...")
proj = pyproj.Proj(proj_string)
# read aoi polygon
if type(aoi) == str:
aoi_temp = loads(aoi)
elif type(aoi) in (list, tuple):
aoi_temp = sg.box(aoi[0], aoi[1], aoi[2], aoi[3])
aoi = aoi_temp.wkt
else:
print("ERROR: 'aoi' should be provided as a WKT string or bounding box (list)")
sys.exit(1)
### Check input parameters
if product_type == None:
if platform_name == 'Sentinel-1':
product_type = 'GRD'
print("product_type set to: ", product_type)
if platform_name == 'Sentinel-2':
product_type = 'S2MSI1C'
print("product_type set to: ", product_type)
if swath_type == None and platform_name == 'Sentinel-1':
swath_type = 'EW'
print("swath_type set to: ", swath_type)
# project coordinates and convert to shapely polygon
x, y = proj(aoi_temp.exterior.xy[0], aoi_temp.exterior.xy[1])
aoi_poly = sg.Polygon(list(zip(x, y)))
### Convert dt to astropy time object
dtt = TimeDelta(3600 * dt, format='sec')
#==========================================================================
# Processing
#==========================================================================
### Project IS2 data to desired CRS
print("Selecting orbit data inside AOI...")
lon, lat = np.array(df['lons']), np.array(df['lats'])
x, y = proj(lon, lat)
### Extract IS2 orbit number
is2_orbits = np.unique(df['orbit_number'])
print("N. of orbits/points inside AOI: {}/{}".format(len(is2_orbits),
len(df)))
### Extract time period from IS2 data to query the server
t_is2 = Time(df['time'], scale='utc')
t_is2_start = min(t_is2) - dtt
t_is2_stop = max(t_is2) + dtt
### Read metadata
print("Query for metadata...")
api = SentinelAPI(user, pwd,'https://scihub.copernicus.eu/dhus',
timeout=600)
if platform_name == 'Sentinel-1':
md = api.query(area=aoi, date=(t_is2_start.datetime, t_is2_stop.datetime),
platformname='Sentinel-1', area_relation='Intersects',
producttype=product_type, sensoroperationalmode=swath_type)
elif platform_name == 'Sentinel-2':
md = api.query(area=aoi, date=(t_is2_start.datetime, t_is2_stop.datetime),
platformname='Sentinel-2', area_relation='Intersects',
cloudcoverpercentage=(min_cloud_cover, max_cloud_cover),
producttype=product_type)
print("N. of total images: {}".format(len(md)))
if len(md) == 0:
return [], [], [], [], [], []
### Convert Sentinel-2 time strings to astropy time objects
t_sen = {}
print("Converting time to astropy objects...")
for el in md:
t_sen[el] = Time(md[el]['beginposition'], format='datetime',
scale='utc')
    ### Loop over orbits to find images that satisfy time constraints
TimeDict = {}
t_is2 = []
print("Looping over orbits to find intersections within {}h...".format(dt))
for c, o in tqdm(enumerate(is2_orbits)):
### select CS2 data
d_is2 = df[df['orbit_number'] == o]
### compute CS2 track central time
t_temp = Time(d_is2['time'], scale='utc')
t_start_is2 = min(t_temp)
t_stop_is2 = max(t_temp)
t_is2_o = t_start_is2 + (t_stop_is2 - t_start_is2) / 2
t_is2.append(t_is2_o)
### save dict keys of images within +-dt from CS2
i_t = np.array(
[el for el in md if np.abs((t_sen[el] - t_is2_o).sec) <= dtt.sec])
TimeDict[o] = i_t
# get unique images within +-dt from all orbit data
i_sen_t_int = set(np.concatenate(list(TimeDict.values())).ravel())
print("N. of images within {}h: {}".format(dt, len(i_sen_t_int)))
if len(i_sen_t_int) == 0:
return [], [], [], [], [], []
### Project images corner coordinates and convert to shapely polygons
print("Creating images footprint polygons...")
# loop over them, project corner coords and create polygons
SenPolygonsDict = {}
for i in i_sen_t_int:
# load S2 footprint
aoi_sen = loads(md[i]['footprint'])
# check if multipolygon has more than 1 polygon defined
if len(aoi_sen) > 1:
print("WARNING: footprint for product {}".format(i),
"is defined by more than 1 polygon!!!")
aoi_sen = aoi_sen[0]
# project corner coords
x_sen, y_sen = proj(aoi_sen.exterior.xy[0], aoi_sen.exterior.xy[1])
# add polygon to dictionary
SenPolygonsDict[i] = sg.Polygon(list(zip(x_sen, y_sen)))
### Loop over orbits to find spatial intersections
print("Looping over orbits to find intersections...")
orbit_number = []
product_name = []
browse_url = []
download_url = []
t_diff = []
md_out = {}
for c, o in tqdm(enumerate(is2_orbits)):
### select CS2 data
i = df['orbit_number'] == o
# check if track has at least 2 points
if sum(i) < 2:
continue
d_is2 = df[i]
x_is2 = x[i]
y_is2 = y[i]
### create shapely line from CS track
is2_line = sg.LineString(list(zip(x_is2, y_is2)))
        ### collect Sentinel polygon indices
i_sen = TimeDict[o]
### Loop over S2 polygons
for i_poly in i_sen:
ls_poly = SenPolygonsDict[i_poly]
if is2_line.intersects(ls_poly):
orbit_number.append(o)
t_diff.append((t_sen[i_poly] - t_is2[c]).sec / 3600)
product_name.append(md[i_poly]['filename'])
download_url.append(md[i_poly]['link'])
browse_url.append(md[i_poly]['link_icon'])
md_out[i_poly] = md[i_poly]
print("N. of total intersections: {}".format(len(orbit_number)))
### Print to file
if f_out != None:
print("Printing results to {}...".format(f_out))
with open(f_out, 'w') as fp:
fp.write("orbit_number,t_diff_(h),product_id,dowload_url,browse_url\n")
for i in range(len(orbit_number)):
fp.write("{},{:.2f},{},{},{}\n".format(
orbit_number[i], t_diff[i], product_name[i],
download_url[i], browse_url[i]))
return orbit_number, product_name, browse_url, download_url, t_diff, md_out | ff7585bd66a60c1ba0ce6d09c535b19378dadcd9 | 3,633,274 |
import unittest
def not_implemented(cls):
"""Decorator for TestCase classes to indicate that the tests have not been written (yet)."""
msg = "%s: tests have not been implemented" % cls.__name__
_NOT_IMPLEMENTED.append(msg)
return unittest.skip(msg)(cls) | 0454ffeb08e4367dbb70c90f40748a32c5cec05d | 3,633,275 |
def get_default_container_image_for_current_sdk(job_type):
"""For internal use only; no backwards-compatibility guarantees.
Args:
job_type (str): BEAM job type.
Returns:
str: Google Cloud Dataflow container image for remote execution.
"""
# TODO(tvalentyn): Use enumerated type instead of strings for job types.
if job_type == 'FNAPI_BATCH' or job_type == 'FNAPI_STREAMING':
image_name = DATAFLOW_CONTAINER_IMAGE_REPOSITORY + '/python-fnapi'
else:
image_name = DATAFLOW_CONTAINER_IMAGE_REPOSITORY + '/python'
image_tag = _get_required_container_version(job_type)
return image_name + ':' + image_tag | a27fa86cd8bb6dd5ea7fec5b3597521d969f4b30 | 3,633,276 |
import ase.io
import numpy as np
import qe_tools
from pymatgen.io.cif import CifParser
def get_structure_tuple(fileobject, fileformat, extra_data=None):
"""
Given a file-like object (using StringIO or open()), and a string
identifying the file format, return a structure tuple as accepted
by seekpath.
:param fileobject: a file-like object containing the file content
:param fileformat: a string with the format to use to parse the data
:return: a structure tuple (cell, positions, numbers) as accepted
by seekpath.
"""
ase_fileformats = {
'vasp-ase': 'vasp',
'xsf-ase': 'xsf',
'castep-ase': 'castep-cell',
'pdb-ase': 'proteindatabank',
'xyz-ase': 'xyz',
'cif-ase':
'cif', # currently broken in ASE: https://gitlab.com/ase/ase/issues/15
}
if fileformat in ase_fileformats.keys():
asestructure = ase.io.read(
fileobject, format=ase_fileformats[fileformat])
if fileformat == 'xyz-ase':
# XYZ does not contain cell information, add them back from the
# additional form data (note that at the moment we are not using the
# extended XYZ format)
try:
if extra_data is None:
raise ValueError(
"Please pass also the extra_data with the cell information if you want to use the xyz format"
)
cell = list(
tuple(
float(extra_data['xyzCellVec' + v + a][0])
for a in 'xyz')
for v in 'ABC')
# ^^^ avoid generator expressions by explicitly requesting tuple/list
except (KeyError, ValueError):
raise # at some point we might want to convert the different conversion errors to a custom exception
asestructure.set_cell(cell)
return tuple_from_ase(asestructure)
elif fileformat == "cif-pymatgen":
# Only get the first structure, if more than one
pmgstructure = CifParser(fileobject).get_structures()[0]
return tuple_from_pymatgen(pmgstructure)
elif fileformat == 'qeinp-qetools':
pwfile = qe_tools.PwInputFile(fileobject)
pwparsed = pwfile.get_structure_from_qeinput()
cell = pwparsed['cell']
rel_position = np.dot(pwparsed['positions'],
np.linalg.inv(cell)).tolist()
species_dict = {
name: pseudo_file_name for name, pseudo_file_name in zip(
pwparsed['species']['names'], pwparsed['species'][
'pseudo_file_names'])
}
numbers = []
# Heuristics to get the chemical element
for name in pwparsed['atom_names']:
# Take only characters, take only up to two characters
chemical_name = "".join(
char for char in name if char.isalpha())[:2].capitalize()
number_from_name = atoms_num_dict.get(chemical_name, None)
# Infer chemical element from element
pseudo_name = species_dict[name]
name_from_pseudo = pseudo_name
for sep in ['-', '.', '_']:
name_from_pseudo = name_from_pseudo.partition(sep)[0]
name_from_pseudo = name_from_pseudo.capitalize()
number_from_pseudo = atoms_num_dict.get(name_from_pseudo, None)
if number_from_name is None and number_from_pseudo is None:
raise KeyError(
'Unable to parse the chemical element either from the atom name or for the pseudo name'
)
            # number_from_pseudo takes priority if both are parsed,
            # even if they are different
if number_from_pseudo is not None:
numbers.append(number_from_pseudo)
continue
# If we are here, number_from_pseudo is None and number_from_name is not
numbers.append(number_from_name)
continue
# Old conversion. This does not work for multiple species
# for the same chemical element, e.g. Si1 and Si2
#numbers = [atoms_num_dict[sym] for sym in pwparsed['atom_names']]
structure_tuple = (cell, rel_position, numbers)
return structure_tuple
raise UnknownFormatError(fileformat) | ada7d694b0d9ec60f6b1e40dd21487650c637207 | 3,633,277 |
def find_merge_commit_in_prs(needle, prs):
"""Find the merge commit `needle` in the list of `prs`
If found, returns the pr the merge commit comes from. If not found, return
None
"""
for pr in prs[::-1]:
if pr['merge_commit'] is not None:
if pr['merge_commit']['hash'] == needle[1][:12]:
return pr
return None | 42320473aff84985e35cdf9024a64a18fe6f14f1 | 3,633,278 |
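# --- Toy example; `needle` mirrors the (ref, full_hash) tuple shape that the
# function indexes as needle[1][:12] (the shapes are assumptions).
prs = [
    {"id": 1, "merge_commit": {"hash": "abc123abc123"}},
    {"id": 2, "merge_commit": None},
]
needle = ("refs/heads/main", "abc123abc123def456")
print(find_merge_commit_in_prs(needle, prs))  # -> the dict with id 1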
def date_breaks(width):
"""
Regularly spaced dates
Parameters
----------
width:
an interval specification. must be one of [minute, hour, day, week, month, year]
Examples
--------
>>> date_breaks(width = '1 year')
>>> date_breaks(width = '6 weeks')
>>> date_breaks('months')
"""
period, units = parse_break_str(width)
Locator = LOCATORS.get(units)
locator = Locator(interval=period)
return locator | a1808ccb7c09fcc3f2d0367f64cd3533eb63a33d | 3,633,279 |
def create_valid_url(url: str) -> str:
"""
Generate a video direct play url.
"""
return url | a04a22ec64b346be83b020745aeb33f74ca90b74 | 3,633,280 |
import numpy
def circular_weight(angle):
"""This function utilizes the precomputed circular bezier function
with a fit to a 10th order curve created by the following code block:
.. code-block:: python
x = numpy.arange(.5, 180, 0.5)
y = []
for i in x:
y.append(bezier.find_circular_weight(i, tol=1e-12, max_iters=500))
y = numpy.array(y)
z = numpy.polyfit(x, y, 10)
Parameters
----------
angle : float
enter the angle to be traversed in degrees
Returns
-------
weight : float
Weight value for calculating bezier control points that
approximate a circular curve.
"""
z = numpy.array([
-2.45143082907626980583458614241573e-24,
1.58856196152315352138612607918623e-21,
-5.03264989277462933391916020538014e-19,
8.57954915199159887348249578203777e-17,
-1.09982713519619074150585319501519e-14,
6.42175701661701683377126465867012e-13,
-1.95012445981222027957307425487916e-10,
6.98338125134285339870680633234242e-10,
-1.27018636324842636571531492850617e-05,
5.58069196465371404519196542326487e-08,
6.66666581437823202449521886592265e-01
])
p = numpy.poly1d(z)
return p(angle) | 4341173c3e3584fcddbe04c60f7dd43fe859ac89 | 3,633,281 |
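# --- Sanity check (assuming the weight is the fraction along the tangent
# lines): at 90 degrees that fraction equals the classic cubic-Bezier circle
# constant 4/3 * tan(22.5 deg) ~= 0.5523, so the fit should land close to it.
print(circular_weight(90.0))  # ~0.5523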
def parse_single_example(serialized, # pylint: disable=invalid-name
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name="ParseSingleExample"):
"""Parses a single `Example` proto.
Similar to `parse_example`, except:
For dense tensors, the returned `Tensor` is identical to the output of
`parse_example`, except there is no batch dimension, the output shape is the
same as the shape given in `dense_shape`.
For `SparseTensor`s, the first (batch) column of the indices matrix is removed
(the indices matrix is a column vector), the values vector is unchanged, and
the first (batch_size) entry of the shape vector is removed (it is now a
single element vector).
See also `parse_example`.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See parse_example documentation for more details.
names: (Optional) A scalar string Tensor, the associated name.
See parse_example documentation for more details.
sparse_keys: See parse_example documentation for more details.
sparse_types: See parse_example documentation for more details.
dense_keys: See parse_example documentation for more details.
dense_types: See parse_example documentation for more details.
dense_defaults: See parse_example documentation for more details.
dense_shapes: See parse_example documentation for more details.
name: A name for this operation (optional).
Returns:
A dictionary mapping keys to Tensors and SparseTensors.
Raises:
ValueError: if "scalar" or "names" have known shapes, and are not scalars.
"""
with ops.op_scope([serialized, names], name, "parse_single_example"):
serialized = ops.convert_to_tensor(serialized)
serialized_shape = serialized.get_shape()
if serialized_shape.ndims is not None:
if serialized_shape.ndims != 0:
raise ValueError("Input serialized must be a scalar")
else:
serialized = control_flow_ops.with_dependencies(
[logging_ops.Assert(
math_ops.equal(array_ops.rank(serialized), 0),
["Input serialized must be a scalar"],
name="SerializedIsScalar")],
serialized,
name="SerializedDependencies")
serialized = array_ops.expand_dims(serialized, 0)
if names is not None:
names = ops.convert_to_tensor(names)
names_shape = names.get_shape()
if names_shape.ndims is not None:
if names_shape.ndims != 0:
raise ValueError("Input names must be a scalar")
else:
names = control_flow_ops.with_dependencies(
[logging_ops.Assert(
math_ops.equal(array_ops.rank(names), 0),
["Input names must be a scalar"],
name="NamesIsScalar")],
names,
name="NamesDependencies")
names = array_ops.expand_dims(names, 0)
outputs = parse_example(serialized,
names=names,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_types=dense_types,
dense_defaults=dense_defaults,
dense_shapes=dense_shapes,
name=name)
if dense_keys is not None:
for d in dense_keys:
outputs[d] = array_ops.squeeze(outputs[d], [0], name="Squeeze_%s" % d)
if sparse_keys is not None:
for s in sparse_keys:
outputs[s] = ops.SparseTensor(
array_ops.slice(outputs[s].indices,
[0, 1], [-1, -1], name="Slice_Indices_%s" % s),
outputs[s].values,
array_ops.slice(outputs[s].shape,
[1], [-1], name="Squeeze_Shape_%s" % s))
return outputs | aa2a7774a5b03e0b89b6a55c13a13ed45c1e700d | 3,633,282 |
import pandas as pd

def delta_date_feature(dates):
"""
Given a 2d array containing dates (in any format recognized by
pd.to_datetime), it returns the delta in days between each date and the
most recent date in its column
"""
date_sanitized = pd.DataFrame(dates).apply(pd.to_datetime)
return (date_sanitized
.apply(lambda d: (d.max() - d).dt.days, axis=0)
.to_numpy()) | bfdde9fe12ffabb336d2f92b9bd3875782a9f8ff | 3,633,283 |
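# --- Example on a small one-column array of date strings:
import numpy as np

dates = np.array([["2024-01-01"], ["2024-01-03"], ["2024-01-10"]])
print(delta_date_feature(dates))
# the column maximum is 2024-01-10, so the deltas are [[9], [7], [0]]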
import timeit

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
# fp16/distributed helpers assumed to come from NVIDIA Apex;
# DataPrefetcher and DatasetFactory are project-local helpers.
from apex.fp16_utils import (network_to_half, prep_param_lists,
                             model_grads_to_master_grads,
                             master_params_to_model_params)
from apex.parallel import DistributedDataParallel as DDP
def benchmark_training(model, opts):
"""Benchmarks training phase.
:param obj model: A model to benchmark
:param dict opts: A dictionary of parameters.
:rtype: tuple:
:return: A tuple of (model_name, list of batch times)
"""
def _reduce_tensor(tensor):
reduced = tensor.clone()
dist.all_reduce(reduced, op=dist.reduce_op.SUM)
reduced /= opts['world_size']
return reduced
if opts['phase'] != 'training':
raise "Phase in benchmark_training func is '%s'" % opts['phase']
opts['distributed'] = opts['world_size'] > 1
opts['with_cuda'] = opts['device'] == 'gpu'
opts['fp16'] = opts['dtype'] == 'float16'
opts['loss_scale'] = 1
if opts['fp16'] and not opts['with_cuda']:
raise ValueError("Configuration error: FP16 can only be used with GPUs")
if opts['with_cuda']:
torch.cuda.set_device(opts['local_rank'])
cudnn.benchmark = opts['cudnn_benchmark']
cudnn.fastest = opts['cudnn_fastest']
if opts['distributed']:
dist.init_process_group(backend=opts['dist_backend'], init_method='env://')
if opts['with_cuda']:
model = model.cuda()
if opts['dtype'] == 'float16':
model = network_to_half(model)
if opts['distributed']:
model = DDP(model, shared_param=True)
if opts['fp16']:
model_params, master_params = prep_param_lists(model)
else:
master_params = list(model.parameters())
criterion = nn.CrossEntropyLoss()
if opts['with_cuda']:
criterion = criterion.cuda()
optimizer = optim.SGD(master_params, lr=0.01, momentum=0.9, weight_decay=1e-4)
data_loader = DatasetFactory.get_data_loader(opts, opts['__input_shape'], opts['__num_classes'])
is_warmup = opts['num_warmup_batches'] > 0
done = opts['num_warmup_batches'] == 0
num_iterations_done = 0
model.train()
batch_times = np.zeros(opts['num_batches'])
end_time = timeit.default_timer()
while not done:
prefetcher = DataPrefetcher(data_loader, opts)
batch_data, batch_labels = prefetcher.next()
while batch_data is not None:
data_var = torch.autograd.Variable(batch_data)
labels_var = torch.autograd.Variable(batch_labels)
output = model(data_var)
loss = criterion(output, labels_var)
loss = loss * opts['loss_scale']
# I'll need this for reporting
#reduced_loss = _reduce_tensor(loss.data) if opts['distributed'] else loss.data
if opts['fp16']:
model.zero_grad()
loss.backward()
model_grads_to_master_grads(model_params, master_params)
if opts['loss_scale'] != 1:
for param in master_params:
param.grad.data = param.grad.data / opts['loss_scale']
optimizer.step()
master_params_to_model_params(model_params, master_params)
else:
optimizer.zero_grad()
loss.backward()
optimizer.step()
if opts['with_cuda']:
torch.cuda.synchronize()
# Track progress
num_iterations_done += 1
cur_time = timeit.default_timer()
batch_data, batch_labels = prefetcher.next()
if is_warmup:
if num_iterations_done >= opts['num_warmup_batches']:
is_warmup = False
num_iterations_done = 0
else:
if opts['num_batches'] != 0:
batch_times[num_iterations_done-1] = cur_time - end_time
if num_iterations_done >= opts['num_batches']:
done = True
break
end_time = cur_time
return (opts['__name'], batch_times) | 45f9328949e3385c1001db3dc2097d7a814455a4 | 3,633,284 |
import subprocess
def runprog(*args):
"""Runs specified program and args, returns (exitcode, stdout, stderr)."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return (p.returncode, out, err) | 2146d967c961f4c9ed1f62288436a82cc7a62189 | 3,633,285 |
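# --- e.g. capture a command's exit code and output (bytes on Python 3):
code, out, err = runprog("echo", "hello")
print(code, out.strip())  # 0 b'hello'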
def inhib_kin_query(inhib_pubchem_cid):
"""
Query to pull targeted kinases using inhib CID
:param inhib_pubchem_cid: string inhib CID
:return: Flask_Table Kinase object
"""
session = create_sqlsession()
    q = session.query(Inhibitor).filter_by(inhib_pubchem_cid=inhib_pubchem_cid)
    inh = q.first()
    # subset of information about substrate phosphosite sites
subsets = inh.inhib_target_kinases
table = Kinase_results(subsets)
session.close()
return table | cf06cd79e057e4bd7f6c8abc8db115a94cc51a0f | 3,633,286 |
from typing import Set
def abstract_methods_of(cls) -> Set[str]:
"""
Gets the abstract methods of a class.
:param cls: The class to get the abstract methods from.
:return:
"""
return getattr(cls, ABSTRACT_CLASS_ATTRIBUTE, set()) | 0cf9fa46433230e535bb01daaa27109ccb246aef | 3,633,287 |
from optparse import OptionParser
import os
def parse_cmd_line_args():
"""
Parse command line parameters
"""
parser = OptionParser()
parser.add_option("-m", "--model", dest="model", default='standard_glm',
help="Type of model to use. See model_factory.py for available types.")
parser.add_option("-r", "--resultsDir", dest="resultsDir", default='.',
help="Save the results to this directory.")
parser.add_option("-N", "--N", dest="N", default=1,
help="Number of neurons.")
parser.add_option("-T", "--T_stop", dest="T_stop", default=60.0,
help="Length of simulation (sec).")
parser.add_option("-u", "--unique_result", dest="unique_results", default="true",
help="Whether or not to create a unique results directory.")
(options, args) = parser.parse_args()
# Make sure parameters are of the correct type
if not isinstance(options.N, int):
options.N = int(options.N)
if not isinstance(options.T_stop, float):
options.T_stop = float(options.T_stop)
# Check if specified files exist
    if options.resultsDir is not None and not os.path.exists(options.resultsDir):
raise Exception("Invalid results folder specified: %s" % options.resultsDir)
if not( options.unique_results == "0" or \
options.unique_results.lower() == "false"):
options.resultsDir = create_unique_results_folder(options.resultsDir)
return (options, args) | 22f0eb365914f9fa915f3352152f29809db70944 | 3,633,288 |
import numpy as np
import pennylane as qml

def parameterized_qubit_qnode():
    """A parametrized qubit circuit."""
def qfunc(a, b, c, angles):
qml.RX(a, wires=0)
qml.RX(b, wires=1)
qml.PauliZ(1)
qml.CNOT(wires=[0, 1]).inv()
qml.CRY(b, wires=[3, 1])
qml.RX(angles[0], wires=0)
qml.RX(4 * angles[1], wires=1)
qml.PhaseShift(17 / 9 * c, wires=2)
qml.RZ(b, wires=3)
qml.RX(angles[2], wires=2).inv()
qml.CRY(0.3589, wires=[3, 1]).inv()
qml.CSWAP(wires=[4, 2, 1]).inv()
qml.QubitUnitary(np.eye(2), wires=[2])
qml.Toffoli(wires=[0, 2, 1])
qml.CNOT(wires=[0, 2])
qml.PauliZ(wires=[1])
qml.PauliZ(wires=[1]).inv()
qml.CZ(wires=[0, 1])
qml.CZ(wires=[0, 2]).inv()
qml.CNOT(wires=[2, 1])
qml.CNOT(wires=[0, 2])
qml.SWAP(wires=[0, 2]).inv()
qml.CNOT(wires=[1, 3])
qml.RZ(b, wires=3)
qml.CSWAP(wires=[4, 0, 1])
return [
qml.expval(qml.PauliY(0)),
qml.var(qml.Hadamard(wires=1)),
qml.sample(qml.PauliX(2)),
qml.expval(qml.Hermitian(np.eye(4), wires=[3, 4])),
]
dev = qml.device("default.qubit", wires=5)
qnode = qml.QNode(qfunc, dev)
qnode._construct((0.1, 0.2, 0.3, np.array([0.4, 0.5, 0.6])), {})
qnode.evaluate((0.1, 0.2, 0.3, np.array([0.4, 0.5, 0.6])), {})
return qnode | e82a93b3f3c9c7d9a7c63c5f22c80e2248c1bdf4 | 3,633,289 |
import h5py
import yaml
from mpi4py import MPI
def fem_context( filename, comm=MPI.COMM_WORLD ):
"""
Create tensor-product spline space and mapping from geometry input file
in HDF5 format (single-patch only).
Parameters
----------
filename : str
Name of HDF5 input file.
comm : mpi4py.Comm
MPI communicator.
    Returns
-------
w : TensorFemSpace
Tensor-product spline space.
m : SplineMapping
Tensor-product spline mapping.
"""
if comm.size > 1:
kwargs = dict( driver='mpio', comm=comm )
else:
kwargs = {}
h5 = h5py.File( filename, mode='r', **kwargs )
    yml = yaml.load( h5['geometry.yml'][()], Loader=yaml.SafeLoader )
ldim = yml['ldim']
pdim = yml['pdim']
num_patches = len( yml['patches'] )
if num_patches == 0:
h5.close()
raise ValueError( "Input file contains no patches." )
elif num_patches == 1:
item = yml['patches'][0]
patch = h5[item['name']]
degree = [int (p) for p in patch.attrs['degree' ]]
periodic = [bool(b) for b in patch.attrs['periodic']]
knots = [patch['knots_{}'.format(d)][:] for d in range( ldim )]
spaces = [SplineSpace( degree=p, knots=k, periodic=b )
for p,k,b in zip( degree, knots, periodic )]
tensor_space = TensorFemSpace( *spaces, comm=comm )
mapping = SplineMapping.from_control_points( tensor_space, patch['points'] )
h5.close()
return tensor_space, mapping
else:
# TODO: multipatch geometry
h5.close()
raise NotImplementedError( "PSYDAC library cannot handle multipatch geometries yet." ) | f5016a4df27699814622f9843706f5096052d92d | 3,633,290 |
import pandas_datareader.data as web

def get_indicators_from_fred(start=start, end=end):
"""
Fetch quarterly data on 6 leading indicators from time period start:end
"""
# yield curve, unemployment, change in inventory, new private housing permits
yc_unemp_inv_permit = (
web.DataReader(["T10Y2Y", "UNRATE", "CBIC1",
"PERMIT"], "fred", start, end)
.resample("QS")
.mean()
)
# percent change in housing prices and retail sales
hpi_retail = (
web.DataReader(["USSTHPI", "SLRTTO01USQ661S"], "fred", start, end)
.resample("QS") # already quarterly, adjusting so index is same
.mean()
.pipe(pct_change_on_last_year)
.dropna()
)
indicators = (
yc_unemp_inv_permit
.join(hpi_retail)
.dropna()
.rename(columns=dict(
USSTHPI="pct_change_hpi",
T10Y2Y="yield_curve",
UNRATE="unemp",
CBIC1="inventory",
SLRTTO01USQ661S="retail_sales",
PERMIT="house_permits"
))
)
return indicators | 8ed26654d64ca8c5a08c74ecc48e326396ecb311 | 3,633,291 |
def permission_denied_exception_handler(exc, context):
"""If the object exist but the user does not have permission for it, change the status code and message."""
# Call REST framework's default exception handler first to get the standard error response.
response = exception_handler(exc, context)
if context["view"].queryset:
model = context["view"].queryset.model
if (response.status_code == status.HTTP_404_NOT_FOUND and
model.objects.filter(id=context["kwargs"]["pk"]).exists()):
response.status_code = status.HTTP_403_FORBIDDEN
response.data["detail"] = "You do not have permission to perform this action."
return response | c3abfb58419a9cd2e07d29b3340ba3042db93506 | 3,633,292 |
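# --- To activate a custom handler like this one, point DRF's settings at it
# (the dotted path below is an assumption about where the handler lives):
REST_FRAMEWORK = {
    "EXCEPTION_HANDLER": "myapp.exceptions.permission_denied_exception_handler",
}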
from typing import Optional
import ray
def get_current_placement_group() -> Optional[PlacementGroup]:
"""Get the current placement group which a task or actor is using.
It returns None if there's no current placement group for the worker.
For example, if you call this method in your driver, it returns None
(because drivers never belong to any placement group).
Examples:
>>> @ray.remote
>>> def f():
>>> # This will return the placement group the task f belongs to.
>>> # It means this pg will be identical to the pg created below.
>>> pg = get_current_placement_group()
>>> pg = placement_group([{"CPU": 2}])
>>> f.options(placement_group=pg).remote()
>>> # New script.
>>> ray.init()
>>> # New script doesn't belong to any placement group,
>>> # so it returns None.
>>> assert get_current_placement_group() is None
Return:
PlacementGroup: Placement group object.
None if the current task or actor wasn't
created with any placement group.
"""
worker = ray.worker.global_worker
worker.check_connected()
pg_id = worker.placement_group_id
if pg_id.is_nil():
return None
return PlacementGroup(pg_id) | 5a7fd8cad03adaad2479bdb33cf2182c050dedf4 | 3,633,293 |
def _GenerateManifest(args, service_account_key_data, image_pull_secret_data,
upgrade, membership_ref, release_track=None):
"""Generate the manifest for connect agent from API.
Args:
args: arguments of the command.
service_account_key_data: The contents of a Google IAM service account JSON
file.
image_pull_secret_data: The image pull secret content to use for private
registries.
upgrade: if this is an upgrade operation.
membership_ref: The membership associated with the connect agent in the
format of `projects/[PROJECT]/locations/global/memberships/[MEMBERSHIP]`
release_track: the release_track used in the gcloud command,
or None if it is not available.
Returns:
The full manifest to deploy the connect agent resources.
"""
api_version = gkehub_api_util.GetApiVersionForTrack(release_track)
  delimiter = '---\n'
full_manifest = ''
# Based on the API version, use api_adapter if GenerateConnectAgentManifest is
# a nested message, else use the default api_client.
if api_version in ['v1beta1']:
adapter = gkehub_api_adapter.NewAPIAdapter(api_version)
connect_agent_ref = _GetConnectAgentOptions(args, upgrade,
DEFAULT_NAMESPACE,
image_pull_secret_data,
membership_ref)
manifest_resources = adapter.GenerateConnectAgentManifest(connect_agent_ref)
for resource in manifest_resources:
      full_manifest = full_manifest + (getattr(resource, 'manifest') if hasattr(
          resource, 'manifest') else '') + delimiter
else:
# If Workload Identity is enabled, the Hub API will detect the issuer on
# the membership resource and seamlessly return a manifest that correctly
# configures the Connect Agent to use Workload Identity.
manifest_resources = api_util.GenerateConnectAgentManifest(
membership_ref,
image_pull_secret_content=image_pull_secret_data,
is_upgrade=upgrade,
namespace=DEFAULT_NAMESPACE,
proxy=args.proxy,
registry=args.docker_registry,
version=args.version,
release_track=release_track)
for resource in manifest_resources.manifest:
      full_manifest = full_manifest + resource.manifest + delimiter
# Append creds secret.
full_manifest = full_manifest + CREDENTIAL_SECRET_TEMPLATE.format(
namespace=DEFAULT_NAMESPACE,
gcp_sa_key_secret_name=GCP_SA_KEY_SECRET_NAME,
gcp_sa_key=encoding.Decode(service_account_key_data, encoding='utf8'))
return full_manifest | af3553a3cbdc7c8bd75cafe5c48cfa06a72ec347 | 3,633,294 |
import uuid
import os
import io
import boto3
from PIL import Image
from flask import request
def save_image(img_type, elem):
"""
Save post cover or user avatar to local filesystem in dev or to S3 in prod
:param img_type: 'avatars' or 'covers'
:param elem: post or user obj on which to save the image
:return: name of the file to be saved
"""
image = request.files['file']
if elem:
filename = elem.photo.rsplit('/', 1)[-1]
# Do not overwrite default image but generate unique file name instead
if filename == 'default.jpg':
filename = str(uuid.uuid4()) + '.' + image.filename.rsplit('.', 1)[1]
elem.photo = app.config['IMG_FOLDER'] + img_type + '/' + filename
else:
filename = str(uuid.uuid4()) + '.' + image.filename.rsplit('.', 1)[1]
img = Image.open(image)
if img_type == 'avatars':
size = 512
else:
size = 1024
maxsize = (size, size)
img.thumbnail(maxsize, Image.ANTIALIAS)
if 'DYNO' in os.environ: # check if the app is running on Heroku server
s3 = boto3.resource('s3')
output = io.BytesIO()
img.save(output, format='JPEG')
s3.Object('theeblog', img_type + '/' + filename).put(Body=output.getvalue())
else: # Otherwise save to local filesystem
img.save(os.path.join(app.config['UPLOAD_FOLDER'] + img_type, filename))
return filename | 4a346963490c4b2f41542b19b7f537bbbb30438e | 3,633,295 |
import re
def check_conjunctions(sentence: str) -> list or None:
"""
Returns the list of messages about a punctuation error
with conjunctions if there is one.
"""
sentence = sentence.lower()
conjunctions = {'а', 'але', 'однак', 'проте', 'зате', 'хоч', 'хоча'}
errors = []
for word in conjunctions:
pattern = rf'^.*[^,] {word}(([^\w].*)|$)'
if re.fullmatch(pattern, sentence):
            errors.append(f'❗ Перед "{word}" повинна стояти кома.')
if errors:
return errors
return None | 9bbf6e72e431cf652cdac659e8e0a4da7b7a9b3c | 3,633,296 |
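# --- Example: a missing comma before "але" ("but") is flagged, while a
# correctly punctuated sentence passes.
print(check_conjunctions("Я прийшов але нікого не було."))
# -> ['❗ Перед "але" повинна стояти кома.']
print(check_conjunctions("Я прийшов, але нікого не було."))  # -> None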
import numpy
def decompose(poly: PolyLike) -> ndpoly:
"""
Decompose a polynomial to component form.
In array missing values are padded with 0 to make decomposition compatible
with ``chaospy.sum(output, 0)``.
Args:
poly:
Polynomial to decompose.
Returns:
Decomposed polynomial with ``poly.shape==(M,)+output.shape``,
where ``M`` is the number of components in `poly`.
Examples:
>>> q0 = numpoly.variable()
>>> poly = numpoly.polynomial([q0**2-1, 2])
>>> poly
polynomial([q0**2-1, 2])
>>> numpoly.decompose(poly)
polynomial([[-1, 2],
[q0**2, 0]])
>>> numpoly.sum(numpoly.decompose(poly), 0)
polynomial([q0**2-1, 2])
"""
poly = numpoly.aspolynomial(poly)
return numpoly.concatenate([
numpoly.construct.polynomial_from_attributes(
exponents=[expon],
coefficients=[numpy.asarray(poly.values[key])],
names=poly.indeterminants,
retain_coefficients=True,
retain_names=True,
)[numpy.newaxis] for key, expon in zip(poly.keys, poly.exponents)
]) | d2817904fb6a2f1977d92a99c75d640b5b869fca | 3,633,297 |
def letter_to_vec(letter):
"""returns one-hot representation of given letter
"""
index = ALL_LETTERS.find(letter)
return _one_hot(index, NUM_LETTERS) | 490aa2f3c5a9ddf7bf950c309f30c7753ea6628d | 3,633,298 |
import numpy as np
from scipy.integrate import dblquad

def Hellinger2D(dist1, dist2, x_low=-np.inf, x_high=np.inf, y_low=None,
y_high=None):
""" Computes the Hellinger distance between two bivariate probability
distributions, dist1 and dist2.
inputs:
dist1: a function that returns the probability of x, y
dist2: a function that returns the probability of x, y
x_low: float indicating the lower bound of the integration
interval with respect to x (for computing the Hellinger distance)
x_high: float indicating the upper bound of the integration
interval with respect to x (for computing the Hellinger distance)
y_low: a callable function describing the lower boundary curve of
y as a function of x
y_high: a callable function describing the upper boundary curve of
y as a function of x
outputs:
a scalar value representing the Hellinger distance.
"""
    if y_low is None:
        y_low = lambda x: x_low
    if y_high is None:
        y_high = lambda x: x_high
    if isinstance(y_low, (float, np.float64)):
        y_low = lambda x, y_low=y_low: y_low
    if isinstance(y_high, (float, np.float64)):
        y_high = lambda x, y_high=y_high: y_high
func = lambda x,y: (np.sqrt(dist1(x,y) * dist2(x,y))).reshape(-1, 1)
out = dblquad(func, x_low, x_high, y_low, y_high)
hellinger = 1. - np.sqrt(out[0])
return hellinger | 0541e44529d9c04916ee8ab88f2eaf4295b77256 | 3,633,299 |
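# --- Sanity check: identical distributions are at Hellinger distance ~0.
# (Depending on SciPy/NumPy versions, the .reshape(-1, 1) inside `func` may
# trigger a deprecation warning when dblquad coerces the result to float.)
import numpy as np

pdf = lambda x, y: np.exp(-(x**2 + y**2) / 2) / (2 * np.pi)
print(Hellinger2D(pdf, pdf, x_low=-8.0, x_high=8.0))  # ~0.0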