| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def parse_field(source, loc, tokens):
"""
    Return the tokens of a field as a (name, value) pair; multi-author
    values are split on ' and ' into a list.
"""
name = tokens[0].lower()
value = normalize_value(tokens[2])
if name == 'author' and ' and ' in value:
value = [field.strip() for field in value.split(' and ')]
return (name, value) | be5533cc53fc73fe84d8bd79465ef03ba22cfa5f | 3,637,900 |
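A minimal usage sketch for `parse_field`, assuming a pyparsing-style parse action where `tokens` arrives as `[name, '=', value]`, with a hypothetical stand-in for the module's `normalize_value`:

```python
# Hypothetical stand-in for the module's normalize_value helper.
def normalize_value(raw):
    return raw.strip().strip('{}')

tokens = ['Author', '=', '{Knuth, D. and Lamport, L.}']
print(parse_field(None, 0, tokens))
# ('author', ['Knuth, D.', 'Lamport, L.'])
```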
import numpy as np
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import LocalOutlierFactor
def evaluate_models_exploratory(X_normal:np.ndarray,
X_te:np.ndarray,
X_adv_deepfool:np.ndarray,
X_adv_fgsm:np.ndarray,
X_adv_pgd:np.ndarray,
X_adv_dt:np.ndarray,
Y:np.ndarray,
Y_aml:np.ndarray,
perfs:dict,
contamination:float=.05,
degree:float=3.,
support_fraction:float=.5):
"""
"""
MODELS = [IsolationForest(contamination=contamination),
OneClassSVM(kernel='poly', degree=degree),
EllipticEnvelope(contamination=contamination, support_fraction=support_fraction),
              LocalOutlierFactor(contamination=contamination, novelty=True)]
MODELS_NAMES = ['if', 'svm', 'ee', 'lo']
ATTACKS = ['baseline', 'deepfool', 'fgsm', 'pgd', 'dt']
for model, model_name in zip(MODELS, MODELS_NAMES):
# fit the model on the normal data
model.fit(X_normal)
        # LocalOutlierFactor needs novelty=True (set at construction above)
        # so that predict() is available on unseen data
#Y_hat, Y_deepfool, Y_fgsm, Y_pgd, Y_dt
outputs = model.predict(X_te), model.predict(X_adv_deepfool), \
model.predict(X_adv_fgsm), model.predict(X_adv_pgd), model.predict(X_adv_dt)
for y_hat, attack_type in zip(outputs, ATTACKS):
if attack_type == 'baseline':
labels = Y
else:
labels = Y_aml
acc, fs, tpr, tnr, mcc = get_performance(y_true=labels, y_hat=y_hat)
perfs[''.join(['accs_', model_name, '_', attack_type])] += acc
perfs[''.join(['fss_', model_name, '_', attack_type])] += fs
perfs[''.join(['tprs_', model_name, '_', attack_type])] += tpr
perfs[''.join(['tnrs_', model_name, '_', attack_type])] += tnr
perfs[''.join(['mccs_', model_name, '_', attack_type])] += mcc
return perfs | 17435e38512c9c158db6d6ae12ab44c38f573c61 | 3,637,901 |
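The function accumulates with `+=`, so it assumes `perfs` already holds zero-initialized counters for every metric/model/attack combination. A minimal sketch of building that dict, using the same key scheme the function writes to:

```python
METRICS = ['accs', 'fss', 'tprs', 'tnrs', 'mccs']
MODELS_NAMES = ['if', 'svm', 'ee', 'lo']
ATTACKS = ['baseline', 'deepfool', 'fgsm', 'pgd', 'dt']

# Keys match the function's ''.join([...]) scheme, e.g. 'accs_if_baseline'.
perfs = {f'{metric}_{model}_{attack}': 0.0
         for metric in METRICS
         for model in MODELS_NAMES
         for attack in ATTACKS}
```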
def get_files_endpoint(entity_name):
"""
Given an entity name, generate a flask_restful `Resource` class. In
`create_api_endpoints()`, these generated classes are registered with the API e.g.
`api.add_resource(get_files_endpoint("Dataset"), "/datasets/<string:pid>/files")`
:param entity_name: Name of the entity
:type entity_name: :class:`str`
:return: Generated endpoint class
"""
class FilesEndpoint(Resource):
@search_api_error_handling
def get(self, pid):
filters = get_filters_from_query_string("search_api", entity_name)
log.debug("Filters: %s", filters)
return get_files(entity_name, pid, filters), 200
get.__doc__ = f"""
---
summary: Get {entity_name}s for the given Dataset
description: Retrieves a list of {entity_name} objects for a given Dataset
object
tags:
- Dataset
parameters:
- in: path
required: true
name: pid
description: The pid of the entity to retrieve
schema:
oneOf:
- type: string
- FILTER
responses:
200:
description: Success - returns {entity_name}s for the given Dataset
object that satisfy the filter
content:
application/json:
schema:
type: array
items:
$ref:
'#/components/schemas/{entity_name}'
400:
description: Bad request - Something was wrong with the request
404:
description: No such record - Unable to find a record in ICAT
"""
FilesEndpoint.__name__ = entity_name
return FilesEndpoint | 7d2928b1b1545c9a2b7a7b44aafa64fbf7b77532 | 3,637,902 |
import re
import os
def get_name(path):
"""get the name from a repo path"""
return re.sub(r"\.git$", "", os.path.basename(path)) | 42c410fd21e1d50270cf7703b9f054a8455c7efd | 3,637,903 |
import os
# script_dir was defined at module level in the original; assume the
# directory containing this script.
script_dir = os.path.dirname(os.path.abspath(__file__))
def abs_path(*paths):
"""Get the absolute path of the given file path.
Args:
*paths: path parts.
Returns:
An abs path string.
"""
return os.path.abspath(os.path.join(script_dir, '..', *paths)) | d3d1b349e6df5ab1e22e913aac6e510c5bcc1cc5 | 3,637,904 |
from typing import Iterable
from typing import Optional
def is_valid_shipping_method(
checkout: Checkout,
lines: Iterable["CheckoutLineInfo"],
discounts: Iterable[DiscountInfo],
subtotal: Optional["TaxedMoney"] = None,
):
"""Check if shipping method is valid and remove (if not)."""
if not checkout.shipping_method:
return False
if not checkout.shipping_address:
return False
valid_methods = get_valid_shipping_methods_for_checkout(
checkout, lines, discounts, subtotal=subtotal
)
if valid_methods is None or checkout.shipping_method not in valid_methods:
clear_shipping_method(checkout)
return False
return True | 01cbbc493d74057c8f2e252cfd217b3cd108b97c | 3,637,905 |
async def load_gdq_index():
"""
Returns the GDQ index (main) page, includes donation totals
:return: json object
"""
return (await load_gdq_json(f"?type=event&id={config['event_id']}"))[0]['fields'] | 6d1383990a2fb98c7a42b4423f21728f4f473e1a | 3,637,906 |
def deleteRestaurantForm(r_id):
"""Create form to delete existing restaurant
Args:
r_id: id extracted from URL
"""
session = createDBSession()
restaurant = session.query(Restaurant).get(r_id)
if restaurant is None:
output = ("<p>The restaurant you're looking for doesn't exist.<br>"
"<a href='/restaurants'>Back to listings</a></p>")
else:
output = ("<form method='POST' enctype='multipart/form-data' "
"action='/restaurants/%s/delete'>"
"<h2>Delete %s restaurant</h2><p>Are you sure? "
"<input type='hidden' name='restaurantID' value='%s'>"
"<input type='submit' value='Delete'></p></form>"
"<p><a href='/restaurants'>No, take me back to the listings"
"</a></p>") % (restaurant.id, restaurant.name, restaurant.id)
return output | 530a03f17bb28e7967c375d7da6f6e077584cd37 | 3,637,907 |
import ast
import bcrypt
import etcd
from datetime import datetime
def password_account(data):
"""Modify account password.
etcd_key: <ETCD_PREFIX>/account/<name>
data: {'name': , 'pass': , 'pass2': }
"""
t_ret = (False, '')
s_rsc = '{}/account/{}'.format(etcdc.prefix, data['name'])
try:
r = etcdc.read(s_rsc)
except etcd.EtcdKeyNotFound as e:
log.error(e)
return (False, 'EtcdKeyNotFound')
d = ast.literal_eval(r.value)
# check data['pass'] is valid.
(b_ret, s_msg) = _pass_validate(data)
if not b_ret:
log.debug((b_ret, s_msg))
return (b_ret, s_msg)
# password is okay. go head.
new_data = dict()
    s_modified = datetime.utcnow().isoformat() + 'Z'
    new_data['modifiedAt'] = s_modified
# Put d['pass'] to oldpass entry.
if 'oldpass' in d:
        new_data['oldpass'] = d['oldpass'] + [d['pass']]
else:
new_data['oldpass'] = [d['pass']]
# Create new hashed password.
bytes_salt = bytes(d['salt'], 'utf-8')
new_data['pass'] = bcrypt.hashpw(str.encode(data['pass']),
bytes_salt).decode()
    d.update(new_data)
s_rsc = '{}/account/{}'.format(etcdc.prefix, data['name'])
try:
etcdc.write(s_rsc, d, prevExist=True)
except etcd.EtcdKeyNotFound as e:
log.error(e)
t_ret = (False, e)
else:
t_ret = (True, 'user {} password is modified.'.format(data['name']))
finally:
return t_ret | 65dfec27cfa558c7a6f5758696acac063936337f | 3,637,908 |
def split_pkg(pkg):
"""nice little code snippet from isuru and CJ"""
if not pkg.endswith(".tar.bz2"):
raise RuntimeError("Can only process packages that end in .tar.bz2")
pkg = pkg[:-8]
plat, pkg_name = pkg.split("/")
name_ver, build = pkg_name.rsplit("-", 1)
name, ver = name_ver.rsplit("-", 1)
return plat, name, ver, build | 3568fc28c54e7de16e969be627804fbb80938d65 | 3,637,909 |
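A quick usage sketch with a made-up conda-style package path (the filename here is illustrative):

```python
plat, name, ver, build = split_pkg("linux-64/numpy-1.21.0-py39h0_0.tar.bz2")
print(plat, name, ver, build)
# linux-64 numpy 1.21.0 py39h0_0
```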
import numpy as np
def gaussian(k, x):
""" gaussian function
    k - coefficient array [mean, sigma, amplitude, offset], x - values """
return k[2] * np.exp( -(x - k[0]) * (x - k[0]) / (2 * k[1] * k[1])) + k[3] | f58279de58992efd34bd1fa84bbecc64e3dd52ee | 3,637,910 |
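A small sketch evaluating the profile on a grid; per the formula, `k` unpacks as [mean, sigma, amplitude, offset]:

```python
x = np.linspace(-5, 5, 11)
k = [0.0, 1.0, 2.0, 0.5]   # mean=0, sigma=1, amplitude=2, offset=0.5
y = gaussian(k, x)
print(y.max())  # 2.5 at x == 0 (amplitude + offset)
```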
def get_physical_locator(context, record_dict):
"""Get physical locator that matches the supplied uuid."""
try:
query = context.session.query(models.PhysicalLocators)
physical_locator = query.filter_by(
uuid=record_dict['uuid'],
ovsdb_identifier=record_dict['ovsdb_identifier']).one()
except exc.NoResultFound:
LOG.debug('no physical locator found for %s and %s',
record_dict['uuid'],
record_dict['ovsdb_identifier'])
return
return physical_locator | 83067d4ad5a475a2f0ca5d22ba29837df8879609 | 3,637,911 |
def coins(n, arr):
"""
Counting all ways e.g.: (5,1) and (1,5)
"""
# Stop case
if n < 0:
return 0
if n == 0:
return 1
ways = 0
for i in range(0, len(arr)):
ways += coins(n - arr[i], arr)
return ways | cb269db7aef58ae2368a6e6dc04ce6743ebd3d0e | 3,637,912 |
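Because order matters, this counts compositions rather than combinations. A usage sketch follows; since the plain recursion is exponential, a memoized variant is also shown for larger n:

```python
from functools import lru_cache

print(coins(4, [1, 2]))  # 5: (1,1,1,1), (1,1,2), (1,2,1), (2,1,1), (2,2)

# Memoized variant (same counting, cached on the remaining amount).
def coins_fast(n, arr):
    @lru_cache(maxsize=None)
    def ways(m):
        if m < 0:
            return 0
        if m == 0:
            return 1
        return sum(ways(m - a) for a in arr)
    return ways(n)

print(coins_fast(4, [1, 2]))  # 5
```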
def compute_diff(old, new):
"""
Compute a diff that, when applied to object `old`, will give object
`new`. Do not modify `old` or `new`.
"""
if not isinstance(old, dict) or not isinstance(new, dict):
return new
diff = {}
for key, val in new.items():
if key not in old:
diff[key] = val
elif old[key] != val:
diff[key] = compute_diff(old[key], val)
for key in old:
if key not in new:
diff[key] = "$delete"
return diff | f6e7674faa2a60be17994fbd110f8e1d67eb9886 | 3,637,913 |
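A usage sketch showing a nested change and the `"$delete"` sentinel for removed keys:

```python
old = {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}
new = {'a': 1, 'b': {'x': 1, 'y': 9}}
print(compute_diff(old, new))
# {'b': {'y': 9}, 'c': '$delete'}
```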
import os
def get_file_to_dict(fliepath,splitsign,name):
"""
    Read the file at the given path into a dict, creating the file if it
    does not exist; splitsign is the key/value separator
"""
if os.path.exists(fliepath+name+'.txt'):
dict = {}
with open(fliepath+name+'.txt',mode='r',encoding='utf-8') as ff:
try:
list = ff.read().splitlines()
for l in list:
s = str(l).split(splitsign,1)
dict[s[0].strip()] = s[1].strip()
            except Exception:
                dict = {}
else:
with open(fliepath+name+'.txt', mode='w', encoding='utf-8') as ff:
dict = {}
return dict | 998daf4fa33453c30d6543febe275d46939113f3 | 3,637,914 |
import numpy as np
def assign_nuts1_to_lad(c, lu=_LAD_NUTS1_LOOKUP):
"""Assigns nuts1 to LAD"""
if c in lu.keys():
return lu[c]
elif c[0] == "S":
return "Scotland"
elif c[0] == "W":
return "Wales"
elif c[0] == "N":
return "Northern Ireland"
else:
return np.nan | 7d9ceb2d2eaedf72eef8243d5880b547c989d2fb | 3,637,915 |
async def get_all_persons():
"""List of all people."""
with Session(DB.engine) as session:
persons = session.query(Person).all()
return [p.to_dict() for p in persons] | 455533d6ee6b4139354c524d6fd6db29ff2ad123 | 3,637,916 |
from typing import List
from typing import Dict
def pivot_pull(pull: List[Dict[str, str]]):
"""Pivot so columns are measures and rows are dates."""
parsed_pull = parse_dates(pull)
dates = sorted(list(set(row["sample_date"] for row in parsed_pull)))
pivot = list()
for date in dates:
row = {"sample_date": date}
        observations = [r for r in parsed_pull if r["sample_date"] == date]
        for measure in MEASUREMENT_GROUPS:
            observation = [r for r in observations if r["parameter"] == measure]
if len(observation) != 1:
raise ValueError(
"Should only have one value per date observation combo."
)
row[measure] = observation[0]["numeric_result"]
pivot.append(row)
return pivot | 53d945cff023c9cb0233b3198a89da1e57ba18d6 | 3,637,917 |
import tensorflow as tf
from tensorflow.python.ops import array_ops
def _location_sensitive_score(W_query, W_fil, W_keys):
"""Impelements Bahdanau-style (cumulative) scoring function.
This attention is described in:
J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-
gio, “Attention-based models for speech recognition,” in Ad-
vances in Neural Information Processing Systems, 2015, pp.
577–585.
#############################################################################
hybrid attention (content-based + location-based)
f = F * α_{i-1}
energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))
#############################################################################
Args:
W_query: Tensor, shape "[batch_size, 1, attention_dim]" to compare to location features.
        W_fil: processed previous alignments into location features, shape "[batch_size, max_time, attention_dim]"
W_keys: Tensor, shape "[batch_size, max_time, attention_dim]", typically the encoder outputs.
Returns:
A "[batch_size, max_time]" attention score (energy)
"""
# Get the number of hidden units from the trailing dimension of keys
dtype = W_query.dtype
num_units = W_keys.shape[-1].value or array_ops.shape(W_keys)[-1]
v_a = tf.get_variable(
"attention_variable_projection", shape=[num_units], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable(
"attention_bias", shape=[num_units], dtype=dtype,
initializer=tf.zeros_initializer())
return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fil + b_a), [2]) | 4c91f7f682c1a06303c877a81f434cb265dcb1af | 3,637,918 |
def dh_noConv( value, pattern, limit ):
"""decoding helper for a single integer value, no conversion, no rounding"""
return dh( value, pattern, encNoConv, decSinglVal, limit ) | 55c2306efc1873a283fc4ed24be6677816029122 | 3,637,919 |
import os
from tkinter import Tk
from tkinter.filedialog import askopenfilename
def chooseFile():
"""
Parameters
----------
None
No parameters are specified.
Returns
-------
filenames: tuple
A tuple that contains the list of files to be loaded.
"""
## change the wd to dir containing the script
curpath = os.path.dirname(os.path.realpath(__file__))
os.chdir(curpath)
root = Tk()
root.withdraw()
filenames = askopenfilename(parent= root, filetypes = (("CSV files", "*.csv"), ("Text files", "*.txt"), ("All files", "*.*")), multiple= True)
    if len(filenames) == 1:
        print(len(filenames), "file is loaded.")
    elif len(filenames) > 1:
        print(len(filenames), "files are loaded.")
    else:
        print("No files are loaded.")
return filenames | 480125e9cef6e2334dd21a113fead441388b1f10 | 3,637,920 |
def reward_strategy(orig_reward, actualperf, judgeperf, weight={'TP':1, 'TN': 1, 'FP': -1, 'FN':-1}):
"""
"""
    assert set(weight.keys()) == {'TP', 'TN', 'FP', 'FN'}, "Please assign weights to TP, TN, FP and FN."
# assert sum(weight.values()) == 0, "Summation of weight values needs to be 0."
if actualperf & judgeperf:
cond = 'TP'
elif (not actualperf) & (not judgeperf):
cond = 'TN'
elif (not actualperf) & judgeperf:
cond = 'FP'
elif actualperf & (not judgeperf):
cond = 'FN'
reward = orig_reward + weight[cond]
reward = round(reward, 2)
return reward | 3bed44a11197898a94939ed2cc9a400243f604b6 | 3,637,921 |
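A quick sketch with the default weights (correct judgments add 1, incorrect subtract 1):

```python
print(reward_strategy(10.0, actualperf=True, judgeperf=True))   # 11.0 (TP)
print(reward_strategy(10.0, actualperf=False, judgeperf=True))  # 9.0  (FP)
```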
def get_user_ids_from_primary_location_ids(domain, location_ids):
"""
Returns {user_id: primary_location_id, ...}
"""
result = (
UserES()
.domain(domain)
.primary_location(location_ids)
.non_null('location_id')
.fields(['location_id', '_id'])
.run().hits
)
ret = {}
for r in result:
if 'location_id' in r:
loc = r['location_id']
ret[r['_id']] = loc
return ret | 9707cda74324c983add4872ca4b27057b7ab8809 | 3,637,922 |
def get_next_states(state: State):
"""Create new states, but prioritize the following:
asdjkgnmweormelfkmw
Prioritize nothing...
"""
out = []
# First we check hallways.
for i in HALLWAY_IND:
# Check if the room has any crabs
hall = state.rooms[i]
if hall.is_empty(): continue
# Get the crab
crab, crab_pos = hall.get_next()
# Get target room
target_room = enum2room[crab]
if state.rooms[target_room].is_empty():
# Wait, first we need to see if we can move it to the room
if i < target_room:
# Hallway is on the left of the room
left = i
right = target_room
else:
left = target_room
right = i
but_can_it_move = True
for j in range(left, right):
if j % 2:
continue
if j == i:
continue
if state.rooms[j].has_space():
continue
but_can_it_move = False
break
if but_can_it_move:
# We can move the crab!
new_state = _deepcopy(state)
# Calculate the new cost
# The path is the current position of the crab in the current
# hallway, then the position in the target room and finaly
# the move between the hallways and rooms
target_position = state.rooms[target_room].get_position()
move = abs(target_room - i)
new_cost = (crab_pos + target_position + move) * crab
# Apply changes to the state
new_state.rooms[i].pos[crab_pos] = E
new_state.rooms[target_room].pos[target_position - 1] = crab
new_state.count_completed_rooms()
out.append((new_cost, new_state))
for i in ROOMS_INDIC:
# Check if room is complete
room = state.rooms[i]
if room.is_complete():
continue
if room.is_empty():
continue
# The room is not complete so we have to move the topmost crab out.
crab, crab_pos = room.get_next()
# See where it has to go
target_room = enum2room[crab]
# See if target room is empty so we can directly move in to the
# target room
if state.rooms[target_room].is_empty():
if i < target_room:
left = i
right = target_room
else:
left = target_room
right = i
but_can_it_move = True
for j in range(left, right):
if j % 2:
# Other rooms
continue
if j == i:
continue
if state.rooms[j].has_space():
continue
but_can_it_move = False
break
if but_can_it_move:
new_state = _deepcopy(state)
target_position = state.rooms[target_room].get_position()
# Calculate the new state
move = abs(target_room - i) + 1
new_cost = (crab_pos + move + target_position) * crab
# Apply changes
new_state.rooms[i].pos[crab_pos] = E
new_state.rooms[target_room].pos[target_position - 1] = crab
new_state.count_completed_rooms()
out.append((new_cost, new_state))
        # Well now let's see if we can move to a hallway
for j in HALLWAY_IND:
# We fill all the hallways. All of them...
hall = state.rooms[j]
if hall.has_space():
# We can move it here.
but_can_it_move = True
if i < j:
left = i
right = j
else:
left = j
right = i
for l in range(left, right):
if l == j: # Ignore target hall
continue
if l % 2: # Ignore rooms
continue
if state.rooms[l].is_empty():
continue
but_can_it_move = False
break
if but_can_it_move:
# Fill all possible positions for this hallway.
for k in range(hall.s -1, -1, -1):
if hall.pos[k]:
continue
new_state = _deepcopy(state)
move = abs(i - j)
new_cost = (crab_pos + k + 1 + move) * crab
# Make the change
new_state.rooms[i].pos[crab_pos] = E
new_state.rooms[j].pos[k] = crab
new_state.count_completed_rooms()
out.append((new_cost, new_state))
return out | fdc270ef292ce1c1507937975743f8877844b063 | 3,637,923 |
def _build_trainstep(fcn, projector, optimizer, strategy, temp=1, tau_plus=0, beta=0, weight_decay=0):
"""
Build a distributed training step for SimCLR or HCL.
Set tau_plus and beta to 0 for SimCLR parameters.
    :fcn: Keras FCN feature extractor
    :projector: Keras projection head
:optimizer: Keras optimizer
:strategy: tf.distribute.Strategy object
:temp: temperature parameter
:tau_plus: HCL class probability parameter
:beta: HCL concentration parameter
    :weight_decay: L2 loss coefficient. 0 to disable
Returns a distributed training function
"""
trainvars = fcn.trainable_variables + projector.trainable_variables
def _step(x1, m1, x2, m2):
with tf.GradientTape() as tape:
loss = 0
# get replica context- we'll use this to aggregate embeddings
# across different GPUs
context = tf.distribute.get_replica_context()
#print("x,y:", x.shape, y.shape)
# run images through model and normalize embeddings. do this
# in three steps:
# 1) compute features with FCN (N, w, h, feature_dim)
# 2) compute segment-weighted features (N*num_samples, feature_dim)
# 3) compute projections z (N*num_samples, d)
x1 = fcn(x1, training=True)
hm1 = _prepare_embeddings(x1, m1)
z1 = tf.nn.l2_normalize(projector(hm1, training=True), 1)
x2 = fcn(x2, training=True)
hm2 = _prepare_embeddings(x2, m2)
z2 = tf.nn.l2_normalize(projector(hm2, training=True), 1)
# mask out all positive pairs where one mask or the other
# is empty
mask = tf.stop_gradient(_prepare_mask(m1, m2))
# aggregate projections across replicas. z1 and z2 should
# now correspond to the global batch size (gbs*num_samples, d)
z1 = context.all_gather(z1, 0)
z2 = context.all_gather(z2, 0)
print("z1,z2:", z1.shape, z2.shape)
mask = context.all_gather(mask, 0)
print("mask:", mask.shape)
with tape.stop_recording():
gbs = z1.shape[0]
negmask = _build_negative_mask(gbs)
# SimCLR loss case
if (tau_plus == 0)&(beta == 0):
softmax_prob, nce_batch_acc = _simclr_softmax_prob(z1, z2, temp, negmask)
# HCL loss case
elif (tau_plus > 0)&(beta > 0):
softmax_prob, nce_batch_acc = _hcl_softmax_prob(z1, z2, temp,
beta, tau_plus, negmask)
else:
assert False, "both tau_plus and beta must be nonzero to run HCL"
softmax_loss = tf.reduce_mean(-1*mask*tf.math.log(softmax_prob))
loss += softmax_loss
if weight_decay > 0:
l2_loss = compute_l2_loss(fcn) + compute_l2_loss(projector)
loss += weight_decay*l2_loss
else:
l2_loss = 0
grad = tape.gradient(loss, trainvars)
optimizer.apply_gradients(zip(grad, trainvars))
return {"loss":loss, "nt_xent_loss":softmax_loss,
"l2_loss":l2_loss,
"nce_batch_accuracy":nce_batch_acc}
@tf.function
def trainstep(x1, m1, x2, m2):
per_example_losses = strategy.run(_step, args=(x1, m1, x2, m2))
lossdict = {k:strategy.reduce(
tf.distribute.ReduceOp.MEAN,
per_example_losses[k], axis=None)
for k in per_example_losses}
return lossdict
return trainstep | 2ceb7aa171835b45376f11e28fd4fe323d1ac7f1 | 3,637,924 |
import pyprind
from LibrairieVideoAna import PositionTrack
import os
def GatherToDataframe( session, analysis, version , save = True, **kwargs ):
"""
Load external data (pickle files mostly) into a session dataframe or series of session dataframes columns.
You can specify the analysis type and version of that analysis you want to get loaded and saved inside a sessiondataframe.
    This function is destined to be used before calling a MultisessionDatabase (because that function just merges the data inside the session dataframes).
Parameters
----------
session : int
session_number.
analysis : str
type of analysis to load on the session (must match an existing one in the .config file).
version : str
Version of that analysis (in case you ran it multiple times with different version numbers) example : 'V1' or 'V2'.
save : bool, optional
Save on disk (true) or only return the dataframe. The default is True.
**kwargs : TYPE
- reload : default False.
            If the column supposed to hold the .pickle file data already exists, the function returns. To avoid this behavior and reload all data, use reload = True.
- silent : default True
Print warnings (True) or not.
- all the kwargs allowed for SessionDataframe, used when loading the dataframes. See that function for more details :
- source : default None.
- sql_engine : default None.
- force : default False.
            BE CAREFUL - READ ENTIRELY -
If True, the function will first regenerate a dataframe from mysql (erasing all data previously merged inside it)
and then GatherToDataframe will remerge the data from the current analysis and version.
If you wish to load several analysis types inside the same sessiondataframe,
you must not specify True to this argument after the first call or previously loaded data will be removed.
- castErrors : default False.
Returns
-------
SessionDataBase
A SessionDataframe with the data loaded inside it.
"""
if isinstance(session , (int, np.integer) ):
SessionDataFrame = SessionDataframe(session, method = "new", **kwargs)
else :
SessionDataFrame = session
session = SessionDataFrame.identity["Session"]
level , column_names , filename_contruct , sublevel_folder , applies_to = ConstructName(SessionDataFrame, analysis)
col_found = True
for column_name in column_names :
if column_name != "" :
if not column_name in SessionDataFrame.columns:
SessionDataFrame.loc[:,column_name] = None
col_found = False
else :
col_found = False
if col_found and not kwargs.get("reload",False):
if not kwargs.get("silent",False):
print("Data already exist on a saved dataframe, returning")
return SessionDataFrame
if applies_to == "trial" :
bar = pyprind.ProgBar(SessionDataFrame.shape[0], track_time=True, title=f'Gathering {analysis}',bar_char='█',update_interval = 1)
for index, row in SessionDataFrame.iterrows():
bar.update()
input_path = os.path.join(SessionDataFrame.dirs[level], sublevel_folder , eval(filename_contruct))
if os.path.isfile(input_path):
if analysis == "ShapeMatch_trajectories" :
mesh = PositionTrack.LoadTrackerMesh(input_path, loadtype = "results")
if mesh is not None :
trajes = PositionTrack.GetTrajectoryResults(mesh)
if trajes is not None :
trajlist = []
for key in trajes.keys() :
trajlist.append(trajes[key])
#SessionDataFrame.loc[index,key] = trajes[key]
#SessionDataFrame.loc[index,key] = geometry.UPointCollection(trajes[key])
SessionDataFrame.loc[index,column_names[0]] = geometry.ULineCollection ( np.hstack( [ trajlist[0] ,trajlist[1] ] ) )
if save :
SessionDataFrame.save()
return SessionDataFrame
elif applies_to == "session":
input_path = os.path.join(SessionDataFrame.dirs[level], sublevel_folder , eval(filename_contruct))
with open(input_path,"rb") as f :
item1 = CustomUnpickler(f).load()
return item1
else :
raise NotImplementedError | 5cd2bc23c49258834ad3ed555a15f3608d9e5eb5 | 3,637,925 |
def get_environment():
""" Light-weight routine for reading the <Environment> block: does most of the work through side effects on PETRglobals """
ValidExclude = None
ValidInclude = None
ValidOnly = True
ValidPause = 0
#PETRglobals.CodeWithPetrarch1 = True
#PETRglobals.CodeWithPetrarch2 = False
line = fin.readline()
while len(line) > 0 and not line.startswith("<Environment>"): # loop through the file
line = fin.readline()
if len(line) == 0:
print("Can't find <Environment> block")
exit()
line = fin.readline()
while "</Environment>" not in line: # loop through the file
print(line[:-1])
if '<Verbfile' in line:
PETRglobals.VerbFileName = line[line.find(">") + 1:line.find("</")]
elif '<Actorfile' in line:
PETRglobals.ActorFileList = line[line.find(">") + 1:line.find("</")].split(',')
elif '<Agentfile' in line:
PETRglobals.AgentFileList = line[line.find(">") + 1:line.find("</")].split(',')
elif '<Discardfile' in line:
PETRglobals.DiscardFileName = line[line.find(">") + 1:line.find("</")]
elif '<PICOfile' in line:
PETRglobals.InternalCodingOntologyFileName = line[line.find(">") + 1:line.find("</")]
elif '<Include' in line:
ValidInclude = line[line.find(">") + 1:line.find("</")].split()
print('<Include> categories', ValidInclude)
if 'valid' in ValidInclude:
ValidOnly = True
ValidInclude.remove('valid')
elif '<Exclude' in line:
ValidExclude = line[line.find(">") + 1:line.find("</")].split()
print('<Exclude> categories', ValidExclude)
elif '<Pause' in line:
theval = line[line.find(">") + 1:line.find("</")]
if 'lways' in theval:
ValidPause = 1 # skip first char to allow upper/lower case
elif 'ever' in theval:
ValidPause = 2
elif 'top' in theval:
ValidPause = 3
line = fin.readline()
print(PETRglobals.VerbFileName, PETRglobals.ActorFileList[0], PETRglobals.AgentFileList[0], PETRglobals.DiscardFileName)
print(ValidInclude, ValidExclude)
print(ValidPause, ValidOnly)
return ValidInclude, ValidExclude, ValidPause, ValidOnly | 966cb1ad713f5b0c87cdabe12475b4239d5b7469 | 3,637,926 |
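A minimal `<Environment>` block this parser accepts, reconstructed from the tags it scans for; the file names and category codes below are placeholders. Note each tag must sit on one line, since values are read between the first `>` and `</`:

```xml
<Environment>
<Verbfile>CAMEO.verbpatterns.txt</Verbfile>
<Actorfile>Countries.actors.txt,International.actors.txt</Actorfile>
<Agentfile>Phoenix.agents.txt</Agentfile>
<Discardfile>Phoenix.discards.txt</Discardfile>
<Include>valid 010 020</Include>
<Exclude>190</Exclude>
<Pause>Never</Pause>
</Environment>
```

With this input, `valid` in the `<Include>` list sets `ValidOnly = True` and is removed, and `Never` matches the `'ever'` check, giving `ValidPause = 2`.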
import tensorflow as tf
def create_variables_from_samples(sample_z_logits, sample_z_logp, sample_b, batch_index, sequence_index):
"""
Create the variables for RELAX control variate. Assumes sampled tokens come from decoder.
:param sample_z_logits: [B,T,V] tensor containing sampled processed logits created by stacking logits during
decoding loop of sampling process
:param sample_z_logp: [B,T,V] tensor containing sampled processed logp created by stacking logp during
decoding loop of sampling process
:param sample_b: the [B,T] tensor containing the H(z) indices (Gumbel-Max)
:param batch_index: [B,T] tensor of the batch size repeated for seq len
:param sequence_index: [B,T] tensor of range(0, seq len)
:return: z_tilde, and logp(b) for equation
"""
v = tf.random_uniform(shape=sample_z_logp.get_shape().as_list(),
minval=1e-8,
maxval=1,
dtype=tf.float32)
# create index tensor where b is the argmax, to use as indexer for substitution
b_new = tf.cast(tf.squeeze(sample_b, 0), tf.int64) # assumes sample_b = [BxT]
index_tensor_b = tf.expand_dims(tf.stack([batch_index, sequence_index, b_new], axis=1), 0)
v_b = tf.gather_nd(v, index_tensor_b) # values of v where b are the argmax indexes
update = -tf.log(-tf.log(v_b)) # for i == b
# create z_tilde as for the case where i != b
clipped_logit_probs = tf.clip_by_value(tf.math.softmax(sample_z_logits, axis=2), 1e-8, 1.0)
z_tilde = -tf.log(-tf.div(tf.log(v), clipped_logit_probs) - tf.expand_dims(tf.log(v_b), 2))
z_tilde = tf.tensor_scatter_nd_update(z_tilde, index_tensor_b, update)
logp_b = tf.gather_nd(sample_z_logp, index_tensor_b) # used in loss func
return z_tilde, logp_b | 615e73b136b16979257eb5b32ec9a696c17730be | 3,637,927 |
from typing import OrderedDict
import copy
from ROOT import TFile, TProfile2D
def get2DHisto_(detector,plotNumber,geometry):
"""
    This function opens the appropriate ROOT file,
extracts the TProfile2D and turns it into a Histogram,
if it is a compound detector, this function
takes care of the subdetectors' addition.
Note that it takes plotNumber as opposed to plot
"""
histo = None
rootFile = TFile()
detectorFilename = 'matbdg_%s_%s.root'%(detector,geometry)
if detector not in COMPOUNDS.keys() or checkFile_(detectorFilename):
if not checkFile_(detectorFilename):
print('Warning: %s not found' % detectorFilename)
return 0
rootFile = TFile.Open(detectorFilename,'READ')
prof = rootFile.Get("%d" % plotNumber)
if not prof: return 0
        # Prevent memory leaking by specifying a unique name
prof.SetName('%u_%s_%s' %(plotNumber,detector,geometry))
prof.__class__ = TProfile2D
histo = prof.ProjectionXY()
else:
histos = OrderedDict()
theFiles = []
for subDetector in COMPOUNDS[detector]:
subDetectorFilename = 'matbdg_%s_%s.root' % (subDetector,geometry)
if not checkFile_(subDetectorFilename):
print('Warning: %s not found'%subDetectorFilename)
continue
subDetectorFile = TFile.Open(subDetectorFilename,'READ')
theFiles.append(subDetectorFile)
print('*** Open file... %s' % subDetectorFilename)
prof = subDetectorFile.Get('%d'%plotNumber)
if not prof: return 0
prof.__class__ = TProfile2D
if not histo:
histo = prof.ProjectionXY('B_%s' % prof.GetName())
else:
histo.Add(prof.ProjectionXY('B_%s' % prof.GetName()))
return copy.deepcopy(histo) | 5759bc16686642e377c9bd37dde73374f74870ac | 3,637,928 |
import shlex
import json
import traceback
import time
def binlog2sql(request):
"""
    Get SQL by parsing the binlog
:param request:
:return:
"""
instance_name = request.POST.get('instance_name')
save_sql = True if request.POST.get('save_sql') == 'true' else False
instance = Instance.objects.get(instance_name=instance_name)
no_pk = True if request.POST.get('no_pk') == 'true' else False
flashback = True if request.POST.get('flashback') == 'true' else False
back_interval = 0 if request.POST.get('back_interval') == '' else int(request.POST.get('back_interval'))
num = 30 if request.POST.get('num') == '' else int(request.POST.get('num'))
start_file = request.POST.get('start_file')
start_pos = request.POST.get('start_pos') if request.POST.get('start_pos') == '' else int(
request.POST.get('start_pos'))
end_file = request.POST.get('end_file')
end_pos = request.POST.get('end_pos') if request.POST.get('end_pos') == '' else int(request.POST.get('end_pos'))
stop_time = request.POST.get('stop_time')
start_time = request.POST.get('start_time')
only_schemas = request.POST.getlist('only_schemas')
only_tables = request.POST.getlist('only_tables[]')
only_dml = True if request.POST.get('only_dml') == 'true' else False
sql_type = ['INSERT', 'UPDATE', 'DELETE'] if request.POST.getlist('sql_type[]') == [] else request.POST.getlist(
'sql_type[]')
    # Validate sql_type
if [i for i in sql_type if i not in ['INSERT', 'UPDATE', 'DELETE']]:
return JsonResponse({'status': 1, 'msg': '类型过滤参数不正确', 'data': {}})
    # flashback=True returns DML rollback statements
result = {'status': 0, 'msg': 'ok', 'data': ''}
    # Hand the request to binlog2sql for parsing
binlog2sql = Binlog2Sql()
    # Prepare the arguments
args = {"conn_options": fr"-h{shlex.quote(str(instance.host))} -u{shlex.quote(str(instance.user))} \
-p'{shlex.quote(str(instance.password))}' -P{shlex.quote(str(instance.port))} ",
"stop_never": False,
"no-primary-key": no_pk,
"flashback": flashback,
"back-interval": back_interval,
"start-file": start_file,
"start-position": start_pos,
"stop-file": end_file,
"stop-position": end_pos,
"start-datetime": '"'+start_time+'"',
"stop-datetime": '"'+stop_time+'"',
"databases": ' '.join(only_schemas),
"tables": ' '.join(only_tables),
"only-dml": only_dml,
"sql-type": ' '.join(sql_type),
"instance": instance
}
    # Validate the arguments
args_check_result = binlog2sql.check_args(args)
if args_check_result['status'] == 1:
return HttpResponse(json.dumps(args_check_result), content_type='application/json')
    # Convert the arguments to a command line
cmd_args = binlog2sql.generate_args2cmd(args, shell=True)
    # Run the command
try:
p = binlog2sql.execute_cmd(cmd_args, shell=True)
        # Read the first num lines, then stop
rows = []
n = 1
for line in iter(p.stdout.readline, ''):
if n <= num:
n = n + 1
row_info = {}
try:
row_info['sql'] = line.split('; #')[0] + ";"
row_info['binlog_info'] = line.split('; #')[1].rstrip('\"')
except IndexError:
row_info['sql'] = line
row_info['binlog_info'] = None
rows.append(row_info)
else:
break
        if len(rows) == 0:
            # Check whether an error occurred
stderr = p.stderr.read()
if stderr:
result['status'] = 1
result['msg'] = stderr
return HttpResponse(json.dumps(result), content_type='application/json')
        # Terminate the child process
p.kill()
result['data'] = rows
except Exception as e:
logger.error(traceback.format_exc())
result['status'] = 1
result['msg'] = str(e)
    # Save to file asynchronously
if save_sql:
args.pop('conn_options')
async_task(binlog2sql_file, args=args, user=request.user, hook=notify_for_binlog2sql, timeout=-1,
task_name=f'binlog2sql-{time.time()}')
    # Return the query result
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json') | 82cf06637024a0feb2c2ae0203cd795d3f6eb944 | 3,637,929 |
def smtp_config_generator_str(results, key, inp):
"""
Set server/username config.
    :param results: Results dict holding the collected config values.
    :type results: dict
:param key: Key for results dict.
:type key: str
:param inp: Input question.
:type inp: str
"""
if results[key] is None:
results[key] = input(inp)
return results | f2cccfaf569f005e03bb351379db85de7146eda0 | 3,637,930 |
import logging
import bpy
from bpy import context, data, ops
def delete_object_by_name(name, ignore_errors=False):
"""
Attempts to find an object by the name given and deletes it from the scene.
:param name: the name of this object
:param ignore_errors: if True, no exception is raised when the object is deleted. Otherwise, you will get a
KeyError if no object by that name exists.
:return: True if the object was found and deleted successfully
"""
try:
logging.debug("Attempting to delete object '%s'" % name)
obj = data.objects[name]
except KeyError as ex:
if ignore_errors: # are we ignoring errors?
logging.debug("Didn't delete '%s'. Probably didn't exist. Error ignored." % name)
return False # just report that we weren't successful
raise ex # object doesn't exist so raise this exception
ops.object.select_all(action='DESELECT')
obj.select_set(state=True)
context.view_layer.objects.active = obj
    bpy.ops.object.delete()
    return True
def default_rollout_step(policy, obs, step_num):
"""
The default rollout step function is the policy's compute_action function.
A rollout step function allows a developer to specify the behavior
that will occur at every step of the rollout--given a policy
and the last observation from the env--to decide
what action to take next. This usually involves the rollout's
    policy and may perform learning. It may also involve using, updating,
or saving learning related state including hyper-parameters
such as epsilon in epsilon greedy.
You can provide your own function with the same signature as this default
if you want to have a more complex behavior at each step of the rollout.
"""
return policy.compute_action(obs) | a6e9dff784e46b9a59ae34334a027b427e8d230a | 3,637,932 |
import numpy as np
def perfilsersic(r_e, I_e, n, r):
"""Evaluate a Sersic Profile.
    Evaluates, at a given radius r, the brightness value of a
    Sersic profile.
    r_e : scale radius
    I_e : scale intensity
    n : Sersic index
    r : radius measured from the center, in pixels
"""
b = 1.999 * n - 0.327
I_r = I_e * np.exp(-b * (((r / r_e) ** (1 / np.float(n))) - 1))
I_r = I_r / (I_e * np.exp(-b * (((0.0 / r_e) ** (1 / np.float(n))) - 1)))
return I_r | b8cd900d28be3fef1efc142c07012174f30c9f9d | 3,637,933 |
import numpy as np
from scipy import interpolate
def background_profile(img, smo1=30, badval=None):
"""
helper routine to determine for the rotated image
(spectrum in rows) the background using sigma clipping.
"""
bgimg = img.copy()
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# look at the summed rows of the image
u_ysum = []
for i in range(ny):
u_ysum.append(bgimg[i,:].mean())
u_ysum = np.asarray(u_ysum)
u_ymask = sigclip1d_mask(u_ysum, 2.5, badval=badval, conv=1e-5, maxloop=30)
u_ymean = u_ysum[u_ymask].mean()
# look at the summed columns after filtering bad rows
u_yindex = np.where(u_ymask)[0]
u_xsum = []
u_std = []
for i in range(nx):
u_x1 = bgimg[u_yindex, i].squeeze()
# clip u_x1
u_x1mask = sigclip1d_mask(u_x1, 2.5, badval=None, conv=1e-5, maxloop=30)
u_xsum.append(u_x1[u_x1mask].mean())
u_std.append(u_x1[u_x1mask].std())
#print u_x1[u_x1mask]
#if np.isfinite(u_x1mask.mean()) & len(u_x1[u_x1mask])>0:
# print "%8.2f %8.2f %8.2f "%(u_x1[u_x1mask].mean(),u_x1[u_x1mask].std(),u_x1[u_x1mask].max())
# the best background estimate of the typical row is now u_xsum
# fit a smooth spline through the u_xsum values (or boxcar?)
#print "u_x means "
#print u_xsum
u_xsum = np.asarray(u_xsum)
u_std = np.asarray(u_std)
u_xsum_ok = np.isfinite(u_xsum)
bg_tcp = interpolate.splrep(np.arange(nx)[u_xsum_ok],
np.asarray(u_xsum)[u_xsum_ok], s=smo1)
# representative background profile in column
u_x = interpolate.splev(np.arange(nx), bg_tcp, )
return u_xsum, u_x, u_std | 59b090a2c05d8a520a3c9f980885c9488bdc7615 | 3,637,934 |
def get_object(bucket,key,fname):
"""Given a bucket and a key, upload a file"""
return aws_s3api(['get-object','--bucket',bucket,'--key',key,fname]) | 6687c657ba364757bd519f370c546ad9b7b033f7 | 3,637,935 |
import glob
def find_file(filename):
"""
This helper function checks whether the file exists or not
"""
file_list = list(glob.glob("*.txt"))
if filename in file_list:
return True
else:
return False | 42895e66e258ba960c890f871be8c261aec02852 | 3,637,936 |
import os
def read(fname):
"""Read a file and return its content."""
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read() | 51e399082554fde2d0d8d429f8f50f743fcd0655 | 3,637,937 |
def tweetnacl_crypto_secretbox(max_messagelength=256):
"""
max_messagelength: maximum length of the message, in bytes.
i.e., the symbolic execution will not consider messages longer than max_messagelength
"""
proj = tweetnaclProject()
state = funcEntryState(proj, "crypto_secretbox_xsalsa20poly1305_tweet", [
("c", pointerToUnconstrainedPublic()), # Output parameter, will hold ciphertext, length 'mlen'
("m", pointerToUnconstrainedPublic()), # message: length 'mlen'
("mlen", publicValue()), # length of message. Not a pointer
("n", pointerTo(secretArray(24), 24)), # nonce, buffer of size crypto_secretbox_NONCEBYTES
("k", pointerTo(secretArray(32), 32)) # secret key: size 32 bytes
])
state.add_constraints(getArgBVS(state, 'mlen') <= max_messagelength)
addDevURandom(state)
return (proj, state) | b380c2848da0c271895dcf893adf1d12c5d86289 | 3,637,938 |
from torch import Tensor
def parameterized_dropout(probs: Tensor,
mask: Tensor,
values: Tensor,
random_rate: float = 0.5,
epsilon: float = 0.1) -> Tensor:
"""
This function returns (values * mask) if random_rate == 1.0 and
(values * probs) if random_rate == 0.0 or if we are in eval mode
(self.training == false). Otherwise, it randomly selects on frame-by-frame
/ vector-by-vector basis, which of the two to use. The main point of this
function is that it intelligently backpropagates derivatives in such a way
that you can meaningfully train `probs`. See the function `get_derivative_scales()`
to understand the central point of how we get derivatives w.r.t. `probs`.
Args:
probs: the probabilities with which the `mask` vector was chosen; we'll be able
to compute derivatives w.r.t. this. A Tensor of shape (*, C) where C is
interpreted as the channel dimension. These must be in the interval [0,1].
mask: A (possibly boolean) Tensor of shape (*, C) and values 0/False or 1/True,
True/1 if this value is to be "passed through".
The caller asserts that these values have been chosen with probabilities
equal to `probs`, e.g. as:
mask = (torch.rand_like(probs) < probs)
(In practice we may be sampling with a more complicated method which has
marginal probabilities equal to `probs`; the correctness of the derivatives
becomes a little weaker in that case).
      values: A Tensor of shape (*, C), the same as `probs` and `mask`; these are the
values that are to be multiplied by a mask (or sometimes scaled by `probs`,
if random_rate < 1). The derivatives backpropagated to here are exact,
i.e. just output_grad * mask. We currently require that elements of values
be in the interval [0,1] (this is needed for a formula involving epsilon).
random_rate: A float value that determines how often we use the zero-one mask; the
rest of the time, we use the expected value (probs).
epsilon: A float value used to prevent division by zero in backprop; controls
a bias-variance tradeoff in derivatives (small->lower bias, higher
variance).
Returns: A Tensor with the same shape as `probs`, `mask` and `values`, i.e.
(*, C), which is randomly somewhere between values * mask and
values * probs.
"""
return _ParameterizedDropout.apply(probs, mask, values, random_rate, epsilon) | 36d26d0deda5b4394457e235e61f334ea6dc767d | 3,637,939 |
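A usage sketch following the sampling recipe in the docstring; `_ParameterizedDropout` is the autograd Function the module is assumed to define:

```python
import torch

logits = torch.randn(8, 16, requires_grad=True)
probs = torch.sigmoid(logits)
values = torch.rand(8, 16)               # docstring requires values in [0, 1]
mask = (torch.rand_like(probs) < probs)  # sampled with probabilities `probs`

out = parameterized_dropout(probs, mask, values, random_rate=0.5)
out.sum().backward()                     # derivatives w.r.t. probs reach `logits`
```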
from typing import Optional
def get_auto_scale_v_core(resource_group_name: Optional[str] = None,
vcore_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutoScaleVCoreResult:
"""
Represents an instance of an auto scale v-core resource.
Latest API Version: 2021-01-01.
:param str resource_group_name: The name of the Azure Resource group of which a given PowerBIDedicated capacity is part. This name must be at least 1 character in length, and no more than 90.
:param str vcore_name: The name of the auto scale v-core. It must be a minimum of 3 characters, and a maximum of 63.
"""
pulumi.log.warn("""get_auto_scale_v_core is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:powerbidedicated:getAutoScaleVCore'.""")
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['vcoreName'] = vcore_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:powerbidedicated/latest:getAutoScaleVCore', __args__, opts=opts, typ=GetAutoScaleVCoreResult).value
return AwaitableGetAutoScaleVCoreResult(
capacity_limit=__ret__.capacity_limit,
capacity_object_id=__ret__.capacity_object_id,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
sku=__ret__.sku,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type) | e3eda57b7afdfe2a7e1b7e3e958b5140a7120a95 | 3,637,940 |
import json
from xml.dom import minidom
def text_output(xml,count):
    """Returns JSON-formatted text from the XML returned from E-Fetch"""
xmldoc = minidom.parseString(xml.encode('utf-8').strip())
jsonout = []
for i in range(count):
title = ''
title = xmldoc.getElementsByTagName('ArticleTitle')
title = parse_xml(title, i, '')
pmid = ''
pmid = xmldoc.getElementsByTagName('PMID')
pmid = parse_xml(pmid, i, '')
abstract = ''
abstract = xmldoc.getElementsByTagName('AbstractText')
abstract = parse_xml(abstract, i, '')
try:
authors = xmldoc.getElementsByTagName('AuthorList')
authors = authors[i].getElementsByTagName('Author')
authorlist = []
for author in authors:
LastName = author.getElementsByTagName('LastName')
LastName = parse_xml(LastName, 0, '')
Initials = author.getElementsByTagName('Initials')
Initials = parse_xml(Initials, 0, '')
if LastName != '' and Initials != '':
author = '%s, %s' % (LastName, Initials)
else:
author = ''
authorlist.append(author)
except Exception:
authorlist = []
pass
try:
journalinfo = xmldoc.getElementsByTagName('Journal')[i]
journalIssue = journalinfo.getElementsByTagName('JournalIssue')[0]
except Exception:
journalinfo = None
journalIssue = None
pass
journal = ''
year = ''
volume = ''
issue = ''
pages = ''
if journalinfo != None:
journal = parse_xml(journalinfo.getElementsByTagName('Title'), 0, '')
year = journalIssue.getElementsByTagName('Year')
year = parse_xml(year, 0, '')
volume = journalIssue.getElementsByTagName('Volume')
volume = parse_xml(volume, 0, '')
issue = journalIssue.getElementsByTagName('Issue')
issue = parse_xml(issue, 0, '')
pages = xmldoc.getElementsByTagName('MedlinePgn')
pages = parse_xml(pages, 0, '')
jsonout.append({
'pmid':pmid,
'title':title,
'authors':authorlist,
'journal':journal,
'year':year,
'volume':volume,
'issue':issue,
'pages':pages,
'abstract':abstract
})
return json.dumps(jsonout) | 0c48a67b123b55ed5c8777d5fd0ad009578ba1ae | 3,637,941 |
import datetime
def datetime2str(target, fmt='%Y-%m-%d %H:%M:%S'):
"""
    Convert a datetime object to a string
:param target: datetime
:param fmt: string
:return: string
"""
return datetime.datetime.strftime(target, fmt) | 9111040a6136ef675929d30f3aba3eb983b45197 | 3,637,942 |
def periodic_targets_form(request, program):
"""
Returns a form for the periodic targets sub-section,
used by the Indicator Form
For historical reasons, the input is a POST of the whole indicator form sent via ajax
from which a subset of fields are used to generate the returned template
"""
if not request.has_write_access:
raise PermissionDenied
program = get_object_or_404(Program, pk=program)
form = PTFormInputsForm(data=request.POST)
if not form.is_valid():
return JsonResponse(form.errors)
event_name = ''
start_date = ''
target_frequency_num_periods = 1
target_frequency_type = form.cleaned_data.get('target_frequency')
if target_frequency_type in Indicator.REGULAR_TARGET_FREQUENCIES:
start_date = program.reporting_period_start
target_frequency_num_periods = len(
[p for p in PeriodicTarget.generate_for_frequency(
target_frequency_type)(start_date, program.reporting_period_end)])
generated_targets = generate_periodic_targets(
target_frequency_type, start_date, target_frequency_num_periods, event_name)
dummy_indicator = Indicator(
target_frequency=target_frequency_type,
unit_of_measure_type=form.cleaned_data.get('unit_of_measure_type'),
is_cumulative=False,
)
content = render_to_string('indicators/indicatortargets.html', {
'indicator': dummy_indicator,
'periodic_targets': generated_targets
})
return JsonResponse({
'content': content,
}) | 62e6ef666f113c5aefda858e91c31f3dfd0bcacf | 3,637,943 |
import sqlite3
def get_db():
"""Returns an sqlite3.Connection object stored in g.
Or creates it if doesn't exist yet."""
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db | 1a8be7a3d42123db213cd4c2a5968bdede92e677 | 3,637,944 |
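The usual Flask companion to this pattern is a teardown hook that closes the cached connection when the app context ends; a sketch, assuming the module's Flask instance is named `app` and `g` is imported from flask:

```python
@app.teardown_appcontext
def close_db(exception):
    # Close the cached connection, if one was opened for this context.
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
```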
def is_string_like(obj): # from John Hunter, types-free version
"""Check if obj is string."""
return isinstance(obj, basestring) | 5b3b66dd0706f1f0e0257ea50ba48f463a07d6ca | 3,637,945 |
import json
def _export_gene_set_pan_genome(meth, pan_genome_id):
"""Export orthologs from Pangenome as external FeatureSet objects. [26]
:param pan_genome_id: ID of pangenome object [26.1]
:type pan_genome_id: kbtypes.KBaseGenomes.Pangenome
:ui_name pan_genome_id: Pangenome ID
:return: Generated Compare Genome
:rtype: kbtypes.KBaseGenomes.Pangenome
:output_widget: kbasePanGenome
"""
meth.stages = 1 # for reporting progress
return json.dumps({'ws': meth.workspace_id, 'name': pan_genome_id, 'withExport': 'true'}) | 5dfc5a6d253a66cf7ff50c736f9fdaa43a2334a8 | 3,637,946 |
import os
def collect_includes(formula):
""" one of the most basic things to know about a module is which
include files it comes with: e.g. for boost you're supposed
to #include "boost/regex/foo.h", not #include "regex/foo.h"
or #include "foo.h".
For most modules this list of #includes can be generated from
the module's unpacked archive directly (assuming the root
include directories are listed in the module's artifact list),
but for the kinds of modules that generate or modify #include
files during ./configure you should collect_includes only after
./configure or even after make. These kinds of libs are hopefully
rare though.
This func here will modify the $formula in-place, adding the
list of #include files as an 'includes' property.
Returns a list of TwoComponentPath objects.
"""
gyp = get_library().load_gyp(formula)
module = formula['module']
version = formula['version']
gyp_root_dir = os.path.join('./bru_modules', module)
# here we assume the gyp file is located in gyp_root_dir
include_files = []
for target in gyp['targets']:
if not 'include_dirs' in target:
continue # e.g. target zlib:zlib_test doesn't need include_dirs
include_dirs = target['include_dirs']
for include_dir in include_dirs:
abs_include_dir = os.path.join(gyp_root_dir, include_dir)
include_files += [TwoComponentPath(abs_include_dir, include_file)
for include_file
in get_include_files(abs_include_dir)]
#assert len(include_files) > 0, "missing includes for " + module
if len(include_files) == 0:
# didn't create an ICU gyp file yet, looks painful to me
print("WARNING: no includes for module ", module)
return include_files | 54ec3516e3ef4adb80ee8d61dc11535848e8f6db | 3,637,947 |
import logging
import time
from datetime import datetime
def get_log_by_date(log_file, log_capture_date, log_capture_date_option, log_capture_maxlen, log_min_level):
"""
capture log files based on capture_date before or after fields
:param log_file:
:param log_capture_date epoch formatted field in milliseconds
:param log_capture_date_option: 'before', 'on', or 'after'
:param log_capture_maxlen: # of lines to capture at end of list
:param log_min_level: DEBUG, INFO, WARNING, ERROR levels to filter DEBUG is all, INFO imcludes
WARNING and ERROR, etc.
:return: list of log fields to capture
"""
log = logging.getLogger(__name__)
# read from the beginning looking for lines to capture based on timestamp
time_struct = time.localtime(log_capture_date/1000)
compare_date = datetime.fromtimestamp(time.mktime(time_struct))
if log_capture_date_option == 'on':
compare_date = compare_date.replace(hour=0, minute=0, second=0, microsecond=0)
log.debug("Looking for date: {}".format(time.strftime(DATE_TIME_FORMAT, time_struct)))
captured_list = []
result_line = None
triggered = False
for line in get_log_file_data(log_file):
capture = False
# attempt to get the date string from the log entry.
# Some entries are multi-line, so not all lines will have a date string
try:
if log_capture_date_option == 'on':
log_file_date = datetime.strptime(line.split(' ', 1)[0], DATE_FORMAT)
else:
log_file_date = datetime.strptime(' '.join(line.split(' ', 2)[:2]), DATE_TIME_MS_FORMAT)
except (ValueError, TypeError):
log_file_date = None
if not triggered:
if log_file_date:
if log_capture_date_option == 'before' and log_file_date <= compare_date:
triggered = True
capture = True
elif log_capture_date_option == 'on' and log_file_date == compare_date:
triggered = True
capture = True
elif log_capture_date_option == 'after' and log_file_date >= compare_date:
triggered = True
capture = True
else:
# don't capture after the compare_date
if log_capture_date_option == 'before' and log_file_date:
if log_file_date <= compare_date:
capture = True
else:
break
# only capture for the given date
elif log_capture_date_option == 'on' and log_file_date:
if log_file_date == compare_date:
capture = True
else:
break
else:
capture = True
if capture:
result_line = filter_log_level(log_min_level, line, multi_line=result_line)
if result_line:
captured_list.append(line)
# add maxlen
if log_capture_maxlen:
d = deque(captured_list, maxlen=log_capture_maxlen)
d_list = list(d)
captured_lines = "".join(d_list)
num_of_lines = len(d_list)
else:
captured_lines = "".join(captured_list)
num_of_lines = len(captured_list)
return num_of_lines, captured_lines | 1bd34317a30754e01865a17d3b96ff6252abbee8 | 3,637,948 |
def makeId(timestamp=0, machine=0, flow=0):
"""
using unix style timestamp, not python timestamp
"""
timestamp -= _base
return (timestamp << 13) | (machine << 8) | flow | 0443714cd5e87c93dc8ee8a156cd406f481c6d82 | 3,637,949 |
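In this layout, `flow` occupies the low 8 bits, `machine` the next 5, and the `_base`-adjusted timestamp everything above bit 13. A hypothetical inverse, valid assuming machine < 32 and flow < 256, makes the bit layout concrete:

```python
def splitId(snowflake_id):
    # Hypothetical inverse of makeId: recover (timestamp, machine, flow).
    flow = snowflake_id & 0xFF             # low 8 bits
    machine = (snowflake_id >> 8) & 0x1F   # next 5 bits
    timestamp = (snowflake_id >> 13) + _base
    return timestamp, machine, flow
```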
import time
def _get_token(cls, token_type):
"""
    Refresh the token when it expires; return the token value
"""
assert token_type in ['tenant_access_token', 'app_access_token'], token_type
    if not hasattr(cls.request, token_type) or \
            time.time() >= getattr(cls.request, token_type)['expire']:
setattr(cls.request, token_type, getattr(cls, 'get_%s' % token_type)())
return getattr(cls.request, token_type)[token_type] | 15ee9c6926f929960b20ee68901f1d675f43639a | 3,637,950 |
from itertools import islice
from typing import Iterator
def chunk(it: Iterator, size: int) -> Iterator:
""" Nice chunking method from: https://stackoverflow.com/a/22045226 """
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ()) | 8cd199b1d2092373a156dac74cd80b700fb70fcd | 3,637,951 |
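A quick usage sketch; the trailing `()` sentinel stops iteration once `islice` comes back empty:

```python
print(list(chunk(range(10), 3)))
# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
```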
def sort_by_directory(path):
"""returns 0 if path is a directory, otherwise 1 (for sorting)"""
return 1 - path.is_directory | 1cd066e69885901ae1b2b167feb061e98ed5f3ed | 3,637,952 |
import json
def read_config(config):
"""
Read config file containing information of type and default values of fields
:param config: path to config file
:return: dictionary containing type and or default value for each field in the file
"""
dic_types = json.load(open(config, 'r'))
to_remove = []
for attribute, value in dic_types.items():
ls_val = value.keys()
if 'type' in ls_val:
val = value['type']
value['type'] = str_to_type(val)
none_type = False
if not value['type']:
none_type = True
if not 'default' in ls_val and none_type:
to_remove.append(attribute)
value['type'] = val
for to_rm in to_remove:
        print(' [WARN] Config for', '\'' + to_rm + '\'', 'incorrect and omitted: Type', '\'' + dic_types[to_rm]['type'] + '\'', 'is not valid and no default value is indicated')
del dic_types[to_rm]
return dic_types | ebdf6a001f65fb2ad2c3da6015a5b8998f1fda4a | 3,637,953 |
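A sketch of the config format this reader expects, assuming `str_to_type` maps names like "int" to the builtin types: each field carries a `type` and/or a `default`, and a field with an unrecognized type and no default gets dropped with the warning above:

```json
{
    "age":  {"type": "int", "default": 0},
    "name": {"type": "str"},
    "tags": {"default": []}
}
```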
import sympy as sp
def is_right_hand_coordinate_system3(pose):
"""Checks whether the given pose follows the right-hand rule."""
n, o, a = pose[:3, 0], pose[:3, 1], pose[:3, 2]
return n.dot(n).simplify() == 1 and o.dot(o).simplify() == 1 and a.dot(a).simplify() == 1 and sp.simplify(n.cross(o)) == a | f6b1fcff1d3502c78db294fe2ad496a214c13366 | 3,637,954 |
import numpy as np
import mpyfit
from pandas import DataFrame
from scipy.stats import chisquare
def model_airmassfit(hjd, am, rawflux, limbB1, limB2, inc, period, a_Rs, Rp_Rs, show=False):
"""
Return the bestfit model for the lightcurve using 4 models of airmass correction:
1. model with no airmass correction
2. model with exponential airmass correction
3. model with linear airmass correction
    4. model with 2deg polynomial airmass correction
___
INPUT:
hjd:
am:
rawflux:
limbB1:
limbB2:
inc:
period:
a_Rs:
    Rp_Rs:
OUTPUT:
    result: dataframe structure with bestfit values for each model, the errors and BIC values.
phase: from the bestfit model
lc: lightcurve from the bestfit model
"""
# Model 1: no airmass correction
startpar = [Rp_Rs, np.mean(hjd), 1., 0.]
PARINFO = [{'value':Rp_Rs,'limits':(0,1.)}, {'value':np.mean(hjd)}, {'value':1.}, {'value':0.,'fixed':True}]
pfit1, results1 = mpyfit.fit(residuals_am_exp, startpar, args = (hjd,rawflux,eflux), parinfo=PARINFO)
model1 = model_am_exp(hjd,pfit1[0],pfit1[1],pfit1[2],pfit1[3])
phase1 = (hjd - pfit1[1])/period
if show == True:
        print('...')
        print('Model 1: no airmass correction')
        print('bestfit values = ', pfit1)
        print('error = ', results1['parerrors'])
        print('bestnorm1 = ', results1['bestnorm'])
        print('chi-square, scipy routine = ', chisquare(rawflux, model1))
#Model 2: exponential airmass correction
PARINFO = [{'value':Rp_Rs,'limits':(0,1.)}, {'value':np.mean(hjd)}, {'value':1.}, {'value':0.,'fixed':False}]
pfit2, results2 = mpyfit.fit(residuals_am_exp, startpar, args = (hjd,rawflux,eflux), parinfo=PARINFO)
model2 = model_am_exp(hjd,pfit2[0],pfit2[1],pfit2[2],pfit2[3])
phase2 = (hjd - pfit2[1])/period
if show == True:
        print('...')
        print('Model 2: exponential airmass correction')
        print('bestfit values = ', pfit2)
        print('error = ', results2['parerrors'])
        print('bestnorm1 = ', results2['bestnorm'])
        print('chi-square, scipy routine = ', chisquare(rawflux, model2))
#Model 3: linear airmass correction
PARINFO = [{'value':Rp_Rs,'limits':(0,1.)},{'value':np.mean(hjd)},{'value':1.}, {'value':0.,'fixed':False}]
pfit3, results3 = mpyfit.fit(residuals_linear, startpar, args = (hjd,rawflux,eflux), parinfo=PARINFO)
model3 = model_am_linear(hjd,pfit3[0],pfit3[1],pfit3[2],pfit3[3])
phase3 = (hjd - pfit3[1])/period
if show == True:
        print('...')
        print('Model 3: linear airmass correction')
        print('bestfit values = ', pfit3)
        print('error = ', results3['parerrors'])
        print('bestnorm1 = ', results3['bestnorm'])
        print('chi-square, scipy routine = ', chisquare(rawflux, model3))
    #Model 4: 2deg polynomial airmass correction
PARINFO = [{'value':Rp_Rs,'limits':(0,1.)},{'value':np.mean(hjd)},{'value':1.},{'value':0.},{'value':0.}]
pstart = [Rp_Rs,np.mean(hjd),1.,0.,0.]
pfit4, results4 = mpyfit.fit(residuals_2deg_mpfit, pstart, args = (hjd,rawflux,eflux), parinfo=PARINFO)
model4 = model_am_2deg(hjd,pfit4[0],pfit4[1],pfit4[2],pfit4[3],pfit4[4])
phase4 = (hjd - pfit4[1])/period
if show == True:
        print('...')
        print('Model 4: 2deg poly airmass correction')
        print('bestfit values = ', pfit4)
        print('error = ', results4['parerrors'])
        print('bestnorm1 = ', results4['bestnorm'])
        print('chi-square, scipy routine = ', chisquare(rawflux, model4))
#Obtain BIC values:
#Let's create our fit file and our best BIC
BICarray = ['none', 'exponential', 'linear','2nd_deg_poly']
nfree = [3,4,4,5]
bestnorm = [results1['bestnorm'],results2['bestnorm'],results3['bestnorm'],results4['bestnorm']]
bic = BIC(nfree,bestnorm,len(rawflux))
RpRs = [pfit1[0], pfit2[0], pfit3[0], pfit4[0]]
Tc = [pfit1[1], pfit2[1], pfit3[1], pfit4[1]]
a = [pfit1[2], pfit2[2], pfit3[2], pfit4[2]]
b = [pfit1[3], pfit2[3], pfit3[3], pfit4[3]]
c = ['Nan','Nan','Nan',pfit4[4]]
error1 = [results1['parerrors'][0], results2['parerrors'][0], results3['parerrors'][0], results4['parerrors'][0]]
error2 = [results1['parerrors'][1], results2['parerrors'][1], results3['parerrors'][1], results4['parerrors'][1]]
error3 = [results1['parerrors'][2], results2['parerrors'][2], results3['parerrors'][2], results4['parerrors'][2]]
error4 = [results1['parerrors'][3], results2['parerrors'][3], results3['parerrors'][3], results4['parerrors'][3]]
    error5 = ['Nan','Nan','Nan', results4['parerrors'][4]]  # error on the quadratic term c
result = DataFrame([BICarray,list(bic),RpRs,error1,Tc,error2,a,error3,b,error4,c,error5]).T
result.columns=['Model','BIC','RpRs','eRpRs','Tc','eTc','a','ea','b','eb','c','ec']
    if show:
print '... Results:'
print result
print 'The best model is: ',result.Model[result.BIC == result.BIC.min()]
print 'with the BIC = ',result.BIC.min()
#Saving the bestfit transit image:
bestfit = np.where(result.BIC == result.BIC.min())
indx = bestfit[0][0]
    if indx == 0:
        lc = model1
        phase = phase1
    elif indx == 1:
        lc = model2
        phase = phase2
    elif indx == 2:
        lc = model3
        phase = phase3
    elif indx == 3:
        lc = model4
        phase = phase4
return result, phase, lc | 6cc74f5820aff6a36fdd89c37df141d82a558dab | 3,637,955 |
def common_params_for_list(args, fields, field_labels):
"""Generate 'params' dict that is common for every 'list' command.
:param args: arguments from command line.
:param fields: possible fields for sorting.
:param field_labels: possible field labels for sorting.
:returns: a dict with params to pass to the client method.
"""
params = {}
if args.limit is not None:
if args.limit < 0:
raise exc.CommandError(
_('Expected non-negative --limit, got %s') % args.limit)
params['limit'] = args.limit
if args.sort_key is not None:
# Support using both heading and field name for sort_key
fields_map = dict(zip(field_labels, fields))
fields_map.update(zip(fields, fields))
try:
sort_key = fields_map[args.sort_key]
except KeyError:
raise exc.CommandError(
_("%(sort_key)s is an invalid field for sorting, "
"valid values for --sort-key are: %(valid)s") %
{'sort_key': args.sort_key,
'valid': list(fields_map)})
params['sort_key'] = sort_key
if args.sort_dir is not None:
if args.sort_dir not in ('asc', 'desc'):
raise exc.CommandError(
_("%s is an invalid value for sort direction, "
"valid values for --sort-dir are: 'asc', 'desc'") %
args.sort_dir)
params['sort_dir'] = args.sort_dir
marker = getattr(args, 'marker', None)
if marker is not None:
params['marker'] = marker
params['detail'] = args.detail
return params | 6e432213a504b2423dca4add79c860d9bffe4ad4 | 3,637,956 |
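# Usage sketch (hedged): an argparse.Namespace stands in for the parsed CLI
# arguments the client shell would normally pass; fields/field_labels are examples.
import argparse
_args = argparse.Namespace(limit=10, sort_key='Name', sort_dir='asc', detail=False, marker=None)
common_params_for_list(_args, fields=['name', 'uuid'], field_labels=['Name', 'UUID'])
# -> {'limit': 10, 'sort_key': 'name', 'sort_dir': 'asc', 'detail': False}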
def _finditem(obj, key):
"""
Check if giben key exists in an object
:param obj: dictionary/list
:param key: key
:return: value at the key position
"""
if key in obj:
return obj[key]
for k, v in obj.items():
if isinstance(v, dict):
item = _finditem(v, key)
if item is not None:
return item | 0f7c5b801acfae6a66d175163d726cba22380f7c | 3,637,957 |
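# Illustration: the search descends into nested dicts and returns the first match;
# note that a stored value of None is indistinguishable from "key absent".
_cfg = {'db': {'conn': {'host': 'localhost', 'port': 5432}}}
_finditem(_cfg, 'port')     # -> 5432
_finditem(_cfg, 'missing')  # -> None (implicit fall-through)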
def decompress(obj):
"""Decompress LZSS-compressed bytes or a file-like object.
Shells out to decompress_file() or decompress_bytes() depending on
whether or not the passed-in object has a 'read' attribute or not.
Returns a bytearray."""
if hasattr(obj, 'read'):
return decompress_file(obj)
else:
return decompress_bytes(obj) | 58f090f02e76e90606b7ec83d2f9567235b90213 | 3,637,958 |
def trailing_whitespace(text):
"""Gets trailing whitespace of text."""
trailing = ""
while text and text[-1] in WHITESPACE_OR_NL_CHARS:
trailing = text[-1] + trailing
text = text[:-1]
return trailing | ddb3fba9d41261f45a0ea3e0ffd03949950532ce | 3,637,959 |
import numpy
def rjust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` right-justified in a
string of length `width`.
Calls `str.rjust` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The character to use for padding
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.rjust
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = int(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) | 05d99fcd2600bcfee19c60b75f50af868d853a87 | 3,637,960 |
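# The public counterpart of this private helper is numpy.char.rjust; a quick demo:
import numpy as np
np.char.rjust(np.array(['a', 'bb']), 4, fillchar='-')
# -> array(['---a', '--bb'], dtype='<U4')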
import tempfile
def form_image_helper(caption_list, dummy=True, image_suffix='.jpg'):
"""
:param caption_list: <class 'list'>
:param dummy: <class 'bool'>
:param image_suffix: <class 'str'>
:return: <class 'dict'> [dict of dummy images for form submission in django unittest]
"""
try:
res = {}
image = Image.new('RGB', (100, 100))
tmp_file = tempfile.NamedTemporaryFile(suffix=image_suffix)
image.save(tmp_file)
        # keep the handles open for the form submission (a `with` block would
        # close them before the caller can use them); one handle per field
        for title in caption_list:
            res[title] = open(tmp_file.name, 'rb')
        return res
except Exception as e:
print(str(e))
raise ValueError(str(e)) | b299ce536de33fe04821a7c1306dfff61000eed4 | 3,637,961 |
from typing import Any
import inspect
def is_complex_converter(obj: Any) -> bool:
"""Check if the object is a complex converter.`"""
return isinstance(obj, ComplexConverterABC) or inspect.isclass(obj) and issubclass(obj, ComplexConverterABC) | d6069161819f348a4cec516f348ed93d59d38a74 | 3,637,962 |
def find_pool_or_vip_by_name(full_list, queried_name, fuzzy_search):
"""Wrapper function for searching the pools or VIPs."""
logger.debug(
"Performing search for %s in %s partition",
queried_name,
ARGS.partition,
)
if fuzzy_search:
matches = find_resource_by_fuzzysearch(full_list, queried_name)
else:
matches = find_resource_by_regex(full_list, queried_name)
return matches | bc94b5e3523f3abf8e01597d830a136ab21b23c9 | 3,637,963 |
def _get_spreadsheet_with_values(spreadsheet_id):
"""Gets all sheets and their cells."""
sheets = []
if not service:
return sheets
result = service.spreadsheets().get(spreadsheetId=spreadsheet_id, fields=FIELDS).execute()
for sheet in result["sheets"]:
if "merges" in sheet:
merges_info = sheet["merges"]
else:
merges_info = []
values = sheet["data"][0]
cells = []
if "rowData" in values:
for y, row in enumerate(values["rowData"]):
cells.append([])
if "values" in row:
for x, value in enumerate(row["values"]):
cell = Cell(x, y, _get_cell_value(value))
for merge_info in merges_info:
x_merge_range = range(merge_info["startColumnIndex"], merge_info["endColumnIndex"])
y_merge_range = range(merge_info["startRowIndex"], merge_info["endRowIndex"])
if x in x_merge_range and y in y_merge_range:
cell.set_merge_range(x_merge_range, y_merge_range)
cells[y].append(cell)
sheets.append({"name": sheet["properties"]["title"], "cells": cells})
# TODO: store sheet["properties"]["gridProperties"]["rowCount"/"columnCount"] too
return sheets | 45ea52477dfb6dd951e4780064e8f0f51279a98a | 3,637,964 |
def some_task():
""" Some task """
get_word_counts('python.txt')
get_word_counts('go.txt')
get_word_counts('erlang.txt')
get_word_counts('javascript.txt')
return "Task Done" | a6dcacaf22cb39b886feb4566d05b48c0e7db1e3 | 3,637,965 |
def _accelerate(f, n_devices):
"""JIT-compiled version of `f` running on `n_devices`."""
if n_devices == 1:
return fastmath.jit(f)
return fastmath.pmap(f, axis_name='batch') | f9735078b012d49e15aa16b9911b2c897f9987c1 | 3,637,966 |
import datetime
from typing import Text
def parse_shadowserver_time(time_string: Text) -> datetime.datetime:
"""Parse a date on the format '2018-10-17 20:36:23'"""
try:
return datetime.datetime.strptime(time_string[:19], '%Y-%m-%d %H:%M:%S')
except:
print(time_string)
raise | 9cf267bac24fcd4efdc73d4839493b9440c178ce | 3,637,967 |
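# Quick check: only the first 19 characters are parsed, so sub-second precision is dropped.
parse_shadowserver_time('2018-10-17 20:36:23')         # -> datetime.datetime(2018, 10, 17, 20, 36, 23)
parse_shadowserver_time('2018-10-17 20:36:23.123456')  # same result; the tail is sliced off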
import jax.numpy as jnp
from jax import random
def sample_along_rays(rng,
origins,
directions,
radii,
num_samples,
near,
far,
genspace_fn,
ray_shape,
single_jitter,
diag=True):
"""Stratified sampling along the rays.
Args:
rng: random generator. If `None`, use deterministic sampling.
origins: [..., 3], ray origins.
directions: [..., 3], ray directions.
radii: [..., 3], ray radii.
num_samples: int.
near: [..., 1], near-plane camera distance.
far: [..., 1], far-plane camera distance.
genspace_fn: Callable, the curve function used when spacing t values.
ray_shape: string, which shape ray to assume.
single_jitter: bool, if True, apply the same offset to each sample in a ray.
diag: bool, if True, produce diagonal covariances (full otherwise).
Returns:
t_vals: [..., num_samples], sampled t values,
(means: [..., num_samples, 3], means,
covs: [..., num_samples, 3{, 3}], covariances, shape depends on `diag`).
"""
t_vals = spacing.genspace(near, far, num_samples + 1, fn=genspace_fn)
sample_shape = list(origins.shape)[:-1] + [num_samples + 1]
if rng is None:
# Broadcast t_vals to make the returned shape consistent.
t_vals = jnp.broadcast_to(t_vals, sample_shape)
else:
mids = 0.5 * (t_vals[Ellipsis, 1:] + t_vals[Ellipsis, :-1])
upper = jnp.concatenate([mids, t_vals[Ellipsis, -1:]], axis=-1)
lower = jnp.concatenate([t_vals[Ellipsis, :1], mids], axis=-1)
if single_jitter:
t_rand = random.uniform(rng, sample_shape[:-1])[Ellipsis, None]
else:
t_rand = random.uniform(rng, sample_shape)
t_vals = lower + (upper - lower) * t_rand
means, covs = cast_rays(
t_vals, origins, directions, radii, ray_shape, diag=diag)
return t_vals, (means, covs) | 0745c1e7ed152c93c49bfcf59bb0255422b018ec | 3,637,968 |
import PIL.Image
def resize(img, size, interpolation=PIL.Image.BILINEAR):
"""Resize image to match the given shape.
This method uses :mod:`cv2` or :mod:`PIL` for the backend.
If :mod:`cv2` is installed, this function uses the implementation in
:mod:`cv2`. This implementation is faster than the implementation in
:mod:`PIL`. Under Anaconda environment,
:mod:`cv2` can be installed by the following command.
.. code::
$ conda install -c menpo opencv3=3.2.0
Args:
img (~numpy.ndarray): An array to be transformed.
This is in CHW format and the type should be :obj:`numpy.float32`.
size (tuple): This is a tuple of length 2. Its elements are
ordered as (height, width).
interpolation (int): Determines sampling strategy. This is one of
:obj:`PIL.Image.NEAREST`, :obj:`PIL.Image.BILINEAR`,
:obj:`PIL.Image.BICUBIC`, :obj:`PIL.Image.LANCZOS`.
Bilinear interpolation is the default strategy.
Returns:
~numpy.ndarray: A resize array in CHW format.
"""
img = _resize(img, size, interpolation)
return img | fa2a407f42672e5ea91e8e2bd1c186d4ca05d98c | 3,637,969 |
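# Usage sketch, assuming the chainercv-style `_resize` backend is importable:
import numpy as np
_img = np.random.rand(3, 480, 640).astype(np.float32)   # CHW float32
resize(_img, (240, 320)).shape                          # -> (3, 240, 320)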
def build_punt():
""" Construye la ventana del registro del usuario"""
easy = PA.dividir_puntajes("facil")
medium = PA.dividir_puntajes("medio")
hard = PA.dividir_puntajes("dificil")
rows = len(max([easy, medium, hard], key=len))
def create_table(data, dif):
return sg.Table(
values = data,
headings = [dif, "Puntos"],
auto_size_columns = True,
justification = 'center',
alternating_row_color = 'lightblue',
hide_vertical_scroll = True,
num_rows = rows
)
layout = [
        [ create_table(easy if easy else [["Vacio", ""]], "Facil"),
          create_table(medium if medium else [["Vacio", ""]], "Medio"),
          create_table(hard if hard else [["Vacio", ""]], "Dificil")
],
[sg.Ok()]
]
window = sg.Window('Mejores puntajes por dificultad ', layout, element_justification='center')
return window | a97dc74855ccb8e65096d74c7fe59d89d5a3f92b | 3,637,970 |
def true_fov(M, fov_e=50):
"""Calulates the True Field of View (FOV) of the telescope & eyepiece pair
Args:
fov_e (float): FOV of eyepiece; default 50 deg
M (float): Magnification of Telescope
Returns:
float: True Field of View (deg)
"""
return fov_e/M | 7735135d326f3000ac60274972263a8a71648033 | 3,637,971 |
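# Worked example: a 1200/60 mm scope with a 10 mm eyepiece gives M = 120, so
true_fov(120)            # -> ~0.417 deg with the default 50-degree eyepiece
true_fov(120, fov_e=68)  # -> ~0.567 deg with a wide-field eyepiece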
async def test_html_content_type_with_utf8_encoding(unused_tcp_port, postgres_db, database_name, async_finalizer):
"""Test whether API endpoints with a "text/html; charset=UTF-8" content-type work."""
configure(unused_tcp_port, database_name, postgres_db.port)
html_content = "<html><body>test</body></html>"
@protocol.typedmethod(path="/test", operation="GET", client_types=["api"])
def test_method() -> ReturnValue[str]: # NOQA
pass
class TestServer(ServerSlice):
@protocol.handle(test_method)
async def test_methodY(self) -> ReturnValue[str]: # NOQA
return ReturnValue(response=html_content, content_type=HTML_CONTENT_WITH_UTF8_CHARSET)
rs = Server()
server = TestServer(name="testserver")
rs.add_slice(server)
await rs.start()
async_finalizer.add(server.stop)
async_finalizer.add(rs.stop)
# client based calls
client = protocol.Client("client")
response = await client.test_method()
assert response.code == 200
assert response.result == html_content | 74d76c70ede4ad7b6c65170142136a6278c39802 | 3,637,972 |
def _increment_inertia(centroid, reference_point, m, mass, cg, I):
"""helper method"""
if m == 0.:
return mass
(x, y, z) = centroid - reference_point
x2 = x * x
y2 = y * y
z2 = z * z
I[0] += m * (y2 + z2) # Ixx
I[1] += m * (x2 + z2) # Iyy
I[2] += m * (x2 + y2) # Izz
I[3] += m * x * y # Ixy
I[4] += m * x * z # Ixz
I[5] += m * y * z # Iyz
mass += m
cg += m * centroid
return mass | f95d8c01061243929fa1d3dd48903bedc938bbd8 | 3,637,973 |
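# Usage sketch showing the in-place accumulation over two point masses
# (the layout of I as [Ixx, Iyy, Izz, Ixy, Ixz, Iyz] follows the comments above;
# all arrays here are made up for illustration):
import numpy as np
_ref, _I, _cg, _mass = np.zeros(3), np.zeros(6), np.zeros(3), 0.
for _m, _centroid in [(2., np.array([1., 0., 0.])), (3., np.array([0., 2., 0.]))]:
    _mass = _increment_inertia(_centroid, _ref, _m, _mass, _cg, _I)
_cg /= _mass  # cg is accumulated as a mass-weighted sum
# _I -> [12., 2., 14., 0., 0., 0.]; _mass -> 5.0; _cg -> [0.4, 1.2, 0.]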
def token_addresses(
request,
token_amount,
number_of_tokens,
blockchain_services,
cached_genesis,
register_tokens):
""" Fixture that yields `number_of_tokens` ERC20 token addresses, where the
`token_amount` (per token) is distributed among the addresses behind `blockchain_services` and
potentially pre-registered with the raiden Registry.
The following arguments can control the behavior:
Args:
token_amount (int): the overall number of units minted per token
number_of_tokens (int): the number of token instances
register_tokens (bool): controls if tokens will be registered with raiden Registry
"""
if cached_genesis:
token_addresses = [
address_decoder(token_address)
for token_address in cached_genesis['config']['tokenAddresses']
]
else:
participants = [
privatekey_to_address(blockchain_service.private_key) for
blockchain_service in blockchain_services.blockchain_services
]
token_addresses = _token_addresses(
token_amount,
number_of_tokens,
blockchain_services.deploy_service,
participants,
register_tokens
)
return token_addresses | 8c79175f3a1ca312dfb03de60262f019b74b965c | 3,637,974 |
def saturating_sigmoid(x):
"""Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
with tf.name_scope("saturating_sigmoid", [x]):
y = tf.sigmoid(x)
return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) | d779f059251cc99e40bf5d13ee2d31bc786c48b7 | 3,637,975 |
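# The same saturating formula in NumPy, for intuition (the function above is TF1-style):
import numpy as np
def _saturating_sigmoid_np(x):
    y = 1.0 / (1.0 + np.exp(-x))
    return np.minimum(1.0, np.maximum(0.0, 1.2 * y - 0.1))
_saturating_sigmoid_np(np.array([-10., 0., 10.]))  # -> approximately [0., 0.5, 1.]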
def simulate_strategy(game, strategies, init_states, func):
"""
    Compute the exact average payoff of the two strategies by traversing
    all possible private cards dealt by nature.
game : the pyspiel game
strategies : the index of player's strategy, a length two list
init_states : a set of possible openspiel state after chance deals private cards
func : given a strategy index and an infostate, output action
"""
payoff = np.array([0, 0], dtype=int)
for root in init_states: # traverse game tree
node = root
while not node.is_terminal():
            assert not node.is_chance_node(), "No chance nodes exist in Kuhn poker after private hands are dealt"
player = node.current_player()
action = func(strategies[player], node.information_state_string(), player)
assert action in node.legal_actions(), "action not legal!"
node = node.child(action)
payoff = payoff + node.returns()
return payoff / len(init_states) | 47f60a2998ed8deba95b32edfa4221d7f2d767cc | 3,637,976 |
async def is_nsfw_and_guild_predicate(ctx):
"""A predicate to test if a command was run in
an NSFW channel and inside a guild
:param ctx: The context of the predicate
"""
if not ctx.guild or not ctx.channel.is_nsfw():
raise NotNSFWOrGuild()
return True | 20e5ec228f337fcae1e28ad12cde515be9e50f4b | 3,637,977 |
from typing import Optional
def get_account(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
Get information on your DigitalOcean account.
## Example Usage
Get the account:
```python
import pulumi
import pulumi_digitalocean as digitalocean
example = digitalocean.get_account()
```
"""
__args__ = dict()
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getAccount:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
droplet_limit=__ret__.droplet_limit,
email=__ret__.email,
email_verified=__ret__.email_verified,
floating_ip_limit=__ret__.floating_ip_limit,
id=__ret__.id,
status=__ret__.status,
status_message=__ret__.status_message,
uuid=__ret__.uuid) | 64758aacd0f294a2f08e60d59edec6c088942e11 | 3,637,978 |
import six
def get_partition_leaders(cluster_config):
"""Return the current leaders of all partitions. Partitions are
returned as a "topic-partition" string.
:param cluster_config: the cluster
:type cluster_config: kafka_utils.utils.config.ClusterConfig
:returns: leaders for partitions
:rtype: map of ("topic-partition", broker_id) pairs
"""
client = KafkaClient(cluster_config.broker_list)
result = {}
for topic, topic_data in six.iteritems(client.topic_partitions):
for partition, p_data in six.iteritems(topic_data):
topic_partition = topic + "-" + str(partition)
result[topic_partition] = p_data.leader
return result | 07aed255fa6d2ec65854d4a5ed1ab1ada6dcca73 | 3,637,979 |
import pathlib
import os
import json
def xyzToAtomsPositionsWrapper(
xyzFileOrDir,
outDir=None,
outFileBaseName=None,
fileExt=None,
noOutFile=False):
"""
Wrapper for the xyzToAtomsPositions function.
Returns atom positions (order) given a molecule9s) in an xyz format.
The heavy atoms positions are based on inchi, thus should always
be the same, regardless of the heavy atoms order in the xyz file.
The hydrogen positions ARE NOT UNIQUE. They will depend, to some
extent, on their order in the xyz file.
Use this function to set the atoms positions in a reference
molecule. The idea is to assign the positions once and to never
change them again.
Arguments:
----------
xyzFileOrDir : str
input xyz molecule (either file or dir path)
outDir : str, optional, default = cwd
directory to write the results into
outFileBaseName : str, optional, default xyzFile name
base name of all output files
fileExt : str, optional, default = xyz
extension of the input files
noOutFile : bool, optional, default False
suppresses writing any output files
Returns:
----------
atomsPositions: list of tuples
also written to a file
"""
    if outDir is None: outDir = ioutils.getBaseDirPath(xyzFileOrDir)
    if fileExt is None: fileExt = '.xyz'
xyz_files = ioutils.getFilesWithExtensions(xyzFileOrDir, fileExt)
if not xyz_files: return
atomsPositions = []
for i, xyz_file in enumerate(xyz_files):
_atomsPositions = xyztools.xyzToAtomsPositions(xyz_file)
filePathObj = pathlib.Path(xyz_file)
fileName = filePathObj.name
if not noOutFile:
outFileName = fileName+'_atomspositions.json'
if outFileBaseName is not None:
outFileName = outFileBaseName+'_'+str(i)+'_atomspositions.json'
outPath = os.path.join(outDir,outFileName)
ioutils.writeFile(path=outPath,
data=json.dumps({fileName:_atomsPositions}, indent=4),
newline='')
atomsPositions.append({fileName:_atomsPositions})
return atomsPositions | a3e5613676414fc2cf689c978404d3913f9d4169 | 3,637,980 |
import pathlib
def find_mod_names(file_path=__file__):
"""Find Ice module names that start with 'ice_test_'.
The returned names are without the extension '.ice'.
TODO: Needs to recurse in `test_` sub directories.
"""
directory = pathlib.Path(file_path).absolute().parent
# pylint: disable=no-member
return [f_name.stem for f_name in directory.iterdir() if
f_name.stem.startswith('ice_test_') and
f_name.suffix == '.ice'] | 05b85ab7b500e5ff51e5425a8b79e87e5d67291b | 3,637,981 |
import pandas as pd
def WR(df, N=10, N1=6):
    """
    Williams %R indicator.
    :param df: DataFrame with 'high', 'low' and 'close' columns
    :param N: lookback window for WR1
    :param N1: lookback window for WR2
    :return: DataFrame with 'WR1' and 'WR2' columns
    """
HIGH = df['high']
LOW = df['low']
CLOSE = df['close']
WR1 = 100 * (HHV(HIGH, N) - CLOSE) / (HHV(HIGH, N) - LLV(LOW, N))
WR2 = 100 * (HHV(HIGH, N1) - CLOSE) / (HHV(HIGH, N1) - LLV(LOW, N1))
return pd.DataFrame({
'WR1': WR1, 'WR2': WR2
}) | 28cccd0b5f7d0a0a772327901b838e9bf8aa862d | 3,637,982 |
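# HHV/LLV are assumed to be the usual TDX-style rolling helpers (highest high /
# lowest low over the last n bars); a minimal pandas sketch if they are not in scope:
def HHV(series, n):
    return series.rolling(n, min_periods=1).max()
def LLV(series, n):
    return series.rolling(n, min_periods=1).min()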
from hask3.lang.hindley_milner import Function
def make_fn_type(params):
"""Turn type parameters into corresponding internal type system object.
Returned object will represent the type of a function over the
parameters.
    :param params: a list of type parameters, e.g. from a type
signature. These should be instances of TypeOperator or
TypeVariable.
:returns: An instance of TypeOperator representing the function type.
"""
if len(params) == 2:
last_input, return_type = params
return Function(last_input, return_type)
else:
return Function(params[0], make_fn_type(params[1:])) | 876d1d9c4243e0ed8a71b9fda2b2469519f4c89b | 3,637,983 |
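# Illustration: a three-parameter signature curries right-associatively,
# i.e. make_fn_type([a, b, c]) == Function(a, Function(b, c)), read as a -> (b -> c).
# (Assumes TypeVariable() yields fresh type variables, as in the classic
# Hindley-Milner reference implementation this module builds on.)
from hask3.lang.hindley_milner import TypeVariable
_a, _b, _c = TypeVariable(), TypeVariable(), TypeVariable()
make_fn_type([_a, _b, _c])  # Function(_a, Function(_b, _c))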
def export(request, wid):
"""Export the logs from the given workflow
:param request: HTML request
    :param wid: id of the workflow whose logs are exported
:return: Return a CSV download of the logs
"""
dataset = LogResource().export(
Log.objects.filter(user=request.user, workflow__id=wid)
)
# Create the response as a csv download
response = HttpResponse(dataset.csv, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="logs.csv"'
return response | ff06c49642a4ab84b7c779522a13ce56e19cc9a9 | 3,637,984 |
def _validate_valid_xml(value):
"""
Checks whether the given value is well-formed and valid XML.
"""
try:
# Try to create an XML tree from the given String value.
_value = XML_DECL.sub(u'', value)
_ = etree.fromstring(_value.encode('utf-8'))
return True
except etree.ParseError, parse_error:
# In case of an exception, we raise a ValidationError.
raise ValidationError(parse_error)
# cfedermann: in case of other exceptions, raise a ValidationError with
# the corresponding error message. This will prevent the exception
# page handler to be shown and is hence more acceptable for end users.
except Exception, error:
raise ValidationError(error) | dbdcce58a6dbbbaf90b98c6f56fa7f9c6f830e00 | 3,637,985 |
import importlib
import pkgutil
def import_submodules(package, recursive=True):
"""Import all submodules of a module, recursively, including subpackages
:param recursive: bool
:param package: package (name or actual module)
:type package: str | module
:rtype: dict[str, types.ModuleType]
"""
if isinstance(package, str):
package = importlib.import_module(package)
results = {}
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if recursive and is_pkg:
results.update(import_submodules(full_name))
return results | df1756f59763adf446a6e42f92c5bb193a8740bd | 3,637,986 |
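# Harmless stdlib demo: `json` is a package, so its submodules are imported eagerly.
sorted(import_submodules('json'))
# -> ['json.decoder', 'json.encoder', 'json.scanner', 'json.tool'] (on CPython)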
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.sin(season_time * 2),
1 / np.exp(3 * season_time)) | 06430ce5d3da7d44fc4a8b5e32ca4d5567b1cc15 | 3,637,987 |
def dice(labels, predictions, axis, weights=1.0, scope=None, loss_collection=tf.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Dice loss for binary segmentation. The Dice loss is one minus the Dice
coefficient, and therefore this loss converges towards zero.
The Dice loss between predictions `p` and labels `g` is
.. math::
1 - \frac{2 \Sigma_i^N p_i g_i + \epsilon}
{\Sigma_i^N p_i^2 + \Sigma_i^N g_i^2 + \epsilon}
where `\epsilon` is a small value for stability.
Parameters
----------
labels: float `Tensor`
predictions: float `Tensor`
References
----------
https://arxiv.org/pdf/1606.04797.pdf
"""
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with tf.name_scope(scope, "dice",
(predictions, labels, weights)) as scope:
predictions = tf.to_float(predictions)
labels = tf.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
intersection = tf.reduce_sum(tf.abs(predictions * labels), axis=axis)
union = (tf.reduce_sum(predictions, axis=axis) +
tf.reduce_sum(labels, axis=axis))
losses = 1. - ((2 * intersection + _EPSILON) / (union + _EPSILON))
return compute_weighted_loss(
losses=losses,
weights=weights,
scope=scope,
loss_collection=loss_collection,
reduction=reduction) | 70e0e44e7d9b07350497a2c048f2bdd1c8ea952e | 3,637,988 |
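# The same quantity in plain NumPy for a sanity check (eps mirroring _EPSILON is an assumption):
import numpy as np
def _dice_loss_np(labels, predictions, eps=1e-7):
    inter = np.sum(np.abs(predictions * labels))
    union = np.sum(predictions) + np.sum(labels)
    return 1.0 - (2 * inter + eps) / (union + eps)
_dice_loss_np(np.array([1., 1., 0.]), np.array([1., 0., 0.]))  # -> ~0.333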
def air_density(temp, patm, pw = 0):
"""
    Calculates the density of (moist) air by means of the universal gas law as a
    function of air temperature, atmospheric pressure and vapour pressure.
m / V = [Pw / (Rv * T)] + [Pd / (Rd * T)]
where:
Pd: Patm - Pw
    Rw: specific gas constant for water vapour [Rw = 461.495 J/kg/K]
    Rd: specific gas constant for dry air [Rd = 287.058 J/kg/K]
T: air temperature [K]
m/V: density of air [kg/m³]
Parameters
----------
temp : float
Air temperature [K].
patm : float
Atmospheric pressure [Pa].
pw : float
Vapour pressure [Pa]. Default to 0 Pa (dry air).
Returns
-------
float
Air density [kg/m³].
"""
rd, rw = 287.058, 461.495 # specific gas constant for dry air and water vapour [J / (kg K)]
pd = patm - pw
return (pd / (rd * temp)) + (pw / (rw * temp)) | 1af7afbf562fec105566a2c934f83c73f0be1173 | 3,637,989 |
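# Worked examples (the saturation pressure of ~2339 Pa at 20 °C is an approximation):
air_density(293.15, 101325)           # dry air at 20 °C, 1 atm -> ~1.204 kg/m³
air_density(293.15, 101325, pw=2339)  # saturated air at 20 °C  -> ~1.194 kg/m³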
def index(dataset: Dataset, min_df=5, inplace=False, **kwargs):
"""
Indexes the tokens of a textual :class:`quapy.data.base.Dataset` of string documents.
To index a document means to replace each different token by a unique numerical index.
Rare words (i.e., words occurring less than `min_df` times) are replaced by a special token `UNK`
:param dataset: a :class:`quapy.data.base.Dataset` object where the instances of training and test documents
are lists of str
:param min_df: minimum number of occurrences below which the term is replaced by a `UNK` index
:param inplace: whether or not to apply the transformation inplace (True), or to a new copy (False, default)
:param kwargs: the rest of parameters of the transformation (as for sklearn's
        `CountVectorizer <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html>`_)
:return: a new :class:`quapy.data.base.Dataset` (if inplace=False) or a reference to the current
:class:`quapy.data.base.Dataset` (inplace=True) consisting of lists of integer values representing indices.
"""
__check_type(dataset.training.instances, np.ndarray, str)
__check_type(dataset.test.instances, np.ndarray, str)
indexer = IndexTransformer(min_df=min_df, **kwargs)
training_index = indexer.fit_transform(dataset.training.instances)
test_index = indexer.transform(dataset.test.instances)
if inplace:
dataset.training = LabelledCollection(training_index, dataset.training.labels, dataset.classes_)
dataset.test = LabelledCollection(test_index, dataset.test.labels, dataset.classes_)
dataset.vocabulary = indexer.vocabulary_
return dataset
else:
training = LabelledCollection(training_index, dataset.training.labels.copy(), dataset.classes_)
test = LabelledCollection(test_index, dataset.test.labels.copy(), dataset.classes_)
return Dataset(training, test, indexer.vocabulary_) | 8cde5ed740e4879d62f4a73bf06d1da7b78bc22a | 3,637,990 |
def PerpendicularFrameAt(thisCurve, t, multiple=False):
"""
Return a 3d frame at a parameter. This is slightly different than FrameAt in
that the frame is computed in a way so there is minimal rotation from one
frame to the next.
Args:
t (double): Evaluation parameter.
Returns:
bool: True on success, False on failure.
plane (Plane): The frame is returned here.
"""
url = "rhino/geometry/curve/perpendicularframeat-curve_double_plane"
if multiple: url += "?multiple=true"
args = [thisCurve, t]
if multiple: args = list(zip(thisCurve, t))
response = Util.ComputeFetch(url, args)
return response | 14216aa0091f82cb47dafe0970d685463529cfbb | 3,637,991 |
from typing import Collection
def _attributes_cosmo2dict(cosmo):
"""
Converts CoSMoMVPA-like attributes to a dictionary form
Parameters
----------
cosmo: dict
        Dictionary that may contain fields 'sa', 'fa', 'a'. For any of these
fields the contents can be a dict, np.ndarray (object array as returned
by loadmat) or ArrayCollectable (from a PyMVPA Dataset's .a, .fa or .sa)
Returns
-------
pymvpa_attributes: dict
Data represented in cosmo with fields 'sa', 'fa' and 'a'. Each element
in pymvpa_attributes[key] is a dict itself mapping an attribute name
to a value.
"""
# space for output
pymvpa_attributes = dict()
# go over 'sa', 'fa' and 'a'
for fieldname, do_transpose in _attr_fieldname2do_transpose.items():
attrs = dict()
if fieldname in cosmo:
v = cosmo[fieldname]
if type(v) is dict:
# copy the data over
attrs.update(v)
elif isinstance(v, np.ndarray):
# extract singleton element
fsa_mat = _from_singleton(v)
if fsa_mat is not None:
# assume an object array
fsa_keys = fsa_mat.dtype.names
for fsa_key in fsa_keys:
dim = fsa_mat[fsa_key]
if do_transpose:
# feature attribute case, to match dimensionality
# in second dimension
dim = dim.T
# transform row-vectors in matrix form (shape=(1,P))
# to vectors (shape=(P,))
if len(dim.shape) == 2 and dim.shape[1] == 1:
dim = dim.ravel()
attrs[fsa_key] = dim
elif isinstance(v, Collection):
# from PyMVPA Dataset, extract keys and values
attrs.update((k, v[k].value) for k in v)
elif v is None:
pass
else:
raise TypeError("Unsupported input %s" % v)
pymvpa_attributes[fieldname] = attrs
return pymvpa_attributes | 3d3369ce0a1f65cd1bc1b8629ca028a4561224ca | 3,637,992 |
def is_instrument_port(port_name):
"""test if a string can be a com of gpib port"""
answer = False
if isinstance(port_name, str):
ports = ["COM", "com", "GPIB0::", "gpib0::"]
for port in ports:
if port in port_name:
answer = not (port == port_name)
return answer | f45f47d35a9172264d0474502b0df883685071a0 | 3,637,993 |
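# Quick check of the guard logic: a bare prefix is rejected, a concrete port passes.
is_instrument_port('COM3')              # -> True
is_instrument_port('COM')               # -> False (an exact prefix match is excluded)
is_instrument_port('GPIB0::12::INSTR')  # -> True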
import os
def get_anvil_path():
"""Gets the anvil/ path.
Returns:
The full path to the anvil/ source.
"""
return os.path.normpath(os.path.dirname(__file__)) | 2b8b0b0d99764634a5307f12864d85fe9b75ad57 | 3,637,994 |
import types
def share_data(value):
""" Take a value and use the same value from the store,
if the value isn't in the store this one becomes the shared version. """
# We don't want to change the types of strings, between str <=> unicode
# and hash('a') == hash(u'a') ... so use different stores.
    # In theory eventually we'll have all of one type, but don't hold your breath.
store = _share_data_store
if isinstance(value, unicode):
store = _share_data_store_u
# hahahah, of course the above means that:
# hash(('a', 'b')) == hash((u'a', u'b'))
# ...which we have in deptuples, so just screw sharing those atm.
if type(value) == types.TupleType:
return value
return store.setdefault(value, value) | 70edaad0ef52e6f6866049bcd199ef109ebb825d | 3,637,995 |
import math
def gaussian_dropout(incoming, keep_prob, mc, scale_during_training = True, name=None):
""" Gaussian Dropout.
Outputs the input element multiplied by a random variable sampled from a Gaussian distribution with mean 1 and either variance keep_prob*(1-keep_prob) (scale_during_training False) or (1-keep_prob)/keep_prob (scale_during_training True)
Arguments:
incoming : A `Tensor`. The incoming tensor.
keep_prob : A float representing the probability that each element is kept by Bernoulli dropout which is used to set the variance of the Gaussian distribution.
scale_during_training : A boolean determining whether to match the variance of the Gaussian distribution to Bernoulli dropout with scaling during testing (False) or training (True)
        mc : A boolean Tensor corresponding to whether or not Monte-Carlo sampling will be used to calculate the network's output
name : A name for this layer (optional).
References:
Dropout: A Simple Way to Prevent Neural Networks from Overfitting.
N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever & R. Salakhutdinov,
        (2014), Journal of Machine Learning Research, 15(Jun), 1929-1958.
Links:
[https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf]
(https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf)
"""
with tf.name_scope(name) as scope:
inference = incoming
if scale_during_training:
stddev = math.sqrt((1-keep_prob)/keep_prob)
else:
stddev = math.sqrt((1-keep_prob)*keep_prob)
def apply_gaussian_dropout():
return tf.multiply(inference,tf.random_normal(tf.shape(inference), mean = 1, stddev = stddev))
inference = tf.cond(mc, apply_gaussian_dropout, lambda: inference)
return inference | 225c88e0f45c2b319cfabfcb6c65162a5a21f778 | 3,637,996 |
import os
def checkTimeStamp(op, graph, frm, to):
"""
Confirm the timestamp formats within the metadata did not change.
:param op:
:param graph:
:param frm:
:param to:
:return:
"""
edge = graph.get_edge(frm, to)
    pred = list(graph.predecessors(to))
    if len(pred) < 2:
ffile = os.path.join(graph.dir, graph.get_node(frm)['file'])
sfile = os.path.join(graph.dir, graph.get_node(to)['file'])
timediffs = dateTimeStampCompare(ffile,sfile)
if len(timediffs) != 0:
return (Severity.INFO, "Timestamps " + str(timediffs) + " are in a format different from prior node")
else:
for p in pred:
if p != frm:
ffile = os.path.join(graph.dir, graph.get_node(p)['file'])
sfile = os.path.join(graph.dir, graph.get_node(to)['file'])
timediffs = dateTimeStampCompare(ffile, sfile)
if len(timediffs) != 0:
return (Severity.INFO, "Timestamps "+ str(timediffs) + " are in a format different from donor") | 527a22f3e09b64ea9398f2e793c57bb5d96960e1 | 3,637,997 |
import numpy as np
def bottom_up_low_space(N,K,ts):
"""
Recursive algorithm.
args:
N :: int
length of ts
K :: int
ts :: list of ints
returns:
res :: bool
True :: if a subset of ts sums to K
False :: otherwise
subset :: list of tuples
index and value in ts of the subset that sums to K.
"""
U = np.zeros(K+1, dtype = int)
U[0] = 1
for t in ts:
j = K
while j >= t:
            # fill only empty cells so the back-tracking below stays consistent,
            # and decrement j unconditionally to avoid an infinite loop
            if U[j] == 0 and U[j-t] != 0:
                U[j] = t
            j -= 1
res = U[K] != 0
subset = []
k = K
while U[k] != 0 and k > 0:
t = U[k]
i = [i for i, x in enumerate(U[k] == ts) if x]
for l in i:
if not (l,t) in subset:
subset.append((l,t))
k -= U[k]
return res, sorted(subset) | 31e810871088309fded97b2ab844c3f910c84ffb | 3,637,998 |
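# Worked example (with the fixed inner loop above): three distinct elements sum to K.
bottom_up_low_space(4, 10, [2, 3, 5, 7])
# -> (True, [(0, 2), (1, 3), (2, 5)])  since ts[0] + ts[1] + ts[2] = 2 + 3 + 5 = 10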
import ctypes
def sincpt(method, target, et, fixref, abcorr, obsrvr, dref, dvec):
"""
Given an observer and a direction vector defining a ray, compute
the surface intercept of the ray on a target body at a specified
epoch, optionally corrected for light time and stellar
aberration.
This routine supersedes :func:`srfxpt`.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sincpt_c.html
:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param et: Epoch in ephemeris seconds past J2000 TDB.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param dref: Reference frame of ray's direction vector.
:type dref: str
:param dvec: Ray's direction vector.
:type dvec: 3-Element Array of floats
:return:
Surface intercept point on the target body,
Intercept epoch,
Vector from observer to intercept point.
:rtype: tuple
"""
method = stypes.stringToCharP(method)
target = stypes.stringToCharP(target)
et = ctypes.c_double(et)
fixref = stypes.stringToCharP(fixref)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
dref = stypes.stringToCharP(dref)
dvec = stypes.toDoubleVector(dvec)
spoint = stypes.emptyDoubleVector(3)
trgepc = ctypes.c_double(0)
srfvec = stypes.emptyDoubleVector(3)
found = ctypes.c_int(0)
libspice.sincpt_c(method, target, et, fixref, abcorr, obsrvr, dref, dvec,
spoint, ctypes.byref(trgepc), srfvec, ctypes.byref(found))
return stypes.cVectorToPython(spoint), trgepc.value, stypes.cVectorToPython(
srfvec), bool(found.value) | 7de9e6362aade6cf331ad317ac4f1c2146aa5048 | 3,637,999 |