| content (string, 22 to 815k chars) | id (int64, 0 to 4.91M) |
|---|---|
def get_active_users(URM, popular_threshold=100):
"""
Get the users with activity above a certain threshold
:param URM: URM on which users will be extracted
    :param popular_threshold: popularity threshold
    :return: users with activity above the threshold
"""
return _get_popular(URM, popular_threshold, axis=1)
| 23,800
|
def default_data_to_device(
input, target=None, device: str = "cuda", non_blocking: bool = True
):
"""Sends data output from a PyTorch Dataloader to the device."""
input = input.to(device=device, non_blocking=non_blocking)
if target is not None:
target = target.to(device=device, non_blocking=non_blocking)
return input, target
| 23,801
|
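A minimal usage sketch for the default_data_to_device snippet above, assuming the function is in scope; the tensors and the device="cpu" choice are illustrative assumptions so the example runs without a GPU.

import torch

# Hypothetical mini-batch; in practice these would come from a torch DataLoader.
batch = torch.randn(4, 3)
labels = torch.tensor([0, 1, 0, 1])

# Move both tensors to the target device.
batch, labels = default_data_to_device(batch, labels, device="cpu", non_blocking=False)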
def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
column_width, tab_spaces, patch_filename=None):
"""Helper function that returns objects for diff2 views"""
ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue.key)
if ps_left is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_left_id, status=404)
ps_left.issue_key = request.issue.key
ps_right = models.PatchSet.get_by_id(
int(ps_right_id), parent=request.issue.key)
if ps_right is None:
return HttpTextResponse(
'No patch set exists with that id (%s)' % ps_right_id, status=404)
ps_right.issue_key = request.issue.key
if patch_id is not None:
patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right.key)
else:
patch_right = None
if patch_right is not None:
patch_right.patchset_key = ps_right.key
if patch_filename is None:
patch_filename = patch_right.filename
# Now find the corresponding patch in ps_left
patch_left = models.Patch.query(
models.Patch.filename == patch_filename,
ancestor=ps_left.key).get()
if patch_left:
try:
new_content_left = patch_left.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_left = new_content_left.lines
elif patch_right:
lines_left = patch_right.get_content().lines
else:
lines_left = []
if patch_right:
try:
new_content_right = patch_right.get_patched_content()
except FetchError as err:
return HttpTextResponse(str(err), status=404)
lines_right = new_content_right.lines
elif patch_left:
lines_right = patch_left.get_content().lines
else:
lines_right = []
rows = engine.RenderDiff2TableRows(request,
lines_left, patch_left,
lines_right, patch_right,
context=context,
colwidth=column_width,
tabspaces=tab_spaces)
rows = list(rows)
if rows and rows[-1] is None:
del rows[-1]
return dict(patch_left=patch_left, patch_right=patch_right,
ps_left=ps_left, ps_right=ps_right, rows=rows)
| 23,802
|
def vec_add(iter_a, iter_b):
"""element wise addition"""
    if len(iter_a) != len(iter_b):
        raise ValueError("iter_a and iter_b must have the same length")
return (a + b for a, b in zip(iter_a, iter_b))
| 23,803
|
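A small usage sketch for vec_add above (assumed in scope); note that it returns a generator, so it is materialized with list.

a = [1, 2, 3]
b = [10, 20, 30]
print(list(vec_add(a, b)))  # [11, 22, 33]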
def run():
""" Read inputs into a dictionary for recursive searching """
for line in inputs:
# Strip the trailing "." and split
container, rest = line[:-1].split(" contain ")
# Strip the trailing " bags"
container = container[:-5]
contained = []
for bag in rest.split(", "):
if bag[:2] != "no":
# Strip the leading number and the trailing "bags" or " bag"
contained.append(bag[2:-4].strip())
bags[container] = contained
return sum(1 if search(bag) else 0 for bag in bags)
| 23,804
|
def _get_prolongation_coordinates(grid, d1, d2):
"""Calculate required coordinates of finer grid for prolongation."""
D2, D1 = np.broadcast_arrays(
getattr(grid, 'vectorN'+d2), getattr(grid, 'vectorN'+d1)[:, None])
return np.r_[D1.ravel('F'), D2.ravel('F')].reshape(-1, 2, order='F')
| 23,805
|
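An illustrative sketch of the coordinate construction in _get_prolongation_coordinates above; the lightweight _Grid stand-in exposing vectorNx/vectorNy attributes is an assumption that mimics the real grid object, and the function itself is assumed to be in scope.

import numpy as np

class _Grid:
    """Hypothetical stand-in exposing the vectorN* node coordinates."""
    vectorNx = np.array([0.0, 1.0, 2.0])
    vectorNy = np.array([0.0, 0.5, 1.0])

coords = _get_prolongation_coordinates(_Grid(), 'x', 'y')
print(coords.shape)  # (9, 2): one (x, y) pair per node of the 3x3 finer grid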
async def dump_devinfo(dev: Device, file):
"""Dump developer information.
Pass `file` to write the results directly into a file.
"""
import attr
methods = await dev.get_supported_methods()
res = {
"supported_methods": {k: v.asdict() for k, v in methods.items()},
"settings": [attr.asdict(x) for x in await dev.get_settings()],
"sysinfo": attr.asdict(await dev.get_system_info()),
"interface_info": attr.asdict(await dev.get_interface_information()),
}
if file:
click.echo("Saving to file: %s" % file.name)
json.dump(res, file, sort_keys=True, indent=4)
else:
click.echo(json.dumps(res, sort_keys=True, indent=4))
| 23,806
|
def push_image(tag):
"""Push docker image via gcloud context to resolve permission issues."""
gcloud.docker('--', 'push', tag, _out=sys.stdout, _err=sys.stderr)
| 23,807
|
def get_sorted_filediffs(filediffs, key=None):
"""Sorts a list of filediffs.
The list of filediffs will be sorted first by their base paths in
ascending order.
Within a base path, they'll be sorted by base name (minus the extension)
in ascending order.
If two files have the same base path and base name, we'll sort by the
extension in descending order. This will make :file:`*.h` sort ahead of
:file:`*.c`/:file:`*.cpp`, for example.
    If the list being passed in is not actually a list of FileDiffs, the
    caller must provide a callable ``key`` parameter that will return a
    FileDiff for a given entry in the list. This will only be called once
    per item.
"""
def cmp_filediffs(filediff1, filediff2):
x = make_key(filediff1)
y = make_key(filediff2)
# Sort based on basepath in ascending order.
if x[0] != y[0]:
a = x[0]
b = y[0]
else:
# Sort based on filename in ascending order, then based on
# the extension in descending order, to make *.h sort ahead of
# *.c/cpp.
x_file, x_ext = os.path.splitext(x[1])
y_file, y_ext = os.path.splitext(y[1])
if x_file == y_file:
a = y_ext
b = x_ext
else:
a = x_file
b = y_file
return cmp(a, b)
def make_key(filediff):
if key:
filediff = key(filediff)
filename = filediff.dest_file
i = filename.rfind('/')
if i == -1:
return '', filename
else:
return filename[:i], filename[i + 1:]
return sorted(filediffs, key=cmp_to_key(cmp_filediffs))
| 23,808
|
def get_info(obj):
"""
get info from account obj
:type obj: account object
:param obj: the object of account
:return: dict of account info
"""
if obj:
return dict(db_instance_id=obj.dbinstance_id,
account_name=obj.account_name,
account_status=obj.account_status,
account_type=obj.account_type,
account_description=obj.account_description,
database_privileges=obj.database_privileges)
return {}
| 23,809
|
def vlookup(x0, vals, ind, approx=True):
"""
Equivalent to the spreadsheet VLOOKUP function
:param vals: array_like
2d array of values - first column is searched for index
    :param x0: value to look up in the first column of ``vals``
    :param ind: index of the column whose value is returned
    :param approx: if True, return the closest match found via ``np.searchsorted``; if False, require an exact match
    :return: the matched value from ``vals[ind]``
"""
if isinstance(vals[0][0], str):
x0 = str(x0)
if not approx: # need exact match
return vals[int(ind)][np.where(x0 == np.array(vals[0]))[0][0]]
else:
inds = np.searchsorted(vals[0], x0, side='right') - 1
return vals[ind][int(inds)]
| 23,810
|
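A usage sketch for vlookup above (assumed in scope). As the code is written, vals[0] is the lookup vector and vals[ind] is the vector a value is returned from, so vals is assumed to be column-major; the sample data is made up.

import numpy as np

vals = [[1, 2, 3],      # lookup column (sorted, as approx=True relies on searchsorted)
        [10, 20, 30]]   # values returned via ind=1

print(vlookup(2, vals, ind=1, approx=False))  # exact match -> 20
print(vlookup(2.7, vals, ind=1))              # approximate match -> 20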
def matmul(a00, a10, a01, a11, b00, b10, b01, b11):
"""
    Compute 2x2 matrix multiplication in a vectorized way
C = A*B
C = [a00 a01] * [b00 b01] = [c00 c01]
[a10 a11] [b10 b11] [c10 c11]
"""
c00 = a00*b00 + a01*b10
c10 = a10*b00 + a11*b10
c01 = a00*b01 + a01*b11
c11 = a10*b01 + a11*b11
return c00, c10, c01, c11
| 23,811
|
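A quick check of the vectorized 2x2 product computed by matmul above (assumed in scope) against numpy.matmul on a batch of matrices; the random test data and array layout are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(2, 2, 5))   # five 2x2 matrices, entries indexed as A[row, col, k]
B = rng.normal(size=(2, 2, 5))

c00, c10, c01, c11 = matmul(A[0, 0], A[1, 0], A[0, 1], A[1, 1],
                            B[0, 0], B[1, 0], B[0, 1], B[1, 1])

# Same product with numpy, moving the batch axis first.
C = np.matmul(np.moveaxis(A, 2, 0), np.moveaxis(B, 2, 0))
assert np.allclose(c00, C[:, 0, 0]) and np.allclose(c11, C[:, 1, 1])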
def random_sample_with_weight_and_cost(population, weights, costs, cost_limit):
"""
Like random_sample_with_weight but with the addition of a cost and limit.
While performing random samples (with priority for higher weight) we'll keep track of cost
If cost exceeds the cost limit, we stop selecting
Basically the knapsack problem, but with deliberately random selection rather than dynamic optimization
"""
population_weights = {request: weight for (request, weight) in zip(population, weights)}
population_costs = {request: cost for (request, cost) in zip(population, costs)}
selected = []
not_selected = []
cost = 0
# Create a Counter from the population, assigning count by weight
counter = collections.Counter(population_weights)
while counter:
# Turn the Counter into a list for random selection from
# The list will have n repetitions of an element with weight n
choice = random.choice(list(counter.elements()))
choice_cost = population_costs[choice]
# If the cost would cause us to exceed our limit it shouldn't be selected
if cost + choice_cost > cost_limit:
not_selected.append(choice)
else:
cost += choice_cost
selected.append(choice)
# When chosen (whether selected or not), remove the element from the population
# Effectively removes all repetitions of the element
counter.pop(choice)
return selected, not_selected
| 23,812
|
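A small illustrative run of the weighted, cost-limited sampler above (assumed in scope). Weights must be positive integers because collections.Counter.elements() repeats each item by its count; all values below are made up.

import random

random.seed(0)  # only to make the sketch repeatable
population = ["req_a", "req_b", "req_c", "req_d"]
weights = [5, 3, 1, 1]   # higher weight => more likely to be drawn early
costs = [2, 3, 4, 1]
selected, not_selected = random_sample_with_weight_and_cost(population, weights, costs, cost_limit=5)
print(selected, not_selected)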
def hardnet68ds(pretrained=False, **kwargs):
""" # This docstring shows up in hub.help()
Harmonic DenseNet 68ds (Depthwise Separable) model
pretrained (bool): kwargs, load pretrained weights into the model
"""
# Call the model, load pretrained weights
model = hardnet.HarDNet(depth_wise=True, arch=68, pretrained=pretrained)
return model
| 23,813
|
def taxiProgram():
"""
Taxi program implementation here.
Parameters: None
Returns: None
"""
logger.error("main/taxiProgram: Taxi Program Started")
# Set up data structures for first POGI retrieval
pogiInitPipeline = mp.Queue()
firstTelemetry = None
while True:
# Read pogi data and put it into the pipeline if it is available
pogi_subworker(pogiInitPipeline, POGI_DIRECTORY)
# If we don't get any data, try again
if pogiInitPipeline.empty():
continue
# Once we have data, break out of the loop
firstTelemetry = pogiInitPipeline.get()
break
# Get cached Pylon GPS coordinates
pylonGpsData = None
with open("temp_pylon_gps") as file:
pylonGpsData = json.load(file)
# If any of the two pieces of data from above are None, throw an error and leave
if firstTelemetry is None:
logger.error("main/taxiProgram: Taxi program couldn't get telemetry data")
return
if pylonGpsData is None:
logger.error("main/taxiProgram: Taxi program couldn't get cached pylon gps data")
return
# Get result from search and run taxi command worker with the given heading command
searchResult = searchWorker(firstTelemetry.data, pylonGpsData)
taxi_command_worker_first(searchResult)
# Set up pipeline architecture for taxi
deckLinkSrcOutTaxiInPipeline = mp.Queue() # Timestamped data
taxiOutCommandInPipeline = mp.Queue()
pause = mp.Lock()
quit = mp.Queue()
processes = [
mp.Process(target=decklinkSrcWorker, args=(pause, quit, deckLinkSrcOutTaxiInPipeline)),
mp.Process(target=taxi_worker, args=(pause, quit, deckLinkSrcOutTaxiInPipeline, taxiOutCommandInPipeline)),
mp.Process(target=command_taxi_worker_continuous, args=(pause, quit, taxiOutCommandInPipeline))
]
for p in processes:
p.start()
logger.error("main/taxiProgram: Taxi Program Init Finished")
return
| 23,814
|
def session_validity_stats():
"""Output statistics about how many sessions were valid."""
valid = db.session_data(only_valid=True)
all = db.session_data(only_valid=False)
print("Session Validity stats")
print(f"total: {len(all)}")
print(f"valid: {len(valid)}")
print(f"invalid: {len(all) - len(valid)}")
print("\n")
| 23,815
|
def get_config():
"""Base config for training models."""
config = ml_collections.ConfigDict()
# How often to save the model checkpoint.
config.save_checkpoints_steps: int = 1000
  # Frequency of eval during training, e.g. every 1000 steps.
config.eval_frequency: int = 1000
# Total batch size for training.
config.train_batch_size: int = 32
# Total batch size for eval.
config.eval_batch_size: int = 8
# The base learning rate for Adam.
config.learning_rate: float = 1e-4
# Initial checkpoint directory (usually from a pre-trained model).
config.init_checkpoint_dir: str = ''
# Whether to lower case the input text. Should be True for uncased models and
# False for cased models.
config.do_lower_case: bool = True
# Model parameters.
# For pre-training, we only need 2 segment types (for NSP), but we allow up to
# 4 for GLUE/SuperGLUE fine-tuning.
config.type_vocab_size: int = 4
# Embedding dimension for each token.
config.d_emb: int = 768
# Hidden dimension of model.
config.d_model: int = 768
# Hidden dimension for feed-forward layer.
config.d_ff: int = 3072
# The maximum total input sequence length after tokenization. Sequences longer
# than this will be truncated, and sequences shorter than this will be padded.
config.max_seq_length: int = 512
# Number of self-attention heads. Only used for BERT models.
config.num_heads: int = 12
# Number of model blocks / layers.
config.num_layers: int = 12
# Regular dropout rate, applied throughout model.
config.dropout_rate: float = 0.1
# Dropout rate used in mixing module, e.g. self-attention sublayer.
config.mixing_dropout_rate: float = 0.1
# Determines how discrete Fourier Transforms are computed. Only used for FNet
# models. Set to true if running on TPU hardware, in which case matrix
# multiplications will be favored for relatively shorter input sequences. Set
# to false for GPU/CPU hardware, in which case FFTs are used for all input
# sequence lengths.
config.use_tpu_fourier_optimizations: bool = False
# Dummy parameter for repeated runs.
config.trial: int = 0
return config
| 23,816
|
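A usage sketch for the config builder above, assuming it is importable and ml_collections is installed: fields are plain attributes on the returned ConfigDict and can be overridden before training.

config = get_config()
config.train_batch_size = 64           # override a default
config.max_seq_length = 128
print(config.learning_rate)            # 0.0001
print(config.to_dict()["num_layers"])  # 12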
def slave_addresses(dns):
"""List of slave IP addresses
@returns: str Comma delimited list of slave IP addresses
"""
return ', '.join(['{}:53'.format(s['address'])
for s in dns.pool_config])
| 23,817
|
def processAndLabelStates(role, states, reason, positiveStates=None, negativeStates=None, positiveStateLabelDict={}, negativeStateLabelDict={}):
"""Processes the states for an object and returns the appropriate state labels for both positive and negative states.
	@param role: The role of the object to process states for (e.g. C{ROLE_CHECKBOX}).
@type role: int
@param states: The raw states for an object to process.
@type states: set
	@param reason: The reason to process the states (e.g. C{REASON_FOCUS}).
@type reason: str
@param positiveStates: Used for C{REASON_CHANGE}, specifies states changed from negative to positive;
@type positiveStates: set
@param negativeStates: Used for C{REASON_CHANGE}, specifies states changed from positive to negative;
	@type negativeStates: set
@param positiveStateLabelDict: Dictionary containing state identifiers as keys and associated positive labels as their values.
@type positiveStateLabelDict: dict
@param negativeStateLabelDict: Dictionary containing state identifiers as keys and associated negative labels as their values.
@type negativeStateLabelDict: dict
@return: The labels of the relevant positive and negative states.
@rtype: [str, ...]
"""
mergedStateLabels=[]
positiveStates = processPositiveStates(role, states, reason, positiveStates)
negativeStates = processNegativeStates(role, states, reason, negativeStates)
for state in sorted(positiveStates | negativeStates):
if state in positiveStates:
mergedStateLabels.append(positiveStateLabelDict.get(state, stateLabels[state]))
elif state in negativeStates:
# Translators: Indicates that a particular state of an object is negated.
# Separate strings have now been defined for commonly negated states (e.g. not selected and not checked),
# but this still might be used in some other cases.
# %s will be replaced with the full identifier of the negated state (e.g. selected).
mergedStateLabels.append(negativeStateLabelDict.get(state, negativeStateLabels.get(state, _("not %s") % stateLabels[state])))
return mergedStateLabels
| 23,818
|
def _views(config, wapp, plugins):
"""initialize core api handlers"""
log.debug('init views')
wapp.route('/_/', 'GET', status.view, name = 'core.status',
apply = plugins)
| 23,819
|
def alpha_setup_backtest(strategy, from_date, to_date, base_timeframe=Instrument.TF_TICK):
"""
    Simply load the history of OHLCs and initialize all strategy traders here (sync).
"""
for market_id, instrument in strategy._instruments.items():
# retrieve the related price and volume watcher
watcher = instrument.watcher(Watcher.WATCHER_PRICE_AND_VOLUME)
if watcher:
# wait for timeframes before query
for k, timeframe in strategy.parameters.get('timeframes', {}).items():
if timeframe['timeframe'] > 0:
instrument.want_timeframe(timeframe['timeframe'])
# query for most recent candles per timeframe from the database
for k, timeframe in strategy.parameters.get('timeframes', {}).items():
if timeframe['timeframe'] > 0:
l_from = from_date - timedelta(seconds=timeframe['history']*timeframe['timeframe'])
l_to = from_date - timedelta(seconds=1)
watcher.historical_data(instrument.market_id, timeframe['timeframe'],
from_date=l_from, to_date=l_to)
# create a feeder per instrument and fetch ticks and candles + ticks
feeder = StrategyDataFeeder(strategy, instrument.market_id, [], True)
strategy.add_feeder(feeder)
# fetch market info from the DB
Database.inst().load_market_info(strategy.service, watcher.name, instrument.market_id)
feeder.initialize(watcher.name, from_date, to_date)
# initialized state
for k, strategy_trader in strategy._strategy_traders.items():
with strategy_trader._mutex:
strategy_trader._initialized = 0
| 23,820
|
def shape_for_stateful_rnn(data, batch_size, seq_length, seq_step):
"""
Reformat our data vector into input and target sequences to feed into our
RNN. Tricky with stateful RNNs.
"""
# Our target sequences are simply one timestep ahead of our input sequences.
# e.g. with an input vector "wherefore"...
# targets: h e r e f o r e
# predicts ^ ^ ^ ^ ^ ^ ^ ^
# inputs: w h e r e f o r
inputs = data[:-1]
targets = data[1:]
# We split our long vectors into semi-redundant seq_length sequences
inputs = _create_sequences(inputs, seq_length, seq_step)
targets = _create_sequences(targets, seq_length, seq_step)
# Make sure our sequences line up across batches for stateful RNNs
inputs = _batch_sort_for_stateful_rnn(inputs, batch_size)
targets = _batch_sort_for_stateful_rnn(targets, batch_size)
# Our target data needs an extra axis to work with the sparse categorical
# crossentropy loss function
targets = targets[:, :, np.newaxis]
return inputs, targets
| 23,821
|
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Sets up appliance binary sensors"""
hub: Hub = hass.data[DOMAIN][config_entry.entry_id]
# Dehumidifier sensors
async_add_entities(
TankFullSensor(c) for c in hub.coordinators if c.is_dehumidifier()
)
async_add_entities(
FilterReplacementSensor(c) for c in hub.coordinators if c.is_dehumidifier()
)
async_add_entities(
DefrostingSensor(c) for c in hub.coordinators if c.is_dehumidifier()
)
| 23,822
|
def setup_features(dataRaw, label='flux', notFeatures=[], pipeline=None, verbose=False, resample=False, returnAll=None):
"""Example function with types documented in the docstring.
For production level usage: All scaling and transformations must be done
with respect to the calibration data distributions
Args:
features (nD-array): Array of input raw features.
labels (1D-array): The second parameter.
pipeline (int): The first parameter.
label_scaler (str): The second parameter.
feature_scaler (str): The second parameter.
Returns:
features_transformed, labels_scaled
.. _PEP 484:
https://github.com/ExoWanderer/
"""
# if label in notFeatures: notFeatures.remove(label)
if isinstance(dataRaw,str):
        dataRaw = pd.read_csv(dataRaw)
elif isinstance(dataRaw, dict):
dataRaw = pd.DataFrame(dataRaw)
elif not isinstance(dataRaw, pd.DataFrame):
raise TypeError('The input must be a `pandas.DataFrame` or a `dict` with Equal Size Entries (to convert to df here)')
# WHY IS THIS ALLOWED TO NOT HAVE PARENTHESES?
# assert isinstance(dataRaw, pd.DataFrame), 'The input must be a Pandas DataFrame or Dictionary with Equal Size Entries'
inputData = dataRaw.copy()
# PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key})
pixCols = [colname for colname in inputData.columns if 'pix' in colname.lower() or 'pld' in colname.lower()]
PLDnorm = np.sum(np.array(inputData[pixCols]),axis=1)
inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T
# # Overwrite the PLDpixels entries with the normalized version
# for key in dataRaw.columns:
# if key in PLDpixels.columns:
# inputData[key] = PLDpixels[key]
#
# Assign the labels
    n_PLD = len([key for key in dataRaw.keys() if 'err' not in key.lower() and ('pix' in key.lower() or 'pld' in key.lower())])
input_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' not in colname.lower()]
errors_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' in colname.lower()]
# resampling_inputs = ['flux', 'xpos', 'ypos', 'xfwhm', 'yfwhm', 'bg_flux', 'bmjd', 'np'] + ['pix{}'.format(k) for k in range(1,10)]
# resampling_errors = ['fluxerr', 'xerr', 'yerr', 'xerr', 'yerr', 'sigma_bg_flux', 'bmjd_err', 'np_err'] + ['fluxerr']*n_PLD
start = time()
if resample:
print("Resampling ", end=" ")
inputData = pd.DataFrame({colname:np.random.normal(dataRaw[colname], dataRaw[colerr]) \
for colname, colerr in tqdm(zip(input_labels, errors_labels), total=len(input_labels))
})
print("took {} seconds".format(time() - start))
else:
inputData = pd.DataFrame({colname:dataRaw[colname] for colname in input_labels})
labels = dataRaw[label].values
# explicitly remove the label
if label in inputData.columns: inputData.drop(label, axis=1, inplace=True)
feature_columns = [colname for colname in inputData.columns if colname not in notFeatures]
features = inputData[feature_columns].values
if verbose: print('Shape of Features Array is', features.shape)
if verbose: start = time()
# labels_scaled = labels# label_scaler.fit_transform(labels[:,None]).ravel() if label_scaler is not None else labels
features_trnsfrmd = pipeline.fit_transform(features) if pipeline is not None else features
if verbose: print('took {} seconds'.format(time() - start))
collection = features_trnsfrmd, labels
if returnAll == True:
collection = features_trnsfrmd, labels, pipeline
if returnAll == 'features':
collection = features_trnsfrmd
    if returnAll == 'with raw data':
        collection = features_trnsfrmd, labels, dataRaw
return collection
| 23,823
|
def plot_marginal_effects(model: ModelBridge, metric: str) -> AxPlotConfig:
"""
Calculates and plots the marginal effects -- the effect of changing one
factor away from the randomized distribution of the experiment and fixing it
at a particular level.
Args:
model: Model to use for estimating effects
metric: The metric for which to plot marginal effects.
Returns:
AxPlotConfig of the marginal effects
"""
plot_data, _, _ = get_plot_data(model, {}, {metric})
arm_dfs = []
for arm in plot_data.in_sample.values():
arm_df = pd.DataFrame(arm.parameters, index=[arm.name])
arm_df["mean"] = arm.y_hat[metric]
arm_df["sem"] = arm.se_hat[metric]
arm_dfs.append(arm_df)
    effect_table = marginal_effects(pd.concat(arm_dfs, axis=0))
varnames = effect_table["Name"].unique()
data: List[Any] = []
for varname in varnames:
var_df = effect_table[effect_table["Name"] == varname]
data += [
go.Bar(
x=var_df["Level"],
y=var_df["Beta"],
error_y={"type": "data", "array": var_df["SE"]},
name=varname,
)
]
fig = subplots.make_subplots(
cols=len(varnames),
rows=1,
subplot_titles=list(varnames),
print_grid=False,
shared_yaxes=True,
)
for idx, item in enumerate(data):
fig.append_trace(item, 1, idx + 1)
fig.layout.showlegend = False
# fig.layout.margin = go.layout.Margin(l=2, r=2)
fig.layout.title = "Marginal Effects by Factor"
fig.layout.yaxis = {
"title": "% better than experiment average",
"hoverformat": ".{}f".format(DECIMALS),
}
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
| 23,824
|
def update_user_config_in_db(config: UserConfig, io: IO, month_year: tuple[int, int]) -> None:
"""
Updates UserConfig based on latest calendar Month/Year view preference. Saves UserConfig to db
:param config: instance of UserConfig
:param io: instance of IO object
:param month_year: tuple[int, int] Month, Year
:return: None
"""
config.user_calendar_date_preference = month_year
io.save_user_config(config)
| 23,825
|
def _client_row_class(client: dict) -> str:
"""
Set the row class depending on what's in the client record.
"""
required_cols = ['trust_balance', 'refresh_trigger']
for col in required_cols:
if col not in client:
return 'dark'
try:
if client['trust_balance'] > client['refresh_trigger']:
return 'success'
except TypeError:
return 'dark'
return 'danger'
| 23,826
|
def num2ord(place):
"""Return ordinal for the given place."""
omap = { u'1' : u'st',
u'2' : u'nd',
u'3' : u'rd',
u'11' : u'th',
u'12' : u'th',
u'13' : u'th' }
if place in omap:
return place + omap[place]
elif place.isdigit():
if len(place) > 1 and place[-1] in omap: # last digit 1,2,3
return place + omap[place[-1]]
else:
return place + u'th'
else:
return place
| 23,827
|
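A few illustrative calls to num2ord above (assumed in scope); note that it expects the place as a string and simply echoes non-numeric input.

for place in [u'1', u'2', u'3', u'4', u'11', u'21', u'101', u'abc']:
    print(place, num2ord(place))
# 1st, 2nd, 3rd, 4th, 11th, 21st, 101st, abc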
def _get_dist_class(
policy: Policy, config: AlgorithmConfigDict, action_space: gym.spaces.Space
) -> Type[TFActionDistribution]:
"""Helper function to return a dist class based on config and action space.
Args:
policy: The policy for which to return the action
dist class.
config: The Algorithm's config dict.
action_space (gym.spaces.Space): The action space used.
Returns:
Type[TFActionDistribution]: A TF distribution class.
"""
if hasattr(policy, "dist_class") and policy.dist_class is not None:
return policy.dist_class
elif config["model"].get("custom_action_dist"):
action_dist_class, _ = ModelCatalog.get_action_dist(
action_space, config["model"], framework="tf"
)
return action_dist_class
elif isinstance(action_space, Discrete):
return Categorical
elif isinstance(action_space, Simplex):
return Dirichlet
else:
assert isinstance(action_space, Box)
if config["normalize_actions"]:
return SquashedGaussian if not config["_use_beta_distribution"] else Beta
else:
return DiagGaussian
| 23,828
|
def gen_all_param2vals(param2requests: Dict[str, list],
param2default: Dict[str, Any],
                       ) -> Iterator[Dict[str, Any]]:  # note: this is a generator; Iterator assumed imported from typing
"""
return multiple param2val objects,
each defining the parameter configuration for a single job
"""
# check that requests are lists and not empty
for k, v in param2requests.items():
if not isinstance(v, list):
raise ValueError('Ludwig: Values in param2requests must be of type list.')
if len(v) == 0:
raise ValueError('Ludwig: Found empty list in param2requests.')
# complete partial request made by user
full_request = {k: [v] if k not in param2requests else param2requests[k]
for k, v in param2default.items()}
#
param2opts = tuple(full_request.items())
param_ids = _iter_over_cycles(param2opts)
seen = []
for ids in param_ids:
# map param names to integers corresponding to which param value to use
param2val = {k: v[i] for (k, v), i in zip(param2opts, ids)}
if param2val in seen:
raise ValueError('Found duplicate value in param2requests')
seen.append(param2val)
yield param2val
| 23,829
|
def logout():
"""
Logout current user.
"""
controller.clear_user()
    typer.echo("Logged out. Bye!")
| 23,830
|
def expectation_values(times, states, operator):
"""expectation values of operator at times wrt states"""
def exp_value(state, operator, time):
if len(state.shape) == 2: #DensityMatrix
return np.trace(np.dot(state, operator(time)))
else: #StateVector
return np.vdot(state, np.dot(operator(time), state))
evs = np.ndarray(times.shape, dtype=complex)
for i in range(times.shape[0]):
evs[i] = exp_value(states[i], operator, times[i])
return evs
| 23,831
|
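A worked sketch for expectation_values above (assumed in scope): a time-independent Pauli-Z operator evaluated on two state vectors; all inputs are illustrative.

import numpy as np

sigma_z = np.array([[1.0, 0.0], [0.0, -1.0]])
operator = lambda t: sigma_z                  # time-independent operator
times = np.array([0.0, 1.0])
states = np.array([[1.0, 0.0],                # |0> -> expectation +1
                   [0.0, 1.0]])               # |1> -> expectation -1
print(expectation_values(times, states, operator))  # [ 1.+0.j -1.+0.j]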
def compute_f_all(F1,fft_size,windowing,dtype_complex,F_frac=[],F_fs=[],F_refs=[],freq_channel=0,\
F_first_sample=[],F_rates=[],F_pcal_fix=[],F_side=[],F_ind=[],F_lti=[]):
"""
Compute FFTs for all stations (all-baselines-per-task mode), and correct for fractional sample correction (linear phase).
Parameters
----------
F1
list of stored samples (corresponding actually to F1_partial). Each element of the list is a numpy array
        with the complex samples in the time domain, with a number of samples that is a multiple of the FFT length.
fft_size : int
number of coefficients in the FFT.
windowing : str
shape of the window before FFT, currently 'square' by default.
dtype_complex: type of data for initialization of the rotators.
F_frac
        fractional and integer offsets applied at the mapper (access via F_refs).
F_fs
sampling frequency for each stream in F1.
F_refs
        indices to access F_frac etc. based on F_ind, i.e. from stored to new.
freq_channel
sky frequency.
F_first_sample
        first sample number (actually the last sample number plus one; it has to be corrected by subtracting the number of samples in F1).
F_rates
delay information for each of the streams (access via F_refs).
F_pcal_fix
offset for pcal accumulation results (due to the initial offset applied in the mapper). Later the pcal
signals will be realigned as if no delay was applied to them.
F_side
list of single side band side for each stream, 'l' LSB or 'u' USB (access via F_refs).
F_ind
list of station-polarization identifiers corresponding to the streams in F1 (this actually corresponds
to F1_ind_partial.
Returns
-------
F1_fft
list of array of arrays with FFTs with rotations applied.
None
[unused] previously outputing the conjugate of F1_fft, removed for efficiency.
    F_adj_shift_partial_out
        [unused] previously used to keep track of the number of samples to
        add/drop due to fractional sample overflows, superseded by F_frac_over.
    F_adj_shift_pcal_out
        [unused] previously used to keep track of the number of samples to
        roll the phase calibration results prior to FFTing them, superseded by F_pcal_fix_out.
    F_pcal_fix_out
        list with the number of samples to roll the pcal streams prior to FFTing them.
F_first_sample_out
first sample for each stream (actually last sample number plus one).
Notes
-----
|
| **Procedure:**
|
| For each element in F1:
| 1. Create an array of arrays with the FFTs of the samples grouped into arrays of fft_size samples.
| 2. Create a frequency scale of fft_size (linear from 0 to (n-1)/n).
| 3a. If the computations have already been done for the same station, take the results.
| 3b. Otherwise:
| Compute delay for the first sample, then fractional part of this delay, then scale frequency scale, then exponential.
| Rotate the FFT using the previous rotator.
|
|
| **References:**
|
| [Th04] p363
|
|
| **TO DO:**
|
| Detail where in the FFT the fractional sample for the rotator is evaluated.
|
| Check correction to phase in p363.
"""
F_adj_shift_partial_out=[]
F_adj_shift_partial_mon=[]
F_adj_shift_pcal_out=[]
F_pcal_fix_out=[]
F_first_sample_out=[]
# TO DO: assuming all data same type for now
[sideband,data_type]=F_side[0]
# Windowing and FFT
first_iteration=1
last_fractional_recalc=0
last_str_st=""
F1_fft = window_and_fft(F1,fft_size,windowing,flatten_chunks=0,dtype_complex=dtype_complex) # ,rfft_data_type=data_type)
# If real samples take only half FFT (LSB or USB as applicable)
if data_type=='r':
if sideband=='L':
F1_fft = np.delete(F1_fft,np.s_[:fft_size//2],2)
else:
F1_fft = np.delete(F1_fft,np.s_[fft_size//2:],2)
shift_int=0
# Fractional sample correction
F1_fft_rot=np.zeros(F1_fft.shape,dtype=dtype_complex)
error_f_frac=1
if F_rates!=[]:
if data_type=='c':
freqscale2 = np.arange(0,1,1/float(fft_size))
fft_size_comp=fft_size
else:
if sideband=='L':
freqscale2 = float(-1)*np.arange(0.5,0,float(-1)/float(fft_size)) # First half of the full vector (e.g. [-0.5 -0.375 -0.25 -0.125] with fft_size=8)
else:
freqscale2 = np.arange(0,1,1/float(fft_size))[:fft_size//2] # Second half the full vector (e.g. [ 0. 0.125 0.25 0.375] with fft_size=8)
fft_size_comp=fft_size//2
# p363
for stpol in range(F1_fft.shape[0]):
fs=F_fs[F_refs[stpol]]
Ts=1/fs
[sideband,data_type]=F_side[F_refs[stpol]]
#str_st=F_ind[F_refs[stpol]].split('.')[0]
str_st=F_ind[stpol].split('.')[0]
#sample0=F_first_sample[F_refs[stpol]]-len(F1[F_refs[stpol]])
sample0=F_first_sample[F_refs[stpol]]
##adjustments (padding) from hstack_samples...
#sample0+=F_lti[stpol][3]
num_samples=len(F1[F_refs[stpol]])
F_first_sample_out.append(sample0+num_samples)
computed=0
error_f_frac=0
i_row=-1
if last_str_st!=str_st or first_iteration:
if SAVE_TIME_ROTATIONS:
first_iteration=0
first_sample=sample0
first_sample_s=first_sample*Ts
[delay_rate_0,delay_rate_1,delay_rate_2,delay_rate_ref,clock_rate_0,\
clock_rate_1,clock_abs_rate_0,clock_abs_rate_1,clock_rate_ref,\
model_only_delay,clock_only_delay,diff_frac]=F_rates[F_refs[stpol]]
diff_frac=0
[fractional_sample_correction,shift_delay]=F_frac[F_refs[stpol]]
shift_s=shift_delay*fs
frtot_v=[]
first_iteration_recalc=1
# TO DO: consider centering in the interval (N//2)
timescale=[0]
clock_diff = [clock_rate_0,clock_rate_1]
poly_diff = [delay_rate_0,delay_rate_1,delay_rate_2]
clock_abs = [clock_abs_rate_0,clock_abs_rate_1]
seconds_ref_clock = clock_rate_ref
#if USE_NE_F:
# npr1 = np.arange(F1_fft.shape[1])
# total_timescale = ne.evaluate("Ts*(sample0+fft_size*npr1)") # slower
#else:
total_timescale =Ts*(sample0+fft_size*np.arange(F1_fft.shape[1]))
total_seconds_offset=0
[r_recalc,m_unused,c_recalc,r_unused,a_unused] = get_delay_val(\
clock_diff=clock_diff,\
poly_diff=poly_diff,\
seconds_ref_clock=seconds_ref_clock,\
seconds_ref_poly=delay_rate_ref,\
seconds=total_timescale,\
seconds_offset=total_seconds_offset,\
v=DEBUG_LIB_DELAY,diff_pol=DIFF_POLY)
[full_fractional_recalc,fractional_recalc] = get_full_frac_val(r_recalc,fs)
for row in range(F1_fft.shape[1]):
i_row+=1
fsr=sample0+i_row*fft_size
lsr=fsr+fft_size
if DEBUG_DELAYS:
print_debug_r_delays_f(stpol,F_refs[stpol],F_ind[stpol],fsr,num_samples,len(timescale),\
total_timescale[row],0.0,\
total_seconds_offset,r_recalc[row],r_unused,a_unused,fractional_sample_correction,\
full_fractional_recalc[row],fractional_recalc[row],diff_frac)
computed=0
if last_fractional_recalc!=fractional_recalc[row] or first_iteration_recalc:
if SAVE_TIME_ROTATIONS:
first_iteration_recalc=0
computed=1
#print(str_st)
#print(freqscale2*(fractional_recalc[row]))
[fr6,nr]=get_exp(freqscale2*(fractional_recalc[row]))
if not(nr):
#frtot=get_rotator([fr6])
frtot=fr6
frtot_v.append([frtot])
else:
frtot_v.append([1.0])
else:
# Skipping, copy last value
if not(nr):
frtot_v.append([frtot])
else:
frtot_v.append([1.0])
last_fractional_recalc=fractional_recalc[row]
last_str_st=str_st
for row in range(F1_fft.shape[1]):
if not nr:
try:
np.multiply(F1_fft[stpol,row,:],frtot_v[row][0],F1_fft[stpol,row,:])
except IndexError:
print("Error in rotation: "+str(len(frtot_v))+", "+str(F1_fft.shape))
if DEBUG_DELAYS:
print("zR"+KEY_SEP+"f "+str(stpol).rjust(5)+str(F_refs[stpol]).rjust(8)+F_ind[stpol].rjust(8)+\
str(fsr).rjust(10)+str(num_samples).rjust(10) +\
" C,R >>>>".ljust(191)+str(computed).rjust(3)+str(int(not(nr))).rjust(3))
if error_f_frac==0:
if DEBUG_FRAC_OVER:
# TO DO: create functions in lib_debug(?)
print("zR"+KEY_SEP+"o".ljust(35)+str(stpol).rjust(5)+str(F_refs[stpol]).rjust(8)+F_ind[stpol].rjust(8)+\
str(fsr).rjust(10)+str(num_samples).rjust(10)+str(len(timescale)).rjust(10)+\
str(timescale[0]).rjust(16)+str(r_recalc[0]).rjust(20)+\
str(full_fractional_recalc[row]).rjust(20)+\
str(fractional_recalc[row]).rjust(20)+\
#str(frac_re).rjust(10)+\
#str(total_frac_delay_int).rjust(10)+\
"".rjust(10)+\
#str(clock_frac_delay_diff).rjust(20)+\
"".rjust(20)+\
#str(clock_frac_delay_int).rjust(10))
"".rjust(10))
else:
if DEBUG_FRAC_OVER:
print("zR"+KEY_SEP+"o "+"error")
# Correction for pcal
F_pcal_fix_out.append(shift_delay)
else:
F_first_sample_out=F_first_sample
#for stpol in range(F1_fft.shape[0]):
# for row in range(F1_fft.shape[1]):
# F1_fft_rot[stpol,row,:]=F1_fft[F_refs[stpol],row,:]
print("zR\tWarning: no rotation: first sample "+str(F_first_sample))
if (len(F_pcal_fix)>=len(F_pcal_fix_out))or(error_f_frac==1):
F_pcal_fix_out=F_pcal_fix #[:]
#F2_fft_rot = np.conj(F1_fft_rot)
if DEBUG_DELAYS or DEBUG_LIB_DELAY: #or DEBUG_FRAC_OVER :
print("zR"+KEY_SEP+"oj".ljust(20)+str(len(F_adj_shift_partial_out))+" "+\
','.join(map(str,F_adj_shift_partial_out))+" "+\
" mon "+','.join(map(str,F_adj_shift_partial_mon)))
print("zR"+KEY_SEP+"---------------")
return([F1_fft,None,F_adj_shift_partial_out,F_adj_shift_pcal_out,F_pcal_fix_out,F_first_sample_out])
| 23,832
|
def bookmark(request):
"""
Add or remove a bookmark based on POST data.
"""
if request.method == 'POST':
# getting handler
model_name = request.POST.get('model', u'')
model = django_apps.get_model(*model_name.split('.'))
if model is None:
# invalid model -> bad request
return http.HttpResponseBadRequest(ERRORS['model'])
handler = handlers.library.get_handler(model)
if handler is None:
# bad or unregistered model -> bad request
return http.HttpResponseBadRequest(ERRORS['handler'])
# getting form
form = handler.get_form(request, data=request.POST)
if form.is_valid():
instance = form.instance()
bookmark_model = handler.backend.get_model()
# validating the bookmark key
key = handler.get_key(request, instance, form.cleaned_data['key'])
if not handler.allow_key(request, instance, key):
return http.HttpResponseBadRequest(ERRORS['key'])
# pre-save signal: receivers can stop the bookmark process
# note: one receiver is always called: *handler.pre_save*
# handler can disallow the vote
responses = signals.bookmark_pre_save.send(sender=bookmark_model,
form=form, request=request)
# if one of the receivers returns False then bookmark process
# must be killed
for receiver, response in responses:
if response is False:
return http.HttpResponseBadRequest(
u'Receiver %r killed the bookmark process' %
receiver.__name__)
# adding or removing the bookmark
bookmark = handler.save(request, form)
created = bool(bookmark.pk)
# post-save signal
# note: one receiver is always called: *handler.post_save*
signals.bookmark_post_save.send(sender=bookmark_model,
bookmark=bookmark, request=request, created=created)
# process completed successfully: redirect
return handler.response(request, bookmark, created)
# form is not valid: must handle errors
return handler.fail(request, form.errors)
# only answer POST requests
return http.HttpResponseForbidden('Forbidden.')
| 23,833
|
def visualize_map_features_row_separate(args, seq, seq_agents_df, map_feature_row):
"""Visualize a row of map features and the scene."""
print("Visualizing sequence {}, agent {}, with {} candidates.".format(map_feature_row["SEQUENCE"], map_feature_row["TRACK_ID"], len(map_feature_row["CANDIDATE_CENTERLINES"])))
candidate_centerlines = map_feature_row["CANDIDATE_CENTERLINES"]
plt.figure(figsize=(8, 7))
agent_xy = plot_scene(args, seq, seq_agents_df, map_feature_row)
plt.xlabel("Map X")
plt.ylabel("Map Y")
plt.axis("off")
plt.title(f"Num candidates = {len(candidate_centerlines)}, Track Len = {len(agent_xy)}")
plt.savefig(f"{args.feature_dir}/{seq}_{map_feature_row['TRACK_ID']}.png")
| 23,834
|
def start_nodenetrunner(nodenet_uid):
"""Starts a thread that regularly advances the given nodenet by one step."""
nodenets[nodenet_uid].is_active = True
if runner['runner'].paused:
runner['runner'].resume()
return True
| 23,835
|
def transform_coordinates_3d(coordinates, RT):
"""
Input:
coordinates: [3, N]
RT: [4, 4]
Return
new_coordinates: [3, N]
"""
if coordinates.shape[0] != 3 and coordinates.shape[1]==3:
coordinates = coordinates.transpose()
coordinates = np.vstack([coordinates, np.ones((1, coordinates.shape[1]), dtype=np.float32)])
new_coordinates = RT @ coordinates
new_coordinates = new_coordinates[:3, :]/new_coordinates[3, :]
return new_coordinates
| 23,836
|
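A short sketch for transform_coordinates_3d above (assumed in scope), applying a pure translation in homogeneous coordinates to three points; the transform and points are made up.

import numpy as np

RT = np.eye(4)
RT[:3, 3] = [1.0, 2.0, 3.0]           # translate by (1, 2, 3), no rotation

points = np.array([[0.0, 1.0, 0.0],
                   [0.0, 0.0, 1.0],
                   [0.0, 0.0, 0.0]])  # shape [3, N]: N = 3 points stored as columns

print(transform_coordinates_3d(points, RT))
# [[1. 2. 1.]
#  [2. 2. 3.]
#  [3. 3. 3.]]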
def upload_to_s3(
file: Path, bucket: str = f"arr-packit-{getenv('DEPLOYMENT', 'dev')}"
) -> None:
"""Upload a file to an S3 bucket.
Args:
file: File to upload.
bucket: Bucket to upload to.
"""
s3_client = boto3_client("s3")
try:
logger.info(f"Uploading {file} to S3 ({bucket})")
s3_client.upload_file(str(file), bucket, file.name)
except ClientError as e:
logger.error(e)
raise
| 23,837
|
def _get_misclass_auroc(preds, targets, criterion, topk=1, expected_data_uncertainty_array=None):
"""
Get AUROC for Misclassification detection
:param preds: Prediction probabilities as numpy array
:param targets: Targets as numpy array
:param criterion: Criterion to use for scoring on misclassification detection.
:param topk: Top-kl class probabilities to consider while making predictions.
:param expected_data_uncertainty_array: Expected data uncertainty as numpy array
:return: AUROC on misclassification detection
"""
misclassification_targets = (1 - _misclass_tgt(preds, targets, (topk,))).astype(bool)
if criterion == 'entropy':
criterion_values = np.sum(-preds * np.log(preds), axis=1)
elif criterion == 'confidence':
criterion_values = -preds.max(axis=1)
elif criterion == 'model_uncertainty':
criterion_values = np.sum(-preds * np.log(preds), axis=1) - expected_data_uncertainty_array
else:
raise NotImplementedError
return auroc(misclassification_targets, criterion_values)
| 23,838
|
def test_preprocess_data_weighted():
"""Copied from sklearn/linear_model/tests/test_base.py, with small
modifications.
"""
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
nnlr = NonNegativeLinearRegression()
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
nnlr._preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
np.testing.assert_allclose(X_mean, expected_X_mean)
np.testing.assert_allclose(y_mean, expected_y_mean)
np.testing.assert_allclose(X_norm, np.ones(n_features))
np.testing.assert_allclose(Xt, X - expected_X_mean)
np.testing.assert_allclose(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
nnlr._preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
np.testing.assert_allclose(X_mean, expected_X_mean)
np.testing.assert_allclose(y_mean, expected_y_mean)
np.testing.assert_allclose(X_norm, expected_X_norm)
np.testing.assert_allclose(Xt, (X - expected_X_mean) / expected_X_norm)
np.testing.assert_allclose(yt, y - expected_y_mean)
| 23,839
|
def test_grpc_list_refresh_tokens():
""" gRPC: Test listing refresh tokens """
dex = make_client()
refresh_tokens = dex.list_refresh(*test_http_args)
| 23,840
|
def flatten(l: Iterable) -> List:
"""Return a list of all non-list items in l
:param l: list to be flattened
:return:
"""
rval = []
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
if len(list(e)):
rval += flatten(e)
else:
rval.append(e)
return rval
| 23,841
|
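A usage sketch for flatten above (assumed in scope); strings are kept whole and empty nested iterables are dropped.

print(flatten([1, [2, [3, 4]], "ab", [], (5,)]))  # [1, 2, 3, 4, 'ab', 5]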
def create_note(dataset_id, fhir_store_id, note_id): # noqa: E501
"""Create a note
Create a note # noqa: E501
:param dataset_id: The ID of the dataset
:type dataset_id: str
:param fhir_store_id: The ID of the FHIR store
:type fhir_store_id: str
:param note_id: The ID of the note that is being created
:type note_id: str
:rtype: NoteCreateResponse
"""
res = None
status = None
try:
store_name = None
try:
store_name = "datasets/%s/fhirStores/%s" % \
(dataset_id, fhir_store_id)
DbFhirStore.objects.get(name=store_name)
except DoesNotExist:
status = 400
res = Error("The specified FHIR store was not found", status)
return res, status
try:
note_create_request = NoteCreateRequest.from_dict(
connexion.request.get_json())
try:
DbPatient.objects.get(
fhirStoreName=store_name,
identifier=note_create_request.patient_id)
except DoesNotExist:
status = 400
res = Error("The specified patient was not found", status)
return res, status
resource_name = "%s/fhir/Note/%s" % (store_name, note_id)
DbNote(
identifier=note_id,
resourceName=resource_name,
fhirStoreName=store_name,
text=note_create_request.text,
type=note_create_request.type,
patientId=note_create_request.patient_id
).save()
note_resource_name = "%s/fhir/Note/%s" % (store_name, note_id)
res = NoteCreateResponse(name=note_resource_name)
status = 201
except NotUniqueError as error:
status = 409
res = Error("Conflict", status, str(error))
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
| 23,842
|
def load_credential_from_args(args):
"""load credential from command
Args:
        args(str): two credential file paths joined by `,`
Returns:
list of credential content
"""
    if ',' not in args:
        raise ValueError("credential args must be two file paths joined by ','")
    file_path_list = args.split(',')
    if len(file_path_list) != 2:
        raise ValueError("credential args must contain exactly two file paths")
if not file_path_list[0].endswith('.key'):
file_path_list[0], file_path_list[1] = file_path_list[1], file_path_list[0]
return [load_credential_from_file(file_path) for file_path in file_path_list]
| 23,843
|
def encode(string: str, key: str) -> str:
"""
Encode string using the Caesar cipher with the given key
:param string: string to be encoded
:param key: letter to be used as given shift
:return: encoded string
:raises: ValueError if key len is invalid
"""
if len(key) > 1:
raise ValueError("[ERROR] Length of a key may not exceed 1 for Caesar cipher")
return vigener.encode(string, key)
| 23,844
|
def compile_binary(binary, compiler, override_operator=None, **kw):
"""
    If there are 10 or more elements in the `IN` set, inline them to avoid hitting the limit of \
the number of query arguments in Postgres (1<<15).
""" # noqa: D200
operator = override_operator or binary.operator
if operator is not in_op and operator is not notin_op:
return compiler.visit_binary(binary, override_operator=override_operator, **kw)
if isinstance(binary.right, BindParameter):
right_len = len(binary.right.value)
else:
right_len = 0
if right_len >= 10:
left = compiler.process(binary.left, **kw)
kw["literal_binds"] = True
use_any = getattr(binary, "any_values", False) and compiler.dialect.name == "postgresql"
negate = use_any and operator is notin_op
if use_any:
# ANY(VALUES ...) seems to be performing the best among these three:
# 1. IN (...)
# 2. IN(ARRAY[...])
# 3. IN(VALUES ...)
right = any_(Grouping(Values(
binary.left, literal_binds=True,
).data(TupleWrapper(binary.right.value))))
operator = operators.eq
else:
right = binary.right
right = compiler.process(right, **kw)
sql = left + OPERATORS[operator] + right
if negate:
sql = "NOT (%s)" % sql
return sql
elif operator is in_op and right_len == 1:
# IN (<value>) -> = <value>
return compiler.process(binary.left == binary.right.value[0], **kw)
return compiler.visit_binary(binary, override_operator=override_operator, **kw)
| 23,845
|
def flat_map(
fn: Callable[[_T], Iterable[_S]], collection: Iterable[_T]
) -> Iterator[_S]:
"""Map a function over a collection and flatten the result by one-level"""
return itertools.chain.from_iterable(map(fn, collection))
| 23,846
|
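A usage sketch for flat_map above (assumed in scope); the result is a lazy iterator, so it is materialized with list.

print(list(flat_map(lambda n: range(n), [1, 2, 3])))  # [0, 0, 1, 0, 1, 2]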
def add_origin(folder: PurePath, remote_path: str):
"""Add a remote pointing to GitHub repository.
:param folder: project repository.
:param remote_path: remote repository path."""
remote_repo = f"https://github.com/{remote_path}"
try:
helpers.execute_subprocess(
["git", "remote", "add", "origin", f"{remote_repo}.git"],
folder,
)
except (OSError, CalledProcessError) as e:
error = f"Subprocess error: {e}"
logging.critical(error)
print(error)
print("\nCancelling...")
exit()
else:
logging.info("Remote repository added.")
| 23,847
|
def create_vector_clock(node_id, timeout):
"""This method builds the initial vector clock for a new key.
Parameters
----------
node_id : int
the id of one node in the cluster
timeout : int
the expire timeout of the key
Returns
-------
dict
        the vector clock as dictionary
"""
if node_id is not None and timeout is not None:
return {
"versions": [{"nodeId": node_id, "version": 1}],
"timestamp": timeout
}
else:
raise ValueError("You must gave the node id and the timeout.")
| 23,848
|
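A small usage sketch for create_vector_clock above (assumed in scope); the node id and timeout values are arbitrary.

clock = create_vector_clock(node_id=3, timeout=1650000000)
print(clock)
# {'versions': [{'nodeId': 3, 'version': 1}], 'timestamp': 1650000000}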
def ParseStateFoldersFromFiles(state_files):
"""Returns list of StateFolder objects parsed from state_files.
Args:
state_files: list of absolute paths to state files.
"""
def CreateStateFolder(folderpath, parent_namespace):
del parent_namespace # Unused by StateFolder.
return state_lib.StateFolder(folderpath)
return _ParseFoldersFromFiles(state_files, base_lib.ComponentType.MULTI_STATE,
CreateStateFolder)
| 23,849
|
def broadcast(bot, event, *args):
"""broadcast a message to chats, use with care"""
if args:
subcmd = args[0]
parameters = args[1:]
if subcmd == "info":
"""display broadcast data such as message and target rooms"""
conv_info = [ "<b><pre>{}</pre></b> ... <pre>{}</pre>".format(bot.conversations.get_name(convid), convid)
for convid in _internal["broadcast"]["conversations"] ]
if not _internal["broadcast"]["message"]:
yield from bot.coro_send_message(event.conv, _("broadcast: no message set"))
return
if not conv_info:
yield from bot.coro_send_message(event.conv, _("broadcast: no conversations available"))
return
yield from bot.coro_send_message(event.conv, _(
"<b>message:</b><br />"
"{}<br />"
"<b>to:</b><br />"
"{}".format(_internal["broadcast"]["message"],
"<br />".join(conv_info))))
elif subcmd == "message":
"""set broadcast message"""
message = ' '.join(parameters)
if message:
if message.lower().strip().startswith(tuple([_.lower() for _ in bot._handlers.bot_command])):
yield from bot.coro_send_message(event.conv, _("broadcast: message not allowed"))
return
_internal["broadcast"]["message"] = message
else:
yield from bot.coro_send_message(event.conv, _("broadcast: message must be supplied after subcommand"))
elif subcmd == "add":
"""add conversations to a broadcast"""
if parameters[0] == "groups":
"""add all groups (chats with users > 1, bot not counted)"""
for convid, convdata in bot.conversations.get().items():
if(len(convdata["participants"]) > 1):
_internal["broadcast"]["conversations"].append(convid)
elif parameters[0] == "ALL":
"""add EVERYTHING - try not to use this, will message 1-to-1s as well"""
for convid, convdata in bot.conversations.get().items():
_internal["broadcast"]["conversations"].append(convid)
else:
"""add by wild card search of title or id"""
search = " ".join(parameters)
for convid, convdata in bot.conversations.get().items():
if search.lower() in convdata["title"].lower() or search in convid:
_internal["broadcast"]["conversations"].append(convid)
_internal["broadcast"]["conversations"] = list(set(_internal["broadcast"]["conversations"]))
yield from bot.coro_send_message(event.conv, _("broadcast: {} conversation(s)".format(len(_internal["broadcast"]["conversations"]))))
elif subcmd == "remove":
if parameters[0].lower() == "all":
"""remove all conversations from broadcast"""
_internal["broadcast"]["conversations"] = []
else:
"""remove by wild card search of title or id"""
search = " ".join(parameters)
removed = []
for convid in _internal["broadcast"]["conversations"]:
if search.lower() in bot.conversations.get_name(convid).lower() or search in convid:
_internal["broadcast"]["conversations"].remove(convid)
removed.append("<b><pre>{}</pre></b> (<pre>{}</pre>)".format(bot.conversations.get_name(convid), convid))
if removed:
yield from bot.coro_send_message(event.conv, _("broadcast: removed {}".format(", ".join(removed))))
elif subcmd == "NOW":
"""send the broadcast - no turning back!"""
context = { "explicit_relay": True } # prevent echos across syncrooms
for convid in _internal["broadcast"]["conversations"]:
yield from bot.coro_send_message(convid, _internal["broadcast"]["message"], context=context)
yield from bot.coro_send_message(event.conv, _("broadcast: message sent to {} chats".format(len(_internal["broadcast"]["conversations"]))))
else:
yield from bot.coro_send_message(event.conv, _("broadcast: /bot broadcast [info|message|add|remove|NOW] ..."))
else:
yield from bot.coro_send_message(event.conv, _("broadcast: /bot broadcast [info|message|add|remove|NOW]"))
| 23,850
|
def spectral_norm(inputs, epsilon=1e-12, singular_value="left"):
"""Performs Spectral Normalization on a weight tensor.
Details of why this is helpful for GAN's can be found in "Spectral
Normalization for Generative Adversarial Networks", Miyato T. et al., 2018.
[https://arxiv.org/abs/1802.05957].
Args:
inputs: The weight tensor to normalize.
epsilon: Epsilon for L2 normalization.
singular_value: Which first singular value to store (left or right). Use
"auto" to automatically choose the one that has fewer dimensions.
Returns:
The normalized weight tensor.
"""
if len(inputs.shape) < 2:
raise ValueError(
"Spectral norm can only be applied to multi-dimensional tensors")
# The paper says to flatten convnet kernel weights from (C_out, C_in, KH, KW)
# to (C_out, C_in * KH * KW). Our Conv2D kernel shape is (KH, KW, C_in, C_out)
# so it should be reshaped to (KH * KW * C_in, C_out), and similarly for other
# layers that put output channels as last dimension. This implies that w
# here is equivalent to w.T in the paper.
w = tf.reshape(inputs, (-1, inputs.shape[-1]))
# Choose whether to persist the first left or first right singular vector.
# As the underlying matrix is PSD, this should be equivalent, but in practice
# the shape of the persisted vector is different. Here one can choose whether
# to maintain the left or right one, or pick the one which has the smaller
# dimension. We use the same variable for the singular vector if we switch
# from normal weights to EMA weights.
var_name = inputs.name.replace("/ExponentialMovingAverage", "").split("/")[-1]
var_name = var_name.split(":")[0] + "/u_var"
if singular_value == "auto":
singular_value = "left" if w.shape[0] <= w.shape[1] else "right"
u_shape = (w.shape[0], 1) if singular_value == "left" else (1, w.shape[-1])
u_var = tf.get_variable(
var_name,
shape=u_shape,
dtype=w.dtype,
initializer=tf.random_normal_initializer(),
trainable=False)
u = u_var
# Use power iteration method to approximate the spectral norm.
# The authors suggest that one round of power iteration was sufficient in the
# actual experiment to achieve satisfactory performance.
power_iteration_rounds = 1
for _ in range(power_iteration_rounds):
if singular_value == "left":
# `v` approximates the first right singular vector of matrix `w`.
v = tf.math.l2_normalize(
tf.matmul(tf.transpose(w), u), axis=None, epsilon=epsilon)
u = tf.math.l2_normalize(tf.matmul(w, v), axis=None, epsilon=epsilon)
else:
v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True),
epsilon=epsilon)
u = tf.math.l2_normalize(tf.matmul(v, w), epsilon=epsilon)
# Update the approximation.
with tf.control_dependencies([tf.assign(u_var, u, name="update_u")]):
u = tf.identity(u)
# The authors of SN-GAN chose to stop gradient propagating through u and v
# and we maintain that option.
u = tf.stop_gradient(u)
v = tf.stop_gradient(v)
if singular_value == "left":
norm_value = tf.matmul(tf.matmul(tf.transpose(u), w), v)
else:
norm_value = tf.matmul(tf.matmul(v, w), u, transpose_b=True)
norm_value.shape.assert_is_fully_defined()
norm_value.shape.assert_is_compatible_with([1, 1])
w_normalized = w / norm_value
# Deflate normalized weights to match the unnormalized tensor.
w_tensor_normalized = tf.reshape(w_normalized, inputs.shape)
return w_tensor_normalized
| 23,851
|
def rs_for_staff(user_id):
"""Returns simple JSON for research studies in staff user's domain
---
tags:
- User
- ResearchStudy
operationId: research_studies_for_staff
parameters:
- name: user_id
in: path
description: TrueNTH user ID, typically subject or staff
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns the list of research_studies the requested staff user is
associated with.
schema:
id: nested_research_studies
properties:
research_study:
type: array
items:
type: object
required:
- title
properties:
title:
type: string
description: Research study title
resourceType:
type: string
description: FHIR like resourceType, "ResearchStudy"
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(
user_id, 'view', allow_on_url_authenticated_encounters=True)
if user.has_role(ROLE.PATIENT.value):
abort(
400,
"wrong request path for patient,"
" see /api/patient/<int:user_id>/research_study")
# Assume some staff like role - find all research studies
# in the org tree at, above or below all of the user's orgs
orgs = set()
ot = OrgTree()
for org in user.organizations:
try:
orgs.update(ot.at_and_above_ids(org.id))
except ValueError as ve:
raise ValueError(f"Failed at_and_above lookup on {org.id}")
orgs.update(ot.here_and_below_id(org.id))
studies = OrganizationResearchProtocol.query.filter(
OrganizationResearchProtocol.organization_id.in_(
tuple(orgs))).join(
ResearchProtocol,
OrganizationResearchProtocol.research_protocol_id ==
ResearchProtocol.id).with_entities(
ResearchProtocol.research_study_id).order_by(
ResearchProtocol.research_study_id).distinct()
results = [
ResearchStudy.query.get(s.research_study_id).as_fhir()
for s in studies]
return jsonify(research_study=results)
| 23,852
|
def from_hetionet_json(
hetionet_dict: Mapping[str, Any],
use_tqdm: bool = True,
) -> BELGraph:
"""Convert a Hetionet dictionary to a BEL graph."""
graph = BELGraph( # FIXME what metadata is appropriate?
name='Hetionet',
version='1.0',
authors='Daniel Himmelstein',
)
# FIXME add namespaces
# graph.namespace_pattern.update({})
kind_identifier_to_name = {
(x['kind'], x['identifier']): x['name']
for x in hetionet_dict['nodes']
}
edges = hetionet_dict['edges']
if use_tqdm:
edges = tqdm(edges, desc='Converting Hetionet', unit_scale=True)
it_logger = edges.write
else:
it_logger = logger.info
for edge in edges:
_add_edge(graph, edge, kind_identifier_to_name, it_logger)
return graph
| 23,853
|
def validate_memory(_ctx, _param, value):
"""Validate memory string."""
if value is None:
return None
if not re.search(r'\d+[KkMmGg]$', value):
raise click.BadParameter('Memory format: nnn[K|M|G].')
return value
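
# Usage sketch (assumption: not part of the original module): validate_memory
# follows click's callback signature (ctx, param, value), so it can be wired
# directly into an option; the "--memory" option name here is illustrative.
import click

@click.command()
@click.option('--memory', callback=validate_memory, default=None,
              help='Memory limit, e.g. 512M or 2G.')
def run(memory):
    click.echo('memory=%s' % memory)

# Direct calls behave the same way:
#   validate_memory(None, None, '512M')  -> '512M'
#   validate_memory(None, None, 'lots')  -> raises click.BadParameter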
| 23,854
|
def load_db_dump(dump_file):
"""Load db dump on a remote environment."""
require('environment')
temp_file = os.path.join(env.home, '%(project)s-%(environment)s.sql' % env)
put(dump_file, temp_file, use_sudo=True)
sudo('psql -d %s -f %s' % (env.db, temp_file), user=env.project_user)
| 23,855
|
def BlockAvg3D( data , blocksize , mask ):
"""
3-D version of block averaging. Mainly applicable to making superpixel averages of datfile traces.
    Not sure whether non-averaging calculations make sense here.
    mask is currently expected to be a 2-D boolean array of the same size as (data.shape[0], data.shape[1]) where pixels to be averaged are True.
"""
rows = data.shape[0]
cols = data.shape[1]
frames = data.shape[2]
if np.mod(rows,blocksize[0]) == 0 and np.mod(cols,blocksize[1]) == 0:
        blockR = rows // blocksize[0]
        blockC = cols // blocksize[1]
else:
print( 'Error, blocksize not evenly divisible into data size.')
return None
output = np.zeros((blockR,blockC,frames))
# Previous algorithm was slow and used annoying looping
    # Improved algorithm that doesn't need any looping; takes about 1.4 seconds instead of 60.
msk = np.array( mask , float )
msk.resize(rows, cols , 1 )
masked = np.array( data , float ) * np.tile( msk , ( 1 , 1 , frames ) )
step1 = masked.reshape(rows , blockC , -1 , frames).sum(2)
step2 = np.transpose(step1 , (1,0,2)).reshape(blockC , blockR , -1 , frames).sum(2)
step3 = np.transpose(step2 , (1,0,2))
mask1 = mask.reshape(rows , blockC , -1 ).sum(2)
count = mask1.transpose().reshape(blockC , blockR , -1).sum(2).transpose()
#mask1 = mask.reshape(rows , blockC , -1 , frames).sum(2)
#count = mask1.transpose().reshape(blockC , blockR , -1 , frames).sum(2).transpose()
output = step3 / count[:,:,np.newaxis]
output[ np.isnan(output) ] = 0
output[ np.isinf(output) ] = 0
return output
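
# Quick illustrative check (synthetic data, not from the source): average a
# 4x4x3 stack into 2x2 superpixels with every pixel unmasked.
import numpy as np

data = np.arange(4 * 4 * 3, dtype=float).reshape(4, 4, 3)
mask = np.ones((4, 4), dtype=bool)
avg = BlockAvg3D(data, (2, 2), mask)
print(avg.shape)        # (2, 2, 3)
print(avg[:, :, 0])     # [[ 7.5 13.5]
                        #  [31.5 37.5]]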
| 23,856
|
def plot(b_csvs, g_csvs, g_x, g_y, b_x, b_y, trials, seeds, plt_file, env_id,
x_label, y_label):
"""
Plot benchmark from csv files of garage and baselines.
    :param b_csvs: A list containing all baselines csv files in the task.
    :param g_csvs: A list containing all garage csv files in the task.
    :param g_x: X column name of garage csv.
    :param g_y: Y column name of garage csv.
    :param b_x: X column name of baselines csv.
    :param b_y: Y column name of baselines csv.
    :param trials: Number of trials in the task.
    :param seeds: A list containing all the seeds in the task.
    :param plt_file: Path of the plot png file.
    :param env_id: String containing the id of the environment.
    :param x_label: Label for the x axis of the plot.
    :param y_label: Label for the y axis of the plot.
    :return: None
"""
assert len(b_csvs) == len(g_csvs)
for trial in range(trials):
seed = seeds[trial]
df_g = pd.read_csv(g_csvs[trial])
df_b = pd.read_csv(b_csvs[trial])
plt.plot(
df_g[g_x],
df_g[g_y],
label="garage_trial%d_seed%d" % (trial + 1, seed))
plt.plot(
df_b[b_x],
df_b[b_y],
label="baselines_trial%d_seed%d" % (trial + 1, seed))
plt.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(env_id)
plt.savefig(plt_file)
plt.close()
| 23,857
|
def build(buildconfig: BuildConfig, merge_train_and_test_data: bool = False):
"""Build regressor or classifier model and return it."""
estimator = buildconfig.algorithm.estimator()
if merge_train_and_test_data:
train_smiles, train_y = buildconfig.data.get_merged_sets()
else:
train_smiles, train_y, _, _ = buildconfig.data.get_sets()
train_X = descriptor_from_config(train_smiles, buildconfig.descriptor)
estimator.fit(train_X, train_y)
if merge_train_and_test_data:
train_scores = get_merged_train_score(estimator, buildconfig)
test_scores = None
else:
train_scores, test_scores = get_train_test_scores(estimator, buildconfig)
return estimator, train_scores, test_scores
| 23,858
|
def GetXML(filename, output=OUTPUT_ELEMENT, **params):
"""
Get the XML representation of a file, as produced by the Databrowse library
Arguments:
filename - Relative or absolute path to file of interest
output - Determines the type of output to be returned from the function
dbl.OUTPUT_ELEMENT returns an LXML etree.Element
dbl.OUTPUT_ETREE returns an LXML etree.ElementTree
dbl.OUTPUT_STRING returns a string containing the XML
dbl.OUTPUT_STDOUT prints the XML string and returns nothing
**params - A variable number of optional parameters that are treated the
same way as query string values that would be POST or GET to
the web server when Databrowse is being used from the web.
Usage:
>>> from databrowse.lib import db_lib as dbl
>>> dbl.GetXML('/tmp/emptyfile', output=dbl.OUTPUT_STDOUT)
<default:default>
<filename>emptyfile</filename>
<path>/tmp</path>
<size>0.0 byte</size>
<mtime>Tue Sep 3 10:12:40 2013</mtime>
<ctime>Tue Sep 3 10:12:40 2013</ctime>
<atime>Tue Sep 3 10:12:42 2013</atime>
<contenttype>text/plain</contenttype>
<permissions>-rw-rw-r--</permissions>
<owner>user:user</owner>
</default:default>
See also: DebugGetXML()
"""
import databrowse.support.dummy_web_support as db_web_support_module
# Set up web_support class with environment information
db_web_support = db_web_support_module.web_support(filename, params)
# Determine Requested File/Folder Absolute Path and Path Relative to Dataroot
if "path" not in db_web_support.req.form:
fullpath = db_web_support.dataroot
relpath = '/'
pass
else:
fullpath = os.path.abspath(db_web_support.req.form["path"].value)
if not fullpath.startswith(db_web_support.dataroot):
return db_web_support.req.return_error(403)
if os.access(fullpath, os.R_OK) and os.path.exists(fullpath):
if fullpath == db_web_support.dataroot:
relpath = '/'
pass
else:
relpath = fullpath.replace(db_web_support.dataroot, '')
pass
pass
elif not os.path.exists(fullpath):
return db_web_support.req.return_error(404)
else:
return db_web_support.req.return_error(401)
pass
# Import Plugin Directory
#if db_web_support.pluginpath not in sys.path: # Removed 8/5/13 - Transition to Installed Modules
# sys.path.append(db_web_support.pluginpath)
# Determine handler for requested path
#import handler_support as handler_support_module
import databrowse.support.handler_support as handler_support_module
handler_support = handler_support_module.handler_support(db_web_support.icondbpath, db_web_support.hiddenfiledbpath, db_web_support.directorypluginpath)
handlers = handler_support.GetHandler(fullpath)
handler = handlers[-1]
# Let's see if we want to override the default handler
if "handler" in db_web_support.req.form:
handler = db_web_support.req.form['handler'].value
pass
# Get A Handle to The Rendering Plugin
caller = "databrowse"
exec("import databrowse.plugins.%s.%s as %s_module" % (handler, handler, handler))
exec("renderer = %s_module.%s(relpath, fullpath, db_web_support, handler_support, caller, handlers%s%s%s)" % (handler, handler,\
', content_mode="' + db_web_support.req.form["content_mode"].value + '"' if "content_mode" in db_web_support.req.form else '',\
', style_mode="' + db_web_support.req.form['style_mode'].value + '"' if "style_mode" in db_web_support.req.form else '',\
', recursion_depth=' + db_web_support.req.form['recursion_depth'].value + '' if "recursion_depth" in db_web_support.req.form else ''))
# Register Primary Namespace
#etree.register_namespace('db', 'http://thermal.cnde.iastate.edu/databrowse')
if output == OUTPUT_ETREE:
return etree.ElementTree(renderer.getContent())
elif output == OUTPUT_STRING:
xmltree = etree.ElementTree(renderer.getContent())
return etree.tostring(xmltree)
elif output == OUTPUT_ELEMENT:
return renderer.getContent()
elif output == OUTPUT_STDOUT:
xmltree = etree.ElementTree(renderer.getContent())
print(etree.tostring(xmltree, pretty_print=True))
else:
return etree.ElementTree(renderer.getContent())
| 23,859
|
def stellar_radius(M, logg):
"""Calculate stellar radius given mass and logg"""
if not isinstance(M, (int, float)):
raise TypeError('Mass must be int or float. {} type given'.format(type(M)))
if not isinstance(logg, (int, float)):
raise TypeError('logg must be int or float. {} type given'.format(type(logg)))
if M < 0:
raise ValueError('Only positive stellar masses allowed.')
M = float(M)
return M/(10**(logg-4.44))
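
# Illustrative calls, assuming the solar-unit convention implied by the 4.44
# solar logg constant baked into the formula.
print(stellar_radius(1.0, 4.44))   # 1.0 for solar values
print(stellar_radius(1.2, 4.0))    # ~3.3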
| 23,860
|
def clean_dir(directory):
"""
Creates (or empties) a directory
:param directory: Directory to create
:return: None
"""
if os.path.exists(directory):
import shutil
logging.warning('<<< Deleting directory: %s >>>' % directory)
shutil.rmtree(directory)
os.makedirs(directory)
else:
os.makedirs(directory)
| 23,861
|
def set_plus_row(sets, row):
"""Update each set in list with values in row."""
for i in range(len(sets)):
sets[i].add(row[i])
return sets
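
# Small illustration: accumulate the distinct values seen in each column of a
# few rows into one set per column (toy data, not from the source).
columns = [set(), set(), set()]
for row in [("a", 1, True), ("b", 1, False), ("a", 2, True)]:
    columns = set_plus_row(columns, row)
print(columns)   # e.g. [{'a', 'b'}, {1, 2}, {False, True}] (set order may vary)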
| 23,862
|
def bytes_to_string(
bytes_to_convert: List[int], strip_null: bool = False
) -> Union[str, None]:
"""
    Literal bytes to string
    :param bytes_to_convert: list of bytes in integer format
    :param strip_null: strip null characters from the resulting string
    :return: resulting string
"""
try:
value = "".join(chr(i) for i in bytes_to_convert)
if strip_null:
return value.strip("\x00")
return value
# AttributeError when None object has no strip attribute
except (ValueError, TypeError, AttributeError):
return None
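
# Example: decode a null-padded byte list such as one read from a fixed-width
# binary field (values here are illustrative).
print(repr(bytes_to_string([72, 105, 0, 0])))                    # 'Hi\x00\x00'
print(repr(bytes_to_string([72, 105, 0, 0], strip_null=True)))   # 'Hi'
print(bytes_to_string(None))                                     # None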
| 23,863
|
def completion(shell):
"""
Output odahuflowctl completion code to stdout.\n
\b
Load the zsh completion in the current shell:
source <(odahuflowctl completion zsh)
\b
Load the powershell completion in the current shell:
odahuflowctl completion > $HOME\.odahuflow\odahu_completion.ps1;
. $HOME\.odahuflow\odahu_completion.ps1;
Remove-Item $HOME\.odahuflow\odahu_completion.ps1
"""
shell = shell or click_completion.lib.get_auto_shell()
if shell in click_completion.core.shells:
click.echo(click_completion.core.get_code(shell))
else:
raise click.ClickException(f'"{shell}" shell is not supported.')
| 23,864
|
def test_sad_bad_project_prevents_seeing_custom_field():
"""
raises an error on attempt to identify that a known custom field
exists with an invalid project identifier
"""
jp = JiraProxy(GOOD_VANILLA_SERVER_CONFIG)
cfs = jp.getCustomFields(PROJECT_KEY_1, 'Bug')
assert "RallyItem" in cfs
assert jp.fieldExists(PROJECT_KEY_1, "Bug", "RallyID")
with py.test.raises(JiraProxyError) as excinfo:
trvth = jp.isCustomField(BAD_PROJECT_KEY_1, "Bug", "RallyURL")
actualErrorMessage = excErrorMessage(excinfo)
assert 'Could not find project for identifier:' in actualErrorMessage
| 23,865
|
def interpolate(t,y,num_obs=50):
"""
Interpolates each trajectory such that observation times coincide for each one.
Note: initially cubic interpolation gave great power, but this happens as an artifact of the interpolation,
as both trajectories have the same number of observations. Type I error was increased as a result. To avoid
this we settled for a linear interpolation between observations.
Splines were also tried but gave very bad interpolations.
"""
t = np.array([np.sort(row) for row in t])
t = np.insert(t, 0, 0, axis=1)
t = np.insert(t, len(t[0]), 1, axis=1)
y = np.insert(y, 0, y[:,0], axis=1)
y = np.insert(y, len(y[0]), y[:,-1], axis=1)
new_t = np.zeros(num_obs)
new_y = np.zeros(num_obs)
for i in range(len(t)):
f = interp1d(t[i], y[i], kind='linear')
#f = splrep(t[i], y[i])
t_temp = np.random.uniform(low=0.0, high=1.0, size=num_obs)#np.linspace(0.1,0.9,num_obs)
y_temp = f(t_temp)
#y_temp = splev(t_temp, f, der=0)
new_y = np.vstack((new_y, y_temp))
new_t = np.vstack((new_t, t_temp))
return new_t[1:], new_y[1:]
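
# Sketch of resampling two synthetic trajectories onto a common grid of 50
# observation times; assumes numpy and scipy.interpolate.interp1d are already
# imported in this module, as the function body requires.
import numpy as np

times = np.random.uniform(0, 1, size=(2, 20))
values = np.cumsum(np.random.normal(size=(2, 20)), axis=1)
new_t, new_y = interpolate(times, values, num_obs=50)
print(new_t.shape, new_y.shape)   # (2, 50) (2, 50)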
| 23,866
|
def uniqids(seqdata, trimdefline=True, checkrevcom=True, fastq=False,
paired=False):
"""
Given a file of Fasta sequences `seqdata`, determine unique sequences and
provide their IDs. Generator function yields lists of sequence IDs.
"""
seqids = group_seqids_by_sha1(
seqdata,
trimdefline=trimdefline,
checkrevcom=checkrevcom,
fastq=fastq,
paired=paired)
for seqsha in seqids:
idlist = seqids[seqsha]
if len(idlist) > 1:
yield idlist
| 23,867
|
def tc_request_send(telecommand_number, telecommand_data, telecommand_data_type, is_continuous):
""" Check data type of Telecommand Request before formatting and send over COM Port. """
try:
type_error = ""
telecommand_number = int(telecommand_number)
except ValueError as err:
print(str(repr(err)))
print("ERROR: Telecommand Request Channel is invalid")
callback_exception_handler("Telecommand Request Channel is invalid")
try:
if telecommand_data_type == "String":
# Prepend whitespace until string is 8 chars
while len(telecommand_data) < 8:
telecommand_data = " " + telecommand_data
# Format the data as an 8 byte string
data = data_format([telecommand_number, *bytes(telecommand_data, "ascii")],
telecommand_request_builder_string)
except struct.error as err:
print(repr(err))
print("ERROR: Telecommand Data Value is not String")
type_error = "Telecommand Data Value is not String"
try:
if telecommand_data_type == "Integer":
# Format the data as a 64 bit signed integer
data = data_format([telecommand_number, int(telecommand_data)],
telecommand_request_builder_integer)
except ValueError as err:
# Handle exception if data is not an integer
print(repr(err))
print("ERROR: Telecommand Data Value is not Integer")
type_error = "Telecommand Data Value is not Integer"
try:
if telecommand_data_type == "Floating Point":
# Format the data as a double
data = data_format([telecommand_number, float(telecommand_data)],
telecommand_request_builder_float)
except ValueError as err:
# Handle exception if data is not a float
print(repr(err))
print("ERROR: Telecommand Data Value is not Floating Point Number")
type_error = "Telecommand Data Value is not Floating Point Number"
try:
# Add telecommand message to database to enable matching with response
telecommand_database.append(Telecommand(telecommand_number, config.TIMEOUT))
# Format the telecommand as a frame and send
packetize(data, DataType.TELECOMMAND_REQUEST.value, is_continuous, telecommand_database,
telecommand_database[-1])
except UnboundLocalError as err:
print(repr(err))
print("ERROR: Could not format message")
callback_exception_handler("ERROR: Could not format message, " + type_error)
| 23,868
|
def locktime_from_duration(duration):
"""
    Parses a duration string and returns the number of seconds it represents
    @param duration: A string representing a duration in the format XXhXXmXXs
@returns: number of seconds represented by the duration string
"""
if not duration:
raise ValueError("Duration needs to be in the format {}".format(DURATION_TEMPLATE))
match = re.search(DURATION_REGX_PATTERN, duration)
if not match:
raise ValueError("Duration needs to be in the format {}".format(DURATION_TEMPLATE))
values = match.groupdict()
result = 0
if values['hours']:
result += int(values['hours']) * 60 * 60
if values['minutes']:
result += int(values['minutes']) * 60
if values['seconds']:
result += int(values['seconds'])
return int(result)
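
# The module-level DURATION_REGX_PATTERN and DURATION_TEMPLATE are not shown in
# this listing; the definitions below are illustrative assumptions consistent
# with the named groups ('hours', 'minutes', 'seconds') the function reads.
import re

DURATION_TEMPLATE = 'XXhXXmXXs'
DURATION_REGX_PATTERN = re.compile(
    r'^(?:(?P<hours>\d+)h)?(?:(?P<minutes>\d+)m)?(?:(?P<seconds>\d+)s)?$')

print(locktime_from_duration('1h30m'))   # 5400
print(locktime_from_duration('45s'))     # 45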
| 23,869
|
def black_payers_swaption_value_fhess_by_strike(
init_swap_rate,
option_strike,
swap_annuity,
option_maturity,
vol):
"""black_payers_swaption_value_fhess_by_strike
Second derivative of value of payer's swaption with respect to strike
under black model.
See :py:func:`black_payers_swaption_value`.
.. math::
\\frac{\partial^{2} }{\partial K^{2}}
V_{\mathrm{payer}}(K; S, A, T, \sigma)
= - A\phi(d_{2}(K)) d_{2}^{\prime}(K)
where
:math:`S` is `init_swap_rate`,
:math:`K` is `option_strike`,
:math:`A` is `swap_annuity`,
:math:`T` is `option_maturity`,
:math:`\sigma` is `vol`,
:math:`d_{1}, d_{2}` is defined
in :py:func:`black_payers_swaption_value`,
:math:`\Phi(\cdot)` is c.d.f. of standard normal distribution,
:math:`\phi(\cdot)` is p.d.f. of standard normal distribution.
:param float init_swap_rate: initial swap rate.
:param float option_strike:
:param float swap_annuity:
:param float option_maturity:
:param float vol: volatility. must be non-negative.
:return: value of derivative.
:rtype: float
:raises AssertionError: if volatility is not positive.
"""
assert(vol > 0.0)
value = mafipy.function.black_scholes_call_value_fhess_by_strike(
init_swap_rate, option_strike, 0.0, option_maturity, vol)
return swap_annuity * value
| 23,870
|
def get_resource(cls):
""" gets the resource of a timon class if existing """
if not cls.resources:
return None
resources = cls.resources
assert len(resources) == 1
return TiMonResource.get(resources[0])
| 23,871
|
def get_data():
"""
    Collects data from the UART and stores the filtered data in a global
    variable.
"""
global serial_object
global filter_data
    while True:
try:
serial_data = serial_object.readline()
temp_data = serial_data.decode('utf-8')
#filter_data = temp_data[:-2] # Removes last two characters (\r\n)
filter_data = temp_data.strip('\n').strip('\r')
print ('%s' %filter_data)
except TypeError:
pass
| 23,872
|
def blobs(program, *tags):
"""Usage: [tag ...]
List all blobs reachable from tag[s].
"""
for replicas in program.blobs(*tags):
print('\t'.join(replicas))
| 23,873
|
def _log(x):
"""_log
    to prevent np.log(0), calculate np.log(x + EPS)
Args:
x (array)
Returns:
array: same shape as x, log equals np.log(x + EPS)
"""
if np.any(x < 0):
print("log < 0")
exit()
return np.log(x + EPS)
| 23,874
|
def gauss_distribution(x, mu, sigma):
"""
Calculate value of gauss (normal) distribution
Parameters
----------
x : float
Input argument
mu :
Mean of distribution
sigma :
Standard deviation
Returns
-------
float
        Probability density at x (not bounded by 1)
    """
    return 1 / (math.sqrt(2 * math.pi) * sigma) * math.exp(-(1 / 2) * ((x - mu) / sigma) ** 2)
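
# Quick numerical check: at x = mu the density equals 1 / (sigma * sqrt(2*pi)),
# and the curve is symmetric around the mean.
import math

print(gauss_distribution(0.0, 0.0, 1.0))   # ~0.3989
print(math.isclose(gauss_distribution(1.0, 0.0, 2.0),
                   gauss_distribution(-1.0, 0.0, 2.0)))   # True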
| 23,875
|
def extend_vocab_OOV(source_words, word2id, vocab_size, max_unk_words):
"""
Map source words to their ids, including OOV words. Also return a list of OOVs in the article.
WARNING: if the number of oovs in the source text is more than max_unk_words, ignore and replace them as <unk>
Args:
source_words: list of words (strings)
word2id: vocab word2id
vocab_size: the maximum acceptable index of word in vocab
Returns:
        src_ext: A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
        oov_dict: A dict mapping each OOV word (string) to its temporary article OOV number.
        oov_list: A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers.
"""
src_ext = []
oov_dict = {}
for w in source_words:
        if w in word2id and word2id[w] < vocab_size:  # an OOV is either outside the vocab or has id >= vocab_size
src_ext.append(word2id[w])
else:
if len(oov_dict) < max_unk_words:
# e.g. 50000 for the first article OOV, 50001 for the second...
word_id = oov_dict.get(w, len(oov_dict) + vocab_size)
oov_dict[w] = word_id
src_ext.append(word_id)
else:
# exceeds the maximum number of acceptable oov words, replace it with <unk>
word_id = word2id[UNK_WORD]
src_ext.append(word_id)
oov_list = [w for w, w_id in sorted(oov_dict.items(), key=lambda x:x[1])]
return src_ext, oov_dict, oov_list
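
# Toy illustration with a three-word vocabulary. UNK_WORD is a module constant
# not shown in this listing; it is assumed here to be the '<unk>' token present
# in word2id.
UNK_WORD = '<unk>'
word2id = {'<unk>': 0, 'the': 1, 'cat': 2}
source = ['the', 'cat', 'sat', 'on', 'the', 'mat']
ids, oov_dict, oov_list = extend_vocab_OOV(source, word2id, vocab_size=3,
                                           max_unk_words=2)
print(ids)       # [1, 2, 3, 4, 1, 0] -- 'mat' exceeds max_unk_words -> <unk>
print(oov_list)  # ['sat', 'on']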
| 23,876
|
def isint(s):
"""**Returns**: True if s is the string representation of an integer
:param s: the candidate string to test
**Precondition**: s is a string
"""
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
| 23,877
|
def create_app(config_name):
"""function creating the flask app"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(config[config_name])
app.config.from_pyfile('config.py')
app.register_blueprint(v2)
app.register_error_handler(404, not_found)
app.register_error_handler(405, bad_request)
app.register_error_handler(500, internal_server_error)
db_conn.create_tables()
return app
| 23,878
|
def test_lorent_norm():
"""Test 1d lorentzian
"""
p = np.array([0., 0., 1., -30., 3., 1., 30., 3.])
x = np.linspace(-1e6, 1e6, int(8e6) + 1)
y = functions.lorentzian(p, x)
integ = simps(y, x)
assert (abs(integ - 2.) < 1e-5)
| 23,879
|
def build_attention_network(features2d,
attention_groups,
attention_layers_per_group,
is_training):
"""Builds attention network.
Args:
features2d: A Tensor of type float32. A 4-D float tensor of shape
[batch_size, height, width, channels].
attention_groups: (Integer) Number of network groups.
attention_layers_per_group: (Integer) Number of layers per group.
is_training: (Boolean) To indicate training or inference modes.
Returns:
features_embedded: A Tensor of type float32. A 4-D float tensor of shape
[batch_size, height, width, channels].
"""
channels = features2d.shape.as_list()[-1]
with tf.variable_scope("attention_network"):
features_embedded = features2d
for i in range(attention_groups):
filters = channels // 2**(i+1)
for j in range(attention_layers_per_group):
features_embedded = tf.layers.conv2d(
features_embedded,
filters=filters,
kernel_size=3 if j == (attention_layers_per_group-1)
else 1,
strides=1,
dilation_rate=(2, 2) if j == (attention_layers_per_group-1)
else (1, 1),
activation=None,
use_bias=False,
name="features2d_embedding%d_%d" %(i, j),
padding="same")
features_embedded = tf.layers.batch_normalization(
features_embedded, training=is_training,
momentum=MOMENTUM, epsilon=EPS,
name="features2d_embedding%d_%d" %(i, j))
features_embedded = tf.nn.relu(features_embedded)
tf.logging.info("Constructing layer: %s", features_embedded)
return features_embedded
| 23,880
|
def INDIRECT(cell_reference_as_string):
"""Returns a cell reference specified by a string."""
raise NotImplementedError()
| 23,881
|
def get_catalog_config(catalog):
"""
get the config dict of *catalog*
"""
return resolve_config_alias(available_catalogs[catalog])
| 23,882
|
def manhatten(type_profile, song_profile):
"""
    Calculate the Manhattan distance between the profile of a specific
    output_columns value (e.g. a specific composer) and the profile of a
    song
"""
# Sort profiles by frequency
type_profile = type_profile.most_common()
song_profile = song_profile.most_common()
flat_type_profile = [ngram for (ngram, freq) in type_profile]
flat_song_profile = [ngram for (ngram, freq) in song_profile]
manhatten = 0
for i in range(len(flat_song_profile)):
ngram = flat_song_profile[i]
if ngram in flat_type_profile:
manhatten += abs(flat_type_profile.index(ngram) - i)
else:
manhatten += abs(len(flat_type_profile) - i)
return manhatten
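
# Small example with n-gram profiles held in collections.Counter objects
# (toy counts, not from the source); a lower score means the song profile is
# closer to the composer profile.
from collections import Counter

composer_profile = Counter({'abc': 5, 'bcd': 3, 'cde': 1})
song_profile = Counter({'bcd': 4, 'abc': 2, 'xyz': 1})
print(manhatten(composer_profile, song_profile))   # 3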
| 23,883
|
def raven(request):
"""lets you know whether raven is being used"""
return {
'RAVEN': RAVEN
}
| 23,884
|
def _non_max_suppress_mask(
bbox: np.array,
scores: np.array,
classes: np.array,
masks: Union[np.array, None],
filter_class: int,
iou: float = 0.8,
confidence: float = 0.001,
) -> tuple:
"""Perform non max suppression on the detection output if it is mask.
:param bbox: Bbox outputs.
:param scores: Score outputs.
:param classes: Class outputs.
:param masks: Mask outputs
:param filter_class: The specific class required.
:param iou: The intersection of union value to be considered.
:param confidence: The confidence threshold for scores.
:returns: tuple of suppressed bbox, suppressed scores,
suppressed classes, and suppressed masks.
"""
filter_idx = _filter_class_and_zero_scores(
scores,
classes,
filter_class,
confidence,
)
scores_filter = np.array(np.array(scores)[filter_idx])
bbox_filter = np.array(np.array(bbox)[filter_idx])
classes_filter = np.array(np.array(classes)[filter_idx])
masks_filter = np.array(np.array(masks)[filter_idx])
areas = np.empty(masks_filter.shape[0])
for index, mask in enumerate(masks_filter):
areas[index] = np.count_nonzero(mask)
sorted_scores = scores_filter.argsort()[::-1]
keep = []
while sorted_scores.size > 0:
score = sorted_scores[0]
# keep the largest sorted score (sorted_scores[0] represent the largest score)
keep.append(score)
# with:
# x = [0 0 1 1] and y = [0 1 1 0],
# the intersect is x && y element-wise -> [0 0 1 0]
intersect = np.empty_like(sorted_scores[1:])
for index, others in enumerate(masks_filter[sorted_scores[1:]]):
intersect[index] = np.count_nonzero(
np.logical_and(masks_filter[score], others)
)
overlap = intersect / (
areas[score] + areas[sorted_scores[1:]] - intersect
)
sorted_scores = sorted_scores[
np.union1d(
np.where(overlap <= 1 - iou)[0],
np.where(
classes_filter[sorted_scores[1:]] != classes_filter[score]
),
)
+ 1
]
detection_boxes = list(map(tuple, bbox_filter[keep]))
detection_scores = list(scores_filter[keep])
detection_classes = list(classes_filter[keep])
detection_masks = list(masks_filter[keep])
detection_boxes = [
(float(item[0]), float(item[1]), float(item[2]), float(item[3]))
for item in detection_boxes
]
detection_scores = [float(item) for item in detection_scores]
detection_classes = [int(item) for item in detection_classes]
return (
detection_boxes,
detection_scores,
detection_classes,
detection_masks,
)
| 23,885
|
def edit(client: ModelDeploymentClient, md_id: str, file: str, wait: bool, timeout: int, image: str):
"""
\b
Update a deployment.
You should specify a path to file with a deployment. The file must contain only one deployment.
For now, CLI supports YAML and JSON file formats.
If you want to update multiple deployments, you should use "odahuflowctl bulk apply" instead.
If you provide the deployment id parameter, it will override before sending to API server.
\b
Usage example:
* odahuflowctl dep update -f dep.yaml --id examples-git
\f
:param client: Model deployment HTTP client
:param md_id: Model deployment ID
:param file: Path to the file with only one deployment
    :param timeout: timeout in seconds for the wait (if no-wait is off)
    :param wait: wait until the edit is finished
:param image: Override Docker image from file
"""
md = parse_resources_file_with_one_item(file).resource
if not isinstance(md, ModelDeployment):
raise ValueError(f'Model deployment expected, but {type(md)} provided')
if md_id:
md.id = md_id
if image:
md.spec.image = image
click.echo(client.edit(md))
wait_deployment_finish(timeout, wait, md.id, client)
| 23,886
|
def pdu_create(ctx, name, pdu_type, interface, description, vim_account, descriptor_file):
"""creates a new Physical Deployment Unit (PDU)"""
logger.debug("")
# try:
check_client_version(ctx.obj, ctx.command.name)
pdu = {}
if not descriptor_file:
if not name:
raise ClientException('in absence of descriptor file, option "--name" is mandatory')
if not pdu_type:
raise ClientException('in absence of descriptor file, option "--pdu_type" is mandatory')
if not interface:
raise ClientException('in absence of descriptor file, option "--interface" is mandatory (at least once)')
if not vim_account:
raise ClientException('in absence of descriptor file, option "--vim_account" is mandatory (at least once)')
else:
with open(descriptor_file, 'r') as df:
pdu = yaml.safe_load(df.read())
if name: pdu["name"] = name
if pdu_type: pdu["type"] = pdu_type
if description: pdu["description"] = description
if vim_account: pdu["vim_accounts"] = vim_account
if interface:
ifaces_list = []
for iface in interface:
new_iface={k:v for k,v in [i.split('=') for i in iface.split(',')]}
new_iface["mgmt"] = (new_iface.get("mgmt","false").lower() == "true")
ifaces_list.append(new_iface)
pdu["interfaces"] = ifaces_list
ctx.obj.pdu.create(pdu)
# except ClientException as e:
# print(str(e))
# exit(1)
| 23,887
|
def main(args):
"""
In order to detect discontinuity, two lines are loaded. If the timestamp differs
by more than the threshold set by -time-threshold, then the distance between
points is calculated. If the distance is greater than the threshold set by
-distance-threshold then the points are assumed to be discontinuous.
Currently the output file is just a count of MMSI's and number of discontinuous points
"""
global UTIL_NAME
#/* ----------------------------------------------------------------------- */#
#/* Print usage
#/* ----------------------------------------------------------------------- */#
    if len(args) == 0:
return print_usage()
#/* ----------------------------------------------------------------------- */#
#/* Defaults
#/* ----------------------------------------------------------------------- */#
write_mode = 'w'
skip_lines = 0
overwrite_mode = False
assign_srs_from_cmdl = 'EPSG:4326'
time_threshold = 269200
distance_threshold = 1
quiet_mode = False
output_product = 'csv'
input_file_format = None
#/* ----------------------------------------------------------------------- */#
#/* Containers
#/* ----------------------------------------------------------------------- */#
input_file = None
output_file = None
input_schema = None
valid_output_products = ('frequency', 'csv', 'csv-no-schema', 'newline', 'flag-csv', 'flag-no-schema', 'flag-newline')
valid_input_file_formats = ('csv', 'newline', 'json')
#/* ----------------------------------------------------------------------- */#
#/* Parse arguments
#/* ----------------------------------------------------------------------- */#
i = 0
arg = None
arg_error = False
while i < len(args):
try:
arg = args[i]
# Help arguments
if arg in ('--help', '-help'):
return print_help()
elif arg in ('--usage', '-usage'):
return print_usage()
elif arg in ('--long-usage', '-long-usage', '-lu'):
return print_long_usage()
# Algorithm settings
elif arg in ('-tt', '-time-threshold'):
i += 2
time_threshold = int(args[i - 1])
elif arg in ('-dt', '-distance-threshold'):
i += 2
distance_threshold = int(args[i - 1])
# Define the output schema
elif arg in ('-s', '-schema', '-header'):
i += 2
input_schema = args[i - 1].split(',')
# Skip lines in input file
elif arg in ('-sl', '-skip-lines'):
i += 2
skip_lines = int(args[i - 1])
# Determine if reading from stdin
elif arg == '-' and not input_file and sys.stdin.isatty():
i += 1
arg_error = True
print("ERROR: Trying to read from empty stdin")
# Additional options
elif arg in ('-q', '-quiet'):
i += 1
quiet_mode = True
elif arg in ('-overwrite', '--overwrite'):
i += 1
overwrite_mode = True
elif arg in ('-a-srs', '-assign-srs'):
i += 2
assign_srs_from_cmdl = args[i - 1]
elif arg in ('-wm', '-write-mode'):
i += 2
write_mode = args[i - 1]
elif arg in ('-op', '-output-product'):
i += 2
output_product = args[i - 1].lower()
elif arg in ('-ff', '-file-format'):
i += 2
input_file_format = args[i - 1]
elif arg == '-stdin':
i += 1
input_file = '-'
elif arg == '-stdout':
i += 1
output_file = '-'
# Catch invalid arguments
elif arg[0] == '-' and arg != '-':
i += 1
arg_error = True
print("ERROR: Unrecognized argument: %s" % arg)
# Positional arguments and errors
else:
i += 1
# Catch input file
if input_file is None:
if arg == '-':
input_file = arg
else:
input_file = abspath(expanduser(arg))
# Catch output file
elif output_file is None:
if arg == '-':
output_file = arg
else:
output_file = abspath(expanduser(arg))
# Unrecognized argument
else:
arg_error = True
print("ERROR: Unrecognized argument: %s" % arg)
# This catches several conditions:
# 1. The last argument is a flag that requires parameters but the user did not supply the parameter
# 2. The arg parser did not properly consume all parameters for an argument
# 3. The arg parser did not properly iterate the 'i' variable
# 4. An argument split on '=' doesn't have anything after '=' - e.g. '--output-file='
except (IndexError, ValueError):
i += 1
arg_error = True
print("ERROR: An argument has invalid parameters: %s" % arg)
#/* ----------------------------------------------------------------------- */#
#/* Transform arguments
#/* ----------------------------------------------------------------------- */#
# Attempt to sniff file type
if not input_file_format and input_file != '-':
input_file_format = input_file.rsplit('.')[-1]
#/* ----------------------------------------------------------------------- */#
#/* Validate parameters
#/* ----------------------------------------------------------------------- */#
bail = False
# Check arguments
if arg_error:
bail = True
print("ERROR: Did not successfully parse arguments")
# Create SRS to apply to points
try:
assign_srs = osr.SpatialReference()
assign_srs.SetFromUserInput(str(assign_srs_from_cmdl))
except RuntimeError:
assign_srs = None
bail = True
print("Invalid assign SRS: '%s'" % assign_srs_from_cmdl)
# Check algorithm options
if not 0 <= time_threshold:
bail = True
print("ERROR: Invalid time threshold - must be >= 0: '%s'" % time_threshold)
if not 0 <= distance_threshold:
bail = True
print("ERROR: Invalid distance threshold - must be >= 0: '%s'" % distance_threshold)
# Check output product options
if output_product not in valid_output_products:
bail = True
print("ERROR: Invalid output product: '%s'" % output_product)
print(" Options: %s" % ', '.join(valid_output_products))
# Check input file format
if input_file_format not in valid_input_file_formats:
bail = True
print("ERROR: Invalid input file format: '%s'" % input_file_format)
print(" Options: %s" % ', '.join(valid_input_file_formats))
# Check input files
if input_file is None:
bail = True
print("ERROR: Need an input file")
elif input_file != '-' and not os.access(input_file, os.R_OK):
bail = True
print("ERROR: Can't access input file: '%s'" % input_file)
# Check output file
if output_file is None:
bail = True
print("ERROR: Need an output file")
elif output_file != '-' and not overwrite_mode and isfile(output_file):
bail = True
print("ERROR: Overwrite=%s but output file exists: '%s'" % (overwrite_mode, output_file))
elif output_file != '-' and isfile(output_file) and not os.access(output_file, os.W_OK):
bail = True
print("ERROR: Need write access for output file: '%s'" % output_file)
elif output_file != '-' and not isfile(output_file) and not os.access(dirname(output_file), os.W_OK):
bail = True
print("ERROR: Need write access for output dir: '%s'" % dirname(output_file))
# Exit if something did not pass validation
if bail:
return 1
#/* ----------------------------------------------------------------------- */#
#/* Prepare data
#/* ----------------------------------------------------------------------- */#
# Be absolutely sure quiet mode is on if the output is stdout, otherwise the output will contain user feedback
if output_file == '-':
quiet_mode = True
if not quiet_mode:
print("Input file: %s" % input_file)
print("Output file: %s" % output_file)
print("Schema: %s" % (','.join(input_schema) if isinstance(input_schema, (list, tuple)) else input_schema))
# Get line count, which is only used when writing to a file and NOT for stdout
prog_total = 0
if not quiet_mode and output_file != '-':
with sys.stdin if input_file == '-' else open(input_file) as i_f:
for row in i_f:
prog_total += 1
# Remove the number of skipped lines and CSV header
prog_total -= skip_lines
if input_schema is None:
prog_total -= 1
#/* ----------------------------------------------------------------------- */#
#/* Process data
#/* ----------------------------------------------------------------------- */#
flag_field = UTIL_NAME
flag_val = 1
# Open input file or stdin
with sys.stdin if input_file == '-' else open(input_file) as i_f:
# Open output file or stdin
with sys.stdout if output_file == '-' else open(output_file, write_mode) as o_f:
# Construct a reader
if input_file_format == 'json':
try:
reader = NewlineJSONReader(i_f)
fieldnames = reader.fieldnames
except ValueError:
print("ERROR: Input file format is '%s' but could not be decoded" % input_file_format)
return 1
elif input_file_format == 'csv':
if input_schema:
reader = csv.DictReader(i_f, fieldnames=input_schema)
else:
reader = csv.DictReader(i_f)
fieldnames = reader.fieldnames
else:
raise IOError("Could not determine input file format - valid formats are newline delimited JSON and CSV")
# Make sure the writer has the flag field if necessary
if 'flag' in output_product:
writer_fieldnames = reader.fieldnames + [flag_field]
else:
writer_fieldnames = reader.fieldnames
# Construct a writer for the output product
if output_product == 'frequency':
# The 'frequency' writer is established later once all data is collected
pass
elif 'csv' in output_product:
writer = csv.DictWriter(o_f, writer_fieldnames)
if output_product in ('csv', 'flag-csv'):
writer.writeheader()
elif 'newline' in output_product:
writer = NewlineJSONWriter(o_f, writer_fieldnames)
else:
raise IOError("Invalid output product: '%s'" % output_product)
# Loop over input file
discontinuity_counts = {}
last_row = None
for prog_i, row in enumerate(reader):
# Only process rows once the proper number of lines has been skipped
if prog_i >= skip_lines:
# Update user, but NOT if writing to stdout
if not quiet_mode and output_file != '-':
sys.stdout.write("\r\x1b[K" + " %s/%s" % (prog_i, prog_total))
sys.stdout.flush()
# Compare MMSI values - if they don't match then re-set the last row to start processing the new MMSI
try:
if last_row and row['mmsi'] != last_row['mmsi']:
last_row = None
except KeyError:
print(row)
print(last_row)
return 1
# If flagging output, make sure all rows contain the field
if 'flag' in output_product:
row[flag_field] = ''
# Normal processing
if last_row is not None and is_discontinuous(row, last_row, tt=time_threshold,
dt=distance_threshold, a_srs=assign_srs):
# Flag output
if 'flag' in output_product:
row[flag_field] = flag_val
# Collect frequency counts
if output_product == 'frequency':
if row['mmsi'] not in discontinuity_counts:
discontinuity_counts[row['mmsi']] = 1
else:
discontinuity_counts[row['mmsi']] += 1
# Write discontinous row
else:
writer.writerow(row)
# Make sure all rows are written when flagging output
                    # This also catches MMSI's containing only a single point AND the first row of every MMSI
elif 'flag' in output_product:
writer.writerow(row)
# Mark the row just processed as the last row in preparation for processing the next row
last_row = row.copy()
#/* ----------------------------------------------------------------------- */#
#/* Dump results if output product is 'frequency'
#/* ----------------------------------------------------------------------- */#
if output_product == 'frequency':
writer = csv.DictWriter(o_f, ['mmsi', 'count'])
writer.writeheader()
                for mmsi, count in discontinuity_counts.items():
writer.writerow({'mmsi': mmsi, 'count': count})
#/* ----------------------------------------------------------------------- */#
#/* Cleanup and return
#/* ----------------------------------------------------------------------- */#
if not quiet_mode:
print(" - Done")
return 0
| 23,888
|
def notify_resource_event(
request, parent_id, timestamp, data, action, old=None, resource_name=None, resource_data=None
):
"""Request helper to stack a resource event.
If a similar event (same resource, same action) already occured during the
current transaction (e.g. batch) then just extend the impacted objects of
the previous one.
:param resource_name: The name of the resource on which the event
happened (taken from the request if not provided).
:param resource_data: Information about the resource on which the
event is being emitted. Usually contains information about how
to find this object in the hierarchy (for instance,
``bucket_id`` and ``collection_id`` for a record). Taken from
the request matchdict if absent.
:type resource_data: dict
"""
if action == ACTIONS.READ:
if not isinstance(data, list):
data = [data]
impacted = data
elif action == ACTIONS.CREATE:
impacted = [{"new": data}]
elif action == ACTIONS.DELETE:
if not isinstance(data, list):
impacted = [{"new": data, "old": old}]
else:
impacted = []
for i, new in enumerate(data):
impacted.append({"new": new, "old": old[i]})
else: # ACTIONS.UPDATE:
impacted = [{"new": data, "old": old}]
# Get previously triggered events.
events = request.bound_data.setdefault("resource_events", EventCollector())
resource_name = resource_name or request.current_resource_name
matchdict = resource_data or dict(request.matchdict)
payload = {
"timestamp": timestamp,
"action": action.value,
# Deprecated: don't actually use URI (see #945).
"uri": strip_uri_prefix(request.path),
"user_id": request.prefixed_userid,
"resource_name": resource_name,
}
# Deprecated: don't actually use `resource_name_id` either (see #945).
if "id" in request.matchdict:
matchdict[resource_name + "_id"] = matchdict.pop("id")
payload.update(**matchdict)
events.add_event(resource_name, parent_id, action, payload, impacted, request)
| 23,889
|
def _mark_untranslated_strings(translation_dict):
"""Marks all untranslated keys as untranslated by surrounding them with
lte and gte symbols.
This function modifies the translation dictionary passed into it in-place
and then returns it.
"""
# This was a requirement when burton was written, but may be an unwanted
# side effect for other projects that adopt burton. We should replace it
# with something more flexible.
for key in translation_dict:
if key is not None and translation_dict[key] is None:
translation_dict[key] = u"\u2264" + key + u"\u2265"
return translation_dict
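
# Example: keys whose value is None get wrapped in the U+2264/U+2265 marker
# characters so they stand out as untranslated (toy dictionary).
strings = {"Cancel": "Annuler", "Save": None}
print(_mark_untranslated_strings(strings))
# {'Cancel': 'Annuler', 'Save': '≤Save≥'}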
| 23,890
|
def pull_models():
"""
Pulls models from current project
"""
create_process("mlm pull models")
| 23,891
|
def build(app, path):
"""
Build and return documents without known warnings
:param app:
:param path:
:return:
"""
with warnings.catch_warnings():
# Ignore warnings emitted by docutils internals.
warnings.filterwarnings(
"ignore",
"'U' mode is deprecated",
DeprecationWarning)
app.build()
#return (app.outdir / path).read_text()
with codecs.open((app.outdir / path), 'r', encoding='utf-8') as content_file:
return content_file.read()
| 23,892
|
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Fasta parser for GC content',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'fasta', metavar='FILE', nargs='+', help='FASTA file(s)')
parser.add_argument(
'-o',
'--outdir',
help='Output directory',
metavar='DIR',
type=str,
default='out')
parser.add_argument(
'-p',
'--pct_gc',
help='Dividing line for percent GC',
metavar='int',
type=int,
default=50)
return parser.parse_args()
| 23,893
|
def SMWatConstrained(CSM, ci, cj, matchFunction, hvPenalty = -0.3, backtrace = False):
"""
Implicit Smith Waterman alignment on a binary cross-similarity matrix
with constraints
:param CSM: A binary N x M cross-similarity matrix
:param ci: The index along the first sequence that must be matched to cj
:param cj: The index along the second sequence that must be matched to ci
:param matchFunction: A function that scores matching/mismatching
:param hvPenalty: The amount by which to penalize horizontal/vertical moves
    :returns: dict with the total score ('score'), both dynamic programming matrices ('D1', 'D2'), and the backtraced path ('path') when backtrace is True
"""
res1 = SMWat(CSM[0:ci+1, 0:cj+1], matchFunction, hvPenalty, backtrace = backtrace, backidx = [ci+1, cj+1])
CSM2 = np.fliplr(np.flipud(CSM[ci::, cj::]))
res2 = SMWat(CSM2, matchFunction, hvPenalty, backtrace = backtrace, backidx = [CSM2.shape[0], CSM2.shape[1]])
res = {'score':res1['D'][-1, -1] + res2['D'][-1, -1]}
res['D1'] = res1['D']
res['D2'] = res2['D']
if backtrace:
path2 = [[ci+1+(CSM2.shape[0]+1-x), cj+1+(CSM2.shape[1]+1-y)] for [x, y] in res2['path']]
res['path'] = res1['path'] + path2
return res
| 23,894
|
def check_images(
coords,
species,
lattice,
PBC=[1, 1, 1],
tm=Tol_matrix(prototype="atomic"),
tol=None,
d_factor=1.0,
):
"""
Given a set of (unfiltered) frac coordinates, checks if the periodic images are too close.
Args:
coords: a list of fractional coordinates
species: the atomic species of each coordinate
lattice: a 3x3 lattice matrix
PBC: the periodic boundary conditions
tm: a Tol_matrix object
tol: a single override value for the distance tolerances
d_factor: the tolerance is multiplied by this amount. Larger values
mean atoms must be farther apart
Returns:
False if distances are too close. True if distances are not too close
"""
# If no PBC, there are no images to check
if PBC == [0, 0, 0]:
return True
# Create image coords from given coords and PBC
coords = np.array(coords)
m = create_matrix(PBC=PBC, omit=True)
new_coords = []
new_species = []
for v in m:
for v2 in coords + v:
new_coords.append(v2)
new_coords = np.array(new_coords)
# Create a distance matrix
dm = distance_matrix(coords, new_coords, lattice, PBC=[0, 0, 0])
# Define tolerances
if tol is None:
tols = np.zeros((len(species), len(species)))
for i, s1 in enumerate(species):
for j, s2 in enumerate(species):
if i <= j:
tols[i][j] = tm.get_tol(s1, s2)
tols[j][i] = tm.get_tol(s1, s2)
tols2 = np.tile(tols, int(len(new_coords) / len(coords)))
if (dm < tols2).any():
return False
else:
return True
elif tol is not None:
if (dm < tol).any():
return False
else:
return True
return True
| 23,895
|
def run_search(win: Surface, graph_: Grid, auto=False):
"""
Calls the Algorithm class to visualize
the pathfinder algorithm using the Visualize class
"""
print('Visualization started with:', pf.settings.default_alg.title())
if not graph_.has_bomb:
node_list = [graph_.start, graph_.end]
search_colors = [pf.BLUE]
else:
node_list = [graph_.start, graph_.bomb, graph_.end]
search_colors = [pf.DARK_PINK, pf.BLUE]
alg = Algorithm(pf.settings.default_alg, node_list, pf.GRID_OFFSET,
graph_.walls, graph_.weights)
alg(win, graph_, search_colors, auto)
print('Visualization done')
| 23,896
|
def get_hamming_distances(genomes):
"""Calculate pairwise Hamming distances between the given list of genomes
and return the nonredundant array of values for use with scipy's squareform function.
Bases other than standard nucleotides (A, T, C, G) are ignored.
Parameters
----------
genomes : list
a list of strings corresponding to genomes that should be compared
Returns
-------
list
a list of distinct Hamming distances as a vector-form distance vector
>>> genomes = ["ATGCT", "ATGCT", "ACGCT"]
>>> get_hamming_distances(genomes)
[0, 1, 1]
>>> genomes = ["AT-GCT", "AT--CT", "AC--CT"]
>>> get_hamming_distances(genomes)
[0, 1, 1]
"""
# Define an array of valid nucleotides to use in pairwise distance calculations.
# Using a numpy array of byte strings allows us to apply numpy.isin later.
nucleotides = np.array([b'A', b'T', b'C', b'G'])
# Convert genome strings into numpy arrays to enable vectorized comparisons.
genome_arrays = [
np.frombuffer(genome.encode(), dtype="S1")
for genome in genomes
]
# Precalculate positions of valid bases (A, T, C, and G) in each genome to speed up later comparisons.
valid_bases = [
np.isin(genome_array, nucleotides)
for genome_array in genome_arrays
]
# Calculate Hamming distance between all distinct pairs of genomes at valid bases.
# The resulting list is a reduced representation of a symmetric matrix that can be
# converted to a square matrix with scipy's squareform function:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html
hamming_distances = []
for i in range(len(genomes)):
# Only compare the current genome, i, with all later genomes.
# This avoids repeating comparisons or comparing each genome to itself.
for j in range(i + 1, len(genomes)):
# Find all mismatches between these two genomes.
mismatches = genome_arrays[i] != genome_arrays[j]
# Count the number of mismatches where both genomes have valid bases.
hamming_distances.append((mismatches & valid_bases[i] & valid_bases[j]).sum())
return hamming_distances
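
# As the docstring notes, the vector-form result expands into a symmetric
# distance matrix with scipy's squareform, e.g. for downstream clustering.
from scipy.spatial.distance import squareform

genomes = ["ATGCT", "ATGCT", "ACGCT"]
print(squareform(get_hamming_distances(genomes)))
# [[0 0 1]
#  [0 0 1]
#  [1 1 0]]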
| 23,897
|
async def codec(gc: GroupControl):
"""Codec settings."""
codec = await gc.get_codec()
click.echo("Codec: %s" % codec)
| 23,898
|
def serialize(name: str, engine: str) -> Dict:
"""Get dictionary serialization for a dataset locator.
Parameters
----------
name: string
Unique dataset name.
engine: string
Unique identifier of the database engine (API).
Returns
-------
dict
"""
return {'name': name, 'database': engine}
| 23,899
|