content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
def gas_3parallel(method="nikuradse"):
    """Load the STANET 3-parallel-pipes gas network as a pandapipes net.

    :param method: which results should be loaded: "nikuradse" (or "n")
        versus prandtl-colebrook
    :type method: str, default "nikuradse"
    :return: net - STANET network converted to a pandapipes network
    :rtype: pandapipesNet
    :Example:
        >>> pandapipes.networks.simple_gas_networks.gas_3parallel(method="n")
    """
    log_result_upon_loading(logger, method=method, converter="stanet")
    use_nikuradse = method.lower() in ("nikuradse", "n")
    net_name = "parallel_N.json" if use_nikuradse else "parallel_PC.json"
    return from_json(os.path.join(gas_stanet_path, "combined_networks", net_name))
def get_coordinates(df, year: str):
    """Return the films of *year* with a 'Coordinates' column of (lat, lon).

    Rows whose filming location cannot be geocoded are dropped.

    Args:
        df: DataFrame with a 'Year' column and the location text in the
            third column (index 2) — assumed schema, confirm with caller.
        year: year value to filter the films by.

    Returns:
        New DataFrame restricted to *year* with a 'Coordinates' column;
        rows that failed geocoding are removed.
    """
    # Copy so the caller's frame is never mutated (also silences
    # pandas SettingWithCopyWarning on the column assignment below).
    df = df[df["Year"] == year].copy()
    geolocator = Nominatim(user_agent="main.py")
    coordinates = []
    # Iterate over the rows actually present; the previous hard-coded
    # range(120) crashed for years with fewer than 120 films and silently
    # dropped films beyond the 120th.
    for i in range(len(df)):
        location = geolocator.geocode(df.iloc[i, 2])
        if location is None:
            coordinates.append(0)  # sentinel: geocoding failed
        else:
            coordinates.append((location.latitude, location.longitude))
    df["Coordinates"] = coordinates
    return df[df["Coordinates"] != 0]
from pathlib import Path
import json
def get_partyname_wordlist(
    json_path: Path,
    filter_full_name=None,
    name_keys=None,
    lowercase: bool = False,
    add_spaces: bool = False,
):
    """Create a set of all possible names and abbreviations of party names.

    Args:
        json_path: path to a JSON file holding a list of party dicts.
        filter_full_name: optional list of "full_name" values; when given,
            only matching parties are considered.
        name_keys: party-dict keys to harvest names from (string or
            list-of-string values). Defaults to
            ["full_name", "label", "short_name", "other_names"].
        lowercase: lower-case every collected name.
        add_spaces: surround every name with single spaces (useful for
            whole-word substring matching).

    Returns:
        Set of party-name strings.
    """
    # Bug fix: the defaults were mutable lists shared across calls;
    # use None sentinels instead (behaviour for callers is unchanged).
    if name_keys is None:
        name_keys = ["full_name", "label", "short_name", "other_names"]
    parties = json.loads(Path(json_path).read_text())
    if filter_full_name:
        parties = [p for p in parties if p.get("full_name") in filter_full_name]
    party_names = set()
    for party in parties:
        for key in name_keys:
            name = party.get(key)
            if isinstance(name, str):
                party_names.add(name)
            elif isinstance(name, list):
                party_names.update(name)
    if lowercase:
        party_names = {name.lower() for name in party_names}
    if add_spaces:
        party_names = {f" {name} " for name in party_names}
    return party_names
def ceil_even(x):
    """Return the smallest even integer not less than x.

    x can be an integer or a float.
    """
    import math  # local import: this file has no shared import block
    # 2 * ceil(x / 2) is exactly the smallest even integer >= x; the old
    # round_even(x + 1) overshot for even integer inputs (e.g. x == 2).
    return 2 * math.ceil(x / 2)
def dict_to_PI(d, classes):
    """Convert a dictionary to a PresentationInfo.

    Uses *classes*, a pre-fetched dictionary of CssClass objects, to
    resolve non-command presentation types; returns None when the class
    is unknown.
    """
    if d['prestype'] == 'command':
        return PresentationInfo(prestype=d['prestype'], name=d['name'])
    css_class = classes.get(d['name'])
    if css_class is None:
        return None
    return css_class_to_presentation_class(css_class)
import typing
def no_batch_embed(sentence: str) -> typing.List[float]:
    """Embed *sentence* with the module-level model and return the
    resulting vector as a plain list of floats."""
    vector = model.encode(sentence)
    return vector.tolist()
def make_video(video_images_files, name, fps=30):
    """Build a video from a list of image files and write it to *name*.

    Returns the clip object that was written.
    """
    print('\nCreating video...')
    clip = ImageSequenceClip(video_images_files, fps)
    clip.write_videofile(name)
    return clip
def find_internal_gaps(seq):
    """
    Accepts a string and returns the positions of all of the gaps in the sequence which are flanked by nt bases
    :param seq: str
    :return: list of [start,end] of all of the internal gaps
    """
    # NOTE(review): despite the docstring, entries are returned as
    # "start:end" strings, not [start, end] lists -- confirm callers.
    gaps = find_gaps(seq)
    seq_len = len(seq) -1  # index of the last character
    internal_gaps = []
    iupac = IUPAC_LOOK_UP
    for gap in gaps:
        start = gap[0]
        # Multi-position gaps appear to use an exclusive end; convert to the
        # inclusive last gap index. TODO confirm against find_gaps().
        if start != gap[1]:
            end = gap[1] - 1
        else:
            end = gap[1]
        # Gaps touching either end of the sequence are not internal.
        if start == 0 or end >= seq_len:
            continue
        # Both flanking characters must be recognised nucleotide codes.
        if seq[start-1] not in iupac or seq[end+1] not in iupac:
            continue
        internal_gaps.append("{}:{}".format(start,end))
    return internal_gaps
def test_loop(model, ins, batch_size=None, verbose=0, steps=None):
    """Abstract method to loop over some data in batches.
    Arguments:
        model: Model instance that is being evaluated in Eager mode.
        ins: list of tensors to be fed to `f`.
        batch_size: integer batch size or `None`.
        verbose: verbosity mode.
        steps: Total number of steps (batches of samples)
            before declaring predictions finished.
            Ignored with the default value of `None`.
    Returns:
        Scalar loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    """
    # Evaluation mode: disable training-only behaviour (dropout etc.).
    K.set_learning_phase(False)
    num_samples = model._check_num_samples(ins, batch_size, steps, 'steps')
    outs = []
    if verbose == 1:
        progbar = Progbar(target=num_samples)
    batches = _make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        if isinstance(ins[-1], float):
            # Do not slice the training phase flag.
            ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
        else:
            ins_batch = _slice_arrays(ins, batch_ids)
        # Convert every sliced array into a tensor for eager execution.
        ins_batch_converted = []
        for ib in ins_batch:
            ins_batch_converted.append(ops.convert_to_tensor(ib, dtype=K.floatx()))
        # First len(model.inputs) entries feed the model; the remainder
        # are the target tensors.
        eager_model_inputs = []
        eager_model_outputs = []
        for i in range(len(model.inputs)):
            eager_model_inputs.append(ins_batch_converted[i])
        for i in range(len(model.inputs), len(ins_batch_converted)):
            eager_model_outputs.append(ins_batch_converted[i])
        loss_outs, loss, loss_metrics = _model_loss(model, eager_model_inputs,
                                                    eager_model_outputs)
        _, metrics_results = _eager_metrics_fn(model, loss_outs,
                                               eager_model_outputs)
        batch_outs = []
        for _, v in zip(model.metrics_names,
                        [K.mean(loss)] + loss_metrics + metrics_results):
            batch_outs.append(tensor_util.constant_value(v))
        if isinstance(batch_outs, list):
            if batch_index == 0:
                # Seed one accumulator slot per output on the first batch.
                # NOTE(review): enumerate() here is redundant but harmless.
                for batch_out in enumerate(batch_outs):
                    outs.append(0.)
            # Accumulate sample-weighted sums; averaged after the loop.
            for i, batch_out in enumerate(batch_outs):
                outs[i] += batch_out * len(batch_ids)
        else:
            if batch_index == 0:
                outs.append(0.)
            outs[0] += batch_outs * len(batch_ids)
        if verbose == 1:
            progbar.update(batch_end)
    # Turn the weighted sums into per-sample averages.
    for i in range(len(outs)):
        outs[i] /= num_samples
    if len(outs) == 1:
        return outs[0]
    return outs
def find_bands(bands, target_avg, target_range, min_shows):
    """Return names of bands whose score average falls near a target.

    A band qualifies when it has performed at least ``min_shows`` shows
    and the average of its first ``target_avg[1]`` scores (or all scores
    if it has fewer) lies within ``target_range`` of ``target_avg[0]``,
    inclusive on both sides.

    Parameters:
        bands: dict mapping band name -> list of competition scores.
        target_avg: tuple (target average, number of leading scores to
            average).
        target_range: allowed absolute deviation from the target average.
        min_shows: minimum number of shows to be eligible.

    Returns:
        List of qualifying band names, in dictionary order.

    >>> DCI = {'Blue Devils': [98.2, 97.1, 99.1, 97.3, 98.2], \
               'Blue Coats': [98, 96.5, 97.2, 93, 92.1, 92, 97.4], \
               'Carolina Crown': [75.7, 82.8, 86.1, 98.2], \
               'The Cadets': [96.1, 93.4, 81, 78, 57.9, 86, 71.2, 35.5], \
               'Mandarins': [89.3, 88.1, 85.6, 83.8, 79.1, 88.4, 75.7], \
               'Little Rocks':[42], \
               'Logan Colts':[98.2, 84.4, 69.2, 42, 84]}
    >>> find_bands(DCI, (0, 10), 30, 2)
    []
    >>> find_bands(DCI, (90, 5), 5, 7)
    ['Mandarins']
    >>> find_bands(DCI, (70, 8), 10, 5)
    ['The Cadets', 'Logan Colts']
    >>> find_bands(DCI, (95, 3), 5, 4)
    ['Blue Devils', 'Blue Coats', 'The Cadets']
    """
    target, kept = target_avg
    lower = target - target_range
    upper = target + target_range
    matches = []
    for band_name, scores in bands.items():
        if len(scores) < min_shows:
            continue
        head = scores[:kept]
        average = sum(head) / len(head)
        if lower <= average <= upper:
            matches.append(band_name)
    return matches
def get_named_targets():
    """Return the list of named target date ranges."""
    return [
        "std_train", "std_val", "std_test", "std_ens", "std_all",
        "std_future", "std_contest_fri", "std_contest",
        "std_contest_daily", "std_contest_eval",
        "std_contest_eval_daily", "std_paper", "std_paper_daily",
    ]
import scipy
def create_edge_linestrings(G_, remove_redundant=True, verbose=False):
    """
    Ensure all edges have the 'geometry' tag, use shapely linestrings.
    Notes
    -----
    If identical edges exist, remove extras.
    Arguments
    ---------
    G_ : networkx graph
        Input networkx graph, with edges assumed to have a dictioary of
        properties that may or may not include 'geometry'.
    remove_redundant : boolean
        Switch to remove identical edges, if they exist.
    verbose : boolean
        Switch to print relevant values to screen. Defaults to ``False``.
    Returns
    -------
    G_ : networkx graph
        Updated graph with every edge containing the 'geometry' tag.
    """
    # clean out redundant edges with identical geometry
    edge_seen_set = set([])
    geom_seen = []
    bad_edges = []
    # NOTE: edges are mutated in place; the graph itself is not copied.
    # G_ = G_.copy()
    # for i,(u, v, key, data) in enumerate(G_.edges(keys=True, data=True)):
    for i, (u, v, data) in enumerate(G_.edges(data=True)):
        # create linestring if no geometry reported
        if 'geometry' not in data:
            sourcex, sourcey = G_.nodes[u]['x'], G_.nodes[u]['y']
            targetx, targety = G_.nodes[v]['x'], G_.nodes[v]['y']
            line_geom = LineString([Point(sourcex, sourcey),
                                    Point(targetx, targety)])
            data['geometry'] = line_geom
            # get reversed line (used below for redundancy detection)
            coords = list(data['geometry'].coords)[::-1]
            line_geom_rev = LineString(coords)
            # G_.edges[u][v]['geometry'] = lstring
        else:
            # check which direction linestring is travelling (it may be going
            # from v -> u, which means we need to reverse the linestring)
            # otherwise new edge is tangled
            line_geom = data['geometry']
            # print (u,v,key,"create_edge_linestrings() line_geom:", line_geom)
            u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
            v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
            geom_p0 = list(line_geom.coords)[0]
            dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
            dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
            # print "dist_to_u, dist_to_v:", dist_to_u, dist_to_v
            coords = list(data['geometry'].coords)[::-1]
            line_geom_rev = LineString(coords)
            # The linestring should start nearer u; otherwise flip it.
            if dist_to_u > dist_to_v:
                # data['geometry'].coords = list(line_geom.coords)[::-1]
                data['geometry'] = line_geom_rev
            # else:
            #    continue
        # flag redundant edges
        if remove_redundant:
            if i == 0:
                # Seed the seen-sets with the first edge (both directions).
                edge_seen_set = set([(u, v)])
                edge_seen_set.add((v, u))
                geom_seen.append(line_geom)
            else:
                if ((u, v) in edge_seen_set) or ((v, u) in edge_seen_set):
                    # test if geoms have already been seen
                    for geom_seen_tmp in geom_seen:
                        if (line_geom == geom_seen_tmp) \
                                or (line_geom_rev == geom_seen_tmp):
                            bad_edges.append((u, v))  # , key))
                            if verbose:
                                print("\nRedundant edge:", u, v)  # , key)
                else:
                    edge_seen_set.add((u, v))
                    geom_seen.append(line_geom)
                    geom_seen.append(line_geom_rev)
    # Remove the flagged duplicates after iteration (safe mutation order).
    if remove_redundant:
        if verbose:
            print("\nedge_seen_set:", edge_seen_set)
            print("redundant edges:", bad_edges)
        for (u, v) in bad_edges:
            if G_.has_edge(u, v):
                G_.remove_edge(u, v)  # , key)
        # # for (u,v,key) in bad_edges:
        #    try:
        #        G_.remove_edge(u, v)  # , key)
        #    except:
        #        if verbose:
        #            print("Edge DNE:", u, v)  # ,key)
        #        pass
    return G_
def get_licenses(service_instance, license_manager=None):
    """
    Returns the licenses on a specific instance.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises salt VMwareApiError / VMwareRuntimeError when the vSphere API
    reports a permission, API or runtime fault.
    """
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug("Retrieving licenses")
    try:
        return license_manager.licenses
    # Translate raw vSphere faults into salt exception types for callers.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            "Not enough permissions. Required privilege: " "{}".format(exc.privilegeId)
        )
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_installed(request, project_id=None):
    """Check whether the extension {{ cookiecutter.project_name }} is installed."""
    payload = {'is_installed': True, 'msg': '{{ cookiecutter.project_name }} is installed'}
    return JsonResponse(payload)
import os
def predict(model, data, out_fname = None):
    """
    Description:
    -----------
    This function is used to predict the EV values for a given dataframe
    Parameters:
    -----------
    model: The model to be finetuned (tf.keras.models.Model)
    data: The dataframe to be predicted (pandas.DataFrame)
    out_fname: The file path for the exported dataframe (str)
    Returns:
    --------
    data: The dataframe with the predictions (pandas.DataFrame)
    """
    # Probability above this threshold is classified as "EV present".
    binary_threshold_15_min = 0.10
    # Raise an exception if the dataframe is empty
    if data.empty:
        raise Exception("DataFrame is empty!")
    # Converting column names to lower case internally for validation purposes
    # NOTE(review): this mutates the caller's DataFrame column names.
    data.columns = map(str.lower, data.columns)
    # Raise an exception if the column grid is not found
    if 'grid' not in data.columns:
        raise Exception("Column named 'grid' not found in the dataframe!")
    # Raise an exception if the column grid has NaN values
    if data.grid.isna().any():
        raise Exception("Missing grid data (NaN values)")
    # Raise an exception if the column grid has data other than int and float
    if pd.api.types.is_numeric_dtype(data.grid) == False:
        raise Exception("Grid data must be a numeric type (integer or float)")
    # Raise an exception if there are not enough samples in the dataframe
    # (900 samples form one prediction window; presumably 1 Hz sampling
    # over 15 minutes -- TODO confirm).
    if data.shape[0] < 900:
        raise Exception("Data too short for 15 minute prediction")
    # Internal function to output the EV 0/1 values given the sequence data
    def predict_loop(infer_data):
        """
        Description:
        -----------
        This function is used to predict the EV values for a given sequence data
        Parameters:
        -----------
        infer_data: The sequence data to be predicted (numpy.ndarray)
        Returns:
        --------
        EV: The EV value 0/1 for the sequence data (float)
        """
        # Reshaping the data to be compatible with the model and predicting the EV values
        outputs = model.model_15_min(tf.reshape(infer_data, shape = (1, 1, 900)), training = False).numpy()[0]
        # Converting the EV values to 0/1 based on the threshold
        EV = 0
        if outputs > binary_threshold_15_min:
            EV = 1
        return EV
    # Creating a new column for the EV values; index 0 has no history yet.
    preds = [None]
    print("Predicting 15 min EV values")
    # Looping through the dataframe and generating the EV values
    for i, r in data.iterrows():
        if i == 0:
            pass
        else:
            # Only predict on full 900-sample windows; other rows get None.
            if i % 900 == 0 and i < (len(data) - 900):
                preds.append(predict_loop(data.iloc[i-899:i+1]['grid'].to_list()))
            else:
                preds.append(None)
    data['EV'] = preds
    # Exporting the dataframe with the predictions column to a csv file
    f = "predictions_" + str(15) + "_min.csv"
    if out_fname is not None:
        f = out_fname
    # Exporting the dataframe to a csv file
    data.to_csv(os.path.join(os.getcwd(), "output", "files", f))
    return data
def line_order(line):
    """Recursive search for the line's hydrological level.

    Parameters
    ----------
    line: a Centerline instance

    Returns
    -------
    The line's order: 0 for a line with no inflows, otherwise one more
    than the highest order among its inflows.
    """
    inflows = line.inflows
    if not inflows:
        return 0
    return np.max([line_order(inflow) for inflow in inflows]) + 1
def get_data_colums(epoch):
    """Return the data columns of a given epoch.

    :param epoch: epoch as a 2-D numpy array, already read from .csv

    Returns the 7 basic columns (ID, RA, RA_err, Dec, Dec_err, Flux,
    Flux_err) and, when more columns are present, the three neighbour
    columns (Neighbr, Nhbr1_d, Nhbr2_d) as well.
    """
    basic = (epoch[:, 0], epoch[:, 1], epoch[:, 2], epoch[:, 3],
             epoch[:, 4], epoch[:, 5], epoch[:, 6])
    if epoch.shape[1] > 7:
        return basic + (epoch[:, 7], epoch[:, 8], epoch[:, 9])
    return basic
def reset_columns_DataFrame(df, new_columns=None):
    """
    Rename *all* columns of *df* and return it.

    Accepted values for new_columns:
        - None: df.columns = list(df.columns)
        - list / pd.MultiIndex: used as the new columns directly
        - str (single-column frames only): df.columns = [new_columns]
        - callable: applied to every existing column name

    new_columns=None is useful when you were transposing categorical
    indices and can no longer assign columns. (Arguably a pandas bug.)
    """
    if new_columns is None:
        renamed = list(df.columns)
    elif isinstance(new_columns, (list, pd.MultiIndex)):
        renamed = new_columns
    elif isinstance(new_columns, str):
        if df.shape[1] != 1:
            raise ValueError("Single string only supported for dfs with 1 column")
        renamed = [new_columns]
    else:
        renamed = [new_columns(col) for col in df.columns]
    df.columns = renamed
    return df
def bin_position(max_val):
    """Return positional marker tokens for sentence positions.

    Positions are bucketed into thirds and mapped onto the symbols
    " `", " _" and " @"; concatenate one token at the end of each
    sentence to encode its length bucket.
    """
    symbols = (" `", " _", " @")
    if max_val <= 3:
        return [symbols[i] for i in range(max_val)]
    third = max_val // 3
    counts = (third, third, max_val - 2 * third)
    tokens = []
    for symbol, count in zip(symbols, counts):
        tokens.extend([symbol] * count)
    return tokens
def norm(g, scale=True):
    """Apply batch normalisation over the last axis of the network.

    ``scale`` decides whether a learnable multiplicative factor is used.
    """
    layer = BatchNormalization(axis=-1, scale=scale)
    g.X = layer(g.X)
    return g
def makeEvent(rawEvent, time = 0):
    """Create a midi event from a raw event received from the sequencer.

    :param rawEvent: sequencer event carrying ``.type`` (an SSE constant)
        and ``.data`` (a union of note/control/ext payloads).
    :param time: timestamp attached to the created event.
    :return: a typed event object; UnknownEvent for unrecognised types.
    """
    eventData = rawEvent.data
    # Dispatch on the sequencer event type; each branch extracts the
    # payload fields relevant to that message class.
    if rawEvent.type == SSE.NOTEON:
        result = NoteOn(time, eventData.note.channel, eventData.note.note,
                        eventData.note.velocity
                        )
    elif rawEvent.type == SSE.NOTEOFF:
        # Note-off velocity is discarded (fixed at 0).
        result = NoteOff(time, eventData.note.channel, eventData.note.note, 0)
    elif rawEvent.type == SSE.PITCHBEND:
        result = PitchWheel(time, eventData.control.channel,
                            eventData.control.value
                            )
    elif rawEvent.type == SSE.PGMCHANGE:
        result = ProgramChange(time, eventData.control.channel,
                               eventData.control.value
                               )
    elif rawEvent.type == SSE.CONTROLLER:
        result = ControlChange(time, eventData.control.channel,
                               eventData.control.param,
                               eventData.control.value
                               )
    elif rawEvent.type == SSE.SYSEX:
        result = SysEx(time, eventData.ext)
    elif rawEvent.type == SSE.START:
        result = SysStart(time)
    elif rawEvent.type == SSE.CONTINUE:
        result = SysContinue(time)
    elif rawEvent.type == SSE.STOP:
        result = SysStop(time)
    else:
        # Preserve the raw type code so callers can inspect it.
        result = UnknownEvent(time, rawEvent.type)
    return result
def rule2(n):
    """Double every value produced by rule1(n). ("2sqrt")

    NOTE(review): returns a lazy ``map`` object, not a list -- it can be
    consumed only once. Confirm callers expect an iterator.
    """
    k = map(lambda x:x*2,rule1(n))
    return k
def charge_sublayers(ich):
    """ Parse the InChI string for its charge sublayers.

    :param ich: InChI string
    :type ich: str
    :rtype: dict[str: str]
    """
    sublayers = automol.convert.inchi.charge_sublayers(ich)
    return sublayers
import os
def f_split_path(fpath, normpath=True):
    """Split a path into a list of its component folders.

    Args:
        normpath: when True, call os.path.normpath first to collapse
            redundant separators and up-level references like "..".

    Returns:
        List of path components, from the root outward.
    """
    if normpath:
        fpath = os.path.normpath(fpath)
    parts = []
    while True:
        head, tail = os.path.split(fpath)
        if head == fpath:
            # Absolute-path sentinel: os.path.split no longer shrinks head.
            parts.insert(0, head)
            break
        if tail == fpath:
            # Relative-path sentinel: only the final component remains.
            parts.insert(0, tail)
            break
        parts.insert(0, tail)
        fpath = head
    return parts
import os
def get_path(*args):
    """Join *args onto the primitives_data directory path (utility)."""
    parts = (THIS_DIR, 'primitives_data') + args
    return os.path.join(*parts)
import requests
def mixcloud_profile():
    """ Display the authorized user's profile.

    Fetches the profile URL with the stored OAuth access token and
    redirects the browser to the resolved URL instead of proxying it.
    """
    # Note: We would normally do this but Mixcloud requires specific parameters
    #client_id = current_app.config['CONFIG']['accounts']['mixcloud']['client_id']
    #mixcloud_session = OAuth2Session(client_id, token=session['oauth_token'])
    #return jsonify(mixcloud_session.get(profile_url))
    # Mixcloud expects the bearer token as a query parameter.
    params = {
        "access_token": session['oauth_token']['access_token']
    }
    response = requests.get(profile_url, params=params)
    return redirect(response.url)
def search_metas(metas, criteria, keywords):
    """Filter message metas by criteria and free-text keywords.

    Note: storage may contain messages from others, hence the
    criteria check on every meta.

    Args:
        metas: iterable of message-meta dicts.
        criteria: passed to satisfy_criteria() for structural filtering.
        keywords: iterable of strings; a meta matches when ANY keyword
            occurs (case-insensitively) in its 'from', 'subject',
            'purpose' or 'msg' field. None or empty means "match all".

    Returns:
        List of metas passing both filters.
    """
    msgs = []
    for meta in metas:
        if not satisfy_criteria(criteria, meta):
            continue
        if not keywords:  # None or empty: keep every meta passing criteria
            msgs.append(meta)
            continue
        # Build one lowercase haystack out of the searchable fields
        # (leading/trailing '' reproduce the original newline padding).
        haystack = '\n'.join(
            ['', meta['from'], meta['subject'], meta['purpose'], meta['msg'], '']
        ).lower()
        if any(keyword.lower() in haystack for keyword in keywords):
            msgs.append(meta)
    return msgs
import logging
def count_large_cargos(b):
    """Gather number of large cargos in each planet.

    :param b: selenium browser/webdriver instance.
    :return: dict mapping planet index -> large-cargo count (0 when the
        planet has no fleet).
    """
    def find_planets():
        # Re-query the DOM each time: references go stale after navigation.
        return sln.finds(sln.find(b, By.ID, 'planetList'),
                         By.CLASS_NAME, 'planetlink')
    num_planets = len(find_planets())
    logging.info('Found {} planets'.format(num_planets))
    fleet = {}
    for i in range(num_planets):
        logging.info('Navigating to planet #{}'.format(i))
        # Need to find the planets again since previous references are stale.
        planets = find_planets()
        planets[i].click()
        # Navigate to fleet view (only needed for the first planet).
        if i == 0:
            logging.info('Navigating to fleet view')
            sln.finds(sln.find(b, By.ID, 'links'),
                      By.CLASS_NAME, 'menubutton')[7].click()
        try:
            large_cargos = sln.find(sln.find(b, By.ID, 'button203', timeout=5),
                                    By.CLASS_NAME, 'level').text
        except TimeoutException:
            # Most likely indicates there is no fleet on this planet.
            logging.warn('No fleet on this planet')
            large_cargos = 0
        logging.info('Planet {} has {} large cargos'.format(i, large_cargos))
        fleet[i] = int(large_cargos)
    return fleet
def get_project(service, project_id):
    """Return the result of the projects.get API call for *project_id*.

    Builds the request via the given service object and executes it.
    """
    request = service.projects().get(projectId=project_id)
    return request.execute()
def parse(src: str):
    """
    Compile the input string and return the equivalent S-expression.

    Delegates to the module-level ``parser`` object.
    """
    return parser.parse(src)
import ast
from typing import Tuple
from typing import List
from typing import Set
def get_parser_init_and_actions(source: ast.Module) -> \
        Tuple[List[ast.AST], str, Set[str]]:
    """
    Function used to extract necessary imports, parser and argument creation
    function calls
    Parameters
    ----------
    source : ast.Module
        source file parsed into ATT
    Returns
    -------
    List of extracted AST nodes, the main name of the parser and a set of
    section names
    """
    # The passes run in order; each receives the previous pass's findings.
    discovery_classes = [ImportDiscovery, ParserDiscovery,
                         GroupDiscovery, ArgumentCreationDiscovery]
    # NOTE(review): the trailing comma makes this the 1-tuple ([],), so the
    # first discovery class is constructed as cls([]) -- confirm intentional.
    findings = [],
    for cls in discovery_classes:
        discovery = cls(*findings)
        discovery.visit(source)
        findings = discovery.report_findings()
    actions, main_name, sections = findings
    return actions, main_name, sections
from pathlib import Path
from typing import Dict
def write_json_to_file(filepath: Path, jsonstr: Dict[str, str], indent: int = 4, eof_line=True):
    """Write a JSON object to a file.

    Reports to screen (without raising) when the file cannot be found.

    Arguments:
        filepath {Path} -- path of the file to write
        jsonstr {Dict[str, str]} -- JSON object
        indent {int} -- indentation width used when serialising
        eof_line {bool} -- append a trailing '\\n' to the file

    Returns:
        bool -- True when the write succeeded
    """
    # Bug fix: honour the caller's `indent` instead of the hard-coded 4.
    text = dumps_json(jsonstr, indent=indent)
    if eof_line:
        text += "\n"
    return write_to_file(filepath, text)
import os
def get_version():
    """ str: The package version. """
    namespace = {}
    # Compile and execute only the version module so that importing it
    # does not load the whole package.
    source = read(os.path.join("test_python_package", "__version__.py"))
    exec(compile(source, "version.py", "exec"), namespace)
    return namespace['__version__']
def aTimesFiltered(data, filterFunction, microBin=False):
    """
    Filter a list of arrivalTimes
    ===========================================================================
    Input           Meaning
    ---------------------------------------------------------------------------
    data            Object with for each detector element field with a 2D int
                    array, e.g.:
                        data.det0 = np.array(N x 2)
                        with N the number of photons
                        First column: macro arrival times [a.u.]
                        Second column: micro arrival times [*]
                    data.macrotime: macro time [s]
                    data.microtime: micro time [s]
                    data.microbintime: micro bin time [s]
    filterFunction  np.array(M x Nf) with the filter functions
                        M: number of lifetime bins
                        Nf: number of filters, tyically 2 (1 fluor, 1 afterpulse)
                        sum of each row = 1
                    For multiple detector elements, and thus multiple filters,
                    this variable is np.array(Ndet x M x Nf)
    microBin        Boolean
                        True if micro arrival times [*] are in bin numbers
                        False if micro arrival times [*] are in [a.u.]
                            In this case, the bin numbers are calculated as
                            bin = t * data.microtime / data.microbintime
                            with data.microtime the microtime unit in s
                            and data.microbintime the bin time in s
    ===========================================================================
    Output          Meaning
    ---------------------------------------------------------------------------
    data            Same object as input but data.det0 is now np.array(N x 2+Nf)
                    For every detector element, Nf columns are added with
                    the filtered weights for the arrival times
    ===========================================================================
    """
    # get list of detector fields (attributes named det0, det1, ...)
    listOfFields = list(data.__dict__.keys())
    listOfFields = [i for i in listOfFields if i.startswith('det')]
    Ndet = len(listOfFields)
    # make sure filterFunction is a 3D array (detector, microbin, filter function)
    if len(np.shape(filterFunction)) == 2:
        filterFunction = np.expand_dims(filterFunction, 0)
    # number of filters
    Nf = np.shape(filterFunction)[2]
    # number of time bins
    M = np.shape(filterFunction)[1]
    # micro times normalization factor (identity when times are already bins)
    microN = 1
    if not microBin:
        microN = data.microtime / data.microbintime
    # go through each channel and create filtered intensity trace
    for det in range(Ndet):
        print("Calculating filtered photon streams " + listOfFields[det])
        # get photon streams single detector element
        dataSingleDet = getattr(data, listOfFields[det])
        # remove exessive columns which may already contain filtered photon streams
        dataSingleDet = dataSingleDet[:, 0:2]
        # calculate for each photon the filtered values:
        # map each photon's micro time onto a lifetime bin, clipped to [0, M-1]
        photonMicroBins = np.int64(np.floor(dataSingleDet[:,1] * microN))
        photonMicroBins = np.clip(photonMicroBins, 0, M-1)
        # append one weight column per filter, looked up per photon bin
        for filt in range(Nf):
            filteredValues = np.expand_dims(np.take(np.squeeze(filterFunction[det, :, filt]), photonMicroBins), 1)
            dataSingleDet = np.concatenate((dataSingleDet, filteredValues), axis=1)
        setattr(data, listOfFields[det], dataSingleDet)
    return data
import select
def get_publication_group(project, group_id):
    """
    Get all data for a single publication group.

    :param project: unused here; presumably part of the route signature.
    :param group_id: id of the publication_group row to fetch.
    :return: flask JSON response with the first matching row.
    """
    connection = db_engine.connect()
    groups = get_table("publication_group")
    statement = select([groups]).where(groups.c.id == int_or_none(group_id))
    rows = connection.execute(statement).fetchall()
    # NOTE(review): raises IndexError when no row matches, leaving the
    # connection unclosed -- consider try/finally and a 404 path.
    result = dict(rows[0])
    connection.close()
    return jsonify(result)
import traceback
import six
def format_traceback(exc_info, encoding='utf-8'):
    """
    Returns the exception's traceback in a nice format.

    :param exc_info: (exc_class, exc_value, traceback) triple as returned
        by sys.exc_info().
    :param encoding: encoding used when force-decoding traceback lines.
    :return: text form of the traceback (unicode on both Py2 and Py3).
    """
    ec, ev, tb = exc_info
    # Skip test runner traceback levels
    while tb and _is_relevant_tb_level(tb):
        tb = tb.tb_next
    # Our exception object may have been turned into a string, and Python
    # 3's traceback.format_exception() doesn't take kindly to that (it
    # expects an actual exception object). So we work around it, by doing
    # the work ourselves if ev is not an exception object.
    if isinstance(ev, BaseException):
        return ''.join(
            force_text(line, encoding, errors='replace')
            for line
            in traceback.format_exception(ec, ev, tb)
        )
    else:
        tb_data = ''.join(
            force_text(line, encoding, errors='replace')
            for line
            in traceback.format_tb(tb)
        )
        # Bug fix: six has no attribute `text_format`; the intended check is
        # six.text_type (str on Py3, unicode on Py2).
        if not isinstance(ev, six.text_type):
            ev = force_text(repr(ev))
        return tb_data + ev
import os
def creatadata(datadir=None, exprmatrix=None, expermatrix_filename="matrix.mtx", is_mtx=True,
               cell_info=None, cell_info_filename="barcodes.tsv",
               gene_info=None, gene_info_filename="genes.tsv", project_name=None):
    """
    Construct an AnnData object.

    Construct an AnnData object from data in memory or files on disk. If
    datadir is a dir, it must at least include "matrix.mtx" (is_mtx=True)
    or a tab-separated text matrix without any column or row names.

    :param datadir: directory containing matrix/barcodes/genes files.
    :param exprmatrix: in-memory n*p (cell x gene) expression matrix,
        used when datadir is None.
    :param project_name: stored in adata.uns["ProjectName"]
        (default "DEC_clust_algorithm").
    :return: the constructed AnnData object.
    """
    if datadir is None and exprmatrix is None:
        raise ValueError("Please provide either the expression matrix or the ful path to the expression matrix!!")
    if datadir is not None:
        cell_and_gene_file = [f for f in os.listdir(datadir) if os.path.isfile(os.path.join(datadir, f))]
        if is_mtx:  # sparse matrix-market file
            print("Start to read expression data (matrix.mtx)")
            x = sc.read_mtx(os.path.join(datadir, expermatrix_filename)).X.T
        else:  # dense text matrix without any header or row names
            # Bug fix: header=F is an R-ism (NameError in Python); use header=None.
            x = pd.read_csv(os.path.join(datadir, expermatrix_filename), sep="\t", header=None)
        if cell_info_filename in cell_and_gene_file:
            cell_info = pd.read_csv(os.path.join(datadir, cell_info_filename), sep="\t", header=0, na_filter=False)
        if gene_info_filename in cell_and_gene_file:
            gene_info = pd.read_csv(os.path.join(datadir, gene_info_filename), sep="\t", header=0, na_filter=False)
    else:
        x = exprmatrix  # n*p matrix, cell x gene
    # Bug fix: the original generated default names when info WAS provided
    # (inverted `is not None` condition) and referenced x before it was
    # assigned; generate defaults only when missing, after x is known.
    if cell_info is None:
        cell_info = pd.DataFrame(["cell_" + str(i) for i in range(1, x.shape[0] + 1)], columns=["cellname"])
    if gene_info is None:
        gene_info = pd.DataFrame(["gene_" + str(i) for i in range(1, x.shape[1] + 1)], columns=["genename"])
    adata = sc.AnnData(x, obs=cell_info, var=gene_info)
    # Bug fix: cell names were computed into an unused local `a`; actually
    # assign them to obs_names like the gene names below.
    adata.obs_names = adata.obs["cellname"] if "cellname" in adata.obs.keys() else adata.obs.index
    adata.var_names = adata.var["genename"] if "genename" in adata.var.keys() else adata.var.index
    adata.obs_names_make_unique(join="-")
    adata.var_names_make_unique(join="-")
    adata.uns["ProjectName"] = "DEC_clust_algorithm" if project_name is None else project_name
    return adata
def test_declarative_region_modifier_zoom_in():
    """Test that '+' suffix on area string properly decreases extent of map."""
    data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
    # Contour 700 hPa temperature from the NARR sample file.
    contour = ContourPlot()
    contour.data = data
    contour.field = 'Temperature'
    contour.level = 700 * units.hPa
    panel = MapPanel()
    # Each trailing '+' zooms the named region in one step.
    panel.area = 'sc++'
    panel.layers = ['coastline', 'borders', 'usstates']
    panel.plots = [contour]
    pc = PanelContainer()
    pc.size = (8.0, 8)
    pc.panels = [panel]
    pc.draw()
    # Return the figure for image-comparison by the test framework.
    return pc.figure
def get_report(path):
    """
    Downloads the MVP report.

    :param path: filename of the report, resolved inside Parameters.TMP_DIR
    :return: flask response streaming the file
    """
    # send_from_directory guards against path traversal outside TMP_DIR.
    return flask.send_from_directory(Parameters.TMP_DIR, path)
def qbinomial(n, k, q = 2):
    """Return the Gaussian (q-)binomial coefficient [n choose k]_q.

    Computed as prod_{j<k} (q^n - q^j) / prod_{j<k} (q^k - q^j); the
    division is exact, so integer arithmetic is used throughout.
    """
    numerator = 1
    denominator = 1
    for j in range(k):
        numerator *= q**n - q**j
        denominator *= q**k - q**j
    return numerator // denominator
def _igraph_from_nxgraph(graph):
    """
    Helper function that converts a networkx graph object into an igraph graph object.
    """
    converted = igraph.Graph()
    # Copy vertices first (with their attributes) so edge endpoints resolve by name.
    for node_id, attrs in graph.nodes(data=True):
        converted.add_vertex(name=str(node_id), species=attrs["specie"], coords=attrs["coords"])
    edge_names = [(str(src), str(dst)) for src, dst in graph.edges()]
    converted.add_edges(edge_names)
    return converted
def _tiramisu_parameters(preset_model='tiramisu-67'):
"""Returns Tiramisu parameters based on the chosen model."""
if preset_model == 'tiramisu-56':
parameters = {
'filters_first_conv': 48,
'pool': 5,
'growth_rate': 12,
'layers_per_block': 4
}
elif preset_model == 'tiramisu-67':
parameters = {
'filters_first_conv': 48,
'pool': 5,
'growth_rate': 16,
'layers_per_block': 5
}
elif preset_model == 'tiramisu-103':
parameters = {
'filters_first_conv': 48,
'pool': 5,
'growth_rate': 16,
'layers_per_block': [4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4]
}
else:
raise ValueError(f'Tiramisu model {preset_model} not available.')
return parameters | 74e2dadf2a6af864b3f9dfec6241bf71833676f8 | 3,630,741 |
import time
import requests
import json
def send_msg(request):
    """
    Send a WeChat message via the web client API.
    :param request: Django request; expects GET params ``toUser`` and ``msg``
    :return: HttpResponse 'ok' (the raw API response is only printed)
    """
    to_user = request.GET.get('toUser')
    msg = request.GET.get('msg')
    # pass_ticket obtained during the earlier login handshake
    url = 'https://wx.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsg?lang=zh_CN&pass_ticket=%s' %(TICKET_DICT['pass_ticket'],)
    # millisecond timestamp doubles as the client-side message id
    ctime = str(int(time.time()*1000))
    post_dict = {
        'BaseRequest': {
            'DeviceID': "e402310790089148",
            'Sid':TICKET_DICT['wxsid'],
            'Uin':TICKET_DICT['wxuin'],
            'Skey':TICKET_DICT['skey'],
        },
        "Msg": {
            'ClientMsgId': ctime,
            'Content': msg,
            'FromUserName':USER_INIT_DICT['User']['UserName'],
            'LocalID': ctime,
            'ToUserName': to_user.strip(),
            'Type': 1
        },
        'Scene':0
    }
    # response = requests.post(url=url,json=post_dict,cookies=ALL_COOKIE_DICT)
    # Serialize manually with ensure_ascii=False so Chinese text is sent as UTF-8.
    response = requests.post(url=url,data=bytes(json.dumps(post_dict,ensure_ascii=False),encoding='utf-8'))
    print(response.text)
    return HttpResponse('ok')
import os
import stat
def get_type(path, follow=True, name_pri=100):
    """Returns type of file indicated by path.
    path :
        pathname to check (need not exist)
    follow :
        when reading file, follow symbolic links
    name_pri :
        Priority to do name matches.  100=override magic
    This tries to use the contents of the file, and falls back to the name. It
    can also handle special filesystem objects like directories and sockets.
    """
    update_cache()
    try:
        if follow:
            st = os.stat(path)
        else:
            st = os.lstat(path)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit. Any stat failure (missing file, bad path) falls back
        # to name-based detection.
        t = get_type_by_name(path)
        return t or text
    if stat.S_ISREG(st.st_mode):
        # Regular file: contents at high priority, then name, then contents
        # again at low priority.
        t = get_type_by_contents(path, min_pri=name_pri)
        if not t: t = get_type_by_name(path)
        if not t: t = get_type_by_contents(path, max_pri=name_pri)
        if t is None:
            # Any executable bit set => treat as an application, else text.
            if stat.S_IMODE(st.st_mode) & 0o111:
                return app_exe
            else:
                return text
        return t
    elif stat.S_ISDIR(st.st_mode): return inode_dir
    elif stat.S_ISCHR(st.st_mode): return inode_char
    elif stat.S_ISBLK(st.st_mode): return inode_block
    elif stat.S_ISFIFO(st.st_mode): return inode_fifo
    elif stat.S_ISLNK(st.st_mode): return inode_symlink
    elif stat.S_ISSOCK(st.st_mode): return inode_socket
    return inode_door
import os
def ELA(impath, Quality=90, Multiplier=15, Flatten=True):
    """
    Main driver for ELA algorithm.
    Args:
        impath: Path to image to be transformed.
        Quality (optional, default=90): the quality in which to recompress the image. (0-100 integer).
        Multiplier (optional, default=15): value with which to multiply the residual to make it more visible. (Float).
        Flatten (optional, default=True): Boolean. Describes whether to flatten OutputMap.
    Returns:
        OutputMap: Output of ELA algorithm.
    """
    ImIn = np.double(cv2.imread(impath))
    resave_path = 'tmpResave.jpg'
    try:
        cv2.imwrite(resave_path, ImIn, [cv2.IMWRITE_JPEG_QUALITY, Quality])
        ImJPG = np.double(cv2.imread(resave_path))
    finally:
        # Always remove the temporary re-save; the original leaked the file
        # whenever imwrite/imread raised.
        if os.path.exists(resave_path):
            os.remove(resave_path)
    # Residual between the original and the re-compressed copy, amplified.
    OutputMap = (np.abs(ImIn - ImJPG)) * Multiplier
    # Swap BGR -> RGB channel order (OpenCV loads BGR).
    OutputMap[:, :, [0, 2]] = OutputMap[:, :, [2, 0]]
    if Flatten is True:
        OutputMap = np.mean(OutputMap, 2)
    return OutputMap
import yaml
def create_deployment_for_compin(compin: ManagedCompin, assessment: bool = False) -> str:
    """
    Creates a Kubernetes deployment YAML descriptor for a provided compin. The compin's deployment template is enhanced
    in the following ways:
        (1) A node selector is added in order to ensure that Kubernetes scheduler deploys this compin in the selected
            node.
        (2) The image pull secret of the application is added to the descriptor so that the image of the compin would
            be downloaded.
        (3) The port for MiddlewareAgent is opened
    :param compin: A compin to create the deployment descriptor for
    :param assessment: Specify if compin will run in assessment cloud
    :return: Kubernetes deployment descriptor converted to string
    """
    # safe_load: plain yaml.load() without an explicit Loader is unsafe and is
    # rejected by PyYAML >= 6. These documents are plain YAML, so safe_load
    # is sufficient.
    deployment = yaml.safe_load(DEPLOYMENT_TEMPLATE)
    if compin.component.container_spec != "":
        deployment['spec']['template']['spec']['containers'][0] = yaml.safe_load(compin.component.container_spec)
    name = compin.deployment_name()
    deployment['metadata']['name'] = name
    deployment['spec']['template']['metadata']['labels']['deployment'] = name
    deployment['spec']['selector']['matchLabels']['deployment'] = name
    deployment['metadata']['labels']['deployment'] = name
    pod_spec = deployment['spec']['template']['spec']
    # (1) Pin the pod to the node selected for this compin.
    pod_spec['nodeSelector'] = {HOSTNAME_LABEL: compin.node_name}
    # (2) Image pull secret so the application's image can be fetched.
    pod_spec['imagePullSecrets'] = [{'name': DEFAULT_SECRET_NAME}]
    # (3) Expose the Middleware Agent port on the first container.
    container = pod_spec['containers'][0]
    container.setdefault('ports', []).append(
        {
            'containerPort': middleware.AGENT_PORT,
            'name': 'service-port'
        }
    )
    if assessment:
        # Enable SYS_ADMIN capability needed for CPU measuring
        container['securityContext'] = {'capabilities': {'add': ["SYS_ADMIN"]}}
    return yaml.dump(deployment)
import requests
def titles_request() -> 'Response':
    """Request titles.
    https://wiki.anidb.net/w/API#Anime_Titles
    :return: the HTTP response for the anime-titles dump
    """
    # Explicit timeout: requests has no default, so without one this call
    # could block forever if the AniDB mirror hangs.
    return requests.get(_TITLES, timeout=30)
def get_jds(text):
    """Given a text (string), returns a list of the Journal Descriptors contained"""
    scores = jdi.GetJdiScoresByTextMesh(text, InputFilterOption(LegalWordsOption.DEFAULT_JDI))
    output_filter_option = OutputFilterOption()
    output_filter_option.SetOutputNum(3)
    result = OutputFilter.ProcessText(scores, jdi.GetJournalDescriptors(), output_filter_option).split("\n")
    journal_descriptors = []
    if len(result) > 5:
        # Lines 2-4 hold the top-3 descriptors; column 3 is the score,
        # with the decimal comma normalized to a dot.
        for line in result[2:5]:
            fields = line.split("|")
            score = fields[3].strip().replace(",", ".")
            journal_descriptors.append(score)
    return journal_descriptors
from pathlib import Path
import subprocess
def rpsbproc(results):
    """Convert raw rpsblast results into CD-Search results using rpsbproc.
    Note that since rpsbproc is reliant upon data files that generally are installed in
    the same directory as the executable (and synthaser makes no provisions for them
    being stored elsewhere), we must make sure we have the full path to the original
    executable. If it is called via e.g. symlink, rpsbproc will not find the data files
    it requires and throw an error.
    The CompletedProcess returned by this function contains a standard CD-Search results
    file, able to be parsed directly by the results module.
    """
    exe = get_program_path("rpsbproc")
    command = [str(exe), "-m", "full", "--quiet", "-t", "doms", "-f"]
    # If `results` is an existing file, point rpsbproc at it; otherwise
    # feed the raw text through stdin.
    stdin_payload = results
    if isinstance(results, str) and Path(results).exists():
        command.extend(["-i", results])
        stdin_payload = None
    return subprocess.run(
        command,
        input=stdin_payload,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    )
def replacespecial(string, char_replacement=replacements):
    """Return unicode string with special characters replaced"""
    mapped = (char_replacement[ch] if ch in char_replacement else ch for ch in string)
    return "".join(mapped)
def parse_orcid_response(response):
    """
    This safely digs into the ORCID user summary response and returns consistent dict
    representation independent of the user's visibility settings.
    """
    orcid = get_nested_key(response, "orcid-identifier", "path")
    email = get_nested_key(response, "person", "emails", "email", 0, "email")
    first_name = get_nested_key(response, "person", "name", "given-names", "value")
    last_name = get_nested_key(response, "person", "name", "family-name", "value")
    return {
        "orcid": orcid,
        "email": email,
        "first_name": first_name,
        "last_name": last_name,
    }
def discrete_metropolis_hastings(P, n_samples = 10000, n_iterations = 10000, stepsize = None):
    """
    Perform a random walk in the discrete distribution P (array)
    """
    # Normalize P and interpolate it onto the unit interval.
    total = np.sum(P)
    density = interp1d(np.linspace(0, 1, len(P)), P / total)
    walkers = np.random.uniform(0, 1, n_samples)
    if stepsize is None:
        # default step is proportional to the discretization width
        stepsize = .5 * len(P) ** -1
    print("stepsize:", stepsize)
    for _ in range(n_iterations):
        step = np.random.normal(0, stepsize, n_samples)
        proposal = walkers + step
        # periodic boundaries
        proposal[proposal < 0] += 1
        proposal[proposal > 1] -= 1
        accept = density(proposal) / density(walkers) > np.random.uniform(0, 1, n_samples)
        walkers[accept] = proposal[accept]
    # Map positions in [0, 1) back to discrete bin indices.
    return np.array(walkers * len(P), dtype=int)
def vat(x, logits, model, v, eps, xi=1e-6):
    """
    Generate an adeversarial perturbation.
    Args:
        x: tensor, batch of labeled input images of shape [batch, height, width, channels]
        logits: tensor, holding model outputs of input
        model: tf.keras model
        v: generator, random number generator
        eps: float, small epsilon
        xi: float, small xi
    Returns:
        Adversarial perturbation to be applied to x.
    """
    # v = tf.random.Generator.from_non_deterministic_state()
    # v = tf.random.normal(shape=tf.shape(x))
    # Initial random direction, scaled down to a tiny probe perturbation.
    v = xi * get_normalized_vector(v.normal(shape=tf.shape(x)))
    logits_p = logits
    # Logits of the perturbed input; model(...)[0] — assumes the model's first
    # output holds the logits — TODO confirm against the model definition.
    logits_m = model(x + v, training=True)[0]
    dist = kl_divergence_with_logits(logits_p, logits_m)
    # Gradient of the KL divergence w.r.t. the probe direction gives the
    # steepest-ascent (most adversarial) direction.
    grad = tf.gradients(tf.reduce_mean(dist), [v], aggregation_method=2)[0]
    v = tf.stop_gradient(grad)
    # Final perturbation: unit adversarial direction scaled by eps.
    return eps * get_normalized_vector(v)
def bond_yield(price, face_value, years_to_maturity, coupon=0):
    """Return the annualized yield implied by price and face value.

    NOTE(review): ``coupon`` is accepted but never used, so this is the
    zero-coupon (discount) yield — confirm with callers.
    """
    growth = face_value / price
    return growth ** (1 / years_to_maturity) - 1
import ctypes
def getTime(type_of_clock):
    """
    Arg:
        type_of_clock...int
            case '1': CLOCK_REALTIME;
            case '2': CLOCK_MONOTONIC;
            case '3': CLOCK_MONOTONIC_COARSE;
            case '4': CLOCK_MONOTONIC_RAW;
            case '5': CLOCK_BOOTTIME;
            default: CLOCK_REALTIME;
    Return:
        String e.g. '1521588964.827307889' in nano sec
    """
    global _vtclock
    seconds = ctypes.c_int(0)
    nanos = ctypes.c_int(0)
    # Out-parameters filled in by the native library.
    _vtclock.getTime(ctypes.c_int(type_of_clock), ctypes.byref(seconds), ctypes.byref(nanos))
    return str(seconds.value) + '.' + str(nanos.value)
def ar_gain(alpha):
    """
    Calculate ratio between the standard deviation of the noise term in an AR(1) process and the resultant
    standard deviation of the AR(1) process.
    :param alpha: Parameter of AR(1)
    :return: Ratio between std of noise term and std of AR(1)
    """
    ratio = (1 + alpha) / (1 - alpha)
    return np.sqrt(ratio)
import csv
def fetch_data_2014():
    """Load rows for year 2014 from the air-traffic CSV export.

    Returns the list of rows (each a list of strings) whose fourth
    column equals "2014".
    """
    # `with` guarantees the handle is closed; the original opened the
    # file and never closed it.
    with open("airtraffict.csv", newline="") as handle:
        rows = list(csv.reader(handle))
    return [row for row in rows if row[3] == "2014"]
from collections import Counter
def classifyChord(chordPosition):
    """Classify a chord by its fretted (non-open) positions.

    :param chordPosition: positions of all notes, one per string, e.g.
        ([6, 5], [5, 7], [4, 7], [3, 5], [2, 5]) meaning string 6 fret 5,
        string 5 fret 7, string 4 fret 7, string 3 fret 5, string 2 fret 5.
        All positions are within reach of a human hand.
    :return: the chord type: a list of (fret, count) pairs over all
        non-open notes, most frequent fret first. E.g. the input above
        yields [(5, 3), (7, 2)]. The classification determines how the
        chord is subsequently processed.
    """
    # `Counter` now comes from collections: the `typing.Counter` alias used
    # before is deprecated and scheduled for removal.
    frets = [position[1] for position in chordPosition if position[1] != 0]
    return Counter(frets).most_common()
def clean_columns(data):
    """
    Removes : EventId, KaggleSet, KaggleWeight
    Cast labels to float.

    NOTE(review): "DER_mass_MMC" is also dropped although the docstring
    does not mention it — confirm this is intentional.
    """
    columns_to_drop = ["DER_mass_MMC", "EventId", "KaggleSet", "KaggleWeight"]
    data = data.drop(columns_to_drop, axis=1)
    label_to_float(data)  # Works inplace
    return data
def update_active_boxes(cur_boxes, active_boxes=None):
    """Merge a new [x_min, y_min, x_max, y_max] box into the running union box.

    Args:
        cur_boxes: the new box as [x_min, y_min, x_max, y_max]
        active_boxes: the accumulated box so far, mutated in place; when
            None, cur_boxes itself is returned (and shared with the caller)
    Returns:
        The updated accumulated box.
    """
    if active_boxes is None:
        return cur_boxes
    # mins for the top-left corner, maxes for the bottom-right corner
    for i, pick in enumerate((min, min, max, max)):
        active_boxes[i] = pick(active_boxes[i], cur_boxes[i])
    return active_boxes
def get_user_request():
    """Return the user's json."""
    # Guard: this handler must only run inside the default (empty) namespace.
    # NOTE(review): `assert` is stripped under -O; consider raising instead.
    assert namespace_manager.get_namespace() == ''
    person = get_person()
    values = person.to_dict()
    # Wrapped in an `objects` list for consistency with other endpoints.
    return flask.jsonify(objects=[values])
def paginate_data(counted, limit, offset):
    """
    Custom pagination function.
    :param counted: total number of results
    :param limit: page size
    :param offset: current offset into the results
    :return: {} with next/prev links and page metadata, or None if the
        offset does not map to a valid page
    """
    total_pages = ceil(counted / int(limit))
    current_page = find_page(total_pages, limit, offset)
    if not current_page:
        return None
    base_url = request.url.rsplit("?", 2)[0] + '?limit={0}'.format(limit)
    result = {}
    if current_page < total_pages:
        # First record of the following page, capped at the total count.
        next_start = current_page * limit + 1
        if next_start > counted:
            next_start = counted
        result['next'] = base_url + '&offset={0}'.format(next_start)
    if current_page > 1:
        # First record of the previous page, floored at zero.
        prev_start = offset - limit
        if prev_start <= 1:
            prev_start = 0
        result['prev'] = base_url + '&offset={0}'.format(prev_start)
    result['total_pages'] = total_pages
    result['num_results'] = counted
    result['page'] = current_page
    return result
import torch
def ones(shape, dtype=None):
    """Thin wrapper around `torch.ones`.

    Parameters
    ----------
    shape : tuple of ints
        Shape of output tensor.
    dtype : data-type, optional
        Data type of output tensor, by default None
    """
    result = torch.ones(shape, dtype=dtype)
    return result
def parse_catalog(catalog):
    """parses an atom feed thinking that it is OPDS compliant"""
    parsed = {
        'author' : None,
        'title' : None,
        'links' : [],
        'entries' : [],
        'updated' : None
    }
    for child in catalog:
        tag = child.tag
        if tag == LINK_ELEM:
            parsed['links'].append(parse_link(child))
        elif tag == ENTRY_ELEM:
            parsed['entries'].append(parse_entry(child))
        elif tag == AUTHOR_ELEM:
            parsed['author'] = parse_author(child)
        elif tag == UPDATED_ELEM:
            parsed['updated'] = child.text
        elif tag == TITLE_ELEM:
            # (type, text) pair; type defaults to 'text' per the Atom spec.
            parsed['title'] = (child.get('type', 'text'), child.text)
    return parsed
def get_queue(shares):
    """Transform category sizes to block queue optimally
    catsizes = [cs1, cs2, cs3, cs4] - blocks numbers of each color category
    """
    # Fixed total of 8 blocks distributed across the four categories.
    catsizes = get_sizes(shares, 8)
    # Evenly distributing algorithm using the dimensional method:
    # catsizes = (cs1; cs2; cs3; cs4) is treated as a 4D vector and at each
    # step the coordinate with the smallest delta is advanced.
    fullvec = sum(cs * cs for cs in catsizes)
    passedvec = 0
    point = [0, 0, 0, 0]
    queue = []
    for _ in range(sum(catsizes)):
        deltas = [
            (2 * point[coord] + 1) * fullvec
            - (2 * passedvec + catsizes[coord]) * catsizes[coord]
            for coord in range(4)
        ]
        bestcoord = deltas.index(min(deltas))
        passedvec += catsizes[bestcoord]
        point[bestcoord] += 1
        queue.append(bestcoord)
    return queue
def view_post(request, slug):
    """Render a single published blog post, 404 on drafts or unknown slugs."""
    post = get_object_or_404(Post, slug=slug)
    # Drafts are retrievable by slug but must not be publicly visible.
    if not post.published:
        raise Http404
    context = {'post': post}
    context = __append_common_vars(request, context)
    return render(request, 'blog/view_post.html', context)
def relhum(temperature, mixing_ratio, pressure):
    """This function calculates the relative humidity given temperature, mixing
    ratio, and pressure.
    "Improved Magnus' Form Approx. of Saturation Vapor pressure"
    Oleg A. Alduchov and Robert E. Eskridge
    http://www.osti.gov/scitech/servlets/purl/548871/
    https://doi.org/10.2172/548871
    Args:
        temperature (:class:`numpy.ndarray`, :class:`xr.DataArray`, :obj:`list`, or :obj:`float`):
            Temperature in Kelvin
        mixing_ratio (:class:`numpy.ndarray`, :class:`xr.DataArray`, :obj:`list`, or :obj:`float`):
            Mixing ratio in kg/kg. Must have the same dimensions as temperature
        pressure (:class:`numpy.ndarray`, :class:`xr.DataArray`, :obj:`list`, or :obj:`float`):
            Pressure in Pa. Must have the same dimensions as temperature
    Returns:
        relative_humidity (:class:`numpy.ndarray` or :class:`xr.DataArray`):
            Relative humidity. Will have the same dimensions as temperature
    """
    # If xarray input, pull data and store metadata
    x_out = False
    if isinstance(temperature, xr.DataArray):
        x_out = True
        save_dims = temperature.dims
        save_coords = temperature.coords
        save_attrs = temperature.attrs
    # ensure in numpy array for function call
    temperature = np.asarray(temperature)
    mixing_ratio = np.asarray(mixing_ratio)
    pressure = np.asarray(pressure)
    # ensure all inputs same size
    if np.shape(temperature) != np.shape(mixing_ratio) or np.shape(
            temperature) != np.shape(pressure):
        # NOTE(review): the f-string below has no placeholders; the `f`
        # prefix is redundant.
        raise ValueError(f"relhum: dimensions of inputs are not the same")
    # Delegate the actual computation to the compiled kernel.
    relative_humidity = _relhum(temperature, mixing_ratio, pressure)
    # output as xarray if input as xarray
    if x_out:
        relative_humidity = xr.DataArray(data=relative_humidity,
                                         coords=save_coords,
                                         dims=save_dims,
                                         attrs=save_attrs)
    return relative_humidity
def submit(year, day, part, session=None, input_file=None):
    """
    Puzzle decorator used to submit a solution to advent_of_code server and provide
    result. If input_file is not present then it tries to download file and cache it
    for submiting solution else it require to be provided with input_file path which
    input it can use out
    """
    def _action(function):
        # Wrap the solver in a _Puzzle configured for submission.
        return _Puzzle(function, "submit", year, day, part, session, input_file)
    return _action
def mse(y, y_pred):
    """
    Computes mean squared error.
    Parameters
    ----------
    y: np.ndarray (1d array)
        Target variable of regression problems.
        Number of elements is the number of data samples.
    y_pred: np.ndarray (1d array)
        Predicted values for the given target variable.
        Number of elements is the number of data samples.
    Returns
    -------
    float
        mean squared error
    """
    errors = squared_errors(y, y_pred)
    return np.mean(errors)
from django.db import transaction
from django.db import transaction
# NOTE(review): this function is Python 2 code (statement-form `print`).
def update_items(item_seq, batch_len=500, dry_run=True, start_batch=0, end_batch=None, ignore_errors=False, verbosity=1):
    """Given a sequence (queryset, generator, tuple, list) of dicts run the _update method on them and do bulk_update"""
    stats = collections.Counter()
    # Accept a model class, a queryset/manager, or any iterable.
    try:
        try:
            src_qs = item_seq.objects.all()
        except AttributeError:
            src_qs = item_seq.all()
        N = src_qs.count()
    except AttributeError:
        # NOTE(review): `src_qs` is unbound if the first attribute lookup
        # failed before assignment, and plain iterators have no .count() —
        # this fallback path looks broken; confirm it is ever reached.
        item_seq = iter(src_qs)
        print_exc()
        N = item_seq.count()
    if not N:
        if verbosity > 0:
            print 'No records found in %r' % src_qs
        return N
    # make sure there's a valid last batch number so the verbose messages will make sense
    end_batch = end_batch or int(N / float(batch_len))
    if verbosity > 0:
        print('Updating from a source queryset/model/sequence with %r records...' % N)
        # NOTE(review): `'/%d rows: ' % N or 1` binds as (... % N) or 1, so
        # the `or 1` is dead code.
        widgets = [pb.Counter(), '/%d rows: ' % N or 1, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
        pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
    for batch_num, obj_batch in enumerate(util.generate_batches(item_seq, batch_len=batch_len, start_batch=start_batch)):
        if start_batch + batch_num > end_batch:
            if verbosity > 1:
                print('Stopping before batch {0} because it is not between {1} and {2}'.format(batch_num, start_batch, end_batch))
            break
        # Refresh each record in memory (no save) before the bulk write.
        for obj in obj_batch:
            if verbosity > 2:
                print(repr(obj))
            try:
                if hasattr(obj, '_update'):
                    obj._update(save=False, overwrite=False)
            except:
                if verbosity > 0:
                    print_exc()
                    print 'ERROR: Unable to update record: %r' % obj
                pass
        if verbosity and verbosity < 2:
            # NOTE(review): pbar is only created when verbosity > 0, so this
            # is safe here, but pbar.finish() below runs for any verbosity > 0.
            pbar.update(batch_num * batch_len + len(obj_batch))
        elif verbosity > 1:
            # NOTE(review): this format string has placeholders {0}-{5} but
            # only five arguments are supplied — it raises IndexError when
            # verbosity > 1.
            print('Writing {0} items (of type {1}) from batch {2}. Will stop at batch {3} which is records {4} - {5} ...'.format(
                len(obj_batch), src_qs.model, batch_num, end_batch, min(batch_len * (start_batch + end_batch), N)
            ))
        if not dry_run:
            try:
                bulk_update(obj_batch, ignore_errors=ignore_errors, delete_first=True, verbosity=verbosity)
            except Exception as err:
                # Bulk write failed: roll back, then retry row by row so a
                # single bad record does not lose the whole batch.
                transaction.rollback()
                print '%s' % err
                print 'Attempting to save objects one at a time instead of as a batch...'
                for obj in obj_batch:
                    try:
                        obj.save()
                        stats += collections.Counter(['batch_Error'])
                    except:
                        transaction.rollback()
                        print str(obj)
                        print repr(obj.__dict__)
                        print_exc()
                        stats += collections.Counter(['save_Error'])
                        if not ignore_errors:
                            print_exc()
                            raise
        if batch_num < end_batch:
            # Track short batches (usually only the last one) for reporting.
            if len(obj_batch) != batch_len:
                stats += collections.Counter(['batch_len={0}'.format(len(obj_batch))])
            print('Retrieving {0} {1} items for the next batch, batch number {2}...'.format(
                batch_len, src_qs.model, batch_num + 1))
    if verbosity > 0:
        pbar.finish()
    return stats
def hue_weight(image, neighbor_filter, sigma_I = 0.05):
    """
    Calculate likelihood of pixels in image by their metric in hue.
    Args:
        image: tensor [B, H, W, C]
        neighbor_filter: is tensor list: [rows, cols, vals].
                        where rows, and cols are pixel in image,
                        val is their likelihood in distance.
        sigma_I: sigma for metric of intensity.
    returns:
        SparseTensor properties:\
        indeces: [N, ndims]
        bright_weight: [N, batch_size]
        dense_shape
    """
    indeces, vals, dense_shape = neighbor_filter
    # Flat pixel indices of each neighbor pair.
    rows = indeces[:,0]
    cols = indeces[:,1]
    image_shape = image.get_shape()
    # Number of pixels per image (H * W); used to flatten the hue channel.
    weight_size = image_shape[1].value * image_shape[2].value
    # Scale to [0, 1] before RGB->HSV; channel 0 of HSV is hue.
    hsv_image = tf.image.rgb_to_hsv(image / 255)
    hue_image = hsv_image[:,:,:,0] # [B, H, W]
    hue_image = tf.reshape(hue_image, shape=(-1, weight_size)) # [B, W*H]
    hue_image = tf.transpose(hue_image, [1,0]) # [W*H,B]
    # Hue of each pair's first (Fi) and second (Fj) pixel, per batch item.
    Fi = tf.transpose(tf.nn.embedding_lookup(hue_image, rows),[1,0]) # [B, #elements]
    Fj = tf.transpose(tf.nn.embedding_lookup(hue_image, cols),[1,0]) # [B, #elements]
    # Gaussian affinity in hue, modulated by the spatial likelihood `vals`.
    bright_weight = tf.exp(-(Fi - Fj)**2 / sigma_I**2) * vals
    bright_weight = tf.transpose(bright_weight,[1,0]) # [#elements, B]
    return indeces, bright_weight, dense_shape
def build_blueprint_with_loan_actions(app):
    """Build the Flask blueprint exposing loan action endpoints.

    Reads the ``loan_pid`` entry of ``CIRCULATION_REST_ENDPOINTS`` from the
    app config and registers one POST route per loan transition action.
    :param app: the Flask application whose config is consulted
    :return: the configured ``invenio_circulation`` blueprint
    """
    blueprint = Blueprint(
        'invenio_circulation',
        __name__,
        url_prefix='',
    )
    create_error_handlers(blueprint)
    endpoints = app.config.get('CIRCULATION_REST_ENDPOINTS', [])
    pid_type = 'loan_pid'
    options = endpoints.get(pid_type, {})
    if options:
        # deepcopy: the options dict is mutated downstream; don't touch config.
        options = deepcopy(options)
        serializers = {}
        if 'record_serializers' in options:
            # Map each MIME type to its (possibly import-path) serializer.
            rec_serializers = options.get('record_serializers')
            serializers = {mime: obj_or_import_string(func)
                           for mime, func in rec_serializers.items()}
        loan_actions = LoanActionResource.as_view(
            LoanActionResource.view_name.format(pid_type),
            serializers=serializers,
            ctx=dict(
                links_factory=app.config.get('CIRCULATION_LOAN_LINKS_FACTORY')
            ),
        )
        # One route matching any of the configured transition action names.
        distinct_actions = extract_transitions_from_app(app)
        url = '{0}/<any({1}):action>'.format(
            options['item_route'],
            ','.join(distinct_actions),
        )
        blueprint.add_url_rule(
            url,
            view_func=loan_actions,
            methods=['POST'],
        )
    return blueprint
def featurize_and_to_numpy(featurizer, X_train, y_train, X_test, y_test):
    """
    Featurize the given datasets, and convert to numpy arrays.

    :param featurizer: object with fit/transform methods (fit on train only)
    :param X_train: training features (DataFrame-like)
    :param y_train: training labels (Series-like with .values)
    :param X_test: test features (DataFrame-like)
    :param y_test: test labels, or None when unavailable
    :return: (X_train, y_train, X_test, y_test) as numpy arrays
        (y_test is None when no test labels were given)
    """
    featurizer.fit(X_train)
    X_train_feats = featurizer.transform(X_train)
    X_test_feats = featurizer.transform(X_test)
    # np.float64 replaces the `np.float` alias, which was removed in
    # NumPy 1.24 (it was an alias for builtin float, i.e. float64).
    X_train_np = X_train_feats.astype(np.float64).values
    y_train_np = y_train.values
    X_test_np = X_test_feats.astype(np.float64).values
    y_test_np = None if y_test is None else y_test.values
    return X_train_np, y_train_np, X_test_np, y_test_np
def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises,
                      global_rot_noises):
    """add noise for each box and check collision to make sure noisy bboxes do not collide with other boxes
    loc_noises and rot_noises are some noisy candidates, first successful noisy bbox candidate is chosen.
    :param boxes: [N, 5]
    :param valid_mask: [N] only add noise if valid_mask[i] is True
    :param loc_noises: [N, M, 3]
    :param rot_noises: [N, M]
    :param global_rot_noises: [N, M] global rotation noise per candidate
    :return: success_mask [N] range in [0, M) or -1, the index of successful test
    """
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    # Current 2D corner coordinates of every box; updated when a candidate
    # is accepted so later boxes collide against already-moved ones.
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
    current_corners = np.zeros((4, 2), dtype=boxes.dtype)
    current_box = np.zeros((1, 5), dtype=boxes.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    dst_pos = np.zeros((2, ), dtype=boxes.dtype)
    success_mask = -np.ones((num_boxes, ), dtype=np.int64)
    # Unit-square corner template centered at the origin.
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    corners_norm = corners_norm.reshape(4, 2)
    for i in range(num_boxes):
        if valid_mask[i]:
            # Try each candidate in order; first non-colliding one wins.
            for j in range(num_tests):
                current_box[0, :] = boxes[i]
                # Apply the global rotation by moving the box along the
                # circle of constant radius around the origin.
                current_radius = np.sqrt(boxes[i, 0]**2 + boxes[i, 1]**2)
                current_grot = np.arctan2(boxes[i, 0], boxes[i, 1])
                dst_grot = current_grot + global_rot_noises[i, j]
                dst_pos[0] = current_radius * np.sin(dst_grot)
                dst_pos[1] = current_radius * np.cos(dst_grot)
                current_box[0, :2] = dst_pos
                current_box[0, -1] += (dst_grot - current_grot)
                # Corners of the globally-rotated box.
                rot_sin = np.sin(current_box[0, -1])
                rot_cos = np.cos(current_box[0, -1])
                rot_mat_T[0, 0] = rot_cos
                rot_mat_T[0, 1] = -rot_sin
                rot_mat_T[1, 0] = rot_sin
                rot_mat_T[1, 1] = rot_cos
                current_corners[:] = current_box[0, 2:
                                                 4] * corners_norm @ rot_mat_T + current_box[0, :
                                                                                             2]
                # Per-box rotation noise is applied about the box center.
                current_corners -= current_box[0, :2]
                _rotation_box2d_jit_(current_corners, rot_noises[i, j],
                                     rot_mat_T)
                current_corners += current_box[0, :2] + loc_noises[i, j, :2]
                coll_mat = box_collision_test(
                    current_corners.reshape(1, 4, 2), box_corners)
                # A box never "collides" with itself.
                coll_mat[0, i] = False
                if not coll_mat.any():
                    # Accept: record candidate index and fold the global
                    # displacement/rotation into the per-box noise arrays.
                    success_mask[i] = j
                    box_corners[i] = current_corners
                    loc_noises[i, j, :2] += (dst_pos - boxes[i, :2])
                    rot_noises[i, j] += (dst_grot - current_grot)
                    break
    return success_mask
def calculate_cornea_center_wcs(u1_wcs, u2_wcs, o_wcs, l1_wcs, l2_wcs, R, initial_solution):
    """
    Estimates cornea center using equation 3.11:
        min ||c1(kq1) - c2(kq2)||
    The cornea center should have the same coordinates, however, in the presents of the noise it is not always the case.
    Thus, the task is to find such parameters kq1 and kq2 that will minimize the difference between corneas centers.
    During the calculations all parameters are assumed to be given in the units of World Coordinate System.
    :param u1_wcs: image of corneal reflection center from the light on the left
    :param u2_wcs: image of corneal reflection center from the light on the right
    :param o_wcs: nodal point of camera
    :param l1_wcs: light coordinates on the left
    :param l2_wcs: light coordinates on the right
    :param R: radius of cornea surface
    :return: cornea center
    """
    known = (u1_wcs, u2_wcs, o_wcs, l1_wcs, l2_wcs, R)
    solution = opt.minimize(distance_between_corneas, initial_solution, known)
    kq1, kq2 = solution.x
    # Reconstruct both cornea-center estimates from the optimized parameters.
    c1 = calculate_c(calculate_q(kq1, o_wcs, u1_wcs), l1_wcs, o_wcs, R)
    c2 = calculate_c(calculate_q(kq2, o_wcs, u2_wcs), l2_wcs, o_wcs, R)
    # Average the two estimates to get the final center.
    return (c1 + c2) / 2
def luminosity(S_obs, z, D_L=0, alpha=0):
    """Get radio luminosity with error. Default is LDR2. See
    https://www.fxsolver.com/browse/formulas/Radio+luminosity.
    """
    if D_L == 0:
        # No luminosity distance supplied: derive it from redshift.
        D_L, _ = get_dl_and_kpc_per_asec(z=z)
    k_correction = (1 + z) ** (1 + alpha)
    return 4 * np.pi * S_obs * D_L ** 2 / k_correction
def add_categories_to(gifid, category_id):
    """
    REST-like endpoint to add a category to a bookmarked gif
    :returns: Customized output from GIPHY
    :rtype: json
    """
    # Resolve the requesting user from the auth-token cookie.
    user = (
        models.database.session.query(models.users.User)
        .filter(
            models.users.User.token == flask.request.cookies["X-Auth-Token"]
        )
        .one()
    )
    # The bookmark must belong to this user and reference the given GIF.
    bookmark = (
        models.database.session.query(models.bookmarks.Bookmark)
        .filter(models.bookmarks.Bookmark.giphy_id == gifid)
        .filter(models.bookmarks.Bookmark.user == user)
        .one()
    )
    try:
        # Cross-reference row linking the bookmark to the category; a
        # duplicate insert violates the unique constraint.
        models.database.session.add(
            models.bookmark_xref_models.categories.BookmarkXrefCategory(
                bookmark_id=bookmark.id, category_id=category_id
            )
        )
        models.database.session.commit()
    except sqlalchemy.exc.IntegrityError:
        # This should return a non 200 for already being present
        return flask.make_response("")
    return flask.make_response("")
import numpy
def vtk_image_to_array(vtk_image) :
    """ Create an ``numpy.ndarray`` matching the contents and type of given image.
        If the number of scalars components in the image is greater than 1, then
        the ndarray will be 4D, otherwise it will be 3D.
    """
    exporter = vtkImageExport()
    exporter.SetInput(vtk_image)
    # Extent is (xmin, xmax, ymin, ymax, zmin, zmax); shape is z, y, x ordered.
    x_min, x_max, y_min, y_max, z_min, z_max = vtk_image.GetWholeExtent()
    shape = [z_max - z_min + 1,
             y_max - y_min + 1,
             x_max - x_min + 1]
    n_components = vtk_image.GetNumberOfScalarComponents()
    if n_components > 1:
        shape.append(n_components)
    dtype = vtk.util.numpy_support.get_numpy_array_type(vtk_image.GetScalarType())
    array = numpy.zeros(shape, dtype=dtype)
    # Export fills the pre-allocated buffer in place.
    exporter.Export(array)
    return array
def delete_network_acl(acl_id):
    """Delete a network ACL.

    :param acl_id: id of the network ACL to remove
    :return: the EC2 client's response
    """
    ec2 = get_client("ec2")
    return ec2.delete_network_acl(NetworkAclId=acl_id)
def catalog_sections(context, slug=None, level=3, **kwargs):
    """
    Renders a hierarchical list of catalog categories.
    For each category, the number of products it contains is shown.
    Usage example::
        {% catalog_sections 'section_slug' 2 class='catalog-class' %}
    :param context: template context
    :param slug: slug of the parent category; if omitted, the whole hierarchy is rendered
    :param level: nesting depth to display
    :param kwargs: HTML attributes for the wrapping tag
    :return:
    """
    # Count only active products per section.
    count_products = Count(Case(When(product__active=True, then=Value(1)), output_field=IntegerField()))
    if slug is None:
        sections = Section.objects.annotate(product__count=count_products).all()
        max_level = level - 1
    else:
        # Restrict to the descendants of the requested section.
        section = Section.objects.get(slug=slug)
        sections = section.get_descendants().annotate(product__count=count_products).all()
        max_level = section.level + level
    # Flag the menu entries matching the current request path.
    mark_current_menus(sections, context['request'].path_info)
    return {'sections': sections, 'max_level': max_level, 'data': kwargs}
def get_scrapable_links(
    args, base_url, links_found, context, context_printed, rdf=False
):
    """Filters out anchor tags without href attribute, internal links and
    mailto scheme links

    Args:
        args: parsed CLI options; only ``args.log_level`` is read here
        base_url (string): URL on which the license page will be displayed
        links_found (list): List of all the links found in file
        context: header string printed before any warnings
        context_printed (bool): whether the context header was already printed
        rdf (bool): when True, ``links_found`` holds dicts with "href"/"tag"
            keys instead of parsed anchor tags
    Returns:
        list: valid_anchors - list of all scrapable anchor tags
        list: valid_links - list of all absolute scrapable links
        bool: context_printed
    """
    valid_links = []
    valid_anchors = []
    warnings = []
    for link in links_found:
        if rdf:
            try:
                href = link["href"]
            except KeyError:
                # BUG FIX: the original handler read ``href`` here, which
                # raised NameError on the first href-less link (or silently
                # reused the previous link's href). An href-less link cannot
                # be scraped, so just skip it.
                continue
        else:
            link_text = str(link).replace("\n", "")
            try:
                href = link["href"]
            except KeyError:
                try:
                    assert link["id"]
                except KeyError:
                    try:
                        assert link["name"]
                        warnings.append(
                            f" {'Anchor uses name':<24}{link_text}"
                        )
                    except (KeyError, AssertionError):
                        # narrowed from a bare ``except`` so real errors
                        # (e.g. KeyboardInterrupt) are no longer swallowed
                        warnings.append(
                            f" {'Anchor w/o href or id':<24}{link_text}"
                        )
                continue
            if href == "":
                warnings.append(f" {'Empty href':<24}{link_text}")
                continue
        if href.startswith(("#", "mailto:")):
            # anchor and mailto links are valid, but out of scope
            # (not actionable), so they are skipped without a warning
            continue
        analyze = urlsplit(href)
        valid_links.append(create_absolute_link(base_url, analyze))
        if rdf:
            valid_anchors.append(link["tag"])
        else:
            valid_anchors.append(link)
    # Logging level WARNING or lower
    if warnings and args.log_level <= WARNING:
        print(context)
        print("Warnings:")
        print("\n".join(warnings))
        context_printed = True
    return (valid_anchors, valid_links, context_printed)
def AccuracyTestNMax():
    """[summary]
    Test the accuracy using n-max random points
    """
    polygons = getTestPolygons()
    origin = Point(0, 0)
    separator = "----------------------------"
    trials = 100
    # Reference distances D via the polygon-partitioning method.
    print("---------D---------------")
    for poly in polygons:
        print(DistCalc.DistCalcPART(origin, poly))
    print(separator)
    # d-bar and margin of error of d from nmax random points, 100 trials each.
    for header, nmax in (
        ("---------MOE=0.1 nmax=68------------", 68),
        ("---------MOE=0.05 nmax=271------------", 271),
        ("---------MOE=0.01 nmax=6764------------", 6764),
    ):
        print(header)
        for poly in polygons:
            samples = [0 for _ in range(trials)]
            for trial in range(trials):
                samples[trial], _ = DistCalc.DistCalcRPT(origin, poly, nmax)
            print(fsum(samples) / trials, std(samples) * 1.6449)
        print(separator)
    return 0
import pandas
def read_source_MCI(csv_path: str | None = "volume_sum_icv_site.csv"):
    """
    Load the MRI volume CSV and split it for model training.

    :param csv_path: path to the source CSV; ``None`` falls back to the
        default file (the original crashed inside ``read_csv`` for ``None``
        despite the annotation allowing it)
    :return: ``X_train, X_test, y_train, y_test`` numpy arrays, where X is
        every column from index 2 up to the last two (minus ``age``) and y
        is the final column
    """
    if csv_path is None:
        csv_path = "volume_sum_icv_site.csv"
    source_df = pandas.read_csv(csv_path)
    # .drop(columns=...) returns a fresh frame; the original used
    # ``del`` on an .iloc slice, a chained-indexing hazard in pandas.
    X_df = source_df.iloc[:, 2:-2].drop(columns=["age"])
    y_df = source_df.iloc[:, -1]
    X = X_df.to_numpy()
    y = y_df.to_numpy()
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    return X_train, X_test, y_train, y_test
import os
def get_exp_logger(sess, log_folder):
    """Build a minimal TensorBoard logger writing into *log_folder*."""
    with tf.name_scope('Summary'):
        summary_writer = tf.summary.FileWriter(os.path.join(log_folder), sess.graph)

    class ExperimentLogger():
        def log(self, niter, name, value):
            """Record one scalar *value* under tag *name* at step *niter*."""
            scalar = tf.Summary()
            scalar.value.add(tag=name, simple_value=value)
            summary_writer.add_summary(scalar, niter)

        def flush(self):
            """Flushes results to disk."""
            summary_writer.flush()

    return ExperimentLogger()
import random
def rollDie():
    """Simulate one roll of a fair six-sided die: a random int in 1..6."""
    return random.choice(range(1, 7))
def calc_ari(A, B):
    """Adjusted Rand Index between two clusterings given as {label: members}."""
    membership_a = {member: label for label, members in A.items() for member in members}
    membership_b = {member: label for label, members in B.items() for member in members}
    # Align on the members present in both clusterings.
    paired = pd.DataFrame({'A': membership_a, 'B': membership_b}).dropna()
    return adjusted_rand_score(paired['A'].to_list(), paired['B'].to_list())
def encode_adj(adj, max_prev_node=10, is_full=False):
    """
    Encode an adjacency matrix row by row for sequential graph generation.

    :param adj: n*n matrix; rows are time steps, columns input dimension
    :param max_prev_node: how many predecessor columns to keep per row
    :param is_full: when True, keep all n-1 predecessors (ignores max_prev_node)
    :return: (n-1) x max_prev_node array; row i holds the connections of node
        i+1 to its most recent predecessors, nearest-first
    """
    if is_full:
        max_prev_node = adj.shape[0] - 1
    # Keep only the strictly-lower triangle, then drop the first row/last col.
    n = adj.shape[0]
    lower = np.tril(adj, k=-1)[1:n, 0:n - 1]
    # Truncate each row to its max_prev_node most recent predecessors.
    encoded = np.zeros((lower.shape[0], max_prev_node))
    for row in range(lower.shape[0]):
        src_lo = max(0, row - max_prev_node + 1)
        src_hi = row + 1
        dst_lo = max_prev_node + src_lo - src_hi
        encoded[row, dst_lo:max_prev_node] = lower[row, src_lo:src_hi]
        encoded[row, :] = encoded[row, :][::-1]  # nearest predecessor first
    return encoded
def get_datasource_content_choices(model_name):
    """Get a list (suitable for use with forms.ChoiceField, etc.) of valid datasource content choices."""
    entries = registry["datasource_contents"].get(model_name, [])
    choices = [(entry.content_identifier, entry.name) for entry in entries]
    choices.sort()
    return choices
def setFDKToolsPath(toolName):
	""" On Mac, add std FDK path to sys.environ PATH.
	On all, check if tool is available.

	Returns the tool's path string, or 0 (falsy) when the tool
	cannot be found on PATH.
	"""
	toolPath = 0  # falsy sentinel: tool not located yet
	if sys.platform == "darwin":
		# Ensure the standard per-user FDK install dir is on PATH.
		paths = os.environ["PATH"]
		if "FDK/Tools/osx" not in paths:
			home = os.environ["HOME"]
			fdkPath = ":%s/bin/FDK/Tools/osx" % (home)
			os.environ["PATH"] = paths + fdkPath
	if os.name == "nt":
		# Windows: use cmd's %~$PATH:i expansion to search PATH for the tool.
		p = os.popen("for %%i in (%s) do @echo. %%~$PATH:i" % (toolName))
		log = p.read()
		p.close()
		log = log.strip()
		if log:
			toolPath = log
	else:
		# POSIX: `which` prints the resolved path, empty when absent.
		p = os.popen("which %s" % (toolName))
		log = p.read()
		p.close()
		log = log.strip()
		if log:
			toolPath = log
	if not toolPath:
		# NOTE: Python-2-only print statement; this module predates Py3.
		print """
The script cannot run the command-line program '%s'. Please make sure the AFDKO is installed, and the system environment variable PATH
contains the path the to FDK sub-directory containing '%s'.""" % (toolName, toolName)
return toolPath | 229f79954f5488f7bae067a5ad39a83d5b2fe527 | 3,630,788 |
import os
import builtins
def datasplit(datastream, split_param=None, split_value=0.2):
    """
    Very flexible function for splitting the dataset into train-test or train-test-validation dataset. If datastream
    contains field `filename` - all splitting is performed based on the filename (directories are ommited to simplify
    moving data and split file onto different path). If not - original objects are used.
    :param datastream: datastream to split
    :param split_param: either filename of 'split.txt' file, or dictionary of filenames. If the file does not exist - stratified split is performed, and file is created. If `split_param` is None, temporary split is performed.
    :param split_value: either one value (default is 0.2), in which case train-test split is performed, or pair of `(validation,test)` values
    :return: datastream with additional field `split`
    """
    def mktype(x):
        # Split on the bare filename when available, otherwise on the item itself.
        t = os.path.basename(x['filename']) if 'filename' in x.keys() else x
        if t in split_dict['Test']:
            return SplitType.Test
        elif t in split_dict['Train']:
            return SplitType.Train
        elif 'Validation' in split_dict.keys() and t in split_dict['Validation']:
            return SplitType.Validation
        else:
            return SplitType.Unknown

    # Renamed from `dict`, which shadowed the builtin (the reason the
    # original had to reach for `builtins.dict` in the isinstance check).
    split_dict = None
    if isinstance(split_param, dict):
        split_dict = split_param
    elif isinstance(split_param, str):
        if os.path.isfile(split_param):
            split_dict = read_dict_text(split_param)
    elif split_param:
        # BUG FIX: the original did `raise("...")`, which raises
        # TypeError("exceptions must derive from BaseException") instead
        # of a meaningful error.
        raise TypeError("Wrong type of split_param")
    if not split_dict:
        # No usable split yet: materialize the stream and create one.
        datastream = list(datastream)
        split_dict = make_split(datastream, split_value)
        if isinstance(split_param, str):
            # A string split_param names the file to persist the split in.
            write_dict_text(split_param, split_dict)
    return datastream | fapply('split', mktype)
def upd_p_fdtd_srl_2D_slope(p, p1, p2, fsrc, fsrc2, Nb, c, rho, Ts, dx, Cn,
                            A, B, C, x_in_idx, y_in_idx, x_edges_idx,
                            y_edges_idx, x_corners_idx,
                            y_corners_idx, slope_start):
    """
    This FDTD update is designed for case 5: slope.
    It calculates the pressure at the discrete time n+1 (p),
    as a function of the pressures at time n (p1) and n-1 (p2).
    The stencil follows a Standard RectiLinear (SRL) implementation.
    This update is implemented with a frequency-dependent boundary condition.
    :param p: updated pressure at time n+1, (Pa).
    :type p: 2D numpy array of floats.
    :param p1: current pressure at time n, (Pa).
    :type p1: 2D numpy array of floats.
    :param p2: past pressure at time n-1, (Pa).
    :type p2: 2D numpy array of floats.
    :param fsrc: soft source at time n+1, (Pa).
    :type fsrc: 2D numpy array of floats.
    :param fsrc2: soft source at time n-1, (Pa).
    :type fsrc2: 2D numpy array of floats.
    :param Nb: boundary of the domain (1 if BC, 0 else).
    :type Nb: 2D numpy array of integers.
    :param c: sound speed (m.s-1).
    :type c: float
    :param rho: air density (kg.m-3).
    :type rho: float
    :param Ts: time step (s).
    :type Ts: float
    :param dx: spatial step (m).
    :type dx: float
    :param Cn: Courant number.
    :type Cn: float
    :param A: inertance of the boundary.
    :type A: float
    :param B: stiffness of the boundary.
    :type B: float
    :param C: resistivity of the boundary.
    :type C: float
    :param x_in_idx: x coordinates of the p=0 cells under the slope BC cells
    :type x_in_idx: list of integers
    :param y_in_idx: y coordinates of the p=0 cells under the slope BC cells
    :type y_in_idx: list of integers
    :param x_edges_idx: x coordinates of the slope BC cells that have single
                        face contact, i.e. edges
    :type x_edges_idx: list of integers
    :param y_edges_idx: y coordinates of the slope BC cells that have single
                        face contact, i.e. edges
    :type y_edges_idx: list of integers
    :param x_corners_idx: x coordinates of the slope BC cells that have two
                          faces contact, i.e. corners
    :type x_corners_idx: list of integers
    :param y_corners_idx: y coordinates of the slope BC cells that have two
                          faces contact, i.e. corners
    :type y_corners_idx: list of integers
    :param slope_start: index of the slope start along the x axis
    :type slope_start: int
    :return: the updated pressure at the time n+1, variable p.
    :rtype: 2D numpy array of floats.
    """
    # Hard-zero the outermost frame of the grid before updating the interior.
    j = 0
    p[:, j] = 0
    j = p.shape[1] - 1
    p[:, j] = 0
    i = 0
    p[i, :] = 0
    i = p.shape[0] - 1
    p[i, :] = 0
    # =========================================================================
    # main solver (inside the domain): SRL FDTD <==> Cubic cell FVTD
    # =========================================================================
    # Source-amplitude correction factor; presumably an empirical calibration
    # constant -- TODO confirm the provenance of 3.1739566e3.
    A_corr_src = -1. / 3.1739566e3
    # Nb masks the frequency-dependent boundary terms (A/B/C) so they only
    # act on cells flagged as boundary; interior cells reduce to plain SRL.
    p[1:-1, 1:-1] = \
        1. / (1. + (Nb[1:-1, 1:-1] * Cn * ((A / Ts) + (.5 * B)))) * \
        (
            (
                (Cn ** 2) *
                (p1[2:, 1:-1] + p1[:-2, 1:-1] +
                 p1[1:-1, 2:] + p1[1:-1, :-2] -
                 ((4. - Nb[1:-1, 1:-1]) * p1[1:-1, 1:-1]))
            )
            + (1. + (Nb[1:-1, 1:-1] * Cn * ((A / Ts) - (.5 * C * Ts)))) * 2. *
            p1[1:-1, 1:-1]
            - (1. + (Nb[1:-1, 1:-1] * Cn * ((A / Ts) - (.5 * B)))) *
            p2[1:-1, 1:-1]
        ) - A_corr_src * rho * (c ** 2 * Ts ** 2 / dx ** 2) * \
        (fsrc[1:-1, 1:-1] - fsrc2[1:-1, 1:-1]) / (2 * Ts)
    # =========================================================================
    # Zero pressure for the cells inside the cylinder
    # =========================================================================
    # NOTE(review): the range starts at 1, so the first (x_in_idx, y_in_idx)
    # entry is never zeroed -- confirm this is intentional.
    for ndx_0 in range(1,len(x_in_idx)):
        p[x_in_idx[ndx_0], y_in_idx[ndx_0]] = 0
    # =========================================================================
    # Hamilton's update in DAFx 2014 Eq. (39) with "full cell" implementation
    # =========================================================================
    # Edge-cell
    # Edge cells have one face on the boundary: only 3 neighbours contribute
    # (hence the `3. * p1[i, j]` term in the Laplacian).
    for ndx_co in range(len(x_edges_idx)):
        i = x_edges_idx[ndx_co]
        j = y_edges_idx[ndx_co]
        p[i, j] = \
            1. / (1. + (Nb[i, j] * Cn * ((A / Ts) + (.5 * B)))) * \
            ( ( (Cn ** 2) *
                (p1[i + 1, j] + p1[i - 1, j] +
                 p1[i, j + 1] + p1[i, j - 1] -
                 (3. * p1[i, j])))
              + (1. + (Nb[i, j] * Cn * ((A / Ts) - (.5 * C * Ts)))) * 2. *
              p1[i, j]
              - (1. + (Nb[i, j] * Cn * ((A / Ts) - (.5 * B)))) * p2[i, j])
    # Corner-cell
    # Corner cells touch the boundary on two faces: only 2 neighbours
    # contribute (hence the `2. * p1[i, j]` term).
    for ndx_co in range(len(x_corners_idx)):
        i = x_corners_idx[ndx_co]
        j = y_corners_idx[ndx_co]
        p[i, j] = \
            1. / (1. + (Nb[i, j] * Cn * ((A / Ts) + (.5 * B)))) * \
            ( ( (Cn ** 2) *
                (p1[i + 1, j] + p1[i - 1, j] +
                 p1[i, j + 1] + p1[i, j - 1] -
                 (2. * p1[i, j]) ) )
              + (1. + (Nb[i, j] * Cn * ((A / Ts) - (.5 * C * Ts)))) * 2. *
              p1[i, j]
              - (1. + (Nb[i, j] * Cn * ((A / Ts) - (.5 * B)))) * p2[i, j])
    # =========================================================================
    # B.C. on the frame Dirichlet = perfectly reflecting.
    # =========================================================================
    # On the bottom row only the part before the slope is mirrored; the slope
    # BC cells handle the rest.
    j = 1
    p[:slope_start - 2, j] = p[:slope_start - 2, j + 1]
    j = p.shape[1] - 2
    p[:, j] = p[:, j - 1]
    i = 1
    p[i, :] = p[i + 1, :]
    i = p.shape[0] - 2
    p[i, :] = p[i - 1, :]
return p | bb536af34d655b741ee84c0649adfb2dd76eed4e | 3,630,790 |
from datetime import datetime
import calendar
def PlistValueToPlainValue(plist):
  """Takes the plist contents generated by binplist and returns a plain dict.

  binplist uses rich types to express some of the plist types. We need to
  convert them to types that RDFValueArray will be able to transport.

  Args:
    plist: A plist to convert.

  Returns:
    A simple python type.
  """
  if isinstance(plist, dict):
    # Recursively flatten every value in the mapping.
    return {key: PlistValueToPlainValue(value) for key, value in plist.items()}
  elif isinstance(plist, list):
    return [PlistValueToPlainValue(value) for value in plist]
  elif isinstance(plist, binplist.RawValue):
    return plist.value
  elif isinstance(plist, (binplist.CorruptReference, binplist.UnknownObject)):
    # Unresolvable plist entries carry no usable value.
    return None
  elif isinstance(plist, datetime):
    # BUG FIX: with `from datetime import datetime` in scope, the original
    # `datetime.datetime` reference raised AttributeError. Convert to
    # microseconds since the epoch (UTC).
    return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond
  return plist
def columns_not_to_edit():
    """
    Defines column names that shouldn't be edited.
    """
    ## Occasionally unchanging things like NIGHT or TILEID have been missing in the headers, so we won't restrict
    ## that even though it typically shouldn't be edited if the data is there
    protected_columns = ['EXPID', 'CAMWORD', 'OBSTYPE']
    return protected_columns
def cat_dog(s):
    """Solution of problem at http://codingbat.com/prob/p164876
    >>> cat_dog('catdog')
    True
    >>> cat_dog('catcat')
    False
    >>> cat_dog('1cat1cadodog')
    True
    """
    # 'cat' and 'dog' each consist of distinct letters, so occurrences of
    # a word can never overlap themselves and str.count finds every one --
    # equivalent to scanning with a 3-character sliding window.
    return s.count('cat') == s.count('dog')
def CanCreateGroup(perms):
  """Return True if the given user may create a user group.

  Args:
    perms: Permissionset for the current user.

  Returns:
    True if the user should be allowed to create a group.
  """
  restriction = settings.group_creation_restriction
  # "ANYONE" means anyone who has the needed perm.
  if restriction == site_pb2.UserTypeRestriction.ANYONE:
    return perms.HasPerm(CREATE_GROUP, None, None)
  if restriction == site_pb2.UserTypeRestriction.ADMIN_ONLY:
    return perms.HasPerm(ADMINISTER_SITE, None, None)
  return False
def quad_corner_diff(hull_poly, bquad_poly, region_size=0.9):
    """
    Returns the difference between areas in the corners of a rounded
    corner and the aproximating sharp corner quadrilateral.
    region_size (param) determines the region around the corner where
    the comparison is done.
    """
    corners = np.zeros((4, 2))
    corners[:, 0] = np.asarray(bquad_poly.exterior.coords)[:-1, 0]
    corners[:, 1] = np.asarray(bquad_poly.exterior.coords)[:-1, 1]
    center_x = np.average(corners[:, 0])
    center_y = np.average(corners[:, 1])
    # Offsets from the quad center to each corner.
    dx = corners[:, 0] - center_x
    dy = corners[:, 1] - center_y
    # Point inside the quad, region_size of the way from the center
    # towards each corner.
    inner_x = center_x + region_size * dx
    inner_y = center_y + region_size * dy
    # Endpoints of a chord through the interior point, orthogonal to the
    # corner-to-center direction. Its intersections with the quad, together
    # with the corner itself, bound the triangle where the roundness of the
    # convex hull relative to the bounding quad is evaluated.
    p0_x = inner_x + dy
    p1_x = inner_x - dy
    p0_y = inner_y - dx
    p1_y = inner_y + dx
    hull_corner_area = 0
    quad_corner_area = 0
    for k in range(4):
        chord = LineString([(p0_x[k], p0_y[k]), (p1_x[k], p1_y[k])])
        cut = bquad_poly.intersection(chord)
        triangle = Polygon([cut.coords[0],
                            cut.coords[1],
                            (corners[k, 0], corners[k, 1])])
        quad_corner_area += triangle.area
        hull_corner_area += triangle.intersection(hull_poly).area
    return 1. - hull_corner_area / quad_corner_area
def _context_deleteserver(ip, port, server_name, config=None, disabled=None):
    """Delete a server context.

    Reloads the config when it is missing or marked dirty, locates the
    matching server context, and removes its line range from the file.
    Returns False when no matching context exists.
    """
    needs_reload = config is None or ('_isdirty' in config and config['_isdirty'])
    if needs_reload:
        config = loadconfig(APACHECONF, True)
    scontext = _context_getserver(ip, port, server_name, config=config, disabled=disabled)
    if not scontext:
        return False
    filepath, line_start, line_end = _getcontextrange(scontext, config)
    # The on-disk file is about to diverge from the parsed config.
    config['_isdirty'] = True
    return _delete(filepath, line_start, line_end)
def CreateStyleFromConfig(style_config):
  """Create a style dict from the given config.

  Arguments:
    style_config: either a style name or a file name. The file is expected to
      contain settings. It can have a special BASED_ON_STYLE setting naming the
      style which it derives from. If no such setting is found, it derives from
      the default style. When style_config is None, the _GLOBAL_STYLE_FACTORY
      config is created.

  Returns:
    A style dict.

  Raises:
    StyleConfigError: if an unknown style option was encountered.
  """
  if style_config is None:
    # Keep a customized current style; only rebuild when the current style
    # is one of the stock global presets.
    for preset, _ in _DEFAULT_STYLE_TO_FACTORY:
      if _style == preset:
        return _GLOBAL_STYLE_FACTORY()
    return _style
  if isinstance(style_config, dict):
    config = _CreateConfigParserFromConfigDict(style_config)
  elif isinstance(style_config, py3compat.basestring):
    factory = _STYLE_NAME_TO_FACTORY.get(style_config.lower())
    if factory is not None:
      return factory()
    if style_config.startswith('{'):
      # Most likely a style specification from the command line.
      config = _CreateConfigParserFromConfigString(style_config)
    else:
      # Unknown config name: assume it's a file name then.
      config = _CreateConfigParserFromConfigFile(style_config)
  return _CreateStyleFromConfigParser(config)
def build_weighted_matrix(corpus, tokenizing_func=basic_tokenizer,
                          mincount=300, vocab_size=None, window_size=10,
                          weighting_function=lambda x: 1 / (x + 1)):
    """Builds a count matrix based on a co-occurrence window of
    `window_size` elements before and `window_size` elements after the
    focal word, where the counts are weighted based on proximity to the
    focal word.

    Parameters
    ----------
    corpus : iterable of str
        Texts to tokenize.
    tokenizing_func : function
        Must map strings to lists of strings.
    mincount : int
        Only words with at least this many tokens will be included.
    vocab_size : int or None
        If this is an int above 0, then, the top `vocab_size` words
        by frequency are included in the matrix, and `mincount`
        is ignored.
    window_size : int
        Size of the window before and after. (So the total window size
        is 2 times this value, with the focal word at the center.)
    weighting_function : function from ints to floats
        How to weight counts based on distance. The default is 1/d
        where d is the distance in words.

    Returns
    -------
    pd.DataFrame
        This is guaranteed to be a symmetric matrix, because of the
        way the counts are collected.
    """
    tokenized = [tokenizing_func(text) for text in corpus]
    # Raw token frequencies, used only for vocabulary selection.
    word_freq = defaultdict(int)
    for toks in tokenized:
        for tok in toks:
            word_freq[tok] += 1
    if vocab_size:
        by_freq = sorted(word_freq.items(), key=itemgetter(1), reverse=True)
        vocab_set = {word for word, _ in by_freq[:vocab_size]}
    else:
        vocab_set = {word for word, count in word_freq.items() if count >= mincount}
    vocab = sorted(vocab_set)
    # Proximity-weighted co-occurrence counts.
    weighted_counts = defaultdict(float)
    for toks in tokenized:
        for w, w_c, val in _window_based_iterator(toks, window_size,
                                                  weighting_function):
            if w in vocab_set and w_c in vocab_set:
                weighted_counts[(w, w_c)] += val
    size = len(vocab)
    X = np.zeros((size, size))
    for i, w1 in enumerate(vocab):
        for j, w2 in enumerate(vocab):
            X[i, j] = weighted_counts[(w1, w2)]
    return pd.DataFrame(X, columns=vocab, index=pd.Index(vocab))
def checksum_data_16bit(data):
    """Calculate 16 bit checksum (really just summing up shorts) over a chunk.

    Consecutive characters are paired little-endian (first char is the low
    byte) into 16-bit shorts which are summed modulo 2**16. A trailing odd
    character is dropped, matching the original zip() behaviour.

    BUG FIX: the original used a `lambda (x, y):` tuple parameter and a
    bare `reduce`, both removed in Python 3 (PEP 3113); this version keeps
    the identical arithmetic with portable syntax.
    """
    checksum = 0
    for low, high in zip(*[iter(data)] * 2):
        checksum = (checksum + ((ord(high) << 8) | ord(low))) & 0xFFFF
    return checksum
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.