| content | id |
|---|---|
def test_genUniqueDigits(base=10, maxBasePow=4):
"""
Run fairly comprehensive test on genUniqueDigits(), digitsToInt(),
digitsToStr(), and intToStr() in specified base
Test involves incrementing digits from 0 to base**maxBasePow
"""
maxNum = base**maxBasePow
genDigitsAll = genUniqueDigits(base=base, exclude0=False, leading0=True,
sortedDigits=False, repeatDigits=True,
minNumDigits=1, maxNumDigits=float('inf'),
maxDigit=None)
genDigitsInt = genUniqueDigits(base=base, leading0=False)
genDigitsSorted = genUniqueDigits(base=base, sortedDigits=True,
leading0=False)
genDigitsSortNoReps = genUniqueDigits(base=base, sortedDigits=True,
leading0=False,repeatDigits=False)
genThreePlus = genUniqueDigits(base=base, repeatDigits=False, leading0=True,
minNumDigits=3, maxNumDigits=maxBasePow)
genPal = genPalindromes(maxDigit=maxBasePow, base=base)
for n in range(maxNum):
# test genDigits() and digitsToInt()
trueDigits = list(genDigits(n, base=base, leastFirst=False))
nDigits = digitsToInt(trueDigits, base=base)
testAssert( nDigits == n,
"genDigits(%d, base=%d) = %s, and digitsToInt(%s) -> %d != %d"
% (n, base, str(trueDigits), str(trueDigits), nDigits, n)
)
# test intToStr() and digitsToStr()
nStr = intToStr(n, base=base)
digitStr = digitsToStr(trueDigits, base=base)
testAssert( nStr == digitStr,
"intToStr(%d, base=%d) != digitsToStr(%s, base=%d)"
% (n, base, str(trueDigits), base)
)
# test genUniqueDigits:
# -with no leading 0s (i.e. generate integer digits)
digits = next(genDigitsInt)
testAssert( trueDigits == digits,
"genUniqueDigits(base=%d) generated %s instead of %s"
% (base, str(digits), str(trueDigits))
)
# -with leading 0s (i.e. generate all possible digit sequences in order)
allDigits = next(genDigitsAll)
if allDigits[0] == 0 and n > 0:
testAssert( allDigits[1:] == digits[1:],
"allDigits(base=%d) generated %s instead of %s"
% (base, str(allDigits), str([0] + digits[1:]))
)
for dummy in range(-1 + base**(len(allDigits)-1)):
next(genDigitsAll)
allDigits = next(genDigitsAll)
testAssert( allDigits == digits,
"allDigits(base=%d) generated %s instead of %s"
% (base, str(allDigits), str(digits))
)
# -with digits sorted into decreasing order
isSorted = (digits == sorted(digits, reverse=True))
noRepeats = (len(set(digits)) == len(digits))
if isSorted:
# digits are in sorted order, repeats allowed
sDigits = next(genDigitsSorted)
testAssert( sDigits == digits,
"sortedDigits(base=%d) generated %s instead of %s"
% (base, str(sDigits), str(digits))
)
if noRepeats:
# digits are sorted, and no digit may repeat
sNoRepDigits = next(genDigitsSortNoReps)
testAssert( sNoRepDigits == digits,
"sortedNoRepeatDigits(base=%d) generated %s instead of %s"
% (base, str(sNoRepDigits), str(digits))
)
# -with minimum three digits and maximum maxBasePow digits, no repeats
if getNumDigits(n, base=base) >= 3 and noRepeats:
tpDigits = next(genThreePlus)
if n > 0:
while(tpDigits[0] == 0):
tpDigits = next(genThreePlus)
testAssert( tpDigits == digits,
"threePlusDigits(base=%d) generated %s instead of %s"
% (base, str(tpDigits), str(digits))
)
        # test isPalindrome() and genPalindromes()
if isPalindrome(n, base):
pal = next(genPal)
testAssert( pal == n,
"genPalendromes(base=%d) generated %s instead of %s"
% (base, intToStr(pal,base=base), nStr)
)
try:
tpDigits = next(genThreePlus)
raise AssertionError( "genUniqueDigits did not generate all threePlus,"
" still had not produced %s" % str(tpDigits) )
except StopIteration:
pass
try:
palStr = intToStr(next(genPal), base=base)
raise AssertionError( "genPalendromes did not generate all palendromes,"
" still had not produced %s" % palStr )
except StopIteration:
pass
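
For context, here is a minimal sketch of the digit helpers this test exercises; the signatures are inferred from the calls above and are assumptions, not the original implementations.

def genDigits(n, base=10, leastFirst=True):
    """Yield the digits of n in the given base (assumed behaviour)."""
    if n == 0:
        yield 0
        return
    digits = []
    while n:
        digits.append(n % base)
        n //= base
    yield from (digits if leastFirst else reversed(digits))

def digitsToInt(digits, base=10):
    """Interpret a most-significant-first digit list as an integer."""
    value = 0
    for d in digits:
        value = value * base + d
    return value

def digitsToStr(digits, base=10):
    """Render a digit list as a string, using letters for digits >= 10."""
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
    return "".join(alphabet[d] for d in digits)

def intToStr(n, base=10):
    """Render an integer as a string in the given base."""
    return digitsToStr(list(genDigits(n, base=base, leastFirst=False)), base=base)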
| 24,300
|
def find_edges(mesh, key):
""" Temp replacement for mesh.findEdges().
This is painfully slow.
"""
for edge in mesh.edges:
v = edge.vertices
if key[0] == v[0] and key[1] == v[1]:
return edge.index
| 24,301
|
def thumbnail(link):
"""
Returns the URL to a thumbnail for a given identifier.
"""
targetid, service = _targetid(link), _service(link)
if targetid:
if service in _OEMBED_MAP:
try:
return _embed_json(service, targetid)["thumbnail_url"]
except (ValueError, KeyError):
return None
elif service == "bandcamp":
# Sometime in the future, parse the HTML for the image_src meta tag
return None
return None
| 24,302
|
def valve_gas_cv(m_dot, p_1, p_2, m_molar, T):
"""Find the required valve Cv for a given mass flow and pressure drop.
Assumes that a compressible gas is flowing through the valve.
Arguments:
m_dot (scalar): Mass flow rate [units: kilogram second**-1].
p_1 (scalar): Inlet pressure [units: pascal].
p_2 (scalar): Outlet pressure [units: pascal].
m_molar (scalar): Gas molar mass [units: kilogram mole**-1].
T (scalar): Gas temperature [units: kelvin].
Returns:
scalar: Valve flow coefficient Cv [units: gallon minute**-1 psi**-1].
"""
# Specific gravity of the gas [units: dimensionless]:
spec_grav = m_molar / proptools.constants.m_molar_air
# Convert gas flow to standard cubic feet per hour
flow_scfh = m_dot_to_scfh(m_dot, m_molar)
# Determine if the flow is choked.
# Checking if `p_1 >= 2 * p_2` is suggested by [1].
# There is a more accurate choked flow criterion which depends
# on the ratio of specific heats.
choked = p_1 >= 2 * p_2
if choked:
cv = flow_scfh / 0.08821 * (spec_grav * T)**0.5 / p_1
else:
cv = flow_scfh / 0.1040 * (spec_grav * T / (p_1**2 - p_2**2))**0.5
return cv
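
Restated in equation form, the two branches above compute (with $G$ the specific gravity, $T$ the temperature, and $\dot{Q}_{\mathrm{scfh}}$ the flow in standard cubic feet per hour):

$$
C_v =
\begin{cases}
\dfrac{\dot{Q}_{\mathrm{scfh}}}{0.08821\,p_1}\sqrt{G\,T}, & p_1 \ge 2\,p_2 \ \text{(choked)}\\[1.5ex]
\dfrac{\dot{Q}_{\mathrm{scfh}}}{0.1040}\sqrt{\dfrac{G\,T}{p_1^2 - p_2^2}}, & \text{otherwise.}
\end{cases}
$$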
| 24,303
|
def frequency_based_dissim(record, modes):
"""
Frequency-based dissimilarity function
inspired by "Improving K-Modes Algorithm Considering Frequencies of Attribute Values in Mode" by He et al.
"""
list_dissim = []
for cluster_mode in modes:
sum_dissim = 0
        for i in range(len(record)):
            if record[i] != cluster_mode.attrs[i]:
                # Mismatch: full dissimilarity of 1.
                sum_dissim += 1
            else:
                # Match: penalty shrinks with the frequency of the modal value.
                sum_dissim += 1 - cluster_mode.attr_frequencies[i]
list_dissim.append(sum_dissim)
return list_dissim
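
A small, self-contained usage sketch; the `Mode` class below is hypothetical and only mirrors the two attributes the function reads (`attrs` and `attr_frequencies`).

from dataclasses import dataclass
from typing import List

@dataclass
class Mode:
    attrs: List[str]                  # modal value of each attribute
    attr_frequencies: List[float]     # relative frequency of that value in the cluster

record = ["red", "small", "round"]
modes = [
    Mode(attrs=["red", "large", "round"], attr_frequencies=[0.9, 0.6, 0.7]),
    Mode(attrs=["blue", "small", "square"], attr_frequencies=[0.5, 0.8, 0.4]),
]
# Matches contribute 1 - frequency, mismatches contribute 1:
print(frequency_based_dissim(record, modes))  # approximately [1.4, 2.2]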
| 24,304
|
def _process_create_group(event: dict) -> list:
""" Process CreateGroup event. This function doesn't set tags. """
return [event['responseElements']['group']['groupName']]
| 24,305
|
def tree_labels(t: Node):
"""Collect all labels of a tree into a list."""
def f(label: Any, folded_subtrees: List) -> List:
return [label] + folded_subtrees
def g(folded_first: List, folded_rest: List) -> List:
return folded_first + folded_rest
return foldtree(f, g, [], t)
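
A minimal sketch of the `Node`/`foldtree` pair this function assumes, following the usual tree-catamorphism signature (f combines a label with its folded children, g combines sibling results, and the third argument seeds an empty child list); this is an assumption, not the source's definition.

from dataclasses import dataclass, field
from typing import Any, Callable, List

@dataclass
class Node:
    label: Any
    children: List["Node"] = field(default_factory=list)

def foldtree(f: Callable, g: Callable, acc, t: Node):
    folded_children = acc
    for child in reversed(t.children):
        folded_children = g(foldtree(f, g, acc, child), folded_children)
    return f(t.label, folded_children)

tree = Node(1, [Node(2), Node(3, [Node(4)])])
print(tree_labels(tree))  # [1, 2, 3, 4]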
| 24,306
|
def find_best_split(rows):
"""Find the best question to ask by iterating over every feature / value
and calculating the information gain."""
best_gain = 0 # keep track of the best information gain
    best_question = None  # keep track of the feature / value that produced it
current_uncertainty = gini(rows)
n_features = len(rows[0]) - 1 # number of columns
for col in range(n_features): # for each feature
values = set([row[col] for row in rows]) # unique values in the column
for val in values: # for each value
question = Question(col, val)
# try splitting the dataset
true_rows, false_rows = partition(rows, question)
# Skip this split if it doesn't divide the
# dataset.
if len(true_rows) == 0 or len(false_rows) == 0:
continue
# Calculate the information gain from this split
gain = info_gain(true_rows, false_rows, current_uncertainty)
# You actually can use '>' instead of '>=' here
# but I wanted the tree to look a certain way for our
# toy dataset.
if gain >= best_gain:
best_gain, best_question = gain, question
return best_gain, best_question
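
For reference, minimal sketches of the `gini` and `info_gain` helpers this function relies on, following the standard CART definitions (assumed, not taken from the source):

def class_counts(rows):
    """Count how many rows carry each label (assumed to be the last column)."""
    counts = {}
    for row in rows:
        counts[row[-1]] = counts.get(row[-1], 0) + 1
    return counts

def gini(rows):
    """Gini impurity: 1 - sum of squared label probabilities."""
    counts = class_counts(rows)
    impurity = 1.0
    for label in counts:
        p = counts[label] / float(len(rows))
        impurity -= p ** 2
    return impurity

def info_gain(true_rows, false_rows, current_uncertainty):
    """Parent impurity minus the weighted impurity of the two children."""
    p = float(len(true_rows)) / (len(true_rows) + len(false_rows))
    return current_uncertainty - p * gini(true_rows) - (1 - p) * gini(false_rows)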
| 24,307
|
def pad_data(data, context_size, target_size, pad_at_begin= False):
"""
Performs data padding for both target and aggregate consumption
:param data: The aggregate power
:type data: np.array
:param context_size: The input sequence length
:type context_size: int
:param target_size: The target sequence length
:type target_size: int
:param pad_at_begin: Specified how the padded values are inserted, defaults to False
:type pad_at_begin: bool, optional
:return: The padded aggregate power.
:rtype: np.array
"""
sequence_length = context_size + target_size
units_to_pad = sequence_length // 2
padding = (context_size,target_size) if pad_at_begin else (units_to_pad,units_to_pad+1)
if data.ndim==1:
new_mains = np.pad(data, padding,'constant',constant_values=(0,0))
return new_mains
else:
new_mains = []
for i in range(data.shape[-1]):
new_mains.append(np.pad(data[:,i], padding,'constant',constant_values=(0,0)))
return np.stack(new_mains).T
| 24,308
|
def create_callbacks(path):
"""
Creates the callbacks to use during training.
Args
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
# save the model
# ensure directory created first; otherwise h5py will error after epoch.
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
path,
'efdet_model.h5'
),
verbose=1,
save_weights_only=True,
# save_best_only=True,
# monitor="mAP",
# mode='max'
)
callbacks.append(checkpoint)
# callbacks.append(keras.callbacks.ReduceLROnPlateau(
# monitor='loss',
# factor=0.1,
# patience=2,
# verbose=1,
# mode='auto',
# min_delta=0.0001,
# cooldown=0,
# min_lr=0
# ))
return callbacks
| 24,309
|
def SaveAsImageFile(preferences, image):
"""Save the current image as a PNG picture."""
extension_map = {"png": wx.BITMAP_TYPE_PNG}
extensions = extension_map.keys()
wildcard = create_wildcard("Image files", extensions)
dialog = wx.FileDialog(None, message="Export to Image",
wildcard=wildcard, style=wx.FD_SAVE)
saved = False
if dialog.ShowModal() == wx.ID_OK:
path, extension = extend_path(dialog.GetPath(), extensions, "png")
overwrite_question = "File '{:s}' exists. Overwrite?".format(path)
if not os.path.exists(path) or ShowYesNoQuestion(dialog, preferences, overwrite_question) == wx.YES:
image.SaveFile(path, extension_map[extension])
saved = True
dialog.Destroy()
return saved
| 24,310
|
def egg_translator(cell):
"""If the cell has the DNA for harboring its offspring inside it, granting it additional food
and protection at the risk of the parent cell, it is an egg.
Active DNA: x,A,(C/D),x,x,x
"""
dna = cell.dna.split(',')
    # The second gene must be 'A' and the third either 'C' or 'D'.
    return dna[1] == 'A' and dna[2] in ('C', 'D')
| 24,311
|
def all_different_cst(xs, cst):
"""
all_different_cst(xs, cst)
Ensure that all elements in xs + cst are distinct
"""
return [AllDifferent([(x + c) for (x,c) in zip(xs,cst)])]
| 24,312
|
def test_no_remaining_worker():
"""Runner stops if we have not more trials to run"""
idle_timeout = 2
pop_time = 1
runner = new_runner(idle_timeout)
runner.pending_trials[0] = None
def no_more_trials():
time.sleep(pop_time)
runner.pending_trials = dict()
runner.trials = 2
start = time.time()
thread = Thread(target=no_more_trials)
thread.start()
# Lazy worker is not raised because we have executed
# the max number of trials on this worker
runner.run()
elapsed = time.time() - start
assert (
int(elapsed - pop_time) == 0
), "Runner terminated gracefully once max trials was reached"
runner.client.close()
| 24,313
|
def processing(task, region: dict, raster: str, parameters: dict):
"""
Cuts the raster according to given region and applies some filters
in order to find the district heating potentials and
related indicators.
    Inputs:
    * region: selected zone where the district heating potential is studied.
    * raster: raster of the heat demand.
    * parameters: the pixel and area thresholds.
    Outputs:
    * Indicators
    * Graphics: potential of the areas that pass the filters.
    * Layer: areas that pass the filters.
"""
with TemporaryDirectory(dir=settings.TESTDATA_DIR) as temp_dir:
clipped_raster = join(temp_dir, "raster_tmp.tif")
clip_raster(src=raster, shapes=region, dst=clipped_raster)
(
geo_transform,
total_heat_demand,
areas,
filtered_map,
total_potential,
areas_potential,
) = get_areas(
heat_density_map=clipped_raster,
pixel_threshold=parameters["Heat demand in hectare (MWh/ha)"],
district_heating_zone_threshold=parameters[
"Heat demand in a DH zone (GWh/year)"
],
)
dst_raster = join(temp_dir, "dst.tif")
write_raster(
map_array=filtered_map,
projection=get_projection(geofile=clipped_raster),
geotransform=geo_transform,
dst=dst_raster,
)
raster_name = "areas.tif"
with open(dst_raster, mode="rb") as raster_fd:
task.post_raster(raster_name=raster_name, raster_fd=raster_fd)
response = get_response(
total_potential=total_potential,
total_heat_demand=total_heat_demand,
areas_potential=areas_potential,
raster_name=raster_name,
)
validate(response)
return response
| 24,314
|
def disconnect(event):
"""Attempts to disconnect from the remote server"""
elements.TOOLBAR.connect_button.SetLabel('Connect')
elements.TOOLBAR.send_button.Disable()
elements.TOOLBAR.run_button.Disable()
elements.TOOLBAR.shutdown_button.Disable()
elements.TOOLBAR.restart_button.Disable()
elements.TOOLBAR.connect_button.Bind(wx.EVT_BUTTON, connect)
elements.REMOTE_SERVER.disconnect()
elements.REMOTE_SERVER.remote_server = None
elements.MAIN.SetStatusText('Disconnected')
| 24,315
|
def test_questionnaire_10(base_settings):
"""No. 10 tests collection for Questionnaire.
Test File: elementdefinition-de-questionnaire.json
"""
filename = (
base_settings["unittest_data_dir"] / "elementdefinition-de-questionnaire.json"
)
inst = questionnaire.Questionnaire.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Questionnaire" == inst.resource_type
impl_questionnaire_10(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Questionnaire" == data["resourceType"]
inst2 = questionnaire.Questionnaire(**data)
impl_questionnaire_10(inst2)
| 24,316
|
def randbit():
"""Returns a random bit."""
return random.randrange(2)
| 24,317
|
def calc_points(goals, assists):
"""
Calculate the total traditional and weighted points for all
players, grouped by player id.
Author: Rasmus Säfvenberg
Parameters
----------
goals : pandas.DataFrame
        A data frame with total goals and weighted goals per player.
assists : pandas.DataFrame
A data frame with total assists and weighted assists per player.
Returns
-------
points : pandas.DataFrame
A data frame with total points and weighted points per player.
"""
# Specify columns to keep for merging
goals = goals[["PlayerId", "PlayerName", "Position", "Goals", "WeightedGoals"]]
assists = assists[["PlayerId", "PlayerName", "Position", "Assists", "WeightedAssists"]]
# Combine goals and assists
points = goals.merge(assists, on=["PlayerId", "PlayerName", "Position"],
how="outer")
# Fill missing values with 0 (some players only score goals etc.)
points.fillna(0, inplace=True)
# Calculate points = goals + assists
points["Points"] = points["Goals"] + points["Assists"]
# Calculate weighted points = weighted goals + weighted assists
points["WeightedPoints"] = points["WeightedGoals"] + points["WeightedAssists"]
# Sort by weighted points
points.sort_values("WeightedPoints", ascending=False, inplace=True)
return points
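
A hedged usage sketch with toy data frames shaped like the inputs the docstring describes (the column values below are made up):

import pandas as pd

goals = pd.DataFrame({
    "PlayerId": [1, 2], "PlayerName": ["A", "B"], "Position": ["C", "D"],
    "Goals": [10, 4], "WeightedGoals": [8.5, 3.2],
})
assists = pd.DataFrame({
    "PlayerId": [1, 3], "PlayerName": ["A", "E"], "Position": ["C", "L"],
    "Assists": [7, 12], "WeightedAssists": [6.1, 9.8],
})
points = calc_points(goals, assists)
print(points[["PlayerName", "Points", "WeightedPoints"]])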
| 24,318
|
def get_links(browser, elemento):
"""
Pega todos os links dentro de um elemento
- browser = a instância do navegador
- element = ['aside', main, body, ul, ol]
"""
resultado = {}
element = browser.find_element_by_tag_name(elemento)
ancoras = element.find_elements_by_tag_name('a')
for ancora in ancoras:
resultado[ancora.text] = ancora.get_attribute('href')
return resultado
| 24,319
|
def file_to_base64(path):
"""
Convert specified file to base64 string
Args:
path (string): path to file
Return:
string: base64 encoded file content
"""
with io.open(path, 'rb') as file_to_convert:
return base64.b64encode(file_to_convert.read())
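
A quick usage sketch that round-trips a temporary file through the helper:

import base64
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
encoded = file_to_base64(tmp.name)
assert base64.b64decode(encoded) == b"hello world"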
| 24,320
|
def get_simverb(subset=None):
"""
Get SimVerb-3500 data
:return: (pairs, scores)
"""
simverb = []
if subset == 'dev':
name = '500-dev'
elif subset == 'test':
name = '3000-test'
else:
name = '3500'
with open('../data/SimVerb-3500/SimVerb-{}.txt'.format(name)) as f:
f.readline() # first line is headings
for line in f:
simverb.append(line.strip().split('\t'))
all_pairs = [(x[0], x[1]) for x in simverb]
all_scores = np.array([float(x[3]) for x in simverb])
return (all_pairs, all_scores)
| 24,321
|
def sample_image(size, min_r, max_r, circles, squares, pixel_value):
"""Generate image with geometrical shapes (circles and squares).
"""
img = np.zeros((size, size, 2))
loc = []
if pixel_value is None:
vals = np.random.randint(0, 256, circles + squares)
else:
vals = [pixel_value] * (circles + squares)
for f, v in zip(["c"] * circles + ["s"] * squares, vals):
r = np.random.randint(min_r, max_r + 1)
xc, yc = np.random.randint(r, size - r + 1, 2)
if f == "c":
mask = circle(xc, yc, r, (size, size))
if f == "s":
mask = polygon((xc - r, xc + r, xc + r, xc - r),
(yc - r, yc - r, yc + r, yc + r), (size, size))
img[:, :, ["c", "s"].index(f)][mask] = v
loc.append([xc, yc, r])
return img, np.array(loc)
| 24,322
|
def stage_input_file(workdir_path, files):
"""Stage an input file into the working directory whose path
is in workdir_path. Uses the basename if given. Recursively
stages secondary files.
Adds a 'path' key with the path to the File objects in files.
Args:
workdir_path (str): Path to the working directory
files (Union[dict,[dict]]): A dictionary with a CWL File \
object, or a list of such.
"""
if not isinstance(files, list):
files = [files]
for file_dict in files:
location = urlparse(file_dict['location'])
if 'basename' in file_dict:
dest_path = os.path.join(workdir_path, file_dict['basename'])
else:
dest_path = os.path.join(workdir_path, os.path.basename(location.path))
shutil.copy(location.path, dest_path)
file_dict['path'] = dest_path
        for secondary_file in file_dict.get('secondaryFiles', []):
            stage_input_file(workdir_path, secondary_file)
| 24,323
|
def host_allocations(auth):
"""Retrieve host allocations"""
response = API.get(auth, '/os-hosts/allocations')
return response.json()['allocations']
| 24,324
|
def rosenbrock_grad(x, y):
"""Gradient of Rosenbrock function."""
return (-400 * x * (-(x ** 2) + y) + 2 * x - 2, -200 * x ** 2 + 200 * y)
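
A small usage sketch: plain gradient descent driven by the gradient above (the step size and iteration count are arbitrary choices):

def rosenbrock_descent(x=0.0, y=0.0, lr=1e-3, steps=50000):
    for _ in range(steps):
        gx, gy = rosenbrock_grad(x, y)
        x -= lr * gx
        y -= lr * gy
    return x, y

print(rosenbrock_descent())  # slowly approaches the global minimum at (1, 1)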
| 24,325
|
def vic2nc(options, global_atts, domain_dict, fields):
""" Convert ascii VIC files to netCDF format"""
# determine run mode
if (options['memory_mode'] == 'standard') \
and (options['chunksize'] in ['all', 'All', 'ALL', 0]):
memory_mode = 'big_memory'
else:
memory_mode = options['memory_mode']
print("\n-------------------------------")
print("Configuration File Options")
print("-------------OPTIONS-------------")
for pair in options.items():
print("{0}: {1}".format(*pair))
print('Fields: {0}'.format(", ".join(fields.keys())))
if domain_dict:
print("-------------DOMAIN--------------")
for pair in domain_dict.items():
print("{0}: {1}".format(*pair))
print("--------GLOBAL_ATTRIBUTES--------")
for pair in global_atts.items():
print("{0}: {1}".format(*pair))
print("--------RUN MODE--------")
print('Memory Mode: {0}'.format(memory_mode))
if memory_mode == 'standard':
print('Chunksize={0}'.format(options['chunksize']))
print("---------------------------------\n")
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Make output directory
if not os.path.exists(options['out_directory']):
os.makedirs(options['out_directory'])
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Make pairs (i.e. find inds)
files = glob(options['input_files'])
points = get_file_coords(files)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Get target grid information
if domain_dict:
domain = read_domain(domain_dict)
target_grid_file = path.split(domain_dict['filename'])[1]
global_atts['target_grid_file'] = target_grid_file
else:
# must be a regular grid, build from file names
domain = calc_grid(points.get_lats(), points.get_lons())
target_grid_file = None
domain_dict = {'y_x_dims': ['lat', 'lon']}
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Get grid index locations
points = get_grid_inds(domain, points)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Get timestamps
if options['input_file_format'].lower() == 'ascii':
if ('bin_start_date' in options
and 'bin_end_date' in options
and 'bin_dt_sec' in options):
vic_datelist, vic_ordtime = make_dates(
options['bin_start_date'],
options['bin_end_date'],
options['bin_dt_sec'],
calendar=options['calendar'])
else:
vic_datelist = get_dates(files[0])
vic_ordtime = date2num(vic_datelist, TIMEUNITS,
calendar=options['calendar'])
elif options['input_file_format'].lower() in ['binary', 'netcdf']:
vic_datelist, vic_ordtime = make_dates(options['bin_start_date'],
options['bin_end_date'],
options['bin_dt_sec'],
calendar=options['calendar'])
else:
        raise ValueError('Unknown input file format: {}. Valid options are '
                         'ascii, binary or netcdf'
                         .format(options['input_file_format']))
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Determine time segmentation
if options['start_date']:
start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM)
if start_date < vic_datelist[0]:
print("WARNING: Start date in configuration file is before "
"first date in file.")
start_date = vic_datelist[0]
print('WARNING: New start date is {0}'.format(start_date))
else:
start_date = vic_datelist[0]
if options['end_date']:
end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM)
if end_date > vic_datelist[-1]:
print("WARNING: End date in configuration file is after "
"last date in file.")
end_date = vic_datelist[-1]
print('WARNING: New end date is {0}'.format(end_date))
else:
end_date = vic_datelist[-1]
# Ordinal Time
start_ord = date2num(start_date, TIMEUNITS, calendar=options['calendar'])
end_ord = date2num(end_date, TIMEUNITS, calendar=options['calendar'])
print("netCDF Start Date: {0}".format(start_date))
print("netCDF End Date: {0}".format(end_date))
segment_dates = []
if options['time_segment'] == 'day':
# calendar insensitive
num_segments = np.ceil(end_ord - start_ord)
if start_date.hour == 0:
segment_dates = num2date(np.arange(start_ord, end_ord + 1, 1),
TIMEUNITS, calendar=options['calendar'])
else:
# allow start at time other than 0
            temp = np.concatenate(([start_ord],
                                   np.arange(np.ceil(start_ord), end_ord + 1, 1)))
segment_dates = num2date(temp, TIMEUNITS,
calendar=options['calendar'])
elif options['time_segment'] == 'month':
num_segments = (end_date.year - start_date.year) * 12 \
+ end_date.month - start_date.month + 1
month = start_date.month
year = start_date.year
for i in pyrange(num_segments + 1):
segment_dates.append(datetime(year, month, 1))
month += 1
if month == 13:
month = 1
year += 1
elif options['time_segment'] == 'year':
num_segments = end_date.year - start_date.year + 1
year = start_date.year
for i in pyrange(num_segments + 1):
segment_dates.append(datetime(year, 1, 1))
year += 1
elif options['time_segment'] == 'decade':
        num_segments = (end_date.year - start_date.year) // 10 + 1
year = start_date.year
for i in pyrange(num_segments + 1):
segment_dates.append(datetime(year, 1, 1))
year += 10
elif options['time_segment'] == 'all':
num_segments = 1
segment_dates = [start_date, end_date]
else:
        raise ValueError('Unknown time_segment option: '
                         '{0}'.format(options['time_segment']))
print("Number of files: {0}".format(len(segment_dates) - 1))
assert len(segment_dates) == num_segments + 1
# Make sure the first and last dates are start/end_date
segment_dates[0] = start_date
segment_dates[-1] = end_date + timedelta(minutes=1)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Setup Segments
segments = deque()
for num in pyrange(num_segments):
# Segment time bounds
t0 = segment_dates[num]
t1 = segment_dates[num + 1]
# Get segment inds
i0 = bisect_left(vic_datelist, t0)
i1 = bisect_left(vic_datelist, t1)
# Make segment filename (with path)
if options['time_segment'] == 'day':
filename = "{0}.{1}.nc".format(options['out_file_prefix'],
t0.strftime('%Y-%m-%d'))
elif options['time_segment'] == 'month':
filename = "{0}.{1}.nc".format(options['out_file_prefix'],
t0.strftime('%Y-%m'))
elif options['time_segment'] == 'year':
filename = "{0}.{1}.nc".format(options['out_file_prefix'],
t0.strftime('%Y'))
elif options['time_segment'] == 'all':
filename = "{0}.{1}-{2}.nc".format(options['out_file_prefix'],
t0.strftime('%Y%m%d'),
t1.strftime('%Y%m%d'))
filename = path.join(options['out_directory'], filename)
# Setup segment and initialize netcdf
segment = Segment(num, i0, i1, options['out_file_format'],
filename, memory_mode=memory_mode)
segment.nc_globals(**global_atts)
segment.nc_time(t0, t1, vic_ordtime, options['calendar'])
segment.nc_dimensions(snow_bands=options['snow_bands'],
veg_tiles=options['veg_tiles'],
soil_layers=options['soil_layers'])
segment.nc_domain(domain)
segment.nc_fields(fields,
domain_dict['y_x_dims'], options['precision'])
print(repr(segment))
segments.append(segment)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Get column numbers and names (will help speed up reading)
names = []
usecols = []
dtypes = []
bin_dtypes = []
bin_mults = []
if options['precision'] == 'double':
prec = NC_DOUBLE
else:
prec = NC_FLOAT
for name, field in fields.items():
if not np.isscalar(field['column']):
# multiple levels
for i, col in enumerate(field['column']):
names.append(name + str(i))
usecols.append(col)
if 'type' in field:
if type(field['type']) == list:
dtypes.extend(field['type'])
else:
dtypes.extend([field['type']] * len(field['column']))
else:
                dtypes.extend([prec] * len(field['column']))
if options['input_file_format'].lower() == 'binary':
if 'bin_dtype' in field:
if type(field['bin_dtype']) == list:
bin_dtypes.extend(field['bin_dtype'])
else:
bin_dtypes.extend([field['bin_dtype']] *
len(field['column']))
else:
raise ValueError('bin_dtype not in field: {}'.format(name))
if 'bin_mult' in field:
if type(field['bin_mult']) == list:
bin_mults.extend(field['bin_mult'])
else:
bin_mults.extend([field['bin_mult']] *
len(field['column']))
else:
bin_mults.extend([1.0] * len(field['column']))
else:
# no levels
names.append(name)
usecols.append(field['column'])
if 'type' in field:
dtypes.append(field['type'])
else:
dtypes.append(prec)
if options['input_file_format'].lower() == 'binary':
if 'bin_dtype' in field:
bin_dtypes.append(field['bin_dtype'])
else:
raise ValueError('bin_dtype not in field: {}'.format(name))
if 'bin_mult' in field:
bin_mults.append(field['bin_mult'])
else:
bin_mults.append(1.0)
print('setting point attributes (fileformat, names, usecols, and dtypes)')
# pandas.read_table does not 'honor' the order of the columns in usecols
# it simply uses them in ascending order. So the names need to be sorted
# the same way. For example, if the columns in the VIC file are:
# 3: prcp; 4: evap; 5: runoff; 6; baseflow; 7: sm1; 8: sm2; 9: sm3; 10: swe
# and this is parsed from the configuration file as
# usecols = [3, 4, 5, 6, 10, 7, 8, 9]
# names=['prcp', 'evap', 'runoff', 'baseflow', 'swe', 'sm1', 'sm2', 'sm3']
# then without sorting, the netcdf file will have the wrong variables:
# nc_swe will contain sm1, nc_sm1 will contain sm2, nc_sm2: sm3 and
# nc_swe: sm3
# the following will ensure that the names are sorted in increasing column
# order. Note that sorted(usecols) is not strictly necessary, since
# apparently that is done in read_table, but it keeps the names and columns
# in the same order
names = [x for (y, x) in sorted(pyzip(usecols, names))]
usecols = sorted(usecols)
points.set_names(names)
points.set_usecols(usecols)
points.set_dtypes(dtypes)
# set binary attributes
if options['input_file_format'].lower() == 'binary':
points.set_bin_dtypes(bin_dtypes)
points.set_bin_mults(bin_mults)
points.set_fileformat(options['input_file_format'])
print('done')
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
if memory_mode == 'big_memory':
# ------------------------------------------------------------ #
# run in big memory mode
for i, segment in enumerate(segments):
segments[i].allocate()
while points:
point = points.popleft()
point.open()
point.read()
point.close()
for segment in segments:
segment.nc_add_data_to_array(point)
for segment in segments:
segment.nc_write_data_from_array()
segment.nc_close()
# ------------------------------------------------------------ #
elif memory_mode == 'standard':
# ------------------------------------------------------------ #
# Open VIC files and put data into netcdfs
chunk = Plist()
while points:
point = points.popleft()
point.open()
point.read()
point.close()
chunk.append(point)
if len(chunk) > int(options['chunksize']) or len(points) == 0:
for segment in segments:
segment.nc_add_data_standard(chunk)
chunk = Plist()
del point
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Close the netcdf files
for segment in segments:
segment.nc_close()
# ------------------------------------------------------------ #
elif memory_mode == 'original':
# ------------------------------------------------------------ #
# Run in original memory mode (a.k.a. vic2nc.c mode)
# Open all files
for point in points:
point.open()
while segments:
segment = segments.popleft()
segment.allocate()
count = segment.count
for point in points:
point.read(count)
segment.nc_add_data_to_array(point)
segment.nc_write_data_from_array()
segment.nc_close()
for point in points:
point.close()
# ------------------------------------------------------------ #
return
| 24,326
|
def get_file(
fname,
origin,
untar=False,
cache_subdir="datasets",
extract=False,
archive_format="auto",
cache_dir=None,
):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Arguments:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras
Directory](/faq/#where-is-the-keras-configuration-filed-stored).
Returns:
Path to the downloaded file
"""
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser("~"), ".keras")
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + ".tar.gz"
else:
fpath = os.path.join(datadir, fname)
    download = not os.path.exists(fpath)
if download:
print("Downloading data from", origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = "URL fetch failure on {}: {} -- {}"
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format="tar")
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
| 24,327
|
def extractYoujinsite(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if '[God & Devil World]' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'Shenmo Xitong', vol, chp, frag=frag, postfix=postfix)
if '[LBD&A]' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'Line between Devil and Angel', vol, chp, frag=frag, postfix=postfix)
if '[VW: Conquer the World]' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'VW: Conquering the World', vol, chp, frag=frag, postfix=postfix)
return False
| 24,328
|
def test_deprecated_properties(
obs_st: dict[str, Any], caplog: LogCaptureFixture
) -> None:
"""Test a warning is loggeed for deprecated properties."""
device = TempestDevice(serial_number=TEMPEST_SERIAL_NUMBER, data=obs_st)
assert device.model == "Tempest"
assert device.serial_number == TEMPEST_SERIAL_NUMBER
assert device.hub_sn == HUB_SERIAL_NUMBER
device.parse_message(obs_st)
assert device.rain_amount_previous_minute == 0.01 * UNIT_MILLIMETERS_PER_MINUTE
assert (
"The property 'rain_amount_previous_minute' has been deprecated" in caplog.text
)
device.calculate_sea_level_pressure(height=STATION_ALTITUDE)
assert (
"The parameter 'height' has been renamed to `altitude` to reduce ambiguity."
in caplog.text
)
| 24,329
|
def make_executable(p):
"""
Make file in given path executable
Source:
http://stackoverflow.com/a/33179977/3023841
"""
st = os.stat(p)
os.chmod(p, st.st_mode | 0o111)
| 24,330
|
def get_patient_dirs(root_dir):
"""
    Function used to get the root directory for each patient
    :param root_dir: root directory of all image data
:return patient_paths: list of all patient paths, one for each patient
"""
search_path = os.path.join(root_dir, '[0-1]', '*')
patient_paths = glob.glob(search_path)
return patient_paths
| 24,331
|
def _download(path: str, url: str):
"""
Gets a file from cache or downloads it
Args:
path: path to the file in cache
url: url to the file
Returns:
path: path to the file in cache
"""
if url is None:
return None
if not os.path.exists(path):
master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
if not os.path.exists(path):
if master_device:
os.makedirs(MEGATRON_CACHE, exist_ok=True)
logging.info(f"Downloading from {url}")
wget.download(url, path)
# wait until the master process downloads the file and writes it to the cache dir
if torch.distributed.is_initialized():
torch.distributed.barrier()
return path
| 24,332
|
def get_logger() -> Logger:
""" This function returns the logger for this project """
return getLogger(LOGGER_NAME)
| 24,333
|
def eagle(ctx, alloc, walltime, feature, stdout_path):
"""Eagle submission tool for PLEXOS aggregation."""
name = ctx.obj['NAME']
plexos_table = ctx.obj['PLEXOS_TABLE']
sc_table = ctx.obj['SC_TABLE']
cf_fpath = ctx.obj['CF_FPATH']
out_dir = ctx.obj['OUT_DIR']
dist_percentile = ctx.obj['DIST_PERCENTILE']
lcoe_col = ctx.obj['LCOE_COL']
lcoe_thresh = ctx.obj['LCOE_THRESH']
max_workers = ctx.obj['MAX_WORKERS']
points_per_worker = ctx.obj['POINTS_PER_WORKER']
plants_per_worker = ctx.obj['PLANTS_PER_WORKER']
offshore = ctx.obj['OFFSHORE']
verbose = ctx.obj['VERBOSE']
if stdout_path is None:
stdout_path = os.path.join(out_dir, 'stdout/')
slurm_manager = ctx.obj.get('SLURM_MANAGER', None)
if slurm_manager is None:
slurm_manager = SLURM()
ctx.obj['SLURM_MANAGER'] = slurm_manager
cmd = get_node_cmd(name, plexos_table, sc_table, cf_fpath, out_dir,
dist_percentile, lcoe_col, lcoe_thresh, max_workers,
points_per_worker, plants_per_worker, offshore, verbose)
logger.info('Running reVX plexos plant aggregation on Eagle with '
'node name "{}"'.format(name))
out = slurm_manager.sbatch(cmd, alloc=alloc, walltime=walltime,
feature=feature, name=name,
stdout_path=stdout_path)[0]
if out:
msg = ('Kicked off reVX plexos aggregation job "{}" '
'(SLURM jobid #{}) on Eagle.'
.format(name, out))
else:
msg = ('Was unable to kick off reVX plexos aggregation job "{}". '
'Please see the stdout error messages'
.format(name))
click.echo(msg)
logger.info(msg)
| 24,334
|
def test_space_features_operations(space_object):
"""Test for get, add, update and delete features operations."""
gdf = space_object.get_features(feature_ids=["DEU", "ITA"], geo_dataframe=True)
assert isinstance(gdf, gpd.GeoDataFrame)
# get two features
data = space_object.get_features(feature_ids=["DEU", "ITA"])
assert isinstance(data, GeoJSON)
space_object.delete_features(feature_ids=["DEU", "ITA"])
res = space_object.add_features(features=data)
assert isinstance(res, GeoJSON)
data["features"][0]["id"] = "test1"
data["features"][1]["id"] = "test2"
res = space_object.update_features(features=data, add_tags=["foo", "bar"])
assert isinstance(res, GeoJSON)
| 24,335
|
def write_guess_json(guesser, filename, fold, run_length=200, censor_features=["id", "label"], num_guesses=5):
"""
Returns the vocab, which is a list of all features.
"""
vocab = [kBIAS]
print("Writing guesses to %s" % filename)
num = 0
with open(filename, 'w') as outfile:
total = len(fold)
for qq in fold:
num += 1
if num % (total // 80) == 0:
print('.', end='', flush=True)
runs = qq.runs(run_length)
for rr in runs[0]:
guesses = guesser.guess([rr], max_n_guesses=num_guesses)
for raw_guess in guesses[0]:
gg, ss = raw_guess
guess = {"id": qq.qanta_id,
"guess:%s" % gg: 1,
"run_length": len(rr)/1000,
"score": ss,
"label": qq.page==gg,
"category:%s" % qq.category: 1,
"year:%s" % qq.year: 1}
for ii in guess:
# Don't let it use features that would allow cheating
if ii not in censor_features and ii not in vocab:
vocab.append(ii)
outfile.write(json.dumps(guess, sort_keys=True))
outfile.write("\n")
print("")
return vocab
| 24,336
|
def _remove_overloaded_functions(asts):
"""Cython cannot handle overloaded functions, we will take the first one."""
functions = [n for ast in asts for n in ast.nodes
if isinstance(n, Function)]
function_names = []
removed_functions = []
for f in functions:
if f.name in function_names:
warnings.warn(
"Function '%s' is already defined. Only one method "
"will be exposed." % f.name)
removed_functions.append(f)
else:
function_names.append(f.name)
for ast in asts:
ast.nodes = [n for n in ast.nodes if n not in removed_functions]
| 24,337
|
def upgrade_to_4g(region, strategy, costs, global_parameters,
core_lut, country_parameters):
"""
Reflects the baseline scenario of needing to build a single dedicated
network.
"""
backhaul = '{}_backhaul'.format(strategy.split('_')[2])
sharing = strategy.split('_')[3]
geotype = region['geotype'].split(' ')[0]
# generation_core_backhaul_sharing_networks_spectrum_tax
network_strategy = strategy.split('_')[4]
networks = country_parameters['networks']['baseline' + '_' + geotype]
if network_strategy == 'srn' and geotype == 'rural':
sharing = 'cns'
shared_assets = INFRA_SHARING_ASSETS[sharing]
assets = {
'single_sector_antenna': costs['single_sector_antenna'],
'single_remote_radio_unit': costs['single_remote_radio_unit'],
'io_fronthaul': costs['io_fronthaul'],
'processing': costs['processing'],
'io_s1_x2': costs['io_s1_x2'],
'control_unit': costs['control_unit'],
'cooling_fans': costs['cooling_fans'],
'distributed_power_supply_converter': costs['distributed_power_supply_converter'],
'bbu_cabinet': costs['bbu_cabinet'],
'installation': costs['installation'],
'site_rental': costs['site_rental_{}'.format(geotype)],
'router': costs['router'],
'backhaul': get_backhaul_costs(region, backhaul, costs, core_lut),
'core_edge': core_costs(region, 'core_edge', costs, core_lut, strategy, country_parameters),
'core_node': core_costs(region, 'core_node', costs, core_lut, strategy, country_parameters),
'regional_edge': regional_net_costs(region, 'regional_edge', costs, core_lut, strategy, country_parameters),
'regional_node': regional_net_costs(region, 'regional_node', costs, core_lut, strategy, country_parameters),
'per_site_spectrum_acquisition_cost': costs['per_site_spectrum_acquisition_cost'],
'per_site_administration_cost': costs['per_site_administration_cost'],
}
cost_structure = {}
for key, value in assets.items():
if not key in shared_assets:
cost_structure[key] = value
else:
if network_strategy == 'srn' and geotype == 'rural':
value = value * (1 / networks)
cost_structure[key] = value
else:
value = value / networks
cost_structure[key] = value
return cost_structure
| 24,338
|
def get_url(request):
"""
Use devId and key and some hashing thing to get the url, needs /v3/api as input
"""
devId = DEV_ID
key = KEY
request = request + ('&' if ('?' in request) else '?')
raw = request + f"devid={devId}"
raw = raw.encode()
hashed = hmac.new(key, raw, sha1)
signature = hashed.hexdigest()
raw = raw.decode()
return 'http://timetableapi.ptv.vic.gov.au'+raw+f'&signature={signature}'
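
A self-contained sketch of the same HMAC-SHA1 signing scheme with placeholder credentials (DEV_ID and KEY below are hypothetical, not real values):

import hmac
from hashlib import sha1

DEV_ID = "1234567"                              # hypothetical developer id
KEY = b"00000000-0000-0000-0000-000000000000"   # hypothetical signing key

def sign_ptv_request(request: str) -> str:
    request += '&' if '?' in request else '?'
    raw = f"{request}devid={DEV_ID}"
    signature = hmac.new(KEY, raw.encode(), sha1).hexdigest()
    return f"http://timetableapi.ptv.vic.gov.au{raw}&signature={signature}"

print(sign_ptv_request("/v3/routes"))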
| 24,339
|
def test_atom_order_in_mol_copy(toolkit, smiles):
"""Test that atom orders do not change when copying molecule"""
import copy
mol = utils.load_molecule(smiles, toolkit=toolkit)
if not utils.has_explicit_hydrogen(mol):
mol = utils.add_explicit_hydrogen(mol)
molcopy = copy.deepcopy(mol)
for a1, a2 in zip(mol.GetAtoms(), molcopy.GetAtoms()):
if toolkit == 'openeye':
assert a1.GetIdx() == a2.GetIdx()
assert a1.GetName() == a2.GetName()
assert a1.GetMapIdx() == a2.GetMapIdx()
if toolkit == 'rdkit':
assert a1.GetIdx() == a2.GetIdx()
assert a1.GetAtomMapNum() == a2.GetAtomMapNum()
assert a1.GetSmarts() == a2.GetSmarts()
| 24,340
|
def _is_mapped_class(cls):
"""Return True if the given object is a mapped class,
:class:`.Mapper`, or :class:`.AliasedClass`."""
if isinstance(cls, (AliasedClass, mapperlib.Mapper)):
return True
if isinstance(cls, expression.ClauseElement):
return False
if isinstance(cls, type):
manager = attributes.manager_of_class(cls)
return manager and _INSTRUMENTOR in manager.info
return False
| 24,341
|
def get_all_links_in_catalog(html) -> list:
"""Получает список всех ссылок на пункты из каталога."""
_soup = BeautifulSoup(html, 'html.parser')
_items = _soup.find('div', class_='catalog_section_list').find_all('li', class_='name')
links_list = []
for item in _items:
links_list.append(item.find('a', class_='dark_link').get('href'))
return links_list
| 24,342
|
def digitize(n):
"""Convert a number to a reversed array of digits."""
l = list(str(n))
n_l = []
for d in l:
n_l.append(int(d))
n_l.reverse()
return n_l
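
An equivalent one-liner, kept as a sketch for comparison (same reversed-digit convention as above):

def digitize_short(n):
    """Convert a number to a reversed list of its digits."""
    return [int(d) for d in reversed(str(n))]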
| 24,343
|
def run_example(device_id, do_plot=False):
"""
Run the example: Connect to the device specified by device_id and obtain
impedance data using ziDAQServer's blocking (synchronous) poll() command.
Requirements:
Hardware configuration: Connect signal output 1 to signal input 1 with a
BNC cable.
Arguments:
device_id (str): The ID of the device to run the example with. For
example, `dev3006` or `mf-dev3006`.
do_plot (bool, optional): Specify whether to plot the polled data. Default
is no plot output.
Returns:
sample (dict of numpy arrays): The impedance sample dictionary as returned
by poll.
Raises:
RuntimeError: If the device is not "discoverable" from the API.
See the "LabOne Programing Manual" for further help, available:
- On Windows via the Start-Menu:
Programs -> Zurich Instruments -> Documentation
- On Linux in the LabOne .tar.gz archive in the "Documentation"
sub-folder.
"""
apilevel_example = 6 # The API level supported by this example.
# Call a zhinst utility function that returns:
# - an API session `daq` in order to communicate with devices via the data server.
# - the device ID string that specifies the device branch in the server's node hierarchy.
# - the device's discovery properties.
err_msg = "This example only supports instruments with IA option."
(daq, device, _) = zhinst.utils.create_api_session(device_id, apilevel_example,
required_options=['IA'],
required_err_msg=err_msg)
zhinst.utils.api_server_version_check(daq)
# Create a base configuration: Disable all available outputs, awgs, demods, scopes,...
zhinst.utils.disable_everything(daq, device)
# We use the auto-range example to perform some basic device configuration
# and wait until signal input ranges have been configured by the device.
zhinst.examples.common.example_autoranging_impedance.run_example(device)
# Subscribe to the impedance sample node path.
imp_index = 0
path = '/%s/imps/%d/sample' % (device, imp_index)
daq.subscribe(path)
# Sleep for demonstration purposes: Allow data to accumulate in the data
# server's buffers for one second: poll() will not only return the data
# accumulated during the specified poll_length, but also for data
# accumulated since the subscribe() or the previous poll.
sleep_length = 1.0
# For demonstration only: We could, for example, be processing the data
# returned from a previous poll().
time.sleep(sleep_length)
# Poll the subscribed data from the data server. Poll will block and record
# for poll_length seconds.
poll_length = 0.1 # [s]
poll_timeout = 500 # [ms]
poll_flags = 0
poll_return_flat_dict = True
data = daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict)
# Unsubscribe from all paths.
daq.unsubscribe('*')
# Check the dictionary returned is non-empty
assert data, "poll() returned an empty data dictionary, did you subscribe to any paths?"
# The data returned is a dictionary of dictionaries that reflects the node's path.
# Note, the data could be empty if no data had arrived, e.g., if the imps
# were disabled or had transfer rate 0.
assert path in data, "The data dictionary returned by poll has no key `%s`." % path
# Access the impedance sample using the node's path. For more information
# see the data structure documentation for ZIImpedanceSample in the LabOne
# Programming Manual.
impedanceSample = data[path]
# Get the sampling rate of the device's ADCs, the device clockbase in order
# to convert the sample's timestamps to seconds.
clockbase = float(daq.getInt('/%s/clockbase' % device))
dt_seconds = (impedanceSample['timestamp'][-1] - impedanceSample['timestamp'][0])/clockbase
num_samples = len(impedanceSample['timestamp'])
print("poll() returned {} samples of impedance data spanning {:.3f} seconds.".format(num_samples, dt_seconds))
print("Average measured resitance: {} Ohm.".format(np.mean(impedanceSample['param0'])))
print("Average measured capacitance: {} F.".format(np.mean(impedanceSample['param1'])))
if do_plot:
import matplotlib.pyplot as plt
# Convert timestamps from ticks to seconds via clockbase.
t = (impedanceSample['timestamp'] - impedanceSample['timestamp'][0])/clockbase
plt.close('all')
# Create plot
_, ax = plt.subplots(2, sharex=True)
ax[0].plot(t, impedanceSample['param0'])
ax[0].set_title('Impedance Parameters')
ax[0].grid(True)
ax[0].set_ylabel(r'Resistance ($\Omega$)')
ax[0].autoscale(enable=True, axis='x', tight=True)
ax[1].plot(t, impedanceSample['param1'])
ax[1].grid(True)
ax[1].set_ylabel(r'Capacitance (F)')
ax[1].set_xlabel('Time (s)')
ax[1].autoscale(enable=True, axis='x', tight=True)
plt.draw()
plt.show()
return data
| 24,344
|
def _available_algorithms():
"""Verify which algorithms are supported on the current machine.
This is done by verifying that the required modules and solvers are available.
"""
available = []
for algorithm in ALGORITHM_NAMES:
if "gurobi" in algorithm and not abcrules_gurobi.gb:
continue
if algorithm == "gmpy2-fractions" and not mpq:
continue
available.append(algorithm)
return available
| 24,345
|
def lock(
server_url: str,
semaphore: str,
count: int = 1,
timeout: Optional[timedelta] = None,
) -> Iterator[Peer]:
"""
Acquires a lock to a semaphore
## Keyword arguments:
    * `count`: Lock count. May not exceed the full count of the semaphore.
    * `timeout`: Leaving this at None lets the call block until the lock can be acquired. If a
    timeout is specified, the call raises a `Timeout` exception if it expires before the lock
    is acquired.
"""
peer = get_local_peer(server_url)
# During waiting for the lock, repeated calls to acquire, fill the role of the
# heartbeat.
peer.stop_heartbeat()
# Remember this moment in order to figure out later how much time has passed since
# we started to acquire the lock
start = time()
passed = timedelta(seconds=0)
# We pass this as a parameter to the throttle server. It will wait for this amount of time
# before answering, that the lease is still pending. In case the lease can be acquired it is
# still going to answer immediatly, of course.
block_for = timedelta(seconds=5)
while True:
if timeout:
# If we time out in a timespan < block_for, we want to block only for the time
# until the timeout.
block_for = min(timeout - passed, block_for)
try:
if peer.acquire(semaphore, count=count, block_for=block_for):
# Remember that we acquired that lock, so heartbeat can restore it, if need be.
peer.acquired[semaphore] = count
break
except UnknownPeer:
peer.restore()
if timeout:
# The time between now and start is the amount of time we are waiting for the
# lock.
now = time()
passed = timedelta(seconds=now - start)
# Figure out if the lock timed out
if timeout < passed:
raise Timeout
# Start heartbeat to keep lock alive during the time we hold it.
peer.start_heartbeat()
try:
yield peer
finally:
assert peer.acquired.pop(semaphore) == count
try:
if peer.acquired:
# Acquired dict still holds locks, remove only this one.
peer.release(semaphore)
# We don't stop the heartbeat, since we still hold other locks.
else:
if isinstance(peer, PeerWithHeartbeat):
peer.stop_heartbeat()
# No more locks associated with this peer. Let's remove it entirely
peer.remove_from_server()
del threadlocal.peers[server_url]
except requests.ConnectionError:
        # Ignore recoverable errors. `release` already retried. The litter collection on
        # the server side takes care of freeing the lease.
pass
| 24,346
|
def symbols(*names, **kwargs):
"""
Emulates the behaviour of sympy.symbols.
"""
shape=kwargs.pop('shape', ())
s = names[0]
if not isinstance(s, list):
import re
        s = re.split(r'\s|,', s)
res = []
for t in s:
# skip empty strings
if not t:
continue
sym = Symbol(t, shape, **kwargs)
res.append(sym)
res = tuple(res)
if len(res) == 0: # var('')
res = None
elif len(res) == 1: # var('x')
res = res[0]
# otherwise var('a b ...')
return res
| 24,347
|
def cpuPercent():
"""
START Test Results
For 10 times:
Current CPU Usage: 0.0
Current CPU Usage: 0.0
Current CPU Usage: 0.0
Current CPU Usage: 0.8
Current CPU Usage: 0.0
Current CPU Usage: 0.1
Current CPU Usage: 1.5
Current CPU Usage: 0.0
Current CPU Usage: 0.0
Current CPU Usage: 0.8
END Test Results
"""
for i in range(5):
print("Current CPU Usage: {0}".format(cps.getCpuPercent()))
| 24,348
|
def create_intent(intent, project_id, language_code):
"""Create intent in dialogflow
:param intent: dict, intent for api
:param project_id: str, secret project id
    :param language_code: str, language code of the intent (e.g. 'en')
    :return: the created intent returned by the API
"""
client = dialogflow.IntentsClient()
parent = client.project_agent_path(project_id)
response = client.create_intent(parent, intent, language_code=language_code)
return response
| 24,349
|
def construct_object_types(list_of_oids: List[str]) -> List[hlapi.ObjectType]:
"""Builds and returns a list of special 'ObjectType'
from pysnmp"""
object_types: List[hlapi.ObjectType] = []
for oid in list_of_oids:
object_types.append(hlapi.ObjectType(hlapi.ObjectIdentity(oid)))
return object_types
| 24,350
|
def test_resolve_private_address(peripheral, central):
"""Ensure that if the central privacy policy is set to resolve and filter
and the device has no bond then resolved private addresses are not filtered"""
advertising_types = [
"ADV_CONNECTABLE_UNDIRECTED",
# "ADV_CONNECTABLE_DIRECTED",
"ADV_SCANNABLE_UNDIRECTED",
"ADV_NON_CONNECTABLE_UNDIRECTED"
]
central.gap.setCentralPrivacyConfiguration(
False,
"RESOLVE_AND_FILTER"
)
# test that scan works as intended
for advertising_type in advertising_types:
peripheral.gap.setAdvertisingType(advertising_type)
peripheral.gap.startAdvertising()
scan_records = central.gap.startScan(
SCAN_TIMEOUT,
get_advertising_data(peripheral)
).result
assert len(scan_records) > 0
for scan in scan_records:
assert scan["addressType"] == "ADDR_TYPE_RANDOM_PRIVATE_RESOLVABLE"
assert scan["peerAddrType"] == "RANDOM"
assert address_is_random_resolvable(scan["peerAddr"])
peripheral.gap.stopAdvertising()
# test that connection works as intended
peripheral.gap.setAdvertisingType("ADV_CONNECTABLE_UNDIRECTED")
peripheral.gap.startAdvertising()
# get the private address of the device
scan_records = central.gap.startScan(
SCAN_TIMEOUT,
get_advertising_data(peripheral)
).result
assert len(scan_records) > 0
peripheral_address_type = scan_records[0]["peerAddrType"]
peripheral_address = scan_records[0]["peerAddr"]
# connect to the device
connection_params = get_connection_args(
peripheral_address_type,
peripheral_address
)
peripheral_connection = peripheral.gap.waitForConnection.setAsync()(1000)
central_connection = central.gap.connect(*connection_params).result
peripheral_connection = peripheral_connection.result
for connection_result in [central_connection, peripheral_connection]:
assert connection_result["ownAddrType"] == "ADDR_TYPE_RANDOM_PRIVATE_RESOLVABLE"
assert connection_result["peerAddrType"] == "ADDR_TYPE_RANDOM_PRIVATE_RESOLVABLE"
assert connection_result["peerAddressType"] == "RANDOM"
| 24,351
|
def base_sampler(models, nevents, floating_params=None):
"""
Creates samplers from models.
Args:
models (list(model)): models to sample
        nevents (list(int)): number of events in each sampler
        floating_params (list(parameter), optional): floating parameters in the samplers
Returns:
Samplers
"""
assert all(is_valid_pdf(m) for m in models)
assert len(nevents) == len(models)
if floating_params:
floating_params_names = [f.name for f in floating_params]
samplers = []
fixed_params = []
for m in models:
def to_fix(p):
if floating_params:
return p.name in floating_params_names
else:
return False
fixed = [p for p in m.get_params() if not to_fix(p)]
fixed_params.append(fixed)
for i, (m, p) in enumerate(zip(models, fixed_params)):
sampler = m.create_sampler(n=nevents[i], fixed_params=p)
samplers.append(sampler)
return samplers
| 24,352
|
def menu_maker():
"""Top Menu Maker In each html page
"""
result = "<center>"
for i,item in enumerate(page_name):
if item == "Home":
targets_blank = ""
else:
            targets_blank = ' target="blank"'  # leading space keeps the attribute separated from href
# Hyper Link To Each Page In HTML File
result += '\t<a href="' \
+ actual_name[i] + '.html"' + targets_blank + '>' + name_standard(item) + "</a>\n"
result += " \n"
result += "</center>"
result = result + "\t\t" + break_line # Add Break line to End Of The Menu
return result
| 24,353
|
def test_gamma_map():
"""Test Gamma MAP inverse."""
forward = read_forward_solution(fname_fwd)
forward = convert_forward_solution(forward, surf_ori=True)
forward = pick_types_forward(forward, meg=False, eeg=True)
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
proj=False)
evoked.resample(50, npad=100)
evoked.crop(tmin=0.1, tmax=0.14) # crop to window around peak
cov = read_cov(fname_cov)
cov = regularize(cov, evoked.info, rank=None)
alpha = 0.5
stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=True, update_mode=1)
_check_stc(stc, evoked, 68477, 'lh', fwd=forward)
vec_stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=True, update_mode=1, pick_ori='vector')
assert_stcs_equal(vec_stc.magnitude(), stc)
stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=False, update_mode=1)
_check_stc(stc, evoked, 82010, 'lh', fwd=forward)
dips = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=False, update_mode=1,
return_as_dipoles=True)
assert (isinstance(dips[0], Dipole))
stc_dip = make_stc_from_dipoles(dips, forward['src'])
assert_stcs_equal(stc, stc_dip)
# force fixed orientation
stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=False, update_mode=2,
loose=0, return_residual=False)
_check_stc(stc, evoked, 85739, 'lh', fwd=forward, ratio=20.)
| 24,354
|
def project_generate_private_link_post(auth, node, **kwargs):
""" creata a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [AbstractNode.load(node_id) for node_id in node_ids]
try:
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
except ValidationError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
return new_link
| 24,355
|
def access_token_old_api(authen_code):
"""
    Obtain the logged-in user's identity via this endpoint (appears to be a legacy API).
    :param authen_code:
    :return:
    """
    # First obtain the app_access_token
app_access_token = _get_app_access_token()
if not app_access_token:
return None
access_token_old_url = cfg.access_token_old_url
headers = {"Content-Type": "application/json"}
payload = {
"app_id": cfg.app_id,
"app_secret": cfg.app_secret,
"app_access_token": app_access_token,
"grant_type": "authorization_code",
"code": authen_code,
}
result = post_http_request(access_token_old_url, headers=headers, payload=payload)
return result
| 24,356
|
def denoising(image):
"""improve image quality by remove unimportant details"""
denoised = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
return denoised
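# Hedged usage sketch (the file name is illustrative, not from the original entry):
#   import cv2
#   img = cv2.imread("noisy_photo.jpg")   # BGR image as a numpy array
#   clean = denoising(img)                # non-local means denoised copy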
| 24,357
|
def test_normalize_paths(value, expected):
"""Verify we normalizes a sequence of paths provided to the tool."""
assert utils.normalize_paths(value) == expected
| 24,358
|
def teams():
"""Redirect the to the Slack team authentication url."""
return redirect(auth.get_redirect('team'))
| 24,359
|
def _clarans(metric):
"""Clustering Large Applications based on RANdomized Search."""
# choose which implementation to use, hybrid or cpu
get_clusters = _get_clusters(metric, method='cpu')
@jit(nopython=True)
def clarans(data, k, numlocal, maxneighbor):
"""Clustering Large Applications based on RANdomized Search.
Parameters
----------
data : (n,) ndarray
Data set.
k : int
Number of desired clusters.
        metric : function
            Function to compute pairwise distances (captured from the enclosing
            ``_clarans`` factory rather than passed as an argument).
numlocal : int
Number of times to repeat the search for other local minima.
maxneighbor : int
Maximum number of the neighbors to look at.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object
was assigned, where the cluster number is defined as the object
number of the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Algorithm
---------
1. Choose an arbitrary node from the data set.
2. Consider a random neighbor of the current node.
3. If the random neighbor has a lower error than the current node, set
it as the current node.
4. Repeat step 2-3 ``maxneighbor`` times.
5. Repeat step 1-4 ``numlocal`` times and retain the best clustering.
Notes
-----
The best way to explain CLARANS is via a graph abstraction. In fact,
the process of finding k medoids can be viewed abstractly as searching
through a certain graph. In this graph, a set of k objects is called
node. Two nodes are neighbors if their sets differ by only one object.
        Since a node represents a collection of k objects, these objects can be seen as
medoids and hence induce a clustering.
Each node can be assigned an error that is defined to be the total
dissimilarity (i.e. sum of distances) between every object and the
medoid of its cluster.
References
----------
.. R.T. Ng, Jiawei Han, "CLARANS: a method for clustering objects for
spatial data mining"
"""
n = data.shape[0]
choices = np.arange(n)
best_medoids = np.empty(k, dtype=np.uint32)
best_error = np.inf
min_dist = 0
        for _ in range(numlocal):
            # step 1
            # choose an arbitrary node as starting medoids and compute its
            # error; reset the candidate pool so every object can be drawn again
            choices = np.arange(n)
medoids = np.empty(k, dtype=np.uint32)
for i in range(k):
np.random.shuffle(choices)
medoids[i] = choices[-1]
choices = choices[:-1]
error = 0
for i in range(n):
min_dist = np.inf
for med in medoids:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
error += min_dist
for _ in range(maxneighbor):
# step 2
# find a random neighbor, i.e. change only one of the medoids
# with a random object (that is not already a medoid) of the
# whole data set
random_neigh = np.copy(medoids)
np.random.shuffle(choices)
non_med = choices[-1]
non_med_i = np.random.choice(k)
random_neigh[non_med_i] = non_med
# step 3
# compute the error of the random neighbor and compare it with
# the current node (i.e. current medoids)
new_error = 0
for i in range(n):
min_dist = np.inf
for med in random_neigh:
dist = metric(data[i], data[med])
if dist < min_dist:
min_dist = dist
new_error += min_dist
# choose the induced clustering with lower error
if new_error < error:
error = new_error
choices[-1] = medoids[non_med_i]
medoids = random_neigh
# retain the clustering solution with the lowest error
if error < best_error:
best_error = error
best_medoids = medoids
return get_clusters(data, best_medoids)
return clarans
| 24,360
|
def create_outlier_mask(df, target_var, number_of_stds, grouping_cols=None):
"""
    Create a row-wise mask to filter out outliers based on target_var.
    Optionally allows you to filter outliers by group for hierarchical data.
"""
def flag_outliers_within_groups(df, target_var,
grouping_cols, number_of_stds):
groups = df.groupby(grouping_cols)
means = groups[target_var].transform('mean')
stds = groups[target_var].transform('std')
upper_bound = means + stds * number_of_stds
lower_bound = means - stds * number_of_stds
return df[target_var].between(lower_bound, upper_bound)
def flag_outliers_without_groups(df, target_var, number_of_stds):
mean_val = df[target_var].mean()
std_val = df[target_var].std()
upper_bound = (mean_val + (std_val * number_of_stds))
lower_bound = (mean_val - (std_val * number_of_stds))
return (df[target_var] > lower_bound) & (df[target_var] < upper_bound)
if grouping_cols:
mask = flag_outliers_within_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds, grouping_cols=grouping_cols
)
else:
mask = flag_outliers_without_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds
)
return mask
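# Hedged usage sketch (the frame below is illustrative, not from the original entry):
# build a boolean mask that is False for rows far from their group mean, then use
# it to filter the frame.
import pandas as pd

outlier_demo = pd.DataFrame({"store": ["a"] * 8 + ["b"] * 8,
                             "sales": [10, 11, 9, 10, 11, 9, 10, 80,
                                       100, 102, 98, 101, 99, 100, 101, 100]})
outlier_mask = create_outlier_mask(outlier_demo, target_var="sales",
                                   number_of_stds=2, grouping_cols=["store"])
cleaned = outlier_demo[outlier_mask]  # the 80-sales row in store "a" lies outside 2 group stds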
| 24,361
|
def closing_all(*args):
"""
Return a context manager closing the passed arguments.
"""
return contextlib.nested(*[contextlib.closing(f) for f in args])
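# Note: contextlib.nested exists only on Python 2. A hedged Python 3 sketch of the
# same idea (the name closing_all_py3 is illustrative, not part of the original code):
import contextlib

def closing_all_py3(*args):
    """Return a single context manager closing all passed arguments (Python 3)."""
    stack = contextlib.ExitStack()
    for f in args:
        stack.enter_context(contextlib.closing(f))
    return stack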
| 24,362
|
def _non_max_suppression(objects, threshold):
"""Returns a list of indexes of objects passing the NMS.
Args:
objects: result candidates.
threshold: the threshold of overlapping IoU to merge the boxes.
Returns:
        A list of indexes containing the objects that pass the NMS.
"""
if len(objects) == 1:
return [0]
if len(objects) == 0:
return []
boxes = np.array([o.bbox for o in objects])
xmins = boxes[:, 0]
ymins = boxes[:, 1]
xmaxs = boxes[:, 2]
ymaxs = boxes[:, 3]
areas = (xmaxs - xmins) * (ymaxs - ymins)
scores = [o.score for o in objects]
idxs = np.argsort(scores)
selected_idxs = []
while idxs.size != 0:
selected_idx = idxs[-1]
selected_idxs.append(selected_idx)
overlapped_xmins = np.maximum(xmins[selected_idx], xmins[idxs[:-1]])
overlapped_ymins = np.maximum(ymins[selected_idx], ymins[idxs[:-1]])
overlapped_xmaxs = np.minimum(xmaxs[selected_idx], xmaxs[idxs[:-1]])
overlapped_ymaxs = np.minimum(ymaxs[selected_idx], ymaxs[idxs[:-1]])
w = np.maximum(0, overlapped_xmaxs - overlapped_xmins)
h = np.maximum(0, overlapped_ymaxs - overlapped_ymins)
intersections = w * h
unions = areas[idxs[:-1]] + areas[selected_idx] - intersections
ious = intersections / unions
idxs = np.delete(
idxs, np.concatenate(([len(idxs) - 1], np.where(ious > threshold)[0]))
)
return selected_idxs
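# Hedged usage sketch (the Candidate type and boxes are illustrative, not from the
# original entry): two heavily overlapping boxes collapse to the higher-scoring one,
# while the disjoint box survives.
from collections import namedtuple

Candidate = namedtuple("Candidate", ["bbox", "score"])
nms_demo = [
    Candidate(bbox=(0, 0, 10, 10), score=0.9),
    Candidate(bbox=(1, 1, 10, 10), score=0.8),    # IoU ~0.81 with the first box
    Candidate(bbox=(20, 20, 30, 30), score=0.7),  # no overlap with the others
]
assert _non_max_suppression(nms_demo, threshold=0.5) == [0, 2]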
| 24,363
|
def log_prefect(
msg: str, start: bool = True, use_prefect: bool = False
) -> None:
"""Logging with Prefect."""
if use_prefect:
if start:
logger = get_run_logger()
logger.info(msg)
else:
print(msg)
| 24,364
|
def binary_search(x,l):
""" Esse algorítmo é o algorítmo de busca binária, mas ele retorna
qual o índice o qual devo colocar o elemento para que a lista
permaneça ordenada.
Input: elemento x e lista l
Output: Índice em que o elemento deve ser inserido para manter a ordenação da lista
"""
lo = 0 # Cota inferior inicial (Lower bound)
up = len(l) # Cota superior inicial (Upper bound)
while lo < up:
mid = int((lo+up)/2) #Ponto Médio
if l[mid] < x:
lo = mid + 1
else:
up = mid
return up
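# Hedged usage sketch (values are illustrative, not from the original entry): the
# returned index is the insertion point that keeps the list sorted, matching
# bisect.bisect_left for these inputs.
assert binary_search(5, [1, 3, 4, 7, 9]) == 3   # 5 slots in between 4 and 7
assert binary_search(0, [1, 3, 4, 7, 9]) == 0   # smaller than every element
assert binary_search(10, [1, 3, 4, 7, 9]) == 5  # larger than every element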
| 24,365
|
def check_abrp(config):
"""Check for geocodio options and return"""
try:
abrpOptions = config.abrp.as_dict()
except:
return {}
options = {}
abrp_keys = ["enable", "api_key", "token"]
for key in abrp_keys:
if key not in abrpOptions.keys():
_LOGGER.error(f"Missing required '{key}' option in 'abrp' settings")
return {}
options[key] = abrpOptions.get(key, None)
return options
| 24,366
|
def connect(config, job, attach):
"""
Connect to job.
JOB may be specified by name or ID, but ID is preferred.
"""
jobs = config.trainml.run(config.trainml.client.jobs.list())
found = search_by_id_name(job, jobs)
if None is found:
raise click.UsageError("Cannot find specified job.")
if found.type != "notebook":
try:
if attach:
config.trainml.run(found.connect(), found.attach())
return config.trainml.run(found.disconnect())
else:
return config.trainml.run(found.connect())
except:
try:
config.trainml.run(found.disconnect())
except:
pass
raise
else:
if found.status == "waiting for data/model download":
try:
if attach:
config.trainml.run(found.connect(), found.attach())
config.trainml.run(found.disconnect())
click.echo("Launching...", file=config.stdout)
browse(found.notebook_url)
else:
return config.trainml.run(found.connect())
except:
try:
config.trainml.run(found.disconnect())
except:
pass
raise
else:
config.trainml.run(found.wait_for("running"))
click.echo("Launching...", file=config.stdout)
browse(found.notebook_url)
| 24,367
|
def convert_magicc7_to_openscm_variables(variables, inverse=False):
"""
Convert MAGICC7 variables to OpenSCM variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert OpenSCM variables to MAGICC7
variables
Returns
-------
``type(variables)``
Set of converted variables
"""
if inverse:
return apply_string_substitutions(
variables, OPENSCM_TO_MAGICC7_VARIABLES_MAPPING
)
else:
return apply_string_substitutions(
variables, MAGICC7_TO_OPENSCM_VARIABLES_MAPPING
)
| 24,368
|
async def fetch_disclosure(start, end):
"""期间沪深二市所有类型的公司公告
Args:
start (date like): 开始日期
end (date like): 结束日期
Returns:
list: list of dict
"""
start, end = pd.Timestamp(start), pd.Timestamp(end)
start_str = start.strftime(r'%Y-%m-%d')
end_str = end.strftime(r'%Y-%m-%d')
sem = asyncio.BoundedSemaphore(MAX_WORKER)
tasks = []
async with aiohttp.ClientSession() as session:
for column in COLUMNS.keys():
tasks.append(
_fetch_disclosure(sem, session, column, start_str, end_str))
data = await asyncio.gather(*tasks)
res = []
for d in data:
res.extend(parse_data(d))
return res
| 24,369
|
def get_loader(
image_dir,
attr_path,
selected_attrs,
crop_size=178,
image_size=128,
batch_size=16,
dataset="CelebA",
mode="train",
affectnet_emo_descr="emotiw",
num_workers=1,
):
"""Build and return a data loader."""
transform = []
if mode == "train":
transform.append(T.RandomHorizontalFlip())
transform.append(T.CenterCrop(crop_size))
transform.append(T.Resize(image_size))
transform.append(T.ToTensor())
transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
transform = T.Compose(transform)
if dataset == "CelebA":
dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)
elif dataset == "RaFD":
dataset = ImageFolder(image_dir, transform)
elif dataset == "AffectNet":
dataset = AffectNet(image_dir, affectnet_emo_descr, transform, mode)
data_loader = data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=(mode == "train"),
num_workers=num_workers,
)
return data_loader
| 24,370
|
def remove_samples(request, product_id):
"""Removes passed samples from product with passed id.
"""
parent_product = Product.objects.get(pk=product_id)
for temp_id in request.POST.keys():
if temp_id.startswith("product") is False:
continue
temp_id = temp_id.split("-")[1]
remove_sample(product=parent_product, sample_id=temp_id)
# This isn't necessary but it cleans the cache. See lfs.cache listeners
# for more
parent_product.save()
html = [["#samples-inline", manage_samples_inline(request, product_id, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Samples have been removed.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
| 24,371
|
def test_search_value_not_list():
""""Testing for value not found in list."""
test_instance = LinkedList("NotHere")
test_node = Node("notnode")
assert test_instance.search(test_node.data) is None
| 24,372
|
def csu_to_field(field, radar, units='unitless',
long_name='Hydrometeor ID',
standard_name='Hydrometeor ID',
dz_field='ZC'):
"""
Adds a newly created field to the Py-ART
radar object. If reflectivity is a masked array,
make the new field masked the same as reflectivity.
"""
fill_value = -32768
masked_field = np.ma.asanyarray(field)
masked_field.mask = masked_field == fill_value
if hasattr(radar.fields[dz_field]['data'], 'mask'):
setattr(masked_field, 'mask',
np.logical_or(masked_field.mask,
radar.fields[dz_field]['data'].mask))
fill_value = radar.fields[dz_field]['_FillValue']
field_dict = {'data': masked_field,
'units': units,
'long_name': long_name,
'standard_name': standard_name,
'_FillValue': fill_value}
return field_dict
| 24,373
|
def RunQa():
"""Main QA body.
"""
rapi_user = "ganeti-qa"
RunTestBlock(RunEnvTests)
rapi_secret = SetupCluster(rapi_user)
if qa_rapi.Enabled():
# Load RAPI certificate
qa_rapi.Setup(rapi_user, rapi_secret)
RunTestBlock(RunClusterTests)
RunTestBlock(RunOsTests)
RunTestIf("tags", qa_tags.TestClusterTags)
RunTestBlock(RunCommonNodeTests)
RunTestBlock(RunGroupListTests)
RunTestBlock(RunGroupRwTests)
RunTestBlock(RunNetworkTests)
RunTestBlock(RunFilterTests)
# The master shouldn't be readded or put offline; "delay" needs a non-master
# node to test
pnode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
try:
RunTestIf("node-readd", qa_node.TestNodeReadd, pnode)
RunTestIf("node-modify", qa_node.TestNodeModify, pnode)
RunTestIf("delay", qa_cluster.TestDelay, pnode)
finally:
pnode.Release()
# Make sure the cluster is clean before running instance tests
qa_cluster.AssertClusterVerify()
pnode = qa_config.AcquireNode()
try:
RunTestIf("tags", qa_tags.TestNodeTags, pnode)
if qa_rapi.Enabled():
RunTest(qa_rapi.TestNode, pnode)
if (qa_config.TestEnabled("instance-add-plain-disk")
and qa_config.IsTemplateSupported(constants.DT_PLAIN)):
# Normal instance allocation via RAPI
for use_client in [True, False]:
rapi_instance = RunTest(qa_rapi.TestRapiInstanceAdd, pnode,
use_client)
try:
if qa_config.TestEnabled("instance-plain-rapi-common-tests"):
RunCommonInstanceTests(rapi_instance, [pnode])
RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance, use_client)
finally:
rapi_instance.Release()
del rapi_instance
# Multi-instance allocation
rapi_instance_one, rapi_instance_two = \
RunTest(qa_rapi.TestRapiInstanceMultiAlloc, pnode)
try:
RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_one, True)
RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_two, True)
finally:
rapi_instance_one.Release()
rapi_instance_two.Release()
finally:
pnode.Release()
config_list = [
("default-instance-tests", lambda: None, lambda _: None),
(IsExclusiveStorageInstanceTestEnabled,
lambda: qa_cluster.TestSetExclStorCluster(True),
qa_cluster.TestSetExclStorCluster),
]
for (conf_name, setup_conf_f, restore_conf_f) in config_list:
if qa_config.TestEnabled(conf_name):
oldconf = setup_conf_f()
RunTestBlock(RunInstanceTests)
restore_conf_f(oldconf)
pnode = qa_config.AcquireNode()
try:
if qa_config.TestEnabled(["instance-add-plain-disk", "instance-export"]):
for shutdown in [False, True]:
instance = RunTest(qa_instance.TestInstanceAddWithPlainDisk, [pnode])
try:
expnode = qa_config.AcquireNode(exclude=pnode)
try:
if shutdown:
# Stop instance before exporting and removing it
RunTest(qa_instance.TestInstanceShutdown, instance)
RunTest(qa_instance.TestInstanceExportWithRemove, instance, expnode)
RunTest(qa_instance.TestBackupList, expnode)
finally:
expnode.Release()
finally:
instance.Release()
del expnode
del instance
qa_cluster.AssertClusterVerify()
finally:
pnode.Release()
if qa_rapi.Enabled():
RunTestIf("filters", qa_rapi.TestFilters)
RunTestIf("cluster-upgrade", qa_cluster.TestUpgrade)
RunTestBlock(RunExclusiveStorageTests)
RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
TestIPolicyPlainInstance)
RunTestBlock(RunCustomSshPortTests)
RunTestIf(
"instance-add-restricted-by-disktemplates",
qa_instance.TestInstanceCreationRestrictedByDiskTemplates)
# Test removing instance with offline drbd secondary
if qa_config.TestEnabled(["instance-remove-drbd-offline",
"instance-add-drbd-disk"]):
# Make sure the master is not put offline
snode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
try:
pnode = qa_config.AcquireNode(exclude=snode)
try:
instance = qa_instance.TestInstanceAddWithDrbdDisk([pnode, snode])
set_offline = lambda node: qa_node.MakeNodeOffline(node, "yes")
set_online = lambda node: qa_node.MakeNodeOffline(node, "no")
RunTest(qa_instance.TestRemoveInstanceOfflineNode, instance, snode,
set_offline, set_online)
finally:
pnode.Release()
finally:
snode.Release()
qa_cluster.AssertClusterVerify()
RunTestBlock(RunMonitoringTests)
RunPerformanceTests()
RunTestIf("create-cluster", qa_node.TestNodeRemoveAll)
RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)
| 24,374
|
def test_parse(component_validator):
"""Test that action data is properly parsed.
1. Create an action.
2. Create an action parser.
3. Replace parser methods so that they return predefined data.
4. Parse the action.
5. Check the parsed data.
"""
action = Action(
name="parsed name",
classes=("parsed class 1", "parsed class 2"),
method=Method.PUT,
target="/parsed/target",
title="parsed title",
media_type="application/parsed+type",
fields=(Field(name="first"), Field(name="second")),
)
parser = ActionParser(data={}, parser=JSONParser())
parser.parse_name = lambda: action.name
parser.parse_classes = lambda: action.classes
parser.parse_method = lambda: action.method
parser.parse_target = lambda: action.target
parser.parse_title = lambda: action.title
parser.parse_media_type = lambda: action.media_type
parser.parse_fields = lambda: action.fields
actual_action = parser.parse()
component_validator.validate_action(actual_action, action)
| 24,375
|
def get_port_use_db():
"""Gets the services that commonly run on certain ports
:return: dict[port] = service
:rtype: dict
"""
url = "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv"
db_path = "/tmp/port_db"
if not os.path.isfile(db_path):
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(db_path, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
db = {}
with open(db_path) as f:
content = f.read()
for line in content.split("\n")[1:]:
if line:
parts = line.split(",")
if len(parts) >= 4:
service = parts[0]
port = parts[1]
if service:
db[port] = service
return db
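# Hedged usage sketch (requires network access on the first call; the CSV is cached
# in /tmp/port_db):
#   port_db = get_port_use_db()
#   port_db.get("443")  # typically 'https'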
| 24,376
|
def calculate_com(structure):
"""
Calculates center of mass of the structure (ligand or protein).
Parameters
----------
structure : biopython Structure object
PDB of choice loaded into biopython (only chains of interest).
Returns
-------
A list defining center of mass of the structure.
"""
structure_mass = 0.0
com = np.zeros(3)
for atom in structure.get_atoms():
com = com + np.array(list(atom.get_vector())) * atom.mass
structure_mass += atom.mass
com = com / structure_mass
return com
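# Hedged usage sketch (assumes Biopython and a local PDB file; names are illustrative):
#   from Bio.PDB import PDBParser
#   structure = PDBParser(QUIET=True).get_structure("prot", "protein.pdb")
#   com = calculate_com(structure)  # length-3 array with the x, y, z centre of mass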
| 24,377
|
def generate_config(context):
""" Generate the deployment configuration. """
name = context.properties.get('name', context.env['name'])
resources = [
{
'name': name,
'type': 'appengine.v1.version',
'properties': context.properties
}
]
outputs = [
{
'name': 'name',
'value': '$(ref.{}.name)'.format(name)
},
{
'name': 'createTime',
'value': '$(ref.{}.createTime)'.format(name)
},
{
'name': 'versionUrl',
'value': '$(ref.{}.versionUrl)'.format(name)
}
]
return {'resources': resources, 'outputs': outputs}
| 24,378
|
def check_regs(region_df, chr_name=None, start_name=None, stop_name=None,
strand_name=None, sample_name=None):
""" Modifies a region dataframe to be coherent with the GMQL data model
:param region_df: a pandas Dataframe of regions that is coherent with the GMQL data model
:param chr_name: (optional) which column of :attr:`~.region_df` is the chromosome
:param start_name: (optional) which column of :attr:`~.region_df` is the start
:param stop_name: (optional) which column of :attr:`~.region_df` is the stop
    :param strand_name: (optional) which column of :attr:`~.region_df` is the strand
    :param sample_name: (optional) which column of :attr:`~.region_df` is the sample id
    :return: a modified pandas Dataframe
"""
if sample_name is None:
region_df.index = np.repeat(default_id_sample, len(region_df))
else:
region_df = search_column(region_df, id_sample_aliases,
id_sample_types, 'id_sample', sample_name)
region_df = region_df.set_index("id_sample", drop=True)
region_df = region_df.sort_index()
region_df = search_column(region_df, chr_aliases, chr_types, 'chr', chr_name)
region_df = search_column(region_df, start_aliases, start_types, 'start', start_name)
region_df = search_column(region_df, stop_aliases, stop_types, 'stop', stop_name)
region_df = search_column(region_df, strand_aliases, strand_types, 'strand', strand_name)
return region_df
| 24,379
|
def build_assets_job(
name: str,
assets: List[OpDefinition],
source_assets: Optional[Sequence[Union[ForeignAsset, OpDefinition]]] = None,
resource_defs: Optional[Dict[str, ResourceDefinition]] = None,
description: Optional[str] = None,
    config: Optional[Union[ConfigMapping, Dict[str, Any], PartitionedConfig]] = None,
tags: Optional[Dict[str, Any]] = None,
) -> JobDefinition:
"""Builds a job that materializes the given assets.
The dependencies between the ops in the job are determined by the asset dependencies defined
in the metadata on the provided asset nodes.
Args:
name (str): The name of the job.
assets (List[OpDefinition]): A list of assets or multi-assets - usually constructed using
the :py:func:`@asset` or :py:func:`@multi_asset` decorator.
source_assets (Optional[Sequence[Union[ForeignAsset, OpDefinition]]]): A list of assets
that are not materialized by this job, but that assets in this job depend on.
resource_defs (Optional[Dict[str, ResourceDefinition]]): Resource defs to be included in
this job.
description (Optional[str]): A description of the job.
Examples:
.. code-block:: python
@asset
def asset1():
return 5
@asset
def asset2(asset1):
                return asset1 + 1
my_assets_job = build_assets_job("my_assets_job", assets=[asset1, asset2])
Returns:
JobDefinition: A job that materializes the given assets.
"""
check.str_param(name, "name")
check.list_param(assets, "assets", of_type=OpDefinition)
check.opt_list_param(source_assets, "source_assets", of_type=(ForeignAsset, OpDefinition))
check.opt_str_param(description, "description")
source_assets_by_key = build_source_assets_by_key(source_assets)
op_defs = build_op_deps(assets, source_assets_by_key.keys())
root_manager = build_root_manager(source_assets_by_key)
return GraphDefinition(
name=name,
node_defs=cast(List[NodeDefinition], assets),
dependencies=op_defs,
description=description,
input_mappings=None,
output_mappings=None,
config=None,
).to_job(
resource_defs=merge_dicts(resource_defs or {}, {"root_manager": root_manager}),
config=config,
tags=tags,
)
| 24,380
|
def isinf(a):
"""isinf(a)"""
| 24,381
|
def createBinarySearchTree(vs):
"""
Generate a balanced binary search tree based on the given array.
Args:
vs - an integer array
{4, 5, 5, 7, 2, 1, 3}
4
/ \
2 5
/ \ / \
1 3 5 7
"""
def _helper(vs, left, right):
if left > right:
return None
mid = (left + right) >> 1
node = TreeNode(vs[mid])
node.left = _helper(vs, left, mid - 1)
if node.left:
node.left.parent = node
node.right = _helper(vs, mid + 1, right)
if node.right:
node.right.parent = node
return node
vs = sorted(vs)
root = _helper(vs, 0, len(vs) - 1)
return root
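# Hedged usage sketch (assumes the TreeNode class from the original module; only the
# call itself is illustrated, since TreeNode's attribute names are not shown here):
#   root = createBinarySearchTree([4, 5, 5, 7, 2, 1, 3])
#   # root is the balanced BST drawn in the docstring above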
| 24,382
|
def remove_end_same_as_start_transitions(df, start_col, end_col):
"""Remove rows corresponding to transitions where start equals end state.
Millington 2009 used a methodology where if a combination of conditions
didn't result in a transition, this would be represented in the model by
specifying a transition with start and end state being the same, and a
transition time of 0 years.
AgroSuccess will handle 'no transition' rules differently, so these dummy
transitions should be excluded.
"""
    def start_different_to_end(row):
        return row[start_col] != row[end_col]
return df[df.apply(start_different_to_end, axis=1)]
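# Hedged usage sketch (the frame is illustrative, not from the original entry): the
# shrub -> shrub row is a 'no transition' dummy and gets dropped.
import pandas as pd

transitions_demo = pd.DataFrame({"start": ["shrub", "shrub", "pine"],
                                 "end": ["shrub", "pine", "oak"]})
kept = remove_end_same_as_start_transitions(transitions_demo, "start", "end")
assert list(kept.index) == [1, 2]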
| 24,383
|
def visualizeDomain():
""" Run the simplest case with random policy; visualize the domain to see how it looks. """
    print('in visualizeDomain')
    plotSettings = RoverSettings(rewardType='DIFFERENCE',
                                 moveRandomly=False,
                                 numAgents=30,
                                 sensorRange=10000,  # essentially inf for our world size :)
                                 sensorFov=4,        # 360 degrees
                                 sensorNoiseInt=0)   # no noise
    print('reward type in visualizeDomain: ', plotSettings.rewardType)
    main(plotSettings, episodes=5, lengthOfPool=10, plotPlease='last')
| 24,384
|
def read_config(path: str) -> Dict[str, Any]:
"""Return dict with contents of configuration file."""
newconf = {
"setup": False,
"servers": [],
"okurls": [],
"loggers": [],
"localaddr": None,
# Legacy idlerpg option
"debug": False,
# Non-idlerpg config needs defaults
"confpath": os.path.realpath(path),
"datadir": os.path.realpath(os.path.dirname(path)),
"rpmaxexplvl": 60,
"allylvlbase": 200,
"allylvlstep": 1.16,
"allymaxexplvl": 60,
"backupdir": ".dbbackup",
"store_format": "idlerpg",
"daemonize": True,
"loglevel": "DEBUG",
"throttle": True,
"throttle_rate": 4,
"throttle_period": 1,
"penquest": 15,
"pennick": 30,
"penmessage": 1,
"penpart": 200,
"penkick": 250,
"penquit": 20,
"pendropped": 20,
"penlogout": 20,
"good_battle_pct": 110,
"evil_battle_pct": 90,
"max_name_len": 16,
"max_class_len": 30,
"message_wrap_len": 400,
"quest_interval_min": 12*3600,
"quest_interval_max": 24*3600,
"quest_min_level": 24,
"color": False,
"namecolor": "cyan",
"durationcolor": "green",
"itemcolor": "olive",
}
ignore_line_re = re.compile(r"^\s*(?:#|$)")
config_line_re = re.compile(r"^\s*(\S+)\s*(.*)$")
try:
with open(path) as inf:
for line in inf:
if ignore_line_re.match(line):
continue
match = config_line_re.match(line)
if not match:
log.warning("Invalid config line: "+line)
continue
key, val = match[1].lower(), match[2].rstrip()
if key == "die":
log.critical(f"Please edit {path} to setup your bot's options.")
sys.exit(1)
elif key == "server":
cast(List[str], newconf["servers"]).append(val)
elif key == "okurl":
cast(List[str], newconf["okurls"]).append(val)
elif key == "log":
cast(List[List[str]], newconf["loggers"]).append(val.split(" ", 2))
else:
newconf[key] = parse_val(val)
except OSError as err:
log.critical(f"Unable to read {path}")
sys.exit(1)
return newconf
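# Hedged sketch of a config file this parser accepts (contents are illustrative):
#   # comment lines and blank lines are skipped
#   server irc.example.net:6667
#   okurl https://example.net/
#   loglevel INFO
# read_config("/etc/bot.conf") would merge these keys over the defaults listed above,
# with 'server' and 'okurl' appended to the 'servers' and 'okurls' lists.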
| 24,385
|
def cleanup_one_header(header_path, patterns, options):
"""Clean regex-matched lines away from a file.
Arguments:
header_path: path to the cleaned file.
patterns: list of regex patterns. Any lines matching to these
patterns are deleted.
options: option flags.
"""
with open(header_path) as f:
lines = f.readlines()
matched = []
for i, line in enumerate(lines):
if i - 1 in matched and lines[i - 1][-2:] == '\\\n':
matched.append(i)
continue
for pattern in patterns:
if pattern.search(line):
matched.append(i)
break
if not matched:
return
# remove empty #ifdef ... #endif, successive blank lines
pattern_if = re.compile(r'#\s*if(def|ndef)?\W') # #if, #ifdef, #ifndef
pattern_elif = re.compile(r'#\s*el(if|se)\W') # #elif, #else
pattern_endif = re.compile(r'#\s*endif\W') # #endif
pattern_blank = re.compile(r'^\s*$') # empty line
while True:
old_matched = copy.copy(matched)
extend_matched_lines(lines, matched, [pattern_if],
[pattern_endif], True, True)
extend_matched_lines(lines, matched, [pattern_elif],
[pattern_elif, pattern_endif], True, False)
extend_matched_lines(lines, matched, [pattern_if, pattern_elif],
[pattern_blank], False, True)
extend_matched_lines(lines, matched, [pattern_blank],
[pattern_elif, pattern_endif], True, False)
extend_matched_lines(lines, matched, [pattern_blank],
[pattern_blank], True, False)
if matched == old_matched:
break
tolines = copy.copy(lines)
for i in reversed(matched):
tolines.pop(i)
show_diff(lines, tolines, header_path, options.color)
if options.dry_run:
return
with open(header_path, 'w') as f:
for line in tolines:
f.write(line)
| 24,386
|
def turn_on(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn on specified media player or all."""
hass.add_job(async_turn_on, hass, entity_id)
| 24,387
|
def sieve(iterable, inspector, *keys):
"""Separates @iterable into multiple lists, with @inspector(item) -> k for k in @keys defining the separation.
e.g., sieve(range(10), lambda x: x % 2, 0, 1) -> [[evens], [odds]]
"""
s = {k: [] for k in keys}
for item in iterable:
k = inspector(item)
if k not in s:
raise KeyError(f"Unexpected key <{k}> found by inspector in sieve.")
        s[k].append(item)
return [s[k] for k in keys]
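# Hedged usage sketch (values are illustrative, not from the original entry): split
# 0..9 into evens and odds, mirroring the docstring example.
evens, odds = sieve(range(10), lambda x: x % 2, 0, 1)
assert evens == [0, 2, 4, 6, 8]
assert odds == [1, 3, 5, 7, 9]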
| 24,388
|
def get_model(config: BraveConfig) -> embedding_model.MultimodalEmbeddingModel:
"""Construct a model implementing BraVe.
Args:
config: Configuration for BraVe.
Returns:
A `MultimodalEmbeddingModel` to train BraVe.
"""
init_fn, parameterized_fns = _build_parameterized_fns(config)
loss_fn = _build_loss_fn(config, parameterized_fns)
forward_fns = {
'broad_video': parameterized_fns.broad_video_embedding,
'broad_audio': parameterized_fns.broad_audio_embedding,
'narrow_video': parameterized_fns.narrow_video_embedding,
}
return embedding_model.MultimodalEmbeddingModel(
init_fn=init_fn,
forward_fns=forward_fns,
loss_fn=loss_fn,
evaluate_fn=_build_eval_fn(forward_fns),
train_dataset_builder_fn=_train_dataset_builder(config),
)
| 24,389
|
def upilab6_1_5 () :
"""
6.1.5. Exercice UpyLaB 6.2 - Parcours vert bleu rouge
(D’après une idée de Jacky Trinh le 19/02/2018)
Monsieur Germain est une personne très âgée. Il aimerait préparer une liste de courses à faire à l’avance. Ayant un
budget assez serré, il voudrait que sa liste de courses soit dans ses capacités. Son seul petit souci est qu’il a une
très mauvaise vue et n’arrive donc pas à voir le prix associé à chaque produit contenu dans le catalogue de courses.
Écrire une fonction calcul_prix(produits, catalogue) où :
produits est un dictionnaire contenant, comme clés, les produits souhaités par Monsieur Germain et comme valeurs
associées, la quantité désirée de chacun d’entre eux,
catalogue est un dictionnaire contenant tous les produits du magasin avec leur prix associé.
La fonction retourne le montant total des achats de Monsieur Germain.
Exemple : L’appel suivant de la fonction :
calcul_prix({"brocoli":2, "mouchoirs":5, "bouteilles d'eau":6},
{"brocoli":1.50, "bouteilles d'eau":1, "bière":2,
"savon":2.50, "mouchoirs":0.80})
doit retourner : 13.0
"""
def calcul_prix(produits, catalogue):
somme = 0
for p in produits:
somme += catalogue[p] * produits[p]
return somme
test = [({'pack de fruits': 1, 'poisson': 2, 'jambon': 1, 'citron': 1, 'tomate': 1, 'pâtes': 1, 'sucre': 1,
'pack de légumes': 1, 'café': 1, 'brocoli': 1, 'déodorant': 1, 'bière': 1},
{'confiture': 3.15, 'vin': 6.3, 'poisson': 6.45, 'jambon': 2.1, 'pain': 1.25, 'shampooing': 2.5,
"bouteilles d'eau": 1, 'tomate': 0.75, 'yaourts': 2.85, 'sucre': 0.65, 'pack de légumes': 4.2,
'café': 4.75, 'brocoli': 1.5, 'riz': 3.1, 'jus de fruits': 2.25, 'déodorant': 2.2, 'dentifrice': 1.95,
'fromage': 2.65, 'chocolats': 3.2, 'pack de fruits': 3.3, 'viande': 5.2, 'petits gâteaux': 4.35,
'citron': 0.9, 'mouchoirs': 0.8, 'frites': 3.55, 'farine': 0.95, 'pâtes': 1.1, 'savon': 1.9,
'bière': 2, 'huile': 1.65}),
({'chocolats': 1, 'jambon': 1, 'citron': 1, 'fromage': 2, 'yaourts': 1, 'pâtes': 2, 'savon': 1,
'pack de légumes': 1, 'café': 2, 'brocoli': 1, 'riz': 2, 'mouchoirs': 1},
{'confiture': 3.15, 'vin': 6.3, 'poisson': 6.45, 'jambon': 2.1, 'pain': 1.25, 'shampooing': 2.5,
"bouteilles d'eau": 1, 'tomate': 0.75, 'yaourts': 2.85, 'sucre': 0.65, 'pack de légumes': 4.2,
'café': 4.75, 'brocoli': 1.5, 'riz': 3.1, 'jus de fruits': 2.25, 'déodorant': 2.2, 'dentifrice': 1.95,
'fromage': 2.65, 'chocolats': 3.2, 'pack de fruits': 3.3, 'viande': 5.2, 'petits gâteaux': 4.35,
'citron': 0.9, 'mouchoirs': 0.8, 'frites': 3.55, 'farine': 0.95, 'pâtes': 1.1, 'savon': 1.9, 'bière': 2,
'huile': 1.65})]
reponse =[36.35, 40.650000000000006]
    for produits, catalogue in test:
        print("Grandpa needs: ")
        for article in produits:
            print(produits[article], " x item: ", article)
        cout = calcul_prix(produits, catalogue)
        print("this will cost", cout)
        print("test passed? : ", cout == reponse[test.index((produits, catalogue))])
| 24,390
|
def construct_filtering_input_data(xyz_s, xyz_t, data, overlapped_pair_tensors, dist_th=0.05, mutuals_flag=None):
"""
Prepares the input dictionary for the filtering network
Args:
xyz_s (torch tensor): coordinates of the sampled points in the source point cloud [b,n,3]
        xyz_t (torch tensor): coordinates of the correspondences from the target point cloud [b,n,3]
        data (dict): input data from the data loader
        overlapped_pair_tensors: indices selecting the overlapping pairs (forwarded to extract_transformation_matrices)
        dist_th (float): distance threshold to determine if the correspondence is an inlier or an outlier
        mutuals_flag (torch tensor, optional): flags of the mutually nearest neighbors (can be used as side information to the filtering network)
Returns:
filtering_data (dict): input data for the filtering network
"""
filtering_data = {}
Rs, ts = extract_transformation_matrices(data['T_global_0'], overlapped_pair_tensors)
ys = transformation_residuals(xyz_s, xyz_t, Rs, ts)
xs = torch.cat((xyz_s,xyz_t),dim=-1) # [b, n, 6]
if mutuals_flag is not None:
xs = torch.cat((xs,mutuals_flag.reshape(-1,1)), dim=-1) # [b, n, 7]
    # Threshold ys based on the distance threshold
ys_binary = (ys < dist_th).type(xs.type())
# Construct the data dictionary
filtering_data['xs'] = xs
filtering_data['ys'] = ys
filtering_data['ts'] = ts
filtering_data['Rs'] = Rs
return filtering_data
| 24,391
|
def on_follow_action_notify_followed_party(sender, **kwargs):
"""
when a new person follows me, a notification is created and email queued
"""
notification = Notification(
title='Someone followed you',
body=user_followed_message.format(
username=kwargs['who_followed'].username,
link_to_profile=kwargs[
'who_followed'].url))
notification.save()
notification.recipients.add(kwargs['who_was_followed'])
notification.queue_notification_emails()
| 24,392
|
def rmean(x, N):
""" cutting off the edges. """
s = int(N-1)
return np.convolve(x, np.ones((N,))/N)[s:-s]
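# Hedged usage sketch (values are illustrative, not from the original entry): a
# window-3 running mean keeps only the fully overlapping part of the convolution.
import numpy as np

assert np.allclose(rmean(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), 3), [2.0, 3.0, 4.0])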
| 24,393
|
def add_variant_to_existing_lines(group, variant, total_quantity):
"""
Adds variant to existing lines with same variant.
Variant is added by increasing quantity of lines with same variant,
as long as total_quantity of variant will be added
or there is no more lines with same variant.
Returns quantity that could not be fulfilled with existing lines.
"""
# order descending by lines' stock available quantity
lines = group.lines.filter(
product=variant.product, product_sku=variant.sku,
stock__isnull=False).order_by(
F('stock__quantity_allocated') - F('stock__quantity'))
quantity_left = total_quantity
for line in lines:
quantity = (
line.stock.quantity_available
if quantity_left > line.stock.quantity_available
else quantity_left)
line.quantity += quantity
line.save()
Stock.objects.allocate_stock(line.stock, quantity)
quantity_left -= quantity
if quantity_left == 0:
break
return quantity_left
| 24,394
|
def calculate_position(c, t):
"""
Calculates a position given a set of quintic coefficients and a time.
Args
c: List of coefficients generated by a quintic polynomial
trajectory generator.
t: Time at which to calculate the position
Returns
Position
"""
return c[0] * t**5 + c[1] * t**4 + c[2] * t**3 + c[3] * t**2 + c[4] * t + c[5]
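# Hedged usage sketch (coefficients are illustrative, not from the original entry):
# with c = [0, 0, 0, 0, 2, 1] the quintic collapses to 2*t + 1, so t = 3 gives 7;
# c = [1, 0, 0, 0, 0, 0] evaluates to t**5, so t = 2 gives 32.
assert calculate_position([0, 0, 0, 0, 2, 1], 3) == 7
assert calculate_position([1, 0, 0, 0, 0, 0], 2) == 32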
| 24,395
|
def step_impl(context, query):
"""
Send a query to the Indigo reinforcement learning reasoner
"""
context.query_json = {"message": {
"query_graph": {
"nodes": [
{
"id": "n00",
"curie": "185855",
"type": "chemical_substance"
},
{
"id": "n01",
"curie": "?",
"type": "disease"
}
],
"edges": [
{
"id": "e00",
"type": "HAS_INDICATION",
"source_id": "n00",
"target_id": "n01"
}
]
}
}
}
url = "https://indigo.ncats.io/rlr/api/v0"
headers = {'accept': 'application/json'}
print(context.query_json)
print(url)
with closing(requests.post(url + "/query", json=context.query_json, headers=headers)) as response:
print(response)
print(json.loads(response.json())[0]["nodes"])
context.code = response.status_code
context.content_type = response.headers['content-type']
assert response.status_code == 200
context.response = response
context.response_text = response.text
context.response_json = response.json()
| 24,396
|
def get_dqa(df):
"""Method to get DQA issues."""
try:
df0 = df[(df.dob == '') | (df.dqa_sex != 'OK') |
(df.dqa_age != 'OK') | (df.case_status == 'Pending')]
df1 = df0[['cpims_id', 'child_names', 'age', 'case_category',
'dqa_sex', 'dqa_dob', 'dqa_age', 'case_status',
'case_date']].drop_duplicates()
# print(df1)
except Exception as e:
print('Error getting data frame - %s' % (e))
brdf = Blank()
brdf.index = []
return brdf
else:
return df1
| 24,397
|
def get_3C_coords(name):
"""
Formatted J2000 right ascension and declination and IAU name
Returns the formatted J2000 right ascension and declination and IAU name
given the 3C name.
Example
>>> ra,dec,iau = get_3C_coords('3C286')
    >>> print(ra, dec, iau)
13h31m08.287984s 30d30'32.958850" 1331+305
@param name : 3C name, like 3C123
@return: ra, dec, IAU_name
"""
    dbfile = open(cal_dir+'3C_VLA_cals', 'rb')
data = pickle.load(dbfile)
dbfile.close()
return data[name]
| 24,398
|
def get(player):
"""Get the cipher that corresponding to the YouTube player version.
Args:
player (dict): Contains the 'sts' value and URL of the YouTube player.
Note:
If the cipher is missing in known ciphers, then the 'update' method will be used.
"""
if DIR.exists() and CIPHERS.exists():
try:
with CIPHERS.open('r') as file:
ciphers = json.load(file)
cipher = ciphers.get(player['sts'])
if cipher is not None:
return cipher
else:
return update(player)
except json.decoder.JSONDecodeError:
return update(player)
else:
return update(player)
| 24,399
|