| content | id |
|---|---|
def slowness_to_velocity(slowness):
"""
Convert a slowness log in µs per unit depth, to velocity in unit depth
per second.
Args:
slowness (ndarray): A value or sequence of values.
Returns:
ndarray: The velocity.
"""
return 1e6 / np.array(slowness)
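# Hedged usage sketch with hypothetical values; assumes `import numpy as np` is
# available at module level, as the function body implies.
# A slowness of 100 µs per unit depth corresponds to 1e6 / 100 = 10,000 units per second.
example_slowness = [100.0, 200.0, 50.0]
example_velocity = slowness_to_velocity(example_slowness)
# example_velocity -> array([10000., 5000., 20000.])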
| 18,600
|
def SingleDetectorLogLikelihoodModelViaArray(lookupNKDict,ctUArrayDict,ctVArrayDict, tref, RA,DEC, thS,phiS,psi, dist,det):
"""
    Compute the quadratic ('term 2') contribution to the single-detector log-likelihood
    from precomputed cross-term arrays.
    Uses the Ylm lookup table and the cross-term arrays U and V for detector `det`,
    applies the complex antenna factor (or a pure psi rotation for the "Fake" detector),
    and scales the result by (distMpc/distMpcRef)**-2.
"""
global distMpcRef
# N.B.: The Ylms are a function of - phiref b/c we are passively rotating
# the source frame, rather than actively rotating the binary.
# Said another way, the m^th harmonic of the waveform should transform as
# e^{- i m phiref}, but the Ylms go as e^{+ i m phiref}, so we must give
# - phiref as an argument so Y_lm h_lm has the proper phiref dependence
U = ctUArrayDict[det]
V = ctVArrayDict[det]
Ylms = ComputeYlmsArray(lookupNKDict[det], thS,-phiS)
if (det == "Fake"):
F=np.exp(-2.*1j*psi) # psi is applied through *F* in our model
else:
F = ComplexAntennaFactor(det, RA,DEC,psi,tref)
distMpc = dist/(lal.PC_SI*1e6)
# Term 2 part 1 : conj(Ylms*F)*crossTermsU*F*Ylms
# Term 2 part 2: Ylms*F*crossTermsV*F*Ylms
term2 = 0.j
term2 += F*np.conj(F)*(np.dot(np.conj(Ylms), np.dot(U,Ylms)))
term2 += F*F*np.dot(Ylms,np.dot(V,Ylms))
term2 = np.sum(term2)
term2 = -np.real(term2) / 4. /(distMpc/distMpcRef)**2
return term2
| 18,601
|
def monitor(ctx, config, mon_csv, gdal_frmt, date_frmt, ndv=0):
"""Command line interface to handle monitoring of new imagery. This program will not
pre-process the data, which is done in yatsm.process_modis. This program will calculate
the change probabilities in time-sequential order for all images in input monitoring log.
Currently, the output is written to shapefiles for tileing on Mapbox. """
logger_algo.setLevel(logging.DEBUG)
#Parse config and open csv with images previously processed
cfg = parse_config_file(config)
done_csv = cfg['dataset']['input_file']
read=csv.reader(open(done_csv,"rb"),delimiter=',')
done_array = list(read)
try:
begin_monitor = cfg['NRT']['begin_monitor']
    except KeyError:
begin_monitor = 2016001
#Get first and last dates
last = int(done_array[-1][0])
veryfirst = int(done_array[1][0])
#Read monitor csv with images to use in monitoring
read_mon=csv.reader(open(mon_csv,"rb"),delimiter=',')
monitor_array = list(read_mon)
if monitor_array is None:
logger.error('Incorrect path to monitor csv')
raise click.Abort()
if len(monitor_array) == 0:
        logger.error('No new images to monitor')
raise click.Abort()
first = int(monitor_array[0][0])
#Loop over each date in monitor list. Check again if the date is in input list
num_monitor=len(monitor_array)
for i in range(num_monitor):
cur_image = monitor_array[i]
date = int(cur_image[0])
image_path = cur_image[1]
if date <= last:
logger.error('Previous results processed past image date. Skipping.')
continue
#Read the image as an array.
try:
image_ds = gdal.Open(image_path, gdal.GA_ReadOnly)
except:
logger.error('Could not open new image for reading')
raise click.Abort()
#Do monitor
logger.info('Doing image %s' % image_path)
out = ccdc_monitor(cfg, date, image_ds)
#Get output file names
output_lp_today, output_hp_today, output_lp, output_hp, output_conf, output_conf_today, master = get_mon_outputs(cfg, date)
#Write out the shapefiles. Currently a copy is saved in the daily folders in addition to a master version.
if np.any(out['lowprob'] > begin_monitor):
write_shapefile(out['lowprob'], output_lp_today,image_ds, gdal_frmt,
ndv, band_names=None)
if os.path.isfile(output_lp):
os.remove(output_lp)
write_shapefile(out['lowprob'], output_lp,image_ds, gdal_frmt,
ndv, band_names=None)
if np.any(out['highprob'] > begin_monitor):
write_shapefile(out['highprob'], output_hp_today,image_ds, gdal_frmt,
ndv, band_names=None)
if os.path.isfile(output_hp):
os.remove(output_hp)
write_shapefile(out['highprob'], output_hp,image_ds, gdal_frmt,
ndv, band_names=None)
if np.any(out['confirmed_today'] > begin_monitor):
write_shapefile(out['confirmed_today'], output_conf_today,image_ds, gdal_frmt,
ndv, band_names=None)
if np.any(out['confirmed'] > begin_monitor):
if os.path.isfile(master):
os.remove(master)
write_shapefile(out['confirmed'], master,image_ds, gdal_frmt,
ndv, band_names=None)
#update processed image csv
out_log = [str(date),'Com',image_path]
done_array.append(out_log)
with open(done_csv, 'wb') as f:
writer = csv.writer(f)
writer.writerows(done_array)
output_rast = None
| 18,602
|
def manualcropping(I, pointsfile):
"""This function crops a copy of image I according to points stored
in a text file (pointsfile) and corresponding to aponeuroses (see
Args section).
Args:
        I (array): 3-channel image
pointsfile (text file): contains points' coordinates. Pointsfile must be
organized such that:
- column 0 is the ID of each point
- column 1 is the X coordinate of each point, that is the corresponding
column in I
- column 2 is the Y coordinate, that is the row in I
- row 0 is for txt columns' names
- rows 1 and 2 are for two points of the scale
            - rows 3 to 13 are aponeuroses' points in panoramic images // rows 3
            to 10 in simple images
- following rows are for muscle fascicles (and are optional for this
function)
        Other requirements: pointsfile's name must 1) include the extension
        2) indicate whether I is panoramic or simple by having 'p' or
        's' just before the dot of the extension.
Returns:
        I2 (array) : array of the same type as I. It is the cropped image of I according
to the aponeuroses' points manually picked and stored in pointsfile.
point_of_intersect (tuple) : point at right
of the image; should correspond to the point of intersection of deep
and upper aponeuroses.
        min_raw, max_raw, min_col, max_col: indices of the location of the cropped image
        in the original input image
"""
import numpy as np
data = open(pointsfile, 'r')
#finds whether the image is panoramic or simple
search_point = -1
while (pointsfile[search_point] != '.') and (search_point > (-len(pointsfile))):
search_point = search_point-1
if (search_point == -len(pointsfile)):
raise TypeError("Input pointsfile's name is not correct. Check extension.")
else:
imagetype = pointsfile[search_point-1]
#extract points from the input file
picked_points = []
for line in data:
line = line.strip('\n')
x = line.split('\t')
picked_points.append((x[1], x[2]))
#keep aponeuroses points according to image type
if imagetype == 'p': #keep points 3 to 13 included
apos = np.asarray(picked_points[3:14], dtype=np.float64, order='C')
elif imagetype == 's': #keep points 3 to 10 included
apos = np.asarray(picked_points[3:11], dtype=np.float64, order='C')
else:
raise ValueError("pointsfile's name does not fulfill conditions. See docstrings")
    #find max and min indices for columns and rows to crop image I
    #with a margin of 10 pixels (20 pixels for max_raw).
#Coordinates are inverted in apos
min_raw = max(0, np.min(apos[:, 1])-10)
max_raw = min(I.shape[0], np.max(apos[:, 1])+20)
min_col = max(0, np.min(apos[:, 0])-10)
max_col = min(I.shape[1], np.max(apos[:, 0])+10)
i_cropped = np.copy(I[int(min_raw):int(max_raw), int(min_col):int(max_col), :])
index = np.argmax(apos[:, 0])
point_of_intersect = (apos[index][1] - min_raw, apos[index][0] - min_col)
#close file
data.close()
return i_cropped, point_of_intersect, int(min_raw), int(max_raw), int(min_col), int(max_col)
| 18,603
|
def prepare_time_micros(data, schema):
"""Convert datetime.time to int timestamp with microseconds"""
if isinstance(data, datetime.time):
return int(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE
+ data.second * MCS_PER_SECOND + data.microsecond)
else:
return data
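# Hedged usage sketch. The constants MCS_PER_HOUR, MCS_PER_MINUTE and MCS_PER_SECOND
# are not shown above; the values below restate the standard microsecond counts and are
# only an assumption about how the module defines them elsewhere.
import datetime

MCS_PER_SECOND = 1_000_000
MCS_PER_MINUTE = 60 * MCS_PER_SECOND
MCS_PER_HOUR = 60 * MCS_PER_MINUTE

t = datetime.time(1, 2, 3, 4)
prepare_time_micros(t, schema=None)             # -> 3_723_000_004
prepare_time_micros("not a time", schema=None)  # non-time values are returned unchanged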
| 18,604
|
def get_configs_path_mapping():
"""
Gets a dictionary mapping directories to back up to their destination path.
"""
return {
"Library/Application Support/Sublime Text 2/Packages/User/": "sublime_2",
"Library/Application Support/Sublime Text 3/Packages/User/": "sublime_3",
"Library/Preferences/IntelliJIdea2018.2/": "intellijidea_2018.2",
"Library/Preferences/PyCharm2018.2/": "pycharm_2018.2",
"Library/Preferences/CLion2018.2/": "clion_2018.2",
"Library/Preferences/PhpStorm2018.2": "phpstorm_2018.2",
}
| 18,605
|
def plot_sample_variation_polar(annots_df_group, **kwargs):
"""
    Function: plot polar coordinate values of R3, R4, T3, T4, T3' positions of wild-type flies of a specific age. Bundles from one sample are plotted together on the same subplot.
Inputs:
- annots_df_group: DataFrame group. Processed annotation information of a specific age, grouped by sample number.
- Additional inputs:
- is_save: Boolean. Save figures or not. Default = False.
- fig_format: extension figure format. Default = "svg".
- fig_res: figure resolution. Default = 300.
    Outputs:
    - Figure (displayed, and saved if is_save is True).
    - coords: per-sample polar coordinates of each bundle position.
    - sum_coords: per-sample centroids (summary) of the polar coordinates.
"""
    ### parameters
    is_save = kwargs.get('is_save', False)
    fig_format = kwargs.get('fig_format', 'svg')
    fig_res = kwargs.get('fig_res', 300)
### Params
paths = settings.paths
phi_unit = get_angle_unit_theory('phi_unit')
color_code = settings.matching_info.color_code
plot_color = {
'R3':color_code[3],
'R4':color_code[4],
'T4':color_code[4],
'T3':color_code[3],
'T7':color_code[7],
}
num_subplots = len(annots_df_group)
### Figure set-up
fig, axes = plt.subplots(num_subplots, 1, figsize = (30, 15), subplot_kw={'projection': 'polar'})
fig.tight_layout()
sum_coords = {}
coords = {}
for i in plot_color.keys():
sum_coords[i] = np.zeros((2, num_subplots))
for i_fig in range(num_subplots):
i_sample = i_fig
### calculating
sample_id = list(annots_df_group.groups.keys())[i_sample]
annots_df_current = annots_df_group.get_group(sample_id).reset_index(drop = True)
annots_df_current.set_index('bundle_no', drop = True, inplace = True)
### initialization
coords[i_fig] = {}
for i in plot_color.keys():
coords[i_fig][i] = np.zeros((2, len(annots_df_current)))
### loop through bundle
for ind, bundle_no in enumerate(annots_df_current.index):
pos_t3 = annots_df_current.loc[bundle_no, 'T3c']
pos_t4 = 1
pos_t7 = annots_df_current.loc[bundle_no, 'T7c']
dTiCs = {3:pos_t3, 7:pos_t7, 4: pos_t4}
target_grid_polar = get_target_grid_polar_summary(return_type = 'theory', dTiCs = dTiCs)
coords[i_fig]['R3'][0, ind] = target_grid_polar[2,0]
coords[i_fig]['R3'][1, ind] = annots_df_current.loc[bundle_no, 'R3']
coords[i_fig]['R4'][0, ind] = target_grid_polar[5,0]
coords[i_fig]['R4'][1, ind] = annots_df_current.loc[bundle_no, 'R4']
coords[i_fig]['T3'][0, ind] = target_grid_polar[2,0]
coords[i_fig]['T3'][1, ind] = annots_df_current.loc[bundle_no, 'T3c']
coords[i_fig]['T7'][0, ind] = target_grid_polar[5,0]
coords[i_fig]['T7'][1, ind] = annots_df_current.loc[bundle_no, 'T7c']
coords[i_fig]['T4'][0, ind] = 0
coords[i_fig]['T4'][1, ind] = 1
### get centroids
for t in coords[i_fig].keys():
sum_coords[t][:, i_sample] = np.mean(coords[i_fig][t], axis = 1)
### Plotting
ax = axes.ravel()[i_fig]
### references
ax.plot([0,0], [0,2.5], '--', color = "0.8", linewidth = 0.5)
ax.plot([0,target_grid_polar[2,0]], [0,2.5], '--', color = "0.8", linewidth = 0.5)
ax.plot([0,target_grid_polar[5,0]], [0,2.5], '--', color = "0.8", linewidth = 0.5)
### individual dots
for ind in range(len(annots_df_current)):
for t in ['R3', 'R4']:
ax.plot(coords[i_fig][t][0, ind], coords[i_fig][t][1, ind],
'o', color = plot_color[t], markersize = 10, alpha = 0.5)
for t in ['T3', 'T4', 'T7']:
ax.plot(coords[i_fig][t][0, ind], coords[i_fig][t][1, ind],
'o', mec = plot_color[t], markersize = 25, mew = 1.0, mfc = 'none', alpha = 0.8)
ax.plot(0, 0, 'o', color = 'k', markersize = 5)
ax.text(0.3, -1, "C")
### axis
ax.set_thetamin(-30)
ax.set_thetamax(30)
ax.set_rlim(0, 2.5)
ax.set_yticks([0, 0.5, 1.0, 1.5, 2.0])
ax.set_xticks([-phi_unit, 0, phi_unit])
ax.set_xticklabels([1, 0, -1])
ax.grid(axis = 'y', linestyle = '--', which = 'major', linewidth = 0.5)
ax.grid(axis = 'x', linestyle = '--', which = 'major', linewidth = 0.5)
ax.tick_params()
if(i_fig == num_subplots-1): ### last sub-figure
ax.set_xlabel('Relative Length (a.u.)')
if(i_fig == round(num_subplots/2)-1): ### middle sub-figure.
ax.set_ylabel("\nRelative Angle (a.u.)")
ax.yaxis.set_label_position("right")
    ### saving
fig_name = f'S3C_Fig.{fig_format}'
fig_save_path = os.path.join(paths.output_prefix, fig_name)
if(is_save):
plt.savefig(fig_save_path, dpi=fig_res, bbox_inches='tight', format = fig_format)
plt.show()
return coords, sum_coords
| 18,606
|
def num_from_bins(bins, cls, reg):
"""
:param bins: list
The bins
:param cls: int
Classification result
:param reg:
Regression result
:return: computed value
"""
bin_width = bins[0][1] - bins[0][0]
bin_center = float(bins[cls][0] + bins[cls][1]) / 2
return bin_center + reg * bin_width
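# Worked example with hypothetical bins: uniform bins of width 10, class 1 has
# center 15, and a regression offset of 0.5 bin widths gives 15 + 0.5 * 10 = 20.0.
example_bins = [(0, 10), (10, 20), (20, 30)]
num_from_bins(example_bins, cls=1, reg=0.5)  # -> 20.0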
| 18,607
|
def main():
"""Main loop."""
options, args = parse_options()
verbose = options.verbose
if verbose:
LOG.logger.setLevel(logging.DEBUG)
else:
LOG.logger.setLevel(logging.INFO)
LOG.info('Cleaning stale locks from %s' % FLAGS.lock_path)
utils.cleanup_file_locks()
LOG.info('Finished')
| 18,608
|
def rotate90(matrix: list) -> tuple:
"""return the matrix rotated by 90"""
return tuple(''.join(column)[::-1] for column in zip(*matrix))
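# Small example: a 2x2 grid of characters rotated 90 degrees clockwise.
rotate90(['ab',
          'cd'])  # -> ('ca', 'db')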
| 18,609
|
def get_aspect(jdate, body1, body2):
"""
Return the aspect and orb between two bodies for a certain date
Return None if there's no aspect
"""
if body1 > body2:
body1, body2 = body2, body1
dist = distance(long(jdate, body1),
long(jdate, body2))
for i_asp, aspect in enumerate(aspects['value']):
orb = get_orb(body1, body2, i_asp)
if i_asp == 0 and dist <= orb:
return body1, body2, i_asp, dist
elif aspect - orb <= dist <= aspect + orb:
return body1, body2, i_asp, aspect - dist
return None
| 18,610
|
def create_rollout_policy(domain: Simulator, rollout_descr: str) -> Policy:
"""returns, if available, a domain specific rollout policy
Currently only supported by grid-verse environment:
- "default" -- default "informed" rollout policy
- "gridverse-extra" -- straight if possible, otherwise turn
:param domain: environment
:param rollout_descr: "default" or "gridverse-extra"
"""
if isinstance(domain, gridverse_domain.GridverseDomain):
if rollout_descr == "default":
pol = partial(
gridverse_domain.default_rollout_policy,
encoding=domain._state_encoding, # pylint: disable=protected-access
)
elif rollout_descr == "gridverse-extra":
pol = partial(
gridverse_domain.straight_or_turn_policy,
encoding=domain._state_encoding, # pylint: disable=protected-access
)
else:
if rollout_descr:
raise ValueError(
f"{rollout_descr} not accepted as rollout policy for domain {domain}"
)
pol = partial(random_policy, action_space=domain.action_space)
def rollout(augmented_state: BADDr.AugmentedState) -> int:
"""
So normally PO-UCT expects states to be numpy arrays and everything is
dandy, but we are planning in augmented space here in secret. So the
typical rollout policy of the environment will not work: it does not
expect an `AugmentedState`. So here we gently provide it the underlying
state and all is well
:param augmented_state:
"""
return pol(augmented_state.domain_state)
return RolloutPolicyForPlanning(rollout)
| 18,611
|
def get_event_details(event):
"""Extract event image and timestamp - image with no tag will be tagged as latest.
:param dict event: start container event dictionary.
:return tuple: (container image, last use timestamp).
"""
image = str(event['from'] if ":" in event['from'] else event['from'] + ":latest")
timestamp = event['time']
return image, timestamp
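# Hedged usage sketch with hypothetical Docker start-container event payloads.
example_event = {'from': 'nginx', 'time': 1620000000}
get_event_details(example_event)  # -> ('nginx:latest', 1620000000)
example_event = {'from': 'redis:6.2', 'time': 1620000001}
get_event_details(example_event)  # -> ('redis:6.2', 1620000001)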
| 18,612
|
def add_extra_vars_rm_some_data(df=None, target='CHBr3',
restrict_data_max=False, restrict_min_salinity=False,
# use_median_value_for_chlor_when_NaN=False,
# median_4MLD_when_NaN_or_less_than_0=False,
# median_4depth_when_greater_than_0=False,
rm_LOD_filled_data=False,
# add_modulus_of_lat=False,
# rm_Skagerrak_data=False,
rm_outliers=False, verbose=True, debug=False):
"""
Add, process, or remove (requested) derivative variables for use with ML code
    Parameters
    -------
    df (pd.DataFrame): input data
    target (str): name of the target variable column
    restrict_data_max, restrict_min_salinity, rm_LOD_filled_data, rm_outliers (bool):
        optional data-filtering switches
    verbose (bool), debug (bool): print extra output
    Returns
    -------
    (pd.DataFrame)
"""
# --- Apply choices & Make user aware of choices applied to data
Shape0 = str(df.shape)
N0 = df.shape[0]
# remove the outlier values
if rm_outliers:
Outlier = utils.get_outlier_value(
df, var2use=target, check_full_df_used=False)
        mask = df[target] < Outlier
        df_tmp = df.loc[mask]
prt_str = 'Removing outlier {} values. (df {}=>{},{})'
N = int(df_tmp.shape[0])
if verbose:
print(prt_str.format(target, Shape0, str(df_tmp.shape), N0-N))
df = df_tmp
return df
| 18,613
|
def cvCreateMemStorage(*args):
"""cvCreateMemStorage(int block_size=0) -> CvMemStorage"""
return _cv.cvCreateMemStorage(*args)
| 18,614
|
def copyJSONable(obj):
"""
Creates a copy of obj and ensures it is JSONable.
:return: copy of obj.
:raises:
TypeError: if the obj is not JSONable.
"""
return json.loads(json.dumps(obj))
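# Hedged usage sketch; assumes `import json` is available at module level, as the
# function body implies.
original = {'a': [1, 2], 'b': 'text'}
clone = copyJSONable(original)      # deep, independent copy
clone['a'].append(3)                # original['a'] is unchanged
# copyJSONable({'bad': {1, 2}})     # would raise TypeError: set is not JSON serializable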
| 18,615
|
def get_eez_and_land_union_shapes(iso2_codes: List[str]) -> pd.Series:
"""
Return Marineregions.org EEZ and land union geographical shapes for a list of countries.
Parameters
----------
iso2_codes: List[str]
List of ISO2 codes.
Returns
-------
shapes: pd.Series:
        Shapes of the union of EEZ and land for each country.
Notes
-----
    Union shapes are divided based on their territorial ISO codes. For example, the shapes
    for French Guiana and France are associated with different entries.
"""
shape_fn = f"{data_path}geographics/source/EEZ_land_union/EEZ_Land_v3_202030.shp"
shapes = gpd.read_file(shape_fn)
# Convert country ISO2 codes to ISO3
iso3_codes = convert_country_codes(iso2_codes, 'alpha_2', 'alpha_3', throw_error=True)
# Get 'union' polygons associated with each code
shapes = shapes.set_index("ISO_TER1")["geometry"]
missing_codes = set(iso3_codes) - set(shapes.index)
assert not missing_codes, f"Error: Shapes not available for codes {sorted(list(missing_codes))}"
shapes = shapes.loc[iso3_codes]
shapes.index = convert_country_codes(list(shapes.index), 'alpha_3', 'alpha_2', throw_error=True)
return shapes
| 18,616
|
def putrowstride(a,s):
"""
    Set the row stride of a matrix view object
"""
t=getType(a)
f={'mview_f':vsip_mputrowstride_f,
'mview_d':vsip_mputrowstride_d,
'mview_i':vsip_mputrowstride_i,
'mview_si':vsip_mputrowstride_si,
'mview_uc':vsip_mputrowstride_uc,
'mview_bl':vsip_mputrowstride_bl,
'cmview_f':vsip_cmputrowstride_f,
'cmview_d':vsip_cmputrowstride_d }
    assert t[0] and t[1] in f,'Type <:%s:> not a supported type for putrowstride'%t[1]
return f[t[1]](a,s)
| 18,617
|
def list_tracked_stocks():
"""Returns a list of all stock symbols for the stocks being tracker"""
data = read_json("stockJSON/tracked_stocks.json")
return list(data.keys())
| 18,618
|
def startStream(redisHost="localhost", visualSourceName="local_cam"):
"""
    Stream the frames from the visual input source into a Redis stream.
    Args:
        redisHost (str, optional): Redis hostname URL/IP. Defaults to "localhost".
        visualSourceName (str, optional): visual data source name. Defaults to "local_cam".
"""
# Load redis connection obj
rcon = redisConnection(redisHost)
# Load the visualInput vType=0 for webcam
framesStream = VisualInput(vType=0, fps=15)
# Move though the frames
for (orderID, img) in framesStream:
# reading frames
status, frame = cv2.imencode(".jpg", img)
# Compose them into message
message = {'orderID': orderID, 'image': frame.tobytes()}
# Stream the frames into redis stream
streamID = rcon.xadd(visualSourceName, message,maxlen=10)
print("Setting vdata with ID: {0}, Order: {1}, Image: {2}".format(
streamID, message['orderID'], message['image'][0:10]))
| 18,619
|
def bettorbar(ax, *args, **kwargs):
"""A better error bar function.
Adds kwargs: elinestyle, ecolor
Attempts to set zorder to be the same for all lines"""
mplkwargs = kwargs.copy()
mplkwargs.pop('ecolor', None)
mplkwargs.pop('elinestyle', None)
err = ax.errorbar(*args, **mplkwargs)
color = kwargs.get('ecolor', kwargs['c'])
zorder = err[0].zorder
err[2][0].set(linestyle = kwargs.get('elinestyle', kwargs.get('ls', kwargs.get('linestyle'))), color=color, zorder=zorder)
err[1][0].set(color=color, zorder=zorder)
err[1][1].set(color=color, zorder=zorder)
| 18,620
|
def min_max_two(first, second):
"""Pomocna funkce, vrati dvojici:
(mensi ze zadanych prvku, vetsi ze zadanych prvku).
K tomu potrebuje pouze jedno porovnani."""
return (first, second) if first < second else (second, first)
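# Quick example of the single-comparison helper.
min_max_two(5, 3)  # -> (3, 5)
min_max_two(3, 5)  # -> (3, 5)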
| 18,621
|
def expand_pin_groups_and_identify_pin_types(tsm: SMContext, pins_in):
"""
for the given pins expand all the pin groups and identifies the pin types
Args:
tsm (SMContext): semiconductor module context from teststand
        pins_in (list): list of pins for which information needs to be expanded if an entry is a pin group
Returns:
pins_info, pins_expanded: tuple of pins_info and pins_expanded.
"""
pins_temp, pin_types_temp = get_all_pins(tsm)
pins_info = []
pins_expanded = []
i = 0
for d_pin in pins_in:
if d_pin in pins_temp:
index_d = pins_temp.index(d_pin)
d_pin_type = pin_types_temp[index_d]
count = 1
pin_expanded = ExpandedPinInformation(d_pin, d_pin_type, i)
pins_expanded.append(pin_expanded)
else:
d_pin_type = PinType.PIN_GROUP
temp_exp_pins = tsm.get_pins_in_pin_groups(d_pin) # This works fine
count = len(temp_exp_pins)
for a_pin in temp_exp_pins:
index_a = pins_temp.index(a_pin)
a_pin_type = pin_types_temp[index_a]
pin_expanded = ExpandedPinInformation(
a_pin, a_pin_type, i
) # Found bug here due to class & fixed it.
pins_expanded.append(pin_expanded)
pin_info = PinInformation(d_pin, d_pin_type, count)
pins_info.append(pin_info)
i += 1
pins_expanded = remove_duplicates_from_tsm_pin_information_array(pins_info, pins_expanded)
return pins_info, pins_expanded
| 18,622
|
def multi_ways_balance_merge_sort(a):
"""
    Multi-way balanced merge sort
    - mostly used for external sorting
    - multi-dimensional arrays are used to simulate the runs (merge segments) held in external storage
    - a loser tree is used to implement the k-way merge
    - the number of merge passes is inversely proportional to the number of ways k,
      so increasing k improves efficiency
    :param a:
    :return:
    """
    SENTRY = float('inf')  # sentinel marking the end of each run
    leaves = []  # one element from each run forms the initial sequence for the loser tree
    b = []  # output run; simplified here to a single one-dimensional array. In practice the output would also be split into runs.
    for v in a:
        merge_sort(v)  # sort within each run, using merge sort
        v.append(SENTRY)  # append the sentinel to each run
        leaves.append(v[0])  # the first element of each run forms the initial sequence for the loser tree
        del v[0]  # remove the first element of each run
    lt = LoserTree(leaves)  # build the loser tree
    # repeatedly fetch the winner
    while True:
        i, v = lt.winner  # winner
        if v == SENTRY:
            # sorting is finished
            break
        b.append(v)  # write the winner to the output run
        lt.modify_key(i, a[i][0])  # feed the next element from the winner's run into the loser tree
        del a[i][0]  # remove the consumed element
return b
| 18,623
|
def inner_cg_rhs(rhs, u, v, EHs, tau):
"""Compute right hand side for inner CG method
``rhs = u^n + tau * (div_h v^{n+1} + EHs)``
Args:
rhs (gpuarray): Right hand side.
u (gpuarray): u.
v (gpuarray): v.
EHs (gpuarray): EHs.
tau (float): tau.
"""
inner_cg_rhs_func(rhs, u, v, EHs, np.float32(tau), np.int32(u.shape[0]),
np.int32(u.shape[1]), np.int32(1), block=block,
grid=get_grid(u))
| 18,624
|
def check_instance_of(value, types, message = None):
"""
Raises a #TypeError if *value* is not an instance of the specified *types*. If no message is
provided, it will be auto-generated for the given *types*.
"""
if not isinstance(value, types):
if message is None:
message = f'expected {_repr_types(types)}, got {type(value).__name__} instead'
raise TypeError(_get_message(message))
return value
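# Hedged usage sketch; the helpers _repr_types and _get_message are assumed to be
# defined elsewhere in the module and only matter when the check fails.
value = check_instance_of(42, int)            # returns 42 unchanged
value = check_instance_of('x', (str, bytes))  # tuples of types are accepted
# check_instance_of('x', int)                 # would raise TypeError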
| 18,625
|
def create_input_metadatav1():
"""Factory pattern for the input to the marshmallow.json.MetadataSchemaV1.
"""
def _create_input_metadatav1(data={}):
data_to_use = {
'title': 'A title',
'authors': [
{
'first_name': 'An',
'last_name': 'author'
}
],
'description': 'A description',
'resource_type': {
'general': 'other',
'specific': 'other'
},
'license': 'mit-license',
'permissions': 'all_view',
}
data_to_use.update(data)
return data_to_use
return _create_input_metadatav1
| 18,626
|
def create_job(title: str = Body(None, description='The title of the codingjob'),
codebook: dict = Body(None, description='The codebook'),
units: list = Body(None, description='The units'),
rules: dict = Body(None, description='The rules'),
debriefing: dict = Body(None, description='Debriefing information'),
jobsets: list = Body(None, description='A list of codingjob jobsets. An array of objects, with keys: name, codebook, unit_set'),
               authorization: dict = Body(None, description='A dictionary containing authorization settings'),
provenance: dict = Body(None, description='A dictionary containing any information about the units'),
user: User = Depends(auth_user),
db: Session = Depends(get_db)):
"""
Create a new codingjob. Body should be json structured as follows:
{
"title": <string>,
"codebook": {.. blob ..}, # required, but can be omitted if specified in every jobset
"units": [
{"id": <string> # An id string. Needs to be unique within a codingjob (not necessarily across codingjobs)
"unit": {.. blob ..},
"gold": {.. blob ..}, # optional, include correct answer here for gold questions
}
..
],
"rules": {
"ruleset": <string>,
"authorization": "open"|"restricted", # optional, default: open
.. additional ruleset parameters ..
},
"debriefing": {
"message": <string>,
"link": <string> (url)
}
"jobsets": [ # optional
{"name": <string>,
"codebook": <codebook>, ## optional
"unit_set": [<external_id>] ## optional
}
]
"authorization": { # optional, default: {'restricted': False}
restricted: boolean,
users: [emails]
},
"provenance": {.. blob ..}, # optional
}
    Where ..blob.. indicates that this is not processed by the backend, so it can be annotator specific.
    See the annotator documentation for additional information.
    The rules determine how units should be distributed, how to deal with quality control, etc.
The ruleset name specifies the class of rules to be used (currently "crowd" or "expert").
Depending on the ruleset, additional options can be given.
See the rules documentation for additional information
"""
check_admin(user)
if not title or not codebook or not units or not rules:
raise HTTPException(status_code=400, detail='Codingjob is missing keys')
try:
job = crud_codingjob.create_codingjob(db, title=title, codebook=codebook, jobsets=jobsets, provenance=provenance, rules=rules, debriefing=debriefing, creator=user, units=units, authorization=authorization)
except Exception as e:
logging.error(e)
raise HTTPException(status_code=400, detail='Could not create codingjob')
return dict(id=job.id)
| 18,627
|
async def finalize(
db,
pg: AsyncEngine,
subtraction_id: str,
gc: Dict[str, float],
count: int,
) -> dict:
"""
Finalize a subtraction by setting `ready` to True and updating the `gc` and `files`
fields.
:param db: the application database client
:param pg: the PostgreSQL AsyncEngine object
:param subtraction_id: the id of the subtraction
    :param gc: a dict containing GC data
    :param count: the count value to set on the subtraction document
    :return: the updated subtraction document
"""
updated_document = await db.subtraction.find_one_and_update(
{"_id": subtraction_id},
{
"$set": {
"gc": gc,
"ready": True,
"count": count,
}
},
)
return updated_document
| 18,628
|
def find_path(a, b, is_open):
"""
:param a: Start Point
:param b: Finish Point
:param is_open: Function returning True if the Point argument is an open square
:return: A list of Points containing the moves needed to get from a to b
"""
if a == b:
return []
if not is_open(b):
return None
moves = rectilinear_path(a, b, is_open) or direct_path(a, b, is_open) or find_path_using_a_star(a, b, is_open)
return moves
| 18,629
|
def view_skill_api():
""" General API for skills and posts """
dbsess = get_session()
action = request.form["action"]
kind = request.form["kind"]
if kind == "post":
if action == "read":
post = models.Post.get_by_id(dbsess, int(request.form["post-id"]))
if not post:
return "", 404
return jsonify({
"title": post.title,
"content": post.body,
})
if action == "create":
skills = request.form.getlist("skill-ids[]")
post = models.Post(title=request.form["title"],
body=request.form["content"])
dbsess.add(post)
dbsess.commit()
for skill_id in skills:
postskill = models.PostSkill(post_id=post.id, skill_id=skill_id)
dbsess.add(postskill)
dbsess.commit()
return jsonify({"new-id": post.id}), 201
if action == "modify":
skills = [int(_id) for _id in request.form.getlist("skill-ids[]")]
post = models.Post.get_by_id(dbsess, int(request.form["post-id"]))
post.title = request.form["title"]
post.body = request.form["content"]
dbsess.query(models.PostSkill).filter_by(post_id=post.id).delete()
for skill_id in skills:
postskill = models.PostSkill(post_id=post.id, skill_id=skill_id)
dbsess.add(postskill)
dbsess.commit()
dbsess.add(post)
dbsess.commit()
return "", 202
if action == "delete":
pass
if kind == "skill":
if action == "read":
send_skills = []
skills = dbsess.query(models.Skill).all()
post = models.Post.get_by_id(dbsess, int(request.form["post-id"]))
for skill in skills:
send_skills.append({
"name": skill.name,
"id": skill.id,
"selected": skill in [skl.skill for skl in post.skills] if post else False,
})
return jsonify({"skills": send_skills}), 200
return "", 400
return "", 400
| 18,630
|
def current_team() -> None:
"""Print the team currently authenticated against."""
client: Client = _load_client()
print(client.default_team)
| 18,631
|
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
| 18,632
|
def custom_swagger_client(func: Callable) -> None:
"""
Allows client to customize a SwaggerClient, so that they can leverage
the default request handler.
"""
get_abstraction().client = func()
| 18,633
|
def check_match(candidate_smiles, gen):
"""
    If A is a substructure of B and B is a substructure of A, then the two molecules
    must be isomorphic. That is the check used here to detect an exact match.
"""
candidate = MolFromSmiles(candidate_smiles)
for m in test_set:
if m.HasSubstructMatch(candidate) and candidate.HasSubstructMatch(m):
print(f'{candidate_smiles} matched in {gen}')
match_gen_map[candidate_smiles] = int(gen[1])
# update flag for the matched str.
matched_flag_list[test_set.index(m)] = True
| 18,634
|
def read_event(suppress=False):
"""
Blocks until a keyboard event happens, then returns that event.
"""
queue = _queue.Queue(maxsize=1)
hooked = hook(queue.put, suppress=suppress)
while True:
event = queue.get()
unhook(hooked)
return event
| 18,635
|
def save_new_playlist(uid=None):
"""Gets and saves a New Playlist to a specific user"""
# Creates a new playlist
if uid:
playlist = create_playlist_user(uid)
else:
playlist = create_playlist_general()
# Connects and write into DataBase
db = connection_database()
collection = db['spot_playlists']
collection.insert_one(playlist)
print("A new Playlist was added to DataBase")
| 18,636
|
def part_1(data):
"""Part 1"""
start = time.perf_counter()
# CODE HERE
legality = [check_line(line) for line in data]
points = [POINTS[CLOSINGS.index(c)] for _,c in legality if c is not None]
end = time.perf_counter()
# OUTPUT HERE
print(f'total={sum(points)}')
print(f'elapsed = {end-start:.4f}')
| 18,637
|
def rotations(it):
""" rotations([0,1,2]) --> [[0, 1, 2], [1, 2, 0], [2, 0, 1]] """
l = list(it)
for i in range(len(l)):
yield iter(l)
l = l[1:]+[l[0]]
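# Example: materialize each yielded iterator to see the rotated sequences.
[list(r) for r in rotations([0, 1, 2])]  # -> [[0, 1, 2], [1, 2, 0], [2, 0, 1]]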
| 18,638
|
def searchCVE(service, version):
"""Return a list of strings"""
re.search
url = "https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword="+service+"+"+version
res = requests.get(url)
soup = BeautifulSoup(res.content, "lxml")
listCVE = []
for elt in soup.find_all('a', attrs={'href' : re.compile("^/cgi-bin/")}):
listCVE.append(elt.get_text())
return url, listCVE
| 18,639
|
def config_from_args(args) -> config.TestConfig:
"""Convert args read from cli to config"""
return config.TestConfig(program=args.p,
test_dir=args.d,
verifier=args.v,
break_on_error=args.b == 'true',
groups=args.g or ['.*'],
timeout=args.t,
timer=args.timer,
sha=args.sha)
| 18,640
|
def create_eulerian_path(graph_augmented, graph_original, start_node=None):
"""
Args:
graph_augmented (networkx graph): graph w links between odd degree nodes created from `add_augmenting_path_to_graph`.
        graph_original (networkx graph): original graph created from `create_networkx_graph_from_edgelist`
start_node (str): name of starting node for the no return CPP solution.
    Yields:
        edges of the Eulerian path through `graph_original`, with each augmented edge replaced
        by the sequence of real edges along the shortest path between its endpoints
"""
euler_path = list(nx.eulerian_path(graph_augmented, source=start_node, keys=True))
assert len(graph_augmented.edges()) == len(euler_path), 'graph and euler_circuit do not have equal number of edges.'
for edge in euler_path:
aug_path = nx.shortest_path(graph_original, edge[0], edge[1], weight='distance')
edge_attr = graph_augmented[edge[0]][edge[1]][edge[2]]
if not edge_attr.get('augmented'):
yield edge + (edge_attr,)
else:
for edge_aug in list(zip(aug_path[:-1], aug_path[1:])):
# find edge with shortest distance (if there are two parallel edges between the same nodes)
edge_aug_dict = graph_original[edge_aug[0]][edge_aug[1]]
edge_key = min(edge_aug_dict.keys(), key=(lambda k: edge_aug_dict[k]['distance'])) # index with min distance
edge_aug_shortest = edge_aug_dict[edge_key]
edge_aug_shortest['augmented'] = True
edge_aug_shortest['id'] = edge_aug_dict[edge_key]['id']
yield edge_aug + (edge_key, edge_aug_shortest, )
| 18,641
|
def build_json(
spec_filename: str,
package_name: str,
dist_path: str,
format_: PackageFormat = PackageFormat.NONE,
) -> None:
"""
Create an OpenAlchemy distribution package with the SQLAlchemy models.
The package can be uploaded to, for example, PyPI or a private repository for
distribution.
The formats can be combined with the bitwise operator or (``|``), for
instance, building both sdist and wheel packages can be specified like that:
.. code-block: python
format_ = PackageFormat.SDIST|PackageFormat.WHEEL
Args:
spec_filename: filename of an OpenAPI spec in JSON format
package_name: The name of the package.
dist_path: The directory to output the package to.
format_: (optional) The format(s) of the archive(s) to build.
"""
# Most OpenAPI specs are YAML, so, for efficiency, we only import json if we
# need it:
import json # pylint: disable=import-outside-toplevel
with open(spec_filename) as spec_file:
spec = json.load(spec_file)
return _build_module.execute(
spec=spec, name=package_name, path=dist_path, format_=format_
)
| 18,642
|
def get_default_plugins_folder():
"""
:returns: Default location for the plugins folder.
:rtype: str
"""
return path.join(get_install_folder(), "plugins")
| 18,643
|
def py_write_qr(image_name):
"""Write QR code image file (internal method)."""
uid = uuid.uuid4()
randStr = uid.hex[:4]
img = qrcode.make(image_name)
m = re.search('/([^/]+?)/.+?$', image_name)
filename = m.group(1) + "_" + randStr + '.png'
img.save(str(filename))
combine_images.main(filename='horizontally_combined.jpg')
| 18,644
|
def importFromDotSpec(spec):
"""
Import an object from an arbitrary dotted sequence of packages, e.g.,
"a.b.c.x" by splitting this into "a.b.c" and "x" and calling importFrom().
:param spec: (str) a specification of the form package.module.object
    :return: the imported object
:raises PygcamException: if the import fails
"""
modname, objname = spec.rsplit('.', 1)
try:
return importFrom(modname, objname)
except ImportError:
raise PygcamException("Can't import '%s' from '%s'" % (objname, modname))
| 18,645
|
def usgs_graphite_call(*, resp, year, **_):
"""
    Convert the response from the url call to a pandas dataframe and begin parsing
    the df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
| 18,646
|
def test_unicode_display_name(executed_docstring_source):
"""
>>> import allure
>>> @allure.title(u"Лунтик")
>>> def test_unicode_display_name_example():
... pass
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_unicode_display_name_example", has_title(u"Лунтик"))
)
| 18,647
|
def test_regrid_bilinear_2():
"""Test bilinear regridding option 'bilinear-2'"""
cube_in, cube_out_mask, _ = define_source_target_grid_data()
regrid_bilinear = RegridLandSea(regrid_mode="bilinear-2",)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6, 2.9, 3.2, 3.5],
[2.5, 2.8, 3.1, 3.4, 3.7, 4.0, 4.3, 4.6, 4.9, 5.2, 5.5],
[4.5, 4.8, 5.1, 5.4, 5.7, 6.0, 6.3, 6.6, 6.9, 7.2, 7.5],
[6.5, 6.8, 7.1, 7.4, 7.7, 8.0, 8.3, 8.6, 8.9, 9.2, 9.5],
[8.5, 8.8, 9.1, 9.4, 9.7, 10.0, 10.3, 10.6, 10.9, 11.2, 11.5],
[10.5, 10.8, 11.1, 11.4, 11.7, 12.0, 12.3, 12.6, 12.9, 13.2, 13.5],
[12.5, 12.8, 13.1, 13.4, 13.7, 14.0, 14.3, 14.6, 14.9, 15.2, 15.5],
[14.5, 14.8, 15.1, 15.4, 15.7, 16.0, 16.3, 16.6, 16.9, 17.2, 17.5],
]
)
np.testing.assert_allclose(regrid_bilinear.data, expected_results, atol=1e-3)
| 18,648
|
def _gen_np_divide(arg1, arg2, out_ir, typemap):
"""generate np.divide() instead of / for array_expr to get numpy error model
like inf for division by zero (test_division_by_zero).
"""
scope = arg1.scope
loc = arg1.loc
# g_np_var = Global(numpy)
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global('np', numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# attr call: div_attr = getattr(g_np_var, divide)
div_attr_call = ir.Expr.getattr(g_np_var, "divide", loc)
attr_var = ir.Var(scope, mk_unique_var("$div_attr"), loc)
func_var_typ = get_np_ufunc_typ(numpy.divide)
typemap[attr_var.name] = func_var_typ
attr_assign = ir.Assign(div_attr_call, attr_var, loc)
# divide call: div_attr(arg1, arg2)
div_call = ir.Expr.call(attr_var, [arg1, arg2], (), loc)
func_typ = func_var_typ.get_call_type(
typing.Context(), [typemap[arg1.name], typemap[arg2.name]], {})
out_ir.extend([g_np_assign, attr_assign])
return func_typ, div_call
| 18,649
|
def add_yfull(
kit: Optional[str] = Option(None, "--kit", "-k", help = "The kit number."),
group: Optional[str] = Option(None, "--group", help = "The group within which the sample clusters."),
ancestor: Optional[str] = Option(None, "--ancestor", help = "The earliest known patrilineal ancestor."),
country: Optional[str] = Option(None, "--country", help = "The country from which the ancestor came."),
haplogroup: Optional[str] = Option(None, "--haplogroup", help = "The haplogroup of the sample."),
file: Path = Argument(..., exists = True, dir_okay = False, help = "The YFull SNP file for the kit."),
) -> None:
"""Add a YFull kit to the SNP database."""
if kit is None:
match = re.fullmatch(r"SNP_for_(YF\d+)_(\d+)", file.stem)
if not match:
raise BadParameter("Could not infer kit name from filename; specify it explicitly.", param = kit)
kit = match.group(1)
yfull_haplogroup, _yfull_terminal_snps, yfull_df = get_yfull_df(file)
info_df = pd.DataFrame(
{
"Kit Number": pd.Series(kit, dtype = "str"),
"Group": pd.Series(group, dtype = "str"),
"Paternal Ancestor Name": pd.Series(ancestor, dtype = "str"),
"Country": pd.Series(country, dtype = "str"),
"Haplogroup": pd.Series(haplogroup or yfull_haplogroup, dtype = "str"),
}
)
info_df.set_index("Kit Number", inplace = True)
yfull_series = yfull_df["Call"]
yfull_df = yfull_series.to_frame(kit).T
yfull_df.rename_axis("Kit Number", axis = 0, inplace = True)
kit_df = pd.concat([info_df, yfull_df], axis = 1)
echo(f"Added kit {kit}.")
merge_db(kits_snp_path, kit_df)
echo(f"Kits SNP database written to `{kits_snp_path}`.")
| 18,650
|
def pseudo_volume_watson(eos, r, temp, press_eos, rho_eos, a_mix, b_mix, desired_phase):
"""
Calculates a pseudo volume based on the algorithm described by Watson (2018)
in thesis "Robust Simulation and Optimization Methods for Natural Gas Liquefaction Processes"
Available at https://dspace.mit.edu/handle/1721.1/115702
"""
if eos == 0:
u, w = 1, 0
elif eos == 1:
u, w = 2, -1
else:
return '', 0, 0
# kappa is a tuning parameter whose details are given by Watson.
# Remains untouched for most cases
kappa = 0.9
solution_found, rho_mc, rho_lo, rho_hi, rho_omega, temp_mc = pseudo_root_search_mathias(eos, r,
temp, a_mix, b_mix,
desired_phase, kappa)
if desired_phase == 'liq':
rho_L_omega = rho_omega
if not solution_found:
rho_L_star = rho_mc
else:
rho_L_star = mid(rho_mc, rho_L_omega, rho_hi)
rho_test = rho_L_star
press_star = r * temp / (-b_mix + 1 / rho_test) - a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2))
d_press_d_rho_star = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
B_L = d_press_d_rho_star * (rho_L_star - 0.7 * rho_mc)
A_L = (press_star - B_L * math.log(rho_L_star - 0.7 * rho_mc))
rho_L_extrap = min(math.exp((press_eos - A_L) / B_L) + 0.7 * rho_mc, rho_hi)
rho_L = mid(rho_eos, rho_L_star, rho_L_extrap)
rho_test = rho_L
press_calc = r * temp / (-b_mix + 1 / rho_test) - a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2))
return desired_phase, 1 / rho_L, abs(press_calc)
elif desired_phase == 'vap':
rho_V_omega = rho_omega
if not solution_found:
rho_V_star = kappa * rho_mc
else:
rho_V_star = mid(rho_lo, rho_V_omega, kappa * rho_mc)
rho_V_bound = mid(rho_lo, rho_V_omega, kappa * rho_mc)
rho_test = rho_V_star
press_star = r * temp / (-b_mix + 1 / rho_test) - a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2))
# Derivative of the EOS in terms of rho_test
d_press_d_rho_star = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
A_V = 1 / press_star
B_V = -d_press_d_rho_star / (press_star ** 2)
C_V = -abs(A_V + 0.5 * B_V * (rho_mc - rho_V_star)) / ((0.5 * (rho_mc - rho_V_star)) ** 2)
term2 = (-B_V - math.sqrt(B_V ** 2 - 4 * C_V * max(0, (A_V - 1 / press_eos)))) / (2 * C_V)
rho_test = rho_V_omega
d_press_d_rho_omega = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
term3 = min(0, press_eos - press_star) / d_press_d_rho_omega + term2 + max(0, temp - temp_mc) * max(0,
d_press_d_rho_star - d_press_d_rho_omega)
rho_V_extrap = mid(0, rho_hi, rho_V_bound + term3)
rho_V = mid(rho_eos, rho_V_star, rho_V_extrap)
# Do we need to correct the vapor fugacity coefficients?
# rho_test = rho_V
# press_calc = r*temp/(-b_mix + 1/rho_test) - a_mix/(w*b_mix**2 + u*b_mix/rho_test + rho_test**(-2))
return desired_phase, 1 / rho_V, press_eos
else:
return '', 0, 0
| 18,651
|
def test_hook_manager_can_call_hooks_defined_in_specs(
hook_specs, hook_name, hook_params
):
"""Tests to make sure that the hook manager can call all hooks defined by specs."""
cli_hook_manager = CLIHooksManager()
hook = getattr(cli_hook_manager.hook, hook_name)
assert hook.spec.namespace == hook_specs
kwargs = {param: None for param in hook_params}
result = hook(**kwargs)
# since there hasn't been any hook implementation, the result should be empty
# but it shouldn't have raised
assert result == []
| 18,652
|
def layer_norm(x, axes=1, initial_bias_value=0.0, epsilon=1e-3, name="var"):
"""
Apply layer normalization to x
Args:
        x: input variable.
        axes: axis or list of axes to normalize over.
        initial_bias_value: initial value for the LN bias.
        epsilon: small constant value to avoid division by zero.
        name: variable name/scope for the LN op.
Returns:
LN(x) with same shape as x
"""
if not isinstance(axes, list):
axes = [axes]
scope = tf.get_variable_scope()
with tf.variable_scope(scope):
with tf.variable_scope(name):
mean = tf.reduce_mean(x, axes, keep_dims=True)
variance = tf.sqrt(tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True))
with tf.device('/cpu:0'):
gain = tf.get_variable('gain', x.get_shape().as_list()[1:],
initializer=tf.constant_initializer(1.0))
bias = tf.get_variable('bias', x.get_shape().as_list()[1:],
initializer=tf.constant_initializer(initial_bias_value))
return gain * (x - mean) / (variance + epsilon) + bias
| 18,653
|
def P2D_p(df, attr):
"""
    Compute the conditional probability P(target | attribute).
    *Parameters:
        df: dataframe containing the data. Must contain a column named "target".
        attr: attribute to use, the name of a column of the dataframe.
    *Returns:
        A dictionary of dictionaries, dictionnaire_proba, where dictionnaire_proba[a][t]
        contains P(target = t | attribute = a).
    """
    list_cle = np.unique(df[attr].values)  # possible values of the attribute
    dictionnaire_proba = dict.fromkeys(list_cle)
    for cle in dictionnaire_proba:
        dictionnaire_proba[cle] = dict.fromkeys([0,1], 0)  # target always takes the value 0 or 1
group = df.groupby(["target", attr]).groups
for t, val in group:
dictionnaire_proba[val][t] = len(group[(t, val)])
for cle in dictionnaire_proba:
taille = (df[attr] == cle).sum()
for i in range (2):
dictionnaire_proba[cle][i] = dictionnaire_proba[cle][i] / taille
return dictionnaire_proba
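# Hedged usage sketch on a tiny hypothetical dataframe; assumes pandas and numpy
# are imported at module level (as `pd` and `np`), as the function body implies.
import pandas as pd

toy = pd.DataFrame({'target': [1, 0, 1, 0], 'smoker': [1, 1, 0, 0]})
P2D_p(toy, 'smoker')
# In this balanced example every conditional probability P(target | smoker) is 0.5.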
| 18,654
|
def save_examples(logdir = None):
"""save examples of true and false positives to help
visualize the learning"""
if logdir is None:
return
input_map = {}
for error_type in ['false_negatives', 'false_positives','true_positives', 'true_negatives']:
#load the names of the files
files = np.loadtxt(logdir + '/' + error_type + '.csv', dtype=str, delimiter = ',')
if len(files.shape) < 2:
files = np.array([files])
files1 = files[:,0]
files2 = files[:,1]
wrong_pairs = np.zeros((1,0,200,3)) #create a null image to concat to
#for each pair...
for i in range(len(files1)):
wrong_row = np.zeros((1,100,0,3))
#cycle through both types of files
for j in range(2) :
filelist = [files1,files2][j]
image_paths_placeholder = tf.placeholder(tf.string, name='image_paths'+str(i) +str(j))
input_map[image_paths_placeholder] = filelist[i]
#read the contents of the file and write it
file_contents = tf.read_file(image_paths_placeholder)
img = tf.image.decode_image(file_contents)
img = tf.reshape(img, (1,100,100,3)) #TODO: hard coded dimensions
wrong_row = tf.concat((wrong_row,img),axis=2)
wrong_pairs = tf.concat((wrong_pairs,wrong_row),axis=1)
#concat row to total
tf.summary.image(error_type, wrong_pairs, max_outputs=100)
#run a small network just to save the output
summary_op = tf.summary.merge_all()
with tf.Session() as sess:
summary = sess.run(summary_op, feed_dict=input_map)
writer = tf.summary.FileWriter(logdir)
writer.add_summary(summary, 0)
| 18,655
|
def has_entries(*keys_valuematchers, **kv_args):
"""Matches if dictionary contains entries satisfying a dictionary of keys
and corresponding value matchers.
:param matcher_dict: A dictionary mapping keys to associated value matchers,
or to expected values for
:py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Note that the keys must be actual keys, not matchers. Any value argument
that is not a matcher is implicitly wrapped in an
:py:func:`~hamcrest.core.core.isequal.equal_to` matcher to check for
equality.
Examples::
has_entries({'foo':equal_to(1), 'bar':equal_to(2)})
has_entries({'foo':1, 'bar':2})
``has_entries`` also accepts a list of keyword arguments:
.. function:: has_entries(keyword1=value_matcher1[, keyword2=value_matcher2[, ...]])
:param keyword1: A keyword to look up.
:param valueMatcher1: The matcher to satisfy for the value, or an expected
value for :py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Examples::
has_entries(foo=equal_to(1), bar=equal_to(2))
has_entries(foo=1, bar=2)
Finally, ``has_entries`` also accepts a list of alternating keys and their
value matchers:
.. function:: has_entries(key1, value_matcher1[, ...])
:param key1: A key (not a matcher) to look up.
:param valueMatcher1: The matcher to satisfy for the value, or an expected
value for :py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Examples::
has_entries('foo', equal_to(1), 'bar', equal_to(2))
has_entries('foo', 1, 'bar', 2)
"""
if len(keys_valuematchers) == 1:
try:
base_dict = keys_valuematchers[0].copy()
for key in base_dict:
base_dict[key] = wrap_matcher(base_dict[key])
except AttributeError:
raise ValueError(
"single-argument calls to has_entries must pass a dict as the argument"
)
else:
if len(keys_valuematchers) % 2:
raise ValueError("has_entries requires key-value pairs")
base_dict = {}
for index in range(int(len(keys_valuematchers) / 2)):
base_dict[keys_valuematchers[2 * index]] = wrap_matcher(
keys_valuematchers[2 * index + 1]
)
for key, value in kv_args.items():
base_dict[key] = wrap_matcher(value)
return IsDictContainingEntries(base_dict)
| 18,656
|
def main():
"""
Main function
"""
# Print all Flags to confirm parameter settings
print_flags()
if not os.path.exists(FLAGS.data_dir):
os.makedirs(FLAGS.data_dir)
# Run the training operation
train()
| 18,657
|
def giou_loss(y_true: TensorLike,
y_pred: TensorLike,
mode: str = 'giou') -> tf.Tensor:
"""
Args:
y_true: true targets tensor. The coordinates of the each bounding
box in boxes are encoded as [y_min, x_min, y_max, x_max].
y_pred: predictions tensor. The coordinates of the each bounding
box in boxes are encoded as [y_min, x_min, y_max, x_max].
mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
Returns:
GIoU loss float `Tensor`.
"""
if mode not in ['giou', 'iou']:
raise ValueError("Value of mode should be 'iou' or 'giou'")
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.cast(y_pred, tf.float32)
y_true = tf.cast(y_true, y_pred.dtype)
giou = _calculate_giou(y_pred, y_true, mode)
return 1 - giou
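# Hedged usage sketch; assumes TensorFlow (`tf`) and the private helper
# _calculate_giou are available as in the surrounding module. Identical boxes give
# an IoU/GIoU of 1, so the loss is 0.
boxes = [[0.0, 0.0, 1.0, 1.0]]
giou_loss(boxes, boxes)  # -> tf.Tensor containing 0.0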
| 18,658
|
def generate_one_frame(
df_dict: dict,
tag_list: list,
fig,
up_to_index,
time_column,
batch_ids_to_animate: list,
animation_colour_assignment,
show_legend=False,
hovertemplate: str = "",
max_columns=0,
) -> List[Dict]:
"""
Returns a list of dictionaries.
Each entry in the list is for each subplot; in the order of the subplots.
Since each subplot is a tag, we need the `tag_list` as input.
"""
output = []
row = col = 1
for tag in tag_list:
for batch_id in batch_ids_to_animate:
# These 4 lines are duplicated from the outside function
if time_column in df_dict[batch_id].columns:
time_data = df_dict[batch_id][time_column]
else:
time_data = list(range(df_dict[batch_id].shape[0]))
output.append(
go.Scatter(
x=time_data[0:up_to_index],
y=df_dict[batch_id][tag][0:up_to_index],
name=batch_id,
mode="lines",
hovertemplate=hovertemplate,
line=animation_colour_assignment[batch_id],
legendgroup=batch_id,
showlegend=show_legend if tag == tag_list[0] else False,
xaxis=fig.get_subplot(row, col)[1]["anchor"],
yaxis=fig.get_subplot(row, col)[0]["anchor"],
)
)
        # One level outdented: in the loop over the tags, not in the loop over
        # `batch_ids_to_animate`!
col += 1
if col > max_columns:
row += 1
col = 1
return output
| 18,659
|
def invite_contributor_post(node, **kwargs):
"""API view for inviting an unregistered user. Performs validation, but does not actually invite the user.
Expects JSON arguments with 'fullname' (required) and email (not required).
"""
fullname = request.json.get('fullname').strip()
email = request.json.get('email')
# Validate and sanitize inputs as needed. Email will raise error if invalid.
fullname = sanitize.strip_html(fullname)
if email:
email = email.lower().strip()
try:
validate_email(email)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
if not fullname:
return {'status': 400, 'message': 'Full name field cannot be empty'}, 400
# Check if email is in the database
user = get_user(email=email)
if user:
if user.is_registered:
msg = 'User is already in database. Please go back and try your search again.'
return {'status': 400, 'message': msg}, 400
elif node.is_contributor(user):
msg = 'User with this email address is already a contributor to this project.'
return {'status': 400, 'message': msg}, 400
elif not user.is_confirmed:
serialized = profile_utils.serialize_unregistered(fullname, email)
else:
serialized = profile_utils.add_contributor_json(user)
# use correct display name
serialized['fullname'] = fullname
serialized['email'] = email
else:
# Create a placeholder
serialized = profile_utils.serialize_unregistered(fullname, email)
return {'status': 'success', 'contributor': serialized}
| 18,660
|
def upload_file(data_bucket):
"""Upload a file to an S3 object"""
try:
file_path = 'synthea_data/'
current_directory = pathlib.Path(file_path)
for current_file in current_directory.iterdir():
s3_resource.meta.client.upload_file(
str(current_file), data_bucket, str(current_file))
logging.info(f'File upload completed: {current_file.name.replace(file_path, "")} ({round(current_file.stat().st_size / 1024 / 1024, 2)} MB)')
except ClientError as e:
logging.error(e)
exit(1)
| 18,661
|
def requires_roles(*roles):
""" """
def wrapper(f):
@wraps(f)
def wrapped(*args, **kwargs):
try:
if current_user.role.name not in roles:
abort(403)
except AttributeError:
pass
return f(*args, **kwargs)
return wrapped
return wrapper
| 18,662
|
def home(request):
"""
Homepage, user must login to view
"""
context = {
'posts': BlogPost.objects.all().order_by('-date'), #Get all event announcement blog posts
'master': MasterControl.objects.get(identifier="MASTER") #Get the master control object
}
return render(request, 'blog/home.html', context)
| 18,663
|
def user_login_success(request):
""" Success login page """
return core.render(request, 'login/login-conf.html')
| 18,664
|
def use_scope() -> Scope:
"""Get the current ASGI scope dictionary"""
return use_websocket().scope
| 18,665
|
def test_TODO():
"""TODO(github.com/ChrisCummins/ProGraML/issues/5): Short summary of test."""
assert migrate_graph_database
| 18,666
|
def save_emails_to_file(emails, filename, reason):
"""Save a list of emails to a file in the current working directory.
Keyword arguments:
emails -- list of Email objects to be saved to file.
filename -- the filename to save unsent emails to.
reason -- the text or exception reason the emails couldn't be sent.
"""
now = datetime.datetime.now()
date = '{0}-{1}-{2}'.format(now.day, now.month, now.year)
time = '{0}:{1}:{2}'.format(now.hour, now.minute, now.second)
with open(filename, 'a') as f:
f.write('On the {0}, at {1}, the following emails could not be sent:\n'.format(date, time))
f.write('The reason for this: {0}\n'.format(reason))
for e in emails:
f.write('{0}\n'.format(e))
| 18,667
|
def test_json():
"""Basic integration test for run_benchmark.py. It runs full
benchmarking process for arbitrarily chosen parameters.
"""
build_configuration = {
"db_bench": {
"repo_url": project_path,
"commit": "HEAD",
"env": {},
},
"pmemkv": {
"repo_url": "https://github.com/pmem/pmemkv.git",
"commit": "HEAD",
"cmake_params": [
"-DCMAKE_BUILD_TYPE=Release",
"-DENGINE_CMAP=1",
"-DENGINE_CSMAP=1",
"-DENGINE_RADIX=1",
"-DENGINE_STREE=1",
"-DENGINE_ROBINHOOD=1",
"-DBUILD_JSON_CONFIG=1",
"-DCXX_STANDARD=20",
"-DBUILD_TESTS=OFF",
"-DBUILD_DOC=OFF",
"-DBUILD_EXAMPLES=OFF",
],
"env": {"CC": "gcc", "CXX": "g++"},
},
"libpmemobjcpp": {
"repo_url": "https://github.com/pmem/libpmemobj-cpp.git",
"commit": "HEAD",
"cmake_params": [
"-DBUILD_EXAMPLES=OFF",
"-DBUILD_TESTS=OFF",
"-DBUILD_DOC=OFF",
"-DBUILD_BENCHMARKS=OFF",
"-DCMAKE_BUILD_TYPE=Release",
],
"env": {"CC": "gcc", "CXX": "g++"},
},
}
benchmark_configuration = [
{
"env": {"PMEM_IS_PMEM_FORCE": "1"},
"params": {
"--db": os.getenv("TEST_PATH", "/dev/shm/pmemkv"),
"--db_size_in_gb": "1",
"--benchmarks": "fillrandom",
"--engine": "cmap",
"--num": "100",
"--value_size": "8",
"--threads": "2",
},
},
{
"env": {},
"params": {
"--db": os.getenv("TEST_PATH", "/dev/shm/pmemkv"),
"--db_size_in_gb": "2",
"--benchmarks": "fillseq",
"--engine": "radix",
"--num": "100",
"--value_size": "1024",
"--threads": "1",
},
},
]
build_config_file = create_config_file(build_configuration)
test_config_file = create_config_file(benchmark_configuration)
sys.argv = ["dummy.py", build_config_file.name, test_config_file.name]
try:
result = rb.main()
except Exception as e:
assert False, f"run-bench raised exception: {e}"
| 18,668
|
def test_delete_all(collection):
"""
Testing the 'delete_many' method to remove all
:param collection: pytest fixture that returns the collection
:return:
"""
collection['tiny'].delete_many({})
c = collection['tiny'].find({})
assert c.count() == 0
| 18,669
|
def cli(method, *kargs, **arguments):
"""Simulates testing a hug cli method from the command line"""
collect_output = arguments.pop('collect_output', True)
command_args = [method.__name__] + list(kargs)
for name, values in arguments.items():
if not isinstance(values, (tuple, list)):
values = (values, )
for value in values:
command_args.append('--{0}'.format(name))
if not value in (True, False):
command_args.append('{0}'.format(value))
old_sys_argv = sys.argv
sys.argv = [str(part) for part in command_args]
    old_outputs = method.interface.cli.outputs
if collect_output:
method.interface.cli.outputs = lambda data: to_return.append(data)
to_return = []
try:
method.interface.cli()
except Exception as e:
to_return = (e, )
    method.interface.cli.outputs = old_outputs
sys.argv = old_sys_argv
return to_return and to_return[0] or None
| 18,670
|
def set_start_stop_from_input(spiketrains):
"""
    Sets the start :attr:`t_start` and stop :attr:`t_stop` point
    from the given input.
    If a single neo.SpikeTrain object is given, the start :attr:`t_start` and
    stop :attr:`t_stop` of that spike train are returned.
    Otherwise the aligned times are returned, which are the maximal start point
    and the minimal stop point across all spike trains.
Parameters
----------
spiketrains: neo.SpikeTrain object, list or array of neo.core.SpikeTrain
objects
List of neo.core SpikeTrain objects to extract `t_start` and
`t_stop` from.
Returns
-------
start : quantities.Quantity
Start point extracted from input :attr:`spiketrains`
stop : quantities.Quantity
Stop point extracted from input :attr:`spiketrains`
"""
if isinstance(spiketrains, neo.SpikeTrain):
return spiketrains.t_start, spiketrains.t_stop
else:
start = max([elem.t_start for elem in spiketrains])
stop = min([elem.t_stop for elem in spiketrains])
return start, stop
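# Usage sketch (illustrative; assumes the neo and quantities packages are installed):
import quantities as pq
import neo

st1 = neo.SpikeTrain([1.0, 2.5, 4.0] * pq.s, t_start=0.0 * pq.s, t_stop=10.0 * pq.s)
st2 = neo.SpikeTrain([0.5, 3.0, 7.5] * pq.s, t_start=0.5 * pq.s, t_stop=8.0 * pq.s)
# Aligned window across both trains: start = 0.5 s, stop = 8.0 s
start, stop = set_start_stop_from_input([st1, st2])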
| 18,671
|
def less(data_str):
"""Pretty print JSON and pipe to less."""
p = Popen('less', stdin=PIPE)
p.stdin.write(data_str.encode())
p.stdin.close()
p.wait()
return True
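# Usage sketch (illustrative): the caller does the pretty-printing; this function
# only pipes the resulting string through the system's `less` pager.
import json

payload = {"name": "example", "values": [1, 2, 3]}
less(json.dumps(payload, indent=2))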
| 18,672
|
def get_extensions(f, v):
"""
Get a dictionary which maps each extension name to a bool whether it is
enabled in the file
Parameters
----------
f : an h5py.File or h5py.Group object
The object in which to find claimed extensions
v : bool
Verbose option
Returns
-------
A dictionary {string:bool} where the keys are the extension names and the
bool states whether it is enabled or not
"""
valid, extensionIDs = get_attr(f, "openPMDextension")
result = {ext: False for ext in ext_list.keys()}
if valid:
enabledExtMask = 0
for extension, bitmask in ext_list.items():
# This uses a bitmask to identify activated extensions
if (bitmask & extensionIDs) == bitmask:
result[extension] = True
enabledExtMask |= bitmask
if v:
print("Info: Found extension '%s'." % extension)
# Mask out the extension bits we have already detected so only
# unknown ones are left
excessIDs = extensionIDs & ~enabledExtMask
if excessIDs:
print("Warning: Unknown extension Mask left: %s" % excessIDs)
return result
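# Usage sketch (illustrative; "data.h5" is a hypothetical openPMD file, and
# "ED-PIC" is assumed to be one of the extension names in ext_list):
import h5py

with h5py.File("data.h5", "r") as f:
    extensions = get_extensions(f, v=True)
    if extensions.get("ED-PIC"):
        print("Particle-in-cell extension is enabled.")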
| 18,673
|
def init_parser():
"""
initialize argument parser for S1 processing utilities
"""
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--transform', action='store_true', help='transform the final DEM to UTM coordinates')
parser.add_argument('-l', '--logfiles', action='store_true', help='create logfiles of the executed GAMMA commands')
parser.add_argument('-i', '--intermediates', action='store_true', help='keep intermediate files')
parser.add_argument('-q', '--quiet', action='store_true', help='suppress standard console prints')
parser.add_argument('-tr', '--targetresolution', default=20, help='the target resolution in meters for x and y',
type=int)
parser.add_argument('-fg', '--func_geoback', default=2, help='backward geocoding interpolation function; '
'0 - Nearest Neighbor, 1 - Bicubic Spline, 2 - Bicubic Spline-Log; '
'method 1: negative values possible (e.g. in urban areas) - use method 2 to avoid this',
type=int)
parser.add_argument('-fi', '--func_interp', default=0,
help='function for interpolation of layover/shadow/foreshortening/DEM gaps; '
'0 - set to 0, 1 - linear interpolation, 2 - actual value, 3 - nn-thinned', type=int)
parser.add_argument('-poe', '--poedir', default=None,
help='directory containing aux_poeorb (precise orbit ephemerides) orbit state vector files')
parser.add_argument('-res', '--resdir', default=None,
help='directory containing aux_resorb (restituted orbit) orbit state vector files')
parser.add_argument('zipfile', help='S1 zipped scene archive to be used')
parser.add_argument('tempdir', help='temporary directory for intermediate files')
parser.add_argument('outdir', help='output directory')
parser.add_argument('srtmdir', help='directory containing SRTM hgt tiles (subdirectories possible)')
return parser
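# Usage sketch (illustrative; the file and directory paths below are placeholders):
parser = init_parser()
args = parser.parse_args(["-t", "-tr", "10",
                          "S1A_scene.zip", "/tmp/work", "/data/out", "/data/srtm"])
print(args.targetresolution, args.transform)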
| 18,674
|
async def remove_device(ws_client, device_id, config_entry_id):
"""Remove config entry from a device."""
await ws_client.send_json(
{
"id": 5,
"type": "config/device_registry/remove_config_entry",
"config_entry_id": config_entry_id,
"device_id": device_id,
}
)
response = await ws_client.receive_json()
return response["success"]
| 18,675
|
def deco_inside_ctx_method_self(target):
"""decorator: wrap a class method inside a `with self: ...` context"""
def tgt(self, *args, **kwargs):
with self:
return target(self, *args, **kwargs)
return tgt
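# Usage sketch (illustrative): the decorated method runs inside `with self:`,
# so the class must implement the context-manager protocol.
class Resource:
    def __enter__(self):
        print("acquired")
        return self

    def __exit__(self, exc_type, exc, tb):
        print("released")
        return False

    @deco_inside_ctx_method_self
    def work(self, n):
        return n * 2

Resource().work(21)  # prints "acquired", then "released", and returns 42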
| 18,676
|
def c_get_mechanism_info(slot, mechanism_type):
"""Gets a mechanism's info
:param slot: The slot to query
:param mechanism_type: The type of the mechanism to get the information for
:returns: The result code, The mechanism info
"""
mech_info = CK_MECHANISM_INFO()
ret = C_GetMechanismInfo(CK_ULONG(slot), CK_MECHANISM_TYPE(mechanism_type), byref(mech_info))
return ret, mech_info
| 18,677
|
def test_reset(cli_runner: CliRunner) -> None:
"""Test set vin."""
assert not os.path.exists(os.path.expanduser(CREDENTIAL_PATH))
result = cli_runner.invoke(__main__.main, f"set --locale {TEST_LOCALE}")
assert result.exit_code == 0
assert os.path.exists(os.path.expanduser(CREDENTIAL_PATH))
# Reset a first time - file should get deleted
result = cli_runner.invoke(__main__.main, "reset")
assert result.exit_code == 0
assert not os.path.exists(os.path.expanduser(CREDENTIAL_PATH))
# Reset a second time - make sure it doesn't error
result = cli_runner.invoke(__main__.main, "reset")
assert result.exit_code == 0
assert not os.path.exists(os.path.expanduser(CREDENTIAL_PATH))
| 18,678
|
def make_legend(names, colors):
"""
Make a list of legend handles and colours
:param names: list of names
:param colors: list of colors
:return: list of matplotlib.patches.Patch objects for legend
"""
legend_elements = []
for idx, name in enumerate(names):
el = Patch(color=colors[idx], label=name)
legend_elements.append(el)
return legend_elements
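# Usage sketch (illustrative; assumes matplotlib is installed and Patch is
# imported from matplotlib.patches, as in the original module):
import matplotlib.pyplot as plt

handles = make_legend(["control", "treatment"], ["tab:blue", "tab:orange"])
fig, ax = plt.subplots()
ax.legend(handles=handles)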
| 18,679
|
def create_fuku_table(conn):
""" Creacion de tabla de usuarios, numeros"""
try:
cur = conn.cursor()
cur.execute(CREATE_TABLE_FUKU_QUERY)
cur.close()
except mysql.connector.Error as e:
logger.error(e)
| 18,680
|
def optimize(iterable):
"""
Yields a simplified sequence of patch operations from iterable.
"""
iterable = check_stream(iterable)
header = next(iterable)
yield header
lastItem = next(iterable)
if isinstance(lastItem, ops.SourceCopy) and lastItem.offset == 0:
# SourceCopy is copying from the start of the file, so it might as well
# be a SourceRead.
lastItem = ops.SourceRead(lastItem.bytespan)
targetWriteOffset = 0
for item in iterable:
if (
isinstance(lastItem, ops.SourceRead) and
isinstance(item, ops.SourceRead)
):
# We can merge consecutive SourceRead operations.
lastItem.extend(item)
continue
elif (
isinstance(lastItem, ops.TargetRead) and
isinstance(item, ops.TargetRead)
):
# We can merge consecutive TargetRead operations.
lastItem.extend(item)
continue
elif (
isinstance(lastItem, ops.SourceCopy) and
isinstance(item, ops.SourceCopy) and
lastItem.offset + lastItem.bytespan == item.offset
):
# We can merge consecutive SourceCopy operations, as long as the
# following ones have a relative offset of 0 from the end of the
# previous one.
lastItem.extend(item)
continue
elif (
isinstance(lastItem, ops.TargetCopy) and
isinstance(item, ops.TargetCopy) and
lastItem.offset + lastItem.bytespan == item.offset
):
# We can merge consecutive TargetCopy operations, as long as the
# following ones have a relative offset of 0 from the end of the
# previous one.
lastItem.extend(item)
continue
if (
isinstance(lastItem, ops.SourceCopy) and
lastItem.offset == targetWriteOffset
):
# A SourceRead is just a SourceCopy that implicitly has its read
            # offset set to targetWriteOffset.
lastItem = ops.SourceRead(lastItem.bytespan)
yield lastItem
targetWriteOffset += lastItem.bytespan
lastItem = item
yield lastItem
| 18,681
|
def create_visitor_id(visitor_id, options):
"""Creates new VisitorId"""
if not visitor_id:
visitor_id = VisitorId()
if not options:
options = {}
device_id = options.get("device_id")
visitor = options.get("visitor")
if not visitor_id.tnt_id:
visitor_id.tnt_id = device_id
if not visitor_id.marketing_cloud_visitor_id:
visitor_id.marketing_cloud_visitor_id = get_marketing_cloud_visitor_id(visitor)
visitor_id.customer_ids = get_customer_ids(visitor_id.customer_ids, visitor)
return visitor_id
| 18,682
|
def validate_crc(response: str, candidate: str) -> bool:
"""Calculates and validates the response CRC against expected"""
expected_crc = '{:04X}'.format(crc(response))
return expected_crc == candidate.replace('*', '')
| 18,683
|
def z_gate():
"""
Pauli z
"""
return torch.tensor([[1, 0], [0, -1]]) + 0j
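# Usage sketch (illustrative): applying Pauli-Z leaves |0> unchanged and
# flips the sign of |1>.
import torch

ket0 = torch.tensor([1, 0]) + 0j
ket1 = torch.tensor([0, 1]) + 0j
print(z_gate() @ ket0)  # tensor([1.+0.j, 0.+0.j])
print(z_gate() @ ket1)  # tensor([ 0.+0.j, -1.+0.j])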
| 18,684
|
def numpy_ewma(data, window):
"""
    Exponentially weighted moving average (EWMA) of a 1-D array.
    :param data: 1-D numpy array of values.
    :param window: window length; the smoothing factor is alpha = 1 / window.
    :return: numpy array of the same length holding the running EWMA.
"""
alpha = 1 / window
scale = 1 / (1 - alpha)
n = data.shape[0]
scale_arr = (1 - alpha) ** (-1 * np.arange(n))
weights = (1 - alpha) ** np.arange(n)
pw0 = (1 - alpha) ** (n - 1)
mult = data * pw0 * scale_arr
cumsums = mult.cumsum()
out = cumsums * scale_arr[::-1] / weights.cumsum()
return out
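# Usage sketch (illustrative; assumes numpy is imported as np, as the function
# body requires): for a 1-D array this should agree with pandas' adjusted EWM,
# i.e. Series(data).ewm(alpha=1/window, adjust=True).mean().
import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(numpy_ewma(data, window=3))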
| 18,685
|
def read(home_dir=os.path.expanduser('~')):
"""Read user profile from .pgdocs config"""
outfile = config_file(home_dir)
# create empty file and return empty profile with empty sessions
if not os.path.exists(outfile):
open(outfile, "w").close()
return Profile([])
with open(config_file(home_dir), "r") as inpfile:
# return empty profile if we cannot parse json
try:
return Profile.from_json(json.load(inpfile))
        except Exception:
return Profile([])
| 18,686
|
def tagged_sha256(tag: bytes, msg: bytes) -> bytes:
"""
Compute a tagged hash as defined in BIP-340.
This is useful for creating a message hash and achieving domain separation
through an application-specific tag. This function returns
SHA256(SHA256(tag)||SHA256(tag)||msg).
:param tag: tag
:param msg: message
:return: 32-byte hash
:raises ValueError: if arguments are invalid type
:raises Libsecp256k1Exception: arguments are invalid
"""
hash32 = ctypes.create_string_buffer(HASH32)
result = lib.secp256k1_tagged_sha256(
secp256k1_context_verify, hash32, tag, len(tag), msg, len(msg)
)
if result != 1:
assert_zero_return_code(result)
raise Libsecp256k1Exception("invalid arguments")
return hash32.raw[:HASH32]
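# Usage sketch (illustrative; assumes the surrounding module's libsecp256k1
# bindings are loaded): the tag provides domain separation, so the same message
# hashed under different tags yields unrelated digests.
digest = tagged_sha256(b"BIP0340/challenge", b"hello world")
assert len(digest) == 32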
| 18,687
|
def validate_blacklist(password):
""" It does not contain the strings ab, cd, pq, or xy """
for blacklisted in ['ab', 'cd', 'pq', 'xy']:
if blacklisted in password:
return False
return True
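# Usage sketch (illustrative):
assert validate_blacklist("haegjkl") is True    # none of ab/cd/pq/xy present
assert validate_blacklist("abcdefg") is False   # contains "ab" (and "cd")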
| 18,688
|
def get_mobilenet(model, method, num_classes):
"""Returns the requested model, ready for training/pruning with the specified method.
:param model: str
:param method: full or prune
:param num_classes: int, num classes in the dataset
:return: A prunable MobileNet model
"""
ModuleInjection.pruning_method = method
ModuleInjection.prunable_modules = []
    if model == 'mobilenetv2':
        net = MobileNetv2(num_classes)
    else:
        raise ValueError(f"Unsupported model: {model}")
    net.prunable_modules = ModuleInjection.prunable_modules
    return net
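# Usage sketch (illustrative; 'prune' is one of the method names documented
# in the docstring above):
net = get_mobilenet('mobilenetv2', method='prune', num_classes=10)
print(len(net.prunable_modules), "prunable modules collected")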
| 18,689
|
def test_network_firmware_auth_exception(switch_vendor, get_firmware_dell):
"""Test that the `canu report network firmware` command catches auth exception."""
with runner.isolated_filesystem():
switch_vendor.return_value = "dell"
get_firmware_dell.side_effect = ssh_exception.NetmikoAuthenticationException
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"report",
"network",
"firmware",
"--csm",
csm,
"--ips",
ip_dell,
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert (
"192.168.1.2 - Authentication error. Check the credentials or IP address and try again"
in str(result.output)
)
| 18,690
|
def f(x):
    """Squares x after a deliberate delay (used to simulate slow work)."""
    time.sleep(10)
    return x * x
| 18,691
|
def get_added_after(
fetch_full_feed, initial_interval, last_fetch_time=None, filter_args=None
):
"""
Creates the added_after param, or extracts it from the filter_args
    :param fetch_full_feed: when set to true, added_after is reset to the initial interval
    :param initial_interval: fallback value used when no last fetch time or filter is available
    :param last_fetch_time: last fetch time value (str)
:param filter_args: set of filter_args defined by the user to be merged with added_after
:return: added_after
"""
if fetch_full_feed:
return initial_interval
if not filter_args or "added_after" not in filter_args:
return last_fetch_time or initial_interval
return filter_args["added_after"]
| 18,692
|
def gdf_lineStrings():
"""Construct a gdf that contains two LineStrings."""
ls_short = LineString([(13.476808430, 48.573711823), (13.506804, 48.939008), (13.4664690, 48.5706414)])
ls_long = LineString([(13.476808430, 48.573711823), (11.5675446, 48.1485459), (8.5067847, 47.4084269)])
a_list = [(0, ls_short), (1, ls_long)]
gdf = gpd.GeoDataFrame(a_list, columns=["id", "geometry"]).set_geometry("geometry")
gdf = gdf.set_crs("wgs84")
return gdf
| 18,693
|
def createFormattedDir(source, dirname, batchSize = 128, cleanDir=False, verbose=True):
"""Given a source that supports __len__ and __getitem__,
and a directory that's empty, create a formatted directory that's
then used by BatchDataset. This only needs to be run once, so it's a utility.
    Adjust batchSize so two batches leave enough (CPU) RAM for normal processes,
    but are as large as possible. BatchDataset sometimes loads two batches
    to avoid waiting on a disk read."""
if verifyDirEmpty(dirname, cleanDir=cleanDir):
if verbose:
print("Directory '" + dirname + "' is empty.")
else:
raise OSError("Directory '" + dirname + "' not empty; choose a new directory or set cleanDir=True.")
totalRecords = len(source)
numBatches = math.ceil(totalRecords / batchSize)
if writeManifest(dirname, totalRecords, batchSize, numBatches) and verbose:
print("Manifest written.")
def _writeBatch(container, recordId):
batchId = recordId // batchSize
pathName = os.path.join(dirname, 'batch' + str(batchId))
torch.save(container, pathName)
if verbose:
print("Records up to " + str(recordId + 1) + " saved; " + str(batchId + 1) + " batches written.")
# Main loop to store all the records.
# By default, python list are the batches, but the BatchDataset class will work with any indexable container.
res = []
for i in range(totalRecords):
res.append(source[i])
if (i+1) % batchSize == 0:
_writeBatch(res, i)
res = []
if len(res) > 0: # Take care of stragglers.
_writeBatch(res, i)
res = []
print("Finished! Directory '" + dirname + "' is ready to be used for a BatchDataset.")
return True
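# Usage sketch (illustrative; TensorDataset is just one convenient source --
# anything supporting __len__ and __getitem__ works -- and the path is a
# placeholder; assumes the module's verifyDirEmpty/writeManifest helpers exist):
import torch
from torch.utils.data import TensorDataset

source = TensorDataset(torch.arange(1000).float().unsqueeze(1))
createFormattedDir(source, "/tmp/batch_dataset_demo", batchSize=128, cleanDir=True)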
| 18,694
|
def runnify(
async_function: Callable[T_ParamSpec, Coroutine[Any, Any, T_Retval]],
backend: str = "asyncio",
backend_options: Optional[Dict[str, Any]] = None,
) -> Callable[T_ParamSpec, T_Retval]:
"""
Take an async function and create a regular (blocking) function that receives the
same keyword and positional arguments for the original async function, and that when
called will create an event loop and use it to run the original `async_function`
with those arguments.
That function returns the return value from the original `async_function`.
The current thread must not be already running an event loop.
This calls `anyio.run()` underneath.
Use it like this:
```Python
async def program(name: str) -> str:
return f"Hello {name}"
result = asyncer.runnify(program)(name="World")
print(result)
```
## Arguments
`async_function`: an async function to call
`backend` name of the asynchronous event loop implementation - currently either
`asyncio` or `trio`
`backend_options` keyword arguments to call the backend `run()` implementation with
## Return
The return value of the async function
## Raises
`RuntimeError`: if an asynchronous event loop is already running in this thread
`LookupError`: if the named backend is not found
"""
@functools.wraps(async_function)
def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
partial_f = functools.partial(async_function, *args, **kwargs)
return anyio.run(partial_f, backend=backend, backend_options=backend_options)
return wrapper
| 18,695
|
def _binary_stdio():
"""Construct binary stdio streams (not text mode).
This seems to be different for Window/Unix Python2/3, so going by:
https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
NOTE: this method is borrowed from python-language-server:
https://github.com/palantir/python-language-server/blob/develop/pyls/__main__.py
"""
PY3K = sys.version_info >= (3, 0)
if PY3K:
# pylint: disable=no-member
stdin, stdout = sys.stdin.buffer, sys.stdout.buffer
else:
# Python 2 on Windows opens sys.stdin in text mode, and
# binary data that read from it becomes corrupted on \r\n
if sys.platform == "win32":
# set sys.stdin to binary mode
# pylint: disable=no-member,import-error
import os
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
stdin, stdout = sys.stdin, sys.stdout
return stdin, stdout
| 18,696
|
def _get_properties(rsp: Dict[Text, Any]) -> Iterable[CdProperty]:
""" Retrieve key properties to be passed onto dynatrace server. """
return [
CdProperty("Status", rsp.get("status", "N/A")),
CdProperty("Entry point", rsp.get("entryPoint", "N/A")),
CdProperty("Available memory Mb", rsp.get("availableMemoryMb", "N/A")),
CdProperty("Runtime", rsp.get("runtime", "")),
CdProperty("Ingress settings", rsp.get("ingressSettings", "")),
]
| 18,697
|
async def test_api_key_flunks_bad_email():
"""api_key() rejects an obviously malformed email address"""
# But only the most obvious cases involving misplaced '@' or lack of '.'
bad_addrs = ['f@@ey', '56b7165e4f8a54b4faf1e04c46a6145c']
for addr in bad_addrs:
path_params = {'email': addr}
request = post_request("/v2/api_key/%s" % addr,
path_params=path_params)
with pytest.raises(HTTPException) as e:
await v2_handlers.api_key(request)
        assert e.value.status_code == 400
| 18,698
|
def factors(n):
"""Yield the factors of n in ascending order."""
rtn = isqrt(n)
smalls = list(filter(lambda k: n % k == 0, range(1, rtn + 1)))
larges = [n // k for k in smalls]
if rtn * rtn == n:
smalls.pop()
yield from smalls
yield from reversed(larges)
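# Usage sketch (illustrative):
assert list(factors(36)) == [1, 2, 3, 4, 6, 9, 12, 18, 36]
assert list(factors(7)) == [1, 7]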
| 18,699
|