content | id
|---|---|
def versioning(version: str) -> str:
"""
version to specification
Author: Huan <zixia@zixia.net> (https://github.com/huan)
X.Y.Z -> X.Y.devZ
"""
sem_ver = semver.parse(version)
major = sem_ver['major']
minor = sem_ver['minor']
patch = str(sem_ver['patch'])
if minor % 2:
patch = 'dev' + patch
fin_ver = '%d.%d.%s' % (
major,
minor,
patch,
)
return fin_ver
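# A hedged usage sketch (assumes the dict-returning `semver.parse` API used above;
# per the code, odd minor versions become development releases):
#
#   versioning('0.19.3')  # -> '0.19.dev3'
#   versioning('0.20.3')  # -> '0.20.3'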
| 17,100
|
def freshdesk_sync_contacts(contacts=None, companies=None, agents=None):
"""Iterate through all DepartmentUser objects, and ensure that each user's
information is synced correctly to a Freshdesk contact.
May optionally be passed in dicts of contacts & companies.
"""
try:
if not contacts:
LOGGER.info('Querying Freshdesk for current contacts')
contacts = get_freshdesk_objects(obj_type='contacts', progress=False, params={'page': 1})
contacts = {c['email'].lower(): c for c in contacts if c['email']}
if not companies:
LOGGER.info('Querying Freshdesk for current companies')
companies = get_freshdesk_objects(obj_type='companies', progress=False, params={'page': 1})
companies = {c['name']: c for c in companies}
# FIXME: ignore Agents for the time being.
#if not agents:
# LOGGER.info('Querying Freshdesk for current agents')
# agents = get_freshdesk_objects(obj_type='agents', progress=False, params={'page': 1})
# agents = {a['contact']['email'].lower(): a['contact'] for a in agents if a['contact']['email']}
except Exception as e:
LOGGER.exception(e)
return False
# Filter DepartmentUsers: valid email (contains @), not -admin, DN contains 'OU=Users', active
d_users = DepartmentUser.objects.filter(email__contains='@', ad_dn__contains='OU=Users', active=True).exclude(email__contains='-admin')
LOGGER.info('Syncing details for {} DepartmentUsers to Freshdesk'.format(d_users.count()))
for user in d_users:
if user.email.lower() in contacts:
# The DepartmentUser exists in Freshdesk; verify and update details.
fd = contacts[user.email.lower()]
data = {}
user_sync = False
changes = []
if user.name != fd['name']:
user_sync = True
data['name'] = user.name
changes.append('name')
if user.telephone != fd['phone']:
user_sync = True
data['phone'] = user.telephone
changes.append('phone')
if user.title != fd['job_title']:
user_sync = True
data['job_title'] = user.title
changes.append('job_title')
if user_sync: # Sync user details to their Freshdesk contact.
r = update_freshdesk_object('contacts', data, fd['id'])
if r.status_code == 403: # Forbidden
# A 403 response probably means that we hit the API throttle limit.
# Abort the synchronisation.
LOGGER.error('HTTP403 received from Freshdesk API, aborting')
return False
LOGGER.info('{} was updated in Freshdesk (status {}), changed: {}'.format(
user.email.lower(), r.status_code, ', '.join(changes)))
else:
data = {'name': user.name, 'email': user.email.lower(),
'phone': user.telephone, 'job_title': user.title}
department = user.org_data.get('units', []) if user.org_data else []
department = department[0].get('name', '') if len(department) > 0 else None
if department and department in companies:
data['company_id'] = companies[department]['id']
r = update_freshdesk_object('contacts', data)
            if r.status_code != 200:  # Error, unable to process request.
                LOGGER.warning('{} not created in Freshdesk (status {})'.format(user.email.lower(), r.status_code))
else:
LOGGER.info('{} created in Freshdesk (status {})'.format(user.email.lower(), r.status_code))
return True
| 17,101
|
def device_create_from_symmetric_key(transportType, deviceId, hostname, symmetricKey): # noqa: E501
"""Create a device client from a symmetric key
# noqa: E501
:param transportType: Transport to use
:type transportType: str
    :param deviceId: identifier of the device
    :type deviceId: str
:param hostname: name of the host to connect to
:type hostname: str
:param symmetricKey: key to use for connection
:type symmetricKey: str
:rtype: ConnectResponse
"""
return "do some magic!"
| 17,102
|
def create_with_index(data, columns):
"""
Create a new indexed pd.DataFrame
"""
    to_df = {columns[0]: list(range(1, len(data) + 1)), columns[1]: data}
    data_frame = pd.DataFrame(to_df)
    data_frame.set_index(columns[0], inplace=True)
return data_frame
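# A hedged usage sketch (assumes pandas is imported as pd, as the function requires):
#
#   df = create_with_index(['a', 'b', 'c'], ['Index', 'Letter'])
#   # -> DataFrame indexed by 'Index' (1, 2, 3) with a 'Letter' column.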
| 17,103
|
def pyfancy_critical(pyfancy_variable):
"""Critical level."""
pyfancy_function(
logger_function().critical,
pyfancy().red_bg().bold(),
pyfancy_variable)
| 17,104
|
def random_aes_key(blocksize=16):
"""Set 2 - Challenge 11"""
return afb(np.random.bytes(blocksize))
| 17,105
|
def check_decoded_times(start_time, timewindow=30):
"""Check the decoded IRIG-B times within a window of size timewindow
surrounding the event time and make sure the times are all correct (i.e.
they are either the correct UTC or GPS time; either one is considered
correct). Save the results of the check to a text file for upload to LIGO's
EVNT log."""
for chan in CHANS:
format_string = 'Decoding {}+/-{}s on channel {}.'
start_time = int(start_time)
msg = format_string.format(start_time, timewindow, chan)
print(msg)
# print a header, since output will be arranged tabularly, e.g.
# 1225152018 | 1225152018 | 18 | GPS | 0 | 0 | 0 | 306 | ...
# 2018 | Thu Nov 02 00:00:00 2018 | Fri Nov 02 23:59:42 2018
print("Actual GPS | Decode GPS | Leap | Scale | Sec | Min | Hr | Day "
"| Year | Decoded Date/Time | Actual UTC Date/Time")
print("-----------+------------+------+-------+-----+-----+----+-----"
"+------+--------------------------+-------------------------")
row_fmt = ("{gps_actual:>10d} | {gps_decoded:>10d} | {leap:>4d} | "
"{scale:<5} | {second:>3d} | {minute:>3d} | {hour:>2d} | "
"{day:>3d} | {year:>4d} | {datetime_decoded:<24} | "
"{datetime_actual:<24}")
timeseries = TimeSeries.fetch(chan, start_time-timewindow,
start_time+timewindow+1).value
# deal with one second at a time, writing results to file
for i in range(2*timewindow + 1):
timeseries_slice = timeseries[i*BITRATE:(i+1)*BITRATE]
gps_actual = (start_time - timewindow) + i
leap_seconds = get_leap_seconds(gps_actual)
t_actual = Time(gps_actual, format='gps', scale='utc')
decoded = geco_irig_decode.decode_timeseries(timeseries_slice)
t = decoded['datetime']
dt = (t - t_actual.to_datetime()).seconds
datetime_actual = t_actual.to_datetime().strftime(TFORMAT)
# check whether the times agree, or whether they are off by the
# current number of leap seconds
if dt == 0:
scale = "UTC"
elif dt == leap_seconds:
scale = "GPS"
else:
scale = "ERROR"
print(row_fmt.format(gps_actual=gps_actual,
# foo = dict(gps_actual=gps_actual,
gps_decoded=int(Time(t).gps),
leap=leap_seconds, scale=scale,
datetime_decoded=t.strftime(TFORMAT),
datetime_actual=datetime_actual,
**decoded))
# print(foo)
| 17,106
|
def show_ner(text: str, ner_model: str):
"""
Just a shortcut to annotate the text with the given NER model
"""
nlp = stanza.Pipeline(lang='en', package='craft',
dir=STANZA_DOWNLOAD_DIR,
processors={'ner': ner_model})
doc = nlp(text)
print(f'\nNER MODEL: {ner_model}')
print('TYPE TEXT')
for ent in doc.entities:
print(f'{ent.type:<10} {ent.text}')
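# A hedged usage sketch (the model name below is illustrative; any NER model
# available for the stanza 'craft' biomedical package could be passed):
#
#   show_ner("Mutations in BRCA1 are linked to breast cancer.", ner_model="bionlp13cg")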
| 17,107
|
def create_and_load(directory: str,
name: str,
new_name: str = None) -> nn.Module:
"""Instantiate an unkown function (uf) required
by the high-order functions with a trained neural network
Args:
directory: directory to the saved weights of an NN
name: name of the unknown function
new_name: the new name of the unknown function
"""
if new_name is None:
new_name = name
with open('{}/{}.json'.format(directory, name)) as json_data:
params_dict = json.load(json_data)
params_dict['name'] = new_name
if params_dict['output_activation'] == 'None':
params_dict['output_activation'] = None
elif params_dict['output_activation'] == 'sigmoid':
params_dict['output_activation'] = torch.sigmoid
elif params_dict['output_activation'] == 'softmax':
params_dict['output_activation'] = nn.Softmax(dim=1)
else:
raise NotImplementedError()
new_fn, _ = get_nn_from_params_dict(params_dict)
new_fn.load('{}/{}.pth'.format(directory, name))
new_fn.eval()
return new_fn
| 17,108
|
def select_region_climatedata(gcm_name, rcp, main_glac_rgi):
"""
Get the regional temperature and precipitation for a given dataset.
Extracts all nearest neighbor temperature and precipitation data for a given set of glaciers. The mean temperature
    and precipitation of the group of glaciers is returned. If two glaciers share the same temp/prec data, that data
    is only used once in the mean calculation. Note that different GCMs are not expected to agree exactly, since they
    have different resolutions, so the mean calculation will be based on different numbers of pixels for each GCM.
Parameters
----------
gcm_name : str
GCM name
rcp : str
rcp scenario (ex. rcp26)
main_glac_rgi : pd.DataFrame
glacier dataset used to select the nearest neighbor climate data
"""
# Date tables
print('select_region_climatedata fxn dates supplied manually')
dates_table_ref = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
dates_table = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
# Load gcm lat/lons
gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp)
# Select lat/lon from GCM
ds_elev = xr.open_dataset(gcm.fx_fp + gcm.elev_fn)
gcm_lat_values_all = ds_elev.lat.values
gcm_lon_values_all = ds_elev.lon.values
ds_elev.close()
# Lat/lon dictionary to convert
gcm_lat_dict = dict(zip(range(gcm_lat_values_all.shape[0]), list(gcm_lat_values_all)))
gcm_lon_dict = dict(zip(range(gcm_lon_values_all.shape[0]), list(gcm_lon_values_all)))
    # Find nearest neighbors for glaciers that have pixels
latlon_nearidx = pd.DataFrame(np.zeros((main_glac_rgi.shape[0],2)), columns=['CenLat','CenLon'])
latlon_nearidx.iloc[:,0] = (np.abs(main_glac_rgi.CenLat.values[:,np.newaxis] - gcm_lat_values_all).argmin(axis=1))
latlon_nearidx.iloc[:,1] = (np.abs(main_glac_rgi.CenLon.values[:,np.newaxis] - gcm_lon_values_all).argmin(axis=1))
latlon_nearidx = latlon_nearidx.drop_duplicates().sort_values(['CenLat', 'CenLon'])
latlon_nearidx.reset_index(drop=True, inplace=True)
latlon_reg = latlon_nearidx.copy()
latlon_reg.CenLat.replace(gcm_lat_dict, inplace=True)
latlon_reg.CenLon.replace(gcm_lon_dict, inplace=True)
# ===== LOAD CLIMATE DATA =====
# Reference climate data
ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn, latlon_reg,
dates_table_ref)
ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn, latlon_reg,
dates_table_ref)
# ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, latlon_reg)
# GCM climate data
gcm_temp_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, latlon_reg, dates_table)
gcm_prec_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, latlon_reg, dates_table)
# gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, latlon_reg)
# GCM subset to agree with reference time period to calculate bias corrections
gcm_subset_idx_start = np.where(dates_table.date.values == dates_table_ref.date.values[0])[0][0]
gcm_subset_idx_end = np.where(dates_table.date.values == dates_table_ref.date.values[-1])[0][0]
gcm_temp = gcm_temp_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
gcm_prec = gcm_prec_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
## ===== BIAS ADJUSTMENTS =====
# OPTION 2: Adjust temp and prec according to Huss and Hock (2015) accounts for means and interannual variability
if input.option_bias_adjustment == 2:
# TEMPERATURE BIAS CORRECTIONS
# Mean monthly temperature
ref_temp_monthly_avg = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_temp_monthly_avg = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
# Monthly bias adjustment
gcm_temp_monthly_adj = ref_temp_monthly_avg - gcm_temp_monthly_avg
# Monthly temperature bias adjusted according to monthly average
t_mt = gcm_temp_all + np.tile(gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Mean monthly temperature bias adjusted according to monthly average
t_m25avg = np.tile(gcm_temp_monthly_avg + gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Calculate monthly standard deviation of temperature
ref_temp_monthly_std = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
gcm_temp_monthly_std = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
variability_monthly_std = ref_temp_monthly_std / gcm_temp_monthly_std
# Bias adjusted temperature accounting for monthly mean and variability
gcm_temp_bias_adj = t_m25avg + (t_mt - t_m25avg) * np.tile(variability_monthly_std, int(gcm_temp_all.shape[1]/12))
# PRECIPITATION BIAS CORRECTIONS
# Calculate monthly mean precipitation
ref_prec_monthly_avg = (ref_prec.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_prec_monthly_avg = (gcm_prec.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
bias_adj_prec = ref_prec_monthly_avg / gcm_prec_monthly_avg
# Bias adjusted precipitation accounting for differences in monthly mean
gcm_prec_bias_adj = gcm_prec_all * np.tile(bias_adj_prec, int(gcm_temp_all.shape[1]/12))
# Regional means
reg_mean_temp_biasadj = gcm_temp_bias_adj.mean(axis=0)
reg_mean_prec_biasadj = gcm_prec_bias_adj.mean(axis=0)
return reg_mean_temp_biasadj, reg_mean_prec_biasadj
| 17,109
|
def get_patch_boundaries(mask_slice, eps=2):
"""
    Computes the coordinates of a SINGLE patch on the slice. Behaves incorrectly if the slice contains multiple tumors.
    :mask_slice: 2D ndarray, mask whose pixels take values in <0, 1, 2>
    :eps: int, number of additional pixels extracted around the actual mask coordinates
    :return: `x_min`, `x_max`, `y_min`, `y_max`
"""
# check if we work with mask_slice that contains at least one non-zero pixel
if np.sum(mask_slice[:, :]) <= 0:
        raise ValueError("Slice does not contain any tumors.")
    # smallest row index that contains any nonzero pixel
x_min = None
for x in range(mask_slice.shape[0]):
if np.sum(mask_slice[x, :]) > 0:
# get first from the left index of nonzero 1D slice and break
x_min = x
break
x_max = None
for x in range(mask_slice.shape[0] - 1, -1, -1):
if np.sum(mask_slice[x, :]) > 0:
# get the first from the right index of nonzero 1D slice and break
x_max = x
break
y_min = None
for y in range(mask_slice.shape[1]):
if np.sum(mask_slice[:, y]) > 0:
# get the first from the bottom index of nonzero 1D slice and break
y_min = y
break
y_max = None
for y in range(mask_slice.shape[1] - 1, -1, -1):
if np.sum(mask_slice[:, y]) > 0:
# get the first from the top index of nonzero 1D slice and break
y_max = y
break
# apply `eps` parameter to the actual `min` and `max` values
x_min = max(x_min - eps, 0)
x_max = min(x_max + eps, mask_slice.shape[0] - 1)
y_min = max(y_min - eps, 0)
y_max = min(y_max + eps, mask_slice.shape[1] - 1)
return x_min, x_max, y_min, y_max
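# A hedged usage sketch with a toy mask (assumes numpy is imported as np):
#
#   mask = np.zeros((10, 10))
#   mask[4:6, 3:5] = 1                 # a single tumor region
#   get_patch_boundaries(mask, eps=1)  # -> (3, 6, 2, 5)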
| 17,110
|
def get_sqrt_ggn_extension(
subsampling: Union[None, List[int]], mc_samples: int
) -> Union[SqrtGGNExact, SqrtGGNMC]:
"""Instantiate ``SqrtGGN{Exact, MC} extension.
Args:
subsampling: Indices of active samples.
mc_samples: Number of MC-samples to approximate the loss Hessian. ``0``
uses the exact loss Hessian.
Returns:
Instantiated SqrtGGN extension.
"""
return (
SqrtGGNExact(subsampling=subsampling)
if mc_samples == 0
else SqrtGGNMC(subsampling=subsampling, mc_samples=mc_samples)
)
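# A hedged usage sketch (uses the same BackPACK `SqrtGGNExact`/`SqrtGGNMC`
# constructor arguments as the function body above):
#
#   exact = get_sqrt_ggn_extension(subsampling=None, mc_samples=0)    # exact GGN square root
#   mc = get_sqrt_ggn_extension(subsampling=[0, 1], mc_samples=8)     # MC approximation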
| 17,111
|
def gray_to_rgb(image):
"""convert cv2 image from GRAYSCALE to RGB
:param image: the image to be converted
:type image: cv2 image
:return: converted image
:rtype: cv2 image
"""
return cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
| 17,112
|
def test_eq(character: Character, other: Any, expected_equality: bool) -> None:
"""Test illud.character.Character.__eq__."""
equality: bool = character == other
assert equality == expected_equality
| 17,113
|
def run_evaluation(model, dataset_name, dataset,
mesh, batch_size=32, img_res=224,
num_workers=32, shuffle=False, log_freq=50):
"""Run evaluation on the datasets and metrics we report in the paper. """
renderer = PartRenderer()
# Create SMPL model
smpl = SMPL().cuda()
# Regressor for H36m joints
J_regressor = torch.from_numpy(np.load(cfg.JOINT_REGRESSOR_H36M)).float()
# Create dataloader for the dataset
data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
# Transfer model to the GPU
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
model.eval()
# Pose metrics
# MPJPE and Reconstruction error for the non-parametric and parametric shapes
mpjpe = np.zeros(len(dataset))
recon_err = np.zeros(len(dataset))
mpjpe_smpl = np.zeros(len(dataset))
recon_err_smpl = np.zeros(len(dataset))
# Shape metrics
# Mean per-vertex error
shape_err = np.zeros(len(dataset))
shape_err_smpl = np.zeros(len(dataset))
# Mask and part metrics
# Accuracy
accuracy = 0.
parts_accuracy = 0.
# True positive, false positive and false negative
tp = np.zeros((2,1))
fp = np.zeros((2,1))
fn = np.zeros((2,1))
parts_tp = np.zeros((7,1))
parts_fp = np.zeros((7,1))
parts_fn = np.zeros((7,1))
# Pixel count accumulators
pixel_count = 0
parts_pixel_count = 0
eval_pose = False
eval_shape = False
eval_masks = False
eval_parts = False
# Choose appropriate evaluation for each dataset
if dataset_name == 'h36m-p1' or dataset_name == 'h36m-p2':
eval_pose = True
elif dataset_name == 'up-3d':
eval_shape = True
elif dataset_name == 'lsp':
eval_masks = True
eval_parts = True
annot_path = cfg.DATASET_FOLDERS['upi-s1h']
# Iterate over the entire dataset
for step, batch in enumerate(tqdm(data_loader, desc='Eval', total=len(data_loader))):
# Get ground truth annotations from the batch
gt_pose = batch['pose'].to(device)
gt_betas = batch['betas'].to(device)
gt_vertices = smpl(gt_pose, gt_betas)
images = batch['img'].to(device)
curr_batch_size = images.shape[0]
# Run inference
with torch.no_grad():
pred_vertices, pred_vertices_smpl, camera, pred_rotmat, pred_betas = model(images)
# 3D pose evaluation
if eval_pose:
# Regressor broadcasting
J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(device)
# Get 14 ground truth joints
gt_keypoints_3d = batch['pose_3d'].cuda()
gt_keypoints_3d = gt_keypoints_3d[:, cfg.J24_TO_J14, :-1]
# Get 14 predicted joints from the non-parametic mesh
pred_keypoints_3d = torch.matmul(J_regressor_batch, pred_vertices)
pred_pelvis = pred_keypoints_3d[:, [0],:].clone()
pred_keypoints_3d = pred_keypoints_3d[:, cfg.H36M_TO_J14, :]
pred_keypoints_3d = pred_keypoints_3d - pred_pelvis
# Get 14 predicted joints from the SMPL mesh
pred_keypoints_3d_smpl = torch.matmul(J_regressor_batch, pred_vertices_smpl)
pred_pelvis_smpl = pred_keypoints_3d_smpl[:, [0],:].clone()
pred_keypoints_3d_smpl = pred_keypoints_3d_smpl[:, cfg.H36M_TO_J14, :]
pred_keypoints_3d_smpl = pred_keypoints_3d_smpl - pred_pelvis_smpl
# Compute error metrics
# Absolute error (MPJPE)
error = torch.sqrt(((pred_keypoints_3d - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
error_smpl = torch.sqrt(
((pred_keypoints_3d_smpl - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error
mpjpe_smpl[step * batch_size:step * batch_size + curr_batch_size] = error_smpl
            # Reconstruction error
r_error = reconstruction_error(pred_keypoints_3d.cpu().numpy(), gt_keypoints_3d.cpu().numpy(),
reduction=None)
r_error_smpl = reconstruction_error(pred_keypoints_3d_smpl.cpu().numpy(),
gt_keypoints_3d.cpu().numpy(), reduction=None)
recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error
recon_err_smpl[step * batch_size:step * batch_size + curr_batch_size] = r_error_smpl
# Shape evaluation (Mean per-vertex error)
if eval_shape:
se = torch.sqrt(((pred_vertices - gt_vertices) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
se_smpl = torch.sqrt(((pred_vertices_smpl - gt_vertices) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
shape_err[step * batch_size:step * batch_size + curr_batch_size] = se
shape_err_smpl[step * batch_size:step * batch_size + curr_batch_size] = se_smpl
# If mask or part evaluation, render the mask and part images
if eval_masks or eval_parts:
mask, parts = renderer(pred_vertices, camera)
# Mask evaluation (for LSP)
if eval_masks:
center = batch['center'].cpu().numpy()
scale = batch['scale'].cpu().numpy()
# Dimensions of original image
orig_shape = batch['orig_shape'].cpu().numpy()
for i in range(curr_batch_size):
                # After rendering, convert image back to original resolution
pred_mask = uncrop(mask[i].cpu().numpy(), center[i], scale[i], orig_shape[i]) > 0
# Load gt mask
gt_mask = cv2.imread(os.path.join(annot_path, batch['maskname'][i]), 0) > 0
# Evaluation consistent with the original UP-3D code
accuracy += (gt_mask == pred_mask).sum()
pixel_count += np.prod(np.array(gt_mask.shape))
for c in range(2):
cgt = gt_mask == c
cpred = pred_mask == c
tp[c] += (cgt & cpred).sum()
fp[c] += (~cgt & cpred).sum()
fn[c] += (cgt & ~cpred).sum()
f1 = 2 * tp / (2 * tp + fp + fn)
# Part evaluation (for LSP)
if eval_parts:
center = batch['center'].cpu().numpy()
scale = batch['scale'].cpu().numpy()
orig_shape = batch['orig_shape'].cpu().numpy()
for i in range(curr_batch_size):
pred_parts = uncrop(parts[i].cpu().numpy().astype(np.uint8), center[i], scale[i], orig_shape[i])
# Load gt part segmentation
gt_parts = cv2.imread(os.path.join(annot_path, batch['partname'][i]), 0)
# Evaluation consistent with the original UP-3D code
# 6 parts + background
for c in range(7):
cgt = gt_parts == c
cpred = pred_parts == c
cpred[gt_parts == 255] = 0
parts_tp[c] += (cgt & cpred).sum()
parts_fp[c] += (~cgt & cpred).sum()
parts_fn[c] += (cgt & ~cpred).sum()
gt_parts[gt_parts == 255] = 0
pred_parts[pred_parts == 255] = 0
parts_f1 = 2 * parts_tp / (2 * parts_tp + parts_fp + parts_fn)
parts_accuracy += (gt_parts == pred_parts).sum()
parts_pixel_count += np.prod(np.array(gt_parts.shape))
# Print intermediate results during evaluation
if step % log_freq == log_freq - 1:
if eval_pose:
print('MPJPE (NonParam): ' + str(1000 * mpjpe[:step * batch_size].mean()))
print('Reconstruction Error (NonParam): ' + str(1000 * recon_err[:step * batch_size].mean()))
print('MPJPE (Param): ' + str(1000 * mpjpe_smpl[:step * batch_size].mean()))
print('Reconstruction Error (Param): ' + str(1000 * recon_err_smpl[:step * batch_size].mean()))
print()
if eval_shape:
print('Shape Error (NonParam): ' + str(1000 * shape_err[:step * batch_size].mean()))
print('Shape Error (Param): ' + str(1000 * shape_err_smpl[:step * batch_size].mean()))
print()
if eval_masks:
print('Accuracy: ', accuracy / pixel_count)
print('F1: ', f1.mean())
print()
if eval_parts:
print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
print()
# Print final results during evaluation
print('*** Final Results ***')
print()
if eval_pose:
print('MPJPE (NonParam): ' + str(1000 * mpjpe.mean()))
print('Reconstruction Error (NonParam): ' + str(1000 * recon_err.mean()))
print('MPJPE (Param): ' + str(1000 * mpjpe_smpl.mean()))
print('Reconstruction Error (Param): ' + str(1000 * recon_err_smpl.mean()))
print()
if eval_shape:
print('Shape Error (NonParam): ' + str(1000 * shape_err.mean()))
print('Shape Error (Param): ' + str(1000 * shape_err_smpl.mean()))
print()
if eval_masks:
print('Accuracy: ', accuracy / pixel_count)
print('F1: ', f1.mean())
print()
if eval_parts:
print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
print()
| 17,114
|
def win_to_cygwin(winpath):
"""run `cygpath winpath` to get cygwin path"""
x = detail.command.run(['cygpath', winpath])
assert(len(x) == 1)
return x[0]
| 17,115
|
def test_register_options():
"""Verify we call add_options on the plugin only if it exists."""
# Set up our mocks and Plugin object
entry_point = mock.Mock(spec=['load'])
plugin_obj = mock.Mock(spec_set=['name', 'version', 'add_options',
'parse_options'])
option_manager = mock.MagicMock(spec=options_manager.OptionManager)
plugin = manager.Plugin('T000', entry_point)
plugin._plugin = plugin_obj
# Call the method we're testing.
plugin.register_options(option_manager)
# Assert that we call add_options
plugin_obj.add_options.assert_called_once_with(option_manager)
| 17,116
|
def extract_array_from_gpu( part_idx_start, array, selected ):
"""
Extract a selection of particles from the GPU and
store them in a 1D array (N_part,)
Selection goes from starting index (part_idx_start)
to (part_idx_start + N_part-1), where N_part is derived
from the shape of the array `selected`.
Parameters
----------
part_idx_start : int
The starting index needed for the extraction process.
( minimum particle index to be extracted )
array : 1D arrays of ints or floats
The GPU particle arrays for a given species. (e.g. particle id)
selected : 1D array of ints or floats
An empty GPU array to store the particles that are extracted.
"""
i = cuda.grid(1)
N_part = selected.shape[0]
if i < N_part:
selected[i] = array[part_idx_start+i]
| 17,117
|
def main(argv=None):
"""
    *Optional but needed if you want to run the model from the command line*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
"""
global multpars
caseName = "default_sediment"
runId = "run_default"
configfile = "wflow_sediment.ini"
_lastTimeStep = 0
_firstTimeStep = 0
timestepsecs = 86400
wflow_cloneMap = "wflow_subcatch.map"
# This allows us to use the model both on the command line and to call
# the model using main function from another python script.
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
return
opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
for o, a in opts:
if o == "-C":
caseName = a
if o == "-R":
runId = a
if o == "-c":
configfile = a
if o == "-s":
timestepsecs = int(a)
if o == "-T":
_lastTimeStep = int(a)
if o == "-S":
_firstTimeStep = int(a)
if len(opts) <= 1:
usage()
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
dynModelFw = wf_DynamicFramework(
myModel, _lastTimeStep, firstTimestep=_firstTimeStep
)
dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
# dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
| 17,118
|
def nucleotide_composition_to_letter(composition):
"""
Converts dictionary of {nucleotide letter: proportion} pairs
to IUPAC degenerate DNA letter.
Usage:
c = {'A': 1}
print(nucleotide_composition_to_letter(c)) --> 'A'
c = dict(zip('ACGT', [1, 1, 1, 1]))
print(nucleotide_composition_to_letter(c)) --> 'N'
c = dict(zip('ACGT', [1, 1, 2, 1]))
print(nucleotide_composition_to_letter(c)) --> 'n'
"""
nonzero_nucleotides = ''.join(sorted([n
for n, v in composition.items()
if v > 0]))
nonzero_proportions = [composition[n] for n in nonzero_nucleotides]
equimolar = min(nonzero_proportions) == max(nonzero_proportions)
letter = DEGENERATE_NUCLEOTIDE_CODE_REVERSED.get(nonzero_nucleotides,
DEFAULT_NUCLEOTIDE_LABEL)
if equimolar:
return letter
return letter.lower()
| 17,119
|
def forcast(doc):
"""
    :param doc: doc object with `word_tokens`
    :returns: tuple of (grade level, reading age, monosyllable count)
"""
word_tokens = doc.word_tokens
monosyllables = 0
    # drop short non-alphabetic tokens (filtering into a new list avoids
    # skipping items while mutating the list being iterated)
    word_tokens = [i for i in word_tokens if i.isalpha() or len(i) >= 2]
for i in word_tokens[10:159]:
if syllable_count(i) < 2:
monosyllables += 1
gl = 20 - (monosyllables/10)
ra = 25 - (monosyllables/10)
return (gl, ra, monosyllables)
| 17,120
|
def organization_following_request_get(
organization_id: UUID = typer.Argument(..., help="UUID of the organization"),
following_id: UUID = typer.Argument(..., help="UUID of the following request")
):
"""
Returns a request received by an organization to follow another.
"""
client = Client(profile=global_options['profile'])
dump_json(client.get_organization_following_request(organization_id, following_id))
| 17,121
|
def convert_coordinates_to_country(deg_x: float, deg_y: float) -> str:
""" returns country name """
return geocoder.osm([deg_x, deg_y], method="reverse").country
| 17,122
|
def fixture_items(test_list):
"""Returns an instance of ItemCollection for testing"""
return test_list.get_items(query=QUERY)
| 17,123
|
def match_files(base_dir, pattern):
"""
Return the files matching the given pattern.
:param base_dir: directory to search in
:param pattern: file pattern to use
:returns generator: the generator which iterates the matching file names
"""
for path, _, files in os.walk(base_dir):
for name in files:
if fnmatch.fnmatch(name, pattern):
yield os.path.join(path, name)
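# A hedged usage sketch:
#
#   for filename in match_files('/var/log', '*.log'):
#       print(filename)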
| 17,124
|
def numbers_lists_entry_widget(
list_param: list,
name: str,
expect_amount: int = -1,
expect_int: bool = False,
help=None,
) -> list:
"""
    Create a list text input field and check that the expected amount and type match, if set.
    :param list_param: a list variable handled by this widget
    :param name: the name/unique key of the text input for streamlit
    :param expect_amount: set >0 to check that exactly this many values are entered
    :param expect_int: if True, parse values as int instead of float
    :param help: help text shown next to the streamlit text input
    :return: the list, possibly modified by the user's input
"""
# train_lists_str = clean_list_str(str(self.trainer_params_json["gen"]["train"]["lists"]))
lists_str = clean_list_str(str(list_param))
logger.debug(f"cleaned str: {lists_str}")
lists_field, lists_state = st.columns([10, 1])
lists_field = lists_field.text_input(name, value=lists_str, help=help)
if lists_field:
lists_field = clean_list_str(lists_field)
lists_list = [str(x).replace(" ", "") for x in lists_field.split(",")]
ok = True if lists_list else False
if expect_amount > 0 and len(lists_list) != expect_amount:
ok = False
if expect_int:
for idx in range(len(lists_list)):
try:
lists_list[idx] = int(lists_list[idx])
                except ValueError:
                    ok = False
else:
for idx in range(len(lists_list)):
try:
lists_list[idx] = float(lists_list[idx])
                except ValueError:
                    ok = False
if ok:
lists_state.latex(state_ok)
return lists_list
else:
lists_state.latex(state_failed)
return []
| 17,125
|
def annotate_image(image: Image.Image, room_classification: dict) -> None:
"""Annotate a given image. This is done in-place. Nothing is returned.
Args
----
image: Pillow image
    room_classification: mapping of room label to predicted probability
"""
logging.debug(f"annotating image ...")
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("fonts/arial.ttf", 25)
label = max(room_classification, key=room_classification.get)
prob = round(room_classification[label], 4)
width, height = image.size
center = width // 2, height // 2
draw.text(
center,
f"{label}: {str(round(prob * 100))}" + str("%"),
fill=(255, 0, 0),
font=font,
)
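# A hedged usage sketch (the file names are illustrative; the font path
# "fonts/arial.ttf" used above must exist for the call to succeed):
#
#   img = Image.open('room.jpg')
#   annotate_image(img, {'kitchen': 0.72, 'bathroom': 0.28})
#   img.save('room_annotated.jpg')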
| 17,126
|
def get_daily_discussion_post(subreddit_instance: praw.models.Subreddit):
"""Try to get the daily discussions post for a subreddit.
Args:
subreddit_instance
Returns:
The submission object for the discussion post, or None if it couldn't be found.
Works by searching the stickied posts of the subreddit for a post with 'daily discussion' in the title.
"""
print('Searching stickied posts for daily discussion posts..')
for sticky_num in [1, 2]:
discussion_post = subreddit_instance.sticky(number=sticky_num)
if 'daily discussion' in discussion_post.title.lower():
print(f'Got daily discussion post, title {discussion_post.title}')
return discussion_post
print("Couldn't find daily discussion post!")
return None
| 17,127
|
def menu_rekening_opzeggen(): # IO ()
"""
    Menu item two. Asks for the input needed to call rekening_verwijderen().
:return: ()
"""
rekeningnr = input_rekeningnr("Welk rekeningnummer wil je opzeggen? ")
rekening_verwijderen(rekeningnr)
| 17,128
|
def metric_by_training_size(X, y, classifier_list, training_set, metric, as_percentage=True):
"""
    This is a refactoring of code that repeats a metric for fitted models across several training-set percentage sizes,
    e.g. computing the accuracy of SVM, random forest and naive Bayes classifiers for multiple train-test splits and
    returning the results as an np.ndarray.
    :param X: feature matrix
    :param y: target vector
    :param classifier_list: list of scikit-learn style classifiers to fit and score
    :param training_set: iterable of training-set fractions (e.g. [0.5, 0.7, 0.9])
    :param metric: callable metric(y_true, y_pred)
    :param as_percentage: if True, scale the metric values by 100
    :return: np.ndarray of shape (len(classifier_list), len(training_set))
"""
metric_array = np.zeros((len(training_set), len(classifier_list)))
for row_num, training_size in enumerate(training_set):
X_train_iter, X_test_iter, y_train_iter, y_test_iter = train_test_split(X, y,
test_size=1 - training_size,
random_state=0)
metric_list = []
for classifier in classifier_list:
y_pred = classifier.fit(X_train_iter, y_train_iter).predict(X_test_iter)
metric_list.append(metric(y_test_iter, y_pred))
metric_array[row_num] = metric_list
metric_array = metric_array.transpose()
return 100 * metric_array if as_percentage else metric_array
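# A hedged usage sketch (assumes scikit-learn is available; X and y are a
# feature matrix and target vector defined elsewhere):
#
#   from sklearn.metrics import accuracy_score
#   from sklearn.naive_bayes import GaussianNB
#   from sklearn.svm import SVC
#
#   scores = metric_by_training_size(X, y,
#                                    classifier_list=[SVC(), GaussianNB()],
#                                    training_set=[0.5, 0.7, 0.9],
#                                    metric=accuracy_score)
#   # scores has shape (2, 3): one row per classifier, one column per training size.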
| 17,129
|
def apply_back_defence(
board: Board, opponent: Optional[Player] = None
) -> Optional[Action]:
"""
Move to intercept.
"""
player = board.controlled_player
ball = board.ball
if not opponent:
opponent = ball.player
if opponent.vector.x < 0:
# trying to predict opponent's next move
new_vector = __get_opponent_vector(board, opponent)
opponent = deepcopy(opponent)
opponent.vector = new_vector
intercept_interval = speed_interval(opponent.position, opponent.vector, opponent=player)
if intercept_interval:
target = opponent.future_position(turns=intercept_interval.lower() + 3)
else:
target = opponent.future_position(turns=5)
vector = Vector.from_point(target - player.position)
should_slide = __should_slide(board, player, ball, intercept_interval)
action = Action.Slide if should_slide else None
logger.debug(
"Slide action: Move to intercept, "
f"opponent = {opponent}, "
f"action = {action}, "
f"intercept_interval = {intercept_interval}, "
f"intercept_vector = {vector}."
)
return board.set_action(action, vector, sprint=True, dribble=False)
| 17,130
|
def your_sanity_checks():
"""
Use this space add any additional sanity checks by running:
python q2_neural.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print "Running your sanity checks..."
| 17,131
|
def remove_namespace(tag, ns):
"""Remove namespace from xml tag."""
for n in ns.values():
tag = tag.replace('{' + n + '}', '')
return tag
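# A hedged usage sketch (hypothetical namespace mapping):
#
#   ns = {'svg': 'http://www.w3.org/2000/svg'}
#   remove_namespace('{http://www.w3.org/2000/svg}rect', ns)  # -> 'rect'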
| 17,132
|
def test_close_trustline_last_edge_insufficient_capacity(
complex_community_with_trustlines_and_fees,
):
"""A owes money to B and A wants to reduce that amount with the help of C"""
complex_community_with_trustlines_and_fees.update_balance(
A, B, 50000
) # amount B owes A
complex_community_with_trustlines_and_fees.update_balance(A, C, 10000)
complex_community_with_trustlines_and_fees.update_balance(B, D, -10000)
complex_community_with_trustlines_and_fees.update_balance(C, D, 10000)
now = int(time.time())
payment_path = complex_community_with_trustlines_and_fees.close_trustline_path_triangulation(
now, A, B
)
assert payment_path.path == []
| 17,133
|
def false_prediction_pairs(y_pred, y_true):
"""
    Finds pairs of predicted and true classes that differ.
    Returns
    -------
    false_pairs
        The pairs of classes that differ.
    counts
        Number of occurrences of each pair.
"""
cond = y_pred != y_true
false_preds = np.stack([y_true[cond], y_pred[cond]], axis=-1)
false_pairs, counts = np.unique(false_preds, axis=0, return_counts=True)
return false_pairs, counts
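# A hedged usage sketch (assumes numpy is imported as np):
#
#   y_true = np.array([0, 1, 2, 2, 1])
#   y_pred = np.array([0, 2, 2, 1, 2])
#   pairs, counts = false_prediction_pairs(y_pred, y_true)
#   # pairs  -> [[1, 2], [2, 1]]   (true class, predicted class)
#   # counts -> [2, 1]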
| 17,134
|
def draw_boxes_and_labels_to_image_multi_classes(image, classes, coords, scores=None, classes_name=None, classes_colors=None, font_color=[0, 0, 255]):
"""
Draw bboxes and class labels on image. Return or save the image with bboxes
Parameters
-----------
image : numpy.array
The RGB image [height, width, channel].
classes : list of int
A list of class ID (int).
coords : list of int
A list of list for coordinates.
- Should be [x, y, x2, y2]
scores : list of float
A list of score (float). (Optional)
classes_name : list of str
For converting ID to string on image.
classes_colors : list of color
A list of color [ [r,g,b], ...].
    font_color : list of int
        Font color as [r, g, b].
Returns
-------
numpy.array
The output image.
"""
image = image.copy()
imh, imw = image.shape[0:2]
    thick = int((imh + imw) // 500)  # line thickness
for i, _v in enumerate(coords):
x, y, x2, y2 = np.asarray(coords[i], np.int32)
bbox_color = [0, 255, 0] if classes_colors is None else classes_colors[classes[i]]
cv2.rectangle(image, (x, y), (x2, y2), bbox_color, thick)
if classes is not None:
text = []
for c in classes[i]:
class_text = classes_name[c] if classes_name is not None else str(c)
# score_text = " %.2f" % (scores[i]) if scores is not None else ''
t = class_text #+ score_text
text.append(t)
text = '\n'.join(text)
score_text = " %.2f" % (scores[i]) if scores is not None else ''
text += score_text
font_scale = 1.0e-3 * imh
# text_size, _ = cv2.getTextSize(text, 0, font_scale, int(thick / 2) + 1)
# cv2.rectangle(image, (x, y), (x+text_size[0], y-text_size[1]), bbox_color, -1)
# cv2.putText(image, text, (x, y), 0, font_scale, font_color, int(thick / 3) + 1)
image = im_tool.put_text(image, text, (x, y), font_scale*32, font_color, bbox_color)
return image
| 17,135
|
def route_open_file_dialog(fqname):
"""Return html of file structure for that parameter"""
# these arguments are only set when called with the `navigate_to` function on an already open
# file dialog
current_folder = request.args.get('current_folder')
folder = request.args.get('folder')
config = current_app.cea_config
section, parameter_name = fqname.split(':')
parameter = config.sections[section].parameters[parameter_name]
if not current_folder:
# first time calling, use current value of parameter for current folder
current_folder = os.path.dirname(parameter.get())
folder = None
else:
current_folder = os.path.abspath(os.path.join(current_folder, folder))
if not os.path.exists(current_folder):
# use home directory if it doesn't exist
current_folder = os.path.expanduser('~')
folders = []
files = []
for entry in os.listdir(current_folder):
if os.path.isdir(os.path.join(current_folder, entry)):
folders.append(entry)
else:
ext = os.path.splitext(entry)[1]
if parameter._extensions and ext and ext[1:] in parameter._extensions:
files.append(entry)
elif not parameter._extensions:
# any file can be added
files.append(entry)
breadcrumbs = os.path.normpath(current_folder).split(os.path.sep)
return render_template('file_listing.html', current_folder=current_folder,
folders=folders, files=files, title=parameter.help, fqname=fqname,
parameter_name=parameter.name, breadcrumbs=breadcrumbs)
| 17,136
|
def simulate():
"""
Runs a simulation given a context, a simulator, a trace, and a depth
Method PUT
"""
context = request.get_json()['context']
simulator = request.get_json()['simulator']
trace = request.get_json()['trace']
depth = request.get_json()['depth']
if context is None or simulator is None or trace is None or depth is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
sim = contexts[context]['simulators'][simulator]
tra = contexts[context]['traces'][trace]
dep = int(depth)
assert ctx is not None
assert sim is not None
assert tra is not None
sim.simulate(tra, dep)
return {'result': 'ok'}, 200
| 17,137
|
def soft_expected_backup_rl(
next_q: Array,
next_pol: Array,
next_log_pol: Array,
rew: Array,
done: Array,
discount: float,
er_coef: float,
) -> Array:
"""Do soft expected bellman-backup :math:`r + \gamma P \langle \pi, q - \tau * \log{\pi}\rangle`.
Args:
next_q (Array): ? x dA q-values.
next_pol (Array): ? x dA policy.
next_log_pol (Array): ? x dA log-policy.
rew (Array): ? x 1 rewards.
done (Array): ? x 1 done flags.
discount (float): Discount factor.
er_coef (float): Entropy coefficient.
Returns:
q (Array): ? x 1 q-values.
"""
chex.assert_rank([next_q, next_pol], 2)
next_v = next_pol * (next_q - er_coef * next_log_pol)
next_v = next_v.sum(axis=-1, keepdims=True) # ? x 1
q = rew + discount * next_v * (~done)
return q
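# A hedged usage sketch (assumes JAX; shapes follow the docstring above):
#
#   import jax.numpy as jnp
#   next_q = jnp.array([[1.0, 2.0]])      # 1 x dA
#   next_pol = jnp.array([[0.5, 0.5]])    # 1 x dA
#   next_log_pol = jnp.log(next_pol)
#   rew = jnp.array([[1.0]])              # 1 x 1
#   done = jnp.array([[False]])           # 1 x 1
#   q = soft_expected_backup_rl(next_q, next_pol, next_log_pol, rew, done,
#                               discount=0.99, er_coef=0.01)  # -> shape (1, 1)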
| 17,138
|
def readcal(calfile):
"""
This reads all of the information from a master calibration index and returns
it in a dictionary where each calibration type has a structured arrays that
can be accessed by the calibration name (e.g. 'dark').
"""
    if not os.path.exists(calfile):
raise ValueError(calfile+' NOT FOUND')
lines = dln.readlines(calfile)
lines = np.char.array(lines)
# Get rid of comment and blank lines
    gd,ngd = dln.where((lines.find('#') != 0) & (lines != ''))
if ngd==0:
raise ValueError('No good calibration lines')
lines = lines[gd]
# Initialize calibration dictionary
caldict = OrderedDict()
dtdict = OrderedDict()
# -- Darks --
# mjd1, mjd2, name, frames
# dark 55600 56860 12910009 12910009-12910037
# dark 56861 99999 15640003 15640003-15640021
dtdict['dark'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100)])
# -- Flats --
# mjd1, mjd2, name, frames, nrep, dithered
# flat 99999 55761 01380106 1380106-1380134 1 1
# flat 99999 99999 02410013 2410013-2410022 1 0
dtdict['flat'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('nrep',int),('dithered',int)])
# -- Sparse --
# mjd1, mjd2, name, frames, darkframes, dmax, maxread
# sparse 55600 55761 01590015 1590015-1590024 0 21 30,30,20
# sparse 55797 99999 02410059 2410059-2410068 2410058,2410069 21 30,30,20
dtdict['sparse'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('darkframes',np.str,100),('dmax',int),('maxread',np.str,100)])
# -- Fiber --
# mjd1, mjd2, name
# fiber 55600 55761 01970078
# fiber 55797 56860 02410024
dtdict['fiber'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50)])
# -- Badfiber --
# mjd1, mjd2, frames
# badfiber 55600 57008 0
# badfiber 57009 57177 195
dtdict['badfiber'] = np.dtype([('mjd1',int),('mjd2',int),('frames',np.str,100)])
# -- Fixfiber --
# mjd1, mjd2, name
# fixfiber 56764 56773 1
# fixfiber 58038 58046 2
dtdict['fixfiber'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50)])
# -- Wave --
# mjd1, mjd2, name, frames, psfid
# wave 55699 55699 01370096 1370096,1370099 1370098
# wave 55700 55700 01380079 1380079 1380081
dtdict['wave'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('psfid',int)])
# -- Multiwave --
# mjd1, mjd2, name, frames
# multiwave 55800 56130 2380000 02390007,02390008,02500007
# multiwave 56130 56512 5680000 05870007,05870008,05870018,05870019
dtdict['multiwave'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,500)])
# -- LSF --
# mjd1, mjd2, name, frames, psfid
# lsf 55800 56130 03430016 03430016 03430020
# lsf 56130 56512 07510018 07510018 07510022
dtdict['lsf'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('psfid',int)])
# -- Det --
# mjd1, mjd2, name, linid
# det 99999 99999 55640 0
# det 55600 56860 11870003 11870003
dtdict['det'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('linid',int)])
# -- BPM --
# mjd1, mjd2, name, darkid, flatid
# bpm 99999 99999 05560001 5560001 4750009
# bpm 55600 56860 12910009 12910009 4750009
dtdict['bpm'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('darkid',int),
('flatid',int)])
# -- Littrow --
# mjd1, mjd2, name, psfid
# littrow 55600 56860 06670109 6670109
# littrow 56861 99999 13400052 13400052
dtdict['littrow'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('psfid',int)])
# -- Persist --
# mjd1, mjd2, name, darkid, flatid, thresh
# persist 55600 56860 04680019 4680019 4680018 0.03
# persist 56861 99999 13400061 13400061 13400060 0.03
dtdict['persist'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('darkid',int),
('flatid',int),('thresh',float)])
# -- Persistmodel --
# mjd1, mjd2, name
# persistmodel 55600 56860 57184
# persistmodel 56861 99999 0
dtdict['persistmodel'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50)])
# -- Response --
# mjd1, mjd2, name, fluxid, psfid, temp
# response 55600 99999 0 0 0 0
dtdict['response'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('fluxid',int),
('psfid',int),('temp',float)])
# Readnoise
# frame1, frame2
# rn 1380094 1380095
# rn 1380102 1380103
#dtdict['rn'] = np.dtype([('frame1',int),('frame2',int)])
# Gain
# frame1, frame2
#dtdict['gain'] = np.dtype([('frame1',int),('frame2',int)])
# READNOISE and GAIN lines are NOT used
# Load the data
for caltype in dtdict.keys():
cat = loadcaltype(lines,caltype,dtdict[caltype])
caldict[caltype.strip()] = cat
return caldict
| 17,139
|
def global_attributes_dict():
# type: () -> Dict[str, str]
"""Set global attributes required by conventions.
Currently CF-1.6 and ACDD-1.3.
Returns
-------
global_atts: dict
Still needs title, summary, source, creator_institution,
product_version, references, cdm_data_type, institution,
geospatial_vertical_{min,max,positive,units}, ...
References
----------
CF Conventions document:
http://cfconventions.org
ACDD document:
http://wiki.esipfed.org/index.php/Category:Attribute_Conventions_Dataset_Discovery
NCEI Templates:
https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/
"""
username = getpwuid(os.getuid())[0]
global_atts = dict(
Conventions="CF-1.6 ACDD-1.3",
standard_name_vocabulary="CF Standard Name Table v32",
history=(
"{now:{date_fmt:s}}: Created by {progname:s} "
"with command line: {cmd_line:s}"
).format(
now=RUN_DATE,
date_fmt=UDUNITS_DATE,
progname=sys.argv[0],
cmd_line=COMMAND_LINE,
),
source=("Created by {progname:s} " "with command line: {cmd_line:s}").format(
progname=sys.argv[0],
cmd_line=COMMAND_LINE,
),
date_created="{now:{date_fmt:s}}".format(now=RUN_DATE, date_fmt=ACDD_DATE),
date_modified="{now:{date_fmt:s}}".format(now=RUN_DATE, date_fmt=ACDD_DATE),
date_metadata_modified="{now:{date_fmt:s}}".format(
now=RUN_DATE, date_fmt=ACDD_DATE
),
creator_name=username,
creator_email="{username:s}@{host:s}".format(
username=username,
host=MAIN_HOST,
),
creator_institution=MAIN_HOST,
)
try:
global_atts["conda_packages"] = subprocess.check_output(
# Full urls including package, version, build, and MD5
["conda", "list", "--explicit", "--md5"],
universal_newlines=True,
)
except OSError:
pass
try:
global_atts["pip_packages"] = subprocess.check_output(
[sys.executable, "-m", "pip", "freeze"],
universal_newlines=True,
)
except OSError:
pass
return global_atts
| 17,140
|
def test_write_cif_PIYZAZ():
"""
Tests writing cif formatted molecule file
    File comparison tests are OS dependent; they are expected to work on UNIX but not on Windows.
"""
piyzaz = Molecule(atoms=piyzaz_atoms, coordinates=piyzaz_coors)
test_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'piyzaz_test.cif')
piyzaz.write(test_file, cell=piyzaz_cell_parameters, header='piyzaz')
assert filecmp.cmp(piyzaz_cif, test_file)
os.remove(test_file)
| 17,141
|
def add_txt(axes):
"""Add text to the plot.
Parameters
----------
    axes : matplotlib.axes.Axes
        The matplotlib axes to draw the text on.
"""
axes.text(0.5, 1.0, "FOURIER ", transform=axes.transAxes,
ha="right", va="bottom", color="w",
family="sans-serif", fontweight="light", fontsize=16)
axes.text(0.5, 1.0, "UNCHAINED", transform=axes.transAxes,
ha="left", va="bottom", color="w",
family="sans-serif", fontweight="bold", fontsize=16)
| 17,142
|
def test_write_yml():
"""读写"""
data = {"name": "wxnacy"}
_path = f'/tmp/wpy_{random.randint(9000, 10000)}.yml'
path.write_yml(_path, data)
res = path.read_dict(_path)
    assert data == res
| 17,143
|
def generate_bias(series: pd.Series, effect_size: float = 1, power: float = 1) -> pd.Series:
"""
Calculate bias for sensitive attribute
Parameters
----------
series : pd.Series
sensitive attribute for which the bias is calculated.
effect_size : float, optional
Size of the bias for 1 std from the mean. The default is 1.
power : float, optional
power=1: linear bias, power=2: quadratic bias, etc. The default is 1.
Returns
-------
pd.Series
        Bias values for each entry of `series`, standardized to zero mean and scaled by `effect_size`.
"""
bias = series.sub(series.mean()).pow(power)
bias = (bias - bias.mean())/bias.std() # Make the bias neutral
return bias * effect_size
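# A hedged usage sketch (assumes pandas is imported as pd):
#
#   age = pd.Series([20, 30, 40, 50, 60])
#   bias = generate_bias(age, effect_size=0.5, power=1)
#   # zero-mean series; an entry one standard deviation above the mean of
#   # `age` receives a bias of roughly +0.5.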
| 17,144
|
def compress_coils(kspace,
num_output_coils=None,
tol=None,
coil_axis=-1,
matrix=None,
method='svd',
**kwargs):
"""Coil compression gateway.
This function estimates a coil compression matrix and uses it to compress
`kspace`. Alternatively, this method can use a precomputed coil compression
matrix to perform the compression. In this case, use
`tfmr.coil_compression_matrix` to calculate the compression matrix, then pass
it to this function using the `matrix` argument. Use this two-step process
if you intend to reuse a coil compression matrix or need to calibrate the
compression using different data.
This function supports the following coil compression methods:
* **SVD**: Based on direct singular-value decomposition (SVD) of *k*-space
data [1]_. This coil compression method supports Cartesian and
non-Cartesian data. This method is resilient to noise, but does not
achieve optimal compression if there are fully-sampled dimensions.
.. * **Geometric**: Performs local compression along fully-sampled dimensions
.. to improve compression. This method only supports Cartesian data. This
.. method can suffer from low SNR in sections of k-space.
.. * **ESPIRiT**: Performs local compression along fully-sampled dimensions
.. and is robust to noise. This method only supports Cartesian data.
Args:
kspace: A `Tensor`. The multi-coil *k*-space data. Must have type
`complex64` or `complex128`. Must have shape `[..., Cin]`, where `...` are
the encoding dimensions and `Cin` is the number of coils. Alternatively,
the position of the coil axis may be different as long as the `coil_axis`
argument is set accordingly. If `method` is `"svd"`, `kspace` can be
Cartesian or non-Cartesian. If `method` is `"geometric"` or `"espirit"`,
`kspace` must be Cartesian.
num_output_coils: An `int`. The number of desired virtual output coils. If
`None`, the number of output coils is automatically determined based on
`tol`. If `tol` is also None, all virtual coils are returned.
tol: A `float` between 0.0 and 1.0. Virtual coils whose singular value is
less than `tol` times the first singular value are discarded. `tol` is
ignored if `num_output_coils` is also specified.
coil_axis: An `int`. Defaults to -1.
matrix: An optional `Tensor`. The coil compression matrix. If provided,
`matrix` is used to calculate the compressed output. Must have the same
type as `kspace`. Must have shape `[Cin, Cout]`, where `Cin` is the number
of input coils and `Cout` is the number of output coils. If `matrix` is
provided, arguments `num_output_coils` and `tol` are ignored.
method: A `string`. The coil compression algorithm. Must be `"svd"`.
**kwargs: Additional method-specific keyword arguments. See Notes for more
details.
Notes:
This function also accepts the following method-specific keyword arguments:
* For `method="svd"`, no additional keyword arguments are accepted.
Returns:
A `Tensor` containing the compressed *k*-space data. Has shape
`[..., Cout]`, where `Cout` is determined based on `num_output_coils` or
`tol` and `...` are the unmodified encoding dimensions.
References:
.. [1] Huang, F., Vijayakumar, S., Li, Y., Hertel, S. and Duensing, G.R.
(2008). A software channel compression technique for faster reconstruction
with many channels. Magn Reson Imaging, 26(1): 133-141.
.. [2] Zhang, T., Pauly, J.M., Vasanawala, S.S. and Lustig, M. (2013), Coil
compression for accelerated imaging with Cartesian sampling. Magn
Reson Med, 69: 571-582. https://doi.org/10.1002/mrm.24267
.. [3] Bahri, D., Uecker, M., & Lustig, M. (2013). ESPIRIT-based coil
compression for cartesian sampling. In Proceedings of the 21st
Annual Meeting of ISMRM, Salt Lake City, Utah, USA (Vol. 47).
"""
# pylint: disable=missing-raises-doc
kspace = tf.convert_to_tensor(kspace)
tf.debugging.assert_rank_at_least(kspace, 2, message=(
f"Argument `kspace` must have rank of at least 2, but got shape: "
f"{kspace.shape}"))
coil_axis = check_util.validate_type(coil_axis, int, name='coil_axis')
method = check_util.validate_enum(
method, {'svd', 'geometric', 'espirit'}, name='method')
# Move coil axis to innermost dimension if not already there.
if coil_axis != -1:
rank = kspace.shape.rank
canonical_coil_axis = coil_axis + rank if coil_axis < 0 else coil_axis
perm = (
[ax for ax in range(rank) if not ax == canonical_coil_axis] +
[canonical_coil_axis])
kspace = tf.transpose(kspace, perm)
# Calculate the compression matrix, unless one was already provided.
if matrix is None:
matrix = coil_compression_matrix(kspace,
num_output_coils=num_output_coils,
tol=tol,
method=method,
**kwargs)
# Apply the compression.
compressed_kspace = _apply_coil_compression(kspace, matrix)
# If necessary, move coil axis back to its original location.
if coil_axis != -1:
inv_perm = tf.math.invert_permutation(perm)
compressed_kspace = tf.transpose(compressed_kspace, inv_perm)
return compressed_kspace
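# A hedged usage sketch (assumes TensorFlow is imported as tf; shapes are illustrative):
#
#   kspace = tf.complex(tf.random.normal([128, 128, 16]),
#                       tf.random.normal([128, 128, 16]))
#   compressed = compress_coils(kspace, num_output_coils=8)  # -> shape [128, 128, 8]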
| 17,145
|
def get_thellier_gui_meas_mapping(input_df, output=2):
"""
Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui)
"""
if int(output) == 2:
thellier_gui_meas3_2_meas2_map = meas_magic3_2_magic2_map.copy()
if 'treat_step_num' in input_df.columns:
thellier_gui_meas3_2_meas2_map.update(
{'treat_step_num': 'measurement_number'})
thellier_gui_meas3_2_meas2_map.pop('measurement')
return thellier_gui_meas3_2_meas2_map
# 2 --> 3
else:
thellier_gui_meas2_2_meas3_map = meas_magic2_2_magic3_map.copy()
if 'measurement' in input_df.columns:
thellier_gui_meas2_2_meas3_map.pop('measurement_number')
try:
res = int(input_df.iloc[0]['measurement_number'])
if res < 100:
thellier_gui_meas2_2_meas3_map['measurement_number'] = 'treat_step_num'
except ValueError as ex:
pass
return thellier_gui_meas2_2_meas3_map
| 17,146
|
def test_make_df_2(pg_conn):
"""Test of main Postgres binary data to Pandas dataframe pipeline.
This tests boolean data.
"""
cursor = pg_conn.cursor()
# Copy binary data to a tempfile
path = tempfile.mkstemp()[1]
query = 'COPY test2 TO STDOUT BINARY;'
with open(path, 'wb') as f:
cursor.copy_expert(sql=query, file=f)
pg_conn.commit()
pg = Pg2Pd(path, ['boolean', 'boolean'], ['t', 'f'])
df = pg.make_df()
assert df['t'].tolist() == [True]
assert df['f'].tolist() == [False]
| 17,147
|
def new_unsigned_vaccination_credential(
passenger_first_name: str,
passenger_last_name: str,
passenger_id_number: str,
passenger_date_of_birth: str,
vaccination_disease: str,
vaccination_vaccine: str,
vaccination_product: str,
vaccination_auth_holder: str,
vaccination_dose_number: str,
vaccination_total_doses: str,
vaccination_batch: str,
vaccination_date: str,
vaccination_next_date: str,
vaccination_center: str,
vaccination_professional: str,
vaccination_country: str,
issuer_did: str
):
"""Create a Claims object for a Verifiable Credentia in JWT format.
The returned object has just the plain claims object, and has to be
signed later.
"""
# Generate a random UUID, not related to anything in the credential
# This is important for privacy reasons to avoid possibility of
# correlation if the UUID is used for Revocation Lists in a blockchain
uid = unique_id.uuid4().hex
# Current time and expiration
now = int(time.time())
exp = now + 365*24*60*60 # The token will expire in 365 days
# Generate a template Verifiable Credential
credential = {
"iss": issuer_did,
"sub": passenger_id_number,
"iat": now,
"exp": exp,
"uuid": uid,
"vc": {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://alastria.github.io/identity/credentials/v1",
"https://safeisland.org/.well-known/w3c-covid-test/v1"
],
"type": [
"VerifiableCredential",
"AlastriaVerifiableCredential",
"SafeIslandVaccinationCredential"
],
"credentialSchema": {
"id": "vaccinationCredential",
"type": "JsonSchemaValidator2018"
},
"credentialSubject": {
"vaccinationCredential": {
"patient": {
"name": passenger_last_name.upper() + "/" + passenger_first_name.upper(),
"idnumber": passenger_id_number,
"dob": passenger_date_of_birth
},
"vaccination": {
"disease": vaccination_disease,
"vaccine": vaccination_vaccine,
"product": vaccination_product,
"auth_holder": vaccination_auth_holder,
"dose_number": vaccination_dose_number,
"total_doses": vaccination_total_doses,
"batch": vaccination_batch,
"date": vaccination_date,
"next_date": vaccination_next_date,
"center": vaccination_center,
"professional": vaccination_professional,
"country": vaccination_country,
},
"comments": "These are some comments"
},
"issuedAt": ["redt.alastria"],
"levelOfAssurance": 2
}
}
}
return credential
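# A hedged usage sketch (all field values are illustrative placeholders):
#
#   claims = new_unsigned_vaccination_credential(
#       passenger_first_name="Alice", passenger_last_name="Example",
#       passenger_id_number="12345678X", passenger_date_of_birth="1980-01-01",
#       vaccination_disease="COVID-19", vaccination_vaccine="mRNA",
#       vaccination_product="Product-X", vaccination_auth_holder="Holder-Y",
#       vaccination_dose_number="1", vaccination_total_doses="2",
#       vaccination_batch="B001", vaccination_date="2021-05-01",
#       vaccination_next_date="2021-06-01", vaccination_center="Center-Z",
#       vaccination_professional="PRO-1", vaccination_country="ES",
#       issuer_did="did:elsi:example",
#   )
#   # The returned claims object still has to be signed (e.g. as a JWT) to
#   # become a Verifiable Credential.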
| 17,148
|
def calc_pk_integrated_intensities(p,x,pktype,num_pks):
"""
Calculates the area under the curve (integrated intensities) for fit peaks
Required Arguments:
    p -- (m x u + v) peak parameters for u peaks, where m is the number of
    parameters per peak ("gaussian" and "lorentzian" - 3, "pvoigt" - 4, "split_pvoigt"
    - 6) and v is the number of parameters for the chosen bgtype
    x -- (n) ndarray of coordinate positions
    pktype -- string, type of analytic function that will be used to fit the data;
    current options are "gaussian", "lorentzian", "pvoigt" (pseudo-Voigt), and
    "split_pvoigt" (split pseudo-Voigt)
    num_pks -- integer 'u' indicating the number of peaks, must match the length of p
    Outputs:
    ints -- (u) integrated intensities for the u fit peaks
"""
ints=np.zeros(num_pks)
if pktype == 'gaussian' or pktype == 'lorentzian':
p_fit=np.reshape(p[:3*num_pks],[num_pks,3])
elif pktype == 'pvoigt':
p_fit=np.reshape(p[:4*num_pks],[num_pks,4])
elif pktype == 'split_pvoigt':
p_fit=np.reshape(p[:6*num_pks],[num_pks,6])
for ii in np.arange(num_pks):
if pktype == 'gaussian':
ints[ii]=integrate.simps(pkfuncs._gaussian1d_no_bg(p_fit[ii],x),x)
elif pktype == 'lorentzian':
ints[ii]=integrate.simps(pkfuncs._lorentzian1d_no_bg(p_fit[ii],x),x)
elif pktype == 'pvoigt':
ints[ii]=integrate.simps(pkfuncs._pvoigt1d_no_bg(p_fit[ii],x),x)
elif pktype == 'split_pvoigt':
ints[ii]=integrate.simps(pkfuncs._split_pvoigt1d_no_bg(p_fit[ii],x),x)
return ints
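# A hedged analytic cross-check for the returned areas (the exact peak
# parameterization lives in pkfuncs, so these are the usual closed forms, not
# a statement about this module's API): a background-free Gaussian with
# amplitude A and FWHM G has area A * G * sqrt(pi / (4 * ln 2)) ~ 1.0645 * A * G,
# and a background-free Lorentzian with amplitude A and FWHM G has area
# A * pi * G / 2. On a sufficiently fine and wide x grid the simps() results
# above should approach these values.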
| 17,149
|
def cant_serialize(media_type: str) -> NoReturn: # type: ignore
"""Reject the current example if we don't know how to send this data to the application."""
event_text = f"Can't serialize data to `{media_type}`."
note(f"{event_text} {SERIALIZERS_SUGGESTION_MESSAGE}")
event(event_text)
reject()
| 17,150
|
def read_fid_ntraces(filename, shape=None, torder='flat', as_2d=False,
read_blockhead=False):
"""
    Read an Agilent/Varian binary (fid) file possibly having multiple
    traces per block.
Parameters
----------
filename : str
Filename of Agilent/Varian binary file (fid) to read.
shape : tuple of ints, optional
Shape of the binary data. If not provided data is returned as a 2D
array. Required if more than one trace per block (non-standard).
torder : {'f', 'n', 'o'}
Trace order. See :py:func:`read` for details.
as_2d : bool, optional
True to return the data as a 2D array, ignoring the shape and torder
parameters.
read_blockhead : bool, optional
        True to read the Agilent/Varian blockheader(s) into the returned
dictionary. False ignores them.
Returns
-------
dic : dict
Dictionary of Agilent/Varian binary file parameters.
data : array_like
Low memory object which can access NMR data on demand.
See Also
--------
    read_fid : Read an Agilent/Varian binary file with one trace per block.
    read_fid_lowmem : Read an Agilent/Varian binary file with one trace per
block using minimal amounts of memory.
"""
# open the file
f = open(filename, 'rb')
# read the fileheader
dic = fileheader2dic(get_fileheader(f))
# data parameters
dt = find_dtype(dic)
nblocks = dic["nblocks"]
pts = dic["np"]
nbheaders = dic["nbheaders"]
ntraces = dic["ntraces"]
# read the data
if read_blockhead:
bdic, data = get_nblocks_ntraces(f, nblocks, ntraces, pts,
nbheaders, dt, read_blockhead)
dic["blockheader"] = bdic
else:
data = get_nblocks_ntraces(f, nblocks, ntraces, pts, nbheaders, dt,
read_blockhead)
f.close()
# uninterleave the real and imaginary data
data = uninterleave_data(data)
# if 2D array requested, return unshaped
if as_2d:
return dic, data
# check for 1D
if data.shape[0] == 1:
return dic, np.squeeze(data)
# try to reshape
if shape is None:
warn("unknown shape, returning unshaped data")
return dic, data
# reorder 3D/4D data
if len(shape) >= 3:
return dic, reorder_data(data, shape, torder)
try:
data = data.reshape(shape)
except ValueError:
        warn(str(data.shape) + " cannot be shaped into " + str(shape))
return dic, data
return dic, data
| 17,151
|
async def async_get_erc20_decimals(
token: spec.ERC20Reference,
block: typing.Optional[spec.BlockNumberReference] = None,
**rpc_kwargs: typing.Any
) -> int:
"""get decimals of an erc20"""
return await erc20_generic.async_erc20_eth_call(
function_name='decimals', token=token, block=block, **rpc_kwargs
)
| 17,152
|
def test_toc_all_references_should_exist_pep420_disabled(make_app, apidoc):
"""All references in toc should exist. This test doesn't say if
    directories with empty __init__.py and nothing else should be
skipped, just ensures consistency between what's referenced in the toc
and what is created. This is the variant with pep420 disabled.
"""
outdir = apidoc.outdir
assert (outdir / 'conf.py').isfile()
toc = extract_toc(outdir / 'mypackage.rst')
refs = [l.strip() for l in toc.splitlines() if l.strip()]
found_refs = []
missing_files = []
for ref in refs:
if ref and ref[0] in (':', '#'):
continue
filename = "{}.rst".format(ref)
found_refs.append(ref)
if not (outdir / filename).isfile():
missing_files.append(filename)
assert len(missing_files) == 0, \
'File(s) referenced in TOC not found: {}\n' \
'TOC:\n{}'.format(", ".join(missing_files), toc)
| 17,153
|
def get_game_log(game_id: int):
"""
Method used to get list of important events of macau game with given game id.
:param game_id: integer value of existing game
:return: list with string with all important events in game
"""
if game_id >= len(games_container):
return JSONResponse(content={'status': 'No game', 'output': None}, status_code=404)
outputs = games_container[game_id]['outputs']['game']
return {"status": "OK", "output": outputs}
| 17,154
|
def get_exploration_components_from_dir(dir_path):
"""Gets the (yaml, assets) from the contents of an exploration data dir.
Args:
dir_path: str. a full path to the exploration root directory.
Returns:
*. A 2-tuple, the first element of which is a yaml string, and the
second element of which is a list of (filepath, content) 2-tuples.
The filepath does not include the assets/ prefix.
Raises:
Exception: if the following condition doesn't hold: "There is exactly one
file not in assets/, and this file has a .yaml suffix".
"""
yaml_content = None
assets_list = []
dir_path_array = dir_path.split('/')
while dir_path_array[-1] == '':
dir_path_array = dir_path_array[:-1]
dir_path_length = len(dir_path_array)
for root, dirs, files in os.walk(dir_path):
for directory in dirs:
if root == dir_path and directory != 'assets':
raise Exception(
'The only directory in %s should be assets/' % dir_path)
for filename in files:
filepath = os.path.join(root, filename)
if root == dir_path:
if filepath.endswith('.DS_Store'):
                    # These files are added automatically by Mac OS X systems.
# We ignore them.
continue
if yaml_content is not None:
raise Exception('More than one non-asset file specified '
'for %s' % dir_path)
elif not filepath.endswith('.yaml'):
raise Exception('Found invalid non-asset file %s. There '
'should only be a single non-asset file, '
'and it should have a .yaml suffix.' %
filepath)
else:
yaml_content = get_file_contents(filepath)
else:
filepath_array = filepath.split('/')
# The additional offset is to remove the 'assets/' prefix.
filename = '/'.join(filepath_array[dir_path_length + 1:])
assets_list.append((filename, get_file_contents(
filepath, raw_bytes=True)))
if yaml_content is None:
        raise Exception('No yaml file specified for %s' % dir_path)
return yaml_content, assets_list
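# A sketch of the directory layout this function expects (file names here are
# purely illustrative):
#
#   my_exploration/
#       exploration.yaml        -> returned as yaml_content
#       assets/
#           image/logo.png      -> returned in assets_list as
#                                  ('image/logo.png', <raw bytes>)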
| 17,155
|
def iterate_pattern_mapping(pattern_mapping: Union[List[Dict[str, Any]], Dict[str, Any]]) -> Generator[Mapping, None, None]:
""" Iterate mapping entry.
:param pattern_mapping: The pattern mapping table.
:return: Each mapping entry. {pattern:, exclude:, algorithm:, output:, next:}
"""
if isinstance(pattern_mapping, list):
for m in pattern_mapping:
yield from iterate_pattern_mapping(m)
elif isinstance(pattern_mapping, dict):
for pattern, repl in pattern_mapping.items():
yield Mapping(pattern, repl)
else:
raise ValueError(f'pattern-mapping must be an array or an object: {pattern_mapping}')
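# A minimal usage sketch, assuming Mapping is the (pattern, repl) entry type
# defined alongside this helper and that the keys and values shown are purely
# illustrative. Nested lists are flattened and every key/value pair of a dict
# becomes one entry:
#
#   table = [{"*.jpg": {"algorithm": "md5"}},
#            {"*.png": {"algorithm": "sha1"}, "*.gif": {"algorithm": "sha1"}}]
#   list(iterate_pattern_mapping(table))
#   -> [Mapping("*.jpg", {"algorithm": "md5"}),
#       Mapping("*.png", {"algorithm": "sha1"}),
#       Mapping("*.gif", {"algorithm": "sha1"})]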
| 17,156
|
def time_this_function(func):
"""
Time the function.
use as a decorator.
Examples
---------
::
@time_this_function
def func(x):
return x
a= func(1)
Parameters
----------
func: Callable
function
Returns
-------
result
function results
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(func.__name__, "time", end - start)
return result
return wrapper
| 17,157
|
def test_true_azimuth(coord_system, bldg_north, zone_rel_north, expected):
"""py.test for true_azimuth"""
fhandle = StringIO(idftxt)
idf = IDF(fhandle)
geom_rules = idf.idfobjects["GlobalGeometryRules"][0]
building = idf.idfobjects["Building"][0]
zone = idf.idfobjects["Zone"][0]
surface = idf.idfobjects["BuildingSurface:Detailed"][0]
geom_rules.Coordinate_System = coord_system
building.North_Axis = bldg_north
zone.Direction_of_Relative_North = zone_rel_north
result = fh.true_azimuth(surface)
assert almostequal(expected, result, places=3) == True
| 17,158
|
def power_plot(data, sfreq, toffset, log_scale, zscale, title):
"""Plot the computed power of the iq data."""
print("power")
t_axis = np.arange(0, len(data)) / sfreq + toffset
if log_scale:
lrxpwr = 10 * np.log10(data + 1e-12)
else:
lrxpwr = data
zscale_low, zscale_high = zscale
if zscale_low == 0 and zscale_high == 0:
if log_scale:
zscale_low = np.min(lrxpwr[np.where(lrxpwr.real != -np.Inf)])
zscale_high = np.max(lrxpwr) + 3.0
else:
zscale_low = np.min(lrxpwr)
zscale_high = np.max(lrxpwr)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(t_axis, lrxpwr.real)
ax.grid(True)
ax.axis([toffset, t_axis[len(t_axis) - 1], zscale_low, zscale_high])
ax.set_xlabel("time (seconds)")
if log_scale:
ax.set_ylabel("power (dB)")
else:
ax.set_ylabel("power")
ax.set_title(title)
return fig
| 17,159
|
def load_dataset(dataset):
"""
Loads a dataset and returns train, val and test partitions.
"""
dataset_to_class = {
'mnist': torchvision.datasets.MNIST,
'cifar10': torchvision.datasets.CIFAR10,
'fa-mnist': torchvision.datasets.FashionMNIST
}
assert dataset in dataset_to_class.keys()
transform = transforms.Compose([transforms.ToTensor()])
train_dataset = dataset_to_class[dataset](root='./data', train=True, download=True, transform=transform)
train_split, val_split = torch.utils.data.random_split(train_dataset, lengths=[len(train_dataset)-10000, 10000])
test_split = dataset_to_class[dataset](root='./data', train=False, download=True, transform=transform)
return train_split, val_split, test_split
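# Minimal usage sketch: the chosen dataset is downloaded into ./data on the
# first call, and the validation split always holds 10000 examples:
#
#   train_split, val_split, test_split = load_dataset('mnist')
#   assert len(val_split) == 10000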
| 17,160
|
def batch_sql_query(sql_statement, key_name, key_list, dry_run=False):
"""Run a query on the specifies list of primary keys."""
for key in key_list:
        if isinstance(key, dict):
            # Join all key/value pairs with " and " so the WHERE clause stays
            # valid for any number of keys in the dict.
            conditions = []
            for k in key:
                value = key[k]
                if isinstance(value, str):
                    value = "'{}'".format(value)
                conditions.append("{key_name} = {key}".format(key_name=k, key=value))
            sql = "{sql_statement} where {conditions}".format(
                sql_statement=sql_statement, conditions=" and ".join(conditions))
else:
sql = "{sql_statement} where {key_name} = {key}".format(sql_statement=sql_statement, key_name=key_name,
key=key)
logging.debug("Executing: {}".format(sql))
if dry_run:
logging.info("Would execute: {}".format(sql))
else:
result = session.execute(sql)
logging.debug(result)
time.sleep(0.1)
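# Illustrative call (table, column and key values below are hypothetical);
# with dry_run=True only the statements that would run are logged:
#
#   batch_sql_query("delete from events", "id", [7, {"id": 8, "tenant": "acme"}], dry_run=True)
#   -> delete from events where id = 7
#   -> delete from events where id = 8 and tenant = 'acme'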
| 17,161
|
def compareTo(s1, s2):
"""Compares two strings to check if they are the same length and whether one is longer
than the other"""
move_slice1 = 0
move_slice2 = 1
if s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] == '':
return 0 # return 0 if same length
elif s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] != '':
return len(s2) * -1 # return negative number if s2 > s1
elif s1[move_slice1:move_slice2] != '' and s2[move_slice1:move_slice2] == '':
return len(s1) # return positive number if s1 > s2
    else:
        # Neither string is exhausted yet: drop the first character of each
        # and compare the remainders recursively.
        return compareTo(s1[1:], s2[1:])
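# Quick usage sketch: only the relative lengths matter, not the characters.
#
#   compareTo("abc", "xyz")   -> 0    same length
#   compareTo("ab", "abcd")   -> -2   s2 is longer by the remaining 2 characters
#   compareTo("hello", "hi")  -> 3    s1 is longer by the remaining 3 characters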
| 17,162
|
def score_network(network, node_to_score, output_file):
"""
Scores a network
"""
# Get all nodes
all_nodes = node_to_score.keys()
# Here, the network is scored and stored in a file
with open(output_file, 'w') as f:
for u, v in network.edges():
score_u = node_to_score[u]
score_v = node_to_score[v]
score = (float(score_u) + float(score_v)) / 2
f.write('{}\t{:f}\t{}\n'.format(u, score, v))
return
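# A small usage sketch, assuming `network` is a networkx-style graph whose
# edges() yields node pairs: with node_to_score = {'A': 1.0, 'B': 0.5} and a
# single edge A-B, the output file would contain the single line
# "A\t0.750000\tB" (u <tab> mean of the endpoint scores <tab> v).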
| 17,163
|
def is_unique(x):
    """Check that the given list x has no duplicates
    Args:
        x (list): elements to be compared
    Returns:
        boolean: tells if there are only unique values or not
    """
    # A set cannot contain any duplicate, so we just check that the length of
    # the list is the same as the length of the corresponding set.
    return len(x) == len(set(x))
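# Quick sanity examples:
#
#   is_unique([1, 2, 3])  -> True
#   is_unique([1, 2, 2])  -> False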
| 17,164
|
def str2format(fmt, ignore_types=None):
"""Convert a string to a list of formats."""
ignore_types = ignore_types if ignore_types else ()
token_to_format = {
"s": "",
"S": "",
"d": "g",
"f": "f",
"e": "e",
}
base_fmt = "{{:{}}}"
out = []
for i, token in enumerate(fmt.split(",")):
n = token[:-1]
if i in ignore_types:
out.append(base_fmt.format(n.split(".")[0]))
elif token[-1].lower() == "s":
out.append(base_fmt.format("{}.{}".format(n, n)))
else:
out.append(base_fmt.format(">{}{}".format(n, token_to_format[token[-1]])))
return out
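# Usage sketch: widths come straight from each token, string tokens ("s"/"S")
# become width.precision fields, numeric tokens are right-aligned, and any
# position listed in ignore_types keeps only the integer part of its width:
#
#   str2format("10d,8.3f,12s")                    -> ["{:>10g}", "{:>8.3f}", "{:12.12}"]
#   str2format("10d,8.3f,12s", ignore_types=(1,)) -> ["{:>10g}", "{:8}", "{:12.12}"]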
| 17,165
|
def add_zones_to_elements(net, elements=["line", "trafo", "ext_grid", "switch"]):
"""
Adds zones to elements, inferring them from the zones of buses they are
connected to.
"""
for element in elements:
if element == "sgen":
net["sgen"]["zone"] = net["bus"]["zone"].loc[net["sgen"]["bus"]].values
elif element == "load":
net["load"]["zone"] = net["bus"]["zone"].loc[net["load"]["bus"]].values
elif element == "ext_grid":
net["ext_grid"]["zone"] = net["bus"]["zone"].loc[net["ext_grid"]["bus"]].values
elif element == "switch":
net["switch"]["zone"] = net["bus"]["zone"].loc[net["switch"]["bus"]].values
elif element == "line":
net["line"]["zone"] = net["bus"]["zone"].loc[net["line"]["from_bus"]].values
crossing = sum(net["bus"]["zone"].loc[net["line"]["from_bus"]].values !=
net["bus"]["zone"].loc[net["line"]["to_bus"]].values)
if crossing > 0:
logger.warn("There have been %i lines with different zones at from- and to-bus"
% crossing)
elif element == "trafo":
net["trafo"]["zone"] = net["bus"]["zone"].loc[net["trafo"]["hv_bus"]].values
crossing = sum(net["bus"]["zone"].loc[net["trafo"]["hv_bus"]].values !=
net["bus"]["zone"].loc[net["trafo"]["lv_bus"]].values)
if crossing > 0:
logger.warn("There have been %i trafos with different zones at lv_bus and hv_bus"
% crossing)
elif element == "impedance":
net["impedance"]["zone"] = net["bus"]["zone"].loc[net["impedance"]["from_bus"]].values
crossing = sum(net["bus"]["zone"].loc[net["impedance"]["from_bus"]].values !=
net["bus"]["zone"].loc[net["impedance"]["to_bus"]].values)
if crossing > 0:
logger.warn("There have been %i impedances with different zones at from_bus and "
"to_bus" % crossing)
elif element == "shunt":
net["shunt"]["zone"] = net["bus"]["zone"].loc[net["shunt"]["bus"]].values
elif element == "ward":
net["ward"]["zone"] = net["bus"]["zone"].loc[net["ward"]["bus"]].values
elif element == "xward":
net["xward"]["zone"] = net["bus"]["zone"].loc[net["xward"]["bus"]].values
else:
raise UserWarning("Unkown element %s" % element)
| 17,166
|
def _read_concordance(filename: Path, Sample_IDs: pd.Index) -> pd.DataFrame:
"""Create a flag of known replicates that show low concordance.
Given a set of samples that are known to be from the same Subject. Flag
samples that show low concordance with one or more replicates.
Returns:
pd.Series:
- Sample_ID (pd.Index)
- is_discordant_replicate (bool): True if replicates show
a concordance below the supplied threshold. Otherwise False.
"""
df = sample_concordance.read(filename)
return (
df.melt(
id_vars=["is_discordant_replicate"],
value_vars=["Sample_ID1", "Sample_ID2"],
var_name="To_Drop",
value_name="Sample_ID",
)
.drop("To_Drop", axis=1)
.groupby("Sample_ID")
.max() # Flag a sample as True if it is True for any comparison.
.astype("boolean")
.reindex(Sample_IDs)
)
| 17,167
|
def load_image(name):
""" Get and cache an enaml Image for the given icon name.
"""
path = icon_path(name)
global _IMAGE_CACHE
if path not in _IMAGE_CACHE:
with open(path, 'rb') as f:
data = f.read()
_IMAGE_CACHE[path] = Image(data=data)
return _IMAGE_CACHE[path]
| 17,168
|
def add_shipment_comment(
tracking_id: str,
body: CreateComment = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Add a Comment to a Shipment.
Requires: **VBR_WRITE_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
shipment = client.get_shipment_by_tracking_id(tracking_id)
data_event = client.create_and_link(comment=body.comment, link_target=shipment)[0]
return Comment(comment=data_event.comment, timestamp=data_event.event_ts)
| 17,169
|
def train_faster_rcnn_alternating(base_model_file_name, debug_output=False):
"""
4-Step Alternating Training scheme from the Faster R-CNN paper:
# Create initial network, only rpn, without detection network
# --> train only the rpn (and conv3_1 and up for VGG16)
# buffer region proposals from rpn
# Create full network, initialize conv layers with imagenet, use buffered proposals
# --> train only detection network (and conv3_1 and up for VGG16)
# Keep conv weights from detection network and fix them
# --> train only rpn
# buffer region proposals from rpn
# Keep conv and rpn weights from step 3 and fix them
# --> train only detection network
"""
# Learning parameters
rpn_lr_factor = globalvars['rpn_lr_factor']
rpn_lr_per_sample_scaled = [x * rpn_lr_factor for x in cfg["CNTK"].RPN_LR_PER_SAMPLE]
frcn_lr_factor = globalvars['frcn_lr_factor']
frcn_lr_per_sample_scaled = [x * frcn_lr_factor for x in cfg["CNTK"].FRCN_LR_PER_SAMPLE]
l2_reg_weight = cfg["CNTK"].L2_REG_WEIGHT
mm_schedule = momentum_schedule(globalvars['momentum_per_mb'])
rpn_epochs = globalvars['rpn_epochs']
frcn_epochs = globalvars['frcn_epochs']
print("Using base model: {}".format(cfg["CNTK"].BASE_MODEL))
print("rpn_lr_per_sample: {}".format(rpn_lr_per_sample_scaled))
print("frcn_lr_per_sample: {}".format(frcn_lr_per_sample_scaled))
if debug_output:
print("Storing graphs and models to %s." % globalvars['output_path'])
# Input variables denoting features, labeled ground truth rois (as 5-tuples per roi) and image dimensions
image_input = input_variable((num_channels, image_height, image_width), dynamic_axes=[Axis.default_batch_axis()],
name=feature_node_name)
feat_norm = image_input - normalization_const
roi_input = input_variable((cfg["CNTK"].INPUT_ROIS_PER_IMAGE, 5), dynamic_axes=[Axis.default_batch_axis()])
scaled_gt_boxes = alias(roi_input, name='roi_input')
dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()])
dims_node = alias(dims_input, name='dims_input')
rpn_rois_input = input_variable((cfg["TRAIN"].RPN_POST_NMS_TOP_N, 4), dynamic_axes=[Axis.default_batch_axis()])
rpn_rois_buf = alias(rpn_rois_input, name='rpn_rois')
# base image classification model (e.g. VGG16 or AlexNet)
base_model = load_model(base_model_file_name)
print("stage 1a - rpn")
if True:
# Create initial network, only rpn, without detection network
# initial weights train?
# conv: base_model only conv3_1 and up
# rpn: init new yes
# frcn: - -
# conv layers
conv_layers = clone_conv_layers(base_model)
conv_out = conv_layers(feat_norm)
# RPN and losses
rpn_rois, rpn_losses = create_rpn(conv_out, scaled_gt_boxes, dims_node, proposal_layer_param_string=cfg["CNTK"].PROPOSAL_LAYER_PARAMS)
stage1_rpn_network = combine([rpn_rois, rpn_losses])
# train
if debug_output: plot(stage1_rpn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage1a_rpn." + cfg["CNTK"].GRAPH_TYPE))
train_model(image_input, roi_input, dims_input, rpn_losses, rpn_losses,
rpn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=rpn_epochs)
print("stage 1a - buffering rpn proposals")
buffered_proposals_s1 = compute_rpn_proposals(stage1_rpn_network, image_input, roi_input, dims_input)
print("stage 1b - frcn")
if True:
# Create full network, initialize conv layers with imagenet, fix rpn weights
# initial weights train?
# conv: base_model only conv3_1 and up
# rpn: stage1a rpn model no --> use buffered proposals
# frcn: base_model + new yes
# conv_layers
conv_layers = clone_conv_layers(base_model)
conv_out = conv_layers(feat_norm)
# use buffered proposals in target layer
rois, label_targets, bbox_targets, bbox_inside_weights = \
create_proposal_target_layer(rpn_rois_buf, scaled_gt_boxes, num_classes=globalvars['num_classes'])
# Fast RCNN and losses
fc_layers = clone_model(base_model, [pool_node_name], [last_hidden_node_name], CloneMethod.clone)
cls_score, bbox_pred = create_fast_rcnn_predictor(conv_out, rois, fc_layers)
detection_losses = create_detection_losses(cls_score, label_targets, rois, bbox_pred, bbox_targets, bbox_inside_weights)
pred_error = classification_error(cls_score, label_targets, axis=1, name="pred_error")
stage1_frcn_network = combine([rois, cls_score, bbox_pred, detection_losses, pred_error])
# train
if debug_output: plot(stage1_frcn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage1b_frcn." + cfg["CNTK"].GRAPH_TYPE))
train_model(image_input, roi_input, dims_input, detection_losses, pred_error,
frcn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=frcn_epochs,
rpn_rois_input=rpn_rois_input, buffered_rpn_proposals=buffered_proposals_s1)
buffered_proposals_s1 = None
print("stage 2a - rpn")
if True:
# Keep conv weights from detection network and fix them
# initial weights train?
# conv: stage1b frcn model no
# rpn: stage1a rpn model yes
# frcn: - -
# conv_layers
conv_layers = clone_model(stage1_frcn_network, [feature_node_name], [last_conv_node_name], CloneMethod.freeze)
conv_out = conv_layers(image_input)
# RPN and losses
rpn = clone_model(stage1_rpn_network, [last_conv_node_name, "roi_input", "dims_input"], ["rpn_rois", "rpn_losses"], CloneMethod.clone)
rpn_net = rpn(conv_out, dims_node, scaled_gt_boxes)
rpn_rois = rpn_net.outputs[0]
rpn_losses = rpn_net.outputs[1]
stage2_rpn_network = combine([rpn_rois, rpn_losses])
# train
if debug_output: plot(stage2_rpn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage2a_rpn." + cfg["CNTK"].GRAPH_TYPE))
train_model(image_input, roi_input, dims_input, rpn_losses, rpn_losses,
rpn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=rpn_epochs)
print("stage 2a - buffering rpn proposals")
buffered_proposals_s2 = compute_rpn_proposals(stage2_rpn_network, image_input, roi_input, dims_input)
print("stage 2b - frcn")
if True:
# Keep conv and rpn weights from step 3 and fix them
# initial weights train?
# conv: stage2a rpn model no
# rpn: stage2a rpn model no --> use buffered proposals
# frcn: stage1b frcn model yes -
# conv_layers
conv_layers = clone_model(stage2_rpn_network, [feature_node_name], [last_conv_node_name], CloneMethod.freeze)
conv_out = conv_layers(image_input)
# Fast RCNN and losses
frcn = clone_model(stage1_frcn_network, [last_conv_node_name, "rpn_rois", "roi_input"],
["cls_score", "bbox_regr", "rpn_target_rois", "detection_losses", "pred_error"], CloneMethod.clone)
stage2_frcn_network = frcn(conv_out, rpn_rois_buf, scaled_gt_boxes)
detection_losses = stage2_frcn_network.outputs[3]
pred_error = stage2_frcn_network.outputs[4]
# train
if debug_output: plot(stage2_frcn_network, os.path.join(globalvars['output_path'], "graph_frcn_train_stage2b_frcn." + cfg["CNTK"].GRAPH_TYPE))
train_model(image_input, roi_input, dims_input, detection_losses, pred_error,
frcn_lr_per_sample_scaled, mm_schedule, l2_reg_weight, epochs_to_train=frcn_epochs,
rpn_rois_input=rpn_rois_input, buffered_rpn_proposals=buffered_proposals_s2)
buffered_proposals_s2 = None
return create_eval_model(stage2_frcn_network, image_input, dims_input, rpn_model=stage2_rpn_network)
| 17,170
|
def _mp2_energy(output_str):
""" Reads the MP2 energy from the output file string.
Returns the energy in Hartrees.
:param output_str: string of the program's output file
:type output_str: str
:rtype: float
"""
ene = ar.energy.read(
output_str,
app.one_of_these([
app.escape('Total MP2 energy'),
app.escape('MP2 energy')
]))
return ene
| 17,171
|
def fastlcs(a,b,Dmax=None):
"""
    return the length of the longest common subsequence or 0 if the maximum number of differences Dmax cannot be respected
    Implementation: see the excellent paper "An O(ND) Difference Algorithm and Its Variations" by EUGENE W. MYERS, 1986
    NOTE:
    let D be the minimal number of insertions or deletions that transform A into B
    let L be the length of a longest common subsequence
    we always have D = M + N - 2 * L
    """
    N, M = len(a), len(b)
    if N+M == 0: return 0 #very special case...
    if Dmax is None:
        Dmax = N + M #worst case
    else:
        Dmax = min(Dmax, M+N) #a larger value does not make sense!
    assert Dmax >= 0, "SOFTWARE ERROR: Dmax must be a positive integer"
    W = [0] * (Dmax * 2 + 2) #for i in -Dmax..Dmax, V[i] == W[i+Dmax]
    for D in range(0, Dmax+1):
        for k in range(-D, +D+1, 2):
            if k == -D or (k != D and W[k-1+Dmax] < W[k+1+Dmax]): #k == -D or (k != D and V[k-1] < V[k+1])
                x = W[k+1+Dmax] #x = V[k+1]
            else:
                x = W[k-1+Dmax]+1 #x = V[k-1]+1
            y = x - k
            while x < N and y < M and a[x] == b[y]: #follow any snake
                x += 1
                y += 1
            W[k+Dmax] = x # V[k] = x #farthest reaching point with D edits
            if x >= N and y >= M:
                L = (M+N-D) // 2 #M+N and D have the same parity, so this division is exact
                assert D == M+N-L-L, ("INTERNAL SOFTWARE ERROR", M,N,D)
                return L
    return 0
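# Usage sketch with the example from Myers' paper: for a = "ABCABBA" and
# b = "CBABAC" the shortest edit script has D = 5, so the longest common
# subsequence has length (7 + 6 - 5) // 2 = 4.
#
#   fastlcs("ABCABBA", "CBABAC")          -> 4
#   fastlcs("ABCABBA", "CBABAC", Dmax=4)  -> 0   (not reachable within 4 edits)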
| 17,172
|
def FeaturesExtractor( # pylint: disable=invalid-name
eval_config: config_pb2.EvalConfig,
tensor_representations: Optional[Mapping[
Text, schema_pb2.TensorRepresentation]] = None) -> extractor.Extractor:
"""Creates an extractor for extracting features.
The extractor acts as follows depending on the existence of certain keys
within the incoming extracts:
1) Extracts contains tfma.ARROW_RECORD_BATCH_KEY
The features stored in the RecordBatch will be extracted and added to the
output extract under the key tfma.FEATURES_KEY and the raw serialized inputs
will be added under the tfma.INPUT_KEY. Any extracts that already exist will
be merged with the values from the RecordBatch with the RecordBatch values
taking precedence when duplicate keys are detected. The
tfma.ARROW_RECORD_BATCH_KEY key will be removed from the output extracts.
2) Extracts contains tfma.FEATURES_KEY (but not tfma.ARROW_RECORD_BATCH_KEY)
The operation will be a no-op and the incoming extracts will be passed as is
to the output.
3) Extracts contains neither tfma.FEATURES_KEY | tfma.ARROW_RECORD_BATCH_KEY
An exception will be raised.
Args:
eval_config: Eval config.
tensor_representations: Optional tensor representations to use when parsing
the data. If tensor_representations are not passed or a representation is
not found for a given feature name a default representation will be used
where possible, otherwise an exception will be raised.
Returns:
Extractor for extracting features.
"""
del eval_config
# pylint: disable=no-value-for-parameter
return extractor.Extractor(
stage_name=_FEATURES_EXTRACTOR_STAGE_NAME,
ptransform=_ExtractFeatures(tensor_representations or {}))
| 17,173
|
def clean_reorgs(horizon=settings.MEX_SYNC_HORIZON):
"""
Clean chain reorganizations up to `horizon` number of blocks in the past.
First we compare the latest `horizon` block hashes from the database with
    those from the authoritative node. If we find a difference we will delete
    all the blocks starting at the oldest differing block. Deleting those
    blocks will automatically cascade through the data model and delete all
    dependent transactions, inputs and outputs.
"""
log.info("clean reorgs with horizon {}".format(horizon))
api = get_client()
node_height = api.getblockcount()
db_height = Block.get_db_height()
if db_height > node_height:
log.warning("database is ahead of node")
return
db_blocks = list(
Block.objects.order_by("-height").values_list("height", "hash")[:horizon]
)
if not db_blocks:
log.info("database has no block data")
return
db_height = db_blocks[0][0]
db_horizon = db_blocks[-1][0]
horizon_range = "%s-%s" % (db_horizon, db_height)
node_data = api.listblocks(horizon_range, False)
node_blocks = [(b["height"], b["hash"]) for b in reversed(node_data)]
difference = set(db_blocks).difference(set(node_blocks))
if not difference:
log.info("no reorgs found")
return
fork_height = min(difference)[0]
log.info("database reorg from height %s" % fork_height)
Block.objects.filter(height__gte=fork_height).delete()
| 17,174
|
def assert__(engine, obj, condition, message=u'Assertion failed'):
""":yaql:assert
Evaluates condition against object. If it evaluates to true returns the
object, otherwise throws an exception with provided message.
:signature: obj.assert(condition, message => "Assertion failed")
:arg obj: object to evaluate condition on
:argType obj: any
    :arg condition: lambda function to be evaluated on obj. If the result of
        the function evaluates to false then throws an exception with message
    :argType condition: lambda
    :arg message: message to throw if condition returns false
:argType message: string
:returnType: obj type or message
.. code::
yaql> 12.assert($ < 2)
Execution exception: Assertion failed
yaql> 12.assert($ < 20)
12
yaql> [].assert($, "Failed assertion")
Execution exception: Failed assertion
"""
if utils.is_iterator(obj):
obj = utils.memorize(obj, engine)
if not condition(obj):
raise AssertionError(message)
return obj
| 17,175
|
def create_review(request, item_id,
template_name="reviewclone/create_review.html"):
"""
Current user can create a new review.
Find the item with `item_id` then make sure the current user
does not already have a review. If a review is found
`review_exist` will be True. If the current user's review count is less
than `REVIEWCLONE_REVIEW_MIN`,`random_item` will be 1 item the user
has not reviewed yet.
"""
review_exist = False
random_item = None
item = get_object_or_404(Item, pk=item_id)
if Review.objects.filter(item=item, user=request.user).count() > 0:
review_exist = True
if request.POST:
form = ReviewForm(request.POST)
if form.is_valid() and review_exist == False:
form.instance.user = request.user
form.instance.item = item
form.save()
if form.cleaned_data.get('post_review_message') == True:
request.facebook.graph.put_wall_post(
# TODO: Change to template
'I just gave \"%s\" %s Stars on reviewclone.com.' % (item.name, form.instance.amount),
# TODO: Add attachment
)
messages.add_message(request, messages.INFO,
'Your review was posted to your Facebook wall.')
messages.add_message(request, messages.INFO,
'You reviewed %s.' % item)
return HttpResponseRedirect(reverse('after_review',
args=[form.instance.pk]))
else:
user_reviews = Review.objects.filter(user=request.user)
if user_reviews.count() < settings.REVIEWCLONE_REVIEW_MIN:
random_item = Item.objects.all().exclude(
pk__in=user_reviews.values_list('item__pk')
).order_by('?')[0]
form = ReviewForm()
return render_to_response(
template_name,
{
'item': item,
'review_exist': review_exist,
'form': form,
'random': random_item,
},
context_instance=RequestContext(request)
)
| 17,176
|
def removeDir(path):
""" Remove dir on a given path, even if it is not empty. """
try:
shutil.rmtree(path)
except OSError as exc:
# Silently catch the exception if the directory does not exist
if exc.errno != 2:
raise
| 17,177
|
def get_backend_bucket_output(backend_bucket: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBackendBucketResult]:
"""
Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.
"""
...
| 17,178
|
def build_level_codes(incoming_column_name: str, levels: Iterable) -> List[str]:
"""
Pick level names for a set of levels.
:param incoming_column_name:
:param levels:
:return:
"""
levels = [str(lev) for lev in levels]
levels = [incoming_column_name + "_lev_" + clean_string(lev) for lev in levels]
if len(set(levels)) != len(levels):
levels = [levels[i] + "_" + str(i) for i in range(len(levels))]
return levels
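# A hedged usage sketch, assuming clean_string() leaves plain alphanumeric
# level names unchanged; duplicate names after cleaning get a positional
# suffix so the generated column names stay unique:
#
#   build_level_codes("color", ["red", "blue"])  -> ["color_lev_red", "color_lev_blue"]
#   build_level_codes("flag", ["x", "x"])        -> ["flag_lev_x_0", "flag_lev_x_1"]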
| 17,179
|
def get_all(isamAppliance, check_mode=False, force=False):
"""
Get all rsyslog objects
"""
return isamAppliance.invoke_get("Get all rsyslog objects",
"/core/rsp_rsyslog_objs")
| 17,180
|
def genomic_dup1_37_loc():
"""Create test fixture GRCh37 duplication subject"""
return {
"_id": "ga4gh:VSL.CXcLL6RUPkro3dLXN0miGEzlzPYiqw2q",
"sequence_id": "ga4gh:SQ.VNBualIltAyi2AI_uXcKU7M9XUOuA7MS",
"interval": {
"type": "SequenceInterval",
"start": {"value": 49568693, "type": "Number"},
"end": {"value": 49568695, "type": "Number"},
},
"type": "SequenceLocation",
}
| 17,181
|
def parse(f, _bytes):
"""
Parse function will take a parser combinator and parse some set of bytes
"""
if type(_bytes) == Parser:
return f(_bytes)
else:
s = Parser(_bytes, 0)
return f(s)
| 17,182
|
def save(filestring, path):
"""Saves a filestring to file.
:param str filestring: the string to save.
:param str path: the place to save it."""
try:
with builtins.open(path, "w") as f: f.write(filestring)
    except TypeError:
with builtins.open(path, "wb") as f: f.write(filestring)
| 17,183
|
def spiral_tm(wg_width=0.5, length=2):
""" sample of component cutback """
c = spiral_inner_io_euler(wg_width=wg_width, length=length, dx=10, dy=10, N=5)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.c.grating_coupler_elliptical_tm,
bend_factory=pp.c.bend_circular,
)
return cc
| 17,184
|
def test_show_router_interface(parsed_show_router_int):
"""
Test extracting router interfaces from show command
"""
data = "tests/fixtures/show_output/show_router_interface.txt"
sros_parser = SrosParser(data)
result = sros_parser.show_router_interface()
assert result == parsed_show_router_int
| 17,185
|
def test_cleanup_policy(faker):
"""Ensure the command creates a new policy and that is shows on the list"""
x_name = faker.pystr()
# CLI accepts days, Nexus stores seconds
downloaded = faker.random_int(1, 365)
x_downloaded = str(downloaded * 86400)
updated = faker.random_int(1, 365)
x_updated = str(updated * 86400)
create_command = (f'nexus3 cleanup_policy create {x_name} '
f'--downloaded={downloaded} --updated={updated}')
list_command = 'nexus3 cleanup_policy list'
create_retcode = check_call(create_command.split())
output = check_output(list_command.split(), encoding='utf-8')
# find our entry in output
entry = ''
for line in output.splitlines():
print('checking', line)
if line.startswith(x_name):
entry = line
break
assert create_retcode == 0
assert x_name in entry
assert x_downloaded in entry
assert x_updated in entry
assert 'ALL_FORMATS' in entry
| 17,186
|
def get_url_until_success(url):
"""Continuously tries to open a url until it succeeds or times out."""
time_spent = 0
while (time_spent < RECEIVE_TIMEOUT):
try:
helps = urllib2.urlopen(url)
break
except:
time.sleep(RETRY_INTERVAL)
time_spent += RETRY_INTERVAL
if (time_spent >= RECEIVE_TIMEOUT):
print >> sys.stderr, 'Timeout attempting to hit url: %s' % (url)
sys.exit(1)
return helps.read()
| 17,187
|
def _chunk(fst: pynini.Fst) -> List[Tuple[str, str]]:
"""Chunks a string transducer into tuples.
This function is given a string transducer of the form:
il1 il2 il3 il4 il5 il6
ol1 eps eps ol2 eps ol3
And returns the list:
[(il1 il2 il3, ol1), (il4 il5, ol2), (il6, ol3)]
It thus recovers the "many-to-one" alignment.
Args:
fst: a string transducer containing the alignment.
Returns:
A list of string, char tuples.
"""
# Input epsilon-normalization and removal forces a sensible alignment.
fst = pynini.epsnormalize(fst).rmepsilon()
assert (
fst.properties(pynini.STRING, True) == pynini.STRING
), "FST is not a string automaton"
alignment: List[Tuple[str, str]] = []
state = 0
arc = fst.arcs(state).value()
assert arc.ilabel, f"Input label leaving state {state} contains epsilon"
ilabels = bytearray([arc.ilabel])
assert arc.olabel, f"Output label leaving state {state} contains epsilon"
olabel = arc.olabel
for state in range(1, fst.num_states() - 1):
arc = fst.arcs(state).value()
assert (
arc.ilabel
), f"Input label leaving state {state} contains epsilon"
# A non-epsilon olabel signals a new chunk.
if arc.olabel:
alignment.append((ilabels.decode("utf8"), chr(olabel)))
ilabels.clear()
olabel = arc.olabel
ilabels.append(arc.ilabel)
assert (
ilabels
), f"Input label leaving penultimate state {state} contains epsilon"
alignment.append((ilabels.decode("utf8"), chr(olabel)))
return alignment
| 17,188
|
def auth_token_required(func):
"""Your auth here"""
return func
| 17,189
|
def run_data_assimilation(wrf_model, fm10, wrf_model_prev = None):
"""
Run the fuel moisture and DA for all time steps in the model wrf_model.
If a previous run is available, the fuel moisture values (and covariance if available)
are transferred.
:param wrf_model: the current WRF data file to process (wrf input or wrf output)
:param fm10: a list of the observations of 10-hr fuel moisture available
:param wrf_model_prev: optional, the previous WRF data file from which fm state may be copied
:return: the fuel moisture model with the assimilated fields
"""
tss = wrf_model.get_gmt_times()
lat, lon = wrf_model.get_lats(), wrf_model.get_lons()
dom_shape = lat.shape
T2 = wrf_model['T2']
Q2 = wrf_model['Q2']
PSFC = wrf_model['PSFC']
hgt = wrf_model['HGT']
rain = wrf_model['RAIN']
rain = np.log(rain + 1.0)
constant = np.ones_like(T2)
Ed,Ew = wrf_model['Ed'], wrf_model['Ew']
E = 0.5 * (Ed[0,:,:] + Ew[0,:,:])
P0 = np.diag([0.01,0.01,0.01,0.01,0.001,0.001])
Tk = np.array([1.0, 10.0, 100.0, 1000.0]) * 3600
Q = np.diag([1e-4,5e-5,1e-5,1e-6,1e-6,1e-6])
# initialize the grid moisture model with the fuel moisture equilibrium
model = FuelMoistureModel(E[:,:,np.newaxis][:,:,np.zeros((4,),dtype=np.int)], Tk, P0)
    # if a previous fuel moisture model run is available, copy its state
if wrf_model_prev is not None:
logging.info('FMDA replacing fuel moisture equilibrium with previous calculation from %s.' % wrf_model_prev.path)
prev_tss = wrf_model_prev.get_gmt_times()
if tss[0] in prev_tss:
prev_ndx = prev_tss.index(tss[0])
model.get_state()[:,:,:3] = wrf_model_prev['FMC_GC'][prev_ndx,:3,:,:].transpose((1,2,0))
model.get_state()[:,:,3:5] = wrf_model_prev['FMEP'][prev_ndx,:,:,:].transpose((1,2,0))
# precompute static covariates (we assume domains don't move around)
cov_lon = lon - np.mean(lon)
cov_lat = lat - np.mean(lat)
cov_hgt = hgt / 1000.0
cov_const = np.ones(dom_shape)
# advance model and run DA for each timestep
for i, ts in enumerate(tss):
cov_t2 = T2[i,:,:]
cov_q2 = Q2[i,:,:]
cov_psfc = PSFC[i,:,:]
cov_rain = np.log(rain[i,:,:] + 1.0)
covariates = [cov_t2, cov_psfc,cov_lon,cov_lat,cov_hgt,cov_t2,cov_q2,cov_const]
covariates_names = ['t2', 'psfc','lon','lat','hgt','t2','q2','const']
if np.any(rain > 0.0):
covariates.append(rain)
covariates_names.append('rain')
if i > 0:
model.advance_model(Ed[i,:,:], Ew[i,:,:], rain[i,:,:], (ts - tss[i-1]).seconds, Q)
logging.info('FMDA calling execute_da_step with %d covariates' % len(covariates))
execute_da_step(model, ts, covariates, covariates_names, fm10)
# overwrite the WRF model variables for this time step
d = netCDF4.Dataset(wrf_model.path, 'r+')
d.variables['FMC_GC'][i,:3,:,:] = model.get_state()[:,:,:3].transpose(2,0,1)
d.variables['FMEP'][i,:,:,:] = model.get_state()[:,:,4:6].transpose(2,0,1)
d.close()
| 17,190
|
def base_convert_money(amount, currency_from, currency_to):
"""
Convert 'amount' from 'currency_from' to 'currency_to'
"""
source = get_rate_source()
# Get rate for currency_from.
if source.base_currency != currency_from:
rate_from = get_rate(currency_from)
else:
# If currency from is the same as base currency its rate is 1.
rate_from = Decimal(1)
# Get rate for currency_to.
rate_to = get_rate(currency_to)
if isinstance(amount, float):
amount = Decimal(amount).quantize(Decimal('.000001'))
# After finishing the operation, quantize down final amount to two points.
return ((amount / rate_from) * rate_to).quantize(Decimal("1.00"))
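# A hedged arithmetic sketch of the conversion rule (currencies and rates are
# hypothetical): if the source's base currency is EUR, rate_from = rate(USD) =
# Decimal("1.10") and rate_to = rate(GBP) = Decimal("0.90"), then converting
# 100 USD gives (Decimal("100") / Decimal("1.10")) * Decimal("0.90"), which the
# final quantize(Decimal("1.00")) rounds to Decimal("81.82").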
| 17,191
|
def init_dask_workers(worker, config, obj_dict=None):
"""
Initalize for all dask workers
:param worker: Dask worker
:type worker: object
:param config: Configuration which contains source and sink details
:type config: dict
:param obj_dict: Objects that are required to be present on every dask worker
:type obj_dict: dict
:return: worker: Dask worker
:rtype: object
"""
if obj_dict is not None:
for key in obj_dict.keys():
worker.data[key] = obj_dict[key]
sink = config["sink"]
if sink == SINK_KAFKA:
import confluent_kafka as ck
producer_conf = config["kafka_conf"]["producer_conf"]
print("Producer conf: " + str(producer_conf))
producer = ck.Producer(producer_conf)
worker.data["sink"] = producer
elif sink == SINK_ES:
from elasticsearch import Elasticsearch
es_conf = config["elasticsearch_conf"]
if "username" in es_conf and "password" in es_conf:
es_client = Elasticsearch(
[
es_conf["url"].format(
es_conf["username"], es_conf["password"], es_conf["port"]
)
],
use_ssl=True,
verify_certs=True,
ca_certs=es_conf["ca_file"],
)
else:
es_client = Elasticsearch(
[{"host": config["elasticsearch_conf"]["url"]}],
port=config["elasticsearch_conf"]["port"],
)
worker.data["sink"] = es_client
elif sink == SINK_FS:
print(
"Streaming process will write the output to location '{}'".format(
config["output_dir"]
)
)
else:
print(
"No valid sink provided in the configuration file. Please provide kafka/elasticsearch/filsesystem"
)
sys.exit(-1)
print("Successfully initialized dask worker " + str(worker))
return worker
| 17,192
|
async def contestant() -> dict:
"""Create a mock contestant object."""
return {
"id": "290e70d5-0933-4af0-bb53-1d705ba7eb95",
"first_name": "Cont E.",
"last_name": "Stant",
"birth_date": date(1970, 1, 1).isoformat(),
"gender": "M",
"ageclass": "G 12 år",
"region": "Oslo Skikrets",
"club": "Lyn Ski",
"team": "Team Kollen",
"email": "post@example.com",
"event_id": "ref_to_event",
"bib": 1,
}
| 17,193
|
def GetApitoolsTransport(timeout='unset',
enable_resource_quota=True,
response_encoding=None,
ca_certs=None,
allow_account_impersonation=True,
use_google_auth=None,
response_handler=None,
redact_request_body_reason=None):
"""Get an transport client for use with apitools.
Args:
timeout: double, The timeout in seconds to pass to httplib2. This is the
socket level timeout. If timeout is None, timeout is infinite. If
default argument 'unset' is given, a sensible default is selected.
enable_resource_quota: bool, By default, we are going to tell APIs to use
the quota of the project being operated on. For some APIs we want to use
gcloud's quota, so you can explicitly disable that behavior by passing
False here.
response_encoding: str, the encoding to use to decode the response.
ca_certs: str, absolute filename of a ca_certs file that overrides the
default
allow_account_impersonation: bool, True to allow use of impersonated service
account credentials for calls made with this client. If False, the
active user credentials will always be used.
use_google_auth: bool, True if the calling command indicates to use
google-auth library for authentication. If False, authentication will
fallback to using the oauth2client library.
response_handler: requests.ResponseHandler, handler that gets executed
before any other response handling.
redact_request_body_reason: str, the reason why the request body must be
redacted if --log-http is used. If None, the body is not redacted.
Returns:
    An httplib2.Http-like object backed by httplib2 or requests.
"""
if base.UseRequests():
if response_handler:
if not isinstance(response_handler, core_requests.ResponseHandler):
raise ValueError('response_handler should be of type ResponseHandler.')
if (properties.VALUES.core.log_http.GetBool() and
properties.VALUES.core.log_http_streaming_body.GetBool()):
# We want to print the actual body instead of printing the placeholder.
# To achieve this, we need to set streaming_response_body as False.
        # Note that the body will be empty if the response_handler has already
# consumed the stream.
streaming_response_body = False
else:
streaming_response_body = response_handler.use_stream
else:
streaming_response_body = False
session = requests.GetSession(
timeout=timeout,
enable_resource_quota=enable_resource_quota,
ca_certs=ca_certs,
allow_account_impersonation=allow_account_impersonation,
streaming_response_body=streaming_response_body,
redact_request_body_reason=redact_request_body_reason)
return core_requests.GetApitoolsRequests(session, response_handler,
response_encoding)
return http.Http(timeout=timeout,
enable_resource_quota=enable_resource_quota,
response_encoding=response_encoding,
ca_certs=ca_certs,
allow_account_impersonation=allow_account_impersonation,
use_google_auth=use_google_auth)
| 17,194
|
def user_requested_anomaly7():
""" Checks if the user requested an anomaly, and returns True/False accordingly. """
digit = 0
res = False
if is_nonzero_file7(summon_filename):
lines = []
with open(get_full_path(summon_filename)) as f:
lines = f.readlines()
if len(lines) > 0:
try:
digit = int(lines[0])
if digit > 0:
res = True
except Exception as e:
res = False
append_logs("ERROR:" + str(e), name4logs, "always")
else:
res = False
else:
res = False
# Disable summoning of anomalies after the requested number of anomalies were added
if res:
with open(get_full_path(summon_filename), "w") as f:
if digit > 0:
f.write(str(digit - 1))
else:
f.write("0")
return res
| 17,195
|
def test_data_process_content():
"""Test the _data_process_content function."""
# test case 1
dataio = fmu.dataio.ExportData(
name="Valysar",
config=CFG2,
content="depth",
timedata=[["20210101", "first"], [20210902, "second"]],
tagname="WhatEver",
)
obj = xtgeo.RegularSurface(
name="SomeName", ncol=3, nrow=4, xinc=22, yinc=22, values=0
)
exportitem = ei._ExportItem(dataio, obj, verbosity="INFO")
exportitem._data_process_content()
assert dataio._meta_data["content"] == "depth"
# test case 2
dataio = fmu.dataio.ExportData(
name="Valysar",
config=CFG2,
content={"seismic": {"attribute": "attribute_timeshifted_somehow"}},
timedata=[["20210101", "first"], [20210902, "second"]],
tagname="WhatEver",
)
obj = xtgeo.RegularSurface(
name="SomeName", ncol=3, nrow=4, xinc=22, yinc=22, values=0
)
exportitem = ei._ExportItem(dataio, obj, verbosity="INFO")
exportitem._data_process_content()
assert dataio._meta_data["content"] == "seismic"
assert dataio._meta_data["seismic"]["attribute"] == "attribute_timeshifted_somehow"
| 17,196
|
def test_path_count_priority_cache(tmpdir, allocate_GB):
"""
    Test PathCountPriorityCache by running the same DWWC computation three times.
"""
hetmat = get_graph('bupropion-subgraph', hetmat=True, directory=tmpdir)
cache = hetmech.hetmat.caching.PathCountPriorityCache(hetmat, allocate_GB)
hetmat.path_counts_cache = cache
print(cache.get_stats)
# First run
assert sum(cache.hits.values()) == 0
row_ids, col_ids, matrix = hetmech.degree_weight.dwwc(
graph=hetmat, metapath='CbGpPWpGaD', damping=0.5,
dwwc_method=hetmech.degree_weight.dwwc_recursive,
)
assert sum(cache.hits.values()) > 0
if allocate_GB == 0:
assert cache.hits['memory'] == 0
assert cache.hits['disk'] == 0
assert cache.hits['absent'] == 4
elif allocate_GB > 0:
assert cache.hits['memory'] == 0
assert cache.hits['disk'] == 0
assert cache.hits['absent'] == 4
# Second run
row_ids, col_ids, matrix = hetmech.degree_weight.dwwc(
graph=hetmat, metapath='CbGpPWpGaD', damping=0.5,
dwwc_method=hetmech.degree_weight.dwwc_recursive,
)
if allocate_GB == 0:
assert cache.hits['memory'] == 0
assert cache.hits['disk'] == 0
assert cache.hits['absent'] == 8
elif allocate_GB > 0:
assert cache.hits['memory'] == 1
assert cache.hits['disk'] == 0
assert cache.hits['absent'] == 4
# Save DWWC matrix
path = hetmat.get_path_counts_path('CbGpPWpGaD', 'dwwc', 0.5, 'npy')
path.parent.mkdir(parents=True)
hetmech.hetmat.save_matrix(matrix, path)
# Third run
row_ids, col_ids, matrix = hetmech.degree_weight.dwwc(
graph=hetmat, metapath='CbGpPWpGaD', damping=0.5,
dwwc_method=hetmech.degree_weight.dwwc_recursive,
)
if allocate_GB == 0:
assert cache.hits['memory'] == 0
assert cache.hits['disk'] == 1
assert cache.hits['absent'] == 8
elif allocate_GB > 0:
assert cache.hits['memory'] == 2
assert cache.hits['disk'] == 0
assert cache.hits['absent'] == 4
print(cache.get_stats)
| 17,197
|
def mat_to_r(_line, _mat_object : MatlabObject, _r_object : RObject = RObject()):
"""Move variables from Matlab to R
Parameters
----------
_line : str, Iterable[str]
If str, one of the following:
1. '#! m[at[lab]] -> <vars>'
2. '<vars>'
where <vars> is a comma separated list of Matlab variable names
If Iterable[str]: [<var1>, <var2>, ...]
where <varX> is the name of a Matlab variable
All variables must be str, int, float.
_mat_object : Matlabobject
The Matlab environment where the variables are stored
_r_object : optional[RObject]
The R environment to load the variables into
Default: new RObject()
Returns
-------
    RObject
        The R environment with the given variables loaded
Raises
------
RuntimeError:
If _mat_object or _r_object is not alive
ValueError
If _line is not the right format
NameError
If a requested variable is not in the given Matlab environment
"""
## input validation
if not _mat_object.isalive:
# can't do anything
raise RuntimeError('Matlab connection was killed before things could be brought back to Python.')
if type(_line) is str and ('#!' in _line or '%!' in _line):
# _line = '#! <lang> -> <vars>'
if not '->' in _line:
raise ValueError('Misformatted line: "' + _line + '"')
_to_load = _line.split('->')[1].replace(' ','').split(',')
elif type(_line) is str:
# _line = '<vars>'
_to_load = _line.replace(' ','').split(',')
elif hasattr(_line, '__iter__') and all([type(i) is str for i in _line]):
# _line = [<var i>, ...]
_to_load = list(_line)
else:
raise ValueError('Unrecognized _line')
if not _r_object.isalive:
# can't do anything
        raise RuntimeError('R connection was killed before things could be sent to it.')
if _to_load[0] == '':
# null case
return _r_object
# check the variables
_who = _mat_object.who
for i in _to_load:
if i not in _who:
raise NameError(str(i) + ' not in Matlab environment')
# bundle them
_random_name = ''.join(choices('abcdefghijklmnopqrstuvwxyz', k=10))
_mat_object.sendline(_random_name + ' = tempname')
_temp_file = _mat_object.before.split('\r\n\r\n')[2].strip()[1:-1]
# get them
_mat_object.sendlines([
'save ' + _temp_file + '.mat ' + ' '.join(_to_load),
'clear ' + _random_name
])
# load them
_r_object.sendlines(
[
'library("R.matlab")',
_random_name + ' <- readMat("' + _temp_file + '.mat")'
] + [
_current + ' <- ' + _random_name + '$' + _current
for _current in _to_load
] + [
'rm(' + _random_name + ')'
]
)
return _r_object
| 17,198
|
def write_powerball_numbers():
"""Writes a CSV file of "Powerball" numbers and their draw distribution.
Args:
None
Returns:
A CSV file where the first column contains each "Powerball" number
and the second column contains the corresponding draw distribution
for that number. Please update your directory path accordingly if you're
not me.
Raises:
None
"""
# Retrieves the matrix of Powerball drawing dates and winning combinations.
original_output = retrieve_raw_lottery_data()
# Portion of the matrix that corresponds to Powerball's 35+ number drawing.
pre_change_powerball = []
# Portion of the matrix that corresponds to Powerball's 26 number drawing.
post_change_powerball = []
# Counter ensuring only one copy of the header row appends to
# 'pre_change_powerball'.
counter = 0
    # Counter used to identify how many qualifying drawings have occurred.
powerball_counter = 0
# Will be used to identify where the split between the 26 num drawing and
# 35+ num drawing occurs.
correct_index = 0
# Contains Powerball numbers and the frequency that they're drawn.
output_list = []
for index, list_element in enumerate(original_output):
# Strips the header row, which is not necessary.
if index == 0:
original_output.remove(list_element)
# Identifies which entries belong to Powerball's 26 num drawing.
if list_element[0] == "2015-10-07":
correct_index = int(index)
# Overwrites the winning number combination for each drawing date (str)
# with the "Powerball" number for that drawing date (an int).
for index, list_element in enumerate(original_output):
temporary = list_element[1]
change1 = temporary.replace(" ",",")
change2 = (int(change1[15]+change1[16]))
list_element[1] = change2
# Isolates matrix entries that belong to Powerball's 26 num drawing.
if index <= correct_index:
post_change_powerball.append(list_element[1])
# Isolates matrix entries that belong to Powerball's 35+ num drawings.
if index > correct_index:
pre_change_powerball.append(list_element[1])
# Populates a new list with all possible "Powerball" numbers and their
# draw frequency.
powerball_dictionary = dict(Counter(post_change_powerball))
for key, value in powerball_dictionary.items():
powerball_counter += value
for key, value in powerball_dictionary.items():
output_list.append([key,(value/powerball_counter)])
# Writes 'output_list' draw distribution matrix to a CSV file. If the file
# exists, it will overwrite it. This is the intended behavior. Analyzing
# outdated results is not as useful.
with open(
"C:\\Users\\Rane\\boring\\lottery\\post_change_powerball_number_analysis.csv",
"w", newline = '') as file:
writer = csv.writer(file)
writer.writerows(output_list)
| 17,199
|