| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def make_feature_scale_factors():
"""Saves a dictionary of features->scale_factors such that each feature array can be mapped into [0,1]."""
X, y = make_X_and_y()
sqm = make_sqm_X()
scale_factors = {
"indoor_temp": np.max(X[:,:,0]),
"outdoor_temp": np.max(X[:,:,1]),
"gas_kwh": np.max(X[:,:,2]),
"elec_kwh": np.max(X[:,:,3]),
"floor_area": np.max(sqm),
"htc": np.max(y),
}
with open(os.path.join(_TRAINING_DATA_PATH, "scalefactors.json"), "w+") as f:
json.dump(scale_factors, f)
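A minimal downstream sketch (not part of the original module) showing how the saved factors might be applied; it assumes the same _TRAINING_DATA_PATH, the make_X_and_y helper and the feature ordering used above, plus the module-level os, json and numpy imports.
# Hypothetical usage sketch: map each feature array into [0, 1] with the saved factors.
with open(os.path.join(_TRAINING_DATA_PATH, "scalefactors.json")) as f:
    factors = json.load(f)
X, y = make_X_and_y()
feature_names = ["indoor_temp", "outdoor_temp", "gas_kwh", "elec_kwh"]
X_scaled = np.stack([X[:, :, i] / factors[name] for i, name in enumerate(feature_names)], axis=-1)
y_scaled = y / factors["htc"]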
| 5,343,600
|
async def help_json(ctx: commands.Context, indent: int = 4) -> None:
"""
Create a dictionary with all the commands, convert it to a JSON file and send it to the author.
:param indent: (Optional) The number of spaces to indent the file.
"""
# Create the dictionary with the commands
commands_dict = {}
for c in client.walk_commands():
commands_dict[c.name] = {
"aliases": c.aliases,
"brief": c.brief,
"parameters": c.signature
}
# Sort the commands so they are easier to read
sorted_commands = client.helpers.sort_dict(commands_dict)
# Create a file-like object to send over discord
data = io.StringIO(json.dumps(sorted_commands, indent=indent))
# Send the JSON file
await ctx.send(f"{ctx.author.mention}. Commands:\n", file=discord.File(data, filename="commands.json"))
| 5,343,601
|
def load_AUXTEL_image(image): # pragma: no cover
"""Specific routine to load AUXTEL fits files and load their data and properties for Spectractor.
Parameters
----------
image: Image
The Image instance to fill with file data and header.
"""
image.my_logger.info(f'\n\tLoading AUXTEL image {image.file_name}...')
hdu_list = fits.open(image.file_name)
image.header = hdu_list[0].header
image.data = hdu_list[1].data.astype(np.float64)
hdu_list.close() # need to free allocation for file descriptor
image.date_obs = image.header['DATE']
image.expo = float(image.header['EXPTIME'])
if "empty" not in image.header['FILTER'].lower():
image.filter_label = image.header['FILTER']
# transformations so that stars are like in Stellarium up to a rotation
# with spectrogram nearly horizontal and on the right of central star
image.data = image.data.T[::-1, ::-1]
if image.header["AMSTART"] is not None:
image.airmass = 0.5 * (float(image.header["AMSTART"]) + float(image.header["AMEND"]))
else:
image.airmass = float(image.header['AIRMASS'])
image.my_logger.info('\n\tImage loaded')
# compute CCD gain map
image.gain = float(parameters.CCD_GAIN) * np.ones_like(image.data)
parameters.CCD_IMSIZE = image.data.shape[1]
image.disperser_label = image.header['GRATING']
image.ra = Angle(image.header['RA'], unit="deg")
image.dec = Angle(image.header['DEC'], unit="deg")
if 'HASTART' in image.header and image.header['HASTART'] is not None:
image.hour_angle = Angle(image.header['HASTART'], unit="hourangle")
else:
image.hour_angle = Angle(image.header['HA'], unit="deg")
if 'AIRTEMP' in image.header:
image.temperature = image.header['AIRTEMP']
else:
image.temperature = 10
if 'PRESSURE' in image.header:
image.pressure = image.header['PRESSURE']
else:
image.pressure = 743
if 'HUMIDITY' in image.header:
image.humidity = image.header['HUMIDITY']
else:
image.humidity = 40
if 'adu' in image.header['BUNIT']:
image.units = 'ADU'
parameters.OBS_CAMERA_ROTATION = 90 - float(image.header["ROTPA"])
if parameters.OBS_CAMERA_ROTATION > 360:
parameters.OBS_CAMERA_ROTATION -= 360
if parameters.OBS_CAMERA_ROTATION < -360:
parameters.OBS_CAMERA_ROTATION += 360
if "CD2_1" in hdu_list[1].header:
rotation_wcs = 180 / np.pi * np.arctan2(hdu_list[1].header["CD2_1"], hdu_list[1].header["CD1_1"]) + 90
if not np.isclose(rotation_wcs % 360, parameters.OBS_CAMERA_ROTATION % 360, atol=2):
image.my_logger.warning(f"\n\tWCS rotation angle is {rotation_wcs} degree while "
f"parameters.OBS_CAMERA_ROTATION={parameters.OBS_CAMERA_ROTATION} degree. "
f"\nThey differ by more than 2 degrees... bug?")
parameters.OBS_ALTITUDE = float(image.header['OBS-ELEV']) / 1000
parameters.OBS_LATITUDE = image.header['OBS-LAT']
image.read_out_noise = 8.5 * np.ones_like(image.data)
image.target_label = image.header["OBJECT"].replace(" ", "")
if "OBJECTX" in image.header:
image.target_guess = [parameters.CCD_IMSIZE - float(image.header["OBJECTY"]),
parameters.CCD_IMSIZE - float(image.header["OBJECTX"])]
image.disperser_label = image.header["GRATING"]
parameters.DISTANCE2CCD = 115 + float(image.header["LINSPOS"]) # mm
image.compute_parallactic_angle()
| 5,343,602
|
def manhattan_loadings(
iteration,
gtf,
loadings,
title=None,
size=4,
hover_fields=None,
collect_all=False,
n_divisions=500,
):
"""Modified version of Hail's Manhattan plot, used here to display loadings per locus."""
palette = [
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf',
]
# add gene names, p-values, and locus info
loadings = loadings.annotate(gene_names=gtf[loadings.locus].gene_name)
pvals = hl.abs(loadings.loadings[iteration])
locus = loadings.locus
if hover_fields is None:
hover_fields = {}
hover_fields['locus'] = hl.str(locus)
hover_fields['gene'] = hl.str(loadings.gene_names)
source_pd = (
hl.plot.plots._collect_scatter_plot_data( # pylint: disable=protected-access
('_global_locus', locus.global_position()),
('_pval', pvals),
fields=hover_fields,
n_divisions=None if collect_all else n_divisions,
)
)
source_pd['p_value'] = source_pd['_pval']
source_pd['_contig'] = [locus.split(':')[0] for locus in source_pd['locus']]
observed_contigs = set(source_pd['_contig'])
ref = locus.dtype.reference_genome
observed_contigs = [
contig for contig in ref.contigs.copy() if contig in observed_contigs
]
contig_ticks = [
ref._contig_global_position(contig) # pylint: disable=protected-access
+ ref.contig_length(contig) // 2
for contig in observed_contigs
]
color_mapper = CategoricalColorMapper(
factors=ref.contigs, palette=palette[:2] * int((len(ref.contigs) + 1) / 2)
)
p = figure(
title=title, x_axis_label='Chromosome', y_axis_label='Loadings', width=1000
)
(
p,
_,
legend,
_,
_,
_,
) = hl.plot.plots._get_scatter_plot_elements( # pylint: disable=protected-access
p,
source_pd,
x_col='_global_locus',
y_col='_pval',
label_cols=['_contig'],
colors={'_contig': color_mapper},
size=size,
)
legend.visible = False
p.xaxis.ticker = contig_ticks
p.xaxis.major_label_overrides = dict(zip(contig_ticks, observed_contigs))
p.select_one(HoverTool).tooltips = [
t for t in p.select_one(HoverTool).tooltips if not t[0].startswith('_')
]
return p
| 5,343,603
|
def check_para_vaild():
""" Check whether the keys in the options dict are valid """
global g_isdst
para_keylist = g_opts_dict.keys()
# check that every key passed in is within the allowed list
# (sublist check)
if not is_sublist(para_keylist,g_all_optskey):
logger.error("Unknown options key found, please check...")
sys.exit(1)
# check that the mandatory options are present
if not is_sublist(['ZONET','DST'],para_keylist):
logger.error("options missing necessary keys ('ZONET', 'DST')...")
sys.exit(1)
# check the format of the time zone value ZONET
check_zone_format()
# daylight saving time not required, go straight to the settings
if g_opts_dict['DST'] == 'NO':
g_isdst = False
return
# daylight saving time required
g_isdst = True
# determine the minimal key set from the start-time and end-time modes
#if not is_sublist(['SMODE','EMODE'],para_keylist):
if not exist_key('SMODE') or not exist_key('EMODE'):
logger.error("options missing necessary keys ('SMODE', 'EMODE')...")
sys.exit(1)
min_optskey = get_min_optskey(g_opts_dict['SMODE'], g_opts_dict['EMODE'])
if not min_optskey:
logger.error("get NULL min_optskey...")
sys.exit(18)
# check that the options keys contain the minimal set
if not is_sublist(min_optskey, para_keylist):
less_list = []
for i in min_optskey:
if i not in para_keylist:
less_list.append(i)
logger.error("options missing necessary keys: %s" % less_list)
logger.debug('must include %s' % min_optskey)
sys.exit(1)
# check whether the STIME and ETIME formats are valid;
# STIME and ETIME are required in every mode,
# so both must be checked
check_time_format()
# check the range of the supplied values
check_para_range(min_optskey)
| 5,343,604
|
def write():
"""Used to write the page in the app.py file"""
st.title("Italian Regional Cases")
cn.DATE_SPANS()
st.markdown(cn.HORIZONTAL_RULE, unsafe_allow_html=True)
regions = [
"Lazio", # 12
"Puglia", # 16
"Sicilia", # 19
"Basilicata", # 17
"Calabria", # 18
"Campania", # 15
"Sardegna", # 20
"Umbria" # 19
]
df = pd.read_csv(cn.ITALY_DATA)
df['data'] = df['data'].map(lambda x: x[0:10])
regionCount = 0
for region in regions:
regionCount += 1
dfRegion = df[df["denominazione_regione"] == region]
dfRegion['nuovi_deceduti'] = dfRegion['deceduti'].diff(1)
'''
df['Rolling'] = df['Price'].rolling(5).mean()
print(df.head(10))
'''
dfRegion = dfRegion.tail(40)
dfRegion["rolling"] = dfRegion["nuovi_positivi"].rolling(10).mean()
dfRegion = dfRegion.tail(30)
# dfFirst = dfRegion.head(1)
# firstDate = dfFirst.iloc[0]["data"]
# cn.ITALY_FIRST_DATE = firstDate
# dfLast = dfRegion.tail(1)
# lastDate = dfLast.iloc[0]["data"]
# cn.ITALY_LAST_DATE = lastDate
# cn.DATE_SPANS()
fig1 = plt.figure(1, figsize=(8, 5))
plt.title(f'{region} - New Cases and New Deaths', fontsize='large')
plt.xlabel("Date")
plt.ylabel("Number")
ax = plt.gca()
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
#plt.plot(dfPr['date'], dfProv['confirmedNewMean'], label='New Cases - Smoothed')
#plt.plot(dfRegion['data'], dfRegion['totale_positivi'], label='Total Positives')
plt.bar(dfRegion['data'], dfRegion['nuovi_positivi'], label='New Positives', color='0')
plt.plot(dfRegion['data'], dfRegion['rolling'], label='Average New Positives', color='r')
plt.plot(dfRegion['data'], dfRegion['nuovi_deceduti'], label='Deaths', color='c')
#plt.plot(dfRegion['data'], dfRegion['totale_casi'], label='Total Cases')
# Add a legend
plt.legend()  # use the labels attached to the plotted artists
plt.grid(True, which='major')
st.pyplot(fig1)
#plt.show()
plt.close()
| 5,343,605
|
def mirror_z(table, label):
"""Mirror the z-component of the vector.
Parameters
----------
table: the OpenSim table whose dependent column will be modified in place
label: string containing the name of the column you want to convert
"""
c = table.updDependentColumn(label)
for i in range(c.size()):
c[i] = opensim.Vec3(c[i][0], c[i][1], -c[i][2])
| 5,343,606
|
def preprocess_point(p, C):
"""Preprocess a single point (a clip).
WARN: NAN-preserving
Arguments:
p {ndarray} -- shape = (variable, C.joint_n, C.joint_d)
C {DDNetConfig} -- A Config object
Returns:
ndarray, ndarray -- X0, X1 to input to the net
"""
assert p.shape[1:] == (C.joint_n, C.joint_d)
p = zoom(p,target_l=C.frame_l,joints_num=C.joint_n,joints_dim=C.joint_d)
# interpolate to the right number of frames
assert p.shape == (C.frame_l, C.joint_n, C.joint_d)
M = get_CG(p, C)
return M, p
| 5,343,607
|
def html_decode_raw(client, args):
"""
Same as html_decode but the output will never be printed as a hex dump and
results will not be copied. It is suggested you redirect the output
to a file.
"""
print(_code_helper(args, html_decode_helper, copy=False))
| 5,343,608
|
def test_aggregate_per_slice(dummy_metrics):
"""Test extraction of metrics aggregation per slice: Selected slices"""
agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(dummy_metrics['with float'], slices=[3, 4],
perslice=True,
group_funcs=(('WA', aggregate_slicewise.func_wa),))
assert agg_metric[(3,)]['WA()'] == 41.0
assert agg_metric[(4,)]['WA()'] == 50.0
| 5,343,609
|
def link_to_existing_user_by_email_if_backend_is_trusted(backend, details, user=None, *args, **kwargs):
"""Return user entry with same email address as one returned on details."""
if user or not _is_trusted_email_backend(backend):
return
email = details.get('email')
if email:
# try to link accounts registered with the same email address,
# only if it's a single object. AuthException is raised if multiple
# objects are returned
try:
return {'user': EmailAddress.objects.get(email=email).user}
except MultipleObjectsReturned:
raise AuthException(kwargs['backend'], 'Not unique email address.')
except ObjectDoesNotExist:
pass
| 5,343,610
|
def _monthlyfile(yr, path, ppath, force, layer, ANfn, latmax, latmin, lonmin, lonmax):
"""
Function to process the monthly data into an annual file
args:
yr: int
year
path: str
dir to do the work in
ppath: str
processed path
"""
# ========== get the web address ==========
address = "ftp://anon-ftp.ceda.ac.uk/neodc/esacci/fire/data/burned_area/MODIS/pixel/v5.1/compressed/%d/" % yr
# ========== list to hold the file names ==========
ptnames = []
# ========== process the mask ==========
def _maskmaker(ds_tomask):
print("starting the 2018 mask")
ipdb.set_trace()
# dates = datefixer(yr, 12, 31)
maskfn = "/media/ubuntu/Seagate Backup Plus Drive/Data51/BurntArea/esacci/FRI/esacci_landseamask.nc"
# change the values
with ProgressBar():
ds_mask = (ds_tomask != -2).astype("float32").sum(dim="time").compute()
# +++++ ds_mask has 1 for land 0 for water +++++
# that is then summed over time
# ========== create a date ==========
dates = datefixer(2018, 12, 31)
# ========== find places with a few good values ==========
ds_mask = (ds_mask>=5).astype("float32")
ds_mask = ds_mask.where(ds_mask == 1.0).rename({layer:"mask"}).expand_dims({"time":dates["CFTime"]})
# ===== fix the time =====
# ds_mask["time"] = dates["CFTime"]
ds_mask.time.attrs["calendar"] = dates["calendar"]
ds_mask.time.attrs["units"] = dates["units"]
# da_mask = (da_mask == -2).mean(dim="time")
# da_mask = attrs_fixer(da_mask, dates)
# ========== Setup the metadata ==========
ds_mask.attrs = GlobalAttributes(maskfn)
# layers = OrderedDict()
# layers["mask"] = da_mask
# ========== create the dataset ==========
ds_mask = tempNCmaker(ds_mask, maskfn, "mask", chunks={"latitude":1000, 'longitude': 1000}, pro = "%d mask"% yr)
return ds_mask
# ========== loop over the month ==========
for mn in range(1, 13):
print(yr, mn, pd.Timestamp.now())
# ========== Create the file name and check if they need to get downloaded ==========
fnA = "%d%02d01-ESACCI-L3S_FIRE-BA-MODIS-AREA_4-fv5.1.tar.gz" % (yr, mn)
fnE = "%d%02d01-ESACCI-L3S_FIRE-BA-MODIS-AREA_3-fv5.1.tar.gz" % (yr, mn)
for fn in [fnA, fnE]:
filefetcher(fn, address, path)
# ========== Make the file name and see if it already exists ==========
ftout = path+"tmp/%d%02d01_tmp_%s.nc" %(yr, mn, layer)
if os.path.isfile(ftout):
ds_testing = xr.open_dataset(ftout)
ptnames.append(ftout)
print(ds_testing[layer].shape)
continue
# ========== open the components ==========
fn_XR = glob.glob(path+"tmp/%d%02d*-%s.tif" % (yr, mn, layer))
renm = {"band":"time","x":"longitude", "y":"latitude"}
da_ls = [xr.open_rasterio(fnr).rename(renm).sel(
dict(
latitude=slice(latmax, latmin),
longitude=slice(lonmin, lonmax))) for fnr in fn_XR]
# Address tiny rounding errors in the data
da_ls[0]["latitude"] = da_ls[1].latitude.values
# ========== Merge into a single dataset ==========
ds_out = xr.Dataset({layer:xr.concat(da_ls, dim="longitude").chunk({"longitude":1000})})#.sortby("latitude", ascending=False)#.transpose("latitude")
date = pd.Timestamp("%d-%02d-01" % (yr, mn))
ds_out["time"] = [date]
# ========== Save a temporary netcdf file ==========
ds_out = tempNCmaker(ds_out, ftout, layer, chunks={'longitude': 1000}, skip=False)
# ========== append the save name ==========
ptnames.append(ftout)
# ========== Build annual dataset ==========
da = xr.open_mfdataset(
ptnames, concat_dim="time",
chunks={"time":1, "latitude":1000, 'longitude': 1000})[layer]
da = da.reindex(latitude=list(reversed(da.latitude)))
if yr == 2018:
# Copy the data
da_mask = _maskmaker(da.copy())
ipdb.set_trace()
# ========== mask it away ==========
da_bl = da.where( da > 0)
# ========== Aggregate and finalise the da ==========
dates = datefixer(yr, 12, 31)
da_out = da_bl.sum("time")
da_out = da_out.where(da_out<= 0, 1).rename()
da_out = attrs_fixer(da_out, dates)
# ========== Setup the metadata ==========
global_attrs = GlobalAttributes(ANfn)
layers = OrderedDict()
layers["BA"] = da_out
# ========== create the dataset ==========
ds = xr.Dataset(layers, attrs= global_attrs)
# ========== build a mask ==========
ds = tempNCmaker(ds, ANfn, "BA", chunks={"latitude":1000, 'longitude': 1000}, pro = "%d Burnt Area"% yr)
# ========== return the dataset ==========
return ds
| 5,343,611
|
def ingest_sessions(session_csv_path='./user_data/session/sessions.csv',
skip_duplicates=True):
"""
Inserts data from a sessions csv into the corresponding session schema tables.
By default, uses data from workflow_session/user_data/session/
:param session_csv_path: relative path of the sessions csv
:param skip_duplicates: passed through to DataJoint's insert (default True)
"""
csvs = [session_csv_path, session_csv_path, session_csv_path]
tables = [session.Session(), session.SessionDirectory(),
session.SessionNote()]
ingest_general(csvs, tables, skip_duplicates=skip_duplicates)
| 5,343,612
|
def get_global_threshold(image_gray, threshold_value=130):
""" Applies a global threshold to the image and returns a binary (black-and-white) image object.
A single value (threshold_value) is used as the cut-off for the whole image:
pixels brighter than the threshold become white, pixels at or below it become black.
The input image must be a 2-D gray-scale image.
:param image_gray: gray-scale input image
:param threshold_value: cut-off value applied to the whole image.
:return: binary image with the global threshold applied
"""
copy = image_gray.copy() # copy the image to be processed
_, binary_image = cv2.threshold(copy, threshold_value, 255, cv2.THRESH_BINARY)
return binary_image
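A short usage sketch (not from the source); "photo.jpg" is a placeholder path, and the colour image is converted to gray-scale first because the function expects a 2-D input.
import cv2
image = cv2.imread("photo.jpg")
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
binary = get_global_threshold(image_gray, threshold_value=130)
cv2.imwrite("photo_binary.png", binary)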
| 5,343,613
|
def get_batch(src_gen, trgt_gen, batch_size=10):
"""
Return a batch of batch_size results, as in get_rotated_src_target_spirals,
drawn from the source and target generators.
Args:
src_gen: generator object whose .generate() produces a source sample
trgt_gen: generator object whose .generate() produces a target sample
batch_size (int): number of samples in the batch
Return:
[torch.tensor, torch.tensor]: src and target batches
"""
batch_points = [src_gen.generate() for _ in range(batch_size)]
batch_targets = [trgt_gen.generate() for _ in range(batch_size)]
batch_points = [to_torch_tensor(i) for i in batch_points]
batch_targets = [to_torch_tensor(i) for i in batch_targets]
return torch.cat(batch_points), torch.cat(batch_targets)
| 5,343,614
|
def compute_crop_parameters(image_size, bbox, image_center=None):
"""
Computes the principal point and scaling factor for focal length given a square
bounding box crop of an image.
These intrinsic parameters are used to preserve the original principal point even
after cropping the image.
Args:
image_size (int or array): Size of image, either the length of the longer dimension or
the shape (H, W, C).
bbox: Square bounding box in xyxy (4,).
image_center: Center of projection/principal point (2,).
Returns:
principal_point: Coordinates in NDC using Pytorch3D convention with (1, 1)
as upper-left (2,).
crop_scale (float): Scaling factor for focal length.
"""
bbox = np.array(bbox)
b = max(bbox[2:] - bbox[:2])
if isinstance(image_size, int):
h = w = image_size
else:
h, w, *c = image_size
image_size = max(image_size)
if image_center is None:
image_center = np.array([w / 2, h / 2])
bbox_center = (bbox[:2] + bbox[2:]) / 2
crop_scale = b / image_size
principal_point = 2 * (bbox_center - image_center) / b
return principal_point, crop_scale
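A small worked example (made-up numbers, not from the source) to make the geometry concrete:
import numpy as np
principal_point, crop_scale = compute_crop_parameters(image_size=256, bbox=np.array([64, 96, 192, 224]))
# The box is 128 px wide with centre (128, 160); the image centre is (128, 128),
# so principal_point == [0.0, 0.5] and crop_scale == 128 / 256 == 0.5.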
| 5,343,615
|
def input_fn_builder(input_file, seq_length, num_labels, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([num_labels], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _cast_features(features):
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in features:
t = features[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
features[name] = t
return features
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
features = tf.parse_single_example(record, name_to_features)
return _cast_features(features)
def file_based_input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
def serving_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = tf.placeholder(
dtype=tf.string,
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, name_to_features)
features = _cast_features(features)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
if input_file is not None:
return file_based_input_fn
else:
return serving_input_receiver_fn
| 5,343,616
|
def test_gf_matching_with_exception(get_mock):
"""Test GoogleFinance match with network exception
"""
gf_matcher = GoogleFinanceNameMatcher()
symbols = gf_matcher.match_by('apple', sleep=0)
assert get_mock.call_count == 3
assert symbols['apple'][0][0] == 'Apple Inc.'
assert symbols['apple'][0][1].ticker == 'AAPL'
assert symbols['apple'][0][1].country_code == 'US'
symbols = gf_matcher.match_by('apple', retry=2, sleep=0)
try:
get_mock.assert_called()
except AttributeError:
assert get_mock.called
assert not symbols['apple']
| 5,343,617
|
def get_user_by_username(username):
"""Return User by username"""
try:
return User.objects.get(username=username)
except User.DoesNotExist:
return None
| 5,343,618
|
def init_logger() -> None:
"""Initialize logger, set log format and the base logging level."""
global logger
logger.remove()
logger.add(
sink=sys.stdout,
level=logging.INFO,
format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | " "<level>{level}</level> | " "<level>{message}</level>",
)
| 5,343,619
|
def get_xyz_to_rgb_matrix(name):
"""
Compute the XYZ-to-RGB matrix.
Wrapped in an internal function so that D65-based coefficients can be returned for DCI-P3.
"""
if name != "DCI-P3":
xyz_to_rgb_matrix = RGB_COLOURSPACES[name].XYZ_to_RGB_matrix
else:
rgb_to_xyz_matrix\
= calc_rgb_to_xyz_matrix(RGB_COLOURSPACES[DCI_P3].primaries,
xy_to_XYZ(ILLUMINANTS[CMFS_NAME]['D65']))
xyz_to_rgb_matrix = linalg.inv(rgb_to_xyz_matrix)
return xyz_to_rgb_matrix
| 5,343,620
|
def find_bordering_snapnums(
snap_times_gyr,
dGyr=.005,
tmin=None,
tmax=None):
"""Find the snapshot indices and times that bracket each requested time."""
## handle default maximum time
tmax = snap_times_gyr[-1] if tmax is None else tmax
## handle default minimum time
if tmin is None:
tmin = snap_times_gyr[0]
## remove dGyr so that tmin is included in arange below
elif tmin - dGyr > 0:
tmin = tmin-dGyr
## create list of times, -1e-9 to avoid landing exactly on a snapshot number
times_gyr = np.arange(tmax,tmin,-dGyr)[::-1]-1e-9
inds_next = np.argmax((times_gyr - snap_times_gyr[:,None]) < 0 ,axis=0)
inds_prev = inds_next-1
return (
times_gyr,
np.array(list(zip(inds_prev,inds_next))),
np.array(list(zip(snap_times_gyr[inds_prev],snap_times_gyr[inds_next]))))
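A tiny illustrative call (made-up snapshot times, not from the source):
import numpy as np
snap_times_gyr = np.array([0.0, 1.0, 2.0, 3.0])
times, snap_pairs, time_pairs = find_bordering_snapnums(snap_times_gyr, dGyr=0.5, tmin=1.0, tmax=2.0)
# times ~ [1.0, 1.5, 2.0] (each shifted down by 1e-9)
# snap_pairs -> [[0, 1], [1, 2], [1, 2]]  (previous/next snapshot index for each time)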
| 5,343,621
|
def maxIterationComb(N,k,l):
"""
title::
maxIterationComb
description::
Compute N!/k!l!(N-k-l)! (max iterations).
attributes::
N
Number of targets (graph size)
k
Number of human patrollers
l
Number of drones
returns::
Resulting maximum iterations (integer).
author::
Elizabeth Bondi (ebondi@g.harvard.edu)
Hoon Oh, Haifeng Xu, Kai Wang
disclaimer::
This source code is provided "as is" and without warranties as to
performance or merchantability. The author and/or distributors of
this source code may have made statements about this source code.
Any such statements do not constitute warranties and shall not be
relied on by the user in deciding whether to use this source code.
This source code is provided without any express or implied warranties
whatsoever. Because of the diversity of conditions and hardware under
which this source code may be used, no warranty of fitness for a
particular purpose is offered. The user is advised to test the source
code thoroughly before relying on it. The user must assume the entire
risk of using the source code.
"""
return int(comb(N,k)*comb(N-k,l))
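A quick sanity check of the formula (not from the source):
# N = 10 targets, k = 2 patrollers, l = 1 drone:
# C(10, 2) * C(8, 1) = 45 * 8 = 360, i.e. 10! / (2! * 1! * 7!)
assert maxIterationComb(10, 2, 1) == 360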
| 5,343,622
|
def test_fail_retrieve_tokens(oauth2_connector, secrets_keeper):
"""
It should fail if the stored state does not match the received state.
"""
secrets_keeper.save('test', {'state': JsonWrapper.dumps({'token': 'the_token'})})
with pytest.raises(AssertionError):
oauth2_connector.retrieve_tokens(
f'http://localhost/?state={JsonWrapper.dumps({"token": "bad_token"})}'
)
| 5,343,623
|
def create_project_details_list (project):
"""Makes a project details section for the HTML.
Parameters
----------
project: HeatRecovery
A HeatRecovery object thats run function has been called
Returns
-------
dict
with values used by summary
"""
try:
costs = '${:,.0f}'.format(project.get_NPV_costs())
except ValueError:
costs = project.get_NPV_costs()
try:
benefits = '${:,.0f}'.format(project.get_NPV_benefits())
except ValueError:
benefits = project.get_NPV_benefits()
try:
net_benefits = '${:,.0f}'.format(project.get_NPV_net_benefit())
except ValueError:
net_benefits = project.get_NPV_net_benefit()
try:
BC = '{:,.1f}'.format(project.get_BC_ratio())
except ValueError:
BC = project.get_BC_ratio()
try:
source = "<a href='" + \
project.comp_specs['link'] + "'> link </a>"
except StandardError as e:
source = "unknown"
try:
notes = project.comp_specs['notes']
except StandardError as e:
notes = "N/a"
try:
potential_hr = '{:,.0f} gallons'.format(float(
project.comp_specs[
'proposed gallons diesel offset']))
except ValueError:
potential_hr =\
str(project.comp_specs[
'proposed gallons diesel offset'])
try:
dist = \
'{:,.0f} ft'.format(\
float(project.comp_specs['total feet piping needed']))
except ValueError:
dist = 'Unknown'
#~ print dist
return [
{'words':'Capital cost',
'value': costs},
{'words':'Lifetime energy cost savings',
'value': benefits},
{'words':'Net lifetime savings',
'value': net_benefits},
{'words':'Benefit-cost ratio',
'value': BC},
{'words':'Est. potential annual heating fuel gallons displaced',
'value': potential_hr},
{'words':'Number of buildings to be connected',
'value': str(project.comp_specs['estimate buildings to heat'])},
{'words':'Round-trip distance of piping',
'value': dist},
{'words':'Source',
'value': source},
{'words':'Notes',
'value': notes},
]
| 5,343,624
|
def compute_cluster_top_objects_by_distance(precomputed_distances,
max_top_number=10,
object_clusters=None):
"""
Compute the most representative objects for each cluster
using the precomputed_distances.
Parameters
----------
precomputed_distances : np.array
array of shape (n_topics, n_objects) -
a matrix of pairwise distances: distance from ith cluster centroid to the jth object
max_top_number : int
maximum number of top objects of cluster (resulting number can be less than it)
(Default value = 10)
object_clusters : np.array
array of shape n_objects - precomputed clusters for objects
(Default value = None)
Returns
-------
clusters_top_objects : list of list of indexes
"""  # noqa: W291
# prediction for objects
if object_clusters is None:
object_clusters = predict_cluster_by_precomputed_distances(precomputed_distances)
# transformation from list to dict
clusters = transform_cluster_objects_list_to_dict(object_clusters)
n_topics = precomputed_distances.shape[0]
clusters_top_objects = []
for cluster_label in range(n_topics):
# cluster is empty
if cluster_label not in clusters.keys():
clusters_top_objects.append([])
continue
cluster_objects = np.array(clusters[cluster_label])
cluster_objects_to_center_distances = (
precomputed_distances[cluster_label][cluster_objects]
)
if max_top_number >= cluster_objects.shape[0]:
# cluster is too small; grab all objects
indexes_of_top_objects = np.arange(0, cluster_objects.shape[0])
else:
# filter by distance with partition
indexes_of_top_objects = np.argpartition(
cluster_objects_to_center_distances,
kth=max_top_number
)[:max_top_number]
distances_of_top_objects = cluster_objects_to_center_distances[indexes_of_top_objects]
top_objects = cluster_objects[indexes_of_top_objects]
# sorted partitioned array
indexes_of_top_objects_sorted_by_distance = np.argsort(distances_of_top_objects)
sorted_top_objects = top_objects[indexes_of_top_objects_sorted_by_distance]
clusters_top_objects.append(sorted_top_objects.tolist())
return clusters_top_objects
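A small illustrative call (not from the source), assuming transform_cluster_objects_list_to_dict simply groups object indexes by their cluster label:
distances = np.array([[0.1, 0.9, 0.2],
                      [0.8, 0.3, 0.7]])  # 2 clusters x 3 objects
top = compute_cluster_top_objects_by_distance(distances, max_top_number=2, object_clusters=np.array([0, 1, 0]))
# -> [[0, 2], [1]]  (objects of each cluster, sorted by distance to its centroid)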
| 5,343,625
|
def get_rpm_package_list():
""" Gets all installed packages in the system """
pkgstr = subprocess.check_output(['rpm', '-qa', '--queryformat', '%{NAME}\n'])
return pkgstr.splitlines()
| 5,343,626
|
def validate_ints(*args):
""" validates that inputs are ints only """
for value in args:
if not isinstance(value, int):
return False
return True
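For example (note that bool is a subclass of int, so booleans also pass):
validate_ints(1, 2, 3)    # True
validate_ints(1, "2", 3)  # False
validate_ints(True, 0)    # True, because isinstance(True, int) is True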
| 5,343,627
|
def smooth_l1_loss(y_true, y_pred):
"""
Computes the smooth-L1 loss.
Parameters
----------
y_true : tensor
Ground-truth targets of any shape.
y_pred : tensor
Estimates of same shape as y_true.
Returns
-------
loss : tensor
The loss, summed over all elements of the last dim of y_true, i.e.,
same shape as y_true without the last dim.
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
diff = tf.math.abs(y_true - y_pred)
less_than_one = tf.cast(tf.math.less(diff, 1.0), y_pred.dtype)
# smooth l1 loss, loss.shape == y_true.shape
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
# sum over all elements in the last dim
loss = tf.math.reduce_sum(loss, axis=-1)
return loss
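A quick numerical check (not from the source):
import tensorflow as tf
y_true = tf.constant([[0.0, 0.0], [1.0, 5.0]])
y_pred = tf.constant([[0.5, 0.0], [1.0, 2.0]])
# |diff| = [[0.5, 0.0], [0.0, 3.0]] -> element-wise loss [[0.125, 0.0], [0.0, 2.5]]
print(smooth_l1_loss(y_true, y_pred).numpy())  # [0.125 2.5]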
| 5,343,628
|
def get_class_occurrences(layer_types):
"""
Takes a numpy.ndarray of size (nb_points, 10) describing, for each point of the track,
the cloud types identified at each of the 10 heights, and returns a binary occurrence
label per point (1 if any cloud is present, 0 otherwise).
The height and cloud type information is then lost.
"""
layer_types = np.array(layer_types)
occurrences = np.zeros((layer_types.shape[0], 1))
for i, labels in enumerate(layer_types):
# keep only cloud types (ignore the 0 and -9 fill values)
total = sum(1 for l in labels if l > 0)
if total > 0:
occurrences[i] = 1
return occurrences
| 5,343,629
|
def test_stream_targets_properties_specific():
"""
Tests an API call to get a specific stream target property by id
"""
response = stream_targets_instance.properties(stream_target_id, property_id)
assert isinstance(response, dict)
assert 'property' in response
| 5,343,630
|
def parse_duration(datestring):
"""
Parses an ISO 8601 duration into a float value containing seconds.
The following duration formats are supported:
-PnnW duration in weeks
-PnnYnnMnnDTnnHnnMnnS complete duration specification
Years and months are not supported, their values must be zero!
"""
if not isinstance(datestring, str):
raise TypeError("Expecting a string %r" % datestring)
match = ISO8601_PERIOD_REGEX.match(datestring)
if not match:
raise ValueError("Unable to parse duration string %r" % datestring)
groups = match.groupdict()
for key, val in groups.items():
if key not in ('separator', 'sign'):
if val is None:
groups[key] = "0n"
# print groups[key]
if key in ('years', 'months'):
groups[key] = Decimal(groups[key][:-1].replace(',', '.'))
else:
# these values are passed into a timedelta object,
# which works with floats.
groups[key] = float(groups[key][:-1].replace(',', '.'))
if groups["years"] != 0 or groups["months"] != 0:
raise ValueError("Unable to parse duration string %r (Non zero year or month)" % datestring)
else:
ret = timedelta(days=groups["days"], hours=groups["hours"],
minutes=groups["minutes"], seconds=groups["seconds"],
weeks=groups["weeks"])
if groups["sign"] == '-':
ret = timedelta(0) - ret
return ret.total_seconds()
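Assuming ISO8601_PERIOD_REGEX is the usual isodate duration pattern, the function behaves like this:
parse_duration("PT1H30M")  # 5400.0 seconds
parse_duration("P2W")      # 1209600.0 seconds (two weeks)
parse_duration("-PT30S")   # -30.0 seconds
parse_duration("P1Y")      # raises ValueError (non-zero year)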
| 5,343,631
|
def knn(position, data_set, labels, k):
"""
k-nearest-neighbour classification.
:param position: the point to classify
:param data_set: sample data
:param labels: label of each sample
:param k: number of neighbours to consider
:return: predicted label
"""
distance_list = []
for index, item in enumerate(data_set):
distance_list.append((
labels[index],
math.sqrt(reduce(operator.add, [(v - position[i]) ** 2 for i, v in enumerate(item)]))
))
# sort by distance (ascending) and keep the k nearest neighbours
distance_list = sorted(distance_list, key=lambda x: x[1])
result = Counter([val[0] for val in distance_list[:k]])
# return the most common label among the k nearest neighbours
return result.most_common(1)[0][0]
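A minimal check (not from the source), assuming math, operator, functools.reduce and collections.Counter are imported at module level as the function requires:
samples = [[1.0, 1.0], [1.2, 0.8], [8.0, 9.0], [9.0, 8.5]]
labels = ['A', 'A', 'B', 'B']
knn([1.1, 0.9], samples, labels, k=3)  # -> 'A' (two of the three nearest neighbours are 'A')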
| 5,343,632
|
def plot2Dseparation(u, file, **kwargs):
"""
Returns nothing and saves the figure under the specified file name.
Args:
cmap: matplotlib cmap. Eg: cmap = "seismic"
lvls: number of levels of the contour. Eg: lvls = 100
lim: min and max values of the contour passed as array. Eg: lim = [-0.5, 0.5]
file: Name of the file to save the plot (.pdf recommended so it can be converted to .svg).
Eg: file = "dUdy.pdf"
Kwargs:
x=[xmin,xmax] is the x axis minimum and maximum specified
y=[ymin,ymax] is the y axis minimum and maximum specified
annotate: Boolean if annotations for min and max values of the field (and locations) are desired
"""
# Internal imports
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.rc('font', family='sans-serif', size=6)
mpl.rc('xtick', labelsize=6)
mpl.rc('ytick', labelsize=6)
N, M = u.shape[0], u.shape[1]
scaling = kwargs.get('scaling', 1)
ptype = kwargs.get('ptype', 'contourf')
xshift = kwargs.get('xshift', 0)
yshift = kwargs.get('yshift', 0)
cmap = kwargs.get('cmap', 'seismic')
lvls = kwargs.get('lvls', 50)
lim = kwargs.get('lim', [np.min(u), np.max(u)])
if not 'grid' in kwargs:
xmin, xmax = 0, N-1
ymin, ymax = -M / 2, M / 2 - 1
x, y = np.linspace(xmin / scaling, xmax / scaling, N), np.linspace(ymin / scaling, ymax / scaling, M)
x, y = x + xshift, y + yshift
x, y = np.meshgrid(x, y)
else:
grid = kwargs['grid']
x, y = grid[0]/scaling, grid[1]/scaling
# Matplotlib definitions
fig = plt.gcf()
ax = plt.gca()
# Create contourf given a normalized (norm) colormap (cmap)
norm = colors.Normalize(vmin=lim[0], vmax=lim[1])
# lvls = np.linspace(lim[0], lim[1], lvls + 1)
if ptype == 'contourf':
# ax.contour(x, y, u, lvls, linewidths=0.2, colors='k')
# cf = ax.contourf(x.T, y.T, u.T, levels=lvls, vmin=lim[0], vmax=lim[1], norm=norm, cmap=cmap, extend='both')
cf = ax.contourf(x.T, y.T, u.T, levels=lvls, vmin=lim[0], vmax=lim[1], norm=norm, cmap=cmap)
else:
cf = ax.pcolormesh(x.T, y.T, u.T, vmin=lim[0], vmax=lim[1], norm=norm, cmap=cmap)
# Scale contourf and set limits
plt.axis('scaled')
plt.xlim(np.min(x), np.max(x))
plt.ylim(np.min(y), np.max(y))
print(np.min(x), np.max(x))
print(np.min(y), np.max(y))
ax.tick_params(bottom=True, top=True, right=True, which='both', direction='in', length=2)
# Add cylinder
grey_color = '#dedede'
cyl = patches.Circle((0, 0),scaling/2, linewidth=0.4, edgecolor='purple', facecolor='None')
ax.add_patch(cyl)
# Colormap
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="5%", pad=0.05)
# v = np.linspace(lim[0], lim[1], 10, endpoint=True)
# c = mpl.cm.get_cmap(cmap)
# c.set_under('r')
# c.set_over('b')
# plt.colorbar(cf, cax=cax, norm=norm, cmap=c, ticks=v, boundaries=v)
# Show, save and close figure
plt.savefig(file, transparent=True, bbox_inches='tight')
plt.clf()
return
| 5,343,633
|
def _cals(raw):
"""Helper to deal with the .cals->._cals attribute change."""
try:
return raw._cals
except AttributeError:
return raw.cals
| 5,343,634
|
def generate_mlsag(message, pk, xx, kLRki, index, dsRows, mg_buff):
"""
Multilayered Spontaneous Anonymous Group Signatures (MLSAG signatures)
:param message: the full message to be signed (actually its hash)
:param pk: matrix of public keys and commitments
:param xx: input secret array composed of a private key and commitment mask
:param kLRki: used only in multisig, currently not implemented
:param index: specifies corresponding public key to the `xx`'s private key in the `pk` array
:param dsRows: separates pubkeys from commitment
:param mg_buff: mg signature buffer
"""
rows, cols = gen_mlsag_assert(pk, xx, kLRki, index, dsRows)
rows_b_size = int_serialize.uvarint_size(rows)
# Preallocation of the chunked buffer, len + cols + cc
for _ in range(1 + cols + 1):
mg_buff.append(None)
mg_buff[0] = int_serialize.dump_uvarint_b(cols)
cc = crypto.new_scalar() # rv.cc
c = crypto.new_scalar()
L = crypto.new_point()
R = crypto.new_point()
Hi = crypto.new_point()
# calculates the "first" c, key images and random scalars alpha
c_old, II, alpha = generate_first_c_and_key_images(
message, pk, xx, kLRki, index, dsRows, rows, cols
)
i = (index + 1) % cols
if i == 0:
crypto.sc_copy(cc, c_old)
ss = [crypto.new_scalar() for _ in range(rows)]
tmp_buff = bytearray(32)
while i != index:
hasher = _hasher_message(message)
# Serialize size of the row
mg_buff[i + 1] = bytearray(rows_b_size + 32 * rows)
int_serialize.dump_uvarint_b_into(rows, mg_buff[i + 1])
for x in ss:
crypto.random_scalar(x)
for j in range(dsRows):
# L = rv.ss[i][j] * G + c_old * pk[i][j]
crypto.add_keys2_into(
L, ss[j], c_old, crypto.decodepoint_into(Hi, pk[i][j])
)
crypto.hash_to_point_into(Hi, pk[i][j])
# R = rv.ss[i][j] * H(pk[i][j]) + c_old * Ip[j]
crypto.add_keys3_into(R, ss[j], Hi, c_old, II[j])
hasher.update(pk[i][j])
_hash_point(hasher, L, tmp_buff)
_hash_point(hasher, R, tmp_buff)
for j in range(dsRows, rows):
# again, omitting R here as discussed above
crypto.add_keys2_into(
L, ss[j], c_old, crypto.decodepoint_into(Hi, pk[i][j])
)
hasher.update(pk[i][j])
_hash_point(hasher, L, tmp_buff)
for si in range(rows):
crypto.encodeint_into(mg_buff[i + 1], ss[si], rows_b_size + 32 * si)
crypto.decodeint_into(c, hasher.digest())
crypto.sc_copy(c_old, c)
pk[i] = None
i = (i + 1) % cols
if i == 0:
crypto.sc_copy(cc, c_old)
gc.collect()
del II
# Finalizing rv.ss by processing rv.ss[index]
mg_buff[index + 1] = bytearray(rows_b_size + 32 * rows)
int_serialize.dump_uvarint_b_into(rows, mg_buff[index + 1])
for j in range(rows):
crypto.sc_mulsub_into(ss[j], c, xx[j], alpha[j])
crypto.encodeint_into(mg_buff[index + 1], ss[j], rows_b_size + 32 * j)
# rv.cc
mg_buff[-1] = crypto.encodeint(cc)
| 5,343,635
|
def irnn_data_iterator(X, y, batch_size, math_engine):
"""Slices numpy arrays into batches and wraps them in blobs"""
def make_blob(data, math_engine):
"""Wraps numpy data into neoml blob"""
shape = data.shape
if len(shape) == 2: # data
# Wrap 2-D array into blob of (BatchWidth, Channels) shape
return neoml.Blob.asblob(math_engine, data,
(1, shape[0], 1, 1, 1, 1, shape[1]))
elif len(shape) == 1: # dense labels
# Wrap 1-D array into blob of (BatchWidth,) shape
return neoml.Blob.asblob(math_engine, data,
(1, shape[0], 1, 1, 1, 1, 1))
else:
assert(False)
start = 0
data_size = y.shape[0]
while start < data_size:
yield (make_blob(X[start : start+batch_size], math_engine),
make_blob(y[start : start+batch_size], math_engine))
start += batch_size
| 5,343,636
|
def naive_forecast(series, steps_ahead=3, freq='D', series_name='naive'):
"""
Forecasts each future value as the last available observation (naive forecast).
INPUT:
:param series: pandas Series of data,
:param steps_ahead: number of steps into the future to predict, default is 3,
:param freq: (str) representation of a time frequency,
:param series_name: str
OUTPUT:
:return: series with extrapolated values equal to the last observation.
"""
obs = [series.iloc[-1] for _ in range(steps_ahead)]
future = set_future_series(forecasted_values=obs,
series_name=series_name,
last_date=series.index[-1],
steps_ahead=steps_ahead,
frequency=freq)
return future
| 5,343,637
|
async def delete_original_path(request):
"""
After the processing of the whole data source, this api can be used to delete the original zip
correspoding to a particular username
"""
username = request.args.get("username")
if not username:
raise APIBadRequest("Username for this datasource is required")
res = await get_status(request.app.config[DATASOURCE_NAME]["tables"]["status_table"], username)
result = list(res)
if not result:
raise APIBadRequest(f"No status present for {DATASOURCE_NAME} for username {username}")
logger.debug(result[0].get("username"))
result = result[0]
logger.debug(result)
path_to_be_deleted = result.get("original_path")
logger.warning(f"Path to be deleted is {path_to_be_deleted}")
try:
os.remove(path_to_be_deleted)
logger.success(f"{path_to_be_deleted} is deleted now")
except Exception as e:
return response.json(
{
'error': True,
'success': False,
"message": f"Original path at {path_to_be_deleted} couldn't be deleted because of {e.__str__()}",
"data": None
})
return response.json(
{
'error': False,
'success': True,
"message": f"Original path at {path_to_be_deleted} is deleted",
"data": None
})
| 5,343,638
|
def demonstration():
"""
This will render a template that displays all of the form objects if it's
a Get request. If the use is attempting to Post then this view will push
the data to the database.
"""
#this part's a little hard to understand: flask-wtforms does an implicit
#call each time you create a form object. It attempts to see if there's a
#request.form object in this session and if there is it adds the data from
#the request to the form object.
registration_form = RegistrationForm()
#Before we attempt to validate our form data we have to set our select
#field choices. This is just something you need to do if you're going to
#use WTForms, even if it seems silly.
populate_form_choices(registration_form)
#This means that if we're not sending a post request then this if statement
#will always fail. So then we just move on to render the template normally.
if flask.request.method == 'POST' and registration_form.validate():
#If we're making a post request and we passed all the validators then
#create a registered user model and push that model to the database.
print("DEBUGGGGG")
registered_user = RegisteredUser(
first_name=registration_form.data['first_name_field'],
last_name=registration_form.data['last_name_field'],
address_line_one=registration_form.data['address_line_one_field'],
address_line_two=registration_form.data['address_line_two_field'],
city=registration_form.data['city_field'],
state_id=registration_form.data['state_select_field'],
country_id=registration_form.data['country_select_field'],)
db.session.add(registered_user)
db.session.commit()
print("DEBUGGGGGENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD")
flask.flash("This data was saved to the database!")
return flask.redirect(flask.url_for(
'user_detail',user_id=registered_user.registered_id))
return flask.render_template(
template_name_or_list='registration.html',
registration_form=registration_form,)
| 5,343,639
|
def display_guessed_text(X: str, options: Dict, schemas: Optional[List[str]] = None):
"""Displays all the guessed values of TEXT for a given DNA (``X``), and for a list of schemas."""
st.info(dedent("""### Guess ❓ DNA ➡️ TEXT"""))
with st.container():
col1, col2 = st.columns([6, 1])
with col1:
schemas = list(schemas if schemas else Defaults.CONVERSION_SCHEMAS)
num_schemas = len(schemas)
filler = "all " if num_schemas == len(Defaults.CONVERSION_SCHEMAS) else ""
filler += str(num_schemas)
st.write(
dedent(
f"""
Click the **Guess** button to guess the `TEXT` from the given `DNA` for {filler} schemas.
"""
)
)
with col2:
guess = st.button("Guess")
# Show guessed results
if guess and options["convert_to"] == "TEXT":
with st.expander("Click to see guesses 👇", expanded=False):
_ = generate_guesses(X, options)
| 5,343,640
|
def show_current_task():
"""
Show the currently running task.
:return:
"""
try:
current_user_name = session["user_name"]
current_user = RedisService.get_user(current_user_name)
current_task = TaskService.get_working_tasks(user_id=current_user.id)[0]
if current_task:
hook_rule = RedisService.get_task(current_task.id)["hook_rule"]
unscaned_url_num = UrlService.count(where=(Url.task_id == current_task.id, Url.status != TaskStatus.DONE))
scaned_url_num = UrlService.count(where=(Url.task_id == current_task.id, Url.status == TaskStatus.DONE))
total_url_num = unscaned_url_num + scaned_url_num
if current_task.task_status in [TaskStatus.KILLED, TaskStatus.DONE]:
percent = 100
else:
percent = 0 if total_url_num == 0 else (scaned_url_num / total_url_num) * 100
response_data = jsonify(status=200, message="Query successful",
data={'receiver_emails': current_task.receivers_email,
'task_name': current_task.task_name,
'create_time': current_task.created_time.strftime("%Y-%m-%d %H:%M"),
'percent': percent,
'unscaned_url_num': unscaned_url_num, 'scaned_url_num': scaned_url_num,
'total_url_num': total_url_num, 'hook_rule': hook_rule,
'task_id': current_task.id, "task_access_key": current_task.access_key,
'task_status': current_task.task_status, 'user_name': current_user_name})
return response_data
except Exception as e:
if isinstance(e, IndexError):
return jsonify(status=400, message="Fetch failed", data={"extra_info": "No task is running in the backend; please log in to the backend and create a task"})
logger.exception("show_current_task rasie error")
return jsonify(status=500, message="Fetch failed", data={"extra_info": "Unknown exception; contact the administrator to check the backend"})
| 5,343,641
|
def access_image(access_code:str):
"""
Download an image.
post header : {
Content-Type: application/json,
access_token: access_token from vans-token-manager
client_id: client_id from vans-token-manager conf. create by developers.
}
:return:
"""
try:
from rsc.service.ImageService import ImageService
# fetch the image
service = ImageService()
image_data, mime = service.get_image(access_code)
data = json_res_success({"image":image_data, "mime":mime})
return render_json(data)
except Exception as e:
return E400(str(e))
| 5,343,642
|
def decorate_xyi_ax(ax: Axes, tax: TernaryAxesSubplot, node_counts: Dict[str, int]):
"""
Decorate xyi plot.
"""
xcount, ycount, icount = _get_xyi_counts(node_counts)
text = "\n".join(
(
f"n: {xcount+ycount+icount}",
f"X-nodes: {xcount}",
f"Y-nodes: {ycount}",
f"I-nodes: {icount}",
)
)
initialize_ternary_points(ax, tax)
tern_plot_the_fing_lines(tax)
prop = dict(boxstyle="square", facecolor="linen", alpha=1, pad=0.45)
ax.text(
0.85,
1.05,
text,
transform=ax.transAxes,
fontsize="medium",
weight="roman",
verticalalignment="top",
bbox=prop,
fontfamily="DejaVu Sans",
ha="center",
)
| 5,343,643
|
def workaround_issue_20(handler):
"""
Workaround for
https://github.com/pytest-dev/pytest-services/issues/20,
disabling installation of a broken handler.
"""
return hasattr(handler, 'socket')
| 5,343,644
|
def item_count(sequences, sequence_column_name):
"""
input:Dataframe sequences
"""
item_max_id = sequences[sequence_column_name].map(max).max()
return int(item_max_id)
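For example, with a DataFrame whose sequence column holds lists of item ids:
df = pd.DataFrame({"sequence": [[1, 4, 2], [7, 3]]})
item_count(df, "sequence")  # -> 7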
| 5,343,645
|
def bare_stft(x: Tensor, padded_window: Tensor, hop_size: int) -> Tensor:
"""Compute STFT of real 1D signal.
This function does not handle padding of x, and the window tensor.
This function assumes fft_size = window_size.
Args:
x: [..., n_sample]
padded_window: [fft_size], a window padded to fft_size.
hop_size: Also referred to as the frame shift.
Returns:
n_frame: see frame_signal definition.
X: [..., n_frame, fft_size],
where n_frame = n_sample // hop_size
"""
fft_size = len(padded_window)
# Squash x's batch_sizes
batch_size = x.shape[:-1]
n_sample = x.size(-1)
squashed_x = x.reshape(-1, 1, n_sample)
# shape: [prod(batch_size), 1, n_sample]
framed_squashed_x = frame_signal(squashed_x, fft_size, hop_size)
# shape: [prod(batch_size), fft_size, n_frame]
windowed_framed_squashed_x = \
framed_squashed_x * padded_window.unsqueeze(-1)
squashed_X = fft(
windowed_framed_squashed_x.transpose(-1, -2), dim=-1
) # shape: [prod(batch_size), n_frame, fft_size]
X = squashed_X.reshape(*batch_size, *(squashed_X.shape[1:]))
# shape: [*batch_size, n_frame, fft_size]
return X
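A hypothetical usage sketch, assuming frame_signal yields n_sample // hop_size frames and the module-level fft behaves like torch.fft.fft over the last dimension:
import torch
x = torch.randn(2, 16000)                      # two 1-second signals at 16 kHz
window = torch.hann_window(400)
padded_window = torch.nn.functional.pad(window, (0, 512 - 400))  # pad the window to fft_size = 512
X = bare_stft(x, padded_window, hop_size=160)  # complex tensor of shape [2, 100, 512]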
| 5,343,646
|
def IsDragResultOk(*args, **kwargs):
"""IsDragResultOk(int res) -> bool"""
return _misc_.IsDragResultOk(*args, **kwargs)
| 5,343,647
|
def set_config(args):
"""
Get the config from file and override it with the parsed command-line hyper-parameters.
"""
configs = 'configs'
cfg = getattr(__import__(configs, fromlist=[args.config_file]),
args.config_file)
config = cfg.res50_config()
config['data_url'] = DATA_PATH
config['log_dir'] = OUTPUT_PATH
config['model_dir'] = OUTPUT_PATH
config['ckpt_dir'] = OUTPUT_PATH
# set param from parse
config['iterations_per_loop'] = int(args.iterations_per_loop)
config['max_train_steps'] = int(args.max_train_steps)
config['debug'] = args.debug
config['eval'] = args.eval
config['model_dir'] = args.model_dir
config['batch_size'] = args.batch_size
config['global_batch_size'] = config['batch_size'] * config['rank_size']
config['num_classes'] = args.num_classes
config['num_epochs'] = args.num_epochs
config['learning_rate_maximum'] = args.learning_rate_maximum
config['restore_path'] = os.path.join(DATA_PATH, "ckpt",
input_args.restore_path)
print("iterations_per_loop :%d" % (config['iterations_per_loop']))
print("max_train_steps :%d" % (config['max_train_steps']))
print("debug :%s" % (config['debug']))
print("eval :%s" % (config['eval']))
print("model_dir :%s" % (config['model_dir']))
print("batch_size :%d" % (config['batch_size']))
if config['num_epochs']:
print("num_epochs :%d" % (config['num_epochs']))
print("learning_rate_maximum :%f" % (config['learning_rate_maximum']))
print("num_classes :%d" % (config['num_classes']))
print("restore_path :%s" % (config['restore_path']))
return config
| 5,343,648
|
def excel_convert_xls_to_xlsx(xls_file_path='',xlsx_file_path=''):
"""
Converts given XLS file to XLSX
"""
try:
# Checking the path and then converting it to xlsx file
from xls2xlsx import XLS2XLSX
if os.path.exists(xls_file_path):
# converting xls to xlsx
x2x = XLS2XLSX(xls_file_path)
x2x.to_xlsx(xlsx_file_path)
return True
except ValueError:
print("Input is not supported, could not convert string to float. Please check the inputs, and try again.")
text_to_speech("Input is not supported, could not convert string to float. Please check the inputs, and try again.", show=False)
except PermissionError:
print("Please close the excel file, and try again.")
text_to_speech("Please close the excel file, and try again.", show=False)
except op.utils.exceptions.InvalidFileException:
print("We currently support only : xlsx,.xlsm,.xltx,.xltm files. Please try again with one of those file formats.")
text_to_speech("we currently support only : .xlsx,.xlsm,.xltx,.xltm files. Please try again with one of those file formats.", show=False)
except Exception as e:
errMsg = f"Error in converting file to xlsx format : {str(e)}"
return errMsg
| 5,343,649
|
def save_info(filename, infos):
"""Save .txt info.
Args:
:param filename: (str) output filename (e.g. test.txt)
:param infos: (list) <teiHeader></teiHeader> contents
"""
filename = (os.getcwd() + filename).replace(".txt", "")
file = open(filename + "_info.txt", "w", encoding="utf-8")
for info in infos:
file.write(info + "\n")
file.close()
| 5,343,650
|
def load_model(fn=None):
"""Load the stored model.
"""
if fn is None:
fn = os.path.dirname(os.path.abspath(__file__)) + \
"/../models/model_default.h5"
return keras.models.load_model(fn)
| 5,343,651
|
def aws_s3_bucket_objects(var):
"""
Creates aws_s3_bucket_object resources for all files in the given
source directory. This is using the "collections" API to create
a reusable function that generates resources.
"""
# Inputs.
yield variable.bucket()
yield variable.prefix(default="")
yield variable.source()
# Get the resource name of the bucket,
# to be used in object resource names.
bucket_label = labels.get(var.bucket)
total_files = 0
total_bytes = 0
# Resources.
for path in Path(var.source).rglob("*"):
if path.is_file():
key = f"{var.prefix}{path.relative_to(var.source)}"
object_label = labels.clean(f"{bucket_label}/{key}")
yield resource.aws_s3_bucket_object[object_label](
bucket=var.bucket.id,
key=key,
source=path,
)
total_files += 1
total_bytes += os.path.getsize(path)
# Outputs.
yield output.total_files(value=total_files)
yield output.total_bytes(value=total_bytes)
| 5,343,652
|
def element_wise(counter_method):
"""This is a decorator function allowing multi-process/thread input.
Note that this decorator should always follow the decorator 'tag_maker'.
"""
def _make_iterator(*args):
"""Make a compound iterator from a process iterator and
a thread one.
Note that 'Application' case should not execute this
function."""
monitor_level = args[1]
arg_pid = args[2]
if hasattr(arg_pid, '__iter__'):
pid_itr = (i for i in xrange(arg_pid[0], arg_pid[1]))
else:
pid_itr = (arg_pid,)
if monitor_level == 'Thread':
arg_tid = args[3]
if hasattr(arg_tid, '__iter__'):
tid_itr = (i for i in xrange(arg_tid[0], arg_tid[1]))
else:
tid_itr = (arg_tid,)
if monitor_level == 'Process':
return_itr = pid_itr
elif monitor_level == 'Thread':
return_itr = (pid_itr, tid_itr)
return return_itr
@wraps(counter_method)
def _element_wise(*args):
"""Distribute multi-process/thread input"""
if args[1] == 'Thread':
pid_itr, tid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid, tid)
for pid, tid in product(pid_itr, tid_itr)]
return np.array(retval)
elif args[1] == 'Process':
pid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid) for pid in pid_itr]
return np.array(retval)
elif args[1] == 'Application':
return np.array(counter_method(*args))
else:
print 'Unknown monitor level'
return _element_wise
| 5,343,653
|
def get_datacite_dates(prefix):
"""Get the registration date and URL for DataCite DOIs with a specific prefix."""
doi_dates = {}
doi_urls = {}
url = (
"https://api.datacite.org/dois?query=prefix:"
+ prefix
+ "&page[cursor]=1&page[size]=500"
)
next_link = url
meta = requests.get(next_link).json()["meta"]
for j in progressbar(range(meta["totalPages"])):
r = requests.get(next_link)
data = r.json()
for doi in data["data"]:
date = doi["attributes"]["registered"].split("T")[0]
doi_dates[doi["id"]] = date
doi_urls[doi["id"]] = doi["attributes"]["url"]
if "next" in data["links"]:
next_link = data["links"]["next"]
else:
next_link = None
return doi_dates, doi_urls
| 5,343,654
|
def rate_answer():
"""
**Rates an already given answer**
**Args:**
* json:
* {"insight" : String with the name of the Insight
* "paper_id" : String with the paper_id, which in our case is the complete link to the paper
* "upvote" : Boolean if the answer was upvoted(= true) or downvoted (= false)
* "answer" : String with the Answer}
**Returns:**
* json:
* {'status': 'success'}
"""
response_object = {'status': 'success'}
#fetch data from request
post_data = request.get_json()
in_insight_name = post_data.get('insight')
in_paper_id = post_data.get('paper_id')
in_paper_id = url_checker(in_paper_id)
in_upvote = post_data.get('upvote')
in_answer = post_data.get('answer')
#query 'information'
inf = Information.query.filter(Information.paper_id == in_paper_id).filter(Information.insight_name==str(in_insight_name)).first()
#query 'answers'
ans = Answers.query.filter(Answers.information_id==inf.information_id).all()
#upvote correct answer
if (in_upvote):
for a in ans:
if (a.answer==in_answer):
a.answer_upvotes = a.answer_upvotes + 1
a.answer_score = a.answer_score + 1
#downvote correct answer
else :
for a in ans:
if (a.answer==in_answer):
a.answer_downvotes = a.answer_downvotes + 2
a.answer_score = a.answer_score - 2
db.session.commit()
return jsonify(response_object)
| 5,343,655
|
def get_ip():
"""
Query the ipify service (https://www.ipify.org) to retrieve this machine's
public IP address.
:rtype: string
:returns: The public IP address of this machine as a string.
:raises: ConnectionError if the request couldn't reach the ipify service,
or ServiceError if there was a problem getting the IP address from
ipify's service.
"""
try:
resp = _get_ip_resp()
except RequestException:
raise ConnectionError("The request failed because it wasn't able to reach the ipify service. This is most likely due to a networking error of some sort.")
if resp.status_code != 200:
raise ServiceError('Received an invalid status code from ipify:' + str(resp.status_code) + '. The service might be experiencing issues.')
return resp.text
| 5,343,656
|
def update_plot(p1, p2, arrow, txt, ax, fig, reset_points, line):
"""
Given a line with an agent's move and the current plot, update
the plot based on the agent's move.
"""
l = line.strip()
if 'Agent score' in l:
txt.remove()
txt = plt.text(2, 33, 'Agent Score: {0:.2f}'.format(float(l.split()[2])),
fontsize=8)
reset_points = True
else:
p = ast.literal_eval(l[l.find('('):])
if 'actually at point' in l:
p1 = Circle(p, radius=0.2, facecolor='yellow')
ax.add_patch(p1)
elif 'actually attempting to reach point' in l:
p2 = Circle(p, radius=0.2, facecolor='green')
ax.add_patch(p2)
elif 'now at point' in l:
arrow = YAArrow(fig, p2.center, p1.center, width=0.1,
headwidth=0.5, facecolor='red')
ax.add_patch(arrow)
elif 'Resetting agent to point' in l:
p2 = Circle(p, radius=1, facecolor='green')
ax.add_patch(p2)
arrow = YAArrow(fig, p2.center, p1.center, width=0.25,
headwidth=1, facecolor='red')
ax.add_patch(arrow)
return p1, p2, arrow, txt, ax, fig, reset_points
| 5,343,657
|
def upload_file_view(request):
"""Upload file page and retrieve headers"""
data = {}
global ROW_COUNT
if request.method == "GET":
return render(request, "pages/upload-file.html", data)
try:
if request.FILES:
csv_file = request.FILES['csv_file']
request.session['csv'] = str(csv_file)
if not csv_file.name.endswith('.csv'):
messages.error(request, 'File is not CSV type')
return redirect('upload-file')
decoded_file = csv_file.read().decode('utf-8').splitlines()
reader = csv.DictReader(decoded_file)
data['fieldnames'] = reader.fieldnames
data['filename'] = csv_file.name
fs = FileSystemStorage()
fs.save(csv_file.name, csv_file)
file = FilesStatus.objects.create(
user=request.user,
file_name=csv_file.name,
)
ROW_COUNT = sum(1 for row in reader)
request.session['file_status'] = file.id
else:
messages.error(request, 'No file was selected.')
return redirect('upload-file')
    except IOError:
        messages.error(request, 'Could not read file')
        return redirect('upload-file')
return render(request, 'pages/upload-file.html', data)
| 5,343,658
|
def get_user_by_login_identifier(user_login_identifier) -> Optional[UserSchema]:
"""Get a user by their login identifier.
:param str user_login_identifier: The user's login identifier, either their \
``email`` or ``display_name`` are valid inputs
:return: The discovered user if they exist
:rtype: Optional[UserSchema]
"""
user = get_user_by_email(user_email=user_login_identifier)
if not user:
return get_user_by_display_name(user_display_name=user_login_identifier)
return user
| 5,343,659
|
def main():
""" This program has RVR drive around in different directions.
"""
try:
rvr.wake()
# Give RVR time to wake up
time.sleep(2)
rvr.reset_yaw()
rvr.raw_motors(
left_mode=RawMotorModesEnum.forward.value,
left_duty_cycle=128, # Valid duty cycle range is 0-255
right_mode=RawMotorModesEnum.forward.value,
right_duty_cycle=128 # Valid duty cycle range is 0-255
)
# Delay to allow RVR to drive
time.sleep(1)
rvr.raw_motors(
left_mode=RawMotorModesEnum.reverse.value,
left_duty_cycle=64, # Valid duty cycle range is 0-255
right_mode=RawMotorModesEnum.reverse.value,
right_duty_cycle=64 # Valid duty cycle range is 0-255
)
# Delay to allow RVR to drive
time.sleep(1)
rvr.raw_motors(
left_mode=RawMotorModesEnum.reverse.value,
left_duty_cycle=128, # Valid duty cycle range is 0-255
right_mode=RawMotorModesEnum.forward.value,
right_duty_cycle=128 # Valid duty cycle range is 0-255
)
# Delay to allow RVR to drive
time.sleep(1)
rvr.raw_motors(
left_mode=RawMotorModesEnum.forward.value,
left_duty_cycle=128, # Valid duty cycle range is 0-255
right_mode=RawMotorModesEnum.forward.value,
right_duty_cycle=128 # Valid duty cycle range is 0-255
)
# Delay to allow RVR to drive
time.sleep(1)
rvr.raw_motors(
left_mode=RawMotorModesEnum.off.value,
left_duty_cycle=0, # Valid duty cycle range is 0-255
right_mode=RawMotorModesEnum.off.value,
right_duty_cycle=0 # Valid duty cycle range is 0-255
)
# Delay to allow RVR to drive
time.sleep(1)
except KeyboardInterrupt:
print('\nProgram terminated with keyboard interrupt.')
finally:
rvr.close()
| 5,343,660
|
def org_profile_post_delete_callback(sender, instance, **kwargs):
"""
Signal handler to delete the organization user object.
"""
# delete the org_user too
instance.user.delete()
safe_delete('{}{}'.format(IS_ORG, instance.pk))
| 5,343,661
|
def decode_fixed64(buf, pos):
"""Decode a single 64 bit fixed-size value"""
return decode_struct(_fixed64_fmt, buf, pos)
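# Hedged sketch of the helpers this function relies on; these are assumptions, not the
# library's actual code. A protobuf fixed64 value is a little-endian unsigned 64-bit
# integer, and decode_struct is assumed to return (value, new_position).
import struct

_fixed64_fmt = struct.Struct('<Q')

def decode_struct(fmt, buf, pos):
    # Unpack one fixed-size value at `pos` and advance past it.
    return fmt.unpack_from(buf, pos)[0], pos + fmt.size

print(decode_fixed64(b'\x2a' + b'\x00' * 7, 0))  # (42, 8)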
| 5,343,662
|
def test_nameconflict(testdir):
"""tests that a name conflict raises an exception"""
testdir.makeconftest(dedent("""
from pytest_pilot import EasyMarker
colormarker = EasyMarker('color', mode="extender", allowed_values=('red', 'yellow'))
"""))
result = testdir.runpytest(testdir.tmpdir)
expected_lines = "ValueError: Error registering <Pytest marker 'color' with CLI option '--color' " \
"and decorator '@pytest.mark.color(<color>)'>: a command with this long or short name already " \
"exists. Conflicting name(s): ['--color']"
result.stderr.fnmatch_lines(expected_lines)
| 5,343,663
|
def from_xfr(xfr, zone_factory=Zone, relativize=True):
"""Convert the output of a zone transfer generator into a zone object.
@param xfr: The xfr generator
@type xfr: generator of dns.message.Message objects
@param relativize: should names be relativized? The default is True.
It is essential that the relativize setting matches the one specified
to dns.query.xfr().
@type relativize: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
rd.choose_relativity(z.origin, relativize)
zrds.add(rd)
z.check_origin()
return z
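# Hedged usage sketch with dnspython: pull a zone via AXFR and build a Zone object.
# '10.0.0.1' and 'example.com' are placeholders for a reachable primary server and zone.
import dns.query

axfr = dns.query.xfr('10.0.0.1', 'example.com', relativize=True)
zone = from_xfr(axfr, relativize=True)
print(sorted(zone.nodes.keys()))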
| 5,343,664
|
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Smile switches from a config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
entities: list[PlugwiseSwitchEntity] = []
for device_id, device in coordinator.data.devices.items():
for description in SWITCHES:
if "switches" not in device or description.key not in device["switches"]:
continue
entities.append(PlugwiseSwitchEntity(coordinator, device_id, description))
async_add_entities(entities)
| 5,343,665
|
def test_read_csv():
""" blah """
text = io.StringIO('exercise,reps\nBurpees,20-50\nSitups,40-100')
assert list(read_csv(text)) == [('Burpees', 20, 50), ('Situps', 40, 100)]
| 5,343,666
|
def compare_sql_datetime_with_string(filter_on, date_string):
"""Filter an SQL query by a date or range of dates
Returns an SQLAlchemy `BinaryExpression` that can be used in a call to
`filter`.
`filter_on` should be an SQLAlchemy column expression that has a date or
datetime value.
`date_string` is a string that includes date(s) in format
`YYYY-MM-DD` and a range operator such as `>` or `<=`.
In full:
====================== ========================================
Query Description
====================== ========================================
YYYY-MM-DD Matches dates on day
>YYYY-MM-DD Matches dates after day
>=YYYY-MM-DD Matches dates on or after day
<YYYY-MM-DD Matches dates before day
<=YYYY-MM-DD Matches dates on or before day
YYYY-MM-DD..YYYY-MM-DD Matches dates between days (inclusively)
====================== ========================================
Examples:
>>> from app.models.main import AuditEvent
>>> # Equivalent to AuditEvent.created_at >= datetime.date(2012, 1, 1)
>>> compare_sql_datetime_with_string(AuditEvent.created_at, ">=2012-01-01")
<sqlalchemy.sql.elements.BinaryExpression object ...>
    >>> # Equivalent to AuditEvent.created_at.between(datetime.date(2010, 1, 1), datetime.date(2019, 1, 31))
>>> AuditEvent.query.filter(
compare_sql_datetime_with_string(AuditEvent.created_at, "2010-01-01..2019-01-31"))
<app.models.main.AuditEvent.query_class object ...>
"""
filter_test = None
def parse_date(s):
return datetime.datetime.strptime(s, DATE_FORMAT)
if date_string.startswith(">="):
date = parse_date(date_string[2:])
filter_test = (filter_on >= date)
elif date_string.startswith(">"):
date = parse_date(date_string[1:])
filter_test = (filter_on > date)
elif date_string.startswith("<="):
date = parse_date(date_string[2:])
filter_test = (filter_on <= date)
elif date_string.startswith("<"):
date = parse_date(date_string[1:])
filter_test = (filter_on < date)
elif ".." in date_string:
args = date_string.partition("..")
from_ = parse_date(args[0])
to_ = parse_date(args[2])
filter_test = filter_on.between(from_, to_)
else:
date = parse_date(date_string)
filter_test = filter_on.between(date, date + datetime.timedelta(days=1))
return filter_test
| 5,343,667
|
def get_users():
"""get_users() -> Fetch all users in the database"""
connect() # Connect
cursor.execute("SELECT * FROM users") # Select all users
item = cursor.fetchall()
users = []
for user in item:
users.append(format_user(user)) # Format the users
disconnect()
return users
| 5,343,668
|
def get_decoder_self_attention_bias(length):
"""Calculate bias for decoder that maintains model's autoregressive property.
Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.
Args:
length: int length of sequences in batch.
Returns:
float tensor of shape [1, 1, length, length]
"""
with tf.name_scope("decoder_self_attention_bias"):
valid_locs = tf.linalg.band_part(tf.ones([length, length]), -1, 0)
valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
decoder_bias = _NEG_INF * (1.0 - valid_locs)
return decoder_bias
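# Hedged sketch: an equivalent NumPy construction of the same causal bias, for readers
# without TensorFlow. _NEG_INF is assumed to be a large negative constant such as -1e9.
import numpy as np

def causal_bias(length, neg_inf=-1e9):
    valid = np.tril(np.ones((length, length), dtype=np.float32))  # lower triangle = legal
    return (neg_inf * (1.0 - valid))[None, None]                  # shape [1, 1, length, length]

print(causal_bias(3)[0, 0])  # upper triangle is -1e9 (masked), lower triangle and diagonal are 0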
| 5,343,669
|
def copy_run_set_files(run_desc, desc_file, run_set_dir, run_dir, agrif_n=None):
"""Copy the run-set files given into run_dir.
The YAML run description file (from the command-line) is copied.
The IO defs file is also copied.
The file path/name of the IO defs file is taken from the :kbd:`output`
stanza of the YAML run description file.
The IO defs file is copied as :file:`iodef.xml` because that is the
name that XIOS expects.
The domain defs and field defs files used by XIOS are also copied.
Those file paths/names of those file are taken from the :kbd:`output`
stanza of the YAML run description file.
They are copied to :file:`domain_def.xml` and :file:`field_def.xml`,
respectively, because those are the file names that XIOS expects.
Optionally, the file defs file used by XIOS-2 is also copied.
Its file path/name is also taken from the :kbd:`output` stanza.
It is copied to :file:`file_def.xml` because that is the file name that
XIOS-2 expects.
:param dict run_desc: Run description dictionary.
:param desc_file: File path/name of the YAML run description file.
:type desc_file: :py:class:`pathlib.Path`
:param run_set_dir: Directory containing the run description file,
from which relative paths for the namelist section
files start.
:type run_set_dir: :py:class:`pathlib.Path`
:param run_dir: Path of the temporary run directory.
:type run_dir: :py:class:`pathlib.Path`
:param int agrif_n: AGRIF sub-grid number.
"""
try:
iodefs = get_run_desc_value(
run_desc,
("output", "iodefs"),
resolve_path=True,
run_dir=run_dir,
fatal=False,
)
except KeyError:
# Alternate key spelling for backward compatibility
iodefs = get_run_desc_value(
run_desc, ("output", "files"), resolve_path=True, run_dir=run_dir
)
run_set_files = [
(iodefs, "iodef.xml"),
(run_set_dir / desc_file.name, desc_file.name),
]
try:
keys = ("output", "domaindefs")
domain_def_filename = "domain_def.xml"
if agrif_n is not None:
keys = ("output", "AGRIF_{agrif_n}".format(agrif_n=agrif_n), "domaindefs")
domain_def_filename = "{agrif_n}_domain_def.xml".format(agrif_n=agrif_n)
domains_def = get_run_desc_value(
run_desc, keys, resolve_path=True, run_dir=run_dir, fatal=False
)
except KeyError:
# Alternate key spelling for backward compatibility
keys = ("output", "domain")
if agrif_n is not None:
keys = ("output", "AGRIF_{agrif_n}".format(agrif_n=agrif_n), "domain")
domains_def = get_run_desc_value(
run_desc, keys, resolve_path=True, run_dir=run_dir
)
try:
fields_def = get_run_desc_value(
run_desc,
("output", "fielddefs"),
resolve_path=True,
run_dir=run_dir,
fatal=False,
)
except KeyError:
# Alternate key spelling for backward compatibility
fields_def = get_run_desc_value(
run_desc, ("output", "fields"), resolve_path=True, run_dir=run_dir
)
run_set_files.extend(
[(domains_def, domain_def_filename), (fields_def, "field_def.xml")]
)
try:
keys = ("output", "filedefs")
file_def_filename = "file_def.xml"
if agrif_n is not None:
keys = ("output", "AGRIF_{agrif_n}".format(agrif_n=agrif_n), "filedefs")
file_def_filename = "{agrif_n}_file_def.xml".format(agrif_n=agrif_n)
files_def = get_run_desc_value(
run_desc, keys, resolve_path=True, run_dir=run_dir, fatal=False
)
run_set_files.append((files_def, file_def_filename))
except KeyError:
# `files` key is optional and only used with XIOS-2
pass
for source, dest_name in run_set_files:
shutil.copy2(fspath(source), fspath(run_dir / dest_name))
_set_xios_server_mode(run_desc, run_dir)
| 5,343,670
|
def rollout_discrete(
x_grid: Tensor,
idx: Union[int, Tensor],
model: Model,
best_f: Union[float, Tensor],
bounds: Tensor,
quadrature: Union[str, Tuple] = "qmc",
horizon: int = 4,
num_y_samples: int = 10,
):
"""
    discrete-domain rollout over a candidate grid; the expectation over y is estimated with (quasi) Monte Carlo or Gauss-Hermite quadrature
EI_rollout(x) = E_y[ max(y-y0,0) + EI_rollout(x'| x, y) ], where x'=argmax EI(x' | x, y)
define f(y) = max(y-y0,0) + EI_rollout(x'|x,y)
then
EI_rollout(x) = \int w(y) f(y) dy
where the weight function w(y) is a Gaussian density function N(mu, sigma^2)
We can estimate this integral using quasi Monte Carlo samples from w(y)
or use Gauss-Hermite quadrature, as in Lam et al. (2016):
such a integration can be transformed into the standard Gaussian-Hermite quadrature formulation
EI_rollout(x) = 1/sqrt(pi) \int exp(-t^2) f(sqrt(2)*sigma*t+mu) dt, where t = (y-mu)/sqrt(2)/sigma
We first generate Gauss-Hermite quadrature sample locations t_i and weights w_i using numpy.polynomial.hermite.hermgauss
then estimate the expectation by
EI_rollout(x) \approx 1/sqrt(pi) \sum_i w_i f(sqrt(2)*sigma*t_i +mu)
    :param x_grid: discrete grid of candidate points, shape (n, d)
    :param idx: index of the candidate point in x_grid to evaluate
    :param model: the GP model
    :param best_f: current best observed value
    :param bounds: bounds of the domain, shape (2, d)
    :param quadrature: "qmc" for quasi Monte Carlo, otherwise a tuple selects Gauss-Hermite quadrature
    :param horizon: rollout horizon
    :param num_y_samples: number of (quasi) Monte Carlo samples or quadrature nodes used to estimate the integral
    :return: estimated rollout expected improvement at x_grid[idx]
"""
    x = x_grid[idx]
    if horizon == 1:
        acq_func = ExpectedImprovement(model=model, best_f=best_f)
        return acq_func(x).item()
# compute posterior
posterior = model.posterior(x)
if isinstance(quadrature, str) and quadrature == "qmc": # quasi Monte Carlo
with torch.no_grad():
sampler = SobolQMCNormalSampler(num_samples=num_y_samples)
samples = sampler(posterior).squeeze().numpy()
weights = torch.ones(num_y_samples) / num_y_samples
elif isinstance(quadrature, Tuple):
mu = posterior.mean.item()
sigma = torch.sqrt(posterior.variance).item()
samples, weights = np.polynomial.hermite.hermgauss(num_y_samples)
samples = np.sqrt(2.0) * sigma * samples + mu
weights /= np.sqrt(math.pi)
improvement_of_samples = np.zeros(num_y_samples)
for i in range(num_y_samples):
y_sample = samples[i]
one_step_improvement = max(y_sample - best_f, 0)
fake_model: Model = deepcopy(model)
x0 = model.train_inputs[0]
y0 = model.train_targets
train_x = torch.cat([x0, x.unsqueeze(0)], -2)
train_y = torch.cat([y0, Tensor([y_sample])])
fake_model.reinitialize(train_X=train_x, train_Y=train_y)
best_f_new = max(best_f, y_sample) # maximization problem
acq_func = ExpectedImprovement(model=fake_model, best_f=best_f_new)
        ei_values = acq_func(x_grid)  # evaluate EI over the whole candidate grid
idx = torch.argmax(ei_values)
        future_reward = rollout_discrete(
x_grid,
idx,
model=fake_model,
best_f=best_f_new,
bounds=bounds,
quadrature=quadrature,
horizon=horizon - 1,
num_y_samples=num_y_samples,
)
improvement_of_samples[i] = one_step_improvement + future_reward
return improvement_of_samples.dot(weights)
| 5,343,671
|
def get_accumulative_accuracies(test_loaders, taskcla, result_file, network_cls='resnet32'):
""" Confusion matrix with progressively more classes considered """
iter_model = iter_task_models(network_cls, taskcla, result_file)
accuracies = np.zeros((len(taskcla), len(taskcla)))
classes_so_far = 0.
for task_model, model in enumerate(iter_model):
for task_eval in range(0, task_model+1):
full_test_loader = itertools.chain.from_iterable(test_loaders[:task_eval+1])
with torch.no_grad():
totals = 0.
correct = 0.
logits_mask = np.arange(sum([taskcla[i][1] for i in range(0, task_eval+1)]))
for inputs, targets in full_test_loader:
inputs = inputs.to(device)
targets = targets.to(device)
outputs = torch.cat(model(inputs), dim=1)
outputs = outputs[:, logits_mask]
preds = outputs.argmax(dim=1)
correct += (preds == targets).double().sum()
totals += len(targets)
accuracies[task_model, task_eval] = correct/totals
return accuracies
| 5,343,672
|
def readout_oper(config):
"""get the layer to process the feature asnd the cls token
"""
class Drop(object):
"""drop class
just drop the cls token
"""
def __init__(self, config):
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
def __call__(self, x):
x = x[:, self.token_num:]
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
class Add(object):
"""add class
add the cls token
"""
def __init__(self, config):
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
def __call__(self, x):
token = x[:, :self.token_num]
token = paddle.sum(token, axis=1).unsqueeze(1)
x = x[:, self.token_num:]
x = x + token
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
class Proj(nn.Layer):
"""porject class
use a linear layer to confuse the feature and the cls token
"""
def __init__(self, config):
super(Proj, self).__init__()
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
self.proj = nn.Sequential(
nn.Linear(2 * config.MODEL.TRANS.HIDDEN_SIZE, config.MODEL.TRANS.HIDDEN_SIZE),
nn.GELU()
)
def forward(self, x):
token = x[:, :self.token_num]
token = paddle.sum(token, axis=1).unsqueeze(1)
x = x[:, self.token_num:]
token = token.expand_as(x)
x = paddle.concat([x, token], axis=-1)
x = self.proj(x)
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
if config.MODEL.DPT.READOUT_PROCESS == 'drop':
return [copy.deepcopy(Drop(config)) for _ in range(4)]
if config.MODEL.DPT.READOUT_PROCESS == 'add':
return [copy.deepcopy(Add(config)) for _ in range(4)]
if config.MODEL.DPT.READOUT_PROCESS =='project':
return nn.LayerList([copy.deepcopy(Proj(config)) for _ in range(4)])
return None
| 5,343,673
|
def report_asl(wsp):
"""
Generate a report page about the input ASL data
"""
page = wsp.report.page("asl")
page.heading("ASL input data")
md_table = [(key, value) for key, value in wsp.asldata.metadata_summary().items()]
page.table(md_table)
try:
# Not all data can generate a PWI
img = wsp.asldata.perf_weighted()
img_type = "Perfusion-weighted image"
except ValueError:
img = wsp.asldata.mean()
img_type = "Mean ASL data"
page.heading(img_type, level=1)
page.image("asldata", LightboxImage(img))
| 5,343,674
|
def stream_n_messages(request, n):
"""Stream n JSON messages"""
n = int(n)
response = get_dict(request, 'url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response, default=json_dumps_default) + '\n'
return Response(generate_stream(), headers={
"Content-Type": "application/json",
})
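# Hedged client-side sketch: consume the streamed JSON lines with `requests`.
# The base URL and the '/stream/5' route are placeholders for wherever this app is served.
import json
import requests

with requests.get('http://localhost:5000/stream/5', stream=True) as r:
    for line in r.iter_lines():
        if line:
            print(json.loads(line)['id'])  # 0, 1, 2, 3, 4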
| 5,343,675
|
def read_covid():
"""Read parsed covid table"""
return pd.read_csv(_COVID_FILE, parse_dates=["date"])
| 5,343,676
|
def install_pip(python=sys.executable, *,
info=None,
downloaddir=None,
env=None,
upgrade=True,
**kwargs
):
"""Install pip on the given Python executable."""
if not python:
python = getattr(info, 'executable', None) or sys.executable
# python -m ensurepip
args = ['-m', 'ensurepip', '-v'] # --verbose
if upgrade:
args.append('-U') # --upgrade
res = _utils.run_python(*args, python=python, **kwargs)
ec, _, _ = res
if ec == 0 and is_pip_installed(python, env=env):
return res
##############################
# Fall back to get-pip.py.
if not downloaddir:
downloaddir = '.'
os.makedirs(downloaddir, exist_ok=True)
# download get-pip.py
filename = os.path.join(downloaddir, 'get-pip.py')
if not os.path.exists(filename):
print("Download %s into %s" % (GET_PIP_URL, filename))
_utils.download(GET_PIP_URL, filename)
# python get-pip.py
argv = [python, '-u', filename]
version = get_best_pip_version(info or python)
if version:
argv.append(version)
res = _utils.run_cmd(argv, env=env)
ec, _, _ = res
if ec != 0:
# get-pip.py was maybe not properly downloaded: remove it to
# download it again next time
os.unlink(filename)
return res
| 5,343,677
|
def set_config(cfg):
"""Picks up the settings from the config files and automatically decides between using Local vs AWS DynamoDB"""
if cfg.db.service == 'docker':
print("Running with Local DynamoDB...")
db_path = cfg.get('db').get('path') or None
User.Meta.host = db_path
else:
print("Running with AWS DynamoDB using the credentials in the .env file...")
| 5,343,678
|
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(sys.modules.values()):
# We don't want to deal with six.MovedModules, only "real"
# modules.
if (isinstance(module, types.ModuleType) and
hasattr(module, '__warningregistry__')):
del module.__warningregistry__
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter('ignore', DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (py.test and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
for m in _modules_to_ignore_on_import:
try:
__import__(m)
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn DeprecationWarnings into exceptions
_all_warns = [DeprecationWarning]
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
_all_warns += [AstropyDeprecationWarning,
AstropyPendingDeprecationWarning]
for w in _all_warns:
warnings.filterwarnings("error", ".*", w)
# This ignores all deprecation warnings from given module(s),
# not just on import, for use of Astropy affiliated packages.
for m in _warnings_to_ignore_entire_module:
for w in _all_warns:
warnings.filterwarnings('ignore', category=w, module=m)
for v in _warnings_to_ignore_by_pyver:
if v is None or sys.version_info[:2] == v:
for s in _warnings_to_ignore_by_pyver[v]:
warnings.filterwarnings("ignore", s, DeprecationWarning)
| 5,343,679
|
def define_network(*addr):
"""gives all network related data or host addresses if requested
addr = tuple of arguments netaddr/mask[nb of requested hosts]
"""
if len(addr) == 2:
# provides list of host-addresses for this subnet
# we do this by calling the generator host_g
host_g = addr[0].hosts()
return [next(host_g).exploded for i in range(addr[1])]
else:
netdef = [(' Network Address:',
addr[0].network_address.exploded),
(' Broadcast Address:',
addr[0].broadcast_address.exploded),
(' Valid Hosts:', 2 ** (32 - addr[0].prefixlen)-2),
(' Wildcard Mask:', addr[0].hostmask.exploded),
(' Mask bits:', addr[0].prefixlen),
]
return [(' '+addr[0].network_address.exploded+'/32', '')] \
if addr[0].prefixlen == 32 else netdef
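# Hedged usage sketch: define_network expects an ipaddress network object, optionally
# followed by the number of host addresses wanted.
import ipaddress

net = ipaddress.ip_network('192.168.1.0/28')
print(define_network(net))     # network address, broadcast, 14 valid hosts, wildcard mask, mask bits
print(define_network(net, 3))  # ['192.168.1.1', '192.168.1.2', '192.168.1.3']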
| 5,343,680
|
def init_anim() -> List:
"""Initialize the animation."""
return []
| 5,343,681
|
def keep_digits(txt: str) -> str:
"""Discard from ``txt`` all non-numeric characters."""
return "".join(filter(str.isdigit, txt))
| 5,343,682
|
def test_repr(pip_Package):
"""Verifies the result returned from Package.__repr__()"""
assert pip_Package.__repr__() == "<Package(name=pip)>"
| 5,343,683
|
def camel_case_split(identifier):
"""Split camelCase function names to tokens.
Args:
identifier (str): Identifier to split
Returns:
        (list): split tokens with the original casing preserved, e.g. ['camel', 'Case']
"""
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
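# Quick usage check (relies on the `re` import of the surrounding module):
print(camel_case_split('camelCaseSplit'))      # ['camel', 'Case', 'Split']
print(camel_case_split('parseHTTPResponse'))   # ['parse', 'HTTP', 'Response']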
| 5,343,684
|
def _ensure_meadowrun_sqs_access_policy(iam_client: Any) -> str:
"""
Creates a policy that gives permission to read/write SQS queues for use with
grid_task_queue.py
"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.create_policy
ignore_boto3_error_code(
lambda: iam_client.create_policy(
PolicyName=_MEADOWRUN_SQS_ACCESS_POLICY_NAME,
PolicyDocument=_MEADOWRUN_SQS_ACCESS_POLICY_DOCUMENT,
),
"EntityAlreadyExists",
)
return (
f"arn:aws:iam::{_get_account_number()}:policy/"
f"{_MEADOWRUN_SQS_ACCESS_POLICY_NAME}"
)
| 5,343,685
|
def get_subset(dframe, strata, subsetno):
"""This function extracts a subset of the data"""
df_subset = pd.DataFrame(columns=list(dframe)) #initialize
df_real = dframe.dropna() #get rid of nans
edges = np.linspace(0, 1, strata+1) #edges of data strata
for i in range(0, strata):
df_temp = df_real[(df_real['gender diversity score'] > edges[i]) &
(df_real['gender diversity score'] < edges[i+1])]
        temp_ind = np.round(np.linspace(0, len(df_temp) - 1, int(subsetno / strata))).astype(int)
df_subset = pd.concat([df_subset,
df_temp.sort_values(by=['gender diversity score']).
iloc[temp_ind, :].reset_index(drop=True)], ignore_index=True)
return df_subset
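# Hedged usage sketch on synthetic data: three strata over the unit interval and a
# 6-row subset (two rows per stratum); the column names mirror what the function expects.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
demo = pd.DataFrame({'gender diversity score': rng.random(90),
                     'company': ['co_%d' % i for i in range(90)]})
print(len(get_subset(demo, strata=3, subsetno=6)))  # 6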
| 5,343,686
|
def create_logger(name: str) -> logging.Logger:
"""Create logger, adding the common handler."""
if name is None:
raise TypeError("name is None")
logger = logging.getLogger(name)
# Should be unique
logger.addHandler(_LOGGING_HANDLER)
return logger
| 5,343,687
|
def main():
"""Entry point of Kalliope program."""
# create arguments
parser = argparse.ArgumentParser(description='Kalliope')
parser.add_argument("action", help="[start|gui|install|uninstall]")
parser.add_argument("--run-synapse",
help="Name of a synapse to load surrounded by quote")
parser.add_argument("--run-order", help="order surrounded by a quote")
parser.add_argument("--brain-file", help="Full path of a brain file")
parser.add_argument("--debug", action='store_true',
help="Show debug output")
parser.add_argument("--git-url", help="Git URL of the neuron to install")
parser.add_argument("--neuron-name", help="Neuron name to uninstall")
parser.add_argument("--stt-name", help="STT name to uninstall")
parser.add_argument("--tts-name", help="TTS name to uninstall")
parser.add_argument("--trigger-name", help="Trigger name to uninstall")
parser.add_argument('-v', '--version', action='version',
version='Kalliope ' + version_str)
# parse arguments from script parameters
args = parser.parse_args()
# require at least one parameter, the action
if len(sys.argv[1:]) == 0:
parser.print_usage()
sys.exit(1)
# check if we want debug
configure_logging(debug=args.debug)
logger.debug("kalliope args: %s" % args)
# by default, no brain file is set.
# Use the default one: brain.yml in the root path
brain_file = None
# check if user set a brain.yml file
if args.brain_file:
brain_file = args.brain_file
# check the user provide a valid action
if args.action not in ACTION_LIST:
Utils.print_warning("%s is not a recognised action\n" % args.action)
parser.print_help()
# install modules
if args.action == "install":
if not args.git_url:
Utils.print_danger("You must specify the git url")
else:
parameters = {
"git_url": args.git_url
}
res_manager = ResourcesManager(**parameters)
res_manager.install()
return
# uninstall modules
if args.action == "uninstall":
if not args.neuron_name and not args.stt_name and not args.tts_name and not args.trigger_name:
Utils.print_danger("You must specify a module name with --neuron-name or --stt-name or --tts-name "
"or --trigger-name")
else:
res_manager = ResourcesManager()
res_manager.uninstall(neuron_name=args.neuron_name, stt_name=args.stt_name,
tts_name=args.tts_name, trigger_name=args.trigger_name)
return
# load the brain once
brain_loader = BrainLoader(file_path=brain_file)
brain = brain_loader.brain
if args.action == "start":
# user set a synapse to start
if args.run_synapse is not None:
SynapseLauncher.start_synapse(args.run_synapse, brain=brain)
if args.run_order is not None:
order_analyser = OrderAnalyser(args.run_order, brain=brain)
order_analyser.start()
if (args.run_synapse is None) and (args.run_order is None):
# first, load events in event manager
EventManager(brain.synapses)
Utils.print_success("Events loaded")
# then start kalliope
Utils.print_success("Starting Kalliope")
Utils.print_info("Press Ctrl+C for stopping")
# catch signal for killing on Ctrl+C pressed
signal.signal(signal.SIGINT, signal_handler)
# start the state machine
MainController(brain=brain)
if args.action == "gui":
ShellGui(brain=brain)
| 5,343,688
|
def _create_tree(lib_path, tree_object):
"""
Iterate through the elements in the tree_object and create a node module for each one.
:param lib_path: full path to the library where all nodes will be created
:param tree_object: ElementTree instance for the xml_file
:return: None
"""
for element in tree_object.iter('node'):
create_node(lib_path, element)
| 5,343,689
|
def do_mount(devpath, mountpoint, fstype):
"""Execute device mount operation.
:param devpath: The path of mount device.
:param mountpoint: The path of mount point.
:param fstype: The file system type.
"""
try:
if check_already_mounted(devpath, mountpoint):
return
mounter = Mounter()
mounter.mount(devpath, mountpoint, fstype)
except exceptions.MountException:
try:
mounter.make_filesystem(devpath, fstype)
mounter.mount(devpath, mountpoint, fstype)
except exceptions.FuxiException as e:
with excutils.save_and_reraise_exception():
LOG.error(str(e))
| 5,343,690
|
def say_hello():
""" Say hello """
return utils.jsonify_success({
'message': 'Hello {}! You are logged in.'.format(current_user.email)
})
| 5,343,691
|
def relu(x):
"""The rectifier activation function. Only activates if argument x is
positive.
Args:
x (ndarray): weighted sum of inputs
"""
# np.clip(x, 0, np.finfo(x.dtype).max, out=x)
# return x
return np.where(x >= 0, x, 0)
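# Example: negative inputs are clamped to zero, non-negative inputs pass through unchanged.
import numpy as np
print(relu(np.array([-2.0, 0.0, 3.5])))  # [0.  0.  3.5]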
| 5,343,692
|
def k_shortest_paths(G, source, target, k=1, weight='weight'):
"""Returns the k-shortest paths from source to target in a weighted graph flux_graph.
Parameters
----------
flux_graph : NetworkX graph
source : node
Starting node
target : node
Ending node
k : integer, optional (default=1)
The number of shortest paths to find
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
lengths, paths : lists
Returns a tuple with two lists.
The first list stores the length of each k-shortest path.
The second list stores each k-shortest path.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> flux_graph=nx.complete_graph(5)
>>> print(k_shortest_paths(flux_graph, 0, 4, 4))
([1, 2, 2, 2], [[0, 4], [0, 1, 4], [0, 2, 4], [0, 3, 4]])
Notes
------
Edge weight attributes must be numerical and non-negative.
Distances are calculated as sums of weighted edges traversed.
"""
if source == target:
return ([0], [[source]])
length, path = nx.single_source_dijkstra(G, source, target, weight=weight)
if target not in length:
raise nx.NetworkXNoPath("node %s not reachable from %s" % (source, target))
lengths = [length[target]]
paths = [path[target]]
c = count()
B = []
G_original = G.copy()
for i in range(1, k):
for j in range(len(paths[-1]) - 1):
spur_node = paths[-1][j]
root_path = paths[-1][:j + 1]
edges_removed = []
for c_path in paths:
if len(c_path) > j and root_path == c_path[:j + 1]:
u = c_path[j]
v = c_path[j + 1]
if G.has_edge(u, v):
edge_attr = G.edge[u][v]
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
for n in range(len(root_path) - 1):
node = root_path[n]
# out-edges
for u, v, edge_attr in G.edges_iter(node, data=True):
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
if G.is_directed():
# in-edges
for u, v, edge_attr in G.in_edges_iter(node, data=True):
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
spur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)
if target in spur_path and spur_path[target]:
total_path = root_path[:-1] + spur_path[target]
total_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]
heappush(B, (total_path_length, next(c), total_path))
for e in edges_removed:
u, v, edge_attr = e
G.add_edge(u, v, edge_attr)
if B:
(l, _, p) = heappop(B)
lengths.append(l)
paths.append(p)
else:
break
return (lengths, paths)
| 5,343,693
|
def allowed_file(filename):
"""
Check the image extension
    Currently, only jpg, jpeg and png are supported
"""
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
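# Hedged usage check; ALLOWED_EXTENSIONS is assumed to be {'jpg', 'jpeg', 'png'} as the
# docstring suggests. Note that the comparison is case-sensitive as written.
ALLOWED_EXTENSIONS = {'jpg', 'jpeg', 'png'}
print(allowed_file('photo.png'))     # True
print(allowed_file('archive.zip'))   # False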
| 5,343,694
|
def __best_tour(previous: int, prices: np.ndarray, excess: float, best_solution: set, visited: np.ndarray) -> int:
""" Ищем в лучшем туре """
search = __search(best_solution, previous)
node, alpha = -1, maxsize
for edge in search:
temp = edge[0] if edge[0] != previous else edge[1]
if not visited[temp] and prices[temp] < alpha < excess:
node, alpha = temp, prices[temp]
    return node
| 5,343,695
|
def classical_gaussian_kernel(k, sigma):
"""
A function to generate a classical Gaussian kernel
:param k: The size of the kernel, an integer
    :param sigma: standard deviation of the Gaussian distribution
:return: A Gaussian kernel, a numpy array of shape (k,k)
"""
w = np.linspace(-(k - 1) / 2, (k - 1) / 2, k)
x, y = np.meshgrid(w, w)
kernel = 0.5*np.exp(-0.5*(x**2 + y**2)/(sigma**2))/(np.pi*sigma**2)
return kernel
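# Hedged sanity check: on a unit-spaced grid with k large relative to sigma, the kernel
# should sum to roughly 1 and peak at its centre.
kern = classical_gaussian_kernel(k=11, sigma=1.5)
print(kern.shape)                    # (11, 11)
print(round(float(kern.sum()), 3))   # close to 1.0
print(kern[5, 5] == kern.max())      # True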
| 5,343,696
|
def collection_to_csv(collection):
"""
    Collect the "result" field of every document in the collection into one DataFrame
    :param collection: Collection
    :return: pandas.DataFrame with the concatenated results, or an empty DataFrame on error
"""
print("collection_to_csv")
final_df = pd.DataFrame()
try:
dict4json = []
n_documents = 0
for document in collection.get():
result_dict = document.to_dict()
dict4json.append(result_dict)
n_documents += 1
for result in dict4json:
lst = result["result"]
df = pd.DataFrame(lst)
df = df.reindex(sorted(df.columns), axis=1)
final_df = pd.concat([final_df, df])
    except Exception as e:
        print(e)
        return pd.DataFrame()
    return final_df
| 5,343,697
|
def validate_hatch(s):
"""
Validate a hatch pattern.
A hatch pattern string can have any sequence of the following
characters: ``\\ / | - + * . x o O``.
"""
if not isinstance(s, six.text_type):
raise ValueError("Hatch pattern must be a string")
unique_chars = set(s)
unknown = (unique_chars -
set(['\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O']))
if unknown:
raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
return s
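# Usage sketch (assuming Python 3, where six.text_type is str): a valid pattern is
# returned unchanged, while an unknown symbol raises ValueError.
print(validate_hatch('/\\|.'))   # /\|.
try:
    validate_hatch('/z')
except ValueError as err:
    print(err)                   # Unknown hatch symbol(s): ['z']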
| 5,343,698
|
def iot_hub_service_factory(cli_ctx, *_):
"""
Factory for importing deps and getting service client resources.
Args:
cli_ctx (knack.cli.CLI): CLI context.
*_ : all other args ignored.
Returns:
iot_hub_resource (IotHubClient.iot_hub_resource): operational resource for
working with IoT Hub.
"""
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_IOTHUB).iot_hub_resource
| 5,343,699
|