| content | id |
|---|---|
def get_variable_field_type(variable_name, field_name, error_prefix=''):
"""
    Get the type of a given field of a given variable.
"""
schema = get_variable_schema(variable_name)
result_type = schema.get(field_name)
if not result_type:
        raise RuntimeError(utf8(error_prefix) + 'Variable (%s) does not contain field (%s)' % (utf8(variable_name), utf8(field_name)))
    # Fields referenced in strategies are currently expected to be basic types
if result_type[1]:
        raise RuntimeError(utf8(error_prefix) + 'Complex data type %s (%s) is not supported yet' % (utf8(field_name), utf8(result_type)))
return result_type[0]
| 14,400
|
def test(input_test_data):
"""
Run test batches on trained network
    :return: Test accuracy as a percentage [0-100]
"""
print('--- Execute testing ---')
one_hot_label = np.zeros(10, dtype=np.uint8)
correct_n = 0
total_n = 0
for batch_id, (mini_batch, label) in enumerate(input_test_data):
for sample_id, sample in enumerate(mini_batch):
            # Flatten the input image into a (network.input_dim, 1) column vector
flat_sample = (np.array(sample)).reshape((network.input_dim, 1))
# Forward pass one sample to network
one_hot_label[label[sample_id]] = 1 # we require one-hot encoding for our input data
lossr, result = network.forward_pass(flat_sample, one_hot_label)
# check if sample was correctly classified
if (result == one_hot_label).all():
correct_n += 1
total_n += 1
one_hot_label[:] = 0
# print('batch_id at end: ', batch_id)
if total_n != 0:
return (correct_n / total_n) * 100
else:
print('Warning, total_n should not be 0')
return 0
| 14,401
|
def test(net, example):
"""
Args:
net (FlowNet): Instance of networks.flownet.FlowNet model, only to be used for pre-processing.
example (dict): Un-processed example.
Returns:
good (list, DMatch): List of good SIFT matches.
"""
net.eval()
example = net.preprocess(example)
cs_arr, tg_arr = np.array(example['resized_cs_im']), np.array(example['resized_tg_im'])
cs_mask, tg_mask = example['resized_cs_mask'], example['resized_tg_mask']
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# Find the keypoints and descriptors with SIFT
kp_cs, des_cs = sift.detectAndCompute(cs_arr, mask=cs_mask)
kp_tg, des_tg = sift.detectAndCompute(tg_arr, mask=tg_mask)
example['kp_cs'], example['kp_tg'] = kp_cs, kp_tg
    # Return an empty list if no descriptors were found in either the scene or the target.
if des_cs is None or des_tg is None:
return []
# Make sure that there are at-least 2 features in both scene and target for knn with nn=2.
if len(des_cs) < 2 or len(des_tg) < 2:
return []
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des_tg, des_cs, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
return good
| 14,402
|
def init(skill):
"""
Initializes any data on the parent skill if necessary
"""
for component in skill.components:
if component.defines('init'):
component.init(skill)
| 14,403
|
def get_hashers():
"""
    Read the list of password hashers from app.settings.py and instantiate them.
"""
hashers = []
    # Import and instantiate each configured hasher class
for hasher_path in current_app.config.get('PASSWORD_HASHERS'):
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
        if not getattr(hasher, 'algorithm', None):
            raise ImproperlyConfigured("hasher doesn't specify an "
                                       "algorithm name: %s" % hasher_path)
        hashers.append(hasher)
return hashers
| 14,404
|
def str2range(s):
"""parse a samtools/tabix type region specification 'chr:start-stop' or 'chr:start..stop'"""
chrom = None
start = 1
stop = None
tmp = s.split(':')
chrom = tmp[0]
if len(tmp)>1:
if '-' in tmp[1]:
tmp = tmp[1].split('-')
else:
tmp = tmp[1].split('..')
start = str2int(tmp[0])
if len(tmp)>1:
stop = str2int(tmp[1])
return (chrom, start, stop)
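
# Usage sketch (added for illustration, not part of the original snippet); the numeric
# results assume the str2int helper defined elsewhere simply converts numeric strings to int.
assert str2range("chr1") == ("chr1", 1, None)
assert str2range("chr1:100-200") == ("chr1", 100, 200)
assert str2range("chr1:100..200") == ("chr1", 100, 200)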
| 14,405
|
def check_missing_dep():
""" Check for missing dependencies """
global MISSING_PACKAGES, INSTALLED_PACKAGES, ENABLE_CUDA
if ENABLE_CUDA and IS_MACOS:
REQUIRED_PACKAGES.extend(MACOS_REQUIRED_PACKAGES)
MISSING_PACKAGES = []
for pkg in REQUIRED_PACKAGES:
key = pkg.split("==")[0]
if key not in INSTALLED_PACKAGES:
MISSING_PACKAGES.append(pkg)
continue
else:
if len(pkg.split("==")) > 1:
if pkg.split("==")[1] != INSTALLED_PACKAGES.get(key):
MISSING_PACKAGES.append(pkg)
continue
| 14,406
|
def test_get_current_price(create_exchange):
""" Test that we're able to get the current price and that it's not 0"""
assert create_exchange.has_next_observation == True
assert len(create_exchange.data_frame) != 0
    # The current price should not be 0 and should not raise an exception.
assert create_exchange.current_price(symbol="ETH") != 0
# Check that there are enough price observations
| 14,407
|
def turn_read_content(path, labelIdx, dataIdx):
"""
sentences: (dialog_num, turn_num, nbest_num, sentence_len)
scores: (dialog_num, turn_num, nbest_num)
acts: (dialog_num, turn_num, machine_act_len)
labels: (dialog_num, turn_num, [label_dim])
"""
sentences, scores, acts, labels = [], [], [], []
with open(path) as json_file:
data = json.load(json_file)
#print data["data"][dataIdx]
for dialog in data[dataIdx]:
dialog_sentences, dialog_scores, machine_acts, dialog_labels = read_nbest_dialog_content(dialog, labelIdx)
sentences.append(dialog_sentences)
scores.append(dialog_scores)
acts.append(machine_acts)
labels.append(dialog_labels)
return sentences, scores, acts, labels
| 14,408
|
def index_page() -> dict:
"""Get data for Index page , interfaces, dp neighbors, arps, and hsrp"""
    ip = request.json.get('ip')
    port = request.json.get('port')
    username = request.json.get('username')
    password = request.json.get('password')
    interfaces = GetThisDataFromDevice.get_interfaces(ip, port, username, password)
    neighbors = GetThisDataFromDevice.get_dp_neighbors(ip, port, username, password)
    arps = GetThisDataFromDevice.get_arps(ip, port, username, password)
    hsrp = InCaseRestDoesntWork.get_hsrp_status(username, password, ip)
    cpu_status = GetThisDataFromDevice.get_cpu_usages(ip, port, username, password)
    return {'interfaces': interfaces, 'arps': arps, 'dp': neighbors, 'hsrp': hsrp, 'cpu': cpu_status[0], 'mem': cpu_status[1]}
| 14,409
|
async def client_close(self: "dragonchain_client.Client") -> None:
"""
Close any aiohttp sessions associated with an instantiated async client
"""
await self.request.session.close()
| 14,410
|
def reverse_int_bits(n: int, n_bits: int = 10) -> int:
"""Reverses the bits of *n*, considering it is padded by *n_bits* first"""
return int(format(n, '0' + str(n_bits) + 'b')[::-1], 2)
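
# Worked examples (added for illustration): 11 is 0000001011 over 10 bits, which
# reversed reads 1101000000 = 832.
assert reverse_int_bits(11, n_bits=10) == 0b1101000000  # 832
assert reverse_int_bits(1, n_bits=4) == 0b1000           # 8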
| 14,411
|
def generate_following_list(api):
""" Generate Complete following list of Authenticated User """
    print('------- Following ---------')
    # api.friends iterates over the accounts the authenticated user follows
    for friend in tweepy.Cursor(api.friends).items():
        print(friend.screen_name)
| 14,412
|
def get_conn():
"""
    Get (or lazily create) the Redis cookie/account clients on the Flask ``g`` object.
    :return: the ``g`` object with the clients attached
"""
for name in GENERATOR_MAP:
print(name)
        if not hasattr(g, name + '_cookies'):
            setattr(g, name + '_cookies', CookiesRedisClient(name=name))
            setattr(g, name + '_account', AccountRedisClient(name=name))
return g
| 14,413
|
def get_rule_satisfaction_matrix(x, y, rules):
""" Returns a matrix that shows which instances satisfy which rules
Each column of the returned matrix corresponds to a rules and each row to an instance.
If an instance satisfies a rule, the corresponding value will be 1, else 0.
:param x: np.ndarray
:param y: np.array
:param rules: list
    :return: np.ndarray
        matrix with x.shape[0] rows and len(rules) columns
"""
satisfaction_matrix = np.zeros((x.shape[0], len(rules)), dtype=np.int32)
for i, rule in enumerate(rules):
idxs = rule.where_satisfied(x, y)
satisfaction_matrix[idxs, i] = 1
return satisfaction_matrix
| 14,414
|
def refactor(df, frequency = '1W'):
"""Refactor/rebin the data to a lower cadence
The data is regrouped using pd.Grouper
"""
low = df.low.groupby(pd.Grouper(freq=frequency)).min()
high = df.high.groupby(pd.Grouper(freq=frequency)).max()
close = df.close.groupby(pd.Grouper(freq=frequency)).last()
open_ = df.open.groupby(pd.Grouper(freq=frequency)).first()
volume = df.volume.groupby(pd.Grouper(freq=frequency)).sum()
return pd.DataFrame(dict(low=low, high=high, open=open_, close=close, volume=volume))
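
# Usage sketch (added for illustration, not from the original code): rebin 30 daily
# OHLCV bars into weekly bars. Requires a DatetimeIndex on the input frame.
import numpy as np
import pandas as pd

idx = pd.date_range("2021-01-01", periods=30, freq="D")
daily = pd.DataFrame({
    "open": np.arange(30.0), "high": np.arange(30.0) + 1.0,
    "low": np.arange(30.0) - 1.0, "close": np.arange(30.0) + 0.5,
    "volume": np.full(30, 100.0),
}, index=idx)
weekly = refactor(daily, frequency="1W")  # one row per calendar week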
| 14,415
|
def pendulum_derivatives(theta, omega, g=9.8, l=1):
"""
\dot{\theta} = \omega
\dot{\omega} = -\frac{g \sin\theta}{l}
    :param theta: angle of the pendulum
:param omega: angular velocity of the pendulum
:param g: gravitational acceleration
:param l: length of the pendulum
    :return: derivative of angle, derivative of angular velocity
"""
d_theta = omega
d_omega = - np.sin(theta) * g / l
return d_theta, d_omega
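
# Minimal integration sketch (added for illustration): advance the pendulum with
# explicit Euler steps, assuming numpy is imported as np as in the function above.
theta, omega, dt = 0.1, 0.0, 0.01
for _ in range(1000):
    d_theta, d_omega = pendulum_derivatives(theta, omega)
    theta += d_theta * dt
    omega += d_omega * dt
# theta and omega now approximate the state after 10 simulated seconds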
| 14,416
|
def test_nested():
"""Validate result of the nested class resolving."""
# pylint: disable=no-member
assert issubclass(NestedSchema, graphene.ObjectType)
assert isinstance(NestedSchema.name, graphene.String)
assert isinstance(NestedSchema.leaf, graphene.Field)
assert str(NestedSchema.leaf.type) == "Leaf"
assert isinstance(NestedSchema.leaf.type.value, graphene.String)
assert isinstance(NestedSchema.leaf.type.leaflets, graphene.List)
| 14,417
|
def get_sql_filtered( source_query, python_types, db_conf_file_name, filters=[]):
"""
Return list of DBAPI tuples (& prefixed header row) filtered by value
Keyword Parameters:
source_query -- String, representing SQL definition of requested datasource
python_types -- JSON encoded string representing a Dict that maps
field names to Python type constructors
db_conf_file_name -- String representing the server/ module .ini settings
file, defining how to connect to the Source
Exceptions:
FilterVariableError -- filters variable not found in header
>>> args1 = { 'source_query': "schema.InformativeView"
... , 'python_types': '{"a":"str","b":"int","z":"float"}'
... , 'db_conf_file_name': 'db_config.ini'
... }
>>> get_sql_filtered( filters=['z=7'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE z = %s', [7.0])
>>> get_sql_filtered( filters=['a=77'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE a = %s', ['77'])
>>> get_sql_filtered( filters=['a=77','z>=77'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE a = %s AND z >= %s', ['77', 77.0])
>>> args1['db_conf_file_name'] = 'db_trawl.ini'
>>> get_sql_filtered( filters=['z=7'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE z = :0', [7.0])
>>> get_sql_filtered( filters=['a=77','z>=77'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE a = :0 AND z >= :1', ['77', 77.0])
>>> get_sql_filtered( filters=['z~=7'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE REGEXP_LIKE(z, :0)', ['7'])
>>> get_sql_filtered( filters=['a|=["5", "77"]'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE ( (a = :0) OR (a = :1) )', ['5', '77'])
>>> get_sql_filtered( filters=['z|=["5", "77"]'], **args1)
('SELECT "raw".* FROM (schema.InformativeView) "raw" WHERE ( (z = :0) OR (z = :1) )', [5.0, 77.0])
"""
# wrap source's table name, or inline view definition, with an outer select
    mandatory_pgsql_alias = '"raw"'  # PostgreSQL requires a name for any inline view
    sql_outer_template = "SELECT {alias}.* FROM ({query}) {alias}"
    str_sql_outer = sql_outer_template.format(query=source_query,
                                              alias=mandatory_pgsql_alias)
str_pgsql_conn = ['db_config.ini','db_dwsupport.ini']#FIXME:improve detection of SQL dialect
# Append filter to end of the outer select, as conditional access clauses
binds = []
for str_filter_urlencoded in filters:
str_access_clause = "WHERE"
if len(binds) > 0:
str_access_clause = "AND"
bind_start = len(binds)
if db_conf_file_name not in str_pgsql_conn:
#use the Oracle regexp syntax
access_condition, filter_binds =get_filter_condition_oracle_string(
python_types
,str_filter_urlencoded
,bind_start)
else:
#TODO: why not use get_filter_condition_sqlalchemy_pgsql_string ?
access_condition, filter_binds = get_filter_condition_pgsql_string(
python_types
,str_filter_urlencoded
,bind_start)
str_sql_outer += ' ' + str_access_clause + access_condition
binds.extend(filter_binds)
return str_sql_outer, binds
| 14,418
|
def task_edit(request, pk=None):
"""
"""
return edit(request, form_model=TaskForm, model=Task, pk=pk)
| 14,419
|
def get_intersect(pre_df, post_df, args, aoi=None):
"""
Computes intersection of two dataframes and reduces extent by an optional defined AOI.
:param pre_df: dataframe of raster footprints
:param post_df: dataframe of raster footprints
:param args: arguments object
:param aoi: AOI dataframe
    :return: bounds tuple (minx, miny, maxx, maxy) of the calculated intersection
"""
pre_env = pre_df.to_crs(args.destination_crs).unary_union
post_env = post_df.to_crs(args.destination_crs).unary_union
intersect = pre_env.intersection(post_env)
logger.debug(f'Pre bounds: {pre_env.bounds}')
logger.debug(f'Post bounds: {post_env.bounds}')
assert intersect.area > 0, logger.critical('Pre and post imagery do not intersect')
if aoi is not None:
aoi = aoi.to_crs(args.destination_crs).unary_union
intersect = aoi.intersection(intersect)
assert intersect.area > 0, logger.critical('AOI does not intersect imagery')
logger.info('Intersection calculated with AOI')
# Todo: Return tuple of ((bounds), area) to estimate inference time
return intersect.bounds
| 14,420
|
def _gradient(P, T, N, A):
"""
Creates the gradient operator, starting from the point set P, the topology tensor T, the normal tensor N and the
triangle area tensor A
Parameters
----------
P : Tensor
the (N,3,) point set tensor
T : LongTensor
the (3,M,) topology tensor
N : Tensor
the (M,3,) triangle normal tensor
A : Tensor
the (M,) triangle area tensor
Returns
-------
list
the gradient operator data
"""
device = P.device
def V(i):
return P[T[i], :]
n = row(P)
m = col(T)
i = LongTensor([], device=device)
j = LongTensor([], device=device)
w = FloatTensor([], device=device)
f = indices(0, m - 1, device=device).squeeze()
for k in range(row(T)):
# opposite edge e_i indexes
s = (k+1) % 3
t = (k+2) % 3
# vector N_f^e_i
wk = cross(V(t) - V(s), N, 1)
# update the index listing
i = torch.cat((i, f), dim=0)
j = torch.cat((j, T[k]), dim=0)
w = torch.cat((w, wk), dim=0)
a = diag(torch.reciprocal(A), rows=m)
e = torch.cat((i.unsqueeze(0), j.unsqueeze(0)), dim=0)
G = []
for k in range(col(P)):
G += [torch.matmul(a, adjacency(e, w[:, k], size=[m, n]))]
return G
| 14,421
|
def breast_tissue_diagnostic_black_pen() -> Tuple[
openslide.OpenSlide, str
]: # pragma: no cover
"""breast_tissue_diagnostic_black_pen() -> Tuple[openslide.OpenSlide, str]
Breast tissue, TCGA-BRCA dataset. Diagnostic slide with black pen marks.
This image is available here
https://portal.gdc.cancer.gov/files/e70c89a5-1c2f-43f8-b6be-589beea55338
or through the API
https://api.gdc.cancer.gov/data/e70c89a5-1c2f-43f8-b6be-589beea55338
It corresponds to TCGA file
`TCGA-BH-A201-01Z-00-DX1.6D6E3224-50A0-45A2-B231-EEF27CA7EFD2.svs`
Access: open
Returns
-------
breast_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of breast tissue with black pen marks.
path : str
Path where the slide is saved
"""
return _load_svs(
"tcga/breast/TCGA-BH-A201-01Z-00-DX1.6D6E3224-50A0-45A2-B231-EEF27CA7EFD2.svs"
)
| 14,422
|
def test_polyfit():
"""Unit test for the polyfit function"""
stations = build_station_list()
# Update the water levels for all of them
update_water_levels(stations)
    # Find the station with the highest relative water level
high_risk_stations = flood.stations_highest_rel_level(stations,1)
days = 2
dates, levels = [], []
# Add dates and levels to the lists
for st in high_risk_stations:
dates, levels = fetch_measure_levels(st[0].measure_id, dt=datetime.timedelta(days=days))
test_array = analysis.polyfit(dates, levels, 3)
# test that polyfit returns a polynomial of the correct order
assert len(test_array[0])== 3
| 14,423
|
def generic_plot(plot_title, xlims, ylims, xlabel, ylabel, addons, plot_fontsize=18, plot_figsize=(8,6), plot_dpi=400):
""" Wrapper function for plot formatting. """
# Configurations.
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Charter']
pp_fig = plt.figure(figsize=tuple(plot_figsize))
plt.gca().set_ylim(ylims[0], ylims[-1])
plt.gca().set_xlim(xlims[0], xlims[-1])
plt.xticks(fontsize=plot_fontsize)
plt.yticks(fontsize=plot_fontsize)
plt.title(plot_title, fontsize=plot_fontsize+4)
plt.xlabel(xlabel, fontsize=plot_fontsize)
plt.ylabel(ylabel, fontsize=plot_fontsize)
plt.tight_layout()
addons()
plot_savename = re.sub(r'[^\w\s]', '', plot_title).lower().replace(' ' , '_')
plt.savefig(f'history/plot_{plot_savename}.png', dpi=plot_dpi)
logging.info(f'Plot saved as `history/plot_{plot_savename}.png`.')
| 14,424
|
def test_print_flashy(mock_shutil, mock_print_length, scenario, capsys):
"""Test print_flashy."""
mock_shutil.return_value = (scenario["mock"], 1)
mock_print_length.return_value = scenario["message_length"]
expected = (
f"{'>'*scenario['left']} {'a' * scenario['message_length']} "
f"{'<'*scenario['right']}\n"
)
print_flashy("a" * scenario["message_length"])
output, error = capsys.readouterr()
assert not error
assert output == expected
| 14,425
|
def fitcand(t,fm,p,full=False):
"""
Perform a non-linear fit to a putative transit.
Parameters
----------
t : time
fm : flux
p : trial parameter (dictionary)
    full : Return tdt and fdt
Returns
-------
res : result dictionary.
"""
dtL = LDTwrap(t,fm,p)
dt = np.hstack(dtL)
fdt = dt['fdt']
tdt = dt['tdt']
p0 = np.array([p['P'],p['epoch'],p['df'],p['tdur']])
p1 = optimize.fmin_powell(objMT,p0,args=(tdt,fdt),disp=False)
dp = (p0[:2]-p1[:2])
if (abs(dp) > np.array([dP,depoch])).any():
stbl = False
elif (p1[0] < 0) | (p1[3] < 0):
stbl = False
else:
stbl = True
tfold = getT(tdt,p['P'],p['epoch'],p['tdur'])
fdt = ma.masked_array(fdt,mask=tfold.mask)
tdt = ma.masked_array(tdt,mask=tfold.mask)
s2n = s2n_fit(fdt,tdt,p1)
res = dict(P=p1[0],epoch=p1[1],df=p1[2],tdur=p1[3],s2n=s2n,stbl=stbl)
if full:
res['fdt'] = fdt
res['tdt'] = tdt
return res
| 14,426
|
def parse_prompt(browser):
"""
User enters instructions at a prompt
"""
i = 0
while True:
line = input('webscrape> ')
if line in ['break', 'exit', 'quit']:
break
i += 1
line = sanitize_line_input(i, line)
if line is None:
continue
else:
r = parse_line(browser, i, line)
| 14,427
|
def cbar_for_line_plot(axis, num_steps, discrete_ticks=True, **kwargs):
"""
Adds a colorbar next to a line plot axis
Parameters
----------
axis : matplotlib.axes.Axes
Axis with multiple line objects
num_steps : uint
Number of steps in the colorbar
discrete_ticks : (optional) bool
        Whether or not to have the ticks match the number of steps. Default = True
"""
if not isinstance(axis, mpl.axes.Axes):
raise TypeError('axis must be a matplotlib.axes.Axes object')
    if not isinstance(num_steps, int) or num_steps <= 0:
raise TypeError('num_steps must be a whole number')
assert isinstance(discrete_ticks, bool)
cmap = get_cmap_object(kwargs.pop('cmap', None))
cmap = discrete_cmap(num_steps, cmap=cmap.name)
sm = make_scalar_mappable(0, num_steps - 1, cmap=cmap)
if discrete_ticks:
kwargs.update({'ticks': np.arange(num_steps)})
cbar = plt.colorbar(sm, ax=axis, orientation='vertical',
pad=0.04, use_gridspec=True, **kwargs)
return cbar
| 14,428
|
def get_config_based_on_config_file(path: str) -> Union[Config, None]:
"""
load config and check if section exist or not
:param path: path to config file
    :return: None if the [laziest] section is missing, otherwise a Config object updated with the params from that section
"""
cfg = load_config(path)
if section_name not in cfg.sections():
return None
else:
        cfg = cfg[section_name]
common_params = deepcopy(default_settings)
params_from_config = {key: cfg[key] for key in cfg.keys()}
common_params.update(params_from_config)
return Config(common_params)
| 14,429
|
def main():
"""
BST
4
2 6
1 3 5 7
"""
tree = BSTwithNodes()
l1 = [4, 2, 6, 1, 3, 7, 5]
for i in l1: tree.insert(i)
print(tree.root)
print(tree.root.right)
print(tree.root.right.left)
print(tree.root.right.right)
print(tree.root.left)
print(tree.root.left.left)
print(tree.root.left.right)
| 14,430
|
def threadpooled( # noqa: F811
func: typing.Optional[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]] = None,
*,
loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] = None,
loop_getter_need_context: bool = False,
) -> typing.Union[
ThreadPooled,
typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"],
]:
"""Post function to ThreadPoolExecutor.
:param func: function to wrap
:type func: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
:param loop_getter: Method to get event loop, if wrap in asyncio task
:type loop_getter: typing.Union[
None,
typing.Callable[..., asyncio.AbstractEventLoop],
asyncio.AbstractEventLoop
]
:param loop_getter_need_context: Loop getter requires function context
:type loop_getter_need_context: bool
:return: ThreadPooled instance, if called as function or argumented decorator, else callable wrapper
:rtype: typing.Union[ThreadPooled, typing.Callable[..., typing.Union[concurrent.futures.Future, typing.Awaitable]]]
"""
if func is None:
return ThreadPooled(func=func, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context)
return ThreadPooled( # type: ignore
func=None, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context
)(func)
| 14,431
|
def prepare_solar_dps(dt, source):
"""
This function will prepare the day part separators for solar sources.
To handle this, it will either estimate it from a diurnal pattern if
provided or it will use the average hour for which there is first power
generated over the past two weeks and similarly for the last hour.
Args:
dt (datetime-like): The date to prepare the day part separators
for
source (Source): The source of data
"""
if populator_options.average_sunrise_sunset:
sunrise, sunset = average_sunrise_sunset(dt, source)
else:
diurnal_pattern = pd.read_csv(populator_options.diurnal_pattern_file,
index_col=0, parse_dates=True)
sunrise, sunset = estimate_sunrise_sunset(
dt, diurnal_pattern['diurnal pattern'])
gosm_options.dps_sunrise = sunrise
gosm_options.dps_sunset = sunset
if populator_options.number_dps:
temp_file = "{}{}temp_paths_{}.dat".format(
populator_options.output_directory, os.sep,
gosm_options.scenario_day.date())
# If this option is specified, this will dynamically generate the
# specified amount of day part separators from sunrise to sunset
dps = list(map(round, np.linspace(sunrise, sunset,
populator_options.number_dps)))
dps = list(map(int, dps))
dps[0] = 'sunrise'
dps[-1] = 'sunset'
paths_file = populator_options.dps_paths_file
# The newly generated file will be the same as the specified paths file
# except with the newly generated day part separators
with open(paths_file) as reader, open(temp_file, 'w') as writer:
for line in reader:
if line.startswith('dps'):
writer.write('dps ' + ' '.join(map(str, dps)) + '\n')
else:
writer.write(line)
gosm_options.dps_file = temp_file
| 14,432
|
def koven_temp_atten(soiltemp, airtemp):
"""Define thermal attenuation ratios as in Koven et al 2013."""
# read in list of observed lats and lons from Koven paper
ex_points = permafrost_koven_sites.site_points
# make amplitudes
airtemp_ampl = make_monthly_amp(airtemp)
soiltemp_ampl = make_monthly_amp(soiltemp)
# interpolate the log to the correct depth
soiltemp_log = iris.analysis.maths.log(soiltemp_ampl)
linear = iris.analysis.Linear()
soiltemp_log_surf = soiltemp_log.interpolate([('depth', 0.0)], linear)
soiltemp_ampl_surf = iris.analysis.maths.exp(soiltemp_log_surf)
soiltemp_log_1m = soiltemp_log.interpolate([('depth', 1.0)], linear)
soiltemp_ampl_1m = iris.analysis.maths.exp(soiltemp_log_1m)
    # extract points for each site
airtemp_ampl_1d = extract_sites(ex_points, airtemp_ampl)
if len(airtemp_ampl_1d.shape) > 1:
airtemp_ampl_1d = airtemp_ampl_1d[:, 0]
soiltemp_ampl_surf_1d = extract_sites(ex_points, soiltemp_ampl_surf)
soiltemp_ampl_1m_1d = extract_sites(ex_points, soiltemp_ampl_1m)
# assign metrics
metrics = {}
metrics['attenuation 1m over surface'] = np.median(
soiltemp_ampl_1m_1d / soiltemp_ampl_surf_1d)
metrics['attenuation surface over air'] = np.median(
soiltemp_ampl_surf_1d / airtemp_ampl_1d)
return metrics
| 14,433
|
def clipup(step_size: float,
momentum: float = 0.9,
max_speed: float = 0.15,
fix_gradient_size: bool = True):
"""Construct optimizer triple for ClipUp."""
step_size = optimizers.make_schedule(step_size)
def init(x0):
v0 = jnp.zeros_like(x0)
return x0, v0
def update(i, g, state):
x, v = state
g = jax.lax.cond(fix_gradient_size,
lambda p: p / jnp.sqrt(jnp.sum(p * p)),
lambda p: p,
g)
step = g * step_size(i)
v = momentum * v + step
# Clip.
length = jnp.sqrt(jnp.sum(v * v))
v = jax.lax.cond(length > max_speed,
lambda p: p * max_speed / length,
lambda p: p,
v)
return x - v, v
def get_params(state):
x, _ = state
return x
return init, update, get_params
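
# Usage sketch (added for illustration): the returned triple follows the
# jax.example_libraries.optimizers convention of (init, update, get_params).
import jax.numpy as jnp

init, update, get_params = clipup(step_size=0.1)
state = init(jnp.zeros(4))                # parameters start at the origin
grad = jnp.array([1.0, 0.0, 0.0, 0.0])    # stand-in gradient estimate
for i in range(3):
    state = update(i, grad, state)
params = get_params(state)                # updated parameter vector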
| 14,434
|
def ramsey_echo_sequence(length, target):
"""
Generate a gate sequence to measure dephasing time in a two-qubit chip including a flip in the middle.
    This echo reduces effects that are detrimental to the dephasing measurement.
Parameters
----------
length : int
Number of Identity gates. Should be even.
target : str
Which qubit is measured. Options: "left" or "right"
Returns
-------
list
Dephasing sequence.
"""
wait = ["Id:Id"]
hlength = length // 2
if target == "left":
rotate_90_p = ["X90p:Id"]
rotate_90_m = ["X90m:Id"]
elif target == "right":
rotate_90_p = ["Id:X90p"]
rotate_90_m = ["Id:X90m"]
S = []
S.extend(rotate_90_p)
S.extend(wait * hlength)
S.extend(rotate_90_p)
S.extend(rotate_90_p)
S.extend(wait * hlength)
S.extend(rotate_90_m)
return S
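
# Example output (added for illustration) for a short echo sequence on the left qubit;
# length must be even so the two wait blocks have equal duration.
seq = ramsey_echo_sequence(4, "left")
# ['X90p:Id', 'Id:Id', 'Id:Id', 'X90p:Id', 'X90p:Id', 'Id:Id', 'Id:Id', 'X90m:Id']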
| 14,435
|
def gauss_elimination(matrix) -> np.array:
"""
    Compute the Gauss elimination (reduced row echelon form) of a matrix
:param matrix: generic matrix
:return: matrix after the Gauss elimination
"""
import sympy
return np.array(sympy.Matrix(matrix).rref()[0])
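
# Worked example (added for illustration): sympy's rref() performs full Gauss-Jordan
# elimination, so an augmented 3x4 system reduces to the identity plus its solution.
m = [[2, 1, -1, 8], [-3, -1, 2, -11], [-2, 1, 2, -3]]
print(gauss_elimination(m))
# [[1 0 0 2]
#  [0 1 0 3]
#  [0 0 1 -1]]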
| 14,436
|
def prune(value, is_removable_function=is_removable):
"""
Deletes ``None`` and empty lists and dicts, recursively.
"""
    if isinstance(value, list):
        # Iterate in reverse so deletions do not shift the indexes still to be visited.
        for i in reversed(range(len(value))):
            v = value[i]
            if is_removable_function(value, i, v):
                del value[i]
            else:
                prune(v, is_removable_function)
    elif isinstance(value, dict):
        # Iterate over a snapshot of the items so the dict can be mutated safely.
        for k, v in list(value.items()):
            if is_removable_function(value, k, v):
                del value[k]
            else:
                prune(v, is_removable_function)
return value
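
# Usage sketch (added for illustration) with an explicit removability test; the default
# is_removable helper is defined elsewhere in this module.
data = {"a": None, "b": [], "c": {"d": None, "e": 1}, "f": [None, 2]}
prune(data, lambda container, key, v: v is None or v == [] or v == {})
# data is now {"c": {"e": 1}, "f": [2]}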
| 14,437
|
def test_clone_layer_uses_previous_config() -> None:
"""Tests that clone_layer uses previous layer configuration."""
units = 10
activation = "relu"
use_bias = False
layer = Dense(units, activation=activation, use_bias=use_bias)
cloned = clone_layer(layer)
assert cloned.units == units
assert cloned.activation.__name__ == activation
assert cloned.use_bias == use_bias
| 14,438
|
def submissions_score_set_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Consume the score_set signal defined in the Submissions API, and convert it
to a PROBLEM_WEIGHTED_SCORE_CHANGED signal defined in this module. Converts the
unicode keys for user, course and item into the standard representation for the
PROBLEM_WEIGHTED_SCORE_CHANGED signal.
This method expects that the kwargs dictionary will contain the following
entries (See the definition of score_set):
- 'points_possible': integer,
- 'points_earned': integer,
- 'anonymous_user_id': unicode,
- 'course_id': unicode,
- 'item_id': unicode
"""
points_possible = kwargs['points_possible']
points_earned = kwargs['points_earned']
course_id = kwargs['course_id']
usage_id = kwargs['item_id']
user = user_by_anonymous_id(kwargs['anonymous_user_id'])
if user is None:
return
if points_possible == 0:
# This scenario is known to not succeed, see TNL-6559 for details.
return
PROBLEM_WEIGHTED_SCORE_CHANGED.send(
sender=None,
weighted_earned=points_earned,
weighted_possible=points_possible,
user_id=user.id,
anonymous_user_id=kwargs['anonymous_user_id'],
course_id=course_id,
usage_id=usage_id,
modified=kwargs['created_at'],
score_db_table=ScoreDatabaseTableEnum.submissions,
)
| 14,439
|
def get_country_gateway_url(country):
"""TODO: Keep config in environment or file"""
return {
'countrya': environ.get('first_gateway_url'),
'countryb': environ.get('first_gateway_url'),
'countryc': environ.get('second_gateway_url'),
}.get(country.lower())
| 14,440
|
def n_permutations(n, r=None):
"""Number of permutations (unique by position)
:param n: population length
:param r: sample length
:return: int
"""
if r is None:
r = n
if n < 0 or r < 0:
raise ValueError("n and r must be positive")
if n == 0 or r > n:
return 0
return math.factorial(n) // math.factorial(n - r)
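
# Worked examples (added for illustration): P(5, 2) = 5 * 4 = 20, r defaults to n so
# P(4) = 4! = 24, and an oversized sample yields 0.
assert n_permutations(5, 2) == 20
assert n_permutations(4) == 24
assert n_permutations(3, 5) == 0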
| 14,441
|
def parse_s3_event(event):
"""Decode the S3 `event` message generated by message write operations.
See S3 docs: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-content-structure.html
See also the callers of this function.
Returns bucket_name, ipppssoot
"""
log.verbose("S3 Event:", event)
message = event["Records"][0]["s3"]["object"]["key"]
bucket_name = event["Records"][0]["s3"]["bucket"]["name"]
ipst = message.split("-")[-1]
log.info(f"received {message} : bucket = {bucket_name}, ipppssoot = {ipst}")
return "s3://" + bucket_name, ipst
| 14,442
|
def clip_2d_liang_barsky(xmin, ymin, xmax, ymax, x0, y0, x1, y1):
"""Clips the two-dimensional line segment by the algorithm of Liang and
Barsky. Adapted from James D. Foley, ed., __Computer Graphics: Principles
and Practice__ (Reading, Mass. [u.a.]: Addison-wesley, 1998), 122.
Parameters
----------
xmin, ymin, xmax, ymax, x0, y0, x1, y1 : float
Returns
-------
is_visible : bool
x0, y0, x1, y1 : float
"""
dx = x1 - x0
dy = y1 - y0
if dx == 0 and dy == 0 and clip_point(xmin, ymin, xmax, ymax, x0, y0):
return False, x0, y0, x1, y1
    tE_tL = np.array((0.0, 1.0))
if clip_t(dx, xmin - x0, tE_tL):
if clip_t(-dx, x0 - xmax, tE_tL):
if clip_t(dy, ymin - y0, tE_tL):
if clip_t(-dy, y0 - ymax, tE_tL):
# compute PL intersection, if tL has moved
tE, tL = tE_tL
if tL < 1:
x1 = x0 + tL * dx
y1 = y0 + tL * dy
# compute PE intersection, if tE has moved
if tE > 0:
x0 += tE * dx
y0 += tE * dy
return True, x0, y0, x1, y1
return False, x0, y0, x1, y1
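
# Usage sketch (added for illustration, assuming the clip_t/clip_point helpers implement
# the standard Liang-Barsky CLIPt test from Foley et al.): a horizontal segment crossing
# the unit window should be trimmed to the window's x-range.
visible, cx0, cy0, cx1, cy1 = clip_2d_liang_barsky(0.0, 0.0, 1.0, 1.0,
                                                   -1.0, 0.5, 2.0, 0.5)
# expected: visible is True with (cx0, cy0) == (0.0, 0.5) and (cx1, cy1) == (1.0, 0.5)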
| 14,443
|
def _(pattern, key_path: str, case_ignored=False) -> bool:
"""Called when the concerned Key is defined as a re.Pattern, and case_ignored flag is neglected."""
return re.fullmatch(pattern, key_path) is not None
| 14,444
|
def test_export(client, mock_env):
# pylint: disable=unused-argument
# mock_env is a fixture and creates a false positive for pylint
"""Test export message response"""
with open('tests/mocks/export_submissions.json', 'r') as file_obj:
mock_responses = json.load(file_obj)
assert mock_responses
with patch('service.modules.permit_applications.requests.get') as mock:
mock.return_value.status_code = 200
mock.return_value.json.return_value = mock_responses
with patch('service.resources.export.Export.send_email') as mock_send_email:
mock_send_email.return_value.status_code = 202
mock_send_email.return_value.body = "Content"
mock_send_email.return_value.headers = "X-Message-Id: 12345"
response = client.simulate_get(
'/export', params={
"actionState": "Export to PTS",
"token": "xyz",
"start_date": "2020-01-01",
"name": "Building Permit Application",
"send_email": "1"})
assert response.status_code == 200
response_json = response.json
assert response_json['status'] == 'success'
assert 'data' in response_json
assert 'responses' in response_json['data']
with patch('service.resources.export.Export.sftp') as mock_sftp:
mock_sftp.return_value.status_code = 200
mock_sftp.return_value.body = "Data"
response = client.simulate_get(
'/export', params={
"actionState": "Export to PTS",
"token": "xyz",
"form_id": "123",
"start_date": "2020-01-01",
"name": "Building Permit Application",
"sftp_upload": "1"})
assert response.status_code == 200
response_json = response.json
assert response_json['status'] == 'success'
assert 'data' in response_json
assert 'responses' in response_json['data']
| 14,445
|
def rotate(angle):
"""Make the robot rotate for a certain angle."""
move(angle, pi / 4, True)
| 14,446
|
def sophos_firewall_web_filter_update_command(client: Client, params: dict) -> CommandResults:
"""Update an existing object
Args:
client (Client): Sophos XG Firewall Client
params (dict): params to update the object with
Returns:
CommandResults: Command results object
"""
return generic_save_and_get(client, WEB_FILTER['endpoint_tag'], params, web_filter_builder,
WEB_FILTER['table_headers'], True)
| 14,447
|
def transition_with_random_block(block_randomizer):
"""
Build a block transition with randomized data.
Provide optional sub-transitions to advance some
number of epochs or slots before applying the random block.
"""
return {
"block_producer": block_randomizer,
}
| 14,448
|
def vmobj_to_list(o):
"""Converts TVM objects returned by VM execution to Python List.
Parameters
----------
o : Obj
VM Object as output from VM runtime executor.
Returns
-------
result : list
Numpy objects as list with equivalent values to the input object.
"""
if isinstance(o, tvm.nd.NDArray):
result = [o.numpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
result = hd
elif o.constructor.name_hint == "Nil":
result = []
elif "tensor_nil" in o.constructor.name_hint:
result = [0]
elif "tensor" in o.constructor.name_hint:
result = [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
return result
| 14,449
|
def match_histogram_with_merges(source: str, source_mask: str, reference: str, reference_mask: str, block_size: int = None):
"""Normalize the source image histogram with reference image.
This functions implements the `skimage.exposure.match_histograms`, which consists in the manipulate the pixels of an
input image and match the histogram with the reference image.
See more in `Histogram Matching <https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_histogram_matching.html>`_.
Note:
It overwrites the source file.
Args:
source (str): Path to the rasterio data set file
source_mask (str): Path to the rasterio data set file
reference (str): Path to the rasterio data set file
reference_mask (str): Path to the rasterio data set file
"""
from skimage.exposure import match_histograms as _match_histograms
with rasterio.open(source) as source_data_set, rasterio.open(source_mask) as source_mask_data_set:
source_arr = source_data_set.read(1, masked=True)
source_mask_arr = source_mask_data_set.read(1)
source_options = source_data_set.profile.copy()
with rasterio.open(reference) as reference_data_set, rasterio.open(reference_mask) as reference_mask_data_set:
reference_arr = reference_data_set.read(1, masked=True)
reference_mask_arr = reference_mask_data_set.read(1)
intersect_mask = numpy.logical_and(
source_mask_arr < 255, # CHECK: Use only valid data? numpy.isin(source_mask_arr, [0, 1, 3]),
reference_mask_arr < 255, # CHECK: Use only valid data? numpy.isin(reference_mask_arr, [0, 1, 3]),
)
valid_positions = numpy.where(intersect_mask)
if valid_positions and len(valid_positions[0]) == 0:
return
intersected_source_arr = source_arr[valid_positions]
intersected_reference_arr = reference_arr[valid_positions]
histogram = _match_histograms(intersected_source_arr, intersected_reference_arr)
histogram = numpy.round(histogram).astype(source_options['dtype'])
source_arr[valid_positions] = histogram
save_as_cog(str(source), source_arr, block_size=block_size, mode='w', **source_options)
| 14,450
|
def fetch_run(workspace: Workspace, run_recovery_id: str) -> Run:
"""
Finds an existing run in an experiment, based on a recovery ID that contains the experiment ID
and the actual RunId. The run can be specified either in the experiment_name:run_id format,
or just the run_id.
:param workspace: the configured AzureML workspace to search for the experiment.
:param run_recovery_id: The Run to find. Either in the full recovery ID format, experiment_name:run_id
or just the run_id
:return: The AzureML run.
"""
experiment, run = split_recovery_id(run_recovery_id)
try:
experiment_to_recover = Experiment(workspace, experiment)
except Exception as ex:
raise Exception(f"Unable to retrieve run {run} in experiment {experiment}: {str(ex)}")
run_to_recover = fetch_run_for_experiment(experiment_to_recover, run)
logging.info("Fetched run #{} {} from experiment {}.".format(run, run_to_recover.number, experiment))
return run_to_recover
| 14,451
|
def get_wl_band(radar_frequency):
"""Returns integer corresponding to radar frequency.
Args:
radar_frequency (float): Radar frequency (GHz).
Returns:
int: 0=35GHz radar, 1=94Ghz radar.
"""
return 0 if (30 < radar_frequency < 40) else 1
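
# Quick examples (added for illustration) of the band mapping for typical cloud radars.
assert get_wl_band(35.5) == 0   # Ka-band, ~35 GHz
assert get_wl_band(94.0) == 1   # W-band, ~94 GHz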
| 14,452
|
def get_topology_node(name: str, topology: ServiceTopology) -> TopologyNode:
"""
Fetch a topology node by name
:param name: node name
:param topology: service topology with all nodes
:return: TopologyNode
"""
node = topology.__dict__.get(name)
if not node:
raise ValueError(f"{name} node not found in {topology}")
return node
| 14,453
|
def place(c, project, language, appname):
"""copy translation(s) into <appname>
    Places the compiled language file so that it can be used.
"""
base = get_base_dir(project)
if not base:
print('unknown project')
return
if appname == '*':
appname = os.path.basename(base)
fromname = language + '.po'
toname = os.path.join(language, 'LC_MESSAGES', appname + '.mo')
    # check whether a working .mo file already exists, in case its name is actually mixed-case
    # unfortunately this does not work yet
loc = os.path.join(language, 'LC_MESSAGES')
for name in os.listdir(os.path.join(base, 'locale', loc)):
print(name)
        if os.path.splitext(name)[1] == '.mo':
toname = os.path.join(loc, name)
break
command = 'msgfmt {} -o {}'.format(fromname, toname)
with c.cd(os.path.join(base, 'locale')):
c.run(command)
| 14,454
|
def md5hash(string):
"""
Return the MD5 hex digest of the given string.
"""
return hashlib.md5(string).hexdigest()
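
# Usage note (added for illustration): hashlib.md5 expects bytes, so str input must be
# encoded first on Python 3.
assert md5hash(b"hello") == "5d41402abc4b2a76b9719d911017c592"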
| 14,455
|
def test_unknown_node():
"""An unknown node type should raise an error."""
with pytest.raises(UnknownNode):
compose("")
| 14,456
|
def userlist(request):
"""Shows a user list."""
return common_userlist(request, locale=request.view_lang)
| 14,457
|
def return_heartrates(patient_id):
"""
access database to get heart rate history for a patient
:param patient_id: integer ID of patient to get heart rates of
:return: json with the heart rate list for patient, or error message
"""
patient_id = int(patient_id)
    if heart_server_helpers.validate_patient(patient_id) is False:
        return jsonify({"Error": "invalid patient ID"})
    if heart_server_helpers.existing_beats(patient_id) is False:
        return jsonify({"Error": "no heartbeats recorded for patient"})
for user in Patient.objects.raw({"_id": patient_id}):
patient = user
heartrate_list = patient.heart_rate
print(heartrate_list)
heart_list = {"heat_rates": heartrate_list}
return jsonify(heart_list)
| 14,458
|
def describe_vpn_connections_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"VpcCidr",
"VpnName",
"VpnId",
"State",
"CutomerGwId",
"CutomerGwAddress",
"Type"
))
| 14,459
|
def to_numpy(a):
"""Convert an object to NumPy.
Args:
a (object): Object to convert.
Returns:
`np.ndarray`: `a` as NumPy.
"""
return convert(a, NPOrNum)
| 14,460
|
def transpose(self, perm=None, copy=True):
"""Return a tensor with permuted axes.
Parameters
----------
perm : Union[Sequence[int], dragon.Tensor]], optional
The output permutation.
copy : bool, optional, default=True
Return a new tensor or transpose in-place.
Returns
-------
dragon.Tensor
The output tensor.
See Also
--------
`dragon.transpose(...)`_
"""
return array_ops.transpose(self, perm=perm, copy=copy)
| 14,461
|
def GetInfoFromKegg():
"""
"""
pass
| 14,462
|
def brainrender_vis(regions, colors=None, atlas_name="allen_mouse_25um"):
"""Visualise regions in atlas using brainrender"""
if colors is None:
cm = ColorManager(num_colors=len(regions), method="rgb")
colors = cm.colors
def get_n_random_points_in_region(region, N):
"""
        Gets N random points inside (or on the surface of) a mesh.
"""
region_bounds = region.mesh.bounds()
X = np.random.randint(region_bounds[0], region_bounds[1], size=10000)
Y = np.random.randint(region_bounds[2], region_bounds[3], size=10000)
Z = np.random.randint(region_bounds[4], region_bounds[5], size=10000)
pts = [[x, y, z] for x, y, z in zip(X, Y, Z)]
ipts = region.mesh.insidePoints(pts).points()
if N < ipts.shape[0]:
return ipts[np.random.choice(ipts.shape[0], N, replace=False), :]
else:
return ipts
scene = brainrender.Scene(root=True, title="Labelled cells", atlas_name=atlas_name)
# Get a numpy array with (fake) coordinates of some labelled cells
brain_region_actors = []
for region, color in zip(regions, colors):
brain_region = scene.add_brain_region(region, alpha=0.15, color=color)
coordinates = get_n_random_points_in_region(brain_region.mesh, 2000)
color = [color] * coordinates.shape[0]
# Add to scene
scene.add(
brainrender.actors.Points(coordinates, name=f"{region} CELLS", colors=color)
)
brain_region_actors.append(brain_region)
hemisphere_points = [
get_points_in_hemisphere(scene.atlas, brain_region_actor)
for brain_region_actor in brain_region_actors
]
p1 = hemisphere_points[0].mean(axis=0)
p2 = hemisphere_points[1].mean(axis=0)
mesh = vedo.shapes.Cylinder(pos=[p1, p2], c="blue", r=100, alpha=0.5)
cylinder = brainrender.actor.Actor(mesh, name="Cylinder", br_class="Cylinder")
scene.add(cylinder)
# render
scene.content
scene.render()
| 14,463
|
def randsel(path, minlen=0, maxlen=None, unit="second"):
"""Randomly select a portion of audio from path.
Parameters
----------
path: str
File path to audio.
minlen: float, optional
Inclusive minimum length of selection in seconds or samples.
maxlen: float, optional
Exclusive maximum length of selection in seconds or samples.
unit: str, optional
The unit in which `minlen` and `maxlen` are interpreted.
Options are:
- 'second' (default)
- 'sample'
Returns
-------
tstart, tend: tuple of int
integer index of selection
"""
info = audioinfo(path)
sr, sigsize = info.samplerate, info.frames
if unit == 'second':
minoffset = int(minlen*sr)
maxoffset = int(maxlen*sr) if maxlen else sigsize
else:
minoffset = minlen
maxoffset = maxlen if maxlen else sigsize
assert (minoffset < maxoffset) and (minoffset <= sigsize), \
f"""BAD: siglen={sigsize}, minlen={minoffset}, maxlen={maxoffset}"""
# Select begin sample
tstart = randrange(max(1, sigsize-minoffset))
tend = randrange(tstart+minoffset, min(tstart+maxoffset, sigsize+1))
return tstart, tend
| 14,464
|
def update_time_bounds_in_config(config): # {{{
"""
Updates the start and end year (and associated full date) for
climatologies, time series and climate indices based on the files that are
actually available.
Parameters
----------
config : ``MpasAnalysisConfigParser`` object
contains config options
"""
# By updating the bounds for each component, we should end up with the
# more constrained time bounds if any component has less output than others
for componentName in ['ocean', 'seaIce']:
for section in ['climatology', 'timeSeries', 'index']:
update_time_bounds_from_file_names(config, section, componentName)
# }}}
| 14,465
|
def parse_datetime(splunk_uri, session_key, time_str):
"""
Leverage splunkd to do time parseing,
:time_str: ISO8601 format, 2011-07-06T21:54:23.000-07:00
"""
import splunklib
if not time_str:
return None
scheme, host, port = tuple(splunk_uri.replace("/", "").split(":"))
service = splunklib.client.Service(token=session_key, scheme=scheme,
host=host, port=port)
endpoint = splunklib.client.Endpoint(service, "search/timeparser/")
r = endpoint.get(time=time_str, output_time_format="%s")
response = splunklib.data.load(r.body.read()).response
seconds = response[time_str]
return datetime.datetime.utcfromtimestamp(float(seconds))
| 14,466
|
def _get_assessment_url(assessment):
"""Returns string URL for assessment view page."""
return urlparse.urljoin(utils.get_url_root(), utils.view_url_for(assessment))
| 14,467
|
def get_secret_setting_names(settings: dict) -> Set[str]:
"""guess the setting names that likely contain sensitive values"""
return {
key for key in settings.keys()
if AUTOFIND_SECRET_SETTINGS.match(key)
and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED
} | {
key for key, value in settings['SETTINGS_DEFAULTS'].items()
if value == PLACEHOLDER_FOR_SECRET
and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED
}
| 14,468
|
def main(dict):
"""
    Send a GET request to the Twitter API and retrieve the last 3 tweets of a
    specific account name. The account name is passed by Watson Assistant through a
context variable.
Args:
dict (dict): containing the parameter - in our case only one is used : "account" (e.g. @blackmirror)
Return:
list_tweets (list) : list containing text (and image) of the last three tweets.
"""
account_name = dict.get("account")[1:]
client_key = '// your twitter dev account client_key //'
client_secret = '// your twitter dev account client_secret //'
key_secret = '{}:{}'.format(client_key, client_secret).encode('ascii')
b64_encoded_key = base64.b64encode(key_secret)
b64_encoded_key = b64_encoded_key.decode('ascii')
base_url = 'https://api.twitter.com/'
auth_url = '{}oauth2/token'.format(base_url)
auth_headers = {
'Authorization': 'Basic {}'.format(b64_encoded_key),
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
}
auth_data = {
'grant_type': 'client_credentials'
}
auth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data)
access_token = auth_resp.json()['access_token']
search_headers = {
'Authorization': 'Bearer {}'.format(access_token)
}
search_url = '{}1.1/statuses/user_timeline.json?screen_name={}&count=3'.format(base_url, account_name)
search_resp = requests.get(search_url, headers=search_headers)
tweet_data = search_resp.json()
list_tweets =[]
for i in range(len(tweet_data)):
# store the text of the tweet
text = tweet_data[i].get("text")
# if the tweet contains an image add this to the tweet text
if(tweet_data[i].get("entities").get("media")):
image = tweet_data[i].get("entities").get("media")[0].get("media_url_https")
width = tweet_data[i].get("entities").get("media")[0].get("sizes").get("small").get("w")
height = tweet_data[i].get("entities").get("media")[0].get("sizes").get("small").get("h")
url = tweet_data[i].get("entities").get("media")[0].get("url")
final = text + "<a href = '" + url + "'>" + "<img src = '" +image + "' height =" + str(height) + " width = "+ str(width) + ">" + "</a>"
list_tweets.append(final)
# if there is no image, then just save the text of the tweet
else:
list_tweets.append(text)
return {"result": list_tweets}
| 14,469
|
def build_classifier_model(tfhub_handle_preprocess, tfhub_handle_encoder):
"""Builds a simple binary classification model with BERT trunk."""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
encoder_inputs = preprocessing_layer(text_input)
encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
net = outputs['pooled_output']
net = tf.keras.layers.Dropout(0.1)(net)
net = tf.keras.layers.Dense(1, activation=None, name='classifier')(net)
return tf.keras.Model(text_input, net)
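
# Usage sketch (added for illustration): the handle URLs below are examples of a matching
# TF Hub preprocess/encoder pair, not values taken from the original code.
preprocess_handle = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
encoder_handle = "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1"
model = build_classifier_model(preprocess_handle, encoder_handle)
model.compile(optimizer="adam",
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=["accuracy"])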
| 14,470
|
def execute_sql(sql: str, param: list):
"""
    Execute a query SQL statement and return the result rows as a list.
    :param param: query parameters
    :param sql: the SQL statement to execute
    :return: list of result rows
    """
    cursor = connection.cursor()
    cursor.execute(sql, param)
    return cursor.fetchall()
| 14,471
|
def slice_variable_response(DataFrame, variable, condition):
"""
Parameters
----------
DataFrame : pandas.DataFrame
DataFrame
variable : str
        Variable to examine the frequency of.
condition, str, default None
Filtering condition
Returns
-------
None
"""
for X in sorted(DataFrame[variable].unique()):
print('\n\n{}\n'.format(X))
print("(ExcludeInModel==0) & ({}=='{}')".format(variable,X))
try:
print(variable_response(DataFrame, variable='DupePrevScore', target_variable='DupeToCapture', condition="({}) & ({}=='{}')".format(condition, variable,X), show_plot=True))
        except Exception:
            print('No results found!')
| 14,472
|
def enqueue_frames_from_output(_proc, _qout, scale, use_timer=None, use_tensorflow: bool = False):
"""
:type scale: tuple
:type _proc: subprocess.Popen
:type _qout: queues.Queue
"""
print('scale enqueue_frames_from_output', scale)
timer = None
if use_timer:
timer = use_timer
e = None
frame_counter = itertools.count()
img_size = scale[0] * scale[1] * 3
while multiprocessing.current_process().is_alive():
if use_timer: timer.tic('_proc.stdout.read')
bb = _proc.stdout.read(img_size)
if use_timer: timer.toc('_proc.stdout.read')
if len(bb) > 0:
try:
if use_tensorflow:
import tensorflow as tf
if use_timer: timer.tic('tf.io.decode_raw')
decode_raw = tf.io.decode_raw(bb, out_type=tf.uint8)
if use_timer: timer.toc('tf.io.decode_raw')
if use_timer: timer.tic('tf.reshape')
reshaped = tf.reshape(
decode_raw,
(scale[1], scale[0], 3)
)
if use_timer: timer.toc('tf.reshape')
if use_timer: timer.tic('image dtype')
reshaped = tf.image.convert_image_dtype(reshaped, dtype=tf.float32)
if use_timer: timer.toc('image dtype')
if use_timer: timer.tic('tf to numpy')
tensor = reshaped.numpy()
if use_timer: timer.toc('tf to numpy')
fn = next(frame_counter)
_qout.put((fn, tensor))
else:
if use_timer: timer.tic('frombuffer')
ndarr = frombuffer(bb, dtype=numpy.uint8)
if use_timer: timer.toc('frombuffer')
if use_timer: timer.tic('buffer reshape')
ndarr = numpy.reshape(ndarr, (scale[1], scale[0], 3))
if use_timer: timer.toc('buffer reshape')
fn = next(frame_counter)
_qout.put((fn, ndarr))
except Exception as err:
log.error("%s" % err)
        e = _proc.poll()
        # log.debug("%s bb size %d" % (e, len(bb)))
        if e is not None and e >= 0 and len(bb) == 0:
break
if use_timer:
timer.print()
log.debug("bye ffmpeg %d" % e)
if e == 0:
_qout.put(None)
_qout.close()
elif e > 0:
_qout.put(None)
_qout.close()
raise RuntimeError("ffmpeg exits with code %d" % e)
| 14,473
|
def query_jwt_required(fn):
"""
A decorator to protect a query resolver.
If you decorate an resolver with this, it will ensure that the requester
has a valid access token before allowing the resolver to be called. This
does not check the freshness of the access token.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
print(args[0])
token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
try:
verify_jwt_in_argument(token)
except Exception as e:
return AuthInfoField(message=str(e))
return fn(*args, **kwargs)
return wrapper
| 14,474
|
def main(input_dir, output_dir):
""" Runs data processing scripts to turn raw data from (../raw) into
a single interim dataset (../interim), for which later features
will be engineered. The main process here is *conversion* via pypandoc
and external calls to unoconv.
"""
logger = logging.getLogger(__name__)
logger.info('making document data set from raw data documents directory')
raw_documents_outpath = PurePath(project_dir).joinpath(output_dir).joinpath('raw_documents.pkl')
raw_docs = list_raw_docs(input_dir)
metadata = load_all_metadata(input_dir, logger)
raw_documents = get_doc_df(raw_docs, metadata, logger)
raw_documents.to_pickle(raw_documents_outpath)
| 14,475
|
def callers_for_code(code):
"""
Return all users matching the code.
:param code:
:return:
"""
return db.session.query(Caller).filter(Caller.code==code).all()
| 14,476
|
def WriteJsonFile(filename, params, database):
"""Write database out as a .dat file.
Args:
filename: Name of output file to write database to.
params: Parameter structure used to generate the database.
database: Dictionary of ndarrays of aerodynamic coefficients and
derivatives.
"""
def _PrepareCoefficientArray(array):
return numpy.reshape(numpy.rollaxis(array, -1), (array.size,)).tolist()
keys_and_values = [
('num_alphas', len(database['alphas'])),
('num_betas', len(database['betas'])),
('num_deltas', [len(database['delta1s']), len(database['delta2s']),
len(database['delta3s']), len(database['delta4s']),
len(database['delta5s']), len(database['delta6s']),
len(database['delta7s']), len(database['delta8s'])]),
('reynolds_number', database['reynolds_number']),
('alphas', database['alphas']),
('betas', database['betas']),
('delta1s', database['delta1s']),
('delta2s', database['delta2s']),
('delta3s', database['delta3s']),
('delta4s', database['delta4s']),
('delta5s', database['delta5s']),
('delta6s', database['delta6s']),
('delta7s', database['delta7s']),
('delta8s', database['delta8s']),
('cfm', _PrepareCoefficientArray(database['cfm'])),
('dcfm_dp', _PrepareCoefficientArray(database['dcfm_dp'])),
('dcfm_dq', _PrepareCoefficientArray(database['dcfm_dq'])),
('dcfm_dr', _PrepareCoefficientArray(database['dcfm_dr'])),
('dcfm1', _PrepareCoefficientArray(database['dcfm1'])),
('dcfm1_dp', _PrepareCoefficientArray(database['dcfm1_dp'])),
('dcfm1_dq', _PrepareCoefficientArray(database['dcfm1_dq'])),
('dcfm1_dr', _PrepareCoefficientArray(database['dcfm1_dr'])),
('dcfm2', _PrepareCoefficientArray(database['dcfm2'])),
('dcfm2_dp', _PrepareCoefficientArray(database['dcfm2_dp'])),
('dcfm2_dq', _PrepareCoefficientArray(database['dcfm2_dq'])),
('dcfm2_dr', _PrepareCoefficientArray(database['dcfm2_dr'])),
('dcfm3', _PrepareCoefficientArray(database['dcfm3'])),
('dcfm3_dp', _PrepareCoefficientArray(database['dcfm3_dp'])),
('dcfm3_dq', _PrepareCoefficientArray(database['dcfm3_dq'])),
('dcfm3_dr', _PrepareCoefficientArray(database['dcfm3_dr'])),
('dcfm4', _PrepareCoefficientArray(database['dcfm4'])),
('dcfm4_dp', _PrepareCoefficientArray(database['dcfm4_dp'])),
('dcfm4_dq', _PrepareCoefficientArray(database['dcfm4_dq'])),
('dcfm4_dr', _PrepareCoefficientArray(database['dcfm4_dr'])),
('dcfm5', _PrepareCoefficientArray(database['dcfm5'])),
('dcfm5_dp', _PrepareCoefficientArray(database['dcfm5_dp'])),
('dcfm5_dq', _PrepareCoefficientArray(database['dcfm5_dq'])),
('dcfm5_dr', _PrepareCoefficientArray(database['dcfm5_dr'])),
('dcfm6', _PrepareCoefficientArray(database['dcfm6'])),
('dcfm6_dp', _PrepareCoefficientArray(database['dcfm6_dp'])),
('dcfm6_dq', _PrepareCoefficientArray(database['dcfm6_dq'])),
('dcfm6_dr', _PrepareCoefficientArray(database['dcfm6_dr'])),
('dcfm7', _PrepareCoefficientArray(database['dcfm7'])),
('dcfm7_dp', _PrepareCoefficientArray(database['dcfm7_dp'])),
('dcfm7_dq', _PrepareCoefficientArray(database['dcfm7_dq'])),
('dcfm7_dr', _PrepareCoefficientArray(database['dcfm7_dr'])),
('dcfm8', _PrepareCoefficientArray(database['dcfm8'])),
('dcfm8_dp', _PrepareCoefficientArray(database['dcfm8_dp'])),
('dcfm8_dq', _PrepareCoefficientArray(database['dcfm8_dq'])),
('dcfm8_dr', _PrepareCoefficientArray(database['dcfm8_dr']))
]
output_dict = collections.OrderedDict(
keys_and_values + [('params', dict_util.OrderDict(params))])
class _ParamsEncoder(json.JSONEncoder):
"""JSON encoder which handles the Airfoil objects and numpy arrays."""
def default(self, o):
if isinstance(o, airfoil.Airfoil):
return str(o)
elif isinstance(o, numpy.ndarray):
return o.tolist()
return json.JSONEncoder.default(self, o)
with open(filename, 'w') as f:
output_string = json.dumps(output_dict, separators=(', ', ':\n '),
cls=_ParamsEncoder)
output_string = (output_string
.replace(', \"', ',\n\"')
.replace('], [', '],\n [')
.replace(' [[', '[[')
.replace('{', '{\n')
.replace('}', '\n}')) + '\n'
f.write(output_string)
| 14,477
|
def headers():
""" default HTTP headers for all API calls """
return {"Content-type": "application/json"}
| 14,478
|
def preprocess_confidence(train_df, test_df=None):
"""
Feature creation that should be done given training data and then merged \
with test data.
"""
ATTRIBUTION_CATEGORIES = [
# V1 Features #
###############
['ip'], ['app'], ['device'], ['os'], ['channel'],
# V2 Features #
###############
['app', 'channel'],
['app', 'os'],
['app', 'device'],
# V3 Features #
###############
['channel', 'os'],
['channel', 'device'],
['os', 'device']
]
# Find frequency of is_attributed for each unique value in column
logging.info("Calculating new features: Confidence rates...")
for cols in ATTRIBUTION_CATEGORIES:
# New feature name
new_feature = '_'.join(cols) + '_confRate'
logging.info(new_feature)
# Perform the groupby
group_object = train_df.groupby(cols)
# Group sizes
group_sizes = group_object.size()
# Print group size descriptives once
if test_df is None:
logging.info(
"Calculating confidence-weighted rate for: {}.\n Saving to: {}. \
Group Max / Mean / Median / Min: {} / {} / {} / {}".format(
cols, new_feature,
group_sizes.max(),
np.round(group_sizes.mean(), 2),
np.round(group_sizes.median(), 2),
group_sizes.min()
))
# Merge function
def merge_new_features(group_object, df):
df = df.merge(
group_object['is_attributed']. \
apply(rate_calculation). \
reset_index(). \
rename(
index=str,
columns={'is_attributed': new_feature}
)[cols + [new_feature]],
on=cols, how='left'
)
# Replace NaNs by average of column
df = df.fillna(df.mean())
return df
        # Perform the merge on the appropriate frame
        if test_df is None:
            train_df = merge_new_features(group_object, train_df)
        else:
            test_df = merge_new_features(group_object, test_df)
    # Return the relevant data frame
    if test_df is None:
        return train_df
    return test_df
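# Usage sketch (added for illustration; commented out because it depends on the
# module's rate_calculation helper and its pandas/numpy/logging imports, which
# are not shown here). The column values are hypothetical and follow the
# ip/app/device/os/channel schema that the groupings above assume.
#
# train = pd.DataFrame({
#     'ip': [1, 1, 2, 2], 'app': [3, 3, 4, 4], 'device': [1, 1, 1, 1],
#     'os': [13, 13, 19, 19], 'channel': [497, 497, 213, 213],
#     'is_attributed': [0, 1, 0, 0],
# })
# train = preprocess_confidence(train)        # adds one *_confRate column per grouping
# test = preprocess_confidence(train, test)   # reuses the train groupings to enrich test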
| 14,479
|
def ancestral_state_pair(aln,tree,pos1,pos2,\
ancestral_seqs=None,null_value=gDefaultNullValue):
"""
"""
ancestral_seqs = ancestral_seqs or get_ancestral_seqs(aln,tree)
ancestral_names_to_seqs = \
dict(zip(ancestral_seqs.Names,ancestral_seqs.ArraySeqs))
distances = tree.getDistances()
tips = tree.getNodeNames(tipsonly=True)
# map names to nodes (there has to be a built-in way to do this
# -- what is it?)
nodes = dict([(n,tree.getNodeMatchingName(n)) for n in tips])
# add tip branch lengths as distance b/w identical tips -- this is
# necessary for my weighting step, where we want correlated changes
    # occurring on a single branch to be given the most weight
distances.update(dict([((n,n),nodes[n].Length) for n in nodes]))
result = 0
names_to_seqs = dict(zip(aln.Names,aln.ArraySeqs))
for i in range(len(tips)):
org1 = tips[i]
seq1 = names_to_seqs[org1]
for j in range(i,len(tips)):
org2 = tips[j]
seq2 = names_to_seqs[org2]
ancestor = nodes[org1].lastCommonAncestor(nodes[org2]).Name
if ancestor == org1 == org2:
# we're looking for correlated change along a
# single branch
ancestral_seq = ancestral_names_to_seqs[\
nodes[org1].ancestors()[0].Name]
else:
# we're looking for correlated change along different
# branches (most cases)
ancestral_seq = ancestral_names_to_seqs[ancestor]
# get state of pos1 in org1, org2, and ancestor
org1_p1 = seq1[pos1]
org2_p1 = seq2[pos1]
ancestor_p1 = ancestral_seq[pos1]
# if pos1 has changed in both organisms since their lca,
# this is a position of interest
if org1_p1 != ancestor_p1 and org2_p1 != ancestor_p1:
# get state of pos2 in org1, org2, and ancestor
org1_p2 = seq1[pos2]
org2_p2 = seq2[pos2]
ancestor_p2 = ancestral_seq[pos2]
# if pos2 has also changed in both organisms since their lca,
# then we add a count for a correlated change
if org1_p2 != ancestor_p2 and org2_p2 != ancestor_p2:
                    # There are several ways to score. The simplest is to
                    # increment by one, which seems to be what was done in
                    # other papers. That works well, but in a quick test
                    # (alpha helices/myoglobin with several generally
                    # high-scoring alphabets) weighting worked better; a more
                    # detailed analysis is in order.
                    #result += 1
                    # Weight by the inverse of the distance, so changes over a
                    # shorter time are scored higher than changes over a longer
                    # time (more ancient changes are more likely to be random,
                    # because more time has passed for them to occur in). This
                    # appears to do better under some circumstances and, at
                    # worst, about the same as simply incrementing by 1.
result += (1/distances[(org1,org2)])
# Another one to try might involve discounting the score
# for a pair when one changes and the other doesn't.
return result
| 14,480
|
def get_a(i,j,k):
"""returns between tad coordinates"""
i,j,k = np.sort([i,j,k])
ax,ay=[],[]
for x_ in range(i,j+1):
for y_ in range(j+1,k+1):
ax.append(x_)
ay.append(y_)
return ax,ay
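# Usage sketch (added for illustration): enumerate the between-TAD pairs for a
# TAD spanning bins 0..1 and a neighbouring TAD spanning bins 2..3. The numpy
# import is the one get_a itself relies on.
import numpy as np

ax, ay = get_a(0, 1, 3)
print(ax)  # [0, 0, 1, 1]
print(ay)  # [2, 3, 2, 3]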
| 14,481
|
def write_dov(filename, coeffs, errors=None, header=None, header2=None,
lmax=None, encoding=None):
"""
Write spherical harmonic coefficients to a text file formatted as
[degree, order, value].
Usage
-----
write_dov(filename, coeffs, [errors, header, header2, lmax, encoding])
Parameters
----------
filename : str
File name of the 'dov'-formatted spherical harmonic coefficients. If
filename ends with '.gz' the file will be automatically compressed with
gzip.
coeffs : ndarray, size(2, lmaxin+1, lmaxin+1)
The spherical harmonic coefficients.
errors : ndarray, size(2, lmaxin+1, lmaxin+1), optional, default = None
The errors associated with the spherical harmonic coefficients.
header : str, optional default = None
A string to be written directly before the spherical harmonic
coefficients.
header2 : str, optional default = None
A second string to be written directly before the spherical harmonic
coefficients.
lmax : int, optional, default = None
The maximum spherical harmonic degree to write to the file.
encoding : str, optional, default = None
Encoding of the output file. The default is to use the system default.
Notes
-----
This function will write spherical harmonic coefficients (and optionally
the errors) to a text file formatted as [degree, order, value]. If header
or header2 are specified, these strings will be written first, directly
before the spherical harmonic coefficients. Both real and complex spherical
harmonic coefficients are supported.
    The spherical harmonic coefficients in the file will be formatted as pairs
    of lines (a single line when m == 0) as
l, m, coeffs[0, l, m]
l, -m, coeffs[1, l, m]
where l and m are the spherical harmonic degree and order, respectively.
If the errors are included, each pair of lines will be formatted as
l, m, coeffs[0, l, m], errors[0, l, m]
l, -m, coeffs[1, l, m], errors[1, l, m]
For each value of increasing l, all the angular orders are listed in
    increasing order, from 0 to l.
If the filename ends with '.gz', the file will be automatically compressed
using gzip.
"""
if lmax is None:
lmax = coeffs.shape[1] - 1
else:
if lmax > coeffs.shape[1] - 1:
raise ValueError('lmax is greater than the input coefficients. '
'lmax = {:d}, lmax of input coefficients = {:d}.'
.format(lmax, coeffs.shape[1] - 1))
if filename[-3:] == '.gz':
filebase = filename[:-3]
else:
filebase = filename
with open(filebase, mode='w', encoding=encoding) as file:
if header is not None:
file.write(header + '\n')
if header2 is not None:
file.write(header2 + '\n')
for l in range(lmax+1):
for m in range(l+1):
                if errors is not None:
                    file.write('{:d}, {:d}, {:.16e}, {:.16e}\n'
                               .format(l, m, coeffs[0, l, m],
                                       errors[0, l, m]))
                    if m != 0:
                        file.write('{:d}, {:d}, {:.16e}, {:.16e}\n'
                                   .format(l, -m, coeffs[1, l, m],
                                           errors[1, l, m]))
                else:
                    file.write('{:d}, {:d}, {:.16e}\n'
                               .format(l, m, coeffs[0, l, m]))
                    if m != 0:
                        file.write('{:d}, {:d}, {:.16e}\n'
                                   .format(l, -m, coeffs[1, l, m]))
if filename[-3:] == '.gz':
with open(filebase, 'rb') as f_in:
with _gzip.open(filename, 'wb') as f_out:
_shutil.copyfileobj(f_in, f_out)
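# Usage sketch (added for illustration; assumes numpy is available). Writes a
# degree-2 set of real coefficients; ending the filename with '.gz' would
# additionally exercise the gzip compression path above, which relies on the
# module-level _gzip/_shutil imports not shown here.
import numpy as np

lmax = 2
coeffs = np.zeros((2, lmax + 1, lmax + 1))
coeffs[0, 1, 0] = 1.0
write_dov('coeffs.dov', coeffs, header='degree, order, value')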
| 14,482
|
def get_mode(input_list: list):
"""
    Gets the mode of a list. If the mode is not unique (or the list is empty), the function returns False.
    This is a slow way to accomplish it, but the mode can only be one of 4 possible values, so it should be OK.
"""
if len(input_list) == 0:
return False
distinguished_elements = {}
for element in input_list:
if element not in distinguished_elements:
distinguished_elements[element] = 0
# Count all of the elements and save them in a dictionary
for key, value in distinguished_elements.items():
distinguished_elements[key] = input_list.count(key)
# Get the mode
max_key = None
max_value = 0
for key, value in distinguished_elements.items():
if value > max_value:
max_key = key
max_value = value
# If there's a second mode, return False
for key, value in distinguished_elements.items():
if value == max_value and key != max_key:
return False
return max_key
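# Usage sketch (added for illustration): a unique mode is returned, while ties
# and empty input give False.
print(get_mode([1, 2, 2, 3]))   # 2
print(get_mode([1, 1, 2, 2]))   # False
print(get_mode([]))             # False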
| 14,483
|
def write_index(records_list, output_index):
"""Create and write a pandas df to the specified file"""
# Grab the first record dict
some_record = records_list[0]
# Map all values to integers
dtypes = {k: "int64" for k in some_record.keys()}
# Except for genome id that needs to be a string
dtypes["genome_id"] = "string"
index_df = pd.DataFrame.from_records(records_list)
index_df = index_df.astype(dtypes)
# Show missing genomes first
index_df = index_df.sort_values(by="patric_genome")
# Reorder the columns so that genome id and genome show first
cols_reordered = ["genome_id", "patric_genome"]
for col in index_df.columns.tolist():
if col not in cols_reordered:
cols_reordered.append(col)
index_df = index_df[cols_reordered]
# Write it
index_df.to_csv(output_index, sep="\t", index=False)
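# Usage sketch (added for illustration): the column names other than
# "genome_id" and "patric_genome" are hypothetical, and pandas must be
# imported as pd for write_index itself.
records = [
    {"genome_id": "83332.12", "patric_genome": 1, "n_features": 4000},
    {"genome_id": "511145.183", "patric_genome": 0, "n_features": 4300},
]
write_index(records, "index.tsv")  # tab-separated, genome_id and patric_genome first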
| 14,484
|
def qqe(close, length=None, smooth=None, factor=None, mamode=None, drift=None, offset=None, **kwargs):
"""Indicator: Quantitative Qualitative Estimation (QQE)"""
# Validate arguments
length = int(length) if length and length > 0 else 14
smooth = int(smooth) if smooth and smooth > 0 else 5
factor = float(factor) if factor else 4.236
wilders_length = 2 * length - 1
mamode = mamode if isinstance(mamode, str) else "ema"
close = verify_series(close, max(length, smooth, wilders_length))
drift = get_drift(drift)
offset = get_offset(offset)
if close is None: return
# Calculate Result
rsi_ = rsi(close, length)
_mode = mamode.lower()[0] if mamode != "ema" else ""
rsi_ma = ma(mamode, rsi_, length=smooth)
# RSI MA True Range
rsi_ma_tr = rsi_ma.diff(drift).abs()
    # Double smooth the RSI MA True Range using Wilder's length, then scale it
    # by the factor (default 4.236) to get the dynamic range (DAR).
smoothed_rsi_tr_ma = ma("ema", rsi_ma_tr, length=wilders_length)
dar = factor * ma("ema", smoothed_rsi_tr_ma, length=wilders_length)
# Create the Upper and Lower Bands around RSI MA.
upperband = rsi_ma + dar
lowerband = rsi_ma - dar
m = close.size
long = Series(0, index=close.index)
short = Series(0, index=close.index)
trend = Series(1, index=close.index)
qqe = Series(rsi_ma.iloc[0], index=close.index)
qqe_long = Series(npNaN, index=close.index)
qqe_short = Series(npNaN, index=close.index)
for i in range(1, m):
c_rsi, p_rsi = rsi_ma.iloc[i], rsi_ma.iloc[i - 1]
c_long, p_long = long.iloc[i - 1], long.iloc[i - 2]
c_short, p_short = short.iloc[i - 1], short.iloc[i - 2]
# Long Line
if p_rsi > c_long and c_rsi > c_long:
long.iloc[i] = npMaximum(c_long, lowerband.iloc[i])
else:
long.iloc[i] = lowerband.iloc[i]
# Short Line
if p_rsi < c_short and c_rsi < c_short:
short.iloc[i] = npMinimum(c_short, upperband.iloc[i])
else:
short.iloc[i] = upperband.iloc[i]
# Trend & QQE Calculation
# Long: Current RSI_MA value Crosses the Prior Short Line Value
# Short: Current RSI_MA Crosses the Prior Long Line Value
if (c_rsi > c_short and p_rsi < p_short) or (c_rsi <= c_short and p_rsi >= p_short):
trend.iloc[i] = 1
qqe.iloc[i] = qqe_long.iloc[i] = long.iloc[i]
elif (c_rsi > c_long and p_rsi < p_long) or (c_rsi <= c_long and p_rsi >= p_long):
trend.iloc[i] = -1
qqe.iloc[i] = qqe_short.iloc[i] = short.iloc[i]
else:
trend.iloc[i] = trend.iloc[i - 1]
if trend.iloc[i] == 1:
qqe.iloc[i] = qqe_long.iloc[i] = long.iloc[i]
else:
qqe.iloc[i] = qqe_short.iloc[i] = short.iloc[i]
# Offset
if offset != 0:
rsi_ma = rsi_ma.shift(offset)
qqe = qqe.shift(offset)
long = long.shift(offset)
short = short.shift(offset)
# Handle fills
if "fillna" in kwargs:
rsi_ma.fillna(kwargs["fillna"], inplace=True)
qqe.fillna(kwargs["fillna"], inplace=True)
qqe_long.fillna(kwargs["fillna"], inplace=True)
qqe_short.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
rsi_ma.fillna(method=kwargs["fill_method"], inplace=True)
qqe.fillna(method=kwargs["fill_method"], inplace=True)
qqe_long.fillna(method=kwargs["fill_method"], inplace=True)
qqe_short.fillna(method=kwargs["fill_method"], inplace=True)
# Name and Categorize it
_props = f"{_mode}_{length}_{smooth}_{factor}"
qqe.name = f"QQE{_props}"
rsi_ma.name = f"QQE{_props}_RSI{_mode.upper()}MA"
qqe_long.name = f"QQEl{_props}"
qqe_short.name = f"QQEs{_props}"
qqe.category = rsi_ma.category = "momentum"
qqe_long.category = qqe_short.category = qqe.category
# Prepare DataFrame to return
data = {
qqe.name: qqe, rsi_ma.name: rsi_ma,
# long.name: long, short.name: short
qqe_long.name: qqe_long, qqe_short.name: qqe_short
}
df = DataFrame(data)
df.name = f"QQE{_props}"
df.category = qqe.category
return df
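# Usage sketch (added for illustration; commented out because qqe depends on the
# pandas_ta helpers imported at module level: rsi, ma, verify_series, get_drift,
# get_offset, Series, DataFrame, npNaN, npMaximum, npMinimum). In pandas_ta the
# indicator is normally reached through the DataFrame accessor:
#
# import pandas_ta as ta
# out = df.ta.qqe(length=14, smooth=5, factor=4.236)
# # columns (from the naming code above): QQE_14_5_4.236, QQE_14_5_4.236_RSIMA,
# # QQEl_14_5_4.236, QQEs_14_5_4.236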
| 14,485
|
async def changelog(ctx):
"""Shows the latest release notes of DimBot"""
await ctx.reply("""
**__0.9.20 (May 20, 2021 1:58AM GMT+1)__**\n
Fixes a typo in pp
Updates the message of d.start to reflect latest changes
WhoPing can now detect Role ghost pings.
The mass ping limit for bots has been increased to 20.
WhoPing now scans DimBot commands again.
""")
| 14,486
|
def reference_col(
tablename, nullable=False, pk_name="id", foreign_key_kwargs=None, column_kwargs=None
):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = reference_col('category')
category = relationship('Category', backref='categories')
"""
foreign_key_kwargs = foreign_key_kwargs or {}
column_kwargs = column_kwargs or {}
return Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name), **foreign_key_kwargs),
nullable=nullable,
**column_kwargs
)
| 14,487
|
def launch(directory: str = "",movie: str = "",pdfFile: str = "",webPage: str = "") -> None:
"""
    Launches the appropriate application to open the specified document, web page, or directory.
    -----------------------------------------
    Flags:
    -----------------------------------------
    directory (string): A directory.
    -----------------------------------------
    movie (string): A movie file. The only supported movie formats are MPEG, Quicktime and Windows Media files; the file name must end with .mpg, .mpeg, .mp4, .wmv, .mov or .qt.
    -----------------------------------------
    pdfFile (string): A PDF (Portable Document Format) document. The file name must end with .pdf.
    -----------------------------------------
    webPage (string): A web page.
    -----------------------------------------
    Return Value:
    None: none
"""
pass
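# Usage sketch (added for illustration): this is a stub of the Maya command of
# the same name, so in practice it would be invoked through maya.cmds, e.g.
#
# from maya import cmds
# cmds.launch(webPage="https://www.autodesk.com")
# cmds.launch(directory="/tmp")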
| 14,488
|
def Ei(x, minfloat=1e-7, maxfloat=10000):
"""Ei integral function."""
minfloat = min(np.abs(x), minfloat)
maxfloat = max(np.abs(x), maxfloat)
def f(t):
return np.exp(t) / t
if x > 0:
return (quad(f, -maxfloat, -minfloat)[0] + quad(f, minfloat, x)[0])
else:
return quad(f, -maxfloat, x)[0]
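# Quick check (added for illustration): Ei(1) ≈ 1.8951, with scipy.special.expi
# as the reference. The quad/numpy imports are the ones Ei itself needs.
import numpy as np
from scipy.integrate import quad
from scipy.special import expi

print(Ei(1.0))    # ≈ 1.895 (up to quadrature error from the cutoffs)
print(expi(1.0))  # 1.8951178163559368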
| 14,489
|
def validate_tender_activate_with_language_criteria(request, **kwargs):
"""
    Raise an error if the CRITERION.OTHER.BID.LANGUAGE criterion was not created
    for one of the listed tender types when trying to change the status to active.
"""
tender = request.context
data = request.validated["data"]
tender_created = get_first_revision_date(tender, default=get_now())
if (
tender_created < RELEASE_ECRITERIA_ARTICLE_17
or request.validated["tender_src"]["status"] == data.get("status")
or data.get("status") not in ["active", "active.tendering"]
):
return
tenders_types = ["aboveThresholdUA", "aboveThresholdEU",
"competitiveDialogueUA", "competitiveDialogueEU",
"competitiveDialogueUA.stage2", "competitiveDialogueEU.stage2",
"esco", "closeFrameworkAgreementUA"]
tender_type = request.validated["tender"].procurementMethodType
needed_criterion = "CRITERION.OTHER.BID.LANGUAGE"
tender_criteria = [criterion.classification.id for criterion in tender.criteria if criterion.classification]
if (
tender_type in tenders_types
and needed_criterion not in tender_criteria
):
raise_operation_error(request, "Tender must contain {} criterion".format(needed_criterion))
| 14,490
|
def _MaybeMatchCharClassEsc (text, position, include_sce=True):
"""Attempt to match a U{character class escape
<http://www.w3.org/TR/xmlschema-2/#nt-charClassEsc>}
expression.
@param text: The complete text of the regular expression being
translated
@param position: The offset of the backslash that would begin the
potential character class escape
@param include_sce: Optional directive to include single-character
    escapes in addition to character class escapes. Default is
C{True}.
@return: C{None} if C{position} does not begin a character class
escape; otherwise a pair C{(cps, p)} as in
L{_MatchCharPropBraced}."""
if '\\' != text[position]:
return None
position += 1
if position >= len(text):
raise RegularExpressionError(position, "Incomplete character escape")
nc = text[position]
np = position + 1
cs = None
if include_sce:
cs = unicode.SingleCharEsc.get(nc)
if cs is None:
cs = unicode.MultiCharEsc.get(nc)
if cs is not None:
return (cs, np)
if 'p' == nc:
return _MatchCharPropBraced(text, np)
if 'P' == nc:
(cs, np) = _MatchCharPropBraced(text, np)
return (cs.negate(), np)
if (not include_sce) and (nc in unicode.SingleCharEsc):
return None
raise RegularExpressionError(np, "Unrecognized escape identifier '\\%s'" % (nc,))
| 14,491
|
def request(command, url, headers={}, data=None):
"""Mini-requests."""
class Dummy:
pass
parts = urllib.parse.urlparse(url)
c = http.client.HTTPConnection(parts.hostname, parts.port)
c.request(
command,
urllib.parse.urlunparse(parts._replace(scheme="", netloc="")),
headers=headers,
body=data,
)
r = c.getresponse()
result = Dummy()
result.status_code = r.status
result.headers = r.headers
result.content = r.read()
return result
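# Usage sketch (added for illustration): a plain-HTTP GET with the mini client
# above (it uses http.client.HTTPConnection, so https URLs are not handled, and
# running it needs network access plus the urllib/http.client module imports).
r = request("GET", "http://example.com/", headers={"Accept": "text/html"})
print(r.status_code)   # e.g. 200
print(len(r.content))  # body size in bytes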
| 14,492
|
def configure_hosts(hostfile):
"""Assuming no hostfile file exists, ask questions and create a new one"""
print "Configuring `ansible_hosts` file {0}...\n".format(hostfile)
print "What is the IP address of the virtual machine?"
machine_address = raw_input('--> ')
machine_address = machine_address.strip()
with open(hostfile, 'w') as ansible_hosts_file:
ansible_hosts_file.write("[webservers]\n")
ansible_hosts_file.write("{0}\n".format(machine_address))
| 14,493
|
def __ConvertOSBGToLocal(easting, northing, Eo, No, one_over_CSF):
"""
Convert OSBG36 Easting-Northing to local grid coordinates
:param easting: easting in OSBG36
:param northing: northing in OSBG36
:param Eo: delta easting of local grid
:param No: delta northing of local grid
    :param one_over_CSF: reciprocal of the combined scale factor (1/CSF)
    :returns: tuple (x_local, y_local) of coordinates in the local grid
"""
#x-coord in local grid
x_local = (easting - Eo)*(one_over_CSF)
#y-coord in local grid
y_local = (northing - No)*(one_over_CSF)
#return a tuple of coordinates
return (x_local, y_local)
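# Worked example (added for illustration): a point 1000 m east and 500 m north
# of the local grid origin, with a combined scale factor of exactly 1.
x, y = __ConvertOSBGToLocal(401000.0, 100500.0, 400000.0, 100000.0, 1.0)
print(x, y)  # 1000.0 500.0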
| 14,494
|
def sum_of_n_2(n):
"""
    Iterative summation using only addition.
    :param n: sum the integers from 1 to n
    :return: a tuple whose first element is the sum and whose second is the elapsed time
"""
start = time.time()
the_sum = 0
for i in range(1, n + 1):
the_sum = the_sum + i
end = time.time()
return the_sum, end - start
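# Usage sketch (added for illustration): Gauss' formula gives 10*11/2 = 55.
# The time import is the one sum_of_n_2 itself relies on.
import time

total, elapsed = sum_of_n_2(10)
print(total)               # 55
print(f"{elapsed:.6f} s")  # wall-clock time of the loop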
| 14,495
|
def TSA_t_g( temperature, temperature_vegetation, vegetation_fraction):
"""
    Ground (bare soil) temperature derived from the vegetation temperature,
    based on a two-source pixel split.
    Reference: Chen et al., 2005. IJRS 26(8):1755-1762, "Estimation of daily
    evapotranspiration using a two-layer remote sensing model."
    TSA_t_g(temperature, temperature_vegetation, vegetation_fraction)
"""
result = (temperature - (vegetation_fraction*temperature_vegetation)) / (1 - vegetation_fraction)
return result
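# Worked example (added for illustration): a mixed pixel at 300 K with half
# vegetation cover at 295 K implies a bare-soil component of
# (300 - 0.5*295) / (1 - 0.5) = 305 K.
print(TSA_t_g(300.0, 295.0, 0.5))  # 305.0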
| 14,496
|
def get_dict_buildin(dict_obj, _type=(int, float, bool, str, list, tuple, set, dict)):
"""
    Return the dictionary with any values that are not of a built-in type
    removed; the original dict is returned unchanged if nothing needs filtering.
"""
non_buildin = {key for key in dict_obj if not isinstance(dict_obj[key], _type)}
return dict_obj if not non_buildin else {key: dict_obj[key] for key in dict_obj if key not in non_buildin}
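# Usage sketch (added for illustration): non-built-in values are dropped, and
# the original dict comes back unchanged when nothing needs filtering.
class Custom: pass

print(get_dict_buildin({'a': 1, 'b': 'x', 'c': Custom()}))  # {'a': 1, 'b': 'x'}
print(get_dict_buildin({'a': 1}))                           # {'a': 1}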
| 14,497
|
def onNewSceneOpened(*args):
"""
Called when a new scene is opened, usually through a callback.
"""
if store.get(pcfg.use_piper_units):
loadDefaults()
if store.get(pcfg.use_piper_render):
loadRender()
| 14,498
|
def _generic_fix_integers(model):
"""
    Fix the integer variables of a model to their solution values and remove them.
    :param model: a model with (or able to compute) a solution
    :return: a continuous copy of the model with the integers substituted
"""
continuous_model = model.copy()
continuous_model.name = model.name + ' - continuous'
integer_variables = set()
constraints_with_integer_variables = []
if not hasattr(model, 'solution'):
model.logger.info('Model has no solution to fix the integers, calculating one')
model.optimize()
# We go through all the constraint descriptors and check if at least one of
# their variables is in the integer variable list
for this_cons in continuous_model._cons_dict.values():
has_integer_variable = False
for this_var in this_cons.constraint.variables:
if this_var.type in INTEGER_VARIABLE_TYPES:
                has_integer_variable = True
this_var_descriptor = this_var.name
integer_variables.add(this_var_descriptor)
constraints_with_integer_variables.append(this_cons.name)
int_dict = {continuous_model.variables[x]: model.solution.x_dict[x]
for x in integer_variables}
for this_cons_name in constraints_with_integer_variables:
this_cons = model._cons_dict[this_cons_name]
new_expr = this_cons.expr.subs(int_dict)
kind = type(this_cons)
ub = this_cons.constraint.ub
lb = this_cons.constraint.lb
the_id = this_cons.id
        # TODO: make faster, using cons.change_expr and ad-hoc subs dicts
continuous_model.remove_constraint(this_cons)
rebuild_constraint(classname=kind.__name__,
model=continuous_model,
this_id=the_id,
new_expr=new_expr,
lb=lb,
ub=ub)
for this_var in integer_variables:
# This_var is an InterfaceVariable object, we want the GenericVariable
# it belongs to
the_generic_var = continuous_model._var_dict[this_var.name]
continuous_model.remove_variable(the_generic_var)
continuous_model._push_queue()
continuous_model.solver.update()
    # This will update the values
print('Is the cobra_model still integer ? {}' \
.format(continuous_model.solver.is_integer))
return continuous_model
| 14,499
|