content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import subprocess
def get_rpm_package_list():
""" Gets all installed packages in the system """
    pkgstr = subprocess.check_output(['rpm', '-qa', '--queryformat', '%{NAME}\n'])
    return pkgstr.decode().splitlines()
|
3ffddefe7e3859f4bc76ae5581b89338c376e03f
| 3,649,000
|
def validate_ints(*args):
""" validates that inputs are ints only """
for value in args:
if not isinstance(value, int):
return False
return True
|
e56ebf78e072731188b2c8282289d307fcfaabdf
| 3,649,001
|
import tensorflow as tf
def smooth_l1_loss(y_true, y_pred):
"""
Computes the smooth-L1 loss.
Parameters
----------
y_true : tensor
Ground-truth targets of any shape.
y_pred : tensor
Estimates of same shape as y_true.
Returns
-------
loss : tensor
        The loss, summed over all elements from the last dim of y_true, i.e.,
        same shape as y_true without the last dim.
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
diff = tf.math.abs(y_true - y_pred)
less_than_one = tf.cast(tf.math.less(diff, 1.0), y_pred.dtype)
# smooth l1 loss, loss.shape == y_true.shape
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
# sum over all elements in the last dim
loss = tf.math.reduce_sum(loss, axis=-1)
return loss
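# Illustrative usage (added sketch, not part of the original record): for inputs of shape [8, 4],
# smooth_l1_loss(tf.zeros([8, 4]), tf.ones([8, 4])) returns a tensor of shape [8] filled with 2.0
# (|diff| = 1.0, so each element contributes 1.0 - 0.5 = 0.5, summed over the last dim of size 4).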
|
dcf18b7d14feecdbc8b6637ccc3d35ff1880f9d2
| 3,649,002
|
import numpy as np
def get_class_occurrences(layer_types):
    """
    Takes a numpy.ndarray of size (nb_points, 10) describing, for each point of the track,
    the cloud types identified at each of the 10 heights, and returns a binary occurrence
    (cloud present/absent) as the label for each point.
    The height and cloud type information is then lost.
    """
    layer_types = np.array(layer_types)
    occurrences = np.zeros((layer_types.shape[0], 1))
    for i, labels in enumerate(layer_types):
        # keep only cloud types (no 0 or -9): mark the point as soon as any cloud layer is found
        if np.any(np.asarray(labels) > 0):
            occurrences[i] = 1
    return occurrences
|
20c611089a50751e6a6b1d0ebaedc02474b04443
| 3,649,003
|
from datetime import timedelta
from decimal import Decimal
def parse_duration(datestring):
"""
    Parses an ISO 8601 duration into a float value containing seconds.
The following duration formats are supported:
-PnnW duration in weeks
-PnnYnnMnnDTnnHnnMnnS complete duration specification
    Years and months are not supported, their values must be zero!
"""
if not isinstance(datestring, str):
raise TypeError("Expecting a string %r" % datestring)
match = ISO8601_PERIOD_REGEX.match(datestring)
if not match:
raise ValueError("Unable to parse duration string %r" % datestring)
groups = match.groupdict()
for key, val in groups.items():
if key not in ('separator', 'sign'):
if val is None:
groups[key] = "0n"
# print groups[key]
if key in ('years', 'months'):
groups[key] = Decimal(groups[key][:-1].replace(',', '.'))
else:
# these values are passed into a timedelta object,
# which works with floats.
groups[key] = float(groups[key][:-1].replace(',', '.'))
if groups["years"] != 0 or groups["months"] != 0:
raise ValueError("Unable to parse duration string %r (Non zero year or month)" % datestring)
else:
ret = timedelta(days=groups["days"], hours=groups["hours"],
minutes=groups["minutes"], seconds=groups["seconds"],
weeks=groups["weeks"])
if groups["sign"] == '-':
ret = timedelta(0) - ret
return ret.total_seconds()
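# Illustrative usage (added sketch; assumes the module-level ISO8601_PERIOD_REGEX follows the usual
# isodate pattern): parse_duration("PT1H30M") -> 5400.0 and parse_duration("-P1DT1H") -> -90000.0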
|
4fce243684fd2305198ae49693327a110f781a1d
| 3,649,004
|
import math
from functools import reduce
import operator
from collections import Counter
def knn(position, data_set, labels, k):
    """
    k-nearest-neighbour classification.
    :param position: the point to classify
    :param data_set: the sample data
    :param labels: the label collection
    :param k: the number of neighbours to consider
    :return: the predicted label
    """
    distance_list = []
    for index, item in enumerate(data_set):
        distance_list.append((
            labels[index],
            math.sqrt(reduce(operator.add, [(v - position[i]) ** 2 for i, v in enumerate(item)]))
        ))
    # sort by distance, closest first
    distance_list = sorted(distance_list, key=lambda x: x[1])
    # majority vote among the k nearest neighbours
    result = Counter([val[0] for val in distance_list[:k]])
    result_labels = sorted(result.items(), key=lambda x: x[1], reverse=True)
    return result_labels[0][0]
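# Illustrative usage (added sketch with made-up points):
# knn([0, 0], [[0, 1], [5, 5], [1, 0]], ['a', 'b', 'a'], k=2)  # -> 'a'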
|
137f87c6a63fbafd3140694386dfd418108ad5b9
| 3,649,005
|
def _cals(raw):
"""Helper to deal with the .cals->._cals attribute change."""
try:
return raw._cals
except AttributeError:
return raw.cals
|
a08273a559b780022c04fe5d5d60a71c600fd481
| 3,649,006
|
def irnn_data_iterator(X, y, batch_size, math_engine):
"""Slices numpy arrays into batches and wraps them in blobs"""
def make_blob(data, math_engine):
"""Wraps numpy data into neoml blob"""
shape = data.shape
if len(shape) == 2: # data
# Wrap 2-D array into blob of (BatchWidth, Channels) shape
return neoml.Blob.asblob(math_engine, data,
(1, shape[0], 1, 1, 1, 1, shape[1]))
elif len(shape) == 1: # dense labels
# Wrap 1-D array into blob of (BatchWidth,) shape
return neoml.Blob.asblob(math_engine, data,
(1, shape[0], 1, 1, 1, 1, 1))
else:
assert(False)
start = 0
data_size = y.shape[0]
while start < data_size:
yield (make_blob(X[start : start+batch_size], math_engine),
make_blob(y[start : start+batch_size], math_engine))
start += batch_size
|
5a6b6726d3d0f78929b2551b8720bfbbb39471eb
| 3,649,007
|
def naive_forecast(series, steps_ahead=3, freq='D', series_name='naive'):
"""
    Naive forecast: every future value is set to the last available observation.
INPUT:
:param series: pandas Series of data,
:param steps_ahead: number of steps into the future to predict, default is 3,
:param freq: (str) representation of a time frequency,
:param series_name: str
OUTPUT:
:return: series with extrapolated values equal to the last observation.
"""
obs = [series.iloc[-1] for _ in range(steps_ahead)]
future = set_future_series(forecasted_values=obs,
series_name=series_name,
last_date=series.index[-1],
steps_ahead=steps_ahead,
frequency=freq)
return future
|
1b64edb39ab986d2e850ec5329fb2ed8ae5cd136
| 3,649,008
|
import os
async def delete_original_path(request):
"""
    After the processing of the whole data source, this API can be used to delete the original zip
    corresponding to a particular username
"""
username = request.args.get("username")
if not username:
raise APIBadRequest("Username for this datasource is required")
res = await get_status(request.app.config[DATASOURCE_NAME]["tables"]["status_table"], username)
result = list(res)
    if not result:
        raise APIBadRequest(f"No status present for {DATASOURCE_NAME} for username {username}")
    logger.debug(result[0].get("username"))
result = result[0]
logger.debug(result)
path_to_be_deleted = result.get("original_path")
logger.warning(f"Path to be deleted is {path_to_be_deleted}")
try:
os.remove(path_to_be_deleted)
logger.success(f"{path_to_be_deleted} is deleted now")
    except Exception as e:
        return response.json(
            {
                'error': True,
                'success': False,
                "message": f"Original path at {path_to_be_deleted} couldn't be deleted because of {e.__str__()}",
                "data": None
            })
return response.json(
{
'error': False,
'success': True,
"message": f"Original path at {path_to_be_deleted} is deleted",
"data": None
})
|
d92379c3fa1f58656a059029fb6555fce298ccbc
| 3,649,009
|
def demonstration():
"""
    This will render a template that displays all of the form objects if it's
    a GET request. If the user is attempting to POST then this view will push
    the data to the database.
"""
    #this part is a little hard to understand. flask-wtforms does an implicit
#call each time you create a form object. It attempts to see if there's a
#request.form object in this session and if there is it adds the data from
#the request to the form object.
registration_form = RegistrationForm()
#Before we attempt to validate our form data we have to set our select
#field choices. This is just something you need to do if you're going to
#use WTForms, even if it seems silly.
populate_form_choices(registration_form)
#This means that if we're not sending a post request then this if statement
#will always fail. So then we just move on to render the template normally.
if flask.request.method == 'POST' and registration_form.validate():
#If we're making a post request and we passed all the validators then
#create a registered user model and push that model to the database.
print("DEBUGGGGG")
registered_user = RegisteredUser(
first_name=registration_form.data['first_name_field'],
last_name=registration_form.data['last_name_field'],
address_line_one=registration_form.data['address_line_one_field'],
address_line_two=registration_form.data['address_line_two_field'],
city=registration_form.data['city_field'],
state_id=registration_form.data['state_select_field'],
country_id=registration_form.data['country_select_field'],)
db.session.add(registered_user)
db.session.commit()
print("DEBUGGGGGENDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD")
flask.flash("This data was saved to the database!")
return flask.redirect(flask.url_for(
'user_detail',user_id=registered_user.registered_id))
return flask.render_template(
template_name_or_list='registration.html',
registration_form=registration_form,)
|
17e798069d40bf5644b0ca0b4ffbd9ecea53aafd
| 3,649,010
|
def show_current_task():
"""
    Show the task that is currently running for the logged-in user.
:return:
"""
try:
current_user_name = session["user_name"]
current_user = RedisService.get_user(current_user_name)
current_task = TaskService.get_working_tasks(user_id=current_user.id)[0]
if current_task:
hook_rule = RedisService.get_task(current_task.id)["hook_rule"]
unscaned_url_num = UrlService.count(where=(Url.task_id == current_task.id, Url.status != TaskStatus.DONE))
scaned_url_num = UrlService.count(where=(Url.task_id == current_task.id, Url.status == TaskStatus.DONE))
total_url_num = unscaned_url_num + scaned_url_num
if current_task.task_status in [TaskStatus.KILLED, TaskStatus.DONE]:
percent = 100
else:
percent = 0 if total_url_num == 0 else (scaned_url_num / total_url_num) * 100
response_data = jsonify(status=200, message="查询成功",
data={'receiver_emails': current_task.receivers_email,
'task_name': current_task.task_name,
'create_time': current_task.created_time.strftime("%Y-%m-%d %H:%M"),
'percent': percent,
'unscaned_url_num': unscaned_url_num, 'scaned_url_num': scaned_url_num,
'total_url_num': total_url_num, 'hook_rule': hook_rule,
'task_id': current_task.id, "task_access_key": current_task.access_key,
'task_status': current_task.task_status, 'user_name': current_user_name})
return response_data
except Exception as e:
if isinstance(e, IndexError):
return jsonify(status=400, message="获取失败", data={"extra_info": "后台无正在运行任务,请登录后台并创建任务"})
logger.exception("show_current_task rasie error")
return jsonify(status=500, message="获取失败", data={"extra_info": "未知异常,可以联系管理员到后台查看"})
|
09e0077232343e46db606b9a3642bfb5d6b17a69
| 3,649,011
|
from rsc.service.ImageService import ImageService
def access_image(access_code:str):
"""
    Download an image.
post header : {
Content-Type: application/json,
access_token: access_token from vans-token-manager
client_id: client_id from vans-token-manager conf. create by developers.
}
:return:
"""
try:
        # fetch the image
service = ImageService()
image_data, mime = service.get_image(access_code)
data = json_res_success({"image":image_data, "mime":mime})
return render_json(data)
except Exception as e:
return E400(str(e))
|
d1ac9efd9e4ba7c7fb9f63d2f0d188e09367fee9
| 3,649,012
|
def workaround_issue_20(handler):
"""
Workaround for
https://github.com/pytest-dev/pytest-services/issues/20,
disabling installation of a broken handler.
"""
return hasattr(handler, 'socket')
|
20d688aedad9e771362d97ad9cac391e7dbfac32
| 3,649,013
|
def item_count(sequences, sequence_column_name):
"""
    input: DataFrame `sequences`; returns the largest item id found in `sequence_column_name`.
"""
item_max_id = sequences[sequence_column_name].map(max).max()
return int(item_max_id)
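# Illustrative usage (added sketch; the column is assumed to hold list-like sequences):
# item_count(pd.DataFrame({'items': [[1, 5], [3, 2]]}), 'items')  # -> 5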
|
9bcb64ff3389ef34ed297bca4f55b4de66ac5966
| 3,649,014
|
from torch import Tensor
from torch.fft import fft
def bare_stft(x: Tensor, padded_window: Tensor, hop_size: int) -> Tensor:
"""Compute STFT of real 1D signal.
This function does not handle padding of x, and the window tensor.
This function assumes fft_size = window_size.
Args:
x: [..., n_sample]
padded_window: [fft_size], a window padded to fft_size.
hop_size: Also referred to as the frame shift.
Returns:
n_frame: see frame_signal definition.
X: [..., n_frame, fft_size],
where n_frame = n_sample // hop_size
"""
fft_size = len(padded_window)
# Squash x's batch_sizes
batch_size = x.shape[:-1]
n_sample = x.size(-1)
squashed_x = x.reshape(-1, 1, n_sample)
# shape: [prod(batch_size), 1, n_sample]
framed_squashed_x = frame_signal(squashed_x, fft_size, hop_size)
# shape: [prod(batch_size), fft_size, n_frame]
windowed_framed_squashed_x = \
framed_squashed_x * padded_window.unsqueeze(-1)
squashed_X = fft(
windowed_framed_squashed_x.transpose(-1, -2), dim=-1
) # shape: [prod(batch_size), n_frame, fft_size]
X = squashed_X.reshape(*batch_size, *(squashed_X.shape[1:]))
# shape: [*batch_size, n_frame, fft_size]
return X
|
ccefbdc55478de91640a322735dd02f87a185578
| 3,649,015
|
def IsDragResultOk(*args, **kwargs):
"""IsDragResultOk(int res) -> bool"""
return _misc_.IsDragResultOk(*args, **kwargs)
|
87e4c1968b3e5d7adbdc4cfb393481fca647beba
| 3,649,016
|
import os
def set_config(args):
"""
    Get the config from file and override it with parameters parsed from the command line.
"""
configs = 'configs'
cfg = getattr(__import__(configs, fromlist=[args.config_file]),
args.config_file)
config = cfg.res50_config()
config['data_url'] = DATA_PATH
config['log_dir'] = OUTPUT_PATH
config['model_dir'] = OUTPUT_PATH
config['ckpt_dir'] = OUTPUT_PATH
# set param from parse
config['iterations_per_loop'] = int(args.iterations_per_loop)
config['max_train_steps'] = int(args.max_train_steps)
config['debug'] = args.debug
config['eval'] = args.eval
config['model_dir'] = args.model_dir
config['batch_size'] = args.batch_size
config['global_batch_size'] = config['batch_size'] * config['rank_size']
config['num_classes'] = args.num_classes
config['num_epochs'] = args.num_epochs
config['learning_rate_maximum'] = args.learning_rate_maximum
    config['restore_path'] = os.path.join(DATA_PATH, "ckpt",
                                          args.restore_path)
print("iterations_per_loop :%d" % (config['iterations_per_loop']))
print("max_train_steps :%d" % (config['max_train_steps']))
print("debug :%s" % (config['debug']))
print("eval :%s" % (config['eval']))
print("model_dir :%s" % (config['model_dir']))
print("batch_size :%d" % (config['batch_size']))
if config['num_epochs']:
print("num_epochs :%d" % (config['num_epochs']))
print("learning_rate_maximum :%f" % (config['learning_rate_maximum']))
print("num_classes :%d" % (config['num_classes']))
print("restore_path :%s" % (config['restore_path']))
return config
|
22ac7d7c0562a5098a8e32fa4b27c9a756471f68
| 3,649,017
|
from xls2xlsx import XLS2XLSX
import os
def excel_convert_xls_to_xlsx(xls_file_path='',xlsx_file_path=''):
"""
Converts given XLS file to XLSX
"""
try:
# Checking the path and then converting it to xlsx file
if os.path.exists(xls_file_path):
# converting xls to xlsx
x2x = XLS2XLSX(xls_file_path)
x2x.to_xlsx(xlsx_file_path)
return True
except ValueError:
print("Input is not supported, could not convert string to float. Please check the inputs, and try again.")
text_to_speech("Input is not supported, could not convert string to float. Please check the inputs, and try again.", show=False)
except PermissionError:
print("Please close the excel file, and try again.")
text_to_speech("Please close the excel file, and try again.", show=False)
except op.utils.exceptions.InvalidFileException:
print("We currently support only : xlsx,.xlsm,.xltx,.xltm files. Please try again with one of those file formats.")
text_to_speech("we currently support only : .xlsx,.xlsm,.xltx,.xltm files. Please try again with one of those file formats.", show=False)
except Exception as e:
errMsg = f"Error in converting file to xlsx format : {str(e)}"
return errMsg
|
0718c5039cc19fa54d7320698d9d1f81106ff788
| 3,649,018
|
import os
import keras
def load_model(fn=None):
"""Load the stored model.
"""
if fn is None:
fn = os.path.dirname(os.path.abspath(__file__)) + \
"/../models/model_default.h5"
return keras.models.load_model(fn)
|
267e0bbc082948bc6b47d593aa28248646f4c61f
| 3,649,019
|
from functools import wraps
from itertools import product
import numpy as np
def element_wise(counter_method):
"""This is a decorator function allowing multi-process/thread input.
Note that this decorator should always follow the decorator 'tag_maker'.
"""
def _make_iterator(*args):
"""Make a compound iterator from a process iterator and
a thread one.
Note that 'Application' case should not execute this
function."""
monitor_level = args[1]
arg_pid = args[2]
if hasattr(arg_pid, '__iter__'):
            pid_itr = (i for i in range(arg_pid[0], arg_pid[1]))
else:
pid_itr = (arg_pid,)
if monitor_level == 'Thread':
arg_tid = args[3]
if hasattr(arg_tid, '__iter__'):
                tid_itr = (i for i in range(arg_tid[0], arg_tid[1]))
else:
tid_itr = (arg_tid,)
if monitor_level == 'Process':
return_itr = pid_itr
elif monitor_level == 'Thread':
return_itr = (pid_itr, tid_itr)
return return_itr
@wraps(counter_method)
def _element_wise(*args):
"""Distribute multi-process/thread input"""
if args[1] == 'Thread':
pid_itr, tid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid, tid)
for pid, tid in product(pid_itr, tid_itr)]
return np.array(retval)
elif args[1] == 'Process':
pid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid) for pid in pid_itr]
return np.array(retval)
elif args[1] == 'Application':
return np.array(counter_method(*args))
else:
            print('Unknown monitor level')
return _element_wise
|
44c00b9a40b7dba53dcfaac52bc866341a924d01
| 3,649,020
|
import requests
def get_datacite_dates(prefix):
"""Get sumbitted date for DataCite DOIs with specific prefix"""
doi_dates = {}
doi_urls = {}
url = (
"https://api.datacite.org/dois?query=prefix:"
+ prefix
+ "&page[cursor]=1&page[size]=500"
)
next_link = url
meta = requests.get(next_link).json()["meta"]
for j in progressbar(range(meta["totalPages"])):
r = requests.get(next_link)
data = r.json()
for doi in data["data"]:
date = doi["attributes"]["registered"].split("T")[0]
doi_dates[doi["id"]] = date
doi_urls[doi["id"]] = doi["attributes"]["url"]
if "next" in data["links"]:
next_link = data["links"]["next"]
else:
next_link = None
return doi_dates, doi_urls
|
2b75cdfbb7c5f7085ab95f22ec601fbccdac07ea
| 3,649,021
|
def rate_answer():
"""
**Rates an already given answer**
**Args:**
* json:
* {"insight" : String with the name of the Insight
* "paper_id" : String with the paper_id which is in our case the completet link to the paper
* "upvote" : Boolean if the answer was upvoted(= true) or downvoted (= false)
* "answer" : String with the Answer}
**Returns:**
* json:
* {'status': 'success'}
"""
response_object = {'status': 'success'}
#fetch data from request
post_data = request.get_json()
in_insight_name = post_data.get('insight')
in_paper_id = post_data.get('paper_id')
in_paper_id = url_checker(in_paper_id)
in_upvote = post_data.get('upvote')
in_answer = post_data.get('answer')
#query 'information'
inf = Information.query.filter(Information.paper_id == in_paper_id).filter(Information.insight_name==str(in_insight_name)).first()
#query 'answers'
ans = Answers.query.filter(Answers.information_id==inf.information_id).all()
#upvote correct answer
if (in_upvote):
for a in ans:
if (a.answer==in_answer):
a.answer_upvotes = a.answer_upvotes + 1
a.answer_score = a.answer_score + 1
#downvote correct answer
else :
for a in ans:
if (a.answer==in_answer):
a.answer_downvotes = a.answer_downvotes + 2
a.answer_score = a.answer_score - 2
db.session.commit()
return jsonify(response_object)
|
5efc00e015d2127f91462348050f2d445530690d
| 3,649,022
|
def get_ip():
"""
Query the ipify service (https://www.ipify.org) to retrieve this machine's
public IP address.
:rtype: string
:returns: The public IP address of this machine as a string.
:raises: ConnectionError if the request couldn't reach the ipify service,
or ServiceError if there was a problem getting the IP address from
ipify's service.
"""
try:
resp = _get_ip_resp()
except RequestException:
raise ConnectionError("The request failed because it wasn't able to reach the ipify service. This is most likely due to a networking error of some sort.")
if resp.status_code != 200:
raise ServiceError('Received an invalid status code from ipify:' + str(resp.status_code) + '. The service might be experiencing issues.')
return resp.text
|
d560c90986cc99be3ad07c0099743821104e514a
| 3,649,023
|
import ast
def update_plot(p1, p2, arrow, txt, ax, fig, reset_points, line):
"""
Given a line with an agent's move and the current plot, update
the plot based on the agent's move.
"""
l = line.strip()
if 'Agent score' in l:
txt.remove()
txt = plt.text(2, 33, 'Agent Score: {0:.2f}'.format(float(l.split()[2])),
fontsize=8)
reset_points = True
else:
p = ast.literal_eval(l[l.find('('):])
if 'actually at point' in l:
p1 = Circle(p, radius=0.2, facecolor='yellow')
ax.add_patch(p1)
elif 'actually attempting to reach point' in l:
p2 = Circle(p, radius=0.2, facecolor='green')
ax.add_patch(p2)
elif 'now at point' in l:
arrow = YAArrow(fig, p2.center, p1.center, width=0.1,
headwidth=0.5, facecolor='red')
ax.add_patch(arrow)
elif 'Resetting agent to point' in l:
p2 = Circle(p, radius=1, facecolor='green')
ax.add_patch(p2)
arrow = YAArrow(fig, p2.center, p1.center, width=0.25,
headwidth=1, facecolor='red')
ax.add_patch(arrow)
return p1, p2, arrow, txt, ax, fig, reset_points
|
579b4f218fc17eae9b8595e916571e24eba27cb5
| 3,649,024
|
import csv
def upload_file_view(request):
"""Upload file page and retrieve headers"""
data = {}
global ROW_COUNT
if request.method == "GET":
return render(request, "pages/upload-file.html", data)
try:
if request.FILES:
csv_file = request.FILES['csv_file']
request.session['csv'] = str(csv_file)
if not csv_file.name.endswith('.csv'):
messages.error(request, 'File is not CSV type')
return redirect('upload-file')
decoded_file = csv_file.read().decode('utf-8').splitlines()
reader = csv.DictReader(decoded_file)
data['fieldnames'] = reader.fieldnames
data['filename'] = csv_file.name
fs = FileSystemStorage()
fs.save(csv_file.name, csv_file)
file = FilesStatus.objects.create(
user=request.user,
file_name=csv_file.name,
)
ROW_COUNT = sum(1 for row in reader)
request.session['file_status'] = file.id
else:
messages.error(request, 'No file was selected.')
return redirect('upload-file')
except IOError:
return messages.error(request, 'Could not read file')
return render(request, 'pages/upload-file.html', data)
|
62d76c398aee02a61a3dd8dd7ac1251367786c9c
| 3,649,025
|
from typing import Optional
def get_user_by_login_identifier(user_login_identifier) -> Optional[UserSchema]:
"""Get a user by their login identifier.
:param str user_login_identifier: The user's login identifier, either their \
``email`` or ``display_name`` are valid inputs
:return: The discovered user if they exist
:rtype: Optional[UserSchema]
"""
user = get_user_by_email(user_email=user_login_identifier)
if not user:
return get_user_by_display_name(user_display_name=user_login_identifier)
return user
|
821ba79ed56ec9b918dbec60af91e9b472cdb689
| 3,649,026
|
def decode_fixed64(buf, pos):
"""Decode a single 64 bit fixed-size value"""
return decode_struct(_fixed64_fmt, buf, pos)
|
298df6d28f77132bac6d9924b9628af5efa940b5
| 3,649,027
|
def from_xfr(xfr, zone_factory=Zone, relativize=True):
"""Convert the output of a zone transfer generator into a zone object.
@param xfr: The xfr generator
@type xfr: generator of dns.message.Message objects
@param relativize: should names be relativized? The default is True.
It is essential that the relativize setting matches the one specified
to dns.query.xfr().
@type relativize: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
rd.choose_relativity(z.origin, relativize)
zrds.add(rd)
z.check_origin()
return z
|
cc3aa11a8ff3dff6cf0609c3527daa193602b522
| 3,649,028
|
import datetime
def compare_sql_datetime_with_string(filter_on, date_string):
"""Filter an SQL query by a date or range of dates
Returns an SQLAlchemy `BinaryExpression` that can be used in a call to
`filter`.
`filter_on` should be an SQLAlchemy column expression that has a date or
datetime value.
`date_string` is a string that includes date(s) in format
`YYYY-MM-DD` and a range operator such as `>` or `<=`.
In full:
====================== ========================================
Query Description
====================== ========================================
YYYY-MM-DD Matches dates on day
>YYYY-MM-DD Matches dates after day
>=YYYY-MM-DD Matches dates on or after day
<YYYY-MM-DD Matches dates before day
<=YYYY-MM-DD Matches dates on or before day
YYYY-MM-DD..YYYY-MM-DD Matches dates between days (inclusively)
====================== ========================================
Examples:
>>> from app.models.main import AuditEvent
>>> # Equivalent to AuditEvent.created_at >= datetime.date(2012, 1, 1)
>>> compare_sql_datetime_with_string(AuditEvent.created_at, ">=2012-01-01")
<sqlalchemy.sql.elements.BinaryExpression object ...>
    >>> # Equivalent to AuditEvent.created_at.between(datetime.date(2010, 1, 1), datetime.date(2019, 1, 31))
>>> AuditEvent.query.filter(
compare_sql_datetime_with_string(AuditEvent.created_at, "2010-01-01..2019-01-31"))
<app.models.main.AuditEvent.query_class object ...>
"""
filter_test = None
def parse_date(s):
return datetime.datetime.strptime(s, DATE_FORMAT)
if date_string.startswith(">="):
date = parse_date(date_string[2:])
filter_test = (filter_on >= date)
elif date_string.startswith(">"):
date = parse_date(date_string[1:])
filter_test = (filter_on > date)
elif date_string.startswith("<="):
date = parse_date(date_string[2:])
filter_test = (filter_on <= date)
elif date_string.startswith("<"):
date = parse_date(date_string[1:])
filter_test = (filter_on < date)
elif ".." in date_string:
args = date_string.partition("..")
from_ = parse_date(args[0])
to_ = parse_date(args[2])
filter_test = filter_on.between(from_, to_)
else:
date = parse_date(date_string)
filter_test = filter_on.between(date, date + datetime.timedelta(days=1))
return filter_test
|
2938872ffdd3e30c2d364ae30e3a93fa560e55ef
| 3,649,029
|
def get_users():
"""get_users() -> Fetch all users in the database"""
connect() # Connect
cursor.execute("SELECT * FROM users") # Select all users
item = cursor.fetchall()
users = []
for user in item:
users.append(format_user(user)) # Format the users
disconnect()
return users
|
e294406af8abbd0fa813beeadcd8b01552e4d206
| 3,649,030
|
def get_decoder_self_attention_bias(length):
"""Calculate bias for decoder that maintains model's autoregressive property.
Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.
Args:
length: int length of sequences in batch.
Returns:
float tensor of shape [1, 1, length, length]
"""
with tf.name_scope("decoder_self_attention_bias"):
valid_locs = tf.linalg.band_part(tf.ones([length, length]), -1, 0)
valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
decoder_bias = _NEG_INF * (1.0 - valid_locs)
return decoder_bias
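# Illustrative note (added sketch): get_decoder_self_attention_bias(3) yields a [1, 1, 3, 3] tensor that
# is 0.0 on and below the diagonal and _NEG_INF (a large negative module-level constant) above it, so
# attention weights for future positions become effectively zero after the softmax.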
|
a5de0984715cbc07c16c0ab5a5dc24edb7ca7602
| 3,649,031
|
from typing import Union
from typing import Tuple
import torch
from torch import Tensor
import math
import numpy as np
from copy import deepcopy
def rollout_discrete(
x_grid: Tensor,
idx: Union[int, Tensor],
model: Model,
best_f: Union[float, Tensor],
bounds: Tensor,
quadrature: Union[str, Tuple] = "qmc",
horizon: int = 4,
num_y_samples: int = 10,
):
"""
continuous domain rollout, expectation estimated using (quasi) Monte Carlo or Gaussian-Hermite quadrature
EI_rollout(x) = E_y[ max(y-y0,0) + EI_rollout(x'| x, y) ], where x'=argmax EI(x' | x, y)
define f(y) = max(y-y0,0) + EI_rollout(x'|x,y)
then
EI_rollout(x) = \int w(y) f(y) dy
where the weight function w(y) is a Gaussian density function N(mu, sigma^2)
We can estimate this integral using quasi Monte Carlo samples from w(y)
or use Gauss-Hermite quadrature, as in Lam et al. (2016):
such a integration can be transformed into the standard Gaussian-Hermite quadrature formulation
EI_rollout(x) = 1/sqrt(pi) \int exp(-t^2) f(sqrt(2)*sigma*t+mu) dt, where t = (y-mu)/sqrt(2)/sigma
We first generate Gauss-Hermite quadrature sample locations t_i and weights w_i using numpy.polynomial.hermite.hermgauss
then estimate the expectation by
EI_rollout(x) \approx 1/sqrt(pi) \sum_i w_i f(sqrt(2)*sigma*t_i +mu)
:param x: a single point
:param model: the GP model
:param best_f: current best observed value
:param bounds: bounds of the domain, shape (2, d)
:param base_acquisition:
:param quadrature: Monte Carlo or Quasi Monte Carlo
:param horizon: rollout horizon
:param num_y_samples: number of (quasi) Monte Carlo samples for estimating the integral
:return:
"""
if horizon == 1:
acq_func = ExpectedImprovement(model=model, best_f=best_f)
return acq_func(x).item()
x = x_grid[idx]
# compute posterior
posterior = model.posterior(x)
if isinstance(quadrature, str) and quadrature == "qmc": # quasi Monte Carlo
with torch.no_grad():
sampler = SobolQMCNormalSampler(num_samples=num_y_samples)
samples = sampler(posterior).squeeze().numpy()
weights = torch.ones(num_y_samples) / num_y_samples
elif isinstance(quadrature, Tuple):
mu = posterior.mean.item()
sigma = torch.sqrt(posterior.variance).item()
samples, weights = np.polynomial.hermite.hermgauss(num_y_samples)
samples = np.sqrt(2.0) * sigma * samples + mu
weights /= np.sqrt(math.pi)
improvement_of_samples = np.zeros(num_y_samples)
for i in range(num_y_samples):
y_sample = samples[i]
one_step_improvement = max(y_sample - best_f, 0)
fake_model: Model = deepcopy(model)
x0 = model.train_inputs[0]
y0 = model.train_targets
train_x = torch.cat([x0, x.unsqueeze(0)], -2)
train_y = torch.cat([y0, Tensor([y_sample])])
fake_model.reinitialize(train_X=train_x, train_Y=train_y)
best_f_new = max(best_f, y_sample) # maximization problem
acq_func = ExpectedImprovement(model=fake_model, best_f=best_f_new)
        # evaluate EI over the candidate grid and move to the most promising next point
        ei_values = acq_func(x_grid)
        idx = torch.argmax(ei_values)
future_reward = rollout(
x_grid,
idx,
model=fake_model,
best_f=best_f_new,
bounds=bounds,
quadrature=quadrature,
horizon=horizon - 1,
num_y_samples=num_y_samples,
)
improvement_of_samples[i] = one_step_improvement + future_reward
return improvement_of_samples.dot(weights)
|
c400e0042a4ec236030d7e5d19615441b4cc0456
| 3,649,032
|
import itertools
import numpy as np
import torch
def get_accumulative_accuracies(test_loaders, taskcla, result_file, network_cls='resnet32'):
""" Confusion matrix with progressively more classes considered """
iter_model = iter_task_models(network_cls, taskcla, result_file)
accuracies = np.zeros((len(taskcla), len(taskcla)))
classes_so_far = 0.
for task_model, model in enumerate(iter_model):
for task_eval in range(0, task_model+1):
full_test_loader = itertools.chain.from_iterable(test_loaders[:task_eval+1])
with torch.no_grad():
totals = 0.
correct = 0.
logits_mask = np.arange(sum([taskcla[i][1] for i in range(0, task_eval+1)]))
for inputs, targets in full_test_loader:
inputs = inputs.to(device)
targets = targets.to(device)
outputs = torch.cat(model(inputs), dim=1)
outputs = outputs[:, logits_mask]
preds = outputs.argmax(dim=1)
correct += (preds == targets).double().sum()
totals += len(targets)
accuracies[task_model, task_eval] = correct/totals
return accuracies
|
3b10f89b80318c3bb2776af3d336c2b87d2a623e
| 3,649,033
|
import copy
import paddle
import paddle.nn as nn
def readout_oper(config):
"""get the layer to process the feature asnd the cls token
"""
class Drop(object):
"""drop class
just drop the cls token
"""
def __init__(self, config):
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
def __call__(self, x):
x = x[:, self.token_num:]
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
class Add(object):
"""add class
add the cls token
"""
def __init__(self, config):
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
def __call__(self, x):
token = x[:, :self.token_num]
token = paddle.sum(token, axis=1).unsqueeze(1)
x = x[:, self.token_num:]
x = x + token
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
class Proj(nn.Layer):
"""porject class
use a linear layer to confuse the feature and the cls token
"""
def __init__(self, config):
super(Proj, self).__init__()
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
self.proj = nn.Sequential(
nn.Linear(2 * config.MODEL.TRANS.HIDDEN_SIZE, config.MODEL.TRANS.HIDDEN_SIZE),
nn.GELU()
)
def forward(self, x):
token = x[:, :self.token_num]
token = paddle.sum(token, axis=1).unsqueeze(1)
x = x[:, self.token_num:]
token = token.expand_as(x)
x = paddle.concat([x, token], axis=-1)
x = self.proj(x)
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
if config.MODEL.DPT.READOUT_PROCESS == 'drop':
return [copy.deepcopy(Drop(config)) for _ in range(4)]
if config.MODEL.DPT.READOUT_PROCESS == 'add':
return [copy.deepcopy(Add(config)) for _ in range(4)]
if config.MODEL.DPT.READOUT_PROCESS =='project':
return nn.LayerList([copy.deepcopy(Proj(config)) for _ in range(4)])
return None
|
36d682851e24535b7b000ae1b343bb90ca2077d6
| 3,649,034
|
import json
def stream_n_messages(request, n):
"""Stream n JSON messages"""
n = int(n)
response = get_dict(request, 'url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response, default=json_dumps_default) + '\n'
return Response(generate_stream(), headers={
"Content-Type": "application/json",
})
|
ea8ec1dd939cc43baa3367696f44956b2aafa780
| 3,649,035
|
import pandas as pd
def read_covid():
"""Read parsed covid table"""
return pd.read_csv(_COVID_FILE, parse_dates=["date"])
|
fd99256808be1772106260b1da47850de0584adb
| 3,649,036
|
import sys
import os
def install_pip(python=sys.executable, *,
info=None,
downloaddir=None,
env=None,
upgrade=True,
**kwargs
):
"""Install pip on the given Python executable."""
if not python:
python = getattr(info, 'executable', None) or sys.executable
# python -m ensurepip
args = ['-m', 'ensurepip', '-v'] # --verbose
if upgrade:
args.append('-U') # --upgrade
res = _utils.run_python(*args, python=python, **kwargs)
ec, _, _ = res
if ec == 0 and is_pip_installed(python, env=env):
return res
##############################
# Fall back to get-pip.py.
if not downloaddir:
downloaddir = '.'
os.makedirs(downloaddir, exist_ok=True)
# download get-pip.py
filename = os.path.join(downloaddir, 'get-pip.py')
if not os.path.exists(filename):
print("Download %s into %s" % (GET_PIP_URL, filename))
_utils.download(GET_PIP_URL, filename)
# python get-pip.py
argv = [python, '-u', filename]
version = get_best_pip_version(info or python)
if version:
argv.append(version)
res = _utils.run_cmd(argv, env=env)
ec, _, _ = res
if ec != 0:
# get-pip.py was maybe not properly downloaded: remove it to
# download it again next time
os.unlink(filename)
return res
|
2c010be3baba35883ce1bb3003c8e6b184506845
| 3,649,037
|
def define_network(*addr):
"""gives all network related data or host addresses if requested
addr = tuple of arguments netaddr/mask[nb of requested hosts]
"""
if len(addr) == 2:
# provides list of host-addresses for this subnet
# we do this by calling the generator host_g
host_g = addr[0].hosts()
return [next(host_g).exploded for i in range(addr[1])]
else:
netdef = [(' Network Address:',
addr[0].network_address.exploded),
(' Broadcast Address:',
addr[0].broadcast_address.exploded),
(' Valid Hosts:', 2 ** (32 - addr[0].prefixlen)-2),
(' Wildcard Mask:', addr[0].hostmask.exploded),
(' Mask bits:', addr[0].prefixlen),
]
return [(' '+addr[0].network_address.exploded+'/32', '')] \
if addr[0].prefixlen == 32 else netdef
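# Illustrative usage (added sketch; addr[0] is assumed to be an ipaddress.IPv4Network):
# import ipaddress
# define_network(ipaddress.ip_network("192.168.1.0/30"), 2)  # -> ['192.168.1.1', '192.168.1.2']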
|
905cf702fda005645c608b9dadb84f3659d991c1
| 3,649,038
|
from typing import List
def init_anim() -> List:
"""Initialize the animation."""
return []
|
121fff8b4102c2961449d970307e762bd983bdbe
| 3,649,039
|
def keep_digits(txt: str) -> str:
"""Discard from ``txt`` all non-numeric characters."""
return "".join(filter(str.isdigit, txt))
|
34387003ea03651dd2582b3c49f1095c5589167b
| 3,649,040
|
import re
def camel_case_split(identifier):
"""Split camelCase function names to tokens.
Args:
identifier (str): Identifier to split
    Returns:
        (list): split tokens with original case preserved. ex: ['camel', 'Case']
"""
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
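# Illustrative usage: camel_case_split("getHTTPResponseCode") -> ['get', 'HTTP', 'Response', 'Code']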
|
f212bbe5cc33cb31bea023f726abf60a1491b7df
| 3,649,041
|
from typing import Any
def _ensure_meadowrun_sqs_access_policy(iam_client: Any) -> str:
"""
Creates a policy that gives permission to read/write SQS queues for use with
grid_task_queue.py
"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.create_policy
ignore_boto3_error_code(
lambda: iam_client.create_policy(
PolicyName=_MEADOWRUN_SQS_ACCESS_POLICY_NAME,
PolicyDocument=_MEADOWRUN_SQS_ACCESS_POLICY_DOCUMENT,
),
"EntityAlreadyExists",
)
return (
f"arn:aws:iam::{_get_account_number()}:policy/"
f"{_MEADOWRUN_SQS_ACCESS_POLICY_NAME}"
)
|
6bafad83dcf0ed82eac626ad36b9d73416757f37
| 3,649,042
|
import numpy as np
import pandas as pd
def get_subset(dframe, strata, subsetno):
"""This function extracts a subset of the data"""
df_subset = pd.DataFrame(columns=list(dframe)) #initialize
df_real = dframe.dropna() #get rid of nans
edges = np.linspace(0, 1, strata+1) #edges of data strata
for i in range(0, strata):
df_temp = df_real[(df_real['gender diversity score'] > edges[i]) &
(df_real['gender diversity score'] < edges[i+1])]
        temp_ind = np.round(np.linspace(0, len(df_temp) - 1, subsetno // strata)).astype(int)
df_subset = pd.concat([df_subset,
df_temp.sort_values(by=['gender diversity score']).
iloc[temp_ind, :].reset_index(drop=True)], ignore_index=True)
return df_subset
|
c1657cbce23bb222f68bc5b7efe3fe54dfcc26bd
| 3,649,043
|
import logging
def create_logger(name: str) -> logging.Logger:
"""Create logger, adding the common handler."""
if name is None:
raise TypeError("name is None")
logger = logging.getLogger(name)
# Should be unique
logger.addHandler(_LOGGING_HANDLER)
return logger
|
c4c888345586718f8b476368ef118656d9650469
| 3,649,044
|
def say_hello():
""" Say hello """
return utils.jsonify_success({
'message': 'Hello {}! You are logged in.'.format(current_user.email)
})
|
52c0f572c0bd521a3ab12f1781d39c37130a78d3
| 3,649,045
|
import numpy as np
def relu(x):
"""The rectifier activation function. Only activates if argument x is
positive.
Args:
x (ndarray): weighted sum of inputs
"""
# np.clip(x, 0, np.finfo(x.dtype).max, out=x)
# return x
return np.where(x >= 0, x, 0)
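# Illustrative usage: relu(np.array([-1.5, 0.0, 2.0])) -> array([0., 0., 2.])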
|
61b7a4ce252c72dd69251a8783c572c8128a01c5
| 3,649,046
|
import networkx as nx
from heapq import heappush, heappop
from itertools import count
def k_shortest_paths(G, source, target, k=1, weight='weight'):
"""Returns the k-shortest paths from source to target in a weighted graph flux_graph.
Parameters
----------
flux_graph : NetworkX graph
source : node
Starting node
target : node
Ending node
k : integer, optional (default=1)
The number of shortest paths to find
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
lengths, paths : lists
Returns a tuple with two lists.
The first list stores the length of each k-shortest path.
The second list stores each k-shortest path.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
    >>> G = nx.complete_graph(5)
    >>> print(k_shortest_paths(G, 0, 4, 4))
([1, 2, 2, 2], [[0, 4], [0, 1, 4], [0, 2, 4], [0, 3, 4]])
Notes
------
Edge weight attributes must be numerical and non-negative.
Distances are calculated as sums of weighted edges traversed.
"""
if source == target:
return ([0], [[source]])
length, path = nx.single_source_dijkstra(G, source, target, weight=weight)
if target not in length:
raise nx.NetworkXNoPath("node %s not reachable from %s" % (source, target))
lengths = [length[target]]
paths = [path[target]]
c = count()
B = []
G_original = G.copy()
for i in range(1, k):
for j in range(len(paths[-1]) - 1):
spur_node = paths[-1][j]
root_path = paths[-1][:j + 1]
edges_removed = []
for c_path in paths:
if len(c_path) > j and root_path == c_path[:j + 1]:
u = c_path[j]
v = c_path[j + 1]
if G.has_edge(u, v):
edge_attr = G.edge[u][v]
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
for n in range(len(root_path) - 1):
node = root_path[n]
# out-edges
for u, v, edge_attr in G.edges_iter(node, data=True):
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
if G.is_directed():
# in-edges
for u, v, edge_attr in G.in_edges_iter(node, data=True):
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
spur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)
if target in spur_path and spur_path[target]:
total_path = root_path[:-1] + spur_path[target]
total_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]
heappush(B, (total_path_length, next(c), total_path))
for e in edges_removed:
u, v, edge_attr = e
G.add_edge(u, v, edge_attr)
if B:
(l, _, p) = heappop(B)
lengths.append(l)
paths.append(p)
else:
break
return (lengths, paths)
|
68918c78b1f33c07cd3494286a00b1c020256b56
| 3,649,047
|
def allowed_file(filename):
"""
Check the image extension
Currently, only support jpg, jpeg and png
"""
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
|
635f89b33c9150b5d7b68415bb85bb8c1d644d1f
| 3,649,048
|
from sys import maxsize
import numpy as np
def __best_tour(previous: int, prices: np.ndarray, excess: float, best_solution: set, visited: np.ndarray) -> int:
""" Ищем в лучшем туре """
search = __search(best_solution, previous)
node, alpha = -1, maxsize
for edge in search:
temp = edge[0] if edge[0] != previous else edge[1]
if not visited[temp] and prices[temp] < alpha < excess:
node, alpha = temp, prices[temp]
return node if node != -1 else -1
|
9e6ebc1b19c15e31edb4cd1421599c3ed6b9fb90
| 3,649,049
|
import numpy as np
def classical_gaussian_kernel(k, sigma):
"""
A function to generate a classical Gaussian kernel
:param k: The size of the kernel, an integer
    :param sigma: standard deviation of the Gaussian distribution
:return: A Gaussian kernel, a numpy array of shape (k,k)
"""
w = np.linspace(-(k - 1) / 2, (k - 1) / 2, k)
x, y = np.meshgrid(w, w)
kernel = 0.5*np.exp(-0.5*(x**2 + y**2)/(sigma**2))/(np.pi*sigma**2)
return kernel
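# Illustrative usage: classical_gaussian_kernel(5, 1.0) returns a (5, 5) array whose centre value is
# 1 / (2 * pi) ~= 0.159 and which decays symmetrically towards the corners.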
|
e1a94134e465f72a7d49e8bb950eb7a8ba97ac54
| 3,649,050
|
import pandas as pd
def collection_to_csv(collection):
"""
    Gather every document of the collection into a single pandas DataFrame
    :param collection: Collection
    :return: concatenated DataFrame (empty on failure)
"""
print("collection_to_csv")
final_df = pd.DataFrame()
try:
dict4json = []
n_documents = 0
for document in collection.get():
result_dict = document.to_dict()
dict4json.append(result_dict)
n_documents += 1
for result in dict4json:
lst = result["result"]
df = pd.DataFrame(lst)
df = df.reindex(sorted(df.columns), axis=1)
final_df = pd.concat([final_df, df])
    except Exception as e:
        print(e)
        return pd.DataFrame()
    return final_df
|
5d62e0fe1eebb190be47a05d822b8714d396125f
| 3,649,051
|
import six
def validate_hatch(s):
"""
Validate a hatch pattern.
A hatch pattern string can have any sequence of the following
characters: ``\\ / | - + * . x o O``.
"""
if not isinstance(s, six.text_type):
raise ValueError("Hatch pattern must be a string")
unique_chars = set(s)
unknown = (unique_chars -
set(['\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O']))
if unknown:
raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
return s
|
4ddf056dab2681759a462005effc4ae5488a4461
| 3,649,052
|
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
def iot_hub_service_factory(cli_ctx, *_):
"""
Factory for importing deps and getting service client resources.
Args:
cli_ctx (knack.cli.CLI): CLI context.
*_ : all other args ignored.
Returns:
iot_hub_resource (IotHubClient.iot_hub_resource): operational resource for
working with IoT Hub.
"""
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_IOTHUB).iot_hub_resource
|
a38ae4a7fedaf8dcbaccba0873e4f519cb51af17
| 3,649,053
|
def filter_example(config, example, mode="train"):
"""
Whether filter a given example according to configure.
:param config: config contains parameters for filtering example
:param example: an example instance
:param mode: "train" or "test", they differs in filter restrictions
:return: boolean
"""
if mode == "train":
return (len(example["ans_sent_tokens"]) > config.sent_limit or
len(example["ques_tokens"]) > config.ques_limit or
(example["y2_in_sent"] - example["y1_in_sent"]) >
config.ans_limit)
elif mode == "test":
return (len(example["ans_sent_tokens"]) > config.sent_limit or
len(example["ques_tokens"]) > config.ques_limit)
else:
print("mode must be train or test")
|
9c49990fe36c0a82d0a99a62fe810a19cd5a8749
| 3,649,054
|
def _dict_flatten(data):
"""Return flattened dict of input dict <data>.
After https://codereview.stackexchange.com/revisions/21035/3
Parameters
----------
data : dict
Input dict to flatten
Returns
-------
fdata : dict
Flattened dict.
"""
def expand(key, value):
"""Expand list."""
if isinstance(value, dict):
return [(key+'>'+k, v) for k, v in _dict_flatten(value).items()]
else:
return [(key, value)]
return dict([item for k, v in data.items() for item in expand(k, v)])
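# Illustrative usage: _dict_flatten({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}})
# -> {'a': 1, 'b>c': 2, 'b>d>e': 3}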
|
a1db4a552ced44efa45fe4f86fbfe04871463356
| 3,649,055
|
def merkleroot(elements):
"""
Args:
elements (List[str]): List of hashes that make the merkletree.
Returns:
str: The root element of the merkle tree.
"""
return Merkletree(elements).merkleroot
|
cd5d1e530fda62f9b92a51a03f5fd2cbbe6a9e62
| 3,649,056
|
def category_start(update, context):
"""Separate function for category selection to filter the options with inline keyboard."""
update.message.reply_text(
"Choose a Group",
reply_markup=create_category_inline(trx_categories.keys(), "group_sel"),
)
return CATEGORY_REPLY_CHOOSE_TRX_OPTS
|
cfc299d8b81785d8418bfb4c280cf88e4137448b
| 3,649,057
|
import argparse
import functools
import sys
import collections
import csv
def betatest():
"""Main Function Definition"""
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--host',\
required=True,\
help="site hostname")
parser.add_argument('--outputfile',\
'-o',\
required=True,\
help="write results to this file")
parser.add_argument('--ntests',\
'-n',\
default=1,\
type=int,\
help="# of requests per path")
parser.add_argument('--timeout',\
'-t',\
default=30,\
type=float,\
help="timeout (seconds)")
parser.add_argument('--delay',\
'-d',\
default=0,\
type=float,\
help="wait between requests (ms)")
parser.add_argument('--processes',\
'-p',\
default=32,\
type=int,\
help="# of parallel processes")
parser.add_argument('--addresses',\
'-a',\
nargs='+',\
help="addresses to use instead of DNS")
args = parser.parse_args()
# Request the urls in parallel
pool = Pool(args.processes)
try:
results = pool.map(functools.partial(process_request,\
timeout=args.timeout,\
delay=args.delay),
generate_requests(paths=PATHS,\
host=args.host,\
addresses=args.addresses,\
tests_per_path=args.ntests))
except KeyboardInterrupt:
pool.terminate()
sys.exit(1)
# Group results by everything, and count
groupby = collections.defaultdict(lambda: [0, 0.0, None])
for result, elapsed in results:
groupby[result][0] += 1
groupby[result][1] += elapsed
# Apply some heuristics to analyze each result
    for result, info in sorted(groupby.items()):
        info[2] = analyze_result(result)
    # Write the results as csv to our destination file
    with open(args.outputfile, 'w', newline='') as file_pointer:
        writer = csv.writer(file_pointer, quoting=csv.QUOTE_ALL)
        for result, (count, elapsed, outcome) in sorted(groupby.items()):
row = list(result)
row.append(count)
row.append(elapsed / count)
row.append(outcome)
writer.writerow(row)
return "beta test completed"
|
f5c211571aa4e4f63d07ff9b237b4697817e346e
| 3,649,058
|
def create_player(mode, race, char_name):
""" Create the player's character """
# Evil
if mode == 2:
if race == 1:
player = character.Goblin(char_name, 1, app)
elif race == 2:
player = character.Orc(char_name, 1, app)
elif race == 3:
player = character.Uruk(char_name, 1, app)
else:
player = character.Wizard(char_name, 1, app)
# Good
else:
if race == 1:
player = character.Human(char_name, 1, app)
elif race == 2:
player = character.Wizard(char_name, 1, app)
        elif race == 3:
            player = character.Warrior(char_name, 1, app)
        else:
            player = character.Wizard(char_name, 1, app)
        """elif race == 4:
            player = character.Hobbit(char_name, 1, app)
        elif race == 6:
            player = character.Bishop(char_name, 1, app)
        else:
            player = character.Wizard(char_name, 1, app)"""
return player
|
30e143f0cca1053d6e10df0c438065747611e4af
| 3,649,059
|
def _get_item(i, j, block):
"""
Returns a single item from the block. Coords must be in block space.
"""
return block[i, j]
|
45a12ecb3959a75ad8f026616242ba64174441fc
| 3,649,060
|
def calculate_potentials_python(volume, mass, volume_material_mass, mass_material_mass):
""" Easy to read python function which calculates potentials using two Python loops
Still uses NumPy for the rote math.
"""
potentials = np.zeros(len(volume), dtype=np.float32)
for volume_i, volume_coord in enumerate(volume):
for mass_coord in mass:
potentials[volume_i] += (G * volume_material_mass * mass_material_mass) / np.sqrt(
np.square(volume_coord - mass_coord).sum())
return potentials
|
73395d31bb470ac96b0c05a140fe6e77f56e2d88
| 3,649,061
|
def rect2sphericalcoord3D(
v: list[Number, Number, Number]
) -> list[float, float, float]:
"""Does a 3D coordinate transform
from rectangular to spherical
coordinate system
p = The length of the hypotenuse
or the magnitude of the
vector
theta = is the angle between the
positive x-axis and p
(azimuth)
phi = is the angle between the
positive z-axis and p
(colatitude)
    Args:
        v: [x, y, z] rectangular
           coordinates
Returns:
[p: float,
theta: float,
phi: float]
"""
p = vmag(v)
return [p, atan(v[1] / v[0]),
acos(v[2] / p)]
|
8be197341e576465af389f8e20aea25a59fc3d1e
| 3,649,062
|
def GetAssignmentByKeyName(key_name):
"""Gets the assignment with the specified key name."""
return Assignment.get_by_key_name(key_name)
|
a14b9a2033bb995d53219568278d298f305861d7
| 3,649,063
|
import numpy as np
def fit_integer_type(n, is_signed=True):
"""Determine the minimal space needed to store integers of maximal value n
"""
if is_signed:
m = 1
types = [np.int8, np.int16, np.int32, np.int64]
else:
m = 0
types = [np.uint8, np.uint16, np.uint32, np.uint64]
if n < 2 ** (8 - m):
return types[0]
elif n < 2 ** (16 - m):
return types[1]
elif n < 2 ** (32 - m):
return types[2]
elif n < 2 ** (64 - m):
return types[3]
else:
raise ValueError('Values are too big to be represented by 64 bits \
integers!')
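# Illustrative usage: fit_integer_type(300) -> numpy.int16, fit_integer_type(300, is_signed=False) -> numpy.uint16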
|
bd9ebd447893509b1144a32bac9f9757988b4a60
| 3,649,064
|
def admin_userforms_order_by_field(user_id):
""" Set User's forms order_by preference
"""
if not g.is_admin:
return jsonify("Forbidden"), 403
data = request.get_json(silent=True)
if not 'order_by_field_name' in data:
return jsonify("Not Acceptable"), 406
field_names = [ field['name'] for field in default_admin_userforms_field_index ]
if not data['order_by_field_name'] in field_names:
return jsonify("Not Acceptable"), 406
g.current_user.admin['userforms']['order_by'] = data['order_by_field_name']
flag_modified(g.current_user, 'admin')
g.current_user.save()
return jsonify(
{'order_by_field_name': g.current_user.admin['userforms']['order_by']}
), 200
|
4d92dc6f9d562a91509ec2b03aff75c4bc376ead
| 3,649,065
|
def check_all_rows(A):
"""
    Check that no row of the 2-dimensional matrix contains more than one queen
"""
for row_inx in range(len(A)):
# compute sum of row row_inx
if sum(A[row_inx]) > 1:
return False
return True
|
e39f4ca3e401c02b13c5b55ed4389a7e6deceb40
| 3,649,066
|
import vtk
import numpy as np
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
def extract_largest_connected_region(vtk_im, label_id):
"""
    Extract the largest connected region of a vtk image
Args:
vtk_im: vtk image
label_id: id of the label
Return:
new_im: processed vtk image
"""
fltr = vtk.vtkImageConnectivityFilter()
fltr.SetScalarRange(label_id, label_id)
fltr.SetExtractionModeToLargestRegion()
fltr.SetInputData(vtk_im)
fltr.Update()
new_im = fltr.GetOutput()
py_im = vtk_to_numpy(vtk_im.GetPointData().GetScalars())
py_mask = vtk_to_numpy(new_im.GetPointData().GetScalars())
mask = np.logical_and(py_im==label_id, py_mask==0)
py_im[mask] = 0
vtk_im.GetPointData().SetScalars(numpy_to_vtk(py_im))
return vtk_im
|
c9510da15b4d3cade331aa3b9b3625af5706e417
| 3,649,067
|
import subprocess
import sys
def _run_cli_cmd(cmd_list):
"""Run a shell command and return the error code.
:param cmd_list: A list of strings that make up the command to execute.
"""
try:
return subprocess.call(cmd_list)
except Exception as e:
print(str(e))
sys.exit(1)
|
473d28ec5469ff195b716edfe32723e2379303f3
| 3,649,068
|
def group_set_array_data_ptr(d):
"""
call view%set_external_data_ptr
hide c_loc call and add target attribute
"""
# XXX - should this check the type/shape of value against the view?
# typename - part of function name
# nd - number of dimensions
# f_type - fortran type
# shape - :,:, to match nd
if d['rank'] == 0:
extents_decl = 'extents(1)'
extents_asgn = 'extents(1) = 1_SIDRE_IndexType'
else:
extents_decl = 'extents(%d)' % d['rank']
extents_asgn = 'extents = shape(value, kind=SIDRE_IndexType)'
return """
! Generated by genfsidresplicer.py
! This function does nothing if view name does not exist in group.
subroutine group_set_array_data_ptr_{typename}{nd}(grp, name, value)
use iso_c_binding
implicit none
class(SidreGroup), intent(IN) :: grp
character(len=*), intent(IN) :: name
{f_type}, target, intent(IN) :: value{shape}
integer(C_INT) :: lname
type(SIDRE_SHROUD_view_capsule) view
! integer(SIDRE_IndexType) :: {extents_decl}
! integer(C_INT), parameter :: type = {sidre_type}
type(C_PTR) addr, viewptr
lname = len_trim(name)
! {extents_asgn}
viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
if (c_associated(view%addr)) then
#ifdef USE_C_LOC_WITH_ASSUMED_SHAPE
addr = c_loc(value)
#else
call SIDRE_C_LOC(value{lower_bound}, addr)
#endif
call c_view_set_external_data_ptr_only(view, addr)
! call c_view_apply_type_shape(rv%cxxmem, type, {rank}, extents)
endif
end subroutine group_set_array_data_ptr_{typename}{nd}""".format(
extents_decl=extents_decl,
extents_asgn=extents_asgn, **d)
|
36a18ca9099edf24d37386103f111bde7753ed46
| 3,649,069
|
from typing import cast
def releaseTagName(version: Version) -> str:
"""
Compute the name of the release tag for the given version.
"""
return cast(str, version.public())
|
9f8e350a42e2b50657a87e89e592ae340ba3ee96
| 3,649,070
|
import time
def get_calibrated_values(timeout=10):
"""Return an instance of CalibratedValues containing the 6 spectral bands."""
t_start = time.time()
while _as7262.CONTROL.get_data_ready() == 0 and (time.time() - t_start) <= timeout:
pass
with _as7262.CALIBRATED_DATA as DATA:
return CalibratedValues(DATA.get_r(),
DATA.get_o(),
DATA.get_y(),
DATA.get_g(),
DATA.get_b(),
DATA.get_v())
|
69e5921b3e487ab3f3c3abbae8a2b237eb75b033
| 3,649,071
|
def customizable_admin(cls):
"""
Returns a customizable admin class
"""
class CustomSearchableAdmin(BaseAdmin):
form = customizable_form(cls)
def __init__(self, *args, **kwargs):
super(CustomSearchableAdmin, self).__init__(*args, **kwargs)
# add the custom fields to the fieldsets (if present)
# @see customizable_form and ContentTypeCustomField
if self.fieldsets:
if isinstance(self.fieldsets, tuple):
self.fieldsets = list(self.fieldsets)
fieldset = ContentTypeCustomField.get_fieldset_for_model(self.form._meta.model)
if fieldset: self.fieldsets.append(fieldset)
def get_form(self, request, obj=None, **kwargs):
## modify visualization for certain users
#if not request.user.is_superuser:
# self.exclude.append('field_to_hide')
# self.inlines.remove(UserInline)
# pass
form = super(CustomSearchableAdmin, self).get_form(request, obj, **kwargs)
return form
def get_changelist(self, request, **kwargs):
return CustomChangeList
def queryset(self, request):
qs = super(CustomSearchableAdmin, self).queryset(request)
#qs = qs.filter(Q(is_staff=True) | Q(is_superuser=True))
return qs
def has_change_permission(self, request, obj=None):
has_permission = super(CustomSearchableAdmin, self).has_change_permission(request, obj)
#if obj is not None and not request.user.is_superuser and request.user.id != obj.user.id:
return has_permission
return CustomSearchableAdmin
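# Hedged usage sketch (Django admin registration). `Article` is a hypothetical
# model; the helper only needs a model class to build the admin.
# from django.contrib import admin
# admin.site.register(Article, customizable_admin(Article))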
|
dc6ced817b78b7cbf31cbf788d28fcff421d6b02
| 3,649,072
|
def restoreIm(transformeddata, pca, origshape, datamean, datastd):
"""Given a PCA object and transformeddata that consists of projections onto
the PCs, return images by using the PCA's inverse transform and reshaping to
the provided origshape."""
if transformeddata.shape[0] < transformeddata.shape[1]:
transformeddata = np.transpose(transformeddata)
data = pca.inverse_transform(transformeddata)
# restore the shape and scale of the data before plotting
data = data*datastd
data = data + datamean
data = np.transpose(data)
return data.reshape(origshape)
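# Minimal round-trip sketch, assuming scikit-learn's PCA (any object exposing an
# inverse_transform method should behave the same way here).
import numpy as np
from sklearn.decomposition import PCA
imgs = np.random.rand(20, 8, 8)                       # 20 hypothetical 8x8 images
flat = imgs.reshape(20, -1)
datamean, datastd = flat.mean(axis=0), flat.std(axis=0) + 1e-8
pca = PCA(n_components=5).fit((flat - datamean) / datastd)
proj = pca.transform((flat - datamean) / datastd)     # shape (20, 5): projections onto PCs
restored = restoreIm(proj, pca, (8, 8, 20), datamean, datastd)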
|
ce8713648b166f7ce35bb47df33a6b99e2de8687
| 3,649,073
|
import random
import sys
def ga_multi(gene_info, ga_info):
"""Main loop which sets DEAP objects and calls a multi objective EA algorithm.
Parameters
-------
gene_info, GeneInfo class
See respective class documentation.
ga_info, GAInfo class
See respective class documentation.
Returns
-------
pop, DEAP object
stats, DEAP object
hof, DEAP object
See post_run function for examples of how to interpret results.
"""
random.seed(ga_info.seed)
creator.create("Fitness", base.Fitness, weights=(1.0, ))
creator.create("Individual", set, fitness=creator.Fitness)
toolbox = base.Toolbox()
toolbox.register("indices", indiv_builder, gene_info)
toolbox.register("individual", tools.initIterate, creator.Individual,
toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", single_eval, gene_info)
if len(gene_info.obj_list) < 2:
print('Attempted to start multi objective GA with single objective.',
file=sys.stderr)
if ga_info.cross_meth == 'ops':
toolbox.register("mate", cx_OPS, gene_info)
elif ga_info.cross_meth == 'sdb':
toolbox.register("mate", cx_SDB, gene_info)
else:
raise AttributeError('Invalid crossover string specified')
toolbox.register("mutate", mut_flipper, gene_info)
toolbox.register("select", tools.selTournament, tournsize=ga_info.nk)
pop = toolbox.population(n=ga_info.pop)
hof = tools.HallOfFame(1)
# Empty, as SoR objects are special
stats = tools.Statistics()
eaSoR(ga_info, gene_info, pop, toolbox, ga_info.cxpb, ga_info.mutpb,
ga_info.gen, stats, halloffame=hof)
return pop, stats, hof
|
60bd65e90800cfa855b061d0cf5f431e1cac944e
| 3,649,074
|
def sample(population, k=None):
"""Behaves like random.sample, but if k is omitted, it default to
randint(1, len(population)), so that a non-empty sample is returned."""
population = list(population)
if k is None:
k = randint(1, len(population))
return random_sample(population, k)
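# Usage sketch (assumes `randint` and `random_sample` resolve to random.randint
# and random.sample in the enclosing module, as the wrapper implies):
# sample(range(10))     # non-empty subset of random size, e.g. [3, 7, 9]
# sample(range(10), 3)  # exactly three elements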
|
46f7f3365c4574ed9cb09b54f25e30ff23fb3b8d
| 3,649,075
|
import numpy as np
def dense_to_one_hot(array, class_num, dtype_, axis=-1):
    """
    Convert an integer label array into a one-hot encoding along `axis`.
    The size of `array` along `axis` must be 1; it is expanded to `class_num`
    while the rest of the shape is kept.
    :param array: numpy array of integer labels
    :param class_num: number of one-hot classes
    :param dtype_: numpy dtype of the returned array
    :param axis: dimension to expand into the one-hot encoding
    :return: one-hot encoded array
    Algorithm: transpose the array so the target axis (base_point) becomes the
    last dimension, allocate a zeros array [..., class_num], then use
    np.arange(num_labels) * class_num as flat offsets so that the right entry
    of `.flat` in each row can be set to 1 (with dtype `dtype_`).
    """
array_shape = array.shape
assert array_shape[axis] == 1, DenseToOneHotLogger.error(Fore.RED + 'dim {0} should be size: 1'.format(axis))
if array.max() >= class_num:
        raise ValueError('class_num (a) should be bigger than the max of array (b), '
                         'but a vs. b = {0} vs. {1}'.format(class_num, array.max()))
base_point = axis % len(array_shape)
    DenseToOneHotLogger.debug("start generate transpose_axes and back_transpose_axes")
    # move the one-hot axis (base_point) to the last position and remember the
    # inverse permutation so the result can be transposed back afterwards
    transpose_axes = [i for i in range(len(array_shape)) if i != base_point] + [base_point]
    back_transpose_axes = list(np.argsort(transpose_axes))
    DenseToOneHotLogger.debug('transpose')
    array = np.transpose(array, transpose_axes)
shape = list(array.shape)
shape[-1] = class_num
num_labels = 1
for i in list(np.transpose(array).shape)[0:]:
num_labels *= i
pass
index_offset = np.arange(num_labels) * class_num
label_one_hot = np.zeros(shape, dtype=dtype_)
label_one_hot.flat[index_offset + array.ravel()] = 1
DenseToOneHotLogger.debug("re transpose")
    label_one_hot = np.transpose(label_one_hot, back_transpose_axes)
return label_one_hot
pass
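# Usage sketch (commented out because the function also requires the module's
# DenseToOneHotLogger to be configured):
# labels = np.array([[0], [2], [1]])                                  # shape (3, 1)
# one_hot = dense_to_one_hot(labels, class_num=3, dtype_=np.float32)  # shape (3, 3)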
|
a045fefd6d397de9aa1e7a25f7788ab10bcf7c94
| 3,649,076
|
def add_momentum_ta(df, high, low, close, volume, fillna=False):
"""Add trend technical analysis features to dataframe.
Args:
df (pandas.core.frame.DataFrame): Dataframe base.
high (str): Name of 'high' column.
low (str): Name of 'low' column.
close (str): Name of 'close' column.
fillna(bool): if True, fill nan values.
Returns:
pandas.core.frame.DataFrame: Dataframe with new features.
"""
df['momentum1'] = rsi(df[close], n=14, fillna=fillna)
df['momentum2'] = money_flow_index(df[high], df[low], df[close],
df[volume], n=14, fillna=fillna)
df['momentum3'] = tsi(df[close], r=25, s=13, fillna=fillna)
return df
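# Hedged usage sketch: assumes an OHLCV DataFrame and that rsi,
# money_flow_index and tsi are importable in this module (as the body implies).
# import pandas as pd
# ohlcv = pd.read_csv('prices.csv')   # hypothetical file with High/Low/Close/Volume columns
# ohlcv = add_momentum_ta(ohlcv, high='High', low='Low', close='Close',
#                         volume='Volume', fillna=True)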
|
76239057526272874c34eb4250f642745dfc9990
| 3,649,077
|
def get_experiment_type(filename):
"""
Get the experiment type from the filename.
The filename is assumed to be in the form of:
'<reliability>_<durability>_<history kind>_<topic>_<timestamp>'
:param filename: The filename to get the type.
    :return: A string where the timestamp is taken out from the filename.
"""
file_type = ''
filename = filename.split('/')[-1]
elements = filename.split('_')
for i in range(0, len(elements) - 3):
file_type += '{}_'.format(elements[i])
file_type = file_type[:-1]
return file_type
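# Usage example (hypothetical filename following the documented pattern, where
# the timestamp occupies the last three underscore-separated fields):
assert get_experiment_type(
    '/tmp/reliable_volatile_keep_last_topic1_2021_05_12'
) == 'reliable_volatile_keep_last_topic1'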
|
e1853a95d034b8f9e36ca65f6f5d200cbf4b86dc
| 3,649,078
|
from typing import Any
def async_check_significant_change(
hass: HomeAssistant,
old_state: str,
old_attrs: dict,
new_state: str,
new_attrs: dict,
**kwargs: Any,
) -> bool | None:
"""Test if state significantly changed."""
if old_state != new_state:
return True
if old_attrs.get(ATTR_EFFECT) != new_attrs.get(ATTR_EFFECT):
return True
old_color = old_attrs.get(ATTR_HS_COLOR)
new_color = new_attrs.get(ATTR_HS_COLOR)
if old_color and new_color:
# Range 0..360
if check_absolute_change(old_color[0], new_color[0], 5):
return True
# Range 0..100
if check_absolute_change(old_color[1], new_color[1], 3):
return True
if check_absolute_change(
old_attrs.get(ATTR_BRIGHTNESS), new_attrs.get(ATTR_BRIGHTNESS), 3
):
return True
if check_absolute_change(
# Default range 153..500
old_attrs.get(ATTR_COLOR_TEMP),
new_attrs.get(ATTR_COLOR_TEMP),
5,
):
return True
if check_absolute_change(
# Range 0..255
old_attrs.get(ATTR_WHITE_VALUE),
new_attrs.get(ATTR_WHITE_VALUE),
5,
):
return True
return False
|
2a3f91923f187a601b80a28aa750060dc0760e65
| 3,649,079
|
import warnings
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None):
"""
Return a string representation of an array.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters splits the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
An array is typically printed as::
'prefix(' + array2string(a) + ')'
The length of the prefix string is used to align the
output correctly.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
sign : string, either '-', '+', ' ' or 'legacy', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values. If 'legacy', print a
space for positive values except in 0d arrays.
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print(np.array2string(x, precision=2, separator=',',
... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
# Deprecation 05-16-2017 v1.14
if style is not np._NoValue:
warnings.warn("'style' argument is deprecated and no longer functional",
DeprecationWarning, stacklevel=3)
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter)
options = _format_options.copy()
options.update(overrides)
if a.size == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
lst = _array2string(a, options, separator, prefix)
return lst
|
ab40c565d058a6836fbc9778ae0d8ceb5c3d6a99
| 3,649,080
|
def registered_types():
""" list of registered types """
return list(Registry.types.get_all().keys())
|
50ed8fd4d586d660e2dc48e01e9cd462b346f47e
| 3,649,081
|
from typing import Dict
def is_retain_bg_files(config: Dict[str, ConfigVO] = None) -> bool:
"""
    Whether to keep the old wallpaper files before fetching new ones.
"""
key = const.Key.Task.RETAIN_BGS.value
vo = config.get(key) if config else dao.get_config(key)
    return bool(vo and vo.value)
|
69fd845479c0afbc6d6b215d0680d7f6a9c35096
| 3,649,082
|
import pytz
def getAwareTime(tt):
"""
Generates timezone aware timestamp from timezone unaware timestamp
PARAMETERS
------------
    :param tt: datetime
        timezone-unaware timestamp
    RETURNS
    ------------
    :return: datetime
timezone aware timestamp
"""
timezone = pytz.timezone("Europe/Amsterdam")
return (timezone.localize(tt))
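# Usage example: attach the Europe/Amsterdam zone to a naive timestamp.
from datetime import datetime
aware = getAwareTime(datetime(2021, 6, 1, 12, 0))
assert aware.tzinfo is not None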
|
1b286c92c7f5d8f0ff48d77296489fbd358c14ce
| 3,649,083
|
def xdfs(request, tmpdir, vol_name, dos_format):
"""return (xdf_file, xdf_size_spec, vol_name) for various disks"""
size = request.param
if size == "880K":
file_name = tmpdir / "disk.adf"
size = ""
else:
file_name = tmpdir / "disk-" + size + ".hdf"
size = "size=" + size
return XDFSpec(str(file_name), size, vol_name, dos_format)
|
0a9878ffe020ba1438844e000be5b9e4a8b2825a
| 3,649,084
|
def nfvi_get_networks(paging, callback):
"""
Get a list of networks
"""
cmd_id = _network_plugin.invoke_plugin('get_networks', paging,
callback=callback)
return cmd_id
|
432bd6a69e25cc7a80aa77b2a58fe99b0947b9a0
| 3,649,085
|
def get_fasta(uniprot_id):
"""Get the protein sequence for a UniProt ID as a string.
Args:
uniprot_id: Valid UniProt ID
Returns:
str: String of the protein (amino acid) sequence
"""
# Silencing the "Will be moved to Biokit" message
with ssbio.utils.suppress_stdout():
return bsup.get_fasta_sequence(uniprot_id)
|
295a5bd30d3e0feaf99ecab7fa975c67f8b06248
| 3,649,086
|
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
"""
Validate and split the given HTTP request path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param path: HTTP Request path to be split
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
    :returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises: ValueError if given an invalid path
"""
if not maxsegs:
maxsegs = minsegs
if minsegs > maxsegs:
raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
if rest_with_last:
segs = path.split('/', maxsegs)
minsegs += 1
maxsegs += 1
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs or
'' in segs[1:minsegs]):
raise ValueError('Invalid path: %s' % quote(path))
else:
minsegs += 1
maxsegs += 1
segs = path.split('/', maxsegs)
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs + 1 or
'' in segs[1:minsegs] or
(count == maxsegs + 1 and segs[maxsegs])):
raise ValueError('Invalid path: %s' % quote(path))
segs = segs[1:maxsegs]
segs.extend([None] * (maxsegs - 1 - len(segs)))
return segs
|
d3824ebd63b784dadaf0a97e75049f79d1077ded
| 3,649,087
|
def get_purchases_formset(n_forms=0):
"""
Helper method that returns a Django formset for a dynamic amount of Purchases. Initially `n_forms` empty
forms are shown.
"""
return modelformset_factory(Purchase, fields=('amount', 'fruit'), extra=n_forms)
|
b49ec71aef56eabb1781039af947ff510242925a
| 3,649,088
|
async def git_pull():
"""
    Pulls any changes down from GitHub and returns the output of the command.
    :return: str
"""
cmd = Popen(["git", "pull"], stdout=PIPE)
out, _ = cmd.communicate()
out = out.decode()
return out
|
ed32677a22b0f75c23af618f18833b5fc46bb3dc
| 3,649,089
|
def inverse_word_map(word_map):
""" Create an inverse word mapping.
:param word_map: word mapping
"""
return {v: k for k, v in word_map.items()}
|
4048a21ea1c75791a92d57ee0a440a6c9d31b6b9
| 3,649,090
|
def get_coalition_wins_sql_string_for_state(coalition_id,state_id):
"""
    :type coalition_id: integer
    :type state_id: integer
"""
str = """ select
lr.candidate_id,
c.fullname as winning_candidate,
lr.constituency_id,
cons.name as constituency,
lr.party_id,
lr.max_votes,
(lr.max_votes-sr.votes) as lead,
sr.candidate_id,
loosing_candidate.fullname as runner_up,
loosing_party.name as runner_up_party,
sr.party_id,
winning_party.name,
ltw.party_id
from latest_results lr
inner join
latest_runners_up as sr
on
sr.constituency_id = lr.constituency_id
inner join
candidate c
on
c.id = lr.candidate_id
inner join
constituency cons
on
cons.id = lr.constituency_id
inner join party winning_party
on
lr.party_id = winning_party.id
inner join party loosing_party
on
loosing_party.id = sr.party_id
inner join candidate loosing_candidate
on
loosing_candidate.id = sr.candidate_id
inner join last_time_winners ltw
on
ltw.constituency_id = lr.constituency_id
where
winning_party.coalition_id = %s
and
cons.state_id = %s
and
lr.status = 'DECLARED'
order by
lead DESC""" % (coalition_id,state_id)
return str;
|
76fb0704779e20e8a53ca80dc17c969f1e455d20
| 3,649,091
|
import numpy
def computeAPLSF(data):
"""
Compute the LSF kernel for each chip
"""
index = 2047
## define lsf range and pixel centers
xlsf = numpy.linspace(-7.,7.,43)
xcenter = numpy.arange(0,4096)
## compute LSF profiles for each chip as a function of pixel
raw_out2_a = raw(xlsf,xcenter,data.lsfcoeff[0])
raw_out2_b = raw(xlsf,xcenter,data.lsfcoeff[1])
raw_out2_c = raw(xlsf,xcenter,data.lsfcoeff[2])
## normalize
raw_out2_a_norm = raw_out2_a/numpy.tile(numpy.sum(raw_out2_a,axis=1),(len(xlsf),1)).T
raw_out2_b_norm = raw_out2_b/numpy.tile(numpy.sum(raw_out2_b,axis=1),(len(xlsf),1)).T
raw_out2_c_norm = raw_out2_c/numpy.tile(numpy.sum(raw_out2_c,axis=1),(len(xlsf),1)).T
return numpy.array([raw_out2_a_norm[index],raw_out2_b_norm[index],raw_out2_c_norm[index]])
|
5cd46d9feec10dd0a4eff1a5fe44e241bfeed539
| 3,649,092
|
def login():
"""Log in a registered user by adding the user id to the session."""
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
error = None
user = User.query.filter_by(name=username).first()
if user is None:
error = "Incorrect username."
elif not user.check_password(password):
error = "Incorrect password."
if error is None:
            # store the user id in the Flask session (not the SQLAlchemy session)
            session.clear()
            session["user_id"] = user.id
return redirect(url_for("mainpage"))
flash(error)
return render_template("auth/login.html")
|
067b202e81d947589c0fe2262372856084b28e35
| 3,649,093
|
def _timedeltaformat(value, include_ms=False):
"""Formats a timedelta in a sane way.
Ignores sub-second precision by default.
"""
if not value:
return NON_BREAKING_HYPHEN + NON_BREAKING_HYPHEN
total_seconds = value.total_seconds()
suffix = ''
if include_ms:
ms = int(round(total_seconds-int(total_seconds), 3) * 1000)
if ms:
suffix = '.%03d' % ms
hours, remainder = divmod(int(round(total_seconds)), 3600)
minutes, seconds = divmod(remainder, 60)
if hours:
return '%d:%02d:%02d%s' % (hours, minutes, seconds, suffix)
# Always prefix minutes, even if 0, otherwise this looks weird. Revisit this
# decision if bikeshedding is desired.
return '%d:%02d%s' % (minutes, seconds, suffix)
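# Usage examples (illustrative values only):
from datetime import timedelta
assert _timedeltaformat(timedelta(minutes=5, seconds=7)) == '5:07'
assert _timedeltaformat(timedelta(hours=2, seconds=3)) == '2:00:03'
assert _timedeltaformat(timedelta(seconds=1.25), include_ms=True) == '0:01.250'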
|
5c40caa1bd2e005746a44b1767eb4c3ed29b1603
| 3,649,094
|
def get_viame_src(url):
"""
Get image src from via.me API.
"""
END_POINT = 'http://via.me/api/v1/posts/'
tmp = url.split('/')
viame_id = tmp[-1][1:]
address = END_POINT + viame_id
result = httpget(address)['response']['post']
return result['thumb_300_url']
|
52b23fb64b30c97ef70b683e0176f88f8730e5c9
| 3,649,095
|
def Geom_BSplineCurve_MaxDegree(*args):
"""
* Returns the value of the maximum degree of the normalized B-spline basis functions in this package.
:rtype: int
"""
return _Geom.Geom_BSplineCurve_MaxDegree(*args)
|
32729754ca89ce719b81f28fbf3f3c5ea5eb70eb
| 3,649,096
|
import torch
def iou_score(pred_cls, true_cls, nclass, drop=(), mask=None):
"""
compute the intersection-over-union score
both inputs should be categorical (as opposed to one-hot)
"""
assert pred_cls.shape == true_cls.shape, 'Shape of predictions should match GT'
if mask is not None:
assert mask.dim() == true_cls.dim(), \
'Mask should have the same dimensions as inputs'
intersect_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
union_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
idx = 0
for i in range(nclass):
if i not in drop:
intersect = (pred_cls == i).byte() + (true_cls == i).byte()
if mask is not None:
intersect *= mask.byte()
intersect = intersect.eq(2).sum()
union = (pred_cls == i).byte() + (true_cls == i).byte()
if mask is not None:
union *= mask.byte()
union = union.ge(1).sum()
intersect_[idx] = intersect
union_[idx] = union
idx += 1
return intersect_, union_
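# Hedged usage sketch (CUDA tensors are required, since the body calls
# Tensor.get_device() and builds the accumulators on that device):
# pred = torch.randint(0, 3, (4, 64, 64)).cuda()
# gt   = torch.randint(0, 3, (4, 64, 64)).cuda()
# inter, union = iou_score(pred, gt, nclass=3)
# per_class_iou = inter.float() / union.float().clamp(min=1)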
|
d38871f339b2126d418a7fca53fbfd874e263aa2
| 3,649,097
|
def check_args(source_path, args):
"""Checks lengths of supplied args match or raise an error.
Lists can have only one element where they are automatically extended.
Args:
source_path(list(str)): List of source_paths supplied to turbiniactl.
args(list(list)): List of args (i.e. name, source, partitions, etc) and
their values supplied to turbiniactl.
Raises:
      TurbiniaException: If argument lengths do not match.
    Returns:
      list(list): List of args, each expanded to match the source_path length."""
ret = list()
if not args[0]:
args[0] = source_path
for arg in args:
if not arg:
arg = [None]
if len(arg) > 1 and len(arg) != len(source_path):
raise TurbiniaException(
'Number of passed in args ({0:d}) must equal to one or '
'number of source_paths/disks ({1:d}).'.format(
len(arg), len(source_path)))
if len(arg) == 1:
arg = [arg[0] for _ in source_path]
ret.append(arg)
return ret
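# Usage example: single-element args are broadcast to the number of source
# paths, while already-matching lists pass through unchanged.
names, partitions = check_args(
    ['disk1.img', 'disk2.img'], [['case1'], ['p1', 'p2']])
# names == ['case1', 'case1'], partitions == ['p1', 'p2']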
|
23d50e875ac908b0ee3afd4521b1a2660843ffc6
| 3,649,098
|
import datetime
import warnings
import zipfile
from io import BytesIO
import pandas as pd
import requests
def futures_dce_position_rank(date: str = "20160104") -> dict:
    """
    Daily position ranking per contract on the Dalian Commodity Exchange (DCE).
    http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rcjccpm/index.html
    :param date: trading day to query; e.g., "20200511"
    :type date: str
    :return: position-ranking data for the given date, keyed by contract code
    :rtype: dict of pandas.DataFrame
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    # `calendar` is assumed to hold the exchange trading-day strings
    # (e.g. cons.get_calendar()), not the stdlib calendar module
    if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
        return {}
url = "http://www.dce.com.cn/publicweb/quotesdata/exportMemberDealPosiQuotesBatchData.html"
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Length": "160",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "www.dce.com.cn",
"Origin": "http://www.dce.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
payload = {
"memberDealPosiQuotes.variety": "a",
"memberDealPosiQuotes.trade_type": "0",
"contract.contract_id": "a2009",
"contract.variety_id": "a",
"year": date.year,
"month": date.month - 1,
"day": date.day,
"batchExportFlag": "batch",
}
r = requests.post(url, payload, headers=headers)
big_dict = dict()
with zipfile.ZipFile(BytesIO(r.content), "r") as z:
for i in z.namelist():
file_name = i.encode('cp437').decode('GBK')
try:
data = pd.read_table(z.open(i), header=None, sep="\t").iloc[:-6]
                if len(data) < 12:  # handle the case where the contract has no active data
big_dict[file_name.split("_")[1]] = pd.DataFrame()
continue
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
data = data.iloc[start_list[0]:, data.columns[data.iloc[start_list[0], :].notnull()]]
data.reset_index(inplace=True, drop=True)
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
temp_df = pd.concat([part_one.reset_index(drop=True), part_two.reset_index(drop=True),
part_three.reset_index(drop=True)], axis=1, ignore_index=True)
temp_df.columns = ["名次", "会员简称", "成交量", "增减", "名次", "会员简称", "持买单量", "增减", "名次", "会员简称", "持卖单量", "增减"]
temp_df["rank"] = range(1, len(temp_df) + 1)
del temp_df["名次"]
temp_df.columns = ["vol_party_name", "vol", "vol_chg", "long_party_name", "long_open_interest",
"long_open_interest_chg", "short_party_name", "short_open_interest",
"short_open_interest_chg", "rank"]
temp_df["symbol"] = file_name.split("_")[1]
temp_df["variety"] = file_name.split("_")[1][:-4].upper()
temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name", "rank",
"short_open_interest", "short_open_interest_chg", "short_party_name",
"vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
big_dict[file_name.split("_")[1]] = temp_df
except UnicodeDecodeError as e:
try:
data = pd.read_table(z.open(i), header=None, sep="\\s+", encoding="gb2312", skiprows=3)
except:
data = pd.read_table(z.open(i), header=None, sep="\\s+", encoding="gb2312", skiprows=4)
start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
temp_df = pd.concat([part_one.reset_index(drop=True), part_two.reset_index(drop=True),
part_three.reset_index(drop=True)], axis=1, ignore_index=True)
temp_df.columns = ["名次", "会员简称", "成交量", "增减", "名次", "会员简称", "持买单量", "增减", "名次", "会员简称", "持卖单量", "增减"]
temp_df["rank"] = range(1, len(temp_df) + 1)
del temp_df["名次"]
temp_df.columns = ["vol_party_name", "vol", "vol_chg", "long_party_name", "long_open_interest",
"long_open_interest_chg", "short_party_name", "short_open_interest",
"short_open_interest_chg", "rank"]
temp_df["symbol"] = file_name.split("_")[1]
temp_df["variety"] = file_name.split("_")[1][:-4].upper()
temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name", "rank",
"short_open_interest", "short_open_interest_chg", "short_party_name",
"vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
big_dict[file_name.split("_")[1]] = temp_df
return big_dict
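# Hedged usage sketch (needs network access to dce.com.cn and a valid trading
# day; keys of the returned dict are contract codes such as 'a2009'):
# rank_dict = futures_dce_position_rank(date="20200511")
# rank_dict['a2009'].head()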
|
3c4fa81a2fef317210be915f437c0885f5fcbbbd
| 3,649,099
|