content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def fan_on():
    """
    Turn only the fan on.

    Drives the heat and cool relays off and the fan relay on, then blocks,
    polling every 10 s, until the local temperature spread closes, the
    controller is paused, or a temperature limit is crossed.
    """
    global PAUSED
    print("Temps vary too much; toggling fan on")
    GPIO.output(HEATPIN, RELAYOFF)  # heater relay off
    GPIO.output(COOLPIN, RELAYOFF)  # cooler relay off
    GPIO.output(FANPIN, RELAYON)    # fan relay on
    # NOTE(review): loc_temp_diff, min_temp and max_temp are module globals
    # that are presumably updated by another thread while this loop sleeps —
    # confirm; otherwise this loop can never observe a change and never exits.
    while (loc_temp_diff > TEMPDIFF) and (PAUSED == False):
        time.sleep(10)
        # Bail out early if either temperature limit is breached.
        if min_temp < TEMPLOW or max_temp > TEMPHIGH:
            break
def model_query(context, model, *args, **kwargs):
    """Query helper.

    :param context: context to query under
    :param session: if present, the session to use
    """
    # Fall back to a fresh SQLAlchemy session when none was supplied.
    db_session = kwargs.get('session') or object_sqla.get_session()
    return filter_by_project(context, db_session.query(model, *args))
def bert_dropout_model(num_classes,
                       bert_config,
                       use_mc_dropout_mha=False,
                       use_mc_dropout_att=False,
                       use_mc_dropout_ffn=False,
                       use_mc_dropout_output=False,
                       channel_wise_dropout_mha=False,
                       channel_wise_dropout_att=False,
                       channel_wise_dropout_ffn=False):
  """Creates a BERT classifier model with MC dropout.

  Args:
    num_classes: Number of output classes of the classifier head.
    bert_config: BERT config object; `initializer_range` and
      `hidden_dropout_prob` are read from it here.
    use_mc_dropout_mha: Enable Monte Carlo dropout in the multi-head
      attention sublayer.
    use_mc_dropout_att: Enable MC dropout in the attention output sublayer.
    use_mc_dropout_ffn: Enable MC dropout in the feed-forward sublayer.
    use_mc_dropout_output: Enable MC dropout in the classifier head.
    channel_wise_dropout_mha: Drop whole channels (rather than individual
      units) in the multi-head attention sublayer.
    channel_wise_dropout_att: Same, for the attention output sublayer.
    channel_wise_dropout_ffn: Same, for the feed-forward sublayer.

  Returns:
    A (classifier_model, encoder_model) tuple; the encoder is returned
    separately so callers can reuse it (e.g. for checkpoint restore).
  """
  last_layer_initializer = tf.keras.initializers.TruncatedNormal(
      stddev=bert_config.initializer_range)
  # Build encoder model.
  mc_dropout_bert_encoder = get_mc_dropout_transformer_encoder(
      bert_config,
      use_mc_dropout_mha=use_mc_dropout_mha,
      use_mc_dropout_att=use_mc_dropout_att,
      use_mc_dropout_ffn=use_mc_dropout_ffn,
      channel_wise_dropout_mha=channel_wise_dropout_mha,
      channel_wise_dropout_att=channel_wise_dropout_att,
      channel_wise_dropout_ffn=channel_wise_dropout_ffn)
  # Build classification model on top of the encoder.
  mc_dropout_bert_model = DropoutBertClassifier(
      mc_dropout_bert_encoder,
      num_classes=num_classes,
      dropout_rate=bert_config.hidden_dropout_prob,
      use_mc_dropout=use_mc_dropout_output,
      initializer=last_layer_initializer)
  return mc_dropout_bert_model, mc_dropout_bert_encoder
def calculate_monthly_sales(year: int, month: int, beer_style: str) -> int:
    """Calculate the total sales of one beer style in a given month.

    :param year: calendar year to match
    :param month: an int ranging from 1 to 12
    :param beer_style: beer style to match
    :return: total sales summed over all matching records in the module-level
        ``data`` table (column 2 is a date, column 3 the style, column 5 the
        sale amount)
    """
    return sum(
        int(record[5])
        for record in data
        if record[2].year == year
        and record[2].month == month
        and record[3] == beer_style
    )
def check_canopy_height(region_info, regional_lookup):
    """
    Return the region's mean canopy height, treating missing data as zero.

    :param region_info: mapping with a 'mean_canopy_height' entry, which is
        either a number or the sentinel string 'no data'
    :param regional_lookup: unused; kept for interface compatibility
    :return: the mean canopy height, or 0 when the value is 'no data'
    """
    height = region_info['mean_canopy_height']
    return 0 if height == 'no data' else height
def annotated_var(prs):
    """
    Parser for an annotated variable in parentheses, e.g. ``(x : A)``.

    The annotation is parsed with ``prs``. The parser output is a var token;
    when an annotation is present it is stored in the token's 'annotation'
    attribute.
    """
    def attach_annotation(acc):
        # acc is (var_token, annotation_list) from the combined parser.
        tok, annotation = acc
        if len(annotation) > 0:
            return c.copy_token(tok, {'annotation': annotation[0]})
        return tok
    return c.paren(var() + colon_annotation(prs)).treat(attach_annotation)
def action_from_json(project, value):
    """Return an action built from the given JSON dict.

    The first registered action class whose ``json_type`` matches the JSON's
    'type' field wins; DummyAction is used when nothing matches.
    """
    json_type = value.get('type')
    matches = (cls for cls in sftoolbox.engine.action_classes_register
               if cls.json_type == json_type)
    action_cls = next(matches, DummyAction)
    return action_cls.from_json(project, value)
def get_springer_doi(node):
    """Extract a DOI from a record's 'occurrence' entries.

    Looks for a two-element dict whose first value is the literal 'DOI' and
    returns the second value.

    :param node: nested structure searched via find_key
    :return: the DOI string, or '' when none is found
    """
    for elem in find_key(node, 'occurrence'):
        if isinstance(elem, list):
            for sub_elem in elem:
                if isinstance(sub_elem, dict):
                    # dict.values() returns a non-subscriptable view on
                    # Python 3; materialize it before indexing.
                    values = list(sub_elem.values())
                    if len(values) == 2 and values[0] == 'DOI':
                        return values[1]
    return ''
def check_versions(versions=None):
    """Check that there is at least one version to build the changelog.

    :param versions: list of versions; treated as empty when None
    :raises NotEnoughVersionsError: when no versions are given
    :return: True when at least one version is present
    """
    # None stands in for "no versions" — a mutable default ([]) would be
    # shared across calls.
    if not versions:
        raise NotEnoughVersionsError()
    return True
def create_fnet(widths, nfeat, nfeato, orthoinit, llbias):
    """ Creates the feature-generating network, a multi-layer perceptron.

    Hidden layers follow `widths` and use in-place ReLU; the last linear layer
    maps to nfeat * nfeato outputs (the flattened convolution weight matrix).

    Parameters:
    widths: list of widths of hidden layers
    nfeat, nfeato: # input and output channels of the convolution
    orthoinit: whether to use orthogonal weight initialization
    llbias: whether to use bias in the last layer
    """
    layers = []
    for w_in, w_out in zip(widths[:-1], widths[1:]):
        hidden = nn.Linear(w_in, w_out)
        if orthoinit:
            init.orthogonal_(hidden.weight, gain=init.calculate_gain('relu'))
        layers.append(hidden)
        layers.append(nn.ReLU(True))
    output = nn.Linear(widths[-1], nfeat * nfeato, bias=llbias)
    if orthoinit:
        init.orthogonal_(output.weight)
    layers.append(output)
    return nn.Sequential(*layers)
def start_fun():
    """
    Entry point for training: parse CLI arguments, build the data loaders and
    the model, train it, and save a checkpoint.
    """
    ag = argparse.ArgumentParser()
    # nargs='*' yields a list, so the default must also be a list; with the
    # previous string default "flowers", data_dir[0] below evaluated to 'f'.
    ag.add_argument('data_dir', nargs='*', action='store', default=["flowers"])
    ag.add_argument('--arch', action='store', dest='arch', default='vgg19')
    ag.add_argument('--gpu', action='store', dest='gpu', default='gpu')
    # Let argparse perform numeric conversion/validation up front.
    ag.add_argument('--learning_rate', action='store', dest='learning_rate',
                    type=float, default=0.001)
    ag.add_argument('--save_dir', action='store', dest='save_dir',
                    default='checkpoint.pth')
    ag.add_argument('--epochs', action='store', dest='epochs',
                    type=int, default=10)
    ag.add_argument('--hidden_units', action='store', dest='hid_units',
                    type=int, default=5000)
    xyz = ag.parse_args()
    arch = xyz.arch
    gpu = xyz.gpu
    lr = xyz.learning_rate
    checkpoint_path = xyz.save_dir
    epochs = xyz.epochs
    hid_units = xyz.hid_units
    # Making dataset dirs from the dataset root
    data_dir = xyz.data_dir[0]
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    # loading data loaders
    trainloader, testloader, validateloader = load_func(train_dir, valid_dir, test_dir)
    # Model setup
    model, criterion, classifier, optimizer = setup_model(arch, lr, hid_units, gpu)
    # Train
    train_func(model, optimizer, criterion, epochs, trainloader, validateloader, gpu)
    # Save model
    save_func(arch, epochs, optimizer, lr, model, classifier, checkpoint_path)
    print("train.py==finished: True")
def pytorch_array_setitem(op):
    """Implementation of array_setitem for pytorch."""
    def _impl(array, begin, end, strides, value):
        # One slice per dimension; write into a copy so the input is untouched.
        index = tuple(map(slice, begin, end, strides))
        result = array.clone()
        result[index] = value
        return (result,)
    return _impl, op.inputs[1:]
def invalidate_voucher_codes_with_campaign(affiliate, campaign):
    """Invalidates all codes under the given campaign name.

    :param affiliate: affiliate whose voucher batches are targeted
    :param campaign: campaign name within that affiliate's batches
    """
    voucher_codes = VoucherCode.objects.filter(batch__affiliate=affiliate,
                                               batch__campaign=campaign)
    # count() issues a COUNT query instead of materializing the whole
    # queryset (len() would); the logger gets lazy %-style args rather than a
    # pre-formatted string.
    LOGGER.info('Found %d codes with affiliate \'%s\', '
                'and campaign \'%s\'. Invalidating...',
                voucher_codes.count(), affiliate, campaign)
    voucher_codes.update(is_active=False)
    LOGGER.info('Done.')
async def help_(message: discord.Message, command: str.lower=None, *args):
    """ Display commands or their usage and description.

    With no argument, lists every visible command the author may use in this
    channel; with a command name (optionally prefixed), shows that command's
    help. The ``str.lower`` annotation is used by the framework as an
    argument converter.
    """
    command_prefix = config.server_command_prefix(message.server)
    # Display the specific command
    if command:
        # Accept both "help foo" and "help !foo".
        if command.startswith(command_prefix):
            command = command[len(command_prefix):]
        cmd = plugins.get_command(command)
        if not cmd:
            # Unknown command: stay silent.
            return
        # Get the specific command with arguments and send the help
        cmd = plugins.get_sub_command(cmd, *args)
        await client.say(message, plugins.format_help(cmd, message.server))
    # Display every command
    else:
        commands = []
        for plugin in plugins.all_values():
            # Only go through plugins with actual commands
            if not getattr(plugin, "__commands", False):
                continue
            # Add all commands that the user can use
            for cmd in plugin.__commands:
                if not cmd.hidden and plugins.can_use_command(cmd, message.author, message.channel):
                    # First word of the prefixed name, e.g. "!foo".
                    commands.append(cmd.name_prefix(message.server).split()[0])
        commands = ", ".join(sorted(commands))
        # config.help_arg supplies the two placeholder argument names.
        m = "**Commands**: ```{0}```Use `{1}help <command>`, `{1}<command> {2}` or " \
            "`{1}<command> {3}` for command specific help.".format(
                commands, command_prefix, *config.help_arg)
        await client.say(message, m)
def time_series_seasonal_test(x: pd.Series, expected_lags: list):
    """
    Rank candidate seasonal lags of a time series by autocorrelation.

    For each candidate lag the autocorrelation at exactly that lag is taken
    (last element of the acf up to ``nlags``); candidates are returned
    ordered by the absolute value of that autocorrelation, ascending.

    :param x: the time series, as a pandas Series
    :param expected_lags: candidate lag values to evaluate
    :return: expected_lags reordered by |autocorrelation|
    """
    scores = [abs(acf(x.values, nlags=lag, fft=False)[-1])
              for lag in expected_lags]
    ranked = np.argsort(scores)
    return [expected_lags[i] for i in ranked]
def test_get_registered_collectors():
    """Test for the function get_registered_collectors()."""
    collectors = f8a_tagger.recipes.get_registered_collectors()
    # A non-empty result is required...
    assert collectors
    # ...and at least these four collectors are expected to be registered:
    # ['Maven', 'NPM', 'PyPI', 'StackOverflow']
    assert len(collectors) >= 4
def comment_like():
    """
    Add or remove a "like" on a comment.

    Steps:
    - 1. Check that the user is logged in
    - 2. Read the request parameters
    - 3. Validate that all parameters are present
    - 4. Validate the action type
    - 5. Load the comment object by its id
    - 6. Check that the comment exists
    - 7. Apply the action: like or un-like
    - 8. Return a JSON response
    :return: JSON response with errno/errmsg
    """
    # - 1. Check that the user is logged in
    if not g.user:
        return jsonify(errno=RET.NODATA, errmsg="用户未登录")
    # - 2. Read the request parameters
    comment_id = request.json.get("comment_id")
    action = request.json.get("action")
    # - 3. Validate that all parameters are present
    if not all([comment_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数不全")
    # - 4. Validate the action type
    if not action in ["add", "remove"]:
        return jsonify(errno=RET.DATAERR, errmsg="操作类型有误")
    # - 5. Load the comment object by its id
    try:
        comment = Comment.query.get(comment_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="获取评论失败")
    # - 6. Check that the comment exists
    if not comment:
        return jsonify(errno=RET.NODATA, errmsg="评论不存在")
    try:
        # - 7. Apply the action: like or un-like
        if action == "add":
            # Only create a like if this user has not liked the comment yet.
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if not comment_like:
                # Create the like object.
                comment_like = CommentLike()
                comment_like.user_id = g.user.id
                comment_like.comment_id = comment_id
                db.session.add(comment_like)
                # Bump the counter BEFORE committing so the CommentLike row
                # and the incremented like_count land in the same transaction
                # (previously the count was changed after commit() and was
                # never persisted).
                comment.like_count += 1
                db.session.commit()
        else:
            # Remove the like if this user had previously liked the comment.
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if comment_like:
                db.session.delete(comment_like)
                # Decrement the counter in the same transaction; never below 0.
                if comment.like_count > 0:
                    comment.like_count -= 1
                db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="操作失败")
    # - 8. Return a JSON response
    return jsonify(errno=RET.OK, errmsg="操作成功")
def open_fits(subject, field, wavelength, size='2x2'):
    """Opens a FITS image of a subject.

    Can be used as a context handler.

    subject: RGZ subject dict, from the ATLAS survey; its
        metadata['source'] id names the file on disk.
    field: 'elais-s1' or 'cdfs' (note: the full 'elais-s1', not 'elais').
    wavelength: 'ir' or 'radio'
    size: Optional. '2x2' or '5x5'.
    -> FITS image file (astropy HDUList).
    """
    if field not in {'elais-s1', 'cdfs'}:
        raise ValueError('field must be either "elais-s1" or "cdfs".')
    if wavelength not in {'ir', 'radio'}:
        raise ValueError('wavelength must be either "ir" or "radio".')
    cid = subject['metadata']['source']
    filename = '{}_{}.fits'.format(cid, wavelength)
    # Data directory comes from the module-level config mapping,
    # keyed per field (e.g. 'cdfs_fits').
    path = os.path.join(config['data_sources']['{}_fits'.format(field)], size,
                        filename)
    # ignore_blank: presumably works around files with problematic BLANK
    # header keywords — confirm against the survey data.
    return astropy.io.fits.open(path, ignore_blank=True)
def ann_set(ax, fontsize, bracket_x, bracket_y, text_x, texta, textb):
    """
    Annotate a set of spectra with a bracket and two rotated text labels.
    """
    left, right = bracket_x
    bottom, top = bracket_y
    # Draw the bracket as one open polyline.
    ax.plot([left, right, right, left],
            [bottom, bottom, top, top],
            "k-", linewidth=3.0)
    # Both labels sit at the vertical midpoint of the bracket.
    mid_y = 0.5 * (bottom + top)
    labels = ((text_x[0], texta, 1.2 * fontsize),
              (text_x[1], textb, 0.9 * fontsize))
    for x_pos, label, size in labels:
        ax.text(x_pos, mid_y, label,
                rotation=270.0,
                fontsize=size,
                horizontalalignment="center",
                verticalalignment="center")
def spent_estimated_time_chart_by_user(chart_title, stat_extractor, file_path, period="month", measure="spent"):
    """
    Creates a chart that shows spent and estimated (in that order) times by user.

    :param chart_title: title of the rendered chart.
    :param stat_extractor: object exposing per-member spent/estimated time dicts.
    :param file_path: path of the PNG file to render to.
    :param period: "month" or "week"; selects which per-period dicts are read.
    :param measure: "spent", "estimated" or "diff" (estimated - spent).
    :return: None; the chart is written to file_path.
    """
    spent_time_by_user = None
    estimated_time_by_user = None
    if period == "month":
        spent_time_by_user = stat_extractor.spent_month_time_by_user
        estimated_time_by_user = stat_extractor.estimated_month_time_by_user
    elif period == "week":
        spent_time_by_user = stat_extractor.spent_week_time_by_user
        estimated_time_by_user = stat_extractor.estimated_week_time_by_user
    # Calculation of the x-labels: union of all members' periods.
    periods = {}
    for member in stat_extractor.members:
        periods.update(spent_time_by_user.get(member.id))
        periods.update(estimated_time_by_user.get(member.id))
    # sorted() works on Python 2 and 3; the previous keys()/.sort() pair only
    # worked on Python 2, where keys() returned a list.
    periods = sorted(periods.keys())
    s_e_by_user_chart_ = pygal.Line(title=chart_title, legend_at_bottom=False)
    for member in stat_extractor.members:
        # NOTE(review): username.decode() implies byte-string usernames
        # (Python 2); on Python 3 str has no decode — confirm.
        member_name = member.username.decode("utf-8")
        if stat_extractor.configuration.censored:
            member_name = member.id
        if periods:
            if measure == "spent":
                s_e_by_user_chart_.add(u"{0}".format(member_name), [spent_time_by_user[member.id].get(time) for time in periods])
            elif measure == "estimated":
                s_e_by_user_chart_.add(u"{0}".format(member_name), [estimated_time_by_user[member.id].get(time) for time in periods])
            elif measure == "diff":
                # Difference is only defined where both values exist.
                diff_values = []
                for time in periods:
                    spent_time = spent_time_by_user[member.id].get(time)
                    estimated_time = estimated_time_by_user[member.id].get(time)
                    if spent_time is not None and estimated_time is not None:
                        diff_values.append(estimated_time - spent_time)
                    else:
                        diff_values.append(None)
                s_e_by_user_chart_.add(u"{0}".format(member_name), diff_values)
    s_e_by_user_chart_.x_labels = periods
    s_e_by_user_chart_.show_x_labels = True
    s_e_by_user_chart_.render_to_png(file_path)
def create_relationships(model_cls, data):
    """
    Build the relationship dict of the given model class from ``data``.

    :param model_cls: model class exposing get_relationships()
    :param data: mapping of relationship key -> kwargs (dict for 1:1, list of
        dicts for 1:n)
    :return: dict of relationship key -> built instance(s)
    """
    relationship_map = {}
    for key, relationship in model_cls.get_relationships().items():
        related_cls = relationship.mapper.class_
        related_kwargs = data.get(key)
        if isinstance(related_kwargs, list):  # 1:n
            candidates = [create_instance(related_cls, item)
                          for item in related_kwargs]
            built = [ins for ins in candidates if ins is not None]
        else:  # 1:1
            built = create_instance(related_cls, related_kwargs)
        if built is not None:
            relationship_map[key] = built
    return relationship_map
def get_news_items_from_web(url):
    """
    Calls the Athletics News RSS API, parses the resulting response and returns a list of parsed news_items to be
    stored in DynamoDB
    :param url: Url for the RSS API for UBCO Heat
    :return: Parsed news items in a JSON formatted list (empty on network errors)
    """
    try:
        body = requests.get(url).text
        parsed = feedparser.parse(body)
        return parsed["entries"]
    except RequestException:
        LOGGER.error("Error in network request to RSS Feed")
        detailed_exception(LOGGER)
        return []
def get_node_model(manager, handle_id=None, node=None):
    """
    Resolve the most specific model class for a node and load it.

    Resolution tries increasingly generic class names in ``models``:
      1. '<MetaType><Label>Model' for each node label
      2. '<Label>Model' for each node label
      3. '<MetaType>Model'
      4. fallback: BaseNodeModel

    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    for label in bundle.get('labels'):
        try:
            # e.g. meta_type 'Physical' + label 'Port' -> 'PhysicalPortModel';
            # underscores in names are stripped.
            classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            # No such class in models — try the next label.
            # NOTE(review): this also swallows AttributeErrors raised inside
            # load() itself; confirm that is intended.
            pass
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        # Most generic fallback.
        return models.BaseNodeModel(manager).load(bundle)
def write_graph_file(filename, G, nodelist, write_header=True):
    """
    Write the graph's edge list in Pajek format.

    snap.ConvertGraph() drops node attributes, so node renumbering is done
    here instead: each nodeid's position in ``nodelist`` becomes its
    sequential number 1..N, and edges are written with those numbers. That
    is why snap.SavePajek() cannot be used.

    Parameters:
        filename - filename to write to (warning: overwritten)
        G - SNAP graph object
        nodelist - list of nodeids, used to order the nodes in the output
        write_header - if True write Pajek header lines

    Return value:
        None
    """
    assert(len(nodelist) == G.GetNodes())
    assert(len(nodelist) == len(set(nodelist)))  # nodeids must be unique
    # Map nodeid -> sequential 1-based node number.
    seq_of = {}
    for position, node_id in enumerate(nodelist, start=1):
        seq_of[node_id] = position
    with open(filename, 'w') as outfile:
        if write_header:
            outfile.write("*vertices " + str(G.GetNodes()) + "\n")
            outfile.write("*arcs\n")
        for EI in G.Edges():
            outfile.write("%d %d\n" % (seq_of[EI.GetSrcNId()],
                                       seq_of[EI.GetDstNId()]))
def convert_hcp_plane(plane: list) -> np.ndarray:
    """
    Convert a four-index (Miller-Bravais) hcp/rhombohedral plane to
    three-index notation, reduced by the gcd of its components.

    Args:
        plane (list): four index notation [u, v, t, w]

    Returns:
        np.ndarray: three index notation [u, v, w], divided through by the
        gcd of the components when that gcd exceeds 1.
    """
    # Drop the redundant third index t (t = -(u + v) in Miller-Bravais).
    reduced = [plane[0], plane[1], plane[3]]
    # Compute the gcd once (previously computed twice) and guard against the
    # degenerate all-zero plane, whose gcd is 0 (division by zero).
    common = reduce(gcd, reduced)
    if common not in (0, 1):
        reduced = [int(round(x / common)) for x in reduced]
    return np.array(reduced)
def should_process(data):
    """Quick check if processing is needed at all."""
    from sentry.plugins import plugins
    # Any v2 plugin exposing preprocessors means the event needs processing.
    for plugin in plugins.all(version=2):
        if safe_execute(plugin.get_event_preprocessors,
                        data=data, _with_transaction=False):
            return True
    # Otherwise defer to the stacktrace-specific check.
    return should_process_for_stacktraces(data)
def load_fasta_file(input_file: str) -> Tuple[str, List]:
    """
    Load a fasta file into a list of SeqRecords.

    The file is first parsed as ambiguous DNA; if any sequence fails that
    alphabet check, the whole file is re-parsed as (extended) protein.
    Transparently handles gzip-compressed input.

    :param input_file: The path to the input fasta file.
    :returns: A tuple of the sequence type ('protein' or 'dna'), and the list of SeqRecords.
    :raises ValueError: if the sequences are neither valid DNA nor protein.
    """
    # Pick the opener from the gzip magic bytes; 'rt' opens the gzip stream
    # in text mode so SeqIO sees str rather than bytes.
    if _is_gzipped(input_file):
        openfunc = gzip.open
        bit = 'rt'
    else:
        openfunc = open
        bit = 'r'
    with openfunc(input_file, bit) as handle:
        # NOTE(review): Bio.Alphabet (IUPAC / HasStopCodon) was removed in
        # Biopython 1.78 — this code requires an older Biopython release.
        seqs = [x.upper() for x in SeqIO.parse(handle=handle, format='fasta',
                                               alphabet=IUPAC.ambiguous_dna)]
        if not all(_verify_alphabet(x.seq) for x in seqs):
            # Not DNA: rewind the handle and retry with the protein alphabet.
            handle.seek(0)
            seqs = [x.upper() for x in SeqIO.parse(handle=handle, format='fasta',
                                                   alphabet=HasStopCodon(IUPAC.extended_protein))]
            if not all(_verify_alphabet(x.seq) for x in seqs):
                raise ValueError('Invalid input file (neither DNA nor protein FASTA).')
            return 'protein', seqs
        return 'dna', seqs
def files(
    files,
    out_dir,
    min_zoom,
    title,
    task_procs,
    procs_per_task,
    catalog_delim,
    cat_wcs_fits_file,
    image_engine,
):
    """--Convert a files to a map.
    CLI interface: files command.\n
    FILES should be a comma seperated list of files i.e. a.fits,b.fits,c.cat
    """
    # (The docstring above doubles as the CLI help text.)
    conversion_options = dict(
        out_dir=out_dir,
        min_zoom=min_zoom,
        title=title,
        task_procs=task_procs,
        procs_per_task=procs_per_task,
        catalog_delim=catalog_delim,
        cat_wcs_fits_file=cat_wcs_fits_file,
        image_engine=image_engine,
    )
    file_list = files.split(",")
    convert.files_to_map(file_list, **conversion_options)
def train_valid_test_datasets_provider(train_val_test_num_samples):
    """Build train, valid, and test datasets.

    Args:
        train_val_test_num_samples: per-split sample counts, forwarded to
            build_train_valid_test_datasets.

    Returns:
        (train_ds, valid_ds, test_ds) tuple.
    """
    # Global Megatron-style arguments (data path, split string, seq length...).
    args = get_args()
    print_rank_0('> building train, validation, and test datasets '
                 'for GPT3 ...')
    train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
        data_prefix=args.data_path,
        data_impl=args.data_impl,
        splits_string=args.split,
        train_valid_test_num_samples=train_val_test_num_samples,
        seq_length=args.seq_length,
        seed=args.seed,
        # Warm up the mmap'd index unless explicitly disabled.
        skip_warmup=(not args.mmap_warmup))
    print_rank_0("> finished creating GPT2 datasets ...")
    return train_ds, valid_ds, test_ds
def pred_and_plot_multiclass(model, filename, class_names, img_shape=(224, 224), scale=True):
    """
    Load the image at ``filename``, predict its class with ``model`` and plot
    the image with the predicted class name as the title.

    Requires tensorflow (tf), matplotlib.pyplot (plt) and the project's
    load_and_prep_image helper to be in scope.

    :param model: trained classifier exposing predict()
    :param filename: path of the image to classify
    :param class_names: index-able collection mapping class id -> name
    :param img_shape: target (height, width); a tuple default avoids the
        mutable-default-argument pitfall (list callers still work)
    :param scale: whether load_and_prep_image should rescale pixel values
    """
    img = load_and_prep_image(filename, img_shape, scale)
    # Model expects a batch dimension; squeeze it back off the prediction.
    pred = model.predict(tf.expand_dims(img, axis=0)).squeeze()
    pred_class = class_names[tf.math.argmax(pred)]
    plt.imshow(img)
    plt.title(f'Prediction: {pred_class}')
    plt.axis(False)
def get(username, start):
    """
    Second level function to pull up to 50 reviews.

    :param username: user whose reviews are fetched
    :param start: review number to start from
    :return: list of Beer objects parsed from the page table
    """
    url = '{}/user/beers/?start={}&&ba={}&order=dateD&view=R'.format(
        BASE_URL, start, username
    )
    response = requests.get(url)
    rows = PyQuery(response.text)('#ba-content')('table')('tr')
    beers = []
    for row in rows[3:]:  # first 3 rows are table headers
        cells = row.getchildren()[1:]  # first column is review star icon
        beers.append(Beer.build_from_xml(cells))
    return beers
def isInContinent(country_name: str, continent: str):
    """Check whether a country belongs to a continent.

    Parameters
    ----------
    country_name : str
        The country name
    continent : str
        The continent code (alpha2)

    Returns
    -------
    is_in_continent : int
        binary integer, 1 when the country is in the continent

    Examples
    --------
    >>> isInContinent('Gladstone', 'OC')
    1
    """
    try:
        # Two-letter country code.
        calpha2 = country_name_to_country_alpha2(country_name.strip())
    except KeyError:
        # Some country names in our datasets do not follow the standard
        # available on Wikipedia:
        # https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
        calpha2 = cn_to_ca2[country_name.strip()]
    # e.g. 'EU'
    concode = country_alpha2_to_continent_code(calpha2)
    return int(concode == continent)
def test_base__BaseReader__canRead__3():
    """It returns `False` if `getFieldNames()` returns an empty list."""
    with patch.object(BaseReader, 'getFieldNames', return_value=[]):
        result = BaseReader.canRead(None)
    assert result is False
def encoder_decoder_archi(inputs, is_train):
    """
    Encoder-decoder with skip connections.

    Input is assumed to be a 4-D Tensor, with [batch_size, phrase_len, 1, features]
    """
    # Collect the input plus every encoder stage's output as skip sources.
    skips = [inputs]
    encoded = inputs
    for layer_idx in range(config.encoder_layers):
        encoded = encoder_conv_block(encoded, layer_idx, is_train)
        skips.append(encoded)
    # Decode starting from the deepest encoding, pairing each decoder stage
    # with the mirrored encoder activation.
    decoded = skips[-1]
    for layer_idx in range(config.encoder_layers):
        decoded = decoder_conv_block(decoded, skips[-(layer_idx + 2)],
                                     layer_idx, is_train)
    return decoded
def build_dict_conforming_to_schema(schema, **kwargs):
    """
    Given a schema object (for example, TIMESTAMP_SCHEMA from this module) and
    a set of keyword arguments, create a dictionary that conforms to the given
    schema, using the keyword arguments to define the elements of the new dict.

    Checks the result to make sure that it conforms to the given schema,
    raising an error if not.

    :raises ValueError: if ``schema`` has no ``check_match`` method.
    :return: the new dict conforming to the schema.
    """
    # Duck-typing check that schema supports a check_match call.
    if not hasattr(schema, 'check_match'):
        raise ValueError(
            'The given "schema" does not seem to be a schema. It has no '
            '"check_match" method. Given schema: ' + repr(schema))
    # The keyword arguments already form the desired mapping; copy them
    # directly instead of rebuilding the dict key by key.
    d = dict(kwargs)
    schema.check_match(d)
    return d
def drop_non_channels(overlaps_df, filename):
    """ Return the overlap dataframe with all non-channel gene names dropped
    and index reset, and save it as a csv under ``filename``.

    :param overlaps_df: DataFrame whose columns are overlap sets of gene names.
    :param filename: path of the csv file to write.
    :return: DataFrame where each column holds only channel gene names
        (per the module-level IUPHAR_Channels_names list), packed to the top
        of the column with NaN padding below.
    """
    df = overlaps_df
    channels_df_dict = {}
    for column in df.columns:
        # For each set of overlaps, drop all the gene names that are not
        # channels. They are replaced by NaNs.
        channels_bool = df.loc[:, column].isin(IUPHAR_Channels_names)
        channels_df_dict[column] = df.loc[channels_bool, column]
    channels_df = pd.DataFrame(channels_df_dict)
    clean_channels_df = channels_df.reset_index(drop=True).copy()
    for column in channels_df.columns:
        # Blank the column so it can be refilled from the top. Use np.nan:
        # the np.NaN alias was removed in NumPy 2.0.
        clean_channels_df.loc[:, column] = np.nan
        channel_names = list(channels_df.loc[:, column].dropna())
        # Pack the channel names into the top rows of the column.
        clean_channels_df.loc[0:len(channel_names) - 1, column] = channel_names
    clean_channels_df.to_csv(filename)
    return clean_channels_df
def main():
    """ Find all the instances of the regular expression of the KO number in the file"""
    args = get_args()
    pattern = 'K{1}[0-9]{5}'
    # Extract every KO number from the input text.
    ko_numbers = re.findall(pattern, args.file.read())
    # Shape the matches into a single-column DataFrame for printing.
    column = np.array(ko_numbers).reshape(len(ko_numbers), 1)
    frame = pd.DataFrame(column, columns=['konumber'])
    # Widen the pandas display options so the frame prints without collapsing.
    with pd.option_context('display.max_rows', None,
                           'display.max_columns', None,
                           'display.precision', 3,
                           ):
        print(frame, file=args.outfile)
def AddWorkloadMetadataFromNodeFlag(parser, hidden=False):
  """Adds the --workload-metadata-from-node flag to the parser.

  Args:
    parser: A given parser.
    hidden: Whether or not to hide the help text.
  """
  help_text = """\
Sets the node metadata option for workload metadata configuration. This feature
is scheduled to be deprecated in the future and later removed.
"""
  # Map each accepted choice to its description.
  choice_descriptions = {
      'SECURE': 'Prevents workloads not in hostNetwork from accessing '
                'certain VM metadata, specifically kube-env, which '
                'contains Kubelet credentials, and the instance identity '
                'token. This is a temporary security solution available '
                'while the bootstrapping process for cluster nodes is '
                'being redesigned with significant security improvements.',
      'EXPOSED': 'Exposes all VM metadata to workloads.',
      'UNSPECIFIED': 'Chooses the default.',
  }
  parser.add_argument(
      '--workload-metadata-from-node',
      default=None,
      choices=choice_descriptions,
      type=lambda x: x.upper(),
      hidden=hidden,
      help=help_text)
def debug_task(self):
    """Simple testing task to debug celery."""
    message = 'Request: {0!r}'.format(self.request)
    print(message)
def evaluate_all_flights(model, train_flights_dict, val_flights_dict, trial_folder, n_extreme_flights=10):
    """
    Run the model on every training/validation flight, save predictions and
    per-flight plots, and partition flights into best/worst by position error.

    Arguments
        model: trained tf model to make the predictions
        train_flights_dict: a dictionary whose key is flight name and value is a tuple of (features,labels)
        val_flights_dict: same but for validation flights
        trial_folder: string name of the trial folder "DeepNav_results/trial_###"
        n_extreme_flights: integer number of flights to be separated as best or worst, for example, if
        n_extreme_flights=5 then best (or worst) folder will contain best 5 flights
    return
        flights_summary: a dictionary of two elements (training & validation), the value is a 2D list
        whose columns are (flight_number, flight_duration, max_pos_error, max_vel_error)
    Outputs
    - creates one pdf file containing plots of both prediction and ground
    truth of attitude, velocity and postion for each flight, with these pdfs, the
    following folders are populated
    # training
    # |_ differenced
    # |_ reconstructed
    #    |_best - worst - other
    # validation
    # |_ differenced
    # |_ reconstructed
    #    |_best - worst - other
    """
    # loop on sets, one iteration for training and another for validation
    flights_summary = {}
    set_names = ["training", "validation"]
    for flights_dict, set_name in zip([train_flights_dict, val_flights_dict], set_names):
        # sort flights by name (shorter flight first)
        flights_list = sorted(flights_dict.items())
        total_flights = len(flights_list) - 1
        # dictionary of (flight_name : max_pos_error) pairs, used to extract the best & worst flights
        flights_errors = {}
        # rows of (flight_number, flight_duration, max_pos_error, max_vel_error)
        set_summary = []
        for flight_number, one_flight_data in enumerate(flights_list):
            ##to speedup experimenting
            # if flight_number > 5:
            #     break
            flight_name = one_flight_data[0]
            print("flight " + str(flight_number) + "/" + str(total_flights) + " : " + flight_name)
            features = one_flight_data[1][0]
            ground_truth_diff = one_flight_data[1][1]
            predictions_diff = model.predict(features)
            # Reconstruct the original signals from differenced signals
            ground_truth_reconstructed = np.cumsum(ground_truth_diff, axis=0)
            predictions_reconstructed = np.cumsum(predictions_diff, axis=0)
            # reconstructed output csv file name
            output_csv_file_nn = os.path.join(trial_folder, set_name, "reconstructed",
                                              "nn_output_csv", flight_name + "_nn.csv")
            # differenced output csv file name
            output_csv_file_nn_diff = os.path.join(trial_folder, set_name, "differenced",
                                                   "nn_output_csv", flight_name + "_nn.csv")
            # save the reconstructed predictions (ground truth already saved by create_dataset.py)
            np.savetxt(output_csv_file_nn, predictions_reconstructed, delimiter=",")
            # save the differenced predictions
            np.savetxt(output_csv_file_nn_diff, predictions_diff, delimiter=",")
            # maximum errors between prediction and ground truth
            # NOTE(review): columns 0:3 are treated as velocity and 3:6 as
            # position — confirm against the dataset layout.
            max_velocity_error = np.max(np.linalg.norm(ground_truth_reconstructed[:, 0:3]
                                                       - predictions_reconstructed[:, 0:3], axis=1))
            max_position_error = np.max(np.linalg.norm(ground_truth_reconstructed[:, 3:6]
                                                       - predictions_reconstructed[:, 3:6], axis=1))
            # add error to the output file name
            pdf_name = flight_name + "_MPE_" f'{max_position_error:.2f}' + \
                "_MVE_" f'{max_velocity_error:.2f}' + ".pdf"
            # create a pdf for this flight differenced signals
            pdf_name_diff = os.path.join(trial_folder, set_name, "differenced", pdf_name)
            flight_pdf_plots(pdf_name_diff, ground_truth_diff, predictions_diff)
            # create a pdf for this flight reconstructed signals
            pdf_name_recon = os.path.join(trial_folder, set_name, "reconstructed", "other", pdf_name)
            flight_pdf_plots(pdf_name_recon, ground_truth_reconstructed, predictions_reconstructed)
            flights_errors[pdf_name] = max_position_error
            # 0.2 s sample period, converted to minutes
            flight_duration = ground_truth_reconstructed.shape[0] * 0.2 / 60
            set_summary.append([int(flight_name[0:4]), flight_duration, max_position_error, max_velocity_error])
        flights_summary[set_name] = set_summary
        # sort the flights from by position error (min error first)
        sorted_flights = sorted(flights_errors.items(), key=lambda x: x[1])
        # move the pdfs of best & worst flights of this set to their respective folders
        old_name_base = os.path.join(trial_folder, set_name, "reconstructed", "other")
        best_name_base = os.path.join(trial_folder, set_name, "reconstructed", "best")
        worst_name_base = os.path.join(trial_folder, set_name, "reconstructed", "worst")
        for i in range(n_extreme_flights):
            pdf_name = sorted_flights[i][0]
            old_name = os.path.join(old_name_base, pdf_name)
            new_name = os.path.join(best_name_base, pdf_name)
            os.rename(old_name, new_name)
        for i in range(-n_extreme_flights, 0):
            pdf_name = sorted_flights[i][0]
            old_name = os.path.join(old_name_base, pdf_name)
            new_name = os.path.join(worst_name_base, pdf_name)
            os.rename(old_name, new_name)
    return flights_summary
def _train_with_autotune(root_dir):
    """Starts training using a tuner (i.e. Vizier).

    Creates (or joins) a hyperparameter study, then repeatedly pulls trials
    from the tuner and trains with each trial's hyperparameters until no
    pending trials remain.

    Args:
        root_dir: String directory to save the training results.
    """
    study_name = 'aptamer_ff.%s' % (FLAGS.study_name or FLAGS.run_name)
    client_handle = '%s/%s' % (study_name, FLAGS.task)
    tuner = tf.training.HPTuner(client_handle)
    tuner_goal = TUNER_LOSS_TO_GOAL[FLAGS.tuner_loss]
    study = _get_study(study_name, tuner_goal, FLAGS.tuner_algorithm,
                       FLAGS.num_conv_layers, FLAGS.max_strides, FLAGS.max_rates,
                       FLAGS.num_fc_layers, FLAGS.hpconfig)
    tuner.create_study(study)
    # if we have a dataset defined, grab the targets so we can report on them
    # in Vizier
    if FLAGS.dataset:
        metrics_targets = set()
        for t_list in config.DEFAULT_AFFINITY_TARGET_MAPS[FLAGS.dataset].values():
            for target in t_list:
                metrics_targets.add(target)
        metrics_targets = list(metrics_targets)
    else:
        metrics_targets = None
    # NOTE(review): metrics_targets is built but not passed to anything below
    # within this function — confirm whether it is dead code.
    # The standard approach of tuner.next_trial() is currently broken if
    # some workers restart (see b/64980341). The code below is a
    # workaround.
    while get_pending_or_stopping_trial_workaround(tuner):
        train_dir = '%s/%s' % (root_dir, tuner.trial_handle())
        hps = tuner.hparams()
        _copy_flags_to_hparams(hps)
        run_training_with_default_inputs(hps, train_dir, tuner, FLAGS.master,
                                         FLAGS.validation_fold_template)
def _REOM(y, t, pot, l2):
    """
    NAME:
       _REOM
    PURPOSE:
       implements the EOM, i.e., the right-hand side of the differential
       equation for planar radial motion
    INPUT:
       y - current phase-space position [R, vR]
       t - current time
       pot - (list of) Potential instance(s)
       l2 - angular momentum squared
    OUTPUT:
       dy/dt
    HISTORY:
       2010-07-20 - Written - Bovy (NYU)
    """
    R, vR = y
    # Centrifugal term plus the radial force from the potential.
    radial_accel = l2 / R**3. + _evaluateplanarRforces(pot, R, t=t)
    return [vR, radial_accel]
def optimize_inst(module, inst):
    """Simplify one instruction whose operands are all constants."""
    # Constant folding requires every Id operand to come from a constant
    # instruction; otherwise leave the instruction untouched.
    for operand in inst.operands:
        if isinstance(operand, ir.Id):
            if operand.inst.op_name not in ir.CONSTANT_INSTRUCTIONS:
                return inst
    # OpCompositeExtract's optimizer is the only one that does not take
    # the module.
    if inst.op_name == 'OpCompositeExtract':
        return optimize_OpCompositeExtract(inst)
    dispatch = {
        'OpCompositeConstruct': optimize_OpCompositeConstruct,
        'OpIAdd': optimize_OpIAdd,
        'OpIMul': optimize_OpIMul,
        'OpLogicalAnd': optimize_OpLogicalAnd,
        'OpLogicalEqual': optimize_OpLogicalEqual,
        'OpLogicalNot': optimize_OpLogicalNot,
        'OpLogicalNotEqual': optimize_OpLogicalNotEqual,
        'OpLogicalOr': optimize_OpLogicalOr,
        'OpNot': optimize_OpNot,
        'OpSNegate': optimize_OpSNegate,
        'OpVectorShuffle': optimize_OpVectorShuffle,
    }
    folder = dispatch.get(inst.op_name)
    if folder is not None:
        inst = folder(module, inst)
    return inst
def main(argv):
  """Runs XROTOR over all desired conditions and output to a JSON file.

  Args:
    argv: Command-line argument list; parsed in place by gflags.
  """
  # Parse flags.
  try:
    argv = FLAGS(argv)
  except gflags.FlagsError, e:  # Python 2 except syntax; this module is py2.
    print '\nError: %s\n' % e
    sys.exit(1)
  logging.basicConfig(stream=sys.stdout,
                      format='%(asctime)s %(levelname)-8s %(message)s',
                      level=logging.INFO)
  databases = dict()
  # Run both solver formulations over the full velocity/rate grid.
  for solution_type in ['potential', 'graded_momentum']:
    # Generate a sequence of commands to be piped into XROTOR.
    input_str = GetXrotorInput(FLAGS.freestream_vels, FLAGS.angular_rates,
                               solution_type)
    # Call XROTOR with input_str piped into stdin.  Parse the output.
    logging.info('Running XROTOR on %s using the %s formulation for %s cases. '
                 'This may take awhile.', FLAGS.rotor_file, solution_type,
                 str(len(FLAGS.freestream_vels) * len(FLAGS.angular_rates)))
    xrotor_output = RunXrotor(FLAGS.rotor_file, input_str)
    # Make an ordered dictionary of rotor database values such as the
    # thrust and power coefficients and the angular rates and freestream
    # velocities at which they are evaluated.
    databases[solution_type] = MakeDatabase(
        FLAGS.rotor_file, FLAGS.freestream_vels.tolist(),
        FLAGS.angular_rates.tolist(), xrotor_output,
        {'thrust': map(float, FLAGS.thrust_corrections),
         'power': map(float, FLAGS.power_corrections),
         'blade_pitch_deg': FLAGS.blade_pitch_correction_deg})
  # Combine both formulations' results into a single database.
  merged_database = MergeDatabases(databases)
  # Write the rotor database into the standard JSON format.
  logging.info('Writing database to %s.', FLAGS.json_output_file)
  WriteJsonDatabase(FLAGS.json_output_file, merged_database)
  logging.shutdown()
def table_3_3(M, lambd_nos, lambd_cil):
    """Lift-curve slope (Cy-alpha) for an ogive nose section.

    Interpolates tabulated data over the similarity parameter
    sqrt(|M^2 - 1|)/lambda_nose and the ratio lambda_cyl/lambda_nose.

    arguments: Mach number, fineness ratio of the nose, fineness ratio
        of the cylindrical part.
    return: interpolated Cy value of the nose section.
    """
    cy1iz_alf_0 = [0.0350, 0.0350, 0.0350, 0.0350, 0.0362, 0.0375, 0.0380, 0.0378,
                   0.0374, 0.0364, 0.0350, 0.0337, 0.0325, 0.0315, 0.0305, 0.0300]
    cy1iz_alf_05 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0445, 0.0472,
                    0.0480, 0.0475, 0.0460, 0.0435, 0.0420, 0.0385, 0.0375, 0.0365]
    cy1iz_alf_1 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0455, 0.0495,
                   0.0515, 0.0520, 0.0515, 0.0485, 0.0465, 0.0445, 0.0425, 0.0410]
    cy1iz_alf_2 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0380, 0.0430, 0.0455, 0.0515,
                   0.0540, 0.0555, 0.0552, 0.0535, 0.0515, 0.0485, 0.0470, 0.0455]
    cy1iz_alf_4 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0455, 0.0515,
                   0.0549, 0.0565, 0.0565, 0.0505, 0.0545, 0.0524, 0.0502, 0.0480]
    razm = [-0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2, 2.2]
    # Signed similarity parameter: negative below Mach 1, positive above.
    if (M**2 - 1) >= 0:
        abscissa = np.sqrt(M**2 - 1) / lambd_nos
    else:
        abscissa = -np.sqrt(1 - M**2) / lambd_nos
    ratio = lambd_cil / lambd_nos

    def column(table):
        # 1-D lookup of the table at the current similarity parameter.
        return np.interp(abscissa, razm, table)

    # Blend between neighbouring tables according to the fineness ratio.
    if ratio == 0:
        return column(cy1iz_alf_0)
    if 0 < ratio <= 0.5:
        return interpol(column(cy1iz_alf_0), column(cy1iz_alf_05), ratio / 0.5)
    if 0.5 < ratio <= 1:
        return interpol(column(cy1iz_alf_05), column(cy1iz_alf_1), (ratio - 0.5) / 0.5)
    if 1 < ratio <= 2:
        return interpol(column(cy1iz_alf_1), column(cy1iz_alf_2), ratio - 1)
    if 2 < ratio <= 4:
        return interpol(column(cy1iz_alf_2), column(cy1iz_alf_4), ratio - 2)
    return column(cy1iz_alf_4)
def test():
    """ Ensure that DMC obtains the exact result for a hydrogen atom """
    from pyscf import gto, scf
    from pyqmc.dmc import limdrift
    import pandas as pd
    # NOTE(review): `limdrift` is imported but not used in this function —
    # confirm whether pyq.rundmc uses it implicitly or the import is stale.
    # Hydrogen atom, one electron, minimal basis; the exact ground-state
    # energy is -0.5 Hartree.
    mol = gto.M(atom="H 0. 0. 0.", basis="sto-3g", unit="bohr", spin=1)
    mf = scf.UHF(mol).run()
    nconf = 1000
    configs = pyq.initial_guess(mol, nconf)
    # na=nb=0 disables the Jastrow expansion terms.
    wf, _ = pyq.generate_wf(mol, mf, jastrow_kws=dict(na=0, nb=0))
    enacc = pyq.EnergyAccumulator(mol)
    warmup = 200
    branchtime = 5
    # Run DMC long enough that `warmup` branch blocks can be discarded.
    dfdmc, configs_, weights_ = pyq.rundmc(
        wf,
        configs,
        nsteps=4000 + warmup * branchtime,
        branchtime=branchtime,
        accumulators={"energy": enacc},
        ekey=("energy", "total"),
        tstep=0.01,
        verbose=True,
    )
    dfdmc = pd.DataFrame(dfdmc)
    dfdmc.sort_values("step", inplace=True)
    # Keep only the post-equilibration portion of the run.
    dfprod = dfdmc[dfdmc.step >= warmup]
    # Reblock to get a decorrelated error estimate, weighted by walker weight.
    rb_summary = reblock.reblock_summary(
        dfprod[["energytotal", "energyei"]], 20, weights=dfprod["weight"]
    )
    energy, err = [rb_summary[v]["energytotal"] for v in ("mean", "standard error")]
    # The DMC energy must agree with the exact -0.5 Ha within 5 sigma.
    assert (
        np.abs(energy + 0.5) < 5 * err
    ), "energy not within {0} of -0.5: energy {1}".format(5 * err, np.mean(energy))
def ms_to_timestamp(ms):
    """Format a millisecond count as an SRT-style 'HH:MM:SS,mmm' string."""
    # Clamp into the representable range instead of raising.
    # XXX throw on overflow/underflow?
    clamped = min(max(ms, 0), MAX_REPRESENTABLE_TIME)
    hours, minutes, seconds, millis = ms_to_times(clamped)
    return "%02d:%02d:%02d,%03d" % (hours, minutes, seconds, millis)
def _lovasz_softmax_flat(y_pred, y_true, classes="present"):
    """
    Multi-class Lovasz-Softmax loss.

    y_pred: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
    y_true: [P] Tensor, ground truth y_true (between 0 and C - 1)
    classes: 'all' for all, 'present' for classes present in y_true, or a list of classes to average.
    """
    if y_pred.numel() == 0:
        # only void pixels, the gradients should be 0
        return y_pred * 0.0
    C = y_pred.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
    for c in class_to_sum:
        fg = (y_true == c).float()  # foreground for class c
        # BUG FIX: was `classes is "present"` — identity comparison against a
        # string literal relies on interner behavior; use equality instead.
        if classes == "present" and fg.sum() == 0:
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError("Sigmoid output possible only with 1 class")
            class_pred = y_pred[:, 0]
        else:
            class_pred = y_pred[:, c]
        # Per-pixel prediction errors, sorted descending for the Lovasz
        # extension; the gradient weights come from _lovasz_grad.
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(_lovasz_grad(fg_sorted))))
    return mean(losses)
def m_rounding(m, weights):
    """Validate the M-value used for rounding.

    Raises ValueError when ``m`` is not an integer or is smaller than the
    number of items in ``weights``; otherwise returns None.
    """
    is_integer = isinstance(m, int)
    if not is_integer:
        raise ValueError('M for rounding should be an integer number')
    item_count = len(weights)
    if m < item_count:
        raise ValueError('M is lower than number of items in a sum')
def quantum_state_encoding_circuit(bits):
    """Build and return the quantum-state encoding circuit for ``bits``.

    Applies a Hadamard gate to every qubit in ``bits``.
    """
    encoding = cirq.Circuit()
    encoding.append(cirq.H.on_each(bits))
    return encoding
def parse_mov_date(date_str):
    """Parse an ISO-like timestamp string into a datetime, or None on failure.

    Accepts strings in '%Y-%m-%dT%H:%M:%S%z' form; returns None for
    non-strings and unparseable values.
    """
    fmt = "%Y-%m-%dT%H:%M:%S%z"
    try:
        parsed = datetime.datetime.strptime(date_str, fmt)
    except (TypeError, ValueError):
        parsed = None
    return parsed
def get_settable_attr(attr):
    """
    Walk up the connection hierarchy until a settable attribute is found.

    If attr is not settable, navigate up in the connection hierarchy until
    we find the settable attribute (e.g. in RigSqueeze the ikFk state
    attribute is redirected to the root ctrl).  If the attribute has been
    piped through a utility node, we follow the connection through it.
    """
    def _needs_redirect(candidate):
        # A missing attribute terminates the search.
        if not candidate:
            return False
        # Non-settable or non-keyable attributes must be redirected.
        if not candidate.isSettable() or not candidate.isKeyable():
            return True
        # Attributes living on utility nodes are followed through as well.
        classification = pymel.getClassification(candidate.node().type())
        if any(True for token in classification if 'utility' in token):
            return True
        return False

    while _needs_redirect(attr):
        attr = get_input_attr_from_output_attr(attr)
    return attr
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _(b"adds requires a pattern"))
    # Keep only revisions whose file status reports the pattern as 'added'.
    return checkstatus(repo, subset, pat, 'added')
def adjust_spines(ax, spines, position=5):
    """
    Set custom visibility and position of axes
    ax : Axes
        Axes handle
    spines : List
        String list of 'left', 'bottom', 'right', 'top' spines to show
    position : Integer
        Number of points for position of axis
    """
    for loc, spine in ax.spines.items():
        if loc in spines:
            # Move the visible spine outward by `position` points.
            spine.set_position(('outward', position))
        else:
            spine.set_color('none')  # don't draw spine
    # turn off ticks where there is no spine
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    elif 'right' in spines:
        ax.yaxis.set_ticks_position('right')
    else:
        # no yaxis ticks
        ax.yaxis.set_ticks([])
        # BUG FIX: tick_params side arguments take booleans; the 'on'/'off'
        # strings were deprecated in Matplotlib 2.2 and removed in 3.0 (and a
        # non-empty string is truthy, defeating the intent of hiding ticks).
        ax.tick_params(axis='y', which='both', left=False, right=False)
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    elif 'top' in spines:
        ax.xaxis.set_ticks_position('top')
    else:
        # no xaxis ticks
        ax.xaxis.set_ticks([])
        ax.tick_params(axis='x', which='both', bottom=False, top=False)
def setup_experiment(exp_dir, config, resume=False):
    """Initializes a pretraining or RL experiment.

    On first use the experiment directory is created and the config (as
    yaml) plus the current git hash are recorded inside it.  If the
    directory already exists, the experiment is only continued when
    ``resume`` is True; otherwise a ValueError prevents overwriting.
    """
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)
        config_path = os.path.join(exp_dir, "config.yaml")
        with open(config_path, "w") as fp:
            yaml.dump(ConfigDict.to_dict(config), fp)
        hash_path = os.path.join(exp_dir, "git_hash.txt")
        with open(hash_path, "w") as fp:
            fp.write(git_revision_hash())
    elif resume:
        # Continue an existing run by reloading its stored config.
        load_config_from_dir(exp_dir, config)
    else:
        raise ValueError(
            "Experiment already exists. Run with --resume to continue.")
async def test_disabled(hass):
    """When enabled=False, everything fails."""
    # Expose one switch so the TurnOn directive targets a real entity.
    hass.states.async_set("switch.test", "on", {"friendly_name": "Test switch"})
    request = get_new_request("Alexa.PowerController", "TurnOn", "switch#test")
    # Record any switch.turn_on service calls; none should happen while
    # the Alexa integration is disabled.
    call_switch = async_mock_service(hass, "switch", "turn_on")
    msg = await smart_home.async_handle_message(
        hass, DEFAULT_CONFIG, request, enabled=False
    )
    await hass.async_block_till_done()
    assert "event" in msg
    msg = msg["event"]
    assert not call_switch
    # A disabled handler must answer with an ErrorResponse marking the
    # bridge unreachable instead of executing the directive.
    assert msg["header"]["name"] == "ErrorResponse"
    assert msg["header"]["namespace"] == "Alexa"
    assert msg["payload"]["type"] == "BRIDGE_UNREACHABLE"
def repeat_exp(plan_func, n=1):
    """
    Quick wrapper that replays ``plan_func`` ``n`` times, e.g.
    >> RE(repeat_exp(tomo_scan('tomo_scan_config.yml')), 2)

    Note: ``plan_func`` is iterated directly, so a one-shot generator is
    exhausted after the first pass; only re-iterable plans truly repeat.
    """
    repeats_done = 0
    while repeats_done < n:
        yield from plan_func
        repeats_done += 1
def data_context_topology_context_topologyuuid_linklink_uuid_available_capacity_bandwidth_profile_committed_information_rate_get(uuid, link_uuid):  # noqa: E501
    """Return the committed-information-rate capacity value for a link.

    Stub handler; a real implementation returns tapi.common.CapacityValue. # noqa: E501

    :param uuid: Id of topology
    :type uuid: str
    :param link_uuid: Id of link
    :type link_uuid: str
    :rtype: TapiCommonCapacityValue
    """
    placeholder = 'do some magic!'
    return placeholder
def rotate(self, matrix: List[List[int]]) -> None:
    """
    Rotate the square matrix 90 degrees clockwise in place.
    Do not return anything, modify matrix in-place instead.
    """
    n = len(matrix)
    # Step 1: transpose (mirror across the main diagonal).
    for row in range(n):
        for col in range(row, n):
            matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
    # Step 2: reverse each row to complete the clockwise rotation.
    for row_values in matrix:
        row_values.reverse()
def CreateMD5ChecksumFile(filename, mangled_filename=None):
    """Create an MD5 checksum file for `filename` and return its path.

    NOTE(review): despite the original summary mentioning "upload", nothing
    here uploads; the caller is responsible for that — confirm.
    """
    display_name = mangled_filename if mangled_filename else os.path.basename(filename)
    digest = CalculateMD5Checksum(filename)
    checksum_path = '%s.md5sum' % filename
    # '<md5> *<name>' is the conventional binary-mode md5sum line format.
    with open(checksum_path, 'w') as checksum_file:
        checksum_file.write('%s *%s' % (digest, display_name))
    return checksum_path
def parse_arguments() -> argparse.Namespace:
    """Parse command-line arguments for panoptic segmentation evaluation."""
    parser = argparse.ArgumentParser(
        description="Panoptic segmentation evaluation."
    )
    add = parser.add_argument
    add("--gt", "-g", required=True, help="path to panseg ground truth")
    add("--result", "-r", required=True, help="path to panseg results")
    add(
        "--config",
        "-c",
        default=None,
        help="Path to config toml file. Contains definition of categories, "
        "and optionally attributes and resolution. For an example "
        "see scalabel/label/testcases/configs.toml",
    )
    add(
        "--out-file",
        default="",
        help="Output file for panseg evaluation results.",
    )
    add(
        "--ignore-unknown-cats",
        action="store_true",
        help="ignore unknown categories for panseg evaluation",
    )
    add(
        "--nproc",
        "-p",
        type=int,
        default=NPROC,
        help="number of processes for panseg evaluation",
    )
    return parser.parse_args()
def get_similarity_graph(
    *,
    fullgraph: Union[str, BELGraph] = DEFAULT_FULLGRAPH_WITHOUT_CHEMSIM_PICKLE,
    rebuild: bool = False,
    mapping_file: str = DEFAULT_CHEMICALS_MAPPING_PATH,
    chemsim_graph_path=None,
    clustered: bool = True,
    weighted: bool = False,
    minimum_similarity: float = 0.7,
    name: str = 'Chemical Similarity Graph',
    version: str = '1.1.0',
):
    """
    Create a BELGraph with chemicals as nodes, and similarity as edges.

    :param fullgraph: a BELGraph or path to a pickled one, used as the
        source of PubChem chemical nodes
    :param rebuild: if False, return a cached edge list when one exists
    :param mapping_file: an existing dataframe with pubchemIDs and Smiles
    :param chemsim_graph_path: if given, pickle the resulting graph here
    :param clustered: whether to build the graph from similarity clusters
    :param weighted: whether to store the similarity as an edge weight
    :param minimum_similarity: the percent in which the chemicals are similar
    :param name: name of the created BELGraph
    :param version: version of the created BELGraph
    """
    # NOTE(review): the cache fast-path returns an nx.Graph from
    # read_edgelist while the rebuilt result is a pybel.BELGraph —
    # callers see two different types; confirm this is intended.
    if not rebuild and weighted and os.path.exists(DEFAULT_CHEMSIM_WEIGHTED_PICKLE):
        return nx.read_edgelist(DEFAULT_CHEMSIM_WEIGHTED_PICKLE)
    elif not rebuild and not weighted and os.path.exists(DEFAULT_CHEMSIM_PICKLE):
        return nx.read_edgelist(DEFAULT_CHEMSIM_PICKLE)
    if isinstance(fullgraph, BELGraph):
        fullgraph_without_chemsim = fullgraph
    else:
        fullgraph_without_chemsim = pybel.from_pickle(fullgraph)
    # Collect every PubChem chemical identifier present in the full graph.
    pubchem_ids = []
    for node in fullgraph_without_chemsim:
        if node.namespace != PUBCHEM_NAMESPACE:
            continue
        pubchem_ids.append(node.identifier)
    # Prefer the local id->SMILES mapping file; otherwise fetch SMILES.
    if os.path.exists(mapping_file):
        pubchem_id_to_smiles = parse_chemical_mapping(mapping_file, pubchem_ids)
    else:
        pubchem_id_to_smiles = get_smiles(pubchem_ids)
    pubchem_id_to_fingerprint = get_fingerprints(pubchem_id_to_smiles)
    chemsim_graph = pybel.BELGraph(name=name, version=version)
    if clustered:
        chemsim_graph = create_clustered_chemsim_graph(
            pubchem_id_to_fingerprint=pubchem_id_to_fingerprint,
            chemsim_graph=chemsim_graph,
            weighted=weighted,
        )
    else:
        # Pairwise similarity over all fingerprints; only pairs above the
        # threshold become 'association' edges.
        similarities = get_similarity(pubchem_id_to_fingerprint)
        similarities_it = tqdm(similarities.items(), desc='Creating similarity BELGraph')
        for (source_pubchem_id, target_pubchem_id), similarity in similarities_it:
            if similarity < minimum_similarity:
                continue
            source = pybel.dsl.Abundance(namespace=PUBCHEM_NAMESPACE, identifier=source_pubchem_id)
            target = pybel.dsl.Abundance(namespace=PUBCHEM_NAMESPACE, identifier=target_pubchem_id)
            chemsim_graph.add_unqualified_edge(source, target, 'association')
            if weighted:
                for key in chemsim_graph[source][target]:
                    chemsim_graph[source][target][key]['weight'] = similarity
    # Persist the graph: explicit path wins, then the weighted/unweighted
    # default pickle locations.
    if chemsim_graph_path is not None:
        pybel.to_pickle(chemsim_graph, chemsim_graph_path)
    elif weighted:
        pybel.to_pickle(chemsim_graph, DEFAULT_CHEMSIM_WEIGHTED_PICKLE)
    else:
        pybel.to_pickle(chemsim_graph, DEFAULT_CHEMSIM_PICKLE)
    return chemsim_graph
def update_mlwh_with_cog_uk_ids(samples: List[Dict[str, str]]) -> None:
    """Update the MLWH to write the COG UK barcode for each sample.

    Each sample is matched in the lighthouse sample table on
    (root_sample_id, rna_id, result) and has its cog_uk_id set.  Raises
    UnmatchedSampleError when not every sample finds a matching row.

    Arguments:
        samples {List[Dict[str, str]]} -- list of samples to be updated
    """
    if len(samples) == 0:
        return None
    # assign db_connection to avoid UnboundLocalError in 'finally' block, in case of exception
    db_connection = None
    try:
        data = []
        for sample in samples:
            # using 'b_' prefix for the keys because bindparam() doesn't allow you to use the real
            # column names
            data.append(
                {
                    "b_root_sample_id": sample[FIELD_ROOT_SAMPLE_ID],
                    "b_rna_id": sample[FIELD_RNA_ID],
                    "b_result": sample[FIELD_RESULT],
                    "b_cog_uk_id": sample[FIELD_COG_BARCODE],
                }
            )
        sql_engine = create_mysql_connection_engine(
            app.config["WAREHOUSES_RW_CONN_STRING"], app.config["ML_WH_DB"]
        )
        table = get_table(sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
        # One UPDATE statement executed once per entry in `data`
        # (SQLAlchemy "executemany" via bound parameters).
        stmt = (
            table.update()
            .where(
                and_(
                    table.c.root_sample_id == bindparam("b_root_sample_id"),
                    table.c.rna_id == bindparam("b_rna_id"),
                    table.c.result == bindparam("b_result"),
                )
            )
            .values(cog_uk_id=bindparam("b_cog_uk_id"))
        )
        db_connection = sql_engine.connect()
        results = db_connection.execute(stmt, data)
        # rowcount across the executemany must equal the sample count,
        # otherwise some samples had no matching MLWH row.
        rows_matched = results.rowcount
        if rows_matched != len(samples):
            msg = f"""
Updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK ids was
only partially successful.
Only {rows_matched} of the {len(samples)} samples had matches in the MLWH
{app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table.
"""
            logger.error(msg)
            raise UnmatchedSampleError(msg)
    except (Exception) as e:
        # Log with context, then re-raise so callers can handle/report it.
        msg = f"""
Error while updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK
ids.
{type(e).__name__}: {str(e)}
"""
        logger.error(msg)
        raise
    finally:
        if db_connection is not None:
            db_connection.close()
def make_09f9():
    """Warehouse inventory footer (returns an empty payload)."""
    return ""
def get_last_successful_hour_or_start_hour():
  """Get the last hour that ran successfully or the start hour."""
  # Fall back to the configured start hour when no run has succeeded yet.
  return crash_stats.get_last_successful_hour() or get_start_hour()
def print_hex_data(data, begin_offset=0, desc=""):
    """ print on stdout "hexdump -C < data" like output
        params:
          data - bytearray or array of int where each int < 255
          begin_offset - int offset that should be printed in left column
          desc - str optional description to print on the first line to describe data
    """
    if desc:
        print "{}:".format(desc)
    index = 0
    total_len = len(data)
    hex_buf = ""
    char_buf = ""
    while index < total_len:
        hex_buf += " {:02x}".format(data[index])
        # Printable ASCII goes to the right-hand column; everything else
        # is shown as '.'.
        if data[index] < 0x20 or data[index] > 0x7e:
            char_buf += "."
        else:
            char_buf += "{:c}".format(data[index])
        index += 1
        # Extra gap between the two 8-byte halves of a row.
        if index and index % 8 == 0:
            hex_buf += " "
        # Flush a full 16-byte row.
        if index > 1 and (index % 16) == 0:
            print "{:08x}  {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf)
            hex_buf = ""
            char_buf = ""
    # Flush any remaining partial row.
    # NOTE(review): for a partial last row, `index - 16` computes the wrong
    # left-column offset (it should be index - index % 16), and when
    # len(data) is a multiple of 16 this prints an extra empty row —
    # confirm whether that is acceptable for this tool's output.
    print "{:08x}  {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf)
    return
def encode_jwt(payload, secret):
    """
    Return ``payload`` as a JWT encoded with ``secret``.

    The payload is deep-copied, given a one-hour ``exp`` claim, and signed
    with HS256.

    :arg payload: the payload to encode
    :type payload: dict
    :arg secret: the secret to sign the JWT with
    :type secret: str
    :return: the JWT string
    :rtype: str
    """
    claims = copy.deepcopy(payload)
    claims["exp"] = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    encoded = jwt.encode(claims, secret, algorithm="HS256")
    # PyJWT returns JWTs as UTF8-encoded byte strings (undocumented; see
    # https://github.com/jpadilla/pyjwt/blob/ed28e495f937f50165a252fd5696a82942cd83a7/jwt/api_jwt.py#L62).
    # Callers expect a unicode string, so decode it.
    return encoded.decode("utf-8")
def make_model():
    """
    Return a pretrained torchvision VGG19 whose classifier head has been
    rebuilt for two-class car classification.
    """
    # Pretrained backbone; uses about 1 GiB of GPU memory.
    model = models.vgg19(pretrained=True)
    n_in = model.classifier[3].in_features
    n_hidden = int(np.sqrt(n_in))
    n_out = 2
    # Replace the last two linear layers of the classifier.
    model.classifier[3] = nn.Linear(n_in, n_hidden)
    model.classifier[6] = nn.Linear(n_hidden, n_out)
    return model
def empty_call_false(*args, **kwargs) -> bool:
    """
    No-op placeholder callback: ignore all arguments and return False.
    """
    return False
def GenerateColumnAttributesReport():
    """
    * Run the report-generation pipeline steps in order.
    """
    banner = "------------------------------"
    print (banner)
    print ("GenerateNewETL")
    print (banner)
    # Get script arguments:
    args = GenerateNewETLJsonArgs()
    attributes = GenerateReportAndTable(args)
    AppendETL(args, attributes)
    GeneratePostArgs(args, attributes)
def cookie_is_encoded(data):
    """ Tests whether or not a cookie is encoded / HMAC signed
        -> #bool True if encoded
        ..
            from vital.security import cookie_is_encoded
            cookie_is_encoded(
                "!YuOoKwDp8GhrwwojdjTxSCj1c2Z+7yz7r6cC7E3hBWo=?IkhlbGxvLCB3b3JsZC4i")
            # -> True
        ..
    """
    has_signature_prefix = data.startswith('!')
    has_payload_separator = '?' in data
    return has_signature_prefix and has_payload_separator
def to_file(df, file_name, *args, **kwargs):
    """
    Write the DataFrame ``df`` to ``file_name``.

    Delegates to ``DataFrame.to_csv`` with ``index=False`` frozen in; any
    extra positional/keyword arguments are forwarded.  When rewriting this
    to a different file format, rewrite test_train_model as well.
    """
    write = partial(df.to_csv, file_name, index=False)
    write(*args, **kwargs)
def test_converter_convert_with_an_unknown_event_raises_an_exception(
    event, valid_uuid, caplog
):
    """Tests given an unknown event the convert method should raise an
    UnknownEventException.
    """
    # `convert` is lazy (returns a generator), so calling it here does not
    # raise yet; the exception fires when the generator is consumed below.
    result = Converter(platform_url="", uuid_namespace=valid_uuid).convert(
        [event], ignore_errors=False, fail_on_unknown=True
    )
    with caplog.at_level(logging.ERROR):
        with pytest.raises(UnknownEventException):
            # Force evaluation so the unknown event is actually processed.
            list(result)
def sync_ldap_truststore(manager, dest: str = "") -> None:
    """Persist the base64-encoded LDAP truststore secret as a JKS file.

    :param manager: An instance of :class:`~jans.pycloudlib.manager._Manager`.
    :param dest: Absolute path where generated file is located; when empty,
        falls back to the ``ldapTrustStoreFn`` config entry.
    """
    target = dest or manager.config.get("ldapTrustStoreFn")
    manager.secret.to_file(
        "ldap_pkcs12_base64", target, decode=True, binary_mode=True,
    )
def plot_pump1():
    """
    Plot the head-vs-flow curves of all five pumps in one figure and save
    the result as pump1.png.
    """
    curves = [
        (flux_rate1, Pump1, "1.5HP(1.1kw)"),
        (flux_rate2, Pump2, "2HP(1.5kw)"),
        (flux_rate3, Pump3, "3HP(2.2kw)"),
        (flux_rate4, Pump4, "5HP(3.6kw)"),
        (flux_rate5, Pump5, "7.5HP(5.6kw)"),
    ]
    plt.figure(num=None, dpi=120)
    for flux, pump, curve_label in curves:
        plt.plot(flux.flow_range(), pump.head_formula(), label=curve_label)
    plt.legend()
    plt.xlabel("flow rate Lit/Sec")
    plt.ylabel("Head (m)")
    plt.title("Model 2009 1760 RPM")
    plt.savefig("pump1.png")
    plt.show()
def l2_normalize(x: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Normalize each row of ``x`` to unit L2 norm.

    Args:
        x: Tensor to be normalized (along dim 1).
        eps: Small value to avoid division by zero.

    Returns:
        Row-normalized tensor with the same shape as ``x``.
    """
    row_norms = torch.norm(x, p=2, dim=1, keepdim=True) + eps
    return x / row_norms.expand_as(x)
def ssq_cwt(x, wavelet='gmw', scales='log-piecewise', nv=None, fs=None, t=None,
            ssq_freqs=None, padtype='reflect', squeezing='sum', maprange='peak',
            difftype='trig', difforder=None, gamma=None, vectorized=True,
            preserve_transform=None, astensor=True, order=0, patience=0,
            flipud=True, cache_wavelet=None, get_w=False, get_dWx=False):
    """Synchrosqueezed Continuous Wavelet Transform.
    Implements the algorithm described in Sec. III of [1].
    Uses `wavelet.dtype` precision.
    # Arguments:
        x: np.ndarray
            Input vector(s), 1D or 2D. See `help(cwt)`.
        wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
            Wavelet sampled in Fourier frequency domain. See `help(cwt)`.
        scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
            CWT scales. See `help(cwt)`.
        nv: int / None
            Number of voices (wavelets per octave). Suggested >= 16.
        fs, t
            See `help(_cwt.cwt)`.
        ssq_freqs: str['log', 'linear'] / np.ndarray / None
            Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
            mapping is only approximate and wavelet-dependent.
            If None, will infer from and set to same distribution as `scales`.
        padtype: str / None
            Pad scheme to apply on input. See `help(utils.padsignal)`.
            `None` -> no padding.
        squeezing: str['sum', 'lebesgue'] / function
            See `help(ssqueezing.ssqueeze)`.
        maprange: str['maximal', 'peak', 'energy'] / tuple(float, float)
            Kind of frequency mapping used, determining the range of frequencies
            spanned (fm to fM, min to max).
            - 'maximal': fm=1/dT, fM=1/(2*dt), always. Data's fundamental
              and Nyquist frequencies, determined from `fs` (or `t`).
              Other mappings can never span outside this range.
            - ('peak', 'energy'): sets fm and fM based on center frequency
              associated with `wavelet` at maximum and minimum scale,
              respectively. See `help(wavelets.center_frequency)`.
            - 'peak': the frequency-domain trimmed bell will have its peak
              at Nyquist, meaning all other frequencies are beneath, so each
              scale is still correctly resolved but with downscaled energies.
              With sufficiently-spanned `scales`, coincides with 'maximal'.
            - 'energy': however, the bell's spectral energy is centered
              elsewhere, as right-half of bell is partly or entirely trimmed
              (left-half can be trimmed too). Use for energy-centric mapping,
              which for sufficiently-spanned `scales` will always have lesser
              fM (but ~same fM).
            - tuple: sets `ssq_freqrange` directly.
        difftype: str['trig', 'phase', 'numeric']
            Method by which to differentiate Wx (default='trig') to obtain
            instantaneous frequencies:
                    w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )
            - 'trig': use `dWx`, obtained via trigonometric (frequency-domain
              interpolant) differentiation (see `cwt`, `phase_cwt`).
            - 'phase': differentiate by taking forward finite-difference of
              unwrapped angle of `Wx` (see `phase_cwt`).
            - 'numeric': first-, second-, or fourth-order (set by `difforder`)
              numeric differentiation (see `phase_cwt_num`).
        difforder: int[1, 2, 4]
            Order of differentiation for difftype='numeric' (default=4).
        gamma: float / None
            CWT phase threshold. Sets `w=inf` for small values of `Wx` where
            phase computation is unstable and inaccurate (like in DFT):
                w[abs(Wx) < beta] = inf
            This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
            contributions from points with indeterminate phase.
            Default = sqrt(machine epsilon) = np.sqrt(np.finfo(np.float64).eps)
        vectorized: bool (default True)
            Whether to vectorize CWT, i.e. compute quantities for all scales at
            once, which is faster but uses more memory.
        preserve_transform: bool (default None) / None
            Whether to return `Wx` as directly output from `cwt` (it might be
            altered by `ssqueeze` or `phase_transform`). Uses more memory
            per storing extra copy of `Wx`.
            - Defaults to True if `'SSQ_GPU' == '0'`, else False.
        astensor: bool (default True)
            If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
            or move them back to CPU & convert to Numpy arrays.
        order: int (default 0) / tuple[int]
            `order > 0` computes ssq of `cwt` taken with higher-order GMWs.
            If tuple, computes ssq of average of `cwt`s taken at each specified
            order. See `help(_cwt.cwt_higher_order)`.
        patience: int / tuple[int, int]
            pyFFTW parameter for faster FFT on CPU; see `help(ssqueezepy.FFT)`.
        flipud: bool (default True)
            See `help(ssqueeze)`.
        cache_wavelet: bool (default None) / None
            See `help(cwt)`.
        get_w, get_dWx: bool (default False)
            `get_w`:
                True: will compute phase transform separately, assign it to
                array `w` and return it.
                False: will compute synchrosqueezing directly from `Wx` and
                `dWx` without assigning to intermediate array, which is faster
                (by 20-30%) and takes less memory.
            `get_dWx`:
                True: will return dWx
                False: discards dWx after computing `w` or synchrosqueezing.
            `get_dWx=True` with `get_w=True` uses most memory.
            These options do not affect `Tx`.
    # Returns:
        Tx: np.ndarray [nf x n]
            Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
            (nf = len(ssq_freqs); n = len(x))
            `nf = na` by default, where `na = len(scales)`.
        Wx: np.ndarray [na x n]
            Continuous Wavelet Transform of `x`, L1-normed (see `cwt`).
        ssq_freqs: np.ndarray [nf]
            Frequencies associated with rows of `Tx`.
        scales: np.ndarray [na]
            Scales associated with rows of `Wx`.
        w: np.ndarray [na x n]  (if `get_w=True`)
            Phase transform for each element of `Wx`.
        dWx: [na x n] np.ndarray (if `get_dWx=True`)
            See `help(_cwt.cwt)`.
    # References:
        1. The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications.
        G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
        https://arxiv.org/abs/1105.0010
        2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
        I. Daubechies, S. Maes.
        https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
        3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
        Decomposition. I. Daubechies, J. Lu, H.T. Wu.
        https://arxiv.org/pdf/0912.2437.pdf
        4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples. G. Thakur and H.-T. Wu.
        https://arxiv.org/abs/1006.2533
        5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
        https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
        synsq_cwt_fw.m
    """
    # Validates args, infers `nv` default, and derives N/dt/fs from input.
    def _process_args(x, scales, fs, t, nv, difftype, difforder, squeezing,
                      maprange, wavelet, get_w):
        if x.ndim == 2 and get_w:
            raise NotImplementedError("`get_w=True` unsupported with batched "
                                      "input.")
        difforder = _check_ssqueezing_args(squeezing, maprange, wavelet,
                                           difftype, difforder, get_w,
                                           transform='cwt')
        if nv is None and not isinstance(scales, np.ndarray):
            nv = 32
        N = x.shape[-1]
        dt, fs, t = _process_fs_and_t(fs, t, N)
        return N, dt, fs, difforder, nv
    # Computes the phase transform `w` from Wx (and dWx for 'trig').
    def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
        if difftype == 'trig':
            # calculate instantaneous frequency directly from the
            # frequency-domain derivative
            w = phase_cwt(Wx, dWx, difftype, gamma)
        elif difftype == 'phase':
            # !!! bad; yields negatives, and forcing abs(w) doesn't help
            # calculate inst. freq. from unwrapped phase of CWT
            w = phase_cwt(Wx, None, difftype, gamma)
        elif difftype == 'numeric':
            # !!! tested to be very inaccurate for small scales
            # calculate derivative numericly
            _, n1, _ = p2up(N)
            Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
            w = phase_cwt_num(Wx, dt, difforder, gamma)
        return Wx, w
    N, dt, fs, difforder, nv = _process_args(x, scales, fs, t, nv, difftype,
                                             difforder, squeezing, maprange,
                                             wavelet, get_w)
    wavelet = Wavelet._init_if_not_isinstance(wavelet, N=N)
    # CWT with higher-order GMWs
    if isinstance(order, (tuple, list, range)) or order > 0:
        # keep padding for `trigdiff`
        kw = dict(wavelet=wavelet, scales=scales, fs=fs, t=t, nv=nv,
                  l1_norm=True, derivative=False, padtype=padtype, rpadded=True,
                  vectorized=vectorized, cache_wavelet=cache_wavelet)
        _, n1, _ = p2up(N)
        average = isinstance(order, (tuple, list, range))
        Wx, scales = cwt(x, order=order, average=average, **kw)
        # Derivative via trigonometric differentiation, then unpad Wx.
        dWx = trigdiff(Wx, fs, rpadded=True, N=N, n1=n1)
        Wx = Wx[:, n1:n1 + N]
        if S.is_tensor(Wx):
            Wx = Wx.contiguous()
    scales, cwt_scaletype, *_ = process_scales(scales, N, wavelet, nv=nv,
                                               get_params=True)
    # regular CWT
    if order == 0:
        # l1_norm=True to spare a multiplication; for SSQ_CWT L1 & L2 are exactly
        # same anyway since we're inverting CWT over time-frequency plane
        rpadded = (difftype == 'numeric')
        Wx, scales, dWx = cwt(x, wavelet, scales=scales, fs=fs, nv=nv,
                              l1_norm=True, derivative=True, padtype=padtype,
                              rpadded=rpadded, vectorized=vectorized,
                              patience=patience, cache_wavelet=cache_wavelet)
    # make copy of `Wx` if specified
    if preserve_transform is None:
        preserve_transform = not S.is_tensor(Wx)
    if preserve_transform:
        _Wx = (Wx.copy() if not S.is_tensor(Wx) else
               Wx.detach().clone())
    else:
        _Wx = Wx
    # gamma
    if gamma is None:
        gamma = np.sqrt(EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)
    # compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
    if get_w:
        _Wx, w = _phase_transform(_Wx, dWx, N, dt, gamma, difftype, difforder)
        _dWx = None  # don't use in `ssqueeze`
        if not get_dWx:
            dWx = None
    else:
        w = None
        _dWx = dWx
    # default to same scheme used by `scales`
    if ssq_freqs is None:
        ssq_freqs = cwt_scaletype
    # affects `maprange` computation if non-tuple
    was_padded = bool(padtype is not None)
    # synchrosqueeze
    Tx, ssq_freqs = ssqueeze(_Wx, w, ssq_freqs, scales, fs=fs, t=t,
                             squeezing=squeezing, maprange=maprange,
                             wavelet=wavelet, gamma=gamma, was_padded=was_padded,
                             flipud=flipud, dWx=_dWx, transform='cwt')
    # 'numeric' differentiation trims 4 samples from each end.
    if difftype == 'numeric':
        Wx = Wx[:, 4:-4]
        Tx = Tx[:, 4:-4]
        w = w[:, 4:-4] if w is not None else None
    # Optionally move GPU tensors back to CPU numpy arrays.
    if not astensor and S.is_tensor(Tx):
        Tx, Wx, w, dWx = [g.cpu().numpy() if S.is_tensor(g) else g
                          for g in (Tx, Wx, w, dWx)]
    if get_w and get_dWx:
        return Tx, Wx, ssq_freqs, scales, w, dWx
    elif get_w:
        return Tx, Wx, ssq_freqs, scales, w
    elif get_dWx:
        return Tx, Wx, ssq_freqs, scales, dWx
    else:
        return Tx, Wx, ssq_freqs, scales
def publish_train_request_to_broker():
    """publish_train_request_to_broker

    Publish a Train a DNN message to the
    Celery Worker's broker queue. This message
    is a JSON Dictionary.

    Default Broker: ``redis://localhost:6379/6``
    Default Exchange: ``webapp.train.requests``
    Default Routing Key: ``webapp.train.requests``
    Default Queue: ``webapp.train.requests``

    Exits with status 1 when the request file is missing or empty.
    """
    parser = argparse.ArgumentParser(
        description=("Launch a Train "
                     "Request into the AntiNex "
                     "core"))
    parser.add_argument(
        "-f",
        help=("request json file to use default: "
              "./training/scaler-django-antinex-simple.json"),
        required=False,
        dest="request_file")
    parser.add_argument(
        "-d",
        help="debug",
        required=False,
        dest="debug",
        action="store_true")
    args = parser.parse_args()

    name = "train-publisher"

    log.info("{} - start".format(name))

    # resolve the request file - the CLI flag overrides the default
    request_file = "./training/scaler-django-antinex-simple.json"
    if args.request_file:
        request_file = args.request_file

    exchange_name = "webapp.train.requests"
    routing_key = "webapp.train.requests"
    queue_name = "webapp.train.requests"
    auth_url = "redis://localhost:6379/6"
    serializer = "json"

    if not os.path.exists(request_file):
        log.error(("Missing request file={}")
                  .format(
                    request_file))
        sys.exit(1)

    req_data = None
    with open(request_file, "r") as cur_file:
        req_data = json.loads(cur_file.read())

    # bug fix: this used to re-check os.path.exists(request_file), which was
    # already verified above and could never fail here - validate the parsed
    # payload instead
    if not req_data:
        log.error(("Did not find request data in file={}")
                  .format(
                    request_file))
        sys.exit(1)

    # import ssl
    # Connection("amqp://", login_method='EXTERNAL', ssl={
    #     "ca_certs": '/etc/pki/tls/certs/something.crt',
    #     "keyfile": '/etc/something/system.key',
    #     "certfile": '/etc/something/system.cert',
    #     "cert_reqs": ssl.CERT_REQUIRED,
    # })
    #
    ssl_options = {}
    app = Publisher(
        name,
        auth_url,
        ssl_options)

    if not app:
        log.error(("Failed to connect to broker={}")
                  .format(
                    auth_url))
    else:
        # Now send:
        now = datetime.datetime.now().isoformat()
        body = req_data
        body["created"] = now
        log.info("loading predict_rows")
        # re-number the CSV rows so the worker receives stable 1-based indexes
        predict_rows_df = pd.read_csv(req_data["dataset"])
        predict_rows = []
        for idx, org_row in predict_rows_df.iterrows():
            new_row = json.loads(org_row.to_json())
            new_row["idx"] = len(predict_rows) + 1
            predict_rows.append(new_row)
        body["predict_rows"] = pd.DataFrame(predict_rows).to_json()

        log.info(("using predict_rows={}")
                 .format(
                    len(predict_rows)))

        log.info(("Sending msg={} "
                  "ex={} rk={}")
                 .format(
                    str(body)[0:10],
                    exchange_name,
                    routing_key))

        # Publish the message:
        msg_sent = app.publish(
            body=body,
            exchange=exchange_name,
            routing_key=routing_key,
            queue=queue_name,
            serializer=serializer,
            retry=True)

        log.info(("End - {} sent={}")
                 .format(
                    name,
                    msg_sent))
    # end of valid publisher or not
def predict_image_classification(model: nn.Module, input_: torch.Tensor):
    """
    Run an image-classification model and return its top-1 prediction.

    Args:
        model (`nn.Module`):
            Pytorch model.
        input_ (`Tensor`):
            Input image tensor (batched along dim 0).

    Returns:
        (`tuple`)
            Softmax probability of the winning class (max 1.0) and its
            label index.
    """
    probabilities = F.softmax(model(input_), dim=1)
    score, label = torch.topk(probabilities, 1)
    if isinstance(label, torch.Tensor):
        label = label.squeeze().item()
    return score.squeeze().detach().item(), label
def _id_to_box(id_, dim):
"""Convert id to box ID"""
row = id_ // (dim ** 3)
col = (id_ % (dim ** 2)) // dim
return row * dim + col | 34,379 |
def _load_hex(instream):
    """Load font from a .hex file.

    Lines of the form ``key:hexvalue`` become glyphs; everything else is
    collected as comments. Comments before the first glyph are split between
    the global comment and the first glyph's comment.
    """
    global_comment = []
    glyphs = []
    comment = []
    for line in instream:
        line = line.rstrip('\r\n')
        if not line:
            # preserve empty lines only if they separate comments.
            # bug fix: an empty line with no pending comment previously fell
            # through to `line[0]` and raised IndexError
            if comment and comment[-1] != '':
                comment.append(line)
            continue
        value = ''
        if ':' in line:
            # parse code line
            key, value = line.rsplit(':', 1)
            value = value.strip()
        if (
                # marked as comment
                line[0] == '#'
                # pass through lines without : as comments - allows e.g. to convert diffs, like hexdraw
                or (':' not in line)
                # not a valid line, treat as comment
                or set(value) - set(string.hexdigits + ',')
            ):
            comment.append(line)
        else:
            # when first glyph is found, split comment lines between global and glyph
            if not glyphs and comment:
                global_comment, comment = split_global_comment(comment)
            glyphs.append(_convert_glyph(key, value, comment))
            comment = []
    # preserve any comment at end of file as part of global comment
    global_comment = '\n'.join([*_clean_comment(global_comment), *_clean_comment(comment)])
    return Font(glyphs, comments=global_comment, properties=dict(encoding='unicode'))
def imshow(data, which, levels):
    """
    Display order book data as an image, where order book data is either of
    `df_price` or `df_volume` returned by `load_hdf5` or `load_postgres`.
    Ask levels are stacked on top (deepest first), bid levels below.
    """
    if which == 'prices':
        labels = ['askprc.' + str(lvl) for lvl in range(levels, 0, -1)]
        labels += ['bidprc.' + str(lvl) for lvl in range(1, levels + 1)]
    elif which == 'volumes':
        labels = ['askvol.' + str(lvl) for lvl in range(levels, 0, -1)]
        labels += ['bidvol.' + str(lvl) for lvl in range(1, levels + 1)]
    plt.imshow(data.loc[:, labels].T, interpolation='nearest', aspect='auto')
    plt.yticks(range(0, levels * 2, 1), labels)
    plt.colorbar()
    plt.tight_layout()
    plt.show()
def make_small_graph(graph_description, create_using=None):
    """
    Return the small graph described by graph_description.

    graph_description is a list [ltype, name, n, xlist] where ltype is
    "adjacencylist" or "edgelist", name is the graph's name and n the number
    of nodes. The resulting graph has integer node labels 0,..,n-1.

    For ltype="adjacencylist", xlist is an adjacency list with exactly n
    entries; entry j lists the (1-based) nodes connected to vertex j, e.g.
    ``["adjacencylist", "C_4", 4, [[2, 4], [1, 3], [2, 4], [1, 3]]]`` or,
    since edges need not be listed twice,
    ``["adjacencylist", "C_4", 4, [[2, 4], [3], [4], []]]``.

    For ltype="edgelist", xlist is an edge list [[v1,w1],[v2,w2],...] with
    1-based endpoints, e.g.
    ``["edgelist", "C_4", 4, [[1, 2], [3, 4], [2, 3], [4, 1]]]``.

    Use the create_using argument to choose the graph class/type.
    """
    ltype = graph_description[0]
    if ltype not in ("adjacencylist", "edgelist"):
        raise NetworkXError("ltype must be either adjacencylist or edgelist")
    name = graph_description[1]
    n = graph_description[2]
    G = empty_graph(n, create_using)
    if ltype == "adjacencylist":
        adjlist = graph_description[3]
        if len(adjlist) != n:
            raise NetworkXError("invalid graph_description")
        # adjacency entries are 1-based; nodes are 0-based
        G.add_edges_from((u - 1, v) for v in G.nodes() for u in adjlist[v])
    else:
        for edge in graph_description[3]:
            u, v = edge[0] - 1, edge[1] - 1
            if not (0 <= u <= n - 1 and 0 <= v <= n - 1):
                raise NetworkXError("invalid graph_description")
            G.add_edge(u, v)
    G.name = name
    return G
def get_masksize(mask, labelnum=None):
    """
    Compute mask size (vertex count per ROI label) in surface space.

    Parameters:
    ----------
    mask: label image (mask); a 3-D array is reduced to its first column
    labelnum: mask's label number, use for group analysis; defaults to the
        largest label present

    Return:
    --------
    masksize: array with the size of each roi (``[0]`` when the mask is empty)

    Example:
    --------
    >>> masksize = get_masksize(mask)
    """
    if mask.ndim == 3:
        mask = mask[:, 0, 0]
    # drop the first unique value (assumed background)
    labels = np.unique(mask)[1:]
    if len(labels) == 0:
        return np.array([0])
    if labelnum is None:
        labelnum = int(np.max(labels))
    sizes = [int(np.sum(mask == lbl)) for lbl in range(1, labelnum + 1)]
    return np.array(sizes)
def _context_py2rpmversion(context):
    """get a python PEP0440 compatible version and translate it to an RPM
    version"""
    # the context needs a variable set via {% set upstream_version = 'ver' %}
    _context_check_variable(context, CONTEXT_VAR_UPSTREAM_VERSION,
                            'py2rpmversion')
    version = context.vars[CONTEXT_VAR_UPSTREAM_VERSION]
    # NOTE(review): relies on the private `_version` attribute of the parsed
    # version object - presumably packaging's Version; confirm this stays
    # stable across packaging releases
    v_python = parse(version)
    # fedora does not allow '~' in versions but uses a combination of Version
    # and Release
    # https://fedoraproject.org/wiki/Packaging:Versioning\#Pre-Release_packages
    if context['spec_style'] == 'fedora':
        if len(v_python._version.release) >= 4:
            # release[0:3] is a 3-tuple, which satisfies the three %d
            # placeholders; any further release components are dropped
            return "%d.%d.%d" % (v_python._version.release[0:3])
        else:
            return v_python.base_version
    else:
        # non-fedora style: '~' makes pre-releases sort before the base
        # version in RPM
        v_rpm = v_python.public
        if v_python.is_prerelease:
            # we need to add the 'x' in front of alpha/beta releases because
            # in the python world, "1.1a10" > "1.1.dev10"
            # but in the rpm world, "1.1~a10" < "1.1~dev10"
            # NOTE(review): plain str.replace assumes 'a'/'b'/'rc' only occur
            # as the pre-release marker in the normalized public version -
            # verify for unusual version strings
            v_rpm = v_rpm.replace('a', '~xalpha')
            v_rpm = v_rpm.replace('b', '~xbeta')
            v_rpm = v_rpm.replace('rc', '~xrc')
            v_rpm = v_rpm.replace('.dev', '~dev')
        return v_rpm
def Scrrencapture_MACOS(jarvis, s):
    """
    Start/stop a macOS screen recording by sending the
    Command+Shift+5 screenshot-toolbar shortcut.
    """
    def engine():
        # press Cmd+Shift+5, then release both modifiers.
        # bug fix: the second call was keyDown("shift"), which left the
        # Shift key stuck pressed after every invocation
        pg.keyDown("command")
        pg.keyDown("shift")
        pg.press("5")
        pg.keyUp("shift")
        pg.keyUp("command")

    engine()
    # bug fix: the status message used to live inside engine(), so stopping
    # the recording announced "Screen Recording Started" a second time
    jarvis.say('Screen Recording Started')
    n = input("Press Q to stop : ")
    if n == 'Q':
        engine()
        jarvis.say('Screen Recording Ended')
def odInit(nodename):
    """
    Create an Open Directory object to operate on the specified directory service node name.

    @param nodename: C{str} containing the node name.
    @return: C{object} an object to be passed to all subsequent functions on success,
        C{None} on failure.

    NOTE(review): stub - the body is empty and always returns C{None};
    presumably replaced by a native/extension implementation elsewhere - confirm.
    """
def testViewMenuOptions(base_fixture, qtbot):
    """
    Test the view menu entries.
    Triggering each entry must toggle the hidden flag of the version/channel
    combo boxes on every app widget.
    """
    def assert_visibility(version_hidden, channel_hidden):
        # check the hidden state of every app widget on every tab
        for tab in main_gui._ui.tabs.findChildren(TabUiGrid):
            for test_app in tab.apps:
                assert test_app._app_version_cbox.isHidden() == version_hidden
                assert test_app._app_channel_cbox.isHidden() == channel_hidden

    temp_ini_path = os.path.join(tempfile.gettempdir(), "config.ini")
    settings = Settings(ini_file=Path(temp_ini_path))
    config_file_path = base_fixture.testdata_path / "app_config.json"
    settings.set(LAST_CONFIG_FILE, str(config_file_path))

    main_gui = main_ui.MainUi(settings)
    main_gui.show()
    qtbot.addWidget(main_gui)
    qtbot.waitExposed(main_gui, 3000)
    time.sleep(5)

    # default state: everything visible
    assert_visibility(False, False)
    # trigger each menu entry and check the corresponding widgets hide
    main_gui._ui.menu_set_display_versions.trigger()
    assert_visibility(True, False)
    main_gui._ui.menu_set_display_channels.trigger()
    assert_visibility(True, True)
    # trigger again: everything visible once more
    main_gui._ui.menu_set_display_versions.trigger()
    main_gui._ui.menu_set_display_channels.trigger()
    assert_visibility(False, False)

    app.conan_worker.finish_working(10)
    Logger.remove_qt_logger()
def check_and_format_address(address):
    """Validate an address and return it in checksummed form.

    Raises ArgumentsError when the address cannot be checksummed.
    """
    try:
        return to_checksum_address(address)
    except Exception as e:
        raise ArgumentsError("invalid address {}, reason: {}"
                             .format(address, e))
def integration_tests(session):
    """
    Nox session: install the package plus dev requirements and run the
    integration test suite with coverage.

    Args:
        session: nox session

    Returns:
        None

    Raises:
        N/A
    """
    session.install("-U", "pip")
    # py3.6 needs backports for dataclasses/async generators
    if session.python == "3.6":
        session.install("dataclasses", "async_generator")
    session.install("-e", ".")
    session.install("-r", "requirements-dev.txt")
    pytest_args = (
        "pytest",
        "--cov=scrapli",
        "--cov-report",
        "xml",
        "--cov-report",
        "term",
        "tests/integration",
        "-v",
    )
    session.run(*pytest_args)
def get_cache_name(cache_type: str, tag: Optional[str] = None) -> str:
    """
    Get the canonical cache name (e.g., "tmp.cache.mem.tag") for a type of
    cache.

    :param cache_type: type of a cache (validated first)
    :param tag: optional unique tag of the cache, empty by default
    :return: name of the folder for a cache
    """
    _check_valid_cache_type(cache_type)
    parts = ["tmp.cache", cache_type]
    if tag is not None:
        parts.append(tag)
    return ".".join(parts)
def _aves2_cfg():
    """Read aipctl config.

    Returns a ConfigObj that is the merge of all discovered config files,
    applied in the order returned by ``cfg_files()`` (later files override
    earlier ones). Exits with status 1 when no configuration file is found.
    """
    config = ConfigObj()
    # The result is a merge of all the files as they appear in the list.
    # bug fix: cfg_files() used to be called a second time in the loop;
    # reuse the list computed above so both uses see the same files
    f_list = cfg_files()
    if not f_list:
        print("error: configuration file not found")
        exit(1)
    for f in f_list:
        _cfg = ConfigObj(f, encoding='UTF8')
        config.merge(_cfg)
    return config
def create_readme(top_dir,package_name,description="",docs=False):
    """
    Write a README.md skeleton for a package and return its text.

    README requires the name of the package and the directory in which to write the file in.
    Optionally, give a description and whether or not to create a 'docs' directory.

    :param top_dir: directory README.md is written into
    :param package_name: substituted for every ``{package}`` placeholder
    :param description: text placed under the "## Description" heading
    :param docs: when True the repo-structure tree also lists a ``docs`` dir
    :return: the rendered README text (also written to ``top_dir/README.md``)

    NOTE(review): despite the docstring, this function only *documents* a
    'docs' directory in the tree; it does not create one - confirm intended.
    """
    # Template body; {package}/{description} are filled via str.format below.
    readme_str="""
    # {package}
    ## Description
    {description}
    ## Examples
    ## Repo Structure
    {package}:<br/>
    ┣━ README.md<br/>
    ┣━ LICENSE<br/>
    ┣━ setup.py<br/>
    ┣━ {package}:<br/>
    ┃ ┗━ __init__.py<br/>
    """
    # Tail of the tree differs depending on whether a docs/ dir is planned.
    if docs:
        readme_str= readme_str + \
        """┣━ tests:<br/>
    ┃ ┗━ test_basic.py<br/>
    ┗━ docs:<br/>
    ┗━"""
    else:
        readme_str= readme_str + \
        """┗━ tests:
    ┗━ test_basic.py
    """
    readme_str=readme_str.format(package=package_name,description=description)
    # Write to file
    with open(os.path.join(top_dir,'README.md'),'w') as f:
        f.write(readme_str)
    return readme_str
def estimate_responsivity(mis_MU, norm_MU):
    """From the estimated base intensities, return only users with zero base
    intensity for misinformation and greater-than-zero base intensity for
    normal content."""
    return [uid for uid in range(len(mis_MU))
            if mis_MU[uid] == 0 and norm_MU[uid] != 0]
def parse_field_constraint(
    x: Union[str, int, float, bool, list],
    constraint: str,
    type: str = "string",
    **field: Any,
) -> Union[str, int, float, bool, list, datetime.datetime, ConstraintTypeError]:
    """
    Parse field constraint.

    String entries of the constraint value are parsed according to the field
    type; non-string entries are passed through unchanged. On a parse failure
    a ConstraintTypeError is returned (not raised).

    Arguments:
        x: Constraint value.
        constraint: Constraint type.
        type: Field type.
        field: Additional field attributes
            (https://specs.frictionlessdata.io/table-schema/#field-descriptors).

    Returns:
        Parsed field constraint, or a ConstraintTypeError describing why the
        string entries could not be parsed.
    """
    # remember the caller's shape so the return value matches it
    is_list = isinstance(x, list)
    X = pd.Series(x)
    # element-wise mask of string entries; only strings need parsing
    is_str = X.apply(lambda xi: isinstance(xi, str))
    if not is_str.any():
        # nothing to parse - return the input untouched
        return x
    result = parse_field(X[is_str], type=type, **field)
    if isinstance(result, ValueTypeError):
        # wrap the parse failure with the constraint's context
        return ConstraintTypeError(
            fieldName=field.get("name", ""),
            constraintName=constraint,
            constraintValue=X[is_str].unique().tolist() if is_list else x,
            fieldType=type,
            fieldFormat=result["fieldFormat"],
        )
    # write parsed values back into their original positions
    # (pandas aligns the assignment on the boolean mask's index)
    X[is_str] = result
    return X.tolist() if is_list else X[0]
def test_burn(token_with_customer_balance: Contract, customer: str):
    """Burn tokens and verify balance, total supply and the emitted event."""
    token = token_with_customer_balance
    amount = 1000
    balance_before = token.call().balanceOf(customer)
    supply_before = token.call().totalSupply()

    token.transact({"from": customer}).burn(amount)

    # balance and supply both shrink by the burned amount
    assert token.call().balanceOf(customer) == balance_before - amount
    assert token.call().totalSupply() == supply_before - amount

    # exactly one Burned event with the expected arguments
    events = token.pastEvents("Burned").get()
    assert len(events) == 1
    event_args = events[-1]["args"]
    assert event_args["burner"] == customer
    assert event_args["burnedAmount"] == amount
def compute_ab_cycles(c_cycles, linear_combinations, g, tretkoff_graph):
    """
    Return the a- and b-cycles of the Riemann surface given the intermediate
    'c-cycles' and the linear-combinations matrix (output of the Frobenius
    transform). Row i of the matrix yields a-cycle i; row i+g yields
    b-cycle i.
    """
    lincomb = linear_combinations
    M, N = lincomb.shape

    def build(row):
        # concatenate each c-cycle |coeff| times, reversed when coeff < 0,
        # dropping the repeated endpoint; then close at the base point 0
        cycle = []
        for j in range(N):
            coeff = lincomb[row, j]
            c = c_cycles[j] if coeff >= 0 else reverse_cycle(c_cycles[j])
            cycle.extend(abs(coeff) * c[:-1])
        return compress_cycle(cycle + [0], tretkoff_graph)

    a_cycles = [build(i) for i in range(g)]
    b_cycles = [build(i + g) for i in range(g)]
    return a_cycles, b_cycles
def pmlb_multiclass_classification_dataset_names():
    """Returns list of multiclass classification datasets in PMLB.

    The result is cached on disk in ``.pmlb/mcdn.pkl``; delete that file to
    force recomputation.
    """
    try:
        # bug fix: pickle.load(open(...)) leaked the file handle
        with open(".pmlb/mcdn.pkl", "rb") as fh:
            name = pickle.load(fh)
    except FileNotFoundError:
        pathlib.Path(".pmlb").mkdir(parents=True, exist_ok=True)
        name = []
        for dataset in pmlb.classification_dataset_names:
            # any classification dataset with != 2 classes is multiclass
            X, y = pmlb.fetch_data(dataset, return_X_y=True, local_cache_dir=".pmlb")
            if np.unique(y).size != 2:
                name.append(dataset)
        with open(".pmlb/mcdn.pkl", "wb") as fh:
            pickle.dump(name, fh)
    return name
def get_loss(p, task=None):
    """Return the loss function for a specific task.

    Raises NotImplementedError for unknown tasks. Imports stay local so only
    the needed loss module is loaded.
    """
    if task in ('semseg', 'human_parts'):
        from losses.loss_functions import SoftMaxwithLoss
        return SoftMaxwithLoss()
    if task == 'edge':
        from losses.loss_functions import BalancedCrossEntropyLoss
        return BalancedCrossEntropyLoss(size_average=True, pos_weight=p['edge_w'])
    if task == 'sal':
        from losses.loss_functions import BalancedCrossEntropyLoss
        return BalancedCrossEntropyLoss(size_average=True)
    if task == 'normals':
        from losses.loss_functions import NormalsLoss
        return NormalsLoss(normalize=True, size_average=True, norm=p['normloss'])
    if task == 'depth':
        from losses.loss_functions import DepthLoss
        return DepthLoss(p['depthloss'])
    raise NotImplementedError('Undefined Loss: Choose a task among '
                              'edge, semseg, human_parts, sal, depth, or normals')
def main() -> None:
    """Run main loop. Listens for new connections.

    Sockets:
      * ``geneve_sock`` - raw UDP socket (IP_HDRINCL) that receives and
        processes Geneve-encapsulated packets on ``UDP_PORT``.
      * ``bind_sock`` - plain UDP socket bound to ``UDP_PORT`` so the OS
        advertises the port; its packets are drained and dropped.
      * ``health_check_socket`` - TCP listener answering health checks.
    """
    geneve_sock = socket.socket(
        socket.AF_INET,
        socket.SOCK_RAW,
        socket.IPPROTO_UDP
    )
    # Create a bind socket to let the outside world know
    # we're listening on `UDP_PORT`. Packets received on this
    # socket will be ignored.
    bind_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    bind_sock.bind((UDP_IP, UDP_PORT))
    health_check_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    health_check_socket.bind((UDP_IP, HEALTHCHECK_PORT))
    health_check_socket.listen(100)
    # Create a raw socket to process the incoming packets,
    # including their IP headers.
    geneve_sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
    geneve_sock.bind((UDP_IP, UDP_PORT))
    flow_stack = FlowStack()
    packet_inspector = PacketInspector()
    print('Listening')
    while True:
        read_sockets, _, _ = select.select(
            [geneve_sock, bind_sock, health_check_socket], [], []
        )
        for selected_sock in read_sockets:
            if selected_sock == geneve_sock:
                data, addr = selected_sock.recvfrom(65565)
                # Only process messages on the geneve_sock.
                response = parse_udp_packet(data, flow_stack, packet_inspector)
                # If `response` is None the packet should be dropped.
                # If the reponse is not None, it should be returned to the GWLB.
                if response:
                    selected_sock.sendto(response, addr)
            if selected_sock == bind_sock:
                selected_sock.recvfrom(65565)  # Drop packets on the bind_sock
            if selected_sock == health_check_socket:
                conn, _ = selected_sock.accept()
                # bug fix: the accepted connection was never closed, leaking
                # one socket per health check
                try:
                    conn.recv(65565)
                    conn.send(hc_response().encode('utf-8'))
                finally:
                    conn.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.