| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def cisco_ios_l3_acl_parsed():
"""Cisco IOS L3 Interface with ip address, acl, description and vlan."""
vlan = Vlan(id="300", encapsulation="dot1Q")
ipv4 = IPv4(address="10.3.3.13", mask="255.255.255.128")
acl_in = ACL(name="Common_Client_IN", direction="in")
acl_out = ACL(name="TEST_ACL_03", direction="out")
interface = Interface(
name="FastEthernet0/0.300",
description='"Test logical subinterface 3"',
vlans=[vlan],
ipv4=[ipv4],
acl=[acl_in, acl_out],
)
parsed_config = interface.dict()
return parsed_config
| 20,100
|
def brillance(p, g, m = 255):
"""
    p < 0 : decrease the brightness
    p > 0 : increase the brightness
"""
if (p + g < m + 1) and (p + g > 0):
return int(p + g)
elif p + g <= 0:
return 0
else:
return m
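
# A minimal usage sketch, added here as an illustration (not part of the original
# snippet): brillance shifts the grey level g by offset p and clamps to [0, m].
print(brillance(30, 100))    # 130: within range, returned unchanged
print(brillance(-120, 100))  # 0: clamped at the lower bound
print(brillance(200, 100))   # 255: clamped at the default maximum m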
| 20,101
|
def fetch_fact():
"""Parse the command parameters, validate them, and respond.
Note: This URL must support HTTPS and serve a valid SSL certificate.
"""
# Parse the parameters you need
token = request.form.get('token', None) # TODO: validate the token
command = request.form.get('command', None)
text = request.form.get('text', None)
# Validate the request parameters
if not token: # or some other failure condition
abort(400)
return jsonify({'response_type': 'in_channel',
'text': 'foo'
})
| 20,102
|
def payback(request):
"""
    WeChat Pay payment callback view.
:param request:
:return:
"""
return HttpResponse('payback')
| 20,103
|
def get_index_shares(name, end_date=None):
"""获取某一交易日的指数成分股列表
symbols = get_index_shares("上证50", "2019-01-01 09:30:00")
"""
if not end_date:
end_date = datetime.now().strftime(date_fmt)
else:
end_date = pd.to_datetime(end_date).strftime(date_fmt)
constituents = get_history_constituents(indices[name], end_date, end_date)[0]
symbol_list = [k for k, v in constituents['constituents'].items()]
return list(set(symbol_list))
| 20,104
|
def erfcx(x):
"""Elementwise scaled complementary error function.
.. note::
Forward computation in CPU cannot be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Erfcx().apply((x,))[0]
| 20,105
|
def fetch_data_async(blob, start_index, end_index, rpc=None):
"""Asynchronously fetches data for a blob.
Fetches a fragment of a blob up to `MAX_BLOB_FETCH_SIZE` in length. Attempting
to fetch a fragment that extends beyond the boundaries of the blob will return
the amount of data from `start_index` until the end of the blob, which will be
a smaller size than requested. Requesting a fragment that is entirely outside
the boundaries of the blob will return an empty string. Attempting to fetch a
negative index will raise an exception.
Args:
blob: A `BlobInfo`, `BlobKey`, string, or Unicode representation of
the `BlobKey` of the blob from which you want to fetch data.
start_index: The start index of blob data to fetch. This value must not be
negative.
end_index: The end index (inclusive) of the blob data to fetch. This value
must be greater than or equal to `start_index`.
rpc: Optional UserRPC object.
Returns:
A UserRPC whose result will be a string as returned by `fetch_data()`.
Raises:
TypeError: If `start_index` or `end_index` are not indexes, or if `blob` is
not a string, `BlobKey` or `BlobInfo`.
DataIndexOutOfRangeError: If `start_index` is set to a value that is less
than 0 or `end_index` is less than `start_index` when calling
`rpc.get_result()`.
BlobFetchSizeTooLargeError: If the requested blob fragment is larger than
`MAX_BLOB_FETCH_SIZE` when calling `rpc.get_result()`.
BlobNotFoundError: If the blob does not exist when calling
`rpc.get_result()`.
"""
if isinstance(blob, BlobInfo):
blob = blob.key()
return blobstore.fetch_data_async(blob, start_index, end_index, rpc=rpc)
| 20,106
|
def get_legal_moves(color, size, board):
"""
    Return all legal moves for the given color as a dict mapping (x, y) to the discs that would be flipped.
"""
legal_moves = {}
for y in range(size):
for x in range(size):
reversibles = get_reversibles(color, size, board, x, y)
if reversibles:
legal_moves[(x, y)] = reversibles
return legal_moves
| 20,107
|
def cleanup_duplicate_residuals(task):
"""Cleans the duplicate attributes in the hierarchy
:param task: The top task in the hierarchy
:return:
"""
try:
delattr(task, 'duplicate')
except AttributeError:
pass
for child in task.children:
cleanup_duplicate_residuals(child)
| 20,108
|
def edition(self, key, value):
"""Translates edition indicator field."""
sub_a = clean_val("a", value, str)
if sub_a:
return sub_a.replace("ed.", "")
raise IgnoreKey("edition")
| 20,109
|
async def help(ctx):
"""
    Sends an embedded message listing all the commands currently available
"""
embed = discord.Embed(
colour=discord.Colour.red(),
title="--SongBird--",
description=
"Bot Prefix is - (dash)\n This is a Simple Discord Bot to send Memes & Play Songs\n ",
)
embed.set_author(
name="Made by : Ashutosh",
icon_url="https://avatarfiles.alphacoders.com/172/172111.png",
)
embed.set_image(
url=
"https://initiate.alphacoders.com/images/745/cropped-250-250-745065.png?5477"
)
embed.add_field(
name="join",
value="Makes the Bot Join the Voice Channel You are in.",
inline=True,
)
embed.add_field(name="leave",
value="Gives the Latency of the Bot",
inline=True)
embed.add_field(name="ping",
value="Gives the Latency of the Bot",
inline=True)
embed.add_field(name="play",
value="The makes the bot play the song",
inline=True)
embed.add_field(name="pause",
value="Pauses the current song being played",
inline=True)
embed.add_field(name="resume",
value="Resumes if the song is paused",
inline=True)
embed.add_field(name="add",
value="Adds a new song to the Queue",
inline=True)
embed.add_field(name="next",
value="Skips the current song being played",
inline=True)
embed.add_field(name="meme",
value="Posts a meme from r/memes",
inline=True)
embed.add_field(name="dank",
value="Posts a meme from r/Dankmemes",
inline=True)
embed.set_footer(text="Made with LOVE")
await ctx.send(embed=embed)
| 20,110
|
def get_site_data(hostname: str) -> SiteData:
"""Get metadata about a site from the API"""
url = f"https://{hostname}/w/api.php"
data = dict(
action="query",
meta="siteinfo",
siprop="|".join(
[
"namespaces",
"namespacealiases",
"specialpagealiases",
"magicwords",
"general",
]
),
formatversion="2",
format="json",
)
res_json = backoff_retry("get", url, params=data, output="json")
namespaces: Dict[str, Set[str]] = {}
all_namespaces = res_json["query"]["namespaces"]
namespace_aliases = res_json["query"]["namespacealiases"]
for namespace, nsdata in all_namespaces.items():
namespaces.setdefault(namespace, set()).update(
[
datasources.normal_name(nsdata.get("canonical", "").lower()),
datasources.normal_name(nsdata.get("name", "").lower()),
]
)
for nsdata in namespace_aliases:
namespaces.setdefault(str(nsdata["id"]), set()).add(
datasources.normal_name(nsdata.get("alias", "").lower())
)
specialpages = {
item["realname"]: item["aliases"]
for item in res_json["query"]["specialpagealiases"]
}
magicwords = {
item["name"]: item["aliases"] for item in res_json["query"]["magicwords"]
}
general = res_json["query"]["general"]
contribs = {datasources.normal_name(name) for name in specialpages["Contributions"]}
subst = list(
itertools.chain(
magicwords.get("subst", ["SUBST"]),
[item.lower() for item in magicwords.get("subst", ["SUBST"])],
[item[0] + item[1:].lower() for item in magicwords.get("subst", ["SUBST"])],
)
)
sitedata = SiteData(
user=namespaces["2"] - {""},
user_talk=namespaces["3"] - {""},
file=namespaces["6"] - {""},
special=namespaces["-1"] - {""},
contribs=contribs,
subst=subst,
dbname=general["wikiid"],
hostname=hostname,
)
return sitedata
| 20,111
|
def clear_monitor(nodenet_uid, monitor_uid):
"""Leaves the monitor intact, but deletes the current list of stored values."""
micropsi_core.runtime.get_nodenet(nodenet_uid).get_monitor(monitor_uid).clear()
return True
| 20,112
|
def loadGrammarFrom(filename, data=None):
"""Return the text of a grammar file loaded from the disk"""
with open(filename, 'r') as f:
text = f.read()
lookup = mako.lookup.TemplateLookup(directories=[relativePath('grammars')])
template = mako.template.Template(text, lookup=lookup)
#
base_data = {}
base_data.update(BASE_GRAMMAR_SETTINGS)
#
if data:
for k, v in data.items():
if v is not None:
base_data[k] = v
#
return str(template.render(**base_data))
| 20,113
|
def create_generator_selfatt(generator_inputs, generator_outputs_channels, flag_I=True):
"""
    Add a Conditional Self-Attention Module to the U-Net Generator.
By default, 256x256 => 256x256
Args:
generator_inputs: a tensor of input images, [b, h, w, n], with each pixel value [-1, 1].
generator_outputs_channels: the number of generator output channels.
flag_I: bool flag to indicate if add conditional input to self-attention layer.
Returns:
        layers[-1]: the output of the generator, i.e. the generated image batch, [b, h, w, n], with each pixel value [-1, 1].
        beta_list: list of beta matrices, saved to visualize attention maps.
Note: a beta matrix is too large to view directly, visualize it row by row as attention maps
"""
# save output of layers for skip connections
layers = []
###################### encoder ###########################################
# encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]
with tf.variable_scope("encoder_1"):
output = ops.conv(generator_inputs, channels=a.ngf, kernel=4, stride=2, pad=1, sn=a.sn)
output = ops.lrelu(output, 0.2)
# consider: append output before/after lrelu.
# Why not use batch norm in the first layer?
layers.append(output)
# encoder information, (out_channels)
encoder_layers = [
(a.ngf * 2), # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
(a.ngf * 4), # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
(a.ngf * 8), # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]
(a.ngf * 8), # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]
(a.ngf * 8), # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]
# a.ngf * 8, # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]
# a.ngf * 8, # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]
]
beta_list = []
for i, out_channels in enumerate(encoder_layers):
with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
# [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
# Conv + BN + leakyReLU + [selfatt]
output = ops.conv(layers[-1], channels=out_channels, kernel=4, stride=2, pad=1, sn=a.sn)
            output = batchnorm(output)  # ops.batch_norm is avoided because its update strategy is unclear
output = ops.lrelu(output, 0.2)
if a.enc_atten[i]=='T':
output, beta = selfatt(output, tf.image.resize_images(generator_inputs, output.shape[1:3]), out_channels, flag_I=flag_I, channel_fac=a.channel_fac)
beta_list.append(beta)
layers.append(output)
###################### decoder ###########################################
    # Explicitly assign the decoder to /gpu:1
# Consider: layers[] is assign to /gpu:0 by default, skip connections involve communication between GPUs.
with tf.device("/gpu:1"):
# decoder information: (out_channels, dropout rate)
decoder_layers = [
# (a.ngf * 8, 0.0), # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]
# (a.ngf * 8, 0.0), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
(a.ngf * 8, 0.0), # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]
(a.ngf * 8, 0.0), # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]
(a.ngf * 4, 0.0), # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]
(a.ngf * 2, 0.0), # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]
(a.ngf, 0.0), # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]
]
num_encoder_layers = len(layers)
for decoder_layer, (out_channels, dropout) in enumerate(decoder_layers):
skip_layer = num_encoder_layers - decoder_layer - 1
with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
if decoder_layer == 0 or decoder_layer >= a.num_unet:
# first decoder layer is directly connected to the skip_layer
# a.num_unet controls the number of skip connections
input = layers[-1]
else:
input = tf.concat([layers[-1], layers[skip_layer]], axis=3)
# [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]
# Up-sample + 1x1 Conv + BN + leakyReLU + [selfatt] + [dropout]
                output = ops.up_sample(input, scale_factor=2)  # use upsample+conv instead of deconv to avoid checkerboard artifacts
output = ops.conv(output, channels=out_channels, kernel=3, stride=1, pad=1, sn=True)
output = batchnorm(output)
output = ops.lrelu(output)
                if a.dec_atten[decoder_layer] == 'T':
output, beta = selfatt(output, tf.image.resize_images(generator_inputs, output.shape[1:3]), out_channels, flag_I=flag_I, channel_fac=a.channel_fac)
beta_list.append(beta)
if dropout > 0.0:
output = tf.nn.dropout(output, keep_prob=1 - dropout)
layers.append(output)
with tf.device("/gpu:1"):
# decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]
with tf.variable_scope("decoder_1"):
output = tf.concat([layers[-1], layers[0]], axis=3)
output = tf.nn.relu(output)
output = deconv(output, generator_outputs_channels)
output = tf.tanh(output)
layers.append(output)
return layers[-1], beta_list
| 20,114
|
def unsubscribe():
"""
Remove broken connection from subscribers list.
"""
client_lock.acquire()
print("Unsubscribing")
del clients[request.sid]
client_lock.release()
| 20,115
|
def by_tag(articles_by_tag, tag):
""" Filter a list of (tag, articles) to list of articles by tag"""
for a in articles_by_tag:
if a[0].slug == tag:
return a[1]
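
# Minimal usage sketch (illustrative addition): the tag objects only need a
# `slug` attribute, so a namedtuple stands in for the real tag class.
from collections import namedtuple

Tag = namedtuple("Tag", "slug")
articles_by_tag = [(Tag("python"), ["post-1", "post-2"]), (Tag("go"), ["post-3"])]
print(by_tag(articles_by_tag, "python"))  # ['post-1', 'post-2']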
| 20,116
|
def experiment(dataset='SUPPORT', quantiles=(0.25, 0.5, 0.75), prot_att='race',
groups=('black', 'white'), model='dcm', adj='KM',
cv_folds=5, seed=100, hyperparams=None, plot=True, store=False):
"""Top level interface to train and evaluate proposed survival models.
This is the top level function that is designed to be called directly from
inside a jupyter notebook kernel. This function allows the user to run
one of the proposed survival analysis models on the SUPPORT datasets
in a cross validation fashion. The function then plots and
outputs the Expected Calibration Error and ROC characteristic at various
event time quantiles.
Parameters
----------
dataset: str
a string that determines the dataset to run experiments on.
One of "FLCHAIN" or "SUPPORT".
quantiles: list
a list of event time quantiles at which the models are to be evaluated.
prot_att: str
a string that specifies the column in the dataset that is to be treated
as a protected attribute.
groups: list
a list of strings indicating groups on which the survival analysis
models are to be evaluated vis a vis discrimination and calibration.
model: str
the choice of the proposed survival analysis model.
currently supports only "dcm".
adj: str
the choice of adjustment for the L1-ECE: one of
* 'IPCW': Inverse Propensity of Censoring Weighting.
* 'KM': Kaplan-Meier.
cv_folds: int
int that determines the number of Cross Validation folds.
seed: int
numpy random seed.
hyperparams: dict
a dict with hyperparams for the DCM model.
plot: bool
binary flag to determine if the results are to be plotted.
store: bool
whether the models/results are to be stored to disk.
Returns:
a Matplotlib figure with the ROC Curves and Reliability (Calibration) curves
at various event quantiles.
"""
np.random.seed(seed)
fair_strategy = None
(x, t, e, a), folds, quantiles = load_dataset(dataset, cv_folds, prot_att, fair_strategy, quantiles)
trained_model = models.train_model(x, t, e, a, folds, groups, params=hyperparams)
if store:
        store_model(dataset, model, trained_model, hyperparams)
if plot:
outputs = predict(trained_model, model, x, t, e, a, folds, quantiles, fair_strategy)
results = plots.plot_results(outputs, x, e, t, a, folds, groups,
quantiles, strat='quantile', adj=adj)
return results
| 20,117
|
def _rmse(orig_score, rep_core, pbar=False):
"""
    Helper function returning a generator that computes the Root Mean Square Error (RMSE) over all topics for each measure.
@param orig_score: The original scores.
@param rep_core: The reproduced/replicated scores.
@param pbar: Boolean value indicating if progress bar should be printed.
@return: Generator with RMSE values.
"""
orig_cp = deepcopy(orig_score)
rep_cp = deepcopy(rep_core)
measures_all = list(list(orig_cp.values())[0].keys())
topics = orig_cp.keys()
measures_valid = [m for m in measures_all if m not in exclude]
measures = tqdm(measures_valid) if pbar else measures_valid
for measure in measures:
orig_measure = np.array([orig_cp.get(topic).get(measure) for topic in topics])
rpl_measure = np.array([rep_cp.get(topic).get(measure) for topic in topics])
diff = orig_measure - rpl_measure
yield measure, sqrt(sum(np.square(diff))/len(diff))
| 20,118
|
def cli_gb_list_grades(session, out):
"""List assignments"""
try:
gList = session.query(Grade).all()
for g in gList:
out.write(
'''Student: {0.student.name}
Assignment: {0.assignment.name}
Grade: {0.grade}
Notes:
{0.notes}\n\n'''.format(g))
except:
pass
| 20,119
|
def ldns_str2rdf_type(*args):
"""LDNS buffer."""
return _ldns.ldns_str2rdf_type(*args)
| 20,120
|
def scramble(password, message):
"""scramble message with password"""
scramble_length = 20
sha_new = partial(hashlib.new, 'sha1')
if not password:
return b''
stage1 = sha_new(password).digest()
stage2 = sha_new(stage1).digest()
buf = sha_new()
buf.update(message[:scramble_length])
buf.update(stage2)
result = buf.digest()
return _crypt(result, stage1)
| 20,121
|
def print_twoe(twoe, nbf):
"""Print the two-electron values."""
ij = 0
for i in range(nbf):
for j in range(i + 1):
ij += 1
kl = 0
for k in range(nbf):
for l in range(k + 1):
kl += 1
if ij >= kl and abs(twoe[ijkl(i, j, k, l)]) > 1.0e-10:
print(
"{0:3d}{1:3d}{2:3d}{3:3d} {4:25.14f}".format(
i, j, k, l, twoe[ijkl(i, j, k, l)]
)
)
| 20,122
|
def dp_port_id(switch: str, port: str) -> str:
"""
Return a unique id of a DP switch port based on switch name and port name
:param switch:
:param port:
:return:
"""
return 'port+' + switch + ':' + port
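
# Illustrative usage (not part of the original snippet):
print(dp_port_id("sw1", "eth0"))  # 'port+sw1:eth0'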
| 20,123
|
def request_item(zip_code, only_return_po_boxes=False, spatial_reference='4326'):
"""
Request data for a single ZIP code, either routes or PO boxes.
Note that the spatial reference '4326' returns latitudes and longitudes of results.
"""
url = BASE_URL.format(
zip_code=str(zip_code),
spatial_reference=str(spatial_reference),
route_or_box='B' if only_return_po_boxes else 'R'
)
response = requests.get(url)
response.raise_for_status()
return response.json()
| 20,124
|
def data_from_file(fname):
"""Function which reads from the file and yields a generator"""
file_iter = open(fname, 'rU')
for line in file_iter:
line = line.strip().rstrip(',') # Remove trailing comma
record = frozenset(line.split(','))
yield record
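
# Illustrative usage sketch (the file name below is made up for the example):
# write a small comma-separated transaction file, then stream its records.
with open("sample_transactions.csv", "w") as f:
    f.write("bread,milk,\n")
    f.write("milk,eggs,\n")

for record in data_from_file("sample_transactions.csv"):
    print(record)  # e.g. frozenset({'bread', 'milk'})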
| 20,125
|
def test_cache_manager_contains():
"""Test that CacheManager contains returns whether named cache exists."""
settings = {str(n): {} for n in range(5)}
cacheman = CacheManager(settings)
for name in settings.keys():
assert name in cacheman
| 20,126
|
def write_css_files():
"""Make a CSS file for every module in its folder"""
for mdl in sheet.modules:
mdl.write_css()
| 20,127
|
def smoothen_histogram(hist: np.array) -> np.array:
""" Smoothens a histogram with an average filter.
    The filter is defined as multiple convolutions
with a three-tap box filter [1, 1, 1] / 3.
See AOS section 4.1.B.
Args:
hist: A histogram containing gradient orientation counts.
Returns:
hist_smoothed: The histogram after average smoothing.
"""
pad_amount = round(len(smooth_kernel) / 2)
hist_pad = np.pad(hist, pad_width=pad_amount, mode='wrap')
hist_smoothed = np.convolve(hist_pad, smooth_kernel, mode='valid')
return hist_smoothed
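
# The snippet references a module-level `smooth_kernel` that is not shown here.
# A plausible stand-in (an assumption, consistent with the docstring's repeated
# three-tap box filter) plus a tiny demonstration:
import numpy as np

smooth_kernel = np.convolve([1, 1, 1], [1, 1, 1]) / 9.0  # [1, 2, 3, 2, 1] / 9

hist = np.array([2.0, 10.0, 4.0, 0.0, 1.0, 7.0])
print(smoothen_histogram(hist))  # same length as the input, smoothed with wrap padding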
| 20,128
|
def end_point(min_radius: float, max_radius: float) -> Tuple[int, int]:
"""
Generate a random goal that is reachable by the robot arm
"""
# Ensure theta is not 0
theta = (np.random.random() + np.finfo(float).eps) * 2 * np.pi
# Ensure point is reachable
r = np.random.uniform(low=min_radius, high=max_radius)
x = int(r * np.cos(theta))
y = int(r * np.sin(theta))
#x = -53
#y = -84
return x, y
| 20,129
|
def expand_tile(value, size):
"""Add a new axis of given size."""
value = tf.convert_to_tensor(value=value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)
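
# Illustrative usage sketch (TensorFlow is assumed to be imported as `tf` in the
# original module): tile a rank-1 tensor along a new leading axis of size 4.
import tensorflow as tf

value = tf.constant([1, 2, 3])
tiled = expand_tile(value, size=4)
print(tiled.shape)  # (4, 3)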
| 20,130
|
def test_list_base64_binary_pattern_3_nistxml_sv_iv_list_base64_binary_pattern_4_3(mode, save_output, output_format):
"""
Type list/base64Binary is restricted by facet pattern with value
[a-zA-Z0-9+/]{48} [a-zA-Z0-9+/]{72} [a-zA-Z0-9+/]{52}
[a-zA-Z0-9+/]{48} [a-zA-Z0-9+/]{12} [a-zA-Z0-9+/]{72}
[a-zA-Z0-9+/]{68}.
"""
assert_bindings(
schema="nistData/list/base64Binary/Schema+Instance/NISTSchema-SV-IV-list-base64Binary-pattern-4.xsd",
instance="nistData/list/base64Binary/Schema+Instance/NISTXML-SV-IV-list-base64Binary-pattern-4-3.xml",
class_name="NistschemaSvIvListBase64BinaryPattern4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 20,131
|
def truv2df_main(args):
"""
Main entry point for running DataFrame builder
"""
args = parse_args(args)
out = None
if args.vcf:
out = vcf_to_df(args.directory, args.info, args.format)
else:
vcfs = get_files_from_truvdir(args.directory)
all_dfs = []
for key in vcfs:
df = vcf_to_df(vcfs[key][0], args.info, args.format)
df["state"] = key
all_dfs.append(df)
out = pd.concat(all_dfs)
# compression -- this is not super important for most VCFs
# Especially since nulls are highly likely
logging.info("Optimizing memory")
pre_size = out.memory_usage().sum()
any_passed = False
for col in out.columns:
try:
if out[col].apply(float.is_integer).all():
if len(out[out[col] < 0]) == 0:
out[col] = pd.to_numeric(out[col], downcast="unsigned")
else:
out[col] = pd.to_numeric(out[col], downcast="signed")
else:
out[col] = pd.to_numeric(out[col], downcast="float")
any_passed = True
except TypeError as e:
logging.debug("Unable to downsize %s (%s)", col, str(e))
post_size = out.memory_usage().sum()
logging.info("Optimized %.2fMB to %.2fMB", pre_size / 1e6, post_size / 1e6)
joblib.dump(out, args.output)
logging.info("Finished")
| 20,132
|
def parse_header(
info: Mapping[str, Any],
field_meta_data: Mapping[str, FieldMetaData],
component_meta_data: Mapping[str, ComponentMetaData]
) -> Mapping[str, MessageMemberMetaData]:
"""Parse the header.
Args:
info (Mapping[str, Any]): The header.
field_meta_data (Mapping[str, FieldMetaData]): The field metadata.
component_meta_data (Mapping[str, ComponentMetaData]): The component
metadata.
Returns:
Mapping[str, MessageMemberMetaData]: The parsed header.
"""
return _to_message_member_meta_data(info, field_meta_data, component_meta_data)
| 20,133
|
def amina_choo(update, context): #3.2.1
"""Show new choice of buttons"""
query = update.callback_query
bot = context.bot
keyboard = [
[InlineKeyboardButton("Yes", callback_data='0'),
InlineKeyboardButton("No", callback_data='00')],
[InlineKeyboardButton("Back",callback_data='3.2')]
]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_text(
chat_id=query.message.chat_id,
message_id=query.message.message_id,
text="""We have found a lawyer that suits your needs!""",
)
bot.send_photo(
chat_id=query.message.chat_id,
photo = open("female.jpg",'rb')
)
bot.send_message(
chat_id=query.message.chat_id,
text = """Name: Amina Choo \nCompany: Boo and Ow LLP \nYears of Experience: 8""",
)
bot.send_message(
chat_id=query.message.chat_id,
text = """See more on our website: https://eldoraboo.github.io/PairALegal/amina-choo"""
)
bot.send_message(
chat_id=query.message.chat_id,
text = """Thank you for using Pair-A-Legal bot. \nWould you like to restart?""",
reply_markup = reply_markup
)
return FIRST
| 20,134
|
def test_validate_cidr():
"""Test ``validate_cidr()``."""
# IPv4
assert validate_cidr('0.0.0.0/0')
assert validate_cidr('1.2.3.4/32')
# IPv6
assert validate_cidr('::/0')
assert validate_cidr('fe8::/10')
# Bad
assert not validate_cidr('bogus')
assert not validate_cidr(None)
assert not validate_cidr(object())
assert not validate_cidr({})
assert not validate_cidr([])
| 20,135
|
def lighten(data, amt=0.10, is255=False):
"""Lighten a vector of colors by fraction `amt` of remaining possible intensity.
New colors are calculated as::
>>> new_colors = data + amt*(1.0 - data)
>>> new_colors[:, -1] = 1 # keep all alpha at 1.0
Parameters
----------
data : matplotlib colorspec or sequence of colorspecs
input color(s)
amt : float, optional
Percentage by which to lighten `r`, `g`, and `b`. `a` remains unchanged
(Default: 0.10)
is255 : bool, optional
        If `True`, rgb values in `data` are assumed to be between 0 and 255
rather than 0.0 and 1.0. In this case, return values will also
be between 0 and 255.
Returns
-------
numpy.ndarray
Lightened version of data
"""
data = colorConverter.to_rgba_array(data)
new_colors = data + amt * (1.0 - data)
if is255:
new_colors = (255 * new_colors).round()
new_colors[:, -1] = data[:, -1]
return new_colors
| 20,136
|
def _determine_function_name_type(node):
    """Determine the name type whose regex a function's name should match.
:param node: A function node.
:returns: One of ('function', 'method', 'attr')
"""
if not node.is_method():
return 'function'
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if (isinstance(decorator, astroid.Name) or
(isinstance(decorator, astroid.Getattr) and
decorator.attrname == 'abstractproperty')):
infered = safe_infer(decorator)
if infered and infered.qname() in PROPERTY_CLASSES:
return 'attr'
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
elif (isinstance(decorator, astroid.Getattr) and
decorator.attrname in ('setter', 'deleter')):
return 'attr'
return 'method'
| 20,137
|
def print_top_videos(videos: List[ResultItem], num_to_print: int = 5):
"""Prints top videos to console, with details and link to video."""
for i, video in enumerate(videos[:num_to_print]):
title = video.title
views = video.views
subs = video.num_subscribers
link = video.video_url
print(
f"Video #{i + 1}:\n"
f"The video '{title}' has {views} views, from a channel "
f"with {subs} subscribers and can be viewed here: {link}"
f"\n"
)
print("==========================\n")
| 20,138
|
def find_egl_engine_windows(association: str) -> Optional[UnrealEngine]:
"""Find Epic Games Launcher engine distribution from EngineAssociation string."""
if platform.system() != "Windows":
return None
if os.path.isfile(DAT_FILE):
with open(DAT_FILE, encoding="utf-8") as _datfile:
for item in json.load(_datfile).get("InstallationList", []):
if (
association == item.get("InstallLocation")
or association == item.get("AppVersion", "").split("-")[0][:-2]
):
return UnrealEngine(
item.get("InstallLocation"),
item.get("AppVersion", "").split("-")[0][:-2],
)
return None
| 20,139
|
def create_dataset(
template_path:
str = 'com_github_corypaik_coda/projects/coda/data/coda/templates.yaml',
objects_path:
str = 'com_github_corypaik_coda/projects/coda/data/coda/objects.jsonl',
annotations_path:
str = 'com_github_corypaik_coda/projects/coda/data/coda/annotations.jsonl',
seed_for_splits: int = 12345,
seed_for_kmeans: int = 0,
) -> Tuple[datasets.DatasetDict, pd.DataFrame]:
""" Prepares a dataset and saves it disk
Args:
metadata_path: File to save with metadata about each object.
output_dataset_dir: Directory to save the dataset to disk.
Returns:
ds: dataset containing all formatted examples (train, val, test splits)
meta: dataframe containing metadata about each object.
"""
# maybe convert paths
template_path = maybe_rlocation(template_path)
objects_path = maybe_rlocation(objects_path)
annotations_path = maybe_rlocation(annotations_path)
# process annotations
df = pd.read_json(annotations_path, orient='records', lines=True)
  # normalize the annotations so the color columns of each row sum to 1
df[COLORS] = df[COLORS].div(df[COLORS].sum(axis=1), 0)
df = df.set_index(['class_id', 'worker_id'], verify_integrity=True)
# apply a filter
df = df.groupby('class_id', as_index=False).apply(_filter_annotations)
df = df.reset_index()
# average annotations
df = df.groupby('class_id', as_index=False).mean()
# kmeans for groupings.
df = _get_object_groups(df, seed=seed_for_kmeans)
# add template data. this also drops a few objects that we have annotations
# for but are not included.
tdf = pd.read_json(objects_path, orient='records', lines=True)
df = df.merge(tdf, on='class_id', validate='one_to_one')
df = df.sort_values('class_id')
meta = df
templates = _load_templates(template_path=template_path)
# the real dataset: split groundtruth and filtered
# gives us a dict for each split containing a list of objects (example form)
split_objects = _generate_splits(df, seed=seed_for_splits)
def _process_split(x: List[Dict[str, _T]]) -> Dict[str, List[_T]]:
x = T.mapcat(_generate_examples_for_obj(templates=templates), x)
x = list(x)
x = {k: [el[k] for el in x] for k in x[0].keys()}
return x
# map each
data = T.valmap(_process_split, split_objects)
# metadata
features = datasets.Features({
'class_id':
datasets.Value('string'),
'display_name':
datasets.Value('string'),
'ngram':
datasets.Value('string'),
'label':
datasets.Sequence(datasets.Value('float')),
'object_group':
datasets.ClassLabel(names=('Single', 'Multi', 'Any')),
'text':
datasets.Value('string'),
'template_group':
datasets.ClassLabel(names=('clip-imagenet', 'text-masked')),
'template_idx':
datasets.Value('int32')
})
# create dataset
ds = datasets.DatasetDict(
**{
split: datasets.Dataset.from_dict(
mapping=mapping,
features=features,
split=split,
) for split, mapping in data.items()
})
return ds, meta
| 20,140
|
def escape_blog_content(data):
"""Экранирует описание блога."""
if not isinstance(data, binary):
raise ValueError('data should be bytes')
f1 = 0
f2 = 0
    # Find the start of the block
div_begin = b'<div class="blog-description">'
f1 = data.find(b'<div class="blog-content text">')
if f1 >= 0:
f1 = data.find(div_begin, f1, f1 + 200)
    # Find the end
if f1 >= 0:
f2 = data.find(b'<ul class="blog-info">', f1 + 1)
if f2 >= 0:
f2 = data.rfind(b'</div>', f1 + 1, f2)
if f1 < 0 or f2 < 0:
        # Not found
return data
body = data[f1 + len(div_begin):f2].strip()
body = html_escape(body)
result = (
data[:f1],
b'<div class="blog-content text" data-escaped="1">',
body,
data[f2:]
)
return b''.join(result)
| 20,141
|
def deleteMatches():
"""Remove all the match records from the database."""
DB = connect()
c = DB.cursor()
c.execute("DELETE FROM matches")
DB.commit()
DB.close()
| 20,142
|
def read_csv(file_path, delimiter=",", encoding="utf-8"):
"""
Reads a CSV file
Parameters
----------
file_path : str
delimiter : str
encoding : str
Returns
-------
collection
"""
with open(file_path, encoding=encoding) as file:
data_in = list(csv.reader(file, delimiter=delimiter))
return data_in
| 20,143
|
def test_shuffle_05():
"""
Test shuffle: buffer_size > number-of-rows-in-dataset
"""
logger.info("test_shuffle_05")
# define parameters
buffer_size = 13
seed = 1
# apply dataset operations
data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES)
ds.config.set_seed(seed)
data1 = data1.shuffle(buffer_size=buffer_size)
filename = "shuffle_05_result.npz"
save_and_check_dict(data1, filename, generate_golden=GENERATE_GOLDEN)
| 20,144
|
def delete_ipv6_rule(group, address, port):
""" Remove the IP address/port from the security group """
ec2.revoke_security_group_ingress(
GroupId=group['GroupId'],
IpPermissions=[{
'IpProtocol': "tcp",
'FromPort': port,
'ToPort': port,
'Ipv6Ranges': [
{
'CidrIpv6': address
},
]
}])
logger.info("Removed %s : %i from %s " % (address, port, group['GroupId']))
| 20,145
|
def get_cmd_items(pair: Tuple[str, Path]):
"""Return a list of Albert items - one per example."""
with open(pair[-1], "r") as f:
lines = [li.strip() for li in f.readlines()]
items = []
for i, li in enumerate(lines):
if not li.startswith("- "):
continue
desc = li.lstrip("- ")[:-1]
example_cmd = sanitize_string(
lines[i + 2].strip("`").replace("{{", "").replace("}}", "")
)
items.append(
v0.Item(
id=__prettyname__,
icon=icon_path,
text=example_cmd,
subtext=desc,
actions=[
v0.ClipAction("Copy command", example_cmd),
v0.UrlAction(
"Do a google search",
f'https://www.google.com/search?q="{pair[0]}" command',
),
],
)
)
return items
| 20,146
|
def save_excels(excel_metrics, excel_models):
"""It saves the excels with the information"""
default_sheet_metrics = excel_metrics.book[excel_metrics.book.sheetnames[0]]
excel_metrics.book.remove(default_sheet_metrics)
excel_metrics.save()
excel_metrics.close()
default_sheet_models = excel_models.book[excel_models.book.sheetnames[0]]
excel_models.book.remove(default_sheet_models)
excel_models.save()
excel_models.close()
| 20,147
|
def init_environment(config):
"""Load the application configuration from the specified config.ini
file to allow the Pylons models to be used outside of Pylons."""
config = paste.deploy.appconfig('config:' + config)
herder.config.environment.load_environment(
config.global_conf, config.local_conf)
| 20,148
|
def import_activity_class(activity_name, reload=True):
"""
Given an activity subclass name as activity_name,
attempt to lazy load the class when needed
"""
try:
module_name = "activity." + activity_name
importlib.import_module(module_name)
return True
except ImportError as e:
return False
| 20,149
|
def rgb2hsv(rgb):
"""
Reverse to :any:`hsv2rgb`
"""
eps = 1e-6
rgb = np.asarray(rgb).astype(float)
maxc = rgb.max(axis=-1)
minc = rgb.min(axis=-1)
v = maxc
s = (maxc - minc) / (maxc + eps)
s[maxc <= eps] = 0.0
rc = (maxc - rgb[:, :, 0]) / (maxc - minc + eps)
gc = (maxc - rgb[:, :, 1]) / (maxc - minc + eps)
bc = (maxc - rgb[:, :, 2]) / (maxc - minc + eps)
h = 4.0 + gc - rc
maxgreen = (rgb[:, :, 1] == maxc)
h[maxgreen] = 2.0 + rc[maxgreen] - bc[maxgreen]
maxred = (rgb[:, :, 0] == maxc)
h[maxred] = bc[maxred] - gc[maxred]
h[minc == maxc] = 0.0
h = (h / 6.0) % 1.0
return np.asarray((h, s, v))
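
# Illustrative usage sketch (an addition): a 1x2 image with a pure-red and a
# grey pixel; the function returns an array stacked as (h, s, v).
import numpy as np

rgb = np.array([[[1.0, 0.0, 0.0], [0.5, 0.5, 0.5]]])
h, s, v = rgb2hsv(rgb)
print(h[0])  # [0. 0.]    red has hue 0; grey hue defaults to 0
print(s[0])  # [~1. 0.]   red fully saturated, grey unsaturated
print(v[0])  # [1. 0.5]   value is the max channel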
| 20,150
|
def tile_wcrs(graph_or_subgraph: GraphViewType,
validate_all: bool,
prefer_partial_parallelism: bool = None) -> None:
"""
Tiles parallel write-conflict resolution maps in an SDFG, state,
or subgraphs thereof. Reduces the number of atomic operations by tiling
and introducing transient arrays to accumulate atomics on.
:param graph_or_subgraph: The SDFG/state/subgraph to optimize within.
:param validate_all: If True, runs SDFG validation after every tiling.
:param prefer_partial_parallelism: If set, prefers extracting non-conflicted
map dimensions over tiling WCR map (may
not perform well if parallel dimensions
are small).
:note: This function operates in-place.
"""
# Avoid import loops
from dace.codegen.targets import cpp
from dace.frontend import operations
from dace.transformation import dataflow, helpers as xfh
# Determine on which nodes to run the operation
graph = graph_or_subgraph
if isinstance(graph_or_subgraph, gr.SubgraphView):
graph = graph_or_subgraph.graph
if isinstance(graph, SDFG):
for state in graph_or_subgraph.nodes():
tile_wcrs(state, validate_all)
return
if not isinstance(graph, SDFGState):
raise TypeError(
'Graph must be a state, an SDFG, or a subgraph of either')
sdfg = graph.parent
edges_to_consider: Set[Tuple[gr.MultiConnectorEdge[Memlet],
nodes.MapEntry]] = set()
for edge in graph_or_subgraph.edges():
if edge.data.wcr is not None:
if (isinstance(edge.src, (nodes.MapExit, nodes.NestedSDFG))
or isinstance(edge.dst, nodes.MapEntry)):
# Do not consider intermediate edges
continue
reason = cpp.is_write_conflicted_with_reason(graph, edge)
if reason is None or not isinstance(reason, nodes.MapEntry):
# Do not consider edges that will not generate atomics or
# atomics we cannot transform
continue
if reason not in graph_or_subgraph.nodes():
# Skip if conflict exists outside of nested SDFG
continue
# Check if identity value can be inferred
redtype = operations.detect_reduction_type(edge.data.wcr)
dtype = sdfg.arrays[edge.data.data].dtype
identity = dtypes.reduction_identity(dtype, redtype)
if identity is None: # Cannot infer identity value
continue
edges_to_consider.add((edge, reason))
tile_size = config.Config.get('optimizer', 'autotile_size')
debugprint = config.Config.get_bool('debugprint')
if prefer_partial_parallelism is None:
prefer_partial_parallelism = config.Config.get_bool(
'optimizer', 'autotile_partial_parallelism')
maps_to_consider: Set[nodes.MapEntry] = set(me
for _, me in edges_to_consider)
transformed: Set[nodes.MapEntry] = set()
# Heuristic: If the map is only partially conflicted, extract
# parallel dimensions instead of tiling
if prefer_partial_parallelism:
for mapentry in maps_to_consider:
# Check the write-conflicts of all WCR edges in map
conflicts: Set[str] = set()
for edge, me in edges_to_consider:
if me is not mapentry:
continue
conflicts |= set(cpp.write_conflicted_map_params(
mapentry, edge))
nonconflicted_dims = set(mapentry.params) - conflicts
if nonconflicted_dims:
dims = [
i for i, p in enumerate(mapentry.params)
if p in nonconflicted_dims
]
if ((dt._prod(s for i, s in enumerate(mapentry.range.size())
if i in dims) < tile_size) == True):
# Map has a small range, extracting parallelism may not be
# beneficial
continue
xfh.extract_map_dims(sdfg, mapentry, dims)
transformed.add(mapentry)
# Tile and accumulate other not-transformed maps
for edge, mapentry in edges_to_consider:
if mapentry in transformed:
continue
transformed.add(mapentry)
# NOTE: The test "(x < y) == True" below is crafted for SymPy
# to be "definitely True"
if all((s < tile_size) == True for s in mapentry.map.range.size()):
# If smaller than tile size, don't transform and instead
# make map sequential
if debugprint:
print(f'Making map "{mapentry}" sequential due to being '
'smaller than tile size')
mapentry.map.schedule = dtypes.ScheduleType.Sequential
continue
# MapTiling -> AccumulateTransient / AccumulateStream
outer_mapentry = dataflow.MapTiling.apply_to(
sdfg, dict(tile_sizes=(tile_size, )), map_entry=mapentry)
# Transform all outgoing WCR and stream edges
mapexit = graph.exit_node(mapentry)
outer_mapexit = graph.exit_node(outer_mapentry)
# Tuple of (transformation type, options, pattern)
to_apply: Tuple[Union[dataflow.StreamTransient,
dataflow.AccumulateTransient], Dict[str, Any],
Dict[str, nodes.Node]] = None
for e in graph.out_edges(mapexit):
if isinstance(sdfg.arrays[e.data.data], dt.Stream):
mpath = graph.memlet_path(e)
tasklet = mpath[0].src
if not isinstance(tasklet, nodes.Tasklet) or len(mpath) != 3:
# TODO(later): Implement StreamTransient independently of tasklet
continue
# Make transient only if there is one WCR/stream
if to_apply is not None:
to_apply = None
break
to_apply = (dataflow.StreamTransient, {},
dict(tasklet=tasklet,
map_exit=mapexit,
outer_map_exit=outer_mapexit))
else:
if (e.data.is_empty() or e.data.wcr is None
or e.data.wcr_nonatomic
or (e.data.dst_subset is not None
and e.data.dst_subset.num_elements() != 0
and e.data.dynamic)):
continue
dtype = sdfg.arrays[e.data.data].dtype
redtype = operations.detect_reduction_type(e.data.wcr)
identity = dtypes.reduction_identity(dtype, redtype)
if identity is None: # Cannot infer identity value
continue
# Make transient only if there is one WCR/stream
if to_apply is not None:
to_apply = None
break
to_apply = (dataflow.AccumulateTransient,
dict(identity=identity, array=e.data.data),
dict(map_exit=mapexit,
outer_map_exit=outer_mapexit))
if to_apply is not None:
xform, opts, pattern = to_apply
xform.apply_to(sdfg, options=opts, **pattern)
if debugprint and len(transformed) > 0:
print(f'Optimized {len(transformed)} write-conflicted maps')
| 20,151
|
def pandas_candlestick_ohlc(dat, stick = "day", otherseries = None):
"""
:param dat: pandas DataFrame object with datetime64 index, and float columns "Open", "High", "Low", and "Close", likely created via DataReader from "yahoo"
:param stick: A string or number indicating the period of time covered by a single candlestick. Valid string inputs include "day", "week", "month", and "year", ("day" default), and any numeric input indicates the number of trading days included in a period
:param otherseries: An iterable that will be coerced into a list, containing the columns of dat that hold other series to be plotted as lines
This will show a Japanese candlestick plot for stock data stored in dat, also plotting other series if passed.
"""
mondays = WeekdayLocator(MONDAY) # major ticks on the mondays
alldays = DayLocator() # minor ticks on the days
dayFormatter = DateFormatter('%d') # e.g., 12
# Create a new DataFrame which includes OHLC data for each period specified by stick input
transdat = dat.loc[:,["Open", "High", "Low", "Close"]]
if (type(stick) == str):
if stick == "day":
plotdat = transdat
stick = 1 # Used for plotting
elif stick in ["week", "month", "year"]:
if stick == "week":
transdat["week"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[1]) # Identify weeks
elif stick == "month":
transdat["month"] = pd.to_datetime(transdat.index).map(lambda x: x.month) # Identify months
transdat["year"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[0]) # Identify years
grouped = transdat.groupby(list(set(["year",stick]))) # Group by year and other appropriate variable
plotdat = pd.DataFrame({"Open": [], "High": [], "Low": [], "Close": []}) # Create empty data frame containing what will be plotted
for name, group in grouped:
plotdat = plotdat.append(pd.DataFrame({"Open": group.iloc[0,0],
"High": max(group.High),
"Low": min(group.Low),
"Close": group.iloc[-1,3]},
index = [group.index[0]]))
if stick == "week": stick = 5
elif stick == "month": stick = 30
elif stick == "year": stick = 365
elif (type(stick) == int and stick >= 1):
transdat["stick"] = [np.floor(i / stick) for i in range(len(transdat.index))]
grouped = transdat.groupby("stick")
plotdat = pd.DataFrame({"Open": [], "High": [], "Low": [], "Close": []}) # Create empty data frame containing what will be plotted
for name, group in grouped:
plotdat = plotdat.append(pd.DataFrame({"Open": group.iloc[0,0],
"High": max(group.High),
"Low": min(group.Low),
"Close": group.iloc[-1,3]},
index = [group.index[0]]))
else:
raise ValueError('Valid inputs to argument "stick" include the strings "day", "week", "month", "year", or a positive integer')
# Set plot parameters, including the axis object ax used for plotting
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
if plotdat.index[-1] - plotdat.index[0] < pd.Timedelta('730 days'):
weekFormatter = DateFormatter('%b %d') # e.g., Jan 12
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
else:
weekFormatter = DateFormatter('%b %d, %Y')
ax.xaxis.set_major_formatter(weekFormatter)
ax.grid(True)
    # Create the candlestick chart
candlestick_ohlc(ax, list(zip(list(mdates.date2num(plotdat.index.tolist())), plotdat["Open"].tolist(), plotdat["High"].tolist(),
plotdat["Low"].tolist(), plotdat["Close"].tolist())),
colorup = "black", colordown = "red", width = stick * .4)
# Plot other series (such as moving averages) as lines
if otherseries != None:
if type(otherseries) != list:
otherseries = [otherseries]
dat.loc[:,otherseries].plot(ax = ax, lw = 1.3, grid = True)
ax.xaxis_date()
ax.autoscale_view()
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.show()
| 20,152
|
def execute_cgx(infile):
"""Run CGX with the batch input file to generate the mesh output files."""
if LOCAL_EXECUTES["CGX"]:
subprocess.run(
LOCAL_EXECUTES["CGX"] + " -bg " + infile.parts[-1],
cwd=infile.parent,
shell=True,
check=True,
capture_output=True,
)
else:
raise ValueError("Need to specify an execution path for CalculiX GraphiX.")
| 20,153
|
def fig_colorbar(fig, collections, *args, **kwargs):
"""Add colorbar to the right on a figure."""
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
cbar = fig.colorbar(collections, cax, *args, **kwargs)
plt.pause(0.1)
return cbar
| 20,154
|
def _merge_css_item(item):
"""Transform argument into a single list of string values."""
# Recurse lists and tuples to combine into single list
if isinstance(item, (list, tuple)):
return _merge_css_list(*item)
# Cast to string, be sure to cast falsy values to ''
item = "{}".format(item) if item else ""
# Return as a list
return [item]
| 20,155
|
def register_x509_certificate_alg(cert_algorithm):
"""Register a new X.509 certificate algorithm"""
if _x509_available: # pragma: no branch
_certificate_alg_map[cert_algorithm] = (None, SSHX509CertificateChain)
_x509_certificate_algs.append(cert_algorithm)
| 20,156
|
def write_Stations_grid(grd, filename='roms_sta_grd.nc'):
"""
write_Stations_grid(grd, filename)
    Write a Stations_CGrid object to a NetCDF file.
"""
Sm = grd.hgrid.x_rho.shape[0]
# Write Stations grid to file
fh = nc.Dataset(filename, 'w')
fh.Description = 'Stations grid'
fh.Author = 'pyroms.grid.write_grd'
fh.Created = datetime.now().isoformat()
fh.type = 'Stations grid file'
fh.createDimension('station', Sm)
if hasattr(grd.vgrid, 's_rho') is True and grd.vgrid.s_rho is not None:
N, = grd.vgrid.s_rho.shape
fh.createDimension('s_rho', N)
fh.createDimension('s_w', N+1)
if hasattr(grd.vgrid, 's_rho') is True and grd.vgrid.s_rho is not None:
io.write_nc_var(fh, grd.vgrid.theta_s, 'theta_s', (),
'S-coordinate surface control parameter')
io.write_nc_var(fh, grd.vgrid.theta_b, 'theta_b', (),
'S-coordinate bottom control parameter')
io.write_nc_var(fh, grd.vgrid.Tcline, 'Tcline', (),
'S-coordinate surface/bottom layer width', 'meter')
io.write_nc_var(fh, grd.vgrid.hc, 'hc', (),
'S-coordinate parameter, critical depth', 'meter')
io.write_nc_var(fh, grd.vgrid.s_rho, 's_rho', ('s_rho'),
'S-coordinate at RHO-points')
io.write_nc_var(fh, grd.vgrid.s_w, 's_w', ('s_w'),
'S-coordinate at W-points')
io.write_nc_var(fh, grd.vgrid.Cs_r, 'Cs_r', ('s_rho'),
'S-coordinate stretching curves at RHO-points')
io.write_nc_var(fh, grd.vgrid.Cs_w, 'Cs_w', ('s_w'),
'S-coordinate stretching curves at W-points')
io.write_nc_var(fh, grd.vgrid.h, 'h', ('station'),
'bathymetry at RHO-points', 'meter')
io.write_nc_var(fh, grd.hgrid.x_rho, 'x_rho', ('station'),
'x location of RHO-points', 'meter')
io.write_nc_var(fh, grd.hgrid.y_rho, 'y_rho', ('station'),
'y location of RHO-points', 'meter')
if hasattr(grd.hgrid, 'lon_rho'):
io.write_nc_var(fh, grd.hgrid.lon_rho, 'lon_rho', ('station'),
'longitude of RHO-points', 'degree_east')
io.write_nc_var(fh, grd.hgrid.lat_rho, 'lat_rho', ('station'),
'latitude of RHO-points', 'degree_north')
fh.createVariable('spherical', 'c')
fh.variables['spherical'].long_name = 'Grid type logical switch'
fh.variables['spherical'][:] = grd.hgrid.spherical
print(' ... wrote ', 'spherical')
io.write_nc_var(fh, grd.hgrid.angle_rho, 'angle', ('station'),
'angle between XI-axis and EAST', 'radians')
fh.close()
| 20,157
|
def extract_arguments(start, string):
""" Return the list of arguments in the upcoming function parameter closure.
Example:
string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
arguments (output):
'[{'start': 1, 'end': 7},
{'start': 8, 'end': 16},
{'start': 17, 'end': 19},
{'start': 20, 'end': 53}]'
"""
arguments = []
closures = {
"<": 0,
"(": 0
}
current_position = start
argument_start_pos = current_position + 1
# Search for final parenthesis
while current_position < len(string):
if string[current_position] == "(":
closures["("] += 1
elif string[current_position] == ")":
closures["("] -= 1
elif string[current_position] == "<":
closures["<"] += 1
elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
closures["<"] -= 1
# Finished all arguments
if closures["("] == 0 and closures["<"] == 0:
# Add final argument
arguments.append({"start": argument_start_pos, "end": current_position})
break
# Finished current argument
if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
arguments.append({"start": argument_start_pos, "end": current_position})
argument_start_pos = current_position + 1
current_position += 1
return arguments
| 20,158
|
def play_game():
"""
Play a sample game between two UCT players where each player gets a different number of UCT iterations.
"""
board = chess.Board()
board.reset()
print(chess.svg.board(board))
state = ChessState(board=board, side_to_move=board.turn)
while state.get_moves():
print(str(state))
if state.player_just_moved == chess.BLACK:
m = MCTS.search(root_state=state, max_iteration=1000, verbose=False) # White
else:
m = MCTS.search(root_state=state, max_iteration=1, verbose=False) # Black
print("Best Move: " + str(m) + "\n")
state.do_move(m)
if state.get_result(state.player_just_moved) == 1.0:
print("Player " + players[int(state.player_just_moved)] + " wins!")
elif state.get_result(state.player_just_moved) == 0.0:
print("Player " + players[int(not state.player_just_moved)] + "wins!")
else:
print("Nobody wins!")
| 20,159
|
def augment_img(img):
"""Data augmentation with flipping and rotation"""
# TODO: Rewrite with torchvision transform
flip_idx = np.random.choice([0, 1, 2])
if flip_idx != 0:
img = np.flip(img, axis=flip_idx)
rot_idx = int(np.random.choice([0, 1, 2, 3]))
img = np.rot90(img, k=rot_idx, axes=(1, 2))
return img
| 20,160
|
def iter_archive(path, method):
"""Iterate over an archive.
Args:
path: `str`, archive path
method: `tfds.download.ExtractMethod`, extraction method
Returns:
An iterator of `(path_in_archive, f_obj)`
"""
return _EXTRACT_METHODS[method](path)
| 20,161
|
def unfoldPath(cwd, path):
"""
Unfold path applying os.path.expandvars and os.path.expanduser.
Join 'path' with 'cwd' in the beginning If 'path' is not absolute path.
Returns normalized absolute path.
"""
if not path:
return path
path = _expandvars(path)
path = _expanduser(path)
if not _isabs(path):
path = _joinpath(cwd, path)
path = _abspath(path)
return path
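
# The snippet relies on module-level aliases that are not shown. Plausible
# definitions (assumptions, matching the docstring's mention of
# os.path.expandvars / os.path.expanduser) and a short demonstration:
import os.path

_expandvars = os.path.expandvars
_expanduser = os.path.expanduser
_isabs = os.path.isabs
_joinpath = os.path.join
_abspath = os.path.abspath

print(unfoldPath("/home/user/project", "build/out"))    # /home/user/project/build/out
print(unfoldPath("/home/user/project", "~/notes.txt"))  # expanded under the user's home directory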
| 20,162
|
def readConfig(config):
"""
    This function reads the configuration file and combines it with parameters
    from console input to generate the configuration list.
Args:
    * config: configuration file to be read
Options:
* keyfile (-k): str, path to a file containing the random data used as key for the cipher.
workmode: You can choose one of the following methods:
* lz4: Will compress input or decompres output with lz4, this has a great performance with all file types.
    * human: human-usable mode. The key will be read as 5-bit blocks, and input and output will use ownBase32 encoding. This is useful when one of the sides is doing the maths by hand.
* raw: default operation mode, will use 1KB pages for ciphering
Returns:
An array with configuration from file
"""
if not os.path.exists(config):
sys.stderr.write("Could not find config file, "
+"creating a default one\n")
configFile = open(config, "w")
configFile.write("---\nkeyfile: defaultrawfile.rnd\nworkmode: raw")
configFile.close()
    with open(config, 'r') as configFile:
        return yaml.safe_load(configFile)
| 20,163
|
def main(argv):
"""Decide whether the needed jobs got satisfactory results."""
inputs = parse_inputs(
raw_allowed_failures=argv[1],
raw_allowed_skips=argv[2],
raw_jobs=argv[3],
)
jobs = inputs['jobs'] or {}
jobs_allowed_to_fail = set(inputs['allowed_failures'] or [])
jobs_allowed_to_be_skipped = set(inputs['allowed_skips'] or [])
if not jobs:
sys.exit(
'❌ Invalid input jobs matrix, '
'please provide a non-empty `needs` context',
)
job_matrix_succeeded = all(
job['result'] == 'success' for name, job in jobs.items()
if name not in (jobs_allowed_to_fail | jobs_allowed_to_be_skipped)
) and all(
job['result'] in {'skipped', 'success'} for name, job in jobs.items()
if name in jobs_allowed_to_be_skipped
)
set_final_result_outputs(job_matrix_succeeded)
allowed_to_fail_jobs_succeeded = all(
job['result'] == 'success' for name, job in jobs.items()
if name in jobs_allowed_to_fail
)
allowed_to_be_skipped_jobs_succeeded = all(
job['result'] == 'success' for name, job in jobs.items()
if name in jobs_allowed_to_be_skipped
)
log_decision_details(
job_matrix_succeeded,
jobs_allowed_to_fail,
jobs_allowed_to_be_skipped,
allowed_to_fail_jobs_succeeded,
allowed_to_be_skipped_jobs_succeeded,
jobs,
)
return int(not job_matrix_succeeded)
| 20,164
|
def get_predictions(logits):
"""
Convert logits into softmax predictions
"""
probs = F.softmax(logits, dim=1)
confidence, pred = probs.max(dim=1, keepdim=True)
return confidence, pred, probs
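
# Illustrative usage sketch (the original module is assumed to import
# torch.nn.functional as F): two samples, five classes.
import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, 0.1, -1.0, 0.0],
                       [0.1, 0.2, 3.0, 0.0, -0.5]])
confidence, pred, probs = get_predictions(logits)
print(pred.squeeze(1))   # tensor([0, 2])
print(confidence.shape)  # torch.Size([2, 1])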
| 20,165
|
def test_parse_boolean():
"""TEST 1.2: Parsing single booleans.
Booleans are the special symbols #t and #f. In the ASTs they are
represented by Python's True and False, respectively."""
assert_equals(True, parse('#t'))
assert_equals(False, parse('#f'))
| 20,166
|
def post_to_sns(aws_access_key: str, aws_secret_key: str, sns_topic_arn: str,
message_subject: str, message_body: str):
"""Post a message and subject to AWS SNS
Args:
aws_access_key: The AWS access key your bot will use
aws_secret_key: The AWS secret access key
sns_topic_arn: The SNS topic ARN to publish to
message_subject: A message subject to post to SNS
message_body: A message body to post to SNS
"""
sns = boto3.client('sns', region_name="us-east-1",
aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
sns.publish(TopicArn=sns_topic_arn, Subject=message_subject, Message=message_body)
| 20,167
|
def lookup_capacity(lookup_table, environment, cell_type, frequency, bandwidth,
generation, site_density):
"""
Use lookup table to find capacity by clutter environment geotype,
frequency, bandwidth, technology generation and site density.
"""
if (environment, cell_type, frequency, bandwidth, generation) not in lookup_table:
raise KeyError("Combination %s not found in lookup table",
(environment, cell_type, frequency, bandwidth, generation))
density_capacities = lookup_table[
(environment, cell_type, frequency, bandwidth, generation)
]
lowest_density, lowest_capacity = density_capacities[0]
if site_density < lowest_density:
return 0
for a, b in pairwise(density_capacities):
lower_density, lower_capacity = a
upper_density, upper_capacity = b
if lower_density <= site_density and site_density < upper_density:
result = interpolate(
lower_density, lower_capacity,
upper_density, upper_capacity,
site_density
)
return result
# If not caught between bounds return highest capacity
highest_density, highest_capacity = density_capacities[-1]
return highest_capacity
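
# Illustrative usage sketch. The snippet depends on module-level helpers
# `pairwise` and `interpolate` that are not shown; the stand-ins below are
# assumptions (a sliding-pairs iterator and simple linear interpolation):
from itertools import tee

def pairwise(iterable):
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

def interpolate(x0, y0, x1, y1, x):
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0)

lookup_table = {
    ('urban', 'macro', 800, 10, '4G'): [(1, 5), (2, 20), (4, 80)],
}
# A site density of 3 falls between the (2, 20) and (4, 80) entries -> 50.0
print(lookup_capacity(lookup_table, 'urban', 'macro', 800, 10, '4G', 3))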
| 20,168
|
def contig_slow(fn, num):
"""brute force, quadratic"""
data = parse(fn)
for i in range(len(data)-2):
for j in range(i + 2, len(data)-1):
s = sum(data[i:j])
if s == num:
return min(data[i:j]) + max(data[i:j])
| 20,169
|
def secretmap(ctx):
"""
Encrypt, decrypt, and edit files.
\f
:return: Void
:rtype: ``None``
"""
ctx.obj = Keychain()
| 20,170
|
def handle_pair(pair, blackpairs, badpairs, newpairs, tickerlist):
"""Check pair conditions."""
# Check if pair is in tickerlist and on 3Commas blacklist
if pair in tickerlist:
if pair in blacklist:
blackpairs.append(pair)
else:
newpairs.append(pair)
else:
badpairs.append(pair)
| 20,171
|
def create_consts(*args) -> superclasses.PyteAugmentedArgList:
"""
Creates a new list of names.
:param args: The args to use.
"""
return _create_validated(*args, name="consts")
| 20,172
|
def opts2constr_feat_gen(opts):
"""Creates ConstFeatPlanes functor by calling its constructor with
parameters from opts.
Args:
opts (obj): Namespace object returned by parser with settings.
Returns:
        constr_feat_gen (obj): Instantiated ConstrFeatGen functor.
"""
return ConstrFeatGen(
opts.const_feat_fac)
| 20,173
|
def inhibit_activations(activations, times, window_length):
"""
Remove any activations within a specified time window following a previous activation.
TODO - this is extremely slow for non-sparse activations
Parameters
----------
activations : ndarray
Provided activations
times : ndarray (N)
Time in seconds of beginning of each frame
N - number of time samples (frames)
window_length : float
Duration (seconds) of inhibition window
Returns
----------
activations : ndarray
Inhibited activations
"""
# Keep track of non-inhibited non-zeros
pitch_idcs_keep = np.empty(0)
frame_idcs_keep = np.empty(0)
while True:
# Determine the pitch and frame indices where activations begin
pitch_idcs, frame_idcs = activations.nonzero()
# Check if there are any non-zeros left to process
if len(pitch_idcs) == 0 or len(frame_idcs) == 0:
# If not, stop looping
break
# Determine the location of the next non-zero activation
next_nz_pitch, next_nz_frame = pitch_idcs[0], frame_idcs[0]
# Determine where the inhibition window ends
inhibition_end = np.argmax(np.append(times, np.inf) >= times[next_nz_frame] + window_length)
# Zero-out the activations in the inhibition window (including the non-zero itself)
activations[next_nz_pitch, next_nz_frame : inhibition_end] = 0
        # Keep the non-zero that was just processed
pitch_idcs_keep = np.append(pitch_idcs_keep, next_nz_pitch)
frame_idcs_keep = np.append(frame_idcs_keep, next_nz_frame)
# Add back in all of the non-inhibited non-zeros
activations[pitch_idcs_keep.astype(constants.UINT),
frame_idcs_keep.astype(constants.UINT)] = 1
return activations
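# Illustrative check (assumes the module's `constants.UINT` resolves to an
# unsigned integer dtype, as used inside the function above).
import numpy as np

activations = np.array([[1, 1, 1, 0, 1]])
times = np.array([0.0, 0.1, 0.2, 0.3, 0.4])
out = inhibit_activations(activations.copy(), times, window_length=0.25)
# The activations at t=0.1 and t=0.2 fall inside the 0.25 s window opened at
# t=0.0 and are removed, leaving [[1, 0, 0, 0, 1]].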
| 20,174
|
def cmd_convert_items_to_cheetah_list(list):
"""
    Cheetah templates can't iterate over a list of classes, so this
    converts the data into a Cheetah-friendly list of tuples
(NAME, DESCRIPTION, ENUM, HAS_BIT_OFFSET, BIT_OFFSET, BITS, TYPE, MIN, MAX, DEFAULT)
"""
temp = []
for i in list:
temp.append(cmd_convert_to_tuple(i))
return temp
| 20,175
|
def pose223(pose:gtsam.Pose2) -> gtsam.Pose3:
"""convert a gtsam.Pose2 to a gtsam.Pose3
Args:
pose (gtsam.Pose2): the input 2D pose
    Returns:
        gtsam.Pose3: the 3D pose with zeros for the unknown values
"""
return gtsam.Pose3(
gtsam.Rot3.Yaw(pose.theta()), gtsam.Point3(pose.x(), pose.y(), 0)
)
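# Small usage sketch (values are illustrative):
import gtsam

planar = gtsam.Pose2(1.0, 2.0, 0.5)   # x, y, theta (radians)
lifted = pose223(planar)
# `lifted` keeps the planar translation and yaw; z, roll and pitch are zero.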
| 20,176
|
def test_call_wiz_cli_without_subcommand():
"""
Calling wiz-cli without a sub-command. Added for code coverage.
"""
with pytest.raises(SystemExit) as e:
main([])
assert e.value.code == 0
| 20,177
|
def pivot_calibration_with_ransac(tracking_matrices,
number_iterations,
error_threshold,
concensus_threshold,
early_exit=False
):
"""
Written as an exercise for implementing RANSAC.
:param tracking_matrices: N x 4 x 4 ndarray, of tracking matrices.
:param number_iterations: the number of iterations to attempt.
:param error_threshold: distance in millimetres from pointer position
:param concensus_threshold: the minimum percentage of inliers to finish
:param early_exit: If True, returns model as soon as thresholds are met
:returns: pointer offset, pivot point and RMS Error about centroid of pivot.
:raises: TypeError, ValueError
"""
    if number_iterations < 1:
        raise ValueError("The number of iterations must be >= 1")
    if error_threshold < 0:
        raise ValueError("The error threshold must be a non-negative distance.")
    if concensus_threshold < 0 or concensus_threshold > 1:
        raise ValueError("The consensus threshold must be a fraction in [0, 1].")
    if not isinstance(tracking_matrices, np.ndarray):
        raise TypeError("tracking_matrices is not a numpy array")
number_of_matrices = tracking_matrices.shape[0]
population_of_indices = range(number_of_matrices)
minimum_matrices_required = 3
highest_number_of_inliers = -1
best_model = None
best_rms = -1
for iter_counter in range(number_iterations):
indexes = random.sample(population_of_indices,
minimum_matrices_required)
sample = tracking_matrices[indexes]
try:
model, _ = pivot_calibration(sample)
except ValueError:
print("RANSAC, iteration " + str(iter_counter) + ", failed.")
continue
# Need to evaluate the number of inliers.
# Slow, but it's written as a teaching exercise.
world_point = model[3:6]
number_of_inliers = 0
inlier_indices = []
for matrix_counter in range(number_of_matrices):
offset = np.vstack((model[0:3], 1))
transformed_point = tracking_matrices[matrix_counter] @ offset
diff = world_point - transformed_point[0:3]
norm = np.linalg.norm(diff)
if norm < error_threshold:
number_of_inliers = number_of_inliers + 1
inlier_indices.append(matrix_counter)
percentage_inliers = number_of_inliers / number_of_matrices
# Keep the best model so far, based on the highest number of inliers.
if percentage_inliers > concensus_threshold \
and number_of_inliers > highest_number_of_inliers:
highest_number_of_inliers = number_of_inliers
inlier_matrices = tracking_matrices[inlier_indices]
best_model, best_rms = pivot_calibration(inlier_matrices)
# Early exit condition, as soon as we find model with enough fit.
if percentage_inliers > concensus_threshold and early_exit:
return best_model, best_rms
if best_model is None:
raise ValueError("Failed to find a model using RANSAC.")
print("RANSAC Pivot, from " + str(number_of_matrices)
+ " matrices, used " + str(highest_number_of_inliers)
+ " matrices, with error threshold = " + str(error_threshold)
+ " and consensus threshold = " + str(concensus_threshold)
)
return best_model, best_rms
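# Synthetic usage sketch, assuming `pivot_calibration` used above comes from
# the same module. The data are noise-free, so any sampled subset should reach
# consensus and the model should match the ground truth closely.
import numpy as np
from scipy.spatial.transform import Rotation

pivot = np.array([50.0, 60.0, 70.0])    # fixed point the pointer pivots about
offset = np.array([0.0, 0.0, 150.0])    # pointer tip offset in tool coordinates
rotations = Rotation.random(100, random_state=0).as_matrix()
matrices = np.tile(np.eye(4), (100, 1, 1))
for i, rot in enumerate(rotations):
    matrices[i, 0:3, 0:3] = rot
    matrices[i, 0:3, 3] = pivot - rot @ offset
model, rms = pivot_calibration_with_ransac(matrices, 10, 4.0, 0.25)
# model[0:3] should recover `offset` and model[3:6] should recover `pivot`.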
| 20,178
|
def find_companies_name_dict():
"""
    Finds company names and addresses.
    :return: a dict mapping each resource name (e.g. area of companies) to the URL of its data
"""
base = "https://data.gov.ro/api/3/action/"
query = "Date-de-identificare-platitori"
address = url_build.build_url_package_query(base, query)
# dictionary with available files and download url
data_platitori = {}
# check for valid url
packages_exists = url_response.valid_url(address)
if packages_exists:
# find available packages
avlb_package = url_response.get_avlb_package(address)
# resources are at ['results'][0]['resources']
resources = avlb_package['results'][0]['resources']
        # number of available resources
num_resources = avlb_package['results'][0]['num_resources']
# sanity check
count = 0
# loop over list and build a dict with name of resource and url
for x in resources:
package_name = x['name']
package_url = x['url']
temp_dict = {package_name: package_url}
data_platitori.update(temp_dict)
count += 1
# sanity check
if count == num_resources:
print("all resources founded!")
return data_platitori
raise Exception("Invalid query to find companies names")
| 20,179
|
def error_to_response(request: web.Request,
error: typing.Union[Error, ErrorList]):
"""
Convert an :class:`Error` or :class:`ErrorList` to JSON API response.
:arg ~aiohttp.web.Request request:
The web request instance.
:arg typing.Union[Error, ErrorList] error:
The error, which is converted into a response.
:rtype: ~aiohttp.web.Response
"""
if not isinstance(error, (Error, ErrorList)):
raise TypeError('Error or ErrorList instance is required.')
return jsonapi_response(
{
'errors':
[error.as_dict] if isinstance(error, Error) else error.as_dict,
'jsonapi': request.app[JSONAPI]['jsonapi']
},
status=error.status
)
| 20,180
|
def test_link_company_with_dnb_success(
requests_mock,
dnb_response_uk,
base_company_dict,
):
"""
Test the link_company_with_dnb utility.
"""
requests_mock.post(
DNB_V2_SEARCH_URL,
json=dnb_response_uk,
)
company = CompanyFactory()
original_company = Company.objects.get(id=company.id)
modifying_adviser = AdviserFactory()
link_company_with_dnb(company.id, '123456789', modifying_adviser)
company.refresh_from_db()
uk_country = Country.objects.get(iso_alpha2_code='GB')
assert model_to_dict_company(company) == {
**base_company_dict,
'address_1': 'Unit 10, Ockham Drive',
'address_2': '',
'address_country': uk_country.id,
'address_county': '',
'address_postcode': 'UB6 0F2',
'address_area': None,
'address_town': 'GREENFORD',
'archived_documents_url_path': original_company.archived_documents_url_path,
'business_type': original_company.business_type_id,
'company_number': '01261539',
'created_by': original_company.created_by_id,
'duns_number': '123456789',
'employee_range': original_company.employee_range_id,
'export_experience_category': original_company.export_experience_category_id,
'global_ultimate_duns_number': '291332174',
'id': original_company.id,
'modified_by': modifying_adviser.id,
'name': 'FOO BICYCLE LIMITED',
'is_number_of_employees_estimated': True,
'number_of_employees': 260,
'pending_dnb_investigation': False,
'reference_code': '',
'registered_address_area': None,
'sector': original_company.sector_id,
'export_segment': original_company.export_segment,
'export_sub_segment': original_company.export_sub_segment,
'turnover': 50651895,
'turnover_range': original_company.turnover_range_id,
'uk_region': original_company.uk_region_id,
'dnb_modified_on': now(),
}
| 20,181
|
def read(handle):
"""read(handle)"""
record = Record()
__read_version(record, handle)
__read_database_and_motifs(record, handle)
__read_section_i(record, handle)
__read_section_ii(record, handle)
__read_section_iii(record, handle)
return record
| 20,182
|
def validate_telegam():
"""Validate telegram token and chat ID
"""
configs = InitialConfig()
confs = ["chat_id", "bot_token"]
conf_dict = {}
if request.method == "GET":
for conf in confs:
conf_dict[conf] = getattr(configs, conf)
conf_json = json.dumps(conf_dict)
return conf_json
if request.headers.get("Content-Type") == "application/json":
for conf in confs:
value = request.json.get(conf)
if not value:
return HTTPResponse(f"{conf} should have a value", 400)
elif not isinstance(value, str):
return HTTPResponse(f"{conf} should be str", 400)
else:
setattr(configs, conf, value)
# Check telegram bot token
try:
bot = Bot(request.json["bot_token"])
bot.sendMessage(request.json["chat_id"], "Configured")
except (InvalidToken, BadRequest, Unauthorized) as error:
if error.message == "Unauthorized":
error.message += ": Invalid Token"
return HTTPResponse(error.message, 400)
configs.save()
return HTTPResponse("Configured", 200)
| 20,183
|
def find_even(values):
"""Wyszukaj liczby parzyste.
:param values: lista z liczbami calkowitymi
:returns: lista ze znalezionymi liczbami parzystymi bez powtorzeń.
"""
result = []
print(f'lista: {values}')
for value in values:
if value % 2 == 0 and value not in result:
result.append(value)
print(f'wynik: {result}')
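# Quick illustrative call:
find_even([1, 2, 2, 4, 5])   # prints the inputs and result, returns [2, 4]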
| 20,184
|
def hour(e):
"""
    Extract the hour component of the given timestamp column.
    :rtype: Column
"""
return col(Hour(ensure_column(e)))
| 20,185
|
def infer_gaussian(data):
"""
    Return (amplitude, x_0, y_0, width), where width is a rough estimate of
    the Gaussian width.
"""
amplitude = data.max()
x_0, y_0 = np.unravel_index(np.argmax(data), np.shape(data))
row = data[x_0, :]
column = data[:, y_0]
x_0 = float(x_0)
y_0 = float(y_0)
dx = len(np.where(row - amplitude/2 > 0)[0])
dy = len(np.where(column - amplitude/2 > 0)[0])
width = np.sqrt(dx ** 2. + dy ** 2.)
return amplitude, x_0, y_0, width
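# Quick sanity check on a synthetic blob (values are illustrative only).
import numpy as np

yy, xx = np.mgrid[0:64, 0:64]
data = 10.0 * np.exp(-((yy - 30) ** 2 + (xx - 20) ** 2) / (2.0 * 5.0 ** 2))
amplitude, x_0, y_0, width = infer_gaussian(data)
# amplitude ~ 10, (x_0, y_0) ~ (30, 20); width is only a coarse size estimate.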
| 20,186
|
def extract_freq(bins=5, **kwargs):
"""
Extract frequency bin features.
Args:
bins (int): The number of frequency bins (besides OOV)
Returns:
(function): A feature extraction function that returns the log of the \
count of query tokens within each frequency bin.
"""
def _extractor(query, resources):
tokens = query.normalized_tokens
stemmed_tokens = query.stemmed_tokens
freq_dict = resources[WORD_FREQ_RSC]
max_freq = freq_dict.most_common(1)[0][1]
freq_features = defaultdict(int)
for idx, tok in enumerate(tokens):
tok = mask_numerics(tok)
if kwargs.get(ENABLE_STEMMING, False):
stemmed_tok = stemmed_tokens[idx]
stemmed_tok = mask_numerics(stemmed_tok)
freq = freq_dict.get(tok, freq_dict.get(stemmed_tok, 0))
else:
freq = freq_dict.get(tok, 0)
if freq < 2:
freq_features["in_vocab:OOV"] += 1
else:
# Bin the frequency with break points at
# half max, a quarter max, an eighth max, etc.
freq_bin = int(math.log(max_freq, 2) - math.log(freq, 2))
if freq_bin < bins:
freq_features["in_vocab:IV|freq_bin:{}".format(freq_bin)] += 1
else:
freq_features["in_vocab:IV|freq_bin:{}".format(bins)] += 1
q_len = float(len(tokens))
for k in freq_features:
# sublinear
freq_features[k] = math.log(freq_features[k] + 1, 2)
# ratio
freq_features[k] /= q_len
return freq_features
return _extractor
| 20,187
|
def test_get_regex_match(pattern, string, result):
"""Test get_regex_match."""
if result:
assert get_regex_match(re.compile(pattern), string) is not None
else:
assert get_regex_match(re.compile(pattern), string) is None
| 20,188
|
def base_url(base_url):
"""Add '/' if base_url not end by '/'."""
yield base_url if base_url[-1] == "/" else base_url + "/"
| 20,189
|
def create_private_key_params(key_type: str) -> typing.Type[PrivateKeyParams]:
"""Returns the class corresponding to private key parameters objects of the
given key type name.
Args:
key_type
The name of the OpenSSH key type.
Returns:
The subclass of :any:`PrivateKeyParams` corresponding to the key type
name.
Raises:
KeyError: There is no subclass of :any:`PrivateKeyParams` corresponding
to the given key type name.
"""
return _KEY_TYPE_MAPPING[key_type].privateKeyParamsClass
| 20,190
|
def get_response(msg):
"""
    Call the Turing Robot (Tuling123) open API.
    :param msg: the text message entered by the user
    :return: string or None
"""
apiurl = "http://openapi.tuling123.com/openapi/api/v2"
    # Build the request parameter payload
params = {"reqType": 0,
"perception": {
"inputText": {
"text": msg
}
},
"userInfo": {
"apiKey": "ca7bf19ac0e644c38cfbe9d6fdc08de1",
"userId": "439608"
}}
    # Serialize the request parameters to JSON
    content = json.dumps(params)
    # Send the POST request
    r = requests.post(url=apiurl, data=content, verify=False).json()
    print("r = " + str(r))
    # Parse the JSON response; an example payload looks like:
# {'emotion':{
# 'robotEmotion': {'a': 0, 'd': 0, 'emotionId': 0, 'p': 0},
# 'userEmotion': {'a': 0, 'd': 0, 'emotionId': 10300, 'p': 0}
# },
# 'intent': {
# 'actionName': '',
# 'code': 10004,
# 'intentName': ''
# },
    # 'results': [{'groupType': 1, 'resultType': 'text', 'values': {'text': 'Welcome to my territory.'}}]}
code = r['intent']['code']
if code == 10004 or code == 10008:
message = r['results'][0]['values']['text']
return message
return None
| 20,191
|
def lm_sample_with_constraints(lm_model,
max_decode_steps,
use_cuda,
device,
batch_size=1,
alpha_0=1,
alpha=1,
beta=0,
repeat_penalty=0,
history_penalty=0,
history_penalty_beta=0,
penalty_vocab_start=-1,
penalty_vocab_end=-1,
prefix=None,
gamma=1,
normalize="none",
top_k=-1,
top_k0=-1,
top_p=-1,
top_p0=-1,
eos=None,
need_mask_unk=True,
return_states=False):
"""
"""
if eos is None:
eos = lm_model.EOS
dec_states = lm_model.init_search()
search_states = init_search(lm_model, batch_size)
if use_cuda == True:
search_states = nested_to_cuda(search_states, device)
y = search_states[0]
log_probs = search_states[1]
finished = search_states[2]
mask_finished = search_states[3]
hypothesis = search_states[4]
history_log_probs = search_states[5]
gamma = torch.tensor(gamma, dtype=torch.float, device=y.device)
mask_unk = None
if need_mask_unk == True:
mask_unk = get_single_token_mask(lm_model.trg_vocab_size,
lm_model.UNK,
lm_model.MIN_LOGITS)
if use_cuda == True:
mask_unk = nested_to_cuda(mask_unk, device)
steps = 0
trg_seq_len = 0
vocab_size = lm_model.trg_vocab_size
max_decode_steps = min(max_decode_steps, lm_model.trg_max_len - trg_seq_len)
while not finished.all() and steps < max_decode_steps:
outputs = lm_model.decoder._step(steps,
dec_states,
y)
dec_states, logits = outputs[0:2]
if mask_unk is not None:
logits += mask_unk
if steps > 1 and repeat_penalty < 0:
logits += get_multi_token_mask(hypothesis,
vocab_size,
-2,
steps,
repeat_penalty, 0,
penalty_vocab_start,
penalty_vocab_end)
if steps > 2 and history_penalty < 0:
logits += get_multi_token_mask(hypothesis,
vocab_size,
0,
-2,
history_penalty,
history_penalty_beta,
penalty_vocab_start,
penalty_vocab_end)
mask = finished.type(torch.float)
mask_logits = logits * (1 - mask) + mask_finished * mask
_log_probs = F.log_softmax(logits, 1)
temp = alpha_0
if steps > 0:
temp = alpha + steps * beta
if prefix is not None and steps < prefix.size(1):
is_prefix = (prefix[:,steps:steps+1]).ne(lm_model.PAD).float()
prefix_mask = torch.zeros_like(mask_logits)
prefix_mask.scatter_(1, prefix[:, steps:steps+1],
lm_model.MAX_LOGITS)
mask_logits += (prefix_mask * is_prefix)
indice = top_k_top_p_sampling(mask_logits, -1, -1)
elif steps == 0:
indice = top_k_top_p_sampling(mask_logits, top_k0, top_p0, temp)
else:
indice = top_k_top_p_sampling(mask_logits, top_k, top_p, temp)
y = (indice % vocab_size).view(-1, 1)
finished = (finished | y.eq(eos).byte())
hypothesis = torch.cat([hypothesis, y], 1)
_log_probs = torch.gather(_log_probs, 1, indice)
log_probs = log_probs + _log_probs * (1 - mask)
history_log_probs = torch.cat([history_log_probs, _log_probs], 1)
steps += 1
trg_seq_len += 1
hyp_len = torch.sum(hypothesis.ne(lm_model.PAD).float(), 1)
normalized_score = \
normalize_log_probs(log_probs, hyp_len, gamma, normalize)
outputs = [hypothesis, normalized_score]
if return_states == True:
outputs = [hypothesis,
normalized_score,
history_log_probs,
dec_states,
y,
log_probs,
finished,
mask_finished]
return outputs
| 20,192
|
def validate_official(args, data_loader, model, global_stats=None):
"""Run one full official validation. Uses exact spans and same
exact match/F1 score computation as in the SQuAD script.
Extra arguments:
offsets: The character start/end indices for the tokens in each context.
texts: Map of qid --> raw text of examples context (matches offsets).
answers: Map of qid --> list of accepted answers.
"""
eval_time = Timer()
# Run through examples
examples = 0
map = AverageMeter()
mrr = AverageMeter()
prec_1 = AverageMeter()
prec_3 = AverageMeter()
prec_5 = AverageMeter()
with torch.no_grad():
pbar = tqdm(data_loader)
for ex in pbar:
ids, batch_size = ex['ids'], ex['batch_size']
scores = model.predict(ex)
predictions = np.argsort(-scores.cpu().numpy()) # sort in descending order
labels = ex['label'].numpy()
map.update(MAP(predictions, labels))
mrr.update(MRR(predictions, labels))
prec_1.update(precision_at_k(predictions, labels, 1))
prec_3.update(precision_at_k(predictions, labels, 3))
prec_5.update(precision_at_k(predictions, labels, 5))
if global_stats is None:
pbar.set_description('[testing ... ]')
else:
pbar.set_description("%s" % 'Epoch = %d [validating... ]' % global_stats['epoch'])
examples += batch_size
result = dict()
result['map'] = map.avg
result['mrr'] = mrr.avg
result['prec@1'] = prec_1.avg
result['prec@3'] = prec_3.avg
result['prec@5'] = prec_5.avg
if global_stats is None:
logger.info('test results: MAP = %.2f | MRR = %.2f | Prec@1 = %.2f | ' %
(result['map'], result['mrr'], result['prec@1']) +
'Prec@3 = %.2f | Prec@5 = %.2f | examples = %d | ' %
(result['prec@3'], result['prec@5'], examples) +
'time elapsed = %.2f (s)' %
(eval_time.time()))
else:
logger.info('valid official: Epoch = %d | MAP = %.2f | ' %
(global_stats['epoch'], result['map']) +
'MRR = %.2f | Prec@1 = %.2f | Prec@3 = %.2f | ' %
(result['mrr'], result['prec@1'], result['prec@3']) +
'Prec@5 = %.2f | examples = %d | valid time = %.2f (s)' %
(result['prec@5'], examples, eval_time.time()))
return result
| 20,193
|
def tuple_list_to_lua(tuple_list):
"""Given a list of tuples, return a lua table of tables"""
def table(it):
return "{" + ",".join(map(str, it)) + "}"
return table(table(t) for t in tuple_list)
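# For example:
tuple_list_to_lua([(1, 2), (3, 4)])   # -> '{{1,2},{3,4}}'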
| 20,194
|
def list_actions(cont, fend):
"""initware, получающее список экшнов для подсистемы проверки прав. """
from pynch.core import iter_frontends
for path, sub_fend in iter_frontends(fend):
path = '/'.join(path + ('%s',))
for ctl, action in sub_fend.api:
AVAILABLE_ACTIONS.append((path % ctl, action))
| 20,195
|
def reset_user_messages(request: Request):
"""
For given user reset his notifications.
"""
profile: Profile = get_object_or_404(Profile, user=request.user)
profile.messages = 0
profile.save()
return Response(status=status.HTTP_200_OK)
| 20,196
|
def test():
"""Test command.
Notes
-----
Only intended to ensure all modules (sans `nlp`) are imported correctly.
When run, prints "Motel requirements installed and loaded successfully." to standard output.
"""
print("Motel requirements installed and loaded successfully.")
| 20,197
|
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
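# Typical usage sketch (TensorFlow 1.x graph mode; the layer choice is
# illustrative, not part of the original code):
import tensorflow as tf

class Model:
    def __init__(self, data):
        self.data = data
        self.prediction          # touching the property builds the ops once

    @define_scope
    def prediction(self):
        # Ops created here live under tf.variable_scope('prediction').
        return tf.layers.dense(self.data, units=10)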
| 20,198
|
def load_yaml(fname: str) -> Union[List, Dict]:
"""Load a YAML file."""
try:
with open(fname, encoding='utf-8') as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file, Loader=SafeLineLoader) or OrderedDict()
except yaml.YAMLError as exc:
_LOGGER.error(exc)
raise HomeAssistantError(exc)
except UnicodeDecodeError as exc:
_LOGGER.error("Unable to read file %s: %s", fname, exc)
raise HomeAssistantError(exc)
| 20,199
|