content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def cli(ctx, path, max_depth=1):
    """Show the remote repository's files for *path* as a tree.

    Delegates to the client on ``ctx.gi`` and returns whatever
    ``file.tree`` produces for the given path and depth.
    """
    file_api = ctx.gi.file
    return file_api.tree(path, max_depth=max_depth)
def warp_p(binary_img):
    """Apply the fixed perspective warp to *binary_img*.

    Uses hard-coded source and destination quadrilaterals.
    Returns the warped binary image, the warp matrix and the
    inverse matrix.
    """
    source_pts = np.float32([[580, 450], [180, 720], [1120, 720], [700, 450]])
    target_pts = np.float32([[350, 0], [350, 720], [900, 720], [900, 0]])
    warp_mat = cv2.getPerspectiveTransform(source_pts, target_pts)
    inv_mat = cv2.getPerspectiveTransform(target_pts, source_pts)
    height, width = binary_img.shape[0], binary_img.shape[1]
    warped = cv2.warpPerspective(binary_img, warp_mat, (width, height),
                                 flags=cv2.INTER_LINEAR)
    return warped, warp_mat, inv_mat
async def _async_setup_entity(
    config, async_add_entities, config_entry=None, discovery_data=None
):
    """Create and register a single MQTT number entity."""
    entity = MqttNumber(config, config_entry, discovery_data)
    async_add_entities([entity])
def _get_xml_sps(document):
    """
    Fetch the article's XML and wrap it in a `SPS_Package`.

    Parameters
    ----------
    document : opac_schema.v1.models.Article

    Returns
    -------
    dsm.data.sps_package.SPS_Package
    """
    xml_content = reqs.requests_get_content(document.xml)
    package = SPS_Package(xml_content)
    # rewrite the remote asset URIs as local ones
    package.remote_to_local(package.package_name)
    return package
def moving_pairs(iterable: Iterable) -> Iterator:
    """
    Yield consecutive overlapping pairs from *iterable*.
    e.g. 1, 2, 3 produces (1, 2), (2, 3).

    :param iterable:
    :yields: moving pair on iterable
    """
    previous = None
    seeded = False
    for current in iterable:
        if seeded:
            yield previous, current
        previous = current
        seeded = True
def plot_confusion_matrix(ax, y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    From scikit-learn example:
    https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    ax: matplotlib Axes to draw into (also returned for chaining).
    classes: tick labels; assumed to be ordered to match the label order
        used by `confusion_matrix` -- TODO confirm against callers.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    # classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # normalize each row to sum to 1 (per-true-label proportions)
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # print("Normalized confusion matrix")
    # else:
    #     print('Confusion matrix, without normalization')
    # print(cm)
    # fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    # cells brighter than half the max get black text, darker get white
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    # fig.tight_layout()
    return ax
def _in_docker():
""" Returns: True if running in a Docker container, else False """
with open('/proc/1/cgroup', 'rt') as ifh:
if 'docker' in ifh.read():
print('in docker, skipping benchmark')
return True
return False | 5,329,206 |
def asPosition(flags):
    """ Translate a directional flag from an action into a tuple
    indicating the targeted tile. If no directional flag is found
    in the input, returns (0, 0).
    """
    # checked in the same priority order as the original if/elif chain
    directions = ((NORTH, (0, 1)), (SOUTH, (0, -1)),
                  (EAST, (1, 0)), (WEAST, (-1, 0)))
    for flag, offset in directions:
        if flags & flag:
            return offset
    return 0, 0
def write(file_name, data):
    """Serialize *data* to indented JSON and write it to *file_name*.

    Args:
        file_name (str): destination file name
        data: any JSON-serializable object
    """
    serialized = json.dumps(data, indent=2)
    filex.write(file_name, serialized)
def pickvol(filenames, fileidx, which):
    """Retrieve index of named volume.

    Parameters
    ----------
    filenames: list of 4D file names
    fileidx: which 4D file to look at
    which: 'first' or 'middle'

    Returns
    -------
    idx: index of first or middle volume

    Raises
    ------
    ValueError: if `which` is neither 'first' nor 'middle'
    """
    if which.lower() == 'first':
        idx = 0
    elif which.lower() == 'middle':
        # import lazily so the cheap 'first' path needs no heavy deps
        from nibabel import load
        import numpy as np
        # nibabel removed Image.get_shape(); the .shape attribute is the
        # supported way to read image dimensions
        idx = int(np.ceil(load(filenames[fileidx]).shape[3] / 2))
    else:
        raise ValueError('unknown value for volume selection : %s' % which)
    return idx
def update_tmp_lineage_collection():
    """
    Creates a lineage field in the temporary collection, and assigns it the value
    of True or False based on the corresponding record in the pangolin collection.
    Modifies the lineage_tmp collection as a side-effect

    NOTE(review): the annotated records are inserted into `lineages_tmp2`,
    not written back to `lineages_tmp` -- confirm the target collection is
    intentional.
    """
    print("Updating lineage values for sequence records")
    # build an accession -> lineage lookup from the whole pangolin collection
    pango = list(DB.pangolin.find())
    print(f"Found {len(pango)} lineage annotations to add")
    xref = {i.get("accession"): i.get("lineage") for i in pango}
    temp = DB.lineages_tmp.find()
    # annotate every temp record via set_lineage (defined elsewhere)
    new_temp = [set_lineage(r, xref) for r in temp]
    DB.lineages_tmp2.insert_many(new_temp)
def merge(d, **kwargs):
    """Recursively fold the given keyword arguments into *d*.

    Nested dicts are merged depth-first; values of ``None`` are skipped
    so they never overwrite existing entries. Returns the mutated dict.
    """
    for name, new_value in kwargs.items():
        if isinstance(new_value, dict):
            nested = d.get(name, {})
            d[name] = merge(nested, **new_value)
        else:
            if new_value is not None:
                d[name] = new_value
    return d
def test_search_orgs_for_affiliation(client, jwt, session, keycloak_mock):  # pylint:disable=unused-argument
    """Assert that search org with affiliation works."""
    # create an entity using a passcode token
    headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.passcode)
    client.post('/api/v1/entities', data=json.dumps(TestEntityInfo.entity_lear_mock),
                headers=headers, content_type='application/json')
    # create a public user and an org owned by that user
    headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.public_user_role)
    client.post('/api/v1/users', headers=headers, content_type='application/json')
    rv = client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org1),
                     headers=headers, content_type='application/json')
    dictionary = json.loads(rv.data)
    org_id = dictionary['id']
    # affiliate the entity with the new org
    client.post('/api/v1/orgs/{}/affiliations'.format(org_id), headers=headers,
                data=json.dumps(TestAffliationInfo.affiliation3), content_type='application/json')
    # Create a system token
    headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.system_role)
    # search orgs by the affiliated business identifier as a system user
    rv = client.get('/api/v1/orgs?affiliation={}'.format(TestAffliationInfo.affiliation3.get('businessIdentifier')),
                    headers=headers, content_type='application/json')
    assert rv.status_code == http_status.HTTP_200_OK
    assert schema_utils.validate(rv.json, 'orgs_response')[0]
    orgs = json.loads(rv.data)
    assert orgs.get('orgs')[0].get('name') == TestOrgInfo.org1.get('name')
def post_process(config_file):
    """Post-process a finished study: load its HDF5 results and plot them.

    Derives the study folder and the ``<study>.hdf5`` database path from
    *config_file*'s stem, extracts data via ``plot_from_hdf5`` and renders
    it with ``plot_from_dict``.

    Parameters
    ----------
    config_file : str or Path
        Path to the study configuration file; its stem names the study.
    """
    study_name = Path(config_file).stem
    # study outputs live in <cwd>/<study_name>/<study_name>.hdf5
    study_folder = Path.cwd() / study_name
    hdf_db = (study_folder / study_name).with_suffix('.hdf5')
    # NOTE(review): [1, 20] looks like a hard-coded selector argument for
    # plot_from_hdf5 -- confirm its meaning against that helper.
    a = plot_from_hdf5(hdf_db, [1, 20])
    #print(a)
    plot_from_dict(a)
def init_config_flow(hass):
    """Init a configuration flow.

    Returns a Velbus config flow bound to the given Home Assistant
    instance.
    """
    flow = config_flow.VelbusConfigFlow()
    flow.hass = hass
    return flow
def test_variant_allele_index_extractor(genotype, expected):
    """
    Tests the extraction of the variant allele index from the vcf.

    Parametrized elsewhere: `genotype` is the genotype input and
    `expected` the allele index it should yield.
    """
    idx = VariantAlleleIndexExtractor.extract(genotype)
    assert idx == expected
def validate_project_name():
    """
    This validator is used to ensure that `project_name` is valid.

    Checks the module-level `MODULE_NAME` against `MODULE_REGEX` and
    raises ValueError on mismatch.

    NOTE(review): this docstring originally claimed underscores are valid
    while the error message below says dashes -- which one MODULE_REGEX
    accepts cannot be determined here; confirm against its definition.
    """
    if not re.match(MODULE_REGEX, MODULE_NAME):
        # Validates project's module name:
        message = [
            'ERROR: The project slug {0} is not a valid name.',
            'Start with a lowercase letter.',
            'Followed by any lowercase letters, numbers, or dashes (-).',
        ]
        raise ValueError(' '.join(message).format(MODULE_NAME))
def read_service_ids_by_date(path: str) -> Dict[datetime.date, FrozenSet[str]]:
    """Map each date to the set of service identifiers active in the
    feed at *path*."""
    return _service_ids_by_date(load_raw_feed(path))
def get_all_services(org_id: str) -> tuple:
    """
    **public_services_api**
    Return all services governed by the organization `org_id`.

    :param org_id: organization identifier
    :return: response tuple from `services_view.return_services`
    """
    return services_view.return_services(organization_id=org_id)
def test_values():
    """Test the values function."""
    # note the ragged second data row ('b', 2): the missing 'baz' value
    # is expected to surface as None
    table = (('foo', 'bar', 'baz'),
             ('a', 1, True),
             ('b', 2),
             ('b', 7, False))
    actual = values(table, 'foo')
    expect = ('a', 'b', 'b')
    ieq(expect, actual)
    # each ieq runs twice to check the returned view is re-iterable
    ieq(expect, actual)
    actual = values(table, 'bar')
    expect = (1, 2, 7)
    ieq(expect, actual)
    ieq(expect, actual)
    # a tuple of field names yields tuples of values
    actual = values(table, ('foo', 'bar'))
    expect = (('a', 1), ('b', 2), ('b', 7))
    ieq(expect, actual)
    ieq(expect, actual)
    actual = values(table, 'baz')
    expect = (True, None, False)
    ieq(expect, actual)
    ieq(expect, actual)
def get_initializer(initializer_range=0.02):
    """Build a truncated-normal Keras initializer.

    Args:
        initializer_range: float, standard deviation of the distribution.

    Returns:
        `tf.keras.initializers.TruncatedNormal` with stddev set to
        `initializer_range`.
    """
    stddev = initializer_range
    return tf.keras.initializers.TruncatedNormal(stddev=stddev)
def test_file_created():
    """Test that CsvOperations.create_file writes the expected csv file."""
    aux = [['16', 'Kinect Adventures!', 'X360', '2010', 'Misc'
            , 'Microsoft Game Studios', '14.97', '4.94', '0.24', '1.67', '21.82']]
    CsvOperations.create_file(aux)
    # project root is three directory levels up from this test module
    file_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\
               +'/src/resources/csv_searched_games.csv'
    my_file = Path(file_dir)
    # assert the predicate directly instead of if/else with assert True/False
    assert my_file.is_file()
async def character_info(message: types.Message):
    """
    Responds to the /char <name> command with the character's card.

    :param message: incoming message whose text is "/char <name>"
    :return:
    """
    character = message.text
    find = ' '.join(character.split(' ')[1:])
    logger.info(f"{message.from_user.full_name} send /char {find}")
    variables = {"query": find}
    # Bug fix: issue the GraphQL request once instead of twice (the old
    # code POSTed for the status code, then POSTed again for the body).
    response = requests.post(character_url, json={'query': character_query,
                                                  'variables': variables})
    if response.status_code == 200:
        character_data = response.json()['data'].get('Character', None)
        char_keyboard = InlineKeyboardMarkup()
        more_button = InlineKeyboardButton(text="🟡 More ", url=character_data['siteUrl'])
        char_keyboard.insert(more_button)
        description = shorten(str(character_data['description']).replace("__", ''), character_data['siteUrl'])
        await message.answer_photo(photo=character_data['image']['large'],
                                   caption=f"<code>{character_data['name']['full']}</code>\n"
                                           f"<b>Favourites</b>: <b>{character_data['favourites']}</b>\n"
                                           f"{description}\n",
                                   # Bug fix: pass the keyboard markup, not the
                                   # raw character data dict.
                                   reply_markup=char_keyboard)
    else:
        logger.info(f"character not found --> status code: {response.status_code} \n")
        await message.answer("<code>Not found 😭 </code>")
def findh_s0(h_max, h_min, q):
    """
    Find the cable tension force numerically (bisection method);
    upper and lower bounds for the bisection must be supplied.

    :param h_max: upper bound for the tension search
    :param h_min: lower bound for the tension search
    :param q: total cable load [N/m]
    :return: h - tension force, i - number of iterations used

    NOTE(review): relies on module-level `f_0_m` (target f value) and
    `calculatefm`; bisection assumes `calculatefm` is monotonic in h
    over [h_min, h_max] -- confirm, otherwise the loop may not terminate.
    """
    i = 1
    h = (h_min + h_max) / 2
    print("Wstępne H = " + str(h))
    f_m = calculatefm(h, q)
    # iterate until the computed f_m matches the target within 1e-8
    while (math.fabs(f_m - f_0_m) >= 1 * 10 ** -8):
        if f_m < f_0_m:
            h_max = h
        else:
            h_min = h
        # print("iteracja #" + str(i) + " h_max = " + str(h_max) + " h_min = "
        # + str(h_min) + " nowe H: " + str(h) + " f_m = " + str(f_m)
        # + " docelowe: " + str(f_0_m))
        h = (h_min + h_max) / 2
        f_m = calculatefm(h, q)
        i += 1
    return h, i
async def test_no_clients(hass):
    """Test the update_clients function when no clients are found."""
    await setup_unifi_integration(
        hass,
        ENTRY_CONFIG,
        options={},
        clients_response={},
        devices_response={},
        clients_all_response={},
    )
    # NOTE(review): the expected count of 2 presumably covers the
    # controller-level entities created regardless of clients -- confirm
    # against setup_unifi_integration.
    assert len(hass.states.async_all()) == 2
def velocity_dependent_covariance(vel):
    """
    Compute the noise scale for each entry of the velocity channel.

    The noise is gaussian centered around 0, with sd = a + b*|v|,
    where a = 0.01; b = 0.05 (Vul, Frank, Tenenbaum, Alvarez 2009).

    :param vel: iterable of velocity vectors (or scalars)
    :return: covariance -- numpy array with one sd per entry of vel
    """
    cov = []
    for v in vel:
        # Bug fix: use the norm of the individual velocity v, not of the
        # whole vel array (the loop variable was previously unused, so
        # every entry came out identical).
        cov.append(0.01 + 0.05 * np.linalg.norm(v))
    return np.array(cov)
def get_db_path():
    """Return the path to Dropbox's info.json file with user-settings."""
    platform = os.name
    if platform == 'posix':  # macOS / Linux
        base = os.path.expanduser('~')
        return os.path.join(base, '.dropbox', 'info.json')
    if platform == 'nt':  # Windows
        base = os.getenv('LOCALAPPDATA')
        return os.path.join(base, 'Dropbox', 'info.json')
    raise NotImplementedError("Unknown Platform: {0}".format(platform))
def msd_Correlation(allX):
    """Autocorrelation part of MSD (mean squared displacement).

    FFT-based (Wiener-Khinchin) evaluation: the autocorrelation is
    recovered from the inverse FFT of the power spectrum, then divided
    by the number of overlapping frame pairs at each lag.

    allX: assumed shape (n_frames, n_particles, n_dim) -- inferred from
    the axis usage below; confirm with callers.
    """
    M = allX.shape[0]
    # numpy with MKL (i.e. intelpython distribution), the fft wont be
    # accelerated unless axis along 0 or -1
    # perform FT along n_frame axis
    # (n_frams, n_particles, n_dim) -> (n_frames_Ft, n_particles, n_dim)
    # zero-padded to length 2M to avoid circular-correlation wraparound
    allFX = np.fft.rfft(allX, axis=0, n=M*2)
    # sum over n_dim axis
    corr = np.sum(abs(allFX)**2, axis=(1, -1))  # (n_frames_ft,)
    # IFT over n_frame_ft axis (axis=0), whole operation euqals to
    # fx = fft(_.T[0]), fy =... for _ in
    # allX.swapaxes(0,1) -> (n_particles, n_frames, n_dim)
    # then sum fx, fy, fz...fndim
    # rfft for real inputs, higher eff
    # np.arange(M, 0, -1) = number of frame pairs contributing to each lag
    return np.fft.irfft(corr, n=2 * M)[:M].real/np.arange(M, 0, -1)
    # (n_frames,), the n_particles dimension is added out
def convert_table_value(fuel_usage_value):
    """
    Map a fuel-usage percentage onto the skewed graph scale.

    0 = 0, 25% = 1, 50% = 2, 100% = 3, 200% = 4, 400% = 5,
    800% = 6, 1600% = 7 (not shown); intermediate values scale
    between those points (e.g. 600% -> 5.5). Below 25 the mapping
    is linear, from 25 upward it is logarithmic in base 2,
    anchored at 12.5.
    """
    in_linear_region = fuel_usage_value < 25
    if in_linear_region:
        return fuel_usage_value * 0.04
    return math.log(fuel_usage_value / 12.5, 2)
def FindMSBuildInstallation(msvs_version = 'auto'):
    """Returns path to MSBuild for msvs_version or latest available.

    Looks in the registry to find install location of MSBuild.
    MSBuild before v4.0 will not build c++ projects, so only use newer versions.
    Returns None (after printing a diagnostic) when no suitable MSBuild
    is found.
    """
    import TestWin
    registry = TestWin.Registry()
    msvs_to_msbuild = {
        '2013': r'12.0',
        '2012': r'4.0',  # Really v4.0.30319 which comes with .NET 4.5.
        '2010': r'4.0'}
    msbuild_basekey = r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions'
    if not registry.KeyExists(msbuild_basekey):
        # Py3 fix: print statements converted to print() calls throughout.
        print('Error: could not find MSBuild base registry entry')
        return None
    msbuild_version = None
    if msvs_version in msvs_to_msbuild:
        msbuild_test_version = msvs_to_msbuild[msvs_version]
        if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
            msbuild_version = msbuild_test_version
        else:
            # Bug fix: report the MSBuild version that was looked for;
            # msbuild_version was still None at this point.
            print('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
                  'but corresponding MSBuild "%s" was not found.' %
                  (msvs_version, msbuild_test_version))
    if not msbuild_version:
        # Fall back to the newest known MSVS release with MSBuild installed.
        for msvs_version in sorted(msvs_to_msbuild, reverse=True):
            msbuild_test_version = msvs_to_msbuild[msvs_version]
            if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
                msbuild_version = msbuild_test_version
                break
    if not msbuild_version:
        print('Error: could not find MSBuild registry entry')
        return None
    msbuild_path = registry.GetValue(msbuild_basekey + '\\' + msbuild_version,
                                     'MSBuildToolsPath')
    if not msbuild_path:
        print('Error: could not get MSBuild registry entry value')
        return None
    return os.path.join(msbuild_path, 'MSBuild.exe')
def create_toc_xhtml(metadata: WorkMetadata, spine: list[Matter]) -> str:
    """
    Load the default `toc.xhtml` file, and generate the required terms for the creative work. Return xhtml as a string.

    Parameters
    ----------
    metadata: WorkMetadata
        All the terms for updating the work, not all compulsory
    spine: list of Matter
        Spine and guide list of Matter, with `dedication` at 0, if present

    Returns
    -------
    str: xhtml response for `toc.xhtml` as a str.
    """
    with open(DATA_PATH / "xhtml" / DEFAULT_TOC_XHTML, "r+", encoding="utf-8") as toc_file:
        toc_xml = toc_file.read()
    # Table of Contents
    toc_xhtml = ""
    chapter = 1
    for matter in spine:
        # dedication links to its own file; body matter gets numbered chapter files
        if matter.content == FrontMatter.dedication:
            toc_xhtml += F'\t\t\t\t<li>\n\t\t\t\t\t<a href="text/dedication.xhtml">{matter.title}</a>\n\t\t\t\t</li>\n'
        if matter.partition == MatterPartition.body:
            toc_xhtml += F'\t\t\t\t<li>\n\t\t\t\t\t<a href="text/chapter-{chapter}.xhtml">{matter.title}</a>\n\t\t\t\t</li>\n'
            chapter += 1
    # replace the template's placeholder chapter entry with the generated list
    toc_xml = toc_xml.replace('\t\t\t\t<li>\n\t\t\t\t\t<a href="text/chapter-1.xhtml"></a>\n\t\t\t\t</li>\n',
                              toc_xhtml)
    # Landmark Title
    toc_xml = toc_xml.replace('<a href="text/chapter-1.xhtml" epub:type="bodymatter z3998:fiction">WORK_TITLE</a>',
                              F'<a href="text/chapter-1.xhtml" epub:type="bodymatter z3998:fiction">{metadata.title}</a>')
    return toc_xml
def convertSLToNumzero(sl, min_sl=1e-3):
    """
    Convert a (negative or positive) significance level into a count
    of significant zeroes.

    Positive levels map to positive counts, negative levels to negative
    counts; magnitudes are clipped at ``min_sl`` before taking log10.
    NaN input yields 0; exactly 0 is an error.

    Parameters
    ----------
    sl: float

    Returns
    -------
    float
    """
    if np.isnan(sl):
        return 0
    if sl == 0:
        raise RuntimeError("Cannot have significance level of 0.")
    if sl > 0:
        clipped = max(sl, min_sl)
        return -np.log10(clipped)
    clipped = min(sl, -min_sl)
    return np.log10(-clipped)
def test_get_migrations_path():
    """
    gets the application migrations path.

    Asserts it equals `<application main package path>/migrations`.
    """
    root_path = application_services.get_application_main_package_path()
    migrations_path = os.path.abspath(os.path.join(root_path, 'migrations'))
    assert application_services.get_migrations_path() == migrations_path
def downloadKeggInfo(args):
    """
    Download necessary information from the KEGG REST API for parsing.

    For each enabled flag on *args*, fetches the matching KEGG resource
    and writes the UTF-8 decoded body to the corresponding output path:

    - args.geneKeggAnnot truthy  -> gene->pathway links into args.kgen2pathways
    - args.metKeggAnnot truthy   -> compound->pathway links into args.kmet2pathways
    - args.pathways set          -> the organism's pathway list into args.pathways

    args.species is the KEGG organism code interpolated into the URLs.
    """
    def _fetch_to_file(url, out_path):
        """Fetch *url* and write its UTF-8 decoded body to *out_path*."""
        response = requests.get(url)
        with open(out_path, 'w') as fh:
            fh.write(response.content.decode("utf-8"))

    # GeneKeggID2PathwayID
    if args.geneKeggAnnot:
        _fetch_to_file("http://rest.kegg.jp/link/" + args.species + "/pathway",
                       args.kgen2pathways)
    # MetaboliteKeggID2PathwayID
    if args.metKeggAnnot:
        _fetch_to_file("http://rest.kegg.jp/link/compound/pathway",
                       args.kmet2pathways)
    # PathwayID2PathwayNames
    if args.pathways:
        _fetch_to_file("http://rest.kegg.jp/list/pathway/" + args.species,
                       args.pathways)
def calibrate_time_domain(power_spectrum, data_pkt):
    """
    Return a list of the calibrated time domain data

    :param list power_spectrum: spectral data of the time domain data
    :param data_pkt: a RTSA VRT data packet
    :type data_pkt: pyrf.vrt.DataPacket
    :returns: a list containing the calibrated time domain data

    NOTE(review): `td_data` is only assigned for the three handled stream
    ids; any other stream id raises NameError at the scaling step below --
    confirm the possible stream ids or add explicit handling.
    """
    i_data, q_data, stream_id, spec_inv = _decode_data_pkts(data_pkt)
    # Time domain data calibration
    if stream_id in (VRT_IFDATA_I14, VRT_IFDATA_I24):
        # real-only capture: remove the DC offset
        td_data = i_data -np.mean(i_data)
        complex_coefficient = 1
    if stream_id == VRT_IFDATA_I14Q14:
        # I/Q capture: build the complex signal, remove complex DC offset
        td_data = i_data + 1j * q_data
        td_data = td_data - np.mean(td_data)
        complex_coefficient = 2
    # scale the samples so their variance matches the mean linear power of
    # the supplied spectrum (power_spectrum presumably in dB -- TODO confirm)
    P_FD_Ln = 10**(power_spectrum/10)
    P_FD_av = np.mean(P_FD_Ln)
    v_volt = td_data * np.sqrt(1e-3) * np.sqrt(P_FD_av/np.var(td_data)) * 50 * np.sqrt(complex_coefficient*len(td_data)/128.0)
    return v_volt
def func(x):
    """
    Sum of sines of the two trailing components.

    :param x: [b, 2]
    :return: sin(x[..., 0]) + sin(x[..., 1])
    """
    first = tf.math.sin(x[..., 0])
    second = tf.math.sin(x[..., 1])
    return first + second
def parse_handler_input(handler_input: HandlerInput,
                        ) -> Tuple[UserMessage, Dict[str, Any]]:
    """Parses the ASK-SDK HandlerInput into Slowbro UserMessage.

    Returns the UserMessage object and serialized SessionAttributes.

    Raises Exception when neither a usable text nor ASR hypotheses can
    be recovered from an IntentRequest, or the request type is unknown.
    """
    request_envelope = handler_input.request_envelope
    text: str
    asr_hypos: List[AsrHypothesisUtterance] = []
    if is_request_type("LaunchRequest")(handler_input):
        # This is a launch request.
        text = ''
    elif is_request_type("IntentRequest")(handler_input):
        slots = request_envelope.request.intent.slots
        slot_text = slots.get('Text', None)
        if slot_text is not None:
            text = slot_text.value
        else:
            text = ''
        if hasattr(request_envelope.request, 'speechRecognition'):
            # Raw ASR n-best list is present: convert it token by token.
            hypotheses = request_envelope.request.speechRecognition.get(
                'hypotheses', [])
            asr_hypos.extend([
                AsrHypothesisUtterance([
                    AsrHypothesisToken(token['value'], token['confidence'],
                                       token['startOffsetInMilliseconds'],
                                       token['endOffsetInMilliseconds'])
                    for token in hypo['tokens']
                ], hypo['confidence']) for hypo in hypotheses
            ])
        elif text:
            # NOTE: create a fake ASR hypo using the text field.
            # Confidence and offsets are unknown, hence the -1 placeholders.
            asr_hypos.extend([
                AsrHypothesisUtterance([
                    AsrHypothesisToken(token, -1, -1, -1)
                    for token in text.split(' ')
                ], -1)
            ])
        if not text:
            # Try to recover the text using asr_hypos.
            # Otherwise, raise an exception.
            if asr_hypos:
                text = asr_hypos[0].__str__()
            else:
                raise Exception('Unable to find "text" from handler input:',
                                handler_input)
    else:
        raise Exception('Unable to parse handler input:', handler_input)
    serializer = DefaultSerializer()
    user_message = UserMessage(payload=serializer.serialize(request_envelope),
                               channel='alexaprize',
                               request_id=request_envelope.request.request_id,
                               session_id=request_envelope.session.session_id,
                               user_id=request_envelope.session.user.user_id,
                               text=text,
                               asr_hypos=asr_hypos)
    attributes_manager = handler_input.attributes_manager
    ser_session_attributes = attributes_manager.session_attributes
    return (user_message, ser_session_attributes)
def _validate_show_for_invoking_user_only(show_for_invoking_user_only):
    """
    Validates the given `show_for_invoking_user_only` value.

    `None` defaults to `False`; any other value is converted through
    `preconvert_bool`.

    Parameters
    ----------
    show_for_invoking_user_only : `None` or `bool`
        The `show_for_invoking_user_only` value to validate.

    Returns
    -------
    show_for_invoking_user_only : `bool`
        The validated `show_for_invoking_user_only` value.

    Raises
    ------
    TypeError
        If `show_for_invoking_user_only` was not given as `None` nor as `bool` instance.
    """
    if show_for_invoking_user_only is None:
        return False
    return preconvert_bool(
        show_for_invoking_user_only, 'show_for_invoking_user_only'
    )
def process_frame(df, country_map, reg_func, formula, model_num):
    """Processes one frame.

    df: DataFrame
    country_map: map from code to Country
    reg_func: function used to compute regression
    formula: string Patsy formula
    model_num: which model we're running; 1 stores results via
        add_params/add_ranges, any other value via the *2 variants
    """
    grouped = df.groupby('cntry')
    for code, group in grouped:
        country = country_map[code]
        # accumulate per-country column means
        country.add_mean(group.mean())
        # run the model
        model = reg_func(formula, data=group)
        results = model.fit(disp=False)
        # extract parameters and range of effect sizes
        if model_num == 1:
            country.add_params(results.params)
            add_ranges(country, group, results)
        else:
            country.add_params2(results.params)
            add_ranges2(country, group, results)
def dismiss_get_app_offer(browser, logger):
    """ Dismiss 'Get the Instagram App' page after a fresh login """
    # XPaths are looked up in the xpath store keyed by this function's name
    offer_elem = read_xpath(dismiss_get_app_offer.__name__, "offer_elem")
    dismiss_elem = read_xpath(dismiss_get_app_offer.__name__, "dismiss_elem")
    # wait a bit and see if the 'Get App' offer rises up
    offer_loaded = explicit_wait(
        browser, "VOEL", [offer_elem, "XPath"], logger, 5, False
    )
    if offer_loaded:
        # resolve the dismiss button and click it away
        dismiss_elem = browser.find_element_by_xpath(dismiss_elem)
        click_element(browser, dismiss_elem)
def test_db_transaction_n1(monkeypatch):
    """Raise _DB_TRANSACTION_ATTEMPTS OperationalErrors to force a reconnection.

    A cursor for each SQL statement should be returned in the order
    the statement were submitted.
    0. The first statement execution produce no results _DB_TRANSACTION_ATTEMPTS times (OperationalError)
    1. A reconnection will occur
    2. The first statement will be re-executed
    3. The second statement will be executed
    4. The third statement will be executed
    Should get 3 cursors with the values _DB_TRANSACTION_ATTEMPTS, _DB_TRANSACTION_ATTEMPTS+1, & _DB_TRANSACTION_ATTEMPTS+2
    The next mock_connection_ref should be 2
    """
    db_disconnect_all()
    # monotonically increasing ids used to count connections/cursors created
    mock_connection_ref = sequential_reference()
    mock_cursor_ref = sequential_reference()

    class mock_cursor():
        # each cursor receives the next sequence number; cursors with
        # ids below the retry limit fail execute() to simulate a dead link
        def __init__(self) -> None: self.value = next(mock_cursor_ref)
        def execute(self, sql_str):
            if self.value < _DB_TRANSACTION_ATTEMPTS:
                raise OperationalError
        def fetchone(self): return self.value

    class mock_connection():
        def __init__(self) -> None: self.value = next(mock_connection_ref)
        def cursor(self): return mock_cursor()
        def close(self): self.value = None

    def mock_connect(*args, **kwargs): return mock_connection()
    monkeypatch.setattr(database, 'connect', mock_connect)
    dbcur_list = db_transaction(_MOCK_DBNAME, _MOCK_CONFIG, ("SQL0", "SQL1", "SQL2"))
    assert len(dbcur_list) == 3
    # cursor ids show the retries consumed the first _DB_TRANSACTION_ATTEMPTS cursors
    assert dbcur_list[0].fetchone() == _DB_TRANSACTION_ATTEMPTS
    assert dbcur_list[1].fetchone() == _DB_TRANSACTION_ATTEMPTS + 1
    assert dbcur_list[2].fetchone() == _DB_TRANSACTION_ATTEMPTS + 2
    # two connections total: the original plus exactly one reconnection
    assert next(mock_connection_ref) == 2
def fetch_status():
    """
    Fetch clan-battle schedule information from the analysis site
    <https://redive.estertion.win>.

    return
    ----
    ```
    {
        "cb_start": datetime,
        "cb_end": datetime,
        "cb_days": int
    }
    ```
    """
    # fetch the clan battle entries from the site
    r = requests.get(
        "https://redive.estertion.win/ver_log_redive/?page=1&filter=clan_battle"
    ).json()
    # clan battle start date
    cb_start = r["data"][0]["clan_battle"][0]["start"]
    cb_start = datetime.strptime(cb_start, "%Y/%m/%d %H:%M:%S")
    # clan battle end date
    cb_end = r["data"][0]["clan_battle"][0]["end"]
    cb_end = datetime.strptime(cb_end, "%Y/%m/%d %H:%M:%S")
    # number of days the clan battle runs, inclusive of both ends
    cb_days = (cb_end - cb_start).days + 1
    return {"cb_start": cb_start, "cb_end": cb_end, "cb_days": cb_days}
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post(uuid, tapi_path_computation_routing_constraint=None):  # noqa: E501
    """data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post

    creates tapi.path.computation.RoutingConstraint # noqa: E501

    :param uuid: Id of path-comp-service
    :type uuid: str
    :param tapi_path_computation_routing_constraint: tapi.path.computation.RoutingConstraint to be added to list
    :type tapi_path_computation_routing_constraint: dict | bytes

    :rtype: None
    """
    # NOTE(review): generated server stub -- the body only deserializes the
    # JSON payload and returns a placeholder; the create logic is unimplemented.
    if connexion.request.is_json:
        tapi_path_computation_routing_constraint = TapiPathComputationRoutingConstraint.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def date_range(start, end, step):
    """
    Yield (lower, upper) tuples of datetime-like values spaced `step`
    apart until `end` is reached; the final tuple is truncated at `end`
    when the remaining span is shorter than `step`.
    """
    lower = start
    while lower < end:
        upper = lower + step
        if upper > end:
            # final, shorter interval ending exactly at `end`
            yield lower, end
            return
        yield lower, upper
        lower = upper
def A004086(i: int) -> int:
    """Digit reversal of i (e.g. 123 -> 321); trailing zeros drop out."""
    reversed_digits = 0
    while i > 0:
        i, digit = divmod(i, 10)
        reversed_digits = reversed_digits * 10 + digit
    return reversed_digits
def should_raise_sequencingerror(wait, nrep, jump_to, goto, num_elms):
    """
    Report whether the given sequencer parameters are out of range and
    should therefore trigger a SequencingError.
    """
    all_in_range = (
        wait in (0, 1)
        and nrep in range(0, 16384)
        and jump_to in range(-1, num_elms + 1)
        and goto in range(0, num_elms + 1)
    )
    return not all_in_range
def add_task_with_sentinels(
    task_name: str,
    num_sentinels: Optional[int] = 1):
  """Adds sentinels to the inputs/outputs of a task.

  Adds num_sentinels sentinels to the end of 'inputs' and at the beginning
  of 'targets'. This is known to help fine-tuning span corruption models,
  especially on smaller datasets.
  This will also rename the task by adding a "_{num_sentinels}_sentinel" suffix
  to the task name, but making sure it comes before the following suffixes:
  '_train', '_dev', '_test', '.'.
  Example before:
  'inputs': What is the captial of illinois?
  'targets': Springfield.
  Example after:
  'inputs': What is the captial of illinois? <extra_id_0>
  'targets': <extra_id_0> Springfield.

  Args:
    task_name: a str, which is the name of the task you want to have sentinels
      added to. Note this will not override the current task, but will create
      a new one.
    num_sentinels: integer, number of sentinels to end of inputs and the
      beginning of targets.
  """
  def _append_eos_after_trim_and_preserve(
      dataset: tf.data.Dataset,
      output_features: Mapping[str, dataset_providers.Feature],
      sequence_length: Optional[Mapping[str, int]] = None,
      preserve_final_n_tokens_when_trimming: Optional[int] = None
  ) -> tf.data.Dataset:
    """Version of append_eos_after_trim with option to preserve last n tokens."""
    def _maybe_add_eos_and_trim(key: str, value: tf.Tensor) -> tf.Tensor:
      # features without add_eos pass through untouched
      if key not in output_features or not output_features[key].add_eos:
        return value
      eos_id = output_features[key].vocabulary.eos_id
      if (sequence_length is not None and
          sequence_length.get(key, None) is not None):
        max_length = sequence_length[key]
        if (preserve_final_n_tokens_when_trimming is not None and
            preserve_final_n_tokens_when_trimming > 0):
          # Compute the new length of the sequence excluding the EOS token.
          trimmed_length = tf.minimum(max_length, tf.shape(value)[0] + 1)
          # Can't preserve more tokens than the sequence length.
          n_tokens_to_preserve = tf.minimum(
              preserve_final_n_tokens_when_trimming, trimmed_length - 1)
          # pylint: disable=invalid-unary-operand-type
          # keep the head, then the preserved tail (the sentinels), then EOS
          return tf.concat(
              [value[:trimmed_length-(n_tokens_to_preserve + 1)],
               value[-n_tokens_to_preserve:],
               [eos_id]], axis=0)
          # pylint: enable=invalid-unary-operand-type
        else:
          return tf.concat([value[:max_length-1], [eos_id]], axis=0)
      else:
        return tf.concat([value, [eos_id]], axis=0)
    return dataset.map(
        lambda ex: {k: _maybe_add_eos_and_trim(k, v) for k, v in ex.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def _create_new_task_name(task_name):
    """Creates the new task name with sentinels added."""
    sentinel_name = '_{}_sentinel'.format(num_sentinels)
    # Avoid messing up evaluation suffixes, so insert the sentinel name right
    # before these keywords.
    for suffix in ['_train', '_dev', '_test', '_eval', '.']:
      idx = task_name.find(suffix)
      if idx >= 0:
        return task_name[:idx] + sentinel_name + task_name[idx:]
    return task_name + sentinel_name

  def _sentinel_id(vocabulary, sentinel_num=0):
    """Token ID to use as a sentinel.

    Args:
      vocabulary: a t5.data.vocabularies.Vocabulary
      sentinel_num: an optional interger, what sentinel should be returned.
        By default it returns the first sentinel.
    Returns:
      an integer
    """
    # sentinels occupy the top of the vocabulary, counting down
    return vocabulary.vocab_size - 1 - sentinel_num

  def _add_sentinels(dataset, sequence_length, output_features):
    """Adds sentinels to end of inputs and beginning of targets."""
    del sequence_length
    input_vocab = output_features['inputs'].vocabulary
    target_vocab = output_features['targets'].vocabulary
    @utils.map_over_dataset
    def _my_fn(x):
      sentinels_input = [
          _sentinel_id(input_vocab, idx) for idx in range(num_sentinels)]
      sentinels_output = [
          _sentinel_id(target_vocab, idx) for idx in range(num_sentinels)]
      x['inputs'] = tf.concat([x['inputs'], sentinels_input], 0)
      x['targets'] = tf.concat([sentinels_output, x['targets']], 0)
      return x
    return _my_fn(dataset)

  def _postprocess_fn_remove_sentinel(string_label, *args, **kwargs):
    """If sentinels are appended to the task, then remove them before eval."""
    del args
    del kwargs
    vocab = task.output_features['targets'].vocabulary
    sentinel_str = vocab.decode(
        [_sentinel_id(vocab, idx) for idx in range(num_sentinels)])
    if string_label.startswith(sentinel_str):
      string_label = string_label[len(sentinel_str):].strip()
    return string_label

  def _wrap_postprocess_fn_remove_sentinel(postprocess_fn):
    """Wrap around another postprocess_fn to remove sentinels first."""
    def new_fn(string_label, *args, **kwargs):
      string_label = _postprocess_fn_remove_sentinel(
          string_label, *args, **kwargs)
      return postprocess_fn(string_label, *args, **kwargs)
    return new_fn

  # Create the new task name.
  task = TaskRegistry.get(task_name)
  sentinel_task_name = _create_new_task_name(task_name)
  # Make the new preprocessors that will insert sentinels and make sure
  # sentinels are preserved if the sequences are trimmed.
  new_preprocessors = list(task.preprocessors)
  if new_preprocessors[-1] is seqio_preprocessors.append_eos_after_trim:
    # swap the stock EOS appender for the sentinel-preserving variant and
    # insert the sentinel preprocessor just before it
    new_eos_funtion = functools.partial(
        _append_eos_after_trim_and_preserve,
        preserve_final_n_tokens_when_trimming=num_sentinels)
    new_preprocessors[-1] = new_eos_funtion
    new_preprocessors.insert(-1, _add_sentinels)
  else:
    new_preprocessors.append(_add_sentinels)
  # Remove the inserted sentinels in the postprocessor.
  postprocess_fn = task.postprocessor
  if postprocess_fn is not None:
    new_postprocess_fn = _wrap_postprocess_fn_remove_sentinel(postprocess_fn)
  else:
    new_postprocess_fn = _postprocess_fn_remove_sentinel
  TaskRegistry.add(
      sentinel_task_name,
      source=task.source,
      preprocessors=new_preprocessors,
      output_features=task.output_features,
      postprocess_fn=new_postprocess_fn,
      metric_fns=task.metric_fns,
  )
def to_routing_header(params):
    """Returns a routing header string for the given request parameters.

    Args:
        params (Mapping[str, Any]): A dictionary containing the request
            parameters used for routing.

    Returns:
        str: The routing header string.
    """
    if sys.version_info[0] < 3:
        # Python 2's urlencode has no "safe" parameter, so un-escape the
        # slashes after the fact instead.
        return urlencode(params).replace('%2F', '/')
    # Per Google API policy (go/api-url-encoding), / is not encoded.
    return urlencode(params, safe='/')
def mk_llfdi(data_id, data):   # measurement group 10
    """
    transforms a k-llfdi.json form into the triples used by insertMeasurementGroup to
    store each measurement that is in the form
    :param data_id: unique id from the json form
    :param data: data array from the json form
    :return: The list of (typeid,valType,value) triples that are used by insertMeasurementGroup to add the measurements
    """
    # Mandatory fields f1..f32 map onto consecutive type ids 55..86.
    val_list = [(220, 2, data_id),
                (55, 7, data['f1']),  (56, 7, data['f2']),  (57, 7, data['f3']),
                (58, 7, data['f4']),  (59, 7, data['f5']),  (60, 7, data['f6']),
                (61, 7, data['f7']),  (62, 7, data['f8']),  (63, 7, data['f9']),
                (64, 7, data['f10']), (65, 7, data['f11']), (66, 7, data['f12']),
                (67, 7, data['f13']), (68, 7, data['f14']), (69, 7, data['f15']),
                (70, 7, data['f16']), (71, 7, data['f17']), (72, 7, data['f18']),
                (73, 7, data['f19']), (74, 7, data['f20']), (75, 7, data['f21']),
                (76, 7, data['f22']), (77, 7, data['f23']), (78, 7, data['f24']),
                (79, 7, data['f25']), (80, 7, data['f26']), (81, 7, data['f27']),
                (82, 7, data['f28']), (83, 7, data['f29']), (84, 7, data['f30']),
                (85, 7, data['f31']), (86, 7, data['f32'])]
    # Optional integer fields, as (type_id, null_type_id, json_field_name).
    # A data-driven loop replaces the previous eight copy-pasted
    # append-loops; list.extend is equivalent to appending each sublist item.
    optional_fields = [(87, 224, 'fd7'),  (88, 225, 'fd8'),  (89, 226, 'fd14'),
                       (90, 227, 'fd15'), (91, 228, 'fd26'), (92, 229, 'fd29'),
                       (93, 230, 'fd30'), (94, 231, 'fd32')]
    for type_id, null_type_id, field_name in optional_fields:
        val_list.extend(lwh.mk_optional_int(type_id, null_type_id, data, field_name))
    return val_list
def valid_distro(x):
    """
    Check that *x* is a Distro whose "arch" and "variant" attributes are
    non-empty, non-whitespace strings.

    :param x: object to validate
    :return: True when valid, False otherwise
    """
    if not isinstance(x, Distro):
        return False
    # Evaluate every required field (no short-circuit), mirroring the
    # exhaustive check of each attribute.
    checks = [
        isinstance(field_value, str) and field_value.strip() != ""
        for field_value in (x.arch, x.variant)
    ]
    return all(checks)
def stop_stream_to_online(feature_table: str) -> None:
    """
    Stop the stream-to-online ingestion job for the given feature table.

    Thin wrapper delegating to ``feast.pyspark.aws.jobs``.  (The previous
    docstring said "Start", which did not match the call below.)
    """
    # Imported lazily so the AWS/Spark dependency is only required when
    # this command is actually invoked.
    import feast.pyspark.aws.jobs

    feast.pyspark.aws.jobs.stop_stream_to_online(feature_table)
def validate(dataloader,
             model,
             criterion,
             total_batches,
             debug_steps=100,
             local_logger=None,
             master_logger=None,
             save='./'):
    """Validation for the whole dataset
    Args:
        dataloader: paddle.io.DataLoader, dataloader instance; each batch is
            expected to unpack as (images, label, image_path), where label
            may be None (e.g. an unlabeled test set)
        model: nn.Layer, a ViT model
        criterion: loss layer applied to (model output, label) when labels
            are present
        total_batches: int, total num of batches for one epoch
        debug_steps: int, num of iters to log info, default: 100
        local_logger: logger for local process/gpu, default: None
        master_logger: logger for main process, default: None
        save: str, directory where this rank writes its per-image
            predictions as 'pred_<rank>.txt', default: './'
    Returns:
        val_loss_meter.avg: float, average loss on current process/gpu
        val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus
        master_loss_meter.avg: float, average loss on all processes/gpus
        master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus
        val_time: float, validation time
    """
    # NOTE(review): no paddle.no_grad() context here — gradients are still
    # tracked during validation; confirm whether that is intentional.
    model.eval()
    val_loss_meter = AverageMeter()
    val_acc1_meter = AverageMeter()
    master_loss_meter = AverageMeter()
    master_acc1_meter = AverageMeter()
    time_st = time.time()
    # output path: one prediction file per distributed rank
    local_rank = paddle.distributed.get_rank()
    ofile = open(os.path.join(save, f'pred_{local_rank}.txt'), 'w')
    for batch_id, data in enumerate(dataloader):
        # get data
        images = data[0]
        label = data[1]
        image_path = data[2]
        batch_size = images.shape[0]
        output = model(images)
        if label is not None:
            loss = criterion(output, label)
            loss_value = loss.item()
        pred = paddle.nn.functional.softmax(output)
        if label is not None:
            acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item()
            # sync from other gpus for overall loss and acc
            master_loss = all_reduce_mean(loss_value)
            master_acc1 = all_reduce_mean(acc1)
            master_batch_size = all_reduce_mean(batch_size)
            master_loss_meter.update(master_loss, master_batch_size)
            master_acc1_meter.update(master_acc1, master_batch_size)
            val_loss_meter.update(loss_value, batch_size)
            val_acc1_meter.update(acc1, batch_size)
            if batch_id % debug_steps == 0:
                local_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
                                 f"Avg Loss: {val_loss_meter.avg:.4f}, "
                                 f"Avg Acc@1: {val_acc1_meter.avg:.4f}")
                master_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
                                  f"Avg Loss: {master_loss_meter.avg:.4f}, "
                                  f"Avg Acc@1: {master_acc1_meter.avg:.4f}")
                write_log(local_logger, master_logger, local_message, master_message)
        else:
            # No labels: nothing to score, only report progress.  The loss
            # and accuracy meters stay at their initial values in this case.
            if batch_id % debug_steps == 0:
                local_message = f"Step[{batch_id:04d}/{total_batches:04d}]"
                master_message = f"Step[{batch_id:04d}/{total_batches:04d}]"
                write_log(local_logger, master_logger, local_message, master_message)
        # write results to pred: one "<image_path> <top1_label>" line per image
        for idx, img_p in enumerate(image_path):
            pred_prob, pred_label = paddle.topk(pred[idx], 1)
            pred_label = pred_label.cpu().numpy()[0]
            ofile.write(f'{img_p} {pred_label}\n')
    val_time = time.time() - time_st
    ofile.close()
    return (val_loss_meter.avg,
            val_acc1_meter.avg,
            master_loss_meter.avg,
            master_acc1_meter.avg,
            val_time)
def encode(message):
    """
    Encode a string according to the Morse code table (LETTER_TO_MORSE).

    Each character is looked up individually and the resulting codes are
    joined with single spaces; any character missing from the table raises
    KeyError.

    >>> encode('MAI-PYTHON-2020')  # doctest: +SKIP
    '-- .- .. -....- .--. -.-- - .... --- -. -....- ..--- ----- ..--- -----'
    >>> encode('МАИ-ПИТОН-2020')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    KeyError: 'М'
    """
    return ' '.join(LETTER_TO_MORSE[symbol] for symbol in message)
def get_group_to_elasticsearch_processor():
    """
    Build an ElasticProcessor that writes to the Group Elasticsearch index
    (GROUP_INDEX_INFO).

    NOTE(review): the previous docstring described adding users from xform
    submissions to the User index, which does not match the index used here.
    """
    return ElasticProcessor(
        elasticsearch=get_es_new(),
        index_info=GROUP_INDEX_INFO,
    )
def main():
    """Collect iBridge info/version and cache it as a plist.

    Python 2 script (print statements, plistlib.writePlist): intended to be
    run as a macOS inventory check.
    """
    # Skip manual check
    if len(sys.argv) > 1:
        if sys.argv[1] == 'manualcheck':
            print 'Manual check: skipping'
            exit(0)
    # Check OS version and skip if too old (iBridge requires macOS 10.12+,
    # per the getOsVersion() < 12 guard)
    if getOsVersion() < 12:
        print 'Skipping iBridge check, OS does not support iBridges'
        exit(0)
    # Create cache dir (next to this script) if it does not exist
    cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)
    # Get results; the initial dict() is immediately replaced by the
    # flattened info below.
    result = dict()
    info = get_ibridge_info()
    result = flatten_ibridge_info(info)
    ibridge_version = get_ibridge_version()
    # Merge the version dict into the first result entry when both exist;
    # IndexError simply means one of the lists was empty.
    try:
        if result[0]:
            result[0].update(ibridge_version[0])
    except IndexError:
        pass
    # Write ibridge results to cache
    output_plist = os.path.join(cachedir, 'ibridge.plist')
    plistlib.writePlist(result, output_plist)
    #print plistlib.writePlistToString(result)
def loss_function(recon_x, x, mu, logvar, flattened_image_size=1024):
    """VAE loss: summed reconstruction BCE plus the KL divergence term.

    Adapted from https://github.com/pytorch/examples/blob/master/vae/main.py

    :param recon_x: reconstructed batch (probabilities in [0, 1]).
    :param x: original batch, reshaped to (-1, flattened_image_size).
    :param mu: latent means.
    :param logvar: latent log-variances.
    :param flattened_image_size: number of pixels per flattened image.
    :return: scalar tensor, BCE + KLD.
    """
    reconstruction = nn.functional.binary_cross_entropy(
        recon_x, x.view(-1, flattened_image_size), reduction='sum')
    # KL(q(z|x) || N(0, I)); see Appendix B of Kingma & Welling,
    # "Auto-Encoding Variational Bayes" (https://arxiv.org/abs/1312.6114):
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    kl_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return reconstruction + kl_divergence
def remove_poly(values, poly_fit=0):
    """
    Remove the best-fit polynomial of degree *poly_fit* from the record.

    The polynomial is fitted against an evenly spaced abscissa on [0, 1],
    evaluated at the same points, and subtracted from *values*.

    :param values: 1-D array-like record to detrend.
    :param poly_fit: degree of the polynomial to fit and remove
        (default 0, i.e. remove the mean).
    :return: numpy array of *values* with the fitted polynomial removed.
    """
    x = np.linspace(0, 1.0, len(values))
    cofs = np.polyfit(x, values, poly_fit)
    # np.polyval evaluates the highest-order-first coefficients at x in one
    # vectorized call, replacing the previous manual power-series loop.
    return values - np.polyval(cofs, x)
def generate_per_host_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  Returns a tuple of (enqueue_ops_fn, captured_infeed_queue, hooks,
  is_dataset); the InfeedQueue is only constructed when enqueue_ops_fn is
  traced, so it is handed back through a _CapturedObject.
  """
  captured_infeed_queue = _CapturedObject()
  hooks = []
  with ops.device(device):
    user_context = tpu_context.TPUContext(
        internal_ctx=ctx,
        input_device=device,
        invocation_index=host_id)
    inputs = _Inputs.from_input_fn(input_fn(user_context))
    is_dataset = inputs.is_dataset
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      # PREDICT needs a Dataset so stopping signals and batch padding can be
      # layered on top of the input pipeline.
      if not is_dataset:
        raise TypeError(
            'For mode PREDICT, `input_fn` must return `Dataset` instead of '
            '`features` and `labels`.')
      if batch_axis is not None:
        raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
          add_padding=True)
    if is_dataset:
      hooks.append(inputs.dataset_initializer_hook())
  tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
  def enqueue_ops_fn():
    """A Fn returning the TPU infeed enqueue ops.
    By providing as a Fn, it can be invoked inside the tf.while_loop such that
    the input pipeline for multiple iterations can be executed by one
    Session.run call.
    Returns:
      list of dict of ops.
    """
    with ops.device(device):
      num_of_replicas_per_host = ctx.num_of_replicas_per_host
      # Convert user input to features and labels. If the user returns a
      # dataset, it is initialized and the features and labels extracted via
      # `dataset.iterator.get_next()`
      features, labels = inputs.features_and_labels()
      signals = inputs.signals()
      inputs_structure_recorder.validate_and_record_structure(
          features, labels, signals)
      unsharded_tensor_list = (
          inputs_structure_recorder.flatten_features_and_labels(
              features, labels, signals))
      # The queue is sharded across this host's replicas along batch_axis.
      infeed_queue = tpu_feed.InfeedQueue(
          tuple_types=[t.dtype for t in unsharded_tensor_list],
          tuple_shapes=[t.shape for t in unsharded_tensor_list],
          shard_dimensions=batch_axis)
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_number_of_shards(num_of_replicas_per_host)
      per_host_enqueue_ops = (
          infeed_queue.split_inputs_and_generate_enqueue_ops(
              unsharded_tensor_list,
              placement_function=lambda x: device,
              tpu_ordinal_function=tpu_ordinal_function_impl))
      if signals is None:
        return per_host_enqueue_ops
      else:
        # Stopping signals accompany the enqueue ops so the outer loop can
        # detect end-of-input.
        return {
            'ops': per_host_enqueue_ops,
            'signals': signals,
        }
  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def download():
    """
    curl -o haproxy.tar.gz https://www.haproxy.org/download/1.9/src/haproxy-1.9.0.tar.gz
    tar xzf haproxy.tar.gz
    """
    # The docstring above doubles as the command script: every line of it is
    # executed with sudo inside /usr/src.  Do not edit the docstring without
    # meaning to change the commands that run.
    # NOTE(review): blank/surrounding docstring lines are also passed to
    # sudo; warn_only keeps their failures from aborting the task.
    with cd("/usr/src"), settings(warn_only=True):
        for line in download.__doc__.split("\n"):
            sudo(line)
def test_rename_columns(dupcols):
    """Test renaming columns in a data frame with duplicate column names."""

    def check(result, expected_names):
        # Column names match positionally, every column is still a Column
        # instance, and the frame shape is unchanged.
        for actual, expected in zip(result.columns, expected_names):
            assert actual == expected
        for col in result.columns:
            assert isinstance(col, Column)
        assert result.shape == (7, 3)

    # Rename the first column only.
    renamed = rename(dupcols, columns='Name', names='Person')
    check(renamed, ['Person', 'A', 'A'])
    # The source data frame is left untouched.
    assert dupcols.columns[0] == 'Name'
    # Rename the first and the second column by name.
    check(
        rename(dupcols, columns=['Name', 'A'], names=['Person', 'Col2']),
        ['Person', 'Col2', 'A'],
    )
    # Rename the first column by name and the last column by index.
    check(
        rename(dupcols, columns=['Name', 2], names=['Person', 'Col2']),
        ['Person', 'A', 'Col2'],
    )
def by_location(aoi, year, lon, lat, chipsize=512, extend=512,
                tms=['Google'], axis=True, debug=False):
    """Download the background image with parcels polygon overlay by selected
    location. This function will get an image from the center of the polygon.
    Examples:
        from cbm.view import background
        background.by_location(aoi, lon, lat, 512, 512, 'Google',
                                True, True)
    Arguments:
        aoi, the area of interest (str)
        year, the year of parcels table
        lon, lat, longitude and latitude in decimal degrees (float).
        chipsize, size of the chip in pixels (int).
        extend, size of the chip in meters  (float).
        tms, tile map server(s), e.g. Google or Bing (str or list of str).
        axis, unused here — TODO confirm whether it should be forwarded.
        debug, print or not procedure information (Boolean).

    Returns None; images are written under the configured temp directory.
    """
    get_requests = data_source()
    # Accept a single server name as well as a list.
    if type(tms) is str:
        tms = [tms]
    try:
        # Resolve the parcel at this location; its id names the work dir.
        json_data = json.loads(get_requests.ploc(aoi, year, lon, lat,
                                                 True, False, debug))
        if type(json_data['ogc_fid']) is list:
            pid = json_data['ogc_fid'][0]
        else:
            pid = json_data['ogc_fid']
        workdir = normpath(join(config.get_value(['paths', 'temp']),
                                aoi, str(year), str(pid)))
        if debug:
            print('pid: ', pid)
            print('workdir: ', workdir)
            print('json_data: ', json_data)
        json_file = normpath(join(workdir, 'info.json'))
        os.makedirs(workdir, exist_ok=True)
        if not isfile(json_file):
            with open(json_file, "w") as f:
                json.dump(json_data, f)
    except Exception as err:
        # NOTE(review): broad catch — any failure (network, JSON, config) is
        # treated as "no parcel found" and a coordinate-named dir is used.
        workdir = normpath(join(config.get_value(['paths', 'temp']), aoi,
                                str(year), f'_{lon}_{lat}'.replace('.', '_')))
        if debug:
            print("No parcel information found.", err)
    bg_path = normpath(join(workdir, 'backgrounds'))
    os.makedirs(bg_path, exist_ok=True)
    # Marker file records the chipsize/extend this cache was built with.
    with open(f"{bg_path}/chipsize_extend_{chipsize}_{extend}", "w") as f:
        f.write('')
    if debug:
        print('bg_path: ', bg_path)
        print('lon, lat:', lon, lat)
    # Fetch one background image per requested tile map server.
    for t in tms:
        if debug:
            print('lon, lat, chipsize, extend, t, bg_path, debug')
            print(lon, lat, chipsize, extend, t, bg_path, debug)
        get_requests.background(lon, lat, chipsize, extend, t, bg_path, debug)
def from_path(path, vars=None, *args, **kwargs):
  """Read a scenario configuration and construct a new scenario instance.

  Args:
    path (basestring): Path to a configuration file. `path` may be a directory
      containing a single configuration file.
    vars: Optional variables forwarded to `from_yaml_str` for substitution
      in the YAML configuration.
    *args: Arguments passed to Scenario __init__.
    **kwargs: Arguments passed to Scenario __init__.

  Returns:
    Scenario: A new scenario instance.

  Raises:
    ValueError: If no (or multiple) configuration files are found, or the
      file is not a YAML file.
    CurieTestException: If the YAML cannot be parsed.
  """
  # If path is a directory, find a configuration file inside that directory.
  if os.path.isdir(path):
    paths = Scenario.find_configuration_files(path)
    if not paths:
      raise ValueError("No configuration files found at '%s'" % path)
    elif len(paths) > 1:
      raise ValueError("Multiple configuration files found at '%s': %r" %
                       (path, paths))
    else:
      path = paths[0]
  # Parse the configuration file and construct a new scenario.  The file's
  # directory is passed along so relative resources can be resolved.
  directory, filename = os.path.split(path)
  extension = os.path.splitext(filename)[1]
  if extension.lower() in [".yml", ".yaml"]:
    with open(path) as config_file:
      try:
        scenario = from_yaml_str(config_file.read(), *args,
                                 vars=vars, source_directory=directory, **kwargs)
      except yaml.parser.ParserError as err:
        raise CurieTestException("Unable to parse YAML at path %r, check "
                                 "syntax: %r" % (path, str(err)))
  else:
    raise ValueError("Invalid file type '%s'" % path)
  return scenario
def test_get_next_payment_date(controller: Controller):
    """Check that client can get next_payment_date attribute for subscriptions in subscription list"""
    subscriptions_list = controller.get_subscriptions_list()
    # isinstance instead of exact type comparison (`type(x) ==` / `is`):
    # subclasses of Subscription/date are acceptable here.
    assert isinstance(subscriptions_list[0], Subscription)
    for subs in subscriptions_list:
        assert isinstance(subs.next_payment_date, date)
def SogouNews(*args, **kwargs):
    """ Defines SogouNews datasets.
    The labels includes:
        - 0 : Sports
        - 1 : Finance
        - 2 : Entertainment
        - 3 : Automobile
        - 4 : Technology
    Create supervised learning dataset: SogouNews
    Separately returns the training and test dataset
    Args:
        root: Directory where the datasets are saved. Default: ".data"
        ngrams: a contiguous sequence of n items from s string text.
            Default: 1
        vocab: Vocabulary used for dataset. If None, it will generate a new
            vocabulary based on the train data set.
        include_unk: include unknown token in the data (Default: False)
    Examples:
        >>> train_dataset, test_dataset = torchtext.datasets.SogouNews(ngrams=3)
    """
    # Prepend the dataset name and forward all user arguments unchanged.
    return _setup_datasets("SogouNews", *args, **kwargs)
def test_unasigned_unknowns_are_kept():
    """Test that existing unasigned unknown variables are kept empty."""
    # Seed the .env file with a variable that has no value assigned and is
    # not part of the Dotenver template.
    set_dotenv(
        """
        UNKNOWN_VARIABLE
        """
    )
    dotenver.parse_files([TEMPLATE_FILE.name], override=False)
    # The unknown variable must survive parsing, still unassigned, under the
    # "not in template" banner, while templated variables keep their values.
    expected = """
STATIC_VARIABLE=static
export FALSE_VARIABLE=False
TRUE_VARIABLE=True
######################################
# Variables not in Dotenver template #
######################################
UNKNOWN_VARIABLE
"""
    assert DOTENV_FILE.read() == expected
async def async_iter(iterator):
    """Wrap a plain iterable in an async generator yielding its items."""
    for element in iterator:
        yield element
def add_volume (activity_cluster_df,
                activity_counts):
    """Attach each activity's session volume as a percentile-rank column.

    Parameters
    ----------
    activity_cluster_df : dataframe
        Pandas dataframe of activities (indexed by activity), skipgrams
        features, and cluster label from DBSCAN.
    activity_counts: dictionary
        Dictionary (from activities.create_corpus func) of activity and
        session counts.

    Returns
    -------
    The same dataframe (modified in place) with a 'volume_pctl' column
    holding each activity's session-count percentile rank on a 0-100 scale.
    """
    assert isinstance(activity_counts, dict) == True, "activity_counts should be a dictionary."
    assert len(activity_counts) >= len(activity_cluster_df), "activity_counts must contain the same number or more activity entries than activity_cluster_df."
    # Look up each activity's raw session count via the index, then convert
    # the absolute counts to percentile ranks scaled to 0-100.
    activity_cluster_df['volume_pctl'] = activity_cluster_df.index.map(activity_counts)
    percentiles = activity_cluster_df['volume_pctl'].rank(pct=True) * 100
    activity_cluster_df['volume_pctl'] = percentiles
    return activity_cluster_df
def handle_message(message):
    """
    Where `message` is a string that has already been stripped and lower-cased,
    tokenize it and find the corresponding Hand in the database. (Also: return some
    helpful examples if requested, or an error message if the input cannot be parsed.)

    Python 2 code: `filter` returns a list and `print` is a statement.
    Returns a reply string in every case.
    """
    if 'example' in message:
        return example()
    # Drop empty tokens produced by repeated spaces.
    message_tokens = filter(lambda x: x != '', message.split(" "))
    # Expected shape: "<rank1> <rank2> <suiting> <players>"
    if len(message_tokens) != 4:
        # maybe use a better error message here?
        return STANDARD_ERRORMSG
    # handle the described poker hand.
    rank1 = parsers.get_rank(message_tokens[0])
    rank2 = parsers.get_rank(message_tokens[1])
    suiting = parsers.get_suiting(message_tokens[2])
    players = parsers.get_players(message_tokens[3])
    # check_input returns an error string on bad input, None when valid.
    check = check_input(rank1, rank2, suiting, players)
    if check != None:
        return check
    try:
        p_win, p_tie, expected_gain = get_stats(rank1, rank2, suiting, players)
    # NOTE(review): bare except hides the real DB error; consider narrowing.
    except:
        print "Input valid but bad db lookup." + str([rank1, rank2, suiting, players])
        return "Error! Input valid but DataBase lookup failed? Please report this bug."
    return (
        "P(win): " + str(p_win * 100) + "%\n"
        "P(tie): " + str(p_tie * 100) + "%\n"
        "Expected unit gain: " + str(expected_gain)
    )
def MemoizedSingleCall(functor):
  """Decorator for simple functor targets, caching the results

  The functor must accept no arguments beyond either a class or self (depending
  on if this is used in a classmethod/instancemethod context).  Results of the
  wrapped method will be written to the class/instance namespace in a specially
  named cached value.  All future invocations will just reuse that value.

  Note that this cache is per-process, so sibling and parent processes won't
  notice updates to the cache.
  """
  # TODO(build): Should we rebase to snakeoil.klass.cached* functionality?
  # pylint: disable=protected-access
  # Sentinel distinguishing "not cached yet" from a cached None/falsy result.
  # The previous `is None` check re-ran the functor on every call whenever it
  # legitimately returned None.
  _MISSING = object()

  @functools.wraps(functor)
  def wrapper(obj):
    key = wrapper._cache_key
    val = getattr(obj, key, _MISSING)
    if val is _MISSING:
      val = functor(obj)
      setattr(obj, key, val)
    return val

  # Use name mangling to store the cached value in a (hopefully) unique place.
  wrapper._cache_key = '_%s_cached' % (functor.__name__.lstrip('_'),)
  return wrapper
def polyadd(c1, c2):
    """
    Add one polynomial to another.

    Returns the sum of two polynomials `c1` + `c2`.  The arguments are
    sequences of coefficients from lowest order term to highest, i.e.,
    [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of polynomial coefficients ordered from low to high.

    Returns
    -------
    out : ndarray
        The coefficient array representing their sum.

    See Also
    --------
    polysub, polymul, polydiv, polypow

    Examples
    --------
    >>> from numpy.polynomial import polynomial as P
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> sum = P.polyadd(c1,c2); sum
    array([ 4.,  4.,  4.])
    >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
    28.0
    """
    # as_series yields trimmed ndarray copies, so in-place addition is safe.
    [c1, c2] = pu.as_series([c1, c2])
    # Add the shorter coefficient array into the longer one.
    if len(c1) >= len(c2):
        longer, shorter = c1, c2
    else:
        longer, shorter = c2, c1
    longer[:shorter.size] += shorter
    return pu.trimseq(longer)
def weighted_categorical_crossentropy(target, output, n_classes = 3, axis = None, from_logits=False):
    """Categorical crossentropy between an output tensor and a target tensor.
    Automatically computes the class weights from the target image and uses
    them to weight the cross entropy

    Python 2 / TF1 code (statement `print`, `keep_dims` kwarg).

    # Arguments
        target: A tensor of the same shape as `output`; assumed one-hot
            along the class axis — TODO confirm.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        n_classes: number of classes used when normalizing the weights.
        axis: class axis; defaults to the last axis when None.
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
            Only False is supported; True raises.
    # Returns
        Output tensor.
    """
    # Note: tf.nn.softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if axis is None:
        axis = len(output.get_shape()) - 1
    if not from_logits:
        # scale preds so that the class probas of each sample sum to 1
        output /= tf.reduce_sum(output,
                                axis=axis,
                                keep_dims=True)
        # manual computation of crossentropy; clip away 0/1 to keep the log
        # finite
        _epsilon = _to_tensor(K.epsilon(), output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
        target_cast = tf.cast(target, K.floatx())
        # Per-class weight: inverse class frequency in the batch, normalized
        # by 1/n_classes (assumes axes [0,1,2] are batch/spatial — TODO
        # confirm for non-4D inputs).
        class_weights = 1.0/np.float(n_classes)*tf.divide(tf.reduce_sum(target_cast), tf.reduce_sum(target_cast, axis = [0,1,2]))
        print class_weights.get_shape()
        return - tf.reduce_sum(tf.multiply(target * tf.log(output), class_weights), axis=axis)
    else:
        raise Exception("weighted_categorical_crossentropy cannot take logits")
def config():
    """List the path to the Cards db."""
    with cards_db() as db:
        db_location = db.path()
        print(db_location)
def get_config(key, default):
    """
    Read *key* from the "IMPROVED_PERMISSIONS_SETTINGS" dictionary in the
    Django settings module.

    Return *default* when the dictionary is absent/empty or does not
    contain *key*.
    """
    from django.conf import settings

    config_dict = getattr(settings, 'IMPROVED_PERMISSIONS_SETTINGS', None)
    if config_dict and key in config_dict:
        return config_dict[key]
    return default
def fetch_protein_interaction(data_home=None):
    """Fetch the protein-interaction dataset
    Constant features were removed
    ===========================   ===================================
    Domain                        drug-protein interaction network
    Features                      Biological (see [1])
    output                        interaction network
    Drug matrix                   (sample, features) = (1554, 876)
    Newtork interaction matrix    (samples, labels) = (1554, 1862)
    ===========================   ===================================
    Parameters
    ----------
    data_home: optional, default: None
        Specify another download and cache folder for the data sets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels and
        'feature_names', the original names of the dataset columns.
    References
    ----------
    .. [1] Yamanishi, Y., Pauwels, E., Saigo, H., & Stoven, V. (2011).
           Extracting sets of chemical substructures and protein domains
           governing drug-target interactions. Journal of chemical information
           and modeling, 51(5), 1183-1194.
    """
    data_home = _fetch_drug_protein(data_home=data_home)
    protein_fname = os.path.join(data_home, "target_repmat.txt")
    # Skip the header row and the leading id column.
    data = np.loadtxt(protein_fname, dtype=float, skiprows=1,
                      usecols=range(1, 877))  # skip id column
    # Despite the name, this mask selects the NON-constant columns.
    mask_constant = np.var(data, axis=0) != 0.
    data = data[:, mask_constant]  # remove constant columns
    with open(protein_fname, 'r') as fhandle:
        feature_names = fhandle.readline().split("\t")
    # NOTE(review): feature_names comes from the full header line and may
    # include the id column (877 entries) while mask_constant has one entry
    # per data column (876) — confirm the lengths actually match.
    feature_names = np.array(feature_names)[mask_constant].tolist()
    interaction_fname = os.path.join(data_home, "inter_admat.txt")
    target = np.loadtxt(interaction_fname, dtype=float, skiprows=1)
    target = target[:, 1:]  # skip id column
    # Transpose so rows align with the drug samples in `data`.
    target = target.T
    return Bunch(data=data, target=target, feature_names=feature_names)
def prefetched_iterator(query, chunk_size=2000):
    """
    This is a prefetch_related-safe version of what iterator() should do.
    It walks the queryset in ascending primary-key order, fetching
    `chunk_size` records per database query.

    Args:
        query (QuerySet): the django queryset to iterate
        chunk_size (int): the size of each chunk to fetch
    """
    # Keyset pagination: each batch starts just after the last id seen,
    # so rows added/removed behind the cursor cannot shift the window.
    base_query = query.order_by("id")
    last_id = 0
    while True:
        batch = base_query.filter(id__gt=last_id)[:chunk_size]
        if not batch:
            break
        item = None
        for item in batch:
            yield item
        # The batch was non-empty, so `item` is its last record.
        last_id = item.id
def mousePressed():
    """Mouse-press event hook (Processing-style naming); intentionally a no-op."""
    pass
def get_dir(foldername, path):
    """ Get directory relative to current file - if it doesn't exist create it.

    :param foldername: name of the folder to ensure under *path*.
    :param path: base directory.
    :return: full path of the (now existing) directory.
    """
    file_dir = os.path.join(path, foldername)
    # makedirs(exist_ok=True) replaces the previous isdir()/mkdir() pair:
    # it is race-free and also creates missing intermediate directories.
    os.makedirs(file_dir, exist_ok=True)
    return file_dir
def add_img_to_frame(img, frame, offset):
    """put a smaller matrix into a larger frame,
    starting at a specific offset

    Python 2 code (`xrange`).  `orig_size` is a module-level constant: the
    side length of the square image — TODO confirm where it is defined.

    :param img: flat array reshaped here to (orig_size, orig_size).
    :param frame: 2-D target, mutated in place row by row.
    :param offset: (row, col) top-left position of the pasted image.
    """
    img = img.reshape((orig_size, orig_size))
    for x in xrange(orig_size):
        # Overwrite one row of the frame at a time.
        frame[x + offset[0]][offset[1]: offset[1] + orig_size] = img[x]
def dicom_strfname(names: tuple) -> str:
    """Join name components with '^' into a DICOM person-name string.

    e.g. ('doe', 'john', 's') -> 'doe^john^s' (no case change is applied).
    """
    separator = "^"
    return separator.join(names)
def plot_new_data(logger):
    """
    Plots mixing ratio data, creating plot files and queueing the files for upload.
    This will plot data, regardless of if there's any new data since it's not run continously.
    :param logger: logging logger to record to
    :return: bool, True if it ran correctly, False if exit on error
    """
    logger.info('Running plot_new_data()')
    try:
        engine, session = connect_to_db(DB_NAME, CORE_DIR)
    except Exception as e:
        logger.error(f'Error {e.args} prevented connecting to the database in plot_new_data()')
        return False
    remotedir = BOULDAIR_BASE_PATH + '/MR_plots'
    # Compounds to plot are those quantified against the 'quantlist' standard.
    compounds_to_plot = (session.query(Quantification.name)
                         .join(Standard, Quantification.standard_id == Standard.id)
                         .filter(Standard.name == 'quantlist').all())
    # Unwrap the single-column query rows into plain names.
    compounds_to_plot[:] = [q.name for q in compounds_to_plot]
    # Plot the last 6 months with weekly minor ticks.
    date_limits, major_ticks, minor_ticks = create_monthly_ticks(6, days_per_minor=7)
    # Per-compound y-axis limits come from a shared JSON config.
    with open(JSON_PUBLIC_DIR / 'zug_plot_info.json', 'r') as file:
        compound_limits = json.loads(file.read())
    for name in compounds_to_plot:
        params = (GcRun.date, Compound.mr)
        filters = (
            Compound.name == name,
            GcRun.date >= date_limits['left'],
            *ambient_filters
        )
        results = abstract_query(params, filters, GcRun.date)
        dates = [r.date for r in results]
        mrs = [r.mr for r in results]
        p = MixingRatioPlot(
            {name: (dates, mrs)},
            limits={**date_limits, **compound_limits[name]},
            major_ticks=major_ticks,
            minor_ticks=minor_ticks,
            filepath=MR_PLOT_DIR / f'{name}_plot.png'
        )
        p.plot()
        # Stage the rendered plot for upload; duplicates are ignored.
        file_to_upload = FileToUpload(p.filepath, remotedir, staged=True)
        add_or_ignore_plot(file_to_upload, session)
    session.commit()
    session.close()
    engine.dispose()
    return True
def plot_graphs(graphs=None):
    """Display the strong (GF) and weak (Gf) graphs with networkx.

    :param graphs: optional ``(GF, Gf)`` pair; computed via
        :func:`compute_graphs` when omitted.  Using ``None`` as the default
        avoids evaluating ``compute_graphs()`` at import time, which the
        previous ``graphs=compute_graphs()`` default did.
    :return: the ``(GF, Gf)`` pair that was plotted.
    """
    if graphs is None:
        graphs = compute_graphs()
    GF, Gf = graphs
    # Fixed node layout shared by both figures.
    pos = {1: (2, 1), 2: (4, 1), 3: (5, 2), 4: (4, 3), 5: (1, 3), 6: (1, 2), 7: (3, 4)}
    plt.figure(1)
    nx.draw_networkx_nodes(GF, pos, node_size=500)
    nx.draw_networkx_labels(GF, pos)
    nx.draw_networkx_edges(GF, pos, arrows=True)
    plt.title("Graphe fort")
    plt.show()  # display
    plt.figure(2)
    nx.draw_networkx_nodes(Gf, pos, node_size=500)
    nx.draw_networkx_labels(Gf, pos)
    nx.draw_networkx_edges(Gf, pos, arrows=True, style="dashed")
    plt.title("Graphe faible")
    plt.show()  # display
    return GF, Gf
def get_polygon_name(polygon):
  """Returns the name for a given polygon.

  Since not all plygons store their name in the same field, we have to figure
  out what type of polygon it is first, then reference the right field.

  Args:
    polygon: The polygon object to get the name from.

  Returns:
    The name for that polygon object.

  Raises:
    TypeError: If the polygon is not one of the supported polygon types.
  """
  if isinstance(polygon, StatePolygon):
    name = polygon.name
  elif isinstance(polygon, CountyPolygon):
    # The geo_code threshold decides whether the stored name already carries
    # a 5-character suffix to strip or needs ' County' appended — TODO
    # confirm what the threshold encodes.
    if polygon.geo_code < 10000000:
      name = polygon.name[:-5]
    else:
      name = polygon.name + ' County'
  elif isinstance(polygon, PumaPolygon):
    name = polygon.puma_name[:-5]
  else:
    # Previously an unsupported type fell through and surfaced as
    # UnboundLocalError; fail with an explicit error instead.
    raise TypeError('Unsupported polygon type: %r' % type(polygon))
  return name
def login():
    """Log user in.

    POST: validate the submitted credentials, store the user id in the
    session and redirect home; any failure redirects back to /login.
    GET: render the login form.
    """
    # Forget any user_id
    session.clear()
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # Ensure username was submitted
        if not request.form.get("username"):
            return redirect("/login")
        # Ensure password was submitted
        elif not request.form.get("password"):
            return redirect("/login")
        # Query database for username.  Parameterized query prevents SQL
        # injection (the previous f-string interpolation allowed arbitrary
        # SQL via the username field).
        # NOTE(review): "?" is the DB-API qmark paramstyle (sqlite3); switch
        # the placeholder if this db wrapper uses a different style.
        username = request.form.get("username")
        rows = list(db.execute("SELECT * FROM users WHERE name = ?", (username,)))
        # Ensure username exists and password is correct
        pass_string = request.form.get("password")
        if len(rows) != 1 or not check_password_hash(rows[0][2], pass_string):
            return redirect("/login")
        # Remember which user has logged in
        session["user_id"] = rows[0][0]
        # Redirect user to home page
        return redirect("/")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("login.html")
def new(name):
    """
    Create blueprint.

    :param name: name of the blueprint to create; passed straight through
        to ``g_blueprint``.
    """
    g_blueprint(name)
def read_option(file_path, section, option, fallback=None):
    """
    Parse config file and read out the value of a certain option.

    :param file_path: path of the config file to read.
    :param section: name of the section that contains the option.
    :param option: name of the option to read.
    :param fallback: optional value returned (converted with ``str``) when
        the section or option is missing; when omitted (``None``), a missing
        section/option raises instead.
    :return: the option value as a string.
    """
    try:
        # For details see the notice in the header
        from . import paval as pv
        pv.path(file_path, "config", True, True)
        pv.string(section, "section string")
        pv.string(option, "option string")
    except (ImportError, NameError):
        # Input validation is best-effort; skip it when the helper module is
        # unavailable.  (The previous NameError-only catch let a failed
        # relative import escape as ImportError.)
        pass
    c = configparser.RawConfigParser()
    c.read(file_path)
    try:
        value = c.get(section, option)
    except configparser.NoSectionError:
        # `is not None` (rather than truthiness) so falsy fallbacks such as
        # 0 or "" are honored.
        if fallback is not None:
            return str(fallback)
        raise Exception("This section does not exist in the given " \
                        "config file.")
    except configparser.NoOptionError:
        if fallback is not None:
            return str(fallback)
        raise Exception("This option does not exist in the given " \
                        "section.")
    return str(value)
def phietrack(df: pd.DataFrame,
                phi: list = None,
                lims: list = None,
                phi_range: list = [0, 0.35],
                dtick: bool = False,
                ax=None,
                fontsize=8,
                correlation: pd.DataFrame = None,
                grid_numbers: list = [11, 51],
                steps: list = None,
                legend: bool = True,
                colormap: str = 'Dark2',
                corr_kw=None,
                phi_kw: list = None,
                depth_ref: str = 'md'):
    """Plot porosity (phie) curves versus depth on a well-log track.

    Parameters
    ----------
    df : pd.DataFrame
        Log data; depth comes from the index (``depth_ref='md'``) or
        from column ``df[depth_ref]``.
    phi : list, optional
        Column names of the porosity curves to plot.
    lims : list, optional
        [top, bottom] depth limits; defaults to the full depth range.
    phi_range : list, optional
        x-axis limits for the porosity scale, by default [0, 0.35].
    dtick : bool, optional
        Show depth tick labels when True.
    ax : matplotlib axes, optional
        Axes to draw on; defaults to the current axes.
    fontsize : int, optional
        Tick-label font size, by default 8.
    correlation : pd.DataFrame, optional
        Rows with a 'depth' (and optionally 'comment') column drawn as
        horizontal correlation lines.
    grid_numbers : list, optional
        [major, minor] grid-line counts used when ``steps`` is None.
    steps : list, optional
        [major, minor] grid spacing in depth units.
    legend : bool, optional
        Draw the legend when True.
    colormap : str, optional
        Matplotlib colormap used to color the curves, by default 'Dark2'.
    corr_kw : dict, optional
        Line style for the correlation lines.
    phi_kw : list, optional
        Per-curve line-style dicts.
    depth_ref : str, optional
        'md' to use the index as depth, else a column name.
    """
    pax = ax or plt.gca()

    defkwa = {
        'linestyle': '-',
        'linewidth': 1
    }
    # BUG FIX: the original used mutable default arguments ({} and [])
    # which were mutated in place, leaking style state across calls.
    # Work on shallow copies so caller-supplied dicts/lists stay intact.
    corr_kw = dict(corr_kw) if corr_kw is not None else {}
    phi_kw = [dict(kw) for kw in phi_kw] if phi_kw is not None else []

    def_corr_kw = {
        'color': 'red',
        'linestyle': '--',
        'linewidth': 2
    }
    for k, v in def_corr_kw.items():
        corr_kw.setdefault(k, v)

    depth = df.index if depth_ref == 'md' else df[depth_ref]

    # Plot main lines
    if phi is not None:
        # One color per curve.  (Computed inside the guard: the original
        # called len(phi) before the None check and crashed on defaults.)
        n_curves = len(phi)
        cmap = mpl.cm.get_cmap(colormap, n_curves)
        for i, r in enumerate(phi):
            if len(phi_kw) < i + 1:
                # Append a *copy*: appending the shared defkwa object
                # would make every default-styled curve alias one dict.
                phi_kw.append(dict(defkwa))
            phi_kw[i]['color'] = cmap(i)
            for k, v in defkwa.items():
                phi_kw[i].setdefault(k, v)
            pax.plot(df[r], depth, label=r, **phi_kw[i])

    if lims is None:  # Depth limits
        lims = [depth.min(), depth.max()]
    pax.set_ylim([lims[1], lims[0]])

    # Set the vertical grid spacing
    if steps is None:
        mayor_grid = np.linspace(lims[0], lims[1], grid_numbers[0])
        minor_grid = np.linspace(lims[0], lims[1], grid_numbers[1])
    else:
        mayor_grid = np.arange(lims[0], lims[1], steps[0])
        minor_grid = np.arange(lims[0], lims[1], steps[1])

    pax.set_xlim(phi_range)
    pax.set_xlabel("phie")
    pax.set_xticks(np.linspace(phi_range[0], phi_range[1], 4))
    pax.set_xticklabels(np.round(np.linspace(phi_range[0], phi_range[1], 4), decimals=2))
    pax.xaxis.tick_top()
    pax.xaxis.set_label_position("top")
    pax.tick_params("both", labelsize=fontsize)
    pax.set_yticks(minor_grid, minor=True)
    pax.set_yticks(mayor_grid)
    pax.grid(True, linewidth=1.0)
    pax.grid(True, which='minor', linewidth=0.5)

    if dtick:
        pax.set_yticklabels(mayor_grid)
    else:
        pax.set_yticklabels([])

    # Add correlation lines with optional annotations
    if correlation is not None:
        cor_ann = corr_kw.pop('ann', False)
        for i in correlation.iterrows():
            pax.hlines(i[1]['depth'], 0, 1, **corr_kw)
            if cor_ann:
                # Annotate with the comment when present, otherwise fall
                # back to the depth alone (narrowed from a bare except).
                try:
                    pax.annotate(f"{i[1]['depth']} - {i[1]['comment']} ", xy=(0.35 - 0.05, i[1]['depth'] - 1),
                                 xycoords='data', horizontalalignment='right', bbox={'boxstyle': 'roundtooth', 'fc': '0.8'})
                except KeyError:
                    pax.annotate(f"{i[1]['depth']}", xy=(1 - 0.3, i[1]['depth'] - 1),
                                 xycoords='data', horizontalalignment='right',
                                 bbox={'boxstyle': 'roundtooth', 'fc': '0.8'})
    if legend:
        pax.legend()
def D20_roll():
    """Roll a D20: delegate to DX with dice_sides fixed at 20."""
    count = D20_rolls.get()
    modifier = D20_modifier_var.get()
    roll_result.set(DX(count, 20, modifier))
def find_theme_file(theme_filename: pathlib.Path) -> pathlib.Path:
    """Find the real path of a theme file from the given name.

    First checks the user's theme directory; falls back to the bundled
    default theme when the file is not found there.

    :param theme_filename: The name of the theme file to look for.
    :return: An existing path: the user theme if present, otherwise the
        bundled ``default.json``.
    """
    # Try the user's themes directory (appdata) first.
    data_theme_file_path = pathlib.Path(THEMES_DIR) / theme_filename
    if data_theme_file_path.is_file():
        return data_theme_file_path
    # Fall back to the bundled default theme.  Note: any missing theme
    # resolves to default.json, not to the requested filename.
    # (pathlib used throughout so the declared Path return type holds;
    # the original returned os.path.join strings.)
    return pathlib.Path(DEFAULT_THEMES_DIR) / "default.json"
def msg_to_json(msg: 'Msg') -> 'json.Data':
    """Convert message to json serializable data."""
    result = {}
    result['facility'] = msg.facility.name
    result['severity'] = msg.severity.name
    result['version'] = msg.version
    result['timestamp'] = msg.timestamp
    result['hostname'] = msg.hostname
    result['app_name'] = msg.app_name
    result['procid'] = msg.procid
    result['msgid'] = msg.msgid
    result['data'] = msg.data
    result['msg'] = msg.msg
    return result
def get_direct_dependencies(definitions_by_node: 'Definitions', node: 'Node') -> 'Nodes':
    """Collect the set of nodes referenced by *node*'s definition.

    Walks the definition and all nested children definitions with an
    explicit work stack; *node* itself is excluded from the result.
    """
    seen = {node}
    pending = [definitions_by_node[node]]
    while pending:
        current = pending.pop()
        for dependency in current['nodes']:
            seen.add(dependency)
        pending.extend(current['children_definitions'])
    seen.discard(node)
    return seen
def get_timeseries_metadata(request, file_type_id, series_id, resource_mode):
    """
    Gets metadata html for the aggregation type (logical file type)
    :param request:
    :param file_type_id: id of the aggregation (logical file) object for which metadata in html
    format is needed
    :param series_id: id of the time series for which metadata is to be displayed
    :param resource_mode: a value of either edit or view. In resource edit mode metadata html
    form elements are returned. In view mode normal html for display of metadata is returned
    :return: json data containing html string
    """
    if resource_mode not in ('edit', 'view'):
        err_msg = "Invalid metadata type request."
        ajax_response_data = {'status': 'error', 'message': err_msg}
        return JsonResponse(ajax_response_data, status=status.HTTP_400_BAD_REQUEST)
    logical_file, json_response = _get_logical_file("TimeSeriesLogicalFile", file_type_id)
    if json_response is not None:
        return json_response
    series_ids = logical_file.metadata.series_ids_with_labels
    if series_id not in series_ids:
        # this will happen only in case of CSV file upload when data is written
        # first time to the blank sqlite file as the series ids get changed to
        # uuids
        # BUG FIX: dict views are not indexable in Python 3
        # (`series_ids.keys()[0]` raised TypeError); take the first key
        # via an iterator instead.
        series_id = next(iter(series_ids))
    try:
        if resource_mode == 'view':
            metadata = logical_file.metadata.get_html(series_id=series_id)
        else:
            metadata = logical_file.metadata.get_html_forms(series_id=series_id)
        ajax_response_data = {'status': 'success', 'metadata': metadata}
    except Exception as ex:
        # BUG FIX: Python 3 exceptions have no `.message` attribute; use str().
        ajax_response_data = {'status': 'error', 'message': str(ex)}
    return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
def standardize(tag):
    """Put an order-numbering ID3 tag into our standard form.

    Non-order-numbering tags are left untouched and None is returned.

    Args:
        tag: A mutagen ID3 tag, modified in-place.
    Returns:
        A 2-tuple with the decoded version of the order string, or None
        for non-order tags.
    Raises:
        BadOrderError: if the tag is obviously bad.
    """
    if not _is_order_tag(tag):
        return None
    normalized = standardize_str(tag.text[0])
    tag.text[0] = normalized
    return decode(normalized)
def test_composite_unit_get_format_name():
    """See #1576"""
    numerator = u.Unit('nrad/s')
    denominator = u.Unit('Hz(1/2)')
    composite = u.CompositeUnit(1, [numerator, denominator], [1, -1])
    assert str(composite) == 'nrad / (Hz(1/2) s)'
def pop_layer_safe(lv: dict, pop_key: str):
    """Remove a child from a layer node while keeping 'size' consistent.

    Args:
        lv (dict): The node to pop a child from.
        pop_key (str): The key of the child to remove.
    """
    del lv[pop_key]
    lv['size'] = lv['size'] - 1
def _count_objects(osm_pbf):
    """Count nodes, ways and relations in an .osm.pbf file.

    Runs ``osmium fileinfo -e`` on the file and scrapes the
    "Number of <kind>" lines from its output.
    """
    proc = run(["osmium", "fileinfo", "-e", osm_pbf], stdout=PIPE, stderr=DEVNULL)
    report = proc.stdout.decode()
    counts = dict.fromkeys(("nodes", "ways", "relations"), 0)
    for line in report.split("\n"):
        for kind in counts:
            if f"Number of {kind}" in line:
                counts[kind] = int(line.rsplit(":", 1)[-1])
    return counts
def combine_multi_uncertainty(unc_lst):
    """Combine uncertainty values from multiple sources in quadrature.

    :param unc_lst: iterable of individual uncertainty values.
    :return: sqrt of the sum of squares of the values (0.0 for an
        empty input).
    """
    # Sum of squares via a generator, replacing the index-based loop.
    total = sum(u ** 2 for u in unc_lst)
    return np.sqrt(float(total))
def export_performance_df(
    dataframe: pd.DataFrame, rule_name: str = None, second_df: pd.DataFrame = None, relationship: str = None
) -> pd.DataFrame:
    """
    Calculate portfolio performance for data after applying a trading
    signal/rule and, optionally, a relationship.

    :param dataframe: primary price/allocation data.
    :param rule_name: name of the registered rule to apply; None skips
        the rule step.
    :param second_df: optional second dataframe processed the same way
        and appended to the result.
    :param relationship: optional relationship rule applied after the
        main rule.
    :return: dataframe with performance (and relationship) columns.
    :raises ValueError: if rule_name is not found in any registry.
    """
    if rule_name is not None:
        # Select the calculation matching the registry the rule lives in.
        if rule_name in algorithm_functions["infertrade"]["allocation"]:
            used_calculation = calculate_infertrade_allocation
        elif rule_name in algorithm_functions["ta"]["signal"]:
            used_calculation = calculate_ta_signal
        elif rule_name in algorithm_functions["ta"]["allocation"]:
            used_calculation = calculate_ta_allocation
        elif rule_name in algorithm_functions["infertrade"]["signal"]:
            used_calculation = calculate_infertrade_signal
        elif rule_name in ta_export_regression_allocations:
            used_calculation = calculate_ta_regression_allocation
        else:
            # BUG FIX: the original tested `rule_name not in algorithm_functions`,
            # which compares against the top-level package keys only, so an
            # unknown rule could fall through and leave `used_calculation`
            # unbound (NameError) instead of raising.
            raise ValueError("Algorithm not found")
        df_with_performance = used_calculation(dataframe=dataframe, rule_name=rule_name)
    else:
        df_with_performance = dataframe

    if relationship is None:
        return df_with_performance

    df_with_relationship = calculate_infertrade_allocation(
        dataframe=df_with_performance, rule_name=relationship
    )
    if second_df is None:
        return df_with_relationship

    if rule_name is not None:
        second_df_with_performance = used_calculation(dataframe=second_df, rule_name=rule_name)
    else:
        second_df_with_performance = second_df
    second_df_with_relationship = calculate_infertrade_allocation(
        dataframe=second_df_with_performance, rule_name=relationship
    )
    # DataFrame.append was removed in pandas >= 2; concat preserves the
    # old behavior (ignore_index=False is concat's default).
    return pd.concat([df_with_relationship, second_df_with_relationship])
def attention(x, scope, n_head, n_timesteps):
    """
    Multi-head qkv dot-product attention followed by a linear projection.
    """
    n_state = x.shape[-1].value
    with tf.variable_scope(scope):
        q = conv1d(x, 'q', n_state)
        k = conv1d(x, 'k', n_state)
        v = conv1d(x, 'v', n_state)
        # split/merge of heads is fused into the blocksparse attention ops,
        # so no reshape/transpose is needed here
        bst = get_blocksparse_attention_ops(n_timesteps, n_head)
        energies = bst.query_key_op(q, k)
        probs = bst.masked_softmax(energies, scale=tf.rsqrt(n_state / n_head))
        context = bst.weight_value_op(probs, v)
        return conv1d(context, 'proj', n_state)
def std_ver_minor_inst_valid_possible(std_ver_minor_uninst_valid_possible):  # pylint: disable=redefined-outer-name
    """Return an instantiated IATI Version Number."""
    version = iati.Version(std_ver_minor_uninst_valid_possible)
    return version
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
    """Weighted cross-entropy loss for a sequence of logits (per example).
    Args:
      logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
      targets: List of 1D batch-sized int32 Tensors of the same length as logits.
      weights: List of 1D batch-sized float-Tensors of the same length as logits.
      average_across_timesteps: If set, divide the returned cost by the total
        label weight.
      softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
        to be used instead of the standard softmax (the default if this is None).
      name: Optional name for this operation, default: "sequence_loss_by_example".
    Returns:
      1D batch-sized float Tensor: The log-perplexity for each sequence.
    Raises:
      ValueError: If len(logits) is different from len(targets) or len(weights).
    """
    # The three per-timestep lists must line up element-wise.
    if len(targets) != len(logits) or len(weights) != len(logits):
        raise ValueError("Lengths of logits, weights, and targets must be the same "
                         "%d, %d, %d." % (len(logits), len(weights), len(targets)))
    with tf.name_scope(name,
                       "sequence_loss_by_example", logits + targets + weights):
        log_perp_list = []
        # Accumulate the weighted cross-entropy of each timestep.
        for logit, target, weight in zip(logits, targets, weights):
            if softmax_loss_function is None:
                # Flatten targets to a 1D batch of class indices.
                target = tf.reshape(target, [-1])
                # NOTE(review): positional order here is (logits, labels) per the
                # legacy TF1 API; newer TF requires keyword arguments
                # (labels=..., logits=...) — confirm against the TF version in use.
                crossent = tf.sparse_softmax_cross_entropy_with_logits(
                    logit, target)
            else:
                crossent = softmax_loss_function(logit, target)
            # Per-timestep weight masks out padding positions.
            log_perp_list.append(crossent * weight)
        log_perps = tf.add_n(log_perp_list)
        if average_across_timesteps:
            total_size = tf.add_n(weights)
            total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
            log_perps /= total_size
    return log_perps
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.