| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
import sys
def query_yes_no(question, default="no"):
"""
    Ask a yes/no question via _read_from_user() and return the user's answer.
:param str question: a string that is presented to the user.
:param str default: the presumed answer if the user just hits <Enter>.
:return bool: True for "yes" or False for "no"
"""
def parse(ans):
return {"yes": True, "y": True, "ye": True, "no": False, "n": False}[
ans.lower()
]
try:
prompt = {None: "[y/n]", "yes": "[Y/n]", "no": "[y/N]"}[
None if default is None else default.lower()
]
except (AttributeError, KeyError):
raise ValueError("invalid default answer: {}".format(default))
msg = "{q} {p} ".format(q=question, p=prompt)
while True:
sys.stdout.write(msg)
try:
return parse(_read_from_user() or default)
except KeyError:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
|
36f95aa7a52fbcb17dc351d86fda01a06dd8504e
| 3,642,800
|
from dateutil import parser
def skeda_from_skedadict(line_dict, filing_number, line_sequence, is_amended):
    """
    Annotate a Schedule A line dict with filing metadata and, when a
    contribution date is present, parse it into a datetime.
    """
line_dict['transaction_id'] = line_dict['transaction_id'][:20]
line_dict['line_sequence'] = line_sequence
line_dict['superseded_by_amendment'] = is_amended
line_dict['filing_number'] = filing_number
if line_dict['contribution_date']:
try:
line_dict['contribution_date_formatted'] = parser.parse(line_dict['contribution_date'])
except ValueError:
pass
return line_dict
|
2e07efa96f93ef777185e48bb07787774d4e5180
| 3,642,801
|
from datetime import datetime
def oracle_to_date(string2convert, fmt, nlsparam=None):
"""
https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions183.htm
TO_DATE(char [, fmt [, 'nlsparam' ] ])
TO_DATE converts char of CHAR, VARCHAR2, NCHAR, or NVARCHAR2 datatype to a value of DATE datatype.
The fmt is a datetime model format specifying the format of char. If you omit fmt, then char must be in the default date format.
If fmt is J, for Julian, then char must be an integer.
On SQLite date are in iso-8601 format: 'YYYY-MM-DD HH:MM:SS'
    Also, the supported format is the C standard (1989 version).
    The function is cached for performance reasons.
"""
    dobj = datetime.strptime(string2convert, fmt)
# Return a nice Sqlite date string
return dobj.isoformat(sep=" ", timespec="seconds")
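# Illustrative call (a sketch): an Oracle-style TO_DATE with C strptime format
# codes, converted into the ISO-8601 string SQLite expects:
#
#   oracle_to_date("01-03-2021 12:30:00", "%d-%m-%Y %H:%M:%S")
#   # -> '2021-03-01 12:30:00'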
|
eeaee6289d43bd446fbf27ce25ed87555a116ae4
| 3,642,802
|
import re
def replace_whitespace(s, rep=' '):
"""Replace any length white spaces in the given string with a replacement.
Parameters
----------
s : str
The string in which any length whitespaces should be replaced.
rep : Optional[str]
The string with which all whitespace should be replaced. By default,
the plain ASCII space ( ) is used.
Returns
-------
str
The string in which whitespaces have been replaced.
"""
s = re.sub(r'\s+', rep, s)
return s
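# Example of the behaviour documented above (runs of any whitespace collapse
# into a single replacement string):
#
#   replace_whitespace("a\t b\n\n c")        # -> 'a b c'
#   replace_whitespace("a\t b", rep="_")     # -> 'a_b'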
|
b583a627dda830275822f6276af33b58afb55f1e
| 3,642,803
|
def add_s3_prefix(s3_bucket):
    """
    Ensure a bucket name has the s3:// prefix
    :param s3_bucket: string - The bucket name
    """
    s3_prefix = 's3://'
    if s3_bucket.startswith(s3_prefix):
        return s3_bucket
    return s3_prefix + s3_bucket
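# Quick check of the intended behaviour:
#
#   add_s3_prefix("my-bucket")        # -> 's3://my-bucket'
#   add_s3_prefix("s3://my-bucket")   # -> 's3://my-bucket' (unchanged)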
|
6c6c7738b2a7d8972ae01f06c9d5cfb5a4d53502
| 3,642,804
|
from aiohttp import web
async def handle_xml_response(request):
    """Return a canned XML response (used to fake the real endpoint in tests)."""
    response = load_data("equipment_data.xml")
    return web.Response(
        content_type="text/xml",
        body=response
    )
|
d56526414469424483fc8461c29f3b9c9963e698
| 3,642,805
|
import sys
import tkinter as tk
import myNotebook as nb  # EDMC's themed notebook widgets
import entry_lookup
import db_connection
this = sys.modules[__name__]  # EDMC plugin convention: module-level state holder
def plugin_prefs(parent, cmdr, is_beta):
"""
Return a TK Frame for adding to the EDMC settings dialog.
"""
global listbox
frame = nb.Frame(parent)
nb.Label(frame, text="Faction Name:").grid(row=0,column=0)
nb.Label(frame, text="System Name").grid(row=0,column=1)
faction_entry = nb.Entry(frame,width=35)
faction_entry.grid(row=2,column=0)
faction_listbox = tk.Listbox(frame,width=35)
faction_listbox.grid(row=3,column=0)
this.faction_el = entry_lookup.EntryLookup(faction_entry,faction_listbox, db_connection.get_faction_names(),this.faction_name.get())
system_entry = nb.Entry(frame,width=35)
system_entry.grid(row=2,column=1)
system_listbox = tk.Listbox(frame,width=35)
system_listbox.grid(row=3,column=1)
this.system_el = entry_lookup.EntryLookup(system_entry,system_listbox, db_connection.get_system_names(),this.system_name.get())
b = nb.Button(frame, text="Scrape history", command=scrape_history)
b.grid(row=4, column=1)
nb.Label(frame,text="Warning, this will take a while. Shut down ED before running").grid(row=4,column=0)
return frame
|
25df93343750cdac60604e6f5f91f84b3d105a12
| 3,642,806
|
from tensorflow.keras.layers import Conv1D, Concatenate
def Conv1D_positive_r(x, kernel_size):
"""index of r is hard-coded to 2!"""
out1 = Conv1D(1, kernel_size=kernel_size, padding='valid', activation='linear')(x)
out2 = Conv1D(1, kernel_size=kernel_size, padding='valid', activation='linear')(x)
out3 = Conv1D(1, kernel_size=kernel_size, padding='valid', activation='relu')(x)
return Concatenate()([out1, out2, out3])
|
b66db8e65007d6044ad711b4ac9e7e9f967ecd91
| 3,642,807
|
def remove_innermost_template_usage(raw_code: str) -> str:
"""
If the code does not include templates, should return the exact same code
FIXME: check if any task is templated
"""
_temp_code = raw_code
template_types = get_all_template_types(raw_code)
_temp_code = replace_template_type(_temp_code, template_types)
_temp_code = add_fake_template_types_def(_temp_code, template_types)
_temp_code = replace_template_types(_temp_code, template_types)
return _temp_code
|
92aa5164277064045e3436588f87067ecf626f07
| 3,642,808
|
def decrypt(text, key):
"""Decrypt the supplied text and return the result.
Args:
text (str): The text to decrypt.
key (str): The key with which to perform the decryption.
"""
return transform(text, key, True)
|
bb7fb87622a38c3eba9156d9a8678357e40adcb3
| 3,642,809
|
import numpy as np
def psi_gauss_1d(x, a: float = 1.0, x_0: float = 0.0, k_0: float = 0.0):
    """
    Gaussian wave packet of width a and momentum k_0, centered at x_0
    :param x: mathematical variable
    :param a: Width of the pulse
    :param x_0: Mean spatial x of pulse
    :param k_0: Group velocity of pulse
    """
return ((a * np.sqrt(np.pi)) ** (-0.5)
* np.exp(-0.5 * ((x - x_0) * 1. / a) ** 2 + 1j * x * k_0))
|
278ffa7f15fd8c52346f5b232a89d40ee48c8843
| 3,642,810
|
import gdb
import pwndbg.memory
import pwndbg.typeinfo
def get(address, limit=LIMIT):
"""
Recursively dereferences an address.
Returns:
A list containing ``address``, followed by up to ``limit`` valid pointers.
"""
result = []
for i in range(limit):
# Don't follow cycles, except to stop at the second occurrence.
if result.count(address) >= 2:
break
result.append(address)
try:
address = int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address))
except gdb.MemoryError:
break
return result
|
1a3b7122ede440ddee773d7e260430517181909d
| 3,642,811
|
import unittest
def elemwise_checker(op, expected_f, gap=None, test_dtypes=None,
grad_test=True, name=None, gap_grad=None):
"""Return the appropriate test class for the elemwise on sparse.
:param op: Op to test.
:expected_f: Function use to compare. This function must act
on dense matrix. If the op is structured
see the `structure_function` decorator to make
this function structured.
:param gap: Tuple for the range of the random sample. When
length is 1, it is assumed to be the exclusive
max, when `gap` = (`a`, `b`) it provide a sample
from [a, b[. If `None` is used, it provide [0, 1]
for float dtypes and [0, 50[ for integer dtypes.
:param test_dtypes: Particular dtypes for testing the op.
If `None`, this is set to the most common
dtypes.
:param grad_test: True for testing the grad. False will
skip this test.
:param gap_grad: If None, we reuse gap. Otherwise it is the same as gap
but for testing the gradiant of the op.
:return: The class that perform the tests, not an instance
of the class.
"""
if test_dtypes is None:
test_dtypes = sparse.all_dtypes
class Tester(unittest.TestCase):
def setUp(self):
super(Tester, self).setUp()
self.op = op
self.expected_f = expected_f
self.gap = gap
if gap_grad is not None:
self.gap_grad = gap_grad
else:
self.gap_grad = gap
# Ensure the test's name is correct.
utt.seed_rng()
assert eval(self.__class__.__name__) is self.__class__
def test_op(self):
for format in sparse.sparse_formats:
for dtype in test_dtypes:
if dtype == 'int8' or dtype == 'uint8':
continue
# When testing with unsigned integers,
# we must check if the gap contains
# negative numbers.
if dtype.startswith('uint'):
if self.gap and len(self.gap) == 2 and self.gap[0] < 0:
if self.gap[1] >= 1:
self.gap = (0, self.gap[1])
else:
raise TypeError('Gap not suitable for',
dtype, self.__name__)
variable, data = sparse_random_inputs(
format,
shape=(4, 7),
out_dtype=dtype,
gap=self.gap)
f = theano.function(variable, self.op(*variable))
tested = f(*data)
data = [m.toarray() for m in data]
expected = self.expected_f(*data)
assert tested.format == format
tested = tested.toarray()
try:
utt.assert_allclose(expected, tested)
except AssertionError:
raise AssertionError(self.__name__)
            # Test with int8 as dtype
            # These tests are not in the loop for two reasons.
            # First, in recent versions of numpy, when a numpy
            # function has int8 as its input dtype, it returns a
            # float16 output dtype. Since this does not provide
            # enough precision, we upcast the data before we apply the
            # function.
            # Second, the tolerance for the check in DebugMode
            # is too high.
for dtype in ['int8', 'uint8']:
if dtype in test_dtypes:
if self.gap:
domain = self.gap
# When testing with unsigned integers,
# we must check if the gap contains
# negative numbers.
if dtype == 'uint8':
if len(domain) == 2 and domain[0] < 0:
if domain[1] >= 1:
domain = (0, domain[1])
else:
raise TypeError('Gap not suitable for',
dtype, self.__name__)
else:
domain = (0, 5)
variable, data = sparse_random_inputs(
format,
shape=(4, 7),
out_dtype=dtype,
gap=domain)
f = theano.function(variable, self.op(*variable))
old_value = (tensor.basic.float32_atol,
tensor.basic.float32_rtol,
tensor.basic.float64_atol,
tensor.basic.float64_rtol)
tensor.basic.float32_atol = 1e-4
tensor.basic.float32_rtol = 1e-3
tensor.basic.float64_atol = 1e-3
tensor.basic.float64_rtol = 1e-4
try:
tested = f(*data)
finally:
(tensor.basic.float32_atol,
tensor.basic.float32_rtol,
tensor.basic.float64_atol,
tensor.basic.float64_rtol) = old_value
data = [m.toarray().astype('float32') for m in data]
expected = self.expected_f(*data)
assert tested.format == format
tested = tested.toarray()
try:
utt.assert_allclose(tested, expected, rtol=1e-2)
except AssertionError:
raise AssertionError(self.__name__)
if grad_test:
def test_grad(self):
for format in sparse.sparse_formats:
for dtype in sparse.float_dtypes:
variable, data = sparse_random_inputs(
format,
shape=(4, 7),
out_dtype=dtype,
gap=self.gap_grad)
verify_grad_sparse(self.op,
data,
structured=True)
# Set proper class name to uniquely identify tests.
# Note that it is important to run this code *outside* of the `Tester`
# class itself, otherwise it will not work properly for some reason.
if name is None:
name = op.__name__.capitalize() + 'Tester'
Tester.__name__ = name
assert 'Roundhalftoeven' not in Tester.__name__
return Tester
|
e2ed86b905c086bfd1ceda228b6d0e19ed99a444
| 3,642,812
|
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors, cm as cmx
def plot_segments(track_generator, get_figure=False, plot_3D=False):
"""Plot the characteristic track segments from an OpenMOC simulation.
This method requires that tracks have been generated by a TrackGenerator.
Each segment is colored by the ID of the unique FSR it is within.
Parameters
----------
track_generator : openmoc.TrackGenerator
A TrackGenerator with the track segments to plot
get_figure : bool
Whether or not to return the Matplotlib figure
Returns
-------
fig : matplotlib.Figure or None
The Matplotlib figure is returned if get_figure is True
Examples
--------
A user may invoke this function from an OpenMOC Python file as follows:
>>> openmoc.plotter.plot_segments(track_generator)
"""
cv.check_type('track_generator', track_generator, openmoc.TrackGenerator)
if not track_generator.containsTracks():
py_printf('ERROR', 'Unable to plot Track segments since the ' +
'TrackGenerator has not yet generated Tracks.')
global subdirectory, matplotlib_rcparams
directory = openmoc.get_output_directory() + subdirectory
# Ensure that normal settings are used even if called from ipython
curr_rc = matplotlib.rcParams.copy()
update_rc_param(curr_rc)
# Make directory if it does not exist
try:
os.makedirs(directory)
except OSError:
pass
py_printf('NORMAL', 'Plotting the track segments...')
# Retrieve data from TrackGenerator
vals_per_segment = openmoc.NUM_VALUES_PER_RETRIEVED_SEGMENT
num_azim = track_generator.getNumAzim()
spacing = track_generator.getDesiredAzimSpacing()
if plot_3D and isinstance(track_generator, openmoc.TrackGenerator3D):
num_polar = track_generator.getNumPolar()
z_spacing = track_generator.getDesiredZSpacing()
num_segments = int(track_generator.getNumSegments())
num_fsrs = int(track_generator.getGeometry().getNumTotalFSRs())
coords = \
track_generator.retrieveSegmentCoords(num_segments*vals_per_segment)
# Convert data to NumPy arrays
coords = np.array(coords)
x = np.zeros(num_segments*2)
y = np.zeros(num_segments*2)
z = np.zeros(num_segments*2)
fsrs = np.zeros(num_segments)
for i in range(num_segments):
fsrs[i] = coords[i*vals_per_segment]
x[i*2] = coords[i*vals_per_segment+1]
y[i*2] = coords[i*vals_per_segment+2]
z[i*2] = coords[i*vals_per_segment+3]
x[i*2+1] = coords[i*vals_per_segment+4]
y[i*2+1] = coords[i*vals_per_segment+5]
z[i*2+1] = coords[i*vals_per_segment+6]
# Create array of equally spaced randomized floats as a color map for plots
# Seed the NumPy random number generator to ensure reproducible color maps
    np.random.seed(1)
    color_map = np.linspace(0., 1., num_fsrs, endpoint=False)
    np.random.shuffle(color_map)
# Make figure of line segments for each track
fig = plt.figure(constrained_layout=True)
fig.patch.set_facecolor('none')
# Create a color map corresponding to FSR IDs
if plot_3D:
ax = fig.gca(projection = '3d')
for i in range(num_segments):
cNorm = colors.Normalize(vmin=0, vmax=max(color_map))
scalarMap = cmx.ScalarMappable(norm=cNorm)
color = scalarMap.to_rgba(color_map[int(fsrs[i]) % num_fsrs])
plt.plot(x[i*2:(i+1)*2], y[i*2:(i+1)*2], z[i*2:(i+1)*2], c=color)
if z.min() != z.max():
ax.set_zlim(z.min(), z.max())
else:
for i in range(num_segments):
cNorm = colors.Normalize(vmin=0, vmax=max(color_map))
scalarMap = cmx.ScalarMappable(norm=cNorm)
color = scalarMap.to_rgba(color_map[int(fsrs[i]) % num_fsrs])
plt.plot(x[i*2:(i+1)*2], y[i*2:(i+1)*2], c=color)
plt.xlim([x.min(), x.max()])
plt.ylim([y.min(), y.max()])
suptitle = 'Segments ({0} angles, {1} cm spacing)'.format(num_azim,
spacing)
if plot_3D and isinstance(track_generator, openmoc.TrackGenerator3D):
suptitle = 'Segments ({0}/{1} azimuthal/polar angles\n and {2}/{3} cm '\
'azimuthal/axial spacings'.format(num_azim, num_polar, spacing,
z_spacing)
title = 'z = {0}'.format(z[0])
plt.suptitle(suptitle)
if not plot_3D:
plt.title(title)
# Restore settings if called from ipython
update_rc_param(curr_rc)
if track_generator.getGeometry().isRootDomain():
if get_figure:
return fig
else:
filename = 'segments-{0}-angles-{1}-spacing'.format(num_azim,
spacing)
filename = '{0}-z-{1}.png'.format(filename, z[0])
if plot_3D and isinstance(track_generator, openmoc.TrackGenerator3D):
filename = '3d-segments-{0}-azimuthal-{1}-polar-angles-{2}-'\
'azimuthal-{3}-z-spacing.png'.format(num_azim, num_polar,
spacing, z_spacing)
fig.savefig(directory+filename, bbox_inches='tight')
plt.close(fig)
|
d6e29c550b2c13d0287359974892867fcd3404ab
| 3,642,813
|
def find_availability_by_year(park, campground, year, months=range(1, 13)):
"""
Parameters
----------
park : str
campground : str
year : str
months : list
list of months as str or int. Default is `range(1, 13)`
Returns
-------
list
list of weekend availability at the given park's campground during the
given month and year
"""
yearly_availability = []
for month in months:
if isinstance(month, int):
month = str(month)
try:
monthly_availability = find_availability_by_month(park, campground, year, month)
yearly_availability.append(monthly_availability)
        except Exception:
break
# Flatten list
yearly_availability = [item for sublist in yearly_availability for item in sublist]
return yearly_availability
|
28e81b2382f2733d1cc024a221c11feaa5ae5653
| 3,642,814
|
def seconds(value=None, utc=True, **kwargs):
"""
    Converts value to seconds. If value is a timedelta or struct_time, it will just be converted to seconds.
    If value is a datetime instance it will be converted to seconds since epoch (UTC). If value is a number,
    it's assumed to be in milliseconds, so it will simply be divided by 1000. You can also provide named arguments,
    same as for the timedelta function.
"""
if isinstance(value, (int, float)):
return int(float(value) / 1000.0)
else:
return _convert_time(value, utc, **kwargs)
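# Sketch of the numeric branch documented above (the timedelta/struct_time/
# datetime branches are handled by the _convert_time helper defined elsewhere):
#
#   seconds(1500)   # -> 1   (1500 ms -> 1 s, truncated by int())
#   seconds(999)    # -> 0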
|
aced764fc038b316ca0b772254b6c6a44f333d9e
| 3,642,815
|
def fix_mocov2_state_dict(state_dict):
"""
Ref: https://bit.ly/3cDfGVA
"""
new_state_dict = {}
for k, v in state_dict.items():
if k.startswith("model.encoder_q."):
k = k.replace("model.encoder_q.", "")
new_state_dict[k] = v
return new_state_dict
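# Example of the key rewrite performed above (values pass through untouched):
#
#   fix_mocov2_state_dict({"model.encoder_q.conv1.weight": w,
#                          "model.encoder_k.conv1.weight": w2})
#   # -> {"conv1.weight": w, "model.encoder_k.conv1.weight": w2}
#   # only the query-encoder ("model.encoder_q.") prefix is stripped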
|
13471d6863eb14eb3248f6d6e1d6b5882c341ed0
| 3,642,816
|
import cv2
import numpy as np
from numpy.linalg import norm
def get_perspective(image, contours, ratio):
"""
This function takes image and contours and returns perspective of this contours.
:param image: image, numpy array
:param contours: contours, numpy array
:param ratio: rescaling parameter to the original image
:return: warped image
"""
points = contours.reshape(4, 2)
points = points * ratio
rectangle = np.zeros(shape=(4, 2), dtype='float32')
    # Order the four corners: the top-left point has the smallest x+y sum and
    # the bottom-right the largest; np.diff gives y-x per point, so the
    # top-right corner has the smallest difference and the bottom-left the largest.
    total = points.sum(axis=1)
    rectangle[0] = points[np.argmin(total)]
    rectangle[2] = points[np.argmax(total)]
    difference = np.diff(points, axis=1)
    rectangle[1] = points[np.argmin(difference)]
    rectangle[3] = points[np.argmax(difference)]
# rectangle *= ratio
(a, b, c, d) = rectangle
width1 = norm(c - d)
width2 = norm(b - a)
height1 = norm(b - c)
height2 = norm(a - d)
max_width = max(int(width1), int(width2))
max_height = max(int(height1), int(height2))
destination = np.array([[0, 0],
[max_width - 1, 0],
[max_width - 1, max_height - 1],
[0, max_height - 1]], dtype='float32')
M = cv2.getPerspectiveTransform(src=rectangle, dst=destination)
warped_image = cv2.warpPerspective(src=image, M=M, dsize=(max_width, max_height))
return warped_image
|
237db75baa8b72314e095f435075e75b8aa126b0
| 3,642,817
|
import bz2
import os
def processFilesOpen(filename, filetype='file', subname='', zptr=None,
**kwargs):
"""
Open a file for processing. If it is a compressed file, open for
decompression.
:param filetype: 'zip' if this is a zip archive.
:param filename: name of the file (if a zip archive, this is the archive).
:param subname: name within an archive.
:param zptr: a pointer to a zip archive if appropriate.
:returns: a file-like object and the display filename.
"""
if filetype == 'zip':
fptr = zptr.open(subname)
filename += ' - ' + subname
elif filename.lower().endswith('.bz2'):
fptr = bz2.BZ2File(filename)
filename = filename.rsplit('.', 1)[0]
elif (filename.lower().endswith('.gz') or
filename.lower().endswith('.gz.tmp')):
# fptr = gzip.open(filename)
# Using the command line utility lets a second core be used a little
fptr = os.popen('gunzip < %s' % filename)
filename = filename.rsplit('.', 1)[0]
else:
fptr = open(filename)
return fptr, filename
|
7a5cbab72ccc6b31530cb517fe672d2a49af4564
| 3,642,818
|
from pathlib import Path
def load_model_selector(folder_path):
"""Load information about stored model selection
Parameters
----------
folder_path : str
path where .model_selector_result files are stored
Returns
-------
ModelSelector
Information about model selection for each partition
"""
results = [
load_model_selector_result(path=r.parent, partition_hash=r.stem)
for r in Path(folder_path).glob("*.model_selector_result")
]
model_selector = ModelSelector(
horizon=results[0].horizon,
frequency=results[0].frequency,
country_code_column=results[0].country_code_column,
)
model_selector.results = results
return model_selector
|
1e977ca422c5004e510f4989f7778bd0ca95f4c0
| 3,642,819
|
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
def generate_expired_date():
"""Generate a datetime object NB_DAYS_BEFORE_DELETING_LIVE_RECORDINGS days in the past."""
return timezone.now() - timedelta(
days=settings.NB_DAYS_BEFORE_DELETING_LIVE_RECORDINGS
)
|
8d6fb9aae4cd5065416ccea4ba17d11080d8ccbc
| 3,642,820
|
from typing import Dict
def make_dummy_authentication_request_args() -> Dict[str, bytes]:
"""Creates a request to emulate a login request.
Returns:
Dict[str, bytes]: Authenticator dictionary
"""
def _make_dummy_authentication_request_args():
args = {
"username": ["foobar".encode()],
"password": ["mypassword".encode()],
"assignment_name": ["lab101".encode()],
"course_id": ["intro101".encode()],
"lms_user_id": ["abc123".encode()],
"user_role": ["Student".encode()],
}
return args
    return _make_dummy_authentication_request_args()
|
2e0919bac46a5140a72c02ee09c1ce3b1cb9269a
| 3,642,821
|
import xml.etree.ElementTree as et
import pandas as pd
def add_experiment_images_to_image_info_csv(image_info_df, experiment_xml_file):
"""
Goes through the xml file of the experiment and adds the info of its images to the image info dataframe.
If the gene name is missing in the experiment, then this experiment is considered invalid.
:param image_info_df: the image info dataframe to append the new images
:param experiment_xml_file: the xml file of the experiment that we want to add its images
:return: the image info dataframe and also a boolean which determines whether this experiment is invalid.
"""
invalid = False
tree = et.parse(experiment_xml_file)
root = tree.getroot()
section_data_sets = root.find('section-data-sets')
section_data_set = section_data_sets.find('section-data-set')
experiment_id = section_data_set.find('id').text
specimen_id = section_data_set.find('specimen-id').text
section_images = section_data_set.find('section-images')
genes = section_data_set.find('genes')
specimen = section_data_set.find('specimen')
donor = specimen.find('donor')
structure = specimen.find('structure')
donor_id = donor.find('name').text
donor_sex = donor.find('sex').text
donor_age = donor.find('age-id').text
pmi = donor.find('pmi').text
donor_race = donor.find('race-only').text
smoker = donor.find('smoker').text
chemotherapy = donor.find('chemotherapy').text
radiation_therapy = donor.find('radiation-therapy').text
tumor_status = donor.find('tumor-status').text
conditions = donor.find('conditions')
condition = conditions.find('condition')
description = condition.find('description').text
region_name = structure.find('name').text
region_acronym = structure.find('acronym').text
tissue_ph = specimen.find('tissue-ph').text
gene = genes.find('gene')
    if gene is None:
        print("experiment " + experiment_id + " is invalid")
        invalid = True
else:
gene_symbol = gene.find('acronym').text
gene_alias_tags = gene.find('alias-tags').text
entrez_id = gene.find('entrez-id').text
gene_original_name = gene.find('original-name').text
gene_original_symbol = gene.find('original-symbol').text
all_section_images = section_images.findall('section-image')
image_id_list = []
for item in all_section_images:
image_id_list.append(item.find('id').text)
for image_id in image_id_list:
new_row = pd.Series({'image_id': image_id, 'gene_symbol': gene_symbol, 'entrez_id': entrez_id,
'alias_tags': gene_alias_tags, 'original_name': gene_original_name,
'original_symbol': gene_original_symbol, 'experiment_id':experiment_id,'specimen_id': specimen_id,
'description': description, 'donor_id': donor_id, 'donor_sex': donor_sex,
'donor_age':donor_age, 'donor_race':donor_race,
'smoker' : smoker, 'chemotherapy': chemotherapy, 'radiation_therapy': radiation_therapy,
'tumor_status' : tumor_status,
'region':region_name, 'region_acronym': region_acronym,
'tissue_ph': tissue_ph, 'pmi': pmi })
image_info_df = image_info_df.append(new_row, ignore_index=True)
return image_info_df, invalid
|
99b545cba5aeb53f9ba2af2a1a5bf3acb72c6fa7
| 3,642,822
|
from typing import Callable, Iterable, Iterator, Optional, TypeVar
T = TypeVar("T")
U = TypeVar("U")
def dedup(iterable: Iterable[T], key: Optional[Callable[[T], U]] = None) -> Iterator[T]:
"""
List unique elements.
>>> tuple(dedup([5, 4, 3, 5, 3, 3]))
(3, 4, 5)
"""
return uniq(sorted(iterable, key=key), key)
|
8334d08f926584b1c976c24bde180930124b78ba
| 3,642,823
|
def get_product(barcode):
"""
Return information of a given product.
"""
return utils.fetch('api/v0/product/%s' % barcode)
|
2cc298cf640b4aa742c51b5d076f0021660fe0d5
| 3,642,824
|
import cv2
import numpy as np
def knn_matcher(arr2, arr1, neighbours=2, img_id=0, ratio_threshold=0.75):
    """Computes the inlier matches for the given descriptor arrays arr1 and arr2
    Arguments:
        arr2 {np.ndarray} -- Image used for finding the matches (train image)
        arr1 {np.ndarray} -- Image in which matches are found (test image)
    Keyword Arguments:
        neighbours {int} -- Number of neighbours to consider while matching.
            Should be 2 (default: {2})
        img_id {int} -- Id of the train image (default: {0})
        ratio_threshold {float} -- Ratio threshold for the ratio test
            (default: {0.75}). If 0 or None, the matches are not filtered.
    Returns:
        list(matches) -- List of cv2.DMatch objects
    """
assert neighbours == 2
# Compute L2 distance for all the descriptors arr1 and arr2
all_distances = np.sqrt(np.square(arr2).sum(
axis=1)[:, np.newaxis] + np.square(arr1).sum(axis=1) - 2 * arr2.dot(arr1.T))
# Take top K closest neighbours for each descriptor
closest_indices = np.argsort(all_distances, axis=1)[:, :neighbours]
# Create a list of "K" match pairs
matches = []
for i in range(closest_indices.shape[0]):
match_list = [cv2.DMatch(
_trainIdx=n, _queryIdx=i, _distance=all_distances[i, n], _imgIdx=img_id) for n in closest_indices[i]]
matches.append(match_list)
# Perform ratio test to get inliers
if ratio_threshold:
matches = filter_matches(matches, ratio_threshold)
return matches
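# Hedged usage sketch: arr1/arr2 are (N, 128)-style descriptor arrays such as
# those returned by a SIFT detectAndCompute() call; filter_matches() is the
# ratio-test helper defined elsewhere in this module.
#
#   matches = knn_matcher(train_desc, query_desc, ratio_threshold=0.75)
#   # each element is a cv2.DMatch that survived Lowe's ratio test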
|
6397938b3624e1f32426b429f809e60e6bb72b49
| 3,642,825
|
from typing import Optional
import pulumi
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
"""
Response for list BGP peer status API service call.
:param str peer: The IP address of the peer to retrieve the status of.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200601:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value
return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
value=__ret__.value)
|
ade144cd6ce8c6827c0631a5c795d4ef2fbcaf7f
| 3,642,826
|
import re
import logging
import sys
def check_wrs2_tiles(wrs2_tile_list=[], path_list=[], row_list=[]):
"""Setup path/row lists
Populate the separate path and row lists from wrs2_tile_list
Filtering by path and row lists separately seems to be faster than
creating a new path/row field and filtering directly
"""
wrs2_tile_fmt = 'p{:03d}r{:03d}'
    wrs2_tile_re = re.compile(r'p(?P<PATH>\d{1,3})r(?P<ROW>\d{1,3})')
# Force path/row list to zero padded three digit numbers
if wrs2_tile_list:
wrs2_tile_list = sorted([
wrs2_tile_fmt.format(int(m.group('PATH')), int(m.group('ROW')))
for pr in wrs2_tile_list
for m in [wrs2_tile_re.match(pr)] if m])
# If path_list and row_list were specified, force to integer type
# Declare variable as an empty list if it does not exist
try:
path_list = list(sorted(map(int, path_list)))
except ValueError:
logging.error(
'\nERROR: The path list could not be converted to integers, '
'exiting\n {}'.format(path_list))
sys.exit()
try:
row_list = list(sorted(map(int, row_list)))
except ValueError:
logging.error(
'\nERROR: The row list could not be converted to integers, '
'exiting\n {}'.format(row_list))
sys.exit()
# Convert wrs2_tile_list to path_list and row_list if not set
# Pre-filtering on path and row separately is faster than building wrs2_tile
# This is a pretty messy way of doing this...
if wrs2_tile_list and not path_list:
path_list = sorted(list(set([
int(wrs2_tile_re.match(pr).group('PATH'))
for pr in wrs2_tile_list if wrs2_tile_re.match(pr)])))
if wrs2_tile_list and not row_list:
row_list = sorted(list(set([
int(wrs2_tile_re.match(pr).group('ROW'))
for pr in wrs2_tile_list if wrs2_tile_re.match(pr)])))
if path_list:
logging.debug(' Paths: {}'.format(
' '.join(list(map(str, path_list)))))
if row_list:
logging.debug(' Rows: {}'.format(' '.join(list(map(str, row_list)))))
if wrs2_tile_list:
logging.debug(' WRS2 Tiles: {}'.format(
' '.join(list(map(str, wrs2_tile_list)))))
return wrs2_tile_list, path_list, row_list
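# Illustrative round-trip of the three return values (hypothetical tile id):
#
#   check_wrs2_tiles(wrs2_tile_list=['p43r032'])
#   # -> (['p043r032'], [43], [32])
#   # the tile string is zero-padded, and path/row lists are derived from it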
|
3876cb4e96a5f82f919c770798fe9393c2655877
| 3,642,827
|
import pathlib
def cat(file_path: str) -> str:
"""pathlib.Path().read_textのshortcut
Args:
file_path (str): filepath
Returns:
str: file内の文字列
Example:
>>> cat('unknown.txt')
"""
file_path = pathlib.Path(file_path)
if file_path.is_file():
return file_path.read_text()
return None
|
17eef15686a97e62380d077d678f2993e02e6d5c
| 3,642,828
|
def _get_role_by_name(role_name):
"""
Get application membership role
Args:
role_name (str): role name.
Returns:
int: application membership role id.
"""
base_request = BaseRequest()
settings = Settings()
params = {
'filter': 'name',
'eq': role_name
}
roles = base_request.request(
'application_membership_role', 'GET', params=params,
endpoint=settings.get('pine_endpoint')
)['d']
if not roles:
raise exceptions.BalenaApplicationMembershipRoleNotFound(role_name=role_name)
else:
return roles[0]['id']
|
0599f6c9571345318be71b9f453f89d1439c64fa
| 3,642,829
|
def parse_filename(filename, is_adversarial=False, **kwargs):
"""Parse the filename of the experment result file into a dictionary of settings.
Args:
filename: a string of filename
is_adversarial: whether the file is from experiments/GIB_node_adversarial_attack.
"""
if is_adversarial:
return parse_filename_adversarial(filename, **kwargs)
else:
return parse_filename_standard(filename)
|
1972de5803a8eb0ff50438adbe0adee1597199a9
| 3,642,830
|
def WHo_mt(dist, sigma):
"""
Speed Accuracy model for generating finger movement time.
:param dist: euclidian distance between points.
:param sigma: speed-accuracy trade-off variance.
:return: mt: movement time.
"""
x0 = 0.092
y0 = 0.0018
alpha = 0.6
x_min = 0.006
x_max = 0.06
k_alpha = 0.12
if dist == 0:
dist = 0.0000001
mt = pow((k_alpha * pow(((sigma - y0) / dist), (alpha - 1))), 1 / alpha) + x0
return mt
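# Hedged usage sketch (units follow the docstring: dist is the Euclidean
# distance between points, sigma the speed-accuracy variance; x_min/x_max
# above are unused bounds in this snippet):
#
#   mt = WHo_mt(dist=0.05, sigma=0.01)   # movement time in seconds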
|
36d8b7e913df658b52f1f03617d0b9817091d0ef
| 3,642,831
|
def find_next_sibling_position(element, tag_type):
"""
Gets current elements next sibling's (chosen by provided tag_type) actual character position in html document
:param element: Whose sibling to look for, type: An object of class bs4.Tag
:param tag_type: sibling tag's type (e.g. p, h2, div, span etc. ), type: A string
:return: An Integer specifying character pos. in html, infinite when no sibling is found
"""
nxt_sib = element.find_next_sibling(tag_type)
return float("inf") if nxt_sib is None else nxt_sib.sourcepos
|
9b912fd9b7d30e81d6b4c2fec0e0573017b51a83
| 3,642,832
|
import subprocess
def CheckOutput(cmd, **kwargs):
"""Call subprocess.check_output to get output.
The subprocess.check_output return type is "bytes" in python 3, we have
to convert bytes as string with .decode() in advance.
Args:
cmd: String of command.
**kwargs: dictionary of keyword based args to pass to func.
Return:
String to command output.
"""
return subprocess.check_output(cmd, **kwargs).decode()
|
b4eb9ac552124c56f76c0c684c2d515558307aa4
| 3,642,833
|
import numpy as np
def one_hot(arr, n_class=0):
"""Change labels to one-hot expression.
Args:
arr [np.array]: numpy array
n_class [int]: number of class
Returns:
oh [np.array]: numpy array with one-hot expression
"""
if arr is None:
return None
if isinstance(arr, list) or isinstance(arr, np.ndarray):
arr = np.array(arr)
ishape = arr.shape
arr = arr.flatten()
n_class = arr.max() + 1 if n_class == 0 else n_class
    if n_class < arr.max() + 1:
        raise ValueError("Value of 'n_class' is too small.")
oh = np.zeros((arr.size, n_class), dtype=int)
oh[np.arange(arr.size), arr] = 1
oh = np.reshape(oh, (*ishape, -1))
return oh
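# Example of the shape handling above (n_class is inferred as max+1 when 0):
#
#   one_hot([0, 2, 1])
#   # -> array([[1, 0, 0],
#   #           [0, 0, 1],
#   #           [0, 1, 0]])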
|
ba22f7f1f7d97d5d3989eff69c42bdce2ca34e87
| 3,642,834
|
def boost_nfw_at_R(R, B0, R_scale):
"""NFW boost factor model.
Args:
R (float or array like): Distances on the sky in the same units as R_scale. Mpc/h comoving suggested for consistency with other modules.
B0 (float): NFW profile amplitude.
R_scale (float): NFW profile scale radius.
Returns:
float or array like: NFW boost factor profile; B = (1-fcl)^-1.
"""
R = _ArrayWrapper(R, 'R')
boost = _ArrayWrapper.zeros_like(R)
cluster_toolkit._lib.boost_nfw_at_R_arr(R.cast(), len(R), B0, R_scale,
boost.cast())
return boost.finish()
|
a7e13f5309fa663b41c5eec1c8518f444ba86b5f
| 3,642,835
|
def get_swatches(root):
"""Get swatch elements in the SVG"""
swatches = {}
for node in descendants(root):
if "hasAttribute" not in dir(node) or not node.hasAttribute("id"):
continue
classname = extract_class_name(node.getAttribute("id"))
if classname:
swatches[classname] = node
return swatches
|
2d9cd4ca2ff034d4200b242eaa5592311c250155
| 3,642,836
|
def chunks(l, n):
"""
Split list in chunks - useful for controlling memory usage
"""
if n < 1:
n = 1
return [l[i:i + n] for i in range(0, len(l), n)]
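# Quick example of the chunking behaviour (the last chunk may be shorter):
#
#   chunks([1, 2, 3, 4, 5], 2)   # -> [[1, 2], [3, 4], [5]]
#   chunks([1, 2, 3], 0)         # n < 1 falls back to chunks of size 1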
|
d878aeb50bd42c9f5a2060f4bb2747aecb1a3b58
| 3,642,837
|
def UserLevelAuthEntry(val=None):
"""Provide a 2-tuple of user and level
* user: string
* level: oneof(ACCESS_LEVELS)
currently: GUEST, USER, ADMIN
"""
    if val is None or len(val) != 2:
raise ValueError('UserLevelAuthEntry entry needs to be a 2-tuple '
'(name, accesslevel)')
# pylint: disable=unbalanced-tuple-unpacking
user, _p, level = UserPassLevelAuthEntry((val[0], '', val[1]))
return tuple((user, level))
|
e26c723a55d215c71d46d2e45e30b3a39d78723d
| 3,642,838
|
# `tokenize`, `split`, `Token`, and `Cookie` are helpers defined alongside this
# function (twisted.web2-style http_headers parsing), not the stdlib tokenize module.
def parseCookie(headers):
"""Bleargh, the cookie spec sucks.
This surely needs interoperability testing.
There are two specs that are supported:
Version 0) http://wp.netscape.com/newsref/std/cookie_spec.html
Version 1) http://www.faqs.org/rfcs/rfc2965.html
"""
cookies = []
# There can't really be multiple cookie headers according to RFC, because
# if multiple headers are allowed, they must be joinable with ",".
# Neither new RFC2965 cookies nor old netscape cookies are.
header = ';'.join(headers)
if header[0:8].lower() == "$version":
# RFC2965 cookie
h = tokenize([header], foldCase=False)
r_cookies = split(h, Token(','))
for r_cookie in r_cookies:
last_cookie = None
rr_cookies = split(r_cookie, Token(';'))
for cookie in rr_cookies:
nameval = tuple(split(cookie, Token('=')))
if len(nameval) == 2:
(name,), (value,) = nameval
else:
(name,), = nameval
value = None
name = name.lower()
if name == '$version':
continue
if name[0] == '$':
if last_cookie is not None:
if name == '$path':
last_cookie.path = value
elif name == '$domain':
last_cookie.domain = value
elif name == '$port':
if value is None:
last_cookie.ports = ()
else:
last_cookie.ports = tuple([int(s) for s in value.split(',')])
else:
last_cookie = Cookie(name, value, version=1)
cookies.append(last_cookie)
else:
# Oldstyle cookies don't do quoted strings or anything sensible.
# All characters are valid for names except ';' and '=', and all
# characters are valid for values except ';'. Spaces are stripped,
# however.
r_cookies = header.split(';')
for r_cookie in r_cookies:
name, value = r_cookie.split('=', 1)
name = name.strip(' \t')
value = value.strip(' \t')
cookies.append(Cookie(name, value))
return cookies
|
f12cfc5303f466eebe3f1b2731d22d02caf12b1d
| 3,642,839
|
import psutil
import os
def get_system_metrics():
"""
For keys in fields
>>> from serverstats import get_system_metrics
>>> fields = dict()
>>> dl = get_system_metrics()
>>> _fields = {
... 'cpu': ['usage_percent', 'idle_percent', 'iowait',
... 'avg_load_15_min', 'avg_load_5_min', 'avg_load_1_min'],
... 'cpu_times': ['user', 'nice', 'system', 'idle', 'iowait',
... 'irq', 'softirq', 'steal', 'guest', 'guest_nice'],
... 'cpu_stats': ['ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls'],
... 'cpu_times_percent': ['user', 'nice', 'system', 'idle',
... 'iowait', 'irq', 'softirq', 'steal', 'guest', 'guest_nice'],
... 'ram': ['total', 'available', 'percent', 'used', 'free',
... 'active', 'inactive', 'buffers', 'cached', 'shared', 'slab'],
... 'swap': ['total', 'used', 'free', 'percent', 'sin', 'sout'],
... 'disk': ['total', 'free', 'used', 'percent'],
... 'disk_partitions': ['sda1', 'sda15'],
... 'disk_io_counters': ['sda1', 'sda15'],
... 'network_traffic': ['lo', 'eth0']}
>>> for key, value in dl.items():
... lst = list()
... if type(value) is dict:
... for t , c in value.items():
... lst.append(t)
... fields[key] = lst
...
>>> _fields == fields
True
"""
    load1, load5, load15 = os.getloadavg()
cpu_percent = psutil.cpu_percent()
cpu_times = psutil.cpu_times()._asdict()
cpu_stats = psutil.cpu_stats()._asdict()
percpu_percent = psutil.cpu_percent(interval=None, percpu=True)
cpu_times_percent = psutil.cpu_times_percent(interval=None, percpu=False)._asdict()
cpu_count = psutil.cpu_count(logical=True)
cpu_freq = [freq._asdict() for freq in psutil.cpu_freq(percpu=True)]
network_traffic_info = psutil.net_io_counters(pernic=True)
memory = psutil.virtual_memory()._asdict()
swap_mem = psutil.swap_memory()._asdict()
disk_partitions = {}
fs_types = set()
for part in psutil.disk_partitions(all=False):
usage = {}
if os.name == 'nt':
if 'cdrom' in part.opts or part.fstype == '':
continue
usage = part._asdict()
usage.pop("opts")
device = usage["device"].split("/")[-1]
fs_types.add(device)
_usage = psutil.disk_usage(part.mountpoint)
disk_partitions.update({device: {**usage, **_usage._asdict()}})
disk = {}
disk["total"] = 0
disk["used"] = 0
disk["percent"] = 0
for key, val in disk_partitions.items():
disk["total"] += val.get("total")
disk["used"] += val.get("used")
disk["percent"] += val.get("percent")
disk["free"] = disk["total"]-disk["used"]
disk["percent"] = disk["percent"]/len(disk_partitions)
disk_io_counters = {}
for key, val in psutil.disk_io_counters(perdisk=True, nowrap=False).items():
if key in fs_types:
disk_io_counters[key] = val._asdict()
network_traffic = dict()
for interface in network_traffic_info:
if any(st in interface for st in ["veth", "docker", "br"]):
continue
network_traffic[interface] = {
"bytes_sent": float(network_traffic_info[interface].bytes_sent),
"bytes_received": float(network_traffic_info[interface].bytes_recv),
"packets_sent": float(network_traffic_info[interface].packets_sent),
"packets_recv": float(network_traffic_info[interface].packets_recv)
}
net_connections = psutil.net_connections(kind='inet')
num_pids = len(psutil.pids())
num_users = len(psutil.users())
return dict(
# load_avg info
cpu=dict(
usage_percent=float(cpu_percent),
idle_percent=float(100.00 - cpu_percent),
iowait=float(cpu_times.get("iowait")),
avg_load_15_min=float(load15),
avg_load_5_min=float(load5),
avg_load_1_min=float(load1),
),
# cpu times
cpu_times=cpu_times,
# cpu stats
cpu_stats=cpu_stats,
        # per-cpu percents
percpu_percent=percpu_percent,
# cpu times percent
cpu_times_percent=cpu_times_percent,
# number of cpu
cpu_count=cpu_count,
# cpu frequency
cpu_freq=cpu_freq,
# ram info
ram=memory,
# swap memory info
swap=swap_mem,
# disk info
disk=disk,
# disk partitions info
disk_partitions = disk_partitions,
# disk io counter
disk_io_counters = disk_io_counters,
# network traffic
network_traffic=network_traffic,
# number of net connections
num_net_connections=len(net_connections),
# number of pids
num_pids=num_pids,
# number of users
num_users=num_users
)
|
b612ada76bd829a76dfb35eb070e9788ef75ce78
| 3,642,840
|
def FilterKeptAttachments(
is_description, kept_attachments, comments, approval_id):
"""Filter kept attachments to be a subset of last description's attachments.
Args:
is_description: bool, if the comment is a change to the issue description.
kept_attachments: list of ints with the attachment ids for attachments
kept from previous descriptions, if the comment is a change to the
issue description.
comments: list of IssueComment PBs for the issue we want to edit.
approval_id: int id of the APPROVAL_TYPE fielddef, if we're editing an
approval description, or None otherwise.
Returns:
A list of kept_attachment ids that are a subset of the last description.
"""
if not is_description:
return None
attachment_ids = set()
for comment in reversed(comments):
if comment.is_description and comment.approval_id == approval_id:
attachment_ids = set([a.attachment_id for a in comment.attachments])
break
kept_attachments = [
aid for aid in kept_attachments if aid in attachment_ids]
return kept_attachments
|
89732832db557835a5dea1ef10229bfdd809d304
| 3,642,841
|
import os
def scan_fixtures(path):
"""Scan for fixture files on the given path.
:param path: The path to scan.
:type path: str
:rtype: list
:returns: A list of three-element tuples; the app name, file name, and relative path.
"""
results = list()
for root, dirs, files in os.walk(path):
relative_path = root.replace(path + "/", "")
if relative_path.startswith("static") or relative_path.startswith("theme"):
continue
for f in files:
if not f.endswith(".json"):
continue
app_name = os.path.basename(os.path.dirname(relative_path))
results.append((app_name, f, relative_path))
return results
|
c85e8281a2f9005feb1801083138b55cb5079cf6
| 3,642,842
|
import json
import os
import _thread
import flask
def invocations():
    """Run inference on a single request. The request body is JSON describing an
    object in S3 (bucket, key and media type); the file is downloaded, passed to
    the YOLOv4 model, and the detection results are returned as JSON.
    """
data = None
print("================ INVOCATIONS =================")
#parse json in request
print ("<<<< flask.request.content_type", flask.request.content_type)
data = flask.request.data.decode('utf-8')
data = json.loads(data)
print(data)
bucket = data['bucket']
s3_url = data['s3_url']
download_file_name = s3_url.split('/')[-1]
print ("<<<<download_file_name ", download_file_name)
# s3_client.download_file(bucket, s3_url, download_file_name)
#local test
download_file_name= data['s3_url']
print('Download finished!')
# inference and send result to RDS and SQS
print('Start to inference:')
#LOAD MODEL
weight = './yolov4.weights'
names = './coco.names'
cfg = './yolov4.cfg'
#make sure the model parameters exist
for i in [weight,names,cfg]:
if os.path.exists(i):
print ("<<<<pretrained model exists for :", i)
else:
print ("<<< make sure the model parameters exist for: ", i)
break
    # Run inference on an image
if data['type'] == 'pic':
print('infer pic')
classes, confidences, boxes = yolo_infer(bucket, weight, names, cfg, download_file_name)
print ("Done inference picture! ")
inference_result = {
'classes':classes.tolist(),
'confidences':confidences.tolist(),
'boxes':boxes.tolist()
}
_payload = json.dumps(inference_result,ensure_ascii=False)
else:
print('infer video')
# detect_objects(bucket, weight, names, cfg, download_file_name)
output_s3_path = 'xxxxx'
_thread.start_new_thread(detect_objects, (bucket, weight, names, cfg, download_file_name))
print ("Done inference video! ")
inference_result = {
'vidoe':'infer is done!!',
'output_s3_path':output_s3_path
}
_payload = json.dumps(inference_result,ensure_ascii=False)
return flask.Response(response=_payload, status=200, mimetype='application/json')
|
ab602a874d151869f000d6e0d4b8a5f085be6b7d
| 3,642,843
|
import sys
import piaplib.book.__main__
def main(argv=None):
    """The main event"""
    try:
        if 'piaplib.book.__main__' not in sys.modules:
            import piaplib.book.__main__
        else:
            piaplib.book.__main__ = sys.modules["""piaplib.book.__main__"""]
        if piaplib.book.__main__.__name__ is None:
            raise ImportError("Failed to import piaplib.book.__main__")
    except Exception as importErr:
        del importErr
    return piaplib.book.__main__.main(argv)
|
61ce42c99b933f95596e6cde788890984f270fee
| 3,642,844
|
from datetime import date, datetime
def today() -> date:
    """
    **today**
    returns today's date
    :return: present date
    """
    return datetime.now().date()
|
b5c7b19d9ff02993ab63de2bebd3d7bcdd24da59
| 3,642,845
|
from PIL import ImageFont
def get_word_size(word, font_size):
    """Gets the dimensions of a given word at a given font size (FONT is the module-level font path passed to ImageFont.truetype)."""
    font = ImageFont.truetype(FONT, font_size)
    return font.getsize(word)
|
5742f9ee4377d2f251f75acdfceb1c8c1884fde1
| 3,642,846
|
def get_id(asset, **kwargs):
"""Get an asset by the unique id.
The key for the id must have 'id' in the name in the kwargs.
Example::
get_id(Foo, foo_id=1) # works
get_id(Foo, foo=1) # TypeError
"""
id_key = next(_parse_id(kwargs), None)
if id_key is None:
raise TypeError('Could not parse id key:{}'.format(kwargs))
instance = asset.get_id(kwargs[id_key])
if instance is not None:
return instance.dump()
return NoContent, 404
|
d4c1864acca7aecaed91f550359e7d9541f0de2f
| 3,642,847
|
def put_study_document(request):
"""PUT method for editing an existing study.
Adds "resource_type" -> "study" then calls generic `put_document`.
See `finish_write_operation` for description of the response.
"""
request.matchdict['resource_type'] = 'study'
return put_document(request)
|
244cfecf0f87187334ab599a477e1e3459f549c6
| 3,642,848
|
def create_command(input_file, columns_to_use, column_separator, output_file):
"""
This function creates the linux command to filter the columns and creating the output file
:param input_file: A valid file path to raw data file
:param columns_to_use: Indexes of the columns that needs to be filtered out (index starts from 1)
:param column_separator: Column separator in input/output file (default is ',' [comma])
:param output_file: A valid file path where the output will be stored
:return: A linux shell command
"""
print('Creating text filter command.....', log_type='info')
column_indexes = columns_to_use.split(',')
prefix, command_segment = ('$', '')
count = 1
index_length = len(column_indexes)
for item in column_indexes:
if count < index_length:
segment = prefix + item + '" "'
command_segment += segment
count += 1
else:
segment = prefix + item
command_segment += segment
if column_separator is None:
delimiter = ''
else:
delimiter = ' -F "' + column_separator + '"'
command = "awk" + delimiter + " '{print " + command_segment + "}' " + input_file + " > " + output_file
print('Command creation complete!', log_type='info')
# Return command
return command
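# Worked example of the awk command assembled above (print() here is assumed
# to be a project-specific logger that accepts log_type, not the builtin):
#
#   create_command('in.csv', '1,3', ',', 'out.csv')
#   # -> awk -F "," '{print $1" "$3}' in.csv > out.csv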
|
e14d88bf843190890827d2a157d02faf60917800
| 3,642,849
|
def _new_data_generated(dataset, datagen):
"""
Function to put augmented data in directories
:param dataset: The path for the specified directory
:param datagen: The augmented data
:return: The new data to use for model
"""
new_data = datagen.flow_from_directory(
dataset,
target_size=(IMG_HEIGHT, IMG_WIDTH),
batch_size=32,
class_mode="categorical")
return new_data
|
659edb0eb32e5609c8a0ff11814c5c0d317b134c
| 3,642,850
|
def findOutNode( node, testFunc, fallback=... ):
""" get node and all its parents, inner to outer order """
for out_node in getOutNodes( node ):
if testFunc( out_node ):
return out_node
if fallback is not ...:
return fallback
raise Exception( 'cannot find out node' )
|
73a7f88dee12dd82edecaf173b6fecb48b2ce86b
| 3,642,851
|
import logging
def lookup_cpe(vendor, product, cpe_type, cpe_table, remap):
"""Identify the correct vendor and product values for a CPE
This function attempts to determine the correct CPE using vendor and product
values supplied by the caller as well as a remapping dictionary for mapping
these values to more correct values used by NIST.
    For example, the remapping might tell us that a value of 'alpine' for the
    vendor string should be 'alpinelinux' instead, or for product 'solaris'
    should be 'sunos'.
This function should only emit values seen in the official NIST CPE list
which is provided to it in cpe_table.
Lookup priority:
1. Original vendor / product
2. Original vendor / remap product
3. Remap vendor / original product
4. Remap vendor / remap product
Args:
vendor (str): vendor name
product (str): product name
cpe_type (str): CPE type - o, a, h, etc.
cpe_table (dict): dict containing the official NIST CPE data
remap (dict): dict containing the remapping values
Returns:
success, vendor, product
"""
if (
vendor in cpe_table[cpe_type]
and product in cpe_table[cpe_type][vendor]
):
# Hot path, success with original values
return True, vendor, product
# Everything else depends on a remap of some sort.
# get the remappings for this one vendor string.
vendor_remap = remap.get(vendor, None)
if vendor_remap:
# If we have product remappings, work that angle next
possible_product = None
if (
vendor_remap.get('products', None)
and product in vendor_remap['products']
):
possible_product = vendor_remap['products'][product]
if (vendor in cpe_table[cpe_type]
and possible_product
and possible_product in cpe_table[cpe_type][vendor]):
# Found original vendor, remap product
return True, vendor, possible_product
# Start working the process to find a match with a remapped vendor name
if vendor_remap.get('vendor', None):
new_vendor = vendor_remap['vendor']
if new_vendor in cpe_table[cpe_type]:
if product in cpe_table[cpe_type][new_vendor]:
# Found remap vendor, original product
return True, new_vendor, product
if possible_product and possible_product in cpe_table[cpe_type][new_vendor]:
# Found remap vendor, remap product
return True, new_vendor, possible_product
logging.error("Product %s from vendor %s invalid for CPE %s and no mapping",
product, vendor, cpe_type)
return False, None, None
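# Hedged illustration of the two lookup tables consumed above (keys/values are
# hypothetical, shaped to match the .get() calls in the function):
#
#   cpe_table = {"o": {"alpinelinux": {"alpine_linux": ...}}}
#   remap = {"alpine": {"vendor": "alpinelinux",
#                       "products": {"alpine": "alpine_linux"}}}
#   lookup_cpe("alpine", "alpine", "o", cpe_table, remap)
#   # -> (True, 'alpinelinux', 'alpine_linux')   # priority 4: remap vendor + remap product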
|
5a6e2e735daa50d3d2a19022db002ebfc647335c
| 3,642,852
|
def main():
"""main function of git learning
"""
return 'Google git'
|
a7296a18657643188ef58131fe012df6543f808e
| 3,642,853
|
import numpy
from scipy import signal
def SSIM(img1, img2, cs_map=False):
"""Return the Structural Similarity Map corresponding to input images img1
and img2 (images are assumed to be uint8)
This function attempts to mimic precisely the functionality of ssim.m a
MATLAB provided by the author's of SSIM
https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
"""
img1 = img1.astype(numpy.float64)
img2 = img2.astype(numpy.float64)
size = 11
sigma = 1.5
window = fspecial_gauss(size, sigma)
K1 = 0.01
K2 = 0.03
L = 255 #bitdepth of image
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = signal.fftconvolve(window, img1, mode='valid')
mu2 = signal.fftconvolve(window, img2, mode='valid')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = signal.fftconvolve(window, img1*img1, mode='valid') - mu1_sq
sigma2_sq = signal.fftconvolve(window, img2*img2, mode='valid') - mu2_sq
sigma12 = signal.fftconvolve(window, img1*img2, mode='valid') - mu1_mu2
if cs_map:
return (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2)),
(2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
else:
return ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2))
|
6fe795ac4818ec06db7ed3cd66f52a169dccbc24
| 3,642,854
|
import numpy as np
def covariance(prices: np.ndarray) -> np.ndarray:
"""Calculate covariance matrix.
Args:
prices: Prices of market data.
Returns:
Covariance matrix.
"""
Q = np.cov(prices.T, ddof=0)
return np.array(Q)
|
d0870a9ba6fdf58a0d4242f4c1638de7f05b738a
| 3,642,855
|
def fiveplates_clean_design_file(field, designID):
"""
string representation of targets_clean file for field within
fiveplates_field_files zip file.
    Parameters
    ----------
    field : str
        identifier of field, e.g. 'GG_010'
    designID : str or int
        design identifier embedded in the output filename
    """
return f'{field}_des{designID}_targets_clean.txt'
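# Example of the filename produced (hypothetical field/design values):
#
#   fiveplates_clean_design_file('GG_010', 1234)
#   # -> 'GG_010_des1234_targets_clean.txt'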
|
c6e5c60ad08aa3e4162700f3d48e58d35a57486e
| 3,642,856
|
import itertools
import matplotlib.pyplot as plt
def setup_figure(diff=False):
"""Set diff to True if you want an additional panel showing pair-wise differences in accuracy"""
fig = plt.figure(figsize=(2*3.385, 2*3)) # two column figure for bio-informatics
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.98, top=0.93, wspace=0.05, hspace=0.01)
gs = plt.GridSpec(4 if diff else 3, 3,
width_ratios=[6, 0.9, 6],
height_ratios=[3.5, 2, 1, 1] if diff else [3.5, 1, 1])
ax = {k: plt.subplot(g) for k, g in
zip([''.join(e) for e in itertools.product(['A', 'Ad', 'B', 'C'] if diff else ['A', 'B', 'C'], ['-DEL', '-REF/SNP', '-INS'])], gs)}
return fig, ax
|
6c28eb8fd9f633029fad707bdfee8806fa6a3b72
| 3,642,857
|
import pandas as pd
def load_axon_morphometrics(morphometrics_file):
"""
:param morphometrics_file: absolute path of file containing the morphometrics (must be .csv, .xlsx or pickle format)
:return: stats_dataframe: dataframe containing the morphometrics
"""
# If string, convert to Path objects
morphometrics_file = convert_path(morphometrics_file)
if morphometrics_file.suffix == "":
raise ValueError("File not specified. Please provide the full path of the file, including its extension")
try:
#Use the appropriate loader depending on the extension
if morphometrics_file.suffix.lower() == ".csv":
stats_dataframe = pd.read_csv(morphometrics_file, na_values='NaN')
elif morphometrics_file.suffix.lower() == ".xlsx":
stats_dataframe = pd.read_excel(morphometrics_file, na_values='NaN')
else:
stats_dataframe = pd.read_pickle(morphometrics_file)
except IOError as e:
logger.error(f"Error: Could not load file {str(morphometrics_file)}")
raise
stats_dataframe = rename_column_names_after_loading(stats_dataframe)
# with csv and excel files, they often will have an "unnamed" column because of the indexes saved with the dataframe
# we remove it here
for column in stats_dataframe.columns:
if "unnamed" in column.lower():
stats_dataframe = stats_dataframe.drop(columns=column)
return stats_dataframe
|
b89bdac95660fd6c6e84bfcace4125d2840347ca
| 3,642,858
|
def get_input_costs(
inputs, cost_class="monetary", unit="billion_2015eur", mapping=COST_NAME_MAPPING,
**kwargs
):
"""
Get costs used as model inputs
"""
costs = {}
for var_name, var_data in inputs.data_vars.items():
if "costs" not in var_data.dims or not var_name.startswith("cost"):
continue
if "cap" in var_name:
_unit = f"{unit}_per_tw"
elif var_name == "cost_om_annual":
_unit = f"{unit}_per_tw_per_year"
elif var_name == "cost_om_annual_investment_fraction":
_unit = "fraction_of_total_investment"
elif var_name == "cost_depreciation_rate":
_unit = "fraction"
elif "om_" in var_name:
_unit = f"{unit}_per_twh"
_name = mapping[var_name]
mapped_da = map_da(var_data.loc[{"costs": cost_class}], loc_tech_agg="mean", **kwargs)
series = clean_series(mapped_da)
if series is not None:
costs[_name] = (
series
.to_frame(_unit)
.rename_axis(columns="unit")
.stack()
)
costs[_name].loc[costs[_name].index.get_level_values("unit").str.find("per_tw") > -1] *= 10
return costs
|
02671abda50889bd51fb91a5629b718882434745
| 3,642,859
|
from gettext import gettext, ngettext
from jinja2 import Environment, FileSystemLoader
def render_template(language, context, data, template):
    """Renders an HTML display of metadata XML"""
    env = Environment(extensions=['jinja2.ext.i18n'],
                      loader=FileSystemLoader(context.ppath))
    env.install_gettext_callables(gettext, ngettext, newstyle=True)
template_file = 'resources/templates/%s' % template
template = env.get_template(template_file)
return template.render(language=language, obj=data)
|
6c19a53c1038681c2f5f4d014ec4ed2aae9a50af
| 3,642,860
|
def ht_26():
"""Making one Hash table instance with 26 key val pairs inserted."""
ht = HashTable()
count = 1
for char in letters:
ht.set(char, count)
count += 1
return ht
|
8ab9d1b887da2c9719a1b23d2817b66827b7cc3c
| 3,642,861
|
import time
from flask import abort, jsonify
def get_session(uuid):
"""
Api.get_session method
returns: [uuid, users, payload, state, ts]
200 -- session created
400 -- wrong arguments
403 -- wrong authorization
404 -- session not found
500 -- internal error
"""
conn = conn_get()
session = database.get_session(conn, uuid)
if session is None:
abort(404)
if session['state'] in ['Started', 'Finished']:
if AUTH.username() not in session['users']:
abort(404)
return jsonify(session)
if AUTH.username() not in session['users']:
session['users'].append(AUTH.username())
        session['ts'] = int(time.time())
if len(session['users']) == session['players']:
session['state'] = 'Started'
session['round'] = 1
database.add_round(conn, {
'uuid': uuid,
'round': 1,
'user_moves': {},
})
database.update_session(conn, session)
conn.commit()
return jsonify(session)
|
5657a53dc60b8d8743ccaac79041208c11caa07c
| 3,642,862
|
from typing import Iterable
import io
import pandas as pd
def fetch_data(
indicator: WorldBankIndicators, country_names: Iterable[str], fill_missing=None
) -> pd.DataFrame:
"""
Fetch data from the market_data_cache collection (not to be confused with the market_quote_cache collection)
and ensure the specified countries are only present in the data (if present). Optionally apply a callable to
fill in gaps eg. resample
"""
if indicator is None:
return None
with io.BytesIO(indicator.fetch_data()) as fp:
df = pd.read_parquet(fp)
if df is not None and len(df) > 0:
plot_df = df[df["country"].isin(country_names)]
# print(country_names)
if len(plot_df) == 0:
return None
if fill_missing:
# print(plot_df)
plot_df.index = pd.to_datetime(
plot_df["date"], format="%Y-%m-%d"
) # avoid callers having to specify 'on' keyword as they may not know which column
plot_df = fill_missing(plot_df)
# print(plot_df)
return plot_df
else:
return None
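# Usage sketch (hypothetical indicator object and country list): the optional
# fill_missing callable receives the date-indexed frame and may, for example,
# resample it to annual frequency.
def _example_fetch_data(indicator):
    return fetch_data(
        indicator,
        ["Australia", "New Zealand"],
        fill_missing=lambda df: df.resample("AS").mean(),
    )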
|
8f0778aa28acb4fefeaf1d889d1650994357a787
| 3,642,863
|
from . import authinfos
def _(dbmodel, backend):
"""
get_backend_entity for Django DbAuthInfo
"""
return authinfos.DjangoAuthInfo.from_dbmodel(dbmodel, backend)
|
fa0529b038e4321b2f7e535afb82d16455ef4853
| 3,642,864
|
import numpy as np
from scipy import fft
def wavelen_diversity_doppler_est(echo, prf, samprate, bandwidth,
                                  centerfreq):
"""Estimate Doppler based on wavelength diversity.
    It uses the slope of the phase of the range spectrum along with the single-lag
    time-domain correlator approach proposed by [BAMLER1991]_.
Parameters
----------
echo : np.ndarray(complex)
2-D complex basebanded echo, azimuth by range in time domain.
prf : float
Pulse repetition frequency in (Hz)
samprate : float
        Sampling rate in range (second dim) in (Hz)
bandwidth : float
        RF/chirp bandwidth in (Hz)
centerfreq : float
RF center frequency of chirp in (Hz)
Returns
-------
float
Unambiguous Doppler centroid at center frequency in (Hz)
Raises
------
ValueError
For bad input
TypeError
If echo is not numpy array
See Also
--------
corr_doppler_est : Correlation Doppler Estimator (CDE)
sign_doppler_est : Sign-Doppler estimator (SDE)
References
----------
.. [BAMLER1991] R. Bamler and H. Runge, 'PRF-Ambiguity Resolving by
Wavelength Diversity', IEEE Transaction on GeoSci and Remote Sensing,
November 1991.
"""
if prf <= 0:
raise ValueError('PRF must be positive value!')
if samprate <= 0:
raise ValueError('samprate must be positive value!')
if bandwidth <= 0 or bandwidth >= samprate:
        raise ValueError('bandwidth must be positive and less than samprate!')
if centerfreq <= 0.0:
raise ValueError('centerfreq must be positive value!')
if not isinstance(echo, np.ndarray):
raise TypeError('echo must be a numpy array')
if echo.ndim != 2:
raise ValueError('echo must have two dimensions')
num_azb, num_rgb = echo.shape
if num_azb <= 2:
raise ValueError('The first dimension of echo must be larger than 2')
    if num_rgb <= 2:
raise ValueError('The second dimension of echo must be larger than 2!')
# FFT along range
nfft = fft.next_fast_len(num_rgb)
echo_fft = fft.fft(echo, nfft, axis=1)
# one-lag correlator along azimuth
az_corr = (echo_fft[1:] * echo_fft[:-1].conj()).mean(axis=0)
# Get the unwrapped phase of range spectrum within +/-bandwidth/2.
df = samprate / nfft
half_bw = 0.5 * bandwidth
idx_hbw = nfft // 2 - int(half_bw / df)
unwrap_phs_rg = np.unwrap(np.angle(fft.fftshift(az_corr)
[idx_hbw: -idx_hbw])) # (rad)
# perform linear regression in range freq within bandwidth
freq_bw = -half_bw + df * np.arange(nfft - 2 * idx_hbw)
pf_coef = np.polyfit(freq_bw, unwrap_phs_rg, deg=1)
# get the doppler centroid at center freq based on slope
dop_slope = prf / (2. * np.pi) * pf_coef[0]
return centerfreq * dop_slope
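# Usage sketch with synthetic data, only to illustrate the expected shapes and
# units (the random echo below carries no meaningful Doppler information, so the
# estimate itself is not meaningful):
def _example_wavelen_diversity_doppler_est():
    rng = np.random.default_rng(0)
    echo = (rng.standard_normal((64, 512))
            + 1j * rng.standard_normal((64, 512)))
    dop = wavelen_diversity_doppler_est(echo, prf=1.5e3, samprate=24e6,
                                        bandwidth=20e6, centerfreq=1.2e9)
    print(f"unambiguous Doppler centroid estimate: {dop:.1f} Hz")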
|
d1fe5cb45b9e850fe4da83700fc061e715c76502
| 3,642,865
|
def _parse_line(line):
"""
Parse node string representation and return a dict with appropriate node values.
"""
res = {}
if 'leaf' in line:
res['is_leaf'] = 1
res['leaf_val'] = _parse_leaf_node_line(line)
else:
res['is_leaf'] = 0
res['feature'], res['threshold'] = _parse_decision_node_line(line)
return res
|
014c6d2e9d61798d55af2725b79ff404f9aa7ff3
| 3,642,866
|
from typing import Optional
def generate_richcompare_wrapper(cl: ClassIR, emitter: Emitter) -> Optional[str]:
"""Generates a wrapper for richcompare dunder methods."""
# Sort for determinism on Python 3.5
matches = sorted([name for name in RICHCOMPARE_OPS if cl.has_method(name)])
if not matches:
return None
name = '{}_RichCompare_{}'.format(DUNDER_PREFIX, cl.name_prefix(emitter.names))
emitter.emit_line(
'static PyObject *{name}(PyObject *obj_lhs, PyObject *obj_rhs, int op) {{'.format(
name=name)
)
emitter.emit_line('switch (op) {')
for func in matches:
emitter.emit_line('case {}: {{'.format(RICHCOMPARE_OPS[func]))
method = cl.get_method(func)
assert method is not None
generate_wrapper_core(method, emitter, arg_names=['lhs', 'rhs'])
emitter.emit_line('}')
emitter.emit_line('}')
emitter.emit_line('Py_INCREF(Py_NotImplemented);')
emitter.emit_line('return Py_NotImplemented;')
emitter.emit_line('}')
return name
|
cbbc66a22ee61ed869331fbecd59eb615588fd48
| 3,642,867
|
def display_credentials():
"""
Function that displays all saved credentials
"""
return Credentials.display_credentials()
|
09e474a5b76ae3224571cf9d2d05ea5811e7fbf1
| 3,642,868
|
def render_diff_report():
"""
Render a summary of the diffs found and/or changed.
Returns a string.
Dependencies:
config settings: action, templates, report_order
globals: diff_dict, T_NAME_KEY
modules: nori
"""
if nori.core.cfg['action'] == 'diff':
diff_report = ' Diff Report '
elif nori.core.cfg['action'] == 'sync':
diff_report = ' Diff / Sync Report '
diff_report = ('#' * len(diff_report) + '\n' +
diff_report + '\n' +
'#' * len(diff_report) + '\n\n')
if nori.core.cfg['report_order'] == 'template':
for template_index in diff_dict:
template = nori.core.cfg['templates'][template_index]
section_header = ('Template {0} ({1}):' .
format(template_index,
nori.pps(template[T_NAME_KEY])))
section_header += '\n' + ('-' * len(section_header)) + '\n\n'
diff_report += section_header
for diff_t in diff_dict[template_index]:
exists_in_source = diff_t[0]
source_row = diff_t[1]
exists_in_dest = diff_t[2]
dest_row = diff_t[3]
has_been_changed = diff_t[4]
if exists_in_source:
source_str = nori.pps(source_row[1])
elif exists_in_source is None:
source_str = '[no value match in source database]'
else:
source_str = '[no key match in source database]'
if exists_in_dest:
dest_str = nori.pps(dest_row[1])
elif exists_in_dest is None:
dest_str = '[no value match in destination database]'
else:
dest_str = '[no key match in destination database]'
if has_been_changed is None:
changed_str = 'unchanged'
elif not has_been_changed:
changed_str = (
'partially changed - action may be needed!'
)
else:
changed_str = 'changed'
diff_report += (
'Source: {0}\nDest: {1}\nStatus: {2}\n\n' .
format(source_str, dest_str, changed_str)
)
diff_report += '\n'
elif nori.core.cfg['report_order'] == 'keys':
for key_str in diff_dict:
section_header = ('Key tuple {0}:' .
format(nori.pps(key_str)))
section_header += '\n' + ('-' * len(section_header)) + '\n\n'
diff_report += section_header
for diff_t in diff_dict[key_str]:
template_index = diff_t[0]
exists_in_source = diff_t[1]
source_row = diff_t[2]
exists_in_dest = diff_t[3]
dest_row = diff_t[4]
has_been_changed = diff_t[5]
template = nori.core.cfg['templates'][template_index]
if exists_in_source:
num_keys = source_row[0]
source_data = source_row[1]
source_str = nori.pps(source_data[num_keys:])
elif exists_in_source is None:
source_str = '[no value match in source database]'
else:
source_str = '[no key match in source database]'
if exists_in_dest:
num_keys = dest_row[0]
dest_data = dest_row[1]
dest_str = nori.pps(dest_data[num_keys:])
elif exists_in_dest is None:
dest_str = '[no value match in destination database]'
else:
dest_str = '[no key match in destination database]'
if has_been_changed is None:
changed_str = 'unchanged'
elif not has_been_changed:
changed_str = (
'partially changed - action may be needed!'
)
else:
changed_str = 'changed'
diff_report += (
'Template: {0}\nSource: {1}\nDest: {2}\n'
'Status: {3}\n\n' .
format(template[T_NAME_KEY], source_str, dest_str,
changed_str)
)
diff_report += '\n'
return diff_report.strip()
|
da02e6b9dee4424d217d0f0839938a1aff9250df
| 3,642,869
|
def get_step_handler_for_gym_env(gym_env_name: str, cfg: Configuration) -> StepRewardDoneHandler:
"""Return an example step handler for the given gym environemtn name, that uses the
given config file."""
if gym_env_name == 'Acrobot-v1':
handler = AcrobotStepHandler(cfg)
elif gym_env_name == 'CartPole-v1':
handler = CartPoleStepHandler(cfg)
elif gym_env_name == 'MountainCarContinuous-v0':
handler = ContinuousMountainCarStepHandler(cfg)
elif gym_env_name == 'MountainCar-v0':
handler = MountainCarStepHandler(cfg)
elif gym_env_name == 'Pendulum-v0':
handler = PendulumStepHandler(cfg)
else:
raise NotImplementedError(f'No support for this gym env: {gym_env_name}')
return handler
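# Usage sketch (assumes a Configuration instance `cfg` has been built elsewhere):
def _example_get_step_handler(cfg):
    # For CartPole this returns a CartPoleStepHandler, a StepRewardDoneHandler subclass.
    return get_step_handler_for_gym_env('CartPole-v1', cfg)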
|
51164ee7c3da5184f221d1e658c7e1ddc73585de
| 3,642,870
|
import os
def get_post_ids() -> list:
"""
"""
create_directory(WORK_PATH)
list_of_files_and_folders = os.listdir(WORK_PATH)
list_of_folders = []
for p in list_of_files_and_folders:
path = f'{WORK_PATH}/{p}'
if os.path.isdir(path):
list_of_folders.append(p)
list_of_post_ids = []
for p in list_of_folders:
if int(p) > 0:
list_of_post_ids.append(int(p))
return list_of_post_ids
|
45abeccb2af1edb382d720981d3e8b14c08133ca
| 3,642,871
|
import traceback
from importlib import import_module
def get_module(mod_name):
"""Import module and return."""
try:
return import_module(mod_name)
except ImportError:
logger.error('Failed to import module "%s".' % mod_name)
logger.error(traceback.format_exc())
raise
|
7cb81ff17f3d49bee3d53549e415c14ff4c13512
| 3,642,872
|
def sort_ranks(ranks):
"""Sort ranks by MAIN_RANKS order.
Parameters
----------
ranks
Ranks to sort
Returns
-------
        Sorted ranks, or False if ``ranks`` is empty
"""
ret = False
ranks = list(ranks) if not isinstance(ranks, list) else ranks
if len(ranks) > 0:
ret = [rank for rank in VALID_RANKS if rank in ranks]
return ret
|
e86b985a83153c46f53d7d31f849d1c5c10a6d66
| 3,642,873
|
def formalize_rules(list_rules):
""" Gives an list of rules where
facts are separeted by coma.
Returns string with rules in
convinient form (such as
'If' and 'Then' words, etc.).
"""
text = ''
for r in list_rules:
t = [i for i in r.split(',') if i]
text += 'If %s,\n' % t[0]
for i in t[1:-1]:
text += ' %s,\n' % i
text += 'Then: %s.\n' % t[-1]
return text
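# Example input shape: each rule is one comma-separated string whose first item
# is the leading condition and whose last item is the conclusion.
def _example_formalize_rules():
    rules = ['is_bird,has_wings,can_fly', 'is_fish,can_swim']
    text = formalize_rules(rules)
    print(text)  # "If is_bird," ... "Then: can_fly." then "If is_fish," "Then: can_swim."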
|
d8fbb024f38ae097efa42f95efe6b5d3b5adbd71
| 3,642,874
|
def filenames_to_labels(filenames, filename_label_dict):
"""Converts filename strings to integer labels.
Args:
filenames (List[str]): The filenames of the images.
filename_label_dict (Dict[str, int]): A dictionary mapping filenames to
integer labels.
Returns:
ndarray: Integer labels
"""
return np.asarray([int(filename_label_dict[filename]) for filename in filenames])
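# Small worked example (assumes numpy is imported as np, as in the function above):
def _example_filenames_to_labels():
    mapping = {'cat_001.jpg': 0, 'dog_042.jpg': 1}
    labels = filenames_to_labels(['dog_042.jpg', 'cat_001.jpg'], mapping)
    # labels == np.array([1, 0])
    return labels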
|
7dad11665aa3858dac7cd91757367f4ab72629cb
| 3,642,875
|
import pickle as pick
def load_model():
    """
    Load the saved model that was provided.
    Returns
    ----------
    model
        The provided model
    tokenizer
        The provided tokenizer (not fully understood)
    """
    with open(MODEL_DIR + 'model.pickle', 'rb') as f:
        model = pick.load(f)
    with open(MODEL_DIR + 'tokenizer.pickle', 'rb') as f:
        tokenizer = pick.load(f)
    return model, tokenizer
|
abea695c9e56af585e37694b8a022b8634ccfe79
| 3,642,876
|
from datetime import datetime
from uuid import uuid4
def post(post_id):
"""View function for post page"""
# Form object: `Comment`
form = CommentForm()
    # form.validate_on_submit() is True when the HTTP request is POST and the
    # submitted data validates; the form instance then holds the user's input
if form.validate_on_submit():
new_comment = Comment(id=str(uuid4()),
name=form.name.data)
new_comment.text = form.text.data
new_comment.date = datetime.now()
new_comment.post_id = post_id
db.session.add(new_comment)
db.session.commit()
post = db.session.query(Post).get_or_404(post_id)
tags = post.tags
comments = post.comments.order_by(Comment.date.desc()).all()
recent, top_tags = sidebar_data()
return render_template('post.html',
post=post,
tags=tags,
comments=comments,
form=form,
recent=recent,
top_tags=top_tags)
|
28a5e611e370a33a609cabb4d3ec827284911ccc
| 3,642,877
|
def pipe(val, *funcs):
"""Pipe a value through a sequence of functions
I.e. ``pipe(val, f, g, h)`` is equivalent to ``h(g(f(val)))``
>>> double = lambda i: 2 * i
>>> pipe(3, double, str)
'6'
"""
if not funcs:
raise PipeNotGivenAnyFunctions
if any_is_async(funcs):
return async_functions.compose(*reversed(funcs))(val)
for f in funcs:
val = f(val)
return val
|
c2815c7842df2a1d1e07a628b613da0a8ffd35f5
| 3,642,878
|
def do_query(method, query, values):
"""Executes a query on a DFP API method, returning a list of results."""
# Trap exceptions here instead of in caller?
statement = dfp.FilterStatement(query, values)
data = []
while True:
response = method(statement.ToStatement())
if 'results' in response:
data += response['results']
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
return data
|
3b53e992e4cb49b1814593147826f3d3b4e2bfa8
| 3,642,879
|
def markdown_format(text):
"""
The outside param 'name' is similar to "tag" (in 'usage')
which it'll determines how you use it, e.g. {{ THING | FILTER }}.
Just a reminder
for tag, {% my_post_count %}
for filter, {{ post.body | truncatewords:30 }}
Why using `mark_safe`?
It simply marks the string as 'safe' for HTML output.
It was used pretty ?everywhere (in short: ready-for-rendering).
Also, about the 'markdown',
I simply typed these (& not digging more ...), =_=!
"""
return mark_safe(markdown.markdown(text))
|
663781e441b305a26bb0d85fb45302539159aa92
| 3,642,880
|
from typing import List
def _find_tex_env_recursive(original_s: str, s: str, offset: int = 0, depth: int = 0) -> List:
"""
Find all environments.
:param s: Latex string code
:param offset: Offset applied to the search
:return: Tuple of all commands
"""
tags = find_tex_commands(s, offset=offset)
new_tags = []
for t in tags:
a, b, c, d, _ = t
source_cmd = s[a - offset:b - offset + 1]
if 'begin' not in source_cmd and 'end' not in source_cmd:
# Get the arguments of the command, and check more environments there
cmd_args = s[c - offset:d - offset + 1]
if 'begin' in cmd_args or 'end' in cmd_args:
if 'newenvironment' in source_cmd or 'newcommand' in source_cmd: # Prone to bugs
continue
for tr in _find_tex_env_recursive(original_s, cmd_args, offset=c, depth=depth + 1):
new_tags.append(tr)
else:
new_tags.append(t)
return new_tags
|
a92f25a19be59ab8f89540501bf2a7b9975c3ea9
| 3,642,881
|
from typing import Iterable
from typing import List
def group_by_instance_type(
jobs: Iterable[JobConfiguration],
) -> List[List[JobConfiguration]]:
"""
Group job-configuration into different queues depending on which instance
each job should be run. This returns a list of the different queues.
>>> group_by_instance_type( # doctest: +SKIP
... [
... {"ResourceConfig": {"InstanceType": 1}, "name": 1},
... {"ResourceConfig": {"InstanceType": 2}, "name": 2},
... {"ResourceConfig": {"InstanceType": 2}, "name": 3},
... ]
... )
[
[
{"ResourceConfig": {"InstanceType": 1}, "name": 1}
],
[
{"ResourceConfig": {"InstanceType": 2}, "name": 2},
{"ResourceConfig": {"InstanceType": 2}, "name": 3},
],
]
"""
return list(
groupby(
lambda job_config: job_config["ResourceConfig"]["InstanceType"],
jobs,
).values()
)
|
8c9baf76de4089972c87f7f71b66abec236e23d3
| 3,642,882
|
import numpy as np
import scipy.special
def integral_func(phi, th1, n):
    """ Used in computing the continuous hypersphere cap intersection below. """
    return np.sin(phi)**(n - 2) * scipy.special.betainc(
        (n - 2) / 2, 1 / 2, 1 - (np.tan(th1) / np.tan(phi))**2)
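# Hedged usage sketch: the docstring says this integrand feeds a hypersphere-cap
# intersection computation; one plausible way to evaluate it is numerical
# quadrature over phi (the bounds and parameter values below are illustrative
# assumptions, not taken from the original code).
def _example_integral_func(th1=0.3, n=10):
    from scipy.integrate import quad
    value, abserr = quad(integral_func, th1, np.pi / 2, args=(th1, n))
    return value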
|
f086da8f5086d13abfced0c05b41d419d4e7d6b0
| 3,642,883
|
def test_model(image_path, class_names, img_height, img_width):
    """Test your model on a single image."""
    img = keras.preprocessing.image.load_img(image_path, target_size=(img_height, img_width))  # load the image in PIL format
    input_array = keras.preprocessing.image.img_to_array(img)  # convert the PIL image instance to a Numpy array
    input_array = np.array([input_array])  # as described for load_img
    # input_array = tf.expand_dims(input_array, 0)  # Create a batch  # alternatively use expand_dims to add the batch dimension
    # print('input_array: ', input_array)
    input_array = preprocess_input(input_array)
    predictions = model.predict(input_array)[0]  # feed in the test data and get the prediction
    class_index = int(np.argmax(predictions))  # index of the largest prediction score
    max_value = predictions[class_index]  # value at that index; scores <= 1 are scaled by 100 below
    class_score = 100 * np.max(predictions) if max_value <= 1 else np.max(predictions)  # confidence: maximum of the prediction array
    print("This image is most likely: {} with confidence: {:.2f} %".format(class_names[class_index], class_score))
    return class_names[class_index]
|
5f661c231dbc459e9e6d24f9ddeecbed15dc0e0d
| 3,642,884
|
def order(order_id,complete):
"""
Charge completion return URL. Once the customer is redirected
back to this site from the authorization page, we search for the
charge based on the provided `order_id`.
"""
return render_template(
"complete.html",
order_id=order_id,
complete=complete
)
|
c7d7d385c23ef24748d5b4e847aaf9acd2212cbd
| 3,642,885
|
import logging
import boto3
def load_dimension_subdag(
parent_dag_name,
task_id,
redshift_conn_id,
*args, **kwargs):
"""
A python function with arguments, which creates a dag
:param parent_dag_name: imp ({parent_dag_name}.{task_id})
:param task_id: imp {task_id}
:param redshift_conn_id: {any connection id}
:param args: {verbose}
:param kwargs: {verbose and context variables}
:return:
"""
dag = DAG(
f"{parent_dag_name}.{task_id}",
**kwargs
)
copy_ports = StageToRedshiftOperator(
task_id='copy_ports',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
file='i94port.csv',
delimiter=',',
table='i94ports',
s3_bucket=Variable.get("s3_bucket"),
s3_key="csv",
sql_stmt=SqlQueries.copy_csv_cmd,
provide_context=True)
copy_visa = StageToRedshiftOperator(
task_id='copy_visa',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
file='i94visa.csv',
delimiter=',',
table='i94visa',
s3_bucket=Variable.get("s3_bucket"),
s3_key="csv",
sql_stmt=SqlQueries.copy_csv_cmd,
provide_context=True)
copy_modes = StageToRedshiftOperator(
task_id='copy_modes',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
file='i94mode.csv',
delimiter=',',
table='i94mode',
s3_bucket=Variable.get("s3_bucket"),
s3_key="csv",
sql_stmt=SqlQueries.copy_csv_cmd,
provide_context=True)
copy_addr = StageToRedshiftOperator(
task_id='copy_addr',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
file='i94addr.csv',
delimiter=',',
table='i94addr',
s3_bucket=Variable.get("s3_bucket"),
s3_key="csv",
sql_stmt=SqlQueries.copy_csv_cmd,
provide_context=True)
copy_country_codes = StageToRedshiftOperator(
task_id='copy_country_codes',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
file='i94cit&i94res.csv',
delimiter=',',
table='i94res',
s3_bucket=Variable.get("s3_bucket"),
s3_key="csv",
sql_stmt=SqlQueries.copy_csv_cmd,
provide_context=True)
copy_cities_demographics = StageToRedshiftOperator(
task_id='copy_cities_demographics',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
file='us-cities-demographics.csv',
delimiter=';',
table='us_cities_demographics',
s3_bucket=Variable.get("s3_bucket"),
s3_key="csv",
sql_stmt=SqlQueries.copy_csv_cmd,
provide_context=True)
copy_airports = StageToRedshiftOperator(
task_id='copy_airports',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
file='airport-codes_csv.csv',
delimiter=',',
table='airport_codes',
s3_bucket=Variable.get("s3_bucket"),
s3_key="csv",
sql_stmt=SqlQueries.copy_csv_cmd,
provide_context=True)
def parquet_to_redshift(table, s3_bucket, s3_key, iam_role,
sql_stmt, redshift_conn_id, **kwargs):
"""
This function reads parquet files and copies them to redshift
schema.db
:param table:
:param s3_bucket:
:param s3_key:
:param iam_role:
:param sql_stmt:
:param redshift_conn_id:
:param kwargs:
:return:
"""
redshift = PostgresHook(postgres_conn_id=redshift_conn_id)
logging.info("Copying data from S3 to Redshift")
s3_path = "s3://{}/{}".format(s3_bucket, s3_key)
formatted_sql = sql_stmt.format(
table,
s3_path,
iam_role
)
redshift.run(formatted_sql)
aws_hook = AwsHook("aws_default")
credentials = aws_hook.get_credentials()
client = boto3.client('s3',
aws_access_key_id=credentials.access_key,
aws_secret_access_key=credentials.secret_key)
objects_to_delete = client.list_objects(
Bucket=Variable.get("s3_bucket"), Prefix="parquet")
delete_keys = {'Objects': []}
delete_keys['Objects'] = [{'Key': k} for k in
[obj['Key'] for obj in
objects_to_delete.get('Contents',
[])]]
client.delete_objects(Bucket=Variable.get("s3_bucket"),
Delete=delete_keys)
copy_immigration = PythonOperator(
task_id='copy_immigration',
python_callable=parquet_to_redshift, # changed
provide_context=True,
op_kwargs={'table': "immigration",
's3_bucket': Variable.get("s3_bucket"),
's3_key': 'parquet',
'iam_role': Variable.get('iam_role'),
'sql_stmt': SqlQueries.copy_parquet_cmd,
'redshift_conn_id': 'redshift'},
dag=dag
)
copy_ports
copy_visa
copy_modes
copy_addr
copy_country_codes
copy_airports
copy_cities_demographics
copy_immigration
return dag
|
d68348b51aef7ad38999b2383f41721a178af451
| 3,642,886
|
import os
def get_ns_lns_ids_config_file():
"""Reads node_id to host name mapping from one of the config files in the map"""
assert exp_config.node_config_folder is not None and os.path.exists(exp_config.node_config_folder)
files = os.listdir(exp_config.node_config_folder)
# read mapping from any file
return read_node_to_hostname_mapping(os.path.join(exp_config.node_config_folder, files[0]))
|
17e162aaf68ef4df8ba730c3104b57e99ee783be
| 3,642,887
|
import os
import subprocess
def find(name, environment=None, guess=None):
"""Finds a particular binary on this system.
Attempts to find the binary given by ``name``, first checking the value of
the environment variable named ``environment`` (if provided), then by
checking the system path, then finally checking hardcoded paths in
``guess`` (if provided). This function is cross-platform compatible - it
works on Windows, Linux, and Mac. If there are spaces in the path found,
this function will wrap its return value in double quotes.
Args:
name (str): Binary name.
environment (str): An optional environment variable to check.
guess (iterable): An optional list of hardcoded paths to check.
Returns:
A string with the absolute path to the binary if found, otherwise
``None``.
"""
def sanitize(path):
quotes = ("'", "'")
if " " in path and path[0] not in quotes and path[-1] not in quotes:
path = '"{}"'.format(path)
return path
if environment:
path = os.environ.get(environment)
if path is not None:
path = os.path.abspath(os.path.expanduser(path))
if os.path.isfile(path):
return sanitize(path)
if os.name == "posix":
search = "which"
elif os.name == "nt":
search = "where.exe"
else:
raise EnvironmentError("unknown platform: {}".format(os.name))
try:
with open(os.devnull, "w") as output:
path = subprocess.check_output([search, name], stderr=output).decode(
"utf-8"
)
return sanitize(os.path.abspath(path.strip()))
except subprocess.CalledProcessError:
pass
if guess:
for path in guess:
if os.path.isfile(path):
return sanitize(path)
return None
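# Usage sketch: locate the `git` binary, first honouring a (hypothetical)
# GIT_BINARY environment variable, then PATH, then a couple of common guesses.
def _example_find_git():
    return find(
        "git",
        environment="GIT_BINARY",
        guess=["/usr/local/bin/git", r"C:\Program Files\Git\bin\git.exe"],
    )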
|
d3f8d4375804dc54e0187b6b3f8ab53b2120acd7
| 3,642,888
|
import random
import string
import os
from datetime import datetime
def upload_to(path):
"""
Generates unique ascii filename before saving. Supports strftime()
formatting as django.db.models.FileField.upload_to does.
Example:
class SomeModel(models.Model):
picture = models.ImageField(upload_to=upload_to('my_model_uploads/'))
It is possible to define `upload_to` folder depending on model.
Declare dict `IMAGE_UPLOAD_TO` in settings:
{
'ModelName': 'path for upload_to"',
}
And provide None to upload_to func as path.
"""
def upload_callback(instance, filename):
random_fname = ''.join(
random.choice(string.ascii_uppercase + string.digits) for x in range(16))
random_fname += os.path.splitext(filename)[-1]
if path is None:
img_path = path_dict.get(instance.__class__.__name__, "images")
else:
img_path = path
        img_path = os.path.normpath(force_text(
            datetime.now().strftime(force_str(img_path))))
return '%s/%s' % (img_path.rstrip('/'), random_fname)
return upload_callback
|
4d3b9ff7f95d20bc4234ce27453ad582e6218a18
| 3,642,889
|
from typing import List
def standardize_measurements_lastref(measurements: List[Measurement], remove_ref: bool = True) \
-> List[Measurement]:
""" Sets the standardization of all measurement to the Reference Measurement before """
last_null_meas = None
clean_measurements = []
for measurement in measurements:
isref = measurement.is_reference()
if isref:
last_null_meas = measurement
measurement.set_reference(last_null_meas, StandardizationType.LAST_REFERENCE)
if not isref or not remove_ref:
if last_null_meas is None:
raise ValueError("ERROR - NO NULL MEASUREMENT FOUND")
clean_measurements.append(measurement)
return clean_measurements
|
34c73375fcada6a19e9c7e876b252f44fc6f9415
| 3,642,890
|
import os
import re
def blg2texkey(filename):
"""Extract TeX keys from a .blg file."""
keys = []
if not os.path.exists(filename):
LOGGER.error("File %s not found.", filename)
return keys
with open(filename, "r") as f:
lines = f.readlines()
# regexp to match 'Warning--I didn\'t find a database entry for "..."' (bibtex)
# or 'WARN - I didn\'t find a database entry for '...'' (biber)
pattern = re.compile(
r".*I didn\'t find a database entry for [\"\'](?P<keys>[^\"]+)[\"\'].*"
)
# get nested list of texkeys
keys = [
re.search(pattern, c).group("keys").split(",")
for c in lines
if re.match(pattern, c)
]
# flatten nested list
keys = [item for sublist in keys for item in sublist]
# remove duplicates
keys = list(set(keys))
# remove blacklisted keys
keys = [x for x in keys if x not in BLACKLISTED_KEYS]
return keys
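# Usage sketch (hypothetical file name): collect the citation keys that
# bibtex/biber reported as missing from the database.
def _example_blg2texkey():
    missing = blg2texkey("main.blg")
    for key in sorted(missing):
        print(key)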
|
11eb2bbffbcb6052638a53b25ca60a2ace63f3a6
| 3,642,891
|
import torch
from megatron import get_args
def build_train_valid_test_data_iterators(
        build_train_valid_test_datasets_provider):
    """Build train, validation, and test data iterators."""
args = get_args()
(train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
print_rank_0('> building train, validation, and test datasets ...')
# Backward compatibility, assume fixed batch size.
if args.iteration > 0 and args.consumed_train_samples == 0:
assert args.train_samples is None, \
'only backward compatiblity support for iteration-based training'
args.consumed_train_samples = args.iteration * args.global_batch_size
if args.iteration > 0 and args.consumed_valid_samples == 0:
if args.train_samples is None:
args.consumed_valid_samples = (args.iteration // args.eval_interval) * \
args.eval_iters * args.global_batch_size
# Data loader only on rank 0 of each model parallel group.
if mpu.get_tensor_model_parallel_rank() == 0:
# Number of train/valid/test samples.
if args.train_samples:
train_samples = args.train_samples
else:
train_samples = args.train_iters * args.global_batch_size
eval_iters = (args.train_iters // args.eval_interval + 1) * \
args.eval_iters
test_iters = args.eval_iters
train_val_test_num_samples = [train_samples,
eval_iters * args.global_batch_size,
test_iters * args.global_batch_size]
print_rank_0(' > datasets target sizes (minimum size):')
print_rank_0(' train: {}'.format(train_val_test_num_samples[0]))
print_rank_0(' validation: {}'.format(train_val_test_num_samples[1]))
print_rank_0(' test: {}'.format(train_val_test_num_samples[2]))
# Build the datasets.
train_ds, valid_ds, test_ds = build_train_valid_test_datasets_provider(
train_val_test_num_samples)
# Build dataloders.
train_dataloader = build_pretraining_data_loader(
train_ds, args.consumed_train_samples)
valid_dataloader = build_pretraining_data_loader(
valid_ds, args.consumed_valid_samples)
test_dataloader = build_pretraining_data_loader(test_ds, 0)
# Flags to know if we need to do training/validation/testing.
do_train = train_dataloader is not None and args.train_iters > 0
do_valid = valid_dataloader is not None and args.eval_iters > 0
do_test = test_dataloader is not None and args.eval_iters > 0
# Need to broadcast num_tokens and num_type_tokens.
flags = torch.cuda.LongTensor(
[int(do_train), int(do_valid), int(do_test)])
else:
flags = torch.cuda.LongTensor([0, 0, 0])
# Broadcast num tokens.
torch.distributed.broadcast(flags,
mpu.get_tensor_model_parallel_src_rank(),
group=mpu.get_tensor_model_parallel_group())
args.do_train = flags[0].item()
args.do_valid = flags[1].item()
args.do_test = flags[2].item()
# Build iterators.
dl_type = args.dataloader_type
assert dl_type in ['single', 'cyclic']
if train_dataloader is not None:
train_data_iterator = iter(train_dataloader) if dl_type == 'single' \
else iter(cyclic_iter(train_dataloader))
else:
train_data_iterator = None
if valid_dataloader is not None:
valid_data_iterator = iter(valid_dataloader) if dl_type == 'single' \
else iter(cyclic_iter(valid_dataloader))
else:
valid_data_iterator = None
if test_dataloader is not None:
test_data_iterator = iter(test_dataloader) if dl_type == 'single' \
else iter(cyclic_iter(test_dataloader))
else:
test_data_iterator = None
return train_data_iterator, valid_data_iterator, test_data_iterator
|
1bf5c79519df289f88ab5372c6ccb963a77ce5cd
| 3,642,892
|
import os
def get_lib_ver(library_path=""):
"""Returns the version of the Minipresto library.
### Parameters
- `library_path`: The Minipresto library directory."""
version_file = os.path.join(library_path, "version")
try:
with open(version_file, "r") as f:
for line in f:
line = line.strip()
if line:
return line
return "NOT FOUND"
    except OSError:
        return "NOT FOUND"
|
e42b029762ca8e6baee12062464134f13ae71522
| 3,642,893
|
def width():
"""Get console width."""
x, y = get()
return x
|
a6090038a4c97e215e57e0f7966ec41c09682f90
| 3,642,894
|
import yaml
def load_yaml_config(path):
"""returns the config parsed based on the info in the flags.
Grabs the config file, written in yaml, slurps it in.
"""
with open(path) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
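# Usage sketch (hypothetical path and key):
def _example_load_yaml_config():
    config = load_yaml_config("config.yaml")
    return config.get("training", {})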
|
0ee100a6e4d25881f8b8ab4ced723f600e878e28
| 3,642,895
|
def convert_year(years, debug=False):
    """Example usage: db['date'] = cln.convert_year(db['date']) """
    for i, yr in years.items():
        if debug:
            print(yr)
            print(type(yr))
        if yr is None:
            years.at[i] = np.nan
            continue
        if is_int(yr):
            continue
        if isinstance(yr, float):
            if np.isnan(yr):
                continue
        yr = q_html(yr)
        yr = q_all(yr)
        yr = dedashslash(yr)
        if is_int(yr):
            years.at[i] = int(yr)
        else:
            years.at[i] = np.nan
    return years
|
605ab3d0554942de9a3ea46f35a304d6cd25a7ed
| 3,642,896
|
def home(request):
"""View function for home page of site."""
return laboratorio_list(request)
|
a06bcbdb2b7edb79ee1d30cd69329742b24f2f49
| 3,642,897
|
def scan_armatures(context):
"""
scans the selected objects or the scene for a source (regular)
armature and a destination (Make Human) armature
"""
src = (
scan_for_armature(context.selected_objects)
or scan_for_armature(context.scene.objects)
)
dst = (
scan_for_armature(context.selected_objects, look_for_mhx=True)
or scan_for_armature(context.scene.objects, look_for_mhx=True)
)
if not src or not dst:
raise LookupError("Couldn't find source or target armatures")
return src, dst
|
a613c39767280919a684dcf8eaa4e537ffc2ebb3
| 3,642,898
|
def populate_instance(msg, inst):
"""
:param msg: contains the values to use to populate inst.
:param inst: message class instance to populate.
:return: an instance of the provided message class, with its fields populated according to the values in msg
"""
return _to_inst(msg, type(inst).__name__, type(inst).__name__, inst)
|
7a148629ad178be632c9388fc536e7fea02c44ed
| 3,642,899
|