| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def delete_user(user_id: int):
"""删除"""
user = User.query.filter(
User.user_id == user_id
).first()
if not user:
return dbu.inner_error("该不存在或已被删除")
db.session.delete(user)
try:
db.session.commit()
except Exception as e:
current_app.logger.warning(e)
db.session.rollback()
return dbu.inner_error("删除失败")
return jsonify({"data": {"user_id": user_id}, "code": 200, "msg": "删除角色成功"}) | 5,324,400 |
def _combine_odds(odds):
"""Combine odds of different targets."""
combined_odds = 1 / (1 / odds).sum(axis=1)
return combined_odds | 5,324,401 |
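A quick worked example of the combination rule above, assuming `odds` is a pandas DataFrame with one column per target (the function only requires elementwise division and a row-wise sum):

import pandas as pd
odds = pd.DataFrame({"target_a": [2.0, 4.0], "target_b": [2.0, 4.0]})
# row 0: 1 / (1/2 + 1/2) = 1.0; row 1: 1 / (1/4 + 1/4) = 2.0
combined = 1 / (1 / odds).sum(axis=1)
print(combined.tolist())  # [1.0, 2.0]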
def db(port, dirpath):
"""Run HTTP api/database server"""
env = environ.copy()
if port is not None:
env["MLRUN_httpdb__port"] = str(port)
if dirpath is not None:
env["MLRUN_httpdb__dirpath"] = dirpath
cmd = [executable, "-m", "mlrun.api.main"]
child = Popen(cmd, env=env)
returncode = child.wait()
if returncode != 0:
raise SystemExit(returncode) | 5,324,402 |
def deleteAndMove(dir, files='*.fits'):
"""
    :param dir: target directory; created if missing, otherwise cleared of matching files
    :param files: glob pattern of the files to move (default '*.fits')
    :return: None
"""
if not os.path.exists(dir):
os.makedirs(dir)
else:
for f in glob.glob(dir + '/' + files):
os.remove(f)
for f in glob.glob(files):
shutil.move(f, './'+dir+'/'+f) | 5,324,403 |
def decrypt(pp, skx, cty, max_innerprod=100):
"""
Performs the decrypt algorithm for IPE on a secret key skx and ciphertext cty.
The output is the inner product <x,y>, so long as it is in the range
[0,max_innerprod].
"""
(k1, k2) = skx
(c1, c2) = cty
d1 = pair(k1, c1)
d2 = innerprod_pair(k2, c2)
# check for unit element
# gt = group.random(GT)
# if(gt == gt * d2):
# print("Unit")
# return True
# return False
return solve_dlog_bsgs(d1, d2, max_innerprod + 1) | 5,324,404 |
def alpha_blend_colors(colors, additional_alpha=1.0):
"""
Given a sequence of colors, return the alpha blended color.
This assumes the last color is the one in front.
"""
    dstr, dstg, dstb, dsta = COLOR_CONVERTER.to_rgba(colors[0])
    dsta *= additional_alpha
    for color in colors[1:]:
        # composite each later color *over* the accumulated color,
        # so the last color ends up in front, as the docstring states
        srcr, srcg, srcb, srca = COLOR_CONVERTER.to_rgba(color)
        srca *= additional_alpha
        outa = srca + dsta * (1 - srca)
        outr = (srcr * srca + dstr * dsta * (1 - srca)) / outa
        outg = (srcg * srca + dstg * dsta * (1 - srca)) / outa
        outb = (srcb * srca + dstb * dsta * (1 - srca)) / outa
        dsta, dstr, dstg, dstb = outa, outr, outg, outb
    return dstr, dstg, dstb, dsta | 5,324,405 |
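To sanity-check the "over" compositing arithmetic with plain tuples (no COLOR_CONVERTER needed): half-transparent red in front of opaque white should give an opaque pink.

white = (1.0, 1.0, 1.0, 1.0)   # backdrop
red = (1.0, 0.0, 0.0, 0.5)     # half-transparent, in front
out_a = red[3] + white[3] * (1 - red[3])                                # 1.0
out_r = (red[0] * red[3] + white[0] * white[3] * (1 - red[3])) / out_a  # 1.0
out_g = (red[1] * red[3] + white[1] * white[3] * (1 - red[3])) / out_a  # 0.5
print(out_r, out_g, out_a)  # 1.0 0.5 1.0 -> pink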
def ruled(nrb1, nrb2):
"""
Construct a ruled surface/volume
between two NURBS curves/surfaces.
Parameters
----------
nrb1, nrb2 : NURBS
"""
assert nrb1.dim == nrb2.dim
assert nrb1.dim <= 2
assert nrb2.dim <= 2
nrb1, nrb2 = compat(nrb1, nrb2)
Cw = np.zeros(nrb1.shape+(2,4),dtype='d')
Cw[...,0,:] = nrb1.control
Cw[...,1,:] = nrb2.control
UVW = nrb1.knots + ([0,0,1,1],)
return NURBS(UVW, Cw) | 5,324,406 |
async def db_head_state():
"""Status/health check."""
sql = ("SELECT num, created_at, extract(epoch from created_at) ts "
"FROM hive_blocks ORDER BY num DESC LIMIT 1")
row = DB.query_row(sql)
return dict(db_head_block=row['num'],
db_head_time=str(row['created_at']),
db_head_age=int(time.time() - row['ts'])) | 5,324,407 |
def d_d_theta_inv(y, alpha):
"""
xi'(y) = 1/theta''(xi(y)) > 0
= alpha / (1 - |y|)^2
Nikolova et al 2014, table 1, theta_2 and eq 5.
"""
assert -1 < y < 1 and alpha > 0
denom = 1 - abs(y)
return alpha / (denom*denom) | 5,324,408 |
async def add_item(
item: ItemEditableFields,
db_session: Session = Depends(get_db_session)
):
"""This handler adds item to the DB.
Args:
item: ItemEditableFields. The data of item to be added to the DB.
db_session: Session. The database session used to interact with the DB.
Returns:
Item. The item object that was inserted.
Raises:
HTTPException. Item already exists in the DB.
"""
existing_item = item_crud.get_by_sku(db_session, item.sku)
if existing_item:
raise HTTPException(status_code=412, detail='Item already exists.')
return item_crud.create(db_session, Item(**item.dict())) | 5,324,409 |
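A minimal sketch of exercising such a handler with FastAPI's TestClient; the route path /items, the field names, and the app wiring are illustrative assumptions, not taken from the snippet:

from fastapi.testclient import TestClient
client = TestClient(app)  # `app` is assumed to be the FastAPI application with add_item mounted at POST /items
resp = client.post("/items", json={"sku": "ABC-123", "name": "Widget"})
assert resp.status_code == 200
# posting the same SKU again should trip the duplicate check
resp = client.post("/items", json={"sku": "ABC-123", "name": "Widget"})
assert resp.status_code == 412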
def center_crop(img, crop_height, crop_width):
""" Crop the central part of an image.
Args:
img (ndarray): image to be cropped.
crop_height (int): height of the crop.
crop_width (int): width of the crop.
Return:
(ndarray): the cropped image.
"""
def get_center_crop_coords(height, width, crop_height, crop_width):
y1 = (height - crop_height) // 2
y2 = y1 + crop_height
x1 = (width - crop_width) // 2
x2 = x1 + crop_width
return x1, y1, x2, y2
height, width = img.shape[:2]
x1, y1, x2, y2 = get_center_crop_coords(
height, width, crop_height, crop_width)
return img[y1:y2, x1:x2, ...] | 5,324,410 |
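A quick check of the coordinate arithmetic on a 6x6 array:

import numpy as np
img = np.arange(36).reshape(6, 6)
# y1 = (6 - 2) // 2 = 2, x1 = 2, so the crop is img[2:4, 2:4]
print(center_crop(img, 2, 2))
# [[14 15]
#  [20 21]]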
def p_py_atom_1(p):
"""py_atom : ID
| ICONST
| FCONST
| SCONST_D
| SCONST_S"""
p[0] = p[1] | 5,324,411 |
def list_medical_transcription_jobs(Status=None, JobNameContains=None, NextToken=None, MaxResults=None):
"""
Lists medical transcription jobs with a specified status or substring that matches their names.
See also: AWS API Documentation
Exceptions
:example: response = client.list_medical_transcription_jobs(
Status='QUEUED'|'IN_PROGRESS'|'FAILED'|'COMPLETED',
JobNameContains='string',
NextToken='string',
MaxResults=123
)
:type Status: string
:param Status: When specified, returns only medical transcription jobs with the specified status. Jobs are ordered by creation date, with the newest jobs returned first. If you don't specify a status, Amazon Transcribe Medical returns all transcription jobs ordered by creation date.
:type JobNameContains: string
:param JobNameContains: When specified, the jobs returned in the list are limited to jobs whose name contains the specified string.
:type NextToken: string
:param NextToken: If you receive a truncated result in the previous request of ListMedicalTranscriptionJobs, include NextToken to fetch the next set of jobs.
:type MaxResults: integer
:param MaxResults: The maximum number of medical transcription jobs to return in the response. If there are fewer results in the list, this response contains only the actual results.
:rtype: dict
ReturnsResponse Syntax
{
'Status': 'QUEUED'|'IN_PROGRESS'|'FAILED'|'COMPLETED',
'NextToken': 'string',
'MedicalTranscriptionJobSummaries': [
{
'MedicalTranscriptionJobName': 'string',
'CreationTime': datetime(2015, 1, 1),
'StartTime': datetime(2015, 1, 1),
'CompletionTime': datetime(2015, 1, 1),
'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT'|'ko-KR'|'es-ES'|'en-IN'|'hi-IN'|'ar-SA'|'ru-RU'|'zh-CN'|'nl-NL'|'id-ID'|'ta-IN'|'fa-IR'|'en-IE'|'en-AB'|'en-WL'|'pt-PT'|'te-IN'|'tr-TR'|'de-CH'|'he-IL'|'ms-MY'|'ja-JP'|'ar-AE',
'TranscriptionJobStatus': 'QUEUED'|'IN_PROGRESS'|'FAILED'|'COMPLETED',
'FailureReason': 'string',
'OutputLocationType': 'CUSTOMER_BUCKET'|'SERVICE_BUCKET',
'Specialty': 'PRIMARYCARE',
'Type': 'CONVERSATION'|'DICTATION'
},
]
}
Response Structure
(dict) --
Status (string) --
The requested status of the medical transcription jobs returned.
NextToken (string) --
The ListMedicalTranscriptionJobs operation returns a page of jobs at a time. The maximum size of the page is set by the MaxResults parameter. If the number of jobs exceeds what can fit on a page, Amazon Transcribe Medical returns the NextPage token. Include the token in the next request to the ListMedicalTranscriptionJobs operation to return the next page of jobs.
MedicalTranscriptionJobSummaries (list) --
A list of objects containing summary information for a transcription job.
(dict) --
Provides summary information about a transcription job.
MedicalTranscriptionJobName (string) --
The name of a medical transcription job.
CreationTime (datetime) --
A timestamp that shows when the medical transcription job was created.
StartTime (datetime) --
A timestamp that shows when the job began processing.
CompletionTime (datetime) --
A timestamp that shows when the job was completed.
LanguageCode (string) --
The language of the transcript in the source audio file.
TranscriptionJobStatus (string) --
The status of the medical transcription job.
FailureReason (string) --
If the TranscriptionJobStatus field is FAILED , a description of the error.
OutputLocationType (string) --
Indicates the location of the transcription job's output.
The CUSTOMER_BUCKET is the S3 location provided in the OutputBucketName field when the medical transcription job was started.
Specialty (string) --
The medical specialty of the transcription job. Primary care is the only valid value.
Type (string) --
The speech of the clinician in the input audio.
Exceptions
TranscribeService.Client.exceptions.BadRequestException
TranscribeService.Client.exceptions.LimitExceededException
TranscribeService.Client.exceptions.InternalFailureException
:return: {
'Status': 'QUEUED'|'IN_PROGRESS'|'FAILED'|'COMPLETED',
'NextToken': 'string',
'MedicalTranscriptionJobSummaries': [
{
'MedicalTranscriptionJobName': 'string',
'CreationTime': datetime(2015, 1, 1),
'StartTime': datetime(2015, 1, 1),
'CompletionTime': datetime(2015, 1, 1),
'LanguageCode': 'en-US'|'es-US'|'en-AU'|'fr-CA'|'en-GB'|'de-DE'|'pt-BR'|'fr-FR'|'it-IT'|'ko-KR'|'es-ES'|'en-IN'|'hi-IN'|'ar-SA'|'ru-RU'|'zh-CN'|'nl-NL'|'id-ID'|'ta-IN'|'fa-IR'|'en-IE'|'en-AB'|'en-WL'|'pt-PT'|'te-IN'|'tr-TR'|'de-CH'|'he-IL'|'ms-MY'|'ja-JP'|'ar-AE',
'TranscriptionJobStatus': 'QUEUED'|'IN_PROGRESS'|'FAILED'|'COMPLETED',
'FailureReason': 'string',
'OutputLocationType': 'CUSTOMER_BUCKET'|'SERVICE_BUCKET',
'Specialty': 'PRIMARYCARE',
'Type': 'CONVERSATION'|'DICTATION'
},
]
}
:returns:
TranscribeService.Client.exceptions.BadRequestException
TranscribeService.Client.exceptions.LimitExceededException
TranscribeService.Client.exceptions.InternalFailureException
"""
pass | 5,324,412 |
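Since the response is paginated via NextToken, a typical caller loops until the token disappears. A sketch with the real boto3 client (region and credential configuration omitted):

import boto3
client = boto3.client("transcribe")
jobs, token = [], None
while True:
    kwargs = {"Status": "COMPLETED", "MaxResults": 25}
    if token:
        kwargs["NextToken"] = token
    page = client.list_medical_transcription_jobs(**kwargs)
    jobs.extend(page["MedicalTranscriptionJobSummaries"])
    token = page.get("NextToken")
    if not token:
        break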
def membOutDet(input_slc, cell_mask=10, outer_mask=30, det_cutoff=0.75):
""" Detection of mYFP maxima in the line of interest.
Algorithm is going from outside to inside cell
and finding first outer maxima of the membrane.
"cell_mask" - option for hiding inner cell region
for ignoring possible cytoplasmic artefacts of fluorescence,
number of pixels to be given to zero.
"outer_mask" - option for hiding extracellular artefacts of fluorescence,
numbers of pexels
Working with diam slice only!
Returns two indexes of membrane maxima.
"""
slc = np.copy(input_slc)
    if (np.shape(slc)[0] % 2) != 0:  # parity check so the slice splits evenly into two halves
slc = slc[:-1]
slc_left, slc_right = np.split(slc, 2)
# slc_right = np.flip(slc_right)
    logging.info('Slice split!')
slc_left[-cell_mask:] = 0 # mask cellular space
slc_right[:cell_mask] = 0 #
slc_left[:outer_mask] = 0 # mask extracellular space
slc_right[-outer_mask:] = 0 #
left_peak, _ = signal.find_peaks(slc_left,
height=[slc_left.max()*det_cutoff,
slc_left.max()],
distance=10)
    if left_peak.size:  # guard so an empty peak array is handled below, not here
        logging.info('Left peak val {:.2f}'.format(slc_left[left_peak[0]]))
right_peak, _ = signal.find_peaks(slc_right,
height=[slc_right.max()*det_cutoff,
slc_right.max()],
distance=10)
    if right_peak.size:
        logging.info('Right peak val {:.2f}'.format(slc_right[right_peak[0]]))
memb_peaks = []
try:
memb_peaks.append(left_peak[0])
except IndexError:
logging.error('LEFT membrane peak NOT DETECTED!')
memb_peaks.append(0)
try:
memb_peaks.append(int(len(slc)/2+right_peak[0]))
except IndexError:
logging.error('RIGHT membrane peak NOT DETECTED!')
memb_peaks.append(0)
logging.info('L {}, R {}'.format(memb_peaks[0], memb_peaks[1]))
output_slc = np.concatenate((slc_left, slc_right))
return output_slc, memb_peaks | 5,324,413 |
def cmk_arn_value(variable_name):
"""Retrieve target CMK ARN from environment variable."""
arn = os.environ.get(variable_name, None)
if arn is None:
raise ValueError(
'Environment variable "{}" must be set to a valid KMS CMK ARN for examples to run'.format(
variable_name
)
)
if arn.startswith("arn:") and ":alias/" not in arn:
return arn
raise ValueError("KMS CMK ARN provided for examples must be a key not an alias") | 5,324,414 |
def qaoa_ansatz(gammas, betas):
"""
Function that returns a QAOA ansatz program for a list of angles betas and gammas. len(betas) ==
len(gammas) == P for a QAOA program of order P.
:param list(float) gammas: Angles over which to parameterize the cost Hamiltonian.
:param list(float) betas: Angles over which to parameterize the driver Hamiltonian.
:return: The QAOA ansatz program.
:rtype: Program.
"""
return Program([exponentiate_commuting_pauli_sum(h_cost)(g)
+ exponentiate_commuting_pauli_sum(h_driver)(b)
for g, b in zip(gammas, betas)]) | 5,324,415 |
def test_atomic_any_uri_length_2_nistxml_sv_iv_atomic_any_uri_length_3_5(mode, save_output, output_format):
"""
Type atomic/anyURI is restricted by facet length with value 34.
"""
assert_bindings(
schema="nistData/atomic/anyURI/Schema+Instance/NISTSchema-SV-IV-atomic-anyURI-length-3.xsd",
instance="nistData/atomic/anyURI/Schema+Instance/NISTXML-SV-IV-atomic-anyURI-length-3-5.xml",
class_name="NistschemaSvIvAtomicAnyUriLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 5,324,416 |
def print_header(procs_status, num_procs):
"""Print system-related info, above the process list."""
def get_dashes(perc):
dashes = "|" * int((float(perc) / 10 * 4))
empty_dashes = " " * (40 - len(dashes))
return dashes, empty_dashes
# cpu usage
percs = psutil.cpu_percent(interval=0, percpu=True)
for cpu_num, perc in enumerate(percs):
dashes, empty_dashes = get_dashes(perc)
line = " CPU%-2s [%s%s] %5s%%" % (cpu_num, dashes, empty_dashes, perc)
printl(line, color=get_color(perc))
# memory usage
mem = psutil.virtual_memory()
dashes, empty_dashes = get_dashes(mem.percent)
line = " Mem [%s%s] %5s%% %6s / %s" % (
dashes, empty_dashes,
mem.percent,
bytes2human(mem.used),
bytes2human(mem.total),
)
printl(line, color=get_color(mem.percent))
# swap usage
swap = psutil.swap_memory()
dashes, empty_dashes = get_dashes(swap.percent)
line = " Swap [%s%s] %5s%% %6s / %s" % (
dashes, empty_dashes,
swap.percent,
bytes2human(swap.used),
bytes2human(swap.total),
)
printl(line, color=get_color(swap.percent))
# processes number and status
st = []
for x, y in procs_status.items():
if y:
st.append("%s=%s" % (x, y))
st.sort(key=lambda x: x[:3] in ('run', 'sle'), reverse=1)
printl(" Processes: %s (%s)" % (num_procs, ', '.join(st)))
# load average, uptime
uptime = datetime.datetime.now() - \
datetime.datetime.fromtimestamp(psutil.boot_time())
av1, av2, av3 = psutil.getloadavg()
line = " Load average: %.2f %.2f %.2f Uptime: %s" \
% (av1, av2, av3, str(uptime).split('.')[0])
printl(line) | 5,324,417 |
def approximated_atmo_spectrum(energy):
"""Gives an approximated atmospheric neutrino spectrum.
Can be used for comparing expected true energy distribution to recorded
energy proxy distributions. It is normalised such that the weight for an
energy of 1 is equal to 1. (It is agnostic to energy units)
:param energy: True neutrino energy (in some consistent unit)
:return: Spectrum weight for that energy
"""
return energy**-3.7 | 5,324,418 |
def test_delete_token():
"""Delete Token"""
soup = TexSoup(r"""
\section{one}
text
\section{two}
delete me""")
assert 'delete me' in str(soup)
for node in soup.all:
if 'delete me' in node:
node.delete()
assert 'delete me' not in str(soup) | 5,324,419 |
def to_csv(logbook, filename, output=False):
"""
Write a logbook to a CSV file.
The output file is readable using an ordinary CSV reader, e.g. ``pandas.read_csv``.
Alternatively you may read it back into a logbook format using ``gt.ops.from_csv``.
"""
logs = []
for unique_trip_id in logbook:
log = logbook[unique_trip_id].assign(unique_trip_id=unique_trip_id)
logs.append(log)
if len(logs) == 0:
df = pd.DataFrame(
columns=[
'trip_id', 'route_id', 'action', 'minimum_time', 'maximum_time', 'stop_id',
                'latest_information_time', 'unique_trip_id'
]
)
else:
df = pd.concat(logs)
if output:
return df
else:
return df.to_csv(filename, index=False) | 5,324,420 |
def flatten_dict(d, parent_key="", sep="_"):
"""
    Flatten a nested dictionary.
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
        if v and isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items) | 5,324,421 |
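Worked example of the recursion:

nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(flatten_dict(nested))
# {'a': 1, 'b_c': 2, 'b_d_e': 3}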
def request_mxnet_inference(ip_address="127.0.0.1", port="80", connection=None, model="squeezenet"):
"""
Send request to container to test inference on kitten.jpg
    :param ip_address: IP address of the host serving the model
    :param port: port that the model server listens on
    :param model: name of the model to request a prediction from
    :connection: ec2_connection object to run the commands remotely over ssh
    :return: <bool> True/False based on result of inference
"""
conn_run = connection.run if connection is not None else run
# Check if image already exists
run_out = conn_run("[ -f kitten.jpg ]", warn=True)
if run_out.return_code != 0:
conn_run("curl -O https://s3.amazonaws.com/model-server/inputs/kitten.jpg", hide=True)
run_out = conn_run(f"curl -X POST http://{ip_address}:{port}/predictions/{model} -T kitten.jpg", warn=True)
# The run_out.return_code is not reliable, since sometimes predict request may succeed but the returned result
# is 404. Hence the extra check.
if run_out.return_code != 0 or "probability" not in run_out.stdout:
return False
return True | 5,324,422 |
def flatten(iterable):
""" Flattens an iterable, where strings and dicts
are not considered iterable.
:param iterable: The iterable to flatten.
:returns: The iterable flattened as a flat list.
>>> from dautils import collect
>>> collect.flatten([[1, 2]])
[1, 2]
"""
logger = log_api.env_logger()
logger.debug('Iterable {}'.format(iterable))
assert isiterable(iterable), 'Not iterable {}'.format(iterable)
flat = iterable
if isiterable(iterable[0]):
flat = [i for i in chain.from_iterable(iterable)]
return flat | 5,324,423 |
def get_ts_pid(pidfile):
"""Read a pidfile, return a PID."""
try:
with open(pidfile) as f:
pid = f.readline()
except EnvironmentError:
LOG.warning("Unable to read pidfile; process metrics will fail!")
pid = None
return pid | 5,324,424 |
def conjg(a):
"""
Find the complex conjugate values of the input.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array.
Returns
--------
out : af.Array
         array containing complex conjugate values from `a`.
"""
return _arith_unary_func(a, backend.get().af_conjg) | 5,324,425 |
def obj_size_avg_residual(coeffs, avg_size, class_id):
"""
:param coeffs: object sizes
    :param avg_size: mapping from class id to the mean size of that category
:param class_id: nyu class id.
:return: size residual ground truth normalized by the average size
"""
size_residual = (coeffs - avg_size[class_id]) / avg_size[class_id]
return size_residual | 5,324,426 |
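A numeric check, assuming avg_size is indexable by class id (here a 2x3 array of per-class mean sizes):

import numpy as np
avg_size = np.array([[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]])
coeffs = np.array([3.0, 2.0, 1.0])
print(obj_size_avg_residual(coeffs, avg_size, class_id=0))
# [ 0.5  0.  -0.5]   i.e. (coeffs - mean) / mean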
def search_content(obj):
"""Get the excerpt in which the searched term matches the content."""
indexable_text = []
if hasattr(obj, 'excerpt'):
if obj.excerpt:
indexable_text.append(obj.excerpt)
if not hasattr(obj, 'content_editor'):
return ' '.join(indexable_text)
content = obj.content_editor
if isinstance(content, str):
indexable_text.append(return_all_content(content))
return re.sub(r' +', ' ', ' '.join(indexable_text)).strip()
if isinstance(content, StreamValue):
indexable_text = []
text = ''
for block in content:
text = ''
if block.block_type in ALLOWED_BLOCK_TYPES:
text = return_all_content(block.render())
if text:
indexable_text.append(' %s' % text)
if indexable_text:
return re.sub(r' +', ' ', ' '.join(indexable_text)).strip()
return '' | 5,324,427 |
def get_total_num_activities():
"""Query the IATI registry and return a faceted list of activity counts and their frequencies.
The total number of activities is then calculated as the sum of the product of a count and a frequency.
E.g. if "30" is the count and the frequency is 2, then the total number of activities is 60.
"""
activity_request = requests.get(ACTIVITY_URL)
if activity_request.status_code == 200:
activity_json = json.loads(activity_request.content.decode('utf-8'))
activity_count = 0
for key in activity_json["result"]["facets"]["extras_activity_count"]:
activity_count += int(key) * activity_json["result"]["facets"]["extras_activity_count"][key]
return activity_count
else:
raise CommandError('Unable to connect to IATI registry to query activities.') | 5,324,428 |
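The count-times-frequency arithmetic in isolation, with an assumed facet payload of the shape the registry returns:

facets = {"30": 2, "12": 5}  # activity count -> number of publishers reporting it
total = sum(int(count) * freq for count, freq in facets.items())
print(total)  # 30*2 + 12*5 = 120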
def _get_chromosome_dirs(input_directory):
"""Collect chromosome directories"""
dirs = []
for d in input_directory.iterdir():
if not d.is_dir():
continue
# Just in case user re-runs and
# does not delete output files
elif d.name == 'logs':
continue
elif d.name == 'p_distance_output':
continue
else:
dirs.append(d)
return dirs | 5,324,429 |
def calculateCumTimeDiff(df):
"""
    Calculates the cumulative time difference
    between points for each track of 'df'.
"""
warnings.warn("The calculateCumTimeDiff function is deprecated and "
"will be removed in version 2.0.0. "
"Use the calculate_cum_time_diff function instead.",
FutureWarning,
stacklevel=8
)
return calculate_cum_time_diff(df) | 5,324,430 |
def optimalPriceFast(z_list, V_list, U, s_radius, max_iter = 1e4):
""" Returns tuple of optimal prices p^* chosen in hindsight, and revenue achieved
    if we know z_t (list of 1-D arrays) and V_t (list of 2-D arrays),
    and U (2-D array) is orthogonal!
Performs optimization over low-dimensional actions and is therefore fast.
"""
TOL = 1e-10 # numerical error allowed.
T = len(z_list)
z = np.zeros(z_list[0].shape)
V = np.zeros(V_list[0].shape)
N = U.shape[0]
for t in range(T):
z += z_list[t]
V += V_list[t]
z = z/T
z = z.reshape((z_list[0].shape[0],1))
V = V/T
c = np.dot(U, z)
B = np.dot(U, np.dot(V, U.transpose()))
# ensure B is positive definite:
eigvals = np.linalg.eigvals((B + B.transpose())/2.0)
if (sum(~np.isreal(eigvals))) > 0 or (np.min(eigvals) < -TOL):
print("Warning: B is not positive definite")
cons = {'type':'ineq', 'fun': lambda x: s_radius - np.linalg.norm(x),
'jac': lambda x: x / np.linalg.norm(x) if np.linalg.norm(x) > TOL else np.zeros(x.shape)}
res = optimize.minimize(fun=hindsightLowDimObj,
x0=np.zeros(U.shape[1]), args = (z,V),
jac = hindsightLowDimGrad, method = 'SLSQP',
constraints=cons, options={'disp':True,'maxiter':max_iter})
# Note: Set options['disp'] = True to print output of optimization.
x_star = res['x']
p_star = np.dot(U, x_star)
p_norm = np.linalg.norm(p_star)
if p_norm > s_radius:
print ("Warning: p_star not in constraints")
p_star = s_radius * p_star/p_norm
R_star = hindsightObj(p_star, c, B)*T
for i in range(1000): # compare with random search to see if optimization worked
p_rand = randomPricing(N,s_radius)
R_rand = hindsightObj(p_rand, c, B)*T
if (R_star - R_rand)/np.abs(R_star + TOL) > 0.001:
raise ValueError("SLSQP optimization failed, R_star="+
str(R_star)+ ", R_rand="+str(R_rand))
if R_rand < R_star:
p_star = p_rand
R_star = R_rand
return (p_star, R_star) | 5,324,431 |
def push(repo: Repository, remote, branch):
"""Upload local BRANCH commit history / data to REMOTE server.
"""
commit_hash = repo.remote.push(remote=remote, branch=branch)
click.echo(f'Push data for commit hash: {commit_hash}') | 5,324,432 |
def extract_seqIdList_from_cluster(cluster_name, cluster_dic, extract_id, allSeqDf, idName, match_method="contains"):
"""
For all sequences in a cluster, find corresponding sequences in the dataframe.
The extract_id function is used to extract a id that will be used to
find the matching entry in the dataframe index. If the match_method option is set to "contains",
the dataframe index will match the id if it contains the string. If the match_method option
is set to "exact", the dataframe index will match the id if matches exactly.
Return a list of dataframe indices.
"""
seq_list = []
for seq in cluster_dic[cluster_name]:
# The output of the clustering algorithm CD-HIT only print the first 19 characters
# (excluding the first > character) of the sequence id.
# Therefore, we need to perform a search to map the printed information to the unique id
# in the dataframe.
# Extract the accession id which is **unique** for each sequence
seqAccessionId = extract_id(seq[0])
# Search for the accession id in the dataframe
# Note: here we assume a multiindex dataframe, because the same function will
# be used again later on the multiindex version of the allSeqDf dataframe.
if match_method == "contains":
pattern = seqAccessionId
elif match_method == "exact":
pattern = r'^' + seqAccessionId + r'$'
else:
print("Error extract_sequences_from_cluster: invalid match_method option.")
dfSearch = allSeqDf.index.get_level_values(idName).str.contains(pattern)
dfId = None
# Test if we get more than one match
nMatches = pd.Series(dfSearch).sum()
if nMatches == 1:
#dfId = allSeqDf[dfSearch].index.tolist()[0]
# Valid for multiindex dataframe
dfId = allSeqDf.index.get_level_values(idName)[dfSearch].tolist()[0]
elif nMatches > 1:
print("Error extract_sequences_from_cluster: sequenceid", seq[0],
" with accession id \"", seqAccessionId, "\" has several matches in dataframe.")
dfId = None
else:
print("Error extract_sequences_from_cluster: sequenceid", seq[0],
" with accession id \"", seqAccessionId, "\" not found in dataframe.")
# print("dfId =",allSeqDf.index.get_level_values(idName)[dfSearch].tolist()[0])
# print("dfSearch.sum() =",dfSearch.sum())
dfId = None
# seq_list is just the list of sequence ids
seq_list.append(dfId)
return seq_list | 5,324,433 |
def format_imports(import_statements):
"""
-----
examples:
@need
from fastest.constants import TestBodies
@end
@let
import_input = TestBodies.TEST_STACK_IMPORTS_INPUT
output = TestBodies.TEST_STACK_IMPORTS_OUTPUT
@end
1) format_imports(import_input) -> output
-----
:param import_statements: list
:return: list
"""
return [
'{}\n'.format(import_statement.strip())
for import_statement in import_statements
if len(import_statement) > 0
] | 5,324,434 |
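Example behaviour (empty entries are dropped, the rest are stripped and newline-terminated):

print(format_imports(["import os ", "", "from glob import glob"]))
# ['import os\n', 'from glob import glob\n']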
def parse_modmap(lines):
"""Parse a modmap file."""
re_range = re.compile(r'KeyCodes range from (\d+) to')
lower_bound = 8
re_line = re.compile(r'^\s+(\d+)\s+0x[\dA-Fa-f]+\s+(.*)')
re_remainder = re.compile(r'\((.+?)\)')
ret = ModMapper()
for line in lines.split('\n'):
if not line:
continue
grps = re_range.search(line)
if grps:
lower_bound = int(grps.group(1))
#end if
grps = re_line.search(line)
if grps:
code = int(grps.group(1)) - lower_bound
strlst = []
for grp in re_remainder.finditer(grps.group(2)):
strlst.append(grp.group(1))
#end for
# We'll pick the first one
alias = strlst[0].upper()
my_keyname = 'KEY_' + alias
my_keyname = my_keyname.replace('XF86', '')
ret.set_map(code, (my_keyname, alias))
#end if
#end for
ret.done()
return ret | 5,324,435 |
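The regexes imply input of the shape produced by `xmodmap -pk`; a sketch (the exact xmodmap columns are an assumption):

sample = """KeyCodes range from 8 to 255
     10     0x0031 (1) (exclam)
     38     0x0061 (a) (A)"""
mapping = parse_modmap(sample)
# keycode 10 - 8 = 2 -> ('KEY_1', '1'); keycode 38 - 8 = 30 -> ('KEY_A', 'A')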
def ParseFastq(pathstofastqs):
"""
    :param pathstofastqs: list of lists of FASTQ file paths.
    Given a list [] of grouped FASTQ files (uncompressed, gzip-compressed or bz2-compressed), extract the sequence ID, sequence(s) and quality score(s) from each read.
"""
for i in range(len(pathstofastqs)):
if pathstofastqs[i][0].endswith('.gz'):
processes = [gzip.open(fastq) for fastq in pathstofastqs[i]]
elif pathstofastqs[i][0].endswith('.bz2'):
processes = [bz2.open(fastq) for fastq in pathstofastqs[i]]
elif pathstofastqs[i][0].endswith('.fastq'):
processes = [open(fastq) for fastq in pathstofastqs[i]]
else:
sys.exit('The format of the file %s is not recognized.' % (str(pathstofastqs[i])))
while True:
try:
'''
                If next(read) returns bytes, decode it; if it returns str, encode then decode.
                Handles the differing behaviours of the underlying file objects.
'''
try:
names = [next(read).decode().split(' ')[0] for read in processes]
Sequence = [next(read).decode() for read in processes]
Blank = [next(read).decode() for read in processes]
qualityscore = [next(read).decode() for read in processes]
except (UnicodeDecodeError, AttributeError):
names = [next(read).encode().decode().split(' ')[0] for read in processes]
Sequence = [next(read).encode().decode() for read in processes]
Blank = [next(read).encode().decode() for read in processes]
qualityscore = [next(read).encode().decode() for read in processes]
assert all(name == names[0] for name in names)
if names:
yield [names[0], Sequence, qualityscore]
else:
break
except StopIteration:
                break  # exhausted; fall through so the files get closed
for read in processes:
read.close() | 5,324,436 |
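Usage sketch: the generator yields one record per read, with one sequence and one quality string per file in the group (file names here are placeholders):

for name, seqs, quals in ParseFastq([["sample_R1.fastq", "sample_R2.fastq"]]):
    print(name, len(seqs), len(quals))  # read id, 2 sequences, 2 quality strings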
def read_evoked(fname, setno=None, baseline=None, kind='average', proj=True):
"""Read an evoked dataset
Parameters
----------
fname : string
The file name.
setno : int or str | list of int or str | None
The index or list of indices of the evoked dataset to read. FIF files
can contain multiple datasets. If None and there is only one dataset in
the file, this dataset is loaded.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction. If None do not apply it.
If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a
is None the beginning of the data is used and if b is None then b is set
        to the end of the interval. If baseline is equal to (None, None) all the
time interval is used.
kind : str
Either 'average' or 'standard_error', the type of data to read.
proj : bool
If False, available projectors won't be applied to the data.
Returns
-------
evoked : instance of Evoked or list of Evoked
The evoked datasets.
"""
evoked_node = _get_evoked_node(fname)
if setno is None and len(evoked_node) > 1:
fid, _, _ = fiff_open(fname)
try:
_, _, t = _get_entries(fid, evoked_node)
        except Exception:
t = 'None found, must use integer'
else:
fid.close()
            raise ValueError('%d datasets present, setno parameter must be set. '
                             'Candidate setno names:\n%s' % (len(evoked_node), t))
elif isinstance(setno, list):
return [Evoked(fname, s, baseline=baseline, kind=kind, proj=proj)
for s in setno]
else:
if setno is None:
setno = 0
return Evoked(fname, setno, baseline=baseline, kind=kind, proj=proj) | 5,324,437 |
def construct_lexically_addressed_lambda_tree(lambda_tree, surrounding_scope=None):
"""
Out of a "bare" LambdaTree constructs a LexicallyAddressedLambdaTree, i.e. a tree of lambdas where the nodes are
decorated with a dict like so:
{symbol: scopes_up_count}
It does this by going down the tree, calling lexical_addressing_x at each node (and passing down the surrounding
node's info).
"""
if surrounding_scope is None:
surrounding_scope = {}
pmts(surrounding_scope, dict)
pmts(lambda_tree, LambdaTree)
lexical_addresses = lexical_addressing_x(surrounding_scope, lambda_tree.lambda_form)
constructed_children = [
construct_lexically_addressed_lambda_tree(child, lexical_addresses)
for child in lambda_tree.children
]
return LexicallyAddressedLambdaTree(
lambda_tree.lambda_form,
lexical_addresses,
constructed_children,
) | 5,324,438 |
def integrate_vec(vec, time_dep=False, method='ss', **kwargs):
"""
    Integrate a (stationary or time-dependent) vector field (N-D Tensor) in tensorflow
Aside from directly using tensorflow's numerical integration odeint(), also implements
"scaling and squaring", and quadrature. Note that the diff. equation given to odeint
is the one used in quadrature.
Parameters:
vec: the Tensor field to integrate.
If vol_size is the size of the intrinsic volume, and vol_ndim = len(vol_size),
then vector shape (vec_shape) should be
[vol_size, vol_ndim] (if stationary)
[vol_size, vol_ndim, nb_time_steps] (if time dependent)
time_dep: bool whether vector is time dependent
method: 'scaling_and_squaring' or 'ss' or 'ode' or 'quadrature'
if using 'scaling_and_squaring': currently only supports integrating to time point 1.
nb_steps: int number of steps. Note that this means the vec field gets broken
down to 2**nb_steps. so nb_steps of 0 means integral = vec.
if using 'ode':
out_time_pt (optional): a time point or list of time points at which to evaluate
Default: 1
init (optional): if using 'ode', the initialization method.
Currently only supporting 'zero'. Default: 'zero'
ode_args (optional): dictionary of all other parameters for
tf.contrib.integrate.odeint()
Returns:
int_vec: integral of vector field.
Same shape as the input if method is 'scaling_and_squaring', 'ss', 'quadrature',
or 'ode' with out_time_pt not a list. Will have shape [*vec_shape, len(out_time_pt)]
if method is 'ode' with out_time_pt being a list.
Todo:
quadrature for more than just intrinsically out_time_pt = 1
"""
if method not in ['ss', 'scaling_and_squaring', 'ode', 'quadrature']:
raise ValueError("method has to be 'scaling_and_squaring' or 'ode'. found: %s" % method)
if method in ['ss', 'scaling_and_squaring']:
nb_steps = kwargs['nb_steps']
assert nb_steps >= 0, 'nb_steps should be >= 0, found: %d' % nb_steps
if time_dep:
svec = K.permute_dimensions(vec, [-1, *range(0, vec.shape[-1] - 1)])
assert 2**nb_steps == svec.shape[0], "2**nb_steps and vector shape don't match"
svec = svec / (2**nb_steps)
for _ in range(nb_steps):
svec = svec[0::2] + tf.map_fn(transform, svec[1::2, :], svec[0::2, :])
disp = svec[0, :]
else:
vec = vec / (2**nb_steps)
for _ in range(nb_steps):
vec += transform(vec, vec)
disp = vec
elif method == 'quadrature':
# TODO: could output more than a single timepoint!
nb_steps = kwargs['nb_steps']
assert nb_steps >= 1, 'nb_steps should be >= 1, found: %d' % nb_steps
vec = vec / nb_steps
if time_dep:
disp = vec[..., 0]
for si in range(nb_steps - 1):
disp += transform(vec[..., si + 1], disp)
else:
disp = vec
for _ in range(nb_steps - 1):
disp += transform(vec, disp)
else:
assert not time_dep, "odeint not implemented with time-dependent vector field"
fn = lambda disp, _: transform(vec, disp)
# process time point.
out_time_pt = kwargs['out_time_pt'] if 'out_time_pt' in kwargs.keys() else 1
out_time_pt = tf.cast(K.flatten(out_time_pt), tf.float32)
len_out_time_pt = out_time_pt.get_shape().as_list()[0]
assert len_out_time_pt is not None, 'len_out_time_pt is None :('
# initializing with something like tf.zeros(1) gives a control flow issue.
z = out_time_pt[0:1] * 0.0
K_out_time_pt = K.concatenate([z, out_time_pt], 0)
        # allow using an integration function other than tf.contrib.integrate.odeint
odeint_fn = tf.contrib.integrate.odeint
if 'odeint_fn' in kwargs.keys() and kwargs['odeint_fn'] is not None:
odeint_fn = kwargs['odeint_fn']
# process initialization
if 'init' not in kwargs.keys() or kwargs['init'] == 'zero':
disp0 = vec * 0 # initial displacement is 0
else:
raise ValueError('non-zero init for ode method not implemented')
# compute integration with odeint
if 'ode_args' not in kwargs.keys():
kwargs['ode_args'] = {}
disp = odeint_fn(fn, disp0, K_out_time_pt, **kwargs['ode_args'])
disp = K.permute_dimensions(disp[1:len_out_time_pt + 1, :], [*range(1, len(disp.shape)), 0])
# return
if len_out_time_pt == 1:
disp = disp[..., 0]
return disp | 5,324,439 |
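The 'ss' branch is the flow-field analogue of computing a matrix exponential by scaling and squaring; a self-contained numpy sketch of that idea (independent of the TensorFlow code above):

import numpy as np

def expm_ss(A, nb_steps=12):
    # scale: one small Euler step, M ~= exp(A / 2**nb_steps)
    M = np.eye(A.shape[0]) + A / (2 ** nb_steps)
    # square: compose the map with itself nb_steps times to reach t = 1
    for _ in range(nb_steps):
        M = M @ M
    return M

A = np.array([[0.0, 1.0], [-1.0, 0.0]])  # rotation generator
expected = np.array([[np.cos(1), np.sin(1)], [-np.sin(1), np.cos(1)]])
print(np.allclose(expm_ss(A), expected, atol=1e-3))  # True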
def get_all_news(num_page, limit):
"""Get all users"""
news = News.objects.paginate(page=num_page, per_page=limit)
response_object = {
'status': 'success',
'data': news.items,
}
return jsonify(response_object), 200 | 5,324,440 |
def correlations(X, y=None):
"""
given a pandas DataFrame returns correlation matrix and figure representing the correlations
    :param X: [pandas DataFrame] predictor columns
    :param y: [pandas Series] optional target column, appended to X before computing correlations
:return: correlation matrix and figure representing the correlations
"""
assert (isinstance(X, pd.DataFrame)) and (not X.empty), 'X should be a valid pandas DataFrame'
numerical_cols = X.select_dtypes(include=[np.number]).columns
if len(numerical_cols) == 0:
return None, None
df = X.copy()
if y is not None:
df[y.name] = y
corr = df.corr()
fig = sns.clustermap(corr, linewidths=.5, figsize=constants.FIGURE_SIZE)
plt.suptitle('Raw Features Correlation', fontsize=20)
return corr, fig | 5,324,441 |
def build_stream(subdir):
"""generate the stream templates"""
stream_types = ["uniform grid",
"amr grids",
"particles",
"octree",
"hexahedral mesh",
"unstructured mesh"]
if subdir is None:
subdir = os.path.join('{{cookiecutter.project_slug}}',
'{{cookiecutter.project_slug}}',
"frontend_templates",
"stream")
filename = '{{cookiecutter.project_slug}}.py'
stream_template.write_template(stream_types, filename=filename, subdir=subdir) | 5,324,442 |
def consensus():
"""
    Resolve the blockchain using the consensus algorithm when multiple nodes are connected
"""
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200 | 5,324,443 |
def create_idletomography_report(results, filename, title="auto",
ws=None, auto_open=False, link_to=None,
brevity=0, advancedOptions=None, verbosity=1):
"""
Creates an Idle Tomography report, summarizing the results of running
idle tomography on a data set.
Parameters
----------
results : IdleTomographyResults
An object which represents the set of results from an idle tomography
run, typically obtained from running :func:`do_idle_tomography` OR a
dictionary of such objects, representing multiple idle tomography runs
to be compared (typically all with *different* data sets). The keys of
this dictionary are used to label different data sets that are
selectable in the report.
filename : string, optional
The output filename where the report file(s) will be saved. If
None, then no output file is produced (but returned Workspace
still caches all intermediate results).
title : string, optional
The title of the report. "auto" causes a random title to be
generated (which you may or may not like).
ws : Workspace, optional
The workspace used as a scratch space for performing the calculations
and visualizations required for this report. If you're creating
multiple reports with similar tables, plots, etc., it may boost
performance to use a single Workspace for all the report generation.
auto_open : bool, optional
If True, automatically open the report in a web browser after it
has been generated.
link_to : list, optional
If not None, a list of one or more items from the set
{"tex", "pdf", "pkl"} indicating whether or not to
create and include links to Latex, PDF, and Python pickle
files, respectively. "tex" creates latex source files for
tables; "pdf" renders PDFs of tables and plots ; "pkl" creates
Python versions of plots (pickled python data) and tables (pickled
    pandas DataFrames).
advancedOptions : dict, optional
    A dictionary of advanced options for which the default values are usually
    fine. Here are the possible keys of `advancedOptions`:
- connected : bool, optional
Whether output HTML should assume an active internet connection. If
True, then the resulting HTML file size will be reduced because it
will link to web resources (e.g. CDN libraries) instead of embedding
them.
- cachefile : str, optional
filename with cached workspace results
- precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
everything else, respectively. If an integer is given, it this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
- resizable : bool, optional
Whether plots and tables are made with resize handles and can be
resized within the report.
- autosize : {'none', 'initial', 'continual'}
Whether tables and plots should be resized, either initially --
i.e. just upon first rendering (`"initial"`) -- or whenever
the browser window is resized (`"continual"`).
verbosity : int, optional
How much detail to send to stdout.
Returns
-------
Workspace
The workspace object used to create the report
"""
tStart = _time.time()
printer = _VerbosityPrinter.build_printer(verbosity) # , comm=comm)
if advancedOptions is None: advancedOptions = {}
precision = advancedOptions.get('precision', None)
cachefile = advancedOptions.get('cachefile', None)
connected = advancedOptions.get('connected', False)
resizable = advancedOptions.get('resizable', True)
autosize = advancedOptions.get('autosize', 'initial')
mdl_sim = advancedOptions.get('simulator', None) # a model
if filename and filename.endswith(".pdf"):
fmt = "latex"
else:
fmt = "html"
printer.log('*** Creating workspace ***')
if ws is None: ws = _ws.Workspace(cachefile)
if title is None or title == "auto":
if filename is not None:
autoname = _autotitle.generate_name()
title = "Idle Tomography Report for " + autoname
_warnings.warn(("You should really specify `title=` when generating reports,"
" as this makes it much easier to identify them later on. "
"Since you didn't, pyGSTi has generated a random one"
" for you: '{}'.").format(autoname))
else:
title = "N/A" # No title - but it doesn't matter since filename is None
results_dict = results if isinstance(results, dict) else {"unique": results}
renderMath = True
qtys = {} # stores strings to be inserted into report template
def addqty(b, name, fn, *args, **kwargs):
"""Adds an item to the qtys dict within a timed block"""
if b is None or brevity < b:
with _timed_block(name, formatStr='{:45}', printer=printer, verbosity=2):
qtys[name] = fn(*args, **kwargs)
qtys['title'] = title
qtys['date'] = _time.strftime("%B %d, %Y")
pdfInfo = [('Author', 'pyGSTi'), ('Title', title),
('Keywords', 'GST'), ('pyGSTi Version', _version.__version__)]
qtys['pdfinfo'] = _merge.to_pdfinfo(pdfInfo)
# Generate Switchboard
printer.log("*** Generating switchboard ***")
#Create master switchboard
switchBd, dataset_labels = \
_create_switchboard(ws, results_dict)
if fmt == "latex" and (len(dataset_labels) > 1):
raise ValueError("PDF reports can only show a *single* dataset,"
" estimate, and gauge optimization.")
# Generate Tables
printer.log("*** Generating tables ***")
multidataset = bool(len(dataset_labels) > 1)
#REM intErrView = [False,True,True]
if fmt == "html":
qtys['topSwitchboard'] = switchBd
#REM qtys['intrinsicErrSwitchboard'] = switchBd.view(intErrView,"v1")
results = switchBd.results
#REM errortype = switchBd.errortype
#REM errorop = switchBd.errorop
A = None # no brevity restriction: always display; for "Summary"- & "Help"-tab figs
#Brevity key:
# TODO - everything is always displayed for now
addqty(A, 'intrinsicErrorsTable', ws.IdleTomographyIntrinsicErrorsTable, results)
addqty(A, 'observedRatesTable', ws.IdleTomographyObservedRatesTable, results,
20, mdl_sim) # HARDCODED - show only top 20 rates
# errortype, errorop,
# Generate plots
printer.log("*** Generating plots ***")
toggles = {}
toggles['CompareDatasets'] = False # not comparable by default
if multidataset:
#check if data sets are comparable (if they have the same sequences)
comparable = True
gstrCmpList = list(results_dict[dataset_labels[0]].dataset.keys()) # maybe use circuit_lists['final']??
for dslbl in dataset_labels:
if list(results_dict[dslbl].dataset.keys()) != gstrCmpList:
_warnings.warn("Not all data sets are comparable - no comparisions will be made.")
comparable = False; break
if comparable:
#initialize a new "dataset comparison switchboard"
dscmp_switchBd = ws.Switchboard(
["Dataset1", "Dataset2"],
[dataset_labels, dataset_labels],
["buttons", "buttons"], [0, 1]
)
dscmp_switchBd.add("dscmp", (0, 1))
dscmp_switchBd.add("dscmp_gss", (0,))
dscmp_switchBd.add("refds", (0,))
for d1, dslbl1 in enumerate(dataset_labels):
dscmp_switchBd.dscmp_gss[d1] = results_dict[dslbl1].circuit_structs['final']
dscmp_switchBd.refds[d1] = results_dict[dslbl1].dataset # only used for #of spam labels below
# dsComp = dict()
all_dsComps = dict()
indices = []
for i in range(len(dataset_labels)):
for j in range(len(dataset_labels)):
indices.append((i, j))
#REMOVE (for using comm)
#if comm is not None:
# _, indexDict, _ = _distribute_indices(indices, comm)
# rank = comm.Get_rank()
# for k, v in indexDict.items():
# if v == rank:
# d1, d2 = k
# dslbl1 = dataset_labels[d1]
# dslbl2 = dataset_labels[d2]
#
# ds1 = results_dict[dslbl1].dataset
# ds2 = results_dict[dslbl2].dataset
# dsComp[(d1, d2)] = _DataComparator(
# [ds1, ds2], DS_names=[dslbl1, dslbl2])
# dicts = comm.gather(dsComp, root=0)
# if rank == 0:
# for d in dicts:
# for k, v in d.items():
# d1, d2 = k
# dscmp_switchBd.dscmp[d1, d2] = v
# all_dsComps[(d1,d2)] = v
#else:
for d1, d2 in indices:
dslbl1 = dataset_labels[d1]
dslbl2 = dataset_labels[d2]
ds1 = results_dict[dslbl1].dataset
ds2 = results_dict[dslbl2].dataset
all_dsComps[(d1, d2)] = _DataComparator([ds1, ds2], DS_names=[dslbl1, dslbl2])
dscmp_switchBd.dscmp[d1, d2] = all_dsComps[(d1, d2)]
qtys['dscmpSwitchboard'] = dscmp_switchBd
addqty(4, 'dsComparisonSummary', ws.DatasetComparisonSummaryPlot, dataset_labels, all_dsComps)
#addqty('dsComparisonHistogram', ws.DatasetComparisonHistogramPlot, dscmp_switchBd.dscmp, display='pvalue')
addqty(4, 'dsComparisonHistogram', ws.ColorBoxPlot,
'dscmp', dscmp_switchBd.dscmp_gss, dscmp_switchBd.refds, None,
dscomparator=dscmp_switchBd.dscmp, typ="histogram")
addqty(1, 'dsComparisonBoxPlot', ws.ColorBoxPlot, 'dscmp', dscmp_switchBd.dscmp_gss,
dscmp_switchBd.refds, None, dscomparator=dscmp_switchBd.dscmp)
toggles['CompareDatasets'] = True
else:
toggles['CompareDatasets'] = False # not comparable!
else:
toggles['CompareDatasets'] = False
if filename is not None:
if True: # comm is None or comm.Get_rank() == 0:
# 3) populate template file => report file
printer.log("*** Merging into template file ***")
if fmt == "html":
if filename.endswith(".html"):
_merge.merge_jinja_template(
qtys, filename, templateDir='~idletomography_html_report',
auto_open=auto_open, precision=precision, link_to=link_to,
connected=connected, toggles=toggles, renderMath=renderMath,
resizable=resizable, autosize=autosize, verbosity=printer
)
else:
_merge.merge_jinja_template_dir(
qtys, filename, templateDir='~idletomography_html_report',
auto_open=auto_open, precision=precision, link_to=link_to,
connected=connected, toggles=toggles, renderMath=renderMath,
resizable=resizable, autosize=autosize, verbosity=printer
)
elif fmt == "latex":
raise NotImplementedError("No PDF version of this report is available yet.")
templateFile = "idletomography_pdf_report.tex"
base = _os.path.splitext(filename)[0] # no extension
_merge.merge_latex_template(qtys, templateFile, base + ".tex", toggles,
precision, printer)
# compile report latex file into PDF
cmd = _ws.WorkspaceOutput.default_render_options.get('latex_cmd', None)
flags = _ws.WorkspaceOutput.default_render_options.get('latex_flags', [])
assert(cmd), "Cannot render PDF documents: no `latex_cmd` render option."
printer.log("Latex file(s) successfully generated. Attempting to compile with %s..." % cmd)
_merge.compile_latex_report(base, [cmd] + flags, printer, auto_open)
else:
raise ValueError("Unrecognized format: %s" % fmt)
else:
printer.log("*** NOT Merging into template file (filename is None) ***")
printer.log("*** Report Generation Complete! Total time %gs ***" % (_time.time() - tStart))
return ws | 5,324,444 |
def sha256_secrethash(secret: Secret) -> SecretHash:
"""Compute the secret hash using sha256."""
return SecretHash(sha256(secret).digest()) | 5,324,445 |
def make_http_request(method, url, check_response=None, *args, **kwargs):
"""
Make an HTTP request with the global session.
:return: The response object.
:raises: WebserviceException
"""
session = get_requests_session()
response = ClientBase._execute_func(lambda *args, **kwargs: session.request(method, *args, **kwargs),
url, *args, **kwargs)
try:
if check_response is None:
response.raise_for_status()
elif not check_response(response):
raise WebserviceException('Received bad response from service:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(response.status_code, response.headers, response.content),
logger=module_logger)
except requests.Timeout:
raise WebserviceException('Error, request to {} timed out.'.format(url), logger=module_logger)
except requests.exceptions.HTTPError:
raise WebserviceException('Received bad response from service:\n'
'Response Code: {}\n'
'Headers: {}\n'
'Content: {}'.format(response.status_code, response.headers, response.content),
logger=module_logger)
return response | 5,324,446 |
def get_pages_matches_no_prep(title, edition, archive, filename, text, keysentences):
"""
Get pages within a document that include one or more keywords.
For each page that includes a specific keyword, add a tuple of
form:
(<TITLE>, <EDITION>, <ARCHIVE>, <FILENAME>, <TEXT>, <KEYWORD>)
If a keyword occurs more than once on a page, there will be only
one tuple for the page for that keyword.
If more than one keyword occurs on a page, there will be one tuple
per keyword.
:return: list of tuples
"""
matches = []
for keysentence in keysentences:
#sentence_match = get_sentences_list_matches(text, keysentence)
sentence_match_idx = get_text_keyword_idx(text, keysentence)
        if sentence_match_idx:
match = (title, edition, archive, filename, text, keysentence)
matches.append(match)
return matches | 5,324,447 |
def sf(k, r, p):
"""
Survival function of the negative binomial distribution.
Parameters
----------
    k : int
        Argument of the survival function (number of successes observed).
    r : int
        Number of failures until the experiment is stopped.
p : float
Probability of success.
"""
with mpmath.extradps(5):
k = mpmath.mpf(k)
r = mpmath.mpf(r)
p = mpmath.mpf(p)
return mpmath.betainc(k + 1, r, 0, p, regularized=True) | 5,324,448 |
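Cross-check against scipy, noting that this parameterization counts successes (probability p) before the r-th failure, so the success/failure roles are swapped relative to scipy.stats.nbinom (an assumption read off the betainc arguments):

from scipy import stats
print(float(sf(3, 5, 0.4)))        # mpmath implementation
print(stats.nbinom.sf(3, 5, 0.6))  # same value, with p swapped for 1 - p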
def _loglevel_signal_handler(loggers):
"""
Handle SIGUSR1 and SIGUSR2. Sets log level to INFO on SIGUSR1 and DEBUG on SIGUSR2.
After setting level, it calls the previous SIGUSRx handler unless it was set to SIG_DFL.
"""
usr_signals = {
signal.SIGUSR1: logging.INFO,
signal.SIGUSR2: logging.DEBUG,
}
previous_handlers = {
signalnum: handler
for (signalnum, handler) in zip(usr_signals, map(signal.getsignal, usr_signals))
if handler != signal.SIG_DFL
}
def handle_usr_signal(signalnum, _frame):
level = usr_signals[signalnum]
for logger in loggers:
logger.setLevel(level)
previous_handler = previous_handlers.get(signalnum)
if previous_handler:
previous_handler(signalnum, _frame)
signal.signal(signal.SIGUSR1, handle_usr_signal)
signal.signal(signal.SIGUSR2, handle_usr_signal) | 5,324,449 |
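Typical wiring at process start-up, after which the log level can be flipped from outside the process:

import logging
log = logging.getLogger("app")
_loglevel_signal_handler([log])
# kill -USR2 <pid>  -> DEBUG; kill -USR1 <pid>  -> INFO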
def process_menu(menu: Menu, perms: PermWrapper) -> MenuGroup:
"""Enable a menu item if view permissions exist for the user."""
for group in menu.groups:
for item in group.items:
# Parse the URL template tag to a permission string.
app, scope = item.url.split(":")
object_name = scope.replace("_list", "")
view_perm = f"{app}.view_{scope}"
add_perm = f"{app}.add_object_name"
if view_perm in perms:
# If the view permission for each item exists, toggle
# the `disabled` field, which will be used in the UI.
item.disabled = False
if add_perm in perms:
if item.add_url is not None:
item.has_add = True
if item.import_url is not None:
item.has_import = True
return menu | 5,324,450 |
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
def getter(self):
if closed_only and not self._closed:
raise AttributeError("can only get %r on a closed file" %
field_name)
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
def setter(self, value):
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {field_name: value}})
self._file[field_name] = value
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring) | 5,324,451 |
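This property factory is meant to be used at class scope, in the style of pymongo's GridIn; a sketch (the exact attribute set is illustrative):

class GridIn(object):
    filename = _grid_in_property("filename", "Name of this file.")
    length = _grid_in_property("length", "Length (in bytes) of this file.",
                               closed_only=True)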
def Run(benchmark_spec):
"""Runs memtier against memcached and gathers the results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample instances.
"""
client = benchmark_spec.vm_groups['client'][0]
server = benchmark_spec.vm_groups['server'][0]
server_ip = server.internal_ip
metadata = {'memcached_version': memcached_server.GetVersion(server),
'memcached_server_size': FLAGS.memcached_size_mb}
logging.info('Start benchmarking memcached using memtier.')
samples = memtier.Run(client, server_ip, memcached_server.MEMCACHED_PORT)
for sample in samples:
sample.metadata.update(metadata)
return samples | 5,324,452 |
async def test_connection_error(
hass: HomeAssistant, mock_pvoutput_config_flow: MagicMock
) -> None:
"""Test API connection error."""
mock_pvoutput_config_flow.status.side_effect = PVOutputConnectionError
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_SYSTEM_ID: 12345,
CONF_API_KEY: "tadaaa",
},
)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("errors") == {"base": "cannot_connect"}
assert len(mock_pvoutput_config_flow.status.mock_calls) == 1 | 5,324,453 |
def query_leave(account_id, __, ___):
"""
    Handle FAQ interactions related to leave queries.
:param account_id: user account id.
"""
status, __, type, message = get_user_status(account_id)
if status == "wait_in":
set_user_status(account_id, status="none")
fmt = _("Here are FAQs about HR and leave.")
text = make_i18n_text("Here are FAQs about HR and leave.", "query_leave", fmt)
fmt = _("Send a question")
replay = create_quick_replay_item("transfer_leave", "query_leave", "Send a question", fmt)
labels = ["See more", "See more", "See more", "See more"]
fmt = _("See more")
i18n_label_fmts = [fmt, fmt, fmt, fmt]
titles = [
"The types of leave.",
"How many days of leave I have?",
"To request a leave.",
"To cancel my leave."
]
i18n_title_fmts = [
_("The types of leave."),
_("How many days of leave I have?"),
_("To request a leave."),
_("To cancel my leave.")
]
texts = [
"Types of leave are classified in the Labor Standards Act.",
"The remaining days of your leave are on in-house browser.",
"You can request a leave via the in-house browser.",
"You can cancel your leave request via the in-house browser."
]
i18n_text_fmts = [
_("Types of leave are classified in the Labor Standards Act."),
_("The remaining days of your leave are on in-house browser."),
_("You can request a leave via the in-house browser."),
_("You can cancel your leave request via the in-house browser.")
]
carousel = create_carousel("query_leave", labels, POST_BACK_URLS["leave"],
texts, titles, CAROUSEL["leave"],
fmt_labels=i18n_label_fmts,
fmt_texts=i18n_text_fmts,
fmt_titles=i18n_title_fmts)
carousel["quickReply"] = replay
yield push_messages(account_id, [text, carousel]) | 5,324,454 |
def get_trainer_class_from_model(parameter):
"""
>>> from IPython.lib.pretty import pprint
>>> from pb_bss.distribution.cacgmm import (
... ComplexAngularCentralGaussian,
... )
>>> get_trainer_class_from_model(ComplexAngularCentralGaussian).__name__
'ComplexAngularCentralGaussianTrainer'
>>> get_trainer_class_from_model(ComplexAngularCentralGaussian()).__name__
'ComplexAngularCentralGaussianTrainer'
"""
from pb_bss import distribution
if not hasattr(parameter, '__name__'):
parameter = parameter.__class__
name = parameter.__name__
assert 'Trainer' not in name, name
name = name + 'Trainer'
return getattr(distribution, name) | 5,324,455 |
def is_fractional_it(input_str, short_scale=False):
"""
This function takes the given text and checks if it is a fraction.
Updated to italian from en version 18.8.9
Args:
input_str (str): the string to check if fractional
short_scale (bool): use short scale if True, long scale if False
Returns:
(bool) or (float): False if not a fraction, otherwise the fraction
"""
input_str = input_str.lower()
if input_str.endswith('i', -1) and len(input_str) > 2:
input_str = input_str[:-1] + "o" # normalizza plurali
fracts_it = {"intero": 1, "mezza": 2, "mezzo": 2}
if short_scale:
for num in _SHORT_ORDINAL_STRING_IT:
if num > 2:
fracts_it[_SHORT_ORDINAL_STRING_IT[num]] = num
else:
for num in _LONG_ORDINAL_STRING_IT:
if num > 2:
fracts_it[_LONG_ORDINAL_STRING_IT[num]] = num
if input_str in fracts_it:
return 1.0 / fracts_it[input_str]
return False | 5,324,456 |
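Expected behaviour, assuming the module-level ordinal tables map e.g. 3 -> "terzo":

print(is_fractional_it("mezzo"))  # 0.5
print(is_fractional_it("terzi"))  # 0.333... ("terzi" is normalized to "terzo")
print(is_fractional_it("ciao"))   # False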
def qchem2molgraph(logfile, return_qobj=False, return_none_on_err=False, **kwargs):
"""
Convert a Q-Chem logfile to a MolGraph object. Return the QChem
object in addition to the MolGraph if return_qobj is True. Catch
QChemError if return_none_on_err is True and return None. Options
in kwargs are passed to valid_job. If the job is not valid, also
return None.
"""
try:
q = QChem(logfile=logfile)
except QChemError as e:
if return_none_on_err:
print(e)
return None
else:
raise
if not valid_job(q, **kwargs):
return None
energy = q.get_energy() + q.get_zpe() # With ZPE
symbols, coords = q.get_geometry()
mol = MolGraph(symbols=symbols, coords=coords, energy=energy)
mol.infer_connections()
if return_qobj:
return mol, q
else:
return mol | 5,324,457 |
def extract_signals(data, fs, segmentation_times):
"""
    Given the set of segmentation times, extract the signals from the raw trace.
    Args:
        data : Numpy
            The input seismic data from which the segments are cut.
        fs : float
            The sampling frequency.
        segmentation_times : list
            A list of (start, end) times, in seconds, describing the segmentation of the file.
    Returns:
        (list, list)
            The extracted signals and their durations in seconds.
"""
signals = []
durations = []
for m in segmentation_times:
segmented = data[int(m[0] * fs): int(m[1] * fs)]
signals.append(segmented)
durations.append(segmented.shape[0]/float(fs))
return signals, durations | 5,324,458 |
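A numeric check of the sample arithmetic at fs = 100 Hz:

import numpy as np
data = np.arange(1000, dtype=float)
signals, durations = extract_signals(data, fs=100.0,
                                     segmentation_times=[(1.0, 3.5), (5.0, 6.0)])
print([len(s) for s in signals])  # [250, 100]
print(durations)                  # [2.5, 1.0]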
def main(results_path, network_config: dict, learningrate: int = 1e-3, weight_decay: float = 1e-5,
n_updates: int = int(1e5), device: torch.device = torch.device("cuda:0")):
"""Main function that takes hyperparameters and performs training and evaluation of model"""
# Prepare a path to plot to
os.makedirs(results_path, exist_ok=True)
plotpath = os.path.join(results_path, 'plots')
os.makedirs(plotpath, exist_ok=True)
# Load dataset
trainset = getImages(part='dataset_part_1/**')
valset = getImages(part='dataset_part_4/**')
#testset = getImages(part='dataset_part_2/**')
# Create datasets and dataloaders with rotated targets without augmentation (for evaluation)
trainingset_eval = ImageWidiSet(dataset=trainset)
validationset = ImageWidiSet(dataset=valset)
#testset = ImageWidiSet(dataset=testset)
#trainloader = torch.utils.data.DataLoader(trainingset_eval, batch_size=1, shuffle=False, num_workers=22)
valloader = torch.utils.data.DataLoader(validationset, batch_size=1, shuffle=False, num_workers=22)
#testloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False, num_workers=22)
# Create datasets and dataloaders with rotated targets with augmentation (for training)
trainingset_augmented = ImageWidiSet(dataset=trainset)
trainloader_augmented = torch.utils.data.DataLoader(trainingset_augmented, batch_size=1, shuffle=True,
num_workers=22)
# Define a tensorboard summary writer that writes to directory "results_path/tensorboard"
writer = SummaryWriter(log_dir=os.path.join(results_path, 'tensorboard'))
# Create Network
net = SimpleCNN(**network_config)
net.to(device)
net.train()
# Get mse loss function
mse = torch.nn.MSELoss(reduction='sum')
# Get adam optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=learningrate, weight_decay=weight_decay)
print_stats_at = 1e2 # print status to tensorboard every x updates
plot_at = 2e3 # plot every x updates
validate_at = len(trainingset_augmented) # evaluate model on validation set and check for new best model every x updates
update = 0 # current update counter
best_validation_loss = np.inf # best validation loss so far
n_updates = len(trainingset_augmented)*3
update_progress_bar = tqdm(total=n_updates, desc=f"loss: {np.nan:7.5f}", position=0)  # progress bar
# Save initial model as "best" model (will be overwritten later)
torch.save(net, os.path.join(results_path, 'best_model.pt'))
# Train until n_updates updates have been reached
while update < n_updates:
for data in trainloader_augmented:
# Get next samples in `trainloader_augmented`
inputs, targets, mask = data
inputs = inputs.to(device)
targets = targets.to(device)
target_masks = mask.to(dtype=torch.bool)
mask = mask.to(device)
# Reset gradients
optimizer.zero_grad()
# Get outputs for network
# print('mask', mask.shape)
outputs = net(inputs) * mask
# predictions = [outputs[i, target_masks[i]] for i in range(len(outputs))]
# targetss = [targets[i, target_masks[i]] for i in range(len(targets))]
# Calculate loss, do backward pass, and update weights
# losses = torch.stack([mse(prediction, target.to(device))for prediction, target in zip(predictions, targetss)])
loss = mse(outputs[target_masks], targets[target_masks])
# loss = losses.mean()
# loss = mse(predicted_image,target_image)
loss.backward()
optimizer.step()
# Print current status and score
if update % print_stats_at == 0 and update > 0:
writer.add_scalar(tag="training/loss",
scalar_value=loss.cpu().detach().numpy(),
global_step=update)
# Plot output
if update % plot_at == 0:
plot(inputs.detach().cpu().numpy(), targets.detach().cpu().numpy(),
outputs.detach().cpu().numpy() * 255,
plotpath, update)
# Evaluate model on validation set
if update % validate_at == 0 and update > 0:
val_loss = evaluate_model(net, dataloader=valloader, device=device)
writer.add_scalar(tag="validation/loss", scalar_value=val_loss.cpu(), global_step=update)
# Add weights as arrays to tensorboard
for i, param in enumerate(net.parameters()):
writer.add_histogram(tag=f'validation/param_{i}', values=param.cpu(),
global_step=update)
# Add gradients as arrays to tensorboard
for i, param in enumerate(net.parameters()):
writer.add_histogram(tag=f'validation/gradients_{i}',
values=param.grad.cpu(),
global_step=update)
# Save best model for early stopping
if best_validation_loss > val_loss:
best_validation_loss = val_loss
print('new best model')
torch.save(net, os.path.join(results_path, 'best_model.pt'))
update_progress_bar.set_description(f"loss: {loss:7.5f}", refresh=True)
update_progress_bar.update()
# Increment update counter, exit if maximum number of updates is reached
update += 1
if update >= n_updates:
break
update_progress_bar.close()
torch.save(net, os.path.join(results_path, 'best_model.pt'))
print('Finished Training!')
# Load best model and compute score on test set
print(f"Computing scores for best model")
net = torch.load(os.path.join(results_path, 'best_model.pt'))
#test_loss = evaluate_model(net, dataloader=testloader, device=device)
val_loss = evaluate_model(net, dataloader=valloader, device=device)
#train_loss = evaluate_model(net, dataloader=trainloader, device=device)
print(f"Scores:")
#print(f"test loss: {test_loss}")
print(f"validation loss: {val_loss}")
#print(f"training loss: {train_loss}")
# Write result to file
with open(os.path.join(results_path, 'results.txt'), 'w') as fh:
print(f"Scores:", file=fh)
#print(f"test loss: {test_loss}", file=fh)
print(f"validation loss: {val_loss}", file=fh)
#print(f"training loss: {train_loss}", file=fh)
# Write predictions to file | 5,324,459 |
def get_dataset(
file_pattern,
n_classes,
batch_size,
volume_shape,
plane,
n_slices=24,
block_shape=None,
n_epochs=None,
mapping=None,
shuffle_buffer_size=None,
num_parallel_calls=AUTOTUNE,
mode="train",
):
"""Returns tf.data.Dataset after preprocessing from
tfrecords for training and validation
Parameters
----------
file_pattern:
n_classes:
"""
files = glob.glob(file_pattern)
if not files:
raise ValueError("no files found for pattern '{}'".format(file_pattern))
compressed = _is_gzipped(files[0])
shuffle = bool(shuffle_buffer_size)
ds = nobrainer.dataset.tfrecord_dataset(
file_pattern=file_pattern,
volume_shape=volume_shape,
shuffle=shuffle,
scalar_label=True,
compressed=compressed,
num_parallel_calls=num_parallel_calls,
)
def _ss(x, y):
x, y = structural_slice(x, y, plane, n_slices)
return (x, y)
ds = ds.map(_ss, num_parallel_calls)
ds = ds.prefetch(buffer_size=batch_size)
if batch_size is not None:
ds = ds.batch(batch_size=batch_size, drop_remainder=True)
if mode == "train":
if shuffle_buffer_size:
ds = ds.shuffle(buffer_size=shuffle_buffer_size)
# Repeat the dataset n_epochs times
ds = ds.repeat(n_epochs)
return ds | 5,324,460 |
def cos_d(x: float) -> float:
"""
This function takes its input in radians and returns the
computed derivative of cos, which is -sin.
"""
return -math.sin(x) | 5,324,461 |
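A quick standard-library sanity check against a central finite difference:

import math

h, x = 1e-6, 0.7
numeric = (math.cos(x + h) - math.cos(x - h)) / (2 * h)
assert abs(cos_d(x) - numeric) < 1e-8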
def rhist(ax, data, **kwargs):
"""
Create a hist plot with default style parameters to look like ggplot2.
kwargs can be passed to changed other parameters.
"""
defaults = {'facecolor': '0.3',
'edgecolor': '0.36',
'linewidth': 1,
'rwidth': 1}
for x, y in defaults.items():
kwargs.setdefault(x, y)
return ax.hist(data, **kwargs) | 5,324,462 |
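A minimal usage sketch (assumes matplotlib and numpy are installed):

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
rhist(ax, np.random.randn(1000), bins=30)   # defaults apply; extra kwargs pass through
plt.show()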
def follow(request, username_to_follow):
"""View that is used to let the login user to follow another user"""
try:
# If the user is not all ready begin follwed
if not request.user.following.filter(username=username_to_follow).exists():
user_to_follow = get_object_or_404(User, username=username_to_follow)
request.user.following.add(user_to_follow)
return JsonResponse({'message':'success'})
else:
return JsonResponse({'message': 'user is all ready being follwed'})
except:
res = JsonResponse({'message': 'error'})
res.status_code = 400
return res | 5,324,463 |
def Neq(left: Expr, right: Expr) -> BinaryExpr:
"""Difference expression.
Checks if left != right.
Args:
left: A value to check.
right: The other value to check. Must evaluate to the same type as left.
"""
return BinaryExpr(Op.neq, right.type_of(), TealType.uint64, left, right) | 5,324,464 |
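A hedged usage sketch, assuming the surrounding module mirrors PyTeal's public API (Int, Mode, compileTeal):

from pyteal import Int, Mode, compileTeal

expr = Neq(Int(1), Int(2))   # evaluates to 1 (true) on-chain
print(compileTeal(expr, mode=Mode.Application, version=2))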
def load_core_settings():
"""Load core settings.
This function must be manually called (see :file:`urls.py`) in
order to load base settings.
"""
from django.utils.translation import ugettext_lazy
from modoboa.lib import parameters, events
from .app_settings import GeneralParametersForm
parameters.register(GeneralParametersForm, ugettext_lazy("General"))
events.declare(BASE_EVENTS) | 5,324,465 |
def create_simplex_matrix(
dimensions: int,
distance: float) -> np.ndarray:
"""
Create centered normalized N-dimensional simplex structure
The structure is described by N+1, N-dimensional points that have the
same distance between them
:param dimensions: The number of dimensions of the simplex
:param distance: Distance between the points
:return: [dimensions+1, dimensions] matrix of points, each row a point
"""
# --------------------------------
# argument checking
if dimensions <= 0:
raise ValueError("dimensions should be > 0")
if distance <= 0:
raise ValueError("distance should be > 0")
# --------------------------------
# An N-Dimensional simplex requires N+1 points
points = dimensions + 1
# create identity matrix (N points)
matrix = np.identity(dimensions, dtype=np.float64)
# we create the last point
# Now we need a n+1-th point with the same distance to all other points.
# We have to choose (x, x, ... x).
point = np.ones(shape=(1, dimensions), dtype=np.float64) * \
((1. + np.sqrt(dimensions + 1.)) / dimensions)
matrix = np.vstack([matrix, point])
# center points to zero
mean_m = np.mean(matrix, axis=0)
matrix = matrix - mean_m
# all points now have sqrt(2) distance between them
# points lie on the surface of an n-dimensional circle
# calculate the radius of that circle
radius = np.mean(np.linalg.norm(matrix, ord=2, axis=1))
# angle between origin center (0)
# a point A and the midpoint intersection
sin_theta = (np.sqrt(2) / 2.) / radius
# go through the points and normalize into the set distance
for i in range(points):
norm2 = np.linalg.norm(matrix[i], ord=2)
matrix[i] = matrix[i] * ((distance / (sin_theta * 2.)) / norm2)
return matrix | 5,324,466 |
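A quick property check on the construction above: all pairwise distances equal the requested value and the simplex is centered at the origin:

import numpy as np
from itertools import combinations

m = create_simplex_matrix(dimensions=3, distance=2.0)   # 4 points in R^3
dists = [np.linalg.norm(a - b) for a, b in combinations(m, 2)]
assert np.allclose(dists, 2.0)
assert np.allclose(m.mean(axis=0), 0.0)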
def asymmetric_lorentz_gauss_sum(x, mu, fwhm_l, fwhm_g, alpha=1.0, beta=1.5):
"""
sum of an asymmetric Lorentzian and a Gaussian
"""
ygaus = np.array(gauss_one(x, fwhm_g, mu))
ylorentz = np.array(asymmetric_lorentz(x, fwhm_l, mu, alpha=alpha, beta=beta))
ydata = ylorentz + ygaus
return ydata | 5,324,467 |
def main():
"""
Main entry point for module execution
"""
argument_spec = dict(
netbox_url=dict(type="str", required=True),
netbox_token=dict(type="str", required=True, no_log=True),
data=dict(type="dict", required=True),
state=dict(required=False, default="present", choices=["present", "absent"]),
validate_certs=dict(type="bool", default=True),
)
required_if = [("state", "present", ["slug"]), ("state", "absent", ["slug"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_device_type = NetboxDcimModule(module, NB_DEVICE_TYPES)
netbox_device_type.run() | 5,324,468 |
def manage_delegations(d_spec, args, log, deployed, auth_spec):
"""
Create and manage cross account access delegations based on
delegation specifications. Manages delegation roles in
trusting accounts and group policies in Auth (trusted) account.
"""
log.debug('considering %s' % d_spec['RoleName'])
if d_spec['RoleName'] == args['--org-access-role']:
log.error("Refusing to manage delegation '%s'" % d_spec['RoleName'])
return
# munge trusting_accounts list
if d_spec['TrustingAccount'] == 'ALL':
trusting_accounts = [a['Name'] for a in deployed['accounts']]
if 'ExcludeAccounts' in d_spec and d_spec['ExcludeAccounts']:
trusting_accounts = [a for a in trusting_accounts
if a not in d_spec['ExcludeAccounts']]
else:
trusting_accounts = d_spec['TrustingAccount']
for account_name in list(trusting_accounts):  # iterate over a copy: items may be removed below
if not lookup(deployed['accounts'], 'Name', account_name):
log.error("Can not manage delegation role '%s' in account "
"'%s'. Account '%s' not found in Organization" %
(d_spec['RoleName'], account_name, account_name))
trusting_accounts.remove(account_name)
# is this a service role or a user role?
if 'TrustedGroup' in d_spec and 'TrustedAccount' in d_spec:
log.error("can not declare both 'TrustedGroup' or 'TrustedAccount' in "
"delegation spec for role '%s'" % d_spec['RoleName'])
return
elif 'TrustedGroup' not in d_spec and 'TrustedAccount' not in d_spec:
log.error("neither 'TrustedGroup' or 'TrustedAccount' declared in "
"delegation spec for role '%s'" % d_spec['RoleName'])
return
elif 'TrustedAccount' in d_spec and d_spec['TrustedAccount']:
# this is a service role. skip setting group policy
pass
else:
# this is a user role. set group policies in Auth account
set_group_assume_role_policies(args, log, deployed, auth_spec,
trusting_accounts, d_spec)
# run manage_delegation_role() task in thread pool
queue_threads(log, deployed['accounts'], manage_delegation_role,
f_args=(args, log, auth_spec, deployed, trusting_accounts, d_spec)) | 5,324,469 |
def access(path, mode):
"""Use the real uid/gid to test for access to path.
:type path: bytes | unicode
:type mode: int
:rtype: bool
"""
return False | 5,324,470 |
def _is_punctuation(char: str) -> bool:
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | 5,324,471 |
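A few expected outcomes for reference:
assert _is_punctuation("!") is True
assert _is_punctuation("^") is True    # ASCII symbol, treated as punctuation here
assert _is_punctuation("a") is False
assert _is_punctuation("。") is True   # Unicode punctuation (category Po)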
def get_imports(filename, source=None):
"""
Returns a list of #ImportInfo tuples for all module imports in the specified
Python source file or the *source* string. Note that `from X import Y`
imports could also refer to a member of the module X named Y and not the
module X.Y.
"""
def _find_nodes(ast_node, predicate):
result = []
class Visitor(ast.NodeVisitor):
def visit(self, node):
if predicate(node):
result.append(node)
self.generic_visit(node)
Visitor().generic_visit(ast_node)
return result
if source is None:
with open(filename, 'rb') as fp:
source = fp.read()
module = ast.parse(source, filename)
result = []
def _is_import_call(x):
return isinstance(x, ast.Call) and isinstance(x.func, ast.Name) and \
x.func.id == '__import__' and x.args and isinstance(x.args[0], ast.Str)
for node in _find_nodes(module, _is_import_call):
result.append(ImportInfo(node.args[0].s, filename, node.lineno, False))
for node in _find_nodes(module, lambda x: isinstance(x, ast.Import)):
for alias in node.names:
result.append(ImportInfo(alias.name, filename, node.lineno, False))
for node in _find_nodes(module, lambda x: isinstance(x, ast.ImportFrom)):
parent_name = '.' * node.level + (node.module or '')
result.append(ImportInfo(parent_name, filename, node.lineno, False))
for alias in node.names:
if alias.name == '*': continue
import_name = parent_name
if not import_name.endswith('.'):
import_name += '.'
import_name += alias.name
result.append(ImportInfo(import_name, filename, node.lineno, True))
result.sort(key=lambda x: x.lineno)
return result | 5,324,472 |
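A usage sketch parsing a source string directly; the printed tuples are illustrative and follow the ImportInfo field order used above (name, filename, lineno, from-import member):

src = "import os\nfrom collections import OrderedDict\n"
for info in get_imports("<string>", source=src):
    print(info)
# ImportInfo('os', '<string>', 1, False)
# ImportInfo('collections', '<string>', 2, False)
# ImportInfo('collections.OrderedDict', '<string>', 2, True)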
def follow_card(card, deck_size, shuffles, shuffler):
"""Follow position of the card in deck of deck_size during shuffles."""
position = card
for shuffle, parameter in shuffles:
shuffling = shuffler(shuffle)
position = shuffling(deck_size, position, parameter)
return position | 5,324,473 |
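A hedged sketch of a compatible `shuffler`; the shuffle names are assumptions — the function only requires `shuffler(name)` to return a callable `(deck_size, position, parameter) -> new position`:

def shuffler(name):
    ops = {
        "deal_into_new_stack": lambda n, pos, _: n - 1 - pos,
        "cut": lambda n, pos, k: (pos - k) % n,
        "deal_with_increment": lambda n, pos, k: (pos * k) % n,
    }
    return ops[name]

# Card 3 in a 10-card deck: cut 3, then deal into a new stack -> position 9
follow_card(3, 10, [("cut", 3), ("deal_into_new_stack", None)], shuffler)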
def get_secure_directory():
"""get a temporary secure sub directory"""
temp_dir = tempfile.mkdtemp(suffix='',prefix='')
return temp_dir | 5,324,474 |
def area(shape):
"""Multimethod dispatch key"""
return shape.get('type') | 5,324,475 |
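A minimal sketch of how such a dispatch key is typically consumed (the registry below is an assumption, not part of the source):

_area_impls = {
    "circle": lambda s: 3.14159 * s["radius"] ** 2,
    "rect": lambda s: s["w"] * s["h"],
}

def compute_area(shape):
    return _area_impls[area(shape)](shape)

compute_area({"type": "rect", "w": 3, "h": 4})   # -> 12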
def get_trie_properties(trie, offsets, values):
"""Obtain the length of every trigger in the trie."""
anchor_length = np.zeros(len(values), dtype=np.int32)
start, end = 0, 0
for idx, key in enumerate(trie.iterkeys()):
end = offsets[idx]
anchor_length[start:end] = len(key)
start = end
return anchor_length | 5,324,476 |
def _register_when_ui_available():
"""Register a function emits the SceneEvents.WhenUIAvailable event.
This will be emitted when the UI is first brought up and the event
loop begins running.
:return:
"""
# Import here in case UI is not available.
import hdefereval
# Emit the event after the event loop has run once.
hdefereval.executeDeferredAfterWaiting(_emit_ui_available, 1) | 5,324,477 |
def get_available_qmix_configs(configs_dir=None):
"""
Create a list of available qmix configurations
Parameters
----------
configs_dir : string or None
The parent directory containing the Qmix configurations.
If ``None``, assume the default directory used by
Qmix Elements, i.e.,
`C:/Users/Public/Documents/QmixElements/Projects/default_project/Configurations/`.
Returns
-------
list of strings
Names of available Qmix configurations.
Raises
------
ValueError
If the configuration directory does not exist.
"""
if configs_dir is None:
configs_dir = DEFAULT_CONFIGS_DIR
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
if not os.path.exists(configs_dir):
msg = 'The configuration directory does not exist: %s' % configs_dir
raise ValueError(msg)
return get_immediate_subdirectories(configs_dir) | 5,324,478 |
def besseli(X, order=0, Nk=64):
""" Approximates the modified Bessel function of the first kind,
of either order zero or one.
Note: passing float32 input can lead to numerical issues.
Args:
X (torch.tensor): Input (N, 1).
order (int, optional): 0 or 1, defaults to 0.
Nk (int, optional): Number of terms in the summation; a higher number gives a
better approximation. Defaults to 64.
Returns:
I (torch.tensor): Modified Bessel function of the first kind (N, 1).
See also:
https://mathworld.wolfram.com/ModifiedBesselFunctionoftheFirstKind.html
"""
device = X.device
dtype = X.dtype
if len(X.shape) == 1:
X = X[:, None]
N = X.shape[0]
else:
N = 1
# Compute factorial term
X = X.repeat(1, Nk)
K = torch.arange(0, Nk, dtype=dtype, device=device)
K = K.repeat(N, 1)
K_factorial = (K + 1).lgamma().exp()
if order == 0:
# ..0th order
i = torch.sum((0.25 * X ** 2) ** K / (K_factorial ** 2), dim=1, dtype=torch.float64)
else:
# ..1st order
i = torch.sum(
0.5 * X * ((0.25 * X ** 2) ** K /
(K_factorial * torch.exp(torch.lgamma(K + 2)))), dim=1, dtype=torch.float64)
return i | 5,324,479 |
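A quick cross-check against scipy (assumes torch and scipy are installed; float64 input, per the note above):

import numpy as np
import torch
from scipy.special import i0

x = torch.tensor([0.5, 1.0, 2.0], dtype=torch.float64)
approx = besseli(x, order=0).numpy()
assert np.allclose(approx, i0([0.5, 1.0, 2.0]))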
def proxy_py_subprocess(request: Any) -> Generator[int, None, None]:
"""Instantiate proxy.py in a subprocess for testing.
NOTE: Does not wait for the proxy to start up.
Ensure instance check in your tests.
After the testing is over, tear it down.
"""
run_id = str(int(time.time())) + '-' + str(int(random() * pow(10, 6)))
port_file = TEMP_DIR / ('proxy-%s.port' % run_id)
ca_cert_dir = TEMP_DIR / ('certificates-%s' % run_id)
os.makedirs(ca_cert_dir, exist_ok=True)
proxy_cmd = (
'python', '-m', 'proxy',
'--hostname', '127.0.0.1',
'--port', '0',
'--port-file', str(port_file),
'--enable-web-server',
'--plugin', 'proxy.plugin.WebServerPlugin',
'--enable-reverse-proxy',
'--plugin', 'proxy.plugin.ReverseProxyPlugin',
'--num-acceptors', '3',
'--num-workers', '3',
'--ca-cert-dir', str(ca_cert_dir),
'--log-level', 'd',
) + tuple(request.param.split())
proxy_proc = Popen(proxy_cmd, stderr=subprocess.STDOUT)
# Needed because port file might not be available immediately
while not port_file.exists():
time.sleep(1)
try:
yield int(port_file.read_text())
finally:
proxy_proc.terminate()
proxy_proc.wait() | 5,324,480 |
def laplacian_total_variation_kernel(x, y, sigma=1.0, **kwargs):
"""Geodesic Laplacian kernel based on total variation distance."""
dist = np.abs(x - y).sum() / 2.0
return np.exp(-sigma * dist) | 5,324,481 |
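A worked example on two discrete distributions (total variation distance 0.1):

import numpy as np

p = np.array([0.2, 0.3, 0.5])
q = np.array([0.1, 0.4, 0.5])
# |p - q|.sum() / 2 = 0.1, so the kernel value is exp(-2.0 * 0.1) ≈ 0.8187
print(laplacian_total_variation_kernel(p, q, sigma=2.0))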
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
line, text = '', ''
# Iterate 200 times, picking random keys and values
for count in range(0, 200):
key = random.choice(list(mimic_dict.keys()))
val = mimic_dict.get(key)
line += ('{} {} '.format(key, random.choice(val)))
# print 70 columns per line
if len(line) > 70:
text += line + '\n'
line = ''
print(text)
return True | 5,324,482 |
def get_dataset(
dataset_name: str,
path: Path = default_dataset_path,
regenerate: bool = False,
) -> TrainDatasets:
"""
Get a repository dataset.
The datasets that can be obtained through this function have been used
with different processing over time by several papers (e.g., [SFG17]_,
[LCY+18]_, and [YRD15]_).
Parameters
----------
dataset_name
name of the dataset, for instance "m4_hourly"
regenerate
whether to regenerate the dataset even if a local file is present.
If this flag is False and the file is present, the dataset will not
be downloaded again.
path
where the dataset should be saved
Returns
-------
dataset obtained by either downloading or reloading from local file.
"""
dataset_path = materialize_dataset(dataset_name, path, regenerate)
return load_datasets(
metadata=dataset_path,
train=dataset_path / "train",
test=dataset_path / "test",
) | 5,324,483 |
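A short usage sketch (mirrors the GluonTS dataset repository; the dataset is downloaded on the first call):

dataset = get_dataset("m4_hourly")
entry = next(iter(dataset.train))
print(dataset.metadata.freq, len(entry["target"]))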
def create_queue_from_filter_selector():
"""Create queue for a GPU device or,
if it is not available, for a CPU device.
Create an in-order queue with profiling enabled.
"""
q = dpctl.SyclQueue("gpu,cpu", property=("in_order", "enable_profiling"))
print("Queue {} is in order: {}".format(q, q.is_in_order))
# display the device used
print("Device targeted by the queue:")
q.sycl_device.print_device_info() | 5,324,484 |
def test_view(regression_id):
"""
Show a single regression test.
:param regression_id: id of the regression test
:type regression_id: int
:return: Regression test
:rtype: dict
"""
test = RegressionTest.query.filter(RegressionTest.id == regression_id).first()
if test is None:
g.log.error(f'requested regression test with id: {regression_id} not found!')
abort(404)
return {
'test': test
} | 5,324,485 |
def test44():
"""
test44: like test43, but get the task host from a StaticServer in the
infra model
"""
cap = Capture()
class Infra(InfraModel):
setup_server = StaticServer("setup_helper", "127.0.0.1")
infra = Infra("helper")
class NS(NamespaceModel):
task_performer = Role("tp", host_ref=Infra.setup_server)
ns = NS()
ns.set_infra_model(infra)
class Cfg(ConfigModel):
with_config_options(default_task_role=NS.task_performer)
a_task = ReportingTask("atask", report=cap)
cfg = Cfg()
cfg.set_namespace(ns)
assert cfg.a_task.get_task_host() == "127.0.0.1" | 5,324,486 |
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_post(uuid, tapi_path_computation_path_objective_function=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_post
creates tapi.path.computation.PathObjectiveFunction # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_objective_function: tapi.path.computation.PathObjectiveFunction to be added to list
:type tapi_path_computation_path_objective_function: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_objective_function = TapiPathComputationPathObjectiveFunction.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | 5,324,487 |
def tbl_2_nparray(in_tbl, flds):
"""Form the TableToNumPyArray to account for nulls for various dtypes.
This is essentially a shortcut to `arcpy.da.TableToNumPyArray`
Requires
--------
`in_tbl` :
table, or featureclass table name
`flds` :
list of field names
`skip_nulls` = False :
set within function
`null_value` :
determined from the dtype of the array...
otherwise you may as well do it manually
Source
------
arraytools, apt.py module
"""
nulls = {'Double':np.nan,
'Integer':np.iinfo(np.int32).min,
'OID':np.iinfo(np.int32).min,
'String':"None"}
#
fld_dict = {i.name: i.type for i in arcpy.ListFields(in_tbl)}
null_dict = {f:nulls[fld_dict[f]] for f in flds}
a = arcpy.da.TableToNumPyArray(in_table=in_tbl,
field_names=flds,
skip_nulls=False,
null_value=null_dict)
return a | 5,324,488 |
def _get_paper_page(url: str) -> object: # pragma: no cover
"""
Get a paper page element from a provided URL
Parameters
----------
url : str
The paper URL
Returns
-------
Object
A HTML element representing the paper given by the provided URL
"""
response = common_util.try_success(lambda: DefaultSession().get(url), 2)
return html.fromstring(response.content) | 5,324,489 |
def check_grid_side(ctx, param, value: int) -> int:
"""
Validate the grid side length (click-style option callback).
:type value: int
"""
if value < 5:
raise ValueError("all sides of grid must be at least 5")
return value | 5,324,490 |
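Expected behaviour of the callback (the first two click arguments are unused here):
check_grid_side(None, None, 7)   # -> 7
check_grid_side(None, None, 4)   # raises ValueError("all sides of grid must be at least 5")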
def distributed_transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False,
gpu_nums=2):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = bert_utils.get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = bert_utils.reshape_to_matrix(input_tensor)
all_layer_outputs = []
gpu_partition = int(num_hidden_layers/gpu_nums)
gpu_id = -1  # gpu_id runs from 0 to gpu_nums - 1
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
if np.mod(layer_idx, gpu_partition) == 0:
gpu_id += 1
with tf.device('/gpu:{}'.format(gpu_id)):
tf.logging.info(" apply transformer attention {}-th layer on device {} ".format(layer_idx, gpu_id))
print(" apply transformer attention {}-th layer on device {} ".format(layer_idx, gpu_id))
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = bert_utils.reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = bert_utils.reshape_from_matrix(prev_output, input_shape)
return final_output | 5,324,491 |
def cli(version=False):
"""
A simple AbemaTV and other we(e)bsite video downloader
"""
if version:
print('yuu v{} - Created by NoAiOne'.format(__version__))
exit(0) | 5,324,492 |
def create(ctx, message, test=False):
""" Invoke alembic to generate new migration from the existing db
and service' models.
"""
get_env(test)
app = create_app()
with app.app_context():
import alembic.config
alembic.config.main(argv=[
'-c',
'migrations/alembic.ini',
'revision',
'--autogenerate',
'-m',
message,
]) | 5,324,493 |
def sgrank(doc, kp_count, window=1500, idf=None):
"""
Extracts keyphrases from a text using SGRank algorithm.
Args:
doc: a spacy.Doc object
kp_count: number of keyphrases
window: word co-occurrence window length
idf: a dictionary (string, float) of inverse document frequencies
Returns:
list of keyphrases
Raises:
TypeError if idf is not a dictionary or None
"""
if isinstance(idf, dict):
idf = defaultdict(lambda: 1, idf)
elif idf is not None:
msg = "idf must be a dictionary, not {}".format(type(idf))
raise TypeError(msg)
cutoff_factor = 3000
token_count = len(doc)
top_n = max(int(token_count * 0.2), 100)
min_freq = 1
if 1500 < token_count < 4000:
min_freq = 2
elif token_count >= 4000:
min_freq = 3
terms = [tok for toks in (ngrams(doc, n) for n in range(1,7)) for tok in toks]
term_strs = {id(term): normalize(term) for term in terms}
# Count terms and filter by the minimum term frequency
counts = Counter(term_strs[id(term)] for term in terms)
term_freqs = {term_str: freq for term_str, freq in counts.items()
if freq >= min_freq}
if idf:
# For ngrams with n >= 2 we have idf = 1
modified_tfidf = {term_str: freq * idf[term_str] if ' ' not in term_str else freq
for term_str, freq in term_freqs.items()}
else:
modified_tfidf = term_freqs
# Take top_n values, but also those that have equal tfidf with the top_n:th value
# This guarantees that the algorithm produces similar results with every run
ordered_tfidfs = sorted(modified_tfidf.items(), key=lambda t: t[1], reverse=True)
top_n = min(top_n, len(ordered_tfidfs))
top_n_value = ordered_tfidfs[top_n-1][1]
top_terms = set(str for str, val in it.takewhile(lambda t: t[1] >= top_n_value, ordered_tfidfs))
terms = [term for term in terms if term_strs[id(term)] in top_terms]
term_weights = {}
# Calculate term weights
for term in terms:
term_str = term_strs[id(term)]
term_len = math.sqrt(len(term))
term_freq = term_freqs[term_str]
occ_factor = math.log(cutoff_factor / (term.start + 1))
# Sum the frequencies of all other terms that contain this term
subsum_count = sum(term_freqs[other] for other in top_terms
if other != term_str and term_str in other)
freq_diff = term_freq - subsum_count
if idf and term_len == 1:
freq_diff *= idf[term_str]
weight = freq_diff * occ_factor * term_len
if term_str in term_weights:
# log(1/x) is a decreasing function, so the first occurrence has largest weight
if weight > term_weights[term_str]:
term_weights[term_str] = weight
else:
term_weights[term_str] = weight
# Use only positive-weighted terms
terms = [term for term in terms if term_weights[term_strs[id(term)]] > 0]
num_co_occurrences = defaultdict(lambda: defaultdict(int))
total_log_distance = defaultdict(lambda: defaultdict(float))
# Calculate term co-occurrences and co-occurrence distances within the co-occurrence window
for t1, t2 in it.combinations(terms, 2):
dist = abs(t1.start - t2.start)
if dist <= window:
t1_str = term_strs[id(t1)]
t2_str = term_strs[id(t2)]
if t1_str != t2_str:
num_co_occurrences[t1_str][t2_str] += 1
total_log_distance[t1_str][t2_str] += math.log(window / max(1, dist))
# Weight the graph edges based on word co-occurrences
edge_weights = defaultdict(lambda: defaultdict(float))
for t1, neighbors in total_log_distance.items():
for n in neighbors:
edge_weights[t1][n] = (total_log_distance[t1][n] / num_co_occurrences[t1][n]) \
* term_weights[t1] * term_weights[n]
# Normalize edge weights by sum of outgoing edge weights
norm_edge_weights = []
for t1, neighbors in edge_weights.items():
weights_sum = sum(neighbors.values())
norm_edge_weights.extend((t1, n, weight / weights_sum)
for n, weight in neighbors.items())
term_graph = networkx.Graph()
term_graph.add_weighted_edges_from(norm_edge_weights)
term_ranks = networkx.pagerank_scipy(term_graph)
if 0 < kp_count < 1:
kp_count = round(kp_count * len(term_ranks))
kp_count = int(kp_count)
top_phrases = top_keys(kp_count, term_ranks)
return top_phrases | 5,324,494 |
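A hedged usage sketch (assumes spaCy with the en_core_web_sm model installed, and that the module's helpers — ngrams, normalize, top_keys — are available):

import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Compatibility of systems of linear constraints over the set of "
          "natural numbers is a classic topic in keyphrase extraction papers.")
print(sgrank(doc, kp_count=5))   # top 5 keyphrases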
def save_quantitative_results(quantitative_results: List[Tuple[int, List[float]]], metric_names: List[str], output_file: str):
"""
Saves the quantitative results into an output file.
The quantitative results are passed as a list of entries, where each entry consists
of the entry id, as well as a list of metric values.
Example quantitative results: [(0, [0.81, 0.78, 0.88]), (1, [0.62, 0.7, 0.5]), ...]
A list of metric names, in the same order as they appear in the quantitative results, must
also be provided.
Args:
quantitative_results (list(tuple(int, list[float]))): the quantitative results
metric_names (list[str]): the names of the metrics
output_file (str): the output location
"""
metric_means = []
with open(output_file, 'w') as f:
f.write('Overall results: AVERAGE +- STD DEVIATION\n')
for idx, metric_name in enumerate(metric_names):
raw_numbers = np.array([x[1][idx] for x in quantitative_results])
metric_avg = raw_numbers.mean()
metric_std = raw_numbers.std()
metric_means.append(metric_avg)
f.write(
f'{metric_name: <15} {metric_avg:.4f} +- {metric_std:.4f}\n')
f.write('\n\nIndividual examples:\n')
f.write(f'Metrics order {" - ".join(metric_names)}\n')
for (idx, values) in quantitative_results:
rounded_values = [str(round(i, 4)) for i in values]
f.write(f'{idx:<3}: {", ".join(rounded_values)}\n')
return metric_means | 5,324,495 |
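A short usage example (the metric names and output path below are illustrative):

results = [(0, [0.81, 0.78]), (1, [0.62, 0.70])]
means = save_quantitative_results(results, ["dice", "iou"], "/tmp/results.txt")
print(means)   # [0.715, 0.74]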
def interpret(parsed, source_url, base_href=None, item=None,
use_rel_syndication=True, want_json=False, fetch_mf2_func=None):
"""Interpret a permalink of unknown type. Finds the first interesting
h-* element, and delegates to :func:`interpret_entry` if it is an
h-entry or :func:`interpret_event` for an h-event
:param dict parsed: the result of parsing a mf2 document
:param str source_url: the URL of the source document (used for authorship
discovery)
:param str base_href: (optional) the href value of the base tag
:param dict item: (optional) the item to be parsed. If provided,
this will be used instead of the first element on the page.
:param boolean use_rel_syndication: (optional, default True) Whether
to include rel=syndication in the list of syndication sources. Sometimes
useful to set this to False when parsing h-feeds that erroneously include
rel=syndication on each entry.
:param boolean want_json: (optional, default False) If true, the result
will be pure json with datetimes as strings instead of python objects
:param callable fetch_mf2_func: (optional) function to fetch mf2 parsed
output for a given URL.
:return: a dict as described by interpret_entry or interpret_event, or None
"""
if not item:
item = find_first_entry(parsed, ['h-entry', 'h-event'])
if item:
types = item.get('type', [])
if 'h-event' in types:
return interpret_event(
parsed, source_url, base_href=base_href, hevent=item,
use_rel_syndication=use_rel_syndication, want_json=want_json,
fetch_mf2_func=fetch_mf2_func)
elif 'h-entry' in types or 'h-cite' in types:
return interpret_entry(
parsed, source_url, base_href=base_href, hentry=item,
use_rel_syndication=use_rel_syndication, want_json=want_json,
fetch_mf2_func=fetch_mf2_func) | 5,324,496 |
def display_img(i, x, style, is_val=False):
"""
Display image.
"""
# Currently generated image
img = x
if is_val:
#img = ndimage.median_filter(img, 3)
fname = f"images/output/{style}_{i}_val.png"
else:
fname = f"images/output/{style}_{i}.png"
# Save image
imsave(fname, img)
print('Image saved as', fname) | 5,324,497 |
def generate(*drf_globs: DRF_list) -> List[str]:
"""
Generates a list of valid requests from a DRF glob.
:param drf_globs: A list of DRF globs.
:return: A list of valid requests.
"""
results = []
def parse_globs(drf_globs):
for drf_glob in drf_globs:
# Parse nested lists and tuples
if type(drf_glob) in [list, tuple]:
parse_globs(drf_glob)
continue  # skip glob expansion for the nested container itself
tokens = re.split(r'[{}]', drf_glob)
iterations = []
for token in tokens:
if '..' in token:
first, second = token.split('..')
iterations.append(token_range(first, second))
elif ',' in token:
iterations.append(token.split(','))
else:
if token != '':
iterations.append([token])
for result in product(*iterations):
results.append(''.join(map(str, result)))
parse_globs(drf_globs)
return results | 5,324,498 |
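A usage sketch (assumes the module's token_range helper expands numeric ranges):

generate("Z:ACLK{1..3}")      # -> ['Z:ACLK1', 'Z:ACLK2', 'Z:ACLK3']
generate("M:OUT{TMP,TNK}")    # -> ['M:OUTTMP', 'M:OUTTNK']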
def MoveItemToUserFromCharacter(request, callback, customData = None, extraHeaders = None):
"""
Moves an item from a character's inventory into the owning user's inventory.
https://docs.microsoft.com/rest/api/playfab/server/player-item-management/moveitemtouserfromcharacter
"""
if not PlayFabSettings.DeveloperSecretKey:
raise PlayFabErrors.PlayFabException("Must have DeveloperSecretKey set to call this method")
def wrappedCallback(playFabResult, error):
if callback:
callback(playFabResult, error)
PlayFabHTTP.DoPost("/Server/MoveItemToUserFromCharacter", request, "X-SecretKey", PlayFabSettings.DeveloperSecretKey, wrappedCallback, customData, extraHeaders) | 5,324,499 |