content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def blit_array(surface, array):
    """
    Generates image pixels from a JNumeric array.
    Arguments include destination Surface and array of integer colors.
    JNumeric required as specified in numeric module.
    """
    # One-time module initialisation before the first blit.
    if not _initialized:
        _init()
    if len(array.shape) == 2:
        # 2D array: values are already packed integer colors; transpose from
        # (x, y) to row-major (y, x) order and flatten for setRGB.
        data = numeric.transpose(array, (1,0))
        data = numeric.ravel(data)
    else:
        # 3D array: pack the (R, G, B) channel planes into 0xRRGGBB integers,
        # then transpose/flatten as in the 2D case.
        data = array[:,:,0]*0x10000 | array[:,:,1]*0x100 | array[:,:,2]
        data = numeric.transpose(data, (1,0))
        data = numeric.ravel(data)
    if not surface.getColorModel().hasAlpha():
        # No alpha channel: write the packed pixels directly.
        surface.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
    else:
        # Destination has an alpha channel: write the RGB data into an
        # opaque intermediate surface, then draw that onto the destination
        # so the destination's alpha handling is preserved.
        surf = Surface((surface.width,surface.height), BufferedImage.TYPE_INT_RGB)
        surf.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
        g2d = surface.createGraphics()
        g2d.drawImage(surf, 0, 0, None)
        g2d.dispose()
    return None
def soerp_numeric(slc, sqc, scp, var_moments, func0, title=None, debug=False,
    silent=False):
    """
    This performs the same moment calculations, but expects that all input
    derivatives and moments have been put in standardized form. It can also
    describe the variance contributions and print out any output distribution
    information, both raw and central moments.
    Parameters
    ----------
    slc : array
        1st-order standardized derivatives (i.e., multiplied by the standard
        deviation of the related input)
    sqc : array
        2nd-order derivatives (i.e., multiplied by the standard
        deviation squared, or variance, of the related input)
    scp : 2d-array
        2nd-order cross-derivatives (i.e., multiplied by the two standard
        deviations of the related inputs)
    var_moments : 2-d array
        Standardized moments where row[i] contains the first 9 moments of
        variable x[i]. FYI: the first 3 values should always be [1, 0, 1]
    func0 : scalar
        System mean (i.e. value of the system evaluated at the means of all
        the input variables)
    Optional
    --------
    title : str
        Identifier for results that get printed to the screen
    debug : bool, false by default
        If true, all intermediate calculation results get printed to the screen
    silent : bool, false by default
        If true, nothing gets printed to the screen (overrides debug).
    Returns
    -------
    moments : list
        The first four standard moments (mean, variance, skewness and kurtosis
        coefficients)
    Example
    -------
    Example taken from the original SOERP user guide by N. D. Cox:
    >>> norm_moments = [1, 0, 1, 0, 3, 0, 15, 0, 105]
    >>> lc = [-802.65, -430.5]
    >>> qc = [205.54, 78.66]
    >>> cp = np.array([[0, -216.5], [-216.5, 0]])
    >>> vm = np.array([norm_moments, norm_moments])
    >>> f0 = 4152
    >>> soerp_numeric(lc, qc, cp, vm, f0,
    ...     title='EXAMPLE FROM ORIGINAL SOERP USER GUIDE')
    ********************************************************************************
    **************** SOERP: EXAMPLE FROM ORIGINAL SOERP USER GUIDE *****************
    ********************************************************************************
    Variance Contribution of lc[x0]: 66.19083%
    Variance Contribution of lc[x1]: 19.04109%
    Variance Contribution of qc[x0]: 8.68097%
    Variance Contribution of qc[x1]: 1.27140%
    Variance Contribution of cp[x0, x1]: 4.81572%
    ********************************************************************************
    MEAN-INTERCEPT (EDEL1).................... 2.8420000E+02
    MEAN...................................... 4.4362000E+03
    SECOND MOMENT (EDEL2)..................... 1.0540873E+06
    VARIANCE (VARDL).......................... 9.7331770E+05
    STANDARD DEVIATION (RTVAR)................ 9.8656865E+02
    THIRD MOMENT (EDEL3)...................... 1.4392148E+09
    THIRD CENTRAL MOMENT (MU3DL).............. 5.8640938E+08
    COEFFICIENT OF SKEWNESS SQUARED (BETA1)... 3.7293913E-01
    COEFFICIENT OF SKEWNESS (RTBT1)........... 6.1068742E-01
    FOURTH MOMENT (EDEL4)..................... 5.0404781E+12
    FOURTH CENTRAL MOMENT (MU4DL)............. 3.8956371E+12
    COEFFICIENT OF KURTOSIS (BETA2)........... 4.1121529E+00
    ********************************************************************************
    """
    if not silent:
        print('\n', '*'*80)
        if title:
            print('{:*^80}'.format(' SOERP: ' + title + ' '))
    ############################
    # Raw moments 0..4 of the output deviation, computed by the external
    # rawmoment helper from the standardized derivatives and input moments.
    vy = np.empty(5)
    if debug and not silent:
        print('*'*80)
    for k in range(5):
        vy[k] = rawmoment(slc, sqc, scp, var_moments, k)
        if debug and not silent:
            print('Raw Moment {}: {}'.format(k, vy[k]))
    ############################
    # Central moments 0..4 derived from the raw moments.
    vz = np.empty(5)
    if debug and not silent:
        print('*'*80)
    for k in range(5):
        vz[k] = centralmoment(vy, k)
        if debug and not silent:
            print('Central Moment {}: {}'.format(k, vz[k]))
    # System mean = first raw moment (mean intercept) plus the nominal value.
    sysmean = float(vy[1] + func0)
    ############################
    # Calculate variance contributions
    vc_lc, vc_qc, vc_cp = variance_components(slc, sqc, scp, var_moments, vz)
    vlc, vqc, vcp = variance_contrib(vc_lc, vc_qc, vc_cp, vz)
    n = len(slc)
    if not silent:
        print('*'*80)
        for i in range(n):
            print('Variance Contribution of lc[x{:d}]: {:7.5%}'.format(i, vlc[i]))
        for i in range(n):
            print('Variance Contribution of qc[x{:d}]: {:7.5%}'.format(i, vqc[i]))
        # Cross-term contributions: only the upper triangle (i < j) is printed.
        for i in range(n - 1):
            for j in range(i + 1, n):
                print('Variance Contribution of cp[x{:d}, x{:d}]: {:7.5%}'.format(i, j, vcp[i, j]))
    ############################
    # Skewness/kurtosis coefficients; guard against zero variance to avoid
    # division by zero when the output is degenerate.
    stdev = vz[2]**(0.5)
    if stdev:
        rtbt1 = vz[3]/vz[2]**(1.5)
        beta2 = vz[4]/vz[2]**2
    else:
        rtbt1 = 0.0
        beta2 = 0.0
    beta1 = rtbt1**2
    if not silent:
        print('*'*80)
        print('MEAN-INTERCEPT (EDEL1)....................','{: 8.7E}'.format(vy[1]))
        print('MEAN......................................','{: 8.7E}'.format(sysmean))
        print('SECOND MOMENT (EDEL2).....................','{: 8.7E}'.format(vy[2]))
        print('VARIANCE (VARDL)..........................','{: 8.7E}'.format(vz[2]))
        print('STANDARD DEVIATION (RTVAR)................','{: 8.7E}'.format(stdev))
        print('THIRD MOMENT (EDEL3)......................','{: 8.7E}'.format(vy[3]))
        print('THIRD CENTRAL MOMENT (MU3DL)..............','{: 8.7E}'.format(vz[3]))
        print('COEFFICIENT OF SKEWNESS SQUARED (BETA1)...','{: 8.7E}'.format(beta1))
        print('COEFFICIENT OF SKEWNESS (RTBT1)...........','{: 8.7E}'.format(rtbt1))
        print('FOURTH MOMENT (EDEL4).....................','{: 8.7E}'.format(vy[4]))
        print('FOURTH CENTRAL MOMENT (MU4DL).............','{: 8.7E}'.format(vz[4]))
        print('COEFFICIENT OF KURTOSIS (BETA2)...........','{: 8.7E}'.format(beta2))
        print('*'*80)
    return [sysmean, vz[2], rtbt1, beta2]
def generate_IO_examples(program, N, L, V):
    """Given a program, randomly generate N input/output examples.

    Parameters
    ----------
    program : object
        Exposes ``ins`` (list of input types), ``bounds`` (per-argument
        ``(min, max)`` ranges), ``fun`` (the program callable, taking the
        input list) and ``out`` (the output type, ``int`` or ``[int]``).
    N : int
        Number of IO pairs to generate.
    L : int
        Length used for randomly generated integer-list inputs.
    V : int
        Upper bound that every output value must respect.

    Returns
    -------
    list of (input_value, output_value) tuples, length N.

    Raises
    ------
    Exception
        If an input type other than ``int`` or ``[int]`` is requested.
    """
    input_types = program.ins
    input_nargs = len(input_types)
    # Generate N input-output pairs
    IO = []
    for _ in range(N):
        input_value = [None] * input_nargs
        for a in range(input_nargs):
            minv, maxv = program.bounds[a]
            if input_types[a] == int:
                input_value[a] = np.random.randint(minv, maxv)
            elif input_types[a] == [int]:
                input_value[a] = list(np.random.randint(minv, maxv, size=L))
            else:
                # BUG FIX: the original concatenated a type object to a str,
                # which raises TypeError instead of the intended message.
                raise Exception("Unsupported input type " + str(input_types[a]) +
                                " for random input generation")
        output_value = program.fun(input_value)
        IO.append((input_value, output_value))
        # BUG FIX: the bound check used to sit after the loop, so only the
        # last example was validated; check every generated example instead.
        assert (program.out == int and output_value <= V) or \
               (program.out == [int] and len(output_value) == 0) or \
               (program.out == [int] and max(output_value) <= V)
    return IO
import os
def populate_runtime_info(query, impala, converted_args, timeout_secs=maxint):
  """Runs the given query by itself repeatedly until the minimum memory is determined
  with and without spilling. Potentially all fields in the Query class (except
  'sql') will be populated by this method. 'required_mem_mb_without_spilling' and
  the corresponding runtime field may still be None if the query could not be run
  without spilling.
  converted_args.samples and converted_args.max_conflicting_samples control the
  reliability of the collected information. The problem is that memory spilling or usage
  may differ (by a large amount) from run to run due to races during execution. The
  parameters provide a way to express "X out of Y runs must have resulted in the same
  outcome". Increasing the number of samples and decreasing the tolerance (max conflicts)
  increases confidence but also increases the time to collect the data.
  """
  LOG.info("Collecting runtime info for query %s: \n%s", query.name, query.sql)
  samples = converted_args.samples
  max_conflicting_samples = converted_args.max_conflicting_samples
  results_dir = converted_args.results_dir
  mem_limit_eq_threshold_mb = converted_args.mem_limit_eq_threshold_mb
  mem_limit_eq_threshold_percent = converted_args.mem_limit_eq_threshold_percent
  runner = QueryRunner(impalad=impala.impalads[0], results_dir=results_dir,
      common_query_options=converted_args.common_query_options,
      test_admission_control=converted_args.test_admission_control,
      use_kerberos=converted_args.use_kerberos, check_if_mem_was_spilled=True)
  runner.connect()
  limit_exceeded_mem = 0
  non_spill_mem = None
  spill_mem = None
  report = None
  mem_limit = None
  old_required_mem_mb_without_spilling = query.required_mem_mb_without_spilling
  old_required_mem_mb_with_spilling = query.required_mem_mb_with_spilling
  profile_error_prefix = query.logical_query_id + "_binsearch_error"
  # TODO: This method is complicated enough now that breaking it out into a class may be
  # helpful to understand the structure.
  def update_runtime_info():
    # Record the best (lowest) memory requirement seen so far for the outcome
    # (spilled vs. not spilled) of the most recent report.
    required_mem = min(mem_limit, impala.min_impalad_mem_mb)
    if report.mem_was_spilled:
      if (
          query.required_mem_mb_with_spilling is None or
          required_mem < query.required_mem_mb_with_spilling
      ):
        query.required_mem_mb_with_spilling = required_mem
        query.solo_runtime_secs_with_spilling = report.runtime_secs
        query.solo_runtime_profile_with_spilling = report.profile
    elif (
        query.required_mem_mb_without_spilling is None or
        required_mem < query.required_mem_mb_without_spilling
    ):
      query.required_mem_mb_without_spilling = required_mem
      query.solo_runtime_secs_without_spilling = report.runtime_secs
      assert report.runtime_secs is not None, report
      query.solo_runtime_profile_without_spilling = report.profile
  def get_report(desired_outcome=None):
    # Run the query up to 'samples' times and return the median-runtime report
    # of the majority outcome, or None if no outcome can reach the required
    # majority (samples - max_conflicting_samples).
    reports_by_outcome = defaultdict(list)
    leading_outcome = None
    for remaining_samples in xrange(samples - 1, -1, -1):
      report = runner.run_query(query, mem_limit, run_set_up=True,
          timeout_secs=timeout_secs, retain_profile=True)
      if report.timed_out:
        report.write_query_profile(
            os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
        raise QueryTimeout(
            "query {0} timed out during binary search".format(query.logical_query_id))
      if report.other_error:
        report.write_query_profile(
            os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
        raise Exception(
            "query {0} errored during binary search: {1}".format(
                query.logical_query_id, str(report.other_error)))
      LOG.debug("Spilled: %s" % report.mem_was_spilled)
      if not report.has_query_error():
        if query.result_hash is None:
          query.result_hash = report.result_hash
        elif query.result_hash != report.result_hash:
          report.write_query_profile(
              os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
          raise Exception(
              "Result hash mismatch for query %s; expected %s, got %s" %
              (query.logical_query_id, query.result_hash, report.result_hash))
      if report.not_enough_memory:
        outcome = "EXCEEDED"
      elif report.mem_was_spilled:
        outcome = "SPILLED"
      else:
        outcome = "NOT_SPILLED"
      reports_by_outcome[outcome].append(report)
      if not leading_outcome:
        leading_outcome = outcome
        continue
      if len(reports_by_outcome[outcome]) > len(reports_by_outcome[leading_outcome]):
        leading_outcome = outcome
      if len(reports_by_outcome[leading_outcome]) + max_conflicting_samples == samples:
        break
      if (
          len(reports_by_outcome[leading_outcome]) + remaining_samples <
          samples - max_conflicting_samples
      ):
        return
      if desired_outcome \
          and len(reports_by_outcome[desired_outcome]) + remaining_samples \
              < samples - max_conflicting_samples:
        return
    reports = reports_by_outcome[leading_outcome]
    reports.sort(key=lambda r: r.runtime_secs)
    return reports[len(reports) / 2]
  if not any((old_required_mem_mb_with_spilling, old_required_mem_mb_without_spilling)):
    mem_estimate = estimate_query_mem_mb_usage(query, runner.impalad_conn)
    LOG.info("Finding a starting point for binary search")
    mem_limit = min(mem_estimate, impala.min_impalad_mem_mb) or impala.min_impalad_mem_mb
    while True:
      LOG.info("Next mem_limit: {0}".format(mem_limit))
      report = get_report()
      if not report or report.not_enough_memory:
        if report and report.not_enough_memory:
          limit_exceeded_mem = mem_limit
        if mem_limit == impala.min_impalad_mem_mb:
          LOG.warn(
              "Query couldn't be run even when using all available memory\n%s", query.sql)
          return
        mem_limit = min(2 * mem_limit, impala.min_impalad_mem_mb)
        continue
      update_runtime_info()
      if report.mem_was_spilled:
        spill_mem = mem_limit
      else:
        non_spill_mem = mem_limit
      break
  LOG.info("Finding minimum memory required to avoid spilling")
  lower_bound = max(limit_exceeded_mem, spill_mem)
  upper_bound = min(non_spill_mem or maxint, impala.min_impalad_mem_mb)
  while True:
    if old_required_mem_mb_without_spilling:
      mem_limit = old_required_mem_mb_without_spilling
      old_required_mem_mb_without_spilling = None
    else:
      mem_limit = (lower_bound + upper_bound) / 2
    LOG.info("Next mem_limit: {0}".format(mem_limit))
    should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
        or upper_bound - mem_limit < mem_limit_eq_threshold_mb
    report = get_report(desired_outcome=("NOT_SPILLED" if spill_mem else None))
    if not report:
      lower_bound = mem_limit
    elif report.not_enough_memory:
      lower_bound = mem_limit
      limit_exceeded_mem = mem_limit
    else:
      update_runtime_info()
      if report.mem_was_spilled:
        lower_bound = mem_limit
        spill_mem = min(spill_mem, mem_limit)
      else:
        upper_bound = mem_limit
        non_spill_mem = mem_limit
    if mem_limit == impala.min_impalad_mem_mb:
      break
    if should_break:
      if non_spill_mem:
        break
      lower_bound = upper_bound = impala.min_impalad_mem_mb
  # This value may be updated during the search for the absolute minimum.
  LOG.info(
      "Minimum memory to avoid spilling: %s MB" % query.required_mem_mb_without_spilling)
  LOG.info("Finding absolute minimum memory required")
  lower_bound = limit_exceeded_mem
  upper_bound = min(
      spill_mem or maxint, non_spill_mem or maxint, impala.min_impalad_mem_mb)
  while True:
    if old_required_mem_mb_with_spilling:
      mem_limit = old_required_mem_mb_with_spilling
      old_required_mem_mb_with_spilling = None
    else:
      mem_limit = (lower_bound + upper_bound) / 2
    LOG.info("Next mem_limit: {0}".format(mem_limit))
    should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
        or upper_bound - mem_limit < mem_limit_eq_threshold_mb
    report = get_report(desired_outcome="SPILLED")
    if not report or report.not_enough_memory:
      lower_bound = mem_limit
    else:
      update_runtime_info()
      upper_bound = mem_limit
    if should_break:
      if not query.required_mem_mb_with_spilling:
        if upper_bound - mem_limit < mem_limit_eq_threshold_mb:
          # IMPALA-6604: A fair amount of queries go down this path.
          LOG.info(
              "Unable to find a memory limit with spilling within the threshold of {0} "
              "MB. Using the same memory limit for both.".format(
                  mem_limit_eq_threshold_mb))
        query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
        query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
        query.solo_runtime_profile_with_spilling = \
            query.solo_runtime_profile_without_spilling
      break
  LOG.info("Minimum memory is %s MB" % query.required_mem_mb_with_spilling)
  # BUG FIX: the second condition used to re-test 'without_spilling', leaving
  # 'with_spilling' unchecked and risking a TypeError comparison against None.
  if (
      query.required_mem_mb_without_spilling is not None and
      query.required_mem_mb_with_spilling is not None and
      query.required_mem_mb_without_spilling < query.required_mem_mb_with_spilling
  ):
    # Query execution is not deterministic and sometimes a query will run without spilling
    # at a lower mem limit than it did with spilling. In that case, just use the lower
    # value.
    LOG.info(
        "A lower memory limit to avoid spilling was found while searching for"
        " the absolute minimum memory.")
    query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
    query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
    query.solo_runtime_profile_with_spilling = query.solo_runtime_profile_without_spilling
  LOG.debug("Query after populating runtime info: %s", query)
def secrecy_capacity(dist, rvs=None, crvs=None, rv_mode=None, niter=None, bound_u=None):
    """
    Compute the rate at which X and Y can agree upon a key with Z
    eavesdropping, without public communication. The capacity is the larger
    of the two directed secrecy capacities (X->Y and Y->X).

    Parameters
    ----------
    dist : Distribution
        The distribution of interest.
    rvs : iterable of iterables, len(rvs) == 2
        The indices of the random variables agreeing upon a secret key.
    crvs : iterable
        The indices of the eavesdropper.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', the the elements are interpreted as random
        variable names. If `None`, then the value of `dist._rv_mode` is
        consulted, which defaults to 'indices'.
    niter : int, None
        The number of hops to perform during optimization.
    bound_u : int, None
        The bound to use on the size of the variable U. If None, use the
        theoretical bound of |X|.

    Returns
    -------
    sc : float
        The secrecy capacity.
    """
    forward = secrecy_capacity_directed(dist, rvs[0], rvs[1], crvs,
                                        rv_mode=rv_mode, niter=niter,
                                        bound_u=bound_u)
    backward = secrecy_capacity_directed(dist, rvs[1], rvs[0], crvs,
                                         rv_mode=rv_mode, niter=niter,
                                         bound_u=bound_u)
    return max(forward, backward)
def encrypt_message(partner, message):
    """
    Encrypt a message with the partner's key matrix, block by block.

    :param partner: Name of partner whose private key file is used
    :param message: Message as string (space-padded to a whole number of blocks)
    :return: Message as numbers (numpy integer array)
    """
    matrix = get_encryption_matrix(get_key(get_private_filename(partner)))
    rank = np.linalg.matrix_rank(matrix)
    num_blocks = int(np.ceil(1.0 * len(message) / rank))
    total_len = rank * num_blocks
    # Pad with trailing spaces so the message fills whole blocks.
    encoded_message = string_to_numbers(message.ljust(total_len))
    encrypted_numbers = np.empty(total_len, dtype=int)
    for block_idx in range(num_blocks):
        lo = block_idx * rank
        hi = lo + rank
        block = np.empty(rank, dtype=int)
        for offset in range(rank):
            block[offset] = encoded_message[lo + offset]
        # Encrypt one block via matrix multiplication.
        encrypted_numbers[lo:hi] = np.dot(matrix, block)
    return encrypted_numbers
def create_event(title, start, end, capacity, location, coach, private):
    """Create a Class event, persist it to the database, and return it.

    The event starts with all `capacity` spots free.
    """
    new_event = Class(
        title=title,
        start=start,
        end=end,
        capacity=capacity,
        location=location,
        coach=coach,
        free=capacity,
        private=private,
    )
    db.session.add(new_event)
    db.session.commit()
    return new_event
def regret_obs(m_list, inputs, true_ymin=0):
    """Immediate regret using past observations.

    Parameters
    ----------
    m_list : list
        A list of GPy models generated by `OptimalDesign`.
    inputs : instance of `Inputs`
        The input space (unused here; kept for interface parity).
    true_ymin : float, optional
        The minimum value of the objective function.

    Returns
    -------
    res : ndarray
        For each model, the immediate regret computed from past
        observations:
        $r(n) = min y_i - y_{true}$
        where y_i are the observations recorded in the first `n`
        iterations, and y_{true} the minimum of the objective function.
    """
    return np.array([model.Y.min() - true_ymin for model in m_list],
                    dtype=float)
import numpy
def pmat2cam_center(P):
    """Return the camera center of a 3x4 projection matrix P.

    The center C is the (homogeneous) null vector of P, computed from
    cofactor determinants. See Hartley & Zisserman (2003) p. 163.

    Parameters
    ----------
    P : ndarray, shape (3, 4)
        Camera projection matrix.

    Returns
    -------
    ndarray, shape (3, 1)
        Inhomogeneous camera center as a column vector.
    """
    assert P.shape == (3, 4)
    determinant = numpy.linalg.det
    # Camera center via 3x3 minors of P (signs alternate); each determinant
    # drops one column of P. det is transpose-invariant, so stacking the
    # columns as rows is equivalent.
    X = determinant([P[:, 1], P[:, 2], P[:, 3]])
    Y = -determinant([P[:, 0], P[:, 2], P[:, 3]])
    Z = determinant([P[:, 0], P[:, 1], P[:, 3]])
    T = -determinant([P[:, 0], P[:, 1], P[:, 2]])
    # BUG FIX: the original called nx.transpose/nx.array, but 'nx' (an old
    # Numeric/numarray alias) is never imported -- only 'numpy' is.
    C_ = numpy.array([[X / T], [Y / T], [Z / T]])
    return C_
def _http_req(mocker):
    """Fixture: a mock object constrained to the HTTP ``Request`` spec."""
    request_mock = mocker.Mock(spec=Request)
    return request_mock
def transform_data(df, steps_per_floor_):
    """Transform original dataset.

    :param df: Input DataFrame.
    :param steps_per_floor_: The number of steps per-floor at 43 Tanner
        Street.
    :return: Transformed DataFrame with columns id, name, steps_to_desk.
    """
    # Full name is first and second name joined by a single space.
    full_name = concat_ws(' ', col('first_name'), col('second_name')).alias('name')
    # Steps to the desk scale linearly with the floor number.
    steps_to_desk = (col('floor') * lit(steps_per_floor_)).alias('steps_to_desk')
    return df.select(col('id'), full_name, steps_to_desk)
import logging
def get_client(bucket):
    """Get the Storage Client appropriate for the bucket.

    Args:
        bucket (str): Bucket name including its protocol prefix
            (``s3://...`` or ``gs://...``).

    Returns:
        ~Storage: Client for interacting with the cloud.

    Raises:
        ValueError: If the bucket string has no protocol or an unknown one.
    """
    logger = logging.getLogger('storage.get_client')
    try:
        protocol, bucket_name = str(bucket).lower().split('://', 1)
    except ValueError:
        raise ValueError('Invalid storage bucket name: {}'.format(bucket))
    if protocol == 's3':
        return S3Storage(bucket_name)
    if protocol == 'gs':
        return GoogleStorage(bucket_name)
    errmsg = 'Unknown STORAGE_BUCKET protocol: %s'
    logger.error(errmsg, protocol)
    raise ValueError(errmsg % protocol)
def extract_text(xml_string):
    """Get text from the body of the given NLM XML string.

    Parameters
    ----------
    xml_string : str
        String containing valid NLM XML.

    Returns
    -------
    str or None
        Extracted plaintext, one paragraph per line with a trailing
        newline, or None when no paragraphs were found.
    """
    paragraphs = extract_paragraphs(xml_string)
    if not paragraphs:
        return None
    return '\n'.join(paragraphs) + '\n'
import os
def get_processing_info(data_path, actual_names, labels):
    """
    Walk the downloaded data and keep only the videos present in our database.

    Returns:
        files_to_process: list of file paths to videos
        labs_to_process: list of same length with corresponding labels
    """
    files_to_process = []
    labs_to_process = []
    for img_type in os.listdir(data_path):
        # img_type is a category folder (B-lines, cardiac, etc.); skip hidden.
        if img_type.startswith("."):
            continue
        type_dir = os.path.join(data_path, img_type)
        for vid in os.listdir(type_dir):
            if vid not in actual_names:
                continue
            files_to_process.append(os.path.join(type_dir, vid))
            labs_to_process.append(labels[actual_names.index(vid)])
    return files_to_process, labs_to_process
def search_organizations(search_term: str = None, limit: str = None):
    """
    Look up organizations by name & location.

    :param search_term: e.g. "College of Nursing" or "Chicago, IL".
    :param limit: The maximum number of matches you'd like returned - defaults to 10, maximum is 50.
    :returns: String containing xml or an lxml element.
    """
    query_params = {'search_term': search_term, 'limit': limit}
    return get_anonymous('searchOrganizations', **query_params)
def call_math_operator(value1, value2, op, default):
    """Return the result of the math operation on the given values.

    Falsy operands are replaced by `default`; non-numeric operands are
    coerced to float when possible, otherwise passed through unchanged.
    """
    def _coerce(value):
        # Fall back to the default for falsy values, then try to make the
        # value numeric; leave it untouched if conversion fails.
        if not value:
            value = default
        if pyd.is_number(value):
            return value
        try:
            return float(value)
        except Exception:
            return value
    return op(_coerce(value1), _coerce(value2))
def addGems(ID, nbGems):
    """
    Add a number of gems to an account, given its ID and the amount.
    Pass a negative number to remove gems.
    If the account does not hold enough, nothing is written back and the
    returned value is strictly below 0 (returned as a string).
    """
    current = int(valueAt(ID, "gems", GF.dbGems))
    new_value = current + nbGems
    if new_value < 0:
        # Insufficient funds: leave the stored balance untouched.
        print("DB >> Il n'y a pas assez sur ce compte !")
    else:
        updateField(ID, "gems", new_value, GF.dbGems)
        print("DB >> Le compte de " + str(ID) + " est maintenant de: " + str(new_value))
    return str(new_value)
def BertzCT(mol, cutoff=100, dMat=None, forceDMat=1):
  """ A topological index meant to quantify "complexity" of molecules.
  Consists of a sum of two terms, one representing the complexity
  of the bonding, the other representing the complexity of the
  distribution of heteroatoms.
  From S. H. Bertz, J. Am. Chem. Soc., vol 103, 3599-3601 (1981)
  "cutoff" is an integer value used to limit the computational
  expense.  A cutoff value tells the program to consider vertices
  topologically identical if their distance vectors (sets of
  distances to all other vertices) are equal out to the "cutoff"th
  nearest-neighbor.
  **NOTE**  The original implementation had the following comment:
  > this implementation treats aromatic rings as the
  > corresponding Kekule structure with alternating bonds,
  > for purposes of counting "connections".
  Upon further thought, this is the WRONG thing to do.  It
  results in the possibility of a molecule giving two different
  CT values depending on the kekulization.  For example, in the
  old implementation, these two SMILES:
  CC2=CN=C1C3=C(C(C)=C(C=N3)C)C=CC1=C2C
  CC3=CN=C2C1=NC=C(C)C(C)=C1C=CC2=C3C
  which correspond to differentk kekule forms, yield different
  values.
  The new implementation uses consistent (aromatic) bond orders
  for aromatic bonds.
  THIS MEANS THAT THIS IMPLEMENTATION IS NOT BACKWARDS COMPATIBLE.
  Any molecule containing aromatic rings will yield different
  values with this implementation.  The new behavior is the correct
  one, so we're going to live with the breakage.
  **NOTE** this barfs if the molecule contains a second (or
  nth) fragment that is one atom.
  """
  atomTypeDict = {}      # atomic number -> count (heteroatom distribution term)
  connectionDict = {}    # symmetry-class key -> weighted connection count
  numAtoms = mol.GetNumAtoms()
  if forceDMat or dMat is None:
    if forceDMat:
      # nope, gotta calculate one
      dMat = Chem.GetDistanceMatrix(mol, useBO=0, useAtomWts=0, force=1)
      mol._adjMat = dMat
    else:
      # Reuse a cached matrix when available; compute and cache otherwise.
      try:
        dMat = mol._adjMat
      except AttributeError:
        dMat = Chem.GetDistanceMatrix(mol, useBO=0, useAtomWts=0, force=1)
        mol._adjMat = dMat
  if numAtoms < 2:
    # A single atom has no connections, hence zero complexity.
    return 0
  bondDict, neighborList, vdList = _CreateBondDictEtc(mol, numAtoms)
  # Group atoms that are topologically equivalent out to `cutoff` neighbors.
  symmetryClasses = _AssignSymmetryClasses(mol, vdList, dMat, forceDMat, numAtoms, cutoff)
  # print('Symmm Classes:',symmetryClasses)
  for atomIdx in range(numAtoms):
    hingeAtomNumber = mol.GetAtomWithIdx(atomIdx).GetAtomicNum()
    atomTypeDict[hingeAtomNumber] = atomTypeDict.get(hingeAtomNumber, 0) + 1
    hingeAtomClass = symmetryClasses[atomIdx]
    numNeighbors = vdList[atomIdx]
    for i in range(numNeighbors):
      neighbor_iIdx = neighborList[atomIdx][i]
      NiClass = symmetryClasses[neighbor_iIdx]
      bond_i_order = _LookUpBondOrder(atomIdx, neighbor_iIdx, bondDict)
      # print('\t',atomIdx,i,hingeAtomClass,NiClass,bond_i_order)
      if (bond_i_order > 1) and (neighbor_iIdx > atomIdx):
        # Multiple bond: count its internal connections once per bond
        # (the neighbor_iIdx > atomIdx test avoids double counting).
        numConnections = bond_i_order * (bond_i_order - 1) / 2
        connectionKey = (min(hingeAtomClass, NiClass), max(hingeAtomClass, NiClass))
        connectionDict[connectionKey] = connectionDict.get(connectionKey, 0) + numConnections
      for j in range(i + 1, numNeighbors):
        # Each pair of bonds sharing the hinge atom contributes connections
        # weighted by the product of the two bond orders.
        neighbor_jIdx = neighborList[atomIdx][j]
        NjClass = symmetryClasses[neighbor_jIdx]
        bond_j_order = _LookUpBondOrder(atomIdx, neighbor_jIdx, bondDict)
        numConnections = bond_i_order * bond_j_order
        connectionKey = (min(NiClass, NjClass), hingeAtomClass, max(NiClass, NjClass))
        connectionDict[connectionKey] = connectionDict.get(connectionKey, 0) + numConnections
  if not connectionDict:
    # No connections at all (e.g. a single bond): give _CalculateEntropies
    # a non-empty placeholder dict.
    connectionDict = {'a': 1}
  return _CalculateEntropies(connectionDict, atomTypeDict, numAtoms)
def BOPTools_AlgoTools_CorrectRange(*args):
    """
    Correct a shrunk range <aSR> taking into account the 3D-curve resolution
    and the corresponding tolerance values. Two overloads are dispatched by
    the native module:

    * CorrectRange(aE1, aE2, aSR, aNewSR) -- tolerances of two edges:
        :param aE1: TopoDS_Edge &
        :param aE2: TopoDS_Edge &
        :param aSR: IntTools_Range &
        :param aNewSR: IntTools_Range & (receives the corrected range)
        :rtype: void

    * CorrectRange(aE, aF, aSR, aNewSR) -- tolerances of an edge and a face:
        :param aE: TopoDS_Edge &
        :param aF: TopoDS_Face &
        :param aSR: IntTools_Range &
        :param aNewSR: IntTools_Range & (receives the corrected range)
        :rtype: void
    """
    # Thin SWIG wrapper; overload resolution happens inside _BOPTools.
    return _BOPTools.BOPTools_AlgoTools_CorrectRange(*args)
from typing import cast
def OldValue(lval, mem, exec_opts):
  # type: (lvalue_t, Mem, optview.Exec) -> value_t
  """
  Look up the current value of an lvalue before an in-place update.
  Used by s+='x' and (( i += 1 ))
  TODO: We need a stricter and less ambiguous version for Oil.
  Problem:
  - why does lvalue have Indexed and Keyed, while sh_lhs_expr only has
    IndexedName?
  - should I have lvalue.Named and lvalue.Indexed only?
    - and Indexed uses the index_t type?
      - well that might be Str or Int
  """
  assert isinstance(lval, lvalue_t), lval
  # TODO: refactor lvalue_t to make this simpler
  # First pass: extract the variable name from any lvalue variant.
  UP_lval = lval
  with tagswitch(lval) as case:
    if case(lvalue_e.Named):  # (( i++ ))
      lval = cast(lvalue__Named, UP_lval)
      var_name = lval.name
    elif case(lvalue_e.Indexed):  # (( a[i]++ ))
      lval = cast(lvalue__Indexed, UP_lval)
      var_name = lval.name
    elif case(lvalue_e.Keyed):  # (( A['K']++ )) ?  I think this works
      lval = cast(lvalue__Keyed, UP_lval)
      var_name = lval.name
    else:
      raise AssertionError()
  val = _LookupVar(var_name, mem, exec_opts)
  # Second pass: for Indexed/Keyed lvalues, extract the element from the
  # container value; for Named, the value itself is the answer.
  UP_val = val
  with tagswitch(lval) as case:
    if case(lvalue_e.Named):
      return val
    elif case(lvalue_e.Indexed):
      lval = cast(lvalue__Indexed, UP_lval)
      array_val = None  # type: value__MaybeStrArray
      with tagswitch(val) as case2:
        if case2(value_e.Undef):
          # Undefined variable indexed like an array: treat as empty array.
          array_val = value.MaybeStrArray([])
        elif case2(value_e.MaybeStrArray):
          tmp = cast(value__MaybeStrArray, UP_val)
          # mycpp rewrite: add tmp.  cast() creates a new var in inner scope
          array_val = tmp
        else:
          e_die("Can't use [] on value of type %s", ui.ValType(val))
      s = word_eval.GetArrayItem(array_val.strs, lval.index)
      if s is None:
        val = value.Str('')  # NOTE: Other logic is value.Undef()?  0?
      else:
        assert isinstance(s, str), s
        val = value.Str(s)
    elif case(lvalue_e.Keyed):
      lval = cast(lvalue__Keyed, UP_lval)
      assoc_val = None  # type: value__AssocArray
      with tagswitch(val) as case2:
        if case2(value_e.Undef):
          # This never happens, because undef[x]+= is assumed to
          raise AssertionError()
        elif case2(value_e.AssocArray):
          tmp2 = cast(value__AssocArray, UP_val)
          # mycpp rewrite: add tmp.  cast() creates a new var in inner scope
          assoc_val = tmp2
        else:
          e_die("Can't use [] on value of type %s", ui.ValType(val))
      s = assoc_val.d.get(lval.key)
      if s is None:
        # Missing key reads as the empty string.
        val = value.Str('')
      else:
        val = value.Str(s)
    else:
      raise AssertionError()
  return val
from typing import Tuple
def approx_min_k(operand: Array,
                 k: int,
                 reduction_dimension: int = -1,
                 recall_target: float = 0.95,
                 reduction_input_size_override: int = -1,
                 aggregate_to_topk: bool = True) -> Tuple[Array, Array]:
  """Returns the min ``k`` values of ``operand`` along with their indices.

  Args:
    operand : Array to search for min-k.
    k : Specifies the number of min-k.
    reduction_dimension: Integer dimension along which to search. Default: -1.
    recall_target: Recall target for the approximation.
    reduction_input_size_override : When set to a positive value, it overrides
      the size determined by operands[reduction_dim] for evaluating the recall.
      This option is useful when the given operand is only a subset of the
      overall computation in SPMD or distributed pipelines, where the true input
      size cannot be deferred by the operand shape.
    aggregate_to_topk: When true, aggregates approximate results to top-k. When
      false, returns the approximate results.

  Returns:
    Tuple[Array, Array] : Least k values and their indices of the inputs.
  """
  # Older XLA clients do not support the unaggregated form; force top-k
  # aggregation there.
  if xc._version < 45:
    aggregate_to_topk = True
  bind_kwargs = dict(
      k=k,
      reduction_dimension=reduction_dimension,
      recall_target=recall_target,
      is_max_k=False,
      reduction_input_size_override=reduction_input_size_override,
      aggregate_to_topk=aggregate_to_topk)
  return approx_top_k_p.bind(operand, **bind_kwargs)
def sitemap_host_xml():
    """Supplementary Sitemap XML for Host Pages"""
    database_connection.reconnect()
    host_info = ww_host.info.retrieve_all(database_connection)
    xml_body = render_template("sitemaps/hosts.xml", hosts=host_info)
    return Response(xml_body, mimetype="text/xml")
def WTC(df, N):
    """Within Topic Coherence Measure.

    [Note]
    A word without a trained word vector is ignored (its pairs are skipped).

    Parameters
    ----------
    df : Word-Topic distribution K by V
        where K is number of topics and V is number of words
    N : Number of top N words

    Returns
    -------
    total : WTC value of each topic (1 * K)
    """
    # Keep only the top-N words per topic.
    df = df.iloc[:N, :]
    total = []
    for col in df.columns:
        cos_val = 0
        words = df[col].tolist()
        # Sum cosine similarity over all word pairs of this topic.
        for w1, w2 in combinations(words, 2):
            try:
                cos_val += 1 - cosine(word2vec_model.get_vector(w1),
                                      word2vec_model.get_vector(w2))
            except KeyError:
                # Out-of-vocabulary word: skip this pair.  The original bare
                # `except:` also swallowed unrelated errors (even Ctrl-C).
                pass
        print(col, cos_val)
        total.append(cos_val)
    return total
def extract_static_override_features(
    static_overrides):
  """Extract static feature override values.

  Args:
    static_overrides: A dataframe that contains the value for static overrides
      to be passed to the GAM Encoders.

  Returns:
    A mapping from feature name to location and then to the override value.
    This is a two-level dictionary of the format: {feature: {location: value}}
  """
  overrides_by_feature = {}
  for feature_name in set(static_overrides[constants.FEATURE_NAME_COLUMN]):
    # Rows of the dataframe describing this feature.
    feature_rows = static_overrides.loc[
        static_overrides[constants.FEATURE_NAME_COLUMN] == feature_name]
    per_location = {}
    for geo in set(feature_rows[constants.GEO_ID_COLUMN]):
      geo_rows = feature_rows.loc[
          feature_rows[constants.GEO_ID_COLUMN] == geo]
      # Take the first modifier value for this (feature, location) pair.
      per_location[geo] = geo_rows[
          constants.FEATURE_MODIFIER_COLUMN].to_numpy()[0]
    overrides_by_feature[feature_name] = per_location
  return overrides_by_feature
def job_list_View(request):
    """Render a paginated job list (10 jobs per page)."""
    paginator = Paginator(Job.objects.filter(), 10)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'jobapp/job-list.html', {'page_obj': page_obj})
import json
def edit_schedule(request):
    """Edit automatic updates schedule"""
    if request.method == "POST":
        def parse_hours(query):
            # Space-separated integers, ignoring extra whitespace.
            return [int(part.strip()) for part in query.split(" ")
                    if part.strip() != ""]
        schedule = models.UpdateSchedule.objects.get()
        schedule.text = json.dumps({
            models.YoutubeChannel.PRIORITY_LOW: parse_hours(request.POST["low"]),
            models.YoutubeChannel.PRIORITY_MEDIUM: parse_hours(request.POST["medium"]),
            models.YoutubeChannel.PRIORITY_HIGH: parse_hours(request.POST["high"]),
        })
        schedule.save()
    return redirect("notifpy:settings")
import math
def infection_formula(name_model, infectious_number, classroom_volume, classroom_ach):
    """ Calculate infection rate of with/without a mask by selected model. """
    if name_model != "wells_riley":
        # Future Work: add further infection models here.
        return 0.0, 0.0
    # Combined filtration effect of masks on exhaled and inhaled air.
    effect_mask = 1.0 / ((1.0 - config.EXHALATION_FILTRATION_EFFICIENCY)
                         * (1.0 - config.RESPIRATION_FILTRATION_EFFICIENCY))
    # Quanta inhaled over one lesson (numerator of the Wells-Riley exponent).
    exposure = (-infectious_number * config.QUANTUM_GENERATION_RATE
                * config.PULMONARY_VENTILATIION_RATE * (config.LESSON_TIME / 60))
    air_exchange = classroom_volume * classroom_ach
    infection_rate_w_mask = 1.0 - math.exp(exposure / (air_exchange * effect_mask))
    infection_rate_wo_mask = 1.0 - math.exp(exposure / air_exchange)
    return infection_rate_w_mask, infection_rate_wo_mask
def RetryWithBackoff(opts, fn, args=None, kwargs=None):
    """Call `fn` repeatedly, sleeping between attempts (Python 2 / gevent).

    `fn` function must follow the interface suggested:
    * it should return tuple <status, err> where
    status - backoff status (RETRY_BREAK / RETRY_RESET / RETRY_CONTINUE)
    err - error that happend in function to propogate it to caller."""
    args = args or ()
    kwargs = kwargs or {}
    # Fill in defaults for any missing retry options.
    update_opts(opts)
    count = 0
    backoff = opts['backoff']
    while True:
        count += 1
        status, err_or_rv = fn(*args, **kwargs)
        print status, err_or_rv
        if status == RETRY_BREAK:
            # Done: hand the function's result (or error) back to the caller.
            return err_or_rv
        if status == RETRY_RESET:
            # Restart the backoff schedule and the attempt counter.
            backoff = opts['backoff']
            count = wait = 0
        if status == RETRY_CONTINUE:
            if opts['max_attempts'] > 0 and count >= opts['max_attempts']:
                raise RetryMaxAttemptsError(
                    opts['max_attempts'], reason=err_or_rv)
            # Jittered delay before the next attempt.
            # NOTE(review): `backoff` is only ever clamped below, never grown,
            # so the delay is effectively constant rather than exponential --
            # confirm whether that is intended.
            wait = (backoff + backoff * retry_jitter) * opts['constant_factor']
            print "RETRIED IN ... %s" % wait
            if backoff > opts['max_backoff']:
                backoff = opts['max_backoff']
            gevent.sleep(wait)
def noaa_api_formatter(raw, metrics=None, country_aggr=False):
    """Format the output of the NOAA API to the task-geo Data Model.
    Arguments:
        raw(pandas.DataFrame):Data to be formatted.
        metrics(list[str]): Optional.List of metrics requested,valid metric values are:
            TMIN: Minimum temperature.
            TMAX: Maximum temperature.
            TAVG: Average of temperature.
            SNOW: Snowfall (mm).
            SNWD: Snow depth (mm).
            PRCP: Precipitation
        country_aggr(bool): When True, only an aggregate for each date/country will be returned.
    Returns:
        pandas.DataFrame
    """
    # Default to the standard metric set, keeping only columns present in raw.
    if metrics is None:
        metrics = [metric.lower() for metric in DEFAULT_METRICS if metric in raw.columns]
    data = raw.copy()
    # Normalize all column names to lowercase.
    data.columns = [column.lower() for column in data.columns]
    column_order = [
        'latitude', 'longitude', 'elevation', 'country', 'name',
        'date', 'station']
    column_order.extend(metrics)
    data.date = pd.to_datetime(data.date)
    # Temperature columns arrive as strings; cast to float.
    for column in ['tmax', 'tavg', 'tmin']:
        if column in data.columns:
            data[column] = data[column].astype(float)
    # Snow depth and precipitation: divide by 1000 (mm -> m, presumably --
    # the docstring still says mm; confirm the intended unit) and fill
    # missing values with zero.
    if 'snwd' in data.columns:
        data['snwd'] = data['snwd'].astype(float) / 1000
        data.snwd.fillna(0, inplace=True)
    if 'prcp' in data.columns:
        data['prcp'] = data['prcp'].astype(float) / 1000
        data.prcp.fillna(0, inplace=True)
    # The first two characters of the station id encode the FIPS country code.
    data['country'] = data.station.str.slice(0, 2).apply(fips_to_name)
    data = data[column_order]
    if country_aggr:
        # Collapse to one row per (country, date): min of tmin, max of tmax.
        aggregations = {}
        if 'tmin' in metrics:
            aggregations['tmin'] = np.min
        if 'tmax' in metrics:
            aggregations['tmax'] = np.max
        agg_columns = list(aggregations.keys())
        return data.groupby(['country', 'date'])[agg_columns].aggregate(aggregations).reset_index()
    return data
def crop_wav(wav, center, radius):
    """
    Crop wav on [center - radius, center + radius + 1], and pad 0 for out of range indices.
    :param wav: 1-D array-like signal
    :param center: crop center index
    :param radius: crop radius
    :return: a slice whose length is radius*2 +1.
    """
    left_border = center - radius
    right_border = center + radius + 1
    # Pad each side independently: the original if/elif chain missed windows
    # that overhang BOTH ends of the signal (radius >= len(wav)), which made
    # the length assertion fail.
    left_pad = max(0, -left_border)
    right_pad = max(0, right_border - len(wav))
    if left_pad or right_pad:
        cropped_wav = np.concatenate([
            np.zeros(left_pad),
            wav[max(0, left_border): min(len(wav), right_border)],
            np.zeros(right_pad),
        ])
    else:
        # Window fully inside the signal: plain slice (keeps dtype, no copy).
        cropped_wav = wav[left_border: right_border]
    assert len(cropped_wav) == radius * 2 + 1
    return cropped_wav
def markdown(text: str) -> str:
    """Escape markdown control characters in *text*.

    Prefixes every substring captured by the module-level ``MD_RE`` pattern
    with a backslash so it renders literally.
    """
    return MD_RE.sub(r'\\\1', text)
def import_sample(sample_name, db):
    """Return the sample_id for `sample_name`, inserting a new row if needed."""
    cursor = db.cursor()
    cursor.execute('select sample_id from sample where sample_name=?',
                   (sample_name, ))
    row = cursor.fetchone()
    if row is not None:
        # Sample already registered: reuse its id.
        return row[0]
    cursor.execute('insert into sample (sample_name) values (?)',
                   (sample_name, ))
    return cursor.lastrowid
import torch
def change_background_color_balck_digit(images, old_background, new_background, new_background2=None, p=1):
    """Recolor the zero-valued background of a batch of digit images.

    :param images: BCHW tensor; the background is assumed to be zero
        (``old_background`` must be ``[0]``).
    :param old_background: must be ``[0]``; asserted, kept for compatibility.
    :param new_background: RGB color (sequence or tensor); rescaled to [0, 1]
        when the images are in [0, 1] but the color is 0-255.
    :param new_background2: optional second background color; when given, a
        random ``p`` fraction of the batch gets this color instead.
    :param p: fraction of images recolored with ``new_background2``.
    :return: recolored BCHW tensor (grayscale inputs expanded to 3 channels).
    """
    if new_background2 is None:
        assert old_background == [0]
        # Normalize the color into a tensor on the images' value scale.
        if not torch.is_tensor(new_background):
            new_background = torch.tensor(new_background, dtype=images.dtype)
            if images.max() <= 1 and new_background.max() > 1:
                new_background /= 255
        # Grayscale input + RGB color: broadcast the single channel to 3.
        if images.size(1) == 1 and len(new_background) == 3:
            images = images.expand(-1, 3, -1, -1)
        else:
            assert images.size(1) == len(new_background)
            # raise NotImplementedError(images.size(), new_background)
        images = images.clone()
        new_background = new_background.view(-1, 1, 1)
        n=images.size(0)
        ch=images.size(1)
        # Heuristic: more than one all-zero channel per image on average
        # means the input was already colorized (digit or background).
        if (images.view(n,ch,-1).sum(2)==0).sum(1).sum()>n:
            #when input is already colored (digit or background)
            non_zero_ch_idx=torch.nonzero(images[0].view(ch,-1).sum(1)).squeeze() #torch.nonzero(images[0].view(n,ch,-1).sum(2))
            non_zero_chnls = images[:,non_zero_ch_idx]
            if len(non_zero_chnls.shape)==3:
                non_zero_chnls=non_zero_chnls.unsqueeze(1)
            else:
                non_zero_chnls=non_zero_chnls[:,0].unsqueeze(1)
            # More zeros than ones in the active channel => the digit, not
            # the background, carried the color.
            if torch.sum(non_zero_chnls.view(n,-1)==0)>torch.sum(non_zero_chnls.view(n,-1)==1):
                #digit was previously colored
                bg_ratio = images.max() - non_zero_chnls
                bg = bg_ratio * new_background
                return images + bg
            else:
                #background is previously colored
                bg = (non_zero_chnls.expand(-1, 3, -1, -1)*new_background)
                images*=images.max()-new_background
                return images+bg
        else:
            #when input is greyscale
            # Background intensity is the complement of the digit intensity.
            bg_ratio = images.max() - images
            bg = bg_ratio * new_background
            # imgs = images + bg
            # print(images[:, 0, :, :].std().item(),images[:, 1, :, :].std().item(),images[:, 2, :, :].std().item())
            # print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
            return bg #imgs
    else:
        assert old_background == [0]
        # Same color normalization as above, applied to both colors.
        if not torch.is_tensor(new_background):
            new_background = torch.tensor(new_background, dtype=images.dtype)
            if images.max() <= 1 and new_background.max() > 1:
                new_background /= 255
        if not torch.is_tensor(new_background2):
            new_background2 = torch.tensor(new_background2, dtype=images.dtype)
            if images.max() <= 1 and new_background2.max() > 1:
                new_background2 /= 255
        if images.size(1) == 1 and len(new_background) == 3:
            images = images.expand(-1, 3, -1, -1)
        else:
            assert images.size(1) == len(new_background)
            # raise NotImplementedError(images.size(), new_background)
        images = images.clone()
        new_background = new_background.view(-1, 1, 1)
        new_background2 = new_background2.view(-1, 1, 1)
        n=images.size(0)
        ch=images.size(1)
        if (images.view(n,ch,-1).sum(2)==0).sum(1).sum()>n:
            raise NotImplementedError
            # NOTE: everything below this `raise` (until the greyscale branch)
            # is unreachable dead code, kept verbatim.
            #when input is already colored (digit or background)
            non_zero_ch_idx=torch.nonzero(images[0].view(ch,-1).sum(1)).squeeze() #torch.nonzero(images[0].view(n,ch,-1).sum(2))
            non_zero_chnls = images[:,non_zero_ch_idx]
            if len(non_zero_chnls.shape)==3:
                non_zero_chnls=non_zero_chnls.unsqueeze(1)
            else:
                non_zero_chnls=non_zero_chnls[:,0].unsqueeze(1)
            if torch.sum(non_zero_chnls.view(n,-1)==0)>torch.sum(non_zero_chnls.view(n,-1)==1):
                #digit was previously colored
                bg_ratio = images.max() - non_zero_chnls
                bg = bg_ratio * new_background
                return images + bg
            else:
                #background is previously colored
                bg = (non_zero_chnls.expand(-1, 3, -1, -1)*new_background)
                images*=images.max()-new_background
                return images+bg
        else:
            #when input is greyscale
            bg_ratio = images.max() - images
            # Randomly recolor a p-fraction of the batch with the 2nd color
            # (torch.randperm makes this nondeterministic across calls).
            idxs = torch.randperm(len(bg_ratio))
            n_imgs=int(p*len(bg_ratio))
            bg_ratio[idxs[:n_imgs]] *= new_background2
            bg_ratio[idxs[n_imgs:]] *= new_background
            # imgs = images + bg
            # print(images[:, 0, :, :].std().item(),images[:, 1, :, :].std().item(),images[:, 2, :, :].std().item())
            # print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
            return bg_ratio
def render_sprites(sprites, scales, offsets, backgrounds, name="render_sprites"):
    """ Render a scene composed of sprites on top of a background.
    A scene is composed by scaling the sprites by `scales` and offsetting them
    by `offsets` (using spatial transformers), then merging sprites and
    background using per-sprite alpha and importance channels.
    Sprites are organized into a series of `flights`; each flight may use a
    different sprite-map shape and sprite count.
    The coordinate system for scales and offsets has (0, 0) at the image top-left
    and (1, 1) at the image bottom-right; a sprite with scale (1, 1) and offset
    (0, 0) occupies the whole output image. Bilinear interpolation is used for
    the spatial transformer sections.
    Args:
        sprites: List of tensors of length `n_flights`, each of shape
            (batch_size, sprite_height_i, sprite_width_i, n_channels+2);
            the final two channels are alpha and importance.
        scales: Tensor of shape `[batch_size, n_sprites, 2]`; y, x scale per
            sprite (1 fills the whole output image).
        offsets: Tensor of shape `[batch_size, n_sprites, 2]`; y, x location
            of each sprite's top-left corner.
        backgrounds: Tensor of shape
            `[batch_size, output_height, output_width, n_channels]`.
        name: Optional name of the op.
    Returns:
        Tensor of stitched images, shape
        `(batch_size, output_height, output_width, n_channels)`.
    Raises:
        ImportError: if the wrapper generated during compilation is not present
        when the function is called.
    """
    with ops.name_scope(name, "render_sprites", [sprites, scales, offsets, backgrounds]):
        def _as_tensors(values, label):
            # Convert each per-flight value, naming it after its flight index.
            return [ops.convert_to_tensor(v, name="{}_flight_{}".format(label, i))
                    for i, v in enumerate(values)]

        sprites_t = _as_tensors(sprites, "sprites")
        scales_t = _as_tensors(scales, "scales")
        offsets_t = _as_tensors(offsets, "offsets")
        backgrounds_t = ops.convert_to_tensor(backgrounds, name="backgrounds")
        lib = render_sprites_so()
        return lib.render_sprites(sprites_t, scales_t, offsets_t, backgrounds_t)
import ctypes
def load_shared_library(dll_path, lib_dir):
    """
    Return the loaded shared library object from the dll_path and adding `lib_dir` to the path.
    """
    # Make dependent libraries discoverable before loading.
    update_path_environment(lib_dir)
    if not exists(dll_path):
        raise ImportError('Shared library does not exists: %(dll_path)r' % locals())
    if not isinstance(dll_path, bytes):
        # ctypes expects a filesystem-encoded (byte) path.
        dll_path = fsencode(dll_path)
    loaded = ctypes.CDLL(dll_path)
    if loaded and loaded._name:
        return loaded
    raise ImportError('Failed to load shared library with ctypes: %(dll_path)r and lib_dir: %(lib_dir)r' % locals())
def preprocess_img(image):
    """Preprocess the image to adapt it to network requirements
    Args:
        Image we want to input the network (W,H,3) numpy array
    Returns:
        Image ready to input the network (1,W,H,3)
    """
    # Reverse the channel axis (BGR -> RGB).
    flipped = image[:, :, ::-1]
    # Center by the BSDS500 mean color values.
    mean_color = np.array((104.00699, 116.66877, 122.67892), dtype=np.float32)
    centered = np.subtract(flipped, mean_color)
    # Prepend a batch dimension: (W, H, 3) -> (1, W, H, 3).
    return centered[np.newaxis, ...]
def _parse_ec_record(e_rec):
    """
    Parse an ENSDF electron capture + b+ record.

    Parameters
    ----------
    e_rec : re.MatchObject
        regular expression MatchObject

    Returns
    -------
    en, en_err : float
        b+ endpoint energy in keV and its error
    ib, dib : float
        b+ branch intensity and its error
    ie, die : float
        ec branch intensity and its error
    logft, dft : float
        logft of the decay and its error
    tti, dtti : float
        total transition intensity and its error
    """
    # The match groups come in (value, uncertainty) pairs starting at group 2.
    parsed = [_get_val_err(e_rec.group(g), e_rec.group(g + 1))
              for g in (2, 4, 6, 8, 10)]
    (en, en_err), (ib, dib), (ie, die), (logft, dft), (tti, dtti) = parsed
    return en, en_err, ib, dib, ie, die, logft, dft, tti, dtti
import datetime
def skpTime(time):
    """
    Return a ``datetime.time`` (UTC) for the moment the unit generated
    the frame, parsed from an 'hhmmss.ff' string.

    >>> time = '212753.00'
    >>> datetime.time(int(time[0:2]), int(time[2:4]), int(time[4:6]), int(time[-2]))
    datetime.time(21, 27, 53)
    >>>
    """
    # NOTE(review): time[-2] takes a single fractional-second digit and
    # passes it as microseconds ('.00' -> 0) -- probably not the intended
    # conversion; confirm.  `timezone` is presumably pytz.timezone -- verify
    # the module-level import.
    return datetime.time(int(time[0:2]), int(time[2:4]), int(time[4:6]), int(time[-2]), tzinfo=timezone('UTC'))
def calc_max_moisture_set_point(bpr, tsd, t):
    """
    (76) in ISO 52016-1:2017
    Gabriel Happle, Feb. 2018
    :param bpr: Building Properties
    :type bpr: BuildingPropertiesRow
    :param tsd: Time series data of building
    :type tsd: dict
    :param t: time step / hour of the year
    :type t: int
    :return: max moisture set point (kg/kg_dry_air)
    :rtype: double
    """
    # Dehumidification set point (max relative humidity, percent).
    phi_int_set_dhu = bpr.comfort['RH_max_pc']
    # Saturation pressure at the current indoor temperature.
    p_sat_int = calc_saturation_pressure(tsd['T_int'][t])
    partial_vapour = phi_int_set_dhu / 100 * p_sat_int
    # Humidity ratio from partial vapour pressure (0.622 = Mw/Mda).
    return 0.622 * partial_vapour / (P_ATM - partial_vapour)
def compress_pub_key(pub_key: bytes) -> bytes:
    """Convert uncompressed to compressed public key."""
    # Prefix encodes the parity of the y coordinate's last byte.
    prefix = b"\x03" if pub_key[-1] & 1 else b"\x02"
    return prefix + pub_key[1:33]
def is_tensor(blob):
    """Whether the given blob is a tensor object.

    Returns True when *blob* is an instance of ``TensorBase`` (including
    subclasses), False otherwise.
    """
    return isinstance(blob, TensorBase)
def transform_dlinput(
        tlist=None, make_tensor=True, flip_prob=0.5,
        augment_stain_sigma1=0.5, augment_stain_sigma2=0.5):
    """Build the input-transform pipeline for a DL model.

    Parameters
    ----------
    tlist: None or list of transform names ('hflip', 'augment_stain').
        Pass None in testing mode (no augmentation).
    make_tensor: when True, append a PIL -> tensor conversion step.
    flip_prob: probability of the random horizontal flip.
    augment_stain_sigma1, augment_stain_sigma2: H&E stain augmentation sigmas.
    """
    available = {
        'hflip': tvdt.RandomHorizontalFlip(prob=flip_prob),
        'augment_stain': tvdt.RandomHEStain(
            sigma1=augment_stain_sigma1, sigma2=augment_stain_sigma2),
    }
    chosen = [available[name] for name in (tlist or [])]
    if make_tensor:
        chosen.append(tvdt.PILToTensor(float16=False))
    return tvdt.Compose(chosen)
def _(text):
    """Normalize white space.

    Collapses every run of whitespace into a single space.  ``str.split()``
    with no arguments already discards leading and trailing whitespace, so
    the former ``.strip()`` call was redundant.
    """
    return ' '.join(text.split())
import ipdb
def mean_IOU_primitive_segment(matching, predicted_labels, labels, pred_prim, gt_prim):
    """
    Primitive type IOU, calculated at the segment level.

    Predicted segments are first matched with ground truth segments (via
    `matching`); IOU and primitive-type accuracy are then computed over the
    matched pairs.  Ground-truth segments smaller than 100 points are ignored.

    :param matching: per-batch (rows, cols) index arrays pairing predicted
        segment ids with ground-truth segment ids
    :param predicted_labels: N x 1, pred label id for segments
    :param labels: N x 1, gt label id for segments
    :param pred_prim: K x 1, pred primitive type for each of the predicted segments
    :param gt_prim: N x 1, gt primitive type for each point
    :return: (mean segment IOU, mean primitive-type accuracy,
        list of [gt, pred] primitive-type pairs for the LAST batch element --
        kept for compatibility with the original interface)
    """
    batch_size = labels.shape[0]
    IOU = []
    IOU_prim = []
    for b in range(batch_size):
        iou_b = []
        iou_b_prim = []
        iou_b_prims = []
        rows, cols = matching[b]
        for r, c in zip(rows, cols):
            pred_indices = predicted_labels[b] == r
            gt_indices = labels[b] == c
            # use only matched, non-empty segments for evaluation
            if (np.sum(gt_indices) == 0) or (np.sum(pred_indices) == 0):
                continue
            # also remove the gt labels that are very small in number
            if np.sum(gt_indices) < 100:
                continue
            iou = np.sum(np.logical_and(pred_indices, gt_indices)) / (
                np.sum(np.logical_or(pred_indices, gt_indices)) + 1e-8)
            iou_b.append(iou)
            # evaluation of primitive type prediction performance
            gt_prim_type_k = gt_prim[b][gt_indices][0]
            # Let a bad index raise naturally: the original wrapped this in a
            # bare except that dropped into the ipdb debugger -- a debugging
            # hook that must not run in production.
            predicted_prim_type_k = pred_prim[b][r]
            iou_b_prim.append(gt_prim_type_k == predicted_prim_type_k)
            iou_b_prims.append([gt_prim_type_k, predicted_prim_type_k])
        # mean IOU over this shape
        IOU.append(np.mean(iou_b))
        IOU_prim.append(np.mean(iou_b_prim))
    return np.mean(IOU), np.mean(IOU_prim), iou_b_prims
import os
def get_tmp_directory_path():
    """Get the path to the tmp dir.
    Creates the tmp dir if it doesn't already exist in this file's dir.
    :return: str -- abs path to the tmp dir
    """
    tmp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 'tmp')
    # makedirs(exist_ok=True) is race-free, unlike the original
    # exists()/mkdir() pair which could raise if the dir appeared in between.
    os.makedirs(tmp_directory, exist_ok=True)
    return tmp_directory
def _infer_title(ntbk, strip_title_header=True):
    """Infer a title from notebook metadata.

    Checks ``metadata['title']`` first; failing that, treats an H1 markdown
    header on the first line of the first cell as the title, optionally
    removing that header line from the cell content.
    """
    title = ntbk.metadata.get('title')
    if title is not None:
        return title
    # Fall back to a leading '# ...' header in the first cell.
    cell_lines = ntbk.cells[0].source.lstrip().split('\n')
    if cell_lines[0].startswith('# '):
        title = cell_lines.pop(0).strip('# ')
        if strip_title_header is True:
            ntbk.cells[0].source = '\n'.join(cell_lines)
    return title
import hashlib
def stable_hash(value):
    """Return a stable hash (same integer across processes and runs)."""
    digest = hashlib.md5(str(value).encode('utf-8')).hexdigest()
    return int(digest, 16)
def minimal_community(community_owner):
    """Minimal community data as dict coming from the external world."""
    access = {"visibility": "public"}
    metadata = {"title": "Title", "type": "topic"}
    return {"id": "comm_id", "access": access, "metadata": metadata}
from typing import Counter
def retrieve_descriptions(gene, descriptions, empties):
    """Given single gene name, grab possible descriptions from NCBI
    and prompt user to select one.

    (Interactive, Python 2: uses raw_input and list-returning filter().
    Mutates `descriptions` with the accepted text and appends skipped or
    empty genes to `empties`; returns `descriptions`, or None when no
    records are found.)"""
    # Perform ESearch and grab list of IDs
    query = gene + '[Gene Name]'
    handle = Entrez.esearch(db='gene', term=query,
                            retmax=100,
                            retmode='xml')
    record = Entrez.read(handle)
    handle.close()
    idlist = ','.join(record["IdList"])
    # Ensure you have results, exit if not
    if idlist == '':
        print('No records for {}, skipping...\n'.format(gene))
        empties.append(gene)
        return
    # Generate summary from UID list
    handle = Entrez.esummary(db='gene', id=idlist)
    record = Entrez.read(handle)
    handle.close()
    # Grab description, counter for unique values
    desc_cnt = Counter()
    doc_sums = record[u'DocumentSummarySet'][u'DocumentSummary']
    for i in range(len(doc_sums)):
        # Prefer the official nomenclature name; fall back to the first
        # "other designation".
        if doc_sums[i][u'NomenclatureName'] != '':
            desc = doc_sums[i][u'NomenclatureName']
        else:
            desc = doc_sums[i][u'OtherDesignations'].split('|')[0]
        desc_cnt[desc] += 1
    # Create list from counter keys for indexing purposes
    desc_list = filter(None, desc_cnt)
    if len(desc_cnt) > 1:
        # Multiple candidate descriptions: list them and let the user choose.
        print('{} has {} unique descriptions from {} results. These are:'.format(
            gene, len(desc_list), len(doc_sums)))
        ans_range = range(len(desc_list))
        for i in ans_range:
            print ('{}: {} [{}/{}]'.format(i+1, desc_list[i], desc_cnt[desc_list[i]], len(doc_sums)))
        # Take user input to accept/reject a description
        while True:
            ans = raw_input('Which do you accept? [{}-{}/N]: '.format(
                min(ans_range)+1, max(ans_range)+1))
            # Check if int or str entered
            try:
                ans = int(ans)-1
                if ans in ans_range:
                    print('Accepting #{}.\n'.format(ans+1))
                    descriptions[gene] = desc_list[ans]
                    break
                else:
                    print('{} is outside acceptable range. Try again.'.format(
                        ans))
            except:
                # Non-integer input: treat as a skip request or re-prompt.
                # NOTE(review): bare except also hides unrelated errors.
                if ans in ['N', 'n', 'no', 'No']:
                    print('Skipping this gene.\n')
                    break
                else:
                    print('Invalid input, try again.')
    # If there's only one unique description, accept/reject
    elif len(desc_cnt) == 1:
        desc_list2 = list(desc_cnt)
        desc = desc_list2[0]
        if desc == '':
            print('{} has empty description.'.format(gene))
            empties.append(gene)
            return
        print('{} only has one unique description from {} results.'.format(
            gene, len(doc_sums)))
        print('This is:\n{}'.format(desc))
        while True:
            ans = raw_input('Accept? Y/N: ')
            if ans in ['Y', 'y', 'yes', 'Yes']:
                print('Description accepted.\n')
                descriptions[gene] = desc
                break
            elif ans in ['N', 'n', 'no', 'No']:
                print('Skipping this gene.\n')
                break
            else:
                print('Invalid input, try again.')
    return(descriptions)
import time
def readCmd():
    """ Parses out a single character contained in '<>'
    i.e. '<1>' returns int(1)
    returns the single character as an int, or
    returns -1 if it fails"""
    recvInProgress = False
    # Give up if no complete '<...>' frame arrives within 10 seconds.
    timeout = time.time() + 10
    while time.time() < timeout:
        try:
            rc = ser.read().decode("utf-8")
        except(UnicodeDecodeError):
            # Ignore bytes that do not decode as UTF-8.
            continue
        if recvInProgress == True:
            if rc != '>':
                # Overwrites on every char: only the LAST char before '>'
                # is kept.
                cmd = rc
            else:
                #while(ser.in_waiting != 0):
                # ser.read()
                try:
                    return int(cmd)
                except:
                    # NOTE(review): this bare except also hides the NameError
                    # raised when '>' directly follows '<' (cmd never bound).
                    # `ser` is presumably a module-level serial port -- confirm.
                    print("Bad command parse")
                    return -1
        elif rc == '<':
            recvInProgress = True
    print("Timeout on readCmd")
    return -1
def map_ground_truth(bounding_boxes, anchor_boxes, threshold=0.5):
    """
    Assign a ground truth object to every anchor box as described in SSD paper
    :param bounding_boxes: ground-truth boxes for one image
    :param anchor_boxes: the fixed anchor (prior) boxes
    :param threshold: minimum IoU for an anchor to count as positive
    :return: (is_positive, bbox_ids) -- per-anchor boolean mask of positive
        matches and the index of the ground-truth box assigned to each anchor
    """
    # overlaps shape: (bounding_boxes, anchor_boxes)
    overlaps = jaccard_overlap(bounding_boxes, anchor_boxes)
    # best_bbox_overlaps and best_bbox_ids shape: (bounding_boxes)
    # best_bbox_overlaps: IoU of overlap with the best anchor box for every ground truth box
    # best_bbox_ids: indexes of anchor boxes
    best_bbox_overlaps, best_bbox_ids = overlaps.max(1)
    # overlaps and bbox_ids shape: (anchor_boxes)
    # IoU and indexes of bounding boxes with the best overlap for every anchor box
    overlaps, bbox_ids = overlaps.max(0)
    # Combine the two:
    # best_bbox_overlaps takes precedence
    # (2 exceeds any IoU, so each gt box's best anchor is forced positive)
    overlaps[best_bbox_ids] = 2
    for bbox_id, anchor_id in enumerate(best_bbox_ids):
        bbox_ids[anchor_id] = bbox_id
    # Check for the threshold and return binary mask and bbox ids for each anchor
    is_positive = overlaps > threshold
    return is_positive, bbox_ids
import os
import time
def create_jumpbox(username, network, image_name='jumpBox-Ubuntu18.04.ova'):
    """Make a new jumpbox so a user can connect to their lab
    :Returns: Dictionary (info about the deployed VM)
    :param username: The user who wants to create their jumpbox
    :type username: String
    :param network: The name of the network the jumpbox connects to
    :type network: string
    :param image_name: The OVA image to deploy the jumpbox from
    :type image_name: String
    :Raises: ValueError -- when `network` does not exist in vCenter
    """
    with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER, \
                 password=const.INF_VCENTER_PASSWORD) as vcenter:
        ova = Ova(os.path.join(const.VLAB_JUMPBOX_IMAGES_DIR, image_name))
        try:
            # Map the OVA's single network interface onto the user's network.
            network_map = vim.OvfManager.NetworkMapping()
            network_map.name = ova.networks[0]
            try:
                network_map.network = vcenter.networks[network]
            except KeyError:
                raise ValueError('No such network named {}'.format(network))
            the_vm = virtual_machine.deploy_from_ova(vcenter, ova, [network_map],
                                                     username, 'jumpBox', logger)
        finally:
            ova.close()
        _setup_jumpbox(vcenter, the_vm, username)
        # VMTools will be ready long before the full network stack is up.
        # Pause for a moment here so we can return an IP
        time.sleep(70)
        return virtual_machine.get_info(vcenter, the_vm)
def MakeFrame(ea, lvsize, frregs, argsize):
    """
    Make function frame
    @param ea: any address belonging to the function
    @param lvsize: size of function local variables
    @param frregs: size of saved registers
    @param argsize: size of function arguments
    @return: ID of function frame or -1
    Creates the frame when the function has none; otherwise resizes
    the existing frame.
    """
    func = idaapi.get_func(ea)
    if func is None:
        return -1
    if idaapi.add_frame(func, lvsize, frregs, argsize):
        return func.frame
    # add_frame failed (frame already exists): resize it instead.
    if idaapi.set_frame_size(func, lvsize, frregs, argsize):
        return func.frame
    return -1
import logging
import time
def mip_solver(f, strides, arch, part_ratios, global_buf_idx, A, Z, compute_factor=10, util_factor=-1,
               traffic_factor=1):
    """CoSA mixed integer programming(MIP) formulation.

    Builds and solves one Gurobi model that jointly decides, for a single
    layer: (1) which memory/permutation level each prime tiling factor is
    assigned to, (2) whether each factor is mapped spatially or temporally,
    and (3) the loop permutation at the global-buffer levels.

    All products of factors are modeled as sums of log2(factor) terms so the
    objective stays linear.

    f : per-problem-dimension lists of prime factors
    strides : (stride_h, stride_w) of the layer
    arch : architecture description (uses arch.mem_entries and arch.S)
    part_ratios : per-memory-level, per-tensor buffer partition ratios
    global_buf_idx : index of the global-buffer level
    A : relevance matrix -- A[j][v] == 1 iff problem dim j indexes tensor v
    Z : per-tensor, per-memory-level occupancy indicators
    compute_factor / util_factor / traffic_factor : objective weights

    Returns (factor_config, spatial_config, outer_perm_config, milp_runtime).
    """
    logging.info(f"LAYER {f}")
    num_vars = len(A[0])
    num_mems = len(Z[0])
    m = Model("mip")
    cost = []
    constraints = []
    org = ['spatial', 'temporal']

    # --- Per-level, per-tensor buffer capacities (DRAM is unbounded) ---
    M = []
    # ignore DRAM cap
    for i in range(num_mems - 1):
        mem_cap = arch.mem_entries[i]
        mem_cap_arr = []
        for j in range(num_vars):
            var_mem_cap = mem_cap * part_ratios[i][j]
            mem_cap_arr.append(var_mem_cap)
        M.append(mem_cap_arr)

    # log friendly M
    M_log = []
    for i, mem in enumerate(M):
        M_v = []
        for bound in mem:
            if bound == 0:
                # turn 0 to 1 for taking the log
                bound = 1
            M_v.append(bound)
        M_log.append(M_v)

    # spatial constraints
    S = arch.S

    # set the levels to be equal to the number of factors + 4 memory levels
    perm_levels = 0
    for j, f_j in enumerate(f):
        perm_levels += len(f_j)
    gb_start_level = global_buf_idx

    total_levels = num_mems - 1 + perm_levels
    logging.info(f"total {total_levels} levels")

    # --- Binary assignment variables: x[(level, dim, factor, spatial/temporal)] ---
    x = {}  # x_jn_jn
    for i in range(total_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    x[(i, j, n, k)] = m.addVar(vtype=GRB.BINARY, name=name)
                # sum for each sub factor spatial and temp must be less than 1
                # NOT equals to one
                spatial_temp_sum = 0
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    spatial_temp_sum += x[(i, j, n, k)]
                m.addConstr(spatial_temp_sum <= 1, "spatial_temp_sum_{}_{}_{}".format(i, j, n))

    # j, n is the loop level
    # each mapper must have a mapping
    i = 0
    x_row_sums = []
    x_col_sums = []
    # for i in range(total_levels):
    # At most one factor per permutation level (a permutation slot holds one loop).
    for i in range(gb_start_level, gb_start_level + perm_levels):
        row_sum = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    row_sum += x[(i, j, n, k)]
        m.addConstr(row_sum <= 1, "row_sum_{}".format(i))
        x_row_sums.append(row_sum)

    # Every prime factor must be assigned to exactly one level.
    for j, f_j in enumerate(f):
        for n, f_jn in enumerate(f_j):
            col_sum = 0
            for i in range(total_levels):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    col_sum += x[(i, j, n, k)]
            # assume perm can be interleaved in diff perm level
            m.addConstr(col_sum == 1, "col_sum_{}_{}".format(j, n))
            x_col_sums.append(col_sum)

    # make sure v is one for all outer loop level, once a correlation exists
    # add another relation var v - f, 3 - 7*n loop-level
    # y[(v, i)] == 1 once tensor v is "touched" at perm level i or any inner level.
    s = {}
    y = {}
    for v in range(num_vars):
        for i in range(gb_start_level, gb_start_level + perm_levels):
            row_sum = 0
            y[(v, i)] = m.addVar(lb=0, ub=1, vtype=GRB.INTEGER, name="y({},{})".format(v, i))
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    row_sum += x[(i, j, n, 1)] * A[j][v]
            if i > gb_start_level:
                m.addConstr(y[(v, i)] >= y[(v, i - 1)], "y_v_i_sv_{}_{}".format(v, i))
                # can be ==
                m.addConstr(y[(v, i)] >= row_sum, "y_v_i_row_sum_{}_{}".format(v, i))
            else:
                # can be ==
                m.addConstr(y[(v, i)] == row_sum, "y_v_i_row_sum_{}_{}".format(v, i))
            s[(v, i)] = row_sum

    ## exhausively list all scenarios where p or q is inside current mem
    # zz[(var, mem)] == 1 iff any factor of dim `var` (2 or 3, i.e. P or Q --
    # assumption based on the stride handling below; confirm against dim order)
    # is mapped at or inside memory level `mem`.
    zz = {}
    prefix = 0
    for var in [2, 3]:
        for mem_level in [3]:
            zz[(var, mem_level)] = m.addVar(lb=0, ub=1, vtype=GRB.INTEGER,
                                            name="zz({},{},{})".format(prefix, var, mem_level))
            x_sums = 0
            for n, prime_factor in enumerate(f[var]):
                for inner_mem_level_i in range(mem_level + 1):
                    for k in range(2):
                        filter_in = x[(inner_mem_level_i, var, n, k)]
                        m.addConstr(zz[(var, mem_level)] >= filter_in,
                                    "zz_x_sum_{}_{}_{}_{}_{}_{}".format(prefix, var, n, mem_level, inner_mem_level_i,
                                                                        k))
                        x_sums += filter_in
            m.addConstr(zz[(var, mem_level)] <= x_sums, "z_x_sum_{}_{}_{}".format(prefix, var, mem_level))

    # l[(v, i)] = log2 of temporal loop size placed at perm level i (same for all v).
    l = {}
    for v in range(num_vars):
        for i in range(gb_start_level, gb_start_level + perm_levels):
            row_sum = 0
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    row_sum += np.log2(f[j][n]) * (x[(i, j, n, 1)])
            l[(v, i)] = row_sum

    # Add spatial constraints
    # All perm levels together share the global buffer's spatial budget.
    spatial_tile = 0
    for i in range(gb_start_level, gb_start_level + perm_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                spatial_tile += np.log2(f[j][n]) * x[(i, j, n, 0)]
    m.addConstr(spatial_tile <= np.log2(S[gb_start_level]), "spatial_tile_gb_{}".format(prefix))

    for i in range(gb_start_level):
        spatial_tile = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                spatial_tile += np.log2(f[j][n]) * x[(i, j, n, 0)]
        m.addConstr(spatial_tile <= np.log2(S[i]), f"spatial_tile_{prefix}_{i}")

    for i in range(gb_start_level + perm_levels, total_levels):
        spatial_tile = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                spatial_tile += np.log2(f[j][n]) * x[(i, j, n, 0)]
        m.addConstr(spatial_tile <= np.log2(S[i - perm_levels + 1]), f"spatial_tile_{i - perm_levels + 1}")

    # Add inner gb buffer constraints
    # buf_util[(mem, tensor)] accumulates log2 of the tile footprint.
    buf_util = {}
    for v in range(num_vars):
        for i in range(num_mems):
            buf_util[(i, v)] = 0

    for v in range(num_vars):
        for i_ in range(gb_start_level + perm_levels):
            for i in range(num_mems):
                for j, f_j in enumerate(f):
                    for n, f_jn in enumerate(f_j):
                        factor = 1
                        # Strides scale the input (v == 1) footprint along P/Q
                        # (j == 2 / j == 3) -- assumption; confirm dim ordering.
                        if v == 1 and j == 2:
                            factor = strides[0]
                        if v == 1 and j == 3:
                            factor = strides[1]
                        if i_ > gb_start_level and i_ < gb_start_level + perm_levels:
                            Z_const = Z[v][i][gb_start_level]
                        else:
                            Z_const = Z[v][i][i_]
                        buf_util[(i, v)] += np.log2(factor * f[j][n]) * (x[(i_, j, n, 0)] + x[i_, j, n, 1]) * A[j][
                            v] * Z_const  # use the i for the cur mem for relationship
                        # only add once
                        if i == 3 and j in [0, 1] and v == 1:
                            buf_util[(i, v)] += (x[(i_, j, n, 0)] + x[(i_, j, n, 1)]) * (1 - zz[(j + 2, i)]) * np.log2(
                                f[j][n])
                            buf_util[(i, v)] += (x[(i_, j, n, 0)] + x[(i_, j, n, 1)]) * zz[(j + 2, i)] * np.log2(2)

    for v in range(num_vars):
        # excluding DRAM
        for i in range(num_mems - 1):
            if M_log[i][v] > 0:
                m.addConstr(buf_util[(i, v)] <= np.log2(M_log[i][v]), f"buffer_size_{i}_{v}")

    # get compute cost
    inner_gb_cycles = 0
    for i in range(gb_start_level):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                inner_gb_cycles += np.log2(f[j][n]) * (x[(i, j, n, 1)])

    gb_cycles = 0
    for i in range(gb_start_level, gb_start_level + perm_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                gb_cycles += np.log2(f[j][n]) * (x[(i, j, n, 1)])

    dram_cycles = 0
    for i in range(gb_start_level + perm_levels, total_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                dram_cycles += np.log2(f[j][n]) * (x[(i, j, n, 1)])

    total_compute = inner_gb_cycles + gb_cycles + dram_cycles
    gb_compute = inner_gb_cycles + gb_cycles

    # get traffic cost
    spatial_cost = {}
    for v in range(num_vars):
        size = 0
        for i in range(gb_start_level, gb_start_level + perm_levels):
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    size += np.log2(f[j][n]) * (x[(i, j, n, 0)])
        spatial_cost[v] = size

    data_size = {}
    for v in range(num_vars):
        size = 0
        for i in range(gb_start_level):
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    # TRICK prioritize spatial
                    factors = 0.8 + 0.04 * i
                    size += factors * np.log2(f[j][n]) * (x[(i, j, n, 0)] + x[i, j, n, 1]) * A[j][v]
        data_size[v] = size

    gb_traffic = {}
    for v in range(num_vars):
        size = 0
        for i in range(gb_start_level, gb_start_level + perm_levels):
            # NOTE: quadratic term (l * y); Gurobi handles this as a QP term.
            size += l[(v, i)] * y[(v, i)]
        gb_traffic[v] = size

    # use the last level gb y for DRAM
    dram_traffic = {}
    for v in range(num_vars):
        corr = y[(v, gb_start_level + perm_levels - 1)]
        i = gb_start_level + perm_levels  # DRAM
        size = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                size += np.log2(f[j][n]) * (x[(i, j, n, 1)])  # * corr
        dram_traffic[v] = size

    total_util = 0
    for i in range(gb_start_level):
        # for each memory and each variable there is a constraint
        for v in range(num_vars):
            # make weight util more important since it directly comes from dram
            factor = 1.01 if i == 2 else 1
            total_util += buf_util[(i, v)] * factor

    total_traffic = 0
    for v in range(num_vars):
        # TRICKS
        if v == 0:
            # encode dram latency for weights
            factor = 1.01
        else:
            factor = 1
        total_traffic += 0.99 * data_size[v] + 0.99 * spatial_cost[v] + gb_traffic[v] + dram_traffic[v] * factor

    # ========================== user-defined objective function ========================== #
    cosa_obj = total_util * util_factor + total_compute * compute_factor + total_traffic * traffic_factor

    # Auxiliary variables for reporting / the max() constraint below.
    max_it = m.addVar(vtype=GRB.CONTINUOUS, name="max_it")
    its = []
    its.append(m.addVar(vtype=GRB.CONTINUOUS, name="a"))
    m.addConstr(its[-1] == total_traffic, "total_traffic")
    its.append(m.addVar(vtype=GRB.CONTINUOUS, name="b"))
    m.addConstr(its[-1] == total_compute, "total_compute")
    m.addConstr(max_it == max_(its), name="max_it_constr")

    total_util_var = m.addVar(vtype=GRB.CONTINUOUS, name="total_util_var")
    total_comp_var = m.addVar(vtype=GRB.CONTINUOUS, name="total_comp_var")
    total_traf_var = m.addVar(vtype=GRB.CONTINUOUS, name="total_traf_var")

    # cycle count = total max 3 * all log factors variables
    m.addConstr(total_util_var == total_util, "total_util_constraint")
    m.addConstr(total_comp_var == total_compute, "total_comp_constraint")
    m.addConstr(total_traf_var == total_traffic, "total_traf_constraint")

    m.ModelSense = GRB.MINIMIZE
    m.setObjective(cosa_obj, GRB.MINIMIZE)

    # optimize for the objective function
    milp_time = 0
    begin_time = time.time()
    m.optimize()
    end_time = time.time()
    milp_runtime = end_time - begin_time

    # output all constraints and variables
    m.write("debug.lp")

    result_dict = {}
    for variable in m.getVars():
        # logging.debug("Variable %s: Value %s" % (variable.varName, variable.x))
        assert (variable.varName not in result_dict)
        result_dict[variable.varName] = variable.x
    logging.debug('Obj: %g' % m.objVal)

    # --- Decode the solved X variables back into mapping configurations ---
    all_x = np.zeros((total_levels, perm_levels, 2))
    for i in range(total_levels):
        level_idx = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    all_x[i, level_idx, k] = val
                level_idx += 1

    np.set_printoptions(precision=0, suppress=True)

    var_outer_perm_config = [-1] * perm_levels
    outer_perm_config = [-1] * perm_levels
    x_arr = np.zeros((perm_levels, perm_levels, 2))
    for i in range(gb_start_level, gb_start_level + perm_levels):
        level_idx = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    x_arr[i - gb_start_level, level_idx, k] = val
                name = "X({},{},{},{})".format(i, j, n, 1)
                val = result_dict[name]
                if val == 1:
                    var_outer_perm_config[i - gb_start_level] = j
                level_idx += 1
    logging.info(f'var_outer_perm_config: {var_outer_perm_config}')

    y_arr = np.zeros((num_vars, perm_levels))
    for v in range(num_vars):
        for i in range(gb_start_level, gb_start_level + perm_levels):
            row_sum = 0
            val = result_dict["y({},{})".format(v, i)]
            y_arr[v, i - gb_start_level] = val

    # Merge the permutation, taking the first appearance of a prob to be the
    merge_outer_perm_config = []
    for i, var in enumerate(var_outer_perm_config):
        if var != -1 and var not in merge_outer_perm_config:
            merge_outer_perm_config.append(var)

    for i in range(len(f)):
        if i not in merge_outer_perm_config:
            merge_outer_perm_config.append(i)

    logging.info("var idx as the value {}".format(var_outer_perm_config))
    logging.info("merged var idx as the value {}".format(merge_outer_perm_config))

    outer_perm_config = [1] * len(f)
    for i, var in enumerate(merge_outer_perm_config):
        outer_perm_config[var] = i

    logging.info("ordering idx as the value {}".format(outer_perm_config))

    # init factor_config
    # DRAM is the last level
    factor_config = []
    spatial_config = []
    dram_level = -1
    for j, f_j in enumerate(f):
        sub_factor_config = []
        sub_spatial_config = []
        for n, f_jn in enumerate(f_j):
            sub_factor_config.append(dram_level)
            sub_spatial_config.append(0)
        factor_config.append(sub_factor_config)
        spatial_config.append(sub_spatial_config)

    # Factors placed below the global buffer keep their memory level index.
    for i in range(gb_start_level):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                if f[j][n] == 1:
                    factor_config[j][n] = num_mems - 1
                    spatial_config[j][n] = 0
                    continue
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    if val >= 0.9:
                        factor_config[j][n] = i
                        if k == 0:
                            spatial_config[j][n] = 1

    # Factors placed above the perm levels collapse back to memory indices.
    for i in range(gb_start_level + perm_levels, total_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                if f[j][n] == 1:
                    factor_config[j][n] = num_mems - 1
                    spatial_config[j][n] = 0
                    continue
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    if val >= 0.9:
                        if k == 0:
                            raise ValueError('Invalid Mapping')
                        factor_config[j][n] = i - perm_levels + 1

    # set to -1 for not specified
    # Factors at the perm levels all map to the global-buffer memory level.
    for j, f_j in enumerate(f):
        for n, f_jn in enumerate(f_j):
            for i in range(gb_start_level, gb_start_level + perm_levels):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    if val >= 0.9:
                        factor_config[j][n] = gb_start_level
                        if k == 0:
                            spatial_config[j][n] = 1

    logging.info(f"prime factors: {f}")
    logging.info(f"factor configs: {factor_config}")
    logging.info(f"spatial configs: {spatial_config}")

    return (factor_config, spatial_config, outer_perm_config, milp_runtime)
def get_image_ids(idol_id):
    """Returns all image ids an idol has."""
    c.execute("SELECT id FROM groupmembers.imagelinks WHERE memberid=%s", (idol_id,))
    rows = c.fetchall()
    # Each row is a 1-tuple; collect the bare ids.
    return {'ids': [row[0] for row in rows]}
def sortarai(datablock, s, Zdiff, **kwargs):
    """
    sorts data block in to first_Z, first_I, etc.

    Parameters
    _________
    datablock : Pandas DataFrame with Thellier-Tellier type data
    s : specimen name
    Zdiff : if True, take difference in Z values instead of vector difference
            NB: this should always be False
    **kwargs :
        version : data model. if not 3, assume data model = 2.5

    Returns
    _______
    araiblock : [first_Z, first_I, ptrm_check,
                 ptrm_tail, zptrm_check, GammaChecks]
    field : lab field (in tesla)
    """
    # Pick the column names that match the requested MagIC data model.
    if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
        dec_key, inc_key = 'dir_dec', 'dir_inc'
        Mkeys = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
        meth_key = 'method_codes'
        temp_key, dc_key = 'treat_temp', 'treat_dc_field'
        dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
        # convert dataframe to list of dictionaries
        datablock = datablock.to_dict('records')
    else:
        dec_key, inc_key = 'measurement_dec', 'measurement_inc'
        Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',
                 'measurement_magn_mass', 'measurement_magnitude']
        meth_key = 'magic_method_codes'
        temp_key, dc_key = 'treatment_temp', 'treatment_dc_field'
        dc_theta_key, dc_phi_key = 'treatment_dc_field_theta', 'treatment_dc_field_phi'
    first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
    field, phi, theta = "", "", ""
    starthere = 0
    Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M = [], [], [], [], []
    ISteps, ZSteps, PISteps, PZSteps, MSteps = [], [], [], [], []
    GammaChecks = []  # comparison of pTRM direction acquired and lab field
    rec = datablock[0]
    # Use the first populated moment column as the intensity key.
    for key in Mkeys:
        if key in list(rec.keys()) and rec[key] != "":
            momkey = key
            break
    # first find all the steps
    for k in range(len(datablock)):
        rec = datablock[k]
        temp = float(rec[temp_key])
        methcodes = []
        tmp = rec[meth_key].split(":")
        for meth in tmp:
            methcodes.append(meth.strip())
        # Classify each measurement step by its method codes.
        if 'LT-T-I' in methcodes and 'LP-TRM' not in methcodes and 'LP-PI-TRM' in methcodes:
            Treat_I.append(temp)
            ISteps.append(k)
            if field == "":
                field = float(rec[dc_key])
            if phi == "":
                phi = float(rec[dc_phi_key])
                theta = float(rec[dc_theta_key])
        # stick first zero field stuff into first_Z
        if 'LT-NO' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-T-Z' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-PTRM-Z' in methcodes:
            Treat_PZ.append(temp)
            PZSteps.append(k)
        if 'LT-PTRM-I' in methcodes:
            Treat_PI.append(temp)
            PISteps.append(k)
        if 'LT-PTRM-MD' in methcodes:
            Treat_M.append(temp)
            MSteps.append(k)
        if 'LT-NO' in methcodes:
            dec = float(rec[dec_key])
            inc = float(rec[inc_key])
            st = float(rec[momkey])
            first_I.append([273, 0., 0., 0., 1])
            first_Z.append([273, dec, inc, st, 1])  # NRM step
    for temp in Treat_I:  # look through infield steps and find matching Z step
        if temp in Treat_Z:  # found a match
            istep = ISteps[Treat_I.index(temp)]
            irec = datablock[istep]
            methcodes = []
            tmp = irec[meth_key].split(":")
            for meth in tmp:
                methcodes.append(meth.strip())
            # take last record as baseline to subtract
            brec = datablock[istep - 1]
            zstep = ZSteps[Treat_Z.index(temp)]
            zrec = datablock[zstep]
            # sort out first_Z records
            if "LP-PI-TRM-IZ" in methcodes:
                ZI = 0
            else:
                ZI = 1
            dec = float(zrec[dec_key])
            inc = float(zrec[inc_key])
            st = float(zrec[momkey])
            first_Z.append([temp, dec, inc, st, ZI])
            # sort out first_I records
            try:
                idec = float(irec[dec_key])
                iinc = float(irec[inc_key])
                istr = float(irec[momkey])
            except TypeError as ex:
                raise Exception('Malformed data of some sort for dec/inc/moment in measurement: {}. You must fix this before proceeding.\n Bad record: {}'.format(irec.get('measurement', ''), irec))
            # pTRM vector = (infield measurement) - (zero-field baseline)
            X = dir2cart([idec, iinc, istr])
            BL = dir2cart([dec, inc, st])
            I = []
            for c in range(3):
                I.append((X[c] - BL[c]))
            if I[2] != 0:
                iDir = cart2dir(I)
                if Zdiff == 0:
                    first_I.append([temp, iDir[0], iDir[1], iDir[2], ZI])
                else:
                    first_I.append([temp, 0., 0., I[2], ZI])
                gamma = angle([iDir[0], iDir[1]], [phi, theta])
            else:
                first_I.append([temp, 0., 0., 0., ZI])
                gamma = 0.0
            # put in Gamma check (infield trm versus lab field)
            if 180. - gamma < gamma:
                gamma = 180. - gamma
            GammaChecks.append([temp - 273., gamma])
    for temp in Treat_PI:  # look through infield steps and find matching Z step
        step = PISteps[Treat_PI.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        st = float(rec[momkey])
        brec = datablock[step - 1]  # take last record as baseline to subtract
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = dir2cart([dec, inc, st])
        prevX = dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir1 = cart2dir(I)
        if Zdiff == 0:
            ptrm_check.append([temp, dir1[0], dir1[1], dir1[2]])
        else:
            ptrm_check.append([temp, 0., 0., I[2]])
    # in case there are zero-field pTRM checks (not the SIO way)
    for temp in Treat_PZ:
        step = PZSteps[Treat_PZ.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        st = float(rec[momkey])
        brec = datablock[step - 1]
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = dir2cart([dec, inc, st])
        prevX = dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir2 = cart2dir(I)
        zptrm_check.append([temp, dir2[0], dir2[1], dir2[2]])
    # get pTRM tail checks together -
    for temp in Treat_M:
        # tail check step - just do a difference in magnitude!
        step = MSteps[Treat_M.index(temp)]
        rec = datablock[step]
        st = float(rec[momkey])
        if temp in Treat_Z:
            step = ZSteps[Treat_Z.index(temp)]
            brec = datablock[step]
            pint = float(brec[momkey])
            # X=dir2cart([dec,inc,st])
            # prevX=dir2cart([pdec,pinc,pint])
            # I=[]
            # for c in range(3):I.append(X[c]-prevX[c])
            # d=cart2dir(I)
            # ptrm_tail.append([temp,d[0],d[1],d[2]])
            # difference - if negative, negative tail!
            ptrm_tail.append([temp, 0, 0, st - pint])
        else:
            print(
                s, ' has a tail check with no first zero field step - check input file! for step', temp - 273.)
    #
    # final check
    #
    if len(first_Z) != len(first_I):
        print(len(first_Z), len(first_I))
        print(" Something wrong with this specimen! Better fix it or delete it ")
        input(" press return to acknowledge message")
    araiblock = (first_Z, first_I, ptrm_check,
                 ptrm_tail, zptrm_check, GammaChecks)
    return araiblock, field
import sys
def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
    """Compute the url to download the build from."""
    def GetGSRootFolderName(target_platform):
        """Gets Google Cloud Storage root folder names"""
        if IsWindowsHost():
            if Is64BitWindows() and target_arch == 'x64':
                return 'Win x64 Builder'
            return 'Win Builder'
        if IsLinuxHost():
            if target_platform == 'android':
                return 'android_perf_rel'
            return 'Linux Builder'
        if IsMacHost():
            return 'Mac Builder'
        raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

    # <builder folder>/<zip file name>
    zip_name = GetZipFileName(build_revision, target_arch, patch_sha)
    return '%s/%s' % (GetGSRootFolderName(target_platform), zip_name)
import json
def _extract_then_dump(hex_string: str) -> str:
    """Extract compressed content json serialized list of paragraphs."""
    raw = bytes.fromhex(hex_string)
    paragraphs = universal_extract_paragraphs(unpack(raw))
    return json.dumps(paragraphs)
import base64
import hmac
import hashlib
def sso_redirect_url(nonce, secret, email, external_id, username, name, avatar_url, is_admin, **kwargs):
    """
    Build the Discourse SSO login redirect URL.

    nonce: returned by sso_validate()
    secret: the secret key you entered into Discourse sso secret
    email: email address of the user who logged in
    external_id: the internal id of the logged in user
    username: username of the logged in user
    name: full name of the logged in user
    avatar_url: URL of the user's avatar image
    is_admin: whether the user should be a Discourse admin
    Any extra keyword arguments are merged into the SSO payload.

    return value: URL to redirect users back to discourse, now logged in
    """
    kwargs.update({
        'nonce': nonce,
        'email': email,
        'external_id': external_id,
        'username': username,
        'name': name,
        'avatar_url': avatar_url,
        'avatar_force_update': 'true',
        'admin': is_admin
    })
    uencode = urlencode(kwargs)
    # FIX: base64.encodestring() was deprecated and removed in Python 3.9;
    # base64.encodebytes() is the direct replacement with identical output.
    return_payload = base64.encodebytes(uencode.encode())
    # Sign the base64 payload with HMAC-SHA256, as the Discourse SSO protocol requires.
    h = hmac.new(secret.encode(), return_payload, digestmod=hashlib.sha256)
    query_string = urlencode({'sso': return_payload, 'sig': h.hexdigest()})
    return '/session/sso_login?%s' % query_string
def normalized_cluster_entropy(cluster_labels, n_clusters=None):
    """ Cluster entropy normalized by the log of the number of clusters.

    Args:
        cluster_labels (list/np.ndarray): Cluster labels
        n_clusters (int, optional): Number of clusters; inferred from the
            distinct labels when omitted.

    Returns:
        float: Shannon entropy / log(n_clusters)
    """
    distinct_labels, counts = np.unique(cluster_labels, return_counts=True)
    if n_clusters is None:
        n_clusters = distinct_labels.size
    return entropy(counts) / np.log(n_clusters)
def ingest_data(data, schema=None, date_format=None, field_aliases=None):
    """
    data: Array of Dictionary objects
    schema: PyArrow schema object or list of column names
    date_format: Pandas datetime format string (with schema only)
    field_aliases: dict mapping Json field names to desired schema names
    return: a PyArrow Batch
    """
    # Dispatch on the schema/aliases combination, most specific case first.
    if isinstance(schema, list) and isinstance(field_aliases, dict):
        return _convert_data_with_column_names_dict(data, field_aliases)
    if isinstance(schema, dict):
        return _convert_data_with_column_names_dict(data, schema)
    if isinstance(schema, list):
        return _convert_data_with_column_names(data, schema)
    if isinstance(schema, pa.Schema):
        return _convert_data_with_schema(
            data, schema, date_format=date_format, field_aliases=field_aliases)
    return _convert_data_without_schema(data)
def build_messages(missing_scene_paths, update_stac):
    """Build message payloads for the given scene paths.

    :param missing_scene_paths: iterable of S3-style scene paths
    :param update_stac: flag forwarded into each message body
    :return: dict with 'message_list' (one message per path) and
        'failed' (error strings for paths with no derivable product id)
    """
    messages = []
    errors = []
    for scene_path in missing_scene_paths:
        # The product id is the last path component.
        product_id = str(scene_path.strip("/").split("/")[-1])
        if not product_id:
            errors.append(
                f"It was not possible to build product ID from path {scene_path}"
            )
        messages.append(
            {
                "Message": {
                    "landsat_product_id": product_id,
                    "s3_location": str(scene_path),
                    "update_stac": update_stac,
                }
            }
        )
    return {"message_list": messages, "failed": errors}
def DecrementPatchNumber(version_num, num):
    """Helper function for `GetLatestVersionURI`.

    DecrementPatchNumber('68.0.3440.70', 6) => '68.0.3440.64'

    Args:
      version_num(string): version number to be decremented
      num(int): the amount that the patch number need to be reduced

    Returns:
      string: decremented version number
    """
    *leading_parts, patch = version_num.split('.')
    new_patch = int(patch) - num
    assert new_patch >= 0, 'patch number cannot be negative'
    return '.'.join(leading_parts + [str(new_patch)])
def hi_means(steps, edges):
    """This applies kmeans in a hierarchical fashion.

    At each step the current point set is clustered into half as many
    centers, and those centers become the input of the next step.

    :param steps: number of halving steps to perform
    :param edges: 2-d array of input points, one per row
    :returns: a tuple of two arrays, ``kmeans_history`` containing a number of
        arrays of varying lengths and ``labels_history``, an array of length
        equal to ``edges.shape[0]``
    """
    sub_edges = edges
    kmeans_history = []
    labels_history = []
    # FIX: `xrange` and implicit truncating division are Python-2-only;
    # `range` and `//` behave identically there and also work on Python 3
    # (the cluster count must stay an int).
    for _ in range(steps):
        kmeans = nkm.kmeans(sub_edges.shape[0] // 2, sub_edges)
        sub_edges = kmeans[0]
        kmeans_history += [kmeans[0]]
        labels_history += [kmeans[1]]
    kmeans_history = np.array(kmeans_history)
    labels_history = np.array(labels_history)
    return kmeans_history, labels_history
def tag_item(tag_name, link_flag=False):
    """
    Returns Items tagged with tag_name
    ie. tag-name: django will return items tagged django.

    :param tag_name: tag to filter items by
    :param link_flag: forwarded to render(); controls link display there
    """
    # NOTE: this module uses Python 2 print statements.
    print C3 % ("\n_TAGGED RESULTS_")
    # Mutates the module-level PAYLOAD dict before POSTing it.
    PAYLOAD["tag"] = tag_name
    res = requests.post(
        GET_URL, data=json.dumps(PAYLOAD), headers=HEADERS, verify=False)
    # Service-level status 2 indicates the tag was not found (see message below).
    if res.json()['status'] == 2:
        print C3 % ("Invalid tag: Tag not found!")
        exit()
    return render(res.json()['list'], link_flag=link_flag)
def movie_info(tmdb_id):
    """Renders salient movie data from external API.

    Looks the movie up in TMDB by id, then (for authenticated users only)
    checks whether the movie is already on the user's personal film list.

    :param tmdb_id: TMDB movie id from the route.
    :return: rendered movie page; an error page for non-404 API failures;
        aborts with 404 when the id is unknown to TMDB.
    """
    # Get movie info TMDB database.
    print("Fetching movie info based on tmdb id...")
    result = TmdbMovie.get_movie_info_by_id(tmdb_id)
    # TMDB request failed.
    if not result['success']:
        print("Error!")
        # Can't find movie referenced by id.
        if result['status_code'] == 404:
            abort(404)
        else:
            # Some other error, e.g. 429: too many request.
            err_message = f"TMDB API query failed; HTTP response = {result['status_code']}"
            return render_template("errors/misc-error.html",
                                   err_message=err_message)
    # Collect movie object.
    movie = result['movie']
    # To check a user's personal movie list, user must be logged in.
    # Also, limiting the fetching of NYT movie reviews to authenticated users.
    # This will speed up display of movie info for anonymous users as NYT review
    # fetching requires time delays between API requests.
    # See whether movie is already on user's list.
    on_user_list, film_list_item_id = False, None
    # Get search-engine queries for movie.
    search_engines = {
        'Google': movie.get_query('google'),
        'DuckDuckGo': movie.get_query('duckduckgo')
    }
    if current_user.is_authenticated:
        # CHECK PERSONAL MOVIE LIST!!!
        print(f"Checking whether '{movie.title}' on user list...")
        film = FilmListItem.query.filter_by(tmdb_id=tmdb_id,
                                            user_id=current_user.id).first()
        if film:
            on_user_list = True
            film_list_item_id = film.id
        # on_user_list = True if film else False
        print(f"On user list? {on_user_list}, id: {film_list_item_id}")
    return render_template("movie.html",
                           movie=movie,
                           on_user_list=on_user_list,
                           search_engines=search_engines)
def diabetic(y, t, ui, dhat):
    """
    Expanded Bergman Minimal model including meals and insulin, with
    parameters for an insulin-dependent type-I diabetic.

    States (6): blood glucose g (mg/dL), remote insulin x (micro-U/mL),
    plasma insulin i (micro-U/mL), two insulin-absorption compartments
    q1/q2 (S1/S2), and gut blood glucose g_gut (mg/dL).

    In non-diabetic patients, the body maintains the blood glucose
    level at a range between about 3.6 and 5.8 mmol/L (64.8 and
    104.4 mg/dL with 1:18 conversion between mmol/L and mg/dL)

    :param y: state vector [g, x, i, q1, q2, g_gut]
    :param t: time step (unused; kept for the ODE-solver signature)
    :param ui: Insulin infusion rate (mU/min)
    :param dhat: Meal disturbance (mmol/L-min)
    :return: state derivatives, converted from per-minute to per-hour
    """
    g, x, i, q1, q2, g_gut = y

    # Model parameters:
    gb = 291.0    # (mg/dL) basal blood glucose
    p1 = 3.17e-2  # 1/min
    p2 = 1.23e-2  # 1/min
    si = 2.9e-2   # 1/min * (mL/micro-U)
    ke = 9.0e-2   # 1/min, insulin elimination from plasma
    kabs = 1.2e-2 # 1/min, t max,G inverse
    kemp = 1.8e-1 # 1/min, t max,I inverse
    f = 8.00e-1   # L
    vi = 12.0     # L, insulin distribution volume (kept for documentation)
    vg = 12.0     # L, glucose distribution volume

    derivatives = np.array([
        -p1 * (g - gb) - si * x * g + f * kabs / vg * g_gut + f / vg * dhat,  # (1) blood glucose
        p2 * (i - x),              # (2) remote insulin compartment dynamics
        -ke * i + ui,              # (3) plasma insulin concentration
        ui - kemp * q1,            # two-part insulin absorption, dS1/dt
        -kemp * (q2 - q1),         # two-part insulin absorption, dS2/dt
        kemp * q2 - kabs * g_gut,  # gut blood glucose
    ])
    # Convert rates from per-minute to per-hour.
    return derivatives * 60
def get_spacy_sentences(doc_text):
    """
    Split given document into its sentences

    :param doc_text: Text to tokenize
    :return: list of spacy sentences
    """
    parsed_doc = _get_spacy_nlp()(doc_text)
    return [sentence for sentence in parsed_doc.sents]
def get_recommendations(commands_fields, app_pending_changes):
    """
    Build the list of commands whose fields have pending changes.

    :param commands_fields: sequence of [command_name, [field, ...]] pairs
    :param app_pending_changes: mapping of field name -> change metadata
        (each value holds at least 'user' and 'updated')
    :return: List of object describing command to run

    >>> cmd_fields = [
    ...     ['cmd1', ['f1', 'f2']],
    ...     ['cmd2', ['prop']],
    ... ]
    >>> app_fields = {
    ...     'f2': {'field': 'f2', 'user': 'api', 'updated': '00:00'}
    ... }
    >>> from pprint import pprint
    >>> pprint(get_recommendations(cmd_fields, app_fields))
    [{'command': 'cmd1', 'field': 'f2', 'updated': '00:00', 'user': 'api'}]
    """
    recommendations = []
    for entry in commands_fields:
        command_name, field_names = entry[0], entry[1]
        # Only the first pending field of each command yields a recommendation.
        for field_name in field_names:
            if field_name in app_pending_changes:
                change = app_pending_changes[field_name]
                recommendations.append({
                    'command': command_name,
                    'field': field_name,
                    'user': change['user'],
                    'updated': change['updated'],
                })
                break
    return recommendations
import collections
def file_based_convert_examples_to_features(examples,
                                            label_list,
                                            max_seq_length,
                                            tokenizer,
                                            output_file):
    """Convert a set of `InputExample`s to a TFRecord file.

    Each example is tokenized/featurized by `convert_single_example` and
    written as one serialized tf.train.Example with the four int64 feature
    lists the model expects (input_ids, input_mask, segment_ids, label_ids).
    """

    def create_int_feature(values):
        # Wrap a list of ints in a tf.train.Feature protobuf.
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

    writer = tf.python_io.TFRecordWriter(output_file)
    for (ex_index, example) in enumerate(examples):
        # Progress logging every 10k examples.
        if ex_index % 10000 == 0:
            tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
        feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
        features = collections.OrderedDict()
        features["input_ids"] = create_int_feature(feature.input_ids)
        features["input_mask"] = create_int_feature(feature.input_mask)
        features["segment_ids"] = create_int_feature(feature.segment_ids)
        features["label_ids"] = create_int_feature([feature.label_id])
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())
    writer.close()
def hello():
    """Return a friendly HTTP greeting."""
    greeting = 'Hello World!!!'
    return greeting
from typing import Union
from typing import List
from typing import Tuple
def _findStress(
    syllables: Union[List[Syllable], List[List[str]]]
) -> Tuple[List[int], List[int]]:
    """Find the syllable and phone indices for stress annotations.

    Returns two parallel lists (syllable indices, phone indices).  The
    primary-stressed position (marked "ˈ") is placed at the front of both
    lists; secondary stresses (marked "ˌ") follow in order of appearance.
    """
    tmpSyllables = [_toSyllable(syllable) for syllable in syllables]

    stressedSyllables: List[int] = []
    stressedPhones: List[int] = []
    for syllableI, syllable in enumerate(tmpSyllables):
        for phoneI, phone in enumerate(syllable.phonemes):
            if "ˈ" in phone:
                # Primary stress goes to the front of the result lists and
                # ends the scan of this syllable.
                stressedSyllables.insert(0, syllableI)
                stressedPhones.insert(0, phoneI)
                break

            if "ˌ" in phone:
                # Secondary stress is appended; keep scanning, as a later
                # phone in the same syllable could still carry "ˈ".
                stressedSyllables.append(syllableI)
                stressedPhones.append(phoneI)

    return stressedSyllables, stressedPhones
from hydrus.data.helpers import get_path_from_type
import random
import string
def gen_dummy_object(class_title, doc):
    """
    Create a dummy object based on the definitions in the API Doc.

    For a collection class, three random member objects are generated,
    inserted via CRUD, and referenced in the returned object's "members".
    For a parsed class, every writable supported property gets a random
    value (a nested dummy object when the property range is a doc class).

    :param class_title: Title of the class whose object is being created.
    :param doc: ApiDoc.
    :return: A dummy object of class `class_title`.
    """
    object_ = {
        "@type": class_title
    }
    expanded_base_url = DocUrl.doc_url
    for class_path in doc.collections:
        if class_title == doc.collections[class_path]["collection"].name:
            members = list()
            manages_class_titles = list()
            # BUGFIX: index by the loop key `class_path` (the dict is keyed by
            # path), not by the collection's display title.
            collection_manages = doc.collections[class_path]["collection"].manages
            if type(collection_manages) is dict:
                # only one manages block
                manages_class = collection_manages['object'].split(expanded_base_url)[1]
                manages_class_titles.append(manages_class)
            elif type(collection_manages) is list:
                # multiple manages blocks
                for manages_block in collection_manages:
                    # BUGFIX: read each block, not the enclosing list.
                    manages_class = manages_block['object'].split(expanded_base_url)[1]
                    manages_class_titles.append(manages_class)
            for _ in range(3):
                member_class = random.choice(manages_class_titles)
                member = gen_dummy_object(member_class, doc)
                member_id = crud.insert(object_=member,
                                        session=get_session(),
                                        collection=False)
                member_class_path = get_path_from_type(member_class)
                member_api_path = f'/{get_api_name()}/{member_class_path}/{member_id}'
                members.append({
                    "@id": member_api_path,
                    "@type": member_class,
                })
            object_['members'] = members
            return object_
    for class_path in doc.parsed_classes:
        if class_title == doc.parsed_classes[class_path]["class"].title:
            for prop in doc.parsed_classes[class_path]["class"].supportedProperty:
                # Read-only properties must not appear in a dummy payload.
                if prop.write is False:
                    continue
                if isinstance(prop.prop, HydraLink):
                    # Links get a random opaque token.
                    object_[prop.title] = ''.join(random.choice(
                        string.ascii_uppercase + string.digits) for _ in range(6))
                elif expanded_base_url in prop.prop:
                    # Property range is another doc class: nest a dummy object.
                    prop_class = prop.prop.split(expanded_base_url)[1]
                    object_[prop.title] = gen_dummy_object(prop_class, doc)
                else:
                    type_ = prop.kwargs.get('range')
                    if type_ is not None:
                        object_[prop.title] = random.randint(50, 100)
                    else:
                        object_[prop.title] = ''.join(random.choice(
                            string.ascii_uppercase + string.digits) for _ in range(6))
            return object_
def gather_info(arguments) -> Info:
    """Gather info."""
    if arguments.integration:
        info = {"domain": arguments.integration}
    elif arguments.develop:
        # Developer mode skips all interactive prompts.
        print("Running in developer mode. Automatically filling in info.")
        print()
        info = {"domain": "develop"}
    else:
        domain_field = {
            "prompt": "What is the domain?",
            "validators": [
                CHECK_EMPTY,
                [
                    "Domains cannot contain spaces or special characters.",
                    lambda value: value == slugify(value),
                ],
            ],
        }
        info = _gather_info({"domain": domain_field})
    manifest_path = COMPONENT_DIR / info["domain"] / "manifest.json"
    info["is_new"] = not manifest_path.exists()
    if not info["is_new"]:
        # Existing integration: load its stored data instead of prompting.
        return _load_existing_integration(info["domain"])
    if arguments.develop:
        dev_defaults = {
            "name": "Develop Hub",
            "codeowner": "@developer",
            "requirement": "aiodevelop==1.2.3",
            "oauth2": True,
        }
        info.update(dev_defaults)
    else:
        info.update(gather_new_integration(arguments.template == "integration"))
    return Info(**info)
import subprocess
def get_git_version():
    """
    Get the version from git.
    """
    described = subprocess.check_output(['git', 'describe', '--tags'])
    return described.strip()
from loopy.preprocess import preprocess_program, infer_unknown_types
def get_mem_access_map(program, numpy_types=True, count_redundant_work=False,
                       subgroup_size=None):
    """Count the number of memory accesses in a loopy kernel.
    :arg knl: A :class:`loopy.LoopKernel` whose memory accesses are to be
        counted.
    :arg numpy_types: A :class:`bool` specifying whether the types in the
        returned mapping should be numpy types instead of
        :class:`loopy.LoopyType`.
    :arg count_redundant_work: Based on usage of hardware axes or other
        specifics, a kernel may perform work redundantly. This :class:`bool`
        flag indicates whether this work should be included in the count.
        (Likely desirable for performance modeling, but undesirable for
        code optimization.)
    :arg subgroup_size: An :class:`int`, :class:`str` ``'guess'``, or
        *None* that specifies the sub-group size. An OpenCL sub-group is an
        implementation-dependent grouping of work-items within a work-group,
        analogous to an NVIDIA CUDA warp. subgroup_size is used, e.g., when
        counting a :class:`MemAccess` whose count_granularity specifies that it
        should only be counted once per sub-group. If set to *None* an attempt
        to find the sub-group size using the device will be made, if this fails
        an error will be raised. If a :class:`str` ``'guess'`` is passed as
        the subgroup_size, get_mem_access_map will attempt to find the
        sub-group size using the device and, if unsuccessful, will make a wild
        guess.
    :return: A :class:`ToCountMap` of **{** :class:`MemAccess` **:**
        :class:`islpy.PwQPolynomial` **}**.
        - The :class:`MemAccess` specifies the characteristics of the memory
          access.
        - The :class:`islpy.PwQPolynomial` holds the number of memory accesses
          with the characteristics specified in the key (in terms of the
          :class:`loopy.LoopKernel` *inames*).
    Example usage::
        # (first create loopy kernel and specify array data types)
        params = {'n': 512, 'm': 256, 'l': 128}
        mem_map = get_mem_access_map(knl)
        f32_s1_g_ld_a = mem_map[MemAccess(
                            mtype='global',
                            dtype=np.float32,
                            lid_strides={0: 1},
                            gid_strides={0: 256},
                            direction='load',
                            variable='a',
                            count_granularity=CountGranularity.WORKITEM)
                           ].eval_with_dict(params)
        f32_s1_g_st_a = mem_map[MemAccess(
                            mtype='global',
                            dtype=np.float32,
                            lid_strides={0: 1},
                            gid_strides={0: 256},
                            direction='store',
                            variable='a',
                            count_granularity=CountGranularity.WORKITEM)
                           ].eval_with_dict(params)
        f32_s1_l_ld_x = mem_map[MemAccess(
                            mtype='local',
                            dtype=np.float32,
                            lid_strides={0: 1},
                            gid_strides={0: 256},
                            direction='load',
                            variable='x',
                            count_granularity=CountGranularity.WORKITEM)
                           ].eval_with_dict(params)
        f32_s1_l_st_x = mem_map[MemAccess(
                            mtype='local',
                            dtype=np.float32,
                            lid_strides={0: 1},
                            gid_strides={0: 256},
                            direction='store',
                            variable='x',
                            count_granularity=CountGranularity.WORKITEM)
                           ].eval_with_dict(params)
        # (now use these counts to, e.g., predict performance)
    """
    # Resolve all types, then preprocess, before counting.
    program = infer_unknown_types(program, expect_completion=True)
    program = preprocess_program(program)
    access_map = ToCountMap()
    callables_count = program.callables_table.callables_count
    for func_id, in_knl_callable in program.callables_table.items():
        if isinstance(in_knl_callable, CallableKernel):
            knl = in_knl_callable.subkernel
            knl_access_map = get_mem_access_map_for_single_kernel(knl,
                    program.callables_table, numpy_types,
                    count_redundant_work, subgroup_size)
            # FIXME: didn't see any easy way to multiply
            for i in range(callables_count[func_id]):
                access_map += knl_access_map
        elif isinstance(in_knl_callable, ScalarCallable):
            # Scalar callables perform no array accesses.
            pass
        else:
            # BUGFIX: corrected "callabke" typo in the error message.
            raise NotImplementedError("Unknown callable type %s." % (
                type(in_knl_callable).__name__))
    return access_map
def __is_geotagging_input(question_input, _):
    """Validates the specified geotagging input configuration.
    A geotagging input configuration contains the following optional fields:
    - location: a string that specifies the input's initial location.
    Args:
        question_input (dict): An input configuration to validate.
    Returns:
        <bool, str|None>: A pair containing the value True if the specified configuration
            is valid, False otherwise; as well as an error message in case it is invalid.
    """
    location = question_input.get("location")
    if location is None:
        # The field is optional: missing is fine.
        return (True, None)
    message = "A geotagging input's 'location' field must be a non-empty string."
    try:
        empty = is_empty_string(location)
    except TypeError:
        # Non-string value.
        return (False, message)
    if empty:
        return (False, message)
    return (True, None)
def _be_num_input(num_type, than, func=_ee_num_input, text='', error_text="Enter number great or equal than ",
                  error_text_format_bool=True,
                  error_text_format="Enter number great or equal than {}", pause=True, pause_text_bool=True,
                  pause_text='Press Enter...', clear=True, error_text_input="Enter number!",
                  pause_input=True, pause_input_text_bool=True, pause_input_text=True, clear_input=True,
                  error_text_bool=True, error_text_input_bool=True, sleep_bool=True, sleep_time=1,
                  sleep_text_bool=True, sleep_format_text_bool=True, sleep_text="Sleeping time",
                  sleep_format_text="Sleep for {} seconds!",
                  sleep_bool_input=True, sleep_time_input=1, sleep_text_bool_input=True,
                  sleep_format_text_bool_input=True,
                  sleep_text_input="Sleeping time", sleep_format_text_input="Sleep for {} seconds!"):
    """
    Prompt for a number that must be greater than or equal to ``than``.

    This is a thin convenience wrapper: it forwards every argument unchanged
    to ``func`` (``_ee_num_input`` by default) with the comparison mode fixed
    to ``eq='<=<='`` ("both-inclusive"), so all prompting, validation, retry,
    pause and sleep behaviour is implemented by ``func``.

    :param func: function that instantly returned
    :param error_text_format_bool: bool to show formatted error text or not
    :param error_text_format: formatted error text
    :param error_text_input_bool: bool to show error text or not in input error case
    :param num_type: type to input
    :param text: text that shows in input default= ''
    :param error_text_bool: bool to show error text or not
    :param error_text: text that show in case of error bool is true
    :param pause: bool to pause for a while
    :param pause_text_bool: bool to show text op pause
    :param pause_text: text show on pause
    :param clear: bool to clear cmd
    :param than: number that input number must be great or equal
    :param error_text_input: error_text_bool but in input
    :param pause_input: pause but in input
    :param pause_input_text_bool: pause_text_bool but in input
    :param pause_input_text: pause_text but in input
    :param clear_input: bool to clear cmd in input but in input
    :param sleep_format_text: formatted sleep text
    :param sleep_text: sleep text
    :param sleep_format_text_bool: if set True that show sleep_format_text else show sleep text
    :param sleep_time: time to sleep
    :param sleep_text_bool: if set True show text on sleeping
    :param sleep_bool: if True sleep program for a sleep_time
    :param sleep_format_text_input: formatted sleep text
    :param sleep_text_input: sleep text
    :param sleep_format_text_bool_input: if set True that show sleep_format_text else show sleep text
    :param sleep_time_input: time to sleep
    :param sleep_text_bool_input: if set True show text on sleeping
    :param sleep_bool_input: if True sleep program for a sleep_time
    :return: number
    """
    # Pure pass-through: only `eq` is added; everything else is forwarded as-is.
    return func(num_type=num_type, eq='<=<=', than=than, text=text, error_text=error_text,
                error_text_format_bool=error_text_format_bool,
                error_text_format=error_text_format, pause=pause, pause_text_bool=pause_text_bool,
                pause_text=pause_text, clear=clear, error_text_input=error_text_input,
                pause_input=pause_input, pause_input_text_bool=pause_input_text_bool,
                pause_input_text=pause_input_text, clear_input=clear_input,
                error_text_bool=error_text_bool, error_text_input_bool=error_text_input_bool,
                sleep_bool_input=sleep_bool_input,
                sleep_time_input=sleep_time_input, sleep_text_bool_input=sleep_text_bool_input,
                sleep_format_text_bool_input=sleep_format_text_bool_input,
                sleep_text_input=sleep_text_input, sleep_format_text_input=sleep_format_text_input,
                sleep_bool=sleep_bool,
                sleep_time=sleep_time, sleep_text_bool=sleep_text_bool, sleep_format_text_bool=sleep_format_text_bool,
                sleep_text=sleep_text, sleep_format_text=sleep_format_text)
def createParetoFig(_pareto_df, _bestPick):
    """
    Initalize figure and axes objects using pyplot for pareto curve

    Parameters
    ----------
    _pareto_df : Pandas DataFrame
        DataFrame from Yahoo_fin that contains all the relevant options data
    _bestPick : Pandas Series
        Option data for the best pick given the user input settings

    Returns
    -------
    pareto_fig : matplotlib figure object
        figure used to plot the stockPareto data from the _pareto_df input
    pareto_ax : matplotlib axes object
        axes object that holds the stockPareto data from _pareto_df input
        plotted using pandas integrated matplotlib .plot function
    """
    fig = Figure(figsize=(6, 6), dpi=100)
    ax = fig.add_subplot(111)
    ax.set_title('Pareto Curve of Available Options in DOW JONES Index')
    # Scatter all candidate options, then mark the best pick with cross-hairs.
    _pareto_df.plot.scatter(x='POP', y='Potential Gain Multiple Contracts', ax=ax)
    ax.set_xlabel('Probability of Profit (%)')
    ax.set_ylabel('Potential Gain ($)')
    ax.axvline(_bestPick['POP'], color='green', ls='--')
    ax.axhline(_bestPick['Potential Gain Multiple Contracts'], color='green', ls='--')
    return fig, ax
def rem4(rings, si):
    """Determine whether silicon atom `si` lies inside any ring.

    For each ring, all pairwise distances between ring atoms are computed and
    the two longest pairs (presumably the diagonals — TODO confirm for rings
    larger than 4 atoms) are discarded, leaving the ring's edges.  The ring is
    then fanned into triangles against `si`: if the triangle areas sum to the
    ring's area, `si` is inside.

    :param rings: iterable of rings, each a sequence of atom positions
    :param si: position of the silicon atom
    :return: "n" if `si` is inside some ring, "y" otherwise
    """
    for ring in rings:
        triangles = 0
        distances = []
        locations = []
        for a in range(len(ring) - 1):
            for b in range(1, len(ring) - a):
                distances.append(distance(ring[a], ring[a + b]))
                locations.append([a, a + b])
        # BUGFIX: the original appended len(ring) (a bare int) to `locations`,
        # which later crashed the triangle loop with a TypeError on
        # locations[n][0]; the edge list must contain index pairs only.
        for _ in range(2):
            longest = distances.index(max(distances))
            del locations[longest]
            del distances[longest]
        for pair in locations:
            triangles += triarea(ring[pair[0]], ring[pair[1]], si)
        # NOTE(review): exact float equality between areas is fragile;
        # a math.isclose() tolerance is probably intended — confirm.
        if ringarea(ring) == triangles:
            return "n"
    return "y"
def ilogit(x):
    """Return the inverse logit (logistic sigmoid): exp(x) / (1 + exp(x)).

    NOTE(review): relies on an ``exp`` already in scope — not visible here;
    could be ``math.exp`` (scalars only) or ``numpy.exp`` (elementwise).
    With ``math.exp`` this naive form overflows for x greater than ~709;
    the equivalent ``1 / (1 + exp(-x))`` is stable there — confirm intent.
    """
    return exp(x) / (1.0 + exp(x))
def get_out_of_bounds_func(limits, bounds_check_type="cube"):
    """Return a function mapping parameter rows to a boolean array,
    True for rows that are out of bounds.

    :param limits: pair ``(lower, upper)`` of array-likes broadcastable
        against a row of parameters.
    :param bounds_check_type: only ``"cube"`` is supported — each parameter
        is checked independently against its own interval.
    :raises ValueError: for any other ``bounds_check_type``.
    """
    if bounds_check_type != "cube":
        raise ValueError(
            f'Only "cube" bounds checks are currently supported; You selected {bounds_check_type}'
        )

    def out_of_bounds(params):
        """ "cube" bounds_check_type; checks each parameter independently"""
        # BUGFIX: np.alltrue was removed in NumPy 2.0; np.all is the
        # supported, equivalent spelling.
        return ~np.all(
            np.logical_and(limits[0] <= params, params <= limits[1]), axis=-1
        )

    return out_of_bounds
from typing import Dict
from typing import Set
def inspectors_for_each_mode(lead_type="lead_inspector") -> Dict[str, Set[str]]:
    """
    Group lead (or deputy lead) inspectors by submode descriptor.
    """
    if lead_type not in ["lead_inspector", "deputy_lead_inspector"]:
        raise ValueError("Can only query for lead_inspector and deputy_lead_inspector attributes.")
    grouped = {}
    for submode in Submode.objects.all():
        # Collect the chosen inspector attribute from each organisation.
        inspectors = {
            getattr(org, lead_type)
            for org in submode.organisation_set.all()
        }
        # Organisations without an assigned inspector yield None; drop it.
        inspectors.discard(None)
        grouped[submode.descriptor] = inspectors
    return grouped
from .link_shortcuts import add_shortcut_to_desktop, suffix
from .module_install import ModuleInstall
import os
def add_shortcut_to_desktop_for_module(name):
    """
    Adds a shortcut on a module which includes a script.

    @param      name        name of the module
    @return                 shortcut was added or not
    """
    if name != "spyder":
        raise NotImplementedError(
            "nothing implemented for module: {0}".format(name))
    module = ModuleInstall("spyder", "exe", script="spyder.bat")
    script = module.Script
    if not os.path.exists(script):
        return False
    version = suffix()
    label = name + "." + version
    created = add_shortcut_to_desktop(script, label, label)
    return os.path.exists(created)
import tables
def is_hdf_file(f):
    """Checks if the given file object is recognized as a HDF file.

    :type f: str | tables.File
    :param f: The file object. Either a str object holding the file name or
        a HDF file instance.
    :return bool: True if *f* is an open ``tables.File`` or a file name with
        a recognized HDF extension.
    """
    if isinstance(f, str):
        # Generalized to also accept the common '.hdf5' extension.
        return f.endswith(('.hdf', '.h5', '.hdf5'))
    return isinstance(f, tables.File)
def dummy_receivers(request, dummy_streamers):
    """Provides `acquire.Receiver` objects for dummy devices.

    Either constructs by giving source ID, or by mocking user input.
    """
    receivers = {}
    for idx, (_, _, source_id, _) in enumerate(dummy_streamers):
        # Feed the device index to the constructor as simulated keyboard input.
        with mock.patch('builtins.input', side_effect=str(idx)):
            receivers[source_id] = request.param(source_id=source_id,
                                                 autostart=False)

    def teardown():
        # Stop every receiver when the requesting test finishes.
        for receiver in receivers.values():
            receiver.stop()

    request.addfinalizer(teardown)
    return receivers
import six
import inspect
def get_package_formats():
    """Get the list of available package formats and parameters.

    Introspects the generated ``cloudsmith_api.models`` classes named
    ``PackagesUpload*`` and returns a mapping of
    ``{format_name: {param_name: {'type', 'help', 'required'}}}``.
    """
    # pylint: disable=fixme
    # HACK: This obviously isn't great, and it is subject to change as
    # the API changes, but it'll do for now as a interim method of
    # introspection to get the parameters we need.
    def get_parameters(cls):
        """Build parameters for a package format."""
        params = {}
        # Create a dummy instance so we can check if a parameter is required.
        # As with the rest of this function, this is obviously hacky. We'll
        # figure out a way to pull this information in from the API later.
        dummy_kwargs = {
            k: 'dummy'
            for k in cls.swagger_types
        }
        instance = cls(**dummy_kwargs)
        for k, v in six.iteritems(cls.swagger_types):
            # Each swagger attribute is a property; its docstring's second
            # line (when present) carries the human-readable description.
            attr = getattr(cls, k)
            docs = attr.__doc__.strip().split('\n')
            doc = (docs[1] if docs[1] else docs[0]).strip()
            # Probe requiredness: generated setters raise ValueError when a
            # required attribute is assigned None.
            try:
                setattr(instance, k, None)
                required = False
            except ValueError:
                required = True
            params[cls.attribute_map.get(k)] = {
                'type': v,
                'help': doc,
                'required': required
            }
        return params
    # Strip the 'PackagesUpload' prefix to obtain the format name key.
    return {
        key.replace('PackagesUpload', '').lower(): get_parameters(cls)
        for key, cls in inspect.getmembers(cloudsmith_api.models)
        if key.startswith('PackagesUpload')
    }
def construct_reverse_protocol(splitting="OVRVO"):
    """Run the steps in the reverse order, and for each step, use the time-reverse of that kernel."""
    step_length = make_step_length_dict(splitting)
    return [
        partial(reverse_kernel(step_mapping[step]), dt=step_length[step])
        for step in reversed(splitting)
    ]
def tabinv(xarr, x):
    """
    Find the effective index in xarr of each element in x.

    The effective index for each element j in x is the value i such that
    :math:`xarr[i] <= x[j] <= xarr[i+1]`, to which is added an interpolation
    fraction based on the size of the intervals in xarr.

    Parameters
    ----------
    xarr : array-like
        The array of values to search; must be monotonic and contain at
        least 2 elements.
    x : float or array-like
        Value (or list of values) to look for in xarr.

    Returns
    -------
    ieff : numpy.ndarray of float
        Effective (fractional) index for each element of x, clipped
        below at 0.
    """
    if len(xarr) <= 1:
        raise ValueError("Search array must contain at least 2 elements")
    if not (np.all(np.diff(xarr) >= 0) or (np.all(np.diff(xarr) <= 0))):
        raise ValueError("Search array must be monotonic")
    # BUGFIX: always coerce x to a 1-D array. The original left lists/tuples
    # untouched, which made the fancy indexing x[g] below raise TypeError.
    x = np.atleast_1d(np.asarray(x, dtype=np.float64))
    # NOTE(review): np.searchsorted assumes ascending order; a strictly
    # descending xarr passes the monotonic check above but is not searched
    # correctly — confirm intended usage.
    # ieff[j] is the insertion point of x[j] (len(xarr) when past the end).
    ieff = np.searchsorted(xarr, x, side='right').astype(np.float64)
    # Only in-range positions get an interpolation fraction.
    g = np.where((ieff >= 0) & (ieff < (len(xarr) - 1)))
    # BUGFIX: parenthesis placement — len(g[0]) > 0, not len(g[0] > 0).
    if len(g) > 0 and len(g[0]) > 0:
        neff = ieff[g].astype(np.int32)
        x0 = xarr[neff].astype(np.float64)
        diff = x[g] - x0
        ieff[g] = neff + diff / (xarr[neff + 1] - x0)
    # Clamp below-range results to index 0.
    ieff = np.where(ieff > 0., ieff, 0.)
    return ieff
import configparser
def getldapconfig():
    """Return the current LDAP configuration as a 4-tuple
    (address, username, password, base).

    Retries every 0.4 s until the config file can be read, preserving the
    original best-effort behaviour — but iteratively instead of by
    recursion, which would eventually raise RecursionError, and with the
    bare ``except`` narrowed so Ctrl-C is not swallowed.
    """
    while True:
        cfg = configparser.ConfigParser()
        cfg.read(srv_path)
        try:
            return (cfg.get('Ldap', 'ldap_address'),
                    cfg.get('Ldap', 'ldap_username'),
                    # '$percent' is an escape for literal '%' in the stored value.
                    cfg.get('Ldap', 'ldap_password').replace("$percent", "%"),
                    cfg.get('Ldap', 'ldap_base'))
        except Exception:
            # Config file missing or incomplete: wait briefly and retry.
            sleep(0.4)
def rename_dict_key(_old_key, _new_key, _dict):
    """
    renames a key in a dict without losing the order
    """
    renamed = {}
    for key, value in _dict.items():
        renamed[_new_key if key == _old_key else key] = value
    return renamed
def api_browse_use_case() -> use_cases.APIBrowseUseCase:
    """Get use case instance."""
    use_case = use_cases.APIBrowseUseCase(items_repository)
    return use_case
import logging
def treeIntersectIds(node, idLookup, sampleSet, lookupFunc=None):
    """For each leaf in node, attempt to look up its label in idLookup; replace if found.
    Prune nodes with no matching leaves.  Store new leaf labels in sampleSet.
    If lookupFunc is given, it is passed two arguments (label, idLookup) and returns a
    possibly empty list of matches.

    :param node: dict with 'label' and 'kids' keys ('kids': possibly empty list of child nodes)
    :param idLookup: dict mapping leaf labels to lists of replacement ids
    :param sampleSet: set collecting every replacement label actually applied
    :param lookupFunc: optional callable(label, idLookup) -> list of matches
    :return: the pruned/relabelled node, or None if no leaf under it matched
    """
    if node['kids']:
        # Internal node: recurse into children and drop those with no matches.
        prunedKids = []
        for kid in node['kids']:
            kidIntersected = treeIntersectIds(kid, idLookup, sampleSet, lookupFunc)
            if kidIntersected:
                prunedKids.append(kidIntersected)
        if len(prunedKids) > 1:
            node['kids'] = prunedKids
        elif len(prunedKids) == 1:
            # Collapse a single-child internal node down to that child.
            node = prunedKids[0]
        else:
            node = None
    else:
        # Leaf: look up its label; prune the leaf if there is no match.
        label = node['label']
        if lookupFunc:
            matchList = lookupFunc(label, idLookup)
        else:
            matchList = idLookup.get(label, [])
        if not matchList:
            logging.info("No match for leaf '" + label + "'")
            node = None
        else:
            if len(matchList) != 1:
                # Ambiguous match: warn and leave the original label in place.
                # (logging.warn is a deprecated alias; use logging.warning.)
                logging.warning("Non-unique match for leaf '" + label + "': ['" +
                                "', '".join(matchList) + "']")
            else:
                logging.debug(label + ' --> ' + matchList[0])
                node['label'] = matchList[0]
                sampleSet.add(matchList[0])
    return node
def get_descriptive_verbs(tree, gender):
    """
    Returns a list of verbs describing pronouns of the given gender in the given dependency tree.

    :param tree: dependency tree for a document, output of **generate_dependency_tree**
    :param gender: `Gender` to search for usages of
    :return: List of verbs as strings
    """
    verb_tags = ("VBD", "VB", "VBP", "VBZ")
    verbs = []
    for sentence in tree:
        for governor, relation, dependent in sentence:
            # Keep only subject relations whose governor is a verb and whose
            # dependent token matches one of the gender's identifiers.
            if relation != "nsubj" or governor[1] not in verb_tags:
                continue
            if dependent[0] in gender.identifiers:
                verbs.append(governor[0])
    return verbs
def client():
    """Return a client instance"""
    instance = Client('192.168.1.1')
    return instance
import os
import sys
import re
from bs4 import BeautifulSoup
def load_html_file(file_dir):
    """ Uses BeautifulSoup to load an html """
    with open(file_dir, 'rb') as fp:
        raw = fp.read()
    decode_needed = os.name == 'nt' or sys.version_info[0] == 3
    if decode_needed:
        raw = raw.decode(encoding='utf-8', errors='strict')
    # Replace each space adjacent to a tag with a '!space!' marker —
    # presumably so significant whitespace survives parsing (TODO confirm).
    raw = re.sub(r'(\>)([ ]+)',
                 lambda match: match.group(1) + ('!space!' * len(match.group(2))), raw)
    raw = re.sub(r'([ ]+)(\<)',
                 lambda match: ('!space!' * len(match.group(1))) + match.group(2), raw)
    if decode_needed:
        raw = raw.encode('utf-8', 'ignore')
    return BeautifulSoup(raw, 'html.parser')
import termios, fcntl, sys, os
def read_single_keypress():
    """Waits for a single keypress on stdin.

    This is a silly function to call if you need to do it a lot because it has
    to store stdin's current setup, setup stdin for reading single keystrokes
    then read the single keystroke then revert stdin back after reading the
    keystroke.

    Returns a tuple of characters of the key that was pressed - on Linux,
    pressing keys like up arrow results in a sequence of characters. Returns
    ('\x03',) on KeyboardInterrupt which can happen when a signal gets
    handled.

    Only works on POSIX terminals (uses termios/fcntl); stdin must be a tty.
    """
    fd = sys.stdin.fileno()
    # save old state
    flags_save = fcntl.fcntl(fd, fcntl.F_GETFL)
    attrs_save = termios.tcgetattr(fd)
    # make raw - the way to do this comes from the termios(3) man page.
    attrs = list(attrs_save)  # copy the stored version to update
    # iflag: disable break/parity handling, CR/NL translation and XON flow control
    attrs[0] &= ~(
        termios.IGNBRK
        | termios.BRKINT
        | termios.PARMRK
        | termios.ISTRIP
        | termios.INLCR
        | termios.IGNCR
        | termios.ICRNL
        | termios.IXON
    )
    # oflag: disable output post-processing
    attrs[1] &= ~termios.OPOST
    # cflag: 8-bit characters, no parity
    attrs[2] &= ~(termios.CSIZE | termios.PARENB)
    attrs[2] |= termios.CS8
    # lflag: no echo, no canonical (line-buffered) mode, no signal chars
    attrs[3] &= ~(
        termios.ECHONL | termios.ECHO | termios.ICANON | termios.ISIG | termios.IEXTEN
    )
    termios.tcsetattr(fd, termios.TCSANOW, attrs)
    # turn off non-blocking so the first read waits for a key
    fcntl.fcntl(fd, fcntl.F_SETFL, flags_save & ~os.O_NONBLOCK)
    # read a single keystroke
    ret = []
    try:
        ret.append(sys.stdin.read(1))  # returns a single character
        # switch to non-blocking to drain any escape-sequence continuation bytes
        fcntl.fcntl(fd, fcntl.F_SETFL, flags_save | os.O_NONBLOCK)
        c = sys.stdin.read(1)  # returns a single character
        while len(c) > 0:
            ret.append(c)
            c = sys.stdin.read(1)
    except KeyboardInterrupt:
        # report Ctrl-C as its control character
        ret.append("\x03")
    finally:
        # restore old state
        termios.tcsetattr(fd, termios.TCSAFLUSH, attrs_save)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags_save)
    return tuple(ret)
def set_table(table, fold_test, inner_number_folds, index_table, y_name):
    """ Set the table containing the data information

    Set the table by adding to each entry (patient) its start and end indexes in the concatenated data object.
    In fact each patients i is composed by `n_i` tiles so that for example patient 0 will have as starts and ends indices 0 and `n_0`.
    It then separates the dataset into test and train sets (according to `fold_test`).
    Finally, several splits of the train sets are done for cross validation, preserving relative class frequency.
    Obviously, dataset is shuffled, and splitted at the patient level, so that the indexes returned are the table indexes,
    not the concatenated object indexes.

    Parameters
    ----------
    table : pd.DataFrame
        data information.
    fold_test : int
        number of the fold which will be used for testing.
    inner_number_folds : int
        number of splits used in the cross validation.
    index_table : dict
        maps each file (key) to its start and end index in the data object (concatenated encoded bags)
    y_name : str
        or "y_interest", is the name of the target variable.

    Returns
    -------
    pd.DataFrame, list(tuple), list
        returns 1: the table DataFrame augmented with start and end indexes
                2: The `inner_number_folds` splits for cross_validation, each containing (list(train_indexes), list(val_indexes)).
                3: List containing indexes of the test dataset.
    """
    ## add index_table to table so that all the info is in table
    table = add_index(table, index_table)
    train_table = table[table["fold"] != fold_test]
    test_index = table[table["fold"] == fold_test].index
    # Round the (possibly continuous) target so the folds can be stratified on it.
    stratified_variable = train_table[y_name].round(0)
    skf = StratifiedKFold(n_splits=inner_number_folds, shuffle=True) # Assures that relative class frequency is preserve in each folds.
    obj = skf.split(train_table.index, stratified_variable)
    # Map the split's positional indices back to the table's own index labels.
    index_folds = [(np.array(train_table.index[train_index]), np.array(train_table.index[val_index]))
                   for train_index, val_index in obj]
    return table, index_folds, test_index
def parse_args():
    """
    Parse the command-line arguments.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "-b",
        "--bucket-name",
        help="S3 bucket name",
    )
    parser.add_argument(
        "-d",
        "--days",
        type=int,
        help="Number of days",
    )
    return parser.parse_args()
def predict(file):
    """
    Returns values predicted
    """
    image = load_img(file, target_size=(WIDTH, HEIGHT))
    # Add a batch dimension so NET receives shape (1, H, W, C).
    batch = np.expand_dims(img_to_array(image), axis=0)
    scores = NET.predict(batch)[0]
    best = np.argmax(scores)
    return CLASSES[best], scores
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.