content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def snr2Ivar(flux, snr):
    """
    Estimate the inverse variance given flux and S/N.

    Parameters
    ----------
    flux : scalar or array of float
        Flux of the object.
    snr : scalar or array of float
        Signal to noise ratio.

    Returns
    -------
    scalar or array of float
        Inverse variance, i.e. 1 / sigma**2 where sigma = flux / snr.
    """
    sigma = flux / snr
    return 1.0 / (sigma ** 2.0)
def revision_to_cashflows(rev, end_date):
    """Convert a revision to a list of monthly cashflows.

    rev -- revision object exposing start_date, end_date (may be None),
           rent and provision attributes
    end_date -- the date from which we want to stop computing; when the
           revision has its own end date, that takes precedence (rounded
           up to the following month)

    Returns a list of Cashflow(first_of_month, amount, label) entries:
    one "rent" entry per month, plus a "provision" entry for months where
    rev.provision != 0.  Amounts are negated (outgoing) and pro-rated by
    the number of days the revision covers in the month.
    """
    # A revision with its own end date stops at the month following it.
    if rev.end_date is not None:
        end_date = next_month(rev.end_date)
    result = []
    for first_of_month in first_of_month_range(rev.start_date, end_date):
        # Clamp [start, end) to the portion of the month the revision covers.
        start = max(first_of_month, rev.start_date)
        end = next_month(first_of_month)
        if rev.end_date is not None:
            end = min(end, rev.end_date)
        delta = end - start
        # Number of calendar days in this month (pro-rating denominator).
        total_days = monthrange(first_of_month.year, first_of_month.month)[1]
        rent = fractional_amount(-rev.rent, delta.days, total_days)
        result.append(Cashflow(first_of_month, rent, _("rent")))
        if rev.provision != 0:
            p = fractional_amount(-rev.provision, delta.days, total_days)
            result.append(Cashflow(
                first_of_month, p, _("provision")))
    return result
def test_migrate_to_newest(external_archive, tmp_path, filename, nodes):
    """Test migrations from old archives to newest version.

    Migrates ``filename`` (resolved through ``external_archive``) to the
    latest ``ArchiveFormatSqlZip`` version, then verifies the migrated
    content: node count, non-empty CalculationNode attributes, preserved
    StructureData labels/cells/kinds, and the presence of the expected
    provenance links (StructureData -> CalculationNode -> RemoteData).
    """
    filepath_archive = get_archive_file(filename, **external_archive)
    archive_format = ArchiveFormatSqlZip()
    new_archive = tmp_path / 'out.aiida'
    archive_format.migrate(filepath_archive, new_archive, archive_format.latest_version)
    # The migrated file must report the latest format version.
    assert archive_format.read_version(new_archive) == archive_format.latest_version
    with archive_format.open(new_archive, 'r') as reader:
        # count nodes
        archive_node_count = reader.querybuilder().append(orm.Node).count()
        assert archive_node_count == nodes
        # Verify that CalculationNodes have non-empty attribute dictionaries
        calc_query = reader.querybuilder().append(orm.CalculationNode, project=['attributes'])
        for [attributes] in calc_query.iterall():
            assert isinstance(attributes, dict)
            assert len(attributes) > 0
        # Verify that the StructureData nodes maintained their (same) label, cell, and kinds
        struct_query = reader.querybuilder().append(orm.StructureData, project=['label', 'attributes.cell'])
        assert struct_query.count() == 2
        for [label, cell] in struct_query.all():
            assert label == ''
            assert cell == [[4, 0, 0], [0, 4, 0], [0, 0, 4]]
        # Expected atomic kinds of the BaTiO3 test structures.
        known_kinds = [
            {
                'name': 'Ba',
                'mass': 137.327,
                'weights': [1],
                'symbols': ['Ba']
            },
            {
                'name': 'Ti',
                'mass': 47.867,
                'weights': [1],
                'symbols': ['Ti']
            },
            {
                'name': 'O',
                'mass': 15.9994,
                'weights': [1],
                'symbols': ['O']
            },
        ]
        kind_query = reader.querybuilder().append(orm.StructureData, project=['attributes.kinds'])
        for kinds in kind_query.all(flat=True):
            assert len(kinds) == len(known_kinds)
            for kind in kinds:
                assert kind in known_kinds
        # Check that there is a StructureData that is an input of a CalculationNode
        builder = reader.querybuilder()
        builder.append(orm.StructureData, tag='structure')
        builder.append(orm.CalculationNode, with_incoming='structure', project=['id'])
        assert len(builder.all()) > 0
        # Check that there is a RemoteData that is the output of a CalculationNode
        builder = reader.querybuilder()
        builder.append(orm.CalculationNode, tag='parent')
        builder.append(orm.RemoteData, with_incoming='parent', project=['id'])
        assert len(builder.all()) > 0
def filter_verified_user(path, community_user_dataFrame, verified_user_file, sep=',', header=None):
    """Remove verified users from a community-user table.

    Parameters
    ----------
    path : str
        Directory (prefix) where the verified-user file lives.
    community_user_dataFrame : pandas.DataFrame
        Two columns: (user_id, community_id).
    verified_user_file : str
        CSV file with rows (user_id, is_verified, name), comma separated.
    sep : str
        Field separator passed to ``pandas.read_csv``.
    header : int or None
        Header row passed to ``pandas.read_csv``.

    Returns
    -------
    pandas.DataFrame
        ``community_user_dataFrame`` restricted to non-verified users
        (same columns; original index preserved).
    """
    print('filter verified user')
    # np.str was removed in NumPy >= 1.20; np.str_ is the supported spelling.
    verified = pd.read_csv(
        path + verified_user_file,
        names=['user_id', 'is_verified', 'name'],
        dtype={'user_id': np.str_},
        sep=sep,
        header=header,
    )
    # Collect verified user ids into a set: O(1) membership instead of the
    # original O(n) list scan for every community user.
    verified_ids = set(verified.loc[verified.is_verified == True, 'user_id'])
    # Vectorized filter replaces the original quadratic row-by-row
    # DataFrame.append loop (and DataFrame.append itself is removed in
    # pandas >= 2.0).
    mask = ~community_user_dataFrame.user_id.isin(verified_ids)
    return community_user_dataFrame[mask]
def calculate(series):
    """
    Intersect the date axes of several SWC tube series and collect values.

    :param series: a list of lists of [[(),()], [(),()]] for every swc tube
        in the pixel -- each element is a (dates, values) pair
    :return: zip of (datetime, values) pairs for every date present in all
        series.  NOTE(review): on Python 3 this is a lazy zip object, not a
        list -- confirm callers materialize it if they iterate twice.
    """
    # gets every dates tuple in the list
    dates = [t for t, v in series]
    # define the dates as a set
    ds = set(dates[0])
    # get the intersection of every other set.
    for d in dates[1:]:
        ds = ds.intersection(set(d))
    def func(di):
        """Return (parsed date, values) for date string ``di``, or None."""
        # check for matching dates in the intersected and ordered set with the values from the series.
        ns = [get_matching_date(di, zip(*cs)) for cs in series]  # ns is the matching values
        # if the value is not none...
        ns = [ni for ni in ns if ni is not None]
        # print "Here is your ns {}".format(ns)
        # calculate the error of the mean for that value.
        # if ns:
        #     return datetime.strptime(di, '%m/%d/%Y'), calculate_sem(ns) #, calculate_avg(ns)
        if ns:
            return datetime.strptime(di, '%m/%d/%Y'), ns
        # implicit None when no series has a value for this date
    # sets are NOT ordered so you need to find the ones that match up.
    # vs = [func(di) for di in sorted(list(ds), reverse=True)]
    storages = [func(di) for di in sorted(list(ds), reverse=True)]
    # vs = [vi for vi in vs if vi is not None]
    storages = [i for i in storages if i is not None]
    # return zip(*vs)
    # print "length storages {}".format(len(storages))
    # print "STORAGES {}".format(storages)
    # print "length storages {}".format(len(zip(*storages)))
    # print "STORAGES {}".format(zip(*storages))
    return zip(*storages)
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers. File is assumed to be an
    ndarray of shape (n_samples, 3) where the first 2 columns represent features and the third column the class

    Parameters
    ----------
    filename: str
        Path to .npy data file

    Returns
    -------
    X: ndarray of shape (n_samples, 2)
        Design matrix to be used

    y: ndarray of shape (n_samples,)
        Class vector specifying for each sample its class
    """
    # Bug fix: the original ignored `filename` and loaded a hard-coded
    # path under ../datasets/; load exactly the file the caller asked for.
    dataset = np.load(filename)
    X, y = dataset[:, :2], dataset[:, -1]
    return X, y
def intersections():
    """Intersects all surfaces in model. Uses python cmd line, not api.

    Side effects: redraws views, switches to the 'INTERSECTIONS' layer,
    runs the Rhino `_Intersect` command over every object, marks both
    endpoints of each resulting intersection curve with points, and, when
    more than one intersection curve was created, intersects those curves
    with each other in a second pass.
    """
    sc.doc.Views.Redraw()
    layer('INTERSECTIONS')
    objs = rs.AllObjects()
    rs.SelectObjects(objs)
    rs.Command('_Intersect', echo=False)
    # Objects created by the command = the intersection curves/points.
    frac_isect_ids = rs.LastCreatedObjects()
    rs.UnselectAllObjects()
    if frac_isect_ids:
        for intid in frac_isect_ids:
            if rs.IsCurve(intid):
                # Mark both ends of each intersection curve.
                rs.AddPoint(rs.CurveStartPoint(intid))
                rs.AddPoint(rs.CurveEndPoint(intid))
        if len(frac_isect_ids) > 1:
            # Second pass: intersect the intersection curves themselves.
            rs.SelectObjects(frac_isect_ids)
            rs.Command('_Intersect', echo=False)
def readCSV(name, shape=(None,), delimiter=","):
    """Read the CSV file ``name``.

    Parameters:
        name: path of the CSV file to read.
        shape: slice arguments applied to every row, e.g. (1, 3) keeps
            columns 1 and 2.  The default (None,) keeps every column.
            (Changed from a mutable list default to a tuple to avoid the
            shared-mutable-default pitfall; slice(*(None,)) is identical.)
        delimiter: field separator.

    Returns:
        A list of rows (each a list of strings), header row included.
    """
    data = []
    with open(name, 'r') as f:
        reader = csv.reader(f, delimiter=delimiter)
        for row in reader:
            data.append(row[slice(*shape)])
    return data
def build_category(category):
    """Build a single-item list of a YouTube category.

    This refers to the Category of a video entry, such as "Film" or
    "Comedy", not the atom/gdata element. This does not check if the
    category provided is valid.

    Keyword arguments:
        category: String representing the category.

    Returns:
        A single-item list of a YouTube category (type gdata.media.Category).
    """
    from gdata.media import Category
    scheme_url = 'http://gdata.youtube.com/schemas/2007/categories.cat'
    entry = Category(text=category, scheme=scheme_url, label=category)
    return [entry]
def apply_and_save(base_file, out_filename, apply_function, save_function, *args):
    """ apply_and_save does following process.
    1. apply apply_function to base_file
    2. the 1's result will be saved as out_filename in base_file's directory

    Arguments:
        base_file {str} -- input file name
        out_filename {str} -- file name for save
        apply_function {function} -- open base_file and do something
        save_function {function} -- receive the results of apply_function and save results
                                    as out_filename
        *args -- extra positional arguments forwarded to apply_function
    """
    # Bug fix: the original called an undefined name `function`;
    # the intended callable is the `apply_function` parameter.
    result = apply_function(base_file, *args)
    # Normalize Windows separators, then rebuild the path with the new name.
    splitted_name = base_file.replace("\\", "/").split("/")
    out_filepath = "/".join(splitted_name[:-1]) + "/" + out_filename
    save_function(result, out_filepath)
def all(event, context):
    """ retrieves all experiment results from redis

    NOTE(review): this shadows the builtin ``all``; presumably intentional,
    since serverless handler names are referenced by deployment config --
    renaming would be a breaking change.

    params:
        - namespace (optional, defaults to 'alephbet')
        - scope (optional, comma-separated list of experiments)

    Returns a list whose first element is {'meta': {'scope': ...}} followed
    by one {'experiment': ..., 'goals': ...} dict per experiment.
    """
    r = _redis()
    namespace = event.get('namespace', 'alephbet')
    scope = event.get('scope')
    if scope:
        experiments = scope.split(',')
    else:
        # No explicit scope: use every experiment registered in the namespace.
        experiments = r.smembers("{0}:experiments".format(namespace))
    results = []
    results.append({'meta': {'scope': scope}})
    for ex in experiments:
        # Delegate per-experiment goal aggregation to the `experiment` handler.
        goals = experiment({'experiment': ex, 'namespace': namespace}, context)
        results.append({'experiment': ex, 'goals': goals})
    return results
def wait_for_save(filename, timeout=5):
    """Wait for FILENAME to be saved again, waiting up to TIMEOUT seconds.

    Polls every 0.2 s.  Returns True as soon as the file's modification
    time advances past its value at call time and the file is non-empty;
    returns False if no such save is seen before the timeout.
    """
    baseline_mtime = os.path.getmtime(filename)
    deadline = time.time() + timeout
    while time.time() < deadline:
        saved = os.path.getmtime(filename) > baseline_mtime
        if saved and os.path.getsize(filename) > 0:
            return True
        time.sleep(0.2)
    return False
def check_submodule_update(job, position):
    """
    Checks to see if certain submodules have been updated and post a comment to the PR if so.

    Returns True when submodule-update comments were posted; False when no
    submodule updates were found in the step output, or when the event has
    no pull request / review-comment URL to post to.
    """
    output = get_output_by_position(job, position)
    modules = find_in_output(output, "CIVET_CLIENT_SUBMODULE_UPDATES")
    if not modules:
        return False
    if not job.event.pull_request or not job.event.pull_request.review_comments_url:
        return False
    # These values are loop-invariant; the original recomputed them (and
    # re-created the API client) for every module.
    api = job.event.build_user.api()
    url = job.event.pull_request.review_comments_url
    sha = job.event.head.sha
    msg = "**Caution!** This contains a submodule update"
    for mod in modules.split():
        # The 2 position will leave the message on the new submodule hash
        api.pr_review_comment(url, sha, mod, 2, msg)
    return True
def display_side_by_side(dfs: list, captions: list):
    """Display tables side by side to save vertical space.

    Input:
        dfs: list of pandas.DataFrame
        captions: list of table captions (paired positionally with dfs;
            duplicate captions keep only the last frame, as dict semantics)
    """
    pieces = []
    for caption, frame in dict(zip(captions, dfs)).items():
        styled = frame.style.set_table_attributes("style='display:inline'")
        pieces.append(styled.set_caption(caption)._repr_html_())
        # Non-breaking spaces keep a visible gap between the tables.
        pieces.append("\xa0\xa0\xa0")
    display(HTML("".join(pieces)))
def _compute_subseq_errors_direct(series, weights):
"""
Subsequence errors (using one pass formulation)
:param Array{Float64,1} series
:param Array{Float64,1} weights
The subsequence errors is:
$$\begin{align}
E[i,j] &= Q[i,j] - \frac{S[i,j]^2}{W[i,j]}
\end{align}$$
Were W, S, Q are upper diagonal matrices:
$$\begin{align}
W[i,j] &\equiv \sum_{k=i}^j w_k \\
S[i,j] &\equiv \sum_{k=i}^j w_k x_k \\
Q[i,j] &\equiv \sum_{k=i}^j w_k {x_k}^2
\end{align}$$
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
Because $$Q[i,j]$$ and $$\frac{S[i,j]^2}{W[i,j]}$$ can be very similar numbers,
cancellation can lead to the precision of the result to be much less than
the inherent precision of the floating-point arithmetic used to perform the computation.
Thus this algorithm should not be used in practice.
This is particularly bad if the standard deviation is small relative to the mean.
"""
N = np.size(series)
wgts = np.diag(weights)
wsum = np.diag(weights * series)
sqrs = np.diag(weights * series * series)
dists = np.zeros((N, N), dtype=np.float)
means = np.diag(series)
# Fill the upper triangle of dists and means by performing up-right
# diagonal sweeps through the matrices
for delta in range(0, N):
for l in range(0, (N-1-delta)):
# l = left boundary, r = right boundary
r = l + delta + 1
# Incrementally update every partial sum
wgts[l, r] = wgts[l, r-1] + wgts[r, r]
wsum[l, r] = wsum[l, r-1] + wsum[r, r]
sqrs[l, r] = sqrs[l, r-1] + sqrs[r, r]
# Calculate the mean over the range
means[l, r] = 0 if (wgts[l, r] == 0) else wsum[l, r] / wgts[l, r]
dists[l, r] = sqrs[l, r] - means[l, r] * wsum[l, r]
if dists[l, r] < 0:
print("[WARNING] Numerical instability detected, dists[", l, ", ", r, "] is negative: ", dists[l, r])
return dists, means | 5,331,514 |
def app_info():
    """
    Show app infos (Version & License).

    Prints a fixed ASCII banner to stdout; the version is read from the
    module-level ``VERSION`` constant.
    """
    print('\n\n    ##### A Python script to read iMessage data #####')
    print('    #              Created by niftycode              #')
    print(f'    #                  Version {VERSION}                  #')
    print('    #                   MIT License                  #')
    print('    #################################################\n\n')
    print()
def concat(infilenames: List[str], outfilename: str) -> None:
    """Concatenates files along the time axis

    If files are overlapping, the last one will be used.

    Parameters
    ----------
    infilenames: List[str]
        filenames to concatenate
    outfilename: str
        filename

    Raises
    ------
    Exception
        if a gap is detected between consecutive files (the partially
        written output file is removed first).

    Notes
    ------
    The list of input files have to be sorted, i.e. in chronological order

    NOTE(review): relies on the module-level ``show_progress`` flag and on
    the .NET interop helpers (DfsFileFactory, to_numpy, ...) imported
    elsewhere in the module.
    """
    # Open the first file only to count items, then clone its structure.
    dfs_i_a = DfsFileFactory.DfsGenericOpen(infilenames[0])
    dfs_o = _clone(infilenames[0], outfilename)
    n_items = safe_length(dfs_i_a.ItemInfo)
    dfs_i_a.Close()
    current_time = datetime(1, 1, 1)  # beginning of time...
    for i, infilename in enumerate(tqdm(infilenames, disable=not show_progress)):
        dfs_i = DfsFileFactory.DfsGenericOpen(infilename)
        t_axis = dfs_i.FileInfo.TimeAxis
        n_time_steps = t_axis.NumberOfTimeSteps
        dt = t_axis.TimeStep
        start_time = from_dotnet_datetime(t_axis.StartDateTime)
        # A gap larger than one time step between files is unsupported:
        # clean up the partial output and abort.
        if i > 0 and start_time > current_time + timedelta(seconds=dt):
            dfs_o.Close()
            os.remove(outfilename)
            raise Exception("Gap in time axis detected - not supported")
        current_time = start_time
        if i < (len(infilenames) - 1):
            # Peek at the next file's start time so overlapping trailing
            # steps of the current file can be skipped (last file wins).
            dfs_n = DfsFileFactory.DfsGenericOpen(infilenames[i + 1])
            nf = dfs_n.FileInfo.TimeAxis.StartDateTime
            next_start_time = datetime(
                nf.Year, nf.Month, nf.Day, nf.Hour, nf.Minute, nf.Second
            )
            dfs_n.Close()
        for timestep in range(n_time_steps):
            current_time = start_time + timedelta(seconds=timestep * dt)
            if i < (len(infilenames) - 1):
                if current_time >= next_start_time:
                    break
            for item in range(n_items):
                itemdata = dfs_i.ReadItemTimeStep(item + 1, timestep)
                d = to_numpy(itemdata.Data)
                darray = to_dotnet_float_array(d)
                dfs_o.WriteItemTimeStepNext(0, darray)
        dfs_i.Close()
    dfs_o.Close()
def preprocess_dataframe(data):
    """Helper method to preprocess the dataframe.

    Adds a ``recalls`` column (doc_count + 1), drops bookkeeping columns,
    aggregates to monthly sums, restricts the index to the trusted date
    range (these data are trustworthy), and adds the month-over-month
    percentage change as ``pct``.
    """
    data['recalls'] = data['doc_count'] + 1
    data.drop(columns=['product', 'Unnamed: 0', 'key', 'key_as_string', 'doc_count'], inplace=True)
    monthly = data.resample("M").sum()
    in_range = (monthly.index > '2007-05-31') & (monthly.index < '2019-09-30')
    monthly = monthly.loc[in_range]
    monthly['pct'] = monthly['recalls'].pct_change()
    return monthly
def is_solution(x: int, y: int) -> bool:
    """Return True if (x, y) is a solution.

    x and y are the values in a sequence of 15 terms of the following form:
    xxxxyxxxxxyxxxx
    """
    checks = (
        x > 0,                # x must be a positive integer
        y < 0,                # y must be a negative integer
        5 * x + y > 0,        # a run of 6 consecutive terms (5 x's, 1 y) must be positive
        9 * x + 2 * y < 0,    # a run of 11 consecutive terms (9 x's, 2 y's) must be negative
        x <= 16 or y >= -16,  # magnitude bound: x <= 16 or y >= -16
    )
    return all(checks)
def load_crl(file):
    # type: (AnyStr) -> CRL
    """
    Load CRL from file.

    :param file: Name of file containing CRL in PEM format.
    :return: M2Crypto.X509.CRL object.  The second constructor argument (1)
        marks the CRL as owning the underlying pointer -- TODO confirm
        against M2Crypto's CRL signature.
    """
    # BIO.openfile is a context manager; the file handle is closed on exit.
    with BIO.openfile(file) as f:
        cptr = m2.x509_crl_read_pem(f.bio_ptr())
    return CRL(cptr, 1)
def clean_and_lemmatize(text):
    """
    Clean and lemmatize the text of a Tweet.

    Parameters:
        text (string): The raw Tweet text; passed through the module's
            `clean_and_tokenize` helper first.

    Returns:
        cleaned_text (string): The cleaned and lemmatized text, tokens
            re-joined with single spaces.
    """
    wnl = WordNetLemmatizer()  # NLTK lemmatizer
    converted_tweet = clean_and_tokenize(
        text)  # cleans the text and tokenize it
    tagged = nltk.pos_tag(converted_tweet)  # POS tag the tokenized Tweet
    # Map NLTK POS tags onto WordNet POS tags via the module's pos_tagger.
    wordnet_tagged = list(
        map(lambda x: (x[0], pos_tagger(x[1])), tagged))
    lemmatized_sentence = []
    # loop through each word in the tagged list
    for word, tag in wordnet_tagged:
        if tag is None:
            # if there is no available tag, append the word as is
            lemmatized_sentence.append(word)
        else:
            # else use the tag to lemmatize the word
            lemmatized_sentence.append(wnl.lemmatize(word, tag))
    # attached lemmatized words to a string
    cleaned_text = " ".join(lemmatized_sentence)
    return cleaned_text
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding.

    Arguments:
        in_planes {int} -- Number of channels in the input image
        out_planes {int} -- Number of channels produced by the convolution

    Keyword Arguments:
        stride {int or tuple, optional} -- Stride of the convolution (default: {1})
        groups {int, optional} -- Number of blocked connections from input
            channels to output channels (default: {1})
        dilation {int or tuple, optional} -- Spacing between kernel elements (default: {1})

    Returns:
        nn.Conv2d -- 3x3 convolution layer with padding and no bias
    """
    # Padding equals dilation so the effective 3x3 receptive field keeps
    # the spatial size unchanged at stride 1.
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def task_finish(request, pk):
    """API endpoint that marks a task as finished.

    The task is finished only when it has no previous task or the previous
    task can continue; otherwise a 400 error response is returned.

    :param request: HTTP request; request.user is recorded as the updater.
    :param pk: primary key of the task to finish.
    :return: Response with the serialized task, or an error detail (400).
    """
    task = get_object_or_404(models.Task, pk=pk)
    prev_task = task.get_prev_task()
    if prev_task is None or prev_task.can_continue():
        # '99' is the "finished" status code -- TODO confirm against the
        # Task model's status choices.
        task.status = '99'
        task.updated_user = request.user
        task.save()
        serializer = serializers.TaskSerializer(task)
        return Response(serializer.data)
    else:
        return Response({'detail': constants.ERROR_PREV_TASK_UNFINISHED}, status=400)
def add_runas_options(parser):
    """
    Add options for commands which can run tasks as another user

    Note that this includes the options from add_runas_prompt_options(). Only one of these
    functions should be used.

    :param parser: optparse parser to attach the "Privilege Escalation
        Options" group to.  Defaults come from the C (constants) module.
    """
    runas_group = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
    # consolidated privilege escalation (become)
    runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
                           help="run operations with become (does not imply password prompting)")
    runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD,
                           help="privilege escalation method to use (default=%default), use "
                                "`ansible-doc -t become -l` to list valid choices.")
    runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
                           help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
    # Attach the password-prompt options to the same group, then the group
    # itself is added to the parser inside this helper.
    add_runas_prompt_options(parser, runas_group=runas_group)
def test_active_flag_is_working_for_file_lock(filename_for_test):
    """
    Check that the .active attribute is set correctly.
    """
    # Truthiness assertions instead of comparing with `== True` / `== False`.
    assert FileLock(filename_for_test).active
    assert not FileLock(None).active
def add_parser(subparsers):
    """Add reduction parser.

    Registers the 'reduce' (alias 'red') sub-command with input/output
    file options, the reduction type (one of the module-level REDUCTIONS),
    and the reduced player counts.  Returns the created sub-parser.
    """
    parser = subparsers.add_parser(
        'reduce', aliases=['red'], help="""Reduce games""",
        description="""Create reduced game files from input game files.""")
    parser.add_argument(
        '--input', '-i', metavar='<input-file>', default=sys.stdin,
        type=argparse.FileType('r'), help="""Input file for script. (default:
        stdin)""")
    parser.add_argument(
        '--output', '-o', metavar='<output-file>', default=sys.stdout,
        type=argparse.FileType('w'), help="""Output file for script. (default:
        stdout)""")
    parser.add_argument(
        '--type', '-t', choices=REDUCTIONS, default='dpr', help="""Type of
        reduction to perform. `dpr` - deviation preserving. `hr` -
        hierarchical. `tr` - twins. `idr` - identity. (default:
        %(default)s)""")
    parser.add_argument(
        '--sorted-roles', '-s', action='store_true', help="""If set, reduction
        should be a comma separated list of reduced counts for the role names
        in sorted order.""")
    parser.add_argument(
        'reduction', nargs='?', metavar='<role>:<count>;...',
        help="""Number of players in each reduced-game role. This is a string
        e.g. "role1:4;role2:2".""")
    return parser
def write_password_db(filename, password, xml, compress=True):
    """Write a password database file.

    The payload is (optionally) zlib-compressed and padded, prefixed with a
    SHA-256 integrity hash, AES-256-CBC encrypted with a PBKDF2-derived key
    (SHA-1, 12000 iterations), and written with an 'rvl' header, the salt
    and the IV.

    :param filename: output path (written via the module's write_file helper).
    :param password: master password used to derive the AES key.
    :param xml: database content as str or bytes.
    :param compress: whether to zlib-compress the payload.
        NOTE(review): when compress=False the data is NOT padded to the
        16-byte AES block size -- encryption would fail for payloads whose
        (32 + len) is not a multiple of 16; confirm whether the
        uncompressed path is ever used.
    """
    # Encode the data if needed.
    if isinstance(xml, bytes):
        encoded_data = xml
    else:
        encoded_data = xml.encode('utf-8')
    # Compress the data if requested.
    if compress:
        compressed_unpadded_data = zlib.compress(encoded_data)
        # Pad the result to the 16-byte boundary (PKCS#7-style padding).
        padlen = 16 - len(compressed_unpadded_data) % 16
        compressed_data = compressed_unpadded_data + bytes([padlen] * padlen)
    else:
        compressed_data = encoded_data
    # Add a hash for integrity check.
    hash256 = hashlib.sha256(compressed_data).digest()
    decrypted_data = hash256 + compressed_data
    # Calculate the PBKDF2 derived key.
    salt = os.urandom(8)
    key = hashlib.pbkdf2_hmac('sha1',
                              password.encode('utf-8'),
                              salt,
                              12000,
                              dklen=32)
    # Encrypt the data.
    init_vector = os.urandom(16)
    crypto_obj = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC,
                                       init_vector)
    encrypted_data = crypto_obj.encrypt(decrypted_data)
    # Prepare final output and write it out: magic + version header,
    # then salt, IV, and ciphertext.
    output_data = (b'rvl\x00\x02\x00\x00\x00\x00\x00\x00\x00' + salt +
                   init_vector + encrypted_data)
    write_file(filename, output_data)
def config(program):
    """
    Print the disco master configuration, one tab-separated entry per line.
    """
    for entry in program.disco.config:
        print("\t".join(entry))
def test_log_missing_file():
    """ Tail a single file on a single task.

    Requesting a file that does not exist on the task must exit with
    status 1, no stdout, and a fixed error message on stderr.
    """
    returncode, stdout, stderr = exec_command(
        ['dcos', 'task', 'log', 'test-app', 'bogus'])
    assert returncode == 1
    assert stdout == b''
    assert stderr == b'No files exist. Exiting.\n'
def test_status_no_db_access(create_engine_mock):
    """
    Tests that status does not try to access the database.

    `create_engine_mock` patches SQLAlchemy's create_engine; the status
    command may exit via SystemExit, which is expected and swallowed.
    """
    try:
        from kolibri.utils import cli
        cli.status.callback()
    except SystemExit:
        # The status command terminates with an exit code; only the
        # absence of DB engine creation matters here.
        pass
    create_engine_mock.assert_not_called()
async def test_update_failed(
    hass: HomeAssistant,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test data is not destroyed on update failure."""
    entry = await init_integration(hass, aioclient_mock)
    await async_setup_component(hass, HA_DOMAIN, {})
    # Sanity-check the integration is up before forcing a failure.
    assert hass.states.get(CLIMATE_ID).state == HVAC_MODE_HEAT
    coordinator = hass.data[DOMAIN][entry.entry_id]
    # Make the device update time out and refresh the coordinator.
    with patch("pyatag.AtagOne.update", side_effect=TimeoutError) as updater:
        await coordinator.async_refresh()
        await hass.async_block_till_done()
        updater.assert_called_once()
        # The refresh must be marked failed, but previously fetched data
        # must remain intact.
        assert not coordinator.last_update_success
        assert coordinator.data.id == UID
def init_logging(verbose=False):
    """Set logging format to something more readable.

    Configures the root logger via basicConfig: DEBUG level when verbose,
    WARNING otherwise.
    """
    if verbose:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level, format="[%(levelname)s] %(message)s")
def fine_tune():
    """recreates top model architecture/weights and fine tunes with image augmentation and optimizations

    Rebuilds the VGG16 convolutional base (channels-first input), loads its
    pre-trained weights layer by layer from ``weights_path``, stacks the
    previously trained binary classifier (``top_model_weights_path``) on
    top, freezes the first 25 layers, and trains with augmented images and
    a slow SGD.

    NOTE(review): depends on module-level globals not defined here
    (img_width, img_height, weights_path, top_model_weights_path,
    train_data_dir, validation_data_dir, nb_train_samples,
    nb_validation_samples, nb_epoch, early_stopping) -- confirm they are
    set before calling.  The API style (Convolution2D, fit_generator,
    nb_epoch) is Keras 1.x.

    Returns:
        The compiled, fine-tuned Keras model.
    """
    # reconstruct vgg16 model (5 conv blocks, channels-first input shape)
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # load vgg16 weights layer by layer from the HDF5 file; stop once the
    # stored file has more layers than the rebuilt base.
    f = h5py.File(weights_path)
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            break
        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
    f.close()
    # add the classification layers (previously trained dense head)
    top_model = Sequential()
    top_model.add(Flatten(input_shape=model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))
    top_model.load_weights(top_model_weights_path)
    # add the model on top of the convolutional base
    model.add(top_model)
    # set the first 25 layers (up to the last conv block)
    # to non-trainable (weights will not be updated)
    for layer in model.layers[:25]:
        layer.trainable = False
    # compile the model with a SGD/momentum optimizer
    # and a very slow learning rate.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])
    # prepare data augmentation configuration
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        class_mode='binary')
    # fine-tune the model
    model.fit_generator(
        train_generator,
        samples_per_epoch=nb_train_samples,
        nb_epoch=nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=nb_validation_samples,
        callbacks=[early_stopping])
    # save the model
    json_string = model.to_json()
    with open('final_model_architecture.json', 'w') as f:
        f.write(json_string)
    model.save_weights('final_weights.h5')
    # return the model for convenience when making predictions
    return model
def normalize(a, seqlength=None, rv=None):
    """
    Normalize the VSA vector.

    :param a: input VSA vector
    :param seqlength: Optional, for BSC vectors must be set to a valid.
    :param rv: Optional random vector, used for splitting ties on binary and ternary VSA vectors.
    :return: new VSA vector

    NOTE(review): the call passes ``a`` as both receiver and first
    argument (``a.normalize(a, ...)``), which suggests normalize is
    defined as a static/class-level method on the VSA type -- confirm
    against the class definition.
    """
    return a.normalize(a, seqlength, rv)
def load_pickled_coverage(in_fp):
    """
    Replace (overwrite) coverage information from the given file handle.

    :param in_fp: readable file object accepted by the module's ``load``
        function (presumably pickle.load -- confirm the expected file mode).
    """
    # The module-level `_t` holds the active coverage data.
    global _t
    _t = load(in_fp)
def MACD(df, n_fast, n_slow):
    """Calculate MACD, MACD Signal and MACD difference.

    :param df: pandas.DataFrame with a 'Close' column
    :param n_fast: span of the fast EMA
    :param n_slow: span of the slow EMA (also used as warm-up period)
    :return: pandas.DataFrame with MACD_<fast>_<slow>, MACDsign_... and
        MACDdiff_... columns joined on
    """
    suffix = '_' + str(n_fast) + '_' + str(n_slow)
    fast_ema = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
    slow_ema = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
    macd_line = pd.Series(fast_ema - slow_ema, name='MACD' + suffix)
    # Signal line: 9-period EMA of the MACD line (standard parameter).
    signal_line = pd.Series(macd_line.ewm(span=9, min_periods=9).mean(),
                            name='MACDsign' + suffix)
    histogram = pd.Series(macd_line - signal_line, name='MACDdiff' + suffix)
    return df.join(macd_line).join(signal_line).join(histogram)
def find_x_chain(board: Board):
    """apply the x-chain logic

    - an x-chain has an even number of chained cells
    - odd links must be strong links, even links may be weak links (but can also be strong links)
    - either the starting or the ending cell has the candidate as its candidate. therefore, all cells which are
      not part of the chain and which can see the both the starting and ending cell, may <<not>> have the
      candidate.

    Returns None; at most one found chain is reported to the board as a
    preview per call (the caller re-runs after the board changes).
    """
    # Try every candidate digit in turn.
    for candidate in range(1, 10):
        cells = board.get_cells_by_candidate(candidate=candidate)
        for starting_cell in cells:
            chain = [starting_cell]
            x_chain = extend_x_chain(board=board, chain=chain, candidate=candidate)
            if x_chain:
                # after finding a chain, we need to
                # return as all other chains may be inconsistent after invalidating
                # note: actual invalidating will happen as execute() fn of XChain object
                board.notify_preview(preview=x_chain)
                return
def enable_cors_after_request_hook():
    """This executes after every route. We use it to attach CORS headers when applicable.

    Sets Access-Control-Allow-Origin/-Methods/-Headers on the current
    bottle response, allowing any origin.
    """
    headers = dict(
        Origin="*", Methods="GET, POST, PUT, DELETE, OPTIONS",
        Headers="Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token, Cache-Control, Last-Event-Id")
    for key, value in headers.items():
        # The dict keys are the suffixes of the Access-Control-Allow-* headers.
        bottle.response.set_header(f"Access-Control-Allow-{key}", value)
def ecef2map(xyz, spatialRef):
    """ transform 3D cartesian Earth Centered Earth fixed coordinates, to
    map coordinates (that is 2D) in a projection frame

    Parameters
    ----------
    xyz : np.array, size=(m,3), float
        np.array with 3D coordinates, in WGS84. In the following form:
        [[x, y, z], [x, y, z], ... ]
    spatialRef : osgeo.osr.SpatialReference or str (WKT)
        target projection

    Returns
    -------
    xy : np.array, size=(m,2), float
        np.array with planar coordinates, within a given projection frame
    """
    # Accept a WKT string as well as an osr.SpatialReference instance.
    if isinstance(spatialRef, str):
        spatialStr = spatialRef
        spatialRef = osr.SpatialReference()
        spatialRef.ImportFromWkt(spatialStr)
    llh = ecef2llh(xyz)  # get spherical coordinates and height
    # Drop the height column; only latitude/longitude are projected.
    xy = ll2map(llh[:, :-1], spatialRef)
    return xy
def dis2speed(t, dis):
    """
    Return speed in distance travelled per hour.

    Args:
        t (datetime64[ms]): 1D array with time.
        dis (float): 1D array with distance travelled.

    Returns:
        float: 1D array with speed data; the first element is NaN since no
        interval precedes the first sample.
    """
    # Interval lengths in milliseconds, then converted to hours.
    dt_ms = np.diff(t).astype(np.float64)
    rates = np.diff(dis) / (dt_ms / 1000) * 3600
    # Prepend NaN so the output aligns with the input samples.
    return np.concatenate(([np.nan], rates))
def default_context(plugin, context):
    """
    Return the default context for plugins rendered with a template: a
    single variable named ``plugin`` containing the plugin instance.
    (``context`` is accepted for interface compatibility and unused.)
    """
    return dict(plugin=plugin)
def compare_times(G_lst: list, n: int = 100):
    """
    Takes graph and sampling time n, and compute dijkstra for all nodes to all nodes n times.

    Builds a Graph from ``G_lst`` and benchmarks both ``g.dijkstra`` and
    ``g.dijkstra_heap``, printing the avg time per iter for each (nothing
    is returned).
    """
    def compare_times_(func):
        # Time `n` full sweeps of `func` over every source node 1..g.n-1.
        print(" Running {} ".format(func.__name__).center(100, "-"))
        start_time = time.time()
        for _ in tqdm(range(n), total=n):
            for i in range(1, g.n):
                func(i)
        res_time = (time.time() - start_time) / n
        if res_time >= 10:
            # NOTE(review): m and s are floats here, so the {:0>2} format
            # prints e.g. "1.0min" -- confirm whether int() was intended.
            m, s = res_time // 60, res_time % 60
            t = "{:0>2}min:{:0>2}s".format(m, s)
        else:
            t = "{:.0f}ms".format(res_time * 1000)
        print("Avg running time for {}: {}".format(func.__name__, t))
    g = Graph(G_lst)
    for func in [g.dijkstra, g.dijkstra_heap]:
        compare_times_(func)
def get_model_from_key(model_name):
    """
    Gets the model from a given key.

    Walks the entire subclass tree of ``Model`` (the original version only
    looked two levels deep, silently missing deeper subclasses).

    param:
        model_name: name of model
    return:
        the model class, or None when no subclass has that name
    """
    _known_models = {}

    def _collect(klass):
        # Depth-first walk over the whole subclass hierarchy.
        for sub in klass.__subclasses__():
            _known_models[sub.__name__] = sub
            _collect(sub)

    _collect(Model)
    return _known_models.get(model_name, None)
def main():
    """
    Entry point used when this module is run as a script.

    Sets up an example market snapshot (a long-dated swaption), constructs a
    ``Model`` from it, and plots the model-implied volatilities against the
    quoted cash-settled payer/receiver smiles.

    :return:
    """
    # --- example market data -------------------------------------------
    forward = 0.01100343969
    displacement = 0.007 # displacement aka shift
    expiry_time = 30.019178082191782
    vol_physical_measure_atm = 0.2130896096958366 # physical aka swap-settled
    atm_pay_vol = 0.22212766305824355
    atm_rec_vol = 0.1994018397861978
    cash_annuity_info = CashAnnuityInfo(year_fraction=1.0, number_payments=30)
    cash_pvbp = 17.81653454617611
    physical_pvbp = 17.994421329395927
    # Quoted smiles; None marks strikes with no quote on that side.
    market_cash_payer = [
        None, None, None, None, None, None, None, 0.22212766305824355, 0.2191166853454672, 0.21625128088540194,
        0.21092449139042832, 0.2016940138299131, 0.19406310229906248, 0.18774926787352228, 0.17821763702783086,
        0.1717397903524143, 0.16444139694677906,
    ]
    market_cash_receiver = [
        0.3779182774820147, 0.2759240484072475, 0.24887266885973108, 0.22840424850890914,
        0.21231444837760344, 0.20551432485314422, 0.20237753128037136, 0.1994018397861978,
        None, None, None, None, None, None, None, None, None,
    ]
    # --- model construction and plotting -------------------------------
    model = Model(
        forward=forward,
        displacement=displacement,
        expiry_time=expiry_time,
        swap_atm_vol=vol_physical_measure_atm,
        cash_pay_atm_vol=atm_pay_vol,
        cash_receive_atm_vol=atm_rec_vol,
        cash_annuity_info=cash_annuity_info,
        cash_pvbp=cash_pvbp,
        swap_settled_pvbp=physical_pvbp,
    )
    strikes = get_strikes(forward=forward, displacement=displacement)
    plot_vols(strikes, model, market_cash_payer, market_cash_receiver)
def default_fields(
        coll_id=None, type_id=None, entity_id=None,
        width=12, **kwargs):
    """
    Returns a function that accepts a field width and returns a dictionary of entity values
    for testing. The goal is to isolate default entity value settings from the test cases.

    NOTE(review): the outer ``width`` argument is shadowed by the inner
    ``def_fields(width=12)`` default and is therefore never used — confirm.
    """
    # NOTE: dict.get always evaluates its default, so the module-level
    # default_label()/default_comment() helpers run even when the caller
    # supplies an override in kwargs.
    def_label = kwargs.get("default_label",
        default_label(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
        )
    def_comment = kwargs.get("default_comment",
        default_comment(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
        )
    # NOTE(review): replacing "'" with "'" is a no-op — presumably an
    # HTML-escaped form (e.g. &#39;) was intended; confirm against templates.
    def_label_esc = def_label.replace("'", "&#39;".replace("&#39;", "'"))
    def_comment_esc = def_comment.replace("'", "'")
    def_entity_url = collection_entity_view_url(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
    collection_url = collection_view_url(coll_id).rstrip("/")
    def def_fields(width=12):
        # Start from the CSS layout classes for the given width, then merge
        # in the computed defaults and any caller-supplied overrides.
        fields = layout_classes(width=width)
        fields.update(
            { 'coll_id':                coll_id
            , 'type_id':                type_id
            , 'entity_id':              entity_id
            , 'default_label':          def_label
            , 'default_comment':        def_comment
            , 'default_label_esc':      def_label_esc
            , 'default_comment_esc':    def_comment_esc
            , 'default_entity_url':     def_entity_url
            , 'collection_url':         collection_url
            })
        if kwargs:
            fields.update(kwargs)
        return fields
    return def_fields
def test(edm, runtime, toolkit, environment):
    """ Run the test suite in a given environment with the specified toolkit.

    :param edm: path/name of the edm executable
    :param runtime: Python runtime version for the environment
    :param toolkit: GUI toolkit name (used to pick extra environment vars)
    :param environment: name of the edm environment to run in
    """
    parameters = get_parameters(edm, runtime, toolkit, environment)
    # Toolkit-specific env vars (copied so the shared dict isn't mutated).
    environ = environment_vars.get(toolkit, {}).copy()
    environ["PYTHONUNBUFFERED"] = "1"
    commands = [
        (
            "{edm} run -e {environment} -- python -W default -m "
            "coverage run -p -m unittest discover -v envisage"
        ),
    ]
    # We run in a tempdir to avoid accidentally picking up wrong envisage
    # code from a local dir. We need to ensure a good .coveragerc is in
    # that directory, plus coverage has a bug that means a non-local coverage
    # file doesn't get populated correctly.
    click.echo("Running tests in '{environment}'".format(**parameters))
    with do_in_tempdir(files=[".coveragerc"], capture_files=["./.coverage*"]):
        os.environ.update(environ)
        execute(commands, parameters)
    click.echo("Done test")
def test_invalid_logout_not_logged_in(test_client):
    """
    GIVEN a Flask application configured for testing
    WHEN the '/users/logout' page is requested (GET) while no user is logged in
    THEN check that the user is redirected to the login page
    """
    # First request guarantees any lingering session is logged out.
    test_client.get('/users/logout', follow_redirects=True)
    response = test_client.get('/users/logout', follow_redirects=True)
    assert response.status_code == 200
    # No goodbye message should appear, since nobody was logged in.
    assert b'Goodbye!' not in response.data
    for expected_fragment in (
        b'Flask Stock Portfolio App',
        b'Login',
        b'Please log in to access this page.',
    ):
        assert expected_fragment in response.data
def predict_using_broadcasts(feature1, feature2, feature3, feature4):
    """
    Scale the feature values and use the model to predict.

    SCL and CLF are module-level broadcast variables (presumably a fitted
    scaler and classifier distributed via Spark — confirm); ``.value``
    unwraps the broadcast payload on the worker.

    :return: 1 if normal, -1 if abnormal, 0 if something went wrong
    """
    prediction = 0
    x_test = [[feature1, feature2, feature3, feature4]]
    try:
        x_test = SCL.value.transform(x_test)
        prediction = CLF.value.predict(x_test)[0]
    except ValueError:
        # Best-effort: log the failure and fall through to the 0 sentinel.
        import traceback
        traceback.print_exc()
        print('Cannot predict:', x_test)
    return int(prediction)
def make_positions(tensor, padding_idx, left_pad):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1.
    Padding symbols are ignored, but it is necessary to specify whether padding
    is added on the left side (left_pad=True) or right side (left_pad=False).

    ``tensor`` is indexed with ``size(1)``, so it is assumed to be a 2D
    (batch, seq_len) tensor of token ids — TODO confirm at call sites.
    """
    max_pos = padding_idx + 1 + tensor.size(1)
    device = tensor.get_device()
    # A growing position-range buffer is cached per device as an attribute
    # on this function, so repeated calls avoid re-allocating it.
    buf_name = f'range_buf_{device}'
    if not hasattr(make_positions, buf_name):
        setattr(make_positions, buf_name, tensor.new())
    setattr(make_positions, buf_name, getattr(make_positions, buf_name).type_as(tensor))
    if getattr(make_positions, buf_name).numel() < max_pos:
        # Buffer too small for this sequence length: refill with the range.
        torch.arange(padding_idx + 1, max_pos, out=getattr(make_positions, buf_name))
    mask = tensor.ne(padding_idx)
    positions = getattr(make_positions, buf_name)[:tensor.size(1)].expand_as(tensor)
    if left_pad:
        # Shift positions right by the amount of left padding in each row,
        # so the first non-pad token still gets padding_idx + 1.
        positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
    # Write positions only where the mask is set; padding entries keep
    # their original (padding_idx) value.
    new_tensor = tensor.clone()
    return new_tensor.masked_scatter_(mask, positions[mask]).long()
def teq(state, *column_values):
    """Tag-Equals filter. Expects, that a first row contains tags and/or metadata.

    The tag row is ignored in the comparison, but prepended to the result
    (in order to maintain the first row in the results).
    Accepts one or more column-value pairs; keeps only rows where the value
    in the column equals the specified value (compared as string and, when
    the value parses as a number, numerically as well).
    Example: teq-column1-1

    :param state: pipeline state object holding a dataframe
    :param column_values: alternating column names and values
    :return: new state with the filtered dataframe
    """
    import pandas as pd  # local import; only needed for the final concat

    df = state.get()
    tags = df.iloc[:1, :]
    df = df.iloc[1:, :]
    assert state.type_identifier == "dataframe"
    for i in range(0, len(column_values), 2):
        c = column_values[i]
        v = column_values[i + 1]
        state.log_info(f"Equals: {c} == {v}")
        # String comparison first; dtype=bool replaces the np.bool alias
        # removed in NumPy 1.24.
        index = np.array([x == v for x in df[c]], dtype=bool)
        try:
            # Also match numerically when v parses as an integer.
            if int(v) == float(v):
                index = index | (df[c] == int(v))
            else:
                index = index | (df[c] == float(v))
        except (ValueError, TypeError):
            # v is not numeric (or column is incomparable): string match only.
            pass
        df = df.loc[index, :]
    # DataFrame.append was removed in pandas 2.0; concat is the replacement.
    df = pd.concat([tags, df], ignore_index=True)
    return state.with_data(df)
def set_glb(name: str, value: Union[List[Any], Callable]):
    """
    Store ``value`` under option ``name`` in the module-level option table,
    performing no validation of either argument.

    :param name: Name of the option.
    :param value: Value for the option
    """
    global_options.update({name: value})
def music(amusic=None, load=True, play=True, stop=False, loop=1):
    """For loading and playing music.

    ::Example::
        music('bla.ogg', load=True, play=True)
        music(stop=True)

    :param amusic: file name resolved via ``music_path`` when loading.
    :param load: load ``amusic`` before (optionally) playing.
    :param play: start playback (ignored when ``stop`` is requested).
    :param stop: stop any currently playing music instead.
    :param loop: repeat count passed to ``pygame.mixer.music.play``.
    """
    # perhaps the mixer is not included or initialised.
    if pygame.mixer and pygame.mixer.get_init():
        if load and not stop:
            pygame.mixer.music.load(music_path(amusic))
        # BUG FIX: the original condition `play and stop is None or stop is
        # False` parsed as `(play and stop is None) or (stop is False)`, so
        # music played whenever stop was False even with play=False.
        if play and (stop is None or stop is False):
            pygame.mixer.music.play(loop)
        elif stop:
            pygame.mixer.music.stop()
def adjust_labels(data_y, dataset, pred_type='actions'):
    """
    Transforms original string labels into the integer range [0, nb_labels-1].

    The array is modified in place via boolean-mask assignment; note that
    assigning an int into a string-dtype numpy array stores its string form.

    :param data_y: numpy array
        Sensor labels (string class names)
    :param dataset: string
        Which dataset's label scheme to apply
    :param pred_type: string, ['gestures', 'locomotion', 'actions', 'tasks']
        Type of activities to be recognized
    :return: numpy array
        Modified sensor labels
    """
    data_y[data_y == "null_class"] = 0
    if dataset == 'wetlab':
        if pred_type == 'tasks':  # Labels for tasks are adjusted
            data_y[data_y == "1solvent"] = 1
            data_y[data_y == "2catalysator"] = 2
            data_y[data_y == "3cutting"] = 3
            data_y[data_y == "4mixing"] = 4
            data_y[data_y == "5catalysator"] = 5
            data_y[data_y == "6waterbath"] = 6
            data_y[data_y == "7solvent"] = 7
            data_y[data_y == "8catalysator"] = 8
            data_y[data_y == "9cutting"] = 9
            data_y[data_y == "10mixing"] = 10
            data_y[data_y == "11catalysator"] = 11
            data_y[data_y == "12waterbath"] = 12
            data_y[data_y == "13waterbath"] = 13
            data_y[data_y == "14catalysator"] = 14
            data_y[data_y == "15pestling"] = 15
            data_y[data_y == "16filtrate"] = 16
            data_y[data_y == "17catalysator"] = 17
            data_y[data_y == "18pouring"] = 18
            data_y[data_y == "19detect"] = 19
            data_y[data_y == "20waterbath"] = 20
            data_y[data_y == "21catalysator"] = 21
            data_y[data_y == "22pestling"] = 22
            data_y[data_y == "23filtrate"] = 23
            data_y[data_y == "24catalysator"] = 24
            data_y[data_y == "25pouring"] = 25
            data_y[data_y == "26detect"] = 26
            data_y[data_y == "27end"] = 27
        elif pred_type == 'actions':  # Labels for actions are adjusted
            data_y[data_y == "cutting"] = 1
            data_y[data_y == "inverting"] = 2
            data_y[data_y == "peeling"] = 3
            data_y[data_y == "pestling"] = 4
            data_y[data_y == "pipetting"] = 5
            data_y[data_y == "pouring"] = 6
            # "pour catalysator" is treated as a plain pouring action.
            data_y[data_y == "pour catalysator"] = 6
            data_y[data_y == "stirring"] = 7
            data_y[data_y == "transfer"] = 8
    elif dataset == 'sbhar':
        data_y[data_y == 'walking'] = 1
        data_y[data_y == 'walking_upstairs'] = 2
        data_y[data_y == 'walking_downstairs'] = 3
        data_y[data_y == 'sitting'] = 4
        data_y[data_y == 'standing'] = 5
        data_y[data_y == 'lying'] = 6
        data_y[data_y == 'stand-to-sit'] = 7
        data_y[data_y == 'sit-to-stand'] = 8
        data_y[data_y == 'sit-to-lie'] = 9
        data_y[data_y == 'lie-to-sit'] = 10
        data_y[data_y == 'stand-to-lie'] = 11
        data_y[data_y == 'lie-to-stand'] = 12
    elif dataset == 'rwhar' or dataset == 'rwhar_3sbjs':
        data_y[data_y == 'climbing_down'] = 0
        data_y[data_y == 'climbing_up'] = 1
        data_y[data_y == 'jumping'] = 2
        data_y[data_y == 'lying'] = 3
        data_y[data_y == 'running'] = 4
        data_y[data_y == 'sitting'] = 5
        data_y[data_y == 'standing'] = 6
        data_y[data_y == 'walking'] = 7
    elif dataset == 'hhar':
        data_y[data_y == 'bike'] = 1
        data_y[data_y == 'sit'] = 2
        data_y[data_y == 'stand'] = 3
        data_y[data_y == 'walk'] = 4
        data_y[data_y == 'stairsup'] = 5
        data_y[data_y == 'stairsdown'] = 6
    # BUG FIX: original test was `dataset == 'opportunity' or
    # 'opportunity_ordonez'`, which is always truthy, so any unmatched
    # dataset name fell into this branch.
    elif dataset == 'opportunity' or dataset == 'opportunity_ordonez':
        if pred_type == 'locomotion':
            data_y[data_y == "stand"] = 1
            data_y[data_y == "walk"] = 2
            data_y[data_y == "sit"] = 3
            data_y[data_y == "lie"] = 4
        elif pred_type == 'gestures':
            data_y[data_y == 'open_door_1'] = 1
            data_y[data_y == 'open_door_2'] = 2
            data_y[data_y == 'close_door_1'] = 3
            data_y[data_y == 'close_door_2'] = 4
            data_y[data_y == 'open_fridge'] = 5
            data_y[data_y == 'close_fridge'] = 6
            data_y[data_y == 'open_dishwasher'] = 7
            data_y[data_y == 'close_dishwasher'] = 8
            data_y[data_y == 'open_drawer_1'] = 9
            data_y[data_y == 'close_drawer_1'] = 10
            data_y[data_y == 'open_drawer_2'] = 11
            data_y[data_y == 'close_drawer_2'] = 12
            data_y[data_y == 'open_drawer_3'] = 13
            data_y[data_y == 'close_drawer_3'] = 14
            data_y[data_y == 'clean_table'] = 15
            data_y[data_y == 'drink_from_cup'] = 16
            data_y[data_y == 'toggle_switch'] = 17
    return data_y
def test_CollaborationRecords_read(collab_env):
    """ Tests if single reading of collab records is self-consistent and
    hierarchy-enforcing.
    # C1: Check that specified record exists (inherited from create())
    # C2: Check that specified record was dynamically created
    # C3: Check that specified record have a composite key
    # C4: Check that specified record was archived with correct substituent keys
    # C5: Check that specified record was archived with correct substituent IDs
    # C6: Check that specified record captured the correct specified details
    # C7: Check hierarchy-enforcing field "relations" exist
    # C8: Check that all downstream relations have been captured
    """
    # Unpack only what this test needs from the fixture.
    (
        collab_records, collab_details, _,
        (collab_id, _, _, _, _),
        _,
    ) = collab_env
    record = collab_records.read(collab_id=collab_id)
    # C1: the record must have been persisted by create()
    assert record is not None
    # C2 - C5: key structure and substituent IDs
    check_key_equivalence(record=record, ids=[collab_id], r_type="collaboration")
    # C6: stored details must match what was submitted
    check_detail_equivalence(record=record, details=collab_details)
    # C7 - C8: hierarchy relations must be present and complete
    check_relation_equivalence(record=record, r_type="collaboration")
def is_str_or_bytes(x):
    """Return True when ``x`` is a text or byte sequence.

    Covers ``str``, ``bytes`` and ``bytearray``. Implemented directly
    (without rpartial) to avoid infinite recursion.
    """
    return any(isinstance(x, kind) for kind in (str, bytes, bytearray))
def test_task_9(base_settings):
    """No. 9 tests collection for Task.
    Test File: task-example2.json
    """
    source = base_settings["unittest_data_dir"] / "task-example2.json"
    parsed = task.Task.parse_file(
        source, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "Task"
    impl_task_9(parsed)
    # Round-trip: serialise to a dict, rebuild, and re-run the same checks.
    as_dict = parsed.dict()
    assert as_dict["resourceType"] == "Task"
    rebuilt = task.Task(**as_dict)
    impl_task_9(rebuilt)
def _type_convert(new_type, obj):
"""
Convert type of `obj` to `force`.
"""
return new_type(obj) | 5,331,556 |
def create_tokenizer(corpus_file, vocab_size):
    """Create a tokenizer from a corpus file

    Args:
        corpus_file (Pathlib path): File containng corpus i.e. all unique words for
        vocab_size (int): Vocabulary size of the tokenizer

    Returns:
        hugging_face tokenizer: Byte pair tokenizer used to tokenize text
    """
    tokenizer = Tokenizer(BPE())
    # Reserve ids for padding, BOS, EOS and unknown tokens.
    trainer = BpeTrainer(
        special_tokens=["<pad>", "<s>", "</s>", "<unk>"], vocab_size=vocab_size
    )
    tokenizer.pre_tokenizer = Whitespace()
    files = [str(corpus_file)]
    # NOTE(review): this is the legacy `train(trainer, files)` call order;
    # recent `tokenizers` releases expect `train(files, trainer=trainer)` —
    # confirm against the pinned library version.
    tokenizer.train(trainer, files)
    # Wrap every encoded sequence as "<s> ... </s>".
    tokenizer.post_processor = TemplateProcessing(
        single="<s> $A </s>",
        special_tokens=[
            ("<s>", tokenizer.token_to_id("<s>")),
            ("</s>", tokenizer.token_to_id("</s>")),
        ],
    )
    tokenizer.enable_padding(
        pad_token="<pad>",
        pad_id=tokenizer.token_to_id("<pad>"),
    )
    return tokenizer
def test_dim_name_creation():
    """Asserts that creation of unique dimension names works.

    Each test case is a tuple of (input path, expected unique name,
    optional custom name); ``check_names`` feeds them to
    ``ParamSpace._unique_dim_names`` and compares the produced names.
    """
    create_names = ParamSpace._unique_dim_names
    def check_names(*name_check_pdim):
        """Create the necessary input data for the create_names function, then
        perform it and assert equality to the expected values ...
        """
        # Build (path, ParamDim) pairs; a None custom name means "derive
        # the dimension name automatically from the path".
        kv_pairs = [
            (path, ParamDim(default=0, values=[1, 2], name=pd_name))
            if pdim is not None
            else (path, ParamDim(default=0, values=[1, 2]))
            for path, _, pd_name in name_check_pdim
        ]
        expected_names = [name_out for _, name_out, _ in name_check_pdim]
        actual_names = [k for k, _ in create_names(kv_pairs)]
        assert expected_names == actual_names
    # Start with the tests
    # Arguments for check_names: (input path, expected name, custom name)
    # Some basics
    check_names(
        (("foo", "bar"), "bar", None),
        (("foo", "baz"), "foo.baz", None),
        (("abc", "def"), "spam", "spam"),
        (("bar", "baz"), "bar.baz", None),
    )
    # Repeating pattern -> resolved up unto root
    check_names(
        (("p0",), ".p0", None),
        (("d", "p0"), ".d.p0", None),
        (("d", "d", "p0"), "d.d.p0", None),
        (("d", "d", "d", "p0"), "p1", "p1"),
    )
    # Custom names
    check_names(
        (("p0",), "p0", "p0"),
        (("d", "p0"), "p1", "p1"),
        (("d", "d", "p0"), "p2", "p2"),
        (("d", "d", "d", "p0"), "p3", "p3"),
    )
    # Single non-custom name at root level
    check_names(
        (("p0",), "p0", "p0"),
        (("d", "p0"), "p1", "p1"),
        (("d", "d", "p0"), "p2", "p2"),
        (("v0",), "v0", None),
    )
    # Custom names have priority over existing paths
    check_names(
        (("p0",), ".p0", None),
        (("d", "p0"), "p0", "p0"),
        (("d", "d", "p0"), ".d.d.p0", None),
        (("d", "d", "d", "p0"), "d.d.d.p0", None),
    )
    # Can have integer elements in there
    check_names(
        (("foo", "bar", 0), "bar.0", None),
        (("foo", "baz", 1), "baz.1", None),
        (("abc", "def", 23, "foo"), "def.23.foo", None),
        ((12, "bar", "baz", 0, "foo"), ".12.bar.baz.0.foo", None),
    )
    check_names(
        (("d", "d", "s", 1), "d.d.s.1", None),
        (("d", "d", "s", 2), "d.s.2", None),
        (("d", "d", "s", 3), "d.s.3", None),
        (("d", "s", 0), ".d.s.0", None),
        (("d", "s", 1), ".d.s.1", None),
        (("s", 0), ".s.0", None),
        (("s", 1), ".s.1", None),
        (("s", 3, 0), "s.3.0", None),
        (("s", 2, 0), "s.2.0", None),
        (("t", 1, 0), "t.1.0", None),
        (("t", 1, 1), "t.1.1", None),
    )
    # Can also be other numerical values (although not a super idea, typically)
    check_names(
        (("foo", "bar", -2), "bar.-2", None),
        (("foo", "baz", 1.5), "baz.1.5", None),
        (("abc", "def", 23.45, "foo"), "def.23.45.foo", None),
        ((12.34, "bar", "baz", -0.1, "foo"), ".12.34.bar.baz.-0.1.foo", None),
    )
    # Paths cannot include '.', require a custom name
    with pytest.raises(ValueError, match="Please select a custom name for"):
        check_names(
            (("foo.bar", "baz"), "baz", None),
            (("foo", "bar.baz"), "bar.baz", None),
        )
    # Path separator in custom names is not allowed
    with pytest.raises(ValueError, match="cannot contain the hierarchy-sep"):
        check_names(
            (("p0",), ".p0", None),
            (("d", "p0"), ".d.p0", None),
            (("d", "d", "p0"), "d.d.p0", None),
            (("d", "d", "d", "p0"), "d.d.d.p0", "d.p0"),
        )
    # Custom names need be strings
    with pytest.raises(TypeError, match="need to be strings"):
        check_names(
            (("p0",), "p0", 1.23),
        )
    # Colliding custom names -> ValueError
    with pytest.raises(ValueError, match="There were duplicates among"):
        check_names(
            (("p0",), ".p0", None),
            (("d", "p0"), ".d.p0", None),
            (("d", "d", "p0"), "d.d.p0", "p1"),
            (("d", "d", "d", "p0"), "d.d.d.p0", "p1"),
        )
    # Pathological case where no unique names can be found; should abort
    with pytest.raises(ValueError, match="Could not automatically find"):
        check_names((("d", "p0"), ".d.p0", None), (("d", "p0"), ".d.p0", None))
def test_fenced_code_blocks_090():
    """
    Test case 090: Simple example with tildes

    A tilde-delimited fenced code block must render as a <pre><code> block
    with its angle brackets escaped.
    """
    # Arrange
    source_markdown = """~~~
<
 >
~~~"""
    # The \a markers in the token text delimit replaced/escaped characters.
    expected_tokens = [
        "[fcode-block(1,1):~:3::::::]",
        "[text(2,1):\a<\a&lt;\a\n \a>\a&gt;\a:]",
        "[end-fcode-block::3:False]",
    ]
    expected_gfm = """<pre><code>&lt;
 &gt;
</code></pre>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
def mock_retrocookie(monkeypatch: MonkeyPatch) -> None:
    """Stub out the ``retrocookie`` entry point with a do-nothing function."""
    def _noop(*args, **kwargs):
        return None

    monkeypatch.setattr(__main__, "retrocookie", _noop)
def copy_files(file_list, out_dir, fix_ext=(('.pyc', '.py'),)):
    """
    Copy files into ``out_dir`` after remapping their extensions.

    Each path in ``file_list`` has its extension rewritten via the
    module-level ``replaceext`` helper — presumably ``fix_ext`` is a tuple
    of (old_ext, new_ext) pairs, defaulting to .pyc -> .py (confirm against
    ``replaceext``). The remapped files are then copied into ``out_dir``
    keeping their base names.

    :param file_list: iterable of source file paths
    :param out_dir: existing destination directory
    :param fix_ext: extension mapping pairs passed to ``replaceext``
    :raises IOError: if the remapped list is empty, a remapped file is
        missing, or ``out_dir`` is not an existing directory
    """
    rep_list = replaceext(file_list=file_list, ext=fix_ext)
    # All remapped source files must exist before any copy happens.
    if not rep_list or not all([osp.exists(x) for x in rep_list]):
        raise IOError(rep_list)
    if not out_dir or not osp.exists(out_dir) or not osp.isdir(out_dir):
        raise IOError(out_dir)
    for x in rep_list:
        copyfile(x, osp.join(out_dir, osp.split(x)[1]))
def get_readings(tag):
    """Get sensor readings from a SensorTag and collate them in a dictionary.

    Enables the tag's sensors, reads each one, then disables them again.
    All values are rounded to 2 decimal places.

    :param tag: connected SensorTag device object
    :return: dict of readings, or an empty dict when the BLE read fails
    """
    try:
        enable_sensors(tag)
        readings = {}
        # IR sensor
        readings["ir_temp"], readings["ir"] = tag.IRtemperature.read()
        # humidity sensor
        readings["humidity_temp"], readings["humidity"] = tag.humidity.read()
        # barometer
        readings["baro_temp"], readings["pressure"] = tag.barometer.read()
        # luxmeter
        readings["light"] = tag.lightmeter.read()
        # battery
        readings["battery"] = tag.battery.read()
        # Power down the sensors as soon as the reads are done.
        disable_sensors(tag)
        # round to 2 decimal places for all readings
        readings = {key: round(value, 2) for key, value in readings.items()}
        return readings
    except BTLEException as error:
        # Bluetooth failure: report and return an empty result set.
        print("Unable to take sensor readings. {}".format(error))
        return {}
def test_find_statement(test_df: pd.DataFrame) -> None:
    """Find a correctly aligned statement amidst wronly aligned statements."""
    # Search the Finnish ("fi") transcript with a 40-token window advancing
    # in 30-token steps; expect the statement at rows 309-319 of the fixture.
    start, end = labeler.find_statement(
        test_df,
        "this speaker says something but alignment is correct".split(),
        "fi",
        size=40,
        step=30,
    )
    assert start == 309
    assert end == 319
def digita_gw(request):
    """
    Digita GW endpoint implementation.

    Handles an uplink POST from the Digita LoRaWAN gateway: looks up (or
    creates) the apartment sensor by its DevEUI, decodes the hex ELSYS
    payload, maps each decoded field to a sensor attribute, stores the new
    values and notifies subscriptions.
    """
    identifier = request.data['DevEUI_uplink']['DevEUI']
    apsen = core.models.apartment_sensor_models.ApartmentSensor.objects.get_or_create(identifier=identifier)[0]
    payload = binascii.unhexlify(request.data['DevEUI_uplink']['payload_hex'])
    decoded_payload = decode_elsys_payload(payload)
    # Maps decoded payload field names to attribute URIs (settings-defined).
    mapping = settings.DIGITA_GW_PAYLOAD_TO_ATTRIBUTES  # type: dict
    new_values = []
    for key, value in decoded_payload.items():
        uri = mapping.get(key, '')
        if uri:
            attr = core.models.sensor_models.SensorAttribute.objects.get_or_create(uri=uri, defaults={'description': key})[0]
        else:
            # Unmapped fields still get an attribute, keyed by description.
            attr = core.models.sensor_models.SensorAttribute.objects.get_or_create(description=key)[0]
        apsen_attr = apsen.attributes.get_or_create(attribute=attr)[0]
        new_values.append(apsen_attr.values.create(value=value))
    # Fan the freshly stored values out to any matching subscriptions.
    models.Subscription.handle_new_values(new_values)
    return Response({"message": "Updated successfully"})
def resize_img(_img, maxdims=(1000, 700)):
    """
    Resize a given image. Image can be either a Pillow Image, or a NumPy array.
    Resizing is done automatically such that the entire image fits inside the
    given maxdims box, keeping aspect ratio intact.

    :param _img: PIL.Image or NumPy array
    :param maxdims: bounding box; compared as (height limit, width limit)
        against the image's (height, width) — TODO confirm intended order
    :return: resized PIL.Image
    """
    try:
        # If NumPy array, create Pillow Image
        img = Image.fromarray(_img)
    except TypeError:
        # Else image must already be a Pillow Image
        img = _img
    # img.size is (width, height); take the largest overflow ratio so both
    # dimensions end up inside the box.
    ratio = max(img.size[1] / maxdims[0], img.size[0] / maxdims[1])
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same filter and exists in all supported Pillow versions.
    image = img.resize((int(img.size[0] / ratio), int(img.size[1] / ratio)), Image.LANCZOS)
    return image
def natural_key(s):
    """Sort key for "natural" ordering (``name5`` before ``name10``, and
    ``1`` before ``A``). Designed to be passed as the ``key`` argument to
    sorting functions.

    Splits ``s`` into digit and non-digit runs with the module-level
    ``_nkre`` pattern, then normalises each run through ``_nkconv``
    (ints for digit runs, lowercase text otherwise).

    :param s: the str/unicode string to convert.
    :rtype: tuple
    """
    return tuple(map(_nkconv, _nkre.findall(s)))
def query_props(path):
    """
    Extract a QueryProps object from a query file's name.

    The base name must follow the "topic - CONTRIBUTOR [- query string].ext"
    convention captured by the pattern below.

    :param path: Path to a query file
    :return: QueryProps of the file
    :raises ValueError: if the base name does not match the convention
    """
    basename = os.path.basename(path)
    pattern = r'''
        (?P<topic>[^-]+?)
        (\s*-\s*)
        (?P<contributor>[A-Z]+)
        (\s*-\s*)?
        (?P<query_string>[^-]+)?
        (\.(?P<extension>[a-z]+))
    '''
    match = re.match(pattern, basename, re.X)
    if match is None:
        raise ValueError(
            '"{}" does not follow the file name convention.'.format(basename)
        )
    return QueryProps(path, **match.groupdict())
def SetDocTimestampFrequency(doc:NexDoc, freq:float):
    """Sets the document timestamp frequency.

    NOTE: the parameter names matter here — ``locals()`` is forwarded to
    ``NexRun`` as the argument dictionary, so they must match the names the
    NeuroExplorer API expects.
    """
    return NexRun("SetDocTimestampFrequency", locals())
def test_gibbs_energy_data_are_parsed():
    """Test that Gibbs energy data can be parsed"""
    # Parse the raw POP input and compare with the expected result set.
    assert match_sets(convert_pop_data(POP_GIBBS_ENERGY), POP_GIBBS_ENERGY_RESULTS)
def rsqrt(x: np.ndarray):
    """Compute the element-wise reciprocal square root, ``1 / sqrt(x)``.

    Args:
        x: input array

    Returns:
        Array of the same shape. Zeros map to ``inf`` and negative entries
        to ``nan``, each with the usual NumPy runtime warnings.

    Examples:
        ``rsqrt(np.array([4.0, 0.25]))`` gives ``[0.5, 2.0]``.
    """
    return np.reciprocal(np.sqrt(x))
def test_split_pane_session(capsys):
    """
    Test a session with a single window, with a split pane.

    Builds a one-window description with two panes (the second split
    horizontally from the first), runs the scripter over it, and compares
    the generated tmux command list with the expected one.
    """
    window_data = [
        {
            "identity": "window",
            "number": 0,
            "title": "window",
            "postfix": "~-",
            "dir": DEFAULT_DIRECTORY,
            # tmux layout string: two 100x60 panes side by side.
            "layout": "c6e0,200x60,0,0{100x60,0,0,0,100x60,101,0,1}",
            "panes": [
                {"identity": "pane", "number": 0, "command": "top"},
                {
                    "identity": "pane2",
                    "number": 1,
                    "dir": "/var/log",
                    "command": "tail -f syslog",
                    "parent": "pane",
                },
            ],
        }
    ]
    window_list = set_session_parameters(
        DEFAULT_SESSION, DEFAULT_DIRECTORY, window_data,
    )
    scripter = TmuxScripter(DEFAULT_SESSION, DEFAULT_DIRECTORY).set_terminal_size(
        TERMINAL_WIDTH, TERMINAL_HEIGHT
    )
    # Run outside capture so the scripter's own printing doesn't interfere.
    with capsys.disabled():
        scripter.analyze(window_list)
    expected = convert_lines_to_object(
        GENERIC_START
        + [
            'send-keys "top" "C-m" \\; \\',
            'split-window -h -t session:0.0 -c "/var/log" \\; \\',
            'send-keys "tail -f syslog" "C-m" \\; \\',
            "resize-pane -t session:0.0 -x 100 -y 60 \\; \\",
            "resize-pane -t session:0.1 -x 100 -y 60",
            "popd",
        ]
    )
    actual = convert_lines_to_object(scripter.commands.split("\n"))
    assert_objects_equal(expected, actual, expected.keys(), capsys)
def test_get_payoff_settings():
    """Test the setup of payoff distributions."""
    payoff_settings = get_payoff_settings(0.1)
    # Expect a 2D table with 8 columns and at least one row.
    assert payoff_settings.ndim == 2
    assert payoff_settings.shape[-1] == 8
    assert payoff_settings.shape[0] >= 1
    # Columns 2, 3, 6, 7 hold probabilities on the 0.1 .. 0.9 grid.
    for probability in payoff_settings[0, [2, 3, 6, 7]]:
        assert probability in np.round(np.arange(0.1, 1, 0.1), 1)
    # Columns 0, 1, 4, 5 hold integer magnitudes in 1 .. 9.
    for magnitude in payoff_settings[0, [0, 1, 4, 5]]:
        assert magnitude in range(1, 10)
def is_card(obj):
    """Return true if the object is a card.

    Membership test against the module-level ``CARDS_SET`` of known cards.
    """
    return obj in CARDS_SET
def plot_lines(
        y: tuple,
        x: np.ndarray = None,
        points: bool = True,
        x_axis_label: str = 'Index',
        y_axis_label: str = 'Value',
        plot_width: int = 1000,
        plot_height: int = 500,
        color: tuple = None,
        legend: tuple = None,
        title: str = 'Graph lines',
        show_graph: bool = True
) -> figure:
    """
    Plot one line per element of ``y`` on a single Bokeh figure.

    :param y: tuple/sequence of 1D series, one line each
    :param x: shared x values; defaults to 0..len(y[0])-1
    :param points: when True, also draw circle markers on each line
    :param color: per-line colors; defaults to the module-level COLORS
    :param legend: per-line legend labels; auto-generated when omitted
    :param show_graph: when True, open/show the figure after drawing
    :return: the Bokeh figure
    """
    if x is None:
        x = np.arange(len(y[0]))
    if legend is None:
        legend = [f'Line {i}' for i in range(len(y))]
    if color is None:
        color = COLORS
    fig = figure(title=title, x_axis_label=x_axis_label,
                 y_axis_label=y_axis_label, plot_width=plot_width,
                 plot_height=plot_height)
    for i in range(len(y)):
        fig.line(
            y=y[i],
            x=x,
            color=color[i],
            legend=legend[i]
        )
        # BUG FIX: `points` is a bool, so the previous `points is not None`
        # test was always true and markers were drawn even for points=False.
        if points:
            fig.circle(
                y=y[i],
                x=x,
                fill_color=color[i]
            )
    if show_graph:
        show(fig)
    return fig
def obtain_csrf(session):
    """
    Obtain the CSRF token from the login page.

    :param session: requests-style session used for the GET
    :return: the token string scraped from the csrfToken hidden input
    """
    resp = session.get(FLOW_LOGIN_GET_URL)
    contents = str(resp.content)
    match = re.search(r'csrfToken" value="([a-z0-9\-]+)"', contents)
    # NOTE(review): raises AttributeError if the token is absent from the
    # page (match is None) — confirm that failing loudly here is intended.
    return match.group(1)
def get_distance_curve(
    kernel,
    lambda_values,
    N,
    M=None,
    ):
    r"""Compute the distance curve $\hat d(\lambda)$ over a set of lambdas.

    Given the number of elements per class and the full kernel — whose first
    N rows correspond to the mixture and last M rows to the component —
    evaluate $\hat d(\lambda)$ for each value in ``lambda_values``.

    :param kernel: (N+M) x (N+M) kernel matrix
    :param lambda_values: iterable of lambda mixing weights
    :param N: number of mixture rows at the top of the kernel
    :param M: number of component rows; inferred from the kernel when None
    :return: np.ndarray of distances, one per lambda
    """
    d_lambda = []
    if M is None:  # BUG FIX: identity check, not `== None`
        M = kernel.shape[0] - N
    prev_soln = None
    for lambda_value in lambda_values:
        # Target weight vector: lambda/N on the mixture block,
        # (1-lambda)/M on the component block.
        u_lambda = lambda_value / N * np.concatenate((np.ones((N, 1)),
                np.zeros((M, 1)))) + (1 - lambda_value) / M \
            * np.concatenate((np.zeros((N, 1)), np.ones((M, 1))))
        # Warm-start each solve with the previous lambda's solution.
        (solution, distance_sqd) = \
            find_nearest_valid_distribution(u_lambda, kernel, initial=prev_soln)
        prev_soln = solution
        d_lambda.append(sqrt(distance_sqd))
    d_lambda = np.array(d_lambda)
    return d_lambda
def _print_activations(module, activation_input, activation_output):
    """A forward hook to be called whenever a module finishes computing an
    output. Apply to a module using
    module.register_forward_hook(_print_activations)
    Prints the size, mean and standard deviation of the output activations.

    ``module`` and ``activation_input`` are part of the hook signature but
    are intentionally unused; only the output stats are reported.
    """
    print("Activations:", _get_stats(activation_output))
def test_Image_dt_source_constant(dt_source, xy, expected, tol=0.001):
    """Test getting constant dT values for a single date at a real point."""
    m = default_image_obj(dt_source=dt_source)
    # Sample the dT image at the given (lon, lat) point and compare with
    # the expected constant within tolerance.
    output = utils.point_image_value(ee.Image(m.dt), xy)
    assert abs(output['dt'] - expected) <= tol
def wait_for_interrupts(threaded=False, epoll_timeout=1):
    """
    This is the main blocking loop which, while active, will listen for interrupts and start your custom callbacks.
    At some point in your script you need to start this to receive interrupt callbacks.
    This blocking method is perfectly suited as "the endless loop that keeps your script running".

    :param threaded: With the argument threaded=True, this method starts in the background while your script continues in the main thread (RPIO will automatically shut down the thread when your script exits)
    :param epoll_timeout: timeout (seconds) for each epoll wait cycle
    :return:
    """
    # Stub implementation: intentionally does nothing in this build.
    pass
def _process_labels(labels, label_smoothing):
"""Pre-process a binary label tensor, maybe applying smoothing.
Parameters
----------
labels : tensor-like
Tensor of 0's and 1's.
label_smoothing : float or None
Float in [0, 1]. When 0, no smoothing occurs. When positive, the binary
ground truth labels `y_true` are squeezed toward 0.5, with larger values
of `label_smoothing` leading to label values closer to 0.5.
Returns
-------
torch.Tensor
The processed labels.
"""
assert label_smoothing is not None
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels | 5,331,580 |
def guess_digit(image, avgs):
    """Return the digit whose average darkness in the training data is
    closest to the darkness of ``image``.

    ``avgs`` is assumed to be a mapping whose keys are 0...9 and whose
    values are the corresponding average darknesses across the training
    data; ``image`` is a flat iterable of pixel intensities.
    """
    darkness = sum(image)
    # BUG FIX: dict.iteritems() is Python 2 only; items() works on Python 3.
    distances = {k: abs(v - darkness) for k, v in avgs.items()}
    return min(distances, key=distances.get)
def multiple_writes(self,
Y_splits,
Z_splits,
X_splits,
out_dir,
mem,
filename_prefix="bigbrain",
extension="nii",
nThreads=1,
benchmark=False):
"""
Split the input image into several splits,
all share with the same shape
For now only support .nii extension
:param Y_splits: How many splits in Y-axis
:param Z_splits: How many splits in Z-axis
:param X_splits: How many splits in X-axis
:param out_dir: Output Splits dir
:param mem: memory load each round
:param filename_prefix: each split's prefix filename
:param extension: extension of each split
:param nThreads: number of threads to trigger in each writing process
:param benchmark: If set to true the function will return
a dictionary containing benchmark information.
:return:
"""
def threaded_multiple():
'''# Using multi-threading to send data to hdfs in parallel,
# which will parallelize writing process.
# nThreads: number of threads that are working on writing
# data at the same time.
print("start {} threads to write data...".format(nThreads))
# separate all the splits' metadata to several pieces,
# each piece contains #nThreads splits' metadata.
caches = _split_arr(one_round_split_metadata.items(), nThreads)
st1 = time()
for thread_round in caches:
tds = []
# one split's metadata triggers one thread
for i in thread_round:
ix = i[1]
data = data_in_range[ix[0]: ix[1],
ix[2]: ix[3],
ix[4]: ix[5]]
td = threading.Thread(target=write_array_to_file,
args=(data, i[0], 0, benchmark))
td.start()
tds.append(td)
del data
for t in tds:
t.join()'''
pass
def compute_sizes(Y_splits, Z_splits, X_splits):
''' A function.
'''
# calculate remainder based on the original image file
Y_size, Z_size, X_size = self.header.get_data_shape()
bytes_per_voxel = self.header['bitpix'] / 8
if (X_size % X_splits != 0
or Z_size % Z_splits != 0
or Y_size % Y_splits != 0):
raise Exception("There is remainder after splitting, \
please reset the y,z,x splits")
x_size = X_size / X_splits
z_size = Z_size / Z_splits
y_size = Y_size / Y_splits
return ((x_size, z_size, y_size),
(X_size, Z_size, Y_size),
bytes_per_voxel)
def file_manipulation_multiple(sizes, Sizes, filename_prefix):
''' A function.
'''
x_size, z_size, y_size = sizes
X_size, Z_size, Y_size = Sizes
# get all split_names and write them to the legend file
split_names = generate_splits_name(y_size, z_size, x_size, Y_size,
Z_size, X_size, out_dir,
filename_prefix,
extension)
generate_legend_file(split_names, "legend.txt", out_dir)
# generate all the headers for each split
# in order to reduce overhead when reading headers of splits
# from hdfs, create a header cache in the local environment
print("create split meta data dictionary...")
split_meta_cache = generate_headers_of_splits(split_names,
y_size,
z_size,
x_size,
self.header
.get_data_dtype())
print("Get split indexes...")
split_indexes = get_indexes_of_all_splits(split_names,
split_meta_cache,
Y_size, Z_size)
return split_indexes, split_names, split_meta_cache
def get_metadata_multiple(split_indexes,
split_names,
split_meta_cache,
from_x_index):
''' A function.
'''
# create split metadata for all splits(position, write_range, etc.)
one_round_split_metadata = {}
for split_name in split_names:
if check_in_range(next_read_index, split_indexes[split_name]):
split = split_meta_cache[split_name]
(X_index_min, X_index_max,
x_index_min, x_index_max) = \
extract_slices_range(split,
next_read_index, Y_size,
Z_size)
y_index_min = int(split.split_pos[-3])
z_index_min = int(split.split_pos[-2])
y_index_max = y_index_min + split.split_y
z_index_max = z_index_min + split.split_z
one_round_split_metadata[split_name] = \
(y_index_min, y_index_max, z_index_min, z_index_max,
X_index_min - from_x_index,
X_index_max - from_x_index + 1)
return one_round_split_metadata
def loop_multiple(next_read_index,
                  bytes_per_voxel,
                  Sizes,
                  split_indexes,
                  split_names,
                  split_meta_cache,
                  split_read_time,
                  split_write_time,
                  split_seek_time,
                  split_seek_number,
                  benchmark):
    """Run one read/write round: load one slab of the image into memory
    and write the overlapping portion of every split file.

    Returns the next read index, plus the four benchmark counters when
    ``benchmark`` is truthy.

    NOTE(review): relies on ``self``, ``voxels``, ``nThreads`` and several
    helper functions from the enclosing scope.
    """
    # NOTE(review): this zeroes the cumulative read time passed in by the
    # caller, so the returned value only reflects this round -- confirm
    # whether the caller expects a running total.
    split_read_time = 0
    # NOTE(review): seeks are counted in ``split_nb_seeks`` below, but the
    # untouched ``split_seek_number`` parameter is what gets returned, so
    # the seek count never grows across rounds -- looks like a bug; verify.
    split_nb_seeks = 0
    X_size, Z_size, Y_size = Sizes
    original_img_voxels = X_size * Y_size * Z_size
    # byte offsets of the slab this round will read
    next_read_offsets = (next_read_index[0] * bytes_per_voxel,
                         next_read_index[1] * bytes_per_voxel + 1)
    print("From {} to {}".format(next_read_offsets[0],
                                 next_read_offsets[1]))
    # convert the flat voxel indices into X slice numbers
    from_x_index = index_to_voxel(next_read_index[0],
                                  Y_size, Z_size)[2]
    to_x_index = index_to_voxel(next_read_index[1] + 1,
                                Y_size, Z_size)[2]
    # read
    print("Start reading data to memory...")
    if benchmark:
        t = time()
    data_in_range = \
        self.proxy.dataobj[..., int(from_x_index): int(to_x_index)]
    if benchmark:
        read_time = time() - t
        print('read time ', read_time)
        split_read_time += read_time
        split_nb_seeks += 1
    one_round_split_metadata = get_metadata_multiple(split_indexes,
                                                     split_names,
                                                     split_meta_cache,
                                                     from_x_index)
    # partition the per-split work for the worker threads
    caches = _split_arr(one_round_split_metadata.items(), nThreads)
    threaded_multiple()
    for round in caches:
        for i in round:
            # i = (split_name, (y0, y1, z0, z1, x0, x1))
            ix = i[1]
            ix = list(map(lambda x: int(x), ix))
            data = data_in_range[ix[0]:ix[1], ix[2]:ix[3], ix[4]:ix[5]]
            if benchmark:
                seek_time, write_time, seek_number = \
                    write_array_to_file(data, i[0], 0, benchmark)
                split_write_time += write_time
                split_seek_time += seek_time
                split_nb_seeks += seek_number
                print("writing data takes ", write_time)
            else:
                write_array_to_file(data, i[0], 0, benchmark)
    # advance the read window by one slab worth of voxels
    next_read_index = (next_read_index[1] + 1,
                       next_read_index[1] + voxels)
    # last write, write no more than image size
    if next_read_index[1] >= original_img_voxels:
        next_read_index = (next_read_index[0], original_img_voxels - 1)
    del caches
    del one_round_split_metadata
    del data_in_range
    if benchmark:
        return (next_read_index,
                split_read_time,
                split_write_time,
                split_seek_time,
                split_seek_number)
    else:
        return next_read_index
# begin algorithm
# benchmark accumulators for the whole run
split_read_time = 0
split_seek_time = 0
split_write_time = 0
split_seek_number = 0
# preparation
sizes, Sizes, bytes_per_voxel = compute_sizes(Y_splits,
                                              Z_splits,
                                              X_splits)
X_size, Z_size, Y_size = Sizes
original_img_voxels = X_size * Y_size * Z_size
(split_indexes,
 split_names,
 split_meta_cache) = \
    file_manipulation_multiple(sizes,
                               Sizes,
                               filename_prefix)
# drop the remainder which is less than one slice
# if mem is less than one slice, then set mem to one slice
mem = mem - mem % (Y_size * Z_size * bytes_per_voxel) \
    if mem >= Y_size * Z_size * bytes_per_voxel \
    else Y_size * Z_size * bytes_per_voxel
voxels = mem // bytes_per_voxel  # get how many voxels per round
next_read_index = (0, voxels - 1)
# main loop: one iteration per in-memory slab
while True:
    if benchmark:
        (next_read_index,
         split_read_time,
         split_write_time,
         split_seek_time,
         split_seek_number) = (loop_multiple(next_read_index,
                                             bytes_per_voxel,
                                             Sizes,
                                             split_indexes,
                                             split_names,
                                             split_meta_cache,
                                             split_read_time,
                                             split_write_time,
                                             split_seek_time,
                                             split_seek_number,
                                             benchmark))
    else:
        next_read_index = loop_multiple(next_read_index,
                                        bytes_per_voxel,
                                        Sizes,
                                        split_indexes,
                                        split_names,
                                        split_meta_cache,
                                        split_read_time,
                                        split_write_time,
                                        split_seek_time,
                                        split_seek_number,
                                        benchmark)
    # if write range is larger than img size, we are done
    if next_read_index[0] >= original_img_voxels:
        break
# report benchmark totals if requested, otherwise return nothing
if benchmark:
    return {'split_read_time': split_read_time,
            'split_write_time': split_write_time,
            'split_seek_time': split_seek_time,
            'split_nb_seeks': split_seek_number}
else:
    return
def new_parameter_value(data, parameter_key: str):
    """Look up the posted value for *parameter_key*, filtering a
    multiple-choice value down to the choices the data model still allows."""
    posted = dict(bottle.request.json)
    value = posted[parameter_key]
    parameter = data.datamodel["sources"][data.source["type"]]["parameters"][parameter_key]
    if parameter["type"] == "multiple_choice":
        allowed = parameter["values"]
        value = [choice for choice in value if choice in allowed]
    return value
def rotate_to_base_frame(
    pybullet_client: bullet_client.BulletClient,
    urdf_id: int,
    vector: Sequence[float],
    init_orientation_inv_quat: Optional[Sequence[float]] = (0, 0, 0, 1)
) -> np.ndarray:
    """Rotate *vector* from the world frame into the robot base frame.

    Only the rotation part of the world->base transform is applied; no
    translation is performed.

    Args:
      pybullet_client: The bullet client.
      urdf_id: The unique id returned after loading URDF.
      vector: Input vector in the world frame.
      init_orientation_inv_quat: Inverse of the initial base orientation.

    Returns:
      The rotated vector, expressed in the base frame.
    """
    origin = (0, 0, 0)
    _, base_quat = pybullet_client.getBasePositionAndOrientation(urdf_id)
    # compose the current base orientation with the inverse initial one
    _, quat_from_init = pybullet_client.multiplyTransforms(
        positionA=origin,
        orientationA=init_orientation_inv_quat,
        positionB=origin,
        orientationB=base_quat)
    _, base_quat_inv = pybullet_client.invertTransform(
        [0, 0, 0], quat_from_init)
    # PyBullet transforms requires simple list/tuple or it may crash.
    vector_list = vector.tolist() if isinstance(vector, np.ndarray) else vector
    rotated, _ = pybullet_client.multiplyTransforms(
        positionA=origin,
        orientationA=base_quat_inv,
        positionB=vector_list,
        orientationB=(0, 0, 0, 1),
    )
    return np.array(rotated)
def show_page_map(label):
    """Render the base page map template for *label*.

    Underscores in the label are displayed as spaces.
    """
    context = {'map_label': label.replace('_', ' ')}
    return render('page_map.html', context)
def create_clf_unicycle_position_controller(linear_velocity_gain=0.8, angular_velocity_gain=3):
    r"""Create a unicycle-model position controller.

    The returned controller drives unicycle robots to given positions using a
    control Lyapunov function (CLF):
    $u: \mathbf{R}^{3 \times N} \times \mathbf{R}^{2 \times N} \to \mathbf{R}^{2 \times N}$

    linear_velocity_gain - the gain impacting the produced unicycle linear velocity
    angular_velocity_gain - the gain impacting the produced unicycle angular velocity

    -> function
    """
    # Check user input types
    assert isinstance(linear_velocity_gain, (int, float)), "In the function create_clf_unicycle_position_controller, the linear velocity gain (linear_velocity_gain) must be an integer or float. Received type %r." % type(linear_velocity_gain).__name__
    assert isinstance(angular_velocity_gain, (int, float)), "In the function create_clf_unicycle_position_controller, the angular velocity gain (angular_velocity_gain) must be an integer or float. Received type %r." % type(angular_velocity_gain).__name__

    # Check user input ranges/sizes
    assert linear_velocity_gain >= 0, "In the function create_clf_unicycle_position_controller, the linear velocity gain (linear_velocity_gain) must be greater than or equal to zero. Received %r." % linear_velocity_gain
    assert angular_velocity_gain >= 0, "In the function create_clf_unicycle_position_controller, the angular velocity gain (angular_velocity_gain) must be greater than or equal to zero. Received %r." % angular_velocity_gain

    def position_uni_clf_controller(states, positions):
        """A position controller for unicycle models. Utilizes a control
        Lyapunov function (CLF) to drive a unicycle system to a desired
        position, returning a unicycle velocity command vector.

        states: 3xN numpy array (of unicycle states, [x;y;theta])
        positions: 2xN numpy array (of desired positions, [x_goal;y_goal])

        -> 2xN numpy array (of unicycle control inputs)
        """
        # Check user input types
        assert isinstance(states, np.ndarray), "In the function created by the create_clf_unicycle_position_controller function, the unicycle robot states (states) must be a numpy array. Received type %r." % type(states).__name__
        assert isinstance(positions, np.ndarray), "In the function created by the create_clf_unicycle_position_controller function, the robot goal points (positions) must be a numpy array. Received type %r." % type(positions).__name__

        # Check user input ranges/sizes
        assert states.shape[0] == 3, "In the function created by the create_clf_unicycle_position_controller function, the dimension of the unicycle robot states (states) must be 3 ([x;y;theta]). Received dimension %r." % states.shape[0]
        assert positions.shape[0] == 2, "In the function created by the create_clf_unicycle_position_controller function, the dimension of the robot goal positions (positions) must be 2 ([x_goal;y_goal]). Received dimension %r." % positions.shape[0]
        assert states.shape[1] == positions.shape[1], "In the function created by the create_clf_unicycle_position_controller function, the number of unicycle robot states (states) must be equal to the number of robot goal positions (positions). Received a current robot pose input array (states) of size %r x %r and desired position array (positions) of size %r x %r." % (states.shape[0], states.shape[1], positions.shape[0], positions.shape[1])

        _, N = np.shape(states)
        dxu = np.zeros((2, N))

        # Position error in the world frame, expressed in polar form
        pos_error = positions - states[:2][:]
        rot_error = np.arctan2(pos_error[1][:], pos_error[0][:])
        dist = np.linalg.norm(pos_error, axis=0)

        # CLF control law: speed scales with distance, heading steers to goal
        dxu[0][:] = linear_velocity_gain * dist * np.cos(rot_error - states[2][:])
        dxu[1][:] = angular_velocity_gain * dist * np.sin(rot_error - states[2][:])

        return dxu

    return position_uni_clf_controller
def cluster_pipeline(gff3_file, strand, verbose):
    """Group sequences from the same locus into clusters.

    Pipes the GFF3 file through ``sort | merge | sort`` (bedtools) and
    returns the merged, re-sorted entries, one bytes line per cluster.
    """
    cat_cmd = CAT % gff3_file
    if strand:
        merge_cmd = BEDTOOLS_MERGE_ST
        sys.stdout.write("###CLUSTERING IN\033[32m STRANDED MODE\033[0m###\n")
    else:
        merge_cmd = BEDTOOLS_MERGE
        sys.stdout.write("###CLUSTERING IN\033[32m NON-STRANDED MODE\033[0m ###\n")

    def launch(cmd, upstream=None):
        # start one pipeline stage, fed from the previous stage's stdout
        stdin = None if upstream is None else upstream.stdout
        proc = subprocess.Popen(cmd, stdin=stdin, stdout=subprocess.PIPE, shell=True)
        if verbose:
            sys.stderr.write('Executing: %s\n\n' % cmd)
        return proc

    source = launch(cat_cmd)
    sorted_once = launch(BEDTOOLS_SORT, source)
    # Merge the BED entries, count number of reads on each merged entry
    merged = launch(merge_cmd, sorted_once)
    # Sort the merged output again before returning it
    sorted_again = launch(BEDTOOLS_SORT, merged)
    raw_output = sorted_again.communicate()[0]
    return raw_output.splitlines()
def stft_reassign_from_sig(sig_wf: np.ndarray,
                           frequency_sample_rate_hz: float,
                           band_order_Nth: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray,
                                                           np.ndarray]:
    """Compute a reassigned spectrogram (librosa) scaled to match scipy PSD.

    Librosa STFT is complex FFT grid, not power.
    Reassigned frequencies are not the same as the standard mesh frequencies.

    :param sig_wf: array with input signal
    :param frequency_sample_rate_hz: sample rate of frequency in Hz
    :param band_order_Nth: Nth order of constant Q bands
    :return: six numpy ndarrays with STFT, STFT_bits, time_stft_s, frequency_stft_hz, time_stft_rsg_s,
        frequency_stft_rsg_hz
    """
    # lowest resolvable band frequency is set by the total signal duration
    sig_duration_s = len(sig_wf)/frequency_sample_rate_hz
    _, min_frequency_hz = scales.from_duration(band_order_Nth, sig_duration_s)
    order_Nth, cycles_M, quality_Q, \
        frequency_center, frequency_start, frequency_end = \
        scales.frequency_bands_g2f1(scale_order_input=band_order_Nth,
                                    frequency_low_input=min_frequency_hz,
                                    frequency_sample_rate_input=frequency_sample_rate_hz)
    # Choose the spectral resolution as the key parameter
    frequency_resolution_min_hz = np.min(frequency_end - frequency_start)
    frequency_resolution_max_hz = np.max(frequency_end - frequency_start)
    # geometric mean of the band widths fixes the FFT window length
    frequency_resolution_hz_geo = np.sqrt(frequency_resolution_min_hz*frequency_resolution_max_hz)
    stft_time_duration_s = 1/frequency_resolution_hz_geo
    stft_points_per_seg = int(frequency_sample_rate_hz*stft_time_duration_s)
    # From CQT
    stft_points_hop, _, _, _, _ = \
        scales.cqt_frequency_bands_g2f1(band_order_Nth,
                                        min_frequency_hz,
                                        frequency_sample_rate_hz,
                                        is_power_2=False)
    print('Reassigned STFT Duration, NFFT, HOP:', len(sig_wf), stft_points_per_seg, stft_points_hop)
    # scaling factor so the magnitudes match the scipy PSD convention
    STFT_Scaling = 2*np.sqrt(np.pi)/stft_points_per_seg
    # Reassigned frequencies require a 'best fit' solution.
    frequency_stft_rsg_hz, time_stft_rsg_s, STFT_mag = \
        librosa.reassigned_spectrogram(sig_wf, sr=frequency_sample_rate_hz,
                                       n_fft=stft_points_per_seg,
                                       hop_length=stft_points_hop, win_length=None,
                                       window='hann', center=False, pad_mode='reflect')
    # Must be scaled to match scipy psd
    STFT_mag *= STFT_Scaling
    STFT_bits = utils.log2epsilon(STFT_mag)
    # Standard mesh times and frequencies for plotting - nice to have both
    time_stft_s = librosa.times_like(STFT_mag, sr=frequency_sample_rate_hz,
                                     hop_length=stft_points_hop)
    frequency_stft_hz = librosa.core.fft_frequencies(sr=frequency_sample_rate_hz,
                                                     n_fft=stft_points_per_seg)
    # Reassigned frequencies are not the same as the standard mesh frequencies
    return STFT_mag, STFT_bits, time_stft_s, frequency_stft_hz, time_stft_rsg_s, frequency_stft_rsg_hz
def download(*urls, zip: str=None, unzip: bool=False, **kwargs) -> List[File]:
    """
    Download multiple zippyshare urls

    Parameters
    -----------
    *urls
        Zippyshare urls.
    zip: :class:`str`
        Zip all downloaded files once finished.
        Zip filename will be taken from ``zip`` parameter,
        default to ``None``.
        NOTE: You can't mix ``zip`` and ``unzip`` options together
        with value ``True``, it will raise error.
    unzip: :class:`bool`
        Unzip all downloaded files once finished
        (if given file is zip format extract it, otherwise ignore it),
        default to ``False``.
        NOTE: You can't mix ``zip`` and ``unzip`` options together
        with value ``True``, it will raise error.
    **kwargs
        These parameters will be passed to :meth:`File.download()`,
        except for parameter ``filename``.

    Returns
    -------
    List[:class:`File`]
        a list of Zippyshare files

    Raises
    ------
    ValueError
        If both ``zip`` and ``unzip`` are set.
    """
    if unzip and zip:
        raise ValueError("unzip and zip parameters cannot be set together")
    # ``filename`` only makes sense for a single file; drop it once before
    # the loop instead of re-checking it for every url.
    kwargs.pop('filename', None)
    downloaded_files = {}
    files = []
    for url in urls:
        info = get_info(url)
        file = File(info)
        files.append(file)
        file_path = file.download(**kwargs)
        downloaded_files[file] = file_path
        if unzip:
            extract_archived_file(str(file_path))
    if zip:
        log.info(build_pretty_list_log(downloaded_files, 'Zipping all downloaded files to "%s"' % zip))
        archive_zip(downloaded_files, zip)
        log.info(build_pretty_list_log(downloaded_files, 'Successfully zip all downloaded files to "%s"' % zip))
    return files
def pop_arg(args_list, expected_size_after=0, msg="Missing argument"):
    """Remove and return the first command line argument.

    Raises BadCommandUsage with *msg* when no argument is available, or
    with 'too many arguments' when more than *expected_size_after*
    arguments remain afterwards (pass None to skip that check).
    """
    try:
        head = args_list.pop(0)
    except IndexError:
        raise BadCommandUsage(msg)
    if expected_size_after is None:
        return head
    if len(args_list) > expected_size_after:
        raise BadCommandUsage('too many arguments')
    return head
def main():
    """Find proteins absent from the CD-HIT clusters.

    Collects GI numbers from the CD-HIT cluster file, then scans the
    protein FASTA file and writes every record whose id is not in a
    cluster to the output file, printing a summary at the end.
    """
    args = get_args()
    cdhit_file = args.cdhit
    protein_file = args.proteins
    outfile = args.outfile

    #print('boo')

    # if cdhit_file == '' or protein_file == '':
    #     die('usage: find_unclustered.py [-h] -c str -p str [-o str]\nfind_unclustered.py: error: the following arguments are required: -c/--cdhit, -p/--proteins')

    # both input files must exist before any parsing starts
    if not os.path.isfile(protein_file):
        print('--proteins "{}" is not a file'.format(protein_file), file=sys.stderr)
        sys.exit(1)

    if not os.path.isfile(cdhit_file):
        print('--cdhit "{}" is not a file'.format(cdhit_file), file=sys.stderr)
        sys.exit(1)

    # matches headers like ">gi|123456789|" and captures the GI number
    id_re = re.compile('>gi'
                       '[|]'
                       '(?P<id_string>\d+)'  # capture the GI number
                       '[|]')

    clust_prots = []
    lines = 0
    with open(cdhit_file, 'r') as f:
        for line in f:
            # collect the GI number of every clustered sequence
            #print(line)
            #print(re.search('>gi|.*|', line))
            #lines += 1
            match = id_re.search(line)
            if match:
                clust_prots.append(match.group('id_string'))
            # else:
            #     print(line)
            #     t = re.search(">gi|(\d{9})",line)
            #     print(t)

    #print('lines: {}'.format(lines))
    #print(len(clust_prots))
    # set gives O(1) membership tests in the FASTA scan below
    clust_prots = set(clust_prots)
    #print(len(clust_prots))

    out_fh = open(outfile, 'wt')
    num_unclustered = 0
    num_clustered = 0
    num_total = 0
    with open(protein_file, "r") as handle:
        for record in SeqIO.parse(handle, "fasta"):
            #print(record.id)
            num_total += 1
            # strip everything after the first '|' to get the bare id
            record.id = re.sub('\|.*', '', record.id)
            #print(record.id)
            if record.id in clust_prots:
                #print(record.id)
                num_clustered += 1
            else:
                num_unclustered += 1
                SeqIO.write(record, out_fh, 'fasta')

    print('Wrote {:,} of {:,} unclustered proteins to "{}"'.format(num_unclustered,num_total,outfile))
def d4s(data):
    """
    Beam parameter calculation according to the ISO standard D4sigma integrals

    input: 2D array of intensity values (pixels)
    output:
        xx, yy: x and y centres
        dx, dy: 4 sigma widths for x and y
        angle: inferred rotation angle, radians
    """
    dimy, dimx = np.shape(data)
    # pixel coordinate grids: after the transposes X holds column
    # indices and Y holds row indices, matching the image layout
    X, Y = np.mgrid[0:dimx, 0:dimy]
    X = X.T
    Y = Y.T
    P = np.sum(data)  # total power, normalises every moment below
    # first moments: beam centroid
    xx = np.sum(data * X) / P
    yy = np.sum(data * Y) / P
    # second central moments
    xx2 = np.sum(data * (X - xx) ** 2) / P
    yy2 = np.sum(data * (Y - yy) ** 2) / P
    xy = np.sum(data * (X - xx) * (Y - yy)) / P
    # gamm selects the principal-axis branch of the D4sigma formulas
    gamm = np.sign(xx2 - yy2)
    # NOTE: angle is undefined (runtime warning / nan) when xx2 == yy2,
    # same as the original behaviour
    angle = 0.5 * np.arctan(2 * xy / (xx2 - yy2))
    try:
        dx = 2 * np.sqrt(2) * (xx2 + yy2 + gamm * ((xx2 - yy2) ** 2 + 4 * xy ** 2) ** 0.5) ** 0.5
        dy = 2 * np.sqrt(2) * (xx2 + yy2 - gamm * ((xx2 - yy2) ** 2 + 4 * xy ** 2) ** 0.5) ** 0.5
    except Exception:  # narrowed from a bare except; keep best-effort fallback
        # In case of error, just make the size very large
        print("Fitting error")  # fixed: was a Python 2 print statement
        dx, dy = data.shape
    return xx, yy, dx, dy, angle
def write_files_human_readable(json_object, input_path, path_to_save, use_time):
    """Save the list of directories that match the old_rollup criteria to a human readable file."""
    timestamp = todays_date.strftime("%Y-%m-%d-%H-%M-%S")
    if not os.path.exists(path_to_save):
        return
    base_dir = os.path.join(path_to_save, '')
    sanitized_input = input_path.replace('/', '_')
    out_name = '{}_old_rollup_human_readable_{}time_{}.txt'.format(sanitized_input, use_time, timestamp)
    destination = '{}{}'.format(base_dir, out_name)
    with open(destination, 'w') as outfile:
        for entry in json_object:
            outfile.write('{}\n'.format(entry))
def checkWarnings(func, func_args=[], func_kwargs={},
                  category=UserWarning,
                  nwarnings=1, message=None, known_warning=None):
    """Function to check expected warnings.

    Calls ``func(*func_args, **func_kwargs)`` while recording warnings and
    asserts that exactly ``nwarnings`` warnings were raised with the
    expected categories and message substrings, then returns the result.

    Parameters
    ----------
    func : callable
        Function to run.
    func_args : list
        Positional arguments for ``func``.
    func_kwargs : dict
        Keyword arguments for ``func``.
    category : Warning class or list of Warning classes
        Expected warning class(es); a single value is broadcast to
        ``nwarnings`` entries.
    nwarnings : int
        Expected number of warnings.
    message : str or list of str, optional
        Substring(s) expected in the warning message(s).
    known_warning : {'miriad', 'paper_uvfits', 'fhd'}, optional
        Shortcut that overrides category/message/nwarnings with the
        standard warnings for a known telescope/file type.

    Returns
    -------
    The return value of ``func``.

    NOTE(review): the mutable default arguments are shared between calls
    but are never mutated here, so this is benign.
    """
    # broadcast single category/message values to the expected count
    if (not isinstance(category, list) or len(category) == 1) and nwarnings > 1:
        if isinstance(category, list):
            category = category * nwarnings
        else:
            category = [category] * nwarnings

    if (not isinstance(message, list) or len(message) == 1) and nwarnings > 1:
        if isinstance(message, list):
            message = message * nwarnings
        else:
            message = [message] * nwarnings

    if known_warning == 'miriad':
        # The default warnings for known telescopes when reading miriad files
        category = [UserWarning]
        message = ['Altitude is not present in Miriad file, using known '
                   'location values for PAPER.']
        nwarnings = 1
    elif known_warning == 'paper_uvfits':
        # The default warnings for known telescopes when reading uvfits files
        category = [UserWarning] * 2
        message = ['Required Antenna frame keyword', 'telescope_location is not set']
        nwarnings = 2
    elif known_warning == 'fhd':
        category = [UserWarning]
        message = ['Telescope location derived from obs']
        nwarnings = 1

    category = uvutils._get_iterable(category)
    message = uvutils._get_iterable(message)

    clearWarnings()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")  # All warnings triggered
        warnings.filterwarnings("ignore", message="numpy.dtype size changed")
        warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
        # filter iers warnings if iers.conf.auto_max_age is set to None, as we do in testing if the iers url is down
        from astropy.utils import iers
        if iers.conf.auto_max_age is None:
            warnings.filterwarnings("ignore", message="failed to download")
            warnings.filterwarnings("ignore", message="time is out of IERS range")

            # only suppress LST warnings when none of the expected
            # messages is itself an LST warning
            if isinstance(message, six.string_types):
                test_message = [message.startswith("LST values stored in ")]
            else:
                test_message = []
                for m in message:
                    if m is None:
                        test_message.append(False)
                    else:
                        test_message.append(m.startswith("LST values stored in "))
            if not any(test_message):
                warnings.filterwarnings("ignore", message="LST values stored in ")

        retval = func(*func_args, **func_kwargs)  # Run function
    # Verify
    if len(w) != nwarnings:
        print('wrong number of warnings. Expected number was {nexp}, '
              'actual number was {nact}.'.format(nexp=nwarnings, nact=len(w)))
        for idx, wi in enumerate(w):
            print('warning {i} is: {w}'.format(i=idx, w=wi))
        assert(False)
    else:
        for i, w_i in enumerate(w):
            if w_i.category is not category[i]:
                print('expected category ' + str(i) + ' was: ', category[i])
                print('category ' + str(i) + ' was: ', str(w_i.category))
                assert(False)
            if message[i] is not None:
                if message[i] not in str(w_i.message):
                    print('expected message ' + str(i) + ' was: ', message[i])
                    print('message ' + str(i) + ' was: ', str(w_i.message))
                    assert(False)
    return retval
def test_title(title, expected_title):
    """Test that title is properly parsed.

    1. Create a field parser for dictionary with specific title.
    2. Parse a title.
    3. Check the parsed title.
    """
    parser = FieldParser(data={"title": title})
    assert parser.parse_title() == expected_title, "Wrong title"
def add_name_slug(apps, schema_editor):
    """Populate name_slug correctly for every Interface with a slash in the name."""
    Interface = apps.get_model("nsot", "Interface")
    for interface in Interface.objects.iterator():
        interface.name_slug = slugify_interface(
            device_hostname=interface.device_hostname, name=interface.name)
        interface.save()
def extract_times(imas_version=omas_rcparams['default_imas_version']):
    """
    return list of strings with .time across all structures

    :param imas_version: imas version

    :return: list with times
    """
    from omas.omas_utils import list_structures
    from omas.omas_utils import load_structure

    times = []
    for structure in list_structures(imas_version=imas_version):
        contents = load_structure(structure, imas_version)[0]
        # keep only genuine ``.time`` data leaves, not sub-structures
        times.extend(
            path for path in contents
            if path.endswith('.time')
            and 'data_type' in contents[path]
            and contents[path]['data_type'] != 'STRUCTURE'
        )
    return sorted(times)
async def sl_setup(hass):
    """Set up the shopping list.

    Loads the ``shopping_list`` component into *hass* and registers its
    intent handlers so intent-based tests can drive the list.
    """
    assert await async_setup_component(hass, "shopping_list", {})
    await sl_intent.async_setup_intents(hass)
def test_eval_schedule_cron(schedule):
    """
    Tests eval if the schedule is defined with cron expression
    """
    job = {"function": "test.true", "cron": "* * * * *"}
    schedule.opts.update({"pillar": {"schedule": {}}})
    schedule.opts.update({"schedule": {"testjob": job}})
    before = datetime.datetime.now()
    schedule.eval()
    # a valid cron job must be scheduled for a time in the future
    assert schedule.opts["schedule"]["testjob"]["_next_fire_time"] > before
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.