content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def rayleigh(gamma, M0, TtR):
    """Invert the Rayleigh-flow stagnation-temperature relation.

    Given an input (or output) Mach number and a stagnation temperature
    ratio, solve for the corresponding output (or input) Mach number and
    also report the stagnation pressure ratio across the heat addition.

    Inputs:
        gamma  [dimensionless]  ratio of specific heats
        M0     [dimensionless]  known Mach number (array-like)
        TtR    [dimensionless]  stagnation temperature ratio
    Outputs:
        M1     [dimensionless]  solved Mach number
        Ptr    [dimensionless]  stagnation pressure ratio
    """
    half_gm1 = (gamma - 1.) / 2.

    def residual(M1):
        # Rayleigh Tt-ratio between M1 and M0, minus the target ratio.
        num = (1. + gamma * M0 * M0) ** 2. * M1 * M1 * (1. + half_gm1 * M1 * M1)
        den = (1. + gamma * M1 * M1) ** 2. * M0 * M0 * (1. + half_gm1 * M0 * M0)
        return num / den - TtR

    # Seed the solver on the correct branch: subsonic entries start near
    # zero, supersonic entries start just above Mach 1.
    initial_guess = np.ones_like(M0)
    initial_guess[M0 <= 1.0] = .01
    initial_guess[M0 > 1.0] = 1.1

    M1 = fsolve(residual, initial_guess, factor=0.1)

    # Stagnation pressure ratio for the solved Mach pair.
    Ptr = ((1. + gamma * M0 * M0) / (1. + gamma * M1 * M1)
           * ((1. + half_gm1 * M1 * M1)
              / (1. + half_gm1 * M0 * M0)) ** (gamma / (gamma - 1.)))
    return M1, Ptr
def attack(N, e, c, oracle):
    """
    Recovers the plaintext from the ciphertext using the LSB oracle attack.
    :param N: the modulus
    :param e: the public exponent
    :param c: the encrypted message
    :param oracle: a function which returns the last bit of a plaintext for a given ciphertext
    :return: the plaintext
    """
    # Portability fix: the original relied on Sage's ZZ, whose division
    # yields exact rationals. fractions.Fraction gives the same exact
    # halving with only the standard library; float halving would lose
    # precision for realistic modulus sizes and corrupt the result.
    from fractions import Fraction
    left = Fraction(0)
    right = Fraction(N)
    while right - left > 1:
        # Multiply the hidden plaintext by 2 (mod N) via the homomorphism.
        c = (c * pow(2, e, N)) % N
        if oracle(c) == 0:
            # 2*m did not wrap around N: m lies in the lower half.
            right = (right + left) / 2
        else:
            # 2*m wrapped (odd result): m lies in the upper half.
            left = (right + left) / 2
    # int() truncates the exact rational bound to the recovered plaintext.
    return int(right)
from typing import Optional
from typing import Union
from typing import Dict
from typing import Any
def setup_wandb_logging(
    trainer: Engine,
    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
    log_every_iters: int = 100,
    **kwargs: Any
):
    """Attach Weights & Biases logging to a trainer and its evaluators.

    The logged quantities are the trainer's running metrics (e.g. running
    average loss), the learning rate(s) of the given optimizer(s), and the
    metrics of the given evaluator(s).

    Args:
        trainer (Engine): trainer engine.
        optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional):
            single optimizer or dictionary of optimizers; dict keys become
            logging tags.
        evaluators (Engine or dict of Engine, optional): single evaluator or
            dictionary of evaluators; dict keys become logging tags.
        log_every_iters (int, optional): interval for iteration-event loggers.
            Use 1 or None to log every iteration.
        **kwargs: forwarded to the ``WandBLogger`` constructor.

    Returns:
        :class:`~ignite.contrib.handlers.wandb_logger.WandBLogger`
    """
    wandb_logger = WandBLogger(**kwargs)
    _setup_logging(wandb_logger, trainer, optimizers, evaluators, log_every_iters)
    return wandb_logger
def _norm(X, y):
"""Scales data to [0..1] interval"""
X = X.astype(np.float32) / (X.max() - X.min())
return X, y | 87189d4c885d77654793373416c0d5c4be498fba | 32,903 |
import os
def check_directory(directory, verbose):
    """Create a unique output directory under the current working directory.

    Args:
        directory: requested directory name (relative to the cwd).
        verbose: when truthy, announce creation of a fresh directory.

    Returns:
        The name of the directory that was created: ``directory`` itself when
        it did not exist yet, otherwise ``directory_<i>`` for the smallest
        free ``i``.
    """
    base = os.getcwd() + '/'
    if not os.path.isdir(base + directory):
        if verbose:
            print('Making output directory\n')
        os.mkdir(base + directory)
        return directory
    # Requested name already exists: probe directory_1, directory_2, ...
    suffix = 0
    while True:
        suffix += 1
        candidate = directory + f'_{suffix}'
        if not os.path.isdir(base + candidate):
            os.mkdir(base + candidate)
            return candidate
import logging
import os
from datetime import datetime
from datetime import timedelta
from typing import Any
from typing import Dict
from typing import List
def compute_single_lesson_score(
    lesson_metadata: Dict[str, Any],
    df: pd.DataFrame,
    slide_columns: List[str],
    due_date: datetime,
) -> pd.Series:
    """Takes a DataFrame from an Ed export of completions and the due date
    and returns a Series (indexed by email) of the students' scores for this
    assignment.

    Args:
        lesson_metadata: lesson description with "title" and per-type
            ("quiz"/"code") slide lists; each slide has "name" and "file".
        df: completion timestamps, one column per slide.
        slide_columns: the timestamp columns of ``df`` to grade.
        due_date: assignment due date.

    Returns:
        Scores normalized by the total available points.
    """
    # On-time window gets a 15 minute buffer; anything up to a week past
    # that earns half credit.
    # BUG FIX: this module does `from datetime import datetime`, so the
    # original `datetime.timedelta(...)` raised AttributeError; use
    # `timedelta` directly (and the annotation above is now `datetime`).
    due_date_buffered = due_date + timedelta(minutes=15)
    late_date_buffered = due_date_buffered + timedelta(days=7)
    before_due_date = df[slide_columns] < due_date_buffered
    before_late_cutoff = df[slide_columns] < late_date_buffered
    # 1.0 for on time, 0.5 for less than a week late, 0.0 otherwise.
    all_on_time = 0.5 * before_due_date + 0.5 * before_late_cutoff
    # Only count slides students had to do work on (code and quiz).
    scores = pd.Series(0, index=df.index)
    points_total = 0
    for slide_type in ["quiz", "code"]:  # renamed: `type` shadowed the builtin
        for slide in lesson_metadata[slide_type]:
            logging.info(f"Processing {slide['name']}")
            # Read in results; keep the sids as emails for simplicity.
            results_file = os.path.join(
                DATA_DIR, lesson_metadata["title"], f"{slide['file']}.csv"
            )
            results = EdStemReader(
                results_file, "email", "total score", sid_is_email=False
            )
            slide_scores = results.scores[results.score_col]
            # Points total assumes at least one student got the max score.
            slide_out_of = slide_scores.max()
            # Lateness factor for this slide, per student.
            slide_on_time = all_on_time[slide["name"]]
            scores += slide_scores * slide_on_time
            points_total += slide_out_of
    return scores / points_total
async def delete_layer(location_id, layer_id):
    """
    Delete layer
    ---
    delete:
      summary: Delete layer
      tags:
       - layers
      parameters:
        - name: id
          in: path
          required: true
          description: ID of the object to be deleted
      responses:
        200:
          description: The object which was deleted
          content:
            application/json:
              schema: Layer
    """
    # Resolve the parent location first; 404 if it does not exist.
    location = g.active_incident.Location.find_by_id(location_id)
    if location is None:
        message = "Location {} was not found".format(location_id)
        raise exceptions.NotFound(description=message)
    # Then resolve the layer within that location.
    layer = location.Layer.find_by_id(layer_id)
    if layer is None:
        message = "Layer {} was not found".format(layer_id)
        raise exceptions.NotFound(description=message)
    layer.delete()
    # Echo the deleted object back to the caller.
    return jsonify(layer), HTTPStatus.OK
def fit_and_save_model(params, data, targets):
    """Fit a scaler + XGBoost-classifier pipeline and persist it to disk.

    Args:
        params: dict with 'learning_rate' and 'max_depth' hyperparameters.
        data: training features.
        targets: training labels.

    Returns:
        The fitted sklearn pipeline.
    """
    classifier = XGBClassifier(
        learning_rate=params['learning_rate'],
        max_depth=int(params['max_depth']),
    )
    pipe = make_pipeline(StandardScaler(), classifier)
    pipe.fit(data, targets)
    # Persist the pipeline to disk
    dump(pipe, 'ADXL345_xgb_gesture.joblib')
    print('Done saving')
    return pipe
def relu_backward(dout, cache):
    """
    Backward pass for the ReLU function layer.
    Arguments:
        dout: numpy array, gradient of the output passed from the next
              layer (any shape)
        cache: the forward-pass input x
    Output:
        dx: numpy array, gradient for the input, same shape as dout
    """
    x = cache
    # Gradient flows through wherever the forward input was non-negative;
    # elsewhere ReLU was flat, so the gradient is zero.
    pass_through = x >= 0
    return dout * pass_through
def run_decopath(request):
    """Process file submission page.

    Handles three mutually exclusive POST submissions, selected by which
    key is present in ``request.POST``:
      * 'submit_results' — upload pre-computed pathway analysis results;
      * 'run_ora'        — validate forms and launch an ORA job;
      * otherwise        — validate forms and launch a GSEA job.
    Invalid forms yield ``HttpResponseBadRequest``; successful job starts
    redirect to the experiments page.
    """
    # Get current user
    current_user = request.user
    user_email = current_user.email
    # Check if user submits pathway analysis results
    if 'submit_results' in request.POST:
        # Populate form with data from the request
        results_form = UploadResultsForm(request.POST, request.FILES)
        results_form_fc = UploadFoldChangesForm(request.POST, request.FILES)
        # If form is not valid, return HttpResponseBadRequest
        if not results_form.is_valid():
            err = results_form.errors
            return HttpResponseBadRequest(f'{err} {INVALID_RESULTS_FORM_MSG}')
        # If form is not valid, return HttpResponseBadRequest
        if not results_form_fc.is_valid():
            return HttpResponseBadRequest(INVALID_FOLD_CHANGES_MSG)
        # If form is valid, process file data in request.FILES and throw HTTPBadRequest if improper submission
        results_file_val = process_user_results(current_user, user_email, results_form, results_form_fc)
        # Check if file cannot be read and throw error
        # (a str return value carries the error message to the client)
        if isinstance(results_file_val, str):
            return HttpResponseBadRequest(results_file_val)
    # Check if user submits form to run ORA
    elif 'run_ora' in request.POST:
        db_form = ORADatabaseSelectionForm(request.POST, request.FILES)
        parameters_form = ORAParametersForm(request.POST, request.FILES)
        form = ORAOptions(request.POST, request.FILES)  # dict
        # Check if form is valid
        if not db_form.is_valid():
            return HttpResponseBadRequest(DB_SELECTION_MSG)
        # Check if parameters form is valid
        if not parameters_form.is_valid():
            return HttpResponseBadRequest(MAX_MIN_GENES_MSG)
        # Check if form is valid
        if not form.is_valid():
            return HttpResponseBadRequest(FORM_COMPLETION_MSG)
        # If form is valid, process file data in request.FILES and throw HTTPBadRequest if improper submission
        run_ora_val = process_files_run_ora(current_user, user_email, form, db_form, parameters_form)
        # str return → error message; bool return → bad DB selection;
        # otherwise a (job, user) pair to record before redirecting.
        if isinstance(run_ora_val, str):
            return HttpResponseBadRequest(run_ora_val)
        elif isinstance(run_ora_val, bool):
            messages.error(request, DB_SELECTION_MSG)
            return redirect("/")
        else:
            _update_job_user(request, current_user, run_ora_val[0], run_ora_val[1])
    # Check if user submits form to run GSEA
    else:
        db_form = GSEADatabaseSelectionForm(request.POST, request.FILES)
        form = UploadGSEAForm(request.POST, request.FILES)
        fc_form = UploadFoldChangesFormGSEA(request.POST, request.FILES)
        # Check if form is valid
        if not db_form.is_valid():
            err = db_form.errors
            return HttpResponseBadRequest(f'{err} {DB_SELECTION_MSG}')
        if not form.is_valid():
            err = form.errors
            return HttpResponseBadRequest(f'{err} {FORM_COMPLETION_MSG}')
        if not fc_form.is_valid():
            err = fc_form.errors
            return HttpResponseBadRequest(f'{err} {INVALID_FOLD_CHANGES_MSG}')
        # If form is valid, process file data in request.FILES and throw HTTPBadRequest if improper submission
        run_gsea_val = process_files_run_gsea(current_user, user_email, db_form, form, fc_form)
        # Same convention as the ORA branch above.
        if isinstance(run_gsea_val, str):
            return HttpResponseBadRequest(run_gsea_val)
        elif isinstance(run_gsea_val, bool):
            messages.error(request, DB_SELECTION_MSG)
            return redirect("/")
        else:
            _update_job_user(request, current_user, run_gsea_val[0], run_gsea_val[1])
    return redirect("/experiments")
import tqdm
def load_abs_pos_sighan_plus(dataset=None, path_head=""):
    """
    Temporary deprecation !
    for abs pos bert

    Loads the expanded absolute-position SigHan dataset, tokenizes it with
    the chinese-roberta-wwm-ext tokenizer, and returns the train/valid/test
    splits as lists of per-example feature dicts.
    """
    print("Loading Expanded Abs_Pos Bert SigHan Dataset ...")
    train_pkg, valid_pkg, test_pkg = load_raw_lattice(path_head=path_head)
    tokenizer_model_name_path = "hfl/chinese-roberta-wwm-ext"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name_path)
    # NOTE(review): only the training split uses the "_plus" variant;
    # presumably intentional, but worth confirming.
    train_dataset = get_lattice_and_pos_plus(train_pkg, tokenizer)
    valid_dataset = get_lattice_and_pos(valid_pkg, tokenizer)
    test_dataset = get_lattice_and_pos(test_pkg, tokenizer)

    def transpose(inputs):
        """Convert a dict-of-lists into a list of per-example dicts."""
        features = []
        # BUG FIX: the module imports the ``tqdm`` package, not the ``tqdm``
        # callable, so the original ``tqdm(range(...))`` raised TypeError.
        for i in tqdm.tqdm(range(len(inputs["input_ids"]))):
            # ugly fix for encoder model (the same length / truncation)
            features.append({key: inputs[key][i] for key in inputs.keys()})
        return features

    return transpose(train_dataset), transpose(valid_dataset), transpose(test_dataset)
def _train_on_tpu_system(model_fn_wrapper, dequeue_fn):
    """Executes `model_fn_wrapper` multiple times on all TPU shards.

    Args:
        model_fn_wrapper: wrapper around the user model_fn; its
            ``config.tpu_config`` supplies iterations_per_loop/num_shards.
        dequeue_fn: function that dequeues one batch from the infeed queue.

    Returns:
        The scalar loss tensor from a single shard after running
        ``iterations_per_loop`` training steps on every shard.
    """
    config = model_fn_wrapper.config.tpu_config
    iterations_per_loop = config.iterations_per_loop
    num_shards = config.num_shards
    # One training step that reads its inputs from the infeed queue.
    single_tpu_train_step = model_fn_wrapper.convert_to_single_tpu_train_step(
        dequeue_fn)
    # Repeat the step on-device so the host is involved only once per
    # `iterations_per_loop` steps.
    multi_tpu_train_steps_on_single_shard = (lambda: training_loop.repeat(  # pylint: disable=g-long-lambda
        iterations_per_loop, single_tpu_train_step, [_INITIAL_LOSS],
        name=b'loop'))
    # Replicate across all shards; only one shard's loss is returned.
    (loss,) = tpu.shard(multi_tpu_train_steps_on_single_shard,
                        inputs=[],
                        num_shards=num_shards,
                        outputs_from_all_shards=False)
    return loss
def get_docs_url(model):
    """
    Return the documentation URL for the specified model.
    """
    meta = model._meta
    return '{}docs/models/{}/{}/'.format(settings.STATIC_URL, meta.app_label, meta.model_name)
import random
def _fetch_random_words(n=1000):
    """Return ``n`` words sampled reproducibly from the NLTK word corpus."""
    # Fixed seed so every run produces the same word list.
    random.seed(42)
    # Download the corpus if not already cached locally.
    nltk.download('words')
    word_list = nltk.corpus.words.words()
    random.shuffle(word_list)
    return word_list[:n]
import json
def cache_pdf(pdf, document_number, metadata_url):
    """Update submission metadata and cache the comment PDF in S3.

    Args:
        pdf: rendered PDF bytes to cache.
        document_number: document identifier used for the download filename.
        metadata_url: signed URL object pointing at the metadata record.

    Returns:
        The freshly generated signed URL under which the PDF was cached.
    """
    signed_url = SignedUrl.generate()
    disposition = generate_content_disposition(document_number,
                                               draft=False)
    # Write the metadata record pointing at the final PDF location.
    metadata_body = json.dumps({'pdfUrl': metadata_url.url})
    s3_client.put_object(
        Body=metadata_body,
        Bucket=settings.ATTACHMENT_BUCKET,
        Key=metadata_url.key,
    )
    # Cache the rendered PDF itself under the new signed key.
    s3_client.put_object(
        Body=pdf,
        ContentType='application/pdf',
        ContentDisposition=disposition,
        Bucket=settings.ATTACHMENT_BUCKET,
        Key=signed_url.key,
    )
    return signed_url
def get_tile_list(geom,
                  zoom=17):
    """Generate the Tile List for The Tasking List

    Parameters
    ----------
    geom: shapely geometry of the area of interest.
    zoom : int
        Zoom level(s) for the tiles.

    Returns
    -------
    list of mercantile tiles that intersect ``geom``.
    """
    west, south, east, north = geom.bounds
    candidates = mercantile.tiles(west, south, east, north, zooms=zoom)
    # The bounding box overshoots the geometry, so keep only tiles whose
    # footprint actually intersects it.
    return [
        tile
        for tile in candidates
        if geometry.shape(mercantile.feature(tile)['geometry']).intersects(geom)
    ]
def findTolerableError(log, file='data/psf4x.fits', oversample=4.0, psfs=10000, iterations=7, sigma=0.75):
    """
    Calculate ellipticity and size for PSFs of different scaling when there is a residual
    pixel-to-pixel variation.

    Args:
        log: logger passed through to the shape-measurement code.
        file: FITS file containing the (oversampled) PSF.
        oversample: oversampling factor of the input PSF.
        psfs: number of random PSF peak scalings to simulate.
        iterations: shape-measurement iteration count.
        sigma: shape-measurement weighting sigma.

    Returns:
        dict mapping each residual level to a dict of numpy arrays
        (e1, e2, e, R2) over all simulated PSFs.
    """
    # read in PSF and renormalize it
    data = pf.getdata(file)
    data /= np.max(data)
    # PSF scalings for the peak pixel, in electrons
    scales = np.random.random_integers(1e2, 2e5, psfs)
    # set the scale for shape measurement
    # BUG FIX: the key was misspelled 'itereations', so the requested
    # iteration count was silently ignored by the measurement settings.
    settings = dict(sampling=1.0/oversample, iterations=iterations, sigma=sigma)
    # residual from a perfect no pixel-to-pixel non-uniformity
    residuals = np.logspace(-7, -1.6, 9)[::-1]  # largest first
    tot = residuals.size
    res = {}
    for i, residual in enumerate(residuals):
        # BUG FIX: Python 3 print function (was a Python 2 print statement)
        print('%i / %i' % (i + 1, tot))
        R2 = []
        e1 = []
        e2 = []
        e = []
        # loop over the PSFs
        for scale in scales:
            # random residual pixel-to-pixel variations
            if oversample < 1.1:
                residualSurface = np.random.normal(loc=1.0, scale=residual, size=data.shape)
            elif oversample == 4.0:
                # build a blocky non-uniformity map matching the 4x oversampling
                tmp = np.random.normal(loc=1.0, scale=residual, size=(170, 170))
                residualSurface = zoom(tmp, 4.013, order=0)
            else:
                sys.exit('ERROR when trying to generate a blocky pixel-to-pixel non-uniformity map...')
            # make a copy of the PSF and scale it with the given scaling
            # and then multiply with a residual pixel-to-pixel variation
            tmp = data.copy() * scale * residualSurface
            # measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(tmp.copy(), log, **settings)
            results = sh.measureRefinedEllipticity()
            # save values
            e1.append(results['e1'])
            e2.append(results['e2'])
            e.append(results['ellipticity'])
            R2.append(results['R2'])
        out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        res[residual] = out
    return res
def squeeze_labels(labels):
    """Relabel a label image in place so labels form range(0, objects + 1).

    Bugs fixed relative to the original:
    * ``labels[...] == new_label`` *compared* instead of assigned, so the
      function never modified anything.
    * the label list came from ``regionprops`` (which already excludes the
      background), so the additional ``[1:]`` slice skipped the first object
      and the resulting mapping could collide.

    Args:
        labels: integer label image; 0 is treated as background.

    Returns:
        The same array, relabeled in place to consecutive labels 1..n.
    """
    # Sorted, nonzero labels. Because they are processed in ascending order,
    # new_label <= label_id always holds, so the in-place rewrite below can
    # never collide with a not-yet-visited original label.
    label_ids = np.unique(labels)
    label_ids = label_ids[label_ids != 0]
    for new_label, label_id in enumerate(label_ids, start=1):
        labels[labels == label_id] = new_label
    return labels
import itertools
def orient_edges_gs2(edge_dict, Mb, data, alpha):
    """
    Similar algorithm as above, but slightly modified for speed?
    Need to test.

    Args:
        edge_dict: undirected adjacency dict {node: [neighbors]}.
        Mb: Markov blanket dict {node: [blanket members]}.
        data: data matrix indexed by variable columns.
        alpha: significance level for the conditional-independence test.

    Returns:
        Directed adjacency dict {node: [children]}.
    """
    d_edge_dict = dict([(rv, []) for rv in edge_dict])
    for X in edge_dict.keys():
        for Y in edge_dict[X]:
            # Neighbors of X that are neither Y nor adjacent to Y.
            nxy = set(edge_dict[X]) - set(edge_dict[Y]) - {Y}
            for Z in nxy:
                if Y not in d_edge_dict[X]:
                    d_edge_dict[X].append(Y)  # SET Y -> X
                # Condition on the smaller of the two candidate blanket sets.
                # BUG FIX: plain ``min`` of two sets compares them by the
                # (partial) subset order, not by size; ``key=len`` picks the
                # set with fewer elements, as intended.
                B = min(set(Mb[Y]) - {X} - {Z}, set(Mb[Z]) - {X} - {Y}, key=len)
                for i in range(len(B)):
                    for S in itertools.combinations(B, i):
                        cols = (Y, Z, X) + tuple(S)
                        pval = mi_test(data[:, cols])
                        if pval < alpha and X in d_edge_dict[Y]:  # Y IS independent of Z given S+X
                            d_edge_dict[Y].remove(X)
                    if X in d_edge_dict[Y]:
                        break
    return d_edge_dict
def _divide_evenly(start, end, max_width):
""" Evenly divides the interval between ``start`` and ``end`` into
intervals that are at most ``max_width`` wide.
Arguments
---------
start : float
Start of the interval
end : float
End of the interval
max_width : float
Maximum width of the divisions
Returns
-------
divisions : ndarray
Resulting array
"""
num_partitions = int(ceil((end - start)/max_width))
return linspace(start, end, num_partitions+1) | 08647cc55eca35447a08fd4ad3959db56dffc565 | 32,919 |
def uncompress_pubkey(pubkey):
    """ Convert compressed public key to uncompressed public key.

    Args:
        pubkey (str): Hex encoded 33-byte compressed public key

    Return:
        str: Hex encoded uncompressed 65-byte public key (4 + x + y).
    """
    # Decode to an (x, y) curve point, then re-serialize uncompressed.
    point = encoding.sec_to_public_pair(h2b(pubkey))
    uncompressed_sec = encoding.public_pair_to_sec(point, compressed=False)
    return b2h(uncompressed_sec)
def make_celery(main_flask_app):
    """Generates the celery object and ties it to the main Flask app object.

    Every task created from the returned Celery instance executes inside the
    Flask application context, so Flask extensions (database, config, etc.)
    are usable from task bodies.

    Args:
        main_flask_app: the Flask application to bind tasks to.

    Returns:
        The configured Celery instance.
    """
    celery = Celery(main_flask_app.import_name, include=["feed.celery_periodic.tasks"])
    # Load the config object matching the app's ENV, defaulting to DevConfig.
    celery.config_from_object(envs.get(main_flask_app.config.get("ENV"), "config.DevConfig"))
    task_base = celery.Task
    class ContextTask(task_base):
        # abstract=True keeps Celery from registering this helper as a task.
        abstract = True
        def __call__(self, *args, **kwargs):
            # Run the task body inside the Flask application context.
            with main_flask_app.app_context():
                return task_base.__call__(self, *args, **kwargs)
    celery.Task = ContextTask
    return celery
def adjustForWeekdays(dateIn):
    """ Returns a date based on whether or not the input date
    is on a weekend. If the input date falls on a Saturday or
    Sunday, the return is the date on the following Monday. If
    not, it returns the original date.

    Args:
        dateIn: a ``datetime.date`` or ``datetime.datetime``.

    Returns:
        The following Monday for weekend dates, otherwise ``dateIn``.
    """
    # weekday(): Monday == 0 ... Saturday == 5, Sunday == 6.
    # BUG FIX: converted the original Python 2 print statements to the
    # print() function so this runs under Python 3.
    # If Saturday, return the following Monday.
    if dateIn.weekday() == 5:
        print("Projected End Date falls on a Saturday, correcting "
              "to fall on a Monday.")
        return dateIn + timedelta(days=2)
    # If Sunday, return the following Monday
    elif dateIn.weekday() == 6:
        print("Projected End Date falls on a Sunday, correcting "
              "to fall on a Monday.")
        return dateIn + timedelta(days=1)
    # On any other weekday, return the date
    else:
        return dateIn
def composed_model_input_classes(cls):
    """
    Return the list of model classes that can be accepted as inputs for ``cls``.
    TODO: lru_cache this
    """
    # Simple models and primitives accept only themselves.
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return [cls]
    if issubclass(cls, ModelNormal):
        # A discriminator widens the accepted set to the discriminated classes.
        if cls.discriminator is not None:
            return get_discriminated_classes(cls)
        return [cls]
    if issubclass(cls, ModelComposed):
        if not cls._composed_schemas['oneOf']:
            return []
        if cls.discriminator is not None:
            return get_discriminated_classes(cls)
        # Recurse into every oneOf alternative and flatten the results.
        input_classes = []
        for schema_cls in cls._composed_schemas['oneOf']:
            input_classes.extend(composed_model_input_classes(schema_cls))
        return input_classes
    return []
import os
def via_sudo():
    """
    Return `True` if Blueprint was invoked via `sudo`(8), which indicates
    that privileges must be dropped when writing to the filesystem.
    """
    env = os.environ
    # sudo exports SUDO_UID/SUDO_GID/SUDO_COMMAND; require all three and
    # that the command being run is actually blueprint.
    return ('SUDO_UID' in env
            and 'SUDO_GID' in env
            and 'blueprint' in env.get('SUDO_COMMAND', ''))
from datetime import datetime
def get_user_or_add_user(spotify_id, display_name, display_image=None, token=None):
    """Fetch an existing user or create a user.

    Args:
        spotify_id: Spotify account id used as the lookup key.
        display_name: Spotify display name for a newly created user.
        display_image: optional profile image URL.
        token: optional OAuth token object whose ``refresh_token`` is stored.

    Returns:
        The existing or newly created ``User`` row.
    """
    user = User.query.filter(User.spotify_id == spotify_id).first()
    if user is None:
        # No such user yet: build the fields for a new row.
        spotify_id = spotify_id
        spotify_display_name = display_name
        spotify_image_url = display_image
        created_at = datetime.now()
        access_token = None  # access token is filled in later
        refresh_token = token.refresh_token if token else None
        user = create_user(
            spotify_id, spotify_display_name, spotify_image_url, created_at, access_token, refresh_token)
    elif token:
        # Existing user re-authenticated: refresh the stored credentials.
        user.refresh_token = token.refresh_token
        user.spotify_image_url = display_image
        db.session.commit()
    return user
from bs4 import BeautifulSoup
def get_daily_data(y, m, d, icao):
    """
    grab daily weather data for an airport from wunderground.com

    parameter
    ---------
    y: year
    m: month
    d: day
    icao: ICAO identification code for an airport

    return
    ------
    a dictionary containing (values as strings, 'nan' when unavailable)
        "Min Temperature": daily minimum temperature
        "Max Temperature": daily maximum temperature
        "Precipitation": daily precipitation
        "Maximum Humidity": daily maximum humidity
        "Minimum Humidity": daily minimum humidity
    """
    # construct url from (y, m, d)
    url = "http://www.wunderground.com/history/airport/" + icao + '/'+\
        str(y) + "/" + str(m) + "/" + str(d) + "/DailyHistory.html"
    page = urlopen(url)
    # parse html
    soup = BeautifulSoup(page, 'html5lib')
    # return dictionary (defaults to 'nan' for every field)
    daily_data = {'Min Temperature':'nan', 'Max Temperature':'nan',
        'Precipitation':'nan', 'Maximum Humidity':'nan', 'Minimum Humidity':'nan'}
    # find rows in the main table
    all_rows = soup.find(id="historyTable").find_all('tr')
    for row in all_rows:
        # attempt to find item name
        try:
            item_name = row.findAll('td', class_='indent')[0].get_text()
        except Exception as e:
            # if run into error, skip this row
            continue
        # temperature and precipitation are wrapped in a wx-value span
        if item_name in ('Min Temperature','Max Temperature', 'Precipitation'):
            try:
                val = row.find_all('span', class_='wx-value')[0].get_text()
            except Exception as e:
                continue
            if is_number(val):
                daily_data[item_name] = val
        # humidity values sit in the second plain table cell
        if item_name in ('Maximum Humidity', 'Minimum Humidity'):
            try:
                val = row.find_all('td')[1].get_text()
            except Exception as e:
                continue
            if is_number(val):
                daily_data[item_name] = val
    return daily_data
def create_CIM_object(cimpath):
    """This function aims to speed up other bits of this and ``cgrid``
    modules, by returning a ``casacore.images.image.image`` object.

    The trick is, that the ``cimpath`` argument can be either a string i.e. the path
    to the CASAImage which will be read in and returned, **or** it can be already an
    in-memory ``casacore.images.image.image`` object. This way each CASAImage
    ideally gets read from disk only once.

    Parameters
    ==========
    cimpath: str or ``casacore.images.image.image``
        The input CASAImage path or an already-open image object

    Returns
    =======
    cim: ``casacore.images.image.image`` object
        The in-memory CASAImage
    """
    # IMPROVEMENT: the original constructed a throwaway empty image on every
    # call just to compare type() objects; isinstance() performs the same
    # check without that overhead (and without the fragile type equality).
    if isinstance(cimpath, casaimage.image):
        return cimpath
    log.debug('Open image: {0:s}'.format(str(cimpath)))  # it is a path string here
    return casaimage.image(cimpath)
def image_tag_create(context, image_id, value, session=None):
    """Create an image tag and return the stored tag value."""
    # Fall back to a fresh session when none was supplied.
    session = session if session else get_session()
    tag_ref = models.ImageTag(image_id=image_id, value=value)
    tag_ref.save(session=session)
    return tag_ref['value']
def test_branch_same_shape():
    """
    Feature: control flow function.
    Description: Two branch must return the same shape.
    Expectation: Null.
    """
    class Net(Cell):
        # Minimal network mixing for/if/break/continue to exercise
        # graph-mode control-flow compilation.
        def __init__(self):
            super().__init__()
            self.a = 1
        def construct(self, x, y):
            for k in range(1):
                if x != 1:
                    for _ in range(1):
                        y = k * x
                        y = self.a + y
                    if x > 5:
                        break
                if x == 5:
                    for _ in range(1):
                        y = self.a - y
                    if x == y:
                        continue
            return x + y
    x = np.array([-1], np.float32)
    y = np.array([2], np.float32)
    net = Net()
    # Differentiate w.r.t. the second input.
    grad_net = F.grad(net, grad_position=(1, 1))
    context.set_context(mode=context.GRAPH_MODE)
    fgrad = grad_net(Tensor(x), Tensor(y))
    print(fgrad)
def potential(__func__=None, **kwds):
    """
    Decorator function instantiating potentials. Usage:

    @potential
    def B(parent_name = ., ...)
        return baz(parent_name, ...)

    where baz returns the deterministic B's value conditional
    on its parents.

    :SeeAlso:
    Deterministic, deterministic, Stochastic, Potential, stochastic, data, Model
    """
    def instantiate_pot(__func__):
        # Extract the parents from the wrapped function's signature;
        # the function itself becomes the potential's logp.
        junk, parents = _extract(
            __func__, kwds, keys, 'Potential', probe=False)
        return Potential(parents=parents, **kwds)
    keys = ['logp']
    instantiate_pot.kwds = kwds
    # Support both bare @potential and parameterized @potential(...) usage.
    if __func__:
        return instantiate_pot(__func__)
    return instantiate_pot
def numBytes(qimage):
    """Return the image buffer size; compatibility shim between PyQt4 and PyQt5."""
    if hasattr(qimage, 'numBytes'):
        return qimage.numBytes()   # PyQt4 API
    return qimage.byteCount()      # PyQt5 renamed it to byteCount()
def _dt_to_decimal_time(datetime):
    """Convert a datetime.datetime object into a fraction-of-a-day float.

    ``date2num`` counts days since 01/01/0001; the fractional part of that
    count is how far through the day the timestamp falls (the time).
    """
    days_since_epoch = date2num(datetime)
    return days_since_epoch - int(days_since_epoch)
def poll_create(event, context):
    """
    Return true if the resource has been created and false otherwise so
    CloudFormation polls again.
    """
    name = get_endpoint_name(event)
    logger.info("Polling for update of endpoint: %s", name)
    return is_endpoint_ready(name)
def outsatstats_all(percent, Reads_per_CB, counts, inputbcs):
    """Take input from downsampled bam stats and returns df of genes, UMIs and reads for each bc.

    Args:
        percent (int): The percent the bamfile was downsampled.
        Reads_per_CB (file path): Space delimited file of the barcodes and # of reads.
        counts (file path): tab delimited count matrix made from downsampled bam
        inputbcs (file path): File containing the list of barcodes used
    Returns:
        reads (df): A pandas df with columns Reads, Genes, and UMI for each bc
    """
    UMIs = pd.read_csv(
        counts,
        delimiter='\t',
        index_col='gene',
        compression='gzip')
    # fill in an empty column for any barcodes that
    # have no UMIs at this read depth
    for i in inputbcs['Barcode'].tolist():
        if i not in UMIs.columns:
            UMIs[i] = UMIs.shape[0] * [0]
    # remove barcodes not in the list
    UMIs = UMIs[inputbcs['Barcode']]
    # make reads df
    # NOTE(review): '\s' as delimiter is treated as a regex separator by
    # pandas (emits a ParserWarning) — confirm intended.
    reads = pd.read_csv(Reads_per_CB, delimiter= '\s', header=None)
    # barcode is the third ':'-separated field of the second column
    reads[1] = reads[1].str.split(':', expand=True)[2]
    reads.columns = ["Reads", "Barcodes"]
    reads.index = reads['Barcodes']
    reads = reads[['Reads']]
    reads = reads[reads.index.isin(inputbcs['Barcode'])]
    # barcodes with no reads at all get a zero row
    for i in inputbcs['Barcode'].tolist():
        if i not in reads.index:
            reads.loc[i] = 0
    # align row order with the input barcode list
    reads = reads.reindex(inputbcs['Barcode'], copy=False)
    # count number of genes for each barcode and UMIs per barcode
    reads['Genes'] = np.count_nonzero(UMIs, axis=0)
    reads['UMI'] = UMIs.sum(axis=0)
    return(reads)
import os
def relative_uri(source, target):
    """
    Make a relative URI from source to target.

    Returns ``target`` unchanged when the scheme/netloc differ (no relative
    form exists); otherwise returns the path of ``target`` relative to the
    directory of ``source``, preserving target's params/query/fragment.
    """
    su = patched_urllib_parse.urlparse(source)
    tu = patched_urllib_parse.urlparse(target)
    # params, query and fragment of the target are carried over verbatim.
    extra = list(tu[3:])
    relative = None
    if tu[0] == '' and tu[1] == '':
        # Target has no scheme/netloc: it may already be relative.
        if tu[2] == su[2]:
            relative = ''
        elif not tu[2].startswith('/'):
            relative = tu[2]
    elif su[0:2] != tu[0:2]:
        # Different scheme or netloc: cannot be expressed relatively.
        return target
    if relative is None:
        if tu[2] == su[2]:
            relative = ''
        else:
            # Path relative to the directory containing the source document.
            relative = os.path.relpath(tu[2], os.path.dirname(su[2]))
    if relative == '.':
        relative = ''
    relative = patched_urllib_parse.urlunparse(["", "", relative] + extra)
    return relative
import os
import pickle
def fill_gaps_batch(slice_list, acq_datelist, training_data_path, cluster_model_path, outDir=None, cpu=20, reg_kws=None):
    """
    This function fills gaps for a slice of time series.

    Parameters
    ----------
    slice_list: list
        Specification of list of slice time series file.
        The slice time series file should be save in npy format.
        Each file saves a slice of time series (n_pixels, n_timesteps)
    acq_datelist: list
        list of acquisition dates (n_timesteps)
    training_data_path: string
        Collection of no-gap time series data as training data
    cluster_model_path: string
        Cluster model based on the training data
    outDir: String
        Specification of output location.
    cpu:
        Number of worker processes to run concurrently
    reg_kws: dict, optional
        Keyword arguments for :func:`gap_fill_pixel`.

    Examples
    --------
    n_cpu = 20
    dates_list = np.load(os.path.join(work_dir, 'dates.npy'))
    outname = os.path.join(work_dir, 'training')
    training_data_path = '{}.pkl'.format(outname)
    cluster_model_path = '{}.model'.format(outname)
    fill_gaps(file_list, dates_list, training_data_path, cluster_model_path, cpu=n_cpu)
    """
    # Never mutate the caller's kwargs dict.
    reg_kws = {} if reg_kws is None else reg_kws.copy()
    if outDir is None:
        outDir = os.path.dirname(slice_list[0])
    if not os.path.exists(outDir): ## if outfolder is not already existed creating one
        os.makedirs(outDir)
    if os.path.exists(training_data_path):
        training_data = pd.read_pickle(training_data_path)
        # Keep only the timesteps matching the requested acquisition dates.
        date_idx = training_data.index.isin(acq_datelist)
        training_data = training_data[date_idx].values.T
    else:
        print(training_data_path)
        print('Can not find training data! Please save samples from overlap pixels.')
        return None
    if os.path.exists(cluster_model_path):
        cluster_model = pickle.load(open(cluster_model_path, 'rb'))
        labels = cluster_model.labels_
        centroids = cluster_model.cluster_centers_
        # Restrict centroids to the same timesteps as the training data.
        centroids = centroids[:, date_idx]
    else:
        print('Can not find cluster model!')
        return None
    print('Imputing')
    # Process slices in batches of `cpu`, one worker process per slice.
    for i_slice in np.arange(0, len(slice_list), cpu):
        start_slice = i_slice
        end_slice = min(start_slice+cpu, len(slice_list))
        jobs = []
        for i_step in range(start_slice, end_slice):
            print('Processing line: ', i_step)
            ts_path = slice_list[i_step]
            print('training_data.shape, centroids.shape: ', training_data.shape, centroids.shape)
            if not os.path.exists(ts_path):
                print('Can not find file: ', ts_path)
                continue
            p = Process(target=gap_fill_slice, args=(outDir, ts_path,
                        training_data, labels, centroids, reg_kws))
            jobs.append(p)
            p.start()
        # Wait for the whole batch before launching the next one.
        for p in jobs:
            p.join()
from typing import Optional
from typing import List
import random
import itertools
def generate_sums(
    n: int, min_terms: int, max_terms: int, *, seed=12345, fold=False, choose_from=None
) -> Optional[List[ExprWithEnv]]:
    """ Generate the specified number of example expressions (with no duplicates).
    The constants used in the expressions will normally have a maximum value of 2,
    but this is increased if the number of examples requested is very large.
    If choose_from is not None then the expressions will be randomly sampled from a
    larger set of expressions, where the larger set consists of the expressions that
    would be generated for n=choose_from.
    """
    assert min_terms <= max_terms
    if choose_from is None:
        choose_from = n
    else:
        assert choose_from >= n
    rng = random.Random(seed)
    approx_different_sums = sum(
        i * i * i for i in range(min_terms, max_terms + 1)
    ) # Crude approximation of the total number of examples that it is possible to generate with constants at most 2.
    sums = set()        # for O(1) duplicate detection
    sums_list = []      # preserves generation order
    for num_attempts in itertools.count():
        # Cycle through the allowed term counts.
        num_terms = num_attempts % (max_terms - min_terms + 1) + min_terms
        # Grow the constant range once the small space is (roughly) exhausted.
        max_constant_term = num_attempts // approx_different_sums + 2
        new_sum = generate_sum(num_terms, rng, max_constant_term, fold=fold)
        if new_sum not in sums:
            sums.add(new_sum)
            sums_list.append(new_sum)
            if len(sums_list) >= choose_from:
                return sums_list if choose_from == n else rng.sample(sums_list, n)
    return None
import string
import sys
def convert_from_string(str_input_xml):
    """Convert a string into a Python data structure type.

    > *Input arguments*

    * `str_input_xml` (*type:* `str`): Input string

    > *Returns*

    `bool`, `int`, `float`, list of `float` or `str`.  ``None`` input yields
    the empty string; unparseable input is returned unchanged.
    """
    if str_input_xml is None:
        return ''
    value = None

    def is_hex(s):
        # Hex literals must look like '0x...', contain only hex digits (plus
        # the 'x'), parse in base 16, and have an even character count.
        if len(s) < 2:
            return False
        if '0x' != s[0:2]:
            return False
        for e in s:
            if e not in string.hexdigits and e != 'x':
                return False
        try:
            int(s, 16)
        except BaseException:
            return False
        return len(s) % 2 == 0

    def is_digit_string(s):
        # Renamed from the original ``is_numeric``, which was shadowed by a
        # same-named boolean flag further down.
        if sys.version_info[0] > 2:
            return str(s).isdigit()
        else:
            return s.isdigit()

    if is_hex(str_input_xml):
        value = int(str_input_xml, 0)
    elif isinstance(str_input_xml, list):
        value = str_input_xml
    elif is_digit_string(str_input_xml):
        value = int(str_input_xml)
    elif str_input_xml in ['true', 'false', 'True', 'False']:
        value = True if str_input_xml in ['true', 'True'] else False
    elif ' ' in str_input_xml:
        # Whitespace-separated token list: parse as a list of floats when every
        # token is numeric; otherwise fall through and return the raw string.
        all_floats = True
        for c in str_input_xml.split():
            try:
                float(c)
            except BaseException:
                all_floats = False
                break
        if all_floats:
            value = list()
            for item in str_input_xml.split():
                value.append(float(item))
    else:
        try:
            value = float(str_input_xml)
        except ValueError:
            value = str(str_input_xml)
    return value if value is not None else str_input_xml
def arr2pil(frame: npt.NDArray[np.uint8]) -> Image.Image:
    """Convert a BGR ``npt.NDArray`` frame into an RGB ``Image.Image``.

    Args:
        frame (npt.NDArray[np.uint8]) : A BGR ``npt.NDArray``.

    Returns:
        Image.Image: A RGB ``Image.Image``
    """
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb_frame)
def sophos_firewall_app_category_update_command(client: Client, params: dict) -> CommandResults:
    """Update an existing application-category object.

    Args:
        client (Client): Sophos XG Firewall Client
        params (dict): params to update the object with

    Returns:
        CommandResults: Command results object
    """
    endpoint = APP_CATEGORY['endpoint_tag']
    headers = APP_CATEGORY['table_headers']
    return generic_save_and_get(client, endpoint, params, app_category_builder, headers)
import collections
def find_identities(l):
    """
    Takes in a list and returns a dictionary with seqs as keys and positions of identical elements in list as values.

    argvs: l = list, e.g. mat[:,x]

    Returns a dict mapping each distinct element (in first-appearance order)
    to the list of indices at which it occurs in ``l``.
    """
    # Single pass over the list: append each index to its element's bucket.
    # This replaces the original Counter + per-element list scan, which was
    # O(n * k) for k distinct elements.
    identDict = {}
    for position, item in enumerate(l):
        identDict.setdefault(item, []).append(position)
    return identDict
def decode_event_to_internal2(event):
    """ Enforce the binary encoding of address for internal usage. """
    data = event.event_data

    def canonical(field):
        # Note: All addresses inside the event_data must be decoded.
        return to_canonical_address(data['args'][field])

    event_name = data['event']
    if event_name == EVENT_TOKEN_ADDED2:
        data['token_network_address'] = canonical('token_network_address')
        data['token_address'] = canonical('token_address')
    elif event_name == EVENT_CHANNEL_NEW2:
        data['participant1'] = canonical('participant1')
        data['participant2'] = canonical('participant2')
    elif event_name == EVENT_CHANNEL_NEW_BALANCE2:
        data['participant'] = canonical('participant')
    elif event_name == EVENT_CHANNEL_WITHDRAW:
        data['participant'] = canonical('participant')
    elif event_name == EVENT_CHANNEL_UNLOCK:
        data['participant'] = canonical('participant')
    elif event_name == EVENT_BALANCE_PROOF_UPDATED:
        data['closing_participant'] = canonical('closing_participant')
    elif event_name == EVENT_CHANNEL_CLOSED:
        data['closing_participant'] = canonical('closing_participant')
    elif event_name == EVENT_SECRET_REVEALED:
        # Secrets are stored raw; no canonical-address decoding here.
        data['secrethash'] = data['args']['secrethash']
        data['secret'] = data['args']['secret']
    return event
def _repeated_features(n, n_informative, X):
"""Randomly select and copy n features from X, from the col
range [0 ... n_informative].
"""
Xrep = np.zeros((X.shape[0], n))
for jj in range(n):
rand_info_col = np.random.random_integers(0, n_informative - 1)
Xrep[:, jj] = X[:, rand_info_col]
return Xrep | f15811a34bcc94fff77812a57a2f68178f7a8802 | 32,943 |
def create_auto_edge_set(graph, transport_guid):
    """Set up an automatic MultiEdgeSet for the intersite graph

    From within MS-ADTS 6.2.2.3.4.4

    :param graph: the intersite graph object
    :param transport_guid: a transport type GUID
    :return: a MultiEdgeSet
    """
    edge_set = MultiEdgeSet()
    # use a NULL guid, not associated with a SiteLinkBridge object
    edge_set.guid = misc.GUID()
    edge_set.edges.extend(
        link for link in graph.edges if link.con_type == transport_guid
    )
    return edge_set
def jaccard(box_a, box_b):
    """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
    is simply the intersection over union of two boxes. Here we operate on
    ground truth boxes and default boxes.

    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)

    Args:
        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]

    Return:
        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    overlap = intersect(box_a, box_b)
    widths_a = box_a[:, 2] - box_a[:, 0]
    heights_a = box_a[:, 3] - box_a[:, 1]
    widths_b = box_b[:, 2] - box_b[:, 0]
    heights_b = box_b[:, 3] - box_b[:, 1]
    area_a = (widths_a * heights_a).unsqueeze(1).expand_as(overlap)  # [A,B]
    area_b = (widths_b * heights_b).unsqueeze(0).expand_as(overlap)  # [A,B]
    union = area_a + area_b - overlap
    return overlap / union
def concat(input, axis, main_program=None, startup_program=None):
    """
    Concatenate the ``input`` variables along ``axis`` and return the
    resulting variable.
    """
    helper = LayerHelper('concat', **locals())
    result = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='concat',
        inputs={'X': input},
        outputs={'Out': [result]},
        attrs={'axis': axis})
    return result
def is_available():
    """Return true if a pdfjs installation is available."""
    try:
        for resource in ('build/pdf.js', 'web/viewer.html'):
            get_pdfjs_res(resource)
    except PDFJSNotFound:
        return False
    return True
def reissueMissingJobs(updatedJobFiles, jobBatcher, batchSystem,
                       childJobFileToParentJob, childCounts, config,
                       killAfterNTimesMissing=3):
    """Check all the current job ids are in the list of currently running batch system jobs.

    If a job is missing, we mark it as so, if it is missing for a number of runs of
    this function (say 10).. then we try deleting the job (though its probably lost), we wait
    then we pass the job to processFinishedJob.

    Returns True when no jobs are currently marked as missing.
    """
    runningJobs = set(batchSystem.getIssuedJobIDs())
    jobIDsSet = set(jobBatcher.getJobIDs())
    #Clean up the reissueMissingJobs_missingHash hash, getting rid of jobs that have turned up
    missingJobIDsSet = set(reissueMissingJobs_missingHash.keys())
    for jobID in missingJobIDsSet.difference(jobIDsSet):
        reissueMissingJobs_missingHash.pop(jobID)
        logger.critical("Job id %s is no longer missing" % str(jobID))
    assert runningJobs.issubset(jobIDsSet) #Assert checks we have no unexpected jobs running
    jobsToKill = []
    for jobID in set(jobIDsSet.difference(runningJobs)):
        jobFile = jobBatcher.getJob(jobID)
        # dict.has_key() is Python-2 only; the "in" operator is equivalent and
        # works on both Python 2 and Python 3.
        if jobID in reissueMissingJobs_missingHash:
            reissueMissingJobs_missingHash[jobID] = reissueMissingJobs_missingHash[jobID]+1
        else:
            reissueMissingJobs_missingHash[jobID] = 1
        timesMissing = reissueMissingJobs_missingHash[jobID]
        logger.critical("Job %s with id %s is missing for the %i time" % (jobFile, str(jobID), timesMissing))
        if timesMissing == killAfterNTimesMissing:
            # Give up tracking this job and schedule it for the kill list.
            reissueMissingJobs_missingHash.pop(jobID)
            jobsToKill.append(jobID)
    killJobs(jobsToKill, updatedJobFiles, jobBatcher, batchSystem, childJobFileToParentJob, childCounts, config)
    return len(reissueMissingJobs_missingHash) == 0
def all_children(wid):
    """Return all children of a widget."""
    # Breadth-first walk: scan the result list by index while it grows.
    found = wid.winfo_children()
    index = 0
    while index < len(found):
        found.extend(found[index].winfo_children())
        index += 1
    return found
def logout():
    """Log the current user out and redirect to the login page."""
    flash("You Have Been Logged Out")
    # remove user session cookies; pop with a default so a stale or expired
    # session (no "user" key) does not raise KeyError and 500 the request
    session.pop("user", None)
    return redirect(url_for("login"))
def listtodict(l: ty.Sequence) -> ty.Mapping:
"""Converts list to dictionary"""
return dict(zip(l[::2], l[1::2])) | 80e645c3b7834e4fd5980fdb3e5df75114e0da82 | 32,951 |
def sqrtspace(a, b, n_points):
    """
    :return: Distribute n_points quadratically from point a to point b, inclusive
    """
    fractions = np.linspace(0, 1, n_points)
    return fractions * fractions * (b - a) + a
from datetime import datetime
def tstr2iso(input_string: str) -> datetime:
    """
    Convert a specific type of ISO string that are compliant with file pathing requirement to ISO datetime.

    :return: parsed ``datetime``
    """
    # Strip the colons, then delegate to the colon-free parser.
    return tstr2iso_nocolon(input_string.replace(":", ""))
def test_processing_hooks_are_inherited():
    """Processing hooks are inherited from base classes if missing.
    """
    class TestView(DummyBase):
        def __call__(self, *args, **kwargs):
            return self.count
    view = create_view(TestView)
    results = [view() for _ in range(3)]
    assert results == [2, 4, 6]
import argparse
from astrometry.util.plotutils import PlotSequence
from astrometry.util.multiproc import multiproc
from re import I
def main():
    """Run calibration processing for survey CCDs.

    Parses command-line options, selects the CCDs to process (from a
    --ccds table, an --expnum list, or explicit "expnum-ccdname"
    positional arguments) and runs the calibration pipeline on each,
    optionally across multiple threads.

    Returns:
        0 on completion.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--force', action='store_true',
                        help='Run calib processes even if files already exist?')
    parser.add_argument('--ccds', help='Set ccds.fits file to load')
    parser.add_argument('--expnum', type=str, help='Cut to a single or set of exposures; comma-separated list')
    parser.add_argument('--extname', '--ccdname', help='Cut to a single extension/CCD name')
    parser.add_argument('--no-psf', dest='psfex', action='store_false',
                        help='Do not compute PsfEx calibs')
    parser.add_argument('--no-sky', dest='sky', action='store_false',
                        help='Do not compute sky models')
    parser.add_argument('--run-se', action='store_true', help='Run SourceExtractor')
    parser.add_argument('--splinesky', action='store_true', help='Spline sky, not constant')
    parser.add_argument('--threads', type=int, help='Run multi-threaded', default=None)
    parser.add_argument('--continue', dest='cont', default=False, action='store_true',
                        help='Continue even if one file fails?')
    parser.add_argument('--plot-base', help='Make plots with this base filename')
    # actually this doesn't work for calibs...
    #parser.add_argument('--outdir', dest='output_dir', default=None,
    #                    help='Set output base directory')
    parser.add_argument('args', nargs=argparse.REMAINDER)
    opt = parser.parse_args()
    survey = LegacySurveyData() #output_dir=opt.output_dir)
    T = None
    if opt.ccds is not None:
        T = fits_table(opt.ccds)
        T = survey.cleanup_ccds_table(T)
        print('Read', len(T), 'from', opt.ccds)
    #else:
    #    T = survey.get_ccds()
    #    #print len(T), 'CCDs'
    if len(opt.args) == 0:
        if opt.expnum is not None:
            expnums = set([int(e) for e in opt.expnum.split(',')])
            T = merge_tables([survey.find_ccds(expnum=e, ccdname=opt.extname) for e in expnums])
            print('Cut to', len(T), 'with expnum in', expnums, 'and extname', opt.extname)
        opt.args = range(len(T))
    ps = None
    if opt.plot_base is not None:
        ps = PlotSequence(opt.plot_base)
    args = []
    for a in opt.args:
        # Check for "expnum-ccdname" format.
        if '-' in str(a):
            words = a.split('-')
            assert(len(words) == 2)
            expnum = int(words[0])
            ccdname = words[1]
            T = survey.find_ccds(expnum=expnum, ccdname=ccdname)
            if len(T) != 1:
                # Bug fix: this previously printed len(I) and I, where "I" was
                # the re.I regex flag pulled in by "from re import I" -- not
                # the CCD table that was actually found.
                print('Found', len(T), 'CCDs for expnum', expnum, 'CCDname', ccdname, ':', T)
                print('WARNING: skipping this expnum,ccdname')
                continue
            t = T[0]
        else:
            i = int(a)
            print('Index', i)
            t = T[i]
        #print('CCDnmatch', t.ccdnmatch)
        #if t.ccdnmatch < 20 and not opt.force:
        #    print('Skipping ccdnmatch = %i' % t.ccdnmatch)
        #    continue
        im = survey.get_image_object(t)
        print('Running', im.name)
        kwargs = dict(psfex=opt.psfex, sky=opt.sky, ps=ps, survey=survey)
        if opt.force:
            kwargs.update(force=True)
        if opt.run_se:
            kwargs.update(se=True)
        if opt.splinesky:
            kwargs.update(splinesky=True)
        if opt.cont:
            kwargs.update(noraise=True)
        if opt.threads:
            # Defer the work so all jobs can be mapped across the pool below.
            args.append((im, kwargs))
        else:
            run_calibs((im, kwargs))
    if opt.threads:
        mp = multiproc(opt.threads)
        mp.map(time_run_calibs, args)
    return 0
def lesson(request, order, slug):
    """
    One lesson can be viewed in two different ways:
    (1) as independent lesson
    (2) as part of one course
    As (1) it is well, independent. And it is not really
    important to jump to next in order lesson or not.
    It is more important in this conetxt to display 'related lessons'.
    As (2) this lesson is within logical sequence of group of lessons
    - a course.
    In this context, lesson title from within a course may override
    the title of the lesson.
    E.g. L#18, lesson.title = Django Deployment from Zero to Hero Part 1
    L#19, lesson.title = Django Deployment from Zero to Hero Part 2
    L#20, lesson.title = Django Deployment from Zero to Hero Part 3
    Within course, those lessons will be titled differently:
    course.title = Django Deployment from Zero to Hero
    lesson#1 - Setup VPS host
    lesson#2 - Setup Nginx
    lesson#3 - Prepare Database
    where lesson#1 is same 'thing' as L#18
    lesson#2 is same 'thing' as L#19.
    they are just within different context.
    Long story short, if user clicks on lesson from course view - lesson
    will be displayed differently - as lesson within course.
    To switch between views - pass http parameter view = course | lesson
    """
    # NOTE(review): the lookup uses ``order`` only; ``slug`` is never part of
    # the query, so any slug resolves to the same lesson -- confirm this is an
    # intentional SEO-style URL scheme.
    try:
        lesson = Lesson.objects.get(order=order)
    except Lesson.DoesNotExist:
        logger.warning(f"Lesson #{order} not found")
        raise Http404("Lesson not found")
    user = request.user
    # Access control for PRO lessons: anonymous users go to login, logged-in
    # non-PRO users go to the upgrade flow.
    if lesson.lesson_type == PRO and not user.is_authenticated:
        return login_with_pro(lesson_order=order)
    elif lesson.lesson_type == PRO and user.is_authenticated:
        # NOTE(review): a user without a profile skips the upgrade redirect and
        # sees the PRO lesson -- confirm that is intended.
        if user.profile and not user.profile.is_pro_user():
            # means an authenticated user which is not PRO
            # wants to access a PRO lesson => he will be redirected
            # to upgrade view with lesson_ord argument
            return upgrade_with_pro(lesson_order=order)
    # ?view=course renders the lesson inside its course; any other value
    # (default 'lesson') renders it standalone.
    view = request.GET.get('view', 'lesson')
    if view == 'lesson':
        template_name = 'lessons/lesson.html'
    else:
        template_name = 'lessons/lesson_within_course.html'
    course = None
    lesson_group = None
    if view == 'course':
        if lesson.lesson_groups.count() > 0:
            # Use the lesson's first group as the enclosing course context.
            lesson_group = lesson.lesson_groups.first()
            course = lesson_group.course
            similar_lessons = []
            lesson_groups = LessonGroup.objects.filter(
                course=course
            ).order_by('order')
            next_item = lesson_group.get_next_lesson_group_obj()
            prev_item = lesson_group.get_prev_lesson_group_obj()
        # NOTE(review): when view == 'course' but the lesson has no
        # lesson_groups, similar_lessons / lesson_groups / next_item /
        # prev_item are never assigned and the render() below raises
        # NameError -- confirm callers guarantee a group exists.
    else:
        lesson_groups = []
        similar_lessons = [
            sim_lesson.post
            for sim_lesson in lesson.similar_lessons.all()
        ]
        next_item = lesson.get_next_lesson_obj()
        prev_item = lesson.get_prev_lesson_obj()
    return render(
        request,
        template_name,
        {
            'page': lesson,
            'course': course,
            'lesson_group': lesson_group,
            'similar_lessons': similar_lessons,
            'all_course_lessons': lesson_groups,
            'next_item': next_item,
            'prev_item': prev_item
        }
    ) | 7365179a033728f6208d26e666929f8b414c8d72 | 32,956 |
def getNum(n, res_num):
    """Return the number paired with residue ``n``.

    Args:
        n: residue to look up.
        res_num: a ``(residues, numbers)`` pair of parallel sequences.

    Returns:
        The element of ``numbers`` at the position where ``n`` first occurs
        in ``residues``.

    Raises:
        ValueError: if ``n`` does not occur in ``residues``.
    """
    res, num = res_num
    try:
        idx = res.index(n)
    except ValueError:
        raise ValueError(f'{n} is not in a list of residues!')
    return num[idx]
def _run_prospector(filename,
                    stamp_file_name,
                    disabled_linters,
                    show_lint_files):
    """Run prospector."""
    tools = [
        "pep257",
        "pep8",
        "pyflakes"
    ]
    if can_run_pylint():
        tools.append("pylint")
    extra_kwargs = dict()
    if _file_is_test(filename):
        # Run prospector on tests. There are some errors we don't care about:
        # - invalid-name: This is often triggered because test method names
        #                 can be quite long. Descriptive test method names are
        #                 good, so disable this warning.
        # - super-on-old-class: unittest.TestCase is a new style class, but
        #                       pylint detects an old style class.
        # - too-many-public-methods: TestCase subclasses by definition have
        #                            lots of methods.
        extra_kwargs["ignore_codes"] = [
            "invalid-name",
            "super-on-old-class",
            "too-many-public-methods"
        ]
    else:
        if can_run_frosted():
            tools.append("frosted")
    return _stamped_deps(stamp_file_name,
                         _run_prospector_on,
                         [filename],
                         tools,
                         disabled_linters,
                         show_lint_files,
                         **extra_kwargs)
def dEuler212(q, w):
    """
    dEuler212(Q,W)

    dq = dEuler212(Q,W) returns the (2-1-2) euler angle derivative
    vector for a given (2-1-2) euler angle vector Q and body
    angular velocity vector w.

    dQ/dt = [B(Q)] w
    """
    b_matrix = BmatEuler212(q)
    return np.dot(b_matrix, w)
from typing import Callable
import functools
def with_zero_out_padding_outputs(
    graph_net: Callable[[gn_graph.GraphsTuple], gn_graph.GraphsTuple]
) -> Callable[[gn_graph.GraphsTuple], gn_graph.GraphsTuple]:
  """Wrap a graph-to-graph function so padded output values are zeroed.

  See `zero_out_padding` for a full explanation of the method.

  Args:
    graph_net: A Graph Neural Network.

  Returns:
    A Graph Neural Network that will zero out all output padded values.
  """
  @functools.wraps(graph_net)
  def wrapper(graph: gn_graph.GraphsTuple) -> gn_graph.GraphsTuple:
    output_graph = graph_net(graph)
    return zero_out_padding(output_graph)
  return wrapper
def as_bytes(x) -> bytes:
    """Convert a value to bytes by converting it to string and encoding in utf8."""
    if _is_bytes(x):
        return bytes(x)
    text = x if isinstance(x, str) else str(x)
    return text.encode('utf8')
def getGeneCount(person, geneSetDictionary):
    """
    determines how many genes a person is assumed to have based upon the query information provided
    """
    # Guard-clause form: anyone not listed in "no_genes" or "one_gene" is
    # assumed to carry two copies.
    if person in geneSetDictionary["no_genes"]:
        return 0
    if person in geneSetDictionary["one_gene"]:
        return 1
    return 2
import json
def merge_json(*args):
    """
    Take a list of json files and merges them together

    Each argument may be either an already-parsed dict or a path to a JSON
    file. Later arguments override keys from earlier ones.

    Input: list of json file
    Output: dictionary of merged json
    """
    # Accumulate into one dict with update() instead of rebuilding the whole
    # merged dict ({**a, **b}) for every argument, which was quadratic.
    merged = dict()
    for json_file in args:
        if isinstance(json_file, dict):
            merged.update(json_file)
        else:
            # OSError from open() simply propagates; the original
            # ``except OSError as error: raise error`` added nothing.
            with open(json_file) as fn:
                merged.update(json.load(fn))
    return merged
import functools
def hyp2f1_small_argument(a, b, c, z, name=None):
  """Compute the Hypergeometric function 2f1(a, b, c, z) when |z| <= 1.
  Given `a, b, c` and `z`, compute Gauss' Hypergeometric Function, specified
  by the series:
  `1 + (a * b/c) * z + (a * (a + 1) * b * (b + 1) / ((c * (c + 1)) * z**2 / 2 +
  ... (a)_n * (b)_n / (c)_n * z ** n / n! + ....`
  NOTE: Gradients with only respect to `z` are available.
  NOTE: It is recommended that the arguments are `float64` due to the heavy
  loss of precision in float32.
  Args:
    a: Floating-point `Tensor`, broadcastable with `b, c, z`. Parameter for the
      numerator of the series fraction.
    b: Floating-point `Tensor`, broadcastable with `a, c, z`. Parameter for the
      numerator of the series fraction.
    c: Floating-point `Tensor`, broadcastable with `a, b, z`. Parameter for the
      denominator of the series fraction.
    z: Floating-point `Tensor`, broadcastable `a, b, c`. Value to compute
      `2F1(a, b, c, z)` at. Only values of `|z| < 1` are allowed.
    name: A name for the operation (optional).
      Default value: `None` (i.e., 'continued_fraction').
  Returns:
    hypergeo: `2F1(a, b, c, z)`
  #### References
  [1] F. Johansson. Computing hypergeometric functions rigorously.
     ACM Transactions on Mathematical Software, August 2019.
     https://arxiv.org/abs/1606.06977
  [2] J. Pearson, S. Olver, M. Porter. Numerical methods for the computation of
     the confluent and Gauss hypergeometric functions.
     Numerical Algorithms, August 2016.
  [3] M. Abramowitz, I. Stegun. Handbook of Mathematical Functions with
     Formulas, Graphs and Mathematical Tables.
  """
  with tf.name_scope(name or 'hyp2f1_small_argument'):
    dtype = dtype_util.common_dtype([a, b, c, z], tf.float32)
    numpy_dtype = dtype_util.as_numpy_dtype(dtype)
    a = tf.convert_to_tensor(a, dtype=dtype)
    b = tf.convert_to_tensor(b, dtype=dtype)
    c = tf.convert_to_tensor(c, dtype=dtype)
    z = tf.convert_to_tensor(z, dtype=dtype)
    # Mask out exceptional cases to ensure that the series transformations
    # terminate fast.
    safe_a, safe_b, safe_c, safe_z = _mask_exceptional_arguments(
        a, b, c, z, numpy_dtype)
    # TODO(b/128632717): Extend this by including transformations for:
    # * Large parameter ranges. Specifically use Hypergeometric recurrences
    # to decrease the parameter values. This should be done via backward
    # recurrences rather than forward recurrences since those are numerically
    # stable.
    # * Include |z| > 1. This can be done via Hypergeometric identities that
    # transform to |z| < 1.
    # * Handling exceptional cases where parameters are negative integers.
    # Assume that |b| > |a|. Swapping the two makes no effect on the
    # calculation.
    a_small = tf.where(
        tf.math.abs(safe_a) > tf.math.abs(safe_b), safe_b, safe_a)
    safe_b = tf.where(tf.math.abs(safe_a) > tf.math.abs(safe_b), safe_a, safe_b)
    safe_a = a_small
    d = safe_c - safe_a - safe_b
    # Use the identity
    # 2F1(a , b, c, z) = (1 - z) ** d * 2F1(c - a, c - b, c, z).
    # when the numerator coefficients become smaller.
    should_use_linear_transform = (
        (tf.math.abs(c - a) < tf.math.abs(a)) &
        (tf.math.abs(c - b) < tf.math.abs(b)))
    safe_a = tf.where(should_use_linear_transform, c - a, a)
    safe_b = tf.where(should_use_linear_transform, c - b, b)
    # When -0.5 < z < 0.9, use approximations to Taylor Series.
    safe_z_small = tf.where(
        (safe_z >= 0.9) | (safe_z <= -0.5), numpy_dtype(0.), safe_z)
    taylor_series = _hyp2f1_internal(safe_a, safe_b, safe_c, safe_z_small)
    # When z >= 0.9 or -0.5 > z, we use hypergeometric identities to ensure
    # that |z| is small.
    safe_positive_z_large = tf.where(safe_z >= 0.9, safe_z, numpy_dtype(1.))
    hyp2f1_z_near_one = _hyp2f1_z_near_one(
        safe_a, safe_b, safe_c, safe_positive_z_large)
    safe_negative_z_large = tf.where(safe_z <= -0.5, safe_z, numpy_dtype(-1.))
    hyp2f1_z_near_negative_one = _hyp2f1_z_near_negative_one(
        safe_a, safe_b, safe_c, safe_negative_z_large)
    # Select the branch result matching each element's z region.
    result = tf.where(
        safe_z >= 0.9, hyp2f1_z_near_one,
        tf.where(safe_z <= -0.5, hyp2f1_z_near_negative_one, taylor_series))
    # Now if we applied the linear transformation identity, we need to
    # add a term (1 - z) ** (c - a - b)
    result = tf.where(
        should_use_linear_transform,
        tf.math.exp(d * tf.math.log1p(-safe_z)) * result,
        result)
    # Finally handle the exceptional cases.
    # First when z == 1., this expression diverges if c <= a + b, and otherwise
    # converges.
    hyp2f1_at_one = tf.math.exp(
        tf.math.lgamma(c) + tf.math.lgamma(c - a - b) -
        tf.math.lgamma(c - a) - tf.math.lgamma(c - b))
    # XOR of the gamma-sign indicators gives the overall sign of the
    # gamma-function ratio computed in log space above.
    sign_hyp2f1_at_one = (
        _gamma_negative(c) ^ _gamma_negative(c - a - b) ^
        _gamma_negative(c - a) ^ _gamma_negative(c - b))
    sign_hyp2f1_at_one = -2. * tf.cast(sign_hyp2f1_at_one, dtype) + 1.
    hyp2f1_at_one = hyp2f1_at_one * sign_hyp2f1_at_one
    result = tf.where(
        tf.math.equal(z, 1.),
        tf.where(c > a + b,
                 hyp2f1_at_one, numpy_dtype(np.nan)),
        result)
    # When a == c or b == c this reduces to (1 - z)**-b (-a respectively).
    result = tf.where(
        tf.math.equal(a, c),
        tf.math.exp(-b * tf.math.log1p(-z)),
        tf.where(
            tf.math.equal(b, c),
            tf.math.exp(-a * tf.math.log1p(-z)), result))
    # When c is a negative integer we can get a divergent series.
    result = tf.where(
        (_is_negative_integer(c) &
         ((a < c) | ~_is_negative_integer(a)) &
         ((b < c) | ~_is_negative_integer(b))),
        numpy_dtype(np.inf),
        result)
    # Custom-gradient pair: only d/dz is provided, via the contiguous relation
    # d/dz 2F1(a, b, c, z) = (a b / c) 2F1(a+1, b+1, c+1, z).
    # NOTE(review): returning (result, grad) suggests this function is meant
    # to be wrapped in tf.custom_gradient -- confirm at the call site.
    def grad(dy):
      grad_z = a * b * dy * hyp2f1_small_argument(
          a + 1., b + 1., c + 1., z) / c
      # We don't have an easily computable gradient with respect to parameters,
      # so ignore that for now.
      broadcast_shape = functools.reduce(
          ps.broadcast_shape,
          [ps.shape(x) for x in [a, b, c]])
      _, grad_z = _fix_gradient_for_broadcasting(
          tf.ones(broadcast_shape, dtype=z.dtype),
          z, tf.ones_like(grad_z), grad_z)
      return None, None, None, grad_z
    return result, grad | 3088761e007a5f65ba4af0c1e739324ff30a8bae | 32,964 |
def get_KPP_PL_tag(last_tag, tag_prefix='T'):
    """ Get the next P/L tag in a format T??? """
    assert (len(last_tag) == 4), "Tag must be 4 characers long! (e.g. T???)"
    next_number = int(last_tag[1:]) + 1
    return f'{tag_prefix}{next_number:0>3}'
from ibmsecurity.appliance.ibmappliance import IBMError
def update(isamAppliance, description, properties, check_mode=False, force=False):
    """
    Update a specified Attribute Matcher
    """
    matcher_id, update_required, json_data = _check(isamAppliance, description, properties)
    if matcher_id is None:
        raise IBMError("999", "Cannot update data for unknown Attribute Matcher: {0}".format(description))
    if force is not True and update_required is not True:
        # Nothing to change: report an unchanged result.
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_put(
        "Update a specified Attribute Matcher",
        "{0}/{1}".format(am_uri, matcher_id), json_data)
import os
def name_has_image_suffix(fname):
    """Test whether file fname has an image suffix in the allowed list."""
    _root, extension = os.path.splitext(fname)
    return extension in allowed_image_file_suffixes
def get_all_themes(config, brand_id):
    """
    Get all themes for the given brand id.

    :param config: context config
    :param brand_id: the brand id for the relevant help center
    :return list: list of all themes
    """
    themes_url = (f"https://{config['subdomain']}.zendesk.com"
                  f"/api/guide/theming/{brand_id}/themes.json")
    response = get(config, themes_url)
    return response['themes']
import logging
def maximum_radius_test(gpu_memory=None, number_of_gpu=None):
    """Pick input/output radii for the available GPU memory.

    Args:
        gpu_memory: free GPU memory in GB. Read from the system together with
            ``number_of_gpu`` when both arguments are None.
        number_of_gpu: number of GPUs; only used for logging here.

    Returns:
        Tuple ``(r_input, r_output)`` for the detected memory band.

    Raises:
        ValueError: if ``gpu_memory`` falls outside the supported [10, 16)
            range (previously this fell through and crashed with
            UnboundLocalError).
    """
    if gpu_memory is None and number_of_gpu is None:
        gpu_memory, number_of_gpu = tfu.client.read_gpu_memory()
    logging.info('GPU Memory={:.2f} Number of GPU={}'.format(gpu_memory, number_of_gpu))
    if 10 <= gpu_memory < 11:
        r_input = 120
        r_output = 78
    elif 11 <= gpu_memory < 12:
        # Placeholder values retained from the original code for this band.
        r_input = '!'
        r_output = '!'
    elif 12 <= gpu_memory < 16:
        r_input = 136
        r_output = 94
    else:
        raise ValueError('Unsupported GPU memory size: {}'.format(gpu_memory))
    return r_input, r_output
from vistrails.core.modules.basic_modules import Boolean, String, Integer, Float, List
from vistrails.core import debug
def get_module(value, signature):
    """
    Creates a module for value, in order to do the type checking.

    Returns the module class matching the runtime type of ``value`` (a tuple
    of module classes for tuple input), or None when the type is unknown.
    """
    if isinstance(value, Constant):
        return type(value)
    elif isinstance(value, bool):
        return Boolean
    elif isinstance(value, str):
        return String
    elif isinstance(value, int):
        return Integer
    elif isinstance(value, float):
        return Float
    elif isinstance(value, list):
        return List
    elif isinstance(value, tuple):
        # Bug fix: the original did ``v_modules += (get_module(...))`` -- the
        # parentheses without a trailing comma are not a tuple, so the
        # concatenation raised TypeError. Also: xrange -> range for Python 3.
        v_modules = ()
        for element in range(len(value)):
            v_modules += (get_module(value[element], signature[element]),)
        return v_modules
    else:
        debug.warning("Could not identify the type of the list element.")
        debug.warning("Type checking is not going to be done inside Map module.")
        return None
def tca_plus(source, target):
    """
    TCA: Transfer Component Analysis
    :param source:
    :param target:
    :param n_rep: number of repeats
    :return: result
    """
    # NOTE(review): ``result`` is populated nowhere (the update below is
    # commented out) -- dead variable, left as-is.
    result = dict()
    metric = 'process'
    for src_name in source:
        try:
            # Per-source accumulators; see the NOTE at stats_df below.
            stats = []
            val = []
            src = prepare_data(src_name, metric)
            for tgt_name in target:
                try:
                    tgt = prepare_data(tgt_name, metric)
                    # Lines-of-code proxy used by the effort-aware metrics.
                    loc = tgt['file_la'] + tgt['file_lt']
                    dcv_src, dcv_tgt = get_dcv(src, tgt)
                    norm_src, norm_tgt = smart_norm(src, tgt, dcv_src, dcv_tgt)
                    _train, _test = map_transform(norm_src, norm_tgt)
                    clf = create_model(_train)
                    actual, predicted, predicted_proba = predict_defects(clf=clf, test=_test)
                    abcd = metrics.measures(actual,predicted,loc)
                    recall = abcd.calculate_recall()
                    pf = abcd.get_pf()
                    g = abcd.get_g_score()
                    f = abcd.calculate_f1_score()
                    pci_20 = abcd.get_pci_20()
                    print([src_name, tgt_name, recall, pf, g, f, pci_20])
                    stats.append([src_name, tgt_name,recall, pf, g, f, pci_20])
                # Best-effort: a failing source/target pair is logged and
                # skipped rather than aborting the whole sweep.
                except Exception as e:
                    print(src_name, tgt_name, e)
                    continue
        except Exception as e:
            # NOTE(review): ``tgt_name`` may be unbound here if the failure
            # happened before the inner loop started -- confirm.
            print(src_name, tgt_name, e)
            continue
    print('completed',len(source))
    # NOTE(review): ``stats`` is re-initialised for every source, so this
    # DataFrame only contains results for the LAST source -- confirm intent.
    # NOTE(review): column name 'recell' looks like a typo for 'recall';
    # downstream code may depend on it, so it is left unchanged.
    stats_df = pd.DataFrame(stats, columns = ['source', 'target', 'recell', 'pf', 'g', 'f', 'pci_20'])
    # result.update({tgt_name: stats_df})
    return stats_df | 95242aa64db7b88a7f170abf619677c1d4acde57 | 32,971 |
def local_2d_self_attention_spatial_blocks(query_antecedent,
                                           kv_channels,
                                           heads,
                                           memory_h_dim=None,
                                           memory_w_dim=None,
                                           mask_right=False,
                                           master_dtype=tf.float32,
                                           slice_dtype=tf.float32,
                                           name=None):
  """Attention to the source position and a neighborhood to the left or right.
  The sequence is divided into blocks of length block_size.
  Attention for a given query position can only see memory positions
  less than or equal to the query position, in the corresponding block
  and the previous block.
  Args:
    query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
      num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
      query_length, but a different name.
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    memory_h_dim: mtf Dimension, for the memory height block.
    memory_w_dim: mtf Dimension, for the memory width block.
    mask_right: bool, flag specifying whether we mask out attention to the right
      for the decoder.
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.
  Returns:
    a Tensor of shape
    [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="multihead_attention", values=[query_antecedent]):
    h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
    batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    # Rename dimensions for the memory height and width.
    memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name,
                                             "memory_" + h_dim.name)
    memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name,
                                             "memory_" + w_dim.name)
    # NOTE(review): this overwrites the memory_h_dim/memory_w_dim parameters
    # with the renamed dims, so any values passed in are effectively unused --
    # confirm whether the parameters are kept only for API compatibility.
    memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
    # Call einsum over the query and memory to get query q, keys k and values v.
    q = mtf.einsum([query_antecedent, wq],
                   mtf.Shape([
                       batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim,
                       kv_channels
                   ]))
    k = mtf.einsum([memory_antecedent, wk],
                   mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
                              memory_h_dim, memory_w_dim, kv_channels]))
    v = mtf.einsum([memory_antecedent, wv],
                   mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
                              memory_h_dim, memory_w_dim, kv_channels]))
    # Halo exchange for memory blocks.
    k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
                                  num_w_blocks, memory_w_dim, mask_right)
    # Calculate the causal mask to avoid peeking into the future. We compute
    # this once and reuse it for all blocks since the block_size is known.
    mask = None
    if mask_right:
      mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
                                           memory_h_dim, memory_w_dim)
    output = dot_product_attention(q, k, v, mask=mask)
    # Project the attention output back to io_channels with wo.
    return mtf.einsum(
        [output, wo],
        mtf.Shape(
            [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels])) | 6295dff8753f4b577086fd414a386271ed6e1a1a | 32,972 |
def polling_locations_import_from_structured_json(structured_json):
    """
    Import polling locations from structured JSON. Every record must carry a
    we_vote_id, so this path is not used when importing from Google Civic.
    :param structured_json: list of polling-location dicts
    :return: summary dict with saved/updated/not_processed counts
    """
    manager = PollingLocationManager()
    saved_count = 0
    updated_count = 0
    skipped_count = 0
    for entry in structured_json:
        we_vote_id = entry.get('we_vote_id', '')
        line1 = entry.get('line1', '')
        city = entry.get('city', '')
        state = entry.get('state', '')
        # All four of these values are required before we touch the database.
        required_present = all(
            positive_value_exists(value)
            for value in (we_vote_id, line1, city, state))
        if required_present:
            # Optional fields default to empty strings.
            polling_location_id = entry.get('polling_location_id', '')
            location_name = entry.get('location_name', '')
            polling_hours_text = entry.get('polling_hours_text', '')
            directions_text = entry.get('directions_text', '')
            line2 = entry.get('line2', '')
            zip_long = entry.get('zip_long', '')
            results = manager.update_or_create_polling_location(
                we_vote_id, polling_location_id, location_name, polling_hours_text,
                directions_text, line1, line2, city, state, zip_long)
        else:
            skipped_count += 1
            results = {
                'success': False,
                'status': 'Required value missing, cannot update or create'
            }
        if not results['success']:
            skipped_count += 1
        elif results['new_polling_location_created']:
            saved_count += 1
        else:
            updated_count += 1
    return {
        'success': True,
        'status': "POLLING_LOCATIONS_IMPORT_PROCESS_COMPLETE",
        'saved': saved_count,
        'updated': updated_count,
        'not_processed': skipped_count,
    }
return polling_locations_results | 868062d4dac4a56073c832f7d2a2919a37a12203 | 32,973 |
def _mgSeqIdToTaxonId(seqId):
"""
Extracts a taxonId from sequence id used in the Amphora or Silva mg databases (ends with '|ncbid:taxonId")
@param seqId: sequence id used in mg databases
@return: taxonId
@rtype: int
"""
return int(seqId.rsplit('|', 1)[1].rsplit(':', 1)[1]) | 2ce74f453e3496c043a69b4205f258f06bfd0452 | 32,974 |
def has_progress(toppath):
    """Return `True` if any paths under `toppath` have already been
    imported.
    """
    with progress_state() as state:
        return bool(state[toppath])
def run_both_transfers(model: BiVAE, *args, **kwargs):
    """
    Apply both content-transfer and style-transfer to each pair of the
    content-representative tensor images.
    :param model: Trained BiVAE model
    :param class_reps: a dictionary of string class_id <-> a single 3dim Tensor (C,H,W)
    :param log_dir: Path to the model.logger's log_dir (Eg. '..../exp_name/version7')
    :param train_mean: Original datamodule's training set's mean
    :param train_std: Original datamodule's training set std
    :param linearlize: (bool). If true, linearlize the output image to range [0,1] for better viz. contrast
    :return: tuple of (content-transfer result, style-transfer result)
    """
    content_result = save_content_transfers(model, *args, **kwargs)
    style_result = save_style_transfers(model, *args, **kwargs)
    return content_result, style_result
def se_mobilenet_075():
    """Build an SE_MobileNet with width multiplier 0.75 for 1000 classes."""
    return SE_MobileNet(widen_factor=0.75, num_classes=1000)
from pathlib import Path
def canonicalize_lookup_info(
        lookup: SshPubKeyLookupInfo,
        ssh_auth_dir_root: Path,
        template_vars: SshPubKeyFileTemplateVars
) -> SshPubKeyLookupInfo:
    """Expand the template variables and ensure that paths are made absolute
    relative to the ssh auth dir root.
    """
    expanded_templates = [
        expand_file_template_vars(tmpl, template_vars)
        for tmpl in lookup.file_template
    ]
    abs_search_paths = [
        _canonicalize_potentially_rel_path(p, ssh_auth_dir_root)
        for p in lookup.file_search_path
    ]
    abs_file = None
    if lookup.file is not None:
        abs_file = _canonicalize_potentially_rel_path(lookup.file, ssh_auth_dir_root)
    return SshPubKeyLookupInfo(expanded_templates, abs_search_paths, abs_file)
def reprojection_rms(impoints_known, impoints_reprojected):
    """
    Root mean square (RMS) error between known image points and their
    reprojections (cv2.projectPoints).

    Both input NumPy arrays should be of shape (n_points, 2).
    """
    residuals = impoints_known - impoints_reprojected
    # Squared Euclidean distance per point, then RMS over all points.
    per_point_sq = np.square(residuals).sum(axis=1)
    return np.sqrt(per_point_sq.mean())
import torch
def macro_accuracy_one_sub(**kwargs: dict) -> bool:
    """
    Check whether the predicted output — after the postprocessing step of
    selecting the single most 'changed' substation — wholly matches the true
    output.

    Unlike micro_accuracy_one_sub, no element-wise accuracy is computed;
    unlike macro_accuracy / macro_accuracy_valid, the prediction here has
    already been postprocessed to a single substation.

    Parameters
    ----------
    **kwargs['one_sub_P'] : torch.Tensor[float]
        Postprocessed model output; floats in (0,1), where values above 0.5
        mean "change" and values below mean "no change".
    **kwargs['Y'] : torch.Tensor[float]
        Datapoint labels; floats in {0,1}, where 1 means "change".

    Returns
    -------
    bool
        Whether the post-processed prediction matches the true output.
    """
    predicted = torch.round(kwargs['one_sub_P'])
    actual = torch.round(kwargs['Y'])
    return torch.equal(predicted, actual)
def modified_euler(f, y0, t0, t1, n):
    """ Use the modified Euler method (Heun's method / explicit trapezoid)
    to compute an approximate solution to the ODE y' = f(t, y) at n
    equispaced parameter values from t0 to t1 with initial conditions
    y(t0) = y0.
    y0 is assumed to be either a constant or a one-dimensional numpy array.
    t and t0 are assumed to be constants.
    f is assumed to accept two arguments.
    The first is a constant giving the value of t.
    The second is a one-dimensional numpy array of the same size as y.
    This function returns an array Y of shape (n,) if
    y is a constant or an array of size 1.
    It returns an array of shape (n, y.size) otherwise.
    In either case, Y[i] is the approximate value of y at
    the i'th value of np.linspace(t0, t, n).
    """
    Y, T, h = initialize_all(y0, t0, t1, n)
    # NOTE: `range` replaces the Python-2-only `xrange`.
    for i in range(1, n):
        # Predictor: forward-Euler slope at the start of the step.
        k1 = f(T[i-1], Y[i-1])
        # Corrector: slope at the *end* of the step (T[i], not T[i-1] —
        # the original evaluated the corrector at T[i-1], which is wrong
        # for time-dependent f).
        k2 = f(T[i], Y[i-1] + h * k1)
        Y[i] = Y[i-1] + (h / 2.) * (k1 + k2)
    return Y
def read_tree(sha1=None, data=None):
    """Read tree object with given SHA-1 (hex string) or data, and return list
    of (mode, path, sha1) tuples.

    :param sha1: hex SHA-1 of a tree object to load via read_object
    :param data: raw tree payload (used when sha1 is None)
    :raises TypeError: if neither sha1 nor data is given
    """
    if sha1 is not None:
        obj_type, data = read_object(sha1)
        assert obj_type == 'tree'
    elif data is None:
        raise TypeError('must specify "sha1" or "data"')
    i = 0
    entries = []
    for _ in range(1000):
        end = data.find(b'\x00', i)
        if end == -1:
            break
        # Entry layout: "<octal mode> <path>\x00<20-byte binary sha1>".
        # Split on the FIRST space only: paths may themselves contain spaces
        # (bare .split() raised ValueError on such entries).
        mode_str, path = data[i:end].decode().split(' ', 1)
        mode = int(mode_str, 8)
        digest = data[end + 1:end + 21]
        entries.append((mode, path, digest.hex()))
        i = end + 1 + 20
    return entries
def cluster(T, m):
    """
    Run PCCA++ [1] to compute a metastable decomposition of MSM states
    (i.e. find clusters using the transition matrix and PCCA).
    Parameters
    ----------
    T: a probability transition matrix.
    m : Desired number of metastable sets (int).
    Notes
    -----
    The metastable decomposition is done using the PCCA method of the
    pyemma.msm.MSM class. For details and references:
    https://github.com/markovmodel/PyEMMA/blob/devel/pyemma/msm/models/msm.py
    """
    # memberships -> per-state assignments -> sets of states per cluster.
    membership_vectors = mana.pcca_memberships(T, m)
    return cluster_sets(cluster_assignments(membership_vectors))
def fine_tune_class_vector(nr_class, *, exclusive_classes=True, **cfg):
    """Select features from the class-vectors of the last hidden state,
    softmax them, then mean-pool to produce one feature per vector.
    Class-vector gradients are incremented in the backward pass, allowing
    fine-tuning.
    """
    softmax_layer = with_getitem(
        0, Softmax(nr_class, cfg["token_vector_width"]))
    return chain(
        get_pytt_class_tokens,
        flatten_add_lengths,
        softmax_layer,
        Pooling(mean_pool),
    )
def _recommend_aals_annoy(est, userid, R, n, filter_items,
                          recalculate_user, filter_previously_rated,
                          return_scores, recommend_function,
                          scaling_function, *args, **kwargs):
    """Produce recommendations for Annoy and NMS ALS algorithms"""
    user_factor = est._user_factor(userid, R, recalculate_user)
    # Compute the items to drop from the results. Use R[userid].indices
    # rather than the user factor, since the factor may have been
    # re-estimated above.
    filter_out = _get_filter_items(filter_previously_rated,
                                   R[userid].indices,
                                   filter_items)
    # Default N to the total number of items; item_factors exists on all
    # of the ALS models handled here.
    if n is None:
        n = est.item_factors.shape[0]
    # Ask for extras so filtering still leaves n results.
    count = n + len(filter_out)
    # See [1] in docstring for why a trailing zero is appended to the query.
    query = np.append(user_factor, 0)
    raw_ids, raw_dist = recommend_function(query, count, *args, **kwargs)
    ids = np.asarray(raw_ids)
    dist = np.asarray(raw_dist)
    if return_scores:
        # Convert euclidean distances back to cosine, then rescale to inner
        # product. Only done when scores are requested, as it's expensive.
        scaling = est.max_norm * np.linalg.norm(query)
        dist = scaling_function(dist, scaling)
    return _do_filter(ids, dist, filter_out=filter_out,
                      return_scores=return_scores, n=n)
def url_to_filename(url):
    """Convert a URL to a valid filename by replacing '/' with '_'."""
    return '_'.join(url.split('/'))
def _parallel_dict_from_expr_if_gens(exprs, opt):
    """Transform expressions into a multinomial form given generators.

    Returns one dict per expression, mapping monomial exponent tuples
    (ordered as ``opt.gens``) to coefficient expressions.
    """
    # Position of each generator in the exponent tuple.
    indices = {g: i for i, g in enumerate(opt.gens)}
    zero_monom = [0]*len(opt.gens)
    polys = []
    for expr in exprs:
        poly = {}
        for term in Add.make_args(expr):
            coeff, monom = [], zero_monom.copy()
            for factor in Mul.make_args(term):
                base, exp = decompose_power(factor)
                if exp < 0:
                    # Normalize negative powers: base**-exp == (1/base)**exp.
                    exp, base = -exp, Pow(base, -1)
                try:
                    # Factor is a power of a generator: accumulate exponent.
                    monom[indices[base]] += exp
                    continue
                except KeyError:
                    # Not a generator itself — but it must not contain one,
                    # otherwise the expression is not polynomial in opt.gens.
                    if factor.free_symbols & set(opt.gens):
                        raise PolynomialError(f'{factor} contains an element'
                                              ' of the generators set')
                    coeff.append(factor)
            monom = tuple(monom)
            # Sum coefficients of repeated monomials.
            poly[monom] = Mul(*coeff) + poly.get(monom, 0)
        polys.append(poly)
    return polys
def getMObjectHandle(value):
    """
    Get an MObjectHandle from any given value.
    :type value: Union[str, om.MObject, om.MObjectHandle, om.MDagPath]
    :rtype: om.MObjectHandle
    """
    # Already a handle: hand it back unchanged.
    if isinstance(value, om.MObjectHandle):
        return value
    return om.MObjectHandle(getMObject(value))
def remove_dupes(inds1, inds2, inds3=None, inds4=None, tol=1e-6):
    """
    Remove duplicates so as to not break the interpolator.
    Parameters
    ----------
    inds1, inds2, inds3, inds4 : list or np.array()
        to find unique values, must be same length
    tol : float
        points closer together than this are considered duplicates
    Returns
    -------
    non_dupes : list
        indices of input arrays that are not duplicates
    """
    def unique_seq(seq, tol=1e-6):
        '''
        Not exactly unique, but only points that are farther
        apart than some tol
        '''
        return np.nonzero(np.abs(np.diff(seq)) >= tol)[0]

    # Intersect incrementally; the original rebuilt every intersection from
    # scratch and raised NameError when inds4 was given without inds3.
    non_dupes = set(unique_seq(inds1, tol=tol)) & set(unique_seq(inds2, tol=tol))
    if inds3 is not None:
        non_dupes &= set(unique_seq(inds3, tol=tol))
    if inds4 is not None:
        non_dupes &= set(unique_seq(inds4, tol=tol))
    return list(non_dupes)
def filter_punctuation(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Helper function for parsing number strings. Converts common cardinal strings (groups of three digits delineated by 'cardinal_separator' - see graph_utils)
    and converts to a string of digits:
        "1 000" -> "1000"
        "1.000.000" -> "1000000"
    Args:
        fst: Any pynini.FstLike object. Function composes fst onto string parser fst
    Returns:
        fst: A pynini.FstLike object
    """
    three_digit_group = NEMO_DIGIT ** 3  # blocks of exactly three digits
    leading_digits = pynini.closure(NEMO_DIGIT, 1, 3)  # start of string
    # Plain digit strings without separators (page numbers, thousand series).
    plain_digits = pynini.closure(NEMO_DIGIT, 1)
    # Separator-delimited groups of three, with the separators deleted.
    grouped_digits = (
        leading_digits
        + pynutil.delete(cardinal_separator)
        + pynini.closure(three_digit_group + pynutil.delete(cardinal_separator))
        + three_digit_group
    )
    return (plain_digits | grouped_digits) @ fst
def pixbuf2image(pix):
    """Convert a GdkPixbuf to a PIL image.

    Preserves the alpha channel when the pixbuf has one.
    """
    data = pix.get_pixels()
    width = pix.props.width
    height = pix.props.height
    stride = pix.props.rowstride
    # Idiomatic truthiness test instead of the original `== True` comparison.
    mode = "RGBA" if pix.props.has_alpha else "RGB"
    return Image.frombytes(mode, (width, height), data, "raw", mode, stride)
def setup_s3_client(job_data):
    """Creates an S3 client
    Uses the credentials passed in the event by CodePipeline. These
    credentials can be used to access the artifact bucket.
    Args:
        job_data: The job data structure
    Returns:
        An S3 client with the appropriate credentials
    """
    credentials = job_data['artifactCredentials']
    session = Session(
        aws_access_key_id=credentials['accessKeyId'],
        aws_secret_access_key=credentials['secretAccessKey'],
        aws_session_token=credentials['sessionToken'])
    return session.client(
        's3', config=botocore.client.Config(signature_version='s3v4'))
async def get_subscriptions_handler(request: Request) -> data.SubscriptionsListResponse:
    """
    Get user's subscriptions.

    Lists Bugout resources of the subscription type belonging to the
    authenticated user and maps them to the API response model.
    """
    token = request.state.token
    params = {
        "type": BUGOUT_RESOURCE_TYPE_SUBSCRIPTION,
        "user_id": str(request.state.user.id),
    }
    try:
        resources: BugoutResources = bc.list_resources(token=token, params=params)
    except BugoutResponseException as e:
        # Pass Bugout errors through with their original status code.
        raise MoonstreamHTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        # BUGFIX: the user lives on request.state, not request — the original
        # `request.user.id` would raise AttributeError while logging.
        logger.error(
            f"Error listing subscriptions for user ({request.state.user.id}) with token ({request.state.token}), error: {str(e)}"
        )
        reporter.error_report(e)
        raise MoonstreamHTTPException(status_code=500, internal_error=e)
    return data.SubscriptionsListResponse(
        subscriptions=[
            data.SubscriptionResourceData(
                id=str(resource.id),
                user_id=resource.resource_data["user_id"],
                address=resource.resource_data["address"],
                color=resource.resource_data["color"],
                label=resource.resource_data["label"],
                subscription_type_id=resource.resource_data["subscription_type_id"],
                updated_at=resource.updated_at,
                created_at=resource.created_at,
            )
            for resource in resources.resources
        ]
    )
import json
def unique_doc_key(doc):
    """
    Create a key that allows checking for record uniqueness.
    """
    optional_attrs = ('level', 'country', 'state', 'region', 'district', 'city')
    # Key is the type followed by whichever location attributes are present.
    keyparts = [doc['type']] + [doc[attr] for attr in optional_attrs if attr in doc]
    return json.dumps(keyparts)
def parse_tag(vt):
    """
    Get a VTag from a label
    Parameters
    ----------
    vt : str
        A label that we want to get the VTag
    Raises
    ------
    UnknownTypeError
        If the label is not known in VTag
    """
    label = vt.strip()
    # Dispatch table instead of an if-chain.
    tags = {
        "C": TAG_CRITICAL,
        "L": TAG_LEAF,
        "N": TAG_NODE,
    }
    if label in tags:
        return tags[label]
    raise UnknownTypeError('Type of value unknown: ' + str(label))
from datetime import datetime
import time
import random
def fetch_stock_revive_info(start_date: date = None, end_date: date = None, retry: int = 10) -> list:
    """
    Historical capital-reduction data for OTC-listed (TPEx) stocks.

    Output format: [{'code': '4153', 'name': '鈺緯', 'revive_date': date(2020-10-19),
                     'old_price': 27.20, 'new_price': 30.62}]

    :param start_date: first date to query; defaults to 2013-01-01
    :param end_date: last date to query; defaults to today
    :param retry: number of HTTP attempts before giving up
    :return: list of dicts, one per capital-reduction record
    """
    result = []
    if not start_date:
        start_date = date(2013, 1, 1)
    if not end_date:
        end_date = datetime.today()
    req = HttpRequest()
    kwargs = dict()
    kwargs['headers'] = req.default_headers()
    # Dates are sent in ROC (Minguo) calendar format: Gregorian year - 1911.
    kwargs['params'] = {
        'o': 'json', 'l': 'zh',
        'd': '{}/{:02d}/{:02d}'.format(start_date.year - 1911, start_date.month, start_date.day),
        'ed': '{}/{:02d}/{:02d}'.format(end_date.year - 1911, end_date.month, end_date.day),
        '_': int(time.time() * 1000)}
    for i in range(retry):
        # Random delay between attempts to avoid hammering the server.
        req.wait_interval = random.randint(3, 5)
        resp = req.send_data(method='GET', url=STOCK_REVIVE_URL, **kwargs)
        if resp.status_code == 200:
            try:
                data = resp.json()
                if not data:
                    continue
            except Exception as e:
                logger.warning(str(e))
                continue
            rows = data.get('aaData', [])
            for r in rows:
                code = r[1]
                # Only keep records whose stock code is 4 characters long
                if len(code) != 4:
                    continue
                str_zh_date = str(r[0])
                if len(str_zh_date) != 7:
                    continue
                # Convert the 7-char ROC date string (e.g. '1091019')
                # to a Gregorian date.
                year = 1911 + int(str_zh_date[0:3])
                month = int(str_zh_date[3:5])
                day = int(str_zh_date[5:7])
                revive_date = date(year, month, day)
                name = r[2]
                old_price = round(float(r[3]), 2)
                new_price = round(float(r[4]), 2)
                reason = r[9]
                data = {
                    'code': code, 'name': name, 'revive_date': revive_date,
                    'old_price': old_price, 'new_price': new_price, 'reason': reason}
                full_href = r[10]
                # The last column embeds a javascript href; extract its query
                # string to fetch and merge the per-record detail page.
                if full_href:
                    url_list = full_href.split("\'")
                    if len(url_list) > 1:
                        param = url_list[1].split('?')
                        if len(param) == 2:
                            patch_param = param[1].replace('amp;', '')
                            patch_param = patch_param.replace('%', '%25')
                            url = "%s?%s" % (STOCK_REVIVE_DETAIL_URL, patch_param)
                            detail_data = fetch_stock_revive_detail_info(url)
                            for k, v in detail_data.items():
                                data[k] = v
                logger.info("取得減資資料: %s" % (data, ))
                result.append(data)
            break
        else:
            logger.warning("無法取得所有上櫃減資歷史資資料")
    return result
def _read_one(stream: BytesIO) -> int:
"""
Read 1 byte, converting it into an int
"""
c = stream.read(1)
if c == b"":
raise EOFError("Unexpected EOF while reading bytes")
return ord(c) | d3f8d22b2d2d3ff08cec42ffcf81cafe9192c707 | 32,997 |
def new_thread_mails(post, users_and_watches):
    """Return an iterable of EmailMessages to send when a new thread is
    created."""
    thread = post.thread
    context = {
        'post': post.content,
        'post_html': post.content_parsed,
        'author': post.creator.username,
        'host': Site.objects.get_current().domain,
        'thread': thread.title,
        'forum': thread.document.title,
        'post_url': thread.get_absolute_url(),
    }
    return emails_with_users_and_watches(
        subject=_lazy(u'{forum} - {thread}'),
        text_template='kbforums/email/new_thread.ltxt',
        html_template='kbforums/email/new_thread.html',
        context_vars=context,
        users_and_watches=users_and_watches)
def known(words):
    """The subset of `words` that appear in the dictionary of WORDS."""
    return {w for w in words if w in WORDS}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.