| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def concatenate_weather_files(dir_path):
    """Concatenate all .nc files found in the given directory along the time dimension."""
    # Import all the files as datasets; load the data into memory so the
    # datasets remain usable after each file is closed.
    fnames = get_weather_files(dir_path)
    ds_list = []
    for f in fnames:
        with xr.open_dataset(f, engine='netcdf4') as ds:
            ds_list.append(ds.load())
    ds_main = xr.concat(ds_list, dim='time')
    groups = ds_main.groupby('time')
    return groups
| 5,342,300
|
def homepage(module=None, *match, **attr):
"""
Shortcut for module homepage menu items using the MM layout,
retrieves the module's nice name.
@param module: the module's prefix (controller)
@param match: additional prefixes
@param attr: attributes for the navigation item
"""
settings = current.deployment_settings
all_modules = settings.modules
layout = S3MainMenuDefaultLayout
c = [module] + list(match)
if "name" in attr:
name = attr["name"]
attr.pop("name")
else:
if module is None:
module = "default"
if module in all_modules:
m = all_modules[module]
name = m.name_nice
else:
name = module
if "f" in attr:
f = attr["f"]
del attr["f"]
else:
f = "index"
return layout(name, c=c, f=f, **attr)
| 5,342,301
|
def task_created_upon_camera_create(camera):
"""task_created_upon_camera_create
Args:
camera (Camera): a Camera instance
"""
assert CameraTask.objects.exists()
| 5,342,302
|
def handle_fallthrough(event, path, query):
"""
Handles the fallthrough cases where no redirects were matched
"""
# If there is no fallthrough response provider, 302 the whole website to the HOST that
# was input
if variables.FALLTHROUGH is None:
return redirect('//' + variables.HOST + path + query)
# If we asked to fallthrough to the origin, just return the original request
# so that Cloudfront continues on its merry way
elif variables.FALLTHROUGH == 'origin':
return event['Records'][0]['cf']['request']
# Otherwise use the fallthrough as is
else:
return variables.FALLTHROUGH
| 5,342,303
|
def vowel_space_area(F1a, F1i, F1u, F2a, F2i, F2u):
"""
Return vowel space area
Args:
F1a: (float) first formant frequency of the vowel /a/ [Hz]
F1i: (float) first formant frequency of the vowel /i/ [Hz]
F1u: (float) first formant frequency of the vowel /u/ [Hz]
F2a: (float) second formant frequency of the vowel /a/ [Hz]
F2i: (float) second formant frequency of the vowel /i/ [Hz]
F2u: (float) second formant frequency of the vowel /u/ [Hz]
Returns:
VSA: (float) vowel space area
"""
# Compute vowel space area
EDiu = np.sqrt((F1i-F1u)**2+(F2i-F2u)**2)
EDia = np.sqrt((F1i-F1a)**2+(F2i-F2a)**2)
EDau = np.sqrt((F1a-F1u)**2+(F2a-F2u)**2)
S = (EDiu+EDia+EDau)/(2.0)
VSA = np.sqrt(S*(S-EDiu)*(S-EDia)*(S-EDau))
# Return vowel space area
return float(VSA)
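# A quick numeric check of the Heron's-formula computation above, using made-up
# corner-vowel formants (illustrative values only, not from any real speaker):
import numpy as np

F1a, F2a = 750.0, 1300.0   # hypothetical /a/
F1i, F2i = 300.0, 2300.0   # hypothetical /i/
F1u, F2u = 350.0, 800.0    # hypothetical /u/
EDiu = np.hypot(F1i - F1u, F2i - F2u)   # side /i/-/u/
EDia = np.hypot(F1i - F1a, F2i - F2a)   # side /i/-/a/
EDau = np.hypot(F1a - F1u, F2a - F2u)   # side /a/-/u/
s = (EDiu + EDia + EDau) / 2.0          # semi-perimeter
print(np.sqrt(s * (s - EDiu) * (s - EDia) * (s - EDau)))  # ~312500 Hz^2, the /a/-/i/-/u/ triangle area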
| 5,342,304
|
def get_hosted_zone(domain):
"""Return a domain's hosted zone."""
return api.get(f"/api/domain/{domain['_id']}/records/")
| 5,342,305
|
def call_subprocess(command, action, module):
"""Call a command, redirect output given current logging level."""
output = []
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(process.stdout.readline, b""):
output.append(line)
encoding = (
getattr(sys.stdout, "encoding", None)
or getattr(sys.stdin, "encoding", None)
or "utf-8"
)
sys.stdout.write(line.decode(encoding))
process.wait()
process.stdout.close()
if process.returncode != 0:
raise sneakersync.Exception(action, module, b"".join(output))
| 5,342,306
|
def align_spikes(spike_data, spt_dict, sp_win, type="max", resample=1,
contact=0, remove=True):
"""Aligns spike waves and returns corrected spike times
Parameters
----------
spike_data : dict
spt_dict : dict
sp_win : list of int
type : {'max', 'min'}, optional
resample : int, optional
contact : int, optional
remove : bool, optional
Returns
-------
ret_dict : dict
spike times of aligned spikes
"""
spt = spt_dict['data'].copy()
idx_align = np.arange(len(spt))
#spt_align = {'data': spt}
#go in a loop until all spikes are correctly aligned
iter_id = 0
while len(idx_align) > 0:
spt_align = {'data': spt[idx_align]}
spt_inbound = filter_spt(spike_data, spt_align, sp_win)
idx_align = idx_align[spt_inbound]
#spt_align = {'data': spt[idx_align]}
sp_waves_dict = extract_spikes(spike_data, spt_align, sp_win,
resample=resample, contacts=contact)
sp_waves = sp_waves_dict['data'][:,spt_inbound,0]
#if sp_waves_dict.has_key('is_valid'):
# sp_waves = sp_waves[:, sp_waves_dict['is_valid']]
time = sp_waves_dict['time']
if type == "max":
i = sp_waves.argmax(0)
elif type == "min":
i = sp_waves.argmin(0)
else:
raise ValueError("type must be 'max' or 'min'")
#move spike markers
shift = time[i]
spt[idx_align]+=shift
#if spike maximum/minimum was at the edge we have to extract it at the
# new marker and repeat the alignment
tol = 0.1
idx_align = idx_align[(shift<(sp_win[0]+tol)) | (shift>(sp_win[1]-tol))]
iter_id +=1
#print "Align. iteration %d, remaining idx %d" % (iter_id, len(idx_align))
#print shift
ret_dict = {'data':spt}
if remove:
#remove double spikes
FS = spike_data['FS']
ret_dict = remove_doubles(ret_dict, 1000./FS)
return ret_dict
| 5,342,307
|
def rgb2hsi(rgb: np.ndarray,
*,
axis: int=None) -> np.ndarray:
"""
Convert RGB to Hue Saturation Intensity
:param rgb:
:param axis:
:return:
"""
if axis is None:
axis = get_matching_axis(rgb.shape, 3)
big_m, little_m, chroma = _compute_chroma(rgb, axis)
inds = construct_component_inds(axis, rgb.ndim, 3)
hsi = np.zeros(rgb.shape)
hsi[inds[0]] = _compute_rgb_hue(rgb, big_m, little_m, chroma, axis)
hsi[inds[2]] = np.mean(rgb, axis=axis, keepdims=True)
i_nz = hsi[inds[2]] != 0 # type: np.ndarray
if little_m.ndim < i_nz.ndim:
# This only happens in the 1D case
little_m = little_m[slice(None), np.newaxis]
if np.any(i_nz):
hsi[inds[1]][i_nz] = 1 - little_m[i_nz] / hsi[inds[2]][i_nz]
return hsi
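# For reference, the intensity and saturation components above follow the standard HSI
# definitions (I is the channel mean, S = 1 - min/I). A minimal single-pixel sketch with
# plain NumPy, independent of the helper functions used above:
import numpy as np

rgb = np.array([0.6, 0.2, 0.1])             # one RGB pixel in [0, 1], illustrative values
intensity = rgb.mean()                      # I = (R + G + B) / 3 -> 0.3
saturation = 1.0 - rgb.min() / intensity    # S = 1 - m / I      -> ~0.667
print(intensity, saturation)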
| 5,342,308
|
def p_cmdexpr_rtorder(p):
"""cmdexpr : RTORDER
| RTORDER arglist
| RTORDER MACRO"""
| 5,342,309
|
def set(d=False, t=False):
"""
Enable logging types (debug, trace)
"""
assert(type(d) is bool)
assert(type(t) is bool)
global _debug_on, _trace_on
_debug_on = d
_trace_on = t
| 5,342,310
|
def get_movie_brief_actor(actor, soup):
"""
Getting brief data from individual movie webpage (for actor dictionary)
"""
headers=['actor','title','year','rating','vote','genre_list','budget','opening','gross_usa',\
'gross_cw','runtime','director','writer','star','distributor']
# actor of interest
actor = actor
# find movie title
title = " ".join(soup.find('h1').text.split()[:-1])
# find rating
rating = np.nan
try:
rating = float(soup.find('span',attrs={'itemprop':'ratingValue'}).text)
except:
pass
# find vote (rating count)
vote = np.nan
try:
vote = int(soup.find('span',attrs={'itemprop':'ratingCount'}).text.replace(',',''))
except:
pass
# find list of genre
genre_list=[]
try:
for genres in soup.find('div', class_="subtext").find_all('a')[:-1]:
genre_list.append(genres.text)
except:
pass
# find release date
date = np.nan
try:
date_pre = soup.find('div', class_="subtext").find_all('a')[-1].text.split('(')[0]
date = pd.to_datetime(date_pre) ## why is it Timestamp? format ='%d-%B-%Y'
except:
pass
# # find metascorre
# if soup.find('div',class_="metacriticScore score_favorable titleReviewBarSubItem") is not None:
# meta = int(soup.find('div',class_="metacriticScore score_favorable titleReviewBarSubItem").text.strip('\n'))
# else:
# meta = np.nan
# # find plot keywords
# keyword_list=[]
# for keywords in soup.find('div', class_="article", id="titleStoryLine").\
# find('div', class_="see-more inline canwrap").find_all('a')[:-1]:
# keyword_list.append(keywords.text.strip(' '))
# find budget, opening weekend USA, gross USA, cumulative worldwide gross
# assign default value:
budget, opening, gross_usa, gross_cw, distributor = np.nan, np.nan, np.nan, np.nan, np.nan
try:
for line in soup.find('div', class_="article", id="titleDetails").find_all('h4'):
if "Budget:" in line:
budget = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Opening Weekend USA:" in line:
opening = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Gross USA:" in line:
gross_usa = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Cumulative Worldwide Gross:" in line:
gross_cw = int(''.join(s for s in line.next_sibling if s.isdigit()))
if "Production Co:" in line:
distributor = line.findNext().text.replace(' ','')
except:
pass
# find runtime
runtime = np.nan
try:
runtime = int(soup.find_all('time')[-1].text.strip(' min'))
except:
pass
# find director
director= np.nan
try:
director = soup.find('div',class_="credit_summary_item").find('a').text
link_d = soup.find('div',class_="credit_summary_item").find('a').get('href')
except:
pass
# find writer
writer = np.nan
try:
writer_line = soup.find_all('div',class_="credit_summary_item")[1].find_all('a')
link_w = [w.get('href') for w in writer_line]
writer = [w.text for w in writer_line]
if '1 more credit' in writer:
writer.remove('1 more credit')
link_w.pop()
except:
pass
# find star
star = np.nan
try:
star_line = soup.find_all('div',class_="credit_summary_item")[2].find_all('a')
link_s = [s.get('href') for s in star_line]
star = [s.text for s in star_line]
if 'See full cast & crew' in star:
star.remove('See full cast & crew')
link_s.pop()
except:
pass
# # find language
# language= np.nan
# t= []
# matching = []
# for div in soup.find('div', class_="article", id="titleDetails").find_all('div'):
# t.append(div.text.replace('\n','').replace(' ',''))
# matching = [s for s in t if 'Language:' in s]
# language = matching[0].replace(':',' ').replace('|',' ').split(' ')[1:]
# # find country
# country= np.nan
# t= []
# matching = []
# for div in soup.find('div', class_="article", id="titleDetails").find_all('div'):
# t.append(div.text.replace('\n','').replace(' ',''))
# matching = [s for s in t if 'Country:' in s]
# country = matching[0].replace(':',' ').replace('|',' ').split(' ')[1:]
movie_dict = dict(zip(headers, [actor,
title,
date,
rating,
vote,
genre_list,
budget,
opening,
gross_usa,
gross_cw,
runtime,
director,
writer,
star,
distributor]))
return movie_dict
| 5,342,311
|
def mul(a: TensorableType, b: TensorableType) -> Tensor:
"""Returns the product of input tensor_objects with their local gradients"""
a = enforceTensor(a)
b = enforceTensor(b)
output = Tensor(a.data * b.data, requires_grad=(a.requires_grad or b.requires_grad))
output.save_for_backward([a, b])
def backward_fn():
if a.requires_grad:
a_local_gradient = output.grad.data * b.data
a_local_gradient = manageBroadcasting(a.ndim, a.shape, a_local_gradient)
a.grad.data += a_local_gradient
if b.requires_grad:
b_local_gradient = output.grad.data * a.data
b_local_gradient = manageBroadcasting(b.ndim, b.shape, b_local_gradient)
b.grad.data += b_local_gradient
output.backward_fn = backward_fn
return output
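# The backward function above applies the product rule, with gradients reduced over
# broadcast dimensions. A small NumPy illustration of the same rule, independent of the
# Tensor class defined here:
import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])    # shape (2, 2)
b = np.array([10.0, 20.0])                # shape (2,), broadcast across rows
upstream = np.ones((2, 2))                # d(loss)/d(a * b)
grad_a = upstream * b                     # product rule: d(a*b)/da = b
grad_b = (upstream * a).sum(axis=0)       # reduce over the broadcast axis -> shape (2,)
print(grad_a)  # [[10. 20.] [10. 20.]]
print(grad_b)  # [4. 6.]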
| 5,342,312
|
def _validate_asset_icons(icon_manager: 'IconManager') -> None:
"""
Loops through icons in the user's data directory and deletes those that are malformed.
"""
icons_directory = icon_manager.icons_dir
for icon_entry in icons_directory.iterdir():
if icon_entry.is_file():
icon_file_type = filetype.guess(icon_entry)
if icon_file_type is None:
icon_entry.unlink()
elif icon_file_type.extension not in ALLOWED_ICON_EXTENSIONS:
icon_entry.unlink()
| 5,342,313
|
def safe_extract_zip(
in_path: Union[str, Path],
out_path: Union[str, Path],
*,
only_prefix: Iterable[str] = (),
ignore_prefix: Iterable[str] = ('..', '/'),
callback: Callable[[str, Any], None] = null_callback,
callback_description: str = 'Extracting zip files'
):
"""Safely extract a zip file
:param in_path: Path to extract from
:param out_path: Path to extract to
:param only_prefix: Extract only internal paths starting with these prefixes
:param ignore_prefix: Ignore internal paths starting with these prefixes
:param callback: a callback to report on the process, ``callback(action, value)``,
with the following callback signatures:
- ``callback('init', {'total': <int>, 'description': <str>})``,
to signal the start of a process, its total iterations and description
- ``callback('update', <int>)``,
to signal an update to the process and the number of iterations to progress
:param callback_description: the description to return in the callback
:raises `~aiida.tools.importexport.common.exceptions.CorruptArchive`: if the file cannot be read
"""
_filter = _get_filter(only_prefix, ignore_prefix)
try:
with zipfile.ZipFile(in_path, 'r', allowZip64=True) as handle:
callback('init', {'total': 1, 'description': 'Gathering list of files to extract from zip'})
members = [name for name in handle.namelist() if _filter(name)]
callback('init', {'total': len(members), 'description': callback_description})
for membername in members:
callback('update', 1)
handle.extract(path=os.path.abspath(out_path), member=membername)
except zipfile.BadZipfile as error:
raise CorruptArchive(f'The input file cannot be read: {error}')
| 5,342,314
|
def split_multi(vds: 'VariantDataset', *, filter_changed_loci: bool = False) -> 'VariantDataset':
"""Split the multiallelic variants in a :class:`.VariantDataset`.
Parameters
----------
vds : :class:`.VariantDataset`
Dataset in VariantDataset representation.
filter_changed_loci : :obj:`bool`
If any REF/ALT pair changes locus under :func:`.min_rep`, filter that
variant instead of throwing an error.
Returns
-------
:class:`.VariantDataset`
"""
variant_data = hl.experimental.sparse_split_multi(vds.variant_data, filter_changed_loci=filter_changed_loci)
return VariantDataset(vds.reference_data, variant_data)
| 5,342,315
|
def zeros(shape, dtype=K.floatx()):
"""Return all-zeros tensor of given shape and type."""
# As of Keras version 1.1.0, Keras zeros() requires integer values
# in shape (e.g. calling np.zeros() with the Theano backend) and
# thus can't be called with tensor values. This version avoids the
# issue by using the backend zeros() instead.
if K.backend() == 'theano':
from theano import tensor as T
return T.zeros(shape, dtype)
else:
assert K.backend() == 'tensorflow'
import tensorflow as tf
return tf.zeros(shape, dtype)
| 5,342,316
|
def nrc_emo_lex(headlines, bodies):
"""
Counts Number of words in a text associated with 8 different emotions.
Uses EmoLex lexicon: http://saifmohammad.com/WebPages/lexicons.html#EmoLex
"""
lexicon_path = "%s/../data/lexicons/emoLex/" % (path.dirname(path.dirname(path.abspath(__file__))))
word_list = defaultdict(list)
# emotion_list = defaultdict(list)
emotion_set = set()
with open(lexicon_path + 'NRC_emotion_lexicon_list.txt', 'r') as f:
reader = csv.reader(f, delimiter='\t')
for word, emotion, present in reader:
if int(present) == 1: # 1 = word/emotion-allocation present
word_list[word].append(emotion)
# emotion_list[emotion].append(word)
emotion_set.add(emotion)
def generate_emotion_count(string):
emo_count = Counter()
for token in nltk.word_tokenize(string):
token = token.lower()
emo_count += Counter(word_list[token])
# Guarantee same length for each feature vector by adding emotions
# that do not appear in the text
for emotion in emotion_set:
if (emotion not in emo_count):
emo_count[emotion] = 0
return emo_count
emo_counts_head = [generate_emotion_count(headline) for headline in tqdm(headlines)]
emo_counts_body = [generate_emotion_count(body) for body in tqdm(bodies)]
emo_counts_head_df = pd.DataFrame(emo_counts_head)
emo_counts_head_df = emo_counts_head_df.fillna(0)
emo_counts_body_df = pd.DataFrame(emo_counts_body)
emo_counts_body_df = emo_counts_body_df.fillna(0)
emo_counts = np.concatenate([emo_counts_head_df.to_numpy(), emo_counts_body_df.to_numpy()], axis=1)  # as_matrix() was removed in pandas 1.0
return emo_counts
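# A toy illustration of the per-token emotion counting above, using a made-up two-word
# lexicon and a plain split() in place of nltk tokenization:
from collections import Counter, defaultdict

word_list = defaultdict(list, {"happy": ["joy", "trust"], "war": ["fear", "anger"]})
emo_count = Counter()
for token in "Happy people fear war".split():
    emo_count += Counter(word_list[token.lower()])
print(emo_count)  # each of joy, trust, fear, anger counted once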
| 5,342,317
|
def accretion_cylinder(mbh, mdot, r):
"""rschw, omega, facc, teff, zscale = accretion_cylinder(mbh, mdot, r)"""
GM = cgs_graw * mbh * sol_mass
rschw = 2 * GM / cgs_c**2
omega = sqrt( GM / (r * rschw)**3 )
facc = 3 * GM * (mdot * mdot_edd(mbh)) / (8 * pi * (r * rschw)**3) \
* (1 - sqrt(3 / r))
teff = ( facc / cgs_stef )**0.25
zscale = sqrt( 2 * cgs_k_over_mh * teff ) / omega
return rschw, omega, facc, teff, zscale
| 5,342,318
|
def plot_gt_freqs(fp):
"""
Draws a scatterplot of the empirical frequencies of the counted species
versus their Simple Good Turing smoothed values, in rank order. Depends on
pylab and matplotlib.
"""
MLE = MLENGram(1, filter_punctuation=False, filter_stopwords=False)
MLE.train(fp, encoding="utf-8-sig")
counts = dict(MLE.counts[1])
GT = GoodTuringNGram(1, filter_stopwords=False, filter_punctuation=False)
GT.train(fp, encoding="utf-8-sig")
ADD = AdditiveNGram(1, 1, filter_punctuation=False, filter_stopwords=False)
ADD.train(fp, encoding="utf-8-sig")
tot = float(sum(counts.values()))
freqs = dict([(token, cnt / tot) for token, cnt in counts.items()])
sgt_probs = dict([(tok, np.exp(GT.log_prob(tok, 1))) for tok in counts.keys()])
as_probs = dict([(tok, np.exp(ADD.log_prob(tok, 1))) for tok in counts.keys()])
X, Y = np.arange(len(freqs)), sorted(freqs.values(), reverse=True)
plt.loglog(X, Y, "k+", alpha=0.25, label="MLE")
X, Y = np.arange(len(sgt_probs)), sorted(sgt_probs.values(), reverse=True)
plt.loglog(X, Y, "r+", alpha=0.25, label="simple Good-Turing")
X, Y = np.arange(len(as_probs)), sorted(as_probs.values(), reverse=True)
plt.loglog(X, Y, "b+", alpha=0.25, label="Laplace smoothing")
plt.xlabel("Rank")
plt.ylabel("Probability")
plt.legend()
plt.tight_layout()
plt.savefig("img/rank_probs.png")
plt.close("all")
| 5,342,319
|
def test_rectify_latest_version():
"""Test the function rectify_latest_version."""
lst = [
{
"package": "io.vertx:vertx-web",
"actual_latest_version": "3.7.9"
}
]
resp = rectify_latest_version(lst, "maven")
assert resp == "Success"
| 5,342,320
|
def bidirectional(*args, **kwargs): # real signature unknown
"""
Returns the bidirectional class assigned to the character chr as string.
If no such value is defined, an empty string is returned.
"""
pass
| 5,342,321
|
def cost_function_wrapper(theta, cost_function_parameters):
"""Wrapper for the Cost Function"""
cost_function_parameters['theta'] = theta
return cost_function(cost_function_parameters)
| 5,342,322
|
def make_img_tile(imgs, path, epoch, aspect_ratio=1.0,
tile_shape=None, border=1, border_color=0):
"""
"""
if imgs.ndim != 3 and imgs.ndim != 4:
raise ValueError('imgs has wrong number of dimensions.')
n_imgs = imgs.shape[0]
# note: do not reset tile_shape here; honor a caller-supplied grid shape
# Grid shape
img_shape = np.array(imgs.shape[1:3])
if tile_shape is None:
img_aspect_ratio = img_shape[1] / float(img_shape[0])
aspect_ratio *= img_aspect_ratio
tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))
tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))
grid_shape = np.array((tile_height, tile_width))
else:
assert len(tile_shape) == 2
grid_shape = np.array(tile_shape)
# Tile image shape
tile_img_shape = np.array(imgs.shape[1:])
tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border
# Assemble tile image
tile_img = np.empty(tile_img_shape)
tile_img[:] = border_color
for i in range(grid_shape[0]):
for j in range(grid_shape[1]):
img_idx = j + i*grid_shape[1]
# No more images - stop filling out the grid.
if img_idx >= n_imgs:
break
# Convert from [-1, 1] to [0, 1]
img = (imgs[img_idx] + 1) / 2.0
yoff = (img_shape[0] + border) * i
xoff = (img_shape[1] + border) * j
tile_img[yoff:yoff+img_shape[0], xoff:xoff+img_shape[1], ...] = img
img_tile = Image.fromarray(np.uint8(tile_img * 255) , 'L')
if path is not None:
path_name = path + "/iteration_%03d"%(epoch)+".jpg"
img_tile.save(path_name)
return img_tile
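# The grid shape above comes from an aspect-ratio-weighted square root of the image
# count; a short arithmetic sketch of that step:
import numpy as np

n_imgs, aspect_ratio = 10, 1.0
tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))  # ceil(sqrt(10)) = 4
tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))   # 4
print(tile_height, tile_width)  # 4 4 -> a 4x4 grid; the last 6 cells stay border-colored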
| 5,342,323
|
def test_flags_init_app_production():
"""Ensure that extension can be initialized."""
app = Flask(__name__)
app.env = 'production'
app.config['LD_SDK_KEY'] = 'https://no.flag/avail'
with app.app_context():
flags = Flags()
flags.init_app(app)
assert app.extensions['featureflags']
| 5,342,324
|
def dummy_plugin_distribution(dummy_plugin_distribution_name, save_sys_path):
"""Add a dummy plugin distribution to the current working_set."""
dist = pkg_resources.Distribution(
project_name=dummy_plugin_distribution_name,
metadata=DummyEntryPointMetadata(
f"""
[lektor.plugins]
dummy-plugin = {__name__}:DummyPlugin
"""
),
version="1.23",
location=__file__,
)
pkg_resources.working_set.add(dist)
return dist
| 5,342,325
|
def powerup_drift(ship: monospace.Ship):
"""Make all the bullets shifty."""
for blaster in ship.blasters:
blaster.bullet_type = monospace.DriftingShipBullet
| 5,342,326
|
def write_f_songplay_df(spark, f_songplay_df, output_data_path):
"""
Write a song play fact dataframe to an S3 path.
Parameters:
spark (SparkSession): The spark session.
f_songplay_df (DataFrame): The song play fact dataframe.
output_data_path (str): The base S3 bucket URL.
Returns:
None
"""
path = output_data_path + 'f_songplay_df'
print('\nwrite_f_songplay_df to ' + path)
# write songplays table to parquet files partitioned by year and month
f_songplay_df.repartition(1) \
.write \
.partitionBy('year', 'month') \
.parquet(path, mode='overwrite')
| 5,342,327
|
def set_array_significant_figures(sig_figs):
"""Summary.
Parameters
----------
sig_figs
optional int, number of significant figures to be shown when printing
"""
_assert_array_significant_figures_formatting(sig_figs)
global array_significant_figures_stack
array_significant_figures_stack.append(sig_figs)
| 5,342,328
|
def parse_temperature_item(item):
    """Parse an item for time and temperature
    :param item: Definition, e.g. '17.0 > 07:00'
    :returns: dict with temperature and minutes from midnight"""
    temp_time_tuple = item.split(">")
    temperature = float(temp_time_tuple[0].strip())
    minutes_from_midnight = calculate_minutes_from_midnight(
        temp_time_tuple[1].strip())
    return {'minutes_from_midnight': minutes_from_midnight,
            'temperature': temperature}
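# Hedged usage sketch, assuming the helper calculate_minutes_from_midnight("07:00")
# returns 7 * 60 = 420:
result = parse_temperature_item("17.0 > 07:00")
print(result)  # {'minutes_from_midnight': 420, 'temperature': 17.0}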
| 5,342,329
|
def add_file_to_dataset_view(user_data, cache):
"""Add the uploaded file to cloned repository."""
ctx = DatasetAddRequest().load(request.json)
user = cache.ensure_user(user_data)
project = cache.get_project(user, ctx['project_id'])
if not ctx['commit_message']:
ctx['commit_message'] = 'service: dataset add {0}'.format(
ctx['short_name']
)
local_paths = []
for _file in ctx['files']:
local_path = None
if 'file_url' in _file:
commit_message = '{0}{1}'.format(
ctx['commit_message'], _file['file_url']
)
job = cache.make_job(user)
_file['job_id'] = job.job_id
with enqueue_retry(DATASETS_JOB_QUEUE) as queue:
queue.enqueue(
dataset_add_remote_file, user_data, job.job_id,
project.project_id, ctx['create_dataset'], commit_message,
ctx['short_name'], _file['file_url']
)
continue
if 'file_id' in _file:
file = cache.get_file(user, _file['file_id'])
local_path = file.abs_path
elif 'file_path' in _file:
local_path = project.abs_path / Path(_file['file_path'])
if not local_path or not local_path.exists():
return error_response(
INVALID_PARAMS_ERROR_CODE,
'invalid file reference: {0}'.format(json.dumps(_file))
)
ctx['commit_message'] += ' {0}'.format(local_path.name)
local_paths.append(str(local_path))
if local_paths:
with chdir(project.abs_path):
add_file(
local_paths,
ctx['short_name'],
create=ctx['create_dataset'],
force=ctx['force'],
commit_message=ctx['commit_message']
)
try:
_, ctx['remote_branch'] = repo_sync(
Repo(project.abs_path), remote='origin'
)
except GitCommandError:
return error_response(
INTERNAL_FAILURE_ERROR_CODE, 'repo sync failed'
)
return result_response(DatasetAddResponseRPC(), ctx)
| 5,342,330
|
def get_available_adapters() -> dict:
"""Get information on all available adapters
Returns:
(dict) Where keys are adapter names and values are descriptions
"""
return _output_plugin_info(ExtensionManager(namespace='materialsio.adapter'))
| 5,342,331
|
def _must_find_n(session, obj_outer, cls_inner, name_inner):
"""Searches the database for a "namespaced" object, such as a nic on a node.
Raises NotFoundError if there is none. Otherwise returns the object.
Arguments:
session - a SQLAlchemy session to use.
obj_outer - the "owner" object
cls_inner - the "owned" class
name_inner - the name of the "owned" object
"""
obj_inner = _namespaced_query(session, obj_outer, cls_inner, name_inner)
if obj_inner is None:
raise NotFoundError("%s %s on %s %s does not exist." %
(cls_inner.__name__, name_inner,
obj_outer.__class__.__name__, obj_outer.label))
return obj_inner
| 5,342,332
|
def list_providers():
"""
Get list of names of all supported cloud providers
:rtype: list
"""
return [cls.provider_name() for cls in BaseHandler.__subclasses__()]
| 5,342,333
|
def GetRPCProxy(address=None, port=None, url=GOOFY_RPC_URL):
"""Gets an instance (for client side) to access the goofy server.
Args:
address: Address of the server to be connected.
port: Port of the server to be connected.
url: Target URL for the RPC server. Default to Goofy RPC.
"""
address = address or DEFAULT_GOOFY_ADDRESS
port = port or DEFAULT_GOOFY_PORT
return jsonrpc.ServerProxy(
'http://%s:%d%s' % (address, port, url))
| 5,342,334
|
def getparser():
"""
Use argparse to add arguments from the command line
Command-line options
--------------------
-createlapserates : int
Switch for processing lapse rates (default = 0 (no))
-createtempstd : int
Switch for processing hourly temp data into monthly standard deviation (default = 0 (no))
Returns
-------
argparse.ArgumentParser
Parser configured with the options above (call .parse_args() to read the values).
"""
parser = argparse.ArgumentParser(description="select pre-processing options")
# add arguments
parser.add_argument('-createlapserates', action='store', type=int, default=0,
help='option to create lapse rates or not (1=yes, 0=no)')
parser.add_argument('-createtempstd', action='store', type=int, default=0,
help='option to create temperature std of daily data or not (1=yes, 0=no)')
return parser
| 5,342,335
|
def dict_hash_table_100_buckets():
"""Test for hash table with 100 buckets, dictionary."""
ht = HashTable(100, naive_hash)
for word in dictionary_words:
ht.set(word, word)
return ht
| 5,342,336
|
def write_fv_schemes(case):
"""Sets fv_schemes"""
fv_schemes = {
'ddtSchemes' : {'default' : 'Euler'},
'gradSchemes' : {'default' : 'Gauss linear'},
'divSchemes' : {'default' : 'none', 'div(tauMC)' : 'Gauss linear'},
'laplacianSchemes' : {'default' : 'Gauss linear corrected'},
'interpolationSchemes' : {'default' : 'linear',
'reconstruct(rho)' : 'vanLeer',
'reconstruct(U)' : 'vanLeerV',
'reconstruct(T)': 'vanLeer'},
'snGradSchemes' : {'default': 'corrected'}}
with case.mutable_data_file(FileName.FV_SCHEMES) as d:
d.update(fv_schemes)
| 5,342,337
|
def white(N):
"""
White noise.
:param N: Amount of samples.
White noise has a constant power density. It's narrowband spectrum is therefore flat.
The power in white noise will increase by a factor of two for each octave band,
and therefore increases with 3 dB per octave.
"""
return np.random.randn(N)
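# A quick empirical check of the flat-spectrum claim, averaging the FFT power of many
# white-noise realizations:
import numpy as np

rng = np.random.default_rng(0)
N, reps = 4096, 200
psd = np.zeros(N // 2)
for _ in range(reps):
    x = rng.standard_normal(N)
    psd += np.abs(np.fft.rfft(x)[1:N // 2 + 1]) ** 2
psd /= reps
print(psd[:5].mean() / psd[-5:].mean())  # close to 1.0: average power is flat across frequency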
| 5,342,338
|
def annotate_search_plugin_restriction(results, file_path, channel):
"""
Annotate validation results to restrict uploads of OpenSearch plugins
https://github.com/mozilla/addons-server/issues/12462
Once this has settled for a while we may want to merge this with
`annotate_legacy_addon_restrictions`
"""
if not file_path.endswith('.xml'):
return
# We can be broad here. Search plugins are not validated through this
# path and as of right now (Jan 2019) there aren't any legacy type
# add-ons allowed to submit anymore.
msg = ugettext(
u'Open Search add-ons are {blog_link_open}no longer supported on AMO'
u'{blog_link_close}. You can create a {doc_link_open}search extension '
u'instead{doc_link_close}.').format(
blog_link_open=(
'<a href="https://blog.mozilla.org/addons/2019/10/15/'
'search-engine-add-ons-to-be-removed-from-addons-mozilla-org/'
'">'),
blog_link_close='</a>',
doc_link_open=(
'<a href="https://developer.mozilla.org/docs/Mozilla/Add-ons/'
'WebExtensions/manifest.json/chrome_settings_overrides">'),
doc_link_close='</a>')
insert_validation_message(
results, type_='error', message=msg, msg_id='opensearch_unsupported'
)
| 5,342,339
|
def plot_XWSigma(qa_dict,outfile):
"""
Plot XWSigma
Args:
qa_dict: qa dictionary from countpix qa
outfile : file of the plot
"""
camera=qa_dict["CAMERA"]
expid=qa_dict["EXPID"]
pa=qa_dict["PANAME"]
xsigma=qa_dict["METRICS"]["XWSIGMA_FIB"][0]
wsigma=qa_dict["METRICS"]["XWSIGMA_FIB"][1]
xsigma_med=qa_dict["METRICS"]["XWSIGMA"][0]
wsigma_med=qa_dict["METRICS"]["XWSIGMA"][1]
xfiber=np.arange(xsigma.shape[0])
wfiber=np.arange(wsigma.shape[0])
fig=plt.figure()
plt.suptitle("X & W Sigma over sky peaks, Camera: {}, ExpID: {}".format(camera,expid),fontsize=10,y=0.99)
ax1=fig.add_subplot(221)
hist_x=ax1.bar(xfiber,xsigma,align='center')
ax1.set_xlabel("Fiber #",fontsize=10)
ax1.set_ylabel("X std. dev. (# of pixels)",fontsize=10)
ax1.tick_params(axis='x',labelsize=10)
ax1.tick_params(axis='y',labelsize=10)
plt.xlim(0,len(xfiber))
ax2=fig.add_subplot(222)
hist_w=ax2.bar(wfiber,wsigma,align='center')
ax2.set_xlabel("Fiber #",fontsize=10)
ax2.set_ylabel("W std. dev. (# of pixels)",fontsize=10)
ax2.tick_params(axis='x',labelsize=10)
ax2.tick_params(axis='y',labelsize=10)
plt.xlim(0,len(wfiber))
if "XWSIGMA_AMP" in qa_dict["METRICS"]:
xsigma_amp=qa_dict["METRICS"]["XWSIGMA_AMP"][0]
wsigma_amp=qa_dict["METRICS"]["XWSIGMA_AMP"][1]
ax3=fig.add_subplot(223)
heatmap3=ax3.pcolor(xsigma_amp.reshape(2,2),cmap=plt.cm.OrRd)
plt.title('X Sigma = {:.4f}'.format(xsigma_med), fontsize=10)
ax3.set_xlabel("X std. dev. per Amp (# of pixels)",fontsize=10)
ax3.tick_params(axis='x',labelsize=10,labelbottom=False)
ax3.tick_params(axis='y',labelsize=10,labelleft=False)
ax3.annotate("Amp 1\n{:.3f}".format(xsigma_amp[0]),
xy=(0.4,0.4),
fontsize=10
)
ax3.annotate("Amp 2\n{:.3f}".format(xsigma_amp[1]),
xy=(1.4,0.4),
fontsize=10
)
ax3.annotate("Amp 3\n{:.3f}".format(xsigma_amp[2]),
xy=(0.4,1.4),
fontsize=10
)
ax3.annotate("Amp 4\n{:.3f}".format(xsigma_amp[3]),
xy=(1.4,1.4),
fontsize=10
)
ax4=fig.add_subplot(224)
heatmap4=ax4.pcolor(wsigma_amp.reshape(2,2),cmap=plt.cm.OrRd)
plt.title('W Sigma = {:.4f}'.format(wsigma_med), fontsize=10)
ax4.set_xlabel("W std. dev. per Amp (# of pixels)",fontsize=10)
ax4.tick_params(axis='x',labelsize=10,labelbottom=False)
ax4.tick_params(axis='y',labelsize=10,labelleft=False)
ax4.annotate("Amp 1\n{:.3f}".format(wsigma_amp[0]),
xy=(0.4,0.4),
fontsize=10
)
ax4.annotate("Amp 2\n{:.3f}".format(wsigma_amp[1]),
xy=(1.4,0.4),
fontsize=10
)
ax4.annotate("Amp 3\n{:.3f}".format(wsigma_amp[2]),
xy=(0.4,1.4),
fontsize=10
)
ax4.annotate("Amp 4\n{:.3f}".format(wsigma_amp[3]),
xy=(1.4,1.4),
fontsize=10
)
plt.tight_layout()
fig.savefig(outfile)
| 5,342,340
|
def lgamma(x) -> float:
"""
Return the natural logarithm of the gamma function of ``x``.
"""
...
| 5,342,341
|
def read_HiCPro(bedfile, matfile):
"""
Fast loading of the .matrix and .bed files derived from HiC-Pro
Parameters
----------
bedfile : str,
path to the .bed file which contains fragments info
matfile : str,
path to the .matrix file which contains contact counts
Returns
-------
counts : the interaction contacts map
lengths : the lengths of each chromosomes
chrs : the chromosome names
"""
### read and parse fragments file at first
bed_df = pd.read_csv(bedfile, sep='\t', comment="#", header=None, names=['chrs', 'starts', 'ends', 'idxs'])
# get lengths for each chromosome
chrs, indices, lengths = np.unique(bed_df.chrs.values, return_index=True, return_counts=True)
chrs = list(chrs[indices.argsort()])
lengths = lengths[indices.argsort()]
base = bed_df.idxs[0] # start index: 0 or 1
### read and parse counts file then
n = lengths.sum()
shape = (n, n)
# This is the interaction count files
mat_df = pd.read_csv(matfile, sep='\t', comment="#", header=None)
row, col, data = mat_df.values.T
row = row.astype(int)
col = col.astype(int)
# If there are NAs remove them
mask = np.isnan(data)
if np.any(mask):
warnings.warn(f'NAs detected in {matfile}. Removing NAs and replacing with 0.')
row = row[np.invert(mask)] # invert True and False for mask
col = col[np.invert(mask)]
data = data[np.invert(mask)].astype(int)
# if index start from 1
if base not in [0, 1]:
raise ValueError('indices should start either at 0 or 1')
if base == 1:
col -= 1
row -= 1
# convert to a coo_matrix (lower triangular)
counts = coo_matrix((data, (row, col)), shape=shape)
# whether the matrix is lower or upper triangular
if np.all(row <= col):
triangular_upper = True
elif np.all(row >= col):
triangular_upper = False
else:
raise ValueError('The HiC matrix is neither lower nor upper triangular!')
# We need to deal with the fact that we should not duplicate entries for the diagonal
counts = counts.toarray()
if triangular_upper:
counts = counts + np.triu(counts, 1).T
else:
counts = counts + np.tril(counts, -1).T
return counts, lengths, chrs
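# A toy illustration of the symmetrization step above for an upper-triangular matrix,
# showing that diagonal entries are not double counted:
import numpy as np

upper = np.array([[5, 2, 1],
                  [0, 4, 3],
                  [0, 0, 6]])
full = upper + np.triu(upper, 1).T   # mirror only the off-diagonal part
print(full)
# [[5 2 1]
#  [2 4 3]
#  [1 3 6]]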
| 5,342,342
|
def get_objective_by_task(target, task):
"""Returns an objective and a set of metrics for a specific task."""
if task == 'classification':
if target.nunique() == 2:
objective = 'binary'
else:
objective = 'multi'
elif task == 'regression':
objective = 'regression'
else:
raise_invalid_task_error(task)
return objective
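# Usage sketch, assuming the function above is in scope and targets are pandas Series
# (as the nunique() call implies):
import pandas as pd

print(get_objective_by_task(pd.Series([0, 1, 0, 1]), "classification"))     # 'binary'
print(get_objective_by_task(pd.Series(["a", "b", "c"]), "classification"))  # 'multi'
print(get_objective_by_task(pd.Series([1.5, 2.0, 3.2]), "regression"))      # 'regression'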
| 5,342,343
|
def websocket_recv_nb():
"""Receive data from websocket (non-blocking).
:return: The received data
:rtype: str
"""
| 5,342,344
|
def pileupGenes(GenePositions,filename,pad=500000,doBalance=False,
TPM=0,CTCFWapldKO=False,TPMlargerthan=True,
minlength=0,maxlength=5000000,OE=None, useTTS=False):
"""
This function piles up Hi-C contact maps around genes, centered on TSSs or TTSs.
Inputs
------
GenePositions - pandas dataframe - with genes and their transcription intensity
filename - str - is path to cooler file
pad - int - half of the window size in bp
OE - str or None - path to scaling data to use as "expected" to compute observed over expected
useTTS - bool - False to pile on TSS, True to pile on TTS
other parameters do some optional filtering of the data frame
"""
sortString="start"
if useTTS:
sortString="end"
OrderedPositions=GenePositions.sort_values(sortString)
c = cooler.Cooler(filename)
res = c.info['bin-size']
chromsizes = bioframe.fetch_chromsizes('mm9')
chrmList = list(chromsizes.index)
runningCount = 0
pile = []
for mychr in chrmList: #Iterate over chromosomes
mychrstrCooler=mychr
mychrstrDataFrame=mychr#Chromosomes in the dataframe GenePositions
#are labeled 1 to 19, X Y and M, while in the cooler file they are labeld 0 to 21
current = OrderedPositions[OrderedPositions["chrom"] == mychrstrDataFrame]
if len(current) <= 0:
continue
#identify + and - so we can reorient genes
#genes for which strand is +, and current gene is not too long and not too short
currentPlusStrand=current[(current['strand']=='+')&(current['gene_length']<maxlength)
&(current['gene_length']>minlength)]
#genes for which strand is -, and current gene is not too long and not too short
currentMinusStrand=current[(current['strand']=='-')&(current['gene_length']<maxlength)
&(current['gene_length']>minlength)]
if TPMlargerthan: #filter by TPM > threshold
if CTCFWapldKO:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']>=TPM)]
currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_dKO+Adeno-Cre_30251-30253']>=TPM)]
else:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_wildtype']>=TPM)]
currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_wildtype']>=TPM)]
else: #filter by TPM < thresh
if CTCFWapldKO:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']<=TPM)&(currentPlusStrand['next_TPM_dKO']>0)]
currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_dKO+Adeno-Cre_30251-30253']<=TPM)
&(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']>0)]
else:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_wildtype']<=TPM)
&(currentPlusStrand['next_TPM_wildtype']>0)]
currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_wildtype']<=TPM)
&(currentMinusStrand['TPM_wildtype']>0)]
centerString="start"
if useTTS:
centerString="end"
for st, end in zip(currentPlusStrand[centerString].values, currentPlusStrand[centerString].values):
reg1 = '{}:{}-{}'.format(mychrstrCooler, int(np.floor((st - pad) / res) * res),
int(np.floor((st + pad) / res) * res),)
reg2 = '{}:{}-{}'.format(mychrstrCooler,int(np.floor((end - pad) / res) * res),
int(np.floor((end + pad) / res) * res))
#from balanced matrix, fetch regions
try:
mat = c.matrix(balance=doBalance).fetch(reg1, reg2)
if OE is not None:  # divide by expected
mat=mat/OE[mychr]
pile.append(mat)
except Exception as e:
print(e)
#mat = np.nan * np.ones((pad * 2 //res, pad * 2 //res))
print('Cannot retrieve a window:', reg1, reg2)
centerString="end"
if useTTS:
centerString="start"
for st, end in zip(currentMinusStrand[centerString].values, currentMinusStrand[centerString].values):
reg1 = '{}:{}-{}'.format(mychrstrCooler, int(np.floor((st - pad) / res) * res),
int(np.floor((st + pad) / res) * res),)
reg2 = '{}:{}-{}'.format(mychrstrCooler,int(np.floor((end - pad) / res) * res),
int(np.floor((end + pad) / res) * res))
try:
temp=c.matrix(balance=doBalance).fetch(reg1, reg2)
if OE is not None:  # divide by expected
temp=temp/OE[mychr]
mat = temp[::-1].T[::-1].T #Rotate matrix 180 degrees to align genes
pile.append(mat)
except Exception as e:
print(e)
#mat = np.nan * np.ones((pad * 2 //res, pad * 2 //res))
print('Cannot retrieve a window:', reg1, reg2)
return pile
| 5,342,345
|
def model_cnn_2layer(in_ch, in_dim, width, linear_size=128):
"""
CNN, small 2-layer (default kernel size is 4 by 4)
Parameter:
in_ch: input image channel, 1 for MNIST and 3 for CIFAR
in_dim: input dimension, 28 for MNIST and 32 for CIFAR
width: width multiplier
"""
model = nn.Sequential(
nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size),
nn.ReLU(),
nn.Linear(linear_size, 10)
)
return model
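# Each stride-2 convolution halves the spatial size, so a 28x28 input becomes 7x7 before
# the linear layer (hence the in_dim // 4 factor). A shape-check sketch assuming PyTorch
# and that the custom Flatten above behaves like nn.Flatten():
import torch
from torch import nn

in_ch, in_dim, width = 1, 28, 2
model = nn.Sequential(
    nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1),      # 28 -> 14
    nn.ReLU(),
    nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1),  # 14 -> 7
    nn.ReLU(),
    nn.Flatten(),
    nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), 128),
    nn.ReLU(),
    nn.Linear(128, 10),
)
print(model(torch.zeros(1, in_ch, in_dim, in_dim)).shape)  # torch.Size([1, 10])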
| 5,342,346
|
def snack_raw_formants_tcl(wav_fn, frame_shift, window_size, pre_emphasis, lpc_order, tcl_shell_cmd):
"""Implement snack_formants() by calling Snack through Tcl shell
tcl_shell_cmd is the name of the command to invoke the Tcl shell.
Note this method can only be used if Tcl is installed.
The vectors returned here are the raw Snack output, without padding.
For more info, see documentation for snack_raw_formants().
"""
# File path for wav file provided to Tcl script
in_file = wav_fn
# ERROR: wind_dur parameter must be between [0.0001, 0.1].
# ERROR: frame_step parameter must be between [1/sampling rate, 0.1].
# invalid/inconsistent parameters -- exiting.
# HACK: Tcl shell expects double backslashes in Windows path
if sys.platform == 'win32' or sys.platform == 'cygwin': # pragma: no cover
in_file = in_file.replace('\\', '\\\\')
tcl_file = os.path.join(os.path.dirname(wav_fn), 'tclforsnackformant.tcl')
# Write Tcl script to compute Snack formants
f = open(tcl_file, 'w')
script = "#!/usr/bin/env bash\n"
script += '# the next line restarts with tclsh \\\n'
script += 'exec {} "$0" "$@"\n\n'.format(tcl_shell_cmd)
# HACK: The variable user_snack_lib_path is a hack we use in continuous
# integration testing. The reason is that we may not have the
# permissions to copy the Snack library to the standard Tcl library
# location. This is a workaround to load the Snack library from a
# different location, where the location is given by
# user_snack_lib_path.
if user_snack_lib_path is not None:
script += 'pkg_mkIndex {} snack.tcl libsnack.dylib libsound.dylib\n'.format(user_snack_lib_path)
script += 'lappend auto_path {}\n\n'.format(user_snack_lib_path)
script += 'package require snack\n\n'
script += 'snack::sound s\n\n'
script += 's read {}\n\n'.format(in_file)
script += 'set fd [open [file rootname {}].frm w]\n'.format(in_file)
script += 'puts $fd [join [s formant -windowlength {} -framelength {} -windowtype Hamming -lpctype 0 -preemphasisfactor {} -ds_freq 10000 -lpcorder {}]\n\n]\n'.format(window_size / 1000, frame_shift / 1000, pre_emphasis, lpc_order)
script += 'close $fd\n\n'
script += 'exit'
f.write(script)
f.close()
# Run Tcl script
try:
return_code = call([tcl_shell_cmd, tcl_file])
except OSError: # pragma: no cover
os.remove(tcl_file)
raise OSError('Error while attempting to call Snack via Tcl shell. Is Tcl shell command {} correct?'.format(tcl_shell_cmd))
else:
if return_code != 0: # pragma: no cover
os.remove(tcl_file)
raise OSError('Error when trying to call Snack via Tcl shell script.')
# Load results from f0 file and save into return variables
frm_file = os.path.splitext(wav_fn)[0] + '.frm'
num_cols = len(sformant_names)
if os.path.isfile(frm_file):
frm_results = np.loadtxt(frm_file, dtype=float).reshape((-1, num_cols))
estimates_raw = {}
for i in range(num_cols):
estimates_raw[sformant_names[i]] = frm_results[:, i]
# Cleanup and remove f0 file
os.remove(frm_file)
else: # pragma: no cover
raise OSError('Snack Tcl shell error -- unable to locate .frm file')
# Cleanup and remove Tcl script file
os.remove(tcl_file)
return estimates_raw
| 5,342,347
|
def _run_eval(annot_dir, output_dir, eval_tracking=False, eval_pose=True):
"""
Runs the evaluation, and returns the "total mAP" and "total MOTA"
"""
from datasets.posetrack.poseval.py import evaluate_simple
(apAll, _, _), mota = evaluate_simple.evaluate(
annot_dir, output_dir, eval_pose, eval_tracking,
cfg.TRACKING.DEBUG.UPPER_BOUND_4_EVAL_UPPER_BOUND)
return apAll[-1][0], mota[-4][0]
| 5,342,348
|
def reserve_api():
"""Helper function for making API requests to the /reserve API endpoints
:returns: a function that can be called to make a request to /reserve
"""
def execute_reserve_api_request(method, endpoint, **kwargs):
master_api_client = master_api()
return master_api_client(method, "/reserve%s" % endpoint, **kwargs)
return execute_reserve_api_request
| 5,342,349
|
def extract_user_id(source_open_url):
"""
extract the numeric user id from a profile deep link
:param source_open_url: e.g. "sslocal://profile?refer=video&uid=6115075278"
:return: the user id string, or None if the URL is not a profile link
"""
if source_open_url[10:17] != 'profile':
return None
try:
res = re.search(r"\d+$", source_open_url).group(0)
return res.strip()
except (AttributeError, KeyError):
return None
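# Usage sketch, assuming the function above is in scope:
print(extract_user_id("sslocal://profile?refer=video&uid=6115075278"))  # '6115075278'
print(extract_user_id("sslocal://detail?groupid=123"))                  # None (not a profile link)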
| 5,342,350
|
def prolog_rule(line):
"""Specify prolog equivalent"""
def specify(rule):
"""Apply restrictions to rule"""
rule.prolog.insert(0, line)
return rule
return specify
| 5,342,351
|
def simple_interest(p, r, t):
"""
Calculate simple interest and print it.
:param p: principal amount
:param r: interest rate in percent (per period)
:param t: time (number of periods)
:return: None (the result is printed)
"""
SI = (p * r * t) / 100
print("simple interest is: ", SI)
| 5,342,352
|
def load_GloVe_model(path):
"""
It is a function to load GloVe model
:param path: model path
:return: model array
"""
print("Load GloVe Model.")
with open(path, 'r') as f:
content = f.readlines()
model = {}
for line in content:
splitLine = line.split()
word = splitLine[0]
embedding = np.array(splitLine[1:], dtype=float)  # parse vector components as floats
model[word] = embedding
print("Done.", len(model), " words loaded!\n")
return model
| 5,342,353
|
def quadratic_program() -> MPQP_Program:
"""a simple mplp to test the dimensional correctness of its functions"""
A = numpy.array(
[[1, 1, 0, 0], [0, 0, 1, 1], [-1, 0, -1, 0], [0, -1, 0, -1], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
[0, 0, 0, -1]])
b = numpy.array([350, 600, 0, 0, 0, 0, 0, 0]).reshape(8, 1)
c = 25 * make_column([1, 1, 1, 1])
F = numpy.array([[0, 0], [0, 0], [-1, 0], [0, -1], [0, 0], [0, 0], [0, 0], [0, 0]])
Q = 2.0 * numpy.diag([153, 162, 162, 126])
CRa = numpy.vstack((numpy.eye(2), -numpy.eye(2)))
CRb = numpy.array([1000, 1000, 0, 0]).reshape(4, 1)
H = numpy.zeros((F.shape[1], Q.shape[0]))
prog = MPQP_Program(A, b, c, H, Q, CRa, CRb, F)
prog.scale_constraints()
return prog
| 5,342,354
|
def load_img(str_img):
    """
    Load an image from raw bytes, a local path, or an http(s) URL and return its
    content base64-encoded as a UTF-8 string (None if it cannot be loaded).
    """
    str_b64 = None
    if isinstance(str_img, bytes):
        # raw image bytes were passed in directly
        str_b64 = base64.b64encode(str_img).decode('utf-8')
    elif os.path.exists(str_img) and is_path_img(str_img):
        with open(str_img, 'rb') as f:
            content = f.read()
        str_b64 = base64.b64encode(content).decode('utf-8')
    elif str_img.startswith('http'):
        res = rq.get(str_img)
        if res.status_code == 200:
            str_b64 = base64.b64encode(res.content).decode('utf-8')
    return str_b64
| 5,342,355
|
def snr(flux, axis=0):
""" Calculates the S/N ratio of a spectra.
Translated from the IDL routine der_snr.pro """
signal = np.nanmedian(flux, axis=axis)
noise = 1.482602 / np.sqrt(6.) * np.nanmedian(np.abs(2.*flux - \
np.roll(flux, 2, axis=axis) - np.roll(flux, -2, axis=axis)), \
axis=axis)
return signal, noise, signal / noise
| 5,342,356
|
def subtract_images(img_input, img_output, img_height, img_width):
"""Subtract input and output image and compute difference image and ela image"""
input_data = img_input.T
output_data = img_output.T
if len(input_data) != len(output_data):
raise Exception("Input and Output image have different sizes!")
diff = abs(input_data - output_data)
diff = diff.reshape(img_height, img_width, 3)
diff = np.clip(diff, 0, 255)
if auto:
args.multiplier = np.divide(255, diff.max())
diff_multiplied = diff * args.multiplier
diff_multiplied = np.clip(diff_multiplied, 0, 255)
if args.cupy:
diff_img = Image.fromarray(np.asnumpy(diff).astype(np.uint8), 'RGB')
diff_img_multiplied = Image.fromarray(np.asnumpy(diff_multiplied).astype(np.uint8), 'RGB')
else:
diff_img = Image.fromarray(diff.astype(np.uint8), 'RGB')
diff_img_multiplied = Image.fromarray(diff_multiplied.astype(np.uint8), 'RGB')
return diff_img, diff_img_multiplied
| 5,342,357
|
def k4a_playback_get_track_name(playback_handle, track_index, track_name, track_name_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_get_track_name(k4a_playback_t playback_handle,
size_t track_index,
char *track_name,
size_t *track_name_size);
"""
_k4a_playback_get_track_name = record_dll.k4a_playback_get_track_name
_k4a_playback_get_track_name.restype = k4a_buffer_result_t
_k4a_playback_get_track_name.argtypes = (k4a_playback_t,\
ctypes.c_size_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_get_track_name(playback_handle, track_index, track_name, track_name_size)
| 5,342,358
|
def stripLeadingCharacters(charQueue, numChars):
"""
Takes in the queue representation of the text and strips the leading numChars characters.
Args:
charQueue: The text in a Queue object.
numChars: The number of characters to remove.
Returns:
None
"""
for _ in range(numChars):
charQueue.get()
| 5,342,359
|
def recommend_lowercase_d(data: pd.Series, **kwargs) -> int:
"""Returns the recommended value of differencing order 'd' to use
Parameters
----------
data : pd.Series
The data for which the differencing order needs to be calculated
**kwargs: Keyword arguments that can be passed to the difference test.
Values are:
alpha : float, optional
Significance Value, by default 0.05
test : str, optional
The test to use to test the order of differencing, by default 'kpss'
max_d : int, optional
maximum differencing order to try, by default 2
Returns
-------
int
The differencing order to use
"""
recommended_lowercase_d = ndiffs(data, **kwargs)
return recommended_lowercase_d
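# Usage sketch, assuming ndiffs here is pmdarima's ndiffs (which accepts the alpha,
# test and max_d keywords). A random walk typically needs one difference:
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
random_walk = pd.Series(np.cumsum(rng.standard_normal(200)))
print(recommend_lowercase_d(random_walk, test="kpss", max_d=2))  # usually 1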
| 5,342,360
|
def get_player_current_games_to_move(username: str) -> Dict:
"""Public method that returns an array of Daily Chess games
where it is the player's turn to act
Parameters:
username -- username of the player
"""
r = _internal.do_get_request(f"/player/{username}/games/to-move")
return json.loads(r.data.decode('utf-8'))
| 5,342,361
|
def get_rb_data_attribute(xmldict, attr):
"""Get Attribute `attr` from dict `xmldict`
Parameters
----------
xmldict : dict
Blob Description Dictionary
attr : str
Attribute key
Returns
-------
sattr : int
Attribute Values
"""
try:
sattr = int(xmldict["@" + attr])
except KeyError:
raise KeyError(
f"Attribute @{attr} is missing from "
"Blob Description. There may be some "
"problems with your file"
)
return sattr
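# Usage sketch with a minimal blob-description dict (keys carry the XML '@' attribute prefix):
blob_descr = {"@blobid": "0", "@rows": "360", "@type": "flagmap"}
print(get_rb_data_attribute(blob_descr, "rows"))  # 360
# get_rb_data_attribute(blob_descr, "depth")      # would raise KeyError with a descriptive message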
| 5,342,362
|
def dopythonphot(image, xc, yc, aparcsec=0.4, system='AB', ext=None,
psfimage=None, psfradpix=3, recenter=False, imfilename=None,
ntestpositions=100, snthresh=0.0, zeropoint=None,
filtername=None, exptime=None, pixscale=None,
skyannarcsec=[6.0, 12.0], skyval=None,
skyalgorithm='sigmaclipping',
target=None, printstyle=None, exact=True, fitsconvention=True,
phpadu=None, returnflux=False, showfit=False,
verbose=False, debug=False):
""" Measure the flux through aperture(s) and/or psf fitting using the
PythonPhot package.
Inputs:
image : string giving image file name OR a list or 2-tuple giving
the header and data array as [hdr,data]
xc,yc : aperture center in pixel coordinates
aparcsec : aperture radius in arcsec, or a string with a comma-separated
list of aperture radii
psfimage : filename for a fits file containing a psf model
system : report AB or Vega mags ('AB','Vega')
snthresh : If the measured flux is below <snthresh>*fluxerr then the
resulting magnitude is reported as a lower limit.
zeropoint : fix the zeropoint (if not provided, we look it up from
hardcoded tables)
skyannarcsec : inner and outer radius of the sky annulus (in arcsec)
target : name of the target object (for printing in snanastyle)
printstyle : None or 'default' = report MJD, filter, and photometry
'verbose' or 'long' = include target name and position
'snana' = report mags in the format of a SNANA .dat file.
fitsconvention : xc,yc position follows the fits convention with (1,1)
as the lower left pixel. Otherwise, follow the python/pyfits
convention with (0,0) as the lower left pixel.
returnflux : instead of returning a list of strings containing all the
flux and magnitude information, simply return a single flux val
Note : No recentering is done (i.e. this does forced photometry at the
given pixel position)
"""
from PythonPhot import photfunctions
if debug == 1:
import pdb
pdb.set_trace()
imhdr, imdat = getheaderanddata(image, ext=ext)
if imfilename is None:
if isinstance(image, str):
imfilename = image
elif 'FILENAME' in imhdr:
imfilename = imhdr['FILENAME']
else:
imfilename = 'unknown'
if imdat.dtype != 'float64':
imdat = imdat.astype('float64', copy=False)
if not filtername:
if 'FILTER1' in imhdr:
if 'CLEAR' in imhdr['FILTER1']:
filtername = imhdr['FILTER2']
else:
filtername = imhdr['FILTER1']
else:
filtername = imhdr['FILTER']
if not exptime:
if 'EXPTIME' in imhdr:
exptime = imhdr['EXPTIME']
else:
raise exceptions.RuntimeError(
"Cannot determine exposure time for %s" % imfilename)
if not pixscale:
pixscale = getpixscale(imhdr, ext=ext)
if not np.iterable(aparcsec):
aparcsec = np.array([aparcsec])
elif not isinstance(aparcsec, np.ndarray):
aparcsec = np.array(aparcsec)
appix = np.array([ap / pixscale for ap in aparcsec])
skyannpix = np.array([skyrad / pixscale for skyrad in skyannarcsec])
if len(appix) >= 1:
assert skyannpix[0] >= np.max(
appix), "Sky annulus must be >= largest aperture."
camera = getcamera(imhdr)
# Define the conversion factor from the values in this image
# to photons : photons per ADU.
if phpadu is None:
if 'BUNIT' not in imhdr:
if camera == 'WFC3-IR' and 'EXPTIME' in imhdr:
phpadu = imhdr['EXPTIME']
else:
phpadu = 1
elif imhdr['BUNIT'].lower() in ['cps', 'electrons/s']:
phpadu = imhdr['EXPTIME']
elif imhdr['BUNIT'].lower() in ['counts', 'electrons']:
phpadu = 1
assert (
phpadu is not None), "Can't determine units from the image header."
if fitsconvention:
xpy, ypy = xc - 1, yc - 1
else:
xpy, ypy = xc, yc
if recenter:
xim, yim = getxycenter([imhdr, imdat], xc, yc,
fitsconvention=True, radec=False,
verbose=verbose)
if verbose:
print("Recentered position (x,y) : %.2f %.2f" % (xim, yim))
ra, dec = xy2radec(imhdr, xim, yim)
print("Recentered position (ra,dec) : %.6f %.6f" % (ra, dec))
output_PythonPhot = photfunctions.get_flux_and_err(
imdat, psfimage, [xpy, ypy],
psfradpix=psfradpix, apradpix=appix, ntestpositions=ntestpositions,
skyannpix=skyannpix, skyalgorithm=skyalgorithm, setskyval=skyval,
recenter_target=False, recenter_fakes=True, exact=exact,
exptime=exptime, ronoise=1, phpadu=phpadu,
showfit=showfit, verbose=verbose, debug=debug)
apflux, apfluxerr, psfflux, psffluxerr, sky, skyerr = output_PythonPhot
if not np.iterable(apflux):
apflux = np.array([apflux])
apfluxerr = np.array([apfluxerr])
# Define aperture corrections for each aperture
if zeropoint is not None:
zpt = zeropoint
apcor = np.zeros(len(aparcsec))
aperr = np.zeros(len(aparcsec))
else:
zpt = hstzpt_apcorr.getzpt(image, system=system)
if camera == 'WFC3-IR':
# TODO: allow user to choose an alternate EE table?
apcor, aperr = hstzpt_apcorr.apcorrWFC3IR(filtername, aparcsec)
elif camera == 'WFC3-UVIS':
apcor, aperr = hstzpt_apcorr.apcorrWFC3UVIS(filtername, aparcsec)
elif camera == 'ACS-WFC':
apcor, aperr = hstzpt_apcorr.apcorrACSWFC(filtername, aparcsec)
# record the psf flux as a final infinite aperture for printing purposes
if psfimage is not None:
aparcsec = np.append(aparcsec, np.inf)
apflux = np.append(apflux, [psfflux])
apfluxerr = np.append(apfluxerr, [psffluxerr])
apcor = np.append(apcor, 0)
# apply aperture corrections to flux and mags
# and define upper limit mags for fluxes with significance <snthresh
mag, magerr = np.zeros(len(apflux)), np.zeros(len(apflux))
for i in range(len(apflux)):
if np.isfinite(aparcsec[i]):
# For actual aperture measurements (not the psf fitting flux),
# apply aperture corrections to the measured fluxes
# Flux rescaled to larger aperture:
apflux[i] *= 10 ** (0.4 * apcor[i])
# Flux error rescaled:
df = apfluxerr[i] * 10 ** (0.4 * apcor[i])
# Systematic err from aperture correction :
dfap = 0.4 * np.log(10) * apflux[i] * aperr[i]
apfluxerr[i] = np.sqrt(df ** 2 + dfap ** 2) # total flux err
if verbose > 1:
print(" FERRTOT FERRSTAT FERRSYS")
print(" %.5f %.5f %.5f" % (apfluxerr[i], df, dfap))
if apflux[i] < abs(apfluxerr[i]) * snthresh:
# no real detection. Report mag as an upper limit
sigmafactor = snthresh or 3
mag[i] = -2.5 * np.log10(sigmafactor * abs(apfluxerr[i])) \
+ zpt - apcor[i]
magerr[i] = -9.0
else:
# Good detection. convert to a magnitude (ap correction already
# applied)
mag[i] = -2.5 * np.log10(apflux[i]) + zpt
magerr[i] = 1.0857 * apfluxerr[i] / apflux[i]
if debug:
import pdb
pdb.set_trace()
if returnflux:
return apflux
if 'EXPSTART' in imhdr and 'EXPEND' in imhdr:
mjdobs = (imhdr['EXPEND'] + imhdr['EXPSTART'])/2.
else:
mjdobs = 0.0
if verbose and printstyle == 'snana':
# Convert to SNANA fluxcal units and Construct a SNANA-style OBS
# line, e.g.
# OBS: 56456.500 H wol 0.000 8.630 25.160 -9.000
fluxcal = apflux * 10 ** (0.4 * (27.5 - zpt))
fluxcalerr = apfluxerr * 10 ** (0.4 * (27.5 - zpt))
print('VARLIST: MJD FLT FIELD FLUXCAL FLUXCALERR MAG '
'MAGERR ZPT')
elif verbose:
if printstyle.lower() in ['long', 'verbose']:
print('# TARGET RA DEC MJD FILTER '
' APER FLUX FLUXERR MAG MAGERR MAGSYS '
' ZP SKY SKYERR IMAGE')
else:
print('# MJD FILTER APER FLUX FLUXERR MAG '
'MAGERR MAGSYS ZP SKY SKYERR')
if printstyle is not None:
printstyle = printstyle.lower()
ra, dec = 0, 0
if (printstyle is not None and
printstyle.lower() in ['snana', 'long', 'verbose']):
if not target and 'FILENAME' in imhdr.keys():
target = imhdr['FILENAME'].split('_')[0]
elif not target:
target = 'target'
ra, dec = xy2radec(imhdr, xc, yc, ext=ext)
maglinelist = []
for iap in range(len(aparcsec)):
if printstyle == 'snana':
magline = 'OBS: %8.2f %6s %s %8.3f %8.3f '\
'%8.3f %8.3f %.3f' % (
float(mjdobs), FilterAlpha[filtername], target,
fluxcal[iap], fluxcalerr[iap], mag[iap], magerr[iap],
zpt)
elif printstyle in ['long', 'verbose']:
magline = '%-15s %10.5f %10.5f %.3f %6s %4.2f %9.4f %8.4f '\
' %9.4f %8.4f %5s %7.4f %7.4f %6.4f %s' % (
target, ra, dec, float(mjdobs), filtername,
aparcsec[iap],
apflux[iap], apfluxerr[iap], mag[iap], magerr[iap],
system,
zpt, sky, skyerr, imfilename)
else:
magline = '%.3f %6s %4.2f %9.4f %8.4f %9.4f %8.4f %5s ' \
'%7.4f %7.4f %6.4f' % (
float(mjdobs), filtername, aparcsec[iap],
apflux[iap], apfluxerr[iap], mag[iap], magerr[iap],
system,
zpt, sky, skyerr)
maglinelist.append(magline)
return maglinelist
| 5,342,363
|
def zip_dir(source_dir, archive_file, fnmatch_list=None):
"""Creates an archive of the given directory and stores it in the given
archive_file which may be a filename as well. By default, this function
will look for a .cfignore file and exclude any matching entries from the
archive.
"""
if fnmatch_list is None:
fnmatch_list = []
cwd = os.getcwd()
try:
with zipfile.ZipFile(
archive_file,
mode='w',
compression=zipfile.ZIP_DEFLATED) as zipf:
if os.path.isdir(source_dir):
os.chdir(source_dir)
files = list_files(source_dir, fnmatch_list)
for f in files:
name = f['fn'].replace(source_dir, '')
compress = zipfile.ZIP_STORED if f['fn'].endswith(
'/') else zipfile.ZIP_DEFLATED
zipf.write(f['fn'], arcname=name, compress_type=compress)
else:
zipf.write(
source_dir,
arcname=os.path.basename(source_dir),
compress_type=zipfile.ZIP_DEFLATED)
finally:
os.chdir(cwd)
return archive_file
| 5,342,364
|
def tls_params(mqtt_config):
"""Return the TLS configuration parameters from a :class:`.MQTTConfig`
object.
Args:
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
Returns:
dict: A dict {'ca_certs': ca_certs, 'certfile': certfile,
'keyfile': keyfile} with the TLS configuration parameters, or None if
no TLS connection is used.
.. versionadded:: 0.6.0
"""
# Set up a dict containing TLS configuration parameters for the MQTT
# client.
if mqtt_config.tls.hostname:
return {'ca_certs': mqtt_config.tls.ca_file,
'certfile': mqtt_config.tls.client_cert,
'keyfile': mqtt_config.tls.client_key}
# Or don't use TLS.
else:
return None
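
# Hedged usage sketch: pass the returned dict straight to a paho-mqtt client.
# The broker_address/port attributes on mqtt_config are assumptions here; only
# the tls_* fields are defined by the function above.
import paho.mqtt.client as mqtt

def connect_mqtt(mqtt_config):
    client = mqtt.Client()
    params = tls_params(mqtt_config)
    if params is not None:
        # paho-mqtt's tls_set accepts exactly these keywords:
        # ca_certs, certfile and keyfile.
        client.tls_set(**params)
    client.connect(mqtt_config.broker_address, mqtt_config.port)
    return client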
| 5,342,365
|
def is_admin():
"""Check if current user is an admin."""
try:
return flask.g.admin
except AttributeError:
return False
| 5,342,366
|
def statistic():
""" RESTful CRUD Controller """
return crud_controller()
| 5,342,367
|
def parse_args():
""" Parses command line arguments.
:return: argparse parser with parsed command line args
"""
parser = argparse.ArgumentParser(description='Godot AI Bridge (GAB) - DEMO Environment Action Client')
parser.add_argument('--id', type=int, required=False, default=DEFAULT_AGENT,
help=f'the id of the agent to which this action will be sent (default: {DEFAULT_AGENT})')
parser.add_argument('--host', type=str, required=False, default=DEFAULT_HOST,
help=f'the IP address of host running the GAB action listener (default: {DEFAULT_HOST})')
parser.add_argument('--port', type=int, required=False, default=DEFAULT_PORT,
help=f'the port number of the GAB action listener (default: {DEFAULT_PORT})')
parser.add_argument('--verbose', required=False, action="store_true",
help='increases verbosity (displays requests & replies)')
return parser.parse_args()
| 5,342,368
|
async def test_async_edit_development_config(
aresponses, readarr_client: ReadarrClient
) -> None:
"""Test editing development config."""
aresponses.add(
"127.0.0.1:8787",
f"/api/{READARR_API}/config/development",
"PUT",
aresponses.Response(
status=202,
headers={"Content-Type": "application/json"},
),
match_querystring=True,
)
data = await readarr_client.async_edit_development_config(
ReadarrDevelopmentConfig("test")
)
assert isinstance(data, ReadarrDevelopmentConfig)
| 5,342,369
|
def test_all():
"""function test_all
Args:
Returns:
"""
test1()
| 5,342,370
|
def segment_range_to_fragment_range(segment_start, segment_end, segment_size,
fragment_size):
"""
Takes a byterange spanning some segments and converts that into a
byterange spanning the corresponding fragments within their fragment
archives.
Handles prefix, suffix, and fully-specified byte ranges.
:param segment_start: first byte of the first segment
:param segment_end: last byte of the last segment
:param segment_size: size of an EC segment, in bytes
:param fragment_size: size of an EC fragment, in bytes
:returns: a 2-tuple (frag_start, frag_end) where
* frag_start is the first byte of the first fragment, or None if this
is a suffix byte range
* frag_end is the last byte of the last fragment, or None if this is a
prefix byte range
"""
# Note: segment_start and (segment_end + 1) are
# multiples of segment_size, so we don't have to worry
# about integer math giving us rounding troubles.
#
# There's a whole bunch of +1 and -1 in here; that's because HTTP wants
# byteranges to be inclusive of the start and end, so e.g. bytes 200-300
# is a range containing 101 bytes. Python has half-inclusive ranges, of
# course, so we have to convert back and forth. We try to keep things in
# HTTP-style byteranges for consistency.
# the index of the first byte of the first fragment
fragment_start = ((
segment_start // segment_size * fragment_size)
if segment_start is not None else None)
# the index of the last byte of the last fragment
fragment_end = (
# range unbounded on the right
None if segment_end is None else
# range unbounded on the left; no -1 since we're
# asking for the last N bytes, not to have a
# particular byte be the last one
((segment_end + 1) // segment_size
* fragment_size) if segment_start is None else
# range bounded on both sides; the -1 is because the
# rest of the expression computes the length of the
# fragment, and a range of N bytes starts at index M
# and ends at M + N - 1.
((segment_end + 1) // segment_size * fragment_size) - 1)
return (fragment_start, fragment_end)
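
# Worked example of the arithmetic above (values chosen for illustration):
# with 1 MiB segments and 300 KiB fragments, the first two whole segments map
# to the first two whole fragments, and a suffix request stays a suffix.
seg_size, frag_size = 1048576, 307200
assert segment_range_to_fragment_range(
    0, 2 * seg_size - 1, seg_size, frag_size) == (0, 2 * frag_size - 1)
assert segment_range_to_fragment_range(
    None, seg_size, seg_size, frag_size) == (None, frag_size)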
| 5,342,371
|
def update_dict(d, u):
""" Recursively update dict d with values from dict u.
Args:
d: Dict to be updated
u: Dict with values to use for update
Returns: Updated dict
"""
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):  # collections.Mapping alias was removed in Python 3.10
            default = v.copy()
            default.clear()
            r = update_dict(d.get(k, default), v)
d[k] = r
else:
d[k] = v
return d
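
# Small usage sketch: nested mappings are merged recursively, flat keys are
# overwritten, and the target dict is updated in place.
base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
override = {'db': {'port': 6432}, 'debug': True}
merged = update_dict(base, override)
assert merged == {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}
assert merged is base  # updated in place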
| 5,342,372
|
def test_third_party_overwrite_build_file():
""" this test emulates the work of a developer contributing recipes to ConanCenter, and
replacing the original build script with your one one.
The export_sources is actually copying CMakeLists.txt into the "src" folder, but the
'download' will overwrite it, so it is necessary to copy it again
"""
conanfile = textwrap.dedent(r"""
import os, shutil
from conan import ConanFile
from conan.tools.files import save, load
class Pkg(ConanFile):
name = "mypkg"
version = "1.0"
exports_sources = "CMakeLists.txt"
def layout(self):
self.folders.source = "src"
self.folders.build = "build"
def source(self):
# emulate a download from web site
save(self, "CMakeLists.txt", "MISTAKE: Very old CMakeLists to be replaced")
# Now I fix it with one of the exported files
shutil.copy("../CMakeLists.txt", ".")
def build(self):
if "MISTAKE" in load(self, os.path.join(self.source_folder, "CMakeLists.txt")):
raise Exception("MISTAKE BUILD!")
""")
client = TestClient()
client.save({"conanfile.py": conanfile,
"conandata.yml": "",
"CMakeLists.txt": "My better cmake"})
client.run("install .")
client.run("source .")
client.run("build .")
assert "conanfile.py (mypkg/1.0): Calling build()" in client.out
# of course create should work too
client.run("create .")
assert "mypkg/1.0: Created package" in client.out
| 5,342,373
|
def get_dataset(args, tokenizer, evaluate=False):
"""Convert the text file into the GPT-2 TextDataset format.
Args:
tokenizer: The GPT-2 tokenizer object.
evaluate: Whether to evalute on the dataset.
"""
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return LineByLineTextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size
)
else:
return TextDataset(
tokenizer=tokenizer,
file_path=file_path,
block_size=args.block_size,
overwrite_cache=args.overwrite_cache,
)
| 5,342,374
|
def test_function_decorators():
"""Function Decorators."""
# Function decorators are simply wrappers to existing functions. Putting the ideas mentioned
# above together, we can build a decorator. In this example let's consider a function that
# wraps the string output of another function by p tags.
# This is the function that we want to decorate.
def greeting(name):
return "Hello, {0}!".format(name)
# This function decorates another functions output with <p> tag.
def decorate_with_p(func):
def function_wrapper(name):
return "<p>{0}</p>".format(func(name))
return function_wrapper
# Now, let's call our decorator and pass the function we want decorate to it.
my_get_text = decorate_with_p(greeting)
# Here we go, we've just decorated the function output without changing the function itself.
assert my_get_text('John') == '<p>Hello, John!</p>' # With decorator.
assert greeting('John') == 'Hello, John!' # Without decorator.
    # Now, Python makes creating and using decorators a bit cleaner and nicer for the programmer
    # through some syntactic sugar. There is a neat shortcut for that, which is to mention the
# name of the decorating function before the function to be decorated. The name of the
# decorator should be prepended with an @ symbol.
@decorate_with_p
def greeting_with_p(name):
return "Hello, {0}!".format(name)
assert greeting_with_p('John') == '<p>Hello, John!</p>'
    # Now let's consider that we want to decorate our greeting function with one more function
    # that wraps the string output in a div.
# This will be our second decorator.
def decorate_with_div(func):
def function_wrapper(text):
return "<div>{0}</div>".format(func(text))
return function_wrapper
# With the basic approach, decorating get_text would be along the lines of
# greeting_with_div_p = decorate_with_div(decorate_with_p(greeting_with_p))
# With Python's decorator syntax, same thing can be achieved with much more expressive power.
@decorate_with_div
@decorate_with_p
def greeting_with_div_p(name):
return "Hello, {0}!".format(name)
assert greeting_with_div_p('John') == '<div><p>Hello, John!</p></div>'
# One important thing to notice here is that the order of setting our decorators matters.
# If the order was different in the example above, the output would have been different.
# Passing arguments to decorators.
    # Looking back at the example before, you can notice how redundant the decorators in the
    # example are: two decorators (decorate_with_div, decorate_with_p), each with the same
    # functionality but wrapping the string with different tags. We can definitely do much better
# than that. Why not have a more general implementation for one that takes the tag to wrap
# with as a string? Yes please!
def tags(tag_name):
def tags_decorator(func):
def func_wrapper(name):
return "<{0}>{1}</{0}>".format(tag_name, func(name))
return func_wrapper
return tags_decorator
@tags('div')
@tags('p')
def greeting_with_tags(name):
return "Hello, {0}!".format(name)
assert greeting_with_tags('John') == '<div><p>Hello, John!</p></div>'
| 5,342,375
|
def log_to_stderr(log_level='INFO', force=False):
"""
Shortcut allowing to display logs from workers.
:param log_level: Set the logging level of this logger.
:param force: Add handler even there are other handlers already.
"""
if not log.handlers or force:
mp.log_to_stderr()
log.setLevel(log_level)
| 5,342,376
|
def make_loci_field( loci ):
""" make string representation of contig loci """
codes = [L.code for L in loci]
return c_delim2.join( codes )
| 5,342,377
|
def compute_wilderness_impact1(ground_truth_all, prediction_all, video_list, known_classes, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
""" Compute wilderness impact for each video (WI=Po/Pc < 1)
"""
wi = np.zeros((len(tiou_thresholds), len(known_classes)))
    # Initialize true positive and false positive vectors.
tp_u2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
tp_k2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all))) # TPc in WACV paper
fp_u2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all))) # FPo in WACV paper
fp_k2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all))) # FPc in WACV paper
fp_k2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
fp_bg2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
fp_bg2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all)))
ground_truth_by_vid = ground_truth_all.groupby('video-id')
prediction_by_vid = prediction_all.groupby('video-id')
def _get_predictions_with_vid(prediction_by_vid, video_name):
"""Get all predicitons of the given video. Return empty DataFrame if there
is no predcitions with the given video.
"""
try:
return prediction_by_vid.get_group(video_name).reset_index(drop=True)
        except KeyError:
return pd.DataFrame()
# compute the TP, FPo and FPc for each predicted segment.
vidx_offset = 0
all_scores, all_max_tious = [], []
for video_name in tqdm(video_list, total=len(video_list), desc='Compute WI'):
ground_truth = ground_truth_by_vid.get_group(video_name).reset_index()
prediction = _get_predictions_with_vid(prediction_by_vid, video_name)
if prediction.empty:
vidx_offset += len(prediction)
all_scores.extend([0] * len(prediction)) # only for confidence score
all_max_tious.extend([0] * len(prediction))
continue # no predictions for this video
all_scores.extend(prediction['score'].values.tolist())
lock_gt = np.zeros((len(tiou_thresholds),len(ground_truth)))
for idx, this_pred in prediction.iterrows():
tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values,
ground_truth[['t-start', 't-end']].values)
# attach each prediction with the gt that has maximum tIoU
max_iou = tiou_arr.max()
max_jdx = tiou_arr.argmax()
all_max_tious.append(max_iou)
label_pred = this_pred['label']
label_gt = int(ground_truth.loc[max_jdx]['label'])
for tidx, tiou_thr in enumerate(tiou_thresholds):
if max_iou > tiou_thr:
if label_pred == label_gt and lock_gt[tidx, max_jdx] == 0:
if label_gt == 0:
tp_u2u[tidx, vidx_offset + idx] = 1 # true positive (u2u), not used by WI by default
else:
tp_k2k[tidx, label_pred-1, vidx_offset + idx] = 1 # true positive (k2k)
lock_gt[tidx, max_jdx] = 1 # lock this ground truth
else:
if label_gt == 0: # false positive (u2k)
fp_u2k[tidx, label_pred-1, vidx_offset + idx] = 1
else: # false positive (k2k, k2u)
if label_pred == 0:
fp_k2u[tidx, vidx_offset + idx] = 1
else:
fp_k2k[tidx, label_pred-1, vidx_offset + idx] = 1
else: # GT is defined to be background (known), must be FP
if label_pred == 0:
fp_bg2u[tidx, vidx_offset + idx] = 1
else:
fp_bg2k[tidx, label_pred-1, vidx_offset + idx] = 1
# move the offset
vidx_offset += len(prediction)
stats = {'tp_k2k': tp_k2k, 'tp_u2u': tp_u2u, 'fp_k2k': fp_k2k, 'fp_k2u': fp_k2u, 'fp_u2k': fp_u2k, 'fp_bg2k': fp_bg2k, 'fp_bg2u': fp_bg2u,
'scores': all_scores, 'max_tious': all_max_tious}
# Here we assume the background detections (small tIoU) are from the background class, which is a known class
fp_k2u += fp_bg2u
fp_k2k += fp_bg2k
    tp_k2k_sum = np.sum(tp_k2k, axis=-1).astype(float)  # np.float alias was removed in NumPy 1.24
    fp_u2k_sum = np.sum(fp_u2k, axis=-1).astype(float)
    fp_k2k_sum = np.sum(fp_k2k, axis=-1).astype(float)
wi = fp_u2k_sum / (tp_k2k_sum + fp_k2k_sum + 1e-6)
return wi, stats
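
# Hedged usage sketch with toy data (column names follow those indexed by the
# function above; label 0 marks the unknown class). The second prediction hits
# an unknown ground-truth segment with a known label, so it counts as FPo and
# pushes WI towards 1.
gt = pd.DataFrame({'video-id': ['v1', 'v1'],
                   't-start': [0.0, 10.0], 't-end': [5.0, 15.0],
                   'label': [1, 0]})
pred = pd.DataFrame({'video-id': ['v1', 'v1'],
                     't-start': [0.0, 10.0], 't-end': [5.0, 15.0],
                     'label': [1, 1], 'score': [0.9, 0.8]})
wi, stats = compute_wilderness_impact1(gt, pred, ['v1'], known_classes=[1])
print(wi.shape)  # (n_tiou_thresholds, n_known_classes)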
| 5,342,378
|
def write_data_header(ping, status):
""" write output log file header
:param status: p1125 information
:return: success <True/False>
"""
try:
with open(os.path.join(CSV_FILE_PATH, filename), "w+") as f:
f.write("# This file is auto-generated by p1125_example_mahrs_csv.py\n".format(filename))
f.write("# {}\n".format(filename))
f.write("# p1125_ping = {}\n".format(ping))
f.write("# p1125_status = {}\n".format(status))
f.write("# p1125_settings = {{'VOUT': {}, 'TIME_CAPTURE_WINDOW_S': {}, 'TIME_TOTAL_RUN_S': {}, "
"'CONNECT_PROBE': {}, 'DOWN_SAMPLE_FACTOR': {}}}\n".format(VOUT,
TIME_CAPTURE_WINDOW_S, TIME_TOTAL_RUN_S, CONNECT_PROBE, DOWN_SAMPLE_FACTOR))
f.write("# time, uA, Max uA\n")
except Exception as e:
logger.error(e)
return False
return True
| 5,342,379
|
def register_config_validator(type, validator_class):
"""
Register a config value validator.
Args:
type: The value type.
validator_class: The validator class type.
"""
_config_validators_registry[type] = validator_class
| 5,342,380
|
def clear_flags(header, flags=None):
"""Utility function for management of flag related metadata."""
bitmask = BitmaskWrapper(header['flags'])
if flags is not None:
_verify_flags(flags)
bitmask.clear([flag-1 for flag in flags])
else:
bitmask.clear()
| 5,342,381
|
def require_client(func):
"""
Decorator for class methods that require a client either through keyword
argument, or through the object's client attribute.
Returns:
        A wrapped version of the function. The object's client attribute will
        be passed in as the client keyword if None is provided.
Raises:
AssertionError : Raised when the method is called without a client
keyword set and no client attribute.
"""
@wraps(func)
async def wrapper(self, *args, **kwargs):
client = kwargs.get("client", None) or getattr(self, "client", None)
if client is None:
msg = (
"{0} object does not have a client -- {0}.{1} will do "
"nothing. To set a client, initialize the object with "
"{0}(..., client=your_client). Alternatively, you can "
"use the client keyword argument in the method."
).format(
self.__class__.__name__,
func.__name__,
)
raise AssertionError(msg)
else:
kwargs["client"] = client
return await func(self, *args, **kwargs)
return wrapper
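
# Hedged usage sketch for the decorator above. FakeClient and Resource are
# hypothetical stand-ins; any object exposing the awaited call would work.
import asyncio

class FakeClient:
    async def fetch(self, path):
        return {'path': path}

class Resource:
    def __init__(self, client=None):
        self.client = client

    @require_client
    async def load(self, path, client=None):
        # `client` is guaranteed to be set by the wrapper.
        return await client.fetch(path)

resource = Resource(client=FakeClient())
print(asyncio.run(resource.load('/items/1')))  # {'path': '/items/1'}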
| 5,342,382
|
def untar(inpath: PathOrStr, outdir: PathOrStr) -> None:
"""
Unpack tarfile.
Parameters
----------
inpath
Path to tarfile
outdir
Desired output directory
"""
logging.info(f"Untarring {inpath} to {outdir}")
with tarfile.open(inpath) as archive:
members = archive.getmembers()
for item in tqdm(iterable=members, total=len(members)):
archive.extract(member=item, path=outdir)
| 5,342,383
|
async def on_shard_ready(shard_id : int) -> None:
"""When a shard starts print out that the shard has started.
Args:
shard_id (int): The ID of the shard that has started. (Starts from 0).
"""
print(f"{Style.BRIGHT}{Fore.CYAN}[SHARD-STARTED]{Fore.WHITE} Shard {Fore.YELLOW}{shard_id}{Fore.WHITE} has started!")
| 5,342,384
|
def test_set_params_regressor():
"""Test set_params method of Regressor class."""
regressor = Regressor()
regressor.set_params(strategy="LightGBM")
assert regressor._Regressor__strategy == "LightGBM"
regressor.set_params(strategy="RandomForest")
assert regressor._Regressor__strategy == "RandomForest"
regressor.set_params(strategy="ExtraTrees")
assert regressor._Regressor__strategy == "ExtraTrees"
regressor.set_params(strategy="RandomForest")
assert regressor._Regressor__strategy == "RandomForest"
regressor.set_params(strategy="Tree")
assert regressor._Regressor__strategy == "Tree"
regressor.set_params(strategy="AdaBoost")
assert regressor._Regressor__strategy == "AdaBoost"
regressor.set_params(strategy="Linear")
assert regressor._Regressor__strategy == "Linear"
regressor.set_params(strategy="Bagging")
assert regressor._Regressor__strategy == "Bagging"
with pytest.warns(UserWarning) as record:
regressor.set_params(wrong_strategy="wrong_strategy")
assert len(record) == 1
| 5,342,385
|
def plot_dislikes_vs_videos():
"""
Plots a graph by looking at the data stored in the files, and then saves the graph in Assets/Graphs as png.
None -> None
"""
video_dislikes = fio.read.get_dislikes()
for i in range(len(video_dislikes)):
        if video_dislikes[i] is None:
video_dislikes[i] = 0.0
for i in range(len(video_dislikes)):
if video_dislikes[i] > 6e3:
video_dislikes[i] = 6e3
plt.xlabel('Videos')
plt.ylabel('dislikes')
plt.bar(np.arange(len(video_dislikes)), video_dislikes, color = 'green')
plt.savefig('Assets/Graphs/dislikes_bar_graph.png')
| 5,342,386
|
def db_session():
"""FastAPI dependency genarator for create database session."""
db: Session = _mk_orm_session()
try:
yield db
finally:
db.close()
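
# Hedged usage sketch: wiring the dependency into a FastAPI route. `Item` is a
# hypothetical SQLAlchemy model; the session is opened per request and closed
# by the generator's finally block once the response is produced.
from fastapi import Depends, FastAPI

app = FastAPI()

@app.get("/items/{item_id}")
def read_item(item_id: int, db: Session = Depends(db_session)):
    return db.get(Item, item_id)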
| 5,342,387
|
def extract_subwindows_image(image, scoremap, mask, input_window_size, output_window_size, mode, mean_radius,
flatten=True, dataset_augmentation=False, random_state=42, sw_extr_stride=None,
sw_extr_ratio=None, sw_extr_score_thres=None, sw_extr_npi=None):
"""
Extract subwindows from the multi-spectral provided image.
Parameters
----------
image: array-like of shape (width, height, n_features)
The multi-spectral image.
scoremap: array-like of shape (width, height)
The corresponding scoremap.
mask: array-like of shape (width, height)
The corresponding mask.
input_window_size: tuple of two int
The size (width, height) of input subwindows.
output_window_size: tuple of two int
The size (width, height) of output subwindows.
    mode: {'random', 'scoremap_constrained'}
        The mode of extraction for input subwindows.
mean_radius: int
The mean radius of objects
dataset_augmentation: bool, optional (default=False)
If dataset augmentation must be performed.
random_state: int, optional (default=42)
An optional seed to make random number generator predictable.
Returns
-------
X: array-like of shape (n_subwindows, input_width * input_height * n_features)
The input subwindows.
y: array-like of shape (n_subwindows, output_width * output_height)
The output subwindows.
"""
input_window_size_half = half_size(input_window_size)
output_window_size_half = half_size(output_window_size)
if dataset_augmentation:
np.random.seed(random_state)
methods = [np.fliplr, np.flipud, np.rot90,
partial(np.rot90, k=2), partial(np.rot90, k=3)]
else:
methods = []
if mode == 'random':
if sw_extr_npi is None:
raise ValueError('number_per_image parameter required/invalid')
window_centers = _extract_random(mask, sw_extr_npi)
elif mode == 'scoremap_constrained':
if sw_extr_ratio is None:
raise ValueError('bg_ratio parameter required/invalid')
if sw_extr_score_thres is None:
raise ValueError('score_threshold required/invalid')
window_centers = _extract_scoremap_constrained(mask, scoremap, sw_extr_ratio,
sw_extr_score_thres)
else:
raise ValueError('unknown mode')
X, y = list(), list()
for window_center in window_centers:
top, right, bottom, left = subwindow_box(input_window_size,
input_window_size_half,
window_center)
input_window = image[slice(top, bottom), slice(left, right), :]
top, right, bottom, left = subwindow_box(output_window_size,
output_window_size_half,
window_center)
output_window = scoremap[slice(top, bottom), slice(left, right)]
if flatten:
X.append(input_window.ravel())
y.append(output_window.ravel())
else:
X.append(input_window)
y.append(output_window)
# TODO
if dataset_augmentation:
for method in methods:
X.append(method(input_window).ravel())
if output_window.ndim > 1:
y.append(method(output_window).ravel())
else:
y.append(output_window.ravel())
del window_centers
return np.array(X), np.array(y)
| 5,342,388
|
def format_template(string, tokens=None, encode=None):
"""Create an encoding from given string template."""
if tokens is None:
tokens = {}
format_values = {"config": config,
"tokens": tokens}
result = string.format(**format_values)
if encode == "base64":
result = base64.b64encode(result.encode("utf-8")).decode("utf-8")
else:
assert encode is None, f"Unknown encoding {encode}"
return result
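
# Hedged usage sketch: `config` is assumed to be the module-level mapping the
# function references; tokens supply per-call values.
tokens = {'user': 'alice'}
plain = format_template("Hello {tokens[user]}", tokens=tokens)
assert plain == "Hello alice"
encoded = format_template("Hello {tokens[user]}", tokens=tokens, encode="base64")
assert encoded == base64.b64encode(b"Hello alice").decode("utf-8")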
| 5,342,389
|
def make_mosaic(band='fuv', ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None, pgcname=None, model_bg=True, weight_ims=True, convert_mjysr=True, desired_pix_scale=GALEX_PIX_AS, imtype='intbgsub', wttype='rrhr', window=False):
"""
    Create a noise mosaic cutout of a galaxy in a single GALEX band.
Parameters
----------
band : str
GALEX band to use
ra_ctr : float
Central RA of galaxy
dec_ctr : float
Central Dec of galaxy
size_deg : float
Desired side length of each cutout, in degrees
index : array, optional
Structured array containing the galbase information. The default is to read it in inside this code. (Default: None)
name : str, optional
Name of the galaxy for which to generate a cutout
pgcname : str, optional
PGC name of the galaxy
model_bg : bool, optional
        Model the background of the mosaiced image (Default: True)
weight_ims : bool, optional
weight the input images with the weights images
convert_mjysr : bool, optional
convert input images from counts/sec to MJy/sr
desired_pix_scale : float, optional
Desired pixel scale of output image. Default is currently set to GALEX pixel scale (Default: 1.5)
imtype : str, optional
        input image type to use from galex (Default: 'intbgsub')
wttype : str, optional
input weights image type to use from galex (Default: rrhr)
window : bool, optional
window across the input images rather than use a single value
"""
ttype = 'galex'
data_dir = os.path.join(_TOP_DIR, ttype, 'sorted_tiles')
problem_file = os.path.join(_WORK_DIR, 'problem_galaxies_{}_noise.txt'.format(band))
numbers_file = os.path.join(_WORK_DIR, 'gal_reproj_info_{}_noise.txt'.format(band))
galaxy_noise_file = os.path.join(_MOSAIC_DIR, '_'.join([pgcname, band]).upper() + '_noise.fits')
if not os.path.exists(galaxy_noise_file):
start_time = time.time()
        print(pgcname, band.upper())
# READ THE INDEX FILE (IF NOT PASSED IN)
if index is None:
indexfile = os.path.join(_INDEX_DIR, 'galex_index_file.fits')
ext = 1
index, hdr = astropy.io.fits.getdata(indexfile, ext, header=True)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(ra_ctr, dec_ctr, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['fuv'] = 1 where fuv and
# index['nuv'] = 1 where nuv
ind = np.where((index[band]) & tile_overlaps)
# MAKE SURE THERE ARE OVERLAPPING TILES
ct_overlap = len(ind[0])
if ct_overlap == 0:
with open(problem_file, 'a') as myfile:
myfile.write(pgcname + ': ' + 'No overlapping tiles\n')
return
pix_scale = desired_pix_scale / 3600. # 1.5 arbitrary: how should I set it?
try:
# CREATE NEW TEMP DIRECTORY TO STORE TEMPORARY FILES
gal_dir = os.path.join(_WORK_DIR, '_'.join([pgcname, band]).upper())
os.makedirs(gal_dir)
# MAKE HEADER AND EXTENDED HEADER AND WRITE TO FILE
gal_hdr = GalaxyHeader(pgcname, gal_dir, ra_ctr, dec_ctr, size_deg, pix_scale, factor=3)
# GATHER THE INPUT FILES
input_dir = os.path.join(gal_dir, 'input')
if not os.path.exists(input_dir):
os.makedirs(input_dir)
nfiles = get_input(index, ind, data_dir, input_dir, hdr=gal_hdr)
im_dir, wt_dir = input_dir, input_dir
# WRITE TABLE OF INPUT IMAGE INFORMATION
input_table = os.path.join(im_dir, 'input.tbl')
montage.mImgtbl(im_dir, input_table, corners=True)
if convert_mjysr:
converted_dir = os.path.join(gal_dir, 'converted')
if not os.path.exists(converted_dir):
os.makedirs(converted_dir)
convert_to_flux_input(im_dir, converted_dir, band, desired_pix_scale, imtype=imtype)
im_dir = converted_dir
# MASK IMAGES
masked_dir = os.path.join(gal_dir, 'masked')
im_masked_dir = os.path.join(masked_dir, imtype)
wt_masked_dir = os.path.join(masked_dir, wttype)
for outdir in [masked_dir, im_masked_dir, wt_masked_dir]:
os.makedirs(outdir)
mask_images(im_dir, wt_dir, im_masked_dir, wt_masked_dir, imtype=imtype, wttype=wttype)
im_dir = im_masked_dir
wt_dir = wt_masked_dir
# CREATE DIRECTORY FOR NOISE IMAGES
noise_dir = os.path.join(gal_dir, 'noise')
if not os.path.exists(noise_dir):
os.makedirs(noise_dir)
# CALCULATE NOISE AND GENERATE NOISE MOSAIC CUTOUT
noisetype = 'noise'
calc_noise(gal_dir, noise_dir, gal_hdr, galaxy_noise_file, imtype, wttype, noisetype, window=window)
# REMOVE TEMP GALAXY DIRECTORY AND EXTRA FILES
shutil.rmtree(gal_dir, ignore_errors=True)
# NOTE TIME TO FINISH
stop_time = time.time()
total_time = (stop_time - start_time) / 60.
# WRITE OUT THE NUMBER OF TILES THAT OVERLAP THE GIVEN GALAXY
out_arr = [pgcname, band.upper(), nfiles, np.around(total_time, 2)]
with open(numbers_file, 'a') as nfile:
nfile.write('{0: >10}'.format(out_arr[0]))
nfile.write('{0: >6}'.format(out_arr[1]))
nfile.write('{0: >6}'.format(out_arr[2]))
nfile.write('{0: >6}'.format(out_arr[3]) + '\n')
# SOMETHING WENT WRONG -- WRITE ERROR TO FILE
except Exception as inst:
me = sys.exc_info()[0]
with open(problem_file, 'a') as myfile:
myfile.write(pgcname + ': ' + str(me) + ': '+str(inst)+'\n')
shutil.rmtree(gal_dir, ignore_errors=True)
return
| 5,342,390
|
def exists_case_sensitive(path: str) -> bool:
"""Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows,
Python can only import using the case of the real file.
"""
result = os.path.exists(path)
if (
sys.platform.startswith("win") or sys.platform == "darwin"
) and result: # pragma: no cover
directory, basename = os.path.split(path)
result = basename in os.listdir(directory)
return result
| 5,342,391
|
def make_datastore_api(client):
"""Create an instance of the GAPIC Datastore API.
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials.
"""
parse_result = six.moves.urllib_parse.urlparse(
client._base_url)
host = parse_result.netloc
if parse_result.scheme == 'https':
channel = make_secure_channel(
client._credentials, DEFAULT_USER_AGENT, host)
else:
channel = insecure_channel(host)
return GAPICDatastoreAPI(
channel=channel, lib_name='gccl', lib_version=__version__)
| 5,342,392
|
def _mul_certain(left, right):
"""Multiplies two values, where one is certain and the other is uncertain,
and returns the result."""
if _is_number(left):
return Uncertain(
value=right.value * left,
delta=right.delta,
)
return Uncertain(
value=left.value * right,
delta=left.delta,
)
| 5,342,393
|
def felica_RequestSystemCode(): # -> (int, List[int]):
"""
Sends FeliCa Request System Code command
:returns: (status, systemCodeList)
status 1: Success, < 0: error
systemCodeList System Code list (Array length should longer than 16)
"""
cmd = bytearray([FELICA_CMD_REQUEST_SYSTEM_CODE]) + _felicaIDm[:8]
status, response = felica_SendCommand(cmd)
responseLength = len(response)
if (status != 1):
DMSG("Request System Code command failed\n")
return -1, []
numSystemCode = response[9]
# length check
if (responseLength < 10 + 2 * numSystemCode):
DMSG("Request System Code command failed (wrong response length)\n")
return -2, []
systemCodeList = []
for i in range(numSystemCode):
systemCodeList.append((response[10 + i * 2] << 8) + response[10 + i * 2 + 1])
return 1, systemCodeList
| 5,342,394
|
def test_dev_bump_pipeline_version(datafiles, tmp_path):
"""Test that making a release works with a dev name and a leading v"""
# Get a workflow and configs
test_pipeline_dir = os.path.join(tmp_path, "nf-core-testpipeline")
create_obj = nf_core.create.PipelineCreate(
"testpipeline", "This is a test pipeline", "Test McTestFace", outdir=test_pipeline_dir
)
create_obj.init_pipeline()
pipeline_obj = nf_core.utils.Pipeline(test_pipeline_dir)
pipeline_obj._load()
# Bump the version number
nf_core.bump_version.bump_pipeline_version(pipeline_obj, "v1.2dev")
new_pipeline_obj = nf_core.utils.Pipeline(test_pipeline_dir)
# Check the pipeline config
new_pipeline_obj._load_pipeline_config()
assert new_pipeline_obj.nf_config["manifest.version"].strip("'\"") == "1.2dev"
| 5,342,395
|
def get_regularizer(
regularizer_type: str, l_reg_factor_weight: float
) -> Optional[Callable[[tf.Tensor], Optional[tf.Tensor]]]:
"""Gets a regularizer of a given type and scale.
Args:
regularizer_type: One of types.RegularizationType
l_reg_factor_weight: Scale for regularization.
Returns:
A function with weights parameter that applies regularization.
"""
if regularizer_type == types.RegularizationType.NONE:
return None
elif regularizer_type == types.RegularizationType.L1:
return slim.l1_regularizer(scale=l_reg_factor_weight)
elif regularizer_type == types.RegularizationType.L2:
return slim.l2_regularizer(scale=l_reg_factor_weight)
else:
raise ValueError(f"Unknown regularization type {regularizer_type}")
| 5,342,396
|
def app_eliminar():
"""
    Delete data through a form.
"""
helper.menu()
    # Delete section
output.span(output.put_markdown("## Sección Eliminar"))
output.put_markdown(f"Eliminar una fila")
form_delete = input.input_group("Eliminar Datos", [
input.input(label="ID", type=input.NUMBER, name="id"),
])
eliminar(form_delete)
| 5,342,397
|
def metadata():
"""Returns shared metadata instance with naming convention."""
naming_convention = {
'ix': 'ix_%(column_0_label)s',
'uq': 'uq_%(table_name)s_%(column_0_name)s',
'ck': 'ck_%(table_name)s_%(constraint_name)s',
'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
'pk': 'pk_%(table_name)s'}
return MetaData(naming_convention=naming_convention)
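
# Hedged usage sketch: constraints on tables bound to this metadata pick up
# deterministic names from the convention (useful for Alembic migrations).
from sqlalchemy import Column, Integer, String, Table

users = Table(
    'users', metadata(),
    Column('id', Integer, primary_key=True),  # constraint named pk_users
    Column('email', String, unique=True),     # constraint named uq_users_email
)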
| 5,342,398
|
def date_rss(dte=None):
    """Date formatted for RSS."""
    # time.mktime() returns a float, which has no strftime(); format a
    # struct_time instead (the current time if no date was given).
    struct = time.localtime() if dte is None else dte.timetuple()
    return time.strftime('%a, %d %b %Y %H:%M:%S %z', struct)
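
# Usage sketch (the datetime import is only needed for the second call):
import datetime
print(date_rss())                               # now, RSS-formatted
print(date_rss(datetime.datetime(2030, 1, 1)))  # a specific date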
| 5,342,399
|