content (string, lengths 22–815k) | id (int64, 0–4.91M)
|---|---|
def setToC(doc, toc, collapse=1):
"""Create new outline tree (table of contents, TOC).
Args:
        toc: (list, tuple) each entry must contain level, title, page and,
            optionally, the top margin on the page. None or an empty
            sequence removes the existing TOC.
collapse: (int) collapses entries beyond this level. Zero or None
shows all entries unfolded.
Returns:
the number of inserted items, or the number of removed items respectively.
"""
if doc.isClosed or doc.isEncrypted:
raise ValueError("document closed or encrypted")
if not doc.isPDF:
raise ValueError("not a PDF")
if not toc: # remove all entries
return len(doc._delToC())
# validity checks --------------------------------------------------------
if type(toc) not in (list, tuple):
raise ValueError("'toc' must be list or tuple")
toclen = len(toc)
pageCount = doc.pageCount
t0 = toc[0]
if type(t0) not in (list, tuple):
raise ValueError("items must be sequences of 3 or 4 items")
if t0[0] != 1:
raise ValueError("hierarchy level of item 0 must be 1")
    for i in range(toclen - 1):
t1 = toc[i]
t2 = toc[i + 1]
if not -1 <= t1[2] <= pageCount:
raise ValueError("row %i: page number out of range" % i)
if (type(t2) not in (list, tuple)) or len(t2) not in (3, 4):
raise ValueError("bad row %i" % (i + 1))
if (type(t2[0]) is not int) or t2[0] < 1:
raise ValueError("bad hierarchy level in row %i" % (i + 1))
if t2[0] > t1[0] + 1:
raise ValueError("bad hierarchy level in row %i" % (i + 1))
# no formal errors in toc --------------------------------------------------
# --------------------------------------------------------------------------
# make a list of xref numbers, which we can use for our TOC entries
# --------------------------------------------------------------------------
old_xrefs = doc._delToC() # del old outlines, get their xref numbers
old_xrefs = [] # TODO do not reuse them currently
# prepare table of xrefs for new bookmarks
xref = [0] + old_xrefs
xref[0] = doc._getOLRootNumber() # entry zero is outline root xref#
if toclen > len(old_xrefs): # too few old xrefs?
for i in range((toclen - len(old_xrefs))):
xref.append(doc._getNewXref()) # acquire new ones
lvltab = {0: 0} # to store last entry per hierarchy level
# ------------------------------------------------------------------------------
# contains new outline objects as strings - first one is the outline root
# ------------------------------------------------------------------------------
olitems = [{"count": 0, "first": -1, "last": -1, "xref": xref[0]}]
# ------------------------------------------------------------------------------
    # build olitems as a list of PDF-like connected dictionaries
# ------------------------------------------------------------------------------
for i in range(toclen):
o = toc[i]
lvl = o[0] # level
title = getPDFstr(o[1]) # title
pno = min(doc.pageCount - 1, max(0, o[2] - 1)) # page number
page = doc[pno] # load the page
ictm = ~page._getTransformation() # get inverse transformation matrix
top = Point(72, 36) * ictm # default top location
dest_dict = {"to": top, "kind": LINK_GOTO} # fall back target
if o[2] < 0:
dest_dict["kind"] = LINK_NONE
if len(o) > 3: # some target is specified
if type(o[3]) in (int, float): # convert a number to a point
dest_dict["to"] = Point(72, o[3]) * ictm
else: # if something else, make sure we have a dict
dest_dict = o[3] if type(o[3]) is dict else dest_dict
if "to" not in dest_dict: # target point not in dict?
dest_dict["to"] = top # put default in
else: # transform target to PDF coordinates
point = dest_dict["to"] * ictm
dest_dict["to"] = point
d = {}
d["first"] = -1
d["count"] = 0
d["last"] = -1
d["prev"] = -1
d["next"] = -1
d["dest"] = getDestStr(page.xref, dest_dict)
d["top"] = dest_dict["to"]
d["title"] = title
d["parent"] = lvltab[lvl - 1]
d["xref"] = xref[i + 1]
lvltab[lvl] = i + 1
parent = olitems[lvltab[lvl - 1]] # the parent entry
if collapse and lvl > collapse: # suppress expansion
parent["count"] -= 1 # make /Count negative
else:
parent["count"] += 1 # positive /Count
if parent["first"] == -1:
parent["first"] = i + 1
parent["last"] = i + 1
else:
d["prev"] = parent["last"]
prev = olitems[parent["last"]]
prev["next"] = i + 1
parent["last"] = i + 1
olitems.append(d)
# ------------------------------------------------------------------------------
# now create each outline item as a string and insert it in the PDF
# ------------------------------------------------------------------------------
for i, ol in enumerate(olitems):
txt = "<<"
if ol["count"] != 0:
txt += "/Count %i" % ol["count"]
try:
txt += ol["dest"]
except:
pass
try:
if ol["first"] > -1:
txt += "/First %i 0 R" % xref[ol["first"]]
except:
pass
try:
if ol["last"] > -1:
txt += "/Last %i 0 R" % xref[ol["last"]]
except:
pass
try:
if ol["next"] > -1:
txt += "/Next %i 0 R" % xref[ol["next"]]
except:
pass
try:
if ol["parent"] > -1:
txt += "/Parent %i 0 R" % xref[ol["parent"]]
except:
pass
try:
if ol["prev"] > -1:
txt += "/Prev %i 0 R" % xref[ol["prev"]]
except:
pass
try:
txt += "/Title" + ol["title"]
except:
pass
if i == 0: # special: this is the outline root
txt += "/Type/Outlines" # so add the /Type entry
txt += ">>"
doc._updateObject(xref[i], txt) # insert the PDF object
doc.initData()
return toclen
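# Usage sketch for setToC, assuming a PyMuPDF-style Document and that this
# module's helpers (getPDFstr, getDestStr, Point, ...) are available; the file
# names below are placeholders.
import fitz  # PyMuPDF

doc = fitz.open("example.pdf")
toc = [
    [1, "Chapter 1", 1],           # level, title, 1-based page number
    [2, "Section 1.1", 2, 300],    # optional 4th item: top position on the page
    [1, "Chapter 2", 5],
]
setToC(doc, toc, collapse=1)       # entries deeper than level 1 start collapsed
doc.save("example-with-toc.pdf")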
| 5,343,100
|
def test_card_update_rect():
"""
    Ensure _update_rect (called whenever the application size changes) resizes
    the rectangle that contains the screen to be drawn (so the background
    colour / image is updated, if required).
"""
card = Card("title")
card.rect = mock.MagicMock()
instance = mock.MagicMock()
instance.pos = 400
instance.size = 500
card._update_rect(instance, 100)
assert card.rect.pos == instance.pos
assert card.rect.size == instance.size
| 5,343,101
|
def _map_dvector_permutation(rd,d,eps):
"""Maps the basis vectors to a permutation.
Args:
rd (array-like): 2D array of the rotated basis vectors.
d (array-like): 2D array of the original basis vectors.
eps (float): Finite precision tolerance.
Returns:
RP (list): The permutation of the basis vectors.
"""
    n_d = len(rd)  # number of d-vectors
found = [False]*n_d
RP = []
for iD in range(n_d):
for jD in range(n_d):
if found[jD]:
continue
if np.allclose(rd[iD],d[jD],atol=eps,rtol=0):
RP.append(jD)
found[jD] = True
break
    if len(RP) != len(d):  # pragma: no cover
        print("d-vector didn't permute in map_dvector_permutation. "
              "This usually means that the d-set from the input structure and the d-set"
              " from the struct_enum.out have a different origin or don't live in the same"
              " unit cell. This probably isn't your fault---the code should overcome this.",
              RP)
        exit()
    return RP
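# A minimal check of _map_dvector_permutation: rd is a row-permuted copy of d,
# so the returned list maps each rotated vector back to its original index.
import numpy as np

d = np.array([[0.0, 0.0, 0.0],
              [0.5, 0.5, 0.0],
              [0.5, 0.0, 0.5]])
rd = d[[2, 0, 1]]                             # permute the rows
print(_map_dvector_permutation(rd, d, 1e-7))  # [2, 0, 1]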
| 5,343,102
|
def get_vacsi_non_vacsi(name, column, statut, multi, filter = False, region = True):
"""Process each geozone for Rate type of KPIs with exception.
Specific function dedicated to vaccin_vaccines_couv
"""
indicateurResult = get_empty_kpi()
config = get_config(name)
log.debug('Processing - '+name)
indicateurResult['nom'] = config['nom']
indicateurResult['unite'] = config['unite']
indicateurResult['unite_short'] = config['unite_short']
indicateurResult['trendType'] = config['trendType']
df = pd.read_csv("files_new/" + config['res_id_fra'],
sep=None,
engine='python',
dtype={'reg': str, 'dep': str})
if filter:
df = df[df['age'] != '[0,19]']
df = df.groupby(['date', 'vac_statut'], as_index = False).sum()
if statut == 'Vaccination complète':
df = df[~df['vac_statut'].isin(['Non-vaccinés', 'Primo dose efficace', 'Primo dose récente'])]
df = df.groupby('date', as_index = False).sum()
else:
df = df[df['vac_statut'] == statut]
df = df.sort_values('date', ascending = True)
df['numerateur'] = multi * df[column].rolling(window = 7).sum()
df['denominateur'] = df['effectif'].rolling(window = 7).mean()
df = df[~df['denominateur'].isnull()]
df['res'] = df['numerateur'] / df['denominateur']
for country in tqdm(countries, desc="Processing National"):
res = process_stock(
df,
'nat',
'fra',
config['trendType'],
'res'
)
indicateurResult['france'].append(res)
if region:
df = pd.read_csv("files_new/" + config['res_id_reg'],
sep=None,
engine='python',
dtype={'reg': str, 'dep': str})
df = df[df['vac_statut'] == statut]
tri_reg = pd.read_csv("utils/region2021.csv",
sep=None,
engine='python',
dtype={'reg': str, 'dep': str})
for reg in tqdm(df['region'].unique(), desc="Processing Régions"):
df_reg = df[df['region'] == reg]
df_reg = df_reg.sort_values('date', ascending = True)
df_reg['numerateur'] = multi * df_reg[column].rolling(window = 7).sum()
df_reg['denominateur'] = df_reg['effectif'].rolling(window = 7).mean()
df_reg = df_reg[~df_reg['denominateur'].isnull()]
df_reg['res'] = df_reg['numerateur'] / df_reg['denominateur']
res = process_stock(
df_reg,
'reg',
tri_reg.loc[tri_reg['trigramme'] == reg, 'reg'].iloc[0],
config['trendType'],
'res'
)
indicateurResult['regions'].append(res)
save_result(indicateurResult, name)
| 5,343,103
|
def dump_lightcurves_with_grcollect(photfileglob, lcdir, maxmemory,
objectidcol=3,
lcextension='grcollectilc',
observatory='tess'):
"""
Given a list of photometry files (text files at various times output by
fiphot with rows that are objects), make lightcurve files (text files for
various objects whose rows are times).
(For TESS, the timestamp imprinted here is JD_UTC midtime.)
This routine uses the `fitsh` routine `grcollect` to do the dumping. This
is comparably fast to the postgresql indexing approach without heavy
optimization (dumps ~1 photometry file per second).
An important intermediate step, implemented here, is prepending times and
filenames to *.iphot lightcurve files, to make *.iphottemp files.
*.iphot photometry files look like:
HAT-381-0000004 1482.093 521.080 1482.10899 521.079941
1.86192159 -0.217043415 -0.152707564 0.35 0.41
583942.10 334.31 5.54226 0.00062 G
605285.46 340.12 5.50328 0.00061 G
619455.38 344.29 5.47816 0.00060 G
    Args:
        photfileglob (str): bash glob to pass to grcollect, e.g., the first
            input line below
                grcollect /home/mypath/rsub-3f62ef9f-tess201913112*.iphot \
                    --col-base 1 --prefix /nfs/phtess1/ar1/TESS/SIMFFI/LC/ISP_1-1/ \
                    --extension grcollectilc --max-memory 4g
        lcdir (str): directory where lightcurves get dumped
        maxmemory (str): e.g., "2g", "100m" for 2gb, or 100mb. Maximum amount
            of memory for `grcollect`. See https://fitsh.net/wiki/man/grcollect
            for terse details.
    Keyword args:
        objectidcol (int): column of object id in *.iphottemp files (default:
            3)
        lcextension (str): e.g., "grcollectilc" for the extension you want your
            dumped lightcurves to have. Also common is "ilc" for
            "image-subtracted lightcurve".
        observatory (str): 'tess' or 'hatpi'; controls how frame times are read
            from the FITS headers.
Returns:
diddly-squat.
"""
if not os.path.exists(lcdir):
os.mkdir(lcdir)
starttime = time.time()
photpaths = glob.glob(photfileglob)
print('%sZ: called dump lightcurves for %d photfiles' %
(datetime.utcnow().isoformat(), len(photpaths)))
    if len(photpaths)==0:
        print('ERR! %sZ: failed to find %s, continuing' %
              (datetime.utcnow().isoformat(), photfileglob))
        return 0
# *.iphot files need to have timestamp and filenames prepended on each
# line, otherwise lightcurves will have no times. make the correctly
# formatted ".iphottemp" files here.
for ix, photpath in enumerate(photpaths):
if observatory=='tess':
framekey = re.findall(
'tess20.*?-[0-9][0-9][0-9][0-9]_cal_img_bkgdsub', photpath)
if not len(framekey) == 1:
raise AssertionError(
'expected only one photframe, got {:s}'.
format(repr(framekey)))
originalframe = os.path.join(os.path.dirname(photpath),
framekey[0]+'.fits')
elif observatory=='hatpi':
framekey = re.findall('(.-.{7}_.)\.iphot', photpath)
assert len(framekey) == 1, 'HATPI specific regex!'
originalframe = os.path.join(os.path.dirname(photpath),
framekey[0]+'.fits')
else:
raise NotImplementedError
# check these files exist, and populate the dict if they do
if os.path.exists(originalframe):
tempphotpath = photpath.replace('.iphot','.iphottemp')
if os.path.exists(tempphotpath):
print('%sZ: skipping %d/%d frame, found .iphottemp file %s' %
(datetime.utcnow().isoformat(), ix, len(photpaths),
tempphotpath))
continue
print('%sZ: writing %d/%d frame, %s to .iphottemp file %s' %
(datetime.utcnow().isoformat(), ix, len(photpaths), photpath,
tempphotpath))
# this is the ORIGINAL FITS frame, since the subtracted one
# contains some weird JD header (probably inherited from the
# photref frame)
if observatory=='tess':
from astropy.time import Time
# observation start and stop time as BJD_UTC calendar dates.
# calculated by SPOC. (these include the leapseconds). the
# wrong barycentric correction has been applied to all
# available timestamps (and the sign convention for how the
# correction is applied -- is it added, or subtracted? -- has
# been confirmed via private comm. with Jon Jenkins)
tstart_bjd_utc_str = get_header_keyword(
originalframe, 'DATE-OBS')
tstop_bjd_utc_str = get_header_keyword(
originalframe, 'DATE-END')
ltt_barycenter_spoc = get_header_keyword(
originalframe, 'BARYCORR')
# record the midtime in JD_UTC with grcollect. barycentric
# correction comes later. (once ra/dec are accessible).
tstart_bjd_utc = Time(tstart_bjd_utc_str, format='isot',
scale='utc')
tstop_bjd_utc = Time(tstop_bjd_utc_str, format='isot',
scale='utc')
tmid_bjd_utc = (
tstart_bjd_utc.jd + (tstop_bjd_utc.jd - tstart_bjd_utc.jd)/2.
)
# WANT: bjd_tdb_me = jd_utc_spoc + ltt_barycenter_me + leapseconds (eq 1)
# HAVE: bjd_utc_spoc = jd_utc_spoc + ltt_barycenter_spoc (eq 2)
                # use eq (2) to get jd_utc_spoc:
tmid_jd_utc = tmid_bjd_utc - ltt_barycenter_spoc
frametime = tmid_jd_utc
elif observatory=='hatpi':
frametime = get_header_keyword(originalframe, 'JD')
else:
raise NotImplementedError
# now open the phot file, read it, and write to the tempphot file.
output = StringIO()
with open(photpath, 'rb') as photfile:
# write time precise to 1e-7 days = 8.6 milliseconds.
for line in photfile:
output.write('%.7f\t%s\t%s' % (
frametime, framekey[0], line.decode('utf-8'))
)
with open(tempphotpath, 'w') as tempphotfile:
output.seek(0)
shutil.copyfileobj(output, tempphotfile)
output.close()
grcollectglob = photfileglob.replace('.iphot', '.iphottemp')
cmdtorun = GRCOLLECTCMD.format(fiphotfileglob=grcollectglob,
lcdir=lcdir,
objectidcol=objectidcol,
lcextension=lcextension,
maxmemory=maxmemory)
if DEBUG:
print(cmdtorun)
# execute the grcollect shell command. NB we execute through a shell
# interpreter to get correct wildcards applied.
grcollectproc = subprocess.Popen(cmdtorun, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = grcollectproc.communicate()
if grcollectproc.returncode == 0:
print('%sZ: grcollect dump succeeded' % datetime.utcnow().isoformat())
else:
print('ERR! %sZ: grcollect failed' % datetime.utcnow().isoformat())
print('error was %s' % stderr )
print('%sZ: done, time taken: %.2f minutes' %
(datetime.utcnow().isoformat(), (time.time() - starttime)/60.0))
| 5,343,104
|
def main(): # pragma: no cover
"""Entrypoint invoked via a console script."""
try:
while True:
print(format_datetime())
sleep(1)
except KeyboardInterrupt:
pass
| 5,343,105
|
def varPostV(self,name,value):
""" Moving all the data from entry to treeview """
    regex = re.search("-[@_!#$%^&*()<>?/\|}{~: ]", name)  # prevent the user from entering special characters or spaces
print(regex)
    if regex is not None:
        tk.messagebox.showerror("Forbidden Entry", "The variable name for vehicle must not contain special characters or spaces")
        return None
    if not name.strip():
        tk.messagebox.showerror("Empty entry", "The variable name for vehicle is empty")
        return None
    if not value.strip():
        tk.messagebox.showerror("Empty entry", "The variable value for vehicle is empty")
        return None
    if not value.isdigit():
        tk.messagebox.showerror("Invalid entry", "The variable value for vehicle must be a number")
        return None
self.varVContent = self.varDispV
self.varVContent.insert("",index="end",text=name,value=float(value))
| 5,343,106
|
def _mysql_int_length(subtype):
"""Determine smallest field that can hold data with given length."""
try:
length = int(subtype)
except ValueError:
raise ValueError(
'Invalid subtype for Integer column: {}'.format(subtype)
)
if length < 3:
kind = 'TINYINT'
elif length < 4:
kind = 'SMALLINT'
elif length < 7:
kind = 'MEDIUMINT'
elif length <= 10:
kind = 'INT'
else:
kind = 'BIGINT'
return '{}({})'.format(kind, length)
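# Expected mappings for _mysql_int_length, given the digit-count thresholds above:
#   '2'  -> TINYINT(2)     (length < 3)
#   '3'  -> SMALLINT(3)    (length < 4)
#   '6'  -> MEDIUMINT(6)   (length < 7)
#   '10' -> INT(10)        (length <= 10)
#   '12' -> BIGINT(12)
for subtype in ('2', '3', '6', '10', '12'):
    print(subtype, '->', _mysql_int_length(subtype))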
| 5,343,107
|
def get_ci(vals, percent=0.95):
"""Confidence interval for `vals` from the Students' t
distribution. Uses `stats.t.interval`.
Parameters
----------
percent : float
Size of the confidence interval. The default is 0.95. The only
requirement is that this be above 0 and at or below 1.
Returns
-------
tuple
The first member is the upper end of the interval, the second
the lower end of the interval.
"""
if len(set(vals)) == 1:
return (vals[0], vals[0])
mu = np.mean(vals)
df = len(vals)-1
sigma = np.std(vals) / np.sqrt(len(vals))
return stats.t.interval(percent, df, loc=mu, scale=sigma)
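# Quick check of get_ci; assumes numpy and scipy.stats are imported as np and
# stats (as the function body implies).
import numpy as np
from scipy import stats

vals = [4.8, 5.1, 5.0, 4.9, 5.2]
low, high = get_ci(vals, percent=0.95)  # stats.t.interval returns (lower, upper)
print(round(low, 3), round(high, 3))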
| 5,343,108
|
def kb_overview_rows(mode=None, max=None, locale=None, product=None, category=None):
"""Return the iterable of dicts needed to draw the new KB dashboard
overview"""
if mode is None:
mode = LAST_30_DAYS
docs = Document.objects.filter(locale=settings.WIKI_DEFAULT_LANGUAGE,
is_archived=False,
is_template=False)
docs = docs.exclude(html__startswith=REDIRECT_HTML)
select = OrderedDict([
('num_visits', 'SELECT `wdv`.`visits` '
'FROM `dashboards_wikidocumentvisits` as `wdv` '
'WHERE `wdv`.`period`=%s '
'AND `wdv`.`document_id`=`wiki_document`.`id`'),
])
docs = docs.extra(select=select,
select_params=(mode,))
if product:
docs = docs.filter(products__in=[product])
if category:
docs = docs.filter(category__in=[category])
docs = docs.order_by('-num_visits', 'title')
if max:
docs = docs[:max]
rows = []
if docs.count():
max_visits = docs[0].num_visits
for d in docs:
data = {
'url': reverse('wiki.document', args=[d.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE),
'trans_url': reverse('wiki.show_translations', args=[d.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE),
'title': d.title,
'num_visits': d.num_visits,
'ready_for_l10n': d.revisions.filter(is_approved=True,
is_ready_for_localization=True).exists()
}
if d.current_revision:
data['expiry_date'] = d.current_revision.expires
if d.num_visits:
data['visits_ratio'] = float(d.num_visits) / max_visits
if 'expiry_date' in data and data['expiry_date']:
data['stale'] = data['expiry_date'] < datetime.now()
# Check L10N status
if d.current_revision:
unapproved_revs = d.revisions.filter(
reviewed=None, id__gt=d.current_revision.id)[:1]
else:
unapproved_revs = d.revisions.all()
if unapproved_revs.count():
data['revision_comment'] = unapproved_revs[0].comment
else:
data['latest_revision'] = True
# Get the translated doc
if locale != settings.WIKI_DEFAULT_LANGUAGE:
transdoc = d.translations.filter(
locale=locale,
is_archived=False).first()
if transdoc:
data['needs_update'] = transdoc.is_outdated()
else: # For en-US we show the needs_changes comment.
data['needs_update'] = d.needs_change
data['needs_update_comment'] = d.needs_change_comment
rows.append(data)
return rows
| 5,343,109
|
def test_query_devicecontrolalert_facets(monkeypatch):
"""Test a Device Control alert facet query."""
_was_called = False
def _run_facet_query(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/alerts/devicecontrol/_facet"
assert body == {"query": "Blort", "criteria": {"workflow": ["OPEN"]},
"terms": {"rows": 0, "fields": ["REPUTATION", "STATUS"]},
"rows": 100}
_was_called = True
return StubResponse({"results": [{"field": {},
"values": [{"id": "reputation", "name": "reputationX", "total": 4}]},
{"field": {},
"values": [{"id": "status", "name": "statusX", "total": 9}]}]})
api = CBCloudAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbc_sdk_api(monkeypatch, api, POST=_run_facet_query)
query = api.select(DeviceControlAlert).where("Blort").set_workflows(["OPEN"])
f = query.facets(["REPUTATION", "STATUS"])
assert _was_called
assert f == [{"field": {}, "values": [{"id": "reputation", "name": "reputationX", "total": 4}]},
{"field": {}, "values": [{"id": "status", "name": "statusX", "total": 9}]}]
| 5,343,110
|
def plot_logs(experiments: List[Summary],
smooth_factor: float = 0,
ignore_metrics: Optional[Set[str]] = None,
pretty_names: bool = False,
include_metrics: Optional[Set[str]] = None) -> Figure:
"""A function which will plot experiment histories for comparison viewing / analysis.
    Args:
        experiments: Experiment(s) to plot.
        smooth_factor: A non-negative float representing the magnitude of gaussian smoothing to apply (zero for none).
        ignore_metrics: Any keys to ignore during plotting.
        pretty_names: Whether to modify the metric names in graph titles (True) or leave them alone (False).
        include_metrics: A whitelist of keys to include during plotting. If None then all will be included.
Returns:
The handle of the pyplot figure.
"""
# Sort to keep same colors between multiple runs of visualization
experiments = humansorted(to_list(experiments), lambda exp: exp.name)
n_experiments = len(experiments)
if n_experiments == 0:
return make_subplots()
ignore_keys = ignore_metrics or set()
ignore_keys = to_set(ignore_keys)
ignore_keys |= {'epoch'}
include_keys = to_set(include_metrics)
# TODO: epoch should be indicated on the axis (top x axis?). Problem - different epochs per experiment.
# TODO: figure out how ignore_metrics should interact with mode
# TODO: when ds_id switches during training, prevent old id from connecting with new one (break every epoch?)
ds_ids = set()
metric_histories = defaultdict(_MetricGroup) # metric: MetricGroup
for idx, experiment in enumerate(experiments):
history = experiment.history
# Since python dicts remember insertion order, sort the history so that train mode is always plotted on bottom
for mode, metrics in sorted(history.items(),
key=lambda x: 0 if x[0] == 'train' else 1 if x[0] == 'eval' else 2 if x[0] == 'test'
else 3 if x[0] == 'infer' else 4):
for metric, step_val in metrics.items():
base_metric, ds_id, *_ = f'{metric}|'.split('|') # Plot acc|ds1 and acc|ds2 on same acc graph
if len(step_val) == 0:
continue # Ignore empty metrics
if metric in ignore_keys or base_metric in ignore_keys:
continue
# Here we intentionally check against metric and not base_metric. If user wants to display per-ds they
# can specify that in their include list: --include mcc 'mcc|usps'
if include_keys and metric not in include_keys:
continue
metric_histories[base_metric].add(idx, mode, ds_id, step_val)
ds_ids.add(ds_id)
metric_list = list(sorted(metric_histories.keys()))
if len(metric_list) == 0:
return make_subplots()
ds_ids = humansorted(ds_ids) # Sort them to have consistent ordering (and thus symbols) between plot runs
n_plots = len(metric_list)
if len(ds_ids) > 9: # 9 b/c None is included
print("FastEstimator-Warn: Plotting more than 8 different datasets isn't well supported. Symbols will be "
"reused.")
# Non-Shared legends aren't supported yet. If they get supported then maybe can have that feature here too.
# https://github.com/plotly/plotly.js/issues/5099
# https://github.com/plotly/plotly.js/issues/5098
# map the metrics into an n x n grid, then remove any extra columns. Final grid will be n x m with m <= n
n_rows = math.ceil(math.sqrt(n_plots))
n_cols = math.ceil(n_plots / n_rows)
metric_grid_location = {}
nd1_metrics = []
idx = 0
for metric in metric_list:
if metric_histories[metric].ndim() == 1:
# Delay placement of the 1D plots until the end
nd1_metrics.append(metric)
else:
metric_grid_location[metric] = (idx // n_cols, idx % n_cols)
idx += 1
for metric in nd1_metrics:
metric_grid_location[metric] = (idx // n_cols, idx % n_cols)
idx += 1
titles = [k for k, v in sorted(list(metric_grid_location.items()), key=lambda e: e[1][0] * n_cols + e[1][1])]
if pretty_names:
titles = [prettify_metric_name(title) for title in titles]
fig = make_subplots(rows=n_rows, cols=n_cols, subplot_titles=titles, shared_xaxes='all')
fig.update_layout({'plot_bgcolor': '#FFF',
'hovermode': 'closest',
'margin': {'t': 50},
'modebar': {'add': ['hoverclosest', 'hovercompare'],
'remove': ['select2d', 'lasso2d']},
'legend': {'tracegroupgap': 5,
'font': {'size': 11}}})
# Set x-labels
for idx, metric in enumerate(titles, start=1):
plotly_idx = idx if idx > 1 else ""
x_axis_name = f'xaxis{plotly_idx}'
y_axis_name = f'yaxis{plotly_idx}'
if metric_histories[metric].ndim() > 1:
fig['layout'][x_axis_name]['title'] = 'Steps'
fig['layout'][x_axis_name]['showticklabels'] = True
fig['layout'][x_axis_name]['linecolor'] = "#BCCCDC"
fig['layout'][y_axis_name]['linecolor'] = "#BCCCDC"
else:
# Put blank data onto the axis to instantiate the domain
row, col = metric_grid_location[metric][0], metric_grid_location[metric][1]
fig.add_annotation(text='', showarrow=False, row=row + 1, col=col + 1)
# Hide the axis stuff
fig['layout'][x_axis_name]['showgrid'] = False
fig['layout'][x_axis_name]['zeroline'] = False
fig['layout'][x_axis_name]['visible'] = False
fig['layout'][y_axis_name]['showgrid'] = False
fig['layout'][y_axis_name]['zeroline'] = False
fig['layout'][y_axis_name]['visible'] = False
# If there is only 1 experiment, we will use alternate colors based on mode
color_offset = defaultdict(lambda: 0)
n_colors = n_experiments
if n_experiments == 1:
n_colors = 4
color_offset['eval'] = 1
color_offset['test'] = 2
color_offset['infer'] = 3
colors = get_colors(n_colors=n_colors)
alpha_colors = get_colors(n_colors=n_colors, alpha=0.3)
# exp_id : {mode: {ds_id: {type: True}}}
add_label = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: True))))
# {row: {col: (x, y)}}
ax_text = defaultdict(lambda: defaultdict(lambda: (0.0, 0.9))) # Where to put the text on a given axis
# Set up ds_id markers. The empty ds_id will have no extra marker. After that there are 4 configurations of 3-arm
# marker, followed by 'x', '+', '*', and pound. After that it will just repeat the symbol set.
ds_id_markers = [None, 37, 38, 39, 40, 34, 33, 35, 36] # https://plotly.com/python/marker-style/
ds_id_markers = {k: v for k, v in zip(ds_ids, cycle(ds_id_markers))}
# Plotly doesn't support z-order, so delay insertion until all the plots are figured out:
# https://github.com/plotly/plotly.py/issues/2345
z_order = defaultdict(list) # {order: [(plotly element, row, col), ...]}
# Figure out the legend ordering
legend_order = []
for exp_idx, experiment in enumerate(experiments):
for metric, group in metric_histories.items():
for mode in group.modes(exp_idx):
for ds_id in group.ds_ids(exp_idx, mode):
ds_title = f"{ds_id} " if ds_id else ''
title = f"{experiment.name} ({ds_title}{mode})" if n_experiments > 1 else f"{ds_title}{mode}"
legend_order.append(title)
legend_order.sort()
legend_order = {legend: order for order, legend in enumerate(legend_order)}
# Actually do the plotting
for exp_idx, experiment in enumerate(experiments):
for metric, group in metric_histories.items():
row, col = metric_grid_location[metric][0], metric_grid_location[metric][1]
if group.ndim() == 1:
# Single value
for mode in group.modes(exp_idx):
for ds_id in group.ds_ids(exp_idx, mode):
ds_title = f"{ds_id} " if ds_id else ''
prefix = f"{experiment.name} ({ds_title}{mode})" if n_experiments > 1 else f"{ds_title}{mode}"
plotly_idx = row * n_cols + col + 1 if row * n_cols + col + 1 > 1 else ''
fig.add_annotation(text=f"{prefix}: {group.get_val(exp_idx, mode, ds_id)}",
font={'color': colors[exp_idx + color_offset[mode]]},
showarrow=False,
xref=f'x{plotly_idx} domain',
xanchor='left',
x=ax_text[row][col][0],
yref=f'y{plotly_idx} domain',
yanchor='top',
y=ax_text[row][col][1],
exclude_empty_subplots=False)
ax_text[row][col] = (ax_text[row][col][0], ax_text[row][col][1] - 0.1)
if ax_text[row][col][1] < 0:
ax_text[row][col] = (ax_text[row][col][0] + 0.5, 0.9)
elif group.ndim() == 2:
for mode, dsv in group[exp_idx].items():
color = colors[exp_idx + color_offset[mode]]
for ds_id, data in dsv.items():
ds_title = f"{ds_id} " if ds_id else ''
title = f"{experiment.name} ({ds_title}{mode})" if n_experiments > 1 else f"{ds_title}{mode}"
if data.shape[0] < 2:
x = data[0][0]
y = data[0][1]
y_min = None
y_max = None
if isinstance(y, ValWithError):
y_min = y.y_min
y_max = y.y_max
y = y.y
marker_style = 'circle' if mode == 'train' else 'diamond' if mode == 'eval' \
else 'square' if mode == 'test' else 'hexagram'
limit_data = [(y_max, y_min)] if y_max is not None and y_min is not None else None
tip_text = "%{x}: (%{customdata[1]:.3f}, %{y:.3f}, %{customdata[0]:.3f})" if \
limit_data is not None else "%{x}: %{y:.3f}"
error_y = None if limit_data is None else {'type': 'data',
'symmetric': False,
'array': [y_max - y],
'arrayminus': [y - y_min]}
z_order[2].append((go.Scatter(x=[x],
y=[y],
name=title,
legendgroup=title,
customdata=limit_data,
hovertemplate=tip_text,
mode='markers',
marker={'color': color,
'size': 12,
'symbol': _symbol_mash(marker_style,
ds_id_markers[ds_id]),
'line': {'width': 1.5,
'color': 'White'}},
error_y=error_y,
showlegend=add_label[exp_idx][mode][ds_id]['patch'],
legendrank=legend_order[title]),
row,
col))
add_label[exp_idx][mode][ds_id]['patch'] = False
else:
# We can draw a line
y = data[:, 1]
y_min = None
y_max = None
if isinstance(y[0], ValWithError):
y = np.stack(y)
y_min = y[:, 0]
y_max = y[:, 2]
y = y[:, 1]
if smooth_factor != 0:
y_min = gaussian_filter1d(y_min, sigma=smooth_factor)
y_max = gaussian_filter1d(y_max, sigma=smooth_factor)
# TODO - for smoothed lines, plot original data in background but greyed out
if smooth_factor != 0:
y = gaussian_filter1d(y, sigma=smooth_factor)
x = data[:, 0]
linestyle = 'solid' if mode == 'train' else 'dash' if mode == 'eval' else 'dot' if \
mode == 'test' else 'dashdot'
limit_data = [(mx, mn) for mx, mn in zip(y_max, y_min)] if y_max is not None and y_min is \
not None else None
tip_text = "%{x}: (%{customdata[1]:.3f}, %{y:.3f}, %{customdata[0]:.3f})" if \
limit_data is not None else "%{x}: %{y:.3f}"
z_order[1].append((go.Scatter(x=x,
y=y,
name=title,
legendgroup=title,
mode="lines+markers" if ds_id_markers[ds_id] else 'lines',
marker={'color': color,
'size': 8,
'line': {'width': 2,
'color': 'DarkSlateGrey'},
'maxdisplayed': 10,
'symbol': ds_id_markers[ds_id]},
line={'dash': linestyle,
'color': color},
customdata=limit_data,
hovertemplate=tip_text,
showlegend=add_label[exp_idx][mode][ds_id]['line'],
legendrank=legend_order[title]),
row,
col))
add_label[exp_idx][mode][ds_id]['line'] = False
if limit_data is not None:
z_order[0].append((go.Scatter(x=x,
y=y_max,
mode='lines',
line={'width': 0},
legendgroup=title,
showlegend=False,
hoverinfo='skip'),
row,
col))
z_order[0].append((go.Scatter(x=x,
y=y_min,
mode='lines',
line={'width': 0},
fillcolor=alpha_colors[exp_idx + color_offset[mode]],
fill='tonexty',
legendgroup=title,
showlegend=False,
hoverinfo='skip'),
row,
col))
else:
# Some kind of image or matrix. Not implemented yet.
pass
for z in sorted(list(z_order.keys())):
plts = z_order[z]
for plt, row, col in plts:
fig.add_trace(plt, row=row + 1, col=col + 1)
# If inside a jupyter notebook then force the height based on number of rows
if in_notebook():
fig.update_layout(height=280 * n_rows)
return fig
| 5,343,111
|
def regularity(sequence):
"""
Compute the regularity of a sequence.
The regularity basically measures what percentage of a user's
visits are to a previously visited place.
Parameters
----------
sequence : list
A list of symbols.
Returns
-------
float
1 minus the ratio between unique and total symbols in the sequence.
"""
n = len(sequence)
n_unique = len(set(sequence))
if n_unique <= 1:
return 1.0
if n_unique == n:
        return 0.0
return 1 - (n_unique / n)
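# Examples: 5 visits to 3 unique places -> 1 - 3/5 = 0.4; a single place is
# fully regular (1.0); all-unique visits give 0.0.
print(regularity(['home', 'work', 'home', 'cafe', 'home']))  # 0.4
print(regularity(['home']))                                  # 1.0
print(regularity(['home', 'work', 'cafe']))                  # 0.0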
| 5,343,112
|
def _make_tick_labels(
tick_values: List[float], axis_subtractor: float, tick_divisor_power: int,
) -> List[str]:
"""Given a collection of ticks, return a formatted version.
    Args:
        tick_values (List[float]): The tick positions in ascending
            order.
        axis_subtractor (float): The amount to subtract from the tick
            values.
        tick_divisor_power (int): The power of ten the tick labels will
            be divided by.
    Returns:
        List[str]: The formatted tick labels.
"""
tick_divisor_prefix = _get_metric_prefix(tick_divisor_power)
return [
f"{(tick - axis_subtractor) / 10 ** tick_divisor_power:0.2f}"
f"{tick_divisor_prefix}"
for tick in tick_values
]
| 5,343,113
|
def load_coco(dataset_file, map_file):
"""
Load preprocessed MSCOCO 2017 dataset
"""
print('\nLoading dataset...')
h5f = h5py.File(dataset_file, 'r')
x = h5f['x'][:]
y = h5f['y'][:]
h5f.close()
split = int(x.shape[0] * 0.8) # 80% of data is assigned to the training set
x_train, y_train = x[:split], y[:split]
x_test, y_test = x[split:], y[split:]
with open(map_file, 'rb') as mapping:
category_id_map = pickle.load(mapping)
id_category = category_id_map['id_category']
print('Done.')
return (x_train, y_train), (x_test, y_test), id_category
| 5,343,114
|
def eval_rule(call_fn, abstract_eval_fn, *args, **kwargs):
"""
Python Evaluation rule for a numba4jax function respecting the
XLA CustomCall interface.
    Evaluates `outs = abstract_eval_fn(*args)` to compute the output shapes
    and preallocate them, then executes `call_fn(outputs + inputs)` (outputs
    passed by reference first, then the inputs), which is the Numba kernel.
Args:
call_fn: a (numba.jit) function respecting the calling convention of
XLA CustomCall, taking first the outputs by reference then the
inputs.
abstract_eval_fn: The abstract evaluation function respecting jax
interface
args: The arguments to the `call_fn`
kwargs: Optional keyword arguments for the numba function.
"""
# compute the output shapes
output_shapes = abstract_eval_fn(*args)
# Preallocate the outputs
outputs = tuple(np.empty(shape.shape, dtype=shape.dtype) for shape in output_shapes)
# convert inputs to a tuple
inputs = tuple(np.asarray(arg) for arg in args)
# call the kernel
call_fn(outputs + inputs, **kwargs)
# Return the outputs
return tuple(outputs)
| 5,343,115
|
def bev_box_overlap(boxes, qboxes, criterion=-1):
"""
Calculate rotated 2D iou.
    Args:
        boxes: array of rotated BEV boxes.
        qboxes: array of query BEV boxes.
        criterion: overlap criterion forwarded to rotate_iou_gpu_eval (-1 for standard IoU).
    Returns:
        Rotated IoU between boxes and qboxes.
"""
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou
| 5,343,116
|
def filter_shape(image):
"""画像にぼかしフィルターを適用。"""
weight = (
(1, 1, 1),
(1, 1, 1),
(1, 1, 1)
)
offset = 0
div = 9
return _filter(image, weight, offset, div)
| 5,343,117
|
def ppretty(obj, indent=' ', depth=4, width=72, seq_length=5,
show_protected=False, show_private=False, show_static=False, show_properties=False, show_address=False,
str_length=50):
"""Represents any python object in a human readable format.
:param obj: An object to represent.
:type obj: object
:param indent: Fill string for indents. Default is ' '.
:type indent: str
    :param depth: Depth of introspection. Default is 4.
:type depth: int
:param width: Width of line in output string. It may be exceeded when representation doesn't fit. Default is 72.
:type width: int
:param seq_length: Maximum sequence length. Also, used for object's members enumeration. Default is 5.
:type seq_length: int
:param show_protected: Examine protected members. Default is False.
:type show_protected: bool
:param show_private: Examine private members. To take effect show_protected must be set to True. Default is False.
:type show_private: bool
:param show_static: Examine static members. Default is False.
:type show_static: bool
:param show_properties: Examine properties members. Default is False.
:type show_properties: bool
:param show_address: Show address. Default is False.
:type show_address: bool
:param str_length: Maximum string length. Default is 50.
:type str_length: int
:return: The final representation of the object.
:rtype: str
"""
seq_brackets = {list: ('[', ']'), tuple: ('(', ')'), set: ('set([', '])'), dict: ('{', '}')}
seq_types = tuple(seq_brackets.keys())
basestring_type = basestring if sys.version_info[0] < 3 else str
def inspect_object(current_obj, current_depth, current_width, seq_type_descendant=False):
inspect_nested_object = partial(inspect_object,
current_depth=current_depth - 1,
current_width=current_width - len(indent))
# Basic types
if isinstance(current_obj, Number):
return [repr(current_obj)]
# Strings
if isinstance(current_obj, basestring_type):
if len(current_obj) <= str_length:
return [repr(current_obj)]
return [repr(current_obj[:int(str_length / 2)] + '...' + current_obj[int((1 - str_length) / 2):])]
# Class object
if isinstance(current_obj, type):
module = current_obj.__module__ + '.' if hasattr(current_obj, '__module__') else ''
return ["<class '" + module + current_obj.__name__ + "'>"]
# None
if current_obj is None:
return ['None']
# Format block of lines
def format_block(lines, open_bkt='', close_bkt=''):
new_lines = [] # new_lines will be returned if width exceeded
one_line = '' # otherwise, one_line will be returned.
if open_bkt:
new_lines.append(open_bkt)
one_line += open_bkt
for line in lines:
new_lines.append(indent + line)
if len(one_line) <= current_width:
one_line += line
if close_bkt:
if lines:
new_lines.append(close_bkt)
else:
new_lines[-1] += close_bkt
one_line += close_bkt
return [one_line] if len(one_line) <= current_width and one_line else new_lines
class SkipElement(object):
pass
class ErrorAttr(object):
def __init__(self, e):
self.e = e
def cut_seq(seq):
if current_depth < 1:
return [SkipElement()]
if len(seq) <= seq_length:
return seq
elif seq_length > 1:
seq = list(seq) if isinstance(seq, tuple) else seq
return seq[:int(seq_length / 2)] + [SkipElement()] + seq[int((1 - seq_length) / 2):]
return [SkipElement()]
def format_seq(extra_lines):
r = []
items = cut_seq(obj_items)
for n, i in enumerate(items, 1):
if type(i) is SkipElement:
r.append('...')
else:
if type(current_obj) is dict or seq_type_descendant and isinstance(current_obj, dict):
(k, v) = i
k = inspect_nested_object(k)
v = inspect_nested_object(v)
k[-1] += ': ' + v.pop(0)
r.extend(k)
r.extend(format_block(v))
elif type(current_obj) in seq_types or seq_type_descendant and isinstance(current_obj, seq_types):
r.extend(inspect_nested_object(i))
else:
(k, v) = i
k = [k]
if type(v) is ErrorAttr:
e_message = '<Attribute error: ' + type(v.e).__name__
if hasattr(v.e, 'message'):
e_message += ': ' + v.e.message
e_message += '>'
v = [e_message]
else:
v = inspect_nested_object(v)
k[-1] += ' = ' + v.pop(0)
r.extend(k)
r.extend(format_block(v))
if n < len(items) or extra_lines:
r[-1] += ', '
return format_block(r + extra_lines, *brackets)
# Sequence types
# Others objects are considered as sequence of members
extra_lines = []
if type(current_obj) in seq_types or seq_type_descendant and isinstance(current_obj, seq_types):
if isinstance(current_obj, dict):
obj_items = list(current_obj.items())
else:
obj_items = current_obj
if seq_type_descendant:
brackets = seq_brackets[[seq_type for seq_type in seq_types if isinstance(current_obj, seq_type)].pop()]
else:
brackets = seq_brackets[type(current_obj)]
else:
obj_items = []
for k in sorted(dir(current_obj)):
if not show_private and k.startswith('_') and '__' in k:
continue
if not show_protected and k.startswith('_'):
continue
try:
v = getattr(current_obj, k)
if isroutine(v):
continue
if not show_static and hasattr(type(current_obj), k) and v is getattr(type(current_obj), k):
continue
if not show_properties and hasattr(type(current_obj), k) and isinstance(
getattr(type(current_obj), k), property):
continue
except Exception as e:
v = ErrorAttr(e)
obj_items.append((k, v))
if isinstance(current_obj, seq_types):
# If object's class was inherited from one of basic sequence types
extra_lines += inspect_nested_object(current_obj, seq_type_descendant=True)
module = current_obj.__module__ + '.' if hasattr(current_obj, '__module__') else ''
address = ' at ' + hex(id(current_obj)) + ' ' if show_address else ''
brackets = (module + type(current_obj).__name__ + address + '(', ')')
return format_seq(extra_lines)
return '\n'.join(inspect_object(obj, depth, width))
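# Quick usage sketch for ppretty; assumes this module's imports (functools.partial,
# numbers.Number, inspect.isroutine, sys) are present. Output shown is indicative.
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

print(ppretty(Point(1, 2)))                                                    # e.g. __main__.Point(x = 1, y = 2)
print(ppretty({'name': 'demo', 'vals': [1, 2, 3, 4, 5, 6, 7]}, seq_length=3))  # long list gets truncated with '...'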
| 5,343,118
|
def traj_colormap(ax, traj, array, plot_mode, min_map, max_map, title=""):
"""
color map a path/trajectory in xyz coordinates according to
an array of values
:param ax: plot axis
:param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D object
:param array: Nx1 array of values used for color mapping
:param plot_mode: PlotMode
:param min_map: lower bound value for color mapping
:param max_map: upper bound value for color mapping
:param title: plot title
"""
pos = traj.positions_xyz
norm = mpl.colors.Normalize(vmin=min_map, vmax=max_map, clip=True)
mapper = cm.ScalarMappable(
norm=norm,
cmap=SETTINGS.plot_trajectory_cmap) # cm.*_r is reversed cmap
mapper.set_array(array)
colors = [mapper.to_rgba(a) for a in array]
line_collection = colored_line_collection(pos, colors, plot_mode)
ax.add_collection(line_collection)
if plot_mode == PlotMode.xyz:
ax.set_zlim(
np.amin(traj.positions_xyz[:, 2]),
np.amax(traj.positions_xyz[:, 2]))
if SETTINGS.plot_xyz_realistic:
set_aspect_equal_3d(ax)
fig = plt.gcf()
cbar = fig.colorbar(
mapper, ticks=[min_map, (max_map - (max_map - min_map) / 2), max_map])
cbar.ax.set_yticklabels([
"{0:0.3f}".format(min_map),
"{0:0.3f}".format(max_map - (max_map - min_map) / 2),
"{0:0.3f}".format(max_map)
])
if title:
ax.legend(frameon=True)
plt.title(title)
| 5,343,119
|
def dict2pkl(mydict, path):
"""
Saves a dictionary object into a pkl file.
:param mydict: dictionary to save in a file
:param path: path where my_dict is stored
:return:
"""
    try:
        import cPickle  # Python 2
    except ImportError:
        import pickle as cPickle  # Python 3
    if path[-4:] == '.pkl':
        extension = ''
    else:
        extension = '.pkl'
    # pickle data is binary, so the file must be opened in 'wb' mode
    with open(path + extension, 'wb') as f:
        cPickle.dump(mydict, f, protocol=cPickle.HIGHEST_PROTOCOL)
| 5,343,120
|
def makeTriangularMAFdist(low=0.02, high=0.5, beta=5):
"""Fake a non-uniform maf distribution to make the data
more interesting - more rare alleles """
MAFdistribution = []
    for i in range(int(100*low), int(100*high)+1):
freq = (51 - i)/100.0 # large numbers of small allele freqs
for j in range(beta*i): # or i*i for crude exponential distribution
MAFdistribution.append(freq)
return MAFdistribution
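# Usage sketch: build the skewed MAF pool once, then draw frequencies from it.
import random

mafs = makeTriangularMAFdist(low=0.02, high=0.5, beta=5)
sample = [random.choice(mafs) for _ in range(10)]  # mostly small allele frequencies
print(sample)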
| 5,343,121
|
def export_inference_graph(
config_path: str,
trained_ckpt_dir: str,
input_type: str,
output_dir: str,
config_override: Optional[pipeline_pb2.TrainEvalPipelineConfig] = None,
use_side_inputs: bool = False,
side_input_shapes: str = '',
side_input_types: str = '',
side_input_names: str = '',
) -> None:
"""Exports inference graph for the model specified in the pipeline config.
This function creates `output_dir` if it does not already exist, which will hold a copy of the pipeline config
with filename `pipeline.config`, and two subdirectories named `checkpoint` and `saved_model` (containing the
exported checkpoint and SavedModel respectively).
Args:
config_path: A path to a pipeline config file.
trained_ckpt_dir: Path to the trained checkpoint file.
input_type: Type of input for the graph. Can be one of ['image_tensor', 'encoded_image_string_tensor',
'tf_example'].
output_dir: Path to write outputs.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to override the config from `config_path`.
use_side_inputs: boolean that determines whether side inputs should be included in the input signature.
side_input_shapes: forward-slash-separated list of comma-separated lists describing input shapes.
side_input_types: comma-separated list of the types of the inputs.
side_input_names: comma-separated list of the names of the inputs.
Raises:
ValueError: if input_type is invalid.
"""
# config
pipeline_config = _get_pipeline_config(config_path, config_override)
# build model
detection_model = INPUT_BUILDER_UTIL_MAP['model_build'](pipeline_config.model, is_training=False)
# restore checkpoint
ckpt = tf.train.Checkpoint(model=detection_model)
manager = tf.train.CheckpointManager(ckpt, trained_ckpt_dir, max_to_keep=1)
status = ckpt.restore(manager.latest_checkpoint).expect_partial()
if input_type not in DETECTION_MODULE_MAP:
raise ValueError('Unrecognized `input_type`')
if use_side_inputs and input_type != 'image_tensor':
raise ValueError('Side inputs supported for image_tensor input type only.')
zipped_side_inputs = []
if use_side_inputs:
zipped_side_inputs = _combine_side_inputs(side_input_shapes, side_input_types, side_input_names)
detection_module = DETECTION_MODULE_MAP[input_type](detection_model, use_side_inputs, list(zipped_side_inputs))
    # Getting the concrete function traces the graph and forces variables to be
    # constructed; only after this can we save the checkpoint and saved model.
concrete_function = detection_module.__call__.get_concrete_function()
status.assert_existing_objects_matched()
# output
output_ckpt_dir = os.path.join(output_dir, 'checkpoint')
output_saved_model_dir = os.path.join(output_dir, 'saved_model')
# export checkpoint
exported_manager = tf.train.CheckpointManager(ckpt, output_ckpt_dir, max_to_keep=1)
exported_manager.save(checkpoint_number=0)
# export saved model
tf.saved_model.save(detection_module, output_saved_model_dir, signatures=concrete_function)
# export config
config_util.save_pipeline_config(pipeline_config, output_dir)
| 5,343,122
|
def create_structures_hdf5_stitched_ref_gene_file_npy(stitching_file, joining, nr_pixels,
reference_gene, blend = 'non linear'):
"""Takes an HDF5 file handle and creates the necessary structures.
Modification of create_structures_hdf5_files to work with .npy list of
files
Creates groups and data sets, when the groups or data sets already
exists, they are kept as they are, as long as the data sets have
the right size and data type. Incompatible data sets will be
overwritten.
    Stitching file has the following structure (groups, then data sets):
        gene_stitched:
            StitchedImage:
                final_image
                blending_mask
Parameters:
-----------
stitching_file: pointer
HDF5 file handle. The file where the stitched images will be saved.
joining: dict
        Dictionary containing keys 'corner_list' and 'final_image_shape'.
        Corner_list is a list of lists; each list is a pair of an image number
        (int) and its coordinates (numpy array containing floats).
Final_image_shape is a tuple of size 2 or 3
depending on the number of dimensions and contains ints.
nr_pixels: int
Height and length of the tile in pixels, tile is assumed to be square.
reference_gene: str
The name of the gene we are stitching.This will be used to place the
data in the right group in stitching_file.
blend: str
When 'non linear' or 'linear',blending will be applied,
so we will need to create the structures
necessary for saving the blended tiles. When
it has another value or is None no blending at
all will be applied, so we can skip this.
This variable also determines to return value of
linear_blending.
Returns:
--------
stitched_group: pointer
HDF5 reference to the group where the final will be.
linear_blending: bool
When True later blending should be linear and when False, blending
should be non-linear.
blend: str
When 'non linear' or 'linear', blending should be applied. When
it has another value or is None no blending at
all will be applied.
"""
logger.info("Generating stitching file structures.")
# Create a group for the stitched images in the stitching file
stitching_file.require_group(reference_gene)
stitched_group = stitching_file[reference_gene].require_group('StitchedImage')
# Create the final image in this file
try:
final_image = stitched_group.require_dataset('final_image',
joining['final_image_shape'],
dtype = np.float64)
except TypeError as err:
logger.info("Incompatible 'final_image' data set already existed, deleting old dataset.\n {}"
.format(err))
del stitched_group['final_image']
inout.free_hdf5_space(stitching_file)
final_image = stitched_group.require_dataset('final_image',
joining['final_image_shape'],
dtype = np.float64)
# If blending is required initialize the blending mask in the
# hdf5 file
if blend is not None:
# For the blending masks use only the last 2 dimensions of final
# image shape, because also when working in 3D the masks can be
# 2D as there is the same shift in x and y direction for the
# whole stack.
try:
blending_mask = stitched_group.require_dataset('blending_mask',
joining['final_image_shape'][-2:],
dtype = np.float64)
except TypeError as err:
logger.info("Incompatible 'blending_mask' data set already existed, deleting old dataset.\n {}"
.format(err))
del stitched_group['blending_mask']
inout.free_hdf5_space(stitching_file)
            blending_mask = stitched_group.require_dataset('blending_mask',
                                                           joining['final_image_shape'][-2:],
                                                           dtype = np.float64)
# Check type of blending
if blend == 'non linear':
linear_blending = False
elif blend == 'linear':
linear_blending = True
else:
linear_blending = False
logger.warning("Blend not defined correctly, \
using non-linear blending, \
blend is: {}".format(blend))
if False:
logger.info("Flushing hdf5 file to clean up after delete operations")
before_flush = stitching_file.id.get_filesize()
stitching_file.flush()
after_flush = stitching_file.id.get_filesize()
logger.debug("Size in bytes before flush: {} after flush: {} space freed: {}".format(before_flush, after_flush, before_flush - after_flush))
return stitched_group, linear_blending, blend
| 5,343,123
|
def contains_whitespace(s : str):
"""
    Returns True if the input string contains a space or tab character.
"""
return " " in s or "\t" in s
| 5,343,124
|
def get_files_to_check(files, filter_function):
# type: (List[str], Callable[[str], bool]) -> List[str]
"""Get a list of files that need to be checked based on which files are managed by git."""
# Get a list of candidate_files
candidates_nested = [expand_file_string(f) for f in files]
candidates = list(itertools.chain.from_iterable(candidates_nested))
if len(files) > 0 and len(candidates) == 0:
raise ValueError("Globs '%s' did not find any files with glob." % (files))
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_candidates(candidates, filter_function) for r in repos]))
if len(files) > 0 and len(valid_files) == 0:
raise ValueError("Globs '%s' did not find any files with glob in git." % (files))
return valid_files
| 5,343,125
|
def main(argv):
"""Parse the argv, verify the args, and call the runner."""
args = arg_parse(argv)
return run(args.top_foods, args.top_food_categories)
| 5,343,126
|
def _failover_read_request(request_fn, endpoint, path, body, headers, params, timeout):
""" This function auto-retries read-only requests until they return a 2xx status code. """
try:
return request_fn('GET', endpoint, path, body, headers, params, timeout)
except (requests.exceptions.RequestException, Non200ResponseException) as ex:
raise FailoverException(ex)
| 5,343,127
|
def update_image_viewer_state(rec, context):
"""
Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place.
"""
if '_protocol' not in rec:
# Note that files saved with protocol < 1 have bin settings saved per
# layer but they were always restricted to be the same, so we can just
# use the settings from the first layer
rec['state'] = {}
rec['state']['values'] = {}
# TODO: could generalize this into a mapping
properties = rec.pop('properties')
viewer_state = rec['state']['values']
viewer_state['color_mode'] = 'st__Colormaps'
viewer_state['reference_data'] = properties['data']
data = context.object(properties['data'])
# TODO: add an id method to unserializer
x_index = properties['slice'].index('x')
y_index = properties['slice'].index('y')
viewer_state['x_att_world'] = str(uuid.uuid4())
context.register_object(viewer_state['x_att_world'], data.world_component_ids[x_index])
viewer_state['y_att_world'] = str(uuid.uuid4())
context.register_object(viewer_state['y_att_world'], data.world_component_ids[y_index])
viewer_state['x_att'] = str(uuid.uuid4())
context.register_object(viewer_state['x_att'], data.pixel_component_ids[x_index])
viewer_state['y_att'] = str(uuid.uuid4())
context.register_object(viewer_state['y_att'], data.pixel_component_ids[y_index])
viewer_state['x_min'] = -0.5
viewer_state['x_max'] = data.shape[1] - 0.5
viewer_state['y_min'] = -0.5
viewer_state['y_max'] = data.shape[0] - 0.5
viewer_state['aspect'] = 'st__equal'
# Slicing with cubes
viewer_state['slices'] = [s if np.isreal(s) else 0 for s in properties['slice']]
# RGB mode
for layer in rec['layers'][:]:
if layer['_type'].split('.')[-1] == 'RGBImageLayerArtist':
for icolor, color in enumerate('rgb'):
new_layer = {}
new_layer['_type'] = 'glue.viewers.image.layer_artist.ImageLayerArtist'
new_layer['layer'] = layer['layer']
new_layer['attribute'] = layer[color]
new_layer['norm'] = layer[color + 'norm']
new_layer['zorder'] = layer['zorder']
new_layer['visible'] = layer['color_visible'][icolor]
new_layer['color'] = color
rec['layers'].append(new_layer)
rec['layers'].remove(layer)
viewer_state['color_mode'] = 'st__One color per layer'
layer_states = []
for layer in rec['layers']:
state_id = str(uuid.uuid4())
state_cls = STATE_CLASS[layer['_type'].split('.')[-1]]
state = state_cls(layer=context.object(layer.pop('layer')))
for prop in ('visible', 'zorder'):
value = layer.pop(prop)
value = context.object(value)
setattr(state, prop, value)
if 'attribute' in layer:
state.attribute = context.object(layer['attribute'])
else:
state.attribute = context.object(properties['attribute'])
if 'norm' in layer:
norm = context.object(layer['norm'])
state.bias = norm.bias
state.contrast = norm.contrast
state.stretch = norm.stretch
if norm.clip_hi is not None:
state.percentile = norm.clip_hi
else:
if norm.vmax is not None:
state.v_min = norm.vmin
state.v_max = norm.vmax
state.percentile = 'Custom'
if 'color' in layer:
state.global_sync = False
state.color = layer['color']
context.register_object(state_id, state)
layer['state'] = state_id
layer_states.append(state)
list_id = str(uuid.uuid4())
context.register_object(list_id, layer_states)
rec['state']['values']['layers'] = list_id
| 5,343,128
|
def test_block_legacy_send_from_dict():
"""
When deserializing a legacy send block, the balance has to be hex-formatted
"""
block_data = BLOCKS["send"]["data"].copy()
block_data["balance"] = "10000000"
with pytest.raises(InvalidBalance) as exc:
Block.from_dict(block_data)
assert "needs to be a hex" in str(exc.value)
| 5,343,129
|
def GenerateConfig(context):
"""Generates configuration."""
image = ''.join(['https://www.googleapis.com/compute/v1/',
'projects/google-containers/global/images/',
context.properties['containerImage']])
default_network = ''.join(['https://www.googleapis.com/compute/v1/projects/',
context.env['project'],
'/global/networks/default'])
instance_template = {
'name': context.env['name'] + '-it',
'type': 'compute.v1.instanceTemplate',
'properties': {
'properties': {
'metadata': {
'items': [{
'key': 'google-container-manifest',
'value': GenerateManifest(context)
}]
},
'machineType': 'f1-micro',
'disks': [{
'deviceName': 'boot',
'boot': True,
'autoDelete': True,
'mode': 'READ_WRITE',
'type': 'PERSISTENT',
'initializeParams': {'sourceImage': image}
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': default_network
}]
}
}
}
outputs = [{'name': 'instanceTemplateSelfLink',
'value': '$(ref.' + instance_template['name'] + '.selfLink)'}]
return {'resources': [instance_template], 'outputs': outputs}
| 5,343,130
|
async def read_all_orders(
status_order: Optional[str] = None,
priority: Optional[int] = None,
age: Optional[str] = None,
value: Optional[str] = None,
start_date: Optional[str] = None,
end_date: Optional[str] = None,
db: AsyncIOMotorClient = Depends(get_database),
) -> List[OrderSchema]:
"""[summary]
Get all item by ID.
[description]
Endpoint to retrieve an specific item.
[optional]
[ON CREATE] Filter order by status: ['to-do', 'doing', 'done']
"""
filters = {
"status": status_order,
"priority": priority,
"age": age,
"value": value,
"start_date": start_date,
"end_date": end_date,
}
orders_list = await orders.get_all(db, filters)
if not orders_list:
raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Orders not found"
)
return list(map(fix_item_id, orders_list))
| 5,343,131
|
def read_log_file(path):
"""
Read the log file for 3D Match's log files
"""
with open(path, "r") as f:
log_lines = f.readlines()
log_lines = [line.strip() for line in log_lines]
num_logs = len(log_lines) // 5
transforms = []
    for i in range(0, num_logs * 5, 5):
meta_data = np.fromstring(log_lines[i], dtype=int, sep=" \t")
transform = np.zeros((4, 4), dtype=float)
for j in range(4):
transform[j] = np.fromstring(log_lines[i + j + 1], dtype=float, sep=" \t")
transforms.append((meta_data, transform))
return transforms
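# A minimal sketch of the expected log layout: each record is five lines, a
# metadata line ("idx0 idx1 total") followed by the four rows of a 4x4
# transform, whitespace/tab separated. The temporary file is illustrative.
import os
import tempfile
import numpy as np

rows = ["0 \t1 \t2"] + [" \t".join("1.0" if r == c else "0.0" for c in range(4)) for r in range(4)]
with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as f:
    f.write("\n".join(rows) + "\n")
    path = f.name
meta, transform = read_log_file(path)[0]
print(meta, transform)  # [0 1 2] and a 4x4 identity matrix
os.remove(path)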
| 5,343,132
|
def visualize_OOP_and_F_timeseries(OOP_list,J_list,folder_name):
"""Plot timeseries."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
plt.figure()
plt.subplot(1,2,1)
plt.plot(OOP_list)
plt.xlabel('frame number')
plt.ylabel('OOP')
plt.tight_layout()
plt.subplot(1,2,2)
plt.plot(J_list)
plt.xlabel('frame number')
plt.ylabel('average deformation J')
plt.tight_layout()
plt.savefig(out_analysis + '/OOP_J_timeseries')
return
| 5,343,133
|
def divide_list(l, n):
"""Divides list l into n successive chunks."""
length = len(l)
chunk_size = int(math.ceil(length/n))
expected_length = n * chunk_size
chunks = []
for i in range(0, expected_length, chunk_size):
chunks.append(l[i:i+chunk_size])
for i in range(len(chunks), n):
chunks.append([])
return chunks
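# Usage sketch: divide_list() pads with empty lists when n exceeds the number of
# chunks actually produced (assumes `math` is imported, as the function requires).
assert divide_list([1, 2, 3, 4, 5], 2) == [[1, 2, 3], [4, 5]]
assert divide_list([1, 2], 4) == [[1], [2], [], []]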
| 5,343,134
|
def sigma(n):
"""Calculate the sum of all divisors of N."""
return sum(divisors(n))
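# Minimal sketch of the `divisors` helper that sigma() relies on; it is not part
# of the snippet above, so this definition is an assumption for illustration only.
def divisors(n):
    """Return all positive divisors of n."""
    return [d for d in range(1, n + 1) if n % d == 0]

# Example: sigma(12) == 1 + 2 + 3 + 4 + 6 + 12 == 28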
| 5,343,135
|
def ATOMPAIRSfpDataFrame(chempandas,namecol,smicol):
"""
AtomPairs-based fingerprints 2048 bits.
"""
assert chempandas.shape[0] <= MAXLINES
molsmitmp = [Chem.MolFromSmiles(x) for x in chempandas.iloc[:,smicol]]
i = 0
molsmi = []
for x in molsmitmp:
if x is not None:
x.SetProp("_Name",chempandas.iloc[i,namecol])
molsmi.append(x)
i += 1
# ATOMPAIRS Fingerprints.
fps = [Pairs.GetAtomPairFingerprintAsBitVect(x) for x in molsmi]
fpsmat = np.matrix(fps)
df = DataFrame(fpsmat,index = [x.GetProp("_Name") for x in molsmi]) # how to name the col?
df['SMILES'] = [Chem.MolToSmiles(x) for x in molsmi]
df['CHEMBL'] = df.index
return(df)
| 5,343,136
|
def test_backend_get_set_pin_mode() -> None:
"""Test that we can get and set pin modes."""
pin = 2
backend = SBArduinoHardwareBackend("COM0", SBArduinoSerial)
assert backend.get_gpio_pin_mode(pin) is GPIOPinMode.DIGITAL_INPUT
backend.set_gpio_pin_mode(pin, GPIOPinMode.DIGITAL_OUTPUT)
assert backend.get_gpio_pin_mode(pin) is GPIOPinMode.DIGITAL_OUTPUT
| 5,343,137
|
def filter_by_minimum(X, region):
"""Filter synapses by minimum.
# Arguments:
X (numpy array): A matrix in the NeuroSynapsis matrix format.
# Returns:
numpy array: A matrix in the NeuroSynapsis matrix format.
"""
    vals = np.where((X[:,2] >= region[0])*(X[:,3] >= region[1])*(X[:,4] >= region[2]))[0]
return X[vals,:]
| 5,343,138
|
def gen_graphs(sizes):
"""
    Generate randomly permuted Barabási–Albert graphs of the given sizes.
"""
A = []
for V in tqdm(sizes):
G = nx.barabasi_albert_graph(V, 3)
G = nx.to_numpy_array(G)
P = np.eye(V)
np.random.shuffle(P)
A.append(P.T @ G @ P)
return np.array(A)
| 5,343,139
|
def daemonize(identity: str, kind: str = 'workspace') -> DaemonID:
"""Convert to DaemonID
:param identity: uuid or DaemonID
:param kind: defaults to 'workspace'
:return: DaemonID from identity
"""
try:
return DaemonID(identity)
except TypeError:
return DaemonID(f'j{kind}-{identity}')
| 5,343,140
|
def count_gender(data_list:list):
"""
    Count the gender population.
    args:
        data_list (list): list of data records that have the 'Gender' property
    return (list): returns a list with the total number of 'Male' and 'Female' elements, in that order
"""
genders = column_to_list(data_list, "Gender")
genders_counter = Counter(genders)
male = genders_counter["Male"]
female = genders_counter["Female"]
return [male, female]
| 5,343,141
|
async def card_balance(request: Request):
""" 返回用户校园卡余额 """
cookies = await get_cookies(request)
balance_data = await balance.balance(cookies)
return success(data=balance_data)
| 5,343,142
|
def _grid_vals(grid, dist_name, scn_save_fs,
mod_thy_info, constraint_dct):
""" efef
"""
# Initialize the lists
locs_lst = []
enes_lst = []
# Build the lists of all the locs for the grid
grid_locs = []
for grid_val_i in grid:
if constraint_dct is None:
grid_locs.append([[dist_name], [grid_val_i]])
else:
grid_locs.append([constraint_dct, [dist_name], [grid_val_i]])
# Get the energies along the grid
for locs in grid_locs:
if scn_save_fs[-1].exists(locs):
scn_path = scn_save_fs[-1].path(locs)
sp_save_fs = autofile.fs.single_point(scn_path)
enes_lst.append(sp_save_fs[-1].file.energy.read(mod_thy_info[1:4]))
locs_lst.append(locs)
return locs_lst, enes_lst
| 5,343,143
|
def group_images_by_label(label_arr, gid_arr):
"""
Input: Length N list of labels and ids
    Output: Length M list of unique labels, and length M list of lists of ids
"""
# Reverse the image to cluster index mapping
import vtool as vt
labels_, groupxs_ = vt.group_indices(label_arr)
sortx = np.array(list(map(len, groupxs_))).argsort()[::-1]
labels = labels_.take(sortx, axis=0)
groupxs = ut.take(groupxs_, sortx)
label_gids = vt.apply_grouping(gid_arr, groupxs)
return labels, label_gids
| 5,343,144
|
def ask_user(prompt: str, default: str = None) -> Optional[str]:
"""
Prompts the user, with a default. Returns user input from ``stdin``.
"""
if default is None:
prompt += ": "
else:
prompt += " [" + default + "]: "
result = input(prompt)
return result if len(result) > 0 else default
| 5,343,145
|
def fetch_lords_mocks_data():
"""Fetch mocks data for unit tests of Lords."""
# Download Lords
l = lords.fetch_lords_raw()
validate.write(l, 'lords_raw')
time.sleep(constants.API_PAUSE_TIME)
# Download Lords memberships
l_cm = lords.fetch_lords_memberships_raw()
validate.write(l_cm, 'lords_memberships_raw')
time.sleep(constants.API_PAUSE_TIME)
# Download Lords party memberships
l_pm = lords.fetch_lords_party_memberships_raw()
validate.write(l_pm, 'lords_party_memberships_raw')
time.sleep(constants.API_PAUSE_TIME)
# Download Lords government roles
l_gor = lords.fetch_lords_government_roles_raw()
validate.write(l_gor, 'lords_government_roles_raw')
time.sleep(constants.API_PAUSE_TIME)
# Download Lords opposition roles
l_opr = lords.fetch_lords_opposition_roles_raw()
validate.write(l_opr, 'lords_opposition_roles_raw')
time.sleep(constants.API_PAUSE_TIME)
# Download Lords committee memberships
l_ctm = lords.fetch_lords_committee_memberships_raw()
validate.write(l_ctm, 'lords_committee_memberships_raw')
time.sleep(constants.API_PAUSE_TIME)
| 5,343,146
|
def tensor_index_by_tuple(data, tuple_index):
"""Tensor getitem by tuple of various types with None"""
if not tuple_index:
return data
op_name = const_utils.TENSOR_GETITEM
tuple_index = _transform_ellipsis_to_slice(data, tuple_index, op_name)
data, tuple_index = _expand_data_dims(data, tuple_index)
min_data_dim, max_data_dim = 1, 8
const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
indexes_types = hyper_map(F.typeof, tuple_index)
contain_type = const_utils.tuple_index_type_cnt(indexes_types, op_name)
if contain_type == const_utils.ALL_BASIC:
return _tensor_getitem_by_tuple_slice(data, tuple_index)
return _tensor_getitem_by_tuple(data, tuple_index, op_name)
| 5,343,147
|
def extract_text(savelocation, file):
"""Splits text into seperate files starting from 1. to the next 1.
Args:
savelocation (string): directory to where it should extract files to
file (string): file you want to extra files FROM
"""
# If the save location doesn't exist, we create one.
if not os.path.exists(savelocation): os.makedirs(savelocation)
# Open the File you want to orginze text from.
with open(file, 'r', encoding='utf-8') as f:
# read all the lines of the file this gives you a large list with each line.
lines = f.readlines()
# creating a list to store all the lines that have '1.' written on that line.
ones_index = []
# loop over every line inside the lines list.
for idx, line in enumerate(lines):
# if the line has `1.` then add that to the ones_index list.
# if '1.' in line and not '11.' in line and not '21.' in line and not '31.' in line and not '41.' in line and not '51.' in line and not '61.' in line:
if line[0] == '1' and line[1] == '.':
lineIndex0 = line[0]
lineIndex1 = line[1]
# if not '11.' in line and not '21.' in line and not '31.' in line and not '41.' in line and not '51.' in line and not '61.' in line:
if lineIndex0 == '1' and lineIndex1 == '.':
ones_index.append(idx)
'''
here we a the last line of the text file to the ones_index
We do so when the last `1.` is found it gets all the text.
It's more important for layer.
'''
ones_index[len(lines):] = [len(lines)]
# We loop over all the ones_indexs elements.
for j, i in enumerate(range(len(ones_index)-1)):
# we set a variable to be the number we are currently in the iteration
start = ones_index[i]
# our stop variale is the next element in a list.
stop = ones_index[i+1]
''' -=-=-=-=-= Make title =-=-=-=-=-=- '''
# We set our title variable to the current lines text.
title = lines[start].replace('\n', '')
# if our title has punctiation at the end we want to remove it.
# if title.endswith(',') or title.endswith('-') or title.endswith('.'):
# title = title[:-1]
title = title.replace(',', '')
title = title.replace('?', '')
title = title.replace('!', '')
title = title.replace("'", '')
title = title.replace("’", '')
title = title.replace(":", '')
title = title.replace("|", '')
title = title.replace('\n', '')
# we split the title into a list where ever a space is (' ')
title = title.split('.')
# We remove the first element in our splited list, because it always contains a number
title.pop(0)
# title = title.replace('.', '')
# we join our title back together again with spaces (' ')
title = ' '.join(title)
# We set the number to be the number of the iteration we are in, in the loop.
title = title.split(',')
title1 = title[0]
title2 = lines[start+1].replace('\n', '')
if (title1.endswith('-')):
title1 = title1.replace('-', '')
title2 = title2.replace(',', ' ')
title2 = title2.replace('!', '')
title2 = title2.replace("'", '')
title = str(j + 1) + '.' + title1 + title2
else:
title = str(j + 1) + '.' + title1
title = title.replace('\t', ' ')
'''
Create a new file that is called what we set our title to.
Write all the lines from the start variable to the stop variable.
'''
with open('{}\\{}.txt'.format(savelocation, title), 'w+', encoding='utf-8') as f:
f.write(''.join(lines[start:stop]))
| 5,343,148
|
def are_neurons_responsive(spike_times, spike_clusters, stimulus_intervals=None,
spontaneous_period=None, p_value_threshold=.05):
"""
Return which neurons are responsive after specific stimulus events, compared to spontaneous
activity, according to a Wilcoxon test.
:param spike_times: times of spikes, in seconds
:type spike_times: 1D array
:param spike_clusters: spike neurons
:type spike_clusters: 1D array, same length as spike_times
    :param stimulus_intervals: the times of
        the stimulus events onsets and offsets
    :type stimulus_intervals: 2D array
    :param spontaneous_period: the period of spontaneous activity
    :type spontaneous_period: 1D array with 2 elements
:param p_value_threshold: the threshold for the
p value in the Wilcoxon test.
:type p_value_threshold: float
:rtype: 1D boolean array with `n_neurons`
elements (clusters are sorted by increasing cluster
id as appearing in spike_clusters).
"""
stimulus_counts = _get_spike_counts_in_bins(
spike_times, spike_clusters, stimulus_intervals)
# Find spontaneous intervals.
stimulus_durations = np.diff(stimulus_intervals, axis=1).squeeze()
t0, t1 = spontaneous_period
spontaneous_starts = np.linspace(
t0,
t1 - stimulus_durations.max(),
len(stimulus_intervals))
spontaneous_intervals = np.c_[
spontaneous_starts,
spontaneous_starts +
stimulus_durations]
# Count the spontaneous counts.
spontaneous_counts = _get_spike_counts_in_bins(
spike_times, spike_clusters, spontaneous_intervals)
    assert stimulus_counts.shape == spontaneous_counts.shape
# Generate the responsive vector (for every neuron, whether it is
# responsive).
responsive = np.zeros(stimulus_counts.shape[0], dtype=bool)
n_neurons = stimulus_counts.shape[0]
for i in range(n_neurons):
x = stimulus_counts[i, :]
y = spontaneous_counts[i, :]
        try:
            _, p = scipy.stats.wilcoxon(x, y)
        except ValueError:
            # wilcoxon raises when all differences are zero; treat as non-responsive
            continue
        responsive[i] = p < p_value_threshold
return responsive
| 5,343,149
|
def _get_book(**keywords):
"""Get an instance of :class:`Book` from an excel source
Where the dictionary should have text as keys and two dimensional
array as values.
"""
source = factory.get_book_source(**keywords)
sheets = source.get_data()
filename, path = source.get_source_info()
return sheets, filename, path
| 5,343,150
|
def handler(
state_store: StateStore,
hardware_api: HardwareAPI,
movement_handler: MovementHandler,
) -> PipettingHandler:
"""Create a PipettingHandler with its dependencies mocked out."""
return PipettingHandler(
state_store=state_store,
hardware_api=hardware_api,
movement_handler=movement_handler,
)
| 5,343,151
|
def get_domain(domain_name):
"""
Query the Rackspace DNS API to get a domain object for the domain name.
Keyword arguments:
domain_name -- the domain name that needs a challenge record
"""
base_domain_name = get_tld("http://{0}".format(domain_name))
domain = rax_dns.find(name=base_domain_name)
return domain
| 5,343,152
|
def lobatto(n):
"""Get Gauss-Lobatto-Legendre points and weights.
Parameters
----------
n : int
Number of points
"""
if n == 2:
return ([0, 1],
[sympy.Rational(1, 2), sympy.Rational(1, 2)])
if n == 3:
return ([0, sympy.Rational(1, 2), 1],
[sympy.Rational(1, 6), sympy.Rational(2, 3), sympy.Rational(1, 6)])
if n == 4:
return ([0, (1 - 1 / sympy.sqrt(5)) / 2, (1 + 1 / sympy.sqrt(5)) / 2, 1],
[sympy.Rational(1, 12), sympy.Rational(5, 12), sympy.Rational(5, 12),
sympy.Rational(1, 12)])
if n == 5:
return ([0, (1 - sympy.sqrt(3) / sympy.sqrt(7)) / 2, sympy.Rational(1, 2),
(1 + sympy.sqrt(3) / sympy.sqrt(7)) / 2, 1],
[sympy.Rational(1, 20), sympy.Rational(49, 180), sympy.Rational(16, 45),
sympy.Rational(49, 180), sympy.Rational(1, 20)])
if n == 6:
return ([0,
(1 - sympy.sqrt(sympy.Rational(1, 3) + (2 * sympy.sqrt(7) / 21))) / 2,
(1 - sympy.sqrt(sympy.Rational(1, 3) - (2 * sympy.sqrt(7) / 21))) / 2,
(1 + sympy.sqrt(sympy.Rational(1, 3) - (2 * sympy.sqrt(7) / 21))) / 2,
(1 + sympy.sqrt(sympy.Rational(1, 3) + (2 * sympy.sqrt(7) / 21))) / 2,
1],
[sympy.Rational(1, 30), (14 - sympy.sqrt(7)) / 60, (14 + sympy.sqrt(7)) / 60,
(14 + sympy.sqrt(7)) / 60, (14 - sympy.sqrt(7)) / 60, sympy.Rational(1, 30)])
if n == 7:
return ([0,
(1 - sympy.sqrt((5 + 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
(1 - sympy.sqrt((5 - 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
sympy.Rational(1, 2),
(1 + sympy.sqrt((5 - 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
(1 + sympy.sqrt((5 + 2 * sympy.sqrt(5) / sympy.sqrt(3)) / 11)) / 2,
1],
[sympy.Rational(1, 42),
(124 - 7 * sympy.sqrt(15)) / 700,
(124 + 7 * sympy.sqrt(15)) / 700,
sympy.Rational(128, 525),
(124 + 7 * sympy.sqrt(15)) / 700,
(124 - 7 * sympy.sqrt(15)) / 700,
sympy.Rational(1, 42)])
raise NotImplementedError()
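# Usage sketch (assumes sympy is imported, as the function requires): the points
# lie on [0, 1] and the quadrature weights sum to 1.
points, weights = lobatto(4)
assert sum(weights) == 1
assert points[0] == 0 and points[-1] == 1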
| 5,343,153
|
def login():
"""Login."""
username = request.form.get('username')
password = request.form.get('password')
if not username:
flask.flash('Username is required.', 'warning')
elif password is None:
flask.flash('Password is required.', 'warning')
else:
user = models.User.login_user(username, password)
if user:
session['user'] = user.username
return flask.redirect(flask.url_for('catalog'))
flask.flash('Invalid username/password.', 'danger')
return flask.redirect(flask.url_for('home'))
| 5,343,154
|
def comp_periodicity(self, p=None):
"""Compute the periodicity factor of the lamination
Parameters
----------
self : LamSlotMulti
A LamSlotMulti object
Returns
-------
per_a : int
Number of spatial periodicities of the lamination
is_antiper_a : bool
        True if a spatial anti-periodicity is possible after the periodicities
per_t : int
Number of time periodicities of the lamination
is_antiper_t : bool
        True if a time anti-periodicity is possible after the periodicities
"""
if self.sym_dict_enforced is not None:
self.get_logger().debug("Enforcing symmetry for LamSlotMulti")
return (
self.sym_dict_enforced["per_a"],
self.sym_dict_enforced["is_antiper_a"],
self.sym_dict_enforced["per_t"],
self.sym_dict_enforced["is_antiper_t"],
)
else:
Zs = self.get_Zs()
is_aper = False
# TODO compute it
self.get_logger().debug("Symmetry not available yet for LamSlotMulti")
return 1, is_aper, 1, is_aper
| 5,343,155
|
def tagger(c):
"""
Find usage value or AWS Tags.
Using a json input to stdin we gather enough data to be able to specify the
usage of some resources.
"""
p_log("Task: Tagger")
params = {}
lines = [x.strip() for x in sys.stdin.readlines()]
lines = list(filter(None, lines))
if len(lines) != 0:
params = json.loads(",".join(lines))
c = {"usage": "NULL"}
# Change the args to vessel and quadrant
if re.match("dev|stage", params["vpc_name"]):
c["usage"] = "non-prod"
if "client_name" in params and re.match("ops", params["client_name"]):
c["usage"] = "devops"
elif re.match("prod", params["vpc_name"]):
c["usage"] = "prod"
if "client_name" in params and re.match("demo|preview", params["client_name"]):
c["usage"] = "sales"
print(json.dumps(c))
| 5,343,156
|
def sha2_384(data: bytes) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.sha2_384(), data)
| 5,343,157
|
def cat(arr, match="CAT", upper_bound=None, lower_bound=None):
"""
Basic idea is if a monkey typed randomly, how long would it take for it
to write `CAT`. Practically, we are mapping generated numbers onto the
alphabet.
>"There are 26**3 = 17 576 possible 3-letter words, so the average number of
keystrokes necessary to produce CAT should be around 17 576" [1]
    Parameters
    ----------
    arr: list-type object
        Array of generated random numbers to map onto the alphabet.
    match: string or list-type object
        The keyword to search for. Other than length, doesn't really matter.
        If you pass in a list of strings, it will give you a result for each
        passed in string.
    upper_bound: int (optional)
        Upper bound of random values. If not set, the maximum value of the
        array passed is used.
    lower_bound: int (optional)
        Lower bound of random values. If not set, the minimum value of the
        array passed is used.
Returns
-------
dict
Key is the string passed into match, the value is a list of the
iteration cycles it was found at
Notes
-----
[1]: Marsaglia, G. and Zaman, A., (1995), Monkey tests for random number
generators, Computers & Mathematics with Applications, 9, No. 9, 1–10.
"""
if upper_bound is None:
upper_bound = np.max(arr)
if lower_bound is None:
lower_bound = np.min(arr)
if isinstance(match, str):
match = [match]
match = list(map(str.upper, match))
num_letters = len(match[0])
assert all([len(match_i) == num_letters for match_i in match]), \
"All elements of `match` must have the same number of characters"
n_uppercase = len(string.ascii_uppercase)
# {...number: letter...} mapping
mapping = dict(zip(range(n_uppercase), string.ascii_uppercase))
    # Scale the array so that everything maps into [0, 26)
    arr_norm = np.floor((arr - lower_bound) * (n_uppercase / (upper_bound - lower_bound)))
    arr_norm = np.clip(arr_norm, 0, n_uppercase - 1)  # keep the top value in range
    # Map the integer component to letters (np.int is deprecated; use the builtin int)
    letters = [mapping[i] for i in arr_norm.astype(int)]
# Split the array of letters into words
words = chunker(letters, batch_size=num_letters, complete=True)
iter_counts = {match_i: [] for match_i in match}
for i, letter_list in enumerate(words):
word = ''.join(letter_list)
if word in match:
iter_counts[word].append(i)
return iter_counts
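# Minimal sketch of the `chunker` helper that cat() assumes (it is not shown in
# this snippet); here `complete=True` is taken to mean only full batches are kept.
def chunker(seq, batch_size, complete=True):
    for start in range(0, len(seq), batch_size):
        batch = seq[start:start + batch_size]
        if complete and len(batch) < batch_size:
            break
        yield batch

# Usage sketch: with ~300k uniform draws in [0, 26), "CAT" appears roughly
# 100_000 / 17_576 ≈ 5-6 times on average.
# hits = cat(np.random.uniform(0, 26, 300_000), match="CAT",
#            upper_bound=26, lower_bound=0)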
| 5,343,158
|
def encrypt_data(key: bytes, data: str) -> str:
"""
Encrypt the data
:param key: key to encrypt the data
:param data: data to be encrypted
    :returns: the encrypted data as a UTF-8 string
    """
    # instantiate the cipher
cipher_suite = Fernet(key)
# convert our data into bytes mode
data_to_bytes = bytes(data, "utf-8")
encrypted = cipher_suite.encrypt(data_to_bytes)
return encrypted.decode("utf-8")
| 5,343,159
|
def make_doi_table(dataset: ObservatoryDataset) -> List[Dict]:
"""Generate the DOI table from an ObservatoryDataset instance.
:param dataset: the Observatory Dataset.
:return: table rows.
"""
records = []
for paper in dataset.papers:
# Doi, events and grids
doi = paper.doi.upper()
events = make_doi_events(doi, paper.events)
# Affiliations: institutions, countries, regions, subregion, funders, journals, publishers
institutions = make_doi_institutions(paper.authors)
countries = make_doi_countries(paper.authors)
regions = make_doi_regions(paper.authors)
subregions = make_doi_subregions(paper.authors)
funders = make_doi_funders(paper.funders)
journals = make_doi_journals(paper.journal)
publishers = make_doi_publishers(paper.publisher)
# Make final record
records.append(
{
"doi": doi,
"crossref": {
"title": paper.title,
"published_year": paper.published_date.year,
"published_month": paper.published_date.month,
"published_year_month": f"{paper.published_date.year}-{paper.published_date.month}",
"funder": [{"name": funder.name, "DOI": funder.doi} for funder in paper.funders],
},
"unpaywall": {},
"unpaywall_history": {},
"mag": {},
"open_citations": {},
"events": events,
"affiliations": {
"doi": doi,
"institutions": institutions,
"countries": countries,
"subregions": subregions,
"regions": regions,
"groupings": [],
"funders": funders,
"authors": [],
"journals": journals,
"publishers": publishers,
},
}
)
# Sort to match with sorted results
records.sort(key=lambda r: r["doi"])
return records
| 5,343,160
|
def do_volume_connector_list(cc, args):
"""List the volume connectors."""
params = {}
if args.node is not None:
params['node'] = args.node
if args.detail:
fields = res_fields.VOLUME_CONNECTOR_DETAILED_RESOURCE.fields
field_labels = res_fields.VOLUME_CONNECTOR_DETAILED_RESOURCE.labels
elif args.fields:
utils.check_for_invalid_fields(
args.fields[0],
res_fields.VOLUME_CONNECTOR_DETAILED_RESOURCE.fields)
resource = res_fields.Resource(args.fields[0])
fields = resource.fields
field_labels = resource.labels
else:
fields = res_fields.VOLUME_CONNECTOR_RESOURCE.fields
field_labels = res_fields.VOLUME_CONNECTOR_RESOURCE.labels
sort_fields = res_fields.VOLUME_CONNECTOR_DETAILED_RESOURCE.sort_fields
sort_field_labels = (
res_fields.VOLUME_CONNECTOR_DETAILED_RESOURCE.sort_labels)
params.update(utils.common_params_for_list(args,
sort_fields,
sort_field_labels))
volume_connector = cc.volume_connector.list(**params)
cliutils.print_list(volume_connector, fields,
field_labels=field_labels,
sortby_index=None,
json_flag=args.json)
| 5,343,161
|
def get_output_file_path(file_path: str) -> str:
"""
get the output file's path
:param file_path: the file path
:return: the output file's path
"""
split_file_path: List[str] = list(os.path.splitext(file_path))
return f'{split_file_path[0]}_sorted{split_file_path[1]}'
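# Usage sketch: the "_sorted" suffix is inserted just before the file extension.
assert get_output_file_path("data/report.csv") == "data/report_sorted.csv"
assert get_output_file_path("notes") == "notes_sorted"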
| 5,343,162
|
def test_custom_grain_with_annotations(grains_dir):
"""
Load custom grain with annotations.
"""
opts = salt.config.DEFAULT_MINION_OPTS.copy()
opts["grains_dirs"] = [grains_dir]
grains = salt.loader.grains(opts, force_refresh=True)
assert grains.get("example") == "42"
| 5,343,163
|
def test_relative_and_other_root_dirs(offline, request):
"""Test styles in relative and in other root dirs."""
another_dir = TEMP_ROOT_PATH / "another_dir" # type: Path
project = (
ProjectMock(request)
.named_style(
"{}/main".format(another_dir),
"""
[nitpick.styles]
include = "styles/pytest.toml"
""",
)
.named_style(
"{}/styles/pytest".format(another_dir),
"""
["pyproject.toml".tool.pytest]
some-option = 123
""",
)
.named_style(
"{}/styles/black".format(another_dir),
"""
["pyproject.toml".tool.black]
line-length = 99
missing = "value"
""",
)
.named_style(
"{}/poetry".format(another_dir),
"""
["pyproject.toml".tool.poetry]
version = "1.0"
""",
)
)
common_pyproject = """
[tool.black]
line-length = 99
[tool.pytest]
some-option = 123
"""
# Use full path on initial styles
project.pyproject_toml(
"""
[tool.nitpick]
style = ["{another_dir}/main", "{another_dir}/styles/black"]
{common_pyproject}
""".format(
another_dir=another_dir, common_pyproject=common_pyproject
)
).flake8(offline=offline).assert_single_error(
"""
NIP318 File pyproject.toml has missing values:\x1b[32m
[tool.black]
missing = "value"\x1b[0m
"""
)
# Reuse the first full path that appears
project.pyproject_toml(
"""
[tool.nitpick]
style = ["{}/main", "styles/black.toml"]
{}
""".format(
another_dir, common_pyproject
)
).flake8().assert_single_error(
"""
NIP318 File pyproject.toml has missing values:\x1b[32m
[tool.black]
missing = "value"\x1b[0m
"""
)
# Allow relative paths
project.pyproject_toml(
"""
[tool.nitpick]
style = ["{}/styles/black", "../poetry"]
{}
""".format(
another_dir, common_pyproject
)
).flake8(offline=offline).assert_single_error(
"""
NIP318 File pyproject.toml has missing values:\x1b[32m
[tool.black]
missing = "value"
[tool.poetry]
version = "1.0"\x1b[0m
"""
)
| 5,343,164
|
def binarySearch(arr, val):
"""
array values must be sorted
"""
    left = 0
    right = len(arr) - 1
    while left <= right:
        half = (left + right) // 2
        if arr[half] == val:
            return half
        if val < arr[half]:
            right = half - 1
        else:
            left = half + 1
    return -1
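# Usage sketch: the input must be sorted in ascending order.
sorted_vals = [1, 3, 5, 7, 9, 11]
assert binarySearch(sorted_vals, 7) == 3
assert binarySearch(sorted_vals, 4) == -1  # value not present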
| 5,343,165
|
def foreign_key(
recipe: Union[Recipe[M], str], one_to_one: bool = False
) -> RecipeForeignKey[M]:
"""Return a `RecipeForeignKey`.
Return the callable, so that the associated `_model` will not be created
during the recipe definition.
This resolves recipes supplied as strings from other module paths or from
the calling code's module.
"""
if isinstance(recipe, str):
# Load `Recipe` from string before handing off to `RecipeForeignKey`
try:
# Try to load from another module
recipe = baker._recipe(recipe)
except (AttributeError, ImportError, ValueError):
# Probably not in another module, so load it from calling module
recipe = _load_recipe_from_calling_module(cast(str, recipe))
return RecipeForeignKey(cast(Recipe[M], recipe), one_to_one)
| 5,343,166
|
def posterize(image, num_bits):
"""Equivalent of PIL Posterize."""
shift = 8 - num_bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
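# Usage sketch (assumes TensorFlow is imported as tf): keeping 4 bits zeroes out
# the 4 low-order bits of each pixel value.
img = tf.constant([[0, 37, 255]], dtype=tf.uint8)
low_bit_img = posterize(img, 4)  # -> [[0, 32, 240]]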
| 5,343,167
|
def cli(
from_proj: str,
from_x_column: str,
from_y_column: str,
from_x_format: str,
from_y_format: str,
to_proj: str,
to_x_header: str,
to_y_header: str,
input_filename: str,
output_filename: str
):
"""Reproject a CSV from one Coordinate Reference System to another."""
from_proj = Proj(init=from_proj)
to_proj = Proj(init=to_proj)
transformer = Transformer.from_proj(from_proj, to_proj)
with open(input_filename) as input_file, \
open(output_filename, 'w') as output_file:
reader = CsvProjReader(
input_file,
from_x_column,
from_y_column,
from_x_format,
from_y_format
)
writer = CsvProjWriter(
output_file,
to_x_header,
to_y_header
)
input_headers = reader.read_headers()
writer.write_headers(input_headers)
for csv_proj_row in reader.read_proj_rows():
new_x, new_y = [None, None]
if csv_proj_row.x is not None and csv_proj_row.y is not None:
new_x, new_y = transformer.transform(
csv_proj_row.x,
csv_proj_row.y
)
writer.write_row(csv_proj_row.row, new_x, new_y)
| 5,343,168
|
def validate_targetRegionBedFile_for_runType(
value,
field_label,
runType,
reference,
nucleotideType=None,
applicationGroupName=None,
isPrimaryTargetRegion=True,
barcodeId="",
runType_label=ugettext_lazy("workflow.step.application.fields.runType.label"),
):
"""
validate targetRegionBedFile based on the selected reference and the plan's runType
"""
errors = []
value = value.strip() if value else ""
if value:
missing_file = check_uploaded_files(bedfilePaths=[value])
if missing_file:
errors.append("%s : %s not found" % (field_label, value))
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_run() SKIPS validation due to no targetRegion file exists in db. value=%s"
% (value)
)
return errors
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_runType() value=%s; runType=%s; reference=%s; nucleotideType=%s; applicationGroupName=%s"
% (value, runType, reference, nucleotideType, applicationGroupName)
)
if not isPrimaryTargetRegion:
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_run() SKIPS validation due to no validation rules for non-primary targetRegion. value=%s"
% (value)
)
return errors
if reference:
if runType:
runType = runType.strip()
applProducts = ApplProduct.objects.filter(
isActive=True,
applType__runType=runType,
applicationGroup__name=applicationGroupName,
) or ApplProduct.objects.filter(isActive=True, applType__runType=runType)
if applProducts:
applProduct = applProducts[0]
if applProduct:
if (
validation.has_value(value)
and not applProduct.isTargetRegionBEDFileSupported
):
errors.append(
validation.invalid_invalid_related(
field_label, ScientificApplication.verbose_name
)
)
else:
isRequired = (
applProduct.isTargetRegionBEDFileSelectionRequiredForRefSelection
)
if (
isRequired
and not validation.has_value(value)
and not barcodeId
):
# skip for now
if (
runType in ["AMPS_DNA_RNA", "AMPS_HD_DNA_RNA"]
and nucleotideType
and nucleotideType.upper() == "RNA"
):
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_runType() ALLOW MISSING targetRegionBed for runType=%s; nucleotideType=%s"
% (runType, nucleotideType)
)
elif runType in ["AMPS_RNA", "AMPS_HD_RNA"]:
logger.debug(
"plan_validator.validate_targetRegionBedFile_for_runType() ALLOW MISSING targetRegionBed for runType=%s; applicationGroupName=%s"
% (runType, applicationGroupName)
)
else:
errors.append(
validation.invalid_required_related(
field_label, ScientificApplication.verbose_name
)
)
elif value:
if not os.path.isfile(value):
errors.append(
validation.invalid_invalid_value(field_label, value)
)
else:
errors.append(
validation.invalid_invalid_value_related(
runType_label, runType, ScientificApplication.verbose_name
)
)
else:
errors.append(
validation.invalid_required_related(
runType_label, ScientificApplication.verbose_name
)
)
return errors
| 5,343,169
|
def run_find_markers(
input_h5ad_file: str,
output_file: str,
label_attr: str,
de_key: str = "de_res",
n_jobs: int = -1,
min_gain: float = 1.0,
random_state: int = 0,
remove_ribo: bool = False,
) -> None:
"""
For command line use.
"""
import xlsxwriter
from natsort import natsorted
data = read_input(input_h5ad_file)
markers = find_markers(
data,
label_attr,
de_key=de_key,
n_jobs=n_jobs,
min_gain=min_gain,
random_state=random_state,
remove_ribo=remove_ribo,
)
keywords = [("strong", "strong_gain"), ("weak", "weak_gain"), ("down", "down_gain")]
writer = pd.ExcelWriter(output_file, engine="xlsxwriter")
for clust_id in natsorted(markers.keys()):
clust_markers = markers[clust_id]
sizes = []
for keyword in keywords:
sizes.append(len(clust_markers[keyword[0]]))
arr = np.zeros((max(sizes), 8), dtype=object)
arr[:] = ""
for i in range(3):
arr[0 : sizes[i], i * 3] = clust_markers[keywords[i][0]]
arr[0 : sizes[i], i * 3 + 1] = clust_markers[keywords[i][1]]
df = pd.DataFrame(
data=arr,
columns=[
"strongly up-regulated",
"gain",
"",
"weakly up-regulated",
"gain",
"",
"down-regulated",
"gain",
],
)
df.to_excel(writer, sheet_name=clust_id, index=False)
writer.save()
| 5,343,170
|
def polyFit(x, y):
"""
Function to fit a straight line to data and estimate slope and
intercept of the line and corresponding errors using first order
polynomial fitting.
Parameters
----------
x : ndarray
X-axis data
y : ndarray
Y-axis data
Returns
-------
ndarray
slope, intercept, SDslope, SDintercept
Reference
---------
"""
# Number of input points
N = x.shape[0]
# Estimate slope and intercept of fitted line
slope, intercept = np.polyfit(x, y, 1)
# Calculate standard deviation of slope and intercept
yhat = intercept + slope * x
residual = y - yhat
Sx2 = np.sum(x**2)
Sxx = np.sum((x - np.mean(x))**2)
    Sy_x = np.sqrt(np.sum(residual**2) / (N - 2))
SDslope = Sy_x / np.sqrt(Sxx)
SDintercept = Sy_x * np.sqrt(Sx2 / (N * Sxx))
return np.array([[slope, intercept], [SDslope, SDintercept]])
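# Usage sketch (assumes numpy is imported as np): a noiseless line recovers the
# exact slope and intercept with (near-)zero standard deviations.
x_demo = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y_demo = 2.0 * x_demo + 1.0
coefs, errs = polyFit(x_demo, y_demo)
# coefs ≈ [2.0, 1.0]; errs ≈ [0.0, 0.0]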
| 5,343,171
|
def test_delete_no_pk():
"""Validate a delete returning a pkdict without specifying the primary key."""
_logger.debug(stack()[0][3])
config = deepcopy(_CONFIG)
t = table(config)
returning = t.delete('{id}={target}', {'target': 7},
('uid',), container='pkdict')
row = t.select('WHERE {id} = 7', columns=(
'id', 'left', 'right', 'uid', 'metadata', 'name'))
assert len(returning) == 1
assert row == []
| 5,343,172
|
def stability_test_standard(
umbrella: Path,
outdir: Path | None = None,
tests: str | list[str] = "all",
) -> None:
"""Perform a battery of standard stability tests.
This function expects a rigid `umbrella` directory structure,
based on the output of results that are generated by rexpy_.
.. _rexpy: https://github.com/douglasdavis/rexpy
Parameters
----------
umbrella : pathlib.Path
Umbrella directory containing all fits run via rexpy's
standard fits.
outdir : pathlib.Path, optional
Directory to save results (defaults to current working
directory).
tests : str or list(str)
Which tests to execute. (default is "all"). The possible tests
include:
* ``"sys-drops"``, which shows the stability test for dropping
some systematics.
* ``"indiv-camps"``, which shows the stability test for
limiting the fit to individual campaigns.
* ``"regions"``, which shows the stability test for limiting
the fit to subsets of the analysis regions.
* ``"b0-check"``, which shows the stability test for limiting
the fit to individual analysis regions and checking the B0
eigenvector uncertainty.
"""
import tdub.internal.stab_tests as tist
umbrella = umbrella.resolve()
curdir = Path.cwd().resolve()
if outdir is None:
outdir = curdir
else:
outdir.mkdir(parents=True, exist_ok=True)
if tests == "all":
tests = ["sys-drops", "indiv-camps", "regions", "b0-check"]
os.chdir(outdir)
if "sys-drops" in tests:
nom, names, labels, vals = tist.excluded_systematics_delta_mu_summary(
umbrella / "main.force-data.d" / "tW"
)
fig, ax = plt.subplots(figsize=(5.2, 1.5 + len(names) * 0.315))
fig.subplots_adjust(left=0.50, right=0.925)
tist.make_delta_mu_plot(
ax, nom.sig_hi, nom.sig_lo, vals["c"], vals["d"], vals["u"], labels
)
fig.savefig("stability-tests-sys-drops.pdf")
if "indiv-camps" in tests:
nom, names, labels, vals = tist.indiv_camp_delta_mu_summary(umbrella)
fig, ax = plt.subplots(figsize=(5.2, 1.5 + len(names) * 0.315))
fig.subplots_adjust(left=0.350, right=0.925, bottom=0.3, top=0.99)
tist.make_delta_mu_plot(
ax, nom.sig_hi, nom.sig_lo, vals["c"], vals["d"], vals["u"], labels
)
fig.savefig("stability-tests-indiv-camps.pdf")
if "regions" in tests:
nom, names, labels, vals = tist.region_delta_mu_summary(umbrella)
fig, ax = plt.subplots(figsize=(5.2, 1.5 + len(names) * 0.315))
fig.subplots_adjust(left=0.350, right=0.925, bottom=0.3, top=0.99)
tist.make_delta_mu_plot(
ax, nom.sig_hi, nom.sig_lo, vals["c"], vals["d"], vals["u"], labels
)
fig.savefig("stability-tests-regions.pdf")
if "b0-check" in tests:
fig, ax = tist.b0_by_year_fig_and_ax(umbrella)
fig.subplots_adjust(left=0.350, right=0.925, bottom=0.3, top=0.8)
fig.savefig("stability-tests-b0-check.pdf")
os.chdir(curdir)
return None
| 5,343,173
|
def evaluate_error(X, y, w):
"""Returns the mean squared error.
X : numpy.ndarray
Numpy array of data.
y : numpy.ndarray
Numpy array of outputs. Dimensions are n * 1, where n is the number of
rows in `X`.
w : numpy.ndarray
Numpy array with dimensions (m + 1) * 1, where m is the number of
columns in `X`.
Returns
-------
float
The mean squared error
"""
X_b = np.hstack((np.ones((X.shape[0], 1)), X))
y_predict = X_b.dot(w)
dist = (y - y_predict) ** 2
return float(np.sum(dist)) / X.shape[0]
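# Usage sketch (assumes numpy is imported as np): weights [intercept, slope]
# that fit the data exactly give a mean squared error of zero.
X_demo = np.array([[1.0], [2.0], [3.0]])
y_demo = np.array([[3.0], [5.0], [7.0]])
w_demo = np.array([[1.0], [2.0]])  # y = 1 + 2 * x
assert evaluate_error(X_demo, y_demo, w_demo) == 0.0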
| 5,343,174
|
def call_later(fn, args=(), delay=0.001):
"""
Calls the provided function in a new thread after waiting some time.
Useful for giving the system some time to process an event, without blocking
the current execution flow.
"""
thread = _Thread(target=lambda: (_time.sleep(delay), fn(*args)))
thread.start()
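# Usage sketch (assumes the module-level aliases _Thread and _time used above):
# the callback runs on a background thread after ~50 ms without blocking here.
call_later(print, args=("deferred hello",), delay=0.05)
print("returns immediately")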
| 5,343,175
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up ha_reef_pi from a config entry."""
websession = async_get_clientsession(hass)
coordinator = ReefPiDataUpdateCoordinator(hass, websession, entry)
await coordinator.async_config_entry_first_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
undo_listener = entry.add_update_listener(update_listener)
hass.data[DOMAIN][entry.entry_id] = {
"coordinator": coordinator,
"undo_update_listener": undo_listener,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
| 5,343,176
|
def get_list(client):
"""
"""
request = client.__getattr__(MODULE).ListIpBlocks()
response, _ = request.result()
return response['results']
| 5,343,177
|
def g(z: Set[str]):
""" Test set default """
print(z)
| 5,343,178
|
def file_asset(class_obj):
"""
Decorator to annotate the FileAsset class. Registers the decorated class
as the FileAsset known type.
"""
assert isinstance(class_obj, six.class_types), "class_obj is not a Class"
global _file_asset_resource_type
_file_asset_resource_type = class_obj
return class_obj
| 5,343,179
|
def main(username: str, output: str, file_format: str, token: str) -> None:
"""A Python CLI to backup all your GitHub repositories."""
# More info:
# - https://ghapi.fast.ai/core.html#GhApi
# - https://ghapi.fast.ai/core.html#Operations
# (if don't pass the token parameter, then your GITHUB_TOKEN environment variable
# will be used, if available)
# - https://ghapi.fast.ai/page.html#paged
api = GhApi(owner=username, token=token)
# click.echo(api.headers)
# click.echo(api.func_dict)
# More info:
# - https://ghapi.fast.ai/fullapi.html#repos
# - https://docs.github.com/en/rest/reference/repos#list-public-repositories
# (it is not for a specific user)
# - https://docs.github.com/en/rest/reference/repos#list-repositories-for-a-user
# (public repositories)
# - https://docs.github.com/en/rest/reference/repos#list-repositories-for-the-authenticated-user
# (all repositories)
# repos = api.repos.list_for_user(username=username, type="all", sort="pushed")
repos: Generator = paged(
api.repos.list_for_authenticated_user,
visibility="all",
affiliation="owner",
sort="full_name",
)
# More info:
# - https://stackoverflow.com/a/50110841
# - https://docs.python.org/3.6/library/pathlib.html#pathlib.Path.mkdir
# - https://stackoverflow.com/a/32490661
# - https://docs.python.org/3.6/library/pathlib.html#pathlib.Path.open
timestamp: str = datetime.today().strftime(f"%Y%m%d{OUTPUT_FOLDER_SEP}%H%M%S")
output_folder = (
Path(output) / f"{OUTPUT_FOLDER_PREFIX}{OUTPUT_FOLDER_SEP}{timestamp}"
)
output_folder.mkdir(parents=False, exist_ok=False)
click.echo(f"Output folder: {output_folder}")
# More info:
# - https://docs.github.com/en/rest/reference/repos#download-a-repository-archive-zip
# - https://docs.github.com/en/rest/reference/repos#download-a-repository-archive-tar
# - https://github.com/fastai/ghapi/issues/22
# - https://github.com/fastai/fastcore/pull/308
# - https://github.com/fastai/fastcore/blob/1.3.27/fastcore/net.py#L203
# - https://stackoverflow.com/a/67964008
# (ref="" for the master/main branch)
# Note: It is not working. Use an alternative. See error message for debugging.
# It would work if the execution was via this if branch, for example:
# https://github.com/fastai/fastcore/blob/1.3.27/fastcore/net.py#L209
# api.repos.download_zipball_archive(repo="glone", ref="")
# api.repos.download_zipball_archive(repo="glone", ref="", archive_format="zip")
# Workaround:
# - https://fastcore.fast.ai/net.html#urlsend
# - https://docs.github.com/en/rest/reference/actions#download-an-artifact
# - https://docs.python.org/3.6/library/functions.html#open
# - https://stackoverflow.com/a/6633693
# - https://click.palletsprojects.com/en/7.x/options/?highlight=choice#choice-options
# zip_url = (
# f"{GH_HOST}/repos/{username}/" + "{repo}/" + f"{file_format}ball" + "/{ref}"
# )
# route = {"repo": "glone", "ref": "", "archive_format": file_format}
# or
# route = {"repo": "glone", "ref": "", "archive_format": "zip"}
# click.echo(zip_url)
# click.echo(route)
# res, headers = urlsend(
# zip_url, "GET", headers=api.headers, route=route, return_headers=True
# )
# click.echo(headers)
# _, _, output_filename = headers["content-disposition"].partition("filename=")
# click.echo(output_filename)
# with open(output_folder / output_filename, "wb") as fh:
# fh.write(res)
zip_url = (
f"{GH_HOST}/repos/{username}/" + "{repo}/" + f"{file_format}ball" + "/{ref}"
)
for page in repos:
# click.echo(len(page))
for repo in page:
click.echo(f"Repo: {repo.name}")
route = {"repo": repo.name, "ref": "", "archive_format": file_format}
res, headers = urlsend(
zip_url, "GET", headers=api.headers, route=route, return_headers=True
)
_, _, output_filename = headers["content-disposition"].partition(
"filename="
)
output_file_path = output_folder / output_filename
with open(output_file_path, "wb") as fh:
fh.write(res)
click.echo(f"Archive file: {output_file_path}")
# break
# break
click.echo(f"Number of archive files/repos: {get_folder_file_count(output_folder)}")
# Compare with:
# du -ch <OUTPUT_FOLDER>/*
# du -sh <OUTPUT_FOLDER>
size = get_folder_size(output_folder)
click.echo(
"Output folder size (approximate): "
f"{naturalsize(size, binary=False, gnu=False)}"
)
click.echo("Done!")
| 5,343,180
|
def get_bam_list(args):
"""
Retrieve bam list from given tumor bam directory
"""
bamList = []
for bam in glob.glob(os.path.join(args.tumor_bams_directory, "*.bam")):
# Todo: CMO bams don't always end in 'T'
# if os.path.basename(bam).split('_')[0].split('-')[-1].startswith('T'):
bamList.append(bam)
return bamList
| 5,343,181
|
def test_init_raw():
"""
tests whether the ADPBulk object can be instantiated correctly
"""
adat = build_adat()
# tests singular group conditions
for group in adat.obs.columns:
_ = ADPBulk(adat, groupby=group, use_raw=True)
# tests multiple group conditions
_ = ADPBulk(adat, groupby=["cA", "cD"], use_raw=True)
assert True
| 5,343,182
|
def run_time_it():
"""Trigger timeit"""
dynamicArray(n, queries)
| 5,343,183
|
def scheming_field_by_name(fields, name):
"""
Simple helper to grab a field from a schema field list
based on the field name passed. Returns None when not found.
"""
for f in fields:
if f.get('field_name') == name:
return f
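# Usage sketch: returns the matching field dict, or None when the name is absent.
demo_fields = [{"field_name": "title"}, {"field_name": "notes"}]
assert scheming_field_by_name(demo_fields, "notes") == {"field_name": "notes"}
assert scheming_field_by_name(demo_fields, "missing") is None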
| 5,343,184
|
def saveResult(img_file, img, boxes, dirname='./result/', verticals=None, texts=None):
""" save text detection result one by one
Args:
img_file (str): image file name
img (array): raw image context
boxes (array): array of result file
Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output
Return:
None
"""
img = np.array(img)
#print("image shape:", img.shape)
height = img.shape[0]
width = img.shape[1]
#img = clustering(img)
    alpha = 1.25  # Contrast control (1.0-3.0)
    beta = 0  # Brightness control (0-100)
    img = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)
# make result file list
filename, file_ext = os.path.splitext(os.path.basename(img_file))
# result directory
res_file = dirname + "res_" + filename + '.txt'
res_img_file = dirname + "res_" + filename + '.jpg'
if not os.path.isdir(dirname):
os.mkdir(dirname)
# ignore top bboxes
boxes = boxes[2:]
# enlist top left corner of bboxes
    top_l_points = []
    textd = []
    #texts = [i for i, _ in enumerate(boxes)]
    # Default values so the return statement does not fail when a field
    # is not detected on the card
    DoB_point, DoB_text = (0, 0), ""
    x_b, y_b = 0, 0
    NamePoint = (0, 0)
    curp = clave = registro = vigencia = emisión = ""
    sexo = municipio = estado = ""
with open(res_file, 'w') as f:
for i, box in enumerate(boxes):
poly = np.array(box).astype(np.int32).reshape((-1))
strResult = ','.join([str(p) for p in poly]) + '\r\n'
f.write(strResult)
#### these points contain edges of boxes or polygons, dependin
## on argument of SaveResult
poly = poly.reshape(-1, 2)
#### these points contain edges of boxes ##
#x, y, w, h = poly[0][0 ], poly[0][1], poly[2][0], poly[2][1]
# draw first point of box
#cv2.circle(img, (poly[0][0], poly[0][1]), 6, (255,0,0), 2)
# x, y = tuple(poly[0])
# w, h = tuple(poly[2])
y, x = tuple(poly[0])
h, w = tuple(poly[2])
#print(f"Coordinates are x {x}, y {y}, w {w}, h {h}")
img_copy = img.copy()
cropped_boxes = img_copy[int(min(x,w)-4):int(max(x,w)+4), int(min(y,h)-4):int(max(y,h)+4)]
#print("cropped boxes: ",cropped_boxes )
#print("min and max (w,x), min and max (y,h)",
if cropped_boxes is not None:
cv2.imwrite("saved_{}_box.png".format(i), cropped_boxes)
dilated_img = cv2.dilate(cropped_boxes[:,:,1], np.ones((33,33 ), np.uint8))
#bg_img = cv2.GaussianBlur(dilated_img, (9,9),0)
bg_img = cv2.medianBlur(dilated_img, 11)
#--- finding absolute difference to preserve edges ---
diff_img = 255 - cv2.absdiff(cropped_boxes[:,:,1], bg_img)
#--- normalizing between 0 to 255 ---
norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
#--- Otsu threshold ---
#th = cv2.adaptiveThreshold(norm_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
cropped_boxes = cv2.threshold(norm_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
#cropped_boxes = clustering(cropped_boxes)
text = pytesseract.image_to_string(cropped_boxes, lang="spa", config='--psm 6')
#print("text by crop box {}".format(i), text)
top_l_points.append(tuple(poly[0]))
text_w_n_blanks = text.strip()
textd.append(text_w_n_blanks)
# Check where DoB is
check_dob = re.search("[0-9]{1,2}(/)[0-9]{1,2}(/)[0-9]{4}",
text_w_n_blanks)
if check_dob:
x_b, y_b = tuple(poly[0])
DoB_point = tuple(poly[0])
DoB_text = text
#print("DoB point: ", poly[0])
print("DoB: ", DoB_text)
# Check where curp is
check_curp = re.search("[a-zA-Z]{4}[0-9]{6}[a-zA-Z]{4}", text_w_n_blanks)
if check_curp:
curp = text.split(" ")[-1]
print("curp: ", curp)
# Check where clave de elector is
check_clave = re.search("[a-zA-Z]{6}[0-9]{8}[a-zA-Z]{1}", text_w_n_blanks)
if check_clave:
clave = text.split(" ")[-1]
print("clave: ", clave)
# Check where registro is
check_registro= re.search("[0-9]{4}( )[0-9]{2}", text_w_n_blanks)
if check_registro:
registro1 = text.split(" ")[-2:-1][0]
registro2 = text.split(" ")[-1]
registro = registro1+" "+registro2
print("registro: ", registro1, registro2)
# Check emisión and vigencia
#check_vig_emision= re.search("[0-9]{4}( )[a-zA-Z]{8}( )[0-9]{4}",
# text_w_n_blanks)
vig = text_w_n_blanks.split(" ")[-1]
emi = text_w_n_blanks.split(" ")[0]
check_vig= re.search("[0-9]{4}", vig)
check_emi= re.search("[0-9]{4}", emi)
if check_vig and check_emi:
print("vigencia: ", vig)
print("emisión: ", emi)
vigencia = vig
emisión = emi
# check if sexo
if "sex" in text_w_n_blanks.lower():
sexo = text.split(" ")[-1]
print("sexo check", sexo)
if "m" in sexo.lower():
sexo = "M"
print("sexo: ",sexo)
else:
sexo = "H"
print("sexo: ",sexo)
#print("sexo: ", sexo)
# check if municipio
if "munici" in text_w_n_blanks.lower():
municipio = text.split(" ")[-1]
print("municipio: ", municipio)
if "esta" in text_w_n_blanks.lower():
estado = text.split(" ")[-1]
print("estado: ", estado)
#print("debug", text_w_n_blanks)
# all text is lowercase
text_w_n_blanks = text_w_n_blanks.lower()
#print(text_w_n_blanks)
print(DoB_point, DoB_text)
name_dicts = dict(zip(textd, top_l_points))
#print("name_dicts: ", name_dicts)
#print("DoB_point:", DoB_point)
for k, v in name_dicts.copy().items():
if v == tuple(DoB_point):
#print(k ,"value deleted")
del name_dicts[k]
top_l_points.remove(tuple(DoB_point))
## gets the nearest y coordinate initial bounding point
name_dicts0= {k:tuple(map(lambda i, j:
i - j, v, tuple(DoB_point))) for k, v in name_dicts.items() }
#print(name_dicts0)
for x,y in top_l_points:
if y < y_b+(0.015*height) and y > y_b-(0.015*height) :
# if y < y_b+15 and y > y_b-15 :
NamePoint = x,y
#print(NamePoint)
distances_list = []
for point in top_l_points:
distances_list.append(distance(point, NamePoint))
#print( distances_list)
for k, v in name_dicts.copy().items(): # (for Python 2.x)
if v == NamePoint:
PrimerApellido = k
#print("Primer apellido", k)
name_dicts2= {k:tuple(map(lambda i, j:
i - j, v, NamePoint)) for k, v in name_dicts.items() }
#print(name_dicts2)
dist_dict = {k:distance((0,0),v) for k,v in name_dicts2.items()}
#print(dist_dict)
sorted_dist_dict = {k: v for k, v in sorted(dist_dict.items(),
key=lambda item: item[1])}
#print(sorted_dist_dict)
## get the next two items (they are in ordered by the tuple)
## and should be the next two bounding boxes
names_list= list(sorted_dist_dict.keys())[:5]
names_list = [name for name in names_list
if "DOMICI" not in name]
names_list = [name for name in names_list
if "NOM" not in name]
names_list = [name for name in names_list
if "CREDENCIAL" not in name]
Domicilio_list= list(sorted_dist_dict.keys())[5:10]
#print(Domicilio_list)
Domicilio_list = [name for name in Domicilio_list
if "DOMICI" not in name]
Domicilio_list = [name for name in Domicilio_list
if "MÉXICO" not in name]
Domicilio_list = [name for name in Domicilio_list
if "CREDENCIAL" not in name]
Domicilio_list = [name for name in Domicilio_list
if "ELECTOR" not in name]
Domicilio_list = [name for name in Domicilio_list
if "CLAVE" not in name]
Domicilio_list = [name for name in Domicilio_list
if "cur" not in name]
domicilio_list_str = ' '.join([str(elem) for elem in Domicilio_list])
#print("names_list: ",names_list)
names_list_str = ' '.join([str(elem) for elem in names_list])
print()
print("Nombre completo: ", names_list_str)
print("Domicilio completo: ", domicilio_list_str)
#print("Fecha de nacimiento:", DoB_text)
# Save result image
cv2.imwrite(res_img_file, img)
return {"nombre": names_list_str, "fecha_de_nacimiento":DoB_text.strip(),
"sexo": sexo, "domicilio":domicilio_list_str,"clave_de_elector": clave.strip(),
"CURP": curp.strip(), "registro":registro.strip(), "numero_de_emisión":emisión,
"estado": estado.strip(), "municipio": municipio.strip(), "vigencia":vigencia}
#, "seccion": seccion}
| 5,343,185
|
def merge_sort(items):
"""Sorts a list of items.
Uses merge sort to sort the list items.
Args:
items: A list of items.
Returns:
The sorted list of items.
"""
n = len(items)
if n < 2:
return items
m = n // 2
left = merge_sort(items[:m])
right = merge_sort(items[m:])
return merge(left, right)
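# Minimal sketch of the two-way `merge` helper that merge_sort() relies on; it is
# not included in the snippet above, so this definition is an assumption.
def merge(left, right):
    """Merge two sorted lists into one sorted list."""
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

assert merge_sort([5, 2, 4, 7, 1, 3, 2, 6]) == [1, 2, 2, 3, 4, 5, 6, 7]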
| 5,343,186
|
def sshkey_generate(private, public, path):
"""Génération de clé privée , public ssh"""
try:
from Crypto.PublicKey import RSA
key = RSA.generate(2048)
public_key = key.publickey()
enc_data = public_key.encrypt("""admin-manager-hash""", 32)
x = key.decrypt(enc_data)
x = x.decode('utf-8')
k = key.exportKey('PEM')
p = key.publickey().exportKey('PEM')
with open(private,'w') as kf:
kf.write(k.decode())
kf.close()
with open(public,'w') as pf:
pf.write(p.decode())
pf.close()
os.system("sudo mkdir" " "+path)
os.system("sudo mv "+private+" "+path)
os.system("sudo mv "+public+" "+path)
    except NameError:
        print('A "NameError" exception was raised')
    except AttributeError:
        print('An "AttributeError" exception was raised')
    except SyntaxError:
        print('A "SyntaxError" exception was raised')
    except IOError:
        print('An "IOError" exception was raised')
    except ImportError:
        print('An "ImportError" exception was raised')
    except IndentationError:
        print('An "IndentationError" exception was raised')
| 5,343,187
|
def hunk_boundary(
hunk: HunkInfo, operation_type: Optional[str] = None
) -> Optional[HunkBoundary]:
"""
Calculates boundary for the given hunk, returning a tuple of the form:
(<line number of boundary start>, <line number of boundary end>)
If operation_type is provided, it is used to filter down only to lines whose line_type matches
the operation_type. Possible values: "+", "-", None.
If there are no lines of the given type in the hunk, returns None.
"""
line_type_p = lambda line: True
if operation_type is not None:
line_type_p = lambda line: line.line_type == operation_type
admissible_lines = [line for line in hunk.lines if line_type_p(line)]
if not admissible_lines:
return None
return HunkBoundary(
operation_type=operation_type,
start=admissible_lines[0].new_line_number,
end=admissible_lines[-1].new_line_number,
)
| 5,343,188
|
def get_activities_list(log, parameters=None):
"""
Gets the activities list from a log object, sorted by activity name
Parameters
--------------
log
Log
parameters
Possible parameters of the algorithm
Returns
-------------
activities_list
List of activities sorted by activity name
"""
from pm4py.statistics.attributes.pandas import get as pd_attributes_filter
from pm4py.statistics.attributes.log import get as log_attributes_filter
if parameters is None:
parameters = {}
activity_key = parameters[
constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else xes.DEFAULT_NAME_KEY
if type(log) is pd.DataFrame:
activities = pd_attributes_filter.get_attribute_values(log, activity_key)
else:
activities = log_attributes_filter.get_attribute_values(log, activity_key)
return sorted(list(activities.keys()))
| 5,343,189
|
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_vocab_size,
encoding_embedding_size):
"""
:return: tuple (RNN output, RNN state)
"""
embed = tf.contrib.layers.embed_sequence(rnn_inputs,
vocab_size=source_vocab_size,
embed_dim=encoding_embedding_size)
stacked_cells = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(rnn_size), keep_prob) for _ in range(num_layers)])
outputs, state = tf.nn.dynamic_rnn(stacked_cells,
embed,
dtype=tf.float32)
return outputs, state
| 5,343,190
|
def point_selection(start, end, faces):
""" Calculates the intersection points between a line segment and triangle mesh.
:param start: line segment start point
:type start: Vector3
:param end: line segment end point
:type end: Vector3
    :param faces: N x 9 array of triangular face vertices
:type faces: numpy.ndarray
:return: array of intersection points
:rtype: numpy.ndarray
"""
direction = end - start
length = direction.length
if length < eps or faces is None:
return np.array([])
direction /= length
distances = segment_triangle_intersection(start, direction, length, faces)
if not distances:
return np.array([])
distances = np.reshape(distances, (len(distances), 1))
return start + direction * distances
| 5,343,191
|
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
"""Returns the number of days between year1/month1/day1
and year2/month2/day2. Assumes inputs are valid dates
in Gregorian calendar, and the first date is not after
the second."""
month = month2
year = year2
day = day2 - day1
if (day < 0):
day += 30
month -= 1
month = month - month1
if (month < 0):
month += 12
year -= 1
year = year - year1
return (year * 360) + month * 30 + day
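# Usage sketch: with the 30-day-month convention the result can differ slightly
# from the true calendar difference (the second example is really 2 days apart).
assert daysBetweenDates(2020, 1, 1, 2020, 2, 1) == 30
assert daysBetweenDates(2019, 12, 31, 2020, 1, 2) == 1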
| 5,343,192
|
def derive_sender_1pu(epk, sender_sk, recip_pk, alg, apu, apv, keydatalen):
"""Generate two shared secrets (ze, zs)."""
ze = derive_shared_secret(epk, recip_pk)
zs = derive_shared_secret(sender_sk, recip_pk)
key = derive_1pu(ze, zs, alg, apu, apv, keydatalen)
return key
| 5,343,193
|
def collapse(board_u):
"""
takes a row/column of the board
and collapses it to the left
"""
i = 1
limit = 0
while i < 4:
if board_u[i]==0:
i += 1
continue
up_index = i-1
curr_index = i
while up_index>=0 and board_u[up_index]==0:
board_u[up_index] = board_u[curr_index]
board_u[curr_index] = 0
up_index -= 1
curr_index -= 1
if up_index >= limit and board_u[up_index]==board_u[curr_index]:
board_u[up_index] *= 2
board_u[curr_index] = 0
limit = curr_index
i += 1
return board_u
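# Usage sketch: collapse() shifts tiles left and merges equal neighbours once
# per move, 2048-style (the list is modified in place and also returned).
assert collapse([2, 2, 0, 4]) == [4, 4, 0, 0]
assert collapse([0, 2, 0, 2]) == [4, 0, 0, 0]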
| 5,343,194
|
def find_match_in_file(search_term, file_location):
"""
This function is used to query a file
search_term = Term to find
file_location = Location of file to query.
"""
try:
with open(file_location) as line:
for search in line:
result = re.match(search_term, search)
if result:
return result
return
except Exception as err:
print(err)
| 5,343,195
|
def write_long(encoder, datum, schema, named_schemas, fname):
"""int and long values are written using variable-length, zig-zag coding."""
encoder.write_long(datum)
| 5,343,196
|
def get_cost_function(cost_function_name: str):
"""
Given the name of a cost function, retrieve the corresponding function and its partial derivative wrt Y_circ
:param cost_function_name: the name of the cost function
:return: the corresponding cost function and its partial derivative wrt Y_circ
"""
try:
return cost_functions[cost_function_name]
except KeyError:
raise UnknownCostFunctionName(cost_function_name)
| 5,343,197
|
def create_C1(data_set):
"""
Create frequent candidate 1-itemset C1 by scaning data set.
Args:
data_set: A list of transactions. Each transaction contains several items.
Returns:
C1: A set which contains all frequent candidate 1-itemsets
"""
C1 = set()
for t in data_set:
for item in t:
item_set = frozenset([item])
C1.add(item_set)
return C1
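# Usage sketch: every distinct item becomes a frozenset candidate 1-itemset.
transactions = [["milk", "bread"], ["bread", "butter"], ["milk"]]
assert create_C1(transactions) == {frozenset(["milk"]),
                                   frozenset(["bread"]),
                                   frozenset(["butter"])}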
| 5,343,198
|
def timezone(name):
"""
Loads a Timezone instance by name.
:param name: The name of the timezone.
:type name: str or int
:rtype: Timezone
"""
return Timezone.load(name)
| 5,343,199
|