| column | type / observed range |
|---|---|
| hexsha | string, length 40 |
| size | int64, 4 to 1.02M |
| ext | string, 8 classes |
| lang | string, 1 class |
| max_stars_repo_path | string, length 4 to 209 |
| max_stars_repo_name | string, length 5 to 121 |
| max_stars_repo_head_hexsha | string, length 40 |
| max_stars_repo_licenses | list, length 1 to 10 |
| max_stars_count | int64, 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string, length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string, length 24, nullable |
| max_issues_repo_path | string, length 4 to 209 |
| max_issues_repo_name | string, length 5 to 121 |
| max_issues_repo_head_hexsha | string, length 40 |
| max_issues_repo_licenses | list, length 1 to 10 |
| max_issues_count | int64, 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string, length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string, length 24, nullable |
| max_forks_repo_path | string, length 4 to 209 |
| max_forks_repo_name | string, length 5 to 121 |
| max_forks_repo_head_hexsha | string, length 40 |
| max_forks_repo_licenses | list, length 1 to 10 |
| max_forks_count | int64, 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string, length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string, length 24, nullable |
| content | string, length 4 to 1.02M |
| avg_line_length | float64, 1.07 to 66.1k |
| max_line_length | int64, 4 to 266k |
| alphanum_fraction | float64, 0.01 to 1 |
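The records that follow conform to the schema above. As a minimal sketch of how such rows could be inspected, assuming they have been exported to a Parquet file (the filename `code_rows.parquet` and the use of pandas are assumptions, not part of this dump):

```python
import pandas as pd

# Hypothetical export of the rows shown below; the filename is an assumption.
df = pd.read_parquet("code_rows.parquet")

# Column names come from the schema above; max_stars_count is nullable.
popular = df[df["max_stars_count"].fillna(0) >= 100]
print(popular[["max_stars_repo_name", "max_stars_repo_path",
               "max_stars_count", "avg_line_length", "alphanum_fraction"]])

# The full source of each sampled file is stored in the `content` column.
if not popular.empty:
    print(popular.iloc[0]["content"][:200])
```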
hexsha: 4039add7535d2043971e1d6f766c77cb409e499d | size: 4,197 | ext: py | lang: Python
repo: willcrichton/catapult @ 52d3c05d814a12b32392ac45f14fc4b99ea3674e | path: telemetry/telemetry/internal/actions/scroll_bounce.py | licenses: ["BSD-3-Clause"]
stars: 4 (2017-06-04T05:37:39.000Z to 2021-06-26T05:30:15.000Z) | issues: null | forks: 1 (2019-10-04T03:24:35.000Z to 2019-10-04T03:24:35.000Z)

# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.actions import page_action
from telemetry.internal.actions import utils
from telemetry.util import js_template
class ScrollBounceAction(page_action.PageAction):
def __init__(self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=100,
overscroll=10, repeat_count=10,
speed_in_pixels_per_second=400,
synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
super(ScrollBounceAction, self).__init__()
if direction not in ['down', 'up', 'left', 'right']:
raise page_action.PageActionNotSupported(
          'Invalid scroll direction: %s' % direction)
self._selector = selector
self._text = text
self._element_function = element_function
self._left_start_ratio = left_start_ratio
self._top_start_ratio = top_start_ratio
# Should be big enough to do more than just hide the URL bar.
self._distance = distance
self._direction = direction
# This needs to be < height / repeat_count so we don't walk off the screen.
# We also probably don't want to spend more than a couple frames in
# overscroll since it may mask any synthetic delays.
self._overscroll = overscroll
# It's the transitions we really want to stress, make this big.
self._repeat_count = repeat_count
# 7 pixels per frame should be plenty of frames.
self._speed = speed_in_pixels_per_second
self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
synthetic_gesture_source)
if (self._selector is None and self._text is None and
self._element_function is None):
self._element_function = '(document.scrollingElement || document.body)'
def WillRunAction(self, tab):
utils.InjectJavaScript(tab, 'gesture_common.js')
utils.InjectJavaScript(tab, 'scroll_bounce.js')
# Fail if browser doesn't support synthetic scroll bounce gestures.
if not tab.EvaluateJavaScript(
'window.__ScrollBounceAction_SupportedByBrowser()'):
raise page_action.PageActionNotSupported(
'Synthetic scroll bounce not supported for this browser')
# Fail if we can't send touch events (bouncing is really only
# interesting for touch)
if not page_action.IsGestureSourceTypeSupported(tab, 'touch'):
raise page_action.PageActionNotSupported(
'Touch scroll not supported for this browser')
if (self._synthetic_gesture_source ==
'chrome.gpuBenchmarking.MOUSE_INPUT'):
raise page_action.PageActionNotSupported(
'ScrollBounce page action does not support mouse input')
tab.ExecuteJavaScript("""
window.__scrollBounceActionDone = false;
window.__scrollBounceAction = new __ScrollBounceAction(
function() { window.__scrollBounceActionDone = true; });""")
def RunAction(self, tab):
code = js_template.Render('''
function(element, info) {
if (!element) {
throw Error('Cannot find element: ' + info);
}
window.__scrollBounceAction.start({
element: element,
left_start_ratio: {{ left_start_ratio }},
top_start_ratio: {{ top_start_ratio }},
direction: {{ direction }},
distance: {{ distance }},
overscroll: {{ overscroll }},
repeat_count: {{ repeat_count }},
speed: {{ speed }}
});
}''',
left_start_ratio=self._left_start_ratio,
top_start_ratio=self._top_start_ratio,
direction=self._direction,
distance=self._distance,
overscroll=self._overscroll,
repeat_count=self._repeat_count,
speed=self._speed)
page_action.EvaluateCallbackWithElement(
tab, code, selector=self._selector, text=self._text,
element_function=self._element_function)
tab.WaitForJavaScriptExpression('window.__scrollBounceActionDone', 60)
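# Hypothetical driver sketch (illustrative only; `tab` is a telemetry browser tab
# object provided by the framework, not defined in this file):
#   action = ScrollBounceAction(direction='down', distance=200, repeat_count=4)
#   action.WillRunAction(tab)
#   action.RunAction(tab)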
avg_line_length: 42.826531 | max_line_length: 79 | alphanum_fraction: 0.684537

hexsha: 2af5252691f85a2cb595c56f63cefc0c2ee67dbc | size: 1,244 | ext: py | lang: Python
repo: alaxa27/competitive @ f60f1470ba12e83e483127c109d1f60f22b88130 | path: linfitxy/linreg.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

from pylab import *
from math import sqrt
Rc = [20]
for i in range(1, 21):
Rc.append(50.0*i)
Eeff = 100
Ueff = [29, 50.6, 67.6,76, 81, 84.4, 86.7, 88.5, 89.9, 91, 91.9, 92.5, 93.3, 93.8, 94.2, 94.8, 95.1, 95.4, 95.6, 95.8, 96.1]
def variables(Ueff, Eeff, Rc):
x = []
for i in Rc:
x.append(1/float(i))
y = []
for i in Ueff:
y.append(Eeff/float(i))
return x, y
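# Sketch of the error propagation used by err() below (inferred from the code):
# with y = Eeff/Ueff, dy/dEeff = 1/Ueff = y/Eeff and dy/dUeff = -Eeff/Ueff**2 = -y**2/Eeff,
# so deltaY = sqrt((y/Eeff)**2 * deltaEeff**2 + (y**2/Eeff)**2 * deltaUeff**2).
# For x = 1/Rc, dx/dRc = -1/Rc**2; note that err() returns x**2 * deltaRc**2 for
# deltaX (a variance-like term), while deltaY is returned as a standard deviation.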
def err(X, Y, deltaUeff, deltaEeff, deltaRc):
deltaX = []
for x in X:
deltaX.append(pow(x, 2) * pow(deltaRc, 2))
deltaY = []
for y in Y:
deltaY.append(sqrt(pow((y/Eeff), 2) * pow(deltaEeff, 2) + pow(((-1) * pow(y, 2) / Eeff), 2) * pow(deltaUeff, 2)))
return deltaX, deltaY
def print_for_octave(y):
Y = str(y).split(',')
Ystr = ''
for i in Y[0:-1]:
Ystr += i+';'
Ystr += Y[-1]
return Ystr
(x, y) = variables(Ueff, Eeff, Rc)
(deltaX, deltaY) = err(x, y, 0.002*1000, 0.01*1000, 0.01)
print "deltaX="+print_for_octave(deltaX)
print "deltaY="+print_for_octave(deltaY)
print "X= " + print_for_octave(x)
print "Y= " + print_for_octave(y)
(m,b) = polyfit(x, y, 1)
yp = polyval([m, b], x)
plot(x, yp)
scatter(x, y)
grid(True)
xlabel('1/Rc')
ylabel('Eeff/Ueff')
show()
avg_line_length: 19.746032 | max_line_length: 124 | alphanum_fraction: 0.555466

hexsha: 56b7267ec9068cdec9c46bd721d250a3958b6b21 | size: 13,843 | ext: py | lang: Python
repo: MobleyLab/openff-spellbook @ 66a9f2add895034da7949701069b11cf0ab3f817 | path: offsb/reporter/qcarchive.py | licenses: ["MIT"]
stars: 3 (2019-09-20T13:53:53.000Z to 2021-01-25T20:42:12.000Z) | issues: 2 (2019-10-12T07:12:27.000Z to 2020-05-22T10:22:17.000Z) | forks: 3 (2019-10-12T00:31:03.000Z to 2020-01-30T21:36:10.000Z)

#!/usr/bin/env python3
import pickle
import collections
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import itertools
import numpy as np
import re
import os
from offsb.op.geometry import ( \
BondOperation,
AngleOperation,
ImproperTorsionOperation,
TorsionOperation)
import offsb.tools.const as const
import simtk.unit as unit
import offsb.qcarchive as qca
PREFIX="."
def tuple_to_hyphenated( x):
if isinstance( x, tuple):
return re.sub("[() ]", "", str(x).replace(",","-"))
else:
return x
#if 0:
# for param in param_types:
# data = None
# num_angs = 1 #len(ang_pl)
# out_str = "{:12s} {:12s} {:16.10e} "+"{:12s} {:12s} {:8.4f}"*num_angs +" {:10.4f} {:64s}\n"
# filename = data_chart.get( param)
# with open( os.path.join( PREFIX, filename), 'rb') as fid:
# # print("Loading", filename)
# data = pickle.load(fid)
# key = 'canonical_isomeric_explicit_hydrogen_mapped_smiles'
# i = 0
# for entry_node in QCA.iter_entry():
# print( i, entry_node)
# i += 1
# #labels = oFF10.db.get( entry.payload).get( "data").get( param)
# entry = QCA.db[entry_node.payload]["entry"]
# mol_name = entry.name
# smiles_indexed = entry.attributes[ key]
# #if entryA is None or entryB is None:
# # return False
# # for group in labels:
# # label = labels.get( group)
# # d = collections.defaultdict(list)
# nodes = list(QCA.node_iter_optimization_minimum( entry_node, select="Molecule"))
# order = np.arange( len( nodes))
# vals = []
# for node in nodes:
# val = tuple([ c.payload[2] for c in \
# QCA.node_iter_to_root( node,
# select="Constraint")])
# if len(val) > 0:
# vals.append(val)
# if len(vals) == 0:
# nodes_in_order = nodes
# else:
# vals = np.array( vals)
# order = np.lexsort( vals.T)
# nodes_in_order = [nodes[i] for i in order]
# fnm = entry_node.payload + "." + param + ".dat"
# fd = open( fnm, 'w')
# spec_written = False
# ds = list(QCA.node_iter_to_root( entry_node))[-2]
# for mol_node in nodes_in_order:
# # for opt in QCA.node_iter_depth_first( cons, select="Optimization"):
# opt = next(QCA.node_iter_to_root( mol_node, select="Optimization"))
# if not spec_written:
# qc_spec = QCA.db.get( opt.payload).get( "data").get( "qc_spec")
# method = str( qc_spec.method)
# basis = str( qc_spec.basis)
# fd.write( ds.name + "\n" + method + "\n" + basis + "\n")
# header = "{:12s} {:12s} {:16s} "+"{:12s} {:12s} {:8s}"*num_angs +" {:10s} {:64s}\n"
# fd.write(header.format( "# QCAProc", " QCAMol", " QMEne", " ScanType", " Atoms", " ScanVal", \
# param, \
# " SmilesMapped" ))
# spec_written = True
# if QCA.db.get( opt.payload).get( "data").get( "energies") is None:
# fd.close()
# continue
# try:
# ene = QCA.db.get( opt.payload).get( "data").get( "energies")[ mol_idx]
# except TypeError:
# fd.close()
# continue
# mol_id = mol_node.payload
# # assume we are taking only minimum energy
# syms = QCA.db.get( mol_node.payload).get( "data").get( "symbols")
# try:
# vals = data.db.get( mol_node.payload).get( "data").get("energy")
# except Exception:
# fd.close()
# continue
# vals *= converters.get( param)
# ang_pl = [c.payload for c in QCA.node_iter_to_root( mol_node, select="Constraint")][0]
# if num_angs > 1:
# ang = [ tuple_to_hyphenated( x) for y in ang_pl for x in y]
# else:
# ang = [ tuple_to_hyphenated( x) for x in ang_pl]
# fd.write(out_str.format( entry_node.payload, mol_id, ene, *ang, \
# vals.value_in_unit( vals.unit), \
# smiles_indexed ))
# fd.close()
#}}}
#{{{
#}}}
#{{{
def extract_torsions_for_same_molecule(QCA, param_types, data_chart=None, converters=None):
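    """Write one .dat file per torsion-drive entry (summary inferred from the code).

    For every entry folder that contains a TorsionDrive, iterate the minimum-energy
    molecules, sort them by their constraint values, and write one line per molecule
    with the QM energy, the scanned angle(s) and, when ``data_chart`` points to an
    auxiliary pickle (e.g. an MM energy evaluation), the converted auxiliary value.
    Relies on the module-level ``mol_idx`` set in the ``__main__`` block.
    """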
for param in param_types:
data = None
        if data_chart is not None:
filename = data_chart[param]
with open( os.path.join( PREFIX, filename), 'rb') as fid:
# print("Loading", filename)
data = pickle.load(fid)
key = 'canonical_isomeric_explicit_hydrogen_mapped_smiles'
for folder in QCA.combine_by_entry():
#check if there are torsions, if not, skip (for now)
has_torsion = False
for n in folder.children:
if QCA[n].name == "TorsionDrive":
has_torsion = True
break
if not has_torsion:
continue
print(folder)
for n in folder.children:
constraints = set([c.payload[:2] for c in
QCA.node_iter_depth_first(QCA[n], select="Constraint")])
num_angs = len(constraints)
out_str = "{:12s} {:12s} {:16.10e} "+"{:12s} {:12s} {:8.4f}"*num_angs +" {:10.4f} {:64s}\n"
i = 0
for entry_id in folder.children:
entry_node = QCA[entry_id]
print( " ",i, entry_node)
i += 1
#labels = oFF10.db.get( entry.payload).get( "data").get( param)
entry = QCA.db[entry_node.payload]["entry"]
mol_name = entry.name
smiles_indexed = entry.attributes[ key]
# choose between all mins or just min along a TD
# Note that not sure how TD iter will work when entry
# is an optimization
#nodes = list(QCA.node_iter_optimization_minimum(entry_node, select="Molecule"))
nodes = list(QCA.node_iter_torsiondriverecord_minimum(entry_node, select="Molecule"))
order = np.arange( len( nodes))
cons = []
for node in nodes:
# for kk in QCA.node_iter_to_root( QCA[node.index]):
# print(kk)
# print(" ", kk.parent, kk.children)
val = tuple([ c.payload[2] for c in \
QCA.node_iter_to_root( node,
select="Constraint")])
if len(val) > 0:
cons.append(val)
if len(cons) == 0:
nodes_in_order = nodes
order = np.arange(len(nodes))
else:
cons = np.array( cons)
order = np.lexsort( cons.T)
nodes_in_order = [nodes[i] for i in order]
cons_set = cons
fnm = entry_node.payload + "." + param + ".dat"
fd = open( fnm, 'w')
spec_written = False
# Assumes -1 is root, so -2 are the datasets
ds = list(QCA.node_iter_to_root( entry_node))[-2]
mols = []
for m_idx, mol_node in enumerate(nodes_in_order):
# for opt in QCA.node_iter_depth_first( cons, select="Optimization"):
opt = next(QCA.node_iter_to_root( mol_node, select="Optimization"))
if not spec_written:
qc_spec = QCA.db.get( opt.payload).get( "data").get( "qc_spec")
method = str( qc_spec.method)
basis = str( qc_spec.basis)
fd.write( ds.name + "\n" + method + "\n" + basis + "\n")
header = "{:12s} {:12s} {:16s} "+"{:12s} {:12s} {:8s}"*num_angs +" {:10s} {:64s}\n"
fd.write(header.format( "# QCAProc", " QCAMol", " QMEne",
*[" ScanType", " Atoms", " ScanVal"] * num_angs, \
param, " SmilesMapped" ))
spec_written = True
if QCA.db.get( opt.payload).get( "data").get( "energies") is None:
#fd.close()
print("No energies")
continue
try:
ene = QCA.db.get( opt.payload).get( "data").get( "energies")[ mol_idx]
except TypeError:
print("No energies for this mol", mol_idx)
#fd.close()
continue
mol_id = mol_node.payload
# assume we are taking only minimum energy
syms = QCA.db.get( mol_node.payload).get( "data").get( "symbols")
qc_mol = QCA.db.get( mol_node.payload).get( "data")
# this is the "data" e.g. openMM energy eval
if data is not None:
try:
vals = data.db.get( mol_node.payload).get( "data").get("energy")
vals *= converters.get( param)
except Exception:
#fd.close()
print("No aux data from", data.name, mol_node.payload)
continue
# need to now measure each constraint
#ang_pl = [c.payload for c in QCA.node_iter_to_root( mol_node, select="Constraint")][0]
#ang = [tuple_to_hyphenated(x[1]) for x in constraints]
angle_str = []
for cons in constraints:
indices = tuple_to_hyphenated(cons[1])
angle = TorsionOperation.measure_praxeolitic_single(qc_mol, list(cons[1]))
if np.sign(angle) != np.sign(cons_set[order[m_idx]]):
angle *= -1.0
angle_str = [cons[0],indices,float(cons_set[order[m_idx]])]
angle_str = [cons[0],indices,angle]
#qc_angle = qcelemental.models.molecule.Molecule.from_data(qc_mol).measure(list(cons[1]))
#angle_str = [cons[0],indices,float(qc_angle)]
#angle_str = [cons[0],indices,angle]
if type(vals) is unit.Quantity:
vals = vals.value_in_unit( vals.unit)
print(" ", entry_node.payload, mol_id, ene, *angle_str,vals)
fd.write(out_str.format( entry_node.payload, mol_id, ene, *angle_str, \
vals, \
smiles_indexed ))
#for atoms in constraints:
# ang
#if num_angs > 1:
# ang = [ tuple_to_hyphenated( x) for y in ang_pl for x in y]
#else:
# ang = [ tuple_to_hyphenated( x) for x in ang_pl]
fd.close()
#}}}
#{{{
# extract_torsions_for_same_molecule(QCA, param_types, data_chart, converters)
#}}}
if __name__ == "__main__":
with open(os.path.join(PREFIX,'QCA.p'), 'rb') as fid:
QCA = pickle.load(fid)
if QCA.db is None:
with open(os.path.join(PREFIX, 'QCA.db.p'), 'rb') as fid:
QCA.db = pickle.load(fid).db
converters = { \
# "Bonds" : const.bohr2angstrom, \
# "Angles": 1.0, \
# "ImproperTorsions": 1.0, \
# "ProperTorsions": 1.0, \
# "vdW": 1.0 \
"MMEnergyA": 1.0, \
# "MMEnergyB": 1.0 \
}
data_chart = { \
# "Bonds" : "bonds.p", \
# "Angles": "angles.p", \
# "ImproperTorsions": "outofplane.p", \
# "ProperTorsions": "torsions.p" \
"MMEnergyA": "oMM.oFF-Parsley-uncons.p" \
# "MMEnergyB": "oMM.oFF-Parsley-uncons" \
}
#label_db = oFF10.db.get( "ROOT").get( "data")
param_types = list(data_chart.keys())
# get the last structure from opts -> min energy
mol_idx = -1
for n in QCA.iter_entry():
constraints = set([tuple(c.payload[1]) for c in QCA.node_iter_depth_first( n, select="Constraint")])
print(n)
print(" ", constraints)
extract_torsions_for_same_molecule(QCA, param_types, data_chart, converters)
avg_line_length: 44.085987 | max_line_length: 119 | alphanum_fraction: 0.448458

hexsha: e38df9c7d708da7d2254f3abc487c56d2c9df234 | size: 26,249 | ext: py | lang: Python
repo: ajayiagbebaku/NFL-Model @ afcc67a85ca7138c58c3334d45988ada2da158ed | path: venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/fsnotify/scandir_vendored.py | licenses: ["MIT"]
stars: 695 (2020-01-30T14:34:51.000Z to 2022-03-31T09:31:57.000Z) | issues: 845 (2020-01-29T23:53:36.000Z to 2022-03-31T19:45:04.000Z) | forks: 66 (2020-01-30T13:10:38.000Z to 2022-03-29T07:11:17.000Z)

"""scandir, a better directory iterator and faster os.walk(), now in the Python 3.5 stdlib
scandir() is a generator version of os.listdir() that returns an
iterator over files in a directory, and also exposes the extra
information most OSes provide while iterating files in a directory
(such as type and stat information).
This module also includes a version of os.walk() that uses scandir()
to speed it up significantly.
See README.md or https://github.com/benhoyt/scandir for rationale and
docs, or read PEP 471 (https://www.python.org/dev/peps/pep-0471/) for
more details on its inclusion into Python 3.5
scandir is released under the new BSD 3-clause license.
Copyright (c) 2012, Ben Hoyt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Ben Hoyt nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
from errno import ENOENT
from os import listdir, lstat, stat, strerror
from os.path import join, islink
from stat import S_IFDIR, S_IFLNK, S_IFREG
import collections
import sys
try:
import _scandir
except ImportError:
_scandir = None
try:
import ctypes
except ImportError:
ctypes = None
if _scandir is None and ctypes is None:
import warnings
warnings.warn("scandir can't find the compiled _scandir C module "
"or ctypes, using slow generic fallback")
__version__ = '1.10.0'
__all__ = ['scandir', 'walk']
# Windows FILE_ATTRIBUTE constants for interpreting the
# FIND_DATA.dwFileAttributes member
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DEVICE = 64
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ENCRYPTED = 16384
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_INTEGRITY_STREAM = 32768
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
FILE_ATTRIBUTE_NO_SCRUB_DATA = 131072
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_VIRTUAL = 65536
IS_PY3 = sys.version_info >= (3, 0)
if IS_PY3:
unicode = str # Because Python <= 3.2 doesn't have u'unicode' syntax
class GenericDirEntry(object):
__slots__ = ('name', '_stat', '_lstat', '_scandir_path', '_path')
def __init__(self, scandir_path, name):
self._scandir_path = scandir_path
self.name = name
self._stat = None
self._lstat = None
self._path = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
self._stat = stat(self.path)
return self._stat
else:
if self._lstat is None:
self._lstat = lstat(self.path)
return self._lstat
# The code duplication below is intentional: this is for slightly
# better performance on systems that fall back to GenericDirEntry.
# It avoids an additional attribute lookup and method call, which
# are relatively slow on CPython.
def is_dir(self, follow_symlinks=True):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFDIR
def is_file(self, follow_symlinks=True):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFREG
def is_symlink(self):
try:
st = self.stat(follow_symlinks=False)
except OSError as e:
if e.errno != ENOENT:
raise
return False # Path doesn't exist or is a broken symlink
return st.st_mode & 0o170000 == S_IFLNK
def inode(self):
st = self.stat(follow_symlinks=False)
return st.st_ino
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def _scandir_generic(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
for name in listdir(path):
yield GenericDirEntry(path, name)
if IS_PY3 and sys.platform == 'win32':
def scandir_generic(path=unicode('.')):
if isinstance(path, bytes):
raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
return _scandir_generic(path)
scandir_generic.__doc__ = _scandir_generic.__doc__
else:
scandir_generic = _scandir_generic
scandir_c = None
scandir_python = None
if sys.platform == 'win32':
if ctypes is not None:
from ctypes import wintypes
# Various constants from windows.h
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
ERROR_FILE_NOT_FOUND = 2
ERROR_NO_MORE_FILES = 18
IO_REPARSE_TAG_SYMLINK = 0xA000000C
        # Number of seconds between 1601-01-01 and 1970-01-01
SECONDS_BETWEEN_EPOCHS = 11644473600
kernel32 = ctypes.windll.kernel32
# ctypes wrappers for (wide string versions of) FindFirstFile,
# FindNextFile, and FindClose
FindFirstFile = kernel32.FindFirstFileW
FindFirstFile.argtypes = [
wintypes.LPCWSTR,
ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
]
FindFirstFile.restype = wintypes.HANDLE
FindNextFile = kernel32.FindNextFileW
FindNextFile.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(wintypes.WIN32_FIND_DATAW),
]
FindNextFile.restype = wintypes.BOOL
FindClose = kernel32.FindClose
FindClose.argtypes = [wintypes.HANDLE]
FindClose.restype = wintypes.BOOL
Win32StatResult = collections.namedtuple('Win32StatResult', [
'st_mode',
'st_ino',
'st_dev',
'st_nlink',
'st_uid',
'st_gid',
'st_size',
'st_atime',
'st_mtime',
'st_ctime',
'st_atime_ns',
'st_mtime_ns',
'st_ctime_ns',
'st_file_attributes',
])
def filetime_to_time(filetime):
"""Convert Win32 FILETIME to time since Unix epoch in seconds."""
total = filetime.dwHighDateTime << 32 | filetime.dwLowDateTime
return total / 10000000 - SECONDS_BETWEEN_EPOCHS
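        # Illustrative check: a FILETIME of 116444736000000000 (11644473600 s worth
        # of 100 ns ticks) is exactly the 1601 -> 1970 offset, so it converts to
        # 0.0 seconds since the Unix epoch.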
def find_data_to_stat(data):
"""Convert Win32 FIND_DATA struct to stat_result."""
# First convert Win32 dwFileAttributes to st_mode
attributes = data.dwFileAttributes
st_mode = 0
if attributes & FILE_ATTRIBUTE_DIRECTORY:
st_mode |= S_IFDIR | 0o111
else:
st_mode |= S_IFREG
if attributes & FILE_ATTRIBUTE_READONLY:
st_mode |= 0o444
else:
st_mode |= 0o666
if (attributes & FILE_ATTRIBUTE_REPARSE_POINT and
data.dwReserved0 == IO_REPARSE_TAG_SYMLINK):
st_mode ^= st_mode & 0o170000
st_mode |= S_IFLNK
st_size = data.nFileSizeHigh << 32 | data.nFileSizeLow
st_atime = filetime_to_time(data.ftLastAccessTime)
st_mtime = filetime_to_time(data.ftLastWriteTime)
st_ctime = filetime_to_time(data.ftCreationTime)
# Some fields set to zero per CPython's posixmodule.c: st_ino, st_dev,
# st_nlink, st_uid, st_gid
return Win32StatResult(st_mode, 0, 0, 0, 0, 0, st_size,
st_atime, st_mtime, st_ctime,
int(st_atime * 1000000000),
int(st_mtime * 1000000000),
int(st_ctime * 1000000000),
attributes)
class Win32DirEntryPython(object):
__slots__ = ('name', '_stat', '_lstat', '_find_data', '_scandir_path', '_path', '_inode')
def __init__(self, scandir_path, name, find_data):
self._scandir_path = scandir_path
self.name = name
self._stat = None
self._lstat = None
self._find_data = find_data
self._path = None
self._inode = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
if self.is_symlink():
# It's a symlink, call link-following stat()
self._stat = stat(self.path)
else:
# Not a symlink, stat is same as lstat value
if self._lstat is None:
self._lstat = find_data_to_stat(self._find_data)
self._stat = self._lstat
return self._stat
else:
if self._lstat is None:
# Lazily convert to stat object, because it's slow
# in Python, and often we only need is_dir() etc
self._lstat = find_data_to_stat(self._find_data)
return self._lstat
def is_dir(self, follow_symlinks=True):
is_symlink = self.is_symlink()
if follow_symlinks and is_symlink:
try:
return self.stat().st_mode & 0o170000 == S_IFDIR
except OSError as e:
if e.errno != ENOENT:
raise
return False
elif is_symlink:
return False
else:
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_DIRECTORY != 0)
def is_file(self, follow_symlinks=True):
is_symlink = self.is_symlink()
if follow_symlinks and is_symlink:
try:
return self.stat().st_mode & 0o170000 == S_IFREG
except OSError as e:
if e.errno != ENOENT:
raise
return False
elif is_symlink:
return False
else:
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_DIRECTORY == 0)
def is_symlink(self):
return (self._find_data.dwFileAttributes &
FILE_ATTRIBUTE_REPARSE_POINT != 0 and
self._find_data.dwReserved0 == IO_REPARSE_TAG_SYMLINK)
def inode(self):
if self._inode is None:
self._inode = lstat(self.path).st_ino
return self._inode
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def win_error(error, filename):
exc = WindowsError(error, ctypes.FormatError(error))
exc.filename = filename
return exc
def _scandir_python(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
# Call FindFirstFile and handle errors
if isinstance(path, bytes):
is_bytes = True
filename = join(path.decode('mbcs', 'strict'), '*.*')
else:
is_bytes = False
filename = join(path, '*.*')
data = wintypes.WIN32_FIND_DATAW()
data_p = ctypes.byref(data)
handle = FindFirstFile(filename, data_p)
if handle == INVALID_HANDLE_VALUE:
error = ctypes.GetLastError()
if error == ERROR_FILE_NOT_FOUND:
# No files, don't yield anything
return
raise win_error(error, path)
# Call FindNextFile in a loop, stopping when no more files
try:
while True:
# Skip '.' and '..' (current and parent directory), but
# otherwise yield (filename, stat_result) tuple
name = data.cFileName
if name not in ('.', '..'):
if is_bytes:
name = name.encode('mbcs', 'replace')
yield Win32DirEntryPython(path, name, data)
data = wintypes.WIN32_FIND_DATAW()
data_p = ctypes.byref(data)
success = FindNextFile(handle, data_p)
if not success:
error = ctypes.GetLastError()
if error == ERROR_NO_MORE_FILES:
break
raise win_error(error, path)
finally:
if not FindClose(handle):
raise win_error(ctypes.GetLastError(), path)
if IS_PY3:
def scandir_python(path=unicode('.')):
if isinstance(path, bytes):
raise TypeError("os.scandir() doesn't support bytes path on Windows, use Unicode instead")
return _scandir_python(path)
scandir_python.__doc__ = _scandir_python.__doc__
else:
scandir_python = _scandir_python
if _scandir is not None:
scandir_c = _scandir.scandir
DirEntry_c = _scandir.DirEntry
if _scandir is not None:
scandir = scandir_c
DirEntry = DirEntry_c
elif ctypes is not None:
scandir = scandir_python
DirEntry = Win32DirEntryPython
else:
scandir = scandir_generic
DirEntry = GenericDirEntry
# Linux, OS X, and BSD implementation
elif sys.platform.startswith(('linux', 'darwin', 'sunos5')) or 'bsd' in sys.platform:
have_dirent_d_type = (sys.platform != 'sunos5')
if ctypes is not None and have_dirent_d_type:
import ctypes.util
DIR_p = ctypes.c_void_p
# Rather annoying how the dirent struct is slightly different on each
# platform. The only fields we care about are d_name and d_type.
class Dirent(ctypes.Structure):
if sys.platform.startswith('linux'):
_fields_ = (
('d_ino', ctypes.c_ulong),
('d_off', ctypes.c_long),
('d_reclen', ctypes.c_ushort),
('d_type', ctypes.c_byte),
('d_name', ctypes.c_char * 256),
)
elif 'openbsd' in sys.platform:
_fields_ = (
('d_ino', ctypes.c_uint64),
('d_off', ctypes.c_uint64),
('d_reclen', ctypes.c_uint16),
('d_type', ctypes.c_uint8),
('d_namlen', ctypes.c_uint8),
('__d_padding', ctypes.c_uint8 * 4),
('d_name', ctypes.c_char * 256),
)
else:
_fields_ = (
('d_ino', ctypes.c_uint32), # must be uint32, not ulong
('d_reclen', ctypes.c_ushort),
('d_type', ctypes.c_byte),
('d_namlen', ctypes.c_byte),
('d_name', ctypes.c_char * 256),
)
DT_UNKNOWN = 0
DT_DIR = 4
DT_REG = 8
DT_LNK = 10
Dirent_p = ctypes.POINTER(Dirent)
Dirent_pp = ctypes.POINTER(Dirent_p)
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
opendir = libc.opendir
opendir.argtypes = [ctypes.c_char_p]
opendir.restype = DIR_p
readdir_r = libc.readdir_r
readdir_r.argtypes = [DIR_p, Dirent_p, Dirent_pp]
readdir_r.restype = ctypes.c_int
closedir = libc.closedir
closedir.argtypes = [DIR_p]
closedir.restype = ctypes.c_int
file_system_encoding = sys.getfilesystemencoding()
class PosixDirEntry(object):
__slots__ = ('name', '_d_type', '_stat', '_lstat', '_scandir_path', '_path', '_inode')
def __init__(self, scandir_path, name, d_type, inode):
self._scandir_path = scandir_path
self.name = name
self._d_type = d_type
self._inode = inode
self._stat = None
self._lstat = None
self._path = None
@property
def path(self):
if self._path is None:
self._path = join(self._scandir_path, self.name)
return self._path
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
if self.is_symlink():
self._stat = stat(self.path)
else:
if self._lstat is None:
self._lstat = lstat(self.path)
self._stat = self._lstat
return self._stat
else:
if self._lstat is None:
self._lstat = lstat(self.path)
return self._lstat
def is_dir(self, follow_symlinks=True):
if (self._d_type == DT_UNKNOWN or
(follow_symlinks and self.is_symlink())):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFDIR
else:
return self._d_type == DT_DIR
def is_file(self, follow_symlinks=True):
if (self._d_type == DT_UNKNOWN or
(follow_symlinks and self.is_symlink())):
try:
st = self.stat(follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFREG
else:
return self._d_type == DT_REG
def is_symlink(self):
if self._d_type == DT_UNKNOWN:
try:
st = self.stat(follow_symlinks=False)
except OSError as e:
if e.errno != ENOENT:
raise
return False
return st.st_mode & 0o170000 == S_IFLNK
else:
return self._d_type == DT_LNK
def inode(self):
return self._inode
def __str__(self):
return '<{0}: {1!r}>'.format(self.__class__.__name__, self.name)
__repr__ = __str__
def posix_error(filename):
errno = ctypes.get_errno()
exc = OSError(errno, strerror(errno))
exc.filename = filename
return exc
def scandir_python(path=unicode('.')):
"""Like os.listdir(), but yield DirEntry objects instead of returning
a list of names.
"""
if isinstance(path, bytes):
opendir_path = path
is_bytes = True
else:
opendir_path = path.encode(file_system_encoding)
is_bytes = False
dir_p = opendir(opendir_path)
if not dir_p:
raise posix_error(path)
try:
result = Dirent_p()
while True:
entry = Dirent()
if readdir_r(dir_p, entry, result):
raise posix_error(path)
if not result:
break
name = entry.d_name
if name not in (b'.', b'..'):
if not is_bytes:
name = name.decode(file_system_encoding)
yield PosixDirEntry(path, name, entry.d_type, entry.d_ino)
finally:
if closedir(dir_p):
raise posix_error(path)
if _scandir is not None:
scandir_c = _scandir.scandir
DirEntry_c = _scandir.DirEntry
if _scandir is not None:
scandir = scandir_c
DirEntry = DirEntry_c
elif ctypes is not None and have_dirent_d_type:
scandir = scandir_python
DirEntry = PosixDirEntry
else:
scandir = scandir_generic
DirEntry = GenericDirEntry
# Some other system -- no d_type or stat information
else:
scandir = scandir_generic
DirEntry = GenericDirEntry
def _walk(top, topdown=True, onerror=None, followlinks=False):
"""Like Python 3.5's implementation of os.walk() -- faster than
the pre-Python 3.5 version as it uses scandir() internally.
"""
dirs = []
nondirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
            # a directory, same behaviour as os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
                    # entry is not a symbolic link, same behaviour as
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
for entry in walk(entry.path, topdown, onerror, followlinks):
yield entry
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
for name in dirs:
new_path = join(top, name)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
for entry in walk(new_path, topdown, onerror, followlinks):
yield entry
else:
# Yield after recursion if going bottom up
yield top, dirs, nondirs
if IS_PY3 or sys.platform != 'win32':
walk = _walk
else:
# Fix for broken unicode handling on Windows on Python 2.x, see:
# https://github.com/benhoyt/scandir/issues/54
file_system_encoding = sys.getfilesystemencoding()
def walk(top, topdown=True, onerror=None, followlinks=False):
if isinstance(top, bytes):
top = top.decode(file_system_encoding)
return _walk(top, topdown, onerror, followlinks)
avg_line_length: 36.40638 | max_line_length: 110 | alphanum_fraction: 0.557888

hexsha: f51b871e332e40df68142027eeb4ef53b1f90b85 | size: 3,538 | ext: py | lang: Python
repo: ebellocchia/telegram_crypto_price_bot @ dffc0e3f5b7de86b13fc2d90da0c79391606f39c | path: telegram_crypto_price_bot/info_message_sender/chart_price_info_message_sender.py | licenses: ["MIT"]
stars: 7 (2021-11-05T14:33:20.000Z to 2022-03-24T04:05:25.000Z) | issues: null | forks: 4 (2021-10-02T10:43:28.000Z to 2022-01-30T22:53:58.000Z)

# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Imports
#
from typing import Any
import pyrogram
from telegram_crypto_price_bot.chart_info.chart_info_file_saver import ChartInfoTmpFileSaver
from telegram_crypto_price_bot.config.configurable_object import ConfigurableObject
from telegram_crypto_price_bot.info_message_sender.info_message_sender_base import InfoMessageSenderBase
from telegram_crypto_price_bot.logger.logger import Logger
from telegram_crypto_price_bot.price_info.price_info_builder import PriceInfoBuilder
from telegram_crypto_price_bot.translation.translation_loader import TranslationLoader
#
# Classes
#
# Chart price info message sender class (chart and price in the same message)
class ChartPriceInfoMessageSender(InfoMessageSenderBase):
config: ConfigurableObject
logger: Logger
translator: TranslationLoader
price_info_builder: PriceInfoBuilder
# Constructor
def __init__(self,
client: pyrogram.Client,
config: ConfigurableObject,
logger: Logger,
translator: TranslationLoader) -> None:
super().__init__(client, logger)
self.config = config
self.logger = logger
self.translator = translator
self.price_info_builder = PriceInfoBuilder(config, translator)
# Send message
def _SendMessage(self,
chat: pyrogram.types.Chat,
*args: Any,
**kwargs: Any) -> pyrogram.types.Message:
# Get chart and price information
chart_info = self._CoinGeckoPriceApi().GetChartInfo(args[0], args[1], args[2])
price_info = self._CoinGeckoPriceApi().GetPriceInfo(args[0], args[1])
# Build price information string
price_info_str = self.price_info_builder.Build(price_info)
# Save chart image
chart_info_saver = ChartInfoTmpFileSaver(self.config, self.logger, self.translator)
chart_info_saver.SaveToTmpFile(chart_info)
# Get temporary file name
tmp_file_name = chart_info_saver.TmpFileName()
if tmp_file_name is None:
raise RuntimeError("Unable to save chart to file")
# Send chart image with price information as caption
return self._MessageSender().SendPhoto(chat,
tmp_file_name,
caption=price_info_str,
**kwargs)
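# Hypothetical usage sketch (the client/config/logger/translator instances and the
# meaning of the positional arguments are assumptions based on how args[0..2] are
# used above, e.g. coin id, quote currency and chart period):
#   sender = ChartPriceInfoMessageSender(client, config, logger, translator)
#   sender._SendMessage(chat, coin_id, vs_currency, last_days)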
avg_line_length: 43.146341 | max_line_length: 104 | alphanum_fraction: 0.709723

hexsha: c1e624442abcb80d25f7807a9049ad93ea28f404 | size: 951 | ext: py | lang: Python
repo: RainaWLK/mqtt-test @ cb4175c8bd1e35deed45941ca61c88fdcc6ddeba | path: mosquitto-1.5.4/test/broker/05-clean-session-qos1-helper.py | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-06-19T17:17:41.000Z to 2021-06-19T17:17:41.000Z)

#!/usr/bin/env python
# Test whether a clean session client has a QoS 1 message queued for it.
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("test-helper", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
mid = 128
publish_packet = mosq_test.gen_publish("qos1/clean_session/test", qos=1, mid=mid, payload="clean-session-message")
puback_packet = mosq_test.gen_puback(mid)
port = mosq_test.get_port()
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
mosq_test.do_send_receive(sock, publish_packet, puback_packet, "puback")
rc = 0
sock.close()
exit(rc)
avg_line_length: 29.71875 | max_line_length: 129 | alphanum_fraction: 0.770768

hexsha: 8be1c8a909c7007bd5abd5dc86d2a75d063733d6 | size: 4,937 | ext: py | lang: Python
repo: mindspore-ai/mindinsight @ 8c57fdd62eb7f8653662be2208633386ac82e8d7 | path: ecosystem_tools/mindconverter/mindconverter/graph_based_converter/mapper/onnx/ops/div_mapper.py | licenses: ["Apache-2.0"]
stars: 216 (2020-03-28T02:11:56.000Z to 2022-03-31T06:20:09.000Z) | issues: 13 (2020-03-31T03:00:12.000Z to 2021-01-03T13:01:06.000Z) | forks: 21 (2020-03-28T02:41:06.000Z to 2021-11-24T12:20:25.000Z)

# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
import numpy as np
from mindconverter.graph_based_converter.common.utils import reset_init_or_construct
from mindconverter.graph_based_converter.constant import WeightType, ExchangeMessageKeywords, \
TemplateKeywords
from mindconverter.graph_based_converter.mapper.base import ONNXToMindSporeMapper
class DivMapper(ONNXToMindSporeMapper):
"""Div mapper."""
@staticmethod
def _operation_name_in_ms(*args, **kwargs):
return "P.Div"
@staticmethod
def _convert_params(**kwargs):
return dict()
@staticmethod
def _convert_trained_weights(**kwargs):
weights = kwargs.get('weights', list())
tensor = DivMapper._find_val_by_index(0, weights)
if isinstance(tensor, np.ndarray) and tensor.shape:
return {'w': {'data': tensor, 'type': WeightType.PARAMETER.value}}
return dict()
@staticmethod
def _generate_snippet_template(**kwargs):
template, exchange_msg, outputs_list, outputs_mapping = ONNXToMindSporeMapper._generate_snippet_template(
**kwargs)
op = kwargs.get("operation")
args = kwargs.get("converted_params")
weights = kwargs.get("weights")
trainable_params = kwargs.get("trainable_params", dict())
if not weights:
variable_slot = "var_0"
construct_template = \
f"opt_{{{variable_slot}}} = {op}()({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}) "
template = {
variable_slot: {
TemplateKeywords.INIT.value: [],
TemplateKeywords.CONSTRUCT.value: [construct_template]
}
}
return template, exchange_msg, outputs_list, outputs_mapping
return DivMapper._generate_snippet_template_with_weights(weights, args, template, op, trainable_params)
@staticmethod
def _generate_snippet_template_with_weights(weights, args, template, op, trainable_params):
"""Generate template when weights exist."""
tensor = DivMapper._find_val_by_index(0, weights)
w_shape = tensor.shape
w_dtype = tensor.dtype
w_location = DivMapper._find_location_by_index(0, weights)
variable_slot = "var_0"
inputs_in_construct = [f"{{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}"]
if w_location != -1:
inputs_in_construct.insert(w_location, f"self.{{{variable_slot}}}_w")
if w_shape:
args["w_shape"] = w_shape
args["w_dtype"] = w_dtype
init_tensor = f"self.{{{variable_slot}}}_w = " \
f"Parameter(Tensor(np.random.uniform(0, 1, {{w_shape}}).astype(np.{{w_dtype}})), " \
f"name=None)"
else:
args["w_value"] = tensor.tolist()
init_tensor = f"self.{{{variable_slot}}}_w = {{w_value}}"
construct_template = f"opt_{{{variable_slot}}} = {' / '.join(inputs_in_construct)}"
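        # Illustrative rendering (not generated here): with the weight inserted at
        # location 1 the construct line becomes roughly
        #   opt_var_0 = x / self.var_0_w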
template = reset_init_or_construct(template, variable_slot, [init_tensor],
TemplateKeywords.INIT.value)
template = reset_init_or_construct(template, variable_slot, [construct_template],
TemplateKeywords.CONSTRUCT.value)
exchange_msg = {
variable_slot: {
ExchangeMessageKeywords.VariableScope.value.OPERATION.value: op,
ExchangeMessageKeywords.VariableScope.value.VARIABLE_NAME.value: None,
ExchangeMessageKeywords.VariableScope.value.OUTPUT_TYPE.value:
ExchangeMessageKeywords.VariableScope.value.TSR_TYPE.value,
ExchangeMessageKeywords.VariableScope.value.INPUTS.value: [],
ExchangeMessageKeywords.VariableScope.value.ARGS.value: args,
ExchangeMessageKeywords.VariableScope.value.WEIGHTS.value: weights,
ExchangeMessageKeywords.VariableScope.value.TRAINABLE_PARAMS.value: trainable_params
}
}
outputs_list = [f"opt_{{{variable_slot}}}"]
outputs_mapping = ((0, 0),)
return template, exchange_msg, outputs_list, outputs_mapping
avg_line_length: 46.140187 | max_line_length: 116 | alphanum_fraction: 0.645534

hexsha: 67052ed4008980d90dfc6a5ae808e35264a0d433 | size: 14,984 | ext: py | lang: Python
repo: Nad-Arb/openpilot3.9 @ ea48ef2d8d6a6ddf39d826182b3b6796ba3ea17e | path: selfdrive/controls/lib/planner.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

#!/usr/bin/env python
import zmq
import numpy as np
import math
from common.realtime import sec_since_boot
from common.params import Params
from common.numpy_fast import interp
import selfdrive.messaging as messaging
from selfdrive.swaglog import cloudlog
from selfdrive.config import Conversions as CV
from selfdrive.services import service_list
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET
from selfdrive.controls.lib.pathplanner import PathPlanner
from selfdrive.controls.lib.longitudinal_mpc import libmpc_py
from selfdrive.controls.lib.speed_smoother import speed_smoother
from selfdrive.controls.lib.longcontrol import LongCtrlState
_DT = 0.01 # 100Hz
_DT_MPC = 0.2 # 5Hz
MAX_SPEED_ERROR = 2.0
AWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted
_DEBUG = False
_LEAD_ACCEL_TAU = 1.5
# lookup tables VS speed to determine min and max accels in cruise
# make sure these accelerations are smaller than mpc limits
_A_CRUISE_MIN_V = [-1.0, -.8, -.67, -.5, -.30]
_A_CRUISE_MIN_BP = [ 0., 5., 10., 20., 40.]
# need fast accel at very low speed for stop and go
# make sure these accelerations are smaller than mpc limits
_A_CRUISE_MAX_V = [1., 1., .8, .5, .3]
_A_CRUISE_MAX_V_FOLLOWING = [1.5, 1.5, 1.2, .7, .3]
_A_CRUISE_MAX_BP = [0., 5., 10., 20., 40.]
# Lookup table for turns
_A_TOTAL_MAX_V = [1.5, 1.9, 3.2]
_A_TOTAL_MAX_BP = [0., 20., 40.]
# max acceleration allowed in acc, which happens in restart
A_ACC_MAX = max(_A_CRUISE_MAX_V_FOLLOWING)
def calc_cruise_accel_limits(v_ego, following):
a_cruise_min = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
if following:
a_cruise_max = interp(v_ego, _A_CRUISE_MAX_BP, _A_CRUISE_MAX_V_FOLLOWING)
else:
a_cruise_max = interp(v_ego, _A_CRUISE_MAX_BP, _A_CRUISE_MAX_V)
return np.vstack([a_cruise_min, a_cruise_max])
def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
"""
This function returns a limited long acceleration allowed, depending on the existing lateral acceleration
this should avoid accelerating when losing the target in turns
"""
deg_to_rad = np.pi / 180. # from can reading to rad
a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
a_y = v_ego**2 * angle_steers * deg_to_rad / (CP.sR * CP.l)
a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))
a_target[1] = min(a_target[1], a_x_allowed)
return a_target
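# Illustrative numbers (not from the source): at v_ego = 20 m/s the lookup above gives
# a_total_max = 1.9 m/s^2; if the lateral term comes out to a_y = 1.0 m/s^2, the allowed
# longitudinal acceleration is sqrt(1.9**2 - 1.0**2) ~= 1.6 m/s^2.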
class FCWChecker(object):
def __init__(self):
self.fcw_count = 0
self.last_fcw_a = 0.0
self.v_lead_max = 0.0
self.lead_seen_t = 0.0
self.last_fcw_time = 0.0
def reset_lead(self, cur_time):
self.v_lead_max = 0.0
self.lead_seen_t = cur_time
def update(self, mpc_solution, cur_time, v_ego, v_lead, y_lead, vlat_lead, fcw_lead, blinkers):
min_a_mpc = min(list(mpc_solution[0].a_ego)[1:])
self.v_lead_max = max(self.v_lead_max, v_lead)
if (fcw_lead > 0.99
and v_ego > 5.0
and min_a_mpc < -4.0
and self.v_lead_max > 2.5
and v_ego > v_lead
and self.lead_seen_t < cur_time - 2.0
and abs(y_lead) < 1.0
and abs(vlat_lead) < 0.3
and not blinkers):
self.fcw_count += 1
if self.fcw_count > 10 and self.last_fcw_time + 5.0 < cur_time:
self.last_fcw_time = cur_time
self.last_fcw_a = min_a_mpc
return True
else:
self.fcw_count = 0
return False
class LongitudinalMpc(object):
def __init__(self, mpc_id, live_longitudinal_mpc):
self.live_longitudinal_mpc = live_longitudinal_mpc
self.mpc_id = mpc_id
self.setup_mpc()
self.v_mpc = 0.0
self.v_mpc_future = 0.0
self.a_mpc = 0.0
self.v_cruise = 0.0
self.prev_lead_status = False
self.prev_lead_x = 0.0
self.new_lead = False
self.last_cloudlog_t = 0.0
def send_mpc_solution(self, qp_iterations, calculation_time):
qp_iterations = max(0, qp_iterations)
dat = messaging.new_message()
dat.init('liveLongitudinalMpc')
dat.liveLongitudinalMpc.xEgo = list(self.mpc_solution[0].x_ego)
dat.liveLongitudinalMpc.vEgo = list(self.mpc_solution[0].v_ego)
dat.liveLongitudinalMpc.aEgo = list(self.mpc_solution[0].a_ego)
dat.liveLongitudinalMpc.xLead = list(self.mpc_solution[0].x_l)
dat.liveLongitudinalMpc.vLead = list(self.mpc_solution[0].v_l)
dat.liveLongitudinalMpc.aLead = list(self.mpc_solution[0].a_l)
dat.liveLongitudinalMpc.aLeadTau = self.l
dat.liveLongitudinalMpc.qpIterations = qp_iterations
dat.liveLongitudinalMpc.mpcId = self.mpc_id
dat.liveLongitudinalMpc.calculationTime = calculation_time
self.live_longitudinal_mpc.send(dat.to_bytes())
def setup_mpc(self):
ffi, self.libmpc = libmpc_py.get_libmpc(self.mpc_id)
self.libmpc.init()
self.mpc_solution = ffi.new("log_t *")
self.cur_state = ffi.new("state_t *")
self.cur_state[0].v_ego = 0
self.cur_state[0].a_ego = 0
self.l = _LEAD_ACCEL_TAU
def set_cur_state(self, v, a):
self.cur_state[0].v_ego = v
self.cur_state[0].a_ego = a
def update(self, CS, lead, v_cruise_setpoint):
# Setup current mpc state
self.cur_state[0].x_ego = 0.0
if lead is not None and lead.status:
x_lead = lead.dRel
v_lead = max(0.0, lead.vLead)
a_lead = lead.aLeadK
if (v_lead < 0.1 or -a_lead / 2.0 > v_lead):
v_lead = 0.0
a_lead = 0.0
# Learn if constant acceleration
if abs(a_lead) < 0.5:
self.l = _LEAD_ACCEL_TAU
else:
self.l *= 0.9
l = max(self.l, -a_lead / (v_lead + 0.01))
self.new_lead = False
if not self.prev_lead_status or abs(x_lead - self.prev_lead_x) > 2.5:
self.libmpc.init_with_simulation(self.v_mpc, x_lead, v_lead, a_lead, l)
self.new_lead = True
self.prev_lead_status = True
self.prev_lead_x = x_lead
self.cur_state[0].x_l = x_lead
self.cur_state[0].v_l = v_lead
self.cur_state[0].a_l = a_lead
else:
self.prev_lead_status = False
# Fake a fast lead car, so mpc keeps running
self.cur_state[0].x_l = 50.0
self.cur_state[0].v_l = CS.vEgo + 10.0
self.cur_state[0].a_l = 0.0
l = _LEAD_ACCEL_TAU
# Calculate mpc
t = sec_since_boot()
n_its = self.libmpc.run_mpc(self.cur_state, self.mpc_solution, l)
duration = int((sec_since_boot() - t) * 1e9)
self.send_mpc_solution(n_its, duration)
# Get solution. MPC timestep is 0.2 s, so interpolation to 0.05 s is needed
self.v_mpc = self.mpc_solution[0].v_ego[1]
self.a_mpc = self.mpc_solution[0].a_ego[1]
self.v_mpc_future = self.mpc_solution[0].v_ego[10]
# Reset if NaN or goes through lead car
dls = np.array(list(self.mpc_solution[0].x_l)[1:]) - np.array(list(self.mpc_solution[0].x_ego)[1:])
crashing = min(dls) < -50.0
nans = np.any(np.isnan(list(self.mpc_solution[0].v_ego)))
backwards = min(list(self.mpc_solution[0].v_ego)[1:]) < -0.01
if ((backwards or crashing) and self.prev_lead_status) or nans:
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Longitudinal mpc %d reset - backwards: %s crashing: %s nan: %s" % (
self.mpc_id, backwards, crashing, nans))
self.libmpc.init()
self.cur_state[0].v_ego = CS.vEgo
self.cur_state[0].a_ego = 0.0
self.prev_lead_status = False
class Planner(object):
def __init__(self, CP, fcw_enabled):
context = zmq.Context()
self.CP = CP
self.live20 = messaging.sub_sock(context, service_list['live20'].port)
self.model = messaging.sub_sock(context, service_list['model'].port)
self.plan = messaging.pub_sock(context, service_list['plan'].port)
self.live_longitudinal_mpc = messaging.pub_sock(context, service_list['liveLongitudinalMpc'].port)
self.last_md_ts = 0
self.last_l20_ts = 0
self.last_model = 0.
self.last_l20 = 0.
self.model_dead = True
self.radar_dead = True
self.radar_errors = []
self.PP = PathPlanner()
self.mpc1 = LongitudinalMpc(1, self.live_longitudinal_mpc)
self.mpc2 = LongitudinalMpc(2, self.live_longitudinal_mpc)
self.v_acc_start = 0.0
self.a_acc_start = 0.0
self.acc_start_time = sec_since_boot()
self.v_acc = 0.0
self.v_acc_sol = 0.0
self.v_acc_future = 0.0
self.a_acc = 0.0
self.a_acc_sol = 0.0
self.v_cruise = 0.0
self.a_cruise = 0.0
self.lead_1 = None
self.lead_2 = None
self.longitudinalPlanSource = 'cruise'
self.fcw = False
self.fcw_checker = FCWChecker()
self.fcw_enabled = fcw_enabled
def choose_solution(self, v_cruise_setpoint):
solutions = {'cruise': self.v_cruise}
if self.mpc1.prev_lead_status:
solutions['mpc1'] = self.mpc1.v_mpc
if self.mpc2.prev_lead_status:
solutions['mpc2'] = self.mpc2.v_mpc
slowest = min(solutions, key=solutions.get)
if _DEBUG:
print "D_SOL", solutions, slowest, self.v_acc_sol, self.a_acc_sol
print "D_V", self.mpc1.v_mpc, self.mpc2.v_mpc, self.v_cruise
print "D_A", self.mpc1.a_mpc, self.mpc2.a_mpc, self.a_cruise
self.longitudinalPlanSource = slowest
# Choose lowest of MPC and cruise
if slowest == 'mpc1':
self.v_acc = self.mpc1.v_mpc
self.a_acc = self.mpc1.a_mpc
elif slowest == 'mpc2':
self.v_acc = self.mpc2.v_mpc
self.a_acc = self.mpc2.a_mpc
elif slowest == 'cruise':
self.v_acc = self.v_cruise
self.a_acc = self.a_cruise
self.v_acc_future = min([self.mpc1.v_mpc_future, self.mpc2.v_mpc_future, v_cruise_setpoint])
# this runs whenever we get a packet that can change the plan
def update(self, CS, LoC, v_cruise_kph, user_distracted):
cur_time = sec_since_boot()
v_cruise_setpoint = v_cruise_kph * CV.KPH_TO_MS
md = messaging.recv_sock(self.model)
if md is not None:
self.last_md_ts = md.logMonoTime
self.last_model = cur_time
self.model_dead = False
self.PP.update(CS.vEgo, md)
l20 = messaging.recv_sock(self.live20) if md is None else None
if l20 is not None:
self.last_l20_ts = l20.logMonoTime
self.last_l20 = cur_time
self.radar_dead = False
self.radar_errors = list(l20.live20.radarErrors)
self.v_acc_start = self.v_acc_sol
self.a_acc_start = self.a_acc_sol
self.acc_start_time = cur_time
self.lead_1 = l20.live20.leadOne
self.lead_2 = l20.live20.leadTwo
enabled = (LoC.long_control_state == LongCtrlState.pid) or (LoC.long_control_state == LongCtrlState.stopping)
following = self.lead_1.status and self.lead_1.dRel < 45.0 and self.lead_1.vLeadK > CS.vEgo and self.lead_1.aLeadK > 0.0
# Calculate speed for normal cruise control
if enabled:
accel_limits = map(float, calc_cruise_accel_limits(CS.vEgo, following))
# TODO: make a separate lookup for jerk tuning
jerk_limits = [min(-0.1, accel_limits[0]), max(0.1, accel_limits[1])]
accel_limits = limit_accel_in_turns(CS.vEgo, CS.steeringAngle, accel_limits, self.CP)
if user_distracted:
# if user is not responsive to awareness alerts, then start a smooth deceleration
accel_limits[1] = min(accel_limits[1], AWARENESS_DECEL)
accel_limits[0] = min(accel_limits[0], accel_limits[1])
self.v_cruise, self.a_cruise = speed_smoother(self.v_acc_start, self.a_acc_start,
v_cruise_setpoint,
accel_limits[1], accel_limits[0],
jerk_limits[1],
jerk_limits[0],
_DT_MPC)
else:
starting = LoC.long_control_state == LongCtrlState.starting
self.v_cruise = CS.vEgo
self.a_cruise = self.CP.startAccel if starting else CS.aEgo
self.v_acc_start = CS.vEgo
self.a_acc_start = self.CP.startAccel if starting else CS.aEgo
self.v_acc = CS.vEgo
self.a_acc = self.CP.startAccel if starting else CS.aEgo
self.v_acc_sol = CS.vEgo
self.a_acc_sol = self.CP.startAccel if starting else CS.aEgo
self.mpc1.set_cur_state(self.v_acc_start, self.a_acc_start)
self.mpc2.set_cur_state(self.v_acc_start, self.a_acc_start)
self.mpc1.update(CS, self.lead_1, v_cruise_setpoint)
self.mpc2.update(CS, self.lead_2, v_cruise_setpoint)
self.choose_solution(v_cruise_setpoint)
# determine fcw
if self.mpc1.new_lead:
self.fcw_checker.reset_lead(cur_time)
blinkers = CS.leftBlinker or CS.rightBlinker
self.fcw = self.fcw_checker.update(self.mpc1.mpc_solution, cur_time, CS.vEgo,
self.lead_1.vLead, self.lead_1.yRel, self.lead_1.vLat,
self.lead_1.fcw, blinkers) \
and not CS.brakePressed
if self.fcw:
cloudlog.info("FCW triggered")
if cur_time - self.last_model > 0.5:
self.model_dead = True
if cur_time - self.last_l20 > 0.5:
self.radar_dead = True
# **** send the plan ****
plan_send = messaging.new_message()
plan_send.init('plan')
events = []
if self.model_dead:
events.append(create_event('modelCommIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if self.radar_dead or 'commIssue' in self.radar_errors:
events.append(create_event('radarCommIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if 'fault' in self.radar_errors:
events.append(create_event('radarFault', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
# Interpolation of trajectory
    dt = min(cur_time - self.acc_start_time, _DT_MPC + _DT) + _DT  # capped at _DT_MPC + _DT so we never extrapolate too far past the MPC step
self.a_acc_sol = self.a_acc_start + (dt / _DT_MPC) * (self.a_acc - self.a_acc_start)
self.v_acc_sol = self.v_acc_start + dt * (self.a_acc_sol + self.a_acc_start) / 2.0
plan_send.plan.events = events
plan_send.plan.mdMonoTime = self.last_md_ts
plan_send.plan.l20MonoTime = self.last_l20_ts
# lateral plan
plan_send.plan.lateralValid = not self.model_dead
plan_send.plan.dPoly = map(float, self.PP.d_poly)
plan_send.plan.laneWidth = float(self.PP.lane_width)
# longitudal plan
plan_send.plan.longitudinalValid = not self.radar_dead
plan_send.plan.vCruise = self.v_cruise
plan_send.plan.aCruise = self.a_cruise
plan_send.plan.vTarget = self.v_acc_sol
plan_send.plan.aTarget = self.a_acc_sol
plan_send.plan.vTargetFuture = self.v_acc_future
plan_send.plan.hasLead = self.mpc1.prev_lead_status
plan_send.plan.longitudinalPlanSource = self.longitudinalPlanSource
# Send out fcw
fcw = self.fcw and (self.fcw_enabled or LoC.long_control_state != LongCtrlState.off)
plan_send.plan.fcw = fcw
self.plan.send(plan_send.to_bytes())
return plan_send
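# Minimal standalone sketch of the trajectory interpolation done in update()
# above (the "Interpolation of trajectory" block). All numbers below are
# hypothetical placeholders rather than openpilot constants.
if __name__ == "__main__":
  _dt_mpc_example = 0.2          # assumed MPC step length in seconds
  v_start, a_start = 10.0, 0.0   # solution state at the start of the step
  a_target = 1.0                 # acceleration target returned by the MPC
  dt = 0.05                      # time elapsed since acc_start_time
  a_sol = a_start + (dt / _dt_mpc_example) * (a_target - a_start)
  v_sol = v_start + dt * (a_sol + a_start) / 2.0  # trapezoidal integration
  print("interpolated a=%.3f m/s^2, v=%.3f m/s" % (a_sol, v_sol))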
| 36.106024
| 126
| 0.676321
|
1eaa0f5952642715074ae857c2746faff15c225f
| 87
|
py
|
Python
|
lab/doodle/test01/testcode01.py
|
JoHyukJun/RasberryPiLab
|
c0fecd9e9906b4fe186b299c0d31e5b6666fe955
|
[
"Apache-2.0"
] | null | null | null |
lab/doodle/test01/testcode01.py
|
JoHyukJun/RasberryPiLab
|
c0fecd9e9906b4fe186b299c0d31e5b6666fe955
|
[
"Apache-2.0"
] | null | null | null |
lab/doodle/test01/testcode01.py
|
JoHyukJun/RasberryPiLab
|
c0fecd9e9906b4fe186b299c0d31e5b6666fe955
|
[
"Apache-2.0"
] | null | null | null |
for i in range(2, 10):
for j in range(1, 10):
print(i * j, end = ' ')
print('\n')
| 14.5
| 25
| 0.505747
|
b3ad574f5154de6063cb700784fa3afaf80ef216
| 2,222
|
py
|
Python
|
djangobase/users_registration/tests/test_views_simple.py
|
seospace/djangousers
|
a9f18cabeee31e91d91c46c91bc3d689aec8f0db
|
[
"BSD-3-Clause"
] | null | null | null |
djangobase/users_registration/tests/test_views_simple.py
|
seospace/djangousers
|
a9f18cabeee31e91d91c46c91bc3d689aec8f0db
|
[
"BSD-3-Clause"
] | null | null | null |
djangobase/users_registration/tests/test_views_simple.py
|
seospace/djangousers
|
a9f18cabeee31e91d91c46c91bc3d689aec8f0db
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import TestCase
from .base import TemplateResponseTestMixin
from users.forms import UserRegistrationForm, UserAuthenticationForm
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from .. import views
from django.urls import reverse
_auth_redirect = 'index'
class UserRegistrationClosedViewTestCase(TemplateResponseTestMixin, TestCase):
view_class = views.UserRegistrationClosedView
url_name = 'user_registration_closed_view'
template_name = 'registration/user_registration_closed_view.html'
get_status_code = 200
post_status_code = 405
authenticated_redirect_url_name = _auth_redirect
class UserRegistrationSuccessViewTestCase(TemplateResponseTestMixin, TestCase):
view_class = views.UserRegistrationSuccessView
url_name = 'user_registration_success_view'
template_name = 'registration/user_registration_success_view.html'
get_status_code = 200
post_status_code = 405
authenticated_redirect_url_name = _auth_redirect
class UserRegistrationViewTestCase(TemplateResponseTestMixin, TestCase):
view_class = views.UserRegistrationView
url_name = 'user_registration_view'
template_name = 'registration/user_registration_view.html'
form_class = UserRegistrationForm
csrf_token = True
get_status_code = 200
post_status_code = 200
authenticated_redirect_url_name = _auth_redirect
class UserActivationSuccessViewTestCase(TemplateResponseTestMixin, TestCase):
view_class = views.UserActivationSuccessView
url_name = 'user_activation_success_view'
template_name = 'registration/user_activation_success_view.html'
get_status_code = 200
post_status_code = 405
authenticated_redirect_url_name = _auth_redirect
class UserActivationViewTestCase(TemplateResponseTestMixin, TestCase):
view_class = views.UserActivationView
url_name = 'user_activation_view'
url_name_kwargs = {'activation_key': 'someactivationkey'}
template_name = 'registration/user_activation_view.html'
get_status_code = 200
post_status_code = 405
authenticated_redirect_url_name = _auth_redirect
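# The mixin that turns the declarative attributes above into real tests lives in
# .base and is not shown in this file. The class below is only an assumed sketch
# of that idea (names and checks are guesses, not the actual implementation): it
# resolves the URL from the class attributes and asserts on status code and
# template.
class TemplateResponseTestMixinSketch(object):
    url_name_kwargs = {}
    def test_get(self):
        url = reverse(self.url_name, kwargs=self.url_name_kwargs)
        response = self.client.get(url)
        self.assertEqual(response.status_code, self.get_status_code)
        if self.get_status_code == 200:
            self.assertTemplateUsed(response, self.template_name)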
| 32.202899
| 79
| 0.812781
|
6c767a5995dfdc511a89abc92a882b1bb772da06
| 1,832
|
py
|
Python
|
autogqla/objects/helpers.py
|
srafehi/autogqla
|
171d2a9b35f3a03cac6d07da94b5340257f359ee
|
[
"MIT"
] | 1
|
2020-06-16T01:57:55.000Z
|
2020-06-16T01:57:55.000Z
|
autogqla/objects/helpers.py
|
srafehi/autogqla
|
171d2a9b35f3a03cac6d07da94b5340257f359ee
|
[
"MIT"
] | null | null | null |
autogqla/objects/helpers.py
|
srafehi/autogqla
|
171d2a9b35f3a03cac6d07da94b5340257f359ee
|
[
"MIT"
] | null | null | null |
import graphene
from autogqla.base import BaseModel
from autogqla.fields.connections.base import apply_query_condition
from autogqla.fields.connections.pagination_connection_field import PaginationConnectionField
from autogqla.fields.connections.pagination_details import PaginationDetails
from autogqla.fields.connections.pagination_helpers import paginate
def make_pagination_field(model):
resolver = BaseModel.resolver_collection.for_model(model)
return PaginationConnectionField(
resolver.connection_type,
where=graphene.Argument(resolver.where_input_type),
order_by=graphene.Argument(graphene.List(resolver.order_by_enum)),
)
def make_pagination_resolver(model):
def execute(_, _info, first=None, last=None, before=None, after=None, order_by=None, **arguments):
pagination = PaginationDetails(before, after, first, last, tuple(order_by or ()))
session = BaseModel.session_func()
resolver = BaseModel.resolver_collection.for_model(model)
query = session.query(model)
query = apply_query_condition(query=query, resolver=resolver, arguments=arguments)
return paginate(model, query, pagination)
return execute
def make_relationship_field(model):
resolver = BaseModel.resolver_collection.for_model(model)
return graphene.List(
graphene.NonNull(resolver.node),
required=True,
where=graphene.Argument(resolver.where_input_type),
)
def make_relationship_resolver(model):
def execute(_, _info, **arguments):
session = BaseModel.session_func()
resolver = BaseModel.resolver_collection.for_model(model)
query = session.query(model)
query = apply_query_condition(query=query, resolver=resolver, arguments=arguments)
return query.all()
return execute
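# Hedged sketch of how the factories above are usually combined into a root
# Query type. The model argument and default field name are assumptions and not
# part of the autogqla API; only the helpers defined in this module are real.
def make_query_type_sketch(model, field_name='items'):
    """Return a graphene ObjectType exposing ``model`` through a paginated field."""
    return type(
        'Query',
        (graphene.ObjectType,),
        {
            field_name: make_pagination_field(model),
            'resolve_' + field_name: make_pagination_resolver(model),
        },
    )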
| 35.921569
| 102
| 0.753821
|
78fa997dc20d6be11c0849046e5079b3e7210562
| 419
|
py
|
Python
|
bcdi/postprocessing/__init__.py
|
sjleake/bcdi
|
bf071ad085a11622158e1e651857a8a172c51cf1
|
[
"CECILL-B"
] | null | null | null |
bcdi/postprocessing/__init__.py
|
sjleake/bcdi
|
bf071ad085a11622158e1e651857a8a172c51cf1
|
[
"CECILL-B"
] | null | null | null |
bcdi/postprocessing/__init__.py
|
sjleake/bcdi
|
bf071ad085a11622158e1e651857a8a172c51cf1
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# (c) 06/2021-present : DESY CFEL
# authors:
# Jerome Carnis, carnis_jerome@yahoo.fr
"""
BCDI postprocessing.
This package contains functions related to data postprocessing after phase retrieval.
"""
| 29.928571
| 85
| 0.692124
|
39c6623139c134db061770ee49ff0ce2978f796a
| 1,418
|
py
|
Python
|
nemo/package_info.py
|
magictron/NeMo
|
aae9e3405b6e78dff864d21ea59331abdadc0217
|
[
"Apache-2.0"
] | null | null | null |
nemo/package_info.py
|
magictron/NeMo
|
aae9e3405b6e78dff864d21ea59331abdadc0217
|
[
"Apache-2.0"
] | null | null | null |
nemo/package_info.py
|
magictron/NeMo
|
aae9e3405b6e78dff864d21ea59331abdadc0217
|
[
"Apache-2.0"
] | null | null | null |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAJOR = 0
MINOR = 10
PATCH = 0
PRE_RELEASE = 'b9'
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
__package_name__ = 'nemo_toolkit'
__contact_names__ = 'NVIDIA'
__contact_emails__ = 'nemo-toolkit@nvidia.com'
__homepage__ = 'https://nvidia.github.io/NeMo/'
__repository_url__ = 'https://github.com/nvidia/nemo'
__download_url__ = 'https://github.com/NVIDIA/NeMo/releases'
__description__ = 'NEMO core package. Necessary for all collections'
__license__ = 'Apache2'
__keywords__ = 'deep learning, machine learning, gpu, NLP, NeMo, nvidia, pytorch, torch, tts, speech, language'
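# Quick illustration of how VERSION is flattened above: with (0, 10, 0, 'b9') the
# short version is '0.10.0' and the full version string is '0.10.0b9'.
if __name__ == '__main__':
    assert __shortversion__ == '0.10.0'
    assert __version__ == '0.10.0b9'
    print(__package_name__, __version__)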
| 37.315789
| 111
| 0.739069
|
e8f0b4fd1245f0e72a33524cd729b8e097c77695
| 42,859
|
py
|
Python
|
sympy/geometry/ellipse.py
|
darknight009/sympy
|
618193720b862a41aa295d474793cc12e4de2927
|
[
"BSD-3-Clause"
] | 1
|
2021-07-24T12:45:14.000Z
|
2021-07-24T12:45:14.000Z
|
sympy/geometry/ellipse.py
|
darknight009/sympy
|
618193720b862a41aa295d474793cc12e4de2927
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/geometry/ellipse.py
|
darknight009/sympy
|
618193720b862a41aa295d474793cc12e4de2927
|
[
"BSD-3-Clause"
] | 1
|
2021-12-31T12:31:28.000Z
|
2021-12-31T12:31:28.000Z
|
"""Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from __future__ import division, print_function
from sympy.core import S, pi, sympify
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import Rational, oo
from sympy.core.compatibility import range
from sympy.core.symbol import Dummy
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.geometry.exceptions import GeometryError
from sympy.polys import DomainError, Poly, PolynomialError
from sympy.polys.polyutils import _not_a_coeff, _nsort
from sympy.solvers import solve
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
from .entity import GeometryEntity, GeometrySet
from .point import Point
from .line import Line, LinearEntity
from .util import _symbol, idiff
import random
class Ellipse(GeometrySet):
"""An elliptical GeometryEntity.
Parameters
==========
center : Point, optional
Default value is Point(0, 0)
hradius : number or SymPy expression, optional
vradius : number or SymPy expression, optional
eccentricity : number or SymPy expression, optional
Two of `hradius`, `vradius` and `eccentricity` must be supplied to
create an Ellipse. The third is derived from the two supplied.
Attributes
==========
center
hradius
vradius
area
circumference
eccentricity
periapsis
apoapsis
focus_distance
foci
Raises
======
GeometryError
When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
as parameters.
TypeError
When `center` is not a Point.
See Also
========
Circle
Notes
-----
Constructed from a center and two radii, the first being the horizontal
radius (along the x-axis) and the second being the vertical radius (along
the y-axis).
    When symbolic values for hradius and vradius are used, any calculation that
refers to the foci or the major or minor axis will assume that the ellipse
has its major radius on the x-axis. If this is not true then a manual
rotation is necessary.
Examples
========
>>> from sympy import Ellipse, Point, Rational
>>> e1 = Ellipse(Point(0, 0), 5, 1)
>>> e1.hradius, e1.vradius
(5, 1)
>>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
>>> e2
Ellipse(Point2D(3, 1), 3, 9/5)
Plotting:
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Circle, Segment
>>> c1 = Circle(Point(0,0), 1)
>>> Plot(c1) # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
>>> p = Plot() # doctest: +SKIP
>>> p[0] = c1 # doctest: +SKIP
>>> radius = Segment(c1.center, c1.random_point())
>>> p[1] = radius # doctest: +SKIP
>>> p # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
[1]: t*cos(1.546086215036205357975518382),
t*sin(1.546086215036205357975518382), 'mode=parametric'
"""
def __contains__(self, o):
if isinstance(o, Point):
x = Dummy('x', real=True)
y = Dummy('y', real=True)
res = self.equation(x, y).subs({x: o.x, y: o.y})
return trigsimp(simplify(res)) is S.Zero
elif isinstance(o, Ellipse):
return self == o
return False
def __eq__(self, o):
"""Is the other GeometryEntity the same as this ellipse?"""
return isinstance(o, GeometryEntity) and (self.center == o.center and
self.hradius == o.hradius and
self.vradius == o.vradius)
def __hash__(self):
return super(Ellipse, self).__hash__()
def __new__(
cls, center=None, hradius=None, vradius=None, eccentricity=None,
**kwargs):
hradius = sympify(hradius)
vradius = sympify(vradius)
eccentricity = sympify(eccentricity)
if center is None:
center = Point(0, 0)
else:
center = Point(center, dim=2)
if len(center) != 2:
raise ValueError('The center of "{0}" must be a two dimensional point'.format(cls))
if len(list(filter(None, (hradius, vradius, eccentricity)))) != 2:
            raise ValueError('Exactly two arguments of "hradius", '
                             '"vradius", and "eccentricity" must not be None.')
if eccentricity is not None:
if hradius is None:
hradius = vradius / sqrt(1 - eccentricity**2)
elif vradius is None:
vradius = hradius * sqrt(1 - eccentricity**2)
if hradius == vradius:
return Circle(center, hradius, **kwargs)
return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
def _do_ellipse_intersection(self, o):
"""The intersection of an ellipse with another ellipse or a circle.
Private helper method for `intersection`.
"""
x = Dummy('x', real=True)
y = Dummy('y', real=True)
seq = self.equation(x, y)
oeq = o.equation(x, y)
# TODO: Replace solve with solveset, when this line is tested
result = solve([seq, oeq], [x, y])
return [Point(*r) for r in list(uniq(result))]
def _do_line_intersection(self, o):
"""
Find the intersection of a LinearEntity and the ellipse.
All LinearEntities are treated as a line and filtered at
the end to see that they lie in o.
"""
hr_sq = self.hradius ** 2
vr_sq = self.vradius ** 2
lp = o.points
ldir = lp[1] - lp[0]
diff = lp[0] - self.center
mdir = Point(ldir.x/hr_sq, ldir.y/vr_sq)
mdiff = Point(diff.x/hr_sq, diff.y/vr_sq)
a = ldir.dot(mdir)
b = ldir.dot(mdiff)
c = diff.dot(mdiff) - 1
det = simplify(b*b - a*c)
result = []
if det == 0:
t = -b / a
result.append(lp[0] + (lp[1] - lp[0]) * t)
# Definite and potential symbolic intersections are allowed.
elif (det > 0) != False:
root = sqrt(det)
t_a = (-b - root) / a
t_b = (-b + root) / a
result.append( lp[0] + (lp[1] - lp[0]) * t_a )
result.append( lp[0] + (lp[1] - lp[0]) * t_b )
return [r for r in result if r in o]
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG ellipse element for the Ellipse.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
from sympy.core.evalf import N
c = N(self.center)
h, v = N(self.hradius), N(self.vradius)
return (
'<ellipse fill="{1}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" cx="{2}" cy="{3}" rx="{4}" ry="{5}"/>'
).format(2. * scale_factor, fill_color, c.x, c.y, h, v)
@property
def ambient_dimension(self):
return 2
@property
def apoapsis(self):
"""The apoapsis of the ellipse.
The greatest distance between the focus and the contour.
Returns
=======
apoapsis : number
See Also
========
periapsis : Returns shortest distance between foci and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.apoapsis
2*sqrt(2) + 3
"""
return self.major * (1 + self.eccentricity)
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the functions.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.arbitrary_point()
Point2D(3*cos(t), 2*sin(t))
"""
t = _symbol(parameter)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('Symbol %s already appears in object '
'and cannot be used as a parameter.' % t.name))
return Point(self.center.x + self.hradius*cos(t),
self.center.y + self.vradius*sin(t))
@property
def area(self):
"""The area of the ellipse.
Returns
=======
area : number
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.area
3*pi
"""
return simplify(S.Pi * self.hradius * self.vradius)
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
h, v = self.hradius, self.vradius
return (self.center.x - h, self.center.y - v, self.center.x + h, self.center.y + v)
@property
def center(self):
"""The center of the ellipse.
Returns
=======
center : number
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.center
Point2D(0, 0)
"""
return self.args[0]
@property
def circumference(self):
"""The circumference of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.circumference
12*Integral(sqrt((-8*_x**2/9 + 1)/(-_x**2 + 1)), (_x, 0, 1))
"""
from sympy import Integral
if self.eccentricity == 1:
return 2*pi*self.hradius
else:
x = Dummy('x', real=True)
return 4*self.major*Integral(
sqrt((1 - (self.eccentricity*x)**2)/(1 - x**2)), (x, 0, 1))
@property
def eccentricity(self):
"""The eccentricity of the ellipse.
Returns
=======
eccentricity : number
Examples
========
>>> from sympy import Point, Ellipse, sqrt
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, sqrt(2))
>>> e1.eccentricity
sqrt(7)/3
"""
return self.focus_distance / self.major
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
-----
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Ellipse, S
>>> from sympy.abc import t
>>> e = Ellipse((0, 0), 3, 2)
>>> e.encloses_point((0, 0))
True
>>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
False
>>> e.encloses_point((4, 0))
False
"""
p = Point(p, dim=2)
if p in self:
return False
if len(self.foci) == 2:
# if the combined distance from the foci to p (h1 + h2) is less
# than the combined distance from the foci to the minor axis
# (which is the same as the major axis length) then p is inside
# the ellipse
h1, h2 = [f.distance(p) for f in self.foci]
test = 2*self.major - (h1 + h2)
else:
test = self.radius - self.center.distance(p)
return fuzzy_bool(test.is_positive)
def equation(self, x='x', y='y'):
"""The equation of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
See Also
========
arbitrary_point : Returns parameterized point on ellipse
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.equation()
y**2/4 + (x/3 - 1/3)**2 - 1
"""
x = _symbol(x)
y = _symbol(y)
t1 = ((x - self.center.x) / self.hradius)**2
t2 = ((y - self.center.y) / self.vradius)**2
return t1 + t2 - 1
def evolute(self, x='x', y='y'):
"""The equation of evolute of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.evolute()
2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3)
"""
if len(self.args) != 3:
raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.')
x = _symbol(x)
y = _symbol(y)
t1 = (self.hradius*(x - self.center.x))**Rational(2, 3)
t2 = (self.vradius*(y - self.center.y))**Rational(2, 3)
return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3)
@property
def foci(self):
"""The foci of the ellipse.
Notes
-----
The foci can only be calculated if the major/minor axes are known.
Raises
======
ValueError
When the major and minor axis cannot be determined.
See Also
========
sympy.geometry.point.Point
focus_distance : Returns the distance between focus and center
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.foci
(Point2D(-2*sqrt(2), 0), Point2D(2*sqrt(2), 0))
"""
c = self.center
hr, vr = self.hradius, self.vradius
if hr == vr:
return (c, c)
# calculate focus distance manually, since focus_distance calls this
# routine
fd = sqrt(self.major**2 - self.minor**2)
if hr == self.minor:
# foci on the y-axis
return (c + Point(0, -fd), c + Point(0, fd))
elif hr == self.major:
# foci on the x-axis
return (c + Point(-fd, 0), c + Point(fd, 0))
@property
def focus_distance(self):
"""The focal distance of the ellipse.
The distance between the center and one focus.
Returns
=======
focus_distance : number
See Also
========
foci
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.focus_distance
2*sqrt(2)
"""
return Point.distance(self.center, self.foci[0])
@property
def hradius(self):
"""The horizontal radius of the ellipse.
Returns
=======
hradius : number
See Also
========
vradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.hradius
3
"""
return self.args[1]
def intersection(self, o):
"""The intersection of this ellipse and another geometrical entity
`o`.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntity objects
Notes
-----
Currently supports intersections with Point, Line, Segment, Ray,
Circle and Ellipse types.
See Also
========
sympy.geometry.entity.GeometryEntity
Examples
========
>>> from sympy import Ellipse, Point, Line, sqrt
>>> e = Ellipse(Point(0, 0), 5, 7)
>>> e.intersection(Point(0, 0))
[]
>>> e.intersection(Point(5, 0))
[Point2D(5, 0)]
>>> e.intersection(Line(Point(0,0), Point(0, 1)))
[Point2D(0, -7), Point2D(0, 7)]
>>> e.intersection(Line(Point(5,0), Point(5, 1)))
[Point2D(5, 0)]
>>> e.intersection(Line(Point(6,0), Point(6, 1)))
[]
>>> e = Ellipse(Point(-1, 0), 4, 3)
>>> e.intersection(Ellipse(Point(1, 0), 4, 3))
[Point2D(0, -3*sqrt(15)/4), Point2D(0, 3*sqrt(15)/4)]
>>> e.intersection(Ellipse(Point(5, 0), 4, 3))
[Point2D(2, -3*sqrt(7)/4), Point2D(2, 3*sqrt(7)/4)]
>>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
[]
>>> e.intersection(Ellipse(Point(0, 0), 3, 4))
[Point2D(-363/175, -48*sqrt(111)/175), Point2D(-363/175, 48*sqrt(111)/175), Point2D(3, 0)]
>>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
[Point2D(-17/5, -12/5), Point2D(-17/5, 12/5), Point2D(7/5, -12/5), Point2D(7/5, 12/5)]
"""
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
elif isinstance(o, LinearEntity):
# LinearEntity may be a ray/segment, so check the points
# of intersection for coincidence first
return self._do_line_intersection(o)
elif isinstance(o, Circle):
return self._do_ellipse_intersection(o)
elif isinstance(o, Ellipse):
if o == self:
return self
else:
return self._do_ellipse_intersection(o)
return o.intersection(self)
def is_tangent(self, o):
"""Is `o` tangent to the ellipse?
Parameters
==========
o : GeometryEntity
An Ellipse, LinearEntity or Polygon
Raises
======
NotImplementedError
When the wrong type of argument is supplied.
Returns
=======
is_tangent: boolean
True if o is tangent to the ellipse, False otherwise.
See Also
========
tangent_lines
Examples
========
>>> from sympy import Point, Ellipse, Line
>>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
>>> e1 = Ellipse(p0, 3, 2)
>>> l1 = Line(p1, p2)
>>> e1.is_tangent(l1)
True
"""
inter = None
if isinstance(o, Ellipse):
inter = self.intersection(o)
if isinstance(inter, Ellipse):
return False
return (inter is not None and len(inter) == 1
and isinstance(inter[0], Point))
elif isinstance(o, LinearEntity):
inter = self._do_line_intersection(o)
if inter is not None and len(inter) == 1:
return inter[0] in o
else:
return False
elif isinstance(o, Polygon):
c = 0
for seg in o.sides:
inter = self._do_line_intersection(seg)
c += len([True for point in inter if point in seg])
return c == 1
else:
raise NotImplementedError("Unknown argument type")
@property
def major(self):
"""Longer axis of the ellipse (if it can be determined) else hradius.
Returns
=======
major : number or expression
See Also
========
hradius, vradius, minor
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.major
3
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).major
a
>>> Ellipse(p1, b, a).major
b
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).major
m + 1
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = b - a < 0
if o == True:
return a
elif o == False:
return b
return self.hradius
@property
def minor(self):
"""Shorter axis of the ellipse (if it can be determined) else vradius.
Returns
=======
minor : number or expression
See Also
========
hradius, vradius, major
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.minor
1
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).minor
b
>>> Ellipse(p1, b, a).minor
a
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).minor
m
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = a - b < 0
if o == True:
return a
elif o == False:
return b
return self.vradius
def normal_lines(self, p, prec=None):
"""Normal lines between `p` and the ellipse.
Parameters
==========
p : Point
Returns
=======
normal_lines : list with 1, 2 or 4 Lines
Examples
========
>>> from sympy import Line, Point, Ellipse
>>> e = Ellipse((0, 0), 2, 3)
>>> c = e.center
>>> e.normal_lines(c + Point(1, 0))
[Line2D(Point2D(0, 0), Point2D(1, 0))]
>>> e.normal_lines(c)
[Line2D(Point2D(0, 0), Point2D(0, 1)), Line2D(Point2D(0, 0), Point2D(1, 0))]
Off-axis points require the solution of a quartic equation. This
often leads to very large expressions that may be of little practical
use. An approximate solution of `prec` digits can be obtained by
passing in the desired value:
>>> e.normal_lines((3, 3), prec=2)
[Line2D(Point2D(-0.81, -2.7), Point2D(0.19, -1.2)),
Line2D(Point2D(1.5, -2.0), Point2D(2.5, -2.7))]
Whereas the above solution has an operation count of 12, the exact
solution has an operation count of 2020.
"""
p = Point(p, dim=2)
# XXX change True to something like self.angle == 0 if the arbitrarily
# rotated ellipse is introduced.
# https://github.com/sympy/sympy/issues/2815)
if True:
rv = []
if p.x == self.center.x:
rv.append(Line(self.center, slope=oo))
if p.y == self.center.y:
rv.append(Line(self.center, slope=0))
if rv:
# at these special orientations of p either 1 or 2 normals
# exist and we are done
return rv
# find the 4 normal points and construct lines through them with
# the corresponding slope
x, y = Dummy('x', real=True), Dummy('y', real=True)
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
norm = -1/dydx
slope = Line(p, (x, y)).slope
seq = slope - norm
# TODO: Replace solve with solveset, when this line is tested
yis = solve(seq, y)[0]
xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
if len(xeq.free_symbols) == 1:
try:
# this is so much faster, it's worth a try
xsol = Poly(xeq, x).real_roots()
except (DomainError, PolynomialError, NotImplementedError):
# TODO: Replace solve with solveset, when these lines are tested
xsol = _nsort(solve(xeq, x), separated=True)[0]
points = [Point(i, solve(eq.subs(x, i), y)[0]) for i in xsol]
else:
raise NotImplementedError(
'intersections for the general ellipse are not supported')
slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
if prec is not None:
points = [pt.n(prec) for pt in points]
slopes = [i if _not_a_coeff(i) else i.n(prec) for i in slopes]
return [Line(pt, slope=s) for pt,s in zip(points, slopes)]
@property
def periapsis(self):
"""The periapsis of the ellipse.
The shortest distance between the focus and the contour.
Returns
=======
periapsis : number
See Also
========
apoapsis : Returns greatest distance between focus and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.periapsis
-2*sqrt(2) + 3
"""
return self.major * (1 - self.eccentricity)
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.plot_interval()
[t, -pi, pi]
"""
t = _symbol(parameter)
return [t, -S.Pi, S.Pi]
def random_point(self, seed=None):
"""A random point on the ellipse.
Returns
=======
point : Point
See Also
========
sympy.geometry.point.Point
arbitrary_point : Returns parameterized point on ellipse
Notes
-----
        A random point may not appear to be on the ellipse, i.e., `p in e` may
return False. This is because the coordinates of the point will be
floating point values, and when these values are substituted into the
equation for the ellipse the result may not be zero because of floating
point rounding error.
Examples
========
>>> from sympy import Point, Ellipse, Segment
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.random_point() # gives some random point
Point2D(...)
>>> p1 = e1.random_point(seed=0); p1.n(2)
Point2D(2.1, 1.4)
The random_point method assures that the point will test as being
in the ellipse:
>>> p1 in e1
True
Notes
=====
An arbitrary_point with a random value of t substituted into it may
not test as being on the ellipse because the expression tested that
a point is on the ellipse doesn't simplify to zero and doesn't evaluate
exactly to zero:
>>> from sympy.abc import t
>>> e1.arbitrary_point(t)
Point2D(3*cos(t), 2*sin(t))
>>> p2 = _.subs(t, 0.1)
>>> p2 in e1
False
Note that arbitrary_point routine does not take this approach. A value
for cos(t) and sin(t) (not t) is substituted into the arbitrary point.
There is a small chance that this will give a point that will not
test as being in the ellipse, so the process is repeated (up to 10
times) until a valid point is obtained.
"""
from sympy import sin, cos, Rational
t = _symbol('t')
x, y = self.arbitrary_point(t).args
# get a random value in [-1, 1) corresponding to cos(t)
# and confirm that it will test as being in the ellipse
if seed is not None:
rng = random.Random(seed)
else:
rng = random
for i in range(10): # should be enough?
# simplify this now or else the Float will turn s into a Float
c = 2*Rational(rng.random()) - 1
s = sqrt(1 - c**2)
p1 = Point(x.subs(cos(t), c), y.subs(sin(t), s))
if p1 in self:
return p1
raise GeometryError(
'Having problems generating a point in the ellipse.')
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point2D(1, 0), -1)
>>> from sympy import Ellipse, Line, Point
>>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
Traceback (most recent call last):
...
NotImplementedError:
General Ellipse is not supported but the equation of the reflected
Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1
Notes
=====
Until the general ellipse (with no axis parallel to the x-axis) is
        supported, a NotImplementedError is raised and the equation whose
zeros define the rotated ellipse is given.
"""
from .util import _uniquely_named_symbol
if line.slope in (0, oo):
c = self.center
c = c.reflect(line)
return self.func(c, -self.hradius, self.vradius)
else:
x, y = [_uniquely_named_symbol(name, self, line) for name in 'xy']
expr = self.equation(x, y)
p = Point(x, y).reflect(line)
result = expr.subs(zip((x, y), p.args
), simultaneous=True)
raise NotImplementedError(filldedent(
'General Ellipse is not supported but the equation '
'of the reflected Ellipse is given by the zeros of: ' +
"f(%s, %s) = %s" % (str(x), str(y), str(result))))
def rotate(self, angle=0, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
Note: since the general ellipse is not supported, only rotations that
are integer multiples of pi/2 are allowed.
Examples
========
>>> from sympy import Ellipse, pi
>>> Ellipse((1, 0), 2, 1).rotate(pi/2)
Ellipse(Point2D(0, 1), 1, 2)
>>> Ellipse((1, 0), 2, 1).rotate(pi)
Ellipse(Point2D(-1, 0), 2, 1)
"""
if self.hradius == self.vradius:
return self.func(self.center.rotate(angle, pt), self.hradius)
if (angle/S.Pi).is_integer:
return super(Ellipse, self).rotate(angle, pt)
if (2*angle/S.Pi).is_integer:
return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius)
        # XXX see https://github.com/sympy/sympy/issues/2815 for general ellipses
raise NotImplementedError('Only rotations of pi/2 are currently supported for Ellipse.')
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the major and minor
axes which must be scaled and they are not GeometryEntities.
Examples
========
>>> from sympy import Ellipse
>>> Ellipse((0, 0), 2, 1).scale(2, 4)
Circle(Point2D(0, 0), 4)
>>> Ellipse((0, 0), 2, 1).scale(2)
Ellipse(Point2D(0, 0), 4, 1)
"""
c = self.center
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
h = self.hradius
v = self.vradius
return self.func(c.scale(x, y), hradius=h*x, vradius=v*y)
@doctest_depends_on(modules=('pyglet',))
def tangent_lines(self, p):
"""Tangent lines between `p` and the ellipse.
If `p` is on the ellipse, returns the tangent line through point `p`.
Otherwise, returns the tangent line(s) from `p` to the ellipse, or
        an empty list if no tangent line is possible (e.g., `p` inside ellipse).
Parameters
==========
p : Point
Returns
=======
tangent_lines : list with 1 or 2 Lines
Raises
======
NotImplementedError
Can only find tangent lines for a point, `p`, on the ellipse.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Line
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.tangent_lines(Point(3, 0))
[Line2D(Point2D(3, 0), Point2D(3, -12))]
>>> # This will plot an ellipse together with a tangent line.
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Point, Ellipse
>>> e = Ellipse(Point(0,0), 3, 2)
>>> t = e.tangent_lines(e.random_point())
>>> p = Plot()
>>> p[0] = e # doctest: +SKIP
>>> p[1] = t # doctest: +SKIP
"""
p = Point(p, dim=2)
if self.encloses_point(p):
return []
if p in self:
delta = self.center - p
rise = (self.vradius ** 2)*delta.x
run = -(self.hradius ** 2)*delta.y
p2 = Point(simplify(p.x + run),
simplify(p.y + rise))
return [Line(p, p2)]
else:
if len(self.foci) == 2:
f1, f2 = self.foci
maj = self.hradius
test = (2*maj -
Point.distance(f1, p) -
Point.distance(f2, p))
else:
test = self.radius - Point.distance(self.center, p)
if test.is_number and test.is_positive:
return []
# else p is outside the ellipse or we can't tell. In case of the
# latter, the solutions returned will only be valid if
# the point is not inside the ellipse; if it is, nan will result.
x, y = Dummy('x'), Dummy('y')
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
slope = Line(p, Point(x, y)).slope
# TODO: Replace solve with solveset, when this line is tested
tangent_points = solve([slope - dydx, eq], [x, y])
# handle horizontal and vertical tangent lines
if len(tangent_points) == 1:
assert tangent_points[0][
0] == p.x or tangent_points[0][1] == p.y
return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]
# others
return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
@property
def vradius(self):
"""The vertical radius of the ellipse.
Returns
=======
vradius : number
See Also
========
hradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.vradius
1
"""
return self.args[2]
class Circle(Ellipse):
"""A circle in space.
Constructed simply from a center and a radius, or from three
non-collinear points.
Parameters
==========
center : Point
radius : number or sympy expression
points : sequence of three Points
Attributes
==========
radius (synonymous with hradius, vradius, major and minor)
circumference
equation
Raises
======
GeometryError
When trying to construct circle from three collinear points.
When trying to construct circle from incorrect parameters.
See Also
========
Ellipse, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Circle
>>> # a circle constructed from a center and radius
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.hradius, c1.vradius, c1.radius
(5, 5, 5)
>>> # a circle constructed from three points
>>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
>>> c2.hradius, c2.vradius, c2.radius, c2.center
(sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point2D(1/2, 1/2))
"""
def __new__(cls, *args, **kwargs):
c, r = None, None
if len(args) == 3:
args = [Point(a, dim=2) for a in args]
if Point.is_collinear(*args):
raise GeometryError(
"Cannot construct a circle from three collinear points")
from .polygon import Triangle
t = Triangle(*args)
c = t.circumcenter
r = t.circumradius
elif len(args) == 2:
# Assume (center, radius) pair
c = Point(args[0], dim=2)
r = sympify(args[1])
if not (c is None or r is None):
return GeometryEntity.__new__(cls, c, r, **kwargs)
raise GeometryError("Circle.__new__ received unknown arguments")
@property
def circumference(self):
"""The circumference of the circle.
Returns
=======
circumference : number or SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.circumference
12*pi
"""
return 2 * S.Pi * self.radius
def equation(self, x='x', y='y'):
"""The equation of the circle.
Parameters
==========
x : str or Symbol, optional
Default value is 'x'.
y : str or Symbol, optional
Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.equation()
x**2 + y**2 - 25
"""
x = _symbol(x)
y = _symbol(y)
t1 = (x - self.center.x)**2
t2 = (y - self.center.y)**2
return t1 + t2 - self.major**2
def intersection(self, o):
"""The intersection of this circle with another geometrical entity.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntities
Examples
========
>>> from sympy import Point, Circle, Line, Ray
>>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
>>> p4 = Point(5, 0)
>>> c1 = Circle(p1, 5)
>>> c1.intersection(p2)
[]
>>> c1.intersection(p4)
[Point2D(5, 0)]
>>> c1.intersection(Ray(p1, p2))
[Point2D(5*sqrt(2)/2, 5*sqrt(2)/2)]
>>> c1.intersection(Line(p2, p3))
[]
"""
if isinstance(o, Circle):
if o.center == self.center:
if o.radius == self.radius:
return o
return []
dx, dy = (o.center - self.center).args
d = sqrt(simplify(dy**2 + dx**2))
R = o.radius + self.radius
if d > R or d < abs(self.radius - o.radius):
return []
a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))
x2 = self.center.x + (dx * a/d)
y2 = self.center.y + (dy * a/d)
h = sqrt(simplify(self.radius**2 - a**2))
rx = -dy * (h/d)
ry = dx * (h/d)
xi_1 = simplify(x2 + rx)
xi_2 = simplify(x2 - rx)
yi_1 = simplify(y2 + ry)
yi_2 = simplify(y2 - ry)
ret = [Point(xi_1, yi_1)]
if xi_1 != xi_2 or yi_1 != yi_2:
ret.append(Point(xi_2, yi_2))
return ret
return Ellipse.intersection(self, o)
@property
def radius(self):
"""The radius of the circle.
Returns
=======
radius : number or sympy expression
See Also
========
Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.radius
6
"""
return self.args[1]
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point2D(1, 0), -1)
"""
c = self.center
c = c.reflect(line)
return self.func(c, -self.radius)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle
>>> Circle((0, 0), 1).scale(2, 2)
Circle(Point2D(0, 0), 2)
>>> Circle((0, 0), 1).scale(2, 4)
Ellipse(Point2D(0, 0), 2, 4)
"""
c = self.center
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
c = c.scale(x, y)
x, y = [abs(i) for i in (x, y)]
if x == y:
return self.func(c, x*self.radius)
h = v = self.radius
return Ellipse(c, hradius=h*x, vradius=v*y)
@property
def vradius(self):
"""
This Ellipse property is an alias for the Circle's radius.
Whereas hradius, major and minor can use Ellipse's conventions,
the vradius does not exist for a circle. It is always a positive
value in order that the Circle, like Polygons, will have an
area that can be positive or negative as determined by the sign
of the hradius.
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.vradius
6
"""
return abs(self.radius)
from .polygon import Polygon
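# Purely illustrative helper, not part of the SymPy API: exercises the classes
# above in the same way the docstring examples do.
def _ellipse_usage_sketch():
    e = Ellipse(Point(0, 0), 3, 2)
    print(e.equation())                  # x**2/9 + y**2/4 - 1
    print(e.tangent_lines(Point(3, 0)))  # the single vertical tangent at x = 3
    c = Circle(Point(0, 0), 2)
    print(c.intersection(Line(Point(-3, 0), Point(3, 0))))  # two x-axis crossings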
| 27.562058
| 98
| 0.515364
|
8287be7c91abd330e7676782b470aadaa17550fa
| 1,067
|
py
|
Python
|
grr/server/grr_response_server/bin/worker.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/bin/worker.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/bin/worker.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""This is a backend analysis worker which will be deployed on the server."""
from absl import app
from absl import flags
from grr_response_core import config
from grr_response_core.config import contexts
from grr_response_core.config import server as config_server
from grr_response_server import fleetspeak_connector
from grr_response_server import server_startup
from grr_response_server import worker_lib
flags.DEFINE_bool(
"version",
default=False,
allow_override=True,
help="Print the GRR worker version number and exit immediately.")
def main(argv):
"""Main."""
del argv # Unused.
if flags.FLAGS.version:
print("GRR worker {}".format(config_server.VERSION["packageversion"]))
return
config.CONFIG.AddContext(contexts.WORKER_CONTEXT,
"Context applied when running a worker.")
# Initialise flows and config_lib
server_startup.Init()
fleetspeak_connector.Init()
worker_obj = worker_lib.GRRWorker()
worker_obj.Run()
if __name__ == "__main__":
app.run(main)
| 24.25
| 77
| 0.746017
|
13c7f4036faea213b0da264627b4d292261f465c
| 163
|
py
|
Python
|
Python-Pro.py
|
ravi-kr/Universe
|
cf11bc631f5fcf171f733998a8f5aa842b8ccf09
|
[
"MIT"
] | null | null | null |
Python-Pro.py
|
ravi-kr/Universe
|
cf11bc631f5fcf171f733998a8f5aa842b8ccf09
|
[
"MIT"
] | null | null | null |
Python-Pro.py
|
ravi-kr/Universe
|
cf11bc631f5fcf171f733998a8f5aa842b8ccf09
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
engine = create_engine('hive://localhost:10000/default')
import pandas as pd
df = pd.read_sql("SELECT * from table1", engine)
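# The 'hive://' URL above assumes the PyHive driver is installed (for example,
# pip install 'pyhive[hive]'). For large tables a chunked read keeps memory
# bounded; the table name is the same placeholder used above.
for chunk in pd.read_sql("SELECT * from table1", engine, chunksize=10000):
    print(len(chunk))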
| 32.6
| 56
| 0.779141
|
72a7e10fb7e1270d01b9dc8fe3e5dd356e025192
| 5,519
|
py
|
Python
|
models/wrn.py
|
UBCDingXin/RepDistiller
|
dcc043277f2820efafd679ffb82b8e8195b7e222
|
[
"BSD-2-Clause"
] | 1,347
|
2019-10-24T02:30:04.000Z
|
2022-03-31T12:01:21.000Z
|
models/wrn.py
|
WeiChengTseng/RepDistiller
|
dcc043277f2820efafd679ffb82b8e8195b7e222
|
[
"BSD-2-Clause"
] | 45
|
2019-10-28T07:16:25.000Z
|
2022-03-19T03:34:36.000Z
|
models/wrn.py
|
WeiChengTseng/RepDistiller
|
dcc043277f2820efafd679ffb82b8e8195b7e222
|
[
"BSD-2-Clause"
] | 282
|
2019-10-24T03:58:04.000Z
|
2022-03-25T19:52:39.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def get_feat_modules(self):
feat_m = nn.ModuleList([])
feat_m.append(self.conv1)
feat_m.append(self.block1)
feat_m.append(self.block2)
feat_m.append(self.block3)
return feat_m
def get_bn_before_relu(self):
bn1 = self.block2.layer[0].bn1
bn2 = self.block3.layer[0].bn1
bn3 = self.bn1
return [bn1, bn2, bn3]
def forward(self, x, is_feat=False, preact=False):
out = self.conv1(x)
f0 = out
out = self.block1(out)
f1 = out
out = self.block2(out)
f2 = out
out = self.block3(out)
f3 = out
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
f4 = out
out = self.fc(out)
if is_feat:
if preact:
f1 = self.block2.layer[0].bn1(f1)
f2 = self.block3.layer[0].bn1(f2)
f3 = self.bn1(f3)
return [f0, f1, f2, f3, f4], out
else:
return out
def wrn(**kwargs):
"""
Constructs a Wide Residual Networks.
"""
model = WideResNet(**kwargs)
return model
def wrn_40_2(**kwargs):
model = WideResNet(depth=40, widen_factor=2, **kwargs)
return model
def wrn_40_1(**kwargs):
model = WideResNet(depth=40, widen_factor=1, **kwargs)
return model
def wrn_16_2(**kwargs):
model = WideResNet(depth=16, widen_factor=2, **kwargs)
return model
def wrn_16_1(**kwargs):
model = WideResNet(depth=16, widen_factor=1, **kwargs)
return model
if __name__ == '__main__':
import torch
x = torch.randn(2, 3, 32, 32)
net = wrn_40_2(num_classes=100)
feats, logit = net(x, is_feat=True, preact=True)
for f in feats:
print(f.shape, f.min().item())
print(logit.shape)
for m in net.get_bn_before_relu():
if isinstance(m, nn.BatchNorm2d):
print('pass')
else:
print('warning')
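# Optional follow-up to the smoke test above: rough parameter counts for the
# named configurations, computed with plain PyTorch. Purely illustrative.
if __name__ == '__main__':
    def count_params(m):
        return sum(p.numel() for p in m.parameters())
    for name, ctor in [('wrn_16_1', wrn_16_1), ('wrn_16_2', wrn_16_2),
                       ('wrn_40_1', wrn_40_1), ('wrn_40_2', wrn_40_2)]:
        print(name, count_params(ctor(num_classes=100)))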
| 32.274854
| 116
| 0.5896
|
9fcc0fbfc747d41bc472fd4dcfe6a1f9b62850c8
| 625
|
py
|
Python
|
scripts/examples/OpenMV/02-Board-Control/led_control.py
|
mzaian/openmv
|
08cc9c1d27eb130585a559aed027c3db233ccb70
|
[
"MIT"
] | 6
|
2017-05-24T06:51:37.000Z
|
2020-07-04T16:36:29.000Z
|
usr/examples/02-Board-Control/led_control.py
|
guohuijiang1234/openmv
|
9c3e9109ec1a2b68bb34107557945bfa379d3a0e
|
[
"MIT"
] | 2
|
2018-11-13T06:47:24.000Z
|
2019-05-15T06:21:55.000Z
|
usr/examples/02-Board-Control/led_control.py
|
guohuijiang1234/openmv
|
9c3e9109ec1a2b68bb34107557945bfa379d3a0e
|
[
"MIT"
] | 11
|
2020-06-03T10:12:28.000Z
|
2020-06-05T16:02:40.000Z
|
# LED Control Example
#
# This example shows how to control your OpenMV Cam's built-in LEDs. Use your
# smart phone's camera to see the IR LEDs.
import time
from pyb import LED
red_led = LED(1)
green_led = LED(2)
blue_led = LED(3)
ir_led = LED(4)
def led_control(x):
if (x&1)==0: red_led.off()
elif (x&1)==1: red_led.on()
if (x&2)==0: green_led.off()
elif (x&2)==2: green_led.on()
if (x&4)==0: blue_led.off()
elif (x&4)==4: blue_led.on()
if (x&8)==0: ir_led.off()
elif (x&8)==8: ir_led.on()
while(True):
for i in range(16):
led_control(i)
time.sleep(500)
| 22.321429
| 77
| 0.5904
|
90aaaa4c367b53bf53f5ba795c5a75e5aa5fb392
| 629
|
py
|
Python
|
MOOSE_web/manage.py
|
XYChang-cxy/MOOSE
|
6cca5a1ed34451e6c525ca480b59ce504bba3f49
|
[
"Apache-2.0"
] | 1
|
2021-09-02T09:32:41.000Z
|
2021-09-02T09:32:41.000Z
|
MOOSE_web/manage.py
|
XYChang-cxy/MOOSE
|
6cca5a1ed34451e6c525ca480b59ce504bba3f49
|
[
"Apache-2.0"
] | null | null | null |
MOOSE_web/manage.py
|
XYChang-cxy/MOOSE
|
6cca5a1ed34451e6c525ca480b59ce504bba3f49
|
[
"Apache-2.0"
] | 3
|
2021-08-30T08:34:51.000Z
|
2021-09-30T13:14:31.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MOOSE_web.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.590909
| 73
| 0.683625
|
4d8d55a1c7a9033906bf8e089429c38dd8ec8b2d
| 3,090
|
py
|
Python
|
src/transformers/data/metrics/__init__.py
|
ShnitzelKiller/transformers
|
ce50305e5b8c8748b81b0c8f5539a337b6a995b9
|
[
"Apache-2.0"
] | 1
|
2021-02-21T05:00:40.000Z
|
2021-02-21T05:00:40.000Z
|
src/transformers/data/metrics/__init__.py
|
ShnitzelKiller/transformers
|
ce50305e5b8c8748b81b0c8f5539a337b6a995b9
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/data/metrics/__init__.py
|
ShnitzelKiller/transformers
|
ce50305e5b8c8748b81b0c8f5539a337b6a995b9
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logger = logging.getLogger(__name__)
try:
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
_has_sklearn = True
except (AttributeError, ImportError) as e:
logger.warning("To use data.metrics please install scikit-learn. See https://scikit-learn.org/stable/index.html")
_has_sklearn = False
def is_sklearn_available():
return _has_sklearn
if _has_sklearn:
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
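# ----------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original file).
# It assumes numpy and scikit-learn are installed; the arrays are made up.
if __name__ == "__main__":
    import numpy as np
    if is_sklearn_available():
        example_preds = np.array([1, 0, 1, 1])
        example_labels = np.array([1, 0, 0, 1])
        # Toy input: 3 of 4 predictions match, so "acc" should come out as 0.75.
        print(glue_compute_metrics("mrpc", example_preds, example_labels))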
| 34.333333
| 117
| 0.63754
|
9e8e829174c0ac17a3eb71305381930e0ff42ebf
| 1,622
|
py
|
Python
|
team_9/cocos/test/test_shuffletiles_fullscreen.py
|
Donnyvdm/dojo19
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
[
"BSD-3-Clause"
] | 1
|
2019-09-15T18:59:49.000Z
|
2019-09-15T18:59:49.000Z
|
team_9/cocos/test/test_shuffletiles_fullscreen.py
|
Donnyvdm/dojo19
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
[
"BSD-3-Clause"
] | null | null | null |
team_9/cocos/test/test_shuffletiles_fullscreen.py
|
Donnyvdm/dojo19
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 0.49, s, t 0.51, s, t 2.49, s, t 2.51, s, t 2.99, s, t 3.1, s, q"
tags = "CallFunc, Delay, fullscreen"
import pyglet
import cocos
from cocos.director import director
import cocos.actions as ac
from cocos.layer import *
from pyglet import gl
class BackgroundLayer(cocos.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
gl.glColor4ub(255, 255, 255, 255)
gl.glPushMatrix()
self.transform()
self.img.blit(0,0)
gl.glPopMatrix()
def toggle_fullscreen():
director.window.set_fullscreen( not director.window.fullscreen )
def main():
director.init( resizable=True, fullscreen=False )
main_scene = cocos.scene.Scene()
main_scene.add( BackgroundLayer(), z=0 )
action1 = ac.ShuffleTiles( grid=(16,8), seed=2, duration=3 ) + ac.StopGrid()
action2 = ( ac.Delay(0.5) +
ac.CallFunc(toggle_fullscreen) +
ac.Delay(2.0) +
ac.CallFunc(toggle_fullscreen))
combo_action = action1 | action2
# In real code after a sequence of grid actions the StopGrid() action
    # should be called. Omitted here to stay in the last grid action render.
main_scene.do( combo_action )
director.run (main_scene)
if __name__ == '__main__':
main()
| 30.037037
| 89
| 0.669544
|
2b4ded0918295075b66aae94d0d81d4efdc11cb0
| 4,282
|
py
|
Python
|
clinica/pipelines/t1_volume_existing_template/t1_volume_existing_template_cli.py
|
Raelag0112/clinica
|
d301b1abfdf4d3b62dc4b329622340795ae51ef8
|
[
"MIT"
] | null | null | null |
clinica/pipelines/t1_volume_existing_template/t1_volume_existing_template_cli.py
|
Raelag0112/clinica
|
d301b1abfdf4d3b62dc4b329622340795ae51ef8
|
[
"MIT"
] | null | null | null |
clinica/pipelines/t1_volume_existing_template/t1_volume_existing_template_cli.py
|
Raelag0112/clinica
|
d301b1abfdf4d3b62dc4b329622340795ae51ef8
|
[
"MIT"
] | null | null | null |
from typing import List, Optional, Tuple
import click
from clinica.pipelines import cli_param
pipeline_name = "t1-volume-existing-template"
@click.command(name=pipeline_name)
@cli_param.argument.bids_directory
@cli_param.argument.caps_directory
@cli_param.argument.group_label
@cli_param.option_group.pipeline_specific_options
@cli_param.option.smooth
@cli_param.option_group.common_pipelines_options
@cli_param.option.subjects_sessions_tsv
@cli_param.option.working_directory
@cli_param.option.n_procs
@cli_param.option_group.advanced_pipeline_options
@cli_param.option.tissue_classes
@cli_param.option.tissue_probability_maps
@cli_param.option.dont_save_warped_unmodulated
@cli_param.option.save_warped_modulated
@cli_param.option.dartel_tissues
@cli_param.option.tissues
@cli_param.option.modulate
@cli_param.option.voxel_size
def cli(
bids_directory: str,
caps_directory: str,
group_label: str,
smooth: List[int] = (8,),
tissue_classes: List[int] = (1, 2, 3),
tissue_probability_maps: Optional[str] = None,
dont_save_warped_unmodulated: bool = False,
save_warped_modulated: bool = False,
dartel_tissues: List[int] = (1, 2, 3),
tissues: List[int] = (1, 2, 3),
modulate: bool = True,
voxel_size: Tuple[float, float, float] = (1.5, 1.5, 1.5),
subjects_sessions_tsv: Optional[str] = None,
working_directory: Optional[str] = None,
n_procs: Optional[int] = None,
) -> None:
"""Volume-based processing of T1-weighted MR images using an existing DARTEL template.
    GROUP_LABEL is a user-defined identifier to target a specific group of subjects.
https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/T1_Volume/
"""
from clinica.utils.stream import cprint
from ..t1_volume_dartel2mni import t1_volume_dartel2mni_cli
from ..t1_volume_parcellation import t1_volume_parcellation_cli
from ..t1_volume_register_dartel import t1_volume_register_dartel_cli
from ..t1_volume_tissue_segmentation import t1_volume_tissue_segmentation_cli
cprint(
"The t1-volume-existing-template pipeline is divided into 4 parts:\n"
"\tt1-volume-tissue-segmentation pipeline: "
"Tissue segmentation, bias correction and spatial normalization to MNI space\n"
"\tt1-volume-register-dartel pipeline: "
"Inter-subject registration using an existing DARTEL template\n"
"\tt1-volume-dartel2mni pipeline: "
"DARTEL template to MNI\n"
"\tt1-volume-parcellation pipeline: "
"Atlas statistics"
)
cprint("Part 1/4: Running t1-volume-segmentation pipeline")
t1_volume_tissue_segmentation_cli.cli(
bids_directory=bids_directory,
caps_directory=caps_directory,
tissue_classes=tissue_classes,
dartel_tissues=dartel_tissues,
tissue_probability_maps=tissue_probability_maps,
dont_save_warped_unmodulated=dont_save_warped_unmodulated,
save_warped_modulated=save_warped_modulated,
subjects_sessions_tsv=subjects_sessions_tsv,
working_directory=working_directory,
n_procs=n_procs,
)
cprint("Part 2/4: Running t1-volume-register-dartel pipeline")
t1_volume_register_dartel_cli.cli(
bids_directory=bids_directory,
caps_directory=caps_directory,
group_label=group_label,
tissues=tissues,
subjects_sessions_tsv=subjects_sessions_tsv,
working_directory=working_directory,
n_procs=n_procs,
)
cprint("Part 3/4: Running t1-volume-dartel2mni pipeline")
t1_volume_dartel2mni_cli.cli(
bids_directory=bids_directory,
caps_directory=caps_directory,
group_label=group_label,
smooth=smooth,
tissues=tissues,
modulate=modulate,
voxel_size=voxel_size,
subjects_sessions_tsv=subjects_sessions_tsv,
working_directory=working_directory,
n_procs=n_procs,
)
cprint("Part 4/4: Running t1-volume-parcellation pipeline")
t1_volume_parcellation_cli.cli(
caps_directory=caps_directory,
group_label=group_label,
subjects_sessions_tsv=subjects_sessions_tsv,
working_directory=working_directory,
n_procs=n_procs,
)
if __name__ == "__main__":
pass
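# ----------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original file): the
# click command defined above is an ordinary callable object, so it can be
# exercised programmatically, e.g. via Click's test runner to inspect its help
# text. A real run passes the BIDS directory, the CAPS directory and
# GROUP_LABEL as positional arguments, as declared by the decorators above.
def _example_show_help() -> None:
    from click.testing import CliRunner
    print(CliRunner().invoke(cli, ["--help"]).output)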
| 35.098361
| 90
| 0.740075
|
a24387d1a361099f998ee8e7551a8c8655da4735
| 2,906
|
py
|
Python
|
hh_problems.py
|
Bekt/hh-problems-bot
|
b263f3ba3a5fa2190a6d7f9cdc28cc75423bbc15
|
[
"MIT"
] | 3
|
2015-01-10T22:36:35.000Z
|
2016-01-26T06:47:45.000Z
|
hh_problems.py
|
Bekt/hh-problems-bot
|
b263f3ba3a5fa2190a6d7f9cdc28cc75423bbc15
|
[
"MIT"
] | null | null | null |
hh_problems.py
|
Bekt/hh-problems-bot
|
b263f3ba3a5fa2190a6d7f9cdc28cc75423bbc15
|
[
"MIT"
] | null | null | null |
"""Twitter Bot for HH Problems Facebook group.
This script periodically updates the Twitter account @hh_problems
with posts from the HH Hackers Problems Facebook group.
Potential enhancements:
- Handle posts with pictures.
- Link back to the FB post.
Twitter URL: https://twitter.com/hh_problems
FB URL: https://www.facebook.com/groups/hhproblems/
Code: https://github.com/Bekt/hh-problems-bot
"""
import facebook
import tweepy
import time
from datetime import datetime
from credentials import (
consumer_key,
consumer_secret,
access_token,
access_token_secret,
app_access_token
)
class HHProbs(object):
hh_tag = 'HH_Problems: '
# http://www.facebook.com/groups/hhproblems/
fb_group_id = '291381824396182'
def __init__(self):
self._tw_api = None
self._fb_api = None
@property
def tw_api(self):
if self._tw_api is None:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
self._tw_api = tweepy.API(auth)
return self._tw_api
@property
def fb_api(self):
if self._fb_api is None:
self._fb_api = facebook.GraphAPI(app_access_token)
return self._fb_api
def run(self):
posts = self.fb_posts()
posts = filter(self._filt, posts)
self.tweet_posts(posts)
def tweet_posts(self, posts):
recent = self.last_tweet()
for post in posts:
if not recent or post['created_time'] > recent.created_at:
try:
print(post['message'])
self.tweet(post['message'])
# Don't make Twitter mad.
time.sleep(10)
except:
pass
def last_tweet(self):
tweet = self.tw_api.home_timeline(count=1)
return tweet[0] if tweet else None
def tweet(self, text):
status = (self.hh_tag + text)[:140]
self.tw_api.update_status(status)
def fb_posts(self, limit=20):
posts = self.fb_api.get_connections(self.fb_group_id, 'feed',
limit=limit,
fields='message,created_time')
# Order by created_time (default is updated_time).
for post in posts['data']:
created_date = datetime.strptime(post['created_time'][:-5],
'%Y-%m-%dT%H:%M:%S')
post['created_time'] = created_date
posts['data'].sort(key=lambda x: x['created_time'])
return posts['data']
@classmethod
def _filt(self, post):
# 200 is random, anything that long is
# most likely a rant or spam?
msg = post['message']
return len(msg) < 200
if __name__ == '__main__':
hh = HHProbs()
hh.run()
| 28.490196
| 74
| 0.592567
|
41455a37d08561fcbe50e851aff1bc2640e02852
| 3,654
|
py
|
Python
|
server/axeshome/storage.py
|
kevinmcguinness/axes-home
|
d41b7e605c8b0d3d6f274eccdd59b3fce794a28b
|
[
"Apache-2.0"
] | 2
|
2015-06-25T03:10:03.000Z
|
2016-02-22T11:34:08.000Z
|
server/axeshome/storage.py
|
kevinmcguinness/axes-home
|
d41b7e605c8b0d3d6f274eccdd59b3fce794a28b
|
[
"Apache-2.0"
] | null | null | null |
server/axeshome/storage.py
|
kevinmcguinness/axes-home
|
d41b7e605c8b0d3d6f274eccdd59b3fce794a28b
|
[
"Apache-2.0"
] | null | null | null |
#
# (c) Copyright 2015 Kevin McGuinness. All Rights Reserved.
#
"""
Filesystem storage objects.
"""
import os
import uuid
from flask import current_app as app
from flask import request
from urlparse import urlparse, urlunsplit, urljoin
class MediaStorage(object):
"""
    Represents an HTTP-accessible storage location on a locally accessible
filesystem path. The constructor reads the variables MEDIA_PATH and
MEDIA_URL from the flask application config.
"""
def __init__(self):
self.media_url = app.config['MEDIA_URL']
self.media_path = app.config['MEDIA_PATH']
def get_path(self, fn):
"""
Get the path to the given filename on the locally accessible filesystem.
"""
return os.path.join(self.media_path, fn)
def get_url(self, fn):
"""
Get the relative URL for the given filename.
"""
return self.media_url + fn
def get_absolute_url(self, fn):
"""
Returns the absolute URL for the given filename. Assumes that any
        relative MEDIA_URL has the same hostname as the incoming request's
url_root and that there is a request object available.
"""
url = self.get_url(fn)
if urlparse(url).scheme:
# Already have an absolute URL specified in MEDIA_URL
abs_url = url
else:
# URL has no scheme component. Assume it is relative to incoming
# request URL
if url.startswith('/'):
# URL path is absolute: use location relative to host base
parsed_req_url = urlparse(request.url_root)
base_url = urlunsplit((parsed_req_url.scheme,
parsed_req_url.netloc, '', '', ''))
else:
# URL path is relative: use location relative to url root
base_url = request.url_root
abs_url = urljoin(base_url, url)
return abs_url
def create_unique_filename(self, extension=None):
"""
Generate a unique filename with given extension.
"""
fn = unicode(uuid.uuid4())
if extension is not None:
fn += extension
return fn
def exists(self, fn):
"""
Returns true if a file with the given name exists.
"""
return os.path.isfile(self.get_path(fn))
def save(self, data, fn=None, extension=None, abs_url=True):
"""
Save a file to storage, returning the url and full path to the file as
a tuple.
Parameters
----------
data : str like
Data to write to the file.
fn : str like
Name of file to use. If None, then a unique filename is generated
extension : str like
Extension to append to filename.
abs_url : bool
If true, an absolute url is returned. See self.get_absolute_url
Returns
-------
url : str like
URL
full_path: str like
Full path to file on disk
"""
if fn is None:
fn = self.create_unique_filename(extension)
full_path = self.get_path(fn)
with open(full_path, 'w') as f:
f.write(data)
if abs_url:
url = self.get_absolute_url(fn)
else:
url = self.get_url(fn)
return url, full_path
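# ----------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original file).
# MediaStorage reads MEDIA_PATH and MEDIA_URL from the current Flask app, so an
# application/request context is needed; the paths and host below are made up.
def _example_save():
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config['MEDIA_PATH'] = '/tmp/media'
    demo_app.config['MEDIA_URL'] = '/media/'
    if not os.path.isdir(demo_app.config['MEDIA_PATH']):
        os.makedirs(demo_app.config['MEDIA_PATH'])
    with demo_app.test_request_context('http://example.com/'):
        storage = MediaStorage()
        url, full_path = storage.save('hello', extension='.txt')
        print(url)
        print(full_path)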
| 29.467742
| 80
| 0.54543
|
a2cced969984c8ff2bd1ef8c18a09c95fd4c033d
| 4,851
|
py
|
Python
|
docs/conf.py
|
Taschenbergerm/bgg_miner
|
f20057ec2e85e20ad08f92514ce38c699e8c85eb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Taschenbergerm/bgg_miner
|
f20057ec2e85e20ad08f92514ce38c699e8c85eb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Taschenbergerm/bgg_miner
|
f20057ec2e85e20ad08f92514ce38c699e8c85eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# bgg_scraper documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import bgg_scraper
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bgg_scraper'
copyright = "2021, Marvin Tascheberger"
author = "Marvin Tascheberger"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = bgg_scraper.__version__
# The full version, including alpha/beta/rc tags.
release = bgg_scraper.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bgg_scraperdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bgg_scraper.tex',
'bgg_scraper Documentation',
'Marvin Tascheberger', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bgg_scraper',
'bgg_scraper Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bgg_scraper',
'bgg_scraper Documentation',
author,
'bgg_scraper',
'One line description of project.',
'Miscellaneous'),
]
| 29.760736
| 77
| 0.688724
|
719aca76012ca54c1d1e0a7bc009f8a83b3d368e
| 7,515
|
py
|
Python
|
python/tink/streaming_aead/_encrypting_stream.py
|
hazaelsan/tink
|
9be3f690611bfab85284624de46ad6f36119d8bb
|
[
"Apache-2.0"
] | 1
|
2019-01-08T16:38:47.000Z
|
2019-01-08T16:38:47.000Z
|
python/tink/streaming_aead/_encrypting_stream.py
|
bhaskatripathi/tink
|
841802758ae6fadb6bcb60e9053fb4c169549f29
|
[
"Apache-2.0"
] | 1
|
2020-08-18T16:42:11.000Z
|
2020-08-25T16:13:11.000Z
|
python/tink/streaming_aead/_encrypting_stream.py
|
bhaskatripathi/tink
|
841802758ae6fadb6bcb60e9053fb4c169549f29
|
[
"Apache-2.0"
] | 1
|
2020-11-30T06:38:02.000Z
|
2020-11-30T06:38:02.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A file-like object that encrypts data written to it.
It writes the ciphertext to a given other file-like object, which can later be
decrypted and read using a DecryptingStream wrapper.
"""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
import errno
import io
from typing import Iterable, BinaryIO
from tink import core
from tink.cc.pybind import tink_bindings
from tink.util import file_object_adapter
class EncryptingStream(io.BufferedIOBase):
"""A file-like object which wraps writes to an underlying file-like object.
It encrypts any data written to it, and writes the ciphertext to the wrapped
object.
The additional method position() returns the number of written plaintext
bytes.
Writes to an EncryptingStream may be partial - it is important to check the
return value of write().
The close() method indicates that the message is complete, and will write a
final ciphertext block to signify end of message. The context manager will
only call this close() method on a normal exit - if an exception is raised
inside the context manager which causes it to exit early, the close() method
will not be called, and the ciphertext will not be complete.
"""
def __init__(self, stream_aead, ciphertext_destination: BinaryIO,
associated_data: bytes):
"""Create a new EncryptingStream.
Args:
stream_aead: C++ StreamingAead primitive from which a C++ EncryptingStream
will be obtained.
ciphertext_destination: A writable file-like object to which ciphertext
bytes will be written.
associated_data: The associated data to use for encryption. This must
match the associated_data used for decryption.
"""
super(EncryptingStream, self).__init__()
self._closed = False
self._bytes_written = 0
# Create FileObjectAdapter
if not ciphertext_destination.writable():
raise ValueError('ciphertext_destination must be writable')
cc_ciphertext_destination = file_object_adapter.FileObjectAdapter(
ciphertext_destination)
# Get OutputStreamAdapter of C++ EncryptingStream
self._output_stream_adapter = self._get_output_stream_adapter(
stream_aead, associated_data, cc_ciphertext_destination)
@staticmethod
@core.use_tink_errors
def _get_output_stream_adapter(cc_primitive, aad, destination):
"""Implemented as a separate method to ensure correct error transform."""
return tink_bindings.new_cc_encrypting_stream(
cc_primitive, aad, destination)
@core.use_tink_errors
def write(self, b: bytes) -> int:
"""Write the given buffer to the stream.
May use multiple calls to the underlying file object's write() method.
Returns:
The number of bytes written, which will always be the length of b in
bytes.
Raises:
BlockingIOError: if the write could not be fully completed, with
characters_written set to the number of bytes successfully written.
TinkError: if there was a permanent error.
Args:
b: The buffer to write.
"""
self._check_not_closed()
if not isinstance(b, (bytes, memoryview, bytearray)):
raise TypeError('a bytes-like object is required, not {}'.format(
type(b).__name__))
# One call to OutputStreamAdapter.write() may call next() multiple times
# on the C++ EncryptingStream, but will perform a partial write if there is
# a temporary write error. Permanent write errors will bubble up as
# exceptions.
written = self._output_stream_adapter.write(b)
if written < 0:
raise core.TinkError('Number of written bytes was negative')
self._bytes_written += written
if written < len(b):
raise io.BlockingIOError(errno.EAGAIN,
'Write could not complete without blocking.',
written)
elif written > len(b):
raise core.TinkError(
'Number of written bytes was greater than length of bytes given')
return written
def writelines(self, lines: Iterable[bytes]) -> None:
"""Write a list of lines to the stream.
Line separators are not added, so it is usual for each of the lines
provided to have a line separator at the end.
Args:
lines: An iterable of buffers to write to the stream.
"""
self._check_not_closed()
for line in lines:
self.write(line)
### Internal ###
# TODO(b/141344377) Use parent class _checkClosed() instead
def _check_not_closed(self, msg=None):
"""Internal: raise a ValueError if file is closed."""
if self.closed:
raise ValueError('I/O operation on closed file.' if msg is None else msg)
### Positioning ###
def position(self) -> int:
"""Returns total number of written plaintext bytes."""
return self._bytes_written
### Flush and close ###
def flush(self) -> None:
"""Flush write buffers.
This method has no effect.
"""
self._check_not_closed()
return
@core.use_tink_errors
def close(self) -> None:
"""Flush and close the stream.
This has no effect on a closed stream.
"""
if self.closed:
return
self.flush()
self._output_stream_adapter.close()
self._closed = True
def __del__(self):
"""Destructor. Calls flush()."""
try:
# We deliberately don't close the file here, since we don't know if the
# user was really done writing or if there was an error.
self.flush()
except Exception: # pylint: disable=broad-except
pass
### Inquiries ###
def writable(self) -> bool:
"""Indicates whether object was opened for writing.
Returns:
Whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return True
@property
def closed(self) -> bool:
"""Indicates if the file has been closed.
Returns:
True if and only if the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self._closed
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
"""Context management protocol. Calls close() if there was no exception."""
# Calling close() signifies that the message is complete - we should not
# do this if there was an exception.
# Instead, we let the destructors be called, which should lead to sufficient
# cleanup on the other end, and if ciphertext_destination calls close() in
# __del__ (as IOBase does) then the underlying file descriptor should also
# be closed eventually.
if exc_type is None:
self.close()
### Iterator ###
def __iter__(self):
"""Iterator API."""
raise io.UnsupportedOperation('Cannot iterate an EncryptingStream')
def __next__(self):
"""Iterator API."""
raise io.UnsupportedOperation('Cannot iterate an EncryptingStream')
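# ----------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). `my_stream_aead` is a placeholder for a C++ StreamingAead primitive
# obtained from a keyset handle elsewhere, and the output path is made up;
# nothing below runs at import time.
def _example_usage(my_stream_aead) -> None:
  with open('/tmp/ciphertext.bin', 'wb') as ciphertext_destination:
    with EncryptingStream(my_stream_aead, ciphertext_destination, b'aad') as enc:
      written = enc.write(b'plaintext chunk')
      # write() may be partial, so callers are expected to check the count.
      assert written == len(b'plaintext chunk')
  # A normal exit from the inner `with` calls close(), which appends the final
  # ciphertext block; an exception inside the block deliberately skips close().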
| 33.105727
| 80
| 0.703393
|
d1d1b4f8b20f706612ecba857864ca3bb763b05c
| 2,358
|
py
|
Python
|
vanir/tests/vm/__init__.py
|
VanirLab/VOS
|
e6cb3e4e391e583e98d548292b5f272320d38cc4
|
[
"MIT"
] | null | null | null |
vanir/tests/vm/__init__.py
|
VanirLab/VOS
|
e6cb3e4e391e583e98d548292b5f272320d38cc4
|
[
"MIT"
] | null | null | null |
vanir/tests/vm/__init__.py
|
VanirLab/VOS
|
e6cb3e4e391e583e98d548292b5f272320d38cc4
|
[
"MIT"
] | null | null | null |
import jinja2
import unittest.mock
import vanir.tests
class TestVMM(object):
# pylint: disable=too-few-public-methods
def __init__(self, offline_mode=False):
self.offline_mode = offline_mode
self.xs = unittest.mock.Mock()
@property
def libvirt_conn(self):
import libvirt
raise libvirt.libvirtError('phony error')
class TestHost(object):
# pylint: disable=too-few-public-methods
def __init__(self):
self.memory_total = 1000 * 1024
self.no_cpus = 4
class TestVMsCollection(dict):
def get_vms_connected_to(self, vm):
return set()
def close(self):
self.clear()
class TestVolume(object):
def __init__(self, pool):
self.pool = pool
self.size = 0
self.source = None
class TestPool(object):
def init_volume(self, *args, **kwargs):
return TestVolume(self)
class TestApp(vanir.tests.TestEmitter):
labels = {1: vanir.Label(1, '0xcc0000', 'red')}
check_updates_vm = False
def get_label(self, label):
# pylint: disable=unused-argument
if label in self.labels:
return self.labels[label]
for l in self.labels.values():
if l.name == label:
return l
raise KeyError(label)
def get_pool(self, pool):
return self.pools[pool]
def __init__(self):
super(TestApp, self).__init__()
self.vmm = TestVMM()
self.host = TestHost()
default_pool = TestPool()
self.pools = {
'default': default_pool,
default_pool: default_pool,
'linux-kernel': TestPool(),
}
self.default_pool_volatile = 'default'
self.default_pool_root = 'default'
self.default_pool_private = 'default'
self.default_pool_kernel = 'linux-kernel'
self.default_qrexec_timeout = 60
self.default_netvm = None
self.domains = TestVMsCollection()
#: jinja2 environment for libvirt XML templates
self.env = jinja2.Environment(
loader=jinja2.FileSystemLoader([
'templates',
'/etc/vanir/templates',
'/usr/share/vanir/templates',
]),
undefined=jinja2.StrictUndefined)
| 29.111111
| 56
| 0.585242
|
e867f6ff437a6e918547da6253bf74f64a5d16a0
| 508
|
py
|
Python
|
UVRatio/utils.py
|
chrisdevito/UVRatio
|
10411e07d2de47ee760996db484a8185323b63cc
|
[
"MIT"
] | null | null | null |
UVRatio/utils.py
|
chrisdevito/UVRatio
|
10411e07d2de47ee760996db484a8185323b63cc
|
[
"MIT"
] | null | null | null |
UVRatio/utils.py
|
chrisdevito/UVRatio
|
10411e07d2de47ee760996db484a8185323b63cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from UVRatio.packages.Qt import QtWidgets
def show():
"""
    Shows the UI in Maya.
:raises: None
:return: None
:rtype: NoneType
"""
from UVRatio.ui.ui import UI
from UVRatio.ui import utils
# prevent duplicate windows
for widget in QtWidgets.QApplication.instance().topLevelWidgets():
if widget.objectName() == 'UVRatio':
widget.close()
break
win = UI(utils.get_maya_window())
win.show()
| 19.538462
| 70
| 0.608268
|
c13ea9bdf51cd95313f87f5f246ca385df82b519
| 653
|
py
|
Python
|
Primer_LR_5/Primer3.py
|
tamaranesterenko/Python_LR_5-2
|
7f103f409c8889911cb456558c50e59a5f537b60
|
[
"MIT"
] | null | null | null |
Primer_LR_5/Primer3.py
|
tamaranesterenko/Python_LR_5-2
|
7f103f409c8889911cb456558c50e59a5f537b60
|
[
"MIT"
] | null | null | null |
Primer_LR_5/Primer3.py
|
tamaranesterenko/Python_LR_5-2
|
7f103f409c8889911cb456558c50e59a5f537b60
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='List of commands')
list_parser = subparsers.add_parser('list', help='list contents')
list_parser.add_argument('dirname', action='store', help='Directory to list')
create_parser = subparsers.add_parser('create', help='Create a directory')
create_parser.add_argument('dirname', action='store', help='new directory to create')
create_parser.add_argument(
'--read-only',
default=False,
action='store_true',
help='Set permissions to prevent writing to the directory'
)
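# Illustrative parse sketch (editor's addition, not part of the original file):
# feeding sample argv lists to the parser shows how each subcommand maps onto a
# namespace; the directory names are placeholders.
if __name__ == '__main__':
    print(parser.parse_args(['list', '/tmp']))
    print(parser.parse_args(['create', '/tmp/new_dir', '--read-only']))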
| 26.12
| 86
| 0.707504
|
cccac1ee74e8811951dab1669a60f91a7f129c79
| 1,890
|
py
|
Python
|
test/unit/modules/test_sfp_twitter.py
|
khiemtq-cyber/spiderfoot
|
66e671918853b0334931fd2fbabad0096d506726
|
[
"MIT"
] | null | null | null |
test/unit/modules/test_sfp_twitter.py
|
khiemtq-cyber/spiderfoot
|
66e671918853b0334931fd2fbabad0096d506726
|
[
"MIT"
] | null | null | null |
test/unit/modules/test_sfp_twitter.py
|
khiemtq-cyber/spiderfoot
|
66e671918853b0334931fd2fbabad0096d506726
|
[
"MIT"
] | null | null | null |
import pytest
import unittest
from modules.sfp_twitter import sfp_twitter
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleTwitter(unittest.TestCase):
def test_opts(self):
module = sfp_twitter()
self.assertEqual(len(module.opts), len(module.optdescs))
def test_setup(self):
sf = SpiderFoot(self.default_options)
module = sfp_twitter()
module.setup(sf, dict())
def test_watchedEvents_should_return_list(self):
module = sfp_twitter()
self.assertIsInstance(module.watchedEvents(), list)
def test_producedEvents_should_return_list(self):
module = sfp_twitter()
self.assertIsInstance(module.producedEvents(), list)
def test_handleEvent_event_data_social_media_not_twitter_profile_should_not_return_event(self):
sf = SpiderFoot(self.default_options)
module = sfp_twitter()
module.setup(sf, dict())
target_value = 'spiderfoot.net'
target_type = 'INTERNET_NAME'
target = SpiderFootTarget(target_value, target_type)
module.setTarget(target)
def new_notifyListeners(self, event):
raise Exception(f"Raised event {event.eventType}: {event.data}")
module.notifyListeners = new_notifyListeners.__get__(module, sfp_twitter)
event_type = 'ROOT'
event_data = 'example data'
event_module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
event_type = 'SOCIAL_MEDIA'
event_data = 'Not Twitter: example_username'
event_module = 'example module'
source_event = evt
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
result = module.handleEvent(evt)
self.assertIsNone(result)
| 31.5
| 99
| 0.695238
|
78d70be32652c34469d4536be930b1f01e82ad56
| 107
|
py
|
Python
|
pybind11_examples/01_py-list_cpp-vector/test.py
|
Red-Eyed/machine-learning-notes
|
aa6de49e6d3f312ef6dd8f6e93ba4259413be1e4
|
[
"MIT"
] | 1
|
2020-04-27T16:45:20.000Z
|
2020-04-27T16:45:20.000Z
|
pybind11_examples/01_py-list_cpp-vector/test.py
|
Red-Eyed/pybind11_examples
|
aa6de49e6d3f312ef6dd8f6e93ba4259413be1e4
|
[
"MIT"
] | null | null | null |
pybind11_examples/01_py-list_cpp-vector/test.py
|
Red-Eyed/pybind11_examples
|
aa6de49e6d3f312ef6dd8f6e93ba4259413be1e4
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("build")
import example
A = [1., 2., 3., 4.]
B = example.modify(A)
print(B)
| 9.727273
| 24
| 0.616822
|
6cf59355e9a872b39749f130c9a6e0f90d32425c
| 5,404
|
py
|
Python
|
iceworm/engine/connectors/base.py
|
wrmsr0/iceworm
|
09431bb3cdc4f6796aafca41e37d42ebe0ddfeef
|
[
"BSD-3-Clause"
] | null | null | null |
iceworm/engine/connectors/base.py
|
wrmsr0/iceworm
|
09431bb3cdc4f6796aafca41e37d42ebe0ddfeef
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T14:29:19.000Z
|
2021-01-19T14:34:27.000Z
|
iceworm/engine/connectors/base.py
|
wrmsr0/iceworm
|
09431bb3cdc4f6796aafca41e37d42ebe0ddfeef
|
[
"BSD-3-Clause"
] | 1
|
2020-12-31T22:29:52.000Z
|
2020-12-31T22:29:52.000Z
|
"""
TODO:
- virtual vs physical tables
- physical tables requiring refresh
- incremental vs total physical tables
- materialized vs unmaterialized virtuals
- ** dataclass interop ** - dc->tbl, query
- just return object refs? jsonize?
- support snowflake json garbage on objects
- piecewise conf? csv mounts? ...
- *no*, but could have a csv_mount rule before ctor phase that rewrites the sole ctor cfg ele
- ctors/conns as ctxmgrs?
- HeapConnector - writable
- simpler dumber connector? where does sf query jit live?
- conns that support sql pushdown vs not..
- 'union'? 'overlay'? wrap one by heap/pg to give txns?
Def conns:
- sql - snow + pg (+ incl internal state storage pg, 'self')
- kafka
- dynamo
- system - conns, nodes, running ops, etc
- mongo
- redis
Alt conns:
- salesforce
- pagerduty
- jira
- gsheets
- slack
- github
- pandas? :/
"""
import abc
import typing as ta
from omnibus import check
from omnibus import configs as cfgs
from omnibus import dataclasses as dc
from omnibus import defs
from omnibus import lang
from omnibus.serde import mapping as sm
from .. import elements as els
from ... import metadata as md
from ...types import QualifiedName
ConnectorT = ta.TypeVar('ConnectorT', bound='Connector')
ConnectorConfigT = ta.TypeVar('ConnectorConfigT', bound='Connector.Config')
Row = ta.Mapping[str, ta.Any]
Rows = ta.Iterable[Row]
class RowSource(lang.Abstract):
@abc.abstractmethod
def produce_rows(self) -> Rows:
raise NotImplementedError
class RowSink(lang.Abstract):
@abc.abstractmethod
def consume_rows(self, rows: ta.Iterable[Row]) -> None:
raise NotImplementedError
class ListRowSource(RowSource):
def __init__(self, rows: ta.Iterable[Row]) -> None:
super().__init__()
self._rows = list(rows)
@property
def rows(self) -> ta.List[Row]:
return self._rows
def produce_rows(self) -> Rows:
yield from self._rows
class ListRowSink(RowSink):
def __init__(self, rows: ta.Optional[ta.List[Row]] = None) -> None:
super().__init__()
self._rows = rows if rows is not None else []
@property
def rows(self) -> ta.List[Row]:
return self._rows
def __iter__(self) -> ta.Iterator[Row]:
return iter(self._rows)
def consume_rows(self, rows: ta.Iterable[Row]) -> None:
self._rows.extend(rows)
class Connector(ta.Generic[ConnectorT, ConnectorConfigT], cfgs.Configurable[ConnectorConfigT], lang.Abstract):
class Config(els.Element, cfgs.Config, abstract=True):
dc.metadata({
els.PhaseFrozen: els.PhaseFrozen(els.Phases.CONNECTORS),
sm.Name: lambda cls: lang.decamelize(cfgs.get_impl(cls).__name__),
})
id: els.Id = dc.field(check=lambda s: isinstance(s, els.Id) and s)
def __init__(self, config: ConnectorConfigT) -> None:
super().__init__(config)
defs.repr('id')
@property
def config(self) -> ConnectorConfigT:
return self._config
@property
def id(self) -> els.Id:
return self._config.id
def close(self) -> None:
pass
@abc.abstractmethod
def connect(self) -> 'Connection[ConnectorT]':
raise NotImplementedError
@classmethod
def of(cls, obj: ta.Union['Connector', Config]) -> 'Connector':
if isinstance(obj, Connector):
return obj
elif isinstance(obj, Connector.Config):
return check.isinstance(check.issubclass(cfgs.get_impl(obj), cls)(obj), Connector)
else:
raise TypeError(obj)
class Connection(lang.Abstract, ta.Generic[ConnectorT]):
def __init__(self, ctor: ConnectorT) -> None:
super().__init__()
self._ctor: ConnectorT = check.isinstance(ctor, Connector)
self._reflect_cache: ta.MutableMapping[QualifiedName, ta.Optional[md.Object]] = {}
defs.repr('ctor')
@property
def ctor(self) -> ConnectorT:
return self._ctor
def close(self) -> None:
pass
@abc.abstractmethod
def create_row_source(self, query: str) -> RowSource:
raise NotImplementedError
@abc.abstractmethod
def create_row_sink(self, table: QualifiedName) -> RowSink:
raise NotImplementedError
def reflect(self, names: ta.Optional[ta.Iterable[QualifiedName]] = None) -> ta.Mapping[QualifiedName, md.Object]:
if names is not None:
check.not_isinstance(names, (str, QualifiedName))
ret = {}
missing = set()
for name in names:
check.isinstance(name, QualifiedName)
try:
obj = self._reflect_cache[name]
except KeyError:
missing.add(name)
else:
if obj is not None:
ret[name] = obj
if missing:
new = self._reflect(missing)
for name, obj in new.items():
check.not_in(name, ret)
check.not_in(name, self._reflect_cache)
ret[name] = self._reflect_cache[name] = obj
return ret
else:
raise NotImplementedError
@abc.abstractmethod
def _reflect(self, names: ta.Optional[ta.Iterable[QualifiedName]] = None) -> ta.Mapping[QualifiedName, md.Object]:
raise NotImplementedError
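# ----------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original file): piping
# rows from the concrete in-memory source into the in-memory sink defined above.
# Real connectors hand out RowSource/RowSink implementations via
# Connection.create_row_source / Connection.create_row_sink instead.
def _example_row_pipe() -> ta.List[Row]:
    source = ListRowSource([{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}])
    sink = ListRowSink()
    sink.consume_rows(source.produce_rows())
    return sink.rows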
| 26.752475
| 118
| 0.63601
|
cfc9775d8a7fa615e40aa016fb91503a22f5ffec
| 468
|
py
|
Python
|
run_example.py
|
PederHA/vjemmie
|
e3742380d3ea06de90f8227a0934569f8fd02b5c
|
[
"MIT"
] | 1
|
2018-07-30T02:43:27.000Z
|
2018-07-30T02:43:27.000Z
|
run_example.py
|
PederHA/vjemmie
|
e3742380d3ea06de90f8227a0934569f8fd02b5c
|
[
"MIT"
] | 5
|
2020-09-20T14:07:28.000Z
|
2022-01-13T01:18:23.000Z
|
run_example.py
|
PederHA/vjemmie
|
e3742380d3ea06de90f8227a0934569f8fd02b5c
|
[
"MIT"
] | null | null | null |
from contextlib import suppress
from typing import List, Optional
from discord.ext.commands import Cog
import vjemmie
import botsecrets # Pass entire botsecrets module to vjemmie.run
try:
from mycogs import cogs # Define custom cogs in your own module
except ImportError:
cogs = []
def main(secrets, cogs: Optional[List[Cog]]=None) -> None:
vjemmie.run(secrets, cogs)
if __name__ == "__main__":
main(botsecrets, cogs)
| 22.285714
| 68
| 0.698718
|
545f80239700bc04088392d6ffae4915d878bac0
| 279
|
py
|
Python
|
55.jump-game.py
|
viewv/leetcode
|
b31e643846bb38978746342e3e3a94991178565a
|
[
"MIT"
] | 2
|
2018-02-26T09:12:19.000Z
|
2019-06-07T13:38:10.000Z
|
55.jump-game.py
|
viewv/leetcode
|
b31e643846bb38978746342e3e3a94991178565a
|
[
"MIT"
] | 1
|
2018-12-24T07:03:34.000Z
|
2018-12-24T07:03:34.000Z
|
55.jump-game.py
|
viewv/leetcode
|
b31e643846bb38978746342e3e3a94991178565a
|
[
"MIT"
] | 2
|
2018-12-24T07:01:03.000Z
|
2019-06-07T13:38:07.000Z
|
#
# @lc app=leetcode id=55 lang=python3
#
# [55] Jump Game
#
from typing import List
class Solution:
def canJump(self, nums: List[int]) -> bool:
m = 0
for i, n in enumerate(nums):
if i > m:
return False
m = max(m, i + n)
return True
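# Illustrative quick check (editor's addition, not part of the original file).
if __name__ == "__main__":
    assert Solution().canJump([2, 3, 1, 1, 4]) is True
    assert Solution().canJump([3, 2, 1, 0, 4]) is False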
| 17.4375
| 47
| 0.480287
|
77689c99dd2c9d439c9fcc82ed4e8a532a11254f
| 454
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/treemap/_hovertext.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/treemap/_hovertext.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/treemap/_hovertext.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="hovertext", parent_name="treemap", **kwargs):
super(HovertextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
| 34.923077
| 81
| 0.667401
|
c09e29b4ae3434c4696c23ae1a345b3dea9ea81e
| 220
|
py
|
Python
|
2 semester/AP/Practics/2022-02-09/3_2.py
|
kurpenok/Labs
|
069c92b7964a1445d093313b38ebdc56318d2a73
|
[
"MIT"
] | 1
|
2022-02-06T17:50:25.000Z
|
2022-02-06T17:50:25.000Z
|
2 semester/AP/Practics/2022-02-09/3_2.py
|
kurpenok/Labs
|
069c92b7964a1445d093313b38ebdc56318d2a73
|
[
"MIT"
] | null | null | null |
2 semester/AP/Practics/2022-02-09/3_2.py
|
kurpenok/Labs
|
069c92b7964a1445d093313b38ebdc56318d2a73
|
[
"MIT"
] | 1
|
2022-03-02T06:45:06.000Z
|
2022-03-02T06:45:06.000Z
|
count = 0
while True:
number = float(input("[>] Enter real number: "))
if number <= 36.6:
if number < 0:
count += 1
else:
break
print(f"[>] Count of negative numbers: {count}")
| 16.923077
| 52
| 0.518182
|
7b999fc2546db726625a098298be6a4befca9d6a
| 10,235
|
py
|
Python
|
lhotse/recipes/callhome_english.py
|
pzelasko/lhotse
|
41984467d2ead1dc69f418638b969e46f63308c7
|
[
"Apache-2.0"
] | 64
|
2020-04-27T14:55:15.000Z
|
2020-10-25T06:57:56.000Z
|
lhotse/recipes/callhome_english.py
|
pzelasko/lhotse
|
41984467d2ead1dc69f418638b969e46f63308c7
|
[
"Apache-2.0"
] | 85
|
2020-04-26T06:29:47.000Z
|
2020-10-19T20:28:52.000Z
|
lhotse/recipes/callhome_english.py
|
pzelasko/lhotse
|
41984467d2ead1dc69f418638b969e46f63308c7
|
[
"Apache-2.0"
] | 17
|
2020-06-19T06:26:33.000Z
|
2020-10-12T15:19:15.000Z
|
"""
About the Callhome American English
CALLHOME American English Speech was developed by the Linguistic Data
Consortium (LDC) and consists of 120 unscripted 30-minute telephone
conversations between native speakers of English.
All calls originated in North America; 90 of the 120 calls were placed
to various locations outside of North America, while the remaining 30 calls
were made within North America. Most participants called family members or
close friends.
This script supports setup of two different tasks -- either ASR or SRE
For ASR, the following LDC corpora are relevant
Speech : LDC97S42
Transcripts : LDC97T14
Lexicon : LDC97L20 (not actually used)
For SRE, this script prepares data for speaker diarization on a portion
of CALLHOME used in the 2000 NIST speaker recognition evaluation.
The 2000 NIST SRE data is required. LDC catalog number LDC2001S97.
"""
import tarfile
from collections import Counter
from decimal import Decimal, InvalidOperation
from pathlib import Path
from typing import Dict, Optional, Union
from tqdm.auto import tqdm
from lhotse import Recording, RecordingSet, SupervisionSegment, SupervisionSet
from lhotse.qa import fix_manifests, validate_recordings_and_supervisions
from lhotse.utils import Pathlike, check_and_rglob, urlretrieve_progress
def prepare_callhome_english(
audio_dir: Pathlike,
rttm_dir: Optional[Pathlike] = None,
transcript_dir: Optional[Pathlike] = None,
output_dir: Optional[Pathlike] = None,
absolute_paths: bool = False,
) -> Dict[str, Union[RecordingSet, SupervisionSet]]:
"""
Prepare manifests for the CallHome American English corpus.
We create two manifests: one with recordings, and the other one with text
supervisions.
    Depending on the value of transcript_dir, this will prepare either
* data for ASR task (expected LDC corpora ``LDC97S42`` and ``LDC97T14``)
* or the SRE task (expected corpus ``LDC2001S97``)
:param audio_dir: Path to ``LDC97S42``or ``LDC2001S97`` content
:param transcript_dir: Path to the ``LDC97T14`` content
:param rttm_dir: Path to the transcripts directory. If not provided,
the transcripts will be downloaded.
:param absolute_paths: Whether to return absolute or relative
(to the corpus dir) paths for recordings.
:return: A dict with manifests. The keys are:
``{'recordings', 'supervisions'}``.
"""
    # not sure if a deeper level of integration is possible,
# as SRE does not contain/respect the train/eval/test splits?
if transcript_dir is not None:
return prepare_callhome_english_asr(
audio_dir, transcript_dir, output_dir, absolute_paths
)
else:
return prepare_callhome_english_sre(
audio_dir, rttm_dir, output_dir, absolute_paths
)
def prepare_callhome_english_sre(
audio_dir: Pathlike,
rttm_dir: Optional[Pathlike] = None,
output_dir: Optional[Pathlike] = None,
absolute_paths: bool = False,
) -> Dict[str, Union[RecordingSet, SupervisionSet]]:
"""
    Prepare manifests for the CallHome American English portion used in the 2000 NIST SRE.
We create two manifests: one with recordings, and the other one with text
supervisions.
:param audio_dir: Path to ``LDC2001S97`` package.
:param rttm_dir: Path to the transcripts directory. If not provided,
the transcripts will be downloaded.
:param output_dir: Directory where the manifests should be written.
Can be omitted to avoid writing.
:param absolute_paths: Whether to return absolute or relative
(to the corpus dir) paths for recordings.
:return: A dict with manifests.
The keys are: ``{'recordings', 'supervisions'}``.
"""
if rttm_dir is None:
rttm_dir = download_callhome_metadata()
rttm_path = rttm_dir / "fullref.rttm"
supervisions = read_rttm(rttm_path)
audio_paths = check_and_rglob(audio_dir, "*.sph")
recordings = RecordingSet.from_recordings(
Recording.from_file(p, relative_path_depth=None if absolute_paths else 4)
for p in tqdm(audio_paths)
)
recordings, supervisions = fix_manifests(recordings, supervisions)
validate_recordings_and_supervisions(recordings, supervisions)
if output_dir is not None:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
recordings.to_json(output_dir / "recordings.json")
supervisions.to_json(output_dir / "supervisions.json")
return {"recordings": recordings, "supervisions": supervisions}
def prepare_callhome_english_asr(
audio_dir: Pathlike,
transcript_dir: Pathlike,
output_dir: Optional[Pathlike] = None,
absolute_paths: bool = False,
) -> Dict[str, Union[RecordingSet, SupervisionSet]]:
"""
Prepare manifests for the CallHome American English corpus.
We create two manifests: one with recordings, and the other one with text
supervisions.
:param audio_dir: Path to ``LDC97S42`` content
:param transcript_dir: Path to the ``LDC97T14`` content
:param output_dir: Directory where the manifests should be written.
Can be omitted to avoid writing.
:param absolute_paths: Whether to return absolute or relative
(to the corpus dir) paths for recordings.
:return: A dict with manifests. The keys are:
``{'recordings', 'supervisions'}``.
"""
audio_dir = Path(audio_dir)
transcript_dir = Path(transcript_dir)
manifests = {}
for split in ["evaltest", "train", "devtest"]:
audio_paths = check_and_rglob(
# The LDC distribution has a typo.
audio_dir / "data" / split.replace("evaltest", "evltest"),
"*.sph",
)
recordings = RecordingSet.from_recordings(
Recording.from_file(p, relative_path_depth=None if absolute_paths else 4)
for p in tqdm(audio_paths)
)
transcript_paths = check_and_rglob(
transcript_dir / "transcrpt" / split,
"*.txt",
)
# TODO: Add text normalization like in Kaldi recipe.
# Not doing this right now as it's not needed for VAD/diarization...
supervisions = []
for p in transcript_paths:
idx = 0
postprocessed_lines = list()
for line in p.read_text().splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
try:
start, end, spk, text = line.split(maxsplit=3)
duration = float(Decimal(end) - Decimal(start))
if duration <= 0:
continue
postprocessed_lines.append(line)
except InvalidOperation:
postprocessed_lines[-1] = postprocessed_lines[-1] + " " + line
except ValueError:
postprocessed_lines[-1] = postprocessed_lines[-1] + " " + line
for line in postprocessed_lines:
recording_id = p.stem
# example line:
# 19.33 21.18 B: %ah Tayyib
start, end, spk, text = line.split(maxsplit=3)
spk = spk.replace(":", "")
duration = float(Decimal(end) - Decimal(start))
if duration <= 0:
continue
start = float(start)
supervisions.append(
SupervisionSegment(
recording_id=recording_id,
start=start,
duration=duration,
channel=ord(spk[0]) - ord("A"),
speaker=f"{recording_id}_{spk:0>2s}",
id=f"{recording_id}_{spk:0>2s}_{idx:0>5d}",
text=text,
)
)
idx += 1
supervisions = SupervisionSet.from_segments(supervisions)
recordings, supervisions = fix_manifests(recordings, supervisions)
validate_recordings_and_supervisions(recordings, supervisions)
if output_dir is not None:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
recordings.to_file(
output_dir / f"callhome-english_recordings_{split}.jsonl.gz"
)
supervisions.to_file(
output_dir / f"callhome-english_supervisions_{split}.jsonl.gz"
)
manifests[split] = {"recordings": recordings, "supervisions": supervisions}
return manifests
def download_callhome_metadata(
target_dir: Pathlike = ".",
force_download: bool = False,
url: str = "http://www.openslr.org/resources/10/sre2000-key.tar.gz",
) -> Path:
target_dir = Path(target_dir)
sre_dir = target_dir / "sre2000-key"
if sre_dir.is_dir():
return sre_dir
target_dir.mkdir(parents=True, exist_ok=True)
tar_name = "sre2000-key.tar.gz"
tar_path = target_dir / tar_name
if force_download or not tar_path.is_file():
urlretrieve_progress(url, filename=tar_path, desc=f"Downloading {tar_name}")
with tarfile.open(tar_path) as tar:
tar.extractall(path=target_dir)
return sre_dir
def read_rttm(path: Pathlike) -> SupervisionSet:
lines = Path(path).read_text().splitlines()
sups = []
rec_cntr = Counter()
for line in lines:
_, recording_id, channel, start, duration, _, _, speaker, _, _ = line.split()
start, duration, channel = float(start), float(duration), int(channel)
if duration == 0.0:
continue
rec_cntr[recording_id] += 1
sups.append(
SupervisionSegment(
id=f"{recording_id}_{rec_cntr[recording_id]}",
recording_id=recording_id,
start=start,
duration=duration,
channel=channel,
speaker=f"{recording_id}_{speaker}",
language="English",
)
)
return SupervisionSet.from_segments(sups)
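# ----------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original file).
# The LDC corpus paths are placeholders; because transcript_dir is given, the
# ASR variant runs and writes per-split manifests into output_dir.
if __name__ == "__main__":
    manifests = prepare_callhome_english(
        audio_dir="/corpora/LDC97S42",
        transcript_dir="/corpora/LDC97T14",
        output_dir="data/manifests/callhome-english",
    )
    print(sorted(manifests))  # ['devtest', 'evaltest', 'train']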
| 38.622642
| 85
| 0.638495
|
e26fbf4d88f0c1ab9c4acb6520ef230923630359
| 1,672
|
py
|
Python
|
src/undistort_and_transform.py
|
snandasena/udacity-dl
|
7ea13ec7ebd992f1199f43bd5300782436ed71e5
|
[
"Apache-2.0"
] | 2
|
2020-12-04T11:52:01.000Z
|
2021-08-21T14:42:32.000Z
|
src/undistort_and_transform.py
|
snandasena/udacity-dl
|
7ea13ec7ebd992f1199f43bd5300782436ed71e5
|
[
"Apache-2.0"
] | null | null | null |
src/undistort_and_transform.py
|
snandasena/udacity-dl
|
7ea13ec7ebd992f1199f43bd5300782436ed71e5
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load( open( "../calibration_wide/wide_dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
img = cv2.imread("../calibration_wide/test_image2.png")
nx = 8
ny = 6
cv2.imshow("Original", img)
cv2.waitKey(1000)
def corners_unwrap(img, nx, ny, mtx, dist):
# Undistort using mtx and dist
undst = cv2.undistort(img, mtx, dist, None, mtx)
    # converting to grayscale
gray = cv2.cvtColor(undst, cv2.COLOR_BGR2GRAY)
# find chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
print(ret)
M = None
warped = np.copy(img)
if ret:
cv2.drawChessboardCorners(undst, (nx, ny), corners, ret)
img_size = (img.shape[1], img.shape[0])
src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
offset = 100
dst = np.float32([
[offset, offset], [img_size[0]-offset,offset],
[img_size[0]- offset, img_size[1] -offset],
[offset, img_size[1] - offset]])
        # given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(undst, M, img_size)
return warped, M
top_down, perspective_m = corners_unwrap(img, nx, ny, mtx,dist)
cv2.imshow("Original", img)
cv2.imshow("Undistot", top_down)
cv2.waitKey(0)
| 30.962963
| 83
| 0.65012
|
db135931c718a3e13615c99c33e1a306663fd3bb
| 2,899
|
py
|
Python
|
plugins/tell.py
|
MemorableUsername/skybot
|
f5a9ed3a38d89febfca9219a0f746f978639f3b5
|
[
"Unlicense"
] | null | null | null |
plugins/tell.py
|
MemorableUsername/skybot
|
f5a9ed3a38d89febfca9219a0f746f978639f3b5
|
[
"Unlicense"
] | null | null | null |
plugins/tell.py
|
MemorableUsername/skybot
|
f5a9ed3a38d89febfca9219a0f746f978639f3b5
|
[
"Unlicense"
] | null | null | null |
" tell.py: written by sklnd in July 2009"
" 2010.01.25 - modified by Scaevolus"
import time
from util import hook, timesince
def db_init(db):
    "check to see that our db has the tell table and return a db connection."
db.execute("create table if not exists tell"
"(user_to, user_from, message, chan, time,"
"primary key(user_to, message))")
db.commit()
return db
def get_tells(db, user_to):
return db.execute("select user_from, message, time, chan from tell where"
" user_to=lower(?) order by time",
(user_to.lower(),)).fetchall()
@hook.singlethread
@hook.event('PRIVMSG')
def tellinput(paraml, input=None, db=None, bot=None):
if 'showtells' in input.msg.lower():
return
db_init(db)
tells = get_tells(db, input.nick)
if tells:
user_from, message, time, chan = tells[0]
reltime = timesince.timesince(time)
reply = "%s said %s ago in %s: %s" % (user_from, reltime, chan,
message)
if len(tells) > 1:
reply += " (+%d more, .showtells to view)" % (len(tells) - 1)
db.execute("delete from tell where user_to=lower(?) and message=?",
(input.nick, message))
db.commit()
input.notice(reply)
@hook.command(autohelp=False)
def showtells(inp, nick='', chan='', notice=None, db=None):
".showtells -- view all pending tell messages (sent in PM)."
db_init(db)
tells = get_tells(db, nick)
if not tells:
notice("You have no pending tells.")
return
for tell in tells:
user_from, message, time, chan = tell
past = timesince.timesince(time)
notice("%s said %s ago in %s: %s" % (user_from, past, chan, message))
db.execute("delete from tell where user_to=lower(?)",
(nick,))
db.commit()
@hook.command
def tell(inp, nick='', chan='', db=None):
".tell <nick> <message> -- relay <message> to <nick> when <nick> is around"
query = inp.split(' ', 1)
if len(query) != 2:
return tell.__doc__
user_to = query[0].lower()
message = query[1].strip()
user_from = nick
if chan.lower() == user_from.lower():
chan = 'a pm'
if user_to == user_from.lower():
return "No."
db_init(db)
if db.execute("select count() from tell where user_to=?",
(user_to,)).fetchone()[0] >= 5:
return "That person has too many things queued."
try:
db.execute("insert into tell(user_to, user_from, message, chan,"
"time) values(?,?,?,?,?)", (user_to, user_from, message,
chan, time.time()))
db.commit()
except db.IntegrityError:
return "Message has already been queued."
return "I'll pass that along."
| 27.349057
| 79
| 0.565022
|
d4efefb0c59b8befed0a3eb00155e4cf9605d8f0
| 79
|
py
|
Python
|
openmc_plasma_source/plotting/__init__.py
|
mdfaisal98/openmc-plasma-source
|
e55d61ce6d641f4d382ce298b6f6335cd46bc507
|
[
"MIT"
] | null | null | null |
openmc_plasma_source/plotting/__init__.py
|
mdfaisal98/openmc-plasma-source
|
e55d61ce6d641f4d382ce298b6f6335cd46bc507
|
[
"MIT"
] | null | null | null |
openmc_plasma_source/plotting/__init__.py
|
mdfaisal98/openmc-plasma-source
|
e55d61ce6d641f4d382ce298b6f6335cd46bc507
|
[
"MIT"
] | null | null | null |
from .plot_tokamak_source import plot_tokamak_source_3D, scatter_tokamak_source
| 79
| 79
| 0.924051
|
5c50c78daf5a441485f4d6502e78fc4f806f7528
| 2,447
|
py
|
Python
|
vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py
|
yebinama/vmware-nsx
|
5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py
|
yebinama/vmware-nsx
|
5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py
|
yebinama/vmware-nsx
|
5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4
|
[
"Apache-2.0"
] | 1
|
2019-06-21T18:07:53.000Z
|
2019-06-21T18:07:53.000Z
|
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver
from vmware_nsx.plugins.nsx_v.vshield import edge_dynamic_routing_driver
from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver
from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks
from vmware_nsx.plugins.nsx_v.vshield import vcns
from vmware_nsx.services.lbaas.nsx_v.v2 import (
edge_loadbalancer_driver_v2 as lbaas_v2)
LOG = logging.getLogger(__name__)
class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver,
lbaas_v2.EdgeLoadbalancerDriverV2,
edge_firewall_driver.EdgeFirewallDriver,
edge_dynamic_routing_driver.EdgeDynamicRoutingDriver):
def __init__(self, callbacks):
super(VcnsDriver, self).__init__()
self.callbacks = callbacks
self.vcns_uri = cfg.CONF.nsxv.manager_uri
self.vcns_user = cfg.CONF.nsxv.user
self.vcns_passwd = cfg.CONF.nsxv.password
self.ca_file = cfg.CONF.nsxv.ca_file
self.insecure = cfg.CONF.nsxv.insecure
self.deployment_container_id = cfg.CONF.nsxv.deployment_container_id
self._pid = None
self._task_manager = None
self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd,
self.ca_file, self.insecure)
@property
def task_manager(self):
if (self._task_manager is None or
self._pid != os.getpid()):
LOG.debug("Creating task manager")
self._pid = os.getpid()
interval = cfg.CONF.nsxv.task_status_check_interval
self._task_manager = tasks.TaskManager(interval)
LOG.debug("Starting task manager")
self._task_manager.start()
return self._task_manager
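# --- Illustrative sketch (editor's addition, not vmware_nsx API) --------------
# The task_manager property above lazily (re)creates the manager whenever the
# current process id changes, so a worker forked after initialisation builds
# its own manager instead of reusing the parent's. A generic, self-contained
# version of that pattern (class name and factory argument are hypothetical;
# it relies on the os module already imported at the top of this file):
class _PerProcessLazy(object):
    def __init__(self, factory):
        self._factory = factory          # callable that builds the real object
        self._pid = None
        self._obj = None
    def get(self):
        if self._obj is None or self._pid != os.getpid():
            self._pid = os.getpid()      # remember which process built it
            self._obj = self._factory()  # rebuild after a fork
        return self._obj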
| 39.467742
| 78
| 0.706988
|
e625991e91ce92a86e5a56c39371578e5a61c06a
| 10,180
|
py
|
Python
|
sdk/python/pulumi_azure_native/devtestlab/v20160515/get_global_schedule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/devtestlab/v20160515/get_global_schedule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/devtestlab/v20160515/get_global_schedule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetGlobalScheduleResult',
'AwaitableGetGlobalScheduleResult',
'get_global_schedule',
]
@pulumi.output_type
class GetGlobalScheduleResult:
"""
A schedule.
"""
def __init__(__self__, created_date=None, daily_recurrence=None, hourly_recurrence=None, id=None, location=None, name=None, notification_settings=None, provisioning_state=None, status=None, tags=None, target_resource_id=None, task_type=None, time_zone_id=None, type=None, unique_identifier=None, weekly_recurrence=None):
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if daily_recurrence and not isinstance(daily_recurrence, dict):
raise TypeError("Expected argument 'daily_recurrence' to be a dict")
pulumi.set(__self__, "daily_recurrence", daily_recurrence)
if hourly_recurrence and not isinstance(hourly_recurrence, dict):
raise TypeError("Expected argument 'hourly_recurrence' to be a dict")
pulumi.set(__self__, "hourly_recurrence", hourly_recurrence)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notification_settings and not isinstance(notification_settings, dict):
raise TypeError("Expected argument 'notification_settings' to be a dict")
pulumi.set(__self__, "notification_settings", notification_settings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if target_resource_id and not isinstance(target_resource_id, str):
raise TypeError("Expected argument 'target_resource_id' to be a str")
pulumi.set(__self__, "target_resource_id", target_resource_id)
if task_type and not isinstance(task_type, str):
raise TypeError("Expected argument 'task_type' to be a str")
pulumi.set(__self__, "task_type", task_type)
if time_zone_id and not isinstance(time_zone_id, str):
raise TypeError("Expected argument 'time_zone_id' to be a str")
pulumi.set(__self__, "time_zone_id", time_zone_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_identifier and not isinstance(unique_identifier, str):
raise TypeError("Expected argument 'unique_identifier' to be a str")
pulumi.set(__self__, "unique_identifier", unique_identifier)
if weekly_recurrence and not isinstance(weekly_recurrence, dict):
raise TypeError("Expected argument 'weekly_recurrence' to be a dict")
pulumi.set(__self__, "weekly_recurrence", weekly_recurrence)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
The creation date of the schedule.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="dailyRecurrence")
def daily_recurrence(self) -> Optional['outputs.DayDetailsResponse']:
"""
If the schedule will occur once each day of the week, specify the daily recurrence.
"""
return pulumi.get(self, "daily_recurrence")
@property
@pulumi.getter(name="hourlyRecurrence")
def hourly_recurrence(self) -> Optional['outputs.HourDetailsResponse']:
"""
If the schedule will occur multiple times a day, specify the hourly recurrence.
"""
return pulumi.get(self, "hourly_recurrence")
@property
@pulumi.getter
def id(self) -> str:
"""
The identifier of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationSettings")
def notification_settings(self) -> Optional['outputs.NotificationSettingsResponse']:
"""
Notification settings.
"""
return pulumi.get(self, "notification_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The status of the schedule (i.e. Enabled, Disabled)
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> Optional[str]:
"""
The resource ID to which the schedule belongs
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter(name="taskType")
def task_type(self) -> Optional[str]:
"""
The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
"""
return pulumi.get(self, "task_type")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> Optional[str]:
"""
The time zone ID (e.g. Pacific Standard time).
"""
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> Optional[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
@property
@pulumi.getter(name="weeklyRecurrence")
def weekly_recurrence(self) -> Optional['outputs.WeekDetailsResponse']:
"""
If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
return pulumi.get(self, "weekly_recurrence")
class AwaitableGetGlobalScheduleResult(GetGlobalScheduleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGlobalScheduleResult(
created_date=self.created_date,
daily_recurrence=self.daily_recurrence,
hourly_recurrence=self.hourly_recurrence,
id=self.id,
location=self.location,
name=self.name,
notification_settings=self.notification_settings,
provisioning_state=self.provisioning_state,
status=self.status,
tags=self.tags,
target_resource_id=self.target_resource_id,
task_type=self.task_type,
time_zone_id=self.time_zone_id,
type=self.type,
unique_identifier=self.unique_identifier,
weekly_recurrence=self.weekly_recurrence)
def get_global_schedule(expand: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGlobalScheduleResult:
"""
A schedule.
:param str expand: Specify the $expand query. Example: 'properties($select=status)'
:param str name: The name of the schedule.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devtestlab/v20160515:getGlobalSchedule', __args__, opts=opts, typ=GetGlobalScheduleResult).value
return AwaitableGetGlobalScheduleResult(
created_date=__ret__.created_date,
daily_recurrence=__ret__.daily_recurrence,
hourly_recurrence=__ret__.hourly_recurrence,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
notification_settings=__ret__.notification_settings,
provisioning_state=__ret__.provisioning_state,
status=__ret__.status,
tags=__ret__.tags,
target_resource_id=__ret__.target_resource_id,
task_type=__ret__.task_type,
time_zone_id=__ret__.time_zone_id,
type=__ret__.type,
unique_identifier=__ret__.unique_identifier,
weekly_recurrence=__ret__.weekly_recurrence)
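# --- Illustrative usage sketch (editor's addition) -----------------------------
# How a Pulumi program might call the helper above. The resource group name and
# schedule name are placeholders; only get_global_schedule() itself and the
# $expand example from its docstring come from this module. The sketch is never
# executed here.
def _example_lookup():
    sched = get_global_schedule(
        name="LabVmsShutdown",                      # placeholder schedule name
        resource_group_name="my-resource-group",    # placeholder resource group
        expand="properties($select=status)")
    pulumi.export("scheduleStatus", sched.status)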
| 38.415094
| 324
| 0.65609
|
67e26c9ca6bb09f1714f1cd3c368850fad5ccc0c
| 113
|
py
|
Python
|
src/pycounts_rm/__init__.py
|
rezam747/pycounts_rm
|
08357dbc841b43b22cdc4d23313ab5aed0567bed
|
[
"MIT"
] | null | null | null |
src/pycounts_rm/__init__.py
|
rezam747/pycounts_rm
|
08357dbc841b43b22cdc4d23313ab5aed0567bed
|
[
"MIT"
] | 1
|
2022-01-16T23:47:46.000Z
|
2022-01-16T23:47:46.000Z
|
src/pycounts_rm/__init__.py
|
rezam747/pycounts_rm
|
08357dbc841b43b22cdc4d23313ab5aed0567bed
|
[
"MIT"
] | null | null | null |
# read version from installed package
from importlib.metadata import version
__version__ = version("pycounts_rm")
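# --- Illustrative variant (editor's addition, not part of the package) --------
# version() raises PackageNotFoundError when the package is imported from a
# source checkout that has not been installed. A common optional guard looks
# like the sketch below; the fallback string is a placeholder.
def _version_with_fallback():
    from importlib.metadata import PackageNotFoundError, version as _version
    try:
        return _version("pycounts_rm")
    except PackageNotFoundError:        # running from source, not installed
        return "0.0.0+unknown"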
| 37.666667
| 38
| 0.831858
|
40e852350ed1b9eabec234dcd74215b4a534bcc1
| 10,225
|
py
|
Python
|
client.py
|
Saldie883/AgarIO
|
1ee9850e5444c3182f11efd56e35d8fd90e785c8
|
[
"MIT"
] | null | null | null |
client.py
|
Saldie883/AgarIO
|
1ee9850e5444c3182f11efd56e35d8fd90e785c8
|
[
"MIT"
] | null | null | null |
client.py
|
Saldie883/AgarIO
|
1ee9850e5444c3182f11efd56e35d8fd90e785c8
|
[
"MIT"
] | null | null | null |
from pygame.locals import K_w, K_a, K_s, K_d, K_i
from math import sqrt, hypot
import pickle
import sys
import random
import codes as code
import socket
from tools import *
import settings
pygame.init()
SCREEN_SIZE = settings.SCREEN_SIZE
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.font.init()
font = pygame.font.SysFont('Comic Sans MS', 30)
font_s = pygame.font.SysFont('Comic Sans MS', 18)
clock = pygame.time.Clock()
elapsed = 0
RUNNING = True
OBJ_COLORS = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for _ in range(100)]
# ---------------------------
def send_to_server(client, data, server):
client.sendto(pickle.dumps(data), server)
def recieve_from_server(server):
try:
data, addr = server.recvfrom(settings.BUFSIZE)
return pickle.loads(data)
except BlockingIOError:
return None
# ---------------------------
class Player(pygame.sprite.Sprite):
def __init__(self, x, y, mass=20, color=None):
super(Player, self).__init__()
self.speed = 220
self.color = color if color else random.choice(OBJ_COLORS)
self.mass = mass
self.x = x
self.y = y
def update(self, time, camera, cells, borders, enemies):
# Moving
if pygame.mouse.get_focused():
mouse_position = pygame.mouse.get_pos()
player_position = camera.translate_coords(self)
x_offset = mouse_position[0]-player_position[0]
y_offset = mouse_position[1]-player_position[1]
bef = (self.x, self.y)
self.x += x_offset * time
for b in borders:
if collision(b.x, b.y, b.w, b.h, self.x, self.y, self.mass):
self.x = bef[0]
break
self.y += y_offset * time
for b in borders:
if collision(b.x, b.y, b.w, b.h, self.x, self.y, self.mass):
self.y = bef[1]
break
# Eating cells
for c in cells.copy():
if sqrt((c.x-self.x)**2 + (c.y-self.y)**2) < c.mass + self.mass:
self.mass += c.mass
cells.remove(c)
send_to_server(client, {"code": code.CELL_EAT, "cell": (c.x, c.y)}, (HOST, PORT))
# Eating enemies
for e in enemies:
            # If the enemy's center is inside the player's circle and the player's mass is at least 1.2x the enemy's, the player eats the enemy
if point_in_cirlce(self.x, self.y, self.mass, e.x, e.y) and self.mass / e.mass >= 1.2:
# Sending ENEMY_EAT event to server
send_to_server(client, {"code": code.ENEMY_EAT, "player": e.addr}, (HOST, PORT))
self.mass += e.mass * 0.8 # Adding enemy mass to player's mass
def draw(self, screen, camera):
pygame.draw.circle(screen, self.color, camera.translate_coords(self), self.mass)
class Enemy(pygame.sprite.Sprite):
def __init__(self, x, y, mass, addr, nickname, color=None):
super(Enemy, self).__init__()
self.mass = mass
self.x = x
self.y = y
self.color = color if color else random.choice(OBJ_COLORS)
self.addr = addr
self.nickname = nickname
def draw(self, screen, camera):
pygame.draw.circle(screen, self.color, camera.translate_coords(self), self.mass)
class Cell(pygame.sprite.Sprite):
def __init__(self, x, y, mass=0.5, color=None):
super(Cell, self).__init__()
self.mass = mass
self.x = x
self.y = y
self.color = color if color else random.choice(OBJ_COLORS)
def draw(self, screen, camera):
pygame.draw.circle(screen, self.color, camera.translate_coords(self), self.mass*10)
class Border(pygame.sprite.Sprite):
def __init__(self, x, y, w, h, color=(254, 113, 113)):
super(Border, self).__init__()
self.x = x
self.y = y
self.w = w
self.h = h
self.color = color
def draw(self, screen, camera):
pygame.draw.rect(screen, self.color, (*camera.translate_coords(self), self.w, self.h))
class Camera():
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
self.ow = w
self.oh = h
self.scale = 1
def translate_coords(self, obj):
return (obj.x-(self.x-(self.w//2)), obj.y-(self.y-(self.h//2)))
def update(self, player):
self.x = player.x
self.y = player.y
def deadscreen():
RUN = True
while RUN:
for event in pygame.event.get():
if event.type == pygame.QUIT:
RUN = False
# Drawing ------------------------------------------
screen.fill((35, 35, 35))
# Draw players nickname and score
nick = font.render(nickname, False, (255, 255, 255))
score = font.render(f"Score: {player.mass}", False, (255, 255, 255))
screen.blit(nick, (SCREEN_SIZE[0]//2 - nick.get_width()//2, (SCREEN_SIZE[1]//2 - nick.get_height()//2)-20))
screen.blit(score, (SCREEN_SIZE[0]//2 - score.get_width()//2, (SCREEN_SIZE[1]//2 - score.get_height()//2)+20))
pygame.display.flip()
elapsed = clock.tick(settings.FPS)/1000
pygame.display.set_caption("AgarIO")
pygame.quit()
# connecting to server ---------------
HOST, PORT = 'localhost', 7777
nickname = ""
while len(nickname) < 3 or len(nickname) > 20:
nickname = input("Enter nickname: ")
print(f"Connecting to {HOST}:{PORT}")
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.setblocking(False)
# Request server for connection (for getting init info)
send_to_server(client, {"code": code.CONNECT_REQUEST, "nickname": nickname}, (HOST, PORT))
connected = False
print("Waiting for connection")
while not connected:
data = recieve_from_server(client)
if not data:
continue
# Waiting for CONNECTED response from server
if data["code"] == code.CONNECTED:
print("Connected")
# Getting init info
MAPSIZE = data['mapsize']
player_coordinates = data['coords']
player_color = data['color']
player_address = data['addr']
cells = set() # Creating cells
enemies = set() # Creating enemies
connected = True
elif data['code'] == code.DATA_SEND:
pass
else:
print("Can't connect")
sys.exit()
# ------------------------------------
print("\n\n\tStarted game")
# Spawning player at position given by server
player = Player(*player_coordinates, color=player_color)
# Configuring the camera according to the player position
camera = Camera(player.x, player.y, *SCREEN_SIZE)
# Placing arena borders
borders = (Border(0, 0, 5, MAPSIZE), Border(0, 0, MAPSIZE, 5), Border(MAPSIZE, 0, 5, MAPSIZE), Border(0, MAPSIZE, MAPSIZE, 5))
scoreboard = None
# Game loop --------------------------------------------
while RUNNING:
for event in pygame.event.get():
if event.type == pygame.QUIT:
# If window closes - sending DISCONNECT code to server for handling this event
send_to_server(client, {"code": code.DISCONNECT}, (HOST, PORT))
RUNNING = False
# Getting relevant data -------------------------------
relevant_data = None
# Getting latest data
while True:
data = recieve_from_server(client)
if data:
if data['code'] == code.DIED:
deadscreen()
sys.exit()
relevant_data = data
else:
break
# working with latest data
if relevant_data:
cells = set()
for c in relevant_data['cells']:
cells.add(Cell(*c, color=relevant_data['cells'][c]))
enemies = set()
for enemy, info in relevant_data['players'].items():
if enemy == player_address:
continue
enemies.add(Enemy(*info['pos'], addr=enemy, color=info['color'], mass=info['mass'], nickname=info['nickname']))
scoreboard = relevant_data['scoreboard']
# Updating -----------------------------------------
player.update(elapsed, camera, cells, borders, enemies)
camera.update(player)
# Drawing ------------------------------------------
screen.fill((35, 35, 35))
player.draw(screen, camera)
for c in cells:
c.draw(screen, camera)
for e in enemies:
e.draw(screen, camera)
nickname_rendered = font_s.render(e.nickname, False, (255, 255, 255))
nick_coords = camera.translate_coords(e)
screen.blit(nickname_rendered, (nick_coords[0]-nickname_rendered.get_width()//2, nick_coords[1]-nickname_rendered.get_height()//2))
for b in borders:
b.draw(screen, camera)
# GUI
xc = font.render(f"X: {round(player.x, 1)}", False, (255, 255, 255))
yc = font.render(f"Y: {round(player.y, 1)}", False, (255, 255, 255))
score = font.render(f"Score: {player.mass}", False, (255, 255, 255))
nickname_rendered = font_s.render(nickname, False, (255, 255, 255))
offset_score = SCREEN_SIZE[1]-score.get_height()-10
offset_y = offset_score-yc.get_height()-10
offset_x = offset_y-xc.get_height()-10
screen.blit(xc, (10, offset_x))
screen.blit(yc, (10, offset_y))
screen.blit(score, (10, offset_score))
screen.blit(nickname_rendered, (SCREEN_SIZE[0]//2 - nickname_rendered.get_width()//2, SCREEN_SIZE[1]//2 - nickname_rendered.get_height()//2))
# Scoreboard
if scoreboard:
for pos, (nick, mass) in enumerate(scoreboard.items(), 1):
text = font.render(f"{pos}. {nick} - {mass}", False, (255, 255, 255))
screen.blit(text, (10, 10+(text.get_height()*(pos-1))))
# Sending relevant info to server ---------------------
send_to_server(client, {"code": code.DATA_SEND, "pos": (player.x, player.y), "color": player.color, "mass": player.mass, "nickname": nickname}, (HOST, PORT))
# --------------------------------------------------
pygame.display.flip()
elapsed = clock.tick(settings.FPS)/1000
pygame.display.set_caption("AgarIO")
pygame.display.set_caption(str(elapsed))
pygame.quit()
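# --- Worked example (editor's addition, never called by the game) -------------
# Camera.translate_coords() maps world coordinates to screen coordinates by
# centring the view on the camera: screen = world - (camera - screen_size//2).
# With an 800x600 camera centred on (1000, 500), an object at world (1010, 480)
# lands 10 px right of and 20 px above the screen centre (400, 300). The
# numbers below are hypothetical, chosen only to illustrate the arithmetic.
def _camera_math_example():
    cam = Camera(1000, 500, 800, 600)
    class _Obj(object):       # stand-in with the x/y attributes the camera reads
        x, y = 1010, 480
    assert cam.translate_coords(_Obj) == (410, 280)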
| 29.810496
| 161
| 0.582592
|
2e6301c047b88d43b7adeda6955bf91e9a23b7f3
| 1,730
|
py
|
Python
|
facility_management/facility_management/doctype/tenant_master/tenant_master.py
|
odoochain/facility_management
|
545146db4e58e90311934a9d39c77def2d2a3e70
|
[
"MIT"
] | null | null | null |
facility_management/facility_management/doctype/tenant_master/tenant_master.py
|
odoochain/facility_management
|
545146db4e58e90311934a9d39c77def2d2a3e70
|
[
"MIT"
] | null | null | null |
facility_management/facility_management/doctype/tenant_master/tenant_master.py
|
odoochain/facility_management
|
545146db4e58e90311934a9d39c77def2d2a3e70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model.rename_doc import rename_doc
from erpnext.accounts.party import get_dashboard_info
class TenantMaster(Document):
def onload(self):
info = get_dashboard_info('Customer', self.customer)
self.set_onload('dashboard_info', info)
def after_rename(self, old, new, merge):
if self.customer:
rename_doc('Customer', self.customer, new)
def validate(self):
_validate_tenant_name(self)
def after_insert(self):
_create_customer(self)
def _validate_tenant_name(tenant):
if tenant.tenant_type == 'Company':
tenant.first_name = ''
tenant.last_name = ''
else:
if not tenant.first_name or not tenant.last_name:
frappe.throw(_('Please input first and last names for the Individual'))
tenant.tenant_name = ' '.join([tenant.first_name, tenant.last_name])
def _create_customer(tenant):
customer_group = frappe.get_value('Selling Settings', None, 'customer_group')
territory = frappe.get_value('Selling Settings', None, 'territory')
if not (customer_group and territory):
frappe.throw(_('Please set default customer group and territory in Selling Settings'))
customer = frappe.get_doc({
'doctype': 'Customer',
'customer_name': tenant.tenant_name,
'customer_group': customer_group,
'territory': territory,
'customer_type': 'Individual'
})
customer.insert(ignore_permissions=True)
frappe.db.set_value('Tenant Master', tenant.name, 'customer', customer.name)
frappe.msgprint(_('Customer {0} is created').format(customer.name), alert=True)
| 30.892857
| 88
| 0.75896
|
bba3f9483d14424781a595d6fb41e41bf9182112
| 272
|
py
|
Python
|
backend/discord_events/on_message.py
|
MattBSG/Inktober
|
39b83d6f7beb87b7acdc93951849d22178dc9928
|
[
"MIT"
] | 1
|
2019-12-31T13:39:28.000Z
|
2019-12-31T13:39:28.000Z
|
backend/discord_events/on_message.py
|
MattBSG/Inktober
|
39b83d6f7beb87b7acdc93951849d22178dc9928
|
[
"MIT"
] | null | null | null |
backend/discord_events/on_message.py
|
MattBSG/Inktober
|
39b83d6f7beb87b7acdc93951849d22178dc9928
|
[
"MIT"
] | 4
|
2019-10-23T22:00:19.000Z
|
2020-09-29T17:54:18.000Z
|
from discord.ext import commands
from bot import Bot as Client
class OnMessageEvent(commands.Cog):
def __init__(self, bot):
self.bot: Client = bot
    @commands.Cog.listener()  # in discord.py 1.x, cog event listeners need this decorator to be registered
    async def on_message(self, message):
pass
def setup(bot):
bot.add_cog(OnMessageEvent(bot))
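# --- Illustrative wiring sketch (editor's addition) ----------------------------
# How the extension above is typically loaded by a discord.py 1.x bot. The
# dotted module path is inferred from this repository's layout and is an
# assumption, as is the bot instance passed in.
def _example_load(bot):
    bot.load_extension("backend.discord_events.on_message")  # calls setup(bot)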
| 17
| 40
| 0.6875
|
e4c7c734e70c9d8389853007cd0141b71799a2c1
| 2,448
|
py
|
Python
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeRenewalPriceRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeRenewalPriceRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeRenewalPriceRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeRenewalPriceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeRenewalPrice')
self.set_method('POST')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_PriceUnit(self):
return self.get_query_params().get('PriceUnit')
def set_PriceUnit(self,PriceUnit):
self.add_query_param('PriceUnit',PriceUnit)
def get_ResourceId(self):
return self.get_query_params().get('ResourceId')
def set_ResourceId(self,ResourceId):
self.add_query_param('ResourceId',ResourceId)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ResourceType(self):
return self.get_query_params().get('ResourceType')
def set_ResourceType(self,ResourceType):
self.add_query_param('ResourceType',ResourceType)
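# --- Illustrative usage sketch (editor's addition) -----------------------------
# Standard aliyun-python-sdk-core flow for this request. The credentials,
# region id and resource id are placeholders; do_action_with_exception() is the
# usual synchronous call and returns the raw JSON response bytes.
def _example_describe_renewal_price():
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = DescribeRenewalPriceRequest()
    request.set_ResourceId('i-xxxxxxxxxxxxxxxx')   # placeholder instance id
    request.set_Period(1)
    request.set_PriceUnit('Month')
    return client.do_action_with_exception(request)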
| 33.081081
| 73
| 0.768382
|
076839006c930dee73a64ef68a2e89c6825076fe
| 5,184
|
py
|
Python
|
resources/util/python/util.py
|
feliciatrinh/rhino
|
adafc0df0c8ed0451e610f2cdac562199515288e
|
[
"Apache-2.0"
] | null | null | null |
resources/util/python/util.py
|
feliciatrinh/rhino
|
adafc0df0c8ed0451e610f2cdac562199515288e
|
[
"Apache-2.0"
] | null | null | null |
resources/util/python/util.py
|
feliciatrinh/rhino
|
adafc0df0c8ed0451e610f2cdac562199515288e
|
[
"Apache-2.0"
] | null | null | null |
import os
import platform
import subprocess
def _pv_linux_machine(machine):
if machine == 'x86_64':
return machine
cpu_info = subprocess.check_output(['cat', '/proc/cpuinfo']).decode()
hardware_info = [x for x in cpu_info.split('\n') if 'Hardware' in x][0]
model_info = [x for x in cpu_info.split('\n') if 'model name' in x][0]
if 'BCM' in hardware_info:
if 'rev 7' in model_info:
return 'arm11'
elif 'rev 5' in model_info:
return 'cortex-a7'
elif 'rev 4' in model_info:
return 'cortex-a53'
elif 'AM33' in hardware_info:
return 'beaglebone'
else:
raise NotImplementedError('unsupported CPU:\n%s' % cpu_info)
def _pv_platform():
pv_system = platform.system()
if pv_system not in {'Darwin', 'Linux', 'Windows'}:
raise ValueError("unsupported system '%s'" % pv_system)
if pv_system == 'Linux':
pv_machine = _pv_linux_machine(platform.machine())
else:
pv_machine = platform.machine()
return pv_system, pv_machine
_PV_SYSTEM, _PV_MACHINE = _pv_platform()
_RASPBERRY_PI_MACHINES = {'arm11', 'cortex-a7', 'cortex-a53'}
def _library_path():
if _PV_SYSTEM == 'Darwin':
return os.path.join(os.path.dirname(__file__), '../../../lib/mac/x86_64/libpv_rhino.dylib')
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return os.path.join(os.path.dirname(__file__), '../../../lib/linux/x86_64/libpv_rhino.so')
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return os.path.join(
os.path.dirname(__file__),
'../../../lib/raspberry-pi/%s/libpv_rhino.so' % _PV_MACHINE)
elif _PV_MACHINE == 'beaglebone':
return os.path.join(os.path.dirname(__file__), '../../../lib/beaglebone/libpv_rhino.so')
elif _PV_SYSTEM == 'Windows':
return os.path.join(os.path.dirname(__file__), '../../../lib/windows/libpv_rhino.dll')
raise NotImplementedError('unsupported platform')
RHINO_LIBRARY_PATH = _library_path()
def _porcupine_library_path():
if _PV_SYSTEM == 'Darwin':
return os.path.join(os.path.dirname(__file__), '../../../resources/porcupine/lib/mac/x86_64/libpv_porcupine.dylib')
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return os.path.join(os.path.dirname(__file__), '../../../resources/porcupine/lib/linux/x86_64/libpv_porcupine.so')
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return os.path.join(
os.path.dirname(__file__),
'../../../resources/porcupine/lib/raspberry-pi/%s/libpv_porcupine.so' % _PV_MACHINE)
elif _PV_MACHINE == 'beaglebone':
return os.path.join(os.path.dirname(__file__), '../../../resources/porcupine/lib/beaglebone/libpv_porcupine.so')
elif _PV_SYSTEM == 'Windows':
return os.path.join(os.path.dirname(__file__), '../../../resources/porcupine/lib/windows/libpv_porcupine.dll')
raise NotImplementedError('unsupported platform')
PORCUPINE_LIBRARY_PATH = _porcupine_library_path()
RHINO_MODEL_FILE_PATH = os.path.join(os.path.dirname(__file__), '../../../lib/common/rhino_params.pv')
PORCUPINE_MODEL_FILE_PATH = os.path.join(os.path.dirname(__file__), '../../../resources/porcupine/lib/common/porcupine_params.pv')
def _context_files_subdir():
if _PV_SYSTEM == 'Darwin':
return 'mac'
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return 'linux'
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return 'raspberrypi'
elif _PV_MACHINE == 'beaglebone':
return 'beaglebone'
elif _PV_SYSTEM == 'Windows':
return 'windows'
raise NotImplementedError('unsupported platform')
def _context_file_paths():
context_files_dir =\
os.path.join(os.path.dirname(__file__), '../../../resources/contexts', _context_files_subdir())
res = dict()
for x in os.listdir(context_files_dir):
res[x.rsplit('_')[0]] = os.path.join(context_files_dir, x)
return res
CONTEXT_FILE_PATH = _context_file_paths()["coffee"]
def _keyword_files_subdir():
if _PV_SYSTEM == 'Darwin':
return 'mac'
elif _PV_SYSTEM == 'Linux':
if _PV_MACHINE == 'x86_64':
return 'linux'
elif _PV_MACHINE in _RASPBERRY_PI_MACHINES:
return 'raspberrypi'
elif _PV_MACHINE == 'beaglebone':
return 'beaglebone'
elif _PV_SYSTEM == 'Windows':
return 'windows'
raise NotImplementedError('unsupported platform')
def _keyword_file_paths():
keyword_files_dir =\
os.path.join(os.path.dirname(__file__), '../../../resources/porcupine/resources/keyword_files', _keyword_files_subdir())
res = dict()
for x in os.listdir(keyword_files_dir):
if '_compressed' not in x:
res[x.rsplit('_')[0]] = os.path.join(keyword_files_dir, x)
return res
KEYWORD_FILE_PATH = _keyword_file_paths()['hey pico']
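# --- Worked example (editor's addition) ----------------------------------------
# Both lookup helpers key each resource file by the text before its first
# underscore, which is how the "coffee" and "hey pico" keys above resolve to
# files. The file names below are hypothetical illustrations of that convention:
def _filename_key_example():
    assert "coffee_maker_linux.rhn".rsplit('_')[0] == "coffee"
    assert "hey pico_linux.ppn".rsplit('_')[0] == "hey pico"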
| 34.331126
| 131
| 0.626157
|
c6a28367db7d19aa95f578644a754a442c0808e3
| 7,302
|
py
|
Python
|
v0.8.2/routerrpc/router_pb2_grpc.py
|
lncm/lnd-proto
|
8caa6558efe043413560f807ef44b11699901d76
|
[
"MIT"
] | 2
|
2020-02-10T09:46:06.000Z
|
2020-04-09T19:30:30.000Z
|
v0.8.2/routerrpc/router_pb2_grpc.py
|
lncm/lnd-proto
|
8caa6558efe043413560f807ef44b11699901d76
|
[
"MIT"
] | 1
|
2020-01-24T18:58:23.000Z
|
2020-01-24T18:58:23.000Z
|
v0.8.2/routerrpc/router_pb2_grpc.py
|
lncm/lnd-proto
|
8caa6558efe043413560f807ef44b11699901d76
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from routerrpc import router_pb2 as routerrpc_dot_router__pb2
class RouterStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendPayment = channel.unary_stream(
'/routerrpc.Router/SendPayment',
request_serializer=routerrpc_dot_router__pb2.SendPaymentRequest.SerializeToString,
response_deserializer=routerrpc_dot_router__pb2.PaymentStatus.FromString,
)
self.TrackPayment = channel.unary_stream(
'/routerrpc.Router/TrackPayment',
request_serializer=routerrpc_dot_router__pb2.TrackPaymentRequest.SerializeToString,
response_deserializer=routerrpc_dot_router__pb2.PaymentStatus.FromString,
)
self.EstimateRouteFee = channel.unary_unary(
'/routerrpc.Router/EstimateRouteFee',
request_serializer=routerrpc_dot_router__pb2.RouteFeeRequest.SerializeToString,
response_deserializer=routerrpc_dot_router__pb2.RouteFeeResponse.FromString,
)
self.SendToRoute = channel.unary_unary(
'/routerrpc.Router/SendToRoute',
request_serializer=routerrpc_dot_router__pb2.SendToRouteRequest.SerializeToString,
response_deserializer=routerrpc_dot_router__pb2.SendToRouteResponse.FromString,
)
self.ResetMissionControl = channel.unary_unary(
'/routerrpc.Router/ResetMissionControl',
request_serializer=routerrpc_dot_router__pb2.ResetMissionControlRequest.SerializeToString,
response_deserializer=routerrpc_dot_router__pb2.ResetMissionControlResponse.FromString,
)
self.QueryMissionControl = channel.unary_unary(
'/routerrpc.Router/QueryMissionControl',
request_serializer=routerrpc_dot_router__pb2.QueryMissionControlRequest.SerializeToString,
response_deserializer=routerrpc_dot_router__pb2.QueryMissionControlResponse.FromString,
)
self.BuildRoute = channel.unary_unary(
'/routerrpc.Router/BuildRoute',
request_serializer=routerrpc_dot_router__pb2.BuildRouteRequest.SerializeToString,
response_deserializer=routerrpc_dot_router__pb2.BuildRouteResponse.FromString,
)
class RouterServicer(object):
# missing associated documentation comment in .proto file
pass
def SendPayment(self, request, context):
"""*
SendPayment attempts to route a payment described by the passed
PaymentRequest to the final destination. The call returns a stream of
payment status updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TrackPayment(self, request, context):
"""*
TrackPayment returns an update stream for the payment identified by the
payment hash.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EstimateRouteFee(self, request, context):
"""*
EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it
may cost to send an HTLC to the target end destination.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendToRoute(self, request, context):
"""*
SendToRoute attempts to make a payment via the specified route. This method
differs from SendPayment in that it allows users to specify a full route
manually. This can be used for things like rebalancing, and atomic swaps.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ResetMissionControl(self, request, context):
"""*
ResetMissionControl clears all mission control state and starts with a clean
slate.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def QueryMissionControl(self, request, context):
"""*
QueryMissionControl exposes the internal mission control state to callers.
It is a development feature.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BuildRoute(self, request, context):
"""*
BuildRoute builds a fully specified route based on a list of hop public
keys. It retrieves the relevant channel policies from the graph in order to
calculate the correct fees and time locks.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RouterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendPayment': grpc.unary_stream_rpc_method_handler(
servicer.SendPayment,
request_deserializer=routerrpc_dot_router__pb2.SendPaymentRequest.FromString,
response_serializer=routerrpc_dot_router__pb2.PaymentStatus.SerializeToString,
),
'TrackPayment': grpc.unary_stream_rpc_method_handler(
servicer.TrackPayment,
request_deserializer=routerrpc_dot_router__pb2.TrackPaymentRequest.FromString,
response_serializer=routerrpc_dot_router__pb2.PaymentStatus.SerializeToString,
),
'EstimateRouteFee': grpc.unary_unary_rpc_method_handler(
servicer.EstimateRouteFee,
request_deserializer=routerrpc_dot_router__pb2.RouteFeeRequest.FromString,
response_serializer=routerrpc_dot_router__pb2.RouteFeeResponse.SerializeToString,
),
'SendToRoute': grpc.unary_unary_rpc_method_handler(
servicer.SendToRoute,
request_deserializer=routerrpc_dot_router__pb2.SendToRouteRequest.FromString,
response_serializer=routerrpc_dot_router__pb2.SendToRouteResponse.SerializeToString,
),
'ResetMissionControl': grpc.unary_unary_rpc_method_handler(
servicer.ResetMissionControl,
request_deserializer=routerrpc_dot_router__pb2.ResetMissionControlRequest.FromString,
response_serializer=routerrpc_dot_router__pb2.ResetMissionControlResponse.SerializeToString,
),
'QueryMissionControl': grpc.unary_unary_rpc_method_handler(
servicer.QueryMissionControl,
request_deserializer=routerrpc_dot_router__pb2.QueryMissionControlRequest.FromString,
response_serializer=routerrpc_dot_router__pb2.QueryMissionControlResponse.SerializeToString,
),
'BuildRoute': grpc.unary_unary_rpc_method_handler(
servicer.BuildRoute,
request_deserializer=routerrpc_dot_router__pb2.BuildRouteRequest.FromString,
response_serializer=routerrpc_dot_router__pb2.BuildRouteResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'routerrpc.Router', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
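# --- Illustrative client sketch (editor's addition) ----------------------------
# Standard gRPC usage of the generated stub. The endpoint is a placeholder, a
# real lnd node would also require TLS and macaroon credentials (omitted here),
# and the payment_hash field name is assumed from this version's router.proto.
def _example_track_payment(payment_hash_bytes):
    channel = grpc.insecure_channel('localhost:10009')    # placeholder endpoint
    stub = RouterStub(channel)
    request = routerrpc_dot_router__pb2.TrackPaymentRequest(
        payment_hash=payment_hash_bytes)
    for status in stub.TrackPayment(request):             # server-streaming RPC
        print(status)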
| 43.987952
| 102
| 0.76075
|
4094e3445da3dd990afdf82466108662613c573d
| 557
|
py
|
Python
|
setup.py
|
leam-tech/renovation_logging
|
172be47b9b5f7a49590efcda3cf189687e514069
|
[
"MIT"
] | 1
|
2021-06-19T12:10:12.000Z
|
2021-06-19T12:10:12.000Z
|
setup.py
|
leam-tech/renovation_logging
|
172be47b9b5f7a49590efcda3cf189687e514069
|
[
"MIT"
] | null | null | null |
setup.py
|
leam-tech/renovation_logging
|
172be47b9b5f7a49590efcda3cf189687e514069
|
[
"MIT"
] | 1
|
2021-06-19T12:10:15.000Z
|
2021-06-19T12:10:15.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in renovation_logging/__init__.py
from renovation_logging import __version__ as version
setup(
name='renovation_logging',
version=version,
description='Log Server for all the sites',
author='Leam Technology Systems',
author_email='admin@leam.ae',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 26.52381
| 73
| 0.775583
|
db779befa3551685e5f59d446fe50a5c97a9505a
| 3,181
|
py
|
Python
|
multibuster/multibuster.py
|
aidanhall34/bbskiddytools
|
abd102d47d37182e0d5dae87e49e2819372ea258
|
[
"MIT"
] | null | null | null |
multibuster/multibuster.py
|
aidanhall34/bbskiddytools
|
abd102d47d37182e0d5dae87e49e2819372ea258
|
[
"MIT"
] | null | null | null |
multibuster/multibuster.py
|
aidanhall34/bbskiddytools
|
abd102d47d37182e0d5dae87e49e2819372ea258
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# This was written so I can run multiple gobuster instances at the same time.
# At this stage it only allows you to run a default gobuster dir scan; more options may come later.
# This should be run from a VPS to avoid IP bans / blocks from your home IP
import os
import argparse
import subprocess
import concurrent.futures
# Define the Arguments
arguments = argparse.ArgumentParser(description='Thanks for using multibuster, this will run multiple gobuster dir scans for a list of URLs\n')
arguments.add_argument("-u", "--urlfile", help="The location on the URL file", type=str)
arguments.add_argument("-w", "--wordlist", help="The location on the wordlist", type=str)
arguments.add_argument('-o', "--outfolder", help="The location of the output file", type=str)
args = arguments.parse_args()
# Validate the Args
def checkargs():
# Check the args are submitted
if args.urlfile == None or args.wordlist == None or args.outfolder == None:
print('Please supply a URL list, wordlist and output file location')
print('Your command should look like:\nmultigobuster.py -u <URL list> -w <Wordlist> -o <Output folder>')
exit()
# Check for the URL file
if os.path.exists(args.urlfile) != True:
print('I can\'t find the URL file')
exit()
# Check for the wordlist
if os.path.exists(args.wordlist) != True:
print('I can\'t find the wordlist')
exit()
# Check for the output file path
if os.path.exists(args.outfolder) != True:
print('I can\'t find the output file path')
exit()
checkargs()
# Manager the Gobuster session
def gobuster(URL):
# Remove the URL prefix from the string to make it work as a file name
if URL.startswith('http://'):
outfile = args.outfolder + '/' + URL.replace('http://', 'http')
if URL.startswith('https://'):
outfile = args.outfolder + '/' + URL.replace('https://', 'https')
URL = URL.strip()
# Start the session
print(f"Session for {URL} has started")
session = subprocess.run(f"gobuster dir -u {URL} -w {args.wordlist.strip()} -o {outfile.strip() + '.log'}",capture_output=True, shell=True)
# If the session dies
if session.returncode != 0:
print(f"Session for {URL} died")
# Write the error to the log file
with open(outfile.strip() + '.log', 'a') as f:
f.write(f"Session for {URL} died with error code {session.returncode}\n ERROR MESSAGE:\n {session.stderr.decode()}")
            f.close()  # was a bare "f.close" (a no-op); the with-block closes the file regardless
print(f"Session for {URL} has ended")
# Working with Multiprocessing
with open(args.urlfile, 'r') as URLS:
with concurrent.futures.ProcessPoolExecutor() as executor:
executor.map(gobuster, URLS)
#### BUGS
# None that I know of atm. I am running some full tests on local systems. If you find one, lemme know
# Features to add:
# Limited fuzzer support(future project)
# Gobuster custom extension support (.txt,.php,.xml...)
# Gobuster custom response code support (!429,!500,200)
# Gobuster better error handling (more than just writing it to a file)
# Test command with basic args
#multigobuster.py -u <URLfile> -w <Wordlist> -o <Outputfolder>
| 41.855263
| 143
| 0.675574
|
38699030a50367bd268c62d877c5a18fb30fe5ef
| 267
|
py
|
Python
|
audio.py
|
andrew12/osr-stuff
|
db573bcd0782ee7bbe70ab2109812dde8cc18eee
|
[
"MIT"
] | 27
|
2016-08-27T00:31:54.000Z
|
2020-07-21T07:37:25.000Z
|
audio.py
|
hannahherbig/osr-stuff
|
b73abf064d33db37d19f7d23d20218f8b49e93c1
|
[
"MIT"
] | 25
|
2021-04-12T05:59:35.000Z
|
2022-02-07T00:12:42.000Z
|
audio.py
|
hannahherbig/osr-stuff
|
b73abf064d33db37d19f7d23d20218f8b49e93c1
|
[
"MIT"
] | 8
|
2016-08-30T16:38:04.000Z
|
2020-05-13T10:44:33.000Z
|
import sys
import time
import pygame
pygame.mixer.init(44100)
pygame.mixer.music.load(sys.argv[1])
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
sys.stdout.write("%d\r" % pygame.mixer.music.get_pos())
time.sleep(0.01)
pygame.mixer.quit()
| 17.8
| 59
| 0.726592
|
aa541d5b8dbeef5faa34c18d83795f884bfab485
| 40,217
|
py
|
Python
|
VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/PomAdapter/InfPomAlignment.py
|
egraba/vbox_openbsd
|
6cb82f2eed1fa697d088cecc91722b55b19713c2
|
[
"MIT"
] | 1
|
2015-04-30T14:18:45.000Z
|
2015-04-30T14:18:45.000Z
|
VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/PomAdapter/InfPomAlignment.py
|
egraba/vbox_openbsd
|
6cb82f2eed1fa697d088cecc91722b55b19713c2
|
[
"MIT"
] | null | null | null |
VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/PomAdapter/InfPomAlignment.py
|
egraba/vbox_openbsd
|
6cb82f2eed1fa697d088cecc91722b55b19713c2
|
[
"MIT"
] | null | null | null |
## @file InfPomAlignment.py
# This file contains the adapter for converting INF parser objects to POM objects
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
InfPomAlignment
'''
##
# Import modules
#
import os.path
from Logger import StringTable as ST
import Logger.Log as Logger
from Library.String import FORMAT_INVALID
from Library.String import PARSER_ERROR
from Library.String import NormPath
from Library.String import GetSplitValueList
from Library.Misc import ConvertVersionToDecimal
from Library.Misc import GetHelpStringByRemoveHashKey
from Library.Misc import ConvertArchList
from Library.Parsing import GetPkgInfoFromDec
from Library import DataType as DT
from Library import GlobalData
from Object.POM import CommonObject
from Object.POM.ModuleObject import ModuleObject
from Object.POM.ModuleObject import ExternObject
from Object.POM.ModuleObject import HobObject
from Object.POM.ModuleObject import EventObject
from Object.POM.ModuleObject import BootModeObject
from Object.POM.ModuleObject import PackageDependencyObject
from Object.POM.ModuleObject import SourceFileObject
from Object.POM.ModuleObject import DepexObject
from Object.POM.ModuleObject import AsBuildLibraryClassObject
from Object.POM.ModuleObject import AsBuiltObject
from PomAdapter.InfPomAlignmentMisc import GenModuleHeaderUserExt
from PomAdapter.InfPomAlignmentMisc import GenBinaryData
from Parser import InfParser
## InfPomAlignment
#
# Inherit from ModuleObject
#
class InfPomAlignment(ModuleObject):
## Construct of InfPomAlignment
    # Skip means that UPT does not care about the syntax of the INF; this applies
    # to INF files that are not distributed during creation, or to INF files only
    # checked against the dependency rule during remove.
#
def __init__(self, FileName, WorkSpace=None, PackagePath='', Skip=False):
ModuleObject.__init__(self)
self.Parser = None
self.FileName = FileName
self.WorkSpace = WorkSpace
self.CombinePath = ''
self.LibModuleTypeList = []
self.FullPath = ''
self.ModulePath = ''
self.WorkspaceDir = " "
self.CustomMakefile = []
self.SetPackagePath(PackagePath)
#
# Call GenInfPomObjects function to fill POM object.
#
if Skip:
OrigConfig = Logger.SUPRESS_ERROR
Logger.SUPRESS_ERROR = True
self._GenInfPomObjects(Skip)
Logger.SUPRESS_ERROR = OrigConfig
else:
self._GenInfPomObjects(Skip)
##
# Generate all POM objects, the original input comes
# from INF parser's output
#
def _GenInfPomObjects(self, Skip):
#
# Call INF Parser to get information from INF file
#
self.Parser = InfParser.InfParser(self.FileName, self.WorkSpace)
self.FullPath = self.Parser.FullPath
self.GetFullPath()
self._GenModuleHeader()
#
# Call GenBinaries after Module Header for Binary INF consideration.
#
self._GenBinaries()
self._GenBuildOptions()
self._GenLibraryClasses()
self._GenPackages(Skip)
self._GenPcds()
self._GenSources()
self._GenUserExtensions()
self._GenGuidProtocolPpis(DT.TAB_GUIDS)
self._GenGuidProtocolPpis(DT.TAB_PROTOCOLS)
self._GenGuidProtocolPpis(DT.TAB_PPIS)
self._GenDepexes()
self._GenMiscFiles(self.FullPath, Skip)
## Convert [Defines] section content to InfDefObject
#
# Convert [Defines] section content to InfDefObject
#
# @param Defines The content under [Defines] section
# @param ModuleHeader An object of ModuleHeaderClass
# @param Arch The supported ARCH
#
def _GenModuleHeader(self):
Logger.Debug(2, "Generate ModuleHeader ...")
#
# Get all defines information form InfParser Object
#
RecordSet = self.Parser.InfDefSection.Defines
#
# Should only have one ArchString Item.
#
ArchString = RecordSet.keys()[0]
ArchList = GetSplitValueList(ArchString, ' ')
ArchList = ConvertArchList(ArchList)
HasCalledFlag = False
#
# Get data from Sdict()
#
ValueList = RecordSet[ArchString]
self.SetFileName(self.FileName)
self.SetFullPath(self.FullPath)
#
# The INF's filename (without the directory path or the extension)
# must be used for the value of the
# ModuleSurfaceArea.Header.Name element
#
self.SetName(os.path.splitext(os.path.basename(self.FileName))[0])
self.WorkspaceDir = " "
#
# CombinePath and ModulePath
#
PathCount = self.FullPath.upper().find(self.WorkSpace.upper()) + len(self.WorkSpace) + 1
CombinePath = self.FullPath[PathCount:]
self.SetCombinePath(CombinePath)
ModulePath = os.path.split(CombinePath)[0]
ModuleRelativePath = ModulePath
if self.GetPackagePath() != '':
ModuleRelativePath = ModulePath[ModulePath.find(self.GetPackagePath()) + len(self.GetPackagePath()) + 1:]
self.SetModulePath(ModuleRelativePath)
#
        # For Define Section Items.
#
DefineObj = ValueList
#
# Convert UEFI/PI version to decimal number
#
if DefineObj.GetUefiSpecificationVersion() != None:
__UefiVersion = DefineObj.GetUefiSpecificationVersion().GetValue()
__UefiVersion = ConvertVersionToDecimal(__UefiVersion)
self.SetUefiSpecificationVersion(str(__UefiVersion))
if DefineObj.GetPiSpecificationVersion() != None:
__PiVersion = DefineObj.GetPiSpecificationVersion().GetValue()
__PiVersion = ConvertVersionToDecimal(__PiVersion)
self.SetPiSpecificationVersion(str(__PiVersion))
SpecList = DefineObj.GetSpecification()
NewSpecList = []
for SpecItem in SpecList:
NewSpecList.append((SpecItem[0], ConvertVersionToDecimal(SpecItem[1])))
self.SetSpecList(NewSpecList)
#
# must exist items in INF define section
# MODULE_TYPE/BASE_NAME/INF_VERSION/FILE_GUID/VERSION_STRING
#
if DefineObj.GetModuleType() == None:
Logger.Error("InfParser", FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_SECTION_MUST_ITEM_NOT_EXIST % ("MODULE_TYPE"), File=self.FullPath)
else:
self.SetModuleType(DefineObj.GetModuleType().GetValue())
ModuleType = DefineObj.GetModuleType().GetValue()
if ModuleType:
#
# Drivers and applications are not allowed to have a MODULE_TYPE of "BASE". Only
                # libraries are permitted to have a MODULE_TYPE of "BASE".
#
if len(DefineObj.LibraryClass) == 0 and ModuleType == 'BASE':
Logger.Error("InfParser",
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULETYPE_INVALID,
File=self.FullPath,
Line=DefineObj.ModuleType.CurrentLine.LineNo,
ExtraData=DefineObj.ModuleType.CurrentLine.LineString)
self.LibModuleTypeList.append(ModuleType)
if DefineObj.GetBaseName() == None:
Logger.Error("InfParser", FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_SECTION_MUST_ITEM_NOT_EXIST % ("BASE_NAME"), File=self.FullPath)
else:
self.SetBaseName(DefineObj.GetBaseName().GetValue())
if DefineObj.GetInfVersion() == None:
Logger.Error("InfParser", FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_SECTION_MUST_ITEM_NOT_EXIST % ("INF_VERSION"), File=self.FullPath)
else:
self.SetVersion(DefineObj.GetInfVersion().GetValue())
if DefineObj.GetFileGuid() == None:
Logger.Error("InfParser", FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_SECTION_MUST_ITEM_NOT_EXIST % ("FILE_GUID"), File=self.FullPath)
else:
self.SetGuid(DefineObj.GetFileGuid().GetValue())
if DefineObj.GetVersionString() == None:
#
# VERSION_STRING is missing from the [Defines] section, tools must assume that the module's version is 0.
#
self.SetVersion('0')
else:
#
# Get version of INF
#
if DefineObj.GetVersionString().GetValue() != "":
#
# EDK2 inf
#
VersionString = DefineObj.GetVersionString().GetValue()
if len(VersionString) > 0:
VersionString = ConvertVersionToDecimal(VersionString)
self.SetVersion(VersionString)
else:
#
# EDK1 inf
#
Logger.Error("Parser", PARSER_ERROR, ST.ERR_INF_PARSER_NOT_SUPPORT_EDKI_INF, ExtraData=self.FullPath,
RaiseError=Logger.IS_RAISE_ERROR)
#
# if there is Shadow, Should judge the MODULE_TYPE in
# SEC, PEI_CORE and PEIM
#
if DefineObj.GetShadow():
ModuleTypeValue = DefineObj.GetModuleType().GetValue()
if not (ModuleTypeValue == 'SEC' or ModuleTypeValue == 'PEI_CORE' or ModuleTypeValue == 'PEIM'):
Logger.Error("InfParser", FORMAT_INVALID, ST.ERR_INF_PARSER_DEFINE_SHADOW_INVALID, File=self.FullPath)
if DefineObj.GetPcdIsDriver() != None:
self.SetPcdIsDriver(DefineObj.GetPcdIsDriver().GetValue())
#
# LIBRARY_CLASS
#
self._GenModuleHeaderLibClass(DefineObj, ArchList)
#
# CUSTOM_MAKEFILE
#
self.CustomMakefile = DefineObj.GetCustomMakefile()
#
# Externs in Defines section
# Only one define section, so just call once.
#
if not HasCalledFlag:
self._GenModuleHeaderExterns(DefineObj)
HasCalledFlag = True
#
# each module has only one module header
#
self.SetSupArchList(ArchList)
#
# Get Hob/BootMode/EventList information
#
self._GenSpecialComments()
#
        # put all define statements into user-extension sections
#
DefinesDictNew = GenModuleHeaderUserExt(DefineObj, ArchString)
if DefinesDictNew:
UserExtension = CommonObject.UserExtensionObject()
UserExtension.SetDefinesDict(DefinesDictNew)
UserExtension.SetIdentifier('DefineModifiers')
UserExtension.SetUserID('EDK2')
self.SetUserExtensionList(self.GetUserExtensionList() + [UserExtension])
#
# Get all meta-file header information
        # the record is a list of items formatted as:
# [LineValue, Arch, StartLine, ID, Third]
#
InfHeaderObj = self.Parser.InfHeader
#
# Put header information into POM object
#
self.SetAbstract(InfHeaderObj.GetAbstract())
self.SetDescription(InfHeaderObj.GetDescription())
self.SetCopyright(InfHeaderObj.GetCopyright())
self.SetLicense(InfHeaderObj.GetLicense())
## GenModuleHeaderLibClass
#
#
def _GenModuleHeaderLibClass(self, DefineObj, ArchList):
LibraryList = DefineObj.GetLibraryClass()
for LibraryItem in LibraryList:
Lib = CommonObject.LibraryClassObject()
Lib.SetLibraryClass(LibraryItem.GetLibraryName())
Lib.SetUsage(DT.USAGE_ITEM_PRODUCES)
SupModuleList = LibraryItem.GetTypes()
self.LibModuleTypeList += SupModuleList
Lib.SetSupModuleList(SupModuleList)
Lib.SetSupArchList(ArchList)
self.SetLibraryClassList(self.GetLibraryClassList() + [Lib])
self.SetIsLibrary(True)
self.SetIsLibraryModList(self.GetIsLibraryModList() + SupModuleList)
## GenModuleHeaderExterns
#
#
def _GenModuleHeaderExterns(self, DefineObj):
EntryPointList = DefineObj.GetEntryPoint()
for EntryPoint in EntryPointList:
Image = ExternObject()
Image.SetEntryPoint(EntryPoint.GetCName())
#
# Future enhancement
#
self.SetExternList(self.GetExternList() + [Image])
#
# UNLOAD_IMAGE
#
UnloadImageList = DefineObj.GetUnloadImages()
for UnloadImage in UnloadImageList:
Image = ExternObject()
#
# Future enhancement
#
Image.SetUnloadImage(UnloadImage.GetCName())
self.SetExternList(self.GetExternList() + [Image])
#
# CONSTRUCTOR
#
ConstructorList = DefineObj.GetConstructor()
for ConstructorItem in ConstructorList:
Image = ExternObject()
#
# Future enhancement
#
Image.SetConstructor(ConstructorItem.GetCName())
self.SetExternList(self.GetExternList() + [Image])
#
# DESTRUCTOR
#
DestructorList = DefineObj.GetDestructor()
for DestructorItem in DestructorList:
Image = ExternObject()
#
# Future enhancement
#
Image.SetDestructor(DestructorItem.GetCName())
self.SetExternList(self.GetExternList() + [Image])
## GenModuleHeaderExterns
# BootMode/HOB/Event
#
def _GenSpecialComments(self):
SpecialCommentsList = self.Parser.InfSpecialCommentSection.GetSpecialComments()
for Key in SpecialCommentsList:
if Key == DT.TYPE_HOB_SECTION:
HobList = []
for Item in SpecialCommentsList[Key]:
Hob = HobObject()
Hob.SetHobType(Item.GetHobType())
Hob.SetUsage(Item.GetUsage())
Hob.SetSupArchList(Item.GetSupArchList())
if Item.GetHelpString():
HelpTextObj = CommonObject.TextObject()
HelpTextObj.SetString(Item.GetHelpString())
Hob.SetHelpTextList([HelpTextObj])
HobList.append(Hob)
self.SetHobList(HobList)
elif Key == DT.TYPE_EVENT_SECTION:
EventList = []
for Item in SpecialCommentsList[Key]:
Event = EventObject()
Event.SetEventType(Item.GetEventType())
Event.SetUsage(Item.GetUsage())
if Item.GetHelpString():
HelpTextObj = CommonObject.TextObject()
HelpTextObj.SetString(Item.GetHelpString())
Event.SetHelpTextList([HelpTextObj])
EventList.append(Event)
self.SetEventList(EventList)
elif Key == DT.TYPE_BOOTMODE_SECTION:
BootModeList = []
for Item in SpecialCommentsList[Key]:
BootMode = BootModeObject()
BootMode.SetSupportedBootModes(Item.GetSupportedBootModes())
BootMode.SetUsage(Item.GetUsage())
if Item.GetHelpString():
HelpTextObj = CommonObject.TextObject()
HelpTextObj.SetString(Item.GetHelpString())
BootMode.SetHelpTextList([HelpTextObj])
BootModeList.append(BootMode)
self.SetBootModeList(BootModeList)
## GenBuildOptions
#
# Gen BuildOptions of Inf
# [<Family>:]<ToolFlag>=Flag
#
#
def _GenBuildOptions(self):
Logger.Debug(2, "Generate %s ..." % DT.TAB_BUILD_OPTIONS)
#
# Get all BuildOptions
#
BuildOptionsList = self.Parser.InfBuildOptionSection.GetBuildOptions()
if not GlobalData.gIS_BINARY_INF:
BuildOptionDict = {}
for BuildOptionObj in BuildOptionsList:
ArchList = BuildOptionObj.GetSupArchList()
ArchList = ConvertArchList(ArchList)
BuildOptionsContent = BuildOptionObj.GetContent()
ArchString = ' '.join(ArchList)
if not BuildOptionsContent:
continue
BuildOptionDict[ArchString] = BuildOptionsContent
if not BuildOptionDict:
return
UserExtension = CommonObject.UserExtensionObject()
UserExtension.SetBuildOptionDict(BuildOptionDict)
UserExtension.SetIdentifier('BuildOptionModifiers')
UserExtension.SetUserID('EDK2')
self.SetUserExtensionList(self.GetUserExtensionList() + [UserExtension])
else:
#
# Not process this information, will be processed in GenBinaries()
#
pass
## GenLibraryClasses
#
# Get LibraryClass of Inf
# <LibraryClassKeyWord>|<LibraryInstance>
#
# @param ContainerFile: The Inf file full path
#
def _GenLibraryClasses(self):
Logger.Debug(2, "Generate %s ..." % DT.TAB_LIBRARY_CLASSES)
if not GlobalData.gIS_BINARY_INF:
#
# Get all LibraryClasses
#
LibClassObj = self.Parser.InfLibraryClassSection.LibraryClasses
Keys = LibClassObj.keys()
for Key in Keys:
LibraryClassData = LibClassObj[Key]
for Item in LibraryClassData:
LibraryClass = CommonObject.LibraryClassObject()
LibraryClass.SetUsage(DT.USAGE_ITEM_CONSUMES)
LibraryClass.SetLibraryClass(Item.GetLibName())
LibraryClass.SetRecommendedInstance(None)
LibraryClass.SetFeatureFlag(Item.GetFeatureFlagExp())
LibraryClass.SetSupArchList(ConvertArchList(Item.GetSupArchList()))
LibraryClass.SetSupModuleList(Item.GetSupModuleList())
HelpStringObj = Item.GetHelpString()
if HelpStringObj != None:
CommentString = GetHelpStringByRemoveHashKey(HelpStringObj.HeaderComments +
HelpStringObj.TailComments)
HelpTextHeaderObj = CommonObject.TextObject()
HelpTextHeaderObj.SetString(CommentString)
LibraryClass.SetHelpTextList([HelpTextHeaderObj])
self.SetLibraryClassList(self.GetLibraryClassList() + [LibraryClass])
## GenPackages
#
# Gen Packages of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def _GenPackages(self, Skip):
Logger.Debug(2, "Generate %s ..." % DT.TAB_PACKAGES)
#
# Get all Packages
#
PackageObj = self.Parser.InfPackageSection.Packages
#
# Go through each arch
#
for PackageItemObj in PackageObj:
#
# Need package information for dependency check usage
#
PackageDependency = PackageDependencyObject()
PackageDependency.SetPackageFilePath(NormPath(PackageItemObj.GetPackageName()))
PackageDependency.SetSupArchList(ConvertArchList(PackageItemObj.GetSupArchList()))
PackageDependency.SetFeatureFlag(PackageItemObj.GetFeatureFlagExp())
PkgInfo = GetPkgInfoFromDec(os.path.normpath(os.path.join(self.WorkSpace,
NormPath(PackageItemObj.GetPackageName()))))
if PkgInfo[1] and PkgInfo[2]:
PackageDependency.SetGuid(PkgInfo[1])
PackageDependency.SetVersion(PkgInfo[2])
elif Skip:
continue
else:
Logger.Error("\nUPT", PARSER_ERROR,
ST.ERR_INF_GET_PKG_DEPENDENCY_FAIL % PackageItemObj.GetPackageName(), File=self.FullPath)
PackageDependencyList = self.GetPackageDependencyList()
PackageDependencyList.append(PackageDependency)
self.SetPackageDependencyList(PackageDependencyList)
## GenPcds
#
# Gen Pcds of Inf
# <TokenSpaceGuidCName>.<PcdCName>[|<Value> [|<FFE>]]
#
# @param ContainerFile: The Inf file full path
#
def _GenPcds(self):
if not GlobalData.gIS_BINARY_INF:
Logger.Debug(2, "Generate %s ..." % DT.TAB_PCDS)
#
# Get all Pcds
#
PcdObj = self.Parser.InfPcdSection.Pcds
KeysList = PcdObj.keys()
#
# Go through each arch
#
for (PcdType, PcdKey) in KeysList:
PcdData = PcdObj[PcdType, PcdKey]
for PcdItemObj in PcdData:
CommentList = PcdItemObj.GetHelpStringList()
if CommentList:
for CommentItem in CommentList:
Pcd = CommonObject.PcdObject()
Pcd.SetCName(PcdItemObj.GetCName())
Pcd.SetTokenSpaceGuidCName(PcdItemObj.GetTokenSpaceGuidCName())
Pcd.SetDefaultValue(PcdItemObj.GetDefaultValue())
Pcd.SetItemType(PcdType)
Pcd.SetValidUsage(CommentItem.GetUsageItem())
Pcd.SetFeatureFlag(PcdItemObj.GetFeatureFlagExp())
Pcd.SetSupArchList(ConvertArchList(PcdItemObj.GetSupportArchList()))
HelpTextObj = CommonObject.TextObject()
HelpTextObj.SetString(CommentItem.GetHelpStringItem())
Pcd.SetHelpTextList([HelpTextObj])
PcdList = self.GetPcdList()
PcdList.append(Pcd)
self.SetPcdList(PcdList)
## GenSources
#
# Gen Sources of Inf
# <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
#
# @param ContainerFile: The Inf file full path
#
def _GenSources(self):
Logger.Debug(2, "Generate %s ..." % DT.TAB_SOURCES)
#
# Get all SourceFiles
#
SourceObj = self.Parser.InfSourcesSection.Sources
DataList = SourceObj.keys()
#
# Go through each arch
#
SourceList = []
for Key in DataList:
SourceData = SourceObj[Key]
for Item in SourceData:
SourceFile = Item.GetSourceFileName()
Family = Item.GetFamily()
FeatureFlag = Item.GetFeatureFlagExp()
SupArchList = ConvertArchList(Item.GetSupArchList())
SupArchList.sort()
Source = SourceFileObject()
Source.SetSourceFile(SourceFile)
Source.SetFamily(Family)
Source.SetFeatureFlag(FeatureFlag)
Source.SetSupArchList(SupArchList)
SourceList.append(Source)
self.SetSourceFileList(self.GetSourceFileList() + SourceList)
## GenUserExtensions
#
# Gen UserExtensions of Inf
#
def _GenUserExtensions(self):
#
# UserExtensions
#
UserExtensionObj = self.Parser.InfUserExtensionSection.UserExtension
Keys = UserExtensionObj.keys()
for Key in Keys:
UserExtensionData = UserExtensionObj[Key]
for UserExtensionDataObj in UserExtensionData:
UserExtension = CommonObject.UserExtensionObject()
UserId = UserExtensionDataObj.GetUserId()
if UserId.startswith('"') and UserId.endswith('"'):
UserId = UserId[1:-1]
UserExtension.SetUserID(UserId)
Identifier = UserExtensionDataObj.GetIdString()
if Identifier.startswith('"') and Identifier.endswith('"'):
Identifier = Identifier[1:-1]
UserExtension.SetIdentifier(Identifier)
UserExtension.SetStatement(UserExtensionDataObj.GetContent())
UserExtension.SetSupArchList(ConvertArchList(UserExtensionDataObj.GetSupArchList()))
self.SetUserExtensionList(self.GetUserExtensionList() + [UserExtension])
def _GenDepexesList(self, SmmDepexList, DxeDepexList, PeiDepexList):
if SmmDepexList:
self.SetSmmDepex(SmmDepexList)
if DxeDepexList:
self.SetDxeDepex(DxeDepexList)
if PeiDepexList:
self.SetPeiDepex(PeiDepexList)
## GenDepexes
#
# Gen Depex of Inf
#
# @param ContainerFile: The Inf file full path
#
def _GenDepexes(self):
Logger.Debug(2, "Generate %s ..." % DT.TAB_DEPEX)
PEI_LIST = [DT.SUP_MODULE_PEIM]
SMM_LIST = [DT.SUP_MODULE_DXE_SMM_DRIVER]
DXE_LIST = [DT.SUP_MODULE_DXE_DRIVER, DT.SUP_MODULE_DXE_SAL_DRIVER,
DT.SUP_MODULE_DXE_RUNTIME_DRIVER]
IsLibraryClass = self.GetIsLibrary()
#
# Get all Depexes
#
DepexData = self.Parser.InfDepexSection.GetDepex()
SmmDepexList = []
DxeDepexList = []
PeiDepexList = []
for Depex in DepexData:
ModuleType = Depex.GetModuleType()
ModuleTypeList = []
if IsLibraryClass:
if not self.GetIsLibraryModList():
Logger.Error("\nMkPkg", PARSER_ERROR, ST.ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_LIBRARY_CLASS,
self.GetFullPath(), RaiseError=True)
if ModuleType and ModuleType not in self.GetIsLibraryModList():
Logger.Error("\nMkPkg", PARSER_ERROR, ST.ERR_INF_PARSER_DEPEX_SECTION_NOT_DETERMINED,
self.GetFullPath(), RaiseError=True)
if ModuleType:
ModuleTypeList = [ModuleType]
else:
for ModuleTypeInList in self.GetIsLibraryModList():
if ModuleTypeInList in DT.VALID_DEPEX_MODULE_TYPE_LIST:
ModuleTypeList.append(ModuleTypeInList)
if not ModuleTypeList:
Logger.Error("\nMkPkg", PARSER_ERROR, ST.ERR_INF_PARSER_DEPEX_SECTION_NOT_DETERMINED,
self.GetFullPath(), RaiseError=True)
else:
if not ModuleType:
ModuleType = self.ModuleType
if ModuleType not in DT.VALID_DEPEX_MODULE_TYPE_LIST:
Logger.Error("\nMkPkg", PARSER_ERROR,
ST.ERR_INF_PARSER_DEPEX_SECTION_MODULE_TYPE_ERROR % (ModuleType),
self.GetFullPath(), RaiseError=True)
if ModuleType != self.ModuleType:
Logger.Error("\nMkPkg", PARSER_ERROR, ST.ERR_INF_PARSER_DEPEX_SECTION_NOT_DETERMINED,
self.GetFullPath(), RaiseError=True)
ModuleTypeList = [ModuleType]
for ModuleType in ModuleTypeList:
DepexIns = DepexObject()
DepexIns.SetDepex(Depex.GetDepexContent())
if IsLibraryClass:
DepexIns.SetModuleType(ModuleType)
else:
if Depex.GetModuleType():
DepexIns.SetModuleType(Depex.GetModuleType())
DepexIns.SetSupArchList(ConvertArchList([Depex.GetSupArch()]))
DepexIns.SetFeatureFlag(Depex.GetFeatureFlagExp())
if Depex.HelpString:
HelpIns = CommonObject.TextObject()
HelpIns.SetString(GetHelpStringByRemoveHashKey(Depex.HelpString))
DepexIns.SetHelpText(HelpIns)
if ModuleType in SMM_LIST:
SmmDepexList.append(DepexIns)
if ModuleType in DXE_LIST:
DxeDepexList.append(DepexIns)
if ModuleType in PEI_LIST:
PeiDepexList.append(DepexIns)
if ModuleType == DT.SUP_MODULE_UEFI_DRIVER:
if IsLibraryClass:
DxeDepexList.append(DepexIns)
else:
Logger.Error("\nMkPkg", PARSER_ERROR, ST.ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_DRIVER,
self.GetFullPath(), RaiseError=True)
#End of for ModuleType in ModuleTypeList
self._GenDepexesList(SmmDepexList, DxeDepexList, PeiDepexList)
#End of for Depex in DepexData
## GenBinaries
#
# Gen Binary of Inf, must be called after Pcd/Library is generated
# <FileType>|<Filename>|<Target>[|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param ContainerFile: The Inf file full path
#
def _GenBinaries(self):
Logger.Debug(2, "Generate %s ..." % DT.TAB_BINARIES)
BinariesDict = {}
#
# Get all Binary data
#
BinaryObj = self.Parser.InfBinariesSection.GetBinary()
BinaryData = BinaryObj.keys()
#
# If the INF file does not contain a [Sources] section, and the INF file does contain a [Binaries] section,
# then the ModuleSurfaceArea.BinaryModule attribute must be set to true. Otherwise, do not use the attribute
#
if BinaryObj and not self.Parser.InfSourcesSection.GetSources():
self.BinaryModule = True
else:
self.BinaryModule = False
BinaryFileObjectList = []
AsBuildLibraryClassList = []
AsBuildBuildOptionList = []
AsBuildIns = AsBuiltObject()
#
# Library AsBuild Info
#
for LibItem in self.Parser.InfLibraryClassSection.GetLibraryClasses():
AsBuildLibIns = AsBuildLibraryClassObject()
AsBuildLibIns.SetLibGuid(LibItem.GetFileGuid())
AsBuildLibIns.SetLibVersion(LibItem.GetVersion())
AsBuildLibraryClassList.append(AsBuildLibIns)
AsBuildIns.SetLibraryInstancesList(AsBuildLibraryClassList)
#
# BuildOption AsBuild Info
#
for BuildOptionItem in self.Parser.InfBuildOptionSection.GetBuildOptions():
AsBuildBuildOptionList += BuildOptionItem.GetAsBuildList()
AsBuildIns.SetBuildFlagsList(AsBuildBuildOptionList)
#
# PatchPcd and PcdEx
#
AsBuildIns = self._GenAsBuiltPcds(self.Parser.InfPcdSection.GetPcds(), AsBuildIns)
BinariesDict, AsBuildIns, BinaryFileObjectList = GenBinaryData(BinaryData, BinaryObj,
BinariesDict,
AsBuildIns,
BinaryFileObjectList,
self.GetSupArchList(),
self.BinaryModule)
BinariesDict2 = {}
for Key in BinariesDict:
ValueList = BinariesDict[Key]
if len(ValueList) > 1:
BinariesDict2[Key] = ValueList
else:
#
# if there is no TagName, ToolCode, HelpStr,
                # then there is no need to put them into the user extension
#
(Target, Family, TagName, HelpStr) = ValueList[0]
if not (Target or Family or TagName or HelpStr):
continue
else:
BinariesDict2[Key] = ValueList
self.SetBinaryFileList(self.GetBinaryFileList() + BinaryFileObjectList)
if BinariesDict2:
UserExtension = CommonObject.UserExtensionObject()
UserExtension.SetBinariesDict(BinariesDict2)
UserExtension.SetIdentifier('BinaryFileModifiers')
UserExtension.SetUserID('EDK2')
self.SetUserExtensionList(self.GetUserExtensionList() + [UserExtension])
## GenAsBuiltPcds
#
#
def _GenAsBuiltPcds(self, PcdList, AsBuildIns):
AsBuildPatchPcdList = []
AsBuildPcdExList = []
#
# Pcd AsBuild Info
#
for PcdItem in PcdList:
if PcdItem[0].upper() == DT.TAB_INF_PATCH_PCD.upper():
PcdItemObj = PcdItem[1]
Pcd = CommonObject.PcdObject()
Pcd.SetCName(PcdItemObj.GetCName())
Pcd.SetTokenSpaceGuidCName(PcdItemObj.GetTokenSpaceGuidCName())
if PcdItemObj.GetTokenSpaceGuidValue() == '' and self.BinaryModule:
Logger.Error("\nMkPkg",
PARSER_ERROR,
ST.ERR_ASBUILD_PCD_TOKENSPACE_GUID_VALUE_MISS % \
(PcdItemObj.GetTokenSpaceGuidCName()),
self.GetFullPath(), RaiseError=True)
else:
Pcd.SetTokenSpaceGuidValue(PcdItemObj.GetTokenSpaceGuidValue())
if (PcdItemObj.GetToken() == '' or PcdItemObj.GetDatumType() == '') and self.BinaryModule:
Logger.Error("\nMkPkg",
PARSER_ERROR,
ST.ERR_ASBUILD_PCD_DECLARITION_MISS % \
(PcdItemObj.GetTokenSpaceGuidCName() + '.' + PcdItemObj.GetCName()),
self.GetFullPath(), RaiseError=True)
Pcd.SetToken(PcdItemObj.GetToken())
Pcd.SetDatumType(PcdItemObj.GetDatumType())
Pcd.SetMaxDatumSize(PcdItemObj.GetMaxDatumSize())
Pcd.SetDefaultValue(PcdItemObj.GetDefaultValue())
Pcd.SetOffset(PcdItemObj.GetOffset())
Pcd.SetItemType(PcdItem[0])
Pcd.SetFeatureFlag(PcdItemObj.GetFeatureFlagExp())
Pcd.SetSupArchList(ConvertArchList(PcdItemObj.GetSupportArchList()))
HelpTextObj = CommonObject.TextObject()
HelpTextObj.SetString(PcdItemObj.GetHelpStringList())
Pcd.SetHelpTextList([HelpTextObj])
AsBuildPatchPcdList.append(Pcd)
else:
PcdItemObj = PcdItem[1]
Pcd = CommonObject.PcdObject()
Pcd.SetTokenSpaceGuidValue(PcdItemObj.GetTokenSpaceGuidValue())
Pcd.SetToken(PcdItemObj.GetToken())
Pcd.SetDatumType(PcdItemObj.GetDatumType())
Pcd.SetMaxDatumSize(PcdItemObj.GetMaxDatumSize())
Pcd.SetDefaultValue(PcdItemObj.GetDefaultValue())
Pcd.SetItemType(PcdItem[0])
Pcd.SetFeatureFlag(PcdItemObj.GetFeatureFlagExp())
Pcd.SetSupArchList(ConvertArchList(PcdItemObj.GetSupportArchList()))
HelpTextObj = CommonObject.TextObject()
HelpTextObj.SetString(PcdItemObj.GetHelpStringList())
Pcd.SetHelpTextList([HelpTextObj])
AsBuildPcdExList.append(Pcd)
AsBuildIns.SetPatchPcdList(AsBuildPatchPcdList)
AsBuildIns.SetPcdExList(AsBuildPcdExList)
return AsBuildIns
## GenGuidProtocolPpis
#
# Gen Guids/Protocol/Ppis of INF
# <CName>=<GuidValue>
#
def _GenGuidProtocolPpis(self, Type):
Logger.Debug(2, "Generate %s ..." % Type)
#
# Get all Guid/Protocol/Ppis data
#
GuidObj = self.Parser.InfGuidSection.GetGuid()
ProtocolObj = self.Parser.InfProtocolSection.GetProtocol()
PpisObj = self.Parser.InfPpiSection.GetPpi()
GuidProtocolPpiList = []
if Type == DT.TAB_GUIDS:
GuidData = GuidObj.keys()
for Item in GuidData:
CommentList = Item.GetCommentList()
#
            # Depending on the CommentList content,
            # generate multiple guid objects
#
if CommentList:
for GuidComentItem in CommentList:
ListObject = CommonObject.GuidObject()
ListObject.SetGuidTypeList([GuidComentItem.GetGuidTypeItem()])
ListObject.SetVariableName(GuidComentItem.GetVariableNameItem())
ListObject.SetUsage(GuidComentItem.GetUsageItem())
ListObject.SetName(Item.GetName())
ListObject.SetCName(Item.GetName())
ListObject.SetSupArchList(ConvertArchList(Item.GetSupArchList()))
ListObject.SetFeatureFlag(Item.GetFeatureFlagExp())
HelpString = GuidComentItem.GetHelpStringItem()
HelpTxtTailObj = CommonObject.TextObject()
HelpTxtTailObj.SetString(HelpString)
ListObject.SetHelpTextList([HelpTxtTailObj])
GuidProtocolPpiList.append(ListObject)
elif Type == DT.TAB_PROTOCOLS:
ProtocolData = ProtocolObj.keys()
for Item in ProtocolData:
CommentList = Item.GetCommentList()
for CommentItem in CommentList:
ListObject = CommonObject.ProtocolObject()
ListObject.SetCName(Item.GetName())
ListObject.SetSupArchList(ConvertArchList(Item.GetSupArchList()))
ListObject.SetFeatureFlag(Item.GetFeatureFlagExp())
ListObject.SetNotify(CommentItem.GetNotify())
ListObject.SetUsage(CommentItem.GetUsageItem())
HelpTxtObj = CommonObject.TextObject()
HelpString = CommentItem.GetHelpStringItem()
HelpTxtObj.SetString(HelpString)
ListObject.SetHelpTextList([HelpTxtObj])
GuidProtocolPpiList.append(ListObject)
elif Type == DT.TAB_PPIS:
PpiData = PpisObj.keys()
for Item in PpiData:
CommentList = Item.GetCommentList()
for CommentItem in CommentList:
ListObject = CommonObject.PpiObject()
ListObject.SetCName(Item.GetName())
ListObject.SetSupArchList(ConvertArchList(Item.GetSupArchList()))
ListObject.SetFeatureFlag(Item.GetFeatureFlagExp())
ListObject.SetNotify(CommentItem.GetNotify())
ListObject.SetUsage(CommentItem.GetUsage())
HelpTextObj = CommonObject.TextObject()
HelpString = CommentItem.GetHelpStringItem()
HelpTextObj.SetString(HelpString)
ListObject.SetHelpTextList([HelpTextObj])
GuidProtocolPpiList.append(ListObject)
if Type == DT.TAB_GUIDS:
self.SetGuidList(self.GetGuidList() + GuidProtocolPpiList)
elif Type == DT.TAB_PROTOCOLS:
self.SetProtocolList(self.GetProtocolList() + GuidProtocolPpiList)
elif Type == DT.TAB_PPIS:
self.SetPpiList(self.GetPpiList() + GuidProtocolPpiList)
## GenMiscFiles
#
# Gen MiscellaneousFiles of Inf
#
# @param ContainerFile: The Inf file full path
#
def _GenMiscFiles(self, ContainerFile, Skip):
pass
| 41.375514
| 118
| 0.588358
|
ab829bdad4889d102b33b8fdd5876070b96c15dd
| 369
|
py
|
Python
|
entries/models.py
|
eliusmgani/my-diary
|
14a20ac5c7fbae73402bb28f23767568a80e5379
|
[
"MIT"
] | null | null | null |
entries/models.py
|
eliusmgani/my-diary
|
14a20ac5c7fbae73402bb28f23767568a80e5379
|
[
"MIT"
] | null | null | null |
entries/models.py
|
eliusmgani/my-diary
|
14a20ac5c7fbae73402bb28f23767568a80e5379
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
# Create your models here.
class Entry(models.Model):
title = models.CharField(max_length=200)
content = models.TextField()
date_created = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "Entries"
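# A minimal usage sketch (assumes Django is configured and migrations for this app
# have been applied); the helper below is illustrative and is not called anywhere.
def example_entry_usage():
    # Create one entry and fetch the five most recent ones
    entry = Entry.objects.create(title="First day", content="Started the diary.")
    latest = Entry.objects.order_by("-date_created")[:5]
    return entry, list(latest)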
| 24.6
| 61
| 0.710027
|
46fa40eb0d6ceb820fa30662480106a015c2838b
| 4,088
|
py
|
Python
|
flexpart_alto/nbs/nb_run_2021-03-14_20-15-10_LONG/z035_get_flx_log_pol_coords_puhti_array.py
|
daliagachc/flexpart-alto
|
2663fad47239989bb4aff0e2058bfd62e2489d07
|
[
"Xnet",
"X11"
] | null | null | null |
flexpart_alto/nbs/nb_run_2021-03-14_20-15-10_LONG/z035_get_flx_log_pol_coords_puhti_array.py
|
daliagachc/flexpart-alto
|
2663fad47239989bb4aff0e2058bfd62e2489d07
|
[
"Xnet",
"X11"
] | null | null | null |
flexpart_alto/nbs/nb_run_2021-03-14_20-15-10_LONG/z035_get_flx_log_pol_coords_puhti_array.py
|
daliagachc/flexpart-alto
|
2663fad47239989bb4aff0e2058bfd62e2489d07
|
[
"Xnet",
"X11"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
"""
README:
this script will transform rect coords to logpol at puhti.
it takes one argument: the 1-based index of the day to process,
which selects a date from list_of_days below; the flexpart output
for that day is read from the run folder.
example:
python z035_get_flx_log_pol_coords_puhti_array.py 1
the total number of days is 177
"""
# %%
# this notebook was created to convert rectangular coordinates to log-polar coordinates
# %%
import flexpart_alto.modules.FLEXOUT as FO
# import flexpart_alto.modules.flx_array as fa
from useful_scit.imps2.defs import log, Path, glob
import sys
# %%
log.ger.setLevel(log.log.DEBUG)
# %%
# the command-line argument is 1-based, so subtract 1
i_to_run = int(sys.argv[1]) - 1
# i_to_run = 0
# %%
list_of_days = '''\
2017-12-06
2017-12-07
2017-12-08
2017-12-09
2017-12-10
2017-12-11
2017-12-12
2017-12-13
2017-12-14
2017-12-15
2017-12-16
2017-12-17
2017-12-18
2017-12-19
2017-12-20
2017-12-21
2017-12-22
2017-12-23
2017-12-24
2017-12-25
2017-12-26
2017-12-27
2017-12-28
2017-12-29
2017-12-30
2017-12-31
2018-01-01
2018-01-02
2018-01-03
2018-01-04
2018-01-05
2018-01-06
2018-01-07
2018-01-08
2018-01-09
2018-01-10
2018-01-11
2018-01-12
2018-01-13
2018-01-14
2018-01-15
2018-01-16
2018-01-17
2018-01-18
2018-01-19
2018-01-20
2018-01-21
2018-01-22
2018-01-23
2018-01-24
2018-01-25
2018-01-26
2018-01-27
2018-01-28
2018-01-29
2018-01-30
2018-01-31
2018-02-01
2018-02-02
2018-02-03
2018-02-04
2018-02-05
2018-02-06
2018-02-07
2018-02-08
2018-02-09
2018-02-10
2018-02-11
2018-02-12
2018-02-13
2018-02-14
2018-02-15
2018-02-16
2018-02-17
2018-02-18
2018-02-19
2018-02-20
2018-02-21
2018-02-22
2018-02-23
2018-02-24
2018-02-25
2018-02-26
2018-02-27
2018-02-28
2018-03-01
2018-03-02
2018-03-03
2018-03-04
2018-03-05
2018-03-06
2018-03-07
2018-03-08
2018-03-09
2018-03-10
2018-03-11
2018-03-12
2018-03-13
2018-03-14
2018-03-15
2018-03-16
2018-03-17
2018-03-18
2018-03-19
2018-03-20
2018-03-21
2018-03-22
2018-03-23
2018-03-24
2018-03-25
2018-03-26
2018-03-27
2018-03-28
2018-03-29
2018-03-30
2018-03-31
2018-04-01
2018-04-02
2018-04-03
2018-04-04
2018-04-05
2018-04-06
2018-04-07
2018-04-08
2018-04-09
2018-04-10
2018-04-11
2018-04-12
2018-04-13
2018-04-14
2018-04-15
2018-04-16
2018-04-17
2018-04-18
2018-04-19
2018-04-20
2018-04-21
2018-04-22
2018-04-23
2018-04-24
2018-04-25
2018-04-26
2018-04-27
2018-04-28
2018-04-29
2018-04-30
2018-05-01
2018-05-02
2018-05-03
2018-05-04
2018-05-05
2018-05-06
2018-05-07
2018-05-08
2018-05-09
2018-05-10
2018-05-11
2018-05-12
2018-05-13
2018-05-14
2018-05-15
2018-05-16
2018-05-17
2018-05-18
2018-05-19
2018-05-20
2018-05-21
2018-05-22
2018-05-23
2018-05-24
2018-05-25
2018-05-26
2018-05-27
2018-05-28
2018-05-29
2018-05-30
2018-05-31'''
list_of_days = list_of_days.split('\n')
day_to_run = list_of_days[i_to_run]
# %%
# os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
doms = ['d01', 'd02']
run_root_path = Path(
'/scratch/project_2001273/diego/flexpart-alto-data/run_2021-03-14_20-15'
'-10_LONG')
root_path = run_root_path / day_to_run
# root_path = '/homeappl/home/aliagadi/wrk/DONOTREMOVE
# /flexpart_management_data/runs/run_2019-06-05_18-42-11_/*-*-*'
path_out = run_root_path / 'log_pol'
log.ger.debug(f'root path is {root_path}')
run_name = 'run_2021-03-14_20-15-10_LONG'
paths = glob.glob(str(root_path))
paths.sort()
# %%
fo_base_dic = dict(
# dom = 'd01', folder_path = '/Volumes/mbProD/Downloads/flex_out/run_2019
# -06-02_20-42-05_/2017-12-10',
folder_path_out=path_out,
run_name=run_name,
)
# %%
for p in paths[:]:
for d in doms:
print('starting', d, p)
new_dic = dict(dom=d, folder_path=p)
fo_dic = {**fo_base_dic, **new_dic}
try:
fo = FO.FLEXOUT(**fo_dic)
fo.export_log_polar_coords()
print('done', d, p)
except AssertionError as error:
log.ger.error(error)
print('failed when', d, p)
# %%
# %%
| 14.6
| 77
| 0.68909
|
8a0c0357f31101300db4184fd5989fef7ad5031e
| 421
|
py
|
Python
|
C3_Decorator_Pattern/StarBuzz/Condiments/SteamedMilk.py
|
sarada92/Design_Pattern
|
f817206a6f995bc6f534d7cabb3a290955f37d33
|
[
"MIT"
] | 1
|
2022-02-06T15:42:09.000Z
|
2022-02-06T15:42:09.000Z
|
C3_Decorator_Pattern/StarBuzz/Condiments/SteamedMilk.py
|
sarada92/Design_Pattern
|
f817206a6f995bc6f534d7cabb3a290955f37d33
|
[
"MIT"
] | null | null | null |
C3_Decorator_Pattern/StarBuzz/Condiments/SteamedMilk.py
|
sarada92/Design_Pattern
|
f817206a6f995bc6f534d7cabb3a290955f37d33
|
[
"MIT"
] | null | null | null |
from C3_Decorator_Pattern.StarBuzz.Beverages.Beverages import Beverages
from C3_Decorator_Pattern.StarBuzz.Condiments.Condiments import Condiments
class SteamedMilk(Condiments):
def __init__(self, beverage: Beverages):
self.beverage = beverage
def get_description(self):
return self.beverage.get_description() + ', Steamed Milk'
def cost(self):
return self.beverage.cost() + 0.10
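# Illustrative decorator-pattern usage, a sketch only: 'HouseBlend' is a stand-in
# beverage defined inline here, assuming Beverages can be subclassed by providing
# get_description() and cost(); it is not a class from this repository.
def example_order():
    class HouseBlend(Beverages):
        def get_description(self):
            return 'House Blend'
        def cost(self):
            return 0.89
    # Wrap the base beverage twice to add two portions of steamed milk
    drink = SteamedMilk(SteamedMilk(HouseBlend()))
    return drink.get_description(), drink.cost()  # ('House Blend, Steamed Milk, Steamed Milk', 1.09)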
| 28.066667
| 74
| 0.743468
|
e389afb5cb0021252f5988c208f309df79c0a931
| 2,046
|
py
|
Python
|
buildscripts/sourcepush.py
|
sayfullah/MongoPi
|
8205c77e634a2db210938ae97dbe09fdaaa43736
|
[
"Apache-2.0"
] | 5
|
2015-02-02T04:51:58.000Z
|
2019-01-22T03:01:38.000Z
|
buildscripts/sourcepush.py
|
sayfullah/MongoPi
|
8205c77e634a2db210938ae97dbe09fdaaa43736
|
[
"Apache-2.0"
] | null | null | null |
buildscripts/sourcepush.py
|
sayfullah/MongoPi
|
8205c77e634a2db210938ae97dbe09fdaaa43736
|
[
"Apache-2.0"
] | 2
|
2015-02-19T10:19:03.000Z
|
2020-10-29T07:00:33.000Z
|
import os
import sys
sys.path.append( "." )
sys.path.append( ".." )
sys.path.append( "../../" )
sys.path.append( "../../../" )
import simples3
import settings
import subprocess
# this pushes all source balls as tgz and zip
def run_git( args ):
cmd = "git " + args
cmd = cmd.split( " " )
x = subprocess.Popen( ( "git " + args ).split( " " ) , stdout=subprocess.PIPE).communicate()
return x[0]
def push_tag( bucket , tag , extension , gzip=False ):
localName = "mongodb-src-" + tag + "." + extension
remoteName = "src/" + localName
if gzip:
remoteName += ".gz"
for ( key , modify , etag , size ) in bucket.listdir( prefix=remoteName ):
print( "found old: " + key + " uploaded on: " + str( modify ) )
return
if os.path.exists( localName ):
os.remove( localName )
print( "need to do: " + remoteName )
cmd = "archive --format %s --output %s --prefix mongodb-src-%s/ %s" % ( extension , localName , tag , tag )
run_git( cmd )
print( "\t" + cmd )
if not os.path.exists( localName ) or os.path.getsize(localName) == 0 :
raise( Exception( "creating archive failed: " + cmd ) )
if gzip:
newLocalName = localName + ".gz"
if ( os.path.exists( newLocalName ) ):
os.remove( newLocalName )
subprocess.call( [ "gzip" , localName ] )
localName = newLocalName
if not os.path.exists( localName ) or os.path.getsize(localName) == 0 :
raise( Exception( "gzipping failed" ) )
bucket.put( remoteName , open( localName , "rb" ).read() , acl="public-read" )
print( "\t uploaded to: http://s3.amazonaws.com/%s/%s" % ( bucket.name , remoteName ) )
os.remove( localName )
def push_all():
tags = run_git("tag -l").strip().split( "\n" )
bucket = simples3.S3Bucket( settings.bucket , settings.id , settings.key )
for tag in tags:
push_tag( bucket , tag , "tar" , True )
push_tag( bucket , tag , "zip" )
if __name__ == "__main__":
push_all()
| 28.816901
| 111
| 0.581623
|
005b66ee6e157ff9d1d45683ab007a99f0de9536
| 262
|
gyp
|
Python
|
Dependencies/gyp-master/test/standalone/standalone.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/standalone/standalone.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/standalone/standalone.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name' : 'foo',
'type' : 'executable'
},
]
}
| 20.153846
| 73
| 0.580153
|
55cf6a2582273a280e4356d752cb29b4f15375e6
| 168
|
py
|
Python
|
swampyer/__init__.py
|
zabertech/python-swampyer
|
4181db7d11439cc2a79b5a7d7b2b7c581abf59ec
|
[
"MIT"
] | 1
|
2022-03-10T00:10:19.000Z
|
2022-03-10T00:10:19.000Z
|
swampyer/__init__.py
|
zabertech/python-swampyer
|
4181db7d11439cc2a79b5a7d7b2b7c581abf59ec
|
[
"MIT"
] | 4
|
2021-05-12T21:56:56.000Z
|
2021-05-12T22:02:20.000Z
|
swampyer/__init__.py
|
zabertech/python-swampyer
|
4181db7d11439cc2a79b5a7d7b2b7c581abf59ec
|
[
"MIT"
] | 2
|
2018-06-10T12:50:21.000Z
|
2019-10-23T17:16:26.000Z
|
from .common import *
from .messages import *
from .utils import logger
from .exceptions import *
from .transport import *
from .queues import *
from .client import *
| 18.666667
| 25
| 0.75
|
eae0e8628f05e50a1c16387e543ab329cef08654
| 429
|
py
|
Python
|
init.py
|
jamro/oversort
|
19b1da5adc71eef84d0b88faa9c3e721589a4fd2
|
[
"MIT"
] | 1
|
2020-03-02T01:26:59.000Z
|
2020-03-02T01:26:59.000Z
|
init.py
|
jamro/oversort
|
19b1da5adc71eef84d0b88faa9c3e721589a4fd2
|
[
"MIT"
] | null | null | null |
init.py
|
jamro/oversort
|
19b1da5adc71eef84d0b88faa9c3e721589a4fd2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import vagrant
import click
@click.command()
def run():
"""Creates VMs for Docker Swarm cluster"""
    currentDir = os.path.dirname(os.path.realpath(__file__))
    rootDir = os.path.join(currentDir, 'vagrant')
    os.chdir(rootDir)
v = vagrant.Vagrant(root=rootDir, quiet_stdout=False)
print('Initializing infrastructure...')
v.up()
print('DONE')
if __name__ == '__main__':
run()
| 22.578947
| 61
| 0.675991
|
bcfcfe8612acb0d91c36f10e536c3dc7af4dcb3a
| 377
|
py
|
Python
|
algorithms/insertion_sort_fast.py
|
parisam83/Sorting-Algorithms
|
d2edf8e87e239a8539b7e09ecb2fee8560c6ef63
|
[
"MIT"
] | 20
|
2019-05-19T00:26:38.000Z
|
2022-01-30T15:09:48.000Z
|
algorithms/insertion_sort_fast.py
|
parisam83/Sorting-Algorithms
|
d2edf8e87e239a8539b7e09ecb2fee8560c6ef63
|
[
"MIT"
] | null | null | null |
algorithms/insertion_sort_fast.py
|
parisam83/Sorting-Algorithms
|
d2edf8e87e239a8539b7e09ecb2fee8560c6ef63
|
[
"MIT"
] | 2
|
2019-06-13T16:01:17.000Z
|
2020-07-15T05:18:25.000Z
|
'''This insertion sort algorithm
is faster and much easier for
beginners to understand...'''
def insertion_sort_fast(arr):
for i in range(1, len(arr)):
j = i - 1
for k in range(i):
if arr[i] < arr[j]:
arr[i], arr[j] = arr[j], arr[i]
i = i -1
j = j - 1
#insertion_sort_fast(arr)
#print(arr)
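# A concrete usage sketch with illustrative data: the function sorts the list in place.
if __name__ == '__main__':
    data = [5, 2, 9, 1, 5, 6]
    insertion_sort_fast(data)
    print(data)  # expected output: [1, 2, 5, 5, 6, 9]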
| 22.176471
| 45
| 0.517241
|
af1b0a2231233013e27bf22d098fddce0084f694
| 991
|
py
|
Python
|
var/spack/repos/builtin/packages/r-limma/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-09-10T22:50:08.000Z
|
2021-01-12T22:18:54.000Z
|
var/spack/repos/builtin/packages/r-limma/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14
|
2021-07-20T01:04:53.000Z
|
2022-03-02T01:08:36.000Z
|
var/spack/repos/builtin/packages/r-limma/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-05-06T00:17:46.000Z
|
2021-05-06T00:17:46.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RLimma(RPackage):
"""Linear Models for Microarray Data.
Data analysis, linear models and differential expression for microarray
data."""
homepage = "https://bioconductor.org/packages/limma"
git = "https://git.bioconductor.org/packages/limma.git"
version('3.40.6', commit='3ae0767ecf7a764030e7b7d0b1d0f292c0b24055')
version('3.38.3', commit='77b292eb150cdedaa1db704bcfb01f0bb29e9849')
version('3.36.5', commit='3148d1cb7eea9c6bdd60351d51abcfd665332d44')
version('3.34.9', commit='6755278a929f942a49e2441fb002a3ed393e1139')
version('3.32.10', commit='593edf28e21fe054d64137ae271b8a52ab05bc60')
depends_on('r@2.3.0:', type=('build', 'run'))
depends_on('r@3.6.0:', when='@3.40.6:', type=('build', 'run'))
| 36.703704
| 78
| 0.718466
|
77b53fc98ee9d50b3e329ea5fe7af460a986bac4
| 2,747
|
py
|
Python
|
pyphoenix/errors.py
|
melnikovmaksimv/pyPhoenix
|
3e1af26ed231bcf2e1f871d28517888733ed1d37
|
[
"Apache-2.0"
] | 16
|
2017-03-24T00:25:43.000Z
|
2020-08-14T14:00:46.000Z
|
pyphoenix/errors.py
|
melnikovmaksimv/pyPhoenix
|
3e1af26ed231bcf2e1f871d28517888733ed1d37
|
[
"Apache-2.0"
] | 1
|
2018-01-06T07:41:53.000Z
|
2018-01-11T20:59:15.000Z
|
pyphoenix/errors.py
|
melnikovmaksimv/pyPhoenix
|
3e1af26ed231bcf2e1f871d28517888733ed1d37
|
[
"Apache-2.0"
] | 13
|
2017-03-24T00:25:46.000Z
|
2021-07-23T08:58:09.000Z
|
# Copyright 2015 Lukas Lalinsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError',
'OperationalError', 'IntegrityError', 'InternalError',
'ProgrammingError', 'NotSupportedError',
]
class Warning(Exception):
"""Not used by this package, only defined for compatibility
with DB API 2.0."""
class Error(Exception):
"""Exception that is the base class of all other error exceptions.
You can use this to catch all errors with one single except statement."""
def __init__(self, message, code=None, sqlstate=None, cause=None):
super(Exception, self).__init__(message, code, sqlstate, cause)
@property
def message(self):
return self.args[0]
@property
def code(self):
return self.args[1]
@property
def sqlstate(self):
return self.args[2]
@property
def cause(self):
return self.args[3]
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Raised for errors that are related to the database's operation and not
necessarily under the control of the programmer, e.g. an unexpected
disconnect occurs, the data source name is not found, a transaction could
not be processed, a memory allocation error occurred during
processing, etc."""
class IntegrityError(DatabaseError):
"""Raised when the relational integrity of the database is affected, e.g. a foreign key check fails."""
class InternalError(DatabaseError):
"""Raised when the database encounters an internal problem."""
class ProgrammingError(DatabaseError):
"""Raises for programming errors, e.g. table not found, syntax error, etc."""
class NotSupportedError(DatabaseError):
"""Raised when using an API that is not supported by the database."""
| 31.215909
| 107
| 0.720058
|
833d06cb33e829ef374e62419db6b0e9392eddca
| 7,363
|
py
|
Python
|
doudizhu_solver.py
|
kammaron/doudizhu_solver
|
be7de80878c1de9ee493c83970443dacaeb0f6c0
|
[
"MIT"
] | null | null | null |
doudizhu_solver.py
|
kammaron/doudizhu_solver
|
be7de80878c1de9ee493c83970443dacaeb0f6c0
|
[
"MIT"
] | null | null | null |
doudizhu_solver.py
|
kammaron/doudizhu_solver
|
be7de80878c1de9ee493c83970443dacaeb0f6c0
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
# Author: Tim Wu
# Author: Carl King
# Author: Kamma Ron
# Enumeration of hand (combination) types
class COMB_TYPE:
PASS, SINGLE, PAIR, TRIPLE, TRIPLE_ONE, TRIPLE_TWO, FOURTH_TWO_ONES, FOURTH_TWO_PAIRS, STRIGHT, BOMB = range(10)
# 3-14 represent 3-10, J, Q, K, A respectively
# 16, 18, 19 represent 2, little_joker, big_joker respectively
# 2 is kept apart from the other ranks to make straight detection easier
# HAND_PASS means passing (playing no cards)
little_joker, big_joker = 18, 19
HAND_PASS = {'type':COMB_TYPE.PASS, 'main': 0, 'component':[]}
# Given the current hand, enumerate every combination that could be played
# A combination is stored as {type, main card, component cards}
# Combinations of the same type are compared by their main card
# To simplify comparison, straights of different lengths are treated as different types
def get_all_hands(pokers):
if not pokers:
return []
    # Pass
combs = [HAND_PASS]
    # Count how many cards of each rank we hold
dic = counter(pokers)
    # Rocket (both jokers)
if little_joker in pokers and big_joker in pokers:
combs.append({'type':COMB_TYPE.BOMB, 'main': big_joker, 'component': [big_joker, little_joker]})
    # Non-straight, non-rocket combinations
for poker in dic:
if dic[poker] >= 1:
            # Single
combs.append({'type':COMB_TYPE.SINGLE, 'main':poker, 'component':[poker]})
if dic[poker] >= 2:
            # Pair
combs.append({'type':COMB_TYPE.PAIR, 'main':poker, 'component':[poker, poker]})
if dic[poker] >= 3:
            # Plain triple
combs.append({'type':COMB_TYPE.TRIPLE, 'main':poker, 'component':[poker, poker, poker]})
for poker2 in dic:
if ALLOW_THREE_ONE and dic[poker2] >= 1 and poker2 != poker:
                    # Triple with a single
combs.append({'type':COMB_TYPE.TRIPLE_ONE, 'main':poker, 'component': [poker, poker, poker, poker2]})
if ALLOW_THREE_TWO and dic[poker2] >= 2 and poker2 != poker:
                    # Triple with a pair
combs.append({'type':COMB_TYPE.TRIPLE_TWO, 'main':poker, 'component': [poker, poker, poker, poker2, poker2]})
if dic[poker] == 4:
            # Bomb
combs.append({'type':COMB_TYPE.BOMB, 'main':poker, 'component': [poker, poker, poker, poker]})
if ALLOW_FOUR_TWO:
pairs = []
ones = []
for poker2 in dic:
if dic[poker2] == 1:
ones.append(poker2)
elif dic[poker2] == 2:
pairs.append(poker2)
                # Four with two singles
for i in range(len(ones)):
for j in range(i + 1, len(ones)):
combs.append({'type':COMB_TYPE.FOURTH_TWO_ONES, 'main':poker, \
'component':[poker, poker, poker, poker, ones[i], ones[j]]})
                # Four with two pairs
for i in range(len(pairs)):
combs.append({'type':COMB_TYPE.FOURTH_TWO_ONES, 'main':poker, \
'component': [poker, poker, poker, poker, pairs[i], pairs[i]]})
for j in range(i + 1, len(pairs)):
combs.append({'type':COMB_TYPE.FOURTH_TWO_PAIRS, 'main':poker, \
'component': [poker, poker, poker, poker, pairs[i], pairs[i], pairs[j], pairs[j]]})
    # All straight combinations
    # COMB_TYPE.STRIGHT * len(straight) marks the straight type, so straights of different lengths are different types
for straight in create_straight(list(set(pokers)), 5):
combs.append({'type':COMB_TYPE.STRIGHT * len(straight), 'main': straight[0], 'component': straight})
    # Return every possible combination
return combs
# Build all straights of at least min_length from a list of ranks
def create_straight(list_of_nums, min_length):
a = sorted(list_of_nums)
lens = len(a)
for start in range(0, lens):
for end in range(start, lens):
if a[end] - a[start] != end - start:
break
elif end - start >= min_length - 1:
yield list(range(a[start], a[end] + 1))
# Count the occurrences of each element in the list
def counter(pokers):
dic = {}
for poker in pokers:
dic[poker] = pokers.count(poker)
return dic
# comb1 was played first; can the later comb2 beat comb1?
# 1. Same type: compare the main values, the larger main wins
# 2. A bomb beats every other type
# 3. Otherwise, with different (non-bomb) types, the later hand loses
def can_beat(comb1, comb2):
if not comb2 or comb2['type'] == COMB_TYPE.PASS:
return False
if not comb1 or comb1['type'] == COMB_TYPE.PASS:
return True
if comb1['type'] == comb2['type']:
return comb2['main'] > comb1['main']
elif comb2['type'] == COMB_TYPE.BOMB:
return True
else:
return False
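# A quick illustrative check of the rules above, using the same dict layout as the
# rest of this module (the specific hands are example values, not fixed test data):
_pair_9 = {'type': COMB_TYPE.PAIR, 'main': 9, 'component': [9, 9]}
_pair_10 = {'type': COMB_TYPE.PAIR, 'main': 10, 'component': [10, 10]}
_bomb_3 = {'type': COMB_TYPE.BOMB, 'main': 3, 'component': [3, 3, 3, 3]}
assert can_beat(_pair_9, _pair_10)      # same type, larger main wins
assert can_beat(_pair_10, _bomb_3)      # a bomb beats any non-bomb hand
assert not can_beat(_bomb_3, _pair_10)  # a non-bomb cannot answer a bomb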
# Given pokers, return the cards remaining after playing hand
# The component field lists the played cards, so all hand types are handled uniformly
def make_hand(pokers, hand):
poker_clone = pokers[:]
for poker in hand['component']:
poker_clone.remove(poker)
return poker_clone
def make_bare_hand(pokers, hand):
for poker in hand:
pokers.remove(poker)
# Simulate each move: my_pokers are my current cards, enemy_pokers are the opponent's cards
# raider records the winning hand for each position, last_hand is the opponent's previous hand, cache memoizes positions and their outcomes
def hand_out(my_pokers, enemy_pokers, raider, last_hand = None, cache = {}):
    # Boundary conditions that end the game
if not my_pokers:
return True
if not enemy_pokers:
return False
    # If there is no previous hand, treat it as HAND_PASS
if last_hand is None:
last_hand = HAND_PASS
    # Look the position up in the cache
key = str((my_pokers, enemy_pokers, last_hand['component']))
if key in cache:
return cache[key]
    # Depth-first search over the possible moves; return True as soon as a winning branch is found
for current_hand in get_all_hands(my_pokers):
        # The right to lead passes to the opponent in two cases:
        # 1. The current hand beats the previous one, so the opponent must respond
        # 2. The current hand is a PASS and the opponent did not pass before
if can_beat(last_hand, current_hand) or \
(last_hand['type'] != COMB_TYPE.PASS and current_hand['type'] == COMB_TYPE.PASS):
if not hand_out(enemy_pokers, make_hand(my_pokers, current_hand), raider, current_hand, cache):
raider[key] = current_hand
cache[key] = True
return True
    # Every option was tried and none wins, so return False
# print(False, ':', key)
cache[key] = False
return False
def trans(c):
return {
't':10,
'j':11,
'q':12,
'k':13,
'1':14,
'2':16,
'w':18,
'W':19
}.get(c)
# input function: reads the cards as a single string
# t stands for 10, W for the big joker and w for the little joker
def get_input(vec):
s = input()
for i in range(len(s)):
if s[i] >= '3' and s[i] <= '9':
vec.append(int(s[i]))
else:
vec.append(trans(s[i]))
if __name__ == '__main__':
import time
    # Endgame scenario 1
    # Whether triple-with-single is allowed
ALLOW_THREE_ONE = True
    # Whether triple-with-pair is allowed
ALLOW_THREE_TWO = False
    # Whether four-with-two is allowed
ALLOW_FOUR_TWO = True
lord = []
farmer = []
print('input computer\'s cards')
get_input(lord)
print("computer's cards: {}".format(lord))
print('input your cards')
get_input(farmer)
print("your cards: {}".format(farmer))
print(farmer)
    # raider records the winning hand for each position found during the search
raider = {}
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
print("Please wait for the solution")
result = hand_out(farmer, lord, raider)
    elapsed = (time.perf_counter() - start)
print("Solve result:", result)
print("Time elapsed:", elapsed)
if result:
last_hand = []
while farmer:
key = str((farmer, lord, last_hand))
my_hand = raider[key]['component']
print('please play:', str(my_hand))
make_bare_hand(farmer, my_hand)
if not farmer:
break
print('input computer\'s hand:')
last_hand = []
get_input(last_hand)
make_bare_hand(lord, last_hand)
print('finished!')
| 28.210728
| 129
| 0.556159
|
c754a4dec31807194fb9f45a727f35da8a7d58f1
| 795
|
py
|
Python
|
ws2122-lspm/Lib/site-packages/pm4py/algo/conformance/antialignments/variants/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-19T04:02:46.000Z
|
2022-01-19T04:02:46.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/algo/conformance/antialignments/variants/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2021-11-19T07:21:48.000Z
|
2021-11-19T07:21:48.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/algo/conformance/antialignments/variants/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-14T17:15:38.000Z
|
2022-01-14T17:15:38.000Z
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.algo.conformance.antialignments.variants import discounted_a_star
| 44.166667
| 76
| 0.744654
|
bacbf38fda67ef991b1de8f7521d21124fa94330
| 389
|
py
|
Python
|
profiles_api/urls.py
|
foreverals2002/profiles_rest_api
|
176df33423e03c2b3599960ede046c7e9922b08f
|
[
"MIT"
] | null | null | null |
profiles_api/urls.py
|
foreverals2002/profiles_rest_api
|
176df33423e03c2b3599960ede046c7e9922b08f
|
[
"MIT"
] | null | null | null |
profiles_api/urls.py
|
foreverals2002/profiles_rest_api
|
176df33423e03c2b3599960ede046c7e9922b08f
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
urlpatterns = [
path("hello-view/", views.HelloApiView.as_view()),
path('', include(router.urls))
]
| 24.3125
| 79
| 0.766067
|
06df4b92069334f1063e6a6dcbb8672013aba3e9
| 2,099
|
py
|
Python
|
tests/test_server_info.py
|
handycz/opcua-mock
|
cc3aab7a847ae5c1ff9fda64f2e3cff13d9b5878
|
[
"MIT"
] | null | null | null |
tests/test_server_info.py
|
handycz/opcua-mock
|
cc3aab7a847ae5c1ff9fda64f2e3cff13d9b5878
|
[
"MIT"
] | 4
|
2022-03-24T12:20:30.000Z
|
2022-03-24T12:50:54.000Z
|
tests/test_server_info.py
|
handycz/opcua-mock
|
cc3aab7a847ae5c1ff9fda64f2e3cff13d9b5878
|
[
"MIT"
] | null | null | null |
import asyncio
import pytest
from uamockapp.server import MockServer
@pytest.mark.asyncio
async def test_server_read_data_image(mock_server: MockServer):
data = await mock_server.get_data_image()
expected = dict({
'1:Var1': 15,
'1:Var2': 11,
'1:Obj/2:Var3': 101
})
assert data.keys() == expected.keys()
assert [item.value for item in data.values()] == list(expected.values())
@pytest.mark.asyncio
async def test_server_list_functions(mock_server: MockServer):
await mock_server.on_call(
"RunRobotThreeArgs", lambda: None, (int, int, str)
)
functions1 = await mock_server.get_function_list()
await mock_server.on_call(
"StopRobotNoArg", lambda: None
)
functions2 = await mock_server.get_function_list()
assert len(functions1) == 1
assert len(functions2) == 2
assert functions1[0].name == "RunRobotThreeArgs"
assert functions1[0].args == ["int", "int", "str"]
assert functions2[0].name == "RunRobotThreeArgs"
assert functions2[0].args == ["int", "int", "str"]
assert functions2[1].name == "StopRobotNoArg"
assert functions2[1].args is None
@pytest.mark.asyncio
async def test_server_onchange_list(mock_server: MockServer):
for val in range(5):
await mock_server.write("Var1", val)
await mock_server.write("Var2", val * 2)
await asyncio.sleep(0.1)
await mock_server.on_change(
"Var1", lambda: None, int
)
onchange1 = await mock_server.get_onchange_list()
await mock_server.on_change(
"Var2", lambda: None
)
onchange2 = await mock_server.get_onchange_list()
assert len(onchange1) == 1
assert len(onchange2) == 2
assert onchange1[0].var_name == "Var1"
assert [sample.value for sample in onchange1[0].history] == [4, 3, 2, 1, 0, 15]
assert onchange2[0].var_name == "Var1"
assert [sample.value for sample in onchange2[0].history] == [4, 3, 2, 1, 0, 15]
assert onchange2[1].var_name == "Var2"
assert [sample.value for sample in onchange2[1].history] == [8, 6, 4, 2, 0, 11]
| 27.618421
| 83
| 0.661744
|
02bf58b7b2e8fbdf9d40e9a5ea002c8d58b7649a
| 1,826
|
py
|
Python
|
BptImporter/BptImporter.py
|
blindstitch/BptImporter
|
c1f815a0eec4bb270a23de4b75191bb22ad104ae
|
[
"MIT"
] | 1
|
2019-05-10T03:49:50.000Z
|
2019-05-10T03:49:50.000Z
|
BptImporter/BptImporter.py
|
blindstitch/BptImporter
|
c1f815a0eec4bb270a23de4b75191bb22ad104ae
|
[
"MIT"
] | null | null | null |
BptImporter/BptImporter.py
|
blindstitch/BptImporter
|
c1f815a0eec4bb270a23de4b75191bb22ad104ae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pandas
import re
def sanitize(string):
string = string.lower()
string = re.sub(r' ','_',string)
string = re.sub(r'[^a-zA-Z_]','',string)
return string
class BptImporter():
playName = ''
dirtyData = ''
headers = []
cleanedData = pandas.DataFrame()
def __init__(self, name):
self.playName = name
def importData(self, path):
self.dirtyData = open(path,'r').read()
self.dirtyData = self.dirtyData.split('\n')
self.dirtyData = self.dirtyData[4:] # Top four lines are junk
self.dirtyData = [line.split('\t') for line in self.dirtyData]
self.headers = self.dirtyData[1]
self.dirtyData = [line for line in self.dirtyData if len(line[0]) > 0]
self.dirtyData = [line for line in self.dirtyData if line != self.headers]
self.dirtyData = [line for line in self.dirtyData if line[0] != 'None']
def processData(self):
sectionHeads = [idx for idx, x in enumerate(self.dirtyData) if len(x) == 1]
for idx in range(len(sectionHeads)):
if len(self.dirtyData[sectionHeads[idx] + 1]) == 1:
pass
else:
groupTicketClass = sanitize(self.dirtyData[sectionHeads[idx]][0])
groupStart = sectionHeads[idx] + 1
if idx != (len(sectionHeads) - 1):
groupEnd = sectionHeads[idx+1] - 1
else: # End of data
groupEnd = len(self.dirtyData) - 1
df = pandas.DataFrame(self.dirtyData[groupStart:groupEnd], columns=self.headers)
df['ticket_purchase_type'] = groupTicketClass
self.cleanedData = pandas.concat((self.cleanedData,df), ignore_index=True)
def getData(self):
return self.cleanedData
| 35.803922
| 96
| 0.591457
|
715a71410b6d4f21281942354e3f8728191f885b
| 5,514
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
bytn-team/byt
|
a978b71d938fceeaba3a03856d0ba91f65bbb93c
|
[
"MIT"
] | 1
|
2020-08-07T15:00:09.000Z
|
2020-08-07T15:00:09.000Z
|
contrib/seeds/makeseeds.py
|
bytn-team/byt
|
a978b71d938fceeaba3a03856d0ba91f65bbb93c
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
bytn-team/byt
|
a978b71d938fceeaba3a03856d0ba91f65bbb93c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/BYTNCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.05814
| 186
| 0.566921
|
0b51f2877c48f7b9194404bfd2a62f1b753d9471
| 608
|
py
|
Python
|
runoob/basic_tutorial/greater.py
|
zeroonegit/python
|
919f8bb14ae91e37e42ff08192df24b60135596f
|
[
"MIT"
] | 1
|
2017-03-30T00:43:40.000Z
|
2017-03-30T00:43:40.000Z
|
runoob/basic_tutorial/greater.py
|
QuinceySun/Python
|
919f8bb14ae91e37e42ff08192df24b60135596f
|
[
"MIT"
] | null | null | null |
runoob/basic_tutorial/greater.py
|
QuinceySun/Python
|
919f8bb14ae91e37e42ff08192df24b60135596f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
############################
# File Name: greater.py
# Author: One Zero
# Mail: zeroonegit@gmail.com
# Created Time: 2015-12-28 00:09:13
############################
# Define the lcm helper function
def lcm(x, y):
    # Pick the larger of the two numbers
if x > y:
greater = x
else:
greater = y
while(True):
if ((greater % x == 0) and (greater % y == 0)):
lcm = greater
break
greater += 1
return lcm
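# Quick reference check for the helper above (illustrative values only):
assert lcm(4, 6) == 12
assert lcm(21, 6) == 42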
# Get user input
num1 = int(input('Enter the first number: '))
num2 = int(input('Enter the second number: '))
print('The least common multiple of {} and {} is {}'.format(num1, num2, lcm(num1, num2)))
| 19
| 63
| 0.481908
|
98e138c9f680371a29cb3c7588e0b7e0a6688d71
| 25,339
|
py
|
Python
|
farm/modeling/biadaptive_model.py
|
AymericSallet/farmlibrary
|
b2dae8e078ae1e77216819f7dad4dff407817d72
|
[
"Apache-2.0"
] | 1
|
2021-07-06T07:22:41.000Z
|
2021-07-06T07:22:41.000Z
|
farm/modeling/biadaptive_model.py
|
AymericSallet/farmlibrary
|
b2dae8e078ae1e77216819f7dad4dff407817d72
|
[
"Apache-2.0"
] | null | null | null |
farm/modeling/biadaptive_model.py
|
AymericSallet/farmlibrary
|
b2dae8e078ae1e77216819f7dad4dff407817d72
|
[
"Apache-2.0"
] | 1
|
2022-01-27T19:06:04.000Z
|
2022-01-27T19:06:04.000Z
|
import copy
import json
import logging
import os
from argparse import Namespace
from pathlib import Path
import multiprocessing
import numpy
import torch
from torch import nn
from farm.data_handler.processor import TextSimilarityProcessor
from farm.data_handler.data_silo import DataSilo
from farm.modeling.language_model import LanguageModel
from farm.modeling.prediction_head import PredictionHead, TextSimilarityHead
from farm.modeling.tokenization import Tokenizer
from farm.utils import MLFlowLogger as MlLogger, stack
logger = logging.getLogger(__name__)
class BaseBiAdaptiveModel:
"""
Base Class for implementing AdaptiveModel with frameworks like PyTorch and ONNX.
"""
subclasses = {}
def __init_subclass__(cls, **kwargs):
""" This automatically keeps track of all available subclasses.
Enables generic load() for all specific AdaptiveModel implementation.
"""
super().__init_subclass__(**kwargs)
cls.subclasses[cls.__name__] = cls
def __init__(self, prediction_heads):
self.prediction_heads = prediction_heads
@classmethod
def load(cls, **kwargs):
"""
Load corresponding AdaptiveModel Class(AdaptiveModel/ONNXAdaptiveModel) based on the
files in the load_dir.
:param kwargs: arguments to pass for loading the model.
:return: instance of a model
"""
if (Path(kwargs["load_dir"]) / "model.onnx").is_file():
model = cls.subclasses["ONNXBiAdaptiveModel"].load(**kwargs)
else:
model = cls.subclasses["BiAdaptiveModel"].load(**kwargs)
return model
def logits_to_preds(self, logits, **kwargs):
"""
Get predictions from all prediction heads.
:param logits: logits, can vary in shape and type, depending on task
:type logits: object
:param label_maps: Maps from label encoding to label string
        :type label_maps: dict
:return: A list of all predictions from all prediction heads
"""
all_preds = []
# collect preds from all heads
for head, logits_for_head in zip(self.prediction_heads, logits):
preds = head.logits_to_preds(logits=logits_for_head, **kwargs)
all_preds.append(preds)
return all_preds
def formatted_preds(self, logits, language_model1, language_model2, **kwargs):
"""
Format predictions to strings for inference output
:param logits: model logits
:type logits: torch.tensor
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return: predictions in the right format
"""
n_heads = len(self.prediction_heads)
if n_heads == 1:
preds_final = []
# This try catch is to deal with the fact that sometimes we collect preds before passing it to
# formatted_preds (see Inferencer._get_predictions_and_aggregate()) and sometimes we don't
# (see Inferencer._get_predictions())
try:
preds = kwargs["preds"]
temp = [y[0] for y in preds]
preds_flat = [item for sublist in temp for item in sublist]
kwargs["preds"] = preds_flat
except KeyError:
kwargs["preds"] = None
head = self.prediction_heads[0]
logits_for_head = logits[0]
preds = head.formatted_preds(logits=logits_for_head, **kwargs)
# TODO This is very messy - we need better definition of what the output should look like
if type(preds) == list:
preds_final += preds
elif type(preds) == dict and "predictions" in preds:
preds_final.append(preds)
return preds_final
def connect_heads_with_processor(self, tasks, require_labels=True):
"""
Populates prediction head with information coming from tasks.
:param tasks: A dictionary where the keys are the names of the tasks and the values are the details of the task (e.g. label_list, metric, tensor name)
        :param require_labels: If True, an error will be thrown when a task is not supplied with labels
:return:
"""
for head in self.prediction_heads:
head.label_tensor_name = tasks[head.task_name]["label_tensor_name"]
label_list = tasks[head.task_name]["label_list"]
if not label_list and require_labels:
raise Exception(f"The task \'{head.task_name}\' is missing a valid set of labels")
label_list = tasks[head.task_name]["label_list"]
head.label_list = label_list
num_labels = len(label_list)
head.metric = tasks[head.task_name]["metric"]
@classmethod
def _get_prediction_head_files(cls, load_dir, strict=True):
load_dir = Path(load_dir)
files = os.listdir(load_dir)
config_files = [
load_dir / f
for f in files
if "config.json" in f and "prediction_head" in f
]
# sort them to get correct order in case of multiple prediction heads
config_files.sort()
return config_files
def loss_per_head_sum(loss_per_head, global_step=None, batch=None):
"""
Sums the losses of all prediction heads (the default loss aggregation).
Input: loss_per_head (list of tensors), global_step (int), batch (dict)
Output: aggregated loss (tensor)
"""
return sum(loss_per_head)
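# NOTE (editor): hedged sketch, not part of the original FARM code base. It illustrates the
# shape a custom loss aggregation function can take (here a weighted sum across prediction
# heads, as accepted by BiAdaptiveModel below); the 0.5/1.0 weights are purely illustrative.
def loss_per_head_weighted_sum_example(loss_per_head, global_step=None, batch=None):
    """
    Illustrative aggregation: down-weight the first head's loss and sum the rest.
    Input: loss_per_head (list of tensors), global_step (int), batch (dict)
    Output: aggregated loss (tensor)
    """
    weights = [0.5] + [1.0] * (len(loss_per_head) - 1)
    return sum(w * l for w, l in zip(weights, loss_per_head))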
class BiAdaptiveModel(nn.Module, BaseBiAdaptiveModel):
""" PyTorch implementation containing all the modelling needed for your NLP task. Combines 2 language
models for representation of 2 sequences and a prediction head. Allows for gradient flow back to the 2 language model components."""
def __init__(
self,
language_model1,
language_model2,
prediction_heads,
embeds_dropout_prob=0.1,
device="cuda",
lm1_output_types=["per_sequence"],
lm2_output_types=["per_sequence"],
loss_aggregation_fn=None,
):
"""
:param language_model1: Any model that turns token ids into vector representations
:type language_model1: LanguageModel
:param language_model2: Any model that turns token ids into vector representations
:type language_model2: LanguageModel
:param prediction_heads: A list of models that take 2 sequence embeddings and return logits for a given task
:type prediction_heads: list
:param embeds_dropout_prob: The probability that a value in the embeddings returned by any of the 2
language model will be zeroed.
:type embeds_dropout_prob: float
:param lm1_output_types: How to extract the embeddings from the final layer of the first language model. When set
to "per_token", one embedding will be extracted per input token. If set to
"per_sequence", a single embedding will be extracted to represent the full
input sequence. Can either be a single string, or a list of strings,
one for each prediction head.
:type lm1_output_types: list or str
:param lm2_output_types: How to extract the embeddings from the final layer of the second language model. When set
to "per_token", one embedding will be extracted per input token. If set to
"per_sequence", a single embedding will be extracted to represent the full
input sequence. Can either be a single string, or a list of strings,
one for each prediction head.
:type lm2_output_types: list or str
:param device: The device on which this model will operate. Either "cpu" or "cuda".
:param loss_aggregation_fn: Function to aggregate the loss of multiple prediction heads.
Input: loss_per_head (list of tensors), global_step (int), batch (dict)
Output: aggregated loss (tensor)
Default is a simple sum:
`lambda loss_per_head, global_step=None, batch=None: sum(loss_per_head)`
However, you can pass more complex functions that depend on the
current step (e.g. for round-robin style multitask learning) or the actual
content of the batch (e.g. certain labels)
Note: The loss at this stage is per sample, i.e one tensor of
shape (batchsize) per prediction head.
:type loss_aggregation_fn: function
"""
super(BiAdaptiveModel, self).__init__()
self.device = device
self.language_model1 = language_model1.to(device)
self.lm1_output_dims = language_model1.get_output_dims()
self.language_model2 = language_model2.to(device)
self.lm2_output_dims = language_model2.get_output_dims()
self.dropout1 = nn.Dropout(embeds_dropout_prob)
self.dropout2 = nn.Dropout(embeds_dropout_prob)
self.prediction_heads = nn.ModuleList([ph.to(device) for ph in prediction_heads])
self.lm1_output_types = (
[lm1_output_types] if isinstance(lm1_output_types, str) else lm1_output_types
)
self.lm2_output_types = (
[lm2_output_types] if isinstance(lm2_output_types, str) else lm2_output_types
)
self.log_params()
# default loss aggregation function is a simple sum (without using any of the optional params)
if not loss_aggregation_fn:
loss_aggregation_fn = loss_per_head_sum
self.loss_aggregation_fn = loss_aggregation_fn
def save(self, save_dir, lm1_name="lm1", lm2_name="lm2"):
"""
Saves the 2 language model weights and respective config_files in directories lm1 and lm2 within save_dir.
:param save_dir: path to save to
:type save_dir: Path
"""
os.makedirs(save_dir, exist_ok=True)
if not os.path.exists(Path.joinpath(save_dir, Path(lm1_name))):
os.makedirs(Path.joinpath(save_dir, Path(lm1_name)))
if not os.path.exists(Path.joinpath(save_dir, Path(lm2_name))):
os.makedirs(Path.joinpath(save_dir, Path(lm2_name)))
self.language_model1.save(Path.joinpath(save_dir, Path(lm1_name)))
self.language_model2.save(Path.joinpath(save_dir, Path(lm2_name)))
for i, ph in enumerate(self.prediction_heads):
logger.info("prediction_head saving")
ph.save(save_dir, i)
@classmethod
def load(cls, load_dir, device, strict=False, lm1_name="lm1", lm2_name="lm2", processor=None):
"""
Loads a BiAdaptiveModel from a directory. The directory must contain:
* directory "lm1_name" with following files:
-> language_model.bin
-> language_model_config.json
* directory "lm2_name" with following files:
-> language_model.bin
-> language_model_config.json
* prediction_head_X.bin multiple PH possible
* prediction_head_X_config.json
* processor_config.json config for transforming input
* vocab.txt vocab file for language model, turning text to Wordpiece Token
* special_tokens_map.json
:param load_dir: location where adaptive model is stored
:type load_dir: Path
:param device: to which device we want to send the model, either cpu or cuda
:type device: torch.device
:param lm1_name: the name to assign to the first loaded language model (for encoding queries)
:type lm1_name: str
:param lm2_name: the name to assign to the second loaded language model (for encoding context/passages)
:type lm2_name: str
:param strict: whether to strictly enforce that the keys loaded from saved model match the ones in
the PredictionHead (see torch.nn.module.load_state_dict()).
Set to `False` for backwards compatibility with PHs saved with older version of FARM.
:type strict: bool
:param processor: populates prediction head with information coming from tasks
:type processor: Processor
"""
# Language Model
if lm1_name:
language_model1 = LanguageModel.load(os.path.join(load_dir, lm1_name))
else:
language_model1 = LanguageModel.load(load_dir)
if lm2_name:
language_model2 = LanguageModel.load(os.path.join(load_dir, lm2_name))
else:
language_model2 = LanguageModel.load(load_dir)
# Prediction heads
ph_config_files = cls._get_prediction_head_files(load_dir)
prediction_heads = []
ph_output_type = []
for config_file in ph_config_files:
head = PredictionHead.load(config_file, strict=False, load_weights=False)
prediction_heads.append(head)
ph_output_type.append(head.ph_output_type)
model = cls(language_model1, language_model2, prediction_heads, 0.1, device)
if processor:
model.connect_heads_with_processor(processor.tasks)
return model
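    # NOTE (editor): illustrative call, not part of the original code; the path, device and
    # processor below are assumptions:
    #   model = BiAdaptiveModel.load(Path("saved_models/dpr"), device="cpu", processor=processor)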
def logits_to_loss_per_head(self, logits, **kwargs):
"""
Collect losses from each prediction head.
:param logits: logits, can vary in shape and type, depending on task.
:type logits: object
:return: The per sample, per prediction head loss whose first two dimensions have length n_pred_heads, batch_size
"""
all_losses = []
for head, logits_for_one_head in zip(self.prediction_heads, logits):
# check if PredictionHead connected to Processor
assert hasattr(head, "label_tensor_name"), \
(f"Label_tensor_names are missing inside the {head.task_name} Prediction Head. Did you connect the model"
" with the processor through either 'model.connect_heads_with_processor(processor.tasks)'"
" or by passing the processor to the Adaptive Model?")
all_losses.append(head.logits_to_loss(logits=logits_for_one_head, **kwargs))
return all_losses
def logits_to_loss(self, logits, global_step=None, **kwargs):
"""
Get losses from all prediction heads & reduce to single loss *per sample*.
:param logits: logits, can vary in shape and type, depending on task
:type logits: object
:param global_step: number of current training step
:type global_step: int
:param kwargs: placeholder for passing generic parameters.
Note: Contains the batch (as dict of tensors), when called from Trainer.train().
:type kwargs: object
:return loss: torch.tensor that is the per sample loss (len: batch_size)
"""
all_losses = self.logits_to_loss_per_head(logits, **kwargs)
# This aggregates the loss per sample across multiple prediction heads
# Default is sum(), but you can configure any fn that takes [Tensor, Tensor ...] and returns [Tensor]
loss = self.loss_aggregation_fn(all_losses, global_step=global_step, batch=kwargs)
return loss
def prepare_labels(self, **kwargs):
"""
Label conversion to original label space, per prediction head.
:param label_maps: dictionary for mapping ids to label strings
:type label_maps: dict[int:str]
:return: labels in the right format
"""
all_labels = []
# for head, label_map_one_head in zip(self.prediction_heads):
# labels = head.prepare_labels(label_map=label_map_one_head, **kwargs)
# all_labels.append(labels)
for head in self.prediction_heads:
labels = head.prepare_labels(**kwargs)
all_labels.append(labels)
return all_labels
def forward(self, **kwargs):
"""
Push data through the whole model and return logits. The data propagates through
the first and the second language model based on the tensor names, and both
encodings are then passed through each of the attached prediction heads.
:param kwargs: Holds all arguments that need to be passed to both the language models and prediction head(s).
:return: all logits as torch.tensor or multiple tensors.
"""
# Run forward pass of both language models
pooled_output = self.forward_lm(**kwargs)
# Run forward pass of (multiple) prediction heads using the output from above
all_logits = []
if len(self.prediction_heads) > 0:
for head, lm1_out, lm2_out in zip(self.prediction_heads, self.lm1_output_types, self.lm2_output_types):
# Choose relevant vectors from LM as output and perform dropout
if pooled_output[0] is not None:
if lm1_out == "per_sequence" or lm1_out == "per_sequence_continuous":
output1 = self.dropout1(pooled_output[0])
else:
raise ValueError(
"Unknown extraction strategy from BiAdaptive language_model1: {}".format(lm1_out)
)
else:
output1 = None
if pooled_output[1] is not None:
if lm2_out == "per_sequence" or lm2_out == "per_sequence_continuous":
output2 = self.dropout2(pooled_output[1])
else:
raise ValueError(
"Unknown extraction strategy from BiAdaptive language_model2: {}".format(lm2_out)
)
else:
output2 = None
embedding1, embedding2 = head(output1, output2)
all_logits.append(tuple([embedding1, embedding2]))
else:
# just return LM output (e.g. useful for extracting embeddings at inference time)
all_logits.append((pooled_output))
return all_logits
def forward_lm(self, **kwargs):
"""
Forward pass for the BiAdaptive model.
:param kwargs:
:return: 2 tensors of pooled_output from the 2 language models
"""
pooled_output = [None, None]
if "query_input_ids" in kwargs.keys():
pooled_output1, hidden_states1 = self.language_model1(**kwargs)
pooled_output[0] = pooled_output1
if "passage_input_ids" in kwargs.keys():
pooled_output2, hidden_states2 = self.language_model2(**kwargs)
pooled_output[1] = pooled_output2
return tuple(pooled_output)
def log_params(self):
"""
Logs parameters to the generic logger MlLogger
"""
params = {
"lm1_type": self.language_model1.__class__.__name__,
"lm1_name": self.language_model1.name,
"lm1_output_types": ",".join(self.lm1_output_types),
"lm2_type": self.language_model2.__class__.__name__,
"lm2_name": self.language_model2.name,
"lm2_output_types": ",".join(self.lm2_output_types),
"prediction_heads": ",".join(
[head.__class__.__name__ for head in self.prediction_heads])
}
try:
MlLogger.log_params(params)
except Exception as e:
logger.warning(f"ML logging didn't work: {e}")
def verify_vocab_size(self, vocab_size1, vocab_size2):
""" Verifies that the model fits to the tokenizer vocabulary.
They could diverge in case of custom vocabulary added via tokenizer.add_tokens()"""
model1_vocab_len = self.language_model1.model.resize_token_embeddings(new_num_tokens=None).num_embeddings
msg = f"Vocab size of tokenizer {vocab_size1} doesn't match with model {model1_vocab_len}. " \
"If you added a custom vocabulary to the tokenizer, " \
"make sure to supply 'n_added_tokens' to LanguageModel.load() and BertStyleLM.load()"
assert vocab_size1 == model1_vocab_len, msg
model2_vocab_len = self.language_model2.model.resize_token_embeddings(new_num_tokens=None).num_embeddings
msg = f"Vocab size of tokenizer {vocab_size1} doesn't match with model {model2_vocab_len}. " \
"If you added a custom vocabulary to the tokenizer, " \
"make sure to supply 'n_added_tokens' to LanguageModel.load() and BertStyleLM.load()"
assert vocab_size2 == model2_vocab_len, msg
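    # NOTE (editor): illustrative call, not part of the original code; the tokenizer objects
    # are assumptions. Typically the tokenizer vocab sizes are passed in directly, e.g.
    #   model.verify_vocab_size(len(query_tokenizer), len(passage_tokenizer))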
def get_language(self):
return self.language_model1.language, self.language_model2.language
def convert_to_transformers(self):
from transformers import DPRContextEncoder, DPRQuestionEncoder, AutoModel
if len(self.prediction_heads) != 1:
raise ValueError(f"Currently conversion only works for models with a SINGLE prediction head. "
f"Your model has {len(self.prediction_heads)}")
if self.prediction_heads[0].model_type == "text_similarity":
# init model
if "dpr" in self.language_model1.model.config.model_type:
transformers_model1 = DPRQuestionEncoder(config=self.language_model1.model.config)
else:
transformers_model1 = AutoModel.from_config(config=self.language_model1.model.config)
if "dpr" in self.language_model2.model.config.model_type:
transformers_model2 = DPRContextEncoder(config=self.language_model2.model.config)
else:
transformers_model2 = AutoModel.from_config(config=self.language_model2.model.config)
# transfer weights for language model + prediction head
setattr(transformers_model1, transformers_model1.base_model_prefix,
getattr(self.language_model1.model, self.language_model1.model.base_model_prefix))
setattr(transformers_model2, transformers_model2.base_model_prefix,
getattr(self.language_model2.model, self.language_model2.model.base_model_prefix))
logger.warning("No prediction head weights are required for DPR")
else:
raise NotImplementedError(f"FARM -> Transformers conversion is not supported yet for"
f" prediction heads of type {self.prediction_heads[0].model_type}")
return transformers_model1, transformers_model2
@classmethod
def convert_from_transformers(cls, model_name_or_path1, model_name_or_path2, device, task_type, processor=None, similarity_function="dot_product"):
"""
Load a (downstream) model from huggingface's transformers format. Use cases:
- continue training in FARM (e.g. take a squad QA model and fine-tune on your own data)
- compare models without switching frameworks
- use model directly for inference
:param model_name_or_path1: local path of a saved model or name of a public one for Question Encoder
Exemplary public names:
- facebook/dpr-question_encoder-single-nq-base
- deepset/bert-large-uncased-whole-word-masking-squad2
:param model_name_or_path2: local path of a saved model or name of a public one for Context/Passage Encoder
Exemplary public names:
- facebook/dpr-ctx_encoder-single-nq-base
- deepset/bert-large-uncased-whole-word-masking-squad2
:param device: "cpu" or "cuda"
:param task_type: 'text_similarity'
More tasks coming soon ...
:param processor: populates prediction head with information coming from tasks
:type processor: Processor
:return: BiAdaptiveModel
"""
lm1 = LanguageModel.load(pretrained_model_name_or_path=model_name_or_path1, language_model_class="DPRQuestionEncoder")
lm2 = LanguageModel.load(pretrained_model_name_or_path=model_name_or_path2, language_model_class="DPRContextEncoder")
prediction_head = TextSimilarityHead(similarity_function=similarity_function)
#TODO Infer type of head automatically from config
if task_type == "text_similarity":
bi_adaptive_model = cls(language_model1=lm1,
language_model2=lm2,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.1,
lm1_output_types=["per_sequence"],
lm2_output_types=["per_sequence"],
device=device)
else:
raise NotImplementedError(f"Huggingface's transformer models of type {task_type} are not supported yet for BiAdaptive Models")
if processor:
bi_adaptive_model.connect_heads_with_processor(processor.tasks)
return bi_adaptive_model
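# NOTE (editor): minimal usage sketch, not part of the original module. The model names come
# from the docstring above; the device string and the fact that no processor is connected are
# assumptions made purely for illustration.
def _example_build_dpr_bi_adaptive_model():
    """Build a BiAdaptiveModel from the public DPR encoders (illustrative only)."""
    model = BiAdaptiveModel.convert_from_transformers(
        model_name_or_path1="facebook/dpr-question_encoder-single-nq-base",
        model_name_or_path2="facebook/dpr-ctx_encoder-single-nq-base",
        device="cpu",
        task_type="text_similarity",
    )
    return model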
| 48.449331
| 158
| 0.639528
|
636190b608602cc20459af491f011dd3ae6c41da
| 2,374
|
py
|
Python
|
FastSimulation/Tracking/python/iterativeTk_cff.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 2
|
2020-10-26T18:40:32.000Z
|
2021-04-10T16:33:25.000Z
|
FastSimulation/Tracking/python/iterativeTk_cff.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 30
|
2015-11-04T11:42:27.000Z
|
2021-12-01T07:56:34.000Z
|
FastSimulation/Tracking/python/iterativeTk_cff.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 8
|
2016-03-25T07:17:43.000Z
|
2021-07-08T17:11:21.000Z
|
##############################
# FastSim equivalent of RecoTracker/IterativeTracking/python/iterativeTk_cff.py
##############################
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
from TrackingTools.MaterialEffects.MaterialPropagatorParabolicMf_cff import *
#importing iterations directly from RecoTracker
from RecoTracker.IterativeTracking.InitialStep_cff import *
from RecoTracker.IterativeTracking.DetachedQuadStep_cff import *
from RecoTracker.IterativeTracking.HighPtTripletStep_cff import *
from RecoTracker.IterativeTracking.LowPtQuadStep_cff import *
from RecoTracker.IterativeTracking.DetachedTripletStep_cff import *
from RecoTracker.IterativeTracking.LowPtTripletStep_cff import *
from RecoTracker.IterativeTracking.PixelPairStep_cff import *
from RecoTracker.IterativeTracking.MixedTripletStep_cff import *
from RecoTracker.IterativeTracking.PixelLessStep_cff import *
from RecoTracker.IterativeTracking.TobTecStep_cff import *
# the following loads a dummy empty track collection
# such that FastSim can import earlyGeneralTracks_cfi from full tracking
# todo: actual implementation of JetCore iteration
from RecoTracker.IterativeTracking.JetCoreRegionalStep_cff import *
import RecoTracker.FinalTrackSelectors.earlyGeneralTracks_cfi
# todo, import MuonSeededStep_cff, preDuplicateMergingGeneralTracks_cfi, MergeTrackCollections_cff, ConversionStep_cff
generalTracksBeforeMixing = RecoTracker.FinalTrackSelectors.earlyGeneralTracks_cfi.earlyGeneralTracks.clone()
from RecoTracker.FinalTrackSelectors.trackTfClassifier_cfi import *
trackdnn_essource = cms.ESSource("EmptyESSource", recordName = cms.string("TfGraphRecord"), firstValid = cms.vuint32(1), iovIsRunNotTime = cms.bool(True) )
iterTracking = cms.Sequence(
InitialStep
+DetachedTripletStep
+LowPtTripletStep
+PixelPairStep
+MixedTripletStep
+PixelLessStep
+TobTecStep
+JetCoreRegionalStep
+generalTracksBeforeMixing)
_iterTracking_Phase1 = cms.Sequence(
InitialStep
+LowPtQuadStep
+HighPtTripletStep
+LowPtTripletStep
+DetachedQuadStep
+DetachedTripletStep
+PixelPairStep
+MixedTripletStep
+PixelLessStep
+TobTecStep
+JetCoreRegionalStep
+generalTracksBeforeMixing)
trackingPhase1.toReplaceWith(iterTracking, _iterTracking_Phase1)
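# NOTE (editor): illustrative sketch, not part of the original configuration. It only shows how
# a job configuration might schedule this FastSim iterative tracking sequence; the process name
# and the path label are assumptions.
#   process = cms.Process("FASTSIM")
#   process.iterTrackingPath = cms.Path(iterTracking)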
| 40.237288
| 155
| 0.820135
|
732df0b6ec0adf9a5f5bb2e97551e3d5016b16ec
| 1,776
|
py
|
Python
|
pyot/utils/decorators.py
|
andrewdge/Pyot
|
a0b44a4462fd643bb21fbdc349beb9546543997c
|
[
"MIT"
] | null | null | null |
pyot/utils/decorators.py
|
andrewdge/Pyot
|
a0b44a4462fd643bb21fbdc349beb9546543997c
|
[
"MIT"
] | null | null | null |
pyot/utils/decorators.py
|
andrewdge/Pyot
|
a0b44a4462fd643bb21fbdc349beb9546543997c
|
[
"MIT"
] | null | null | null |
from functools import wraps
def silence_event_loop_closed(func):
'''Silences the Exception `RuntimeError: Event loop is closed` in a class method.'''
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except RuntimeError as e:
if str(e) != 'Event loop is closed':
raise
return wrapper
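# NOTE (editor): usage sketch, not part of the original module. The class and scenario are
# assumptions; the decorator is typically applied to cleanup methods that may run after
# asyncio's event loop has already been closed.
class _ExampleTransportSketch:
    @silence_event_loop_closed
    def close(self):
        # Only this specific RuntimeError is swallowed; any other exception still propagates.
        raise RuntimeError('Event loop is closed')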
class cached_property:
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
A cached property can be made out of an existing method:
(e.g. ``url = cached_property(get_absolute_url)``).
"""
name = None
@staticmethod
def func(instance): # pylint: disable=method-hidden
raise TypeError(
'Cannot use cached_property instance without calling '
'__set_name__() on it.'
)
def __init__(self, func, name=None):
self.real_func = func
self.__doc__ = getattr(func, '__doc__')
def __set_name__(self, owner, name):
if self.name is None:
self.name = name
self.func = self.real_func
elif name != self.name:
raise TypeError(
"Cannot assign the same cached_property to two different names "
"(%r and %r)." % (self.name, name)
)
def __get__(self, instance, cls=None):
"""
Call the function and put the return value in instance.__dict__ so that
subsequent attribute access on the instance returns the cached value
instead of calling cached_property.__get__().
"""
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
| 31.714286
| 88
| 0.60473
|
b90419e46c0abd08c0bcb8ce119b9752d1585a5d
| 98,752
|
py
|
Python
|
src/run/RTyyyy_runcore.py
|
ben-github/NXP-MCUBootUtility
|
3ff9fa203d667844f83a08c855fef85723d2612e
|
[
"Apache-2.0"
] | null | null | null |
src/run/RTyyyy_runcore.py
|
ben-github/NXP-MCUBootUtility
|
3ff9fa203d667844f83a08c855fef85723d2612e
|
[
"Apache-2.0"
] | null | null | null |
src/run/RTyyyy_runcore.py
|
ben-github/NXP-MCUBootUtility
|
3ff9fa203d667844f83a08c855fef85723d2612e
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import math
import RTyyyy_rundef
import rundef
import boot
sys.path.append(os.path.abspath(".."))
from gen import RTyyyy_gencore
from gen import RTyyyy_gendef
from fuse import RTyyyy_fusedef
from ui import RTyyyy_uidef
from ui import uidef
from ui import uivar
from ui import uilang
from mem import RTyyyy_memdef
from boot import bltest
from boot import target
from utils import misc
def RTyyyy_createTarget(device, exeBinRoot):
# Build path to target directory and config file.
cpu = "MIMXRT1052"
if device == uidef.kMcuDevice_iMXRT1011:
cpu = "MIMXRT1011"
elif device == uidef.kMcuDevice_iMXRT1015:
cpu = "MIMXRT1015"
elif device == uidef.kMcuDevice_iMXRT102x:
cpu = "MIMXRT1021"
elif device == uidef.kMcuDevice_iMXRT105x:
cpu = "MIMXRT1052"
elif device == uidef.kMcuDevice_iMXRT106x:
cpu = "MIMXRT1062"
elif device == uidef.kMcuDevice_iMXRT1064:
cpu = "MIMXRT1064"
elif device == uidef.kMcuDevice_iMXRT117x:
cpu = "MIMXRT1176"
else:
pass
targetBaseDir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'targets', cpu)
# Check for existing target directory.
if not os.path.isdir(targetBaseDir):
targetBaseDir = os.path.join(os.path.dirname(exeBinRoot), 'src', 'targets', cpu)
if not os.path.isdir(targetBaseDir):
raise ValueError("Missing target directory at path %s" % targetBaseDir)
targetConfigFile = os.path.join(targetBaseDir, 'bltargetconfig.py')
# Check for config file existence.
if not os.path.isfile(targetConfigFile):
raise RuntimeError("Missing target config file at path %s" % targetConfigFile)
# Build locals dict by copying our locals and adjusting file path and name.
targetConfig = locals().copy()
targetConfig['__file__'] = targetConfigFile
targetConfig['__name__'] = 'bltargetconfig'
# Execute the target config script.
execfile(targetConfigFile, globals(), targetConfig)
# Create the target object.
tgt = target.Target(**targetConfig)
return tgt, targetBaseDir
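# NOTE (editor): illustrative call, not part of the original tool. The device constant and the
# executable root are assumptions:
#   tgt, targetBaseDir = RTyyyy_createTarget(uidef.kMcuDevice_iMXRT105x, exeBinRoot)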
##
# @brief
class secBootRTyyyyRun(RTyyyy_gencore.secBootRTyyyyGen):
def __init__(self, parent):
RTyyyy_gencore.secBootRTyyyyGen.__init__(self, parent)
if self.mcuSeries in uidef.kMcuSeries_iMXRTyyyy:
self.RTyyyy_initRun()
def RTyyyy_initRun( self ):
self.blhost = None
self.sdphost = None
self.tgt = None
self.cpuDir = None
self.sdphostVectorsDir = os.path.join(self.exeTopRoot, 'tools', 'sdphost', 'win', 'vectors')
self.blhostVectorsDir = os.path.join(self.exeTopRoot, 'tools', 'blhost2_3', 'win', 'vectors')
self.RTyyyy_isDeviceEnabledToOperate = True
self.bootDeviceMemId = None
self.bootDeviceMemBase = None
self.semcNandImageCopies = None
self.semcNandBlockSize = None
self.isFlexspiNorErasedForImage = False
self.mcuDeviceHabStatus = None
self.mcuDeviceBtFuseSel = None
self.mcuDeviceHwCryptoKey0Sel = None
self.mcuDeviceHwCryptoKey1Sel = None
self.comMemWriteUnit = 0x1
self.comMemEraseUnit = 0x1
self.comMemReadUnit = 0x1
self.sbLastSharedFuseBootCfg1 = RTyyyy_fusedef.kEfuseValue_Invalid
self.sbLastSharedFuseOtfadCfg = RTyyyy_fusedef.kEfuseValue_Invalid
self.RTyyyy_createMcuTarget()
def RTyyyy_createMcuTarget( self ):
self.tgt, self.cpuDir = RTyyyy_createTarget(self.mcuDevice, self.exeBinRoot)
def RTyyyy_getUsbid( self ):
self.RTyyyy_createMcuTarget()
return [self.tgt.romUsbVid, self.tgt.romUsbPid, self.tgt.flashloaderUsbVid, self.tgt.flashloaderUsbPid]
def RTyyyy_connectToDevice( self , connectStage):
if connectStage == uidef.kConnectStage_Rom:
# Create the target object.
self.RTyyyy_createMcuTarget()
xhost = None
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
xhost = 'sdp_'
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
xhost = ''
else:
pass
if self.isUartPortSelected:
xPeripheral = xhost + 'uart'
uartComPort = self.uartComPort
uartBaudrate = int(self.uartBaudrate)
usbVid = ''
usbPid = ''
elif self.isUsbhidPortSelected:
xPeripheral = xhost + 'usb'
uartComPort = ''
uartBaudrate = ''
usbVid = self.tgt.romUsbVid
usbPid = self.tgt.romUsbPid
else:
pass
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
self.sdphost = bltest.createBootloader(self.tgt,
self.sdphostVectorsDir,
xPeripheral,
uartBaudrate, uartComPort,
usbVid, usbPid)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
self.blhost = bltest.createBootloader(self.tgt,
self.blhostVectorsDir,
xPeripheral,
uartBaudrate, uartComPort,
usbVid, usbPid,
True)
else:
pass
elif connectStage == uidef.kConnectStage_Flashloader:
if self.isUartPortSelected:
blPeripheral = 'uart'
uartComPort = self.uartComPort
uartBaudrate = int(self.uartBaudrate)
usbVid = ''
usbPid = ''
elif self.isUsbhidPortSelected:
blPeripheral = 'usb'
uartComPort = ''
uartBaudrate = ''
usbVid = self.tgt.flashloaderUsbVid
usbPid = self.tgt.flashloaderUsbPid
else:
pass
self.blhost = bltest.createBootloader(self.tgt,
self.blhostVectorsDir,
blPeripheral,
uartBaudrate, uartComPort,
usbVid, usbPid,
True)
elif connectStage == uidef.kConnectStage_Reset:
self.tgt = None
else:
pass
def RTyyyy_pingRom( self ):
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
status, results, cmdStr = self.sdphost.errorStatus()
self.printLog(cmdStr)
return (status == boot.status.kSDP_Status_HabEnabled or status == boot.status.kSDP_Status_HabDisabled)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_CurrentVersion)
self.printLog(cmdStr)
return (status == boot.status.kStatus_Success)
else:
pass
def _getDeviceRegisterBySdphost( self, regAddr, regName, needToShow=True):
if self.tgt.hasSdpReadRegisterCmd:
filename = 'readReg.dat'
filepath = os.path.join(self.sdphostVectorsDir, filename)
status, results, cmdStr = self.sdphost.readRegister(regAddr, 32, 4, filename)
self.printLog(cmdStr)
if (status == boot.status.kSDP_Status_HabEnabled or status == boot.status.kSDP_Status_HabDisabled):
regVal = self.getVal32FromBinFile(filepath)
if needToShow:
self.printDeviceStatus(regName + " = " + self.convertLongIntHexText(str(hex(regVal))))
return regVal
else:
if needToShow:
self.printDeviceStatus(regName + " = --------")
return None
try:
os.remove(filepath)
except:
pass
else:
return None
def _readMcuDeviceRegisterUuid( self ):
self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_OCOTP_UUID1'], 'OCOTP->UUID[31:00]')
self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_OCOTP_UUID2'], 'OCOTP->UUID[63:32]')
def _readMcuDeviceRegisterSrcSmbr( self ):
self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_SRC_SBMR1'], 'SRC->SBMR1')
sbmr2 = self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_SRC_SBMR2'], 'SRC->SBMR2')
if sbmr2 != None:
bmod = ((sbmr2 & self.tgt.registerDefnDict['kRegisterMask_SRC_SBMR2_Bmod']) >> self.tgt.registerDefnDict['kRegisterShift_SRC_SBMR2_Bmod'])
if bmod == 0:
self.printDeviceStatus('BMOD[1:0] = 2\'b00 (Boot From Fuses)')
elif bmod == 1:
self.printDeviceStatus('BMOD[1:0] = 2\'b01 (Serial Downloader)')
elif bmod == 2:
self.printDeviceStatus('BMOD[1:0] = 2\'b10 (Internal Boot)')
else:
self.printDeviceStatus('BMOD[1:0] = 2\'b11 (Reserved)')
def RTyyyy_getMcuDeviceInfoViaRom( self ):
self.printDeviceStatus("--------MCU device Register----------")
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
# RT10yy supports the SDP protocol, but some devices (e.g. RT1011) don't support the Read Register command
self._readMcuDeviceRegisterUuid()
self._readMcuDeviceRegisterSrcSmbr()
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
# RT11yy doesn't support SDP protocol
pass
def getFlexramInfoViaRom( self ):
self.printDeviceStatus("----------FlexRAM memory-----------")
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
#gpr16 = self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_IOMUXC_GPR_GPR16'], 'IOMUXC_GPR->GPR16')
#if gpr16 == None:
# return
#if not (gpr16 & self.tgt.registerDefnDict['kRegisterMask_IOMUXC_GPR_GPR16_FlexramBankCfgSel']):
if True:
miscConf0 = self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_OCOTP_FlexramCfg'], 'OCOTP->MISC_CONF0[31:00]')
if miscConf0 != None:
self.printDeviceStatus('Assume that FlexRAM configuration is from eFuse')
defaultFlexramPart = (miscConf0 & RTyyyy_fusedef.kEfuseMask_DefaultFlexramPart) >> RTyyyy_fusedef.kEfuseShift_DefaultFlexramPart
self.printDeviceStatus(self.tgt.efuseDescDiffDict['0x6d0_miscconf0_bit19_16']['Default_FlexRAM_Partion'][defaultFlexramPart])
else:
#gpr17 = self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_IOMUXC_GPR_GPR17'], 'IOMUXC_GPR->GPR17')
#if gpr17 != None:
# self.printDeviceStatus('FlexRAM configuration is from IOMUXC_GPR Register')
pass
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
def getMcuDeviceHabStatus( self ):
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
if self.tgt.hasSdpReadRegisterCmd:
secConfig = self._getDeviceRegisterBySdphost( self.tgt.registerAddrDict['kRegisterAddr_SRC_SBMR2'], '', False)
if secConfig != None:
self.mcuDeviceHabStatus = ((secConfig & self.tgt.registerDefnDict['kRegisterMask_SRC_SBMR2_SecConfig']) >> self.tgt.registerDefnDict['kRegisterShift_SRC_SBMR2_SecConfig'])
if self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_FAB:
self.printDeviceStatus('HAB status = FAB')
elif self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_Open:
self.printDeviceStatus('HAB status = Open')
elif self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_Closed0 or self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_Closed1:
self.printDeviceStatus('HAB status = Closed')
else:
pass
else:
status, results, cmdStr = self.sdphost.errorStatus()
self.printLog(cmdStr)
if status == boot.status.kSDP_Status_HabEnabled:
self.mcuDeviceHabStatus = RTyyyy_fusedef.kHabStatus_Closed0
self.printDeviceStatus('HAB status = Closed')
elif status == boot.status.kSDP_Status_HabDisabled:
self.mcuDeviceHabStatus = RTyyyy_fusedef.kHabStatus_Open
self.printDeviceStatus('HAB status = Open')
else:
pass
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_FlashSecurityState)
self.printLog(cmdStr)
if status == boot.status.kStatus_Success:
if results[0] == 0:
self.mcuDeviceHabStatus = RTyyyy_fusedef.kHabStatus_Open
self.printDeviceStatus('HAB status = Open')
else:
self.mcuDeviceHabStatus = RTyyyy_fusedef.kHabStatus_Closed0
self.printDeviceStatus('HAB status = Closed')
else:
pass
else:
pass
def _selectFlashloader( self ):
flSrecFile = None
flBinFile = None
flLoadAddr = None
flJumpAddr = None
if self.flashloaderResident == None:
flSrecFile = os.path.join(self.cpuDir, 'flashloader.srec')
flBinFile = os.path.join(self.cpuDir, 'ivt_flashloader.bin')
flLoadAddr = self.tgt.flashloaderLoadAddr
flJumpAddr = self.tgt.flashloaderJumpAddr
elif self.flashloaderResident == 'itcm' or \
self.flashloaderResident == 'dtcm' or \
self.flashloaderResident == 'ocram':
flSrecFile = os.path.join(self.cpuDir, 'flexram_loader', self.flashloaderResident, 'flashloader.srec')
flBinFile = os.path.join(self.cpuDir, 'flexram_loader', self.flashloaderResident, 'ivt_flashloader.bin')
if self.flashloaderResident == 'ocram':
flLoadAddr = self.tgt.reservedRegionDict['ram'][1] + 1
else:
flLoadAddr = self.tgt.memoryRange[self.flashloaderResident].start + 0x200
flJumpAddr = flLoadAddr + RTyyyy_gendef.kIvtOffset_RAM_FLASHLOADER
else:
pass
return flSrecFile, flBinFile, flLoadAddr, flJumpAddr
def RTyyyy_jumpToFlashloader( self ):
flashloaderSrecFile, flashloaderBinFile, flashloaderLoadAddr, flashloaderJumpAddr = self._selectFlashloader()
if self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_Closed0 or self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_Closed1:
flashloaderBinFile = self.genSignedFlashloader(flashloaderSrecFile)
if flashloaderBinFile == None:
return False
elif self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_FAB or self.mcuDeviceHabStatus == RTyyyy_fusedef.kHabStatus_Open:
pass
else:
pass
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
status, results, cmdStr = self.sdphost.writeFile(flashloaderLoadAddr, flashloaderBinFile)
self.printLog(cmdStr)
if status != boot.status.kSDP_Status_HabEnabled and status != boot.status.kSDP_Status_HabDisabled:
return False
status, results, cmdStr = self.sdphost.jumpAddress(flashloaderJumpAddr)
self.printLog(cmdStr)
if status != boot.status.kSDP_Status_HabEnabled and status != boot.status.kSDP_Status_HabDisabled:
return False
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
status, results, cmdStr = self.blhost.loadImage(flashloaderBinFile)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
else:
pass
return True
def RTyyyy_pingFlashloader( self ):
status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_CurrentVersion)
self.printLog(cmdStr)
return (status == boot.status.kStatus_Success)
def readMcuDeviceFuseByBlhost( self, fuseIndex, fuseName, needToShow=True):
if not self.RTyyyy_isDeviceEnabledToOperate and self.isSbFileEnabledToGen:
return RTyyyy_fusedef.kEfuseValue_Blank
status, results, cmdStr = self.blhost.efuseReadOnce(fuseIndex)
self.printLog(cmdStr)
if (status == boot.status.kStatus_Success):
if needToShow:
self.printDeviceStatus(fuseName + " = " + self.convertLongIntHexText(str(hex(results[1]))))
if self.isSbFileEnabledToGen:
if fuseIndex == self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG1'] and self.sbLastSharedFuseBootCfg1 == RTyyyy_fusedef.kEfuseValue_Invalid:
self.sbLastSharedFuseBootCfg1 = results[1]
if fuseIndex == self.tgt.efusemapIndexDict['kEfuseIndex_OTFAD_CFG'] and self.sbLastSharedFuseOtfadCfg == RTyyyy_fusedef.kEfuseValue_Invalid:
self.sbLastSharedFuseOtfadCfg = results[1]
return results[1]
else:
if needToShow:
self.printDeviceStatus(fuseName + " = --------")
return None
def _readMcuDeviceFuseTester( self ):
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_TESTER0'], '(0x410) TESTER0')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_TESTER1'], '(0x420) TESTER1')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_TESTER2'], '(0x430) TESTER2')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_TESTER3'], '(0x440) TESTER3')
def _readMcuDeviceFuseBootCfg( self ):
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG0'], '(0x450) BOOT_CFG0')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG1'], '(0x460) BOOT_CFG1')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG2'], '(0x470) BOOT_CFG2')
def _genOtpmkDekFile( self, otpmk4, otpmk5, otpmk6, otpmk7 ):
try:
os.remove(self.otpmkDekFilename)
except:
pass
self.fillVal32IntoBinFile(self.otpmkDekFilename, otpmk4)
self.fillVal32IntoBinFile(self.otpmkDekFilename, otpmk5)
self.fillVal32IntoBinFile(self.otpmkDekFilename, otpmk6)
self.fillVal32IntoBinFile(self.otpmkDekFilename, otpmk7)
def _readMcuDeviceFuseOtpmkDek( self ):
otpmk4 = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTPMK4'], '', False)
otpmk5 = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTPMK5'], '', False)
otpmk6 = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTPMK6'], '', False)
otpmk7 = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTPMK7'], '', False)
if otpmk4 != None and otpmk5 != None and otpmk6 != None and otpmk7 != None:
self._genOtpmkDekFile(otpmk4, otpmk5, otpmk6, otpmk7)
def _readMcuDeviceFuseSrk( self ):
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK0'], '(0x580) SRK0')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK1'], '(0x590) SRK1')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK2'], '(0x5A0) SRK2')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK3'], '(0x5B0) SRK3')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK4'], '(0x5C0) SRK4')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK5'], '(0x5D0) SRK5')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK6'], '(0x5E0) SRK6')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK7'], '(0x5F0) SRK7')
def _readMcuDeviceFuseSwGp2( self ):
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SW_GP2_0'], '(0x690) SW_GP2_0')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SW_GP2_1'], '(0x6A0) SW_GP2_1')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SW_GP2_2'], '(0x6B0) SW_GP2_2')
self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SW_GP2_3'], '(0x6C0) SW_GP2_3')
def getMcuDeviceInfoViaFlashloader( self ):
self.printDeviceStatus("--------MCU device eFusemap--------")
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
#self._readMcuDeviceFuseTester()
self._readMcuDeviceFuseBootCfg()
#self._readMcuDeviceFuseOtpmkDek()
#self._readMcuDeviceFuseSrk()
#self._readMcuDeviceFuseSwGp2()
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
def getMcuDeviceBtFuseSel( self ):
btFuseSel = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_BtFuseSel'], '', False)
if btFuseSel != None:
self.mcuDeviceBtFuseSel = ((btFuseSel & self.tgt.efusemapDefnDict['kEfuseMask_BtFuseSel']) >> self.tgt.efusemapDefnDict['kEfuseShift_BtFuseSel'])
if self.mcuDeviceBtFuseSel == 0:
self.printDeviceStatus('BT_FUSE_SEL = 1\'b0')
self.printDeviceStatus(' When BMOD[1:0] = 2\'b00 (Boot From Fuses), It means there is no application in boot device, MCU will enter serial downloader mode directly')
self.printDeviceStatus(' When BMOD[1:0] = 2\'b10 (Internal Boot), It means MCU will boot application according to both BOOT_CFGx pins and Fuse BOOT_CFGx')
elif self.mcuDeviceBtFuseSel == 1:
self.printDeviceStatus('BT_FUSE_SEL = 1\'b1')
self.printDeviceStatus(' When BMOD[1:0] = 2\'b00 (Boot From Fuses), It means there is application in boot device, MCU will boot application according to Fuse BOOT_CFGx')
self.printDeviceStatus(' When BMOD[1:0] = 2\'b10 (Internal Boot), It means MCU will boot application according to Fuse BOOT_CFGx only')
else:
pass
def _getDeviceRegisterByBlhost( self, regAddr, regName, needToShow=True):
filename = 'readReg.dat'
filepath = os.path.join(self.blhostVectorsDir, filename)
status, results, cmdStr = self.blhost.readMemory(regAddr, 4, filename)
self.printLog(cmdStr)
if status == boot.status.kStatus_Success:
regVal = self.getVal32FromBinFile(filepath)
if needToShow:
self.printDeviceStatus(regName + " = " + self.convertLongIntHexText(str(hex(regVal))))
return regVal
else:
if needToShow:
self.printDeviceStatus(regName + " = --------")
return None
try:
os.remove(filepath)
except:
pass
def _showFlexramAccordingToBankCfg( self, flexramBankCfg ):
if flexramBankCfg != 0:
banks = self.tgt.memoryRange['itcm'].length / RTyyyy_memdef.kFlexramBankSize
itcmBanks = 0
dtcmBanks = 0
ocramBanks = 0
for bank in range(banks):
bankId = (flexramBankCfg >> (bank *2)) & 0x3
if bankId == RTyyyy_memdef.kFlexramBankId_Ocram:
ocramBanks += 1
elif bankId == RTyyyy_memdef.kFlexramBankId_Dtcm:
dtcmBanks += 1
elif bankId == RTyyyy_memdef.kFlexramBankId_Itcm:
itcmBanks += 1
else:
pass
itcmSizeInKB = itcmBanks * RTyyyy_memdef.kFlexramBankSize / 0x400
dtcmSizeInKB = dtcmBanks * RTyyyy_memdef.kFlexramBankSize / 0x400
ocramSizeInKB = ocramBanks * RTyyyy_memdef.kFlexramBankSize / 0x400
self.printDeviceStatus(str(itcmSizeInKB) + "KB ITCM, " + str(dtcmSizeInKB) + "KB DTCM, " + str(ocramSizeInKB) + "KB OCRAM")
else:
self.printDeviceStatus("0KB ITCM, 0KB DTCM, 0KB OCRAM")
def getFlexramInfoViaFlashloader( self ):
self.printDeviceStatus("----------FlexRAM memory-----------")
gpr16 = self._getDeviceRegisterByBlhost( self.tgt.registerAddrDict['kRegisterAddr_IOMUXC_GPR_GPR16'], 'IOMUXC_GPR->GPR16')
if gpr16 == None:
return
if not (gpr16 & self.tgt.registerDefnDict['kRegisterMask_IOMUXC_GPR_GPR16_FlexramBankCfgSel']):
self.printDeviceStatus('FlexRAM configuration is from eFuse')
flexramCfg = self._getDeviceRegisterByBlhost( self.tgt.registerAddrDict['kRegisterAddr_OCOTP_FlexramCfg'], 'OCOTP->MISC_CONF0[31:00]')
if flexramCfg != None:
defaultFlexramPart = (flexramCfg & self.tgt.efusemapDefnDict['kEfuseMask_DefaultFlexramPart']) >> self.tgt.efusemapDefnDict['kEfuseShift_DefaultFlexramPart']
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
self.printDeviceStatus("FlexRAM Partion =" + self.tgt.efuseDescDiffDict['0x6d0_miscconf0_bit19_16']['Default_FlexRAM_Partion'][defaultFlexramPart])
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
self.printDeviceStatus("FlexRAM Partion =" + self.tgt.efuseDescDiffDict['0xc70_flexramcfg_bit21_16']['Default_FlexRAM_Partion'][defaultFlexramPart])
else:
pass
else:
self.printDeviceStatus('FlexRAM configuration is from IOMUXC_GPR Register')
gpr17 = self._getDeviceRegisterByBlhost( self.tgt.registerAddrDict['kRegisterAddr_IOMUXC_GPR_GPR17'], 'IOMUXC_GPR->GPR17')
if gpr17 != None:
flexramBankCfg = (gpr17 & self.tgt.registerDefnDict['kRegisterMask_IOMUXC_GPR_GPR17_FlexramBankCfg']) >> self.tgt.registerDefnDict['kRegisterShift_IOMUXC_GPR_GPR17_FlexramBankCfg']
self._showFlexramAccordingToBankCfg(flexramBankCfg)
def _RTyyyy_prepareForBootDeviceOperation ( self ):
if self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
self.bootDeviceMemId = rundef.kBootDeviceMemId_FlexspiNor
self.bootDeviceMemBase = self.tgt.flexspiNorMemBase
elif self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNand:
self.bootDeviceMemId = rundef.kBootDeviceMemId_FlexspiNand
self.bootDeviceMemBase = RTyyyy_rundef.kBootDeviceMemBase_FlexspiNand
elif self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor:
self.bootDeviceMemId = rundef.kBootDeviceMemId_SemcNor
self.bootDeviceMemBase = RTyyyy_rundef.kBootDeviceMemBase_SemcNor
elif self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
self.bootDeviceMemId = rundef.kBootDeviceMemId_SemcNand
self.bootDeviceMemBase = RTyyyy_rundef.kBootDeviceMemBase_SemcNand
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcSd:
self.bootDeviceMemId = rundef.kBootDeviceMemId_UsdhcSd
self.bootDeviceMemBase = RTyyyy_rundef.kBootDeviceMemBase_UsdhcSd
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcMmc:
self.bootDeviceMemId = rundef.kBootDeviceMemId_UsdhcMmc
self.bootDeviceMemBase = RTyyyy_rundef.kBootDeviceMemBase_UsdhcMmc
elif self.bootDevice == RTyyyy_uidef.kBootDevice_LpspiNor:
self.bootDeviceMemId = rundef.kBootDeviceMemId_SpiNor
self.bootDeviceMemBase = RTyyyy_rundef.kBootDeviceMemBase_LpspiNor
else:
pass
def _getSemcNandDeviceInfo ( self ):
filename = 'semcNandFcb.dat'
filepath = os.path.join(self.blhostVectorsDir, filename)
status, results, cmdStr = self.blhost.readMemory(self.bootDeviceMemBase + RTyyyy_rundef.kSemcNandFcbInfo_StartAddr, RTyyyy_rundef.kSemcNandFcbInfo_Length, filename, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
fingerprint = self.getVal32FromBinFile(filepath, RTyyyy_rundef.kSemcNandFcbOffset_Fingerprint)
semcTag = self.getVal32FromBinFile(filepath, RTyyyy_rundef.kSemcNandFcbOffset_SemcTag)
if fingerprint == RTyyyy_rundef.kSemcNandFcbTag_Fingerprint and semcTag == RTyyyy_rundef.kSemcNandFcbTag_Semc:
firmwareCopies = self.getVal32FromBinFile(filepath, RTyyyy_rundef.kSemcNandFcbOffset_FirmwareCopies)
pageByteSize = self.getVal32FromBinFile(filepath, RTyyyy_rundef.kSemcNandFcbOffset_PageByteSize)
pagesInBlock = self.getVal32FromBinFile(filepath, RTyyyy_rundef.kSemcNandFcbOffset_PagesInBlock)
blocksInPlane = self.getVal32FromBinFile(filepath, RTyyyy_rundef.kSemcNandFcbOffset_BlocksInPlane)
planesInDevice = self.getVal32FromBinFile(filepath, RTyyyy_rundef.kSemcNandFcbOffset_PlanesInDevice)
self.printDeviceStatus("Page Size = " + self.showAsOptimalMemoryUnit(pageByteSize))
self.printDeviceStatus("Pages In Block = " + str(pagesInBlock))
self.printDeviceStatus("Blocks In Plane = " + str(blocksInPlane))
self.printDeviceStatus("Planes In Device = " + str(planesInDevice))
self.semcNandImageCopies = firmwareCopies
self.semcNandBlockSize = pageByteSize * pagesInBlock
self.comMemWriteUnit = pageByteSize
self.comMemEraseUnit = pageByteSize * pagesInBlock
self.comMemReadUnit = pageByteSize
else:
self.printDeviceStatus("Page Size = --------")
self.printDeviceStatus("Pages In Block = --------")
self.printDeviceStatus("Blocks In Plane = --------")
self.printDeviceStatus("Planes In Device = --------")
return False
try:
os.remove(filepath)
except:
pass
return True
def _getSemcNorDeviceInfo ( self ):
filename = 'SemcNorCfg.dat'
filepath = os.path.join(self.blhostVectorsDir, filename)
status, results, cmdStr = self.blhost.readMemory(self.bootDeviceMemBase, 0x2D, filename, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
# flexspiTag = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_FlexspiTag)
# if flexspiTag == rundef.kFlexspiNorCfgTag_Flexspi:
# pageByteSize = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_PageByteSize)
# sectorByteSize = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_SectorByteSize)
# blockByteSize = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_BlockByteSize)
# self.printDeviceStatus("Page Size = " + self.showAsOptimalMemoryUnit(pageByteSize))
# self.printDeviceStatus("Sector Size = " + self.showAsOptimalMemoryUnit(sectorByteSize))
# self.printDeviceStatus("Block Size = " + self.showAsOptimalMemoryUnit(blockByteSize))
# self.comMemWriteUnit = pageByteSize
# self.comMemEraseUnit = sectorByteSize
# self.comMemReadUnit = pageByteSize
# else:
# self.printDeviceStatus("Page Size = --------")
# self.printDeviceStatus("Sector Size = --------")
# self.printDeviceStatus("Block Size = --------")
# return False
# try:
# os.remove(filepath)
# except:
# pass
return True
def _getFlexspiNorDeviceInfo ( self ):
if not self.RTyyyy_isDeviceEnabledToOperate and self.isSbFileEnabledToGen:
return True
filename = 'flexspiNorCfg.dat'
filepath = os.path.join(self.blhostVectorsDir, filename)
status, results, cmdStr = self.blhost.readMemory(self.bootDeviceMemBase + self.tgt.xspiNorCfgInfoOffset, rundef.kFlexspiNorCfgInfo_Length, filename, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
flexspiTag = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_FlexspiTag)
if flexspiTag == rundef.kFlexspiNorCfgTag_Flexspi:
pageByteSize = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_PageByteSize)
sectorByteSize = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_SectorByteSize)
blockByteSize = self.getVal32FromBinFile(filepath, rundef.kFlexspiNorCfgOffset_BlockByteSize)
self.printDeviceStatus("Page Size = " + self.showAsOptimalMemoryUnit(pageByteSize))
self.printDeviceStatus("Sector Size = " + self.showAsOptimalMemoryUnit(sectorByteSize))
self.printDeviceStatus("Block Size = " + self.showAsOptimalMemoryUnit(blockByteSize))
self.comMemWriteUnit = pageByteSize
self.comMemEraseUnit = sectorByteSize
self.comMemReadUnit = pageByteSize
else:
self.printDeviceStatus("Page Size = --------")
self.printDeviceStatus("Sector Size = --------")
self.printDeviceStatus("Block Size = --------")
return False
try:
os.remove(filepath)
except:
pass
return True
def _getLpspiNorDeviceInfo ( self ):
pageByteSize = 0
sectorByteSize = 0
totalByteSize = 0
lpspiNorOpt0, lpspiNorOpt1 = uivar.getBootDeviceConfiguration(self.bootDevice)
val = (lpspiNorOpt0 & 0x0000000F) >> 0
if val <= 2:
pageByteSize = int(math.pow(2, val + 8))
else:
pageByteSize = int(math.pow(2, val + 2))
val = (lpspiNorOpt0 & 0x000000F0) >> 4
if val <= 1:
sectorByteSize = int(math.pow(2, val + 12))
else:
sectorByteSize = int(math.pow(2, val + 13))
val = (lpspiNorOpt0 & 0x00000F00) >> 8
if val <= 11:
totalByteSize = int(math.pow(2, val + 19))
else:
totalByteSize = int(math.pow(2, val + 3))
self.printDeviceStatus("Page Size = " + self.showAsOptimalMemoryUnit(pageByteSize))
self.printDeviceStatus("Sector Size = " + self.showAsOptimalMemoryUnit(sectorByteSize))
self.printDeviceStatus("Total Size = " + self.showAsOptimalMemoryUnit(totalByteSize))
self.comMemWriteUnit = pageByteSize
self.comMemEraseUnit = sectorByteSize
self.comMemReadUnit = pageByteSize
return True
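    # NOTE (editor): worked example with an assumed option word. For an lpspiNorOpt0 whose low
    # three nibbles are all zero (e.g. 0xC0000000), the decode above yields a 256-byte page
    # (2^8), a 4KB sector (2^12) and a 512KB total size (2^19).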
def _getUsdhcSdMmcDeviceInfo ( self ):
status, results, cmdStr = self.blhost.getProperty(boot.properties.kPropertyTag_ExternalMemoryAttribles, self.bootDeviceMemId)
self.printLog(cmdStr)
if (status == boot.status.kStatus_Success):
#typedef struct
#{
#    uint32_t availableAttributesFlag; //!< Available Attributes, bit map
# uint32_t startAddress; //!< start Address of external memory
# uint32_t flashSizeInKB; //!< flash size of external memory
# uint32_t pageSize; //!< page size of external memory
# uint32_t sectorSize; //!< sector size of external memory
# uint32_t blockSize; //!< block size of external memory
#} external_memory_property_store_t;
blockByteSize = results[5]
totalSizeKB = results[2]
self.printDeviceStatus("Block Size = " + self.showAsOptimalMemoryUnit(blockByteSize))
strTotalSizeGB = ("%.2f" % (totalSizeKB / 1024.0 / 1024))
self.printDeviceStatus("Total Size = " + self.convertLongIntHexText(strTotalSizeGB) + ' GB')
self.comMemWriteUnit = blockByteSize
self.comMemEraseUnit = blockByteSize
self.comMemReadUnit = blockByteSize
else:
self.printDeviceStatus("Block Size = --------")
self.printDeviceStatus("Total Size = --------")
return False
return True
def getBootDeviceInfoViaFlashloader ( self ):
if self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
self.printDeviceStatus("--------SEMC NAND memory----------")
self._getSemcNandDeviceInfo()
elif self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor:
self.printDeviceStatus("--------SEMC Nor memory----------")
self._getSemcNorDeviceInfo()
elif self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
self.printDeviceStatus("--------FlexSPI NOR memory--------")
if not self._getFlexspiNorDeviceInfo():
if not self._eraseFlexspiNorForConfigBlockLoading():
return False
if not self._programFlexspiNorConfigBlock():
return False
self._getFlexspiNorDeviceInfo()
elif self.bootDevice == RTyyyy_uidef.kBootDevice_LpspiNor:
self.printDeviceStatus("--------LPSPI NOR/EEPROM memory---")
self._getLpspiNorDeviceInfo()
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcSd:
self.printDeviceStatus("--------uSDHC SD Card info--------")
self._getUsdhcSdMmcDeviceInfo()
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcMmc:
self.printDeviceStatus("--------uSDHC (e)MMC Card info----")
self._getUsdhcSdMmcDeviceInfo()
else:
pass
def _addFlashActionIntoSbAppBdContent(self, actionContent ):
self.sbAppBdContent += actionContent
self.sbAppFlashBdContent += actionContent
def _eraseFlexspiNorForConfigBlockLoading( self ):
status = boot.status.kStatus_Success
if self.RTyyyy_isDeviceEnabledToOperate:
status, results, cmdStr = self.blhost.flashEraseRegion(self.tgt.flexspiNorMemBase + self.tgt.xspiNorCfgInfoOffset, rundef.kFlexspiNorCfgInfo_Length, rundef.kBootDeviceMemId_FlexspiNor)
self.printLog(cmdStr)
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" erase " + self.sbAccessBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(self.tgt.flexspiNorMemBase + self.tgt.xspiNorCfgInfoOffset))) + ".." + self.convertLongIntHexText(str(hex(self.tgt.flexspiNorMemBase + self.tgt.flexspiNorMemBase + rundef.kFlexspiNorCfgInfo_Length))) + ";\n")
return (status == boot.status.kStatus_Success)
def _programFlexspiNorConfigBlock ( self ):
#if not self.tgt.isSipFlexspiNorDevice:
if True:
status = boot.status.kStatus_Success
# 0xf000000f is the tag to notify Flashloader to program FlexSPI NOR config block to the start of device
if self.RTyyyy_isDeviceEnabledToOperate:
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadCfgBlock, 0x4, rundef.kFlexspiNorCfgInfo_Notify)
self.printLog(cmdStr)
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(rundef.kFlexspiNorCfgInfo_Notify))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadCfgBlock))) + ";\n")
if status != boot.status.kStatus_Success:
return False
if self.RTyyyy_isDeviceEnabledToOperate:
status, results, cmdStr = self.blhost.configureMemory(self.bootDeviceMemId, RTyyyy_rundef.kRamFreeSpaceStart_LoadCfgBlock)
self.printLog(cmdStr)
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" enable " + self.sbEnableBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadCfgBlock))) + ";\n")
if self.isSbFileEnabledToGen:
return True
else:
return (status == boot.status.kStatus_Success)
else:
status, results, cmdStr = self.blhost.writeMemory(self.bootDeviceMemBase, os.path.join(self.cpuDir, 'sip_flash_config.bin'), self.bootDeviceMemId)
self.printLog(cmdStr)
return (status == boot.status.kStatus_Success)
def RTyyyy_configureBootDevice ( self ):
self._RTyyyy_prepareForBootDeviceOperation()
configOptList = []
if self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
semcNandOpt, semcNandFcbOpt, semcNandImageInfoList = uivar.getBootDeviceConfiguration(self.bootDevice)
configOptList.extend([semcNandOpt, semcNandFcbOpt])
for i in range(len(semcNandImageInfoList)):
if semcNandImageInfoList[i] != None:
configOptList.extend([semcNandImageInfoList[i]])
else:
break
if self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor:
semcNorOpt, semcNorSetting, semcNorDeviceModel= uivar.getBootDeviceConfiguration(self.bootDevice)
configOptList.extend([semcNorOpt])
elif self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
flexspiNorOpt0, flexspiNorOpt1, flexspiNorDeviceModel = uivar.getBootDeviceConfiguration(uidef.kBootDevice_XspiNor)
configOptList.extend([flexspiNorOpt0, flexspiNorOpt1])
elif self.bootDevice == RTyyyy_uidef.kBootDevice_LpspiNor:
lpspiNorOpt0, lpspiNorOpt1 = uivar.getBootDeviceConfiguration(self.bootDevice)
configOptList.extend([lpspiNorOpt0, lpspiNorOpt1])
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcSd:
usdhcSdOpt = uivar.getBootDeviceConfiguration(self.bootDevice)
configOptList.extend([usdhcSdOpt])
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcMmc:
usdhcMmcOpt0, usdhcMmcOpt1 = uivar.getBootDeviceConfiguration(self.bootDevice)
configOptList.extend([usdhcMmcOpt0, usdhcMmcOpt1])
else:
pass
status = boot.status.kStatus_Success
for i in range(len(configOptList)):
if self.RTyyyy_isDeviceEnabledToOperate:
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadCommOpt + 4 * i, 0x4, configOptList[i])
self.printLog(cmdStr)
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(configOptList[i]))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadCommOpt + 4 * i))) + ";\n")
if status != boot.status.kStatus_Success:
return False
if self.RTyyyy_isDeviceEnabledToOperate:
status, results, cmdStr = self.blhost.configureMemory(self.bootDeviceMemId, RTyyyy_rundef.kRamFreeSpaceStart_LoadCommOpt)
self.printLog(cmdStr)
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" enable " + self.sbEnableBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadCommOpt))) + ";\n")
if status != boot.status.kStatus_Success:
return False
return True
def _showOtpmkDek( self ):
if os.path.isfile(self.otpmkDekFilename):
self.clearOtpmkDekData()
            keyWords = RTyyyy_gendef.kSecKeyLengthInBits_DEK // 32
for i in range(keyWords):
val32 = self.getVal32FromBinFile(self.otpmkDekFilename, (i * 4))
self.printOtpmkDekData(self.getFormattedHexValue(val32))
def _eraseFlexspiNorForImageLoading( self ):
imageLen = os.path.getsize(self.destAppFilename)
memEraseLen = misc.align_up(imageLen, self.comMemEraseUnit)
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" erase " + self.sbAccessBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(self.tgt.flexspiNorMemBase))) + ".." + self.convertLongIntHexText(str(hex(self.tgt.flexspiNorMemBase + memEraseLen))) + ";\n")
else:
status, results, cmdStr = self.blhost.flashEraseRegion(self.tgt.flexspiNorMemBase, memEraseLen, rundef.kBootDeviceMemId_FlexspiNor)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
self.isFlexspiNorErasedForImage = True
return True
def prepareForFixedOtpmkEncryption( self ):
self._RTyyyy_prepareForBootDeviceOperation()
#self._showOtpmkDek()
if not self._eraseFlexspiNorForImageLoading():
return False
otpmkKeyCommDict = uivar.getAdvancedSettings(uidef.kAdvancedSettings_OtpmkKey)
otpmkKeyOpt = otpmkKeyCommDict['opt']
otpmkEncryptedRegionStartList = otpmkKeyCommDict['regionStartList'][:]
otpmkEncryptedRegionLengthOrEndList = otpmkKeyCommDict['regionLengthList'][:]
# Prepare PRDB options
if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
#---------------------------------------------------------------------------
            # 0xe0120000 is an option for PRDB construction and image encryption
# bit[31:28] tag, fixed to 0x0E
# bit[27:24] Key source, fixed to 0 for A0 silicon
# bit[23:20] AES mode: 1 - CTR mode
# bit[19:16] Encrypted region count (maximum of 3)
# bit[15:00] reserved in A0
#---------------------------------------------------------------------------
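            # Illustrative decode of the example word 0xe0120000 under the layout above:
            #   tag 0xE, key source 0, AES mode 1 (CTR), encrypted region count 2, low 16 bits reserved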
encryptedRegionCnt = (otpmkKeyOpt & 0x000F0000) >> 16
if encryptedRegionCnt == 0:
otpmkKeyOpt = (otpmkKeyOpt & 0xFFF0FFFF) | (0x1 << 16)
encryptedRegionCnt = 1
otpmkEncryptedRegionStartList[0] = self.tgt.flexspiNorMemBase + RTyyyy_gendef.kIvtOffset_NOR
# For BEE, it should be length
otpmkEncryptedRegionLengthOrEndList[0] = misc.align_up(os.path.getsize(self.destAppFilename), RTyyyy_gendef.kSecFacRegionAlignedUnit_Bee) - RTyyyy_gendef.kIvtOffset_NOR
else:
pass
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
#---------------------------------------------------------------------------
            # 0xe0001100 is an option for PRDB construction and image encryption
# bit[31:28] tag, fixed to 0x0E
# bit[27:16] Reserved
# bit[15:12] Key source. 1 for SNVS[255:128], 0 for SNVS[127:0]
# bit[11:08] Encrypted region count (maximum of 4)
# bit[07:00] Redundant image offset in 256K. 0 for no redundant image
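            # Illustrative decode of the example word 0xe0001100 under the layout above:
            #   tag 0xE, key source 1 (SNVS[255:128]), encrypted region count 1, no redundant image
            #---------------------------------------------------------------------------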
encryptedRegionCnt = (otpmkKeyOpt & 0x00000F00) >> 8
if encryptedRegionCnt == 0:
otpmkKeyOpt = (otpmkKeyOpt & 0xFFFFF0FF) | (0x1 << 8)
encryptedRegionCnt = 1
otpmkEncryptedRegionStartList[0] = self.tgt.flexspiNorMemBase + RTyyyy_gendef.kIvtOffset_NOR
# For OTFAD, it should be end
otpmkEncryptedRegionLengthOrEndList[0] = self.tgt.flexspiNorMemBase + misc.align_up(os.path.getsize(self.destAppFilename), RTyyyy_gendef.kSecFacRegionAlignedUnit_Otfad) - 1
else:
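                # Convert each configured (start, length) pair into an inclusive end address,
                # since OTFAD expects region ends rather than region lengths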
for i in range(encryptedRegionCnt):
otpmkEncryptedRegionLengthOrEndList[i] = otpmkEncryptedRegionStartList[i] + otpmkEncryptedRegionLengthOrEndList[i] - 1
else:
pass
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(otpmkKeyOpt))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt))) + ";\n")
else:
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt, 0x4, otpmkKeyOpt)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
for i in range(encryptedRegionCnt):
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(otpmkEncryptedRegionStartList[i]))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt + i * 8 + 4))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(otpmkEncryptedRegionLengthOrEndList[i]))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt + i * 8 + 8))) + ";\n")
else:
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt + i * 8 + 4, 0x4, otpmkEncryptedRegionStartList[i])
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt + i * 8 + 8, 0x4, otpmkEncryptedRegionLengthOrEndList[i])
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" enable " + self.sbEnableBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt))) + ";\n")
else:
status, results, cmdStr = self.blhost.configureMemory(self.bootDeviceMemId, RTyyyy_rundef.kRamFreeSpaceStart_LoadPrdbOpt)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
if not self._programFlexspiNorConfigBlock():
return False
return True
def _isDeviceFuseSrkRegionReadyForBurn( self, srkFuseFilename ):
isReady = True
isBlank = True
        keyWords = RTyyyy_gendef.kSecKeyLengthInBits_SRK // 32
for i in range(keyWords):
srk = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK0'] + i, '(' + str(hex(0x580 + i * 0x10)) + ') ' + 'SRK' + str(i), False)
if srk == None:
isReady = False
break
elif srk != 0:
isBlank = False
val32 = self.getVal32FromBinFile(srkFuseFilename, (i * 4))
if srk != val32:
isReady = False
break
return isReady, isBlank
def burnMcuDeviceFuseByBlhost( self, fuseIndex, fuseValue, actionFrom=RTyyyy_rundef.kActionFrom_AllInOne):
status = boot.status.kStatus_Success
if self.isSbFileEnabledToGen:
if actionFrom == RTyyyy_rundef.kActionFrom_AllInOne:
if fuseIndex == self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG1']:
fuseValue = fuseValue | self.sbLastSharedFuseBootCfg1
self.sbLastSharedFuseBootCfg1 = fuseValue
elif fuseIndex == self.tgt.efusemapIndexDict['kEfuseIndex_OTFAD_CFG']:
fuseValue = fuseValue | self.sbLastSharedFuseOtfadCfg
self.sbLastSharedFuseOtfadCfg = fuseValue
else:
pass
sbAppBdContent = " load fuse 0x" + self.getFormattedFuseValue(fuseValue) + " > " + self.convertLongIntHexText(str(hex(fuseIndex))) + ";\n"
self.sbAppBdContent += sbAppBdContent
self.sbAppEfuseBdContent += sbAppBdContent
self.isEfuseOperationInSbApp = True
elif actionFrom == RTyyyy_rundef.kActionFrom_BurnFuse:
self.sbUserEfuseBdContent += " load fuse 0x" + self.getFormattedFuseValue(fuseValue) + " > " + self.convertLongIntHexText(str(hex(fuseIndex))) + ";\n"
else:
pass
else:
status, results, cmdStr = self.blhost.efuseProgramOnce(fuseIndex, self.getFormattedFuseValue(fuseValue))
self.printLog(cmdStr)
return (status == boot.status.kStatus_Success)
def burnSrkData ( self ):
if os.path.isfile(self.srkFuseFilename):
isReady, isBlank = self._isDeviceFuseSrkRegionReadyForBurn(self.srkFuseFilename)
if isReady:
if isBlank:
                    keyWords = RTyyyy_gendef.kSecKeyLengthInBits_SRK // 32
for i in range(keyWords):
val32 = self.getVal32FromBinFile(self.srkFuseFilename, (i * 4))
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SRK0'] + i, val32)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnSrk'][self.languageIndex])
return False
return True
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_srkHasBeenBurned'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['certGenError_srkNotGen'][self.languageIndex])
return False
def _isDeviceFuseSwGp2RegionReadyForBurn( self, swgp2DekFilename ):
isReady = True
isBlank = True
        keyWords = RTyyyy_gendef.kSecKeyLengthInBits_DEK // 32
for i in range(keyWords):
dek = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SW_GP2_0'] + i, '(' + str(hex(0x690 + i * 0x10)) + ') ' + 'SW_GP2_' + str(i), False)
if dek == None:
isReady = False
break
elif dek != 0:
isBlank = False
val32 = self.getVal32FromBinFile(swgp2DekFilename, (i * 4))
if dek != val32:
isReady = False
break
return isReady, isBlank
def _isDeviceFuseGp4RegionReadyForBurn( self, gp4DekFilename ):
isReady = True
isBlank = True
        keyWords = RTyyyy_gendef.kSecKeyLengthInBits_DEK // 32
for i in range(keyWords):
dek = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_GP4_0'] + i, '(' + str(hex(0x8C0 + i * 0x10)) + ') ' + 'GP4_' + str(i), False)
if dek == None:
isReady = False
break
elif dek != 0:
isBlank = False
val32 = self.getVal32FromBinFile(gp4DekFilename, (i * 4))
if dek != val32:
isReady = False
break
return isReady, isBlank
def _isDeviceFuseUserKey5RegionReadyForBurn( self, userkey5DekFilename ):
isReady = True
isBlank = True
        keyWords = RTyyyy_gendef.kSecKeyLengthInBits_DEK // 32
for i in range(keyWords):
dek = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_USER_KEY5_0'] + i, '(' + str(hex(0x1000 + i * 0x10)) + ') ' + 'USER_KEY5_' + str(i), False)
if dek == None:
isReady = False
break
elif dek != 0:
isBlank = False
val32 = self.getVal32FromBinFile(userkey5DekFilename, (i * 4))
if dek != val32:
isReady = False
break
return isReady, isBlank
def _lockFuseSwGp2( self ):
lock = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_LOCK'], '', False)
if lock != None:
lock = (lock | (RTyyyy_fusedef.kEfuseMask_WLockSwGp2 | RTyyyy_fusedef.kEfuseMask_RLockSwGp2)) & (~RTyyyy_fusedef.kEfuseMask_LockHigh)
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_LOCK'], lock)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnSwgp2Lock'][self.languageIndex])
return False
return True
def _lockFuseGp4( self ):
lock = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_LOCK'], '', False)
if lock != None:
lock = (lock | (RTyyyy_fusedef.kEfuseMask_WLockGp4 | RTyyyy_fusedef.kEfuseMask_RLockGp4)) & (~RTyyyy_fusedef.kEfuseMask_LockHigh)
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_LOCK'], lock)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnGp4Lock'][self.languageIndex])
                return False
        return True
def burnHwCryptoDekData ( self ):
needToBurnSwGp2 = False
needToBurnGp4 = False
needToBurnUserKey5 = False
swgp2DekFilename = None
gp4DekFilename = None
userkey5DekFilename = None
userKeyCtrlDict, userKeyCmdDict = uivar.getAdvancedSettings(uidef.kAdvancedSettings_UserKeys)
if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
if userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_Engine1 or userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_BothEngines:
if userKeyCtrlDict['engine1_key_src'] == RTyyyy_uidef.kUserKeySource_SW_GP2:
needToBurnSwGp2 = True
swgp2DekFilename = self.beeDek1Filename
elif userKeyCtrlDict['engine1_key_src'] == RTyyyy_uidef.kUserKeySource_GP4:
needToBurnGp4 = True
gp4DekFilename = self.beeDek1Filename
else:
pass
if userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_Engine0 or userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_BothEngines:
if userKeyCtrlDict['engine0_key_src'] == RTyyyy_uidef.kUserKeySource_SW_GP2:
needToBurnSwGp2 = True
swgp2DekFilename = self.beeDek0Filename
elif userKeyCtrlDict['engine0_key_src'] == RTyyyy_uidef.kUserKeySource_GP4:
needToBurnGp4 = True
gp4DekFilename = self.beeDek0Filename
else:
pass
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
if userKeyCtrlDict['kek_src'] == RTyyyy_uidef.kUserKeySource_SW_GP2:
needToBurnSwGp2 = True
swgp2DekFilename = self.otfadDek0Filename
elif userKeyCtrlDict['kek_src'] == RTyyyy_uidef.kUserKeySource_USER_KEY5:
needToBurnUserKey5 = True
userkey5DekFilename = self.otfadDek0Filename
else:
pass
else:
pass
        keyWords = RTyyyy_gendef.kSecKeyLengthInBits_DEK // 32
if needToBurnSwGp2:
isReady, isBlank = self._isDeviceFuseSwGp2RegionReadyForBurn(swgp2DekFilename)
if isReady:
if isBlank:
for i in range(keyWords):
val32 = self.getVal32FromBinFile(swgp2DekFilename, (i * 4))
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_SW_GP2_0'] + i, val32)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnSwgp2'][self.languageIndex])
return False
if not self._lockFuseSwGp2():
return False
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_swgp2HasBeenBurned'][self.languageIndex])
else:
pass
if needToBurnGp4:
isReady, isBlank = self._isDeviceFuseGp4RegionReadyForBurn(gp4DekFilename)
if isReady:
if isBlank:
for i in range(keyWords):
val32 = self.getVal32FromBinFile(gp4DekFilename, (i * 4))
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_GP4_0'] + i, val32)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnGp4'][self.languageIndex])
return False
if not self._lockFuseGp4():
return False
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_gp4HasBeenBurned'][self.languageIndex])
else:
pass
if needToBurnUserKey5:
isReady, isBlank = self._isDeviceFuseUserKey5RegionReadyForBurn(userkey5DekFilename)
if isReady:
if isBlank:
for i in range(keyWords):
val32 = self.getVal32FromBinFile(userkey5DekFilename, (i * 4))
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_USER_KEY5_0'] + i, val32)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnUserkey5'][self.languageIndex])
return False
#if not self._lockFuseUserKey5():
# return False
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_userkey5HasBeenBurned'][self.languageIndex])
else:
pass
return True
def _genDestEncAppFileWithoutCfgBlock( self ):
destEncAppPath, destEncAppFile = os.path.split(self.destEncAppFilename)
destEncAppName, destEncAppType = os.path.splitext(destEncAppFile)
destEncAppName += '_nocfgblock'
self.destEncAppNoCfgBlockFilename = os.path.join(destEncAppPath, destEncAppName + destEncAppType)
imageLen = os.path.getsize(self.destEncAppFilename)
imageData = None
with open(self.destEncAppFilename, 'rb') as fileObj:
imageData = fileObj.read(imageLen)
if len(imageData) > rundef.kFlexspiNorCfgInfo_Length:
imageData = imageData[rundef.kFlexspiNorCfgInfo_Length:len(imageData)]
fileObj.close()
with open(self.destEncAppNoCfgBlockFilename, 'wb') as fileObj:
fileObj.write(imageData)
fileObj.close()
def _genDestEncAppFileWithoutKeyblobAndCfgBlock( self ):
destEncAppPath, destEncAppFile = os.path.split(self.destEncAppFilename)
destEncAppName, destEncAppType = os.path.splitext(destEncAppFile)
destEncAppName += '_nokeyblob_nocfgblock'
self.destEncAppNoKeyblobAndCfgBlockFilename = os.path.join(destEncAppPath, destEncAppName + destEncAppType)
imageLen = os.path.getsize(self.destEncAppFilename)
imageData = None
with open(self.destEncAppFilename, 'rb') as fileObj:
imageData = fileObj.read(imageLen)
if len(imageData) > self.tgt.xspiNorCfgInfoOffset + rundef.kFlexspiNorCfgInfo_Length:
imageData = imageData[self.tgt.xspiNorCfgInfoOffset + rundef.kFlexspiNorCfgInfo_Length:len(imageData)]
fileObj.close()
with open(self.destEncAppNoKeyblobAndCfgBlockFilename, 'wb') as fileObj:
fileObj.write(imageData)
fileObj.close()
def _extractOtfadKeyblobFromDestEncAppFile( self ):
imageLen = os.path.getsize(self.destEncAppFilename)
imageData = None
with open(self.destEncAppFilename, 'rb') as fileObj:
imageData = fileObj.read(imageLen)
if len(imageData) > RTyyyy_memdef.kMemBlockOffset_HwCryptoKeyBlob + RTyyyy_memdef.kMemBlockSize_HwCryptoKeyBlob:
imageData = imageData[RTyyyy_memdef.kMemBlockOffset_HwCryptoKeyBlob:RTyyyy_memdef.kMemBlockOffset_HwCryptoKeyBlob + RTyyyy_memdef.kMemBlockSize_HwCryptoKeyBlob]
fileObj.close()
with open(self.otfadKeyblobFilenname, 'wb') as fileObj:
fileObj.write(imageData)
fileObj.close()
def _programFlexspiNorOtfadKeyBlob( self ):
otfadKeyblobLoadAddr = self.bootDeviceMemBase + RTyyyy_memdef.kMemBlockOffset_HwCryptoKeyBlob
status = boot.status.kStatus_Success
if self.isSbFileEnabledToGen:
            self._addFlashActionIntoSbAppBdContent(" load " + self.sbAccessBootDeviceMagic + " otfadKeyblobFile > " + self.convertLongIntHexText(str(hex(otfadKeyblobLoadAddr))) + ";\n")
status = boot.status.kStatus_Success
else:
status, results, cmdStr = self.blhost.writeMemory(otfadKeyblobLoadAddr, self.otfadKeyblobFilenname, self.bootDeviceMemId)
self.printLog(cmdStr)
return status == boot.status.kStatus_Success
def RTyyyy_flashBootableImage ( self ):
self._RTyyyy_prepareForBootDeviceOperation()
imageLen = os.path.getsize(self.destAppFilename)
if self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
semcNandOpt, semcNandFcbOpt, semcNandImageInfoList = uivar.getBootDeviceConfiguration(self.bootDevice)
memEraseLen = misc.align_up(imageLen, self.comMemEraseUnit)
for i in range(self.semcNandImageCopies):
imageLoadAddr = self.bootDeviceMemBase + (semcNandImageInfoList[i] >> 16) * self.semcNandBlockSize
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" erase " + self.sbAccessBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ".." + self.convertLongIntHexText(str(hex(imageLoadAddr + memEraseLen))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.sbAccessBootDeviceMagic + " myBinFile > " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ";\n")
else:
status, results, cmdStr = self.blhost.flashEraseRegion(imageLoadAddr, memEraseLen, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.writeMemory(imageLoadAddr, self.destAppFilename, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
elif self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor:
semcNorOpt, semcNorSetting, semcNorDeviceModel = uivar.getBootDeviceConfiguration(self.bootDevice)
            memEraseLen = self.destAppBinaryBytes + self.destAppVectorAddress
imageLoadAddr = self.bootDeviceMemBase
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" erase " + self.sbAccessBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ".." + self.convertLongIntHexText(str(hex(imageLoadAddr + memEraseLen))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.sbAccessBootDeviceMagic + " myBinFile > " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ";\n")
else:
status, results, cmdStr = self.blhost.flashEraseRegion(imageLoadAddr, memEraseLen, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.writeMemory(imageLoadAddr, self.destAppFilename,
self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
elif self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
if not self.isFlexspiNorErasedForImage:
if not self._eraseFlexspiNorForImageLoading():
return False
if self.secureBootType == RTyyyy_uidef.kSecureBootType_Development or \
self.secureBootType == RTyyyy_uidef.kSecureBootType_HabAuth or \
(self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto and self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys):
if not self._programFlexspiNorConfigBlock():
self.isFlexspiNorErasedForImage = False
return False
if self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto and self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
destEncAppFilename = None
if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
self._genDestEncAppFileWithoutCfgBlock()
destEncAppFilename = self.destEncAppNoCfgBlockFilename
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
self._genDestEncAppFileWithoutKeyblobAndCfgBlock()
destEncAppFilename = self.destEncAppNoKeyblobAndCfgBlockFilename
else:
pass
imageLoadAddr = self.bootDeviceMemBase + self.tgt.xspiNorCfgInfoOffset + rundef.kFlexspiNorCfgInfo_Length
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load " + self.sbAccessBootDeviceMagic + " myBinFile > " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ";\n")
status = boot.status.kStatus_Success
else:
status, results, cmdStr = self.blhost.writeMemory(imageLoadAddr, destEncAppFilename, self.bootDeviceMemId)
self.printLog(cmdStr)
if self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
self._extractOtfadKeyblobFromDestEncAppFile()
if not self._programFlexspiNorOtfadKeyBlob():
self.isFlexspiNorErasedForImage = False
return False
else:
pass
else:
imageLoadAddr = self.bootDeviceMemBase + RTyyyy_gendef.kIvtOffset_NOR
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load " + self.sbAccessBootDeviceMagic + " myBinFile > " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ";\n")
status = boot.status.kStatus_Success
else:
status, results, cmdStr = self.blhost.writeMemory(imageLoadAddr, self.destAppNoPaddingFilename, self.bootDeviceMemId)
self.printLog(cmdStr)
self.isFlexspiNorErasedForImage = False
if status != boot.status.kStatus_Success:
return False
elif self.bootDevice == RTyyyy_uidef.kBootDevice_LpspiNor:
memEraseLen = misc.align_up(imageLen, self.comMemEraseUnit)
imageLoadAddr = self.bootDeviceMemBase
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" erase " + self.sbAccessBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ".." + self.convertLongIntHexText(str(hex(imageLoadAddr + memEraseLen))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.sbAccessBootDeviceMagic + " myBinFile > " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ";\n")
else:
status, results, cmdStr = self.blhost.flashEraseRegion(imageLoadAddr, memEraseLen, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.writeMemory(imageLoadAddr, self.destAppFilename, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcSd or \
self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcMmc:
memEraseLen = misc.align_up(imageLen, self.comMemEraseUnit)
imageLoadAddr = self.bootDeviceMemBase + RTyyyy_gendef.kIvtOffset_NAND_SD_EEPROM
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" erase " + self.sbAccessBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ".." + self.convertLongIntHexText(str(hex(imageLoadAddr + memEraseLen))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.sbAccessBootDeviceMagic + " myBinFile > " + self.convertLongIntHexText(str(hex(imageLoadAddr))) + ";\n")
else:
status, results, cmdStr = self.blhost.flashEraseRegion(imageLoadAddr, memEraseLen, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.writeMemory(imageLoadAddr, self.destAppNoPaddingFilename, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
else:
pass
if self.isConvertedAppUsed:
try:
os.remove(self.srcAppFilename)
except:
pass
self.isConvertedAppUsed = False
return True
def _getMcuDeviceSemcNandCfg( self ):
semcNandCfg = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_SemcNandCfg'], '', False)
return semcNandCfg
def _getMcuDeviceLpspiCfg( self ):
lpspiCfg = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_LpspiCfg'], '', False)
return lpspiCfg
def burnBootDeviceFuses( self ):
if self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
setSemcNandCfg = 0
semcNandOpt, semcNandFcbOpt, imageInfo = uivar.getBootDeviceConfiguration(self.bootDevice)
# Set Device Ecc Status
eccStatus = (semcNandOpt & 0x00020000) >> 17
setSemcNandCfg = (setSemcNandCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_RawNandEccStatus']) | (eccStatus << self.tgt.efusemapDefnDict['kEfuseShift_RawNandEccStatus']))
# Set I/O Port Size
portSize = (semcNandOpt & 0x00000300) >> 8
if portSize <= 1:
portSize = 0
else:
portSize = 1
setSemcNandCfg = (setSemcNandCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_RawNandPortSize']) | (portSize << self.tgt.efusemapDefnDict['kEfuseShift_RawNandPortSize']))
if self.tgt.isEccTypeSetInFuseMiscConf:
# Set ECC Check Type
eccType = (semcNandOpt & 0x00010000) >> 16
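                # Note: (eccType + 1) % 2 below simply inverts the single-bit ECC type when
                # SW ECC is the default in the NAND option word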
if self.tgt.isSwEccSetAsDefaultInNandOpt:
eccType = (eccType + 1) % 2
setSemcNandCfg = (setSemcNandCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_RawNandEccEdoSet']) | (eccType << self.tgt.efusemapDefnDict['kEfuseShift_RawNandEccEdoSet']))
else:
# Set EDO mode
edoMode = (semcNandOpt & 0x00000008) >> 3
setSemcNandCfg = (setSemcNandCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_RawNandEccEdoSet']) | (edoMode << self.tgt.efusemapDefnDict['kEfuseShift_RawNandEccEdoSet']))
getSemcNandCfg = self._getMcuDeviceSemcNandCfg()
if getSemcNandCfg != None:
getSemcNandCfg = getSemcNandCfg | setSemcNandCfg
if (getSemcNandCfg & (self.tgt.efusemapDefnDict['kEfuseMask_RawNandEccStatus'] | self.tgt.efusemapDefnDict['kEfuseMask_RawNandPortSize'] | self.tgt.efusemapDefnDict['kEfuseMask_RawNandEccEdoSet'])) != setSemcNandCfg:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_miscConf1HasBeenBurned'][self.languageIndex])
return False
else:
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_SemcNandCfg'], getSemcNandCfg)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnMiscConf1'][self.languageIndex])
return False
elif self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
pass
elif self.bootDevice == RTyyyy_uidef.kBootDevice_LpspiNor:
setLpspiCfg = 0
# Set EEPROM enable
setLpspiCfg = setLpspiCfg | self.tgt.efusemapDefnDict['kEfuseMask_EepromEnable']
lpspiNorOpt0, lpspiNorOpt1 = uivar.getBootDeviceConfiguration(self.bootDevice)
# Set Spi Index
spiIndex = ((lpspiNorOpt0 & 0x00F00000) >> 20) - 1
setLpspiCfg = (setLpspiCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_LpspiIndex']) | (spiIndex << self.tgt.efusemapDefnDict['kEfuseShift_LpspiIndex']))
# Set Spi Speed
spiSpeed = (lpspiNorOpt1 & 0x0000000F) >> 0
setLpspiCfg = (setLpspiCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_LpspiSpeed']) | (spiSpeed << self.tgt.efusemapDefnDict['kEfuseShift_LpspiSpeed']))
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
# Set Spi Addressing
spiAddressing = 0
val = (lpspiNorOpt0 & 0x00000F00) >> 8
totalByteSize = 0
if val <= 11:
totalByteSize = int(math.pow(2, val + 19))
else:
totalByteSize = int(math.pow(2, val + 3))
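                # A 2-byte SPI address reaches at most 64 KB (2 ** 16 bytes), so anything
                # larger needs 3-byte addressing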
if totalByteSize > (64 * 1024):
spiAddressing = RTyyyy_fusedef.kSpiAddressing_3Bytes
else:
spiAddressing = RTyyyy_fusedef.kSpiAddressing_2Bytes
setLpspiCfg = (setLpspiCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_SpiAddressing']) | (spiAddressing << self.tgt.efusemapDefnDict['kEfuseShift_SpiAddressing']))
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
else:
pass
getLpspiCfg = self._getMcuDeviceLpspiCfg()
if getLpspiCfg != None:
getLpspiCfg = getLpspiCfg | setLpspiCfg
if (getLpspiCfg & (self.tgt.efusemapDefnDict['kEfuseMask_EepromEnable'] | self.tgt.efusemapDefnDict['kEfuseMask_LpspiIndex'] | self.tgt.efusemapDefnDict['kEfuseMask_SpiAddressing'] | self.tgt.efusemapDefnDict['kEfuseMask_LpspiSpeed'])) != setLpspiCfg:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_miscConf0HasBeenBurned'][self.languageIndex])
return False
else:
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_LpspiCfg'], getLpspiCfg)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnMiscConf0'][self.languageIndex])
return False
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcSd:
pass
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcMmc:
pass
else:
pass
return True
def _getMcuDeviceHwCryptoKeySel( self ):
hwCryptoKeySel = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_HwCryptoKeySel'], '', False)
if hwCryptoKeySel != None:
self.mcuDeviceHwCryptoKey0Sel = ((hwCryptoKeySel & self.tgt.efusemapDefnDict['kEfuseMask_HwCryptoKey0Sel']) >> self.tgt.efusemapDefnDict['kEfuseShift_HwCryptoKey0Sel'])
self.mcuDeviceHwCryptoKey1Sel = ((hwCryptoKeySel & self.tgt.efusemapDefnDict['kEfuseMask_HwCryptoKey1Sel']) >> self.tgt.efusemapDefnDict['kEfuseShift_HwCryptoKey1Sel'])
return hwCryptoKeySel
def burnHwCryptoKeySel( self ):
setHwCryptoKey0Sel = None
setHwCryptoKey1Sel = None
if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FixedOtpmkKey:
otpmkKeyCommDict = uivar.getAdvancedSettings(uidef.kAdvancedSettings_OtpmkKey)
otpmkKeyOpt = otpmkKeyCommDict['opt']
if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
encryptedRegionCnt = (otpmkKeyOpt & 0x000F0000) >> 16
# One PRDB means one BEE_KEY, no matter how many FAC regions it has
if encryptedRegionCnt >= 0:
setHwCryptoKey0Sel = RTyyyy_fusedef.kBeeKeySel_FromOtpmkHigh
#if encryptedRegionCnt > 1:
# setHwCryptoKey1Sel = RTyyyy_fusedef.kBeeKeySel_FromOtpmkHigh
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
keySource = (otpmkKeyOpt & 0x0000F000) >> 12
if keySource == 0:
setHwCryptoKey0Sel = RTyyyy_fusedef.kOtfadKeySel_FromOtpmkLow
elif keySource == 1:
setHwCryptoKey0Sel = RTyyyy_fusedef.kOtfadKeySel_FromOtpmkHigh
else:
pass
else:
pass
elif self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
userKeyCtrlDict, userKeyCmdDict = uivar.getAdvancedSettings(uidef.kAdvancedSettings_UserKeys)
if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
if userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_Engine0 or userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_BothEngines:
if userKeyCtrlDict['engine0_key_src'] == RTyyyy_uidef.kUserKeySource_OTPMK:
setHwCryptoKey0Sel = RTyyyy_fusedef.kBeeKeySel_FromOtpmkHigh
elif userKeyCtrlDict['engine0_key_src'] == RTyyyy_uidef.kUserKeySource_SW_GP2:
setHwCryptoKey0Sel = RTyyyy_fusedef.kBeeKeySel_FromSwGp2
elif userKeyCtrlDict['engine0_key_src'] == RTyyyy_uidef.kUserKeySource_GP4:
setHwCryptoKey0Sel = RTyyyy_fusedef.kBeeKeySel_FromGp4
else:
pass
if userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_Engine1 or userKeyCtrlDict['engine_sel'] == RTyyyy_uidef.kUserEngineSel_BothEngines:
                    if userKeyCtrlDict['engine1_key_src'] == RTyyyy_uidef.kUserKeySource_OTPMK:
setHwCryptoKey1Sel = RTyyyy_fusedef.kBeeKeySel_FromOtpmkHigh
elif userKeyCtrlDict['engine1_key_src'] == RTyyyy_uidef.kUserKeySource_SW_GP2:
setHwCryptoKey1Sel = RTyyyy_fusedef.kBeeKeySel_FromSwGp2
elif userKeyCtrlDict['engine1_key_src'] == RTyyyy_uidef.kUserKeySource_GP4:
setHwCryptoKey1Sel = RTyyyy_fusedef.kBeeKeySel_FromGp4
else:
pass
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
if userKeyCtrlDict['kek_src'] == RTyyyy_uidef.kUserKeySource_SW_GP2:
setHwCryptoKey0Sel = RTyyyy_fusedef.kOtfadKeySel_FromSwGp2
elif userKeyCtrlDict['kek_src'] == RTyyyy_uidef.kUserKeySource_USER_KEY5:
setHwCryptoKey0Sel = RTyyyy_fusedef.kOtfadKeySel_FromUserKey5
else:
pass
else:
pass
else:
pass
getHwCryptoKeySel = self._getMcuDeviceHwCryptoKeySel()
if getHwCryptoKeySel != None:
if setHwCryptoKey0Sel != None:
getHwCryptoKeySel = getHwCryptoKeySel | (setHwCryptoKey0Sel << self.tgt.efusemapDefnDict['kEfuseShift_HwCryptoKey0Sel'])
if ((getHwCryptoKeySel & self.tgt.efusemapDefnDict['kEfuseMask_HwCryptoKey0Sel']) >> self.tgt.efusemapDefnDict['kEfuseShift_HwCryptoKey0Sel']) != setHwCryptoKey0Sel:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_hwCryptoKey0SelHasBeenBurned'][self.languageIndex])
return False
if setHwCryptoKey1Sel != None:
getHwCryptoKeySel = getHwCryptoKeySel | (setHwCryptoKey1Sel << self.tgt.efusemapDefnDict['kEfuseShift_HwCryptoKey1Sel'])
if ((getHwCryptoKeySel & self.tgt.efusemapDefnDict['kEfuseMask_HwCryptoKey1Sel']) >> self.tgt.efusemapDefnDict['kEfuseShift_HwCryptoKey1Sel']) != setHwCryptoKey1Sel:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_hwCryptoKey1SelHasBeenBurned'][self.languageIndex])
return False
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_HwCryptoKeySel'], getHwCryptoKeySel)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnHwCryptoKeyxSel'][self.languageIndex])
return False
return True
def enableOtfad( self ):
otfadCfg = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_OtfadEnable'], '', False)
if otfadCfg != None:
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
otfadCfg = otfadCfg | (0x1 << self.tgt.efusemapDefnDict['kEfuseShift_OtfadEnable'])
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
otfadCfg = otfadCfg | (0x1 << self.tgt.efusemapDefnDict['kEfuseShift_OtfadKeyblobEnable'])
otfadCfg = otfadCfg | (0x1 << self.tgt.efusemapDefnDict['kEfuseShift_OtfadKeyblobCrcEnable'])
otfadCfg = otfadCfg | (0x1 << self.tgt.efusemapDefnDict['kEfuseShift_Otfad2KeyblobEnable'])
otfadCfg = otfadCfg | (0x1 << self.tgt.efusemapDefnDict['kEfuseShift_Otfad2KeyblobCrcEnable'])
else:
pass
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_OtfadEnable'], otfadCfg)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnOtfadEnablementBit'][self.languageIndex])
return False
return True
def _isDeviceFuseOtfadKeyScrambleAlgoRegionReadyForBurn( self, scrambleAlgo ):
isReady = True
isBlank = True
key = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTFAD_KEY'], '', False)
if key == None:
isReady = False
elif key != 0:
isBlank = False
if key != scrambleAlgo:
isReady = False
return isReady, isBlank
def burnOtfadKeyScrambleAlgo ( self, scrambleAlgo ):
isReady, isBlank = self._isDeviceFuseOtfadKeyScrambleAlgoRegionReadyForBurn(scrambleAlgo)
if isReady:
if isBlank:
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTFAD_KEY'], scrambleAlgo)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnOtfadKeyScramble'][self.languageIndex])
return False
return True
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_otfadKeyScrambleHasBeenBurned'][self.languageIndex])
return False
def burnOtfadScrambleFields( self ):
if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
userKeyCtrlDict, userKeyCmdDict = uivar.getAdvancedSettings(uidef.kAdvancedSettings_UserKeys)
if userKeyCmdDict['scramble'] != None:
scrambleAlgo = int(userKeyCmdDict['scramble'][2:len(userKeyCmdDict['scramble'])], 16)
if not self.burnOtfadKeyScrambleAlgo(scrambleAlgo):
return False
scrambleAlignment = int(userKeyCmdDict['scramble_align'][2:len(userKeyCmdDict['scramble_align'])], 16)
otfadCfg = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTFAD_CFG'], '', False)
if otfadCfg != None:
otfadCfg = otfadCfg | (0x1 << self.tgt.efusemapDefnDict['kEfuseShift_OtfadKeyScrambleEnable'])
otfadCfg = (otfadCfg & (~self.tgt.efusemapDefnDict['kEfuseMask_OtfadKeyScrambleAlign'])) | (scrambleAlignment << self.tgt.efusemapDefnDict['kEfuseShift_OtfadKeyScrambleAlign'])
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseIndex_OTFAD_CFG'], otfadCfg)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnOtfadScrambleConfigurationField'][self.languageIndex])
return False
return True
else:
return True
else:
return True
def burnHwCryptoEnablements( self ):
if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
return True
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
return self.enableOtfad() and self.burnOtfadScrambleFields()
else:
pass
def flashHabDekToGenerateKeyBlob ( self ):
if os.path.isfile(self.habDekFilename) and self.habDekDataOffset != None:
self._RTyyyy_prepareForBootDeviceOperation()
imageLen = os.path.getsize(self.destAppFilename)
imageCopies = 0x1
if self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
imageCopies = self.semcNandImageCopies
else:
pass
# Construct KeyBlob Option
#---------------------------------------------------------------------------
# bit [31:28] tag, fixed to 0x0b
            # bit [27:24] type, 0 - Update KeyBlob context, 1 - Program KeyBlob to SPI NAND
            # bit [23:20] keyblob option block size, must equal 3 if type = 0,
            #             reserved if type = 1
            # bit [19:08] Reserved
            # bit [07:04] DEK size, 0 - 128bit, 1 - 192bit, 2 - 256bit, only applicable if type = 0
            # bit [03:00] Firmware Index, only applicable if type = 1
            # if type = 0, the 2nd word holds the address of the DEK data and
            #              the 3rd word holds the DEK data offset (see habDekDataOffset below)
#----------------------------------------------------------------------------
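            # Illustrative decode of the two option words used below (derived from the layout above):
            #   keyBlobContextOpt = 0xb0300000: tag 0xb, type 0 (update KeyBlob context),
            #       option block size 3, DEK size 0 (128-bit)
            #   keyBlobDataOpt = 0xb1000000: tag 0xb, type 1 (program KeyBlob), firmware index
            #       supplied per image copy via keyBlobDataOpt + i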
keyBlobContextOpt = 0xb0300000
keyBlobDataOpt = 0xb1000000
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load dekFile > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadDekData))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(keyBlobContextOpt))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadDekData))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext + 4))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(self.habDekDataOffset))) + " > " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext + 8))) + ";\n")
self._addFlashActionIntoSbAppBdContent(" enable " + self.sbEnableBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext))) + ";\n")
else:
status, results, cmdStr = self.blhost.writeMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadDekData, self.habDekFilename)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext, 0x4, keyBlobContextOpt)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext + 4, 0x4, RTyyyy_rundef.kRamFreeSpaceStart_LoadDekData)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.fillMemory(RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext + 8, 0x4, self.habDekDataOffset)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
status, results, cmdStr = self.blhost.configureMemory(self.bootDeviceMemId, RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobContext)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
for i in range(imageCopies):
ramFreeSpace = RTyyyy_rundef.kRamFreeSpaceStart_LoadKeyBlobData + (RTyyyy_rundef.kRamFreeSpaceStep_LoadKeyBlobData * i)
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" load " + self.convertLongIntHexText(str(hex(keyBlobDataOpt + i))) + " > " + self.convertLongIntHexText(str(hex(ramFreeSpace))) + ";\n")
else:
status, results, cmdStr = self.blhost.fillMemory(ramFreeSpace, 0x4, keyBlobDataOpt + i)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
########################################################################
# Flashloader will not erase keyblob region automatically, so we need to handle it here manually
imageLoadAddr = 0x0
if self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
semcNandOpt, semcNandFcbOpt, imageInfo = uivar.getBootDeviceConfiguration(self.bootDevice)
imageLoadAddr = self.bootDeviceMemBase + (imageInfo[i] >> 16) * self.semcNandBlockSize
elif self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or \
self.bootDevice == RTyyyy_uidef.kBootDevice_LpspiNor or \
self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcSd or \
self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcMmc:
imageLoadAddr = self.bootDeviceMemBase
else:
pass
alignedErasedSize = misc.align_up(imageLen, self.comMemEraseUnit)
needToBeErasedSize = misc.align_up(self.habDekDataOffset + RTyyyy_memdef.kMemBlockSize_HabKeyBlob, self.comMemEraseUnit)
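                # Illustrative sizing with hypothetical numbers: imageLen = 0x8000, erase unit = 0x1000,
                # habDekDataOffset = 0xA000 and a 0x200 keyblob block give alignedErasedSize = 0x8000 and
                # needToBeErasedSize = 0xB000, so the extra 0x3000 bytes covering the keyblob are erased below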
if alignedErasedSize < needToBeErasedSize:
memEraseLen = needToBeErasedSize - alignedErasedSize
alignedMemEraseAddr = imageLoadAddr + alignedErasedSize
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" erase " + self.sbAccessBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(alignedMemEraseAddr))) + ".." + self.convertLongIntHexText(str(hex(alignedMemEraseAddr + memEraseLen))) + ";\n")
else:
status, results, cmdStr = self.blhost.flashEraseRegion(alignedMemEraseAddr, memEraseLen, self.bootDeviceMemId)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
########################################################################
if self.isSbFileEnabledToGen:
self._addFlashActionIntoSbAppBdContent(" enable " + self.sbEnableBootDeviceMagic + " " + self.convertLongIntHexText(str(hex(ramFreeSpace))) + ";\n")
else:
status, results, cmdStr = self.blhost.configureMemory(self.bootDeviceMemId, ramFreeSpace)
self.printLog(cmdStr)
if status != boot.status.kStatus_Success:
return False
if self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
if not self._eraseFlexspiNorForConfigBlockLoading():
return False
if not self._programFlexspiNorConfigBlock():
return False
self.updateImgPictureAfterFlashDek()
return True
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['certGenError_dekNotGen'][self.languageIndex])
return False
def enableHab( self ):
if self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed0 and \
self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed1:
secConfig1 = self.readMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_SecConfig1'], '', False)
if secConfig1 != None:
secConfig1 = secConfig1 | self.tgt.efusemapDefnDict['kEfuseMask_SecConfig1']
burnResult = self.burnMcuDeviceFuseByBlhost(self.tgt.efusemapIndexDict['kEfuseLocation_SecConfig1'], secConfig1)
if not burnResult:
self.popupMsgBox(uilang.kMsgLanguageContentDict['burnFuseError_failToBurnSecConfig1'][self.languageIndex])
return False
return True
def RTyyyy_resetMcuDevice( self ):
status, results, cmdStr = self.blhost.reset()
self.printLog(cmdStr)
return (status == boot.status.kStatus_Success)
| 59.310511
| 346
| 0.639217
|
b7aed576b89c7c7895cc8ad6215314d8a2e5dacb
| 3,451
|
py
|
Python
|
tests/system/reserves/balance/test_lf_reserves_down.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
tests/system/reserves/balance/test_lf_reserves_down.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
tests/system/reserves/balance/test_lf_reserves_down.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | 1
|
2021-12-21T20:44:21.000Z
|
2021-12-21T20:44:21.000Z
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from collections import OrderedDict
from importlib import import_module
import os.path
import sys
import unittest
from tests.common_functions import create_abstract_model, add_components_and_load_data
TEST_DATA_DIRECTORY = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "test_data"
)
# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
"temporal.operations.timepoints",
"temporal.operations.horizons",
"temporal.investment.periods",
"geography.load_zones",
"geography.load_following_down_balancing_areas",
"project",
"project.capacity.capacity",
"project.fuels",
"project.operations",
"project.operations.reserves.lf_reserves_down",
"system.load_balance.static_load_requirement",
"system.reserves.requirement.lf_reserves_down",
"system.reserves.aggregation.lf_reserves_down",
]
NAME_OF_MODULE_BEING_TESTED = "system.reserves.balance.lf_reserves_down"
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
try:
imported_module = import_module("." + str(mdl), package="gridpath")
IMPORTED_PREREQ_MODULES.append(imported_module)
except ImportError:
print("ERROR! Module " + str(mdl) + " not found.")
sys.exit(1)
# Import the module we'll test
try:
MODULE_BEING_TESTED = import_module(
"." + NAME_OF_MODULE_BEING_TESTED, package="gridpath"
)
except ImportError:
print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED + " to test.")
class TestCosts(unittest.TestCase):
""" """
def test_add_model_components(self):
"""
Test that there are no errors when adding model components
:return:
"""
create_abstract_model(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
def test_load_model_data(self):
"""
Test that data are loaded with no errors
:return:
"""
add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
def test_data_loaded_correctly(self):
"""
Test components initialized with expected data
:return:
"""
m, data = add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
instance = m.create_instance(data)
if __name__ == "__main__":
unittest.main()
| 31.372727
| 87
| 0.687047
|
9f875df69844a27b4ab52a7e04de1730de137182
| 14,996
|
py
|
Python
|
tests/test_registration.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | 1
|
2020-06-27T22:25:49.000Z
|
2020-06-27T22:25:49.000Z
|
tests/test_registration.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_registration.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | null | null | null |
from skfda import FDataGrid
from skfda._utils import _check_estimator
from skfda.datasets import (make_multimodal_samples, make_multimodal_landmarks,
make_sinusoidal_process)
from skfda.exploratory.stats import mean
from skfda.preprocessing.registration import (
normalize_warping, invert_warping, landmark_shift_deltas, landmark_shift,
landmark_registration_warping, landmark_registration, ShiftRegistration)
from skfda.preprocessing.registration.validation import (
AmplitudePhaseDecomposition, LeastSquares,
SobolevLeastSquares, PairwiseCorrelation)
from skfda.representation.basis import Fourier
from skfda.representation.interpolation import SplineInterpolation
import unittest
from sklearn.exceptions import NotFittedError
import numpy as np
class TestWarping(unittest.TestCase):
"""Test warpings functions"""
def setUp(self):
"""Initialization of samples"""
self.time = np.linspace(-1, 1, 50)
interpolation = SplineInterpolation(3, monotone=True)
self.polynomial = FDataGrid([self.time**3, self.time**5],
self.time, interpolation=interpolation)
def test_invert_warping(self):
inverse = invert_warping(self.polynomial)
# Check if identity
id = self.polynomial.compose(inverse)
np.testing.assert_array_almost_equal([self.time, self.time],
id.data_matrix[..., 0],
decimal=3)
def test_standard_normalize_warping(self):
"""Test normalization to (0, 1)"""
normalized = normalize_warping(self.polynomial, (0, 1))
# Test new domain range (0, 1)
np.testing.assert_array_equal(normalized.domain_range, [(0, 1)])
np.testing.assert_array_almost_equal(normalized.sample_points[0],
np.linspace(0, 1, 50))
np.testing.assert_array_almost_equal(
normalized(0)[..., 0], [[0.], [0.]])
np.testing.assert_array_almost_equal(
normalized(1)[..., 0], [[1.], [1.]])
def test_standard_normalize_warping_default_value(self):
"""Test normalization """
normalized = normalize_warping(self.polynomial)
# Test new domain range (0, 1)
np.testing.assert_array_equal(normalized.domain_range, [(-1, 1)])
np.testing.assert_array_almost_equal(normalized.sample_points[0],
np.linspace(-1, 1, 50))
np.testing.assert_array_almost_equal(
normalized(-1)[..., 0], [[-1], [-1]])
np.testing.assert_array_almost_equal(
normalized(1)[..., 0], [[1.], [1.]])
def test_normalize_warping(self):
"""Test normalization to (a, b)"""
a = -4
b = 3
domain = (a, b)
normalized = normalize_warping(self.polynomial, domain)
# Test new domain range (0, 1)
np.testing.assert_array_equal(normalized.domain_range, [domain])
np.testing.assert_array_almost_equal(normalized.sample_points[0],
np.linspace(*domain, 50))
np.testing.assert_array_equal(normalized(a)[..., 0], [[a], [a]])
np.testing.assert_array_equal(normalized(b)[..., 0], [[b], [b]])
def test_landmark_shift_deltas(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
shifts = landmark_shift_deltas(fd, landmarks).round(3)
np.testing.assert_almost_equal(shifts, [0.25, -0.25, -0.231])
def test_landmark_shift(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
original_modes = fd(landmarks.reshape((3, 1, 1)),
aligned_evaluation=False)
# Test default location
fd_registered = landmark_shift(fd, landmarks)
center = (landmarks.max() + landmarks.min()) / 2
reg_modes = fd_registered(center)
        np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
        # Test callable location
fd_registered = landmark_shift(fd, landmarks, location=np.mean)
center = np.mean(landmarks)
reg_modes = fd_registered(center)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
# Test integer location
fd_registered = landmark_shift(fd, landmarks, location=0)
center = np.mean(landmarks)
reg_modes = fd_registered(0)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
# Test array location
fd_registered = landmark_shift(fd, landmarks, location=[0, 0.1, 0.2])
reg_modes = fd_registered([[0], [.1], [.2]], aligned_evaluation=False)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
def test_landmark_registration_warping(self):
fd = make_multimodal_samples(n_samples=3, n_modes=2, random_state=9)
landmarks = make_multimodal_landmarks(n_samples=3, n_modes=2,
random_state=9)
landmarks = landmarks.squeeze()
# Default location
warping = landmark_registration_warping(fd, landmarks)
center = (landmarks.max(axis=0) + landmarks.min(axis=0)) / 2
np.testing.assert_almost_equal(
warping(center)[..., 0], landmarks, decimal=1)
# Fixed location
center = [.3, .6]
warping = landmark_registration_warping(fd, landmarks, location=center)
np.testing.assert_almost_equal(
warping(center)[..., 0], landmarks, decimal=3)
def test_landmark_registration(self):
fd = make_multimodal_samples(n_samples=3, n_modes=2, random_state=9)
landmarks = make_multimodal_landmarks(n_samples=3, n_modes=2,
random_state=9)
landmarks = landmarks.squeeze()
original_values = fd(landmarks.reshape(3, 2), aligned_evaluation=False)
# Default location
fd_reg = landmark_registration(fd, landmarks)
center = (landmarks.max(axis=0) + landmarks.min(axis=0)) / 2
np.testing.assert_almost_equal(fd_reg(center), original_values,
decimal=2)
# Fixed location
center = [.3, .6]
fd_reg = landmark_registration(fd, landmarks, location=center)
np.testing.assert_array_almost_equal(fd_reg(center), original_values,
decimal=2)
class TestShiftRegistration(unittest.TestCase):
"""Test shift registration"""
def setUp(self):
"""Initialization of samples"""
self.fd = make_sinusoidal_process(n_samples=2, error_std=0,
random_state=1)
self.fd.extrapolation = "periodic"
def test_fit_transform(self):
reg = ShiftRegistration()
# Test fit transform with FDataGrid
fd_reg = reg.fit_transform(self.fd)
# Check attributes fitted
self.assertTrue(hasattr(reg, 'deltas_'))
self.assertTrue(hasattr(reg, 'template_'))
self.assertTrue(hasattr(reg, 'n_iter_'))
self.assertTrue(isinstance(fd_reg, FDataGrid))
deltas = reg.deltas_.round(3)
np.testing.assert_array_almost_equal(deltas, [-0.022, 0.03])
# Test with Basis
fd = self.fd.to_basis(Fourier())
reg.fit_transform(fd)
deltas = reg.deltas_.round(3)
np.testing.assert_array_almost_equal(deltas, [-0.022, 0.03])
def test_fit_and_transform(self):
"""Test wrapper of shift_registration_deltas"""
fd = make_sinusoidal_process(n_samples=2, error_std=0, random_state=10)
reg = ShiftRegistration()
response = reg.fit(self.fd)
# Check attributes and returned value
self.assertTrue(hasattr(reg, 'template_'))
self.assertTrue(response is reg)
fd_registered = reg.transform(fd)
deltas = reg.deltas_.round(3)
np.testing.assert_allclose(deltas, [0.071, -0.072])
def test_inverse_transform(self):
reg = ShiftRegistration()
fd = reg.fit_transform(self.fd)
fd = reg.inverse_transform(fd)
np.testing.assert_array_almost_equal(fd.data_matrix,
self.fd.data_matrix, decimal=3)
def test_raises(self):
reg = ShiftRegistration()
# Test not fitted
with np.testing.assert_raises(NotFittedError):
reg.transform(self.fd)
reg.fit(self.fd)
reg.set_params(restrict_domain=True)
# Test use fit or transform with restrict_domain=True
with np.testing.assert_raises(AttributeError):
reg.transform(self.fd)
with np.testing.assert_raises(AttributeError):
reg.fit(self.fd)
# Test inverse_transform without previous transformation
with np.testing.assert_raises(AttributeError):
reg.inverse_transform(self.fd)
reg.fit_transform(self.fd)
# Test inverse transform with different number of sample
with np.testing.assert_raises(ValueError):
reg.inverse_transform(self.fd[:1])
fd = make_multimodal_samples(dim_domain=2, random_state=0)
with np.testing.assert_raises(ValueError):
reg.fit_transform(fd)
reg.set_params(initial=[0.])
# Wrong initial estimation
with np.testing.assert_raises(ValueError):
reg.fit_transform(self.fd)
def test_template(self):
reg = ShiftRegistration()
fd_registered_1 = reg.fit_transform(self.fd)
reg_2 = ShiftRegistration(template=reg.template_)
fd_registered_2 = reg_2.fit_transform(self.fd)
reg_3 = ShiftRegistration(template=mean)
fd_registered_3 = reg_3.fit_transform(self.fd)
reg_4 = ShiftRegistration(template=reg.template_)
fd_registered_4 = reg_4.fit(self.fd).transform(self.fd)
np.testing.assert_array_almost_equal(fd_registered_1.data_matrix,
fd_registered_3.data_matrix)
# With the template fixed could vary the convergence
np.testing.assert_array_almost_equal(fd_registered_1.data_matrix,
fd_registered_2.data_matrix,
decimal=3)
np.testing.assert_array_almost_equal(fd_registered_2.data_matrix,
fd_registered_4.data_matrix)
def test_restrict_domain(self):
reg = ShiftRegistration(restrict_domain=True)
fd_registered_1 = reg.fit_transform(self.fd)
np.testing.assert_array_almost_equal(
fd_registered_1.domain_range.round(3), [[0.022, 0.969]])
reg2 = ShiftRegistration(restrict_domain=True, template=reg.template_)
fd_registered_2 = reg2.fit_transform(self.fd)
np.testing.assert_array_almost_equal(
fd_registered_2.data_matrix, fd_registered_1.data_matrix,
decimal=3)
reg3 = ShiftRegistration(restrict_domain=True, template=mean)
fd_registered_3 = reg3.fit_transform(self.fd)
np.testing.assert_array_almost_equal(
fd_registered_3.data_matrix, fd_registered_1.data_matrix)
def test_initial_estimation(self):
reg = ShiftRegistration(initial=[-0.02161235, 0.03032652])
reg.fit_transform(self.fd)
# Only needed 1 iteration until convergence
self.assertEqual(reg.n_iter_, 1)
def test_custom_output_points(self):
reg = ShiftRegistration(output_points=np.linspace(0, 1, 50))
reg.fit_transform(self.fd)
class TestRegistrationValidation(unittest.TestCase):
"""Test shift registration"""
def setUp(self):
"""Initialization of samples"""
self.X = make_sinusoidal_process(error_std=0, random_state=0)
self.shift_registration = ShiftRegistration().fit(self.X)
def test_amplitude_phase_score(self):
scorer = AmplitudePhaseDecomposition()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.972095, rtol=1e-6)
def test_amplitude_phase_score_with_output_points(self):
eval_points = self.X.sample_points[0]
scorer = AmplitudePhaseDecomposition(eval_points=eval_points)
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.972095, rtol=1e-6)
def test_amplitude_phase_score_with_basis(self):
scorer = AmplitudePhaseDecomposition()
X = self.X.to_basis(Fourier())
score = scorer(self.shift_registration, X)
np.testing.assert_allclose(score, 0.995087, rtol=1e-6)
def test_default_score(self):
score = self.shift_registration.score(self.X)
np.testing.assert_allclose(score, 0.972095, rtol=1e-6)
def test_least_squares_score(self):
scorer = LeastSquares()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.795933, rtol=1e-6)
def test_sobolev_least_squares_score(self):
scorer = SobolevLeastSquares()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 0.76124, rtol=1e-6)
def test_pairwise_correlation(self):
scorer = PairwiseCorrelation()
score = scorer(self.shift_registration, self.X)
np.testing.assert_allclose(score, 1.816228, rtol=1e-6)
def test_mse_decomposition(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
warping = landmark_registration_warping(fd, landmarks)
fd_registered = fd.compose(warping)
scorer = AmplitudePhaseDecomposition(return_stats=True)
ret = scorer.score_function(fd, fd_registered, warping=warping)
np.testing.assert_allclose(ret.mse_amp, 0.0009866997121476962)
np.testing.assert_allclose(ret.mse_pha, 0.11576935495450151)
np.testing.assert_allclose(ret.r_squared, 0.9915489952877273)
np.testing.assert_allclose(ret.c_r, 0.999999, rtol=1e-6)
def test_raises_amplitude_phase(self):
scorer = AmplitudePhaseDecomposition()
# Inconsistent number of functions registered
with np.testing.assert_raises(ValueError):
scorer.score_function(self.X, self.X[:2])
# Inconsistent number of functions registered
with np.testing.assert_raises(ValueError):
scorer.score_function(self.X, self.X, warping=self.X[:2])
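def _registration_scoring_sketch():
    """Illustrative sketch, not part of the original test module.

    Shows how the scorers exercised above are meant to be used outside of
    unittest: fit a ShiftRegistration on functional data and evaluate it with
    a validation scorer. The data parameters are arbitrary examples.
    """
    X = make_sinusoidal_process(error_std=0, random_state=0)
    reg = ShiftRegistration().fit(X)
    # Higher is better; roughly 0.97 for this synthetic dataset (see tests above).
    return AmplitudePhaseDecomposition()(reg, X)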
if __name__ == '__main__':
print()
unittest.main()
| 37.210918
| 79
| 0.650507
|
5a8ee9cd36f1b581bb1303980b774ba48b229a22
| 2,302
|
py
|
Python
|
examples/dfp/v201711/user_team_association_service/create_user_team_associations.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | 1
|
2019-10-21T04:10:22.000Z
|
2019-10-21T04:10:22.000Z
|
examples/dfp/v201711/user_team_association_service/create_user_team_associations.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | null | null | null |
examples/dfp/v201711/user_team_association_service/create_user_team_associations.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | 1
|
2019-10-21T04:10:51.000Z
|
2019-10-21T04:10:51.000Z
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a user to a team by creating an association between them.
To determine which teams exist, run get_all_teams.py. To determine which
users exist, run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
TEAM_ID = 'INSERT_TEAM_ID_HERE'
USER_IDS = ['INSERT_USER_IDS_TO_ASSOCIATE_TO_TEAM_HERE']
def main(client, team_id, user_ids):
# Initialize appropriate service.
user_team_association_service = client.GetService(
'UserTeamAssociationService', version='v201711')
user_team_associations = []
for user_id in user_ids:
user_team_associations.append(
{
'teamId': team_id,
'userId': user_id
})
# Create the user team association on the server.
user_team_associations = (
user_team_association_service.createUserTeamAssociations(
user_team_associations))
# Display results.
if user_team_associations:
for user_team_association in user_team_associations:
print ('A user team association between user with ID "%s" and team with'
' ID "%s"was created.' % (user_team_association['userId'],
user_team_association['teamId']))
else:
print 'No user team associations created.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, TEAM_ID, USER_IDS)
| 33.852941
| 78
| 0.732407
|
7fd51ca08d258beb6ec528fad093cd00ed4723e9
| 11,745
|
py
|
Python
|
electroncash_gui/qt/bip38_importer.py
|
thonkle/ElectrumABC
|
e0d58a1336e067957175242880d3e953a8dcbc2c
|
[
"MIT"
] | 23
|
2020-11-23T21:49:20.000Z
|
2022-02-23T05:43:44.000Z
|
electroncash_gui/qt/bip38_importer.py
|
thonkle/ElectrumABC
|
e0d58a1336e067957175242880d3e953a8dcbc2c
|
[
"MIT"
] | 73
|
2020-11-24T19:04:12.000Z
|
2022-03-25T15:09:37.000Z
|
electroncash_gui/qt/bip38_importer.py
|
thonkle/ElectrumABC
|
e0d58a1336e067957175242880d3e953a8dcbc2c
|
[
"MIT"
] | 6
|
2020-11-24T05:53:14.000Z
|
2022-01-24T16:09:36.000Z
|
# -*- mode: python3 -*-
# Electron Cash - (C) 2019 The Electron Cash Developers and Electron Cash LLC
#
import threading
from PyQt5.QtCore import Qt, QTimer, pyqtSignal
from PyQt5.QtGui import QFont
from PyQt5 import QtWidgets
from .util import (
MONOSPACE_FONT,
Buttons,
CancelButton,
ColorScheme,
HelpLabel,
OkButton,
WindowModalDialog,
)
from electroncash.i18n import _
from electroncash import util, bitcoin, address
from electroncash.constants import PROJECT_NAME
class Bip38Importer(WindowModalDialog, util.PrintError):
''' A drop-in GUI element for implementing a BIP38 import dialog.
For each of the passed-in bip38 keys, it will prompt the user to enter their
password and it will attempt to decrypt the keys.
Requires bitcoin.is_bip38_available() == True otherwise will raise
RuntimeError on instantiation. '''
decrypted_sig = pyqtSignal(object, object) # Decrypt thread emits this with _decrypt_thread.self, (decrypted_wif, Address) or _decrypt_thread.self, () on failure due to bad password
def __init__(self, bip38_keys, *,
parent=None, title=None,
message=None, # The message to display as a label up top
show_count=True, # If false, don't show 'Key 1/n:' in UI instead just say: 'Key: '
on_success=None, # Callback will be called with a dict of bip38key -> (decoded_wif_str, Address) objects
on_cancel=None): # Callback will be called if user hits cancel
''' bip38_keys should be a list of '6P' strings, representing bip38
keys. The user will be prompted for each key to enter a password
and will be shown the decoded address and WIF key. Note that this
        method will raise RuntimeError if not bitcoin.is_bip38_available().
on_success: if specified, will be called after the window has closed
(exec_ has finished) with a single argument: a dict of
bip38key -> (decoded_wif, Address).
on_cancel: if specified, will be called after the window was closed
(exec_ has finished) with no arguments.
If you don't specify any callbacks, results are still available in
the self.decoded_keys dict.
The dialog will always terminate with either all keys successfully
decrypted or a user cancel.
'''
if not title:
title = f'{PROJECT_NAME} - ' + _('BIP38 Import')
WindowModalDialog.__init__(self, parent=parent, title=title)
if not bitcoin.is_bip38_available():
raise RuntimeError('Bip38Importer: bip38 decoding is not available')
self.bip38_keys = tuple(bip38_keys)
assert self.bip38_keys and all(bitcoin.is_bip38_key(k) for k in self.bip38_keys)
if not parent:
self.setWindowModality(Qt.ApplicationModal)
self.decoded_keys = dict() # results are placed here on success
self.success_cb, self.cancel_cb = on_success, on_cancel
self.cur, self.decoded_wif, self.decoded_address = 0, None, None
self.decrypter = None
self.show_count = show_count
self.decrypted_sig.connect(self.on_decrypted)
self._setup_ui(message)
util.finalization_print_error(self)
def _setup_ui(self, message=None):
num = len(self.bip38_keys)
if message is None:
message = _('{} BIP38 keys were detected and require a password to decode').format(num)
grid = QtWidgets.QGridLayout(self)
grid.setContentsMargins(24,24,24,24)
grid.setSpacing(10)
top_title = QtWidgets.QLabel('<font size=+1><b>{}</b></font> - {}'
.format(_('BIP38 Import'), message), self)
top_title.setWordWrap(True)
grid.addWidget(top_title, 0, 0, 1, 2)
self.key_tit = QtWidgets.QLabel(' ', self)
self.key_lbl = QtWidgets.QLabel(' ', self)
f = self.key_lbl.font()
f.setBold(True); f.setFamily(MONOSPACE_FONT)
self.key_lbl.setFont(f)
self.key_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse|Qt.TextSelectableByKeyboard)
grid.addWidget(self.key_tit, 1, 0)
grid.addWidget(self.key_lbl, 1, 1)
pw_tit = HelpLabel(_('Password:'),
_('BIP38 keys are strongly encrypted with a password. To decode this key, please specify the password you used when creating the key.'))
self.pw_le = QtWidgets.QLineEdit()
self.pw_le.setEchoMode(QtWidgets.QLineEdit.Password)
timer = QTimer(self)
timer.setSingleShot(True)
def start_decrypter():
if not self.isVisible():
return
# starts a new thread. note that the old thread is not cancelled and just allowed to run until completion, with its results ignored
pw = self.pw_le.text()
self.decoded_address, self.decoded_wif = ('decrypting', 'decrypting') if pw else (None, None)
b38key = self.bip38_keys[self.cur]
self.decoded_keys.pop(b38key, None)
self.refresh()
if pw:
self.decrypter = _decrypt_thread(self, b38key, self.pw_le.text()) # starts a new thread
else:
self.decrypter = None
def on_edit():
self.ok.setDisabled(True) # Disable the Next/Ok button right away
self.decrypter = None # Indicate the current decryptor is totally defunct (its results will now be ignored)
# re-start the timer to fire in 500 ms. this way there is some
# delay before we start another decrypter thread, in case the user
# wants to type more characters
timer.start(500)
timer.timeout.connect(start_decrypter)
self.pw_le.textEdited.connect(on_edit)
grid.addWidget(pw_tit, 2, 0)
grid.addWidget(self.pw_le, 2, 1)
hlp = _('The decrypted private key (WIF key) originally used to create this BIP38 key.')
wif_tit = HelpLabel(_('Decrypted Private Key:'), hlp)
self.wif_lbl = QtWidgets.QLabel(' ', self)
self.wif_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse|Qt.TextSelectableByKeyboard)
grid.addWidget(wif_tit, 3, 0)
grid.addWidget(self.wif_lbl, 3, 1)
hlp = _('The address for the decrypted private key.')
adr_tit = HelpLabel(_('Address:'), hlp)
self.adr_lbl = QtWidgets.QLabel(' ', self)
self.adr_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse|Qt.TextSelectableByKeyboard)
grid.addWidget(adr_tit, 4, 0)
grid.addWidget(self.adr_lbl, 4, 1)
self.ok = OkButton(self)
cancel = CancelButton(self)
buttons = Buttons(cancel, self.ok)
grid.addLayout(buttons, 5, 0, 1, 2)
self.setLayout(grid)
self.clear()
self.refresh()
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
self.cur = 0
self.clear()
self.refresh()
def clear(self):
self.pw_le.setText('')
self.decrypter = None
self.decoded_address, self.decoded_wif = None, None
def refresh(self):
num = len(self.bip38_keys)
cur = self.cur
self.key_tit.setText(_('Encrypted Key') + ( (' ' + _('({} of {}):').format(cur+1, num)) if self.show_count else ':') )
self.key_lbl.setText(self.bip38_keys[cur])
pw_req = _('(password required)') if self.decoded_wif != 'decrypting' else _('decrypting...')
is_ok = bool(self.decoded_wif and self.decoded_wif not in ('bad', 'decrypting'))
bad_txt = pw_req if not self.decoded_wif or self.decoded_wif != 'bad' else '<font color={}>{}</font>'.format(ColorScheme.RED._get_color(False), _('password incorrect'))
# set wif_lbl font
f = self.wif_lbl.font(); f.setFamily(MONOSPACE_FONT if is_ok else QFont().family()); f.setItalic(not is_ok); self.wif_lbl.setFont(f)
self.wif_lbl.setText((is_ok and self.decoded_wif) or bad_txt)
# set adr_lbl font
f = self.adr_lbl.font(); f.setFamily(MONOSPACE_FONT if is_ok else QFont().family()); f.setItalic(not is_ok); self.adr_lbl.setFont(f)
self.adr_lbl.setText((is_ok and self.decoded_address.to_full_ui_string()) or bad_txt)
self.ok.setEnabled(isinstance(self.decoded_address, address.Address))
self.ok.setText(_('OK') if cur+1 == num else _("Next"))
def accept(self):
''' Overrides QDialog.accept '''
num = len(self.bip38_keys)
self.cur += 1
if self.cur == num:
if set(self.bip38_keys) != set(self.decoded_keys.keys()):
raise RuntimeError("Dialog finished but something's wrong -- not all passed-in keys are in the decoded keys dict. FIXME!")
self.decrypter = None # just in case a decrypter was running
super().accept()
if self.success_cb:
# we call the callback after we are definitely off-screen
QTimer.singleShot(250, lambda: self.success_cb(self.decoded_keys.copy()))
else:
self.clear()
self.refresh()
def reject(self):
''' Overrides QDialog.reject '''
super().reject()
self.decrypter = None # just in case a decrypter was running
self.decoded_keys.clear() # indicate to caller it was cancelled.
if self.cancel_cb:
# we call the callback after we are definitely off-screen
QTimer.singleShot(250, lambda: self.cancel_cb())
def on_decrypted(self, sender, tup):
if sender is not self.decrypter or not self.isVisible():
# ignore sender if it's not the currently-active decrypter or if we're already done
return
b38key = sender.key
if b38key != self.bip38_keys[self.cur]:
self.print_error("Warning: Got a result from decrypter but decrypter.key != self.cur. FIXME!")
return
if tup:
wif, adr = tup
self.decoded_keys[b38key] = (wif, adr)
self.decoded_wif = wif
self.decoded_address = adr
else:
self.decoded_keys.pop(b38key, None)
self.decoded_wif = 'bad'
self.decoded_address = 'bad'
self.refresh()
class _decrypt_thread(threading.Thread, util.PrintError):
''' Helper for the above Bip38Importer class. Does the computationally
expensive scrypt-based decode of a bip38 key in another thread in order to
keep the GUI responsive. Note that we create a new one of these each time
the user edits the password text edit, and the old ones continue to run
until they complete, at which point they emit the decrypted_sig. Only
the most recent decrypt_thread's results are accepted by the dialog, however.'''
def __init__(self, w, key, pw):
super().__init__(daemon=True, target=self.decrypt)
self.w = util.Weak.ref(w) # We keep a weak ref to parent because parent may die while we are still running. In which case we don't want to call into parent when it's already closed/done executing
self.key = key
self.pw = pw
self.start()
def decrypt(self):
result = bitcoin.bip38_decrypt(self.key, self.pw) # Potentially slow-ish operation. Note: result may be None or empty; client code's slot checks for that condition, so no need to check result here.
parent = self.w() # grab strong ref from weak ref if weak ref still alive
if parent:
parent.decrypted_sig.emit(self, result)
else:
self.print_error("parent widget was no longer alive, silently ignoring...")
| 44.657795
| 206
| 0.64189
|
38ac17bbd4b48d0fcd498666963069d9df00e7ac
| 6,940
|
py
|
Python
|
open_seq2seq/encoders/w2l_encoder.py
|
trevor-m/OpenSeq2Seq
|
0d2629d0df19fd86c90ff327edb3fc9206367773
|
[
"Apache-2.0"
] | null | null | null |
open_seq2seq/encoders/w2l_encoder.py
|
trevor-m/OpenSeq2Seq
|
0d2629d0df19fd86c90ff327edb3fc9206367773
|
[
"Apache-2.0"
] | null | null | null |
open_seq2seq/encoders/w2l_encoder.py
|
trevor-m/OpenSeq2Seq
|
0d2629d0df19fd86c90ff327edb3fc9206367773
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .encoder import Encoder
from open_seq2seq.parts.cnns.conv_blocks import conv_actv, conv_bn_actv, conv_ln_actv, conv_in_actv, conv_bn_res_bn_actv
class Wave2LetterEncoder(Encoder):
"""Wave2Letter like encoder. Fully convolutional model"""
@staticmethod
def get_required_params():
return dict(Encoder.get_required_params(), **{
'dropout_keep_prob': float,
'convnet_layers': list,
'activation_fn': None, # any valid callable
})
@staticmethod
def get_optional_params():
return dict(Encoder.get_optional_params(), **{
'data_format': ['channels_first', 'channels_last'],
'normalization': [None, 'batch_norm', 'layer_norm', 'instance_norm'],
'bn_momentum': float,
'bn_epsilon': float,
})
def __init__(self, params, model, name="w2l_encoder", mode='train'):
"""Wave2Letter like encoder constructor.
See parent class for arguments description.
Config parameters:
    * **dropout_keep_prob** (float) --- keep probability for dropout.
* **convnet_layers** (list) --- list with the description of convolutional
layers. For example::
"convnet_layers": [
{
"type": "conv1d", "repeat" : 5,
"kernel_size": [7], "stride": [1],
"num_channels": 250, "padding": "SAME"
},
{
"type": "conv1d", "repeat" : 3,
"kernel_size": [11], "stride": [1],
"num_channels": 500, "padding": "SAME"
},
{
"type": "conv1d", "repeat" : 1,
"kernel_size": [32], "stride": [1],
"num_channels": 1000, "padding": "SAME"
},
{
"type": "conv1d", "repeat" : 1,
"kernel_size": [1], "stride": [1],
"num_channels": 1000, "padding": "SAME"
},
]
* **activation_fn** --- activation function to use.
* **data_format** (string) --- could be either "channels_first" or
"channels_last". Defaults to "channels_last".
    * **normalization** --- normalization to use. Accepts [None, 'batch_norm',
      'layer_norm', 'instance_norm']. Use None if you don't want to use
      normalization. Defaults to 'batch_norm'.
* **bn_momentum** (float) --- momentum for batch norm. Defaults to 0.90.
* **bn_epsilon** (float) --- epsilon for batch norm. Defaults to 1e-3.
"""
super(Wave2LetterEncoder, self).__init__(params, model, name, mode)
def _encode(self, input_dict):
"""Creates TensorFlow graph for Wav2Letter like encoder.
Args:
input_dict (dict): input dictionary that has to contain
the following fields::
input_dict = {
"source_tensors": [
src_sequence (shape=[batch_size, sequence length, num features]),
src_length (shape=[batch_size])
]
}
Returns:
dict: dictionary with the following tensors::
{
'outputs': hidden state, shape=[batch_size, sequence length, n_hidden]
'src_length': tensor, shape=[batch_size]
}
"""
source_sequence, src_length = input_dict['source_tensors']
training = (self._mode == "train")
dropout_keep_prob = self.params['dropout_keep_prob'] if training else 1.0
regularizer = self.params.get('regularizer', None)
data_format = self.params.get('data_format', 'channels_last')
normalization = self.params.get('normalization', 'batch_norm')
normalization_params = {}
if normalization is None:
conv_block = conv_actv
elif normalization == "batch_norm":
conv_block = conv_bn_actv
normalization_params['bn_momentum'] = self.params.get(
'bn_momentum', 0.90)
normalization_params['bn_epsilon'] = self.params.get('bn_epsilon', 1e-3)
elif normalization == "layer_norm":
conv_block = conv_ln_actv
elif normalization == "instance_norm":
conv_block = conv_in_actv
else:
raise ValueError("Incorrect normalization")
conv_inputs = source_sequence
if data_format == 'channels_last':
conv_feats = conv_inputs # B T F
else:
conv_feats = tf.transpose(conv_inputs, [0, 2, 1]) # B F T
# ----- Convolutional layers ---------------------------------------------
convnet_layers = self.params['convnet_layers']
for idx_convnet in range(len(convnet_layers)):
layer_type = convnet_layers[idx_convnet]['type']
layer_repeat = convnet_layers[idx_convnet]['repeat']
ch_out = convnet_layers[idx_convnet]['num_channels']
kernel_size = convnet_layers[idx_convnet]['kernel_size']
strides = convnet_layers[idx_convnet]['stride']
padding = convnet_layers[idx_convnet]['padding']
dilation = convnet_layers[idx_convnet]['dilation']
dropout_keep = convnet_layers[idx_convnet].get(
'dropout_keep_prob', dropout_keep_prob) if training else 1.0
residual = convnet_layers[idx_convnet].get('residual', False)
if residual:
layer_res = conv_feats
for idx_layer in range(layer_repeat):
if padding == "VALID":
src_length = (src_length - kernel_size[0]) // strides[0] + 1
else:
src_length = (src_length + strides[0] - 1) // strides[0]
if residual and idx_layer == layer_repeat - 1:
conv_feats = conv_bn_res_bn_actv(
layer_type=layer_type,
name="conv{}{}".format(
idx_convnet + 1, idx_layer + 1),
inputs=conv_feats,
res_inputs=layer_res,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
dilation=dilation,
regularizer=regularizer,
training=training,
data_format=data_format,
**normalization_params
)
else:
conv_feats = conv_block(
layer_type=layer_type,
name="conv{}{}".format(
idx_convnet + 1, idx_layer + 1),
inputs=conv_feats,
filters=ch_out,
kernel_size=kernel_size,
activation_fn=self.params['activation_fn'],
strides=strides,
padding=padding,
dilation=dilation,
regularizer=regularizer,
training=training,
data_format=data_format,
**normalization_params
)
conv_feats = tf.nn.dropout(x=conv_feats, keep_prob=dropout_keep)
outputs = conv_feats
if data_format == 'channels_first':
outputs = tf.transpose(outputs, [0, 2, 1])
return {
'outputs': outputs,
'src_length': src_length,
}
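# Illustrative parameter sketch, not part of the original file: a minimal
# config of the encoder-specific fields this class expects, assuming defaults
# for the remaining optional settings (the base Encoder class may require
# additional keys). The layer sizes below are made-up examples; note that
# _encode() reads 'dilation' directly, so each layer dict needs it.
_EXAMPLE_W2L_ENCODER_PARAMS = {
    "dropout_keep_prob": 0.8,
    "activation_fn": tf.nn.relu,
    "normalization": "batch_norm",
    "data_format": "channels_last",
    "convnet_layers": [
        {"type": "conv1d", "repeat": 3, "kernel_size": [11], "stride": [1],
         "num_channels": 256, "padding": "SAME", "dilation": [1]},
        {"type": "conv1d", "repeat": 1, "kernel_size": [1], "stride": [1],
         "num_channels": 512, "padding": "SAME", "dilation": [1]},
    ],
}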
| 36.145833
| 120
| 0.602738
|
0f8ee91157a053a61c4de5515c9b7af4e81f86f5
| 3,275
|
py
|
Python
|
tests/test_openapi.py
|
sobolevn/django-swagger-tester
|
5f47ea8056c1a26a40c4c2f3b5b22dba84242f30
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_openapi.py
|
sobolevn/django-swagger-tester
|
5f47ea8056c1a26a40c4c2f3b5b22dba84242f30
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_openapi.py
|
sobolevn/django-swagger-tester
|
5f47ea8056c1a26a40c4c2f3b5b22dba84242f30
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from django_swagger_tester.exceptions import OpenAPISchemaError
from django_swagger_tester.openapi import (
read_items,
list_types,
read_type,
read_properties,
is_nullable,
read_additional_properties,
)
def test_read_items():
"""
Ensure this helper function works as it's designed to.
"""
assert read_items({'items': 'test'}) == 'test'
with pytest.raises(OpenAPISchemaError, match='Array is missing an `items` attribute'):
read_items({'no-items': 'woops'})
def test_list_types():
"""
Ensure this helper function works as it's designed to.
"""
    assert all(i in list_types() for i in ['string', 'boolean', 'integer', 'number', 'file', 'object', 'array'])
assert len(list_types()) == 7
def test_read_type():
"""
Ensure this helper function works as it's designed to.
"""
e = 'Schema item has an invalid `type` attribute. The type should be a single string'
with pytest.raises(OpenAPISchemaError, match=e):
read_type('test')
e = 'Schema item has an invalid `type` attribute. The type `bad type` is not supported.'
with pytest.raises(OpenAPISchemaError, match=e):
read_type({'type': 'bad type'})
assert read_type({'type': 'string'}) == 'string'
example = {
'title': 'Other stuff',
'description': 'the decorator should determine the serializer class for this',
'required': ['foo'],
'type': 'object',
'properties': {'foo': {'title': 'Foo', 'type': 'string', 'minLength': 1}},
}
additional_example = {
'title': 'Other stuff',
'description': 'the decorator should determine the serializer class for this',
'required': ['foo'],
'type': 'object',
'additionalProperties': {'title': 'Foo', 'type': 'string', 'minLength': 1},
}
def test_read_read_properties():
"""
This function is a bit funny, and I'm not sure it will work in practice. Essentially, we're trying to handle
the edge case of getting `additionalProperties`, by making it look like a `properties` object.
This way we can apply the same testing logic on both objects.
"""
assert read_properties(example) == {'foo': {'title': 'Foo', 'type': 'string', 'minLength': 1}}
assert read_properties(additional_example) == {'': {'title': 'Foo', 'type': 'string', 'minLength': 1}}
with pytest.raises(OpenAPISchemaError):
read_properties({})
def test_additional_properties_validation():
with pytest.raises(OpenAPISchemaError):
read_additional_properties({})
nullable_example = {
'properties': {
'id': {'title': 'ID', 'type': 'integer', 'readOnly': 'true', 'x-nullable': 'true',},
'first_name': {
'title': 'First name',
'type': 'string',
'maxLength': '30',
'minLength': '1',
'nullable': 'true',
},
}
}
nullable_example_data = {'id': None, 'first_name': None}
def test_is_nullable():
"""
Ensure this helper function works as it's designed to.
"""
    assert is_nullable(nullable_example['properties']['id']) is True
    assert is_nullable(nullable_example['properties']['first_name']) is True
    for item in [2, '', None, -1, {'nullable': 'false'}]:
        assert is_nullable(item) is False
| 31.490385
| 112
| 0.634504
|
19943b2571adc70f7b5069bbcdcb7699eb2c5f73
| 290
|
py
|
Python
|
ex9.py
|
Csurlee/Python
|
1e8cd09a009f50c2459879517ca2a170b3235e33
|
[
"MIT"
] | null | null | null |
ex9.py
|
Csurlee/Python
|
1e8cd09a009f50c2459879517ca2a170b3235e33
|
[
"MIT"
] | null | null | null |
ex9.py
|
Csurlee/Python
|
1e8cd09a009f50c2459879517ca2a170b3235e33
|
[
"MIT"
] | null | null | null |
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "\nJan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print("Here are the days: ", days)
print("here are the months: ", months)
print("""
ddafdsfadsfasdfasdfasdf
ddafdsfadsfasdfasdfasdfsdf
ddafdsfadsfasdfasdfasdfasdf
ddafdsfadsfasdfasdfasdfasdfasd
""")
| 22.307692
| 51
| 0.772414
|
8d159f327b2b60fdf0be49c5c5ab3db430befb01
| 2,644
|
py
|
Python
|
palm/cutoff_parameter_set.py
|
milapour/palm
|
53cfce67f6621795ca419a79bd91c9ecf02cc93f
|
[
"BSD-2-Clause"
] | 2
|
2015-03-25T13:02:32.000Z
|
2016-12-12T21:00:27.000Z
|
palm/cutoff_parameter_set.py
|
milapour/palm
|
53cfce67f6621795ca419a79bd91c9ecf02cc93f
|
[
"BSD-2-Clause"
] | null | null | null |
palm/cutoff_parameter_set.py
|
milapour/palm
|
53cfce67f6621795ca419a79bd91c9ecf02cc93f
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy
from palm.base.parameter_set import ParameterSet
class CutoffParameterSet(ParameterSet):
"""
Parameters for a temporal cutoff model. The parameters are
`tau` and `N`.
Attributes
----------
parameter_dict : dict
Parameter values, indexed by parameter names.
bounds : dict
Bounds for parameter values (during optimization),
indexed by parameter names.
"""
def __init__(self):
super(CutoffParameterSet, self).__init__()
self.parameter_dict = {'tau':1.0, 'N':5}
self.bounds_dict = {'tau':(0.0, 3600.),
'N':(None, None)}
def __str__(self):
tau = self.get_parameter('tau')
N = self.get_parameter('N')
my_str = "%.4f,%d" % (tau, N)
return my_str
def __iter__(self):
        for param_name, param_value in self.parameter_dict.items():
yield param_name, param_value
def __eq__(self, other_param_set):
if type(self) is type(other_param_set):
return numpy.array_equal(self.as_array(), other_param_set.as_array())
else:
return False
def set_parameter(self, param_name, param_value):
if param_name in self.parameter_dict.keys():
self.parameter_dict[param_name] = param_value
else:
assert False, "No such parameter: %s" % param_name
def get_parameter(self, param_name):
return self.parameter_dict[param_name]
def as_array(self):
"""
Converts parameter set to numpy array.
Returns
-------
param_array : ndarray
"""
tau = self.get_parameter('tau')
N = self.get_parameter('N')
param_array = numpy.array([tau, N])
return param_array
def update_from_array(self, parameter_array):
"""
Set parameter values from a numpy array. Useful because numpy arrays
are the input and output type of scipy optimization methods.
Expected order of parameters in array:
`tau` and `N`
Parameters
----------
parameter_array : ndarray
"""
parameter_array = numpy.atleast_1d(parameter_array)
self.set_parameter('tau', parameter_array[0])
self.set_parameter('N', int(parameter_array[1]))
def set_parameter_bounds(self, parameter_name, min_value, max_value):
self.bounds_dict[parameter_name] = (min_value, max_value)
def get_parameter_bounds(self):
tau_bounds = self.bounds_dict['tau']
        N = self.get_parameter('N')
        N_bounds = (N, N)
bounds = [tau_bounds, N_bounds]
return bounds
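if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: round-trip the
    # parameters through a numpy array, the way a scipy optimizer would.
    ps = CutoffParameterSet()
    ps.update_from_array(numpy.array([0.5, 10]))
    print(ps.as_array())  # -> [ 0.5  10. ]
    print(ps)             # -> 0.5000,10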
| 31.47619
| 81
| 0.61233
|
68f3cc3b14f5ff487c8fe3129dd5d8e844c8fb35
| 96
|
py
|
Python
|
tests/test_tayle_test_task.py
|
Gogee90/Bank-account
|
ee5e7996f9f0d1ec95dca2eabf508ef17c18a124
|
[
"MIT"
] | null | null | null |
tests/test_tayle_test_task.py
|
Gogee90/Bank-account
|
ee5e7996f9f0d1ec95dca2eabf508ef17c18a124
|
[
"MIT"
] | null | null | null |
tests/test_tayle_test_task.py
|
Gogee90/Bank-account
|
ee5e7996f9f0d1ec95dca2eabf508ef17c18a124
|
[
"MIT"
] | null | null | null |
from tayle_test_task import __version__
def test_version():
assert __version__ == '0.1.0'
| 16
| 39
| 0.739583
|
850a6b33f85e65092f369305aae0b3c6d0b51136
| 3,089
|
py
|
Python
|
setup.py
|
wgresshoff/invenio-cli
|
4723cc84d5aac9efb9d859c8be2fb34b97fc5d49
|
[
"MIT"
] | null | null | null |
setup.py
|
wgresshoff/invenio-cli
|
4723cc84d5aac9efb9d859c8be2fb34b97fc5d49
|
[
"MIT"
] | null | null | null |
setup.py
|
wgresshoff/invenio-cli
|
4723cc84d5aac9efb9d859c8be2fb34b97fc5d49
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2020 CERN.
# Copyright (C) 2019-2020 Northwestern University.
# Copyright (C) 2021 TU Wien.
#
# Invenio-Cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module to ease the creation and management of applications."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
tests_require = [
'pytest-invenio>=1.4.0',
]
extras_require = {
'docs': [
'Sphinx==4.2.0',
],
'tests': tests_require,
}
extras_require['all'] = []
for reqs in extras_require.values():
extras_require['all'].extend(reqs)
install_requires = [
'cookiecutter>=1.7.1,<1.8.0',
'click>=7.1.1,<8.0',
'click-default-group>=1.2.2,<2.0.0',
'docker>=4.1.0,<6.0.0',
'pipenv>=2020.6.2',
'PyYAML>=5.1.2',
'pynpm>=0.1.2',
# virtualenv v20.13.1 ships with embedded setuptools 60.x, which means
# that "invenio-cli install" will by default create a new virtual
# environment with setuptools 60.x installed. celery v5.2.3 ships with a
# dependency on setuptools>=59.1.1,<59.7.0 due to breaking changes
# introduced in setuptools 60.x. pipenv or pip resolver does not properly
# respect the dependency from celery and thus does not install a
# compatible setuptools version leading to a ContextualVersionConflict
# once running any command.
# Once celery v5.2.4 is out, we can remove the pin again.
'virtualenv>=20.0.35,<=20.13.0',
]
packages = find_packages()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_cli', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-cli',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio-cli',
license='MIT',
author='CERN & Northwestern University',
author_email='info@inveniosoftware.org',
url='https://github.com/inveniosoftware/invenio-cli',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'console_scripts': [
'invenio-cli = invenio_cli.cli.cli:invenio_cli',
]
},
extras_require=extras_require,
install_requires=install_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Development Status :: 3 - Alpha',
],
)
| 30.584158
| 77
| 0.648754
|
c60c01cde49e035320c42814b6c3e33fa3f5a639
| 6,218
|
py
|
Python
|
assets/src/ba_data/python/ba/_appconfig.py
|
Benefit-Zebra/ballistica
|
eb85df82cff22038e74a2d93abdcbe9cd755d782
|
[
"MIT"
] | null | null | null |
assets/src/ba_data/python/ba/_appconfig.py
|
Benefit-Zebra/ballistica
|
eb85df82cff22038e74a2d93abdcbe9cd755d782
|
[
"MIT"
] | null | null | null |
assets/src/ba_data/python/ba/_appconfig.py
|
Benefit-Zebra/ballistica
|
eb85df82cff22038e74a2d93abdcbe9cd755d782
|
[
"MIT"
] | null | null | null |
# Released under the MIT License. See LICENSE for details.
#
"""Provides the AppConfig class."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
if TYPE_CHECKING:
from typing import Any, List, Tuple
class AppConfig(dict):
"""A special dict that holds the game's persistent configuration values.
Category: App Classes
It also provides methods for fetching values with app-defined fallback
defaults, applying contained values to the game, and committing the
config to storage.
Call ba.appconfig() to get the single shared instance of this class.
    AppConfig data is stored as json on disk, so make sure to only place
json-friendly values in it (dict, list, str, float, int, bool).
Be aware that tuples will be quietly converted to lists when stored.
"""
def resolve(self, key: str) -> Any:
"""Given a string key, return a config value (type varies).
This will substitute application defaults for values not present in
the config dict, filter some invalid values, etc. Note that these
values do not represent the state of the app; simply the state of its
config. Use ba.App to access actual live state.
Raises an Exception for unrecognized key names. To get the list of keys
supported by this method, use ba.AppConfig.builtin_keys(). Note that it
is perfectly legal to store other data in the config; it just needs to
be accessed through standard dict methods and missing values handled
manually.
"""
return _ba.resolve_appconfig_value(key)
def default_value(self, key: str) -> Any:
"""Given a string key, return its predefined default value.
This is the value that will be returned by ba.AppConfig.resolve() if
the key is not present in the config dict or of an incompatible type.
Raises an Exception for unrecognized key names. To get the list of keys
supported by this method, use ba.AppConfig.builtin_keys(). Note that it
is perfectly legal to store other data in the config; it just needs to
be accessed through standard dict methods and missing values handled
manually.
"""
return _ba.get_appconfig_default_value(key)
def builtin_keys(self) -> List[str]:
"""Return the list of valid key names recognized by ba.AppConfig.
This set of keys can be used with resolve(), default_value(), etc.
        It does not vary across platforms and may include keys that are
        obsolete or not relevant on the current running version (for instance,
        VR related keys on non-VR platforms). This is to minimize the amount
        of platform checking necessary.
Note that it is perfectly legal to store arbitrary named data in the
config, but in that case it is up to the user to test for the existence
of the key in the config dict, fall back to consistent defaults, etc.
"""
return _ba.get_appconfig_builtin_keys()
def apply(self) -> None:
"""Apply config values to the running app."""
_ba.apply_config()
def commit(self) -> None:
"""Commits the config to local storage.
Note that this call is asynchronous so the actual write to disk may not
occur immediately.
"""
commit_app_config()
def apply_and_commit(self) -> None:
"""Run apply() followed by commit(); for convenience.
(This way the commit() will not occur if apply() hits invalid data)
"""
self.apply()
self.commit()
def read_config() -> Tuple[AppConfig, bool]:
"""Read the game config."""
import os
import json
from ba._generated.enums import TimeType
config_file_healthy = False
# NOTE: it is assumed that this only gets called once and the
# config object will not change from here on out
config_file_path = _ba.app.config_file_path
config_contents = ''
try:
if os.path.exists(config_file_path):
with open(config_file_path, encoding='utf-8') as infile:
config_contents = infile.read()
config = AppConfig(json.loads(config_contents))
else:
config = AppConfig()
config_file_healthy = True
except Exception as exc:
print(('error reading config file at time ' +
str(_ba.time(TimeType.REAL)) + ': \'' + config_file_path +
'\':\n'), exc)
# Whenever this happens lets back up the broken one just in case it
# gets overwritten accidentally.
print(('backing up current config file to \'' + config_file_path +
".broken\'"))
try:
import shutil
shutil.copyfile(config_file_path, config_file_path + '.broken')
except Exception as exc:
print('EXC copying broken config:', exc)
try:
_ba.log('broken config contents:\n' +
config_contents.replace('\000', '<NULL_BYTE>'),
to_stdout=False)
except Exception as exc:
print('EXC logging broken config contents:', exc)
config = AppConfig()
# Now attempt to read one of our 'prev' backup copies.
prev_path = config_file_path + '.prev'
try:
if os.path.exists(prev_path):
with open(prev_path, encoding='utf-8') as infile:
config_contents = infile.read()
config = AppConfig(json.loads(config_contents))
else:
config = AppConfig()
config_file_healthy = True
print('successfully read backup config.')
except Exception as exc:
print('EXC reading prev backup config:', exc)
return config, config_file_healthy
def commit_app_config(force: bool = False) -> None:
"""Commit the config to persistent storage.
Category: General Utility Functions
(internal)
"""
if not _ba.app.config_file_healthy and not force:
print('Current config file is broken; '
'skipping write to avoid losing settings.')
return
_ba.mark_config_dirty()
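def _appconfig_usage_sketch() -> None:
    """Illustrative sketch, not part of the original module.

    A typical read-modify-commit flow, assuming the app is running, the
    shared AppConfig is reachable as _ba.app.config, and 'Show FPS' is one
    of the builtin keys (used here purely as an example).
    """
    cfg = _ba.app.config  # the shared AppConfig instance (assumption)
    current = cfg.resolve('Show FPS')  # falls back to the app default
    cfg['Show FPS'] = not current
    cfg.apply_and_commit()  # apply to the running app, then write to disk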
| 37.233533
| 79
| 0.645867
|
1dcc3e86dc124b9d55cb9dd88d3d6d3b19695b5b
| 1,094
|
py
|
Python
|
evaluation/measure_uiqm.py
|
rowantseng/FUnIE-GAN-PyTorch
|
d2c8064c7827001de3f4b7e71ae5b4fa2eff040c
|
[
"MIT"
] | 6
|
2021-07-27T09:03:41.000Z
|
2022-01-01T05:17:27.000Z
|
evaluation/measure_uiqm.py
|
rowantseng/FUnIE-GAN-PyTorch
|
d2c8064c7827001de3f4b7e71ae5b4fa2eff040c
|
[
"MIT"
] | null | null | null |
evaluation/measure_uiqm.py
|
rowantseng/FUnIE-GAN-PyTorch
|
d2c8064c7827001de3f4b7e71ae5b4fa2eff040c
|
[
"MIT"
] | 1
|
2020-08-25T23:17:06.000Z
|
2020-08-25T23:17:06.000Z
|
"""
# > Script for measuring quantitative performance in terms of
#    - Underwater Image Quality Measure (UIQM)
# > Maintainer: https://github.com/xahidbuffon
"""
# python libs
import numpy as np
from PIL import Image, ImageOps
from glob import glob
from os.path import join
from ntpath import basename
# local libs
from uqim_utils import getUIQM
import argparse
def measure_UIQMs(dir_name, im_res=(256, 256)):
paths = sorted(glob(join(dir_name, "*.*")))
uqims = []
for img_path in paths:
im = Image.open(img_path).resize(im_res)
uiqm = getUIQM(np.array(im))
uqims.append(uiqm)
return np.array(uqims)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="PyTorch FUnIE-GAN UIQM Metric Runner")
parser.add_argument("-d", "--data", default="", type=str, metavar="PATH",
help="path to images (default: none)")
args = parser.parse_args()
uqims = measure_UIQMs(args.data)
print(f"[UIQM] Mean={np.mean(uqims):.3f}, Std={np.std(uqims):.3f}")
| 28.051282
| 77
| 0.66362
|
7474564e9b6d85f3a497f1fb20bd7197508b0b20
| 522
|
py
|
Python
|
ex29.py
|
rishabhgautam/LPTHW_mynotes
|
eb462926ab924ea1096e0e81125e94f338b2ddd2
|
[
"MIT"
] | 2
|
2021-03-07T17:13:49.000Z
|
2022-03-29T08:55:17.000Z
|
ex29.py
|
rishabhgautam/LPTHW_mynotes
|
eb462926ab924ea1096e0e81125e94f338b2ddd2
|
[
"MIT"
] | 1
|
2021-06-10T20:17:55.000Z
|
2021-06-10T20:17:55.000Z
|
Learn-Python-The-Hard-Way/Python3/ex29.py
|
QuantFinEcon/py-learn
|
7151f01df9f7f096312e43434fe8026d1d7d7828
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# ex29: What If
people = 20
cats = 30
dogs = 15
if people < cats:
print("Too many cats! The world is doomed!")
if people > cats:
print("Not many cats! The world is saved!")
if people < dogs:
print("The world is drooled on!")
if people > dogs:
print("The world is dry!")
dogs += 5
if people >= dogs:
print("People are greater than or equal to dogs.")
if people <= dogs:
print("People are less than or equal to dogs.")
if people == dogs:
print("People are dogs.")
| 17.4
| 54
| 0.6341
|
207b884a2a1a3e3d3b7767813840fd28f003bc01
| 24,243
|
py
|
Python
|
Python/python3_version/klampt/plan/motionplanning.py
|
ipa-rmb-mr/Klampt
|
71793b54eead788811b4e62bcf8dadb49b68ff17
|
[
"BSD-3-Clause"
] | null | null | null |
Python/python3_version/klampt/plan/motionplanning.py
|
ipa-rmb-mr/Klampt
|
71793b54eead788811b4e62bcf8dadb49b68ff17
|
[
"BSD-3-Clause"
] | null | null | null |
Python/python3_version/klampt/plan/motionplanning.py
|
ipa-rmb-mr/Klampt
|
71793b54eead788811b4e62bcf8dadb49b68ff17
|
[
"BSD-3-Clause"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""Python interface to C++ motion planing routines"""
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_motionplanning', [dirname(__file__)])
except ImportError:
import _motionplanning
return _motionplanning
if fp is not None:
try:
_mod = imp.load_module('_motionplanning', fp, pathname, description)
finally:
fp.close()
return _mod
_motionplanning = swig_import_helper()
del swig_import_helper
else:
import _motionplanning
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def setRandomSeed(seed):
"""
Sets the random seed used by the configuration sampler.
Args:
seed (int)
"""
return _motionplanning.setRandomSeed(seed)
def setPlanJSONString(string):
"""
Loads planner values from a JSON string.
Args:
string (str)
"""
return _motionplanning.setPlanJSONString(string)
def getPlanJSONString():
"""
Saves planner values to a JSON string.
Returns:
(str):
"""
return _motionplanning.getPlanJSONString()
def setPlanType(type):
"""
Sets the planner type.
Args:
type (str)
Valid values are
* prm: the Probabilistic Roadmap algorithm
* rrt: the Rapidly Exploring Random Trees algorithm
* sbl: the Single-Query Bidirectional Lazy planner
* sblprt: the probabilistic roadmap of trees (PRT) algorithm with SBL as the
inter-root planner.
* rrt*: the RRT* algorithm for optimal motion planning
* prm*: the PRM* algorithm for optimal motion planning
* lazyprm*: the Lazy-PRM* algorithm for optimal motion planning
* lazyrrg*: the Lazy-RRG* algorithm for optimal motion planning
* fmm: the fast marching method algorithm for resolution-complete optimal
motion planning
* fmm*: an anytime fast marching method algorithm for optimal motion planning
"""
return _motionplanning.setPlanType(type)
def setPlanSetting(*args):
"""
Sets a numeric or string-valued setting for the planner.
setPlanSetting (setting,value)
Args:
setting (str):
value (float or str):
Valid numeric values are:
* "knn": k value for the k-nearest neighbor connection strategy (only for
PRM)
* "connectionThreshold": a milestone connection threshold
* "perturbationRadius": (for RRT and SBL)
* "bidirectional": 1 if bidirectional planning is requested (for RRT)
* "grid": 1 if a point selection grid should be used (for SBL)
* "gridResolution": resolution for the grid, if the grid should be used (for
SBL with grid, FMM, FMM*)
* "suboptimalityFactor": allowable suboptimality (for RRT*, lazy PRM*, lazy
RRG*)
* "randomizeFrequency": a grid randomization frequency (for SBL)
* "shortcut": nonzero if you wish to perform shortcutting after a first plan
is found.
* "restart": nonzero if you wish to restart the planner to get better paths
with the remaining time.
Valid string values are:
* "pointLocation": a string designating a point location data structure.
"kdtree" is supported, optionally followed by a weight vector (for PRM,
RRT*, PRM*, LazyPRM*, LazyRRG*)
* "restartTermCond": used if the "restart" setting is true. This is a JSON
string defining the termination condition (default value:
"{foundSolution:1;maxIters:1000}")
"""
return _motionplanning.setPlanSetting(*args)
def destroy():
"""
destroys internal data structures
"""
return _motionplanning.destroy()
class CSpaceInterface(_object):
"""
A raw interface for a configuration space. Note: the native Python CSpace
interface class in cspace.py is easier to use.
You can either set a single feasibility test function using setFeasibility() or
add several feasibility tests, all of which need to be satisfied, using
addFeasibilityTest(). In the latter case, planners may be able to provide
debugging statistics, solve Minimum Constraint Removal problems, run faster by
eliminating constraint tests, etc.
Either setVisibility() or setVisibilityEpsilon() must be called to define a
visibility checker between two (feasible) configurations. In the latter case,
the path will be discretized at the resolution sent to setVisibilityEpsilon. If
you have special single-constraint visibility tests, you can call that using
addVisibilityTest (for example, for convex constraints you can set it to the
lambda function that returns true regardless of its arguments).
Supported properties include "euclidean" (boolean), "metric" (string),
"geodesic" (boolean), "minimum" (vector), and "maximum" (vector). These
may be used by planners to make planning faster or more accurate. For a complete
list see KrisLibrary/planning/CSpace.h.
C++ includes: motionplanning.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CSpaceInterface, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CSpaceInterface, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__ (): :class:`~klampt.CSpaceInterface`
__init__ (arg2): :class:`~klampt.CSpaceInterface`
Args:
arg2 (:class:`~klampt.CSpaceInterface`, optional):
"""
this = _motionplanning.new_CSpaceInterface(*args)
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _motionplanning.delete_CSpaceInterface
__del__ = lambda self: None
def destroy(self):
"""
"""
return _motionplanning.CSpaceInterface_destroy(self)
def setFeasibility(self, pyFeas):
"""
Args:
pyFeas (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_setFeasibility(self, pyFeas)
def addFeasibilityTest(self, name, pyFeas):
"""
Args:
name (str)
pyFeas (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_addFeasibilityTest(self, name, pyFeas)
def setVisibility(self, pyVisible):
"""
Args:
pyVisible (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_setVisibility(self, pyVisible)
def addVisibilityTest(self, name, pyVisible):
"""
Args:
name (str)
pyVisible (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_addVisibilityTest(self, name, pyVisible)
def setVisibilityEpsilon(self, eps):
"""
Args:
eps (float)
"""
return _motionplanning.CSpaceInterface_setVisibilityEpsilon(self, eps)
def setSampler(self, pySamp):
"""
Args:
pySamp (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_setSampler(self, pySamp)
def setNeighborhoodSampler(self, pySamp):
"""
Args:
pySamp (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_setNeighborhoodSampler(self, pySamp)
def setDistance(self, pyDist):
"""
Args:
pyDist (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_setDistance(self, pyDist)
def setInterpolate(self, pyInterp):
"""
Args:
pyInterp (:obj:`object`)
"""
return _motionplanning.CSpaceInterface_setInterpolate(self, pyInterp)
def setProperty(self, key, value):
"""
Args:
key (str)
value (str)
"""
return _motionplanning.CSpaceInterface_setProperty(self, key, value)
def getProperty(self, key):
"""
Args:
key (str)
Returns:
(str):
"""
return _motionplanning.CSpaceInterface_getProperty(self, key)
def isFeasible(self, q):
"""
Queries whether a given configuration is feasible.
Args:
q (:obj:`object`)
Returns:
(bool):
"""
return _motionplanning.CSpaceInterface_isFeasible(self, q)
def isVisible(self, a, b):
"""
Queries whether two configurations are visible.
Args:
a (:obj:`object`)
b (:obj:`object`)
Returns:
(bool):
"""
return _motionplanning.CSpaceInterface_isVisible(self, a, b)
def testFeasibility(self, name, q):
"""
Queries whether a given configuration is feasible with respect to a given
constraint.
Args:
name (str)
q (:obj:`object`)
Returns:
(bool):
"""
return _motionplanning.CSpaceInterface_testFeasibility(self, name, q)
def testVisibility(self, name, a, b):
"""
Queries whether two configurations are visible with respect to a given
constraint.
Args:
name (str)
a (:obj:`object`)
b (:obj:`object`)
Returns:
(bool):
"""
return _motionplanning.CSpaceInterface_testVisibility(self, name, a, b)
def feasibilityFailures(self, q):
"""
Returns a list of all failed feasibility constraints.
Args:
q (:obj:`object`)
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_feasibilityFailures(self, q)
def visibilityFailures(self, a, b):
"""
Returns a list of all failed visibility constraints.
Args:
a (:obj:`object`)
b (:obj:`object`)
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_visibilityFailures(self, a, b)
def sample(self):
"""
Samples a configuration.
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_sample(self)
def distance(self, a, b):
"""
Returns the distance between two configurations.
Args:
a (:obj:`object`)
b (:obj:`object`)
Returns:
(float):
"""
return _motionplanning.CSpaceInterface_distance(self, a, b)
def interpolate(self, a, b, u):
"""
Interpolates between two configurations.
Args:
a (:obj:`object`)
b (:obj:`object`)
u (float)
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_interpolate(self, a, b, u)
def adaptiveQueriesEnabled(self):
"""
optional: adaptive queries can be used to automatically minimize the total cost
of testing feasibility / visibility using empirical estimates. Off by default.
Returns:
(bool):
"""
return _motionplanning.CSpaceInterface_adaptiveQueriesEnabled(self)
def enableAdaptiveQueries(self, enabled=True):
"""
Call this to enable adaptive queries. (It has a small overhead.)
enableAdaptiveQueries (enabled=True)
enableAdaptiveQueries ()
Args:
enabled (bool, optional): default value True
"""
return _motionplanning.CSpaceInterface_enableAdaptiveQueries(self, enabled)
def optimizeQueryOrder(self):
"""
Call this to optimize the feasibility / visibility testing order.
"""
return _motionplanning.CSpaceInterface_optimizeQueryOrder(self)
def setFeasibilityDependency(self, name, precedingTest):
"""
Marks that a certain feasibility test must be performed before another.
Args:
name (str)
precedingTest (str)
"""
return _motionplanning.CSpaceInterface_setFeasibilityDependency(self, name, precedingTest)
def setFeasibilityPrior(self, name, costPrior=0.0, feasibilityProbability=0.0, evidenceStrength=1.0):
"""
Resets the data for a certain feasibility test. Default values give a data-
gathering behavior.
setFeasibilityPrior (name,costPrior=0.0,feasibilityProbability=0.0,evidenceStrength=1.0)
setFeasibilityPrior (name,costPrior=0.0,feasibilityProbability=0.0)
setFeasibilityPrior (name,costPrior=0.0)
setFeasibilityPrior (name)
Args:
name (str):
costPrior (float, optional): default value 0.0
feasibilityProbability (float, optional): default value 0.0
evidenceStrength (float, optional): default value 1.0
"""
return _motionplanning.CSpaceInterface_setFeasibilityPrior(self, name, costPrior, feasibilityProbability, evidenceStrength)
def setVisibilityDependency(self, name, precedingTest):
"""
Marks that a certain feasibility test must be performed before another.
Args:
name (str)
precedingTest (str)
"""
return _motionplanning.CSpaceInterface_setVisibilityDependency(self, name, precedingTest)
def setVisibilityPrior(self, name, costPrior=0.0, visibilityProbability=0.0, evidenceStrength=1.0):
"""
Resets the data for a certain visibility test. Default values give a data-
gathering behavior.
setVisibilityPrior (name,costPrior=0.0,visibilityProbability=0.0,evidenceStrength=1.0)
setVisibilityPrior (name,costPrior=0.0,visibilityProbability=0.0)
setVisibilityPrior (name,costPrior=0.0)
setVisibilityPrior (name)
Args:
name (str):
costPrior (float, optional): default value 0.0
visibilityProbability (float, optional): default value 0.0
evidenceStrength (float, optional): default value 1.0
"""
return _motionplanning.CSpaceInterface_setVisibilityPrior(self, name, costPrior, visibilityProbability, evidenceStrength)
def feasibilityCost(self, name):
"""
Retrieves the empirical average cost of a given feasibility test.
Args:
name (str)
Returns:
(float):
"""
return _motionplanning.CSpaceInterface_feasibilityCost(self, name)
def feasibilityProbability(self, name):
"""
Retrieves the empirical average success rate of a given feasibility test.
Args:
name (str)
Returns:
(float):
"""
return _motionplanning.CSpaceInterface_feasibilityProbability(self, name)
def visibilityCost(self, name):
"""
Retrieves the empirical average cost of a given visibility test.
Args:
name (str)
Returns:
(float):
"""
return _motionplanning.CSpaceInterface_visibilityCost(self, name)
def visibilityProbability(self, name):
"""
Retrieves the empirical average success rate of a given visibility test.
Args:
name (str)
Returns:
(float):
"""
return _motionplanning.CSpaceInterface_visibilityProbability(self, name)
def feasibilityQueryOrder(self):
"""
Retrieves the current order of feasibility tests.
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_feasibilityQueryOrder(self)
def visibilityQueryOrder(self):
"""
Retrieves the current order of visibility tests.
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_visibilityQueryOrder(self)
def getStats(self):
"""
Returns constraint testing statistics. If adaptive queries are enabled, this
returns the stats on each constraint.
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_getStats(self)
__swig_setmethods__["index"] = _motionplanning.CSpaceInterface_index_set
__swig_getmethods__["index"] = _motionplanning.CSpaceInterface_index_get
if _newclass:
index = _swig_property(_motionplanning.CSpaceInterface_index_get, _motionplanning.CSpaceInterface_index_set)
CSpaceInterface_swigregister = _motionplanning.CSpaceInterface_swigregister
CSpaceInterface_swigregister(CSpaceInterface)
class PlannerInterface(_object):
"""
An interface for a motion planner. The :class:`MotionPlan` interface in
cspace.py is somewhat easier to use.
On construction, uses the planner type specified by setPlanType and the settings
currently specified by calls to setPlanSetting.
Point-to-point planning is enabled by sending two configurations to the
setEndpoints method. This is mandatory for RRT and SBL-style planners. The start
    and end milestones are given by indices 0 and 1, respectively.
Point-to-set planning is enabled by sending a *goal test* as the second argument
to the setEndpoints method. It is possible also to send a special goal sampler
by providing a *pair of functions* as the second argument consisting of the two
functions (goaltest,goalsample). The first in this pair tests whether a
configuration is a goal, and the second returns a sampled configuration in a
superset of the goal. Ideally the goal sampler generates as many goals as
possible.
PRM can be used in either point-to-point or multi-query mode. In multi-query
mode, you may call addMilestone(q) to add a new milestone. addMilestone()
returns the index of that milestone, which can be used in later calls to
getPath().
To plan, call planMore(iters) until getPath(0,1) returns non-NULL. The return
value is a list of configurations.
To get a roadmap (V,E), call getRoadmap(). V is a list of configurations (each
configuration is a Python list) and E is a list of edges (each edge is a pair
(i,j) indexing into V).
To dump the roadmap to disk, call dump(fn). This saves to a Trivial Graph Format
(TGF) format.
C++ includes: motionplanning.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PlannerInterface, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PlannerInterface, name)
__repr__ = _swig_repr
def __init__(self, cspace):
"""
Args:
cspace (:class:`~klampt.CSpaceInterface`)
"""
this = _motionplanning.new_PlannerInterface(cspace)
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _motionplanning.delete_PlannerInterface
__del__ = lambda self: None
def destroy(self):
"""
"""
return _motionplanning.PlannerInterface_destroy(self)
def setEndpoints(self, start, goal):
"""
Args:
start (:obj:`object`)
goal (:obj:`object`)
Returns:
(bool):
"""
return _motionplanning.PlannerInterface_setEndpoints(self, start, goal)
def setEndpointSet(self, start, goal, goalSample=None):
"""
setEndpointSet (start,goal,goalSample=None): bool
setEndpointSet (start,goal): bool
Args:
start (:obj:`object`):
goal (:obj:`object`):
goalSample (:obj:`object`, optional): default value None
Returns:
(bool):
"""
return _motionplanning.PlannerInterface_setEndpointSet(self, start, goal, goalSample)
def addMilestone(self, milestone):
"""
Args:
milestone (:obj:`object`)
Returns:
(int):
"""
return _motionplanning.PlannerInterface_addMilestone(self, milestone)
def planMore(self, iterations):
"""
Args:
iterations (int)
"""
return _motionplanning.PlannerInterface_planMore(self, iterations)
def getPathEndpoints(self):
"""
Returns:
(:obj:`object`):
"""
return _motionplanning.PlannerInterface_getPathEndpoints(self)
def getPath(self, milestone1, milestone2):
"""
Args:
milestone1 (int)
milestone2 (int)
Returns:
(:obj:`object`):
"""
return _motionplanning.PlannerInterface_getPath(self, milestone1, milestone2)
def getData(self, setting):
"""
Args:
setting (str)
Returns:
(float):
"""
return _motionplanning.PlannerInterface_getData(self, setting)
def getStats(self):
"""
Returns:
(:obj:`object`):
"""
return _motionplanning.PlannerInterface_getStats(self)
def getRoadmap(self):
"""
Returns:
(:obj:`object`):
"""
return _motionplanning.PlannerInterface_getRoadmap(self)
def dump(self, fn):
"""
Args:
fn (str)
"""
return _motionplanning.PlannerInterface_dump(self, fn)
__swig_setmethods__["index"] = _motionplanning.PlannerInterface_index_set
__swig_getmethods__["index"] = _motionplanning.PlannerInterface_index_get
if _newclass:
index = _swig_property(_motionplanning.PlannerInterface_index_get, _motionplanning.PlannerInterface_index_set)
__swig_setmethods__["spaceIndex"] = _motionplanning.PlannerInterface_spaceIndex_set
__swig_getmethods__["spaceIndex"] = _motionplanning.PlannerInterface_spaceIndex_get
if _newclass:
spaceIndex = _swig_property(_motionplanning.PlannerInterface_spaceIndex_get, _motionplanning.PlannerInterface_spaceIndex_set)
PlannerInterface_swigregister = _motionplanning.PlannerInterface_swigregister
PlannerInterface_swigregister(PlannerInterface)
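# A minimal sketch (illustrative only) of the point-to-point workflow described
# in the PlannerInterface docstring; `space`, `qstart`, and `qgoal` are assumed
# to be a configured CSpaceInterface and two feasible configurations. Kept as
# comments so importing this module has no side effects:
#
#   planner = PlannerInterface(space)
#   planner.setEndpoints(qstart, qgoal)   # start/goal become milestones 0 and 1
#   path = None
#   while not path:
#       planner.planMore(100)             # plan in batches of iterations
#       path = planner.getPath(0, 1)      # non-empty once a path is found
#   V, E = planner.getRoadmap()           # roadmap vertices and edges
#   planner.dump("roadmap.tgf")           # save in Trivial Graph Format
#   planner.destroy()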
# This file is compatible with both classic and new-style classes.
| 29.243667
| 133
| 0.630821
|
43ac62446c34dd70f9038bb3ad8c0150e4711618
| 8,373
|
py
|
Python
|
WebCabinet/t_withwrawal_uploads.py
|
Otsgolyak/shared_storage
|
81118a5e090973285e411c6cff385b832be90c9f
|
[
"Apache-2.0"
] | null | null | null |
WebCabinet/t_withwrawal_uploads.py
|
Otsgolyak/shared_storage
|
81118a5e090973285e411c6cff385b832be90c9f
|
[
"Apache-2.0"
] | null | null | null |
WebCabinet/t_withwrawal_uploads.py
|
Otsgolyak/shared_storage
|
81118a5e090973285e411c6cff385b832be90c9f
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from PageObject.cabinet.login_page import LoginPage
from PageObject.cabinet.profile_page import ProfilePage
from PageObject.cabinet.profile_page import EditProfilePage
from PageObject.cabinet.profile_page import UploadFiles
from PageObject.cabinet.withdrawal_page import UploadFilesWithdrawalPage
import os, os.path
import unittest
from sys import platform
class UploadTestSuite(unittest.TestCase):
FILES_IN_DIR = os.listdir('.')
if 'Download_from_tests2' not in FILES_IN_DIR: # If there is no folder with this name in the current directory
os.mkdir('Download_from_tests2') # Create it
raise RuntimeError('The test must be restarted after the folder has been created')
@classmethod
def setUpClass(cls):
cls.filename = '56f6b1939cf19153b3a5a8bb.png' # Store the image filename in a class attribute
cls.webcabinet_dir = os.getcwd() # Get the project directory
lin_dir = '/'.join(cls.webcabinet_dir.split('\\')) # Reformat the directory path for Linux
chrome_options = webdriver.ChromeOptions()
# Configure the browser to download files into the required directory; different OSes need different settings
if platform == "win32":
prefs = {'download.default_directory': cls.webcabinet_dir + '\\Download_from_tests2'} # Windows path to the download directory
chrome_options.add_experimental_option('prefs', prefs)
else:
prefs = {'download.default_directory': lin_dir + '/Download_from_tests2'} # Linux path to the download directory
chrome_options.add_experimental_option('prefs', prefs)
capabilities = {
"browserName": "chrome",
}
cls.driver = webdriver.Remote(desired_capabilities=capabilities,
command_executor="http://195.201.213.204:4444/wd/hub", options=chrome_options)
cls.driver.maximize_window()
upload = UploadFiles(cls.driver)
upload.remove_folder_contents('Download_from_tests2/') # Clear the folder contents
login_page = LoginPage(cls.driver)
login_page.login(username='shanterrr0002@yahoo.com', password='Qweqwe321!') # Log in
profile = ProfilePage(cls.driver)
profile.page_is_loaded() # Check that the page has loaded
profile.profile_lnk().click() # Click the profile link
profile.page_is_loaded() # Check that the page has loaded
edit = EditProfilePage(cls.driver)
edit.fill_profile_random_data() # If the profile is not fully filled in, fill it
upload = UploadFiles(cls.driver)
upload.remove_all_download_files() # Remove previously uploaded files, if any
def setUp(self):
self.driver.refresh() # Refresh the page
self.driver.get('https://trade.trademux.net/cabinet/clientarea/withdraw') # Open the withdrawal URL
withdrawal = UploadFilesWithdrawalPage(self.driver)
withdrawal.page_is_loaded() # Check that the page has loaded
def test_01_check_upload_download_remove_identity(self):
"""Тест проверки загрузки поля 'подтверждение личности'"""
os.chdir(self.webcabinet_dir) # Возвращаем директорию проекта
w_upload = UploadFilesWithdrawalPage(self.driver)
w_upload.upload_identity_inp().send_keys(self.filename) # Загружаем файл
upload = UploadFiles(self.driver)
upload.wait_upload_or_refresh() # Если элемент кнопки удаления не найден, обновляем страницу, по умолчанию 3 попытки
upload.last_downloaded_file().click() # Ждем видимость и кликаем по последнему загруженному файлу
upload.download_wait() # Ждем загрузку файла 20 сек по умолчанию
upload.remove_last_downloaded_file() # Удаляем последний загруженный файл со страницы
old_hesh = (upload.md5(self.filename)) # Берем хеш до загрузки
os.chdir(os.path.dirname(os.path.join(self.webcabinet_dir, 'Download_from_tests2'))) # Меняем текущую директорию на папку для загрузок
new_hesh = (upload.md5(self.filename)) # Берем хеш после загрузки
assert old_hesh == new_hesh # Сравниваем хеши
upload.remove_folder_contents('Download_from_tests2/') # Чистим содержимое папки
upload.wait_remove_folder_content() # Ждём пока очистится
def test_02_check_upload_download_remove_proof_of_residence(self):
"""Тест проверки загрузки поля 'подтверждение адреса'"""
os.chdir(self.webcabinet_dir) # Возвращаем директорию проекта
w_upload = UploadFilesWithdrawalPage(self.driver)
w_upload.upload_proof_of_residence_inp().send_keys(self.filename) # Загружаем файл
upload = UploadFiles(self.driver)
upload.wait_upload_or_refresh() # Если элемент кнопки удаления не найден, обновляем страницу, по умолчанию 3 попытки
upload.last_downloaded_file().click() # Ждем видимость и кликаем по последнему загруженному файлу
upload.download_wait() # Ждем загрузку файла 20 сек по умолчанию
upload.remove_last_downloaded_file() # Удаляем последний загруженный файл со страницы
old_hesh = (upload.md5(self.filename)) # Берем хеш до загрузки
os.chdir(os.path.dirname(os.path.join(self.webcabinet_dir, 'Download_from_tests2'))) # Меняем текущую директорию на папку для загрузок
new_hesh = (upload.md5(self.filename)) # Берем хеш после загрузки
assert old_hesh == new_hesh # Сравниваем хеши
upload.remove_folder_contents('Download_from_tests2/') # Чистим содержимое папки
upload.wait_remove_folder_content(path=self.webcabinet_dir + '/Download_from_tests2') # Ждём пока очистится
def test_03_check_upload_download_remove_credit_card(self):
"""Тест проверки загрузки поля 'кредитная карта'"""
os.chdir(self.webcabinet_dir) # Возвращаем директорию проекта
w_upload = UploadFilesWithdrawalPage(self.driver)
w_upload.upload_credit_card_inp().send_keys(self.filename) # Загружаем файл
upload = UploadFiles(self.driver)
upload.wait_upload_or_refresh() # Если элемент кнопки удаления не найден, обновляем страницу, по умолчанию 3 попытки
upload.last_downloaded_file().click() # Ждем видимость и кликаем по последнему загруженному файлу
upload.download_wait() # Ждем загрузку файла 20 сек по умолчанию
upload.remove_last_downloaded_file() # Удаляем последний загруженный файл со страницы
old_hesh = (upload.md5(self.filename)) # Берем хеш до загрузки
os.chdir(os.path.dirname(os.path.join(self.webcabinet_dir, 'Download_from_tests2'))) # Меняем текущую директорию на папку для загрузок
new_hesh = (upload.md5(self.filename)) # Берем хеш после загрузки
assert old_hesh == new_hesh # Сравниваем хеши
upload.remove_folder_contents('Download_from_tests2/') # Чистим содержимое папки
upload.wait_remove_folder_content(path=self.webcabinet_dir + '/Download_from_tests2') # Ждём пока очистится
def test_04_check_upload_download_remove_other(self):
"""Тест проверки загрузки поля 'другое'"""
os.chdir(self.webcabinet_dir) # Возвращаем директорию проекта
w_upload = UploadFilesWithdrawalPage(self.driver)
w_upload.upload_other_inp().send_keys(self.filename) # Загружаем файл
upload = UploadFiles(self.driver)
upload.wait_upload_or_refresh() # Если элемент кнопки удаления не найден, обновляем страницу, по умолчанию 3 попытки
upload.last_downloaded_file().click() # Ждем видимость и кликаем по последнему загруженному файлу
upload.download_wait() # Ждем загрузку файла 20 сек по умолчанию
upload.remove_last_downloaded_file() # Удаляем последний загруженный файл со страницы
old_hesh = (upload.md5(self.filename)) # Берем хеш до загрузки
os.chdir(os.path.dirname(os.path.join(self.webcabinet_dir, 'Download_from_tests2'))) # Меняем текущую директорию на папку для загрузок
new_hesh = (upload.md5(self.filename)) # Берем хеш после загрузки
assert old_hesh == new_hesh # Сравниваем хеши
upload.remove_folder_contents('Download_from_tests2/') # Чистим содержимое папки
upload.wait_remove_folder_content(path=self.webcabinet_dir + '/Download_from_tests2') # Ждём пока очистится
@classmethod
def tearDownClass(cls):
cls.driver.quit()
if __name__ == "__main__":
unittest.main()
| 63.916031
| 147
| 0.729965
|
f27c372d62f5120b8bcc9e14dc216f8482eb4829
| 1,150
|
py
|
Python
|
2017/day23.py
|
bovarysme/advent
|
9a7a3310984d4b7548ad23e2dfa017c6fe9e2c9c
|
[
"MIT"
] | 4
|
2017-12-05T00:53:21.000Z
|
2018-12-03T14:00:56.000Z
|
2017/day23.py
|
bovarysme/advent
|
9a7a3310984d4b7548ad23e2dfa017c6fe9e2c9c
|
[
"MIT"
] | null | null | null |
2017/day23.py
|
bovarysme/advent
|
9a7a3310984d4b7548ad23e2dfa017c6fe9e2c9c
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from math import sqrt
def part_one(instructions):
regs = defaultdict(int)
regs['1'] = 1
ptr = 0
count = 0
while ptr >= 0 and ptr < len(instructions):
op, reg, val = instructions[ptr].split()
if val.isalpha():
val = regs[val]
else:
val = int(val)
if op == 'set':
regs[reg] = val
elif op == 'sub':
regs[reg] -= val
elif op == 'mul':
regs[reg] *= val
count += 1
elif op == 'jnz' and regs[reg] != 0:
ptr += val
continue
ptr += 1
return count
def part_two():
count = 0
for number in range(107900, 124901, 17):
sup = int(sqrt(number))
for divisor in range(2, sup + 1):  # include the integer square root itself
if number % divisor == 0:
count += 1
break
return count
if __name__ == '__main__':
with open('inputs/day23.txt', 'r') as f:
instructions = [line.rstrip() for line in f]
print('Answer for part one:', part_one(instructions))
print('Answer for part two:', part_two())
| 21.296296
| 57
| 0.502609
|
a3126e49099945252f6f0bdb7d9e3e4f93e80b12
| 62,054
|
py
|
Python
|
utilities/facade-worker.py
|
mkdolan/facade
|
4360052bf10449d9e024fbd7679c74726910cf95
|
[
"Apache-2.0"
] | 26
|
2017-07-15T09:13:33.000Z
|
2022-01-17T03:35:49.000Z
|
utilities/facade-worker.py
|
brianwarner/facade
|
f08cb6ab01016fd3febda1bb0963eafe61acdf89
|
[
"Apache-2.0"
] | 40
|
2016-08-04T15:45:13.000Z
|
2022-02-14T15:43:57.000Z
|
utilities/facade-worker.py
|
mkdolan/facade
|
4360052bf10449d9e024fbd7679c74726910cf95
|
[
"Apache-2.0"
] | 17
|
2016-08-05T18:28:05.000Z
|
2022-02-14T15:11:18.000Z
|
#!/usr/bin/python3
# Copyright 2016-2018 Brian Warner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Git repo maintenance
#
# This script is responsible for cloning new repos and keeping existing repos up
# to date. It can be run as often as you want (and will detect when it's
# already running, so as not to spawn parallel processes), but once or twice per
# day should be more than sufficient. Each time it runs, it updates the repo
# and checks for any parents of HEAD that aren't already accounted for in the
# repos. It also rebuilds analysis data, checks any changed affiliations and
# aliases, and caches data for display.
import sys
import platform
import imp
import time
import datetime
import html.parser
import subprocess
import os
import getopt
import xlsxwriter
import configparser
if platform.python_implementation() == 'PyPy':
import pymysql
else:
import MySQLdb
global log_level
html = html.parser.HTMLParser()
# Important: Do not modify the database number unless you've also added an
# update clause to update_db!
upstream_db = 7
#### Database update functions ####
def increment_db(version):
# Helper function to increment the database number
increment_db = ("INSERT INTO settings (setting,value) "
"VALUES ('database_version',%s)")
cursor.execute(increment_db, (version, ))
db.commit()
print("Database updated to version: %s" % version)
def update_db(version):
# This function should incrementally step any version of the database up to
# the current schema. After executing the database operations, call
# increment_db to bring it up to the version with which it is now compliant.
print("Attempting database update")
if version < 0:
increment_db(0)
if version < 1:
# for commit f49b2f0e46b32997a72508bc83a6b1e834069588
add_update_frequency = ("INSERT INTO settings (setting,value) "
"VALUES ('update_frequency',24)")
cursor.execute(add_update_frequency)
db.commit()
increment_db(1)
if version < 2:
add_recache_to_projects = ("ALTER TABLE projects ADD COLUMN "
"recache BOOL DEFAULT TRUE")
cursor.execute(add_recache_to_projects)
db.commit()
increment_db(2)
if version < 3:
add_results_setting = ("INSERT INTO settings (setting,value) "
"VALUES ('results_visibility','show')")
cursor.execute(add_results_setting)
db.commit()
increment_db(3)
if version < 4:
add_working_commits_table = ("CREATE TABLE IF NOT EXISTS working_commits ("
"repos_id INT UNSIGNED NOT NULL,"
"working_commit VARCHAR(40))")
cursor.execute(add_working_commits_table)
db.commit()
# Make sure all working commits are processed
get_working_commits = ("SELECT id,working_commit "
"FROM repos WHERE working_commit > ''")
cursor.execute(get_working_commits)
working_commits = list(cursor)
for commit in working_commits:
trim_commit(commit['id'],commit['working_commit'])
# Now it's safe to discard the (now unused) column
remove_working_commit_column = ("ALTER TABLE repos DROP COLUMN "
"working_commit")
cursor.execute(remove_working_commit_column)
db.commit()
increment_db(4)
if version < 5:
add_weekly_project_cache = ("CREATE TABLE IF NOT EXISTS project_weekly_cache ("
"projects_id INT UNSIGNED NOT NULL,"
"email VARCHAR(128) NOT NULL,"
"affiliation VARCHAR(128),"
"week TINYINT UNSIGNED NOT NULL,"
"year SMALLINT UNSIGNED NOT NULL,"
"added BIGINT UNSIGNED NOT NULL,"
"removed BIGINT UNSIGNED NOT NULL,"
"whitespace BIGINT UNSIGNED NOT NULL,"
"files BIGINT UNSIGNED NOT NULL,"
"patches BIGINT UNSIGNED NOT NULL,"
"INDEX `projects_id,year,affiliation` (projects_id,year,affiliation),"
"INDEX `projects_id,year,email` (projects_id,year,email),"
"INDEX `projects_id,affiliation` (projects_id,affiliation),"
"INDEX `projects_id,email` (projects_id,email))")
cursor.execute(add_weekly_project_cache)
db.commit()
add_weekly_repo_cache = ("CREATE TABLE IF NOT EXISTS repo_weekly_cache ("
"repos_id INT UNSIGNED NOT NULL,"
"email VARCHAR(128) NOT NULL,"
"affiliation VARCHAR(128),"
"week TINYINT UNSIGNED NOT NULL,"
"year SMALLINT UNSIGNED NOT NULL,"
"added BIGINT UNSIGNED NOT NULL,"
"removed BIGINT UNSIGNED NOT NULL,"
"whitespace BIGINT UNSIGNED NOT NULL,"
"files BIGINT UNSIGNED NOT NULL,"
"patches BIGINT UNSIGNED NOT NULL,"
"INDEX `repos_id,year,affiliation` (repos_id,year,affiliation),"
"INDEX `repos_id,year,email` (repos_id,year,email),"
"INDEX `repos_id,affiliation` (repos_id,affiliation),"
"INDEX `repos_id,email` (repos_id,email))")
cursor.execute(add_weekly_repo_cache)
db.commit()
increment_db(5)
if version < 6:
# As originally written, the UNIQUE wasn't working because it allowed
# multiple NULL values in end_date.
drop_special_tags_constraint = ("ALTER TABLE special_tags "
"DROP INDEX `email,start_date,end_date,tag`")
cursor.execute(drop_special_tags_constraint)
db.commit()
add_unique_in_special_tags = ("ALTER TABLE special_tags "
"ADD UNIQUE `email,start_date,tag` (email,start_date,tag)")
cursor.execute(add_unique_in_special_tags)
db.commit()
increment_db(6)
if version < 7:
# Using NULL for an unbounded end_date in special_tags ended up being
# difficult when doing certain types of reports. The logic is much
# cleaner if we just use an end_date that is ridiculously far into the
# future.
remove_null_end_dates_in_special_tags = ("UPDATE special_tags "
"SET end_date = '9999-12-31' WHERE end_date IS NULL")
cursor.execute(remove_null_end_dates_in_special_tags)
db.commit()
increment_db(7)
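# A hypothetical template for the next migration step (illustrative only; no
# version-8 change exists yet). Each new step follows the same pattern and must
# be matched by bumping upstream_db at the top of this file:
#
#   if version < 8:
#       cursor.execute("ALTER TABLE ...")  # schema change goes here
#       db.commit()
#       increment_db(8)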
print("No further database updates.\n")
def migrate_database_config():
# Since we're changing the way we store database credentials, we need a way to
# transparently migrate anybody who was using the old file. Someday after a long
# while this can disappear.
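# The migration below produces a db.cfg shaped like this (illustrative values):
#
#   [main_database]
#   user = facade
#   pass = secret
#   name = facade
#   host = localhost
#
#   [people_database]
#   user = facade
#   pass = secret
#   name = facade
#   host = localhost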
try:
# If the old database config was found, write a new config
imp.find_module('db')
db_config = configparser.ConfigParser()
from db import db_user,db_pass,db_name,db_host
from db import db_user_people,db_pass_people,db_name_people,db_host_people
db_config.add_section('main_database')
db_config.set('main_database','user',db_user)
db_config.set('main_database','pass',db_pass)
db_config.set('main_database','name',db_name)
db_config.set('main_database','host',db_host)
db_config.add_section('people_database')
db_config.set('people_database','user',db_user_people)
db_config.set('people_database','pass',db_pass_people)
db_config.set('people_database','name',db_name_people)
db_config.set('people_database','host',db_host_people)
with open('db.cfg','w') as db_file:
db_config.write(db_file)
print("Migrated old style config file to new.")
except:
# If nothing is found, the user probably hasn't run setup yet.
sys.exit("Can't find database config. Have you run setup.py?")
try:
os.remove('db.py')
os.remove('db.pyc')
print("Removed unneeded config files")
except:
print("Attempted to remove unneeded config files")
return db_user,db_pass,db_name,db_host,db_user_people,db_pass_people,db_name_people,db_host_people
#### Global helper functions ####
def database_connection(db_host,db_user,db_pass,db_name):
# Return a database connection based upon which interpreter we're using. CPython
# can use any database connection, although MySQLdb is preferred over pymysql
# for performance reasons. However, PyPy can't use MySQLdb at this point,
# instead requiring a pure python MySQL client. This function returns a database
# connection that should provide maximum performance depending upon the
# interpreter in use.
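# Typical call, as used in analyze_commit() below when running multithreaded:
#   db_local, cursor_local = database_connection(db_host, db_user, db_pass, db_name)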
if platform.python_implementation() == 'PyPy':
db = pymysql.connect(
host = db_host,
user = db_user,
passwd = db_pass,
db = db_name,
charset = 'utf8mb4')
cursor = db.cursor(pymysql.cursors.DictCursor)
else:
db = MySQLdb.connect(
host = db_host,
user = db_user,
passwd = db_pass,
db = db_name,
charset = 'utf8mb4')
cursor = db.cursor(MySQLdb.cursors.DictCursor)
return db,cursor
def get_setting(setting):
# Get a setting from the database
query = ("SELECT value FROM settings WHERE setting=%s ORDER BY "
"last_modified DESC LIMIT 1")
cursor.execute(query, (setting, ))
return cursor.fetchone()["value"]
def update_status(status):
# Update the status displayed in the UI
query = ("UPDATE settings SET value=%s WHERE setting='utility_status'")
cursor.execute(query, (status, ))
db.commit()
def log_activity(level,status):
# Log an activity based upon urgency and user's preference. If the log level is
# "Debug", then just print it and don't save it in the database.
log_options = ('Error','Quiet','Info','Verbose','Debug')
if log_level == 'Debug' and level == 'Debug':
sys.stderr.write("* %s\n" % status)
return
if log_options.index(level) <= log_options.index(log_level):
query = ("INSERT INTO utility_log (level,status) VALUES (%s,%s)")
cursor.execute(query, (level, status))
db.commit()
sys.stderr.write("* %s\n" % status)
def update_repo_log(repos_id,status):
# Log a repo's fetch status
log_message = ("INSERT INTO repos_fetch_log (repos_id,status) "
"VALUES (%s,%s)")
cursor.execute(log_message, (repos_id, status))
db.commit()
def trim_commit(repo_id,commit):
# Quickly remove a given commit
remove_commit = ("DELETE FROM analysis_data "
"WHERE repos_id=%s "
"AND commit=%s")
cursor.execute(remove_commit, (repo_id, commit))
db.commit()
log_activity('Debug','Trimmed commit: %s' % commit)
def store_working_author(email):
# Store the working author during affiliation discovery, in case it is
# interrupted and needs to be trimmed.
store = ("UPDATE settings "
"SET value = %s "
"WHERE setting = 'working_author'")
cursor.execute(store, (email, ))
db.commit()
log_activity('Debug','Stored working author: %s' % email)
def trim_author(email):
# Remove the affiliations associated with an email. Used when an analysis is
# interrupted during affiliation layering, and the data will be corrupt.
trim = ("UPDATE analysis_data "
"SET author_affiliation = NULL "
"WHERE author_email = %s")
cursor.execute(trim, (email, ))
db.commit()
trim = ("UPDATE analysis_data "
"SET committer_affiliation = NULL "
"WHERE committer_email = %s")
cursor.execute(trim, (email, ))
db.commit()
store_working_author('done')
log_activity('Debug','Trimmed working author: %s' % email)
def analyze_commit(repo_id,repo_loc,commit):
# This function analyzes a given commit, counting the additions, removals, and
# whitespace changes. It collects all of the metadata about the commit, and
# stashes it in the database. A new database connection is opened each time in
# case we are running in multithreaded mode, since MySQL cursors are not
# currently threadsafe.
### Local helper functions ###
def check_swapped_emails(name,email):
# Sometimes people mix up their name and email in their git settings
if name.find('@') >= 0 and email.find('@') == -1:
log_activity('Debug','Found swapped email/name: %s/%s' % (email,name))
return email,name
else:
return name,email
def strip_extra_amp(email):
# Some repos have multiple ampersands, which really messes up domain pattern
# matching. This extra info is not used, so we discard it.
if email.count('@') > 1:
log_activity('Debug','Found extra @: %s' % email)
return email[:email.find('@',email.find('@')+1)]
else:
return email
def discover_alias(email):
# Match aliases with their canonical email
fetch_canonical = ("SELECT canonical "
"FROM aliases "
"WHERE alias=%s "
"AND active = TRUE")
cursor_people_local.execute(fetch_canonical, (email, ))
db_people_local.commit()
canonical = list(cursor_people_local)
if canonical:
for email in canonical:
return email['canonical']
else:
return email
def store_commit(repos_id,commit,filename,
author_name,author_email,author_date,
committer_name,committer_email,committer_date,
added,removed, whitespace):
# Fix some common issues in git commit logs and store data.
# Sometimes git is misconfigured and name/email get swapped
author_name, author_email = check_swapped_emails(author_name,author_email)
committer_name,committer_email = check_swapped_emails(committer_name,committer_email)
# Some systems append extra info after a second @
author_email = strip_extra_amp(author_email)
committer_email = strip_extra_amp(committer_email)
store = ("INSERT INTO analysis_data (repos_id,commit,filename,"
"author_name,author_raw_email,author_email,author_date,"
"committer_name,committer_raw_email,committer_email,committer_date,"
"added,removed,whitespace) "
"VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
cursor_local.execute(store, (
repos_id,commit,filename,
author_name,author_email,discover_alias(author_email),author_date,
committer_name,committer_email,discover_alias(committer_email),committer_date,
added,removed,whitespace))
db_local.commit()
log_activity('Debug','Stored commit: %s' % commit)
### The real function starts here ###
header = True
filename = ''
added = 0
removed = 0
whitespace = 0
# Set up new threadsafe database connections if multithreading. Otherwise
# use the global database connections so we don't incur a performance
# penalty.
if multithreaded:
db_local,cursor_local = database_connection(
db_host,
db_user,
db_pass,
db_name)
db_people_local,cursor_people_local = database_connection(
db_host_people,
db_user_people,
db_pass_people,
db_name_people)
else:
db_local = db
cursor_local = cursor
db_people_local = db_people
cursor_people_local = cursor_people
# Read the git log
git_log = subprocess.Popen(["git --git-dir %s log -p -M %s -n1 "
"--pretty=format:'"
"author_name: %%an%%nauthor_email: %%ae%%nauthor_date:%%ai%%n"
"committer_name: %%cn%%ncommitter_email: %%ce%%ncommitter_date: %%ci%%n"
"parents: %%p%%nEndPatch' "
% (repo_loc,commit)], stdout=subprocess.PIPE, shell=True)
# Stash the commit we're going to analyze so we can back it out if something
# goes wrong later.
store_working_commit = ("INSERT INTO working_commits "
"(repos_id,working_commit) VALUES (%s,%s)")
cursor_local.execute(store_working_commit, (repo_id,commit))
db_local.commit()
log_activity('Debug','Stored working commit and analyzing : %s' % commit)
for line in git_log.stdout.read().decode("utf-8",errors="ignore").split(os.linesep):
if len(line) > 0:
if line.find('author_name:') == 0:
author_name = line[13:]
continue
if line.find('author_email:') == 0:
author_email = line[14:]
continue
if line.find('author_date:') == 0:
author_date = line[12:22]
continue
if line.find('committer_name:') == 0:
committer_name = line[16:]
continue
if line.find('committer_email:') == 0:
committer_email = line[17:]
continue
if line.find('committer_date:') == 0:
committer_date = line[16:26]
continue
if line.find('parents:') == 0:
if len(line[9:].split(' ')) == 2:
# We found a merge commit, which won't have a filename
filename = '(Merge commit)'
added = 0
removed = 0
whitespace = 0
continue
if line.find('--- a/') == 0:
if filename == '(Deleted) ':
filename = filename + line[6:]
continue
if line.find('+++ b/') == 0:
if not filename.find('(Deleted) ') == 0:
filename = line[6:]
continue
if line.find('rename to ') == 0:
filename = line[10:]
continue
if line.find('deleted file ') == 0:
filename = '(Deleted) '
continue
if line.find('diff --git') == 0:
# Git only displays the beginning of a file in a patch, not
# the end. We need some kludgery to discern where one starts
# and one ends. This is the last line always separating
# files in commits. But we only want to do it for the second
# time onward, since the first time we hit this line it'll be
# right after parsing the header and there won't be any useful
# information contained in it.
if not header:
store_commit(repo_id,commit,filename,
author_name,author_email,author_date,
committer_name,committer_email,committer_date,
added,removed,whitespace)
header = False
# Reset stats and prepare for the next section
whitespaceCheck = []
resetRemovals = True
filename = ''
added = 0
removed = 0
whitespace = 0
continue
# Count additions and removals and look for whitespace changes
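# (For example, removing "    if result == expected:" and re-adding it with
# different indentation is counted once as whitespace, not as a removal plus
# an addition, because the stripped lines match and are longer than 8 chars.)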
if not header:
if line[0] == '+':
# First check if this is a whitespace change
if len(line.strip()) == 1:
# Line with zero length
whitespace += 1
else:
# Compare against removals, detect whitespace changes
whitespaceChange = False
for check in whitespaceCheck:
# Mark matches of non-trivial length
if line[1:].strip() == check and len(line[1:].strip()) > 8:
whitespaceChange = True
if whitespaceChange:
# One removal was whitespace, back it out
removed -= 1
whitespace += 1
# Remove the matched line
whitespaceCheck.remove(check)
else:
# Did not trigger whitespace criteria
added += 1
# Once we hit an addition, next removal line will be new.
# At that point, start a new collection for checking.
resetRemovals = True
if line[0] == '-':
removed += 1
if resetRemovals:
whitespaceCheck = []
resetRemovals = False
# Store the line to check next add lines for a match
whitespaceCheck.append(line[1:].strip())
# Store the last stats from the git log
store_commit(repo_id,commit,filename,
author_name,author_email,author_date,
committer_name,committer_email,committer_date,
added,removed,whitespace)
# Remove the working commit.
remove_commit = ("DELETE FROM working_commits "
"WHERE repos_id = %s AND working_commit = %s")
cursor_local.execute(remove_commit, (repo_id,commit))
db_local.commit()
log_activity('Debug','Completed and removed working commit: %s' % commit)
# If multithreading, clean up the local database
if multithreaded:
cursor_local.close()
cursor_people_local.close()
db_local.close()
db_people_local.close()
#### Facade main functions ####
def git_repo_cleanup():
# Clean up any git repos that are pending deletion
update_status('Purging deleted repos')
log_activity('Info','Processing deletions')
repo_base_directory = get_setting('repo_directory')
query = "SELECT id,projects_id,path,name FROM repos WHERE status='Delete'"
cursor.execute(query)
delete_repos = list(cursor)
for row in delete_repos:
# Remove the files on disk
cmd = ("rm -rf %s%s/%s%s"
% (repo_base_directory,row['projects_id'],row['path'],row['name']))
return_code = subprocess.Popen([cmd],shell=True).wait()
# Remove the analysis data
remove_analysis_data = "DELETE FROM analysis_data WHERE repos_id=%s"
cursor.execute(remove_analysis_data, (row['id'], ))
optimize_table = "OPTIMIZE TABLE analysis_data"
cursor.execute(optimize_table)
db.commit()
# Remove cached repo data
remove_repo_weekly_cache = "DELETE FROM repo_weekly_cache WHERE repos_id=%s"
cursor.execute(remove_repo_weekly_cache, (row['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE repo_weekly_cache"
cursor.execute(optimize_table)
db.commit()
remove_repo_monthly_cache = "DELETE FROM repo_monthly_cache WHERE repos_id=%s"
cursor.execute(remove_repo_monthly_cache, (row['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE repo_monthly_cache"
cursor.execute(optimize_table)
db.commit()
remove_repo_annual_cache = "DELETE FROM repo_annual_cache WHERE repos_id=%s"
cursor.execute(remove_repo_annual_cache, (row['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE repo_annual_cache"
cursor.execute(optimize_table)
db.commit()
# Set project to be recached if just removing a repo
set_project_recache = ("UPDATE projects SET recache=TRUE "
"WHERE id=%s")
cursor.execute(set_project_recache,(row['projects_id'], ))
db.commit()
# Remove the entry from the repos table
query = "DELETE FROM repos WHERE id=%s"
cursor.execute(query, (row['id'], ))
db.commit()
log_activity('Verbose','Deleted repo %s' % row['id'])
cleanup = '%s/%s%s' % (row['projects_id'],row['path'],row['name'])
# Remove any working commits
remove_working_commits = "DELETE FROM working_commits WHERE repos_id=%s"
cursor.execute(remove_working_commits, (row['id'], ))
db.commit()
# Remove the repo from the logs
remove_logs = ("DELETE FROM repos_fetch_log WHERE repos_id = %s")
cursor.execute(remove_logs, (row['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE repos_fetch_log"
cursor.execute(optimize_table)
db.commit()
# Attempt to cleanup any empty parent directories
while (cleanup.find('/',0) > 0):
cleanup = cleanup[:cleanup.rfind('/',0)]
cmd = "rmdir %s%s" % (repo_base_directory,cleanup)
subprocess.Popen([cmd],shell=True).wait()
log_activity('Verbose','Attempted %s' % cmd)
update_repo_log(row['id'],'Deleted')
# Clean up deleted projects
get_deleted_projects = "SELECT id FROM projects WHERE name='(Queued for removal)'"
cursor.execute(get_deleted_projects)
deleted_projects = list(cursor)
for project in deleted_projects:
# Remove cached data for projects which were marked for deletion
clear_annual_cache = ("DELETE FROM project_annual_cache WHERE "
"projects_id=%s")
cursor.execute(clear_annual_cache, (project['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE project_annual_cache"
cursor.execute(optimize_table)
db.commit()
clear_monthly_cache = ("DELETE FROM project_monthly_cache WHERE "
"projects_id=%s")
cursor.execute(clear_monthly_cache, (project['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE project_monthly_cache"
cursor.execute(optimize_table)
db.commit()
clear_weekly_cache = ("DELETE FROM project_weekly_cache WHERE "
"projects_id=%s")
cursor.execute(clear_weekly_cache, (project['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE project_weekly_cache"
cursor.execute(optimize_table)
db.commit()
clear_unknown_cache = ("DELETE FROM unknown_cache WHERE "
"projects_id=%s")
cursor.execute(clear_unknown_cache, (project['id'], ))
db.commit()
optimize_table = "OPTIMIZE TABLE project_weekly_cache"
cursor.execute(optimize_table)
db.commit()
# Remove any projects which were also marked for deletion
remove_project = "DELETE FROM projects WHERE id=%s"
cursor.execute(remove_project, (project['id'], ))
db.commit()
log_activity('Info','Processing deletions (complete)')
def git_repo_initialize():
# Select any new git repos so we can set up their locations and git clone
update_status('Fetching new repos')
log_activity('Info','Fetching new repos')
query = "SELECT id,projects_id,git FROM repos WHERE status LIKE 'New%'";
cursor.execute(query)
new_repos = list(cursor)
for row in new_repos:
print(row["git"])
update_repo_log(row['id'],'Cloning')
git = html.unescape(row["git"])
# Strip protocol from remote URL, set a unique path on the filesystem
if git.find('://',0) > 0:
repo_relative_path = git[git.find('://',0)+3:][:git[git.find('://',0)+3:].rfind('/',0)+1]
else:
repo_relative_path = git[:git.rfind('/',0)+1]
# Get the full path to the directory where we'll clone the repo
repo_path = ('%s%s/%s' %
(repo_base_directory,row["projects_id"],repo_relative_path))
# Get the name of repo
repo_name = git[git.rfind('/',0)+1:]
if repo_name.find('.git',0) > -1:
repo_name = repo_name[:repo_name.find('.git',0)]
# Check if there will be a storage path collision
query = ("SELECT NULL FROM repos WHERE CONCAT(projects_id,'/',path,name) = %s")
cursor.execute(query, ('{}/{}{}'.format(row["projects_id"], repo_relative_path, repo_name), ))
db.commit()
# If there is a collision, append a slug to repo_name to yield a unique path
if cursor.rowcount:
slug = 1
is_collision = True
while is_collision:
if os.path.isdir('%s%s-%s' % (repo_path,repo_name,slug)):
slug += 1
else:
is_collision = False
repo_name = '%s-%s' % (repo_name,slug)
log_activity('Verbose','Identical repo detected, storing %s in %s' %
(git,repo_name))
# Create the prerequisite directories
return_code = subprocess.Popen(['mkdir -p %s' %repo_path],shell=True).wait()
# Make sure it's ok to proceed
if return_code != 0:
print("COULD NOT CREATE REPO DIRECTORY")
update_repo_log(row['id'],'Failed (mkdir)')
update_status('Failed (mkdir %s)' % repo_path)
log_activity('Error','Could not create repo directory: %s' %
repo_path)
sys.exit("Could not create git repo's prerequisite directories. "
" Do you have write access?")
update_repo_log(row['id'],'New (cloning)')
query = ("UPDATE repos SET status='New (Initializing)', path=%s, "
"name=%s WHERE id=%s")
cursor.execute(query, (repo_relative_path,repo_name,row["id"]))
db.commit()
log_activity('Verbose','Cloning: %s' % git)
cmd = "git -C %s clone '%s' %s" % (repo_path,git,repo_name)
return_code = subprocess.Popen([cmd], shell=True).wait()
if (return_code == 0):
# If cloning succeeded, repo is ready for analysis
# Mark the entire project for an update, so that under normal
# circumstances caches are rebuilt only once per waiting period.
update_project_status = ("UPDATE repos SET status='Update' WHERE "
"projects_id=%s")
cursor.execute(update_project_status, (row['projects_id'], ))
db.commit()
# Since we just cloned the new repo, set it straight to analyze.
query = ("UPDATE repos SET status='Analyze',path=%s, name=%s "
"WHERE id=%s")
cursor.execute(query, (repo_relative_path,repo_name,row["id"]))
db.commit()
update_repo_log(row['id'],'Up-to-date')
log_activity('Info','Cloned %s' % git)
else:
# If cloning failed, log it and set the status back to new
update_repo_log(row['id'],'Failed (%s)' % return_code)
query = ("UPDATE repos SET status='New (failed)' WHERE id=%s")
cursor.execute(query, (row['id'], ))
db.commit()
log_activity('Error','Could not clone %s' % git)
log_activity('Info', 'Fetching new repos (complete)')
def check_for_repo_updates():
# Check the last time a repo was updated and if it has been longer than the
# update_frequency, mark its project for updating during the next analysis.
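# For example, with update_frequency = 24 a repo whose last 'Up-to-date' fetch
# was 30 hours ago has no log entry inside the 24-hour window checked below,
# so it (and the rest of its project) gets marked 'Update' on this pass.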
update_status('Checking if any repos need to update')
log_activity('Info','Checking repos to update')
update_frequency = get_setting('update_frequency')
get_initialized_repos = ("SELECT id FROM repos WHERE status NOT LIKE 'New%' "
"AND status != 'Delete' "
"AND status != 'Analyze'")
cursor.execute(get_initialized_repos)
repos = list(cursor)
for repo in repos:
# Figure out which repos have been updated within the waiting period
get_last_update = ("SELECT NULL FROM repos_fetch_log WHERE "
"repos_id=%s AND status='Up-to-date' AND "
"date >= CURRENT_TIMESTAMP(6) - INTERVAL %s HOUR ")
cursor.execute(get_last_update, (repo['id'], update_frequency))
# If the repo has not been updated within the waiting period, mark it.
# Also mark any other repos in the project, so we only recache the
# project once per waiting period.
if cursor.rowcount == 0:
mark_repo = ("UPDATE repos r JOIN projects p ON p.id = r.projects_id "
"SET status='Update' WHERE "
"r.id=%s ")
cursor.execute(mark_repo, (repo['id'], ))
db.commit()
# Mark the entire project for an update, so that under normal
# circumstances caches are rebuilt only once per waiting period.
update_project_status = ("UPDATE repos r LEFT JOIN repos s ON r.projects_id=s.projects_id "
"SET r.status='Update' WHERE s.status='Update' AND "
"r.status != 'Analyze'")
cursor.execute(update_project_status)
db.commit()
log_activity('Info','Checking repos to update (complete)')
def force_repo_updates():
# Set the status of all non-new repos to "Update".
update_status('Forcing all non-new repos to update')
log_activity('Info','Forcing repos to update')
get_repo_ids = ("UPDATE repos SET status='Update' WHERE status "
"NOT LIKE 'New%' AND STATUS!='Delete'")
cursor.execute(get_repo_ids)
db.commit()
log_activity('Info','Forcing repos to update (complete)')
def force_repo_analysis():
# Set the status of all non-new repos to "Analyze".
update_status('Forcing all non-new repos to be analyzed')
log_activity('Info','Forcing repos to be analyzed')
set_to_analyze = ("UPDATE repos SET status='Analyze' WHERE status "
"NOT LIKE 'New%' AND STATUS!='Delete'")
cursor.execute(set_to_analyze)
db.commit()
log_activity('Info','Forcing repos to be analyzed (complete)')
def git_repo_updates():
# Update existing repos
update_status('Updating repos')
log_activity('Info','Updating existing repos')
repo_base_directory = get_setting('repo_directory')
query = ("SELECT id,projects_id,git,name,path FROM repos WHERE "
"status='Update'");
cursor.execute(query)
existing_repos = list(cursor)
for row in existing_repos:
log_activity('Verbose','Attempting to update %s' % row['git'])
update_repo_log(row['id'],'Updating')
attempt = 0
# Try two times. If it fails the first time, reset and clean the git repo,
# as somebody may have done a rebase. No work is being done in the local
# repo, so there shouldn't be legit local changes to worry about.
while attempt < 2:
cmd = ("git -C %s%s/%s%s pull"
% (repo_base_directory,row['projects_id'],row['path'],row['name']))
return_code = subprocess.Popen([cmd],shell=True).wait()
# If the attempt succeeded, then don't try any further fixes. If
# the attempt to fix things failed, give up and try next time.
if return_code == 0 or attempt == 1:
break
elif attempt == 0:
log_activity('Verbose','git pull failed, attempting reset and '
'clean for %s' % row['git'])
cmd_reset = ("git -C %s%s/%s%s reset --hard origin/master"
% (repo_base_directory,row['projects_id'],row['path'],row['name']))
return_code_reset = subprocess.Popen([cmd_reset],shell=True).wait()
cmd_clean = ("git -C %s%s/%s%s clean -df"
% (repo_base_directory,row['projects_id'],row['path'],row['name']))
return_code_clean = subprocess.Popen([cmd_clean],shell=True).wait()
attempt += 1
if return_code == 0:
set_to_analyze = "UPDATE repos SET status='Analyze' WHERE id=%s"
cursor.execute(set_to_analyze, (row['id'], ))
db.commit()
update_repo_log(row['id'],'Up-to-date')
log_activity('Verbose','Updated %s' % row["git"])
else:
update_repo_log(row['id'],'Failed (%s)' % return_code)
log_activity('Error','Could not update %s' % row["git"])
log_activity('Info','Updating existing repos (complete)')
def analysis():
# Run the analysis by looping over all active repos. For each repo, we retrieve
# the list of commits which lead to HEAD. If any are missing from the database,
# they are filled in. Then we check to see if any commits in the database are
# not in the list of parents, and prune them out.
#
# We also keep track of the last commit to be processed, so that if the analysis
# is interrupted (possibly leading to partial data in the database for the
# commit being analyzed at the time) we can recover.
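# Sketch of the reconciliation with illustrative sets:
#   parent_commits   = {A, B, C}   # reachable from HEAD since start_date
#   existing_commits = {B, C, D}   # already present in analysis_data
#   missing_commits  = {A}         # analyzed and inserted below
#   trimmed_commits  = {D}         # pruned from analysis_data below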
### Local helper functions ###
def update_analysis_log(repos_id,status):
# Log a repo's analysis status
log_message = ("INSERT INTO analysis_log (repos_id,status) "
"VALUES (%s,%s)")
cursor.execute(log_message, (repos_id,status))
db.commit()
### The real function starts here ###
update_status('Running analysis')
log_activity('Info','Beginning analysis')
start_date = get_setting('start_date')
repo_list = "SELECT id,projects_id,path,name FROM repos WHERE status='Analyze'"
cursor.execute(repo_list)
repos = list(cursor)
for repo in repos:
update_analysis_log(repo['id'],'Beginning analysis')
log_activity('Verbose','Analyzing repo: %s (%s)' % (repo['id'],repo['name']))
# First we check to see if the previous analysis didn't complete
get_status = ("SELECT working_commit FROM working_commits WHERE repos_id=%s")
cursor.execute(get_status, (repo['id'], ))
working_commits = list(cursor)
# If there's a commit still there, the previous run was interrupted and
# the commit data may be incomplete. It should be trimmed, just in case.
for commit in working_commits:
trim_commit(repo['id'],commit['working_commit'])
# Remove the working commit.
remove_commit = ("DELETE FROM working_commits "
"WHERE repos_id = %s AND working_commit = %s")
cursor.execute(remove_commit, (repo['id'],commit['working_commit']))
db.commit()
log_activity('Debug','Removed working commit: %s' % commit['working_commit'])
# Start the main analysis
update_analysis_log(repo['id'],'Collecting data')
repo_loc = ('%s%s/%s%s/.git' % (repo_base_directory,
repo["projects_id"], repo["path"],
repo["name"]))
# Grab the parents of HEAD
parents = subprocess.Popen(["git --git-dir %s log --ignore-missing "
"--pretty=format:'%%H' --since=%s" % (repo_loc,start_date)],
stdout=subprocess.PIPE, shell=True)
parent_commits = set(parents.stdout.read().decode("utf-8",errors="ignore").split(os.linesep))
# If there are no commits in the range, we still get a blank entry in
# the set. Remove it, as it messes with the calculations
if '' in parent_commits:
parent_commits.remove('')
# Grab the existing commits from the database
existing_commits = set()
find_existing = ("SELECT DISTINCT commit FROM analysis_data WHERE repos_id=%s")
cursor.execute(find_existing, (repo['id'], ))
for commit in list(cursor):
existing_commits.add(commit['commit'])
# Find missing commits and add them
missing_commits = parent_commits - existing_commits
log_activity('Debug','Commits missing from repo %s: %s' %
(repo['id'],len(missing_commits)))
if multithreaded:
from multiprocessing import Pool
pool = Pool()
for commit in missing_commits:
result = pool.apply_async(analyze_commit,(repo['id'],repo_loc,commit))
pool.close()
pool.join()
else:
for commit in missing_commits:
analyze_commit(repo['id'],repo_loc,commit)
update_analysis_log(repo['id'],'Data collection complete')
update_analysis_log(repo['id'],'Beginning to trim commits')
# Find commits which are out of the analysis range
trimmed_commits = existing_commits - parent_commits
log_activity('Debug','Commits to be trimmed from repo %s: %s' %
(repo['id'],len(trimmed_commits)))
for commit in trimmed_commits:
trim_commit(repo['id'],commit)
set_complete = "UPDATE repos SET status='Complete' WHERE id=%s"
cursor.execute(set_complete, (repo['id'], ))
update_analysis_log(repo['id'],'Commit trimming complete')
update_analysis_log(repo['id'],'Complete')
log_activity('Info','Running analysis (complete)')
def nuke_affiliations():
# Delete all stored affiliations in the database. Normally when you
# add/remove/change affiliation data via the web UI, any potentially affected
# records will be deleted and then rebuilt on the next run. However, if you
# manually add affiliation records via the database or import them by some other
# means, there's no elegant way to discover which affiliations are affected. So
# this is the scorched earth way: remove them all to force a total rebuild.
# Brutal but effective.
log_activity('Info','Nuking affiliations')
nuke = ("UPDATE analysis_data SET author_affiliation = NULL, "
"committer_affiliation = NULL")
cursor.execute(nuke)
db.commit()
log_activity('Info','Nuking affiliations (complete)')
def fill_empty_affiliations():
# When a record is added, it has no affiliation data. Also, when an affiliation
# mapping is changed via the UI, affiliation data will be set to NULL. This
# function finds any records with NULL affiliation data and fills them.
### Local helper functions ###
def update_affiliation(email_type,email,affiliation,start_date):
update = ("UPDATE analysis_data "
"SET %s_affiliation = %%s "
"WHERE %s_email = %%s "
"AND %s_affiliation IS NULL "
"AND %s_date >= %%s" %
(email_type, email_type, email_type, email_type))
cursor.execute(update, (affiliation, email, start_date))
db.commit()
def discover_null_affiliations(attribution,email):
# Try a bunch of ways to match emails to attributions in the database. First it
# tries to match exactly. If that doesn't work, it tries to match by domain. If
# domain doesn't work, it strips subdomains from the email and tries again.
# First we see if there's an exact match. This will also catch malformed or
# intentionally mangled emails (e.g. "developer at domain.com") that have
# been added as an affiliation rather than an alias.
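# For illustration with a hypothetical address "dev@mail.eng.example.com": the
# lookups below try the full email first, then the domain
# "mail.eng.example.com", then the stripped domain "example.com".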
find_exact_match = ("SELECT affiliation,start_date "
"FROM affiliations "
"WHERE domain = %s "
"AND active = TRUE "
"ORDER BY start_date DESC")
cursor_people.execute(find_exact_match, (email, ))
db_people.commit()
matches = list(cursor_people)
if not matches and email.find('@') < 0:
# It's not a properly formatted email, leave it NULL and log it.
log_activity('Info','Unmatchable email: %s' % email)
return
if not matches:
# Now we go for a domain-level match. Try for an exact match.
domain = email[email.find('@')+1:]
find_exact_domain = ("SELECT affiliation,start_date "
"FROM affiliations "
"WHERE domain= %s "
"AND active = TRUE "
"ORDER BY start_date DESC")
cursor_people.execute(find_exact_domain, (domain, ))
db_people.commit()
matches = list(cursor_people)
if not matches:
# Then try stripping any subdomains.
find_domain = ("SELECT affiliation,start_date "
"FROM affiliations "
"WHERE domain = %s "
"AND active = TRUE "
"ORDER BY start_date DESC")
cursor_people.execute(find_domain, (domain[domain.rfind('.',0,domain.rfind('.',0))+1:], ))
db_people.commit()
matches = list(cursor_people)
if not matches:
# One last check to see if it's an unmatched academic domain.
if domain.endswith('.edu'):
matches.append({'affiliation':'(Academic)','start_date':'1970-01-01'})
# Done looking. Now we process any matches that were found.
if matches:
log_activity('Debug','Found domain match for %s' % email)
for match in matches:
update = ("UPDATE analysis_data "
"SET %s_affiliation = %%s "
"WHERE %s_email = %%s "
"AND %s_affiliation IS NULL "
"AND %s_date >= %%s" %
(attribution, attribution, attribution, attribution))
cursor.execute(update, (match['affiliation'], email, match['start_date']))
db.commit()
def discover_alias(email):
# Match aliases with their canonical email
fetch_canonical = ("SELECT canonical "
"FROM aliases "
"WHERE alias=%s "
"AND active = TRUE")
cursor_people.execute(fetch_canonical, (email, ))
db_people.commit()
canonical = list(cursor_people)
if canonical:
for email in canonical:
return email['canonical']
else:
return email
### The real function starts here ###
update_status('Filling empty affiliations')
log_activity('Info','Filling empty affiliations')
# Process any changes to the affiliations or aliases, and set any existing
# entries in analysis_data to NULL so they are filled properly.
# First, get the time we started fetching since we'll need it later
cursor.execute("SELECT current_timestamp(6) as fetched")
affiliations_fetched = cursor.fetchone()['fetched']
# Now find the last time we worked on affiliations, to figure out what's new
affiliations_processed = get_setting('affiliations_processed')
get_changed_affiliations = ("SELECT domain FROM affiliations WHERE "
"last_modified >= %s")
cursor_people.execute(get_changed_affiliations, (affiliations_processed, ))
changed_affiliations = list(cursor_people)
# Process any affiliations which changed since we last checked
for changed_affiliation in changed_affiliations:
log_activity('Debug','Resetting affiliation for %s' %
changed_affiliation['domain'])
set_author_to_null = ("UPDATE analysis_data SET author_affiliation = NULL "
"WHERE author_email LIKE CONCAT('%%',%s)")
cursor.execute(set_author_to_null, (changed_affiliation['domain'], ))
db.commit()
set_committer_to_null = ("UPDATE analysis_data SET committer_affiliation = NULL "
"WHERE committer_email LIKE CONCAT('%%',%s)")
cursor.execute(set_committer_to_null, (changed_affiliation['domain'], ))
db.commit()
# Update the last fetched date, so we know where to start next time.
update_affiliations_date = ("UPDATE settings SET value=%s "
"WHERE setting = 'affiliations_processed'")
cursor.execute(update_affiliations_date, (affiliations_fetched, ))
db.commit()
# On to the aliases, now
# First, get the time we started fetching since we'll need it later
cursor.execute("SELECT current_timestamp(6) as fetched")
aliases_fetched = cursor.fetchone()['fetched']
# Now find the last time we worked on aliases, to figure out what's new
aliases_processed = get_setting('aliases_processed')
get_changed_aliases = ("SELECT alias FROM aliases WHERE "
"last_modified >= %s")
cursor_people.execute(get_changed_aliases, (aliases_processed, ))
changed_aliases = list(cursor_people)
# Process any aliases which changed since we last checked
for changed_alias in changed_aliases:
log_activity('Debug','Resetting affiliation for %s' %
changed_alias['alias'])
set_author_to_null = ("UPDATE analysis_data SET author_affiliation = NULL "
"WHERE author_raw_email LIKE CONCAT('%%',%s)")
cursor.execute(set_author_to_null,(changed_alias['alias'], ))
db.commit()
set_committer_to_null = ("UPDATE analysis_data SET committer_affiliation = NULL "
"WHERE committer_raw_email LIKE CONCAT('%%',%s)")
cursor.execute(set_committer_to_null, (changed_alias['alias'], ))
db.commit()
reset_author = ("UPDATE analysis_data "
"SET author_email = %s "
"WHERE author_raw_email = %s")
cursor.execute(reset_author, (discover_alias(changed_alias['alias']),changed_alias['alias']))
db.commit()
reset_committer = ("UPDATE analysis_data "
"SET committer_email = %s "
"WHERE committer_raw_email = %s")
cursor.execute(reset_committer, (discover_alias(changed_alias['alias']),changed_alias['alias']))
db.commit()
# Update the last fetched date, so we know where to start next time.
update_aliases_date = ("UPDATE settings SET value=%s "
"WHERE setting = 'aliases_processed'")
cursor.execute(update_aliases_date, (aliases_fetched, ))
db.commit()
# Now rebuild the affiliation data
working_author = get_setting('working_author')
if working_author != 'done':
log_activity('Error','Trimming author data in affiliations: %s' %
working_author)
trim_author(working_author)
# Figure out which projects have NULL affiliations so they can be recached
set_recache = ("UPDATE projects p "
"JOIN repos r ON p.id = r.projects_id "
"JOIN analysis_data a ON r.id = a.repos_id "
"SET recache=TRUE WHERE "
"author_affiliation IS NULL OR "
"committer_affiliation IS NULL")
cursor.execute(set_recache)
db.commit()
# Find any authors with NULL affiliations and fill them
find_null_authors = ("SELECT DISTINCT author_email AS email, "
"MIN(author_date) AS earliest "
"FROM analysis_data "
"WHERE author_affiliation IS NULL "
"GROUP BY author_email")
cursor.execute(find_null_authors)
null_authors = list(cursor)
log_activity('Debug','Found %s authors with NULL affiliation' %
len(null_authors))
for null_author in null_authors:
email = null_author['email']
store_working_author(email)
discover_null_affiliations('author',email)
store_working_author('done')
# Find any committers with NULL affiliations and fill them
find_null_committers = ("SELECT DISTINCT committer_email AS email, "
"MIN(committer_date) AS earliest "
"FROM analysis_data "
"WHERE committer_affiliation IS NULL "
"GROUP BY committer_email")
cursor.execute(find_null_committers)
null_committers = list(cursor)
log_activity('Debug','Found %s committers with NULL affiliation' %
len(null_committers))
for null_committer in null_committers:
email = null_committer['email']
store_working_author(email)
discover_null_affiliations('committer',email)
# Now that we've matched as much as possible, fill the rest as (Unknown)
fill_unknown_author = ("UPDATE analysis_data "
"SET author_affiliation = '(Unknown)' "
"WHERE author_affiliation IS NULL")
cursor.execute(fill_unknown_author)
db.commit()
fill_unknown_committer = ("UPDATE analysis_data "
"SET committer_affiliation = '(Unknown)' "
"WHERE committer_affiliation IS NULL")
cursor.execute(fill_unknown_committer)
db.commit()
store_working_author('done')
log_activity('Info','Filling empty affiliations (complete)')
def invalidate_caches():
# Invalidate all caches
update_status('Invalidating caches')
log_activity('Info','Invalidating caches')
invalidate_cache = "UPDATE projects SET recache = TRUE"
cursor.execute(invalidate_cache)
db.commit()
log_activity('Info','Invalidating caches (complete)')
def rebuild_unknown_affiliation_and_web_caches():
# When there's a lot of analysis data, calculating display data on the fly gets
# pretty expensive. Instead, we crunch the data based upon the user's preferred
# statistics (author or committer) and store them. We also store all records
# with an (Unknown) affiliation for display to the user.
update_status('Caching data for display')
log_activity('Info','Caching unknown affiliations and web data for display')
report_date = get_setting('report_date')
report_attribution = get_setting('report_attribution')
# Clear stale caches
clear_project_weekly_cache = ("DELETE c.* FROM project_weekly_cache c "
"JOIN projects p ON c.projects_id = p.id WHERE "
"p.recache=TRUE")
cursor.execute(clear_project_weekly_cache)
db.commit()
clear_project_monthly_cache = ("DELETE c.* FROM project_monthly_cache c "
"JOIN projects p ON c.projects_id = p.id WHERE "
"p.recache=TRUE")
cursor.execute(clear_project_monthly_cache)
db.commit()
clear_project_annual_cache = ("DELETE c.* FROM project_annual_cache c "
"JOIN projects p ON c.projects_id = p.id WHERE "
"p.recache=TRUE")
cursor.execute(clear_project_annual_cache)
db.commit()
clear_repo_weekly_cache = ("DELETE c.* FROM repo_weekly_cache c "
"JOIN repos r ON c.repos_id = r.id "
"JOIN projects p ON r.projects_id = p.id WHERE "
"p.recache=TRUE")
cursor.execute(clear_repo_weekly_cache)
db.commit()
clear_repo_monthly_cache = ("DELETE c.* FROM repo_monthly_cache c "
"JOIN repos r ON c.repos_id = r.id "
"JOIN projects p ON r.projects_id = p.id WHERE "
"p.recache=TRUE")
cursor.execute(clear_repo_monthly_cache)
db.commit()
clear_repo_annual_cache = ("DELETE c.* FROM repo_annual_cache c "
"JOIN repos r ON c.repos_id = r.id "
"JOIN projects p ON r.projects_id = p.id WHERE "
"p.recache=TRUE")
cursor.execute(clear_repo_annual_cache)
db.commit()
clear_unknown_cache = ("DELETE c.* FROM unknown_cache c "
"JOIN projects p ON c.projects_id = p.id WHERE "
"p.recache=TRUE")
cursor.execute(clear_unknown_cache)
db.commit()
log_activity('Verbose','Caching unknown authors and committers')
# Cache the unknown authors
unknown_authors = ("INSERT INTO unknown_cache "
"SELECT 'author', "
"r.projects_id, "
"a.author_email, "
"SUBSTRING_INDEX(a.author_email,'@',-1), "
"SUM(a.added) "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"WHERE a.author_affiliation = '(Unknown)' "
"AND p.recache = TRUE "
"GROUP BY r.projects_id,a.author_email")
cursor.execute(unknown_authors)
db.commit()
# Cache the unknown committers
unknown_committers = ("INSERT INTO unknown_cache "
"SELECT 'committer', "
"r.projects_id, "
"a.committer_email, "
"SUBSTRING_INDEX(a.committer_email,'@',-1), "
"SUM(a.added) "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"WHERE a.committer_affiliation = '(Unknown)' "
"AND p.recache = TRUE "
"GROUP BY r.projects_id,a.committer_email")
cursor.execute(unknown_committers)
db.commit()
# Start caching by project
log_activity('Verbose','Caching projects')
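	# In each caching query below, the LEFT JOIN against the exclude table combined with the
	# "WHERE e.email IS NULL AND e.domain IS NULL" filter acts as an anti-join: commits whose
	# author email matches an excluded address or domain (for that project, or globally via
	# projects_id = 0) are left out of the cache.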
cache_projects_by_week = ("INSERT INTO project_weekly_cache "
"SELECT r.projects_id AS projects_id, "
"a.%s_email AS email, "
"a.%s_affiliation AS affiliation, "
"WEEK(a.%s_date) AS week, "
"YEAR(a.%s_date) AS year, "
"SUM(a.added) AS added, "
"SUM(a.removed) AS removed, "
"SUM(a.whitespace) AS whitespace, "
"COUNT(DISTINCT a.filename) AS files, "
"COUNT(DISTINCT a.commit) AS patches "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"LEFT JOIN exclude e ON "
"(a.author_email = e.email "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"OR (a.author_email LIKE CONCAT('%%',e.domain) "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"WHERE e.email IS NULL "
"AND e.domain IS NULL "
"AND p.recache = TRUE "
"GROUP BY week, "
"year, "
"affiliation, "
"a.%s_email,"
"projects_id"
% (report_attribution,report_attribution,
report_date,report_date,report_attribution))
cursor.execute(cache_projects_by_week)
db.commit()
cache_projects_by_month = ("INSERT INTO project_monthly_cache "
"SELECT r.projects_id AS projects_id, "
"a.%s_email AS email, "
"a.%s_affiliation AS affiliation, "
"MONTH(a.%s_date) AS month, "
"YEAR(a.%s_date) AS year, "
"SUM(a.added) AS added, "
"SUM(a.removed) AS removed, "
"SUM(a.whitespace) AS whitespace, "
"COUNT(DISTINCT a.filename) AS files, "
"COUNT(DISTINCT a.commit) AS patches "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"LEFT JOIN exclude e ON "
"(a.author_email = e.email "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"OR (a.author_email LIKE CONCAT('%%',e.domain) "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"WHERE e.email IS NULL "
"AND e.domain IS NULL "
"AND p.recache = TRUE "
"GROUP BY month, "
"year, "
"affiliation, "
"a.%s_email,"
"projects_id"
% (report_attribution,report_attribution,
report_date,report_date,report_attribution))
cursor.execute(cache_projects_by_month)
db.commit()
cache_projects_by_year = ("INSERT INTO project_annual_cache "
"SELECT r.projects_id AS projects_id, "
"a.%s_email AS email, "
"a.%s_affiliation AS affiliation, "
"YEAR(a.%s_date) AS year, "
"SUM(a.added) AS added, "
"SUM(a.removed) AS removed, "
"SUM(a.whitespace) AS whitespace, "
"COUNT(DISTINCT a.filename) AS files, "
"COUNT(DISTINCT a.commit) AS patches "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"LEFT JOIN exclude e ON "
"(a.author_email = e.email "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"OR (a.author_email LIKE CONCAT('%%',e.domain) "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"WHERE e.email IS NULL "
"AND e.domain IS NULL "
"AND p.recache = TRUE "
"GROUP BY year, "
"affiliation, "
"a.%s_email,"
"projects_id"
% (report_attribution,report_attribution,
report_date,report_attribution))
cursor.execute(cache_projects_by_year)
db.commit()
# Start caching by repo
log_activity('Verbose','Caching repos')
cache_repos_by_week = ("INSERT INTO repo_weekly_cache "
"SELECT a.repos_id AS repos_id, "
"a.%s_email AS email, "
"a.%s_affiliation AS affiliation, "
"WEEK(a.%s_date) AS week, "
"YEAR(a.%s_date) AS year, "
"SUM(a.added) AS added, "
"SUM(a.removed) AS removed, "
"SUM(a.whitespace) AS whitespace, "
"COUNT(DISTINCT a.filename) AS files, "
"COUNT(DISTINCT a.commit) AS patches "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"LEFT JOIN exclude e ON "
"(a.author_email = e.email "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"OR (a.author_email LIKE CONCAT('%%',e.domain) "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"WHERE e.email IS NULL "
"AND e.domain IS NULL "
"AND p.recache = TRUE "
"GROUP BY week, "
"year, "
"affiliation, "
"a.%s_email,"
"repos_id"
% (report_attribution,report_attribution,
report_date,report_date,report_attribution))
cursor.execute(cache_repos_by_week)
db.commit()
cache_repos_by_month = ("INSERT INTO repo_monthly_cache "
"SELECT a.repos_id AS repos_id, "
"a.%s_email AS email, "
"a.%s_affiliation AS affiliation, "
"MONTH(a.%s_date) AS month, "
"YEAR(a.%s_date) AS year, "
"SUM(a.added) AS added, "
"SUM(a.removed) AS removed, "
"SUM(a.whitespace) AS whitespace, "
"COUNT(DISTINCT a.filename) AS files, "
"COUNT(DISTINCT a.commit) AS patches "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"LEFT JOIN exclude e ON "
"(a.author_email = e.email "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"OR (a.author_email LIKE CONCAT('%%',e.domain) "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"WHERE e.email IS NULL "
"AND e.domain IS NULL "
"AND p.recache = TRUE "
"GROUP BY month, "
"year, "
"affiliation, "
"a.%s_email,"
"repos_id"
% (report_attribution,report_attribution,
report_date,report_date,report_attribution))
cursor.execute(cache_repos_by_month)
db.commit()
cache_repos_by_year = ("INSERT INTO repo_annual_cache "
"SELECT a.repos_id AS repos_id, "
"a.%s_email AS email, "
"a.%s_affiliation AS affiliation, "
"YEAR(a.%s_date) AS year, "
"SUM(a.added) AS added, "
"SUM(a.removed) AS removed, "
"SUM(a.whitespace) AS whitespace, "
"COUNT(DISTINCT a.filename) AS files, "
"COUNT(DISTINCT a.commit) AS patches "
"FROM analysis_data a "
"JOIN repos r ON r.id = a.repos_id "
"JOIN projects p ON p.id = r.projects_id "
"LEFT JOIN exclude e ON "
"(a.author_email = e.email "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"OR (a.author_email LIKE CONCAT('%%',e.domain) "
"AND (e.projects_id = r.projects_id "
"OR e.projects_id = 0)) "
"WHERE e.email IS NULL "
"AND e.domain IS NULL "
"AND p.recache = TRUE "
"GROUP BY year, "
"affiliation, "
"a.%s_email,"
"repos_id"
% (report_attribution,report_attribution,
report_date,report_attribution))
cursor.execute(cache_repos_by_year)
db.commit()
# Reset cache flags
reset_recache = "UPDATE projects SET recache = FALSE"
cursor.execute(reset_recache)
db.commit()
log_activity('Info','Caching unknown affiliations and web data for display (complete)')
### The real program starts here ###
# Set up the database
try:
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__),'db.cfg'))
# Read in the general connection info
db_user = config['main_database']['user']
db_pass = config['main_database']['pass']
db_name = config['main_database']['name']
db_host = config['main_database']['host']
# Read in the people connection info
db_user_people = config['people_database']['user']
db_pass_people = config['people_database']['pass']
db_name_people = config['people_database']['name']
db_host_people = config['people_database']['host']
except:
# If the config import fails, check if there's an older style db.py
db_user,db_pass,db_name,db_host,db_user_people,db_pass_people,db_name_people,db_host_people = migrate_database_config()
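# db.cfg is expected to contain the two sections read above; the values here are only
# illustrative:
#
# [main_database]
# user = facade
# pass = secret
# name = facade
# host = localhost
#
# [people_database]
# user = facade
# pass = secret
# name = facade
# host = localhost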
# Open a general-purpose connection
db,cursor = database_connection(
db_host,
db_user,
db_pass,
db_name)
# Open a connection for the people database
db_people,cursor_people = database_connection(
db_host_people,
db_user_people,
db_pass_people,
db_name_people)
# Figure out how much we're going to log
log_level = get_setting('log_level')
# Check if the database is current and update it if necessary
try:
current_db = int(get_setting('database_version'))
except:
# Catch databases which existed before database versioning
current_db = -1
if current_db < upstream_db:
print(("Current database version: %s\nUpstream database version %s\n" %
(current_db, upstream_db)))
	update_db(current_db)
# Figure out what we need to do
limited_run = 0
delete_marked_repos = 0
pull_repos = 0
clone_repos = 0
check_updates = 0
force_updates = 0
run_analysis = 0
force_analysis = 0
nuke_stored_affiliations = 0
fix_affiliations = 0
force_invalidate_caches = 0
rebuild_caches = 0
create_xlsx_summary_files = 0
multithreaded = 1
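# Parse the command-line options. For example, './facade-worker.py -c -p' would clone any
# newly added repos and then pull updates for the rest; with no options the full pipeline runs.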
opts,args = getopt.getopt(sys.argv[1:],'hdpcuUaAmnfIrx')
for opt in opts:
if opt[0] == '-h':
print("\nfacade-worker.py does everything by default except invalidating caches\n"
"and forcing updates, unless invoked with one of the following options.\n"
"In those cases, it will only do what you have selected.\n\n"
"Options:\n"
" -d Delete marked repos\n"
" -c Run 'git clone' on new repos\n"
" -u Check if any repos should be marked for updating\n"
" -U Force all repos to be marked for updating\n"
" -p Run 'git pull' on repos\n"
" -a Analyze git repos\n"
" -A Force all repos to be analyzed\n"
" -m Disable multithreaded mode (but why?)\n"
" -n Nuke stored affiliations (if mappings modified by hand)\n"
" -f Fill empty affiliations\n"
" -I Invalidate caches\n"
" -r Rebuild unknown affiliation and web caches\n"
" -x Create Excel summary files\n\n")
sys.exit(0)
elif opt[0] == '-d':
delete_marked_repos = 1
limited_run = 1
log_activity('Info','Option set: delete marked repos.')
elif opt[0] == '-c':
clone_repos = 1
limited_run = 1
log_activity('Info','Option set: clone new repos.')
elif opt[0] == '-u':
check_updates = 1
limited_run = 1
log_activity('Info','Option set: checking for repo updates')
elif opt[0] == '-U':
force_updates = 1
log_activity('Info','Option set: forcing repo updates')
elif opt[0] == '-p':
pull_repos = 1
limited_run = 1
log_activity('Info','Option set: update repos.')
elif opt[0] == '-a':
run_analysis = 1
limited_run = 1
log_activity('Info','Option set: running analysis.')
elif opt[0] == '-A':
force_analysis = 1
run_analysis = 1
limited_run = 1
log_activity('Info','Option set: forcing analysis.')
elif opt[0] == '-m':
multithreaded = 0
log_activity('Info','Option set: disabling multithreading.')
elif opt[0] == '-n':
nuke_stored_affiliations = 1
limited_run = 1
log_activity('Info','Option set: nuking all affiliations')
elif opt[0] == '-f':
fix_affiliations = 1
limited_run = 1
log_activity('Info','Option set: fixing affiliations.')
elif opt[0] == '-I':
force_invalidate_caches = 1
limited_run = 1
log_activity('Info','Option set: Invalidate caches.')
elif opt[0] == '-r':
rebuild_caches = 1
limited_run = 1
log_activity('Info','Option set: rebuilding caches.')
elif opt[0] == '-x':
create_xlsx_summary_files = 1
limited_run = 1
log_activity('Info','Option set: creating Excel summary files.')
# Get the location of the directory where git repos are stored
repo_base_directory = get_setting('repo_directory')
# Determine if it's safe to start the script
current_status = get_setting('utility_status')
if current_status != 'Idle':
log_activity('Error','Something is already running, aborting maintenance '
'and analysis.\nIt is unsafe to continue.')
sys.exit(1)
if len(repo_base_directory) == 0:
log_activity('Error','No base directory. It is unsafe to continue.')
update_status('Failed: No base directory')
sys.exit(1)
# Begin working
start_time = time.time()
log_activity('Quiet','Running facade-worker.py')
if not limited_run or (limited_run and delete_marked_repos):
git_repo_cleanup()
if not limited_run or (limited_run and clone_repos):
git_repo_initialize()
if not limited_run or (limited_run and check_updates):
check_for_repo_updates()
if force_updates:
force_repo_updates()
if not limited_run or (limited_run and pull_repos):
git_repo_updates()
if force_analysis:
force_repo_analysis()
if not limited_run or (limited_run and run_analysis):
analysis()
if nuke_stored_affiliations:
nuke_affiliations()
if not limited_run or (limited_run and fix_affiliations):
fill_empty_affiliations()
if force_invalidate_caches:
invalidate_caches()
if not limited_run or (limited_run and rebuild_caches):
rebuild_unknown_affiliation_and_web_caches()
if not limited_run or (limited_run and create_xlsx_summary_files):
log_activity('Info','Creating summary Excel files')
from excel_generators import *
log_activity('Info','Creating summary Excel files (complete)')
# All done
update_status('Idle')
log_activity('Quiet','facade-worker.py completed')
elapsed_time = time.time() - start_time
print('\nCompleted in %s\n' % datetime.timedelta(seconds=int(elapsed_time)))
cursor.close()
cursor_people.close()
db.close()
db_people.close()
| 28.916123
| 120
| 0.712992
|
1bb7b64c48e1c04a817fcc5f49cf44b11d5de788
| 838
|
py
|
Python
|
src/tests/ftest/daos_perf/large.py
|
fedepad/daos
|
ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa
|
[
"BSD-2-Clause-Patent"
] | 429
|
2016-09-28T20:43:20.000Z
|
2022-03-25T01:22:50.000Z
|
src/tests/ftest/daos_perf/large.py
|
fedepad/daos
|
ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa
|
[
"BSD-2-Clause-Patent"
] | 6,341
|
2016-11-24T12:34:26.000Z
|
2022-03-31T23:53:46.000Z
|
src/tests/ftest/daos_perf/large.py
|
fedepad/daos
|
ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa
|
[
"BSD-2-Clause-Patent"
] | 202
|
2016-10-30T14:47:53.000Z
|
2022-03-30T21:29:11.000Z
|
#!/usr/bin/python3
"""
(C) Copyright 2019-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from daos_perf_base import DaosPerfBase
class DaosPerfLarge(DaosPerfBase):
# pylint: disable=too-many-ancestors
"""Tests daos_perf with different config.
:avocado: recursive
"""
def test_large(self):
"""Jira ID: DAOS-1714.
Test Description:
Large daos_perf test for performance purpose.
Use Case:
Run daos_perf for scm and nvme.
Run daos_perf with 'EC2P1' object class.
            Run the combination of the above test cases with a large number of clients
on four servers.
:avocado: tags=all,full_regression
:avocado: tags=hw,large
:avocado: tags=daosperf,daosperflarge
"""
self.run_daos_perf()
| 25.393939
| 78
| 0.647971
|
4d4d055ec200bf1151fa7d9a6a860f04e7c8c658
| 1,686
|
py
|
Python
|
PythonAPI/carissma_project/lib/python3.5/site-packages/numpy/conftest.py
|
AbdulHoffmann/carla_carissma
|
8d382769ffa02a6c61a22c57160285505f5ff0a4
|
[
"MIT"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
PythonAPI/carissma_project/lib/python3.5/site-packages/numpy/conftest.py
|
AbdulHoffmann/carla_carissma
|
8d382769ffa02a6c61a22c57160285505f5ff0a4
|
[
"MIT"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
PythonAPI/carissma_project/lib/python3.5/site-packages/numpy/conftest.py
|
AbdulHoffmann/carla_carissma
|
8d382769ffa02a6c61a22c57160285505f5ff0a4
|
[
"MIT"
] | 64
|
2018-04-25T08:51:57.000Z
|
2022-01-29T14:13:57.000Z
|
"""
Pytest configuration and fixtures for the Numpy test suite.
"""
from __future__ import division, absolute_import, print_function
import pytest
import numpy
from numpy.core._multiarray_tests import get_fpu_mode
_old_fpu_mode = None
_collect_results = {}
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
"""
Check FPU precision mode was not changed during test collection.
The clumsy way we do it here is mainly necessary because numpy
still uses yield tests, which can execute code at test collection
time.
"""
global _old_fpu_mode
mode = get_fpu_mode()
if _old_fpu_mode is None:
_old_fpu_mode = mode
elif mode != _old_fpu_mode:
_collect_results[item] = (_old_fpu_mode, mode)
_old_fpu_mode = mode
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
"""
Check FPU precision mode was not changed during the test.
"""
old_mode = get_fpu_mode()
yield
new_mode = get_fpu_mode()
if old_mode != new_mode:
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
" during the test".format(old_mode, new_mode))
collect_result = _collect_results.get(request.node)
if collect_result is not None:
old_mode, new_mode = collect_result
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
" when collecting the test".format(old_mode,
new_mode))
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace['np'] = numpy
| 27.639344
| 79
| 0.661329
|
559a0200712d527492760f01dd88691e8ef4b8a6
| 3,258
|
py
|
Python
|
actor_critic_discrete.py
|
OmarOsman/Reinforcement-Learning-Trading-
|
19bb3eba31fd53fe699543584d06003ff38123ae
|
[
"MIT"
] | 4
|
2019-12-22T12:54:33.000Z
|
2021-06-01T07:12:34.000Z
|
actor_critic_discrete.py
|
damonclifford/Reinforcement-Learning-Trading-
|
19bb3eba31fd53fe699543584d06003ff38123ae
|
[
"MIT"
] | null | null | null |
actor_critic_discrete.py
|
damonclifford/Reinforcement-Learning-Trading-
|
19bb3eba31fd53fe699543584d06003ff38123ae
|
[
"MIT"
] | 1
|
2020-02-14T01:35:30.000Z
|
2020-02-14T01:35:30.000Z
|
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from itertools import count
from collections import namedtuple
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class ActorCriticNetwork(nn.Module):
def __init__(self, alpha, input_dims, fc1_dims, fc2_dims,
n_actions):
super(ActorCriticNetwork, self).__init__()
self.input_dims = input_dims
self.fc1_dims = fc1_dims
self.fc2_dims = fc2_dims
self.n_actions = n_actions
self.fc1 = nn.Linear(np.prod(self.input_dims), self.fc1_dims)
self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
self.pi = nn.Linear(self.fc2_dims, n_actions)
self.v = nn.Linear(self.fc2_dims, 1)
self.optimizer = optim.Adam(self.parameters(), lr=alpha)
self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu:0')
self.to(self.device)
def forward(self, observation):
state = T.Tensor(observation).to(self.device)
state = state.flatten()
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
pi = self.pi(x)
v = self.v(x)
return (pi, v)
class Agent(object):
""" Agent class for use with a single actor critic network that shares
the lowest layers. For use with more complex environments
"""
def __init__(self, alpha, input_dims, gamma=0.99,
layer1_size=32, layer2_size=16, n_actions=2):
self.gamma = gamma
self.eps = np.finfo(np.float32).eps.item()
self.actor_critic = ActorCriticNetwork(alpha, input_dims, layer1_size,layer2_size, n_actions=n_actions)
self.saved_actions = []
self.rewards = []
self.losses = []
def choose_action(self, observation):
probabilities, state_value = self.actor_critic.forward(observation)
probabilities = F.softmax(probabilities,dim = -1)
action_probs = T.distributions.Categorical(probabilities)
action = action_probs.sample()
log_probs = action_probs.log_prob(action)
self.saved_actions.append(SavedAction(log_probs, state_value))
return action.item()
def learn(self, state, reward, done):
R = 0
saved_actions = self.saved_actions
policy_losses = []
value_losses = []
returns = []
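        # Walk the episode's rewards backwards to build discounted returns, R_t = r_t + gamma * R_{t+1},
        # then standardize them (zero mean, unit variance) to reduce the variance of the policy gradient.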
for r in self.rewards[::-1]:
R = r + self.gamma * R
returns.insert(0, R)
returns = T.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + self.eps)
for (log_prob, value), R in zip(saved_actions, returns):
advantage = R - value.item()
policy_losses.append(-log_prob * advantage)
value_losses.append(F.smooth_l1_loss(value, T.tensor([R]).to(self.actor_critic.device)))
self.actor_critic.optimizer.zero_grad()
loss = T.stack(policy_losses).sum() + T.stack(value_losses).sum()
self.losses.append(loss.item())
loss.backward()
self.actor_critic.optimizer.step()
del self.rewards[:]
del self.saved_actions[:]
| 36.2
| 113
| 0.616022
|
58d4617517bafff5b405eb09e96082fc79db3c20
| 611
|
py
|
Python
|
Project/Post/models.py
|
AashishKhanal69/PeakyBlinders_ADC7_PartII
|
a4474c02be4ee8f8405b51df2f1d215e56ac192d
|
[
"bzip2-1.0.6"
] | null | null | null |
Project/Post/models.py
|
AashishKhanal69/PeakyBlinders_ADC7_PartII
|
a4474c02be4ee8f8405b51df2f1d215e56ac192d
|
[
"bzip2-1.0.6"
] | null | null | null |
Project/Post/models.py
|
AashishKhanal69/PeakyBlinders_ADC7_PartII
|
a4474c02be4ee8f8405b51df2f1d215e56ac192d
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.db import models
from django.conf import settings
# Create your models here.
class Post(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.PROTECT)
title = models.CharField(max_length=120)
content = models.TextField()
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
files = models.FileField(upload_to='')
def __str__(self):
return self.title
| 35.941176
| 91
| 0.636661
|
2193511567c8f90fbfe2f4cb6d7fa0c8a2ee91b6
| 23,926
|
py
|
Python
|
spear/Implyloss/my_model.py
|
ayushbits/spear
|
22d55b5f707c01321a1ebe39aa5d2d554ea8e017
|
[
"MIT"
] | 1
|
2021-11-08T07:52:03.000Z
|
2021-11-08T07:52:03.000Z
|
spear/Implyloss/my_model.py
|
ayushbits/spear
|
22d55b5f707c01321a1ebe39aa5d2d554ea8e017
|
[
"MIT"
] | null | null | null |
spear/Implyloss/my_model.py
|
ayushbits/spear
|
22d55b5f707c01321a1ebe39aa5d2d554ea8e017
|
[
"MIT"
] | null | null | null |
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import functools
import math
import numpy as np
import sys,os
from my_checkpoints import MRUCheckpoint, CheckpointsFactory
from my_data_types import *
import my_gen_cross_entropy_utils as gcross_utils
# from my_gen_cross_entropy_utils import *
import my_pr_utils
from my_test import HLSTest
from my_train import HLSTrain
from my_utils import print_tf_global_variables, updated_theta_copy
class HighLevelSupervisionNetwork:
# Parameters
display_step = 1
'''
Class Desc:
Initialize HLS with number of input features, number of classes, number of rules and the f and the w network.
f network is the classification network (P_{theta})
w network is the rule network (P_{j,phi})
'''
def __init__(self, num_features, num_classes, num_rules,
num_rules_to_train, rule_classes,
w_network, f_network,
f_d_epochs, f_d_U_epochs, f_d_adam_lr, f_d_U_adam_lr, dropout_keep_prob,
f_d_metrics_pickle, f_d_U_metrics_pickle, early_stopping_p, f_d_primary_metric, mode, data_dir,
tensorboard_dir, checkpoint_dir, checkpoint_load_mode, gamma, lamda, raw_d_x=None, raw_d_L=None):
'''
Func Desc:
initializes the class member variables with the provided arguments
Input:
self
num_features
num_classes
num_rules
num_rules_to_train
rule_classes
w_network
f_network
raw_d_x (default = None)
raw_d_L (default = None)
Output:
'''
# Modules for testing/training
self.mode = mode
self.gamma = gamma
self.lamda = lamda
self.raw_d_x = raw_d_x
self.raw_d_L = raw_d_L
self.rule_classes_list = rule_classes
self.rule_classes = tf.convert_to_tensor(rule_classes)
self.num_features = num_features
self.num_classes = num_classes
self.num_rules = num_rules
self.num_rules_to_train = num_rules_to_train
self.w_var_scope = 'w_network'
self.f_var_scope = 'f_network'
self.w_network = functools.partial(w_network, self.w_var_scope,
self.num_rules)
self.f_network = functools.partial(f_network, self.f_var_scope)
self.parse_params(f_d_epochs, f_d_U_epochs, f_d_adam_lr, f_d_U_adam_lr)
# Save global step for each different kind of run
self.global_steps = {}
# Global global step
self.global_step = tf.train.get_or_create_global_step()
# Create the compute graphs
# dropout rate used in f and w networks
self.dropout_keep_prob = tf.placeholder(tf.float32,name="keep_prob")
self.dropout_train_dict = {self.dropout_keep_prob: dropout_keep_prob}
self.dropout_test_dict = {self.dropout_keep_prob: 1.0}
self.train = HLSTrain(self, f_d_metrics_pickle,
f_d_U_metrics_pickle,
f_d_adam_lr,
f_d_U_adam_lr,
early_stopping_p,
f_d_primary_metric,
mode,
data_dir)
self.test = HLSTest(self)
self.make_f_d_train_ops()
self.make_f_d_U_train_ops()
# Print all global variables
print_tf_global_variables()
# Initialize all variables
self.init = tf.global_variables_initializer()
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_config)
self.writer = tf.summary.FileWriter(tensorboard_dir, self.sess.graph)
# Now that all variables and the session is created, create a
# checkpoint saver. We use a single saver for all variables
self.mru_saver = MRUCheckpoint(checkpoint_dir, self.sess, tf.global_variables())
self.best_savers = CheckpointsFactory(self.sess, self.global_steps)
feed_dict = {}
self.sess.run(self.init, feed_dict=feed_dict)
if checkpoint_load_mode == 'mru':
self.restored = self.mru_saver.restore_if_checkpoint_exists()
else:
saver = self.best_savers.get_best_saver(checkpoint_load_mode)
self.restored = saver.restore_best_checkpoint_if_exists()
if not self.restored:
raise ValueError('Asked to restore best checkpoint of %s but not previously checkpointed' %
checkpoint_load_mode)
def parse_params(self, f_d_epochs, f_d_U_epochs, f_d_adam_lr, f_d_U_adam_lr):
'''
Func Desc:
Parses the given parameters
Input:
self
Sets:
f_d_epochs
f_d_U_epochs
initial_f_d_adam_lr
initial_f_d_U_adam_lr
'''
self.f_d_epochs = f_d_epochs
self.f_d_U_epochs = f_d_U_epochs
self.initial_f_d_adam_lr = f_d_adam_lr
self.initial_f_d_U_adam_lr = f_d_U_adam_lr
# Create the train op for training with d only
def make_f_d_train_ops(self):
'''
Func Desc:
create the train_ops based on labelled data only
Input:
self
Output:
'''
self.f_d_global_step = tf.Variable(0, trainable=False, name='f_d_global_step')
inc_f_d_global_step = tf.assign_add(self.f_d_global_step, 1)
self.global_steps[f_d] = self.f_d_global_step
        self.f_d_adam_lr = tf.placeholder(tf.float32,name='f_d_adam_lr')
# [batch size, features]
self.f_x = tf.placeholder(tf.float32, shape=[None, self.num_features],
name='f_d_x')
self.f_d_labels = tf.placeholder(tf.float32, shape=[None,
self.num_classes], name='f_d_labels')
f_dict = {'x': self.f_x, 'labels': self.f_d_labels}
self.f_d_logits = self.f_network(f_dict, self.num_classes,
dropout_keep_prob=self.dropout_keep_prob)
self.f_d_probs = tf.math.softmax(self.f_d_logits, axis=-1)
self.f_d_preds = tf.argmax(self.f_d_probs, axis=-1)
model_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=self.f_d_labels, logits=self.f_d_logits)
self.f_d_loss = tf.reduce_mean(model_loss)
self.f_d_optimizer = tf.train.AdamOptimizer(
learning_rate=self.f_d_adam_lr,
name='f_d_Adam')
with tf.control_dependencies([inc_f_d_global_step]):
self.f_d_train_op = self.f_d_optimizer.minimize(self.f_d_loss, global_step=self.global_step)
def make_f_d_U_train_ops(self):
self.f_d_U_global_step = tf.Variable(0, trainable=False, name='f_d_U_global_step')
inc_f_d_U_global_step = tf.assign_add(self.f_d_U_global_step, 1)
self.global_steps[f_d_U] = self.f_d_U_global_step
'''
Func desc:
        Build the training ops that combine labelled and unlabelled data, compute the training objective, and minimize the loss using the Adam optimizer
Input:
self object
Sets:
* x : feature representation of instance
- shape : [batch_size, num_features]
* l : Labels assigned by rules
- shape [batch_size, num_rules]
- l[i][j] provides the class label provided by jth rule on ith instance
- if jth rule does not fire on ith instance, then l[i][j] = num_classes (convention)
- in snorkel, convention is to keep l[i][j] = -1, if jth rule doesn't cover ith instance
- class labels belong to {0, 1, 2, .. num_classes-1}
* m : Rule coverage mask
- A binary matrix of shape [batch_size, num_rules]
- m[i][j] = 1 if jth rule cover ith instance
- m[i][j] = 0 otherwise
* L : Instance labels
- shape : [batch_size, 1]
- L[i] = label of ith instance, if label is available i.e. if instance is from labeled set d
- Else, L[i] = num_clases if instances comes from the unlabeled set U
- class labels belong to {0, 1, 2, .. num_classes-1}
* d : binary matrix of shape [batch_size, 1]
- d[i] = 1 if instance belongs to labeled data (d), d[i]=0 otherwise
- d[i]=1 for all instances is from d_processed.p
- d[i]=0 for all instances in other 3 pickles {U,validation,test}_processed.p
* r : A binary matrix of shape [batch_size, num_rules]
- r[i][j]=1 if jth rule was associated with ith instance
- Highly sparse matrix
- r is a 0 matrix for all the pickles except d_processed.p
- Note that this is different from rule coverage matrix "m"
- This matrix defines the rule,example pairs provided as supervision
* s : A similarity measure matrix shape [batch_size, num_rules]
- s[i][j] is in [0,1]
* n : A vector of size [num_rules,]
- Mask for s (denotes whether particular rule is continuous or discrete)
* k : a vector of size [num_rules,]
- #LF classes ie., what class each LF correspond to, range: 0 to num_classes-1
Computes:
weights, w_logits of rule network ([batch_size, num_rules]) - Used to train P_j_phi(r_j/x_i) i.e. whether rij = 1 for the ith instance and jth rule
f_logits of the classification network - Used to train P_j_theta(l_j/x_i) i.e. the probability of ith instance belonging to jth class
LL_phi term
LL_theta term
Training objective term
Minimum loss using adam optimizer
'''
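        # As a small illustration of the conventions above: with num_rules = 3 and num_classes = 2,
        # a row l = [0, 2, 1] with coverage m = [1, 0, 1] means rule 0 labels the instance as class 0,
        # rule 1 does not cover it (hence the num_classes placeholder 2), and rule 2 labels it class 1.
        # d = 1 and L = 0 would additionally mark the instance as coming from the labeled set with
        # true class 0.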
self.f_d_U_adam_lr = tf.placeholder(tf.float32,name='f_d_U_adam_lr')
self.f_d_U_x = tf.placeholder(
tf.float32,
shape=[None, self.num_features],
name='f_d_U_x')
# l
self.f_d_U_l = tf.placeholder(
tf.int32,
shape=[None, self.num_rules],
name='f_d_U_l')
# m
self.f_d_U_m = tf.placeholder(tf.float32, shape=[None,
self.num_rules], name='f_d_U_m')
# L
L = self.f_d_U_L = tf.placeholder(tf.int32, shape=[None, 1], name='f_d_U_L')
# d
d = self.f_d_U_d = tf.placeholder(tf.float32, shape=[None, 1], name='f_d_U_d')
L = tf.squeeze(L)
d = tf.squeeze(d)
r = self.f_d_U_r = tf.placeholder(tf.float32, shape=[None,self.num_rules], name='f_d_U_r')
#weights: [batch_size, num_rules]
#w_logits: [batch_size, num_rules]
weights, w_logits = self.get_weights_and_logits_f_d_U(self.f_d_U_x)
self.f_d_U_weights = weights
self.f_d_U_num_d = tf.reduce_sum(d) #number of labeled instances in a batch
# w_network computation is done. Now run f_network to get logits for
# this batch
f_dict = {'x': self.f_d_U_x}
f_logits = self.f_network(f_dict, self.num_classes, reuse=True,
dropout_keep_prob=self.dropout_keep_prob)
self.f_d_U_probs = tf.math.softmax(f_logits, axis=-1)
self.f_d_U_preds = tf.argmax(self.f_d_U_probs, axis=-1)
self.joint_f_w_score = self.joint_scores_from_f_and_w(self.f_d_U_weights,self.f_d_U_m,self.f_d_U_probs)
# Do this so that the cross-entropy does not blow for data from U
# The actual value of cross-entropy for U does not matter since it
# will be multiplied by 0 anyway.
L = L % self.num_classes
# Ok now compute the loss LL_theta which is on d data
L_one_hot = tf.one_hot(L, self.num_classes)
LL_theta = tf.nn.softmax_cross_entropy_with_logits(logits=f_logits,
labels=L_one_hot)
LL_theta = d * LL_theta
        LL_theta = tf.reduce_mean(LL_theta) # loss of f network on labeled data d
        # first term in eqn 5 (LL(\theta))
# LL(\phi) term
LL_phi = self.compute_LL_phi(w_logits=w_logits,
weights=self.f_d_U_weights,
l=self.f_d_U_l,
m=self.f_d_U_m,
L=L,
d=d,
r=self.f_d_U_r)
f_cross_training_optimizer = tf.train.AdamOptimizer(
learning_rate=self.f_d_U_adam_lr,
name='f_d_U_Adam')
training_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if 'implication' == self.mode:
implication_loss = self.implication_loss(weights=self.f_d_U_weights,
f_probs=self.f_d_U_probs,
m=self.f_d_U_m,
rule_classes=self.rule_classes,
num_classes=self.num_classes,
d=d)
self.f_d_U_implication_loss = LL_phi \
+ LL_theta \
+ self.gamma*implication_loss
with tf.control_dependencies([inc_f_d_U_global_step, ]):
self.f_d_U_implication_op = f_cross_training_optimizer.minimize(
self.f_d_U_implication_loss,
var_list=training_var_list)
if 'pr_loss' == self.mode:
pr_loss = my_pr_utils.pr_loss(m=self.f_d_U_m,
f_logits=f_logits,
w_logits=w_logits,
f_probs=self.f_d_U_probs,
weights=self.f_d_U_weights,
rule_classes=self.rule_classes,
num_classes=self.num_classes,
C=0.1,
d=d)
self.pr_loss = LL_theta + LL_phi + self.gamma*pr_loss
with tf.control_dependencies([inc_f_d_U_global_step, ]):
self.pr_train_op = f_cross_training_optimizer.minimize(
self.pr_loss,
var_list=training_var_list)
if 'gcross' == self.mode:
self.f_d_U_snork_L = tf.placeholder(
tf.float32,
shape=[None, self.num_classes],
name='f_d_U_snork_L')
loss_on_d = LL_theta
loss_on_U = gcross_utils.generalized_cross_entropy(f_logits,self.f_d_U_snork_L,
self.lamda)
self.gcross_loss = loss_on_d + self.gamma*loss_on_U
with tf.control_dependencies([inc_f_d_U_global_step, ]):
self.gcross_train_op = f_cross_training_optimizer.minimize(
self.gcross_loss,
var_list=training_var_list)
if 'gcross_snorkel' == self.mode:
self.f_d_U_snork_L = tf.placeholder(
tf.float32,
shape=[None, self.num_classes],
name='f_d_U_snork_L')
loss_on_d = LL_theta
loss_on_U = gcross_utils.generalized_cross_entropy(f_logits,self.f_d_U_snork_L,
self.lamda)
self.snork_gcross_loss = loss_on_d + self.gamma*loss_on_U
#self.snork_gcross_loss = loss_on_d + loss_on_U
with tf.control_dependencies([inc_f_d_U_global_step, ]):
self.snork_gcross_train_op = f_cross_training_optimizer.minimize(
self.snork_gcross_loss,
var_list=training_var_list)
if 'label_snorkel' == self.mode or 'pure_snorkel' == self.mode:
self.f_d_U_snork_L = tf.placeholder(
tf.float32,
shape=[None, self.num_classes],
name='f_d_U_snork_L')
loss_on_d = LL_theta
self.pure_snorkel_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=self.f_d_U_snork_L,logits=f_logits)
self.pure_snorkel_loss = tf.reduce_mean(self.pure_snorkel_loss)
self.label_snorkel_loss = loss_on_d + self.gamma*self.pure_snorkel_loss
if 'label_snorkel' == self.mode:
with tf.control_dependencies([inc_f_d_U_global_step, ]):
self.label_snorkel_train_op = f_cross_training_optimizer.minimize(
self.label_snorkel_loss,
var_list=training_var_list)
if 'pure_snorkel' == self.mode:
with tf.control_dependencies([inc_f_d_U_global_step, ]):
self.pure_snorkel_train_op = f_cross_training_optimizer.minimize(
self.pure_snorkel_loss,
var_list=training_var_list)
if 'learn2reweight' == self.mode:
len_raw_d_x = len(self.raw_d_x)
raw_d_bs = min(len_raw_d_x,32)
raw_d_x = tf.get_variable(name="raw_d_x", initializer=self.raw_d_x, trainable=False)
raw_d_x = tf.to_float(raw_d_x)
raw_d_L = tf.get_variable(name="raw_d_L", initializer=self.raw_d_L, trainable=False)
raw_d_L = tf.to_int32(raw_d_L)
#raw_d_L = tf.expand_dims(raw_d_L,1)
batch_points = tf.random.uniform([raw_d_bs],minval=0,maxval=len_raw_d_x, dtype=tf.int32)
one_hot_batch_points_float = tf.one_hot(batch_points,len_raw_d_x,dtype=tf.float32)
batch_raw_d_x = tf.matmul(one_hot_batch_points_float,raw_d_x)
one_hot_batch_points_int = tf.one_hot(batch_points,len_raw_d_x,dtype=tf.int32)
batch_raw_d_L = tf.matmul(one_hot_batch_points_int,raw_d_L)
batch_raw_d_L = tf.squeeze(batch_raw_d_L)
self.f_d_U_snork_L = tf.placeholder(
tf.float32,
shape=[None, self.num_classes],
name='f_d_U_snork_L')
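            # The numbered steps below implement a learning-to-reweight scheme: per-example weights
            # (epsilon) start at zero, a virtual gradient step on the epsilon-weighted batch loss yields
            # theta_hat, a small clean batch from raw_d is evaluated under theta_hat, and the gradient
            # of that clean loss with respect to epsilon (truncated at zero and normalized) becomes the
            # weight of each example in the actual update.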
# 1. initialize epsilon
# [batch_size]
epsilon = tf.zeros(tf.shape(self.f_d_U_x)[0])
# 2. compute epsilon weighted loss (ewl) for batch
#[batch_size, num_classes]
f_logits = self.f_network(f_dict, self.num_classes, reuse=True,
dropout_keep_prob=self.dropout_keep_prob)
#[batch_size]
unweighted_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.f_d_U_snork_L,logits=f_logits)
weighted_loss = epsilon * unweighted_loss
weighted_loss = tf.reduce_sum(weighted_loss)
# 3. compute grads of ewl wrt thetas
thetas = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.f_var_scope)
grads_thetas = tf.gradients(ys=weighted_loss,xs=thetas,stop_gradients=epsilon)
# 4. update theta
theta_hat = updated_theta_copy(
grads=grads_thetas,
variables=thetas,
lr=self.lamda,
mode=-1)
# 5. compute unweighted loss on raw_d with updated theta (theta_hat)
f_dict_on_d = {'x': batch_raw_d_x}
f_logits_on_d = self.f_network(f_dict_on_d, self.num_classes,
reuse=False, ph_vars=theta_hat,
dropout_keep_prob=self.dropout_keep_prob)
raw_d_L_one_hot = tf.one_hot(batch_raw_d_L,self.num_classes,dtype=tf.float32)
unweighted_loss = tf.nn.softmax_cross_entropy_with_logits(labels=raw_d_L_one_hot,
logits=f_logits_on_d)
unweighted_loss = tf.reduce_mean(unweighted_loss)
# 6. compute grads of unweighted loss wrt epsilons
grad_epsilon = tf.gradients(ys=unweighted_loss,xs=epsilon,stop_gradients=thetas)[0]
#grad_epsilon = tf.Print(grad_epsilon,[grad_epsilon],message="\n\n\n grad_epsilon \n\n\n")
# 7. truncate and normalize grad-epsilons to get w
w_tilde = tf.nn.relu(-grad_epsilon)
w_norm = w_tilde/(tf.reduce_sum(w_tilde) + 1e-25)
#w_norm = tf.Print(w_norm,[w_norm],message="\n\n\n w_norm \n\n\n")
# 8. Compute ewl for batch with original theta and normalized weights
f_logits = self.f_network(f_dict,self.num_classes,reuse=True,
dropout_keep_prob=self.dropout_keep_prob)
unweighted_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=self.f_d_U_snork_L,logits=f_logits)
w_norm = tf.stop_gradient(w_norm)
weighted_loss = w_norm * unweighted_loss
weighted_loss = tf.reduce_sum(weighted_loss)
self.l2r_loss = weighted_loss
with tf.control_dependencies([inc_f_d_U_global_step, ]):
# 9. Compute grads of ewl wrt to original theta to obtain Update theta operation
self.l2r_train_op = f_cross_training_optimizer.minimize(
self.l2r_loss,
var_list=thetas)
def compute_LL_phi(self, w_logits, weights, l, m, L, d, r):
'''
Func desc:
Computes the LL_phi term coming in the training objective
Input:
self object
            w_logits([batch_size, num_rules]) - the pre-sigmoid logits from the rule network from which the weights are obtained
weights([batch_size, num_rules]) - the weights matrix corresponding to rule network(w_network) in the algorithm
l([batch_size, num_rules]) - labels assigned by the rules
m([batch_size, num_rules]) - the rule coverage matrix where m_ij = 1 if jth rule covers ith instance
L([batch_size, 1]) - L_i = 1 if the ith instance has already a label assigned to it in the dataset
d([batch_size, 1]) - d_i = 1 if the ith instance is from labelled dataset
r([batch_size, num_rules]) - the rule association matrix where r_ij = 1 if jth rule is associated with ith instance (r_ij = 1 => m_ij = 1)
Output:
loss(real number > 0) - the value of the LL_phi term
'''
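        # The loss below is a per-rule binary cross-entropy on whether a rule's label agrees with the
        # true label: disagreeing covered rules are always penalized, agreeing rules contribute the
        # standard term only when explicitly associated (r = 1), and agreeing-but-unassociated covered
        # rules instead contribute a generalized cross-entropy term. Everything is masked by coverage
        # (m) and by membership in the labeled set (d).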
psi = 1e-25
L = tf.expand_dims(L,1)
# [batch_size, num_rules]
L = tf.tile(L,[1,self.num_rules])
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.to_float(tf.equal(l,L)),
logits=w_logits)
loss = m*loss
loss = (tf.to_float(tf.not_equal(l,L)) * loss) + (tf.to_float(tf.equal(l,L)) * r * loss)
gcross_loss = gcross_utils.generalized_cross_entropy_bernoulli(weights,0.2)
gcross_loss = gcross_loss * m * tf.to_float(tf.equal(l,L)) * (1-r)
loss = loss + gcross_loss
loss = tf.reduce_sum(loss,axis=-1)
loss = loss * d
loss = tf.reduce_mean(loss)
return loss
def implication_loss(self, weights, f_probs, m, rule_classes, num_classes, d):
'''
Func desc:
Computes the implication loss value
input:
self object
weights([batch_size, num_rules]) - the weights matrix corresponding to rule network(w_network) in the algorithm
            f_probs([batch_size, num_classes]) - the class probabilities from the classification network (f_network)
m([batch_size, num_rules]) - the rule coverage matrix where m_ij = 1 if jth rule covers ith instance
rule_classes
num_classes(non_negative integer) - number of available classes
d([batch_size, 1]) - d_i = 1 if the ith instance is from labelled dataset
output:
-obj (real number) - the implication loss value
'''
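        # Concretely, the value returned below is
        #   -mean_i [ (1 - d_i) * sum_j m_ij * log(1 - P_j_phi(r_j|x_i) * (1 - P_theta(class(j)|x_i)) + psi) ]
        # so covered rules on unlabeled instances are pushed to either agree with the classifier
        # or have their rule-network weight driven down.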
# computes implication loss (Equation 4 in the paper)
# weights are P_{j,\phi} values from the w network (rule network)
# weights: [batch_size, num_rules]
# f_probs are probabilities from the f network (classification network)
# f_probs: [batch_size, num_classes]
psi = 1e-25 # a small value to avoid nans
#[num_rules, num_classes]
one_hot_mask = tf.one_hot(rule_classes,num_classes,dtype=tf.float32)
#[batch_size, num_rules]
f_probs = tf.matmul(f_probs, one_hot_mask, transpose_b=True)
obj = 1 - (weights * (1 - f_probs)) #(Argument of log in equation 4)
# computing last term of equation 5, will multiply with gamma outside this function
obj = m*tf.log(obj + psi)
obj = tf.reduce_sum(obj, axis=-1)
obj = obj * (1-d) #defined only for instances in U, so mask by (1-d)
obj = tf.reduce_mean(obj)
return -obj
def get_weights_and_logits_f_d_U(self, x):
'''
Func desc:
compute and get the weights and logits for the rule network (w_network)
Input:
self object
x([batch_size, num_features]) - instance matrix
Output:
            weights([batch_size, num_rules]) - P_j_phi(r_ij = 1 | x_i), the rule network's estimate that rule j's label is correct for instance i (values near 0 mean the rule has overgeneralized on that instance)
            w_logits([batch_size, num_rules]) - the corresponding pre-sigmoid logits
'''
# Need to run the w network for each rule for the same x
#
# [batch_size, num_rules, num_features]
x_shape = tf.shape(x)
mul = tf.convert_to_tensor([1, self.num_rules_to_train])
expanded_x = tf.tile(x, mul)
# Need a python integer as the last dimension so that defining neural
# networks work later. Hence use num_features instead of x_shape[1]
x = tf.reshape(expanded_x , [x_shape[0], self.num_rules_to_train,
self.num_features])
batch_size = x_shape[0]
rules_int = tf.convert_to_tensor([list(range(0,
self.num_rules_to_train))])
# Need to tile rules_int batch_size times
#
# tilevar should be a 1-D tensor with number of values equal to number
# of columns in rules_int. Each column specifies the number of times
# that axis in rules_int will be replicated.
#
# Following will replicate the rows of rules_int batch_size times and
# leave the columns unchanged
tilevar = tf.convert_to_tensor([batch_size, 1])
rules_int = tf.tile(rules_int, tilevar)
rules_one_hot = tf.one_hot(rules_int, self.num_rules_to_train)
rules_int = tf.expand_dims(rules_int, axis=-1)
w_dict = {'x': x, 'rules' : rules_one_hot,
'rules_int': rules_int}
w_logits = self.w_network(w_dict, dropout_keep_prob=self.dropout_keep_prob)
w_logits = tf.squeeze(w_logits)
weights = tf.nn.sigmoid(w_logits)
return weights, w_logits
def joint_scores_from_f_and_w(self,weights,m,f_probs):
'''
Func desc:
Compute the learning scores obtained while jointly learning f(classification network) and w(rule network)
Input:
self object
weights([num_instances, num_rules]) - the weights matrix corresponding to rule network(w_network) in the algorithm
m([batch_size, num_rules]) - the rule coverage matrix where m_ij denotes if jth rule covers ith instance (if yes, then m_ij = 1)
            f_probs([batch_size, num_classes]) - the class probabilities from the classification network (f_network)
Output:
            result([batch_size, num_classes]) - combined per-class scores from the rule network and the classification network
'''
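        # The score below averages, over rules that cover the instance and are trusted by the rule
        # network (weight > 0.5), a per-class vector that puts the rule's weight on its own class and
        # (1 - weight) on every other class, and then adds the classification network's probabilities.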
num_classes = self.num_classes
rule_classes = self.rule_classes
#[batch_size, num_rules, 1]
weights = tf.expand_dims(weights,-1)
weights_mask = tf.to_float(tf.greater(weights,0.5))
#[batch_size, num_rules, 1]
m = tf.expand_dims(m,-1)
m = m*weights_mask
#[num_rules, num_classes]
one_hot_rule_classes = tf.one_hot(rule_classes,num_classes,dtype=tf.float32)
#[1, num_rules, num_classes]
one_hot_rule_classes = tf.expand_dims(one_hot_rule_classes,0)
#[batch_size, num_rules, num_classes]
rule_weight_product = weights * one_hot_rule_classes + (1-weights)*(1-one_hot_rule_classes)
sum_rule_firings = tf.reduce_sum(m,1)
result = m*rule_weight_product #+ (1-m)
#[batch_size, num_classes]
result = tf.reduce_sum(result,1)/(sum_rule_firings+1e-20)
result = result + f_probs
return result
| 37.384375
| 177
| 0.723523
|
efdb57f435390d6435c25d19c5dde97c076907a5
| 1,195
|
py
|
Python
|
alien.py
|
XinZhewu/Alien_Invasion
|
c6eb1141ba6522a218a9a9eaa528af1fa6fc336c
|
[
"MIT"
] | null | null | null |
alien.py
|
XinZhewu/Alien_Invasion
|
c6eb1141ba6522a218a9a9eaa528af1fa6fc336c
|
[
"MIT"
] | null | null | null |
alien.py
|
XinZhewu/Alien_Invasion
|
c6eb1141ba6522a218a9a9eaa528af1fa6fc336c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# @Time : 2018/2/4 18:13
# @Author : XinZhewu_568580410@qq.com
"""此文件管理单个外星人的设置`绘制`碰撞`移动
"""
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
def __init__(self, ai_settings, screen):
"""初始化外星人,设置起始位置"""
super(Alien, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
        # Load the alien image and set its rect.
self.image = pygame.image.load('images/alien.png')
self.rect = self.image.get_rect()
        # Start each new alien near the top-left corner of the screen.
self.rect.x = self.rect.width
self.rect.y = self.rect.height
        # Store the alien's exact horizontal position.
self.x = float(self.rect.x)
def blitme(self):
"""在指定位置绘制外星人"""
self.screen.blit(self.image, self.rect)
def check_edges(self):
"""如果外星人位于屏幕边缘,返回True"""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
def update(self):
"""左右移动外星人"""
self.x += (
self.ai_settings.alien_speed_factor *
self.ai_settings.fleet_direction
)
self.rect.x = self.x
| 20.964912
| 58
| 0.574059
|
4a9f9fbbe96b3f4c31a065ffa3ab9aa586d68b77
| 108
|
py
|
Python
|
src/easysub/__init__.py
|
mtahirtariq/easysub
|
d6083a7120991819719566d1e4daf7714b3db77a
|
[
"MIT"
] | null | null | null |
src/easysub/__init__.py
|
mtahirtariq/easysub
|
d6083a7120991819719566d1e4daf7714b3db77a
|
[
"MIT"
] | null | null | null |
src/easysub/__init__.py
|
mtahirtariq/easysub
|
d6083a7120991819719566d1e4daf7714b3db77a
|
[
"MIT"
] | null | null | null |
from app import main
from easysub import EasySub
from common import File
from fileparser import FileParser
| 18
| 33
| 0.842593
|
4674daf61ca1c81ba08b97f044cdfaa8e0be103d
| 1,862
|
py
|
Python
|
tests_adapters/test_bart.py
|
rahuln/adapter-transformers
|
ac3284547064686d31b95e5e1b078447a2199779
|
[
"Apache-2.0"
] | null | null | null |
tests_adapters/test_bart.py
|
rahuln/adapter-transformers
|
ac3284547064686d31b95e5e1b078447a2199779
|
[
"Apache-2.0"
] | null | null | null |
tests_adapters/test_bart.py
|
rahuln/adapter-transformers
|
ac3284547064686d31b95e5e1b078447a2199779
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from tests.bart.test_modeling_bart import *
from transformers import BartAdapterModel
from transformers.testing_utils import require_torch
from .test_adapter import AdapterTestBase, make_config
from .test_adapter_backward_compability import CompabilityTestMixin
from .test_adapter_common import AdapterModelTestMixin
from .test_adapter_compacter import CompacterTestMixin
from .test_adapter_composition import ParallelAdapterInferenceTestMixin, ParallelTrainingMixin
from .test_adapter_conversion import ModelClassConversionTestMixin
from .test_adapter_embeddings import EmbeddingTestMixin
from .test_adapter_fusion_common import AdapterFusionModelTestMixin
from .test_adapter_heads import PredictionHeadModelTestMixin
from .test_adapter_training import AdapterTrainingTestMixin
from .test_common import AdapterModelTesterMixin
@require_torch
class BartAdapterModelTest(AdapterModelTesterMixin, BartModelTest):
all_model_classes = (
BartAdapterModel,
)
class BartAdapterTestBase(AdapterTestBase):
config_class = BartConfig
config = make_config(
BartConfig,
d_model=16,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
)
tokenizer_name = "facebook/bart-base"
@require_torch
class BartAdapterTest(
AdapterModelTestMixin,
AdapterFusionModelTestMixin,
CompacterTestMixin,
CompabilityTestMixin,
EmbeddingTestMixin,
PredictionHeadModelTestMixin,
AdapterTrainingTestMixin,
ParallelAdapterInferenceTestMixin,
ParallelTrainingMixin,
BartAdapterTestBase,
unittest.TestCase,
):
pass
@require_torch
class BartClassConversionTest(
ModelClassConversionTestMixin,
BartAdapterTestBase,
unittest.TestCase,
):
pass
| 28.212121
| 94
| 0.80666
|
df6c55d452da3bb44f8ad33a30c169e019a69eaa
| 2,199
|
py
|
Python
|
src/users/models/componentsschemasmicrosoft_graph_messageruleallof1.py
|
peombwa/Sample-Graph-Python-Client
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
[
"MIT"
] | null | null | null |
src/users/models/componentsschemasmicrosoft_graph_messageruleallof1.py
|
peombwa/Sample-Graph-Python-Client
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
[
"MIT"
] | null | null | null |
src/users/models/componentsschemasmicrosoft_graph_messageruleallof1.py
|
peombwa/Sample-Graph-Python-Client
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphMessageruleallof1(Model):
"""messageRule.
:param display_name:
:type display_name: str
:param sequence:
:type sequence: int
:param conditions:
:type conditions: ~users.models.MicrosoftgraphmessageRulePredicates
:param actions:
:type actions: ~users.models.MicrosoftgraphmessageRuleActions
:param exceptions:
:type exceptions: ~users.models.MicrosoftgraphmessageRulePredicates
:param is_enabled:
:type is_enabled: bool
:param has_error:
:type has_error: bool
:param is_read_only:
:type is_read_only: bool
"""
_validation = {
'sequence': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'sequence': {'key': 'sequence', 'type': 'int'},
'conditions': {'key': 'conditions', 'type': 'MicrosoftgraphmessageRulePredicates'},
'actions': {'key': 'actions', 'type': 'MicrosoftgraphmessageRuleActions'},
'exceptions': {'key': 'exceptions', 'type': 'MicrosoftgraphmessageRulePredicates'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'has_error': {'key': 'hasError', 'type': 'bool'},
'is_read_only': {'key': 'isReadOnly', 'type': 'bool'},
}
def __init__(self, display_name=None, sequence=None, conditions=None, actions=None, exceptions=None, is_enabled=None, has_error=None, is_read_only=None):
super(ComponentsschemasmicrosoftGraphMessageruleallof1, self).__init__()
self.display_name = display_name
self.sequence = sequence
self.conditions = conditions
self.actions = actions
self.exceptions = exceptions
self.is_enabled = is_enabled
self.has_error = has_error
self.is_read_only = is_read_only
| 38.578947
| 157
| 0.627103
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.