def make_dirs(path):
"""
Creates any folders that are missing and assigns them the permissions of their
parents
"""
logger.log(u"Checking if the path " + path + " already exists", logger.DEBUG)
if not ek.ek(os.path.isdir, path):
# Windows, create all missing folders
if os.name == 'nt' or os.name == 'ce':
try:
logger.log(u"Folder " + path + " didn't exist, creating it", logger.DEBUG)
ek.ek(os.makedirs, path)
except (OSError, IOError), e:
logger.log(u"Failed creating " + path + " : " + ex(e), logger.ERROR)
return False
# not Windows, create all missing folders and set permissions
else:
sofar = ''
folder_list = path.split(os.path.sep)
# look through each subfolder and make sure they all exist
for cur_folder in folder_list:
sofar += cur_folder + os.path.sep
# if it exists then just keep walking down the line
if ek.ek(os.path.isdir, sofar):
continue
try:
logger.log(u"Folder " + sofar + " didn't exist, creating it", logger.DEBUG)
ek.ek(os.mkdir, sofar)
# use normpath to remove end separator, otherwise checks permissions against itself
chmodAsParent(ek.ek(os.path.normpath, sofar))
# do the library update for synoindex
notifiers.synoindex_notifier.addFolder(sofar)
except (OSError, IOError), e:
logger.log(u"Failed creating " + sofar + " : " + ex(e), logger.ERROR)
return False
return True
| 22,900
|
def download(url, filename, proxies=None):
"""
Download the URL into the destination file
:param url: URL to download
:param filename: destination file
"""
error = ''
try:
req = requests.get(url, proxies=proxies, stream=True)
with open(filename, "wb") as f:
shutil.copyfileobj(req.raw, f)
except FileNotFoundError as fnf:
error = f"Error while downloading {url} - I/O Problem with {filename} : FileNotFound -> check path"
except Exception as ex:
error = f"Error while downloading {url}. {str(ex)}"
return len(error) == 0, error, filename
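# Added usage sketch (not part of the original snippet): the URL and target
# path below are hypothetical. The helper returns (success, error, filename).
ok, error, path = download("https://example.com/archive.zip", "/tmp/archive.zip")
if ok:
    print(f"Saved to {path}")
else:
    print(f"Download failed: {error}")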
| 22,901
|
async def validate_devinfo(hass, data):
"""检验配置是否缺项。无问题返回[[],[]],有缺项返回缺项。"""
# print(result)
devtype = data['devtype']
ret = [[],[]]
requirements = VALIDATE.get(devtype)
if not requirements:
return ret
else:
for item in requirements[0]:
if item not in json.loads(data[CONF_MAPPING]):
ret[0].append(item)
for item in requirements[1]:
if item not in json.loads(data[CONF_CONTROL_PARAMS]):
ret[1].append(item)
return ret
| 22,902
|
def plot_artis_spectrum(
axes, modelpath, args, scale_to_peak=None, from_packets=False, filterfunc=None,
linelabel=None, plotpacketcount=False, **plotkwargs):
"""Plot an ARTIS output spectrum."""
if not Path(modelpath, 'input.txt').exists():
print(f"Skipping '{modelpath}' (no input.txt found. Not an ARTIS folder?)")
return
if plotpacketcount:
from_packets = True
for index, axis in enumerate(axes):
if args.multispecplot:
(timestepmin, timestepmax, args.timemin, args.timemax) = at.get_time_range(
modelpath, timedays_range_str=args.timedayslist[index])
else:
(timestepmin, timestepmax, args.timemin, args.timemax) = at.get_time_range(
modelpath, args.timestep, args.timemin, args.timemax, args.timedays)
modelname = at.get_model_name(modelpath)
if timestepmin == timestepmax == -1:
return
timeavg = (args.timemin + args.timemax) / 2.
timedelta = (args.timemax - args.timemin) / 2
if linelabel is None:
if len(modelname) < 70:
linelabel = f'{modelname}'
else:
linelabel = f'...{modelname[-67:]}'
if not args.hidemodeltime and not args.multispecplot:
# todo: fix this for multispecplot - use args.showtime for now
linelabel += f' +{timeavg:.0f}d'
if not args.hidemodeltimerange and not args.multispecplot:
linelabel += r' ($\pm$ ' + f'{timedelta:.0f}d)'
# Luke: disabled below because line label has already been formatted with e.g. timeavg values
# formatting for a second time makes it impossible to use curly braces in line labels (needed for LaTeX math)
# else:
# linelabel = linelabel.format(**locals())
if from_packets:
spectrum = get_spectrum_from_packets(
modelpath, args.timemin, args.timemax, lambda_min=args.xmin, lambda_max=args.xmax,
use_comovingframe=args.use_comovingframe, maxpacketfiles=args.maxpacketfiles,
delta_lambda=args.deltalambda, useinternalpackets=args.internalpackets, getpacketcount=plotpacketcount)
if args.outputfile is None:
statpath = Path()
else:
statpath = Path(args.outputfile).resolve().parent
else:
spectrum = get_spectrum(modelpath, timestepmin, timestepmax, fnufilterfunc=filterfunc)
if args.plotviewingangle: # read specpol res.
angles = args.plotviewingangle
viewinganglespectra = {}
for angle in angles:
viewinganglespectra[angle] = get_res_spectrum(modelpath, timestepmin, timestepmax, angle=angle,
fnufilterfunc=filterfunc, args=args)
elif args.plotvspecpol is not None and os.path.isfile(modelpath/'vpkt.txt'):
# read virtual packet files (after running plotartisspectrum --makevspecpol)
vpkt_config = at.get_vpkt_config(modelpath)
if (vpkt_config['time_limits_enabled'] and (
args.timemin < vpkt_config['initial_time'] or args.timemax > vpkt_config['final_time'])):
print(f"Timestep out of range of virtual packets: start time {vpkt_config['initial_time']} days "
f"end time {vpkt_config['final_time']} days")
quit()
angles = args.plotvspecpol
viewinganglespectra = {}
for angle in angles:
viewinganglespectra[angle] = get_vspecpol_spectrum(
modelpath, timeavg, angle, args, fnufilterfunc=filterfunc)
spectrum.query('@args.xmin <= lambda_angstroms and lambda_angstroms <= @args.xmax', inplace=True)
print(f"Plotting '{linelabel}' timesteps {timestepmin} to {timestepmax} "
f'({args.timemin:.3f} to {args.timemax:.3f}d)')
print(f" modelpath {modelname}")
print_integrated_flux(spectrum['f_lambda'], spectrum['lambda_angstroms'])
if scale_to_peak:
spectrum['f_lambda_scaled'] = spectrum['f_lambda'] / spectrum['f_lambda'].max() * scale_to_peak
if args.plotvspecpol is not None:
for angle in args.plotvspecpol:
viewinganglespectra[angle]['f_lambda_scaled'] = (
viewinganglespectra[angle]['f_lambda'] / viewinganglespectra[angle]['f_lambda'].max() *
scale_to_peak)
ycolumnname = 'f_lambda_scaled'
else:
ycolumnname = 'f_lambda'
if plotpacketcount:
ycolumnname = 'packetcount'
supxmin, supxmax = axis.get_xlim()
if (args.plotvspecpol is not None and os.path.isfile(modelpath/'vpkt.txt')) or args.plotviewingangle:
for angle in angles:
if args.binflux:
new_lambda_angstroms = []
binned_flux = []
wavelengths = viewinganglespectra[angle]['lambda_angstroms']
fluxes = viewinganglespectra[angle][ycolumnname]
nbins = 5
for i in np.arange(0, len(wavelengths) - nbins, nbins):
new_lambda_angstroms.append(wavelengths[i + int(nbins/2)])
sum_flux = 0
for j in range(i, i + nbins):
sum_flux += fluxes[j]
binned_flux.append(sum_flux / nbins)
plt.plot(new_lambda_angstroms, binned_flux)
else:
if args.plotvspecpol:
if args.viewinganglelabelunits == 'deg':
viewing_angle = round(math.degrees(math.acos(vpkt_config['cos_theta'][angle])))
linelabel = fr"$\theta$ = {viewing_angle}$^\circ$" if index == 0 else None
elif args.viewinganglelabelunits == 'rad':
linelabel = fr"cos($\theta$) = {vpkt_config['cos_theta'][angle]}" if index == 0 else None
else:
linelabel = f'bin number {angle}'
viewinganglespectra[angle].query(
'@supxmin <= lambda_angstroms and lambda_angstroms <= @supxmax').plot(
x='lambda_angstroms', y=ycolumnname, ax=axis, legend=None,
label=linelabel) # {timeavg:.2f} days {at.get_model_name(modelpath)}
else:
spectrum.query('@supxmin <= lambda_angstroms and lambda_angstroms <= @supxmax').plot(
x='lambda_angstroms', y=ycolumnname, ax=axis, legend=None,
label=linelabel if index == 0 else None, **plotkwargs)
return spectrum[['lambda_angstroms', 'f_lambda']]
| 22,903
|
def worldbank_date_to_datetime(date):
"""Convert given world bank date string to datetime.date object."""
if "Q" in date:
year, quarter = date.split("Q")
return datetime.date(int(year), (int(quarter) * 3) - 2, 1)
if "M" in date:
year, month = date.split("M")
return datetime.date(int(year), int(month), 1)
return datetime.date(int(date), 1, 1)
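# Added illustration of the three World Bank date formats handled above;
# quarterly dates resolve to the first month of the quarter.
assert worldbank_date_to_datetime("2019Q3") == datetime.date(2019, 7, 1)
assert worldbank_date_to_datetime("2020M11") == datetime.date(2020, 11, 1)
assert worldbank_date_to_datetime("2018") == datetime.date(2018, 1, 1)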
| 22,904
|
def PCopy (inCleanVis, outCleanVis, err):
"""
Make a shallow copy of input object.
Makes structure the same as inCleanVis, copies pointers
* inCleanVis = Python CleanVis object to copy
* outCleanVis = Output Python CleanVis object, must be defined
* err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inCleanVis):
raise TypeError("inCleanVis MUST be a Python Obit CleanVis")
if not PIsA(outCleanVis):
raise TypeError("outCleanVis MUST be a Python Obit CleanVis")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
Obit.CleanVisCopy (inCleanVis.me, outCleanVis.me, err.me)
if err.isErr:
OErr.printErrMsg(err, "Error copying CleanVis")
# end PCopy
| 22,905
|
def isinf(x):
"""
Return `True` if ``x`` is infinite.
"""
pass
| 22,906
|
def select(
key: bytes, seq: Sequence[BucketType], *, seed: bytes = DEFAULT_SEED
) -> BucketType:
"""
Select one of the elements in seq based on the hash of ``key``.
Example partitioning of input on ``stdin`` into buckets::
buckets = {}  # type: Dict[int, str]
for line in sys.stdin:
    buckets[select(line.encode(), [0, 1, 2, 3, 4, 5])] = line
:param key: The bytes to hash.
:param seq: The sequence from which to select an element. Must be non-empty.
:param seed: Seed to hash prior to hashing ``key``.
:raise ValueError: If ``seq`` is empty.
:return: One of the elements in ``seq``.
"""
if not seq:
raise ValueError("non-empty sequence required")
return seq[range(key, len(seq), seed=seed)]
| 22,907
|
def unescape(s):
"""
unescape html
"""
html_codes = (
("'", '&#39;'),
('"', '&quot;'),
('>', '&gt;'),
('<', '&lt;'),
('&', '&amp;')
)
for code in html_codes:
s = s.replace(code[1], code[0])
return s
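# Added sanity check for the helper above, assuming the reconstructed
# html_codes table of standard entities.
assert unescape("&lt;p&gt;Fish &amp; chips &#39;to go&#39;&lt;/p&gt;") == "<p>Fish & chips 'to go'</p>"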
| 22,908
|
def ua_mnem(*args):
"""ua_mnem(ea_t ea, char buf) -> char"""
return _idaapi.ua_mnem(*args)
| 22,909
|
def tic(*names):
"""
Start timer, use `toc` to get elapsed time in seconds.
Parameters
----------
names : str, str, ...
Names of timers
Returns
-------
out : float
Current timestamp
Examples
--------
.. code-block:: python
:linenos:
:emphasize-lines: 10,11,12
import plazy
def foo():
total = 0
for _ in range(100000):
total += 1
return total
if __name__ == "__main__":
plazy.tic() # T1
plazy.tic("B") # T2
plazy.tic("C", "D", "E") # T3
foo()
dt1 = plazy.toc() # elapsed time since T1
dt2 = plazy.toc("B") # elapsed time since T2
dt3 = plazy.toc("C", "D") # elapsed time since T3
foo()
dt4 = plazy.toc("E") # elapsed time since T3
dt5 = plazy.toc("B") # elapsed time since T2
print(dt1) # 0.009924173355102539
print(dt2) # 0.009925603866577148
print(dt3) # [0.00992727279663086, 0.00992727279663086]
print(dt4) # 0.020497798919677734
print(dt5) # 0.020506620407104492
See also
--------
toc
"""
now_ts = time.time()
name_arr = list(names) + (
[
g_time_store.default_name,
]
if len(names) == 0
else []
)
for n in name_arr:
g_time_store.set_time(name=n, value=now_ts)
return now_ts
| 22,910
|
def convert_func_types_to_type_for_json(functions, types):
"""Converts parameters and return type of function declaration to json representation."""
for name, f_info in functions.items():
t = parse_type_to_type_for_json(f_info.ret_type_text, types)
if t.type_hash not in types:
types[t.type_hash] = t
f_info.ret_type = t.type_hash
parse_params_to_json_types(f_info.params_list, types)
| 22,911
|
def tile(fnames, resize=(64,64), textonly=0, rows=None, cols=None):
"""Tiles the given images (by filename) and returns a tiled image"""
maxsize = [0, 0]
assert fnames
todel = set()
for fname in fnames:
try:
im = Image.open(fname)
maxsize = [max(m, s) for m, s in zip(maxsize, im.size)]
except Exception:
todel.add(fname)
continue
fnames = [os.path.realpath(f) for f in fnames if f not in todel] # convert symlinks to real paths
print >>sys.stderr, "There were %d images (removed %d bad) with maxsize %d x %d" % (len(fnames), len(todel), maxsize[0], maxsize[1])
# now figure out the right size of the output image
if not cols and not rows: # if neither dim is given, use the sqrt
cols = int(sqrt(len(fnames)))
rows = len(fnames)//cols + (0 if len(fnames)%cols == 0 else 1)
elif cols and not rows: # only cols is given
rows = len(fnames)//cols + (0 if len(fnames)%cols == 0 else 1)
elif not cols and rows: # only rows is given
cols = len(fnames)//rows + (0 if len(fnames)%rows == 0 else 1)
else: # both are given
pass
if textonly:
cur = 0
rows = list(nkgrouper(cols, fnames))
return rows
if resize:
boxsize = resize
else:
boxsize = maxsize
outsize = tuple([s*n for s, n in zip(boxsize, [cols, rows])])
print >>sys.stderr, "Output will be tiling %d x %d images, with image size %d x %d" % (cols, rows, outsize[0], outsize[1])
out = Image.new(im.mode, outsize)
cur = 0
start = time.time()
for r in range(rows):
for c in range(cols):
print >>sys.stderr, ' At col %d, row %d, cur %d, %0.2f secs elapsed...\r ' % (c, r, cur, time.time()-start),
im = Image.open(fnames[cur]).resize(boxsize, Image.ANTIALIAS)
box = (c*boxsize[0], r*boxsize[1])
out.paste(im, box)
cur += 1
if cur >= len(fnames): break
print >>sys.stderr
return out
| 22,912
|
def get_ctrls(controls, timeout=10):
"""Get various servod controls."""
get_dict = {}
cmd = 'dut-control %s' % controls
(retval, _, out) = do_cmd(cmd, timeout, flist=['Errno', '- ERROR -'])
if retval:
for ctrl_line in out.split('\n'):
ctrl_line = ctrl_line.strip()
if len(ctrl_line):
logging.debug('ctrl_line=%s', ctrl_line)
try:
(name, value) = ctrl_line.strip().split(':')
get_dict[name] = value
except ValueError:
logging.debug("Unable to parse ctrl %s", ctrl_line)
return (True, get_dict)
return (False, get_dict)
| 22,913
|
def retrieve_database():
"""Return the contents of MongoDB as a dataframe."""
return pd.DataFrame(list(restaurant_collection.find({})))
| 22,914
|
def test_package_inference():
"""correctly identify the package name"""
name = get_package_name(CachingLogger)
assert name == "scitrack"
| 22,915
|
def try_download_ted3(target_dir, sample_rate, min_duration, max_duration):
"""
Method to download ted3 data set. Creates manifest files.
Args:
target_dir:
sample_rate:
min_duration:
max_duration:
Returns:
"""
path_to_data = os.path.join(os.path.expanduser("~"), target_dir)
path_utils.try_create_directory(path_to_data)
target_unpacked_dir = os.path.join(path_to_data, "ted3_unpacked")
path_utils.try_create_directory(target_unpacked_dir)
extracted_dir = os.path.join(path_to_data, "Ted3")
if os.path.exists(extracted_dir):
shutil.rmtree(extracted_dir)
LOGGER.info(f"Start downloading Ted 3 from {TED_LIUM_V2_DL_URL}")
file_name = TED_LIUM_V2_DL_URL.split("/")[-1]
target_filename = os.path.join(target_unpacked_dir, file_name)
path_utils.try_download(target_filename, TED_LIUM_V2_DL_URL)
LOGGER.info("Download complete")
LOGGER.info("Unpacking...")
tar = tarfile.open(target_filename)
tar.extractall(extracted_dir)
tar.close()
shutil.rmtree(target_unpacked_dir)
assert os.path.exists(extracted_dir), f"Archive {file_name} was not properly uncompressed"
LOGGER.info("Converting files to wav and extracting transcripts...")
prepare_dir(path_to_data, sample_rate)
create_manifest(path_to_data, os.path.join(path_to_data,'ted3_train_manifest.csv'), min_duration, max_duration)
| 22,916
|
def readAbstractMethodsFromFile(file: str) -> List[AbstractMethod]:
"""
Returns a list of `AbstractMethods` read from the given `file`. The file should have one `AbstractMethod`
per line with tokens separated by spaces.
"""
abstractMethods = []
with open(file, "r") as f:
for line in f:
abstractMethods.append(AbstractMethod(line.strip()))
return abstractMethods
| 22,917
|
def create_file_object(list_of_files,repo) :
"""
This function creates the corresponding file_object for each file
in the list of files stored in the repo
"""
for e in list_of_files :
cat_and_ext = re.split(r'\.', e)
f = file_object(e, re.sub('[^a-zA-Z]+', '',cat_and_ext[0]),
cat_and_ext[1],repo)
yield f
| 22,918
|
def _find_possible_tox(path, toxenv):
"""Given a path and a tox target, see if flake8 is already installed."""
# First try to discover existing flake8
while(path and path != '/'):
path = os.path.dirname(path)
# the locations of possible flake8
venv = path + "/.tox/%s" % toxenv
flake8 = venv + "/bin/flake8"
if os.path.isdir(venv) and os.path.exists(flake8):
# we found a flake8 in a venv so set that as the running venv
ENV["VIRTUAL_ENV"] = venv
# parse the ignores to pass them on the command line
ENV["CONFIG"] = ignores(path)
ENV["IGNORES"] = ENV["CONFIG"].get("ignore", "")
# set the working directory so that 'hacking' can pick up
# its config
ENV['PWD'] = path
LOG.debug("Found flake8 %s, ENV=%s" % (flake8, ENV))
return flake8
| 22,919
|
def recursive_descent(data: np.ndarray, function: Callable):
"""
**Recursively process an `np.ndarray` until the last dimension.**
This function applies a callable to the very last dimension of a numpy multidimensional array. It is foreseen
for time series processing, especially in combination with the function `ts_gaf_transform`.
+ param **data**: multidimensional data, type `np.ndarray`.
+ param **function**: callable, type `Callable`.
+ return **function(data)**: all kind of processed data.
"""
if len(data.shape) == 1:
return function(data)
return [recursive_descent(data[i], function) for i in range(data.shape[0])]
| 22,920
|
def write_item_mtime(item, mtime):
"""Write the given mtime to an item's `mtime` field and to the mtime of the
item's file.
"""
if mtime is None:
log.warn(u"No mtime to be preserved for item '{0}'",
util.displayable_path(item.path))
return
# The file's mtime on disk must be in sync with the item's mtime
write_file_mtime(util.syspath(item.path), mtime)
item.mtime = mtime
| 22,921
|
def watt_spectrum(a, b):
""" Samples an energy from the Watt energy-dependent fission spectrum.
Parameters
----------
a : float
Spectrum parameter a
b : float
Spectrum parameter b
Returns
-------
float
Sampled outgoing energy
"""
return _dll.watt_spectrum(a, b)
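# Added pure-Python sketch of the sampling scheme such a library routine is
# commonly expected to implement (sample a Maxwellian at temperature a, then
# apply the Watt shift). This is an assumption for illustration; the actual
# _dll implementation may differ.
import math
import random

def watt_spectrum_py(a, b):
    r1, r2, r3 = random.random(), random.random(), random.random()
    c = math.cos(0.5 * math.pi * r3)
    w = -a * (math.log(r1) + math.log(r2) * c * c)  # Maxwellian sample with T = a
    return w + 0.25 * a * a * b + random.uniform(-1.0, 1.0) * math.sqrt(a * a * b * w)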
| 22,922
|
def get_definition_from_stellarbeat_quorum_set(quorum_set: QuorumSet) -> Definition:
"""Turn a stellarbeat quorum set into a quorum slice definition"""
return {
'threshold': quorum_set['threshold'],
'nodes': set(quorum_set['validators']) if 'validators' in quorum_set else set(),
'children_definitions': [
get_definition_from_stellarbeat_quorum_set(inner_quorum_set)
for inner_quorum_set in quorum_set['innerQuorumSets']
] if 'innerQuorumSets' in quorum_set else []
}
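# Added hypothetical stellarbeat-style quorum set (node names invented) showing
# how nested innerQuorumSets become children_definitions.
example_quorum_set = {
    'threshold': 2,
    'validators': ['GA...NODE1', 'GA...NODE2'],
    'innerQuorumSets': [
        {'threshold': 1, 'validators': ['GA...NODE3'], 'innerQuorumSets': []},
    ],
}
definition = get_definition_from_stellarbeat_quorum_set(example_quorum_set)
# -> {'threshold': 2, 'nodes': {'GA...NODE1', 'GA...NODE2'},
#     'children_definitions': [{'threshold': 1, 'nodes': {'GA...NODE3'},
#                               'children_definitions': []}]}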
| 22,923
|
def values(names):
"""
Method decorator that allows inject return values into method parameters.
It tries to find the desired value by searching nested return values. For convenience, a single-element list is injected as its only value.
:param names: dict of "value-name": "method-parameter-name"
"""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
if len(args)>1:
instance=args[1]
else:
instance = kwargs['instance']
def findReturnValues(rvalues):
for k, v in rvalues.iteritems():
if isinstance(v, dict):
findReturnValues(v) #go deep, to find desired name
if k in names.keys():
if isinstance(v,list) and len(v)==1:
kwargs.update({names[k]: v[0]})
else:
kwargs.update({names[k]: v})
findReturnValues(instance.returnValues)
#ensure all names was set
missing_params = [k for k, v in names.items() if v not in kwargs]
if missing_params:
raise AttributeError("Parameters {0} for '{1}' were not found".format(missing_params, func.__name__), missing_params)
func(*args, **kwargs)
return wrapped_func
return wrapper
| 22,924
|
def select_user(with_dlslots=True):
"""
Select one random user, if can_download is true then user must have
download slots available
:returns User
"""
with session_scope() as db:
try:
query = db.query(User).filter(User.enabled.is_(True))
if with_dlslots:
query = query.filter(or_(
User.downloads_limit > User.downloads_today,
User.downloads_limit.is_(None)
))
user = query.order_by(func.random()).limit(1).one()
except NoResultFound:
raise OperationInterruptedException('No suitable users found')
else:
db.expunge(user)
return user
| 22,925
|
def set_rf_log_level(level):
"""Set RooFit log level."""
if level not in RooFitLogLevel:
return
ROOT.RooMsgService.instance().setGlobalKillBelow(level)
| 22,926
|
def silero_number_detector(onnx=False):
"""Silero Number Detector
Returns a model with a set of utils
Please see https://github.com/snakers4/silero-vad for usage examples
"""
if onnx:
url = 'https://models.silero.ai/vad_models/number_detector.onnx'
else:
url = 'https://models.silero.ai/vad_models/number_detector.jit'
model = Validator(url)
utils = (get_number_ts,
save_audio,
read_audio,
collect_chunks,
drop_chunks)
return model, utils
| 22,927
|
def etaCalc(T, Tr = 296.15, S = 110.4, nr = 1.83245*10**-5):
"""
Calculates dynamic gas viscosity in kg*m-1*s-1
Parameters
----------
T : float
Temperature (K)
Tr : float
Reference Temperature (K)
S : float
Sutherland constant (K)
nr : float
Reference dynamic viscosity
Returns
-------
eta : float
Dynamic gas viscosity in kg*m-1*s-1
"""
eta = nr * ( (Tr + S) / (T+S) )*(T/Tr)**(3/2)
return eta
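# Added example call: with the default reference values, dry air at 300 K
# comes out at roughly 1.85e-5 kg*m-1*s-1.
eta_300K = etaCalc(300.0)
print(f"eta(300 K) ~ {eta_300K:.3e} kg m^-1 s^-1")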
| 22,928
|
def refine_markers_harris(patch, offset):
""" Heuristically uses the max Harris response for control point center. """
harris = cv2.cornerHarris(patch, 2, 5, 0.07)
edges = np.where(harris < 0, np.abs(harris), 0)
point = np.array(np.where(harris == harris.max())).flatten()
point += offset
return np.float64(point)
| 22,929
|
def test_teams_join_post():
"""Can a user post /teams/join"""
app = create_ctfd(user_mode="teams")
with app.app_context():
gen_user(app.db, name="user")
gen_team(app.db, name="team")
with login_as_user(app) as client:
r = client.get('/teams/join')
assert r.status_code == 200
with client.session_transaction() as sess:
data = {
"name": "team",
"password": "password",
"nonce": sess.get('nonce')
}
r = client.post('/teams/join', data=data)
assert r.status_code == 302
incorrect_data = data
incorrect_data['password'] = ""
r = client.post('/teams/join', data=incorrect_data)
assert r.status_code == 200
destroy_ctfd(app)
| 22,930
|
def get_realtime_price(symbol):
"""
Get the real-time stock price.
:param symbol:
:return:
"""
try:
df = get_real_price_dataframe()
df_s = df[df['code'] == symbol]
if len(df_s['trade'].get_values()):
return df_s['trade'].get_values()[0]
else:
return -1
except:
return -1
| 22,931
|
def create_employee(db_session: Session, employee: schemas.EmployeeRequest):
""" Create new employee """
new_employee = Employee(
idir=employee.idir,
status=employee.status,
location=employee.location,
phone=employee.phone)
db_session.add(new_employee)
db_session.commit()
db_session.refresh(new_employee)
return db_session.query(Employee).filter(Employee.idir == employee.idir).first()
| 22,932
|
def export_xr_as_nc(ds, filename):
"""
Takes an xarray dataset or data array and exports it as a
netcdf file.
Parameters
----------
ds: xarray dataset/array
Input xarray dataset or data array with any number of
dimensions.
filename : str
Name of output path and filename.
"""
# notify
print('Exporting xarray as netcdf file.')
# check if xr ds or da
if not isinstance(ds, (xr.Dataset, xr.DataArray)):
raise TypeError('Only xarray dataset/data array supported.')
# check if attrs exist
if not hasattr(ds, 'attrs'):
print('Warning: xarray is missing attributes.')
elif not hasattr(ds, 'geobox'):
print('Warning: xarray is missing geobox.')
# check if filename valid
if not filename:
raise ValueError('Did not provide filename.')
# check file extension given
name, ext = os.path.splitext(filename)
if ext != '.nc':
raise ValueError('Filename must include .nc extension.')
try:
# write xr to netcdf
ds.to_netcdf(filename)
except Exception as e:
print('Could not export')
raise ValueError('Could not export netcdf: {0}.'.format(e))
# notify
print('Exported xarray as netcdf successfully.')
| 22,933
|
def sim_categorical(var_dist_params, size):
"""
Function to simulate data for
a categorical/Discrete variable.
"""
values = var_dist_params[0]
freq = var_dist_params[1]
data_sim = np.random.choice(a=values, p=freq, size=size)
return data_sim
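# Added illustrative call (names made up): draw 1000 samples of a three-level
# variable with the given frequencies; counts should roughly match the freqs.
var_dist_params = (["low", "medium", "high"], [0.2, 0.5, 0.3])
simulated = sim_categorical(var_dist_params, size=1000)
print(np.unique(simulated, return_counts=True))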
| 22,934
|
def make_dirs(path):
"""
Creates directory and all intermediate parent directories. Does not fail if some of the directories already exist.
Basically, it is python version of sh command "mkdir -p path".
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
# Tail folder already exists
pass
else:
raise
| 22,935
|
def flip_team_observation(observation, result, config, from_team, to_team):
"""Rotates team-specific observations."""
result['{}_team'.format(to_team)] = rotate_points(
observation['{}_team'.format(from_team)])
result['{}_team_direction'.format(to_team)] = rotate_points(
observation['{}_team_direction'.format(from_team)])
result['{}_team_tired_factor'.format(to_team)] = observation[
'{}_team_tired_factor'.format(from_team)]
result['{}_team_active'.format(to_team)] = observation[
'{}_team_active'.format(from_team)]
result['{}_team_yellow_card'.format(to_team)] = observation[
'{}_team_yellow_card'.format(from_team)]
result['{}_team_roles'.format(to_team)] = observation['{}_team_roles'.format(
from_team)]
result['{}_team_active'.format(to_team)] = observation[
'{}_team_active'.format(from_team)]
if '{}_agent_controlled_player'.format(from_team) in observation:
result['{}_agent_controlled_player'.format(to_team)] = observation[
'{}_agent_controlled_player'.format(from_team)]
if '{}_agent_sticky_actions'.format(from_team) in observation:
result['{}_agent_sticky_actions'.format(to_team)] = [
rotate_sticky_actions(sticky, config)
for sticky in observation['{}_agent_sticky_actions'.format(from_team)]
]
| 22,936
|
def test_contains():
"""Test that bounds know if a Value is contained within it."""
bounds = CompositionBounds({"C", "H", "O", "N"})
assert bounds.contains(EmpiricalFormula('C2H5OH')._to_bounds())
assert not bounds.contains(EmpiricalFormula('NaCl')._to_bounds())
| 22,937
|
def test_load_managed_mode_directory(create_config, monkeypatch, tmp_path):
"""Validate managed-mode default directory is /root/project."""
monkeypatch.chdir(tmp_path)
monkeypatch.setenv("CHARMCRAFT_MANAGED_MODE", "1")
# Patch out Config (and Project) to prevent directory validation checks.
with patch("charmcraft.config.Config"):
with patch("charmcraft.config.Project") as mock_project:
with patch("charmcraft.config.load_yaml"):
load(None)
assert mock_project.call_args.kwargs["dirpath"] == pathlib.Path("/root/project")
| 22,938
|
def test_rules():
"""
Yield each uri so we get granular results.
"""
for uri, keys in RULES.items():
yield uri, assert_proper_keys, uri, keys
| 22,939
|
def validSolution(board: list) -> bool:
"""
A function validSolution/ValidateSolution/valid_solution()
that accepts a 2D array representing a Sudoku board,
and returns true if it is a valid solution, or false otherwise
:param board:
:return:
"""
return all([test_horizontally(board),
test_vertically(board),
test_sub_grids(board)])
| 22,940
|
async def test_create_saves_data(manager):
"""Test creating a config entry."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, user_input=None):
return self.async_create_entry(title="Test Title", data="Test Data")
await manager.async_init("test")
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
entry = manager.mock_created_entries[0]
assert entry["version"] == 5
assert entry["handler"] == "test"
assert entry["title"] == "Test Title"
assert entry["data"] == "Test Data"
assert entry["source"] is None
| 22,941
|
def get_videos_from_channel(service, channel_id):
"""Essentially a wrapper (but not really) for get_videos_from_playlist()
See get_videos_from_playlist() to see return values.
Parameters
----------
service:
As obtained from googleapiclient.discovery.build()
channel_id: str
Can be found in the url of a youtube channel's page.
"""
response = get_response(service.channels(), 'list',
part='contentDetails',
id=channel_id,
maxResults=50
)
# Id for playlist of this channel's uploads
uploads_id = response['items'][0]['contentDetails']['relatedPlaylists']['uploads']
for video in get_videos_from_playlist(service=service, playlist_id=uploads_id):
yield video
| 22,942
|
def drawingpad(where=None, x=0, y=0, image=None, color=0xffffff, fillingColor=0x000000, thickness=3):
"""Create a drawing pad.
Args:
where (np.ndarray) : image/frame where the component should be rendered.
x (int) : Position X where the component should be placed.
y (int) : Position Y where the component should be placed.
image (np.ndarray) : Image to be rendered in the specified destination.
color (uint) : Color of the line in the format ``0xRRGGBB``, e.g. ``0xff0000`` for red.
fillingColor (uint) : Color of filling in the format `0xAARRGGBB`, e.g. `0x00ff0000` for red, `0xff000000` for transparent filling.
thickness (int) : Thickness of the lines used to draw a line.
Returns:
np.ndarray : The current ``image`` .
Examples:
>>> import cv2
>>> import numpy as np
>>> from pycharmers.opencv import cvui
...
>>> WINDOW_NAME = 'Drawing Pad'
>>> frame = np.zeros(shape=(400, 650, 3), dtype=np.uint8)
>>> image = np.full(shape=(250,250,3), fill_value=255, dtype=np.uint8)
>>> bgr = [128, 128, 128]
>>> fillingColors = ["White", "Black"]
>>> fillingStates = [True, False]
>>> thickness = [3]
>>> cvui.init(WINDOW_NAME)
>>> cv2.moveWindow(winname=WINDOW_NAME, x=0, y=0)
...
>>> while (True):
... # Fill the frame with a nice color
... frame[:] = (49, 52, 49)
... cvui.text(where=frame, x=320, y=10, text="Thickness")
... cvui.text(where=frame, x=320, y=100, text="Filling Color")
... thick = cvui.trackbar(where=frame, x=320, y=30, width=300, value=thickness, min=1, max=10, options=cvui.TRACKBAR_DISCRETE, discreteStep=1)
... idx = cvui.radiobox(where=frame, x=350, y=120, labels=fillingColors, states=fillingStates)
... bgr = cvui.colorpalette(where=frame, x=320, y=180, bgr=bgr, width=300, height=50)
... image = cvui.drawingpad(where=frame, x=30, y=50, image=image, color=bgr, fillingColor=[0xffffff, 0x000000][idx], thickness=thick)
... cvui.update()
... # Show everything on the screen
... cv2.imshow(WINDOW_NAME, frame)
... # Check if ESC key was pressed
... if cv2.waitKey(20) == cvui.ESCAPE:
... break
>>> cv2.destroyWindow(WINDOW_NAME)
>>> # You can draw a picture as follows by executing the following program while running the above program.
>>> def drawing(path, dsize=(250,250), thresh=127, sleep=3, drawing_val=0, offset=(30,125)):
... \"\"\"
... Args:
... path (str) : Path to binary image.
... dsize (tuple) : The size of drawing pad. ( ``width`` , ``height`` )
... thresh (int) : If you prepare the binary (bgr) image, you can use ``cv2.threshold`` to convert it to binary image. (See :meth:`cvPencilSketch <pycharmers.cli.cvPencilSketch.cvPencilSketch>` for more details.)
... sleep (int) : Delay execution for a given number of seconds. (You have to click the OpenCV window before before entering the for-loop.)
... drawing_val (int) : At what value to draw.
... offset (tuple) : Offset from top left ( ``cv2.moveWindow(winname=WINDOW_NAME, x=0, y=0)`` ) to drawing pad.
... \"\"\"
... import cv2
... import time
... import pyautogui as pgui # Use for controling the mouse. (https://pyautogui.readthedocs.io/en/latest/mouse.html)
... img = cv2.resize(src=cv2.imread(path, 0), dsize=dsize)
... img = cv2.threshold(src=img, thresh=thresh, maxval=255, type=cv2.THRESH_BINARY)[1]
... WINDOW_NAME = "Apotheosis"
... cv2.imshow(winname=WINDOW_NAME, mat=img)
... width,height = dsize
... x_offset, y_offset = offset
... time.sleep(sleep)
... for i in range(height):
... pgui.moveTo(x_offset, y_offset+i)
... prev_val, prev_pos = (0, 0)
... for j in range(width+1):
... if j<width:
... val = img[i,j]
... else:
... val = -1 # Must be different from ``prev_val``
... if prev_val != val:
... # Drawing.
... if prev_val == drawing_val:
... pgui.mouseDown()
... pgui.dragRel(xOffset=j-prev_pos, yOffset=0, button="left", duration=0.0, mouseDownUp=True)
... pgui.mouseUp()
... else:
... pgui.moveRel(xOffset=j-prev_pos, yOffset=0, duration=0.0)
... prev_pos = j
... prev_val = val
... key = cv2.waitKey(1)
... if key == 27: break
... if key == 27: break
... cv2.destroyWindow(WINDOW_NAME)
+--------------------------------------------------------+-------------------------------------------------------+
| Example |
+========================================================+=======================================================+
| .. image:: _images/opencv.cvui.drawingpad-konotaro.gif | .. image:: _images/opencv.cvui.drawingpad-tanziro.gif |
+--------------------------------------------------------+-------------------------------------------------------+
"""
handleTypeError(types=[np.ndarray, NoneType], where=where)
if isinstance(where, np.ndarray):
__internal.screen.where = where
block = __internal.screen
else:
block = __internal.topBlock()
x += block.anchor.x
y += block.anchor.y
return __internal.drawingpad(block, x, y, image, color, fillingColor, thickness)
| 22,943
|
def json_request(url, **kwargs):
"""
Request JSON data by HTTP
:param url: requested URL
:return: the dictionary
"""
if 'auth_creds' in kwargs and 'authentication_enabled' in kwargs['auth_creds']:
if 'sessionToken' in kwargs:
url += "&sessionToken=%s" % kwargs['auth_creds']['sessionToken']
else:
url += "&ignite.login=%s&ignite.password=%s" % (kwargs['auth_creds']['auth_login'],
kwargs['auth_creds']['auth_password'])
req = Request(url)
decoded = {}
try:
r = urlopen(req)
reply = r.read().decode('UTF-8')
decoded = loads(reply)
except HTTPError:
print('')
print("HTTPError %s" % url)
except URLError:
print('')
print("URLError %s" % url)
return decoded
| 22,944
|
def write_patch(source,target,stream,**kwds):
"""Generate patch commands to transform source into target.
'source' and 'target' must be paths to a file or directory, and 'stream'
an object supporting the write() method. Patch protocol commands to
transform 'source' into 'target' will be generated and written sequentially
to the stream.
"""
Differ(stream,**kwds).diff(source,target)
| 22,945
|
def list():
"""
List all added paths
"""
try:
with io.open(FILE_NAME, 'r', encoding='utf-8') as f:
data = json.load(f)
except:
data = {}
return data
| 22,946
|
def load(filename):
"""
Load an EigenM object
"""
with open(filename, 'rb') as f:
return pickle.load(f)
| 22,947
|
def resize_image_bboxes_with_crop_or_pad(image, bboxes, xs, ys,
target_height, target_width, mask_image=None):
"""Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either centrally
cropping the image or padding it evenly with zeros.
If `width` or `height` is greater than the specified `target_width` or
`target_height` respectively, this op centrally crops along that dimension.
If `width` or `height` is smaller than the specified `target_width` or
`target_height` respectively, this op centrally pads with 0 along that
dimension.
Args:
image: 3-D tensor of shape `[height, width, channels]`
target_height: Target height.
target_width: Target width.
Raises:
ValueError: if `target_height` or `target_width` are zero or negative.
Returns:
Cropped and/or padded image of shape
`[target_height, target_width, channels]`
"""
with tf.name_scope('resize_with_crop_or_pad'):
image = ops.convert_to_tensor(image, name='image')
if mask_image is not None:
print('Image: ', image)
print('MaskImage: ', mask_image)
mask_image = ops.convert_to_tensor(mask_image, name='image')
assert_ops = []
assert_ops += _Check3DImage(image, require_static=False)
assert_ops += _assert(target_width > 0, ValueError,
'target_width must be > 0.')
assert_ops += _assert(target_height > 0, ValueError,
'target_height must be > 0.')
image = control_flow_ops.with_dependencies(assert_ops, image)
# `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
# Make sure our checks come first, so that error messages are clearer.
if _is_tensor(target_height):
target_height = control_flow_ops.with_dependencies(
assert_ops, target_height)
if _is_tensor(target_width):
target_width = control_flow_ops.with_dependencies(assert_ops, target_width)
def max_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.maximum(x, y)
else:
return max(x, y)
def min_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.minimum(x, y)
else:
return min(x, y)
def equal_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.equal(x, y)
else:
return x == y
height, width, _ = _ImageDimensions(image)
width_diff = target_width - width
offset_crop_width = max_(-width_diff // 2, 0)
offset_pad_width = max_(width_diff // 2, 0)
height_diff = target_height - height
offset_crop_height = max_(-height_diff // 2, 0)
offset_pad_height = max_(height_diff // 2, 0)
# Maybe crop if needed.
height_crop = min_(target_height, height)
width_crop = min_(target_width, width)
cropped = tf.image.crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
if mask_image is not None:
cropped_mask_image = tf.image.crop_to_bounding_box(mask_image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height, width,
-offset_crop_height, -offset_crop_width,
height_crop, width_crop)
# Maybe pad if needed.
resized = tf.image.pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
target_height, target_width)
if mask_image is not None:
resized_mask_image = tf.image.pad_to_bounding_box(cropped_mask_image, offset_pad_height, offset_pad_width,
target_height, target_width)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height_crop, width_crop,
offset_pad_height, offset_pad_width,
target_height, target_width)
# In theory all the checks below are redundant.
if resized.get_shape().ndims is None:
raise ValueError('resized contains no shape.')
resized_height, resized_width, _ = _ImageDimensions(resized)
assert_ops = []
assert_ops += _assert(equal_(resized_height, target_height), ValueError,
'resized height is not correct.')
assert_ops += _assert(equal_(resized_width, target_width), ValueError,
'resized width is not correct.')
resized = control_flow_ops.with_dependencies(assert_ops, resized)
if mask_image is None:
return resized, None, bboxes, xs, ys
else:
return resized, resized_mask_image, bboxes, xs, ys
| 22,948
|
def sharpe_ratio(R_p, sigma_p, R_f=0.04):
"""
:param R_p: annualized return of the strategy
:param R_f: risk-free rate (default 0.04)
:param sigma_p: volatility of the strategy returns
:return: sharpe_ratio
"""
sharpe_ratio = 1.0 * (R_p - R_f) / sigma_p
return sharpe_ratio
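# Added worked example: a strategy returning 15% a year with 20% volatility
# against the default 4% risk-free rate.
print(sharpe_ratio(R_p=0.15, sigma_p=0.20))  # (0.15 - 0.04) / 0.20 = 0.55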
| 22,949
|
def save_table(pgcursor, table, df):
"""Clear existing data from postgres table and insert data from df."""
clr_str = "TRUNCATE TABLE " + table + ";"
pgcursor.execute(clr_str)
columns = ''
values = ''
for col in df.columns.values:
columns += col + ', '
values += '%s, '
sql_str = 'INSERT INTO ' \
+ table + ' (' \
+ columns[:-2] + ') VALUES (' \
+ values[:-2] + ');'
for row in df.values.tolist():
pgcursor.execute(sql_str, row)
| 22,950
|
def xr_vol_int_regional(xa, AREA, DZ, MASK):
""" volumen integral with regional MASK
input:
xa, AREA, DZ .. same as in 'xr_vol_int'
MASK .. 2D xr DataArray of booleans with the same dimensions as xa
output:
integral, int_levels .. same as in 'xr_vol_int'
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert type(DZ)==xr.core.dataarray.DataArray
assert np.shape(AREA)==np.shape(xa)[-2:]
assert np.shape(DZ)==np.shape(xa)[-3:]
assert np.dtype(MASK)==np.dtype('bool')
# determine min/max i/j of masked region
(imin, imax, jmin, jmax) = find_regional_coord_extent(MASK)
xa_reg = xa.where(MASK)[:,jmin:jmax+1,imin:imax+1]
AREA_reg = AREA.where(MASK)[jmin:jmax+1,imin:imax+1]
DZ_reg = DZ.where(MASK)[:,jmin:jmax+1,imin:imax+1]
integral, int_levels = xr_vol_int(xa_reg, AREA_reg, DZ_reg)
return integral, int_levels
| 22,951
|
def save_feature(df: pd.DataFrame, feature_name: Union[int, str], directory: str = './features/',
with_csv_dump: bool = False, create_directory: bool = True,
reference_target_variable: Optional[pd.Series] = None, overwrite: bool = False):
"""
Save pandas dataframe as feather-format
Args:
df:
The dataframe to be saved.
feature_name:
The name of the feature. The output file will be ``{feature_name}.f``.
directory:
The directory where the feature will be stored.
with_csv_dump:
If True, the first 1000 lines are dumped to csv file for debug.
create_directory:
If True, create directory if not exists.
reference_target_variable:
If not None, instant validation will be made on the feature.
overwrite:
If False and file already exists, RuntimeError will be raised.
"""
if create_directory:
os.makedirs(directory, exist_ok=True)
if reference_target_variable is not None:
validate_feature(df, reference_target_variable)
path = os.path.join(directory, str(feature_name) + '.f')
if not overwrite and os.path.exists(path):
raise RuntimeError('File already exists')
df.to_feather(path)
if with_csv_dump:
df.head(1000).to_csv(os.path.join(directory, str(feature_name) + '.csv'), index=False)
| 22,952
|
def test_nn_functional_interpolate_bicubic_scale_factor_tuple():
"""
api: paddle.nn.functional.interpolate
op version: 11
"""
op = Net(mode='bicubic', scale_factor=(1.5, 1.5))
op.eval()
# net, name, ver_list, delta=1e-6, rtol=1e-5
obj = APIOnnx(op, 'nn_functional_interpolate', [11])
obj.set_input_data(
"input_data",
paddle.to_tensor(
randtool("float", -1, 1, [1, 2, 2, 5]).astype('float32')))
obj.run()
| 22,953
|
def test(
coverage: bool = typer.Option( # noqa: B008
default=False, help='Generate coverage information.'
),
html: bool = typer.Option( # noqa: B008
default=False, help='Generate an html coverage report.'
),
) -> List[Result]:
"""Run tests."""
coverage_flag = [f'--cov={PACKAGE_NAME}'] if coverage else []
return [
execute(['pytest', *coverage_flag, 'tests'], raise_error=False),
*(coverage_html() if coverage and html else ()),
]
| 22,954
|
def _get_win_folder_from_registry(csidl_name: Any) -> Any:
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names."""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
| 22,955
|
def transitions2kernelreward(transitions, num_states, num_actions):
"""Transform a dictionary of transitions to kernel, reward matrices."""
kernel = np.zeros((num_states, num_actions, num_states))
reward = np.zeros((num_states, num_actions))
for (state, action), transition in transitions.items():
for data in transition:
kernel[state, action, data["next_state"]] += data["probability"]
reward[state, action] += data["reward"] * data["probability"]
return kernel, reward
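# Added hand-built transition dictionary (hypothetical, for illustration only)
# for a 2-state, 1-action chain, showing the expected input layout.
transitions = {
    (0, 0): [
        {"next_state": 0, "probability": 0.7, "reward": 1.0},
        {"next_state": 1, "probability": 0.3, "reward": 0.0},
    ],
    (1, 0): [
        {"next_state": 0, "probability": 1.0, "reward": 5.0},
    ],
}
kernel, reward = transitions2kernelreward(transitions, num_states=2, num_actions=1)
# kernel[0, 0] == [0.7, 0.3]; reward[0, 0] == 0.7 * 1.0 + 0.3 * 0.0 == 0.7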
| 22,956
|
def _inline_svg(svg: str) -> str:
"""Encode SVG to be used inline as part of a data URI.
Replacements are not complete, but sufficient for this case.
See https://codepen.io/tigt/post/optimizing-svgs-in-data-uris
for details.
"""
replaced = (
svg
.replace('\n', '%0A')
.replace('#', '%23')
.replace('<', '%3C')
.replace('>', '%3E')
.replace('"', '\'')
)
return 'data:image/svg+xml,' + replaced
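# Added example: embedding a trivial SVG (made up here) as an inline data URI.
icon = '<svg xmlns="http://www.w3.org/2000/svg"><path fill="#fff" d="M0 0h8v8H0z"/></svg>'
print(_inline_svg(icon))
# -> data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='%23fff' ...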
| 22,957
|
def delete(email, organization=None):
"""
Delete user EMAIL.
"""
if organization:
org = models.Organization.get_by_slug(organization)
deleted_count = models.User.query.filter(
models.User.email == email, models.User.org == org.id
).delete()
else:
deleted_count = models.User.query.filter(models.User.email == email).delete(
synchronize_session=False
)
models.db.session.commit()
print("Deleted %d users." % deleted_count)
| 22,958
|
def update_msgs_name(person_pk):
"""Back date sender_name field on inbound sms."""
from apostello.models import Recipient, SmsInbound
person_ = Recipient.objects.get(pk=person_pk)
name = str(person_)
number = str(person_.number)
for sms in SmsInbound.objects.filter(sender_num=number):
sms.sender_name = name
sms.save()
| 22,959
|
def draw_random_DNA(current_base_turtle, base_index, letter):
"""
Draw a random sequence to be used later to create the complementary base pair
:param current_base_turtle: a turtle object
:param base_index: an index, to help position the turtle
:param letter: the letter being drawn
:return: None
"""
current_base_turtle.penup()
current_base_turtle.right(90)
current_base_turtle.setpos(-250 + 95*base_index, 230) # Moves the turtle right the appropriate amount
current_base_turtle.pendown()
current_base_turtle.shape("square")
current_base_turtle.pensize(10)
current_base_turtle.forward(50)
current_base_turtle.color(nucleotides[letter])
current_base_turtle.pensize(30)
current_base_turtle.forward(70)
current_base_turtle.backward(40)
current_base_turtle.color("black")
# draw out the letters for the base_turtles and return back to the center.
(xpos, ypos) = current_base_turtle.pos()
letter_turtle.setpos(xpos, ypos+5 )
letter_turtle.write(letter,move=False,align='center',font=("Arial",25,("bold","normal")))
letter_turtle.setpos(0,0)
| 22,960
|
def plot_extreme_edges(gdf: gpd.geodataframe.GeoDataFrame,
aoi: gpd.geodataframe.GeoDataFrame,
**kwargs) -> None:
"""
Plots extreme depths along edges along with an overview map showing current
plotted domain versus all other domains.
:param gdf:
:param aoi:
:param \**kwargs:
See below
:Keyword Arguments:
* *mini_map* (gpd.geodataframe.GeoDataFrame) -- Multiple domain perimeters.
"""
if 'mini_map' in kwargs.keys():
mini_map = kwargs['mini_map']
fig, (ax_string) = plt.subplots(1, 2, figsize=(20, 8))
ax1 = plt.subplot2grid((1, 2), (0, 0))
aoi.plot(color='k', alpha=0.25, ax=ax1)
gdf.plot(column='abs_max', cmap='viridis', legend=True, ax=ax1, markersize=16)
ax1.set_title('Cell Locations with Depths > 1 ft\n(Check for Ponding)'.format(len(gdf)),
fontsize=12, fontweight='bold')
ax1.axis('off')
ax2 = plt.subplot2grid((1, 2), (0, 1))
mini_map.plot(color='#BFBFBF', edgecolor='k', ax=ax2, markersize=16)
aoi.plot(color='#FFC0CB', edgecolor='k', ax=ax2)
ax2.set_title('Current domain (pink) compared to all domains (grey)'.format(len(gdf)),
fontsize=12, fontweight='bold')
ax2.axis('off')
else:
fig, ax = plt.subplots(figsize = (7,7))
aoi.plot(color='k', alpha=0.25, ax=ax)
gdf.plot(column='abs_max', cmap='viridis', legend=True, ax=ax, markersize=16)
ax.set_title('Cell Locations with Depths > 1 ft\n(Check for Ponding)'.format(len(gdf)),
fontsize=12, fontweight='bold')
ax.axis('off')
| 22,961
|
def contact_infectivity_asymptomatic_00x40():
"""
Real Name: b'contact infectivity asymptomatic 00x40'
Original Eqn: b'contacts per person normal 00x40*infectivity per contact'
Units: b'1/Day'
Limits: (None, None)
Type: component
b''
"""
return contacts_per_person_normal_00x40() * infectivity_per_contact()
| 22,962
|
def implemented_verified_documented(function):
""" Common story options """
options = [
click.option(
'--implemented', is_flag=True,
help='Implemented stories only.'),
click.option(
'--unimplemented', is_flag=True,
help='Unimplemented stories only.'),
click.option(
'--verified', is_flag=True,
help='Stories verified by tests.'),
click.option(
'--unverified', is_flag=True,
help='Stories not verified by tests.'),
click.option(
'--documented', is_flag=True,
help='Documented stories only.'),
click.option(
'--undocumented', is_flag=True,
help='Undocumented stories only.'),
click.option(
'--covered', is_flag=True,
help='Covered stories only.'),
click.option(
'--uncovered', is_flag=True,
help='Uncovered stories only.'),
]
for option in reversed(options):
function = option(function)
return function
| 22,963
|
def create_collection(self, name, url, sourceType, **options):
"""Creates a new collection from a web or S3 url. Automatically kick off default indexes"""
(endpoint, method) = self.endpoints['create_collection']
try:
headers = {'Authorization': self.token.authorization_header()}
data = {
'name': name,
'url': url,
'sourceType': sourceType,
'indexWithDefault': 'true' if options.get('indexWithDefault') else 'false'
}
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except Exception as e:
raise error.APIConnectionError(message=e)
| 22,964
|
def rmse(estimated: np.ndarray, true: np.ndarray) -> Union[np.ndarray, None]:
"""
Calculate the root-mean-squared error between two arrays.
:param estimated: estimated solution
:param true: 'true' solution
:return: root-mean-squared error
"""
return np.sqrt(((estimated - true) ** 2).mean(axis=1))
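# Added quick check: errors are computed per row (axis=1), one value per solution.
estimated = np.array([[1.0, 2.0, 3.0], [1.0, 1.0, 1.0]])
true = np.array([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]])
print(rmse(estimated, true))  # approx. [1.155, 1.0]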
| 22,965
|
def use_default_driver():
""" Use the default driver as the current driver. """
global current_driver
current_driver = None
| 22,966
|
def energy_com(data):
""" Calculate the energy center of mass for each day, and use this quantity
as an estimate for solar noon.
Function infers time stamps from the length of the first axis of the 2-D
data array.
:param data: PV power matrix as generated by `make_2d` from `solardatatools.data_transforms`
:return: A 1-D array, containing the solar noon estimate for each day in the data set
"""
data = np.copy(data)
data[np.isnan(data)] = 0
num_meas_per_hour = data.shape[0] / 24
x = np.arange(0, 24, 1. / num_meas_per_hour)
div1 = np.dot(x, data)
div2 = np.sum(data, axis=0)
com = np.empty_like(div1)
com[:] = np.nan
msk = div2 != 0
com[msk] = np.divide(div1[msk], div2[msk])
return com
| 22,967
|
def cifar_noniid(dataset, no_participants, alpha=0.9):
"""
Input: Number of participants and alpha (param for distribution)
Output: A list of indices denoting data in CIFAR training set.
Requires: cifar_classes, a preprocessed class-indice dictionary.
Sample Method: take a uniformly sampled 10-dimension vector as parameters for
dirichlet distribution to sample number of images in each class.
"""
np.random.seed(666)
random.seed(666)
cifar_classes = {}
for ind, x in enumerate(dataset):
_, label = x
if label in cifar_classes:
cifar_classes[label].append(ind)
else:
cifar_classes[label] = [ind]
per_participant_list = defaultdict(list)
no_classes = len(cifar_classes.keys())
class_size = len(cifar_classes[0])
datasize = {}
for n in range(no_classes):
random.shuffle(cifar_classes[n])
sampled_probabilities = class_size * np.random.dirichlet(
np.array(no_participants * [alpha]))
for user in range(no_participants):
no_imgs = int(round(sampled_probabilities[user]))
datasize[user, n] = no_imgs
sampled_list = cifar_classes[n][:min(len(cifar_classes[n]), no_imgs)]
per_participant_list[user].extend(sampled_list)
cifar_classes[n] = cifar_classes[n][min(len(cifar_classes[n]), no_imgs):]
train_img_size = np.zeros(no_participants)
for i in range(no_participants):
train_img_size[i] = sum([datasize[i,j] for j in range(10)])
clas_weight = np.zeros((no_participants,10))
for i in range(no_participants):
for j in range(10):
clas_weight[i,j] = float(datasize[i,j])/float((train_img_size[i]))
return per_participant_list, clas_weight
| 22,968
|
def example_metadata(
request,
l1_ls5_tarball_md_expected: Dict,
l1_ls7_tarball_md_expected: Dict,
l1_ls8_folder_md_expected: Dict,
):
"""
Test against arbitrary valid eo3 documents.
"""
which = request.param
if which == "ls5":
return l1_ls5_tarball_md_expected
elif which == "ls7":
return l1_ls7_tarball_md_expected
elif which == "ls8":
return l1_ls8_folder_md_expected
raise AssertionError
| 22,969
|
def register_middleware(app: FastAPI):
"""
Request/response interception hook
https://fastapi.tiangolo.com/tutorial/middleware/
:param app:
:return:
"""
@app.middleware("http")
async def logger_request(request: Request, call_next):
# https://stackoverflow.com/questions/60098005/fastapi-starlette-get-client-real-ip
logger.info(f"request:{request.method} url:{request.url}\nheaders:{request.headers.get('user-agent')}"
f"\nIP:{request.client.host}")
response = await call_next(request)
return response
| 22,970
|
def test_dtmc__matrix_copied():
"""
Validate that if matrix is passed as an array, it is copied, so changes
in the argument don't affect the chain matrix.
"""
matrix = np.asarray([[0.5, 0.5], [0.5, 0.5]])
chain = DiscreteTimeMarkovChain(matrix)
matrix[0, 0] = 0.42
assert chain.matrix[0, 0] == 0.5
| 22,971
|
def asfarray(a, dtype=_nx.float_):
"""
Return an array converted to float type.
Parameters
----------
a : array_like
Input array.
dtype : string or dtype object, optional
Float type code to coerce input array `a`. If one of the 'int' dtype,
it is replaced with float64.
Returns
-------
out : ndarray, float
Input `a` as a float ndarray.
Examples
--------
>>> np.asfarray([2, 3])
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='float')
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='int8')
array([ 2., 3.])
"""
dtype = _nx.obj2sctype(dtype)
if not issubclass(dtype, _nx.inexact):
dtype = _nx.float_
return asarray(a,dtype=dtype)
| 22,972
|
def handle_no_cache(context):
"""Handle lack-of-cache error, prompt user for index process."""
logger.error(
f"Could not locate wily cache, the cache is required to provide insights."
)
p = input("Do you want to run setup and index your project now? [y/N]")
if p.lower() != "y":
exit(1)
else:
revisions = input("How many previous git revisions do you want to index? : ")
revisions = int(revisions)
path = input("Path to your source files; comma-separated for multiple: ")
paths = path.split(",")
context.invoke(build, max_revisions=revisions, targets=paths, operators=None)
| 22,973
|
def test_single_dihedral(tmpdir):
"""Test running a torsiondrive for a molecule with one bond."""
with tmpdir.as_cwd():
mol = Ligand.from_file(get_data("ethane.sdf"))
# build a scanner with grid spacing 60 and clear out avoided methyl
qc_spec = QCOptions(program="rdkit", method="uff", basis=None)
local_ops = LocalResource(cores=1, memory=1)
tdrive = TorsionDriver(
n_workers=1,
grid_spacing=60,
)
t_scan = TorsionScan1D(torsion_driver=tdrive)
t_scan.clear_avoided_torsions()
result_mol = t_scan.run(molecule=mol, qc_spec=qc_spec, local_options=local_ops)
assert len(result_mol.qm_scans) == 1
# make sure input molecule coords were not changed
assert np.allclose(mol.coordinates, result_mol.coordinates)
| 22,974
|
def remove_stored_files(srr_file, store_files):
"""Remove files stored inside a SRR file.
srr_file: the SRR file to remove stored files from
store_files: list of files to be removed
must contain the relative path when necessary
raises ArchiveNotFoundError, NotSrrFile, TypeError"""
rr = RarReader(srr_file) # ArchiveNotFoundError
if rr.file_type() != RarReader.SRR:
raise NotSrrFile("Not an SRR file.")
# create a temporarily file
tmpfd, tmpname = mkstemp(prefix="remove_", suffix=".srr",
dir=os.path.dirname(srr_file))
tmpfile = os.fdopen(tmpfd, "wb")
try:
for block in rr.read_all():
if block.rawtype == BlockType.SrrStoredFile:
if block.file_name in store_files:
_fire(MsgCode.DEL_STORED_FILE,
message="'%s' deleted." % block.file_name)
else: # write header and stored file
tmpfile.write(block.block_bytes())
tmpfile.write(block.srr_data())
else: # TODO: write better tests here!!
tmpfile.write(block.block_bytes())
tmpfile.close()
os.remove(srr_file)
os.rename(tmpname, srr_file)
except BaseException as ex:
print(ex)
os.unlink(tmpname)
raise
| 22,975
|
def test_green_color():
"""Test GREEN Constant"""
assert settings.Colors.GREEN == (0, 255, 0)
| 22,976
|
def read_geotransform_s2(path, fname='MTD_TL.xml', resolution=10):
"""
Parameters
----------
path : string
location where the meta data is situated
fname : string
file name of the meta-data file
resolution : {float,integer}, unit=meters, default=10
resolution of the grid
Returns
-------
geoTransform : tuple, size=(1,6)
affine transformation coefficients
Notes
-----
The metadata is scattered over the file structure of Sentinel-2, L1C
.. code-block:: text
* S2X_MSIL1C_20XX...
├ AUX_DATA
├ DATASTRIP
│ └ DS_XXX_XXXX...
│ └ QI_DATA
│ └ MTD_DS.xml <- metadata about the data-strip
├ GRANULE
│ └ L1C_TXXXX_XXXX...
│ ├ AUX_DATA
│ ├ IMG_DATA
│ ├ QI_DATA
│ └ MTD_TL.xml <- metadata about the tile
├ HTML
├ rep_info
├ manifest.safe
├ INSPIRE.xml
└ MTD_MSIL1C.xml <- metadata about the product
The following acronyms are used:
- DS : datastrip
- TL : tile
- QI : quality information
- AUX : auxiliary
- MTD : metadata
- MSI : multi spectral instrument
- L1C : product specification,i.e.: level 1, processing step C
"""
root = get_root_of_table(path, fname)
# image dimensions
for meta in root.iter('Geoposition'):
res = float(meta.get('resolution'))
if res == resolution:
ul_X,ul_Y= float(meta[0].text), float(meta[1].text)
d_X, d_Y = float(meta[2].text), float(meta[3].text)
geoTransform = (ul_X, d_X, 0., ul_Y, 0., d_Y)
return geoTransform
| 22,977
|
def embed_into_hbox_layout(w, margin=5):
"""Embed a widget into a layout to give it a frame"""
result = QWidget()
layout = QHBoxLayout(result)
layout.setContentsMargins(margin, margin, margin, margin)
layout.addWidget(w)
return result
| 22,978
|
def make_word_ds(grids, trfiles, bad_words=DEFAULT_BAD_WORDS):
"""Creates DataSequence objects containing the words from each grid, with any words appearing
in the [bad_words] set removed.
"""
ds = dict()
stories = grids.keys()
for st in stories:
grtranscript = grids[st].tiers[1].make_simple_transcript()
## Filter out bad words
goodtranscript = [x for x in grtranscript
if x[2].lower().strip("{}").strip() not in bad_words]
d = DataSequence.from_grid(goodtranscript, trfiles[st][0])
ds[st] = d
return ds
| 22,979
|
def fslimage_to_qpdata(img, name=None, vol=None, region=None, roi=False):
""" Convert fsl.data.Image to QpData """
if not name: name = img.name
if vol is not None:
data = img.data[..., vol]
else:
data = img.data
if region is not None:
        data = (data == region).astype(int)
return NumpyData(data, grid=DataGrid(img.shape[:3], img.voxToWorldMat), name=name, roi=roi)
| 22,980
|
def xgb_cv(
    data_, test_, y_, max_depth, gamma, reg_lambda, reg_alpha,
    subsample, scale_pos_weight, min_child_weight, colsample_bytree,
    test_phase=False, stratify=False,
):
"""XGBoost cross validation.
This function will instantiate a XGBoost classifier with parameters
such as max_depth, subsample etc. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of parameters that maximizes AUC.
Returns:
if test_phase (and new data for validators, just change the test_ param
to the new data and make sure that the features are processed):
sub_preds : models prediction to get the hold-out score
else:
validation AUC score
Model Notes:
XGBoost overfits in this dataset, params should be set accordingly.
Parameter Notes
gamma : Minimum loss reduction required to make a further partition on a leaf \
node of the tree. The larger gamma is, the more conservative the algorithm will be.
min_child_weight : The larger min_child_weight is, the more conservative the algorithm will be.
colsample_bytree : The subsample ratio of columns when constructing each tree.
scale_pos_weight : A typical value to consider: sum(negative instances) / sum(positive instances)
"""
oof_preds = np.zeros(data_.shape[0])
sub_preds = np.zeros(test_.shape[0])
    # optimisers pass max_depth as a float; XGBoost needs an integer
    max_depth = int(np.round(max_depth))
    feats = [f for f in data_.columns if f not in ['bookingid', 'label']]
    if stratify:
        folds_ = StratifiedKFold(n_splits=4, shuffle=True, random_state=610)
        splitted = folds_.split(data_, y_)
    else:
        # plain KFold (sklearn.model_selection) so folds_ is defined in both branches
        folds_ = KFold(n_splits=4, shuffle=True, random_state=610)
        splitted = folds_.split(data_)
for n_fold, (trn_idx, val_idx) in enumerate(splitted):
trn_x, trn_y = data_[feats].iloc[trn_idx], y_.iloc[trn_idx]
val_x, val_y = data_[feats].iloc[val_idx], y_.iloc[val_idx]
xg_train = xgb.DMatrix(
trn_x.values, label=trn_y.values
)
xg_valid = xgb.DMatrix(
val_x.values, label=val_y.values
)
watchlist = [(xg_train, 'train'),(xg_valid, 'eval')]
num_round=10000
param = {
'gamma' : gamma,
'max_depth':max_depth,
'colsample_bytree':colsample_bytree,
'subsample':subsample,
'min_child_weight':min_child_weight,
'objective':'binary:logistic',
'random_state':1029,
'n_jobs':8,
'eval_metric':'auc',
'metric': 'auc',
'scale_pos_weight':scale_pos_weight,
'eta':0.05,
'silent':True
}
clf = xgb.train(param, xg_train, num_round, watchlist, verbose_eval=100, early_stopping_rounds = 100)
oof_preds[val_idx] = clf.predict(xgb.DMatrix(data_[feats].iloc[val_idx].values), ntree_limit=clf.best_ntree_limit)
if test_phase:
sub_preds += clf.predict(xgb.DMatrix(test_[feats].values), ntree_limit=clf.best_ntree_limit) / folds_.n_splits
print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
del clf, trn_x, trn_y, val_x, val_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(y_, oof_preds))
if test_phase:
return sub_preds
else:
return roc_auc_score(y_, oof_preds)
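# Hedged usage sketch (not part of the original source): wiring xgb_cv into
# bayes_opt's BayesianOptimization to search the hyper-parameter space; the
# bounds, train_df, test_df and target names below are hypothetical.
#
#   from bayes_opt import BayesianOptimization
#
#   def xgb_evaluate(max_depth, gamma, reg_lambda, reg_alpha, subsample,
#                    scale_pos_weight, min_child_weight, colsample_bytree):
#       return xgb_cv(train_df, test_df, target, max_depth, gamma, reg_lambda,
#                     reg_alpha, subsample, scale_pos_weight, min_child_weight,
#                     colsample_bytree, stratify=True)
#
#   optimizer = BayesianOptimization(
#       f=xgb_evaluate,
#       pbounds={'max_depth': (3, 8), 'gamma': (0, 5), 'reg_lambda': (0, 5),
#                'reg_alpha': (0, 5), 'subsample': (0.5, 1.0),
#                'scale_pos_weight': (1, 10), 'min_child_weight': (1, 20),
#                'colsample_bytree': (0.5, 1.0)},
#       random_state=1029,
#   )
#   optimizer.maximize(init_points=5, n_iter=25)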
| 22,981
|
def _infer_elem_type(list_var):
"""
Returns types.tensor. None if failed to infer element type.
Example:
Given:
main(%update: (2,fp32)) {
block0() {
%list: List[unknown] = tf_make_list(...) # unknown elem type
%while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
while_loop_0_body(...) {
%list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
} -> (%add_0, %list_write_0)
Result:
main(%update: (2,fp32)) {
block0() {
%list: List[(2,fp32)] = tf_make_list(...) # Get the elem type from list_write
%while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
while_loop_0_body(...) {
%list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
} -> (%add_0, %list_write_0)
"""
# Search for child op that have informative element types
for o in list_var.child_ops:
if o.op_type in ["list_write", "list_scatter"]:
return o.outputs[0].elem_type
if o.op_type == "while_loop":
idx = list(o.loop_vars).index(list_var)
block = o.blocks[0]
# the corresponding Var in body block
block_var = block.inputs[idx]
elem_type = _infer_elem_type(block_var)
if elem_type is not None:
def _set_types_for_block_inputs(block):
block_var = block.inputs[idx]
new_block_var = ListVar(name=block_var.name, elem_type=elem_type,
init_length=block_var.sym_type.T[1],
dynamic_length=block_var.sym_type.T[2])
block._replace_var(block_var, new_block_var)
_set_types_for_block_inputs(o.blocks[0]) # condition block
_set_types_for_block_inputs(o.blocks[1]) # body block
return elem_type
# otherwise continue to other block_var (a list_var can be
# passed into while_loop twice).
return None
| 22,982
|
def plot_hairy_mean_binstat_base(
list_of_pred_true_weight_label_color, key, spec,
is_rel = False, err = 'rms'
):
"""Plot binstats of means of relative energy resolution vs true energy."""
spec = spec.copy()
if spec.title is None:
spec.title = 'MEAN + E[ %s ]' % (err.upper())
else:
spec.title = '(MEAN + E[ %s ]) ( %s )' % (err.upper(), spec.title)
f, ax = plt.subplots()
for pred,true,weights,label,color in list_of_pred_true_weight_label_color:
x = true[key]
y = (pred[key] - true[key])
if is_rel:
y = y / x
plot_hairy_mean_binstat_single(
ax, x, y, weights, spec.bins_x, color, label, err
)
ax.axhline(0, 0, 1, color = 'C2', linestyle = 'dashed')
spec.decorate(ax)
ax.legend()
return f, ax
| 22,983
|
def log_system_status():
"""
Print the status of the system
"""
module_available=True
try:
import psutil
except ImportError:
module_available=False
if module_available:
try:
# record the memory used
memory = psutil.virtual_memory()
logger.info("Total memory = " + str(byte_to_gigabyte(memory.total)) + " GB")
logger.info("Available memory = " + str(byte_to_gigabyte(memory.available)) + " GB")
logger.info("Free memory = " + str(byte_to_gigabyte(memory.free)) + " GB")
logger.info("Percent memory used = " + str(memory.percent) + " %")
# record the cpu info
logger.info("CPU percent = " + str(psutil.cpu_percent()) + " %")
logger.info("Total cores count = " + str(psutil.cpu_count()))
# record the disk usage
disk = psutil.disk_usage('/')
logger.info("Total disk = " + str(byte_to_gigabyte(disk.total)) + " GB")
logger.info("Used disk = "+ str(byte_to_gigabyte(disk.used)) + " GB")
logger.info("Percent disk used = " + str(disk.percent) + " %")
# record information about this current process
process=psutil.Process()
process_memory=process.memory_info()
process_create_time=datetime.datetime.fromtimestamp(
process.create_time()).strftime("%Y-%m-%d %H:%M:%S")
process_cpu_times=process.cpu_times()
# two calls required to cpu percent for non-blocking as per documentation
process_cpu_percent=process.cpu_percent()
process_cpu_percent=process.cpu_percent()
logger.info("Process create time = " + process_create_time)
logger.info("Process user time = " + str(process_cpu_times.user) + " seconds")
logger.info("Process system time = " + str(process_cpu_times.system) + " seconds")
logger.info("Process CPU percent = " + str(process_cpu_percent) + " %")
logger.info("Process memory RSS = " + str(byte_to_gigabyte(process_memory.rss)) + " GB")
logger.info("Process memory VMS = " + str(byte_to_gigabyte(process_memory.vms)) + " GB")
logger.info("Process memory percent = " + str(process.memory_percent()) + " %")
except (AttributeError, OSError, TypeError, psutil.Error):
pass
| 22,984
|
def del_all(widget, event, data):
"""
    Delete all parameters from the sheet data table.
"""
sheet.items = []
| 22,985
|
def parse_csv_file(file_contents):
"""
The helper function which converts the csv file into a dictionary where each
item's key is the provided value 'id' and each item's value is another
dictionary.
"""
list_of_contents = file_contents.split('\n')
key, lines = (list_of_contents[0].split(','),
list_of_contents[1:])
objects_dict = {}
# Build a dictionary
for index, line in enumerate(lines):
if line.isspace() or line == u'': continue
        values = next(unicode_csv_reader([line]))
line_dict = dict([(key[i], val)
for i, val in enumerate(values)])
media_id = line_dict.get('id') or index
objects_dict[media_id] = (line_dict)
return objects_dict
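# Hedged usage sketch (not part of the original source): a tiny hypothetical
# CSV string shows the expected result shape, keyed by the 'id' column.
#
#   contents = "id,title\n1,First\n2,Second\n"
#   parse_csv_file(contents)
#   # -> {'1': {'id': '1', 'title': 'First'},
#   #     '2': {'id': '2', 'title': 'Second'}}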
| 22,986
|
def get_config():
"""
Returns the current bot config.
"""
return BOT_CONFIG
| 22,987
|
def SensorLocation_Meta():
"""SensorLocation_Meta() -> MetaObject"""
return _DataModel.SensorLocation_Meta()
| 22,988
|
def plot_sample_eval(images: list,
sub_titles=None,
main_title=None,
vmin=None, vmax=None,
label_str=None, pred_str=None,
additional_info=None,
show_plot=False, save_as=None):
"""
Plots one or multiple images in a row, including titles and additional information, if given.
Recommended to use for visualising network input, prediction, label etc. of a data sample or time step
Args:
images (list[2D numpy.ndarray]): Images to display in the plot, e.g. sensor frames, flowfronts etc.
sub_titles (list[str]): list of titles that will be displayed above the corresponding image. Length should match
the number of images
main_title (str): the main title displayed at the top
vmin (list[float or int]): set the min value for each subplot manually (useful for time series plots).
Length should match the number of images
vmax (list[float or int]): set the max value for each subplot manually (useful for time series plots).
Length should match the number of images
label_str: Label as a string (useful if label is a class, not an image)
pred_str: Prediction as a string (useful if prediction is a class, not an image)
additional_info (list[str]): List of strings that will be displayed at the bottom of the plot. Each list entry
is put in a new row.
show_plot: if True, the plot will be shown in a window during runtime
save_as (pathlib.Path or str): full path, including filename and type (e.g. '/cfs/example/output.png')
"""
assert bool(images)
assert sub_titles is None or len(sub_titles) == len(images)
    assert vmin is None or len(vmin) == len(images)
    assert vmax is None or len(vmax) == len(images)
plt.rcParams['figure.constrained_layout.use'] = True
# set up figure size and basic structure
ratio = images[0].shape[0] / images[0].shape[1]
base_size = 4
text_space = 0.35 if main_title is not None else 0
text_space += 0.35 if label_str is not None else 0
text_space += 0.35 if pred_str is not None else 0
text_space += 0.35 * len(additional_info) if additional_info is not None else 0
figsize = (base_size * len(images), base_size * ratio + text_space)
fig, axs = plt.subplots(1, len(images), figsize=figsize)
if len(images) == 1:
axs = [axs]
if main_title is not None:
fig.suptitle(main_title)
for i, img in enumerate(images):
axs[i].imshow(img, vmin=None if vmin is None else vmin[i], vmax=None if vmax is None else vmax[i])
axs[i].set(xticks=[], yticks=[], title=None if sub_titles is None else sub_titles[i])
text = ""
color = 'black'
if label_str is not None:
text += f"{'Label: ':8}{label_str}"
if label_str is not None and pred_str is not None:
color = 'green' if label_str == pred_str else 'red'
text += '\n'
if pred_str is not None:
text += f"{'Pred: ':8}{pred_str}"
if additional_info is not None:
for info in additional_info:
text += f"\n{info}"
plt.figtext(0.01, 0.01, text, c=color, ha='left')
if show_plot:
plt.show()
if save_as is not None:
Path(save_as).parent.mkdir(parents=True, exist_ok=True)
plt.savefig(save_as)
return fig
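# Hedged usage sketch (not part of the original source): the arrays, titles and
# output path are hypothetical; the call mirrors the documented arguments.
#
#   frame = np.random.rand(64, 64)
#   label = np.random.rand(64, 64)
#   plot_sample_eval([frame, label],
#                    sub_titles=['sensor input', 'label'],
#                    main_title='sample 0, step 12',
#                    additional_info=['run: example_run'],
#                    save_as='output/sample_0_step_12.png')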
| 22,989
|
def predict():
"""
Get data and do the same processing as when we prototyped,
because we need to normalize based on training data summary stats
:return:
"""
data = pd.read_csv('data.csv')
df = data.drop("Unnamed: 32", axis=1)
df = data.drop("id", axis=1)
df.drop(columns=["Unnamed: 32"], inplace=True)
X = df.drop(labels="diagnosis", axis=1)
input_data = []
for col in cols:
input_data.append(float(request.form[col]))
df_norm = (input_data - X.mean()) / (X.max() - X.min())
    # sklearn estimators expect a 2-D array; reshape the single normalised row
    pred = loaded_model.predict(df_norm.values.reshape(1, -1))
if pred == 1:
return "Prediction : Benign Tumor Found"
else:
return "Prediction : Malignant Tumor Found"
| 22,990
|
def test_eqpt_creation(tmpdir):
""" tests that convert correctly creates equipment according to equipment sheet
    including all combinations in testTopologyconvert.xls: if a line exists, the amplifier
should be created even if no values are provided.
"""
xls_input = DATA_DIR / 'testTopologyconvert.xls'
xls_copy = Path(tmpdir) / xls_input.name
shutil.copyfile(xls_input, xls_copy)
convert_file(xls_copy)
actual_json_output = xls_copy.with_suffix('.json')
actual = load_json(actual_json_output)
unlink(actual_json_output)
connections = {elem['from_node']: elem['to_node'] for elem in actual['connections']}
jsonconverted = {}
for elem in actual['elements']:
if 'type' in elem.keys() and elem['type'] == 'Edfa':
print(elem['uid'])
if 'type_variety' in elem.keys():
jsonconverted[elem['uid']] = Amp(elem['uid'], connections[elem['uid']], elem['type_variety'])
else:
jsonconverted[elem['uid']] = Amp(elem['uid'], connections[elem['uid']])
with open_workbook(xls_input) as wobo:
# reading Eqpt sheet assuming header is node A, Node Z, amp variety
# fused should not be recorded as an amp
eqpt_sheet = wobo.sheet_by_name('Eqpt')
raw_eqpts = {}
for row in all_rows(eqpt_sheet, start=5):
if row[0].value not in raw_eqpts.keys():
raw_eqpts[row[0].value] = Amp(row[0].value, [row[1].value], [row[2].value], [row[7].value])
else:
raw_eqpts[row[0].value].to_node.append(row[1].value)
raw_eqpts[row[0].value].eqpt.append(row[2].value)
raw_eqpts[row[0].value].west.append(row[7].value)
# create the possible names similarly to what convert should do
possiblename = [f'east edfa in {xlsname} to {node}' for xlsname, value in raw_eqpts.items()
for i, node in enumerate(value.to_node) if value.eqpt[i] != 'fused'] +\
[f'west edfa in {xlsname} to {node}' for xlsname, value in raw_eqpts.items()
for i, node in enumerate(value.to_node) if value.west[i] != 'fused']
# check that all lines in eqpt sheet correctly converts to an amp element
for name in possiblename:
assert name in jsonconverted.keys()
# check that all amp in the converted files corresponds to an eqpt line
for ampuid in jsonconverted.keys():
assert ampuid in possiblename
| 22,991
|
def format_number(number, num_decimals=2):
"""
Format a number as a string including thousands separators.
:param number: The number to format (a number like an :class:`int`,
:class:`long` or :class:`float`).
:param num_decimals: The number of decimals to render (2 by default). If no
decimal places are required to represent the number
they will be omitted regardless of this argument.
:returns: The formatted number (a string).
This function is intended to make it easier to recognize the order of size
of the number being formatted.
Here's an example:
>>> from humanfriendly import format_number
>>> print(format_number(6000000))
6,000,000
    >>> print(format_number(6000000000.42))
    6,000,000,000.42
    >>> print(format_number(6000000000.42, num_decimals=0))
6,000,000,000
"""
integer_part, _, decimal_part = str(float(number)).partition('.')
reversed_digits = ''.join(reversed(integer_part))
parts = []
while reversed_digits:
parts.append(reversed_digits[:3])
reversed_digits = reversed_digits[3:]
formatted_number = ''.join(reversed(','.join(parts)))
decimals_to_add = decimal_part[:num_decimals].rstrip('0')
if decimals_to_add:
formatted_number += '.' + decimals_to_add
return formatted_number
| 22,992
|
def test_fov_standard():
""" NB: For tests on #STARS section, see test_fov_stare_and_star_list() above."""
fov_name = "Std_SA100"
f = fov.Fov("Std_SA100", TEST_FOV_DIRECTORY)
assert f.fov_name == fov_name
assert f.format_version == CURRENT_SCHEMA_VERSION
assert f.ra == util.ra_as_degrees("08:53:14.3")
assert f.dec == util.dec_as_degrees("-00:37:56")
assert f.chart == "X15687X"
assert f.fov_date == "12/20/2015"
assert f.period is None
assert f.motive == ''
assert f.acp_comments == ''
assert (f.JD_bright, f.JD_faint, f.JD_second) == (None, None, None)
assert (f.mag_V_bright, f.mag_V_faint, f.mag_V_second) == (None, None, None)
assert (f.color_VI_bright, f.color_VI_faint, f.color_VI_second) == (None, None, None)
assert f.main_target == "Standard"
assert f.target_type == "Standard"
assert f.observing_style == "Standard"
assert f.alert is None
assert len(f.observing_list) == 3
assert f.observing_list == [("V", 11.5, 1), ("R", 11.2, 1), ("I", 10.2, 1)]
assert f.max_exposure is None
assert f.priority is None
assert f.gap_score_days is None
assert f.punches == []
assert len(f.aavso_stars) == 6
assert all([star.is_valid for star in f.aavso_stars])
assert [star.star_id for star in f.aavso_stars] == \
['118', '130', '101', '114', '124', '92']
| 22,993
|
def query_rockets():
"""
request all rockets
"""
query = '''
{
rockets {
id
}
}
'''
return query
| 22,994
|
def copyfile(path, dest_dir, workspace = ""):
"""
path the full filepath to a file
dest_dir destination for copy
returns the full filepath of the new destination
removes the workspace from the filepath to give a
workspace relative filepath.
"""
if os.path.isfile(path):
head, tail = os.path.split(path)
destination = os.path.join(workspace, dest_dir, tail)
if not os.path.isfile(destination):
shutil.copy(path, destination)
print("Added {0}".format(destination))
else:
print("Found {0}".format(destination))
return destination.replace(workspace + "\\", "")
else:
print("{0} is an invalid filepath!".format(path))
return None
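# Hedged usage sketch (not part of the original source): all paths are
# hypothetical Windows paths, matching the backslash handling above.
#
#   rel = copyfile("C:\\ws\\assets\\logo.png", "images", workspace="C:\\ws")
#   # copies to C:\ws\images\logo.png and returns "images\\logo.png"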
| 22,995
|
def test_particular_store_search(browser):
"""This test fails because store finder filter doesn't work."""
pass
| 22,996
|
def get_block_len(built_prims, prim_type):
""" Calculates the maximum block length for a given primitive type """
retval = 0
for _, p in built_prims:
if p.prim_type == prim_type:
retval = max(retval, p.block_len)
return retval
| 22,997
|
def test_cat3(capsys, thresh_files, args):
""" Test the behavior of CAT on a simple file and creating a column"""
args = ("A=" + str(thresh_files["pass_a.txt"]) + " cat" + args).split()
thresh.main(args)
out, err = capsys.readouterr()
assert out == """ a b c d
+7.00000000000000000e+00 +8.00000000000000000e+00 +2.00000000000000000e+00 +1.50000000000000000e+01
+0.00000000000000000e+00 +5.00000000000000000e+00 +0.00000000000000000e+00 +5.00000000000000000e+00
+1.00000000000000000e+00 +2.00000000000000000e+00 +3.00000000000000000e+00 +3.00000000000000000e+00
+3.00000000000000000e+00 +4.00000000000000000e+00 +5.00000000000000000e+00 +7.00000000000000000e+00
+7.00000000000000000e+00 +1.00000000000000000e+00 +4.00000000000000000e+00 +8.00000000000000000e+00
"""
| 22,998
|
def _interpolate_gather(array, x):
"""
Like ``torch.gather(-1, array, x)`` but continuously indexes into the
rightmost dim of an array, linearly interpolating between array values.
"""
with torch.no_grad():
x0 = x.floor().clamp(min=0, max=array.size(-1) - 2)
x1 = x0 + 1
f0 = _gather(array, -1, x0.long())
f1 = _gather(array, -1, x1.long())
return f0 * (x1 - x) + f1 * (x - x0)
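# Hedged usage sketch (not part of the original source): interpolate halfway
# between array[1] == 10 and array[2] == 20; the linear blend should return 15.
if __name__ == "__main__":
    demo_array = torch.tensor([0.0, 10.0, 20.0, 30.0])
    demo_x = torch.tensor([1.5])
    print(_interpolate_gather(demo_array, demo_x))  # tensor([15.])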
| 22,999
|