| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| path: setup.py | repo: ELC/testnbdev | stars: 1 | id: 12000 |
<reponame>ELC/testnbdev<filename>setup.py
from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
config = config['DEFAULT']
config_keys = 'version description keywords author author_email'.split()
expected = config_keys + "lib_name user branch license status min_python audience language".split()
for setting in expected:
assert setting in config, f"missing expected setting: {setting}"
setup_config = {setting:config[setting] for setting in config_keys}
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9'.split()
requirements = config.get('requirements','').split()
lic = licenses[config['license']]
min_python = config['min_python']
setuptools.setup(
name = config['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(config['status'])],
'Intended Audience :: ' + config['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + config['language'].title(),
] + [f'Programming Language :: Python :: {version}' for version in py_versions[py_versions.index(min_python):]],
url = config['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
dependency_links = config.get('dep_links','').split(),
python_requires = '>=' + config['min_python'],
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': config.get('console_scripts','').split() },
**setup_config)
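# Illustrative sketch (not part of the original file): a minimal settings.ini that
# would satisfy the assertions above. The key names come from `expected` plus the
# extra keys read further down (git_url, requirements); every value shown here is a
# placeholder, not the project's real configuration.
#
#   [DEFAULT]
#   lib_name = testnbdev
#   user = ELC
#   branch = master
#   version = 0.0.1
#   description = Example package managed with nbdev
#   keywords = nbdev
#   author = Example Author
#   author_email = author@example.com
#   license = apache2
#   status = 2
#   min_python = 3.6
#   audience = Developers
#   language = English
#   requirements =
#   git_url = https://github.com/ELC/testnbdev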
| score: 2.359375 | int_score: 2 |
| path: amaascore/tools/generate_party.py | repo: amaas-fintech/amaas-core-sdk-python | stars: 0 | id: 12001 |
<reponame>amaas-fintech/amaas-core-sdk-python<filename>amaascore/tools/generate_party.py
from __future__ import absolute_import, division, print_function, unicode_literals
from amaasutils.random_utils import random_string, random_decimal
import random
from amaascore.core.reference import Reference
from amaascore.parties.asset_manager import AssetManager
from amaascore.parties.broker import Broker
from amaascore.parties.children import Address, Email
from amaascore.parties.individual import Individual
from amaascore.parties.party import Party
def generate_common(asset_manager_id, party_id, party_status):
common = {'asset_manager_id': asset_manager_id or random.randint(1, 1000),
'party_id': party_id or str(random.randint(1, 1000)),
'party_status': party_status or 'Active',
'display_name': random_string(10),
'legal_name': random_string(10),
'url': random_string(10)
}
return common
def generate_party(asset_manager_id=None, party_id=None, party_status=None):
references = {'PartyDB': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
party = Party(**attributes)
# This is ok from a mutability perspective as the references collection doesn't trigger anything
party.references.update(references)
party.upsert_address('Registered', generate_address(address_primary=True))
party.upsert_email('Office', generate_email(email_primary=True))
return party
def generate_asset_manager(asset_manager_id=None, party_id=None, party_status=None):
references = {'LEI': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
asset_manager = AssetManager(**attributes)
asset_manager.references.update(references)
asset_manager.upsert_address('Registered', generate_address(address_primary=True))
asset_manager.upsert_email('Office', generate_email(email_primary=True))
return asset_manager
def generate_broker(asset_manager_id=None, party_id=None, party_status=None):
references = {'LEI': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
broker = Broker(**attributes)
broker.references.update(references)
broker.upsert_address('Registered', generate_address(address_primary=True))
broker.upsert_email('Office', generate_email(email_primary=True))
return broker
def generate_individual(asset_manager_id=None, party_id=None, party_status=None):
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
individual = Individual(given_names=random_string(10), surname=random_string(10), **attributes)
return individual
def generate_address(country_id=None, address_primary=False):
address = Address(line_one=random_string(20),
line_two=random.choice([None, random_string(10)]),
city=random_string(10),
region=random_string(10),
postal_code=random_string(6),
country_id=country_id or random_string(3), # Make this a real country code
address_primary=address_primary)
return address
def generate_email(email=None, email_primary=False):
return Email(email=email or (random_string(10) + '@amaas.com'), email_primary=email_primary)
def generate_parties(asset_manager_ids=None, number=5):
    # Avoid the mutable default argument and random.choice([]) raising IndexError
    # when no asset_manager_ids are supplied.
    parties = []
    for _ in range(number):
        asset_manager_id = random.choice(asset_manager_ids) if asset_manager_ids else None
        party = generate_party(asset_manager_id=asset_manager_id)
        parties.append(party)
    return parties
| score: 2.0625 | int_score: 2 |
| path: fitgrid/utils/lmer.py | repo: vishalbelsare/fitgrid | stars: 10 | id: 12002 |
# -*- coding: utf-8 -*-
"""User functions to streamline working with selected pymer4 LMER fit
attributes from lme4::lmer and lmerTest for ``fitgrid.lmer`` grids.
"""
import functools
import re
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
import fitgrid
from fitgrid.fitgrid import LMERFitGrid
def get_lmer_dfbetas(epochs, factor, **kwargs):
r"""Fit lmers leaving out factor levels one by one, compute DBETAS.
Parameters
----------
epochs : Epochs
Epochs object
factor : str
column name of the factor of interest
**kwargs
keyword arguments to pass on to ``fitgrid.lmer``, like ``RHS``
Returns
-------
dfbetas : pandas.DataFrame
dataframe containing DFBETAS values
Examples
--------
Example calculation showing how to pass in model fitting parameters::
dfbetas = fitgrid.utils.lmer.get_lmer_dfbetas(
epochs=epochs,
factor='subject_id',
            RHS='x + (x|a)'
)
Notes
-----
DFBETAS is computed according to the following formula [NieGroPel2012]_:
.. math::
DFBETAS_{ij} = \frac{\hat{\gamma}_i - \hat{\gamma}_{i(-j)}}{se\left(\hat{\gamma}_{i(-j)}\right)}
for parameter :math:`i` and level :math:`j` of ``factor``.
"""
# get the factor levels
table = epochs.table.reset_index().set_index(
[epochs.epoch_id, epochs.time]
)
levels = table[factor].unique()
# produce epochs tables with each level left out
looo_epochs = (
fitgrid.epochs_from_dataframe(
table[table[factor] != level],
time=epochs.time,
epoch_id=epochs.epoch_id,
channels=epochs.channels,
)
for level in levels
)
# fit lmer on these epochs
fitter = functools.partial(fitgrid.lmer, **kwargs)
grids = map(fitter, looo_epochs)
coefs = (grid.coefs for grid in grids)
# get coefficient estimates and se from leave one out fits
looo_coefs = pd.concat(coefs, keys=levels, axis=1)
looo_estimates = looo_coefs.loc[pd.IndexSlice[:, :, 'Estimate'], :]
looo_se = looo_coefs.loc[pd.IndexSlice[:, :, 'SE'], :]
# get coefficient estimates from regular fit (all levels included)
all_levels_coefs = fitgrid.lmer(epochs, **kwargs).coefs
all_levels_estimates = all_levels_coefs.loc[
pd.IndexSlice[:, :, 'Estimate'], :
]
# drop outer level of index for convenience
for df in (looo_estimates, looo_se, all_levels_estimates):
df.index = df.index.droplevel(level=-1)
# (all_levels_estimate - level_excluded_estimate) / level_excluded_se
dfbetas = all_levels_estimates.sub(looo_estimates, level=1).div(
looo_se, level=1
)
return dfbetas.stack(level=0)
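# Illustrative sketch (not part of the original module): the DFBETAS formula from the
# docstring above applied to made-up numbers, independent of fitgrid/pymer4.
def _dfbetas_toy_example():
    """DFBETAS for one coefficient with three left-out levels of ``factor``."""
    import numpy as np

    full_estimate = 1.00                          # estimate from the fit on all levels
    loo_estimates = np.array([0.90, 1.05, 1.20])  # estimates with level j left out
    loo_se = np.array([0.10, 0.20, 0.25])         # standard errors of the left-out fits
    return (full_estimate - loo_estimates) / loo_se  # -> [1.0, -0.25, -0.8]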
def get_lmer_warnings(lmer_grid):
"""grid the LMERFitGrid lme4::lmer4 warnings by type
lmer warnings are a mishmash of characters, punctuation, and digits, some with
numerical values specific to the message, for instance,
| Model failed to converge with max|grad| = 0.00222262 (tol = 0.002, component 1)
| unable to evaluate scaled gradient
| boundary (singular) fit: see ?isSingular
| np.nan
The warning strings are returned as-is except for stripping
leading and trailing whitespace and the "= N.NNNNNNNN" portion of the
max \|grad\| convergence failure.
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
Returns
-------
warning_grids : dict
A dictionary, the keys are lmer warning strings, each value
is a `pandas.DataFrame` indicator grid where grid.loc[time, channel] == 1 if the
lmer warning == key, otherwise 0.
"""
if not isinstance(lmer_grid, LMERFitGrid):
msg = (
"get_lmer_warnings() must be called on an "
f"LMERFitGrid not {type(lmer_grid)}"
)
raise ValueError(msg)
# In pymer4 0.7.1+ and lme4::lmer 0.22+ warnings come back from
# lme4::lmer via pymer4 as list of strings and each LMERFitgrid
# cell may have a list of 0, 1, 2, ... ? warnings. This means
# LMERFitGrid.warnings time index may have missing time stamps (= no
# warnings), a single time stamp (one warning), or duplicate time
# stamps (> 1 warning) and np.nan at channels where there is no
# warning at that timestamp.
# strip reported decimal values so max|grad| convergence failures are one kind
tidy_strings = lmer_grid.warnings.applymap(
lambda x: re.sub(
r"max\|grad\|\s+=\s+\d+\.\d+\s+", "max|grad| ", x
).strip()
if isinstance(x, str)
else x # no warning == np.nan
).rename_axis([lmer_grid.time, "wdx", "_empty"], axis=0)
# the number and types of warning generally vary by time and/or channel
warning_kinds = (
pd.Series(tidy_strings.to_numpy().flatten()).dropna().unique()
)
# collect messy gappy, multiple warnings as a dict of key==warning,
# value==tidy time x channel indicator grid (0, 1)
warning_grids = {}
assert lmer_grid._grid.shape == lmer_grid.has_warning.shape
for warning_kind in warning_kinds:
# empty grid w/ correct shape, row index and columns
warning_grid = pd.DataFrame(
np.zeros(lmer_grid._grid.shape, dtype=int),
index=lmer_grid._grid.index.copy(),
columns=lmer_grid._grid.columns.copy(),
)
# select rows w/ at least one non-na
warning_rows = tidy_strings[tidy_strings == warning_kind].dropna(
axis=0, how="all"
)
assert warning_rows.index.names[0] == lmer_grid._grid.index.name
assert all(
warning_rows.index.get_level_values(0)
== warning_rows.index.get_level_values(0).unique()
)
for rdx, row in warning_rows.iterrows():
warning_grid.loc[rdx[0], :] = (row == warning_kind).astype(int)
assert all(warning_grid.index == lmer_grid._grid.index)
assert all(warning_grid.columns == lmer_grid._grid.columns)
warning_grids[warning_kind] = warning_grid
return warning_grids
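# Illustrative usage sketch (assumes an existing LMERFitGrid called ``lmer_grid``,
# e.g. the result of ``fitgrid.lmer(epochs, RHS='x + (x|a)')``):
#
#   warning_grids = get_lmer_warnings(lmer_grid)
#   for kind, grid in warning_grids.items():
#       # each ``grid`` is a time x channel DataFrame of 0/1 indicators
#       print(kind, int(grid.to_numpy().sum()), "flagged cells")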
def plot_lmer_warnings(lmer_grid, which="each", verbose=True):
"""Raster plot lme4::lmer warning grids
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
which : {"each", "all", or list of str}
select the types of warnings to plot. `each` (default) plots
each type of warning separately. `all` plots one grid showing
where any type of warning occurred. A list of strings searches
the lmer warnings and plots those that match.
verbose : bool, default=True
If `True` warn of failed matches for warnings keywords.
Examples
--------
default, plot each warning grid separately
>>> plot_lmer_warnings(lmer_grid)
one plot shows everywhere there is a warning
>>> plot_lmer_warnings(lmer_grid, which="all")
plot just warnings that match these strings
>>> plot_lmer_warnings(lmer_grid, which=["converge", "singular"])
"""
def _plot_warnings(warning, warning_grid):
# masked array non-values are transparent in pcolormesh
_, axi = plt.subplots(figsize=(12, len(warning_grid.columns) / 2))
axi.set_title(warning)
ylabels = warning_grid.columns
axi.yaxis.set_major_locator(
mpl.ticker.FixedLocator(np.arange(len(ylabels)))
)
axi.yaxis.set_major_formatter(mpl.ticker.FixedFormatter(ylabels))
axi.pcolormesh(
warning_grid.index,
np.arange(len(ylabels)),
np.ma.masked_not_equal(warning_grid.T.to_numpy(), 1),
shading="nearest",
cmap=mpl.colors.ListedColormap(['red']),
)
# validate kwarg
if not (
isinstance(which, str)
or (
isinstance(which, list)
and all((isinstance(wrn, str) for wrn in which))
)
):
raise ValueError(
"The value for which=value must be 'any', 'each', a warning "
f"string pattern to match or list of them, not this: {which}"
)
warning_grids = get_lmer_warnings(lmer_grid)
warning_grids["all"] = lmer_grid.has_warning.astype(int)
keys = None
if which == "all":
keys = ["all"]
elif which == "each":
keys = list(warning_grids.keys())
else:
# lookup matching patterns var so as to not step on original kwarg
patterns = [which] if isinstance(which, str) else which
keys = []
for pattern in patterns:
matches = [key for key in warning_grids if pattern in key]
keys += matches # may be []
if verbose and not matches:
warnings.warn(f"warning pattern '{pattern}' not found")
assert isinstance(keys, list), f"this should be type list: {type(keys)}"
for key in keys:
if verbose:
print(f"{key}")
_plot_warnings(key, warning_grids[key])
if verbose and not keys:
warnings.warn(f"no model warnings match {which}")
| score: 2.765625 | int_score: 3 |
| path: pyesasky/jupyter_server.py | repo: pierfra-ro/pyesasky | stars: 13 | id: 12003 |
<reponame>pierfra-ro/pyesasky<filename>pyesasky/jupyter_server.py
import os
import json
from hashlib import md5
from tornado import web
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
__all__ = ['load_jupyter_server_extension']
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'nbextension', 'static')
CONFIG = os.path.expanduser('~/.pyesasky')
class ESASkyFileHandler(IPythonHandler):
def get(self, filename):
filename = os.path.basename(filename)
# First we check if this is a standard file in the static directory
if os.path.exists(os.path.join(STATIC_DIR, filename)):
path = os.path.join(STATIC_DIR, filename)
else:
# If not, we open the config file which should contain a JSON
# dictionary with filenames and paths.
if not os.path.exists(CONFIG):
raise web.HTTPError(404)
with open(CONFIG) as f:
config = json.load(f)
if filename in config['paths']:
path = config['paths'][filename]
else:
raise web.HTTPError(404)
with open(path, 'rb') as f:
content = f.read()
self.finish(content)
def serve_file(path, extension=''):
if not os.path.exists(path):
raise ValueError("Path {0} does not exist".format(path))
hash = md5(path.encode('utf-8')).hexdigest() + extension
with open(CONFIG) as f:
config = json.load(f)
if hash not in config['paths']:
config['paths'][hash] = os.path.abspath(path)
with open(CONFIG, 'w') as f:
json.dump(config, f)
return '/esasky/' + hash
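# Illustrative sketch (hypothetical path, not part of the original file):
#
#   url = serve_file('/tmp/catalogue.csv', extension='.csv')
#   # -> '/esasky/' + md5('/tmp/catalogue.csv').hexdigest() + '.csv'
#   # ~/.pyesasky now maps that hashed name to the absolute path, so
#   # ESASkyFileHandler can serve the file on a later GET request.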
def load_jupyter_server_extension(nb_server_app):
web_app = nb_server_app.web_app
host_pattern = '.*$'
if not os.path.exists(CONFIG):
config = {'paths': {}}
with open(CONFIG, 'w') as f:
json.dump(config, f)
route_pattern = url_path_join(web_app.settings['base_url'], '/esasky/(.*)')
web_app.add_handlers(host_pattern, [(route_pattern, ESASkyFileHandler)])
| score: 2.390625 | int_score: 2 |
| path: pingpongskill/pingpongskill.py | repo: Garvys/PingPongSkill | stars: 1 | id: 12004 |
<filename>pingpongskill/pingpongskill.py<gh_stars>1-10
# -*- coding: utf-8 -*-
""" Skeleton Snips skill. """
import re
import json
import os
import datetime
from text2num import text2num
from collections import defaultdict
FORMAT = '%Y.%m.%dT%H:%M:%S'
class PingPongSkill(object):
""" Skeleton Snips skill. """
def __init__(self):
pass
def handle_loser(self):
db = JsonDB()
perfs = db.compute_perfs()
if len(perfs) == 0:
print "No match registred"
return
loser = sorted(perfs.iteritems(), key=lambda x: x[1])[0][0]
print "The one who lost the most matches is {}".format(loser)
def handle_winner(self):
db = JsonDB()
perfs = db.compute_perfs()
if len(perfs) == 0:
print "No match registred"
return
        winner = sorted(perfs.iteritems(), key=lambda x: -x[1])[0][0]
        print "The one who won the most matches is {}".format(winner)
def handle_terminate_game(self, winner, loser, score):
print "*** {} {} {}".format(winner, loser, score)
try:
score = parse_core(score)
        except ValueError, err:
            print err
            return
db = JsonDB()
timestamp = datetime.datetime.now().strftime(FORMAT)
db.add(winner, loser, score[0], score[1], timestamp)
print "I added the match {} versus {}: score: {}".format(winner,
loser,
score)
regex = re.compile('([\w\s]+)to([\w\s]+)')
def parse_core(score):
match = regex.search(score)
if not match or len(match.groups()) != 2:
raise ValueError("{} is an incorrect score".format(score))
score_1 = text2num(match.groups()[0].strip())
score_2 = text2num(match.groups()[1].strip())
if score_1 != 11 and score_2 != 11:
raise ValueError(
"{} is an incorrect score: one of the player needs to have "
"11".format(
score))
return sorted([score_1, score_2], reverse=True)
class JsonDB(object):
path = 'ping_pong_db.json'
def __init__(self):
if not os.path.exists(self.path):
self._results = []
else:
with open(self.path, 'r') as f:
results = json.load(f)
self._results = results
def add(self, player_1, player_2, score_player_1, score_player_2,
datetime_str):
self._results += [
(datetime_str, player_1, player_2, score_player_1, score_player_2)]
self.save_results()
def save_results(self):
with open(self.path, 'w') as f:
json.dump(self._results, f)
def compute_perfs(self):
player_to_win = defaultdict(int)
player_to_lose = defaultdict(int)
for _, win, lose, _, _ in self._results:
player_to_win[win] += 1
player_to_lose[lose] += 1
player_to_proportion = {}
for player in set(player_to_win.keys() + player_to_lose.keys()):
proportion = float(player_to_win[player]) / (
player_to_win[player] + player_to_lose[player])
player_to_proportion[player] = proportion
return player_to_proportion
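# Illustrative worked example (made-up results, not from a real database):
# after add('alice', 'bob', 11, 7, ...), add('bob', 'alice', 11, 9, ...) and
# add('alice', 'carol', 11, 2, ...), compute_perfs() returns
# {'alice': 2/3 (about 0.667), 'bob': 1/2 (0.5), 'carol': 0/1 (0.0)},
# so handle_winner() reports alice and handle_loser() reports carol.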
if __name__ == '__main__':
scores = [
'eleven to two',
'twenty to eleven'
]
for score in scores:
print parse_core(score)
PingPongSkill().handle_loser()
PingPongSkill().handle_terminate_game('thib', 'alex', 'eleven to two')
PingPongSkill().handle_loser()
| score: 3.21875 | int_score: 3 |
| path: xarray/backends/npy_io.py | repo: martinResearch/xarray | stars: 0 | id: 12005 |
<filename>xarray/backends/npy_io.py
import numpy as np
import xarray as xr
import pandas as pd
import sys
import json
import os
import datetime
from xarray.core.utils import (
decode_numpy_dict_values,
either_dict_or_kwargs,
ensure_us_time_resolution,
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
from numpy.lib import format
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, datetime.datetime):
return obj.__str__()
if isinstance(obj, np.datetime64):
return obj.__str__()
return json.JSONEncoder.default(self, obj)
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def myJsonConverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
def save_npys(file, data, compress=False,min_dims_coord_npy = 2):
if isinstance(data,xr.DataArray):
_save_dataarray(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
elif isinstance(data,xr.Dataset):
_save_dataset(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
else:
        raise TypeError('Unexpected type %s' % str(type(data)))
class zip_file():
def __init__(self,file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
self.file_dir = file_dir
self.file_prefix = file_prefix
self.zipf = zipfile.ZipFile(file, *args, **kwargs)
def close(self):
self.zipf.close()
def open(self,x):
return self.zipf.open(x)
def read(self,x):
return self.zipf.read(x)
def namelist(self):
return self.zipf.namelist()
def add_bin_data(self,fname,data_bytes):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
fid.write(data_bytes)
else:
import tempfile
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
fid.write(data_bytes)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def add_npy(self,fname,val):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
else:
import tempfile
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def _save_dataarray(file, dataarray, compress=False, min_dims_coord_npy =2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.xar'):
file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
if dataarray.name is None:
data_name = 'data'
else:
data_name = dataarray.name
zipf.add_npy(data_name+'.npy',dataarray.values)
d = dataarray.variable.to_dict(data=False)
d['version'] = xr.__version__
d.update({"coords": {}, "name": dataarray.name})
for k in dataarray.coords:
assert(k!=data_name)
coord_var = dataarray.coords[k].variable
item = {"attrs": decode_numpy_dict_values(coord_var.attrs), "dtype":str(coord_var.values.dtype)}# we save the type here
if (coord_var.dims!=()) and( len(coord_var.dims)>1 or coord_var.dims[0]!=k): # we don't keep the dims if we have a dimension_coordinate or if dims is empty to keep the json more concise (see http://xarray.pydata.org/en/stable/data-structures.html#coordinates)
item['dims'] = coord_var.dims
if (coord_var.dims!=()) and len(coord_var.dims)>=min_dims_coord_npy:
zipf.add_npy(k+'.npy',coord_var.values)
else:
item["data"] = ensure_us_time_resolution(coord_var.values) # keeping coordinates data in the json
d["coords"][k] = item
json_str = json.dumps(d,cls=NumpyEncoder) + "\n" # 2. string (i.e. JSON)
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('DataArray.json',json_bytes)
zipf.close()
def _save_dataset(file, dataset, compress=False, min_dims_coord_npy = 2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.xar'):
file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
dataset_dict = dataset.to_dict(data = False)
dataset_dict['version'] = xr.__version__
for key, array in dict(dataset.data_vars).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['data_vars'][key]['data']=ensure_us_time_resolution(val)
for key, array in dict(dataset.coords).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['coords'][key]['data']=ensure_us_time_resolution(val)
json_str = json.dumps(dataset_dict,cls=NumpyEncoder) + "\n"
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('Dataset.json', json_bytes)
zipf.close()
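# Sketch of the resulting .xar layout, inferred from the two savers above (names such
# as 'temperature' are only examples):
#   the file is a zip archive containing
#     - one '<variable>.npy' per data variable / coordinate whose ndim is at least
#       min_dims_coord_npy (e.g. 'temperature.npy'), written with numpy.lib.format
#     - 'Dataset.json' (or 'DataArray.json' for a DataArray) holding dims, attrs,
#       dtypes, the xarray version, and any small coordinates kept inline under 'data'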
def load_npys(file):
# TODO: Use contextlib.ExitStack once we drop Python 2
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = open(os_fspath(file), "rb")
own_fid = True
if True:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
_zip = zip_file(fid)
files = _zip.namelist()
_data_dict={}
_type = None
for x in files:
if x.endswith('.npy'):
bytes = _zip.open(x)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
assert( magic == format.MAGIC_PREFIX)
bytes = _zip.open(x)
_data_dict[x[:-4]] = format.read_array(bytes, allow_pickle=False, pickle_kwargs=None)
elif x=='Dataset.json':
assert(_type is None)
_type = xr.Dataset
header = json.loads(_zip.read(x))
elif x=='DataArray.json':
assert(_type is None)
_type = xr.DataArray
header = json.loads(_zip.read(x))
if _type is None:
raise IOError("Failed to read file")
if _type == xr.DataArray:
if 'name' in header and (header['name'] is not None):
data_name = header['name']
else:
data_name = 'data'
data = _data_dict[data_name]
assert (data.dtype==header['dtype'])
assert (data.shape==tuple(header['shape']))
coords={}
for k,coord in header['coords'].items():
if 'data' in coord:
coord_data = np.array(coord['data'],dtype=coord['dtype'])
else:
coord_data = _data_dict[k]
if 'dims' in coord:
dims=coord['dims']
elif coord_data.ndim==0:
dims=()
else:
dims= [k]
coords[k]=xr.DataArray(coord_data,dims=dims)
return xr.DataArray(data, coords = coords, dims=header['dims'],attrs=header['attrs'],name=header['name'])
else: # type is Dataset
coords={}
data_vars={}
for k,d in header['coords'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
coords[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
for k,d in header['data_vars'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
data_vars[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
return xr.Dataset(data_vars, coords=coords,attrs=header['attrs'])
else:
raise IOError(
"Failed to interpret file %s as a zip" % repr(file))
return None
def test():
from xarray.testing import assert_identical
data = np.random.rand(4, 3)
locs = ['IA', 'IL', 'IN']
times = pd.date_range('2000-01-01', periods=4)
foo = xr.DataArray(data, coords=[times, locs], dims=['time', 'space'])
v=foo.coords['time'].variable
save_npys('foo',foo)
foo_loaded = load_npys('foo.xar')
assert_identical(foo,foo_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
da = xr.DataArray(temp,name='precipitations',dims=['x','y','time'],
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('da',da)
da_loaded=load_npys('da.xar')
assert_identical(da,da_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
precip = 10 * np.random.rand(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
ds = xr.Dataset({'temperature' : (['x', 'y', 'time'], temp),
'precipitation': (['x', 'y', 'time'], precip)},
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('ds',ds,min_dims_coord_npy=1)
ds_loaded= load_npys('ds.xar')
assert_identical(ds, ds_loaded)
if __name__ == "__main__":
test()
| score: 2.453125 | int_score: 2 |
| path: applications/popart/bert/bert_data/squad_dataset.py | repo: Alwaysproblem/examples-1 | stars: 0 | id: 12006 |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import random
import pickle
import json
import fractions
import math
import subprocess
from logging import getLogger
from functools import reduce
from .dataset import DataSet
from .data_sampler import SequentialSampler, ShuffledSampler, DistributedDataSampler
from .tokenization import FullTokenizer
from .squad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, InputFeatures
logger = getLogger(__name__)
def generate_random_features(sequence_length, vocab_length, batch_size):
features = []
for i in range(batch_size):
features.append(InputFeatures(
i,
None,
None,
None,
None,
None,
np.random.randint(0, vocab_length, size=sequence_length),
None,
np.random.randint(0, 2, size=sequence_length),
0,
None,
None,
np.random.randint(0, sequence_length, size=1),
np.random.randint(0, sequence_length, size=1),
None,
np.random.randint(0, sequence_length+1, size=1)
))
return features
class SquadDataLoader(object):
def __init__(self,
features,
sequence_length=None,
batch_size=1,
dtype=np.int32,
sampler=None):
self.features = features
self.batch_size = batch_size
self.dtype = dtype
self.sequence_length = sequence_length
self.sampler = sampler
if sampler is None:
self.sampler = SequentialSampler(features)
self.num_batches = len(self.sampler)//self.batch_size
def __len__(self):
return self.num_batches
def __iter__(self):
self.feature_iterator = iter([self.features[idx] for idx in self.sampler])
return self
def __next__(self):
items = [next(self.feature_iterator) for _ in range(self.batch_size)]
indicies = []
positions = []
segments = []
sequence_mask_idx = []
start_pos = []
end_pos = []
uid = []
for item in items:
indicies.append(item.input_ids)
padding_max = self.sequence_length if self.sequence_length is not None else len(item.input_ids)
padding_length = len(item.input_ids) - item.padding_start_index
position_padding = np.full(padding_length, padding_max)
position_ids = np.arange(0, item.padding_start_index)
positions.append(np.concatenate((position_ids, position_padding)).astype(np.int32))
segments.append(item.segment_ids)
sequence_mask_idx.append(item.padding_start_index)
start_pos.append(item.start_position)
end_pos.append(item.end_position)
uid.append(item.unique_id)
# Including impossible samples during training is under investigation. T12851
# if item.is_impossible:
# logger.warning("Impossible sample exists in the dataset. "
# f"start pos: {item.start_position}, end pos: {item.end_position}")
inputs = []
for i in [indicies, positions, segments, sequence_mask_idx, start_pos, end_pos, uid]:
inputs.append(np.stack(i))
return inputs
class BertDataTransform(object):
'''
    Replaces token indices at or above vocab_length with the [UNK] id (100)
'''
def __init__(self, dataloader, vocab_length, sequence_length, embedding_dict, positional_dict, merge_both_embeddings, is_training=True):
self.dataloader = dataloader
self.vocab_length = vocab_length
self.sequence_length = sequence_length
self.is_training = is_training
self.embedding_dict = embedding_dict
self.positional_dict = positional_dict
self.merge_both_embeddings = merge_both_embeddings
def __len__(self):
return len(self.dataloader)
def __iter__(self):
self.dataloader_iterator = iter(self.dataloader)
return self
def __next__(self):
items = next(self.dataloader_iterator)
# Specific BERT Post Processing. TODO: Find a better place for this processing
# The vocab_length may be smaller than the original vocab... In this case with the custom_op
# Out of Bounds indicies over a certain threshold will cause numerical issues.
# 100 is unknown token [UNK]
# 0 in the label is padding
OOB = items[0] >= self.vocab_length
items[0][OOB] = 100
# Force use of uint32 for all inputs.
for i in range(len(items)):
if self.is_training or i < 4:
items[i] = items[i].astype(np.uint32)
if self.embedding_dict is not None:
items[0] = np.take(self.embedding_dict, items[0], 0)
if self.positional_dict is not None:
positional_expanded = np.take(self.positional_dict, items[1], 0)
if self.merge_both_embeddings:
items[0] += positional_expanded
else:
items[1] = positional_expanded
return items
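# Illustrative sketch of the out-of-bounds masking in BertDataTransform.__next__ above
# (made-up token ids): with vocab_length=30000 an input row [101, 31090, 2023] becomes
# [101, 100, 2023], because 31090 >= vocab_length and 100 is the [UNK] id.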
def load_or_cache_features(input_file,
vocab_file,
sequence_length,
is_training=True,
cache_file=None,
overwrite_cache=False,
do_lower_case=False):
if cache_file is None:
cache_file = input_file + f".{sequence_length}.cache"
if os.path.exists(cache_file) and not overwrite_cache:
examples = None
logger.info(f"Loading Cache {cache_file}")
with open(cache_file, "rb") as f:
features = pickle.load(f)
else:
logger.info("Reading Examples")
examples = read_squad_examples(input_file=input_file,
is_training=is_training,
version_2_with_negative=False)
# google-research/bert uses sequence_length 384 with doc_stride 128
# TODO: Find a good value for the doc_stride with sequence_length <384
doc_stride = 128
if sequence_length < 384:
doc_stride = 64
logger.info("Converting to Features")
features = convert_examples_to_features(examples=examples,
tokenizer=FullTokenizer(vocab_file, do_lower_case=do_lower_case),
max_seq_length=sequence_length,
doc_stride=doc_stride,
max_query_length=64,
is_training=is_training)
logger.info(f"Saving Cache {cache_file}")
with open(cache_file, "wb") as f:
pickle.dump(features, f)
return features, examples
class SquadDataSet(DataSet):
def __init__(self,
features,
examples,
input_file,
is_training,
output_dir=None,
evaluate_script=None,
do_lower_case=False,
n_extra=0,
**kwargs):
super().__init__(**kwargs)
self.features = features
self.examples = examples
self.is_training = is_training
self.input_file = input_file
self.output_dir = output_dir
self.do_lower_case = do_lower_case
if not self.is_training and self.output_dir is not None:
os.makedirs(self.output_dir, exist_ok=True)
# If examples is None, features was loaded from the cache
# So the examples need to be recreated.
if self.examples is None:
self.examples = read_squad_examples(input_file=self.input_file,
is_training=self.is_training,
version_2_with_negative=False)
self.results = []
self.evaluate_script = evaluate_script
self.n_extra = n_extra
def add_results(self, data, logits):
# Results will be batched. Flatten to individual results
start_logits, end_logits = [
logit.reshape(-1, logit.shape[-1]).tolist()
for logit in logits]
for i, unique_id in enumerate(data["uid"]):
self.results.append(RawResult(
unique_id=unique_id,
start_logits=start_logits[i],
end_logits=end_logits[i]
))
def write_predictions(self, epoch=None):
if self.is_training:
raise RuntimeError("Predictions cannot be written for training datasets")
if self.output_dir is None:
raise RuntimeError("Predictions cannot be written when output_dir is None")
suffix = f"_{epoch}" if epoch is not None else ""
predictions_file = os.path.join(self.output_dir, f"predictions{suffix}.json")
nbest_file = os.path.join(self.output_dir, f"nbest_predictions{suffix}.json")
null_log_odds_file = os.path.join(self.output_dir, f"null_odds{suffix}.json")
self.results = self.results[:len(self.results) - self.n_extra]
write_predictions(self.examples,
self.features,
self.results,
20, 30,
self.do_lower_case,
predictions_file,
nbest_file,
null_log_odds_file,
True,
False, 0)
if self.evaluate_script is not None:
evaluation = subprocess.check_output(["python", self.evaluate_script, self.input_file, predictions_file])
evaluation = json.loads(evaluation)
f1 = evaluation["f1"]
exact_match = evaluation["exact_match"]
status_string = f"F1 Score: {f1} | Exact Match: {exact_match}"
if epoch is not None:
status_string = f"Epoch: {epoch:3}{args.epochs - 1} | " + status_string
logger.info(status_string)
def get_bert_dataset(tensor_shapes,
input_file,
output_dir,
sequence_length,
vocab_file,
vocab_length,
batch_size,
batches_per_step,
embedding_dict,
positional_dict,
merge_both_embeddings=False,
replication_factor=1,
accumulation_factor=1,
shuffle=True,
is_training=True,
overwrite_cache=False,
no_drop_remainder=False,
evaluate_script=None,
generated_data=False,
do_lower_case=False,
max_pipeline_stage=1,
seed=0,
mpi_size=1,
mpi_rank=0,
is_distributed=False):
samples_per_step = batch_size * batches_per_step * \
replication_factor * accumulation_factor
div_factor = batch_size * replication_factor * accumulation_factor * batches_per_step
pad = 0
if generated_data:
features = generate_random_features(
sequence_length, vocab_length, samples_per_step)
examples = None
output_dir = None
logger.info("Generating random dataset")
else:
features, examples = load_or_cache_features(
input_file,
vocab_file,
sequence_length,
is_training,
overwrite_cache=overwrite_cache,
do_lower_case=do_lower_case)
if no_drop_remainder and not generated_data:
# dataset will be padded to be divisible by batch-size and samples-per-step
pad = int(np.ceil(len(features)/div_factor)) * div_factor - len(features)
if is_distributed:
sampler = DistributedDataSampler(
features, seed, shuffle,
mpi_size, mpi_rank, padding=False, padding_sub=pad, div_factor=div_factor)
pad = sampler.get_subpadding_size()
elif shuffle:
sampler = ShuffledSampler(features, seed, pad)
else:
sampler = SequentialSampler(features, pad)
if no_drop_remainder and not generated_data:
logger.info(f"no_drop_remainder: Dataset padded by {pad} samples")
dl = SquadDataLoader(
features,
sequence_length=sequence_length,
batch_size=samples_per_step,
sampler=sampler
)
bert_ds = BertDataTransform(
dl,
vocab_length,
sequence_length,
embedding_dict,
positional_dict,
merge_both_embeddings,
is_training=is_training)
if not is_training:
# Add uid to the data dictionary so evaluation script can be run
tensor_shapes += [
("start", None),
("end", None),
("uid", None)]
ds = SquadDataSet(
features,
examples,
input_file,
is_training,
output_dir,
evaluate_script,
do_lower_case=do_lower_case,
n_extra=pad,
loader=bert_ds,
tensor_shapes=tensor_shapes,
batches_per_step=batches_per_step,
replication_factor=replication_factor,
accumulation_factor=accumulation_factor)
return ds
| score: 1.953125 | int_score: 2 |
| path: utils.py | repo: sWizad/HashNeRF-pytorch | stars: 0 | id: 12007 |
import json
import numpy as np
import pdb
import torch
from ray_utils import get_rays, get_ray_directions, get_ndc_rays
BOX_OFFSETS = torch.tensor([[[i,j,k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],
device='cuda')
SQR_OFFSETS = torch.tensor([[[i,j] for i in [0, 1] for j in [0, 1] ]], device='cuda')
def hash(coords, log2_hashmap_size):
'''
coords: 3D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
#return ((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
def hash2d(coords, log2_hashmap_size):
'''
    coords: 2D coordinates. B x 2
log2T: logarithm of T w.r.t 2
'''
x, y = coords[..., 0], coords[..., 1]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663)
def xy2index(xy,resolution):
return xy[...,0]+xy[...,1]*resolution
def get_bbox3d_for_blenderobj(camera_transforms, H, W, near=2.0, far=6.0):
camera_angle_x = float(camera_transforms['camera_angle_x'])
focal = 0.5*W/np.tan(0.5 * camera_angle_x)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
for frame in camera_transforms["frames"]:
c2w = torch.FloatTensor(frame["transform_matrix"])
rays_o, rays_d = get_rays(directions, c2w)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([1.0,1.0,1.0]), torch.tensor(max_bound)+torch.tensor([1.0,1.0,1.0]))
def get_bbox3d_for_llff(poses, hwf, near=0.0, far=1.0):
H, W, focal = hwf
H, W = int(H), int(W)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
poses = torch.FloatTensor(poses)
for pose in poses:
rays_o, rays_d = get_rays(directions, pose)
rays_o, rays_d = get_ndc_rays(H, W, focal, 1.0, rays_o, rays_d)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([0.1,0.1,0.0001]), torch.tensor(max_bound)+torch.tensor([0.1,0.1,0.0001]))
def get_voxel_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int()
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS
hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
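# Shapes returned above, for B sample points: voxel_min_vertex and voxel_max_vertex are
# (B, 3) corner coordinates of each point's voxel, and hashed_voxel_indices is (B, 8),
# one hash-table index per cube vertex (the eight offsets in BOX_OFFSETS).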
def get_plane_vertices_old(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
#pdb.set_trace()
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
#hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xy = xy2index(voxel_indices_xy,resolution) #(B, 4)
hashed_voxel_indices_xz = xy2index(voxel_indices_xz,resolution) #(B, 4)
hashed_voxel_indices_yz = xy2index(voxel_indices_yz,resolution) #(B, 4)
#print(hashed_voxel_indices_yz.shape)
#pdb.set_trace()
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
if __name__=="__main__":
with open("data/nerf_synthetic/chair/transforms_train.json", "r") as f:
camera_transforms = json.load(f)
bounding_box = get_bbox3d_for_blenderobj(camera_transforms, 800, 800)
| score: 2.1875 | int_score: 2 |
| path: eval.py | repo: dawnchen123/VS-Net | stars: 55 | id: 12008 |
import os
import cv2
import time
import json
import random
import inspect
import argparse
import numpy as np
from tqdm import tqdm
from dataloaders import make_data_loader
from models.sync_batchnorm.replicate import patch_replication_callback
from models.vs_net import *
from utils.loss import loss_dict
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils import utils
from torch.autograd import Variable
import os.path as osp
from configs import *
import warnings
warnings.filterwarnings("ignore")
class Trainer(object):
def __init__(self, cfg):
self.cfg = cfg
# Define Saver
self.saver = Saver(cfg)
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.cfg["log_tb_dir"])
self.summary.create_summary()
# Define Dataloader
kwargs = {"num_workers": cfg["num_workers"], "pin_memory": True}
self.train_loader, self.val_loader, self.test_loader, dset = make_data_loader(
cfg, **kwargs)
# read landmark centers
self.id2center = np.array(json.load(
open(osp.join(cfg["data_dir"], "id2centers.json")))).astype(np.float64)
self.coding_book = torch.zeros(
(len(self.id2center), cfg["seg_channel"]), dtype=torch.float32).cuda()
torch.nn.init.xavier_uniform(self.coding_book)
print("coding book size = {}".format(self.coding_book.shape))
# generate color map
unique_label = np.arange(len(self.id2center))
unique_label = unique_label.astype(
np.int64) * 6364136223846793005 + 1442695040888963407
color_map = np.zeros((unique_label.shape[0], 3), np.uint8)
color_map[:, 0] = np.bitwise_and(unique_label, 0xff)
color_map[:, 1] = np.bitwise_and(np.right_shift(unique_label, 4), 0xff)
color_map[:, 2] = np.bitwise_and(np.right_shift(unique_label, 8), 0xff)
self.color_map = np.array(color_map)
self.coding_book = Variable(self.coding_book, requires_grad=True)
# Define network
model = VSNet(backbone=cfg["backbone"],
seg_decoder=cfg["seg_decoder"],
vertex_decoder=cfg["vertex_decoder"],
seg_channel=cfg["seg_channel"],
vertex_channel=cfg["vertex_channel"],
output_stride=cfg["out_stride"],
sync_bn=cfg["sync_bn"])
train_params = [{"params": model.get_1x_lr_params(), "lr": cfg["lr"]},
{"params": model.get_10x_lr_params(),
"lr": cfg["lr"] * 10},
{"params": self.coding_book, "lr": cfg["lr"] * 10}
]
# Define Optimizer
if cfg["optimizer"] == "SGD":
optimizer = torch.optim.SGD(train_params, momentum=cfg["momentum"],
weight_decay=cfg["weight_decay"], nesterov=cfg["nesterov"])
elif cfg["optimizer"] == "Adam":
optimizer = torch.optim.Adam(train_params, lr=cfg["lr"],
weight_decay=cfg["weight_decay"], amsgrad=True)
else:
raise NotImplementedError
# Define Criterion
self.seg_criterion = loss_dict[cfg["seg_loss_type"]]
self.vertex_criterion = loss_dict[cfg["vertex_loss_type"]]
self.model, self.optimizer = model, optimizer
# Define Evaluator
self.evaluator = Evaluator(
self.coding_book.shape[0], cfg["vertex_channel"])
# Define lr scheduler
self.scheduler = LR_Scheduler(mode=cfg["lr_scheduler"], base_lr=cfg["lr"],
num_epochs=cfg["epochs"], iters_per_epoch=len(
self.train_loader),
lr_step=cfg["lr_step"])
self.model = torch.nn.DataParallel(self.model)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
self.best_pred = {"mIoU": 0.0, "Acc": 0.0, "Acc": 0.0,
"FWIoU": 0.0, "translation_median": 1000}
if cfg["resume"] is not None and cfg["resume"] == True:
print(os.path.isfile(cfg["resume_checkpoint"]))
if not os.path.isfile(cfg["resume_checkpoint"]):
raise RuntimeError("=> no checkpoint found at {}" .format(
cfg["resume_checkpoint"]))
checkpoint = torch.load(cfg["resume_checkpoint"])
cfg.opt["start_epoch"] = checkpoint["epoch"] - 1
self.model.module.load_state_dict(checkpoint["state_dict"])
if not cfg["ft"]:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.best_pred = checkpoint["best_pred"]
if "coding_book" in checkpoint.keys():
assert self.coding_book.shape == checkpoint["coding_book"].shape
self.coding_book = checkpoint["coding_book"]
else:
print("Alert! coding book does not exist in the checkpoint")
print("=> loaded checkpoint {} (epoch {})"
.format(cfg["resume"], checkpoint["epoch"]))
def validation(self, epoch):
print("=================================")
print("validation")
print("=================================")
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc="\r")
num_iter_val = len(self.val_loader)
test_loss = 0.0
num_images = 0
ten_count = []
five_count = []
three_count = []
one_count = []
translation_list = []
angular_list = []
reproject_list = []
test_seg_loss = 0.0
test_ver_loss = 0.0
for i, data in enumerate(tbar):
image, seg_target, vertex_target = [d.cuda() for d in data[:3]]
valid_mask = data[-1].cuda()
pose_target, camera_k_matrix, ori_img = data[3:]
seg_target = seg_target.long()
valid_mask = (seg_target.detach() > 0).float()
with torch.no_grad():
seg_pred, vertex_pred, seg_pred_x4s = self.model(
image)
loss_seg = 0
if self.cfg["seg_decoder"]:
loss_seg = self.seg_criterion(seg_pred, seg_target, self.coding_book,
margin=self.cfg["seg_loss_margin"],
seg_k=self.cfg["seg_k"],
valid_mask=valid_mask)
test_seg_loss += loss_seg.item()
self.summary.add_scalar(
"val/loss_seg_iter", loss_seg.item(), i + num_iter_val * epoch)
loss_vertex = 0
if self.cfg["vertex_decoder"]:
loss_vertex = self.vertex_criterion(vertex_pred, vertex_target,
valid_mask)
test_ver_loss += loss_vertex.item()
self.summary.add_scalar(
"val/loss_vertex_iter", loss_vertex.item(), i + num_iter_val * epoch)
loss = 0
if self.cfg["seg_decoder"]:
loss += loss_seg
if self.cfg["vertex_decoder"]:
loss += loss_vertex * self.cfg["vertex_loss_ratio"]
test_loss += loss.item()
tbar.set_description("Test loss: %.9f" % (test_loss / (i + 1)))
self.summary.add_scalar(
"val/total_loss_iter", loss.item(), i + num_iter_val * epoch)
global_step = i * \
self.cfg["val_batch_size"] + image.data.shape[0]
# evaluate seg_pred
seg_target = seg_target.detach().squeeze()
if self.cfg["seg_decoder"]:
seg_pred, knn = utils.evaluate_segmentation(seg_pred_x4s,
self.coding_book, seg_target.size(), self.cfg["use_own_nn"])
else:
seg_pred = seg_target
# evaluate vertex
pt3d_filter, pt2d_filter, _ = utils.evaluate_vertex_v2(vertex_pred, seg_pred,
self.id2center, inlier_thresh=0.999,
min_mask_num=self.cfg["val_label_filter_threshsold"])
# pt3d_filter, pt2d_filter = utils.evaluate_vertex(vertex_target, seg_pred, self.id2center)
camera_k_matrix = camera_k_matrix.squeeze().numpy()
translation_distance, angular_distance, error = 1e9, 1e9, 1e9
if pt2d_filter.shape[0] > 6:
# pnp
ret, pose_pred = utils.pnp(
pt3d_filter, pt2d_filter, camera_k_matrix)
error = utils.reproject_error(
pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)
translation_distance, angular_distance = utils.cm_degree_metric(
pose_pred, pose_target)
print(translation_distance, angular_distance, error, i)
ten_count.append(translation_distance <
10 and angular_distance < 10)
five_count.append(translation_distance <
5 and angular_distance < 5)
three_count.append(translation_distance <
3 and angular_distance < 3)
one_count.append(translation_distance <
1 and angular_distance < 1)
translation_list.append(translation_distance)
angular_list.append(angular_distance)
reproject_list.append(error)
# Add batch sample into evaluator
if self.cfg["seg_decoder"]:
self.evaluator.add_seg_batch(seg_target, seg_pred)
if self.cfg["visualize_segmenation"]:
self.summary.visualize_seg_image(ori_img, seg_pred, seg_target,
epoch, i, global_step, self.color_map)
if self.cfg["vertex_decoder"]:
# evaluate vertex_pred
vertex_target, vertex_pred = vertex_target.squeeze(), vertex_pred.squeeze()
self.evaluator.add_vertex_batch(vertex_target, vertex_pred)
                    # compute the vertex accuracy
if self.cfg["visualize_voting"]:
if self.cfg["visualize_landmark"] != None and self.cfg["visualize_landmark"]:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step, pt2d_filter, True)
else:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step)
mIoU, Acc, Acc_class, FWIoU = self.summary.visualize_seg_evaluator(
self.evaluator, epoch, "val/seg/")
print("Validation:")
print("[Epoch: %d, numImages: %5d]" % (epoch, num_images))
print("Loss: %.9f" % (test_loss / num_iter_val))
self.summary.add_scalar("val/total_loss_epoch",
test_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_seg_epoch",
test_seg_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_ver_epoch",
test_ver_loss / num_iter_val, epoch)
self.summary.add_scalar("val/pnp/10cm_epoch",
np.mean(ten_count), epoch)
self.summary.add_scalar("val/pnp/5cm_epoch",
np.mean(five_count), epoch)
self.summary.add_scalar("val/pnp/3cm_epoch",
np.mean(three_count), epoch)
self.summary.add_scalar("val/pnp/1cm_epoch", np.mean(one_count), epoch)
self.summary.add_scalar(
"val/pnp/translation_median_epoch", np.median(translation_list), epoch)
self.summary.add_scalar(
"val/pnp/angular_median_epoch", np.median(angular_list), epoch)
new_pred = {"mIoU": mIoU.item(), "Acc": Acc.item(), "Acc_class": Acc_class.item(), "FWIoU": FWIoU.item(),
"10cm": np.mean(ten_count),
"5cm": np.mean(five_count), "3cm": np.mean(three_count), "1cm": np.mean(one_count),
"translation_median": np.median(translation_list), "angular_list": np.median(angular_list)}
print(new_pred)
if new_pred["translation_median"] < self.best_pred["translation_median"]:
is_best = True
self.best_pred = new_pred
self.saver.save_checkpoint({
"epoch": epoch + 1,
"state_dict": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
"best_pred": self.best_pred,
"coding_book": self.coding_book
}, is_best, save_model=self.cfg["save_model"])
def main():
parser = argparse.ArgumentParser(
description="PyTorch Landmark Segmentation Training")
parser.add_argument("--dataset", type=str,
choices=["7scenes_loc", "cambridge_loc"], help="experiment config file")
parser.add_argument("--scene", type=str, default="",
help="experiment scene")
parser.add_argument("--gpu-id", type=str, default="",
help="experiment gpu id")
parser.add_argument("--use-aug", type=str, default="",
choices=["", "true", "false"], help="experiment use aug")
parser.add_argument("--config", type=str, default=None,
help="experiment config file")
parser.add_argument("--debug", type=str, default="",
choices=["", "true", "false"], help="debug")
parser.add_argument("--resume", type=str, default="true",
choices=["", "true", "false"], help="resume")
args = parser.parse_args()
debug = None
if args.debug != "":
debug = (args.debug == "true")
if args.dataset == "7scenes_loc":
cfg = SevenScenesLocConfig(args.config, debug)
elif args.dataset == "cambridge_loc":
cfg = CambridgeLocConfig(args.config, debug)
if args.scene != "":
cfg.opt["scene"] = args.scene
if args.gpu_id != "":
cfg.opt["devices"] = args.gpu_id
if args.use_aug == "true":
cfg.opt["use_aug"] = True
if args.resume == "true":
cfg.opt["resume"] = True
cfg.opt["resume_checkpoint"] = cfg["export_dir"] + \
'/ckpts/checkpoint-backup.pth.tar'
cfg.print_opt()
cfg.set_environmental_variables()
torch.manual_seed(cfg["seed"])
torch.cuda.manual_seed(cfg["seed"])
np.random.seed(cfg["seed"])
random.seed(cfg["seed"])
trainer = Trainer(cfg)
print("Starting Epoch:", trainer.cfg["start_epoch"])
print("Total Epoches:", trainer.cfg["epochs"])
trainer.validation(trainer.cfg["start_epoch"])
trainer.summary.close()
if __name__ == "__main__":
main()
| 1.828125
| 2
|
data/__init__.py
|
Joaomlg/multilayer-perceptron-mnist
| 13
|
12009
|
import numpy as np
import gzip
import pickle
import os
import urllib.request
class MNIST:
host = 'http://yann.lecun.com/exdb/mnist/'
filenames = {
'train': ('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz'),
'test': ('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'),
}
dataset_filename = 'mnist.pkl.gz'
train_samples = 50000
validation_samples = 10000
test_samples = 10000
def __init__(self):
self.current_dir = os.path.dirname(__file__)
if not self.is_dataset_available():
print('Dataset not available! It will be downloaded and decoded; this can take a while, please wait!')
datasets = self.get_base_datasets_filenames()
for dataset in datasets:
if not self.is_base_dataset_downloaded(dataset):
print(f'Downloading {dataset}...')
self.download_dataset(dataset)
print('Decoding files and saving it...')
self.decode_and_save()
print('Deleting base files (downloaded)...')
for dataset in datasets:
self.delete_dataset(dataset)
print('Done.')
def is_dataset_available(self):
return os.path.exists(os.path.join(self.current_dir, self.dataset_filename))
def get_base_datasets_filenames(self):
return self.filenames['train'] + self.filenames['test']
def is_base_dataset_downloaded(self, filename):
return os.path.exists(os.path.join(self.current_dir, filename))
def download_dataset(self, filename):
url = self.host + filename
dest = os.path.join(self.current_dir, filename)
urllib.request.urlretrieve(url, dest)
def delete_dataset(self, filename):
os.remove(os.path.join(self.current_dir, filename))
def decode_and_save(self):
data = {}
for key, (images_filename, labels_filename) in self.filenames.items():
with gzip.open(os.path.join(self.current_dir, images_filename), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16).reshape(-1, 28*28)
with gzip.open(os.path.join(self.current_dir, labels_filename), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
data[key] = (images, labels)
training = tuple(x[:self.train_samples] for x in data['train'])
validation = tuple(x[self.train_samples:] for x in data['train'])
test = tuple(data['test'])
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'wb') as file:
pickle.dump((training, validation, test), file)
def load(self):
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'rb') as file:
training, validation, test = pickle.load(file)
return training, validation, test
| 2.859375
| 3
|
Exemple.py
|
LVWolff/Python_Lesson_2
| 0
|
12010
|
# Tasks on loops and the conditional statement ------
#----------------------------------------
'''
Task 1
Using a loop, print five lines of zeros, with each line numbered.
'''
for i in range(1, 6):
print(i, '0000000000000000000000000000000000000000000')
'''
Task 2
The user enters 10 digits in a loop. Count how many of the entered digits are 5.
'''
count = 0
for i in range(10):
user_data = int(input('Enter a number: '))
if user_data == 5:
count += 1
print(count)
'''
Task 3
Find the sum of the numbers from 1 to 100 and print the result.
'''
sum = 0
for i in range(1, 101):
sum += i
print(sum)
'''
Task 4
Find the product of the numbers from 1 to 10 and print the result.
'''
proiz = 1
for i in range(2, 11):
proiz *= i
print(proiz)
'''
Task 5
Print each digit of the number on its own line.
'''
integer_number = 123456
start_del = len(str(integer_number)) - 1
delitel = 10 ** start_del
#print(integer_number % delitel, integer_number // delitel)
while integer_number > 0:
print(int(integer_number // delitel))
integer_number = integer_number % delitel
delitel /= 10
'''
Task 6
Find the sum of the digits of a number.
'''
integer_number = 123456
sum = 0
while integer_number > 0:
sum += integer_number % 10
integer_number = integer_number // 10
print(sum)
'''
Task 7
Find the product of the digits of a number.
'''
integer_number = 123456
proiz = 1
while integer_number > 0:
proiz *= integer_number % 10
integer_number = integer_number // 10
print(proiz)
'''
Task 8
Answer the question: is there a 5 among the digits of the number?
'''
integer_number = 125254
while integer_number > 0:
if integer_number % 10 == 5:
print('Yes')
break
integer_number = integer_number // 10
else:
print('No')
'''
Task 9
Find the largest digit in the number
'''
integer_number = 125278954
max_num = integer_number % 10
while integer_number > 0:
max_num = max(max_num, integer_number % 10)
integer_number = integer_number // 10
print(max_num)
'''
Task 10
Find how many times the digit 5 occurs in the number
'''
integer_number = 125278954
count_num = 0
while integer_number > 0:
if integer_number % 10 == 5:
count_num += 1
integer_number = integer_number // 10
print(count_num)
| 3.609375
| 4
|
push_exp/main_CrouchSimulationForCOT.py
|
snumrl/DeepPushRecovery
| 0
|
12011
|
import os
import numpy as np
import time
import multiprocessing as mp
import csv
import socket
import datetime
import math
import glob
from pypushexp import PushSim
# # input - [recorded item]
# [weight] : 48
# [height] : 160
# [crouch_angle] (deg)
# [step_length_ratio]
# [halfcycle_duration_ratio]
# [push_step] : 8
# [push_duration] (sec) : .2
# [push_force] (N)
# [push_start_timing] (half gait cycle percent)
#
# # output
# [pushed_length] (m) : sim.out_pushed_length
# [pushed_steps] : sim.out_pushed_steps
# [push_strength] : abs(push_force * push_duration / weight)
# [step_length] (m) : sim.getPushedLength()
# [walking_speed] (m/s) : sim.getWalkingSpeed()
# [halfcycle_duration] (s) : sim.getStepLength() /sim.getWalkingSpeed()
#
# # output for hospital
# [distance] : pushed_length * 1000.
# [speed] : walking_speed * 1000.
# [force] : push_strength * 1000.
# [stride] : step_length * 1000.
# [start_timing_time_ic] = sim.start_timing_time_ic
# [mid_timing_time_ic] = sim.mid_timing_time_ic
# [start_timing_foot_ic] = sim.getStartTimingFootIC()
# [mid_timing_foot_ic] = sim.getMidTimingFootIC()
# [start_timing_time_fl] = sim.getStartTimingTimeFL()
# [mid_timing_time_fl] = sim.getMidTimingTimeFL()
# [start_timing_foot_fl] = sim.getStartTimingFootFL()
# [mid_timing_foot_fl] = sim.getMidTimingFootFL()
# # not used
# subject no
# sex
# left leg length
# right leg length
# stride
# speed
# experiment
# file name
# trial no
# push timing : 'left stance'
# push direction : 'from left'
# normalized push length
# push length until first step
# push end timing (time)
# push end timing (foot pos)
# return during first step
# push duration
# push start time
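# Illustrative sketch only (this helper is not called anywhere in the script):
# how the "output for hospital" row listed above could be assembled from a
# finished simulation. It uses only the attributes/getters named in the comment
# block; the helper name and its exact signature are assumptions.
def _example_hospital_row(sim, push_force, push_duration, weight):
    push_strength = abs(push_force * push_duration / weight)
    return {
        'distance': sim.out_pushed_length * 1000.,  # pushed_length (m) -> mm
        'speed': sim.getWalkingSpeed() * 1000.,     # walking_speed (m/s)
        'force': push_strength * 1000.,
        'stride': sim.getStepLength() * 1000.,      # step_length (m)
    }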
def gettimestringisoformat():
return datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
def worker_simulation(sim, param):
try:
push_step, push_duration,\
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,\
weight, height, ith, q = param
# print(int(crouch_angle), step_length_ratio, walk_speed_ratio, push_force, push_start_timing)
sim.setParamedStepParams(int(crouch_angle), step_length_ratio, walk_speed_ratio)
sim.setPushParams(8, 0.2, 0., 0.)
print(step_length_ratio, walk_speed_ratio)
stopcode = sim.simulate()
# stopcode = 0
if stopcode in [0, 3, 4]:
cot = sim.getCostOfTransport()
walking_speed = sim.getWalkingSpeed()
q.put((ith, crouch_angle, walking_speed, cot))
except IndexError:
pass
def write_start(csvfilepath):
csvfile = open(csvfilepath, 'w')
csvfile.write('type,ith,crouch_angle,speed,cot\n')
return csvfile
def write_body(q, csvfile):
while True:
try:
ith, crouch_angle, walking_speed, cot = q.get(False)
csvfile.write('torque,%d,%s,%s,%s\n' % (ith, crouch_angle, walking_speed, cot))
csvfile.flush()
except:
break
def write_end(csvfile):
csvfile.close()
def simulate(sim, launch_order, num, option_str=''):
#=======================================================================
# settings
#=======================================================================
TEST = True if launch_order is None else False
# TEST = True
# TEST = False
weight = 72
height = 170
push_step = 8
push_duration = .2
test_params = [] # element: (crouch_angle, step_length_ratio, halfcycle_duration_ratio, push_force, push_start_timing)
# ===========================================================================
#
# ===========================================================================
if TEST:
# test
additional_str = ''
num = 2
# num = 5000
mean_crouch = [0, 20, 30, 60]
else:
# real
all_mean_crouch = [0, 20, 30, 60]
mean_crouch = [all_mean_crouch[launch_order % len(all_mean_crouch)]]
additional_str = '_%ddeg__push' % mean_crouch[0]
# if launch_order==0:
# param_opt_result = '130810_113234_0_60_push'
# additional_str = '_0_60_push'
# elif launch_order==2:
# param_opt_result = '130810_161152_0_30_60_push'
# additional_str = '_0_30_60_push'
# =======================================================================
# set logger
# =======================================================================
outDir = os.path.dirname(os.path.abspath(__file__)) + '/results/'
if not os.path.exists(outDir):
os.makedirs(outDir)
csvfilepath = outDir + 'COT_' +option_str + '_' + gettimestringisoformat() + '_' + str(num) + 'trials_' + socket.gethostname() + '.csv'
print('start logging at', gettimestringisoformat())
print()
print('<simulation setting>')
# =======================================================================
# test2 : multivariate normal distribution
# =======================================================================
stride_means = [1.1262070300, 0.9529737358, 0.9158506655, 0.8755451448]
speed_means = [0.9943359644, 0.8080297151, 0.7880050552, 0.7435198328]
stride_vars = [0.03234099289, 0.02508595114, 0.02772452640, 0.02817863267]
stride_speed_covars = [0.03779884365, 0.02225320798, 0.02906793442, 0.03000639027]
speed_vars = [0.06929309644, 0.04421889347, 0.04899931048, 0.05194827755]
# crouch angle
# mean_crouch = [0,20,30,60]
std_crouch = 1
# step length
motion_stride_bvh_after_default_param = 1.1886
experi_stride_mean = stride_means[launch_order]
experi_stride_std = math.sqrt(stride_vars[launch_order])
mean_length_ratio = experi_stride_mean / motion_stride_bvh_after_default_param
std_length_ratio = experi_stride_std / motion_stride_bvh_after_default_param
# walk speed
speed_bvh_after_default_param = 0.9134
experi_speed_mean = speed_means[launch_order]
experi_speed_std = math.sqrt(speed_vars[launch_order])
mean_speed_ratio = experi_speed_mean / speed_bvh_after_default_param
std_speed_ratio = experi_speed_std / speed_bvh_after_default_param
# push strength
mean_strength = .535
std_strength = .096
mean_force = -(mean_strength*weight/push_duration)
std_force = (std_strength*weight/push_duration)
# push timing
mean_timing = 34
std_timing = 21
if TEST:
np.set_printoptions(precision=4, linewidth=200)
# for i in range(len(mean_crouch)):
# mean = [mean_crouch[i], mean_length_ratio, mean_duration_ratio, mean_force, mean_timing, mean_crouch[i]]
# cov = np.diag( [std_crouch**2, std_length_ratio**2, std_duration_ratio**2, std_force**2, std_timing**2, 0])
for i in range(len(mean_crouch)):
mean = [mean_crouch[i], mean_length_ratio, mean_speed_ratio, mean_force, mean_timing, mean_crouch[i]]
cov = np.diag([0 , std_length_ratio**2, std_speed_ratio**2, std_force**2, std_timing**2, 0])
cov[1, 2] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
cov[2, 1] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
if len(test_params) == 0:
test_params = np.random.multivariate_normal(mean, cov, num)
else:
test_params = np.vstack((test_params, np.random.multivariate_normal(mean, cov, num)))
# no negative crouch angle
for i in range(len(test_params)):
test_params[i][0] = abs(test_params[i][0])
test_params[i][2] = abs(test_params[i][2])
test_params[i][3] = -abs(test_params[i][3])
# print(test_params)
print()
print('multivariate normal distribution')
print()
print('mean_crouch', mean_crouch)
print('std_crouch', std_crouch)
print()
print('motion_step_stride', motion_stride_bvh_after_default_param)
print('experi_step_length_mean', experi_stride_mean)
print('experi_step_length_std', experi_stride_std)
print('mean_length_ratio', mean_length_ratio)
print('std_length_ratio', std_length_ratio)
print()
print('motion_speed', speed_bvh_after_default_param)
print('experi_speed_mean', experi_speed_mean)
print('experi_speed_std', experi_speed_std)
print('mean_speed_ratio', mean_speed_ratio)
print('std_speed_ratio', std_speed_ratio)
print()
print('num', num)
print()
print('total # of simulations', len(test_params))
print()
# =======================================================================
# simulation
# =======================================================================
pt = time.time()
print('<start simulation>')
print('hostname %s ' % socket.gethostname())
print()
q = mp.Manager().Queue()
groupsize = 100
paramgroups = [[] for i in range( len(test_params)//groupsize + 1 )]
ith = 1
for i in range(len(test_params)):
crouch_angle = test_params[i][0]
step_length_ratio = test_params[i][1]
walk_speed_ratio = test_params[i][2]
push_force = test_params[i][3]
push_start_timing = test_params[i][4]
crouch_label = test_params[i][5]
paramgroups[i//groupsize].append((push_step, push_duration,
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,
weight, height, ith, q))
ith += 1
csvfile = write_start(csvfilepath)
for i in range(len(paramgroups)):
for j in range(len(paramgroups[i])):
print(j)
worker_simulation(sim, paramgroups[i][j])
write_body(q, csvfile)
write_end(csvfile)
print()
_s = time.time() - pt
_h = _s // 3600
_m = _s // 60
_s -= 60 * _m
_m -= 60 * _h
print('elapsed time = %d h:%d m:%d s' % (int(_h), int(_m), int(_s)))
print()
print('end logging at', gettimestringisoformat())
if __name__ == '__main__':
import sys
import re
option = sys.argv[1]
trial_num = int(sys.argv[2])
_metadata_dir = os.path.dirname(os.path.abspath(__file__)) + '/../data/metadata/'
_nn_finding_dir = os.path.dirname(os.path.abspath(__file__)) + '/../nn/*/'
nn_dir = None
if _nn_finding_dir is not None:
nn_dir = glob.glob(_nn_finding_dir + option)[0]
meta_file = _metadata_dir + option + '.txt'
sim = None
if 'muscle' in option:
sim = PushSim(meta_file, nn_dir+'/max.pt', nn_dir+'/max_muscle.pt')
else:
sim = PushSim(meta_file, nn_dir+'/max.pt')
if "all" in option:
simulate(sim, 0, trial_num, option)
simulate(sim, 1, trial_num, option)
simulate(sim, 2, trial_num, option)
simulate(sim, 3, trial_num, option)
else:
crouch = re.findall(r'crouch\d+', option)[0][6:]
simulate(sim, ['0', '20', '30', '60'].index(crouch), trial_num, option)
| 2.203125
| 2
|
lepiota/lepiota/urls.py
|
sgelias/lepiota
| 0
|
12012
|
<reponame>sgelias/lepiota
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path
from django.conf.urls import include
from django.views.generic import TemplateView, RedirectView
urlpatterns = [
# Administration
path('admin/', admin.site.urls),
# Accounts
path('account/', include('account.urls', namespace='account')),
# Oauth2
path('api/v1/o/', include('oauth.urls', namespace='oauth2_provider')),
# General purpose
path('welcome/', TemplateView.as_view(template_name="welcome.html")),
path('', RedirectView.as_view(url="/welcome/")),
re_path(r'^$', RedirectView.as_view(url="/welcome/")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1.859375
| 2
|
src/base/admin.py
|
dhavall13/Decode
| 0
|
12013
|
from django.contrib import admin
from .models import Room, Topic, Message, User
admin.site.register(Room)
admin.site.register(Topic)
admin.site.register(Message)
admin.site.register(User)
| 1.445313
| 1
|
gorilla/config/_config.py
|
sunjiahao1999/gorilla-core
| 4
|
12014
|
<filename>gorilla/config/_config.py<gh_stars>1-10
# Copyright (c) Open-MMLab. All rights reserved.
import os
import json
import tempfile
import warnings
from typing import Optional
from argparse import Namespace
from addict import Dict
from ..utils import check_file
BASE_KEY = "_base_"
RESERVED_KEYS = ["filename", "text"]
class ConfigDict(Dict):
r"""ConfigDict based on Dict, which use to convert the config
file into config dict
"""
def __missing__(self, name):
raise KeyError(name)
def __getattr__(self, name):
try:
value = super(ConfigDict, self).__getattr__(name)
except KeyError:
ex = AttributeError(
f"`{self.__class__.__name__}` object has no attribute `{name}`"
)
except Exception as e:
ex = e
else:
return value
raise ex
class Config(object):
r"""A facility for config and config files.
It supports common file formats as configs: python/json/yaml. The interface
is the same as a dict object and also allows access config values as
attributes.
Example:
>>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
>>> cfg.a
1
>>> cfg.b
{"b1": [0, 1]}
>>> cfg.b.b1
[0, 1]
>>> cfg = Config.fromfile("./configs/test.py")
>>> cfg.filename
"/home/gorilla_lab/code/gorilla/configs/test.py"
>>> cfg.item4
"test"
>>> cfg
"Config [path: /home/gorilla_lab/code/gorilla/configs/test.py]: "
"{"item1": [1, 2], "item2": {"a": 0}, "item3": True, "item4": "test"}"
"""
def __init__(self,
cfg_dict: Optional[Dict] = None,
cfg_text: Optional[str] = None,
filename: Optional[str] = None):
if cfg_dict is None:
cfg_dict = dict()
elif not isinstance(cfg_dict, dict):
raise TypeError(f"cfg_dict must be a dict, "
f"but got {type(cfg_dict)}")
for key in cfg_dict:
if key in RESERVED_KEYS:
raise KeyError(f"{key} is reserved for config file")
super(Config, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
super(Config, self).__setattr__("_filename", filename)
if cfg_text:
text = cfg_text
elif filename:
with open(filename, "r") as f:
text = f.read()
else:
text = ""
super(Config, self).__setattr__("_text", text)
@staticmethod
def _file2dict(filename: str):
filename = os.path.abspath(os.path.expanduser(filename))
check_file(filename)
from gorilla.fileio import load
cfg_dict = ConfigDict(load(filename))
with open(filename, "r") as f:
cfg_text = f.read()
# here cfg_dict is still the same as the content of the --config file,
# and the code block below reads the _base_ sub-config files and merges them into one.
if BASE_KEY in cfg_dict:
cfg_dir = os.path.dirname(filename)
base_filename = cfg_dict.pop(BASE_KEY)
base_filename = base_filename if isinstance(
base_filename, list) else [base_filename]
cfg_dict_list = list()
cfg_text_list = list()
for f in base_filename:
_cfg_dict, _cfg_text = Config._file2dict(os.path.join(cfg_dir, f))
cfg_dict_list.append(_cfg_dict)
cfg_text_list.append(_cfg_text)
base_cfg_dict = dict()
for c in cfg_dict_list:
if len(base_cfg_dict.keys() & c.keys()) > 0:
# e.g. sub-config file about dataset should not overlap with
# the one about model
raise KeyError("Duplicate key is not allowed among bases")
base_cfg_dict.update(c)
cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
# merge cfg_text
cfg_text_list.append(cfg_text)
cfg_text = "\n".join(cfg_text_list)
return cfg_dict, cfg_text
@staticmethod
def _merge_a_into_b(a, b):
r"""merge dict ``a`` into dict ``b`` (non-inplace).
Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
in-place modifications.
Args:
a (dict): The source dict to be merged into ``b``.
b (dict): The origin dict to be fetch keys from ``a``.
Returns:
dict: The modified dict of ``b`` using ``a``.
Examples:
# Normally merge a into b.
>>> Config._merge_a_into_b(
... dict(obj=dict(a=2)), dict(obj=dict(a=1)))
{"obj": {"a": 2}}
"""
b = b.copy()
for k, v in a.items():
if isinstance(v, dict) and k in b:
allowed_types = dict
if not isinstance(b[k], allowed_types):
raise TypeError(
f"{k}={v} in child config cannot inherit from base "
f"because {k} is a dict in the child config but is of "
f"type {type(b[k])} in base config.")
b[k] = Config._merge_a_into_b(v, b[k])
else:
b[k] = v
return b
@staticmethod
def fromfile(filename: str):
r"""cfg_text is the text content read from 5 files, and cfg_dict is
a dict resolved by the text content.
"""
cfg_dict, cfg_text = Config._file2dict(filename)
return Config(cfg_dict, cfg_text=cfg_text, filename=filename)
@staticmethod
def fromstring(cfg_str, file_format):
"""Generate config from config str.
Args:
cfg_str (str): Config str.
file_format (str): Config file format corresponding to the
config str. Only py/yml/yaml/json type are supported now!
Returns:
obj:`Config`: Config obj.
"""
if file_format not in [".py", ".json", ".yaml", ".yml"]:
raise IOError("Only py/yml/yaml/json type are supported now!")
if file_format != ".py" and "dict(" in cfg_str:
# check if users specify a wrong suffix for python
warnings.warn(
"Please check 'file_format', the file format may be .py")
with tempfile.NamedTemporaryFile("w", suffix=file_format) as temp_file:
temp_file.write(cfg_str)
temp_file.flush()
cfg = Config.fromfile(temp_file.name)
return cfg
@property
def filename(self) -> str:
return self._filename
@property
def text(self) -> str:
return self._text
def __repr__(self) -> str:
content = f"Config (path: {self.filename})\n"
content += json.dumps(self._cfg_dict, indent=4, ensure_ascii=False)
return content
def __len__(self) -> int:
return len(self._cfg_dict)
def __getattr__(self, name: str):
return getattr(self._cfg_dict, name)
def __getitem__(self, name: str):
return self._cfg_dict.__getitem__(name)
def __setattr__(self, name: str, value: Dict):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setattr__(name, value)
def __setitem__(self, name: str, value: Dict):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setitem__(name, value)
def __iter__(self):
return iter(self._cfg_dict)
def dump(self, file: Optional[str] = None, **kwargs):
cfg_dict = self._cfg_dict.to_dict()
from gorilla.fileio import dump
if file is None:
# output the content
file_format = self.filename.split(".")[-1]
if file_format == "py":
return self.text
else:
return dump(cfg_dict, file_format=file_format, **kwargs)
else:
if file.endswith("py"):
with open(file, "w") as f:
f.write(self.text)
else:
dump(cfg_dict, file, **kwargs)
def merge_from_dict(self, options: Dict):
r"""Merge list into cfg_dict.
Merge the dict parsed by MultipleKVAction into this cfg.
Examples:
>>> options = {"model.backbone.depth": 50,
... "model.backbone.with_cp":True}
>>> cfg = Config(dict(model=dict(backbone=dict(type="ResNet"))))
>>> cfg.merge_from_dict(options)
>>> cfg_dict = super(Config, self).__getattribute__("_cfg_dict")
>>> assert cfg_dict == dict(
... model=dict(backbone=dict(depth=50, with_cp=True)))
# Merge list element
>>> cfg = Config(dict(pipeline=[
... dict(type="LoadImage"), dict(type="LoadAnnotations")]))
>>> options = dict(pipeline={"0": dict(type="SelfLoadImage")})
Args:
options (dict): dict of configs to merge from.
"""
option_cfg_dict = {}
for full_key, v in options.items():
if v is None: # handle the case when a parameter simultaneously appears in argparse and config file
continue
d = option_cfg_dict
key_list = full_key.split(".")
for subkey in key_list[:-1]:
d.setdefault(subkey, ConfigDict())
d = d[subkey]
subkey = key_list[-1]
d[subkey] = v
cfg_dict = self._cfg_dict
cfg_dict = Config._merge_a_into_b(option_cfg_dict, cfg_dict)
# NOTE: strange phenomenon
# self._cfg_dict = cfg_dict
super(Config, self).__setattr__("_cfg_dict", cfg_dict)
def merge_cfg_and_args(cfg: Optional[Config] = None,
args: Optional[Namespace] = None) -> Config:
r"""merge args and cfg into a Config by calling 'merge_from_dict' func
Args:
cfg (Config, optional): Config from cfg file.
args (Namespace, optional): Argument parameters input.
Returns:
Config: Merged Config
"""
assert cfg is not None or args is not None, "'cfg' or 'args' can not be None simultaneously"
if cfg is None:
cfg = Config()
else:
assert isinstance(
cfg, Config
), f"'cfg' must be None or gorilla.Config, but got {type(cfg)}"
if args is None:
args = Namespace()
else:
assert isinstance(
args, Namespace
), f"'args' must be None or argsparse.Namespace, but got {type(args)}"
# convert namespace into dict
args_dict = vars(args)
cfg.merge_from_dict(args_dict)
return cfg
| 2.6875
| 3
|
Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py
|
karim-awad/kaspa
| 0
|
12015
|
<filename>Kaspa/modules/extension_modules/spotify_module/spotifyModuleEn.py
from Kaspa.modules.abstract_modules.abstractSubmodule import AbstractSubmodule
from Kaspa.modules.exceptions.impossibleActionError import ImpossibleActionError
from Kaspa.config import Config
class SpotifyModuleEn(AbstractSubmodule):
module_name = "Spotify"
language = "en"
key_regexes = dict()
def __init__(self):
self.key_regexes = {'(?i).*?(?=continue)+.+?(?=playback)+.': self.action_continue_playback,
'(?i).*?(?=pause)+.': self.action_play,
'(?i).*?(?=play)+.': self.action_play,
'(?i).*?(?=next)+.': self.action_next,
'(?i).*?(?=stop)+.': self.action_pause,
'(?i).*?(?=what)+.+?(?=song)+.': self.action_song_info}
def action_continue_playback(self, query):
communicator = query.get_communicator()
self.main_module.continue_playback()
communicator.say("I am now continuing your music playback.")
return
def action_pause(self, query):
communicator = query.get_communicator()
self.main_module.pause()
communicator.say("Music paused.")
return
def action_play(self, query):
communicator = query.get_communicator()
text = query.get_text()
try:
self.action_continue_playback(query)
return
except ImpossibleActionError:
pass
if self.main_module.current_song() is None:
self.main_module.play_saved()
communicator.say("Okay, playing your last added songs.")
return
# fetch all playlist macros from config file and search for matches in the query
playlists = Config.get_instance().get_section_content('playlists')
for playlist in playlists:
if playlist[0].lower() in text.lower():
self.main_module.play_playlist(playlist[1])
communicator.say("Okay, I'll now play the playlist" + playlist[0] + ".")
return
self.main_module.play()
communicator.say("Okay")
return
def action_next(self, query):
communicator = query.get_communicator()
self.main_module.next()
communicator.say("Okay")
return
def action_song_info(self, query):
communicator = query.get_communicator()
if self.main_module.current_song():
title, artist = self.main_module.current_song()
communicator.say("The song is " + title + " by " + artist + ".")
else:
communicator.say("There is no music loaded right now.")
| 2.1875
| 2
|
screenshot-server/app/main.py
|
martindines/ScreenshotServer
| 1
|
12016
|
<reponame>martindines/ScreenshotServer
import os
import sys
import pathlib
from utilities import get_random_hash
from flask import Flask, flash, request, redirect, url_for, send_from_directory, jsonify, Response
UPLOAD_FOLDER = os.environ.get('UPLOAD_FOLDER') if os.environ.get('UPLOAD_FOLDER') else '/tmp'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
SECRET = os.environ.get('SECRET')
app = Flask(__name__)
app.config['SERVER_NAME'] = os.environ.get('SERVER_NAME')
def allowed_file(filename):
return '.' in filename and \
pathlib.Path(filename).suffix[1:].lower() in ALLOWED_EXTENSIONS
def is_secret_valid(guess):
try:
if guess == SECRET:
return True
return False
except KeyError:
return False
def verify_auth_headers():
if 'secret' in request.headers:
guess = request.headers['secret']
return is_secret_valid(guess)
return False
def upload_file_and_return_external_path(file):
extension = pathlib.Path(file.filename).suffix
filename = get_random_hash() + extension
filepath = os.path.join(UPLOAD_FOLDER, filename)
if os.path.exists(filepath):
return upload_file_and_return_external_path(file)  # name collision: retry with a new hash
else:
file.save(filepath)
return url_for('upload', filename=filename, _external=True)
@app.route('/')
def index():
return '''
<!doctype html>
'''
@app.route('/<filename>', methods=['GET'])
def upload(filename):
if allowed_file(filename):
return send_from_directory(UPLOAD_FOLDER, filename)
@app.route('/api/auth', methods=['GET'])
def api_auth():
if verify_auth_headers():
return jsonify(
success=True
)
return jsonify(
success=False,
message='Invalid secret'
)
@app.route('/api/upload', methods=['POST'])
def api_upload():
if verify_auth_headers():
# check if the post request has the file part
if 'file' not in request.files:
return jsonify(
success=False,
message='No file present'
)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
return jsonify(
success=False,
message='Filename missing'
)
if file and allowed_file(file.filename):
path = upload_file_and_return_external_path(file)
return jsonify(
success=True,
path=path
)
else:
return jsonify(
success=False,
message='File type not allowed'
)
return jsonify(
success=False,
message='Invalid secret'
)
| 2.5
| 2
|
src/waldur_mastermind/notifications/migrations/0002_json_field.py
|
opennode/nodeconductor-assembly-waldur
| 2
|
12017
|
<reponame>opennode/nodeconductor-assembly-waldur
# Generated by Django 3.2 on 2022-01-31 14:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='emails',
field=models.JSONField(),
),
migrations.AlterField(
model_name='notification',
name='query',
field=models.JSONField(),
),
]
| 1.367188
| 1
|
gitgoggles/utils.py
|
nowells/git-goggles
| 13
|
12018
|
<reponame>nowells/git-goggles
import copy
import subprocess
import sys
import unicodedata
def disable_colored_func(text, *args, **kwargs):
return text
try:
from termcolor import colored as colored_func
except ImportError:
print 'You should run "pip install termcolor" to fully utilize these utilities.'
colored_func = disable_colored_func
def supports_color():
"""
Returns True if the running system's terminal supports color, and False
otherwise.
"""
unsupported_platform = (sys.platform in ('win32', 'Pocket PC'))
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if unsupported_platform or not is_a_tty:
return False
return True
if not supports_color():
colored_func = disable_colored_func
class Colored(object):
disabled = False
def __call__(self, *args, **kwargs):
if self.disabled:
return disable_colored_func(*args, **kwargs)
return colored_func(*args, **kwargs)
colored = Colored()
def force_unicode(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
# Normalize to NFKC so that characters which NFKD would split into 2 code points are composed into a single character.
obj = unicodedata.normalize('NFKC', obj)
return obj
def force_str(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, str):
obj = obj.encode(encoding)
return obj
def console(obj):
sys.stdout.write(force_str(obj))
class AccumulatorDict(dict):
def __init__(self, default, *args, **kwargs):
self.__default = default
def __getitem__(self, key):
if key not in self:
self[key] = copy.copy(self.__default)
return super(AccumulatorDict, self).__getitem__(key)
def memoize(func):
def _(self, *args, **kwargs):
if not hasattr(self, '__memoize_cache'):
self.__memoize_cache = AccumulatorDict(AccumulatorDict({}))
key = tuple([ tuple(args), tuple([ tuple([x, y]) for x, y in kwargs.items() ]) ])
if key not in self.__memoize_cache[func]:
self.__memoize_cache[func][key] = func(self, *args, **kwargs)
return self.__memoize_cache[func][key]
return _
def terminal_dimensions():
try:
# This probably does not work on windows, but it should work just about
# everywhere else.
p = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate(None)
stdout = force_unicode(stdout)
stderr = force_unicode(stderr)
rows, columns = [ int(x) for x in stdout.split() ]
except:
rows, columns = 40, 79
return rows, columns
| 2.578125
| 3
|
test/unit/tools/test_basisconstructors.py
|
colibri-coruscans/pyGSTi
| 73
|
12019
|
<filename>test/unit/tools/test_basisconstructors.py
import numpy as np
import pygsti.baseobjs.basisconstructors as bc
from ..util import BaseCase
class BasisConstructorsTester(BaseCase):
def test_GellMann(self):
id2x2 = np.array([[1, 0], [0, 1]])
sigmax = np.array([[0, 1], [1, 0]])
sigmay = np.array([[0, -1.0j], [1.0j, 0]])
sigmaz = np.array([[1, 0], [0, -1]])
# Gell-Mann 2x2 matrices should just be the sigma matrices
GM2_mxs = bc.gm_matrices_unnormalized(2)
self.assertTrue(len(GM2_mxs) == 4)
self.assertArraysAlmostEqual(GM2_mxs[0], id2x2)
self.assertArraysAlmostEqual(GM2_mxs[1], sigmax)
self.assertArraysAlmostEqual(GM2_mxs[2], sigmay)
self.assertArraysAlmostEqual(GM2_mxs[3], sigmaz)
with self.assertRaises(TypeError):
bc.gm_matrices_unnormalized("FooBar") # arg must be tuple,list,or int
# Normalized Gell-Mann 2x2 matrices should just be the sigma matrices / sqrt(2)
NGM2_mxs = bc.gm_matrices(2)
self.assertTrue(len(NGM2_mxs) == 4)
self.assertArraysAlmostEqual(NGM2_mxs[0], id2x2 / np.sqrt(2))
self.assertArraysAlmostEqual(NGM2_mxs[1], sigmax / np.sqrt(2))
self.assertArraysAlmostEqual(NGM2_mxs[2], sigmay / np.sqrt(2))
self.assertArraysAlmostEqual(NGM2_mxs[3], sigmaz / np.sqrt(2))
#TODO: test 4x4 matrices?
def test_orthogonality(self):
#Gell Mann
dim = 5
mxs = bc.gm_matrices(dim)
N = len(mxs); self.assertTrue(N == dim**2)
gm_trMx = np.zeros((N, N), 'complex')
for i in range(N):
for j in range(N):
gm_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
#Note: conjugate transpose not needed since mxs are Hermitian
self.assertArraysAlmostEqual(gm_trMx, np.identity(N, 'complex'))
#Std Basis
dim = 5
mxs = bc.std_matrices(dim)
N = len(mxs); self.assertTrue(N == dim**2)
std_trMx = np.zeros((N, N), 'complex')
for i in range(N):
for j in range(N):
std_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
self.assertArraysAlmostEqual(std_trMx, np.identity(N, 'complex'))
#Pauli-product basis
dim = 4
mxs = bc.pp_matrices(dim)
N = len(mxs); self.assertTrue(N == dim**2)
with self.assertRaises(TypeError):
bc.pp_matrices("Foobar") # dim must be an int
with self.assertRaises(ValueError):
bc.pp_matrices(3) # dim must be a power of 4
specialCase = bc.pp_matrices(1) # single 1x1 identity mx
self.assertEqual(specialCase, [np.identity(1, 'complex')])
pp_trMx = np.zeros((N, N), 'complex')
for i in range(N):
for j in range(N):
pp_trMx[i, j] = np.trace(np.dot(np.conjugate(np.transpose(mxs[i])), mxs[j]))
#Note: conjugate transpose not needed since mxs are Hermitian
self.assertArraysAlmostEqual(pp_trMx, np.identity(N, 'complex'))
def test_basis_misc(self):
mx = bc.pp_matrices(1) # was [1] but this shouldn't be allowed
self.assertArraysAlmostEqual(np.identity(1, 'complex'), mx)
def test_pp_maxweight(self):
pp2Max1 = bc.pp_matrices(2, max_weight=1) # using max_weight
pp2 = bc.pp_matrices(2) # For 2x2, should match max_weight=1
for mxMax, mx in zip(pp2Max1, pp2):
self.assertArraysAlmostEqual(mxMax, mx)
pp4Max1 = bc.pp_matrices(4, max_weight=1)
pp4 = bc.pp_matrices(4)
pp4Subset = [pp4[0], pp4[1], pp4[2], pp4[3], pp4[4], pp4[8], pp4[12]] # Pull out II,IX,IY,IZ,XI,YI,ZI
for mxMax, mxSub in zip(pp4Max1, pp4Subset):
self.assertArraysAlmostEqual(mxMax, mxSub)
def test_qt_dim1(self):
qutrit1 = bc.qt_matrices(1) # special case when dim==1
self.assertArraysAlmostEqual(np.identity(1, 'd'), qutrit1)
def test_qt_orthonorm(self):
mxs = bc.qt_matrices(3)
for i in range(len(mxs)):
for j in range(len(mxs)):
dp = np.vdot(mxs[i], mxs[j])
if i == j:
self.assertAlmostEqual(dp, 1.0)
else:
self.assertAlmostEqual(dp, 0.0)
| 2.5625
| 3
|
tensorflow/contrib/training/python/training/hparam_test.py
|
DEVESHTARASIA/tensorflow
| 384
|
12020
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
class HParamsTest(test.TestCase):
def _assertDictEquals(self, d1, d2):
self.assertEqual(len(d1), len(d2))
for k, v in six.iteritems(d1):
self.assertTrue(k in d2, k)
self.assertEquals(v, d2[k], d2[k])
def testEmpty(self):
hparams = hparam.HParams()
self._assertDictEquals({}, hparams.values())
hparams.parse('')
self._assertDictEquals({}, hparams.values())
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('xyz=123')
def testSomeValues(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6')
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
expected_str = '[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\')]'
self.assertEquals(expected_str, str(hparams.__str__()))
self.assertEquals(expected_str, str(hparams))
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('aaa=12')
self._assertDictEquals(
{'aaa': 12, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('c_c=relu4,b=-2.0e10')
self._assertDictEquals({'aaa': 12, 'b': -2.0e10, 'c_c': 'relu4'},
hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(-2.0e10, hparams.b)
self.assertEquals('relu4', hparams.c_c)
hparams.parse('c_c=,b=0,')
self._assertDictEquals({'aaa': 12, 'b': 0, 'c_c': ''}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(0.0, hparams.b)
self.assertEquals('', hparams.c_c)
hparams.parse('c_c=2.3",b=+2,')
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=123')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=poipoi')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=1.0')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=12x')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=relu')
with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
hparams.parse('aaa=[123]')
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals(12, hparams2.aaa)
self.assertEquals(2.0, hparams2.b)
self.assertEquals('2.3"', hparams2.c_c)
def testBoolParsing(self):
for value in 'true', 'false', 'True', 'False', '1', '0':
for initial in False, True:
hparams = hparam.HParams(use_gpu=initial)
hparams.parse('use_gpu=' + value)
self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
self.assertEquals(hparams.use_gpu, hparams2.use_gpu)
# Check that hparams2.use_gpu is a bool rather than an int.
# The assertEquals() call above won't catch this, since
# (0 == False) and (1 == True) in Python.
self.assertEquals(bool, type(hparams2.use_gpu))
def testBoolParsingFail(self):
hparams = hparam.HParams(use_gpu=True)
with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
hparams.parse('use_gpu=yep')
def testLists(self):
hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
self._assertDictEquals({'aaa': [1], 'b': [2.0, 3.0], 'c_c': ['relu6']},
hparams.values())
self.assertEquals([1], hparams.aaa)
self.assertEquals([2.0, 3.0], hparams.b)
self.assertEquals(['relu6'], hparams.c_c)
hparams.parse('aaa=[12]')
self.assertEquals([12], hparams.aaa)
hparams.parse('aaa=[12,34,56]')
self.assertEquals([12, 34, 56], hparams.aaa)
hparams.parse('c_c=[relu4,relu12],b=[1.0]')
self.assertEquals(['relu4', 'relu12'], hparams.c_c)
self.assertEquals([1.0], hparams.b)
hparams.parse('c_c=[],aaa=[-34]')
self.assertEquals([-34], hparams.aaa)
self.assertEquals([], hparams.c_c)
hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
self.assertEquals([3], hparams.aaa)
self.assertEquals(['_12', '3\'4"'], hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=[123]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[poipoi]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[1.0]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[12x]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[relu]')
with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
hparams.parse('aaa=123')
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals([3], hparams2.aaa)
self.assertEquals([1.0], hparams2.b)
self.assertEquals(['_12', '3\'4"'], hparams2.c_c)
def testJson(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6', 'd': True}, hparams.values())
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
self._assertDictEquals(
{'aaa': 12, 'b': 3.0, 'c_c': 'relu4', 'd': False}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(3.0, hparams.b)
self.assertEquals('relu4', hparams.c_c)
json_str = hparams.to_json()
hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
hparams2.parse_json(json_str)
self.assertEquals(12, hparams2.aaa)
self.assertEquals(3.0, hparams2.b)
self.assertEquals('relu4', hparams2.c_c)
self.assertEquals(False, hparams2.d)
def testNonProtoFails(self):
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1.0)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def='hello')
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=[1, 2, 3])
if __name__ == '__main__':
test.main()
| 2.328125
| 2
|
CodigoSOLID.py
|
JhonGalarza/SOLID
| 0
|
12021
|
<filename>CodigoSOLID.py
# INPUT DATA
ANIMAL = int(input("Which animal's characteristics do you want to know? 1. Leon 2. Ballena 3. Tucan? "))
class Animal:
def __init__(self, ANIMAL):
self.ANIMAL = ANIMAL
def acciones_comun():
comun = "Comer"
return comun
def sentido_vista():
vista = "Puede ver"
return vista
class Animal_Tierra:
def acciones_Tierra():
Tierra = "camina en cuatro patas"
return Tierra
class Animal_Agua:
def acciones_Agua():
return "Nada bajo el agua"
class Animal_Aire (Animal):
def acciones_Aire():
return "Vuela"
class Leon (Animal, Animal_Tierra):
def llamar():
caracteristicas = ()
return caracteristicas
class Ballena(Animal, Animal_Agua):
def llamar():
caracteristicas = ()
return caracteristicas
class Tucan(Animal, Animal_Aire):
def llamar():
caracteristicas = ()
return caracteristicas
if ANIMAL == 1 :
print ("debe imprimir las caracteristicas del leon, el leon es clase hija de animal y debe agragar animal_tierra" )
elif ANIMAL == 2 :
print ("lo mismo que el leon, pero con la ballena")
elif ANIMAL == 3 :
print("Lo mismo pero con el tucan")
| 4.09375
| 4
|
orca/topology/infra/k8s/__init__.py
|
filwie/orca
| 0
|
12022
|
<gh_stars>0
# Copyright 2020 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orca.topology import bundle
from orca.topology.infra.istio import linker as istio_linker
from orca.topology.infra.k8s import cluster, linker, probe
def get_probes():
return [
bundle.ProbeBundle(
probe=probe.PodPullProbe,
linkers=[
linker.PodToServiceLinker,
linker.PodToReplicaSetLinker,
linker.PodToStatefulSetLinker,
linker.PodToDaemonSetLinker,
linker.PodToNodeLinker,
linker.ConfigMapToPodLinker,
linker.SecretToPodLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.PodPushProbe,
linkers=[
linker.PodToServiceLinker,
linker.PodToReplicaSetLinker,
linker.PodToStatefulSetLinker,
linker.PodToDaemonSetLinker,
linker.PodToNodeLinker,
linker.ConfigMapToPodLinker,
linker.SecretToPodLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.ServicePullProbe,
linkers=[
linker.PodToServiceLinker,
linker.EndpointsToServiceLinker,
istio_linker.VirtualServiceToServiceLinker,
istio_linker.DestinationRuleToServiceLinker,
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.ServicePushProbe,
linkers=[
linker.PodToServiceLinker,
linker.EndpointsToServiceLinker,
istio_linker.VirtualServiceToServiceLinker,
istio_linker.DestinationRuleToServiceLinker,
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.EndpointsPullProbe,
linkers=[
linker.EndpointsToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.EndpointsPushProbe,
linkers=[
linker.EndpointsToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.DeploymentPullProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToDeploymentLinker
]
),
bundle.ProbeBundle(
probe=probe.DeploymentPushProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToDeploymentLinker
]
),
bundle.ProbeBundle(
probe=probe.ReplicaSetPullProbe,
linkers=[
linker.PodToReplicaSetLinker,
linker.ReplicaSetToDeploymentLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.ReplicaSetPushProbe,
linkers=[
linker.PodToReplicaSetLinker,
linker.ReplicaSetToDeploymentLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.DaemonSetPullProbe,
linkers=[
linker.PodToDaemonSetLinker
]
),
bundle.ProbeBundle(
probe=probe.DaemonSetPushProbe,
linkers=[
linker.PodToDaemonSetLinker
]
),
bundle.ProbeBundle(
probe=probe.StatefulSetPullProbe,
linkers=[
linker.PodToStatefulSetLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.StatefulSetPushProbe,
linkers=[
linker.PodToStatefulSetLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.ConfigMapPullProbe,
linkers=[
linker.ConfigMapToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.ConfigMapPushProbe,
linkers=[
linker.ConfigMapToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.SecretPullProbe,
linkers=[
linker.SecretToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.SecretPushProbe,
linkers=[
linker.SecretToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.StorageClassPullProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker
]
),
bundle.ProbeBundle(
probe=probe.StorageClassPushProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumePullProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker,
linker.PersistentVolumeToPersistentVolumeClaimLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumePushProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker,
linker.PersistentVolumeToPersistentVolumeClaimLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumeClaimPullProbe,
linkers=[
linker.PersistentVolumeToPersistentVolumeClaimLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumeClaimPushProbe,
linkers=[
linker.PersistentVolumeToPersistentVolumeClaimLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.HorizontalPodAutoscalerPullProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.HorizontalPodAutoscalerPushProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.NodePullProbe,
linkers=[
linker.PodToNodeLinker,
linker.NodeToClusterLinker
]
),
bundle.ProbeBundle(
probe=probe.NodePushProbe,
linkers=[
linker.PodToNodeLinker,
linker.NodeToClusterLinker
]
),
bundle.ProbeBundle(
probe=probe.IngressPullProbe,
linkers=[
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.IngressPushProbe,
linkers=[
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=cluster.ClusterProbe,
linkers=[
linker.NodeToClusterLinker
]
)
]
| 1.546875
| 2
|
thelma/repositories/rdb/view.py
|
fogathmann/TheLMA
| 1
|
12023
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Utilities to create/drop views.
Based on a recipe published in:
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/Views
"""
from sqlalchemy.sql import table
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
__docformat__ = 'reStructuredText en'
__all__ = ['CreateView',
'DropView',
'view_factory',
]
class CreateView(DDLElement):
def __init__(self, name, selectable): # pylint: disable=W0231
self.name = name
self.selectable = selectable
class DropView(DDLElement):
def __init__(self, name): # pylint: disable=W0231
self.name = name
@compiler.compiles(CreateView, 'postgresql')
def create_view_compile_postgresql(element, compiler, **kw): # pylint: disable=W0621,W0613
selection = compiler.sql_compiler.process(element.selectable)
stmt = "CREATE OR REPLACE VIEW %s AS %s" % (element.name, selection)
# FIXME: we should not combine the statement and params here.
# it is a SQLAlchemy bug... report it.
params = {}
for k, v in element.selectable.compile().params.iteritems():
params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
return stmt % params
@compiler.compiles(CreateView, 'sqlite')
def create_view_compile_sqlite(element, compiler, **kw): # pylint: disable=W0621,W0613
# FIXME: duplicate code
# FIXME: it seems that there is a bug in SQLAlchemy and creating views
# this way emits an exception
selection = compiler.sql_compiler.process(element.selectable)
stmt = "CREATE VIEW %s AS %s" % (element.name, selection)
# FIXME: we should not combine the statement and params here.
# it is a SQLAlchemy bug... report it.
params = {}
for k, v in element.selectable.compile().params.iteritems():
params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
return stmt % params
@compiler.compiles(DropView)
def drop_view_compile(element, compiler, **kw): # pylint: disable=W0621,W0613
return "DROP VIEW %s" % (element.name)
def view_factory(name, metadata, selectable):
if not hasattr(metadata, 'views'):
metadata.views = {}
metadata.views[name] = table(name)
for c in selectable.c:
c._make_proxy(metadata.views[name]) # pylint: disable=W0212
CreateView(name, selectable).execute_at('after-create', metadata)
DropView(name).execute_at('before-drop', metadata)
return metadata.views[name]
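# Usage sketch (illustrative only; the table and column names below are made up
# and are not part of TheLMA): register a view on a metadata object so that
# CREATE VIEW runs after create_all() and DROP VIEW runs before drop_all().
def _example_view_usage():
    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.sql import select
    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    # view_factory attaches CreateView/DropView DDL to the metadata and returns
    # a table object representing the view.
    return view_factory('user_names_view', metadata,
                        select([users.c.id, users.c.name]))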
| 2.25
| 2
|
msblog/models.py
|
designermanjeets/mscreativepixel
| 0
|
12024
|
<reponame>designermanjeets/mscreativepixel
from django.db import models
from datetime import datetime
import string, random
import uuid
# Create your models here.
class HeaderNavs(models.Model):
title = models.CharField(max_length = 50)
url = models.CharField(max_length = 50)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "HeaderNavs"
class Blogs(models.Model):
title = models.CharField(max_length = 50)
short_description = models.TextField(max_length = 100)
description = models.TextField()
created_at = models.DateTimeField(default=datetime.now, blank=True)
avatar = models.ImageField(upload_to = 'static/img/avatar/', default = 'static/img/avatar_1.jpg')
slug = models.CharField(max_length=40, blank=True, default=uuid.uuid4, unique=True)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "Blogs"
| 2.65625
| 3
|
Practical/Easy/HSV color wheel/colorwheel.py
|
saintwithataint/Pro-g-rammingChallenges4
| 1
|
12025
|
<filename>Practical/Easy/HSV color wheel/colorwheel.py
import colour
import matplotlib.pyplot as plt
import numpy as np
COLOUR_STYLE = colour.plotting.colour_style()
COLOUR_STYLE.update(
{
"figure.figsize": (11, 11),
"legend.framealpha": colour.plotting.COLOUR_STYLE_CONSTANTS.opacity.low,
}
)
plt.style.use(COLOUR_STYLE)
plt.style.use("dark_background")
colour.utilities.describe_environment()
colour.utilities.filter_warnings(*[True] * 4)
def colour_wheel(samples=1024, clip_circle=True, method="Colour"):
xx, yy = np.meshgrid(
np.linspace(-1, 1, samples), np.linspace(-1, 1, samples)
)
S = np.sqrt(xx**2 + yy**2)
H = (np.arctan2(xx, yy) + np.pi) / (np.pi * 2)
HSV = colour.utilities.tstack([H, S, np.ones(H.shape)])
RGB = colour.HSV_to_RGB(HSV)
if clip_circle:
RGB[S > 1] = 0
A = np.where(S > 1, 0, 1)
else:
A = np.ones(S.shape)
if method.lower() == "matplotlib":
RGB = colour.utilities.orient(RGB, "90 CW")
elif method.lower() == "nuke":
RGB = colour.utilities.orient(RGB, "Flip")
RGB = colour.utilities.orient(RGB, "90 CW")
R, G, B = colour.utilities.tsplit(RGB)
return colour.utilities.tstack([R, G, B, A])
COLOUR_WHEEL = colour_wheel(method="Nuke")
colour.plotting.plot_image(COLOUR_WHEEL)
| 2.78125
| 3
|
kojen/smgen.py
|
kohjaen/kojen
| 3
|
12026
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eugene'
'''
MIT License
Copyright (c) 2015 <NAME> (email : <EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Step 1) Load template files to memory
Step 2) Search and replace these tags in memory (including filenames).
<<<NAMESPACE>>>
<<<STATEMACHINENAME>>> or <<<CLASSNAME>>>
<<<AUTHOR>>>
Step 3) Search for the following pairs of tags
<<<PER_STATE_BEGIN>>>
<<<PER_STATE_END>>>
<<<PER_EVENT_BEGIN>>>
<<<PER_EVENT_END>>>
<<<PER_ACTION_BEGIN>>>
<<<PER_ACTION_END>>>
<<<PER_ACTION_SIGNATURE_BEGIN>>>
<<<PER_ACTION_SIGNATURE_END>>>
<<<PER_GUARD_BEGIN>>>
<<<PER_GUARD_END>>>
and duplicate the following for each item, replacing each tag with the item name
<<<STATENAME>>>
<<<EVENTNAME>>>
<<<ACTIONNAME>>>
<<<GUARDNAME>>>
These need to be expanded for event structs
<<<EVENTSIGNATURE>>>
<<<EVENTMEMBERSINSTANTIATE>>>
<<<EVENTMEMBERSDECLARE>>>
When looping <<<ALPH>>> should increment from a through Z.
When looping <<<NUM>>> should increment from 1 through 10000.
When reading the transition table, first state name (top, left) should be set to the value for this tag : <<<STATE_0>>>
Then, the transition table needs to go here, following the rules.
<<<TTT_BEGIN>>>
<<<TTT_END>>>
or
<<<TTT_LITE_BEGIN>>>
<<<TTT_LITE_END>>>
or
<<<TTT_LITE_SML_BEGIN>>>
<<<TTT_LITE_SML_END>>>
# EMBEDDED SM SUPPORT.
Step 4) In each <<PER_XXX tag, there might be more expansion required. The following tags apply in this pass
<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>
<<<PER_EVENT_NEXT_STATE_END>>>
and the following replacement tags will be correctly set
<<<EVENTSTATECURRENT>>>
<<<EVENTSTATENEXT>>>
Also, the original SM only allows a single state-based action to happen.
I want there to be several actions allowed in a State, based on several events valid in that state.
These tags provide for that.
<<<PER_STATE_ACTION_EVENT_BEGIN>>>
<<<PER_STATE_ACTION_EVENT_END>>>
and the following replacement tags will be correctly set
<<<PER_STATE_ACTION>>>
<<<PER_STATE_EVENT>>>
# END EMBEDDED SM SUPPORT.
'''
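# A small illustration of the per-item expansion described above (the state
# names "Idle" and "Running" are made up for the example, not taken from a real
# transition table): everything between the per-state markers is duplicated
# once per state, with <<<STATENAME>>> replaced by each state's name.
#
#   template snippet:                     expanded for states Idle, Running:
#     <<<PER_STATE_BEGIN>>>                 struct Idle {};
#     struct <<<STATENAME>>> {};            struct Running {};
#     <<<PER_STATE_END>>>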
__TAG_AUTHOR__ = '<<<AUTHOR>>>'
__TAG_GROUP__ = '<<<GROUP>>>'
__TAG_BRIEF__ = '<<<BRIEF>>>'
__TAG_NAMESPACE__ = '<<<NAMESPACE>>>'
__TAG_SM_NAME__ = '<<<STATEMACHINENAME>>>'
__TAG_SM_NAME_UPPER__ = '<<<STATEMACHINENAMEUPPER>>>'
__TAG_CLASS_NAME__ = '<<<CLASSNAME>>>'
__TAG_PyIFGen_NAME__ = '<<<PYIFGENNAME>>>'
__TAG_PS_BEGIN__ = "<<<PER_STATE_BEGIN>>>"
__TAG_PS_END__ = "<<<PER_STATE_END>>>"
__TAG_PE_BEGIN__ = "<<<PER_EVENT_BEGIN>>>"
__TAG_PE_END__ = "<<<PER_EVENT_END>>>"
__TAG_PA_BEGIN__ = "<<<PER_ACTION_BEGIN>>>"
__TAG_PA_END__ = "<<<PER_ACTION_END>>>"
__TAG_PASIG_BEGIN__ = "<<<PER_ACTION_SIGNATURE_BEGIN>>>"
__TAG_PASIG_END__ = "<<<PER_ACTION_SIGNATURE_END>>>"
__TAG_PG_BEGIN__ = "<<<PER_GUARD_BEGIN>>>"
__TAG_PG_END__ = "<<<PER_GUARD_END>>>"
__TAG_EVENT_SIGNATURE__ = "<<<EVENTSIGNATURE>>>"
__TAG_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSINSTANTIATE>>>"
__TAG_LITE_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSLITEINSTANTIATE>>>"
__TAG_EVENT_MEMBERDECL__ = "<<<EVENTMEMBERSDECLARE>>>"
__TAG_STATENAME__ = '<<<STATENAME>>>'
__TAG_EVENTNAME__ = '<<<EVENTNAME>>>'
__TAG_EVENTNAME_SMALL_CAMEL__ = '<<<EVENTNAMESMALLCAMEL>>>'
__TAG_ACTIONNAME__ = '<<<ACTIONNAME>>>'
__TAG_GUARDNAME__ = '<<<GUARDNAME>>>'
__TAG_ABC__ = '<<<ALPH>>>'
__TAG_123__ = '<<<NUM>>>'
__TAG_INIT_STATE__ = '<<<STATE_0>>>'
__TAG_TTT_BEGIN__ = '<<<TTT_BEGIN>>>'
__TAG_TTT_END___ = '<<<TTT_END>>>'
__TAG_TTT_LITE_BEGIN__ = '<<<TTT_LITE_BEGIN>>>'
__TAG_TTT_LITE_END__ = '<<<TTT_LITE_END>>>'
__TAG_TTT_LITE_SML_BEGIN__ = '<<<TTT_LITE_SML_BEGIN>>>'
__TAG_TTT_LITE_SML_END__ = '<<<TTT_LITE_SML_END>>>'
__TAG_DECLSPEC_DLL_EXPORT__ = "<<<DLL_EXPORT>>>"
# EMBEDDED SM SUPPORT.
__TAG_EVENT_CURNEX_ST_BEG__ = "<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>"
__TAG_EVENT_CURNEX_ST_END__ = "<<<PER_EVENT_NEXT_STATE_END>>>"
__TAG_EVENT_ST_CUR__ = "<<<EVENTSTATECURRENT>>>"
__TAG_EVENT_ST_NXT__ = "<<<EVENTSTATENEXT>>>"
__TAG_PSAE_BEGIN__ = "<<<PER_STATE_ACTION_EVENT_BEGIN>>>"
__TAG_PSAE_END__ = "<<<PER_STATE_ACTION_EVENT_END>>>"
__TAG_PSAE_ACTION__ = "<<<PER_STATE_ACTION>>>"
__TAG_PSAE_EVENT__ = "<<<PER_STATE_EVENT>>>"
# END EMBEDDED SM SUPPORT.
# Python 2 -> 3 shenanigans... try to support both
try:
from interface_base import * # py2
except (ModuleNotFoundError, ImportError) as e:
from .interface_base import * # py3
try:
from .preservative import *
except (ModuleNotFoundError, ImportError) as e:
from preservative import *
try:
from .cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
except (ModuleNotFoundError, ImportError) as e:
from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
try:
from LanguageCPP import LanguageCPP
except (ModuleNotFoundError, ImportError) as e:
from .LanguageCPP import LanguageCPP
# Model that describes a state machine.
class CStateMachineModel:
def __init__(self):
self.statemachinename = ""
self.namespacename = ""
self.declspecdllexport = ""
self.pythoninterfacegeneratorfilename = ""
self.states = []
self.actions = []
self.events = []
self.guards = []
# EMBEDDED SM SUPPORT.
self.event_transitions_per_state = {} # ['event', ['next state,current state' , ...]]
self.actionevents_per_state = {} # ['state', [['event', 'action'] , ...]
# END EMBEDDED SM SUPPORT.
self.actionsignatures = OrderedDict()
# Transition Table Model uses State Machine Model to generate all code required for a working state machine.
class CTransitionTableModel(CStateMachineModel):
START_STATE = 0
EVENT = 1
NEXT_STATE = 2
ACTION = 3
GUARD = 4
def __init__(self, tt, nn, smn, dclspc = ""):
CStateMachineModel.__init__(self)
self.transition_table = tt
self.statemachinename = smn
self.namespacename = nn
self.declspecdllexport = dclspc
tstate = OrderedDict()
taction = OrderedDict()
tevent = OrderedDict()
tguard = OrderedDict()
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
tevent_transitions_tmp = {}
# END EMBEDDED SM SUPPORT.
# Filter
for tableline in self.transition_table:
if tableline[self.START_STATE] != "" and tableline[self.START_STATE].lower() != "none":
tstate[tableline[self.START_STATE]] = 0
if tableline[self.NEXT_STATE] != "" and tableline[self.NEXT_STATE].lower() != "none":
tstate[tableline[self.NEXT_STATE]] = 0
if tableline[self.EVENT] != "" and tableline[self.EVENT].lower() != "none":
tevent[tableline[self.EVENT]] = 0
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
raise Exception('Events that dont change state should re-enter the current state.\nPlease fix your transition table')
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
TODO : For the case below, how to support a different 'action' on the in-state-event???? Ie that event might have gotten the machine
to this state with a particular action, but perhaps the user has configured a different action for this event in-state???
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.START_STATE]
else:
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
# This is for in-state-actions based on events...
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
if not (tableline[self.START_STATE] in self.actionevents_per_state):
self.actionevents_per_state[tableline[self.START_STATE]] = []
self.actionevents_per_state[tableline[self.START_STATE]].append([tableline[self.EVENT], tableline[self.ACTION]])
# END EMBEDDED SM SUPPORT.
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
taction[tableline[self.ACTION]] = 0
if not ((tableline[self.ACTION] + tableline[self.EVENT]) in self.actionsignatures):
self.actionsignatures[tableline[self.ACTION] + tableline[self.EVENT]] = (tableline[self.ACTION], tableline[self.EVENT]) #, tableline[self.START_STATE],tableline[self.NEXT_STATE]))
if tableline[self.GUARD] != "" and tableline[self.GUARD].lower() != "none":
tguard[tableline[self.GUARD]] = 0
# Populate CStateMachineModel
for s in tstate:
self.states.append(s)
for e in tevent:
self.events.append(e)
for a in taction:
self.actions.append(a)
for g in tguard:
self.guards.append(g)
# EMBEDDED SM SUPPORT.
for e in tevent:
self.event_transitions_per_state[e] = []
for s in tstate:
key = s+','+e
if key in tevent_transitions_tmp:
self.event_transitions_per_state[e].append([tevent_transitions_tmp[key], s])
else:
self.event_transitions_per_state[e].append(['EVENT_IGNORED', s])
# END EMBEDDED SM SUPPORT.
def __getfirststate__(self):
if not self.transition_table:
return "NO TT PRESENT!"
return self.transition_table[0][0]
class CStateMachineGenerator(CBASEGenerator):
def __init__(self, inputfiledir, outputfiledir, events_interface=None, language=None, author='Anonymous', group='', brief=''):
CBASEGenerator.__init__(self,inputfiledir,outputfiledir,language, author, group, brief)
self.events_interface = events_interface
def __loadtemplates_firstfiltering__(self, smmodel):
"""
See baseclass implementation. This just prepares the dictionary of things to replace
for this type of codegeneration.
@param smmodel:
@return: cgen.CCodeModel, a dictionary -> {filename,[lines]}
"""
dict_to_replace_lines = {}
dict_to_replace_lines[__TAG_SM_NAME_UPPER__] = caps(smmodel.statemachinename)
dict_to_replace_lines[__TAG_SM_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_CLASS_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = smmodel.pythoninterfacegeneratorfilename.replace('.py', '') # hack : for tcpgen simple templates,
if not dict_to_replace_lines[__TAG_PyIFGen_NAME__]:
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = self.vpp_filename
dict_to_replace_lines[__TAG_NAMESPACE__] = smmodel.namespacename
dict_to_replace_lines[__TAG_AUTHOR__] = self.author
dict_to_replace_lines[__TAG_GROUP__] = self.group
dict_to_replace_lines[__TAG_BRIEF__] = self.brief
dict_to_replace_lines[__TAG_DECLSPEC_DLL_EXPORT__] = smmodel.declspecdllexport
dict_to_replace_filenames = {}
dict_to_replace_filenames["TEMPLATE_"] = smmodel.statemachinename
#dict_to_replace_filenames['.ty'] = '.py'
#dict_to_replace_filenames['.t#'] = '.cs'
#dict_to_replace_filenames['.t'] = '.h'
#dict_to_replace_filenames['.hpp'] = '.cpp' # there are no '.hpp' templates...but search and replace will apply '.t -> .h' first so '.tpp' becomes '.hpp'...grrr
return CBASEGenerator.__loadtemplates_firstfiltering__(self,dict_to_replace_lines,dict_to_replace_filenames)
def __get_event_signature__(self,name):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
return self.language.ParameterString(self.language.GetFactoryCreateParams(s, self.events_interface))
return ""
def __instantiate_event_struct_member(self, name, whitespace_cnt, is_ptr=True, instancename="data"):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.InstantiateStructMembers(s, self.events_interface, '', instancename, self.language.Accessor(is_ptr))
result = ''
cnt = 0
for g in guts:
result = result + (whitespace_cnt*' ' if cnt > 0 else '') + g + '\n'
cnt = cnt + 1
return result
return ""
def __declare_event_struct_members(self, name, whitespace_cnt):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.DeclareStructMembers(s, self.events_interface, '', False)
result = ''
cnt = 0
for g in guts:
result = result + ((whitespace_cnt+1)*' ' if cnt > 0 else ' ') + g + '\n'
cnt = cnt + 1
# remove last '\n'
result = result[:-1]
return result
return ""
def hasTag(self, line, tag):
return line.find(tag.replace("<<<", "").replace(">>>", "")) > 0
def hasMemberName(self, a):
return a.find("::") > 0
def extractMemberNameAndTag(self, a):
member = a[a.find("::"):a.find(">>>")].replace("::", "")
tag = a.strip()
return [tag, member]
def __innerexpand__secondfiltering__(self, names2x, lines2x, puthere):
global alpha
__resetalphabet__()
cnt = 0
for name in names2x:
for line in lines2x:
newline = line
newline = newline.replace(__TAG_STATENAME__, name)
newline = newline.replace(__TAG_EVENTNAME_SMALL_CAMEL__, camel_case_small(name))
newline = newline.replace(__TAG_EVENTNAME__, name)
newline = newline.replace(__TAG_ACTIONNAME__, name)
newline = newline.replace(__TAG_GUARDNAME__, name)
newline = newline.replace(__TAG_ABC__, chr(alpha))
newline = newline.replace(__TAG_123__, str(cnt))
# EMBEDDED SM SUPPORT.
newline = newline.replace(__TAG_EVENT_CURNEX_ST_BEG__, __TAG_EVENT_CURNEX_ST_BEG__ + '<<<' + name + '>>>') # put a marker (event name) for mapping
newline = newline.replace(__TAG_PSAE_BEGIN__, __TAG_PSAE_BEGIN__ + '<<<' + name + '>>>') # put a marker (state name) for mapping
# END EMBEDDED SM SUPPORT.
tabcnt = newline.count(' ')
newline = newline.replace(__TAG_EVENT_SIGNATURE__, self.__get_event_signature__(name))
# __TAG_EVENT_MEMBERINST__ -> PTR
if self.hasTag(newline,__TAG_EVENT_MEMBERINST__) and self.hasMemberName(newline):
line_member = self.extractMemberNameAndTag(newline)
newline = newline.replace(line_member[0],self.__instantiate_event_struct_member(name, tabcnt, True, line_member[1]))
else:
newline = newline.replace(__TAG_EVENT_MEMBERINST__, self.__instantiate_event_struct_member(name, tabcnt, True)) # PTR
# __TAG_LITE_EVENT_MEMBERINST__ -> NO PTR
if self.hasTag(newline,__TAG_LITE_EVENT_MEMBERINST__) and self.hasMemberName(newline):
line_member = self.extractMemberNameAndTag(newline)
newline = newline.replace(line_member[0],self.__instantiate_event_struct_member(name, tabcnt, False, line_member[1]))
else:
newline = newline.replace(__TAG_LITE_EVENT_MEMBERINST__, self.__instantiate_event_struct_member(name, tabcnt, False)) # NO PTR
newline = newline.replace(__TAG_EVENT_MEMBERDECL__, self.__declare_event_struct_members(name, tabcnt))
# END EMBEDDED SUPPORT
puthere.append(newline)
cnt = cnt + 1
__getnextalphabet__()
def __innerexpand_actionsignatures__(self, states2x, lines2x, puthere):
global alpha
__resetalphabet__()
cnt = 0
for key, (actionname, eventname) in states2x.items():
if eventname == "" or eventname.lower() == 'none':
eventname = "NONE"
elif eventname.lower() == 'any':
eventname = "ANY"
for line in lines2x:
puthere.append(line
.replace(__TAG_ACTIONNAME__, actionname)
.replace(__TAG_EVENTNAME_SMALL_CAMEL__, camel_case_small(eventname))
.replace(__TAG_EVENTNAME__, eventname)
.replace(__TAG_ABC__, chr(alpha))
.replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
def __transitiontable_replace_NONE__(self, val):
if val == "" or val.lower() == 'none':
val = "msmf::none"
return val
def __transitiontableLITE_guard_replace_NONE__(self, val):
tmp_val = val.replace('__', '')
if tmp_val == "" or tmp_val.lower() == 'none':
val = "boost::msm::gnone"
return val
def __transitiontableLITE_action_replace_NONE__(self, val):
tmp_val = val.replace('__', '')
if tmp_val == "" or tmp_val.lower() == 'none' or tmp_val.lower().find('::none<') > -1:
val = "boost::msm::none"
return val
    ''' This SM doesn't seem to allow 'none' transitions -> make it transition to the source state. '''
def __transitiontableLITE_nextstate_replace_NONE__(self, val, source_state):
tmp_val = val.replace('__', '')
tmp_val = tmp_val.replace('msmf::', '')
if tmp_val == "" or tmp_val.lower() == 'none':
val = source_state
return val
def __expand_secondfiltering__(self, smmodel, cmmodel):
for file in cmmodel.filenames_to_lines:
ex_state = False
ex_event = False
ex_action = False
ex_actionsig = False
ex_guard = False
ex_tt = False
ex_tt_lite = False
ex_tt_lite_sml = False
snipped_to_expand = []
alllinesexpanded = []
for line in cmmodel.filenames_to_lines[file]:
begin = line.find(__TAG_PS_BEGIN__) > -1 or \
line.find(__TAG_PE_BEGIN__) > -1 or \
line.find(__TAG_PA_BEGIN__) > -1 or \
line.find(__TAG_PASIG_BEGIN__) > -1 or \
line.find(__TAG_PG_BEGIN__) > -1 or \
line.find(__TAG_TTT_BEGIN__) > -1 or \
line.find(__TAG_TTT_LITE_BEGIN__) > -1 or \
line.find(__TAG_TTT_LITE_SML_BEGIN__) > -1
ex_state = line.find(__TAG_PS_BEGIN__) > -1 or ex_state
ex_event = line.find(__TAG_PE_BEGIN__) > -1 or ex_event
ex_action = line.find(__TAG_PA_BEGIN__) > -1 or ex_action
ex_actionsig = line.find(__TAG_PASIG_BEGIN__) > -1 or ex_actionsig
ex_guard = line.find(__TAG_PG_BEGIN__) > -1 or ex_guard
ex_tt = line.find(__TAG_TTT_BEGIN__) > -1 or ex_tt
ex_tt_lite = line.find(__TAG_TTT_LITE_BEGIN__) > -1 or ex_tt_lite
ex_tt_lite_sml = line.find(__TAG_TTT_LITE_SML_BEGIN__) > -1 or ex_tt_lite_sml
if not ex_state and not ex_event and not ex_action and not ex_actionsig and not ex_guard and not ex_tt and not ex_tt_lite and not ex_tt_lite_sml:
alllinesexpanded.append(line.replace(__TAG_INIT_STATE__, smmodel.__getfirststate__()))
if ex_state and line.find(__TAG_PS_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.states, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_state = False
if ex_event and line.find(__TAG_PE_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.events, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_event = False
if ex_action and line.find(__TAG_PA_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.actions, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_action = False
if ex_actionsig and line.find(__TAG_PASIG_END__) > -1:
self.__innerexpand_actionsignatures__(smmodel.actionsignatures, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_actionsig = False
if ex_guard and line.find(__TAG_PG_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.guards, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_guard = False
if ex_tt and line.find(__TAG_TTT_END___) > -1:
len_tt = len(smmodel.transition_table)
tt_out = " // " + len("msmf::Row < ") * ' ' + even_space("Start") + even_space("Event") + even_space("Next") + even_space("Action") + even_space("Guard") + '\n'
for i, ttline in enumerate(smmodel.transition_table):
tt_out += ' msmf::Row < '
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.NEXT_STATE] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.ACTION] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.GUARD] )) + '> '
if i != len_tt-1:
tt_out += ","
tt_out += " // " + str(i) + '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt = False
if ex_tt_lite and line.find(__TAG_TTT_LITE_END__) > -1:
tt_out = " // " + even_space("Start + ") + even_space("Event") + even_space("[ Guard ] ") + even_space("/ Action") + even_space(" = Next") + '\n'
startStateHasEntryExit = {}
for i, ttline in enumerate(smmodel.transition_table):
if i == 0: # initial state
tt_out += " *"
else:
tt_out += " , "
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + '+'
tt_out += even_space('event<' + self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT]) + ">") + ' '
tt_out += even_space('['+self.__transitiontableLITE_guard_replace_NONE__('__'+ttline[smmodel.GUARD])+']') + ' / '
tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('__'+ttline[smmodel.ACTION]))
if ttline[smmodel.NEXT_STATE].lower() != 'none': # to not get transitions into/outof state on actions that dont change the state...
tt_out += ' = ' + even_space(self.__transitiontableLITE_nextstate_replace_NONE__(ttline[smmodel.NEXT_STATE], ttline[smmodel.START_STATE]))
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
# State entry/exit, once only
if not (ttline[smmodel.START_STATE] in startStateHasEntryExit):
startStateHasEntryExit[ttline[smmodel.START_STATE]] = True
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_entry / __" + ttline[smmodel.START_STATE] + 'OnEntry\n'
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_exit / __" + ttline[smmodel.START_STATE] + 'OnExit'
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt_lite = False
if ex_tt_lite_sml and line.find(__TAG_TTT_LITE_SML_END__) > -1:
tt_out = " // " + even_space("Start + ") + even_space("Event") + even_space("[ Guard ] ") + even_space("/ Action", 100) + even_space(" = Next") + '\n'
startStateHasEntryExit = {}
for i, ttline in enumerate(smmodel.transition_table):
if i == 0: # initial state
tt_out += " *"
else:
tt_out += " , "
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + '+'
tt_out += even_space('event<' + self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT]) + ">") + ' '
tt_out += even_space('['+self.__transitiontableLITE_guard_replace_NONE__('__'+ttline[smmodel.GUARD])+']') + ' / '
#tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('call(this,&CONCRETE::' + ttline[smmodel.ACTION] + '<' + ttline[smmodel.EVENT] + ">)"), 100)
tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('__' + ttline[smmodel.ACTION]), 100)
if ttline[smmodel.NEXT_STATE].lower() != 'none': # to not get transitions into/outof state on actions that dont change the state...
tt_out += ' = ' + even_space(self.__transitiontableLITE_nextstate_replace_NONE__(ttline[smmodel.NEXT_STATE], ttline[smmodel.START_STATE]))
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
# State entry/exit, once only
if not (ttline[smmodel.START_STATE] in startStateHasEntryExit):
startStateHasEntryExit[ttline[smmodel.START_STATE]] = True
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_entry<_> / __" + ttline[smmodel.START_STATE] + 'OnEntry\n'
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_exit<_> / __" + ttline[smmodel.START_STATE] + 'OnExit'
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt_lite_sml = False
if (ex_state or ex_event or ex_action or ex_actionsig or ex_guard or ex_tt or ex_tt_lite or ex_tt_lite_sml) and not begin:
snipped_to_expand.append(line)
cmmodel.filenames_to_lines[file] = alllinesexpanded
# EMBEDDED SM SUPPORT.
def __innerexpand__thirdfiltering__eventtransitionsperstate(self, namesmap3x, lines3x, puthere):
global alpha
__resetalphabet__()
cnt = 0
# First find the mapping marker
for _map in namesmap3x:
currentstate = _map[1]
nextstate = _map[0]
for line in lines3x:
#puthere.append(line.replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
puthere.append(line.replace(__TAG_EVENT_ST_CUR__, currentstate).replace(__TAG_EVENT_ST_NXT__, nextstate).replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
# this function is pretty much the same as the one above...
def __innerexpand__thirdfiltering__eventactionsperstate(self, namesmap3x, lines3x, puthere):
global alpha
__resetalphabet__()
cnt = 0
# First find the mapping marker
for _map in namesmap3x:
action = _map[1]
event = _map[0]
for line in lines3x:
# puthere.append(line.replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
puthere.append(line.replace(__TAG_PSAE_ACTION__, action).replace(__TAG_PSAE_EVENT__, event).replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
def __expand_thirdfiltering__(self, smmodel, cmmodel):
for file in cmmodel.filenames_to_lines:
ex_state = False
ex_event = False
#ex_action = False
#ex_guard = False
snippet_to_expand = []
alllinesexpanded = []
state_action_map = ''
event_map = ''
for line in cmmodel.filenames_to_lines[file]:
begin = line.find(__TAG_EVENT_CURNEX_ST_BEG__) > -1 or line.find(__TAG_PSAE_BEGIN__) > -1 #or line.find(__TAG_PA_BEGIN__) > -1 or line.find(__TAG_PG_BEGIN__) > -1
if begin:
event_map = line.replace(__TAG_EVENT_CURNEX_ST_BEG__, '').replace('<<<', '').replace('>>>', '').replace('\t', '').replace('\n', '').replace(" ","")
state_action_map = line.replace(__TAG_PSAE_BEGIN__, '').replace('<<<', '').replace('>>>', '').replace('\t', '').replace('\n', '').replace(" ","")
end_event = (line.find(__TAG_EVENT_CURNEX_ST_END__) > -1)
end_state = (line.find(__TAG_PSAE_END__) > -1)
ex_state = line.find(__TAG_PSAE_BEGIN__) > -1 or ex_state
ex_event = line.find(__TAG_EVENT_CURNEX_ST_BEG__) > -1 or ex_event
#ex_action = line.find(__TAG_PA_BEGIN__) > -1 or ex_action
#ex_guard = line.find(__TAG_PG_BEGIN__) > -1 or ex_guard
#if not ex_state and not ex_event and not ex_action and not ex_guard:
# alllinesexpanded.append(line.replace(__TAG_INIT_STATE__, smmodel.__getfirststate__()))
if ex_state and line.find(__TAG_PSAE_END__) > -1:
if state_action_map in smmodel.actionevents_per_state:
self.__innerexpand__thirdfiltering__eventactionsperstate(smmodel.actionevents_per_state[state_action_map], snippet_to_expand, alllinesexpanded)
snippet_to_expand = []
ex_state = False
if ex_event and line.find(__TAG_EVENT_CURNEX_ST_END__) > -1:
self.__innerexpand__thirdfiltering__eventtransitionsperstate(smmodel.event_transitions_per_state[event_map], snippet_to_expand, alllinesexpanded)
snippet_to_expand = []
ex_event = False
#if ex_action and line.find(__TAG_PA_END__) > -1:
# self.__innerexpand__thirdfiltering__(smmodel.actions, snippet_to_expand, alllinesexpanded)
# snippet_to_expand = []
# ex_action = False
#if ex_guard and line.find(__TAG_PG_END__) > -1:
# self.__innerexpand__thirdfiltering__(smmodel.guards, snippet_to_expand, alllinesexpanded)
# snippet_to_expand = []
# ex_guard = False
#if (ex_state or ex_event or ex_action or ex_guard) and not begin:
if (ex_event or ex_state) and not begin:
snippet_to_expand.append(line)
elif not begin and not end_event and not end_state: # Unlike the second pass, this needs to preserve what was done there...
alllinesexpanded.append(line)
cmmodel.filenames_to_lines[file] = alllinesexpanded
# END EMBEDDED SM SUPPORT.
''' Used for State Machine Generation
'''
def Generate(self, transitiontable, namespacenname, statemachinename, dclspc="", copyotherfiles = True):
print("*************************************")
print("******* SMGen ***********************")
print("*************************************")
print(" Output Dir : " + self.output_gen_file_dir)
print(" State Machine: " + statemachinename)
print(" Executing in : " + os.path.realpath(__file__))
print("*************************************")
sm = CTransitionTableModel(transitiontable, namespacenname, statemachinename, dclspc)
cm = self.__loadtemplates_firstfiltering__(sm)
self.__expand_secondfiltering__(sm, cm)
# EMBEDDED SM SUPPORT.
self.__expand_thirdfiltering__(sm, cm)
# END EMBEDDED SM SUPPORT.
# Preserve user tags.
self.__preserve_usertags_in_files__(cm)
'''
# Round-trip Code Preservation. Will load the code to preserve upon creation (if the output dir is not-empty/the same as the one in the compile path).
preservation = Preservative(self.output_gen_file_dir)
preservation.Emplace(cm.filenames_to_lines)
'''
# Write output to file.
self.__createoutput__(cm.filenames_to_lines)
# Copy non-autogenerated required files to output.
if isinstance(self.language, LanguageCPP) and copyotherfiles:
# Files...
files_to_copy = []
files_to_copy.append("allocator.h")
files_to_copy.append("allocator.cpp")
files_to_copy.append("basetypes.h")
files_to_copy.append("CMakeLists.txt")
files_to_copy.append("Fault.h")
files_to_copy.append("Fault.cpp")
files_to_copy.append("stl_allocator.h")
files_to_copy.append("thread_FreeRTOS.h")
files_to_copy.append("thread_FreeRTOS.cpp")
files_to_copy.append("threaded_dispatcher.h")
files_to_copy.append("threaded_dispatcher_FreeRTOS.h")
files_to_copy.append("threadsafe_queue.h")
files_to_copy.append("threadsafe_queue_FreeRTOS.h")
files_to_copy.append("waitcondition.h")
files_to_copy.append("waitcondition.cpp")
files_to_copy.append("xallocator.h")
files_to_copy.append("xallocator.cpp")
files_to_copy.append("xlist.h")
files_to_copy.append("xmap.h")
files_to_copy.append("xqueue.h")
files_to_copy.append("xset.h")
files_to_copy.append("xsstream.h")
files_to_copy.append("xstring.h")
allplatformsfrom = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.join("allplatforms", "CPP"))
allplatformsto = os.path.join(os.path.abspath(self.output_gen_file_dir), "allplatforms")
FileCopyUtil(allplatformsfrom, allplatformsto, files_to_copy)
# Boost SML ...
smlfrom = os.path.join(allplatformsfrom, os.path.join("sml", os.path.join("include","boost")))
smlto = os.path.join(allplatformsto, "boost")
smlfiles_to_copy = []
smlfiles_to_copy.append("sml.hpp")
FileCopyUtil(smlfrom, smlto, smlfiles_to_copy)
# Tests...
testfiles_to_copy = []
testfiles_to_copy.append("CMakeLists.txt")
testfiles_to_copy.append("Test.ThreadingConcepts.cpp")
testfiles_to_copy.append("test_main.cpp")
tests_allplatformsfrom = os.path.join(allplatformsfrom, "testsuite")
tests_allplatformsto = os.path.join(allplatformsto, "testsuite")
FileCopyUtil(tests_allplatformsfrom, tests_allplatformsto, testfiles_to_copy)
# Micro Unit Test Framework
microunit_files_to_copy = []
microunit_files_to_copy.append("minunit.h")
microunit_files_to_copy.append("minunit.cpp")
microunit_allplatformsfrom = os.path.join(tests_allplatformsfrom, "minunit")
microunit_allplatformsto = os.path.join(tests_allplatformsto, "minunit")
FileCopyUtil(microunit_allplatformsfrom, microunit_allplatformsto, microunit_files_to_copy)
''' Used for Protocol Generation
'''
def GenerateProtocol(self, pythoninterfacegeneratorfilename, namespacenname, classname, dclspc="", preserve_dir=""):
sm = CTransitionTableModel([], namespacenname, classname, dclspc)
sm.pythoninterfacegeneratorfilename = pythoninterfacegeneratorfilename
cm = self.__loadtemplates_firstfiltering__(sm)
self.__expand_secondfiltering__(sm, cm)
# Round-trip Code Preservation. Will load the code to preserve upon creation (if the output dir is not-empty/the same as the one in the compile path).
        # TCP gen might have a different output directory (typically COG will put files into an intermediate dir, and then copy them elsewhere).
preservation = None
if preserve_dir == "":
preservation = Preservative(self.output_gen_file_dir)
else:
preservation = Preservative(preserve_dir)
preservation.Emplace(cm.filenames_to_lines)
# Write output to file.
self.__createoutput__(cm.filenames_to_lines)
# return the filenames
filenames = []
for filename in cm.filenames_to_lines.keys():
filenames.append(filename)
return filenames
| 1.484375
| 1
|
roku/discovery.py
|
metagrapher/python-roku
| 0
|
12027
|
<reponame>metagrapher/python-roku<filename>roku/discovery.py
"""
Code adapted from <NAME>.
https://gist.github.com/dankrause/6000248
http://github.com/dankrause
"""
import socket
from http.client import HTTPResponse
from io import BytesIO
ST_DIAL = 'urn:dial-multiscreen-org:service:dial:1'
ST_ECP = 'roku:ecp'
class _FakeSocket(BytesIO):
def makefile(self, *args, **kw):
return self
class SSDPResponse(object):
def __init__(self, response):
self.location = response.getheader('location')
self.usn = response.getheader('usn')
self.st = response.getheader('st')
self.cache = response.getheader('cache-control').split('=')[1]
def __repr__(self):
        return '<SSDPResponse({location}, {st}, {usn})>'.format(**self.__dict__)
def discover(timeout=2, retries=1, st=ST_ECP):
group = ('192.168.127.12', 1900)
message = '\r\n'.join([
'M-SEARCH * HTTP/1.1',
'HOST: {0}:{1}'.format(*group),
'MAN: "ssdp:discover"',
'ST: {st}', 'MX: 3', '', ''])
socket.setdefaulttimeout(timeout)
responses = {}
for _ in range(retries):
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
m = message.format(st=st)
sock.sendto(m.encode(), group)
while 1:
try:
rhttp = HTTPResponse(_FakeSocket(sock.recv(1024)))
rhttp.begin()
if rhttp.status == 200:
rssdp = SSDPResponse(rhttp)
responses[rssdp.location] = rssdp
except socket.timeout:
break
return responses.values()
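if __name__ == "__main__":
    # A minimal usage sketch, not part of the original module: run an SSDP
    # search and print whatever answers. Results depend entirely on which
    # Roku/DIAL devices are reachable on the local network.
    for response in discover():
        print(response.location, response.usn)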
| 2.484375
| 2
|
Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py
|
cornell-zhang/GLAIVE
| 10
|
12028
|
<reponame>cornell-zhang/GLAIVE<filename>Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py
import os, sys
from argparse import ArgumentParser
from datetime import datetime as dt
from pprint import pprint as pp
import shutil, glob
#from pyfiglet import figlet_format, Figlet
import datetime
'''
python run_gem5_gl.py -a radix -l inst
python run_gem5_gl.py -a radix -l bit
'''
def app(args):
if not args:
return []
else:
return args.split(',')
parser = ArgumentParser()
parser.add_argument('-a', "--apps", help='Target application names seperated by comma', \
dest='targetapp', required=True)
parser.add_argument('-l', "--info_level", help='Analysis granularity: "bit" or "inst"', \
dest='info_level', default='bit')
args = parser.parse_args()
apps = app(args.targetapp)
level = args.info_level
#num = args.num_progs
src_dir = os.environ.get('GRAPHLEARN')
gem5_dir= os.environ.get('APPROXGEM5') + '/gem5/scripts/relyzer/'
dest_dir = os.environ.get('APPROXGEM5') + '/workloads/x86/apps/'
for app in apps:
app1 = app + '_' + level
os.chdir(gem5_dir)
if level == 'bit':
# cp result from src to dest
gl_src_file = src_dir + 'sdc_output' +'/' + app1 + '_post.txt'
gl_dest_file = dest_dir + app +'/' + app1 + '_post.txt'
cmd = 'cp ' + gl_src_file + ' ' + gl_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in gl failure ' + app1)
exit(-1)
bit_rf_src_file = src_dir + 'sdc_output_ml_bit' +'/' + app1 + '_post_rf.txt'
bit_rf_dest_file = dest_dir + app +'/' + app1 + '_post_rf.txt'
cmd = 'cp ' + bit_rf_src_file + ' ' + bit_rf_dest_file
status = os.system(cmd)
if status != 0:
            print('cp data in rf_bit failure ' + app1)
exit(-1)
bit_mlpc_src_file = src_dir + 'sdc_output_ml_bit' +'/' + app1 + '_post_mlpc.txt'
bit_mlpc_dest_file = dest_dir + app +'/' + app1 + '_post_mlpc.txt'
cmd = 'cp ' + bit_mlpc_src_file + ' ' + bit_mlpc_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in mlpc_bit failure ' + app1)
exit(-1)
#call sdc_comp
print('this is for %s comp_sdc under graph learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'gl'
status = os.system(cmd)
if status != 0:
print('sdc comp in gl_bit failure ' + app1)
exit(-1)
print('this is for %s comp_sdc under random forest learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'rf'
status = os.system(cmd)
if status != 0:
print('sdc comp in rf_bit failure ' + app1)
exit(-1)
print('this is for %s comp_sdc under MLP learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'mlpc'
status = os.system(cmd)
if status != 0:
print('sdc comp in mlpc_bit failure ' + app1)
exit(-1)
# call coverage_comp
log_file = src_dir + 'glog/' + app + '.log'
cmd = 'python sdc_coverage.py ' + app + ' ' + '5' + ' ' + '105' + ' > ' + log_file
status = os.system(cmd)
if status != 0:
print('coverage comp for all methods failure ' + app)
exit(-1)
elif level == 'inst':
inst_rf_src_file = src_dir + 'sdc_output_classic' +'/' + app1 + '_rf.sdclist'
inst_rf_dest_file = dest_dir + app +'/' + app1 + '_rf.sdclist'
cmd = 'cp ' + inst_rf_src_file + ' ' + inst_rf_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in inst_rf failure ' + app1)
exit(-1)
inst_svm_src_file = src_dir + 'sdc_output_classic' +'/' + app1 + '_svm.sdclist'
inst_svm_dest_file = dest_dir + app +'/' + app1 + '_svm.sdclist'
cmd = 'cp ' + inst_svm_src_file + ' ' + inst_svm_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in inst_svm failure ' + app1)
exit(-1)
| 2.15625
| 2
|
onnxsim/__init__.py
|
Wheest/onnx-simplifier
| 1,977
|
12029
|
<gh_stars>1000+
from onnxsim.onnx_simplifier import simplify
__version__ = '0.0.0'
| 1.054688
| 1
|
src/commands/__init__.py
|
lysol/lvlss
| 0
|
12030
|
<gh_stars>0
from quit import Quit
from set_name import SetName
from who import Who
from say import Say
from look import Look
from go import Go
from take import Take
from inventory import Inventory
from drop import Drop
from make import Make
from landfill import Landfill
from item_info import ItemInfo
from script import SetScript, GetScript
from image_editing import ImageEditing
all_commands = (Quit, SetName, Who, Say, Look,
Go, Take, Inventory, Drop, Make, Landfill,
SetScript, GetScript, ItemInfo, ImageEditing)
| 1.765625
| 2
|
Softmax.py
|
tranbamanh229289/Machine-and-Data-mining-
| 0
|
12031
|
<filename>Softmax.py
import Common
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
RATIO = 0.8
EPOCHS = 500
LEARN_RATE = 0.01
IDENTIFICATION_RATE = 0.6
# Read training data
X_train, Y_train, X_test, Y_test,scale_train,scale_test = Common.process(RATIO)
def preprocessing (X_train,Y_train ,X_test ,Y_test):
X_train = np.concatenate((X_train, np.ones((X_train.shape[0], 1))), axis=1)
X_test = np.concatenate((X_test, np.ones((X_test.shape[0], 1))), axis=1)
Y_train = Common.onehot(Y_train)
Y_test = Common.onehot(Y_test)
return X_train,Y_train,X_test,Y_test
X_train, Y_train, X_test, Y_test =preprocessing(X_train,Y_train ,X_test ,Y_test)
def softmax(Z):
    Z = np.array(Z, dtype=np.float32)
    Z = Z - np.max(Z, axis=1, keepdims=True)  # subtract the row max so np.exp cannot overflow
    return np.exp(Z) / np.sum(np.exp(Z), axis=1).reshape(Z.shape[0], 1)
# Cross Entropy
def cost(X, Y, W):
Z = np.array(np.dot(X, W), dtype=np.float32)
return -np.sum(Y * np.log(softmax(Z)))
def gradient(Y, X, W, learningrate, k):
loss = []
for i in range(k):
Z = np.array(np.dot(X, W), dtype=np.float32)
delta = np.dot((Y - softmax(Z)).T, X).T
W = W + learningrate * delta
loss.append(cost(X, Y, W))
return W, loss
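# Why "W + learningrate * delta" above walks downhill (standard softmax-regression
# algebra, not specific to this script): with P = softmax(np.dot(X, W)) and the
# cross-entropy L(W) = -sum(Y * log(P)), the gradient is dL/dW = np.dot(X.T, P - Y).
# The delta computed in gradient() equals np.dot(X.T, Y - P) = -dL/dW, so adding
# learningrate * delta to W is ordinary gradient descent on L.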
W = np.zeros((5, 3))
W, loss = gradient(Y_train, X_train, W, LEARN_RATE, EPOCHS)
def accuracy(W, X_test, Y_test, ratio):
Y_predict = softmax(np.dot(X_test, W))
Y_predict[np.where(Y_predict > ratio)] = 1
Y_predict[np.where(Y_predict < ratio)] = 0
result = np.sum(np.abs(Y_test - Y_predict), axis=1)
count = 0
for i in result:
if (i != 0):
count = count + 1
N = Y_test.shape[0]
acc = (N - count) / N
return acc, Y_predict
acc, Y_predict = accuracy(W, X_test, Y_test, IDENTIFICATION_RATE)
def graph_cost(loss, EPOCHS):
plt.title("Loss", size=20)
plt.xlabel('$epochs$', size=20)
plt.ylabel('$error$', size=20)
plt.plot(np.arange(EPOCHS), loss)
plt.show()
X_train=Common.inverse(scale_train ,X_train[:,:-1])
X_test=Common.inverse(scale_test,X_test[:,:-1])
graph_cost(loss, EPOCHS)
Common.graph_accuracy(X_test, Y_test, Y_predict)
print("Accuracy :")
print(acc * 100, "%")
| 2.9375
| 3
|
Wallpaper change.py
|
Arbazkhan4712/Wallpaper-Changer-using-Python
| 4
|
12032
|
<filename>Wallpaper change.py
import ctypes
import os
import time
from pynput.keyboard import Key,Controller
import Bing
def closeTerminal():
keyboard=Controller()
keyboard.press(Key.alt)
keyboard.press(Key.f4)
keyboard.release(Key.alt)
keyboard.release(Key.f4)
def changeWallpaper(image_path):
start=time.time()
end=time.time()
while True:
for dirname,dirnames,filenames in os.walk(image_path):
for file_name in filenames:
if (end-start)//3600 > 6:
try:
Bing.wallpaper_of_the_day(image_path)
start=time.time()
except:
pass
if file_name.endswith('.png') or file_name.endswith('.jpg'):
image=os.path.join(image_path,dirname,file_name)
SPI_SETDESKTOPWALLPAPER=20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKTOPWALLPAPER,0,image,3)
time.sleep(30)
end=time.time()
def main():
closeTerminal()
#configure own folder
image_path = r'D:\Wallpapers'
try:
os.makedirs(image_path)
except:
pass
try:
Bing.wallpaper_of_the_day(image_path)
except:
pass
changeWallpaper(image_path)
if __name__=='__main__':
main()
| 3.015625
| 3
|
tests/sensitivity/sf2/sf2_test.py
|
vic-c137/mpi-boids-simulation
| 0
|
12033
|
<gh_stars>0
# Import statements
import subprocess
from os import system
# Variable declarations
np = "10"
cexe = "./Boids"
nboids = "50"
nloops = "500"
k = "7"
maxv = "10"
acc = "1.25"
width = "1000"
height = "1000"
sf1 = "1"
sf2 = "32"
min = "50"
sf3 = "8"
sf4 = "10"
dataPath = "./data/"
jexe = "BoidModelTest"
bdata = "boid_data.boid"
# Test calls
collection = [0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 1048576]
for i in collection:
print "Running test %s" % (str(i))
boidData = "run"+str(i)+".boid"
gif = "run"+str(i)+".gif"
sf2 = str(i)
subprocess.call("mpirun -np " + np +" "+ cexe +" "+ nboids +" "+ nloops +" "+ k +" "+ maxv +" "+ acc +" "+ width +" "+ height +" "+ sf1 +" "+ sf2 +" "+ min +" "+ sf3 +" "+ sf4 + " > " + dataPath + boidData, shell=True)
subprocess.call("java " + jexe + " " + gif + " " + boidData, shell=True)
system('gnuplot ./data/boid_script.gp')
| 1.921875
| 2
|
flask_start/flask_start/public/email.py
|
kostekci/flask_start
| 0
|
12034
|
from flask_mail import Message
from flask import render_template
from flask_start.extensions import mail
'''
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
'''
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
mail.send(msg)
#Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('Reset Your Password',
sender='<EMAIL>',
recipients=[user.email],
text_body=render_template('public/reset_password_mail.txt',
user=user, token=token),
html_body=render_template('public/reset_password_mail.html',
user=user, token=token))
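# A minimal usage sketch (all values are placeholders, not from this project):
# send_email('Welcome', sender='<EMAIL>', recipients=[user.email],
#            text_body='plain-text body', html_body='<p>HTML body</p>')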
| 2.65625
| 3
|
demo/trace_model.py
|
furkankirac/maskrcnn-benchmark
| 0
|
12035
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import os
import numpy
from io import BytesIO
from matplotlib import pyplot
import requests
import torch
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from maskrcnn_benchmark.structures.image_list import ImageList
if __name__ == "__main__":
# load config from file and command-line arguments
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cfg.merge_from_file(
os.path.join(project_dir,
"configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"))
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=480,
)
def single_image_to_top_predictions(image):
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
image = image * 255
else:
image = image[[2, 1, 0]]
# we absolutely want fixed size (int) here (or we run into a tracing error (or bug?)
# or we might later decide to make things work with variable size...
image = image - torch.tensor(cfg.INPUT.PIXEL_MEAN)[:, None, None]
# should also do variance...
image_list = ImageList(image.unsqueeze(0), [(int(image.size(-2)), int(image.size(-1)))])
result, = coco_demo.model(image_list)
scores = result.get_field("scores")
keep = (scores >= coco_demo.confidence_threshold)
result = (result.bbox[keep],
result.get_field("labels")[keep],
result.get_field("mask")[keep],
scores[keep])
return result
@torch.jit.script
def my_paste_mask(mask, bbox, height, width, threshold=0.5, padding=1, contour=True, rectangle=False):
# type: (Tensor, Tensor, int, int, float, int, bool, bool) -> Tensor
padded_mask = torch.constant_pad_nd(mask, (padding, padding, padding, padding))
scale = 1.0 + 2.0 * float(padding) / float(mask.size(-1))
center_x = (bbox[2] + bbox[0]) * 0.5
center_y = (bbox[3] + bbox[1]) * 0.5
w_2 = (bbox[2] - bbox[0]) * 0.5 * scale
h_2 = (bbox[3] - bbox[1]) * 0.5 * scale # should have two scales?
bbox_scaled = torch.stack([center_x - w_2, center_y - h_2,
center_x + w_2, center_y + h_2], 0)
TO_REMOVE = 1
w = (bbox_scaled[2] - bbox_scaled[0] + TO_REMOVE).clamp(min=1).long()
h = (bbox_scaled[3] - bbox_scaled[1] + TO_REMOVE).clamp(min=1).long()
scaled_mask = torch.ops.maskrcnn_benchmark.upsample_bilinear(padded_mask.float(), h, w)
x0 = bbox_scaled[0].long()
y0 = bbox_scaled[1].long()
x = x0.clamp(min=0)
y = y0.clamp(min=0)
leftcrop = x - x0
topcrop = y - y0
w = torch.min(w - leftcrop, width - x)
h = torch.min(h - topcrop, height - y)
# mask = torch.zeros((height, width), dtype=torch.uint8)
# mask[y:y + h, x:x + w] = (scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold)
mask = torch.constant_pad_nd((scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold),
(int(x), int(width - x - w), int(y), int(height - y - h))) # int for the script compiler
if contour:
mask = mask.float()
# poor person's contour finding by comparing to smoothed
mask = (mask - torch.nn.functional.conv2d(mask.unsqueeze(0).unsqueeze(0),
torch.full((1, 1, 3, 3), 1.0 / 9.0), padding=1)[0, 0]).abs() > 0.001
if rectangle:
x = torch.arange(width, dtype=torch.long).unsqueeze(0)
y = torch.arange(height, dtype=torch.long).unsqueeze(1)
r = bbox.long()
# work around script not liking bitwise ops
rectangle_mask = ((((x == r[0]) + (x == r[2])) * (y >= r[1]) * (y <= r[3]))
+ (((y == r[1]) + (y == r[3])) * (x >= r[0]) * (x <= r[2])))
mask = (mask + rectangle_mask).clamp(max=1)
return mask
@torch.jit.script
def add_annotations(image, labels, scores, bboxes, class_names=','.join(coco_demo.CATEGORIES), color=torch.tensor([255, 255, 255], dtype=torch.long)):
# type: (Tensor, Tensor, Tensor, Tensor, str, Tensor) -> Tensor
result_image = torch.ops.maskrcnn_benchmark.add_annotations(image, labels, scores, bboxes, class_names, color)
return result_image
@torch.jit.script
def combine_masks(image, labels, masks, scores, bboxes, threshold=0.5, padding=1, contour=True, rectangle=False, palette=torch.tensor([33554431, 32767, 2097151])):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, int, bool, bool, Tensor) -> Tensor
height = image.size(0)
width = image.size(1)
image_with_mask = image.clone()
for i in range(masks.size(0)):
color = ((palette * labels[i]) % 255).to(torch.uint8)
one_mask = my_paste_mask(masks[i, 0], bboxes[i], height, width, threshold, padding, contour, rectangle)
image_with_mask = torch.where(one_mask.unsqueeze(-1), color.unsqueeze(0).unsqueeze(0), image_with_mask)
image_with_mask = add_annotations(image_with_mask, labels, scores, bboxes)
return image_with_mask
def process_image_with_traced_model(image):
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
boxes, labels, masks, scores = traced_model(image)
# todo: make this in one large thing
result_image = combine_masks(original_image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
def fetch_image(url):
response = requests.get(url)
return Image.open(BytesIO(response.content)).convert("RGB")
if __name__ == "__main__":
pil_image = fetch_image(
url="http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
# convert to BGR format
image = torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
for p in coco_demo.model.parameters():
p.requires_grad_(False)
traced_model = torch.jit.trace(single_image_to_top_predictions, (image,))
@torch.jit.script
def end_to_end_model(image):
boxes, labels, masks, scores = traced_model(image)
result_image = combine_masks(image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
end_to_end_model.save('end_to_end_model.pt')
result_image = process_image_with_traced_model(original_image)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image[:, :, [2, 1, 0]])
pyplot.show()
# second image
image2 = fetch_image(
url='http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg')
image2 = image2.resize((640, 480), Image.BILINEAR)
image2 = torch.from_numpy(numpy.array(image2)[:, :, [2, 1, 0]])
result_image2 = process_image_with_traced_model(image2)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image2[:, :, [2, 1, 0]])
pyplot.show()
| 2.25
| 2
|
validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py
|
samcom12/anuga_core
| 136
|
12036
|
<filename>validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py
"""Simple water flow example using ANUGA
Water driven up a linear slope and time varying boundary,
similar to a beach environment
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import sys
import anuga
from anuga import myid, finalize, distribute
from anuga import Domain as Domain
from math import cos
from numpy import zeros, ones, array, interp, polyval, ones_like, zeros_like
from numpy import where, logical_and
from time import localtime, strftime, gmtime
from scipy.interpolate import interp1d
from anuga.geometry.polygon import inside_polygon, is_inside_triangle
#from balanced_dev import *
#-------------------------------------------------------------------------------
# Copy scripts to time stamped output directory and capture screen
# output to file
#-------------------------------------------------------------------------------
time = strftime('%Y%m%d_%H%M%S',localtime())
#output_dir = 'varying_width'+time
output_dir = '.'
output_file = 'varying_width'
#anuga.copy_code_files(output_dir,__file__)
#start_screen_catcher(output_dir+'_')
args = anuga.get_args()
alg = args.alg
verbose = args.verbose
#------------------------------------------------------------------------------
# Setup domain
#------------------------------------------------------------------------------
dx = 1.
dy = dx
L = 1500.
W = 60.
#===============================================================================
# Create sequential domain
#===============================================================================
if myid == 0:
# structured mesh
points, vertices, boundary = anuga.rectangular_cross(int(L/dx), int(W/dy), L, W, (0.,-W/2.))
#domain = anuga.Domain(points, vertices, boundary)
domain = Domain(points, vertices, boundary)
domain.set_name(output_file)
domain.set_datadir(output_dir)
#------------------------------------------------------------------------------
# Setup Algorithm, either using command line arguments
# or override manually yourself
#------------------------------------------------------------------------------
domain.set_flow_algorithm(alg)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
domain.set_quantity('friction', 0.0)
domain.set_quantity('stage', 12.0)
XX = array([0.,50.,100.,150.,250.,300.,350.,400.,425.,435.,450.,470.,475.,500.,
505.,530.,550.,565.,575.,600.,650.,700.,750.,800.,820.,900.,950.,
1000.,1500.])
ZZ = array([0.,0.,2.5,5.,5.,3.,5.,5.,7.5,8.,9.,9.,9.,9.1,9.,9.,6.,5.5,5.5,5.,
4.,3.,3.,2.3,2.,1.2,0.4,0.,0.])
WW = array([40.,40.,30.,30.,30.,30.,25.,25.,30.,35.,35.,40.,40.,40.,45.,45.,50.,
45.,40.,40.,30.,40.,40.,5.,40.,35.,25.,40.,40.])/2.
depth = interp1d(XX, ZZ)
width = interp1d(XX, WW)
def bed_elevation(x,y):
z = 25.0*ones_like(x)
wid = width(x)
dep = depth(x)
z = where( logical_and(y < wid, y>-wid), dep, z)
return z
domain.set_quantity('elevation', bed_elevation)
else:
domain = None
#===========================================================================
# Create Parallel domain
#===========================================================================
domain = distribute(domain)
#-----------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
from math import sin, pi, exp
Br = anuga.Reflective_boundary(domain) # Solid reflective wall
#Bt = anuga.Transmissive_boundary(domain) # Continue all values on boundary
#Bd = anuga.Dirichlet_boundary([1,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Produce a documentation of parameters
#------------------------------------------------------------------------------
if myid == 0:
parameter_file=open('parameters.tex', 'w')
parameter_file.write('\\begin{verbatim}\n')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.write('\\end{verbatim}\n')
parameter_file.close()
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
import time
t0 = time.time()
for t in domain.evolve(yieldstep = 0.1, finaltime = 5.0):
#print(domain.timestepping_statistics(track_speeds=True))
if myid == 0 and verbose: print(domain.timestepping_statistics())
#vis.update()
if myid == 0 and verbose: print('That took %s sec' % str(time.time()-t0))
domain.sww_merge(delete_old=True)
finalize()
| 2.09375
| 2
|
src/temperature/urls.py
|
JohanGovers/home-mon-server
| 0
|
12037
|
from django.conf.urls import patterns, url
from temperature import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^save_temp_reading$', views.save_temp_reading, name='save_temp_reading'),
)
| 1.703125
| 2
|
FictionTools/amitools/amitools/binfmt/elf/BinFmtELF.py
|
polluks/Puddle-BuildTools
| 38
|
12038
|
from amitools.binfmt.BinImage import *
from .ELFFile import *
from .ELF import *
from .ELFReader import ELFReader
from .DwarfDebugLine import DwarfDebugLine
class BinFmtELF:
"""Handle Amiga m68k binaries in ELF format (usually from AROS)"""
def is_image(self, path):
"""check if a given file is a supported ELF file"""
with open(path, "rb") as f:
return self.is_image_fobj(f)
def is_image_fobj(self, fobj):
"""check if a given fobj is a supported ELF file"""
try:
pos = fobj.tell()
# read identifier
ident = ELFIdentifier()
ident_data = fobj.read(16)
ident.parse(ident_data)
# read header
hdr = ELFHeader()
hdr_data = fobj.read(36)
hdr.parse(hdr_data)
# seek back
fobj.seek(pos, 0)
# check header
return self.is_supported_elf(ident, hdr)
except ELFParseError:
return False
def is_supported_elf(self, ident, hdr):
"""check ELF header if its a m68k binary"""
if hdr.machine != EM_68K:
return False
if ident.osabi not in (ELFOSABI_SYSV, ELFOSABI_AROS):
return False
return True
def load_image(self, path):
"""load a BinImage from an ELF file given via path"""
with open(path, "rb") as f:
return self.load_image_fobj(f)
def load_image_fobj(self, fobj):
"""load a BinImage from an ELF file given via file object"""
# read elf file
reader = ELFReader()
elf = reader.load(fobj)
# create bin image and assign elf file
bi = BinImage(BIN_IMAGE_TYPE_ELF)
bi.set_file_data(elf)
# walk through elf sections
sect_to_seg = {}
for sect in elf.sections:
# determine segment type
seg_type = None
name = sect.name_str
flags = 0
if name == b".text":
seg_type = SEGMENT_TYPE_CODE
elif name == b".data":
seg_type = SEGMENT_TYPE_DATA
elif name == b".rodata":
seg_type = SEGMENT_TYPE_DATA
flags = SEGMENT_FLAG_READ_ONLY
elif name == b".bss":
seg_type = SEGMENT_TYPE_BSS
# we got a segment
if seg_type is not None:
size = sect.header.size
data = sect.data
seg = Segment(seg_type, size, data, flags)
bi.add_segment(seg)
# assign section to segment
seg.set_file_data(sect)
sect_to_seg[sect] = seg
# now run through segments to add relocations
bi_segs = bi.get_segments()
for seg in bi_segs:
# retrieve associated ELF section
sect = seg.get_file_data()
# any relocations?
rela = sect.get_rela()
num_rela = len(rela)
if num_rela > 0:
self.add_elf_rela(sect, seg, sect_to_seg)
# any symbols?
symbols = sect.get_symbols()
num_syms = len(symbols)
if num_syms > 0:
self.add_elf_symbols(symbols, seg)
# try to add debug info
ddl = DwarfDebugLine()
got = ddl.decode(elf)
if got:
self.add_debug_line(ddl, bi, sect_to_seg)
return bi
def add_elf_rela(self, sect, seg, sect_to_seg):
for tgt_sect in sect.get_rela_sections():
# is this a relocation to a used section?
if tgt_sect in sect_to_seg:
to_seg = sect_to_seg[tgt_sect]
rl = Relocations(to_seg)
seg.add_reloc(to_seg, rl)
# add relocations
for rel in sect.get_rela_by_section(tgt_sect):
r = Reloc(rel.offset, addend=rel.section_addend)
rl.add_reloc(r)
def add_elf_symbols(self, symbols, seg):
symtab = SymbolTable()
seg.set_symtab(symtab)
for sym in symbols:
# add entry
off = sym.value
name = sym.name_str
file_sym = sym.file_sym
if file_sym is not None:
file_name = file_sym.name_str
else:
file_name = None
symbol = Symbol(off, name, file_name)
symtab.add_symbol(symbol)
def add_debug_line(self, ddl, bi, sect_to_seg):
seg_to_dl = {}
matrix = ddl.get_matrix()
for row in matrix:
sect = row.section
if sect in sect_to_seg:
segment = sect_to_seg[sect]
# fetch debug info
if segment in seg_to_dl:
dl, file_to_df = seg_to_dl[segment]
else:
dl = DebugLine()
file_to_df = {}
segment.set_debug_line(dl)
seg_to_dl[segment] = (dl, file_to_df)
# fetch file instance
fid = row.file
if fid in file_to_df:
df = file_to_df[fid]
else:
df = DebugLineFile(ddl.get_file_name(fid), ddl.get_file_dir(fid))
dl.add_file(df)
file_to_df[fid] = df
# add entry
e = DebugLineEntry(row.address, row.line)
df.add_entry(e)
# mini test
if __name__ == "__main__":
import sys
bf = BinFmtELF()
for a in sys.argv[1:]:
if bf.is_image(a):
print("loading", a)
bi = bf.load_image(a)
print(bi)
else:
print("NO ELF:", a)
| 2.53125
| 3
|
recordtransform.py
|
Andresfgomez970/Managing-.wav-files-in-python
| 0
|
12039
|
<gh_stars>0
import pyaudio
import wave
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # needed to register the 3d projection used below
from scipy.io import wavfile
from scipy.io.wavfile import write
def recordaudio(chunk,formato,Channels,Rate,Record_seconds,
wave_output_name):
    '''Record audio from the default input device and save it as a WAV file.
    chunk: number of frames read per buffer
    formato: pyaudio sample format (e.g. pyaudio.paInt16)
    Channels: number of input channels to record
    Rate: sampling rate in frames per second
    Record_seconds: length of the recording in seconds
    wave_output_name: name of the output WAV file
    '''
p=pyaudio.PyAudio()
stream=p.open(format=formato,
channels=Channels,
rate=Rate,
input=True,
frames_per_buffer=chunk)
print("Recording..")
frames=[]
for i in range(0,int(Rate/chunk*Record_seconds)):
data=stream.read(chunk)
frames.append(data)
print("Done recording.")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(wave_output_name, 'wb')
wf.setnchannels(Channels)
wf.setsampwidth(p.get_sample_size(formato))
wf.setframerate(Rate)
wf.writeframes(b''.join(frames))
wf.close()
def generatetones(secondpertone,steptone,littletone,maxtone,rate,name):
t = np.linspace(0,secondpertone,rate*secondpertone)
lista = []
for i in range(littletone,maxtone+steptone,steptone):
data = np.sin(t*i*2*np.pi)
lista += list(data)
scaled = np.int16(lista/np.max(np.abs(lista)) * 32000)
write(name, rate, scaled )
def getsignal(wave_output_name,Record_seconds):
fs, data = wavfile.read(wave_output_name)
    #Necessary parameters for the Fourier transform
try:
tamdata = data[:,0].size
except:
tamdata = data.size
dt = Record_seconds*1./tamdata
t = np.arange(0,Record_seconds-dt/2,dt)
try:
return t,data[:,0],dt
except:
return t,data,dt
def Fourier1(time, data,dt):
dataft = np.fft.fftshift(np.fft.fft(np.fft.fftshift(data)))*dt
freq = np.arange(-1/(2*dt),1/(2*dt)-1/(2*dt*time.size),1/(dt*time.size))
return freq,dataft
def dft(f,w,t,sign):
if type(f)==type(w):
F = f
else:
F = f(t)
DFT = []
for j in w:
r2 = 0
for i in range(len(t)):
r2 += F[i]*np.exp(1j*j*t[i]*sign)
DFT.append( (t[-1]-t[-2]) *r2)
return t,np.array(DFT)
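# Illustrative sketch (added for clarity, not part of the original script): a minimal
# cross-check of the hand-written dft above against the numpy-based Fourier1. The tone
# frequency, sampling rate and duration below are arbitrary example values.
def _example_dft_vs_fft(tone=5.0, rate=200, seconds=1.0):
    t = np.arange(0, seconds, 1.0/rate)
    data = np.sin(2*np.pi*tone*t)
    dt = 1.0/rate
    # angular frequencies on which to evaluate the hand-written transform
    w = np.arange(-np.pi/dt, np.pi/dt, 2*np.pi/(len(t)*dt))
    _, ft_manual = dft(data, w, t, 1)
    freq, ft_numpy = Fourier1(t, data, dt)
    # both spectra should peak near +/- tone Hz
    peak_manual = abs(w[np.argmax(abs(ft_manual))]) / (2*np.pi)
    peak_numpy = abs(freq[np.argmax(abs(ft_numpy))])
    return peak_manual, peak_numpy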
def plotfourier(freq,dataft,fi,ff,norm):
if norm=='yes':
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=25)
else:
plt.plot(freq,abs(dataft),'b',linewidth='5')
        plt.title('Spectrum of frequencies',fontsize=25)
plt.xlim(fi,ff)
plt.ylabel('a.u.',fontsize=20)
    plt.xlabel('Frequency (Hz)',fontsize=20)
plt.grid()
def recordtransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm):
recordaudio(chunk,formato,Channels,Rate,Record_seconds,wave_output_name)
time, data,dt = getsignal(wave_output_name,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
plotfourier(freq,dataft,fi,ff,norm)
plt.show()
def plotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm):
time, data,dt = getsignal(wave_output_name,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
plotfourier(freq,dataft,fi,ff,norm)
plt.show()
def zoomplotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm):
time, data,dt = getsignal(wave_output_name,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
plt.subplot(2,1,1)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=15)
plt.xlim(fi,ff)
plt.subplot(2,1,2)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Zoom to measured frequency',fontsize=15)
con1 = abs(dataft)==abs(dataft).max()
ft=abs(freq[con1])
ft = ft[0]
plt.xlim(ft-0.5,ft+0.5)
plt.ylabel('a.u.',fontsize=20)
    plt.xlabel('Frequency (Hz)',fontsize=20)
plt.grid()
    print ('Frequency found at maximum value: %.2f \n ' % ft)
plt.show()
def comparing(chunk,formato,Channels,Rate,Record_seconds,wavename1,
wavename2,fi,ff,norm,tol):
time, data,dt = getsignal(wavename1,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
time2, data2,dt = getsignal(wavename2,Record_seconds)
freq2,dataft2 = Fourier1(time2, data2,dt)
plt.figure(figsize=(20,10))
plt.subplot(2,2,1)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=15)
plt.xlim(fi,ff)
plt.ylabel('a.u.',fontsize=10)
    plt.xlabel('Frequency (Hz)',fontsize=10)
plt.grid()
plt.subplot(2,2,2)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Zoom to measured frequency',fontsize=15)
con1 = abs(dataft)==abs(dataft).max()
    ft1 = abs(freq[con1])[0]
plt.xlim(ft1-tol,ft1+tol)
plt.ylabel('a.u.',fontsize=10)
    plt.xlabel('Frequency (Hz)',fontsize=10)
plt.grid()
plt.subplot(2,2,3)
plt.plot(freq2,abs(dataft2)/abs(dataft2).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=15)
plt.xlim(fi,ff)
plt.ylabel('a.u.',fontsize=10)
    plt.xlabel('Frequency (Hz)',fontsize=10)
plt.grid()
plt.subplot(2,2,4)
plt.plot(freq2,abs(dataft2)/abs(dataft2).sum(),'b',linewidth='5')
    plt.title('Zoom to measured frequency',fontsize=15)
con2 = abs(dataft2)==abs(dataft2).max()
    ft2 = abs(freq2[con2])[0]
plt.xlim(ft2-tol,ft2+tol)
plt.ylabel('a.u.',fontsize=10)
    plt.xlabel('Frequency (Hz)',fontsize=10)
plt.grid()
print ('The difference was of %.2f Hz' %(abs(ft1-ft2)) )
plt.show()
def f(wave_output_name,Record_seconds,time):
t,data,dt = getsignal(wave_output_name,Record_seconds)
datapersecond = len(data)/Record_seconds
freqtimes = []
dataft_times = []
times = []
    for i in range(int(Record_seconds/time)):
        s1, s2 = int(i*time*datapersecond), int((i+1)*time*datapersecond)
        datai = data[s1:s2]
        timei = t[s1:s2]
dataft = np.fft.fftshift(np.fft.fft(np.fft.fftshift(datai)))*dt
freq = np.arange(-1/(2*dt),1/(2*dt)-1/(2*dt*timei.size),1/(dt*timei.size))
freqtimes.append(freq)
dataft_times.append(dataft)
times.append( (i+1)*time )
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = times
Y = freqtimes
for i in range(len(times)):
#plt.plot(np.array([1,2]), np.array([1,2]), np.array([1,2]) ,'o')
plt.plot( np.ones(len(freqtimes[i]))*times[i] , freqtimes[i] , abs(dataft_times[i]))
ax.set_xlabel('Time')
ax.set_ylabel('Freq')
ax.set_zlabel('A.U.')
plt.show()
for i in range(1000,20000,1000):
        plt.plot( i,freqtimes[i//1000].max() ,'ko')  # integer index required in Python 3
plt.show()
for i in range(len(times)):
plt.plot(freqtimes[i], abs(dataft_times[i] ) )
plt.show()
def f(wave_output_name,Record_seconds,time,Rate):
tm = 1./Rate
a = time%tm
if a>=tm/2.:
time = time + (tm - time%tm)
else:
time = time - time%tm
t,data,dt = getsignal(wave_output_name,Record_seconds)
datapersecond = len(data)/Record_seconds
freqtimes = []
dataft_times = []
times = []
for i in range( int(Record_seconds/time) ):
s1 , s2 = int(i*time*datapersecond),int( (i+1)*time*datapersecond)
datai = data[s1:s2]
timei = t[s1:s2]
dataft = np.fft.fftshift(np.fft.fft(np.fft.fftshift(datai)))*dt
freq = np.arange(-1/(2*dt),1/(2*dt)-1/(2*dt*timei.size),1/(dt*timei.size))
freqtimes.append(freq)
dataft_times.append(dataft)
times.append( (i+1)*time )
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = times
Y = freqtimes
for i in range(len(times)):
plt.plot( np.ones(len(freqtimes[i]))*times[i] , freqtimes[i] , abs(dataft_times[i]))
ax.set_xlabel('Time')
ax.set_ylabel('Freq')
ax.set_zlabel('A.U.')
if __name__ == "__main__":
chunk=1024 #number of frames
formato=pyaudio.paInt16 #format of the record
    Channels=2 #Number of channels to record (this alters the data)
    Rate=16040 #Number of frames per second
    Record_seconds=38 #length of the recording
wavename1="records/test1withegeneratednoise.wav" #output file name
fi,ff=0,20000
norm = 'yes'
wavename2 = "records/test1.wav"
### Example 1
print("\nThe transform of the file 'test1withegeneratednoise.wav' is \n shown:\n")
plotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wavename1,fi,ff,norm)
### Example 2
    print("\nThe transform of the file '3200.wav' is shown and also a \n zoom to the maximum value of the Fourier transform:\n")
    ### This part measures a given frequency that is already stored as a wave file in
    ### the project; in addition a zoom is made around it with some tolerance
Rate=44100
Record_seconds=4.99
wavename2 = "records/3200.wav"
fi, ff = 0, 10000
zoomplotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wavename2,fi,ff,norm)
### Example 3
    ### This part records with the computer microphone and afterwards
    ### shows the Fourier transform of the recording
    #You can change the parameters of the recording that is going to be made
Record_seconds=5
wave_output_name = 'recorded.wav'
recordtransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm)
    ### Example 4
    ### This part plots the transform of the two wave files and allows
    ### comparing the amplitudes and the frequencies at the maximum
    ### amplitude
Record_seconds= 3.0
wavename1="records/1000.wav"
wavename2="records/1000t.wav"
ft = 3265
tol = 3
comparing(chunk,formato,Channels,Rate,Record_seconds,wavename1,
wavename2,fi,ff,norm,tol)
    ### Example 5
    ### This is basically the short-time Fourier transform.
    ### It is important to know that the algorithm chooses as
    ### step time the value nearest to the one you give that
    ### is a multiple of the sampling period (1/Rate).
wave_output_name = "records/1000.wav"
Record_seconds = 3
time = 0.1
Rate = 46080
f(wave_output_name,Record_seconds,time,Rate)
plt.show()
    ### Example 6
    ### This compares the Fourier transform computed by numpy with the
    ### dft implementation above; it is a way to check that the program
    ### behaves as expected in at least some cases. A further analysis
    ### could explain the differences (the graphs were scaled by a value
    ### chosen by hand).
wavename = 'records/3265.wav'
Record_seconds = 3
t, data, dt = getsignal(wavename,Record_seconds)
freq, dataft = Fourier1(t, data,dt)
data = data[1000:1500]
t = t[1000:1500]
w = np.arange(-np.pi/dt,np.pi/dt,2*np.pi/(len(t)*dt) )
t, ft = dft(data,w,t,1)
plt.plot(w/(2*np.pi),abs(ft.real)/abs(ft.real).sum()*(0.0169/0.0881) ,'b')
plt.plot(freq,abs(dataft.real)/abs(dataft.real).sum() ,'g')
plt.show()
| 2.46875
| 2
|
AllSidesScraper/allsides.py
|
Epicrider/polibalance
| 0
|
12040
|
<filename>AllSidesScraper/allsides.py
from bs4 import BeautifulSoup
import requests
from communityFeedback import *
from time import sleep
from rich.progress import track
import json
page = [
'https://www.allsides.com/media-bias/media-bias-ratings',
]
def table(full_table):
# The main table
print('Web scraper is parsing the table!')
for url in page:
source = requests.get(url)
soup = BeautifulSoup(source.content, 'lxml')
main_table = soup.select('tbody tr')
for row in main_table:
f = dict() # dictionary
f['News Source'] = row.select_one('.source-title').text.strip()
f['AllSides Bias Rating'] = row.select_one(
'.views-field-field-bias-image a')['href'].split('/')[-1]
f['News Media Info'] = 'https://www.allsides.com' + \
row.select_one('.source-title a')['href']
f['Agree'] = int(row.select_one('.agree').text)
f['Disagree'] = int(row.select_one('.disagree').text)
f['Ratio'] = (f['Agree'] / f['Disagree'])
f['Community feedback'] = communityVote(f['Ratio'])
f['Ratio'] = "{:.3f}".format(f['Ratio'])
full_table.append(f) # adds it to the empty list
sleep(10) # this is due to the ten seconds before request in robots.txt
return full_table
def website(full_table):
# Enters into the info page and parses out the info
for f in track(full_table, description="Parsing..."):
source = requests.get(f['News Media Info'])
soup = BeautifulSoup(source.content, 'lxml')
try:
# getting the website link to news source
locate_html_class = soup.find('div', {'class': 'dynamic-grid'})
locate_paragraph = locate_html_class.find('a')['href']
f['News Source Site'] = locate_paragraph
except TypeError:
pass
try:
# getting the creation date of the news source
locate__html_class = soup.find('div', {'class': 'dynamic-grid'})
locate_paragraph = locate__html_class.find_all('p')[1].text.split('.')[-1].strip()
f['Established'] = locate_paragraph
except IndexError:
pass
try:
# Who the news source owned by
locate__html_class = soup.find('div', {'class': 'dynamic-grid'})
locate_paragraph = locate__html_class.find_all('p')[2].text.split(':')[-1].strip()
f['Owned by'] = locate_paragraph
except IndexError:
pass
sleep(10)
return full_table
def saving_data(full_table):
# Saves the data into a json file with no lines
with open('all-sides.json', 'w', newline="") as i:
json.dump(full_table, i)
def main():
# main function
full_table = [] # empty list
full_table = table(full_table)
full_table = website(full_table)
saving_data(full_table)
print('Parsing has finished!')
if __name__ == '__main__':
main()
| 2.84375
| 3
|
commands/calc.py
|
periodicaidan/dalton-cli
| 2
|
12041
|
"""
File: commands/calc.py
Purpose: Performs calculations in response to user input, and outputs the result
"""
from sys import argv
import click
from calculator import *
from models import History
from models.Config import Config
from help_menus import calc_help
@click.group("calc", invoke_without_command=True)
@click.option("-M", "--mass-spec",
is_flag=True, default=False,
help="Get a theoretical mass spectrum of a molecule")
@click.option("-i", "--histogram",
is_flag=True, default=False,
help="Use with -M/--mass-spec to display the mass spec as a histogram")
@click.argument("formula", required=False)
def calc(mass_spec, histogram, formula):
config = Config.setup() # todo: Pass as context
if not any(locals().items()) or len(argv) == 2:
calc_help()
else:
if mass_spec:
click.echo(get_mass_spec(formula, histogram))
else:
mass = History.get(formula)["mass"] or get_mass(formula)
click.echo("%.3f %s" % (mass, config.units))
| 3.40625
| 3
|
aot/meta_triggers/metatrigger_treasure.py
|
jaycheungchunman/age-of-triggers
| 0
|
12042
|
<gh_stars>0
from aot import *
from aot.model.trigger import *
from aot.model.condition import *
from aot.model.effect import *
from aot.meta_triggers.metatrigger import MetaTrigger
from aot.model.enums.resource import EnumResource
from aot.model.enums.player import PlayerEnum
from aot.model.enums.unit import UnitConstant, UnitType
class Treasure(MetaTrigger):
def __init__(self, x, y, unit, amount, resource, players=range(1, 9), create_the_unit=False,
trigger_name="treasure"):
self.players = players
self.trigger_name = trigger_name
self.x = x
self.amount = amount
self.resource = resource
self.unit = unit
self.y = y
self.create_the_unit = create_the_unit
def setup(self, scenario):
if self.create_the_unit:
scenario.units.new(owner=0, x=self.x, y=self.y)
for p in self.players:
t = Trigger(self.trigger_name+" (P{})".format(p), enable=True)
t.if_(ObjectInArea(0,
amount=1,
unit_cons=self.unit,
x1=max(0, self.x - 1), y1=max(0, self.y - 1),
x2=min(scenario.map.width, self.x + 1),
y2=min(scenario.map.height, self.y + 1)))\
.if_(ObjectInArea(p,
amount=1,
x1=max(0, self.x - 1), y1=max(0, self.y - 1),
x2=min(scenario.map.width, self.x + 1),
y2=min(scenario.map.height, self.y + 1))) \
.then_(Tribute(p, self.amount, self.resource, silent=False)) \
.then_(SendChat(player=p, message="You found a treasure !")) \
.then_(RemoveObject(player=PlayerEnum.GAIA.value,
unit_cons=self.unit, x1=self.x, x2=self.x, y1=self.y, y2=self.y))
scenario.triggers.add(t)
class TreasureLoot(Treasure):
def __init__(self, x, y, amount, players=range(1, 9), create_the_unit=True):
super().__init__(x=x, y=y, unit=UnitConstant.LOOT.value,
amount=amount,
create_the_unit=create_the_unit,
players=players, resource=EnumResource.GOLD.value,
trigger_name="TreasureLoot({},{})".format(x,y))
class TreasureLumber(Treasure):
def __init__(self, x, y, amount, players=range(1, 9), create_the_unit=True):
super().__init__(x=x, y=y, unit=UnitConstant.LUMBER.value,
create_the_unit=create_the_unit,
amount=amount,
players=players, resource=EnumResource.WOOD.value,
trigger_name="TreasureLumber({},{})".format(x,y))
class TreasureQuarry(Treasure):
def __init__(self, x, y, amount, players=range(1, 9), create_the_unit=True, ):
super().__init__(x=x, y=y, unit=UnitConstant.QUARRY.value,
create_the_unit=create_the_unit,
amount=amount,
players=players, resource=EnumResource.STONE.value,
trigger_name="TreasureQuarry({},{})".format(x,y))
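# Illustrative usage sketch (added for clarity, not part of the original module): how one
# of the treasure meta-triggers above might be attached to a scenario. The `scenario`
# argument is assumed to be an age-of-triggers scenario object exposing the `units`,
# `triggers` and `map` attributes that Treasure.setup() relies on; the coordinates,
# amount and player range are arbitrary example values.
def _example_add_gold_treasure(scenario):
    treasure = TreasureLoot(x=10, y=12, amount=100, players=range(1, 3))
    treasure.setup(scenario)  # places the loot unit and registers one trigger per player
    return treasure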
| 2.203125
| 2
|
agent/lm_agent/server_interfaces/lsdyna.py
|
omnivector-solutions/license-manager
| 2
|
12043
|
"""LS-Dyna license server interface."""
import typing
from lm_agent.config import settings
from lm_agent.exceptions import LicenseManagerBadServerOutput
from lm_agent.parsing import lsdyna
from lm_agent.server_interfaces.license_server_interface import LicenseReportItem, LicenseServerInterface
from lm_agent.server_interfaces.utils import run_command
class LSDynaLicenseServer(LicenseServerInterface):
"""Extract license information from LS-Dyna license server."""
def __init__(self, license_servers: typing.List[str]):
"""Initialize the license server instance with the license server host and parser."""
self.license_servers = license_servers
self.parser = lsdyna.parse
def get_commands_list(self):
"""Generate a list of commands with the available license server hosts."""
host_ports = [(server.split(":")[1:]) for server in self.license_servers]
commands_to_run = []
for host, port in host_ports:
command_line = f"{settings.LSDYNA_PATH} -s {port}@{host} -R"
commands_to_run.append(command_line)
return commands_to_run
async def get_output_from_server(self):
"""Override abstract method to get output from Ls-Dyna license server."""
# get the list of commands for each license server host
commands_to_run = self.get_commands_list()
        # run each command in the list, one at a time, until one succeeds
for cmd in commands_to_run:
output = await run_command(cmd)
# try the next server if the previous didn't return the expected data
if output is None:
continue
return output
raise RuntimeError("None of the checks for LS-Dyna succeeded!")
async def get_report_item(self, product_feature: str):
"""Override abstract method to parse LS-Dyna license server output into License Report Item."""
server_output = await self.get_output_from_server()
parsed_output = self.parser(server_output)
(_, feature) = product_feature.split(".")
current_feature_item = parsed_output.get(feature)
# raise exception if parser didn't output license information
if current_feature_item is None:
raise LicenseManagerBadServerOutput("Invalid data returned from parser.")
report_item = LicenseReportItem(
product_feature=product_feature,
used=current_feature_item["used"],
total=current_feature_item["total"],
used_licenses=current_feature_item["uses"],
)
return report_item
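# Illustrative sketch (added for clarity, not part of the original module): the license
# server strings are assumed to follow a "<type>:<host>:<port>" layout, which
# get_commands_list() turns into "<LSDYNA_PATH> -s <port>@<host> -R". The host name and
# port below are made-up example values.
def _example_commands():
    server = LSDynaLicenseServer(["lsdyna:licserv1:31010"])
    return server.get_commands_list()  # e.g. ["<settings.LSDYNA_PATH> -s 31010@licserv1 -R"]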
| 2.40625
| 2
|
examples/gan/gan_embeddings.py
|
ojmakhura/DIGITS
| 0
|
12044
|
#!/usr/bin/env python3
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import pickle
import shutil
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
TB_DIR = os.path.join(os.getcwd(), "gan-tb")
SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png")
def save_tb_embeddings(embeddings_filename):
f = open(embeddings_filename, 'rb')
embeddings = pickle.load(f)
images = embeddings['images']
zs = embeddings['zs']
# overwrite Tensorboard log dir if necessary
if os.path.exists(TB_DIR):
shutil.rmtree(TB_DIR)
os.makedirs(TB_DIR)
# create grid image
img_width, img_height = save_sprite_image(images)
with tf.device('cpu:0'):
# create embedding var
embedding_var = tf.Variable(initial_value=zs)
# save projector config
summary_writer = tf.summary.FileWriter(TB_DIR)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.sprite.image_path = SPRITE_IMAGE_FILENAME
embedding.sprite.single_image_dim.extend([img_width, img_height])
projector.visualize_embeddings(summary_writer, config)
# save embeddings
sess = tf.Session()
sess.run(embedding_var.initializer)
saver = tf.train.Saver([embedding_var])
saver.save(sess, os.path.join(TB_DIR, 'model.ckpt'))
def save_sprite_image(images):
n_embeddings = images.shape[0]
grid_cols = int(np.sqrt(n_embeddings))
grid_rows = int(np.ceil(float(n_embeddings) / grid_cols))
img_height, img_width, img_channels = images[0].shape
grid_image = np.empty((img_height * grid_rows, img_width * grid_cols, img_channels))
for i, image in enumerate(images):
        row = i // grid_cols  # integer division: a float row index would break the slicing below
col = i % grid_cols
x = img_width * col
y = img_height * row
grid_image[y:y + img_height, x:x + img_width] = image
grid_image = PIL.Image.fromarray(grid_image.astype('uint8'))
grid_image.save(SPRITE_IMAGE_FILENAME)
return img_width, img_height
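# Worked example (added for clarity, not part of the original script): with 10 embedding
# images, grid_cols = int(sqrt(10)) = 3 and grid_rows = ceil(10 / 3) = 4, so the sprite
# is a 4x3 grid whose last two cells stay empty.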
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'embeddings_file',
help='Embeddings pickle file')
args = vars(parser.parse_args())
try:
save_tb_embeddings(
args['embeddings_file'],
)
except Exception as e:
        print(('%s: %s' % (type(e).__name__, e)))  # Python 3 exceptions have no .message attribute
raise
| 2.21875
| 2
|
punch_version.py
|
joshua-s/punch
| 0
|
12045
|
<reponame>joshua-s/punch
major = 1
minor = 4
patch = 5
| 0.785156
| 1
|
17.py
|
yonghuatang/python
| 0
|
12046
|
from datetime import date
now = date.today()
print('The date today is', now, now.strftime("%A"))
| 3.765625
| 4
|
pyxtal/miscellaneous/from_ase_molecule.py
|
ubikpt/PyXtal
| 127
|
12047
|
<reponame>ubikpt/PyXtal
from pyxtal.molecule import *
from ase.build import molecule
from pymatgen.core import Molecule
def get_ase_mol(molname):
"""convert ase molecule to pymatgen style"""
ase_mol = molecule(molname)
pos = ase_mol.get_positions()
symbols = ase_mol.get_chemical_symbols()
return Molecule(symbols, pos)
if __name__ == "__main__":
# ---------------------------------------------------
for name in ["H2", "H2O", "HCl", "CS2", "C2Cl4", "PH3", "CH4", "C6H6", "C60"]:
mol = get_ase_mol(name)
pga = PointGroupAnalyzer(mol)
# Symmetrize the molecule using pymatgen
mol = pga.symmetrize_molecule()["sym_mol"]
pga = PointGroupAnalyzer(mol)
print(name, " has point group symmetry: ", pga.get_pointgroup())
# Check if orders of rotation are detected correctly
pg = pga.get_pointgroup()
for op in pg:
opa = OperationAnalyzer(op)
if opa.order == "irrational":
print(opa)
elif opa.order > 10:
print(opa)
# orientation_in_wyckoff_position(mol, sg, WP's index in sg)
# returns a list of orientations consistent with the WP's symmetry.
# We can choose any of these orientations at random using np.random.choice
# To use an orientation, do mol.apply_operation(orientation)
# Spacegroup 16, index 6 has .2. symmetry
# check 2 fold rotation
allowed = orientation_in_wyckoff_position(mol, 16, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check reflection
allowed = orientation_in_wyckoff_position(mol, 25, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 3 fold rotation
allowed = orientation_in_wyckoff_position(mol, 147, 4, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 3",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check -1
allowed = orientation_in_wyckoff_position(mol, 2, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm -1",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 2/m
allowed = orientation_in_wyckoff_position(mol, 64, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2/m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 6
allowed = orientation_in_wyckoff_position(mol, 168, 3, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 6",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
| 2.6875
| 3
|
my_project/evolution_forces.py
|
Abhigyan-Mishra/Quantum-Animation
| 0
|
12048
|
from manimlib.imports import *
"""
TODO:
[ ] fix arrow head size
auto scale according to size?
have a default size, but, if the arrow size is too short, then shrink the head
[ ] slide the point according to the gradient
"""
class ParaboloidPlot(SpecialThreeDScene):
CONFIG = {
"three_d_axes_config": {
"num_axis_pieces": 1,
"number_line_config": {
"unit_size": 2,
"tick_frequency": 1,
"numbers_with_elongated_ticks": [0, 1, 2],
"stroke_width": 2,
},
"axis_config": {
"unit_size": 1,
"tick_frequency": 1,
"numbers_with_elongated_ticks": [],
"stroke_width": 2,
},
"x_min": 0,
"x_max": 7,
"y_min": 0,
"y_max": 7,
"z_min": 0,
"z_max": 7,
},
"init_camera_orientation": {
"phi": 80 * DEGREES,
# "theta": -135 * DEGREES,
"theta": 290 * DEGREES,
},
"paraboloid_config": {
"r_max": 1,
"center_point": 2*X_AXIS + 2*Y_AXIS,
},
"axes_center_point": -2.5*X_AXIS - 2.5*Y_AXIS - 0.75*Z_AXIS,
}
def construct(self):
self.init_camera()
self.init_axes()
self.init_paraboloid()
## add dot
x, y = 2.1, 2.9
# x, y = 2.1, 2.1
# x, y = 3, 2
z = self.paraboloid.get_value_at_point([x,y])
point = np.array([x,y,z])
sphere = Sphere(radius=0.05, fill_color=WHITE, checkerboard_colors=False)
sphere.shift(point)
sphere.shift(self.axes_center_point)
self.add(sphere)
self.rotate_phi()
## add force
gradient = self.paraboloid.get_gradient(point)
step = np.array([
gradient[0],
gradient[1],
gradient[0]**2 + gradient[1]**2
])
end = point - step
end = self.paraboloid_config["center_point"]
force = Arrow3d(start=point, end=end)
force.shift(self.axes_center_point)
self.add(force)
self.wait()
self.rotate_phi()
self.wait()
def init_camera(self):
self.set_camera_orientation(**self.init_camera_orientation)
def init_axes(self):
self.axes = self.get_axes()
self.axes.x_axis.set_color(BLUE)
self.axes.y_axis.set_color(GREEN)
self.axes.z_axis.set_color(RED)
# self.set_axes_labels()
self.axes.shift(self.axes_center_point)
self.add(self.axes)
def init_paraboloid(self):
paraboloid = self.paraboloid = ParaboloidPolar(**self.paraboloid_config)
paraboloid.shift(self.axes_center_point)
self.add(paraboloid)
def rotate_phi(self, duration=2, degrees=+20):
# e.g. duration=2 ; degrees = 20
# going 20 degrees in 2 seconds
# 60 frames per seconds
# 20 degrees in 120 frames
rate = - degrees / (60*duration)
# it won't be exact, but it'll be close enough
self.begin_ambient_camera_rotation(rate=rate, about="phi")
self.wait(2)
self.stop_ambient_camera_rotation(about="phi")
| 2.84375
| 3
|
tests/python/tensor_graph/test/test_internal/performance/build_time_resnet.py
|
QinHan-Erin/AMOS
| 22
|
12049
|
import tvm
import sys
import time
import numpy as np
from tvm.tensor_graph.testing.models import resnet
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph
from tvm.tensor_graph.nn import CELoss, SGD
from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, \
GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, \
SingleCut, form_cut_candidates, LayoutTransform
from tvm.tensor_graph.core.utils import flatten_tir_graph
from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace
from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner
from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler
from tvm.tensor_graph.core.scheduler import schedule_all
from tvm.tensor_graph.core.build_graph import build_all
from tvm.tensor_graph.core.runtime import run_all
def test1():
print("test 1 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
begin = time.time()
# change data layout
forward_space = ForwardGraphSpace()
forward_tuner = RandomForwardTuner(forward_space)
layout_generator = LayoutTransform(fwd_graph, forward_space, forward_tuner)
fgraph = layout_generator.generate()
after_layout = time.time()
# autodiff
bgraph = fgraph.make_backward(ce_loss, sgd)
after_autodiff = time.time()
# make tir graph
inputs = [x.tvm_tensor for x in bgraph.inputs]
weights = [x.tvm_tensor for x in bgraph.weights]
outputs = [x.tvm_tensor for x in bgraph.outputs]
# labels = [x.tvm_tensor for x in bgraph.labels]
# loss = bgraph.loss.tvm_tensor
# gradients = [x.tvm_tensor for x in bgraph.gradients]
# updates = [x.tvm_tensor for x in bgraph.updates]
labels = []
loss = None
gradients = []
lr = None
updates = []
tgraph = PyTIRGraph(
inputs,
labels,
outputs,
weights,
loss,
gradients,
lr,
updates)
after_tir_graph = time.time()
# subgraph partition
partition_space = PartitionSpace()
partition_tuner = RandomPartitionTuner(partition_space)
cut_candidates = form_cut_candidates(tgraph)
# print(cut_candidates)
for i, candidate in enumerate(cut_candidates):
name = "graph_cut_" + str(i)
partition_generator = SingleCut(tgraph, name, candidate, partition_space, partition_tuner)
partition_generator.generate()
# for op, stat in tgraph.op_stat_dict.items():
# print(op, " head=", stat.head)
tgraph.partition_graph()
after_partition = time.time()
print("num subgraphs:", len(tgraph.subgraphs))
target = "cuda"
dev = 0
# update the op stat dict of subgraphs
# do auto-schedule
total_build_trials = 0
build_time_record = []
for mark, subgraph in tgraph.subgraphs.items():
# print("subgraph", mark)
tensors = list(subgraph.outputs.keys()) + list(subgraph.loss.keys()) \
+ list(subgraph.gradients.keys()) + list(subgraph.updates.keys())
ops = [x.op for x in tensors]
op_list, down_graph = flatten_tir_graph(ops, output_first=True)
op_stat_dict = {}
for op in op_list:
v = tgraph.op_map[op]
if v in tgraph.op_stat_dict:
op_stat_dict[op] = tgraph.op_stat_dict[v]
c_list = form_connected_sets(subgraph, op_stat_dict, tensors, ops, down_graph)
# print("c_list_length=", len(c_list))
# print("check connected set")
# for connected_set in c_list:
# print(connected_set)
scheduler = Scheduler()
# sch = tgraph.schedules[mark]
for i, connected_set in enumerate(c_list):
name = "subgraph_" + str(mark) + "_connect_" + str(i)
assert not connected_set.empty()
build_success = False
for trial in range(10):
total_build_trials += 1
tgraph.create_schedule_for(mark=mark)
sch = tgraph.schedules[mark]
if connected_set.has_master():
if connected_set.iso_base():
PrimitiveScheduler = GPUScheduleMasterBaseSet
else:
PrimitiveScheduler = GPUScheduleMasterSet
primitive_generator = PrimitiveScheduler(
name, subgraph, connected_set, down_graph, op_stat_dict, scheduler)
else:
PrimitiveScheduler = GPUScheduleBaseSet
primitive_generator = PrimitiveScheduler(
name, connected_set, scheduler)
primitive_generator.generate(sch)
# try:
# print(tvm.lower(sch, tgraph.bufs[mark], simple_mode=True))
# except Exception as e:
# print(e)
# print("prologue")
# for p in connected_set.prologue:
# print(p.body)
# print("epilogue")
# for e in connected_set.epilogue:
# print(e.body)
# print("base")
# print(connected_set.base.body)
# print("master")
# print(connected_set.master.body)
# print(connected_set.master.input_tensors)
# for op, master in connected_set.prologue.items():
# in_input = False
# for inp in master.input_tensors:
# if op == inp.op:
# in_input = True
# break
# if not in_input:
# print(op, "not in the inputs of", master)
build_beg = time.time()
build_success = tgraph.build_for(target, mark=mark)
build_end = time.time()
build_time_record.append(build_end - build_beg)
if build_success:
break
if not build_success:
raise RuntimeError("Can't build for subgraph", mark)
after_schedule = time.time()
tgraph.set_inputs({bgraph.inputs[0].tvm_tensor: img_np})
# tgraph.set_labels({bgraph.labels[0].tvm_tensor: label_np})
# tgraph.set_lr(optimize_engine.get_lr())
tgraph.allocate_buffer(target, dev)
beg = time.time()
for mark in tgraph.call_order:
func = tgraph.functions[mark]
bufs = tgraph.bufs[mark]
real_bufs = [tgraph.tvm_array_dict[tgraph.subgraphs[mark].index[x]] for x in bufs]
func_beg = time.time()
func(*real_bufs)
func_end = time.time()
print((func_end - func_beg) * 1e3, "ms")
end = time.time()
print("End to end time:", (end - beg) * 1e3, "ms")
    print("total build trials=", total_build_trials)
print("layout change time cost=", (after_layout - begin) * 1e3, "ms")
print("autodiff time cost=", (after_autodiff - after_layout) * 1e3, "ms")
print("make tir_graph time cost=", (after_tir_graph - after_autodiff) * 1e3, "ms")
print("subgraph partition time cost=", (after_partition - after_tir_graph) * 1e3, "ms")
print("schedule time cost=", (after_schedule - after_partition) * 1e3, "ms. average=",
(after_schedule - after_partition) * 1e3 / total_build_trials, "ms")
print("average build time cost=", np.array(build_time_record).mean() * 1e3, "ms")
print("total build time cost=", (after_schedule - begin) * 1e3, "ms")
print("Success!")
def test2(file=sys.stdout):
print("test 2 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph, loss=ce_loss, optimizer=sgd, inference=False)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())), file=file)
    print("different ops:", len(set(tir_graph.op_feature_dict.values())), file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
for k, v in tir_graph.op_map.items():
print(k.name, v.name, file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
tmp = {}
for f in set(tir_graph.op_feature_dict.values()):
if f.split(")")[-1] not in tmp:
tmp[f.split(")")[-1]] = []
tmp[f.split(")")[-1]].append(f)
print("different kinds of ops:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
print("####################################################", file=file)
tmp = {}
for f in set(tir_graph.subgraph_features.values()):
key = ";".join([x.split(")")[-1] for x in f.split(";")])
if key not in tmp:
tmp[key] = []
tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
for k, v in tir_graph.subgraph_features.items():
key = ";".join([x.split(")")[-1] for x in v.split(";")])
if key == "collect_3_dim4;grad_bn2d_to_conv2d_nchw_8;grad_bn2d_var_to_conv2d_nchw_10;grad_bn2d_mean_to_conv2d_nchw_2;collect_2_dim1":
i = 1
for op in tir_graph.subgraphs[k].op_list:
print(i, ". #####")
i += 1
print(op.body)
print(op.input_tensors)
break
# target = "cuda"
# dev = 0
# print("begin schedule")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], [label_np], sgd.get_lr(), target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success", file=file)
def test3():
print("test 3 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())))
    print("different ops:", len(set(tir_graph.op_feature_dict.values())))
tmp = {}
# for f in set(tir_graph.op_feature_dict.values()):
# if f.split(")")[-1] not in tmp:
# tmp[f.split(")")[-1]] = []
# tmp[f.split(")")[-1]].append(f)
# for k, v in tmp.items():
# print(k)
# for vv in v:
# print(" ", vv)
print("####################################################")
tmp = {}
# for f in set(tir_graph.subgraph_features.values()):
# key = ";".join([x.split(")")[-1] for x in f.split(";")])
# if key not in tmp:
# tmp[key] = []
# tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp))
for k, v in tmp.items():
print(k)
for vv in v:
print(" ", vv)
# target = "cuda"
# dev = 1
# print("begin build")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success")
if __name__ == "__main__":
with open("trace_resnet_subgraph.log", "w") as fout:
test2(file=fout)
# test3()
| 2.234375
| 2
|
Python3/src/basicExample.py
|
emanuelen5/XPlaneConnect
| 457
|
12050
|
<filename>Python3/src/basicExample.py
from time import sleep
import xpc
def ex():
print("X-Plane Connect example script")
print("Setting up simulation")
with xpc.XPlaneConnect() as client:
# Verify connection
try:
# If X-Plane does not respond to the request, a timeout error
# will be raised.
client.getDREF("sim/test/test_float")
except:
print("Error establishing connection to X-Plane.")
print("Exiting...")
return
# Set position of the player aircraft
print("Setting position")
# Lat Lon Alt Pitch Roll Yaw Gear
posi = [37.524, -122.06899, 2500, 0, 0, 0, 1]
client.sendPOSI(posi)
# Set position of a non-player aircraft
print("Setting NPC position")
# Lat Lon Alt Pitch Roll Yaw Gear
posi = [37.52465, -122.06899, 2500, 0, 20, 0, 1]
client.sendPOSI(posi, 1)
# Set angle of attack, velocity, and orientation using the DATA command
print("Setting orientation")
data = [\
[18, 0, -998, 0, -998, -998, -998, -998, -998],\
[ 3, 130, 130, 130, 130, -998, -998, -998, -998],\
[16, 0, 0, 0, -998, -998, -998, -998, -998]\
]
client.sendDATA(data)
# Set control surfaces and throttle of the player aircraft using sendCTRL
print("Setting controls")
ctrl = [0.0, 0.0, 0.0, 0.8]
client.sendCTRL(ctrl)
# Pause the sim
print("Pausing")
client.pauseSim(True)
sleep(2)
# Toggle pause state to resume
print("Resuming")
client.pauseSim(False)
# Stow landing gear using a dataref
print("Stowing gear")
gear_dref = "sim/cockpit/switches/gear_handle_status"
client.sendDREF(gear_dref, 0)
# Let the sim run for a bit.
sleep(4)
# Make sure gear was stowed successfully
gear_status = client.getDREF(gear_dref)
if gear_status[0] == 0:
print("Gear stowed")
else:
print("Error stowing gear")
print("End of Python client example")
input("Press any key to exit...")
if __name__ == "__main__":
ex()
| 3.03125
| 3
|
venv/lib/python3.6/site-packages/pelican/readers.py
|
RyanHelgoth/CMPUT404-Lab5
| 0
|
12051
|
import datetime
import logging
import os
import re
from collections import OrderedDict
from html import escape
from html.parser import HTMLParser
from io import StringIO
import docutils
import docutils.core
import docutils.io
from docutils.parsers.rst.languages import get_language as get_docutils_lang
from docutils.writers.html4css1 import HTMLTranslator, Writer
from pelican import rstdirectives # NOQA
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, Tag
from pelican.plugins import signals
from pelican.utils import get_date, pelican_open, posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
'tags': False,
'date': False,
'modified': False,
'status': False,
'category': False,
'author': False,
'save_as': False,
'url': False,
'authors': False,
'slug': False
}
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "<NAME>, <NAME>" or "<NAME>; <NAME>"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, str):
if ';' in text:
text = text.split(';')
else:
text = text.split(',')
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
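# Illustrative examples (added for clarity, not part of the original source) of the
# canonicalization performed by ensure_metadata_list above:
#   ensure_metadata_list('a, b, c')        -> ['a', 'b', 'c']   (split on commas)
#   ensure_metadata_list('a; b, c')        -> ['a', 'b, c']     (semicolons take precedence)
#   ensure_metadata_list([' a ', '', 'a']) -> ['a']             (stripped, empties dropped, deduplicated)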
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
class BaseReader:
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
super().__init__(document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLWriter(Writer):
def __init__(self):
super().__init__()
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files
By default the output HTML is written using
docutils.writers.html4css1.Writer and translated using a subclass of
docutils.writers.html4css1.HTMLTranslator. If you want to override it with
your own writer/translator (e.g. a HTML5-based one), pass your classes to
these two attributes. Look in the source code for details.
writer_class Used for writing contents
field_body_translator_class Used for translating metadata such
as article summary
"""
enabled = bool(docutils)
file_extensions = ['rst']
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lang_code = self.settings.get('DEFAULT_LANG', 'en')
if get_docutils_lang(lang_code):
self._language_code = lang_code
else:
logger.warning("Docutils has no localization for '%s'."
" Using 'en' instead.", lang_code)
self._language_code = 'en'
def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning(
'Document title missing in file %s: '
'Ensure exactly one top level section',
source_path)
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name.lower() in formatted_fields:
value = render_node_to_html(
document, body_elem,
self.field_body_translator_class)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'language_code': self._language_code,
'halt_level': 2,
'traceback': True,
'warning_stream': StringIO(),
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(),
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish()
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document, source_path)
metadata.setdefault('title', parts.get('title'))
return content, metadata
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
settings = self.settings['MARKDOWN']
settings.setdefault('extension_configs', {})
settings.setdefault('extensions', [])
for extension in settings['extension_configs'].keys():
if extension not in settings['extensions']:
settings['extensions'].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']:
settings['extensions'].append('markdown.extensions.meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
# prevent metadata extraction in fields
self._md.preprocessors.deregister('meta')
output = {}
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
# formatted metadata is special case and join all list values
formatted_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
formatted = self._md.convert(formatted_values)
output[name] = self.process_metadata(name, formatted)
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1:
logger.warning(
'Duplicate definition of `%s` '
'for %s. Using first one.',
name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN'])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, 'Meta'):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
return content, metadata
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
super().__init__(convert_charrefs=False)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif self._in_head and tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag))
for k, v in attrs:
result += ' ' + escape(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
if '"' in v:
result += "='{}'".format(escape(v, quote=False))
else:
result += '="{}"'.format(escape(v, quote=False))
if close_tag:
return result + ' />'
return result + '>'
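        # Illustrative note (added for clarity, not part of the original source): the quote
        # handling above emits single quotes when an attribute value itself contains a
        # double quote, and double quotes otherwise, e.g.
        #   self.build_tag('img', [('alt', 'say "hi"')], True) -> '<img alt=\'say "hi"\' />'
        #   self.build_tag('img', [('alt', 'hi')], True)       -> '<img alt="hi" />'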
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"})
if name == 'keywords':
name = 'tags'
if name in self.metadata:
# if this metadata already exists (i.e. a previous tag with the
# same name has already been specified then either convert to
# list or append to list
if isinstance(self.metadata[name], list):
self.metadata[name].append(contents)
else:
self.metadata[name] = [self.metadata[name], contents]
else:
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
This is customizable both with the 'READERS' setting, and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super().__init__(settings, cache_name, caching_policy, load_policy)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug(
'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s' % path)
if preread_signal:
logger.debug(
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata(
settings=self.settings, process=reader.process_metadata))
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(_filter_discardable_metadata(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata)))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
reader_metadata = _filter_discardable_metadata(reader_metadata)
self.cache_data(path, (content, reader_metadata))
metadata.update(reader_metadata)
if content:
# find images with empty alt
find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
import smartypants
typogrify_dashes = self.settings['TYPOGRIFY_DASHES']
if typogrify_dashes == 'oldschool':
smartypants.Attr.default = smartypants.Attr.set2
elif typogrify_dashes == 'oldschool_inverted':
smartypants.Attr.default = smartypants.Attr.set3
else:
smartypants.Attr.default = smartypants.Attr.set1
# Tell `smartypants` to also replace " HTML entities with
# smart quotes. This is necessary because Docutils has already
# replaced double quotes with said entities by the time we run
# this filter.
smartypants.Attr.default |= smartypants.Attr.w
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if 'title' in metadata:
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug(
'Signal %s.send(%s, <metadata>)',
context_signal.name,
context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
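# For illustration (not in the original): a tag such as <img src="logo.png" alt="">
# matches the "src before alt" branch of the regex above and triggers the warning.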
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
if process:
value = process(name, value)
metadata[name] = value
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and \
settings['DEFAULT_DATE'] != 'fs':
if isinstance(settings['DEFAULT_DATE'], str):
metadata['date'] = get_date(settings['DEFAULT_DATE'])
else:
metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime)
metadata['modified'] = metadata['date']
# Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts.
epm = settings.get('EXTRA_PATH_METADATA', {})
for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name
# is a prefix of another's.
dirpath = posixize_path(os.path.join(path, ''))
if source_path == path or source_path.startswith(dirpath):
metadata.update(meta)
return metadata
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': datetime.datetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
| 2.203125
| 2
|
xmuda/models/CP_v5.py
|
anhquancao/xmuda-extend
| 0
|
12052
|
<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
from xmuda.models.DDR import Bottleneck3D
from xmuda.models.LMSCNet import SegmentationHead, ASPP
import numpy as np
from xmuda.models.modules import Process, Upsample, Downsample
import math
from xmuda.data.utils.preprocess import create_voxel_position
class AggregationModule(nn.Module):
"""Aggregation Module"""
def __init__(self,
feature, out_feature):
super(AggregationModule, self).__init__()
dilations = [1, 2, 3] # kitti
# dilations = [1, 1, 1] # NYU
self.b1 = Bottleneck3D(feature, feature // 4, norm_layer=nn.BatchNorm3d, dilation=[dilations[0], dilations[0], dilations[0]])
self.b2 = Bottleneck3D(feature, feature // 4, norm_layer=nn.BatchNorm3d, dilation=[dilations[1], dilations[1], dilations[1]])
self.b3 = Bottleneck3D(feature, feature // 4, norm_layer=nn.BatchNorm3d, dilation=[dilations[2], dilations[2], dilations[2]])
self.resize = nn.Conv3d(feature * 4, out_feature, kernel_size=1, padding=0)
self.aspp = ASPP(out_feature, [1, 2, 3])
def forward(self, x):
x1 = self.b1(x)
x2 = self.b2(x1)
x3 = self.b3(x2)
x = torch.cat([x, x1, x2, x3], dim=1)
x = self.aspp(self.resize(x))
return x
class CPMegaVoxels(nn.Module):
def __init__(self, out_channels, feature, size,
n_relations=4,
bn_momentum=0.0003):
super().__init__()
self.size = size
self.n_relations = n_relations
print("n_relations", self.n_relations)
self.flatten_size = size[0] * size[1] * size[2]
self.context_feature = feature
self.agg = AggregationModule(feature, self.context_feature)
self.mega_context = nn.AvgPool3d(kernel_size=2, stride=2)
self.flatten_context_size = (size[0]//2) * (size[1]//2) * (size[2]//2)
self.context_prior_logits = nn.ModuleList([
nn.Sequential(
nn.Conv3d(self.context_feature, self.flatten_context_size, padding=0, kernel_size=1),
) for i in range(n_relations)
])
self.resize = nn.Sequential(
nn.Conv3d(self.context_feature * self.n_relations + feature, out_channels, kernel_size=3, padding=1),
)
self.mega_context_logit = nn.Sequential(
nn.Conv3d(self.context_feature, 12, kernel_size=1, padding=0)
)
def forward(self, input):
ret = {}
bs, c, h, w, d = input.shape
x_agg = self.agg(input)
# get the mega context
x_mega_context = self.mega_context(x_agg) # bs, 512, 7, 4, 7
x_mega_context = x_mega_context.reshape(bs, x_mega_context.shape[1], -1) # bs, 512, 196
x_mega_context = x_mega_context.permute(0, 2, 1) # bs, 196, 512
# get context prior map
x_context_prior_logits = []
x_context_rels = []
for rel in range(self.n_relations):
x_context_prior_logit = self.context_prior_logits[rel](x_agg) # bs, 784, 15, 9, 15
x_context_prior_logit = x_context_prior_logit.reshape(bs, 1, self.flatten_context_size, self.flatten_size)
x_context_prior_logits.append(x_context_prior_logit)
x_context_prior = torch.sigmoid(x_context_prior_logit).squeeze(dim=1).permute(0, 2, 1) # bs, 2025, 196
x_context_rel = torch.bmm(x_context_prior, x_mega_context) # bs, 2025, 1024
x_context_rels.append(x_context_rel)
x_context = torch.cat(x_context_rels, dim=2)
x_context = x_context.permute(0, 2, 1)
x_context = x_context.reshape(bs, self.context_feature * self.n_relations, self.size[0], self.size[1], self.size[2])
x = torch.cat([input, x_context], dim=1)
x = self.resize(x)
x_context_prior_logits = torch.cat(x_context_prior_logits, dim=1) # bs, n_relations, 196, 2025
ret["P_logits"] = x_context_prior_logits
ret["x"] = x
return ret
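# Shape sketch (illustrative assumption, not part of the original file): with
# size=(15, 9, 15), feature=64, out_channels=64 and the default n_relations=4,
#   out = CPMegaVoxels(64, 64, (15, 9, 15))(torch.randn(1, 64, 15, 9, 15))
# returns out["x"] of shape (1, 64, 15, 9, 15) and out["P_logits"] of shape
# (1, 4, 196, 2025), where 196 = 7*4*7 mega-voxels and 2025 = 15*9*15 voxels.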
| 1.867188
| 2
|
gala/potential/potential/tests/helpers.py
|
ltlancas/gala
| 1
|
12053
|
# coding: utf-8
from __future__ import division, print_function
# Standard library
import time
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
from astropy.extern.six.moves import cPickle as pickle
import pytest
# Project
from ..io import load
from ..core import CompositePotential
from ....units import UnitSystem, DimensionlessUnitSystem
from ....dynamics import PhaseSpacePosition
from ....integrate import LeapfrogIntegrator
def partial_derivative(func, point, dim_ix=0, **kwargs):
xyz = np.array(point, copy=True)
def wraps(a):
xyz[dim_ix] = a
return func(xyz)
return derivative(wraps, point[dim_ix], **kwargs)
class PotentialTestBase(object):
name = None
potential = None # MUST SET THIS
tol = 1E-5
show_plots = False
@classmethod
def setup_class(cls):
if cls.name is None:
cls.name = cls.__name__[4:] # remove Test
print("Testing potential: {}".format(cls.name))
cls.w0 = np.array(cls.w0)
cls.ndim = cls.w0.size // 2
# TODO: need to test also quantity objects and phasespacepositions!
# these are arrays we will test the methods on:
w0_2d = np.repeat(cls.w0[:,None], axis=1, repeats=16)
w0_3d = np.repeat(w0_2d[...,None], axis=2, repeats=8)
w0_list = list(cls.w0)
w0_slice = w0_2d[:,:4]
cls.w0s = [cls.w0, w0_2d, w0_3d, w0_list, w0_slice]
cls._grad_return_shapes = [cls.w0[:cls.ndim].shape + (1,),
w0_2d[:cls.ndim].shape,
w0_3d[:cls.ndim].shape,
cls.w0[:cls.ndim].shape + (1,),
w0_slice[:cls.ndim].shape]
cls._hess_return_shapes = [(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_2d[:cls.ndim].shape,
(cls.ndim,) + w0_3d[:cls.ndim].shape,
(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_slice[:cls.ndim].shape]
cls._valu_return_shapes = [x[1:] for x in cls._grad_return_shapes]
def test_unitsystem(self):
assert isinstance(self.potential.units, UnitSystem)
def test_energy(self):
assert self.ndim == self.potential.ndim
for arr,shp in zip(self.w0s, self._valu_return_shapes):
v = self.potential.energy(arr[:self.ndim])
assert v.shape == shp
g = self.potential.energy(arr[:self.ndim], t=0.1)
g = self.potential.energy(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.energy(arr[:self.ndim], t=t)
g = self.potential.energy(arr[:self.ndim], t=t*self.potential.units['time'])
def test_gradient(self):
for arr,shp in zip(self.w0s, self._grad_return_shapes):
g = self.potential.gradient(arr[:self.ndim])
assert g.shape == shp
g = self.potential.gradient(arr[:self.ndim], t=0.1)
g = self.potential.gradient(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.gradient(arr[:self.ndim], t=t)
g = self.potential.gradient(arr[:self.ndim], t=t*self.potential.units['time'])
def test_hessian(self):
for arr,shp in zip(self.w0s, self._hess_return_shapes):
g = self.potential.hessian(arr[:self.ndim])
assert g.shape == shp
g = self.potential.hessian(arr[:self.ndim], t=0.1)
g = self.potential.hessian(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.hessian(arr[:self.ndim], t=t)
g = self.potential.hessian(arr[:self.ndim], t=t*self.potential.units['time'])
def test_mass_enclosed(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.mass_enclosed(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.mass_enclosed(arr[:self.ndim], t=t)
g = self.potential.mass_enclosed(arr[:self.ndim], t=t*self.potential.units['time'])
def test_circular_velocity(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.circular_velocity(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.circular_velocity(arr[:self.ndim], t=t)
g = self.potential.circular_velocity(arr[:self.ndim], t=t*self.potential.units['time'])
def test_repr(self):
pot_repr = repr(self.potential)
if isinstance(self.potential.units, DimensionlessUnitSystem):
assert "dimensionless" in pot_repr
else:
assert str(self.potential.units['length']) in pot_repr
assert str(self.potential.units['time']) in pot_repr
assert str(self.potential.units['mass']) in pot_repr
for k in self.potential.parameters.keys():
assert "{}=".format(k) in pot_repr
def test_compare(self):
# skip if composite potentials
if len(self.potential.parameters) == 0:
return
other = self.potential.__class__(units=self.potential.units,
**self.potential.parameters)
assert other == self.potential
pars = self.potential.parameters.copy()
for k in pars.keys():
if k != 0:
pars[k] = 1.1*pars[k]
other = self.potential.__class__(units=self.potential.units, **pars)
assert other != self.potential
# check that comparing to non-potentials works
assert not self.potential == "sup"
assert not self.potential == None
def test_plot(self):
p = self.potential
if self.show_plots:
f = p.plot_contours(grid=(np.linspace(-10., 10., 100), 0., 0.),
labels=["X"])
# f.suptitle("slice off from 0., won't have cusp")
# f.savefig(os.path.join(plot_path, "contour_x.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
np.linspace(-10., 10., 100),
0.),
cmap='Blues')
# f.savefig(os.path.join(plot_path, "contour_xy.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
1.,
np.linspace(-10., 10., 100)),
cmap='Blues', labels=["X", "Z"])
# f.savefig(os.path.join(plot_path, "contour_xz.png"))
plt.show()
plt.close('all')
def test_save_load(self, tmpdir):
"""
Test writing to a YAML file, and reading back in
"""
fn = str(tmpdir.join("{}.yml".format(self.name)))
self.potential.save(fn)
p = load(fn)
p.energy(self.w0[:self.w0.size//2])
p.gradient(self.w0[:self.w0.size//2])
def test_numerical_gradient_vs_gradient(self):
"""
Check that the value of the implemented gradient function is close to a
numerically estimated value. This is to check the coded-up version.
"""
dx = 1E-3 * np.sqrt(np.sum(self.w0[:self.w0.size//2]**2))
max_x = np.sqrt(np.sum([x**2 for x in self.w0[:self.w0.size//2]]))
grid = np.linspace(-max_x,max_x,8)
grid = grid[grid != 0.]
grids = [grid for i in range(self.w0.size//2)]
xyz = np.ascontiguousarray(np.vstack(map(np.ravel, np.meshgrid(*grids))).T)
def energy_wrap(xyz):
xyz = np.ascontiguousarray(xyz[None])
return self.potential._energy(xyz, t=np.array([0.]))[0]
num_grad = np.zeros_like(xyz)
for i in range(xyz.shape[0]):
num_grad[i] = np.squeeze([partial_derivative(energy_wrap, xyz[i], dim_ix=dim_ix, n=1, dx=dx, order=5)
for dim_ix in range(self.w0.size//2)])
grad = self.potential._gradient(xyz, t=np.array([0.]))
assert np.allclose(num_grad, grad, rtol=self.tol)
def test_orbit_integration(self):
"""
Make sure we can integrate an orbit in this potential
"""
w0 = self.w0
w0 = np.vstack((w0,w0,w0)).T
t1 = time.time()
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
print("Integration time (10000 steps): {}".format(time.time() - t1))
if self.show_plots:
f = orbit.plot()
f.suptitle("Vector w0")
plt.show()
plt.close(f)
us = self.potential.units
w0 = PhaseSpacePosition(pos=w0[:self.ndim]*us['length'],
vel=w0[self.ndim:]*us['length']/us['time'])
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
if self.show_plots:
f = orbit.plot()
f.suptitle("Object w0")
plt.show()
plt.close(f)
def test_pickle(self, tmpdir):
fn = str(tmpdir.join("{}.pickle".format(self.name)))
with open(fn, "wb") as f:
pickle.dump(self.potential, f)
with open(fn, "rb") as f:
p = pickle.load(f)
p.energy(self.w0[:self.w0.size//2])
class CompositePotentialTestBase(PotentialTestBase):
@pytest.mark.skip(reason="Skip composite potential repr test")
def test_repr(self):
pass
@pytest.mark.skip(reason="Skip composite potential compare test")
def test_compare(self):
pass
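# Usage sketch (illustrative assumption, not from the original module): a concrete
# test case only needs to provide `potential` and `w0`, e.g.
#
#   class TestKepler(PotentialTestBase):
#       potential = KeplerPotential(m=1E11*u.Msun, units=galactic)
#       w0 = [1., 0., 0., 0., 0.1, 0.1]
#
# setup_class() then derives ndim and the expected return shapes automatically.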
| 2.125
| 2
|
main.py
|
ailzy/Reinforcement-learning-in-portfolio-management-
| 0
|
12054
|
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import json
import time
import pandas as pd
import tensorflow as tf
import numpy as np
import math
from decimal import Decimal
import matplotlib.pyplot as plt
from agents.ornstein_uhlenbeck import OrnsteinUhlenbeckActionNoise
eps=10e-8
epochs=0
M=0
class StockTrader():
def __init__(self):
self.reset()
def reset(self):
self.wealth = 10e3
self.total_reward = 0
self.ep_ave_max_q = 0
self.loss = 0
self.actor_loss=0
self.wealth_history = []
self.r_history = []
self.w_history = []
self.p_history = []
self.noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(M))
def update_summary(self,loss,r,q_value,actor_loss,w,p):
self.loss += loss
self.actor_loss+=actor_loss
self.total_reward+=r
self.ep_ave_max_q += q_value
self.r_history.append(r)
self.wealth = self.wealth * math.exp(r)
self.wealth_history.append(self.wealth)
self.w_history.extend([','.join([str(Decimal(str(w0)).quantize(Decimal('0.00'))) for w0 in w.tolist()[0]])])
self.p_history.extend([','.join([str(Decimal(str(p0)).quantize(Decimal('0.000'))) for p0 in p.tolist()])])
def write(self,epoch):
wealth_history = pd.Series(self.wealth_history)
r_history = pd.Series(self.r_history)
w_history = pd.Series(self.w_history)
p_history = pd.Series(self.p_history)
history = pd.concat([wealth_history, r_history, w_history, p_history], axis=1)
history.to_csv('result' + str(epoch) + '-' + str(math.exp(np.sum(self.r_history)) * 100) + '.csv')
def print_result(self,epoch,agent):
self.total_reward=math.exp(self.total_reward) * 100
print('*-----Episode: {:d}, Reward:{:.6f}%, ep_ave_max_q:{:.2f}, actor_loss:{:2f}-----*'.format(epoch, self.total_reward,self.ep_ave_max_q,self.actor_loss))
agent.write_summary(self.loss, self.total_reward,self.ep_ave_max_q,self.actor_loss, epoch)
agent.save_model()
def plot_result(self):
pd.Series(self.wealth_history).plot()
plt.show()
def action_processor(self,a,ratio):
a = np.clip(a + self.noise() * ratio, 0, 1)
a = a / (a.sum() + eps)
return a
def parse_info(info):
return info['reward'], info['continue'], info['next state'], info['weight vector'], info['price'], info['risk']
def traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable):
info = env.step(None,None)
r,contin,s,w1,p,risk=parse_info(info)
contin=1
t=0
while contin:
w2 = agent.predict(s,w1)
if noise_flag=='True':
w2=stocktrader.action_processor(w2,(epochs-epoch)/epochs)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p,risk = parse_info(env_info)
if framework=='PG':
agent.save_transition(s,p,w2,w1)
else:
agent.save_transition(s, w2, r-risk, contin, s_next, w1)
loss, q_value,actor_loss=0,0,0
if framework=='DDPG':
if not contin and trainable=="True":
agent_info= agent.train(method,epoch)
loss, q_value=agent_info["critic_loss"],agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PPO':
if not contin and trainable=="True":
agent_info = agent.train(method, epoch)
loss, q_value = agent_info["critic_loss"], agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PG':
if not contin and trainable=="True":
agent.train()
stocktrader.update_summary(loss,r,q_value,actor_loss,w2,p)
s = s_next
t=t+1
def backtest(agent,env):
print("starting to backtest......")
from agents.UCRP import UCRP
from agents.Winner import WINNER
from agents.Losser import LOSSER
agents=[]
agents.append(agent)
agents.append(WINNER())
agents.append(UCRP())
agents.append(LOSSER())
labels=['PG','Winner','UCRP','Losser']
wealths_result=[]
rs_result=[]
for i,agent in enumerate(agents):
info = env.step(None, None)
r, contin, s, w1, p, risk = parse_info(info)
contin = 1
wealth=10000
wealths = [wealth]
rs=[1]
while contin:
w2 = agent.predict(s, w1)
if i==0:
print(w2)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p, risk = parse_info(env_info)
wealth=wealth*math.exp(r)
rs.append(math.exp(r)-1)
wealths.append(wealth)
s=s_next
print('finish one agent')
wealths_result.append(wealths)
rs_result.append(rs)
for i in range(len(agents)):
plt.plot(wealths_result[i],label=labels[i])
print(labels[i],' ',np.mean(rs_result[i]),' ',np.std(rs_result[i]))
plt.legend()
plt.show()
def parse_config(config,mode):
codes = config["session"]["codes"]
start_date = config["session"]["start_date"]
end_date = config["session"]["end_date"]
features = config["session"]["features"]
agent_config = config["session"]["agents"]
market = config["session"]["market_types"]
noise_flag, record_flag, plot_flag=config["session"]["noise_flag"],config["session"]["record_flag"],config["session"]["plot_flag"]
predictor, framework, window_length = agent_config
reload_flag, trainable=config["session"]['reload_flag'],config["session"]['trainable']
method=config["session"]['method']
global epochs
epochs = int(config["session"]["epochs"])
if mode=='test':
record_flag='True'
noise_flag='False'
plot_flag='True'
reload_flag='True'
trainable='False'
method='model_free'
print("*--------------------Training Status-------------------*")
print('Codes:',codes)
print("Date from",start_date,' to ',end_date)
print('Features:',features)
print("Agent:Noise(",noise_flag,')---Recoed(',noise_flag,')---Plot(',plot_flag,')')
print("Market Type:",market)
print("Predictor:",predictor," Framework:", framework," Window_length:",window_length)
print("Epochs:",epochs)
print("Trainable:",trainable)
print("Reloaded Model:",reload_flag)
print("Method",method)
print("Noise_flag",noise_flag)
print("Record_flag",record_flag)
print("Plot_flag",plot_flag)
return codes,start_date,end_date,features,agent_config,market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method
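# Illustrative config.json "session" block consumed above (the keys are taken from
# parse_config; the example values are assumptions, not the project's shipped defaults):
#   {"session": {"codes": ["AAPL", "MSFT"], "start_date": "2015-01-01",
#                "end_date": "2017-01-01", "features": ["close", "high", "low"],
#                "agents": ["CNN", "DDPG", "10"], "market_types": "America",
#                "noise_flag": "True", "record_flag": "False", "plot_flag": "False",
#                "reload_flag": "False", "trainable": "True",
#                "method": "model_free", "epochs": "100"}}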
def session(config,mode):
from data.environment import Environment
codes, start_date, end_date, features, agent_config, market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method=parse_config(config,mode)
env = Environment(start_date, end_date, codes, features, int(window_length),market)
global M
M=len(codes)+1
if framework == 'DDPG':
print("*-----------------Loading DDPG Agent---------------------*")
from agents.ddpg import DDPG
agent = DDPG(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PPO':
print("*-----------------Loading PPO Agent---------------------*")
from agents.ppo import PPO
agent = PPO(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PG':
print("*-----------------Loading PG Agent---------------------*")
from agents.pg import PG
agent = PG(len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
stocktrader=StockTrader()
if mode=='train':
print("Training with {:d}".format(epochs))
for epoch in range(epochs):
print("Now we are at epoch", epoch)
traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable)
if record_flag=='True':
stocktrader.write(epoch)
if plot_flag=='True':
stocktrader.plot_result()
agent.reset_buffer()
stocktrader.print_result(epoch,agent)
stocktrader.reset()
elif mode=='test':
backtest(agent, env)
def build_parser():
parser = ArgumentParser(description='Provide arguments for training different DDPG or PPO models in Portfolio Management')
parser.add_argument("--mode",dest="mode",help="download(China), train, test",metavar="MODE", default="train",required=True)
parser.add_argument("--model",dest="model",help="DDPG,PPO",metavar="MODEL", default="DDPG",required=False)
return parser
def main():
parser = build_parser()
args=vars(parser.parse_args())
with open('config.json') as f:
config=json.load(f)
if args['mode']=='download':
from data.download_data import DataDownloader
data_downloader=DataDownloader(config)
data_downloader.save_data()
else:
session(config,args['mode'])
if __name__=="__main__":
main()
| 2.421875
| 2
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/utils/fs2gif.py
|
amakropoulos/structural-pipeline-measures
| 2
|
12055
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2016-03-16 11:28:27
# @Last Modified by: oesteban
# @Last Modified time: 2016-04-04 13:50:50
"""
Batch export freesurfer results to animated gifs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path as op
import subprocess as sp
from shutil import rmtree
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from tempfile import mkdtemp
from errno import EEXIST
import glob
from six import string_types
import numpy as np
import nibabel as nb
from skimage import exposure
def main():
"""Entry point"""
parser = ArgumentParser(description='Batch export freesurfer results to animated gifs',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-S', '--subjects-dir', action='store', default=os.getcwd())
g_input.add_argument('-s', '--subject-id', action='store')
g_input.add_argument('-t', '--temp-dir', action='store')
g_input.add_argument('--keep-temp', action='store_true', default=False)
g_input.add_argument('--zoom', action='store_true', default=False)
g_input.add_argument('--hist-eq', action='store_true', default=False)
g_outputs = parser.add_argument_group('Outputs')
g_outputs.add_argument('-o', '--output-dir', action='store', default='fs2gif')
opts = parser.parse_args()
if opts.temp_dir is None:
tmpdir = mkdtemp()
else:
tmpdir = op.abspath(opts.temp_dir)
try:
os.makedirs(tmpdir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
out_dir = op.abspath(opts.output_dir)
try:
os.makedirs(out_dir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
subjects_dir = op.abspath(opts.subjects_dir)
subject_list = opts.subject_id
if subject_list is None:
subject_list = [name for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
elif isinstance(subject_list, string_types):
if '*' not in subject_list:
subject_list = [subject_list]
else:
all_dirs = [op.join(subjects_dir, name) for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
pattern = glob.glob(op.abspath(op.join(subjects_dir, opts.subject_id)))
subject_list = list(set(pattern).intersection(set(all_dirs)))
environ = os.environ.copy()
environ['SUBJECTS_DIR'] = subjects_dir
# tcl_file = pkgr.resource_filename('structural_dhcp_mriqc', 'data/fsexport.tcl')
tcl_contents = """
SetOrientation 0
SetCursor 0 128 128 128
SetDisplayFlag 3 0
SetDisplayFlag 22 1
set i 0
"""
for sub_path in subject_list:
subid = op.basename(sub_path)
tmp_sub = op.join(tmpdir, subid)
try:
os.makedirs(tmp_sub)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
niifile = op.join(tmp_sub, '%s.nii.gz' % subid)
ref_file = op.join(sub_path, 'mri', 'T1.mgz')
sp.call(['mri_convert', op.join(sub_path, 'mri', 'norm.mgz'), niifile],
cwd=tmp_sub)
data = nb.load(niifile).get_data()
data[data > 0] = 1
# Compute brain bounding box
indexes = np.argwhere(data)
bbox_min = indexes.min(0)
bbox_max = indexes.max(0) + 1
center = np.average([bbox_min, bbox_max], axis=0)
if opts.hist_eq:
modnii = op.join(tmp_sub, '%s.nii.gz' % subid)
ref_file = op.join(tmp_sub, '%s.mgz' % subid)
img = nb.load(niifile)
data = exposure.equalize_adapthist(img.get_data(), clip_limit=0.03)
nb.Nifti1Image(data, img.get_affine(), img.get_header()).to_filename(modnii)
sp.call(['mri_convert', modnii, ref_file], cwd=tmp_sub)
if not opts.zoom:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, '%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub, subid),
'%s/%s.gif' % (out_dir, subid)])
else:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, 'lh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2\n')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] + 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-lh-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file], env=environ)
# Export tiffs for right hemisphere
tcl_file = op.join(tmp_sub, 'rh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2\n')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] - 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-rh-' % (tmp_sub, subid) + '%03d.tif" $slice]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (tmp_sub, subid),
'%s/%s-lh.gif' % (out_dir, subid)])
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (tmp_sub, subid),
'%s/%s-rh.gif' % (out_dir, subid)])
if not opts.keep_temp:
try:
rmtree(tmp_sub)
except:
pass
if __name__ == '__main__':
main()
| 2.078125
| 2
|
DE_DataBase.py
|
almirjgomes/DE_DataBaseConnect
| 0
|
12056
|
<reponame>almirjgomes/DE_DataBaseConnect
import os
import sqlite3 as sq3
import cx_Oracle as ora
import pandas as pd
import psycopg2 as ps2
import mysql.connector as mysql
import sqlalchemy
# Responsibilities of this class:
# Only connect to one of the databases specified below
# Known databases: SQLITE, ORACLE, MYSQL, POSTGRES
class DATABASE:
def __init__(self):
pass
def ORACLE(self, string_conect: dict):
instancia = None
try:
# Define the path to the Oracle client library
if string_conect["path_library"] is None:
pathlib = os.getenv("ORACLE_LIB")
else:
pathlib = string_conect["path_library"]
# Check whether the Oracle client library has already been initialized
try:
ora.init_oracle_client(lib_dir=pathlib)
except:
pass
# do nothing (left this way on purpose: if it is already initialized, the call raises and we ignore it)
database = string_conect["database"]
driver = "cx_oracle"
user = string_conect["username"]
pwd = string_conect["password"]
host = string_conect["host"]
port = string_conect["port"]
# Determine the instance type: SID or SERVICE_NAME
if string_conect["sid"] is not None:
#dnsName = ora.makedsn(host=string_conect["host"], port=string_conect["port"], sid=string_conect["sid"])
dnsName = f"""{string_conect["host"]}:{string_conect["port"]}/{string_conect["sid"]}"""
else:
#dnsName = ora.makedsn(host=string_conect["host"], port=string_conect["port"], service_name=string_conect["service_name"])
dnsName = f"""{string_conect["host"]}:{string_conect["port"]}/{string_conect["service_name"]}"""
str_cnn = f"""{database.lower()}+{driver}://{user}:{pwd}@{dnsName}"""
engine = sqlalchemy.create_engine(str_cnn, arraysize=1000)
except Exception as error:
engine = error
finally:
return engine
def ORACLE_NAT(self, string_connect: dict):
pathlib, cnn = None, None
try:
# Define the path to the Oracle client library
if "library" in string_connect.keys():
if string_connect["library"] is None:
pathlib = os.getenv("ORACLE_LIB")
else:
pathlib = string_connect["library"]
else:
pathlib = os.getenv("ORACLE_LIB")
# Check whether the Oracle client library has already been initialized
try:
ora.init_oracle_client(lib_dir=pathlib)
except:
pass
# do nothing (left this way on purpose: if it is already initialized, the call raises and we ignore it)
# Determine the instance type: SID or SERVICE_NAME
if string_connect["sid"] is not None:
dnsName = ora.makedsn(host=string_connect["host"], port=string_connect["port"], sid=string_connect["sid"])
else:
dnsName = ora.makedsn(host=string_connect["host"], port=string_connect["port"], service_name=string_connect["service_name"])
# Connect to the database instance
cnn = ora.connect(string_connect["username"], string_connect["password"], dnsName, threaded=True)
except Exception as error:
msg = f"""Falha ao tentar se conectar com o banco de dados ORACLE [{string_connect["name_conection"]}].\nErro: {error} """
cnn = msg
finally:
return cnn
def SQLITE(self, database):
DATABASE_NAME, result, msg, conn = None, False, None, None
try:
if os.path.isfile(database):
conn = sq3.connect(database)
msg = f"""SQLITE [{database}]- Conexao efetuada com sucesso!"""
else:
msg = f"""SQLITE [{database}]- Não existe no local informado!"""
raise Exception
except Exception as error:
msg = f"""Falha ao tentar conectar com o banco de dados SQLITE "{DATABASE_NAME}". Erro: {error} """
conn = None
finally:
return conn
def POSTGRES(self, string_connect: dict):
msg, cnn = None, None
try:
# Connect to the database instance
cnn = ps2.connect(user=string_connect["username"], password=string_connect["password"], database=string_connect["instance"], host=string_connect["host"])
except Exception as error:
msg = f"""Falha ao tentar se conectar com o banco de dados POSTGRES.\n """
cnn = msg
finally:
return cnn
def MYSQL(self, string_connect: dict):
msg, cnn = None, None
try:
# Connect to the database instance
cnn = mysql.connect(user=string_connect["username"], password=string_connect["password"], database=string_connect["instance"], host=string_connect["host"])
except Exception as error:
msg = f"""Falha ao tentar se conectar com o banco de dados MYSQL.\n """
cnn = msg
finally:
return cnn
def METADATA(self,
conexao: object,
database: str,
nome_tabela: str,
alias: str = 'x',
quoted: bool = False,
rowid: bool = False,
join: str = None,
where: str = None,
orderby: str = None,
limit: int = 0
) -> str:
try:
querys = {"ORACLE": f"""Select * from all_tab_columns where table_name = '{nome_tabela}' order by column_id""""",
"POSTGRES": f"""Select * from information_schema.columns where table_name = '{nome_tabela}' order by ordinal_position""",
"SQLITE": f"""Select * from pragma_table_info('{nome_tabela}') order by cid""",
"MYSQL": f"""Select * from information_schema.columns where table_name = '{nome_tabela}' order by ordinal_position"""}
qry = querys[database]
df = pd.read_sql(con=conexao, sql=qry)
nom_owner, column_list = None, []
# GET THE COLUMNS
for index, row in df.iterrows():
# -----------------------------------------
# SQLITE database
if database == "SQLITE":
column = df.loc[index, "name"]
# OWNER
nom_owner = ""
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
else:
column_list.append(f"""{alias}.{column}""")
# -----------------------------------------
# ORACLE database
elif database == 'ORACLE':
column = df.loc[index, "column_name"]
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
# OWNER
nom_owner = f"""\"{row.owner}"."""
else:
column_list.append(f"""{alias}.{column}""")
# OWNER
nom_owner = f"""{row.owner}."""
# MYSQL database
elif database == "MYSQL":
column = df.loc[index, "column_name"]
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
else:
column_list.append(f"""{alias}.{column}""")
# OWNER
nom_owner = ""
# -----------------------------------------
# POSTGRES database
elif database == "POSTGRES":
column = df.loc[index, "column_name".lower()]
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
else:
column_list.append(f"""{alias}.{column}""")
# OWNER
nom_owner = ""
# ROWID
if rowid:
# -----------------------------------------
# SQLITE database
if database == "SQLITE":
column_list.append(f"""{alias}.ROWID ROWID_TABELA""")
# -----------------------------------------
# ORACLE database
elif database == "ORACLE":
column_list.append(f"""rowidtochar({alias}.Rowid) "ROWID_TABELA" """)
# -----------------------------------------
# MYSQL database
elif database == "MYSQL":
# not implemented
# need to identify which MYSQL column represents this information
pass
# -----------------------------------------
# POSTGRES database
elif database == "POSTGRES":
column_list.append(f"""{alias}.row_number() OVER () ROWID_TABELA""")
# Assemble the columns
colunas = "\n ,".join(column_list)
select = f"""select {colunas}"""
# TABLE NAME
tabela = f"""\n from {nome_tabela.strip()} {alias.strip()}"""
# JOIN
if join is None:
join = ""
else:
join = f"""\n {join}"""
#WHERE
if where is None:
if database == "ORACLE" and limit > 0:
where = f"""\n where rownum <= {limit}"""
else:
where = ""
else:
if database == "ORACLE" and limit > 0:
where = f"""\n {where.strip()}\n and rownum <= {limit}"""
else:
where = f"""\n {where.strip()}"""
#ORDERBY
if orderby is None:
orderby = ""
else:
orderby = f"""\n {orderby.strip()}"""
# LIMIT
if database in ["MYSQL", "SQLITE", "POSTGRES"]:
if limit > 0:
limit = f"""\nlimit {limit}"""
else:
limit = ""
else:
limit = ""
qry = f"""{select}{tabela}{join}{where}{orderby}{limit}""".lstrip()
msg = qry
except Exception as error:
msg = f"""{error}\n{qry}"""
finally:
return msg
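# Usage sketch (illustrative assumption, not part of the original file):
#   db = DATABASE()
#   cnn = db.SQLITE("example.db")                       # hypothetical database file
#   qry = db.METADATA(conexao=cnn, database="SQLITE",
#                     nome_tabela="clientes", alias="c", rowid=True, limit=10)
#   print(qry)   # a SELECT listing every column of `clientes`, plus ROWID_TABELA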
if __name__ == "__main__":
pass
| 2.6875
| 3
|
peter_lists/blog/views.py
|
pvize1/peter_lists
| 0
|
12057
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from django.shortcuts import render
from django.db.models import Count
from django.db.models.functions import Trim, Lower
from django.urls import reverse_lazy
from .models import Blog
from .forms import EditBlogForm
def tag_count(blog_user, topn=0):
# TODO Move to model manager
raw_tags = (
Blog.blog.filter(user=blog_user)
.order_by("tag")
.values("tag")
.annotate(count=Count("tag"), tag_new=Trim(Lower("tag")))
)
count_tags = dict()
# TODO Split by tags with "," and those without
for record in raw_tags:
for tag in record["tag_new"].split(","):
k = tag.strip()
if len(k) > 0:
count_tags[k] = count_tags.get(k, 0) + record["count"]
# TODO Sort by value (desc) and then key (ascend) for common values
if topn == 0:
return {
k: count_tags[k]
for k in sorted(count_tags, key=count_tags.get, reverse=True)
}
else:
return {
k: count_tags[k]
for k in sorted(count_tags, key=count_tags.get, reverse=True)[:topn]
}
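# Illustrative example (an assumption, not from the original file): with two posts
# tagged "django, python" and one tagged "python", tag_count(user) returns
# {'python': 3, 'django': 2}.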
# Create your views here.
def BlogHome(request):
blog_all = Blog.blog.filter(user=request.user)
blogs = blog_all.order_by("-modified")[:3]
blog_count = blog_all.count()
tag_sorted = tag_count(request.user, topn=5)
return render(
request,
"blog/blog_home.html",
{"blogs": blogs, "tags": tag_sorted, "blog_count": blog_count},
)
class BlogListView(PermissionRequiredMixin, ListView):
model = Blog
paginate_by = 3
template_name = "blog/blog_list.html"
permission_required = "blog.view_blog"
def get_queryset(self):
return Blog.blog.filter(user=self.request.user)
def BlogAllTagsView(request):
# TODO turn into ListView with paginate
tag_sorted = tag_count(request.user)
return render(request, "blog/blog_tags.html", {"tags": tag_sorted})
class BlogTagListView(PermissionRequiredMixin, ListView):
model = Blog
paginate_by = 3
template_name = "blog/blog_list.html"
permission_required = "blog.view_blog"
def get_queryset(self):
return Blog.blog.filter(tag__contains=self.kwargs["tag_name"], user=self.request.user)
class BlogDetailView(PermissionRequiredMixin, DetailView):
model = Blog
template_name = "blog/blog_detail.html"
permission_required = "blog.view_blog"
class BlogCreateView(PermissionRequiredMixin, LoginRequiredMixin, CreateView):
form_class = EditBlogForm
model = Blog
action = "Add"
template_name = "blog/blog_form.html"
permission_required = "blog.add_blog"
class BlogUpdateView(PermissionRequiredMixin, LoginRequiredMixin, UpdateView):
form_class = EditBlogForm
model = Blog
action = "Edit"
template_name = "blog/blog_form.html"
permission_required = "blog.change_blog"
class BlogDeleteView(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
model = Blog
success_url = reverse_lazy("blog:list")
permission_required = "blog.delete_blog"
| 2.015625
| 2
|
tests/test_pythonpath.py
|
browniebroke/pytest-srcpaths
| 26
|
12058
|
import sys
from typing import Generator
from typing import List
from typing import Optional
import pytest
from _pytest.pytester import Pytester
def test_one_dir_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub\n")
result = pytester.runpytest("test_foo.py")
assert result.ret == 0
result.assert_outcomes(passed=1)
def test_two_dirs_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub sub2\n")
result = pytester.runpytest("test_foo.py", "test_bar.py")
assert result.ret == 0
result.assert_outcomes(passed=2)
def test_unconfigure_unadded_dir_pythonpath(pytester: Pytester) -> None:
pytester.makeconftest(
"""
def pytest_configure(config):
config.addinivalue_line("pythonpath", "sub")
"""
)
pytester.makepyfile(
"""
import sys
def test_something():
pass
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_clean_up_pythonpath(pytester: Pytester) -> None:
"""Test that the srcpaths plugin cleans up after itself."""
pytester.makefile(".ini", pytest="[pytest]\npythonpath=I_SHALL_BE_REMOVED\n")
pytester.makepyfile(test_foo="""def test_foo(): pass""")
before: Optional[List[str]] = None
after: Optional[List[str]] = None
class Plugin:
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_unconfigure(self) -> Generator[None, None, None]:
nonlocal before, after
before = sys.path.copy()
yield
after = sys.path.copy()
result = pytester.runpytest_inprocess(plugins=[Plugin()])
assert result.ret == 0
assert before is not None
assert after is not None
assert any("I_SHALL_BE_REMOVED" in entry for entry in before)
assert not any("I_SHALL_BE_REMOVED" in entry for entry in after)
| 2.375
| 2
|
coretemp.py
|
InScene/dht22-mqtt-daemon
| 0
|
12059
|
#!/usr/bin/env python2
import paho.mqtt.client as mqtt
import time
import Adafruit_DHT
from configparser import ConfigParser
import json
config = ConfigParser(delimiters=('=', ))
config.read('config.ini')
sensor_type = config['sensor'].get('type', 'dht22').lower()
if sensor_type == 'dht22':
sensor = Adafruit_DHT.DHT22
elif sensor_type == 'dht11':
sensor = Adafruit_DHT.DHT11
elif sensor_type == 'am2302':
sensor = Adafruit_DHT.AM2302
else:
raise Exception('Supported sensor types: DHT22, DHT11, AM2302')
pin = config['sensor'].get('pin', 10)
topic = config['mqtt'].get('topic', 'temperature/dht22')
decim_digits = config['sensor'].getint('decimal_digits', 2)
sleep_time = config['sensor'].getint('interval', 60)
user = config['mqtt'].get('user', 'guest')
password = config['mqtt'].get('password', '<PASSWORD>')
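# Illustrative config.ini (an assumption; the file is not included with this script):
#   [sensor]
#   type = dht22
#   pin = 4
#   interval = 60
#   decimal_digits = 2
#   [mqtt]
#   hostname = homeassistant
#   port = 1883
#   timeout = 60
#   user = guest
#   password = <PASSWORD>
#   topic = temperature/dht22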
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code {}".format(rc))
client = mqtt.Client()
client.on_connect = on_connect
client.username_pw_set(user, password)
client.connect(config['mqtt'].get('hostname', 'homeassistant'),
config['mqtt'].getint('port', 1883),
config['mqtt'].getint('timeout', 60))
client.loop_start()
while True:
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
data = {'temperature': round(temperature, decim_digits),
'humidity': round(humidity, decim_digits)}
client.publish(topic, json.dumps(data))
print('Published. Sleeping ...')
else:
print('Failed to get reading. Skipping ...')
time.sleep(sleep_time)
| 2.8125
| 3
|
mythril/support/support_utils.py
|
step21/mythril
| 0
|
12060
|
<gh_stars>0
"""This module contains utility functions for the Mythril support package."""
from typing import Dict
class Singleton(type):
"""A metaclass type implementing the singleton pattern."""
_instances = {} # type: Dict
def __call__(cls, *args, **kwargs):
"""Delegate the call to an existing resource or a a new one.
This is not thread- or process-safe by default. It must be protected with
a lock.
:param args:
:param kwargs:
:return:
"""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
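# Usage sketch (illustrative assumption, not part of the original module):
#   class AnalysisConfig(metaclass=Singleton):
#       pass
#   assert AnalysisConfig() is AnalysisConfig()   # both calls yield the same instance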
| 2.265625
| 2
|
tests/functions/list/test_lists_map.py
|
sukovanej/mplisp
| 0
|
12061
|
import unittest
import mplisp.evaluator as evaluator
class TestListMap(unittest.TestCase):
def test_map(self):
input1 = """
(map (lambda (x) (* 2 x)) (list 1 2 3))
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[0], [2, 4, 6])
def test_map_2(self):
input1 = """
(import "sys")
(def a (list 1 2 3 4))
(map (lambda (x) (* 2 x)) a)
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[2], [2, 4, 6, 8])
| 3.3125
| 3
|
malpickle/main.py
|
erose1337/malpickle
| 0
|
12062
|
<gh_stars>0
import argparse
from __init__ import insert_code
def main():
parser = argparse.ArgumentParser(description="Inject code into pickle files")
parser.add_argument("pickle_file", help="The pickle file to inject code into")
parser.add_argument("code_file", help="The shell script to inject")
#parser.add_argument("-u", "--unittest", help="Only run the unit test; Ignores pickle_file and code_file", type=bool)
args = parser.parse_args()
# if args.unittest:
# return test_insert_code()
filename = args.pickle_file
code_file = args.code_file
with open(filename, "rb+") as pickle_file, open(code_file, 'r') as code_file:
saved_data = pickle_file.read()
_malpickle = insert_code(code_file.read(), saved_data)
pickle_file.truncate(0)
pickle_file.seek(0)
pickle_file.write(_malpickle)
def test_insert_code():
import pickle
shell_code = "echo RCE"
data = ({1 : ['a', None, (.1, 0xff)]}, object, tuple)
saved = pickle.dumps(data)
malpickle = insert_code(shell_code, saved)
output = pickle.loads(malpickle)
assert output == data, (output, data)
if __name__ == "__main__":
#test_insert_code()
main()
| 3
| 3
|
src/redis_lock/django_cache.py
|
suligap/python-redis-lock
| 0
|
12063
|
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django_redis.cache import RedisCache as PlainRedisCache
from redis_lock import Lock
from redis_lock import reset_all
class RedisCache(PlainRedisCache):
@property
def __client(self):
try:
return self.client.get_client()
except Exception as exc:
raise NotImplementedError(
"RedisCache doesn't have a raw client: %r. "
"Use 'redis_cache.client.DefaultClient' as the CLIENT_CLASS !" % exc
)
def lock(self, key, expire=None, id=None):
return Lock(self.__client, key, expire=expire, id=id)
def locked_get_or_set(self, key, value_creator, version=None,
expire=None, id=None, lock_key=None,
timeout=DEFAULT_TIMEOUT):
"""
Fetch a given key from the cache. If the key does not exist, the key is added and
set to the value returned when calling `value_creator`. The creator function
is invoked inside of a lock.
"""
if lock_key is None:
lock_key = 'get_or_set:' + key
val = self.get(key, version=version)
if val is not None:
return val
with self.lock(lock_key, expire=expire, id=id):
# Was the value set while we were trying to acquire the lock?
val = self.get(key, version=version)
if val is not None:
return val
# Nope, create value now.
val = value_creator()
if val is None:
raise ValueError('`value_creator` must return a value')
self.set(key, val, timeout=timeout, version=version)
return val
def reset_all(self):
"""
Forcibly deletes all locks if its remains (like a crash reason). Use this with care.
"""
reset_all(self.__client)
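# Usage sketch (illustrative assumption, not from the original module): with this
# backend configured as Django's cache, an expensive value is computed once under
# a lock:
#   from django.core.cache import cache
#   report = cache.locked_get_or_set('daily-report', build_report, timeout=3600)
#   # build_report is a placeholder callable that must return a non-None value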
| 2.59375
| 3
|
nets/facenet.py
|
QiongWang-l/llfr
| 0
|
12064
|
<filename>nets/facenet.py
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision.models.utils import load_state_dict_from_url
from nets.inception_resnetv1 import InceptionResnetV1
from nets.mobilenet import MobileNetV1
class mobilenet(nn.Module):
def __init__(self, pretrained):
super(mobilenet, self).__init__()
self.model = MobileNetV1()
if pretrained:
state_dict = load_state_dict_from_url("https://github.com/bubbliiiing/facenet-pytorch/releases/download/v1.0/backbone_weights_of_mobilenetv1.pth", model_dir="model_data",
progress=True)
self.model.load_state_dict(state_dict)
del self.model.fc
del self.model.avg
def forward(self, x):
x = self.model.stage1(x)
x = self.model.stage2(x)
x = self.model.stage3(x)
return x
class inception_resnet(nn.Module):
def __init__(self, pretrained):
super(inception_resnet, self).__init__()
self.model = InceptionResnetV1()
if pretrained:
state_dict = load_state_dict_from_url("https://github.com/bubbliiiing/facenet-pytorch/releases/download/v1.0/backbone_weights_of_inception_resnetv1.pth", model_dir="model_data",
progress=True)
self.model.load_state_dict(state_dict)
def forward(self, x):
x = self.model.conv2d_1a(x)
x = self.model.conv2d_2a(x)
x = self.model.conv2d_2b(x)
x = self.model.maxpool_3a(x)
x = self.model.conv2d_3b(x)
x = self.model.conv2d_4a(x)
x = self.model.conv2d_4b(x)
x = self.model.repeat_1(x)
x = self.model.mixed_6a(x)
x = self.model.repeat_2(x)
x = self.model.mixed_7a(x)
x = self.model.repeat_3(x)
x = self.model.block8(x)
return x
class Facenet(nn.Module):
def __init__(self, backbone="mobilenet", dropout_keep_prob=0.5, embedding_size=128, num_classes=None, mode="train", pretrained=False):
super(Facenet, self).__init__()
if backbone == "mobilenet":
self.backbone = mobilenet(pretrained)
flat_shape = 1024
elif backbone == "inception_resnetv1":
self.backbone = inception_resnet(pretrained)
flat_shape = 1792
else:
raise ValueError('Unsupported backbone - `{}`, Use mobilenet, inception_resnetv1.'.format(backbone))
self.avg = nn.AdaptiveAvgPool2d((1,1))
self.Dropout = nn.Dropout(1 - dropout_keep_prob)
self.Bottleneck = nn.Linear(flat_shape, embedding_size,bias=False)
self.last_bn = nn.BatchNorm1d(embedding_size, eps=0.001, momentum=0.1, affine=True)
if mode == "train":
self.classifier = nn.Linear(embedding_size, num_classes)
def forward(self, x):
x = self.backbone(x)
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.Dropout(x)
x = self.Bottleneck(x)
x = self.last_bn(x)
x = F.normalize(x, p=2, dim=1)
return x
def forward_feature(self, x):
x = self.backbone(x)
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.Dropout(x)
x = self.Bottleneck(x)
before_normalize = self.last_bn(x)
x = F.normalize(before_normalize, p=2, dim=1)
return before_normalize, x
def forward_classifier(self, x):
x = self.classifier(x)
return x
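# Usage sketch (illustrative assumption, not part of the original file):
#   net = Facenet(backbone="mobilenet", mode="predict")      # no classifier head
#   emb = net(torch.randn(2, 3, 160, 160))                   # 160x160 input is an assumed size
#   # emb: L2-normalised embeddings of shape (2, 128)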
| 2.375
| 2
|
distributed_social_network/posts/migrations/0003_auto_20190308_2055.py
|
leevtori/CMPUT404-project
| 0
|
12065
|
<filename>distributed_social_network/posts/migrations/0003_auto_20190308_2055.py
# Generated by Django 2.1.7 on 2019-03-08 20:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20190221_0234'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='visiblilty',
new_name='visibility',
),
]
| 1.546875
| 2
|
minify/migrations/0004_auto__del_unique_urlminify_short_url__add_unique_urlminify_short_url_s.py
|
djsan15/url-minifier
| 0
|
12066
|
<reponame>djsan15/url-minifier
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'URLMinify', fields ['short_url']
db.delete_unique(u'minify_urlminify', ['short_url'])
# Adding unique constraint on 'URLMinify', fields ['short_url', 'short_url_domain']
db.create_unique(u'minify_urlminify', ['short_url', 'short_url_domain_id'])
def backwards(self, orm):
# Removing unique constraint on 'URLMinify', fields ['short_url', 'short_url_domain']
db.delete_unique(u'minify_urlminify', ['short_url', 'short_url_domain_id'])
# Adding unique constraint on 'URLMinify', fields ['short_url']
db.create_unique(u'minify_urlminify', ['short_url'])
models = {
u'minify.domain': {
'Meta': {'object_name': 'Domain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'minify.urlminify': {
'Meta': {'unique_together': "(('short_url', 'short_url_domain'),)", 'object_name': 'URLMinify'},
'date_added': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_url': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'long_url_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'long_url_domain'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['minify.Domain']"}),
'short_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'short_url_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'short_url_domain'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['minify.Domain']"})
}
}
complete_apps = ['minify']
| 2.140625
| 2
|
Validation/EcalRecHits/test/EcalTBValidationData_cfg.py
|
pasmuss/cmssw
| 0
|
12067
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("h4ValidData")
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:./ECALH4TB_data_hits.root')
)
process.tbValidData = cms.EDAnalyzer("EcalTBValidation",
rootfile = cms.untracked.string('EcalTBValidationData.root'),
eventHeaderProducer = cms.string('ecalTBunpack'),
hitProducer = cms.string('ecal2006TBWeightUncalibRecHit'),
digiCollection = cms.string(''),
tdcRecInfoCollection = cms.string('EcalTBTDCRecInfo'),
data_ = cms.untracked.int32(0),
digiProducer = cms.string('ecalUnsuppressedDigis'),
xtalInBeam = cms.untracked.int32(1104),
hitCollection = cms.string('EcalUncalibRecHitsEB'),
hodoRecInfoProducer = cms.string('ecal2006TBHodoscopeReconstructor'),
eventHeaderCollection = cms.string(''),
hodoRecInfoCollection = cms.string('EcalTBHodoscopeRecInfo'),
tdcRecInfoProducer = cms.string('ecal2006TBTDCReconstructor')
)
process.p = cms.Path(process.tbValidData)
| 1.5625
| 2
|
database/domains.py
|
changyc9928/Genshin-Discord-Bot
| 0
|
12068
|
import asyncio
from query_graphql import query_artifact_domains, query_weapon_materials_book
class Domains:
leylines = {
"Blossom of Revelation": "Character EXP Materials",
"Blossom of Wealth": "Mora"
}
weapon_domains = {}
talent_domains = {}
artifact_domains = {}
trounce_domains = {
"Wolf of the North Challenge": "Andrius (Lupus Boreas), Dominator of Wolves",
"Beneath the Dragon-Queller": "Azhdaha, Sealed Lord of Vishaps",
"Enter the Golden House": "Childe, Eleventh of the Fatui Harbingers",
"Narukami Island: Tenshukaku": "La Signora (Rosalyne-Kruzchka Lohefalter), The Fair Lady",
"End of the Oneiric Euthymia": "<NAME> no Mikoto, Raiden no Inazuma Tono"
}
world_bosses = {
"Anemo Hypostasis": None,
"Electro Hypostasis": None,
"Cryo Regisvine": None,
"Cryo Hypostasis": None,
"Oceanid": None,
"Pyro Regisvine": None,
"Geo Hypostasis": None,
"Primo Geovishap": None,
"Maguu Kenki": None,
"Pyro Hypostasis": None,
"Perpetual Mechanical Array": None,
"Hydro Hypostasis": None,
"Thunder Manifestation": None,
"Golden Wolflord": None,
"Bathysmal Vishap Herd": None,
"Ruin Serpent": None,
}
@staticmethod
async def initialize():
Domains.artifact_domains = await query_artifact_domains()
        # avoid shadowing the built-in `tuple`
        Domains.weapon_domains, Domains.talent_domains = await query_weapon_materials_book()
Domains.domains = {
"Ley Line Outcrops": Domains.leylines,
"Weapon Ascension Materials": Domains.weapon_domains,
"Talent Books": Domains.talent_domains,
"Artifacts": Domains.artifact_domains,
"Trounce Domains": Domains.trounce_domains,
"World Bosses": Domains.world_bosses
}
| 2.21875
| 2
|
policy.py
|
nyu-dl/dl4mt-simul-trans
| 34
|
12069
|
<reponame>nyu-dl/dl4mt-simul-trans<filename>policy.py
"""
-- Policy Network for decision making [more general]
"""
from nmt_uni import *
from layers import _p
import os
import time, datetime
import cPickle as pkl
# hyper params
TINY = 1e-7
PI = numpy.pi
E = numpy.e
A = 0.2
B = 1
class Controller(object):
def __init__(self, trng,
options,
n_in=None, n_out=None,
recurrent=False, id=None):
self.WORK = options['workspace']
self.trng = trng
self.options = options
self.recurrent = recurrent
self.type = options.get('type', 'categorical')
self.n_hidden = 128
self.n_in = n_in
self.n_out = n_out
if self.options.get('layernorm', True):
self.rec = 'lngru'
else:
self.rec = 'gru'
if not n_in:
self.n_in = options['readout_dim']
if not n_out:
if self.type == 'categorical':
self.n_out = 2 # initially it is a WAIT/COMMIT action.
elif self.type == 'gaussian':
self.n_out = 100
else:
raise NotImplementedError
# build the policy network
print 'parameter initialization'
params = OrderedDict()
if not self.recurrent:
print 'building a feedforward controller'
params = get_layer('ff')[0](options, params, prefix='policy_net_in',
nin=self.n_in, nout=self.n_hidden)
else:
print 'building a recurrent controller'
params = get_layer(self.rec)[0](options, params, prefix='policy_net_in',
nin=self.n_in, dim=self.n_hidden)
params = get_layer('ff')[0](options, params, prefix='policy_net_out',
nin=self.n_hidden,
nout=self.n_out if self.type == 'categorical' else self.n_out * 2)
# bias the forget probability
# if self.n_out == 3:
# params[_p('policy_net_out', 'b')][-1] = -2
# for the baseline network.
params_b = OrderedDict()
# using a scalar baseline [**]
# params_b['b0'] = numpy.array(numpy.random.rand() * 0.0, dtype='float32')
# using a MLP as a baseline
params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_in',
nin=self.n_in, nout=128)
params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_out',
nin=128, nout=1)
if id is not None:
print 'reload the saved model: {}'.format(id)
params = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params)
params_b = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params_b)
else:
id = datetime.datetime.fromtimestamp(time.time()).strftime('%y%m%d-%H%M%S')
print 'start from a new model: {}'.format(id)
self.id = id
self.model = self.WORK + '.policy/{}-{}'.format(id, self.options['base'])
# theano shared params
tparams = init_tparams(params)
tparams_b = init_tparams(params_b)
self.tparams = tparams
self.tparams_b = tparams_b
# build the policy network
self.build_sampler(options=options)
self.build_discriminator(options=options)
print 'policy network'
for p in params:
print p, params[p].shape
def build_batchnorm(self, observation, mask=None):
raise NotImplementedError
def build_sampler(self, options):
# ==================================================================================== #
# Build Action function: samplers
# ==================================================================================== #
observation = tensor.matrix('observation', dtype='float32') # batch_size x readout_dim (seq_steps=1)
prev_hidden = tensor.matrix('p_hidden', dtype='float32')
if not self.recurrent:
hiddens = get_layer('ff')[1](self.tparams, observation,
options, prefix='policy_net_in',
activ='tanh')
else:
hiddens = get_layer(self.rec)[1](self.tparams, observation,
options, prefix='policy_net_in', mask=None,
one_step=True, _init_state=prev_hidden)[0]
act_inps = [observation, prev_hidden]
if self.type == 'categorical':
act_prob = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='softmax') # batch_size x n_out
act_prob2 = tensor.clip(act_prob, TINY, 1 - TINY)
# compiling the sampling function for action
# action = self.trng.binomial(size=act_prop.shape, p=act_prop)
action = self.trng.multinomial(pvals=act_prob).argmax(1) # 0, 1, ...
print 'build action sampling function [Discrete]'
self.f_action = theano.function(act_inps, [action, act_prob, hiddens, act_prob2],
on_unused_input='ignore') # action/dist/hiddens
elif self.type == 'gaussian':
_temp = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='linear'
) # batch_size x n_out
mean, log_std = _temp[:, :self.n_out], _temp[:, self.n_out:]
mean, log_std = -A * tanh(mean), -B-relu(log_std)
action0 = self.trng.normal(size=mean.shape, dtype='float32')
action = action0 * tensor.exp(log_std) + mean
print 'build action sampling function [Gaussian]'
self.f_action = theano.function(act_inps, [action, mean, log_std, hiddens],
on_unused_input='ignore') # action/dist/hiddens
else:
raise NotImplementedError
def build_discriminator(self, options):
# ==================================================================================== #
# Build Action Discriminator
# ==================================================================================== #
observations = tensor.tensor3('observations', dtype='float32')
mask = tensor.matrix('mask', dtype='float32')
if self.type == 'categorical':
actions = tensor.matrix('actions', dtype='int64')
elif self.type == 'gaussian':
actions = tensor.tensor3('actions', dtype='float32')
else:
raise NotImplementedError
if not self.recurrent:
hiddens = get_layer('ff')[1](self.tparams, observations,
options, prefix='policy_net_in',
activ='tanh')
else:
hiddens = get_layer(self.rec)[1](self.tparams, observations,
options, prefix='policy_net_in', mask=mask)[0]
act_inputs = [observations, mask]
if self.type == 'categorical':
act_probs = get_layer('ff')[1](self.tparams, hiddens, options, prefix='policy_net_out',
activ='softmax') # seq_steps x batch_size x n_out
act_probs = tensor.clip(act_probs, TINY, 1 - TINY)
            print 'build action distribution'
self.f_probs = theano.function(act_inputs, act_probs,
on_unused_input='ignore') # get the action probabilities
elif self.type == 'gaussian':
_temps = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='linear'
) # batch_size x n_out
means, log_stds = _temps[:, :, :self.n_out], _temps[:, :, self.n_out:]
means, log_stds = -A * tanh(means), -B-relu(log_stds)
act_probs = [means, log_stds]
print 'build Gaussian PDF'
self.f_pdf = theano.function(act_inputs, [means, log_stds],
on_unused_input='ignore') # get the action probabilities
else:
raise NotImplementedError
# ==================================================================================== #
# Build Baseline Network (Input-dependent Value Function) & Advantages
# ==================================================================================== #
print 'setup the advantages & baseline network'
reward = tensor.matrix('reward') # seq_steps x batch_size :: rewards for each steps
# baseline is estimated with a 2-layer neural network.
hiddens_b = get_layer('ff')[1](self.tparams_b, observations, options,
prefix='baseline_net_in',
activ='tanh')
baseline = get_layer('ff')[1](self.tparams_b, hiddens_b, options,
prefix='baseline_net_out',
activ='linear')[:, :, 0] # seq_steps x batch_size or batch_size
advantages = self.build_advantages(act_inputs, reward, baseline, normalize=True)
# ==================================================================================== #
# Build Policy Gradient (here we provide two options)
# ==================================================================================== #
if self.options['updater'] == 'REINFORCE':
            print 'build REINFORCE.'
self.build_reinforce(act_inputs, act_probs, actions, advantages)
elif self.options['updater'] == 'TRPO':
print 'build TRPO'
self.build_trpo(act_inputs, act_probs, actions, advantages)
else:
raise NotImplementedError
# ==================================================================================== #
# Controller Actions
# ==================================================================================== #
def random(self, states, p=0.5):
live_k = states.shape[0]
return (numpy.random.random(live_k) > p).astype('int64'), \
numpy.ones(live_k) * p
def action(self, states, prevhidden):
return self.f_action(states, prevhidden)
def init_hidden(self, n_samples=1):
return numpy.zeros((n_samples, self.n_hidden), dtype='float32')
def init_action(self, n_samples=1):
states0 = numpy.zeros((n_samples, self.n_in), dtype='float32')
return self.f_action(states0, self.init_hidden(n_samples))
def get_learner(self):
if self.options['updater'] == 'REINFORCE':
return self.run_reinforce
elif self.options['updater'] == 'TRPO':
return self.run_trpo
else:
raise NotImplementedError
@staticmethod
def kl(prob0, prob1):
p1 = (prob0 + TINY) / (prob1 + TINY)
# p2 = (1 - prob0 + TINY) / (1 - prob1 + TINY)
return tensor.sum(prob0 * tensor.log(p1), axis=-1)
@staticmethod
def _grab_prob(probs, X):
assert probs.ndim == 3
batch_size = probs.shape[1]
max_len = probs.shape[0]
vocab_size = probs.shape[2]
probs = probs.reshape((batch_size * max_len, vocab_size))
return probs[tensor.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape) # advanced indexing
def cross(self, probs, actions):
# return tensor.log(probs) * actions + tensor.log(1 - probs) * (1 - actions)
return self._grab_prob(tensor.log(probs), actions)
def build_advantages(self, act_inputs, reward, baseline, normalize=True):
# TODO: maybe we need a discount factor gamma for advantages.
# TODO: we can also rewrite advantages with value functions (GAE)
# Advantages and Normalization the return
reward_adv = reward - baseline
mask = act_inputs[1]
if normalize:
reward_mean = tensor.sum(mask * reward_adv) / tensor.sum(mask)
reward_mean2 = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
reward_std = tensor.sqrt(tensor.maximum(reward_mean2 - reward_mean ** 2, TINY)) + TINY
# reward_std = tensor.maximum(reward_std, 1)
reward_c = reward_adv - reward_mean # independent mean
advantages = reward_c / reward_std
else:
advantages = reward_adv
print 'build advantages and baseline gradient'
L = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
dL = tensor.grad(L, wrt=itemlist(self.tparams_b))
lr = tensor.scalar(name='lr')
inps_b = act_inputs + [reward]
oups_b = [L, advantages]
f_adv, f_update_b = adam(lr, self.tparams_b, dL, inps_b, oups_b)
self.f_adv = f_adv
self.f_update_b = f_update_b
return advantages
# ===================================================================
    # Policy Gradient: REINFORCE with Adam
# ===================================================================
def build_reinforce(self, act_inputs, act_probs, actions, advantages):
mask = act_inputs[1]
if self.type == 'categorical':
negEntropy = tensor.sum(tensor.log(act_probs) * act_probs, axis=-1)
logLikelihood = self.cross(act_probs, actions)
elif self.type == 'gaussian':
means, log_stds = act_probs
negEntropy = -tensor.sum(log_stds + tensor.log(tensor.sqrt(2 * PI * E)), axis=-1)
actions0 = (actions - means) / tensor.exp(log_stds)
logLikelihood = -tensor.sum(log_stds, axis=-1) - \
0.5 * tensor.sum(tensor.sqr(actions0), axis=-1) - \
0.5 * means.shape[-1] * tensor.log(2 * PI)
else:
raise NotImplementedError
# tensor.log(act_probs) * actions + tensor.log(1 - act_probs) * (1 - actions)
H = tensor.sum(mask * negEntropy, axis=0).mean() * 0.001 # penalty
J = tensor.sum(mask * -logLikelihood * advantages, axis=0).mean() + H
dJ = grad_clip(tensor.grad(J, wrt=itemlist(self.tparams)))
print 'build REINFORCE optimizer'
lr = tensor.scalar(name='lr')
inps = act_inputs + [actions, advantages]
outps = [J, H]
if self.type == 'gaussian':
outps += [actions0.mean(), actions.mean()]
f_cost, f_update = adam(lr, self.tparams, dJ, inps, outps)
self.f_cost = f_cost
self.f_update = f_update
print 'done'
def run_reinforce(self, act_inputs, actions, reward, update=True, lr=0.0002):
# sub baseline
inps_adv = act_inputs + [reward]
L, advantages = self.f_adv(*inps_adv)
inps_reinfoce = act_inputs + [actions, advantages]
if self.type == 'gaussian':
J, H, m, s = self.f_cost(*inps_reinfoce)
info = {'J': J, 'G_norm': H, 'B_loss': L, 'Adv': advantages.mean(), 'm': m, 's': s}
else:
J, H = self.f_cost(*inps_reinfoce)
info = {'J': J, 'Entropy': H, 'B_loss': L, 'Adv': advantages.mean()}
info['advantages'] = advantages
if update: # update the parameters
self.f_update_b(lr)
self.f_update(lr)
return info
# ==================================================================================== #
# Trust Region Policy Optimization
# ==================================================================================== #
def build_trpo(self, act_inputs, act_probs, actions, advantages):
        assert self.type == 'categorical', 'TRPO is only supported for categorical policies at this stage'
# probability distribution
mask = act_inputs[1]
probs = act_probs
probs_old = tensor.matrix(dtype='float32')
logp = self.cross(probs, actions)
logp_old = self.cross(probs_old, actions)
# policy gradient
J = tensor.sum(mask * -tensor.exp(logp - logp_old) * advantages, axis=0).mean()
dJ = flatgrad(J, self.tparams)
probs_fix = theano.gradient.disconnected_grad(probs)
kl_fix = tensor.sum(mask * self.kl(probs_fix, probs), axis=0).mean()
kl_grads = tensor.grad(kl_fix, wrt=itemlist(self.tparams))
ftangents = tensor.fvector(name='flat_tan')
shapes = [self.tparams[var].get_value(borrow=True).shape for var in self.tparams]
start = 0
tangents = []
for shape in shapes:
size = numpy.prod(shape)
tangents.append(tensor.reshape(ftangents[start:start + size], shape))
start += size
gvp = tensor.add(*[tensor.sum(g * t) for (g, t) in zipsame(kl_grads, tangents)])
        # Fisher-vector product
fvp = flatgrad(gvp, self.tparams)
entropy = tensor.sum(mask * -self.cross(probs, probs), axis=0).mean()
kl = tensor.sum(mask * self.kl(probs_old, probs), axis=0).mean()
print 'compile the functions'
inps = act_inputs + [actions, advantages, probs_old]
loss = [J, kl, entropy]
self.f_pg = theano.function(inps, dJ)
self.f_loss = theano.function(inps, loss)
self.f_fisher = theano.function([ftangents] + inps, fvp, on_unused_input='ignore')
# get/set flatten params
        print 'compiling flat updater'
self.get_flat = theano.function([], tensor.concatenate([self.tparams[v].flatten() for v in self.tparams]))
theta = tensor.vector()
start = 0
updates = []
for v in self.tparams:
p = self.tparams[v]
shape = p.shape
size = tensor.prod(shape)
updates.append((p, theta[start:start + size].reshape(shape)))
start += size
self.set_flat = theano.function([theta], [], updates=updates)
def run_trpo(self, act_inputs, actions, reward,
update=True, cg_damping=1e-3, max_kl=1e-2, lr=0.0002):
# sub baseline
inps_adv = act_inputs + [reward]
L, advantages = self.f_adv(*inps_adv)
self.f_update_b(lr)
# get current action distributions
probs = self.f_probs(*act_inputs)
inps = act_inputs + [actions, advantages, probs]
thprev = self.get_flat()
def fisher_vector_product(p):
return self.f_fisher(p, *inps) + cg_damping * p
g = self.f_pg(*inps)
losses_before = self.f_loss(*inps)
if numpy.allclose(g, 0):
print 'zero gradient, not updating'
else:
stepdir = self.cg(fisher_vector_product, -g)
shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
lm = numpy.sqrt(shs / max_kl)
print "\nlagrange multiplier:", lm, "gnorm:", numpy.linalg.norm(g)
fullstep = stepdir / lm
neggdotstepdir = -g.dot(stepdir)
def loss(th):
self.set_flat(th)
return self.f_loss(*inps)[0]
print 'do line search'
success, theta = self.linesearch(loss, thprev, fullstep, neggdotstepdir / lm)
print "success", success
self.set_flat(theta)
losses_after = self.f_loss(*inps)
info = OrderedDict()
for (lname, lbefore, lafter) in zipsame(['J', 'KL', 'entropy'], losses_before, losses_after):
info[lname + "_before"] = lbefore
info[lname + "_after"] = lafter
# add the baseline loss into full information
info['B_loss'] = L
return info
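    # Editor's note (not part of the original code): with shs = 0.5 * s^T H s and
    # lm = sqrt(shs / max_kl), the update fullstep = s / lm equals s * sqrt(2 * max_kl / (s^T H s)),
    # i.e. the conjugate-gradient direction rescaled so that the quadratic approximation of the
    # KL divergence, 0.5 * step^T H step, equals max_kl before the backtracking line search.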
@staticmethod
def linesearch(f, x, fullstep, expected_improve_rate, max_backtracks=10, accept_ratio=.1):
"""
Backtracking linesearch, where expected_improve_rate is the slope dy/dx at the initial point
"""
fval = f(x)
print "fval before", fval
for (_n_backtracks, stepfrac) in enumerate(.5 ** numpy.arange(max_backtracks)):
xnew = x + stepfrac * fullstep
newfval = f(xnew)
actual_improve = fval - newfval
expected_improve = expected_improve_rate * stepfrac
ratio = actual_improve / expected_improve
print "a/e/r", actual_improve, expected_improve, ratio
if ratio > accept_ratio and actual_improve > 0:
print "fval after", newfval
return True, xnew
return False, x
@staticmethod
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
        Conjugate Gradient
"""
p = b.copy()
r = b.copy()
x = numpy.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print titlestr % ("iter", "residual norm", "soln norm")
for i in xrange(cg_iters):
if callback is not None:
callback(x)
if verbose: print fmtstr % (i, rdotr, numpy.linalg.norm(x))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v * p
r -= v * z
newrdotr = r.dot(r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print fmtstr % (i + 1, rdotr, numpy.linalg.norm(x))
return x
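    # Editor's note (not part of the original code): cg approximately solves the linear system
    # A x = b, where A is available only through the matrix-vector product f_Ax. In run_trpo
    # above, f_Ax is the damped Fisher-vector product, so cg returns the natural-gradient
    # direction H^-1 g without ever forming the Fisher matrix H explicitly.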
# ====================================================================== #
# Save & Load
# ====================================================================== #
def save(self, history, it):
_params = OrderedDict()
_params = unzip(self.tparams, _params)
_params = unzip(self.tparams_b, _params)
print 'save the policy network >> {}'.format(self.model)
numpy.savez('%s.current' % (self.model),
history=history,
it=it,
**_params)
numpy.savez('{}.iter={}'.format(self.model, it),
history=history,
it=it,
**_params)
def load(self):
if os.path.exists(self.model):
print 'loading from the existing model (current)'
rmodel = numpy.load(self.model)
history = rmodel['history']
it = rmodel['it']
self.params = load_params(rmodel, self.params)
self.params_b = load_params(rmodel, self.params_b)
self.tparams = init_tparams(self.params)
self.tparams_b = init_tparams(self.params_b)
print 'the dataset need to go over {} lines'.format(it)
return history, it
else:
return [], -1
| 2.546875
| 3
|
src/app/drivers/pycolator/splitmerge.py
|
husensofteng/msstitch
| 0
|
12070
|
<gh_stars>0
from app.drivers.pycolator import base
from app.actions.pycolator import splitmerge as preparation
from app.readers import pycolator as readers
from app.drivers.options import pycolator_options
class SplitDriver(base.PycolatorDriver):
outfile = None
def run(self):
self.set_filter_types()
for filter_type, suffix in self.filter_types:
self.prepare()
self.set_features(filter_type)
self.outsuffix = suffix
self.write()
self.finish()
def set_options(self):
"""Since splitdriver splits into multiple files we cannot set an
output file"""
super().set_options()
del(self.options['-o'])
def set_features(self, filter_type):
"""Calls splitter to split percolator output into target/decoy
elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = self.splitfunc(elements_to_split, self.ns, filter_type)
class SplitTDDriver(SplitDriver):
command = 'splittd'
commandhelp = ('Splits target and decoy data, producing 2 output files')
def set_filter_types(self):
self.filter_types = [('target', '_target.xml'),
('decoy', '_decoy.xml')]
def set_features(self, filter_type):
self.splitfunc = preparation.split_target_decoy
super().set_features(filter_type)
class SplitProteinDriver(SplitDriver):
command = 'splitprotein'
    commandhelp = ('Splits input XML into multiple files based on '
'the protein headers specified. Each header class gets '
'its own output file')
def set_filter_types(self):
maxdigits = len(str(len(self.protheaders)))
self.filter_types = [(headers, '_h{i:0{dig}d}.xml'.format(
i=ix, dig=maxdigits))
for ix, headers in enumerate(self.protheaders)]
def set_features(self, filter_type):
self.splitfunc = preparation.split_protein_header_id_type
super().set_features(filter_type)
def set_options(self):
super().set_options()
options = self.define_options(['protheaders'], pycolator_options)
self.options.update(options)
class MergeDriver(base.PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from
multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
command = 'merge'
commandhelp = 'Merges percolator xml files, nothing else.'
def parse_input(self, **kwargs):
super().parse_input(**kwargs)
self.mergefiles = self.fn[:]
self.fn = self.fn[0]
def set_options(self):
super().set_options()
options = self.define_options(['multifiles'], pycolator_options)
self.options.update(options)
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
    def set_features(self):
        """Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps = preparation.merge_peptides(self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps}
| 2.734375
| 3
|
ejercicio 14.py
|
Davidpadilla1234/taller_estructura-secuencial
| 0
|
12071
|
"""
Inputs:
current reading ---> float ---> lect2
previous reading ---> float ---> lect1
price per kilowatt ---> float ---> valorkw
Outputs:
consumption ---> float ---> consumo
total bill ---> float ---> total
"""
lect2 = float(input("Enter the current reading: "))
lect1 = float(input("Enter the previous reading: "))
valorkw = float(input("Price per kilowatt: "))
consumo = lect2 - lect1
total = consumo * valorkw
print("The amount to pay is: " + str(total))
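# Worked example (editor's note, illustrative values only): with lect2 = 1500.0,
# lect1 = 1200.0 and valorkw = 450.0, consumo = 1500.0 - 1200.0 = 300.0 and
# total = 300.0 * 450.0 = 135000.0, so the script prints "The amount to pay is: 135000.0".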
| 3.765625
| 4
|
src/workers/correct.py
|
brainsqueeze/Image_correction
| 10
|
12072
|
# __author__ = 'Dave'
import cv2
from skimage import io
from skimage.transform import probabilistic_hough_line
import matplotlib.pyplot as plt
import os
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore', category=RuntimeWarning)
class CorrectImage(object):
def __init__(self):
self.path = ""
self.name = ""
self.image = None
self.edges = None
self.lines = None
def _load_image(self, image):
"""
:param image: image file name (str)
:return: skimage image data
"""
filename = os.path.join(self.path, image)
return io.imread(filename)
def add_path(self, image_path):
"""
        Sets the directory that contains the image files
:param image_path: (string)
"""
self.path = image_path + '/'
def add_image(self, filename):
"""
        Sets the image file name and runs the Hough transform on it
:param filename: (string)
"""
self.name = filename
self.hough_transform()
def _detect_edges(self, image, vary=False, plot=False):
"""
:param image: image file name (str)
:param vary: turn tunable plotting on
:param plot: turn plotting on
:return: detected edges with variable filters
"""
self.image = self._load_image(image)
if vary:
def nothing(x):
pass
cv2.namedWindow('image')
cv2.createTrackbar('th1', 'image', 0, 255, nothing)
cv2.createTrackbar('th2', 'image', 0, 255, nothing)
while True:
th1 = cv2.getTrackbarPos('th1', 'image')
th2 = cv2.getTrackbarPos('th2', 'image')
edges = cv2.Canny(self.image, th1, th2)
cv2.imshow('image', edges)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
edges = cv2.Canny(self.image, 255, 255)
if plot:
cv2.namedWindow('image')
cv2.imshow('image', edges)
cv2.waitKey(5000)
cv2.destroyAllWindows()
return edges
def hough_transform(self, vary=False, plot=False):
"""
:param vary: turn edge detection tunable plotting on
:param plot: turn plotting on
:return: numpy array of probabilistically found straight lines
"""
if self.name == "":
raise ValueError('Missing image: you need to specify the image file using add_image.')
self.edges = self._detect_edges(self.name, vary=vary, plot=plot)
self.lines = probabilistic_hough_line(self.edges, threshold=10, line_length=5, line_gap=3)
if plot:
for line in self.lines:
p0, p1 = line
plt.plot((p0[0], p1[0]), (p0[1], p1[1]))
plt.show()
@staticmethod
def slope(lines):
"""
:param lines: array of coordinates (ie. [((x0, y0), (xf, yf)), ...]
:return: array of slope values with the same number of entries as lines
"""
# for doing vectorized subtraction across all line pairs,
# we need the first line of each pair to be the negative of itself
sign_op = np.ones_like(lines)
sign_op[:, :, 0] *= -1
# get the differences between x and y coordinates (start, end), respectively
slopes = np.sum(sign_op * lines, axis=2)
# compute the slopes of each line for every line pair
slopes = slopes[:, :, 0] / slopes[:, :, 1]
# turn infinite values to a finite, but very large value
slopes[np.isinf(slopes)] = 1e6
# this catches cases when the line - as defined - is actually a point and the slope doesn't exist
slopes[np.isnan(slopes)] = 0
return slopes
def line_pair(self, num_pairs):
"""
:param num_pairs: number of line pairs to take (int)
:return: line pairs (array)
"""
idx = np.random.randint(len(self.lines), size=num_pairs * 2)
lines = np.array(self.lines)[idx]
return lines.reshape(num_pairs, 2, 2, 2)
@staticmethod
def mutation(pairs, p_mutate=0.01):
"""
:param pairs: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines
:param p_mutate: (float) probability of a mutation
:return: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines with mutations
"""
for i in range(len(pairs)):
if p_mutate > random.random():
# column = np.random.randint(low=0, high=2)
for column in [0, 1]:
t = pairs[i, :, :, column]
low, high = np.min(t), np.max(t)
if high == low:
high *= 2
pairs[i, :, :, column] = np.random.randint(low=low, high=high, size=t.shape)
return pairs
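# Minimal usage sketch (editor's addition; 'images' and 'scan.png' are placeholder names):
# corrector = CorrectImage()
# corrector.add_path('images')
# corrector.add_image('scan.png')  # loads the file and runs the Hough transform
# pairs = corrector.line_pair(num_pairs=10)
# slopes = CorrectImage.slope(pairs)  # slopes of each line in every sampled pair
# mutated = CorrectImage.mutation(pairs, p_mutate=0.01)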
| 2.78125
| 3
|
plot_scripts/CC_timeline_plot.py
|
idunnam/Thesis
| 0
|
12073
|
<filename>plot_scripts/CC_timeline_plot.py
"""
This code is used for plotting individual timelines of seasonal CC for each CMIP5 and CMIP6 model
"""
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import seaborn as sns
import pandas as pd
#=== Import SEB Anomalies ====
#from seasonal_SEB_components import *
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_ACCESS.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_HADGEM.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CSIRO.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_IPSL.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_MIROC5.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_NORESM.nc')
#CMIP6
CESM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CESM.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CNRM_CM6.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CNRM_ESM2.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_MRI.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_UKMO.nc')
fig, axs = plt.subplots(1,2, sharey = True, figsize=(30, 10))
axs[0].plot(ACCESS.CC.year, ACCESS.CC.mean(dim=["X10_105","Y21_199"]), label='ACCESS')
axs[0].plot(HADGEM.CC.year, HADGEM.CC.mean(dim=["X10_105","Y21_199"]),label='HADGEM')
axs[0].plot(IPSL.CC.year, IPSL.CC.mean(dim=["X10_105","Y21_199"]),label='IPSL')
axs[0].plot(MIROC5.CC.year, MIROC5.CC.mean(dim=["X10_105","Y21_199"]),label='MIROC5')
axs[0].plot(NORESM.CC.year, NORESM.CC.mean(dim=["X10_105","Y21_199"]),label='NORESM')
axs[0].plot(CSIRO.CC.year, CSIRO.CC.mean(dim=["X10_105","Y21_199"]),label='CSIRO')
axs[0].legend(loc='upper left')
axs[0].set_xlabel('year')
axs[0].set_ylabel('CC')
axs[0].set_title('Cloud Cover - CMIP5 Models')
axs[1].plot(CESM.CC.year, CESM.CC.mean(dim=["X10_105","Y21_199"]), label='CESM')
axs[1].plot(CNRM_CM6.CC.year, CNRM_CM6.CC.mean(dim=["X10_105","Y21_199"]),label='CNRM_CM6')
axs[1].plot(CNRM_ESM2.CC.year, CNRM_ESM2.CC.mean(dim=["X10_105","Y21_199"]),label='CNRM_ESM2')
axs[1].plot(MRI.CC.year, MRI.CC.mean(dim=["X10_105","Y21_199"]),label='MRI')
axs[1].plot(UKMO.CC.year, UKMO.CC.mean(dim=["X10_105","Y21_199"]),label='UKMO')
axs[1].legend(loc='upper left')
axs[1].set_xlabel('year')
axs[1].set_ylabel('CC')
axs[1].set_title('Cloud Cover - CMIP6 Models')
sns.set_palette('colorblind')
plt.savefig('CC_test_2.png')
plt.show()
| 2.265625
| 2
|
manage.py
|
xinbingliang/dockertest
| 30
|
12074
|
<gh_stars>10-100
# manage.py
import unittest
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from skeleton.server import app, db
from skeleton.server.models import User
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without coverage."""
tests = unittest.TestLoader().discover('tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return 1
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(User(email='<EMAIL>', password='<PASSWORD>', admin=True))
db.session.commit()
@manager.command
def create_data():
"""Creates sample data."""
pass
if __name__ == '__main__':
manager.run()
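# Typical invocations (editor's note; Flask-Script dispatches on the command name):
# python manage.py create_db
# python manage.py create_admin
# python manage.py test
# python manage.py db migrate && python manage.py db upgrade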
| 2.515625
| 3
|
client/middleware.py
|
uktrade/directory-forms-api
| 0
|
12075
|
<filename>client/middleware.py
import sigauth.middleware
import sigauth.helpers
from client import helpers
class SignatureCheckMiddleware(sigauth.middleware.SignatureCheckMiddlewareBase):
secret = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request_checker = helpers.RequestSignatureChecker(self.secret)
def should_check(self, request):
if request.resolver_match.namespace in [
'admin', 'healthcheck', 'authbroker_client'
] or request.path_info.startswith('/admin/login'):
return False
return super().should_check(request)
| 2.15625
| 2
|
scripts/prepare-kernel-headers.py
|
sonicyang/mctest
| 4
|
12076
|
<reponame>sonicyang/mctest
import os
import subprocess
import errno
import shutil
import re
import sys
kernel_path = ''
install_path = ''
patch_rules = []
arch = ''
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def patch_rule_append(find_pattern, replace):
global patch_rules
patch_rules.append((find_pattern, replace))
def file_patch(infile):
with open(infile, 'r') as f:
lines = f.readlines()
with open(infile, 'w') as f:
global patch_rules
for line in lines:
for rule in patch_rules:
line = re.sub(rule[0], rule[1], line)
f.write(line)
def header_check(header):
global arch
unrelated_header_types =['drivers', 'tools', 'scripts', 'security',
'sound', 'drm', 'kvm', 'xen', 'scsi', 'video']
# skip unrelated architecture
arch_path = 'arch/' + arch
if 'arch/' in header and not arch_path in header:
return False
for h in unrelated_header_types:
if h in header:
return False
return True
def file_patch_and_install(src_path):
global kernel_path
global install_path
relative_path = src_path.split(kernel_path)[1]
file = relative_path.rsplit('/')[-1]
relative_dir = relative_path.split(file)[0]
dest_dir = install_path + relative_dir
if header_check(dest_dir) == False:
return
mkdir_p(dest_dir)
shutil.copy2(src_path, dest_dir)
dest_path = dest_dir + file
file_patch(dest_path)
def main():
"""Main function."""
argv = sys.argv
assert len(argv) == 4, 'Invalid arguments'
global kernel_path
global install_path
global arch
kernel_path = argv[1]
install_path = argv[2]
arch = argv[3]
    # avoid the conflict with the 'new' operator in C++
patch_rule_append('new', 'anew')
# TODO: Add "extern "C"" to function declaration in string_64.h
    # when we want to compile a module with C++ code.
if 'x86' in arch:
patch_rule_append('void \*memset\(void \*s, int c, size_t n\)\;',
'extern \"C\" {\nvoid *memset(void *s, int c, size_t n);')
patch_rule_append('int strcmp\(const char \*cs, const char \*ct\);',
'int strcmp(const char *cs, const char *ct);}')
# wrap the declaration of extern function with extern "C"
# e.g. extern void func(void); => extern "C" {void func(void);}
def wrapped_with_externC(matched):
func = matched.group(0).split('extern')[1]
return 'extern \"C\" {' + func + '}'
pattern = re.compile(r'^extern\s*[\w_][\w\d_]*[\s\*]*[\w_][\w\d_]*\(.*\);$')
patch_rule_append(pattern, wrapped_with_externC)
# avoid duplicated keyword definition
# e.g. typedef _Bool bool;
# => #ifndef __cplusplus
# typedef _Bool bool;
# #endif
def wrapped_with_ifndef_cpluscplus_macro(matched):
line = matched.group(0)
return '#ifndef __cplusplus\n' + line + '\n#endif\n'
pattern = re.compile(r'^\s*typedef.*\s*(false|true|bool);$')
patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)
pattern = re.compile(r'^\s*(false|true|bool)\s*=.*$')
patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)
# Use find command to find out all headers
    find_cmd = 'find -L ' + kernel_path + ' -name "*.h"'
proc = subprocess.Popen(find_cmd, shell = True, stdout = subprocess.PIPE)
lines = proc.stdout.readlines()
for line in lines:
if line == '':
break
# Remove the newline character
src = line.replace('\n', "")
file_patch_and_install(src)
if __name__ == '__main__':
sys.exit(main())
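# Usage sketch (editor's note; the paths below are placeholders):
# python prepare-kernel-headers.py /path/to/kernel-source /path/to/header-install-dir x86
# i.e. argv[1] = kernel source tree, argv[2] = install prefix, argv[3] = target architecture.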
| 2.234375
| 2
|
model/loss.py
|
Daipuwei/YOLO-tf2
| 0
|
12077
|
# -*- coding: utf-8 -*-
# @Time    : 2021/9/18 11:19 PM
# @Author : DaiPuWei
# @Email : <EMAIL>
# @File : loss.py
# @Software: PyCharm
"""
This script defines the loss function of the YOLO models. The classification loss currently
supports label smoothing; the localization loss supports MSE, GIoU Loss, DIoU Loss and CIoU Loss.
"""
import math
import tensorflow as tf
from tensorflow.keras import backend as K
# ---------------------------------------------------#
#   Label smoothing
# ---------------------------------------------------#
def _smooth_labels(y_true, label_smoothing):
num_classes = tf.cast(K.shape(y_true)[-1], dtype=K.floatx())
label_smoothing = K.constant(label_smoothing, dtype=K.floatx())
return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes
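# Worked example (editor's note, values are illustrative): for a one-hot target [0, 1, 0]
# and label_smoothing = 0.1, _smooth_labels returns [0, 1, 0] * 0.9 + 0.1 / 3,
# i.e. approximately [0.0333, 0.9333, 0.0333].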
# ---------------------------------------------------#
#   Decode each prediction feature layer into real-scale values
# ---------------------------------------------------#
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
num_anchors = len(anchors)
# ---------------------------------------------------#
# [1, 1, 1, num_anchors, 2]
# ---------------------------------------------------#
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
# ---------------------------------------------------#
    #   Build the x,y grid
# (13, 13, 1, 2)
# ---------------------------------------------------#
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
# ---------------------------------------------------#
    #   Reshape the raw predictions to (batch_size,13,13,3,85)
    #   85 splits into 4 + 1 + 80:
    #   4  -> box centre / width / height adjustment parameters
    #   1  -> objectness confidence of the box
    #   80 -> class confidences
# ---------------------------------------------------#
feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
# ---------------------------------------------------#
    #   Decode the predictions into real-scale values
    #   box_xy is the box centre
    #   box_wh is the box width and height
# ---------------------------------------------------#
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
# ---------------------------------------------------------------------#
    #   When computing the loss, return grid, feats, box_xy, box_wh
    #   When predicting, return box_xy, box_wh, box_confidence, box_class_probs
# ---------------------------------------------------------------------#
if calc_loss == True:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
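# The decoding above, written out (editor's note): for raw network outputs (tx, ty, tw, th),
#   box_xy = (sigmoid(tx, ty) + grid) / grid_shape   (box centre, normalized to [0, 1])
#   box_wh = exp(tw, th) * anchors / input_shape     (box width/height, normalized to [0, 1])
# with grid_shape and input_shape taken in (w, h) order via the [..., ::-1] reversal.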
# ---------------------------------------------------#
#   Compute the IoU between each predicted box and the ground-truth boxes
# ---------------------------------------------------#
def box_iou(b_true, b_pred):
# 13,13,3,1,4
    # compute the top-left and bottom-right corners
b_true = K.expand_dims(b_true, -2)
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
# 1,n,4
    # compute the top-left and bottom-right corners
b_pred = K.expand_dims(b_pred, 0)
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
    # compute the overlap area
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
iou = intersect_area / (b_true_area + b_pred_area - intersect_area)
return iou
def box_giou(b_true, b_pred):
"""
Calculate GIoU loss on anchor boxes
Reference Paper:
"Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression"
https://arxiv.org/abs/1902.09630
Parameters
----------
b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
Returns
-------
giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b_true_area + b_pred_area - intersect_area
# calculate IoU, add epsilon in denominator to avoid dividing by 0
iou = intersect_area / (union_area + K.epsilon())
# get enclosed area
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
# calculate GIoU, add epsilon in denominator to avoid dividing by 0
giou = iou - 1.0 * (enclose_area - union_area) / (enclose_area + K.epsilon())
giou = K.expand_dims(giou, -1)
return giou
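# GIoU written out (editor's note), with C the smallest enclosing box and U the union:
#   GIoU = IoU - (|C| - |U|) / |C|
# which is what the code above computes, with K.epsilon() added to avoid division by zero.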
def box_diou(b_true, b_pred,use_ciou_loss=False):
"""
    Inputs
    ----------
    b_true: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    b_pred: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    Returns
    -------
    diou (or ciou when use_ciou_loss=True): tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
    # top-left and bottom-right corners of the ground-truth boxes
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
    # top-left and bottom-right corners of the predicted boxes
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
    # IoU between the ground-truth and predicted boxes
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b1_area + b_pred_area - intersect_area
iou = intersect_area / K.maximum(union_area, K.epsilon())
    # squared distance between the box centres
center_distance = K.sum(K.square(b_true_xy - b_pred_xy), axis=-1)
    # corners of the smallest box enclosing both boxes
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
    # squared diagonal length of the enclosing box
enclose_diagonal = K.sum(K.square(enclose_wh), axis=-1)
diou = iou - 1.0 * (center_distance) / K.maximum(enclose_diagonal, K.epsilon())
if use_ciou_loss:
v = 4 * K.square(tf.math.atan2(b_true_wh[..., 0], K.maximum(b_true_wh[..., 1], K.epsilon()))
- tf.math.atan2(b_pred_wh[..., 0],K.maximum(b_pred_wh[..., 1],K.epsilon()))) / (math.pi * math.pi)
        # a trick: here we add a non-gradient coefficient w^2+h^2 to v to customize its back-propagation,
# to match related description for equation (12) in original paper
#
#
# v'/w' = (8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (h/(w^2+h^2)) (12)
# v'/h' = -(8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (w/(w^2+h^2))
#
        # The denominator w^2+h^2 is usually a small value for the cases
# h and w ranging in [0; 1], which is likely to yield gradient
        # explosion. And thus in our implementation, the denominator
# w^2+h^2 is simply removed for stable convergence, by which
# the step size 1/(w^2+h^2) is replaced by 1 and the gradient direction
# is still consistent with Eqn. (12).
v = v * tf.stop_gradient(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] * b_pred_wh[..., 1])
alpha = v / K.maximum((1.0 - iou + v), K.epsilon())
diou = diou - alpha * v
diou = K.expand_dims(diou, -1)
diou = tf.where(tf.math.is_nan(diou), tf.zeros_like(diou), diou)
return diou
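# DIoU/CIoU written out (editor's note), with rho the distance between box centres and c the
# diagonal of the smallest enclosing box:
#   DIoU = IoU - rho^2 / c^2
#   CIoU = DIoU - alpha * v,  v = (4 / pi^2) * (arctan(w_gt / h_gt) - arctan(w / h))^2
# matching the computation above (with the stop-gradient trick applied to v for stability).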
# ---------------------------------------------------#
#   Loss computation
# ---------------------------------------------------#
def yolo_loss(args, anchors,num_classes,ignore_threshold=.5,label_smoothing=0.1,
use_giou_loss=False,use_diou_loss=False,use_ciou_loss=False,normalize=True,model_name='yolov3'):
    # choose the anchor mask and the number of output layers for the given YOLO variant
if model_name == "yolov3": # yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov3-spp': # yolov3-spp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4': # yolov4
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4-csp': # yolov4-csp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4-p5': # yolov4-p5
anchor_mask = [[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 3
elif model_name == 'yolov4-p6': # yolov4-p6
anchor_mask = [[12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 4
elif model_name == 'yolov4-p7': # yolov4-p7
anchor_mask = [[16, 17, 18, 19], [12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 5
elif model_name == 'yolov3-tiny': # yolov3-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
elif model_name == 'yolov4-tiny': # yolov4-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
    else: # default to yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
    # separate predictions from ground truth; args is [*model_body.output, *y_true]
y_true = args[num_layers:]
yolo_outputs = args[:num_layers]
    # derive the input shape and the grid shapes for the given YOLO variant
if model_name == "yolov3": # yolov3
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov3-spp': # yolov3-spp
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4': # yolov4
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-csp': # yolov4-csp
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-p5': # yolov4-p5
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-p6': # yolov4-p6
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*64, K.dtype(y_true[0]))
elif model_name == 'yolov4-p7': # yolov4-p7
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*128, K.dtype(y_true[0]))
elif model_name == 'yolov3-tiny': # yolov3-tiny
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-tiny': # yolov4-tiny
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
    else: # default to yolov3
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[l])) for l in range(num_layers)]
loss = 0
num_pos = 0
m = K.shape(yolo_outputs[0])[0]
mf = K.cast(m, K.dtype(yolo_outputs[0]))
for l in range(num_layers):
# -----------------------------------------------------------#
        #   Taking the first feature layer (m,13,13,3,85) as an example,
        #   extract the positions in this layer where an object exists. (m,13,13,3,1)
# -----------------------------------------------------------#
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
        if label_smoothing: # apply label smoothing
true_class_probs = _smooth_labels(true_class_probs, label_smoothing)
# -----------------------------------------------------------#
        #   Process this feature layer of yolo_outputs to obtain four return values:
        #   grid      -> grid coordinates
        #   raw_pred  -> raw, undecoded predictions
        #   pred_xy   -> decoded centre coordinates
        #   pred_wh   -> decoded width/height
# -----------------------------------------------------------#
grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
        # pred_box is the decoded predicted box
pred_box = K.concatenate([pred_xy, pred_wh])
# -----------------------------------------------------------#
        #   Collect the negative samples; the first step is to create a (dynamic) array
# -----------------------------------------------------------#
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
        # compute the ignore_mask for each image
def loop_body(b, ignore_mask):
            # take the n ground-truth boxes: (n, 4)
true_box = tf.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0])
# -----------------------------------------------------------#
            #   Compute the IoU between the predicted and ground-truth boxes
            #   pred_box -> predicted box coordinates
            #   true_box -> ground-truth box coordinates
            #   iou      -> IoU between predicted and ground-truth boxes
# -----------------------------------------------------------#
iou = box_iou(pred_box[b], true_box)
            # best_iou is the maximum overlap of each feature point with any ground-truth box
best_iou = K.max(iou, axis=-1)
# -----------------------------------------------------------#
            #   If the best IoU of a predicted box with the ground truth is below ignore_threshold,
            #   the predicted box is considered to have no matching ground-truth box.
            #   The purpose of this step is to ignore feature points whose predictions already
            #   match a ground-truth box closely: those boxes are fairly accurate already and
            #   should not be treated as negative samples.
# -----------------------------------------------------------#
ignore_mask = ignore_mask.write(b, K.cast(best_iou < ignore_threshold, K.dtype(true_box)))
return b + 1, ignore_mask
        # loop over every image in the batch
_, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
        # ignore_mask selects the feature points that are used as negative samples
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
        # the larger the ground-truth box, the smaller its weight; small boxes get a larger weight
box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
# ------------------------------------------------------------------------------#
        #   If a box exists at this position, compute the cross-entropy between 1 and the confidence.
        #   If no box exists at this position, compute the cross-entropy between 0 and the confidence.
        #   Part of the samples are ignored here, namely those that satisfy best_iou < ignore_threshold.
        #   The purpose of this step is to ignore feature points whose predictions already
        #   match a ground-truth box closely: those boxes are fairly accurate already and
        #   should not be treated as negative samples.
# ------------------------------------------------------------------------------#
confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) + \
(1 - object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 4:5],
from_logits=True) * ignore_mask
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 5:], from_logits=True)
        # choose the localization loss according to the flags
        if use_giou_loss: # GIoU loss
raw_true_box = y_true[l][..., 0:4]
giou = box_giou(raw_true_box, pred_box)
giou_loss = object_mask * box_loss_scale * (1 - giou)
giou_loss = K.sum(giou_loss)
location_loss = giou_loss
        elif use_diou_loss: # DIoU loss
raw_true_box = y_true[l][..., 0:4]
diou = box_diou(pred_box, raw_true_box, use_ciou_loss=False)
diou_loss = object_mask * box_loss_scale * (1 - diou)
location_loss = diou_loss
        elif use_ciou_loss: # CIoU loss
raw_true_box = y_true[l][..., 0:4]
ciou = box_diou(pred_box, raw_true_box,use_ciou_loss=True)
ciou_loss = object_mask * box_loss_scale * (1 - ciou)
location_loss = ciou_loss
        else: # standard YOLOv3 bounding-box localization loss
# Standard YOLOv3 location loss
# K.binary_crossentropy is helpful to avoid exp overflow.
raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2],
from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh - raw_pred[..., 2:4])
xy_loss = K.sum(xy_loss)
wh_loss = K.sum(wh_loss)
location_loss = xy_loss + wh_loss
location_loss = K.sum(location_loss)
confidence_loss = K.sum(confidence_loss)
class_loss = K.sum(class_loss)
        # count the positive samples
num_pos += tf.maximum(K.sum(K.cast(object_mask, tf.float32)), 1)
loss += location_loss + confidence_loss + class_loss
loss = K.expand_dims(loss, axis=-1)
    # compute the final YOLO model loss
if normalize:
loss = loss / num_pos
else:
loss = loss / mf
return loss
| 2.78125
| 3
|
electrum_vtc/tests/test_lnpeer.py
|
samdisk11/electrum
| 0
|
12078
|
import asyncio
import tempfile
from decimal import Decimal
import os
from contextlib import contextmanager
from collections import defaultdict
import logging
import concurrent
from concurrent import futures
import unittest
from typing import Iterable, NamedTuple, Tuple, List, Dict
from aiorpcx import TaskGroup, timeout_after, TaskTimeout
import electrum_vtc as electrum
import electrum_vtc.trampoline
from electrum_vtc import bitcoin
from electrum_vtc import constants
from electrum_vtc.network import Network
from electrum_vtc.ecc import ECPrivkey
from electrum_vtc import simple_config, lnutil
from electrum_vtc.lnaddr import lnencode, LnAddr, lndecode
from electrum_vtc.bitcoin import COIN, sha256
from electrum_vtc.util import bh2u, create_and_start_event_loop, NetworkRetryManager, bfh
from electrum_vtc.lnpeer import Peer, UpfrontShutdownScriptViolation
from electrum_vtc.lnutil import LNPeerAddr, Keypair, privkey_to_pubkey
from electrum_vtc.lnutil import LightningPeerConnectionClosed, RemoteMisbehaving
from electrum_vtc.lnutil import PaymentFailure, LnFeatures, HTLCOwner
from electrum_vtc.lnchannel import ChannelState, PeerState, Channel
from electrum_vtc.lnrouter import LNPathFinder, PathEdge, LNPathInconsistent
from electrum_vtc.channel_db import ChannelDB
from electrum_vtc.lnworker import LNWallet, NoPathFound
from electrum_vtc.lnmsg import encode_msg, decode_msg
from electrum_vtc import lnmsg
from electrum_vtc.logging import console_stderr_handler, Logger
from electrum_vtc.lnworker import PaymentInfo, RECEIVED
from electrum_vtc.lnonion import OnionFailureCode
from electrum_vtc.lnutil import derive_payment_secret_from_payment_preimage
from electrum_vtc.lnutil import LOCAL, REMOTE
from electrum_vtc.invoices import PR_PAID, PR_UNPAID
from .test_lnchannel import create_test_channels
from .test_bitcoin import needs_test_with_all_chacha20_implementations
from . import TestCaseForTestnet
def keypair():
priv = ECPrivkey.generate_random_key().get_secret_bytes()
k1 = Keypair(
pubkey=privkey_to_pubkey(priv),
privkey=priv)
return k1
@contextmanager
def noop_lock():
yield
class MockNetwork:
def __init__(self, tx_queue):
self.callbacks = defaultdict(list)
self.lnwatcher = None
self.interface = None
user_config = {}
user_dir = tempfile.mkdtemp(prefix="electrum-lnpeer-test-")
self.config = simple_config.SimpleConfig(user_config, read_user_dir_function=lambda: user_dir)
self.asyncio_loop = asyncio.get_event_loop()
self.channel_db = ChannelDB(self)
self.channel_db.data_loaded.set()
self.path_finder = LNPathFinder(self.channel_db)
self.tx_queue = tx_queue
self._blockchain = MockBlockchain()
@property
def callback_lock(self):
return noop_lock()
def get_local_height(self):
return 0
def blockchain(self):
return self._blockchain
async def broadcast_transaction(self, tx):
if self.tx_queue:
await self.tx_queue.put(tx)
async def try_broadcasting(self, tx, name):
await self.broadcast_transaction(tx)
class MockBlockchain:
def height(self):
return 0
def is_tip_stale(self):
return False
class MockWallet:
def set_label(self, x, y):
pass
def save_db(self):
pass
def add_transaction(self, tx):
pass
def is_lightning_backup(self):
return False
def is_mine(self, addr):
return True
class MockLNWallet(Logger, NetworkRetryManager[LNPeerAddr]):
MPP_EXPIRY = 2 # HTLC timestamps are cast to int, so this cannot be 1
TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 0
INITIAL_TRAMPOLINE_FEE_LEVEL = 0
def __init__(self, *, local_keypair: Keypair, chans: Iterable['Channel'], tx_queue, name):
self.name = name
Logger.__init__(self)
NetworkRetryManager.__init__(self, max_retry_delay_normal=1, init_retry_delay_normal=1)
self.node_keypair = local_keypair
self.network = MockNetwork(tx_queue)
self.taskgroup = TaskGroup()
self.lnwatcher = None
self.listen_server = None
self._channels = {chan.channel_id: chan for chan in chans}
self.payments = {}
self.logs = defaultdict(list)
self.wallet = MockWallet()
self.features = LnFeatures(0)
self.features |= LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT
self.features |= LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT
self.features |= LnFeatures.VAR_ONION_OPT
self.features |= LnFeatures.PAYMENT_SECRET_OPT
self.features |= LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT
self.pending_payments = defaultdict(asyncio.Future)
for chan in chans:
chan.lnworker = self
self._peers = {} # bytes -> Peer
# used in tests
self.enable_htlc_settle = True
self.enable_htlc_forwarding = True
self.received_mpp_htlcs = dict()
self.sent_htlcs = defaultdict(asyncio.Queue)
self.sent_htlcs_routes = dict()
self.sent_buckets = defaultdict(set)
self.trampoline_forwarding_failures = {}
self.inflight_payments = set()
self.preimages = {}
self.stopping_soon = False
self.downstream_htlc_to_upstream_peer_map = {}
self.logger.info(f"created LNWallet[{name}] with nodeID={local_keypair.pubkey.hex()}")
def get_invoice_status(self, key):
pass
@property
def lock(self):
return noop_lock()
@property
def channel_db(self):
return self.network.channel_db if self.network else None
@property
def channels(self):
return self._channels
@property
def peers(self):
return self._peers
def get_channel_by_short_id(self, short_channel_id):
with self.lock:
for chan in self._channels.values():
if chan.short_channel_id == short_channel_id:
return chan
def channel_state_changed(self, chan):
pass
def save_channel(self, chan):
print("Ignoring channel save")
def diagnostic_name(self):
return self.name
async def stop(self):
await LNWallet.stop(self)
if self.channel_db:
self.channel_db.stop()
await self.channel_db.stopped_event.wait()
async def create_routes_from_invoice(self, amount_msat: int, decoded_invoice: LnAddr, *, full_path=None):
return [r async for r in self.create_routes_for_payment(
amount_msat=amount_msat,
final_total_msat=amount_msat,
invoice_pubkey=decoded_invoice.pubkey.serialize(),
min_cltv_expiry=decoded_invoice.get_min_final_cltv_expiry(),
r_tags=decoded_invoice.get_routing_info('r'),
invoice_features=decoded_invoice.get_features(),
trampoline_fee_levels=defaultdict(int),
use_two_trampolines=False,
payment_hash=decoded_invoice.paymenthash,
payment_secret=decoded_invoice.payment_secret,
full_path=full_path)]
get_payments = LNWallet.get_payments
get_payment_info = LNWallet.get_payment_info
save_payment_info = LNWallet.save_payment_info
set_invoice_status = LNWallet.set_invoice_status
set_request_status = LNWallet.set_request_status
set_payment_status = LNWallet.set_payment_status
get_payment_status = LNWallet.get_payment_status
check_received_mpp_htlc = LNWallet.check_received_mpp_htlc
htlc_fulfilled = LNWallet.htlc_fulfilled
htlc_failed = LNWallet.htlc_failed
save_preimage = LNWallet.save_preimage
get_preimage = LNWallet.get_preimage
create_route_for_payment = LNWallet.create_route_for_payment
create_routes_for_payment = LNWallet.create_routes_for_payment
_check_invoice = staticmethod(LNWallet._check_invoice)
pay_to_route = LNWallet.pay_to_route
pay_to_node = LNWallet.pay_to_node
pay_invoice = LNWallet.pay_invoice
force_close_channel = LNWallet.force_close_channel
try_force_closing = LNWallet.try_force_closing
get_first_timestamp = lambda self: 0
on_peer_successfully_established = LNWallet.on_peer_successfully_established
get_channel_by_id = LNWallet.get_channel_by_id
channels_for_peer = LNWallet.channels_for_peer
_calc_routing_hints_for_invoice = LNWallet._calc_routing_hints_for_invoice
handle_error_code_from_failed_htlc = LNWallet.handle_error_code_from_failed_htlc
is_trampoline_peer = LNWallet.is_trampoline_peer
wait_for_received_pending_htlcs_to_get_removed = LNWallet.wait_for_received_pending_htlcs_to_get_removed
on_proxy_changed = LNWallet.on_proxy_changed
_decode_channel_update_msg = LNWallet._decode_channel_update_msg
_handle_chanupd_from_failed_htlc = LNWallet._handle_chanupd_from_failed_htlc
_on_maybe_forwarded_htlc_resolved = LNWallet._on_maybe_forwarded_htlc_resolved
class MockTransport:
def __init__(self, name):
self.queue = asyncio.Queue()
self._name = name
def name(self):
return self._name
async def read_messages(self):
while True:
yield await self.queue.get()
class NoFeaturesTransport(MockTransport):
"""
This answers the init message with an init that doesn't signal any features.
Used for testing that we require DATA_LOSS_PROTECT.
"""
def send_bytes(self, data):
decoded = decode_msg(data)
print(decoded)
if decoded[0] == 'init':
self.queue.put_nowait(encode_msg('init', lflen=1, gflen=1, localfeatures=b"\x00", globalfeatures=b"\x00"))
class PutIntoOthersQueueTransport(MockTransport):
def __init__(self, keypair, name):
super().__init__(name)
self.other_mock_transport = None
self.privkey = keypair.privkey
def send_bytes(self, data):
self.other_mock_transport.queue.put_nowait(data)
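# Wire two mock transports together so that bytes sent on one side are queued
# for reading on the other.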
def transport_pair(k1, k2, name1, name2):
t1 = PutIntoOthersQueueTransport(k1, name1)
t2 = PutIntoOthersQueueTransport(k2, name2)
t1.other_mock_transport = t2
t2.other_mock_transport = t1
return t1, t2
class PeerInTests(Peer):
DELAY_INC_MSG_PROCESSING_SLEEP = 0 # disable rate-limiting
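# Channel templates used to build test graphs; balances and base fees are in millisatoshis.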
high_fee_channel = {
'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'local_base_fee_msat': 500_000,
'local_fee_rate_millionths': 500,
'remote_base_fee_msat': 500_000,
'remote_fee_rate_millionths': 500,
}
low_fee_channel = {
'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'local_base_fee_msat': 1_000,
'local_fee_rate_millionths': 1,
'remote_base_fee_msat': 1_000,
'remote_fee_rate_millionths': 1,
}
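# "Square" test topology: alice is connected to bob (expensive) and carol (cheap),
# and both bob and carol forward payments on to dave.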
GRAPH_DEFINITIONS = {
'square_graph': {
'alice': {
'channels': {
# we should use copies of channel definitions if
# we want to independently alter them in a test
'bob': high_fee_channel.copy(),
'carol': low_fee_channel.copy(),
},
},
'bob': {
'channels': {
'dave': high_fee_channel.copy(),
},
'config': {
'lightning_forward_payments': True,
'lightning_forward_trampoline_payments': True,
},
},
'carol': {
'channels': {
'dave': low_fee_channel.copy(),
},
'config': {
'lightning_forward_payments': True,
'lightning_forward_trampoline_payments': True,
},
},
'dave': {
},
}
}
class Graph(NamedTuple):
workers: Dict[str, MockLNWallet]
peers: Dict[Tuple[str, str], Peer]
channels: Dict[Tuple[str, str], Channel]
class PaymentDone(Exception): pass
class SuccessfulTest(Exception): pass
class TestPeer(TestCaseForTestnet):
@classmethod
def setUpClass(cls):
super().setUpClass()
console_stderr_handler.setLevel(logging.DEBUG)
def setUp(self):
super().setUp()
self.asyncio_loop, self._stop_loop, self._loop_thread = create_and_start_event_loop()
self._lnworkers_created = [] # type: List[MockLNWallet]
def tearDown(self):
async def cleanup_lnworkers():
async with TaskGroup() as group:
for lnworker in self._lnworkers_created:
await group.spawn(lnworker.stop())
self._lnworkers_created.clear()
run(cleanup_lnworkers())
self.asyncio_loop.call_soon_threadsafe(self._stop_loop.set_result, 1)
self._loop_thread.join(timeout=1)
super().tearDown()
def prepare_peers(self, alice_channel: Channel, bob_channel: Channel):
k1, k2 = keypair(), keypair()
alice_channel.node_id = k2.pubkey
bob_channel.node_id = k1.pubkey
t1, t2 = transport_pair(k1, k2, alice_channel.name, bob_channel.name)
q1, q2 = asyncio.Queue(), asyncio.Queue()
w1 = MockLNWallet(local_keypair=k1, chans=[alice_channel], tx_queue=q1, name=bob_channel.name)
w2 = MockLNWallet(local_keypair=k2, chans=[bob_channel], tx_queue=q2, name=alice_channel.name)
self._lnworkers_created.extend([w1, w2])
p1 = PeerInTests(w1, k2.pubkey, t1)
p2 = PeerInTests(w2, k1.pubkey, t2)
w1._peers[p1.pubkey] = p1
w2._peers[p2.pubkey] = p2
# mark_open won't work if state is already OPEN.
# so set it to FUNDED
alice_channel._state = ChannelState.FUNDED
bob_channel._state = ChannelState.FUNDED
# this populates the channel graph:
p1.mark_open(alice_channel)
p2.mark_open(bob_channel)
return p1, p2, w1, w2, q1, q2
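# Build a full test topology from a graph definition: create channels and
# transports for every edge, then wire up workers and peers and open all channels.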
def prepare_chans_and_peers_in_graph(self, graph_definition) -> Graph:
keys = {k: keypair() for k in graph_definition}
txs_queues = {k: asyncio.Queue() for k in graph_definition}
channels = {} # type: Dict[Tuple[str, str], Channel]
transports = {}
workers = {} # type: Dict[str, MockLNWallet]
peers = {}
# create channels
for a, definition in graph_definition.items():
for b, channel_def in definition.get('channels', {}).items():
channel_ab, channel_ba = create_test_channels(
alice_name=a,
bob_name=b,
alice_pubkey=keys[a].pubkey,
bob_pubkey=keys[b].pubkey,
local_msat=channel_def['local_balance_msat'],
remote_msat=channel_def['remote_balance_msat'],
)
channels[(a, b)], channels[(b, a)] = channel_ab, channel_ba
transport_ab, transport_ba = transport_pair(keys[a], keys[b], channel_ab.name, channel_ba.name)
transports[(a, b)], transports[(b, a)] = transport_ab, transport_ba
# set fees
channel_ab.forwarding_fee_proportional_millionths = channel_def['local_fee_rate_millionths']
channel_ab.forwarding_fee_base_msat = channel_def['local_base_fee_msat']
channel_ba.forwarding_fee_proportional_millionths = channel_def['remote_fee_rate_millionths']
channel_ba.forwarding_fee_base_msat = channel_def['remote_base_fee_msat']
# create workers and peers
for a, definition in graph_definition.items():
channels_of_node = [c for k, c in channels.items() if k[0] == a]
workers[a] = MockLNWallet(local_keypair=keys[a], chans=channels_of_node, tx_queue=txs_queues[a], name=a)
self._lnworkers_created.extend(list(workers.values()))
# create peers
for ab in channels.keys():
peers[ab] = Peer(workers[ab[0]], keys[ab[1]].pubkey, transports[ab])
# add peers to workers
for a, w in workers.items():
for ab, peer_ab in peers.items():
if ab[0] == a:
w._peers[peer_ab.pubkey] = peer_ab
# set forwarding properties
for a, definition in graph_definition.items():
for property in definition.get('config', {}).items():
workers[a].network.config.set_key(*property)
# mark_open won't work if state is already OPEN.
# so set it to FUNDED
for channel_ab in channels.values():
channel_ab._state = ChannelState.FUNDED
# this populates the channel graph:
for ab, peer_ab in peers.items():
peer_ab.mark_open(channels[ab])
graph = Graph(
workers=workers,
peers=peers,
channels=channels,
)
for a in workers:
print(f"{a} -> pubkey {keys[a].pubkey}")
return graph
@staticmethod
async def prepare_invoice(
w2: MockLNWallet, # receiver
*,
amount_msat=100_000_000,
include_routing_hints=False,
) -> Tuple[LnAddr, str]:
amount_btc = amount_msat/Decimal(COIN*1000)
payment_preimage = os.urandom(32)
RHASH = sha256(payment_preimage)
info = PaymentInfo(RHASH, amount_msat, RECEIVED, PR_UNPAID)
w2.save_preimage(RHASH, payment_preimage)
w2.save_payment_info(info)
if include_routing_hints:
routing_hints = await w2._calc_routing_hints_for_invoice(amount_msat)
else:
routing_hints = []
trampoline_hints = []
for r in routing_hints:
node_id, short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = r[1][0]
if len(r[1])== 1 and w2.is_trampoline_peer(node_id):
trampoline_hints.append(('t', (node_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta)))
invoice_features = w2.features.for_invoice()
if invoice_features.supports(LnFeatures.PAYMENT_SECRET_OPT):
payment_secret = derive_payment_secret_from_payment_preimage(payment_preimage)
else:
payment_secret = None
lnaddr1 = LnAddr(
paymenthash=RHASH,
amount=amount_btc,
tags=[('c', lnutil.MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE),
('d', 'coffee'),
('9', invoice_features),
] + routing_hints + trampoline_hints,
payment_secret=payment_secret,
)
invoice = lnencode(lnaddr1, w2.node_keypair.privkey)
lnaddr2 = lndecode(invoice) # unlike lnaddr1, this now has a pubkey set
return lnaddr2, invoice
def test_reestablish(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
for chan in (alice_channel, bob_channel):
chan.peer_state = PeerState.DISCONNECTED
async def reestablish():
await asyncio.gather(
p1.reestablish_channel(alice_channel),
p2.reestablish_channel(bob_channel))
self.assertEqual(alice_channel.peer_state, PeerState.GOOD)
self.assertEqual(bob_channel.peer_state, PeerState.GOOD)
gath.cancel()
gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_reestablish_with_old_state(self):
random_seed = os.urandom(32)
alice_channel, bob_channel = create_test_channels(random_seed=random_seed)
alice_channel_0, bob_channel_0 = create_test_channels(random_seed=random_seed) # these are identical
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
lnaddr, pay_req = run(self.prepare_invoice(w2))
async def pay():
result, log = await w1.pay_invoice(pay_req)
self.assertEqual(result, True)
gath.cancel()
gath = asyncio.gather(pay(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel_0, bob_channel)
for chan in (alice_channel_0, bob_channel):
chan.peer_state = PeerState.DISCONNECTED
async def reestablish():
await asyncio.gather(
p1.reestablish_channel(alice_channel_0),
p2.reestablish_channel(bob_channel))
self.assertEqual(alice_channel_0.peer_state, PeerState.BAD)
self.assertEqual(bob_channel._state, ChannelState.FORCE_CLOSING)
# wait so that pending messages are processed
#await asyncio.sleep(1)
gath.cancel()
gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment(self):
"""Alice pays Bob a single HTLC via direct channel."""
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, w2.get_payment_status(lnaddr.paymenthash))
result, log = await w1.pay_invoice(pay_req)
self.assertTrue(result)
self.assertEqual(PR_PAID, w2.get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.01)
lnaddr, pay_req = await self.prepare_invoice(w2)
invoice_features = lnaddr.get_features()
self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_race(self):
"""Alice and Bob pay each other simultaneously.
They both send 'update_add_htlc' and receive each other's update
before sending 'commitment_signed'. Neither party should fulfill
the respective HTLCs until those are irrevocably committed to.
"""
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def pay():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# prep
_maybe_send_commitment1 = p1.maybe_send_commitment
_maybe_send_commitment2 = p2.maybe_send_commitment
lnaddr2, pay_req2 = await self.prepare_invoice(w2)
lnaddr1, pay_req1 = await self.prepare_invoice(w1)
# create the htlc queues now (side-effecting defaultdict)
q1 = w1.sent_htlcs[lnaddr2.paymenthash]
q2 = w2.sent_htlcs[lnaddr1.paymenthash]
# alice sends htlc BUT NOT COMMITMENT_SIGNED
p1.maybe_send_commitment = lambda x: None
route1 = (await w1.create_routes_from_invoice(lnaddr2.get_amount_msat(), decoded_invoice=lnaddr2))[0][0]
amount_msat = lnaddr2.get_amount_msat()
await w1.pay_to_route(
route=route1,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=lnaddr2.paymenthash,
min_cltv_expiry=lnaddr2.get_min_final_cltv_expiry(),
payment_secret=lnaddr2.payment_secret,
)
p1.maybe_send_commitment = _maybe_send_commitment1
# bob sends htlc BUT NOT COMMITMENT_SIGNED
p2.maybe_send_commitment = lambda x: None
route2 = (await w2.create_routes_from_invoice(lnaddr1.get_amount_msat(), decoded_invoice=lnaddr1))[0][0]
amount_msat = lnaddr1.get_amount_msat()
await w2.pay_to_route(
route=route2,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=lnaddr1.paymenthash,
min_cltv_expiry=lnaddr1.get_min_final_cltv_expiry(),
payment_secret=lnaddr1.payment_secret,
)
p2.maybe_send_commitment = _maybe_send_commitment2
# sleep a bit so that they both receive msgs sent so far
await asyncio.sleep(0.2)
# now they both send COMMITMENT_SIGNED
p1.maybe_send_commitment(alice_channel)
p2.maybe_send_commitment(bob_channel)
htlc_log1 = await q1.get()
assert htlc_log1.success
htlc_log2 = await q2.get()
assert htlc_log2.success
raise PaymentDone()
async def f():
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.01)
await group.spawn(pay())
with self.assertRaises(PaymentDone):
run(f())
@unittest.skip("too expensive")
#@needs_test_with_all_chacha20_implementations
def test_payments_stresstest(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
alice_init_balance_msat = alice_channel.balance(HTLCOwner.LOCAL)
bob_init_balance_msat = bob_channel.balance(HTLCOwner.LOCAL)
num_payments = 50
payment_value_msat = 10_000_000 # make it large enough so that there are actually HTLCs on the ctx
max_htlcs_in_flight = asyncio.Semaphore(5)
async def single_payment(pay_req):
async with max_htlcs_in_flight:
await w1.pay_invoice(pay_req)
async def many_payments():
async with TaskGroup() as group:
pay_reqs_tasks = [await group.spawn(self.prepare_invoice(w2, amount_msat=payment_value_msat))
for i in range(num_payments)]
async with TaskGroup() as group:
for pay_req_task in pay_reqs_tasks:
lnaddr, pay_req = pay_req_task.result()
await group.spawn(single_payment(pay_req))
gath.cancel()
gath = asyncio.gather(many_payments(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.LOCAL))
self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.REMOTE))
self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.LOCAL))
self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.REMOTE))
@needs_test_with_all_chacha20_implementations
def test_payment_multihop(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req)
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_with_preselected_path(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
peers = graph.peers.values()
async def pay(pay_req):
with self.subTest(msg="bad path: edges do not chain together"):
path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
end_node=graph.workers['carol'].node_keypair.pubkey,
short_channel_id=graph.channels[('alice', 'bob')].short_channel_id),
PathEdge(start_node=graph.workers['bob'].node_keypair.pubkey,
end_node=graph.workers['dave'].node_keypair.pubkey,
short_channel_id=graph.channels['bob', 'dave'].short_channel_id)]
with self.assertRaises(LNPathInconsistent):
await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
with self.subTest(msg="bad path: last node id differs from invoice pubkey"):
path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
end_node=graph.workers['bob'].node_keypair.pubkey,
short_channel_id=graph.channels[('alice', 'bob')].short_channel_id)]
with self.assertRaises(LNPathInconsistent):
await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
with self.subTest(msg="good path"):
path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
end_node=graph.workers['bob'].node_keypair.pubkey,
short_channel_id=graph.channels[('alice', 'bob')].short_channel_id),
PathEdge(start_node=graph.workers['bob'].node_keypair.pubkey,
end_node=graph.workers['dave'].node_keypair.pubkey,
short_channel_id=graph.channels['bob', 'dave'].short_channel_id)]
result, log = await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
self.assertTrue(result)
self.assertEqual(
[edge.short_channel_id for edge in path],
[edge.short_channel_id for edge in log[0].route])
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_temp_node_failure(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
graph.workers['bob'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
graph.workers['carol'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req)
self.assertFalse(result)
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_route_around_failure(self):
# Alice will pay Dave. Alice first tries A->C->D route, due to lower fees, but Carol
# will fail the htlc and get blacklisted. Alice will then try A->B->D and succeed.
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
graph.workers['carol'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(500000000000, graph.channels[('alice', 'bob')].balance(LOCAL))
self.assertEqual(500000000000, graph.channels[('dave', 'bob')].balance(LOCAL))
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=2)
self.assertEqual(2, len(log))
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
self.assertEqual([graph.channels[('alice', 'carol')].short_channel_id, graph.channels[('carol', 'dave')].short_channel_id],
[edge.short_channel_id for edge in log[0].route])
self.assertEqual([graph.channels[('alice', 'bob')].short_channel_id, graph.channels[('bob', 'dave')].short_channel_id],
[edge.short_channel_id for edge in log[1].route])
self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
self.assertEqual(499899450000, graph.channels[('alice', 'bob')].balance(LOCAL))
await asyncio.sleep(0.2) # wait for COMMITMENT_SIGNED / REVACK msgs to update balance
self.assertEqual(500100000000, graph.channels[('dave', 'bob')].balance(LOCAL))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
invoice_features = lnaddr.get_features()
self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_with_temp_channel_failure_and_liquidty_hints(self):
# prepare channels such that a temporary channel failure happens at c->d
graph_definition = GRAPH_DEFINITIONS['square_graph'].copy()
graph_definition['alice']['channels']['carol']['local_balance_msat'] = 200_000_000
graph_definition['alice']['channels']['carol']['remote_balance_msat'] = 200_000_000
graph_definition['carol']['channels']['dave']['local_balance_msat'] = 50_000_000
graph_definition['carol']['channels']['dave']['remote_balance_msat'] = 200_000_000
graph_definition['alice']['channels']['bob']['local_balance_msat'] = 200_000_000
graph_definition['alice']['channels']['bob']['remote_balance_msat'] = 200_000_000
graph_definition['bob']['channels']['dave']['local_balance_msat'] = 200_000_000
graph_definition['bob']['channels']['dave']['remote_balance_msat'] = 200_000_000
graph = self.prepare_chans_and_peers_in_graph(graph_definition)
# the payment happens in two attempts:
# 1. along a->c->d due to low fees with temp channel failure:
# with chanupd: ORPHANED, private channel update
# c->d gets a liquidity hint and gets blocked
# 2. along a->b->d with success
amount_to_pay = 100_000_000
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=3)
self.assertTrue(result)
self.assertEqual(2, len(log))
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
self.assertEqual(OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, log[0].failure_msg.code)
liquidity_hints = graph.workers['alice'].network.path_finder.liquidity_hints
pubkey_a = graph.workers['alice'].node_keypair.pubkey
pubkey_b = graph.workers['bob'].node_keypair.pubkey
pubkey_c = graph.workers['carol'].node_keypair.pubkey
pubkey_d = graph.workers['dave'].node_keypair.pubkey
# check liquidity hints for failing route:
hint_ac = liquidity_hints.get_hint(graph.channels[('alice', 'carol')].short_channel_id)
hint_cd = liquidity_hints.get_hint(graph.channels[('carol', 'dave')].short_channel_id)
self.assertEqual(amount_to_pay, hint_ac.can_send(pubkey_a < pubkey_c))
self.assertEqual(None, hint_ac.cannot_send(pubkey_a < pubkey_c))
self.assertEqual(None, hint_cd.can_send(pubkey_c < pubkey_d))
self.assertEqual(amount_to_pay, hint_cd.cannot_send(pubkey_c < pubkey_d))
# check liquidity hints for successful route:
hint_ab = liquidity_hints.get_hint(graph.channels[('alice', 'bob')].short_channel_id)
hint_bd = liquidity_hints.get_hint(graph.channels[('bob', 'dave')].short_channel_id)
self.assertEqual(amount_to_pay, hint_ab.can_send(pubkey_a < pubkey_b))
self.assertEqual(None, hint_ab.cannot_send(pubkey_a < pubkey_b))
self.assertEqual(amount_to_pay, hint_bd.can_send(pubkey_b < pubkey_d))
self.assertEqual(None, hint_bd.cannot_send(pubkey_b < pubkey_d))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], amount_msat=amount_to_pay, include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
def _run_mpp(self, graph, fail_kwargs, success_kwargs):
"""Tests a multipart payment scenario for failing and successful cases."""
self.assertEqual(500_000_000_000, graph.channels[('alice', 'bob')].balance(LOCAL))
self.assertEqual(500_000_000_000, graph.channels[('alice', 'carol')].balance(LOCAL))
amount_to_pay = 600_000_000_000
peers = graph.peers.values()
async def pay(
attempts=1,
alice_uses_trampoline=False,
bob_forwarding=True,
mpp_invoice=True
):
if mpp_invoice:
graph.workers['dave'].features |= LnFeatures.BASIC_MPP_OPT
if not bob_forwarding:
graph.workers['bob'].enable_htlc_forwarding = False
if alice_uses_trampoline:
if graph.workers['alice'].network.channel_db:
graph.workers['alice'].network.channel_db.stop()
await graph.workers['alice'].network.channel_db.stopped_event.wait()
graph.workers['alice'].network.channel_db = None
else:
assert graph.workers['alice'].network.channel_db is not None
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True, amount_msat=amount_to_pay)
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=attempts)
if not bob_forwarding:
# reset to previous state, sleep 2s so that the second htlc can time out
graph.workers['bob'].enable_htlc_forwarding = True
await asyncio.sleep(2)
if result:
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
else:
raise NoPathFound()
async def f(kwargs):
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(pay(**kwargs))
with self.assertRaises(NoPathFound):
run(f(fail_kwargs))
with self.assertRaises(PaymentDone):
run(f(success_kwargs))
@needs_test_with_all_chacha20_implementations
def test_payment_multipart_with_timeout(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
self._run_mpp(graph, {'bob_forwarding': False}, {'bob_forwarding': True})
@needs_test_with_all_chacha20_implementations
def test_payment_multipart(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
self._run_mpp(graph, {'mpp_invoice': False}, {'mpp_invoice': True})
@needs_test_with_all_chacha20_implementations
def test_payment_trampoline(self):
async def turn_on_trampoline_alice():
if graph.workers['alice'].network.channel_db:
graph.workers['alice'].network.channel_db.stop()
await graph.workers['alice'].network.channel_db.stopped_event.wait()
graph.workers['alice'].network.channel_db = None
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=10)
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
await turn_on_trampoline_alice()
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
for is_legacy in (True, False):
graph_definition = GRAPH_DEFINITIONS['square_graph'].copy()
# insert a channel from bob to carol for faster tests,
# otherwise the test will fail randomly
graph_definition['bob']['channels']['carol'] = high_fee_channel
graph = self.prepare_chans_and_peers_in_graph(graph_definition)
peers = graph.peers.values()
if is_legacy:
# turn off trampoline features
graph.workers['dave'].features = graph.workers['dave'].features ^ LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT
# declare routing nodes as trampoline nodes
electrum_vtc.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
graph.workers['bob'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['bob'].node_keypair.pubkey),
graph.workers['carol'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['carol'].node_keypair.pubkey),
}
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multipart_trampoline(self):
# single attempt will fail with insufficient trampoline fee
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
electrum_vtc.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
graph.workers['bob'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['bob'].node_keypair.pubkey),
graph.workers['carol'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['carol'].node_keypair.pubkey),
}
try:
self._run_mpp(
graph,
{'alice_uses_trampoline': True, 'attempts': 1},
{'alice_uses_trampoline': True, 'attempts': 30})
finally:
electrum_vtc.trampoline._TRAMPOLINE_NODES_UNITTESTS = {}
@needs_test_with_all_chacha20_implementations
def test_fail_pending_htlcs_on_shutdown(self):
"""Alice tries to pay Dave via MPP. Dave receives some HTLCs but not all.
Dave shuts down (stops wallet).
We test if Dave fails the pending HTLCs during shutdown.
"""
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
self.assertEqual(500_000_000_000, graph.channels[('alice', 'bob')].balance(LOCAL))
self.assertEqual(500_000_000_000, graph.channels[('alice', 'carol')].balance(LOCAL))
amount_to_pay = 600_000_000_000
peers = graph.peers.values()
graph.workers['dave'].MPP_EXPIRY = 120
graph.workers['dave'].TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 3
async def pay():
graph.workers['dave'].features |= LnFeatures.BASIC_MPP_OPT
graph.workers['bob'].enable_htlc_forwarding = False # Bob will hold forwarded HTLCs
assert graph.workers['alice'].network.channel_db is not None
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True, amount_msat=amount_to_pay)
try:
async with timeout_after(0.5):
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=1)
except TaskTimeout:
# by now Dave hopefully received some HTLCs:
self.assertTrue(len(graph.channels[('dave', 'carol')].hm.htlcs(LOCAL)) > 0)
self.assertTrue(len(graph.channels[('dave', 'carol')].hm.htlcs(REMOTE)) > 0)
else:
self.fail(f"pay_invoice finished but was not supposed to. result={result}")
await graph.workers['dave'].stop()
# Dave is supposed to have failed the pending incomplete MPP HTLCs
self.assertEqual(0, len(graph.channels[('dave', 'carol')].hm.htlcs(LOCAL)))
self.assertEqual(0, len(graph.channels[('dave', 'carol')].hm.htlcs(REMOTE)))
raise SuccessfulTest()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(pay())
with self.assertRaises(SuccessfulTest):
run(f())
@needs_test_with_all_chacha20_implementations
def test_close(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
w2.enable_htlc_settle = False
lnaddr, pay_req = run(self.prepare_invoice(w2))
async def pay():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# alice sends htlc
route, amount_msat = (await w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
p1.pay(route=route,
chan=alice_channel,
amount_msat=lnaddr.get_amount_msat(),
total_msat=lnaddr.get_amount_msat(),
payment_hash=lnaddr.paymenthash,
min_final_cltv_expiry=lnaddr.get_min_final_cltv_expiry(),
payment_secret=lnaddr.payment_secret)
# alice closes
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def set_settle():
await asyncio.sleep(0.1)
w2.enable_htlc_settle = True
gath = asyncio.gather(pay(), set_settle(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_close_upfront_shutdown_script(self):
alice_channel, bob_channel = create_test_channels()
# create upfront shutdown script for bob, alice doesn't use upfront
# shutdown script
bob_uss_pub = lnutil.privkey_to_pubkey(os.urandom(32))
bob_uss_addr = bitcoin.pubkey_to_address('p2wpkh', bh2u(bob_uss_pub))
bob_uss = bfh(bitcoin.address_to_script(bob_uss_addr))
# bob commits to close to bob_uss
alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
# but bob closes to some receiving address, which we achieve by not
# setting the upfront shutdown script in the channel config
bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = b''
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
async def test():
async def close():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# bob closes channel with different shutdown script
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def main_loop(peer):
async with peer.taskgroup as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
coros = [close(), main_loop(p1), main_loop(p2)]
gath = asyncio.gather(*coros)
await gath
with self.assertRaises(UpfrontShutdownScriptViolation):
run(test())
# bob sends the same upfront_shutdown_script as he announced
alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = bob_uss
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
async def test():
async def close():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def main_loop(peer):
async with peer.taskgroup as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
coros = [close(), main_loop(p1), main_loop(p2)]
gath = asyncio.gather(*coros)
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(test())
def test_channel_usage_after_closing(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
lnaddr, pay_req = run(self.prepare_invoice(w2))
lnaddr = w1._check_invoice(pay_req)
route, amount_msat = run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
assert amount_msat == lnaddr.get_amount_msat()
run(w1.force_close_channel(alice_channel.channel_id))
# check if a tx (commitment transaction) was broadcasted:
assert q1.qsize() == 1
with self.assertRaises(NoPathFound) as e:
run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))
peer = w1.peers[route[0].node_id]
# AssertionError is ok since we shouldn't use old routes, and the
# route finding should fail when channel is closed
async def f():
min_cltv_expiry = lnaddr.get_min_final_cltv_expiry()
payment_hash = lnaddr.paymenthash
payment_secret = lnaddr.payment_secret
pay = w1.pay_to_route(
route=route,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=payment_hash,
payment_secret=payment_secret,
min_cltv_expiry=min_cltv_expiry)
await asyncio.gather(pay, p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
with self.assertRaises(PaymentFailure):
run(f())
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages_that_should_be_ignored(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends known message with trailing garbage
# BOLT-01 says peer2 should ignore trailing garbage
raw_msg1 = encode_msg('ping', num_pong_bytes=4, byteslen=4) + bytes(range(55))
p1.transport.send_bytes(raw_msg1)
await asyncio.sleep(0.05)
# peer1 sends unknown 'odd-type' message
# BOLT-01 says peer2 should ignore whole message
raw_msg2 = (43333).to_bytes(length=2, byteorder="big") + bytes(range(55))
p1.transport.send_bytes(raw_msg2)
await asyncio.sleep(0.05)
raise SuccessfulTest()
async def f():
async with TaskGroup() as group:
for peer in [p1, p2]:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(SuccessfulTest):
run(f())
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages__unknown_even_type(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends unknown 'even-type' message
# BOLT-01 says peer2 should close the connection
raw_msg2 = (43334).to_bytes(length=2, byteorder="big") + bytes(range(55))
p1.transport.send_bytes(raw_msg2)
await asyncio.sleep(0.05)
failing_task = None
async def f():
nonlocal failing_task
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
failing_task = await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(lnmsg.UnknownMandatoryMsgType):
run(f())
self.assertTrue(isinstance(failing_task.exception(), lnmsg.UnknownMandatoryMsgType))
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages__known_msg_with_insufficient_length(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends known message with insufficient length for the contents
# BOLT-01 says peer2 should fail the connection
raw_msg1 = encode_msg('ping', num_pong_bytes=4, byteslen=4)[:-1]
p1.transport.send_bytes(raw_msg1)
await asyncio.sleep(0.05)
failing_task = None
async def f():
nonlocal failing_task
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
failing_task = await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(lnmsg.UnexpectedEndOfStream):
run(f())
self.assertTrue(isinstance(failing_task.exception(), lnmsg.UnexpectedEndOfStream))
def run(coro):
return asyncio.run_coroutine_threadsafe(coro, loop=asyncio.get_event_loop()).result()
| 1.304688
| 1
|
dictionaries/lab/06_students.py
|
Galchov/python-fundamentals
| 0
|
12079
|
<filename>dictionaries/lab/06_students.py
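# Read "name:id:course" lines until a line without ':' arrives; that final line
# names the course to report (underscores stand for spaces in the course name).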
data = input()
courses = {}
while ":" in data:
student_name, id, course_name = data.split(":")
if course_name not in courses:
courses[course_name] = {}
courses[course_name][id] = student_name
data = input()
searched_course = data
searched_course_name_as_list = searched_course.split("_")
searched_course = " ".join(searched_course_name_as_list)
for course_name in courses:
if course_name == searched_course:
for id, name in courses[course_name].items():
print(f"{name} - {id}")
| 3.875
| 4
|
eth_tester/normalization/common.py
|
PabloLefort/eth-tester
| 215
|
12080
|
from cytoolz.functoolz import (
curry,
)
from eth_utils import (
to_dict,
to_tuple,
)
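# Apply a per-key normalizer to each entry of a mapping, yielding a new dict (via @to_dict).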
@curry
@to_dict
def normalize_dict(value, normalizers):
for key, item in value.items():
normalizer = normalizers[key]
yield key, normalizer(item)
@curry
@to_tuple
def normalize_array(value, normalizer):
"""
This is just `map` but it's nice to have it return a consistent type
(tuple).
"""
for item in value:
yield normalizer(item)
@curry
def normalize_if(value, conditional_fn, normalizer):
if conditional_fn(value):
return normalizer(value)
else:
return value
| 2.921875
| 3
|
je_open_cv/modules/image_operations.py
|
JE-Chen/Python-OPENCV-JE
| 0
|
12081
|
<reponame>JE-Chen/Python-OPENCV-JE
import cv2
'''
Utility functions for basic image processing.
'''
# Get the image's rows, columns, and channel count
def get_image_properties(image):
total = [image.shape, image.size, image.dtype]
return total
def get_image_shape(image):
return image.shape
# Get the image size
def get_image_size(image):
return image.size
# Get the image data type
def get_image_type(image):
return image.dtype
# Split the image into its channels
def split_image(image):
B, G, R = cv2.split(image)
return [B, G, R]
'''
The B,G,R channels of an image can be split into their individual planes when needed. Then,
the individual channels can be merged back together to form a BGR image again. This can be performed by:
b = img[:,:,0]
Suppose, you want to make all the red pixels to zero, you need not split like this and put it equal to zero.
You can simply use Numpy indexing which is faster.
img[:,:,2] = 0
'''
# Merge channels back into one image
def merge_image(B, G, R):
return cv2.merge((B, G, R))
# Blend two images using alpha (transparency) weights
def image_Blending(image1, image1_Alpha, image2, image2_Alpha):
return cv2.addWeighted(image1, image1_Alpha, image2, image2_Alpha, 0)
| 3.546875
| 4
|
Incident-Response/Tools/grr/grr/client/grr_response_client/vfs_handlers/ntfs.py
|
sn0b4ll/Incident-Playbook
| 1
|
12082
|
#!/usr/bin/env python
"""Virtual filesystem module based on pyfsntfs."""
import stat
from typing import Any, Callable, Dict, Iterable, Optional, Text, Type
import pyfsntfs
from grr_response_client import client_utils
from grr_response_client.vfs_handlers import base as vfs_base
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# Caches pyfsntfs.volume instances.
MOUNT_CACHE = utils.TimeBasedCache()
# See
# https://github.com/libyal/libfsntfs/blob/master/documentation/New%20Technologies%20File%20System%20(NTFS).asciidoc#file_attribute_flags
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
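# Case-insensitive lookup of an alternate data stream by name; returns None if not found.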
def _GetAlternateDataStreamCaseInsensitive(
fd: pyfsntfs.file_entry, name: Text) -> Optional[pyfsntfs.data_stream]:
name = name.lower()
for data_stream in fd.alternate_data_streams:
if data_stream.name.lower() == name:
return data_stream
class NTFSFile(vfs_base.VFSHandler):
"""VFSHandler implementation based on pyfsntfs."""
supported_pathtype = rdf_paths.PathSpec.PathType.NTFS
def __init__(self,
base_fd: Optional[vfs_base.VFSHandler],
handlers: Dict[Any, Type[vfs_base.VFSHandler]],
pathspec: Optional[rdf_paths.PathSpec] = None,
progress_callback: Optional[Callable[[], None]] = None):
super().__init__(
base_fd,
handlers=handlers,
pathspec=pathspec,
progress_callback=progress_callback)
# self.pathspec is initialized to a copy of base_fd
if base_fd is None:
raise ValueError("NTFS driver must have a file base.")
elif isinstance(base_fd, NTFSFile) and base_fd.IsDirectory():
self.volume = base_fd.volume
last_path = utils.JoinPath(self.pathspec.last.path, pathspec.path)
# Replace the last component with this one.
self.pathspec.Pop(-1)
self.pathspec.Append(pathspec)
self.pathspec.last.path = last_path
elif not base_fd.IsDirectory():
cache_key = base_fd.pathspec.SerializeToBytes()
try:
self.volume = MOUNT_CACHE.Get(cache_key)
except KeyError:
self.volume = pyfsntfs.volume()
self.volume.open_file_object(base_fd)
MOUNT_CACHE.Put(cache_key, self.volume)
self.pathspec.Append(pathspec)
elif base_fd.IsDirectory():
raise IOError("Base must be a file.")
self.fd = None
self.data_stream = None
# Try to open by "inode" number.
if pathspec is not None and pathspec.HasField("inode"):
# The lower 48 bits of the file_reference are the MFT index.
mft_index = pathspec.inode & ((1 << 48) - 1)
self.fd = self.volume.get_file_entry(mft_index)
# If the file_reference changed, then the MFT entry points now to
# a different file. Reopen it by path.
if self.fd is not None and self.fd.file_reference != pathspec.inode:
self.fd = None
# Try to open by path
if self.fd is None:
path = self.pathspec.last.path
path = path.replace("/", "\\")
self.fd = self.volume.get_file_entry_by_path(path)
if self.fd is None:
raise IOError("Failed to open {}".format(path))
# Determine data stream
if pathspec is not None and pathspec.HasField("stream_name"):
if pathspec.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL:
self.data_stream = self.fd.get_alternate_data_stream_by_name(
pathspec.stream_name)
else:
self.data_stream = _GetAlternateDataStreamCaseInsensitive(
self.fd, pathspec.stream_name)
if self.data_stream is None:
raise IOError("Failed to open data stream {} in {}.".format(
pathspec.stream_name, path))
self.pathspec.last.stream_name = self.data_stream.name
else:
if self.fd.has_default_data_stream():
self.data_stream = self.fd
# self.pathspec will be used for future access to this file.
# The name is now literal, so disable case-insensitive lookup (expensive).
self.pathspec.last.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
# Access the file by file_reference, to skip path lookups.
self.pathspec.last.inode = self.fd.file_reference
if not self.IsDirectory():
if self.data_stream is not None:
self.size = self.data_stream.get_size()
else:
self.size = 0
def Stat(self,
ext_attrs: bool = False,
follow_symlink: bool = True) -> rdf_client_fs.StatEntry:
return self._Stat(self.fd, self.data_stream, self.pathspec.Copy())
def Read(self, length: int) -> bytes:
self.data_stream.seek(self.offset)
data = self.data_stream.read(length)
self.offset += len(data)
return data
def IsDirectory(self) -> bool:
return self.fd.has_directory_entries_index()
def ListFiles(self,
ext_attrs: bool = False) -> Iterable[rdf_client_fs.StatEntry]:
del ext_attrs # Unused.
self._CheckIsDirectory()
for entry in self.fd.sub_file_entries:
pathspec = self.pathspec.Copy()
pathspec.last.path = utils.JoinPath(pathspec.last.path, entry.name)
pathspec.last.inode = entry.file_reference
pathspec.last.options = rdf_paths.PathSpec.Options.CASE_LITERAL
data_stream = entry if entry.has_default_data_stream() else None
yield self._Stat(entry, data_stream, pathspec.Copy())
# Create extra entries for alternate data streams
for data_stream in entry.alternate_data_streams:
pathspec.last.stream_name = data_stream.name
yield self._Stat(entry, data_stream, pathspec.Copy())
def ListNames(self) -> Iterable[Text]:
self._CheckIsDirectory()
for entry in self.fd.sub_file_entries:
yield entry.name
def _CheckIsDirectory(self) -> None:
if not self.IsDirectory():
raise IOError("{} is not a directory".format(
self.pathspec.CollapsePath()))
def _Stat(
self,
entry: pyfsntfs.file_entry,
data_stream: pyfsntfs.data_stream,
pathspec: rdf_paths.PathSpec,
) -> rdf_client_fs.StatEntry:
st = rdf_client_fs.StatEntry()
st.pathspec = pathspec
st.st_atime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_access_time())
st.st_mtime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_modification_time())
st.st_btime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_creation_time())
st.st_ctime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_entry_modification_time())
if entry.has_directory_entries_index():
st.st_mode = stat.S_IFDIR
else:
st.st_mode = stat.S_IFREG
if data_stream is not None:
st.st_size = data_stream.get_size()
flags = entry.file_attribute_flags
st.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if (flags & FILE_ATTRIBUTE_READONLY) == 0:
st.st_mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
if (flags & FILE_ATTRIBUTE_HIDDEN) == 0:
st.st_mode |= stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
return st
@classmethod
def Open(
cls,
fd: Optional[vfs_base.VFSHandler],
component: rdf_paths.PathSpec,
handlers: Dict[Any, Type[vfs_base.VFSHandler]],
pathspec: Optional[rdf_paths.PathSpec] = None,
progress_callback: Optional[Callable[[], None]] = None
) -> Optional[vfs_base.VFSHandler]:
# A Pathspec which starts with NTFS means we need to resolve the mount
# point at runtime.
if (fd is None and
component.pathtype == rdf_paths.PathSpec.PathType.NTFS and
pathspec is not None):
# We are the top level handler. This means we need to check the system
# mounts to work out the exact mount point and device we need to
# open. We then modify the pathspec so we get nested in the raw
# pathspec.
raw_pathspec, corrected_path = client_utils.GetRawDevice(component.path) # pytype: disable=attribute-error
# Insert the raw device before the component in the pathspec and correct
# the path
component.path = corrected_path
pathspec.Insert(0, component)
pathspec.Insert(0, raw_pathspec)
# Allow incoming pathspec to be given in the local system path
# conventions.
for component in pathspec:
if component.path:
component.path = client_utils.LocalPathToCanonicalPath(component.path)
# We have not actually opened anything in this iteration, but modified the
# pathspec. Next time we should be able to open it properly.
return fd
# If an inode is specified, just use it directly.
# This is necessary so that component.path is ignored.
elif component.HasField("inode"):
return NTFSFile(
fd, handlers, component, progress_callback=progress_callback)
else:
return super(NTFSFile, cls).Open(
fd=fd,
component=component,
handlers=handlers,
pathspec=pathspec,
progress_callback=progress_callback)
| 2.125
| 2
|
04.Encapsulation/Exe/pizza_maker/project/main.py
|
nmoskova/Python-OOP
| 0
|
12083
|
from encapsulation_04.exe.pizza_maker.project.dough import Dough
from encapsulation_04.exe.pizza_maker.project.pizza import Pizza
from encapsulation_04.exe.pizza_maker.project.topping import Topping
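# Demo script: create toppings and dough, print their attributes, then assemble
# a pizza and print its total weight as toppings are added.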
tomato_topping = Topping("Tomato", 60)
print(tomato_topping.topping_type)
print(tomato_topping.weight)
mushrooms_topping = Topping("Mushroom", 75)
print(mushrooms_topping.topping_type)
print(mushrooms_topping.weight)
mozzarella_topping = Topping("Mozzarella", 80)
print(mozzarella_topping.topping_type)
print(mozzarella_topping.weight)
cheddar_topping = Topping("Cheddar", 150)
pepperoni_topping = Topping("Pepperoni", 120)
white_flour_dough = Dough("White Flour", "Mixing", 200)
print(white_flour_dough.flour_type)
print(white_flour_dough.weight)
print(white_flour_dough.baking_technique)
whole_wheat_dough = Dough("Whole Wheat Flour", "Mixing", 200)
print(whole_wheat_dough.weight)
print(whole_wheat_dough.flour_type)
print(whole_wheat_dough.baking_technique)
p = Pizza("Margherita", whole_wheat_dough, 2)
p.add_topping(tomato_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
| 2.5625
| 3
|
src/unicon/plugins/iosxe/cat9k/__init__.py
|
nielsvanhooy/unicon.plugins
| 0
|
12084
|
<reponame>nielsvanhooy/unicon.plugins<filename>src/unicon/plugins/iosxe/cat9k/__init__.py
""" cat9k IOS-XE connection implementation.
"""
__author__ = "<NAME> <<EMAIL>>"
from unicon.plugins.iosxe import (
IosXESingleRpConnection,
IosXEDualRPConnection,
IosXEServiceList,
HAIosXEServiceList)
from .statemachine import IosXECat9kSingleRpStateMachine, IosXECat9kDualRpStateMachine
from .settings import IosXECat9kSettings
from . import service_implementation as svc
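# cat9k-specific service lists that override the reload (and rommon) services.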
class IosXECat9kServiceList(IosXEServiceList):
def __init__(self):
super().__init__()
self.reload = svc.Reload
self.rommon = svc.Rommon
class IosxeCat9kHAServiceList(HAIosXEServiceList):
def __init__(self):
super().__init__()
self.reload = svc.HAReloadService
class IosXECat9kSingleRpConnection(IosXESingleRpConnection):
platform = 'cat9k'
state_machine_class = IosXECat9kSingleRpStateMachine
subcommand_list = IosXECat9kServiceList
settings = IosXECat9kSettings()
class IosXECat9kDualRPConnection(IosXEDualRPConnection):
platform = 'cat9k'
subcommand_list = IosxeCat9kHAServiceList
settings = IosXECat9kSettings()
state_machine_class = IosXECat9kDualRpStateMachine
| 1.773438
| 2
|
ludopediaAnuncios.py
|
christianbobsin/LudopediaDataMiner
| 2
|
12085
|
# -*- coding: utf-8 -*-
from lxml import html
from time import sleep
from datetime import datetime
import requests
import os
import sqlite3
import sys
# In the terminal, run ~: python ludopedia.py [idIni] [regs]
# e.g. ~: python ludopedia.py 451 3000
con = sqlite3.connect('ludopedia.db')
cursor = con.cursor()
cursor.execute("""SELECT (ANUNCIO + 1) FROM JOGOS WHERE ANUNCIO=(SELECT MAX(ANUNCIO) FROM JOGOS WHERE TIPO='ANUNCIO') """)
anuncios = cursor.fetchall()
con.close()
idIni = int(anuncios[0][0])
#idIni = 75691
#regs = int(sys.argv[2])
regs = 9999
idMax = ( idIni + regs )
jogosAdicionados = 0
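# Iterate over announcement ids, scrape each listing page and insert the
# extracted fields into the JOGOS table.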
for id in range(idIni, idMax):
# 'http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id)
#url = 'http://www.ludopedia.com.br/anuncio?id_anuncio=' % id
try:
page = requests.get('http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id))
tree = html.fromstring(page.content)
except:
print 'retrying in 10s'
sleep(10)
page = requests.get('http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id))
tree = html.fromstring(page.content)
#jogoNome = tree.xpath('//div[@class="col-xs-10"]/h3/a/text()')
jogoNome = tree.xpath('//*[@id="page-content"]/div/div/div/div[2]/h3/a/text()')
#jogoFlavor = tree.xpath('//div[@class="col-xs-10"]/h3/span/text()')
jogoFlavor = tree.xpath('//*[@id="page-content"]/div/div/div/div[2]/h3/span/text()')
if len(jogoFlavor):
detalhes = jogoFlavor[0]
else:
detalhes = 'NA'
jogoPreco = tree.xpath('//span[@class="negrito proximo_lance"]/text()')
if len(jogoPreco):
jogoPreco =jogoPreco[0].split()
jogoPreco[1] = jogoPreco[1].replace('.','')
preco = float( jogoPreco[1].replace( ',','.' ) )
else:
preco = 0.0
status = tree.xpath('//td/span/text()')
validadeAnuncio = tree.xpath('//td/text()')
if len(validadeAnuncio):
validadeAnuncio[4] = validadeAnuncio[4].replace(',',' ')
data = validadeAnuncio[4].split()
ano = data[0].split('/')
hora = data[1].split(':')
data = datetime( int(ano[2]), int(ano[1]),int(ano[0]), int(hora[0]), int(hora[1]))
if ( data > datetime.now() and status[1] == 'Vendido'):
data = datetime.now()
else:
data = datetime( 1979, 8, 10 )
pessoa = tree.xpath('//td/a/text()')
if len(pessoa):
vendedor = pessoa[1]
if len(pessoa) < 3:
comprador = 'NA'
else:
comprador = pessoa[2]
current = id - idIni + 1
total = idMax - idIni
progress = (current/float(total))*100
#print str(current) + ' / ' + str(total) + " : " + "%.2f" % round(progress,2) + "%"
#print 'Id: ', id
#jogoCount = id - idIni
if len(jogoNome):
jogosAdicionados = jogosAdicionados + 1
if ( len(status[1]) > 15 ):
status[1] = 'Ativo'
#print 'Jogo: ', jogoNome[0]
#print 'Detalhes ', detalhes
#print 'Preco: ', str(preco)
#print 'Status: ', status[1]
#print 'Validade: ', data
#print 'Estado: ', validadeAnuncio[6]
#print 'Local: ', validadeAnuncio[8]
#print 'Vendedor: ', vendedor
#print 'Comprador:', comprador
print str( current ).zfill( 4 ) + ' '+ str ( id ) + ' ' + ano[2] + '-' +str( ano[1] ).zfill(2) + '-'+ str( ano[0] ).zfill(2) + ' ' + status[1] + '\t\t' + validadeAnuncio[6] + '\t' + str(preco) + '\t ' + jogoNome[0]
con = sqlite3.connect('ludopedia.db')
cursor = con.cursor()
cursor.execute("""INSERT INTO JOGOS ( ANUNCIO, JOGO, SUBTITULO, PRECO, STATUS, VALIDADE, ESTADO, ORIGEM, VENDEDOR, COMPRADOR, TIPO )
VALUES (?,?,?,?,?,?,?,?,?,?,?)""", (id, jogoNome[0], detalhes, preco, status[1], data, validadeAnuncio[6],
validadeAnuncio[8], vendedor, comprador, 'ANUNCIO' ) )
try:
con.commit()
except:
            print 'Commit failed, retrying in 10s.'
sleep(10)
con.commit()
con.close()
#print '-----------------------'
#print 'Jogos Adicionados: ' + str( jogosAdicionados )
#print '-----------------------'
else:
print str( current ).zfill( 4 ) + ' ' + str ( id ) + '\t ' + '-------' + ' \t ' + '-------' + ' \t ' + '------' + '\t ' + '---'
sleep(0.05)
#os.system('clear')
print '---------------------------------------------------------------'
print 'Jogos Adicionados: ' + str( jogosAdicionados )
print '---------------------------------------------------------------'
########################################################################
#sTable = sorted( table, key = getKey )
#print tabulate(sTable, tablefmt="plain" )
#f = open ( 'LudopediaLeaks %s-%s.csv' % ( idIni, idMax) , 'w' )
#for x in range ( 0, len( sTable ) ):
# row = "%s;%s;%s;%s;%s;%s;%s;%s;%s;%s" % ( sTable[x][0],
# sTable[x][1].encode('utf8'),
# sTable[x][2].encode('utf8'),
# sTable[x][3],
# sTable[x][4].encode('utf8'),
# sTable[x][5],
# sTable[x][6].encode('utf8'),
# sTable[x][7].encode('utf8'),
# sTable[x][8].encode('utf8'),
# sTable[x][9].encode('utf8') )
# print row
# f.write(row + '\n' )
#f.close()
| 3.03125
| 3
|
midterm/yolo_utils.py
|
ClarkBrun/emotic
| 0
|
12086
|
import cv2
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
def to_cpu(tensor):
return tensor.detach().cpu()
def xywh2xyxy(x):
''' Convert bounding box from [x, y, w, h] to [x1, y1, x2, y2]
:param x: bounding boxes array
:return: Converted bounding box array
'''
y = x.new(x.shape)
y[..., 0] = x[..., 0] - x[..., 2] / 2
y[..., 1] = x[..., 1] - x[..., 3] / 2
y[..., 2] = x[..., 0] + x[..., 2] / 2
y[..., 3] = x[..., 1] + x[..., 3] / 2
return y
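# --- Hedged usage sketch (added for illustration; not in the original file) ---
# A box centred at (50, 50) with width 20 and height 10 converts to the corner
# box [40, 45, 60, 55]. The helper name _example_xywh2xyxy is made up.
def _example_xywh2xyxy():
    boxes = torch.tensor([[50.0, 50.0, 20.0, 10.0]])
    return xywh2xyxy(boxes)  # tensor([[40., 45., 60., 55.]])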
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
return iou
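# --- Hedged usage sketch (added for illustration; not in the original file) ---
# Two corner-format boxes overlapping on roughly half their width. With the
# +1 pixel convention above, each area is 11 * 11 = 121, the intersection is
# 6 * 11 = 66, so the IoU is 66 / (121 + 121 - 66) = 0.375.
def _example_bbox_iou():
    box_a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    box_b = torch.tensor([[5.0, 0.0, 15.0, 10.0]])
    return bbox_iou(box_a, box_b)  # tensor([0.3750])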
def rescale_boxes(boxes, current_dim, original_shape):
""" Rescales bounding boxes to the original shape """
orig_h, orig_w = original_shape
# The amount of padding that was added
pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))
pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))
# Image height and width after padding is removed
unpad_h = current_dim - pad_y
unpad_w = current_dim - pad_x
# Rescale bounding boxes to dimension of original image
boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h
boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h
return boxes
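# --- Hedged worked example (added for illustration; not in the original file) ---
# A 640x480 image letterboxed to 416x416 receives pad_y = (640 - 480) * 416/640
# = 104 rows of padding. A detection spanning the un-padded area in network
# coordinates, [0, 52, 416, 364], maps back to [0, 0, 640, 480] on the original.
def _example_rescale_boxes():
    boxes = np.array([[0.0, 52.0, 416.0, 364.0]])
    return rescale_boxes(boxes, current_dim=416, original_shape=(480, 640))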
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
"""
Removes detections with lower object confidence score than 'conf_thres' and performs
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
"""
# From (center x, center y, width, height) to (x1, y1, x2, y2)
prediction[..., :4] = xywh2xyxy(prediction[..., :4])
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
image_pred = image_pred[image_pred[:, 4] >= conf_thres]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Object confidence times class confidence
score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
# Sort by it
image_pred = image_pred[(-score).argsort()]
class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)
# Perform non-maximum suppression
keep_boxes = []
while detections.size(0):
large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres
label_match = detections[0, -1] == detections[:, -1]
# Indices of boxes with lower confidence scores, large IOUs and matching labels
invalid = large_overlap & label_match
weights = detections[invalid, 4:5]
# Merge overlapping bboxes by order of confidence
detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()
keep_boxes += [detections[0]]
detections = detections[~invalid]
if keep_boxes:
output[image_i] = torch.stack(keep_boxes)
return output
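# --- Hedged usage sketch (added for illustration; not in the original file) ---
# A fake prediction batch of shape (1, 3, 7): four (cx, cy, w, h) box values,
# an objectness score and two class scores per row. The two high-confidence
# boxes of the same class get merged and the low-confidence box is dropped,
# leaving a single detection for the image. The helper name is made up.
def _example_non_max_suppression():
    preds = torch.tensor([[
        [50.0, 50.0, 20.0, 20.0, 0.9, 0.8, 0.2],
        [52.0, 50.0, 20.0, 20.0, 0.8, 0.7, 0.3],
        [10.0, 10.0,  5.0,  5.0, 0.1, 0.5, 0.5],
    ]])
    return non_max_suppression(preds, conf_thres=0.5, nms_thres=0.4)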
def parse_model_config(path):
"""Parses the yolo-v3 layer configuration file and returns module definitions"""
file = open(path, 'r')
lines = file.read().split('\n')
lines = [x for x in lines if x and not x.startswith('#')]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
module_defs = []
for line in lines:
if line.startswith('['): # This marks the start of a new block
module_defs.append({})
module_defs[-1]['type'] = line[1:-1].rstrip()
if module_defs[-1]['type'] == 'convolutional':
module_defs[-1]['batch_normalize'] = 0
else:
key, value = line.split("=")
value = value.strip()
module_defs[-1][key.rstrip()] = value.strip()
return module_defs
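# --- Hedged usage sketch (added for illustration; not in the original file) ---
# Writes a minimal two-block cfg to a temporary file and parses it; the cfg
# contents and helper name are made up for the example.
def _example_parse_model_config():
    import tempfile
    cfg_text = ("[net]\n"
                "height=416\n"
                "channels=3\n"
                "[convolutional]\n"
                "filters=16\n"
                "size=3\n"
                "stride=1\n"
                "activation=leaky\n")
    with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as tmp:
        tmp.write(cfg_text)
        path = tmp.name
    return parse_model_config(path)  # [{'type': 'net', ...}, {'type': 'convolutional', ...}]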
def parse_data_config(path):
"""Parses the data configuration file"""
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(path, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, value = line.split('=')
options[key.strip()] = value.strip()
return options
def create_modules(module_defs):
"""
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams["channels"])]
module_list = nn.ModuleList()
for module_i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def["type"] == "convolutional":
bn = int(module_def["batch_normalize"])
filters = int(module_def["filters"])
kernel_size = int(module_def["size"])
pad = (kernel_size - 1) // 2
modules.add_module(
f"conv_{module_i}",
nn.Conv2d(
in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def["stride"]),
padding=pad,
bias=not bn,
),
)
if bn:
modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
if module_def["activation"] == "leaky":
modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
elif module_def["type"] == "maxpool":
kernel_size = int(module_def["size"])
stride = int(module_def["stride"])
if kernel_size == 2 and stride == 1:
modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
modules.add_module(f"maxpool_{module_i}", maxpool)
elif module_def["type"] == "upsample":
upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
modules.add_module(f"upsample_{module_i}", upsample)
elif module_def["type"] == "route":
layers = [int(x) for x in module_def["layers"].split(",")]
filters = sum([output_filters[1:][i] for i in layers])
modules.add_module(f"route_{module_i}", EmptyLayer())
elif module_def["type"] == "shortcut":
filters = output_filters[1:][int(module_def["from"])]
modules.add_module(f"shortcut_{module_i}", EmptyLayer())
elif module_def["type"] == "yolo":
anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
# Extract anchors
anchors = [int(x) for x in module_def["anchors"].split(",")]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
num_classes = int(module_def["classes"])
img_size = int(hyperparams["height"])
# Define detection layer
yolo_layer = YOLOLayer(anchors, num_classes, img_size)
modules.add_module(f"yolo_{module_i}", yolo_layer)
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
class Upsample(nn.Module):
""" nn.Upsample is deprecated """
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
class YOLOLayer(nn.Module):
"""Detection layer"""
def __init__(self, anchors, num_classes, img_dim=416):
super(YOLOLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
self.num_classes = num_classes
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim = img_dim
self.grid_size = 0 # grid size
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size = grid_size
g = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride = self.img_dim / self.grid_size
# Calculate offsets for each grid
self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g]).type(FloatTensor)
self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g]).type(FloatTensor)
self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))
def forward(self, x, targets=None, img_dim=None):
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
self.img_dim = img_dim
num_samples = x.size(0)
grid_size = x.size(2)
prediction = (
x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size, grid_size)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# If grid size does not match current we compute new offsets
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + self.grid_x
pred_boxes[..., 1] = y.data + self.grid_y
pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
output = torch.cat(
(
pred_boxes.view(num_samples, -1, 4) * self.stride,
pred_conf.view(num_samples, -1, 1),
pred_cls.view(num_samples, -1, self.num_classes),
),
-1,
)
if targets is None:
return output, 0
else:
iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(
pred_boxes=pred_boxes,
pred_cls=pred_cls,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
# Loss : Mask outputs to ignore non-existing objects (except with conf. loss)
loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])
loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])
loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])
loss_h = self.mse_loss(h[obj_mask], th[obj_mask])
loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])
loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
# Metrics
cls_acc = 100 * class_mask[obj_mask].mean()
conf_obj = pred_conf[obj_mask].mean()
conf_noobj = pred_conf[noobj_mask].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores > 0.5).float()
iou75 = (iou_scores > 0.75).float()
detected_mask = conf50 * class_mask * tconf
precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)
recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)
recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)
self.metrics = {
"loss": to_cpu(total_loss).item(),
"x": to_cpu(loss_x).item(),
"y": to_cpu(loss_y).item(),
"w": to_cpu(loss_w).item(),
"h": to_cpu(loss_h).item(),
"conf": to_cpu(loss_conf).item(),
"cls": to_cpu(loss_cls).item(),
"cls_acc": to_cpu(cls_acc).item(),
"recall50": to_cpu(recall50).item(),
"recall75": to_cpu(recall75).item(),
"precision": to_cpu(precision).item(),
"conf_obj": to_cpu(conf_obj).item(),
"conf_noobj": to_cpu(conf_noobj).item(),
"grid_size": grid_size,
}
return output, total_loss
class Darknet(nn.Module):
"""YOLOv3 object detection model"""
def __init__(self, config_path, img_size=416):
super(Darknet, self).__init__()
self.module_defs = parse_model_config(config_path)
self.hyperparams, self.module_list = create_modules(self.module_defs)
self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], "metrics")]
self.img_size = img_size
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def forward(self, x, targets=None):
img_dim = x.shape[2]
loss = 0
layer_outputs, yolo_outputs = [], []
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if module_def["type"] in ["convolutional", "upsample", "maxpool"]:
x = module(x)
elif module_def["type"] == "route":
x = torch.cat([layer_outputs[int(layer_i)] for layer_i in module_def["layers"].split(",")], 1)
elif module_def["type"] == "shortcut":
layer_i = int(module_def["from"])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif module_def["type"] == "yolo":
x, layer_loss = module[0](x, targets, img_dim)
loss += layer_loss
yolo_outputs.append(x)
layer_outputs.append(x)
yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
return yolo_outputs if targets is None else (loss, yolo_outputs)
def load_darknet_weights(self, weights_path):
"""Parses and loads the weights stored in 'weights_path'"""
# Open the weights file
with open(weights_path, "rb") as f:
header = np.fromfile(f, dtype=np.int32, count=5) # First five are header values
self.header_info = header # Needed to write header when saving weights
self.seen = header[3] # number of images seen during training
weights = np.fromfile(f, dtype=np.float32) # The rest are weights
# Establish cutoff for loading backbone weights
cutoff = None
if "darknet53.conv.74" in weights_path:
cutoff = 75
ptr = 0
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if i == cutoff:
break
if module_def["type"] == "convolutional":
conv_layer = module[0]
if module_def["batch_normalize"]:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
def save_darknet_weights(self, path, cutoff=-1):
"""
@:param path - path of the new weights file
@:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
"""
fp = open(path, "wb")
self.header_info[3] = self.seen
self.header_info.tofile(fp)
# Iterate through layers
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def["type"] == "convolutional":
conv_layer = module[0]
# If batch norm, load bn first
if module_def["batch_normalize"]:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
# Load conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
# Load conv weights
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
def prepare_yolo(model_dir):
''' Download yolo model files and load the model weights
:param model_dir: Directory path where to store yolo model weights and yolo model configuration file.
:return: Yolo model after loading model weights
'''
cfg_file = os.path.join(model_dir, 'yolov3.cfg')
if not os.path.exists(cfg_file):
download_command = 'wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg -O ' + cfg_file
os.system(download_command)
weight_file = os.path.join(model_dir, 'yolov3.weights')
if not os.path.exists(weight_file):
download_command = 'wget https://pjreddie.com/media/files/yolov3.weights -O ' + weight_file
os.system(download_command)
yolo_model = Darknet(cfg_file, 416)
yolo_model.load_darknet_weights(weight_file)
print ('prepared yolo model')
return yolo_model
# if __name__ == '__main__':
# prepare_yolo(model_dir = '/home/face-r/Steps_face_recognition/emotic/debug/models')
| 2.46875
| 2
|
pytracking-master/ltr/train_settings/bbreg/atom.py
|
wsumel/AMMC
| 3
|
12087
|
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
def run(settings):
# Most common settings are assigned in the settings struct
settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'
settings.batch_size = 64
settings.num_workers = 8 #8
settings.print_interval = 1
settings.normalize_mean = [0.485, 0.456, 0.406]
settings.normalize_std = [0.229, 0.224, 0.225]
settings.search_area_factor = 5.0
settings.feature_sz = 18
settings.output_sz = settings.feature_sz * 16
settings.center_jitter_factor = {'train': 0, 'test': 4.5}
settings.scale_jitter_factor = {'train': 0, 'test': 0.5}
# Train datasets
lasot_train = Lasot(settings.env.lasot_dir, split='train')
got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
# coco_train = MSCOCOSeq(settings.env.coco_dir,version='2017')
# Validation datasets
got10k_val = Got10k(settings.env.got10k_dir, split='votval')
# The joint augmentation transform, that is applied to the pairs jointly
transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))
# The augmentation transform applied to the training set (individually to each image in the pair)
transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
# The augmentation transform applied to the validation set (individually to each image in the pair)
transform_val = tfm.Transform(tfm.ToTensor(),
tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
# Data processing to do on the training pairs
proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
output_sz=settings.output_sz,
center_jitter_factor=settings.center_jitter_factor,
scale_jitter_factor=settings.scale_jitter_factor,
mode='sequence',
proposal_params=proposal_params,
transform=transform_train,
joint_transform=transform_joint)
# Data processing to do on the validation pairs
data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
output_sz=settings.output_sz,
center_jitter_factor=settings.center_jitter_factor,
scale_jitter_factor=settings.scale_jitter_factor,
mode='sequence',
proposal_params=proposal_params,
transform=transform_val,
joint_transform=transform_joint)
# The sampler for training
dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train], [1,1,1],
samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)
# dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
# samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)
# The loader for training
loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
shuffle=True, drop_last=True, stack_dim=1)
# The sampler for validation
dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,
processing=data_processing_val)
dataset_val.datatype = 'val'
# The loader for validation
loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,
shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)
# Create network and actor
net = atom_models.atom_resnet18(backbone_pretrained=True)
objective = nn.MSELoss()
actor = actors.AtomActor(net=net, objective=objective)
# Optimizer
optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)  # after every 15 epochs, lr = lr * gamma
# Create trainer
trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)
# Run training (set fail_safe=False if you are debugging)
trainer.train(50, load_latest=False, fail_safe=True)
# trainer.train(50, load_latest=True, fail_safe=False)
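# --- Hedged usage note (added for illustration; not in the original file) ---
# In the pytracking/LTR layout, a train_settings module like this one is
# normally launched by name from the ltr directory, e.g.
#     python run_training.py bbreg atom
# The launcher script name follows the public pytracking README and is an
# assumption here, not something defined in this file.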
| 1.960938
| 2
|
exercise 8.6.py
|
tuyanyang/python_exercise
| 0
|
12088
|
<filename>exercise 8.6.py<gh_stars>0
nums = list()
while True:
nStr = input('Enter a number: ')
try:
if nStr == 'done':
break
n = float(nStr)
nums.append(n)
except:
print('Invalid input')
continue
print('Maximum: ',max(nums))
print('Minimum: ',min(nums))
| 3.9375
| 4
|
gui/activity_list.py
|
keremkoseoglu/Kifu
| 0
|
12089
|
""" Activity list window """
import tkinter
import tkinter.ttk
from model import activity, invoice
from model.activity import Activity
from model.company import Company
from gui.activity import ActivityWindow
from gui.activity_split import ActivitySplit
from gui.invoice import InvoiceWindow
from gui.popup_file import popup_email
from gui.prime_singleton import PrimeSingleton
from util import activity_xlsx_report, backup, date_time
import config
class ActivityListWindow(tkinter.Toplevel):
""" Activity list window """
_BUTTON_WIDTH = 150
_WINDOW_WIDTH = 1200
_WINDOW_HEIGHT = 400
_Y_SPACING = 10
def __init__(self):
# Initialization
tkinter.Toplevel.__init__(self)
self.wm_geometry(str(self._WINDOW_WIDTH) + "x" + str(self._WINDOW_HEIGHT))
# Build tree
self._tree = tkinter.ttk.Treeview(self)
tree_height = self._WINDOW_HEIGHT - config.CONSTANTS["GUI_CELL_HEIGHT"] - self._Y_SPACING
self._tree.place(x=0, y=0, width=self._WINDOW_WIDTH, height=tree_height)
cell_y = tree_height + self._Y_SPACING
self._tree["columns"] = ("Client", "Project", "Location", "GUID")
self._tree.heading("Client", text="Client")
self._tree.heading("Project", text="Project")
self._tree.heading("Location", text="Location")
self._tree.heading("GUID", text="GUID")
# Fill tree with data
self._activities = []
self._tree_content = {}
self._fill_tree_with_activities()
# Buttons
cell_x = 0
edit_button = tkinter.Button(self, text="Edit", command=self._edit_click)
edit_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
edit_button = tkinter.Button(self, text="Excel", command=self._excel_click)
edit_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
split_button = tkinter.Button(self, text="Split", command=self._split_click)
split_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
invoice_button = tkinter.Button(self, text="Invoice", command=self._invoice_click)
invoice_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
invoice_button = tkinter.Button(self, text="Delete", command=self._delete_click)
invoice_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
@property
def _first_selected_activity(self) -> activity.Activity:
selected_activities = self._selected_activities
if len(selected_activities) == 0:
return None
return selected_activities[0]
@property
def _selected_activities(self) -> []:
selected_activities = []
for selected_id in self._tree.selection():
selected_activity = self._tree_content[selected_id]
selected_activities.append(selected_activity)
return selected_activities
def _delete_click(self):
deletable_activities = self._selected_activities
if len(deletable_activities) == 0:
return
deletable_guids = []
for act in deletable_activities:
deletable_guids.append(act.guid)
backup.execute()
Activity.delete_activities(deletable_guids)
self._fill_tree_with_activities()
PrimeSingleton.get().refresh()
def _edit_click(self):
first_selected_activity = self._first_selected_activity
if first_selected_activity is None:
return
activity_window = ActivityWindow()
activity_window.fill_with_activity(first_selected_activity)
        self.after(1, self.destroy)  # pass the callable so the window is destroyed after 1 ms, not immediately
activity_window.mainloop()
def _excel_click(self):
selected_activity_objects = self._selected_activities
xlsx_report = activity_xlsx_report.Report()
xlsx_report.generate_with_activity_objects(selected_activity_objects)
activity_company = Company(config.CONSTANTS["COMPANY_NAME_1E1"])
popup_email(recipients=activity_company.activity_emails,
subject="Bu ayki aktivitelerim",
attachment=xlsx_report.last_saved_files[0])
def _fill_tree_with_activities(self):
self._activities = Activity.get_activities()
self._activities["activities"] = sorted(
self._activities["activities"],
key=lambda x: x["date"],
reverse=False)
self._tree_content = {}
self._tree.delete(*self._tree.get_children())
for activity_line in self._activities["activities"]:
activity_obj = activity.Activity(activity_line)
project_obj = activity_obj.project
tree_val = (
project_obj.client.name,
project_obj.name,
activity_obj.location,
activity_obj.guid
)
id_in_tree = self._tree.insert(
'',
'end',
text=date_time.get_formatted_date(activity_obj.date),
value=tree_val
)
self._tree_content[id_in_tree] = activity_obj
self.update()
def _invoice_click(self):
selected_activities = self._selected_activities
if len(selected_activities) == 0:
return
new_invoice = invoice.get_invoice_obj_from_activities(selected_activities)
invoice_window = InvoiceWindow()
invoice_window.fill_with_invoice(new_invoice, browser=True, invoice_dir=True)
invoice_window.mainloop()
def _split_click(self):
first_selected_activity = self._first_selected_activity
if first_selected_activity is None:
return
activity_split = ActivitySplit()
activity_split.fill_with_activity(first_selected_activity)
        self.after(1, self.destroy)  # pass the callable so the window is destroyed after 1 ms, not immediately
activity_split.mainloop()
| 2.640625
| 3
|
src/cactus/shared/commonTest.py
|
thiagogenez/cactus
| 209
|
12090
|
import os
import shutil
import unittest
from base64 import b64encode
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempFile
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from toil.job import Job
from toil.common import Toil
from cactus.shared.common import cactus_call, ChildTreeJob
class TestCase(unittest.TestCase):
def setUp(self):
self.testNo = TestStatus.getTestSetup(1, 5, 10, 100)
self.tempDir = getTempDirectory(os.getcwd())
self.tempFiles = []
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
system("rm -rf %s" % self.tempDir)
@TestStatus.shortLength
def testCactusCall(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open("/dev/urandom", "rb") as randText:
with open(inputFile, 'w') as fh:
fh.write(b64encode(randText.read(1024)).decode())
with open(inputFile) as fh:
input = "".join(fh.read().split("\n"))
#Send input to container's stdin through a file, get output
#from stdout
output = "".join(cactus_call(infile=inputFile, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
#Send input as string, get output from stdout
output = "".join(cactus_call(stdin_string=input, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
@TestStatus.shortLength
def testCactusCallPipes(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open(inputFile, 'w') as f:
f.write('foobar\n')
# using 'cat' here rather than infile is intentional; it tests
# whether the directory is mounted into containers correctly.
output = cactus_call(parameters=[['cat', inputFile],
['sed', 's/foo/baz/g'],
['awk', '{ print "quux" $0 }']],
check_output=True)
self.assertEqual(output, 'quuxbazbar\n')
@TestStatus.mediumLength
def testChildTreeJob(self):
"""Check that the ChildTreeJob class runs all children."""
numChildren = 100
flagDir = getTempDirectory()
options = Job.Runner.getDefaultOptions(getTempDirectory())
shutil.rmtree(options.jobStore)
with Toil(options) as toil:
toil.start(CTTestParent(flagDir, numChildren))
# Check that all jobs ran
for i in range(numChildren):
self.assertTrue(os.path.exists(os.path.join(flagDir, str(i))))
shutil.rmtree(flagDir)
class CTTestParent(ChildTreeJob):
def __init__(self, flagDir, numChildren):
self.flagDir = flagDir
self.numChildren = numChildren
super(CTTestParent, self).__init__()
def run(self, fileStore):
for i in range(self.numChildren):
self.addChild(CTTestChild(self.flagDir, i))
class CTTestChild(Job):
def __init__(self, flagDir, index):
self.flagDir = flagDir
self.index = index
super(CTTestChild, self).__init__()
def run(self, fileStore):
# Mark that this job has run using a flag file
path = os.path.join(self.flagDir, str(self.index))
with open(path, 'w') as f:
# Empty file
f.write('')
if __name__ == '__main__':
unittest.main()
| 2.078125
| 2
|
porespy/networks/__getnet__.py
|
hfathian/porespy
| 3
|
12091
|
<filename>porespy/networks/__getnet__.py<gh_stars>1-10
import sys
import numpy as np
import openpnm as op
from tqdm import tqdm
import scipy.ndimage as spim
from porespy.tools import extend_slice
import openpnm.models.geometry as op_gm
def regions_to_network(im, dt=None, voxel_size=1):
r"""
Analyzes an image that has been partitioned into pore regions and extracts
the pore and throat geometry as well as network connectivity.
Parameters
----------
im : ND-array
An image of the pore space partitioned into individual pore regions.
Note that this image must have zeros indicating the solid phase.
dt : ND-array
The distance transform of the pore space. If not given it will be
calculated, but it can save time to provide one if available.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is always 1 unit length per voxel.
Returns
-------
A dictionary containing all the pore and throat size data, as well as the
network topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
"""
print('-' * 60, flush=True)
print('Extracting pore and throat information from image', flush=True)
from skimage.morphology import disk, ball
struc_elem = disk if im.ndim == 2 else ball
# if ~np.any(im == 0):
# raise Exception('The received image has no solid phase (0\'s)')
if dt is None:
dt = spim.distance_transform_edt(im > 0)
dt = spim.gaussian_filter(input=dt, sigma=0.5)
# Get 'slices' into im for each pore region
slices = spim.find_objects(im)
# Initialize arrays
Ps = np.arange(1, np.amax(im)+1)
Np = np.size(Ps)
p_coords = np.zeros((Np, im.ndim), dtype=float)
p_volume = np.zeros((Np, ), dtype=float)
p_dia_local = np.zeros((Np, ), dtype=float)
p_dia_global = np.zeros((Np, ), dtype=float)
p_label = np.zeros((Np, ), dtype=int)
p_area_surf = np.zeros((Np, ), dtype=int)
t_conns = []
t_dia_inscribed = []
t_area = []
t_perimeter = []
t_coords = []
# dt_shape = np.array(dt.shape)
# Start extracting size information for pores and throats
for i in tqdm(Ps, file=sys.stdout):
pore = i - 1
if slices[pore] is None:
continue
s = extend_slice(slices[pore], im.shape)
sub_im = im[s]
sub_dt = dt[s]
pore_im = sub_im == i
padded_mask = np.pad(pore_im, pad_width=1, mode='constant')
pore_dt = spim.distance_transform_edt(padded_mask)
s_offset = np.array([i.start for i in s])
p_label[pore] = i
p_coords[pore, :] = spim.center_of_mass(pore_im) + s_offset
p_volume[pore] = np.sum(pore_im)
p_dia_local[pore] = (2*np.amax(pore_dt)) - np.sqrt(3)
p_dia_global[pore] = 2*np.amax(sub_dt)
p_area_surf[pore] = np.sum(pore_dt == 1)
im_w_throats = spim.binary_dilation(input=pore_im, structure=struc_elem(1))
im_w_throats = im_w_throats*sub_im
Pn = np.unique(im_w_throats)[1:] - 1
for j in Pn:
if j > pore:
t_conns.append([pore, j])
vx = np.where(im_w_throats == (j + 1))
t_dia_inscribed.append(2*np.amax(sub_dt[vx]))
t_perimeter.append(np.sum(sub_dt[vx] < 2))
t_area.append(np.size(vx[0]))
t_inds = tuple([i+j for i, j in zip(vx, s_offset)])
temp = np.where(dt[t_inds] == np.amax(dt[t_inds]))[0][0]
if im.ndim == 2:
t_coords.append(tuple((t_inds[0][temp],
t_inds[1][temp])))
else:
t_coords.append(tuple((t_inds[0][temp],
t_inds[1][temp],
t_inds[2][temp])))
# Clean up values
Nt = len(t_dia_inscribed) # Get number of throats
if im.ndim == 2: # If 2D, add 0's in 3rd dimension
p_coords = np.vstack((p_coords.T, np.zeros((Np, )))).T
t_coords = np.vstack((np.array(t_coords).T, np.zeros((Nt, )))).T
net = {}
net['pore.all'] = np.ones((Np, ), dtype=bool)
net['throat.all'] = np.ones((Nt, ), dtype=bool)
net['pore.coords'] = np.copy(p_coords)*voxel_size
net['pore.centroid'] = np.copy(p_coords)*voxel_size
net['throat.centroid'] = np.array(t_coords)*voxel_size
net['throat.conns'] = np.array(t_conns)
net['pore.label'] = np.array(p_label)
net['pore.volume'] = np.copy(p_volume)*(voxel_size**3)
net['throat.volume'] = np.zeros((Nt, ), dtype=float)
net['pore.diameter'] = np.copy(p_dia_local)*voxel_size
net['pore.inscribed_diameter'] = np.copy(p_dia_local)*voxel_size
net['pore.equivalent_diameter'] = 2*((3/4*net['pore.volume']/np.pi)**(1/3))
net['pore.extended_diameter'] = np.copy(p_dia_global)*voxel_size
net['pore.surface_area'] = np.copy(p_area_surf)*(voxel_size)**2
net['throat.diameter'] = np.array(t_dia_inscribed)*voxel_size
net['throat.inscribed_diameter'] = np.array(t_dia_inscribed)*voxel_size
net['throat.area'] = np.array(t_area)*(voxel_size**2)
net['throat.perimeter'] = np.array(t_perimeter)*voxel_size
net['throat.equivalent_diameter'] = (np.array(t_area) * (voxel_size**2))**0.5
P12 = net['throat.conns']
PT1 = np.sqrt(np.sum(((p_coords[P12[:, 0]]-t_coords) * voxel_size)**2, axis=1))
PT2 = np.sqrt(np.sum(((p_coords[P12[:, 1]]-t_coords) * voxel_size)**2, axis=1))
net['throat.total_length'] = PT1 + PT2
PT1 = PT1-p_dia_local[P12[:, 0]]/2*voxel_size
PT2 = PT2-p_dia_local[P12[:, 1]]/2*voxel_size
net['throat.length'] = PT1 + PT2
dist = (p_coords[P12[:, 0]]-p_coords[P12[:, 1]])*voxel_size
net['throat.direct_length'] = np.sqrt(np.sum(dist**2, axis=1))
# Make a dummy openpnm network to get the conduit lengths
pn = op.network.GenericNetwork()
pn.update(net)
pn.add_model(propname='throat.endpoints',
model=op_gm.throat_endpoints.spherical_pores,
pore_diameter='pore.inscribed_diameter',
throat_diameter='throat.inscribed_diameter')
pn.add_model(propname='throat.conduit_lengths',
model=op_gm.throat_length.conduit_lengths)
pn.add_model(propname='pore.area',
model=op_gm.pore_area.sphere)
net['throat.endpoints.head'] = pn['throat.endpoints.head']
net['throat.endpoints.tail'] = pn['throat.endpoints.tail']
net['throat.conduit_lengths.pore1'] = pn['throat.conduit_lengths.pore1']
net['throat.conduit_lengths.pore2'] = pn['throat.conduit_lengths.pore2']
net['throat.conduit_lengths.throat'] = pn['throat.conduit_lengths.throat']
net['pore.area'] = pn['pore.area']
prj = pn.project
prj.clear()
wrk = op.Workspace()
wrk.close_project(prj)
return net
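# --- Hedged usage sketch (added for illustration; not in the original file) ---
# Builds a toy 2D label image with two adjacent pore regions (labels 1 and 2,
# zeros as solid) and extracts the network dictionary. The geometry and helper
# name are made up; real inputs would normally come from a watershed/SNOW
# partitioning of a segmented tomography image.
def _example_regions_to_network():
    regions = np.zeros((20, 20), dtype=int)
    regions[2:10, 2:10] = 1
    regions[10:18, 2:10] = 2
    return regions_to_network(regions, voxel_size=1e-6)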
| 2.625
| 3
|
pandas/core/internals.py
|
lodagro/pandas
| 0
|
12092
|
import itertools
from datetime import datetime
from numpy import nan
import numpy as np
from pandas.core.common import _possibly_downcast_to_dtype, isnull
from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.core.expressions as expressions
from pandas.tslib import Timestamp
from pandas.util import py3compat
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']
is_numeric = False
is_bool = False
is_object = False
_can_hold_na = False
_downcast_dtype = None
def __init__(self, values, items, ref_items, ndim=2):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
if values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
if len(items) != len(values):
raise ValueError('Wrong number of items passed %d, indices imply %d'
% (len(items), len(values)))
self._ref_locs = None
self.values = values
self.ndim = ndim
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
def _gi(self, arg):
return self.values[arg]
@property
def ref_locs(self):
if self._ref_locs is None:
indexer = self.ref_items.get_indexer(self.items)
indexer = com._ensure_platform_int(indexer)
if (indexer == -1).any():
raise AssertionError('Some block items were not in block '
'ref_items')
self._ref_locs = indexer
return self._ref_locs
def set_ref_items(self, ref_items, maybe_rename=True):
"""
If maybe_rename=True, need to set the items for this guy
"""
if not isinstance(ref_items, Index):
raise AssertionError('block ref_items must be an Index')
if maybe_rename:
self.items = ref_items.take(self.ref_locs)
self.ref_items = ref_items
def __repr__(self):
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
name = type(self).__name__
result = '%s: %s, %s, dtype %s' % (
name, com.pprint_thing(self.items), shape, self.dtype)
if py3compat.PY3:
return unicode(result)
return com.console_encode(result)
def __contains__(self, item):
return item in self.items
def __len__(self):
return len(self.values)
def __getstate__(self):
# should not pickle generally (want to share ref_items), but here for
# completeness
return (self.items, self.ref_items, self.values)
def __setstate__(self, state):
items, ref_items, values = state
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
self.values = values
self.ndim = values.ndim
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, self.items, self.ref_items)
def merge(self, other):
if not self.ref_items.equals(other.ref_items):
raise AssertionError('Merge operands must have same ref_items')
# Not sure whether to allow this or not
# if not union_ref.equals(other.ref_items):
# union_ref = self.ref_items + other.ref_items
return _merge_blocks([self, other], self.ref_items)
def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values, self.items, self.ref_items)
def reindex_items_from(self, new_ref_items, copy=True):
"""
Reindex to only those items contained in the input set of items
E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
then the resulting items will be ['b']
Returns
-------
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
else:
masked_idx = indexer[indexer != -1]
new_values = com.take_nd(self.values, masked_idx, axis=0,
allow_fill=False)
new_items = self.items.take(masked_idx)
return make_block(new_values, new_items, new_ref_items)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
self.values[loc] = value
def delete(self, item):
"""
Returns
-------
y : Block (new object)
"""
loc = self.items.get_loc(item)
new_items = self.items.delete(loc)
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items)
def split_block_at(self, item):
"""
Split block into zero or more blocks around columns with given label,
for "deleting" a column without having to copy data by returning views
on the original array.
Returns
-------
generator of Block
"""
loc = self.items.get_loc(item)
if type(loc) == slice or type(loc) == int:
mask = [True] * len(self)
mask[loc] = False
else: # already a mask, inverted
mask = -loc
for s, e in com.split_ranges(mask):
yield make_block(self.values[s:e],
self.items[s:e].copy(),
self.ref_items)
def fillna(self, value, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
new_values = self.values if inplace else self.values.copy()
mask = com.isnull(new_values)
np.putmask(new_values, mask, value)
block = make_block(new_values, self.items, self.ref_items)
if downcast:
block = block.downcast()
return block
def downcast(self, dtypes = None):
""" try to downcast each item to the dict of dtypes if present """
if dtypes is None:
dtypes = dict()
values = self.values
blocks = []
for i, item in enumerate(self.items):
dtype = dtypes.get(item,self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i])
blocks.append(make_block(nv, [ item ], self.ref_items))
continue
nv = _possibly_downcast_to_dtype(values[i], np.dtype(dtype))
nv = _block_shape(nv)
blocks.append(make_block(nv, [ item ], self.ref_items))
return blocks
def astype(self, dtype, copy = True, raise_on_error = True):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
try:
newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy),
self.items, self.ref_items)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if (newb.shape != self.shape or
(not copy and newb.itemsize < self.itemsize)):
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name, newb.itemsize))
return newb
def convert(self, copy = True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
return result
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def to_native_types(self, slicer=None, na_rep='', **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
values = np.array(values,dtype=object)
mask = isnull(values)
values[mask] = na_rep
return values.tolist()
def replace(self, to_replace, value, inplace=False, filter=None):
""" replace the to_replace value with value, possible to create new blocks here
this is just a call to putmask """
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
for i, item in enumerate(self.items):
if item not in filter:
mask[i] = False
if not mask.any():
if inplace:
return [ self ]
return [ self.copy() ]
return self.putmask(mask, value, inplace=inplace)
def putmask(self, mask, new, inplace=False):
""" putmask the data to the block; it is possible that we may create a new dtype of block
return the resulting block(s) """
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
axis = getattr(new, '_het_axis', 0)
new = new.reindex_axis(self.items, axis=axis, copy=False).values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
axis = getattr(mask, '_het_axis', 0)
mask = mask.reindex_axis(self.items, axis=axis, copy=False).values.T
if self._can_hold_element(new):
new = self._try_cast(new)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
for i, item in enumerate(self.items):
m = mask[i]
# need a new block
if m.any():
n = new[i] if isinstance(new, np.ndarray) else new
# type of the new block
dtype, _ = com._maybe_promote(np.array(n).dtype)
                    # we need to explicitly astype here to make a copy
nv = new_values[i].astype(dtype)
# we create a new block type
np.putmask(nv, m, n)
else:
nv = new_values[i] if inplace else new_values[i].copy()
nv = _block_shape(nv)
new_blocks.append(make_block(nv, [ item ], self.ref_items))
return new_blocks
if inplace:
return [ self ]
return [ make_block(new_values, self.items, self.ref_items) ]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, missing=None, coerce=False):
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
values = self.values if inplace else self.values.copy()
if values.ndim != 2:
raise NotImplementedError
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
if missing is None:
mask = None
else: # todo create faster fill func without masking
mask = com.mask_missing(transf(values), missing)
if method == 'pad':
com.pad_2d(transf(values), limit=limit, mask=mask)
else:
com.backfill_2d(transf(values), limit=limit, mask=mask)
return make_block(values, self.items, self.ref_items)
def take(self, indexer, axis=1):
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis=axis,
allow_fill=False)
return make_block(new_values, self.items, self.ref_items)
def get_values(self, dtype):
return self.values
def diff(self, n):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=1)
return make_block(new_values, self.items, self.ref_items)
def shift(self, indexer, periods):
""" shift the block by periods, possibly upcast """
new_values = self.values.take(indexer, axis=1)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:, :periods] = fill_value
else:
new_values[:, periods:] = fill_value
return make_block(new_values, self.items, self.ref_items)
def eval(self, func, other, raise_on_error = True, try_cast = False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function, False by default (and just return
the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
axis = getattr(other, '_het_axis', 0)
other = other.reindex_axis(self.items, axis=axis, copy=True).values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
values = values.T
is_transposed = True
values, other = self._try_coerce_args(values, other)
args = [ values, other ]
try:
result = self._try_coerce_result(func(*args))
except (Exception), detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
% (repr(other),str(detail)))
else:
# return the values
result = np.empty(values.shape,dtype='O')
result.fill(np.nan)
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result, self.items, self.ref_items)
def where(self, other, cond, raise_on_error = True, try_cast = False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
raise_on_error : if True, raise when I can't perform the function, False by default (and just return
the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other,'reindex_axis'):
axis = getattr(other,'_het_axis',0)
other = other.reindex_axis(self.items, axis=axis, copy=True).values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond,'shape'):
raise ValueError("where must have a condition that is ndarray like")
if hasattr(cond,'reindex_axis'):
axis = getattr(cond,'_het_axis',0)
cond = cond.reindex_axis(self.items, axis=axis, copy=True).values
else:
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
# our where function
def func(c,v,o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(expressions.where(c, v, o, raise_on_error=True))
except (Exception), detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
% (repr(o),str(detail)))
else:
# return the values
result = np.empty(v.shape,dtype='float64')
result.fill(np.nan)
return result
def create_block(result, items, transpose = True):
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if transpose and is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result, items, self.ref_items)
# see if we can operate on the entire block, or need item-by-item
if not self._can_hold_na:
axis = cond.ndim-1
result_blocks = []
for item in self.items:
loc = self.items.get_loc(item)
item = self.items.take([loc])
v = values.take([loc],axis=axis)
c = cond.take([loc],axis=axis)
o = other.take([loc],axis=axis) if hasattr(other,'shape') else other
result = func(c,v,o)
if len(result) == 1:
result = np.repeat(result,self.shape[1:])
result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:])
result_blocks.append(create_block(result, item, transpose = False))
return result_blocks
else:
result = func(cond,values,other)
return create_block(result, self.items)
class NumericBlock(Block):
is_numeric = True
_can_hold_na = True
def _try_cast_result(self, result):
return _possibly_downcast_to_dtype(result, self.dtype)
class FloatBlock(NumericBlock):
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if isinstance(element, np.ndarray):
return issubclass(element.dtype.type, (np.floating, np.integer))
return isinstance(element, (float, int))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
values = np.array(values,dtype=object)
mask = isnull(values)
values[mask] = na_rep
if float_format:
imask = (-mask).ravel()
values.flat[imask] = np.array([ float_format % val for val in values.ravel()[imask] ])
return values.tolist()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype
class ComplexBlock(NumericBlock):
def _can_hold_element(self, element):
return isinstance(element, complex)
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
_can_hold_na = False
def _can_hold_element(self, element):
if isinstance(element, np.ndarray):
return issubclass(element.dtype.type, np.integer)
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class BoolBlock(NumericBlock):
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
is_object = True
_can_hold_na = True
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type object """
return lib.is_bool_array(self.values.ravel())
def convert(self, convert_dates = True, convert_numeric = True, copy = True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
for i, c in enumerate(self.items):
values = self.get(c)
values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric)
values = _block_shape(values)
items = self.items.take([i])
newb = make_block(values, items, self.ref_items)
blocks.append(newb)
return blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_))
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
class DatetimeBlock(Block):
_can_hold_na = True
def __init__(self, values, items, ref_items, ndim=2):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
Block.__init__(self, values, items, ref_items, ndim=ndim)
def _gi(self, arg):
return lib.Timestamp(self.values[arg])
def _can_hold_element(self, element):
return com.is_integer(element) or isinstance(element, datetime)
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments
we are going to compare vs i8, so coerce to integer
            values is always ndarray-like, other may not be """
values = values.view('i8')
if isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif isnull(other):
other = tslib.iNaT
else:
other = other.view('i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype == 'i8':
result = tslib.array_to_datetime(result.astype(object).ravel()).reshape(result.shape)
elif isinstance(result, np.integer):
result = lib.Timestamp(result)
return result
def to_native_types(self, slicer=None, na_rep=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
mask = isnull(values)
rvalues = np.empty(self.shape,dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (-mask).ravel()
if self.dtype == 'datetime64[ns]':
rvalues.flat[imask] = np.array([ Timestamp(val)._repr_base for val in values.ravel()[imask] ],dtype=object)
elif self.dtype == 'timedelta64[ns]':
rvalues.flat[imask] = np.array([ lib.repr_timedelta64(val) for val in values.ravel()[imask] ],dtype=object)
return rvalues.tolist()
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
if value.dtype != _NS_DTYPE:
value = tslib.cast_to_nanoseconds(value)
self.values[loc] = value
def get_values(self, dtype):
if dtype == object:
flat_i8 = self.values.ravel().view(np.int64)
res = tslib.ints_to_pydatetime(flat_i8)
return res.reshape(self.values.shape)
return self.values
def make_block(values, items, ref_items):
dtype = values.dtype
vtype = dtype.type
klass = None
if issubclass(vtype, np.floating):
klass = FloatBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.integer):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
# try to infer a datetimeblock
if klass is None and np.prod(values.shape):
flat = values.ravel()
inferred_type = lib.infer_dtype(flat)
if inferred_type == 'datetime':
# we have an object array that has been inferred as datetime, so
# convert it
try:
values = tslib.array_to_datetime(flat).reshape(values.shape)
klass = DatetimeBlock
except: # it already object, so leave it
pass
if klass is None:
klass = ObjectBlock
return klass(values, items, ref_items, ndim=values.ndim)
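# Added illustration (not part of the original module): the dtype-based dispatch
# performed by make_block, restated as a tiny standalone helper so the mapping
# from numpy dtype to Block subclass is easy to see. It deliberately skips the
# datetime inference make_block applies to object arrays; the helper name is
# hypothetical.
def _sketch_block_class_for(dtype):
    """ return the Block subclass make_block would pick for a plain dtype """
    vtype = np.dtype(dtype).type
    if issubclass(vtype, np.floating):
        return FloatBlock
    if issubclass(vtype, np.complexfloating):
        return ComplexBlock
    if issubclass(vtype, np.datetime64):
        return DatetimeBlock
    if issubclass(vtype, np.integer):
        return IntBlock
    if np.dtype(dtype) == np.bool_:
        return BoolBlock
    return ObjectBlock
# e.g. _sketch_block_class_for('float64') is FloatBlock,
# _sketch_block_class_for(object) is ObjectBlock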
# TODO: flexible with index=None and/or items=None
class BlockManager(object):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated']
def __init__(self, blocks, axes, do_integrity_check=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = blocks
ndim = len(axes)
for block in blocks:
if ndim != block.values.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.values.ndim, ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
@classmethod
def make_empty(self):
return BlockManager([], [[], []])
def __nonzero__(self):
return True
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, value):
cur_axis = self.axes[axis]
value = _ensure_index(value)
if len(value) != len(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (len(value), len(cur_axis)))
self.axes[axis] = value
if axis == 0:
for block in self.blocks:
block.set_ref_items(self.items, maybe_rename=True)
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def get_dtype_counts(self):
""" return a dict of the counts of dtypes in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
            counts[b.dtype.name] = counts.get(b.dtype.name, 0) + b.shape[0]
return counts
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [b.items for b in self.blocks]
axes_array = [ax for ax in self.axes]
return axes_array, block_values, block_items
def __setstate__(self, state):
# discard anything after 3rd, support beta pickling format for a little
# while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
self.axes = _handle_legacy_indexes(self.axes)
self._is_consolidated = False
self._known_consolidated = False
blocks = []
for values, items in zip(bvalues, bitems):
blk = make_block(values, items, self.axes[0])
blocks.append(blk)
self.blocks = blocks
def __len__(self):
return len(self.items)
def __repr__(self):
output = 'BlockManager'
for i, ax in enumerate(self.axes):
if i == 0:
output += '\nItems: %s' % ax
else:
output += '\nAxis %d: %s' % (i, ax)
for block in self.blocks:
output += '\n%s' % repr(block)
return output
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.items) for x in self.blocks)
for block in self.blocks:
if block.ref_items is not self.items:
raise AssertionError("Block ref_items must be BlockManager "
"items")
if block.values.shape[1:] != mgr_shape[1:]:
construction_error(tot_items,block.values.shape[1:],self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items')
def apply(self, f, *args, **kwargs):
""" iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
        filter : list, if supplied, only call the block if any of the filter items are in the block's items
"""
axes = kwargs.pop('axes',None)
filter = kwargs.get('filter')
result_blocks = []
for blk in self.blocks:
if filter is not None:
kwargs['filter'] = set(kwargs['filter'])
if not blk.items.isin(filter).any():
result_blocks.append(blk)
continue
if callable(f):
applied = f(blk, *args, **kwargs)
else:
applied = getattr(blk,f)(*args, **kwargs)
if isinstance(applied,list):
result_blocks.extend(applied)
else:
result_blocks.append(applied)
bm = self.__class__(result_blocks, axes or self.axes)
bm._consolidate_inplace()
return bm
def where(self, *args, **kwargs):
return self.apply('where', *args, **kwargs)
def eval(self, *args, **kwargs):
return self.apply('eval', *args, **kwargs)
def putmask(self, *args, **kwargs):
return self.apply('putmask', *args, **kwargs)
def diff(self, *args, **kwargs):
return self.apply('diff', *args, **kwargs)
def interpolate(self, *args, **kwargs):
return self.apply('interpolate', *args, **kwargs)
def shift(self, *args, **kwargs):
return self.apply('shift', *args, **kwargs)
def fillna(self, *args, **kwargs):
return self.apply('fillna', *args, **kwargs)
def downcast(self, *args, **kwargs):
return self.apply('downcast', *args, **kwargs)
def astype(self, *args, **kwargs):
return self.apply('astype', *args, **kwargs)
def convert(self, *args, **kwargs):
return self.apply('convert', *args, **kwargs)
def replace(self, *args, **kwargs):
return self.apply('replace', *args, **kwargs)
def replace_list(self, src_lst, dest_lst, inplace=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return values == s
        masks = [ comp(s) for s in src_lst ]
result_blocks = []
for blk in self.blocks:
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [ blk if inplace else blk.copy() ]
for i, d in enumerate(dest_lst):
new_rb = []
for b in rb:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.ref_locs]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def is_consolidated(self):
"""
        Return True only when no two blocks share the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
dtypes = [blk.dtype.type for blk in self.blocks]
self._is_consolidated = len(dtypes) == len(set(dtypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
self._consolidate_inplace()
return all([ block.is_numeric for block in self.blocks ])
def get_numeric_data(self, copy=False, type_list=None, as_blocks = False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
type_list : tuple of type, default None
Numeric types by default (Float/Complex/Int but not Datetime)
"""
if type_list is None:
filter_blocks = lambda block: block.is_numeric
else:
type_list = self._get_clean_block_types(type_list)
filter_blocks = lambda block: isinstance(block, type_list)
maybe_copy = lambda b: b.copy() if copy else b
num_blocks = [maybe_copy(b) for b in self.blocks if filter_blocks(b)]
if as_blocks:
return num_blocks
if len(num_blocks) == 0:
return BlockManager.make_empty()
indexer = np.sort(np.concatenate([b.ref_locs for b in num_blocks]))
new_items = self.items.take(indexer)
new_blocks = []
for b in num_blocks:
b = b.copy(deep=False)
b.ref_items = new_items
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _get_clean_block_types(self, type_list):
if not isinstance(type_list, tuple):
try:
type_list = tuple(type_list)
except TypeError:
type_list = (type_list,)
type_map = {int: IntBlock, float: FloatBlock,
complex: ComplexBlock,
np.datetime64: DatetimeBlock,
datetime: DatetimeBlock,
bool: BoolBlock,
object: ObjectBlock}
type_list = tuple([type_map.get(t, t) for t in type_list])
return type_list
def get_bool_data(self, copy=False, as_blocks=False):
return self.get_numeric_data(copy=copy, type_list=(BoolBlock,),
as_blocks=as_blocks)
def get_slice(self, slobj, axis=0, raise_on_error=False):
new_axes = list(self.axes)
if raise_on_error:
_check_slice_bounds(slobj, new_axes[axis])
new_axes[axis] = new_axes[axis][slobj]
if axis == 0:
new_items = new_axes[0]
if len(self.blocks) == 1:
blk = self.blocks[0]
newb = make_block(blk.values[slobj], new_items,
new_items)
new_blocks = [newb]
else:
return self.reindex_items(new_items)
else:
new_blocks = self._slice_blocks(slobj, axis)
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _slice_blocks(self, slobj, axis):
new_blocks = []
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = slobj
slicer = tuple(slicer)
for block in self.blocks:
newb = make_block(block.values[slicer], block.items,
block.ref_items)
new_blocks.append(newb)
return new_blocks
def get_series_dict(self):
# For DataFrame
return _blocks_to_series_dict(self.blocks, self.axes[1])
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean, default True
If False, return shallow copy (do not copy data)
Returns
-------
copy : BlockManager
"""
copy_blocks = [block.copy(deep=deep) for block in self.blocks]
# copy_axes = [ax.copy() for ax in self.axes]
copy_axes = list(self.axes)
return BlockManager(copy_blocks, copy_axes, do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
mat = np.empty(self.shape, dtype=float)
elif len(self.blocks) == 1:
blk = self.blocks[0]
if items is None or blk.items.equals(items):
# if not, then just call interleave per below
mat = blk.values
else:
mat = self.reindex_items(items).as_matrix()
else:
if items is None:
mat = self._interleave(self.items)
else:
mat = self.reindex_items(items).as_matrix()
return mat
def _interleave(self, items):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
items = _ensure_index(items)
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(len(items), dtype=bool)
        # By construction, all of the items should be covered by one of the
# blocks
if items.is_unique:
for block in self.blocks:
indexer = items.get_indexer(block.items)
if (indexer == -1).any():
raise AssertionError('Items must contain all block items')
result[indexer] = block.get_values(dtype)
itemmask[indexer] = 1
else:
for block in self.blocks:
mask = items.isin(block.items)
indexer = mask.nonzero()[0]
if (len(indexer) != len(block.items)):
raise AssertionError('All items must be in block items')
result[indexer] = block.get_values(dtype)
itemmask[indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.append(newb)
elif len(self.blocks) == 1:
vals = self.blocks[0].values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
def fast_2d_xs(self, loc, copy=False):
"""
"""
if len(self.blocks) == 1:
result = self.blocks[0].values[:, loc]
if copy:
result = result.copy()
return result
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
dtype = _interleaved_dtype(self.blocks)
items = self.items
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
for j, item in enumerate(blk.items):
i = items.get_loc(item)
result[i] = blk._gi((j, loc))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = _consolidate(self.blocks, self.items)
self._is_consolidated = True
self._known_consolidated = True
def get(self, item):
_, block = self._find_block(item)
return block.get(item)
def iget(self, i):
item = self.items[i]
if self.items.is_unique:
return self.get(item)
else:
# ugh
try:
inds, = (self.items == item).nonzero()
except AttributeError: # MultiIndex
inds, = self.items.map(lambda x: x == item).nonzero()
_, block = self._find_block(item)
try:
binds, = (block.items == item).nonzero()
except AttributeError: # MultiIndex
binds, = block.items.map(lambda x: x == item).nonzero()
for j, (k, b) in enumerate(zip(inds, binds)):
if i == k:
return block.values[b]
raise Exception('Cannot have duplicate column names '
'split across dtypes')
def get_scalar(self, tup):
"""
Retrieve single item
"""
item = tup[0]
_, blk = self._find_block(item)
# this could obviously be seriously sped up in cython
item_loc = blk.items.get_loc(item),
full_loc = item_loc + tuple(ax.get_loc(x)
for ax, x in zip(self.axes[1:], tup[1:]))
return blk.values[full_loc]
def delete(self, item):
i, _ = self._find_block(item)
loc = self.items.get_loc(item)
self._delete_from_block(i, item)
if com._is_bool_indexer(loc): # dupe keys may return mask
loc = [i for i, v in enumerate(loc) if v]
new_items = self.items.delete(loc)
self.set_items_norename(new_items)
self._known_consolidated = False
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
value = _block_shape(value,self.ndim-1)
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
def _set_item(item, arr):
i, block = self._find_block(item)
if not block.should_store(value):
# delete from block, create and append new block
self._delete_from_block(i, item)
self._add_new_block(item, arr, loc=None)
else:
block.set(item, arr)
try:
loc = self.items.get_loc(item)
if isinstance(loc, int):
_set_item(self.items[loc], value)
else:
subset = self.items[loc]
if len(value) != len(subset):
raise AssertionError(
'Number of items to set did not match')
for i, (item, arr) in enumerate(zip(subset, value)):
_set_item(item, arr[None, :])
except KeyError:
# insert at end
self.insert(len(self.items), item, value)
self._known_consolidated = False
def insert(self, loc, item, value):
if item in self.items:
raise Exception('cannot insert %s, already exists' % item)
try:
new_items = self.items.insert(loc, item)
self.set_items_norename(new_items)
# new block
self._add_new_block(item, value, loc=loc)
except:
# so our insertion operation failed, so back out of the new items
# GH 3010
new_items = self.items.delete(loc)
self.set_items_norename(new_items)
# re-raise
raise
if len(self.blocks) > 100:
self._consolidate_inplace()
self._known_consolidated = False
def set_items_norename(self, value):
value = _ensure_index(value)
self.axes[0] = value
for block in self.blocks:
block.set_ref_items(value, maybe_rename=False)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
for b in block.split_block_at(item):
self.blocks.append(b)
def _add_new_block(self, item, value, loc=None):
# Do we care about dtype at the moment?
# hm, elaborate hack?
if loc is None:
loc = self.items.get_loc(item)
new_block = make_block(value, self.items[loc:loc + 1].copy(),
self.items)
self.blocks.append(new_block)
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
if item in block:
return i, block
def _check_have(self, item):
if item not in self.items:
raise KeyError('no item named %s' % com.pprint_thing(item))
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = _ensure_index(new_axis)
cur_axis = self.axes[axis]
if new_axis.equals(cur_axis):
if copy:
result = self.copy(deep=True)
result.axes[axis] = new_axis
if axis == 0:
# patch ref_items, #1823
for blk in result.blocks:
blk.ref_items = new_axis
return result
else:
return self
if axis == 0:
if method is not None:
raise AssertionError('method argument not supported for '
'axis == 0')
return self.reindex_items(new_axis)
new_axis, indexer = cur_axis.reindex(new_axis, method)
return self.reindex_indexer(new_axis, indexer, axis=axis)
def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
"""
pandas-indexer with -1's only.
"""
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer, fill_value)
new_blocks = []
for block in self.blocks:
newb = block.reindex_axis(indexer, axis=axis, fill_value=fill_value)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(new_blocks, new_axes)
def _reindex_indexer_items(self, new_items, indexer, fill_value):
# TODO: less efficient than I'd like
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
mask = np.zeros(len(item_order), dtype=bool)
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.get_indexer(item_order)
selector = blk_indexer != -1
# update with observed items
mask |= selector
if not selector.any():
continue
new_block_items = new_items.take(selector.nonzero()[0])
new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0,
allow_fill=False)
new_blocks.append(make_block(new_values, new_block_items,
new_items))
if not mask.all():
na_items = new_items[-mask]
na_block = self._make_na_block(na_items, new_items,
fill_value=fill_value)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def reindex_items(self, new_items, copy=True, fill_value=np.nan):
"""
"""
new_items = _ensure_index(new_items)
data = self
if not data.is_consolidated():
data = data.consolidate()
return data.reindex_items(new_items)
# TODO: this part could be faster (!)
new_items, indexer = self.items.reindex(new_items)
# could have some pathological (MultiIndex) issues here
new_blocks = []
if indexer is None:
for blk in self.blocks:
if copy:
new_blocks.append(blk.reindex_items_from(new_items))
else:
blk.ref_items = new_items
new_blocks.append(blk)
else:
for block in self.blocks:
newb = block.reindex_items_from(new_items, copy=copy)
if len(newb.items) > 0:
new_blocks.append(newb)
mask = indexer == -1
if mask.any():
extra_items = new_items[mask]
na_block = self._make_na_block(extra_items, new_items,
fill_value=fill_value)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def _make_na_block(self, items, ref_items, fill_value=np.nan):
# TODO: infer dtypes other than float64 from fill_value
block_shape = list(self.shape)
block_shape[0] = len(items)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
na_block = make_block(block_values, items, ref_items)
return na_block
def take(self, indexer, axis=1, verify=True):
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
indexer = com._ensure_platform_int(indexer)
n = len(self.axes[axis])
if verify:
indexer = _maybe_convert_indices(indexer, n)
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_axes = list(self.axes)
new_axes[axis] = self.axes[axis].take(indexer)
new_blocks = []
for blk in self.blocks:
new_values = com.take_nd(blk.values, indexer, axis=axis,
allow_fill=False)
newb = make_block(new_values, blk.items, self.items)
new_blocks.append(newb)
return BlockManager(new_blocks, new_axes)
def merge(self, other, lsuffix=None, rsuffix=None):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
this, other = self._maybe_rename_join(other, lsuffix, rsuffix)
cons_items = this.items + other.items
consolidated = _consolidate(this.blocks + other.blocks, cons_items)
new_axes = list(this.axes)
new_axes[0] = cons_items
return BlockManager(consolidated, new_axes)
def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True):
to_rename = self.items.intersection(other.items)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise Exception('columns overlap: %s' % to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
this = self.rename_items(lrenamer, copydata=copydata)
other = other.rename_items(rrenamer, copydata=copydata)
else:
this = self
return this, other
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def rename_axis(self, mapper, axis=1):
index = self.axes[axis]
if isinstance(index, MultiIndex):
new_axis = MultiIndex.from_tuples([tuple(mapper(y) for y in x) for x in index], names=index.names)
else:
new_axis = Index([mapper(x) for x in index], name=index.name)
if not new_axis.is_unique:
raise AssertionError('New axis must be unique to rename')
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(self.blocks, new_axes)
def rename_items(self, mapper, copydata=True):
new_items = Index([mapper(x) for x in self.items])
new_items.is_unique
new_blocks = []
for block in self.blocks:
newb = block.copy(deep=copydata)
newb.set_ref_items(new_items, maybe_rename=True)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[0] = new_items
return BlockManager(new_blocks, new_axes)
def add_prefix(self, prefix):
f = (('%s' % prefix) + '%s').__mod__
return self.rename_items(f)
def add_suffix(self, suffix):
f = ('%s' + ('%s' % suffix)).__mod__
return self.rename_items(f)
@property
def block_id_vector(self):
# TODO
result = np.empty(len(self.items), dtype=int)
result.fill(-1)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
if (indexer == -1).any():
raise AssertionError('Block items must be in manager items')
result.put(indexer, i)
if (result < 0).any():
raise AssertionError('Some items were not in any block')
return result
@property
def item_dtypes(self):
result = np.empty(len(self.items), dtype='O')
mask = np.zeros(len(self.items), dtype=bool)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
result.put(indexer, blk.values.dtype.name)
mask.put(indexer, 1)
if not (mask.all()):
raise AssertionError('Some items were not in any block')
return result
def construction_error(tot_items, block_shape, axes):
""" raise a helpful message about our construction """
raise ValueError("Shape of passed values is %s, indices imply %s" % (
tuple(map(int, [tot_items] + list(block_shape))),
tuple(map(int, [len(ax) for ax in axes]))))
def create_block_manager_from_blocks(blocks, axes):
try:
# if we are passed values, make the blocks
if len(blocks) == 1 and not isinstance(blocks[0], Block):
blocks = [ make_block(blocks[0], axes[0], axes[0]) ]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError):
blocks = [ getattr(b,'values',b) for b in blocks ]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items,blocks[0].shape[1:],axes)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError):
construction_error(len(arrays),arrays[0].shape[1:],axes)
def form_blocks(arrays, names, axes):
# pre-filter out items if we passed it
items = axes[0]
if len(arrays) < len(items):
extra_items = items - Index(names)
else:
extra_items = []
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
datetime_items = []
for k, v in zip(names, arrays):
if issubclass(v.dtype.type, np.floating):
float_items.append((k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.append((k, v))
else:
datetime_items.append((k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).any():
object_items.append((k, v))
continue
int_items.append((k, v))
elif v.dtype == np.bool_:
bool_items.append((k, v))
else:
object_items.append((k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items, items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _simple_blockify(complex_items, items, np.complex128)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items, items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(datetime_items, items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(bool_items, items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(object_items, items, np.object_)
blocks.extend(object_blocks)
if len(extra_items):
shape = (len(extra_items),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(nan)
na_block = make_block(block_values, extra_items, items)
blocks.append(na_block)
blocks = _consolidate(blocks, items)
return blocks
def _simple_blockify(tuples, ref_items, dtype):
""" return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """
block_items, values = _stack_arrays(tuples, ref_items, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
return [ make_block(values, block_items, ref_items) ]
def _multi_blockify(tuples, ref_items, dtype = None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[1].dtype)
new_blocks = []
for dtype, tup_block in grouper:
block_items, values = _stack_arrays(list(tup_block), ref_items, dtype)
block = make_block(values, block_items, ref_items)
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, ref_items, dtype):
from pandas.core.series import Series
# fml
def _asarray_compat(x):
# asarray shouldn't be called on SparseSeries
if isinstance(x, Series):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
# sparseseries
if isinstance(x, Series):
return len(x),
else:
return x.shape
names, arrays = zip(*tuples)
# index may box values
items = ref_items[ref_items.isin(names)]
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return items, stacked
def _blocks_to_series_dict(blocks, index=None):
from pandas.core.series import Series
series_dict = {}
for block in blocks:
for item, vec in zip(block.items, block.values):
series_dict[item] = Series(vec, index=index, name=item)
return series_dict
def _interleaved_dtype(blocks):
if not len(blocks): return None
from collections import defaultdict
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_numeric = have_float or have_complex or have_int
if (have_object or
(have_bool and have_numeric) or
(have_numeric and have_dt64)):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
return _lcd_dtype(counts[IntBlock])
elif have_dt64 and not have_float and not have_complex:
return np.dtype('M8[ns]')
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock])
def _consolidate(blocks, items):
"""
Merge blocks having same dtype
"""
get_dtype = lambda x: x.dtype.name
# sort by dtype
grouper = itertools.groupby(sorted(blocks, key=get_dtype),
lambda x: x.dtype)
new_blocks = []
for dtype, group_blocks in grouper:
new_block = _merge_blocks(list(group_blocks), items, dtype)
new_blocks.append(new_block)
return new_blocks
def _merge_blocks(blocks, items, dtype=None):
if len(blocks) == 1:
return blocks[0]
if dtype is None:
if len(set([ b.dtype for b in blocks ])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
new_values = _vstack([ b.values for b in blocks ], dtype)
new_items = blocks[0].items.append([b.items for b in blocks[1:]])
new_block = make_block(new_values, new_items, items)
return new_block.reindex_items_from(items)
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim == ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
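# Added sketch: a concrete view of what _block_shape does to a 1-d column of
# values -- it gains a leading length-1 axis so it can sit inside a 2-d block.
def _sketch_block_shape_demo():
    arr = np.array([1., 2., 3.])
    reshaped = _block_shape(arr, ndim=1)
    return arr.shape, reshaped.shape   # (3,) -> (1, 3)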
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
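# Added illustration of the workaround in _vstack above: datetime64[ns] columns
# are stacked through their int64 view and then viewed back, which sidesteps
# np.vstack mangling the dtype on old NumPy releases.
def _sketch_vstack_datetime64():
    a = np.array(['2013-01-01', '2013-01-02'], dtype=_NS_DTYPE)
    b = np.array(['2013-01-03', '2013-01-04'], dtype=_NS_DTYPE)
    stacked = np.vstack([x.view('i8') for x in (a, b)]).view(_NS_DTYPE)
    return stacked.shape, stacked.dtype   # (2, 2), datetime64[ns]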
| 2.375
| 2
|
Sketches/MH/PipeBuilder/BuildViewer.py
|
sparkslabs/kamaelia_orig
| 12
|
12093
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# Simple viewer for displaying the pipeline being built as a component topology
import pygame
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Kamaelia.Visualisation.PhysicsGraph.TopologyViewerComponent import TopologyViewerComponent
from Kamaelia.Physics.Simple import SimpleLaws, Particle
import time
class ComponentParticle(Particle):
"""Version of Physics.Particle designed to represent components in a simple pipeline"""
def __init__(self, ID, position, name):
super(ComponentParticle,self).__init__(position=position, ID = ID )
self.radius = 20
self.labelText = name # strip up to the first pipe only
self.name = name
font = pygame.font.Font(None, 24)
self.label = font.render(self.labelText, False, (0,0,0))
self.left = 0
self.top = 0
self.selected = False
def render(self, surface):
"""Rendering passes. A generator method that renders in multiple passes.
        Use yield to specify the rendering pass at which the next stage of
        rendering should take place.
        Example that renders bonds 'behind' the blobs:
def render(self, surface):
yield 1
self.renderBonds(surface) # render bonds on pass 1
yield 5
self.renderSelf(surface) # render 'blob' on pass 5
If another particle type rendered, for example, on pass 3, then it
would be rendered on top of the bonds, but behind the blobs.
Use this mechanism to order rendering into layers.
"""
sx = int(self.pos[0]) - self.left
sy = int(self.pos[1]) - self.top
yield 1
phase = (time.time()*4) % 2.0
off = phase > 1.0
phase = phase % 1.0
for p in self.bondedTo:
ex = int(p.pos[0] -self.left)
ey = int(p.pos[1] - self.top)
# 'make a crawling dotted line' appearance, to give an animated indication
# directionality of the link
dx = ex-sx
dy = ey-sy
length = (dx*dx + dy*dy)**0.5
dx = dx/length
dy = dy/length
p=0
while p<length:
newp = min(length, p+ phase * 10.0 )
phase = 1.0
if not off:
pygame.draw.line( surface,
(128,128,255),
(sx+dx*p,sy+dy*p),
(sx+dx*newp,sy+dy*newp)
)
off = not off
p=newp
yield 2
if self.selected:
pygame.draw.circle(surface, (255,255,128), (sx,sy), self.radius)
else:
pygame.draw.circle(surface, (192,192,192), (sx,sy), self.radius)
surface.blit(self.label, (sx - self.label.get_width()/2, sy - self.label.get_height()/2))
def setOffset( self, (left,top) ):
"""Inform of a change to the coords of the top left of the drawing surface,
so that this entity can render, as if the top left had moved
"""
self.left = left
self.top = top
def select( self ):
"""Tell this particle it is selected"""
self.selected = True
def deselect( self ):
"""Tell this particle it is selected"""
self.selected = False
def BuildViewer(screensize = (800,600), fullscreen = False, transparency = None):
laws = SimpleLaws(bondLength=100)
return TopologyViewerComponent( screensize=screensize,
fullscreen=fullscreen,
caption = "The pipeline",
particleTypes = {"component":ComponentParticle},
laws = laws
)
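# Added usage sketch (illustrative only): BuildViewer simply returns a configured
# TopologyViewerComponent, so a smaller window is just a different screensize.
# Activating the component and wiring it into a pipeline happens elsewhere in
# Kamaelia and is outside the scope of this file.
def ExampleSmallViewer():
    return BuildViewer(screensize=(640,480), fullscreen=False)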
| 2.484375
| 2
|
examples/exp_example.py
|
physimals/avb
| 0
|
12094
|
"""
Example of usage of the AVB framework to infer a single exponential decay
model.
This uses the Python classes directly to infer the parameters for a single
instance of noisy data constructed as a Numpy array.
"""
import sys
import logging
import numpy as np
from vaby_avb import Avb
import vaby
# Uncomment line below to start the random number generator off with the same seed value
# each time, for repeatable results
#np.random.seed(0)
# Ground truth parameters
PARAMS_TRUTH = [42, 0.5]
NOISE_PREC_TRUTH = 0.1
NOISE_VAR_TRUTH = 1/NOISE_PREC_TRUTH
NOISE_STD_TRUTH = np.sqrt(NOISE_VAR_TRUTH)
print("Ground truth: a=%f, r=%f, noise=%f (precision)" % (PARAMS_TRUTH[0], PARAMS_TRUTH[1], NOISE_PREC_TRUTH))
# Create single exponential model
model = vaby.get_model_class("exp")(None)
# Observed data samples are generated by Numpy from the ground truth
# Gaussian distribution. Reducing the number of samples should make
# the inference less 'confident' - i.e. the output variances for
# MU and BETA will increase
N = 100
DT = 0.02
t = np.array([float(t)*DT for t in range(N)])
DATA_CLEAN = model.evaluate(PARAMS_TRUTH, t).numpy()
DATA_NOISY = DATA_CLEAN + np.random.normal(0, NOISE_STD_TRUTH, [N])
print("Time values:")
print(t)
print("Data samples (clean):")
print(DATA_CLEAN)
print("Data samples (noisy):")
print(DATA_NOISY)
# Run Fabber as a comparison if desired
#import os
#import nibabel as nib
#niidata = DATA_NOISY.reshape((1, 1, 1, N))
#nii = nib.Nifti1Image(niidata, np.identity(4))
#nii.to_filename("data_noisy.nii.gz")
#os.system("fabber_exp --data=data_noisy --print-free-energy --output=fabberout --dt=%.3f --model=exp --num-exps=1 --method=vb --noise=white --overwrite --debug" % DT)
# Log to stdout
logging.getLogger().setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(levelname)s : %(message)s'))
logging.getLogger().addHandler(handler)
# Run AVB inference
avb = Avb(t, vaby.DataModel(DATA_NOISY), model)
avb.run(method="leastsq", maxits=20, learning_rate=0.1, debug="--debug" in sys.argv)
| 3.203125
| 3
|
tests/test_classes/users.py
|
dialogs/python-bot-sdk
| 9
|
12095
|
from dialog_api.users_pb2 import RequestLoadFullUsers, ResponseLoadFullUsers, FullUser
class Users:
def LoadFullUsers(self, request: RequestLoadFullUsers) -> ResponseLoadFullUsers:
return ResponseLoadFullUsers(full_users=[FullUser(id=1, contact_info=[], about=None)])
| 1.976563
| 2
|
aws_marketplace/creating_marketplace_products/src/training_specification.py
|
jerrypeng7773/amazon-sagemaker-examples
| 2,610
|
12096
|
import json
class TrainingSpecification:
template = """
{
"TrainingSpecification": {
"TrainingImage": "IMAGE_REPLACE_ME",
"SupportedHyperParameters": [
{
"Description": "Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes",
"Name": "max_leaf_nodes",
"Type": "Integer",
"Range": {
"IntegerParameterRangeSpecification": {
"MinValue": "1",
"MaxValue": "100000"
}
},
"IsTunable": true,
"IsRequired": false,
"DefaultValue": "100"
}
],
"SupportedTrainingInstanceTypes": INSTANCES_REPLACE_ME,
"SupportsDistributedTraining": false,
"MetricDefinitions": METRICS_REPLACE_ME,
"TrainingChannels": CHANNELS_REPLACE_ME,
"SupportedTuningJobObjectiveMetrics": TUNING_OBJECTIVES_REPLACE_ME
}
}
"""
def get_training_specification_dict(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
return json.loads(
self.get_training_specification_json(
ecr_image,
supports_gpu,
supported_channels,
supported_metrics,
supported_tuning_job_objective_metrics,
)
)
def get_training_specification_json(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
if supported_channels is None:
print("Please provide at least one supported channel")
raise ValueError("Please provide at least one supported channel")
if supported_metrics is None:
supported_metrics = []
if supported_tuning_job_objective_metrics is None:
supported_tuning_job_objective_metrics = []
return (
self.template.replace("IMAGE_REPLACE_ME", ecr_image)
.replace("INSTANCES_REPLACE_ME", self.get_supported_instances(supports_gpu))
.replace(
"CHANNELS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_channels], indent=4, sort_keys=True),
)
.replace(
"METRICS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_metrics], indent=4, sort_keys=True),
)
.replace(
"TUNING_OBJECTIVES_REPLACE_ME",
json.dumps(
[ob.__dict__ for ob in supported_tuning_job_objective_metrics],
indent=4,
sort_keys=True,
),
)
)
@staticmethod
def get_supported_instances(supports_gpu):
cpu_list = [
"ml.m4.xlarge",
"ml.m4.2xlarge",
"ml.m4.4xlarge",
"ml.m4.10xlarge",
"ml.m4.16xlarge",
"ml.m5.large",
"ml.m5.xlarge",
"ml.m5.2xlarge",
"ml.m5.4xlarge",
"ml.m5.12xlarge",
"ml.m5.24xlarge",
"ml.c4.xlarge",
"ml.c4.2xlarge",
"ml.c4.4xlarge",
"ml.c4.8xlarge",
"ml.c5.xlarge",
"ml.c5.2xlarge",
"ml.c5.4xlarge",
"ml.c5.9xlarge",
"ml.c5.18xlarge",
]
gpu_list = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.2xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
list_to_return = cpu_list
if supports_gpu:
list_to_return = cpu_list + gpu_list
return json.dumps(list_to_return)
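# Added usage sketch. _SketchTrainingChannel is a hypothetical stand-in for the
# channel objects the accompanying notebook normally supplies; the only thing
# this file relies on is that each object exposes its fields via __dict__.
class _SketchTrainingChannel:
    def __init__(self, name):
        self.Name = name
        self.Description = "Example channel"
        self.IsRequired = True
        self.SupportedContentTypes = ["text/csv"]
        self.SupportedInputModes = ["File"]
def _sketch_training_specification():
    # The ECR image URI below is a placeholder, not a real repository.
    spec = TrainingSpecification()
    return spec.get_training_specification_dict(
        ecr_image="123456789012.dkr.ecr.us-east-1.amazonaws.com/example-algo:latest",
        supports_gpu=False,
        supported_channels=[_SketchTrainingChannel("train")],
    )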
| 2.328125
| 2
|
Exercises1_12/R-1.12.py
|
opnsesame/Data-Structures-and-Algorithms-Exercises
| 0
|
12097
|
<gh_stars>0
#Python's random module includes a function choice(data) that returns a
#random element from a non-empty sequence. The random modul includes a
#more basic function randrange,with parameterization similar to the
#built-in range function , that return a random choice from the given
#range.Using only the randrange funciton,implement your own version of
#the choice function.
import random
lottoMax = list()
lottoMax = [random.randrange(1,50,1) for i in range(1,8)]
print(lottoMax)
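# Added sketch answering the exercise itself: a choice() built only on randrange,
# as the comment block above asks for.
def my_choice(data):
    """Return a random element from a non-empty sequence using only randrange."""
    return data[random.randrange(0, len(data))]
print(my_choice(lottoMax))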
| 3.90625
| 4
|
database/mongo_connector.py
|
timburbank/openrvdas
| 1
|
12098
|
<reponame>timburbank/openrvdas
#!/usr/bin/env python3
"""Tables:
data: pk timestamp field_name field_value source_record
We don't know what type each value will have, so have a column for
int, float, str and bool and leave all but the appropriate value type
NULL. Docs claim that NULL values take no space, so...
Still so many ways we could make this more space efficient, most
obviously by partitioning field_name (and even timestamp?) into
foreign keys.
field_name - could store this in a separate table so that it's only
a foreign key in the data table. Something like:
fields: id field_name field_type
source_record - an id indexing a table where raw source records are
stored, so that we can re-parse and recreate whatever data we want
if needed.
Current implementation is simple and inefficient in both computation
and storage.
TODO: Allow wildcarding field selection, so client can specify 'S330*,Knud*'
"""
import logging
import sys
import json
sys.path.append('.')
from logger.utils.formats import Python_Record
from logger.utils.das_record import DASRecord
try:
import pymongo
MONGO_ENABLED = True
except ImportError:
MONGO_ENABLED = False
################################################################################
class MongoConnector:
# Name of table in which we will store mappings from record field
  # names to the names of the tables containing those fields.
DATA_TABLE = 'data'
FIELD_TABLE = 'fields'
SOURCE_TABLE = 'source'
def __init__(self, database, host, user, password,
tail=False, save_source=True):
"""Interface to MongoConnector, to be imported by, e.g. DatabaseWriter."""
if not MONGO_ENABLED:
logging.warning('MongoClient not found, so MongoDB functionality not available.')
return
self.client = pymongo.MongoClient([host])
self.db = self.client[database]
self.save_source = save_source
# What's the next id we're supposed to read? Or if we've been
# reading by timestamp, what's the last timestamp we've seen?
self.next_id = 0
self.last_timestamp = None
# Create tables if they don't exist yet
if not self.SOURCE_TABLE in self.db.collection_names():
sourceCol = self.db[self.SOURCE_TABLE]
if not self.DATA_TABLE in self.db.collection_names():
dataCol = self.db[self.DATA_TABLE]
############################
def write_record(self, record):
"""Write record to table."""
# First, check that we've got something we can work with
if not record:
return
if not type(record) == DASRecord:
logging.error('write_record() received non-DASRecord as input. '
'Type: %s', type(record))
return
# If we're saving source records, we have to do a little
# legerdemain: after we've saved the record, we need to retrieve
# the id of the record we've just saved so that we can attach it
# to the data values we're about to save.
if self.save_source:
logging.debug('Inserting source into table')
logging.debug(record)
result = self.db[self.SOURCE_TABLE].insert_one(json.loads(record.as_json()))
# Get the id of the saved source record. Note: documentation
# *claims* that this is kept on a per-client basis, so it's safe
# even if another client does an intervening write.
source_id = result.inserted_id
else:
source_id = None
if not record.fields:
logging.info('DASRecord has no parsed fields. Skipping record.')
return
# Write one row for each field-value pair. Columns are:
# timestamp
# field_name
# int_value \
# float_value, \ Only one of these fields will be non-NULL,
# str_value / depending on the type of the value.
# bool_value /
values = []
for field_name, value in record.fields.items():
data_record = {
'timestamp': record.timestamp,
'field_name': field_name,
'int_value': None,
'float_value': None,
'str_value': None,
'bool_value': None,
'source_id': None
}
if type(value) is int:
data_record['int_value'] = value
elif type(value) is float:
data_record['float_value'] = value
elif type(value) is str:
data_record['str_value'] = value
elif type(value) is bool:
data_record['bool_value'] = True if value else False
elif value is None:
data_record['str_value'] = '""'
else:
logging.error('Unknown record value type (%s) for %s: %s',
                      type(value), field_name, value)
continue
# If we've saved this field's source record, append source's
# foreign key to row so we can look it up.
if source_id:
data_record['source_id'] = source_id
# Join entries into a string, append to list of other values
# we've already saved.
values.append(data_record)
# Build the SQL query
# fields = ['timestamp',
# 'field_name',
# 'int_value',
# 'float_value',
# 'str_value',
# 'bool_value']
# if source_id:
# fields.append('source')
if not values:
logging.warning('No values found in record %s', str(record))
# write_cmd = 'insert into `%s` (%s) values %s' % \
# (self.DATA_TABLE, ','.join(fields), ','.join(values))
logging.debug('Inserting record into table')
result = self.db[self.DATA_TABLE].insert_many(values)
# self.exec_sql_command(write_cmd)
############################
def read(self, field_list=None, start=None, num_records=1):
"""Read the next record from table. If start is specified, reset read
to start at that position."""
query = {}
projection = { '_id': 0 }
if start is None:
start = self.next_id
# If they haven't given us any fields, retrieve everything
if field_list:
query['field_name'] = { "$in": field_list.split(',') }
if num_records is None:
limit = 0
else:
limit = num_records
# query = 'select * from `%s` where %s' % (self.DATA_TABLE, condition)
results = list(self.db[self.DATA_TABLE].find(query, projection).skip(start).limit(limit))
if len(results) == 0:
return {}
output = {}
for result in results:
if not result['field_name'] in output:
output[result['field_name']] = []
if result['int_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['int_value']))
elif result['float_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['float_value']))
elif result['str_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['str_value']))
elif result['bool_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['bool_value']))
else:
output[result['field_name']].append((result['timestamp']))
self.next_id = start + len(results)
return output
# return self._process_query(query)
############################
def read_time(self, field_list=None, start_time=None, stop_time=None):
"""Read the next records from table based on timestamps. If start_time
is None, use the timestamp of the last read record. If stop_time is None,
read all records since then."""
query = {}
if start_time or stop_time:
query['timestamp'] = {}
if start_time is not None:
query['timestamp']['$gte'] = start_time
if stop_time is not None:
query['timestamp']['$lte'] = stop_time
# If they haven't given us any fields, retrieve everything
if field_list:
      query['field_name'] = { "$in": field_list.split(',') }
    sort = [('timestamp', pymongo.DESCENDING)]
    logging.debug('read query: %s', query)
    return list(self.db[self.DATA_TABLE].find(query).sort(sort))
############################
def seek(self, offset=0, origin='current'):
"""Behavior is intended to mimic file seek() behavior but with
respect to records: 'offset' means number of records, and origin
is either 'start', 'current' or 'end'."""
num_rows = self.db[self.DATA_TABLE].count()
if origin == 'current':
self.next_id += offset
elif origin == 'start':
self.next_id = offset
elif origin == 'end':
self.next_id = num_rows + offset
    self.next_id = min(num_rows, self.next_id)
logging.debug('Seek: next position %d', self.next_id)
############################
# def _num_rows(self, table_name):
# query = 'select count(1) from `%s`' % table_name
# cursor = self.connection.cursor()
# cursor.execute(query)
# num_rows = next(cursor)[0]
# return num_rows
############################
# def _process_query(self, query):
# cursor = self.connection.cursor()
# cursor.execute(query)
# results = {}
# for values in cursor:
# (id, timestamp, field_name,
# int_value, float_value, str_value, bool_value,
# source) = values
# if not field_name in results:
# results[field_name] = []
# if int_value is not None:
# val = int_value
# elif float_value is not None:
# val = float_value
# elif str_value is not None:
# val = str_value
# elif float_value is not None:
# val = int_value
# elif bool_value is not None:
# val = bool(bool_value)
# results[field_name].append((timestamp, val))
# self.next_id = id + 1
# self.last_timestamp = timestamp
# cursor.close()
# return results
############################
def delete_table(self, table_name):
"""Delete a table."""
# delete_cmd = 'drop table `%s`' % table_name
logging.info('Dropping table')
return self.db[table_name].drop()
# self.exec_sql_command(delete_cmd)
############################
def close(self):
"""Close connection."""
# self.connection.close()
self.client.close()
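################################################################################
# Added usage sketch (illustrative only). The host string and database name are
# placeholders, a running MongoDB instance is assumed, and user/password are
# accepted by __init__ above but not otherwise used.
def _sketch_read_depth():
  connector = MongoConnector(database='openrvdas', host='localhost:27017',
                             user=None, password=None)
  # Pull up to ten stored values of a hypothetical 'Depth' field.
  return connector.read(field_list='Depth', start=0, num_records=10)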
| 2.671875
| 3
|
benchmarks/cifar10/benchmark_sample_creator.py
|
aarati-K/one-access
| 0
|
12099
|
<reponame>aarati-K/one-access<gh_stars>0
from store.cifar10 import Cifar10
import torchvision.transforms as transforms
import time
import matplotlib.pyplot as plt
batch_size = 1
rel_sample_size = 10000
ds = Cifar10(input_data_folder="/home/aarati/datasets/cifar-10-batches-py", \
max_batches=2, batch_size=batch_size, rel_sample_size=rel_sample_size, \
max_samples=1, transform=transforms.ToTensor())
ds.count_num_points()
ds.generate_IR()
all_times = []
for i in range(10):
start = time.time()
ds.initialize_samples()
end = time.time()
all_times.append(end-start)
s = ds.samples[0].get()
print(all_times)
# Sample creation time for sample size:
# 1: [0.349, 0.306, 0.431, 0.303, 0.18, 0.69, 0.557, 0.681, 0.424, 0.300]
# 10: [0.742, 0.685, 0.679, 0.676, 0.673, 0.676, 0.551, 0.673, 0.669, 0.670]
# 100: [0.713, 0.672, 0.668, 0.671, 0.668, 0.680, 0.682, 0.675, 0.673, 0.669]
# 1000: [0.738, 0.689, 0.704, 0.693, 0.684, 0.683, 0.678, 0.677, 0.700, 0.687]
# 10000: [0.765, 0.727, 0.717, 0.740, 0.723, 0.774, 0.720, 0.868, 0.724, 0.771]
# Plotting code
# x = [1, 10, 50, 100, 1000, 10000]
# y = [0.45, 0.702, 0.703, 0.708, 0.715, 0.746]
# plt.plot(x, y, color='b', marker='o', markerfacecolor='k', markersize=10, fillstyle='full', linewidth=3, linestyle='solid')
# plt.xscale('log')
# plt.ylim(0.40, 0.78)
# plt.xlabel("Reservoir Sample Size", fontsize=20, fontweight='semibold', fontname='serif')
# plt.ylabel("Creation Time (s)", fontsize=20, fontweight='semibold', fontname='serif')
# plt.xticks(x, [1, 10, '', 100, 1000, 10000])
# _, ticks = plt.xticks()
# for tick in ticks:
# tick.set_fontsize(16)
# tick.set_fontweight('medium')
# tick.set_fontname('serif')
# _, ticks = plt.yticks()
# for tick in ticks:
# tick.set_fontsize(16)
# tick.set_fontweight('medium')
# tick.set_fontname('serif')
# plt.show()
| 2.171875
| 2
|