max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
tia/analysis/model/ret.py | lsternlicht/tia | 0 | 12773351 |
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.util.decorator import lazy_property
from tia.analysis.model.interface import TxnPlColumns as TPL
from tia.analysis.perf import drawdown_info, drawdowns, guess_freq, downside_deviation, periodicity
from tia.analysis.plots import plot_return_on_dollar
from tia.util.mplot import AxesFormat
from tia.util.fmt import PercentFormatter, new_percent_formatter, new_float_formatter
__all__ = ['RoiiRetCalculator', 'AumRetCalculator', 'FixedAumRetCalculator', 'CumulativeRets', 'Performance']
def return_on_initial_capital(capital, period_pl, leverage=None):
"""Return the daily return series based on the capital"""
if capital <= 0:
        raise ValueError('capital must be a positive number, not %s' % capital)
leverage = leverage or 1.
eod = capital + (leverage * period_pl.cumsum())
ltd_rets = (eod / capital) - 1.
    dly_rets = ltd_rets.copy()  # copy so the LTD series is not mutated in place
    dly_rets.iloc[1:] = (1. + ltd_rets).pct_change().iloc[1:]
return dly_rets
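# Illustrative sketch (not part of the original module): with 100 of capital
# and period P&L of [5, -2],
#   eod = [105, 103], ltd_rets = [0.05, 0.03],
#   dly_rets = [0.05, -0.019...]  (since 1.03 / 1.05 - 1 ~= -0.019)
# e.g. return_on_initial_capital(100., pd.Series([5., -2.]))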
class RetCalculator(object):
def compute(self, txns):
raise NotImplementedError()
class RoiiRetCalculator(RetCalculator):
def __init__(self, leverage=None):
"""
:param leverage: {None, scalar, Series}, number to scale the position returns
:return:
"""
get_lev = None
if leverage is None:
pass
elif np.isscalar(leverage):
if leverage <= 0:
raise ValueError('leverage must be a positive non-zero number, not %s' % leverage)
else:
get_lev = lambda ts: leverage
elif isinstance(leverage, pd.Series):
get_lev = lambda ts: leverage.asof(ts)
else:
raise ValueError(
'leverage must be {None, positive scalar, Datetime/Period indexed Series} not %s' % type(leverage))
self.leverage = leverage
self.get_lev = get_lev
def compute(self, txns):
txnpl = txns.pl.txn_frame
        txnrets = pd.Series(0., index=txnpl.index, name='ret')  # float dtype, since float returns are assigned below
get_lev = self.get_lev
for pid, pframe in txnpl[[TPL.OPEN_VAL, TPL.PID, TPL.PL, TPL.DT]].groupby(TPL.PID):
if pid != 0:
cost = abs(pframe[TPL.OPEN_VAL].iloc[0])
ppl = pframe[TPL.PL]
lev = None if get_lev is None else get_lev(pframe[TPL.DT].iloc[0])
ret = return_on_initial_capital(cost, ppl, lev)
txnrets[ppl.index] = ret
txnrets.index = txnpl[TPL.DT]
crets = CumulativeRets(txnrets)
return Performance(crets)
class FixedAumRetCalculator(RetCalculator):
def __init__(self, aum, reset_freq='M'):
self.aum = aum
self.reset_freq = reset_freq
# capture what cash flows would be needed on reset date to reset the aum
self.external_cash_flows = None
def compute(self, txns):
ltd = txns.pl.ltd_txn
        grouper = pd.Grouper(freq=self.reset_freq)  # pd.TimeGrouper was removed from pandas
period_rets = pd.Series(np.nan, index=ltd.index)
aum = self.aum
at = 0
cf = OrderedDict()
for key, grp in ltd.groupby(grouper):
if grp.empty:
continue
eod = aum + grp
sod = eod.shift(1)
sod.iloc[0] = aum
period_rets.iloc[at:at + len(grp.index)] = eod / sod - 1.
at += len(grp.index)
# get aum back to fixed amount
cf[key] = eod.iloc[-1] - aum
self.external_cash_flows = pd.Series(cf)
crets = CumulativeRets(period_rets)
return Performance(crets)
class AumRetCalculator(RetCalculator):
def __init__(self, starting_aum, freq='M'):
self.starting_aum = starting_aum
self.freq = freq
self.txn_aum = None
def compute(self, txns):
ltd = txns.pl.ltd_txn
        grouper = pd.Grouper(freq=self.freq)  # pd.TimeGrouper was removed from pandas
period_rets = pd.Series(np.nan, index=ltd.index)
self.txn_aum = txn_aum = pd.Series(np.nan, index=ltd.index)
sop = self.starting_aum
at = 0
for key, grp in ltd.groupby(grouper):
if grp.empty:
continue
eod = sop + grp
sod = eod.shift(1)
sod.iloc[0] = sop
period_rets.iloc[at:at + len(grp.index)] = eod / sod - 1.
txn_aum.iloc[at:at + len(grp.index)] = sod
at += len(grp.index)
sop = eod.iloc[-1]
crets = CumulativeRets(period_rets)
return Performance(crets)
class CumulativeRets(object):
def __init__(self, rets=None, ltd_rets=None):
if rets is None and ltd_rets is None:
raise ValueError('rets or ltd_rets must be specified')
if rets is None:
if ltd_rets.empty:
rets = ltd_rets
else:
rets = (1. + ltd_rets).pct_change()
rets.iloc[0] = ltd_rets.iloc[0]
if ltd_rets is None:
if rets.empty:
ltd_rets = rets
else:
ltd_rets = (1. + rets).cumprod() - 1.
self.rets = rets
self.ltd_rets = ltd_rets
pds_per_year = property(lambda self: periodicity(self.rets))
def asfreq(self, freq):
other_pds_per_year = periodicity(freq)
if self.pds_per_year < other_pds_per_year:
msg = 'Cannot downsample returns. Cannot convert from %s periods/year to %s'
raise ValueError(msg % (self.pds_per_year, other_pds_per_year))
if freq == 'B':
rets = (1. + self.rets).groupby(self.rets.index.date).apply(lambda s: s.prod()) - 1.
# If you do not do this, it will be an object index
rets.index = pd.DatetimeIndex([i for i in rets.index])
return CumulativeRets(rets)
else:
            rets = (1. + self.rets).resample(freq).prod() - 1.
return CumulativeRets(rets)
# -----------------------------------------------------------
# Resampled data
dly = lazy_property(lambda self: self.asfreq('B'), 'dly')
weekly = lazy_property(lambda self: self.asfreq('W'), 'weekly')
monthly = lazy_property(lambda self: self.asfreq('M'), 'monthly')
quarterly = lazy_property(lambda self: self.asfreq('Q'), 'quarterly')
annual = lazy_property(lambda self: self.asfreq('A'), 'annual')
# -----------------------------------------------------------
# Basic Metrics
@lazy_property
def ltd_rets_ann(self):
        return (1. + self.ltd_rets) ** (self.pds_per_year / self.rets.expanding().count()) - 1.
cnt = property(lambda self: self.rets.notnull().astype(int).sum())
mean = lazy_property(lambda self: self.rets.mean(), 'avg')
mean_ann = lazy_property(lambda self: self.mean * self.pds_per_year, 'avg_ann')
ltd = lazy_property(lambda self: self.ltd_rets.iloc[-1], name='ltd')
ltd_ann = lazy_property(lambda self: self.ltd_rets_ann.iloc[-1], name='ltd_ann')
std = lazy_property(lambda self: self.rets.std(), 'std')
std_ann = lazy_property(lambda self: self.std * np.sqrt(self.pds_per_year), 'std_ann')
drawdown_info = lazy_property(lambda self: drawdown_info(self.rets), 'drawdown_info')
drawdowns = lazy_property(lambda self: drawdowns(self.rets), 'drawdowns')
maxdd = lazy_property(lambda self: self.drawdown_info['maxdd'].min(), 'maxdd')
dd_avg = lazy_property(lambda self: self.drawdown_info['maxdd'].mean(), 'dd_avg')
kurtosis = lazy_property(lambda self: self.rets.kurtosis(), 'kurtosis')
skew = lazy_property(lambda self: self.rets.skew(), 'skew')
sharpe_ann = lazy_property(lambda self: np.divide(self.ltd_ann, self.std_ann), 'sharpe_ann')
downside_deviation = lazy_property(lambda self: downside_deviation(self.rets, mar=0, full=0, ann=1),
'downside_deviation')
sortino = lazy_property(lambda self: self.ltd_ann / self.downside_deviation, 'sortino')
@lazy_property
def maxdd_dt(self):
ddinfo = self.drawdown_info
if ddinfo.empty:
return None
else:
return self.drawdown_info['maxdd dt'].loc[self.drawdown_info['maxdd'].idxmin()]
# -----------------------------------------------------------
# Expanding metrics
    expanding_mean = property(lambda self: self.rets.expanding().mean())
    expanding_mean_ann = property(lambda self: self.expanding_mean * self.pds_per_year)
    expanding_std = lazy_property(lambda self: self.rets.expanding().std(), 'expanding_std')
expanding_std_ann = lazy_property(lambda self: self.expanding_std * np.sqrt(self.pds_per_year), 'expanding_std_ann')
expanding_sharpe_ann = property(lambda self: np.divide(self.ltd_rets_ann, self.expanding_std_ann))
# -----------------------------------------------------------
# Rolling metrics
    def rolling_mean(self, n):
        return self.rets.rolling(n).mean()
    def rolling_mean_ann(self, n):
        return self.rolling_mean(n) * self.pds_per_year
def rolling_ltd_rets(self, n):
        return self.rets.rolling(n).apply(lambda s: (1. + s).prod() - 1., raw=True)
def rolling_ltd_rets_ann(self, n):
tot = self.rolling_ltd_rets(n)
        return (1. + tot) ** (self.pds_per_year / n) - 1.
def rolling_std(self, n):
        return self.rets.rolling(n).std()
def rolling_std_ann(self, n):
return self.rolling_std(n) * np.sqrt(self.pds_per_year)
def rolling_sharpe_ann(self, n):
return self.rolling_ltd_rets_ann(n) / self.rolling_std_ann(n)
def iter_by_year(self):
"""Split the return objects by year and iterate"""
for key, grp in self.rets.groupby(lambda x: x.year):
yield key, CumulativeRets(rets=grp)
def truncate(self, before=None, after=None):
rets = self.rets.truncate(before=before, after=after)
return CumulativeRets(rets=rets)
@lazy_property
def summary(self):
d = OrderedDict()
d['ltd'] = self.ltd
d['ltd ann'] = self.ltd_ann
d['mean'] = self.mean
d['mean ann'] = self.mean_ann
d['std'] = self.std
d['std ann'] = self.std_ann
d['sharpe ann'] = self.sharpe_ann
d['sortino'] = self.sortino
d['maxdd'] = self.maxdd
d['maxdd dt'] = self.maxdd_dt
d['dd avg'] = self.dd_avg
d['cnt'] = self.cnt
return pd.Series(d, name=self.rets.index.freq or guess_freq(self.rets.index))
def _repr_html_(self):
from tia.util.fmt import new_dynamic_formatter
fmt = new_dynamic_formatter(method='row', precision=2, pcts=1, trunc_dot_zeros=1, parens=1)
df = self.summary.to_frame()
return fmt(df)._repr_html_()
def get_alpha_beta(self, bm_rets):
if isinstance(bm_rets, pd.Series):
bm = CumulativeRets(bm_rets)
elif isinstance(bm_rets, CumulativeRets):
bm = bm_rets
else:
            raise ValueError('bm_rets must be a Series or CumulativeRets, not %s' % (type(bm_rets)))
bm_freq = guess_freq(bm_rets)
if self.pds_per_year != bm.pds_per_year:
tgt = {'B': 'dly', 'W': 'weekly', 'M': 'monthly', 'Q': 'quarterly', 'A': 'annual'}.get(bm_freq, None)
if tgt is None:
raise ValueError('No mapping for handling benchmark with frequency: %s' % bm_freq)
tmp = getattr(self, tgt)
y = tmp.rets
y_ann = tmp.ltd_ann
else:
y = self.rets
y_ann = self.ltd_ann
x = bm.rets.truncate(y.index[0], y.index[-1])
x_ann = bm.ltd_ann
        # pd.ols was removed from pandas; fit the beta with a plain least-squares line
        aligned = pd.concat([x, y], axis=1, join='inner').dropna()
        beta = np.polyfit(aligned.iloc[:, 0].values, aligned.iloc[:, 1].values, 1)[0]
alpha = y_ann - beta * x_ann
return pd.Series({'alpha': alpha, 'beta': beta}, name=bm_freq)
def plot_ltd(self, ax=None, style='k', label='ltd', show_dd=1, title=True, legend=1):
ltd = self.ltd_rets
ax = ltd.plot(ax=ax, style=style, label=label)
if show_dd:
dd = self.drawdowns
dd.plot(style='r', label='drawdowns', alpha=.5, ax=ax)
ax.fill_between(dd.index, 0, dd.values, facecolor='red', alpha=.25)
fmt = PercentFormatter
AxesFormat().Y.percent().X.label("").apply(ax)
legend and ax.legend(loc='upper left', prop={'size': 12})
        # show the actual date and value
mdt, mdd = self.maxdd_dt, self.maxdd
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.25)
try:
dtstr = '{0}'.format(mdt.to_period())
        except Exception:
# assume daily
dtstr = '{0}'.format(hasattr(mdt, 'date') and mdt.date() or mdt)
ax.text(mdt, dd[mdt], "{1} \n {0}".format(fmt(mdd), dtstr).strip(), ha="center", va="top", size=8,
bbox=bbox_props)
if title is True:
pf = new_percent_formatter(1, parens=False, trunc_dot_zeros=True)
ff = new_float_formatter(precision=1, parens=False, trunc_dot_zeros=True)
total = pf(self.ltd_ann)
vol = pf(self.std_ann)
sh = ff(self.sharpe_ann)
mdd = pf(self.maxdd)
            title = r'ret$\mathregular{_{ann}}$ %s vol$\mathregular{_{ann}}$ %s sharpe %s maxdd %s' % (
                total, vol, sh, mdd)
title and ax.set_title(title, fontdict=dict(fontsize=10, fontweight='bold'))
return ax
def plot_ret_on_dollar(self, title=None, show_maxdd=1, figsize=None, ax=None, append=0, label=None, **plot_args):
plot_return_on_dollar(self.rets, title=title, show_maxdd=show_maxdd, figsize=figsize, ax=ax, append=append,
label=label, **plot_args)
def plot_hist(self, ax=None, **histplot_kwargs):
pf = new_percent_formatter(precision=1, parens=False, trunc_dot_zeros=1)
ff = new_float_formatter(precision=1, parens=False, trunc_dot_zeros=1)
ax = self.rets.hist(ax=ax, **histplot_kwargs)
AxesFormat().X.percent(1).apply(ax)
m, s, sk, ku = pf(self.mean), pf(self.std), ff(self.skew), ff(self.kurtosis)
        txt = r'$\mathregular{\mu}$=%s $\mathregular{\sigma}$=%s skew=%s kurt=%s' % (m, s, sk, ku)
bbox = dict(facecolor='white', alpha=0.5)
ax.text(0, 1, txt, fontdict={'fontweight': 'bold'}, bbox=bbox, ha='left', va='top', transform=ax.transAxes)
return ax
def filter(self, mask, keep_ltd=0):
if isinstance(mask, pd.Series):
mask = mask.values
rets = self.rets.loc[mask]
ltd = None
if keep_ltd:
ltd = self.ltd_rets.loc[mask]
return CumulativeRets(rets=rets, ltd_rets=ltd)
class Performance(object):
def __init__(self, txn_rets):
if isinstance(txn_rets, pd.Series):
txn_rets = CumulativeRets(txn_rets)
self.txn_details = txn_rets
txn = property(lambda self: self.txn_details.rets)
ltd_txn = property(lambda self: self.txn_details.ltd_rets)
dly_details = lazy_property(lambda self: self.txn_details.dly, 'dly_details')
dly = property(lambda self: self.dly_details.rets)
ltd_dly = property(lambda self: self.dly_details.ltd_rets)
ltd_dly_ann = property(lambda self: self.dly_details.ltd_rets_ann)
weekly_details = lazy_property(lambda self: self.txn_details.weekly, 'weekly_details')
weekly = property(lambda self: self.weekly_details.rets)
ltd_weekly = property(lambda self: self.weekly_details.ltd_rets)
ltd_weekly_ann = property(lambda self: self.weekly_details.ltd_rets_ann)
monthly_details = lazy_property(lambda self: self.txn_details.monthly, 'monthly_details')
monthly = property(lambda self: self.monthly_details.rets)
ltd_monthly = property(lambda self: self.monthly_details.ltd_rets)
ltd_monthly_ann = property(lambda self: self.monthly_details.ltd_rets_ann)
quarterly_details = lazy_property(lambda self: self.txn_details.quarterly, 'quarterly_details')
quarterly = property(lambda self: self.quarterly_details.rets)
ltd_quarterly = property(lambda self: self.quarterly_details.ltd_rets)
ltd_quarterly_ann = property(lambda self: self.quarterly_details.ltd_rets_ann)
annual_details = lazy_property(lambda self: self.txn_details.annual, 'annual_details')
annual = property(lambda self: self.annual_details.rets)
ltd_annual = property(lambda self: self.annual_details.ltd_rets)
ltd_annual_ann = property(lambda self: self.annual_details.ltd_rets_ann)
def iter_by_year(self):
"""Split the return objects by year and iterate"""
for yr, details in self.txn_details.iter_by_year():
yield yr, Performance(details)
def filter(self, txn_mask):
details = self.txn_details.filter(txn_mask)
return Performance(details)
def truncate(self, before=None, after=None):
details = self.txn_details.truncate(before, after)
return Performance(details)
def report_by_year(self, summary_fct=None, years=None, ltd=1, prior_n_yrs=None, first_n_yrs=None, ranges=None,
bm_rets=None):
"""Summary the returns
:param summary_fct: function(Rets) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
        :param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
        :param first_n_yrs: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
        :param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
"""
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(performance):
monthly = performance.monthly_details
dly = performance.dly_details
data = OrderedDict()
data['ltd ann'] = monthly.ltd_ann
data['mret avg'] = monthly.mean
data['mret std ann'] = monthly.std_ann
data['sharpe ann'] = monthly.sharpe_ann
data['sortino'] = monthly.sortino
data['maxdd'] = dly.maxdd
data['maxdd dt'] = dly.maxdd_dt
if bm_rets is not None:
abseries = performance.get_alpha_beta(bm_rets)
prefix = {'weekly': 'wkly ', 'monthly': 'mret '}.get(abseries.name, abseries.name)
data['{0}beta'.format(prefix)] = abseries['beta']
data['{0}alpha'.format(prefix)] = abseries['alpha']
data['avg dd'] = dly.dd_avg
data['best month'] = monthly.rets.max()
data['worst month'] = monthly.rets.min()
data['nmonths'] = monthly.cnt
return data
results = OrderedDict()
if years is not False:
for yr, robj in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(robj)
# First n years
if first_n_yrs:
first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
for first in first_n_yrs:
after = '12/31/%s' % (self.dly.index[0].year + first)
firstN = self.truncate(after=after)
results['first {0}yrs'.format(first)] = summary_fct(firstN)
# Ranges
if ranges:
            for rng in ranges:
                yr_start, yr_end = rng
rng_rets = self.truncate('1/1/%s' % yr_start, '12/31/%s' % yr_end)
results['{0}-{1}'.format(yr_start, yr_end)] = summary_fct(rng_rets)
# Prior n years
if prior_n_yrs:
prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
for prior in prior_n_yrs:
before = '1/1/%s' % (self.dly.index[-1].year - prior)
priorN = self.truncate(before)
results['past {0}yrs'.format(prior)] = summary_fct(priorN)
# LTD
if ltd:
results['ltd'] = summary_fct(self)
return pd.DataFrame(results, index=list(results.values())[0].keys()).T
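# Illustrative sketch (not part of the original module): CumulativeRets and
# Performance can be built straight from a datetime-indexed return series; the
# txns-based calculators above are only needed when starting from transaction P&L.
if __name__ == '__main__':
    _idx = pd.bdate_range('2020-01-01', periods=10)
    _rets = pd.Series(.001 * np.arange(10), index=_idx)
    _crets = CumulativeRets(_rets)
    print(_crets.ltd)      # compounded life-to-date return
    print(_crets.summary)  # Series of the basic metrics defined above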
| 2.046875 | 2 |
babybuddy/settings/development.py | MCAdkins0/babybuddy | 0 | 12773352 |
from .base import *
# Quick-start development settings - unsuitable for production
# https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
SECRET_KEY = '<PASSWORD>'
DEBUG = True
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
#
# Comment out STATICFILES_STORAGE and uncomment DEBUG = False to test with
# production static files.
# DEBUG = False
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# Django Rest Framework
# https://www.django-rest-framework.org/
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
)
| 1.664063 | 2 |
examples/example_reading_LAMDA_files.py | gica3618/pythonradex | 5 | 12773353 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 16:37:59 2017
@author: gianni
"""
from pythonradex import LAMDA_file
import numpy as np
data_filepath = './co.dat' #relative or absolute path to the LAMDA datafile
data = LAMDA_file.read(data_filepath)
levels = data['levels']
rad_transitions = data['radiative transitions']
coll_transitions = data['collisional transitions']
print('Third level statistical weight: {:g}'.format(levels[2].g))
print('Third level energy: {:g} J'.format(levels[2].E))
print('Third level number: {:d}'.format(levels[2].number)) #index is 0 based
print('\n')
print('There are {:d} radiative transitions'.format(len(rad_transitions)))
#choose some random radiative transition:
rad_trans = rad_transitions[10]
print('Upper level stat weight: {:g}'.format(rad_trans.up.g))
print('Lower level energy: {:g} J'.format(rad_trans.low.E))
print('frequency: {:g} Hz'.format(rad_trans.nu0))
print('Energy difference: {:g} J'.format(rad_trans.Delta_E))
print('Einstein A21: {:g}'.format(rad_trans.A21))
print('example excitation temperature:')
print(rad_trans.Tex(x1=0.3,x2=0.1))
#one can also give numpy arrays as input:
x1 = np.array((0.1,0.5,0.15))
x2 = np.array((0.05,0.1,0.07))
print(rad_trans.Tex(x1=x1,x2=x2))
print('\n')
print(coll_transitions.keys())
coll_transitions_ortho_H2 = coll_transitions['ortho-H2']
print('there are {:d} ortho-H2 coll transitions'.format(len(coll_transitions_ortho_H2)))
#choose random collisional transition:
coll_trans = coll_transitions['ortho-H2'][99]
print('number of upper level: {:d}'.format(coll_trans.up.number))
print('stat weight of lower level: {:g}'.format(coll_trans.low.g))
print('energy difference of transitions: {:g} J'.format(coll_trans.Delta_E))
print('transition name: {:s}'.format(coll_trans.name))
Tkin = 100.5
print('coll coeff K21 (at T={:g} K): {:g} m3/s'.format(Tkin,coll_trans.coeffs(Tkin)['K21']))
#one can also give numpy arrays as input:
Tkin = np.array((52.3,70.4,100.2,150.4))
print(coll_trans.coeffs(Tkin=Tkin)) | 2.5625 | 3 |
apps/tests.py | roycek7/django_application | 0 | 12773354 | import unittest
from django.test import Client
from django.urls import reverse
from rest_framework import status
client = Client()
class VerifyTestCases(unittest.TestCase):
def setUp(self):
self.valid_payload = {
'third_party_company_name': 'UD Saragih Tbk'
}
self.valid_payload_f = {
'third_party_company_name': 'PT Hutasoit Januar (Persero) Tbk'
}
self.not_found_payload = {
'third_party_company_name': 'FAANG'
}
self.invalid = {
'third_party_company_name': ''
}
def test_check_valid_true(self):
response = client.post(reverse('verify_company'), self.valid_payload)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'vendor_company': {'UD Saragih Tbk'}, 'user': {True}})
def test_check_valid_false(self):
response = client.post(reverse('verify_company'), self.valid_payload_f)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'vendor_company': {'PT Hutasoit Januar (Persero) Tbk'}, 'user': {False}})
def test_check_notfound(self):
response = client.post(reverse('verify_company'), self.not_found_payload)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, "FAANG Vendor Does Not Exist!")
def test_check_invalid(self):
response = client.post(reverse('verify_company'), self.invalid)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "Vendor Name is required!")
class TransactionTestCases(unittest.TestCase):
def setUp(self):
self.valid_payload = {
'company_name': '<NAME>',
'company_vendor': 'PT Putra',
'from_date': '2020-04-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
self.not_found_payload = {
'company_name': '<NAME>',
'company_vendor': 'FAANG',
'from_date': '2020-04-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
self.invalid = {
'company_name': '',
'company_vendor': '',
'from_date': '',
'to_date': ''
}
self.greater_date = {
'company_name': '<NAME>',
'company_vendor': 'PT Putra',
'from_date': '2020-06-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
self.invalid_date = {
'company_name': '<NAME>',
'company_vendor': 'PT Putra',
'from_date': '-06-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
def test_check_valid(self):
response = client.post(reverse('transaction_frequency'), self.valid_payload)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {"companies": "Perum Prasetya Permadi & PT Putra", "transactions": {47}})
def test_check_notfound(self):
response = client.post(reverse('transaction_frequency'), self.not_found_payload)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, "FAANG Vendor Does Not Exist!")
def test_check_invalid(self):
response = client.post(reverse('transaction_frequency'), self.invalid)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "Vendor Name is required!")
def test_check_greater_date(self):
response = client.post(reverse('transaction_frequency'), self.greater_date)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "to_date, to_date must be bigger than from_date")
def test_check_invalid_date(self):
response = client.post(reverse('transaction_frequency'), self.invalid_date)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "Date: invalid_format")
| 2.84375 | 3 |
ocnn/octree/python/ocnn/utils/off_utils.py | FrozenSilent/O-CNN | 6 | 12773355 | """ Functions to fix off file headers """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Pool
from ocnn.utils.file_utils import find_files
def clean_off_file(file_name):
""" Fixes header of OFF file
Args:
file_name (str): Name of file to fix
"""
file_cleaned = False
with open(file_name) as f_check:
file_str = f_check.read()
if file_str[0:3] != 'OFF':
raise AttributeError('Unexpected Header for {0}'.format(file_name))
elif file_str[0:4] != 'OFF\n':
new_str = file_str[0:3] + '\n' + file_str[3:]
with open(file_name, 'w') as f_rewrite:
f_rewrite.write(new_str)
file_cleaned = True
return file_cleaned
def clean_off_folder(input_folder):
""" Fixes headers of all OFF files in a given folder.
Args:
input_folder (str): Folder to search for off files
"""
    file_list = find_files(input_folder, '*.[Oo][Ff][Ff]')
    with Pool() as executor:  # close the worker pool when done
        files_cleaned_list = executor.map(clean_off_file, file_list)
num_files_cleaned = 0
for file_cleaned in files_cleaned_list:
if file_cleaned:
num_files_cleaned += 1
print("{0} out of {1} files cleaned".format(num_files_cleaned, len(files_cleaned_list)))
| 3.125 | 3 |
py/gui/kivy/setUpGui.py | EdgardoCS/Arduino_tesis | 1 | 12773356 | from kivy.app import App
from kivy.lang import Builder
from kivy.properties import StringProperty, ObjectProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.anchorlayout import AnchorLayout
Builder.load_string("""
<Boxes>:
AnchorLayout:
anchor_x: 'center'
anchor_y: 'top'
size_hint: 1, .9
BoxLayout:
orientation: 'vertical'
padding: 10
BoxLayout:
padding: 10
orientation: 'horizontal'
textinputtext1: txt1.text
textinputtext2: txt2.text
textinputtext3: txt3.text
Button:
size_hint: 0.5,0.5
on_press: root.print_txt()
text:'Set velocities'
TextInput:
font_size: 40
id: txt1
text: root.textinputtext1
Button:
on_press: root.print_txt()
text:'Set samples'
TextInput:
id: txt2
text: root.textinputtext2
Button:
on_press: root.print_txt()
text:'Set pause'
TextInput:
id: txt3
text: root.textinputtext3
BoxLayout:
orientation: 'horizontal'
Button:
text: "2"
Button:
text: "3"
Button:
text: "4"
BoxLayout:
orientation: 'horizontal'
Button:
text: "5"
Button:
text: "6"
BoxLayout:
orientation: 'horizontal'
Button:
text: "7"
Button:
text: "8"
Button:
text: "9"
Button:
text: "10"
AnchorLayout:
anchor_x: 'center'
anchor_y: 'bottom'
BoxLayout:
orientation: 'horizontal'
size_hint: 1, .1
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 2'
on_press: _screen_manager.current = 'screen2'""")
class Boxes(FloatLayout):
textinputtext1 = StringProperty()
textinputtext2 = StringProperty()
textinputtext3 = StringProperty()
def __init__(self, **kwargs):
super(Boxes, self).__init__(**kwargs)
self.textinputtext1 = 'palim'
self.textinputtext2 = '5'
self.textinputtext3 = '20'
def print_txt(self):
print(self.textinputtext1)
print(self.textinputtext2)
print(self.textinputtext3)
class TestApp(App):
def build(self):
return Boxes()
if __name__ == '__main__':
TestApp().run()
| 2.8125 | 3 |
scripts/import_adminlevels.py | mercycorps/TolaWorkflow | 0 | 12773357 | from collections import OrderedDict
import csv
from workflow.models import Country, Province, District, AdminLevelThree, Village
"""
Import admin levels from a csv file. First column should be country, second
should be province, etc... Country should be created before you try to upload the file, this
script will not add countries that aren't in the database.
Requires module django-extensions
Syntax: sudo py manage.py runscript import_adminlevels --script-args /path/to/file.csv
"""
def run(*args):
counts = OrderedDict([
('provinces', 0), ('districts', 0), ('admin level 3s', 0),
('villages', 0), ])
skipped = {}
rows_with_blank_values = 0
    with open(args[0], 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
try:
c = Country.objects.get(country=row[0])
if row[1]:
p, created = Province.objects.get_or_create(name=row[1], country=c)
if created:
counts['provinces'] += 1
else:
rows_with_blank_values += 1
continue
if row[2]:
d, created = District.objects.get_or_create(name=row[2], province=p)
if created:
counts['districts'] += 1
else:
rows_with_blank_values += 1
continue
if row[3]:
a3, created = AdminLevelThree.objects.get_or_create(name=row[3], district=d)
if created:
counts['admin level 3s'] += 1
else:
rows_with_blank_values += 1
continue
if row[4]:
v, created = Village.objects.get_or_create(name=row[4], admin_3=a3, district=d)
if created:
counts['villages'] += 1
else:
rows_with_blank_values += 1
continue
except IndexError:
pass
except Country.DoesNotExist:
try:
skipped[row[0]] += 1
except KeyError:
skipped[row[0]] = 1
    for key in counts:
        print("Inserted %s: %s" % (key, counts[key]))
    print('')
    if len(skipped) > 0:
        for country in sorted(skipped.keys()):
            print("Couldn't find the country \"%s\" in the database. Skipped %s rows associated with %s." % (
                country, skipped[country], country))
    if rows_with_blank_values > 0:
        print("\n%s rows had blank values" % rows_with_blank_values)
| 2.8125 | 3 |
Languages/Python/python_study_4/page6/script.py | myarist/Progate | 5 | 12773358 |
class MenuItem:
# Definiskan method info
def info(self):
print('Tampilkan nama dan harga dari menu item')
menu_item1 = MenuItem()
menu_item1.name = '<NAME>'
menu_item1.price = 5
# Panggil method info dari menu_item1
menu_item1.info()
menu_item2 = MenuItem()
menu_item2.name = '<NAME>'
menu_item2.price = 4
# Panggil method info dari menu_item2
menu_item2.info()
| 2.984375 | 3 |
scripts/scrap_author/scrap_author/pipelines.py | Tarpelite/2019Spring_SoftwareAnalysis | 6 | 12773359 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from .spiders.scrap_authors import author,article
import json
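# As the note above says, pipelines only run once registered in settings.py;
# a sketch of that registration (the order values are arbitrary):
#
#   ITEM_PIPELINES = {
#       'scrap_author.pipelines.ArticleJsonWriterPipeline': 300,
#       'scrap_author.pipelines.AuthorJsonWriterPipeline': 400,
#   }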
"""
class ScrapAuthorPipeline(object):
def process_item(self, item, spider):
authorset=set()
if not isinstance(item,author):
return item
else:
if item['name'] not in authorset:
authorset.add(item['name'])
return item
else:
raise DropItem
class ScrapArticlePipeline(object):
def process_item(self,item,spider):
articleset=set()
if not isinstance(item,article):
return item
else:
if item['url'] not in articleset:
articleset.add(item['url'])
return item
else:
raise DropItem
"""
class ArticleJsonWriterPipeline(object):
def open_spider(self, spider):
self.file = open('article.jl', 'wb')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
if not isinstance(item,article):
return item
else:
line = json.dumps(dict(item),ensure_ascii=False) + "\n"
self.file.write(line.encode('utf-8'))
return item
class AuthorJsonWriterPipeline(object):
def open_spider(self, spider):
self.file = open('author.jl', 'wb')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
if not isinstance(item,author):
return item
else:
line = json.dumps(dict(item),ensure_ascii=False) + "\n"
self.file.write(line.encode('utf-8'))
return item | 2.671875 | 3 |
speedysvc/logger/time_series_data/TimeSeriesData.py | mcyph/shmrpc | 4 | 12773360 |
from struct import Struct
from time import time, sleep
from collections import deque, Counter
from abc import ABC, abstractmethod
from os.path import getsize, exists
from _thread import allocate_lock, start_new_thread
from psutil import AccessDenied
# OPEN ISSUE: It might be easier to keep this to stats obtained by psutil,
# so as to not require communication with the child processes?
# I'm not sure how useful that data is actually likely to be
SAMPLE_INTERVAL_SECS = 5
_sample_loop_started = [False]
_LTimeSeriesData = []
def _time_series_loop():
"""
Monitor time series data in a
single thread to minimize resources
"""
while True:
for tsd in _LTimeSeriesData[:]:
try:
tsd.sample_data_loop()
except:
import traceback
traceback.print_exc()
sleep(SAMPLE_INTERVAL_SECS)
class TimeSeriesData(ABC):
def __init__(self, LFormat,
fifo_cache_len=300,
sample_interval_secs=5,
start_collecting_immediately=False):
"""
A base class for binary-backed time series data.
The first item for each "entry" is the unix timestamp
from the epoch in seconds.
Note that subclasses define the metadata
(i.e. how to read the data/which fields there are)
and so if LFormat changes, then the file will not be readable.
For this reason, it's probably best to create new subclasses
rather than modify existing ones, if backwards compatibility
is desired.
:param LFormat: a tuple/list of ((struct format, property name), ...).
struct format is one of the values at
https://docs.python.org/3/library/struct.html,
and the property name is something unique/descriptive,
e.g. 'io_reads'
"""
self.LFormat = LFormat
self.fifo_cache_len = fifo_cache_len
self.sample_interval_secs = sample_interval_secs
self.lock = allocate_lock()
# Start off with a timestamp, down to the second
# (4 bytes as in Unix seconds since epoch)
LOut = ['!I']
for typecode, name in LFormat:
LOut.append(typecode)
self.struct = Struct(''.join(LOut))
        # FIFO cache holding the most recent samples
self.deque = deque(maxlen=fifo_cache_len)
# Fill the rest of the FIFO cache with blank values
for i in range(fifo_cache_len - len(self.deque)):
# CHECK ME!!! ===========================================================================================
# This might throw off the averages!!
self.deque.appendleft({
property: 0
for _, property in [(None, 'timestamp')]+list(LFormat)
})
_LTimeSeriesData.append(self)
self.collecting_data = False
if start_collecting_immediately:
self.start_collecting()
def start_collecting(self):
"""
Start the collection of data
"""
if self.collecting_data:
# Can't start collection if already are
# Best to raise an exception explicitly here,
# as could indicate start_collecting_immediately
# was mistakenly set, etc
raise Exception("Collection of data already started")
if not _sample_loop_started[0]:
_sample_loop_started[0] = True
start_new_thread(_time_series_loop, ())
self.collecting_data = True
def stop_collecting(self):
"""
Pause the collection of data
"""
if not self.collecting_data:
raise Exception("Not currently collecting data")
self.collecting_data = False
#=========================================================================#
# Recording of Data #
#=========================================================================#
def sample_data_loop(self):
if self.collecting_data:
try:
DSample = self.sample_data()
if DSample: # WARNING!!! ======================================
self.__add_sample(**DSample)
except AccessDenied:
self.stop_collecting()
except:
import traceback
traceback.print_exc()
@abstractmethod
def sample_data(self):
"""
Must be implemented by subclasses
:return: a dictionary with all of the keys provided in LFormat
"""
pass
def __add_sample(self, **items):
"""
Add to both the limited in-memory samples, and write them to disk
:param items:
:return:
"""
for key in items:
assert key in [i[1] for i in self.LFormat], key
items = items.copy()
items['timestamp'] = int(time())
self.deque.appendleft(items)
#=========================================================================#
# Retrieval of Data From Disk #
#=========================================================================#
def __len__(self):
"""
Get total number of entries
:return: Get the total number of entries as an int
"""
return len(self.deque)
def __getitem__(self, item):
"""
Get a specific time series data item
:param item: the index (not the timestamp)
relative to the first item
:return: a dict
"""
return self.deque[item]
def __iter__(self):
with self.lock:
for i in self.iterate_forwards():
yield i
def iterate_forwards(self):
"""
Simply iterate thru using the __getitem__ method
from the first to the last entry.
"""
for x in range(len(self)-1, -1, -1):
yield self[x]
def iterate_backwards(self):
for x in range(len(self)):
yield self[x]
def select_range(self, from_time, to_time):
"""
Get statistics over a specified time range.
:param from_time: the number of seconds since
the epoch, as in unix timestamps
:param to_time: the number of seconds since
the epoch, as in unix timestamps
"""
# TODO: Use a bisect algorithm/store seek positions for certain
# timestamps to prevent having to go thru every single record!!! ===========================================================================
for DRecord in self:
if from_time <= DRecord['timestamp'] <= to_time:
yield DRecord
def get_average_over(self, from_time, to_time):
"""
Get an average of all recent values - a single value.
May not make sense for all kinds of data.
        NOTE: This raises a ZeroDivisionError if
there aren't any values collected!
:param from_time: the number of seconds since
the epoch, as in unix timestamps
:param to_time: the number of seconds since the epoch,
as in unix timestamps
:return: an integer
"""
DVals = Counter()
num_vals = 0
with self.lock:
for DRecord in self.deque:
if from_time <= DRecord['timestamp'] <= to_time:
for property in DRecord:
if property == 'timestamp':
continue
DVals[property] += DRecord[property]
num_vals += 1
return {
key: val / num_vals
for key, val
in DVals.items()
}
#=========================================================================#
# Short-term In-Memory Data Processing #
#=========================================================================#
def get_last_record(self):
"""
:return:
"""
return self.deque[0]
def get_recent_values(self, reversed=True):
"""
Get a list of the most recent values.
By default in reversed order, so as to allow
for graphs displayed from right to left.
:param reversed: True/False
:return: a list of the most recent values,
of length self.fifo_cache_len
"""
if reversed:
return list(self.deque)[::-1]
else:
return list(self.deque)[::]
def get_recent_average(self, property):
"""
Get an average of all recent values -
a single value.
May not make sense for all kinds of data.
        NOTE: This raises a ZeroDivisionError if
there aren't any values collected!
:param property: The name of the property,
as in LFormat
:return: an integer
"""
val = 0
num_vals = 0
        with self.lock:
            for DRecord in self.deque:
                val += DRecord[property]
                num_vals += 1  # count records so the average divides correctly
        return val / num_vals
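# Illustrative sketch (not part of the original module): a minimal concrete
# subclass showing how LFormat and sample_data() fit together. The field name
# and the psutil call are assumptions for demonstration only.
class ExampleCpuSeries(TimeSeriesData):
    def __init__(self, **kwargs):
        TimeSeriesData.__init__(self, LFormat=(('d', 'cpu_percent'),), **kwargs)

    def sample_data(self):
        from psutil import cpu_percent
        # must return a dict keyed by the property names given in LFormat
        return {'cpu_percent': cpu_percent()}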
| 2.359375 | 2 |
secondary_market/mailsender.py | chinmaysb/fomoboard | 0 | 12773361 | from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
# set up the SMTP server
my_login = '<EMAIL>'
my_server = 'smtp.office365.com'
my_port = '587'
my_password = '<PASSWORD>'
def send_mail(to, subject, body):
msg = MIMEMultipart() # create a message
# setup the parameters of the message
msg['From'] = my_login
msg['To'] = to
msg['Subject'] = subject
# add in the message body
msg.attach(MIMEText(body, 'plain'))
s = smtplib.SMTP(my_server, my_port)
s.starttls()
s.login(my_login, my_password)
# send the message via the server set up earlier.
s.send_message(msg)
del msg
s.quit() | 2.84375 | 3 |
tests/test_headerfile.py | MoseleyBioinformaticsLab/chi2plookup | 0 | 12773362 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import subprocess
from scipy.stats import chi2
TESTFILE_TEMPLATE = """#include <iostream>
#include "Chi2PLookup.h"
int main() {{
Chi2PLookup Chi2PLookupTable;
double x = {0};
int df = {1};
double outvalue;
outvalue = Chi2PLookupTable.getPValue(x, df);
std::cout << outvalue << "\\n";
return 0;
}}
"""
def test_headerfile(template=TESTFILE_TEMPLATE, testvalue=1.1,
df=1, precision=10000, start_chi=25, headerfile="tests/Chi2PLookup.h",
srcfpath="tests/test.cpp", binfpath="tests/test.out"):
"""Test generated header file within cpp source file.
:param str template: Template file that contains main() function and imports header file.
:param testvalue: Chi value.
    :param int df: Degrees of freedom.
:param str srcfpath: Path where source file will be saved.
:param str binfpath: Path where binary file will be saved.
:return: None
:rtype: None
"""
command = "python -m chi2plookup generate --headerfile={} --df={} --precision={} --start_chi={}".format(headerfile, df, precision, start_chi)
subprocess.call(command, shell=True)
p_value = 1 - chi2.cdf(testvalue, df)
template = template.format(testvalue, df)
with open(srcfpath, "w") as outfile:
outfile.write(template)
subprocess.call("g++ -std=c++11 {} -o {}".format(srcfpath, binfpath), shell=True)
generated_p_value = subprocess.check_output("./{}".format(binfpath))
assert round(float(p_value), 6) == round(float(generated_p_value.strip()), 6)
| 2.4375 | 2 |
SOLID/Lab/03_LSP/ducks.py | vasetousa/OOP | 0 | 12773363 | from abc import abstractmethod, ABC
""" BEFORE """
# class Duck(ABC):
# pass
#
#
# class RubberDuck(Duck):
# @staticmethod
# def quack():
# return "Squeek"
#
# @staticmethod
# def walk():
# """Rubber duck can walk only if you move it"""
# raise Exception('I cannot walk by myself')
#
# @staticmethod
# def fly():
# """Rubber duck can fly only if you throw it"""
# raise Exception('I cannot fly by myself')
#
#
# class RobotDuck(Duck):
# HEIGHT = 50
#
# def __init__(self):
# self.height = 0
#
# @staticmethod
# def quack():
# return 'Robotic quacking'
#
# @staticmethod
# def walk():
# return 'Robotic walking'
#
# def fly(self):
# """can only fly to specific height but
# when it reaches it starts landing automatically"""
# if self.height == RobotDuck.HEIGHT:
# self.land()
# else:
# self.height += 1
#
# def land(self):
# self.height = 0
""" AFTER CORRECTION"""
class Duck(ABC):
@staticmethod
@abstractmethod
def quack():
return "Squeek"
class RubberDuck(Duck):
@staticmethod
def quack():
return "Squeek"
class RobotDuck(Duck):
HEIGHT = 50
def __init__(self):
self.height = 0
@staticmethod
def quack():
return 'Robotic quacking'
@staticmethod
def walk():
return 'Robotic walking'
def fly(self):
"""can only fly to specific height but
when it reaches it starts landing automatically"""
if self.height == RobotDuck.HEIGHT:
self.land()
else:
self.height += 1
def land(self):
self.height = 0
class LiveDuck(Duck):
@staticmethod
def quack():
return 'quacking'
@staticmethod
def walk():
return 'walking'
def fly(self):
return 'flying'
@staticmethod
def eat():
return 'eating'
duck = RobotDuck()
print(duck.fly())
| 4.375 | 4 |
src/zoslogs/utils.py | Tam-Lin/zoslogs | 0 | 12773364 | import datetime
def julian_to_datetime(input_string: str):
"""
    :param input_string: Julian date string to convert (YYJJJ or YYYYJJJ)
:rtype: datetime object
"""
if len(input_string) == 5:
date = datetime.datetime.strptime(input_string, '%y%j')
elif len(input_string) == 7:
date = datetime.datetime.strptime(input_string, '%Y%j')
else:
raise UtilityException("Incorrect parameter length passed to "
"julian_to_datetime")
return date
class UtilityException(Exception):
pass
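# Illustrative sketch (not part of the original module):
if __name__ == "__main__":
    print(julian_to_datetime("21123"))    # YYJJJ   -> 2021-05-03 00:00:00
    print(julian_to_datetime("2021123"))  # YYYYJJJ -> 2021-05-03 00:00:00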
| 3.640625 | 4 |
growingspheres/growingspheres.py | juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations | 0 | 12773365 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .utils.gs_utils import generate_inside_ball, get_distances
from itertools import combinations
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils import check_random_state
class GrowingSpheres:
"""
class to fit the Original Growing Spheres algorithm
Inputs:
obs_to_interprete: instance whose prediction is to be interpreded
prediction_fn: prediction function, must return an integer label
    caps: min/max values of the explored area; if not None, every coordinate of the generated instances is clipped to [caps[0], caps[1]]
"""
def __init__(self,
obs_to_interprete,
prediction_fn,
target_class=None,
caps=None,
n_in_layer=2000,
first_radius=0.1,
dicrease_radius=10,
sparse=True,
verbose=False,
continuous_features=None,
categorical_features=[],
categorical_values=[],
feature_variance=None,
farthest_distance_training_dataset=None,
probability_categorical_feature=None,
min_counterfactual_in_sphere=0,
max_features=None,
min_features=None):
"""
"""
self.obs_to_interprete = obs_to_interprete
self.prediction_fn = prediction_fn
self.y_obs = prediction_fn(obs_to_interprete.reshape(1, -1))
if target_class == None: #To change: works only for binary classification...
target_class = 1 - self.y_obs
self.target_class = target_class
self.caps = caps
self.n_in_layer = n_in_layer
self.first_radius = first_radius
self.dicrease_radius = dicrease_radius
self.sparse = sparse
# For experiments to compare with Growing Fields on dataset with categorical features
self.continuous_features = continuous_features
self.categorical_features = categorical_features
self.categorical_values = categorical_values
self.verbose = verbose
if int(self.y_obs) != self.y_obs:
raise ValueError("Prediction function should return a class (integer)")
def find_counterfactual(self):
"""
Finds the decision border then perform projections to make the explanation sparse.
"""
ennemies_, radius = self.exploration()
ennemies = sorted(ennemies_,
key= lambda x: pairwise_distances(self.obs_to_interprete.reshape(1, -1), x.reshape(1, -1)))
self.e_star = ennemies[0]
if self.sparse == True:
out = self.feature_selection(ennemies[0])
else:
out = ennemies[0]
print("I AM IN GS")
return out, ennemies, radius
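    # Illustrative usage sketch (not part of the original class), assuming `clf`
    # is a fitted binary classifier and `x` a 1-d numpy array:
    #
    #   gs = GrowingSpheres(x, lambda X: clf.predict(X))
    #   counterfactual, ennemies, radius = gs.find_counterfactual()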
def exploration(self):
"""
Exploration of the feature space to find the decision boundary. Generation of instances in growing hyperspherical layers.
"""
n_ennemies_ = 999
radius_ = self.first_radius
while n_ennemies_ > 0:
first_layer_ = self.ennemies_in_layer_((0, radius_), self.caps, self.n_in_layer)
n_ennemies_ = first_layer_.shape[0]
radius_ = radius_ / self.dicrease_radius
if self.verbose == True:
print("%d ennemies found in initial sphere. Zooming in..."%n_ennemies_)
else:
if self.verbose == True:
print("Exploring...")
iteration = 0
step_ = (self.dicrease_radius - 1) * radius_/5.0
while n_ennemies_ <= 0:
layer = self.ennemies_in_layer_((radius_, radius_ + step_), self.caps, self.n_in_layer)
n_ennemies_ = layer.shape[0]
radius_ = radius_ + step_
iteration += 1
if self.verbose == True:
print("Final number of iterations: ", iteration)
if self.verbose == True:
print("Final radius: ", (radius_ - step_, radius_))
print("Final number of ennemies: ", n_ennemies_)
return layer, radius_
def ennemies_in_layer_(self, segment, caps=None, n=1000):
"""
Basis for GS: generates a hypersphere layer, labels it with the blackbox and returns the instances that are predicted to belong to the target class.
"""
layer = self.generate_inside_spheres(self.obs_to_interprete, segment, n)
#cap here: not optimal
        if caps is not None:
cap_fn_ = lambda x: min(max(x, caps[0]), caps[1])
layer = np.vectorize(cap_fn_)(layer)
preds_ = self.prediction_fn(layer)
return layer[np.where(preds_ == self.target_class)]
def feature_selection(self, counterfactual):
"""
Projection step of the GS algorithm. Make projections to make (e* - obs_to_interprete) sparse.
Heuristic: sort the coordinates of np.abs(e* - obs_to_interprete) in ascending order and project as long as it does not change the predicted class
Inputs:
counterfactual: e*
"""
if self.verbose == True:
print("Feature selection...")
move_sorted = sorted(enumerate(abs(counterfactual - self.obs_to_interprete)), key=lambda x: x[1])
move_sorted = [x[0] for x in move_sorted if x[1] > 0.0]
out = counterfactual.copy()
reduced = 0
for k in move_sorted:
new_enn = out.copy()
new_enn[k] = self.obs_to_interprete[k]
if self.prediction_fn(new_enn.reshape(1, -1)) == self.target_class:
out[k] = new_enn[k]
reduced += 1
if self.verbose == True:
print("Reduced %d coordinates"%reduced)
return out
def feature_selection_all(self, counterfactual):
"""
Try all possible combinations of projections to make the explanation as sparse as possible.
Warning: really long!
"""
if self.verbose == True:
print("Grid search for projections...")
for k in range(self.obs_to_interprete.size):
print('==========', k, '==========')
for combo in combinations(range(self.obs_to_interprete.size), k):
out = counterfactual.copy()
new_enn = out.copy()
for v in combo:
new_enn[v] = self.obs_to_interprete[v]
if self.prediction_fn(new_enn.reshape(1, -1)) == self.target_class:
print('bim')
out = new_enn.copy()
reduced = k
if self.verbose == True:
print("Reduced %d coordinates"%reduced)
return out
def generate_inside_spheres(self, center, segment, n, feature_variance=None):
"""
Args:
"center" corresponds to the target instance to explain
Segment corresponds to the size of the hypersphere
n corresponds to the number of instances generated
feature_variance: Array of variance for each continuous feature
"""
def norm(v):
v= np.linalg.norm(v, ord=2, axis=1)
return v
#def perturb_continuous_features(continuous_features, n, segment, center, matrix_perturb_instances):
"""
Perturb each continuous features of the n instances around center in the area of a sphere of radius equals to segment
Return a matrix of n instances of d dimension perturbed based on the distribution of the dataset
"""
"""
d = len(continuous_features)
z = np.random.normal(0, 1, (n, d))
# Draw uniformaly instances between the value of segment[0]**d and segment[1]**d with d the number of dimension of the instance to explain
u = np.random.uniform(segment[0]**d, segment[1]**d, n)
r = u**(1/float(d))
z = np.array([a * b / c for a, b, c in zip(z, r, norm(z))])
to_add = np.zeros((n, len(center)))
for continuous in continuous_features:
to_add[:,continuous] = center[continuous]
z = z + to_add[:,continuous_features]
for nb, continuous in enumerate(continuous_features):
matrix_perturb_instances[:,continuous] = z[:,nb].ravel()
return matrix_perturb_instances
"""
# Just for clarity of display
d = center.shape[0]
"""z = np.zeros((n,d))
if feature_variance is not None:
for feature in range(d):
# Modify the generation of artificial instance depending on the variance of each feature
z[:,feature] = np.random.normal(0, feature_variance[feature], n)
else:
z = np.random.normal(0, 1, (n, d))"""
z = np.random.normal(0, 1, (n, d))
        # Uniformly draw instances between segment[0]**d and segment[1]**d, with d the dimension of the instance to explain
u = np.random.uniform(segment[0]**d, segment[1]**d, n)
r = u**(1/float(d))
z = np.array([a * b / c for a, b, c in zip(z, r, norm(z))])
z = z + center
return z
"""if self.categorical_features != []:
matrix_perturb_instances = np.zeros((n, len(center)))
for i in range(len(self.categorical_features)):
# add for each categorical feature these values to be considered as a probability
matrix_perturb_instances[:, self.categorical_features[i]] = center[self.categorical_features[i]]
matrix_perturb_instances = perturb_continuous_features(self.continuous_features, n, segment, center, matrix_perturb_instances)
return matrix_perturb_instances
else:
# Just for clarity of display
d = center.shape[0]
z = np.zeros((n,d))
if feature_variance is not None:
for feature in range(d):
# Modify the generation of artificial instance depending on the variance of each feature
z[:,feature] = np.random.normal(0, feature_variance[feature], n)
else:
z = np.random.normal(0, 1, (n, d))
# Draw uniformaly instances between the value of segment[0]**d and segment[1]**d with d the number of dimension of the instance to explain
u = np.random.uniform(segment[0]**d, segment[1]**d, n)
r = u**(1/float(d))
z = np.array([a * b / c for a, b, c in zip(z, r, norm(z))])
z = z + center
return z""" | 2.90625 | 3 |
scripts/gui.py | RT-EGG/leword_vocab | 0 | 12773366 | import string
import tkinter
from search_option import SearchOption
class GUI:
def __init__(self) -> None:
self.window = None
self.entries_known_character = []
self.entry_contain_characters = None
self.entry_remove_characters = None
self.check_remove_duplicate = None
self.list_box_words = None
self.next_character_entries = {}
self.value_remove_duplicate = None
self.value_list_box_word = None
self.search_execute_command = None
def mainloop(self):
self.window = tkinter.Tk()
self.window.title('leword_vocab')
self.window.geometry("600x400")
self.frame_options = tkinter.Frame(self.window, width=200)
self.frame_options.propagate(False)
self.frame_options.pack(side=tkinter.LEFT, fill=tkinter.Y)
self.button_search = tkinter.Button(self.frame_options, text='検索',
command=lambda: self.__search_button_click())
self.button_search.propagate(True)
self.button_search.grid(row=0, column=0, rowspan=1, columnspan=5, sticky=tkinter.E+tkinter.W, pady=2, padx=2)
self.label_known_characters = tkinter.Label(self.frame_options, text='判明済の文字')
self.label_known_characters.grid(row=1, column=0, rowspan=1, columnspan=1, sticky=tkinter.W, padx=2, pady=2)
self.entries_known_character = []
entry_font = ("", 48)
entry_validation_command = self.window.register(self.__validate_entry_character)
for i in range(5):
entry = tkinter.Entry(self.frame_options, name=f'character_entry_{i}',
width=2, font=entry_font,
validate='all',
validatecommand=(entry_validation_command, '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'),
justify=tkinter.CENTER)
entry.grid(row=2, column=i, rowspan=1, columnspan=1, padx=2)
self.entries_known_character.append(entry)
if i > 0:
self.next_character_entries[str(self.entries_known_character[i-1])] = entry
self.label_contains_characters = tkinter.Label(self.frame_options, text='含まれる文字')
self.label_contains_characters.grid(row=3, column=0, rowspan=1, columnspan=1, sticky=tkinter.W, padx=2, pady=2)
self.entry_contain_characters = tkinter.Entry(self.frame_options, justify=tkinter.LEFT)
self.entry_contain_characters.grid(row=4, column=0, rowspan=1, columnspan=5, sticky=tkinter.E+tkinter.W, padx=2, pady=2)
self.label_remove_characters = tkinter.Label(self.frame_options, text='除外する文字')
self.label_remove_characters.grid(row=5, column=0, rowspan=1, columnspan=1, sticky=tkinter.W, padx=2, pady=2)
self.entry_remove_characters = tkinter.Entry(self.frame_options, justify=tkinter.LEFT)
self.entry_remove_characters.grid(row=6, column=0, rowspan=1, columnspan=5, sticky=tkinter.E+tkinter.W, padx=2, pady=2)
self.value_remove_duplicate = tkinter.BooleanVar()
self.value_remove_duplicate.set(True)
self.check_remove_duplicate = tkinter.Checkbutton(self.frame_options, variable=self.value_remove_duplicate, text='同じ文字は1回のみ')
self.check_remove_duplicate.grid(row=7, column=0, rowspan=1, columnspan=5, sticky=tkinter.W, padx=2, pady=2)
self.value_list_box_word = tkinter.StringVar()
list_box_words = tkinter.Listbox(self.window, listvariable=self.value_list_box_word, width=200)
list_box_words.propagate(True)
list_box_words.pack(side=tkinter.LEFT, expand=True, fill=tkinter.BOTH)
self.window.mainloop()
def get_search_option(self):
result = SearchOption()
result.search_pattern = ''
for i in range(5):
char = self.entries_known_character[i].get()
if char == '':
result.search_pattern = result.search_pattern + '.'
else:
result.search_pattern = result.search_pattern + char
result.contain_characters = self.entry_contain_characters.get()
result.remove_characters = self.entry_remove_characters.get()
result.remove_duplicate = self.value_remove_duplicate.get()
return result
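    # Illustrative wiring sketch (not part of the original script):
    #
    #   gui = GUI()
    #   gui.search_execute_command = lambda: print(gui.get_search_option().search_pattern)
    #   gui.mainloop()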
def set_word_list(self, in_list):
self.value_list_box_word.set(in_list)
def __validate_entry_character(self, in_action, in_index, in_new_str, in_old_str, in_item, in_validate_options, in_mode, in_name):
if in_mode == 'key':
if not (in_item in string.ascii_lowercase):
if in_new_str != '': # delete character
return False
if in_name in self.next_character_entries.keys():
self.next_character_entries[in_name].focus_set()
return True
def __search_button_click(self):
if self.search_execute_command is not None:
self.search_execute_command()
| 3.3125 | 3 |
setup.py | mmgalushka/squids | 0 | 12773367 |
"""
SquiDS setup script.
"""
import setuptools
from squids import __version__
def get_long_description():
"""Reads the long project description from the 'README.md' file."""
with open("README.md", "r", encoding="utf-8") as f:
return f.read()
setuptools.setup(
name="squids",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="The synthetic dataset generator for Computer Vision tasks.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
url="https://github.com/mmgalushka/squids",
project_urls={
"Bug Tracker": "https://github.com/mmgalushka/squids/issues",
},
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
],
package_dir={"": "."},
packages=setuptools.find_packages(where=".", exclude=["tests"]),
python_requires=">=3.6",
)
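# Typical local install sketch (assumption, not part of the original file):
#   python -m pip install -e .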
| 1.484375 | 1 |
fermat_v2.py | Filipe-Barbos/Teste | 0 | 12773368 | # Run this script online
# https://trinket.io/features/python3
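# Worked example of Fermat's little theorem (sketch, not in the original):
# for a = 3 and prime p = 5, a % p != 0, so the second formula below applies:
# (3**(5-1) - 1) / 5 = (81 - 1) / 5 = 16, an integer, so a and p are congruent.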
a = int(input('Enter any integer: '))
result = 0
# Check that p is prime before applying the theorem
while True:
    p = int(input('Enter a prime number: '))
cont = 0
for i in range(1, p + 1):
if p % i == 0:
cont += 1
if cont == 2:
break
else:
        print('{} is not prime!'.format(p))
if (a % p) == 0:
    result = ((a**p) - a) // p  # integer division: the quotient is exact for prime p
    print("First formula")
    if (((a**p) - a) % p) == 0:
        print('Result: {}'.format(result))
        print('a and p are congruent')
    else:
        print('Result: {}'.format(result))
        print('a and p are not congruent')
else:
    p2 = p - 1
    result = ((a**p2) - 1) // p
    print('Second formula')
    if (((a**p2) - 1) % p) == 0:
        print('Result: {}'.format(result))
        print('a and p are congruent')
    else:
        print('Result: {}'.format(result))
        print('a and p are not congruent')
| 3.984375 | 4 |
python/game/gui/tk-realtime_timer.py | rrbb014/rrbb-playground | 0 | 12773369 | import tkinter
TIMER = 0
FONT = "Times New Roman"
def count_up():
global TIMER
TIMER += 1
label["text"] = TIMER
    root.after(1000, count_up)  # re-run count_up after 1 second
if __name__ == "__main__":
root = tkinter.Tk()
label = tkinter.Label(font=(FONT, 80))
label.pack()
root.after(1000, count_up)
root.mainloop()
| 3.859375 | 4 |
examples/count_data.py | thomasaarholt/xgboost-distribution | 17 | 12773370 | """Example of count data sampled from negative-binomial distribution
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from sklearn.model_selection import train_test_split
from xgboost_distribution import XGBDistribution
def generate_count_data(n_samples=10_000):
X = np.random.uniform(-2, 0, n_samples)
n = 66 * np.abs(np.cos(X))
p = 0.5 * np.abs(np.cos(X / 3))
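    # numpy's negative_binomial(n, p) draws the number of failures before the
    # n-th success with per-trial success probability p; its mean is n*(1-p)/p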
y = np.random.negative_binomial(n=n, p=p, size=n_samples)
return X[..., np.newaxis], y
def predict_distribution(model, X, y):
"""Predict a distribution for a given X, and evaluate over y"""
distribution_func = {
"normal": getattr(stats, "norm").pdf,
"laplace": getattr(stats, "laplace").pdf,
"poisson": getattr(stats, "poisson").pmf,
"negative-binomial": getattr(stats, "nbinom").pmf,
}
preds = model.predict(X[..., np.newaxis])
dists = np.zeros(shape=(len(X), len(y)))
for ii, x in enumerate(X):
params = {field: param[ii] for (field, param) in zip(preds._fields, preds)}
dists[ii] = distribution_func[model.distribution](y, **params)
return dists
def create_distribution_heatmap(
model, x_range=(-2, 0), x_steps=100, y_range=(0, 100), normalize=True
):
xx = np.linspace(x_range[0], x_range[1], x_steps)
yy = np.linspace(y_range[0], y_range[1], y_range[1] - y_range[0] + 1)
    xm, ym = np.meshgrid(xx, yy)  # first output varies along xx, second along yy
z = predict_distribution(model, xx, yy)
if normalize:
z = z / z.max(axis=0)
    return xm, ym, z.transpose()
def main():
random_state = 10
np.random.seed(random_state)
X, y = generate_count_data(n_samples=10_000)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)
model = XGBDistribution(
distribution="negative-binomial", # try changing the distribution here
natural_gradient=True,
max_depth=3,
n_estimators=500,
)
model.fit(
X_train,
y_train,
eval_set=[(X_test, y_test)],
early_stopping_rounds=10,
verbose=False,
)
xm, ym, z = create_distribution_heatmap(model)
fig, ax = plt.subplots(figsize=(9, 6))
ax.pcolormesh(
xm, ym, z, cmap="Oranges", vmin=0, vmax=1.608, alpha=1.0, shading="auto"
)
ax.scatter(X_test, y_test, s=0.75, alpha=0.25, c="k", label="data")
plt.show()
if __name__ == "__main__":
main()
| 3.53125 | 4 |
scripts/extract_runtime_from_logs.py | MIDA-group/itkAlphaAMD | 1 | 12773371 | <reponame>MIDA-group/itkAlphaAMD
# Small script for extracting and parsing the runtimes from the logfiles of the registration tools
import numpy as np
def parse_log_runtime(path):
keyword = "Registration time elapsed:"
with open(path, 'r') as myfile:
data = myfile.read()
pos = data.find(keyword)
    if pos < 0:
        raise ValueError('Keyword not found.')  # raising a bare string is invalid Python
    else:
        pos = pos + len(keyword)
        while pos < len(data) and data[pos] == ' ':
            pos = pos + 1
        datatail = data[pos:]
        return float(datatail.split()[0])  # parse only the number, ignoring any trailing log text
def parse_runtimes(paths):
times = np.zeros(len(paths))
for (index, pth) in enumerate(paths):
times[index] = parse_log_runtime(pth)
return times
def parse_runtimes_for_experiment(path_prefix, path_postfix, N, metric, transformation_size, noise_level):
log_file = "register_affine_out.txt"
path = path_prefix + transformation_size + "/" + noise_level + "/" + metric + "/" + path_postfix
paths = [path + ("registration_%d/" % i) + log_file for i in xrange(1, N+1)]
times = parse_runtimes(paths)
return times
if __name__ == "__main__":
# Cilia
metrics = ["alpha_smd", "ssd", "ncc", "mi"]
transformation_sizes = ["all"]
noise_levels = ["large"]
cilia_prefix = "/home/johof680/work/itkAlphaCut-4j/cilia_random_6/"
print("--- CILIA ---")
for t in transformation_sizes:
for n in noise_levels:
for m in metrics:
print(t + ", " + n + ", " + m)
times = parse_runtimes_for_experiment(cilia_prefix, "", 1000, m, t, n)
print(times)
print("Mean: %.3f" % np.mean(times))
print("Std-dev: %.3f" % np.std(times))
# LPBA40
metrics = ["alpha_smd", "ssd", "ncc", "mi"]
transformation_sizes = ["all"]
noise_levels = ["large"]
lpba40_prefix = "/home/johof680/work/itkAlphaCut-4j/lpba40_random_5/"
print("--- LPBA40 ---")
for t in transformation_sizes:
for n in noise_levels:
for m in metrics:
print(t + ", " + n + ", " + m)
times = parse_runtimes_for_experiment(lpba40_prefix, "w_pyramid/", 200, m, t, n)
print(times)
print("Mean: %.3f" % np.mean(times))
print("Std-dev: %.3f" % np.std(times))
| 2.296875 | 2 |
topoflow/components/channels_kinematic_wave.py | mintproject/topoflow36 | 1 | 12773372 | """
This file defines a "kinematic wave" channel flow component and
related functions. It inherits from the channels "base class" in
"channels_base.py".
"""
#-----------------------------------------------------------------------
# Copyright (c) 2001-2019, <NAME>
#
# Sep 2014. New standard names and BMI updates and testing.
# Nov 2013. Converted TopoFlow to a Python package.
# Feb 2013. Adapted to use EMELI framework.
# Oct 2012. CSDMS Standard Names and BMI.
# May 2010. Changes to unit_test() and read_cfg_file().
# Jul 2009. Updates.
# May 2009. Updates.
# Jan 2009. Converted from IDL to Python with I2PY.
#
#-----------------------------------------------------------------------
#
# class channels_component
#
# get_component_name()
# get_attribute() # (10/26/11)
# get_input_var_names() # (defined in channels_base.py)
# get_output_var_names() # (defined in channels_base.py)
# get_var_name() # (defined in channels_base.py)
# get_var_units() # (defined in channels_base.py)
# ------------------------
# update_velocity()
#
#-----------------------------------------------------------------------
import numpy
from topoflow.components import channels_base
#-----------------------------------------------------------------------
class channels_component(channels_base.channels_component):
#-------------------------------------------------------------------
_att_map = {
'model_name': 'Channels_Kinematic_Wave',
'version': '3.1',
'author_name': '<NAME>',
'grid_type': 'uniform',
'time_step_type': 'fixed',
'step_method': 'explicit',
#------------------------------------------------------
'comp_name': 'ChannelsKinWave',
'model_family': 'TopoFlow',
'cfg_template_file': 'Channels_Kinematic_Wave.cfg.in',
'cfg_extension': '_channels_kinematic_wave.cfg',
'cmt_var_prefix': '/ChannelsKinWave/Input/Var/',
'gui_xml_file': '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Channels_Kinematic_Wave.xml',
'dialog_title': 'Channels: Kinematic Wave Parameters',
'time_units': 'seconds' }
#-------------------------------------------------------------------
def get_component_name(self):
return 'TopoFlow_Channels_Kinematic_Wave'
# get_component_name()
#-------------------------------------------------------------------
def get_attribute(self, att_name):
#-----------------------------------------------------------
# This is done in channels_base.set_computed_input_vars()
#-----------------------------------------------------------
# self.KINEMATIC_WAVE = True
# self.DIFFUSIVE_WAVE = False
# self.DYNAMIC_WAVE = False
try:
return self._att_map[ att_name.lower() ]
        except KeyError:
print('###################################################')
print(' ERROR: Could not find attribute: ' + att_name)
print('###################################################')
print(' ')
# get_attribute()
#-------------------------------------------------------------------
def update_velocity(self):
#---------------------------------------------------------
# Notes: Compute u from d and S_bed. (7/13/05 version)
# nval = Manning's n values (grid)
# z0_val = z0 roughness values (grid)
# width = channel bottom widths (grid)
# angle = channel bank angles (grid)
# Could use slopes in cp also, but S_bed has
# been modified from those values to impose a
# minimum slope that is nonzero.
# Rh = hydraulic radius (trapezoid here)
# S = S_bed for KINEMATIC_WAVE option.
# S = S_free for other options.
#---------------------------------------------------------
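        #---------------------------------------------------------
        # For reference, Manning's equation (SI units) reads
        #     u = (1/n) * Rh^(2/3) * S^(1/2)
        # with n the roughness coefficient described above.
        #---------------------------------------------------------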
#------------------------
# Use Manning's formula
#------------------------
if (self.MANNING):
self.u = self.manning_formula()
#--------------------------------------
# Use the Logarithmic Law of the Wall
#--------------------------------------
if (self.LAW_OF_WALL):
self.u = self.law_of_the_wall()
#------------------------------------------
# Use a constant velocity (test: 5/18/15)
# See initialize().
#------------------------------------------
# if not(self.MANNING) and not(self.LAW_OF_WALL):
# self.u[:] =
# print '(umin, umax) =', self.u.min(), self.u.max()
# update_velocity()
#-------------------------------------------------------------------
| 2.21875 | 2 |
code_examples/compiling_references/compiling_ref1.py | thautwarm/ebnf-parser-generator- | 0 | 12773373 | # This file is automatically generated by EBNFParser.
from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool, regex_matcher, char_matcher, str_matcher, Tokenizer
from Ruikowa.ObjectRegex.Node import AstParser, Ref, SeqParser, LiteralValueParser, LiteralNameParser, Undef
namespace = globals()
recur_searcher = set()
token_table = ((unique_literal_cache_pool["keyword"], str_matcher(('efgh', 'abcd'))),
(unique_literal_cache_pool["auto_const"], char_matcher(('c', 'b', 'a'))))
class UNameEnum:
# names
auto_const = unique_literal_cache_pool['auto_const']
keyword = unique_literal_cache_pool['keyword']
S = unique_literal_cache_pool['S']
# values
auto_const_c = unique_literal_cache_pool['c']
keyword_efgh = unique_literal_cache_pool['efgh']
auto_const_a = unique_literal_cache_pool['a']
auto_const_b = unique_literal_cache_pool['b']
keyword_abcd = unique_literal_cache_pool['abcd']
token_func = lambda _: Tokenizer.from_raw_strings(_, token_table, ({}, {}))
keyword = LiteralNameParser('keyword')
S = AstParser([SeqParser(['a', 'b', 'c'], at_least=0,at_most=Undef)],
name="S",
to_ignore=({}, {}))
S.compile(namespace, recur_searcher)
# add here
print (S.possibilities[0][0].name)
| 2.140625 | 2 |
UserCode/cdahl/sbc_run5_make_onelines.py | RunzZhang/SBCcode | 4 | 12773374 | <filename>UserCode/cdahl/sbc_run5_make_onelines.py
import numpy as np
import SBCcode as sbc
import os
import re
from SBCcode.DataHandling.WriteBinary import WriteBinaryNtupleFile as wb
import pdb
recondir = '/bluearc/storage/recon/devel/SBC-15/output'
dd = sbc.read_bin(os.path.join(recondir, 'PMTpulseAnalysis_all.bin'))
ev_float = np.float64(dd['runid'][:, 0] - 20161000) +\
np.float64(dd['runid'][:, 1]) * 1e-3 +\
np.float64(dd['ev']) * 1e-6
firsthit = dd['iPMThit'] < 2
lasthit = np.ones(firsthit.shape, dtype=bool)  # builtin bool: np.bool is deprecated/removed in newer numpy
lasthit[:-1] = firsthit[1:]  # a row is the last hit of its event if the next row starts a new one
dd_oneline = {k: dd[k][lasthit] for k in dd.keys()}
wb(os.path.join(recondir, 'PMTpulseAnalysis_all_oneline.bin'), [dd_oneline],
rowdef=1, initialkeys=['runid', 'ev'], drop_first_dim=True)
| 1.851563 | 2 |
B2G/gecko/browser/locales/filter.py | wilebeast/FireFox-OS | 3 | 12773375 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def test(mod, path, entity = None):
import re
    # ignore anything but Firefox
if mod not in ("netwerk", "dom", "toolkit", "security/manager",
"browser", "extensions/reporter", "extensions/spellcheck",
"other-licenses/branding/firefox",
"browser/branding/official",
"services/sync"):
return False
if mod != "browser" and mod != "extensions/spellcheck":
# we only have exceptions for browser and extensions/spellcheck
return True
if not entity:
if mod == "extensions/spellcheck":
return False
# browser
return not (re.match(r"searchplugins\/.+\.xml", path) or
re.match(r"chrome\/help\/images\/[A-Za-z-_]+\.png", path))
if mod == "extensions/spellcheck":
# l10n ships en-US dictionary or something, do compare
return True
if path == "defines.inc":
return entity != "MOZ_LANGPACK_CONTRIBUTORS"
if path != "chrome/browser-region/region.properties":
# only region.properties exceptions remain, compare all others
return True
return not (re.match(r"browser\.search\.order\.[1-9]", entity) or
re.match(r"browser\.contentHandlers\.types\.[0-5]", entity) or
re.match(r"gecko\.handlerService\.schemes\.", entity) or
re.match(r"gecko\.handlerService\.defaultHandlersVersion", entity))
| 1.992188 | 2 |
spark_fhir_schemas/stu3/complex_types/address.py | icanbwell/SparkFhirSchemas | 2 | 12773376 | <reponame>icanbwell/SparkFhirSchemas<gh_stars>1-10
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class AddressSchema:
"""
An address expressed using postal conventions (as opposed to GPS or other
location definition formats). This data type may be used to convey addresses
for use in delivering mail as well as for visiting locations which might not
be valid for mail delivery. There are a variety of postal address formats
defined around the world.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
An address expressed using postal conventions (as opposed to GPS or other
location definition formats). This data type may be used to convey addresses
for use in delivering mail as well as for visiting locations which might not
be valid for mail delivery. There are a variety of postal address formats
defined around the world.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
use: The purpose of this address.
type: Distinguishes between physical addresses (those you can visit) and mailing
addresses (e.g. PO Boxes and care-of addresses). Most addresses are both.
text: A full text representation of the address.
line: This component contains the house number, apartment number, street name,
street direction, P.O. Box number, delivery hints, and similar address
information.
city: The name of the city, town, village or other community or delivery center.
district: The name of the administrative area (county).
state: Sub-unit of a country with limited sovereignty in a federally organized
country. A code may be used if codes are in common use (i.e. US 2 letter state
codes).
postalCode: A postal code designating a region defined by the postal service.
country: Country - a nation as commonly understood or generally accepted.
period: Time period when address was/is in use.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
if (
max_recursion_limit and nesting_list.count("Address") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Address"]
schema = StructType(
[
# unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The purpose of this address.
StructField("use", StringType(), True),
# Distinguishes between physical addresses (those you can visit) and mailing
# addresses (e.g. PO Boxes and care-of addresses). Most addresses are both.
StructField("type", StringType(), True),
# A full text representation of the address.
StructField("text", StringType(), True),
# This component contains the house number, apartment number, street name,
# street direction, P.O. Box number, delivery hints, and similar address
# information.
StructField("line", ArrayType(StringType()), True),
# The name of the city, town, village or other community or delivery center.
StructField("city", StringType(), True),
# The name of the administrative area (county).
StructField("district", StringType(), True),
# Sub-unit of a country with limited sovereignty in a federally organized
# country. A code may be used if codes are in common use (i.e. US 2 letter state
# codes).
StructField("state", StringType(), True),
# A postal code designating a region defined by the postal service.
StructField("postalCode", StringType(), True),
# Country - a nation as commonly understood or generally accepted.
StructField("country", StringType(), True),
# Time period when address was/is in use.
StructField(
"period",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
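# Usage sketch (assumes an active SparkSession bound to the name `spark`):
#   schema = AddressSchema.get_schema()     # a pyspark StructType
#   df = spark.createDataFrame([], schema)  # empty DataFrame with the Address layout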
| 2.484375 | 2 |
biothings-hub/files/nde-hub/hub/dataload/sources/dde/uploader.py | NIAID-Data-Ecosystem/nde-crawlers | 0 | 12773377 | <gh_stars>0
from hub.dataload.nde import NDESourceUploader
class DDEUploader(NDESourceUploader):
name = "dde"
| 1.34375 | 1 |
results/results/urls.py | gnufede/results | 0 | 12773378 | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib.auth.views import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='index.html')),
url(r'^', include('goals.urls')),
url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^api-token-register/', 'goals.views.create_auth'),
# Examples:
# url(r'^$', 'results.views.home', name='home'),
# url(r'^results/', include('results.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| 1.796875 | 2 |
cpsc217/as2/linguistics.py | edwardchen123/UofC | 1 | 12773379 | <reponame>edwardchen123/UofC<filename>cpsc217/as2/linguistics.py<gh_stars>1-10
#CPSC 217 Assignment 2 - Computational Linguistics
#<NAME>
#10023875
import sys
file = sys.stdin.readlines()
#input sorting and handling
d = {}
linecount = 0
wordcount = 0
for line in file:
linecount += 1
for word in line.split(' '):
word = word.replace('\n','')
wordcount += 1
if word not in d:
d[word] = 1
else:
d[word] += 1
#print words in descending order of occurrence
n = wordcount
while n > 0:
for word in d:
if d[word] == n:
print word, d[word]
n -= 1
#prints file info
print
print linecount, 'sentences'
print wordcount, 'words total'
print len(d), 'unique words'
| 3.390625 | 3 |
language/python/Lib_urllib/urlencode.py | LIU2016/Demo | 1 | 12773380 | <filename>language/python/Lib_urllib/urlencode.py<gh_stars>1-10
'''
Examples of parsing URL query strings with urllib.parse
'''
from urllib.parse import parse_qs, parse_qsl  # only the query parsers are used below
query="key=1000&value=00000"
result=parse_qs(query)
result1=parse_qsl(query)
print(result)
print(result1)
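# Expected output:
#   {'key': ['1000'], 'value': ['00000']}
#   [('key', '1000'), ('value', '00000')]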
| 2.515625 | 3 |
analysis/legacy/plot_massComparison_ML.py | Fizzics/desCluster | 2 | 12773381 | import pylab as pyl
from astLib import astStats
from sklearn.metrics import median_absolute_error, mean_squared_error
import h5py as hdf
from matplotlib.ticker import AutoMinorLocator
def calc_err(pred, true):
return (pred - true)/true
golden_mean = (pyl.sqrt(5.)-1.0)/2.0
f = pyl.figure(figsize=(10,10*golden_mean))
ax1 = pyl.subplot2grid((3,4), (0,0), rowspan=2)
ax2 = pyl.subplot2grid((3,4), (0,1), rowspan=2, sharex=ax1)
ax3 = pyl.subplot2grid((3,4), (0,2), rowspan=2, sharex=ax1, sharey=ax2)
ax4 = pyl.subplot2grid((3,4), (0,3), rowspan=2, sharex=ax1, sharey=ax2)
# now for the bottom bits
ax1s = pyl.subplot2grid((3,4), (2,0))
ax2s = pyl.subplot2grid((3,4), (2,1), sharex=ax1s)
ax3s = pyl.subplot2grid((3,4), (2,2), sharex=ax1s, sharey=ax2s)
ax4s = pyl.subplot2grid((3,4), (2,3), sharex=ax1s, sharey=ax2s)
ax2.set_yticklabels([])
ax1.set_xticklabels([])
ax2s.set_yticklabels([])
# add minor ticks to the bottom
ax1s.yaxis.set_minor_locator(AutoMinorLocator())
ax2s.yaxis.set_minor_locator(AutoMinorLocator())
### Perfect ###
###############
#with hdf.File('./result_targetedPerfect_MLmasses.hdf5', 'r') as f:
with hdf.File('./targetedPerfect_MLmasses_realisticOnly.hdf5', 'r') as f:
    dset = f[list(f.keys())[0]]  # h5py keys are not indexable in Python 3
perfect = dset['M200c', 'MASS', 'ML_pred_1d', 'ML_pred_2d', 'ML_pred_3d']
# filter bad values
mask = (perfect['ML_pred_1d'] != 0)
perfect = perfect[mask]
### Targeted ###
################
with hdf.File('./targetedRealistic_MLmasses.hdf5', 'r') as f:
    dset = f[list(f.keys())[0]]
target = dset['M200c', 'MASS', 'ML_pred_1d', 'ML_pred_2d', 'ML_pred_3d']
# filter bad values
mask = (target['ML_pred_1d'] != 0)
target = target[mask]
### Survey ###
##############
with hdf.File('./surveyCompleteRealistic_MLmasses.hdf5', 'r') as f:
    dset = f[list(f.keys())[0]]
survey = dset['M200c', 'MASS', 'ML_pred_1d', 'ML_pred_2d', 'ML_pred_3d']
# filter bad values
mask = (survey['ML_pred_1d'] != 0)
survey = survey[mask]
# plot one to one lines
ax1.plot([12,15.5], [12,15.5], c='k', zorder=0)
ax2.plot([12,15.5], [12,15.5], c='k', zorder=0)
ax3.plot([12,15.5], [12,15.5], c='k', zorder=0)
ax4.plot([12,15.5], [12,15.5], c='k', zorder=0)
ax1s.axhline(0)
ax2s.axhline(0)
ax3s.axhline(0)
ax4s.axhline(0)
# now for the plotting
###################
#### Power Law ####
###################
for d, c, style, zo in zip([target, survey, perfect], ['#7A68A6', '#188487',
'#e24a33'], ['-', '--', '-.'], [1,2,0]):
print('power law')
y_ = astStats.runningStatistic(pyl.log10(d['M200c']),
pyl.log10(d['MASS']), pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax1.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax1.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.3, edgecolor=c)
err = calc_err(d['MASS'], d['M200c'])
y_ = astStats.runningStatistic(pyl.log10(d['M200c']), err,
pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax1s.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax1s.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.3, edgecolor=c)
print('MAE', median_absolute_error(pyl.log10(d['M200c']),
pyl.log10(d['MASS'])))
print('RMSE', pyl.sqrt(mean_squared_error(pyl.log10(d['M200c']),
pyl.log10(d['MASS']))))
############
#### 1d ####
############
print('1d')
y_ = astStats.runningStatistic(pyl.log10(d['M200c']), d['ML_pred_1d'],
pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax2.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax2.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.4, edgecolor=c)
err = calc_err(10**d['ML_pred_1d'], d['M200c'])
y_ = astStats.runningStatistic(pyl.log10(d['M200c']), err,
pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax2s.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax2s.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.4, edgecolor=c)
print('MAE', median_absolute_error(pyl.log10(d['M200c']), d['ML_pred_1d']))
print('RMSE', pyl.sqrt(mean_squared_error(pyl.log10(d['M200c']),
d['ML_pred_1d'])))
#############
#### 2d #####
#############
print('2d')
y_ = astStats.runningStatistic(pyl.log10(d['M200c']), d['ML_pred_2d'],
pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax3.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax3.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.4, edgecolor=c)
err = calc_err(10**d['ML_pred_2d'], d['M200c'])
y_ = astStats.runningStatistic(pyl.log10(d['M200c']), err,
pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax3s.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax3s.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.4, edgecolor=c)
print('MAE', median_absolute_error(pyl.log10(d['M200c']), d['ML_pred_2d']))
print('RMSE', pyl.sqrt(mean_squared_error(pyl.log10(d['M200c']),
d['ML_pred_2d'])))
##############
##### 3d #####
##############
print('3d')
y_ = astStats.runningStatistic(pyl.log10(d['M200c']), d['ML_pred_3d'],
pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax4.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax4.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.4, edgecolor=c)
err = calc_err(10**d['ML_pred_3d'], d['M200c'])
y_ = astStats.runningStatistic(pyl.log10(d['M200c']), err,
pyl.percentile, binNumber=20, q=[16, 50, 84])
quants = pyl.array(y_[1])
ax4s.plot(y_[0],quants[:,1], style, c=c, zorder=zo)
if not c == '#e24a33':
ax4s.fill_between(y_[0], quants[:,2], quants[:,0], facecolor=c,
alpha=0.4, edgecolor=c)
print('MAE', median_absolute_error(pyl.log10(d['M200c']), d['ML_pred_3d']))
print('RMSE', pyl.sqrt(mean_squared_error(pyl.log10(d['M200c']),
d['ML_pred_3d'])))
    print('----')
### Add Legend ###
##################
line1 = pyl.Line2D([], [], ls='-', color='#7A68A6')
line2 = pyl.Line2D([], [], ls='--', color='#188487')
line3 = pyl.Line2D([], [], ls='-.', color='#e24a33')
ax1.legend((line3, line1, line2), ('Perfect', 'Targeted', 'Survey'), loc=2)
#### tweak ####
ax1.set_xticks([12,13,14,15])
ax2.set_xticks([12,13,14,15])
ax2s.set_xticks([12,13,14,15])
ax2s.set_ylim(-2,4)
ax1s.set_ylim(-2,4)
ax2s.set_yticks([-2,0,2])
ax1s.set_yticks([-2,0,2])
ax1.set_ylim(ax2.get_ylim())
ax1s.set_ylim(ax2s.get_ylim())
ax1.set_ylabel('Log $M_{pred}$')
ax1s.set_ylabel(r'$\epsilon$')
ax1s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax2s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax3s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax4s.set_xlabel('Log $M_{200c}$', fontsize=18)
ax1.text(14, 12.25, 'Power Law', fontsize=18, horizontalalignment='center')
ax2.text(14, 12.25, r'$ML_{\sigma}$', fontsize=18, horizontalalignment='center')
ax3.text(14, 12.25, r'$ML_{\sigma, z}$', fontsize=18,
         horizontalalignment='center')
ax4.text(14, 12.25, r'$ML_{\sigma, z, Ngal}$', fontsize=18,
         horizontalalignment='center')
| 2.21875 | 2 |
src/pyrin/task_queues/celery/cli/interface.py | wilsonGmn/pyrin | 0 | 12773382 | <reponame>wilsonGmn/pyrin
# -*- coding: utf-8 -*-
"""
celery cli interface module.
"""
from pyrin.cli.base import CLIHandlerBase
from pyrin.cli.params import CLIParamBase
from pyrin.core.globals import LIST_TYPES
class CeleryCLIParamBase(CLIParamBase):
"""
celery cli param base class.
all celery cli param classes must be subclassed from this.
"""
def _convert_result(self, value):
"""
converts the given value to required type.
celery command line arguments must be all strings.
:param list[object] | object value: value to be converted.
:rtype: list[object] | object
"""
if value is not None and not isinstance(value, str):
if isinstance(value, LIST_TYPES):
original_type = type(value)
converted_results = []
for item in value:
if item is not None and not isinstance(item, str):
converted_results.append(str(item))
else:
converted_results.append(item)
return original_type(converted_results)
else:
return str(value)
return value
class CeleryCLIHandlerBase(CLIHandlerBase):
"""
celery cli handler base class.
all celery cli handlers must be subclassed from this.
"""
def __init__(self, name):
"""
initializes an instance of CeleryCLIHandlerBase.
:param str name: the handler name that should be registered
with. this name must be the exact name that
this handler must emmit to cli.
"""
super().__init__(name)
def _get_common_cli_options(self):
"""
gets the list of common cli options.
:rtype: list
"""
return ['celery', '-A', 'cli:celery_app']
| 2.171875 | 2 |
Algorithms/tf2algos/base.py | familywei/RLs | 0 | 12773383 | import os
import numpy as np
import tensorflow as tf
from utils.recorder import RecorderTf2 as Recorder
class Base(tf.keras.Model):
def __init__(self, a_dim_or_list, action_type, base_dir):
super().__init__()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
self.device = "/gpu:0"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
else:
self.device = "/cpu:0"
tf.keras.backend.set_floatx('float64')
self.cp_dir, self.log_dir, self.excel_dir = [os.path.join(base_dir, i) for i in ['model', 'log', 'excel']]
self.action_type = action_type
self.a_counts = int(np.array(a_dim_or_list).prod())
self.global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int64) # in TF 2.x must be tf.int64, because function set_step need args to be tf.int64.
self.episode = 0
def get_init_episode(self):
"""
        get the initial episode number, used to continue training from the last saved checkpoint.
"""
if os.path.exists(os.path.join(self.cp_dir, 'checkpoint')):
return int(tf.train.latest_checkpoint(self.cp_dir).split('-')[-1])
else:
return 0
def generate_recorder(self, logger2file, model=None):
"""
        create model/log/data directories and define a writer to record training data.
"""
self.check_or_create(self.cp_dir, 'checkpoints')
self.check_or_create(self.log_dir, 'logs(summaries)')
self.check_or_create(self.excel_dir, 'excel')
self.recorder = Recorder(
cp_dir=self.cp_dir,
log_dir=self.log_dir,
excel_dir=self.excel_dir,
logger2file=logger2file,
model=model
)
def init_or_restore(self, base_dir):
"""
check whether chekpoint and model be within cp_dir, if in it, restore otherwise initialize randomly.
"""
cp_dir = os.path.join(base_dir, 'model')
if os.path.exists(os.path.join(cp_dir, 'checkpoint')):
try:
self.recorder.checkpoint.restore(self.recorder.saver.latest_checkpoint)
except:
self.recorder.logger.error('restore model from checkpoint FAILED.')
else:
self.recorder.logger.info('restore model from checkpoint SUCCUESS.')
else:
self.recorder.logger.info('initialize model SUCCUESS.')
def save_checkpoint(self, global_step):
"""
save the training model
"""
self.recorder.saver.save(checkpoint_number=global_step)
def writer_summary(self, global_step, **kargs):
"""
        record the data to be shown in TensorBoard
"""
tf.summary.experimental.set_step(global_step)
for i in [{'tag': 'MAIN/' + key, 'value': kargs[key]} for key in kargs]:
tf.summary.scalar(i['tag'], i['value'])
self.recorder.writer.flush()
def check_or_create(self, dicpath, name=''):
"""
        check whether the directory exists; if not, create it.
"""
if not os.path.exists(dicpath):
os.makedirs(dicpath)
            print(f'created {name} directory:', dicpath)
def close(self):
"""
end training, and export the training model
"""
pass
def get_global_step(self):
"""
        get the current training step.
"""
return self.global_step
def set_global_step(self, num):
"""
set the start training step.
"""
self.global_step = num
    def update_target_net_weights(self, tge, src, ployak=None):
        if ployak is None:
            tf.group([r.assign(v) for r, v in zip(tge, src)])
        else:
            # Polyak (soft) update: target <- ployak * source + (1 - ployak) * target
            # (use the `ployak` argument; `self.ployak` is never set in this class)
            tf.group([r.assign(ployak * v + (1 - ployak) * r) for r, v in zip(tge, src)])
| 2.484375 | 2 |
src/common_utils_data/config_table.py | Mi524/common_utils_data | 0 | 12773384 | <filename>src/common_utils_data/config_table.py<gh_stars>0
import gc
import re
import sys
import warnings
import os
import time
from datetime import datetime
import numpy as np
import pandas as pd
import hashlib
from xlsxwriter import Workbook
from collections import defaultdict,Counter
from .os_functions import get_walk_files,get_walk_abs_files,\
check_require_files,check_create_new_folder,get_require_files,enter_exit
from .df_functions import normalize_multi_header,copy_seperate_header_columns,\
check_abnormal_dates, stack_list_column, df_fillna_str
from .excel_functions import write_pct_columns
warnings.filterwarnings('ignore')
warnings.simplefilter(action='ignore', category=FutureWarning)
class ConfigReader(object):
def __init__(self,config_file_dir, config_table_name, config_list,*args, **kwargs):
self.config_file_dir = config_file_dir
self.config_table_name = config_table_name
self.config_list = config_list
self.require_file_dir = kwargs.get('require_file_dir','.\\require_tables')
self.data_file_dir = kwargs.get('data_file_dir',r"..\\data_files")
def get_header_table(self,header_table_df):
        # drop rows that are entirely empty
header_table_df = header_table_df.dropna(how='all',axis=0)
header_table_df = df_fillna_str(header_table_df)
header_table_df = normalize_multi_header(header_table_df)
return header_table_df
def get_complete_header_df(self, header_table_df):
        # number of standard header columns to keep, based on the ordinal in the first column
header_table_columns = header_table_df.columns
standard_column = header_table_df[header_table_columns[0]].fillna('').tolist()
standard_column = [x for x in standard_column if x != '']
for s in standard_column[::-1] :
if s == '':
standard_column.pop(-1)
else:
break
target_column_num = len(standard_column)
target_cn_columns = header_table_df[header_table_columns[2]][:target_column_num].tolist()
complete_header_df = pd.DataFrame(data= [],columns=target_cn_columns)
return complete_header_df,target_cn_columns
def get_config_tables(self, if_walk_path = True ):
require_file_dict = get_require_files(self.config_file_dir, self.config_table_name,if_walk_path=if_walk_path)
header_table_path = require_file_dict[self.config_table_name]
df_workbook = pd.ExcelFile(header_table_path)
sheet_property_list = df_workbook.book.sheets()
table_dict = { }
for sheet_property in sheet_property_list:
sheet = sheet_property.name
sheet_visibility = sheet_property.visibility
            if sheet_visibility == 0:  # only read visible sheets
for config in self.config_list:
if config in sheet.lower().strip():
                        # merged-header (mapping) tables need special handling
if 'mapping' in config:
table = df_workbook.parse(sheet, header = [0, 1])
table = self.get_header_table(table)
complete_header_df, target_cn_columns = self.get_complete_header_df(table)
else:
table = df_workbook.parse(sheet, header = 0 )
if not table.empty:
table = df_fillna_str(table)
table_dict.update({sheet:table})
table_dict.update({ 'complete_header_df' :complete_header_df,
'target_cn_columns':target_cn_columns })
return table_dict
if __name__ == '__main__':
config_list = [ 'mapping',
'standardization',
'split',
'match',
'deduplication',
'fill&sort',
'filter',
'extraction']
config_list =[ 'mapping',
'time process',
'statistic groups',
'calculations',
'fill&sort']
table_reader = ConfigReader(config_file_dir= '.\\',config_list=config_list,config_table_name= 'config',)
table_dict = table_reader.get_config_tables(if_walk_path=False)
df = table_dict['time process']
print(df)
| 2.296875 | 2 |
stac_api/config.py | waseem-aidash/arturo-stac-api | 0 | 12773385 | """Application settings."""
import enum
from typing import Optional, Set
from pydantic import BaseSettings
# TODO: Move to stac-pydantic
class ApiExtensions(enum.Enum):
"""Enumeration of available stac api extensions.
Ref: https://github.com/radiantearth/stac-api-spec/tree/master/extensions
"""
context = "context"
fields = "fields"
query = "query"
sort = "sort"
transaction = "transaction"
class AddOns(enum.Enum):
"""Enumeration of available third party add ons."""
tiles = "tiles"
bulk_transaction = "bulk-transaction"
class ApiSettings(BaseSettings):
"""ApiSettings.
Defines api configuration, potentially through environment variables.
See https://pydantic-docs.helpmanual.io/usage/settings/.
Attributes:
environment: name of the environment (ex. dev/prod).
debug: toggles debug mode.
forbidden_fields: set of fields defined by STAC but not included in the database.
indexed_fields:
set of fields which are usually in `item.properties` but are indexed as distinct columns in
the database.
"""
environment: str
debug: bool = False
# Fields which are defined by STAC but not included in the database model
forbidden_fields: Set[str] = {"type"}
# Fields which are item properties but indexed as distinct fields in the database model
indexed_fields: Set[str] = {"datetime"}
class Config:
"""model config (https://pydantic-docs.helpmanual.io/usage/model_config/)."""
extra = "allow"
env_file = ".env"
class PostgresSettings(ApiSettings):
"""Postgres-specific API settings.
Attributes:
postgres_user: postgres username.
postgres_pass: postgres password.
postgres_host_reader: hostname for the reader connection.
postgres_host_writer: hostname for the writer connection.
postgres_port: database port.
postgres_dbname: database name.
"""
postgres_user: str
postgres_pass: str
postgres_host_reader: str
postgres_host_writer: str
postgres_port: str
postgres_dbname: str
@property
def reader_connection_string(self):
"""Create reader psql connection string."""
return f"postgresql://{self.postgres_user}:{self.postgres_pass}@{self.postgres_host_reader}:{self.postgres_port}/{self.postgres_dbname}"
@property
def writer_connection_string(self):
"""Create writer psql connection string."""
return f"postgresql://{self.postgres_user}:{self.postgres_pass}@{self.postgres_host_writer}:{self.postgres_port}/{self.postgres_dbname}"
settings: Optional[ApiSettings] = None
def inject_settings(base_settings: ApiSettings):
"""Inject settings to global scope.
Attributes:
base_settings: api settings.
"""
global settings
settings = base_settings
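# Usage sketch (values are hypothetical; real deployments usually supply them
# through environment variables or a .env file via pydantic's BaseSettings):
#
#   pg_settings = PostgresSettings(
#       environment="dev",
#       postgres_user="stac", postgres_pass="secret",
#       postgres_host_reader="localhost", postgres_host_writer="localhost",
#       postgres_port="5432", postgres_dbname="stac",
#   )
#   inject_settings(pg_settings)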
| 2.171875 | 2 |
examples/exampleApp16.py | dirk-attraktor/pyHtmlGui | 0 | 12773386 | <reponame>dirk-attraktor/pyHtmlGui
import os
import sys
import time
import subprocess
from pyhtmlgui import PyHtmlGui, PyHtmlView, Observable
class App(Observable):
pass
class AppView(PyHtmlView):
TEMPLATE_STR = '''
<p>i am a button calling a method of the python frontend object</p>
<button onclick="pyhtmlgui.call(this.get_time).then(function(e){alert(e);})">Click me</button>
'''
def get_time(self):
return time.time()
if __name__ == "__main__":
listen_host = "127.0.0.1"
listen_port = 8001
secret = "i_am_secret"
electron_exe = sys.argv[1]
gui = PyHtmlGui(
appInstance=App(),
appViewClass=AppView,
listen_host=listen_host,
listen_port=listen_port,
mode="electron",
template_dir="templates",
static_dir="static",
main_html="window.html",
shared_secret=secret, # must be the same in electron pyhtmlgui.json,
)
if "launch_from_within_electron" in sys.argv:
gui.start(show_frontend=False, block=True)
else:
# in a deployed app, set these value in package.json and launch electron.exe as your app so the all code below is unneccessary
args = sys.argv.copy()
args.append("launch_from_within_electron")
env = os.environ.copy()
env.update({
"PYHTMLGUI_HOST" : listen_host,
"PYHTMLGUI_PORT" : "%s" % listen_port,
"PYHTMLGUI_SECRET" : secret,
"PYHTMLGUI_CMD" : sys.executable,
"PYHTMLGUI_CMD_ARGS": ",".join(args),
})
subprocess.Popen([electron_exe, gui.electron_app_dir], env=env) # receive defaul app dir from gui and launch electron
| 2.4375 | 2 |
python/utilityFuncs.py | tianran/glimvec | 9 | 12773387 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import sys
import heapq
def readerLine(fn):
    with open(fn, 'rb') as file:
        for line in file:  # iterate directly: raising StopIteration in a generator is an error under PEP 479
            yield line.decode('utf-8').rstrip('\r\n')
def show_top(k, scores, lst):
h = []
for i, s in enumerate(scores):
heapq.heappush(h, (s, -i))
if len(h) > k:
heapq.heappop(h)
num = len(h)
res = [heapq.heappop(h) for _ in six.moves.range(num)]
for s, ii in res[::-1]:
print(' ' + str(s) + '\t' + lst[-ii])
def split_wrt_brackets(str, sp):
blocks = []
part = []
count = 0
for x in str:
if count == 0 and x in sp:
blocks.append(''.join(part))
part = []
else:
part.append(x)
if x == '(':
count += 1
elif x == ')':
count -= 1
if count < 0:
print("Unmatched )", file=sys.stderr)
blocks.append(''.join(part))
return blocks
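# For example, split_wrt_brackets('a,b(c,d),e', ',') returns ['a', 'b(c,d)', 'e']:
# separators inside parentheses do not split.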
| 2.875 | 3 |
upman/schemas/reporter_schema.py | marcsello/upman | 0 | 12773388 | #!/usr/bin/env python3
from marshmallow import fields
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from model import Reporter
class ReporterSchema(SQLAlchemyAutoSchema):
name = fields.String(required=True)
key = fields.String(required=True)
class Meta:
dump_only = ['id', 'created', 'info_digest', 'last_seen']
load_only = ['key']
model = Reporter
include_relationships = True
load_instance = True
include_fk = False
| 2.21875 | 2 |
estacao/tests/modules/test_modules.py | fpcardoso/projeto-estacao-meteorologica | 0 | 12773389 | from unittest import main, TestCase
from modules.bmp280 import BMP280
bmp = BMP280()
bmp.start()
class TestModuleBMP280(TestCase):
    def test_read_temperatura(self):
        medida_temperatura = bmp.read('Temperatura')
        assert (medida_temperatura >= 0) and (medida_temperatura <= 100)  # 'or' made this assertion vacuous
    def test_read_pressure(self):
        medida_pressure = bmp.read('Pressure')
        assert (medida_pressure >= 300) and (medida_pressure <= 3000)
    def test_read_invalid_grandeza(self):
        medida_umidade = bmp.read('Umidade')
        self.assertFalse(medida_umidade, 'Reading succeeded even though the quantity is unsupported')
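if __name__ == '__main__':
    main()  # `main` is already imported from unittest at the top of the file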
| 3.078125 | 3 |
plugins/trendmicro_apex/icon_trendmicro_apex/util/util.py | lukaszlaszuk/insightconnect-plugins | 46 | 12773390 | import base64
import jwt
import hashlib
import time
from datetime import timedelta
from datetime import datetime
DEFAULT_TASK_TYPE = "CMEF"
class TaskType:
@staticmethod
def value_of(task_type: str) -> int:
return {
"UNKNOWN": 1,
"INTERNAL": 2,
"CM": 3,
"CMEF": 4,
"OSF_COMMAND": 5,
"OSF_QUERY": 6,
"OSF_NOTIFY": 7,
"OSF_LOG": 8,
"MDR_ATTACK_DISCOVERY": 9,
"OSF_SYS_CALL": 10,
}.get(task_type)
def create_base64_checksum(http_method: str, raw_url: str, raw_header: str, request_body: str) -> str:
"""Create a base64 encoded hash string for an Apex JWT token"""
string_to_hash = http_method.upper() + "|" + raw_url.lower() + "|" + raw_header + "|" + request_body
base64_hash_string = base64.b64encode(hashlib.sha256(str.encode(string_to_hash)).digest()).decode("utf-8")
return base64_hash_string
def create_jwt_token(
application_id: str,
api_key: str,
http_method: str,
raw_url: str,
header: str,
request_body: str,
algorithm="HS256",
) -> str:
"""Generate a JWT token for an Apex HTTP request. Specific to a url destination and payload"""
issue_time = time.time()
payload = {
"appid": application_id,
"iat": issue_time,
"version": "V1",
"checksum": create_base64_checksum(http_method, raw_url, header, request_body),
}
    token = jwt.encode(payload, api_key, algorithm)
    if isinstance(token, bytes):  # PyJWT < 2.0 returns bytes; >= 2.0 already returns str
        token = token.decode("utf-8")
return token
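# Usage sketch (the id, key, and URL path below are placeholders, not real
# Apex Central values):
#   token = create_jwt_token("app-id", "api-key", "POST",
#                            "/WebApp/api/example", "", request_body_json)
#   headers = {"Authorization": "Bearer " + token}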
def get_expiration_utc_date_string(num_days=30):
if not isinstance(num_days, int) or num_days < 1:
num_days = 1
today = datetime.now()
# +5 hours for timezones, just a buffer
timedelta_days = timedelta(days=num_days, hours=5)
future = today + timedelta_days
return future.strftime("%Y-%m-%dT%H:%MU")
| 2.46875 | 2 |
pizzaapp/migrations/0016_usercart.py | vivekx01/pizzaapp-v2 | 1 | 12773391 | <gh_stars>1-10
# Generated by Django 3.2.3 on 2021-05-28 09:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pizzaapp', '0015_auto_20210528_1157'),
]
operations = [
migrations.CreateModel(
name='usercart',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=10)),
('item_name', models.CharField(max_length=200)),
('quantity', models.IntegerField(blank=True, null=True)),
('amount', models.IntegerField(blank=True, null=True)),
],
),
]
| 1.828125 | 2 |
plots/plot_lead.py | kmkolasinski/Bubel | 0 | 12773392 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import csv
from xml.dom import minidom
from matplotlib.collections import LineCollection
from numpy import linalg as LA
xmldoc = minidom.parse('lead.dat')
file = "lattice.dat"
xml_shape_type = xmldoc.getElementsByTagName('shape_type')
xml_shape_data = xmldoc.getElementsByTagName('shape_data')
xml_lead_vector = xmldoc.getElementsByTagName('lead_vector')
shape_type = xml_shape_type[0].childNodes[0].nodeValue
print "Shape type :",shape_type
shape_data = xml_shape_data[0].childNodes[0].nodeValue
f_shape_data = shape_data.split()
f_shape_data = [float(i) for i in f_shape_data]
print "Shape data :" ,f_shape_data
xml_lead_vector = xml_lead_vector[0].childNodes[0].nodeValue
f_lead_vector = xml_lead_vector.split()
f_lead_vector = [float(i) for i in f_lead_vector]
print "Shape vector:",f_lead_vector
plt.clf()
f = plt.figure(1)
ax = plt.subplot(111)
ax.set_aspect('equal')
ax.margins(0.1)
#shape_contour_x = [f_shape_data[0],f_shape_data[2],f_shape_data[2],f_shape_data[0],f_shape_data[0]]
#shape_contour_y = [f_shape_data[1],f_shape_data[1],f_shape_data[3],f_shape_data[3],f_shape_data[1]]
#ax.plot( shape_contour_x , shape_contour_y )
offset_x = [f_lead_vector[0]]*5
offset_y = [f_lead_vector[1]]*5
#ax.plot( map(add, shape_contour_x, offset_x) , map(add, shape_contour_y, offset_y) )
import matplotlib.patches as patches
#ax3.add_patch(Polygon([[0,0],[4,1.1],[6,2.5],[2,1.4]], closed=True,
#fill=False, hatch='/')
print "Plotting unit cell"
# rebuild ends using none to separate line segments
wlist = []
lines = []
lead_datas = xmldoc.getElementsByTagName('lead_data')
for ldatas in lead_datas:
ldata = ldatas.getElementsByTagName('data')
for i in range(len(ldata)):
fdata = ldata[i].childNodes[0].nodeValue.split()
fdata = fdata[0:7]
fdata = [float(j) for j in fdata]
lines.append([ (fdata[0],fdata[1]) , (fdata[3],fdata[4]) ])
wlist.extend([fdata[6]])
if(np.size(wlist) > 0):
lc = LineCollection(lines, linewidths=wlist,colors='gray',lw=2.0)
ax.add_collection(lc)
print "Plotting next unit cells"
# rebuild ends using none to separate line segments
wlist = []
lines = []
lead_datas = xmldoc.getElementsByTagName('next_cell_lead_data')
for ldatas in lead_datas:
ldata = ldatas.getElementsByTagName('data')
for i in range(len(ldata)):
fdata = ldata[i].childNodes[0].nodeValue.split()
fdata = fdata[0:7]
fdata = [float(j) for j in fdata]
lines.append([ (fdata[0],fdata[1]) , (fdata[3],fdata[4]) ])
wlist.extend([fdata[6]])
if(np.size(wlist) > 0):
lc = LineCollection(lines, linewidths=wlist,colors='green',lw=2.0)
ax.add_collection(lc)
print "Plotting coupling between units cells"
# rebuild ends using none to separate line segments
wlist = []
lines = []
lead_datas = xmldoc.getElementsByTagName('lead_coupling')
for ldatas in lead_datas:
ldata = ldatas.getElementsByTagName('data')
for i in range(len(ldata)):
fdata = ldata[i].childNodes[0].nodeValue.split()
fdata = fdata[0:7]
fdata = [float(j) for j in fdata]
lines.append([ (fdata[0],fdata[1]) , (fdata[3],fdata[4]) ])
wlist.extend([fdata[6]])
if(np.size(wlist) > 0):
lc = LineCollection(lines, linewidths=wlist,colors='red',lw=2.0)
ax.add_collection(lc)
print "Plotting lattice"
# rebuild ends using none to separate line segments
wlist = []
points = []
lead_datas = xmldoc.getElementsByTagName('nearest_atoms')
for ldatas in lead_datas:
ldata = ldatas.getElementsByTagName('data')
for i in range(len(ldata)):
fdata = ldata[i].childNodes[0].nodeValue.split()
fdata = fdata[0:4]
fdata = [float(j) for j in fdata]
points.append([ fdata[0],fdata[1]])
wlist.extend([fdata[3]])
if(np.size(wlist) > 0):
points = np.array(points)
wlist = np.array(wlist)
ax.scatter(points[:,0],points[:,1], cmap='PuBu', c=wlist , s=10 , edgecolors='k' , zorder=2 )
if(shape_type == "SHAPE_RECTANGLE_XY"):
ax.add_patch(
patches.Rectangle(
(f_shape_data[0], f_shape_data[1]),
f_shape_data[2] - f_shape_data[0],
f_shape_data[3] - f_shape_data[1],
alpha=0.1,
color='gray'
)
)
ax.add_patch(
patches.Rectangle(
(f_shape_data[0]+f_lead_vector[0], f_shape_data[1]+f_lead_vector[1]),
f_shape_data[2] - f_shape_data[0],
f_shape_data[3] - f_shape_data[1],
alpha=0.1,
color='g'
)
)
if(shape_type == "SHAPE_CONVEX_QUAD_XY"):
coords = np.array(f_shape_data).reshape(4,2)
offset = np.array([[f_lead_vector[0],f_lead_vector[1]]]*4)
ax.add_patch(
patches.Polygon(coords,
alpha=0.2,
color='gray'
)
)
ax.add_patch(
patches.Polygon(coords+offset,
alpha=0.2,
color='green'
)
)
if(shape_type == "SHAPE_RANGE_3D"):
coords = np.array(f_shape_data).reshape(2,3)
base = coords[0][0:2]
normal = coords[1][0:2]
tangent = [normal[1],-normal[0]]
tangent = np.array(tangent)
tangent = tangent/LA.norm(tangent)
normal = np.array(normal)
base = np.array(base)
offset = np.array([[f_lead_vector[0],f_lead_vector[1]]]*4)
scale = 100
coords = [ base - scale*tangent , base - scale*tangent + normal , base + scale*tangent + normal , base + scale*tangent]
ax.add_patch(
patches.Polygon(coords,
alpha=0.2,
color='gray'
)
)
ax.add_patch(
patches.Polygon(coords+offset,
alpha=0.2,
color='green'
)
)
ax.set_xlim([min(points[:,0]),max(points[:,0])])
ax.set_ylim([min(points[:,1]),max(points[:,1])])
ax.margins(0.2)
plt.savefig("lead.pdf") | 2.6875 | 3 |
discogspy/core/rq_database.py | cpow-89/discogspy | 0 | 12773393 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_core.rq_database.ipynb (unless otherwise specified).
__all__ = ['get_release', 'get_user_release_rating', 'update_user_release_rating', 'delete_user_release_rating',
'get_community_release_rating', 'get_master_release', 'get_releases_related_to_master_release', 'get_artist',
'get_artist_releases', 'get_label', 'get_label_releases']
# Cell
import requests
from typing import Union
from . import *
# Cell
def get_release(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
release_id: int,
curr_abbr: Union[CurrAbbr, None] = None
) -> requests.models.Response:
"""
Get information about a particular release from the Discogs database.
No user Authentication needed.
Parameters:
user: user object (required)
release_id : number (required)
-> The Release ID.
curr_abbr: string (optional)
-> Currency for marketplace data. Defaults to the authenticated users currency.
"""
url = f"{RELEASES_URL}/{release_id}"
headers = user.headers
params = user.params
if curr_abbr:
params["curr_abbr"] = curr_abbr.value
return requests.get(url, headers=headers, params=params)
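# Usage sketch (assumes UserWithoutAuthentication, imported above, can be
# constructed without arguments; the release id is arbitrary):
#   user = UserWithoutAuthentication()
#   response = get_release(user, release_id=249504)
#   release = response.json()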
# Cell
def get_user_release_rating(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
release_id: int,
username: str
) -> requests.models.Response:
"""
Get the rating of a release made by the given user.
No user Authentication needed.
Parameters:
user: user object (required)
release_id : number (required)
-> The Release ID.
username: string (required)
-> The username of the rating you are trying to request.
"""
url = f"{RELEASES_URL}/{release_id}/rating/{username}"
headers = user.headers
params = user.params
return requests.get(url, headers=headers, params=params)
# Cell
def update_user_release_rating(user: UserWithUserTokenBasedAuthentication,
release_id: int,
username: str,
rating: int
) -> requests.models.Response:
"""
Update the rating of a release made by the given user.
If there is no rating, it will create one.
User Authentication needed.
Parameters:
user: user object (required)
release_id : number (required)
-> The Release ID.
username: string (required)
-> The username of the rating you are trying to request.
rating: int (required)
-> The new rating value. Must be a value between 1 and 5.
"""
url = f"{RELEASES_URL}/{release_id}/rating/{username}"
headers = user.headers
params = user.params
    rating = min(max(1, rating), 5)  # clamp to the documented 1-5 range
data = {"rating": rating}
return requests.put(url, headers=headers, params=params, json=data)
# Cell
def delete_user_release_rating(user: UserWithUserTokenBasedAuthentication,
release_id: int,
username: str
) -> requests.models.Response:
"""
Delete the rating of a release made by the given user.
User Authentication needed.
Parameters:
user: user object (required)
release_id : number (required)
-> The Release ID.
username: string (required)
-> The username of the rating you are trying to delete.
"""
url = f"{RELEASES_URL}/{release_id}/rating/{username}"
headers = user.headers
params = user.params
return requests.delete(url, headers=headers, params=params)
# Cell
def get_community_release_rating(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
release_id: int
) -> requests.models.Response:
"""
Get the rating of a release made by the community.
A community release rating includes the average rating and
the total number of user ratings for a given release.
This function doesn't work for master releases!
No user Authentication needed.
Parameters:
user: user object (required)
release_id : number (required)
-> The Release ID.
"""
url = f"{RELEASES_URL}/{release_id}/rating"
headers = user.headers
params = user.params
return requests.get(url, headers=headers, params=params)
# Cell
def get_master_release(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
master_id: int
) -> requests.models.Response:
"""
Get information to a particular master release from Discogs database.
No user Authentication needed.
Parameters:
user: user object (required)
master_id : number (required)
-> The Master ID.
"""
url = f"{MASTERS_URL}/{master_id}"
headers = user.headers
params = user.params
return requests.get(url, headers=headers, params=params)
# Cell
def get_releases_related_to_master_release(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
master_id: int,
page: Union[int, None] = None,
per_page: Union[int, None] = None,
release_format: Union[str, None] = None,
label: Union[str, None] = None,
released: Union[str, None] = None,
country: Union[str, None] = None,
sort: Union[SortOptionsMaster, None] = None,
sort_order: Union[SortOrder, None] = None
) -> requests.models.Response:
"""
Get a list of all Releases that are versions of the given master release.
No user Authentication needed.
Parameters:
user: user object (required)
master_id : number (required)
-> The Master ID.
page: number (optional)
-> The page you want to request.
per_page: number (optional)
-> The number of items per page.
release_format: string (optional)
-> The format to filter.
label: string (optional)
-> The label to filter.
released: string (optional)
-> The release year to filter.
country: string (optional)
-> The country to filter.
sort: string (optional)
-> Sort items by this field.
sort_order: string (optional)
-> Sort items in a particular order (one of asc, desc)
"""
url = f"{MASTERS_URL}/{master_id}/versions"
headers = user.headers
params = user.params
if page:
params["page"] = max(1, page)
if per_page:
params["per_page"] = max(1, per_page)
if release_format:
params["format"] = release_format
if label:
params["label"] = label
if released:
params["released"] = released
if country:
params["country"] = country
if sort:
params["sort"] = sort.value
if sort_order:
params["sort_order"] = sort_order.value
return requests.get(url, headers=headers, params=params)
# Cell
def get_artist(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
artist_id: int
) -> requests.models.Response:
"""
Get information about an artist.
No user Authentication needed.
Parameters:
user: user object (required)
artist_id : number (required)
-> The Artist ID.
"""
url = f"{ARTIST_URL}/{artist_id}"
headers = user.headers
params = user.params
return requests.get(url, headers=headers, params=params)
# Cell
def get_artist_releases(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
artist_id: int,
page: Union[int, None] = None,
per_page: Union[int, None] = None,
sort: Union[SortOptionsArtist, None] = None,
sort_order: Union[SortOrder, None] = None
) -> requests.models.Response:
"""
Get a list of releases and masters associated with the given artist.
No user Authentication needed.
Parameters:
user: user object (required)
artist_id : number (required)
-> The Artist ID.
page: number (optional)
-> The page you want to request.
per_page: number (optional)
-> The number of items per page.
sort: string (optional)
-> Sort items by this field.
sort_order: string (optional)
-> Sort items in a particular order (one of asc, desc)
"""
url = f"{ARTIST_URL}/{artist_id}/releases"
headers = user.headers
params = user.params
if page:
params["page"] = max(1, page)
if per_page:
params["per_page"] = max(1, per_page)
if sort:
params["sort"] = sort.value
if sort_order:
params["sort_order"] = sort_order.value
return requests.get(url, headers=headers, params=params)
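# Minimal usage sketch (the artist ID and the way the user object is built are
# illustrative, not part of this module):
# user = UserWithoutAuthentication()
# response = get_artist_releases(user, artist_id=108713, page=1, per_page=50)
# response.json()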
# Cell
def get_label(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
label_id: int
) -> requests.models.Response:
"""
Get information about a label.
No user Authentication needed.
Parameters:
user: user object (required)
label_id : number (required)
-> The Label ID.
"""
url = f"{LABEL_URL}/{label_id}"
headers = user.headers
params = user.params
return requests.get(url, headers=headers, params=params)
# Cell
def get_label_releases(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
label_id: int,
page: Union[int, None] = None,
per_page: Union[int, None] = None,
sort: Union[SortOptionsLabel, None] = None,
sort_order: Union[SortOrder, None] = None
) -> requests.models.Response:
"""
Get a list of releases and masters associated with the given label.
No user Authentication needed.
Parameters:
user: user object (required)
label_id : number (required)
-> The Label ID.
page: number (optional)
-> The page you want to request.
per_page: number (optional)
-> The number of items per page.
sort: string (optional)
-> Sort items by this field.
sort_order: string (optional)
-> Sort items in a particular order (one of asc, desc)
"""
url = f"{LABEL_URL}/{label_id}/releases"
headers = user.headers
params = user.params
if page:
params["page"] = max(1, page)
if per_page:
params["per_page"] = max(1, per_page)
if sort:
params["sort"] = sort.value
if sort_order:
params["sort_order"] = sort_order.value
return requests.get(url, headers=headers, params=params) | 2.53125 | 3 |
01-algorithmic-design-and-techniques/week-4/inversions.py | andrewnachtigal/UCSD-Algorithms | 0 | 12773394 | <reponame>andrewnachtigal/UCSD-Algorithms
#!/usr/bin/env python
'''Number of Inversions
'''
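# A standard merge-sort based sketch. The I/O format is assumed (n on the first
# line, then n integers), matching the course's usual starter-file convention.
def count_inversions(a):
    if len(a) <= 1:
        return a, 0
    mid = len(a) // 2
    left, inv_left = count_inversions(a[:mid])
    right, inv_right = count_inversions(a[mid:])
    merged, inversions = [], inv_left + inv_right
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
            inversions += len(left) - i  # every remaining left element exceeds right[j]
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inversions

if __name__ == '__main__':
    import sys
    data = sys.stdin.read().split()
    n, a = int(data[0]), list(map(int, data[1:]))
    print(count_inversions(a)[1])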
| 1.320313 | 1 |
config.py | Go2SleepSoshi/ct_test | 0 | 12773395 | <reponame>Go2SleepSoshi/ct_test<gh_stars>0
token = ''
db_name = 'testDB.db' | 0.9375 | 1 |
src/automotive/application/testcase/reader/standard_excel_reader_sample.py | philosophy912/automotive | 0 | 12773396 | # -*- coding:utf-8 -*-
# --------------------------------------------------------
# Copyright (C), 2016-2021, lizhe, All rights reserved
# --------------------------------------------------------
# @Name: standard_excel_reader.py
# @Author: lizhe
# @Created: 2021/7/3 - 22:14
# --------------------------------------------------------
import os
from typing import Dict, List
from automotive.application.common.constants import Testcase, priority_config, point, index_list
from automotive.application.common.interfaces import BaseReader, TestCases
from automotive.logger.logger import logger
from automotive.application.common.enums import ModifyTypeEnum
try:
import xlwings as xw
except ModuleNotFoundError:
os.system("pip install xlwings")
finally:
import xlwings as xw
from xlwings import Sheet, Book
class StandardExcelSampleReader(BaseReader):
def __init__(self, ignore_sheet_name: List[str] = None):
        # row index at which the test-case rows start
if ignore_sheet_name is None:
ignore_sheet_name = ["Summary"]
self.__start_row = 3
self.__ignore_sheet_name = ignore_sheet_name
def read_from_file(self, file: str) -> Dict[str, TestCases]:
result = dict()
app = xw.App(visible=False, add_book=False)
app.display_alerts = False
app.screen_updating = False
wb = app.books.open(file)
sheet_count = wb.sheets.count
for i in range(sheet_count):
sheet_name = wb.sheets[i].name
            # skip sheets listed in ignore_sheet_name (e.g. the Summary sheet)
if sheet_name not in self.__ignore_sheet_name:
self.__handle_sheet(wb, sheet_name, result)
wb.close()
app.quit()
try:
app.kill()
except AttributeError:
logger.debug("app kill fail")
logger.info("read excel done")
return result
def __handle_sheet(self, wb: Book, sheet_name: str, result: Dict[str, TestCases]):
"""
        Parse a single sheet.
        :param wb: workbook
        :param sheet_name: sheet name
        :param result: result collection
"""
logger.info(f"handle sheet {sheet_name}")
sheet = wb.sheets[sheet_name]
testcases = self.__parse_test_case(sheet)
result[sheet_name] = testcases
def __parse_test_case(self, sheet: Sheet) -> TestCases:
"""
        Parse the test cases one by one.
        :param sheet: worksheet to parse
        :return: test cases
"""
testcases = []
        # collect test case IDs so duplicates can be detected
tem = []
max_row = sheet.used_range.last_cell.row
for i in range(max_row + 1):
if i > (self.__start_row - 1):
testcase = Testcase()
testcase.name = sheet.range(f"C{i}").value
index = testcase.name.split('_')[-1]
if not index.isdigit():
raise RuntimeError(f"此条用例名称: {testcase.name} 缺少ID号,请添加")
tem.append(index)
testcase.module = sheet.range(f"B{i}").value
testcase.pre_condition = self.__parse_pre_condition(sheet.range(f"D{i}").value)
testcase.actions = self.__parse_actions(sheet.range(f"E{i}").value)
testcase.exceptions = self.__parse_exceptions(sheet.range(f"F{i}").value)
requirement = sheet.range(f"G{i}").value
testcase.requirement = requirement.split("\n") if requirement else None
fix_cell = sheet.range(f"J{i}").value
if fix_cell is not None:
try:
testcase.fix = ModifyTypeEnum.read_excel_from_name(fix_cell)
except ValueError:
logger.debug(f"{fix_cell} is not ModifyTypeEnum")
automation_cell = sheet.range(f"H{i}").value
                # empty cell -> automation=None; "是" (yes) -> True; any other value -> False; only "yes" is written to xmind as [A]
testcase.automation = automation_cell == "是" if automation_cell else None
priority_cell = sheet.range(f"I{i}").value
testcase.priority = priority_config[priority_cell] if priority_cell else None
test_result = sheet.range(f"N{i}").value
testcase.test_result = test_result.strip().upper() if test_result else None
testcase.calc_hash()
testcases.append(testcase)
for i in tem:
if tem.count(i) > 1:
raise RuntimeError(f"此ID: {i} 有重复,请检查")
return testcases
@staticmethod
def __filter_automotive(content: str) -> bool:
return not (content.startswith("0x") or content.startswith("0X"))
def __parse_pre_condition(self, pre_condition: str) -> List[str]:
"""
        Parse the preconditions.
        :param pre_condition: precondition string
:return:
"""
logger.debug(f"pre_condition = {pre_condition}")
contents = []
if pre_condition:
if "\r\n" in pre_condition:
pre_condition = pre_condition.replace("\r\n", "$")
# pre_conditions = list(filter(lambda x: self.__filter_automotive(x) and x != "", pre_condition.split("\n")))
pre_conditions = list(filter(lambda x: x != "", pre_condition.split("\n")))
pre_conditions = list(map(lambda x: x.replace("、", "."), pre_conditions))
for pre in pre_conditions:
# if point in pre:
# pre = pre.replace(point, " ").strip()
                # avoid stripping the first two characters of lines that carry no numbering
if pre[0].isdecimal() and pre[:2] != '0x':
pre = pre[2:].strip()
if pre[:2] == "0x":
pre = pre
logger.debug(f"pre = {pre}")
if "$" in pre:
pre = pre.replace("$", "\r\n")
contents.append(pre)
return contents
def __parse_actions(self, actions: str) -> List[str]:
total = []
lines = actions.split("\n")
temp = []
for i, line in enumerate(lines):
if line == '':
continue
if line[0] in index_list:
                # temp stores the index of every line that starts with a step number
                # (e.g. with four lines where the 1st and 3rd are numbered, temp = [0, 2])
temp.append(i)
        # no numbering at all means there is only a single action step
if temp:
            # slice the lines between consecutive numbered indices
temp.pop(0)
start_index = 0
for t in temp:
content = "\n".join(lines[start_index:t])
total.append(content)
start_index = t
            # join everything from the last numbered line to the end with \n
content = "\n".join(lines[start_index:])
total.append(content)
else:
total.append(actions)
        # strip leading numbering such as "1."
new_total = []
for t in total:
content = self.__handle_prefix_str(t)
new_total.append(content)
return new_total
@staticmethod
def __handle_prefix_str(content: str) -> str:
"""
        Strip prefixes such as "1." and "2.".
:param content:
:return:
"""
if content[0] in index_list:
content = content[1:]
if content[0] in (".", "。", " "):
content = content[1:]
return content
def __parse_exceptions(self, exceptions: str) -> List[str]:
contents = []
if exceptions:
exception_lines = exceptions.split("\r\n")
for line in exception_lines:
content = self.__handle_prefix_str(line)
contents.append(content)
return contents
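# Minimal usage sketch (the file path is illustrative):
# reader = StandardExcelSampleReader(ignore_sheet_name=["Summary"])
# cases = reader.read_from_file(r"D:\cases\testcases.xlsx")  # {sheet_name: [Testcase, ...]}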
| 2.078125 | 2 |
lib/cogs/help.py | yosiz/msf-alliance-bot | 0 | 12773397 | <filename>lib/cogs/help.py
from typing import Optional
from discord import Embed
from discord.utils import get
from discord.ext.commands import Cog, command, Context
from discord.ext.menus import MenuPages, ListPageSource
name = "help"
def syntax(cmd: command):
cmd_and_aliases = "|".join([str(cmd), *cmd.aliases])
params = []
for key, value in cmd.params.items():
if key not in ("self", "ctx"):
params.append(f"[{key}]" if "NoneType" in str(value) else f"<{key}>")
params = " ".join(params)
return f"```{cmd_and_aliases} {params}```"
class HelpMenu(ListPageSource):
def __init__(self, ctx: Context, data):
self.ctx = ctx
super().__init__(data, per_page=3)
    async def write_page(self, menu, fields=None):
        fields = fields or []
offset = (menu.current_page * self.per_page)
len_data = len(self.entries)
embed = Embed(title="Help",
description="Welcome to the Help page",
color=self.ctx.author.color)
embed.set_thumbnail(url=self.ctx.guild.me.avatar_url)
        embed.set_footer(text=f"{offset + 1:,} - {min(len_data, offset + self.per_page):,} of {len_data:,} commands.")
for cmd_name, value in fields:
embed.add_field(name=cmd_name, value=value, inline=False)
return embed
async def format_page(self, menu, entries):
fields = []
for entry in entries:
fields.append((entry.brief or "No description", syntax(entry)))
return await self.write_page(menu, fields)
class Help(Cog):
def __init__(self, bot):
self.bot = bot
self.bot.remove_command("help")
async def cmd_help(self, ctx: Context, cmd):
embed = Embed(title=f"help for `{cmd}`",
description=syntax(cmd),
color=ctx.author.color)
embed.add_field(name="Command description", value="command.help")
await ctx.send(embed=embed)
@command(name="help")
async def show_help(self, ctx: Context, cmd: Optional[str]):
"""display help"""
if cmd is None:
menu = MenuPages(source=HelpMenu(ctx, list(self.bot.commands)),
clear_reactions_after=True,
delete_message_after=True,
timeout=60.0)
await menu.start(ctx)
else:
if cmnd := get(self.bot.commands, name=cmd):
await self.cmd_help(ctx, cmnd)
else:
await ctx.send("Command does not exist")
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up(name)
print(f"{name} Cog ready")
def setup(bot):
bot.add_cog(Help(bot))
| 2.53125 | 3 |
examples/draw_3D_test.py | c12qe/HFSSdrawpy | 8 | 12773398 | import os
import HFSSdrawpy.libraries.example_elements as elt
from HFSSdrawpy import Body, Modeler
from HFSSdrawpy.parameters import GAP, TRACK
# import HFSSdrawpy.libraries.base_elements as base
pm = Modeler("hfss")
relative = pm.set_variable("1mm")
main = Body(pm, "main")
chip = Body(pm, "chip", rel_coor=[["1mm", "1mm", "1mm"], [1, 0, 0], [0, 0, 1]], ref_name="main")
chip1 = Body(pm, "chip1", rel_coor=[[0, 0, 0], [0, 1, 0], [1, 0, 0]], ref_name="chip")
chip2 = Body(pm, "chip2", rel_coor=[[0, 0, 0], [1, 0, 0], [0, 0, 1]], ref_name="chip")
track = pm.set_variable("20um")
gap = pm.set_variable("10um", name="gap")
track_big = pm.set_variable("25um")
gap_big = pm.set_variable("15um")
track_middle = pm.set_variable("22.5um")
gap_middle = pm.set_variable("12.5um")
offset = pm.set_variable("-50um")
# chip1
# default is the widths of track and gap
(port11,) = elt.create_port(chip1, [track, track + 2 * gap], name="port11")
with chip1(["2.0mm", "0.0mm"], [1, 0]):
# default is the widths of track and gap
(port12,) = elt.create_port(chip1, [track, track + 2 * gap], name="port12")
bond_length, bond_slope, pcb_track, pcb_gap = "200um", 0.5, "300um", "200um"
with chip1(["0.5mm", "0.5mm"], [0, 1]):
(con_port1,) = elt.draw_connector(chip1, pcb_track, pcb_gap, bond_length, name="con_port1")
with chip1(["1.5mm", "-1.0mm"], [0, 1]):
(port13,) = elt.create_port(chip1, [track, track + 2 * gap], name="port13")
chip1.draw_cable(
con_port1,
port13,
is_bond=True,
fillet="100um",
reverse_adaptor=False,
to_meander=[0, 0, 0],
meander_length=0,
name="con_port1_port13",
)
ground_plane1 = chip1.rect([0, 0], ["3mm", "3mm"], layer=TRACK, name="gp1")
# chip2
# default is the widths of track and gap
(port21,) = elt.create_port(chip2, [track, track + 2 * gap], name="port21")
with chip2(["2.0mm", "0.0mm"], [1, 0]):
# default is the widths of track and gap
(port22,) = elt.create_port(chip2, [track, track + 2 * gap], name="port22")
bond_length, bond_slope, pcb_track, pcb_gap = "200um", 0.5, "300um", "200um"
with chip2(["0.5mm", "0.5mm"], [0, 1]):
(con_port2,) = elt.draw_connector(chip2, pcb_track, pcb_gap, bond_length, name="con_port2")
with chip2(["1.5mm", "-1.0mm"], [0, 1]):
(port23,) = elt.create_port(chip2, [track, track + 2 * gap], name="port23")
chip2.draw_cable(
con_port2,
port23,
is_bond=True,
fillet="100um",
reverse_adaptor=False,
to_meander=[0, 0, 0],
meander_length=0,
name="con_port2_port23",
)
# # 3D
chip.box([0, 0, 0], ["3mm", "3mm", "3mm"], material="silicon")
ground_plane2 = chip2.rect([0, 0], ["3mm", "3mm"], layer=TRACK, name="gp2")
ground_plane1.subtract(chip1.entities[GAP])
ground_plane1.unite(chip1.entities[TRACK])
ground_plane1.assign_perfect_E()
ground_plane2.subtract(chip2.entities[GAP])
ground_plane2.unite(chip2.entities[TRACK])
ground_plane2.assign_perfect_E()
main.cylinder([0, 0, 0], "0.5mm", "0.7mm", "Z", name="tube")
# generate gds file
pm.generate_gds(os.path.join(os.getcwd(), "gds_files"), "cable_test")
| 2.296875 | 2 |
series_tiempo_ar_api/apps/metadata/migrations/0004_auto_20181219_1227.py | datosgobar/series-tiempo-ar-api | 28 | 12773399 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-19 15:27
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metadata', '0003_metadataconfig'),
]
operations = [
migrations.AddField(
model_name='metadataconfig',
name='query_config',
field=django.contrib.postgres.fields.jsonb.JSONField(default={'dataset_description': {'boost': 1},
'dataset_source': {'boost': 1},
'dataset_title': {'boost': 1},
'description': {'boost': 1.5}}),
preserve_default=False,
),
migrations.AlterField(
model_name='indexmetadatatask',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
]
| 1.703125 | 2 |
DSSPparser/pdbToxssp.py | neolei/DSSPparser | 10 | 12773400 | # This example client takes a PDB file, sends it to the REST service, which
# creates HSSP data. The HSSP data is then output to the console.
# api by https://github.com/cmbi/xssp-api/blob/master/xssp_api/frontend/api/endpoints.py
import json
import requests
import time
REST_URL = "https://www3.cmbi.umcn.nl/xssp/"
inputCollection = ["pdb_id", "pdb_redo_id", "pdb_file", "sequence"]
outputCollection = ["hssp_hssp", "hssp_stockholm", "dssp"]
def pdbToxssp(_input, inputF="pdb_id", outputF="dssp"):
'''transform PDB to xssp
Arguments:
_input {str} -- input id or PDB file
Keyword Arguments:
inputF {str} -- input format (default: {"pdb_id"})
outputF {str} -- output format (default: {"dssp"})
Raises:
Exception -- raise format error
Returns:
str -- dssp or hssp format
'''
# inuptF type check
    if inputF not in inputCollection:
        raise ValueError("input format error, please check your format!")
    if outputF not in outputCollection:
        raise ValueError("output format error, please check your format!")
# request url
url_create = '{0}api/create/{1}/{2}/'.format(REST_URL, inputF, outputF)
if inputF == "pdb_id":
pdb_id = {"data": _input}
r = requests.post(url_create, data=pdb_id)
elif inputF == "pdb_file":
files = {'file_': open(_input, 'rb')}
r = requests.post(url_create, files=files)
elif inputF == "pdb_redo_id":
pdb_redo_id = {"data": _input}
r = requests.post(url_create, data=pdb_redo_id)
elif inputF == "sequence":
sequence = {"data": open(_input, 'rb')}
r = requests.post(url_create, data=sequence)
# Send a request to the server to create hssp data from the pdb file data.
# If an error occurs, an exception is raised and the program exits. If the
# request is successful, the id of the job running on the server is
# returned.
r.raise_for_status()
job_id = json.loads(r.text)['id']
print("Job submitted successfully. Id is: '{}'".format(job_id))
# Loop until the job running on the server has finished, either successfully
# or due to an error.
ready = False
while not ready:
# Check the status of the running job. If an error occurs an exception
# is raised and the program exits. If the request is successful, the
# status is returned.
url_status = '{0}api/status/{1}/{2}/{3}/'.format(
REST_URL, inputF, outputF, job_id)
r = requests.get(url_status)
r.raise_for_status()
status = json.loads(r.text)['status']
print("Job status is: '{}'".format(status))
# If the status equals SUCCESS, exit out of the loop by changing the
# condition ready. This causes the code to drop into the `else` block
# below.
#
# If the status equals either FAILURE or REVOKED, an exception is raised
# containing the error message. The program exits.
#
# Otherwise, wait for five seconds and start at the beginning of the
# loop again.
if status == 'SUCCESS':
ready = True
elif status in ['FAILURE', 'REVOKED']:
raise Exception(json.loads(r.text)['message'])
else:
time.sleep(5)
else:
# Requests the result of the job. If an error occurs an exception is
# raised and the program exits. If the request is successful, the result
# is returned.
url_result = '{0}api/result/{1}/{2}/{3}/'.format(
REST_URL, inputF, outputF, job_id)
r = requests.get(url_result)
r.raise_for_status()
result = json.loads(r.text)['result']
# Return the result to the caller, which prints it to the screen.
return result
if __name__ == '__main__':
result = pdbToxssp("2GW9")
print(result)
| 2.875 | 3 |
terrascript/data/davidji99/herokux.py | mjuenema/python-terrascript | 507 | 12773401 | # terrascript/data/davidji99/herokux.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:42 UTC)
import terrascript
class herokux_addons(terrascript.Data):
pass
class herokux_kafka_mtls_iprules(terrascript.Data):
pass
class herokux_postgres_mtls_certificate(terrascript.Data):
pass
class herokux_registry_image(terrascript.Data):
pass
__all__ = [
"herokux_addons",
"herokux_kafka_mtls_iprules",
"herokux_postgres_mtls_certificate",
"herokux_registry_image",
]
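# Minimal usage sketch (the label and surrounding workflow are illustrative;
# terrascript data sources are added to a Terrascript document):
# import terrascript
# ts = terrascript.Terrascript()
# ts += herokux_registry_image("image")
# print(str(ts))  # dumps the generated Terraform JSON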
| 1.390625 | 1 |
13/tidyprop.py | Gridelen/core-python-ex | 1 | 12773402 | <reponame>Gridelen/core-python-ex<gh_stars>1-10
class HideX(object):
# def x():
# def fget(self):
# return ~self.__x
# def fset(self, x):
# assert isinstance(x, int), 'x must be int'
# self.__x = ~x
# return locals()
# x = property(**x())
@property
def x(self):
return ~self.__x
@x.setter
def x(self, x):
assert isinstance(x, int), 'x must be int'
self.__x = ~x
o = HideX()
o.x = 5
print(o.x)
print(o._HideX__x) | 3.21875 | 3 |
scripts/run.py | mhw32/contrastive-learning-scaffold | 1 | 12773403 | import os
import torch
from src.agents.agents import *
from src.utils.setup import process_config
from src.utils.utils import load_json
def run(config_path, gpu_device=-1):
config = process_config(config_path)
if gpu_device >= 0:
config.gpu_device = [gpu_device]
AgentClass = globals()[config.agent]
agent = AgentClass(config)
if config.continue_exp_dir is not None:
agent.logger.info("Found existing model... Continuing training!")
checkpoint_dir = os.path.join(config.continue_exp_dir, 'checkpoints')
agent.load_checkpoint(
config.continue_exp_name,
checkpoint_dir=checkpoint_dir,
load_memory_bank=True,
load_model=True,
load_optim=True,
load_epoch=True,
)
try:
agent.run()
agent.finalise()
except KeyboardInterrupt:
pass
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, default='path to config file')
parser.add_argument('--gpu-device', type=int, default=-1)
args = parser.parse_args()
run(args.config, args.gpu_device)
| 2.171875 | 2 |
get_awap.py | tammasloughran/get_awap | 1 | 12773404 | <reponame>tammasloughran/get_awap
#!/usr/bin/env python
"""get_awap.py downloads AWAP data from the BoM and converts it to netcdf.
Usage: python get_awap.py ddmmyyyy-DDMMYYYY VAR
ddmmyyyy-DDMMYYYY - is the period to download
VAR - is the variable to download. Use either tmax, tmin, rain, 9amvapr, or 3pmvapr.
eg. python get_awap.py 01012015-31122015 tmax
"""
import urllib
import sys
if sys.version[0]=='3':
urlretrieve = urllib.request.urlretrieve
elif sys.version[0]=='2':
urlretrieve = urllib.urlretrieve
import numpy as np
import os
import pandas
from netCDF4 import Dataset
import datetime as dt
# Get args
args = sys.argv
period = args[1]
getvar = args[2]
# Check formatting of period
try:
assert('-' in period)
assert(type(int(period[:period.find('-')]))==int)
assert(len(period[:period.find('-')])==8)
assert(type(int(period[period.find('-')+1:]))==int)
assert(len(period[period.find('-')+1:])==8)
except:
print("Formatting of dates is incorrect. Dates format should be ddmmyyyy-DDMMYYYY")
sys.exit(1)
# Manage dates
start = period[:period.find('-')]
end = period[period.find('-')+1:]
start_date = dt.datetime(int(start[4:]),int(start[2:4]), int(start[:2]))
end_date = dt.datetime(int(end[4:]),int(end[2:4]), int(end[:2]))
assert(start_date.year>=1900)
dates = pandas.date_range(start_date, end_date)
# Setup url strings
site = 'http://www.bom.gov.au/web03/ncc/www/awap/'
location = ''
if getvar == 'tmin':
location = 'temperature/minave/daily/grid/0.05/history/nat/'
elif getvar == 'tmax':
location = 'temperature/maxave/daily/grid/0.05/history/nat/'
elif getvar == 'rain':
location = 'rainfall/totals/daily/grid/0.05/history/nat/'
elif getvar == '9amvapr':
location = 'vprp/vprph09/daily/grid/0.05/history/nat/'
elif getvar == '3pmvapr':
    location = 'vprp/vprph15/daily/grid/0.05/history/nat/'
else:
    print(getvar+' is not a valid variable. Use tmin, tmax, rain, 9amvapr, or 3pmvapr')
    sys.exit(1)
# Download
for idate in dates:
cdate = idate.strftime('%Y%m%d')
filename = cdate + cdate + '.grid.Z'
if not os.path.exists(filename):
url = site + location + filename
urlretrieve(url, filename)
os.system('uncompress '+filename)
# Load .grid files
for cyear in range(start_date.year, end_date.year+1):
iday = 0
this_year = dates[dates.year==cyear]
days_in_year = (dates.year==cyear).sum()
cdate = this_year[0]
filename = cdate.strftime('%Y%m%d') + cdate.strftime('%Y%m%d') + '.grid'
f = open(filename)
cols = int(f.readline()[6:9])
rows = int(f.readline()[6:9])
first_lon = float(f.readline()[10:18])
first_lat = float(f.readline()[10:18])
delta = float(f.readline()[9:15])
awap_data2 = np.ones((days_in_year,rows,cols))*np.nan
for i, cday in enumerate(this_year):
# Load
cdate = cday.strftime('%Y%m%d')
filename = cdate + cdate + '.grid'
f = open(filename)
cols = int(f.readline()[6:9])
rows = int(f.readline()[6:9])
awap_data = np.ones((rows,cols))*np.nan
f.readline() # These lines are just the grid specification
f.readline()
f.readline()
f.readline()
for ix in range(rows):
line = f.readline().split()
awap_data[ix,:] = np.array([float(x) for x in line])
awap_data2[i,...] = np.flipud(awap_data)
# Save to file
outfile = 'AWAP_'+getvar+'_'+this_year[0].strftime('%Y%m%d')+'-'+this_year[-1].strftime('%Y%m%d')+'.nc'
ncdata = Dataset(outfile, 'w')
setattr(ncdata, 'notes', 'Downloaded using get_awap https://github.com/tammasloughran/get_awap')
times = [(this_year[i]-dt.datetime(1899,1,1)).days for i in range(this_year.size)]
lats = [(int(first_lat*100)+i*int(delta*100))/100 for i in range(rows)]
lons = [(int(first_lon*100)+i*int(delta*100))/100 for i in range(cols)]
ncdata.createDimension('time',len(times))
ncdata.createDimension('lat',rows)
ncdata.createDimension('lon',cols)
otime = ncdata.createVariable('time','float',dimensions=('time'))
setattr(otime, 'standard_name', 'time')
setattr(otime, 'calendar', 'proleptic_gregorian')
setattr(otime, 'units', 'days since 1899-01-01 00:00:00')
olat = ncdata.createVariable('lat','float',dimensions=('lat'))
setattr(olat, 'standard_name', 'latitude')
setattr(olat, 'long_name', 'Latitude')
setattr(olat, 'units', 'degrees_north')
setattr(olat, 'axis', 'Y')
olon = ncdata.createVariable('lon','float',dimensions=('lon'))
setattr(olon, 'standard_name', 'longitude')
setattr(olon, 'long_name', 'Longitude')
setattr(olon, 'units', 'degrees_east')
setattr(olon, 'axis', 'X')
odata = ncdata.createVariable(getvar,awap_data2.dtype,dimensions=('time','lat','lon'),fill_value=-99.99)
if getvar=='tmax':
setattr(odata, 'long_name', "Daily maximum temperature")
setattr(odata, 'units', 'deg C')
elif getvar=='tmin':
setattr(odata, 'long_name', "Dialy minimum temperature")
setattr(odata, 'units', 'deg C')
elif getvar=='rain':
setattr(odata, 'long_name', "Daily rainfall total")
setattr(odata, 'units', 'kg m-2 d-1')
otime[:] = times
olat[:] = lats
olon[:] = lons
odata[:] = awap_data2
ncdata.close() | 2.8125 | 3 |
game/path_game/actor.py | CospanDesign/python | 5 | 12773405 | import os
import pygame
from game_defines import DIRECTIONS
ASSET_BASE = os.path.join(os.path.dirname(__file__), "assets")
class Actor(object):
@staticmethod
def asset(name):
return os.path.join(ASSET_BASE, name)
def __init__(self, name, image_path, actor_type, startx, starty):
self.image = pygame.image.load(Actor.asset(image_path))
self.name = name
self.x = startx
self.y = starty
self.actor_type = actor_type
self.map_object = None
def process(self, sensor_input):
        raise NotImplementedError("process() must be overridden by a subclass")
def get_image(self):
return self.image
def get_type(self):
return self.actor_type
def get_x(self):
return self.x
def get_y(self):
return self.y
def set_map(self, map_object):
self.map_object = map_object
def move(self, move_to):
x_offset = 0
y_offset = 0
if move_to == DIRECTIONS.UP:
x_offset = 0
y_offset = -1
elif move_to == DIRECTIONS.UPRIGHT:
x_offset = 1
y_offset = -1
elif move_to == DIRECTIONS.UPLEFT:
x_offset = -1
y_offset = -1
elif move_to == DIRECTIONS.RIGHT:
x_offset = 1
y_offset = 0
elif move_to == DIRECTIONS.DOWN:
x_offset = 0
y_offset = 1
elif move_to == DIRECTIONS.DOWNRIGHT:
x_offset = 1
y_offset = 1
elif move_to == DIRECTIONS.DOWNLEFT:
x_offset = -1
y_offset = 1
elif move_to == DIRECTIONS.LEFT:
x_offset = -1
y_offset = 0
if self.map_object.is_blocked(self.x + x_offset, self.y + y_offset):
return False
self.x += x_offset
self.y += y_offset
return True
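# Minimal subclass sketch (asset name and behaviour are illustrative):
# class Hero(Actor):
#     def __init__(self, x, y):
#         super().__init__("hero", "hero.png", "player", x, y)
#     def process(self, sensor_input):
#         # always walk right; move() returns False when the map blocks it
#         return self.move(DIRECTIONS.RIGHT)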
| 2.921875 | 3 |
exam2/image/test.py | kkkarnav/ashoka | 1 | 12773406 | import math
from images_fcp import *
array = readImage("lenna.png")
width, height = len(array[0]), len(array) # 2560, 1707
kernel = [
[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]
]
# turn the array left by 90 degrees
def transpose(A):
B = [[(255, 255, 0) for row in A] for column in A[0]]
assert(len(B[0]) == len(A))
assert(len(B) == len(A[0]))
for row in range(len(A)): # height-wise
for column in range(len(A[row])): # width-wise
# A[row][column] rotates 90 left
# A[len(A)-row-1][column] rotates 90 right
B[column][row] = A[row][column]
return B
# mirror the array vertically
def yflip(A):
B = [[(255, 255, 0) for column in row] for row in A]
for row in range(len(A)): # height-wise
for column in range(len(A[row])): # width-wise
B[row][len(A[row])-column-1] = A[row][column]
return B
# mirror the array horizontally
def xflip(A):
B = [[(255, 255, 0) for column in row] for row in A]
for row in range(len(A)): # height-wise
for column in range(len(A[row])): # width-wise
B[len(A)-row-1][column] = A[row][column]
return B
# blur or sharpen A with K
def apply_kernel(A, K):
B = [[(255, 255, 0) for column in row] for row in A]
for row in range(len(A)): # height-wise
for column in range(len(A[row])): # width-wise
B[row][column] = multiply(A, K, row, column, len(A), len(A[row]))
return B
# generate colour pattern
def create_pattern(pattern, height, width):
for row in range(width):
for col in range(height):
nb = ((255 - (row+col)*1.2)/4)%255
pattern[col][row] = (pattern[col][row][0]-nb, pattern[col][row][1]-nb, nb)
return pattern
# place A on a larger black canvas, blanking a border region around the image
def surround(A, k):
B = [[(0, 0, 0) for column in range(len(A[0])+(k))] for row in range(len(A) + (k))]
for row in range(k*2, len(A[0])-k-1):
for col in range(k*2, len(A)-k-1):
B[col][row] = A[col][row]
return B
if __name__ == '__main__':
# add a colour filter
filtered_array = [[(array[row][column][0], array[row][column][1], array[row][column][2]) for column in range(width)] for row in range(height)]
# rotate or mirror the array
mirrored_array = xflip(yflip(transpose(array)))
# apply image kernel
output_array = apply_kernel(array, kernel)
# generate colour pattern
# pattern_array = create_pattern(800, 500)
q2 = array
reverse_q2 = [[(255-(q2[row][column][0]), 255-(q2[row][column][1]), 255-(q2[row][column][2])) for column in range(len(q2[0]))] for row in range(len(q2))]
detected_reverse_q2 = apply_kernel(reverse_q2, kernel)
blue = create_pattern(q2, len(q2), len(q2[0]))
bordered = surround(blue, 10)
writeImage(blue, "blue.jpg")
writeImage(reverse_q2, "q2_reverse.jpg")
writeImage(detected_reverse_q2, "q2_detected.jpg")
writeImage(bordered, "bordered.jpg")
| 3.28125 | 3 |
coupled-harmonic-oscillator/test_cho.py | guiltygyoza/rk4-starknet | 18 | 12773407 | <reponame>guiltygyoza/rk4-starknet
import pytest
from starkware.starknet.testing.starknet import Starknet
from timeit import default_timer as timer
@pytest.mark.asyncio
async def test_dict():
starknet = await Starknet.empty()
print()
contract = await starknet.deploy("cho.cairo")
## note: if passing negative numbers to cairo function => must mod P in python first;
## also, the return value from cairo has been mod P, so must detect neg value specifically
PRIME = 3618502788666131213697322783095070105623107215331596699973092056135872020481
PRIME_HALF = PRIME//2
SCALE_FP = 10000 # consistent with SCALE_FP in the contract
## handling negative numbers returned by Cairo function (current testing framework does not handle this)
def adjust_for_negative (history):
return [e if e < PRIME_HALF else e-PRIME for e in history]
def adjust_for_negative_single (e):
return e if e < PRIME_HALF else e-PRIME
# set constants for the experiment
W = 1000 # consistent with const W set in contract
x1_0 = 100.
x1d_0 = 0.
x2_0 = 900.
x2d_0 = 0.
t_0 = 0.
dt = 0.01
T = 2
x1_0_fp = int(x1_0 * SCALE_FP)
x1d_0_fp = int(x1d_0 * SCALE_FP)
x2_0_fp = int(x2_0 * SCALE_FP)
x2d_0_fp = int(x2d_0 * SCALE_FP)
t_0_fp = int(t_0 * SCALE_FP)
dt_fp = int(dt * SCALE_FP)
# run rk4 integration continuously
t_fp = t_0_fp
x1_fp = x1_0_fp
x1d_fp = x1d_0_fp
x2_fp = x2_0_fp
x2d_fp = x2d_0_fp
print(f'm1 starting at x1: {x1_fp} with v1: {x1d_fp}')
print(f'm2 starting at x2: {x2_fp} with v2: {x2d_fp}')
x1_fp_history = [x1_fp]
x1d_fp_history = [x1d_fp]
x2_fp_history = [x2_fp]
x2d_fp_history = [x2d_fp]
x1_delta_history = []
N = int(T//dt)
for i in range(N):
ret = await contract.query_next_given_coordinates(
t = t_fp,
dt = dt_fp,
x1 = x1_fp,
x1d = x1d_fp,
x2 = x2_fp,
x2d = x2d_fp
).call()
x1_fp = ret.x1_nxt
x1d_fp = ret.x1d_nxt
x2_fp = ret.x2_nxt
x2d_fp = ret.x2d_nxt
x1_fp_history.append (x1_fp)
x1d_fp_history.append(x1d_fp)
x2_fp_history.append (x2_fp)
x2d_fp_history.append(x2d_fp)
print(f'{i+1}th/{N} retrieved.')
x1_fp_history = adjust_for_negative (x1_fp_history)
x1d_fp_history = adjust_for_negative (x1d_fp_history)
x2_fp_history = adjust_for_negative (x2_fp_history)
x2d_fp_history = adjust_for_negative (x2d_fp_history)
print('x1_fp_history:')
print(f' {x1_fp_history}')
print()
print('x2_fp_history:')
print(f' {x2_fp_history}')
| 2.25 | 2 |
artemis/utils/basic.py | StanfordGeometryLab/artemis | 254 | 12773408 | """
Various simple (basic) functions in the "utilities".
The MIT License (MIT)
Originally created at 8/31/20, for Python 3.x
Copyright (c) 2021 <NAME> (<EMAIL>) & Stanford Geometric Computing Lab
"""
import torch
import multiprocessing as mp
import dask.dataframe as dd
from torch import nn
from sklearn.model_selection import train_test_split
def iterate_in_chunks(l, n):
"""Yield successive 'n'-sized chunks from iterable 'l'.
Note: last chunk will be smaller than l if n doesn't divide l perfectly.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def df_parallel_column_apply(df, func, column_name):
n_partitions = mp.cpu_count() * 4
d_data = dd.from_pandas(df, npartitions=n_partitions)
res =\
d_data.map_partitions(lambda df: df.apply((lambda row: func(row[column_name])), axis=1))\
.compute(scheduler='processes')
return res
def cross_entropy(pred, soft_targets):
""" pred: unscaled logits
soft_targets: target-distributions (i.e., sum to 1)
"""
logsoftmax = nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(-soft_targets * logsoftmax(pred), 1))
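# Minimal shape sketch (values are illustrative):
# logits = torch.tensor([[2.0, 0.5, 0.1]])   # unscaled predictions
# targets = torch.tensor([[0.7, 0.2, 0.1]])  # each row sums to 1
# loss = cross_entropy(logits, targets)      # scalar tensor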
def make_train_test_val_splits(datataset_df, loads, random_seed, unique_id_column=None):
""" Split the data into train/val/test.
:param datataset_df: pandas Dataframe containing the dataset (e.g., ArtEmis)
:param loads: list with the three floats summing to one for train/val/test
:param random_seed: int
:return: changes the datataset_df in-place to include a column ("split") indicating the split of each row
"""
if sum(loads) != 1:
        raise ValueError('train/val/test loads must sum to 1')
train_size, val_size, test_size = loads
print("Using a {},{},{} for train/val/test purposes".format(train_size, val_size, test_size))
df = datataset_df
## unique id
if unique_id_column is None:
unique_id = df.art_style + df.painting # default for ArtEmis
else:
unique_id = df[unique_id_column]
unique_ids = unique_id.unique()
unique_ids.sort()
train, rest = train_test_split(unique_ids, test_size=val_size+test_size, random_state=random_seed)
train = set(train)
if val_size != 0:
val, test = train_test_split(rest, test_size=round(test_size*len(unique_ids)), random_state=random_seed)
else:
test = rest
test = set(test)
assert len(test.intersection(train)) == 0
def mark_example(x):
if x in train:
return 'train'
elif x in test:
return 'test'
else:
return 'val'
df = df.assign(split=unique_id.apply(mark_example))
return df | 2.859375 | 3 |
T07-00/program.py | maa76/SSof-Project1920 | 2 | 12773409 | <filename>T07-00/program.py
a = source()
sink(a) | 0.921875 | 1 |
bot-stopots/funcoes.py | leosantosx/bot-stopots | 2 | 12773410 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from functools import partial
from time import sleep
from variaveis import *
from controler_palavras import buscar_palavra
from aprende import adicionar_resposta
import os
def iniciar(browser, botao_entrada, nome=''):
limpar_tela()
    print('Loading...')
wdw = WebDriverWait(browser, 25)
browser.get(url)
wdw.until(
partial(espera_elemento, By.XPATH, botao_entrada),
'"Botão entrar" não foi encontrado'
)
browser.find_element_by_xpath(botao_entrada).click()
if 'Twitter' in browser.title:
        print('Logging in with Twitter...')
        user = input('Enter the username/email: ')
        password = input('Enter the password: ')
browser.find_element_by_xpath(input_user_twitter).send_keys(user)
browser.find_element_by_xpath(input_pass_twitter).send_keys(password)
browser.find_element_by_xpath(input_submit_twitter).click()
elif 'Facebook' in browser.title:
        print('Logging in with Facebook...')
        user = input('Enter the email/phone: ')
        password = input('Enter the password: ')
browser.find_element_by_xpath(input_user_facebook).send_keys(user)
browser.find_element_by_xpath(input_pass_facebook).send_keys(password)
browser.find_element_by_xpath(input_submit_facebook).click()
else:
if len(nome) > 0:
wdw.until(
partial(espera_elemento, By.XPATH, input_nome_jogador),
'"Botão nome jogador" não foi encontrado'
)
input_name = browser.find_element_by_xpath(input_nome_jogador)
input_name.clear()
input_name.send_keys(nome)
wdw.until(
partial(espera_elemento, By.XPATH, botao_iniciar),
'"Botão iniciar" não foi encontrado'
)
button_jogar = browser.find_element_by_xpath(botao_iniciar)
button_jogar.click()
def iniciar_jogo(browser):
limpar_tela()
    print('1 - Log in with Twitter.')
    print('2 - Log in with Facebook.')
    print('3 - Log in with a name.')
    print('4 - Log in as anonymous')
tipo_entrada = int(input('=> '))
if tipo_entrada == 1:
iniciar(browser, botao_entrar_twitter)
elif tipo_entrada == 2:
iniciar(browser, botao_entrar_facebook)
elif tipo_entrada == 3:
        nome_jogador = input("Enter the name: ")
iniciar(browser, botao_entrar, nome_jogador)
elif tipo_entrada == 4:
iniciar(browser, botao_entrar)
else:
        print('Invalid option.')
sleep(2)
iniciar_jogo(browser)
def espera_elemento(by, elemento, browser):
el = browser.find_elements(by, elemento)
return bool(el)
def limpar_tela():
os.system('cls' if os.name == 'nt' else 'clear')
def pegar_letra_atual(browser):
letra = browser.find_element_by_xpath(letra_atual).text
if len(letra) > 0:
return letra
return '?'
def escrever_resposta(browser, letra):
labels = browser.find_elements_by_xpath(label_inputs_palavras)
if bool(labels):
for label in labels:
categoria = label.find_element_by_tag_name('span').text
input = label.find_element_by_tag_name('input')
if len(input.get_attribute('value')) == 0:
resposta = buscar_palavra(categoria, letra)
if resposta:
limpar_tela()
                    print('Filling in the fields...')
input.send_keys(resposta)
def buscar_pontos(browser):
pontos = browser.find_element_by_xpath(meus_pontos).text
if len(pontos) > 0:
return pontos.split(' ')[0]
return '0'
def clica_button(browser, botao_elemento, msg):
pode_clicar = browser.find_elements_by_xpath(botao_elemento)
if bool(pode_clicar):
button_preparado = browser.find_element_by_xpath(botao_elemento)
if 'disable' not in button_preparado.get_attribute('class'):
print(msg)
button_preparado.click()
def clica_estou_pronto(browser):
    clica_button(browser, botao_estou_pronto, 'Clicking "Estou pronto" (I am ready)')
def avalia_respostas(browser):
    clica_button(browser, botao_avalia_respostas, 'Clicking "Avaliar respostas" (rate answers)')
def aprende_novas_respostas(browser, primeira_letra):
tema = browser.find_elements_by_xpath(div_tema)
if bool(tema):
tema_text = browser.find_element_by_xpath(div_tema).text
if ':' in tema_text:
tema_text = tema_text.split(':')[1].strip()
respostas = browser.find_elements_by_xpath(div_palavras)
for resposta in respostas:
adicionar_resposta(tema_text, resposta.text, primeira_letra)
| 2.921875 | 3 |
pizza_modules/pizza_combination.py | Freeways/hashcode2017-practice | 1 | 12773411 | <filename>pizza_modules/pizza_combination.py
"""
Generate all possible slice shapes from max to min
"""
from math import sqrt
def generate(min, max):
if min < 1 or max < 1:
        raise ValueError('min: {} and max: {} must be > 0'.format(min, max))
slices = []
for count in range(min, max+1)[::-1]:
slice = {}
slice["count"] = count
# Calc all divisors
dividers = []
for divider in range(1, int(sqrt(count))+1):
if count % divider == 0 :
dividers.append(divider)
# don't include square root twice
if divider**2 != count:
                    dividers.append(count // divider)
dividers.sort(reverse=True)
shapes = []
while len(dividers) > 0:
n = m = dividers.pop()
if len(dividers) != 0:
m = dividers.pop(0)
shapes.append({'n': n, 'm': m})
if n != m :
shapes.append({'n': m, 'm': n})
slice["shapes"] = shapes
slices.append(slice)
return slices
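
# Illustrative usage: print every rectangle shape for slice areas 1..6.
if __name__ == '__main__':
    for s in generate(1, 6):
        print(s)
    # e.g. the entry for count 6 lists the 1x6, 6x1, 2x3 and 3x2 rectangles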
| 3.78125 | 4 |
1-99/50-59/52.py | dcragusa/LeetCode | 0 | 12773412 | """
The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
Given an integer n, return the number of distinct solutions to the n-queens puzzle.
Example:
Input: 4, Output: 2
Explanation: There are two distinct solutions to the 4-queens puzzle as shown below.
[[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]]
"""
"""
Same procedure as problem 51 except we propagate the number of results upwards, and not the final state of the board.
"""
def solve_n_queens(n):
if n == 1:
return 1
elif n < 4:
return 0
def place_queen_on_row(row_idx, board):
results = 0
for col_idx, col in enumerate(board[row_idx]):
if col != '-':
continue
board_c = board.copy()
board_c[row_idx] = f"{'.'*col_idx}Q{'.'*(n-1-col_idx)}"
if row_idx == n - 1:
return 1
for below_step in range(1, n-row_idx):
new_below_row = ''
for below_col_idx, char in enumerate(board_c[row_idx+below_step]):
new_below_row += '.' if (
char == '.' or below_col_idx in {col_idx-below_step, col_idx, col_idx+below_step}
) else '-'
board_c[row_idx+below_step] = new_below_row
if res := place_queen_on_row(row_idx+1, board_c):
results += res
return results
board = ['-'*n for _ in range(n)]
return place_queen_on_row(0, board)
assert solve_n_queens(4) == 2
assert solve_n_queens(8) == 92
| 4.03125 | 4 |
djangox2/apps.py | DrPayne25/djangoX | 0 | 12773413 | from django.apps import AppConfig
class Djangox2Config(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'djangox2'
| 1.367188 | 1 |
pyfos/pyfos_brocade_time.py | madhavinaiduprathap/pyfosbrocade | 44 | 12773414 | <reponame>madhavinaiduprathap/pyfosbrocade
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`pyfos_brocade_time` - PyFOS module to provide REST support for Time Server.
*********************************************************************************
The :mod:`pyfos_brocade_time` provides a REST support for Time Server.
"""
from pyfos import pyfos_rest_util
from pyfos.pyfos_type import pyfos_type
import pyfos.pyfos_version as version
class time_zone(pyfos_rest_util.rest_object):
"""This class provides system time zone information and also can configure the
time zone by both name and offset values.
Important class members:
+--------------------------------+----------------------------------+------------------------------------+
| Attribute name | Description |Frequently used methods |
+================================+==================================+====================================+
| name | Time zone by name |:func:`set_name` |
| | |:func:`peek_name` |
+--------------------------------+----------------------------------+------------------------------------+
| gmt-offset-hours | Hours offset values |:func:`set_gmt_offset_hours` |
| | |:func:`peek_gmt_offset_hours` |
+--------------------------------+----------------------------------+------------------------------------+
| gmt-offset-minutes | Minutes offset values |:func:`set_gmt_offset_minutes` |
| | |:func:`peek_gmt_offset_minutes` |
+--------------------------------+----------------------------------+------------------------------------+
*Object methods*
.. staticmethod:: get(session)
Return a :class:`time_zone` object filled with TS Time Zone
attributes.
Each object can be printed using :func:`pyfos_util.response_print`
and individual attributes accessed through peek methods.
:param session: session handler returned by
:func:`pyfos_auth.login`
:rtype: :class:`time_zone` object. Dictionary in case of error.
.. method:: patch()
        Replaces the existing configuration. The fields involved are set within
        the object using the attributes' set methods. This command is used to
        replace the existing time zone configuration.
Example usage of the method to configure time zone by name:
.. code-block:: python
tz_obj = pyfos_brocade_time.time_zone()
tz_obj.set_name("Africa/Accra")
tz_obj.patch(session)
:param session: session handler returned by
:func:`pyfos_auth.login`
:rtype: dictionary in case of error or success response
*Attribute methods*
.. method:: peek_name()
Reads time zone name from the object.
:rtype: dictionary in case of error or success response
.. method:: set_name(name)
Sets name in the object
:param name: time zone by name
:rtype: dictionary in case of error or success response
.. method:: peek_gmt_offset_hours()
Reads time zone hours offset value from the object.
:rtype: dictionary in case of error or success response
.. method:: set_gmt_offset_hours(value)
Sets value in the object
:param value: time zone by hours offset
:rtype: dictionary in case of error or success response
.. method:: peek_gmt_offset_minutes()
Reads time zone minutes offset value from the object.
:rtype: dictionary in case of error or success response
.. method:: set_gmt_offset_minutes(value)
Sets value in the object
:param value: time zone by minutes offset
:rtype: dictionary in case of error or success response
"""
def __init__(self, dictvalues={}):
super().__init__(pyfos_rest_util.rest_obj_type.time_zone,
"/rest/running/brocade-time/time-zone",
version.VER_RANGE_821_and_ABOVE)
self.add(pyfos_rest_util.rest_attribute(
"name", pyfos_type.type_str,
None, pyfos_rest_util.REST_ATTRIBUTE_CONFIG))
self.add(pyfos_rest_util.rest_attribute(
"gmt-offset-hours", pyfos_type.type_int,
None, pyfos_rest_util.REST_ATTRIBUTE_CONFIG))
self.add(pyfos_rest_util.rest_attribute(
"gmt-offset-minutes", pyfos_type.type_int,
None, pyfos_rest_util.REST_ATTRIBUTE_CONFIG))
self.load(dictvalues, 1)
class clock_server(pyfos_rest_util.rest_object):
"""This class provides NTP clock server information and also can configure the
list of NTP clock server or LOCL.
Important class members:
+--------------------------------------+----------------------------------+---------------------------------------------------+
| Attribute name | Description |Frequently used methods |
+======================================+==================================+===================================================+
| ntp-server-address/server-address | NTP server address list or LOCL |:func:`set_ntp_server_address_server_address` |
| | |:func:`peek_ntp_server_address_server_address` |
+--------------------------------------+----------------------------------+---------------------------------------------------+
| active-server | Active server address or LOCL |:func:`peek_active_server` |
+--------------------------------------+----------------------------------+---------------------------------------------------+
*Object methods*
.. staticmethod:: get(session)
Return a :class:`clock_server` object filled with TS
Clock Server attributes.
Each object can be printed using :func:`pyfos_util.response_print`
and individual attributes accessed through peek methods.
:param session: session handler returned by
:func:`pyfos_auth.login`
:rtype: :class:`clock_server` object. Dictionary in case of error.
.. method:: patch()
        Replaces the existing configuration. The fields involved are set within
        the object using the attributes' set methods. This command is used to
        replace the existing NTP clock server configuration.
Example usage of the method to configure ntp clock server:
.. code-block:: python
ts_obj = pyfos_brocade_time.clock_server()
            ts_obj.set_ntp_server_address_server_address(["172.16.58.3", "172.16.31.10"])
            ts_obj.patch(session)
:param session: session handler returned by
:func:`pyfos_auth.login`
:rtype: dictionary in case of error or success response
*Attribute methods*
.. method:: peek_ntp_server_address_server_address()
Reads list or single ntp clock server from the object.
:rtype: dictionary in case of error or success response
.. method:: set_ntp_server_address_server_address(name)
Sets ntp clock server in the object
:param name: list of server ip's
:rtype: dictionary in case of error or success response
.. method:: peek_active_server()
Reads active ntp clock server from the object.
:rtype: dictionary in case of error or success response
"""
def __init__(self, dictvalues={}):
super().__init__(pyfos_rest_util.rest_obj_type.clock_server,
"/rest/running/brocade-time/clock-server",
version.VER_RANGE_821_and_ABOVE)
self.add(pyfos_rest_util.rest_attribute(
"ntp-server-address", pyfos_type.type_na,
dict(), pyfos_rest_util.REST_ATTRIBUTE_CONTAINER))
self.add(pyfos_rest_util.rest_attribute(
"server-address", pyfos_type.type_ip_addr,
None, pyfos_rest_util.REST_ATTRIBUTE_LEAF_LIST),
["ntp-server-address"])
self.add(pyfos_rest_util.rest_attribute(
"active-server", pyfos_type.type_ip_addr,
None, pyfos_rest_util.REST_ATTRIBUTE_NOT_CONFIG))
self.load(dictvalues, 1)
| 1.9375 | 2 |
mosaic.py | karttur/geoimagine02-ancillary | 0 | 12773415 | <reponame>karttur/geoimagine02-ancillary
'''
Created on 11 Mar 2021
@author: thomasgumbricht
'''
from os import path, makedirs, walk
from sys import exit
from geoimagine.ktgdal import MakeMosaic
class MosaicAncillary():
    ''' class for mosaicking ancillary data'''
def __init__(self, pp):
'''
'''
self.process = pp.process
self.verbose = self.process.verbose
dstExists= self._SetDstFPN(pp)
if dstExists:
return
if self.process.parameters.mosaiccode.lower() == 'subdirfiles':
self._MosaicSubDirFiles()
else:
            exitstr = 'EXITING - unrecognized mosaiccode in ancillary.mosaic.MosaicAncillary'
exit(exitstr)
def _SetDstFPN(self,pp):
'''
'''
for locus in pp.dstLayerD:
for datum in pp.dstLayerD[locus]:
for comp in pp.dstLayerD[locus][datum]:
self.dstFPN = pp.dstLayerD[locus][datum][comp].FPN
return pp.dstLayerD[locus][datum][comp]._Exists()
def _MosaicSubDirFiles(self):
'''
'''
srcRootFP = path.join('/volumes',self.process.srcpath.volume,self.process.parameters.datadir)
tileL = []
for thepath, subdirs, files in walk(srcRootFP):
for f in files:
if f.endswith(self.process.srcpath.hdr):
tileL.append(path.join(thepath, f))
if len(tileL) == 0:
            exitstr = 'No tiles found for mosaicking in MosaicAncillary'
exit (exitstr)
vrtFPN = MakeMosaic(tileL, self.dstFPN)
infostr = ' Ancillary Mosaic created:\n %s' %(vrtFPN)
print (infostr)
| 2.140625 | 2 |
trackeroo-real-time/chalicelib/models.py | Wahuh/wurkout-api | 3 | 12773416 | <gh_stars>1-10
import boto3
from .db import connection
import uuid
import json
from decimal import Decimal
_users_table = connection.Table("users")
_runs_table = connection.Table("runs")
_followers_table = connection.Table("followers")
_subscriptions_table = connection.Table("subscriptions")
_connections_table = connection.Table("connections")
class User:
@staticmethod
def add_one(username):
new_user_item = {
"username": username,
"cumulative_distance": 0,
"followers": [],
"subscriptions": [],
}
try:
_users_table.put_item(Item=new_user_item)
return new_user_item
except Exception as e:
raise e
@staticmethod
def add_follower(username, follower):
try:
patch_user_response = _users_table.update_item(
TableName="users",
Key={"username": username},
UpdateExpression="SET followers = list_append(followers, :followers)",
ExpressionAttributeValues={":followers": [follower]},
ReturnValues="ALL_NEW",
)
return patch_user_response
except Exception as e:
raise e
@staticmethod
def add_subscription(username, subscription):
try:
patch_user_response = _users_table.update_item(
TableName="users",
Key={"username": username},
UpdateExpression="SET subscriptions = list_append(subscriptions, :subscriptions)",
ExpressionAttributeValues={":subscriptions": [subscription]},
ReturnValues="ALL_NEW",
)
return patch_user_response
except Exception as e:
raise e
@staticmethod
def scan_users(start_username=None):
try:
scan_response = None
if start_username:
scan_response = _users_table.scan(
TableName="users",
Limit=10,
ExclusiveStartKey={"username": start_username},
)
else:
scan_response = _users_table.scan(TableName="users", Limit=10)
users = scan_response["Items"]
last_username = None
if "LastEvaluatedKey" in scan_response:
if "username" in scan_response["LastEvaluatedKey"]:
last_username = scan_response["LastEvaluatedKey"][
"username"
]
new_response = {"users": users, "last_username": last_username}
return new_response
except Exception as e:
raise e
@staticmethod
def get_user(username):
try:
get_response = _users_table.get_item(Key={"username": username})
return get_response["Item"]
except Exception as e:
raise e
class Run:
@staticmethod
def add_one(username, start_time):
new_run_id = str(uuid.uuid4())
new_run_item = {
"run_id": new_run_id,
"username": username,
"start_time": start_time,
}
try:
_runs_table.put_item(Item=new_run_item)
return new_run_item
except Exception as e:
raise e
@staticmethod
def update_one(
run_id, username, finish_time, average_speed, total_distance
):
try:
patch_run_response = _runs_table.update_item(
TableName="runs",
Key={"username": username, "run_id": run_id},
UpdateExpression="SET finish_time=:finish, average_speed=:average, total_distance=:distance",
ExpressionAttributeValues={
":finish": {"S": finish_time},
":average": {"N": average_speed},
":distance": {"N": total_distance},
},
ReturnValues="ALL_NEW",
)
return patch_run_response
except Exception as e:
raise e
@staticmethod
def get_runs_by_subscriptions(subscriptions):
try:
scan_response = _runs_table.scan(
IndexName="username-start_time-index",
ScanFilter={
"username": {
"AttributeValueList": subscriptions,
"ComparisonOperator": "IN",
}
},
)
runs = scan_response["Items"]
sorted_runs = sorted(runs, key=lambda run: run["start_time"])
return sorted_runs
except Exception as e:
raise e
class Followers:
@staticmethod
def get_one(username):
try:
response = _followers_table.get_item(Key={"username": username})
return response["Item"]
except Exception as e:
raise e
@staticmethod
def add_one(username, follower):
new_follower_item = {"username": username, "followers": [follower]}
try:
_followers_table.put_item(Item=new_follower_item)
return new_follower_item
except Exception as e:
raise e
@staticmethod
def update_one(username, follower):
try:
patch_follower_response = _followers_table.update_item(
TableName="followers",
Key={"username": username},
UpdateExpression="SET followers = list_append(followers, :followers)",
ExpressionAttributeValues={
":followers": {"L": [{"S": follower}]}
},
ReturnValues="ALL_NEW",
)
return patch_follower_response
except Exception as e:
raise e
class Subscriptions:
@staticmethod
def add_one(username, subscription):
new_subscription_item = {
"username": username,
"subscriptions": [subscription],
}
try:
_subscriptions_table.put_item(Item=new_subscription_item)
return new_subscription_item
except Exception as e:
raise e
@staticmethod
def update_one(username, subscription):
try:
patch_subscription_response = _subscriptions_table.update_item(
TableName="subscriptions",
Key={"username": {"S": username}},
UpdateExpression="SET subscriptions = list_append(subscriptions, :subscriptions)",
ExpressionAttributeValues={
":subscriptions": {"L": [{"S": subscription}]}
},
ReturnValues="ALL_NEW",
)
return patch_subscription_response
except Exception as e:
raise e
class Connection:
@staticmethod
def add_one(username):
try:
new_connection_item = {"username": username}
put_connection_response = _connections_table.put_item(
Item=new_connection_item
)
return put_connection_response
except Exception as e:
raise e
@staticmethod
def get_connection_id(username):
try:
get_response = _connections_table.get_item(
Key={"username": username}
)
item = get_response.get("Item")
if item:
return get_response["Item"].get("connection_id")
else:
return None
except Exception as e:
raise e
@staticmethod
def add_connection_id(username, connection_id):
try:
updated_connection_response = _connections_table.update_item(
TableName="connections",
Key={"username": username},
UpdateExpression="SET connection_id=:connection",
ExpressionAttributeValues={":connection": connection_id},
ReturnValues="ALL_NEW",
)
return updated_connection_response
except Exception as e:
raise e
@staticmethod
def remove_connection_id(username):
try:
updated_connection_response = _connections_table.update_item(
TableName="connections",
Key={"username": username},
UpdateExpression="REMOVE connection_id",
ReturnValues="ALL_NEW",
)
return updated_connection_response
except Exception as e:
raise e
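
# Minimal usage sketch (usernames, timestamps and numbers are illustrative;
# assumes the DynamoDB tables above exist and credentials are configured):
# User.add_one("alice")
# run = Run.add_one("alice", "2020-01-01T10:00:00Z")
# Run.update_one(run["run_id"], "alice", "2020-01-01T10:30:00Z",
#                Decimal("8.2"), Decimal("4.1"))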
| 2.21875 | 2 |
src/states/gameoverstate.py | Rishikesh-kumar-7258/Block_breaker | 20 | 12773417 | <reponame>Rishikesh-kumar-7258/Block_breaker
import pygame
from pygame.color import THECOLORS
from src.states.basestate import Base
from src.utilfuntions import Write
class GameOver(Base):
""" This state is active when game is over. """
def __init__(self) -> None:
super().__init__()
        # Options shown on the game-over screen
        self.options = ['Restart', 'Quit']
        # Currently selected option (1-based index)
        self.option = 1
def render(self) -> None:
Write(self.screen, "Game Over", self.screen_width / 2, self.screen_height/2, 72, THECOLORS['darkgoldenrod'], True)
Write(self.screen, f"Score : {self.score}", self.screen_width / 2, self.screen_height/2 + 50, 32, THECOLORS['darkgoldenrod'], True)
# Rendering the options
for i in range(len(self.options)):
color = THECOLORS['white']
if i == self.option - 1:
color = THECOLORS['skyblue']
Write(self.screen, self.options[i], self.screen_width / 2, self.screen_height/2 + 150 + i*30, 32, color, True)
def update(self, param):
# event handling in game over state
for event in param:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
                    self.option = self.option - 1 if self.option > 1 else len(self.options)
                if event.key == pygame.K_DOWN:
                    self.option = (self.option + 1) if self.option < len(self.options) else 1
                if event.key == pygame.K_RETURN:
                    if self.option == 1:
                        self.gstatemachine.change("sliders", screen=self.screen, gstatemachine=self.gstatemachine)
                    elif self.option == 2:
                        pygame.quit()
                        quit()
self.render()
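    # Note (observational): update() both handles input and calls render()
    # each frame, so the state machine only needs to call update(); enter()
    # below receives the screen, state machine, and final score as keyword
    # arguments when the state is activated.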
def enter(self, **param):
self.screen = param['screen']
self.screen_width = self.screen.get_width()
self.screen_height = self.screen.get_height()
self.gstatemachine = param['gstatemachine']
self.score = param['score'] | 3.125 | 3 |
python/server.py | talp101/upload-files-comparison | 6 | 12773418 | <reponame>talp101/upload-files-comparison
import os
from uuid import uuid4
from flask import Flask, request, jsonify
UPLOAD_FOLDER = './uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)  # ensure the upload dir exists before saving
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/upload', methods=['POST'])
def upload_file():
    uploaded_file = request.files.get('files')
    if uploaded_file:
        file_name = str(uuid4())
        uploaded_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))
        return jsonify({'file_name': file_name})
    return jsonify({'error': 'no file uploaded under the "files" field'}), 400
if __name__ == "__main__":
app.run()
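# Example client call (hypothetical; assumes the default Flask dev server at
# http://127.0.0.1:5000):
#   curl -F "files=@photo.jpg" http://127.0.0.1:5000/upload
#   -> {"file_name": "<random uuid4>"}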
| 2.796875 | 3 |
lib/doekbase/data_api/tests/test_genome_annotation_api.py | scanon/data_api2 | 0 | 12773419 | <reponame>scanon/data_api2
"""
Unit tests for genome_annotation
"""
import logging
from unittest import skipUnless
from . import shared
from doekbase.data_api.annotation.genome_annotation.api import GenomeAnnotationAPI
from doekbase.data_api.annotation.genome_annotation.api import _KBaseGenomes_Genome
from doekbase.data_api.annotation.genome_annotation.api import _GenomeAnnotation
from doekbase.data_api.annotation.genome_annotation.api import GenomeAnnotationClientAPI
from doekbase.data_api.sequence.assembly.api import AssemblyAPI
from doekbase.data_api.taxonomy.taxon.api import TaxonAPI
_log = logging.getLogger(__name__)
genome_new = "ReferenceGenomeAnnotations/kb|g.166819"
genome_old = "OriginalReferenceGenomes/kb|g.166819"
t_new = None
t_new_e = None
t_old = None
t_old_e = None
t_client_new = None
t_client_old = None
def setup():
shared.setup()
global t_new, t_new_e, t_old, t_old_e, t_client_new, t_client_old
t_new = GenomeAnnotationAPI(shared.services, shared.token, genome_new)
t_new_e = _GenomeAnnotation(shared.services, shared.token, genome_new)
t_old = GenomeAnnotationAPI(shared.services, shared.token, genome_old)
t_old_e = _KBaseGenomes_Genome(shared.services, shared.token, genome_old)
t_client_new = GenomeAnnotationClientAPI(shared.services["genome_annotation_service_url"], shared.token, genome_new)
t_client_old = GenomeAnnotationClientAPI(shared.services["genome_annotation_service_url"], shared.token, genome_old)
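# Each test below exercises the same call through up to three implementations:
# the public GenomeAnnotationAPI object, the internal implementation class
# (_GenomeAnnotation or _KBaseGenomes_Genome), and the Thrift-based
# GenomeAnnotationClientAPI.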
######## New Genome type tests
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_taxon_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e]:
taxon_t_o = t_o.get_taxon()
assert isinstance(taxon_t_o, TaxonAPI)
_log.debug("Output {}".format(taxon_t_o))
taxon_c_new = t_client_new.get_taxon()
assert taxon_c_new is not None
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_assembly_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e]:
assembly_t_o = t_o.get_assembly()
assert isinstance(assembly_t_o, AssemblyAPI)
_log.debug("Output {}".format(assembly_t_o))
assembly_c_new = t_client_new.get_assembly()
assert assembly_c_new is not None
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_types_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_types_t_o = t_o.get_feature_types()
assert isinstance(feature_types_t_o, list)
_log.debug("Output {}".format(len(feature_types_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_descriptions_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_type_descriptions_t_o = t_o.get_feature_type_descriptions()
assert isinstance(feature_type_descriptions_t_o, dict)
_log.debug("Output {}".format(len(feature_type_descriptions_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_ids_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_ids_t_o = t_o.get_feature_ids()
assert isinstance(feature_ids_t_o, dict)
_log.debug("Output {}".format(len(feature_ids_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_counts_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_type_counts_t_o = t_o.get_feature_type_counts()
assert isinstance(feature_type_counts_t_o, dict)
_log.debug("Output {}".format(len(feature_type_counts_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_locations_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_locations_t_o = t_o.get_feature_locations()
assert isinstance(feature_locations_t_o, dict)
_log.debug("Output {}".format(len(feature_locations_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_dna_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_dna_t_o = t_o.get_feature_dna()
assert isinstance(feature_dna_t_o, dict)
_log.debug("Output {}".format(len(feature_dna_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_functions_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_functions_t_o = t_o.get_feature_functions()
assert isinstance(feature_functions_t_o, dict)
_log.debug("Output {}".format(len(feature_functions_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_aliases_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_aliases_t_o = t_o.get_feature_aliases()
assert isinstance(feature_aliases_t_o, dict)
_log.debug("Output {}".format(len(feature_aliases_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_publications_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
feature_publications_t_o = t_o.get_feature_publications()
assert isinstance(feature_publications_t_o, dict)
_log.debug("Output {}".format(len(feature_publications_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_features_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
features_t_o = t_o.get_features()
assert isinstance(features_t_o, dict)
_log.debug("Output {}".format(len(features_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_proteins_new():
_log.debug("Input {}".format(genome_new))
for t_o in [t_new, t_new_e, t_client_new]:
proteins_t_o = t_o.get_proteins()
assert isinstance(proteins_t_o, dict)
_log.debug("Output {}".format(len(proteins_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_valid_new():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
cds_t_o = t_o.get_cds_by_mrna(inputs)
assert len(cds_t_o) == 2
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_invalid_new():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
cds_t_o = t_o.get_cds_by_mrna(inputs)
assert len(cds_t_o) == 0
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_valid_new():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
mrna_t_o = t_o.get_mrna_by_cds(inputs)
assert len(mrna_t_o) == 2
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.9999999999"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
mrna_t_o = t_o.get_mrna_by_cds(inputs)
assert len(mrna_t_o) == 0
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_valid_new():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
genes_t_o = t_o.get_gene_by_mrna(inputs)
assert len(genes_t_o) == 2
_log.debug("Output {}".format(genes_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_invalid_new():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
genes_t_o = t_o.get_gene_by_mrna(inputs)
assert len(genes_t_o) == 0
_log.debug("Output {}".format(genes_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_valid_new():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
cds_t_o = t_o.get_cds_by_gene(inputs)
assert len(cds_t_o) == 2
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
cds_t_o = t_o.get_cds_by_gene(inputs)
assert len(cds_t_o) == 0
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_valid_new():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
mrna_t_o = t_o.get_mrna_by_gene(inputs)
assert len(mrna_t_o) == 2
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
mrna_t_o = t_o.get_mrna_by_gene(inputs)
assert len(mrna_t_o) == 0
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_valid_new():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
genes_t_o = t_o.get_gene_by_cds(inputs)
assert len(genes_t_o) == 2
_log.debug("Output {}".format(genes_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.999999"]
_log.debug("Input {} {}".format(genome_new, inputs))
for t_o in [t_new, t_new_e, t_client_new]:
genes_t_o = t_o.get_gene_by_cds(inputs)
assert len(genes_t_o) == 0
_log.debug("Output {}".format(genes_t_o))
######## Old Genome Annotation Type tests
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_taxon_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e]:
taxon_t_o = t_o.get_taxon()
assert isinstance(taxon_t_o, TaxonAPI)
_log.debug("Output {}".format(taxon_t_o))
taxon_c_old = t_client_old.get_taxon()
assert taxon_c_old is not None
_log.debug("Output {}".format(taxon_c_old))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_assembly_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e]:
assembly_t_o = t_o.get_assembly()
assert isinstance(assembly_t_o, AssemblyAPI)
_log.debug("Output {}".format(assembly_t_o))
assembly_c_old = t_client_old.get_assembly()
assert assembly_c_old is not None
_log.debug("Output {}".format(assembly_c_old))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_types_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_types_t_o = t_o.get_feature_types()
assert isinstance(feature_types_t_o, list)
_log.debug("Output {}".format(feature_types_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_descriptions_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_type_descriptions_t_o = t_o.get_feature_type_descriptions()
assert isinstance(feature_type_descriptions_t_o, dict)
_log.debug("Output {}".format(feature_type_descriptions_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_ids_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_ids_t_o = t_o.get_feature_ids()
assert isinstance(feature_ids_t_o, dict)
_log.debug("Output {}".format(type(feature_ids_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_counts_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_type_counts_t_o = t_o.get_feature_type_counts()
assert isinstance(feature_type_counts_t_o, dict)
_log.debug("Output {}".format(feature_type_counts_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_locations_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_locations_t_o = t_o.get_feature_locations()
assert isinstance(feature_locations_t_o, dict)
_log.debug("Output {}".format(len(feature_locations_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_dna_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_dna_t_o = t_o.get_feature_dna()
assert isinstance(feature_dna_t_o, dict)
_log.debug("Output {}".format(len(feature_dna_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_functions_old():
_log.debug("Input {}".format(genome_old))
    for t_o in [t_old, t_old_e, t_client_old]:
feature_functions_t_o = t_o.get_feature_functions()
assert isinstance(feature_functions_t_o, dict)
_log.debug("Output {}".format(len(feature_functions_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_aliases_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_aliases_t_o = t_o.get_feature_aliases()
assert isinstance(feature_aliases_t_o, dict)
_log.debug("Output {}".format(len(feature_aliases_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_publications_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
feature_publications_t_o = t_o.get_feature_publications()
assert isinstance(feature_publications_t_o, dict)
_log.debug("Output {}".format(len(feature_publications_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_features_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
features_t_o = t_o.get_features()
assert isinstance(features_t_o, dict)
_log.debug("Output {}".format(len(features_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_proteins_old():
_log.debug("Input {}".format(genome_old))
for t_o in [t_old, t_old_e, t_client_old]:
proteins_t_o = t_o.get_proteins()
assert isinstance(proteins_t_o, dict)
_log.debug("Output {}".format(len(proteins_t_o)))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_valid_old():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
cds_t_o = t_o.get_cds_by_mrna(inputs)
assert len(cds_t_o) == 0
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_invalid_old():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
cds_t_o = t_o.get_cds_by_mrna(inputs)
assert len(cds_t_o) == 0
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_valid_old():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
mrna_t_o = t_o.get_mrna_by_cds(inputs)
assert len(mrna_t_o) == 0
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.9999999999"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
mrna_t_o = t_o.get_mrna_by_cds(inputs)
assert len(mrna_t_o) == 0
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_valid_old():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
genes_t_o = t_o.get_gene_by_mrna(inputs)
assert len(genes_t_o) == 0
_log.debug("Output {}".format(genes_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_invalid_old():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
genes_t_o = t_o.get_gene_by_mrna(inputs)
assert len(genes_t_o) == 0
_log.debug("Output {}".format(genes_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_valid_old():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
cds_t_o = t_o.get_cds_by_gene(inputs)
assert len(cds_t_o) == 0
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
cds_t_o = t_o.get_cds_by_gene(inputs)
assert len(cds_t_o) == 0
_log.debug("Output {}".format(cds_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_valid_old():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
mrna_t_o = t_o.get_mrna_by_gene(inputs)
assert len(mrna_t_o) == 0
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
mrna_t_o = t_o.get_mrna_by_gene(inputs)
assert len(mrna_t_o) == 0
_log.debug("Output {}".format(mrna_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_valid_old():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
genes_t_o = t_o.get_gene_by_cds(inputs)
assert len(genes_t_o) == 0
_log.debug("Output {}".format(genes_t_o))
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.999999"]
_log.debug("Input {} {}".format(genome_old, inputs))
for t_o in [t_old, t_old_e, t_client_old]:
genes_t_o = t_o.get_gene_by_cds(inputs)
assert len(genes_t_o) == 0
_log.debug("Output {}".format(genes_t_o))
| 1.6875 | 2 |
deeplearning/ml4pl/models/batch.py | Zacharias030/ProGraML | 0 | 12773420 | # Copyright 2019 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains TODO: one line summary.
TODO: Detailed explanation of the file.
"""
from typing import Any
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
import numpy as np
import sklearn.metrics
from labm8.py import app
FLAGS = app.FLAGS
app.DEFINE_string(
"batch_scores_averaging_method",
"weighted",
"Selects the averaging method to use when computing recall/precision/F1 "
"scores. See <https://scikit-learn.org/stable/modules/generated/sklearn"
".metrics.f1_score.html>",
)
class Data(NamedTuple):
"""The model data for a batch."""
graph_ids: List[int]
data: Any
  # A flag used to mark that this batch is the end of an iterable sequence of
  # batches.
end_of_batches: bool = False
@property
def graph_count(self) -> int:
return len(self.graph_ids)
def EmptyBatch() -> Data:
"""Construct an empty batch."""
return Data(graph_ids=[], data=None)
def EndOfBatches() -> Data:
"""Construct a 'end of batches' marker."""
return Data(graph_ids=[], data=None, end_of_batches=True)
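# Sentinel-usage sketch (an assumption about the consumer, not shown here):
#   for batch in batch_iterator.batches:
#     if batch.end_of_batches:
#       break  # EndOfBatches() marks the end of the stream
#     ProcessBatch(batch)  # hypothetical consumer function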
class BatchIterator(NamedTuple):
"""A batch iterator"""
batches: Iterable[Data]
# The total number of graphs in all of the batches.
graph_count: int
class Results(NamedTuple):
"""The results of running a batch through a model.
Don't instantiate this tuple directly, use Results.Create().
"""
  targets: np.ndarray
  predictions: np.ndarray
# The number of model iterations to compute the final results. This is used
# by iterative models such as message passing networks.
iteration_count: int
# For iterative models, this indicates whether the state of the model at
# iteration_count had converged on a solution.
model_converged: bool
# The learning rate and loss of models, if applicable.
learning_rate: Optional[float]
loss: Optional[float]
# Batch-level average performance metrics.
accuracy: float
precision: float
recall: float
f1: float
@property
def has_learning_rate(self) -> bool:
return self.learning_rate is not None
@property
def has_loss(self) -> bool:
return self.loss is not None
@property
def target_count(self) -> int:
"""Get the number of targets in the batch.
For graph-level classifiers, this will be equal to Data.graph_count, else
it's equal to the batch node count.
"""
    # targets has shape (target_count, y_dimensionality); count is axis 0.
    return self.targets.shape[0]
def __repr__(self) -> str:
return (
f"accuracy={self.accuracy:.2%}%, "
f"precision={self.precision:.3f}, "
f"recall={self.recall:.3f}, "
f"f1={self.f1:.3f}"
)
def __eq__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy == rhs.accuracy
def __gt__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy > rhs.accuracy
@classmethod
def Create(
cls,
    targets: np.ndarray,
    predictions: np.ndarray,
iteration_count: int = 1,
model_converged: bool = True,
learning_rate: Optional[float] = None,
loss: Optional[float] = None,
):
"""Construct a results instance from 1-hot targets and predictions.
    This is the preferred means of constructing a Results instance, which takes
    care of evaluating all of the metrics for you. The behavior of metrics
    calculation is dependent on the --batch_scores_averaging_method flag.
Args:
targets: An array of 1-hot target vectors with
shape (y_count, y_dimensionality), dtype int32.
predictions: An array of 1-hot prediction vectors with
shape (y_count, y_dimensionality), dtype int32.
iteration_count: For iterative models, the number of model iterations to
compute the final result.
model_converged: For iterative models, whether model converged.
learning_rate: The model learning rate, if applicable.
loss: The model loss, if applicable.
Returns:
A Results instance.
"""
    if targets.shape != predictions.shape:
      raise TypeError(
        f"Expected predictions with the same shape as targets "
        f"{targets.shape}, but instead received predictions with shape "
        f"{predictions.shape}"
      )
y_dimensionality = targets.shape[1]
if y_dimensionality < 2:
raise TypeError(
f"Expected label dimensionality > 1, received {y_dimensionality}"
)
# Create dense arrays of shape (target_count).
true_y = np.argmax(targets, axis=1)
pred_y = np.argmax(predictions, axis=1)
# NOTE(github.com/ChrisCummins/ProGraML/issues/22): This assumes that
# labels use the values [0,...n).
labels = np.arange(y_dimensionality, dtype=np.int64)
return cls(
targets=targets,
predictions=predictions,
iteration_count=iteration_count,
model_converged=model_converged,
learning_rate=learning_rate,
loss=loss,
accuracy=sklearn.metrics.accuracy_score(true_y, pred_y),
precision=sklearn.metrics.precision_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
recall=sklearn.metrics.recall_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
f1=sklearn.metrics.f1_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
)
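# Usage sketch for Results.Create (hypothetical values; assumes labm8 flags
# have been parsed so --batch_scores_averaging_method holds its default):
#   targets = np.array([[1, 0], [0, 1], [1, 0]], dtype=np.int32)
#   predictions = np.array([[1, 0], [1, 0], [0, 1]], dtype=np.int32)
#   results = Results.Create(targets=targets, predictions=predictions)
#   results.accuracy  # -> 0.333..., since one of the three 1-hot rows matches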
class RollingResults:
"""Maintain weighted rolling averages across batches."""
def __init__(self):
self.weight_sum = 0
self.batch_count = 0
self.graph_count = 0
self.target_count = 0
self.weighted_iteration_count_sum = 0
self.weighted_model_converged_sum = 0
self.has_learning_rate = False
self.weighted_learning_rate_sum = 0
self.has_loss = False
self.weighted_loss_sum = 0
self.weighted_accuracy_sum = 0
self.weighted_precision_sum = 0
self.weighted_recall_sum = 0
self.weighted_f1_sum = 0
def Update(
self, data: Data, results: Results, weight: Optional[float] = None
) -> None:
"""Update the rolling results with a new batch.
Args:
data: The batch data used to produce the results.
results: The batch results to update the current state with.
      weight: A weight to assign to weighted sums. E.g. to weight results
        across targets, use weight=results.target_count; to weight across
        graphs, use weight=data.graph_count. By default, weight by target
        count.
"""
if weight is None:
weight = results.target_count
self.weight_sum += weight
self.batch_count += 1
self.graph_count += data.graph_count
self.target_count += results.target_count
self.weighted_iteration_count_sum += results.iteration_count * weight
self.weighted_model_converged_sum += (
weight if results.model_converged else 0
)
if results.has_learning_rate:
self.has_learning_rate = True
self.weighted_learning_rate_sum += results.learning_rate * weight
if results.has_loss:
self.has_loss = True
self.weighted_loss_sum += results.loss * weight
self.weighted_accuracy_sum += results.accuracy * weight
self.weighted_precision_sum += results.precision * weight
self.weighted_recall_sum += results.recall * weight
self.weighted_f1_sum += results.f1 * weight
@property
def iteration_count(self) -> float:
return self.weighted_iteration_count_sum / max(self.weight_sum, 1)
@property
def model_converged(self) -> float:
return self.weighted_model_converged_sum / max(self.weight_sum, 1)
@property
def learning_rate(self) -> Optional[float]:
if self.has_learning_rate:
return self.weighted_learning_rate_sum / max(self.weight_sum, 1)
@property
def loss(self) -> Optional[float]:
if self.has_loss:
return self.weighted_loss_sum / max(self.weight_sum, 1)
@property
def accuracy(self) -> float:
return self.weighted_accuracy_sum / max(self.weight_sum, 1)
@property
def precision(self) -> float:
return self.weighted_precision_sum / max(self.weight_sum, 1)
@property
def recall(self) -> float:
return self.weighted_recall_sum / max(self.weight_sum, 1)
@property
def f1(self) -> float:
return self.weighted_f1_sum / max(self.weight_sum, 1)
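# Rolling-average sketch for RollingResults (hypothetical batches): metrics
# from each batch are weighted by its target count (the default) and the
# weighted mean is computed on read:
#   rolling = RollingResults()
#   rolling.Update(data=batch_a, results=results_a)
#   rolling.Update(data=batch_b, results=results_b)
#   rolling.accuracy  # weighted mean of the two batch accuracies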
| 2.546875 | 3 |
colour/models/tests/tests_cie_ucs.py | canavandl/colour | 1 | 12773421 | <reponame>canavandl/colour
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.cie_ucs` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from colour.models import XYZ_to_UCS, UCS_to_XYZ, UCS_to_uv, UCS_uv_to_xy
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestXYZ_to_UCS',
'TestUCS_to_XYZ',
'TestUCS_to_uv',
'TestUCS_uv_to_xy']
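# For reference, the CIE 1960 UCS transform exercised below is
#   U = (2 / 3) * X,    V = Y,    W = (1 / 2) * (-X + 3 * Y + Z)
# e.g. XYZ = (11.80583421, 10.34, 5.15089229) maps to
# UCS = (7.87055614, 10.34, 12.18252904), matching the first assertion.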
class TestXYZ_to_UCS(unittest.TestCase):
"""
Defines :func:`colour.models.cie_ucs.XYZ_to_UCS` definition unit tests
methods.
"""
def test_XYZ_to_UCS(self):
"""
Tests :func:`colour.models.cie_ucs.XYZ_to_UCS` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_UCS(np.array([11.80583421, 10.34, 5.15089229])),
np.array([7.87055614, 10.34, 12.18252904]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_UCS(np.array([3.08690042, 3.2, 2.68925666])),
np.array([2.05793361, 3.2, 4.60117812]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_UCS(np.array([0.96907232, 1, 1.12179215])),
np.array([0.64604821, 1., 1.57635992]),
decimal=7)
class TestUCS_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.cie_ucs.UCS_to_XYZ` definition unit tests
methods.
"""
def test_UCS_to_XYZ(self):
"""
Tests :func:`colour.models.cie_ucs.UCS_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
UCS_to_XYZ(np.array([7.87055614, 10.34, 12.18252904])),
np.array([11.80583421, 10.34, 5.15089229]),
decimal=7)
np.testing.assert_almost_equal(
UCS_to_XYZ(np.array([2.05793361, 3.2, 4.60117812])),
np.array([3.08690042, 3.2, 2.68925666]),
decimal=7)
np.testing.assert_almost_equal(
UCS_to_XYZ(np.array([0.64604821, 1, 1.57635992])),
np.array([0.96907232, 1., 1.12179215]),
decimal=7)
class TestUCS_to_uv(unittest.TestCase):
"""
Defines :func:`colour.models.cie_ucs.UCS_to_uv` definition unit tests
methods.
"""
def test_UCS_to_uv(self):
"""
Tests :func:`colour.models.cie_ucs.UCS_to_uv` definition.
"""
np.testing.assert_almost_equal(
UCS_to_uv(np.array([7.87055614, 10.34, 12.18252904])),
(0.25895877609618834, 0.34020896328103534),
decimal=7)
np.testing.assert_almost_equal(
UCS_to_uv(np.array([2.05793361, 3.2, 4.60117812])),
(0.20873418076173886, 0.32457285074301517),
decimal=7)
np.testing.assert_almost_equal(
UCS_to_uv(np.array([0.64604821, 1, 1.57635992])),
(0.20048615319251942, 0.31032692311386395),
decimal=7)
class TestUCS_uv_to_xy(unittest.TestCase):
"""
Defines :func:`colour.models.cie_ucs.UCS_uv_to_xy` definition unit tests
methods.
"""
def test_UCS_uv_to_xy(self):
"""
Tests :func:`colour.models.cie_ucs.UCS_uv_to_xy` definition.
"""
np.testing.assert_almost_equal(
UCS_uv_to_xy((0.2033733344733139, 0.3140500001549052)),
(0.32207410281368043, 0.33156550013623537),
decimal=7)
np.testing.assert_almost_equal(
UCS_uv_to_xy((0.20873418102926322, 0.32457285063327812)),
(0.3439000000209443, 0.35650000010917804),
decimal=7)
np.testing.assert_almost_equal(
UCS_uv_to_xy((0.25585459629500179, 0.34952813701502972)),
(0.4474327628361858, 0.40749796251018744),
decimal=7)
if __name__ == '__main__':
unittest.main()
| 2.359375 | 2 |
src/sage/groups/libgap_group.py | fchapoton/sage | 1,742 | 12773422 | <gh_stars>1000+
"""
Generic LibGAP-based Group
This is useful if you need to use a GAP group implementation in Sage
that does not have a dedicated Sage interface.
If you want to implement your own group class, you should not derive
from this but directly from
:class:`~sage.groups.libgap_wrapper.ParentLibGAP`.
EXAMPLES::
sage: F.<a,b> = FreeGroup()
sage: G_gap = libgap.Group([ (a*b^2).gap() ])
sage: from sage.groups.libgap_group import GroupLibGAP
sage: G = GroupLibGAP(G_gap); G
Group([ a*b^2 ])
sage: type(G)
<class 'sage.groups.libgap_group.GroupLibGAP_with_category'>
sage: G.gens()
(a*b^2,)
"""
##############################################################################
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
##############################################################################
from sage.groups.group import Group
from sage.groups.libgap_wrapper import ParentLibGAP, ElementLibGAP
from sage.groups.libgap_mixin import GroupMixinLibGAP
class GroupLibGAP(GroupMixinLibGAP, Group, ParentLibGAP):
Element = ElementLibGAP
def __init__(self, *args, **kwds):
"""
Group interface for LibGAP-based groups.
INPUT:
Same as :class:`~sage.groups.libgap_wrapper.ParentLibGAP`.
TESTS::
sage: F.<a,b> = FreeGroup()
sage: G_gap = libgap.Group([ (a*b^2).gap() ])
sage: from sage.groups.libgap_group import GroupLibGAP
sage: G = GroupLibGAP(G_gap); G
Group([ a*b^2 ])
sage: g = G.gen(0); g
a*b^2
sage: TestSuite(G).run(skip=['_test_pickling', '_test_elements'])
sage: TestSuite(g).run(skip=['_test_pickling'])
"""
ParentLibGAP.__init__(self, *args, **kwds)
Group.__init__(self)
| 1.84375 | 2 |
instauto/helpers/models.py | javad94/instauto | 0 | 12773423 | <reponame>javad94/instauto
import dataclasses
from typing import List, Optional, Tuple
@dataclasses.dataclass
class FriendshipStatus:
following: bool
is_private: Optional[bool]
incoming_request: Optional[bool]
outgoing_request: Optional[bool]
is_bestie: bool
is_restricted: bool
@classmethod
def parse(cls, friendship_status: dict) -> "FriendshipStatus":
return cls(
friendship_status['following'], friendship_status.get('is_private'),
friendship_status.get('incoming_request'), friendship_status['outgoing_request'],
friendship_status['is_bestie'], friendship_status['is_restricted']
)
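# Parsing sketch for FriendshipStatus.parse (hypothetical payload; real dicts
# come from Instagram API responses):
#   fs = FriendshipStatus.parse({
#       "following": True, "is_private": False, "incoming_request": False,
#       "outgoing_request": False, "is_bestie": False, "is_restricted": False})
#   fs.following  # -> True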
@dataclasses.dataclass
class AccountBadge:
pass
@classmethod
def parse(cls, account_badge: dict) -> "AccountBadge":
if account_badge is not None:
raise Exception("Parsing of account badge not implemented, please open an issue" \
"on Github. Account badge dict:", account_badge)
return cls()
@dataclasses.dataclass
class User:
pk: int
username: str
full_name: str
is_private: bool
profile_pic_url: str
profile_pic_id: Optional[str]
is_verified: Optional[bool]
has_anonymous_profile_picture: Optional[bool]
account_badges: List[AccountBadge]
friendship_status: Optional[FriendshipStatus]
latest_reel_media: Optional[int]
live_broadcast_id: Optional[str]
@classmethod
def parse(cls, user: dict) -> "User":
return cls(
user['pk'], user['username'], user['full_name'], user['is_private'],
user['profile_pic_url'], user.get('profile_pic_id'), user.get('is_verified'),
user.get('has_anonymous_profile_picture'),
[AccountBadge.parse(a) for a in user.get('account_badges') or []],
FriendshipStatus.parse(user['friendship_status']) if user.get('friendship_status') else None,
user.get('latest_reel_media'), user.get('live_broadcast_id')
)
@dataclasses.dataclass
class Tag:
id: int
name: str
media_count: int
formatted_media_count: str
search_result_subtitle: str
profile_pic_url: str
use_default_avatar: bool
@classmethod
def parse(cls, tag: dict) -> "Tag":
return cls(
tag['id'], tag['name'], tag['media_count'], tag['formatted_media_count'],
tag['search_result_subtitle'], tag['profile_pic_url'], tag['use_default_avatar']
)
@dataclasses.dataclass
class ImageVersion:
key: str
width: int
height: int
url: str
scans_profile: Optional[str]
estimated_scans_sizes: List[int]
@classmethod
def parse(cls, key: str, image_version: dict) -> Optional["ImageVersion"]:
        if image_version is None:
            return None
return cls(
key, image_version['width'], image_version['height'], image_version['url'],
image_version.get('scans_profile'), image_version.get('estimated_scans_sizes') or []
)
@dataclasses.dataclass
class VideoVersion:
type: int
width: int
height: int
url: str
id: str
@classmethod
def parse(cls, vv: dict) -> "VideoVersion":
return cls(vv['type'], vv['width'], vv['height'], vv['url'], vv['id'])
@dataclasses.dataclass
class MusicAssetInfo:
audio_cluster_id: str
id: str
title: str
subtitle: str
display_artist: str
cover_artwork_uri: str
cover_artwork_thumbnail_uri: str
progressive_download_url: str
reactive_audio_download_url: str
highlight_start_times_in_ms: List[int]
is_explicit: bool
dash_manifest: str
has_lyrics: str
audio_asset_id: str
duration_in_ms: str
dark_message: str
allows_saving: bool
@classmethod
def parse(cls, mai: dict) -> "MusicAssetInfo":
return cls(
mai['audio_cluster_id'], mai['id'], mai['title'], mai['subtitle'], mai['display_artist'],
mai['cover_artwork_uri'], mai['cover_artwork_thumbnail_uri'], mai['progressive_download_url'],
mai['reactive_audio_download_url'], mai['highlight_start_times_in_ms'],
mai['is_explicit'], mai['dash_manifest'], mai['has_lyrics'], mai['audio_asset_id'],
mai['duration_in_ms'], mai['dark_message'], mai['allows_saving']
)
@dataclasses.dataclass
class ConsumptionInfo:
is_bookmarked: bool
should_mute_audio_reason: str
is_trending_in_clips: bool
@classmethod
def parse(cls, ci) -> "ConsumptionInfo":
return cls(ci['is_bookmarked'], ci['should_mute_audio_reason'], ci['is_trending_in_clips'])
@dataclasses.dataclass
class MusicInfo:
music_asset_info: MusicAssetInfo
music_consumption_info: ConsumptionInfo
@classmethod
def parse(cls, music_info: dict) -> "MusicInfo":
return cls(MusicAssetInfo.parse(music_info['music_asset_info']),
ConsumptionInfo.parse(music_info['music_consumption_info']))
@dataclasses.dataclass
class Usertag:
user: User
position: Tuple[float, float]
start_time_in_video_in_sec: Optional[float]
duration_video_in_sec: Optional[float]
@classmethod
def parse(cls, user_tag: dict) -> "Usertag":
return cls(
User.parse(user_tag['user']), user_tag['position'], user_tag['start_time_in_video_in_sec'],
user_tag['duration_in_video_in_sec']
)
@dataclasses.dataclass
class SoundInfo:
audio_asset_id: str
progressive_download_url: str
dash_manifest: str
ig_artist: User
should_mute_audio: bool
original_media_id: str
hide_remixing: bool
duration_in_ms: int
time_created: int
original_audio_title: str
consumption_info: ConsumptionInfo
allow_creator_to_rename: bool
can_remix_be_shared_to_fb: bool
formatted_clips_media_count: int
@classmethod
def parse(cls, si: dict) -> "SoundInfo":
return cls(
si['audio_asset_id'], si['progressive_download_url'], si['dash_manifest'],
User.parse(si['ig_artist']), si['should_mute_audio'], si['original_media_id'],
si['hide_remixing'], si['duration_in_ms'], si['time_created'], si['original_audio_title'],
ConsumptionInfo.parse(si['consumption_info']), si['allow_creator_to_rename'],
si['can_remix_be_shared_to_fb'], si['formatted_clips_media_count']
)
@dataclasses.dataclass
class AudioInfo:
pass
@classmethod
def parse(cls, audio_info: dict) -> "AudioInfo":
if audio_info is not None:
raise Exception("Parsing of audio info not implemented, please open an issue" \
"on Github. Audio info dict:", audio_info)
return cls()
@dataclasses.dataclass
class MashupInfo:
mashups_allowed: bool
can_toggle_mashups_allowed: bool
has_been_mashed_up: bool
formatted_mashups_count: Optional[int]
original_media: Optional[int]
@classmethod
def parse(cls, mashup_info: dict) -> "MashupInfo":
return cls(
mashup_info['mashups_allowed'], mashup_info['can_toggle_mashups_allowed'],
mashup_info['has_been_mashed_up'], mashup_info.get('formatted_mashups_count'),
mashup_info.get('original_media')
)
@dataclasses.dataclass
class NuxInfo:
@classmethod
def parse(cls, nux_info: dict) -> "NuxInfo":
if nux_info is not None:
raise Exception("Parsing of nux info not implemented, please open an issue" \
"on Github. Account badge dict:", nux_info)
return cls()
@dataclasses.dataclass
class ViewerInteractionSettings:
@classmethod
def parse(cls, viewer_interaction_settings: dict) -> "ViewerInteractionSettings":
if viewer_interaction_settings is not None:
raise Exception("Parsing of viewer interaction settings not implemented, please open an issue" \
"on Github. Viewer interaction settings dict:", viewer_interaction_settings)
return cls()
@dataclasses.dataclass
class BrandedContentTagInfo:
can_add_tag: bool
@classmethod
def parse(cls, bcti: dict) -> "BrandedContentTagInfo":
return cls(bcti['can_add_tag'])
@dataclasses.dataclass
class ShoppingInfo:
@classmethod
def parse(cls, shopping_info: dict) -> "ShoppingInfo":
        if shopping_info is not None:
            raise Exception("Parsing of shopping info not implemented, please open "
                            "an issue on Github. Shopping info dict:", shopping_info)
return cls()
@dataclasses.dataclass
class AudioReattributionInfo:
    should_allow_restore: bool
    @classmethod
    def parse(cls, audio_reattribution_info: dict) -> "AudioReattributionInfo":
        return cls(audio_reattribution_info['should_allow_restore'])
@dataclasses.dataclass
class AdditionalAudioInfo:
    additional_audio_username: Optional[str]
    audio_reattribution_info: AudioReattributionInfo
    @classmethod
    def parse(cls, additional_audio_info: dict) -> "AdditionalAudioInfo":
        return cls(
            additional_audio_info.get('additional_audio_username'),
            AudioReattributionInfo.parse(additional_audio_info['audio_reattribution_info'])
        )
@dataclasses.dataclass
class ClipsMetadata:
music_info: Optional[MusicInfo]
original_sound_info: Optional[SoundInfo]
hybrid_audio_info: Optional[AudioInfo]
audio_type: str
music_canonical_id: str
featured_label: Optional[str]
mashup_info: Optional[MashupInfo]
nux_info: Optional[NuxInfo]
viewer_interaction_settings: Optional[ViewerInteractionSettings]
branded_content_tag_info: Optional[BrandedContentTagInfo]
shopping_info: Optional[ShoppingInfo]
additional_audio_info: Optional[AdditionalAudioInfo]
is_shared_to_fb: bool
@classmethod
def parse(cls, cm: dict) -> "ClipsMetadata":
return cls(
MusicInfo.parse(cm['music_info']) if cm.get('music_info') else None,
SoundInfo.parse(cm['original_sound_info']) if cm.get('original_sound_info') else None,
AudioInfo.parse(cm['hybrid_audio_info']) if cm.get('hybrid_audio_info') else None,
cm['audio_type'], cm['music_canonical_id'], cm.get('featured_label'),
MashupInfo.parse(cm['mashup_info']) if cm.get('mashup_info') else None,
NuxInfo.parse(cm['nux_info']) if cm.get('nux_info') else None,
ViewerInteractionSettings.parse(cm['viewer_interaction_settings']) if cm.get('viewer_interaction_settings') else None,
BrandedContentTagInfo.parse(cm['branded_content_tag_info']) if cm.get('branded_content_tag_info') else None,
ShoppingInfo.parse(cm['shopping_info']) if cm.get('shopping_info') else None,
AdditionalAudioInfo.parse(cm['additional_audio_info']) if cm.get('additional_audio_info') else None,
cm['is_shared_to_fb']
)
@dataclasses.dataclass
class MediaCroppingInfo:
key: str
crop_left: float
crop_right: float
crop_top: float
crop_bottom: float
@classmethod
def parse(cls, key: str, mci: dict) -> "MediaCroppingInfo":
return cls(
key, mci['crop_left'], mci['crop_right'], mci['crop_top'], mci['crop_bottom']
)
@dataclasses.dataclass
class Post:
taken_at: int
pk: int
id: str
device_timestamp: Optional[int]
media_type: int
code: Optional[str]
client_cache_key: Optional[str]
filter_type: Optional[int]
should_request_ads: Optional[bool]
user: User
can_viewer_reshare: Optional[bool]
caption_is_edited: bool
like_and_view_counts_disabled: Optional[bool]
is_commercial: Optional[bool]
is_paid_partnership: Optional[bool]
comment_likes_enabled: Optional[bool]
comment_threading_enabled: Optional[bool]
has_more_comments: Optional[bool]
max_num_visible_preview_comments: Optional[int]
can_view_more_preview_comments: Optional[bool]
comment_count: Optional[int]
hide_view_all_comment_entrypoint: Optional[bool]
inline_composer_display_condition: Optional[str]
inline_composer_imp_trigger_time: Optional[int]
image_versions2: List[ImageVersion]
original_width: Optional[int]
original_height: Optional[int]
like_count: int
has_liked: bool
top_likers: List[str]
photo_of_you: bool
usertags: List[Usertag]
can_see_insights_as_brand: Optional[bool]
video_versions: List[VideoVersion]
has_audio: Optional[bool]
video_duration: Optional[float]
view_count: Optional[int]
play_count: Optional[int]
caption: str
can_viewer_save: bool
organic_tracking_token: Optional[str]
sharing_friction_info: Optional[bool]
product_type: Optional[str]
is_in_profile_grid: Optional[bool]
profile_grid_control_enabled: Optional[bool]
deleted_reason: Optional[int]
integrity_review_decision: str
clips_metadata: Optional[ClipsMetadata]
media_cropping_info: List[MediaCroppingInfo]
@classmethod
def parse(cls, post: dict) -> "Post":
v = post.get('image_versions2')
if v is not None:
image_versions = [ImageVersion.parse('', p) for p in v['candidates']]
image_versions.extend([ImageVersion.parse(k, v) for k,v in (v.get('additional_candidates') or {}).items()])
else:
image_versions = []
return cls(
post['taken_at'], post['pk'], post['id'], post.get('device_timestamp'),
post['media_type'], post.get('code'), post.get('client_cache_key'), post.get('filter_type'),
post.get('should_request_ads'), User.parse(post['user']),
post.get('can_viewer_reshare'), post['caption_is_edited'],
post.get('like_and_view_counts_disabled'), post.get('is_commercial'),
post.get('is_paid_partnership'), post.get('comment_likes_enabled'),
post.get('comment_threading_enabled'), post.get('has_more_comments'),
post.get('max_num_visible_preview_comments'), post.get('can_view_more_preview_comments'),
post.get('comment_count'), post.get('hide_view_all_comment_entrypoint'),
post.get('inline_composer_display_condition'), post.get('inline_composer_imp_trigger_time'),
image_versions, post.get('original_width'), post.get('original_height'), post['like_count'],
post['has_liked'], post.get('top_likers') or [], post['photo_of_you'],
[Usertag.parse(u) for u in (post.get('usertags') or {'in': []})['in']],
post.get('can_see_insights_as_brand'),
[VideoVersion.parse(v) for v in post.get('video_versions') or []],
post.get('has_audio'), post.get('video_duration'), post.get('view_count'),
post.get('play_count'), post['caption'], post['can_viewer_save'],
post.get('organic_tracking_token'), post.get('sharing_friction_info'), post.get('product_type'),
post.get('is_in_profile_grid'), post.get('profile_grid_control_enabled'),
post.get('deleted_reason'), post.get('integrity_review_decision'),
ClipsMetadata.parse(post['clips_metadata']) if post.get('clips_metadata') else None,
[MediaCroppingInfo.parse(k, v) for k, v in (post.get('media_cropping_info') or {}).items()]
)
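# Note on the parsing pattern above (observational): keys the API is expected
# to always return are read with post['key'] so missing data fails loudly,
# optional keys use post.get('key'), and nested models (User, Usertag,
# VideoVersion, ClipsMetadata, ...) are parsed only when their sub-dict is
# present.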
| 2.546875 | 3 |
template/language/vc.py | clayne/syringe-1 | 25 | 12773424 | <reponame>clayne/syringe-1
'''
This module is based on the ndk.exception library module. Please
consider using that module instead of this one.
'''
import ptypes, ndk, ndk.exception
from ptypes import pstruct, pint, pstr, ptype, dyn
from ndk.exception import *
class vtable_ptr(PVOID): pass
class type_info(TypeDescriptor): pass
class exception(pstruct.type):
_fields_ = [
(dyn.pointer(vtable_ptr), 'vtable'),
(dyn.pointer(pstr.szstring), 'name'),
(pint.int32_t, 'do_free'),
]
class cxx_exception_frame(pstruct.type):
_fields_ = [
(dyn.pointer(ptype.undefined), 'frame'), # XXX
(pint.int32_t, 'trylevel'),
(pint.uint32_t, 'ebp'),
]
class cxx_copy_ctor(PVOID): pass
class this_ptr_offsets(pstruct.type):
_fields_ = [
(pint.int32_t, 'this_offset'),
(pint.int32_t, 'vbase_descr'),
(pint.int32_t, 'vbase_offset'),
]
class cxx_type_info(pstruct.type):
_fields_ = [
(pint.uint32_t, 'flags'),
(dyn.pointer(type_info), 'type_info'),
(this_ptr_offsets, 'offsets'),
(pint.uint32_t, 'size'),
(cxx_copy_ctor, 'copy_ctor'),
]
class cxx_type_info_table(pstruct.type):
_fields_ = [
(pint.uint32_t, 'count'),
(dyn.array(cxx_type_info, 3), 'info'),
]
class cxx_exc_custom_handler(PVOID): pass
class cxx_exception_type(pstruct.type):
_fields_ = [
(pint.uint32_t, 'flags'),
(PVOID, 'destructor'),
(cxx_exc_custom_handler, 'custom_handler'),
(dyn.pointer(cxx_type_info_table), 'type_info_table'),
]
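# Observational note: the structures above appear to mirror the 32-bit MSVC
# C++ exception-handling records (cxx_exception_type, cxx_type_info,
# this_ptr_offsets, ...); the maintained definitions live in ndk.exception,
# per the module docstring.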
| 1.609375 | 2 |
sc2ai/actorcritic/agent.py | telecombcn-dl/2017-dlai-team5 | 9 | 12773425 | import collections
import os
import numpy as np
import tensorflow as tf
from pysc2.lib import actions
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers.optimizers import OPTIMIZER_SUMMARIES
from actorcritic.policy import FullyConvPolicy
from common.preprocess import ObsProcesser, FEATURE_KEYS, AgentInputTuple
from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs
def _get_placeholders(spatial_dim):
sd = spatial_dim
feature_list = [
(FEATURE_KEYS.minimap_numeric, tf.float32, [None, sd, sd, ObsProcesser.N_MINIMAP_CHANNELS]),
(FEATURE_KEYS.screen_numeric, tf.float32, [None, sd, sd, ObsProcesser.N_SCREEN_CHANNELS]),
(FEATURE_KEYS.screen_unit_type, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.is_spatial_action_available, tf.float32, [None]),
(FEATURE_KEYS.available_action_ids, tf.float32, [None, len(actions.FUNCTIONS)]),
(FEATURE_KEYS.selected_spatial_action, tf.int32, [None, 2]),
(FEATURE_KEYS.selected_action_id, tf.int32, [None]),
(FEATURE_KEYS.value_target, tf.float32, [None]),
(FEATURE_KEYS.player_relative_screen, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.player_relative_minimap, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.advantage, tf.float32, [None])
]
return AgentInputTuple(
**{name: tf.placeholder(dtype, shape, name) for name, dtype, shape in feature_list}
)
class ACMode:
A2C = "a2c"
PPO = "ppo"
SelectedLogProbs = collections.namedtuple("SelectedLogProbs", ["action_id", "spatial", "total"])
class ActorCriticAgent:
_scalar_summary_key = "scalar_summaries"
def __init__(self,
sess: tf.Session,
summary_path: str,
all_summary_freq: int,
scalar_summary_freq: int,
spatial_dim: int,
mode: str,
clip_epsilon=0.2,
unit_type_emb_dim=4,
loss_value_weight=1.0,
entropy_weight_spatial=1e-6,
entropy_weight_action_id=1e-5,
max_gradient_norm=None,
optimiser="adam",
optimiser_pars: dict = None,
policy=FullyConvPolicy
):
"""
Actor-Critic Agent for learning pysc2-minigames
https://arxiv.org/pdf/1708.04782.pdf
https://github.com/deepmind/pysc2
Can use
- A2C https://blog.openai.com/baselines-acktr-a2c/ (synchronous version of A3C)
or
- PPO https://arxiv.org/pdf/1707.06347.pdf
:param summary_path: tensorflow summaries will be created here
        :param all_summary_freq: how often to save all summaries
        :param scalar_summary_freq: int, how often to save scalar summaries
:param spatial_dim: dimension for both minimap and screen
:param mode: a2c or ppo
:param clip_epsilon: epsilon for clipping the ratio in PPO (no effect in A2C)
:param loss_value_weight: value weight for a2c update
:param entropy_weight_spatial: spatial entropy weight for a2c update
:param entropy_weight_action_id: action selection entropy weight for a2c update
:param max_gradient_norm: global max norm for gradients, if None then not limited
:param optimiser: see valid choices below
:param optimiser_pars: optional parameters to pass in optimiser
:param policy: Policy class
"""
assert optimiser in ["adam", "rmsprop"]
assert mode in [ACMode.A2C, ACMode.PPO]
self.mode = mode
self.sess = sess
self.spatial_dim = spatial_dim
self.loss_value_weight = loss_value_weight
self.entropy_weight_spatial = entropy_weight_spatial
self.entropy_weight_action_id = entropy_weight_action_id
self.unit_type_emb_dim = unit_type_emb_dim
self.summary_path = summary_path
os.makedirs(summary_path, exist_ok=True)
self.summary_writer = tf.summary.FileWriter(summary_path)
self.all_summary_freq = all_summary_freq
self.scalar_summary_freq = scalar_summary_freq
self.train_step = 0
self.max_gradient_norm = max_gradient_norm
self.clip_epsilon = clip_epsilon
self.policy = policy
opt_class = tf.train.AdamOptimizer if optimiser == "adam" else tf.train.RMSPropOptimizer
if optimiser_pars is None:
pars = {
"adam": {
"learning_rate": 1e-4,
"epsilon": 5e-7
},
"rmsprop": {
"learning_rate": 2e-4
}
}[optimiser]
else:
pars = optimiser_pars
self.optimiser = opt_class(**pars)
def init(self):
self.sess.run(self.init_op)
if self.mode == ACMode.PPO:
self.update_theta()
def _get_select_action_probs(self, pi, selected_spatial_action_flat):
action_id = select_from_each_row(
pi.action_id_log_probs, self.placeholders.selected_action_id
)
spatial = select_from_each_row(
pi.spatial_action_log_probs, selected_spatial_action_flat
)
total = spatial + action_id
return SelectedLogProbs(action_id, spatial, total)
def _scalar_summary(self, name, tensor):
tf.summary.scalar(name, tensor,
collections=[tf.GraphKeys.SUMMARIES, self._scalar_summary_key])
def build_model(self):
self.placeholders = _get_placeholders(self.spatial_dim)
with tf.variable_scope("theta"):
theta = self.policy(self, trainable=True).build()
selected_spatial_action_flat = ravel_index_pairs(
self.placeholders.selected_spatial_action, self.spatial_dim
)
selected_log_probs = self._get_select_action_probs(theta, selected_spatial_action_flat)
        # max avoids 0 / 0, since this sum is used as the denominator of means below
sum_spatial_action_available = tf.maximum(
1e-10, tf.reduce_sum(self.placeholders.is_spatial_action_available)
)
neg_entropy_spatial = tf.reduce_sum(
theta.spatial_action_probs * theta.spatial_action_log_probs
) / sum_spatial_action_available
neg_entropy_action_id = tf.reduce_mean(tf.reduce_sum(
theta.action_id_probs * theta.action_id_log_probs, axis=1
))
if self.mode == ACMode.PPO:
# could also use stop_gradient and forget about the trainable
with tf.variable_scope("theta_old"):
theta_old = self.policy(self, trainable=False).build()
new_theta_var = tf.global_variables("theta/")
old_theta_var = tf.global_variables("theta_old/")
assert len(tf.trainable_variables("theta/")) == len(new_theta_var)
assert not tf.trainable_variables("theta_old/")
assert len(old_theta_var) == len(new_theta_var)
self.update_theta_op = [
tf.assign(t_old, t_new) for t_new, t_old in zip(new_theta_var, old_theta_var)
]
selected_log_probs_old = self._get_select_action_probs(
theta_old, selected_spatial_action_flat
)
ratio = tf.exp(selected_log_probs.total - selected_log_probs_old.total)
clipped_ratio = tf.clip_by_value(
ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon
)
l_clip = tf.minimum(
ratio * self.placeholders.advantage,
clipped_ratio * self.placeholders.advantage
)
self.sampled_action_id = weighted_random_sample(theta_old.action_id_probs)
self.sampled_spatial_action = weighted_random_sample(theta_old.spatial_action_probs)
self.value_estimate = theta_old.value_estimate
self._scalar_summary("action/ratio", tf.reduce_mean(clipped_ratio))
self._scalar_summary("action/ratio_is_clipped",
tf.reduce_mean(tf.to_float(tf.equal(ratio, clipped_ratio))))
policy_loss = -tf.reduce_mean(l_clip)
else:
self.sampled_action_id = weighted_random_sample(theta.action_id_probs)
self.sampled_spatial_action = weighted_random_sample(theta.spatial_action_probs)
self.value_estimate = theta.value_estimate
policy_loss = -tf.reduce_mean(selected_log_probs.total * self.placeholders.advantage)
value_loss = tf.losses.mean_squared_error(
self.placeholders.value_target, theta.value_estimate)
loss = (
policy_loss
+ value_loss * self.loss_value_weight
+ neg_entropy_spatial * self.entropy_weight_spatial
+ neg_entropy_action_id * self.entropy_weight_action_id
)
self.train_op = layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
optimizer=self.optimiser,
clip_gradients=self.max_gradient_norm,
summaries=OPTIMIZER_SUMMARIES,
learning_rate=None,
name="train_op"
)
self._scalar_summary("value/estimate", tf.reduce_mean(self.value_estimate))
self._scalar_summary("value/target", tf.reduce_mean(self.placeholders.value_target))
self._scalar_summary("action/is_spatial_action_available",
tf.reduce_mean(self.placeholders.is_spatial_action_available))
self._scalar_summary("action/selected_id_log_prob",
tf.reduce_mean(selected_log_probs.action_id))
self._scalar_summary("loss/policy", policy_loss)
self._scalar_summary("loss/value", value_loss)
self._scalar_summary("loss/neg_entropy_spatial", neg_entropy_spatial)
self._scalar_summary("loss/neg_entropy_action_id", neg_entropy_action_id)
self._scalar_summary("loss/total", loss)
self._scalar_summary("value/advantage", tf.reduce_mean(self.placeholders.advantage))
self._scalar_summary("action/selected_total_log_prob",
tf.reduce_mean(selected_log_probs.total))
self._scalar_summary("action/selected_spatial_log_prob",
tf.reduce_sum(selected_log_probs.spatial) / sum_spatial_action_available)
self.init_op = tf.global_variables_initializer()
self.saver = tf.train.Saver(max_to_keep=2)
self.all_summary_op = tf.summary.merge_all(tf.GraphKeys.SUMMARIES)
self.scalar_summary_op = tf.summary.merge(tf.get_collection(self._scalar_summary_key))
def _input_to_feed_dict(self, input_dict):
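        # Keying by "<placeholder name>:0" addresses each placeholder's output
        # tensor, so callers can feed plain dicts keyed by placeholder name.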
return {k + ":0": v for k, v in input_dict.items()}
def step(self, obs):
feed_dict = self._input_to_feed_dict(obs)
action_id, spatial_action, value_estimate = self.sess.run(
[self.sampled_action_id, self.sampled_spatial_action, self.value_estimate],
feed_dict=feed_dict
)
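        # The sampled spatial action is a flat index; unravel it back into
        # (row, col) screen coordinates.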
spatial_action_2d = np.array(
np.unravel_index(spatial_action, (self.spatial_dim,) * 2)
).transpose()
return action_id, spatial_action_2d, value_estimate
def train(self, input_dict):
feed_dict = self._input_to_feed_dict(input_dict)
ops = [self.train_op]
write_all_summaries = (
(self.train_step % self.all_summary_freq == 0) and
self.summary_path is not None
)
write_scalar_summaries = (
(self.train_step % self.scalar_summary_freq == 0) and
self.summary_path is not None
)
if write_all_summaries:
ops.append(self.all_summary_op)
elif write_scalar_summaries:
ops.append(self.scalar_summary_op)
r = self.sess.run(ops, feed_dict)
if write_all_summaries or write_scalar_summaries:
self.summary_writer.add_summary(r[-1], global_step=self.train_step)
self.train_step += 1
def get_value(self, obs):
feed_dict = self._input_to_feed_dict(obs)
return self.sess.run(self.value_estimate, feed_dict=feed_dict)
def flush_summaries(self):
self.summary_writer.flush()
def save(self, path, step=None):
os.makedirs(path, exist_ok=True)
step = step or self.train_step
print("saving model to %s, step %d" % (path, step))
self.saver.save(self.sess, path + '/model.ckpt', global_step=step)
def load(self, path):
ckpt = tf.train.get_checkpoint_state(path)
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
self.train_step = int(ckpt.model_checkpoint_path.split('-')[-1])
print("loaded old model with train_step %d" % self.train_step)
self.train_step += 1
def update_theta(self):
if self.mode == ACMode.PPO:
self.sess.run(self.update_theta_op)
| 1.867188 | 2 |
djangoerp/core/signals.py | xarala221/django-erp | 345 | 12773426 | #!/usr/bin/env python
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django.conf import settings
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from .utils.models import get_model
from .cache import LoggedInUserCache
from .models import Permission, ObjectPermission, Group
## HANDLERS ##
def _update_author_permissions(sender, instance, raw, created, **kwargs):
"""Updates the permissions assigned to the author of the given object.
"""
author = LoggedInUserCache().user
if author and author.is_authenticated:
content_type = ContentType.objects.get_for_model(sender)
app_label = content_type.app_label
model_name = content_type.model
if created:
can_view_this_object, is_new = ObjectPermission.objects.get_or_create_by_natural_key("view_%s" % model_name, app_label, model_name, instance.pk)
can_change_this_object, is_new = ObjectPermission.objects.get_or_create_by_natural_key("change_%s" % model_name, app_label, model_name, instance.pk)
can_delete_this_object, is_new = ObjectPermission.objects.get_or_create_by_natural_key("delete_%s" % model_name, app_label, model_name, instance.pk)
can_view_this_object.users.add(author)
can_change_this_object.users.add(author)
can_delete_this_object.users.add(author)
def manage_author_permissions(cls, enabled=True):
"""Adds permissions assigned to the author of the given object.
Connects the post_save signal of the given model class to the handler which
adds default permissions to the current user. i.e.:
    >>> manage_author_permissions(Project)
It will add default view, change and delete permissions for each Project's
instances created by the current user.
To disconnect:
    >>> manage_author_permissions(Project, False)
"""
cls = get_model(cls)
dispatch_uid = "update_%s_permissions" % cls.__name__.lower()
if enabled:
post_save.connect(_update_author_permissions, cls, dispatch_uid=dispatch_uid)
else:
post_save.disconnect(_update_author_permissions, cls, dispatch_uid=dispatch_uid)
def user_post_save(sender, instance, created, *args, **kwargs):
"""Add view/delete/change object permissions to users (on themselves).
It also adds new user instances to "users" group.
"""
auth_app, sep, user_model_name = settings.AUTH_USER_MODEL.rpartition('.')
user_model_name = user_model_name.lower()
# All new users have full control over themselves.
can_view_this_user, is_new = ObjectPermission.objects.get_or_create_by_natural_key("view_%s" % user_model_name, auth_app, user_model_name, instance.pk)
can_change_this_user, is_new = ObjectPermission.objects.get_or_create_by_natural_key("change_%s" % user_model_name, auth_app, user_model_name, instance.pk)
can_delete_this_user, is_new = ObjectPermission.objects.get_or_create_by_natural_key("delete_%s" % user_model_name, auth_app, user_model_name, instance.pk)
can_view_this_user.users.add(instance)
can_change_this_user.users.add(instance)
can_delete_this_user.users.add(instance)
# All new users are members of "users" group.
if created:
users_group, is_new = Group.objects.get_or_create(name='users')
instance.groups.add(users_group)
def add_view_permission(sender, instance, **kwargs):
"""Adds a view permission related to each new ContentType instance.
"""
if isinstance(instance, ContentType):
codename = "view_%s" % instance.model
Permission.objects.get_or_create(content_type=instance, codename=codename, name="Can view %s" % instance.name)
## CONNECTIONS ##
post_save.connect(user_post_save, get_user_model())
post_save.connect(add_view_permission, ContentType)
| 1.960938 | 2 |
funky_horse/__init__.py | AlexandreBidon/Funky-Horse | 0 | 12773427 | <filename>funky_horse/__init__.py
from .random_color_palette import RandomColorPalette
import os, random
from cairosvg import svg2png
class FunkyHorse():
def __init__(self):
self.__svg_list = []
self.__create()
def __create(self):
self.__svg_list.clear()
self.color_palette = RandomColorPalette()
self.__svg_list.append("""<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 2006.3 2006.3">\n""")
self.__svg_list.append("""<defs><clipPath id="clip-path"><circle cx="1003.15" cy="1003.15" r="1003.15"/></clipPath>""")
self.__svg_list.append(self.color_palette.generate_svg_style())
self.__svg_list.append("</defs>")
        # Add the background (forward slashes keep the asset paths portable
        # across operating systems)
        background = self.__choose_random_asset("assets/background")
        self.__add_to_list("assets/background/" + background)
        # Add the background layer of the hair
        hair = self.__choose_random_asset("assets/hair")
        self.__add_to_list("assets/hair/{}/bg.svg".format(hair))
        # Add the left ear
        ears = self.__choose_random_asset("assets/ears")
        self.__add_to_list("assets/ears/{}/bg.svg".format(ears))
        # Add the main body
        body = self.__choose_random_asset("assets/body")
        self.__add_to_list("assets/body/" + body)
        # Add the eyes
        eyes = self.__choose_random_asset("assets/eyes")
        self.__add_to_list("assets/eyes/" + eyes)
        # Add the muzzle
        muzzle = self.__choose_random_asset("assets/muzzle")
        self.__add_to_list("assets/muzzle/" + muzzle)
        # Add the foreground layer of the hair
        self.__add_to_list("assets/hair/{}/fg.svg".format(hair))
        # Add the right ear
        self.__add_to_list("assets/ears/{}/fg.svg".format(ears))
        # Add an accessory about 40% of the time
        if random.randrange(0, 10) > 5:
            accessory = self.__choose_random_asset("assets/accessory")
            self.__add_to_list("assets/accessory/" + accessory)
self.__svg_list.append("</svg>")
def __choose_random_asset(self,path):
return random.choice(os.listdir( os.path.join(os.path.dirname(__file__),path)))
    def __add_to_list(self, path):
        try:
            with open(os.path.join(os.path.dirname(__file__), path)) as svg:
                self.__svg_list.append(svg.read())
        except OSError:
            # Missing optional asset files are skipped silently.
            pass
def __str__(self):
"""
Returns the entire drawing by joining list elements.
"""
return("".join(self.__svg_list))
    def save_svg(self, path):
        """
        Saves the SVG drawing to the specified path.
        Any I/O exceptions propagate up to the calling code.
        """
        with open(path, "w+") as f:
            f.write(self.__str__())
def save_png(self, write_to):
"""
Saves the PNG drawing to specified path.
"""
        svg2png(self.__str__(), write_to=write_to)
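# Usage sketch (assumes the bundled SVG assets ship with the package; run from
# a context where the `funky_horse` package is importable):
#
#     from funky_horse import FunkyHorse
#     horse = FunkyHorse()
#     horse.save_svg("funky_horse.svg")
#     horse.save_png("funky_horse.png")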
| 2.8125 | 3 |
forumdemo/comment/models.py | lamdba0602/forumdemo | 0 | 12773428 | from django.db import models
from django.contrib.auth.models import User
from article.models import Article
class Comment(models.Model):
    owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="author")
    article = models.ForeignKey(Article, on_delete=models.CASCADE, verbose_name="article ID")
    content = models.CharField("comment content", max_length=1000)
    to_comment = models.ForeignKey("self", on_delete=models.SET_NULL, null=True, blank=True, verbose_name="comment being replied to")
    status = models.IntegerField("status", choices=((0, "normal"), (-1, "deleted")), default=0)
    create_timestamp = models.DateTimeField("created at", auto_now_add=True)
    last_update_timestamp = models.DateTimeField("last updated at", auto_now=True)
def __str__(self):
return self.content
    class Meta:
        verbose_name = "comment"
        verbose_name_plural = "comments"
| 2.140625 | 2 |
decrypt.py | nciefeiniu/wenshu | 57 | 12773429 | import re
import base64
from typing import Optional
from urllib.parse import urljoin
_pattern = re.compile(r"dynamicurl\|(?P<path>.+?)\|wzwsquestion\|(?P<question>.+?)\|wzwsfactor\|(?P<factor>\d+)")
def decrypt_wzws(text: str) -> Optional[str]:
    # noinspection PyBroadException
    try:
        return _decrypt_by_python(text)
    except Exception:
        # Returns None implicitly when the page cannot be parsed.
        print("failed to parse the html page")
def _decrypt_by_python(text: str) -> str:
base_url = "http://wenshu.court.gov.cn"
group_dict = _pattern.search(text).groupdict()
question = group_dict["question"]
factor = int(group_dict["factor"])
path = group_dict["path"]
label = "WZWS_CONFIRM_PREFIX_LABEL{}".format(sum(ord(i) for i in question) * factor + 111111)
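    # Worked example (illustrative): for question "abc" and factor 2,
    # sum(ord) = 294, so 294 * 2 + 111111 = 111699 and the label becomes
    # "WZWS_CONFIRM_PREFIX_LABEL111699" before base64 encoding.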
challenge = base64.b64encode(label.encode()).decode()
dynamic_url = urljoin(base_url, path)
dynamic_url = "{url}?{query}".format(url=dynamic_url, query="wzwschallenge={}".format(challenge))
return dynamic_url
if __name__ == "__main__":
with open("demo.html") as f:
_content = f.read()
_resp = decrypt_wzws(_content)
print(_resp)
| 3.09375 | 3 |
mayan/apps/appearance/handlers.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 2 | 12773430 | <gh_stars>1-10
from django.apps import apps
def handler_user_theme_setting_create(sender, instance, created, **kwargs):
UserThemeSetting = apps.get_model(
app_label='appearance', model_name='UserThemeSetting'
)
if created:
UserThemeSetting.objects.create(user=instance)
| 2.015625 | 2 |
scripts_figs/hovmoller_1year_sims.py | deepsphere/deepsphere-weather | 38 | 12773431 | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 13:12:02 2021
@author: ghiggi
"""
import os
os.chdir("/home/ghiggi/Projects/deepsphere-weather")
import dask
import matplotlib
import numpy as np
import cartopy.crs as ccrs
import xarray as xr
import matplotlib.pyplot as plt
## DeepSphere-Weather
from modules.utils_config import read_config_file
from modules.utils_config import get_model_settings
from modules.utils_config import get_training_settings
from modules.utils_config import get_ar_settings
from modules.utils_config import get_dataloader_settings
from modules.utils_config import check_same_dict
from modules.utils_config import get_pytorch_model
from modules.utils_config import set_pytorch_settings
from modules.utils_config import load_pretrained_model
from modules.utils_config import print_tensor_info
from modules.utils_io import get_ar_model_tensor_info
from modules.utils_xr import xr_common_vars
from modules.predictions_autoregressive import AutoregressivePredictions
from modules.my_plotting import create_hovmoller_plots
## Project specific functions
import modules.my_models_graph_old as my_architectures
## Side-project utils (maybe migrating to separate packages in future)
import modules.xsphere # required for xarray 'sphere' accessor
from modules.xscaler import LoadScaler
from modules.xscaler import SequentialScaler
from modules.xscaler import LoadAnomaly
from modules.xscaler import HovmollerDiagram
# For plotting
# matplotlib.use('cairo') # Cairo
matplotlib.rcParams["figure.facecolor"] = "white"
matplotlib.rcParams["savefig.facecolor"] = "white" # (1,1,1,0)
matplotlib.rcParams["savefig.edgecolor"] = 'none'
##------------------------------------------------------------------------------.
# Define settings
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
data_dir = "/ltenas3/DeepSphere/data/preprocessed_ds/ERA5_HRES"
exp_dir = "/data/weather_prediction/experiments_GG/new_old_archi"
model_name = "OLD_fine_tuned-RNN-AR6-UNetSpherical-Healpix_400km-Graph_knn-k20-MaxAreaPooling"
model_dir = os.path.join(exp_dir, model_name)
batch_size = 4
n_year_sims = 2
ar_blocks_days = 366*2
forecast_reference_times = ['1992-07-22T00:00:00','2000-01-01T18:00:00','2003-04-01T00:00:00', '2015-01-01T05:00:00']
long_forecast_zarr_fpath = None
long_forecast_zarr_fpath = os.path.join(model_dir, "model_predictions", "long_simulation", "2year_sim.zarr")
# Read config path
cfg_path = os.path.join(model_dir, 'config.json')
cfg = read_config_file(fpath=cfg_path)
data_sampling_dir = os.path.join(data_dir, cfg['model_settings']["sampling_name"])
##-----------------------------------------------------------------------------.
# Some special stuff you might want to adjust
cfg['dataloader_settings']["prefetch_factor"] = 2
cfg['dataloader_settings']["num_workers"] = 8
cfg['dataloader_settings']["autotune_num_workers"] = False
cfg['training_settings']['gpu_training'] = True
cfg['dataloader_settings']["pin_memory"] = False
cfg['dataloader_settings']["asyncronous_gpu_transfer"] = True
##-----------------------------------------------------------------------------.
# Define model weights fpath
model_fpath = os.path.join(model_dir, "model_weights", "model.h5")
##------------------------------------------------------------------------.
### Retrieve experiment-specific configuration settings
model_settings = get_model_settings(cfg)
ar_settings = get_ar_settings(cfg)
training_settings = get_training_settings(cfg)
dataloader_settings = get_dataloader_settings(cfg)
##------------------------------------------------------------------------.
#### Load Zarr Datasets
ds_dynamic = xr.open_zarr(os.path.join(data_sampling_dir, "Data","dynamic", "time_chunked", "dynamic.zarr"))
ds_bc = xr.open_zarr(os.path.join(data_sampling_dir, "Data","bc", "time_chunked", "bc.zarr"))
ds_static = xr.open_zarr(os.path.join(data_sampling_dir, "Data", "static.zarr"))
# - Select dynamic features
ds_dynamic = ds_dynamic[['z500','t850']]
##------------------------------------------------------------------------.
### Prepare static data
# - Keep land-surface mask as it is
# - Keep sin of latitude and remove longitude information
ds_static = ds_static.drop(["sin_longitude","cos_longitude"])
# - Scale orography between 0 and 1 (is already left 0 bounded)
ds_static['orog'] = ds_static['orog']/ds_static['orog'].max()
# - One Hot Encode soil type
# ds_slt_OHE = xscaler.OneHotEnconding(data_static['slt'])
# ds_static = xr.merge([ds_static, ds_slt_OHE])
# ds_static = ds_static.drop('slt')
# - Load static data
ds_static = ds_static.load()
##------------------------------------------------------------------------.
#### Define scaler to apply on the fly within DataLoader
# - Load scalers
dynamic_scaler = LoadScaler(os.path.join(data_sampling_dir, "Scalers", "GlobalStandardScaler_dynamic.nc"))
bc_scaler = LoadScaler(os.path.join(data_sampling_dir, "Scalers", "GlobalStandardScaler_bc.nc"))
# - Create single scaler
scaler = SequentialScaler(dynamic_scaler, bc_scaler)
##------------------------------------------------------------------------.
### Define pyTorch settings (before PyTorch model definition)
# - The seed for model weight initialization is set here, if configured
# - The training precision is also set here (currently only float32 works)
device = set_pytorch_settings(training_settings)
##------------------------------------------------------------------------.
## Retrieve dimension info of input-output Torch Tensors
tensor_info = get_ar_model_tensor_info(ar_settings = ar_settings,
data_dynamic = ds_dynamic,
data_static = ds_static,
data_bc = ds_bc)
print_tensor_info(tensor_info)
# Check that tensor_info match between model training and now
check_same_dict(model_settings['tensor_info'], tensor_info)
##------------------------------------------------------------------------.
### Define the model architecture
model = get_pytorch_model(module = my_architectures,
model_settings = model_settings)
###-----------------------------------------------------------------------.
## Load a pre-trained model
load_pretrained_model(model = model,
model_dir = model_dir)
###-----------------------------------------------------------------------.
### Transfer model to the device (i.e. GPU)
model = model.to(device)
##------------------------------------------------------------------------.
# Run predictions
dask.config.set(scheduler='synchronous')
forecast_cycle = ar_settings['forecast_cycle']
ar_iterations = int(24 / forecast_cycle * 365 * n_year_sims)
ar_blocks = None
ds_long_forecasts = AutoregressivePredictions( model = model,
# Data
data_dynamic = ds_dynamic,
data_static = ds_static,
data_bc = ds_bc,
scaler_transform = scaler,
scaler_inverse = scaler,
# Dataloader options
device = device,
batch_size = batch_size, # number of forecasts per batch
num_workers = dataloader_settings['num_workers'],
prefetch_factor = dataloader_settings['prefetch_factor'],
prefetch_in_gpu = dataloader_settings['prefetch_in_gpu'],
pin_memory = dataloader_settings['pin_memory'],
asyncronous_gpu_transfer = dataloader_settings['asyncronous_gpu_transfer'],
# Autoregressive settings
input_k = ar_settings['input_k'],
output_k = ar_settings['output_k'],
forecast_cycle = ar_settings['forecast_cycle'],
stack_most_recent_prediction = ar_settings['stack_most_recent_prediction'],
# Prediction options
forecast_reference_times = forecast_reference_times,
ar_blocks = ar_blocks,
                                               ar_iterations = ar_iterations,  # number of autoregressive iterations
# Save options
zarr_fpath = long_forecast_zarr_fpath, # None --> do not write to disk
rounding = 2, # Default None. Accept also a dictionary
compressor = "auto", # Accept also a dictionary per variable
chunks = "auto")
print(ds_long_forecasts)
##------------------------------------------------------------------------------.
##------------------------------------------------------------------------------.
################################
#### Create hovmoller plots ####
################################
# data_sampling_dir = os.path.join(data_dir, cfg['model_settings']["sampling_name"])
# ds_dynamic = xr.open_zarr(os.path.join(data_sampling_dir, "Data","dynamic", "time_chunked", "dynamic.zarr"))
# ds_long_forecasts = xr.open_zarr(long_forecast_zarr_fpath)
#-------------------------------------------------------------------------------
# - Load anomaly scalers
monthly_std_anomaly_scaler = LoadAnomaly(os.path.join(data_sampling_dir, "Scalers", "MonthlyStdAnomalyScaler_dynamic.nc"))
# - Create directory where to save figures
# os.makedirs(os.path.join(model_dir, "figs/hovmoller_plots"), exist_ok=True)
# - Create figures
for i in range(len(forecast_reference_times)):
# Select 1 forecast
ds_forecast = ds_long_forecasts.isel(forecast_reference_time=i)
# Plot variable 'State' Hovmoller
fig = create_hovmoller_plots(ds_obs = ds_dynamic,
ds_pred = ds_forecast,
scaler = None,
arg = "state",
time_groups = None)
fig.savefig(os.path.join(model_dir, "figs/hovmoller_plots", "state_sim" + '{:01}.png'.format(i)))
# Plot variable 'standard anomalies' Hovmoller
fig = create_hovmoller_plots(ds_obs = ds_dynamic,
ds_pred = ds_forecast,
scaler = monthly_std_anomaly_scaler,
arg = "anom",
time_groups = None)
fig.savefig(os.path.join(model_dir, "figs/hovmoller_plots", "anom_sim" + '{:01}.png'.format(i)))
#-------------------------------------------------------------------------------
# # Select 1 forecast
# ds_forecast = ds_forecasts.isel(forecast_reference_time=1)
# # Plot
# fig = create_hovmoller_plots(ds_obs = ds_dynamic,
# ds_pred = ds_forecast,
# scaler=None,
# arg="state",
# time_groups=None)
# plt.show()
# fig = create_hovmoller_plots(ds_obs = ds_dynamic,
# ds_pred = ds_forecast,
# scaler=anomaly_scaler,
# arg="anom",
# time_groups=None)
# plt.show()
# Save figure
# fig.savefig("/home/Projects/deepsphere-weather/figs/hovmoller_long_sim.png")
| 1.695313 | 2 |
apps/barb_example/main.py | cemac/forest | 20 | 12773432 | """Matplotlib-esque usage of barbs in Typescript/Bokeh
Additional comment
"""
import bokeh.plotting
from forest import wind # Magic to extend bokeh.Figure
def main():
"""Example using forest.wind.Barb"""
x=[0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3]
y=[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
u = [i*10 for i in x]
v = [j*10 for j in y]
figure = bokeh.plotting.figure()
figure.barb(x=x, y=y, u=u, v=v)
document = bokeh.plotting.curdoc()
document.add_root(figure)
if __name__.startswith("bk"):
main()
| 3.03125 | 3 |
data_files/gen_csv.py | RichieBrady/SimAPI-Python | 1 | 12773433 | import csv
import random
def populate_test_csv():
f = open('test.csv', 'w')
with f:
input_fields = ['time_step', 'PlantOnSched', 'HeatingSetpointSchedule']
temp_change = [1, -1]
plant_on_sched_last = 52
heating_setpoint_schedule_last = 20
writer = csv.DictWriter(f, fieldnames=input_fields)
writer.writeheader()
j = 0
for i in range(35040):
            a = temp_change[random.randint(0, 1)]
            b = temp_change[random.randint(0, 1)]
            if (10 < (plant_on_sched_last + a) < 55) and (10 < (heating_setpoint_schedule_last + b) < 55):
                plant_on_sched_last += a
                heating_setpoint_schedule_last += b
writer.writerow({'time_step': j,
'PlantOnSched': plant_on_sched_last,
'HeatingSetpointSchedule': heating_setpoint_schedule_last})
j += 900
f.close()
def populate_new_csv(index):
f = open(f'year{index}.csv', 'w')
with f:
input_fields = ['time_step', 'Tset']
temp_change = [1, -1]
q = 23
writer = csv.DictWriter(f, fieldnames=input_fields)
writer.writeheader()
j = 0
for i in range(35040):
            a = temp_change[random.randint(0, 1)]
            if -6 < (q + a) < 53:
                q += a
writer.writerow({'time_step': j, 'Tset': q})
j += 900
f.close()
for i in range(1, 20):
populate_new_csv(i)
| 3.09375 | 3 |
pybamm/models/submodels/interface/inverse_kinetics/inverse_butler_volmer.py | zlgenuine/pybamm | 0 | 12773434 | <gh_stars>0
#
# Inverse Butler-Volmer class
#
import pybamm
from .base_inverse_kinetics import BaseInverseKinetics
from ..kinetics.butler_volmer import ButlerVolmer
class InverseButlerVolmer(BaseInverseKinetics, ButlerVolmer):
"""
A base submodel that implements the inverted form of the Butler-Volmer relation to
solve for the reaction overpotential.
Parameters
----------
param
Model parameters
domain : iter of str, optional
The domain(s) in which to compute the interfacial current. Default is None,
in which case j.domain is used.
**Extends:** :class:`pybamm.interface.kinetics.ButlerVolmer`
"""
def __init__(self, param, domain):
super().__init__(param, domain)
def _get_overpotential(self, j, j0, ne, T):
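        # Inverting j = 2 * j0 * sinh(ne * eta / (2 * (1 + Theta * T))) for eta
        # gives the arcsinh form below.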
return (2 * (1 + self.param.Theta * T) / ne) * pybamm.arcsinh(j / (2 * j0))
| 2.953125 | 3 |
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/home/pagefactory/home_template_path.py | zhangyin2088/Teamcat | 6 | 12773435 | #coding=utf-8
'''
Created on 2015-10-10
@author: Devuser
'''
class HomeTaskPath(object):
left_nav_template_path="home/home_left_nav.html"
sub_nav_template_path="task/home_task_leftsub_nav.html"
task_index_path="task/home_task_index.html"
class HomeProjectPath(object):
left_nav_template_path="home/home_left_nav.html"
sub_nav_template_path="project/home_project_leftsub_nav.html"
project_list_template_path="project/home_project_listview.html"
project_list_control_path="project/home_project_list_control.html"
class HomeAutoTaskPath(object):
left_nav_template_path="home/home_left_nav.html"
sub_nav_template_path="autotask/home_autotask_leftsub_nav.html"
project_list_template_path="autotask/home_autotask_listview.html"
class HomeDashBoardPath(object):
left_nav_template_path="home/home_left_nav.html"
activity_template_path="dashboard/home_dashboard_activity.html"
summary_template_path="dashboard/home_dashboard_summary.html"
class HomeFortestingPath(object):
left_nav_template_path="home/home_left_nav.html"
sub_nav_template_path="fortesting/home_fortesting_leftsub_nav.html"
class HomeIssuePath(object):
left_nav_template_path="home/home_left_nav.html"
sub_nav_template_path="issue/home_issue_leftsub_nav.html"
home_issue_webapp="issue/home_issue_webapp.html"
home_issue_index="issue/index.html"
class HomeWebappsPath(object):
webapps_index_path="webapps/home_webapp_index.html"
left_nav_template_path="home/home_left_nav.html"
sub_nav_template_path="webapps/home_webapps_leftsub_nav.html"
webapps_webpart_path="webapps/home_webapp_webpart.html"
webapps_create_dialog_path="webapps/home_webapp_create_dialog.html"
class Home_unloginPagePath(object):
home_page_path="home/home_page.html"
home_welcome_path="home/home_page_welcome.html"
home_project_summary_path="home/home_page_project_summary.html"
home_device_page_path="home/home_page_device.html"
class DevicePagePath(object):
left_nav_template_path="home/home_left_nav.html"
device_page_path="device/home_device_index.html"
device_list_page="device/home_device_list_page.html"
device_list_controll="device/home_device_list_controll.html"
| 1.78125 | 2 |
kumo/kumo.py | lightnet328/kurokumo | 48 | 12773436 | <reponame>lightnet328/kurokumo<gh_stars>10-100
# coding: utf8
from twitter import *
from xml.sax.saxutils import *
from . import normalize_neologd as Normalizer
import MeCab
import matplotlib.pyplot as plt
from collections import Counter
from wordcloud import WordCloud
import io
import base64
class Kumo:
def __init__(self, twitter, mecab):
if isinstance(twitter, Twitter):
self._twitter = twitter
else:
raise Exception('Instance is not Twitter')
if isinstance(mecab, MeCab.Tagger):
self._mecab = mecab
self._mecab.parse('')
else:
raise Exception('Instance is not MeCab')
def _get_user_timeline(self):
account = self._twitter.account.verify_credentials()
params = {
"user_id": account["id"],
"count": 200,
"include_entities": 1,
}
timeline = self._twitter.statuses.user_timeline(**params)
return timeline
def _get_original_tweets(self, tweets):
original_tweets = []
for tweet in tweets:
if "retweeted_status" in tweet:
original_tweets.append(tweet["retweeted_status"])
else:
original_tweets.append(tweet)
return original_tweets
def _extract_text_from_tweets(self, tweets):
texts = []
for tweet in tweets:
if "entities" in tweet:
entities = tweet["entities"]
if "urls" in entities:
for url in entities["urls"]:
tweet["text"] = tweet["text"].replace(url["url"], "")
if "hashtags" in entities:
for hashtag in entities["hashtags"]:
tweet["text"] = tweet["text"].replace("#" + hashtag["text"], "")
if "symbols" in entities:
for symbol in entities["symbols"]:
tweet["text"] = tweet["text"].replace("$" + symbol["text"], "")
if "user_mentions" in entities:
for mention in entities["user_mentions"]:
tweet["text"] = tweet["text"].replace("@" + mention["screen_name"], "")
if "media" in entities:
for media in entities["media"]:
tweet["text"] = tweet["text"].replace(media["url"], "")
texts.append(unescape(tweet["text"]))
return texts
def _normalize_texts(self, texts):
normalized_texts = []
for text in texts:
normalized_texts.append(Normalizer.normalize_neologd(text))
return normalized_texts
def _feature_to_dict(self, feature):
keys = [
"品詞",
"品詞細分類1",
"品詞細分類2",
"品詞細分類3",
"活用形",
"活用型",
"原形",
"読み",
"発音",
]
values = feature.split(",")
feature_dict = {}
        for i in range(len(keys)):
            if i < len(values):
                feature_dict[keys[i]] = values[i]
return feature_dict
def _is_ignore_feature(self, feature):
ignore = {
"品詞": ["助詞", "助動詞", "連体詞", "副詞", "接頭詞", "感動詞", "記号", "BOS/EOS"],
"品詞細分類1": ["非自立", "接尾", "数"],
"活用形": ["サ変・スル", "五段・ラ行"]
}
        is_partial_match = False
        for feature_key, feature_value in feature.items():
            if feature_key in ignore:
                for ignore_value in ignore[feature_key]:
                    if feature_value == ignore_value:
                        is_partial_match = True
                        break
        return is_partial_match
def _get_dictionary_form(self, word, feature):
if "原形" in feature and feature["原形"] != "*":
return feature["原形"]
else:
return word
def _get_normalized_texts_from_twitter(self):
tweets = self._get_user_timeline()
texts = self._get_original_tweets(tweets)
texts = self._extract_text_from_tweets(texts)
normalized_texts = self._normalize_texts(texts)
return normalized_texts
def _get_words(self):
texts = self._get_normalized_texts_from_twitter()
words = []
for text in texts:
node = self._mecab.parseToNode(text)
while node:
feature = self._feature_to_dict(node.feature)
if self._is_ignore_feature(feature) == False:
word = self._get_dictionary_form(node.surface, feature)
words.append(word)
node = node.next
return words
def _get_word_frequencies(self):
words = self._get_words()
counter = Counter(words)
word_frequencies = counter.most_common()
return word_frequencies
    def generate(self, params=None):
default_params = {
"background_color": "white",
"width": 640,
"height": 400,
}
if params:
default_params.update(params)
params = default_params
word_frequencies = self._get_word_frequencies()
self._wordcloud = WordCloud(**params).generate_from_frequencies(word_frequencies)
def to_encoded_image(self):
image = self._wordcloud.to_image()
output = io.BytesIO()
image.save(output, "png")
encoded_image = base64.b64encode(output.getvalue()).decode('utf-8')
return encoded_image
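# Usage sketch (hypothetical credentials; assumes working Twitter OAuth keys
# and a local MeCab dictionary):
#
#     t = Twitter(auth=OAuth(token, token_secret, consumer_key, consumer_secret))
#     kumo = Kumo(t, MeCab.Tagger())
#     kumo.generate({"width": 800, "height": 500})
#     png_base64 = kumo.to_encoded_image()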
| 2.609375 | 3 |
4th_100/problem346.py | takekoputa/project-euler | 0 | 12773437 | <filename>4th_100/problem346.py
# Problem: https://projecteuler.net/problem=346
"""
A repunit of n digits in base b has the (base-10) value r(b, n) = 1 + b + b^2 + ... + b^(n-2) + b^(n-1)
= (b^n-1)/(b-1)
For any positive integer b,
n = 1: r(b, 1) = 1
n = 2: r(b, 2) = 1 + b
n = 3: r(b, 3) = 1 + b + b^2
...
Observations: . 1 is a repunit of all bases.
. a positive integer k in a repunit of base (k - 1)
Why? r(k-1, 2) = k
. r(b, m) >= b^2 for all m >= 3
From the observations, we have that:
    An integer k in the range [2, N] is a strong repunit iff k = r(b, n) for some b <= sqrt(N) and n >= 3 (1 is counted separately as a repunit of every base).
Why?
(=>)
. Let k be a strong repunit in the range [2, N].
-> k is a repunit of base k - 1
. Now consider a base b where k is also a repunit where b != k - 1 (we considered it above).
. If b > sqrt(N):
r(b, 1) = 1 != k
r(b, 2) = 1 + b != k
        r(b, 3) = 1 + b + b^2 > b^2 > N (contradicts k <= N).
-> b <= sqrt(N) (and obviously, n >= 3).
(<=) this direction is trivial.
"""
from math import sqrt
N = 10**12
SQRT_N = int(sqrt(N))
ans = 0
ans += 1 # 1 is a repunit of all bases
strong_repunits = set()
for base in range(2, SQRT_N+1):
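    # Here n tracks base**length (starting at length 3), so
    # r = (n - 1) // (base - 1) is the repunit of that length in this base.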
n = base ** 3
r = (n-1)//(base-1)
while r < N:
strong_repunits.add(r)
n = n * base
r = (n-1)//(base-1)
ans += sum(strong_repunits)
print(ans)
| 3.609375 | 4 |
core/agents/default_agent.py | Yoshi-0921/MAEXP | 0 | 12773438 | <reponame>Yoshi-0921/MAEXP
# -*- coding: utf-8 -*-
"""Source code for dqn agent class.
Author: <NAME> <<EMAIL>>
"""
from random import random
from .abstract_agent import AbstractAgent
class DefaultAgent(AbstractAgent):
def get_action(self, state, epsilon):
if random() < epsilon:
action = self.get_random_action()
else:
action = self.brain.get_action(state)
return action
| 2.609375 | 3 |
static_2d_FM_TFIsing__field_large_to_0/fig_gather/make_fig_gather.py | ryuikaneko/exact_diagonalization_dynamics | 1 | 12773439 | #!/usr/bin/env python
# coding:utf-8
from __future__ import print_function
#import sys
import re
import glob
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
all_files = glob.glob('../*/dat_L*_tau_inf')
list_L = []
list_N = []
list_mx = []
list_mz0mz1 = []
list_ene = []
for file_name in all_files:
# N = file_name.replace("dat_L","")
L = re.sub(".*dat_L","",file_name)
L = int(L.replace("_tau_inf",""))
N = L**2
list_L.append(L)
list_N.append(N)
print(file_name,L,N)
# file = open(sys.argv[1])
# file = open('dat_L3_tau_inf')
file = open(file_name)
lines = file.readlines()
file.close()
for line in lines:
if line.startswith("mx ["):
line_mx = line[:-1]
line_mx = line_mx.replace("mx [","")
line_mx = line_mx.replace("]","")
# list_mx = np.fromstring(line_mx,dtype=np.float,sep=',')
            list_mx.append(np.fromstring(line_mx, dtype=float, sep=','))
if line.startswith("mz0mz1 ["):
line_mz0mz1 = line[:-1]
line_mz0mz1 = line_mz0mz1.replace("mz0mz1 [","")
line_mz0mz1 = line_mz0mz1.replace("]","")
            list_mz0mz1.append(np.fromstring(line_mz0mz1, dtype=float, sep=','))
if line.startswith("ene ["):
line_ene = line[:-1]
line_ene = line_ene.replace("ene [","")
line_ene = line_ene.replace("]","")
            list_ene.append(np.fromstring(line_ene, dtype=float, sep=','))
if line.startswith("field_steps: h(t)= ["):
line_h = line[:-1]
line_h = line_h.replace("field_steps: h(t)= [","")
line_h = line_h.replace("]","")
            list_h = np.fromstring(line_h, dtype=float, sep=',')
list_enedens = []
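# Energy density = total energy divided by the number of sites N = L**2.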
for i in range(len(list_N)):
    list_enedens.append(np.array([x / list_N[i] for x in list_ene[i]], dtype=float))
print("h",list_h)
for i in range(len(list_L)):
print("L mx",list_L[i],list_mx[i])
print("L mz0mz1",list_L[i],list_mz0mz1[i])
print("L enedens",list_L[i],list_enedens[i])
fig0 = plt.figure()
fig0.suptitle("mx")
for i in range(len(list_L)):
plt.plot(list_h,list_mx[i],label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1,1),loc='upper right',borderaxespad=1)
plt.gca().invert_xaxis()
fig0.savefig("fig_mx.png")
fig1 = plt.figure()
fig1.suptitle("mz0mz1")
for i in range(len(list_L)):
plt.plot(list_h,list_mz0mz1[i],label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1,0),loc='lower right',borderaxespad=1)
plt.gca().invert_xaxis()
fig1.savefig("fig_mz0mz1.png")
fig2 = plt.figure()
fig2.suptitle("enedens")
for i in range(len(list_L)):
plt.plot(list_h,list_enedens[i],label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1,0),loc='lower right',borderaxespad=1)
plt.gca().invert_xaxis()
fig2.savefig("fig_enedens.png")
| 2.421875 | 2 |
tests/test_vmtkScripts/test_vmtkimagecompose.py | ramtingh/vmtk | 0 | 12773440 | <filename>tests/test_vmtkScripts/test_vmtkimagecompose.py<gh_stars>0
## Program: VMTK
## Language: Python
## Date: January 10, 2018
## Version: 1.4
## Copyright (c) <NAME>, <NAME>, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## <NAME> (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtkimagecompose as comp
def test_multiply_images(aorta_image, compare_images):
name = __name__ + '_test_multiply_images.mha'
composer = comp.vmtkImageCompose()
composer.Operation = 'multiply'
composer.Image = aorta_image
composer.Image2 = aorta_image
composer.Execute()
assert compare_images(composer.Image, name) == True
def test_subtract_images(aorta_image, compare_images):
name = __name__ + '_test_subtract_images.mha'
composer = comp.vmtkImageCompose()
composer.Operation = 'subtract'
composer.Image = aorta_image
composer.Image2 = aorta_image
composer.Execute()
assert compare_images(composer.Image, name) == True
def test_negate_image2_and_multiply(aorta_image, compare_images):
name = __name__ + '_test_negate_image2_and_multiply.mha'
composer = comp.vmtkImageCompose()
composer.Operation = 'multiply'
composer.NegateImage2 = True
composer.Image = aorta_image
composer.Image2 = aorta_image
composer.Execute()
assert compare_images(composer.Image, name) == True
def test_negate_image2_and_min(aorta_image, compare_images):
name = __name__ + '_test_negate_image2_and_min.mha'
composer = comp.vmtkImageCompose()
composer.Operation = 'min'
composer.NegateImage2 = True
composer.Image = aorta_image
composer.Image2 = aorta_image
composer.Execute()
assert compare_images(composer.Image, name) == True
def test_negate_image2_and_max(aorta_image, compare_images):
name = __name__ + '_test_negate_image2_and_max.mha'
composer = comp.vmtkImageCompose()
composer.Operation = 'max'
composer.NegateImage2 = True
composer.Image = aorta_image
composer.Image2 = aorta_image
composer.Execute()
assert compare_images(composer.Image, name) == True
| 2.296875 | 2 |
Winter 2017/lec7/codingpractice.py | hyunjaemoon/pythonteaching | 0 | 12773441 |
#Link Class is given!
class Link:
"""A linked list.
>>> s = Link(1, Link(2, Link(3)))
>>> s.first
1
>>> s.rest
Link(2, Link(3))
"""
empty = ()
def __init__(self, first, rest=empty):
assert rest is Link.empty or isinstance(rest, Link)
self.first = first
self.rest = rest
def __repr__(self):
if self.rest is Link.empty:
return 'Link({})'.format(self.first)
else:
return 'Link({}, {})'.format(self.first, repr(self.rest))
def __str__(self):
"""Returns a human-readable string representation of the Link
>>> s = Link(1, Link(2, Link(3, Link(4))))
>>> str(s)
'<1 2 3 4>'
>>> str(Link(1))
'<1>'
>>> str(Link.empty) # empty tuple
'()'
"""
string = '<'
while self.rest is not Link.empty:
string += str(self.first) + ' '
self = self.rest
return string + str(self.first) + '>'
def __eq__(self, other):
"""Compares if two Linked Lists contain same values or not.
>>> s, t = Link(1, Link(2)), Link(1, Link(2))
>>> s == t
True
"""
if self is Link.empty and other is Link.empty:
return True
if self is Link.empty or other is Link.empty:
return False
return self.first == other.first and self.rest == other.rest
def list_to_link(lst):
"""Takes a Python list and returns a Link with the same elements.
>>> link = list_to_link([1, 2, 3])
    >>> link
    Link(1, Link(2, Link(3)))
"""
"***YOUR CODE HERE***"
def link_to_list(link):
"""Takes a Link and returns a Python list with the same elements.
>>> link = Link(1, Link(2, Link(3, Link(4))))
>>> link_to_list(link)
[1, 2, 3, 4]
>>> link_to_list(Link.empty)
[]
"""
"*** YOUR CODE HERE ***"
def remove_all(link , value):
"""Remove all the nodes containing value. Assume there exists some
nodes to be removed and the first element is never removed.
>>> l1 = Link(0, Link(2, Link(2, Link(3, Link(1, Link(2, Link(3)))))))
>>> remove_all(l1, 2)
>>> l1
Link(0, Link(3, Link(1, Link(3))))
>>> remove_all(l1, 3)
>>> l1
Link(0, Link(1))
"""
"*** YOUR CODE HERE ***"
def linked_sum(lnk, total):
"""Return the number of combinations of elements in lnk that
sum up to total .
>>> # Four combinations : 1 1 1 1 , 1 1 2 , 1 3 , 2 2
>>> linked_sum (Link(1, Link(2, Link(3, Link(5)))), 4)
4
>>> linked_sum(Link(2, Link(3, Link(5))), 1)
0
>>> # One combination : 2 3
>>> linked_sum(Link(2, Link(4, Link(3))), 5)
1
"""
"*** YOUR CODE HERE ***"
def has_cycle(s):
"""
>>> has_cycle(Link.empty)
False
>>> a = Link(1, Link(2, Link(3)))
>>> has_cycle(a)
False
>>> a.rest.rest.rest = a
>>> has_cycle(a)
True
"""
"*** YOUR CODE HERE ***"
| 4.21875 | 4 |
vendor/home/python/home_daemon.py | hvos234/raspberrypi.home.website | 0 | 12773442 | <reponame>hvos234/raspberrypi.home.website<filename>vendor/home/python/home_daemon.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess, time
# subprocess.call for start/stop returns a non-zero code if the daemon is
# already in the requested state (e.g. starting an already-started daemon,
# and likewise for stop). status behaves differently: it returns zero while
# the daemon is running and non-zero when it is stopped. So we have to use
# status() to check whether a start or stop command actually succeeded.
class home_daemon:
def start(self):
# check if it is stopped, before
if not self.status():
subprocess.call(['sudo', 'service', 'home-daemon-receiver', 'start'])
def status(self):
if not subprocess.call(['sudo', 'service', 'home-daemon-receiver', 'status']):
# it returns 0 if everything is oke, it is running
return True
else:
return False
def stop(self):
# check if it is started, before
if self.status():
subprocess.call(['sudo', 'service', 'home-daemon-receiver', 'stop'])
        # Check whether the daemon has really stopped, and check again after 11
        # seconds: Linux gives the daemon a stop command and, after 10 seconds,
        # a kill command.
if self.status():
time.sleep(11) # delays for 11 seconds
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
# start the daemon no matter what
self.start()
def __del__(self):
# start the daemon no matter what
self.start()
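# Usage sketch (assumes a "home-daemon-receiver" service exists and sudo does
# not prompt for a password):
#
#     daemon = home_daemon()
#     daemon.stop()
#     # ... do maintenance work while the receiver is down ...
#     daemon.start()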
| 2.875 | 3 |
Semestre_2018_2/Taller/Python/punto4_3.py | SherylA/Archivo_Fundamentos | 0 | 12773443 | <gh_stars>0
N = int(input("Enter the degree of polynomial 1: "))
M = int(input("Enter the degree of polynomial 2: "))
GradRes=max(N+1,M+1)
coef1=[0.0]*GradRes
coef2=[0.0]*GradRes
for i in range(0,N+1):
    coef1[i] = float(input("Enter the coefficient of x^%d: " % i))
for i in range(0,M+1):
    coef2[i] = float(input("Enter the coefficient of x^%d: " % i))
for i in range(0,GradRes):
print("La suma para x^",i,"es: ",coef1[i]+coef2[i])
| 3.515625 | 4 |
quant/markets/market.py | doubleDragon/QuantBot | 7 | 12773444 | <filename>quant/markets/market.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import time
from quant import config
class Market(object):
"""
eth_btc
base_currency :btc
quote_currency:eth
"""
def __init__(self, base_currency, market_currency, pair_code, fee_rate):
self._name = None
self.base_currency = base_currency
self.market_currency = market_currency
self.pair_code = pair_code
self.fee_rate = fee_rate
self.depth_updated = 0
self.update_rate = 1
self.is_terminated = False
self.request_timeout = 5 # 5s
self.depth = {'asks': [{'price': 0, 'amount': 0}], 'bids': [{'price': 0, 'amount': 0}]}
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def terminate(self):
self.is_terminated = True
def get_depth(self):
time_diff = time.time() - self.depth_updated
# logging.warn('Market: %s order book1:(%s>%s)', self.name, time_diff, self.depth_updated)
if time_diff > self.update_rate:
logging.debug('%s should update...', self.name)
if not self.ask_update_depth():
return None
time_diff = time.time() - self.depth_updated
# logging.warn('Market: %s order book2:(%s>%s)', self.name, time_diff, self.depth_updated)
if time_diff > config.market_expiration_time:
# logging.warn('Market: %s order book is expired(%s>%s)', self.name, time_diff,
# config.market_expiration_time)
return None
return self.depth
def ask_update_depth(self):
try:
self.update_depth()
# self.convert_to_usd()
self.depth_updated = time.time()
return True
except Exception as e:
logging.error("Can't update market: %s - err:%s" % (self.name, str(e)))
# log_exception(logging.DEBUG)
return False
# traceback.print_exc()
def get_ticker(self):
depth = self.get_depth()
if not depth:
return None
res = {'ask': {'price': 0, 'amount': 0}, 'bid': {'price': 0, 'amount': 0}}
if len(depth['asks']) > 0:
res['ask'] = depth['asks'][0]
if len(depth['bids']) > 0:
res['bid'] = depth['bids'][0]
return res
def update_depth(self):
"""子类重写该方法,每个market的数据不一样"""
pass
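# Example override sketch (hypothetical exchange client; `my_client` and its
# order_book() call are illustrative assumptions, not a real API):
#
#     class MyMarket(Market):
#         def update_depth(self):
#             raw = my_client.order_book(self.pair_code)
#             self.depth = {
#                 'asks': [{'price': p, 'amount': a} for p, a in raw['asks']],
#                 'bids': [{'price': p, 'amount': a} for p, a in raw['bids']],
#             }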
| 2.578125 | 3 |
TheSeller/run_game.py | KolegaLiterat/imadethis | 0 | 12773445 | #libs
import pygame
import datetime
#modules
from modules.gui import UserInterface
from modules.sprites import Sprites
from modules.supplies import Supplies
from modules.workers import Workers
from modules.demand import Demand
from modules.gamelogic import Actions
pygame.init()
window = pygame.display.set_mode((1280, 720))
worker_and_supplies_font = pygame.font.Font('./font/kennyFont.ttf', 70)
demand_font = pygame.font.Font('./font/kennyFont.ttf', 35)
information_font = pygame.font.Font('./font/kennyFont.ttf', 30)
pygame.display.set_caption('The Seller')
actions = Actions()
gui = UserInterface()
gui.create_lines_for_gui(window)
sprites_for_tokens = Sprites()
sprites_for_tokens.load_sprites()
workers_objects = pygame.sprite.Group()
resource_objects = pygame.sprite.Group()
demand_objects = pygame.sprite.Group()
RESOURCE_BUCKET_ORE = Supplies((400, 140), sprites_for_tokens.loaded_sprites[0], 'bucket of ore', False, 0, 0, 0)
RESOURCE_BUCKET_WATER = Supplies((400, 250), sprites_for_tokens.loaded_sprites[1], 'bucket of water', True, 0, 0, 0)
RESOURCE_CLAM_CLOSED = Supplies((400, 360), sprites_for_tokens.loaded_sprites[2], 'clam', False, 0, 0, 0)
RESOURCE_CLAM_OPEN = Supplies((400, 470), sprites_for_tokens.loaded_sprites[3], 'opened clam', True, 0, 0, 0)
RESOURCE_MEAT_COOKED = Supplies((400, 580), sprites_for_tokens.loaded_sprites[4], 'cooked meat', True, 0, 0, 0)
RESOURCE_MEAT_RAW = Supplies((650, 140), sprites_for_tokens.loaded_sprites[5], 'raw meat', False, 0, 0, 0)
RESOURCE_TOOL_SHOVEL = Supplies((650, 250), sprites_for_tokens.loaded_sprites[6], 'shovel', True, 0, 0, 0)
RESOURCE_TOOL_SWORD = Supplies((650, 360), sprites_for_tokens.loaded_sprites[7], 'sword', True, 0, 0, 0)
RESOURCE_WOOD_LOG = Supplies((650, 470), sprites_for_tokens.loaded_sprites[8], 'log', False, 0, 0, 0)
RESOURCE_WOOD_TREE = Supplies((650, 580), sprites_for_tokens.loaded_sprites[9], 'tree', False, 0, 0, 0)
DEMAND_BUCKET_WATER = Demand((1050, 100), sprites_for_tokens.loaded_sprites[1], 'bucket of water', 0, 0)
DEMAND_CLAM_OPEN = Demand((1050, 230), sprites_for_tokens.loaded_sprites[3], 'opened clam', 0, 0)
DEMAND_MEAT_COOKED = Demand((1050, 360), sprites_for_tokens.loaded_sprites[4], 'cooked meat', 0, 0)
DEMAND_TOOL_SHOVEL = Demand((1050, 490), sprites_for_tokens.loaded_sprites[6], 'shovel',0, 0)
DEMAND_TOOL_SWORD = Demand((1050, 620), sprites_for_tokens.loaded_sprites[7], 'sword', 0, 0)
WORKER1 = Workers((80, 130), sprites_for_tokens.loaded_sprites[10], True)
WORKER2 = Workers((80, 220), sprites_for_tokens.loaded_sprites[10], True)
WORKER3 = Workers((80, 310), sprites_for_tokens.loaded_sprites[10], True)
WORKER4 = Workers((80, 400), sprites_for_tokens.loaded_sprites[10], True)
WORKER5 = Workers((80, 490), sprites_for_tokens.loaded_sprites[10], True)
WORKER6 = Workers((80, 580), sprites_for_tokens.loaded_sprites[10], True)
resource_objects.add(RESOURCE_BUCKET_ORE, RESOURCE_BUCKET_WATER, RESOURCE_CLAM_CLOSED,
RESOURCE_CLAM_OPEN, RESOURCE_MEAT_COOKED, RESOURCE_MEAT_RAW,
RESOURCE_TOOL_SHOVEL, RESOURCE_TOOL_SWORD, RESOURCE_WOOD_LOG,
RESOURCE_WOOD_TREE)
resource_objects.update()
resource_objects.draw(window)
demand_objects.add(DEMAND_BUCKET_WATER, DEMAND_CLAM_OPEN,
DEMAND_MEAT_COOKED, DEMAND_TOOL_SHOVEL, DEMAND_TOOL_SWORD)
demand_objects.update()
demand_objects.draw(window)
workers_objects.add(WORKER1, WORKER2, WORKER3, WORKER4, WORKER5, WORKER6)
workers_objects.update()
workers_objects.draw(window)
fps = pygame.time.Clock()
is_game_running: bool = True
happines = 100
end_turn = True
action_menu = False
selected_supply = None
def produce_supplies_with_one_resource(selected_supply, resources, value_change):
if actions.check_is_supply_producable(selected_supply, resources, -1):
if actions.start_production(workers_objects):
actions.calculate_growth_for_resource(selected_supply, value_change)
actions.calculate_usage_of_resources(resources, -1)
else:
gui.action_informations(window, information_font, 'noFreeWorkers')
else:
gui.action_informations(window, information_font, 'notEnoughResources')
def produce_supplies(value_change: int):
resources = []
if selected_supply.name == 'opened clam':
resources.append(RESOURCE_CLAM_CLOSED)
produce_supplies_with_one_resource(selected_supply, resources, value_change)
resources.clear()
elif selected_supply.name == 'cooked meat':
resources.append(RESOURCE_MEAT_RAW)
produce_supplies_with_one_resource(selected_supply, resources, value_change)
resources.clear()
elif selected_supply.name == 'shovel':
resources.extend((RESOURCE_WOOD_LOG, RESOURCE_BUCKET_ORE))
produce_supplies_with_one_resource(selected_supply, resources, value_change)
resources.clear()
elif selected_supply.name == 'sword':
pass
elif selected_supply.name == 'log':
resources.append(RESOURCE_WOOD_TREE)
produce_supplies_with_one_resource(selected_supply, resources, value_change)
resources.clear()
else:
if actions.start_production(workers_objects):
actions.calculate_growth_for_resource(selected_supply, value_change)
else:
gui.action_informations(window, information_font, 'noFreeWorkers')
# if actions.check_is_supply_producable(selected_supply, resource_objects, value_change):
# if actions.start_production(workers_objects):
# actions.calculate_growth_for_resource(selected_supply, value_change)
# else:
# gui.action_informations(window, information_font, 'noFreeWorkers')
# else:
# gui.action_informations(window, information_font, 'notEnoughResources')
def supplies_selling(value_change: int):
if actions.check_is_supply_sellable(selected_supply, value_change):
if actions.start_production(workers_objects):
actions.sell_supplies(selected_supply, value_change)
else:
gui.action_informations(window, information_font, 'noFreeWorkers')
else:
gui.action_informations(window, information_font, 'negativeAmount')
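# Main loop overview: each frame redraws the GUI; clicking a supply token opens
# its action menu, F ends the turn (freeing workers and regrowing resources),
# and P / S / D produce one, sell one, or sell five of the selected supply.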
while is_game_running:
fps.tick(30)
gui.create_text_with_workers_status(worker_and_supplies_font, window, workers_objects)
gui.create_text_with_supplies_amounts(worker_and_supplies_font, window, resource_objects)
gui.create_text_with_demand_information(demand_font, window, demand_objects)
gui.create_text_with_happines(window, demand_font, happines)
if happines > 0:
if end_turn == True:
actions.free_workers(workers_objects)
actions.change_supplies_amount(resource_objects)
end_turn = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
is_game_running = False
if event.type == pygame.MOUSEBUTTONUP:
mouse_position = pygame.mouse.get_pos()
for supply in resource_objects:
if supply.rect.collidepoint(mouse_position):
gui.on_click_actions(window, information_font, supply, demand_objects)
action_menu = True
selected_supply = supply
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_F12:
pygame.image.save(window, 'screen.png')
if event.key == pygame.K_f:
end_turn = True
action_menu = False
gui.action_informations(window, information_font, 'endTurn')
if event.key == pygame.K_p and action_menu:
produce_supplies(1)
if event.key == pygame.K_s and action_menu:
supplies_selling(-1)
if event.key == pygame.K_d and action_menu:
supplies_selling(-5)
pygame.display.update()
pygame.quit()
| 2.46875 | 2 |
src/canvas.py | vt-sailbot/sailbot-18 | 1 | 12773446 | <gh_stars>1-10
"""
Reads the Airmar three times; likely used for testing.
"""
import airmar_reader as ar
import time
ar.print_airmar_sentence_contents = True
ar.setup()
ar.read_airmar()
time.sleep(1)
ar.read_airmar()
time.sleep(1)
ar.read_airmar()
| 1.851563 | 2 |
security/enums/hash_type.py | LiquidFun/stegowav | 0 | 12773447 | <reponame>LiquidFun/stegowav
from enum import Enum
class HashType(Enum):
NONE = 0
PBKDF2 = 1
SCRYPT = 2
| 2.0625 | 2 |
python/testData/inspections/PyRedundantParenthesesInspection/ParenthesizedTupleWithUnpackingInYield.py | tgodzik/intellij-community | 2 | 12773448 | <filename>python/testData/inspections/PyRedundantParenthesesInspection/ParenthesizedTupleWithUnpackingInYield.py
def func(xs):
yield <weak_warning descr="Remove redundant parentheses">(42, *xs)</weak_warning>
| 1.757813 | 2 |
netavg/jobs/minimization.py | grollins/netavg-django | 1 | 12773449 | #!/usr/bin/env python
# encoding: utf-8
import argparse
import prody
import os
import shutil
import subprocess
import numpy
from os.path import join
GMX_PATH = '/usr/local/gromacs/bin/'
mdp_string = '''
define = -DPOSRES
integrator = {integrator}
nsteps = 1000
emtol = 1
nstlist = 1
coulombtype = Cut-off
vdwtype = Cut-off
ns_type = simple
rlist = 1.8
rcoulomb = 1.8
rvdw = 1.8
pbc = xyz
implicit_solvent = GBSA
gb_algorithm = OBC
sa_algorithm = ACE-approximation
rgbradii = 1.8
;nstxout = 1
'''
def parse_args():
    parser = argparse.ArgumentParser(description='Generate trajectory with gaussian fluctuations.')
parser.add_argument('pdb_file', metavar='INPUT_PDB_FILE', help='path to input pdb file')
parser.add_argument('trajectory', metavar='TRAJECTORY', help='path to input trajectory')
parser.add_argument('out_file', metavar='OUTPUT_PDB_FILE', help='path to input pdb file')
parser.add_argument('--pos_res_k', type=float, default=1000.)
args = parser.parse_args()
return (args.pdb_file, args.trajectory, args.out_file, args.pos_res_k)
class Minimizer(object):
def __init__(self, input_pdb_filename, trajectory_filename):
self.input_pdb = self._load_pdb(input_pdb_filename)
self.trajectory = self._load_pdb(trajectory_filename)
def _load_pdb(self, in_file):
protein = prody.parsePDB(in_file)
return protein
def _get_closest_frame(self):
output = prody.AtomGroup('Cartesian average coordinates')
output.setCoords( self.trajectory.getCoords() )
output.setNames( self.trajectory.getNames() )
output.setResnums( self.trajectory.getResnums() )
output.setResnames( self.trajectory.getResnames() )
ensemble = prody.PDBEnsemble(self.trajectory)
ensemble.setCoords(self.input_pdb)
ensemble.superpose()
rmsds = ensemble.getRMSDs()
min_index = numpy.argmin(rmsds)
output.setCoords( ensemble.getCoordsets(min_index) )
return output
def _create_no_h_file(self, output_stream):
# make the index file
cmd = join(GMX_PATH, 'make_ndx')
cmd += ' -f min_round_2.gro -o no_h.ndx'
p1 = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p1.communicate('q\n')
# run editconf
edit_cmd = join(GMX_PATH, 'editconf')
edit_cmd += ' -f min_round_2.gro -o no_h.gro -n no_h.ndx'
p2 = subprocess.Popen(edit_cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p2.communicate('2\n')
def _re_order(self, output_stream):
# create a new index file
lines = open('index.ndx').read().splitlines()
header = lines[0]
indices = []
for line in lines[1:]:
cols = line.split()
for col in cols:
indices.append( int(col) )
resorted = [ indices.index(val)+1 for val in range( 1, max(indices)+1 ) ]
with open('resort.ndx', 'w') as out:
print >>out, header
for val in resorted:
print >>out, val
# resort
edit_cmd = join(GMX_PATH, 'editconf')
edit_cmd += ' -f no_h.gro -o min.pdb -n resort.ndx'
subprocess.check_call(edit_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
def run_minimization(self, posres_force_const=1000., output_stream=None):
start = self._get_closest_frame()
# create temp dir
        if not os.path.isdir('Temp'):
            os.mkdir('Temp')
os.chdir('Temp')
# write the average file
prody.writePDB('average.pdb', self.input_pdb)
pdb_cmd = join(GMX_PATH, 'pdb2gmx')
pdb_cmd += ' -f average.pdb -ff amber99sb-ildn -water none -n index.ndx -posrefc {} -o ref.gro -his'.format(
posres_force_const)
p = subprocess.Popen(pdb_cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p.communicate('0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n')
# put it in a bigger box
box_cmd = join(GMX_PATH, 'editconf')
box_cmd += ' -f ref.gro -o ref_box.gro -c -box 999 999 999'
subprocess.check_call(box_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
# write pdb file
prody.writePDB('start.pdb', start)
# pdb2gmx
pdb_cmd = join(GMX_PATH, 'pdb2gmx')
pdb_cmd += ' -f start.pdb -ff amber99sb-ildn -water none -n index.ndx -posrefc {} -his'.format(
posres_force_const)
p = subprocess.Popen(pdb_cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p.communicate('0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n')
# put it in a bigger box
box_cmd = join(GMX_PATH, 'editconf')
box_cmd += ' -f conf.gro -o box.gro -c -box 999 999 999'
subprocess.check_call(box_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
#
# Round 1
#
# write mdp file
with open('min_round_1.mdp', 'w') as min_file:
min_file.write( mdp_string.format(integrator='steep') )
# run grompp
grompp_cmd = join(GMX_PATH, 'grompp')
grompp_cmd += ' -f min_round_1.mdp -c box.gro -p topol.top -o min_round_1 -r ref_box.gro'
subprocess.check_call(grompp_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
# run mdrun
md_cmd = join(GMX_PATH, 'mdrun')
md_cmd += ' -deffnm min_round_1 -v -nt 1'
subprocess.check_call(md_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
#
# Round 2
#
# write mdp file
with open('min_round_2.mdp', 'w') as min_file:
min_file.write( mdp_string.format(integrator='l-bfgs') )
# run grompp
grompp_cmd = join(GMX_PATH, 'grompp')
grompp_cmd += ' -f min_round_2.mdp -c min_round_1.gro -p topol.top -o min_round_2 -maxwarn 1 -r ref_box.gro'
subprocess.check_call(grompp_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
# run mdrun
md_cmd = join(GMX_PATH, 'mdrun')
md_cmd += ' -deffnm min_round_2 -v -nt 1'
subprocess.check_call(md_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
#
# gather results
#
self._create_no_h_file(output_stream)
self._re_order(output_stream)
# load the pdb
protein = prody.parsePDB('min.pdb').select('not hydrogen')
# clean up
os.chdir('..')
shutil.rmtree('Temp')
return protein
def main():
    input_pdb_filename, trajectory_filename, output_pdb_filename, force_const = parse_args()
    m = Minimizer(input_pdb_filename, trajectory_filename)
    minimized_protein = m.run_minimization(force_const)
    prody.writePDB(output_pdb_filename, minimized_protein)
if __name__ == '__main__':
main()
| 2.34375 | 2 |
main.py | rice-lords-telles/googles-hash-code-2020 | 0 | 12773450 | <reponame>rice-lords-telles/googles-hash-code-2020
import os
from utils import get_input, TEST_INPUTS, write_output_to_files
from collections import OrderedDict
from brute_force import as_they_come_algorithm
# Possible Data Structure
# {
# total_books: 0,
# total_libraries: 0,
# total_days: 0,
#   book_scores: [7, 3, 6, 9]
# libraries:[
# {
# total_books: 0
# books: [],
# signup: 0,
#       # books per day
#       bpd: 0
# }],
# ...
# }
# Example
# {
# total_books: 6,
# total_libraries: 2,
# total_days: 7,
# book_scores: [1, 2, 3, 6, 5, 4],
# libraries:[
# {
# total_books: 5
# books: [0, 1, 2, 3, 4],
# signup: 2,
# # books per day
# bpd: 2
# },
# {
#     total_books: 4
# books: [0, 2, 3, 5],
# signup: 3,
# bpd: 1
# }
# ]
# }
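# The example above, materialized as a real dict for quick manual testing of
# the helpers below. Illustrative only: the field names mirror what the code
# expects, but EXAMPLE_DATA itself is not part of the original module.
EXAMPLE_DATA = {
    "total_books": 6,
    "total_libraries": 2,
    "total_days": 7,
    "book_scores": [1, 2, 3, 6, 5, 4],
    "libraries": [
        {"total_books": 5, "books": [0, 1, 2, 3, 4], "signup": 2, "bpd": 2},
        {"total_books": 4, "books": [0, 2, 3, 5], "signup": 3, "bpd": 1},
    ],
}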
def set_lib_priority(all_data):
    """Return an OrderedDict mapping library index -> priority, best first.
    priority = (signup day cost / total days available) / (num of books / books per day)
    This gives us a value per library, and the largest value is what we want
    to start with. Protected against divide-by-zero errors because every
    input value is guaranteed to be at least 1.
    """
    lib_priority = {}
    for lib_index, lib in enumerate(all_data["libraries"]):
        signup = lib["signup"]
        total_days = all_data["total_days"]
        books_in_lib = lib["total_books"]
        books_per_day = lib["bpd"]
        lib_priority[lib_index] = (signup / total_days) / (books_in_lib / books_per_day)
    # sort by the priority value itself (the original sorted by library
    # index), largest first as the docstring intends
    return OrderedDict(
        sorted(lib_priority.items(), key=lambda item: item[1], reverse=True)
    )
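# Worked example using the illustrative EXAMPLE_DATA above: library 0 scores
# (2/7) / (5/2) ≈ 0.114 and library 1 scores (3/7) / (4/1) ≈ 0.107, so the
# returned ordering starts with library 0.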
def book_score(all_data):
    """Return an OrderedDict mapping library index -> total score of its
    books, highest first. The original stub referenced undefined names; this
    completion assumes the intent was to rank libraries by the combined
    score of the books they hold."""
    book_priority = {}
    for lib_index, lib in enumerate(all_data["libraries"]):
        book_priority[lib_index] = sum(all_data["book_scores"][book] for book in lib["books"])
    return OrderedDict(sorted(book_priority.items(), key=lambda item: item[1], reverse=True))
def lib_picker(lib_dict):
    """Placeholder: decide which libraries to sign up and which books each
    one ships. Not implemented yet; the names below sketch the values the
    finished picker is expected to produce."""
    num_libraries = 0
    lib_id = None
    num_books_to_send = 0
    books_sent = None
def algorithm_template(file_path):
    """Template for new algorithms: read the input at file_path and return
    the scheduled output as a list of lines."""
    return []
def main():
    # the original had a bare ``os.mkdir`` reference that did nothing; the
    # directory name here is inferred from the write path below
    if not os.path.isdir("output"):
        os.mkdir("output")
    # all_data = get_input()
    for input_file in TEST_INPUTS:
        data = get_input(input_file)
        output = as_they_come_algorithm(data)
        write_output_to_files(
            output, "output/" + input_file + as_they_come_algorithm.__name__
        )
# lib_priority_dict = set_lib_priority(all_data)
# book_priority_dict = book_score(all_data)
# lib_picker(lib_priority_dict)
if __name__ == "__main__":
main()
| 3.359375 | 3 |