| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
space_shuttle_autoladnding_eda.py
|
ShaonMajumder/space_shuttle_autolanding_decision
| 2
|
12779051
|
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
data = pd.read_csv('data/shuttle-landing-control.csv',names=['auto_control','stability','error','sign','wind','magnitude','visibility'])
## replacing missing values '*' with 0
data = data.replace('*',0)
## |---------- Data Set Properties ------------|
# |----- Categorical Value Map: 2 = True / 1 = False, 0 = Missing value -----|
## Always be aware of a column's data type; the wrong type can raise errors or leave values unchanged when conditions or other processing are applied.
## Converting data types into homogeneous elements
data = data.astype('int')
## Assuming standardization is not needed, since every value is mapped into the same kind of category
## Cross-validation is not needed because the dataset is too small
print(data.dtypes)
print(data.describe())
data.loc[data['auto_control']==1,'auto_control'] = False
data.loc[data['auto_control']==2,'auto_control'] = True
data.loc[data['visibility']==1,'visibility'] = False
data.loc[data['visibility']==2,'visibility'] = True
data.loc[data['sign']==1,'sign'] = '-'
data.loc[data['sign']==2,'sign'] = '+'
data.loc[data['wind']==1,'wind'] = 'tail'
data.loc[data['wind']==2,'wind'] = 'head'
data.loc[data['stability']==1,'stability'] = 'stab'
data.loc[data['stability']==2,'stability'] = 'xstab'
print(data)
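## A minimal sanity check one might append here (a hypothetical addition, not part of the original script):
## it confirms that each remapped column now holds only the expected categories.
for col in ['auto_control', 'stability', 'sign', 'wind', 'visibility']:
    print(col, data[col].unique())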
| 3.46875
| 3
|
tests/test_cmanalysis.py
|
niklastoe/classifier_metric_uncertainty
| 4
|
12779052
|
import unittest as ut
import pandas as pd
import pymc3 as pm
from bayesian_inference_confusion_matrix import ConfusionMatrixAnalyser, bayes_laplace_prior
class TestConfusionMatrixAnalyser(ut.TestCase):
def __init__(self, *args, **kwargs):
super(TestConfusionMatrixAnalyser, self).__init__(*args, **kwargs)
input_cm = pd.Series([9, 1, 3, 2], index=['TP', 'FN', 'TN', 'FP'])
# use improper prior to avoid bias / simplifies calculation
self.analyser = ConfusionMatrixAnalyser(input_cm)
self.N = self.analyser.confusion_matrix.values.sum()
sel_n = 100000
inf_n_pp = self.analyser.posterior_predict_confusion_matrices(pp_n=sel_n)
inf_n_pp /= float(sel_n)
self.inf_n_pp = inf_n_pp
def test_theta_and_x_sampling(self):
"""confirm that sampled expected value/variance for theta and X are close to the analytical solution
see https://en.wikipedia.org/wiki/Dirichlet_distribution
and https://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution"""
alpha = self.analyser.confusion_matrix
alpha_0 = float(sum(alpha))
dirichlet_mean = alpha / alpha_0
dcm_mean = self.N * dirichlet_mean
dirichlet_var = dirichlet_mean * (1 - dirichlet_mean) / (alpha_0 + 1)
dcm_var = self.N * (self.N + alpha_0) * dirichlet_var
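# Analytical moments being checked here (standard results from the Wikipedia pages cited above):
# Dirichlet posterior:             E[theta_i] = alpha_i / alpha_0,  Var[theta_i] = E[theta_i] * (1 - E[theta_i]) / (alpha_0 + 1)
# Dirichlet-multinomial (N draws): E[X_i] = N * E[theta_i],         Var[X_i] = N * (N + alpha_0) * Var[theta_i]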
for i in self.analyser.theta_samples:
self.assertAlmostEqual(dirichlet_mean[i],
self.analyser.theta_samples[i].mean(),
delta=1e-2)
self.assertAlmostEqual(dcm_mean[i],
self.analyser.pp_samples[i].mean(),
delta=5e-2)
self.assertAlmostEqual(dirichlet_var[i],
self.analyser.theta_samples[i].var(),
delta=1e-3)
self.assertAlmostEqual(dcm_var[i],
self.analyser.pp_samples[i].var(),
delta=2e-1)
def test_expected_value(self):
"""confirm that expected value is equal to the metric for the original confusion matrix
(within 1 percentage point)"""
for metric in self.analyser.cm_metrics.index:
self.assertAlmostEqual(self.analyser.cm_metrics[metric],
self.analyser.theta_metrics.mean()[metric],
delta=1e-2)
def test_variance_convergence(self):
"""test that the variance of the posterior predictions of V_i/N converge towards the variance of theta_i"""
theta_var = self.analyser.theta_samples.var()
inf_n_pp_var = self.inf_n_pp.var()
for i in theta_var.index:
self.assertAlmostEqual(theta_var[i], inf_n_pp_var[i], delta=1e-5)
def test_expected_value_pp_theta(self):
"""test that the expected value from the posterior prediction and theta are identical
this only works for very large N"""
for i in self.analyser.theta_samples.columns:
self.assertAlmostEqual(self.analyser.theta_samples.mean()[i],
self.inf_n_pp.mean()[i],
delta=1e-4)
def test_selected_metrics(self):
"""test if metrics are properly calculated, this is only done for a handful"""
self.assertEqual(self.analyser.cm_metrics['ACC'], 12. / 15.)
self.assertEqual(self.analyser.cm_metrics['PREVALENCE'], 10. / 15.)
self.assertEqual(self.analyser.cm_metrics['TPR'], 9. / 10.)
@ut.skip("pyMC test is disabled because it takes 15-90 seconds")
def test_pymc_implementation(self):
"""my analytical implementation and pyMC should yield the same results.
Test expected value and variance for theta"""
# need to use Bayes-Laplace prior: pyMC cannot deal with Haldane prior
analyser_bl = ConfusionMatrixAnalyser(self.analyser.confusion_matrix,
prior=bayes_laplace_prior)
# inference with pyMC
with pm.Model() as multinom_test:
a = pm.Dirichlet('a', a=bayes_laplace_prior.astype(float).values)
data_pred = pm.Multinomial('data_pred',
n=self.N,
p=a,
observed=self.analyser.confusion_matrix)
trace = pm.sample(5000)
# get pymc samples
pymc_trace_samples = pd.DataFrame(trace.get_values('a'),
columns=self.analyser.confusion_matrix.index)
# compare expected value and variance
for i in self.analyser.theta_samples:
self.assertAlmostEqual(pymc_trace_samples[i].mean(),
analyser_bl.theta_samples[i].mean(),
delta=1e-2)
self.assertAlmostEqual(pymc_trace_samples[i].var(),
analyser_bl.theta_samples[i].var(),
delta=1e-3)
if __name__ == '__main__':
ut.main(verbosity=2)
| 2.625
| 3
|
pystratis/api/coldstaking/responsemodels/infomodel.py
|
TjadenFroyda/pyStratis
| 8
|
12779053
|
<filename>pystratis/api/coldstaking/responsemodels/infomodel.py
from pydantic import Field
from pystratis.api import Model
class InfoModel(Model):
"""A pydantic model for cold wallet information."""
cold_wallet_account_exists: bool = Field(alias='coldWalletAccountExists')
"""True if cold wallet account exists."""
hot_wallet_account_exists: bool = Field(alias='hotWalletAccountExists')
"""True if hot wallet account exists."""
| 2.484375
| 2
|
models/skin.py
|
V1ckeyR/snake_snake
| 1
|
12779054
|
<reponame>V1ckeyR/snake_snake
from app import db
class Skin(db.Model):
"""
Params for category 'color': 'hex'
Params for category 'gradient': 'direction', 'colors'
"""
id = db.Column(db.Integer, primary_key=True)
category = db.Column(db.Integer, db.ForeignKey('category.id'), nullable=False)
params = db.Column(db.JSON, nullable=False)
@staticmethod
def add(category, params):
db.session.add(Skin(category=category, params=params))
db.session.commit()
def __repr__(self):
return f'<Skin { self.category } with params { self.params }>'
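# Hypothetical usage sketch (not part of the original module), following the docstring above;
# the category ids are assumptions:
#   Skin.add(category=1, params={'hex': '#00ff00'})  # a 'color' skin
#   Skin.add(category=2, params={'direction': 'vertical', 'colors': ['#000000', '#ffffff']})  # a 'gradient' skin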
| 2.71875
| 3
|
stock_algorithms/record_coordinates.py
|
Vermee81/practice-coding-contests
| 0
|
12779055
|
if __name__ == '__main__':
N = int(input())
xy_arr = [list(map(int, input().split())) for _ in range(N)]
M = int(input())
op_arr = [list(map(int, input().split())) for _ in range(M)]
Q = int(input())
ab_arr = [list(map(int, input().split())) for _ in range(Q)]
ans_arr = [xy_arr]
def rot_plus_90(xy):
return [xy[1], -xy[0]]
def rot_minus_90(xy):
return [-xy[1], xy[0]]
def taisho_x(x, xy):
return xy if xy[0] == x else [2 * x - xy[0], xy[1]]
def taisho_y(y, xy):
return xy if xy[1] == y else [xy[0], 2 * y - xy[1]]
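# Descriptive notes on the helpers above (comments added for clarity, not in the original submission):
# rot_plus_90 maps (x, y) -> (y, -x), a 90-degree clockwise rotation about the origin;
# rot_minus_90 maps (x, y) -> (-y, x), the corresponding counter-clockwise rotation;
# taisho_x reflects a point across the vertical line given by its first argument, and taisho_y does
# the same across the corresponding horizontal line.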
for op in op_arr:
if len(op) == 2 and op[0] == 3:
nxy = []
for xy in ans_arr[-1]:
nxy.append(taisho_x(op[1], xy))
ans_arr.append(nxy)
continue
if len(op) == 2 and op[0] == 4:
nxy = []
for xy in ans_arr[-1]:
nxy.append(taisho_y(op[1], xy))
ans_arr.append(nxy)
continue
if op[0] == 1:
nxy = []
for xy in ans_arr[-1]:
nxy.append(rot_plus_90(xy))
ans_arr.append(nxy)
continue
if op[0] == 2:
nxy = []
for xy in ans_arr[-1]:
nxy.append(rot_minus_90(xy))
ans_arr.append(nxy)
continue
for ab in ab_arr:
ans = ans_arr[ab[0]][ab[1] - 1]
print(f"{ans[0]} {ans[1]}")
| 2.796875
| 3
|
src/server/main.py
|
arkarkark/feedapp
| 0
|
12779056
|
# Copyright 2011 <NAME> (wtwf.com)
# based on code by '<EMAIL> (<NAME>)'
__author__ = 'wtwf.com (<NAME>)'
# If you want to check this with pychecker on osx you can do this...
# export PYTHONPATH=$PYTHONPATH:/usr/local/google_appengine/
# export PYTHONPATH=$PYTHONPATH:/usr/local/google_appengine/lib/yaml/lib/
from google.appengine.ext import vendor
vendor.add('lib')
from google.appengine.ext import webapp
from wtwf import wtwfhandler
from crud import crud_handler
import auth
import blogger
# import expand
import gps
import instagram
import mail
app = webapp.WSGIApplication([
('/data/gps.*', gps.Demo),
('/data/blogger/oauth', blogger.BloggerHandler),
('/data/blogger/blog', blogger.BloggerDataHandler),
(r'/public/data/blogger/feed/(\w+)', blogger.GetFeedHandler),
(r'/public/data/instagram/feed/([^/]+)', instagram.RssFeed),
(r'/mailfeed/([a-zA-Z0-9_-]+)', mail.FeedFromEmail),
# ('/expand/([a-zA-Z0-9_.%-]+)', expand.ExpandHandler),
# ('/data/expand/feed.json', crud_handler.GetCrudHandler(expand.ExpandFeed)),
# ('/data/expand/item.json', expand.ExpandItemDataHandler),
('/data/mail/feed.json', crud_handler.GetCrudHandler(mail.MailFeed)),
('/data/mail/item.json', mail.MailItemDataHandler),
('/data/user/user.json', wtwfhandler.UserHandler),
('/data/bulkdeletemailitems', mail.BulkDeleteMailItems),
('/data/setupdemo', mail.SetupDemo),
(r'/_ah/mail/(.+)', mail.EmailToFeed),
(auth.decorator.callback_path, auth.decorator.callback_handler())
])
| 2.09375
| 2
|
src/RGT/XML/SVG/Attribs/classAttribute.py
|
danrg/RGT-tool
| 7
|
12779057
|
from RGT.XML.SVG.Attribs.basicSvgAttribute import BasicSvgAttribute
from types import StringType
class ClassAttribute(BasicSvgAttribute):
ATTRIBUTE_CLASS = 'class'
def setClass(self, data=None):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_CLASS, data)
def getClass(self):
node = self._getNodeAttribute(self.ATTRIBUTE_CLASS)
if node is not None:
return node.nodeValue
return None
| 2.640625
| 3
|
swap/router_addresses.py
|
samirma/BasicDefiToolkit
| 0
|
12779058
|
<gh_stars>0
spooky_factory = "0x152eE697f2E276fA89E96742e9bB9aB1F2E61bE3"
hyper_factory = "0x991152411A7B5A14A8CF0cDDE8439435328070dF"
spirit_factory = "0xEF45d134b73241eDa7703fa787148D9C9F4950b0"
waka_factory = "0xB2435253C71FcA27bE41206EB2793E44e1Df6b6D"
sushi_factory = "0xc35DADB65012eC5796536bD9864eD8773aBc74C4"
pancake_factory = '0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73'
spooky_router = '0xF491e7B69E4244ad4002BC14e878a34207E38c29'
hyper_router = '0x53c153a0df7E050BbEFbb70eE9632061f12795fB'
spirit_router = '0x16327E3FbDaCA3bcF7E38F5Af2599D2DDc33aE52'
waka_router = '0x7B17021FcB7Bc888641dC3bEdfEd3734fCaf2c87'
sushi_router = '0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506'
pancake_router = '0x10ED43C718714eb63d5aA57B78B54704E256024E'
| 1.195313
| 1
|
clusprotools/report/__init__.py
|
Mingchenchen/cluspro-tools
| 1
|
12779059
|
# ./report/__init__.py
from .filtering_parameters import *
| 1.09375
| 1
|
block_average/tests/test.py
|
rsdefever/block_average
| 1
|
12779060
|
# coding: utf-8
import numpy as np
import math
from block_average import block_average
def main():
# Enter details here
n_samples = [int(2.5e5)]
# n_samples = [int(5e5),int(1e6),int(2e6),int(4e6)]
for n_sample in n_samples:
# Generate uncorrelated random samples
uncorrelated_samples = np.random.normal(size=n_sample)
average = np.mean(uncorrelated_samples)
variance = np.var(uncorrelated_samples)
# Calculate block averages and variances
means_est, vars_est, vars_err = block_average(uncorrelated_samples)
# Write output
outfile = "uncorr_n{}_blkavg.out".format(n_sample)
with open(outfile, "w") as f:
f.write(
"# Average: {:16.4f}, Variance: {:16.4f}\n".format(
average, variance
)
)
f.write("# N_blk_ops, Mean_est, Var_est, var_err\n")
for n_blk_ops, (mean_est, var_est, var_err) in enumerate(
zip(means_est, vars_est, vars_err)
):
f.write(
"{:10d}{:18.6f}{:16.4e}{:16.4e}\n".format(
n_blk_ops, mean_est, var_est, var_err
)
)
# Generate correlated random samples with MC walk
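# Note: this is a standard Metropolis walk on the potential energy(x) = x**2; downhill moves are
# always accepted and uphill moves are accepted with probability exp(-dE), which produces the
# autocorrelated samples used to exercise the block-averaging error estimate.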
moves = np.random.normal(0.0, 0.05, size=5 * n_sample)
series = []
pos = 0.0
ener = energy(pos)
for i in range(n_sample):
series.append(pos)
trial_pos = pos + moves[i]
trial_ener = energy(trial_pos)
if trial_ener < ener:
pos = trial_pos
ener = trial_ener
else:
rand = np.random.uniform()
if math.exp(-(trial_ener - ener)) > rand:
pos = trial_pos
ener = trial_ener
correlated_samples = np.asarray(series)
# np.savetxt('correlated-samples.txt',correlated_samples)
average = np.mean(correlated_samples)
variance = np.var(correlated_samples)
# Calculate block averages and variances
means_est, vars_est, vars_err = block_average(correlated_samples)
# Write output
outfile = "corr_n{}_blkavg.out".format(n_sample)
with open(outfile, "w") as f:
f.write(
"# Average: {:16.4f}, Variance: {:16.4f}\n".format(
average, variance
)
)
f.write("# N_blk_ops, Mean_est, Var_est, var_err\n")
for n_blk_ops, (mean_est, var_est, var_err) in enumerate(
zip(means_est, vars_est, vars_err)
):
f.write(
"{:10d}{:18.6f}{:16.4e}{:16.4e}\n".format(
n_blk_ops, mean_est, var_est, var_err
)
)
def energy(x):
return x ** 2
if __name__ == "__main__":
main()
| 2.71875
| 3
|
WebScrapping/getData.py
|
marcsze/pythonPrograms
| 0
|
12779061
|
#! Python
from bs4 import BeautifulSoup
from urllib.request import urlopen
import requests, re, time
# Might need to run the following command in windows to change the encoder
# chcp 65001
def getInput(datafile):
links = open(datafile, 'r')
LinkStorage = []
for line in links:
goodLine = line.strip('\n')
LinkStorage.append(goodLine)
links.close()
return LinkStorage
def createOutput(storedData, name):
outfile = open(name, 'w')
for i in storedData:
descrip = storedData[i]
print("{0}\n\t{1}".format(i, descrip), file = outfile)
outfile.close()
def main():
LinkStorage = getInput("combined.txt")
storedData = {}
linkedData = {}
companyData = {}
for i in LinkStorage:
PosNameSearch = re.search('http://www.biospace.com/jobs/job-listing/(.*)-[0-9]', i)
position = PosNameSearch.group(1)
html = requests.get(i).text
soup = BeautifulSoup(html, 'html5lib')
description = soup.find("div").findAll("span", attrs={'id':'ctl00_phMainContent_lblJobRequirements'})
company = soup.find("div").findAll("span", attrs={'id':'ctl00_phMainContent_lblJobDescription'})
storedData[position] = description
linkedData[position] = i
companyData[position] = company
print(i)
time.sleep(1)
createOutput(storedData, "output2.txt")
createOutput(linkedData, "linkedData1.txt")
createOutput(companyData, "companyData.txt")
if __name__ == '__main__': main()
| 2.625
| 3
|
descriptors/preprocessing.py
|
truejulosdu13/NiCOlit
| 2
|
12779062
|
<reponame>truejulosdu13/NiCOlit
import numpy as np
from rdkit import Chem
from rdkit import RDLogger
RDLogger.logger().setLevel(RDLogger.CRITICAL)
def preprocess(df):
"""Preprocesses the dataframe as described in the article : reference.
### 1.None substrates are removed.
### 2.Reaction extracted from Chemical Reviews are removed.
### 3.Reaction extracted from https://doi.org/10.1021/acs.orglett.5b03151 are removed.
### 4.Double Step Reactions are removed.
### 5.Potential Lewis Acid reagent in the reaction are identified and a Lewis Acid category is set up.
### 6.All SMILES chain are written as RDKit canonical SMILES.
### 7.Unfeaturized molecules are removed.
### 8.Remove with less than 20 datapoints after previous preprocessing stages
Parameters:
df (dataframe): dataframe obtain from the NiCOLit csv file
Returns:
df (dataframe): preprocessed dataframe
"""
# 1
df = df[df["substrate"].isna() == False]
# 2
df = df[df["review"] != 'Review']
# 3
df = df[df["DOI"] != 'https://doi.org/10.1021/acs.orglett.5b03151']
# 4
df = df[df["2_steps"] != "Yes"]
# 5
df = find_Lewis_Acid(df)
# 6
# substrate
co_can = [Chem.CanonSmiles(smi) for smi in df["substrate"]]
# coupling partner
ax_can = [Chem.CanonSmiles(smi) for smi in df["effective_coupling_partner"]]
# ligand
lig_can = []
for lig in df["effective_ligand"]:
try:
lig_can.append(Chem.CanonSmiles(dict_ligand[lig]))
except:
lig_can.append(dict_ligand[str(lig)])
# base-reagents
add_can = smiles_additifs(df["effective_reagents"])
# Lewis acid
al_can = []
for al in [additives_mapping(al) for al in df["Lewis Acid"]]:
try:
al_can.append(Chem.CanonSmiles(al))
except:
al_can.append(al)
# full dataframe
df["substrate"] = co_can
df["effective_coupling_partner"] = ax_can
df["effective_ligand"] = lig_can
df["reagents"] = add_can
df["Lewis Acid"] = al_can
# 7
df = df[df["effective_ligand"] != '[C]1N(C23CC4CC(CC(C4)C2)C3)C=CN1C12CC3CC(CC(C3)C1)C2']
df = df[df["effective_coupling_partner"] != "[Li][Zn]([Li])(C)(C)(C)c1ccc(C(=O)N(C(C)C)C(C)C)cc1"]
df = df[df["effective_coupling_partner"] != "[Na+].c1ccc([B-](c2ccccc2)(c2ccccc2)c2ccccc2)cc1" ]
df = df[df["substrate"] != "COc1ccc(I)cc1" ]
df["Lewis Acid"] = df["Lewis Acid"].fillna('NoLewisAcid')
df["Lewis Acid"] = df["Lewis Acid"].replace('nan', 'NoLewisAcid')
for al in Lewis_Acids_to_drop:
df = df[df["Lewis Acid"] != al]
df = df.reset_index(drop=True)
# 8.
vc = df.DOI.value_counts()
doi_above_20 = np.array(vc[vc > 20].index)
indexes = []
for i, row in df.iterrows():
if row["DOI"] not in doi_above_20:
indexes.append(i)
df = df.drop(indexes)
df = df.reset_index(drop=True)
return df
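# Hypothetical usage sketch (the csv filename below is an assumption, not part of this module):
#   import pandas as pd
#   raw_df = pd.read_csv("NiCOlit.csv")
#   clean_df = preprocess(raw_df)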
def find_Lewis_Acid(df):
""" Splits additives raw information into Lewis Acids and Bases.
1. Search for the potential Lewis Acids in the base additive section.
2. Search for a Lewis Acid in the coupling partner section.
3. Select best Lewis Acid if more than one candidate appears.
Parameters:
df (dataframe): dataframe obtain from the NiCOLit csv file
Dict or List User Defined :
no_lewis_acid (list) : list of non Lewis Acid reagents.
dict_non_charge_al (dict): dict with a selction rule when multiple Lewis Acids are encountered.
Returns:
df (dataframe): with a new "Lewis Acid" column
"""
AL = []
# first we find the Lewis Acid for each reaction.
for i, row in df.iterrows():
# is there a Lewis Acid in the covalent Lewis Acid column ?
base = row["effective_reagents_covalent"]
al = None
# is there a Lewis Acid in the ionic Lewis Acid column ?
if isNaN(base):
base = row["effective_reagents"]
try:
if Chem.CanonSmiles(base) in no_lewis_acid:
base = 'NoLewisAcid'
except:
pass
# in case there are no additives, the stronger Lewis Acid may be the coupling partner
if isNaN(base) or base == 'NoLewisAcid':
meca = row["Mechanism"]
# when there is no LA added in the mechanism, the stronger lewis acid is the coupling partner
# we assume that only one Nickel center is involved in the mechanism.
if meca in ['Murahashi', 'Kumada', 'Negishi', 'Al _coupling', 'Suzuki']:
al = row["effective_coupling_partner"]
if Chem.CanonSmiles(al) in no_lewis_acid:
al = 'NoLewisAcid'
else:
al = 'NoLewisAcid'
AL.append(al)
else:
try:
if Chem.CanonSmiles(base) in no_lewis_acid:
AL.append('NoLewisAcid')
else:
AL.append(base)
except:
AL.append(base)
# Choosing the right Lewis acid when more than one candidate is present.
new_AL = []
for al in list(AL) :
# separates Lewis base from Lewis acid
als = al.split('.')
if len(als) == 1: # in case there is only one Lewis acid
new_AL.append(al)
else:
# when there is no positively charged Lewis acid, a specific rule is applied
if '+' not in al:
new_AL.append(dict_non_charge_al[al])
else:
# when there is a cationic species, we take it as the Lewis acid.
new_als = []
for smi in als:
if '+' in smi:
new_als.append(smi)
# when there is more than one, we prioritize the positively charged one.
if len(np.unique(new_als)) == 1:
new_AL.append(new_als[0])
else:
# this should not happen
print("You have to make a choice between ", new_als)
df["Lewis Acid"] = new_AL
return df
no_lewis_acid = ['Cc1ccc(Br)cc1',
'Oc1ccccc1',
'CCN(CC)CC',
'CC(C)(C)C(=O)O',
'O',
'C1CCC2=NCCCN2CC1',
'C1=CCCC=CCC1',
'NoLewisAcid']
# choose the right Lewis acid when two are available
dict_non_charge_al = {"c1cccc(C)c1[Mg]Br.[Li]Cl" : "[Li]Cl",
"c1ccccc1[Mg]Br.Cl[Mg]Cl" : "Cl[Mg]Cl",
"c1ccccc1[Mg]Br.[Li]Cl" : "[Li]Cl",
"c1ccccc1[Mg]Br.[Cs]Cl" : "[Cs]Cl",
"c1ccccc1[Mg]Br.[Sc](OS(=O)(=O)C(F)(F)F)(OS(=O)(=O)C(F)(F)F)OS(=O)(=O)C(F)(F)F" : "[Sc](OS(=O)(=O)C(F)(F)F)(OS(=O)(=O)C(F)(F)F)OS(=O)(=O)C(F)(F)F",
"c1ccccc1[Mg]Br.[Ti](OC(C)C)(OC(C)C)OC(C)C" : "[Ti](OC(C)C)(OC(C)C)(OC(C)C)OC(C)C",
"c1ccccc1[Mg]Br.Cl[Al](Cl)Cl" : "Cl[Al](Cl)Cl",
"F[Cu]F.F[Sr]F" : "F[Sr]F",
"F[Cu]F.[Al](C)(C)C" : '[Al](C)(C)C',
"F[Cu]F.[Cs]F" : '[Cs]F',
"Br[Cu]Br.[Cs]F" : '[Cs]F',
'Cl[Cu]Cl.[Cs]F' : '[Cs]F',
"[Cu]I.[Cs]F" : '[Cs]F',
'CC(=O)O[Cu]OC(=O)C.[Cs]F' : '[Cs]F'
}
def find_Lewis_Base(df):
""" Same idea as find_Lewis_Acid but not available yet
Parameters:
df (dataframe): dataframe obtain from the NiCOLit csv file
Returns:
df (dataframe): with a new "Lewis Acid" column
"""
Base = []
for i, row in df.iterrows():
base = row["effective_reagents"]
if isNaN(base): # if there are no base/additives, the base will be the solvent.
try:
# if the solvent is not a mix of solvents:
base = dict_solvent_to_smiles[row["solvent"]]
except:
# in case of a solvent mix, a choice is made.
if row["solvent"] == 'tAmOMe + Et2O':
base = 'CCOCC'
elif row["solvent"] == '(EtO)2CH2 + Et2O':
base = 'CCOCC'
elif row["solvent"] == 'THF/DMA' or row["solvent"] == 'THF + DMA':
base = dict_solvants['THF']
else:
print(row["solvent"])
Base.append(base)
# choose the right Lewis base when more than one candidate is present.
new_Base = []
for base in list(Base) :
bases = base.split('.')
if len(bases) == 1:
new_Base.append(base)
else:
if '-' not in base:
print(base)
else: # when there is more than one, we prioritize the positively charged one.
new_bases = []
for smi in bases:
if '-' in smi:
new_bases.append(smi)
if len(np.unique(new_bases)) == 1:
new_Base.append(new_bases[0])
else:
new_base = new_bases[0]
# needs a ranking between bases.
for smi in new_bases:
if smi == '[F-]':
new_base = smi
new_Base.append(new_base)
#print("You have to make a choice between ", np.unique(new_bases))
df["Lewis Base"] = new_Base
# case where df["Lewis Base"] is the same as df["Lewis Acid"]
for i, row in df.iterrows():
if row["Lewis Acid"] == row["Lewis Base"]:
print(row["Lewis Base"])
# number the acids and bases atom by atom:
# compare the acids and the bases again.
# when a base has no proper numbering: use the solvent instead.
return df
# Maps an additive to its category
def additives_mapping(add):
add = str(add)
add = add.replace('[Sc+++]', '[Sc+3]').replace('[Ti++++]', '[Ti+4]').replace('[Al+++]', '[Al+3]').replace('[Fe+++]', '[Fe+3]').replace('[HO-]', '[O-]')
if Chem.MolFromSmiles(add):
return Chem.CanonSmiles(add)
elif add == 'NoLewisAcid':
return add
else:
return 'nan'
# Maps an additive to its category for the entire list
def smiles_additifs(liste_additif) :
base_additif = []
for i in liste_additif :
base_additif.append(additives_mapping(i))
return base_additif
# auxiliary function
def isNaN(num):
return num != num
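# (NaN is the only value that compares unequal to itself, so `num != num` is a cheap NaN test.)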
dict_solvants = {'(EtO)2CH2': 'CCOCOCC',
'(EtO)2CH2 + Et2O': 'CCOCOCC.CCOCC',
'CH3CN': 'CC#N',
'CPME': 'COC1CCCC1',
'DCE': '[Cl]CC[Cl]',
'DMA': 'CC(=O)N(C)C',
'DME': 'COCCOC',
'DMF': 'C(=O)N(C)C',
'DMSO': 'CS(=O)C',
'Et2O': 'CCOCC',
'EtOH': 'CCO',
'MeOH': 'CO',
'NMP': 'CN1CCCC1(=O)',
'THF': 'C1OCCC1',
'THF + DMA': 'C1OCCC1.CC(=O)N(C)C',
'THF/DMA': 'C1OCCC1.CC(=O)N(C)C',
'benzene': 'c1ccccc1',
'dioxane': 'C1COCCO1',
'dioxane - H2O': 'C1COCCO1.O',
'hexane': 'CCCCCC',
'iPr2O': 'CC(C)OC(C)C',
'iPrOH': 'OC(C)C',
'm-xylene': 'Cc1cc(C)ccc1',
'nBu2O': 'CCCCOCCCC',
'o-xylene': 'Cc1c(C)cccc1',
'p-xylene': 'Cc1ccc(C)cc1',
'sBuOH': 'CC(O)CC',
't-amyl alcohol': 'CC(O)(C)CC',
'tAmOMe': 'CCC(C)(C)OC',
'tAmOMe + Et2O': 'CCC(C)(C)OC.CCOCC',
'tBuOH': 'C(C)(C)(C)O',
'tBuOH + H2O': 'C(C)(C)(C)O.O',
'tBuOMe': 'C(C)(C)(C)OC',
'toluene': 'c1ccccc1C',
'toluene - H2O': 'c1ccccc1C.O'}
dict_ligand = {
'nan': 'NoLigand',
#Phosphines
'PCy3': 'C1CCC(P(C2CCCCC2)C2CCCCC2)CC1',
'PCy2(1,2-biPh)': 'c1ccc(-c2ccccc2P(C2CCCCC2)C2CCCCC2)cc1',
'PCy2(1,2-biPhN)': 'CN(C)c1ccccc1-c1ccccc1P(C1CCCCC1)C1CCCCC1',
'PPhCy2': 'c1ccc(P(C2CCCCC2)C2CCCCC2)cc1',
'PhPCy2': 'c1ccc(P(C2CCCCC2)C2CCCCC2)cc1',
'CC(O)c1ccccc1P(c2ccccc2)c3ccccc3': 'CC(O)c1ccccc1P(c1ccccc1)c1ccccc1',
't-BuPCy2': 'CC(C)(C)P(C1CCCCC1)C1CCCCC1',
'PCp3': 'C1=CC(P(C2C=CC=C2)C2C=CC=C2)C=C1',
'PPh3': 'c1ccc(P(c2ccccc2)c2ccccc2)cc1',
'P(o-tolyl)3': 'Cc1ccccc1P(c1ccccc1C)c1ccccc1C',
'P(nBu)3': 'CCCCP(CCCC)CCCC',
'P(tBu)3': 'CC(C)(C)P(C(C)(C)C)C(C)(C)C',
'P(OMe)3': 'COP(OC)OC',
'P(CH2Ph)3': 'c1ccc(CP(Cc2ccccc2)Cc2ccccc2)cc1',
'P(p-OMePh)3': 'COc1ccc(P(c2ccc(OC)cc2)c2ccc(OC)cc2)cc1',
'PMe3': 'CP(C)C',
'PEt3': 'CCP(CC)CC',
'PiPr3': 'CC(C)P(C(C)C)C(C)C',
'PiBu3': 'CC(C)CP(CC(C)C)CC(C)C',
'PBu3': 'CCCCP(CCCC)CCCC',
'PMetBu': 'CP(C(C)(C)C)C(C)(C)C',
'JohnPhos': 'CC(C)(C)P(c1ccccc1-c1ccccc1)C(C)(C)C',
'CyJohnPhos': 'c1ccc(-c2ccccc2P(C2CCCCC2)C2CCCCC2)cc1',
'CyDPEphos': 'c1cc2c(c(P(C3CCCCC3)C3CCCCC3)c1)Oc1c(cccc1P(C1CCCCC1)C1CCCCC1)C2',
'Xantphos': 'CC1(C)c2cccc(P(c3ccccc3)c3ccccc3)c2Oc2c(P(c3ccccc3)c3ccccc3)cccc21',
'CyXantphos': 'CC1(C)c2cccc(P(C3CCCCC3)C3CCCCC3)c2Oc2c(P(C3CCCCC3)C3CCCCC3)cccc21',
'XPhos': 'CC(C)c1cc(C(C)C)c(-c2ccccc2P(C2CCCCC2)C2CCCCC2)c(C(C)C)c1',
'RuPhos': 'CC(C)Oc1cccc(OC(C)C)c1-c1ccccc1P(C1CCCCC1)C1CCCCC1',
'SPhos': 'COc1cccc(OC)c1-c1ccccc1P(C1CCCCC1)C1CCCCC1',
'Tris(2-methoxyphenyl)phosphine': 'COc1ccccc1P(c1ccccc1OC)c1ccccc1OC',
'Tris(4-trifluoromethylphenyl) phosphine': 'FC(F)(F)c1ccc(P(c2ccc(C(F)(F)F)cc2)c2ccc(C(F)(F)F)cc2)cc1',
'PMetBu2': 'CP(C(C)(C)C)C(C)(C)C',
'PPh2Cy': 'c1ccc(P(c2ccccc2)C2CCCCC2)cc1',
'P(p-tolyl)3': 'Cc1ccc(P(c2ccc(C)cc2)c2ccc(C)cc2)cc1',
'P(C6F5)3': 'Fc1c(F)c(F)c(P(c2c(F)c(F)c(F)c(F)c2F)c2c(F)c(F)c(F)c(F)c2F)c(F)c1F',
'P(NMe2)3': 'CN(C)P(N(C)C)N(C)C',
'C1CCCC1P(C2CCCC2)c3cc(c4c(C(C)C)cc(C(C)C)cc4(C(C)C))cc(c4c(C(C)C)cc(C(C)C)cc4(C(C)C))c3': 'CC(C)c1cc(C(C)C)c(-c2cc(-c3c(C(C)C)cc(C(C)C)cc3C(C)C)cc(P(C3CCCC3)C3CCCC3)c2)c(C(C)C)c1',
#Diphosphines
'c6ccc5c(P(C1CCCCC1)C2CCCCC2)c(P(C3CCCCC3)C4CCCCC4)sc5c6': 'c1ccc2c(P(C3CCCCC3)C3CCCCC3)c(P(C3CCCCC3)C3CCCCC3)sc2c1',
'c5cc(P(C1CCCCC1)C2CCCCC2)c(P(C3CCCCC3)C4CCCCC4)s5': 'c1cc(P(C2CCCCC2)C2CCCCC2)c(P(C2CCCCC2)C2CCCCC2)s1',
'c7ccc(c6cc(c1ccccc1)n(c2ccccc2NC(c3ccccc3)P(c4ccccc4)c5ccccc5)n6)cc7': 'c1ccc(-c2cc(-c3ccccc3)n(-c3ccccc3NC(c3ccccc3)P(c3ccccc3)c3ccccc3)n2)cc1',
'CC(C)P(C(C)C)C(Nc1ccccc1n3nc(c2ccccc2)cc3c4ccccc4)c5ccccc5': 'CC(C)P(C(C)C)C(Nc1ccccc1-n1nc(-c2ccccc2)cc1-c1ccccc1)c1ccccc1',
'c7ccc(c6cc(c1ccccc1)n(c2ccccc2NC(c3ccccc3)P(C4CCCCC4)C5CCCCC5)n6)cc7': 'c1ccc(-c2cc(-c3ccccc3)n(-c3ccccc3NC(c3ccccc3)P(C3CCCCC3)C3CCCCC3)n2)cc1',
'C3CCC(P(C1CCCCC1)C2CCCCC2)CC3': 'C1CCC(P(C2CCCCC2)C2CCCCC2)CC1',
'CC(C)c5cc(C(C)C)c(c4cc(c1c(C(C)C)cc(C(C)C)cc1C(C)C)cc(P(C2CCCC2)C3CCCC3)c4)c(C(C)C)c5': 'CC(C)c1cc(C(C)C)c(-c2cc(-c3c(C(C)C)cc(C(C)C)cc3C(C)C)cc(P(C3CCCC3)C3CCCC3)c2)c(C(C)C)c1',
'CC(C)c5cc(C(C)C)c(c4ccc(c1c(C(C)C)cc(C(C)C)cc1C(C)C)c(P(C2CCCC2)C3CCCC3)c4)c(C(C)C)c5': 'CC(C)c1cc(C(C)C)c(-c2ccc(-c3c(C(C)C)cc(C(C)C)cc3C(C)C)c(P(C3CCCC3)C3CCCC3)c2)c(C(C)C)c1',
'dppe': 'c1ccc(P(CCP(c2ccccc2)c2ccccc2)c2ccccc2)cc1',
'depe': 'CCP(CC)CCP(CC)CC',
'dppp': 'c1ccc(P(CCCP(c2ccccc2)c2ccccc2)c2ccccc2)cc1',
'dppb': 'c1ccc(P(CCCCP(c2ccccc2)c2ccccc2)c2ccccc2)cc1',
'dppf': 'c1ccc(P(c2ccccc2)C23C4C5C6C2[Fe]56432789C3C2C7C8(P(c2ccccc2)c2ccccc2)C39)cc1',
'dippf': 'CC(C)P(C(C)C)C12C3C4C5C1[Fe]45321678C2C1C6C7(P(C(C)C)C(C)C)C28',
'dppf-Ipr': 'CCCP(CCC)C12C3C4C5C1[Fe]45321678C2C1C6C7(P(CCC)CCC)C28',
'dppf-tBu': 'CC(C)(C)P(C(C)(C)C)C12C3C4C5C1[Fe]45321678C2C1C6C7(P(C(C)(C)C)C(C)(C)C)C28',
'dppf-Cy': 'C1CCC(CC1)P(C12C3[Fe]4567892(C1C5C34)C1C6C7C9(C81)P(C1CCCCC1)C1CCCCC1)C1CCCCC1',
'dcypf': 'C1CCC(CC1)P(C12C3[Fe]4567892(C1C5C34)C1C6C7C9(C81)P(C1CCCCC1)C1CCCCC1)C1CCCCC1',
'dcype': 'C1CCC(P(CCP(C2CCCCC2)C2CCCCC2)C2CCCCC2)CC1',
'dcypbz': 'c1ccc(P(C2CCCCC2)C2CCCCC2)c(P(C2CCCCC2)C2CCCCC2)c1',
'dcypt': 'c1scc(P(C2CCCCC2)C2CCCCC2)c1P(C1CCCCC1)C1CCCCC1',
'DCYPT': 'c1scc(P(C2CCCCC2)C2CCCCC2)c1P(C1CCCCC1)C1CCCCC1',
'dcypb': 'C1CCC(P(CCCCP(C2CCCCC2)C2CCCCC2)C2CCCCC2)CC1',
'dmpe': 'CP(C)CCP(C)C',
'rac-BINAP': 'c1ccc(P(c2ccccc2)c2ccc3ccccc3c2-c2c(P(c3ccccc3)c3ccccc3)ccc3ccccc23)cc1',
'L1': 'C1CC(P(CCP(C2CCC2)C2CCC2)C2CCC2)C1',
'L2': 'C1CCC(P(CCP(C2CCCC2)C2CCCC2)C2CCCC2)C1',
'L3': 'C1CCCC(P(CCP(C2CCCCCC2)C2CCCCCC2)C2CCCCCC2)CC1',
'L4': 'CC(C)P(CCP(C(C)C)C(C)C)C(C)C',
'L5': 'CC(C)(C)P(CCP(C(C)(C)C)C(C)(C)C)C(C)(C)C',
'L6': 'c1ccc(P(CCP(C2CCCCC2)C2CCCCC2)c2ccccc2)cc1',
# NHC
'ItBu': 'CC(C)(C)N1[C]N(C(C)(C)C)C=C1',
'ICy': '[C]1N(C2CCCCC2)C=CN1C1CCCCC1',
'IPr': 'CC(C)c1cccc(C(C)C)c1N1[C]N(c2c(C(C)C)cccc2C(C)C)C=C1',
'IMes': 'Cc1cc(C)c(N2[C]N(c3c(C)cc(C)cc3C)C=C2)c(C)c1',
'IAd': '[C]1N(C23CC4CC(CC(C4)C2)C3)C=CN1C12CC3CC(CC(C3)C1)C2',
'I(2-Ad)': '[C]1N(C2C3CC4CC(C3)CC2C4)C=CN1C1C2CC3CC(C2)CC1C3',
'SIPr': 'CC(C)c1cccc(C(C)C)c1N1[C]N(c2c(C(C)C)cccc2C(C)C)CC1',
'SIMes': 'Cc1cc(C)c(N2[C]N(c3c(C)cc(C)cc3C)CC2)c(C)c1',
'SItBu': 'CC(C)(C)N1[C]N(C(C)(C)C)CC1',
'CDC': 'CC(C)N1c2ccccc2N(C)C1[C]C1N(C)c2ccccc2N1C(C)C',
'C1-CDC': 'CC(C)N1c2ccccc2N(C(C)C)C1[C]C1N(C)c2ccccc2N1C',
'Me2IPr': 'CC1=C(C)N(c2c(C(C)C)cccc2C(C)C)[C]N1c1c(C(C)C)cccc1C(C)C',
'CCCCN5CN(c3cccc(N2CN(CCCC)c1ccccc12)n3)c4ccccc45': 'CCCCN1[C]N(c2cccc(N3[C]N(CCCC)c4ccccc43)n2)c2ccccc21',
'c2c[n+](C1CCCCC1)cn2C3CCCCC3.[Cl-]': '[Cl-].c1c[n+](C2CCCCC2)cn1C1CCCCC1',
'IIpr-HCl': 'CC(C)n1cc[n+](C(C)C)c1.[Cl-]',
'IIPr': 'CC(C)N1[C]N(C(C)C)C=C1',
'(IMe)2-2HBr': 'Cn1cc[n+](C[n+]2ccn(C)c2)c1.[Br-].[Br-]',
'(IMe)2': 'CN1[C]N(CN2[C]N(C)C=C2)C=C1',
'IPrIMeIIPr-2HBr': 'CC(C)n1cc[n+](C[n+]2ccn(C(C)C)c2)c1.[Br-].[Br-]',
'IPrIMeIIPr': 'CC(C)N1[C]N(CN2[C]N(C(C)C)C=C2)C=C1',
'ItBuIMeIItBu-2HBr': 'CC(C)(C)n1cc[n+](C[n+]2ccn(C(C)(C)C)c2)c1.[Br-].[Br-]',
'ItBuIMeIItBu': 'CC(C)(C)N1[C]N(CN2[C]N(C(C)(C)C)C=C2)C=C1',
'ICyIMeIICy-2HBr': '[Br-].[Br-].c1c[n+](C[n+]2ccn(C3CCCCC3)c2)cn1C1CCCCC1',
'ICyIMeIICy': '[C]1N(CN2[C]N(C3CCCCC3)C=C2)C=CN1C1CCCCC1',
'CC(C)(C)n2cc[n+](n1cc[n+](C(C)(C)C)c1)c2.[Br-].[Br-]': 'CC(C)(C)n1cc[n+](-n2cc[n+](C(C)(C)C)c2)c1.[Br-].[Br-]',
# phen/bipy
'phen': 'c1cnc2c(c1)ccc1cccnc12',
'bipy': 'c1ccc(-c2ccccn2)nc1',
# NHC + Phosphine
'IPr+PPh3': 'CC(C)c1cccc(C(C)C)c1N1C=CN(c2c(C(C)C)cccc2C(C)C)C1.c1ccc(P(c2ccccc2)c2ccccc2)cc1',
'PPh3+ItBu': 'CC(C)(C)N1C=CN(C(C)(C)C)C1.c1ccc(P(c2ccccc2)c2ccccc2)cc1',
'PPh3+IPr': 'CC(C)c1cccc(C(C)C)c1N1C=CN(c2c(C(C)C)cccc2C(C)C)C1.c1ccc(P(c2ccccc2)c2ccccc2)cc1',
'PCy3+ItBu': 'C1CCC(P(C2CCCCC2)C2CCCCC2)CC1.CC(C)(C)N1C=CN(C(C)(C)C)C1',
'PCy3+IPr': 'C1CCC(P(C2CCCCC2)C2CCCCC2)CC1.CC(C)c1cccc(C(C)C)c1N1C=CN(c2c(C(C)C)cccc2C(C)C)C1',
'dppf + PCy3': 'C1CCC(P(C2CCCCC2)C2CCCCC2)CC1.[CH]1[CH][CH][C](P(c2ccccc2)c2ccccc2)[CH]1.[CH]1[CH][CH][C](P(c2ccccc2)c2ccccc2)[CH]1.[Fe]',
# others
'COD': 'C1=CCCC=CCC1',
'acac': 'CC(=O)/C=C(/C)[O-]',
}
Lewis_Acids_to_drop = ['O=C(O[Cs])O[Cs]', 'Cl[Cs]',
'O=S(=O)(O[Sc](OS(=O)(=O)C(F)(F)F)OS(=O)(=O)C(F)(F)F)C(F)(F)F',
'F[Cs]', 'O=P(O[Na])(O[Na])O[Na]', '[Rb+]',
'CC(C)(C)C(=O)O[Cs]', '[Cs+]', 'CC(=O)O[Cu]OC(C)=O', 'F[Sr]F']
| 2.5625
| 3
|
Policy Refinement Using Bayesian Optimization/BipadelRandom.py
|
britig/policy-refinement-bo
| 1
|
12779063
|
"""
Code for collecting failure trajectories using Bayesian Optimization
Project : Policy correction using Bayesian Optimization
Description : The file contains functions for computing failure trajectories given an RL policy and
safety specifications
"""
import numpy as np
import gym
import GPyOpt
from numpy.random import seed
from eval_policy import display
import gym
from network import FeedForwardActorNN
import torch
import pickle
from numpy import arange
from numpy.random import rand
'''
Bayesian Optimization module for uncovering failure trajectories
Safety Requirement
# Requirement 1: The walker should not fall down in any trajectory
'''
#=============================================Global Variables =================================#
policy = None
env = None
traj_spec_dic = {}
traj_count = 0
index_count = 0
'''
The function called from within the Bayesian optimization module
parameters : bounds containing the sampled variables of the state vector
return : calls the specification function, computes its value and returns it
'''
def sample_trajectory(sample_1,sample_2,sample_3):
global policy, env, traj_spec_dic,traj_count, index_count
selected_seed = env.seed(None)
x1 = sample_1
x2 = sample_2
x3 = sample_3
env.reset()
env.env.state[0] = x1
env.env.state[2] = x2
env.env.state[3] = x3
obs = torch.Tensor(env.env.state)
#print(f'env.env.state =========== {env.env.state}')
iters= 0
ep_ret = 0
ep_ret, traj, iter = display(obs,policy,env,False)
additional_data = {'reward':ep_ret}
#Create trajectory to be sent to safety specification
traj = (traj, additional_data)
#print(f'trajectory ========== {traj}')
specification_evaluation = safet_spec_2(traj)
index_count = index_count+1
#Store the set of trajectories with negative evaluation
if specification_evaluation<0:
traj_spec_dic[traj_count] = (traj[0],specification_evaluation,selected_seed,(x1,x2,x3))
traj_count = traj_count + 1
print(f'specification_evaluation ========== {specification_evaluation}')
return specification_evaluation
def run_Random():
x1_max = 2*np.pi
x1_min = 0
x2_max = 1
x2_min = -1
x3_max = 1
x3_min = -1
# generate a random sample from the domain
sample_1 = x1_min + rand(1000) * (x1_max - x1_min)
sample_2 = x2_min + rand(1000) * (x2_max - x2_min)
sample_3 = x3_min + rand(1000) * (x3_max - x3_min)
print(f'sample length ========== {len(sample_1)}')
for i in range(len(sample_1)):
val = sample_trajectory(sample_1[i],sample_2[i],sample_3[i])
print(f'sample1 =========== {sample_1[i]} ======== sample2 ==== {sample_2[i]} ==== sample3 ===== {sample_3[i]}')
'''sample = list()
step = 0.7
for sample_1 in arange(x1_min, x1_max+step, step):
for sample_2 in arange(x2_min, x2_max+step, step):
for sample_3 in arange(x3_min, x3_max+step, step):
sample.append([sample_1,sample_2,sample_3])
print(f'sample length ========== {len(sample)}')
for i in range(len(sample)):
val = sample_trajectory(sample[i][0],sample[i][1],sample[i][2])
print(f'sample1 =========== {sample[i][0]} ======== sample2 ==== {sample[i][1]} ==== sample3 ===== {sample[i][2]}')'''
# 1. Find the initial condition such that the pendulum stabilizes to 0
def safet_spec_1(traj, gamma=0.25):
traj = traj[0]
cos_thetas = np.array(traj).T[0]
theta_dots = np.array(traj).T[2]
stab_vals = 0
for ct, td in zip(cos_thetas, theta_dots):
stab_vals = np.abs(np.arccos(ct))**2 + np.abs(td)**2 + stab_vals*gamma
return -stab_vals
# 1. Find the initial condition such that the reward is less than 50
def safet_spec_2(traj):
traj = traj[1]
reward = traj['reward']
#print(f'reward ========== {reward}')
return -(50-reward)
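# Note: -(50 - reward) is negative exactly when the episode reward falls below 50, which is the
# condition sample_trajectory() uses to record a trajectory as a failure.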
if __name__ == '__main__':
env = gym.make('BipedalWalker-v3')
seed = 0
env.seed(seed)
actor_model = 'Policies/ppo_actor_updatedBipedalWalker-v3.pth'
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim, False)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
run_Random()
print(f'Length trajectory ========== {len(traj_spec_dic)}')
with open('failure_trajectory_bipedal.data', 'wb') as filehandle1:
# store the observation data as binary data stream
pickle.dump(traj_spec_dic, filehandle1)
| 2.84375
| 3
|
affiliate/req/mobidea.py
|
gods-view/AdclickIO
| 0
|
12779064
|
<gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
"""
@author: amigo
@contact: <EMAIL>
@phone: 15618318407
@software: PyCharm
@file: mobidea.py
@time: 2017/4/11 2:58 PM
"""
import urllib
import requests
from affiliate.req.base_req import BaseReq
class MobideaReq(BaseReq):
def __init__(self, url, username, password):
self.url = url
self.username = username
self.password = password
def get_all_offer(self):
"""
:return: flag,msg,result_string
"""
params = {'login': self.username,
'password': <PASSWORD>,
'currency': 'USD',
'tags': 'status,category,payouts,url',
}
url = self.url + '?%s' % urllib.parse.urlencode(params, safe=',')
response = requests.request("GET", url)
print (response.text)
self.log(url, '', response.text)
# print (response.status_code, response.text)
if response.status_code == 401:
return False, 'login or password error', ''
elif response.status_code != 200:
return False, 'unknown error', ''
return True, 'success', response.text
| 2.375
| 2
|
CGPA calculator.py
|
jasonlmfong/UofT-Grade-Analytics
| 0
|
12779065
|
<filename>CGPA calculator.py
from openpyxl import load_workbook
from grade_to_gpa import find_gpa
from Predictor import predict
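# Assumed spreadsheet layout, inferred from the column letters used below (not documented in the
# original file): column A = course code, B = credit weight, C = final grade or "IPR" for a course
# in progress, D = a boolean flag; a row is counted only when D is False.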
def weighted_average_grade(workbook):
"""outputs weighted average of grades"""
total_weight = 0
total_grade = 0
for i in range(2, workbook.max_row+1):
if workbook[f"{'D'}{i}"].value is False:
if workbook[f"{'C'}{i}"].value == "IPR":
total_grade += workbook[f"{'B'}{i}"].value * predict(workbook[f"{'A'}{i}"].value)
else:
total_grade += workbook[f"{'B'}{i}"].value * workbook[f"{'C'}{i}"].value
total_weight += workbook[f"{'B'}{i}"].value
print("your weighted average grade is")
print(total_grade/total_weight)
def unweighted_average_grade(workbook):
"""outputs unweighted average of grades"""
count = 0
total_grade = 0
for i in range(2, workbook.max_row+1):
if workbook[f"{'D'}{i}"].value is False:
if workbook[f"{'C'}{i}"].value == "IPR":
total_grade += predict(workbook[f"{'A'}{i}"].value)
else:
total_grade += workbook[f"{'C'}{i}"].value
count += 1
print("your unweighted average grade is")
print(total_grade/count)
def weighted_average_gpa(workbook):
"""outputs weighted average of gpa"""
total_weight = 0
total_gpa = 0
for i in range(2, workbook.max_row+1):
if workbook[f"{'D'}{i}"].value is False:
if workbook[f"{'C'}{i}"].value == "IPR":
total_gpa += workbook[f"{'B'}{i}"].value * find_gpa(predict(workbook[f"{'A'}{i}"].value))
else:
total_gpa += workbook[f"{'B'}{i}"].value * find_gpa(workbook[f"{'C'}{i}"].value)
total_weight += workbook[f"{'B'}{i}"].value
print("your weighted average gpa is")
print(total_gpa/total_weight)
def unweighted_average_gpa(workbook):
"""outputs unweighted average of gpa"""
count = 0
total_gpa = 0
for i in range(2, workbook.max_row+1):
if workbook[f"{'D'}{i}"].value is False:
if workbook[f"{'C'}{i}"].value == "IPR":
total_gpa += find_gpa(predict(workbook[f"{'A'}{i}"].value))
else:
total_gpa += find_gpa(workbook[f"{'C'}{i}"].value)
count += 1
print("your unweighted average gpa is")
print(total_gpa/count)
if __name__ == "__main__":
wb = load_workbook(filename = 'Transcript.xlsx')
sheet = wb['Sheet1']
weighted_average_grade(sheet)
unweighted_average_grade(sheet)
weighted_average_gpa(sheet)
unweighted_average_gpa(sheet)
| 3.5625
| 4
|
74-search-a-2d-matrix/74-search-a-2d-matrix.py
|
felirox/DS-Algos-Python
| 0
|
12779066
|
<filename>74-search-a-2d-matrix/74-search-a-2d-matrix.py
from typing import List

class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
for row in matrix:
if row[-1]>=target:
if target in row:
return True
return False
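# Note: the row scan above is O(m*n) in the worst case. Because every row is sorted and the rows are
# ordered, a single binary search over the flattened matrix would give O(log(m*n)). A hedged,
# untested sketch of that alternative (not part of the original submission):
#   rows, cols = len(matrix), len(matrix[0])
#   lo, hi = 0, rows * cols - 1
#   while lo <= hi:
#       mid = (lo + hi) // 2
#       val = matrix[mid // cols][mid % cols]
#       if val == target:
#           return True
#       if val < target:
#           lo = mid + 1
#       else:
#           hi = mid - 1
#   return False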
| 3.484375
| 3
|
tests/test_pybunpro.py
|
patrickayoup/pybunpro
| 1
|
12779067
|
<gh_stars>1-10
import pytest
from click.testing import CliRunner
from pybunpro.__main__ import cli
class TestPyBunpro(object):
@pytest.fixture
def runner(self):
return CliRunner()
def test_study_queue(self, requests_mock, api_key, runner,
mock_study_queue_response,
user_information,
study_queue):
requests_mock.get(f'https://bunpro.jp/api/user/{api_key}/study_queue',
json=mock_study_queue_response)
result = runner.invoke(cli, ['--api-key', api_key,
'study-queue'])
assert result.exit_code == 0
assert str(user_information) in result.output
assert str(study_queue) in result.output
def test_study_queue_error(self, requests_mock, api_key, runner,
error_response):
requests_mock.get(f'https://bunpro.jp/api/user/{api_key}/study_queue',
json=error_response, status_code=400)
result = runner.invoke(cli, ['--api-key', api_key,
'study-queue'])
assert result.exit_code == 1
assert 'User does not exist' in result.output
def test_recent_items(self, requests_mock, api_key, runner,
mock_recent_items_response,
user_information,
grammar_point):
requests_mock.get(f'https://bunpro.jp/api/user/{api_key}/recent_items',
json=mock_recent_items_response)
result = runner.invoke(cli, ['--api-key', api_key,
'recent-items'])
assert result.exit_code == 0
assert str(user_information) in result.output
assert str([grammar_point]) in result.output
def test_recent_items_error(self, requests_mock, api_key, runner,
error_response):
requests_mock.get(f'https://bunpro.jp/api/user/{api_key}/recent_items',
json=error_response, status_code=400)
result = runner.invoke(cli, ['--api-key', api_key,
'recent-items'])
assert result.exit_code == 1
assert 'User does not exist' in result.output
def test_debug_mode(self, requests_mock, api_key, runner,
mock_recent_items_response,
user_information,
grammar_point,
caplog):
requests_mock.get(f'https://bunpro.jp/api/user/{api_key}/recent_items',
json=mock_recent_items_response)
result = runner.invoke(cli, ['--api-key', api_key,
'--debug',
'recent-items'])
assert result.exit_code == 0
assert 'Debug Mode Enabled' in caplog.text
| 2.28125
| 2
|
lfs.py
|
g-k/github-org-scripts
| 0
|
12779068
|
<reponame>g-k/github-org-scripts
#!/usr/bin/env python
from __future__ import print_function
import json
import os
import re
import time
from github_selenium import GitHub2FA, WebDriverException
URL = "https://github.com/organizations/mozilla/settings/billing"
GH_LOGIN = os.getenv('GH_LOGIN', "org_owner_login")
GH_PASSWORD = os.getenv('GH_PASSWORD', 'password')
class LFS_Usage(GitHub2FA):
# no init needed
def get_values(self, selector):
# get the "line" and parse it
e = self.get_element(selector)
if e:
text = e.text
match = re.match(r'''\D+(?P<used>\S+)\D+(?P<purchased>\S+)''', text)
if match:
d = match.groupdict()
used = float(d['used'].replace(',', ''))
purchased = float(d['purchased'].replace(',', ''))
else:
print("no element for '{}'".format(selector))
used = purchased = None
return used, purchased
def get_usage(self):
r = {}
r['bw_used'], r['bw_purchased'] = self.get_values('div.mt-2:nth-child(4)')
r['sp_used'], r['sp_purchased'] = self.get_values('div.mt-2:nth-child(5)')
return r
if __name__ == "__main__":
# if all goes well, we quit. If not, the user is dropped into pdb while
# the browser is still alive for introspection
# TODO put behind --debug option
print("Obtain current LFS billing info")
print("Attempting login as '{}', please enter OTP when asked".format(GH_LOGIN))
print(" (if wrong, set GH_LOGIN & GH_PASSWORD in environtment properly)")
quit = True
try:
# hack to allow script reload in IPython without destroying the
# existing instance
driver
except NameError:
driver = None
if not driver:
try:
token = input("token please: ")
driver = LFS_Usage()
driver.login(GH_LOGIN, GH_PASSWORD, URL, 'Billing', token)
results = driver.get_usage()
results['time'] = time.strftime('%Y-%m-%d %H:%M')
print(json.dumps(results))
except WebDriverException:
quit = False
print("Deep error - did browser crash?")
except ValueError as e:
quit = False
print("Navigation issue: {}".format(e.args[0]))
if quit:
driver.quit()
else:
import pdb; pdb.set_trace()
| 2.640625
| 3
|
data_io_fns/export_data/write_matrix.py
|
chrisjdavie/ws_cross_project
| 0
|
12779069
|
<filename>data_io_fns/export_data/write_matrix.py
'''
writes various file structures.
Created on 11 Oct 2012
@author: chris
'''
import h5py
import numpy as np
import csv
def write_zmp_matrix_hdf(fname,data,x,y,z,t,dname='gas density'):
hdf_file = __open_hdf__(fname)
__write_data__(hdf_file,dname,data)
__write_data__(hdf_file,'i coord',x)
__write_data__(hdf_file,'j coord',y)
__write_data__(hdf_file,'k coord',z)
__write_data__(hdf_file,' time',[t])
hdf_file.close()
def __open_hdf__(fname):
return h5py.File(fname,'w')
def __write_data__(hdf_file,dname,data):
lower = data
for i in range(len(np.shape(data))):
lower = lower[i]
dtype = type(lower)
ds = hdf_file.create_dataset(dname, np.shape(data), dtype)
ds[...] = data
def write_matrix_csv(fname,data):
f = open(fname,'wb')
csv_file = csv.writer(f)
for d in data: csv_file.writerow(d)
f.close()
def write_vector_csv(fname,data):
f = open(fname,'wb')
csv_file = csv.writer(f)
for d in data: csv_file.writerow([d])
f.close()
def write_scalar_csv(fname,data):
f = open(fname,'wb')
csv_file = csv.writer(f)
csv_file.writerow([data])
f.close()
| 2.6875
| 3
|
experiments/jz/utils/loading_script_utils/load_dataset.py
|
chkla/metadata
| 13
|
12779070
|
<gh_stars>10-100
import logging
import sys
import hydra
from datasets import config, load_dataset
from hydra.core.config_store import ConfigStore
from bsmetadata.input_pipeline import DataConfig
from bsmetadata.train import show_help
logger = logging.getLogger(__name__)
cs = ConfigStore.instance()
cs.store(name="data_config", node=DataConfig)
@hydra.main(config_name="data_config")
def main(args: DataConfig) -> None:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
if not data_files:
data_files = None
logger.info(config.HF_DATASETS_CACHE)
if args.dataset_name is not None:
logger.info(
"Downloading and loading a dataset from the hub"
f"{args.dataset_name}, {args.dataset_config_name}, data_files={data_files}, cache_dir={args.cache_dir},"
)
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
args.dataset_name,
args.dataset_config_name,
data_files=data_files,
cache_dir=args.cache_dir,
keep_in_memory=False,
download_mode="force_redownload",
)
if "validation" not in raw_datasets.keys():
logger.info("validation not in raw_datasets.keys()")
raw_datasets["validation"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[:{args.validation_split_percentage}%]",
cache_dir=args.cache_dir,
)
raw_datasets["train"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[{args.validation_split_percentage}%:]",
cache_dir=args.cache_dir,
)
else:
extension = args.train_file.split(".")[-1] if not args.extension else args.extension
if extension == "txt":
raise ValueError(
"You have entered a text file for the train data, but this type of file cannot contain metadata "
"columns. Wouldn't you rather have a file in json/jsonl or pandas format?"
)
if extension == "jsonl":
extension = "json"
raw_datasets = load_dataset(
extension, data_files=data_files, cache_dir=args.cache_dir, download_mode="force_redownload"
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
cache_dir=args.cache_dir,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
cache_dir=args.cache_dir,
)
train_dataset = raw_datasets["train"]
val_dataset = raw_datasets["validation"]
logger.info(f" Num train examples = {len(train_dataset)}")
logger.info(f" Num validation examples = {len(val_dataset)}")
logger.info(" Train sample:")
logger.info(f" Train sample n°{0} text:\n{train_dataset[0]['text']}")
logger.info(f" Train sample n°{0} metadata:\n{train_dataset[0]['metadata']}")
if __name__ == "__main__":
if "--help" in sys.argv or "-h" in sys.argv:
show_help()
sys.exit()
main()
| 2.078125
| 2
|
core/analyzers/postgresqlanalyzer.py
|
cmu-db/cmdbac
| 31
|
12779071
|
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import logging
import re
from baseanalyzer import BaseAnalyzer
## =====================================================================
## LOGGING CONFIGURATION
## =====================================================================
LOG = logging.getLogger()
## =====================================================================
## POSTGRESQL ANALYZER
## =====================================================================
class PostgreSQLAnalyzer(BaseAnalyzer):
def __init__(self, deployer):
BaseAnalyzer.__init__(self, deployer)
def analyze_queries(self, queries):
self.queries_stats['num_transactions'] = self.count_transaction(queries) + self.queries_stats.get('num_transactions', 0)
try:
conn = self.deployer.get_database_connection()
conn.set_isolation_level(0)
cur = conn.cursor()
for query in queries:
try:
if self.is_valid_for_explain(query['raw']):
explain_query = 'EXPLAIN ANALYZE {};'.format(query['raw'])
# print explain_query
cur.execute(explain_query)
rows = cur.fetchall()
output = '\n'
for row in rows:
output += row[0] + '\n'
query['explain'] = output
except Exception, e:
pass
# LOG.exception(e)
conn.set_isolation_level(1)
cur.close()
conn.close()
except Exception, e:
LOG.exception(e)
def analyze_database(self):
try:
conn = self.deployer.get_database_connection()
cur = conn.cursor()
database = self.deployer.get_database_name()
# the number of tables
cur.execute("SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public';")
self.database_stats['num_tables'] = int(cur.fetchone()[0])
# the number of indexes
cur.execute("SELECT COUNT(*) FROM pg_stat_all_indexes WHERE schemaname = 'public';")
self.database_stats['num_indexes'] = int(cur.fetchone()[0])
# the number of constraints
cur.execute("SELECT COUNT(*) FROM information_schema.table_constraints WHERE constraint_schema = 'public';")
self.database_stats['num_constraints'] = int(cur.fetchone()[0])
# the number of foreign keys
cur.execute("SELECT COUNT(*) FROM information_schema.referential_constraints WHERE constraint_schema = 'public';")
self.database_stats['num_foreignkeys'] = int(cur.fetchone()[0])
# the full information of tables
cur.execute("SELECT * FROM information_schema.tables WHERE table_schema = 'public';")
self.database_informations['tables'] = str(cur.fetchall())
# the full information of columns
cur.execute("SELECT * FROM information_schema.columns WHERE table_schema = 'public';")
self.database_informations['columns'] = str(cur.fetchall())
# the full information of indexes
cur.execute("SELECT * FROM pg_stat_all_indexes WHERE schemaname = 'public';")
self.database_informations['indexes'] = str(cur.fetchall())
# the full information of constraints
cur.execute("SELECT * FROM information_schema.table_constraints WHERE constraint_schema = 'public';")
self.database_informations['constraints'] = str(cur.fetchall())
# the full information of constraints
cur.execute("SELECT * FROM information_schema.key_column_usage WHERE constraint_schema = 'public';")
self.database_informations['key_column_usage'] = str(cur.fetchall())
# the full information of foreign keys
cur.execute("SELECT * FROM information_schema.referential_constraints WHERE constraint_schema = 'public';")
self.database_informations['foreignkeys'] = str(cur.fetchall())
# the full information of triggers
cur.execute("SELECT * FROM information_schema.triggers WHERE trigger_schema = 'public';")
self.database_informations['triggers'] = str(cur.fetchall())
# the full information of views
cur.execute("SELECT * FROM information_schema.views WHERE table_schema = 'public';")
self.database_informations['views'] = str(cur.fetchall())
cur.close()
conn.close()
except Exception, e:
LOG.exception(e)
| 2.171875
| 2
|
tutorial/pipeline_outputProcessing.py
|
AGAPIA/waymo-open-dataset
| 0
|
12779072
|
# The purpose of this pipeline stage script is to copy only the cleaned output files that are needed in the end (so that we can share them easily)
import os
import pipeline_commons
import shutil
import ReconstructionUtils
def do_output(segmentPath, globalParams):
segmentName = pipeline_commons.extractSegmentNameFromPath(segmentPath)
segmentOutputMinimalPath = os.path.join(globalParams.MINIMAL_OUTPUT_PATH, segmentName)
segmentOutputFullPath = os.path.join(globalParams.BASE_OUTPUT_PATH, segmentName)
if not os.path.exists(segmentOutputMinimalPath):
os.makedirs(segmentOutputMinimalPath, exist_ok=True)
# PAIRs of: (filename to copy, optional or not)
filesToCopyToOutputMin = [(ReconstructionUtils.FILENAME_CARS_TRAJECTORIES, False),
(ReconstructionUtils.FILENAME_PEOPLE_TRAJECTORIES, False),
(ReconstructionUtils.FILENAME_CARLA_BBOXES, True),
(ReconstructionUtils.FILENAME_COMBINED_CARLA_ENV_POINTCLOUD, False),
(ReconstructionUtils.FILENAME_COMBINED_CARLA_ENV_POINTCLOUD_SEGCOLOR, True),
(ReconstructionUtils.FILENAME_CENTERING_ENV, False),
(ReconstructionUtils.FILENAME_CAMERA_INTRISICS, True)
]
for fileToCopy in filesToCopyToOutputMin:
optional = fileToCopy[1]
filename = fileToCopy[0]
srcFullFilePath = os.path.join(segmentOutputFullPath, filename)
dstFullFilePath = os.path.join(segmentOutputMinimalPath, filename)
if os.path.exists(srcFullFilePath) == False:
if optional == False:
assert False, (f"Can't copy filename {filename} because it doesn't exists !")
else:
shutil.copyfile(srcFullFilePath, dstFullFilePath)
if __name__ == "__main__":
import pipeline_params
do_output(pipeline_params.FILENAME_SAMPLE[0], pipeline_params.globalParams)
| 2.296875
| 2
|
models/base_model.py
|
MoustafaMeshry/lsr
| 8
|
12779073
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data import voxceleb_data_provider as voxceleb
from enum import Enum
from models import model_utils
from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
import layers
import tensorflow as tf
GENERATOR_NAME_SCOPE = 'image_gen'
LAYOUT_GENERATOR_NAME_SCOPE = 'layout_gen'
DISCRIMINATOR_NAME_SCOPE = 'discriminator'
LAYOUT_ENCODER_NAME_SCOPE = 'layout_enc'
ENCODER_NAME_SCOPE = 'image_enc'
BACKGROUND_IDX = 0
class BaseHeadSynthesisModel(object):
def __init__(self,
config: Dict,
order: Enum=layers.NHWC):
"""
Initializes a few-shot talking head synthesis model.
This function should be called within `strategy.scope()` if using
tf.distribute.Strategy.
Args:
- config: dictionary, model configuration and options (command line
flags).
- order: Enum, one of {layers.NHWC or NCHW} to specify the channel format
of image tensors.
"""
self.config = config
self.concat_landmarks_to_encoder_input = \
config.concat_conditional_inputs_to_encoder
self.order = order
if self.order != layers.NHWC:
raise NotImplementedError('NCHW format not yet supported!')
self.train_and_eval_networks_initialized = False
self.optimizers_initialized = False
def init_extra_train_and_eval_networks(self):
"""Initializes train losses, networks and optimizers.
This function should be called within `strategy.scope()` if using
tf.distribute.Strategy.
"""
pass
def load_loss_pretrained_weights(self):
"""Loads pre-trained weights for networks used for loss computation."""
if self._vgg_face_loss is not None:
resolution = int(self.config.train_resolution)
input_shape = (None, resolution , resolution, 3)
self._vgg_face_loss.load_pretrained_weights(input_shape=input_shape)
return
def create_optimizers(
self,
lr_warmstart_steps: int,
decay_start_step: int,
decay_end_step: int,
decay_num_intervals: int,
starting_step: Optional[int]=0,
lr_mul_factor: Optional[float]=1.) -> Dict[Text,
tf.keras.optimizers.Optimizer]:
"""Initializes optimizers for training.
This function should be called within `strategy.scope()` if using
tf.distribute.Strategy.
Args:
- lr_warmstart_steps: int, number of steps to apply learning rate warmup.
- decay_start_step: int, train step at which to start learning rate decay.
- decay_end_step: int, train step at which to end learning rate decay.
- decay_num_intervals: int, factor by which to decay the learning rate;
final learning rate = initial_learning_rate / `decay_num_intervals`.
- starting_step: int, the train starting step. This is zero when training
from scratch, or the loaded train step for finetuning a pre-trained
model.
- lr_mul_factor: optional float, multiplier factor for the learning rate;
mainly used to increase the learning rate w.r.t the number of gpus.
Returns:
A dictionary with all the optimizers of the model training.
"""
pass
def parse_inputs(self,
inputs_dict: Dict[Text, tf.Tensor],
mode: Enum=model_utils.Mode.TRAIN,
augmentation: bool=False) -> Tuple[tf.Tensor, ...]:
"""Parses the input dataset into the required few-shot inputs.
Given an input dictionary for a mini-batch, this function constructs the
inputs to the encoder and the generator, as well as the ground truth output
for training/evaluation.
"""
# Parse mode-agnostic inputs.
person_id = inputs_dict[voxceleb.PERSON_ID_KEY]
video_id = inputs_dict[voxceleb.VIDEO_ID_KEY]
video_part_id = inputs_dict[voxceleb.VIDEO_PART_ID_KEY]
frames_few_shots = inputs_dict[voxceleb.FRAMES_KEY]
frame_target = inputs_dict[voxceleb.TARGET_FRAME_KEY]
contours_few_shots = inputs_dict[voxceleb.CONTOURS_KEY]
contour_target = inputs_dict[voxceleb.TARGET_CONTOUR_KEY]
segmaps_few_shots = inputs_dict[voxceleb.SEGMAPS_KEY]
segmap_target = inputs_dict[voxceleb.TARGET_SEGMAP_KEY]
# Cast segmentation label maps to int32
segmaps_few_shots = tf.dtypes.cast(segmaps_few_shots, tf.dtypes.int32)
segmap_target = tf.dtypes.cast(segmap_target, tf.dtypes.int32)
conditional_inputs = contour_target
z_style = inputs_dict['z_style'] if 'z_style' in inputs_dict else None
z_layout = inputs_dict['z_layout'] if 'z_layout' in inputs_dict else None
if z_style is not None or z_layout is not None:
precomputed_latents = (z_style, z_layout)
else:
precomputed_latents = None
basename = tf.strings.join((person_id, video_id, video_part_id),
separator='-')
channel_axis = 3 if self.order == layers.NHWC else 1
if precomputed_latents is None:
encoder_inputs = frames_few_shots
if self.concat_landmarks_to_encoder_input:
encoder_inputs = tf.concat((encoder_inputs, contours_few_shots),
axis=channel_axis + 1)
else:
encoder_inputs = None
# Parse mode-specific inputs.
if mode == model_utils.Mode.TRAIN or mode == model_utils.Mode.EVAL:
x_gt = frame_target
assert not augmentation, 'No augmentation supported yet!'
return (encoder_inputs, conditional_inputs, x_gt, segmap_target, basename,
precomputed_latents)
elif mode == model_utils.Mode.PREDICT:
return encoder_inputs, conditional_inputs, basename, precomputed_latents
else:
raise ValueError('Unsupported mode %s; must be one of '
'{TRAIN, EVAL, PREDICT}.' % str(mode))
def _add_summaries(
self,
encoder_inputs: tf.Tensor,
target_landmarks: tf.Tensor,
target_segmap: tf.Tensor,
real: tf.Tensor,
outputs_dict: Dict[Text, tf.Tensor],
fg_mask: Union[float, tf.Tensor]=1.,
person_id: Optional[tf.Tensor]=None,
video_id: Optional[tf.Tensor]=None,
video_part_id: Optional[tf.Tensor]=None,
input_basename: Optional[Union[Text, tf.Tensor]]=None,
visualize_rgb: Optional[bool]=True) -> Tuple[Dict[Text, tf.Tensor], ...]:
"""Prepares tensorboard summaries for training/evaluation.
This method takes all inputs, ground truth and intermediate outputs and
prepares image/scalar/text tensorboard summaries to visualize the training
or evaluation.
Args:
- encoder_inputs: 4D tensor, the input to the encoder network.
- target_landmarks: 4D tensor, the input to the generator network.
- target_segmap: 4D tensor, the label map of the semantic segmentation (
shape = [batch_size, H, W, 1]).
- real: 4D tensor, the ground truth output.
- outputs_dict: dict string->tf.Tensor, all intermediate and final
outputs.
- fg_mask: Optional 4D tensor, a mask image to apply to the final output
and ground truth. Default is a scalar 1, which leaves the output and
ground truth unmasked.
- person_id: Optional text tensor, person_id for each example.
- video_id: Optional text tensor, video_id for each example.
- video_part_id: Optional text tensor, video_part_id for each example.
- input_basename: Optional text, basenames/base paths for each input in
the minibatch.
- visualize_rgb: Optional bool, whether or not to visualize RGB output.
Returns:
A 3-tuple: (scalar_summaries, image_summaries, text_summaries); each is a
dictionary of str->tf.Tensor.
"""
scalar_summaries_dict = {}
image_summaries_dict = {}
text_summaries_dict = {}
# Retrieve outputs.
fake = outputs_dict['output']
# Tensorboard text summaries.
if person_id is not None:
text_summaries_dict['person_id'] = person_id
if video_id is not None:
text_summaries_dict['video_id'] = video_id
if video_part_id is not None:
text_summaries_dict['video_part_id'] = video_part_id
if input_basename is not None:
text_summaries_dict['basename'] = input_basename
# Visualize few-shot inputs and target rgb frames.
if encoder_inputs is not None:
few_shot_inputs = tf.slice(
encoder_inputs, [0, 0, 0, 0, 0], [-1, -1, -1, -1, 3])
num_viz_shots = min(
5, tf.compat.v1.dimension_value(few_shot_inputs.shape[1]))
few_shot_splits = tf.split(few_shot_inputs, self.config.K, axis=1)
few_shot_splits = few_shot_splits[:num_viz_shots]
few_shot_splits = [tf.squeeze(x, axis=1) for x in few_shot_splits]
input_and_target_frames = few_shot_splits
input_and_target_frames.append(real)
few_shot_tuple_viz = tf.concat(input_and_target_frames, axis=2)
image_summaries_dict['few_shot_inputs_and_target'] = few_shot_tuple_viz
# Add IO tuple visualization.
io_tuple_items = []
io_tuple_items.append(target_landmarks)
if target_segmap is not None:
segmap_out_label_map = outputs_dict['segmap_label_map']
num_seg_classes = self.config.num_segmentation_classes
segmap_out_vis = model_utils.visualize_label_map(
segmap_out_label_map, num_seg_classes=num_seg_classes)
segmap_gt_vis = model_utils.visualize_label_map(
target_segmap, num_seg_classes=num_seg_classes)
io_tuple_items.append(segmap_out_vis)
io_tuple_items.append(segmap_gt_vis)
if visualize_rgb:
if not self.config.synthesize_background:
io_tuple_items.append(tf.clip_by_value(fake, -1, 1))
io_tuple_items.append(tf.clip_by_value(fake * fg_mask, -1, 1))
io_tuple_items.append(real * fg_mask)
# Concatenate along width.
io_tuple = tf.concat(io_tuple_items, axis=2)
image_summaries_dict['io_tuple'] = io_tuple
return scalar_summaries_dict, image_summaries_dict, text_summaries_dict
def compute_losses(
self,
real: tf.Tensor,
segmap_gt: tf.Tensor,
outputs_dict: Dict[Text, tf.Tensor],
training: bool,
fg_mask: Union[float, tf.Tensor]=1.,
conditional_inputs: Optional[tf.Tensor]=None,
gradient_tape: Optional[tf.GradientTape]=None) -> Dict[Text, tf.Tensor]:
"""Computes and returns per-example losses of a mini-batch.
Args:
- real: 4D tensor, the ground truth output.
- segmap_gt: 4D tensor, the label map of the semantic segmentation (
shape = [batch_size, H, W, 1]).
- outputs_dict: dict string->tf.Tensor, all intermediate and final outputs.
- training: boolean, whether or not to run the networks in training mode.
- fg_mask: Optional 4D tensor, a mask image to apply to the final output
and ground truth. Default is a scalar 1, which leaves the output and
ground truth unchanged.
- conditional_inputs: Optional 4D tensor, the conditional input to the
generator network. This is used for the conditional discriminator.
- gradient_tape: Optional tf.GradientTape, tensorflow's gradient_tape
for gradient penalty computation (if any).
Returns:
A dictionary (str->tf.Tensor), the value of each entry is a 1-D tensor
of length equal to the mini-batch size, representing the per-example loss
values.
"""
pass
def compute_latents(
self,
encoder_inputs: tf.Tensor,
num_few_shots: int,
training: bool,
use_vae: bool=False) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes layout and style latents given the input to the encoder(s).
Args:
- encoder_inputs: 4D or 5D tensor, the input to the encoder network.
- num_few_shots: integer, number of few-shot inputs to the encoder.
- training: boolean, whether or not to run the networks in training mode.
- use_vae: boolean, whether the encoder is variational or not. If use_vae
is true AND training is true, then noise sampled from N(0,1) is added
to the standard deviation of the style latent.
Returns: a 2-tuple representing the style and layout latents, respectively.
"""
pass
def process(
self,
encoder_inputs: tf.Tensor,
conditional_inputs: tf.Tensor,
training: bool,
precomputed_latents: Optional[Tuple[tf.Tensor, ...]]=None,
) -> Dict[Text, Union[tf.Tensor, Tuple[tf.Tensor, ...]]]:
"""Runs the forward pass and returns all intermediate and final outputs.
Args:
- encoder_inputs: 4D or 5D tensor, the input to the encoder network.
- conditional_inputs: 4D tensor, the input to the generator network.
- training: boolean, whether or not to run the networks in training mode.
- precomputed_latents: Optional 2-tuple of tf.Tensor, pre-computed latent
codes for the input mini-batch. If not None, then the encoder network
is not run, and the pre-computed latents are used instead. If a single
latent is being used, then the 2nd element in the tuple is None.
Returns: a dictionary holding all intermediate and final outputs.
"""
pass
def train(
self,
inputs_dict: Dict[Text, tf.Tensor],
global_batch_size: int,
train_g_step: bool=True,
train_d_step: bool=True) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs a train step over the input mini/sub-mini batch.
Runs the training step over the input minibatch and aggregates the train
losses over the "global" batch size.
Args:
- inputs_dict: dictionary of strings->tensors representing an input
minibatch.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
- train_g_step: boolean, whether to update the generator weights or not.
- train_d_step: boolean, whether to update the discriminator weights or
not.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated according to the
global batch size.
- outputs_dict: dict string->tf.Tensor, all intermediate and final outputs.
- summaries: A 3-tuple representing scalar, image and text summaries
returned by _add_summaries(). See _add_summaries() for more details.
"""
pass
@tf.function
def train_distributed(
self,
strategy: tf.distribute.Strategy,
dist_inputs_dict: Dict[Text, Any],
global_batch_size: int,
train_g_step: bool=True,
train_d_step: bool=True) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs a distributed train step and aggregates losses across replicas.
Runs the train step over the global minibatch and aggregates the train
losses across different replicas.
Args:
- strategy: tf.distribute.Strategy to be used for building strategy-aware
networks.
- dist_inputs_dict: dictionary of strings->tensors representing an input
minibatch to be distributed across replicas.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
- train_g_step: boolean, whether to update the generator weights or not.
- train_d_step: boolean, whether to update the discriminator weights or
not.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated properly across
different replicas (i.e over the global batch size).
- outputs_dict: dict string->PerReplica object, all intermediate and
final outputs, but not aggregated (concatenated) across replicas.
- summaries: A 3-tuple representing scalar, image and text summaries
returned by _add_summaries(). See _add_summaries() for more details.
Summary tensors are PerReplica objects that are not aggregated
(concatenated) across replicas.
"""
(per_replica_loss_dict, per_replica_outputs_dict,
per_replica_summaries) = strategy.run(
self.train, args=(
dist_inputs_dict, global_batch_size, train_g_step, train_d_step))
loss_dict = {}
for loss_key, loss_val in per_replica_loss_dict.items():
loss_dict[loss_key] = strategy.reduce(
tf.distribute.ReduceOp.SUM, loss_val, axis=None)
return loss_dict, per_replica_outputs_dict, per_replica_summaries
def evaluate(
self,
inputs_dict: Dict[Text, tf.Tensor],
global_batch_size: int) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs an evaluation step and updates evaluation metrics.
Runs the evaluation step over the input minibatch and aggregates the eval
losses over the "global" batch size. A side effect of this method is
updating the state of evaluation metrics in self.eval_metrics_dict.
Args:
- inputs_dict: dictionary of strings->tensors representing an input
minibatch.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated according to the
global batch size.
- outputs_dict: dict string->tf.Tensor, all intermediate and final outputs.
- summaries: A 3-tuple representing scalar, image and text summaries
returned by _add_summaries(). See _add_summaries() for more details.
"""
pass
@tf.function
def evaluate_distributed(
self,
strategy: tf.distribute.Strategy,
dist_inputs_dict: Dict[Text, Any],
global_batch_size: int) -> Tuple[
Dict[Text, tf.Tensor], Dict[Text, Any], Tuple[Dict[Text, Any], ...]]:
"""Runs a distributed evaluation step and aggregates losses across replicas.
Runs the evaluation step over the global minibatch and aggregates the eval
losses across different replicas. A side effect of this method is
updating the state of evaluation metrics in self.eval_metrics_dict.
Args:
- strategy: tf.distribute.Strategy to be used for building strategy-aware
networks.
- dist_inputs_dict: dictionary of strings->tensors representing an input
minibatch to be distributed across replicas.
- global_batch_size: integer representing the "global" minibatch size,
which is equal to one minibatch_size * num_gpus.
Returns: a 3-tuple:
- loss_dict: dictionary of all train losses aggregated properly across
different replicas (i.e over the global batch size).
- outputs_dict: dict string->PerReplica object, all intermediate and
final outputs, but not aggregated (concatenated) across replicas.
- summaries: A 3-tuple representing scalar, image and text summaries
returned by _add_summaries(). See _add_summaries() for more details.
Summary tensors are PerReplica objects that are not aggregated
(concatenated) across replicas.
"""
(per_replica_loss_dict, per_replica_outputs_dict,
per_replica_summaries) = strategy.run(
self.evaluate, args=(dist_inputs_dict, global_batch_size))
loss_dict = {}
for loss_key, loss_val in per_replica_loss_dict.items():
loss_dict[loss_key] = strategy.reduce(
tf.distribute.ReduceOp.SUM, loss_val, axis=None)
return loss_dict, per_replica_outputs_dict, per_replica_summaries
def get_networks(self) -> Dict[Text, Union[
tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary with all networks and submodules of the model."""
pass
def get_optimizers(self) -> Dict[Text, tf.keras.optimizers.Optimizer]:
"""Returns a dictionary with all the otpimizers of the model training."""
pass
def reset_eval_metrics(self):
"""Resets the internal state of all evaluation metrics."""
for metric in self.eval_metrics_dict.values():
metric.reset_states()
| 1.859375
| 2
|
chaosplt_experiment/storage/model/__init__.py
|
chaostoolkit/chaosplatform-experiment
| 0
|
12779074
|
<filename>chaosplt_experiment/storage/model/__init__.py
# -*- coding: utf-8 -*-
from .discovery import Discovery
from .execution import Execution
from .experiment import Experiment
from .init import Init
from .recommendation import Recommendation
__all__ = ["Discovery", "Execution", "Experiment", "Execution", "Init",
"Recommendation"]
| 1.179688
| 1
|
connectdjango/settings.py
|
gabrielstonedelza/connectdjango
| 0
|
12779075
|
import os
import locale
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# ALLOWED_HOSTS = ['.connectdjango.com', 'localhost', '192.168.127.12'] #use for production
ALLOWED_HOSTS = ['127.0.0.1']  # use for development
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
'crispy_forms',
'social_django',
'channels',
]
AUTHENTICATION_BACKENDS = [
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
'users.authentication.EmailAuthBackend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
]
ROOT_URLCONF = 'connectdjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'connectdjango.wsgi.application'
ASGI_APPLICATION = 'connectdjango.asgi.application'
# channels
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'connectdjango',
'USER': 'connectdjangouser',
'PASSWORD': '<PASSWORD>?',
'HOST': 'localhost',
'PORT': '5432',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'blogs'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')
SOCIAL_AUTH_GITHUB_KEY = config('SOCIAL_AUTH_GITHUB_KEY')
SOCIAL_AUTH_GITHUB_SECRET = config('SOCIAL_AUTH_GITHUB_SECRET')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
EMAIL_HOST = config('EMAIL_HOST', default='localhost')
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
CSRF_FAILURE_VIEW = 'blog.views.csrf_failure'
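# A hedged illustration (not part of the original settings module): the python-decouple
# config() calls above expect a .env file (or environment variables) roughly along these
# lines; every value below is a placeholder, not a real credential.
#
#   SECRET_KEY=replace-me
#   DEBUG=True
#   SOCIAL_AUTH_GOOGLE_OAUTH2_KEY=replace-me
#   SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET=replace-me
#   SOCIAL_AUTH_GITHUB_KEY=replace-me
#   SOCIAL_AUTH_GITHUB_SECRET=replace-me
#   EMAIL_USE_TLS=True
#   EMAIL_HOST=smtp.example.com
#   EMAIL_HOST_USER=replace-me
#   EMAIL_HOST_PASSWORD=replace-me
#   EMAIL_PORT=587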
| 1.773438
| 2
|
challenge/migrations/0001_initial.py
|
cedricnoel/django-hearthstone
| 0
|
12779076
|
<gh_stars>0
# Generated by Django 2.2.dev20190116205049 on 2019-01-16 20:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('decks', '0007_auto_20190116_2054'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Challenge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(default='pending', max_length=200)),
('deck1', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='challenger_deck', to='decks.Deck')),
('deck2', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='challenged_deck', to='decks.Deck')),
('player1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='challenger', to=settings.AUTH_USER_MODEL)),
('player2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='challenged', to=settings.AUTH_USER_MODEL)),
('winner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='wins', to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.78125
| 2
|
scraps/fitsS21/utils.py
|
FaustinCarter/scraps
| 9
|
12779077
|
<gh_stars>1-10
"""A set of simple utility functions for array math."""
import numpy as np
import scipy.signal as sps
def reduce_by_midpoint(array):
"""Subtract off and divide by middle array element.
Sorts the array before picking mid-point, but returned
array is not sorted."""
midpoint = sorted(array)[int(np.round((len(array) - 1) / 2.0))]
return (array - midpoint) / midpoint
def filter_data(array, filter_win_length=0):
"""Filter data with a Savitsky-Golay filter of window-length
filter_win_length.
filter_win_length must be odd and >= 3. This function will
enforce that requirement by adding 1 to filter_win_length
until it is satisfied."""
# If no window length is supplied, default to 1% of the data vector length, or 3
if filter_win_length == 0:
filter_win_length = int(np.round(len(array) / 100.0))
if filter_win_length % 2 == 0:
filter_win_length += 1
if filter_win_length < 3:
filter_win_length = 3
return sps.savgol_filter(array, filter_win_length, 1)
def mask_array_ends(array, mask=None):
"""Return the ends of an array.
If mask is an int, returns mask items from each end of array.
If mask is a float, treats mask as a fraction of array length.
If mask is an array or a slice, return array[mask]."""
if mask is None:
masked_array = array
elif type(mask) == float:
pct_mask = int(len(array) * mask)
masked_array = np.concatenate((array[:pct_mask], array[-pct_mask:]))
elif type(mask) == int:
masked_array = np.concatenate((array[:mask], array[-mask:]))
elif type(mask) in [np.ndarray, list, slice]:
masked_array = array[mask]
else:
raise ValueError("Mask type must be number, array, or slice")
return masked_array
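# A minimal usage sketch (not part of the original module) showing the three helpers
# above on synthetic data; the array values and the 10% mask fraction are assumptions.
if __name__ == "__main__":
    demo = np.linspace(1.0, 2.0, 101)
    reduced = reduce_by_midpoint(demo)       # centered and scaled by the sorted midpoint
    smoothed = filter_data(demo)             # Savitzky-Golay with automatic window length
    ends = mask_array_ends(demo, mask=0.1)   # first and last 10% of the samples
    print(reduced[50], smoothed.shape, ends.shape)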
| 3.453125
| 3
|
Curso Em Video-python/PYTHON (MUNDO 1, MUNDO 2 E MUNDO 3)/exercicios/ex0094Unindo_dict_listas.py
|
AlamoVinicius/code-pratice
| 0
|
12779078
|
""" Crie um programa que leia nome, sexo e idade de várias pessoas, guardando os dados de cada pessoa em um
dicionário e todos os dicionários em uma lista. No final, msotre:
a - quantas pessoas foram cadastradas/ b - a média de idade do grupo/ c- uma lista com todas as mulheres.
d - uma lista com todas as pessoas com idade acima da média"""
lista = []
mulheres = []
maiores_media_idade = []
pessoas = {}
media = 0
while True:
pessoas['nome'] = str(input('Nome: '))  # no need to call pessoas.clear(): the dict keys
# are simply overwritten with the new values on each iteration.
pessoas['sexo'] = str(input('sexo: [M/F] ')).strip().upper()[0]
while pessoas['sexo'] not in 'MF':
pessoas['sexo'] = str(input('Por favor, digite somente "M" ou "F": ')).strip().upper()[0]
pessoas['idade'] = int(input('idade: '))
resp = str(input('Deseja continuar? [S/N]: '))
lista.append(pessoas.copy())
if resp in 'Nn':
break
# number of people registered:
print(f"{'='* 40}")
print(f'{"RESULTADOS":^40}')
print(f"{'='* 40}")
print(f'Foram cadastrado {len(lista)} pessoas.')
# average age of the group
for dados in lista:
media += dados['idade']
media /= len(lista)
print(f'A média do grupo de pessoas é {media:.1f}')
# list of the women in the group
for dados in lista:
if dados['sexo'] == 'F':
mulheres.append(dados['nome'])
print('As mulheres do grupo são: ')
for dados in mulheres:
print(dados)
# list of people whose age is above the average
print('As pessoas com idade acima da média são: ')
for dados in lista:
if dados['idade'] >= media:
for key, values in dados.items():
print(f'{key} = {values}; ', end='')
''' Some differences from the course's solution were observed; they can be checked at the following link
https://youtu.be/ETnExBCFeps but nothing that changes my solution very much. '''
| 3.9375
| 4
|
tuesmon_ncurses/ui/views/auth.py
|
tuesmoncom/tuesmon-ncurses
| 0
|
12779079
|
# -*- coding: utf-8 -*-
"""
tuesmon_ncurses.ui.views.auth
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from tuesmon_ncurses.ui.widgets import generic, auth
from . import base
class LoginView(base.View):
login_button = None
def __init__(self, username_text, password_text):
# Header
header = generic.banner()
# Username and password prompts
max_prompt_length = max(len(username_text), len(password_text))
max_prompt_padding = max_prompt_length + 2
self._username_editor = generic.editor()
username_prompt = auth.username_prompt(username_text, self._username_editor, max_prompt_padding)
self._password_editor = generic.editor(mask="♥")
password_prompt = auth.password_prompt(password_text, self._password_editor, max_prompt_padding)
# Login button
self.login_button = generic.button("login")
login_button_widget = auth.wrap_login_button(self.login_button)
# Notifier
self.notifier = generic.Notifier("")
login_widget = auth.Login([header,
generic.box_solid_fill(" ", 2),
username_prompt,
generic.box_solid_fill(" ", 1),
password_prompt,
generic.box_solid_fill(" ", 2),
login_button_widget,
generic.box_solid_fill(" ", 1),
self.notifier])
self.widget = generic.center(login_widget)
@property
def username(self):
return self._username_editor.get_edit_text()
@property
def password(self):
return self._password_editor.get_edit_text()
| 2.234375
| 2
|
generated/python/proto-google-cloud-language-v1beta2/google/cloud/proto/language/v1beta2/language_service_pb2_grpc.py
|
landrito/api-client-staging
| 18
|
12779080
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import google.cloud.proto.language.v1beta2.language_service_pb2 as google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2
class LanguageServiceStub(object):
"""Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AnalyzeSentiment = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentResponse.FromString,
)
self.AnalyzeEntities = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesResponse.FromString,
)
self.AnalyzeEntitySentiment = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentResponse.FromString,
)
self.AnalyzeSyntax = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxResponse.FromString,
)
self.AnnotateText = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnnotateText',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextResponse.FromString,
)
class LanguageServiceServicer(object):
"""Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
def AnalyzeSentiment(self, request, context):
"""Analyzes the sentiment of the provided text.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnalyzeEntities(self, request, context):
"""Finds named entities (currently proper names and common nouns) in the text
along with entity types, salience, mentions for each entity, and
other properties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnalyzeEntitySentiment(self, request, context):
"""Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes
sentiment associated with each entity and its mentions.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnalyzeSyntax(self, request, context):
"""Analyzes the syntax of the text and provides sentence boundaries and
tokenization along with part of speech tags, dependency trees, and other
properties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnnotateText(self, request, context):
"""A convenience method that provides all syntax, sentiment, and entity
features in one call.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LanguageServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'AnalyzeSentiment': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeSentiment,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentResponse.SerializeToString,
),
'AnalyzeEntities': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeEntities,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesResponse.SerializeToString,
),
'AnalyzeEntitySentiment': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeEntitySentiment,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentResponse.SerializeToString,
),
'AnalyzeSyntax': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeSyntax,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxResponse.SerializeToString,
),
'AnnotateText': grpc.unary_unary_rpc_method_handler(
servicer.AnnotateText,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.language.v1beta2.LanguageService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 1.679688
| 2
|
lametro/context_processors.py
|
datamade/la-metro-councilmatic
| 5
|
12779081
|
<reponame>datamade/la-metro-councilmatic
from django.conf import settings
def recaptcha_public_key(request):
# See: https://developers.google.com/recaptcha/docs/faq
recaptcha_dev_key = '<KEY>'
return {
'recaptcha_public_key': getattr(settings, 'RECAPTCHA_PUBLIC_KEY', recaptcha_dev_key)
}
| 1.78125
| 2
|
pygoap/memory.py
|
bitcraft/storymaker
| 6
|
12779082
|
<gh_stars>1-10
"""
Memories are stored precepts.
"""
class MemoryManager(set):
"""
Store and manage precepts.
"""
max_size = 300
def add(self, other):
assert (other is not None)
if len(self) > MemoryManager.max_size:
self.pop()
super().add(other)
def of_class(self, klass):
for i in self:
if isinstance(i, klass):
yield i
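# A minimal usage sketch (not part of the original module): MemoryManager behaves as a
# bounded set of precepts; the DummyPrecept class below is a made-up placeholder.
if __name__ == "__main__":
    class DummyPrecept:
        pass

    memory = MemoryManager()
    memory.add(DummyPrecept())
    memory.add(DummyPrecept())
    print(len(memory), len(list(memory.of_class(DummyPrecept))))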
| 2.953125
| 3
|
SuperTracker_EPM_Template.py
|
EBGU/RodentTracker
| 1
|
12779083
|
import os
import numpy as np
import time
from multiprocessing import Pool
import psutil
import cv2
import matplotlib.pyplot as plt
import av #for better performance
##############################################################################
#For EPM, please select points from the OPEN arm to the CLOSED arm and press y:
# o1
# c3 c4
# o2
#For OFT, please select points clockwise from the upper left corner and press y:
# UL1 UR2
#
# LL4 LR3
#Press y to confirm remove background.
#For EPM please select the central neutral zone(four points, like OFT) and press y to confirm.
##############################################################################
######################
####Set Parameters####
######################
home = 'yourFolder'
src = home + '/Video'
tgt = home + '/Picture'
rmbg_tgt = home + '/Picture_rmbg'
logDir = home + '/log'
isEPM = True # whether EPM or OFT
startT = 60 # start processing at startT seconds (here 60 s)
cropLen = 600 # crop only 600s(10min)
imgSize = 500 #resize Image
if isEPM:
margin = 0.1 #for EPM, keep a margin of 10% image size
else:
margin = 0.2 #for OFT, keep a margin of 20% image size
useEllipse = False #whether to fit the mouse with an ellipse; otherwise use a bounding box
refLenth = 100 # the arm length of the EPM or the size of the OFT
centerCutOff = 0.5 # define the center zone, for OFT only!
multiThread = psutil.cpu_count(False)
video2img = True
img2binary = True
useAverFrame = True
cache = home + '/Cache'
tracking = True
preview = False
windowSize = 5 #window size for speed
Filter = 'aver' #how to filter the position; currently provides 'aver', 'median' and 'none'
######################
##Function and Class##
######################
def padding(img): #padding img in case rotate to the outside
h, w = img.shape[:2]
img_padded = np.zeros(shape=(w+h, w+h), dtype=np.uint8)
img_padded[w//2:w//2+h,h//2:h//2+w] = img
return img_padded
x = 0
vector = []
def mouse_img_cod(event, cod_x, cod_y, flags, param):
global vector
global x
if event == cv2.EVENT_LBUTTONDOWN:
if x == 0 :
x += 1
vector.append([cod_x,cod_y])
else:
x = 0
vector.append([cod_x,cod_y])
class ImageCorrection():
def __init__(self,refPoints,expand,half_size,EPM,crop=0.7):
self.refPoints = refPoints
self.center = half_size
self.EPM = EPM
self.crop = int(crop*self.center)
if EPM:
self.target = np.float32([[expand,self.center], [2*self.center-expand, self.center], [self.center, expand], [self.center, 2*self.center-expand]])
else:
self.target = np.float32([[expand,expand], [2*self.center-expand, expand], [2*self.center-expand, 2*self.center-expand], [expand, 2*self.center-expand]])
self.M = cv2.getPerspectiveTransform(self.refPoints , self.target)
def __call__(self,img):
img = cv2.warpPerspective(img,self.M,(2*self.center,2*self.center))
if self.EPM:
img[0:self.crop,0:self.crop] = 255
img[2*self.center-self.crop:2*self.center,0:self.crop] = 255
img[2*self.center-self.crop:2*self.center,2*self.center-self.crop:2*self.center] = 255
img[0:self.crop,2*self.center-self.crop:2*self.center] = 255
return img
class ExtractAndWarp():
def __init__(self,tgt,cache,startT,cropLen,expand=25,half_size=250,EPM = False,preview=False):
self.tgt = tgt
self.cache = cache
self.startT =startT
self.cropLen = cropLen
self.expand =expand
self.half_size =half_size
self.EPM =EPM
self.preview =preview
def __call__(self,direction):
fileAddr,vector = direction
folder = os.path.join(self.tgt,fileAddr.split('.')[0].split('/')[-1])
cache = os.path.join(self.cache,fileAddr.split('.')[0].split('/')[-1])+'.npy'
try:
os.mkdir(folder)
except:
pass
warper = ImageCorrection(vector,self.expand,self.half_size,self.EPM)
cap = cv2.VideoCapture(fileAddr)
fps = cap.get(cv2.CAP_PROP_FPS)
startAt = int( self.startT * fps) #in seconds
#Record only cropLen seconds
length = int(min((self.startT+self.cropLen) * fps,cap.get(cv2.CAP_PROP_FRAME_COUNT)))
cap.release()
container = av.open(fileAddr)
for i,frame in enumerate(container.decode(video=0)):
if i < np.ceil(fps*10):
img = frame.to_ndarray(format='rgb24')
img = warper(img)
img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)/ np.ceil(fps*10)
try:
avgImg += img
except:
avgImg = img
if i >= startAt:
img = frame.to_ndarray(format='rgb24')
img = warper(img)
if self.preview:
cv2.imshow("Image",img)
k = cv2.waitKey(10)
if k == 27: # key code of the Esc key
cv2.destroyAllWindows()
break
else:
cv2.imwrite(os.path.join(folder,str(i-startAt+1)+'.jpg'), img,[cv2.IMWRITE_JPEG_QUALITY, 100])
if i >= length:
break
np.save(cache,avgImg)
container.close()
return True
class frameAverage():
def __init__(self,imgArray,dirs,nThread):
self.imgArray = imgArray
self.windowSize = len(imgArray) // nThread + 1
self.dirs = dirs
#@timer
def __call__(self,index):
maxIndex = min(index+self.windowSize,len(self.imgArray))
for path in self.imgArray[index:maxIndex]:
img = cv2.imread(os.path.join(self.dirs,path), cv2.IMREAD_GRAYSCALE).astype(np.double)
img = img / (maxIndex-index)
try:
avgImg += img
except:
avgImg = img
return avgImg
class rmBackground():
def __init__(self,imgArray,dirs,src,tgt,background,nThread,threshold=25):
self.imgArray = imgArray
self.windowSize = len(imgArray) // nThread + 1
self.dirs = dirs
self.background = background
self.tgt = tgt
self.src = src
self.threshold =threshold
#@timer
def __call__(self,index):
maxIndex = min(index+self.windowSize,len(self.imgArray))
for path in self.imgArray[index:maxIndex]:
img = cv2.imread(os.path.join(self.src,self.dirs,path), cv2.IMREAD_GRAYSCALE).astype(np.double)
img = img - self.background
img[np.where(img<self.threshold)] = 0
img = img.astype(np.uint8)
img = cv2.medianBlur(img,5)
img = 255-cv2.equalizeHist(img)
img = cv2.medianBlur(img,5)
cv2.imwrite(os.path.join(self.tgt,self.dirs,path), img)
return True
class logger(object):
def __init__(self,logDir):
self.logDir = logDir
def __call__(self,x,fileName):
print(x)
f = open(os.path.join(self.logDir,fileName+'.log'),'a')
f.write(str(x)+'\n')
f.close()
def trackingEPM(img,ori = None,kernal=5,thres = 150,preview=False): #kernel has to be odd
result_gray=cv2.medianBlur(img, kernal)
#result_binary = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,25,50) #use otsu autothreshold method
ret,result_binary=cv2.threshold(result_gray,thres,255,0)
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(255-result_binary, 4)
largest = np.argmax(stats[:,4])
stats[largest,4] = -1
largest = np.argmax(stats[:,4])
left = stats[largest,0]
top = stats[largest,1]
right = stats[largest,0]+stats[largest,2]
down = stats[largest,1]+stats[largest,3]
center = centroids[largest]
if preview:
fit = cv2.rectangle(ori, (left, top), (right, down), (255, 25, 25), 1)
fit = cv2.circle(fit, np.int32(center),3, (25, 25, 255), 1)
cv2.imshow("Image",fit)
k = cv2.waitKey(2)
if k == 32:
cv2.waitKey(0)
return (left,right,top,down,center)
def trackingOFT(img,ori = None,kernal=11,thres = 100,preview=False):
result_gray=cv2.medianBlur(img, kernal)
#result_binary = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,25,50)
ret,result_binary=cv2.threshold(result_gray,thres,255,0) # fixed binary threshold
edge = cv2.Canny(result_binary,10,245)
y,x=np.nonzero(edge) #change coordination
edge_list = np.array([[_x,_y] for _x,_y in zip(x,y)]) #list edge-points
try:
ellipse = cv2.fitEllipse(edge_list) # fit ellipse and return (x,y) as center,(2a,2b) as radius and angle
except:
ellipse = [(0,0),(0,0),1000]
if preview:
fit=cv2.ellipse(ori, ellipse, (255,25,25),1)
cv2.imshow("Image",fit)
cv2.waitKey(10)
return ellipse
def Identity(x):
return x[-1]
class Speedometer():
def __init__(self,windowSize=5,Filter = 'aver'):
self.container = []
self.windowSize = windowSize
self.filter = Filter
assert(self.filter in ['aver','median','none'])
self.speed = []
def update(self,x):
self.container.append(x)
if len(self.container) == self.windowSize+2:
if self.filter == 'aver':
pastCord = np.mean(self.container[0:windowSize],axis=0)
curentCord = np.mean(self.container[2:],axis=0)
elif self.filter == 'median':
pastCord = np.median(self.container[0:windowSize],axis=0)
curentCord = np.median(self.container[2:],axis=0)
elif self.filter == 'none':
pastCord = self.container[windowSize//2+1]
curentCord = self.container[windowSize//2+3]
else:
pass
speed = ((pastCord[0]-curentCord[0])**2+(pastCord[1]-curentCord[1])**2)**0.5
self.speed.append(speed)
del(self.container[0])
return speed
else:
return 0
def aver(self):
x = np.mean(self.speed)
if np.isnan(x):
return 0
else:
return x
######################
####Prepare images####
######################
if video2img:
if os.path.isdir(src):
try:
os.mkdir(tgt)
except:
pass
try:
os.mkdir(logDir)
except:
pass
try:
os.mkdir(cache)
except:
pass
else:
raise ValueError('No video folder detected!')
vList = os.listdir(src)
direction=[]
for v in vList:
cap = cv2.VideoCapture(os.path.join(src,v))
fps = cap.get(cv2.CAP_PROP_FPS)
startAt = startT * fps
midFrame = int(min(cropLen * fps,cap.get(cv2.CAP_PROP_FRAME_COUNT)-startAt)) // 2
cap.set(cv2.CAP_PROP_POS_FRAMES,startAt+midFrame)
_,img = cap.read()
img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
#img = padding(img)
cv2.imshow("Image",img)
cv2.setMouseCallback("Image", mouse_img_cod)
k = cv2.waitKey(0)
if k ==121: # press y
cv2.destroyAllWindows()
cap.release()
direction.append((os.path.join(src,v),np.float32(vector)))
print((os.path.join(src,v),vector))
vector = []
print(len(direction))
extractor = ExtractAndWarp(tgt,cache,startT,cropLen,expand=int(margin*imgSize*0.5),half_size=imgSize//2,EPM=isEPM,preview=False)
for d in direction:
extractor(d)
if img2binary:
try:
os.mkdir(rmbg_tgt)
except:
pass
dirList = os.listdir(tgt)
for dirs in dirList:
try:
os.mkdir(os.path.join(rmbg_tgt,dirs))
except:
pass
frameList = os.listdir(os.path.join(tgt,dirs))
if useAverFrame:
aver = frameAverage(frameList,os.path.join(tgt,dirs),multiThread)
with Pool(multiThread) as p:
averaged=np.array(p.map(aver,range(0,len(frameList),aver.windowSize)))
averaged = np.median(averaged,axis=0)
else:
averaged = np.load(os.path.join(cache,dirs)+'.npy')
_averaged = averaged.astype(np.uint8)
print(dirs)
cv2.imshow('img',_averaged)
k = cv2.waitKey(0)
if k == 121: #121 is y
cv2.destroyAllWindows()
rmer = rmBackground(frameList,dirs,tgt,rmbg_tgt,averaged,multiThread)
with Pool(multiThread) as p:
p.map(rmer,range(0,len(frameList),rmer.windowSize))
printer = logger(logDir)
if tracking:
print('Tracking! Ready? Go!')
if isEPM:
vList = os.listdir(src)
for v in vList:
speedo = Speedometer(windowSize=windowSize,Filter=Filter)
cap = cv2.VideoCapture(os.path.join(src,v))
fps = cap.get(cv2.CAP_PROP_FPS)
cap.release()
localtime = time.asctime( time.localtime(time.time()) )
v = v.split('.')[0]
printer(localtime,v)
printer('FPS = ' + str(fps),v)
vector = []
frameList = os.listdir(os.path.join(tgt,v))
aver = frameAverage(frameList,os.path.join(tgt,v),multiThread)
with Pool(multiThread) as p:
averaged=np.array(p.map(aver,range(0,len(frameList),aver.windowSize)))
averaged = np.median(averaged,axis=0)
_averaged = averaged.astype(np.uint8)
cv2.imshow('img',_averaged)
cv2.setMouseCallback("img", mouse_img_cod)
k = cv2.waitKey(0)
if k ==121: # press y
cv2.destroyAllWindows()
printer('NeutralZone is:',v)
printer(vector,v)
printer('Time\tFrame\tleft\tright\ttop\tdown\tcenter_x\tcenter_y\tisOpen_center\tisOpen_any\tOpenTimeRatio_center\tOpenTimeRatio_any\tCurrentSpeed\tAverageSpeed',v)
neutralL = np.min(np.array(vector)[:,0])
neutralR = np.max(np.array(vector)[:,0])
neutralT = np.min(np.array(vector)[:,1])
neutralD = np.max(np.array(vector)[:,1])
ioc = 0
ioa = 1
for i in range(len(frameList)):
img = cv2.imread(os.path.join(rmbg_tgt,v,str(i+1)+'.jpg'),cv2.IMREAD_GRAYSCALE)
ori = cv2.imread(os.path.join(tgt,v,str(i+1)+'.jpg'))
left,right,top,down,(center_x,center_y) = trackingEPM(img,ori,preview=preview)
speed = speedo.update([center_x,center_y])*fps*refLenth/(2*imgSize*(1-margin))
averSpeed = speedo.aver()*fps*refLenth/(2*imgSize*(1-margin))
if center_x <= neutralL or center_x >= neutralR:
isOpen_center = 1
ioc += 1
else:
isOpen_center = 0
if left <= neutralL or right >= neutralR:
isOpen_any = 1
ioa += 1
else:
isOpen_any = 0
printer('{:0>10.3f}\t{:0>6.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:.0f}\t{:.0f}\t{:.5f}\t{:.5f}\t{:0>7.3f}\t{:0>7.3f}'.format((i+1)/fps,i+1,left,right,top,down,center_x,center_y,isOpen_center,isOpen_any,ioc/(i+1),ioa/(i+1),speed,averSpeed),v)
else:
vList = os.listdir(src)
for v in vList:
speedo = Speedometer(windowSize=windowSize,Filter=Filter)
cap = cv2.VideoCapture(os.path.join(src,v))
fps = cap.get(cv2.CAP_PROP_FPS)
cap.release()
localtime = time.asctime( time.localtime(time.time()) )
v = v.split('.')[0]
printer(localtime,v)
printer('FPS = ' + str(fps),v)
printer('Time\tFrame\tcenter_x\tcenter_y\ta\tb\tangle\tcenter_distance\tisCenter\tCenterTimeRatio_center\tCurrentSpeed\tAverageSpeed',v)
ic = 0
frameList = os.listdir(os.path.join(tgt,v))
for i in range(len(frameList)):
img = cv2.imread(os.path.join(rmbg_tgt,v,str(i+1)+'.jpg'),cv2.IMREAD_GRAYSCALE)
ori = cv2.imread(os.path.join(tgt,v,str(i+1)+'.jpg'))
if useEllipse:
(center_x,center_y),(a,b),angle = trackingOFT(img,ori,preview=preview)
else:
left,right,top,down,(center_x,center_y)= trackingEPM(img,ori,preview=preview)
a = right-left
b = down-top
angle = 0
speed = speedo.update([center_x,center_y])*fps*refLenth/(2*imgSize*(1-margin))
averSpeed = speedo.aver()*fps*refLenth/(2*imgSize*(1-margin))
dis_x = abs(center_x-imgSize//2)
dis_y = abs(center_y-imgSize//2)
distance = ((dis_x**2+dis_y**2)**0.5)*refLenth/(imgSize*(1-margin))
if max(dis_x,dis_y) < imgSize*0.5*(1-margin)*centerCutOff:
isCenter = 1
ic += 1
else:
isCenter = 0
printer('{:0>10.3f}\t{:0>6.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>7.3f}\t{:0>7.3f}\t{:0>7.3f}\t{:0>7.3f}\t{:.0f}\t{:.5f}\t{:0>7.3f}\t{:0>7.3f}'.format((i+1)/fps,i+1,center_x,center_y,a,b,angle,distance,isCenter,ic/(i+1),speed,averSpeed),v)
| 1.929688
| 2
|
data_filters/src/square.py
|
smontanaro/csvprogs
| 0
|
12779084
|
<reponame>smontanaro/csvprogs
#!/usr/bin/env python
"""
===========
%(PROG)s
===========
-----------------------------------------------------
Convert data from point-to-point to square movements
-----------------------------------------------------
:Author: <EMAIL>
:Date: 2013-03-15
:Copyright: TradeLink LLC 2013
:Version: 0.1
:Manual section: 1
:Manual group: data filters
SYNOPSIS
========
%(PROG)s [ -n ] [ -k n ] [ -s sep ] [ -h ] [ -b ] [ -H ]
OPTIONS
=======
-k n use field n as a key field (default: 1) - may be given
multiple times to build compound key. Last value of n is used as
the 'squared' value. The first field is assumed to be the datetime
(x axis). The key may be a column name as well.
-n X axis is numeric, not time.
-s sep Use sep as the field separator (default is comma).
-b Instead of removing entire rows, just blank duplicate fields.
-H Skip header at start of input - ignored if any element of the key is
non-numeric.
DESCRIPTION
===========
When thinking about market prices, it's useful to think of a given
price holding until there is a change. If all you do is plot the
changes, you wind up with diagonal lines between two quote prices.
It's more correct to think of the earlier price holding until just
before the new price is received.
Consider a simple CSV file:
2012-10-25T09:18:15.593480,F:C6Z12,10057
2012-10-25T09:18:38.796756,F:C6Z12,10058
2012-10-25T09:18:38.796769,F:C6Z12,10058
2012-10-25T09:18:38.796912,F:C6Z12,10058
2012-10-25T09:18:38.796924,F:C6Z12,10058
2012-10-25T09:18:38.796930,F:C6Z12,10059
The lines connecting the first two points and the last two points will
be plotted diagonally. That's probably not really the way these data
should be viewed. Once a price is seen, it should remain in effect
until the next price is seen. Also, the repetitive points with a y
value of 10058 cause useless work for downstream filters.
Given the above input, this filter emits the following:
2012-10-25T09:18:15.593480,F:C6Z12,10057
2012-10-25T09:18:38.796755,F:C6Z12,10057
2012-10-25T09:18:38.796756,F:C6Z12,10058
2012-10-25T09:18:38.796929,F:C6Z12,10058
2012-10-25T09:18:38.796930,F:C6Z12,10059
That is, it elides all duplicate y values and inserts a single
duplicate just before each price change to 'square up' the plot.
The net effect is that plots look square and are generally rendered
much faster because they contain many fewer points.
SEE ALSO
========
* nt
* pt
* mpl
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import csv
import getopt
import os
import datetime
import copy
import dateutil.parser
from six.moves import zip
PROG = os.path.basename(sys.argv[0])
def main(args):
opts, args = getopt.getopt(args, "hk:s:nbH")
keys = []
numeric = False
sep = ','
blank = False
skip_header = False
for opt, arg in opts:
if opt == "-h":
usage()
raise SystemExit
elif opt == "-s":
sep = arg
elif opt == "-n":
numeric = True
elif opt == "-k":
try:
keys.append(int(arg))
except ValueError:
# Non-int implies use of dictionaries later
keys.append(arg)
elif opt == "-b":
blank = True
elif opt == "-H":
skip_header = True
if not keys:
keys.append(1)
if str in set(type(k) for k in keys):
# At least one key is a string - use DictReader/DictWriter
rdr = csv.DictReader(sys.stdin, delimiter=sep)
wtr = csv.DictWriter(sys.stdout, fieldnames=rdr.fieldnames,
delimiter=sep)
wtr.writerow(dict(list(zip(wtr.fieldnames, wtr.fieldnames))))
else:
rdr = csv.reader(sys.stdin, delimiter=sep)
wtr = csv.writer(sys.stdout, delimiter=sep)
if skip_header:
wtr.writerow(next(rdr))
for row in square(remove_dups(rdr, keys, blank), 0, keys[-1], numeric):
wtr.writerow(row)
def remove_dups(iterator, keys, blank):
last = []
row = None
for row in iterator:
value = [row[k] for k in keys]
if value != last:
yield row
elif blank:
for k in keys:
row[k] = ""
yield row
last = value
if row is not None:
yield row
def square(iterator, t, y, numeric):
r1 = next(iterator)
yield r1
for r2 in iterator:
row = copy.copy(r2)
row[y] = r1[y]
yield row
yield r2
r1 = r2
def usage():
print(__doc__ % globals(), file=sys.stderr)
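# A minimal sketch (not part of the original filter): exercising remove_dups() and
# square() directly on in-memory rows instead of stdin; the sample rows are made up.
def _demo():
    rows = [
        ["2012-10-25T09:18:15", "F:C6Z12", "10057"],
        ["2012-10-25T09:18:38", "F:C6Z12", "10058"],
        ["2012-10-25T09:18:39", "F:C6Z12", "10058"],
        ["2012-10-25T09:18:40", "F:C6Z12", "10059"],
    ]
    # Elide duplicate y values, then insert a 'squaring' point before each change.
    for out in square(remove_dups(iter(rows), keys=[2], blank=False), 0, 2, False):
        print(out)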
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 2.953125
| 3
|
src/main/seed.py
|
Couapy/shelf
| 0
|
12779085
|
<reponame>Couapy/shelf
from django.contrib.auth.models import User
from django_seed import Seed
from .models import Book, Chapter, Shelf
seeder = Seed.seeder()
seeder.add_entity(User, 3)
seeder.add_entity(Shelf, 5)
seeder.add_entity(Book, 25)
seeder.add_entity(Chapter, 150)
inserted_pks = seeder.execute()
for shelf in Shelf.objects.all():
if len(Shelf.objects.filter(slug=shelf.slug)) != 1:
shelf.slug += str(shelf.pk)
shelf.save()
for book in Book.objects.all():
if len(Book.objects.filter(slug=book.slug)) != 1:
book.slug += str(book.pk)
book.save()
for chapter in Chapter.objects.all():
if len(Chapter.objects.filter(slug=chapter.slug)) != 1:
chapter.slug += str(chapter.pk)
chapter.save()
| 2.125
| 2
|
python/_collections/py_collections_deque/main.py
|
bionikspoon/hackerrank-challenges
| 0
|
12779086
|
<reponame>bionikspoon/hackerrank-challenges<gh_stars>0
from collections import deque
from fileinput import input
def parse_input(data):
_ = int(data.pop(0))
return [op.split(' ', 1) for op in data]
def create_stack(ops):
stack = deque()
def_cmd = {
'append': lambda d, *args: d.append(*args),
'pop': lambda d, *args: d.pop(*args),
'popleft': lambda d, *args: d.popleft(*args),
'appendleft': lambda d, *args: d.appendleft(*args),
}
for op in ops:
cmd = def_cmd[op[0]]
cmd_args = op[1:]
cmd(stack, *cmd_args)
return stack
def main():
ops = parse_input([line.rstrip() for line in input()])
stack = create_stack(ops)
print(' '.join(stack))
if __name__ == '__main__':
main()
| 3.515625
| 4
|
pontoon/translate/tests/test_views.py
|
nanopony/pontoon
| 1
|
12779087
|
<reponame>nanopony/pontoon
import pytest
from django.urls import reverse
from waffle.testutils import override_switch
@pytest.mark.django_db
def test_translate_behind_switch(client):
url = reverse('pontoon.translate.next')
response = client.get(url)
assert response.status_code == 404
with override_switch('translate_next', active=True):
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_translate_template(client):
url = reverse('pontoon.translate.next')
with override_switch('translate_next', active=True):
response = client.get(url)
assert response.status_code == 200
assert 'Translate.Next' in response.content
| 2.0625
| 2
|
tests/test_stock_availability.py
|
mrkevinomar/saleor
| 4
|
12779088
|
<reponame>mrkevinomar/saleor
import pytest
from django.test import override_settings
from saleor.core.exceptions import InsufficientStock
from saleor.warehouse.availability import (
are_all_product_variants_in_stock,
check_stock_quantity,
get_available_quantity,
get_available_quantity_for_customer,
get_quantity_allocated,
)
from saleor.warehouse.models import Allocation
COUNTRY_CODE = "US"
def test_check_stock_quantity(variant_with_many_stocks):
assert check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 7) is None
def test_check_stock_quantity_out_of_stock(variant_with_many_stocks):
with pytest.raises(InsufficientStock):
check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 8)
def test_check_stock_quantity_with_allocations(
variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
assert check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 4) is None
def test_check_stock_quantity_with_allocations_out_of_stock(
variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
with pytest.raises(InsufficientStock):
check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 5)
def test_check_stock_quantity_without_stocks(variant_with_many_stocks):
variant_with_many_stocks.stocks.all().delete()
with pytest.raises(InsufficientStock):
check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 1)
def test_check_stock_quantity_without_one_stock(variant_with_many_stocks):
variant_with_many_stocks.stocks.get(quantity=3).delete()
assert check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 4) is None
def test_get_available_quantity_without_allocation(order_line, stock):
assert not Allocation.objects.filter(order_line=order_line, stock=stock).exists()
available_quantity = get_available_quantity(order_line.variant, COUNTRY_CODE)
assert available_quantity == stock.quantity
def test_get_available_quantity(variant_with_many_stocks):
available_quantity = get_available_quantity(variant_with_many_stocks, COUNTRY_CODE)
assert available_quantity == 7
def test_get_available_quantity_with_allocations(
variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
available_quantity = get_available_quantity(variant_with_many_stocks, COUNTRY_CODE)
assert available_quantity == 4
def test_get_available_quantity_without_stocks(variant_with_many_stocks):
variant_with_many_stocks.stocks.all().delete()
available_quantity = get_available_quantity(variant_with_many_stocks, COUNTRY_CODE)
assert available_quantity == 0
@override_settings(MAX_CHECKOUT_LINE_QUANTITY=15)
def test_get_available_quantity_for_customer(variant_with_many_stocks, settings):
stock = variant_with_many_stocks.stocks.first()
stock.quantity = 16
stock.save(update_fields=["quantity"])
available_quantity = get_available_quantity_for_customer(
variant_with_many_stocks, COUNTRY_CODE
)
assert available_quantity == settings.MAX_CHECKOUT_LINE_QUANTITY
def test_get_available_quantity_for_customer_without_stocks(variant_with_many_stocks):
variant_with_many_stocks.stocks.all().delete()
available_quantity = get_available_quantity_for_customer(
variant_with_many_stocks, COUNTRY_CODE
)
assert available_quantity == 0
def test_get_quantity_allocated(
variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
quantity_allocated = get_quantity_allocated(variant_with_many_stocks, COUNTRY_CODE)
assert quantity_allocated == 3
def test_get_quantity_allocated_without_allocation(variant_with_many_stocks):
quantity_allocated = get_quantity_allocated(variant_with_many_stocks, COUNTRY_CODE)
assert quantity_allocated == 0
def test_get_quantity_allocated_without_stock(variant_with_many_stocks):
variant_with_many_stocks.stocks.all().delete()
quantity_allocated = get_quantity_allocated(variant_with_many_stocks, COUNTRY_CODE)
assert quantity_allocated == 0
def test_are_all_product_variants_in_stock_all_in_stock(stock):
assert are_all_product_variants_in_stock(
stock.product_variant.product, COUNTRY_CODE
)
def test_are_all_product_variants_in_stock_stock_empty(allocation, variant):
allocation.quantity_allocated = allocation.stock.quantity
allocation.save(update_fields=["quantity_allocated"])
assert not are_all_product_variants_in_stock(variant.product, COUNTRY_CODE)
def test_are_all_product_variants_in_stock_lack_of_stocks(variant):
assert not are_all_product_variants_in_stock(variant.product, COUNTRY_CODE)
def test_are_all_product_variants_in_stock_warehouse_without_stock(
variant_with_many_stocks,
):
variant_with_many_stocks.stocks.first().delete()
assert are_all_product_variants_in_stock(
variant_with_many_stocks.product, COUNTRY_CODE
)
| 2.296875
| 2
|
prototype/ukwa/lib/utils.py
|
GilHoggarth/ukwa-manage
| 1
|
12779089
|
<gh_stars>1-10
'''
Created on 10 Feb 2016
@author: andy
'''
import os
from urlparse import urlparse
def url_to_surt(in_url, host_only=False):
'''
Converts a URL to SURT form.
'''
parsed = urlparse(in_url)
authority = parsed.netloc.split(".")
authority.reverse()
surt = "http://(%s," % ",".join(authority)
if parsed.path and not host_only:
surt = "%s%s" %( surt , os.path.dirname(parsed.path) )
return surt
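# Illustrative examples (inputs/outputs are assumptions, not from the original code):
#   url_to_surt("http://www.example.com/a/b.html")       -> "http://(com,example,www,/a"
#   url_to_surt("http://www.example.com/a/b.html", True) -> "http://(com,example,www,"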
| 3.21875
| 3
|
src/promnesia/__init__.py
|
halhenke/promnesia
| 1,327
|
12779090
|
from pathlib import Path
from .common import PathIsh, Visit, Source, last, Loc, Results, DbVisit, Context, Res
# add deprecation warning so eventually this may be converted to a namespace package?
import warnings
warnings.warn("DEPRECATED! Please import directly from 'promnesia.common', e.g. 'from promnesia.common import Visit, Source, Results'", DeprecationWarning)
| 1.3125
| 1
|
yourenv/pythonClub/pythonClubProject/views.py
|
oconnalla/pythonClub
| 0
|
12779091
|
<reponame>oconnalla/pythonClub<filename>yourenv/pythonClub/pythonClubProject/views.py
from django.shortcuts import render, get_object_or_404
from .models import Meeting, Meeting_Minutes, Resource, Event
from .forms import MeetingForm, MeetingMinutesForm
from django.contrib.auth.decorators import login_required
#may need to remove meeting details from imports
# Create your views here.
def index (request):
return render(request, 'pythonClubProject/index.html')
def getMeetings(request):
meeting_list=Meeting.objects.all()
return render(request, 'pythonClubProject/meetings.html' ,{'meeting_list' : meeting_list})
def getMeetingMinutes(request):
meeting_list=Meeting.objects.all()
return render(request, 'pythonClubProject/meetingMinutes.html' ,{'meeting_list' : meeting_list})
def getResources(request):
meeting_list=Meeting.objects.all()
return render(request, 'pythonClubProject/resources.html' ,{'meeting_list' : meeting_list})
def getEvents(request):
meeting_list=Meeting.objects.all()
return render(request, 'pythonClubProject/events.html',{'meeting_list' : meeting_list})
def meetingDetails(request, id):
meet=get_object_or_404(Meeting_Minutes, pk=id)
return render(request, 'pythonClubProject/meetingDetails.html', {'meet': meet})
@login_required
def newMeeting(request):
form=MeetingForm
if request.method=='POST':
form=MeetingForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=MeetingForm()
else:
form=MeetingForm()
return render(request, 'pythonClubProject/newMeeting.html', {'form': form})
@login_required
def newMeetingMinutes(request):
form=MeetingMinutesForm
if request.method=='POST':
form=MeetingMinutesForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
            form=MeetingMinutesForm()
else:
form=MeetingMinutesForm()
return render(request, 'pythonClubProject/newMeetingMinutes.html', {'form': form})
def loginmessage(request):
return render(request, 'pythonClubProject/loginmessage.html')
def logoutmessage(request):
return render(request, 'pythonClubProject/logoutmessage.html')
| 2.140625
| 2
|
internalScripts/analysis-scripts/SummarizeGapfillResultsTables.py
|
kbase/probabilistic_annotation
| 0
|
12779092
|
<reponame>kbase/probabilistic_annotation<gh_stars>0
#!/usr/bin/python
# Generate summary statistics comparing two gapfill results tables from AnalyzeGapfillResults.py (one for probanno
# and one for non-probanno)
import optparse
import sys
usage = "%prog [Probanno_result_table] [Non_probanno_result_table]"
description = """Generate summary statistics like number of uniquely-added reactions, etc."""
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option("-o", "--added_only", help="Set this flag to ONLY include added reactions (not reversibility changes) in counts of genes (though not reactions) and in average probability calculations.",
action="store_true", dest="addedonly", default=False)
parser.add_option("-f", "--final_only", help="Set this flag to only include reactions that are in the final models (e.g. those that were integrated into the model and not deleted by reaction sensitivity analysis) in the counts and average calcualtions. By default we include all reactions in a gapfill solution regardless of downstream analysis", action="store_true", dest="finalonly", default=False)
parser.add_option("-d", "--deleted_only", help="The opposite of -f, set this flag to ONLY print information about gapfilled reactions that have been deleted from the model.", action="store_true", dest="deletedonly", default=False)
parser.add_option("-g", "--print_genes", help="Print detailed gene information instead of summaries.", action="store_true", dest="printgenes", default=False)
parser.add_option("-r", "--print_rxns", help="Print detailed reactions information instead of summaries.", action="store_true", dest="printrxns", default=False)
(options, args) = parser.parse_args()
if len(args) < 2:
print usage
exit(1)
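# Example invocation (illustrative; the two table file names are placeholders):
#   python SummarizeGapfillResultsTables.py probanno_results.txt standard_results.txt -f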
def parse_result_table(filename, addedReactionsOnly=False, finalReactionsOnly=False, deletedOnly = False):
''' Parse a results table into a useful data structure keyed by solution number then other information
'''
if finalReactionsOnly and deletedOnly:
raise IOError("Setting both final reactions only and deleted reactions only is a contradiction. Please choose one or the other.")
fid = open(filename)
results = {}
for line in fid:
# Skip header row
if "rxn_likelihood" in line:
continue
spl = line.strip("\r\n").split("\t")
solnum = spl[0]
rxnid = spl[1]
objval = spl[2]
gapfill_uuid = spl[3] #not used
rxn_likelihood = spl[4]
is_revchange = spl[5]
gpr = spl[6]
newgenes = spl[7].split(";")
numnew = spl[8] #not used (only the aggregate number after taking unique is useful)
rxn_in_final_model = spl[9]
if is_revchange == "1" and addedReactionsOnly:
continue
if finalReactionsOnly and rxn_in_final_model == "False":
continue
if deletedOnly and rxn_in_final_model == "True":
continue
if rxn_likelihood == "NO_PROBABILITY":
rxn_likelihood = "0"
rxninfo = { "likelihood" : rxn_likelihood,
"gpr" : gpr,
"newgenes" : newgenes,
"revchange" : is_revchange }
if solnum in results:
results[solnum]["rxninfo"][rxnid] = rxninfo
else:
results[solnum] = {}
results[solnum]["objective"] = objval
results[solnum]["rxninfo"] = {}
results[solnum]["rxninfo"][rxnid] = rxninfo
return results
def getProbabilities(results, solnum, reaction_list, addedReactionsOnly=False):
''' Get the probabilities from a set of gapfilled reactions
'''
probabilities = []
for reaction in reaction_list:
if addedReactionsOnly and results[solnum]["rxninfo"][reaction]["revchange"] == "1":
continue
probabilities.append(float(results[solnum]["rxninfo"][reaction]["likelihood"]))
return probabilities
def getUniqueGenes(results, solnum, reaction_list, addedReactionsOnly = False):
''' Get the genes uniquely added from gapfill (e.g. those not in the model previously).
'''
unique_genes = set()
for reaction in reaction_list:
if addedReactionsOnly and results[solnum]["rxninfo"][reaction]["revchange"] == "1":
continue
for gene in results[solnum]["rxninfo"][reaction]["newgenes"]:
if gene == '':
continue
unique_genes.add(gene)
return unique_genes
def getReactions(results, solnum, gene):
''' Get the reaction(s) associated with a gene
'''
reactions = set()
for reaction in results[solnum]["rxninfo"].keys():
if gene in results[solnum]["rxninfo"][reaction]["newgenes"]:
reactions.add(reaction)
return reactions
def safeAverage(numarray):
try:
avg = sum(numarray)/len(numarray)
return avg
except ZeroDivisionError:
return None
probanno_results = parse_result_table(args[0], addedReactionsOnly = options.addedonly, finalReactionsOnly = options.finalonly, deletedOnly = options.deletedonly )
non_probanno_results = parse_result_table(args[1], addedReactionsOnly = options.addedonly, finalReactionsOnly = options.finalonly, deletedOnly = options.deletedonly )
if options.printrxns:
print "\t".join( [ "reaction", "likelihood", "whenfound", "solnum" ] )
elif options.printgenes:
print "\t".join( [ "whenfound", "gene", "reactin" ] )
else:
print "\t".join( [ "probanno_filename", "non_probanno_filename", "solution_number_compared",
"number_common", "number_probanno_only", "number_nonprobanno_only",
"average_common", "average_probanno_only", "average_nonprobanno_only",
"unique_genes_common", "unique_genes_probanno_only", "unique_genes_nonprobanno_only"]
)
for sol in probanno_results.keys():
if sol not in non_probanno_results:
continue
# Get reactions in common and unique to each solution
probanno_reactions = set(probanno_results[sol]["rxninfo"].keys())
non_probanno_reactions = set(non_probanno_results[sol]["rxninfo"].keys())
all_reactions = probanno_reactions | non_probanno_reactions
common_reactions = probanno_reactions & non_probanno_reactions
unique_to_probanno = probanno_reactions - non_probanno_reactions
unique_to_non_probanno = non_probanno_reactions - probanno_reactions
if options.printrxns:
for reaction in common_reactions:
if options.addedonly and probanno_results[sol]["rxninfo"][reaction]["revchange"] == "1":
continue
print "%s\t%s\t%s\t%s" %(reaction, str(probanno_results[sol]["rxninfo"][reaction]["likelihood"]), "COMMON", sol)
for reaction in unique_to_probanno:
if options.addedonly and probanno_results[sol]["rxninfo"][reaction]["revchange"] == "1":
continue
print "%s\t%s\t%s\t%s" %(reaction, str(probanno_results[sol]["rxninfo"][reaction]["likelihood"]), "PROBANNO_ONLY", sol)
for reaction in unique_to_non_probanno:
if options.addedonly and non_probanno_results[sol]["rxninfo"][reaction]["revchange"] == "1":
continue
print "%s\t%s\t%s\t%s" %(reaction, str(non_probanno_results[sol]["rxninfo"][reaction]["likelihood"]), "NON_PROBANNO_ONLY", sol)
continue
# Get unique genes for interesting sets
common_newgenes = getUniqueGenes(probanno_results, sol, common_reactions, addedReactionsOnly = options.addedonly)
unique_to_probanno_newgenes = getUniqueGenes(probanno_results, sol, unique_to_probanno, addedReactionsOnly = options.addedonly) - common_newgenes
unique_to_non_probanno_newgenes = getUniqueGenes(non_probanno_results, sol, unique_to_non_probanno, addedReactionsOnly = options.addedonly) - common_newgenes
if options.printgenes:
for gene in unique_to_probanno_newgenes:
rxns = getReactions(probanno_results, sol, gene)
for rxn in rxns:
print "%s\t%s\t%s" %("PROBANNO_ONLY", gene, rxn)
for gene in unique_to_non_probanno_newgenes:
rxns = getReactions(non_probanno_results, sol, gene)
for rxn in rxns:
print "%s\t%s\t%s" %("NON_PROBANNO_ONLY", gene, rxn)
for gene in common_newgenes:
rxns = getReactions(non_probanno_results, sol, gene)
for rxn in rxns:
print "%s\t%s\t%s" %("COMMON", gene, rxn)
continue
n_common_newgenes = len(common_newgenes)
n_unique_to_probanno_newgenes = len(unique_to_probanno_newgenes)
n_unique_to_non_probanno_newgenes = len(unique_to_non_probanno_newgenes)
# Get probability distributions for interesting sets
common_probabilities = getProbabilities(probanno_results, sol, common_reactions, addedReactionsOnly = options.addedonly)
unique_to_probanno_probabilities = getProbabilities(probanno_results, sol, unique_to_probanno, addedReactionsOnly = options.addedonly)
unique_to_non_probanno_probabilities = getProbabilities(non_probanno_results, sol, unique_to_non_probanno, addedReactionsOnly = options.addedonly)
# Get average probabilities for these sets
common_avg = safeAverage(common_probabilities)
unq_to_probanno_avg = safeAverage(unique_to_probanno_probabilities)
unq_to_non_probanno_avg = safeAverage(unique_to_non_probanno_probabilities)
n_common = len(common_reactions)
n_unq_to_probanno = len(unique_to_probanno)
n_unq_to_non_probanno = len(unique_to_non_probanno)
    # Generate histogram data
# TODO
print "\t".join( [ args[0], args[1], sol,
str(n_common), str(n_unq_to_probanno), str(n_unq_to_non_probanno),
str(common_avg), str(unq_to_probanno_avg), str(unq_to_non_probanno_avg),
str(n_common_newgenes), str(n_unique_to_probanno_newgenes), str(n_unique_to_non_probanno_newgenes)
] )
| 2.265625
| 2
|
podcasts/utils/serializers.py
|
janwh/selfhosted-podcast-archive
| 26
|
12779093
|
import datetime
from django.core.serializers.json import DjangoJSONEncoder
class PodcastsJSONEncoder(DjangoJSONEncoder):
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.timedelta):
return round(o.total_seconds() * 1000)
else:
return super().default(o)
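# Illustrative example: datetime.timedelta(seconds=1.5) is serialized as the integer
# 1500 (milliseconds); any other type falls back to DjangoJSONEncoder's handling.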
| 2.296875
| 2
|
31/00/list.remove.1.py
|
pylangstudy/201705
| 0
|
12779094
|
l = [1,2,1,3]
l.remove(4)
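# NOTE: 4 does not occur in the list, so the call above raises
# "ValueError: list.remove(x): x not in list" and print(l) is never reached.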
print(l)
| 2.859375
| 3
|
src/scripts/distribution_samples.py
|
secimTools/GalaxyTools
| 10
|
12779095
|
<gh_stars>1-10
#!/usr/bin/env python
################################################################################
# Date: 2016/July/06 ed. 2016/July/11
#
# Module: distribution_samples.py
#
# VERSION: 1.1
#
# AUTHOR: <NAME> (<EMAIL>)
# Edited by <NAME> (<EMAIL>)
#
# DESCRIPTION: This program creates a distribution plot by samples for a given
#              dataset
#
################################################################################
# Import built-in libraries
import os
import logging
import argparse
from argparse import RawDescriptionHelpFormatter
# Import add-on libraries
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
# Import local data libraries
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
# Import local plotting libraries
from secimtools.visualManager import module_box as box
from secimtools.visualManager import module_distribution as density
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def getOptions():
""" Function to pull in arguments """
description = """ Distribution Summaries: The tool plots the distribution of samples. """
parser = argparse.ArgumentParser(description=description,
formatter_class=RawDescriptionHelpFormatter)
standard = parser.add_argument_group(title="Standar input",
description="Standar input for SECIM tools.")
# Standard Input
standard.add_argument("-i","--input", dest="input", action='store',
required=True, help="Input dataset in wide format.")
standard.add_argument("-d","--design", dest="design", action='store',
required=True, help="Design file.")
standard.add_argument("-id","--ID", dest="uniqID", action='store',
required=True, help="Name of the column with uniqueID.")
standard.add_argument("-g","--group", dest="group", action='store',
required=False, default=False, help="Name of column in "\
"design file with Group/treatment information.")
standard.add_argument("-o",'--order', dest='order', action='store',
required=False, default=False, help="Name of the column "\
"with the runOrder")
standard.add_argument("-l","--levels",dest="levels",action="store",
required=False, default=False, help="Different groups to"\
"sort by separeted by commas.")
# Tool Output
output = parser.add_argument_group(title="Output paths", description="""Paths
and outputs""")
output.add_argument("-f","--figure",dest="figure",action="store",required=True,
help="Path for the distribution figure")
# Plot options
plot = parser.add_argument_group(title='Plot options')
plot.add_argument("-pal","--palette",dest="palette",action='store',required=False,
default="tableau", help="Name of the palette to use.")
plot.add_argument("-col","--color",dest="color",action="store",required=False,
default="Tableau_20", help="Name of a valid color scheme"\
" on the selected palette")
args = parser.parse_args()
# Standardize paths
args.input = os.path.abspath(args.input)
args.design = os.path.abspath(args.design)
args.figure = os.path.abspath(args.figure)
# if args.levels then split otherwise args.level = emptylist
if args.levels:
args.levels = args.levels.split(",")
return(args)
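# Example invocation (illustrative; file and column names below are placeholders):
#   distribution_samples.py -i wide.tsv -d design.tsv -id rowID -g treatment -f samples.pdf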
def plotDensityDistribution(pdf,wide,palette):
    # Instantiating figureHandler object
    figure = figureHandler(proj="2d", figsize=(12,7))
    # Formatting axis
figure.formatAxis(figTitle="Distribution by Samples Density",xlim="ignore",
ylim="ignore",grid=False)
# Plotting density plot
density.plotDensityDF(colors=palette.design["colors"],ax=figure.ax[0],data=wide)
# Add legend to the plot
figure.makeLegend(ax=figure.ax[0], ucGroups=palette.ugColors, group=palette.combName)
# Shrinking figure
figure.shrink()
# Adding to PDF
figure.addToPdf(pdf, dpi=600)
def plotBoxplotDistribution(pdf,wide,palette):
    # Instantiating figureHandler object
    figure = figureHandler(proj="2d",figsize=(max(len(wide.columns)/4,12),7))
    # Formatting axis
figure.formatAxis(figTitle="Distribution by Samples Boxplot",ylim="ignore",
grid=False,xlim="ignore")
# Plotting boxplot
box.boxDF(ax=figure.ax[0],colors=palette.design["colors"],dat=wide)
# Shrinking figure
figure.shrink()
#Adding to PDF
figure.addToPdf(pdf, dpi=600)
def main(args):
"""
Function to call all other functions
"""
# Checking if levels
if args.levels and args.group:
levels = [args.group]+args.levels
elif args.group and not args.levels:
levels = [args.group]
else:
levels = []
logger.info("Groups used to color by: {0}".format(",".join(levels)))
# Parsing files with interface
logger.info("Loading data with the Interface")
dat = wideToDesign(args.input, args.design, args.uniqID, args.group,
anno=args.levels, runOrder=args.order, logger=logger)
# Cleaning from missing data
dat.dropMissing()
# Sort data by runOrder if provided
if args.order:
logger.info("Sorting by runOrder")
design_final = dat.design.sort_values(by=args.order, axis=0)
wide_final = dat.wide.reindex(columns= design_final.index )
else:
design_final = dat.design
wide_final = dat.wide
# Get colors for each sample based on the group
palette.getColors(design=design_final,groups=levels)
# Open PDF pages to output figures
with PdfPages(args.figure) as pdf:
# Plot density plot
logger.info("Plotting density for sample distribution")
plotDensityDistribution(pdf=pdf, wide=wide_final, palette=palette)
# Plot boxplots
logger.info("Plotting boxplot for sample distribution")
plotBoxplotDistribution(pdf=pdf, wide=wide_final, palette=palette)
logger.info("Script complete!")
if __name__ == '__main__':
# import Arguments
args = getOptions()
# Setting logging
logger = logging.getLogger()
sl.setLogger(logger)
# Print logger info
logger.info("""Importing data with following parameters:
\tWide: {0}
\tDesign: {1}
\tUnique ID: {2}
\tGroup: {3}
\tRun Order: {4}
\tLevels: {5}
""" .format(args.input, args.design, args.uniqID, args.group,
args.order,args.levels))
# Set color palette
palette = colorHandler(pal=args.palette, col=args.color)
logger.info("Using {0} color scheme from {1} palette".format(args.color,
args.palette))
# main
main(args)
| 2.328125
| 2
|
src/view/ClassGraphView.py
|
dangbinghoo/SourceScope
| 0
|
12779096
|
<reponame>dangbinghoo/SourceScope<filename>src/view/ClassGraphView.py
#!/usr/bin/python2
# Copyright (c) 2010 <NAME>
# All rights reserved.
#
# License: BSD
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtSvg import *
import os
import sys
if __name__ == '__main__':
import sys
import os
app_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(app_dir)
sys.path.insert(0, os.path.abspath('..'))
os.chdir(os.path.abspath('..'))
from backend.plugins.PluginBase import PluginProcess
class ClassGraphProcess(PluginProcess):
def __init__(self, wdir, rq):
PluginProcess.__init__(self, wdir, rq)
        self.name = 'class graph process'
def parse_result(self, text, sig):
return [text]
class ClassGraphWidget(QWidget):
def __init__(self, parent, cmd_func, cmd_id, cmd_opt):
QWidget.__init__(self, parent)
self.is_busy = False
self.is_done = False
self.cmd_func = cmd_func
self.cmd_id = cmd_id
self.cmd_opt = cmd_opt
self.is_debug = os.getenv('SEASCOPE_CLASS_GRAPH_VIEW_DEBUG')
self.vlay1 = QVBoxLayout()
self.setLayout(self.vlay1)
#self.hlay1 = QHBoxLayout()
#self.vlay1.addLayout(self.hlay1)
#self.add_buttons(self.hlay1)
self.lbl = QLabel()
self.vlay1.addWidget(self.lbl)
self.vlay2 = QVBoxLayout()
self.scrolla = QScrollArea()
self.scrolla.setLayout(self.vlay2)
self.vlay1.addWidget(self.scrolla)
def startQuery(self, req, dname, proj_type, inx):
if self.is_done:
return
name = req if req else dname
self.lbl.setText(['derived', 'base'][inx] + '(' + name + ')')
tool_path = os.path.join('tools', 'ClassGraph.py')
pargs = [sys.executable, tool_path]
if inx == 1:
pargs += ['-b']
pargs += ['-d', dname]
if proj_type:
pargs += ['-t', proj_type]
if req:
pargs += [req]
sig_res = ClassGraphProcess('.', None).run_query_process(pargs, req)
sig_res[0].connect(self.clgraph_add_result)
self.is_busy = True
self.show_progress_bar()
def set_current(self, btn):
inx = self.bgrp.id(btn)
#self.btn[inx].setChecked(True)
#print 'inx clicked', inx
if inx == 0:
print self.svgw.renderer().defaultSize()
self.svgw.setMinimumSize(0, 0)
self.svgw.setMinimumSize(self.svgw.sizeHint())
#self.svgw.setMaximumSize(self.svgw.sizeHint())
print self.scrolla.sizeHint()
if inx == 1:
print self.svgw.renderer().defaultSize()
self.svgw.setMinimumSize(0, 0)
#self.svgw.setMaximumSize(self.svgw.sizeHint())
print self.scrolla.sizeHint()
if inx == 2:
print self.svgw.renderer().defaultSize()
self.svgw.setMinimumSize(0, 0)
self.svgw.resize(self.scrolla.size())
#self.svgw.setMaximumSize(self.svgw.sizeHint())
print self.scrolla.sizeHint()
def add_buttons(self, hlay):
self.bgrp = QButtonGroup()
self.bgrp.buttonClicked.connect(self.set_current)
self.bgrp.setExclusive(True)
for inx in range(3):
btn = QToolButton()
btn.setText(str(inx))
btn.setToolTip(str(inx))
#btn.setFlat(True)
btn.setCheckable(True)
self.bgrp.addButton(btn, inx)
hlay.addWidget(btn)
def clgraph_add_result(self, req, res):
if self.is_debug:
print res
self.is_busy = False
self.is_done = True
self.remove_progress_bar()
self.svgw = QSvgWidget()
self.scrolla.setWidget(self.svgw)
self.svgw.load(QByteArray(res[0]))
#print self.svgw.renderer().defaultSize()
sz = self.svgw.sizeHint()
scale = 1
if sz.width() > 1024:
scale = 0.8
self.svgw.setMinimumSize(sz.width() * scale, sz.height() * scale)
#self.svgw.setMaximumSize(self.svgw.sizeHint())
#print self.scrolla.sizeHint()
def show_progress_bar(self):
self.pbar = QProgressBar(self.scrolla)
self.pbar.setMinimum(0)
self.pbar.setMaximum(0)
self.pbar.show()
def remove_progress_bar(self):
if self.pbar:
self.pbar.hide()
self.pbar.setParent(None)
self.pbar = None
class ClassGraphWindow(QMainWindow):
parent = None
def __init__(self, req, dname, proj_type, cmd_func, cmd_args, cmd_opt):
QMainWindow.__init__(self, ClassGraphWindow.parent)
self.req = req
self.dname = dname
self.proj_type = proj_type
if req:
self.setWindowTitle(req)
else:
self.setWindowTitle(dname)
self.setFont(QFont("San Serif", 8))
w = QWidget()
self.setCentralWidget(w)
self.vlay = QVBoxLayout()
w.setLayout(self.vlay)
self.sw = QStackedWidget()
self.hlay = QHBoxLayout()
self.vlay.addLayout(self.hlay)
self.vlay.addWidget(self.sw)
self.bgrp = QButtonGroup()
self.bgrp.buttonClicked.connect(self.set_current)
self.bgrp.setExclusive(True)
self.btn = []
self.ctree = []
for inx in range(len(cmd_args)):
# cmd format: [ cmd_id, cmd_str, cmd_tip ]
cmd = cmd_args[inx]
btn = QToolButton()
btn.setText(cmd[1])
btn.setToolTip(cmd[2])
#btn.setFlat(True)
btn.setCheckable(True)
self.bgrp.addButton(btn, inx)
self.hlay.addWidget(btn)
ct = ClassGraphWidget(self, cmd_func, cmd[0], cmd_opt)
self.sw.addWidget(ct)
self.btn.append(btn)
self.ctree.append(ct)
self.hlay.addStretch(0)
self.set_current(self.btn[0])
def set_current(self, btn):
inx = self.bgrp.id(btn)
self.btn[inx].setChecked(True)
self.sw.setCurrentIndex(inx)
ct = self.ctree[inx]
ct.setFocus()
ct.startQuery(self.req, self.dname, self.proj_type, inx)
def create_page(req, dname, proj_type, cmd_func, cmd_args, cmd_opt):
w = ClassGraphWindow(req, dname, proj_type, cmd_func, cmd_args, cmd_opt)
w.resize(900, 600)
w.show()
return w
if __name__ == '__main__':
import optparse
usage = "usage: %prog (-d <code_dir/file> | -t <prj_type>) [symbol]"
op = optparse.OptionParser(usage=usage)
op.add_option("-d", "--codedir", dest="code_dir", help="Code dir", metavar="CODE_DIR")
op.add_option("-t", "--type", dest="prj_type", help="project type: idutils|gtags|cscope", metavar="PRJ_TYPE")
(options, args) = op.parse_args()
# dname
if not options.code_dir:
print >> sys.stderr, 'Specify -d'
sys.exit(-1)
dname = options.code_dir
if not os.path.exists(dname):
print >> sys.stderr, '"%s": does not exist' % dname
sys.exit(-2)
wdir = dname
if not os.path.isdir(wdir):
wdir = os.path.dirname(wdir)
# sym
sym = None
if len(args):
if len(args) != 1:
print >> sys.stderr, 'Please specify only one symbol'
sys.exit(-3)
sym = args[0]
# ptype
ptype = options.prj_type
if ptype:
if not sym:
        print >> sys.stderr, '-t option requires specifying a symbol'
sys.exit(-4)
if not os.path.isdir(dname):
print >> sys.stderr, '-t option needs codedir to be a directory'
sys.exit(-5)
pcmd = None
if ptype and os.path.isdir(dname):
prj_list = [
['idutils', 'ID' ],
['gtags', 'GRTAGS' ],
['cscope', 'cscope.out' ],
['grep', '' ],
]
for p in prj_list:
if p[0] == ptype:
if not os.path.exists(os.path.join(dname, p[1])):
print >> sys.stderr, 'Failed to find "%s" in directory "%s"' % (p[1], dname)
sys.exit(-6)
app = QApplication(sys.argv)
cmd_args = [
['CLGRAPH', 'D', 'Derived classes'],
['CLGRAPH', 'B', 'Base classes']
]
w = create_page(sym, dname, ptype, None, cmd_args, None)
sys.exit(app.exec_())
| 2
| 2
|
p1379_find_corresponding_node_in_binary_tree_clone.py
|
hugoleeney/leetcode_problems
| 0
|
12779097
|
"""
1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree
Difficulty: medium
Given two binary trees original and cloned and given a reference to a node target in the original tree.
The cloned tree is a copy of the original tree.
Return a reference to the same node in the cloned tree.
Note that you are not allowed to change any of the two trees or the target node and the answer must be a reference to a node in the cloned tree.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def getTargetCopy(self, original: TreeNode, cloned: TreeNode, target: TreeNode) -> TreeNode:
        if original is None:
            return None
        # Compare node identity rather than value, so the lookup stays correct
        # even if the tree contains duplicate values.
        if original is target:
            return cloned
left = self.getTargetCopy(original.left, cloned.left, target)
if left is None:
return self.getTargetCopy(original.right, cloned.right, target)
return left
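# Illustrative example: for original = [7, 4, 3, None, None, 6, 19], its clone, and
# target pointing at the node holding 3, getTargetCopy returns that node in the clone.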
| 3.671875
| 4
|
reader.py
|
build18-fpga-on-the-web/server
| 2
|
12779098
|
import socket
import time
host = 'localhost'
port = 2540
size = 1024
def Open(host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(( host,port))
return s
def SendData(conn,IR,data,length):
value = bin(data).split('0b')[1].zfill(length) #Convert from int to binary string
conn.send("send " + str(IR) + " " + str(value) + " " + str(length) + '\n') #Newline is required to flush the buffer on the Tcl server
def ReadData(conn,IR,length):
conn.send("read " + bin(IR) + " " + bin(length) + '\n')
print "test\n"
print conn.recv(4096) + '\n'
conn = Open(host, port)
print "Connected"
while True:
ReadData(conn,1,18)
time.sleep(1)
conn.close()
| 2.640625
| 3
|
collect_tweets.py
|
yassineAlouini/intro_to_airflow
| 0
|
12779099
|
# Inspired from this: https://www.data-blogger.com/2017/02/24/gathering-tweets-with-python/
import tweepy
import json
# Specify the account credentials in the following variables:
# TODO: Get them from an env variable or a secrets file
consumer_key = 'INSERT CONSUMER KEY HERE'
consumer_secret = 'INSERT CONSUMER SECRET HERE'
access_token = 'INSERT ACCESS TOKEN HERE'
access_token_secret = 'INSERT ACCESS TOKEN SECRET HERE'
# This listener will print out all Tweets it receives
# TODO: Adapt this to write to a CSV or something else.
class PrintListener(tweepy.StreamListener):
def on_data(self, data):
# Decode the JSON data
tweet = json.loads(data)
# Print out the Tweet
print('@%s: %s' % (tweet['user']['screen_name'], tweet['text'].encode('ascii', 'ignore')))
def on_error(self, status):
print(status)
if __name__ == '__main__':
listener = PrintListener()
# Show system message
print('I will now print Tweets containing "Python"! ==>')
# Authenticate
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Connect the stream to our listener
stream = tweepy.Stream(auth, listener)
stream.filter(track=['Python'])
| 3.453125
| 3
|
erec/__init__.py
|
cajohare/DarkElectronRecoils
| 0
|
12779100
|
__all__ = ["Params","LabFuncs", "AtomicFuncs","HaloFuncs","DMFuncs","NeutrinoFuncs","PlotFuncs"]
| 1.101563
| 1
|
txsecrethandshake/server.py
|
david415/txsecrethandshake
| 3
|
12779101
|
<reponame>david415/txsecrethandshake
import automat
import attr
import cbor
import types
import zope.interface
from twisted.protocols.basic import Int32StringReceiver
from twisted.internet.protocol import Factory
from nacl.signing import VerifyKey
from envelopes import SecretHandshakeEnvelopeFactory, Curve25519KeyPair, Ed25519KeyPair
from util import is_32bytes, SingleObserver
from interfaces import ISecretHandshakeMachine
@attr.s
@zope.interface.implementer(ISecretHandshakeMachine)
class ServerMachine(object):
"""
I am server-side state machine that implements the "secret handshake",
a cryptographic handshake protocol as described in the paper:
Designing a Secret Handshake: Authenticated Key Exchange as a
Capability System by <NAME>
"""
_machine = automat.MethodicalMachine()
envelope_factory = attr.ib(validator=attr.validators.instance_of(SecretHandshakeEnvelopeFactory))
notify_connected_handler = attr.ib(validator=attr.validators.instance_of(types.FunctionType))
send_datagram_handler = attr.ib(validator=attr.validators.instance_of(types.FunctionType))
receive_message_handler = attr.ib(validator=attr.validators.instance_of(types.FunctionType))
disconnect_handler = attr.ib(validator=attr.validators.instance_of(types.FunctionType))
# inputs
@_machine.input()
def start(self):
"the machine connects"
@_machine.input()
def stop(self):
"disconnet the machine"
@_machine.input()
def datagram_received(self, datagram):
"the machine receives data"
@_machine.input()
def send(self, datagram):
"send a datagram"
# outputs
@_machine.output()
def _send_disconnect(self):
close_command = {
"type":"disconnect",
}
disconnect_envelope = self.envelope_factory.datagram_encrypt(cbor.dumps(close_command))
self.send_datagram_handler(disconnect_envelope)
self.disconnect_handler()
@_machine.output()
def _verify_client_challenge(self, datagram):
self.envelope_factory.is_client_challenge_verified(datagram)
envelope = self.envelope_factory.create_server_challenge()
self.send_datagram_handler(envelope)
@_machine.output()
def _verify_client_auth(self, datagram):
self.envelope_factory.verify_client_auth(datagram)
envelope = self.envelope_factory.create_server_accept()
self.send_datagram_handler(envelope)
@_machine.output()
def _send_datagram(self, datagram):
"""
send datagram, first serialize, then encrypt and finally pass
to our send datagram handler.
"""
datagram_message = {
"type": "datagram",
"payload": datagram
}
datagram_envelope = self.envelope_factory.downstream_box.encrypt(cbor.dumps(datagram_message))
self.send_datagram_handler(datagram_envelope)
@_machine.output()
def _receive_datagram(self, datagram):
"""
post-handshake: decrypt received datagrams, deserialize and
then forward datagram payload upstream if message type is
"datagram", if type is "disconnect" then call our disconnect
handler.
"""
serialized_message = self.envelope_factory.downstream_box.decrypt(datagram)
message = cbor.loads(serialized_message)
if message["type"] == "datagram":
self.receive_message_handler(message["payload"])
if message["type"] == "disconnect":
self.disconnect_handler()
# states
@_machine.state(initial=True)
def unconnected(self):
"connection not yet initiated"
@_machine.state()
def awaiting_challenge(self):
"awaiting challenge envelope from client"
@_machine.state()
def challenge_sent(self):
"server challenge envelope sent"
@_machine.state()
def connected(self):
"accept envelope sent"
@_machine.state()
def disconnected(self):
"disconnected state"
unconnected.upon(start, enter=awaiting_challenge, outputs=[])
awaiting_challenge.upon(datagram_received, enter=challenge_sent, outputs=[_verify_client_challenge])
challenge_sent.upon(datagram_received, enter=connected, outputs=[_verify_client_auth])
connected.upon(datagram_received, enter=connected, outputs=[_receive_datagram])
connected.upon(send, enter=connected, outputs=[_send_datagram])
connected.upon(stop, enter=disconnected, outputs=[_send_disconnect])
| 2.34375
| 2
|
config.py
|
namhong1412/Image-Search-Engine-Python-and-Faiss
| 0
|
12779102
|
import psycopg2
# NOTE: `abort` (used below) is assumed to come from Flask; adjust if another framework is used.
from flask import abort
HOSTNAME = '192.168.1.204'
USERNAME = 'postgres'
PASSWORD = '<PASSWORD>'
DATABASE_NAME = 'data_lake'
PORT = 5432
postgres_connection_string = "postgresql://{DB_USER}:{DB_PASS}@{DB_ADDR}:{PORT}/{DB_NAME}".format(
DB_USER=USERNAME,
DB_PASS=PASSWORD,
DB_ADDR=HOSTNAME,
PORT=PORT,
DB_NAME=DATABASE_NAME)
def get_postgres_connection():
try:
connection = psycopg2.connect(
user=USERNAME,
password=PASSWORD,
host=HOSTNAME,
port=PORT,
database=DATABASE_NAME)
return connection
except (Exception, psycopg2.Error) as error:
message = f"get_postgres_connection {error}"
return abort(400, message)
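# Example usage (illustrative sketch):
#   conn = get_postgres_connection()
#   with conn.cursor() as cur:
#       cur.execute("SELECT 1")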
| 3.15625
| 3
|
exercicios/ex031.py
|
Roberto-Mota/CursoemVideo
| 0
|
12779103
|
<filename>exercicios/ex031.py<gh_stars>0
# Challenge 031 -> Write a program that asks for the distance of a trip in km.
# Compute the bus ticket price, charging R$0.50 per km for trips of up to 200 km
# and R$0.45 per km for longer trips.
dist = int(input("How many km long will your bus trip be? "))
if dist > 200:
    print('Your trip will cost R${:.2f}'.format(dist * 0.45))
else:
    print('Your trip will cost R${:.2f}'.format(dist * 0.50))
# simplified if
# preco = dist * 0.45 if dist > 200 else dist * 0.50
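# Worked example (illustrative): a 150 km trip costs 150 * 0.50 = R$75.00,
# while a 300 km trip costs 300 * 0.45 = R$135.00.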
| 3.921875
| 4
|
migrations/versions/567424e5046c_add_fortunki_table.py
|
landmaj/qbot
| 3
|
12779104
|
"""add fortunki table
Revision ID: 567424e5046c
Revises: <PASSWORD>
Create Date: 2019-09-19 18:59:11.629057
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "567424e5046c"
down_revision = "f32a45256434"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"fortunki",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("text", sa.Text(), nullable=False),
sa.PrimaryKeyConstraint("id", name=op.f("pk_fortunki")),
sa.UniqueConstraint("text", name=op.f("uq_fortunki_text")),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("fortunki")
# ### end Alembic commands ###
| 1.375
| 1
|
feedcrawler/ombi.py
|
rix1337/FeedCrawler
| 16
|
12779105
|
<filename>feedcrawler/ombi.py
# -*- coding: utf-8 -*-
# FeedCrawler
# Project by https://github.com/rix1337
import json
import requests
from imdb import IMDb
import feedcrawler.search.shared.content_all
import feedcrawler.search.shared.content_shows
from feedcrawler import internal
from feedcrawler.common import decode_base64
from feedcrawler.common import encode_base64
from feedcrawler.common import sanitize
from feedcrawler.config import CrawlerConfig
from feedcrawler.db import FeedDb
from feedcrawler.imdb import clean_imdb_id
def imdb_movie(imdb_id):
try:
imdb_id = clean_imdb_id(imdb_id)
ia = IMDb('https', languages='de-DE')
output = ia.get_movie(imdb_id)
title = sanitize(output.data['localized title'])
year = str(output.data['year'])
return title + " " + year
except:
if imdb_id is None:
internal.logger.debug("Ein Film ohne IMDb-ID wurde angefordert.")
else:
print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
return False
def imdb_show(imdb_id):
try:
imdb_id = clean_imdb_id(imdb_id)
ia = IMDb('https', languages='de-DE')
output = ia.get_movie(imdb_id)
ia.update(output, 'episodes')
title = sanitize(output.data['localized title'])
seasons = output.data['episodes']
eps = {}
for sn in seasons:
ep = []
for e in seasons[sn]:
ep.append(int(e))
eps[int(sn)] = ep
return title, eps
except:
if imdb_id is None:
internal.logger.debug("Eine Serie ohne IMDb-ID wurde angefordert.")
else:
print(u"[Ombi] - Fehler beim Abruf der IMDb für: " + imdb_id)
return False
def ombi(first_launch):
db = FeedDb('Ombi')
config = CrawlerConfig('Ombi')
url = config.get('url')
api = config.get('api')
if not url or not api:
return [0, 0]
english = CrawlerConfig('FeedCrawler').get('english')
try:
requested_movies = requests.get(url + '/api/v1/Request/movie', headers={'ApiKey': api})
requested_movies = json.loads(requested_movies.text)
requested_shows = requests.get(url + '/api/v1/Request/tv', headers={'ApiKey': api})
requested_shows = json.loads(requested_shows.text)
len_movies = len(requested_movies)
len_shows = len(requested_shows)
if first_launch:
internal.logger.debug("Erfolgreich mit Ombi verbunden.")
print(u"Erfolgreich mit Ombi verbunden.")
except:
internal.logger.debug("Ombi ist nicht erreichbar!")
print(u"Ombi ist nicht erreichbar!")
return [0, 0]
if requested_movies:
        internal.logger.debug(
            "The movie search uses BY, FX, HW and NK, provided their hostnames have been set.")
for r in requested_movies:
if bool(r.get("approved")):
if not bool(r.get("available")):
imdb_id = r.get("imdbId")
if not db.retrieve('movie_' + str(imdb_id)) == 'added':
title = imdb_movie(imdb_id)
if title:
best_result = feedcrawler.search.shared.content_all.get_best_result(title)
print(u"Film: " + title + u" durch Ombi hinzugefügt.")
if best_result:
feedcrawler.search.shared.content_all.download(best_result)
if english:
title = r.get('title')
best_result = feedcrawler.search.shared.content_all.get_best_result(title)
print(u"Film: " + title + u"durch Ombi hinzugefügt.")
if best_result:
feedcrawler.search.shared.content_all.download(best_result)
db.store('movie_' + str(imdb_id), 'added')
if requested_shows:
internal.logger.debug("Die Suchfunktion für Serien nutzt SJ, sofern der Hostname gesetzt wurde.")
for r in requested_shows:
imdb_id = r.get("imdbId")
child_requests = r.get("childRequests")
for cr in child_requests:
if bool(cr.get("approved")):
if not bool(cr.get("available")):
details = cr.get("seasonRequests")
for season in details:
sn = season.get("seasonNumber")
eps = []
episodes = season.get("episodes")
for episode in episodes:
if not bool(episode.get("available")):
enr = episode.get("episodeNumber")
s = str(sn)
if len(s) == 1:
s = "0" + s
s = "S" + s
e = str(enr)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
if not db.retrieve('show_' + str(imdb_id) + '_' + se) == 'added':
eps.append(enr)
if eps:
infos = imdb_show(imdb_id)
if infos:
title = infos[0]
all_eps = infos[1]
check_sn = False
if all_eps:
check_sn = all_eps.get(sn)
if check_sn:
sn_length = len(eps)
check_sn_length = len(check_sn)
if check_sn_length > sn_length:
for ep in eps:
e = str(ep)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
payload = feedcrawler.search.shared.content_shows.get_best_result(title)
if payload:
payload = decode_base64(payload).split("|")
payload = encode_base64(payload[0] + "|" + payload[1] + "|" + se)
added_episode = feedcrawler.search.shared.content_shows.download(
payload)
if not added_episode:
payload = decode_base64(payload).split("|")
payload = encode_base64(payload[0] + "|" + payload[1] + "|" + s)
add_season = feedcrawler.search.shared.content_shows.download(
payload)
for e in eps:
e = str(e)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
db.store('show_' + str(imdb_id) + '_' + se, 'added')
if not add_season:
                                                            internal.logger.debug(
                                                                u"Could not find a release for " + title + " " + se + ".")
break
db.store('show_' + str(imdb_id) + '_' + se, 'added')
else:
payload = feedcrawler.search.shared.content_shows.get_best_result(title)
if payload:
payload = decode_base64(payload).split("|")
payload = encode_base64(payload[0] + "|" + payload[1] + "|" + s)
feedcrawler.search.shared.content_shows.download(payload)
for ep in eps:
e = str(ep)
if len(e) == 1:
e = "0" + e
se = s + "E" + e
db.store('show_' + str(imdb_id) + '_' + se, 'added')
print(u"Serie/Staffel/Episode: " + title + u" durch Ombi hinzugefügt.")
return [len_movies, len_shows]
| 2.59375
| 3
|
scripts/030_fastenloc/utils/group_by_phenotype.py
|
miltondp/phenomexcan
| 3
|
12779106
|
import os
import re
from glob import glob
import pandas as pd
# enloc
#FILE_SUFFIX = '*.enloc.rst'
#FILE_PATTERN = '(?P<pheno>.+)__PM__(?P<tissue>.+)\.enloc\.rst'
# fastenloc
ALL_TISSUES = pd.read_csv('/mnt/phenomexcan/fastenloc/fastenloc_gtex_tissues.txt', header=None, squeeze=True).tolist()
FILE_PREFIX = 'fastenloc-'
FILE_SUFFIX = '*.sig.out'
all_tissues_regex = '|'.join([re.escape(t) for t in ALL_TISSUES])
FILE_PATTERN = f'fastenloc-(?P<pheno>.+)-(?P<tissue>{all_tissues_regex})\.enloc\.sig\.out'
assert len(ALL_TISSUES) == 49
all_files = glob(FILE_SUFFIX)
print(len(all_files))
file_pattern = re.compile(FILE_PATTERN)
all_phenos = [re.search(file_pattern, f).group('pheno') for f in all_files]
assert len(all_files) == len(all_phenos)
assert not any([x is None for x in all_phenos])
all_phenos = list(set(all_phenos))
print(len(all_phenos))
assert len(all_phenos) * len(ALL_TISSUES) == len(all_files)
for pheno in all_phenos:
os.makedirs(pheno, exist_ok=True)
s = os.system(f'mv {FILE_PREFIX}{pheno}-* {pheno}/')
assert s == 0
| 2.71875
| 3
|
scripts/colum.py
|
OkanoShogo0903/character_estimation
| 8
|
12779107
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
COLUMNS=[
'angle-R-mid-0', 'angle-R-mid-1', 'angle-R-mid-2',
'angle-L-mid-0', 'angle-L-mid-1', 'angle-L-mid-2',
'angle-R-top-0', 'angle-R-top-1',
'angle-L-top-0', 'angle-L-top-1',
'angle-R-bot-0', 'angle-R-bot-1', 'angle-R-bot-2',
'angle-L-bot-0', 'angle-L-bot-1', 'angle-L-bot-2',
'length-C-top-0',
'length-R-mid-0', 'length-R-mid-1', 'length-R-mid-2',
'length-L-mid-0', 'length-L-mid-1', 'length-L-mid-2',
'length-R-bot-0', 'length-R-bot-1', 'length-R-bot-2',
'length-L-bot-0', 'length-L-bot-1', 'length-L-bot-2',
'length-R-top-0', 'length-R-top-1',
'length-L-top-0', 'length-L-top-1',
'Label']
FLIP_COLUMNS=[
'angle-L-mid-0', 'angle-L-mid-1', 'angle-L-mid-2',
'angle-R-mid-0', 'angle-R-mid-1', 'angle-R-mid-2',
'angle-L-top-0', 'angle-L-top-1',
'angle-R-top-0', 'angle-R-top-1',
'angle-L-bot-0', 'angle-L-bot-1', 'angle-L-bot-2',
'angle-R-bot-0', 'angle-R-bot-1', 'angle-R-bot-2',
'length-C-top-0',
'length-R-mid-0', 'length-R-mid-1', 'length-R-mid-2',
'length-L-mid-0', 'length-L-mid-1', 'length-L-mid-2',
'length-R-bot-0', 'length-R-bot-1', 'length-R-bot-2',
'length-L-bot-0', 'length-L-bot-1', 'length-L-bot-2',
'length-R-top-0', 'length-R-top-1',
'length-L-top-0', 'length-L-top-1',
'Label']
DROP_LIST=[
'angle-L-top-0', 'angle-L-top-1',
'angle-R-top-0', 'angle-R-top-1',
'length-R-top-0', 'length-R-top-1',
'length-L-top-0', 'length-L-top-1',
]
| 1.953125
| 2
|
examples/fantom/rename.py
|
pkuksa/FILER_giggle
| 210
|
12779108
|
import sys
if len(sys.argv) != 4:
sys.stderr.write('usage:\t' + \
sys.argv[0] + \
' <name2library file>' + \
' <expression count matrix file>' + \
' <out dir>\n')
sys.exit(1)
name2library_file=sys.argv[1]
expression_count_matrix_file=sys.argv[2]
out_dir=sys.argv[3]
files={}
names = {}
for l in open(name2library_file, 'r'):
A = l.rstrip().split('\t')
names[A[1]] = A[0]
header = []
for l in open(expression_count_matrix_file, 'r'):
A = l.rstrip().split()
if A[0] == 'Id':
header = A[1:]
print len(header)
0/1
else:
i = 0
for a in A[1:]:
if a != '0':
if names[header[i]] not in files:
files[names[header[i]]] = \
open(out_dir + \
'/' + \
names[header[i]] + \
'.bed',
'w')
files[names[header[i]]].write( \
A[0].replace(':','\t').replace('-','\t') + \
'\t' + a + '\n')
i+=1
| 2.34375
| 2
|
appr/platforms/kubernetes.py
|
sergeyberezansky/appr
| 31
|
12779109
|
from __future__ import absolute_import, division, print_function
import hashlib
import json
import logging
import subprocess
import tempfile
import time
import requests
from requests.utils import urlparse
__all__ = ['Kubernetes', "get_endpoint"]
logger = logging.getLogger(__name__)
resource_endpoints = {
"daemonsets":
"apis/extensions/v1beta1/namespaces/{namespace}/daemonsets",
"deployments":
"apis/extensions/v1beta1/namespaces/{namespace}/deployments",
"horizontalpodautoscalers":
"apis/extensions/v1beta1/namespaces/{namespace}/horizontalpodautoscalers",
"ingresses":
"apis/extensions/v1beta1/namespaces/{namespace}/ingresses",
"jobs":
"apis/extensions/v1beta1/namespaces/{namespace}/jobs",
"namespaces":
"api/v1/namespaces",
"replicasets":
"apis/extensions/v1beta1/namespaces/{namespace}/replicasets",
"persistentvolumes":
"api/v1/namespaces/{namespace}/persistentvolumes",
"persistentvolumeclaims":
"api/v1/namespaces/{namespace}/persistentvolumeclaims",
"services":
"api/v1/namespaces/{namespace}/services",
"serviceaccounts":
"api/v1/namespaces/{namespace}/serviceaccounts",
"secrets":
"api/v1/namespaces/{namespace}/secrets",
"configmaps":
"api/v1/namespaces/{namespace}/configmaps",
"replicationcontrollers":
"api/v1/namespaces/{namespace}/replicationcontrollers",
"pods":
"api/v1/namespaces/{namespace}/pods",
"statefulset":
"apis/apps/v1beta1/namespaces/{namespace}/statefulsets",
"storageclass":
"apis/storage.k8s.io/v1beta1/statefulsets", }
resources_alias = {
"ds": "daemonsets",
"hpa": "horizontalpodautoscalers",
"ing": "ingresses",
"ingress": "ingresses",
"ns": "namespaces",
"sc": "storageclasses",
"sfs": "statefulsets",
"po": "pods",
"pv": "persistentvolumes",
"pvc": "persistentvolumeclaims",
"rc": "replicationcontrollers",
"svc": "services"}
ANNOTATIONS = {
'protected': 'resource.appr/protected',
'hash': 'resource.appr/hash',
'version': 'package.appr/version',
'parent': 'package.appr/parent',
'rand': 'resource.appr/rand',
'update-mode': 'resource.appr/update-mode',
'package': 'package.appr/package'}
def get_endpoint(kind):
name = None
if kind in resource_endpoints:
name = kind
elif kind in resources_alias:
name = resources_alias[kind]
elif kind + "s" in resource_endpoints:
name = kind + "s"
else:
return 'unknown'
return resource_endpoints[name]
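# Illustrative example: get_endpoint("svc") resolves the alias to "services" and returns
# "api/v1/namespaces/{namespace}/services"; an unrecognized kind yields the string 'unknown'.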
class Kubernetes(object):
def __init__(self, namespace=None, endpoint=None, body=None, proxy=None):
self.proxy = None
if endpoint is not None and endpoint[0] == "/":
endpoint = endpoint[1:-1]
self.endpoint = endpoint
self.body = body
self.obj = None
self.protected = False
self._resource_load()
self.kind = self.obj['kind'].lower()
self.name = self.obj['metadata']['name']
self.force_rotate = ANNOTATIONS['rand'] in self.obj['metadata'].get('annotations', {})
self.namespace = self._namespace(namespace)
self.result = None
if proxy:
self.proxy = urlparse(proxy)
def _resource_load(self):
self.obj = json.loads(self.body)
if 'annotations' in self.obj['metadata']:
if (ANNOTATIONS['protected'] in self.obj['metadata']['annotations'] and
self.obj['metadata']['annotations'][ANNOTATIONS['protected']] == 'true'):
self.protected = True
def _gethash(self, src):
# Copy rand value
if (src is not None and ANNOTATIONS['rand'] in src['metadata'].get('annotations', {}) and
ANNOTATIONS['rand'] not in self.obj['metadata']['annotations']):
self.obj['metadata']['annotations'][ANNOTATIONS['rand']] = src['metadata'][
'annotations'][ANNOTATIONS['rand']]
# TODO(ant31) it should hash before the custom annotations
if ANNOTATIONS['hash'] in self.obj['metadata'].get('annotations', {}):
if self.obj['metadata']['annotations'][ANNOTATIONS['hash']] is None:
sha = hashlib.sha256(json.dumps(self.obj, sort_keys=True)).hexdigest()
self.obj['metadata']['annotations'][ANNOTATIONS['hash']] = sha
return self.obj['metadata']['annotations'][ANNOTATIONS['hash']]
else:
return None
def _namespace(self, namespace=None):
if namespace:
return namespace
elif 'namespace' in self.obj['metadata']:
return self.obj['metadata']['namespace']
else:
return 'default'
def create(self, force=False, dry=False, strategy='update'):
"""
- Check if resource name exists
- if it exists check if the apprhash is the same
- if not the same delete the resource and recreate it
- if force == true, delete the resource and recreate it
        - if it doesn't exist, create it
"""
force = force or self.force_rotate
r = self.get()
if r is not None:
rhash = r['metadata'].get('annotations', {}).get(ANNOTATIONS['hash'], None)
objhash = self._gethash(r)
f = tempfile.NamedTemporaryFile()
method = "apply"
if self.proxy:
method = "create"
strategy = "replace"
cmd = [method, '-f', f.name]
f.write(json.dumps(self.obj))
f.flush()
if r is None:
self._call(cmd, dry=dry)
return 'created'
elif (objhash is None or rhash == objhash) and force is False:
return 'ok'
elif rhash != objhash or force is True:
if self.protected:
return 'protected'
if strategy == 'replace':
self.delete(dry=dry)
action = "replaced"
elif strategy == "update":
action = "updated"
else:
raise ValueError("Unknown action %s" % action)
self._call(cmd, dry=dry)
return action
def get(self):
cmd = ['get', self.kind, self.name, '-o', 'json']
try:
self.result = json.loads(self._call(cmd))
return self.result
except RuntimeError:
return None
except (requests.exceptions.HTTPError) as e:
if e.response.status_code == 404:
return None
else:
raise e
def delete(self, dry=False, **kwargs):
cmd = ['delete', self.kind, self.name]
if self.protected:
return 'protected'
r = self.get()
if r is not None:
self._call(cmd, dry=dry)
return 'deleted'
else:
return 'absent'
def wait(self, retries=3, seconds=1):
r = 1
time.sleep(seconds)
obj = self.get()
while (r < retries and obj is None):
r += 1
time.sleep(seconds)
obj = self.get()
return obj
def exists(self):
r = self.get()
if r is None:
return False
else:
return True
def _call(self, cmd, dry=False):
command = ['kubectl'] + cmd + ["--namespace", self.namespace]
if not dry:
if self.proxy is not None:
return self._request(cmd[0])
else:
try:
return subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise RuntimeError("Kubernetes failed to create %s (%s): "
"%s" % (self.name, self.kind, e.output))
else:
return True
def _request(self, method):
if method == 'create':
headers = {'Content-Type': 'application/json'}
method = 'post'
url = "%s/%s" % (self.proxy.geturl(), self.endpoint)
return requests.post(url, data=self.body, headers=headers)
else:
url = "%s/%s/%s" % (self.proxy.geturl(), self.endpoint, self.name)
query = getattr(requests, method)
r = query(url)
r.raise_for_status()
return r.content
| 1.96875
| 2
|
open_vsdcli/vsd_enterprise.py
|
maxiterr/openvsd
| 9
|
12779110
|
from open_vsdcli.vsd_common import *
@vsdcli.command(name='enterprise-list')
@click.option('--filter', metavar='<filter>',
help='Filter for name, description, lastUpdatedDate, '
'creationDate, externalID')
@click.pass_context
def enterprise_list(ctx, filter):
"""Show all enterprise within the VSD"""
result = ctx.obj['nc'].get("enterprises", filter=filter)
table = PrettyTable(["Enterprise ID", "Name"])
for line in result:
table.add_row([line['ID'],
line['name']])
print(table)
@vsdcli.command(name='enterprise-show')
@click.argument('enterprise-id', metavar='<enterprise-id>', required=True)
@click.pass_context
def enterprise_show(ctx, enterprise_id):
"""Show information for a given enterprise id"""
result = ctx.obj['nc'].get("enterprises/%s" % enterprise_id)[0]
print_object(result, exclude=['APIKey'], only=ctx.obj['show_only'])
@vsdcli.command(name='enterprise-create')
@click.argument('name', metavar='<name>', required=True)
@click.pass_context
def enterprise_create(ctx, name):
"""Add an enterprise to the VSD"""
params = {}
params['name'] = name
result = ctx.obj['nc'].post("enterprises", params)[0]
print_object(result, only=ctx.obj['show_only'])
@vsdcli.command(name='enterprise-delete')
@click.argument('enterprise-id', metavar='<enterprise ID>', required=True)
@click.confirmation_option(prompt='Are you sure ?')
@click.pass_context
def enterprise_delete(ctx, enterprise_id):
"""Delete a given enterprise"""
ctx.obj['nc'].delete("enterprises/%s?responseChoice=1" % enterprise_id)
@vsdcli.command(name='enterprise-update')
@click.argument('enterprise-id', metavar='<enterprise ID>', required=True)
@click.option('--key-value', metavar='<key:value>', multiple=True)
@click.pass_context
def enterprise_update(ctx, enterprise_id, key_value):
"""Update key/value for a given enterprise"""
params = {}
for kv in key_value:
key, value = kv.split(':', 1)
params[key] = value
ctx.obj['nc'].put("enterprises/%s" % enterprise_id, params)
result = ctx.obj['nc'].get("enterprises/%s" % enterprise_id)[0]
print_object(result, only=ctx.obj['show_only'])
@vsdcli.command(name='enterprisepermission-list')
@click.option('--redundancygroup-id', metavar='<id>')
@click.option('--gateway-id', metavar='<id>')
@click.option('--vlan-id', metavar='<id>')
@click.option('--service-id', metavar='<id>')
@click.option('--port-id', metavar='<id>')
@click.option('--filter', metavar='<filter>',
help='Filter for name, lastUpdatedDate, creationDate, '
'externalID')
@click.pass_context
def enterprisepermission_list(ctx, filter, **ids):
"""List all Enterprise Permission for a CSP entity"""
id_type, id = check_id(**ids)
request = "%ss/%s/enterprisepermissions" % (id_type, id)
if not filter:
result = ctx.obj['nc'].get(request)
else:
result = ctx.obj['nc'].get(request, filter=filter)
table = PrettyTable(["ID",
"Action",
"Entity ID",
"Entity type",
"Entity name"])
for line in result:
table.add_row([line['ID'],
line['permittedAction'],
line['permittedEntityID'],
line['permittedEntityType'],
line['permittedEntityName']])
print(table)
@vsdcli.command(name='enterprisepermission-show')
@click.argument('enterprisepermission-id', metavar='<enterprisepermission-id>',
required=True)
@click.pass_context
def enterprisepermission_show(ctx, enterprisepermission_id):
    """Show information for a given enterprisepermission id"""
    result = ctx.obj['nc'].get("enterprisepermissions/%s" %
                               enterprisepermission_id)[0]
print_object(result, only=ctx.obj['show_only'])
@vsdcli.command(name='enterprisepermission-create')
@click.argument('entity-id', metavar='<group or user ID>', required=True)
@click.option('--action', type=click.Choice(['USE',
'EXTEND',
'READ',
'INSTANTIATE']),
default='USE', help='Default : USE')
@click.option('--redundancygroup-id', metavar='<id>')
@click.option('--gateway-id', metavar='<id>')
@click.option('--vlan-id', metavar='<id>')
@click.option('--service-id', metavar='<id>')
@click.option('--port-id', metavar='<id>')
@click.pass_context
def enterprisepermission_create(ctx, entity_id, action, **ids):
"""Add permission for a given element (gateway, vlan, etc...)"""
id_type, id = check_id(**ids)
params = {}
params['permittedEntityID'] = entity_id
params['permittedAction'] = action
ctx.obj['nc'].post("%ss/%s/enterprisepermissions" % (id_type, id), params)
| 2.109375
| 2
|
settings.py
|
nukui-s/sscomdetection
| 0
|
12779111
|
from string import Template
const_base = "data/const/degree_order_{}_{}.pkl"
#LRF settings
N = 500
minc = 100
maxc = 100
mu = 0.3
k = 5
#k = 10
#maxk = 20
maxk = 50
t1 = 2
t2 = 1
name_tmp = Template("LRF_${N}_${k}_${maxk}_${minc}_${maxc}_${mu}")
lrf_data_label = name_tmp.substitute(N=N, k=k, maxk=maxk, minc=minc, maxc=maxc,
mu=mu)
data_label = lrf_data_label
#data_label = "gn_1000_4"
#data_label = "polblogs"
exp_name = "{}_degree_order_asc".format(data_label)
print("*********************", exp_name)
#General settings
#densities =[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
densities = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
trials = 5
K = 5
lr_adam = 0.01
lr_sgd = 0.01
threshold = 10e-9
threads = 8
used_models = ["abs_adam","update_rule"]
#used_models = ["abs_adam", "update_rule"]
#used_models = ["abs_adam"]
max_iters = 1000
mlambda = 2
| 1.960938
| 2
|
neo/test/rawiotest/test_spike2rawio.py
|
deeptimittal12/python-neo
| 1
|
12779112
|
<filename>neo/test/rawiotest/test_spike2rawio.py
import unittest
from neo.rawio.spike2rawio import Spike2RawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestSpike2RawIO(BaseTestRawIO, unittest.TestCase, ):
rawioclass = Spike2RawIO
files_to_download = [
'File_spike2_1.smr',
'File_spike2_2.smr',
'File_spike2_3.smr',
'130322-1LY.smr', # this is for bug 182
'multi_sampling.smr', # this is for bug 466
]
entities_to_test = files_to_download
if __name__ == "__main__":
unittest.main()
| 2.25
| 2
|
src/merge_sort.py
|
Darren-Haynes/Data_Structures
| 0
|
12779113
|
"""Implement merge sort algorithm."""
from random import randint, shuffle
from timeit import timeit
def merge_sort(nums):
"""Merge list by merge sort."""
half = int(len(nums) // 2)
if len(nums) == 1:
return nums
if len(nums) == 2:
if nums[0] > nums[1]:
nums[0], nums[1] = nums[1], nums[0]
return nums
left = merge_sort(nums[:half])
right = merge_sort(nums[half:])
output = []
left_ct = 0
right_ct = 0
while left_ct < len(left) and right_ct < len(right):
if left[left_ct] < right[right_ct]:
output.append(left[left_ct])
left_ct += 1
else:
output.append(right[right_ct])
right_ct += 1
if left_ct == len(left):
output += right[right_ct:]
elif right_ct == len(right):
output += left[left_ct:]
return output
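# Illustrative sanity check (doctest-style, not executed here):
#   merge_sort([5, 3, 8, 1, 2]) -> [1, 2, 3, 5, 8]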
def timings(): # pragma: no cover
"""Generate timings report for insertion sort."""
import_sort = 'from merge_sort import merge_sort'
print("""
Timings for best, average and worst case scenarios for the merge sort.
--------------------------------------------------------------------------
""")
print("3 Best Case Scenarios - sorted except for one value")
for i in range(3):
lst_len = randint(9, 50)
rand_lst = [i for i in range(lst_len)]
rand_lst[6], rand_lst[-1] = rand_lst[-1], rand_lst[6]
best_time = timeit('merge_sort({})'.format(rand_lst), import_sort)
print('List {}: length={}; time = {}'.format(i + 1, lst_len, best_time))
print("\n3 Average Case Scenarios - Moderately sorted")
for i in range(3):
lst_len = randint(9, 50)
rand_lst = [i for i in range(lst_len)]
shuffle(rand_lst)
best_time = timeit('merge_sort({})'.format(rand_lst), import_sort)
print('List {}: length={}; time = {}'.format(i + 1, lst_len, best_time))
print("\n3 Worst Case Scenarios - Completely unsorted")
for i in range(3):
lst_len = randint(9, 50)
rand_lst = [i for i in range(lst_len)]
rand_lst = rand_lst[::-1]
best_time = timeit('merge_sort({})'.format(rand_lst), import_sort)
print('List {}: length={}; time = {}'.format(i + 1, lst_len, best_time))
if __name__ == '__main__': # pragma: no cover
timings()
| 4.09375
| 4
|
rd_average.py
|
WyohKnott/image-comparison-sources
| 4
|
12779114
|
#!/usr/bin/python3
# Copyright 2017-2018 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import glob
import numpy as np
import pandas as pd
import six
import pytablewriter
from multiprocessing import Pool
def get_lossless_average(path, reference_format):
merged_data = {}
columns = [
"format", "avg_bpp", "avg_compression_ratio", "avg_space_saving",
"wavg_encode_time", "wavg_decode_time"
]
final_data = pd.DataFrame(columns=columns)
final_data.set_index("format", drop=False, inplace=True)
for format in next(os.walk(path))[1]:
if not glob.glob(path + "/" + format + "/lossless/*.out"):
print("Lossless results files could not be found for format {}.".
format(format))
continue
rawdata = []
data_path = path + "/" + format + "/lossless/"
for f in glob.glob(data_path + "/*.out"):
rawdata.append(pd.read_csv(f, sep=":"))
merged_data[format] = pd.concat(rawdata)
sum_orig_file_size = np.sum(merged_data[format]["orig_file_size"])
sum_compressed_file_size = np.sum(
merged_data[format]["compressed_file_size"])
sum_pixels = np.sum(merged_data[format]["pixels"])
avg_bpp = sum_compressed_file_size * 8 / sum_pixels
avg_compression_ratio = sum_orig_file_size / sum_compressed_file_size
avg_space_saving = 1 - (1 / avg_compression_ratio)
wavg_encode_time = np.average(
merged_data[format]["encode_time"],
weights=merged_data[format]["pixels"])
wavg_decode_time = np.average(
merged_data[format]["decode_time"],
weights=merged_data[format]["pixels"])
final_data.loc[format] = [
format, avg_bpp, avg_compression_ratio, avg_space_saving,
wavg_encode_time, wavg_decode_time
]
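    # Presumably a "Weissman score"-style figure of merit: the compression ratio
    # relative to the reference format, scaled by the ratio of the logarithms of
    # the encode times (see the expression below).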
    final_data = final_data.assign(
        weissman_score=lambda x: (
            x.avg_compression_ratio
            / x.loc[reference_format, "avg_compression_ratio"]
            * np.log(x.loc[reference_format, "wavg_encode_time"] * 1000)
            / np.log(x.wavg_encode_time * 1000)))
final_data.sort_values("weissman_score", ascending=False, inplace=True)
results_file = path + "/" + os.path.basename(path) + ".lossless.out"
final_data.to_csv(results_file, sep=":")
file = open(path + "/" + os.path.basename(path) + ".lossless.md", "w")
markdown_writer = pytablewriter.MarkdownTableWriter()
markdown_writer.from_dataframe(final_data)
markdown_writer.stream = six.StringIO()
markdown_writer.write_table()
file.write(markdown_writer.stream.getvalue())
file.close()
print(
"Lossless results file successfully saved to {}.".format(results_file))
def get_lossy_average(args):
[path, format, reference_format] = args
if not glob.glob(path + "/" + format + "/lossy/*.out"):
print("Lossy results files could not be found for format {}.".format(
format))
return
rawdata = []
merged_data = []
columns = [
"file_name", "quality", "orig_file_size", "compressed_file_size",
"pixels", "bpp", "compression_ratio", "encode_time", "decode_time",
"y_ssim_score", "rgb_ssim_score", "msssim_score", "psnrhvsm_score",
"vmaf_score"
]
final_columns = [
"quality", "avg_bpp", "avg_compression_ratio", "avg_space_saving",
"wavg_encode_time", "wavg_decode_time", "wavg_y_ssim_score",
"wavg_rgb_ssim_score", "wavg_msssim_score", "wavg_psnrhvsm_score",
"wavg_vmaf_score"
]
final_data = pd.DataFrame(columns=final_columns)
data_path = path + "/" + format + "/lossy/"
for f in glob.glob(data_path + "*.out"):
rawdata.append(pd.read_csv(f, sep=":"))
quality_length = len(rawdata[0].index)
for i in range(quality_length):
merged_data.insert(i, pd.DataFrame(columns=columns))
for data in rawdata:
merged_data[i] = merged_data[i].append(data.iloc[[i]])
merged_data[i].sort_values("file_name", ascending=True, inplace=True)
quality = np.mean(merged_data[i]["quality"])
sum_orig_file_size = np.sum(merged_data[i]["orig_file_size"])
sum_compressed_file_size = np.sum(
merged_data[i]["compressed_file_size"])
sum_pixels = np.sum(merged_data[i]["pixels"])
avg_bpp = sum_compressed_file_size * 8 / sum_pixels
avg_compression_ratio = sum_orig_file_size / sum_compressed_file_size
avg_space_saving = 1 - (1 / avg_compression_ratio)
wavg_encode_time = np.average(
merged_data[i]["encode_time"], weights=merged_data[i]["pixels"])
wavg_decode_time = np.average(
merged_data[i]["decode_time"], weights=merged_data[i]["pixels"])
wavg_y_ssim_score = np.average(
merged_data[i]["y_ssim_score"], weights=merged_data[i]["pixels"])
wavg_rgb_ssim_score = np.average(
merged_data[i]["rgb_ssim_score"], weights=merged_data[i]["pixels"])
wavg_msssim_score = np.average(
merged_data[i]["msssim_score"], weights=merged_data[i]["pixels"])
wavg_psnrhvsm_score = np.average(
merged_data[i]["psnrhvsm_score"], weights=merged_data[i]["pixels"])
wavg_vmaf_score = np.average(
merged_data[i]["vmaf_score"], weights=merged_data[i]["pixels"])
final_data.loc[i] = [
quality, avg_bpp, avg_compression_ratio, avg_space_saving,
wavg_encode_time, wavg_decode_time, wavg_y_ssim_score,
wavg_rgb_ssim_score, wavg_msssim_score, wavg_psnrhvsm_score,
wavg_vmaf_score
]
results_file = path + "/" + os.path.basename(
path) + "." + format + ".lossy.out"
final_data.to_csv(results_file, sep=":", index=False)
print("Lossy results file for format {} successfully saved to {}.".format(
format, results_file))
def main(argv):
    if sys.version_info < (3, 5):
raise Exception("Python 3.5 or a more recent version is required.")
if len(argv) < 2 or len(argv) > 3:
print(
"rd_average.py: Calculate a per format weighted averages of the results files generated by rd_collect.py"
)
print(
"Arg 1: Path to the results of a subset generated by rd_collect.py")
print(" For ex: rd_average.py \"results/subset1\"")
print("Arg 2: Reference format with which to compare other formats.")
print(" Default to mozjpeg")
return
results_folder = os.path.normpath(argv[1])
available_formats = next(os.walk(results_folder))[1]
# Check is there is actually results files in the path provided
if (not os.path.isdir(results_folder) or not available_formats
or not glob.glob(results_folder + "/**/*.out", recursive=True)):
print(
"Could not find all results file. Please make sure the path provided is correct."
)
return
try:
reference_format = argv[2]
except IndexError:
reference_format = "mozjpeg"
if (reference_format not in available_formats or not glob.glob(
results_folder + "/" + reference_format + "/lossless/*.out")
or not glob.glob(results_folder + "/" + reference_format +
"/lossy/*.out")):
print(
"Could not find reference format results files. Please choose a format among {} or check if the reference format results files are present.".
format(available_formats))
return
get_lossless_average(results_folder, reference_format)
Pool().map(get_lossy_average,
[(results_folder, format, reference_format)
for format in next(os.walk(results_folder))[1]])
if __name__ == "__main__":
main(sys.argv)
| 1.546875
| 2
|
ext_path/path.py
|
ruanhailiang/pyutils
| 0
|
12779115
|
import os
import shutil
def copy_file_path(source_path, target_path):
"""复制源文件目录下的所有目录到另一个文件目录下"""
for e, _, _ in os.walk(source_path):
path_name = os.path.splitdrive(e)[1]
file_path = os.path.join(target_path, path_name[len(source_path)-1:])
if not os.path.exists(file_path):
os.makedirs(file_path)
def copy_files(source_path, target_path):
"""复制一个文件夹所有文件到另一个文件夹"""
for source_file_Path, d, filelist in os.walk(source_path):
drivename, pathname = os.path.splitdrive(source_file_Path)
file_path = os.path.join(target_path, pathname)
if not os.path.exists(file_path):
os.makedirs(file_path)
for filename in filelist:
file = os.path.join(source_file_Path, filename)
shutil.copy(file, file_path)
def get_files(path, file_name, is_lower=False):
"""根据文件名与文件目录,获取目录下所有文件列表"""
files = []
for eachfilePath, d, file_names in os.walk(path):
for name in file_names:
if is_lower:
if name == file_name:
tempfile = os.path.join(eachfilePath, name)
files.append(tempfile)
else:
if name.lower() == file_name.lower():
tempfile = os.path.join(eachfilePath, name)
files.append(tempfile)
return files
def get_ext_files(path, ext_name):
    # ext_name is the extension without the leading ".", e.g. "exe" for *.exe files
filelists = []
for eachfilePath, d, file_names in os.walk(path):
for name in file_names:
if name.split(".")[-1].lower() == ext_name.lower():
tempfile = os.path.join(eachfilePath, name)
filelists.append(tempfile)
return filelists
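# Illustrative usage (the paths are hypothetical):
#   get_files("/data", "readme.md", is_lower=True)   # exact file-name matches only
#   get_ext_files("/var/log", "log")                 # every "*.log" file under /var/log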
if __name__ == '__main__':
pass
| 3.390625
| 3
|
calplus/v1/__init__.py
|
nghiadt16/CALplus
| 0
|
12779116
|
<filename>calplus/v1/__init__.py
def public_endpoint(wsgidriver, conf):
# Example:
# from calplus.v1.network import network
# ...
# return [
# ('/path',
# network.Resource())
# ]
return []
| 1.679688
| 2
|
src/tanuki/data_store/index/index.py
|
M-J-Murray/tanuki
| 0
|
12779117
|
from __future__ import annotations
from abc import abstractmethod, abstractproperty
from typing import Any, Generic, TYPE_CHECKING, TypeVar, Union
import numpy as np
if TYPE_CHECKING:
from tanuki.data_store.column_alias import ColumnAlias
from tanuki.data_store.index.pandas_index import PandasIndex
C = TypeVar("C", bound=tuple["ColumnAlias", ...])
class Index(Generic[C]):
@abstractproperty
def name(self: Index[C]) -> Union[str, list[str]]:
raise NotImplementedError()
@abstractproperty
def columns(self: Index[C]) -> list[str]:
raise NotImplementedError()
@abstractmethod
def to_pandas(self) -> PandasIndex[C]:
raise NotImplementedError()
@abstractmethod
def __getitem__(self, item) -> Index[C]:
raise NotImplementedError()
@abstractproperty
def values(self: Index[C]) -> np.ndarray:
raise NotImplementedError()
@abstractmethod
def tolist(self: Index[C]) -> list:
raise NotImplementedError()
@abstractmethod
def equals(self, other: Any) -> bool:
raise NotImplementedError()
@abstractmethod
def __eq__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __ne__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __gt__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __ge__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __lt__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __le__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __len__(self) -> int:
raise NotImplementedError()
@abstractmethod
def __str__(self: Index[C]) -> str:
raise NotImplementedError()
@abstractmethod
def __repr__(self: Index[C]) -> str:
raise NotImplementedError()
| 2.5625
| 3
|
tests/clip_ebagoola.py
|
intrepid-geophysics/intrepid-protobuf-py
| 1
|
12779118
|
#! /usr/bin/env python3
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(asctime)s(%(relativeCreated)6d)[%(threadName)s]%(message)s')
# example of an airborne survey where some of the flight lines get too close to each other
# when gridded, the output contains "tares" that detract
# use this tool to clean up parts of a survey that are "over constrained" and show errors.
# changes X/Y alias
from intrepid import mastertask_pb2 as master
from intrepid.intrepid_tasks_pb2 import DB_Operations
from intrepid.utils import Executor
batch = master.BatchJob()
igtask = batch.IntrepidTask.add()
fmgr = igtask.FileManager
fmgr.Action = DB_Operations.CopyTable
fmgr.Input = "${tutorial}/Intrepid_datasets/EBA_DBs/ebagoola_S..DIR"
fmgr.Output = "./ebagoola_S..DIR"
igtask = batch.IntrepidTask.add()
clip_line = igtask.ClipLine
clip_line.InputFile = "ebagoola_S..DIR"
clip_line.X = "x"
clip_line.Y = "y"
clip_line.LineType = "linetype"
clip_line.Xout = "E_Clip"
clip_line.Yout = "N_Clip"
clip_line.MinimumSeparation = 200.0
clip_line.MinimumSegmentLength = 50
logging.info("\n%s", batch.__str__())
Executor.execute(batch)
| 2.015625
| 2
|
src/server/game_mode.py
|
Tommimon/cunegonda-online
| 0
|
12779119
|
# Handles the game rules and the server-side private data
from server.global_var import GlobalVar
from replicated.game_state import Fase
from server.player_private import PlayerPrivate
from server.deck import Deck, Card
from threading import Timer
from tcp_basics import safe_recv_var
from socket import timeout
# PARAMETERS
TIMEOUT = 0.02
class GameMode:
def __init__(self):
GlobalVar.game_mode = self
self.game_state = GlobalVar.game_state
self.lista_player = []
        self.replicators = [self.game_state.replicator] # start with the game state's replicator
        for p in self.game_state.lista_player:
            self.replicators.append(p.replicator) # add the replicators of all the public players
self.mazzo = Deck()
self.server_socket = None
self.running = True
self.pausa = False
        self.primo_in_prima = 0 # player who leads the first trick (rotates every game)
self.primo = 0
self.ultimo = 3
self.seme_giro = None
self.questo_giro = []
        self.tutti_connessi = False # true iff all 4 players are connected
self.g_disconnessi = []
def attesa(self):
while len(self.lista_player) < 4:
try:
new_socket, new_address = self.server_socket.accept()
new_socket.settimeout(TIMEOUT)
                self.game_state.replicator.sockets.append(new_socket) # only has an effect server side
                new_private = PlayerPrivate(new_socket, len(self.lista_player))
                self.lista_player.append(new_private)
                self.replicators.append(new_private.player_state.replicator) # replicator of the new player state
                print('connected', new_address)
            except timeout:
                pass
            safe_recv_var(self.replicators) # already start receiving, for the checks
        self.tutti_connessi = True
        for g in self.game_state.lista_player: # give the usernames a refresh
g.username.rep_val()
self.dai_carte()
self.game_loop()
def accetta_riconnessione(self):
if len(self.g_disconnessi) > 0:
try:
new_socket, address = self.server_socket.accept()
new_socket.settimeout(TIMEOUT)
                self.game_state.replicator.sockets.append(new_socket) # the old socket of the disconnected player was already removed
                private = self.g_disconnessi.pop()
                private.socket = new_socket # needed so it can be removed again later
                private.player_state.replicator.sockets = [new_socket] # the list was emptied, now put in the new socket
                self.game_state.replicator.refresh_all() # refresh game_state for everyone (this new player alone would be enough)
                for p in self.game_state.lista_player: # refresh everyone for everyone, not efficient but it only
                    p.replicator.refresh_all() # runs when someone leaves and rejoins
                private.player_state.replicator.refresh_all() # refresh only for the right player
if len(self.g_disconnessi) == 0:
self.tutti_connessi = True
except timeout:
pass
def disconnetti(self, private):
sock = private.socket
        self.game_state.replicator.sockets.remove(sock) # remove the socket of the disconnected player
        private.player_state.replicator.sockets = [] # there is only one, so this is fine
        self.g_disconnessi.append(private)
        self.game_state.lista_player[private.player_state.index.val].username.val = '---' # so the others can see it
self.tutti_connessi = False
def dai_carte(self):
self.mazzo.carte = []
self.mazzo.crea_carte()
self.mazzo.mischia()
for giocatore in self.lista_player:
carte = self.mazzo.pesca_n(13)
for c in carte:
giocatore.player_state.mano.val.append(c)
giocatore.player_state.mano.rep_val()
def game_loop(self):
self.game_state.fase_gioco.val = Fase.PASSAGGIO_CARTE
while self.running:
safe_recv_var(self.replicators)
self.accetta_riconnessione()
    def carta_client(self, index_g, carta): # checks the current phase and whether the card can be played, then acts
        giocatore = self.lista_player[index_g] # giocatore is a private player object
        if Card.contiene_carta(giocatore.player_state.mano.val, carta): # if they actually own this card
            if self.game_state.fase_gioco.val == Fase.PASSAGGIO_CARTE: # if we are passing cards, put it among the exchanged ones
                if len(giocatore.player_state.scambiate.val) < 3: # if fewer than 3 have been exchanged already
                    self.metti_in_passate(giocatore, carta)
            elif self.game_state.fase_gioco.val == Fase.GIOCO and (not self.pausa): # if we are playing and not paused
                if index_g == self.game_state.turno.val: # if it is their turn
if (index_g == self.primo or Card.carta_permessa(giocatore.player_state.mano.val,
self.game_state.seme_primo.val, carta)):
self.metti_in_giocata(index_g, carta)
def metti_in_giocata(self, index, carta):
        Card.del_carta(self.lista_player[index].player_state.mano.val, carta) # remove the card from the hand
        self.lista_player[index].player_state.mano.rep_val() # not done automatically, I think
        self.game_state.lista_player[index].carta_giocata.val = carta # put it among the played cards
        self.questo_giro.append(carta) # keep a copy of the played cards in the game mode
        if self.game_state.turno.val == self.primo:
            self.game_state.seme_primo.val = carta.seme # the leader decides the suit of the trick
if self.game_state.turno.val == self.ultimo:
self.risolvi_questo_giro()
else:
turno = (self.game_state.turno.val + 1) % 4
self.game_state.turno.val = turno
def metti_in_passate(self, giocatore, carta):
        Card.del_carta(giocatore.player_state.mano.val, carta) # remove the card from the hand
        giocatore.player_state.mano.rep_val() # not done automatically, I think
        giocatore.player_state.scambiate.val.append(carta) # put it among the exchanged cards
giocatore.player_state.scambiate.rep_val()
self.ceck_fine_passaggio()
def ceck_fine_passaggio(self):
for gioc in self.lista_player:
state = gioc.player_state
if len(state.scambiate.val) < 3:
return
        self.passa_carte() # also takes care of setting self.game_state.fase_gioco.val = GIOCO
def passa_carte(self):
for gioc in self.lista_player:
state = gioc.player_state
            index = (self.lista_player.index(gioc) - 1) % 4 # take the previous player
            for carta in state.scambiate.val:
                self.lista_player[index].player_state.mano.val.append(carta) # pass them the card
                self.lista_player[index].player_state.mano.rep_val()
            self.game_state.fase_gioco.val = Fase.GIOCO
            state.scambiate.val = [] # clear all the exchanged cards
def calcola_punteggio(self):
        punteggio = 10 # base value
        for carta in self.questo_giro: # count the negative points
            if carta.seme == Card.CUORI:
                punteggio -= carta.valore # the card value already equals the negative points for hearts
            elif carta.seme == Card.PICCHE and carta.valore == Card.DONNA: # if it is Cunegonda (the queen of spades)
punteggio -= 26
return punteggio
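    # Worked example (assuming a hearts card with valore 5): a trick containing that
    # heart and the queen of spades scores 10 - 5 - 26 = -21; a trick with no hearts
    # and no queen of spades keeps the base value of 10.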
def trova_vincitore(self):
        val_max = self.questo_giro[0].valore # find the winning card
        index_max = 0
        for carta in self.questo_giro:
            if carta.seme == self.game_state.seme_primo.val: # if it is the leading suit
                if carta.valore > val_max: # if it is higher than the current max
                    val_max = carta.valore
                    index_max = self.questo_giro.index(carta)
        # index_max is counted starting from the current leader, so it is the offset to apply
vincitore = (self.primo + index_max) % 4
return vincitore
def risolvi_questo_giro(self):
punteggio = self.calcola_punteggio()
vincitore = self.trova_vincitore()
print('points: ' + str(punteggio) + ' to ' + str(vincitore))
self.primo = vincitore
self.ultimo = (self.primo - 1) % 4
self.pausa = True
        self.lista_player[vincitore].punteggio += punteggio # assign the points
        self.lista_player[vincitore].carte_prese += self.questo_giro # add all the played cards to the winner's taken pile
        self.questo_giro = [] # clear the local copy
        t = Timer(5, self.fine_turno) # let the cards stay visible for 5 seconds
t.start()
@staticmethod
def ha_preso_carta(giocatore, carta):
for c in giocatore.carte_prese:
if c.seme == carta.seme and c.valore == carta.valore:
return True
return False
def check_cappotto(self):
for g_esaminato in self.lista_player:
            if GameMode.ha_preso_carta(g_esaminato, Card(Card.DONNA, Card.PICCHE)): # if they took Cunegonda
                for val in Card.VALORI:
                    if not GameMode.ha_preso_carta(g_esaminato, Card(val, Card.CUORI)): # a heart is missing
                        return # if the one holding Cunegonda misses a heart, there is no "cappotto"
                # if we get here it is a "cappotto": set everyone to -20 except g_esaminato, who gets 60
for g_da_cambiare in self.lista_player:
if g_da_cambiare == g_esaminato:
g_da_cambiare.punteggio = 60
else:
g_da_cambiare.punteggio = -20
def fine_turno(self):
        for g in self.game_state.lista_player: # clear the played cards in any case
g.carta_giocata.val = Card()
        if len(self.lista_player[0].player_state.mano.val) == 0: # if one player has no cards left (everyone has run out)
            self.check_cappotto()
            for i in range(len(self.lista_player)): # update the total scores for everyone
g_privat = self.lista_player[i]
g_public = self.game_state.lista_player[i]
g_public.punteggio_tot.val = g_public.punteggio_tot.val + g_privat.punteggio
g_privat.punteggio = 0
g_privat.carte_prese = []
            self.game_state.fase_gioco.val = Fase.FINE_PARTITA # so the HUDs display the end of the game
t = Timer(10, self.fine_partita)
t.start()
else:
self.pausa = False
self.game_state.turno.val = self.primo
self.game_state.seme_primo.val = Card.NESSUN_SEME
def fine_partita(self):
self.game_state.cont_partita.val = self.game_state.cont_partita.val + 1
self.pausa = False
self.primo_in_prima = (self.primo_in_prima + 1) % 4
self.primo = self.primo_in_prima
self.ultimo = (self.primo - 1) % 4
self.game_state.turno.val = self.primo
self.game_state.seme_primo.val = Card.NESSUN_SEME
self.dai_carte()
self.game_state.fase_gioco.val = Fase.PASSAGGIO_CARTE
| 2.4375
| 2
|
students/api_view.py
|
mayankgujrathi/LabExamManager
| 0
|
12779120
|
<filename>students/api_view.py
from json import loads
from django.http import HttpRequest, JsonResponse
from django.views.decorators.http import require_http_methods
from .decorators import student_required
from .models import StudentTask, StudentTaskImage
from admins.models import Task
from django.db.utils import IntegrityError
@require_http_methods(["UPDATE", "POST"])
@student_required
def student_profile(request: HttpRequest) -> JsonResponse:
req_type, req_value = '', ''
status, error = 424, 'unable to serve request'
if request.method == 'UPDATE':
body = loads(request.body)
req_type, req_value = body.get('type'), body.get('value')
if req_type == 'first_name':
if req_value.__len__() < 5:
status, error = 400, 'First Name is shorter than 5 letters'
else:
request.user.first_name = req_value
request.user.save()
status, error = 202, ''
return JsonResponse({'status': status, 'error': error})
elif req_type == 'last_name':
if req_value.__len__() < 5:
status, error = 400, 'Last Name is shorter than 5 letters'
else:
request.user.last_name = req_value
request.user.save()
status, error = 202, ''
return JsonResponse({'status': status, 'error': error})
elif req_type == 'address':
print('got',req_value)
if req_value.__len__() < 5:
                status, error = 400, 'Address is shorter than 5 letters'
else:
print('previous', request.user.student_profile.address)
request.user.student_profile.address = req_value
request.user.student_profile.save()
print('after value', request.user.student_profile.address)
status, error = 202, ''
return JsonResponse({'status': status, 'error': error})
elif req_type == 'phone':
if req_value.__len__() != 10:
status, error = 400, 'Invalid Phone Number'
else:
request.user.student_profile.phone = req_value
request.user.student_profile.save()
status, error = 202, ''
return JsonResponse({'status': status, 'error': error})
elif request.FILES.get('image'):
try:
request.user.student_profile.image = request.FILES.get('image')
request.user.student_profile.save()
status, error = 202, ''
except Exception:
status = 400
return JsonResponse({'status': status, 'error': error})
@require_http_methods(["UPDATE"])
@student_required
def update_student_std(request: HttpRequest) -> JsonResponse:
body = loads(request.body)
req_type, req_val = body.get('type'), body.get('value')
status, error = 424, 'unable to serve request'
if req_type == 'year':
try:
if int(req_val) not in (1, 2, 3):
                status, error = 400, 'something went wrong'
else:
request.user.student_profile.current_year = req_val
request.user.student_profile.save()
status, error = 202, ''
except:
status, error = 400, 'something went wrong'
return JsonResponse({'status': status, 'error': error})
elif req_type == 'sem':
try:
if int(req_val) not in (1, 2, 3, 4, 5, 6):
                status, error = 400, 'something went wrong'
else:
request.user.student_profile.current_semester = req_val
request.user.student_profile.save()
status, error = 202, ''
except:
status, error = 400, 'something went wrong'
return JsonResponse({'status': status, 'error': error})
elif req_type == 'shift':
try:
if int(req_val) not in (1, 2):
                status, error = 400, 'something went wrong'
else:
request.user.student_profile.current_shift = req_val
request.user.student_profile.save()
status, error = 202, ''
except:
status, error = 400, 'something went wrong'
return JsonResponse({'status': status, 'error': error})
@require_http_methods(["POST"])
@student_required
def submit_student_task(request: HttpRequest) -> JsonResponse:
status, error = 424, 'unable to serve request'
task = Task.objects.get(id=request.POST.get('task_id')[0])
if not task:
status = 400, "Task Doesn't Exists"
else:
try:
prev = StudentTask.objects.filter(user=request.user, task=task)
if prev.__len__() == 1:
obj = prev[0]
obj.description = request.POST.get('description')
obj.save()
StudentTaskImage.objects.filter(student_task=obj).delete()
for image in request.FILES.values():
student_task_image = StudentTaskImage(image=image, student_task=obj)
student_task_image.save()
status, error = 202, ''
else:
obj = StudentTask(
user=request.user,
task=task,
description=request.POST.get('description'),
)
obj.save()
for image in request.FILES.values():
student_task_image = StudentTaskImage(image=image, student_task=obj)
student_task_image.save()
status, error = 200, ''
except IntegrityError:
status = 400
return JsonResponse({'status': status, 'error': error})
| 2.671875
| 3
|
mysite/spiders/xmlfeed.py
|
easy-test/template-python
| 1
|
12779121
|
<reponame>easy-test/template-python<filename>mysite/spiders/xmlfeed.py
from scrapy.spiders import XMLFeedSpider
class XmlfeedSpider(XMLFeedSpider):
name = 'xmlfeed'
allowed_domains = ['example.com']
start_urls = ['http://example.com/feed.xml']
iterator = 'iternodes' # you can change this; see the docs
itertag = 'item' # change it accordingly
def parse_node(self, response, selector):
item = {}
#item['url'] = selector.select('url').get()
#item['name'] = selector.select('name').get()
#item['description'] = selector.select('description').get()
return item
| 2.671875
| 3
|
plots/lasium_paper/plot_accuracy_based_on_gan.py
|
siavash-khodadadeh/MetaLearning-TF2.0
| 102
|
12779122
|
<gh_stars>100-1000
import cv2
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import numpy as np
def plot_img(img_name, location, index, zoom=0.1):
plt.scatter(index, accs[epochs.index(index)] + 1, color='#0D7377', linewidths=0.5, marker='v')
plt.plot((index, location[0]), (accs[epochs.index(index)], location[1]), '--', color='#0D7377', alpha=1)
img = plt.imread(f'./gan_images/{img_name}.png')
img = cv2.resize(img, (350, 350))
# img = img[50:-50, 50:-50, :]
ax = plt.gca()
im = OffsetImage(img, zoom=zoom)
ab = AnnotationBbox(im, location, xycoords='data', frameon=True, pad=0.2)
ax.add_artist(ab)
ax.update_datalim(np.column_stack(list(location)))
ax.autoscale()
return ab
def smooth_data(accs, weight):
last = accs[0]
for i in range(1, len(accs)):
accs[i] = last * weight + (1 - weight) * accs[i]
last = accs[i]
return accs
epochs = [0, 10, 20, 30, 40, 50, 100, 150, 200, 300, 400, 500]
accs = [51.95, 67.50, 71.26, 77.34, 77.67, 77.35, 78.14, 79.99, 78.21, 77.94, 80.51, 76.49]
accs = smooth_data(accs, 0.7)
accs_ci = [0.66, 0.71, 0.68, 0.62, 0.63, 0.64, 0.63, 0.60, 0.63, 0.64, 0.60, 0.67]
training_from_scratch = [51.64] * len(accs)
bottom = [acc - ci for acc, ci in zip(accs, accs_ci)]
top = [acc + ci for acc, ci in zip(accs, accs_ci)]
plt.plot(epochs, accs, color='b', label='LASIUM-N')
plt.plot(epochs, bottom, '--', color='#32E0C4', alpha=0.2)
plt.plot(epochs, top, '--', color='#32E0C4', alpha=0.2)
plt.plot(epochs, training_from_scratch, '--', color='r', alpha=0.5, label='baseline')
plt.fill_between(epochs, bottom, top, color='#32E0C4', alpha=.1)
plt.xticks([10, 30, 50, 100, 200, 300, 400, 500])
plt.xlabel('# GAN training epochs', fontsize=14)
plt.yticks([40, 50, 60, 70, 80, 100])
plt.ylabel('Accuracy (%)', fontsize=14)
# plt images
plot_img('00_4', location=(10, 85), index=0)
plot_img('10_4', location=(40, 90), index=10)
plot_img('30_4', location=(70, 85), index=30)
plot_img('50_4', location=(100, 90), index=50)
plot_img('100_4', location=(130, 85), index=100)
plot_img('200_4', location=(190, 90), index=200)
plot_img('300_4', location=(300, 85), index=300)
plot_img('400_4', location=(400, 90), index=400)
plot_img('500_4', location=(500, 85), index=500)
plt.scatter(
0, accs[epochs.index(0)] + 1, color='#0D7377', linewidths=0.5, marker='v', label='Generated image at epoch'
)
plt.subplots_adjust(bottom=0.1, top=0.9, right=0.98, left=0.1)
plt.legend(loc='best')
# plt.show()
plt.savefig('./outputs/accuracy_based_on_gan.pdf', dpi=300)
| 2.03125
| 2
|
setup.py
|
moonbot/shotgun-cache-server
| 11
|
12779123
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'shotgunCache/_version.py'
versioneer.versionfile_build = 'shotgunCache/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'shotgunCache' # dirname like 'myproject-1.2.0'
readme = open('README.md').read().strip()
license = open('LICENSE').read().strip()
setup(
name='shotgunCache',
version=versioneer.get_version(),
license=license,
cmdclass=versioneer.get_cmdclass(),
description='Shotgun Cache Server',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/moonbot/shotgun-cache-server',
packages=[
'shotgunCache',
],
scripts=[
'bin/shotgunCache'
],
package_dir={'shotgunCache':
'shotgunCache'},
include_package_data=True,
install_requires=[
'rethinkdb>=2.0.0.post1',
'pyyaml>=3.11',
'ruamel.yaml>=0.8',
'pyzmq>=13.1.0',
'shotgun_api3>=3.0.19',
],
zip_safe=False,
keywords='shotgunCache',
)
| 1.546875
| 2
|
data.py
|
bajcmartinez/CarND-Behavioral-Cloning-P3
| 0
|
12779124
|
<reponame>bajcmartinez/CarND-Behavioral-Cloning-P3
import cv2
import pandas as pd
import numpy as np
import sklearn
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
class Data:
def __init__(self, batch_size=512):
"""
Initializes the data structure and reads the CSV
:param batch_size:
"""
# self.data_path = '/opt/carnd_p3/data'
self.data_path = './data'
self.batch_size = batch_size
self.augmenting_by = 2
self.train = []
self.valid = []
self.load_normal_distributed_data()
def load_normal_distributed_data(self):
"""
Normal distributes the samples according to the steering angle
:return:
"""
df = pd.read_csv('{0}/driving_log.csv'.format(self.data_path),
names=['center', 'left', 'right', 'steering',
'throttle', 'brake', 'speed'],
dtype={'center': np.str, 'left': np.str,
'right': np.str, 'steering': np.float64,
'throttle': np.float64, 'brake': np.float64,
'speed': np.float64}, header=0)
df['steering'].plot.hist(title='Original steering distribution', bins=100)
plt.savefig("./output/distribution_original.png")
plt.gcf().clear()
        # From the plot we see that most samples are straight-line driving (steering == 0), so let's remove most of them
zero_indices = df[df['steering'] == 0].index
df = df.drop(np.random.choice(zero_indices, size=int(len(zero_indices) * 0.95), replace=False))
df['steering'].plot.hist(title='Final steering distribution', bins=100)
plt.savefig("./output/distribution_final.png")
plt.gcf().clear()
sample_x = [None]*len(df)*3
sample_y = [None]*len(df)*3
i = 0
for index, row in df.iterrows():
center_steering = float(row['steering'])
correction = 0.2
left_steering = center_steering + correction
right_steering = center_steering - correction
center_path = row['center'].split('/')[-1]
left_path = row['left'].split('/')[-1]
right_path = row['right'].split('/')[-1]
sample_x[i*3] = center_path
sample_y[i*3] = center_steering
sample_x[i*3+1] = left_path
sample_y[i*3+1] = left_steering
sample_x[i*3+2] = right_path
sample_y[i*3+2] = right_steering
i += 1
sample_x, sample_y = sklearn.utils.shuffle(sample_x, sample_y)
samples = np.column_stack((sample_x, np.array(sample_y).astype(object)))
limit = int(len(samples) * 0.8)
self.train = samples[:limit]
self.valid = samples[limit:]
def train_length(self):
"""
Total number of images for training
:return: int
"""
return len(self.train) * self.augmenting_by
def valid_length(self):
"""
Total number of images for validation
:return: int
"""
return len(self.valid) * self.augmenting_by
def generator(self, samples):
"""
Generates samples to feed the network
:param samples:
:return:
"""
num_samples = len(samples)
batch_size = self.batch_size // self.augmenting_by
while True:
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
gen_x = np.zeros((len(batch_samples)*self.augmenting_by, 160, 320, 3))
gen_y = np.zeros((len(batch_samples)*self.augmenting_by, 1))
for i, batch_sample in enumerate(batch_samples):
img = cv2.imread('{0}/IMG/{1}'.format(self.data_path, batch_sample[0]))
gen_x[i*self.augmenting_by] = img
gen_y[i*self.augmenting_by] = batch_sample[1]
gen_x[i*self.augmenting_by+1] = cv2.flip(img, 1)
gen_y[i*self.augmenting_by+1] = -batch_sample[1]
yield sklearn.utils.shuffle(np.array(gen_x), np.array(gen_y))
def train_generator(self):
return self.generator(self.train)
def valid_generator(self):
return self.generator(self.valid)
if __name__ == '__main__':
data = Data(batch_size=6)
g = data.train_generator()
for x, y in g:
for i, img in enumerate(x):
cv2.imwrite("output/{}.jpg".format(i), img)
print('y:', y)
break
| 2.953125
| 3
|
utils.py
|
byaka/CapybaraMail
| 0
|
12779125
|
<filename>utils.py
# -*- coding: utf-8 -*-
import sys
from importMail import ImportMail_MBox
IS_TTY=sys.stdout.isatty()
consoleColor={
# predefined colors
'fail':'\x1b[91m',
'ok':'\x1b[92m',
'warning':'\x1b[93m',
'okblue':'\x1b[94m',
'header':'\x1b[95m',
# colors
'black':'\x1b[30m',
'red':'\x1b[31m',
'green':'\x1b[32m',
'yellow':'\x1b[33m',
'blue':'\x1b[34m',
'magenta':'\x1b[35m',
'cyan':'\x1b[36m',
'white':'\x1b[37m',
# background colors
'bgblack':'\x1b[40m',
'bgred':'\x1b[41m',
'bggreen':'\x1b[42m',
'bgyellow':'\x1b[43m',
'bgblue':'\x1b[44m',
'bgmagenta':'\x1b[45m',
'bgcyan':'\x1b[46m',
'bgwhite':'\x1b[47m',
# specials
'light':'\x1b[2m',
'bold':'\x1b[1m',
'inverse':'\x1b[7m',
'underline':'\x1b[4m',
'clearLast':'\x1b[F\x1b[K',
'end':'\x1b[0m'
}
if not IS_TTY:
consoleColor={k:'' for k in consoleColor}
#? these data may already be available in `email.MIMEImage`
ATTACHMENT_TYPES={
'audio': ['aiff', 'aac', 'mid', 'midi', 'mp3', 'mp2', '3gp', 'wav'],
'code': ['c', 'cpp', 'c++', 'css', 'cxx', 'h', 'hpp', 'h++', 'html', 'hxx', 'py', 'php', 'pl', 'rb', 'java', 'js', 'xml'],
'crypto': ['asc', 'pgp', 'key'],
'data': ['cfg', 'csv', 'gz', 'json', 'log', 'sql', 'rss', 'tar', 'tgz', 'vcf', 'xls', 'xlsx'],
'document': ['csv', 'doc', 'docx', 'htm', 'html', 'md', 'odt', 'ods', 'odp', 'ps', 'pdf', 'ppt', 'pptx', 'psd', 'txt', 'xls', 'xlsx', 'xml'],
'font': ['eot', 'otf', 'pfa', 'pfb', 'gsf', 'pcf', 'ttf', 'woff'],
'image': ['bmp', 'eps', 'gif', 'ico', 'jpeg', 'jpg', 'png', 'ps', 'psd', 'svg', 'svgz', 'tiff', 'xpm'],
'video': ['avi', 'divx'],
}
ATTACHMENT_TYPES['media']=ATTACHMENT_TYPES['audio']+ATTACHMENT_TYPES['font']+ATTACHMENT_TYPES['image']+ATTACHMENT_TYPES['video']
URI_SCHEMES_PERMANENT=set((
"data", "file", "ftp", "gopher", "http", "https", "imap",
"jabber", "mailto", "news", "telnet", "tftp", "ws", "wss"
))
URI_SCHEMES_PROVISIONAL=set((
"bitcoin", "chrome", "cvs", "feed", "git", "irc", "magnet",
"sftp", "smtp", "ssh", "steam", "svn"
))
URI_SCHEMES = URI_SCHEMES_PERMANENT.union(URI_SCHEMES_PROVISIONAL)
def isInt(v):
return v is not True and v is not False and isinstance(v, int)
# SQUISH_MIME_RULES = (
# # IMPORTANT: Order matters a great deal here! Full mime-types should come
# # first, with the shortest codes preceding the longer ones.
# ('text/plain', 'tp/'),
# ('text/html', 'h/'),
# ('application/zip', 'z/'),
# ('application/json', 'j/'),
# ('application/pdf', 'p/'),
# ('application/rtf', 'r/'),
# ('application/octet-stream', 'o/'),
# ('application/msword', 'ms/d'),
# ('application/vnd.ms-excel', 'ms/x'),
# ('application/vnd.ms-access', 'ms/m'),
# ('application/vnd.ms-powerpoint', 'ms/p'),
# ('application/pgp-keys', 'pgp/k'),
# ('application/pgp-signature', 'pgp/s'),
# ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'ms/xx'),
# ('application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'ms/dx'),
# ('application/vnd.openxmlformats-officedocument.presentationml.presentation', 'ms/px'),
# # These are prefixes that apply to many document types
# ('application/vnd.openxmlformats-officedocument.', 'msx/'),
# ('application/vnd.', 'vnd/'),
# ('application/x-', 'x/'),
# ('application/', '/'),
# ('video/', 'v/'),
# ('audio/', 'a/'),
# ('image/', 'i/'),
# ('text/', 't/'))
# def squish_mimetype(mimetype):
# for prefix, rep in SQUISH_MIME_RULES:
# if mimetype.startswith(prefix):
# return rep + mimetype[len(prefix):]
# return mimetype
# def unsquish_mimetype(mimetype):
# for prefix, rep in reversed(SQUISH_MIME_RULES):
# if mimetype.startswith(rep):
# return prefix + mimetype[len(rep):]
# return mimetype
class RepairDialogLinking(object):
problemName='Parent message missed'
def __init__(self, store):
self.__msg_progress=consoleColor['clearLast']+'%i (%i missed) from %i'
self.store=store
def count(self, user):
ids=(self.store.userId(user), 'node_problem', self.store.problemId(self.problemName))
return self.store.db.countBacklinks(ids)
def find_broken_msgs_without_dialogs(self, user):
tArr=[]
g=self.store.db.iterBranch((self.store.userId(user), 'node_date'), strictMode=True, recursive=True, treeMode=True, safeMode=False, calcProperties=False, skipLinkChecking=True)
for ids, (props, l) in g:
if len(ids)<4: continue
if ids[3]!='node_msg': g.send(False) # skip not-msgs nodes
if len(ids)>5: g.send(False) # skip branch inside msgs
if len(ids)==5:
try:
self.store.dialogFind_byMsgIds(ids, strictMode=True, asThread=True)
except Exception:
tArr.append(ids)
return tArr
def run(self, user):
userId=self.store.userId(user)
problemId=self.store.problemId(self.problemName)
ids=(userId, 'node_problem', problemId)
c=self.store.db.countBacklinks(ids)
if not c: return
parser=ImportMail_MBox(None)
i1=i2=0
if IS_TTY: print
for idsCur, (propsCur, lCur) in self.store.db.iterBacklinks(ids, recursive=False, allowContextSwitch=False):
msgIds=idsCur[:-1]
msgId=msgIds[-1]
dateId=idsCur[-4]
idsFrom=self.store.dialogFind_byMsg(userId, msgId, date=dateId, asThread=True)
oldDialog=(userId, 'node_dialog', self.store.dialogId(self.store._idsConv_thread2dialog(idsFrom, onlyDialog=True)))
data=self.store.msgGet(userId, msgId, date=dateId, strictMode=True, onlyPublic=False, resolveAttachments=False, andLabels=False)
raw=self.store._fileGet('raw', data['_raw'])
headers=parser._parseHeaders(parser._prepMsgObj(raw))
replyPoint=self.store._extract_replyPoint(headers)
idsTo=self.store.dialogFind_byMsg(userId, replyPoint, asThread=True)
if idsTo:
self.store.db.move(idsFrom, idsTo+(msgId,), onlyIfExist=True, strictMode=True, fixLinks=True, recursive=True)
self.store.db.remove(oldDialog)
self.store.db.remove(idsCur)
i1+=1
else: i2+=1
print self.__msg_progress%(i1, i2, c)
| 2.265625
| 2
|
make_eval_script.py
|
dptam/neural_wfst
| 0
|
12779126
|
import os
import sys
if __name__ == "__main__":
train_file = sys.argv[1]
dev_file = sys.argv[2]
test_folder = sys.argv[3]
folder = sys.argv[4]
param_file = sys.argv[5]
partition = sys.argv[6]
bash_script = os.path.join(folder, "parallel_eval_model.sh")
with open(bash_script, 'w+') as f:
f.write("#!/usr/bin/env bash \n")
        for i in range(10):
            test_file = os.path.join(test_folder, ('partition_' + str(i)))
            folder_file = os.path.join(folder, ('partition_' + str(i)))
            error_file = os.path.join(folder_file, "error")
            output_file = os.path.join(folder_file, "output")
            command = "sbatch --partition={} --gres=gpu:1 --error={} --output={} --mem=15GB test.sh {} {} {} {} {} \n".format(partition, error_file, output_file, train_file, dev_file, test_file, folder_file, param_file)
            f.write(command + '\n')
| 1.929688
| 2
|
python/mlp/centroidal/none.py
|
daeunSong/multicontact-locomotion-planning
| 31
|
12779127
|
from mlp.utils.requirements import Requirements as CentroidalInputsNone
from mlp.utils.requirements import Requirements as CentroidalOutputsNone
def generate_centroidal_none(cfg, cs, cs_initGuess=None, fullBody=None, viewer=None, first_iter = True):
print("Centroidal trajectory not computed !")
| 1.820313
| 2
|
majority_report/views.py
|
jdelasoie/majority-report-vue
| 0
|
12779128
|
<gh_stars>0
from django.contrib.auth.models import User
from django.shortcuts import render
from rest_framework import viewsets
def index(request):
return render(request, 'index.html')
| 1.34375
| 1
|
data/input_pipeline.py
|
TropComplique/tracking-by-colorizing
| 1
|
12779129
|
import tensorflow.compat.v1 as tf
"""
I assume that each file represents a video.
All videos have minimal dimension equal to 256 and fps equal to 6.
Median video length is ~738 frames.
"""
NUM_FRAMES = 4 # must be greater or equal to 2
SIZE = 256 # must be less or equal to 256
class Pipeline:
def __init__(self, filenames, is_training, batch_size):
"""
Arguments:
filenames: a list of strings, paths to tfrecords files.
is_training: a boolean.
batch_size: an integer.
"""
self.is_training = is_training
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.shuffle(len(filenames)) if is_training else dataset
dataset = dataset.repeat(None if is_training else 1)
def get_subdataset(f):
dataset = tf.data.TFRecordDataset(f)
dataset = dataset.window(NUM_FRAMES, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda x: x.batch(NUM_FRAMES, drop_remainder=True))
dataset = dataset.map(self.parse_and_preprocess)
dataset = dataset.shuffle(1000) if is_training else dataset
return dataset
dataset = dataset.flat_map(get_subdataset)
dataset = dataset.shuffle(20000) if is_training else dataset
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
self.dataset = dataset
def parse_and_preprocess(self, examples):
"""
Arguments:
examples: a string tensor with shape [NUM_FRAMES].
Returns:
a uint8 tensor with shape [NUM_FRAMES, SIZE, SIZE, 2].
"""
features = {
'image': tf.FixedLenFeature([], tf.string),
'labels': tf.FixedLenFeature([], tf.string)
}
images_and_labels = []
for i in range(NUM_FRAMES):
parsed_features = tf.parse_single_example(examples[i], features)
image = tf.image.decode_jpeg(parsed_features['image'], channels=1)
labels = tf.image.decode_png(parsed_features['labels'], channels=1)
images_and_labels.append(tf.concat([image, labels], axis=2))
x = tf.stack(images_and_labels, axis=0)
# it has shape [NUM_FRAMES, h, w, 2]
if not self.is_training:
shape = tf.shape(x)
h, w = shape[1], shape[2]
offset_height = (h - SIZE) // 2
offset_width = (w - SIZE) // 2
x = tf.image.crop_to_bounding_box(x, offset_height, offset_width, SIZE, SIZE)
else:
do_flip = tf.less(tf.random.uniform([]), 0.5)
x = tf.cond(do_flip, lambda: tf.image.flip_left_right(x), lambda: x)
x = tf.image.random_crop(x, [NUM_FRAMES, SIZE, SIZE, 2])
return x
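# Minimal usage sketch (the file name is hypothetical):
#   pipeline = Pipeline(['videos-00000.tfrecord'], is_training=True, batch_size=8)
#   # pipeline.dataset then yields uint8 batches of shape [8, NUM_FRAMES, SIZE, SIZE, 2]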
| 2.765625
| 3
|
apps/funcionario/migrations/0011_funcionario_imagem.py
|
diegocostacmp/gestao_rh
| 0
|
12779130
|
<gh_stars>0
# Generated by Django 2.1.1 on 2020-02-24 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("funcionario", "0010_funcionario_de_ferias"),
]
operations = [
migrations.AddField(
model_name="funcionario",
name="imagem",
field=models.ImageField(blank=True, upload_to=""),
),
]
| 1.367188
| 1
|
config/plugins/pbh.py
|
sg893052/sonic-utilities
| 0
|
12779131
|
"""
This CLI plugin was auto-generated by using 'sonic-cli-gen' utility, BUT
it was manually modified to meet the PBH HLD requirements.
PBH HLD - https://github.com/Azure/SONiC/pull/773
CLI Auto-generation tool HLD - https://github.com/Azure/SONiC/pull/78
"""
import click
import json
import ipaddress
import re
import utilities_common.cli as clicommon
from show.plugins.pbh import deserialize_pbh_counters
GRE_KEY_RE = r"^(0x){1}[a-fA-F0-9]{1,8}/(0x){1}[a-fA-F0-9]{1,8}$"
ETHER_TYPE_RE = r"^(0x){1}[a-fA-F0-9]{1,4}$"
L4_DST_PORT_RE = ETHER_TYPE_RE
INNER_ETHER_TYPE_RE = ETHER_TYPE_RE
IP_PROTOCOL_RE = r"^(0x){1}[a-fA-F0-9]{1,2}$"
IPV6_NEXT_HEADER_RE = IP_PROTOCOL_RE
HASH_FIELD_VALUE_LIST = [
"INNER_IP_PROTOCOL",
"INNER_L4_DST_PORT",
"INNER_L4_SRC_PORT",
"INNER_DST_IPV4",
"INNER_SRC_IPV4",
"INNER_DST_IPV6",
"INNER_SRC_IPV6"
]
PACKET_ACTION_VALUE_LIST = [
"SET_ECMP_HASH",
"SET_LAG_HASH"
]
FLOW_COUNTER_VALUE_LIST = [
"DISABLED",
"ENABLED"
]
PBH_TABLE_CDB = "PBH_TABLE"
PBH_RULE_CDB = "PBH_RULE"
PBH_HASH_CDB = "PBH_HASH"
PBH_HASH_FIELD_CDB = "PBH_HASH_FIELD"
PBH_TABLE_INTERFACE_LIST = "interface_list"
PBH_TABLE_DESCRIPTION = "description"
PBH_RULE_PRIORITY = "priority"
PBH_RULE_GRE_KEY = "gre_key"
PBH_RULE_ETHER_TYPE = "ether_type"
PBH_RULE_IP_PROTOCOL = "ip_protocol"
PBH_RULE_IPV6_NEXT_HEADER = "ipv6_next_header"
PBH_RULE_L4_DST_PORT = "l4_dst_port"
PBH_RULE_INNER_ETHER_TYPE = "inner_ether_type"
PBH_RULE_HASH = "hash"
PBH_RULE_PACKET_ACTION = "packet_action"
PBH_RULE_FLOW_COUNTER = "flow_counter"
PBH_HASH_HASH_FIELD_LIST = "hash_field_list"
PBH_HASH_FIELD_HASH_FIELD = "hash_field"
PBH_HASH_FIELD_IP_MASK = "ip_mask"
PBH_HASH_FIELD_SEQUENCE_ID = "sequence_id"
PBH_CAPABILITIES_SDB = "PBH_CAPABILITIES"
PBH_TABLE_CAPABILITIES_KEY = "table"
PBH_RULE_CAPABILITIES_KEY = "rule"
PBH_HASH_CAPABILITIES_KEY = "hash"
PBH_HASH_FIELD_CAPABILITIES_KEY = "hash-field"
PBH_ADD = "ADD"
PBH_UPDATE = "UPDATE"
PBH_REMOVE = "REMOVE"
PBH_COUNTERS_LOCATION = "/tmp/.pbh_counters.txt"
#
# DB interface --------------------------------------------------------------------------------------------------------
#
def add_entry(db, table, key, data):
""" Add new entry in table """
cfg = db.get_config()
cfg.setdefault(table, {})
if key in cfg[table]:
raise click.ClickException("{}{}{} already exists in Config DB".format(
table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key)
)
)
cfg[table][key] = data
db.set_entry(table, key, data)
def update_entry(db, cap, table, key, data):
""" Update entry in table and validate configuration.
If field value in data is None, the field is deleted
"""
field_root = "{}{}{}".format(table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key))
cfg = db.get_config()
cfg.setdefault(table, {})
if key not in cfg[table]:
raise click.ClickException("{} doesn't exist in Config DB".format(field_root))
for field, value in data.items():
if field not in cap:
raise click.ClickException(
"{}{}{} doesn't have a configuration capabilities".format(
field_root, db.KEY_SEPARATOR, field
)
)
if value is None: # HDEL
if field in cfg[table][key]:
if PBH_REMOVE in cap[field]:
cfg[table][key].pop(field)
else:
raise click.ClickException(
"Failed to remove {}{}{}: operation is prohibited".format(
field_root, db.KEY_SEPARATOR, field
)
)
else:
raise click.ClickException(
"Failed to remove {}{}{}: field doesn't exist".format(
field_root, db.KEY_SEPARATOR, field
)
)
else: # HSET
if field in cfg[table][key]:
if PBH_UPDATE not in cap[field]:
raise click.ClickException(
"Failed to update {}{}{}: operation is prohibited".format(
field_root, db.KEY_SEPARATOR, field
)
)
else:
if PBH_ADD not in cap[field]:
raise click.ClickException(
"Failed to add {}{}{}: operation is prohibited".format(
field_root, db.KEY_SEPARATOR, field
)
)
cfg[table][key][field] = value
db.set_entry(table, key, cfg[table][key])
def del_entry(db, table, key):
""" Delete entry in table """
cfg = db.get_config()
cfg.setdefault(table, {})
if key not in cfg[table]:
raise click.ClickException("{}{}{} doesn't exist in Config DB".format(
table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key)
)
)
cfg[table].pop(key)
db.set_entry(table, key, None)
def is_exist_in_db(db, table, key):
""" Check if provided hash already exists in Config DB
Args:
db: reference to Config DB
table: table to search in Config DB
key: key to search in Config DB
Returns:
bool: The return value. True for success, False otherwise
"""
if (not table) or (not key):
return False
if not db.get_entry(table, key):
return False
return True
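# Illustrative call (the key and field values are hypothetical), mirroring how the
# CLI handlers further below use these helpers:
#   add_entry(db, PBH_TABLE_CDB, "pbh_table1",
#             {PBH_TABLE_INTERFACE_LIST: "Ethernet0", PBH_TABLE_DESCRIPTION: "vxlan"})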
#
# PBH validators ------------------------------------------------------------------------------------------------------
#
def table_name_validator(ctx, db, table_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_TABLE_CDB), str(table_name)):
raise click.UsageError(
"Invalid value for \"TABLE_NAME\": {} is not a valid PBH table".format(table_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_TABLE_CDB), str(table_name)):
raise click.UsageError(
"Invalid value for \"TABLE_NAME\": {} is a valid PBH table".format(table_name), ctx
)
def rule_name_validator(ctx, db, table_name, rule_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_RULE_CDB), (str(table_name), str(rule_name))):
raise click.UsageError(
"Invalid value for \"RULE_NAME\": {} is not a valid PBH rule".format(rule_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_RULE_CDB), (str(table_name), str(rule_name))):
raise click.UsageError(
"Invalid value for \"RULE_NAME\": {} is a valid PBH rule".format(rule_name), ctx
)
def hash_name_validator(ctx, db, hash_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_HASH_CDB), str(hash_name)):
raise click.UsageError(
"Invalid value for \"HASH_NAME\": {} is not a valid PBH hash".format(hash_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_HASH_CDB), str(hash_name)):
raise click.UsageError(
"Invalid value for \"HASH_NAME\": {} is a valid PBH hash".format(hash_name), ctx
)
def hash_field_name_validator(ctx, db, hash_field_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hash_field_name)):
raise click.UsageError(
"Invalid value for \"HASH_FIELD_NAME\": {} is not a valid PBH hash field".format(hash_field_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hash_field_name)):
raise click.UsageError(
"Invalid value for \"HASH_FIELD_NAME\": {} is a valid PBH hash field".format(hash_field_name), ctx
)
def interface_list_validator(ctx, db, interface_list):
for intf in interface_list.split(','):
if not (clicommon.is_valid_port(db, str(intf)) or clicommon.is_valid_portchannel(db, str(intf))):
raise click.UsageError(
"Invalid value for \"--interface-list\": {} is not a valid interface".format(intf), ctx
)
def hash_field_list_validator(ctx, db, hash_field_list):
for hfield in hash_field_list.split(','):
if not is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hfield)):
raise click.UsageError(
"Invalid value for \"--hash-field-list\": {} is not a valid PBH hash field".format(hfield), ctx
)
def hash_validator(ctx, db, hash):
if not is_exist_in_db(db, str(PBH_HASH_CDB), str(hash)):
raise click.UsageError(
"Invalid value for \"--hash\": {} is not a valid PBH hash".format(hash), ctx
)
def re_match(ctx, param, value, regexp):
""" Regexp validation of given PBH rule parameter
Args:
ctx: click context
param: click parameter context
value: value to validate
regexp: regular expression
Return:
str: validated value
"""
if re.match(regexp, str(value)) is None:
raise click.UsageError(
"Invalid value for {}: {} is ill-formed".format(param.get_error_hint(ctx), value), ctx
)
return value
def match_validator(ctx, param, value):
""" Check if PBH rule options are valid
Args:
ctx: click context
param: click parameter context
value: value of parameter
Returns:
str: validated parameter
"""
if value is not None:
if param.name == PBH_RULE_GRE_KEY:
return re_match(ctx, param, value, GRE_KEY_RE)
elif param.name == PBH_RULE_ETHER_TYPE:
return re_match(ctx, param, value, ETHER_TYPE_RE)
elif param.name == PBH_RULE_IP_PROTOCOL:
return re_match(ctx, param, value, IP_PROTOCOL_RE)
elif param.name == PBH_RULE_IPV6_NEXT_HEADER:
return re_match(ctx, param, value, IPV6_NEXT_HEADER_RE)
elif param.name == PBH_RULE_L4_DST_PORT:
return re_match(ctx, param, value, L4_DST_PORT_RE)
elif param.name == PBH_RULE_INNER_ETHER_TYPE:
return re_match(ctx, param, value, INNER_ETHER_TYPE_RE)
def ip_mask_validator(ctx, param, value):
""" Check if PBH hash field IP mask option is valid
Args:
ctx: click context
param: click parameter context
value: value of parameter
Returns:
str: validated parameter
"""
if value is not None:
try:
ip = ipaddress.ip_address(value)
except Exception as err:
raise click.UsageError("Invalid value for {}: {}".format(param.get_error_hint(ctx), err), ctx)
return str(ip)
def hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask):
""" Function to validate whether --hash-field value
corresponds to the --ip-mask value
Args:
ctx: click context
hash_field: native hash field value
ip_mask: ip address or None
"""
hf_no_ip = ["INNER_IP_PROTOCOL", "INNER_L4_DST_PORT", "INNER_L4_SRC_PORT"]
if ip_mask is None:
if hash_field not in hf_no_ip:
raise click.UsageError(
"Invalid value for \"--hash-field\": invalid choice: {}. (choose from {} when no \"--ip-mask\" is provided)".format(
hash_field, ", ".join(hf_no_ip)
), ctx
)
return
hf_v4 = ["INNER_DST_IPV4", "INNER_SRC_IPV4"]
hf_v6 = ["INNER_DST_IPV6", "INNER_SRC_IPV6"]
if not ((hash_field in hf_v4) or (hash_field in hf_v6)):
raise click.UsageError(
"Invalid value for \"--hash-field\": invalid choice: {}. (choose from {} when \"--ip-mask\" is provided)".format(
hash_field, ", ".join(hf_v4 + hf_v6)
), ctx
)
ip_ver = ipaddress.ip_address(ip_mask).version
if (hash_field in hf_v4) and (ip_ver != 4):
raise click.UsageError(
"Invalid value for \"--ip-mask\": {} is not compatible with {}".format(
ip_mask, hash_field
), ctx
)
if (hash_field in hf_v6) and (ip_ver != 6):
raise click.UsageError(
"Invalid value for \"--ip-mask\": {} is not compatible with {}".format(
ip_mask, hash_field
), ctx
)
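# Illustrative examples (editor-added sketch, not part of the original validation flow):
#   hash_field_to_ip_mask_correspondence_validator(ctx, "INNER_IP_PROTOCOL", None)        -> passes (no mask required)
#   hash_field_to_ip_mask_correspondence_validator(ctx, "INNER_SRC_IPV4", "255.255.0.0")  -> passes (IPv4 mask for an IPv4 field)
#   hash_field_to_ip_mask_correspondence_validator(ctx, "INNER_SRC_IPV4", "ffff::")       -> raises UsageError (IPv6 mask for an IPv4 field)
#   hash_field_to_ip_mask_correspondence_validator(ctx, "INNER_L4_DST_PORT", "255.0.0.0") -> raises UsageError (mask not allowed for this field)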
def hash_field_to_ip_mask_validator(ctx, db, hash_field_name, hash_field, ip_mask, is_update=True):
""" Function to validate --hash-field and --ip-mask
correspondence, during add/update flow
Args:
ctx: click context
db: reference to Config DB
hash_field_name: name of the hash-field
hash_field: native hash field value
ip_mask: ip address
is_update: update flow flag
"""
if not is_update:
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
return
if (hash_field is None) and (ip_mask is None):
return
if (hash_field is not None) and (ip_mask is not None):
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
return
hf_obj = db.get_entry(str(PBH_HASH_FIELD_CDB), str(hash_field_name))
if not hf_obj:
raise click.ClickException(
"Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH hash field".format(
hash_field_name
)
)
if hash_field is None:
if PBH_HASH_FIELD_HASH_FIELD not in hf_obj:
raise click.ClickException(
"Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH field".format(
PBH_HASH_FIELD_HASH_FIELD
)
)
hash_field_to_ip_mask_correspondence_validator(ctx, hf_obj[PBH_HASH_FIELD_HASH_FIELD], ip_mask)
else:
if PBH_HASH_FIELD_IP_MASK in hf_obj:
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, hf_obj[PBH_HASH_FIELD_IP_MASK])
else:
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
#
# PBH helpers ---------------------------------------------------------------------------------------------------------
#
def serialize_pbh_counters(obj):
""" Helper that performs PBH counters serialization.
in = {
('pbh_table1', 'pbh_rule1'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'},
...
('pbh_tableN', 'pbh_ruleN'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'}
}
out = [
{
"key": ["pbh_table1", "<KEY>"],
"value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"}
},
...
{
"key": ["pbh_tableN", "<KEY>"],
"value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"}
}
]
Args:
obj: counters dict.
"""
def remap_keys(obj):
return [{'key': k, 'value': v} for k, v in obj.items()]
try:
with open(PBH_COUNTERS_LOCATION, 'w') as f:
json.dump(remap_keys(obj), f)
    except IOError:
        # best-effort persistence: failure to write the counters cache is silently ignored
        pass
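# Editor-added usage sketch (hypothetical data, illustrating the in/out shapes documented above):
#   serialize_pbh_counters({("pbh_table1", "pbh_rule1"): {"SAI_ACL_COUNTER_ATTR_PACKETS": "0"}})
#   writes the following JSON to PBH_COUNTERS_LOCATION:
#   [{"key": ["pbh_table1", "pbh_rule1"], "value": {"SAI_ACL_COUNTER_ATTR_PACKETS": "0"}}]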
def update_pbh_counters(table_name, rule_name):
""" Helper that performs PBH counters update """
pbh_counters = deserialize_pbh_counters()
key_to_del = table_name, rule_name
if key_to_del in pbh_counters:
del pbh_counters[key_to_del]
serialize_pbh_counters(pbh_counters)
def pbh_capabilities_query(db, key):
""" Query PBH capabilities """
sdb_id = db.STATE_DB
sdb_sep = db.get_db_separator(sdb_id)
cap_map = db.get_all(sdb_id, "{}{}{}".format(str(PBH_CAPABILITIES_SDB), sdb_sep, str(key)))
if not cap_map:
return None
return cap_map
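# Editor-added note: assuming PBH_CAPABILITIES_SDB resolves to "PBH_CAPABILITIES" and the
# STATE_DB separator is "|", pbh_capabilities_query(db, "table") reads every field of the
# "PBH_CAPABILITIES|table" hash and returns it as a dict (or None when the hash is empty).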
def pbh_match_count(db, table, key, data):
""" Count PBH rule match fields """
field_map = db.get_entry(table, key)
match_total = 0
match_count = 0
if PBH_RULE_GRE_KEY in field_map:
if PBH_RULE_GRE_KEY in data:
match_count += 1
match_total += 1
if PBH_RULE_ETHER_TYPE in field_map:
if PBH_RULE_ETHER_TYPE in data:
match_count += 1
match_total += 1
if PBH_RULE_IP_PROTOCOL in field_map:
if PBH_RULE_IP_PROTOCOL in data:
match_count += 1
match_total += 1
if PBH_RULE_IPV6_NEXT_HEADER in field_map:
if PBH_RULE_IPV6_NEXT_HEADER in data:
match_count += 1
match_total += 1
if PBH_RULE_L4_DST_PORT in field_map:
if PBH_RULE_L4_DST_PORT in data:
match_count += 1
match_total += 1
if PBH_RULE_INNER_ETHER_TYPE in field_map:
if PBH_RULE_INNER_ETHER_TYPE in data:
match_count += 1
match_total += 1
return match_total, match_count
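# Editor-added sketch: a more compact, behaviorally equivalent counting helper
# (hypothetical, not referenced by the CLI below; it reuses the PBH_RULE_* match-field constants).
PBH_RULE_MATCH_FIELDS = (
    PBH_RULE_GRE_KEY, PBH_RULE_ETHER_TYPE, PBH_RULE_IP_PROTOCOL,
    PBH_RULE_IPV6_NEXT_HEADER, PBH_RULE_L4_DST_PORT, PBH_RULE_INNER_ETHER_TYPE,
)
def pbh_match_count_compact(db, table, key, data):
    """ Count configured PBH rule match fields and how many of them appear in 'data' """
    field_map = db.get_entry(table, key)
    match_total = sum(1 for field in PBH_RULE_MATCH_FIELDS if field in field_map)
    match_count = sum(1 for field in PBH_RULE_MATCH_FIELDS if field in field_map and field in data)
    return match_total, match_count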
def exit_with_error(*args, **kwargs):
""" Print a message and abort CLI """
click.secho(*args, **kwargs)
raise click.Abort()
#
# PBH CLI -------------------------------------------------------------------------------------------------------------
#
@click.group(
name='pbh',
cls=clicommon.AliasedGroup
)
def PBH():
""" Configure PBH (Policy based hashing) feature """
pass
#
# PBH hash field ------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="hash-field",
cls=clicommon.AliasedGroup
)
def PBH_HASH_FIELD():
""" Configure PBH hash field """
pass
@PBH_HASH_FIELD.command(name="add")
@click.argument(
"hash-field-name",
nargs=1,
required=True
)
@click.option(
"--hash-field",
help="Configures native hash field for this hash field",
required=True,
type=click.Choice(HASH_FIELD_VALUE_LIST)
)
@click.option(
"--ip-mask",
help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6""",
callback=ip_mask_validator
)
@click.option(
"--sequence-id",
help="Configures in which order the fields are hashed and defines which fields should be associative",
required=True,
type=click.INT
)
@clicommon.pass_db
def PBH_HASH_FIELD_add(db, hash_field_name, hash_field, ip_mask, sequence_id):
""" Add object to PBH_HASH_FIELD table """
ctx = click.get_current_context()
hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name, False)
hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask, False)
table = str(PBH_HASH_FIELD_CDB)
key = str(hash_field_name)
data = {}
if hash_field is not None:
data[PBH_HASH_FIELD_HASH_FIELD] = hash_field
if ip_mask is not None:
data[PBH_HASH_FIELD_IP_MASK] = ip_mask
if sequence_id is not None:
data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id
if not data:
exit_with_error("Error: Failed to add PBH hash field: options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH_FIELD.command(name="update")
@click.argument(
"hash-field-name",
nargs=1,
required=True
)
@click.option(
"--hash-field",
help="Configures native hash field for this hash field",
type=click.Choice(HASH_FIELD_VALUE_LIST)
)
@click.option(
"--ip-mask",
help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6 """,
callback=ip_mask_validator
)
@click.option(
"--sequence-id",
help="Configures in which order the fields are hashed and defines which fields should be associative",
type=click.INT
)
@clicommon.pass_db
def PBH_HASH_FIELD_update(db, hash_field_name, hash_field, ip_mask, sequence_id):
""" Update object in PBH_HASH_FIELD table """
ctx = click.get_current_context()
hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name)
hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask)
table = str(PBH_HASH_FIELD_CDB)
key = str(hash_field_name)
data = {}
if hash_field is not None:
data[PBH_HASH_FIELD_HASH_FIELD] = hash_field
if ip_mask is not None:
data[PBH_HASH_FIELD_IP_MASK] = ip_mask
if sequence_id is not None:
data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id
if not data:
exit_with_error("Error: Failed to update PBH hash field: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_HASH_FIELD_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH hash field capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH_FIELD.command(name="delete")
@click.argument(
"hash-field-name",
nargs=1,
required=True
)
@clicommon.pass_db
def PBH_HASH_FIELD_delete(db, hash_field_name):
""" Delete object from PBH_HASH_FIELD table """
ctx = click.get_current_context()
hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name)
table = str(PBH_HASH_FIELD_CDB)
key = str(hash_field_name)
try:
del_entry(db.cfgdb_pipe, table, key)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH hash ------------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="hash",
cls=clicommon.AliasedGroup
)
def PBH_HASH():
""" Configure PBH hash """
pass
@PBH_HASH.command(name="add")
@click.argument(
"hash-name",
nargs=1,
required=True
)
@click.option(
"--hash-field-list",
help="The list of hash fields to apply with this hash",
required=True
)
@clicommon.pass_db
def PBH_HASH_add(db, hash_name, hash_field_list):
""" Add object to PBH_HASH table """
ctx = click.get_current_context()
hash_name_validator(ctx, db.cfgdb_pipe, hash_name, False)
table = str(PBH_HASH_CDB)
key = str(hash_name)
data = {}
if hash_field_list is not None:
hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list)
data[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",")
if not data:
exit_with_error("Error: Failed to add PBH hash: options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH.command(name="update")
@click.argument(
"hash-name",
nargs=1,
required=True
)
@click.option(
"--hash-field-list",
help="The list of hash fields to apply with this hash"
)
@clicommon.pass_db
def PBH_HASH_update(db, hash_name, hash_field_list):
""" Update object in PBH_HASH table """
ctx = click.get_current_context()
hash_name_validator(ctx, db.cfgdb_pipe, hash_name)
table = str(PBH_HASH_CDB)
key = str(hash_name)
data = {}
if hash_field_list is not None:
hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list)
data[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",")
if not data:
exit_with_error("Error: Failed to update PBH hash: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_HASH_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH hash capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH.command(name="delete")
@click.argument(
"hash-name",
nargs=1,
required=True
)
@clicommon.pass_db
def PBH_HASH_delete(db, hash_name):
""" Delete object from PBH_HASH table """
ctx = click.get_current_context()
hash_name_validator(ctx, db.cfgdb_pipe, hash_name)
table = str(PBH_HASH_CDB)
key = str(hash_name)
try:
del_entry(db.cfgdb_pipe, table, key)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH rule ------------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="rule",
cls=clicommon.AliasedGroup
)
def PBH_RULE():
""" Configure PBH rule """
pass
@PBH_RULE.command(name="add")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@click.option(
"--priority",
help="Configures priority for this rule",
required=True,
type=click.INT
)
@click.option(
"--gre-key",
help="Configures packet match for this rule: GRE key (value/mask)",
callback=match_validator
)
@click.option(
"--ether-type",
help="Configures packet match for this rule: EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--ip-protocol",
help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--ipv6-next-header",
help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--l4-dst-port",
help="Configures packet match for this rule: L4 destination port",
callback=match_validator
)
@click.option(
"--inner-ether-type",
help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--hash",
help="The hash to apply with this rule",
required=True
)
@click.option(
"--packet-action",
help="Configures packet action for this rule",
type=click.Choice(PACKET_ACTION_VALUE_LIST)
)
@click.option(
"--flow-counter",
help="Enables/Disables packet/byte counter for this rule",
type=click.Choice(FLOW_COUNTER_VALUE_LIST)
)
@clicommon.pass_db
def PBH_RULE_add(
db,
table_name,
rule_name,
priority,
gre_key,
ether_type,
ip_protocol,
ipv6_next_header,
l4_dst_port,
inner_ether_type,
hash,
packet_action,
flow_counter
):
""" Add object to PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name, False)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
data = {}
match_count = 0
if priority is not None:
data[PBH_RULE_PRIORITY] = priority
if gre_key is not None:
data[PBH_RULE_GRE_KEY] = gre_key
match_count += 1
if ether_type is not None:
data[PBH_RULE_ETHER_TYPE] = ether_type
match_count += 1
if ip_protocol is not None:
data[PBH_RULE_IP_PROTOCOL] = ip_protocol
match_count += 1
if ipv6_next_header is not None:
data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header
match_count += 1
if l4_dst_port is not None:
data[PBH_RULE_L4_DST_PORT] = l4_dst_port
match_count += 1
if inner_ether_type is not None:
data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type
match_count += 1
if hash is not None:
hash_validator(ctx, db.cfgdb_pipe, hash)
data[PBH_RULE_HASH] = hash
if packet_action is not None:
data[PBH_RULE_PACKET_ACTION] = packet_action
if flow_counter is not None:
data[PBH_RULE_FLOW_COUNTER] = flow_counter
if not data:
exit_with_error("Error: Failed to add PBH rule: options are not provided", fg="red")
if match_count == 0:
exit_with_error("Error: Failed to add PBH rule: match options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE.group(
name="update",
cls=clicommon.AliasedGroup
)
def PBH_RULE_update():
""" Update object in PBH_RULE table """
pass
@PBH_RULE_update.group(
name="field",
cls=clicommon.AliasedGroup
)
def PBH_RULE_update_field():
""" Update object field in PBH_RULE table """
pass
@PBH_RULE_update_field.command(name="set")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@click.option(
"--priority",
help="Configures priority for this rule",
type=click.INT
)
@click.option(
"--gre-key",
help="Configures packet match for this rule: GRE key (value/mask)",
callback=match_validator
)
@click.option(
"--ether-type",
help="Configures packet match for this rule: EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--ip-protocol",
help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--ipv6-next-header",
help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--l4-dst-port",
help="Configures packet match for this rule: L4 destination port",
callback=match_validator
)
@click.option(
"--inner-ether-type",
help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--hash",
help="The hash to apply with this rule"
)
@click.option(
"--packet-action",
help="Configures packet action for this rule",
type=click.Choice(PACKET_ACTION_VALUE_LIST)
)
@click.option(
"--flow-counter",
help="Enables/Disables packet/byte counter for this rule",
type=click.Choice(FLOW_COUNTER_VALUE_LIST)
)
@clicommon.pass_db
def PBH_RULE_update_field_set(
db,
table_name,
rule_name,
priority,
gre_key,
ether_type,
ip_protocol,
ipv6_next_header,
l4_dst_port,
inner_ether_type,
hash,
packet_action,
flow_counter
):
""" Set object field in PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
data = {}
if priority is not None:
data[PBH_RULE_PRIORITY] = priority
if gre_key is not None:
data[PBH_RULE_GRE_KEY] = gre_key
if ether_type is not None:
data[PBH_RULE_ETHER_TYPE] = ether_type
if ip_protocol is not None:
data[PBH_RULE_IP_PROTOCOL] = ip_protocol
if ipv6_next_header is not None:
data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header
if l4_dst_port is not None:
data[PBH_RULE_L4_DST_PORT] = l4_dst_port
if inner_ether_type is not None:
data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type
if hash is not None:
hash_validator(ctx, db.cfgdb_pipe, hash)
data[PBH_RULE_HASH] = hash
if packet_action is not None:
data[PBH_RULE_PACKET_ACTION] = packet_action
if flow_counter is not None:
data[PBH_RULE_FLOW_COUNTER] = flow_counter
if not data:
exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
if data.get(PBH_RULE_FLOW_COUNTER, "") == "DISABLED":
update_pbh_counters(table_name, rule_name)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE_update_field.command(name="del")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@click.option(
"--priority",
help="Deletes priority for this rule",
is_flag=True
)
@click.option(
"--gre-key",
help="Deletes packet match for this rule: GRE key (value/mask)",
is_flag=True
)
@click.option(
"--ether-type",
help="Deletes packet match for this rule: EtherType (IANA Ethertypes)",
is_flag=True
)
@click.option(
"--ip-protocol",
help="Deletes packet match for this rule: IP protocol (IANA Protocol Numbers)",
is_flag=True
)
@click.option(
"--ipv6-next-header",
help="Deletes packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
is_flag=True
)
@click.option(
"--l4-dst-port",
help="Deletes packet match for this rule: L4 destination port",
is_flag=True
)
@click.option(
"--inner-ether-type",
help="Deletes packet match for this rule: inner EtherType (IANA Ethertypes)",
is_flag=True
)
@click.option(
"--hash",
help="Deletes hash for this rule",
is_flag=True
)
@click.option(
"--packet-action",
help="Deletes packet action for this rule",
is_flag=True
)
@click.option(
"--flow-counter",
help="Deletes packet/byte counter for this rule",
is_flag=True
)
@clicommon.pass_db
def PBH_RULE_update_field_del(
db,
table_name,
rule_name,
priority,
gre_key,
ether_type,
ip_protocol,
ipv6_next_header,
l4_dst_port,
inner_ether_type,
hash,
packet_action,
flow_counter
):
""" Delete object field from PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
data = {}
if priority:
data[PBH_RULE_PRIORITY] = None
if gre_key:
data[PBH_RULE_GRE_KEY] = None
if ether_type:
data[PBH_RULE_ETHER_TYPE] = None
if ip_protocol:
data[PBH_RULE_IP_PROTOCOL] = None
if ipv6_next_header:
data[PBH_RULE_IPV6_NEXT_HEADER] = None
if l4_dst_port:
data[PBH_RULE_L4_DST_PORT] = None
if inner_ether_type:
data[PBH_RULE_INNER_ETHER_TYPE] = None
if hash:
data[PBH_RULE_HASH] = None
if packet_action:
data[PBH_RULE_PACKET_ACTION] = None
if flow_counter:
data[PBH_RULE_FLOW_COUNTER] = None
if not data:
exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red")
match_total, match_count = pbh_match_count(db.cfgdb_pipe, table, key, data)
if match_count >= match_total:
exit_with_error("Error: Failed to update PBH rule: match options are required", fg="red")
cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
if flow_counter:
update_pbh_counters(table_name, rule_name)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE.command(name="delete")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@clicommon.pass_db
def PBH_RULE_delete(db, table_name, rule_name):
""" Delete object from PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
try:
del_entry(db.cfgdb_pipe, table, key)
update_pbh_counters(table_name, rule_name)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH table -----------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="table",
cls=clicommon.AliasedGroup
)
def PBH_TABLE():
""" Configure PBH table"""
pass
@PBH_TABLE.command(name="add")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.option(
"--interface-list",
help="Interfaces to which this table is applied",
required=True
)
@click.option(
"--description",
help="The description of this table",
required=True
)
@clicommon.pass_db
def PBH_TABLE_add(db, table_name, interface_list, description):
""" Add object to PBH_TABLE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name, False)
table = str(PBH_TABLE_CDB)
key = str(table_name)
data = {}
if interface_list is not None:
interface_list_validator(ctx, db.cfgdb_pipe, interface_list)
data[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",")
if description is not None:
data[PBH_TABLE_DESCRIPTION] = description
if not data:
exit_with_error("Error: Failed to add PBH table: options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_TABLE.command(name="update")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.option(
"--interface-list",
help="Interfaces to which this table is applied"
)
@click.option(
"--description",
help="The description of this table",
)
@clicommon.pass_db
def PBH_TABLE_update(db, table_name, interface_list, description):
""" Update object in PBH_TABLE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
table = str(PBH_TABLE_CDB)
key = str(table_name)
data = {}
if interface_list is not None:
interface_list_validator(ctx, db.cfgdb_pipe, interface_list)
data[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",")
if description is not None:
data[PBH_TABLE_DESCRIPTION] = description
if not data:
exit_with_error("Error: Failed to update PBH table: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_TABLE_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH table capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_TABLE.command(name="delete")
@click.argument(
"table-name",
nargs=1,
required=True,
)
@clicommon.pass_db
def PBH_TABLE_delete(db, table_name):
""" Delete object from PBH_TABLE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
table = str(PBH_TABLE_CDB)
key = str(table_name)
try:
del_entry(db.cfgdb_pipe, table, key)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH plugin ----------------------------------------------------------------------------------------------------------
#
def register(cli):
cli_node = PBH
if cli_node.name in cli.commands:
raise Exception("{} already exists in CLI".format(cli_node.name))
cli.add_command(PBH)
| 1.664063
| 2
|
PCA_baseline_pipeline.py
|
GilianPonte/MachineLearning
| 0
|
12779132
|
import sklearn
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split, KFold, GridSearchCV, cross_val_score
from sklearn import tree
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from io import StringIO  # replaces sklearn.externals.six.StringIO, removed in modern scikit-learn
import pydot
# In[13]:
df = load_breast_cancer()
df = pd.DataFrame(np.c_[df['data'], df['target']],
columns= np.append(df['feature_names'], ['target']))
for col in df.columns:
print(col)
print(df.head())
total_rows=len(df.axes[0])
print(total_rows)
# Outlier detection and visualization
# In[3]:
histograms = df.hist()
df.hist("target")
# In[2]:
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size = .2)
# In[3]:
#PCA with scikit learn
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# fit PCA on the (scaled) training data only, then project both sets
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
# In[4]:
plot = 1
# plot explained variance
if plot == 1:
plt.figure()
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Breast Cancer data set Explained Variance')
plt.savefig('foo.png')
plt.show()
# In[5]:
print(np.cumsum(pca.explained_variance_ratio_))
# Selecting the number of principal components
# In[6]:
# keep the first 10 principal components
pca = PCA(n_components=10)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)  # transform (do not re-fit) the test set with the PCA fitted on the training data
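# Editor-added sketch: instead of hard-coding 10 components, scikit-learn can pick the
# smallest number of components that explains a target fraction of the variance (here 95%).
# This is an alternative; the pipeline below keeps the fixed 10-component PCA.
pca_95 = PCA(n_components=0.95)
X_train_pca_95 = pca_95.fit_transform(X_train)
X_test_pca_95 = pca_95.transform(X_test)
print(pca_95.n_components_, "components explain >= 95% of the variance")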
# In[7]:
# baseline linear model
reg = LogisticRegression(random_state=0).fit(X_train, y_train)
prediction = reg.predict(X_test)
score = reg.score(X_test,y_test)
print(score)
reg_pca = LogisticRegression(random_state=0).fit(X_train_pca, y_train)
score_pca = reg_pca.score(X_test_pca,y_test)
print(score_pca)
# In[8]:
LPM = linear_model.LinearRegression()
LPM = LPM.fit(X_train, y_train)
LPM.coef_
predictionLPM = LPM.predict(X_test)
scoreLPM = LPM.score(X_test, y_test)
print(scoreLPM)
LPMpca = linear_model.LinearRegression()
LPMpca = LPMpca.fit(X_train_pca, y_train)
LPMpca.coef_
predictionLPM = LPMpca.predict(X_test_pca)
scoreLPMpca = LPMpca.score(X_test_pca, y_test)
print(scoreLPMpca)
# In[9]:
# baseline decision tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
tree.export_graphviz(clf, out_file='tree.dot')
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontree.pdf")
predictionBaseline = clf.predict(X_test)
scoreclf = clf.score(X_test, y_test)
#print(classification_report(y_test,predictionBaseline,target_names=['malignant', 'benign']))
print(scoreclf)
# baseline decision tree on the PCA-transformed data
clfPca = tree.DecisionTreeClassifier()
clfPca = clfPca.fit(X_train_pca, y_train)
tree.export_graphviz(clfPca, out_file='treepca.dot')
dot_data = StringIO()
tree.export_graphviz(clfPca, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontreepca.pdf")
predictionBaselinePca = clfPca.predict(X_test_pca)
scoreclf = clfPca.score(X_test_pca, y_test)
#print(classification_report(y_test,predictionBaselinePca,target_names=['malignant', 'benign']))
print(scoreclf)
# In[18]:
# KNN classifier on the original and on the PCA-transformed data
knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')
knn.fit(X_train, y_train)
score = knn.score(X_test,y_test)
print(score)
knn.fit(X_train_pca, y_train)
score_pca = knn.score(X_test_pca,y_test)
print(score_pca)
# In[14]:
# Decision tree with Gridsearch
clf = tree.DecisionTreeClassifier()
#create a dictionary of all max_depth values we want to test
param_grid = {'max_depth': np.arange(1, 50)}
#use gridsearch to test all values for max_depth
clf_gscv = GridSearchCV(clf, param_grid, cv=10)
#fit model to data
clf_gscv.fit(X_train_pca, y_train)
#check the best performing max_depth value
print(clf_gscv.best_params_)
#check mean cross-validated score for the best max_depth
print(clf_gscv.best_score_)
# In[15]:
# KNN on the PCA-transformed data with Gridsearch over n_neighbors
knn2 = KNeighborsClassifier()
#create a dictionary of all values we want to test for n_neighbors
param_grid = {'n_neighbors': np.arange(1, 50)}
#use gridsearch to test all values for n_neighbors
knn_gscv = GridSearchCV(knn2, param_grid, cv=5)
#fit model to data
knn_gscv.fit(X_train_pca, y_train)
#check top performing n_neighbors value
print(knn_gscv.best_params_)
#check mean score for the top performing value of n_neighbors
print(knn_gscv.best_score_)
# In[32]:
## Plot results from gridsearches
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
"""
cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
param_x - name of grid search parameter to plot on x axis
    metric - name of the cv_results_ score column to plot on the y axis
"""
cv_results = pd.DataFrame(cv_results)
col_x = 'param_' + param_x
fig, ax = plt.subplots(1, 1, figsize=(11, 8))
sns.pointplot(x= col_x, y=metric, data=cv_results, ci=95, ax = ax)
ax.set_title("CV Grid Search Results")
ax.set_xlabel(param_x)
ax.set_ylabel(metric)
return fig
# In[34]:
# Single function to make plot for each Gridsearch
fig = plot_cv_results(knn_gscv.cv_results_, 'n_neighbors')
# In[59]:
#10 fold cross validation with PCA applied
k_fold = KFold(10)
X_pca = pca.fit_transform(X)
classifiers = []
for k, (train, test) in enumerate(k_fold.split(X_pca, y)):
clfk = tree.DecisionTreeClassifier()
clfk = clfk.fit(X_pca[train], y[train])
predictionBaseline = clfk.predict(X_pca[test])
print ("Classification report for %d fold", k)
print(classification_report(y[test],predictionBaseline,target_names=['malignant', 'benign']))
classifiers.append(clfk)
votes = []
# In[60]:
# Construct ensemble based on majority vote
for classifier in classifiers:
classifier.fit(X_train_pca,y_train)
votes.append(classifier.predict(X_test_pca))
ensembleVotes = np.zeros((len(y_test),1), dtype=int)
predictionEnsemble = np.zeros((len(y_test),1), dtype=int)
for prediction in votes:
for idx in range(0,len(prediction)):
ensembleVotes[idx]+= prediction[idx]
for idx in range(0,len(prediction)):
if ensembleVotes[idx] > 5:
predictionEnsemble[idx] = 1
print("ensemble")
print(classification_report(y_test,predictionEnsemble,target_names=['malignant', 'benign']))
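# Editor-added sketch: the same majority vote, vectorized with numpy
# (equivalent to the loop above for the 10 classifiers; ties fall to class 0).
vote_matrix = np.vstack(votes)  # shape: (n_classifiers, n_test_samples)
predictionEnsembleVec = (vote_matrix.sum(axis=0) > len(classifiers) // 2).astype(int)
print(classification_report(y_test, predictionEnsembleVec, target_names=['malignant', 'benign']))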
# In[ ]:
## Regularization
# In[15]:
# Ridge regression
param_grid = {'alpha': np.arange(start=0, stop=100, step=10)}
regridge = linear_model.Ridge()
#use gridsearch to test all values for n_neighbors
reg_gscv = GridSearchCV(regridge, param_grid, cv=10, return_train_score = True)
reg_gscv.fit(X_train_pca, y_train)
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
"""
cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
param_x - name of grid search parameter to plot on x axis
    metric - name of the cv_results_ score column to plot on the y axis
"""
cv_results = pd.DataFrame(cv_results)
col_x = 'param_' + param_x
fig, ax = plt.subplots(1, 1, figsize=(11, 8))
sns.pointplot(x= col_x, y=metric, data=cv_results, ci=95, ax = ax)
ax.set_title("CV Grid Search Results")
ax.set_xlabel(param_x)
ax.set_ylabel(metric)
return fig
fig = plot_cv_results(reg_gscv.cv_results_, 'alpha')
# In[19]:
# Logistic regression
logitl2 = linear_model.LogisticRegression(penalty='l2', C = 1.0)
param_grid = {'C': np.arange(.1, .9, step = .1)}
reg_gscv = GridSearchCV(logitl2 , param_grid, cv=10, return_train_score = True)
reg_gscv.fit(X_train, y_train)
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
"""
cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
param_x - name of grid search parameter to plot on x axis
    metric - name of the cv_results_ score column to plot on the y axis
"""
cv_results = pd.DataFrame(cv_results)
col_x = 'param_' + param_x
fig, ax = plt.subplots(1, 1, figsize=(11, 8))
sns.pointplot(x=col_x , y=metric, data=cv_results, ci=95, ax = ax)
ax.set_title("CV Grid Search Results")
ax.set_xlabel(param_x)
ax.set_ylabel(metric)
return fig
fig = plot_cv_results(reg_gscv.cv_results_, 'C')
print (reg_gscv.best_score_, reg_gscv.best_params_)
# In[17]:
## decision tree regularization
parameters = {'max_depth':range(1,40)}
clf = GridSearchCV(tree.DecisionTreeClassifier(), parameters, n_jobs=4)
clf.fit(X_train_pca, y_train)
tree_model = clf.best_estimator_
print (clf.best_score_, clf.best_params_)
| 2.734375
| 3
|
primeInterval.py
|
mayanksahu33/HackerEarth-1
| 0
|
12779133
|
"""
Find and Print All The Prime Numbers Between L and R (Both L and R Inclusive)
"""
def SieveOfEratosthenes(low,up):
if low == 1:
low = 2
prime = [True for i in range(up + 1)]
p = 2
while (p * p <= up):
if (prime[p] == True):
for i in range(p * 2, up + 1, p):
prime[i] = False
p += 1
prime[0]= False
prime[1]= False
res = []
for p in range(low,up +1):
if prime[p]:
res.append(p)
return res
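# Example (editor-added): SieveOfEratosthenes(10, 30) -> [11, 13, 17, 19, 23, 29]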
L , R = list(map(int,input().split()))
# print(L,R)
for i in SieveOfEratosthenes(L,R):
print(i,end=' ')
| 3.9375
| 4
|
prada_bayes_opt/visualization.py
|
ntienvu/ICDM2017_FBO
| 4
|
12779134
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 23:22:32 2016
@author: Vu
"""
from __future__ import division
import numpy as np
#import mayavi.mlab as mlab
#from scipy.stats import norm
#import matplotlib as plt
from mpl_toolkits.mplot3d import Axes3D
from prada_bayes_opt import PradaBayOptFn
#from prada_bayes_opt import PradaBayOptBatch
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import gridspec
import random
from acquisition_functions import AcquisitionFunction, unique_rows
import os
from pylab import *
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.7),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 0.5, 1.0))}
#my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
#my_cmap = plt.get_cmap('cubehelix')
my_cmap = plt.get_cmap('Blues')
counter = 0
#class Visualization(object):
#def __init__(self,bo):
#self.plot_gp=0
#self.posterior=0
#self.myBo=bo
def plot_bo(bo):
if bo.dim==1:
plot_bo_1d(bo)
if bo.dim==2:
plot_bo_2d(bo)
def plot_histogram(bo,samples):
if bo.dim==1:
plot_histogram_1d(bo,samples)
if bo.dim==2:
plot_histogram_2d(bo,samples)
def plot_mixturemodel(g,bo,samples):
if bo.dim==1:
plot_mixturemodel_1d(g,bo,samples)
if bo.dim==2:
plot_mixturemodel_2d(g,bo,samples)
def plot_mixturemodel_1d(g,bo,samples):
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
x_plot = np.linspace(np.min(samples), np.max(samples), len(samples))
x_plot = np.reshape(x_plot,(len(samples),-1))
y_plot = g.score_samples(x_plot)[0]
x_plot_ori = np.linspace(np.min(samples_original), np.max(samples_original), len(samples_original))
x_plot_ori=np.reshape(x_plot_ori,(len(samples_original),-1))
fig=plt.figure(figsize=(8, 3))
plt.plot(x_plot_ori, np.exp(y_plot), color='red')
plt.xlim(bo.bounds[0,0],bo.bounds[0,1])
plt.xlabel("X",fontdict={'size':16})
plt.ylabel("f(X)",fontdict={'size':16})
plt.title("IGMM Approximation",fontsize=16)
def plot_mixturemodel_2d(dpgmm,bo,samples):
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
dpgmm_means_original=dpgmm.truncated_means_*bo.max_min_gap+bo.bounds[:,0]
#fig=plt.figure(figsize=(12, 5))
fig=plt.figure()
myGmm=fig.add_subplot(1,1,1)
x1 = np.linspace(bo.scalebounds[0,0],bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0],bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
x_plot=np.c_[x1g.flatten(), x2g.flatten()]
y_plot2 = dpgmm.score_samples(x_plot)[0]
y_plot2=np.exp(y_plot2)
#y_label=dpgmm.predict(x_plot)[0]
x1_ori = np.linspace(bo.bounds[0,0],bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0],bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
CS_acq=myGmm.contourf(x1g_ori,x2g_ori,y_plot2.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
myGmm.scatter(dpgmm_means_original[:,0],dpgmm_means_original[:,1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
myGmm.set_title('IGMM Approximation',fontsize=16)
myGmm.set_xlim(bo.bounds[0,0],bo.bounds[0,1])
myGmm.set_ylim(bo.bounds[1,0],bo.bounds[1,1])
myGmm.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_histogram_2d(bo,samples):
# convert samples from 0-1 to original scale
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
#fig=plt.figure(figsize=(12, 5))
fig=plt.figure()
myhist=fig.add_subplot(1,1,1)
myhist.set_title("Histogram of Samples under Acq Func",fontsize=16)
#xedges = np.linspace(myfunction.bounds['x1'][0], myfunction.bounds['x1'][1], 10)
#yedges = np.linspace(myfunction.bounds['x2'][0], myfunction.bounds['x2'][1], 10)
xedges = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 10)
yedges = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 10)
H, xedges, yedges = np.histogram2d(samples_original[:,0], samples_original[:,1], bins=50)
#data = [go.Histogram2d(x=vu[:,1],y=vu[:,0])]
#plot_url = py.plot(data, filename='2d-histogram')
# H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
# Plot 2D histogram using pcolor
myhist.pcolormesh(xedges,yedges,Hmasked)
myhist.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
myhist.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
def plot_histogram_1d(bo,samples):
samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
fig=plt.figure(figsize=(8, 3))
fig.suptitle("Histogram",fontsize=16)
myplot=fig.add_subplot(111)
myplot.hist(samples_original,50)
myplot.set_xlim(bo.bounds[0,0],bo.bounds[0,1])
myplot.set_xlabel("Value",fontsize=16)
myplot.set_ylabel("Frequency",fontsize=16)
def plot_bo_1d(bo):
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
fig=plt.figure(figsize=(8, 5))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x_original, mu_original, '--', color='k', label='GP mean')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
#temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original=temp_yaxis*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original,alpha=.6, fc='c', ec='None', label='95% CI')
axis.set_xlim((np.min(x_original), np.max(x_original)))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq.plot(x_original, utility, label='Utility Function', color='purple')
acq.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
        nSelectedPoints=int(bo.NumPoints[-1])  # plain int(); np.int was removed in newer numpy
except:
nSelectedPoints=1
max_point=np.max(utility)
acq.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((np.min(x_original), np.max(x_original)))
#acq.set_ylim((0, np.max(utility) + 0.5))
acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
acq.set_ylabel('Acq', fontdict={'size':16})
acq.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d(bo):
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig = plt.figure()
#axis2d = fig.add_subplot(1, 2, 1)
acq2d = fig.add_subplot(1, 1, 1)
#mu, sigma = bo.posterior(X)
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
#acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=30,label='Current Peak')
#acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Observations')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Previous Selection')
acq2d.set_title('Acquisition Function',fontsize=16)
acq2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.01, 0.5))
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
#acq.set_xlim((np.min(x), np.max(x)))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
#acq.set_ylabel('Acq', fontdict={'size':16})
#acq.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d_FBO(bo,myfunction):
global counter
counter=counter+1
strFolder="P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\plot_Nov_2016"
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig = plt.figure(figsize=(10, 3.5))
#axis2d = fig.add_subplot(1, 2, 1)
# plot invasion set
acq_expansion = fig.add_subplot(1, 2, 1)
x1 = np.linspace(bo.b_limit_lower[0], bo.b_limit_upper[0], 100)
x2 = np.linspace(bo.b_limit_lower[1], bo.b_limit_upper[1], 100)
x1g_ori_limit,x2g_ori_limit=np.meshgrid(x1,x2)
X_plot=np.c_[x1g_ori_limit.flatten(), x2g_ori_limit.flatten()]
Y = myfunction.func(X_plot)
Y=-np.log(np.abs(Y))
CS_expansion=acq_expansion.contourf(x1g_ori_limit,x2g_ori_limit,Y.reshape(x1g_ori.shape),cmap=my_cmap,origin='lower')
if len(bo.X_invasion)!=0:
myinvasion_set=acq_expansion.scatter(bo.X_invasion[:,0],bo.X_invasion[:,1],color='m',s=1,label='Invasion Set')
else:
myinvasion_set=[]
myrectangle=patches.Rectangle(bo.bounds_bk[:,0], bo.max_min_gap_bk[0],bo.max_min_gap_bk[1],
alpha=0.3, fill=False, facecolor="#00ffff",linewidth=3)
acq_expansion.add_patch(myrectangle)
acq_expansion.set_xlim(bo.b_limit_lower[0]-0.2, bo.b_limit_upper[0]+0.2)
acq_expansion.set_ylim(bo.b_limit_lower[1]-0.2, bo.b_limit_upper[1]+0.2)
if len(bo.X_invasion)!=0:
acq_expansion.legend([myrectangle,myinvasion_set],[ur'$X_{t-1}$',ur'$I_t$'],loc=4,ncol=1,prop={'size':16},scatterpoints = 5)
strTitle_Inv="[t={:d}] Invasion Set".format(counter)
acq_expansion.set_title(strTitle_Inv,fontsize=16)
else:
acq_expansion.legend([myrectangle,myinvasion_set],[ur'$X_{t-1}$',ur'Empty $I_t$'],loc=4,ncol=1,prop={'size':16},scatterpoints = 5)
strTitle_Inv="[t={:d}] Empty Invasion Set".format(counter)
acq_expansion.set_title(strTitle_Inv,fontsize=16)
# plot acquisition function
acq2d = fig.add_subplot(1, 2, 2)
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
myrectangle=patches.Rectangle(bo.bounds[:,0], bo.max_min_gap[0],bo.max_min_gap[1],
alpha=0.3, fill=False, facecolor="#00ffff",linewidth=3)
acq2d.add_patch(myrectangle)
#acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=30,label='Current Peak')
myobs=acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',s=6,label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
#acq2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
acq2d.set_xlim(bo.b_limit_lower[0]-0.2, bo.b_limit_upper[0]+0.2)
acq2d.set_ylim(bo.b_limit_lower[1]-0.2, bo.b_limit_upper[1]+0.2)
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.2, 0.5))
#acq2d.legend(loc=4)
acq2d.legend([myrectangle,myobs],[ur'$X_{t}$','Data'],loc=4,ncol=1,prop={'size':16}, scatterpoints = 3)
strTitle_Acq="[t={:d}] Acquisition Func".format(counter)
acq2d.set_title(strTitle_Acq,fontsize=16)
fig.colorbar(CS_expansion, ax=acq_expansion, shrink=0.9)
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
#acq.set_xlim((np.min(x), np.max(x)))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
#acq.set_ylabel('Acq', fontdict={'size':16})
#acq.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
strFileName="{:d}_bubo.eps".format(counter)
strPath=os.path.join(strFolder,strFileName)
#print strPath
#fig.savefig(strPath, bbox_inches='tight')
def plot_bo_2d_withGPmeans(bo):
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
fig = plt.figure(figsize=(12, 5))
#axis3d = fig.add_subplot(1, 2, 1, projection='3d')
axis2d = fig.add_subplot(1, 2, 1)
#acq3d = fig.add_subplot(2, 2, 3, projection='3d')
acq2d = fig.add_subplot(1, 2, 2)
mu, sigma = bo.posterior(X)
#axis.plot(x, y, linewidth=3, label='Target')
#axis3d.plot_surface(x1g,x1g,mu.reshape(x1g.shape))
#axis3d.scatter(bo.X[:,0],bo.X[:,1], bo.Y,zdir='z', label=u'Observations', color='r')
CS=axis2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis2d.set_title('Gaussian Process Mean',fontsize=16)
axis2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis2d, shrink=0.9)
#plt.colorbar(ax=axis2d)
#axis.plot(x, mu, '--', color='k', label='Prediction')
#axis.set_xlim((np.min(x), np.max(x)))
#axis.set_ylim((None, None))
#axis.set_ylabel('f(x)', fontdict={'size':16})
#axis.set_xlabel('x', fontdict={'size':16})
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
#CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g')
acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=60)
acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=60)
acq2d.set_title('Acquisition Function',fontsize=16)
acq2d.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
acq2d.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
#acq.set_xlim((np.min(x), np.max(x)))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
#acq.set_ylabel('Acq', fontdict={'size':16})
#acq.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_bo_2d_withGPmeans_Sigma(bo):
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
fig = plt.figure(figsize=(12, 3))
#axis3d = fig.add_subplot(1, 2, 1, projection='3d')
axis2d = fig.add_subplot(1, 2, 1)
#acq3d = fig.add_subplot(2, 2, 3, projection='3d')
acq2d = fig.add_subplot(1, 2, 2)
mu, sigma = bo.posterior(X)
#axis.plot(x, y, linewidth=3, label='Target')
#axis3d.plot_surface(x1g,x1g,mu.reshape(x1g.shape))
#axis3d.scatter(bo.X[:,0],bo.X[:,1], bo.Y,zdir='z', label=u'Observations', color='r')
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
CS=axis2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis2d.set_title('Gaussian Process Mean',fontsize=16)
axis2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis2d, shrink=0.9)
#CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g')
acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=60)
acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=60)
acq2d.set_title('Gaussian Process Variance',fontsize=16)
#acq2d.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
#acq2d.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_gp_batch(self,x,y):
bo=self.myBo
n_batch=bo.NumPoints
fig=plt.figure(figsize=(16, 10))
fig.suptitle('Gaussian Process and Utility Function After {} Steps'.format(len(bo.X)), fontdict={'size':30})
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
    mu, sigma = bo.posterior(x)  # use the BO object's posterior helper, consistent with the other plot functions
axis.plot(x, y, linewidth=3, label='Target')
axis.plot(bo.X.flatten(), bo.Y, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x, mu, '--', color='k', label='GP mean')
axis.fill(np.concatenate([x, x[::-1]]),
np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
alpha=.6, fc='c', ec='None', label='95% confidence interval')
axis.set_xlim((-2, 10))
axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':20})
axis.set_xlabel('x', fontdict={'size':20})
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, 0)
acq.plot(x, utility, label='Utility Function', color='purple')
#selected_x=x[np.argmax(utility)]
#selected_y=np.max(utility)
    selected_x=bo.X[-1-n_batch:]
    # utility is an array evaluated on the grid x, not a callable; evaluate the acquisition at the selected points instead
    selected_y=bo.acq_func.acq_kind(selected_x.reshape((-1, 1)), bo.gp, 0)
acq.plot(selected_x, selected_y,'*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((-2, 10))
acq.set_ylim((0, np.max(utility) + 0.5))
acq.set_ylabel('Utility', fontdict={'size':20})
acq.set_xlabel('x', fontdict={'size':20})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_original_function(myfunction):
origin = 'lower'
func=myfunction.func
if myfunction.input_dim==1:
x = np.linspace(myfunction.bounds['x'][0], myfunction.bounds['x'][1], 1000)
y = func(x)
fig=plt.figure(figsize=(8, 5))
plt.plot(x, y)
strTitle="{:s}".format(myfunction.name)
plt.title(strTitle)
if myfunction.input_dim==2:
# Create an array with parameters bounds
if isinstance(myfunction.bounds,dict):
# Get the name of the parameters
bounds = []
for key in myfunction.bounds.keys():
bounds.append(myfunction.bounds[key])
bounds = np.asarray(bounds)
else:
bounds=np.asarray(myfunction.bounds)
x1 = np.linspace(bounds[0][0], bounds[0][1], 100)
x2 = np.linspace(bounds[1][0], bounds[1][1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X_plot=np.c_[x1g.flatten(), x2g.flatten()]
Y = func(X_plot)
#fig=plt.figure(figsize=(8, 5))
#fig = plt.figure(figsize=(12, 3.5))
fig = plt.figure(figsize=(6, 3.5))
ax3d = fig.add_subplot(1, 1, 1, projection='3d')
#ax2d = fig.add_subplot(1, 2, 2)
ax3d.plot_surface(x1g,x2g,Y.reshape(x1g.shape),cmap=my_cmap)
alpha = 30 # degrees
#mlab.view(azimuth=0, elevation=90, roll=-90+alpha)
strTitle="{:s}".format(myfunction.name)
#print strTitle
ax3d.set_title(strTitle)
#plt.plot(x, y)
#CS=ax2d.contourf(x1g,x2g,Y.reshape(x1g.shape),cmap=my_cmap,origin=origin)
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin=origin,hold='on')
#plt.colorbar(CS2, ax=ax2d, shrink=0.9)
strFolder="P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\plot_August_2016\\ei_eli"
strFileName="{:s}.eps".format(myfunction.name)
strPath=os.path.join(strFolder,strFileName)
#fig.savefig(strPath, bbox_inches='tight')
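# Editor-added usage sketch (hypothetical objective): plot_original_function expects an
# object exposing `input_dim`, `bounds`, `name` and a vectorized `func`, for example:
#
#     class MySphere:
#         input_dim = 2
#         bounds = {'x1': (-2, 2), 'x2': (-2, 2)}
#         name = 'sphere'
#         def func(self, X):
#             return np.sum(np.asarray(X) ** 2, axis=1)
#
#     plot_original_function(MySphere())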
def plot_bo_multiple_gp_1d(bo):
func=bo.f
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
fig=plt.figure(figsize=(10, 5))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(3, 1, height_ratios=[3,1,1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
acq_integrated=plt.subplot(gs[2])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
nGP=len(mu)
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
for idx in range(nGP):
mu_original=mu[idx]*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.plot(x_original,mu_original,'--',color = "#%06x" % random.randint(0, 0xFFFFFF),label='GP Theta={:.2f}'.format(bo.theta[idx]),linewidth=2)
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
temp_yaxis=np.concatenate([mu[idx] - 1.9600 * sigma[idx], (mu[idx] + 1.9600 * sigma[idx])[::-1]])
temp_yaxis_original=temp_yaxis*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original,alpha=.6, fc='c', ec='None', label='95% CI')
#axis.set_xlim((np.min(x), np.max(x)))
axis.set_ylim((np.min(y_original)*2, np.max(y_original)*2))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
## estimate the utility
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, bo.Y.max())
for idx in range(nGP):
acq.plot(x_original, utility[idx], label='Acq Func GP {:.2f}'.format(bo.theta[idx]),
color="#%06x" % random.randint(0, 0xFFFFFF),linewidth=2)
acq.plot(x_original[np.argmax(utility[idx])], np.max(utility[idx]), '*', markersize=15,
label=u'Next Guess GP {:.2f}'.format(bo.theta[idx]), markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((np.min(x_original), np.max(x_original)))
#acq.set_ylim((0, np.max(utility[0]) + 0.5))
acq.set_ylabel('Acq', fontdict={'size':16})
acq.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
## estimate the integrated acquisition function
util_integrated = bo.acq_func.utility_plot(x.reshape((-1, 1)), bo.gp, bo.Y.max())
acq_integrated.plot(x, util_integrated, label='Acq Int-Func GP',
color="#%06x" % random.randint(0, 0xFFFFFF),linewidth=2)
acq_integrated.plot(x[np.argmax(util_integrated)], np.max(util_integrated), '*', markersize=15,
label=u'Next Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
acq_integrated.set_xlim((np.min(x), np.max(x)))
acq_integrated.set_ylim((0, np.max(util_integrated) + 0.1))
acq_integrated.set_ylabel('Int-Acq', fontdict={'size':16})
acq_integrated.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq_integrated.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#===========================================
def plot_gp_batch(bo,x,y):
n_batch=bo.NumPoints[-1]
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig = plt.figure()
#axis2d = fig.add_subplot(1, 2, 1)
acq2d = fig.add_subplot(1, 1, 1)
#mu, sigma = bo.posterior(X)
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
    CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower')
idxBest=np.argmax(utility)
#acq2d.scatter(X_ori[idxBest,0],X_ori[idxBest,1],color='b',s=30,label='Current Peak')
#acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Observations')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
acq2d.scatter(bo.X_original[:,0],bo.X_original[:,1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
acq2d.set_title('Acquisition Function',fontsize=16)
acq2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.01, 0.5))
fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_gp_sequential_batch(bo,x_seq,x_batch):
global counter
counter=counter+1
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 100)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 100)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 100)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig=plt.figure(figsize=(10, 3))
#axis2d = fig.add_subplot(1, 2, 1)
acq2d_seq = fig.add_subplot(1, 2, 1)
acq2d_batch = fig.add_subplot(1, 2, 2)
#mu, sigma = bo.posterior(X)
# plot the acquisition function
utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
#acq3d.plot_surface(x1g,x1g,utility.reshape(x1g.shape))
CS_acq=acq2d_seq.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
acq2d_seq.scatter(x_seq[0],x_seq[1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
acq2d_seq.set_title('Sequential Bayesian Optimization',fontsize=16)
acq2d_seq.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
acq2d_seq.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
#acq2d.legend(loc=1, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq2d.legend(loc='center left',bbox_to_anchor=(1.01, 0.5))
fig.colorbar(CS_acq, ax=acq2d_seq, shrink=0.9)
CS_acq_batch=acq2d_batch.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq_batch = plt.contour(CS_acq_batch, levels=CS_acq_batch.levels[::2],colors='r',origin='lower',hold='on')
acq2d_batch.scatter(x_batch[:,0],x_batch[:,1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
acq2d_batch.set_title('Batch Bayesian Optimization',fontsize=16)
acq2d_batch.set_xlim(bo.bounds[0,0]-0.2, bo.bounds[0,1]+0.2)
acq2d_batch.set_ylim(bo.bounds[1,0]-0.2, bo.bounds[1,1]+0.2)
fig.colorbar(CS_acq_batch, ax=acq2d_batch, shrink=0.9)
strFolder="V:\\plot_Nov_2016\\sequential_batch"
strFileName="{:d}.eps".format(counter)
strPath=os.path.join(strFolder,strFileName)
fig.savefig(strPath, bbox_inches='tight')
| 1.976563
| 2
|
PowerManagementToggle.py
|
favhwdg/PowerUsageSetBotNvidia
| 1
|
12779135
|
import os
from pyautogui import *
import pyautogui
import time
import keyboard
import random
import win32api, win32con
# This program was written in a few hours; its purpose is to set the computer's power-usage mode.
# Disclaimer: this is an awful way to do it (even the cmds); a better approach would be NViAPI, but I don't want to use it.
## 1200 150 , 1220 300 # To open control panel
## 776 419, 240 240 240 To scroll down
## 750 419 To click the menu
## 556 431 , 120 185 4 To Check if menu is opened
## Optimal : X same, 430
## Adaptive(Balanced): , 450
## Performance: , 470
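# A hedged sketch (not part of the original script): the power-mode y-coordinates listed
# above could be kept in a single mapping, keyed by the same 1/2/3 argument that
# PowerSetting()/FindPowerSettings() use below. The pixel values are assumptions copied
# from the comments above and would need re-calibration for other screen resolutions.
POWER_MODE_Y_COORD = {
    1: 430,  # Optimal (power saver)
    2: 450,  # Adaptive (balanced)
    3: 470,  # Prefer maximum performance
}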
def PowerSetting(x):
OpenCtrlPanel()
FindPowerSettings(x)
def ClickOnPosition(x,y):
win32api.SetCursorPos((x,y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(0.001)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
def OpenCtrlPanel():
ChangeinY = 140
ChangeinX = 20
win32api.SetCursorPos((1200,150))
x, y = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN,0,0)
time.sleep(0.01)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP,0,0)
time.sleep(0.011)
win32api.SetCursorPos((x+ChangeinX,y+ChangeinY))
time.sleep(0.011)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(0.011)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
Found = False
Found2 = False
Found3 = False
def FindPowerSettings(x):
try:
Found = False
while(Found == False):
if pyautogui.pixel(776,419)[0] == 240 and pyautogui.pixel(776,419)[1] == 240 and pyautogui.pixel(776,419)[2] == 240:
Found = True
ClickOnPosition(776,419)
time.sleep(0.1)
ClickOnPosition(750,419)
try:
Found2 = False
while(Found2 == False):
if pyautogui.pixel(556,431)[0] == 120 and pyautogui.pixel(556,431)[1] == 185 and pyautogui.pixel(556,431)[2] == 4:
Found2 = True
if(x == 1): ClickOnPosition(750, 430)
elif (x == 2): ClickOnPosition(750, 450)
elif (x==3): ClickOnPosition(750, 470)
time.sleep(1)
ClickOnPosition(1919, 0)
time.sleep(1)
keyboard.press_and_release("enter")
else: time.sleep(0.01)
except:
pass
else: time.sleep(0.01)
except:
pass
On = True
while (On == True):
e = input("Type s for Saving, b for Balanced, p for Performance, N to cancel: ")
if(e == 's' or e == 'S'):
os.startfile('PowerSaver.cmd')
PowerSetting(1)
elif (e== 'b' or e == 'B'):
os.startfile('Balanced.cmd')
PowerSetting(2)
elif (e=='p' or e == 'P'):
os.startfile('Performance.cmd')
PowerSetting(3)
elif (e=='n' or e == 'N'): On = False
| 3.015625
| 3
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKitchennovelCom.py
|
fake-name/ReadableWebProxy
| 193
|
12779136
|
def extractKitchennovelCom(item):
'''
Parser for 'kitchennovel.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Strange World Alchemist Chef', 'Strange World Alchemist Chef', 'translated'),
('Imperial Chef Rookie', 'Imperial Chef Rookie', 'translated'),
('Daddy Fantasy World', 'Daddy Fantasy World', 'translated'),
('Here Comes the Lady Chef', 'Here Comes the Lady Chef', 'translated'),
('Different World Okonomiyaki Chain Store', 'Different World Okonomiyaki Chain Store', 'translated'),
('Strange World Little Cooking Saint', 'Strange World Little Cooking Saint', 'translated'),
('Fine Food Broadcastor', 'Fine Food Broadcaster', 'translated'),
('Kitchen Xiuzhen', 'Kitchen Xiuzhen', 'translated'),
('Reborn - Super Chef', 'Reborn - Super Chef', 'translated'),
('The Taming of the Black Bellied Scholar', 'The Taming of the Black Bellied Scholar', 'translated'),
('The Feast', 'The Feast', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| 2.34375
| 2
|
QuickDemos/TabsVSpaces.py
|
dannystiv/CMPT-120L-201-22S
| 1
|
12779137
|
if true:
if False:
if True:
print("Wooo")
| 2.359375
| 2
|
stonklib.py
|
jebjoya/jebstonks
| 0
|
12779138
|
<gh_stars>0
import os
import requests
import datetime
from datetime import date
import pandas as pd
from requests.api import options
import yfinance as yf
import numpy as np
def skipDate(d, weekends):
if weekends and d.weekday() in [5,6]:
return True
if d in [date(2021,1,1),date(2021,1,18),date(2021,2,15),date(2021,4,2),date(2021,5,31),date(2021,7,5)]:
return True
return False
def advanceDays(d, days, weekends=False):
delta = datetime.timedelta(1)
for x in range(days):
d += delta
while skipDate(d, weekends):
d += delta
return d
def getFTDTheoryDates(optionDate):
r = []
next = advanceDays(optionDate, 1, True)
next = advanceDays(next, 35, False)
r.append(next)
next = advanceDays(next, 21, True)
r.append(next)
next = advanceDays(next, 21, True)
r.append(next)
next = advanceDays(next, 21, True)
r.append(next)
return r
def download(url: str, dest_folder: str):
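    # Return convention: this helper returns True when the download fails and falls
    # through (returning None) on success, which is why callers test `if not download(...)`.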
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
filename = url.split('/')[-1].replace(" ", "_")
file_path = os.path.join(dest_folder, filename)
r = requests.get(url, stream=True)
if r.ok:
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 8):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
else:
return True
def getOrDownloadFinra(d):
datestring = d.strftime("%Y%m%d")
try:
k = open('dataSet/CNMSshvol' + datestring + ".txt",'r')
k.close()
return 'dataSet/CNMSshvol' + datestring + ".txt"
except IOError:
if not download("http://regsho.finra.org/CNMSshvol" + datestring + ".txt", dest_folder="dataSet"):
return 'dataSet/CNMSshvol' + datestring + ".txt"
else:
return False
def getOrDownloadNYSEShort(d,exchange):
datestring = d.strftime("%Y%m%d")
yearstring = d.strftime("%Y")
yearmonthstring = d.strftime("%Y%m")
try:
k = open(exchange + 'DataSet/'+ exchange + 'shvol' + datestring + ".txt",'r')
k.close()
return exchange + 'DataSet/'+ exchange + 'shvol' + datestring + ".txt"
except IOError:
if not download("https://ftp.nyse.com/ShortData/" + exchange + "shvol/" + exchange + "shvol" + yearstring + "/" + exchange + "shvol" + yearmonthstring + "/" + exchange + "shvol" + datestring + ".txt", dest_folder=exchange + "DataSet"):
return exchange + 'DataSet/'+ exchange + 'shvol' + datestring + ".txt"
else:
print("Unable to get data for " + datestring)
print("https://ftp.nyse.com/ShortData/" + exchange + "shvol/" + exchange + "shvol" + yearstring + "/" + exchange + "shvol" + yearmonthstring + "/" + exchange + "shvol" + datestring + ".txt")
return False
def returnFinraShortData(fromDate, toDate=datetime.date.today()):
now = fromDate
while now < toDate:
fileLocationString = getOrDownloadFinra(now)
if fileLocationString:
tdf = pd.read_csv(fileLocationString, delimiter = "|")
tdf = tdf[tdf["Date"] > 100000]
try:
df = pd.concat([df, tdf])
except NameError:
df = tdf
now += datetime.timedelta(days=1)
return df
def returnNYSEShortData(exchange, fromDate, toDate=datetime.date.today()):
now = fromDate
while now < toDate:
fileLocationString = getOrDownloadNYSEShort(now,exchange)
if fileLocationString:
tdf = pd.read_csv(fileLocationString, delimiter = "|")
tdf = tdf[tdf["Date"] > 100000]
try:
df = pd.concat([df, tdf])
except NameError:
df = tdf
now += datetime.timedelta(days=1)
return df
def getMinGain(tickerString, date):
ticker = yf.Ticker(tickerString)
dateString = datetime.date.strftime(date, format="%Y-%m-%d")
calls = ticker.option_chain(dateString)[0]
puts = ticker.option_chain(dateString)[1]
    minGainValue = np.inf
for price in np.arange(calls['strike'].min(), calls['strike'].max(),0.5):
relevantCalls = calls[calls['strike'] < price]
relevantPuts = puts[puts['strike'] > price]
callValue = (relevantCalls['openInterest'] * 100 * (price - relevantCalls['strike'])).sum()
putValue = (relevantPuts['openInterest'] * 100 * (relevantPuts['strike'] - price)).sum()
totalValue = callValue + putValue
if totalValue < minGainValue:
minGainValue = totalValue
minGain = price
return round(minGain,2)
def calculateOBV(volume, close):
lastClose = close.shift(1)
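    # The original file is cut off at this point in the dataset. The lines below are a
    # hedged sketch of a standard On-Balance Volume calculation consistent with the
    # pandas-style inputs above; they are an assumption, not the author's verified code.
    direction = (close > lastClose).astype(int) - (close < lastClose).astype(int)
    return (direction * volume).cumsum()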
| 2.96875
| 3
|
wazimap_ng/datasets/models/geography.py
|
BarisSari/wazimap-ng
| 0
|
12779139
|
<reponame>BarisSari/wazimap-ng<gh_stars>0
from django.db import models
from django.contrib.postgres.indexes import GinIndex
from treebeard.mp_tree import MP_Node
from treebeard.ns_tree import NS_NodeManager, NS_NodeQuerySet
from django.contrib.postgres.search import TrigramSimilarity
from wazimap_ng.extensions.index import GinTrgmIndex
class GeographyQuerySet(NS_NodeQuerySet):
def search(self, text, similarity=0.3):
return self.annotate(similarity=TrigramSimilarity("name", text)).filter(similarity__gt=similarity)
class GeographyManager(NS_NodeManager):
def get_queryset(self):
return GeographyQuerySet(self.model, using=self._db)
def search(self, text, similarity=0.3):
return self.get_queryset().search(text, similarity)
class Geography(MP_Node):
name = models.CharField(max_length=50)
code = models.CharField(max_length=20)
level = models.CharField(max_length=20)
def __str__(self):
return "%s" % self.name
objects = GeographyManager()
class Meta:
verbose_name_plural = "geographies"
indexes = [
GinTrgmIndex(fields=["name"])
]
| 2.3125
| 2
|
src/fetch_job_schedule.py
|
aws-samples/aws-iot-ota-deployment-tool
| 28
|
12779140
|
import boto3
import datetime
import argparse
import logging
import sys
from aws_interfaces.s3_interface import S3Interface
from boto3.dynamodb.conditions import Key, Attr
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--region", action="store", required=True, dest="region", help="the region for uploading")
parser.add_argument("-tb", "--tableName", action="store", required=True, dest="tableName", help="the table for jobs entry")
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
args = parser.parse_args()
region = args.region
tableName = args.tableName
s3 = boto3.client('s3', region_name=region)
dynamodb = boto3.client('dynamodb', region_name=region)
s3_interface = S3Interface(region)
def fetch_job():
fe = Key('jobStatus').eq('PendingDeployment')
tableJobConfigs = dynamodb.query(
TableName=tableName,
Limit=1,
KeyConditionExpression="#S = :jobStatus",
ExpressionAttributeNames={
"#S": "jobStatus"
},
ExpressionAttributeValues={
":jobStatus": {"S": "PendingDeployment"}
})
for JobConfig in tableJobConfigs['Items']:
bucket = JobConfig['bucketId']['S']
s3_interface.download_file_from_s3('dev.ini', bucket, JobConfig['devFileKey']['S'])
thingListFileKey = JobConfig['thingListFileKey']['S']
tmp, thingListFileName = thingListFileKey.split('release/')
s3_interface.download_file_from_s3(thingListFileName, bucket, thingListFileKey)
binFileKey = JobConfig['binFileKey']['S']
tmp, binName = binFileKey.split('release/')
s3_interface.download_file_from_s3(binName, bucket, binFileKey)
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
response = dynamodb.delete_item(
TableName=tableName,
Key={
'jobStatus': JobConfig['jobStatus'],
'timestamp': JobConfig['timestamp']
}
)
if response is None:
raise Exception('job record delete failed')
else:
jobStatus = 'Deployed'
print(JobConfig['jobId'])
dynamodb.put_item(
TableName=tableName,
Item={
'jobId': {'S': JobConfig['jobId']['S']},
'bucketId': {'S': JobConfig['bucketId']['S']},
'binFileKey': {'S': JobConfig['binFileKey']['S']},
'thingListFileKey': {'S': JobConfig['thingListFileKey']['S']},
'devFileKey': {'S': JobConfig['devFileKey']['S']},
'jobStatus': {'S': jobStatus},
'timestamp': {'S': timestamp}
})
fetch_job()
| 1.9375
| 2
|
curie/null_oob_util.py
|
mike0615/curie
| 4
|
12779141
|
#
# Copyright (c) 2016 Nutanix Inc. All rights reserved.
#
"""
Provides stub Out-of-Band management util for cases with no OoB support.
"""
from curie.curie_error_pb2 import CurieError
from curie.exception import CurieException
from curie.oob_management_util import OobInterfaceType
from curie.oob_management_util import OobManagementUtil
#==============================================================================
# Null OoB Util
#==============================================================================
class NullOobUtil(OobManagementUtil):
"""
Dummy implementation of 'OobManagementUtil' interface.
Wraps calls to ipmitool.
"""
@classmethod
def _use_handler(cls, oob_type=None, oob_vendor=None):
return oob_type == OobInterfaceType.kNone
def __init__(self, *args, **kwargs):
pass
#==============================================================================
# OobManagementUtil interface
#==============================================================================
def get_chassis_status(self):
"""
Returns:
(dict): Map of IPMI chassis status data.
Raises:
CurieException on error.
"""
raise CurieException(
CurieError.kInvalidParameter,
"Attempted to make out-of-band management calls in an environment "
"which has not been configured to support out-of-band management")
def power_cycle(self, async=False):
"""
Power cycles the node associated with 'self.__flags["host"]'.
Args:
async (bool): Optional. If False, making blocking calls to 'power_off'
and then 'power_on'.
If True and node is powered off, performs async 'power_on' call,
otherwise issues the (async) 'power cycle' command.
Returns:
(bool): True on success, else False.
"""
raise CurieException(
CurieError.kInvalidParameter,
"Attempted to make out-of-band management calls in an environment "
"which has not been configured to support out-of-band management")
def power_on(self, async=False):
"""
Powers on the node associated with 'self.__flags["host"]'.
Args:
async (bool): Optional. If False, block until power state is on.
Returns:
(bool): True on success, else False.
"""
raise CurieException(
CurieError.kInvalidParameter,
"Attempted to make out-of-band management calls in an environment "
"which has not been configured to support out-of-band management")
def power_off(self, async=False):
"""
Powers off the node associated with 'self.__flags["host"]'.
Args:
async (bool): Optional. If False, block until power state is off.
Returns:
(bool): True on success, else False.
"""
raise CurieException(
CurieError.kInvalidParameter,
"Attempted to make out-of-band management calls in an environment "
"which has not been configured to support out-of-band management")
def is_powered_on(self):
"""
Checks whether chassis power state is 'on'.
Returns:
(bool) True if powered on, else False.
"""
raise CurieException(
CurieError.kInvalidParameter,
"Attempted to make out-of-band management calls in an environment "
"which has not been configured to support out-of-band management")
| 1.851563
| 2
|
servicesCalculator.py
|
LincT/PythonExamples
| 0
|
12779142
|
<filename>servicesCalculator.py
# GUI with checkboxes for itemized service charges.
# Gives the total when the button is clicked.
# The menu is generated procedurally to reduce code.
# Update values in the self.services dictionary to add/update/remove services.
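# For example (illustrative only, not part of the original script), adding one more
# entry to that dictionary inside __init__ makes a new checkbox row appear automatically:
#   self.services['brake pad replacement'] = 150.00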
import tkinter
__author__ = 'LincT, https://github.com/LincT/PythonExamples'
class AutoEstimateGUI:
def __init__(self):
# main window and our frames
self.mainWindow = tkinter.Tk()
self.topFrame = tkinter.Frame(self.mainWindow)
self.btmFrame = tkinter.Frame(self.mainWindow)
# service types and costs
self.services = {
'oil change': 30.00,
'lube job': 20.00,
'radiator flush': 40.00,
'transmission flush': 100.00,
'inspection': 35.00,
'muffler replacement': 200.00,
'tire rotation': 20.00,
'espresso': 3.00
}
# hold the check button variables
self.cbValues = {}
# form generation
for each in self.services.keys():
titleText = each.title()
priceText = self.services[each]
padLength = len(titleText) + len(format(priceText, '.2f'))
# make each an IntVar
self.cbValues[each] = tkinter.IntVar()
self.cbValues[each].set(0) # set each
self.cb = tkinter.Checkbutton(
self.topFrame,
text=str(each.title().ljust(40-padLength, '.') + ' $ ' + format(self.services[each], '.2f')),
variable=self.cbValues[each])
# pack each checkbutton as it finishes
self.cb.pack(side='top')
self.topFrame.pack() # pack the frame
self.totalLabel = \
tkinter.Label(
self.btmFrame,
text='Total:\t$')
# stringVar to hold data
self.value = tkinter.StringVar()
# create label, link w/ self.value
self.estimateLabel = \
tkinter.Label(
self.btmFrame,
textvariable=self.value)
# Pack the bottom frame's labels.
self.totalLabel.pack(side='left')
self.estimateLabel.pack(side='left')
# button widget
self.myButton = \
tkinter.Button(
self.btmFrame,
text='Get Estimate',
command=self.getTotal)
# pack the button
self.myButton.pack(side='right')
self.btmFrame.pack() # pack the frame
tkinter.mainloop() # enter main loop
def getTotal(self):
total = 0.00 # initialize total
# get our checkbutton values
for key in self.cbValues:
val = self.cbValues[key]
cbVal = val.get()
if cbVal == 1:
total += self.services[key]
# set the total, round to 2 places
self.value.set(format(total, '.2f'))
my_gui = AutoEstimateGUI()
| 3.21875
| 3
|
Tensorflow_InAction_Google/code/005/tensorflow/001.mnist_input_data_train_validation_test.py
|
lsieun/learn-AI
| 1
|
12779143
|
<reponame>lsieun/learn-AI
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(train_dir="./path/to/MNIST_data/",one_hot=True)
print("Training data size: ", mnist.train.num_examples)
print("Validating data size: ", mnist.validation.num_examples)
print("Testing data size: ", mnist.test.num_examples)
# print("Example training data: ", mnist.train.images[0])
print("Example training data label: ", mnist.train.labels[0])
| 3.3125
| 3
|
syncstream/mproc.py
|
cainmagi/sync-stream
| 0
|
12779144
|
#!python
# -*- coding: UTF-8 -*-
'''
################################################################
# Multiprocessing based synchronization.
# @ Sync-stream
# Produced by
# <NAME> @ <EMAIL>,
# <EMAIL>.
# Requirements: (Pay attention to version)
# python 3.6+
# The base module for the message synchronization. It is totally
# based on the stdlib of python.
# This module should be only used for synchronizing messages
# between threads and processes on the same device.
################################################################
'''
import os
import io
import collections
import threading
import queue
import multiprocessing
from typing import NoReturn
try:
from typing import Tuple, Sequence
except ImportError:
from builtins import tuple as Tuple
from collections.abc import Sequence
from .base import is_end_line_break, GroupedMessage
class LineBuffer:
'''The basic line-based buffer handle.
    This buffer provides a rotating item storage for the text-based stream. The text is stored not
by length, but by lines. The maximal line number of the storage is limited.
'''
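    # Illustrative usage (a minimal sketch added for clarity, not part of the original module):
    #   buf = LineBuffer(maxlen=3)
    #   buf.write('first\nsecond\nthird\nfourth\n')
    #   buf.read()     # -> ('second', 'third', 'fourth'): only the last `maxlen` lines are kept
    #   buf.read(2)    # -> ('third', 'fourth')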
def __init__(self, maxlen: int = 20) -> None:
'''Initialization.
Arguments:
maxlen: the maximal number of stored lines.
'''
if not isinstance(maxlen, int) or maxlen < 1:
raise TypeError('syncstream: The argument "maxlen" should be a positive integer.')
self.storage = collections.deque(maxlen=maxlen)
self.last_line = io.StringIO()
self.__last_line_lock = threading.Lock()
def clear(self) -> None:
'''Clear the whole buffer.
This method would clear the storage and the last line stream of this buffer. However,
it would not clear any mirrors or copies of this object. This method is thread-safe
        and should always succeed.
'''
with self.__last_line_lock:
self.last_line.seek(0, os.SEEK_SET)
self.last_line.truncate(0)
self.storage.clear()
def new_line(self) -> None:
R'''Manually trigger a new line to the buffer. If the current stream is already
a new line, do nothing.
This method is equivalent to
```python
if self.last_line.tell() > 0:
write('\n')
```
'''
with self.__last_line_lock:
if self.last_line.tell() > 0:
self.__write('\n')
def flush(self) -> None:
'''Flush the current written line stream.
'''
with self.__last_line_lock:
self.last_line.flush()
def parse_lines(self, lines: Sequence[str]) -> None:
'''Parse the lines.
This method would be triggered when the new lines are written by `write()` method.
The default behavior is adding the item into the storage.
Users could inherit this method and override it with their customized parsing method,
like regular expression searching.
Arguments:
            lines: the new lines to be added into the storage.
'''
self.storage.extend(lines)
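    # Illustrative override (a sketch, not part of the original module): a subclass could
    # filter lines with a regular expression before storing them, e.g.
    #
    #   import re
    #
    #   class ErrorOnlyBuffer(LineBuffer):
    #       def parse_lines(self, lines):
    #           self.storage.extend(line for line in lines if re.search(r'ERROR', line))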
def read(self, size: int = None) -> Tuple[str]:
'''Read the records.
Fetch the stored record items from the buffer. Using the `read()` method is thread-safe
and would not influence the cursor of `write()` method.
If the current written line is not blank, the `read()` method would regard it as the
last record item.
Arguments:
size: if set None, would return the whole storage.
if set a int value, would return the last `size` items.
'''
with self.__last_line_lock:
has_last_line = self.last_line.tell() > 0
n_lines = len(self.storage)
if size is None:
if has_last_line:
if n_lines > 0:
value = self.storage.popleft()
results = (*self.storage, self.last_line.getvalue())
self.storage.appendleft(value)
else:
results = (self.last_line.getvalue(), )
return results
else:
return tuple(self.storage)
elif size > 0:
is_storage_popped = has_last_line and n_lines > 0
if is_storage_popped:
preserved_value = self.storage.popleft()
size -= 1
results = list()
n_read = min(size, n_lines)
if n_read > 0:
self.storage.rotate(n_read)
for _ in range(n_read):
value = self.storage.popleft()
results.append(value)
self.storage.append(value)
if has_last_line:
results.append(self.last_line.getvalue())
if is_storage_popped:
self.storage.appendleft(preserved_value)
return tuple(results)
def __write(self, data: str) -> int:
'''The write() method without lock.
This method is private and should not be used by users.
'''
message_lines = data.splitlines()
n_lines = len(message_lines)
if n_lines == 1 and message_lines[0] == '':
self.parse_lines((self.last_line.getvalue(), ))
self.last_line.seek(0, os.SEEK_SET)
self.last_line.truncate(0)
return 1
elif is_end_line_break(data):
message_lines.append('')
n_lines += 1
if n_lines > 1:
message_lines[0] = self.last_line.getvalue() + message_lines[0]
last_line = message_lines.pop()
self.parse_lines(message_lines)
self.last_line.seek(0, os.SEEK_SET)
self.last_line.truncate(0)
return self.last_line.write(last_line)
elif n_lines == 1:
return self.last_line.write(message_lines[0])
def write(self, data: str) -> int:
'''Write the records.
The source data is the same as that of a text-based IO. Each time when `data` contains
a line break, a new record item would be pushed in the storage. The `write()` method
is thread-safe.
Arguments:
data: the data that would be written in the stream.
'''
with self.__last_line_lock:
return self.__write(data)
class LineProcMirror:
'''The mirror for the process-safe line-based buffer.
This mirror is initialized by `LineProcBuffer`, and would be used for managing the lines
written to the buffer.
'''
def __init__(self, q_maxsize: int = 0, aggressive: bool = False, timeout: float = None,
_queue: queue.Queue = None, _state: dict = None, _state_lock: threading.Lock = None) -> None:
'''Initialization
Arguments:
q_maxsize: the `maxsize` of the queue. Use 0 means no limitation. A size limited
queue is recommended for protecting the memory.
aggressive: the aggressive mode. If enabled, each call for the `write()` method
would trigger the process synchronization. Otherwise, the
synchronization would be triggered when a new line is written.
            timeout: the timeout of the process synchronizing events. If not set, the
synchronization would block the current process.
Private arguments:
_queue: the queue used for handling the message flow. If not set, would be
created by multiprocessing.Queue(). A recommended way is to set
this value by multiprocessing.Manager(). In this case, `q_maxsize`
would not be used.
_state, _state_lock: required for getting the buffer states. If not set, would
not turn on the stop signal.
'''
self.__buffer = io.StringIO()
self.__buffer_lock_ = None
self.aggressive = aggressive
self.__timeout = timeout
self.__block = timeout is None
if _queue is None:
self.__queue = multiprocessing.Queue(maxsize=q_maxsize)
else:
self.__queue = _queue
if _state is not None and _state_lock is not None:
self.__state_lock = _state_lock
self.__state = _state
else:
self.__state_lock = None
@property
def __buffer_lock(self) -> threading.Lock:
'''The threading lock for the buffer.
This lock should not be exposed to users. It is used for ensuring that the
temporary buffer of the mirror is thread-safe.
'''
if self.__buffer_lock_ is None:
self.__buffer_lock_ = threading.Lock()
return self.__buffer_lock_
def clear(self) -> None:
'''Clear the temporary buffer.
This method would clear the temporary buffer of the mirror. If the mirror works
        in the `aggressive` mode, the temporary buffer would not be used. In this case,
this method would not exert any influences to the mirror.
This method is thread-safe. Mirrors in different processes would not share the
temporary buffer. Note that the shared queue would not be cleared by this
method.
'''
with self.__buffer_lock:
self.__buffer.seek(0, os.SEEK_SET)
self.__buffer.truncate(0)
def new_line(self) -> None:
R'''Manually trigger a new line to the buffer. If the current stream is already
a new line, do nothing.
'''
with self.__buffer_lock:
if self.__buffer.tell() > 0:
self.__write('\n')
@property
def timeout(self) -> int:
'''The time out of the process synchronization.
'''
return self.__timeout
@timeout.setter
def timeout(self, timeout: int = None) -> None:
'''Setter for the property timeout.
'''
self.__timeout = timeout
self.__block = timeout is None
def send_eof(self) -> None:
'''Send an EOF signal to the main buffer.
        The EOF signal is used for telling the main buffer to stop waiting. Note that this
method would not close the queue. The mirror could be reused for another program.
'''
self.new_line()
self.__queue.put(
{'type': 'close', 'data': None},
block=self.__block, timeout=self.__timeout
)
def send_error(self, obj_err: Exception) -> None:
'''Send the error object to the main buffer.
The error object would be captured as an item of the storage in the main buffer.
'''
self.new_line()
self.__queue.put(
{'type': 'error', 'data': GroupedMessage(obj_err)},
block=self.__block, timeout=self.__timeout
)
def send_warning(self, obj_warn: Warning) -> None:
'''Send the warning object to the main buffer.
The warning object would be captured as an item of the storage in the main buffer.
'''
self.new_line()
self.__queue.put(
{'type': 'warning', 'data': GroupedMessage(obj_warn)},
block=self.__block, timeout=self.__timeout
)
def send_data(self, data: str) -> None:
'''Send the data to the main buffer.
This method is equivalent to call the main buffer (LineProcBuffer) by the following
method protected by process-safe synchronization:
```python
pbuf.write(data)
```
This method is used by other methods implicitly, and should not be used by users.
Arguments:
data: a str to be sent to the main buffer.
'''
self.__queue.put(
{'type': 'str', 'data': data},
block=self.__block, timeout=self.__timeout
)
def flush(self) -> None:
'''Flush the current written line stream.
'''
with self.__buffer_lock:
self.__buffer.flush()
def read(self) -> str:
'''Read the current buffer.
This method would only read the current bufferred values. If the property
`aggressive` is `True`, the `read()` method would always return empty value.
'''
with self.__buffer_lock:
return self.__buffer.getvalue()
def __write(self, data: str) -> int:
'''The write() method without lock.
This method is private and should not be used by users.
'''
try:
if self.__state_lock is not None:
with self.__state_lock:
is_closed = self.__state.get('closed', False)
if is_closed:
raise StopIteration('syncstream: The sub-process is terminated by users.')
except queue.Empty:
pass
message_lines = data.splitlines()
if self.aggressive:
self.send_data(data=data)
return len(data)
n_lines = len(message_lines)
        if n_lines > 1 or (n_lines == 1 and message_lines[0] == '') or is_end_line_break(data):  # A new line is triggered.
res = self.__buffer.write(data)
self.send_data(data=self.__buffer.getvalue())
self.__buffer.seek(0, os.SEEK_SET)
self.__buffer.truncate(0)
return res
elif n_lines == 1:
return self.__buffer.write(data)
def write(self, data: str) -> int:
'''Write the stream.
The source data is the same as that of a text-based IO. If `aggressive` is `True`,
each call of `write()` would make the stream value sent to the main buffer. If not,
each time when `data` contains a line break, the stream value would be sent to
the main buffer.
The method is thread-safe, but the message synchronization is process-safe.
Arguments:
data: the data that would be written in the stream.
'''
with self.__buffer_lock:
return self.__write(data)
class LineProcBuffer(LineBuffer):
'''The process-safe line-based buffer.
The rotating buffer with a maximal storage length. This buffer is the extended version of
the basic `LineBuffer`. It is used for the case of multi-processing. Use the shared queue
of this buffer to ensure the synchronization among processes. For example,
```python
def f(buffer):
with contextlib.redirect_stdout(buffer):
print('example')
buffer.send_eof()
if __name__ == '__main__':
pbuf = LineProcBuffer(maxlen=10)
with multiprocessing.Pool(4) as p:
p.map_async(f, tuple(pbuf.mirror for _ in range(4)))
pbuf.wait()
print(pbuf.read())
```
'''
def __init__(self, maxlen: int = 20) -> None:
'''Initialization.
Arguments:
maxlen: the maximal number of stored lines.
'''
super().__init__(maxlen=maxlen)
self.__manager = multiprocessing.Manager()
self.__state = self.__manager.dict(closed=False)
self.__state_lock = self.__manager.Lock() # pylint: disable=no-member
self.__mirror = LineProcMirror(q_maxsize=2 * maxlen, aggressive=False, timeout=None, _queue=self.__manager.Queue(), _state=self.__state, _state_lock=self.__state_lock)
self.n_mirrors = 0
self.__config_lock = threading.Lock()
@property
def mirror(self) -> LineProcMirror:
'''Get the mirror of this buffer. The buffer should not be used in sub-processes
directly. Use `self.mirror` to provide the process-safe mirror of the buffer.
This property could not be modified after the initialization.
'''
self.n_mirrors += 1
return self.__mirror
def stop_all_mirrors(self) -> None:
'''Send stop signals to all mirrors.
This operation is used for terminating the sub-processes safely. It does not
guarantee that the processes would be closed instantly. Each time when the new
message is written by the sub-processes, a check would be triggered.
        If users want to use this method, please ensure that the StopIteration error
        is caught by the process. The error would not be caught automatically. If
        users do not catch the error, the main process would get stuck at `wait()`.
'''
with self.__state_lock:
self.__state['closed'] = True
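    # Illustrative sub-process pattern (an assumption, not part of the original module):
    # catching the StopIteration that mirror.write() raises after stop_all_mirrors() lets
    # the worker exit cleanly, so the main process does not hang in wait().
    #
    #   def worker(mirror):
    #       try:
    #           with contextlib.redirect_stdout(mirror):
    #               long_running_job()
    #       except StopIteration:
    #           pass
    #       finally:
    #           mirror.send_eof()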
def reset_states(self) -> None:
'''Reset the states of the buffer.
This method should be used if the buffer needs to be reused.
'''
with self.__state_lock:
self.__state.clear()
self.__state['closed'] = False
def __check_close(self) -> bool:
'''Check whether to finish the `wait()` method.
This method would be used when receiving a closing signal.
This method is private and should not be used by users.
Note that this method is always triggered in the config_lock.
'''
self.n_mirrors -= 1
if self.n_mirrors > 0:
return True
else:
return False
def receive(self) -> bool:
'''Receive one item from the mirror.
This method would fetch one item from the process-safe queue, and write the results
in the thread-safe buffer.
'''
with self.__config_lock:
data = self.__mirror._LineProcMirror__queue.get() # pylint: disable=protected-access
dtype = data['type']
if dtype == 'str':
super().write(data['data'])
return True
elif dtype == 'error':
obj = data['data']
self.storage.append(obj)
return self.__check_close()
elif dtype == 'warning':
obj = data['data']
self.storage.append(obj)
return True
elif dtype == 'close':
return self.__check_close()
return False
def wait(self) -> None:
'''Wait the mirror until the close signal is received.
'''
while self.receive():
pass
def write(self, data: str) -> NoReturn:
'''Write the records.
This method should not be used. For instead, please use self.mirror.write().
Arguments:
data: the data that would be written in the stream.
'''
raise NotImplementedError('syncstream: Should not use this method, use '
'`self.mirror.write()` for instead.')
| 2.859375
| 3
|
quantum/tests/unit/_test_extension_portbindings.py
|
cuiwow/quantum
| 1
|
12779145
|
<reponame>cuiwow/quantum<filename>quantum/tests/unit/_test_extension_portbindings.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, NEC Corporation
#
import contextlib
from quantum import context
from quantum.extensions import portbindings
from quantum.manager import QuantumManager
from quantum.openstack.common import cfg
from quantum.tests.unit import test_db_plugin
class PortBindingsTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
# VIF_TYPE must be overridden according to plugin vif_type
VIF_TYPE = portbindings.VIF_TYPE_OTHER
# The plugin supports the port security feature such as
# security groups and anti spoofing.
HAS_PORT_FILTER = False
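    # Illustrative subclass (an assumption, not part of this test module): a plugin's own
    # test case would typically override the two class attributes above, e.g.
    #
    #   class MyPluginPortBindingsTestCase(PortBindingsTestCase):
    #       VIF_TYPE = portbindings.VIF_TYPE_OVS
    #       HAS_PORT_FILTER = True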
def _check_response_portbindings(self, port):
self.assertEqual(port['binding:vif_type'], self.VIF_TYPE)
port_cap = port[portbindings.CAPABILITIES]
self.assertEqual(port_cap[portbindings.CAP_PORT_FILTER],
self.HAS_PORT_FILTER)
def _check_response_no_portbindings(self, port):
self.assertTrue('status' in port)
self.assertFalse(portbindings.VIF_TYPE in port)
self.assertFalse(portbindings.CAPABILITIES in port)
def test_port_vif_details(self):
plugin = QuantumManager.get_plugin()
with self.port(name='name') as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings(port['port'])
# Check a response of get_port
ctx = context.get_admin_context()
port = plugin.get_port(ctx, port_id)
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
ctx = context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False,
read_deleted="no")
non_admin_port = plugin.get_port(ctx, port_id)
self._check_response_no_portbindings(non_admin_port)
def test_ports_vif_details(self):
plugin = QuantumManager.get_plugin()
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(), self.port()):
ctx = context.get_admin_context()
ports = plugin.get_ports(ctx)
self.assertEqual(len(ports), 2)
for port in ports:
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
ctx = context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False,
read_deleted="no")
ports = plugin.get_ports(ctx)
self.assertEqual(len(ports), 2)
for non_admin_port in ports:
self._check_response_no_portbindings(non_admin_port)
| 2.234375
| 2
|
opennem/core/parsers/aemo/facility_closures.py
|
paulculmsee/opennem
| 22
|
12779146
|
"""
OpenNEM AEMO facility closure dates parser.
"""
import logging
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Union
from openpyxl import load_workbook
from pydantic import ValidationError
from pydantic.class_validators import validator
from opennem.core.normalizers import is_number, normalize_duid
from opennem.schema.core import BaseConfig
logger = logging.getLogger("opennem.parsers.aemo_nem_facility_closures")
WORKBOOK_SHEET_NAME = "Expected Closure Year"
CLOSURE_SHEET_FIELDS = [
"station_name",
"duid",
"expected_closure_year",
"expected_closure_date",
]
def _clean_expected_closure_year(closure_year: Union[str, int]) -> Optional[int]:
"""Clean up expected closure year because sometimes they just put comments in the field"""
if is_number(closure_year):
return int(closure_year)
return None
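# Illustrative behaviour of the cleaner above (an assumption, not part of the original module):
#   _clean_expected_closure_year("2032")                    -> 2032
#   _clean_expected_closure_year("TBA - subject to review") -> None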
class AEMOClosureRecord(BaseConfig):
station_name: str
duid: Optional[str]
expected_closure_year: Optional[int]
expected_closure_date: Optional[datetime]
_validate_closure_year = validator("expected_closure_year", pre=True)(
_clean_expected_closure_year
)
_clean_duid = validator("duid", pre=True)(normalize_duid)
def parse_aemo_closures_xls() -> List[AEMOClosureRecord]:
"""Parse the AEMO NEM closures spreadsheet"""
aemo_path = (
Path(__file__).parent.parent.parent
/ "data"
/ "aemo"
/ "generating-unit-expected-closure-year.xlsx"
)
if not aemo_path.is_file():
raise Exception("Not found: {}".format(aemo_path))
# @TODO split here to read ByteIO from download / local file
wb = load_workbook(aemo_path, data_only=True)
generator_ws = wb[WORKBOOK_SHEET_NAME]
records = []
for row in generator_ws.iter_rows(min_row=2, values_only=True):
row_collapsed = row[0:2] + row[3:5]
return_dict = dict(zip(CLOSURE_SHEET_FIELDS, list(row_collapsed)))
r = None
try:
r = AEMOClosureRecord(**return_dict)
except ValidationError as e:
logger.error("Validation error: {}. {}".format(e, return_dict))
if r:
records.append(r)
return records
if __name__ == "__main__":
p = parse_aemo_closures_xls()
from pprint import pprint
pprint(p)
| 2.546875
| 3
|
tests/asp/cautious/test14.bug.multiaggregates.gringo.cautious.asp.test.py
|
bernardocuteri/wasp
| 19
|
12779147
|
<reponame>bernardocuteri/wasp
input = """
8 2 2 3 0 0
8 2 4 5 0 0
2 6 4 0 3 5 4 3 2
2 7 4 0 2 5 4 3 2
1 8 2 1 6 7
0
8 ok
2 a(1)
3 a(2)
4 a(3)
5 a(4)
6 aggrGT3
7 aggrGT2
0
B+
0
B-
1
0
1
"""
output = """
{aggrGT2, ok}
"""
| 1.945313
| 2
|
camnet/environment.py
|
rccrdo/python-camera-net
| 1
|
12779148
|
<reponame>rccrdo/python-camera-net
# Copyright (c) 2013 <NAME>, <EMAIL>.lucchese at gmail.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
import math
import numpy
import matplotlib
from matplotlib import rc
rc('font', family='sans-serif')
import matplotlib.pyplot as plt
from math2D import numeric_real, Point2D, Line2D, Rectangle
from geometry import Geometry
from camera import Camera
from target import Target
class Environment():
def __init__(self, width, height):
assert numeric_real(width)
assert width > 0
assert numeric_real(height)
assert height > 0
self._initialized = False
self._cameras = []
self._targets = []
self._geometries = []
self._walls = []
self._cur_time = 0
self._observation = {}
self._rect = Rectangle(Point2D(0, 0), 0., width, height)
if not self._rect.area():
print("warning, environment has zero area")
return
self._figure = None
self._initialized = True
def _contains_point(self, p):
return self._rect.contains_point(p)
def _contains_line(self, line):
return self._rect.contains_line(line)
def _update_walls(self):
self._walls = []
for geom in self._geometries:
for line in geom.border():
if line in self._walls:
continue
if not self._contains_line(line):
print "warning, discarding wall with both points outside the environment ", line
continue
self._walls.append(line)
def add_geometry(self, geom):
assert isinstance(geom, Geometry)
assert geom not in self._geometries
self._geometries.append(geom)
self._update_walls()
def add_camera(self, camera):
assert isinstance(camera, Camera)
assert camera not in self._cameras
o = camera.origin()
if not self._contains_point(o):
print "warning, camera with id \"%\" has origin outside the environment ", o
self._cameras.append(camera)
def add_cameras(self, cameras):
for cam in cameras:
self.add_camera(cam)
def add_target(self, target):
assert isinstance(target, Target)
assert target not in self._targets
self._targets.append(target)
def add_targets(self, targets):
for target in targets:
self.add_target(target)
def time(self):
return self._cur_time
def step(self, dt):
assert numeric_real(dt)
assert dt > 0
cur_time = self._cur_time
# update target positions
for target in self._targets:
target.step(cur_time, dt, self._walls)
# detect targets in each camera coverage area
data = []
for cam in self._cameras:
# step ptz cameras
cam.step(cur_time, dt)
for target in self._targets:
cam.detect(target, self._walls)
data.extend(cam.detection_data())
observ = {}
for entry in data:
#print entry
_id = entry.id
if _id in observ.keys():
observ[_id].append(entry.area_id)
else:
observ[_id] = [entry.area_id]
#for key in sorted(observ.keys()):
# print " observ for target \"%s\": nr. %d, %s" % (key, len(observ[key]), observ[key])
self._observation = {}
for targetid,val in observ.iteritems():
if val == []:
val = '0'
else:
val = '-'.join(sorted(val))
self._observation[targetid] = val
# open the building passage
#if self._cur_time == 30:
# self._geometries[0].set_passage_open(1)
# self._update_walls()
self._cur_time += dt
def get_observation(self):
return self._observation
def get_observation_for_target(self, targetid):
assert targetid
try:
entry = self._observation[targetid]
except:
entry = '0'
return entry
def plot(self, savepath=None):
if not self._figure:
#self._figure = plt.figure(figsize=(6.5,4.5))
self._figure = plt.figure(figsize=(9,6))
plt.ion()
plt.show()
fig = self._figure
plt.figure(fig.number)
# setup figure for this iteration
fig.clf()
fig.patch.set_facecolor((1,1,1))
# setup axis, limits, grid
gs = matplotlib.gridspec.GridSpec(1, 1, left=0.05, right=0.95, top=0.95, bottom=0.05, wspace=0, hspace=0)
axis = plt.subplot(gs[0,0], aspect='equal')
plt.axis('off')
if 0:
plt.axis('on')
#plt.grid('on')
xmin, xmax = self._rect.xlim()
plt.xlim(xmin -0.25, xmax)# +0.5)
ymin, ymax = self._rect.ylim()
plt.ylim(ymin -0.25, ymax +0.25)
# plot cameras
for cam in self._cameras:
cam.plot(axis)
# plot occlusion geometries
for geom in self._geometries:
geom.plot(axis)
# plot targets
for target in self._targets:
target.plot(axis)
# plot some stats
#axis.text(0.1, 5, str(self._cur_time), verticalalignment='center',
# horizontalalignment='center', family='sans-serif',
# color='black', fontsize=15)
fig.canvas.draw()
if savepath:
plt.savefig(savepath)
| 2.34375
| 2
|
src/mail.py
|
ccrsxx/autobsi
| 6
|
12779149
|
import os
import ssl
import smtplib
from typing import Callable
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
def send_mail(subject: str, log_path: str, img_path: str, get: Callable):
sender = get('email')
api_key = get('api_key')
receiver = get('target_email')
message = MIMEMultipart()
message['Subject'] = subject
message['From'] = sender
message['To'] = receiver
with open(log_path) as raw_log, open(img_path, 'rb') as raw_img:
log = raw_log.read()
image = raw_img.read()
log_file = MIMEText(log)
image_file = MIMEImage(image, name=os.path.basename(img_path))
message.attach(log_file)
message.attach(image_file)
text = message.as_string()
context = ssl.create_default_context()
with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as server:
server.login(sender, api_key)
server.sendmail(sender, receiver, text)
| 2.640625
| 3
|
src/mybot_pkg/scripts/line_follower_sim.py
|
leytpapas/thesis_project
| 0
|
12779150
|
<filename>src/mybot_pkg/scripts/line_follower_sim.py
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from rgb_hsv import BGR_HSV
class LineFollower(object):
def __init__(self, rgb_to_track, colour_error_perc = 10.0,colour_cal=False, camera_topic="/mybot/raspicam_node/image_raw", cmd_vel_topic="/mybot/cmd_vel"):
self._colour_cal = colour_cal
self._colour_error_perc = colour_error_perc
self.rgb_hsv = BGR_HSV()
self.hsv, hsv_numpy_percentage = self.rgb_hsv.rgb_hsv(rgb=rgb_to_track)
# We check which OpenCV version is installed.
(self.major, minor, _) = cv2.__version__.split(".")
rospy.logwarn("OpenCV Version Installed==>"+str(self.major))
# This way we process only half the frames
self.process_this_frame = True
self.bridge_object = CvBridge()
self.image_sub = rospy.Subscriber(camera_topic, Image, self.camera_callback)
self.cmd_vel_pub = rospy.Publisher(cmd_vel_topic, Twist, queue_size=1)
def camera_callback(self, data):
if self.process_this_frame:
self.process_this_frame = False
try:
                # We select bgr8 because it's the OpenCV encoding by default
cv_image = self.bridge_object.imgmsg_to_cv2(data, desired_encoding="bgr8")
except CvBridgeError as e:
print(e)
            # We get the image dimensions and crop the parts of the image we don't need.
            # Bear in mind that because it's an image matrix, the first value is the start (upper) row and the second value is the lower limit.
            # Select the limits so that the line is captured neither too close nor too far, using the smallest portion possible
            # to make processing faster.
# TODO: Get multiple lines so that we can generate paths.
small_frame = cv2.resize(cv_image, (0, 0), fx=0.2, fy=0.2)
height, width, channels = small_frame.shape
rospy.loginfo("height=%s, width=%s" % (str(height), str(width)))
#descentre = 160
#rows_to_watch = 100
#crop_img = small_frame[(height) / 2 + descentre:(height) / 2 + (descentre + rows_to_watch)][1:width]
crop_img = small_frame
# Convert from RGB to HSV
hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
min_hsv = self.hsv * (1.0-(self._colour_error_perc / 100.0))
max_hsv = self.hsv * (1.0 + (self._colour_error_perc / 100.0))
lower_yellow = np.array(min_hsv)
upper_yellow = np.array(max_hsv)
# Threshold the HSV image to get only yellow colors
mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(crop_img, crop_img, mask=mask)
if self.major == '3':
# If its 3
(_, contours, _) = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)
else:
# If its 2 or 4
(contours, _) = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)
rospy.loginfo("Number of centroids==>" + str(len(contours)))
centres = []
for i in range(len(contours)):
moments = cv2.moments(contours[i])
try:
centres.append((int(moments['m10'] / moments['m00']), int(moments['m01'] / moments['m00'])))
cv2.circle(res, centres[-1], 10, (0, 255, 0), -1)
except ZeroDivisionError:
pass
rospy.loginfo(str(centres))
# Select the right centroid
# [(542, 39), (136, 46)], (x, y)
most_right_centroid_index = 0
index = 0
max_x_value = 0
centroids_detected = []
for candidate in centres:
# Retrieve the cx value
cx = candidate[0]
# Get the Cx more to the right
if cx >= max_x_value:
max_x_value = cx
most_right_centroid_index = index
index += 1
try:
cx = centres[most_right_centroid_index][0]
cy = centres[most_right_centroid_index][1]
rospy.logwarn("Centroid FOUND ==" + str(cx) + "," + str(cy) + "")
except:
cy, cx = height / 2, width / 2
centroids_detected.append([cx,cy])
# Draw the centroid in the result image
cv2.circle(res, (int(cx), int(cy)), 5, (0, 0, 255), -1)
if self._colour_cal:
cv2.imshow("Original", small_frame)
else:
cv2.imshow("HSV", hsv)
cv2.imshow("MASK", mask)
cv2.imshow("RES", res)
# We send data from the first cetroid we get
if len(centroids_detected) > 0:
cx_final = width
cy_final = height
for centroid in centroids_detected:
# We get the values of the centroid closer to us
print(centroid)
if centroid[1]< cy_final:
cx_final = centroid[0]
cy_final = centroid[1]
print("Selected CENTROID AS FINAL")
else:
cx_final = None
cy_final = None
self.move_robot(height, width, cx_final, cy_final)
cv2.waitKey(1)
else:
self.process_this_frame = True
def move_robot(self, image_dim_y, image_dim_x, cx, cy, linear_vel_base = 0.1, angular_vel_base = 0.1):
"""
It move the Robot based on the Centroid Data
image_dim_x=96, image_dim_y=128
cx, cy = [(77, 71)]
"""
cmd_vel = Twist()
cmd_vel.linear.x = 0.0
cmd_vel.angular.z = 0.0
FACTOR_LINEAR = 0.001
FACTOR_ANGULAR = 0.1
if cx is not None and cy is not None:
origin = [image_dim_x / 2.0, image_dim_y / 2.0]
centroid = [cx, cy]
delta = [centroid[0] - origin[0], centroid[1]]
print("origin="+str(origin))
print("centroid="+str(centroid))
print("delta="+str(delta))
# -1 because when delta is positive we want to turn right, which means sending a negative angular
cmd_vel.angular.z = angular_vel_base * delta[0] * FACTOR_ANGULAR * -1
            # If it's further away it has to go faster; if closer, then slower
cmd_vel.linear.x = linear_vel_base - delta[1] * FACTOR_LINEAR
else:
cmd_vel.angular.z = angular_vel_base * 2
cmd_vel.linear.x = linear_vel_base * 0.5
print("NO CENTROID DETECTED...SEARCHING...")
print("SPEED==>["+str(cmd_vel.linear.x)+","+str(cmd_vel.angular.z)+"]")
self.cmd_vel_pub.publish(cmd_vel)
def loop(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('line_follower_start', anonymous=True)
rgb_to_track = [255,255,255]
robot_mover = LineFollower(rgb_to_track=rgb_to_track, colour_error_perc= 20.0, colour_cal=False)
robot_mover.loop()
| 2.546875
| 3
|