max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
freebasics/templatetags/freebasics_tags.py | praekelt/molo-freebasics | 0 | 12762751 | <reponame>praekelt/molo-freebasics
from django.template import Library
from django.conf import settings
register = Library()
@register.inclusion_tag('custom_css.html', takes_context=True)
def custom_css(context):
    """Expose the CUSTOM_CSS_* Django settings to the custom_css.html template."""
    # Map template variable names directly onto their configured settings.
    return {
        "body_border_color": settings.CUSTOM_CSS_BASE_BACKGROUND_COLOR,
        "body_font_family": settings.CUSTOM_CSS_BODY_FONT_FAMILY,
        "block_background_color": settings.CUSTOM_CSS_BLOCK_BACKGROUND_COLOR,
        "block_font_family": settings.CUSTOM_CSS_BLOCK_FONT_FAMILY,
        "text_transform": settings.CUSTOM_CSS_BLOCK_TEXT_TRANSFORM,
        "accent_1": settings.CUSTOM_CSS_ACCENT_1,
        "accent_2": settings.CUSTOM_CSS_ACCENT_2,
    }
@register.simple_tag(takes_context=True)
def get_site_name(context):
    """Return the configured SITE_NAME setting for use in templates."""
    return settings.SITE_NAME
| 1.976563 | 2 |
plink2merlin.py | jlc-christie/plink2merlin | 3 | 12762752 | import os
import sys
import copy
import enum
import time
import uuid
import argparse
import subprocess
from dataclasses import dataclass, field
from typing import List, Dict, Set, Tuple
class PlinkWrapper:
    """Thin wrapper around a plink executable.

    Validates that exactly one input source (--bed / --ped / --vcf) was
    supplied on the command line and knows how to spawn plink with the
    matching input flag.
    """

    class InputType(enum.Enum):
        BED = 1
        PED = 2
        VCF = 3

        def get_plink_flag(self):
            """Return the plink CLI flag matching this input type."""
            # Dispatch table instead of an if/elif chain over self.value.
            return {1: '--bfile', 2: '--file', 3: '--vcf'}[self.value]

    def __init__(self, args, uuid=None):
        # BUG FIX: the old signature used ``uuid=str(uuid.uuid4())`` as the
        # default.  Defaults are evaluated once at class-definition time, so
        # every instance silently shared the same "unique" id (and the
        # parameter shadowed the uuid module).  Generate it lazily instead.
        import uuid as _uuid
        self.args = args
        self.uuid = uuid if uuid is not None else str(_uuid.uuid4())
        self.input_set = False
        self._validate_input_path()

    def _validate_input_path(self):
        """Record the single input source from args; exit on zero inputs."""
        def check_input_init():
            # Guard: a previous branch already claimed the input slot.
            if self.input_set:
                print("Maximum of 1 input allowed, found input:")
                print("{}: {}".format(self.input_type, self.input_str))
                print("exiting...")
                sys.exit()
        if self.args.bed is not None:
            check_input_init()
            self.input_type = self.InputType.BED
            self.input_str = self.args.bed
            self.input_set = True
        elif self.args.ped is not None:
            check_input_init()
            self.input_type = self.InputType.PED
            self.input_str = self.args.ped
            self.input_set = True
        elif self.args.vcf is not None:
            check_input_init()
            self.input_type = self.InputType.VCF
            self.input_str = self.args.vcf
            self.input_set = True
        else:
            if not self.input_set:
                print("No valid input file path has been given")
                print("exiting...")
                sys.exit()

    def run(self, options):
        """Invoke plink with the configured input plus ``options`` (list of str).

        Output is captured (not echoed) via subprocess.run(capture_output=True).
        """
        cmd = [
            self.args.plink_binary,
            self.input_type.get_plink_flag(), self.input_str,
        ]
        cmd += options
        subprocess.run(cmd, capture_output=True)
def inout(f):
    """Decorator that prints entry/exit markers and the wall-clock duration of ``f``."""
    def wrapper(*args, **kwargs):
        started = time.time()
        dots = '...'
        print(f"Entering {f.__name__}{dots:25}", end='')
        result = f(*args, **kwargs)
        print(f"Exiting. (Finished in {time.time() - started:2.4} seconds)")
        return result
    return wrapper
class Sex(enum.Enum):
    """Pedigree sex coding as used in plink .fam files (0 means unknown)."""
    UNKNOWN = 0
    MALE = 1
    FEMALE = 2
@dataclass
class MerlinRecord:
    """One pedigree row destined for a MERLIN .ped file."""
    fid: str
    iid: int
    mid: int
    pid: int
    sex: Sex
    genotypes: List[str]

    def as_string(self):
        """Serialise as a tab-separated MERLIN .ped line (allele pairs as a/b)."""
        # Pair consecutive alleles: (g[0], g[1]), (g[2], g[3]), ...
        pairs = [f"{a}/{b}" for a, b in zip(self.genotypes[::2], self.genotypes[1::2])]
        fields = [self.fid, self.iid, self.mid, self.pid, self.sex.value]
        return "\t".join(str(x) for x in fields + pairs) + "\n"
@dataclass
class Individual:
    """A single pedigree member; identity is the (iid, fid) pair only."""
    iid: int
    fid: str
    mid: int
    pid: int
    sex: Sex

    def __eq__(self, other):
        # Equality deliberately ignores parents/sex: same person in same family.
        return (self.iid, self.fid) == (other.iid, other.fid)

    def __hash__(self):
        return hash((self.iid, self.fid))
@dataclass
class Variant:
    """A genetic marker; identity is the rsid alone."""
    chrom: int
    rsid: str
    pos: float  # genetic position (centimorgans)
    bp: int     # physical position (base pairs)

    def __eq__(self, other):
        # NOTE(review): compares on rsid only, as the original flagged ("sketchy").
        return self.rsid == other.rsid

    def __hash__(self):
        return hash(self.rsid)
def get_individuals_from_fam(filename: str) -> Set[Individual]:
    """Parse a plink .fam file into a set of Individual records.

    Each line holds whitespace-separated columns: fid iid pid mid sex pheno.
    Note the Individual constructor is fed (iid, fid, mid, pid, sex).
    """
    with open(filename, 'r') as handle:
        return {
            Individual(int(iid), fid, int(mid), int(pid), Sex(int(sex)))
            for fid, iid, pid, mid, sex, _ in (line.split() for line in handle)
        }
def add_missing_individuals(indivs: Set[Individual]):
    """Return ``indivs`` plus synthesised founder records for absent parents.

    Every non-founder names a mother (mid) and father (pid); if either parent
    is not already present in the set, a placeholder founder is created so
    the pedigree is closed.
    """
    result = []
    for person in indivs:
        if person.mid == 0 and person.pid == 0:
            # Founder: nothing to synthesise.
            result.append(person)
            continue
        mother = Individual(person.mid, person.fid, 0, 0, Sex(2))
        father = Individual(person.pid, person.fid, 0, 0, Sex(1))
        if mother not in indivs:
            result.append(mother)
        if father not in indivs:
            result.append(father)
        result.append(person)
    return set(result)
def generate_family_map(individuals: Set[Individual]) -> Dict[str, List[Individual]]:
    """Group individuals by family id.

    Returns a dict mapping fid -> list of members of that family.
    """
    fam_map: Dict[str, List[Individual]] = {}
    for indiv in individuals:
        # setdefault replaces the redundant ``fid not in fam_map.keys()``
        # membership check (which also needlessly materialised .keys()).
        fam_map.setdefault(indiv.fid, []).append(indiv)
    return fam_map
def filter_useful_fams(fam_map: Dict[str, List[Individual]]) -> Dict[str, List[Individual]]:
    """Keep only families with more than one non-founder (i.e. sib-pairs).

    A founder is a member whose pid and mid are both 0; everyone else counts
    as a non-founder.
    """
    def non_founder_count(members):
        return sum(1 for m in members if not (m.pid == 0 and m.mid == 0))

    return {fid: members for fid, members in fam_map.items()
            if non_founder_count(members) > 1}
def find_disjoint_fams(fam_map: Dict[str, List[Individual]]) -> Dict[str, List[set]]:
    """Split each family id into connected components of related iids.

    For every fid, builds sets of iids linked through parent/child
    references; unrelated sub-pedigrees that share one fid end up in
    separate sets.  Returns fid -> list of pairwise-disjoint iid sets.
    """
    fid_disjoint_map = {}
    for fid, indivs in fam_map.items():
        family_sets = []
        for indiv in indivs:
            # Seed a component with this individual plus any named parents.
            tmp_fs = set([indiv.iid])
            if indiv.mid != 0:
                tmp_fs.add(indiv.mid)
            if indiv.pid != 0:
                tmp_fs.add(indiv.pid)
            if len(family_sets) == 0:
                family_sets.append(tmp_fs)
                continue
            # Union into the first overlapping component found; this can
            # leave a duplicate (the old ``fs`` is still in the list), which
            # the merge pass below cleans up.
            for fs in family_sets:
                if not fs.isdisjoint(tmp_fs):
                    tmp_fs |= fs
                    break
            family_sets.append(tmp_fs)
        # Repeatedly merge any pair of overlapping components until all
        # remaining sets are pairwise disjoint.  After each merge the list
        # was mutated, so both loops restart from scratch.
        did_merge = True
        while did_merge:
            did_merge = False
            for i in range(len(family_sets)):
                fs = family_sets[i]
                for j in range(i, len(family_sets)):
                    if i == j:
                        continue
                    tmp = family_sets[j]
                    if not fs.isdisjoint(tmp):
                        family_sets.remove(tmp)
                        family_sets.remove(fs)
                        fs |= tmp
                        family_sets.append(fs)
                        did_merge = True
                        break
                if did_merge:
                    break
        fid_disjoint_map[fid] = family_sets
    return fid_disjoint_map
# Rename, and change func sig
def get_fidpid_genotype_map(plink: PlinkWrapper, fm: Dict[str, List[set]]):
    '''
    This is one of the two big bottlenecks in the script, uses plink to generate
    the .ped/.map files from the .bed files. When the genotypes could be
    directly read from the binary, but from experience doing this, it's very
    easy to make mistakes and not very easy to know IF you've made a mistake.
    Because of this, this just reads the plaintext genotypes from the .ped
    file.

    NOTE(review): despite the annotation, callers pass the fid -> [Individual]
    family map here (the body reads ``indiv.fid`` / ``indiv.iid``) — the
    signature's ``Dict[str, List[set]]`` looks stale; confirm and fix.
    Returns ("fid iid" -> flat allele list, [Variant]).
    '''
    # Temporary plink work files created in the CWD; removed at the end.
    files_to_delete = []
    indiv_tuples = [(indiv.fid, indiv.iid) for indivs in fm.values() for indiv in indivs]
    with open('keep.txt', 'w+') as f:
        for fid, pid in indiv_tuples:
            f.write(f"{fid} {pid}\n")
    files_to_delete.append('keep.txt')
    # First pass: restrict to kept samples, MAF-filter, and LD-prune markers.
    plink.run([
        '--keep', 'keep.txt',
        '--maf', '0.2',
        '--indep-pairwise', '50', '5', '0.05',
        '--out', 'plink',
    ])
    files_to_delete += ['plink.prune.in', 'plink.prune.out', 'plink.log']
    # Second pass: extract the pruned markers as plaintext .ped/.map.
    plink.run([
        '--extract', 'plink.prune.in',
        '--recode',
        '--out', 'pruned'
    ])
    files_to_delete += ['pruned.ped', 'pruned.map', 'pruned.log']
    # Deduplicate markers sharing the same cM position (MERLIN needs unique
    # positions), remembering which entries to drop from the genotype rows.
    # NOTE(review): ``index`` only advances for *kept* markers, yet it is
    # recorded for duplicates and later used as ``del genotypes[i:i+2]``
    # (allele units, not marker units).  Verify the dropped allele pairs
    # actually correspond to the duplicate markers — this looks off.
    cm_rsid_set = set()
    variants = []
    indices = []
    index = 0
    with open('pruned.map', 'r') as f:
        for line in f:
            chrom, rsid, pos_cm, pos_bp = line.split()
            if pos_cm in cm_rsid_set:
                indices.append(index)
                continue
            cm_rsid_set.add(pos_cm)
            variants.append(Variant(int(chrom), rsid, float(pos_cm), int(pos_bp)))
            index += 1
    # Read the per-sample genotype rows; columns 0-5 are pedigree metadata
    # (fid iid pid mid sex pheno), the rest are alleles, two per marker.
    fp_geno_map = {}
    with open('pruned.ped', 'r') as f:
        for line in f:
            data = line.split()
            fid, iid, pid, mid, sex = data[:5]
            genotypes = data[6:]
            for i in reversed(indices):
                del genotypes[i:i+2]
            fp_geno_map[fid+" "+iid] = genotypes
    # Side-effect comprehension kept as-is; cleans up all temp files.
    [os.remove(fn) for fn in files_to_delete]
    return fp_geno_map, variants
def create_merlin_records(fam_map, disjoint_fams, fp_geno_map):
    """Build MerlinRecord rows, re-labelling fids per disjoint sub-family.

    fam_map: fid -> list of Individual.
    disjoint_fams: fid -> list of disjoint iid sets (see find_disjoint_fams).
    fp_geno_map: "fid iid" -> flat allele list; individuals without genotype
    data get an all-'0' (missing) allele list of matching length.
    """
    def swap_fid(disjoint_fams, fid, pid, iid):
        """Return the sub-family label ``<fid>_<n>`` containing this member."""
        # BUG FIX: the original tested ``pid is 0`` — identity, not equality.
        # That only works via CPython's small-int caching and emits a
        # SyntaxWarning on modern interpreters; use ``==``.
        tmp_pid = pid
        if pid == 0:
            tmp_pid = iid  # Sets founders pid to itself, to get correct fid
        for i in range(len(disjoint_fams[fid])):
            disjoint_fam = disjoint_fams[fid][i]
            if tmp_pid in disjoint_fam:
                return f"{fid}_{i+1}"
        # NOTE(review): implicitly returns None when no set matches — confirm
        # that is unreachable for well-formed input.

    records = []
    # All genotype rows share one length; use any row as the template.
    genos_len = len(list(fp_geno_map.values())[0])
    for fid, indivs in fam_map.items():
        for indiv in indivs:
            new_fid = swap_fid(disjoint_fams, fid, indiv.pid, indiv.iid)
            try:
                genotypes = fp_geno_map[f"{fid} {indiv.iid}"]
            except KeyError:
                # Individual was synthesised (no genotype data): mark missing.
                genotypes = ['0' for _ in range(genos_len)]
            r = MerlinRecord(new_fid, indiv.iid, indiv.mid, indiv.pid,
                             indiv.sex, genotypes)
            records.append(r)
    return records
def write_merlin_ped(records, out_filename):
    """Write all records to a MERLIN .ped file.

    This is the biggest bottleneck, for obvious reasons; optimising
    MerlinRecord.as_string would be the place to start if it matters.
    """
    with open(out_filename, 'w+') as out:
        out.writelines(record.as_string() for record in records)
def write_merlin_dat(variants, out_filename):
    """Write the MERLIN .dat file: one 'M <rsid>' marker line per variant."""
    marker_lines = [f"M {variant.rsid}\n" for variant in variants]
    with open(out_filename, 'w+') as out:
        out.writelines(marker_lines)
def write_merlin_map(variants: List[Variant], outfile: str):
    """Write the MERLIN .map file: header plus chromosome/marker/cM rows."""
    with open(outfile, 'w+') as out:
        out.write("CHROMOSOME\tMARKER\tPOSITION\n")
        out.writelines(f"{v.chrom}\t{v.rsid}\t{v.pos}\n" for v in variants)
@inout
def process_chrom(chr_n: int, plink: PlinkWrapper,
                  indir='split_by_chromosome', outdir='merlin_input_files'):
    '''
    This function is not ideal, ideally I would have written the
    PlinkWrapper object with a better constructor so that I could easily make a
    new PlinkWrapper object without using output from argparse. Instead I've chosen
    to just deepcopy the object and then manually change the input string.
    This function is also very redundant, the family information never changes,
    only the genotypes...

    Pipeline per chromosome: read .fam -> close the pedigree -> group by fid
    -> split into disjoint sub-families -> extract pruned genotypes via plink
    -> emit MERLIN .ped/.map/.dat into ``outdir``.
    '''
    # Clone the wrapper and point it at this chromosome's fileset.
    pc = copy.deepcopy(plink)
    pc.input_str = f"{indir}/{chr_n}"
    indivs = get_individuals_from_fam(f"{indir}/{chr_n}.fam")
    indivs = add_missing_individuals(indivs)
    fam_map = generate_family_map(indivs)
    # Uncomment line below to only include families with >1 offspring (i.e.
    # families with sib-pairs)
    #fam_map = filter_useful_fams(fam_map)
    disjoint_fams = find_disjoint_fams(fam_map)
    # NOTE(review): get_fidpid_genotype_map's annotation expects the disjoint
    # map but its body reads Individual attributes — passing fam_map matches
    # the body; the callee's signature is what looks wrong.
    fp_geno_map, variants = get_fidpid_genotype_map(pc, fam_map)
    records = create_merlin_records(fam_map, disjoint_fams, fp_geno_map)
    fp_geno_map = None # Makes sure we're not holding excessive memory
    try:
        os.mkdir(outdir)
    except FileExistsError:
        pass
    write_merlin_ped(records, f"{outdir}/{chr_n}.ped")
    write_merlin_map(variants, f"{outdir}/{chr_n}.map")
    write_merlin_dat(variants, f"{outdir}/{chr_n}.dat")
def plink_split_by_chrom(plink: PlinkWrapper, outdir='split_by_chromosome'):
    """Split the input dataset into per-chromosome binary filesets.

    Writes <outdir>/<chrom>.{bed,bim,fam} for autosomes 1..22 by invoking
    plink once per chromosome.
    """
    # exist_ok replaces the try/except FileExistsError dance and also
    # creates intermediate directories when outdir is nested.
    os.makedirs(outdir, exist_ok=True)
    for chrom in range(1, 23):
        plink.run([
            '--chr', str(chrom),
            '--make-bed',
            '--out', f"{outdir}/{chrom}",
        ])
if __name__ == '__main__':
    desc = '''
    File format converter to take a plink binary file or vcf and convert it
    to a format which MERLIN accepts, which consists of a .ped and a .dat
    file. It is important to note that the .ped file here is not the same as
    the plink .ped file.
    '''
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--bed', type=str, help='PLINK binary input file path stub')
    parser.add_argument('--ped', type=str, help='PLINK input file path stub')
    parser.add_argument('--vcf', type=str, help='VCF file path')
    parser.add_argument('--plink-binary', type=str, default='plink', help='''Path
        to PLINK binary, useful if plink isn't globally accessible
        or you want to use a specific version of plink''')
    args = parser.parse_args()
    # Validate the chosen input, split the dataset by chromosome, then emit
    # MERLIN input files for each autosome in turn.
    plink = PlinkWrapper(args)
    plink_split_by_chrom(plink)
    for i in range(1, 23):
        print(f"Processing chromosome {i}...")
        process_chrom(i, plink)
| 2.546875 | 3 |
cache_gs/interfaces/super_cache.py | guionardo/py-cache-guiosoft | 0 | 12762753 | <gh_stars>0
from cache_gs.cache_classes.cache_data import CacheData
from cache_gs.utils.logging import get_logger
class SuperCache:
    """Abstract base for cache backends keyed by (section, key).

    Subclasses implement setup() plus the underscore CRUD hooks; the public
    get/set/delete methods wrap values in CacheData and apply TTL expiry.
    """

    def __init__(self, string_connection: str, **extra_args):
        if not isinstance(string_connection, str) or not string_connection:
            # BUG FIX: the original formatted ``type(self.__class__)``, which
            # is always ``<class 'type'>`` (the metaclass).  ``type(self)``
            # names the actual subclass being constructed.
            raise AttributeError(
                "bad string connection for {0}".format(type(self)))
        self._string_connection = string_connection
        self._extra_args = extra_args
        self.setup()

    def setup(self):
        """Initialise the backend connection; must be overridden."""
        raise NotImplementedError

    def _get_value(self, section: str, key: str, default=None) -> CacheData:
        """Backend hook: fetch the raw CacheData (or None); must be overridden."""
        raise NotImplementedError

    def _set_value(self, data: CacheData) -> bool:
        """Backend hook: persist a CacheData; must be overridden."""
        raise NotImplementedError

    def _delete_value(self, data: CacheData) -> bool:
        """Backend hook: remove a CacheData; must be overridden."""
        raise NotImplementedError

    def get_value(self, section: str, key: str, default=None) -> str:
        """Return the cached value, or ``default`` when absent or expired."""
        data = self._get_value(section, key, default)
        if not data or data.expired:
            return default
        return data.value

    def set_value(self, section: str, key: str,
                  value: str, ttl: int = 0) -> bool:
        """Store ``value`` under (section, key) with ``ttl`` seconds (0 = no expiry)."""
        data = CacheData(section, key, value, ttl)
        return self._set_value(data)

    def delete_value(self, section: str, key: str) -> bool:
        """Remove the entry stored under (section, key)."""
        data = CacheData(section, key, None, 0)
        return self._delete_value(data)

    def purge_expired(self) -> int:
        """Drop expired entries, returning the count; must be overridden."""
        raise NotImplementedError

    @classmethod
    def log_debug(cls, text, *args, **kwargs):
        get_logger().debug(text, *args, **kwargs)

    @classmethod
    def log_info(cls, text, *args, **kwargs):
        get_logger().info(text, *args, **kwargs)

    @classmethod
    def log_error(cls, text, *args, **kwargs):
        get_logger().error(text, *args, **kwargs)
class CacheException(Exception):
    """Base exception for cache_gs errors."""
| 2.421875 | 2 |
remote_gource/utils.py | wraithy/gource-summary | 0 | 12762754 | import os
import typing
from .types import Author
def write_log(path, log: str):
    """Write ``log`` to ``path``, creating parent directories as needed.

    ``path`` may start with '~', which is expanded to the user's home.
    """
    path = os.path.expanduser(path)
    base_path = os.path.dirname(path)
    # BUG FIX: a bare filename yields dirname '' and ``os.makedirs('')``
    # raises FileNotFoundError; exist_ok also closes the check-then-create
    # race of the original exists()/makedirs() pair.
    if base_path:
        os.makedirs(base_path, exist_ok=True)
    with open(path, 'w') as f:
        f.write(log)
def write_avatars(dirname, avatars_by_author: typing.Dict[Author, bytes]):
    """Write each author's avatar bytes to <dirname>/<author.name>.

    ``dirname`` may start with '~' and is created when missing.
    """
    dirname = os.path.expanduser(dirname)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    for author, image_bytes in avatars_by_author.items():
        avatar_path = os.path.join(dirname, author.name)
        with open(avatar_path, 'wb') as out:
            out.write(image_bytes)
| 2.921875 | 3 |
custom/__init__.py | georgi/MusicTransformer-pytorch | 0 | 12762755 | <filename>custom/__init__.py
import argparse
def get_argument_parser(description=None):
    """Build the shared CLI parser with the required --model_dir argument.

    Args:
        description: optional help text shown by ``--help``.
    """
    # BUG FIX: ArgumentParser's first positional parameter is ``prog``, so
    # the original ``argparse.ArgumentParser(description)`` set the program
    # name instead of the description.  Pass it by keyword.
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-m", "--model_dir", type=str, required=True,
                        help="The directory for a trained model is saved.")
    return parser
pmm_scripts/hello_world_script.py | cardosofede/hummingbot | 542 | 12762756 | from hummingbot.pmm_script.pmm_script_base import PMMScriptBase
class HelloWorldPMMScript(PMMScriptBase):
    """
    Demonstrates how to send messages using notify and log functions. It also
    shows how errors and commands are handled.
    """

    def on_tick(self):
        tick_count = len(self.mid_prices)
        if tick_count < 3:
            self.notify("Hello Hummingbots World!")
            self.log("Hello world logged.")
        elif 3 <= tick_count < 5:
            # Deliberate ZeroDivisionError: Hummingbot later reports this on
            # the log screen, demonstrating script error handling.
            _ = 1 / 0

    def on_command(self, cmd, args):
        if cmd == 'ping':
            self.notify('pong!')
        else:
            self.notify(f'Unrecognised command: {cmd}')
| 3.125 | 3 |
models.py | NamPNQ/thongtinluadao.info | 0 | 12762757 | <filename>models.py
import uuid
import json
from flask.ext.redis import FlaskRedis
db = FlaskRedis()
class BadFieldException(BaseException):
    """Raised when a key is not listed in ``__fields__`` (or is blacklisted).

    NOTE(review): subclassing BaseException (not Exception) means a broad
    ``except Exception`` will NOT catch these — confirm that is intentional.
    """
    pass
class NotFoundException(BaseException):
    """Raised by TTLDBaseModel.get when the key is absent from the store."""
    pass
class TTLDBaseModel(object):
    """Minimal Redis-backed JSON document model.

    Subclasses declare ``__key_pattern__`` (format string filled from the
    document) and ``__fields__`` (whitelist of item keys).  Documents are
    stored as JSON under the formatted key with a ``_meta`` classname tag.
    """
    __key_pattern__ = 'nothing'
    __fields__ = []
    __blacklist__ = ['_meta']

    def __init__(self, *args, **kwargs):
        # BUG FIX: ``_doc`` used to be a *class* attribute (``_doc = {}``),
        # so every instance of every subclass mutated one shared dict.
        # Give each instance its own document store.
        self._doc = {}

    def _hasfield(self, key):
        # A key is addressable when whitelisted and not meta-reserved.
        return key in self.__fields__ and key not in self.__blacklist__

    def __setitem__(self, key, val):
        if self._hasfield(key):
            self._doc[key] = val
        else:
            raise BadFieldException

    def __getitem__(self, key):
        if self._hasfield(key):
            return self._doc[key]
        else:
            raise BadFieldException

    def __delitem__(self, key):
        if self._hasfield(key):
            del self._doc[key]
        else:
            raise BadFieldException

    @classmethod
    def get(cls, **kwargs):
        """Load the document whose key is built from ``kwargs``.

        Raises NotFoundException when the key is absent, and Exception when
        the stored ``_meta`` classname does not match ``cls``.
        """
        try:
            _key = cls.__key_pattern__.format(**kwargs)
        except KeyError:
            raise Exception('Missing something value for fill key pattern')
        _doc = db.get(_key)
        if _doc:
            _doc = json.loads(_doc)
            if _doc['_meta']['classname'] != cls.__name__:
                raise Exception('Invalid class')
            del _doc['_meta']
            rv = cls(**_doc)
            rv._doc = _doc
            return rv
        else:
            raise NotFoundException

    def getitem(self, key, default=None):
        """dict.get-style access: ``default`` when the field is unset."""
        if self._hasfield(key):
            try:
                return self._doc[key]
            except KeyError:
                return default
        else:
            raise BadFieldException

    def save(self):
        """Serialise the document (plus ``_meta`` tag) to Redis under its key."""
        try:
            _key = self.__key_pattern__.format(**self._doc)
        except KeyError:
            raise Exception('Missing something value for fill key pattern')
        _doc = self._doc.copy()
        _doc['_meta'] = {
            'classname': self.__class__.__name__
        }
        db.set(_key, json.dumps(_doc))
class User(TTLDBaseModel):
    """User document stored under ``users:{id}`` with an email -> id index."""
    __key_pattern__ = 'users:{id}'
    __fields__ = ['id', 'email', 'username']
    # Secondary index key: formatted with the user's email, stores the uuid.
    # NOTE(review): the prefix says 'username' but it is filled with the
    # email — confirm which identifier is intended.
    _email_to_uid_key = 'username.to.id:{email}'
    def __init__(self, email, *arg, **kwargs):
        """Create a new user with a fresh uuid1 id; ``email`` is required."""
        super(User, self).__init__(*arg, **kwargs)
        if not email:
            raise Exception('Email cannot be empty')
        # uuid1 is time-based, so ids sort roughly by creation time.
        self['id'] = str(uuid.uuid1())
        self['email'] = email
    @classmethod
    def get_user_by_email(cls, email):
        """Look up a user through the email index; returns None when absent."""
        uid = db.get(cls._email_to_uid_key.format(email=email))
        if not uid:
            return None
        else:
            try:
                return cls.get(id=uid)
            except NotFoundException:
                # Index entry is stale (user document deleted): treat as missing.
                return None
    def save(self):
        """Persist the user document and refresh the email -> id index."""
        super(User, self).save()
        db.set(self._email_to_uid_key.format(email=self['email']), self['id'])
| 2.40625 | 2 |
day8/day8.py | margobra8/adventofcode2020 | 0 | 12762758 |
from typing import Union
# --- Part 1 ---
# Boot-code image: tuple of (opcode, signed-argument) pairs, one per line of
# input.txt ("acc"/"jmp"/"nop" followed by an integer).
program = tuple((command, int(number))
                for command, number in map(str.split, open('input.txt', 'r')))
acc, index = 0, 0
states = set()
# Execute until the instruction pointer revisits a line (loop detected):
# jmp moves the pointer by its argument, acc adds to the accumulator.
while index not in states:
    states.add(index)
    command, number = program[index]
    index += number if command == 'jmp' else 1
    acc += number if command == 'acc' else 0
# Accumulator value just before any instruction would run a second time.
print(acc)
# --- Part 2 ---
# Re-read the program as a *mutable* list so single instructions can be patched.
program = [(command, int(number))
           for command, number in map(str.split, open('input.txt'))]
def resolve_program() -> Union[None, int]:
    """Run ``program``; return the accumulator if it halts, else None on a loop."""
    acc, index = 0, 0
    states = set()
    # Stop on a revisited instruction (infinite loop) or running off the end.
    while index not in states and index < len(program):
        states.add(index)
        command, number = program[index]
        index += number if command == 'jmp' else 1
        acc += number if command == 'acc' else 0
    return acc if index == len(program) else None
def get_final_acc() -> int:
    """Try flipping each jmp<->nop once; return the acc of the run that halts."""
    swap = {'jmp': 'nop', 'acc': 'acc', 'nop': 'jmp'}
    for i, (command, number) in enumerate(program):
        # Patch one instruction, test the program, then restore it.
        program[i] = swap[command], number
        # NOTE(review): the terminating program is executed twice here, and a
        # halting run whose accumulator is 0 would be skipped (falsy) — should
        # be ``if (res := resolve_program()) is not None: return res``.
        if resolve_program():
            return resolve_program()
        program[i] = command, number
print(get_final_acc())
| 3.5 | 4 |
compile-conformers.py | ghutchis/conformer-scoring | 4 | 12762759 | #!/usr/bin/env python
import sys, os, glob, json, re
import pybel
import openbabel as ob
def is_number(s):
    """Return True when ``s`` parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
# Shared MMFF94 force field instance used for all energy evaluations below.
ff = pybel._forcefields["mmff94"]
# CSV header for the per-conformer energy table printed by the main loop.
print 'name, conf, dftE, pm7E, mmffE'
# atomization energies
# Per-element single-atom reference energies in hartree, indexed by atomic
# number; used to convert total electronic energies into atomization energies.
atomE = [0.0] * 118
atomE[1] = -0.497859903283 #H
atomE[6] = -37.792721177046 #C
atomE[7] = -54.512803258431 #N
atomE[8] = -74.971340131182 #O
atomE[9] = -99.606601941762 #F
atomE[15] = -341.109280569995 #P
atomE[16] = -397.934238591573 #S
atomE[17] = -459.940825896724 #Cl
atomE[35] = -2573.686833420785#Br
atomE[53] = -6919.638734715522#I
# Main loop (Python 2): for every conformer of every job, collect DFT (ORCA),
# PM7 (MOPAC) and MMFF94 energies for three geometries (PM7-optimised,
# MMFF-optimised, DFT-optimised) and print one CSV row per geometry.
for d in glob.iglob("*jobs/*"):
    # directories for both charged and neutral molecules
    name= "/".join(d.split('/')[0:2]) # name of the entry
    for f in glob.iglob(d + "/rmsd/rmsd*.mol"):
        # all the base files
        conf = f.split('/')[-1][0:-4] # conformer name/number
        # read the file (e.g., with the base bond types)
        try:
            mol = pybel.readfile("mol", f).next()
        except StopIteration:
            # Base .mol unreadable/empty: fall back to the PM7 geometry file.
            pm7 = d + "/rmsd/pm7/" + conf + ".mol"
            mol = pybel.readfile("mol", pm7).next()
        ff.Setup(mol.OBMol)
        # get the PM7 optimized geometry first
        orcaFile = d + "/rmsd/" + conf + "_sp.out"
        orcaSP = ""
        if os.path.isfile(orcaFile):
            with open(orcaFile) as o:
                for line in o:
                    if "FINAL SINGLE POINT ENERGY" in line:
                        orcaSP = float(line.split()[4])
        # get the MOPAC energy from the PM7 optimized
        # NOTE(review): mopacMM is initialised here but the loop assigns
        # mopacOpt — if the file is missing, mopacOpt may carry over from a
        # previous iteration (or be undefined on the first).  Confirm intent.
        mopacFile = d + "/rmsd/pm7/" + conf + ".out"
        mopacMM = float('nan')
        if os.path.isfile(mopacFile):
            with open(mopacFile) as m:
                for line in m:
                    if "FINAL HEAT OF FORMATION" in line:
                        mopacOpt = float(line.split()[5]) # in kcal/mol
        # save the mol file with updated coordinates
        mmffPM7 = 0.0
        try:
            # Copy the PM7-optimised coordinates onto the base molecule, save
            # it, and evaluate the MMFF94 energy at that geometry.
            mol2 = pybel.readfile("mopout", mopacFile).next()
            numAtoms = mol2.OBMol.NumAtoms()
            for i in range(numAtoms):
                oldAtom = mol.atoms[i]
                nuAtom = mol2.atoms[i]
                oldAtom.OBAtom.SetVector(nuAtom.vector)
            mol.write("sdf", "%s.mol" % (d + "/" + conf + "-pm7"), overwrite=True)
            # get the MMFF energy
            ff.SetCoordinates(mol.OBMol)
            mmffPM7 = ff.Energy() # in kcal/mol
        except:
            # NOTE(review): bare except swallows every error; ``i = 1`` is a
            # debug breadcrumb marking which stage failed.
            i = 1
        # now look for the MMFF-optimized geometry (e.g., DFT single-point)
        orcaMM = ""
        orcaFile = d + "/rmsd/" + conf + "-mmff_sp.out"
        if os.path.isfile(orcaFile):
            with open(orcaFile) as o:
                for line in o:
                    if "FINAL SINGLE POINT ENERGY" in line:
                        orcaMM = float(line.split()[4])
        mopacFile = d + "/rmsd/pm7/" + conf + "-mmff-opt.out"
        mopacMM = float('nan')
        if os.path.isfile(mopacFile):
            with open(mopacFile) as m:
                for line in m:
                    if "FINAL HEAT OF FORMATION" in line:
                        mopacMM = float(line.split()[5]) # in kcal/mol
        mmffOpt = 0.0
        try:
            # Same coordinate-copy + MMFF evaluation for the MMFF geometry.
            mol2 = pybel.readfile("mopout", mopacFile).next()
            numAtoms = mol2.OBMol.NumAtoms()
            for i in range(numAtoms):
                oldAtom = mol.atoms[i]
                nuAtom = mol2.atoms[i]
                oldAtom.OBAtom.SetVector(nuAtom.vector)
            mol.write("sdf", "%s.mol" % (d + "/" + conf + "-mmff"), overwrite=True)
            # get the MMFF energy
            ff.SetCoordinates(mol.OBMol)
            mmffOpt = ff.Energy() # in kcal/mol
        except:
            i = 2
        ####
        # Now the DFT optimized geometry
        ####
        orcaFile = d + "/rmsd/" + conf + "_opt.out"
        orcaOpt = ""
        if os.path.isfile(orcaFile):
            with open(orcaFile) as o:
                for line in o:
                    if "FINAL SINGLE POINT ENERGY" in line:
                        orcaOpt = float(line.split()[4])
        mopacFile = d + "/rmsd/" + conf + "_opt_pm7.out"
        mopacSP = float('nan')
        if os.path.isfile(mopacFile):
            with open(mopacFile) as m:
                for line in m:
                    if "FINAL HEAT OF FORMATION" in line:
                        mopacSP = float(line.split()[5]) # in kcal/mol
        # get the MMFF energy for this
        mmffDFT = 0.0
        try:
            mol2 = pybel.readfile("mopout", mopacFile).next()
            numAtoms = mol2.OBMol.NumAtoms()
            for i in range(numAtoms):
                oldAtom = mol.atoms[i]
                nuAtom = mol2.atoms[i]
                oldAtom.OBAtom.SetVector(nuAtom.vector)
            mol.write("sdf", "%s.mol" % (d + "/" + conf + "-opt"), overwrite=True)
            ff.SetCoordinates(mol.OBMol)
            mmffDFT = ff.Energy() # in kcal/mol
        except:
            i = 3
        # convert the orcaSP and orcaOpt energies
        # to atomization energies in kcal/mol
        if is_number(orcaSP) or is_number(orcaOpt) or is_number(orcaMM):
            # Count atoms per element, sum the reference single-atom energies,
            # and convert each total energy to an atomization energy.
            elements = [0] * 118
            try:
                for atom in mol.atoms: # how many of each element are there?
                    elements[atom.atomicnum] = elements[atom.atomicnum] + 1
                totalAtomE = 0.0 # get the atomic contributions
                for e in range(len(elements)):
                    totalAtomE = totalAtomE + elements[e] * atomE[e]
                if is_number(orcaSP):
                    orcaSP = (totalAtomE - float(orcaSP)) * 627.509469 # hartree to kcal/mol
                if is_number(orcaOpt):
                    orcaOpt = (totalAtomE - float(orcaOpt)) * 627.509469 # hartree to kcal/mol
                if is_number(orcaMM):
                    orcaMM = (totalAtomE - float(orcaMM)) * 627.509469 # hartree to kcal/mol
            except AttributeError:
                print "%s, %s, error" % (name, conf)
                continue
        conf = conf.rstrip()
        # Emit one CSV row per geometry; fall back to degraded rows when a
        # value never parsed (still "" or nan) and %f formatting would raise.
        if is_number(orcaMM):
            print "%s, %s-mmff, %f, %f, %f" % (name, conf, orcaMM, mopacMM, mmffOpt)
        else:
            try:
                print "%s, %s-mmff, nan, %f, %f" % (name, conf, mopacMM, mmffOpt)
            except:
                print "%s, %s-mmff" % (name, conf), type(mopacMM), type(mmffOpt)
        try:
            print "%s, %s-pm7, %f, %f, %f" % (name, conf, orcaSP, mopacOpt, mmffPM7)
        except:
            pass
        try:
            print "%s, %s-opt, %f, %f, %f" % (name, conf, orcaOpt, mopacSP, mmffDFT)
        except TypeError:
            print "%s, %s-opt, error" % (name, conf), type(orcaOpt), type(mopacSP), type(mmffDFT)
| 2.015625 | 2 |
ics/structures/rad_reporting_settings.py | hollinsky-intrepid/python_ics | 0 | 12762760 | # This file was auto generated; Do not modify, if you value your sanity!
import ctypes
class rad_reporting_settings(ctypes.Structure):
    """ctypes mirror of the RAD device reporting-settings record.

    Auto-generated; field order and types must match the device layout.
    """
    # 2-byte packing to match the firmware/on-wire struct alignment.
    _pack_ = 2
    _fields_ = [
        ('flags', ctypes.c_uint32),
        ('temp_interval_ms', ctypes.c_uint16),
        ('gps_interval_ms', ctypes.c_uint16),
        ('serdes_interval_ms', ctypes.c_uint16),
        ('io_interval_ms', ctypes.c_uint16),
        ('rsvd', ctypes.c_uint8 * 4),  # reserved/padding bytes
    ]
# Extra names go here:
# Upper-case alias kept for API compatibility with the C-style naming.
RAD_REPORTING_SETTINGS = rad_reporting_settings
# End of extra names
| 1.625 | 2 |
code/generator/Model_new.py | humblef00ls/FinQA-new | 0 | 12762761 | import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
import math
import numpy as np
from config import parameters as conf
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# Select the encoder backbone requested in the config; all variants expose
# the same ``from_pretrained`` constructor used in Bert_model.__init__
# (finbert reuses the BertModel class with different weights).
if conf.pretrained_model == "bert":
    from transformers import BertModel
elif conf.pretrained_model == "roberta":
    from transformers import RobertaModel
elif conf.pretrained_model == "finbert":
    from transformers import BertModel
elif conf.pretrained_model == "longformer":
    from transformers import LongformerModel
class Bert_model(nn.Module):
    def __init__(self, num_decoder_layers, hidden_size, dropout_rate, input_length,
                 program_length, op_list, const_list, num_char_length, num_emb_dim):
        """Build the encoder/decoder layers for program generation.

        op_list/const_list define the reserved vocabulary prepended to the
        input sequence; various masks over (reserved tokens + input positions)
        are precomputed here for decode-time constraint enforcement.
        NOTE(review): the ``num_decoder_layers`` argument is never used — the
        LSTM below reads ``conf.num_decoder_layers`` instead; confirm which
        should win.
        """
        super(Bert_model, self).__init__()
        self.op_list_size = len(op_list)
        self.const_list_size = len(const_list)
        self.reserved_token_size = self.op_list_size + self.const_list_size
        self.program_length = program_length
        self.hidden_size = hidden_size
        self.const_list = const_list
        self.op_list = op_list
        self.input_length = input_length
        self.num_char_length = num_char_length
        self.num_emb_dim = num_emb_dim
        # Index tensors into the reserved vocabulary (all non-trainable).
        self.reserved_ind = nn.Parameter(torch.arange(
            0, self.reserved_token_size), requires_grad=False)
        self.reserved_go = nn.Parameter(torch.arange(op_list.index(
            'GO'), op_list.index('GO') + 1), requires_grad=False)
        self.reserved_para = nn.Parameter(torch.arange(op_list.index(
            ')'), op_list.index(')') + 1), requires_grad=False)
        # Masks for constrained decoding at test time: restrict the next
        # token to operators only, or to sequence/constant tokens only.
        op_ones = nn.Parameter(torch.ones(
            self.op_list_size), requires_grad=False)
        op_zeros = nn.Parameter(torch.zeros(
            self.op_list_size), requires_grad=False)
        other_ones = nn.Parameter(torch.ones(
            input_length + self.const_list_size), requires_grad=False)
        other_zeros = nn.Parameter(torch.zeros(
            input_length + self.const_list_size), requires_grad=False)
        self.op_only_mask = nn.Parameter(
            torch.cat((op_ones, other_zeros), 0), requires_grad=False)
        self.seq_only_mask = nn.Parameter(
            torch.cat((op_zeros, other_ones), 0), requires_grad=False)
        # Mask that zeroes only the ")" position (forbids/forces closing paren).
        para_before_ones = nn.Parameter(torch.ones(
            op_list.index(')')), requires_grad=False)
        para_after_ones = nn.Parameter(torch.ones(
            input_length + self.reserved_token_size - op_list.index(')') - 1), requires_grad=False)
        para_zero = nn.Parameter(torch.zeros(1), requires_grad=False)
        self.para_mask = nn.Parameter(torch.cat(
            (para_before_ones, para_zero, para_after_ones), 0), requires_grad=False)
        # One-hot rows marking each step-reference token "#0", "#1", ... in
        # the combined (ops + consts) vocabulary.
        # self.step_masks = []
        all_tmp_list = self.op_list + self.const_list
        self.step_masks = nn.Parameter(torch.zeros(
            conf.max_step_ind, input_length + self.reserved_token_size), requires_grad=False)
        for i in range(conf.max_step_ind):
            this_step_mask_ind = all_tmp_list.index("#" + str(i))
            self.step_masks[i, this_step_mask_ind] = 1.0
        # self.step_mask_eye = torch.eye(conf.max_step_ind)
        # Pretrained encoder backbone chosen by config (finbert reuses BertModel).
        if conf.pretrained_model == "bert":
            self.bert = BertModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        elif conf.pretrained_model == "roberta":
            self.bert = RobertaModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        elif conf.pretrained_model == "finbert":
            self.bert = BertModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        elif conf.pretrained_model == "longformer":
            self.bert = LongformerModel.from_pretrained(
                conf.model_size, cache_dir=conf.cache_dir)
        # Projections + dropout over the [CLS] pooled vector and the sequence.
        self.cls_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.cls_dropout = nn.Dropout(dropout_rate)
        self.seq_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.seq_dropout = nn.Dropout(dropout_rate)
        # Embeddings for reserved tokens and for number characters/digits.
        self.reserved_token_embedding = nn.Embedding(
            self.reserved_token_size, hidden_size)
        self.num_char_embedding = nn.Embedding(self.num_char_length, num_emb_dim)
        # attentions
        self.decoder_history_attn_prj = nn.Linear(
            hidden_size, hidden_size, bias=True)
        self.decoder_history_attn_dropout = nn.Dropout(dropout_rate)
        self.question_attn_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.question_attn_dropout = nn.Dropout(dropout_rate)
        self.question_summary_attn_prj = nn.Linear(
            hidden_size, hidden_size, bias=True)
        self.question_summary_attn_dropout = nn.Dropout(dropout_rate)
        # Input projection width depends on whether separate attention
        # summaries are concatenated (3x hidden) or not (2x hidden).
        if conf.sep_attention:
            self.input_embeddings_prj = nn.Linear(
                hidden_size*3, hidden_size, bias=True)
        else:
            self.input_embeddings_prj = nn.Linear(
                hidden_size*2, hidden_size, bias=True)
        self.input_embeddings_layernorm = nn.LayerNorm([1, hidden_size])
        self.option_embeddings_prj = nn.Linear(
            hidden_size*2, hidden_size, bias=True)
        # decoder lstm
        self.rnn = torch.nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,
                                 num_layers=conf.num_decoder_layers, batch_first=True)
        # num char encoder: bidirectional, so each direction gets hidden/2.
        self.num_bilstm = torch.nn.LSTM(input_size=num_emb_dim, hidden_size=hidden_size // 2,
                                        num_layers=conf.num_encoder_layers, bidirectional=True)
        self.num_char_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.num_char_dropout = nn.Dropout(dropout_rate)
        # num attention
        self.num_attn_prj = nn.Linear(hidden_size, hidden_size, bias=True)
        self.num_attn_dropout = nn.Dropout(dropout_rate)
        # seq_out_prj
        self.seqout_prj = nn.Linear(hidden_size * 2, hidden_size, bias=True)
        self.seqout_dropout = nn.Dropout(dropout_rate)
        # step vector
        self.decoder_step_proj = nn.Linear(
            3*hidden_size, hidden_size, bias=True)
        self.decoder_step_proj_dropout = nn.Dropout(dropout_rate)
        self.step_mix_proj = nn.Linear(
            hidden_size*2, hidden_size, bias=True)
def forward(self, is_training, input_ids, input_mask, segment_ids, option_mask, program_ids, program_mask, num_char_ids, number_mask, num_char_mask, device):
bert_outputs = self.bert(
input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)
# print("="*30)
# print("input_ids.size(), ", input_ids.size()) # [batch, seq_length], [16, 512]
# print("number_mask: ", number_mask.size())
# print("input_mask.size(), ", input_mask.size()) # [batch, seq_length], [16, 512]
# print("segment_ids.size(), ", segment_ids.size()) # [batch, seq_length], [16, 512]
# print("option_mask.size()", option_mask.size()) # [batch, option_length], [16, 556]
# print("program_ids.size()", program_ids.size()) # [batch, program_length], [16, 30]
# print("program_mask.size()", program_mask.size()) # [batch, program_length], [16, 30]
######
# Step 1: get the sequence, including questions and retrieved text: {h_i^e}
######
bert_sequence_output = bert_outputs.last_hidden_state # [batch, seq_length, hidden], [16, 512, 768]
bert_pooled_output = bert_sequence_output[:, 0, :] # [batch, hidden], [16, 768]
batch_size, seq_length, bert_dim = list(bert_sequence_output.size())
pooled_output = self.cls_prj(bert_pooled_output) # if conf.sep_attention is True, the pooled_output will not be used
pooled_output = self.cls_dropout(pooled_output)
option_size = self.reserved_token_size + seq_length # 556
sequence_output = self.seq_prj(bert_sequence_output)
sequence_output = self.seq_dropout(sequence_output) # [batch_size, seq_length, hidden], [16, 512, 768]
if conf.num_char:
######
# Step new1: get number embeddings and number_hidden_state
# the number char_hidden_avg will be concated with the input sequence
# therefore, for the word (not number), we copy the output of the encoder here
######
num_char_embeddings = self.num_char_embedding(num_char_ids)
size_a, size_b, size_c, size_d = num_char_embeddings.size()
num_char_embeddings = num_char_embeddings.reshape(-1, size_c, size_d) # [16 * 512, 10, 300]
# add pad, get bilstm output
num_char_length = num_char_mask.sum(-1).reshape(-1) # [16 * 512]
num_char_length += (num_char_length == 0).long() # add 1 to those has 0 number, we can multiply 0 again to avoid the calculation
num_char_length = num_char_length.tolist()
num_char_embeddings_pad = torch.nn.utils.rnn.pack_padded_sequence(input=num_char_embeddings, lengths=num_char_length, batch_first=True, enforce_sorted=False)
num_char_hidden, _ = self.num_bilstm(num_char_embeddings_pad)
num_char_hidden, out_len = pad_packed_sequence(num_char_hidden, batch_first=True)
num_char_hidden = num_char_hidden.reshape(size_a, size_b, size_c, -1) # because bilstm
num_char_mask_repeat = num_char_mask.unsqueeze(-1).repeat(1,1,1,self.hidden_size) # batch, seq_length, max_num_length, hidden
num_char_hidden = num_char_hidden * num_char_mask_repeat # same as above
num_char_hidden_sum = num_char_hidden.sum(-2)
num_char_mask = num_char_mask.sum(-1).unsqueeze(-1).repeat(1,1,self.hidden_size) + 1e-7
num_char_hidden_avg = num_char_hidden_sum / num_char_mask # batch, seq_length, hidden
num_char_output = self.num_char_prj(num_char_hidden_avg)
num_char_output = self.num_char_dropout(num_char_output) # batch, seq_length, hidden
mask = number_mask.unsqueeze(-1).repeat(1,1,self.hidden_size) # batch, seq_length, hidden
concat_num_word_output = num_char_output * mask + sequence_output * (mask - 1) # batch, seq_length, hidden
# copy the output of the encoder here
# number_mask: [batch, seq_length]
num_attn_vec = self.num_attn_prj(concat_num_word_output)
num_attn_vec = self.num_attn_dropout(num_attn_vec) # batch, seq_length, hidden
# print("num_attn_vec: ", num_attn_vec.size())
num_attn_w = torch.matmul(concat_num_word_output, torch.transpose(num_attn_vec, 1, 2)) # batch, seq_length, seq_length (len_generated)
# print("num_attn_w: ", num_attn_w.size())
# print("mask: ", mask.size())
attn_mask = number_mask.unsqueeze(-1).repeat(1, 1, num_attn_w.size()[-1]) # batch,
num_attn_w -= 1e6 * (1 - attn_mask)
num_attn_w = F.softmax(num_attn_w, dim=1)
# print("after softmax, num_attn_w.size(): ", num_attn_w.size()) #
num_ctx_out = torch.matmul(
torch.transpose(num_attn_w, 1, 2), concat_num_word_output) # batch, seq_length, hidden
# print("num_ctx_out: ", num_ctx_out.size()) # batch, seq_length, hidden
sequence_output = torch.cat([sequence_output, num_ctx_out], dim=-1)
sequence_output = self.seqout_prj(sequence_output)
sequence_output = self.seqout_dropout(sequence_output)
print("run this???")
# print(sequence_output)
######
# Step 2: get option embeddings: {h_i^s, h_i^m}
# and concat it with sequence_output: H
######
op_embeddings = self.reserved_token_embedding(self.reserved_ind)
op_embeddings = op_embeddings.repeat(batch_size, 1, 1) # [batch_size, reserved_ind_length, hidden], [16, 44, 768], the length of reserved_ind = len(op_list) + len(const_list)
# [batch, op + seq len, hidden]
initial_option_embeddings = torch.cat([op_embeddings, sequence_output], dim=1)
######
# Step 3: init something used for LSTM decoder
######
# for init, only one symbol "GO", so the size of decoder_output is [batch_size, 1, hidden]
init_decoder_output = self.reserved_token_embedding(self.reserved_go) # [1, 768]
decoder_output = init_decoder_output.repeat(batch_size, 1, 1) # [16, 1, 768]
if conf.sep_attention:
decoder_history = decoder_output
else:
decoder_history = torch.unsqueeze(pooled_output, dim=-1)
# initialize the hidden state for the LSTM decoder
decoder_state_h = torch.zeros(1, batch_size, self.hidden_size, device=device)
decoder_state_c = torch.zeros(1, batch_size, self.hidden_size, device=device)
######
# Step 4: prepare something for future use
######
split_program_ids = torch.split(program_ids, 1, dim=1) # len(split_program_ids) = 30, split_program_ids[0].size() = [16, 1]
# What's the float_input_mask for?
float_input_mask = input_mask.float()
float_input_mask = torch.unsqueeze(float_input_mask, dim=-1)
# used for updating option embeddings, adding step embedding
this_step_new_op_emb = initial_option_embeddings # [batch, option_length, hidden]
logits = []
######
# Step 5: generate program
######
for cur_step in range(self.program_length):
######
# Step 5.1: get decoder history attention: att_h
######
decoder_history_attn_vec = self.decoder_history_attn_prj(decoder_output) # [batch, 1, hidden], [16, 1, 768]
decoder_history_attn_vec = self.decoder_history_attn_dropout(decoder_history_attn_vec)
decoder_history_attn_w = torch.matmul(
decoder_history, torch.transpose(decoder_history_attn_vec, 1, 2)) # [batch, cur_step + 1, 1]
decoder_history_attn_w = F.softmax(decoder_history_attn_w, dim=1) # [batch, cur_step + 1, 1]
decoder_history_ctx_embeddings = torch.matmul(
torch.transpose(decoder_history_attn_w, 1, 2), decoder_history) # [batch, 1, hidden],[16, 1, 768]
######
# Step 5.2: get attention for input sequence: att_p
######
if conf.sep_attention:
# input seq att
question_attn_vec = self.question_attn_prj(decoder_output)
question_attn_vec = self.question_attn_dropout(question_attn_vec) #[batch, 1, hidden],[16, 1, 768]
question_attn_w = torch.matmul(
sequence_output, torch.transpose(question_attn_vec, 1, 2))#[batch, seq_length, 1],[16, 512, 1]
question_attn_w -= 1e6 * (1 - float_input_mask)
question_attn_w = F.softmax(question_attn_w, dim=1) # [batch, seq_length, 1], [16, 512, 1]
question_ctx_embeddings = torch.matmul(
torch.transpose(question_attn_w, 1, 2), sequence_output) # [batch, 1, hidden], [16, 1, 768]
######
# Step 5.3: get another input sequence attention: att_p'
######
question_summary_vec = self.question_summary_attn_prj(decoder_output)
question_summary_vec = self.question_summary_attn_dropout(question_summary_vec) # [batch, 1, hidden]
question_summary_w = torch.matmul(
sequence_output, torch.transpose(question_summary_vec, 1, 2)) #[batch, seq_length, 1],[16, 512, 1]
question_summary_w -= 1e6 * (1 - float_input_mask)
question_summary_w = F.softmax(question_summary_w, dim=1) # [batch, seq_length, 1], [16, 512, 1]
question_summary_embeddings = torch.matmul(
torch.transpose(question_summary_w, 1, 2), sequence_output)
######
# Step 5.4: get contextual information C_T
######
if conf.sep_attention:
# [batch, 1, hidden * 3], [16, 1, 2304]
concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,
question_ctx_embeddings,
decoder_output], dim=-1)
else:
concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,
decoder_output], dim=-1)
input_embeddings = self.input_embeddings_prj(concat_input_embeddings) #[batch, 1, hidden],[16, 1, 768]
if conf.layer_norm:
input_embeddings = self.input_embeddings_layernorm(
input_embeddings)
######
# Step 5.5: get all token embeddings: H_T'
######
question_option_vec = this_step_new_op_emb * question_summary_embeddings # [batch, option_size, hidden], [16 556, 768]
option_embeddings = torch.cat(
[this_step_new_op_emb, question_option_vec], dim=-1) # [batch, option_size, hidden*2], [16, 556, 1536]
option_embeddings = self.option_embeddings_prj(option_embeddings) # [batch, option_size, hidden], [16, 556, 768]
######
# Step 5.6: get logits
######
option_logits = torch.matmul(
option_embeddings, torch.transpose(input_embeddings, 1, 2)) # batch, option_size, 1],[16, 556, 1]
option_logits = torch.squeeze(option_logits, dim=2) # [batch, op + seq_len],op + seq_len = option_size
option_logits -= 1e6 * (1 - option_mask)
logits.append(option_logits)
######
# Step 6: update state
######
if is_training:
program_index = torch.unsqueeze(split_program_ids[cur_step], dim=1) # [batch, 1, 1], [16, 1, 1]
else:
# constrain decoding
if cur_step % 4 == 0 or (cur_step + 1) % 4 == 0:
# op round
option_logits -= 1e6 * self.seq_only_mask
else:
# number round
option_logits -= 1e6 * self.op_only_mask
if (cur_step + 1) % 4 == 0:
# ")" round
option_logits -= 1e6 * self.para_mask
# print(program_index)
program_index = torch.argmax(option_logits, axis=-1, keepdim=True)
program_index = torch.unsqueeze(program_index, dim=1)
if (cur_step + 1) % 4 == 0:
# update op embeddings
this_step_index = cur_step // 4
this_step_list_index = (
self.op_list + self.const_list).index("#" + str(this_step_index)) # ??? integer
this_step_mask = self.step_masks[this_step_index, :] # [option_size], [556]
decoder_step_vec = self.decoder_step_proj(concat_input_embeddings)
decoder_step_vec = self.decoder_step_proj_dropout(decoder_step_vec)#[batch,1,hidden], [16, 1, 768]
decoder_step_vec = torch.squeeze(decoder_step_vec) # [batch, hidden], [16, 768]
this_step_new_emb = decoder_step_vec # [batch, hidden]
this_step_new_emb = torch.unsqueeze(this_step_new_emb, 1)
this_step_new_emb = this_step_new_emb.repeat(
1, self.reserved_token_size+self.input_length, 1) # [batch, op seq, hidden]
this_step_mask = torch.unsqueeze(this_step_mask, 0) # [1, op seq]
this_step_mask = torch.unsqueeze(this_step_mask, 2) # [1, op seq, 1]
this_step_mask = this_step_mask.repeat(batch_size, 1, self.hidden_size) # [batch, op seq, hidden]
this_step_new_op_emb = torch.where(
this_step_mask > 0, this_step_new_emb, initial_option_embeddings)
program_index = torch.repeat_interleave(program_index, self.hidden_size, dim=2) # [batch, 1, hidden]
input_program_embeddings = torch.gather(option_embeddings, dim=1, index=program_index)
decoder_output, (decoder_state_h, decoder_state_c) = self.rnn(
input_program_embeddings, (decoder_state_h, decoder_state_c))
decoder_history = torch.cat(
[decoder_history, input_program_embeddings], dim=1) # [batch, cur_step + 1, hidden]
logits = torch.stack(logits, dim=1)
return logits
| 2.28125 | 2 |
tests/test_study_zero.py | cinaljess/goatools | 0 | 12762762 | #!/usr/bin/env python
"""Test gracefully exiting if no study genes are in assc or population."""
import os
# from goatools.rpt.goea_nt_xfrm import MgrNtGOEAs # get_goea_nts_all
from goatools.test_data.genes_NCBI_10090_ProteinCoding import GENEID2NT as GeneID2nt_mus
from goatools.test_data.nature3102_goea import get_geneid2symbol, get_goeaobj
__copyright__ = "Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved."
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")
def test_example():
    """Test GOEnrichmentStudy::print_results."""
    # Gene Ontology Enrichment Analysis (GOEA) for the mouse study data.
    mouse_taxid = 10090
    # Load ontologies, associations, and the population gene ids.
    population_ids = GeneID2nt_mus.keys()
    study_id2sym = get_geneid2symbol("nbt.3102-S4_GeneIDs.xlsx")
    goea = get_goeaobj("fdr_bh", population_ids, mouse_taxid)
    # An empty study set must produce no enrichment results.
    results_all = goea.run_study(set())
    assert not results_all, 'NO STUDY GENES TEST FAILED: {R}'.format(R=results_all)
    # A study set whose genes appear in neither population nor association
    # must likewise produce no results.
    results_all = goea.run_study(set(['BADVAL']))
    assert not results_all, 'NO VALID STUDY GENES TEST FAILED: {R}'.format(R=results_all)
    goea.print_results(results_all, pval=None)
    goea.print_date()
if __name__ == '__main__':
    # Allow running this test module directly as a stand-alone script.
    test_example()

# Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.
| 2.015625 | 2 |
dbt/adapters/athena/__init__.py | EarnestResearch/dbt-athena | 43 | 12762763 | <filename>dbt/adapters/athena/__init__.py
from dbt.adapters.athena.connections import AthenaConnectionManager, AthenaCredentials
from dbt.adapters.athena.impl import AthenaAdapter
from dbt.adapters.base import AdapterPlugin
from dbt.include import athena
# NOTE(review): self-assignment is a runtime no-op; presumably kept so the
# import above counts as "used" (re-export for lint) — confirm before removing.
AthenaConnectionManager = AthenaConnectionManager

# Plugin object wiring the Athena adapter, its credentials class, and the
# bundled macro/include path together for dbt-core to discover.
Plugin = AdapterPlugin(
    adapter=AthenaAdapter,
    credentials=AthenaCredentials,
    include_path=athena.PACKAGE_PATH,
)
| 1.414063 | 1 |
settings.py | saud-learning-services/module-progress | 5 | 12762764 | <reponame>saud-learning-services/module-progress
"""
Declares several global variables that are used throughout the project.
* The status dictionary gets updated to reflect the success/failed state of each course (and relevant errors)
* ROOT_DIR is the filepath to the src folder
"""
import os
# Per-course success/failure state (and any errors), filled in as courses run.
status = {}
# Absolute path of the directory containing this settings module.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
| 1.820313 | 2 |
update-locales-1.0-to-1.0.1.py | Marcool04/utilities | 10 | 12762765 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Python script to update locale files from CSL 1.0 to 1.0.1
# Author: <NAME>
# Version: 2012-07-15
# * Requires lxml library (http://lxml.de/)
#
# Extremely hacky (I need a linux box!)
import os, glob, re
from lxml import etree
# Hard-coded Windows paths to the CSL locales checkout and the 1.0 -> 1.0.1
# upgrade XSLT stylesheet; edit these to run on another machine.
localesPath = 'C:\Documents and Settings\zelle\My Documents\CSL\locales\\'
localeXSLTPath = 'C:\Documents and Settings\zelle\My Documents\CSL\utilities\update-locales-1.0-to-1.0.1.xsl'
class FileResolver(etree.Resolver):
    """lxml URI resolver that resolves external references as local file paths."""
    def resolve(self, url, pubid, context):
        # Delegate to lxml's built-in filename-based resolution for this URL.
        return self.resolve_filename(url, context)
# Collect the file names of every locale in the checkout (locales-*.xml).
locales = []
for localePath in glob.glob( os.path.join(localesPath, 'locales-*.xml') ):
    locales.append(os.path.basename(localePath))
# Converted files are written into a "1.0.1" subdirectory.
if not os.path.exists(os.path.join(localesPath, '1.0.1')):
    os.makedirs(os.path.join(localesPath, '1.0.1'))
for locale in locales:
    # Specialize the upgrade XSLT for this locale by substituting the file
    # name it references, then write the patched copy to disk.
    with open(localeXSLTPath, 'r') as localeXSLT:
        localeXSLTContent = localeXSLT.read()
    localeXSLTContent = localeXSLTContent.replace('locales-nl-NL.xml', locale)
##    print(localeXSLTContent)
    localizedXSLT = open(os.path.join('C:\Documents and Settings\zelle\My Documents\CSL\utilities\\', 'localizedXSLT.xsl'), 'w')
    localizedXSLT.write(localeXSLTContent)
    localizedXSLT.close()
    ## need to read modified copy!!!
    localeXSLT = etree.parse(os.path.join('C:\Documents and Settings\zelle\My Documents\CSL\utilities\\', 'localizedXSLT.xsl'))
    localeTransform = etree.XSLT(localeXSLT)
    # The en-US locale is always the transform input; the locale-specific
    # content is pulled in by the localized XSLT itself.
    parsedLocale = etree.parse('C:\Documents and Settings\zelle\My Documents\CSL\utilities\locales-en-US.xml')
##    print(etree.tostring(parsedLocale, pretty_print=True, xml_declaration=True, encoding="utf-8"))
    localeElement = parsedLocale.getroot()
    updatedLocale = localeTransform(localeElement)
    updatedLocale = etree.tostring(updatedLocale, pretty_print=True, xml_declaration=True, encoding="utf-8")
    # Post-process the serialized XML: comment spacing, indentation, and the
    # first four quote characters of the XML declaration.
    # NOTE(review): the replace() arguments below look whitespace-mangled by a
    # paste (the second is a no-op as written) — verify against the original.
    updatedLocale = updatedLocale.replace(" <!--", "\n <!--")
    updatedLocale = updatedLocale.replace(" ", " ")
    updatedLocale = updatedLocale.replace("'", '"', 4)
    updatedLocaleFile = open(os.path.join(localesPath, '1.0.1', locale), 'w')
    updatedLocaleFile.write ( updatedLocale )
    updatedLocaleFile.close()
| 2.5 | 2 |
test/fit_tests/common/fit_common.py | smiller171/RackHD | 0 | 12762766 | <reponame>smiller171/RackHD
'''
Copyright 2016, EMC, Inc.
Author(s):
<NAME>
OnRack/RackHD Functional Integration Test (FIT) library
This is the main common function library for OnRack/RackHD FIT tests.
'''
# Standard imports
import os
import sys
import json
import subprocess
import time, datetime
import unittest
import signal
import re
import requests
import pexpect
import shutil
# Globals

# Pull arguments from environment into ARGS_LIST
ARGS_LIST = \
    {
    "v": os.getenv("VERBOSITY", "0"),
    "config": os.getenv("CONFIG", "config"),
    "stack": os.getenv("STACK", "None"), # Stack label
    "ora": os.getenv("ORA", "localhost"), # Appliance IP or hostname
    "bmc": "None", # BMC IP or hostname
    "sku": os.getenv("SKU", "all"), # node SKU name
    "obmmac": os.getenv("OBMMAC", "all"), # node OBM MAC address
    "nodeid": os.getenv("NODEID", "None"), # node ID
    "hyper": "None", # hypervisor address
    "version": os.getenv("VERSION", "onrack-devel"), # code version
    "template": os.getenv("TEMPLATE", "None"), # path or URL link to OVA for deployment
    "xunit": os.getenv("XUNIT", False), # XUNIT output
    "list": os.getenv("LIST", False), # list tests
    "group": os.getenv("GROUP", "all"), # test group
    "http": os.getenv("HTTP", "False"), # force http api protocol
    "https": os.getenv("HTTPS", "False"), # force https api protocol
    "port": os.getenv("PORT", "None") # port number override
    }

# Get top level path via git
TEST_PATH = subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/fit_tests/"
CONFIG_PATH = TEST_PATH + ARGS_LIST["config"] + "/"
if ARGS_LIST["config"] != 'config':
    print "**** Using config file path:", ARGS_LIST["config"]
VERBOSITY = int(os.getenv("VERBOSITY", "1"))
# Session-wide mutable globals; the config dicts are filled in just below and
# the API/auth values are lazily resolved on first rackhdapi()/restful() call.
GLOBAL_CONFIG = []
STACK_CONFIG = []
API_PORT = "None"
API_PROTOCOL = "None"
AUTH_TOKEN = "None"
REDFISH_TOKEN = "None"
# List of BMC IP addresses
BMC_LIST = []

# Global Config files
try:
    GLOBAL_CONFIG = json.loads(open(CONFIG_PATH + "global_config.json").read())
except:
    print "**** Global Config file: " + CONFIG_PATH + "global_config.json" + " missing or corrupted! Exiting...."
    sys.exit(255)
try:
    STACK_CONFIG = json.loads(open(CONFIG_PATH + "stack_config.json").read())
except:
    # NOTE(review): on this failure path STACK_CONFIG stays a list, but
    # STACK_CONFIG.update(...) below assumes a dict — confirm intended behavior.
    print "**** Stack Config file:" + CONFIG_PATH + "stack_config.json" + " missing or corrupted! Creating empty stack file...."
    STACK_CONFIG = []

# apply stack detail files from config dir to STACK_CONFIG dict
for entry in os.listdir(CONFIG_PATH):
    if entry != "global_config.json" and entry != "stack_config.json" and ".json" in entry:
        try:
            detailfile = json.loads(open(CONFIG_PATH + entry).read())
        except:
            print "**** Invalid JSON file:", CONFIG_PATH + entry
        else:
            STACK_CONFIG.update(detailfile)

# This section derives default stack configuration data from STACK-CONFIG, use environment to override
ARGS_LIST.update(
                 {
                  "usr": GLOBAL_CONFIG['credentials']['ora'][0]['username'],
                  "pwd": GLOBAL_CONFIG['credentials']['ora'][0]['password']
                  }
                 )
if ARGS_LIST["stack"] != "None":
    if "ora" in STACK_CONFIG[ARGS_LIST["stack"]]:
        ARGS_LIST["ora"] = STACK_CONFIG[ARGS_LIST["stack"]]['ora']
    else:
        ARGS_LIST["ora"] = "localhost"
    if "bmc" in STACK_CONFIG[ARGS_LIST["stack"]]:
        ARGS_LIST["bmc"] = STACK_CONFIG[ARGS_LIST["stack"]]['bmc']
    if "hyper" in STACK_CONFIG[ARGS_LIST["stack"]]:
        ARGS_LIST["hyper"] = STACK_CONFIG[ARGS_LIST["stack"]]['hyper']

# set api port and protocol from command line
if ARGS_LIST['port'] != "None":
    API_PORT = ARGS_LIST['port']
if ARGS_LIST['http'] == "True":
    API_PROTOCOL = "http"
    if API_PORT == "None":
        API_PORT = GLOBAL_CONFIG['ports']['http']
if ARGS_LIST['https'] == "True":
    API_PROTOCOL = "https"
    if API_PORT == "None":
        API_PORT = GLOBAL_CONFIG['ports']['https']
# Local appliance defaults when no explicit protocol/port was forced.
if ARGS_LIST["ora"] == "localhost":
    if API_PROTOCOL == "None":
        API_PROTOCOL = 'http'
    if API_PORT == "None":
        API_PORT = '8080'

# set OVA template from command line
if ARGS_LIST["template"] == "None":
    ARGS_LIST["template"] = GLOBAL_CONFIG['repos']['install']['template']
def timestamp():
    """Return the current UTC time as a 'YYYY-MM-DDTHH:MM:SS' string."""
    now_utc = time.gmtime()
    return time.strftime("%Y-%m-%dT%H:%M:%S", now_utc)
# This routine executes a sleep with countdown
def countdown(sleep_time, sleep_interval=1):
    """Sleep sleep_time * sleep_interval seconds, printing a dot per interval."""
    total_secs = sleep_time * sleep_interval
    sys.stdout.write("Sleeping for " + str(total_secs) + " seconds.")
    sys.stdout.flush()
    ticks = 0
    while ticks < sleep_time:
        time.sleep(sleep_interval)
        sys.stdout.write(".")
        sys.stdout.flush()
        ticks += 1
    print("Waking!")
    return
def remote_shell(shell_cmd, expect_receive="", expect_send="", timeout=300, address=ARGS_LIST['ora'], user=ARGS_LIST['usr'], password=ARGS_LIST['pwd']):
    '''
    Run ssh based shell command on a remote machine at ARGS_LIST["ora"]

    If address is 'localhost' the command is run locally under sudo instead
    of over ssh. When both expect_receive and expect_send are supplied they
    are added as an extra pexpect prompt/response pair.

    :param shell_cmd: string based command
    :param expect_receive: optional prompt substring to wait for
    :param expect_send: response sent when expect_receive matches
    :param timeout: in seconds
    :param address: IP or hostname of remote host
    :param user: username of remote host
    :param password: password of remote host
    :return: dict = {'stdout': str:ouput, 'exitcode': return code}
    '''
    logfile_redirect = None
    if VERBOSITY >= 4:
        print "remote_shell: Host =", address
        print "remote_shell: Command =", shell_cmd

    if VERBOSITY >= 9:
        # at max verbosity, stream the child process output to our stdout
        print "remote_shell: STDOUT =\n"
        logfile_redirect = sys.stdout

    # if localhost just run the command local
    if ARGS_LIST['ora'] == 'localhost':
        (command_output, exitstatus) = \
            pexpect.run("sudo bash -c \"" + shell_cmd + "\"",
                        withexitstatus=1,
                        events={"assword": password + "\n"},
                        timeout=timeout, logfile=logfile_redirect)
        return {'stdout':command_output, 'exitcode':exitstatus}

    # this clears the ssh key from ~/.ssh/known_hosts (avoids host-key
    # mismatch failures when appliances are redeployed on the same IP)
    subprocess.call(["touch ~/.ssh/known_hosts;ssh-keygen -R "
                     + address  + " -f ~/.ssh/known_hosts >/dev/null 2>&1"], shell=True)

    # NOTE(review): str.replace returns a new string; this result is discarded,
    # so the line is a no-op — presumably meant shell_cmd = shell_cmd.replace(...).
    shell_cmd.replace("'", "\\\'")
    if expect_receive == "" or expect_send == "":
        (command_output, exitstatus) = \
                pexpect.run("ssh -q -o StrictHostKeyChecking=no -t " + user + "@"
                            + address + " sudo bash -c \\\"" + shell_cmd + "\\\"",
                            withexitstatus=1,
                            events={"assword": password + "\n"},
                            timeout=timeout, logfile=logfile_redirect)
    else:
        (command_output, exitstatus) = \
                pexpect.run("ssh -q -o StrictHostKeyChecking=no -t " + user + "@"
                            + address + " sudo bash -c \\\"" + shell_cmd + "\\\"",
                            withexitstatus=1,
                            events={"assword": password + "\n",
                                    expect_receive: expect_send + "\n"},
                            timeout=timeout, logfile=logfile_redirect)
    if VERBOSITY >= 4:
        print shell_cmd, "\nremote_shell: Exit Code =", exitstatus

    return {'stdout':command_output, 'exitcode':exitstatus}
def scp_file_to_ora(src_file_name):
    '''
    scp the given file over to the ORA and place it in onrack's
    home directory.

    :param src_file_name: name of file to copy over. May include path
    :type src_file_name: basestring
    :return: just name of file on target (no path)
    :rtype: basestring
    '''
    # NOTE(review): `file()` is the Python 2 builtin; the /dev/null handle is
    # never explicitly closed.
    logfile_redirect = file('/dev/null', 'w')
    just_fname = os.path.basename(src_file_name)
    # if localhost just copy to home dir
    # NOTE(review): this branch returns src_file_name (possibly with path),
    # which contradicts the docstring's "no path" contract — confirm.
    if ARGS_LIST['ora'] == 'localhost':
        remote_shell('cp ' + src_file_name + ' ~/' + src_file_name)
        return src_file_name

    scp_target = 'onrack@{0}:'.format(ARGS_LIST["ora"])

    cmd = 'scp -o StrictHostKeyChecking=no {0} {1}'.format(src_file_name, scp_target)
    if VERBOSITY >= 4:
        print "scp_file_to_ora: '{0}'".format(cmd)
    if VERBOSITY >= 9:
        logfile_redirect = sys.stdout
    (command_output, ecode) = pexpect.run(
        cmd, withexitstatus=1,
        events={'(?i)assword: ':ARGS_LIST['pwd'] + '\n'},
        logfile=logfile_redirect)
    if VERBOSITY >= 4:
        print "scp_file_to_ora: Exit Code = {0}".format(ecode)
    assert ecode == 0, \
        'failed "{0}" because {1}. Output={2}'.format(cmd, ecode, command_output)
    return just_fname
def get_auth_token():
    # This is run once to get an auth token which is set to global AUTH_TOKEN and used for rest of session
    # Returns True on success; on failure sets AUTH_TOKEN to "Unavailable" and
    # returns False. Also attempts to obtain a Redfish session token.
    global AUTH_TOKEN
    global REDFISH_TOKEN
    api_login = {"username": GLOBAL_CONFIG["api"]["admin_user"], "password": GLOBAL_CONFIG["api"]["admin_pass"]}
    redfish_login = {"UserName": GLOBAL_CONFIG["api"]["admin_user"], "Password": GLOBAL_CONFIG["api"]["admin_pass"]}
    try:
        # NOTE(review): this first POST acts purely as a reachability probe;
        # the identical login request is re-issued below — confirm intent.
        restful("https://" + ARGS_LIST['ora'] + ":" + str(API_PORT) +
                       "/login", rest_action="post", rest_payload=api_login, rest_timeout=2)
    except:
        AUTH_TOKEN = "Unavailable"
        return False
    else:
        api_data = restful("https://" + ARGS_LIST['ora'] + ":" + str(API_PORT) +
                           "/login", rest_action="post", rest_payload=api_login, rest_timeout=2)
        if api_data['status'] == 200:
            AUTH_TOKEN = str(api_data['json']['token'])
            # open a Redfish session to pick up an X-Auth-Token as well
            redfish_data = restful("https://" + ARGS_LIST['ora'] + ":" + str(API_PORT) +
                                   "/redfish/v1/SessionService/Sessions", rest_action="post", rest_payload=redfish_login, rest_timeout=2)
            if 'x-auth-token' in redfish_data['headers']:
                REDFISH_TOKEN =  redfish_data['headers']['x-auth-token']
                return True
            else:
                print "WARNING: Redfish API token not available."
        else:
            AUTH_TOKEN = "Unavailable"
            return False
def rackhdapi(url_cmd, action='get', payload=[], timeout=None, headers=None):
    '''
    This routine will build URL for RackHD API, enable port, execute, and return data
    Example: rackhdapi('/api/current/nodes') - simple 'get' command
    Example: rackhdapi("/api/current/nodes/ID/dhcp/whitelist", action="post")

    :param url_cmd: url command for monorail api
    :param action: rest action (get/put/post/delete)
    :param payload: rest payload
    :param timeout: rest timeout
    :param headers: rest_headers (a fresh dict is created when omitted)
    :return: {'json':result_data.json(), 'text':result_data.text,
                'status':result_data.status_code,
                'headers':result_data.headers.get('content-type'),
                'timeout':False}
    '''
    # Fix: the previous mutable default `headers={}` was shared across calls and
    # mutated downstream by restful() (Content-Type/auth headers accumulate),
    # leaking stale headers between unrelated API calls. Use a per-call dict.
    if headers is None:
        headers = {}

    # Automatic protocol selection: unless protocol is specified, test protocols, save settings globally
    global API_PROTOCOL
    global API_PORT

    if API_PROTOCOL == "None":
        if API_PORT == "None":
            API_PORT = str(GLOBAL_CONFIG['ports']['http'])
        # probe http first; fall back to https if it does not answer
        if restful("http://" + ARGS_LIST['ora'] + ":" + str(API_PORT) + "/", rest_timeout=2)['status'] == 0:
            API_PROTOCOL = 'https'
            API_PORT = str(GLOBAL_CONFIG['ports']['https'])
        else:
            API_PROTOCOL = 'http'
            API_PORT = str(GLOBAL_CONFIG['ports']['http'])

    # Retrieve authentication token for the session
    if AUTH_TOKEN == "None":
        get_auth_token()

    return restful(API_PROTOCOL + "://" + ARGS_LIST['ora'] + ":" + str(API_PORT) + url_cmd,
                   rest_action=action, rest_payload=payload, rest_timeout=timeout, rest_headers=headers)
def restful(url_command, rest_action='get', rest_payload=[], rest_timeout=None, sslverify=False, rest_headers={}):
'''
This routine executes a rest API call to the host.
:param url_command: the full URL for the command
:param rest_action: what the restful do (get/post/put/delete)
:param rest_payload: payload for rest request
:param rest_headers: headers (JSON dict)
:param rest_timeout: timeout for rest request
:param sslverify: ssl Verify (True/False)
:return: {'json':result_data.json(), 'text':result_data.text,
'status':result_data.status_code,
'headers':result_data.headers,
'timeout':False}
'''
result_data = None
# print URL and action
if VERBOSITY >= 4:
print "restful: Action = ", rest_action, ", URL = ", url_command
# prepare payload for XML output
payload_print = []
try:
json.dumps(rest_payload)
except:
payload_print = []
else:
payload_print = json.dumps(rest_payload, sort_keys=True, indent=4,)
if len(payload_print) > 4096:
payload_print = payload_print[0:4096] + '\n...truncated...\n'
if VERBOSITY >= 7 and rest_payload != []:
print "restful: Payload =\n", payload_print
rest_headers.update({"Content-Type": "application/json"})
if VERBOSITY >= 5:
print "restful: Request Headers =", rest_headers, "\n"
# If AUTH_TOKEN is set, add to header
if AUTH_TOKEN != "None" and AUTH_TOKEN != "Unavailable" and "authorization" not in rest_headers:
rest_headers.update({"authorization": "JWT " + AUTH_TOKEN, "X-Auth-Token": REDFISH_TOKEN})
# Perform rest request
try:
if rest_action == "get":
result_data = requests.get(url_command,
timeout=rest_timeout,
verify=sslverify,
headers=rest_headers)
if rest_action == "delete":
result_data = requests.delete(url_command,
data=json.dumps(rest_payload),
timeout=rest_timeout,
verify=sslverify,
headers=rest_headers)
if rest_action == "put":
result_data = requests.put(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "binary-put":
rest_headers.update({"Content-Type": "application/octet-stream"})
result_data = requests.put(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "text-put":
rest_headers.update({"Content-Type": "text/plain"})
result_data = requests.put(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "post":
result_data = requests.post(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "binary-post":
rest_headers.update({"Content-Type": "application/octet-stream"})
result_data = requests.post(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "text-post":
rest_headers.update({"Content-Type": "text/plain"})
result_data = requests.post(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "patch":
result_data = requests.patch(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
except requests.exceptions.Timeout:
return {'json':'', 'text':'',
'status':0,
'headers':'',
'timeout':True}
try:
result_data.json()
except ValueError:
if VERBOSITY >= 9:
print "restful: TEXT =\n"
print result_data.text
if VERBOSITY >= 6:
print "restful: Response Headers =", result_data.headers, "\n"
if VERBOSITY >= 4:
print "restful: Status code =", result_data.status_code, "\n"
return {'json':{}, 'text':result_data.text, 'status':result_data.status_code,
'headers':result_data.headers,
'timeout':False}
else:
if VERBOSITY >= 9:
print "restful: JSON = \n"
print json.dumps(result_data.json(), sort_keys=True, indent=4)
if VERBOSITY >= 6:
print "restful: Response Headers =", result_data.headers, "\n"
if VERBOSITY >= 4:
print "restful: Status code =", result_data.status_code, "\n"
return {'json':result_data.json(), 'text':result_data.text,
'status':result_data.status_code,
'headers':result_data.headers,
'timeout':False}
# Get the list of BMC IP addresses that we can find
def get_bmc_ips():
    '''
    Discover reachable BMCs: scan the local ARP table for pingable 172.x
    addresses, try each configured IPMI credential, and cache working entries
    ({"ip", "user", "pw"}) in the module-level BMC_LIST. Returns the count.
    '''
    idlist = [] # list of unique dcmi node IDs
    # If we have already done this, use that list
    if len(BMC_LIST) == 0:
        ipscan = remote_shell('arp')['stdout'].split()
        for ipaddr in ipscan:
            if ipaddr[0:3] == "172" and remote_shell('ping -c 1 -w 5 ' + ipaddr)['exitcode'] == 0:
                # iterate through all known IPMI users
                for item in GLOBAL_CONFIG['credentials']['bmc']:
                    # check BMC credentials
                    ipmicheck = remote_shell('ipmitool -I lanplus -H ' + ipaddr + ' -U ' + item['username'] \
                                       + ' -P ' + item['password'] + ' -R 1 -N 3 chassis power status')
                    if ipmicheck['exitcode'] == 0:
                        # retrieve the ID string (used to de-duplicate BMCs that
                        # answer on multiple addresses)
                        return_code = remote_shell('ipmitool -I lanplus -H ' + ipaddr + ' -U ' + item['username'] \
                                           + ' -P ' + item['password'] + ' -R 1 -N 3 dcmi get_mc_id_string')
                        bmc_info = {"ip": ipaddr, "user": item['username'], "pw": item['password']}
                        if return_code['exitcode'] == 0 and return_code['stdout'] not in idlist:
                            # add to list if unique
                            idlist.append(return_code['stdout'])
                            BMC_LIST.append(bmc_info)
                            break
                        else:
                            # simulated nodes don't yet support dcmi, remove this else branch when supported
                            BMC_LIST.append(bmc_info)
                            break
        if VERBOSITY >= 6:
            print "get_bmc_ips: "
            print "**** BMC IP node count =", len(BMC_LIST), "****"
    return len(BMC_LIST)
# power on/off all compute nodes in the stack via the BMC
def power_control_all_nodes(state):
if state != "on" and state != "off":
print "power_control_all_nodes: invalid state " + state
return
# Get the list of BMCs that we know about
node_count = get_bmc_ips()
# Send power on/off to all of them
for bmc in BMC_LIST:
return_code = remote_shell('ipmitool -I lanplus -H ' + bmc['ip'] \
+ ' -U ' + bmc['user'] + ' -P ' \
+ bmc['pw'] + ' -R 4 -N 3 chassis power ' + state)
if return_code['exitcode'] != 0:
print "Error powering " + state + " node: " + bmc['ip']
return node_count
def mongo_reset():
    """Reset the appliance's Mongo 'pxe' database to factory default.

    Stops the conductor services, drops the database, clears DHCP leases
    and the conductor event log, then restarts everything.
    Returns 0 on success, 1 if the conductor service fails to restart.
    """
    # clears the Mongo database on ORA to default, returns 0 if successful
    remote_shell('service onrack-conductor stop')
    remote_shell('/opt/onrack/bin/monorail stop')
    # Drop the whole 'pxe' database; the backslash escapes survive the
    # remote shell layer so mongo sees db.dropDatabase().
    remote_shell("mongo pxe --eval 'db.dropDatabase\\\(\\\)'")
    remote_shell('rm -f /var/lib/dhcp/dhcpd.leases')
    remote_shell('rm -f /var/log/onrack-conductor-event.log')
    remote_shell('/opt/onrack/bin/monorail start')
    # Only the final service restart is checked; earlier steps are best-effort.
    if remote_shell('service onrack-conductor start')['exitcode'] > 0:
        return 1
    return 0
def appliance_reset():
    """Power-cycle the appliance chassis via its BMC.

    Uses the BMC address from ARGS_LIST["bmc"] with the built-in
    credentials. Returns the ipmitool exit code (0 == success).
    """
    command = ("ipmitool -I lanplus -H " + ARGS_LIST["bmc"]
               + " -U root -P 1234567 chassis power reset")
    return subprocess.call(command, shell=True)
def node_select():
# returns a list with valid compute node IDs that match ARGS_LIST["sku"] in 'Name' or 'Model' field
# and matches node BMC MAC address in ARGS_LIST["obmmac"] if specified
# Otherwise returns list of all IDs that are not 'Unknown' or 'Unmanaged'
nodelist = []
skuid = "None"
# check if user specified a single nodeid to run against
# user must know the nodeid and any check for a valid nodeid is skipped
if ARGS_LIST["nodeid"] != 'None':
nodelist.append(ARGS_LIST["nodeid"])
return nodelist
else:
# Find SKU ID
skumap = rackhdapi('/api/2.0/skus')
if skumap['status'] != 200:
print '**** Unable to retrieve SKU list via API.\n'
sys.exit(255)
for skuentry in skumap['json']:
if str(ARGS_LIST['sku']) in json.dumps(skuentry):
skuid = skuentry['id']
# Collect node IDs
catalog = rackhdapi('/api/2.0/nodes')
if skumap['status'] != 200:
print '**** Unable to retrieve node list via API.\n'
sys.exit(255)
# Select node by SKU
for nodeentry in catalog['json']:
if ARGS_LIST["sku"] == 'all':
# Select only managed compute nodes
if nodeentry['type'] == 'compute':
nodelist.append(nodeentry['id'])
else:
if 'sku' in nodeentry and skuid in json.dumps(nodeentry['sku']):
nodelist.append(nodeentry['id'])
# Select by node BMC MAC addr
if ARGS_LIST["obmmac"] != 'all':
idlist = nodelist
nodelist = []
for member in idlist:
nodeentry = rackhdapi('/api/2.0/nodes/' + member)
if ARGS_LIST["obmmac"] in json.dumps(nodeentry['json']):
nodelist = [member]
break
if VERBOSITY >= 6:
print "Node List:"
print nodelist, '\n'
if len(nodelist) == 0:
print '**** Empty node list.\n'
return nodelist
def list_skus():
    """Return the names of all SKUs installed on the appliance."""
    api_data = rackhdapi('/api/2.0/skus')['json']
    return [entry['name'] for entry in api_data]
def get_node_sku(nodeid):
    """Return the SKU 'name' for the given node, or "" when unresolvable.

    Looks up the node, follows its 'sku' link, and returns the name field
    of the SKU document. API failures are reported (at VERBOSITY >= 2)
    and result in the empty-string default.
    """
    # return name field of node SKU if available
    nodetype = ""
    sku = ""
    # get node info
    mondata = rackhdapi("/api/2.0/nodes/" + nodeid)
    if mondata['status'] == 200:
        # get the sku id contained in the node
        sku = mondata['json'].get("sku")
        if sku:
            skudata = rackhdapi(sku)
            if skudata['status'] == 200:
                # NOTE(review): .get("name") may be None if the SKU document
                # has no name field -- callers appear to expect a string.
                nodetype = skudata['json'].get("name")
            else:
                if VERBOSITY >= 2:
                    errmsg = "Error: SKU API failed {}, return code {} ".format(sku, skudata['status'])
                    print errmsg
        else:
            if VERBOSITY >= 2:
                errmsg = "Error: nodeid {} did not return a valid sku in get_rackhd_nodetype{}".format(nodeid,sku)
                print errmsg
    return nodetype
def check_active_workflows(nodeid):
    """Return True if the node has any running or pending workflows."""
    workflows = rackhdapi('/api/2.0/nodes/' + nodeid + '/workflows')['json']
    for entry in workflows:
        status = entry['_status']
        if 'running' in status or 'pending' in status:
            return True
    return False
def cancel_active_workflows(nodeid):
    """Cancel all active workflows on a node.

    Returns True when the API accepted the cancel command (HTTP 202),
    False otherwise.
    """
    apistatus = rackhdapi('/api/2.0/nodes/' + nodeid + '/workflows/action',
                          action='put', payload={"command": "cancel"})['status']
    return apistatus == 202
def apply_obm_settings_new():
# Experimental routine to install OBM credentials via workflows
count = 0
for creds in GLOBAL_CONFIG['credentials']['bmc']:
# greate graph for setting OBM credentials
payload = \
{
"friendlyName": "IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings' + str(count),
"options": {
"obm-ipmi-task":{
"user": creds["username"],
"password": creds["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
# Setup additional OBM settings for nodes that currently use RMM port (still same bmc username/password used)
count = 0
for creds in GLOBAL_CONFIG['credentials']['bmc']:
# greate graph for setting OBM credentials for RMM
payload = \
{
"friendlyName": "RMM.IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(count),
"options": {
"obm-ipmi-task":{
"ipmichannel": "3",
"user": creds["username"],
"password": <PASSWORD>["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
# run each OBM credential workflow on each node in parallel until success
nodelist = node_select()
nodestatus = {} # dictionary with node IDs and status of each node
for node in nodelist:
nodestatus[node]= {"status": "pending", "instanceId": "", "sku": get_node_sku(node), "retry": 0}
for dummy in range(0, 60):
for num in range(0, count):
for node in nodelist:
skuid = rackhdapi('/api/2.0/nodes/' + node)['json'].get("sku")
skudata = rackhdapi(skuid)['text']
if "rmm.data.MAC" in skudata:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(num)}
else:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings' + str(num)}
# try workflow
if nodestatus[node]['status'] == "pending":
for dummy in range(0, 60):
# retry if other workflows active
result = rackhdapi("/api/2.0/nodes/" + node + "/workflows", action="post", payload=workflow)
if result['status'] == 201:
nodestatus[node].update({"status": "running", "instanceId": result['json']["instanceId"], "retry": 0})
break
else:
time.sleep(5)
for node in nodelist:
# check OBM workflow status
if nodestatus[node]['status'] == "running":
nodestatus[node]['retry'] += 1
state_data = rackhdapi("/api/2.0/workflows/" + nodestatus[node]['instanceId'])
if state_data['status'] == 200:
if "_status" in state_data['json']:
state = state_data['json']['_status']
else:
state = state_data['json']['status']
if state == "succeeded":
nodestatus[node]['status'] = "succeeded"
if state in ["failed", "cancelled", "timeout"]:
nodestatus[node]['status'] = "pending"
if VERBOSITY > 4:
print "**** Node(s) OBM status:\n", json.dumps(nodestatus, sort_keys=True, indent=4,)
if "pending" not in str(nodestatus) and "running" not in str(nodestatus):
# All OBM settings successful
return True
time.sleep(10)
# Failures occurred
print "**** Node(s) OBM settings failed."
return False
def apply_obm_settings():
# legacy routine to install OBM credentials via workflows
count = 0
for creds in GLOBAL_CONFIG['credentials']['bmc']:
# greate graph for setting OBM credentials
payload = \
{
"friendlyName": "IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings' + str(count),
"options": {
"obm-ipmi-task":{
"user": creds["username"],
"password": creds["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
# Setup additional OBM settings for nodes that currently use RMM port (still same bmc username/password used)
count = 0
for creds in GLOBAL_CONFIG['credentials']['bmc']:
# greate graph for setting OBM credentials for RMM
payload = \
{
"friendlyName": "RMM.IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(count),
"options": {
"obm-ipmi-task":{
"ipmichannel": "3",
"user": creds["username"],
"password": creds["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
# run each OBM workflow against each node until success
nodelist = node_select()
failedlist = []
for node in nodelist:
for num in range(0, count):
nodestatus = ""
skuid = rackhdapi('/api/2.0/nodes/' + node)['json'].get("sku")
skudata = rackhdapi(skuid)['text']
if "rmm.data.MAC" in skudata:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(num)}
else:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings' + str(num)}
# wait for existing workflow to complete
for dummy in range(0, 60):
result = rackhdapi("/api/2.0/nodes/" + node + "/workflows", action="post", payload=workflow)
if result['status'] != 201:
time.sleep(5)
else:
break
# wait for OBM workflow to complete
counter = 0
for counter in range(0, 60):
time.sleep(10)
state_data = rackhdapi("/api/2.0/workflows/" + result['json']["instanceId"])
if state_data['status'] == 200:
if "_status" in state_data['json']:
nodestatus = state_data['json']['_status']
else:
nodestatus = state_data['json']['status']
if nodestatus != "running" and nodestatus != "pending":
break
if nodestatus == "succeeded":
break
if counter == 60:
failedlist.append(node)
if len(failedlist) > 0:
print "**** Nodes failed OBM settings:", failedlist
return False
return True
def run_nose(nosepath):
    """Run nosetests for 'nosepath' (a single test file or a directory tree).

    Test parameters from ARGS_LIST are handed to the test processes through
    environment variables exported on the nosetests command line. Returns
    the summed exit code of all nosetests invocations (0 == all passed).
    """
    # this routine runs nosetests from wrapper using path spec 'nosepath'
    def _noserunner(pathspec):
        # Runs one nosetests invocation; the env exports must stay on the
        # same shell command line so the test processes inherit them.
        xmlfile = str(time.time()) + ".xml" # XML report file name
        return subprocess.call(
            [
                'export VERBOSITY=' + str(ARGS_LIST['v']) + ';' +
                'export ORA=' + str(ARGS_LIST['ora']) + ';' +
                'export STACK=' + str(ARGS_LIST['stack']) + ';' +
                'export SKU="' + str(ARGS_LIST['sku']) + '";' +
                'export NODEID=' + str(ARGS_LIST['nodeid']) + ';' +
                'export OBMMAC=' + str(ARGS_LIST['obmmac']) + ';' +
                'export VERSION=' + str(ARGS_LIST['version']) + ';' +
                'export TEMPLATE=' + str(ARGS_LIST['template']) + ';' +
                'export XUNIT=' + str(ARGS_LIST['xunit']) + ';' +
                'export GROUP=' + str(ARGS_LIST['group']) + ';' +
                'export CONFIG=' + str(ARGS_LIST['config']) + ';' +
                'export HTTP=' + str(ARGS_LIST['http']) + ';' +
                'export HTTPS=' + str(ARGS_LIST['https']) + ';' +
                'export PORT=' + str(ARGS_LIST['port']) + ';' +
                'nosetests ' + noseopts + ' --xunit-file ' + xmlfile + ' ' + pathspec
            ], shell=True)
    exitcode = 0
    # set nose options
    noseopts = ' --exe '
    if ARGS_LIST['group'] != 'all' and ARGS_LIST['group'] != '':
        # restrict to tests tagged with the requested attribute
        noseopts += ' -a ' + str(ARGS_LIST['group']) + ' '
    if ARGS_LIST['list'] == True or ARGS_LIST['list'] == "True":
        # collect-only mode: print the test names without running them
        noseopts += ' -v --collect-only '
        ARGS_LIST['v'] = 0
        print "\nTest Listing for:", ARGS_LIST['test']
        print "----------------------------------------------------------------------"
    if ARGS_LIST['xunit'] == True or ARGS_LIST['xunit'] == "True":
        noseopts += ' --with-xunit '
    else:
        noseopts += ' -s '
    # if nosepath is a directory, recurse through subdirs else run single test file
    if os.path.isdir(nosepath):
        cmdline = ""
        for subdir, dirs, files in os.walk(nosepath):
            cmdline += " " + subdir
        exitcode += _noserunner(cmdline)
    else:
        exitcode += _noserunner(nosepath)
    return exitcode
| 1.679688 | 2 |
location/Peru/INEI/clean.py | vishalbelsare/classifications | 16 | 12762767 | # -*- coding: utf8 -*-
import pandas as pd
from fix_spanish_title_case import fix_spanish_title_case
from classification import (
Hierarchy,
parent_code_table_to_parent_id_table,
Classification,
)
if __name__ == "__main__":
    # Build the Peru INEI location classification (country/department/
    # province/district) from the raw ubigeo CSV and emit CSV + Stata files.
    df = pd.read_csv(
        "in/ubigeo-data-titlecased.csv", encoding="utf-8", dtype={"inei": str}
    )
    df.columns = ["reniec_code", "code", "name", "complete_name"]
    df = df[["code", "name"]]
    # Drop rows without an INEI code.
    df = df[~df.code.isnull()]
    df.name = df.name.map(fix_spanish_title_case, na_action="ignore")
    # Names are identical across languages/short forms for this dataset.
    df["name_es"] = df.name
    df["name_en"] = df.name
    df["name_short_es"] = df.name
    df["name_short_en"] = df.name

    # This adds a highest level element that represents the whole country
    peru = pd.Series(
        {
            "code": "000000",
            "name": u"Peru",
            "name_es": u"Perú",
            "name_short_es": u"Perú",
            "name_en": u"Peru",
            "name_short_en": u"Peru",
        }
    )
    df = pd.concat([pd.DataFrame(peru).T, df])

    def fix_levels(row):
        # Derive level and parent_code from the 6-digit ubigeo code:
        # DDPPdd -> department (DD0000), province (DDPP00), district (DDPPdd).
        if row.code == "000000":
            row["level"] = "country"
            # NOTE(review): pd.np was removed in pandas >= 1.0; this script
            # requires an older pandas (or migrating to numpy.nan).
            row["parent_code"] = pd.np.nan
        elif row.code.endswith("0000"):
            row["level"] = "department"
            row["parent_code"] = "000000"
        elif row.code.endswith("00"):
            row["level"] = "province"
            row["parent_code"] = row["code"][:2] + "0000"
        else:
            row["level"] = "district"
            row["parent_code"] = row["code"][:4] + "00"
        return row

    df = df.apply(fix_levels, axis=1)

    h = Hierarchy(["country", "department", "province", "district"])
    # NOTE(review): astype("category", categories=..., ordered=...) was
    # removed in newer pandas; the modern form uses pd.CategoricalDtype.
    df.level = df.level.astype("category", categories=h, ordered=True)
    # Sort by hierarchy level first so parents precede their children.
    df = df.sort_values(by=["level", "code"])
    df.level = df.level.astype(str)
    df = df.reset_index(drop=True)

    parent_id_table = parent_code_table_to_parent_id_table(df, h)
    # TODO: This isn't the official classification level name but this makes
    # compatibility between colombia and mexico way easier
    # parent_code_table.loc[parent_code_table.level == "state", "level"] = "department"

    # Drop the "locality" level since we don't use it
    # parent_code_table = parent_code_table[parent_code_table.level != "locality"]
    parent_id_table = parent_id_table[
        [
            "code",
            "name",
            "level",
            "name_es",
            "name_en",
            "name_short_es",
            "name_short_en",
            "parent_id",
        ]
    ]

    c = Classification(parent_id_table, h)

    c.to_csv("out/locations_peru_inei.csv")
    c.to_stata("out/locations_peru_inei.dta")
| 3.15625 | 3 |
N64RET/Loader/SegmentImpl.py | N64RET/decomp-framework | 1 | 12762768 | from N64RET.Loader.Abstract.SegmentInterface import SegmentInterface
class Segment(SegmentInterface):
    """Default loader segment implementation."""

    def processCallback(self):
        # No per-segment processing is performed here.
        # NOTE(review): True appears to signal "handled successfully" --
        # confirm against the SegmentInterface contract.
        return True
| 1.710938 | 2 |
EFrame.py | ionicsolutions/eframe | 0 | 12762769 | <gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
**EFrame** is the main experimental control GUI for the IonCavity experiments.
*EFrame* was designed and written by <NAME> <<EMAIL>>
for the UVC experiment starting in 2014. Since the summer of 2016,
<NAME> <<EMAIL>> is the lead developer.
**List of contributors:**
* <NAME> (TB)
* <NAME> (AB)
* <NAME> (KK)
* <NAME> (MZ)
* <NAME> (PK)
* You?
"""
# NOTE: With dc513a29d2be25f966a4ef1036159a7ae73da7a6, core.state.State
# which was previously defined here was moved to its own file.
# In the process, core.config.XMLConfig, core.resourceManager.Resources and
# core.storage.Storage were separated from core.state.State and also moved
# to their own files.
import argparse
import logging.handlers
import os
import subprocess
from config.kafka import setup
from core.mainWindow import MainWindow
from core.exceptions import InitErrorException
from lib.kafkaLogging import KafkaLoggingHandler
if __name__ == "__main__":
    # DEFAULTS
    expFile = 'config/IRC_Experiment.conf' # which file to load
    logLevel = logging.INFO # general log level
    chLevel = logging.INFO # output to sys.stdout/sys.stderr
    fhLevel = logging.WARNING # output to log files
    thLevel = logging.WARNING # output to "Output" tab in EFrame GUI

    # HANDLE ARGUMENTS
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", action="store_true", dest="debug",
                        help="show log messages of level DEBUG")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        help="only show log messages of level WARNING in "
                             "console and of level ERROR in GUI")
    parser.add_argument("-l", "--loglevel", type=str, dest="loglevel",
                        help="file loglevel, e.g. DEBUG or INFO")
    parser.add_argument("-t", "--thirdparty", action="store_true",
                        dest="thirdparty",
                        help="show all third-party module log messages")
    parser.add_argument(nargs=1, action="store", dest="file",
                        help="experiment file")
    parser.add_argument("-k", "--no-kafka", action="store_true",
                        dest="nokafka",
                        help="disable Kafka log handler")

    args = parser.parse_args()

    # LOG CONFIGURATION
    if args.debug:
        logLevel = logging.DEBUG
        chLevel = logging.DEBUG

    if args.quiet:
        chLevel = logging.WARNING
        # BUGFIX: previously assigned to an unused name 'shLevel'; per the
        # -q help text, quiet mode raises the GUI log level to ERROR.
        thLevel = logging.ERROR

    if args.loglevel:
        requested = args.loglevel.upper()
        if requested == "DEBUG":
            fhLevel = logging.DEBUG
        elif requested == "INFO":
            fhLevel = logging.INFO
        elif requested == "WARNING":
            fhLevel = logging.WARNING

    # THIRD-PARTY LOGGER CONFIGURATION
    # Hide INFO level logging from the requests and urllib3 package,
    # otherwise we are flooded with HTTP connection messages
    if not args.thirdparty:
        logging.getLogger("requests").setLevel(logging.WARNING)
        logging.getLogger("urllib3").setLevel(logging.WARNING)

    # VERIFY EXPERIMENT FILE
    if os.path.isfile(args.file[0]):
        expFile = args.file[0]
    else:
        raise InitErrorException("'%s' is not a file.",
                                 args.file[0])

    # STARTUP DISPLAY
    # get git revision hash to log and display
    gitRevision = subprocess.check_output(
        ['git', 'rev-parse', 'HEAD']).strip()
    gitBranch = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()

    print("")
    print("EEEEEE FFFFFF RRRRR     AAA   MM   MM EEEEEE")
    print("EE     FF     RR  RR   AA AA  MMMMMMM EE    ")
    print("EEEEEE FFFFF  RRRRR   AA   AA MM M MM EEEEEE")
    print("EE     FF     RR  RR  AAAAAAA MM   MM EE    ")
    print("EEEEEE FF     RR   RR AA   AA MM   MM EEEEEE")
    print("")
    print("Providing Ionic Solutions Since 2014.")
    print("")
    print("Git Branch: %s" % gitBranch)
    print("Git Revision: %s" % gitRevision)
    print("")
    print("Licensed under the Apache License, Version 2.0")
    print("(c) 2014-2017 <NAME>, <NAME>, et al.")
    print("")

    # LOGGING CONFIGURATION
    # initialize root logger
    logger = logging.getLogger()
    logger.setLevel(logLevel)

    # common format for file and console logging
    # (format for QTextEdit widget is defined in QTextEditHandler)
    fmt = "%(asctime)s: %(levelname)s: %(name)s: %(message)s"
    datefmt = "%Y/%m/%d - %H:%M:%S"

    # log to file 'EFrame.log', each of max. 100 KB length, keep 5 backups
    if not os.path.exists("log"):
        os.mkdir("log")
    fh = logging.handlers.RotatingFileHandler("log/EFrame.log", maxBytes=100000,
                                              backupCount=5)
    fh.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    logger.addHandler(fh)

    fh.setLevel(logging.INFO)  # Ensure that we log the git revision
    logger.info("Starting EFrame git revision %s (branch: %s).",
                gitRevision, gitBranch)
    logger.info("Configuration: %s", expFile)
    fh.setLevel(fhLevel)

    # when debugging, append additional information to each entry
    if logLevel == logging.DEBUG:
        fmt += " (@%(created)f in %(filename)s l. %(lineno)d)"

    # log to sys.stdout/sys.stderr
    ch = logging.StreamHandler()
    ch.setLevel(chLevel)
    ch.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    logger.addHandler(ch)

    # log to Kafka
    if not args.nokafka:
        kfmt = "EFrame %s: " % gitBranch
        kfmt += fmt
        kh = KafkaLoggingHandler(setup["servers"], setup["topic"])
        kh.setFormatter(logging.Formatter(fmt=kfmt, datefmt=datefmt))
        kh.setLevel(logging.WARNING)
        logger.addHandler(kh)

    # The log to the output tab in EFrame requires that we have
    # an existing QTextEdit widget available. We therefore wait
    # for EFrame's GUI to be initialized. No messages will be lost.

    # START MAIN WINDOW
    # if we are running on Windows, change the appUserModelID so the taskbar
    # does not group us with other Python programs
    if os.name == "nt":
        import ctypes
        myappid = "AQO.EFrame"  # arbitrary string
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

    logger.info("Starting EFrame main window")
    mw = MainWindow(rootLogger=logger, thLevel=thLevel, expFile=expFile)
| 1.640625 | 2 |
glitchart/glitchart.py | delivrance/glitchart | 29 | 12762770 | # MIT License
#
# Copyright (c) 2019 <NAME> <https://github.com/delivrance>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import logging
import os
import subprocess
from pathlib import Path
from random import Random
from uuid import uuid4
from PIL import Image
# Bounds for how many bytes get corrupted per image / per video frame.
MIN_AMOUNT_IMAGE = 1
MAX_AMOUNT_IMAGE = 10

MIN_AMOUNT_VIDEO = 0
MAX_AMOUNT_VIDEO = 3

# Seed range used by the async video glitcher.
MIN_SEED = -2 ** 63
MAX_SEED = 2 ** 63 - 1

SOS = b"\xFF\xDA"  # Start Of Scan
EOI = b"\xFF\xD9"  # End Of Image

OUT_NAME_TEMPLATE = "{}_glitch.{}"

log = logging.getLogger(__name__)


def jpeg(photo: str,
         seed: int = None,
         min_amount: int = MIN_AMOUNT_IMAGE,
         max_amount: int = MAX_AMOUNT_IMAGE,
         inplace: bool = False) -> str:
    """Glitch a JPEG file by corrupting random bytes of its scan data.

    Unless *inplace* is True, the result is written to the current working
    directory as "<stem>_glitch.jpg". The same *seed* applied to the same
    source file always reproduces the same glitched output. *min_amount*
    and *max_amount* bound how many bytes are corrupted (negative values
    are clamped; min > max collapses the range to min). Bytes equal to
    0x00 or 0xFF are never touched so the JPEG structure stays parseable.

    Returns the absolute path of the glitched image.
    """
    target = photo if inplace else OUT_NAME_TEMPLATE.format(Path(photo).stem, "jpg")
    rng = Random(seed)

    # Normalize the corruption bounds.
    if min_amount < 0:
        min_amount = 0
    if max_amount < 0:
        max_amount = 1
    if max_amount < min_amount:
        max_amount = min_amount
    amount = rng.randint(min_amount, max_amount)

    with open(photo, "rb") as src:
        blob = src.read()

    # Only corrupt the entropy-coded scan data, skipping the scan header.
    scan_start = blob.index(SOS) + len(SOS) + 10
    scan_end = blob.rindex(EOI)
    payload = bytearray(blob[scan_start:scan_end])

    touched = set()
    for _ in range(amount):
        # Draw positions until we hit a fresh byte that is neither
        # 0x00 nor 0xFF (marker-relevant values).
        while True:
            pos = rng.randrange(len(payload))
            if pos not in touched and payload[pos] not in (0, 255):
                touched.add(pos)
                break
        # Draw replacement values until one differs from the current byte.
        while True:
            new_byte = rng.randint(1, 254)
            if new_byte != payload[pos]:
                payload[pos] = new_byte
                break

    with open(target, "wb") as dst:
        dst.write(blob[:scan_start] + payload + blob[scan_end:])

    return Path(target).absolute()
async def jpeg_async(*args, **kwargs):
    """Async wrapper around :func:`jpeg` (the work itself runs synchronously)."""
    return jpeg(*args, **kwargs)
def png(photo: str,
        seed: int = None,
        min_amount: int = MIN_AMOUNT_IMAGE,
        max_amount: int = MAX_AMOUNT_IMAGE,
        inplace: bool = False):
    """Glitch a PNG file by round-tripping it through the JPEG glitcher.

    The image is flattened onto a white background (JPEG cannot store
    alpha), glitched via :func:`jpeg`, converted back to RGBA and saved
    as "<stem>_glitch.png" in the current working directory (or over the
    original when *inplace* is True). See :func:`jpeg` for the meaning of
    *seed*, *min_amount* and *max_amount*.

    Returns the absolute path of the glitched image.
    """
    out = photo if inplace else OUT_NAME_TEMPLATE.format(Path(photo).stem, "png")
    # Temporary JPEG used as the glitching medium (created in the CWD).
    jpg_path = "{}.jpg".format(uuid4())

    # Renamed from 'png' to avoid shadowing this function's own name.
    source = Image.open(photo).convert("RGBA")
    background = Image.new("RGB", source.size, (255, 255, 255))
    background.paste(source, source)
    background.save(jpg_path)

    jpeg(jpg_path, seed, min_amount, max_amount, True)

    Image.open(jpg_path).convert("RGBA").save(out)
    os.remove(jpg_path)

    return Path(out).absolute()
async def png_async(*args, **kwargs):
    """Async wrapper around :func:`png` (the work itself runs synchronously)."""
    return png(*args, **kwargs)
def webp(photo: str,
         seed: int = None,
         min_amount: int = MIN_AMOUNT_IMAGE,
         max_amount: int = MAX_AMOUNT_IMAGE,
         inplace: bool = False):
    """Glitch a WebP file by round-tripping it through the PNG glitcher.

    The image is converted to a temporary PNG, glitched via :func:`png`
    (and thus :func:`jpeg`), then saved back as "<stem>_glitch.webp" in
    the current working directory (or over the original when *inplace*
    is True). See :func:`jpeg` for the meaning of the other parameters.

    Returns the absolute path of the glitched image.
    """
    out = photo if inplace else OUT_NAME_TEMPLATE.format(Path(photo).stem, "webp")
    # Temporary PNG used as the glitching medium (created in the CWD).
    png_path = "{}.png".format(uuid4())

    # Renamed from 'webp' to avoid shadowing this function's own name.
    source = Image.open(photo)
    source.save(png_path)

    png(png_path, seed, min_amount, max_amount, True)

    Image.open(png_path).save(out)
    os.remove(png_path)

    return Path(out).absolute()
async def webp_async(*args, **kwargs):
    """Async wrapper around :func:`webp` (the work itself runs synchronously)."""
    return webp(*args, **kwargs)
def mp4(video: str,
        seed: int = None,
        min_amount: int = MIN_AMOUNT_VIDEO,
        max_amount: int = MAX_AMOUNT_VIDEO,
        inplace: bool = False):
    """Glitch an MP4 by extracting frames with ffmpeg, glitching each
    frame via :func:`jpeg`, and re-encoding at the original frame rate.

    Requires ffmpeg/ffprobe on PATH. Temporary frame files are created in
    the CWD and removed afterwards. Returns the absolute output path.

    SECURITY: the video path is interpolated into shell=True command
    strings; do not pass untrusted filenames.
    NOTE(review): this sync version derives per-frame seeds with
    prng.getrandbits(2500) while mp4_async uses prng.randint(MIN_SEED,
    MAX_SEED) -- the two produce different glitches for the same seed.
    """
    out = video if inplace else OUT_NAME_TEMPLATE.format(Path(video).stem, "mp4")
    uuid = uuid4()

    try:
        # Probe the source frame rate so the re-encode keeps the timing.
        fps = subprocess.check_output(
            "ffprobe -v error -select_streams v -of "
            "default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate {video}".format(
                video=video
            ),
            shell=True
        ).strip().decode()

        # Explode the video into individually numbered JPEG frames.
        os.system(
            "ffmpeg -loglevel quiet -i {video} {uuid}_%8d.jpg".format(
                video=video,
                uuid=uuid
            )
        )

        prng = Random(seed)

        # Glitch every frame with a seed derived from the master PRNG.
        for p in sorted(Path().rglob(f"{uuid}_*.jpg")):
            jpeg(str(p), prng.getrandbits(2500), min_amount, max_amount, inplace=True)

        # Reassemble the glitched frames at the original frame rate.
        os.system(
            "ffmpeg -loglevel quiet -r {fps} -i {uuid}_%8d.jpg {out} -y".format(
                fps=fps,
                uuid=uuid,
                out=out
            )
        )
    except Exception as e:
        log.error(e)
    finally:
        # Always clean up the temporary frame files.
        for p in Path().rglob(f"{uuid}_*.jpg"):
            try:
                os.remove(str(p))
            except OSError:
                pass

    return Path(out).absolute()
async def mp4_async(video: str,
                    seed: int = None,
                    min_amount: int = MIN_AMOUNT_VIDEO,
                    max_amount: int = MAX_AMOUNT_VIDEO,
                    inplace: bool = False):
    """Async variant of :func:`mp4` that runs ffmpeg via asyncio
    subprocesses. Frame glitching itself is still synchronous.

    SECURITY: the video path is interpolated into shell command strings;
    do not pass untrusted filenames.
    NOTE(review): per-frame seeds use prng.randint(MIN_SEED, MAX_SEED),
    unlike the sync mp4() which uses prng.getrandbits(2500) -- the two
    produce different glitches for the same master seed.
    """
    out = video if inplace else OUT_NAME_TEMPLATE.format(Path(video).stem, "mp4")
    uuid = uuid4()

    try:
        # Probe the source frame rate so the re-encode keeps the timing.
        # (ffprobe itself is still run synchronously here.)
        fps = subprocess.check_output(
            "ffprobe -v error -select_streams v -of "
            "default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate {video}".format(
                video=video
            ),
            shell=True
        ).strip().decode()

        # Explode the video into individually numbered JPEG frames.
        process = await asyncio.create_subprocess_shell(
            "ffmpeg -loglevel quiet -i {video} {uuid}_%8d.jpg".format(
                video=video,
                uuid=uuid
            )
        )

        await process.wait()

        prng = Random(seed)

        # Glitch every frame with a seed derived from the master PRNG.
        for p in sorted(Path().rglob(f"{uuid}_*.jpg")):
            jpeg(str(p), prng.randint(MIN_SEED, MAX_SEED), min_amount, max_amount, inplace=True)

        # Reassemble the glitched frames at the original frame rate.
        process = await asyncio.create_subprocess_shell(
            "ffmpeg -loglevel quiet -r {fps} -i {uuid}_%8d.jpg {out} -y".format(
                fps=fps,
                uuid=uuid,
                out=out
            )
        )

        await process.wait()
    except Exception as e:
        log.error(e)
    finally:
        # Always clean up the temporary frame files.
        for p in Path().rglob("{uuid}_*.jpg".format(uuid=uuid)):
            try:
                os.remove(str(p))
            except OSError:
                pass

    return Path(out).absolute()
| 2.015625 | 2 |
learn.py | mnabila/learningMininet | 0 | 12762771 | from mininet.topo import Topo
from mininet.net import OVSKernelSwitch
class GedungSatu(Topo):
    """Tree topology for building one: 7 switches and 15 hosts.

    Layout:
      - s1 is the root; s2, s3 and s4 hang off it (upper tier).
      - s5 also connects to s1 and fans out to s6 and s7 (lower tier).
      - Hosts h1-h9 attach in groups of three to s2/s3/s4.
      - Hosts h10-h15 attach in groups of three to s6/s7.
    """

    def __init__(self, **opts):
        Topo.__init__(self, **opts)

        # 15 client hosts, 192.168.100.10-24/27.
        # NOTE(review): the MAC strings keep the literal "0h"/"0s" octets
        # from the original file, which are not valid hexadecimal --
        # preserved as-is; confirm whether real MAC values were intended.
        hosts = [
            self.addHost(name="h%d" % i,
                         mac="00:00:00:00:0h:%02d" % i,
                         ip="192.168.100.%d/27" % (9 + i))
            for i in range(1, 16)
        ]

        # 7 OVS switches.
        switches = [
            self.addSwitch(name="s%d" % i,
                           cls=OVSKernelSwitch,
                           mac="00:00:00:00:0s:%02d" % i)
            for i in range(1, 8)
        ]
        s1, s2, s3, s4, s5, s6, s7 = switches

        # Upper tier: s2, s3, s4 uplink to the root switch s1.
        for switch in (s2, s3, s4):
            self.addLink(switch, s1)
        # Trunk between the upper and lower halves of the network.
        self.addLink(s5, s1)
        # Lower tier: s6 and s7 uplink to s5.
        for switch in (s6, s7):
            self.addLink(switch, s5)

        # Attach hosts in groups of three to their access switch,
        # preserving the original h1..h15 ordering.
        access = [s2, s2, s2, s3, s3, s3, s4, s4, s4,
                  s6, s6, s6, s7, s7, s7]
        for host, switch in zip(hosts, access):
            self.addLink(host, switch)
self.addLink(h15, s7)
# Registry consumed by Mininet's --custom/--topo mechanism:
#   sudo mn --custom learn.py --topo g1
topos = {"g1": (lambda: GedungSatu())}
| 2.25 | 2 |
Support/Fuego/Pythia/pythia-0.5/packages/pyre/pyre/facilities/ScriptBinder.py | balos1/PelePhysics | 31 | 12762772 | <reponame>balos1/PelePhysics
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from __future__ import absolute_import
from .Binder import Binder
class ScriptBinder(Binder):
    """Binder that instantiates a facility component from a Python script.

    The bound value names a script file; the script is executed and is
    expected to define a callable named after the facility, which is then
    invoked with no arguments to produce the component.
    """

    def bind(self, facility, value):
        """Execute the script named by *value* and return ``<facility>()``."""
        # Read the script text and close the file promptly; the original
        # opened the file with the py2-only file() builtin and leaked the
        # handle (exec on a file object also no longer works in py3).
        with open(self._resolve(value)) as stream:
            source = stream.read()
        context = {}
        exec(source, context)
        # NOTE(review): eval of a caller-supplied name -- 'facility' must
        # come from a trusted source.
        component = eval("%s()" % facility, context)
        return component

    def _resolve(self, name):
        """Return *name*, appending a ``.py`` extension when it has none."""
        import os
        ext = os.path.splitext(name)[1]
        if not ext:
            name += ".py"
        return name
# version
__id__ = "$Id$"  # RCS/CVS keyword, expanded by the VCS at checkout time

# End of file
| 2.078125 | 2 |
brainda/algorithms/deep_learning/__init__.py | Mrswolf/brainda | 24 | 12762773 | <filename>brainda/algorithms/deep_learning/__init__.py
from .base import *
from .eegnet import EEGNet
from .shallownet import ShallowNet | 1.1875 | 1 |
blowdrycss/unit_tests/test_classPropertyParser.py | acnagy/test-blowdrycss | 0 | 12762774 | <filename>blowdrycss/unit_tests/test_classPropertyParser.py
# python 2
from __future__ import absolute_import
# builtin
from unittest import TestCase, main
# custom
from blowdrycss.classpropertyparser import ClassPropertyParser
class TestClassPropertyParser(TestCase):
    """Unit tests for ``blowdrycss.classpropertyparser.ClassPropertyParser``.

    Each test builds a parser around a small ``class_set`` and exercises one
    stage of the encoded-class pipeline: cleaning/validation, property-name
    resolution (identical name, alias, abbreviation, regex), pseudo
    class/element handling, encoded-value decoding, and ``!important``
    priority handling.
    """

    def test_class_set_to_lowercase(self):
        original_class_set = {'ThE', 'the', 'THE', 't2HE'}
        expected_class_set = {'the', 'the', 'the', 't2he'}
        class_parser = ClassPropertyParser(class_set=original_class_set)
        class_parser.class_set_to_lowercase()
        self.assertEqual(class_parser.class_set, expected_class_set)

    def test_underscores_valid_is_true(self):
        # Underscores act as decimal points, e.g. '5_2rem' -> '5.2rem'.
        valid_classes = {'6_3', 'padding-5_2rem', 'height-24_48p', '1_2-5_75-1_2-5_75', 'n5_25cm', }
        class_parser = ClassPropertyParser(class_set=valid_classes)
        for css_class in class_parser.class_set:
            self.assertTrue(class_parser.underscores_valid(css_class=css_class), msg=css_class)

    def test_underscores_valid_is_false(self):
        invalid_classes = {
            '_bold', 'lighter-1_', 'width-_2', 'margin-2_rem', 'height-m_px', 'bg-color__blue',
            '-_2', '2_rem', 'm_px', '__', '_35', '42_', '-7_2', '5_4-', ' _ ', ' _3_2', '8_9_ ', '6_4 _ ',
        }
        class_parser = ClassPropertyParser(class_set=set())
        for css_class in invalid_classes:
            self.assertFalse(class_parser.underscores_valid(css_class=css_class), msg=css_class)

    def test_clean_class_set(self):
        valid_classes = {
            'color-hfff', 'font-color-hsla-120-60p-70p-0_3', 'padding-5_2rem', 'height-24_48p',
            'padding-7_3-8_5-9_7-10_2', 'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i',
        }
        # Covers all invalid cases: first char, allowed chars, last char, and underscores.
        invalid_classes = {
            '', ' ', '*b', 'bg-color__blue', 'height-m_px', 'lighter-1$', 'margin-2_rem',
            'padding-@1px-2px-1px-2px', 'width-_2', 'bold-', 'green_', 'font-color-#000',
        }
        expected_removed = {
            ' (May not be None or "".)',
            '  (Only a-z allowed for first character of class.)',
            '*b (Only a-z allowed for first character of class.)',
            'bg-color__blue (Invalid underscore usage in class.)',
            'height-m_px (Invalid underscore usage in class.)',
            'lighter-1$ (Only a-z, 0-9, "_", and "-" are allowed in class name.)',
            'margin-2_rem (Invalid underscore usage in class.)',
            'padding-@1px-2px-1px-2px (Only a-z, 0-9, "_", and "-" are allowed in class name.)',
            'width-_2 (Invalid underscore usage in class.)',
            'bold- (Only a-z and 0-9 allowed for last character of class.)',
            'green_ (Only a-z and 0-9 allowed for last character of class.)',
            'font-color-#000 (Only a-z, 0-9, "_", and "-" are allowed in class name.)',
        }
        class_parser = ClassPropertyParser(class_set=set())                 # Prevents the implicit call in __init__()
        class_parser.class_set = valid_classes.union(invalid_classes)       # Mix valid and invalid classes
        class_parser.clean_class_set()
        self.assertEqual(class_parser.class_set, valid_classes)             # Only valid classes should remain.
        self.assertTrue(class_parser.removed_class_set == expected_removed, msg=expected_removed)

    def test_get_property_name_by_identical_name_valid(self):
        valid_identical_set = {'font-weight-bold', 'font-weight-700'}
        expected_property_name = 'font-weight'
        class_parser = ClassPropertyParser(class_set=valid_identical_set)
        class_list = list(class_parser.class_set)
        for i, css_class in enumerate(class_list):
            property_name = class_parser.get_property_name(css_class=css_class)
            self.assertEqual(property_name, expected_property_name)

    def test_get_property_name_by_identical_name_invalid(self):
        invalid_identical_set = [
            'font-weight', 'font-weight-', 'afont-weight-', '-font-weight', 'font%weight', 'fw-', '700'
        ]
        expected_property_name = ''
        expected_empty_set = set()
        class_parser = ClassPropertyParser(class_set=set())
        for css_class in invalid_identical_set:
            property_name = class_parser.get_property_name(css_class=css_class)
            self.assertEqual(property_name, expected_property_name)
            self.assertEqual(class_parser.class_set, expected_empty_set, msg=class_parser.class_set)

    def test_get_property_name_by_alias(self):
        class_alias_set = {'bold', 'bolder', 'lighter', 'fweight-200', 'f-weight-100', 'fw-bold', 'font-w-900', }
        expected_property_name = 'font-weight'
        class_parser = ClassPropertyParser(class_set=set())
        class_list = list(class_alias_set)
        for css_class in class_list:
            property_name = class_parser.get_property_name(css_class=css_class)
            self.assertEqual(property_name, expected_property_name, msg=css_class)

    def test_get_property_name_by_regex(self):
        # 'h' + hex digits is recognized as a color, e.g. 'hfff' -> '#fff'.
        class_alias_set = {'h0e2', 'h2ad', 'h987fcb', 'h15af36', }
        expected_property_name = 'color'
        class_parser = ClassPropertyParser(class_set=set())
        class_list = list(class_alias_set)
        for css_class in class_list:
            property_name = class_parser.get_property_name(css_class=css_class)
            self.assertEqual(property_name, expected_property_name, msg=css_class)

    def test_get_property_name_non_matching(self):
        non_matching = ['not-a-property-', 'a-font-not-']
        expected_property_name = ''
        expected_empty_set = set()
        class_parser = ClassPropertyParser(class_set=set())
        for css_class in non_matching:
            property_name = class_parser.get_property_name(css_class=css_class)
            self.assertEqual(property_name, expected_property_name)
            self.assertEqual(class_parser.class_set, expected_empty_set)

    def test_is_valid_pseudo_format_True(self):
        valid_inputs = (
            'color-blue-hover', 'padding-10rem-i-active', 'bgc-h048-visited',
            'color-red-after', 'padding-20rem-i-before', 'bgc-h096-selection',
            'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i', 'color-hfff-i-hover',
        )
        pseudo_items = (
            'hover', 'active', 'visited', 'after', 'before', 'selection',
            'hover', 'hover', 'hover', 'hover',
        )
        class_parser = ClassPropertyParser(class_set=set())
        for i, valid_input in enumerate(valid_inputs):
            self.assertTrue(
                class_parser.is_valid_pseudo_format(pseudo_items[i], valid_input),
                msg=valid_input
            )

    def test_is_valid_pseudo_format_False(self):
        invalid_inputs = (
            '-hover-blue', 'pa-active-10rem-i', 'bgc-', 'margin-10-medium-up',
            '-after-blue', 'pa-before-10rem-i', 'bgc-', 'width-10-small-up'
        )
        pseudo_items = ('hover', 'active', 'invalid', 'invalid', 'after', 'before', 'invalid', 'invalid')
        class_parser = ClassPropertyParser(class_set=set())
        for i, invalid_input in enumerate(invalid_inputs):
            self.assertFalse(
                class_parser.is_valid_pseudo_format(pseudo_items[i], css_class=invalid_input),
                msg=invalid_input
            )

    def test_get_pseudo_class(self):
        valid_inputs = ('color-blue-hover', 'padding-10rem-i-active', 'bgc-h048-visited')
        expected_classes = ('hover', 'active', 'visited', )
        class_parser = ClassPropertyParser(class_set=set())
        for i, valid_input in enumerate(valid_inputs):
            class_parser.set_pseudo_class(css_class=valid_input)
            self.assertEqual(class_parser.pseudo_class, expected_classes[i])

    def test_get_pseudo_class_ValueError(self):
        class_parser = ClassPropertyParser(class_set=set())
        self.assertRaises(ValueError, class_parser.set_pseudo_class, '')

    def test_get_pseudo_element(self):
        valid_inputs = ('color-blue-after', 'padding-10rem-i-before', 'bgc-h048-selection', 'color-hfff-before')
        expected_elements = ('after', 'before', 'selection', 'before')
        class_parser = ClassPropertyParser(class_set=set())
        for i, valid_input in enumerate(valid_inputs):
            class_parser.set_pseudo_element(css_class=valid_input)
            self.assertEqual(class_parser.pseudo_element, expected_elements[i])

    def test_get_pseudo_element_ValueError(self):
        class_parser = ClassPropertyParser(class_set=set())
        self.assertRaises(ValueError, class_parser.set_pseudo_element, '')

    def test_strip_property_name_matching(self):
        property_name = 'font-weight'
        encoded_property_value = 'font-weight-400'
        expected_encoded_property_value = '400'
        class_parser = ClassPropertyParser(class_set=set())
        encoded_property_value = class_parser.strip_property_name(
            property_name=property_name,
            css_class=encoded_property_value
        )
        self.assertEqual(encoded_property_value, expected_encoded_property_value)

    def test_strip_property_name_not_matching(self):
        property_name = 'font-weight'
        encoded_property_value = 'bold'
        css_class = 'bold'
        class_parser = ClassPropertyParser(class_set=set())
        encoded_property_value = class_parser.strip_property_name(
            property_name=property_name,
            css_class=encoded_property_value
        )
        self.assertEqual(encoded_property_value, css_class)

    def test_strip_property_name_empty(self):
        empty_property_name = ''
        css_class = 'bold'
        class_parser = ClassPropertyParser(class_set=set())
        self.assertRaises(ValueError, class_parser.strip_property_name, empty_property_name, css_class)

    def test_strip_pseudo_item(self):
        pseudo_items = ('hover', 'before', 'selection', )
        css_classes = ('padding-10-i-hover', 'color-hfff-before', 'width-1rem-s-selection', )
        expected = ('padding-10-i', 'color-hfff', 'width-1rem-s', )
        class_parser = ClassPropertyParser(class_set=set())
        for i, pseudo_item in enumerate(pseudo_items):
            actual = class_parser.strip_pseudo_item(css_class=css_classes[i])
            self.assertEqual(expected[i], actual)

    def test_strip_pseudo_item_not_matching(self):
        css_class = 'padding-10-i'
        class_parser = ClassPropertyParser(class_set=set())
        result = class_parser.strip_pseudo_item(css_class=css_class)
        self.assertEqual(result, css_class)     # css_class should remain unchanged.

    def test_strip_pseudo_item_empty(self):
        empty_class = ''
        class_parser = ClassPropertyParser(class_set=set())
        self.assertRaises(ValueError, class_parser.strip_pseudo_item, empty_class)

    def test_strip_encoded_property_name_valueerror(self):
        invalid_inputs = ['', '   ']
        class_parser = ClassPropertyParser(class_set=set())
        for invalid in invalid_inputs:
            self.assertRaises(ValueError, class_parser.strip_property_name, invalid, 'c-lime')
            self.assertRaises(ValueError, class_parser.strip_property_name, 'color', invalid)

    def test_alias_is_abbreviation(self):
        expected_true = ['fw-', 'p-', 'h-', 'w-']
        expected_false = ['fw', 'p', 'height', 'width']
        class_parser = ClassPropertyParser(class_set=set())
        for _true in expected_true:
            self.assertTrue(class_parser.alias_is_abbreviation(_true), msg=_true)
        for _false in expected_false:
            self.assertFalse(class_parser.alias_is_abbreviation(_false), msg=_false)

    def test_get_property_abbreviations(self):
        expected_abbreviations = ['fweight-', 'f-weight-', 'fw-', 'font-w-']
        property_name = 'font-weight'
        class_parser = ClassPropertyParser(class_set=set())
        abbreviations = class_parser.get_property_abbreviations(property_name=property_name)
        self.assertEqual(set(abbreviations), set(expected_abbreviations))

    def test_get_property_abbreviations_raises_key_error(self):
        invalid_property_name = 'invalid'
        class_parser = ClassPropertyParser(class_set=set())
        self.assertRaises(KeyError, class_parser.get_property_abbreviations, invalid_property_name)

    def test_strip_property_abbreviation_matching(self):
        property_name = 'font-weight'
        css_class = 'fw-400'
        expected_encoded_property_value = '400'
        class_parser = ClassPropertyParser(class_set=set())
        css_class = class_parser.strip_property_abbreviation(
            property_name=property_name,
            css_class=css_class
        )
        self.assertEqual(css_class, expected_encoded_property_value)

    def test_strip_property_abbreviation_not_matching(self):
        property_name = 'font-weight'
        css_class = 'bold'
        expected_encoded_property_value = 'bold'
        class_parser = ClassPropertyParser(class_set=set())
        css_class = class_parser.strip_property_abbreviation(
            property_name=property_name,
            css_class=css_class
        )
        self.assertEqual(css_class, expected_encoded_property_value)

    def test_strip_property_abbreviation_raises_value_error(self):
        invalid_inputs = ['', '   ']
        class_parser = ClassPropertyParser(class_set=set())
        for invalid in invalid_inputs:
            self.assertRaises(ValueError, class_parser.strip_property_abbreviation, invalid, 'c-lime')
            self.assertRaises(ValueError, class_parser.strip_property_abbreviation, 'color', invalid)

    def test_get_encoded_property_value(self):
        # 'fw-bold-i' --> 'bold'                [abbreviated font-weight property_name]
        # 'padding-1-10-10-5-i' --> '1-10-10-5' [standard property_name]
        # 'height-7_25rem-i' --> '7_25rem'      [contains underscores]
        property_names = [
            'font-weight', 'padding', 'height', 'width', 'background-color',
            'color', 'color', 'color', 'color',
        ]
        css_classes = [
            'fw-bold-i', 'padding-1-10-10-5-i', 'height-7_25rem-i', 'width-50cm-s-i', 'bgc-red-i-hover',
            'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i', 'color-hfff-i-hover',
        ]
        expected_encoded_property_values = [
            'bold', '1-10-10-5', '7_25rem', '50cm-s', 'red',
            'hfff', 'hfff', 'hfff', 'hfff',
        ]
        class_parser = ClassPropertyParser(class_set=set())
        for i, css_class in enumerate(css_classes):
            encoded_property_value = class_parser.get_encoded_property_value(
                property_name=property_names[i],
                css_class=css_class
            )
            self.assertEqual(encoded_property_value, expected_encoded_property_values[i], msg=encoded_property_value)

    def test_get_encoded_property_value_invalid_raise_value_error(self):
        invalid_inputs = ['', '   ']
        class_parser = ClassPropertyParser(class_set=set())
        for invalid in invalid_inputs:
            self.assertRaises(ValueError, class_parser.get_encoded_property_value, invalid, 'c-lime')
            self.assertRaises(ValueError, class_parser.get_encoded_property_value, 'color', invalid)

    def test_get_property_value_valid_patterns(self):
        property_name = 'color'
        encoded_property_values = (
            'green', 'h0ff48f', 'hfff', 'rgba-255-0-0-0_5', 'hsla-120-60p-70p-0_3', 'blue', 'hf8f8f8',
        )
        expected_property_values = (
            'green', '#0ff48f', '#fff', 'rgba(255, 0, 0, 0.5)', 'hsla(120, 60%, 70%, 0.3)', 'blue', '#f8f8f8',
        )
        for i, value in enumerate(encoded_property_values):
            css_class = property_name + '-' + value
            class_parser = ClassPropertyParser(class_set={css_class})
            property_value = class_parser.get_property_value(property_name=property_name, encoded_property_value=value)
            self.assertEqual(property_value, expected_property_values[i])
            self.assertEqual(class_parser.class_set, {css_class})

    # Invalid CSS patterns that can be returned by this method.
    def test_get_property_value_invalid_patterns(self):
        property_name = 'color'
        encoded_property_values = ['bold-50', '5u5', 'b1-a5-c1p-e5', '5pxrem', '1ap-10xp-3qp-1mp3', 'p12px']
        expected_values = ['bold 50', '5u5', 'b1 a5 c1% e5', '5pxrem', '1a% 10x% 3q% 1mp3', 'p12px']
        for i, value in enumerate(encoded_property_values):
            css_class = property_name + '-' + value
            class_parser = ClassPropertyParser(class_set={css_class})
            property_value = class_parser.get_property_value(property_name=property_name, encoded_property_value=value)
            self.assertEqual(property_value, expected_values[i])

    def test_get_property_value_invalid_raise_value_error(self):
        invalid_inputs = ['', '   ']
        class_parser = ClassPropertyParser(class_set=set())
        for invalid in invalid_inputs:
            self.assertRaises(ValueError, class_parser.get_property_value, invalid, 'c-lime')
            self.assertRaises(ValueError, class_parser.get_property_value, 'color', invalid)

    def test_is_important_True(self):
        expected_true = ('p-10-i', 'c-green-i-hover', 'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i',)
        class_parser = ClassPropertyParser(class_set=set())
        for valid in expected_true:
            self.assertTrue(class_parser.is_important(css_class=valid), msg=valid)

    def test_is_important_False(self):
        expected_false = 'height-50'
        class_parser = ClassPropertyParser(class_set=set())
        self.assertFalse(class_parser.is_important(css_class=expected_false))

    def test_is_important_raise_value_error(self):
        invalid_inputs = ['', '   ']
        class_parser = ClassPropertyParser(class_set=set())
        for invalid in invalid_inputs:
            self.assertRaises(ValueError, class_parser.is_important, invalid)

    def test_strip_priority_designator(self):
        important = 'p-10-i'
        not_important = 'p-10'
        expected_value = 'p-10'
        class_parser = ClassPropertyParser(class_set=set())
        value = class_parser.strip_priority_designator(css_class=important)        # important
        self.assertEqual(value, expected_value)
        value = class_parser.strip_priority_designator(css_class=not_important)    # not important
        self.assertEqual(value, expected_value)

    def test_strip_priority_designator_raise_value_error(self):
        invalid_inputs = ['', '   ']
        class_parser = ClassPropertyParser(class_set=set())
        for invalid in invalid_inputs:
            self.assertRaises(ValueError, class_parser.strip_priority_designator, invalid)

    def test_get_property_priority_important(self):
        expected_property_priority = 'important'
        class_set = {
            'font-weight-bold-i', 'font-weight-700-i', 'bold-i', 'normal-i-hover', 'padding-10-i-after',
            'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i',
        }
        class_parser = ClassPropertyParser(class_set=class_set)
        for css_class in class_parser.class_set:
            property_priority = class_parser.get_property_priority(css_class=css_class)
            self.assertEqual(property_priority, expected_property_priority)

    def test_get_property_priority_not_important(self):
        expected_property_priority = ''
        class_set = {'font-weight-bold', 'font-weight-700', 'bold', 'normal-hover', 'padding-10-after', }
        class_parser = ClassPropertyParser(class_set=class_set)
        for css_class in class_parser.class_set:
            property_priority = class_parser.get_property_priority(css_class=css_class)
            self.assertEqual(property_priority, expected_property_priority)

    def test_get_property_priority_raise_value_error(self):
        invalid_inputs = ['', '   ']
        class_parser = ClassPropertyParser(class_set=set())
        for invalid in invalid_inputs:
            self.assertRaises(ValueError, class_parser.get_property_priority, invalid)
# Allow the test module to be run directly (``python test_classPropertyParser.py``).
if __name__ == '__main__':
    main()
| 2.765625 | 3 |
tests/settings.py | DamnedScholar/django-sockpuppet | 371 | 12762775 | """
Django settings for example project.
"""
import os
from pathlib import Path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = Path.cwd()
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "a_not_so_secret_key"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sockpuppet',
'tests.example',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ASGI_APPLICATION = 'sockpuppet.routing.application'
WSGI_APPLICATION = 'tests.example.wsgi.application'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels.layers.InMemoryChannelLayer"
}
}
# CHANNEL_LAYERS = {
# "default": {
# "BACKEND": "channels_redis.core.RedisChannelLayer",
# "CONFIG": {
# "hosts": [("127.0.0.1", 6379)],
# },
# },
# }
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'handlers': ['console'],
'level': 'DEBUG'
},
'handlers': {
'console': {
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'sockpuppet': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'loggers': {
'django.db.backends': {
# uncomment to see all queries
# 'level': 'DEBUG',
'handlers': ['console'],
},
'sockpuppet': {
'level': 'DEBUG',
'handlers': ['sockpuppet']
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATICFILES_DIRS = [
('js', '{}/jsdist/js/'.format(BASE_DIR)),
]
| 2.0625 | 2 |
ner_dataset.py | benjimor/NER-Dataset | 0 | 12762776 | import argparse
import codecs
import csv
from utils.sparql_queries import load_dataset, clean_dataset
from utils.sparql_queries import get_uri_suffix, exec_ner_query
import filepath_config as RDFfiles
CSV_DELIMITER = '|'
# Delimiter characters embedded in values are replaced with this.
REPLACE_DELIMITER = ' '


def results_to_csv(results, filename):
    """Write a Jena-style SPARQL result set to *filename* as delimited CSV.

    Each row holds the entity URI, its local name, the (optional) label with
    its language tag, and the entity's type URI plus local name.  Rows that
    raise while being extracted are skipped (best-effort export).
    """
    # The csv module requires newline='' so the writer controls line endings;
    # codecs.open() (used previously) cannot disable newline translation and
    # could emit '\r\r\n' on Windows.
    with open(filename, "w", newline="") as fp:
        writer = csv.writer(fp, delimiter=CSV_DELIMITER)
        header = ["entity", "entity_local_name", "label", "language", "type", "type_local_name"]
        writer.writerow(header)
        while results.hasNext():
            row = []
            try:
                next_result = results.next()
                entity = next_result.get("?entity").toString().replace(CSV_DELIMITER, REPLACE_DELIMITER)
                row.append(entity)
                row.append(get_uri_suffix(entity))
                label = next_result.get("?label")
                language = 'undefined'
                if label:
                    label = label.toString().replace(CSV_DELIMITER, REPLACE_DELIMITER)
                    label, language = split_string_lang(label)
                row.append(label)
                row.append(language)
                typ = next_result.get("?type").toString().replace(CSV_DELIMITER, REPLACE_DELIMITER)
                row.append(typ)
                row.append(get_uri_suffix(typ))
                writer.writerow(row)
            except Exception:
                # Best-effort: a malformed binding must not abort the whole
                # export.  NOTE(review): consider logging the skipped row.
                continue
def split_string_lang(obj):
    """Split an RDF literal string into ``(text, language)``.

    Strips surrounding double quotes and the ``^^...XMLSchema#string``
    datatype suffix; the language defaults to ``'undefined'`` when no
    ``@lang`` tag is present.

    Uses ``rsplit('@', 1)`` so only the trailing language tag is split off;
    a plain ``split('@')`` mangled labels that themselves contain '@'
    (e.g. e-mail addresses), returning a truncated string and a bogus
    language.
    """
    splitted_obj = obj.replace('"', '').rsplit('@', 1)
    string = splitted_obj[0].replace('^^http://www.w3.org/2001/XMLSchema#string', '')
    language = 'undefined'
    if len(splitted_obj) > 1:
        language = splitted_obj[1]
    return string, language
def dbpedia_ner_dataset():
    """Load the DBpedia label/type dumps, run the NER query and dump a CSV.

    NOTE(review): each ``load_dataset`` call appears to accumulate triples
    into a shared store (only the last return value is queried afterwards) —
    confirm against ``utils.sparql_queries``.
    """
    source_files = (
        RDFfiles.DBPEDIA_LABELS_EN,
        RDFfiles.DBPEDIA_TRANSITIVE_TYPES_EN,
        RDFfiles.DBPEDIA_LABELS_FR,
        RDFfiles.DBPEDIA_TRANSITIVE_TYPES_FR,
    )
    for path in source_files:
        dataset = load_dataset(path)
    results = exec_ner_query(dataset)
    results_to_csv(results, "ner_dbpedia.csv")
def main():
    """Entry point: clean the triple store, build the NER CSV, clean again."""
    # NOTE(review): the parser is built but ``parse_args()`` is never called,
    # so '--help' and argument validation do not actually work — confirm
    # whether argument parsing was intended here.
    argparse.ArgumentParser(prog='ner-dataset', description='Transform rdf dataset into a dataset for NER')
    clean_dataset()
    dbpedia_ner_dataset()
    clean_dataset()


if __name__ == "__main__":
    main()
glamod-parser/glamod/parser/file_parser.py | GLAMOD-test/glamod-dm | 0 | 12762777 | '''
Created on 01 Oct 2018
@author: <NAME>
'''
import logging
import pandas
from glamod.parser.settings import (INPUT_ENCODING, INPUT_DELIMITER,
VERBOSE_LOGGING, CHUNK_SIZE)
logger = logging.getLogger(__name__)
class FileParser(object):
    """Thin wrapper around a delimited text file with a single header line.

    Keeps one open file handle for its lifetime; ``rewind`` repositions it
    so pandas readers (``get_subset_dataframe``/``read_chunks``) re-read
    the file — including the header — from the start.  Call ``close`` (or
    rely on the owner) to release the handle.
    """

    def __init__(self, fpath, delimiter=INPUT_DELIMITER, encoding=INPUT_ENCODING):
        # The handle stays open; the header is consumed immediately so
        # column names are available without re-reading.
        self.fpath = fpath
        self.delimiter = delimiter
        self.encoding = encoding
        self._fh = open(fpath, 'r', encoding=self.encoding)
        self._col_names = self._parse_header()

    def rewind(self, to_line=0):
        "Sets the seek position at the start of the file (then skips *to_line* lines)."
        self._fh.seek(0)
        if to_line > 0:
            for _ in range(to_line):
                self._fh.readline()

    def _parse_header(self):
        # Must only be called with the handle at the start of the file.
        assert(self._fh.tell() == 0)
        return self.readline()

    def readline(self):
        "Reads next line and splits on delimiter (fields are whitespace-stripped)."
        return [_.strip() for _ in self._fh.readline().rstrip().split(self.delimiter)]

    def get_column_names(self):
        """Return the column names parsed from the header line."""
        return self._col_names

    def get_subset_dataframe(self, convertors=None, columns=None):
        """Read the whole file into a DataFrame, optionally restricted to *columns*.

        pandas re-parses the header itself after the rewind to position 0.
        """
        self.rewind()
        df = pandas.read_csv(self._fh, encoding=self.encoding,
                             delimiter=self.delimiter,
                             converters=convertors, usecols=columns,
                             skipinitialspace=True, verbose=VERBOSE_LOGGING)
        return df

    def read_chunks(self, convertors=None):
        """Yield the file as DataFrame chunks of CHUNK_SIZE rows each."""
        self.rewind()
        chunk_reader = pandas.read_csv(self._fh, encoding=self.encoding,
                                       delimiter=self.delimiter,
                                       converters=convertors, skipinitialspace=True,
                                       verbose=VERBOSE_LOGGING, chunksize=CHUNK_SIZE)

        for chunk in chunk_reader:
            yield chunk

    def close(self):
        """Close the underlying file handle; the parser is unusable afterwards."""
        self._fh.close()
| 2.625 | 3 |
apps/cal/views.py | mozilla/sheriffs | 0 | 12762778 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla Sheriff Duty.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME> <<EMAIL>>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import datetime
from collections import defaultdict
from django.shortcuts import redirect
from django import http
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db.models import Max, Min
from django.shortcuts import render
from django.contrib.auth.models import User
from roster.models import Slot
def handler404(request):
    """Render the project's custom 404 template with a 404 status code."""
    return render(request, '404.html', {}, status=404)
def home(request):
    """Main calendar view.

    GET parameters drive navigation: ``cal_today`` jumps back to the current
    month, ``cal_month`` (``YYYY,MM``) selects an absolute month, and
    ``cal_m_diff`` moves relative to the currently shown ``cal_y``/``cal_m``.
    Otherwise renders the duty calendar plus the "on duty next" strip and
    the signed-in user's own duty dates.
    """
    # special shortcuts based on calendar filtering
    if request.GET.get('cal_today'):
        return redirect(reverse('cal.home'))
    elif request.GET.get('cal_month'):
        try:
            year, month = [int(x) for x
                           in request.GET['cal_month'].split(',')]
            as_date = datetime.date(year, month, 1)
        except ValueError:
            # Covers non-numeric input, a wrong number of comma-separated
            # values, and out-of-range month/year values.  (Previously a
            # bare ``except:`` that swallowed every exception, including
            # system ones such as KeyboardInterrupt.)
            raise http.Http404("Invalid month")
        return redirect(reverse('cal.home') +
                        '?cal_y=%s&cal_m=%s' % (as_date.year, as_date.month))
    elif request.GET.get('cal_m_diff'):
        # Relative month navigation, e.g. cal_m_diff=-1 for previous month.
        m_diff = int(request.GET.get('cal_m_diff'))
        cal_m = request.GET.get('cal_m')
        if cal_m:
            cal_m = int(cal_m)
        else:
            cal_m = datetime.date.today().month
        cal_y = request.GET.get('cal_y')
        if cal_y:
            cal_y = int(cal_y)
        else:
            cal_y = datetime.date.today().year
        if m_diff < 0 and (cal_m + m_diff) <= 0:
            # wrapped back past January: borrow a year
            cal_m += m_diff + 12
            cal_y -= 1
        elif m_diff > 0 and (cal_m + m_diff) > 12:
            # wrapped forward past December: carry a year
            cal_m += m_diff - 12
            cal_y += 1
        else:
            cal_m += m_diff
        return redirect(reverse('cal.home') +
                        '?cal_y=%s&cal_m=%s' % (cal_y, cal_m))

    data = {}  # template context
    on_duty_next = []
    today = datetime.date.today()

    def label(date):
        """Human-friendly label for a date near today."""
        if date == today - datetime.timedelta(days=1):
            return 'Yesterday'
        if date == today:
            return 'Today'
        if date == (today + datetime.timedelta(days=1)):
            return 'Tomorrow'
        return date.strftime('%A')

    def is_weekend(date):
        return date.strftime('%A') in ('Saturday', 'Sunday')

    # Window of day offsets shown in the "on duty next" strip.
    _days_back_and_forth = -1, 0, 1, 2, 3
    # to reduce queries, first make a collection of all slots across these days
    _all_slots = {}
    _all_user_ids = set()
    _min_date = today + datetime.timedelta(days=_days_back_and_forth[0])
    _max_date = today + datetime.timedelta(days=_days_back_and_forth[-1] + 1)
    for slot in (Slot.objects
                 .filter(date__gte=_min_date,
                         date__lt=_max_date)):
        _all_slots[slot.date] = slot
        _all_user_ids.add(slot.user_id)
    # to avoid having to do a JOIN in the query above, fetch all users once
    _all_users = {}
    for user in User.objects.filter(pk__in=_all_user_ids):
        _all_users[user.pk] = user

    for i in _days_back_and_forth:
        # (A vestigial ``extra_i`` offset, always 0, was removed here.)
        date = today + datetime.timedelta(days=i)
        remarks = []
        users = []
        try:
            slot = _all_slots[date]
            pk = slot.pk
            slot.user = _all_users[slot.user_id]
            users.append(slot.user)
            if slot.swap_needed:
                remarks.append('swap-needed')
            if slot.user == request.user:
                remarks.append('self')
        except KeyError:
            # No slot booked for this day.
            pk = None
            if date >= today:
                if is_weekend(date):
                    remarks.append('weekend')
                else:
                    remarks.append('offer-needed')
        if date == today:
            remarks.append('today')
        elif date < today:
            remarks.append('past')
        on_duty_next.append({
            'label': label(date),
            'users': users,
            'remarks': remarks,
            'pk': pk,
            'date': date,
        })

    my_duty_dates = []
    if request.user.is_authenticated():
        try:
            last_past = (Slot.objects
                         .filter(user=request.user, date__lt=today)
                         .order_by('-date'))[0]
            my_duty_dates.append({
                'pk': last_past.pk,
                'date': last_past.date,
                'remarks': ['last']
            })
        except IndexError:
            last_past = None
        # Show one fewer upcoming slot when a past one is already listed.
        next_slots = (Slot.objects
                      .filter(user=request.user, date__gte=today)
                      .order_by('date'))[:4 if last_past else 5]
        _first_next = None
        for slot in next_slots:
            remarks = []
            if _first_next is None:
                _first_next = slot.date
                remarks.append('next')
            my_duty_dates.append({
                'pk': slot.pk,
                'date': slot.date,
                'remarks': remarks
            })
    data['my_duty_dates'] = my_duty_dates

    data['on_duty_next'] = on_duty_next
    data['date_format'] = settings.DEFAULT_DATE_FORMAT

    month, year = request.GET.get('cal_m'), request.GET.get('cal_y')
    if month:
        month = int(month)
        data['cal_m'] = month
    if year:
        year = int(year)
        data['cal_y'] = year
    week = None
    if request.GET.get('cal_w'):
        week = int(request.GET.get('cal_w'))
        data['cal_w'] = week
    data['weeks'] = _get_calendar_data(year, month, week, request.user,
                                       sunday_first=True, weeks=6)
    data['month_options'] = _get_month_options(year, month, week, weeks=6)
    return render(request, 'cal/home.html', data)
class Dict(dict):
    """dict subclass exposing keys as attributes (``d.key`` == ``d['key']``).

    Missing keys raise ``AttributeError`` rather than ``KeyError`` so that
    ``getattr(obj, name, default)`` and ``hasattr`` work as expected — this
    module itself relies on ``getattr(week, 'month', None)`` in
    ``_get_calendar_data``, which would propagate the old ``KeyError``
    instead of returning the default.
    """

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            # The attribute protocol must signal absence with AttributeError.
            raise AttributeError(key)
def _get_month_options(year, month, week, weeks=5):
    """Build the list of selectable months for the calendar's month picker.

    Spans every month that has at least one scheduled ``Slot``; each entry
    is a ``Dict`` with a display ``label``, a ``value`` (``"YYYY,MM"``),
    whether it is visible on the currently shown calendar (``current``) and
    whether it is the anchor month (``today_month``).

    NOTE(review): if no Slot rows exist, ``min_date``/``max_date`` are None
    and the ``while d < max_date`` loop raises TypeError — confirm callers
    guarantee at least one slot.
    """
    # Bounds of all scheduled duty slots; the picker covers this range.
    min_date = Slot.objects.aggregate(Min('date'))['date__min']
    max_date = Slot.objects.aggregate(Max('date'))['date__max']
    # Anchor date of the calendar currently being displayed.
    if year is None or month is None:
        date = datetime.date.today()
    else:
        date = datetime.date(year, month, 1)
    if week:
        date += datetime.timedelta(days=week * 7)
    one_day = datetime.timedelta(days=1)
    # Collect every (month, year) visible on the rendered calendar grid.
    current_months = []
    first_on_calendar = date
    last_on_calendar = date + datetime.timedelta(days=7 * (weeks - 1))
    while first_on_calendar < last_on_calendar:
        first_on_calendar += one_day
        if (first_on_calendar.month,
            first_on_calendar.year) not in current_months:
            current_months.append((first_on_calendar.month,
                                   first_on_calendar.year))
    # Walk day by day from the first to the last slot, emitting one option
    # per distinct (month, year).
    d = min_date
    months = []
    done = []
    while d < max_date:
        if (d.month, d.year) not in done:
            done.append((d.month, d.year))
            current = today_month = False
            if (d.month, d.year) == (date.month, date.year):
                today_month = True
            if (d.month, d.year) in current_months:
                current = True
            month = {
                'label': d.strftime('%Y %B'),
                'current': current,
                'today_month': today_month,
                'value': d.strftime('%Y,%m'),
            }
            months.append(Dict(month))
        d += one_day
    return months
def _get_calendar_data(year, month, week, user, sunday_first=False, weeks=5):
    """Assemble the week rows rendered by the calendar template.

    Returns a list of ``Dict`` weeks; each week has ``days`` (date, remark
    strings such as 'past'/'unclaimed'/'today'/'self', and the day's slots)
    and, for the first week of each month, a ``month`` cell whose
    ``rowspan`` covers that month's weeks.
    """
    # Anchor date: today, or the requested (year, month), shifted by whole
    # weeks when a week offset is given.
    if year is None or month is None:
        date = datetime.date.today()
    else:
        date = datetime.date(year, month, 1)
    if week:
        date += datetime.timedelta(days=week * 7)
    no_weeks = weeks
    weeks = []  # rebound: now accumulates the output rows
    _months = []
    _rowspans = {}
    _is_authenticated = user.is_authenticated()
    _today = datetime.date.today()
    # the code below (with no_weeks==5) causes 100+ SQL queries so instead
    # we're going to use a dict to save lookups
    date_range = (
        date,
        date + datetime.timedelta(days=7 * no_weeks)
    )
    all_slots = defaultdict(list)
    unclaimed = []
    users = defaultdict(list)
    for slot in (Slot.objects
                 .filter(date__range=date_range)
                 .select_related('user')):
        all_slots[slot.date].append(slot)
        users[slot.date].append(slot.user)
        if slot.swap_needed:
            unclaimed.append(slot.date)
    while len(weeks) < no_weeks:
        days = []
        for day_date in _get_day_dates(date, sunday_first=sunday_first):
            remarks = []
            if day_date < _today:
                remarks.append('past')
            if day_date in unclaimed:
                remarks.append('unclaimed')
            elif day_date == _today:
                remarks.append('today')
            elif _is_authenticated and user in users.get(day_date, []):
                remarks.append('self')
            day = {'date': day_date,
                   'remarks': remarks,
                   'slots': all_slots[day_date]}
            days.append(Dict(day))
        week = {'days': days}
        # Emit a month cell only when the month changes from the previous
        # week; count rowspans so the cell spans all of its weeks.
        if not _months or (_months and date.month != _months[-1]):
            week['month'] = Dict({
                'label': date.strftime('%b'),
                'month_number': date.month,
                'rowspan': None,
            })
            _rowspans[date.month] = 0
            _months.append(date.month)
        else:
            _rowspans[date.month] += 1
            week['month'] = None
        weeks.append(Dict(week))
        date += datetime.timedelta(days=7)
    # Second pass: fill in the final rowspan for each month cell.
    for week in weeks:
        if getattr(week, 'month', None):
            week.month['rowspan'] = _rowspans[week.month.month_number] + 1
    return weeks
def _get_day_dates(this_date, sunday_first=False):
"""return 7 date instances that cover this week for any given date.
If this_date is a Wednesday, return
[<Monday's date>, <Tuesday's date>, this_date,
..., <Sunday's date>]
However, if @sunday_first==True return this:
[<Sunday's date>, <Monday's date>, <Tuesday's date>, this_date,
..., <Saturday's date>]
"""
if sunday_first and this_date.strftime('%A') == 'Sunday':
this_date += datetime.timedelta(days=1)
this_week = this_date.strftime('%W')
date = this_date - datetime.timedelta(days=7)
dates = []
while len(dates) < 7:
if date.strftime('%W') == this_week:
dates.append(date)
date += datetime.timedelta(days=1)
if sunday_first:
one_day = datetime.timedelta(days=1)
dates = [x - one_day for x in dates]
return dates
| 1.484375 | 1 |
"""Low-level driver for communicating with PurpleDrop via serial messages
"""
import inspect
import fnmatch
import logging
import queue
import struct
import serial
import serial.tools.list_ports
import sys
import threading
import time
from typing import Any, AnyStr, Callable, Dict, List, Optional, Sequence
from purpledrop.calibration import ElectrodeOffsetCalibration
from purpledrop.electrode_board import Board
import purpledrop.messages as messages
import purpledrop.protobuf.messages_pb2 as messages_pb2
from .messages import PurpleDropMessage, ElectrodeEnableMsg, SetPwmMsg
from .message_framer import MessageFramer, serialize
from .move_drop import move_drop, MoveDropResult
# Module-level logger shared by all classes in this driver
logger = logging.getLogger("purpledrop")

# Versions of purpledrop software supported by this driver
# (fnmatch-style patterns, checked by validate_version)
SUPPORTED_VERSIONS = [
    "v0.5.*",
]

# List of USB VID/PID pairs which will be recognized as a purpledrop
PURPLEDROP_VIDPIDS = [
    (0x02dd, 0x7da3),
    (0x1209, 0xCCAA),
]
def pinlist2bool(pins):
    """Expand a list of pin numbers into a boolean mask of length N_PINS.

    Raises:
        ValueError: if any pin number is out of range (>= N_PINS).
    """
    states = [False] * N_PINS
    for pin in pins:
        if pin >= N_PINS:
            raise ValueError(f"Pin {pin} is invalid. Must be < {N_PINS}")
        states[pin] = True
    return states
def pinlist2mask(pins):
    """Pack a list of pin numbers into a little-endian byte mask.

    Byte i, bit j is set when pin (8*i + j) appears in `pins`.
    """
    mask = [0] * ((N_PINS + 7) // 8)
    for pin in pins:
        byte_index, bit_index = divmod(pin, 8)
        mask[byte_index] |= (1 << bit_index)
    return mask
def validate_version(v):
    """Return True if version string `v` matches any supported-version pattern."""
    return any(fnmatch.fnmatch(v, pattern) for pattern in SUPPORTED_VERSIONS)
def resolve_msg_filter(filt):
    """Normalize a message filter into a callable predicate.

    A class is converted into an isinstance check; anything else is assumed
    to already be a predicate (or None) and is returned unchanged.
    """
    if not inspect.isclass(filt):
        return filt
    return lambda msg: isinstance(msg, filt)
def list_purpledrop_devices() -> List[serial.tools.list_ports_common.ListPortInfo]:
    """Enumerate serial ports that look like purpledrop devices.

    Returns:
        All com ports whose USB (VID, PID) pair appears in PURPLEDROP_VIDPIDS.
    """
    return [
        port for port in serial.tools.list_ports.comports()
        if (port.vid, port.pid) in PURPLEDROP_VIDPIDS
    ]
def get_pb_timestamp():
    """Build a protobuf Timestamp for the current system time."""
    now = time.time()
    ts = messages_pb2.Timestamp()
    ts.seconds = int(now)
    ts.nanos = int((now % 1) * 1e9)
    return ts
class PurpleDropRxThread(object):
    """Background thread that reads bytes from a serial port, reassembles
    framed purpledrop messages, and dispatches each parsed message to a
    callback.

    The thread is a daemon, so it will not keep the process alive. It stops
    itself on a serial read error (e.g. device unplugged), which the owner
    can detect via the `running` attribute.
    """
    def __init__(self, port: serial.Serial, callback: Optional[Callable[[PurpleDropMessage], None]]=None):
        self._thread = threading.Thread(target=self.run, name="PurpleDrop Rx", daemon=True)
        self._ser = port
        self._framer = MessageFramer(PurpleDropMessage.predictSize)
        self._callback = callback
        self.running = True

    def start(self):
        """Start the receive thread."""
        self._thread.start()

    def stop(self):
        """Request the receive loop to exit; it finishes after the current read."""
        self.running = False

    def join(self):
        """Block until the receive thread has exited."""
        self._thread.join()

    def run(self):
        """Receive loop: read chunks, frame them, and invoke the callback for
        every complete message.

        Exceptions raised by the callback are logged and swallowed so one bad
        handler cannot kill the RX thread.
        """
        while self.running:
            rx_bytes = None
            try:
                rx_bytes = self._ser.read(64)
            except serial.serialutil.SerialException as e:
                # Typically means the device was disconnected; shut down and
                # let the owner notice via `running`.
                # (logger.warn is deprecated; use logger.warning)
                logger.warning(f"Failed reading from port: {e}")
                self.running = False
                return
            if len(rx_bytes) > 0:
                for buf in self._framer.parse(rx_bytes):
                    if self._callback:
                        try:
                            self._callback(PurpleDropMessage.from_bytes(buf))
                        except Exception as e:
                            logger.exception(e)

    def set_callback(self, callback):
        """Replace the message callback (None disables dispatch)."""
        self._callback = callback
class SyncListener(object):
    """Blocking-style listener for purpledrop messages.

    Registers a delegate with the owning device; messages matching the
    filter are queued and retrieved with `wait()`.
    """
    class MsgDelegate(object):
        """Callable registered with the owner; enqueues matching messages."""
        def __init__(self, filter_func, fifo):
            self.filter = filter_func
            self.fifo = fifo

        def __call__(self, msg: PurpleDropMessage):
            # A filter of None accepts every message
            if self.filter is None or self.filter(msg):
                self.fifo.put(msg)

    def __init__(self, owner, msg_filter=None):
        self.owner = owner
        # msg_filter may be a message class or a predicate function
        self.filter = resolve_msg_filter(msg_filter)
        self.fifo: queue.Queue = queue.Queue()
        self.delegate = self.MsgDelegate(self.filter, self.fifo)

    def __del__(self):
        self.unregister()

    def unregister(self):
        """Remove this listener from the owner.

        Safe to call more than once: without the guard, an explicit
        unregister() followed by garbage collection (__del__ calls this
        again) raised ValueError from list.remove().
        """
        try:
            self.owner.unregister_listener(self.delegate)
        except ValueError:
            pass

    def get_msg_handler(self):
        """Return the callable that should be registered with the device."""
        return self.delegate

    def wait(self, timeout: Optional[float]=None) -> Optional[PurpleDropMessage]:
        """Block until the next matching message arrives; None on timeout."""
        try:
            return self.fifo.get(timeout=timeout)
        except queue.Empty:
            return None
class AsyncListener(object):
    """Callback-style listener: invokes `callback` for each message matching
    a filter.

    Instances are created via PurpleDropDevice.get_async_listener(); the
    delegate returned by get_msg_handler() is the object actually registered
    with the owning device.
    """
    class MsgDelegate(object):
        # Small callable wrapper so the owner can register/unregister a
        # stable object identity independent of the listener itself.
        def __init__(self, filter_func, callback):
            self.filter = filter_func
            self.callback = callback

        def __call__(self, msg: PurpleDropMessage):
            # A filter of None accepts every message
            if self.filter is None or self.filter(msg):
                self.callback(msg)

    def __init__(self, owner, callback, msg_filter=None):
        self.owner = owner
        self.callback = callback
        # msg_filter may be a message class or a predicate function
        self.filter = resolve_msg_filter(msg_filter)
        self.delegate = self.MsgDelegate(self.filter, callback)

    def __del__(self):
        # Deregister from the owner when this listener is garbage collected
        self.owner.unregister_listener(self.delegate)

    def get_msg_handler(self):
        """Return the callable that should be registered with the device."""
        return self.delegate
class PurpleDropDevice():
    """Low level messaging for controlling a PurpleDrop via a serial port

    Use `list_purpledrop_devices()` to find devices based on their USB VID/PID
    and serial number. Then provide the com port (e.g. `/dev/ttyACM0`) when
    instantiating a PurpleDropDevice.

    NOTE: This class provides low level control of the device. For most uses,
    you should be using PurpleDropControl which provides higher level
    functionality and matches the JSON-RPC methods provided by `pd-server`.
    """
    def __init__(self, port=None):
        # RX thread and serial handle are created on open()
        self._rx_thread = None
        self._ser = None
        # Guards both serial writes and the listeners list
        self.lock = threading.Lock()
        self.listeners = []
        self.__connected_callbacks: List[Callable] = []
        self.__disconnected_callbacks: List[Callable] = []
        if port is not None:
            self.open(port)

    def register_connected_callback(self, callback: Callable):
        """Register a function to be invoked whenever the device is opened."""
        self.__connected_callbacks.append(callback)

    def register_disconnected_callback(self, callback: Callable):
        """Register a function to be invoked whenever the device is closed."""
        self.__disconnected_callbacks.append(callback)

    def open(self, port):
        """Open the given serial port, start the RX thread, and notify
        connected callbacks.
        """
        logger.debug(f"PurpleDropDevice: opening {port}")
        self._ser = serial.Serial(port, timeout=0.01, write_timeout=0.5)
        self._rx_thread = PurpleDropRxThread(self._ser, callback=self.message_callback)
        self._rx_thread.start()
        for cb in self.__connected_callbacks:
            cb()

    def close(self):
        """Stop the RX thread, close the serial port, and notify
        disconnected callbacks. Safe to call when not open.
        """
        logger.debug("Closing PurpleDropDevice")
        if self._rx_thread is not None:
            self._rx_thread.stop()
            self._rx_thread.join()
        if self._ser is not None:
            self._ser.close()
        for cb in self.__disconnected_callbacks:
            cb()

    def connected(self):
        """True when the port is open and the RX thread is still running."""
        return self._ser is not None and \
            self._rx_thread is not None and \
            self._rx_thread.running

    def unregister_listener(self, listener):
        """Remove a previously registered message handler (delegate)."""
        with self.lock:
            self.listeners.remove(listener)

    def get_sync_listener(self, msg_filter=None) -> SyncListener:
        """Create and register a blocking listener for matching messages."""
        new_listener = SyncListener(owner=self, msg_filter=msg_filter)
        with self.lock:
            self.listeners.append(new_listener.get_msg_handler())
        return new_listener

    def get_async_listener(self, callback, msg_filter=None) -> AsyncListener:
        """Create and register a callback-based listener for matching messages."""
        new_listener = AsyncListener(owner=self, callback=callback, msg_filter=msg_filter)
        with self.lock:
            self.listeners.append(new_listener.get_msg_handler())
        return new_listener

    def send_message(self, msg: PurpleDropMessage):
        """Serialize, frame, and write a message to the device."""
        tx_bytes = serialize(msg.to_bytes())
        with self.lock:
            self._ser.write(tx_bytes)

    def message_callback(self, msg: PurpleDropMessage):
        # Fan out every received message to all registered listeners
        # (called from the RX thread)
        with self.lock:
            for handler in self.listeners:
                handler(msg)
class PersistentPurpleDropDevice(PurpleDropDevice):
    """A wrapper for PurpleDropDevice that transparently tries to
    connect/reconnect to a device.

    If a serial number is provided, it will only connect to that serial
    number. Otherwise, it will connect to any purpledrop detected (and may
    choose one arbitrarily if there are multiple).
    """
    def __init__(self, serial_number: Optional[str]=None):
        super().__init__()
        self.target_serial_number: Optional[str] = serial_number
        self.device_info: Optional[Any] = None
        # Daemon monitor thread polls for device presence every 5 seconds
        self.__thread = threading.Thread(
            name="PersistentPurpleDropDevice Monitor",
            target=self.__thread_entry,
            daemon=True)
        self.__thread.start()

    def connected_serial_number(self) -> Optional[str]:
        """Returns the serial number of the connected device, or None when
        no device is connected.
        """
        if self.device_info is None:
            return None
        else:
            return self.device_info.serial_number

    def __try_to_connect(self) -> bool:
        """Attempt a single connection. Returns True on success."""
        device_list = list_purpledrop_devices()
        selected_device = None
        if len(device_list) == 0:
            logger.debug("No purpledrop devices found to connect to")
            return False
        if self.target_serial_number:
            for device in device_list:
                if device.serial_number == self.target_serial_number:
                    selected_device = device
        else:
            selected_device = device_list[0]
        if selected_device is None:
            serial_numbers = [d.serial_number for d in device_list]
            # logger.warn is deprecated -> logger.warning; also fixed the
            # unbalanced parenthesis in the message text
            logger.warning(f"Found purpledrop, but not connecting because it has unexpected serial number ({serial_numbers})")
            return False
        self.device_info = selected_device
        self.open(selected_device.device)
        logger.warning(f"Connected to purpledrop {selected_device.serial_number} on {selected_device.device}")
        return True

    def __thread_entry(self):
        """Monitor loop: close a dead connection and retry every 5 seconds.

        `status` remembers whether we were connected on the previous pass so
        close() (and its disconnect callbacks) only fires once per drop-out,
        not on every poll while disconnected.
        """
        status = False
        while True:
            if not self.connected():
                if status:
                    logger.warning("Closing purpledrop device")
                    self.close()
                    self.device_info = None
                    status = False
                logger.debug("Attempting to connect to purpledrop")
                status = self.__try_to_connect()
            time.sleep(5.0)
# Number of electrode pins on the purpledrop
N_PINS = 128
# Number of bytes in a packed pin bitmask (N_PINS / 8)
N_MASK_BYTES = 16

# Compute coefficients to convert integrated voltage to integrated charge
# These values are nominal calculated values, not calibrated in any way
# Divide by the voltage to get farads.
# First stage gain
GAIN1 = 2.0
# Integrator gain (Vout per integrated input V*s)
GAIN2 = 25000.0
# Output stage gain
GAIN3 = 22.36
# Sense resistances for high/low gain
RLOW = 33.0
RHIGH = 220.0
# Overall conversion factors; the 4096 / 3.3 term converts volts to
# ADC counts (12-bit ADC, 3.3V reference — presumably; confirm vs hardware)
CAPGAIN_HIGH = RHIGH * GAIN1 * GAIN2 * GAIN3 * 4096. / 3.3
CAPGAIN_LOW = RLOW * GAIN1 * GAIN2 * GAIN3 * 4096. / 3.3
class PinState(object):
    """Local mirror of the purpledrop pin configuration.

    Holds the two electrode drive groups (active pins + PWM duty cycle) and
    the five capacitance scan groups (pins + gain setting).
    """
    N_DRIVE_GROUPS = 2
    N_SCAN_GROUPS = 5

    class PinGroup(object):
        """A boolean pin mask paired with one group-wide setting value."""
        def __init__(self, pin_mask: Sequence[int], setting: int):
            self.pin_mask = pin_mask
            self.setting = setting

    class DriveGroup(PinGroup):
        """Drive group: `setting` holds the PWM duty cycle (0-255)."""
        def __init__(self, pin_mask=None, duty_cycle=255):
            super().__init__(
                pin_mask if pin_mask is not None else pinlist2bool([]),
                duty_cycle)

        @property
        def duty_cycle(self):
            # Readable alias for the generic `setting` attribute
            return self.setting

        @duty_cycle.setter
        def duty_cycle(self, dc):
            self.setting = dc

        def to_dict(self):
            return {'pins': self.pin_mask, 'duty_cycle': self.duty_cycle}

    class ScanGroup(PinGroup):
        """Capacitance scan group: `setting` selects the measurement gain."""
        def __init__(self, pin_mask=None, setting=0):
            super().__init__(
                pin_mask if pin_mask is not None else pinlist2bool([]),
                setting)

        def to_dict(self):
            return {'pins': self.pin_mask, 'setting': self.setting}

    def __init__(self):
        self.drive_groups = [self.DriveGroup() for _ in range(self.N_DRIVE_GROUPS)]
        self.scan_groups = [self.ScanGroup() for _ in range(self.N_SCAN_GROUPS)]

    def to_dict(self):
        """Serialize the full pin state for event/RPC reporting."""
        return {
            'drive_groups': [group.to_dict() for group in self.drive_groups],
            'scan_groups': [group.to_dict() for group in self.scan_groups],
        }
class PurpleDropController(object):
# Define the method names which will be made available via RPC server
RPC_METHODS = [
'get_board_definition',
'get_parameter_definitions',
'get_parameter',
'set_parameter',
'get_bulk_capacitance',
'get_scan_capacitance',
'get_group_capacitance',
'get_active_capacitance',
'set_capacitance_group',
'set_electrode_pins',
'get_electrode_pins',
'set_feedback_command',
'move_drop',
'get_temperatures',
'set_pwm_duty_cycle',
'get_hv_supply_voltage',
'calibrate_capacitance_offset',
'get_device_info',
'read_gpio',
'write_gpio',
'set_scan_gains',
'get_scan_gains',
'set_electrode_calibration',
]
def __init__(self, purpledrop, board_definition: Board, electrode_calibration: Optional[ElectrodeOffsetCalibration]=None):
self.purpledrop = purpledrop
self.board_definition = board_definition
self.active_capacitance = 0.0
self.electrode_calibration = electrode_calibration
self.raw_scan_capacitance: List[float] = []
self.calibrated_scan_capacitance: List[float] = []
self.raw_group_capacitance: List[float] = []
self.calibrated_group_capacitance: List[float] = []
self.scan_gains = [1.0] * N_PINS
self.temperatures: Sequence[float] = []
self.duty_cycles: Dict[int, float] = {}
self.hv_supply_voltage = 0.0
self.parameter_list: List[dict] = []
self.lock = threading.Lock()
self.event_listeners: List[Callable] = []
self.active_capacitance_counter = 0
self.group_capacitance_counter = 0
self.duty_cycle_updated_counter = 0
self.hv_regulator_counter = 0
self.pin_state = PinState()
def msg_filter(msg):
desired_types = [
messages.ActiveCapacitanceMsg,
messages.BulkCapacitanceMsg,
messages.CommandAckMsg,
messages.DutyCycleUpdatedMsg,
messages.TemperatureMsg,
messages.HvRegulatorMsg,
]
for t in desired_types:
if isinstance(msg, t):
return True
return False
if self.purpledrop.connected():
self.__on_connected()
self.purpledrop.register_connected_callback(self.__on_connected)
self.purpledrop.register_disconnected_callback(self.__on_disconnected)
self.listener = self.purpledrop.get_async_listener(self.__message_callback, msg_filter)
def __on_connected(self):
self.__set_scan_gains()
self.__get_parameter_descriptors()
software_version = self.get_software_version()
if not validate_version(software_version):
logger.error(f"Unsupported software version '{software_version}'. This driver may not" + \
"work correcly, and you should upgrade your purpledrop firmware to one of the following: " + \
f"{SUPPORTED_VERSIONS}")
self.__send_device_info_event(
True,
self.purpledrop.connected_serial_number() or '',
software_version or ''
)
if self.electrode_calibration is not None:
logger.info("Loading electrode calibration")
self.set_electrode_calibration(self.electrode_calibration.voltage, self.electrode_calibration.offsets)
def __on_disconnected(self):
self.__send_device_info_event(False, '', '')
def __send_device_info_event(self, connected: bool, serial_number: str, software_version: str):
event = messages_pb2.PurpleDropEvent()
event.device_info.connected = connected
event.device_info.serial_number = serial_number
event.device_info.software_version = software_version
self.__fire_event(event)
def __get_parameter_descriptors(self):
"""Request and receive the list of parameters from device
"""
listener = self.purpledrop.get_sync_listener(messages.ParameterDescriptorMsg)
self.purpledrop.send_message(messages.ParameterDescriptorMsg())
descriptors = []
while True:
msg = listener.wait(timeout=1.0)
if msg is None:
logger.error("Timed out waiting for parameter descriptors")
break
descriptors.append({
'id': msg.param_id,
'name': msg.name,
'description': msg.description,
'type': msg.type,
})
if msg.sequence_number == msg.sequence_total - 1:
break
self.parameter_list = descriptors
def __set_scan_gains(self, gains: Sequence[bool]=None):
"""Setup gains used for capacitance scan
If no gains are provided, the gains will be set based on the "oversized"
electrodes defined in the active board definition. Any oversized
electrodes are set to low gain, and the rest to high gain.
Args:
gains: A list of booleans. True indicates low gain should be used for
the corresponding electrode
"""
if gains is None:
gains = [False] * N_PINS
for pin in self.board_definition.oversized_electrodes:
gains[pin] = True # low gain
self.scan_gains = list(map(lambda x: CAPGAIN_LOW if x else CAPGAIN_HIGH, gains))
msg = messages.SetGainMsg()
msg.gains = list(map(lambda x: 1 if x else 0, gains))
listener = self.purpledrop.get_sync_listener(messages.CommandAckMsg)
self.purpledrop.send_message(msg)
ack = listener.wait(timeout=1.0)
if ack is None:
logger.error("Got no ACK for SetGains message")
def __calibrate_capacitance(self, raw, gain):
# Can't measure capacitance unless high voltage is on
if self.hv_supply_voltage < 60.0:
return 0.0
# Return as pF
return raw * 1e12 / gain / self.hv_supply_voltage
def __message_callback(self, msg):
if isinstance(msg, messages.ActiveCapacitanceMsg):
# TODO: I-sense resistor values are adjustable, and the
# CAPGAIN_HIGH/CAPGAIN_LOW should be gotten from the device at some
# point, rather than duplicated here
capgain = CAPGAIN_LOW if (msg.settings & 1 == 1) else CAPGAIN_HIGH
self.active_capacitance = self.__calibrate_capacitance(msg.measurement - msg.baseline, capgain)
self.active_capacitance_counter += 1
# Throttle the events. 500Hz messages is a lot for the browser to process.
# This also means logs don't have a full resolution, and it would be better
# if clients could choose what they get
if (self.active_capacitance_counter % 10) == 0:
cap_event = messages_pb2.PurpleDropEvent()
cap_event.active_capacitance.baseline = msg.baseline
cap_event.active_capacitance.measurement = msg.measurement
cap_event.active_capacitance.calibrated = float(self.active_capacitance)
cap_event.active_capacitance.timestamp.CopyFrom(get_pb_timestamp())
self.__fire_event(cap_event)
elif isinstance(msg, messages.BulkCapacitanceMsg):
if(msg.group_scan != 0):
self.group_capacitance_counter += 1
if (self.group_capacitance_counter % 10) == 0:
self.raw_group_capacitance = msg.measurements
self.calibrated_group_capacitance = [0.0] * len(self.raw_group_capacitance)
for i in range(msg.count):
if self.pin_state.scan_groups[i].setting == 0:
gain = CAPGAIN_HIGH
else:
gain = CAPGAIN_LOW
self.calibrated_group_capacitance[i] = self.__calibrate_capacitance(msg.measurements[i], gain)
group_event = messages_pb2.PurpleDropEvent()
group_event.group_capacitance.timestamp.CopyFrom(get_pb_timestamp())
group_event.group_capacitance.measurements[:] = self.calibrated_group_capacitance
group_event.group_capacitance.raw_measurements[:] = self.raw_group_capacitance
self.__fire_event(group_event)
else:
# Scan capacitance measurements are broken up into multiple messages
if len(self.raw_scan_capacitance) < msg.start_index + msg.count:
self.raw_scan_capacitance.extend([0] * (msg.start_index + msg.count - len(self.raw_scan_capacitance)))
self.calibrated_scan_capacitance.extend([0] * (msg.start_index + msg.count - len(self.calibrated_scan_capacitance)))
for i in range(msg.count):
chan = msg.start_index + i
gain = self.scan_gains[chan]
self.raw_scan_capacitance[chan] = msg.measurements[i]
self.calibrated_scan_capacitance[chan] = self.__calibrate_capacitance(msg.measurements[i], gain)
# Fire event on the last group
if msg.start_index + msg.count == 128:
bulk_event = messages_pb2.PurpleDropEvent()
def make_cap_measurement(raw, calibrated):
m = messages_pb2.CapacitanceMeasurement()
m.raw = float(raw)
m.capacitance = float(calibrated)
return m
bulk_event.scan_capacitance.measurements.extend(
[make_cap_measurement(raw, cal)
for (raw, cal) in zip(self.raw_scan_capacitance, self.calibrated_scan_capacitance)]
)
bulk_event.scan_capacitance.timestamp.CopyFrom(get_pb_timestamp())
self.__fire_event(bulk_event)
elif isinstance(msg, messages.DutyCycleUpdatedMsg):
self.duty_cycle_updated_counter += 1
if (self.duty_cycle_updated_counter%10) == 0:
# Update local state of duty cycle
self.pin_state.drive_groups[0].duty_cycle = msg.duty_cycle_A
self.pin_state.drive_groups[1].duty_cycle = msg.duty_cycle_B
# Publish event with new values
duty_cycle_event = messages_pb2.PurpleDropEvent()
duty_cycle_event.duty_cycle_updated.timestamp.CopyFrom(get_pb_timestamp())
duty_cycle_event.duty_cycle_updated.duty_cycles[:] = [msg.duty_cycle_A, msg.duty_cycle_B]
self.__fire_event(duty_cycle_event)
elif isinstance(msg, messages.HvRegulatorMsg):
self.hv_regulator_counter += 1
if (self.hv_regulator_counter % 10) == 0:
self.hv_supply_voltage = msg.voltage
event = messages_pb2.PurpleDropEvent()
event.hv_regulator.voltage = msg.voltage
event.hv_regulator.v_target_out = msg.v_target_out
event.hv_regulator.timestamp.CopyFrom(get_pb_timestamp())
self.__fire_event(event)
elif isinstance(msg, messages.TemperatureMsg):
self.temperatures = [float(x) / 100.0 for x in msg.measurements]
event = messages_pb2.PurpleDropEvent()
event.temperature_control.temperatures[:] = self.temperatures
duty_cycles = []
for i in range(len(self.temperatures)):
duty_cycles.append(self.duty_cycles.get(i, 0.0))
event.temperature_control.duty_cycles[:] = duty_cycles
event.temperature_control.timestamp.CopyFrom(get_pb_timestamp())
self.__fire_event(event)
def __fire_event(self, event):
with self.lock:
for listener in self.event_listeners:
listener(event)
def __get_parameter_definition(self, id):
for p in self.parameter_list:
if p['id'] == id:
return p
return None
def __fire_pinstate_event(self):
def create_electrode_group(x):
eg = messages_pb2.ElectrodeGroup()
eg.electrodes[:] = x.pin_mask
eg.setting = x.setting
return eg
event = messages_pb2.PurpleDropEvent()
for g in self.pin_state.drive_groups:
event.electrode_state.drive_groups.add(electrodes=g.pin_mask, setting=g.setting)
for g in self.pin_state.scan_groups:
event.electrode_state.scan_groups.add(electrodes=g.pin_mask, setting=g.setting)
self.__fire_event(event)
def get_software_version(self) -> Optional[str]:
listener = self.purpledrop.get_sync_listener(msg_filter=messages.DataBlobMsg)
versionRequest = messages.DataBlobMsg()
versionRequest.blob_id = messages.DataBlobMsg.SOFTWARE_VERSION_ID
self.purpledrop.send_message(versionRequest)
msg = listener.wait(0.5)
if msg is None:
software_version = None
logger.warning("Timed out requesting software version")
else:
software_version = msg.payload.decode('utf-8')
return software_version
def register_event_listener(self, func):
"""Register a callback for state update events
"""
with self.lock:
self.event_listeners.append(func)
def get_parameter_definitions(self):
"""Get a list of all of the parameters supported by the PurpleDrop
Arguments:
- None
"""
logger.debug("Recieved get_parameter_definitions")
return {
"parameters": self.parameter_list,
}
def get_parameter(self, paramIdx):
"""Request the current value of a parameter from the device
Arguments:
- paramIdx: The ID of the parameter to request (from the list of
parameters provided by 'get_parameter_definition')
"""
req_msg = messages.SetParameterMsg()
req_msg.set_param_idx(paramIdx)
req_msg.set_param_value_int(0)
req_msg.set_write_flag(0)
def msg_filter(msg):
return isinstance(msg, messages.SetParameterMsg) and msg.param_idx() == paramIdx
listener = self.purpledrop.get_sync_listener(msg_filter=msg_filter)
self.purpledrop.send_message(req_msg)
resp = listener.wait(timeout=0.5)
if resp is None:
raise TimeoutError("No response from purpledrop")
else:
paramDesc = self.__get_parameter_definition(paramIdx)
value = None
if paramDesc is not None and paramDesc['type'] == 'float':
value = resp.param_value_float()
else:
value = resp.param_value_int()
logger.debug(f"get_parameter({paramIdx}) returning {value}")
return value
def set_parameter(self, paramIdx, value):
"""Set a config parameter
A special paramIdx value of 0xFFFFFFFF is used to trigger the saving
of all parameters to flash.
Arguments:
- paramIdx: The index of the parameter to set (from
'get_parameter_definitions')
- value: A float or int (based on the definition) with the new
value to assign
"""
logging.debug(f"Received set_parameter({paramIdx}, {value})")
req_msg = messages.SetParameterMsg()
req_msg.set_param_idx(paramIdx)
paramDesc = self.__get_parameter_definition(paramIdx)
if paramDesc is not None and paramDesc['type'] == 'float':
req_msg.set_param_value_float(value)
else:
req_msg.set_param_value_int(value)
req_msg.set_write_flag(1)
def msg_filter(msg):
return isinstance(msg, messages.SetParameterMsg) and msg.param_idx() == paramIdx
listener = self.purpledrop.get_sync_listener(msg_filter=msg_filter)
self.purpledrop.send_message(req_msg)
resp = listener.wait(timeout=0.5)
if resp is None:
raise TimeoutError(f"No response from purpledrop to set parameter ({paramIdx})")
def get_board_definition(self):
"""Get electrode board configuratin object
Arguments: None
"""
logger.debug(f"Received get_board_definition")
return self.board_definition.as_dict()
def get_bulk_capacitance(self) -> List[float]:
"""Get the most recent capacitance scan results
DEPRECATED. Use get_scan_capacitance.
Arguments: None
"""
logging.debug("Received get_bulk_capacitance")
return self.calibrated_scan_capacitance
def get_scan_capacitance(self) -> Dict[str, Any]:
"""Get the most recent capacitance scan results
Arguments: None
"""
return {
"raw": self.raw_scan_capacitance,
"calibrated": self.calibrated_scan_capacitance
}
def get_group_capacitance(self) -> Dict[str, List[float]]:
"""Get the latest group scan capacitances
Arguments: None
"""
return {
"raw": self.raw_group_capacitance,
"calibrated": self.calibrated_group_capacitance,
}
def get_active_capacitance(self) -> float:
"""Get the most recent active electrode capacitance
Arguments: None
"""
logging.debug("Received get_active_capacitance")
return self.active_capacitance
def get_electrode_pins(self):
"""Get the current state of all electrodes
Arguments: None
Returns: List of booleans
"""
logging.debug("Received get_electrode_pins")
return self.pin_state.to_dict()
def set_capacitance_group(self, pins: Sequence[int], group_id: int, setting: int):
"""Set a capacitance scan group.
Purpledrop support 5 scan groups. Each group defines a set of electrodes
which are measured together after each AC drive cycle.
Arguments:
- pins: A list of pins included in the group (may be empty to clear the group)
- group_id: The group number to set (0-4)
"""
if group_id >= 5:
raise ValueError("group_id must be < 5")
# Send message to device to update
msg = ElectrodeEnableMsg()
msg.group_id = group_id + 100
msg.setting = setting
msg.values = pinlist2mask(pins)
self.purpledrop.send_message(msg)
# Update local state
self.pin_state.scan_groups[group_id] = PinState.ScanGroup(pinlist2bool(pins), setting)
# Send event with new state
self.__fire_pinstate_event()
def set_electrode_pins(self, pins: Sequence[int], group_id: int=0, duty_cycle: int=255):
"""Set the currently enabled pins
Specified electrodes will be activated, all other will be deactivated.
Providing an empty array will deactivate all electrodes.
Arguments:
- pins: A list of pin numbers to activate
- group_id: Which electrode enable group to be set (default: 0)
0: Drive group A
1: Drive group B
- duty_cycle: Duty cycle for the group (0-255)
"""
logging.debug(f"Received set_electrode_pins({pins})")
if group_id < 0 or group_id > 1:
raise ValueError(f"group_id={group_id} is invalid. It must be 0 or 1.")
# Send message to device to update
msg = ElectrodeEnableMsg()
msg.group_id = group_id
msg.setting = duty_cycle
msg.values = pinlist2mask(pins)
self.purpledrop.send_message(msg)
# Update local state
self.pin_state.drive_groups[group_id] = PinState.DriveGroup(pinlist2bool(pins), duty_cycle)
# Send event with new state
self.__fire_pinstate_event()
def set_feedback_command(self, target, mode, input_groups_p_mask, input_groups_n_mask, baseline):
"""Update feedback control settings
When enabled, the purpledrop controller will adjust the duty cycle of
electrode drive groups based on capacitance measurements.
Arguments:
- target: The controller target in counts
- mode:
- 0: Disabled
- 1: Normal
- 2: Differential
- input_groups_p_mask: Bit mask indicating which capacitance groups to
sum for positive input (e.g. for groups 0 and 2: 5)
- input_groups_n_mask: Bit mask for negative input groups (used in differential mode)
- baseline: The duty cycle to apply to both drive groups when no error signal is
present (0-255)
"""
msg = messages.FeedbackCommandMsg()
msg.target = target
msg.mode = mode
msg.input_groups_p_mask = input_groups_p_mask
msg.input_groups_n_mask = input_groups_n_mask
msg.baseline = baseline
self.purpledrop.send_message(msg)
def move_drop(self,
start: Sequence[int],
size: Sequence[int],
direction: str) -> MoveDropResult:
"""Execute a drop move sequence
Arguments:
- start: A list -- [x, y] -- specifying the top-left corner of the current drop location
- size: A list -- [width, height] -- specifying the size of the drop to be moved
- direction: One of, "Up", "Down", "Left", "Right"
"""
logging.debug(f"Received move_drop({start}, {size}, {direction})")
return move_drop(self, start, size, direction)
def get_temperatures(self) -> Sequence[float]:
"""Returns an array of all temperature sensor measurements in degrees C
Arguments: None
"""
logging.debug("Received get_temperatures")
return self.temperatures
def set_pwm_duty_cycle(self, chan: int, duty_cycle: float):
"""Set the PWM output duty cycle for a single channel
Arguments:
- chan: An integer specifying the channel to set
- duty_cycle: A float specifying the duty cycle in range [0, 1.0]
"""
logging.debug(f"Received set_pwm_duty_cycle({chan}, {duty_cycle})")
self.duty_cycles[chan] = duty_cycle
msg = SetPwmMsg()
msg.chan = chan
msg.duty_cycle = duty_cycle
self.purpledrop.send_message(msg)
def get_hv_supply_voltage(self):
"""Return the latest high voltage rail measurement
Arguments: None
Returns: A float, in volts
"""
logging.debug("Received get_hv_supply_voltage")
return self.hv_supply_voltage
def calibrate_capacitance_offset(self):
"""Request a calibration of the capacitance measurement zero offset
Arguments: None
Returns: None
"""
msg = messages.CalibrateCommandMsg()
msg.command = messages.CalibrateCommandMsg.CAP_OFFSET_CMD
self.purpledrop.send_message(msg)
def get_device_info(self):
"""Gets information about the connected purpledrop device
Arguments: None
Returns: Object with the following fields:
- connected: boolean indicating if a device is currently connected
- serial_number: The serial number of the connected device
- software_version: The software version string of the connected device
"""
serial_number = self.purpledrop.connected_serial_number()
if serial_number is None:
return {
'connected': False,
'serial_number': '',
'software_version': ''
}
else:
software_version = self.get_software_version()
return {
'connected': True,
'serial_number': serial_number,
'software_version': software_version
}
def read_gpio(self, gpio_num):
"""Reads the current input value of a GPIO pin
Arguments:
- gpio_num: The ID of the GPIO to read
Returns: A bool
"""
msg = messages.GpioControlMsg()
msg.pin = gpio_num
msg.read = True
listener = self.purpledrop.get_sync_listener(msg_filter=messages.GpioControlMsg)
self.purpledrop.send_message(msg)
rxmsg = listener.wait(0.5)
if rxmsg is None:
raise TimeoutError("No response from purpledrop to GPIO read request")
else:
return rxmsg.value
def write_gpio(self, gpio_num, value, output_enable):
"""Set the output state of a GPIO pin
Arguments:
- gpio_num: The ID of the GPIO to set
- value: The output value (boolean)
- output_enable: Set the GPIO as an output (true) or input (false)
Returns:
- The value read on the GPIO (bool)
"""
msg = messages.GpioControlMsg()
msg.pin = gpio_num
msg.read = False
msg.value = value
msg.output_enable = output_enable
listener = self.purpledrop.get_sync_listener(msg_filter=messages.GpioControlMsg)
self.purpledrop.send_message(msg)
rxmsg = listener.wait(0.5)
if rxmsg is None:
raise TimeoutError("No response from purpledrop to GPIO read request")
else:
return rxmsg.value
def set_electrode_calibration(self, voltage: float, offsets: Sequence[int]):
"""Set the capacitance offset for each electrode
Provides a table of values to be subtracted for each electrode to
compensate for parasitic capacitance of the electrode. Values are
measured at high gain, with no liquid on the device, at a certain
voltage.
These values will be adjusted for changes in voltage from the measured
voltage, and for low gain when applied by the purpledrop.
Arguments:
- voltage: The voltage setting at which the offsets were measured
- offsets: A list of 128 16-bit values to be subtracted
Returns: None
"""
offsets = list(map(int, offsets))
table = struct.pack("<f128H", voltage, *offsets)
tx_pos = 0
while tx_pos < len(table):
tx_size = min(64, len(table) - tx_pos)
msg = messages.DataBlobMsg()
msg.blob_id = msg.OFFSET_CALIBRATION_ID
msg.chunk_index = tx_pos
msg.payload_size = tx_size
msg.payload = table[tx_pos:tx_pos+tx_size]
tx_pos += tx_size
listener = self.purpledrop.get_sync_listener(messages.CommandAckMsg)
self.purpledrop.send_message(msg)
ack = listener.wait(timeout=0.5)
if ack is None:
raise TimeoutError("No ACK while setting electrode calibration")
def set_scan_gains(self, gains: Optional[Sequence[bool]]=None):
    """Configure the gains used for capacitance scan measurement.

    When *gains* is omitted, scan gains are chosen from the oversized
    electrodes defined in the board definition file.

    Arguments:
        - gains: A list of 128 booleans; True marks an electrode to be
          scanned with low gain.

    Raises:
        ValueError: if *gains* is provided but does not have 128 entries.
    """
    if gains is not None:
        if len(gains) != 128:
            raise ValueError("Scan gains must have 128 values")
        # Normalise every entry to a plain bool (also validates coercibility).
        gains = list(map(bool, gains))
    self.__set_scan_gains(gains)
def get_scan_gains(self) -> List[bool]:
    """Report, per entry of ``self.scan_gains``, whether that electrode is
    currently configured for low-gain scanning (equal to CAPGAIN_LOW)."""
    low_gain_flags = []
    for gain in self.scan_gains:
        low_gain_flags.append(gain == CAPGAIN_LOW)
    return low_gain_flags
seq_to_seq.py | kiseonjeong/tensorflow-v1.x-ml | 1 | 12762780 | import ignore
import tensorflow as tf
import numpy as np
# Vocabulary: special tokens S (decoder start), E (end), P (pad), the
# lowercase alphabet, and the Korean characters appearing in the targets.
char_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz단어나무놀이소녀키스사랑']
# Character -> integer index lookup.
num_dic = {n: i for i, n in enumerate(char_arr)}
dic_len = len(num_dic)

# (English word, Korean translation) training pairs.
seq_data = [['word', '단어'], ['wood', '나무'],
            ['game', '놀이'], ['girl', '소녀'],
            ['kiss', '키스'], ['love', '사랑']]
def make_batch(seq_data):
    """Convert (source, target) pairs into seq2seq training batches.

    Returns three parallel lists: one-hot encoder inputs for the source,
    one-hot decoder inputs for 'S' + target, and integer target indices
    for target + 'E'.
    """
    input_batch, output_batch, target_batch = [], [], []
    identity = np.eye(dic_len)
    for src, dst in seq_data:
        enc_ids = [num_dic[ch] for ch in src]
        dec_ids = [num_dic[ch] for ch in 'S' + dst]
        tgt_ids = [num_dic[ch] for ch in dst + 'E']
        input_batch.append(identity[enc_ids])
        output_batch.append(identity[dec_ids])
        target_batch.append(tgt_ids)
    return input_batch, output_batch, target_batch
# Hyper-parameter settings (translated from Korean)
learning_rate = 0.01
n_hidden = 128
total_epoch = 100
# One-hot width equals the vocabulary size.
n_class = n_input = dic_len

# Build the seq2seq graph (translated from Korean)
enc_input = tf.placeholder(tf.float32, [None, None, n_input])
dec_input = tf.placeholder(tf.float32, [None, None, n_input])
targets = tf.placeholder(tf.int64, [None, None])

with tf.variable_scope('encode'):
    enc_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
    enc_cell = tf.nn.rnn_cell.DropoutWrapper(enc_cell, output_keep_prob=0.5)
    outputs, enc_states = tf.nn.dynamic_rnn(enc_cell, enc_input, dtype=tf.float32)

with tf.variable_scope('decode'):
    dec_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
    dec_cell = tf.nn.rnn_cell.DropoutWrapper(dec_cell, output_keep_prob=0.5)
    # The decoder is seeded with the encoder's final state.
    outputs, dec_states = tf.nn.dynamic_rnn(dec_cell, dec_input, initial_state=enc_states, dtype=tf.float32)

model = tf.layers.dense(outputs, n_class, activation=None)

cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model, labels=targets))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Train the model (translated from Korean)
sess = tf.Session()
sess.run(tf.global_variables_initializer())

input_batch, output_batch, target_batch = make_batch(seq_data)

for epoch in range(total_epoch):
    _, loss = sess.run([optimizer, cost], feed_dict={enc_input: input_batch, dec_input: output_batch, targets: target_batch})
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6}'.format(loss))

print('최적화 완료!')  # runtime string kept as-is ("Optimization complete!")
def translate(word):
    """Translate *word* with the trained model via greedy argmax decoding.

    The decoder input is a pad-only dummy target; decoding stops at the
    first 'E' (end) token the model emits.
    """
    pad_target = 'P' * len(word)
    enc_in, dec_in, tgt = make_batch([[word, pad_target]])
    prediction = tf.argmax(model, 2)
    result = sess.run(prediction, feed_dict={enc_input: enc_in, dec_input: dec_in, targets: tgt})
    decoded = [char_arr[idx] for idx in result[0]]
    return ''.join(decoded[:decoded.index('E')])
# Demo: translate known words plus deliberately misspelled variants.
print('\n=== 번역 테스트 ===')  # runtime banner kept as-is ("Translation test")

print('word ->', translate('word'))
print('wodr ->', translate('wodr'))
print('love ->', translate('love'))
print('loev ->', translate('loev'))
print('abcd ->', translate('abcd'))
cleandata.py | sunlightlabs/flask-districtoffices | 1 | 12762781 | <reponame>sunlightlabs/flask-districtoffices
from saucebrush import run_recipe
from saucebrush.emitters import DebugEmitter, CSVEmitter
from saucebrush.filters import FieldModifier, FieldRemover, FieldRenamer, Filter
from saucebrush.sources import CSVSource
import json
import os
import re
import requests
# Directory containing this script; used to resolve the data files below.
PWD = os.path.abspath(os.path.dirname(__file__))
# Collapses any run of whitespace into a single space.
WHITESPACE_RE = re.compile(r"\s+")
class GeocoderFilter(Filter):
    """Saucebrush filter that geocodes one field of each record via the
    Yahoo PlaceFinder API, adding 'latitude' and 'longitude' keys."""

    ENDPOINT = "http://where.yahooapis.com/geocode"

    def __init__(self, appid, field):
        """
        appid -- Yahoo application id used to authenticate API requests
        field -- name of the record key holding the address to geocode
        """
        super(GeocoderFilter, self).__init__()
        self.appid = appid
        self.field = field

    def geocode(self, address):
        """Return (latitude, longitude) for *address*, or (None, None)
        when the API yields no usable result."""
        params = {
            'q': address,
            'appid': self.appid,
            'flags': 'CJ',
        }
        resp = requests.get(self.ENDPOINT, params=params)
        data = json.loads(resp.content)
        try:
            result = data['ResultSet']['Results'][0]
            return (result['latitude'], result['longitude'])
        except (KeyError, IndexError):
            # Missing ResultSet/Results key or empty result list: no match.
            pass
        return (None, None)

    def process_record(self, record):
        # Bug fix: honour the configured field name instead of the
        # previously hard-coded 'address' key (the 'field' argument was
        # accepted but never used).
        ll = self.geocode(record[self.field])
        record['latitude'] = ll[0]
        record['longitude'] = ll[1]
        return record
inpath = os.path.join(PWD, 'data', 'directory-original.csv')
outpath = os.path.join(PWD, 'data', 'directory-clean.csv')

# Pipeline: read the raw directory CSV, drop unused columns, rename the
# remaining ones, strip/normalise whitespace, geocode each address, then
# emit the cleaned CSV (with a debug dump of every record).
run_recipe(
    CSVSource(open(inpath, 'r')),
    FieldRemover(('First','Middle','Last','District: Fax','Initial ')),
    FieldRenamer({
        'bioguide_id': 'Bio ID',
        'address': 'District: Address',
        'phone': 'District: Phone',
    }),
    FieldModifier(('bioguide_id','address','phone'), lambda s: s.strip()),
    FieldModifier('address', lambda s: WHITESPACE_RE.sub(' ', s)),
    # SECURITY NOTE(review): hard-coded API key checked into source control;
    # consider loading it from an environment variable and rotating the key.
    GeocoderFilter('Kv3.btLV34EuebZGMzi1KaqI_BOPhPjx7FtbvED.umr8DGUq0NysoGN0XIIIDRU-', 'address'),
    DebugEmitter(),
    CSVEmitter(open(outpath, 'w'), ('bioguide_id','address','latitude','longitude','phone')),
)
cifar.py | Schwartz-Zha/My-invertible-resnet | 0 | 12762782 | <reponame>Schwartz-Zha/My-invertible-resnet
import torch
from PIL import Image
import numpy as np
import os
class CifarSingleDataset(torch.utils.data.Dataset):
    """Dataset yielding individual images from the ``img_align_celeba``
    directory under *root_dir*.

    NOTE(review): despite the 'Cifar' name this reads the CelebA aligned
    faces directory -- confirm the intended dataset with the author.
    """

    def __init__(self, root_dir, transform=None):
        """
        root_dir  -- directory containing an ``img_align_celeba`` subfolder
        transform -- optional callable applied to each PIL image
        """
        self.root_dir = os.path.join(root_dir, 'img_align_celeba')
        self.transform = transform
        # Keep only regular files (skips any sub-directories).
        self.img_files = [os.path.join(self.root_dir, f) for f
                          in os.listdir(self.root_dir)
                          if os.path.isfile(os.path.join(self.root_dir, f))]

    def __len__(self):
        return len(self.img_files)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        image = Image.open(self.img_files[idx])
        # Bug fix: previously, when no transform was set, the method fell
        # through to ``return data`` with ``data`` never assigned
        # (UnboundLocalError). Return the raw image in that case.
        if self.transform is not None:
            return self.transform(image)
        return image
python/loom_viewer/__init__.py | arao11/pattern_viz | 34 | 12762783 | <reponame>arao11/pattern_viz
from ._version import __version__
import loompy
from loompy import LoomConnection
from .loom_tiles import LoomTiles
from .loom_expand import LoomExpand
from .loom_datasets import LoomDatasets
from .loom_cli import main
| 1.046875 | 1 |
RPN.py | WanMotion/FatserRCNN_Pytorch_Learn | 0 | 12762784 | <filename>RPN.py
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.functional import softmax
import VGG16Model
from proposalLayer import *
class RPN(nn.Module):
    """Region Proposal Network head for Faster R-CNN.

    NOTE(review): this module looks unfinished -- see the inline notes on
    ``VGG16Model()``, the ``proposal_layer`` call arguments, and the bare
    ``rpn_data`` statement in ``forward``.
    """
    # 3 scales x 3 aspect ratios -> 9 anchors per feature-map location.
    anchor_scales=[8,16,32]
    # Backbone downsampling: one feature cell per 16 input pixels.
    _feat_stride = [16, ]
    def __init__(self):
        super(RPN, self).__init__()
        # Let the input image size be (M, N).  (translated comment)
        # NOTE(review): ``VGG16Model`` is imported as a module
        # (``import VGG16Model``) but invoked here like a class -- as
        # written this raises TypeError; presumably a class inside the
        # module was intended.
        self.features=VGG16Model() # (512,M/16,N/16)
        # 3x3 conv on top of the backbone feature map.  (translated)
        self.conv1=nn.Conv2d(512,512,(3,3),(1,1),padding=1)
        self.relu1=nn.ReLU()
        # 9 anchors per location, 2 (bg/fg) scores each.  (translated)
        self.score_conv=nn.Conv2d(512,len(self.anchor_scales)*3*2,(1,1)) # output (18,M/16,N/16)
        # 9 anchors per location, 4 box-regression values each.  (translated)
        self.bbox_conv=nn.Conv2d(512,len(self.anchor_scales)*3*4,(1,1)) # output (36,M/16,N/16)
        # Loss terms, to be filled during training forward passes.
        self.cross_entropy=None
        self.loss_box=None
    def forward(self,im_data,im_info,gt_boxes=None,gt_ishard=None,doncare_areas=None):
        # Reshape/permute(0,3,1,2) first.  (translated comment)
        features=self.features(im_data) # backbone feature map (translated)
        rpn_conv1=self.conv1(features)
        rpn_conv1=self.relu1(rpn_conv1)
        # RPN scores; W=M/16, H=N/16.
        rpn_cls_score=self.score_conv(rpn_conv1) # (1,18,W,H)
        # Softmax over scores.  NOTE(review): no ``dim`` argument given.
        rpn_cls_prob=softmax(rpn_cls_score) # (1,18.W,H)
        # RPN box regression deltas.
        rpn_bbox_pre=self.bbox_conv(rpn_conv1) # (1,36,W,H)
        cfg_key = 'TRAIN' if self.training else 'TEST'
        # NOTE(review): proposal_layer's signature below expects
        # (rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, ...) -- this call
        # appears to omit im_info; confirm the intended arguments.
        rois=self.proposal_layer(rpn_cls_prob,rpn_bbox_pre,cfg_key,self._feat_stride,self.anchor_scales)
        # Generate training labels and build the RPN loss.  (translated)
        if self.training:
            assert gt_boxes is not None
            rpn_data  # NOTE(review): bare name -> NameError; label/loss generation is unfinished.
        # Return the feature map and the proposed anchors/rois.  (translated)
        return features,rois
    @staticmethod
    def proposal_layer(rpn_cls_prob,rpn_bbox_pred,im_info,cfg_key,_feat_stride,anchor_scales):
        """Wrap the numpy proposal_layer and move its result to the GPU.

        NOTE(review): the ``Variable(x, torch.from_numpy(x), ...)`` call
        below does not match torch.autograd.Variable's signature -- confirm.
        """
        rpn_cls_prob = rpn_cls_prob.data.cpu().numpy()
        rpn_bbox_pred = rpn_bbox_pred.data.cpu().numpy()
        x=proposal_layer(rpn_cls_prob,rpn_bbox_pred,im_info,cfg_key,_feat_stride,anchor_scales)
        x=Variable(x,torch.from_numpy(x),torch.FloatTensor)
        x=x.cuda()
        return x
    @staticmethod
    def anchor_target_layer(rpn_cls_score, gt_boxes, gt_ishard, dontcare_areas, im_info, _feat_stride, anchor_scales):
        """
        :param rpn_cls_score: (1,A*2,H,W) bg/fg scores
        :param gt_boxes: (G,5) [x1,y1,x2,y2,class] ground truth (translated)
        :param gt_ishard:(G,1) 1 or 0 indicates difficult or not
        :param dontcare_areas:(D, 4), some areas may contains small objs but no labelling. D may be 0
        :param im_info:[height, width, scale factor] (translated)
        :param _feat_stride: downsampling ratio (translated)
        :param anchor_scales: anchor scale factors (translated)
        :return:
        rpn_labels : (1, 1, HxA, W), for each anchor, 0 denotes bg, 1 fg, -1 dontcare
        rpn_bbox_targets: (1, 4xA, H, W), distances of the anchors to the gt_boxes(may contains some transform)
        that are the regression objectives
        rpn_bbox_inside_weights: (1, 4xA, H, W) weights of each boxes, mainly accepts hyper param in cfg
        rpn_bbox_outside_weights: (1, 4xA, H, W) used to balance the fg/bg,
        beacuse the numbers of bgs and fgs mays significiantly different
        """
        rpn_cls_score = rpn_cls_score.data.cpu().numpy()
        rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = \
            anchor_target_layer_py(rpn_cls_score, gt_boxes, gt_ishard, dontcare_areas, im_info, _feat_stride, anchor_scales)
| 2.28125 | 2 |
year_name_source.py | vijayp/imageanalysis | 19 | 12762785 | <reponame>vijayp/imageanalysis
#!/usr/bin/env python
import os
from collections import defaultdict
import cPickle
def get_movie_map(movie_path):
    """Walk *movie_path* and map movie id (parent directory name, as int)
    to the movie name.

    The name is derived from each per-movie HTML file name with dashes
    replaced by spaces; 'index.html' files are skipped. (Python 2 script.)
    """
    rv = {}
    for (path, dirs, files) in os.walk(movie_path):
        for f in files:
            if f.endswith('.html') and f != 'index.html':
                mname = f[:-5].replace('-', ' ')
                mid = os.path.split(path)[-1]
                print mid, mname
                rv[int(mid)] = mname
    return rv
def get_year_map(movie_path):
    """Map year (directory name) -> list of movie names.

    Scans for thumbnail files ('t_*.jpg') laid out as .../<year>/<movie id>/,
    resolving each movie id via get_movie_map ('UNKNOWN' when absent).
    Sets are converted to lists so the result is JSON-serialisable.
    """
    mid_mname_map = get_movie_map(movie_path)
    year_mname_map = defaultdict(set)
    for (path, dirs, files) in os.walk(movie_path):
        for f in files:
            if f.startswith('t_') and f.endswith('.jpg'):
                rem,mid = os.path.split(path)
                year = os.path.split(rem)[-1]
                val = mid_mname_map.get(int(mid), 'UNKNOWN')
                print year, val
                # Python 2: coerce to unicode, dropping undecodable bytes.
                year_mname_map[year].add(unicode(val,errors='ignore'))
    for k,v in year_mname_map.iteritems():
        year_mname_map[k] = list(v)
    return year_mname_map
if __name__ == '__main__':
    import sys
    import os
    import json

    # Build the year -> movie-name map from argv[1] and persist it as JSON.
    ym = get_year_map(sys.argv[1])
    json.dump(ym, open('year_data.json', 'wb'))
| 2.921875 | 3 |
bilayer_lpa.py | Ilseng/bilayer-instability | 2 | 12762786 | <filename>bilayer_lpa.py
# Functions for bi-layer linear perturbation analysis
import numpy as np
from scipy.optimize import brentq
def s22_func(l2, Nv, chi, mu):
    """Residual of the swelling equilibrium condition at stretch *l2*.

    Combines a Flory-Huggins-style mixing contribution with an elastic
    term (Nv) and subtracts the chemical potential *mu*; a root in l2
    gives the equilibrium stretch. NOTE(review): physical interpretation
    presumed from the surrounding code -- confirm against the derivation.
    """
    mixing = np.log(1 - 1 / l2) + 1 / l2 + chi / l2 ** 2
    elastic = Nv * (l2 - 1 / l2)
    return mixing + elastic - mu
def matrix_zero(mu, wH, Nv1, n, chi, eta):
    """Determinant of the 8x8 linear-perturbation boundary-condition
    matrix for the bi-layer; its zeros mark the instability threshold.

    Parameters (presumed physical meanings -- confirm against the model
    derivation): mu chemical potential; wH dimensionless wavenumber x
    thickness; Nv1 crosslink parameter of the top layer (1); n ratio
    Nv2/Nv1 for the bottom layer (2); chi interaction parameter; eta
    thickness fraction of layer 2.
    """
    Nv2 = n*Nv1 # Name bottom layer as 2
    # Equilibrium swelling stretches: roots of s22_func for each layer.
    l1 = brentq(s22_func, 1+1e-10, 1000, args=(Nv1, chi, mu))
    l2 = brentq(s22_func, 1+1e-10, 1000, args=(Nv2, chi, mu))
    xhi1 = 1/l1 + (1/Nv1)*(1/(l1-1)-(1/l1)-(2*chi)/(l1**2))
    xhi2 = 1/l2 + (1/Nv2)*(1/(l2-1)-(1/l2)-(2*chi)/(l2**2))
    # Decay exponents of the perturbation modes in each layer.
    b1 = np.sqrt((1+l1*xhi1)/(l1**2+l1*xhi1))
    b2 = np.sqrt((1+l2*xhi2)/(l2**2+l2*xhi2))
    # Scaled interface (wh1) and bottom (wh) coordinates.
    wh1 = wH*(1-eta)*l1
    wh = wH*(l1*(1-eta)+l2*eta)
    # Rows 0-1: conditions at the top surface (no exponentials, z = 0);
    # rows 2-5: continuity at the layer interface (z = wh1);
    # rows 6-7: conditions at the bottom (z = wh).
    D = np.zeros((8,8))
    D[0,0] = 1; D[0,1] = 1; D[0,2] = 1; D[0, 3] = 1
    D[1,0] = l1; D[1,1] = -l1; D[1,2] = b1; D[1,3] = -b1
    D[2,0] = np.exp(wh1/l1); D[2,1] = np.exp(-wh1/l1);
    D[2,2] = np.exp(b1*wh1); D[2,3] = np.exp(-b1*wh1);
    D[2,4] = -np.exp(wh1/l2); D[2,5] = -np.exp(-wh1/l2);
    D[2,6] = -np.exp(b2*wh1); D[2,7] = -np.exp(-b2*wh1);
    D[3,0] = l1*np.exp(wh1/l1); D[3,1] = -l1*np.exp(-wh1/l1);
    D[3,2] = b1*np.exp(b1*wh1); D[3,3] = -b1*np.exp(-b1*wh1);
    D[3,4] = -l2*np.exp(wh1/l2); D[3,5] = l2*np.exp(-wh1/l2);
    D[3,6] = -b2*np.exp(b2*wh1); D[3,7] = b2*np.exp(-b2*wh1);
    D[4,0] = 2*Nv1*l1*np.exp(wh1/l1); D[4,1] = 2*Nv1*l1*np.exp(-wh1/l1);
    D[4,2] = Nv1*(l1+1/l1)*np.exp(b1*wh1); D[4,3] = Nv1*(l1+1/l1)*np.exp(-b1*wh1);
    D[4,4] = -2*Nv2*l2*np.exp(wh1/l2); D[4,5] = -2*Nv2*l2*np.exp(-wh1/l2);
    D[4,6] = -Nv2*(l2+1/l2)*np.exp(b2*wh1); D[4,7] = -Nv2*(l2+1/l2)*np.exp(-b2*wh1);
    D[5,0] = Nv1*(l1**2+1)*np.exp(wh1/l1); D[5,1] = -Nv1*(l1**2+1)*np.exp(-wh1/l1);
    D[5,2] = 2*Nv1*l1*b1*np.exp(b1*wh1); D[5,3] = -2*Nv1*l1*b1*np.exp(-b1*wh1);
    D[5,4] = -Nv2*(l2**2+1)*np.exp(wh1/l2); D[5,5] = Nv2*(l2**2+1)*np.exp(-wh1/l2);
    D[5,6] = -2*Nv2*l2*b2*np.exp(b2*wh1); D[5,7] = 2*Nv2*l2*b2*np.exp(-b2*wh1);
    D[6,4] = 2*l2*np.exp(wh/l2); D[6,5] = 2*l2*np.exp(-wh/l2);
    D[6,6] = (l2+1/l2)*np.exp(b2*wh); D[6,7] = (l2+1/l2)*np.exp(-b2*wh);
    D[7,4] = (l2+1/l2)*np.exp(wh/l2); D[7,5] = -(l2+1/l2)*np.exp(-wh/l2);
    D[7,6] = 2*b2*np.exp(b2*wh); D[7,7] = -2*b2*np.exp(-b2*wh);
    # Reconstruct det(D) = sign * exp(log|det|).
    # NOTE(review): exp(logabsdet) may overflow for large wh -- confirm
    # whether the sign (or the log) alone suffices for root finding.
    logdet = np.linalg.slogdet(D)
    return logdet[0]*np.exp(logdet[1])
| 2.3125 | 2 |
accountancy/testing/test_integration/test_helpers.py | rossm6/accounts | 11 | 12762787 | from datetime import date, datetime, timedelta
from accountancy.helpers import AuditTransaction, get_all_historical_changes
from cashbook.models import CashBook
from contacts.models import Contact
from controls.models import FinancialYear, Period
from django.test import TestCase
from nominals.models import Nominal
from purchases.models import (PurchaseHeader, PurchaseLine, PurchaseMatching,
Supplier)
from vat.models import Vat
DATE_INPUT_FORMAT = '%d-%m-%Y'
MODEL_DATE_INPUT_FORMAT = '%Y-%m-%d'
class GetAllHistoricalChangesTest(TestCase):
    """Check get_all_historical_changes across create / update / delete.

    Refactor: the previous version repeated every old/new assertion inline;
    the field checks are now table-driven via _assert_change, with identical
    assertions.
    """

    def _assert_change(self, change, expected, action):
        """Assert each field's {old, new} pair plus the audit action."""
        for field, (old, new) in expected.items():
            self.assertEqual(change[field]["old"], old)
            self.assertEqual(change[field]["new"], new)
        self.assertEqual(change["meta"]["AUDIT_action"], action)

    def test_create_only(self):
        contact = Contact.objects.create(code="1", name="11", email="111")
        historical_records = Contact.history.all().order_by("pk")
        self.assertEqual(len(historical_records), 1)
        changes = get_all_historical_changes(historical_records)
        self.assertEqual(len(changes), 1)
        self._assert_change(
            changes[0],
            {
                "id": ("", str(contact.id)),
                "code": ("", "1"),
                "name": ("", "11"),
            },
            "Create",
        )

    def test_create_and_update(self):
        contact = Contact.objects.create(code="1", name="11", email="111")
        contact.name = "12"
        contact.save()
        historical_records = Contact.history.all().order_by("pk")
        self.assertEqual(len(historical_records), 2)
        changes = get_all_historical_changes(historical_records)
        self.assertEqual(len(changes), 2)
        self._assert_change(
            changes[0],
            {
                "id": ("", str(contact.id)),
                "code": ("", "1"),
                "name": ("", "11"),
            },
            "Create",
        )
        self._assert_change(changes[1], {"name": ("11", "12")}, "Update")

    def test_create_and_update_and_delete(self):
        contact = Contact.objects.create(code="1", name="11", email="111")
        # Snapshot field values before the delete removes the row.
        contact_dict = contact.__dict__.copy()
        contact.name = "12"
        contact.save()
        contact.delete()
        historical_records = Contact.history.all().order_by("pk")
        self.assertEqual(len(historical_records), 3)
        changes = get_all_historical_changes(historical_records)
        self.assertEqual(len(changes), 3)
        self._assert_change(
            changes[0],
            {
                "id": ("", str(contact_dict["id"])),
                "code": ("", "1"),
                "name": ("", "11"),
            },
            "Create",
        )
        self._assert_change(changes[1], {"name": ("11", "12")}, "Update")
        self._assert_change(
            changes[2],
            {
                "id": (str(contact_dict["id"]), ""),
                "code": (contact_dict["code"], ""),
                "name": ("12", ""),
                "email": (contact_dict["email"], ""),
                "customer": (str(contact_dict["customer"]), ""),
                "supplier": (str(contact_dict["supplier"]), ""),
            },
            "Delete",
        )
class AuditTransactionTest(TestCase):
    """Exercise AuditTransaction against a purchase ledger header, its
    lines and its matching records, checking the merged, chronologically
    ordered change history it reports.

    Refactor: the previous version inlined ~900 lines of copy-pasted
    fixtures and assertions; they are now expressed through the helpers
    below with identical assertions. (Also drops the misleading
    '# payment' comments previously attached to type="pi" invoices.)
    """

    @classmethod
    def setUpTestData(cls):
        cls.date = datetime.now().strftime(DATE_INPUT_FORMAT)
        cls.due_date = (datetime.now() + timedelta(days=31)
                        ).strftime(DATE_INPUT_FORMAT)
        cls.model_date = datetime.now().strftime(MODEL_DATE_INPUT_FORMAT)
        cls.model_due_date = (datetime.now() + timedelta(days=31)
                              ).strftime(MODEL_DATE_INPUT_FORMAT)
        fy = FinancialYear.objects.create(financial_year=2020)
        cls.fy = fy
        cls.period = Period.objects.create(
            fy=fy, period="01", fy_and_period="202001", month_start=date(2020, 1, 31))

    # ------------------------------------------------------------------
    # fixtures
    # ------------------------------------------------------------------

    def _make_cash_book_and_supplier(self):
        """Minimal cash book + supplier shared by all tests."""
        cash_book = CashBook.objects.create(nominal=None, name="current")
        supplier = Supplier.objects.create(code="1", name="2", email="3")
        return cash_book, supplier

    def _make_header(self, tran_type, cash_book, supplier, goods, vat, total):
        """Create a PurchaseHeader with ref '123' in the test period."""
        return PurchaseHeader.objects.create(
            type=tran_type,
            date=date.today(),
            goods=goods,
            vat=vat,
            total=total,
            ref="123",
            cash_book=cash_book,
            supplier=supplier,
            paid=0,
            due=0,
            period=self.period
        )

    def _make_line(self, header):
        """Create a PurchaseLine (description '123') plus its FK fixtures."""
        nominal = Nominal.objects.create(name="something", parent=None)
        vat_code = Vat.objects.create(code="1", name="2", rate=20)
        return PurchaseLine.objects.create(
            nominal=nominal,
            goods=100,
            vat=20,
            vat_code=vat_code,
            description="123",
            line_no=1,
            header=header
        )

    # ------------------------------------------------------------------
    # assertion helpers
    # ------------------------------------------------------------------

    def _assert_change(self, change, expected, action, aspect):
        """Assert each field's {old, new} pair plus the audit metadata."""
        for field, (old, new) in expected.items():
            self.assertEqual(change[field]["old"], old)
            self.assertEqual(change[field]["new"], new)
        self.assertEqual(change["meta"]["AUDIT_action"], action)
        self.assertEqual(change["meta"]["transaction_aspect"], aspect)

    def _assert_chronological(self, changes, strict=True):
        """Assert AUDIT_date is (strictly) increasing across *changes*."""
        compare = self.assertLess if strict else self.assertLessEqual
        for earlier, later in zip(changes, changes[1:]):
            compare(earlier["meta"]["AUDIT_date"], later["meta"]["AUDIT_date"])

    def _expected_header_create(self, header, ui_sign=1):
        """Expected field changes for a header 'Create' entry.

        ui_sign=-1 mirrors the UI convention for payments, whose goods and
        total are shown negated in the audit.
        """
        def ui(value):
            return value if ui_sign == 1 else value * ui_sign
        return {
            "id": ("", str(header.id)),
            "ref": ("", "123"),
            "goods": ("", str(ui(header.goods))),
            "vat": ("", str(header.vat)),
            "total": ("", str(ui(header.total))),
            "paid": ("", str(header.paid)),
            "due": ("", str(header.due)),
            "date": ("", str(header.date)),
            "due_date": ("", str(header.due_date)),
            "period_id": ("", str(self.period.pk)),
            "status": ("", str(header.status)),
            "type": ("", str(header.type)),
            "cash_book_id": ("", str(header.cash_book_id)),
            "supplier_id": ("", str(header.supplier_id)),
        }

    def _expected_line_create(self, line):
        """Expected field changes for a line 'Create' entry."""
        return {
            "id": ("", str(line.id)),
            "description": ("", "123"),
            "goods": ("", str(line.goods)),
            "vat": ("", str(line.vat)),
            "line_no": ("", str(line.line_no)),
            "nominal_id": ("", str(line.nominal.pk)),
            "vat_code_id": ("", str(line.vat_code.pk)),
            "header_id": ("", str(line.header.pk)),
        }

    def _expected_match_create(self, match):
        """Expected field changes for a matching 'Create' entry."""
        return {
            "matched_by_id": ("", str(match.matched_by_id)),
            "matched_to_id": ("", str(match.matched_to_id)),
            "value": ("", "-100.00"),
            "period_id": ("", str(self.period.pk)),
        }

    # ------------------------------------------------------------------
    # tests
    # ------------------------------------------------------------------

    def test_no_lines(self):
        cash_book, supplier = self._make_cash_book_and_supplier()
        h = self._make_header("pp", cash_book, supplier, goods=120, vat=0, total=120)
        self.assertEqual(len(PurchaseHeader.history.all()), 1)

        h.ref = "1234"  # update the header
        h.save()
        h.refresh_from_db()
        self.assertEqual(len(PurchaseHeader.history.all()), 2)

        audit_transaction = AuditTransaction(
            h, PurchaseHeader, PurchaseLine, PurchaseMatching)
        self.assertEqual(len(audit_transaction.audit_header_history), 2)
        self.assertEqual(len(audit_transaction.audit_lines_history), 0)
        self.assertEqual(len(audit_transaction.audit_matches_history), 0)

        all_changes = audit_transaction.get_historical_changes()
        self.assertEqual(len(all_changes), 2)
        self._assert_chronological(all_changes)

        # Payment: UI shows goods/total negated, hence ui_sign=-1.
        self._assert_change(
            all_changes[0], self._expected_header_create(h, ui_sign=-1),
            "Create", "header")
        self._assert_change(
            all_changes[1], {"ref": ("123", h.ref)}, "Update", "header")

    def test_lines(self):
        cash_book, supplier = self._make_cash_book_and_supplier()
        h = self._make_header("pi", cash_book, supplier, goods=100, vat=20, total=120)
        l = self._make_line(h)
        self.assertEqual(len(PurchaseHeader.history.all()), 1)

        h.ref = "1234"  # update the header
        h.save()
        h.refresh_from_db()
        l.description = "12345"  # update the line
        l.save()
        l.refresh_from_db()
        self.assertEqual(len(PurchaseHeader.history.all()), 2)

        audit_transaction = AuditTransaction(
            h, PurchaseHeader, PurchaseLine, PurchaseMatching)
        self.assertEqual(len(audit_transaction.audit_header_history), 2)
        self.assertEqual(len(audit_transaction.audit_lines_history), 2)
        self.assertEqual(len(audit_transaction.audit_matches_history), 0)

        all_changes = audit_transaction.get_historical_changes()
        self.assertEqual(len(all_changes), 4)
        self._assert_chronological(all_changes)

        self._assert_change(
            all_changes[0], self._expected_header_create(h), "Create", "header")
        self._assert_change(
            all_changes[1], self._expected_line_create(l), "Create", "line")
        self._assert_change(
            all_changes[2], {"ref": ("123", h.ref)}, "Update", "header")
        self._assert_change(
            all_changes[3], {"description": ("123", l.description)},
            "Update", "line")

    def test_matching(self):
        cash_book, supplier = self._make_cash_book_and_supplier()
        to_match_against = self._make_header(
            "pi", cash_book, supplier, goods=-100, vat=-20, total=-120)
        h = self._make_header("pi", cash_book, supplier, goods=100, vat=20, total=120)
        l = self._make_line(h)
        match = PurchaseMatching.objects.create(
            matched_by=h,
            matched_to=to_match_against,
            period=self.period,
            value=-100
        )
        self.assertEqual(len(PurchaseHeader.history.all()), 2)
        self.assertEqual(len(PurchaseMatching.history.all()), 1)

        h.ref = "1234"  # update the header
        h.save()
        h.refresh_from_db()
        l.description = "12345"  # update the line
        l.save()
        l.refresh_from_db()
        match.value = -120  # update the matching
        match.save()
        match.refresh_from_db()

        audit_transaction = AuditTransaction(
            h, PurchaseHeader, PurchaseLine, PurchaseMatching)
        self.assertEqual(len(audit_transaction.audit_header_history), 2)
        self.assertEqual(len(audit_transaction.audit_lines_history), 2)
        self.assertEqual(len(audit_transaction.audit_matches_history), 2)

        all_changes = audit_transaction.get_historical_changes()
        self.assertEqual(len(all_changes), 6)
        # Creation timestamps of header/line/match can coincide, so only
        # non-strict ordering is required here.
        self._assert_chronological(all_changes, strict=False)

        self._assert_change(
            all_changes[0], self._expected_header_create(h), "Create", "header")
        self._assert_change(
            all_changes[1], self._expected_line_create(l), "Create", "line")
        self._assert_change(
            all_changes[2], self._expected_match_create(match), "Create", "match")
        self._assert_change(
            all_changes[3], {"ref": ("123", h.ref)}, "Update", "header")
        self._assert_change(
            all_changes[4], {"description": ("123", l.description)},
            "Update", "line")
        self._assert_change(
            all_changes[5], {"value": ("-100.00", "-120.00")}, "Update", "match")
| 2.203125 | 2 |
ext/hal/nxp/mcux/scripts/import_mcux_sdk.py | lemrey/zephyr | 4 | 12762788 | #!/usr/bin/env python3
#
# Copyright (c) 2018, NXP
#
# SPDX-License-Identifier: Apache-2.0
"""Import files from an NXP MCUXpresso SDK archive into Zephyr
The MCUXpresso SDK provides device header files and peripheral drivers for NXP
Kinetis, LPC, and i.MX SoCs. Zephyr drivers for these SoCs are shims that adapt
MCUXpresso SDK APIs to Zephyr APIs.
This script automates updating Zephyr to a newer version of the MCUXpresso SDK.
"""
import argparse
import os
import re
import shutil
import sys
import tempfile
if "ZEPHYR_BASE" not in os.environ:
sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n")
exit(1)
ZEPHYR_BASE = os.environ["ZEPHYR_BASE"]
def get_soc_family(device):
    """Map an NXP device name to its SoC family directory name.

    Returns 'kinetis', 'lpc', or 'imx' for recognized device-name
    prefixes, and None for anything else.
    """
    prefix_to_family = (
        ('MK', 'kinetis'),
        ('LPC', 'lpc'),
        ('MIMX', 'imx'),
    )
    for prefix, family in prefix_to_family:
        if device.startswith(prefix):
            return family
    return None
def get_files(src, pattern):
    """Partition the entries of directory *src* by regex *pattern*.

    An entry matches when ``re.search(pattern, name)`` succeeds on its
    basename. Returns ``[matches, nonmatches]`` as two lists of full
    paths; a nonexistent *src* yields two empty lists.
    """
    matched = []
    unmatched = []
    if os.path.exists(src):
        for name in os.listdir(src):
            bucket = matched if re.search(pattern, name) else unmatched
            bucket.append(os.path.join(src, name))
    return [matched, unmatched]
def copy_files(files, dst):
    """Copy every file in *files* into directory *dst*.

    Creates *dst* (including parents) when needed. An empty *files*
    list is a no-op, so no empty destination directory is created.
    """
    if files:
        os.makedirs(dst, exist_ok=True)
        for src_file in files:
            shutil.copy2(src_file, dst)
def import_sdk(directory):
    """Copy device headers, drivers, and XIP files from an extracted
    MCUXpresso SDK tree under *directory* into $ZEPHYR_BASE.

    Per-device files go to ext/hal/nxp/mcux/devices/<device>; drivers
    shared across an SoC family go to ext/hal/nxp/mcux/drivers/<family>;
    per-board XIP configs go to ext/hal/nxp/mcux/boards/<board>.
    """
    devices = os.listdir(os.path.join(directory, 'devices'))
    boards = os.listdir(os.path.join(directory, 'boards'))
    for device in devices:
        family = get_soc_family(device)
        shared_dst = os.path.join(ZEPHYR_BASE, 'ext/hal/nxp/mcux/drivers', family)
        device_dst = os.path.join(ZEPHYR_BASE, 'ext/hal/nxp/mcux/devices', device)
        device_src = os.path.join(directory, 'devices', device)
        # Device headers are the files named after the device itself,
        # plus the generic fsl_device_registers header.
        device_pattern = "|".join([device, 'fsl_device_registers'])
        [device_headers, ignore] = get_files(device_src, device_pattern)
        drivers_src = os.path.join(directory, 'devices', device, 'drivers')
        # Clock and pinmux (iomuxc) drivers are device-specific; every
        # other driver in the folder is shared across the family.
        drivers_pattern = "fsl_clock|fsl_iomuxc"
        [device_drivers, shared_drivers] = get_files(drivers_src, drivers_pattern)
        xip_boot_src = os.path.join(directory, 'devices', device, 'xip')
        xip_boot_pattern = ".*"
        [xip_boot, ignore] = get_files(xip_boot_src, xip_boot_pattern)
        print('Importing {} device headers to {}'.format(device, device_dst))
        copy_files(device_headers, device_dst)
        print('Importing {} device-specific drivers to {}'.format(device, device_dst))
        copy_files(device_drivers, device_dst)
        print('Importing {} family shared drivers to {}'.format(family, shared_dst))
        copy_files(shared_drivers, shared_dst)
        print('Importing {} xip boot to {}'.format(device, shared_dst))
        copy_files(xip_boot, shared_dst)
    for board in boards:
        board_src = os.path.join(directory, 'boards', board)
        board_dst = os.path.join(ZEPHYR_BASE, 'ext/hal/nxp/mcux/boards', board)
        xip_config_src = os.path.join(board_src, 'xip')
        xip_config_pattern = ".*"
        [xip_config, ignore] = get_files(xip_config_src, xip_config_pattern)
        print('Importing {} xip config to {}'.format(board, board_dst))
        copy_files(xip_config, board_dst)
def parse_args():
    """Parse command-line arguments, unpack the SDK archive into a
    temporary directory, and import its contents into the Zephyr tree.

    The temporary directory is removed automatically when the
    TemporaryDirectory context exits.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-f", "--file", required=True,
                        help="MCUXpresso SDK archive file to import from")
    args = parser.parse_args()
    with tempfile.TemporaryDirectory() as d:
        print('Extracting MCUXpresso SDK into temporary directory {}'.format(d))
        shutil.unpack_archive(args.file, d)
        import_sdk(d)
def main():
    """Script entry point: delegate to parse_args()."""
    parse_args()
if __name__ == "__main__":
main()
| 2.4375 | 2 |
tests/test_game.py | cezhunter/tonto_card_game | 0 | 12762789 | # Copyright (c) 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from random import Random
import pytest
from tonto.card import Card
from tonto.deck import Deck
from tonto.exception import GameError
from tonto.game import Game
MESSAGE = {
"WELCOME": ["Welcome."],
"ROUND START": ["Round $current_round."],
"ROUND END": ["$round_winner won the round."],
"ROUND END TIE": ["Round was a tie."],
"TURN START": ["$current_player_name turn."],
"FALSE TURN END": [
("Negative, $current_player_name drew $current_card "
"bringing score to $current_player_score.")
],
"TRUE TURN END": [
("Positive, $current_player_name drew $current_card "
"bringing score to $current_player_score.")
],
"GAME OVER TIE": ["Game was a tie."],
"GAME OVER": ["Game over, $game_winner won."],
"EMPTY DECK": ["Deck empty."],
}
GAME_1_RESULTS = ("Welcome.\nRound 1.\nBerkelly turn.\n"
"Positive, Berkelly drew 10 of Spades "
"bringing score to 10.\nBerkelly won the round.\n"
"Game over, Berkelly won.\n1: Berkelly (10)\n")
GAME_2_RESULTS = ("Welcome.\nRound 1.\nBerkelly turn.\n"
"Positive, Berkelly drew King of Spades bringing "
"score to 13.\nBerkelly won the round.\nGame over, "
"Berkelly won.\n1: Berkelly (13)\n")
GAME_3_RESULTS = ("Welcome.\nRound 1.\nBerkelly turn.\n"
"Positive, Berkelly drew 9 of Clubs bringing score to 36.\n"
"Berkelly won the round.\nRound 2.\nBerkelly turn.\nDeck "
"empty.\nPositive, Berkelly drew 10 of Spades bringing "
"score to 46.\nBerkelly won the round.\nGame over, "
"Berkelly won.\n1: Berkelly (46)\n")
GAME_4_RESULTS = ("Welcome.\nRound 1.\nBerkelly turn.\nPositive, Berkelly "
"drew 10 of Spades bringing score to 10.\nCez turn."
"\nPositive, Cez drew Queen of Hearts bringing score to 36."
"\nTonto turn.\nPositive, Tonto drew Jack of Clubs "
"bringing score to 44.\nTonto won the round.\nRound 2.\n"
"Berkelly turn.\nNegative, Berkelly drew 6 of Spades "
"bringing score to 16.\nCez turn.\nPositive, Cez drew 5 "
"of Diamonds bringing score to 46.\nTonto turn.\nPositive, "
"Tonto drew 9 of Spades bringing score to 53.\nCez won the "
"round.\nRound 3.\nBerkelly turn.\nNegative, Berkelly drew "
"7 of Hearts bringing score to 37.\nCez turn.\nPositive, "
"Cez drew 4 of Hearts bringing score to 58.\nTonto turn."
"\nPositive, Tonto drew 6 of Hearts bringing score to 71."
"\nBerkelly won the round.\nGame over, Tonto won.\n1: "
"Tonto (71)\n2: Cez (58)\n3: Berkelly (37)\n")
GAME_5_RESULTS = ("Welcome.\nRound 1.\nBerkelly turn.\nPositive, Berkelly drew"
" 10 of Spades bringing score to 10.\nCez turn.\nNegative, "
"Cez drew 3 of Diamonds bringing score to 6.\nTonto turn."
"\nPositive, Tonto drew 5 of Diamonds bringing score to 10."
"\nRound was a tie.\nRound 2.\nBerkelly turn.\nPositive, "
"Berkelly drew King of Spades bringing score to 23.\nCez "
"turn.\nPositive, Cez drew Ace of Clubs bringing score to "
"62.\nTonto turn.\nNegative, Tonto drew 9 of Clubs "
"bringing score to 46.\nCez won the round.\nRound 3."
"\nBerkelly turn.\nNegative, Berkelly drew 3 of Diamonds "
"bringing score to 29.\nCez turn.\nPositive, Cez drew 8 "
"of Spades bringing score to 70.\nTonto turn.\nPositive, "
"Tonto drew 6 of Clubs bringing score to 70.\nTonto won "
"the round.\nGame was a tie.\n1: Cez (70)\n1: Tonto (70)"
"\n2: Berkelly (29)\n")
PLAYERS_1 = ["Berkelly"]
PLAYERS_2 = ["Berkelly", "Cez", "Tonto"]
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Tonto's Card Game"
__credits__ = ["<NAME>", "<NAME>"]
__email__ = "<EMAIL>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__status__ = "Production"
__version__ = "1.0.0"
def test_basic_game_1(monkeypatch):
    """Constructor validation and game lifecycle (play / new_game)."""
    # Suppress interactive prompts during play().
    monkeypatch.setattr("builtins.input", lambda x: "")
    with pytest.raises(GameError):
        Game([])
    with pytest.raises(GameError):
        Game(PLAYERS_1, max_rounds=0)
    game = Game(PLAYERS_1)
    assert game
    game.play()
    assert not game
    game.new_game()
    assert game
def test_basic_game_2(capsys, monkeypatch):
    """Single-player game with a seeded deck: output matches the two
    recorded transcripts across a play / new_game / play cycle."""
    monkeypatch.setattr("builtins.input", lambda x: "")
    rand = Random()
    rand.seed(1)  # fixed seed so the shuffled deck order is reproducible
    deck = Deck(random_instance=rand)
    deck.shuffle()
    game = Game(PLAYERS_1, deck=deck, message=MESSAGE, max_rounds=1)
    assert game
    game.play()
    assert not game
    captured = capsys.readouterr()
    assert captured.out == GAME_1_RESULTS
    game.new_game()
    assert game
    game.play()
    assert not game
    captured = capsys.readouterr()
    assert captured.out == GAME_2_RESULTS
def test__basic_game_3(capsys, monkeypatch):
    """One-card deck over two rounds exercises the empty-deck path.

    NOTE: the double underscore in the name looks like a typo; pytest
    still collects it.
    """
    monkeypatch.setattr("builtins.input", lambda x: "")
    rand = Random()
    rand.seed(1)
    deck = Deck(empty=True, random_instance=rand)
    deck.add_card(Card("Clubs", "9"))
    deck.shuffle()
    game = Game(PLAYERS_1, deck=deck, message=MESSAGE, max_rounds=2)
    game.play()
    captured = capsys.readouterr()
    assert captured.out == GAME_3_RESULTS
def test_basic_game_4(capsys, monkeypatch):
    """Three-player game with a seeded deck matches the recorded transcript."""
    monkeypatch.setattr("builtins.input", lambda x: "")
    rand = Random()
    rand.seed(1)
    deck = Deck(random_instance=rand)
    deck.shuffle()
    game = Game(PLAYERS_2, deck=deck, message=MESSAGE)
    game.play()
    captured = capsys.readouterr()
    assert captured.out == GAME_4_RESULTS
def test_basic_game_5(capsys, monkeypatch):
    """Hand-built (unshuffled) deck exercises round ties and a game tie."""
    monkeypatch.setattr("builtins.input", lambda x: "")
    # Cards are drawn from the end of this list first (last added, first drawn)
    # -- presumably; confirm against Deck.add_card semantics.
    cards = [
        Card("Clubs", "6"),
        Card("Spades", "8"),
        Card("Diamonds", "3"),
        Card("Clubs", "9"),
        Card("Clubs", "Ace"),
        Card("Spades", "King"),
        Card("Diamonds", "5"),
        Card("Diamonds", "3"),
        Card("Spades", "10"),
    ]
    deck = Deck(empty=True)
    for card in cards:
        deck.add_card(card)
    game = Game(PLAYERS_2, deck=deck, message=MESSAGE)
    game.play()
    captured = capsys.readouterr()
    assert captured.out == GAME_5_RESULTS
| 2.5 | 2 |
Spreader.py | Crystalzord/Spreader | 1 | 12762790 | <filename>Spreader.py<gh_stars>1-10
from configparser import ConfigParser
from emails.EmailSnailPro import EmailSnailPro
from sites.AgentJbzd import AgentJbzd
from utils import BrowserManager
from utils import Logger
if __name__ == '__main__' and __package__ is None:
    # Run only when executed directly as a script, not when imported.
    Logger.setup_logger()
    # Read config.ini for the e-mail provider and target-site URLs.
    config = ConfigParser()
    config.read('config.ini')
    email_provider_url = config['GENERAL']['EmailProvider']
    target_site_url = config['GENERAL']['TargetSite']
    # One shared browser driver is reused by both agents
    # (presumably a webdriver instance -- see BrowserManager).
    driver = BrowserManager.init_driver()
    # Obtain a disposable e-mail address, then use it to register
    # an account on the target site.
    email_agent = EmailSnailPro(driver, email_provider_url)
    temp_email = email_agent.get_temp_email()
    site_agent = AgentJbzd(driver, target_site_url)
    site_agent.register_account(temp_email)
| 1.96875 | 2 |
config/conf.py | LindgeW/MetaAug4NER | 2 | 12762791 | <reponame>LindgeW/MetaAug4NER
import os
import json
import argparse
def data_config(data_path):
    """Load a JSON configuration file and return it as a dict.

    Args:
        data_path: Path to a UTF-8 encoded JSON file.

    Returns:
        The parsed configuration options (echoed to stdout).

    Raises:
        FileNotFoundError: If ``data_path`` does not exist.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # Raise explicitly instead of using `assert`, which is silently
    # stripped when Python runs with optimizations (-O).
    if not os.path.exists(data_path):
        raise FileNotFoundError('config file not found: {}'.format(data_path))
    with open(data_path, 'r', encoding='utf-8') as fin:
        opts = json.load(fin)
    print(opts)  # echo the loaded options for logging/debugging
    return opts
def args_config():
    """Build and parse the NER training command line.

    Groups (by convention of the flags below): optimizer/scheduler
    settings, mixup/augmentation weights, batch/epoch sizes, BERT and
    RNN model dimensions, dropout rates, and checkpoint paths.

    Returns:
        argparse.Namespace: the parsed arguments (also printed to stdout).
    """
    parse = argparse.ArgumentParser('NER configuration')
    parse.add_argument('--cuda', type=int, default=-1, help='cuda device, default cpu')
    parse.add_argument('-lr', '--learning_rate', type=float, default=1e-3, help='learning rate of training')
    parse.add_argument('-bt1', '--beta1', type=float, default=0.9, help='beta1 of Adam optimizer 0.9')
    parse.add_argument('-bt2', '--beta2', type=float, default=0.99, help='beta2 of Adam optimizer 0.999')
    parse.add_argument('-eps', '--eps', type=float, default=1e-8, help='eps of Adam optimizer 1e-8')
    parse.add_argument('-warmup', '--warmup_step', type=int, default=10000, help='warm up steps for optimizer')
    parse.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay for Adam optimizer')
    parse.add_argument('--scheduler', choices=['cosine', 'inv_sqrt', 'exponent', 'linear', 'step', 'const'], default='linear', help='the type of lr scheduler')
    parse.add_argument('--grad_clip', type=float, default=5., help='the max norm of gradient clip')
    parse.add_argument('--bert_grad_clip', type=float, default=1., help='the max norm of gradient clip')
    parse.add_argument('--max_step', type=int, default=50000, help='the total steps of training')
    parse.add_argument('--patient', type=int, default=3, help='patient number in early stopping')
    parse.add_argument('--mix_alpha', type=float, default=7, help='mixup parameters')
    parse.add_argument('--aug_lambda', type=float, default=1.0, help='the weight of augmenting loss')
    parse.add_argument('--mixup_lambda', type=float, default=1.0, help='the weight of mixup loss')
    parse.add_argument('--use_aug_crf', action='store_true', default=False, help='use the augmentation crf solely')
    parse.add_argument('--batch_size', type=int, default=16, help='batch size of source inputs')
    parse.add_argument('--aug_batch_size', type=int, default=32, help='batch size of augmentation dataset, should be larger than batch size')
    parse.add_argument('--test_batch_size', type=int, default=64, help='test batch size')
    parse.add_argument('--epoch', type=int, default=20, help='number of training')
    parse.add_argument('--update_step', type=int, default=1, help='gradient accumulation and update per x steps')
    # NOTE(review): the default 'all' is not in choices ['vanilla', 'aug'];
    # argparse validates choices only for values supplied on the command
    # line, so the default slips through unvalidated -- confirm intent.
    parse.add_argument('--train_type', choices=['vanilla', 'aug'], default='all', help='the type of domain corpus')
    parse.add_argument('--genre', type=str, help='the type of domain corpus')
    parse.add_argument('--aug_genre', type=str, help='the type of domain corpus')
    parse.add_argument("--bert_lr", type=float, default=2e-5, help='bert learning rate')
    parse.add_argument("--bert_layer", type=int, default=8, help='the number of last bert layers')
    parse.add_argument('--bert_embed_dim', type=int, default=768, help='feature size of bert inputs')
    parse.add_argument('--hidden_size', type=int, default=400, help='feature size of hidden layer')
    parse.add_argument('--rnn_depth', type=int, default=1, help='number of rnn layers')
    # NOTE(review): store_true with default=True means this flag can never
    # be turned off from the command line -- confirm intent.
    parse.add_argument('--enc_bidi', action='store_true', default=True, help='is encoder bidirectional?')
    parse.add_argument('--embed_drop', type=float, default=0.5, help='drop rate of embedding layer')
    parse.add_argument('--rnn_drop', type=float, default=0.5, help='drop rate of rnn layer')
    parse.add_argument('--dropout', type=float, default=0.5, help='dropout ratio')
    parse.add_argument('--model_chkp', type=str, default='model.pkl', help='model saving path')
    parse.add_argument('--vocab_chkp', type=str, default='vocab.pkl', help='vocab saving path')
    args = parse.parse_args()
    print(vars(args))
    return args
| 2.265625 | 2 |
schieber/deck.py | JoelNiklaus/pyschieber | 0 | 12762792 | from schieber.suit import Suit
from schieber.card import Card
class Deck:
    """A 36-card Jass deck: values 6 through 14 (Ace) in each suit."""

    def __init__(self):
        """
        Initializes a deck of cards used for Jassen (from 6 to 10, Jack,
        Queen, King and Ace; each card in 4 suits)
        """
        # Suits vary in the outer loop, so the ordering matches the
        # original suit-by-suit construction.
        self.cards = [
            Card(suit=suit, value=value)
            for suit in Suit
            for value in range(6, 15)
        ]

    def __str__(self):
        return str([str(single_card) for single_card in self.cards])
| 3.28125 | 3 |
src/flashkit/cli/build/__init__.py | akashdhruv/FlashKit | 2 | 12762793 | """Perform actions related to building flash executables and directories"""
# type annotations
from __future__ import annotations
# internal libraries
from ...core.custom import DictApp
# external libraries
from cmdkit.app import ApplicationGroup
from cmdkit.cli import Interface
# commands
from . import jobs, port, scaling, simulation
COMMANDS: DictApp = {
'jobs': jobs.JobsBuildApp,
'port': port.PortBuildApp,
'scaling': scaling.ScalingBuildApp,
'simulation': simulation.SimulationBuildApp,
}
PROGRAM = f'flashkit build'
USAGE = f"""\
usage: {PROGRAM} [-h] <command> [<args>...]
{__doc__}\
"""
HELP = f"""\
{USAGE}
commands:
jobs {jobs.__doc__}
port {port.__doc__}
scaling {scaling.__doc__}
simulation {simulation.__doc__}
options:
-h, --help Show this message and exit.
Use the -h/--help flag with the above commands to
learn more about their usage.\
"""
class BuildApp(ApplicationGroup):
    """Application class for build command group.

    Dispatches the first positional argument to the matching
    sub-application in COMMANDS (jobs/port/scaling/simulation).
    """
    interface = Interface(PROGRAM, USAGE, HELP)
    commands = COMMANDS
    # The sub-command name is parsed first; remaining arguments are
    # handled by the selected sub-application.
    interface.add_argument('command')
| 2.40625 | 2 |
ondocker_testing/test_image/test_app.py | xpersky/one_takes | 0 | 12762794 | <filename>ondocker_testing/test_image/test_app.py
import urllib.request as req
from config import machine_ip
def get_code(url):
    """Return the HTTP status code for *url* on the app under test
    (served at http://<machine_ip>:5000)."""
    full_url = 'http://{}:5000{}'.format(machine_ip, url)
    return req.urlopen(full_url).getcode()
def test_start():
    """Marker test: announces the start of the suite."""
    print("Testing...")
def test_home():
    """The root route responds with HTTP 200."""
    assert get_code('/') == 200
    print('{:7} ... OK'.format('home'))
def test_index():
    """/index.html responds with HTTP 200."""
    assert get_code('/index.html') == 200
    print('{:7} ... OK'.format('index'))
def test_symbol():
    """/symbol.html responds with HTTP 200."""
    assert get_code('/symbol.html') == 200
    print('{:7} ... OK'.format('symbol'))
def test_myth():
    """/myth.html responds with HTTP 200."""
    assert get_code('/myth.html') == 200
    print('{:7} ... OK'.format('myth'))
| 2.5 | 2 |
praeteritum/utils/Delegator.py | NoMariusz/Praeteritum | 3 | 12762795 | <reponame>NoMariusz/Praeteritum
class Delegator():
    """Implements the Delegator pattern for subclasses.

    A subclass assigns a list of component instances to
    ``_delegate_subsystems`` (before calling ``super().__init__()``);
    every public method of those instances then becomes callable
    directly on the subclass instance, removing boilerplate
    forwarding methods.

    Note: a Delegator cannot delegate methods of children that
    themselves implement Delegator.
    """

    def __init__(self):
        # Fall back to an empty subsystem list when a subclass did not
        # provide one. getattr() with a default is used instead of a bare
        # attribute access: a bare access on the missing attribute would
        # be answered by __getattr__ below, defeating the `if not` check.
        if not getattr(self, '_delegate_subsystems', None):
            self._delegate_subsystems: list[object] = []
        # Precompute, per subsystem, the list of its public method names.
        self._subsystems_dicts = [
            {
                "subsystem": s,
                "methods": [
                    f for f in dir(s) if not f.startswith('_')
                ]
            }
            for s in self._delegate_subsystems
        ]

    def __getattr__(self, func):
        """Delegate unknown public attributes to the first subsystem
        providing a method named *func*.

        Raises:
            AttributeError: immediately for private/dunder names; from
                the returned callable when no subsystem provides *func*.
        """
        # Never delegate private or dunder names: keeps protocol probes
        # (copy/pickle/introspection) well-behaved and avoids bogus
        # callables being returned during __init__ bootstrap.
        if func.startswith('_'):
            raise AttributeError(func)

        def method(*args, **kwargs):
            # Subsystems that expose the requested method, in order.
            providers = [
                entry for entry in self._subsystems_dicts
                if func in entry["methods"]
            ]
            if providers:
                # Forward positional AND keyword arguments (the previous
                # implementation silently dropped keyword arguments).
                return getattr(providers[0]["subsystem"], func)(*args, **kwargs)
            raise AttributeError(
                '{!r} is not provided by any delegate subsystem'.format(func)
            )

        return method
| 3.171875 | 3 |
script.py | mdpanna/Python-zip-crack-1 | 0 | 12762796 | import zipfile
import argparse
from threading import Thread
import time
def extract_zip(zip_file, password):
    """Try each password from the wordlist against a ZIP archive.

    Extracts the archive into the current working directory with the
    first password that works, printing progress along the way.

    Args:
        zip_file: Path to the ZIP archive to crack.
        password: Path to a newline-separated password list file.

    Returns:
        The successful password as a string, or None when the wordlist
        is exhausted or a file could not be read.
    """
    try:
        # Open the archive and the wordlist once, instead of re-opening
        # the archive for every candidate as the old code did (it also
        # slept 0.3s per attempt, needlessly throttling the search).
        with zipfile.ZipFile(zip_file) as archive, \
                open(password, 'r') as wordlist:
            for line in wordlist:
                candidate = line.strip('\n')
                print(f'[-] password : {candidate}')
                try:
                    archive.extractall(pwd=candidate.encode())
                except Exception:
                    # Wrong password (RuntimeError) or undecryptable
                    # entry: move on to the next candidate.
                    continue
                print(f'[+] password found : {candidate} \n')
                return candidate
    except (OSError, zipfile.BadZipFile) as err:
        # Report unreadable archive/wordlist instead of failing silently.
        print(f'[!] error: {err}')
    return None
def main():
    """Parse CLI flags and launch the cracking worker thread.

    Command line:
        -z  path to the ZIP archive
        -d  path to the newline-separated password list
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-z', help='zip file path')
    parser.add_argument('-d', help='password list file path')
    args = parser.parse_args()
    zipname = args.z
    passname = args.d
    # NOTE(review): neither flag is marked required, so both may be None
    # here. The worker thread is never join()ed; the interpreter simply
    # waits for the non-daemon thread before exiting.
    t = Thread(target=extract_zip, args=(zipname, passname))
    t.start()
if __name__ == "__main__":
main()
| 3.34375 | 3 |
forms-flow-api/src/formsflow_api/services/external/analytics_api.py | sreehari-aot/forms-flow-ai | 0 | 12762797 | """This exposes the analytics API."""
from http import HTTPStatus
import requests
from flask import current_app
class RedashAPIService:  # pylint: disable=too-few-public-methods
    """This class manages all the Redash analytics service API calls."""
    @staticmethod
    def get_request(url_path, page_no=None, limit=None):
        """Perform a GET request against the Redash analytics API.

        Args:
            url_path: API path relative to ``<ANALYTICS_API_URL>/api/``.
            page_no: Optional page number; when given, ``limit`` is
                sent as the page size.
            limit: Page size, used only together with ``page_no``.

        Returns:
            The decoded JSON body on success, the string
            ``"unauthorized"`` on a 404 response, or ``None`` for any
            other failure status.
        """
        if page_no is None:
            url = f"{current_app.config.get('ANALYTICS_API_URL')}/api/{url_path}"
        else:
            url = (
                f"{current_app.config.get('ANALYTICS_API_URL')}"
                f"/api/{url_path}?page={page_no}&page_size={limit}"
            )
        # The API key is sent verbatim as the Authorization header value.
        analytics_admin_token = current_app.config.get("ANALYTICS_API_KEY")
        headers = {"Authorization": analytics_admin_token}
        # NOTE(review): no timeout is set, so a stalled Redash instance
        # can hang this request indefinitely -- consider timeout=.
        response = requests.get(url, headers=headers)
        if response.ok:
            return response.json()
        if response.status_code == HTTPStatus.NOT_FOUND:
            # NOTE(review): 404 is mapped to the string "unauthorized" --
            # confirm callers really expect this for a missing resource.
            return "unauthorized"
        return None
| 2.640625 | 3 |
dariah/core/utils.py | DARIAH-DE/Topics | 58 | 12762798 | """
dariah.topics.utils
~~~~~~~~~~~~~~~~~~~
This module implements helper functions for topic modeling.
"""
from typing import Generator, List
from pathlib import Path
import cophi
def read_mallet_topics(path: Path, num_words: int) -> Generator[List[str], None, None]:
    """Read a MALLET topics file.

    Each row of the file is tab-separated with the token sequence in
    the third column.

    Args:
        path: Filepath to the topics file.
        num_words: Number of words to keep per topic.

    Yields:
        A list of tokens, i.e. a topic truncated to ``num_words`` words.
    """
    with path.open("r", encoding="utf-8") as file:
        for row in file:
            sequence = row.split("\t")[2]
            # Honor the caller-supplied limit; the previous implementation
            # ignored `num_words` and always truncated to 200 tokens.
            yield list(cophi.text.utils.find_tokens(sequence))[:num_words]
| 3.0625 | 3 |
api/serializers.py | winny-/sillypaste | 3 | 12762799 | <filename>api/serializers.py
from django.contrib.auth.models import User
from core.models import Paste, ExpiryLog, Language
from rest_framework import serializers
from core.validators import validate_future_datetime
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Read-only view of the Django auth User (id, username, staff flag)."""
    class Meta:
        model = User
        fields = ['id', 'username', 'is_staff']
        read_only_fields = ['id', 'username', 'is_staff']
class PasteSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Paste objects; hit counters are server-managed."""
    class Meta:
        model = Paste
        fields = [
            'id',
            'title',
            'body',
            'timestamp',
            'expiry',
            'freeze_hits',
            'hits',
            'size',
            'author',
            'language',
        ]
        read_only_fields = ['id', 'freeze_hits', 'hits']
    def validate_expiry(self, value):
        """Reject expiry timestamps that are not in the future."""
        # validate_future_datetime raises on invalid input.
        validate_future_datetime(value)
        return value
class LanguageSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for syntax-highlighting Language entries."""
    class Meta:
        model = Language
        fields = ['id', 'name']
        read_only_fields = ['id']
class ExpiryLogSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for ExpiryLog records produced by the paste-expiry job."""
    class Meta:
        model = ExpiryLog
        fields = [
            'id',
            'expired_ids',
            'timestamp',
            'count',
            'reclaimed_space',
            'completed',
        ]
        read_only_fields = ['id']
| 2.21875 | 2 |
google_home_client/webhook.py | edrickwong/w3p | 0 | 12762800 | <filename>google_home_client/webhook.py
from flask import Flask
from flask_assistant import Assistant, ask, tell
import logging
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 1315
BUFFER_SIZE = 1024
# Fixed responses
GREETING = "Hi, how can I help you?"
UNKNOWN_OBJ_RESP = "Sorry, I can only detect bottles, cups, kettles and bowls right now. Please try a different object."
logging.getLogger('flask_assistant').setLevel(logging.DEBUG)
app = Flask(__name__)
assist = Assistant(app, '/')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
# keeps track of last message said by Google Home
msg_stack = []
def respond(msg):
    """Track *msg* on the history stack and return it as a follow-up prompt."""
    msg_stack.append(msg)
    return ask(msg)
@assist.action('greeting')
def greet_and_start():
    """Entry intent: greet the user and keep the conversation open."""
    return respond(GREETING)
@assist.action("object-to-detect")
def detect_object(object):
    """Intent handler: query the detection backend and relay its answer."""
    message = handle_detect_object(object)
    return respond(message)
def handle_detect_object(object):
    """Send the object name to the detection server over TCP, return its reply."""
    # NOTE(review): socket.send() requires bytes on Python 3; if `object`
    # is a str this raises TypeError -- confirm Python 2 usage or add
    # .encode()/.decode() here. `object` also shadows the builtin.
    s.send(object)
    message = s.recv(BUFFER_SIZE)
    return message
@assist.action("unknown-object")
def handle_unknown_object():
    """Intent handler for objects the detector does not support."""
    return respond(UNKNOWN_OBJ_RESP)
@assist.action("repeat")
def repeat():
    """Intent handler: say the most recent response again."""
    # msg_stack[-1:][0] is the last element; respond() pushes it again,
    # so chained "repeat" requests keep working.
    most_recent_msg = msg_stack[-1:][0]
    return respond(most_recent_msg)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger -- do not
    # run this configuration exposed to untrusted networks.
    app.run(debug=True, use_reloader=False)
| 2.65625 | 3 |
debug.py | bkerler/MobileDevice | 1 | 12762801 | <filename>debug.py
#!/usr/bin/python
# coding: utf-8
# Copyright (c) 2013 Mountainstorm
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from MobileDevice import *
from amdevice import *
from installationproxy import *
import os
import tempfile
import posixpath
class DebugServer(object):
    u'''Wraps the on-device com.apple.debugserver service; exposes the raw
    file descriptor (self.s) for a debugger front-end to talk to'''
    def __init__(self, amdevice):
        self.s = amdevice.start_service(u'com.apple.debugserver')
        if self.s is None:
            raise RuntimeError(u'Unable to launch:', u'com.apple.debugserver')
    def disconnect(self):
        # self.s is a plain fd, hence os.close rather than socket.close.
        os.close(self.s)
class DebugAppList(object):
    u'''Wraps com.apple.debugserver.applist, which reports the installed
    applications and their pids when running'''
    def __init__(self, amdevice):
        self.s = amdevice.start_service(u'com.apple.debugserver.applist')
        if self.s is None:
            raise RuntimeError(u'Unable to launch:', u'com.apple.debugserver.applist')
    def get_applist(self):
        u'''Retrieves an list of aplications on the device; with pids if their
        running
        Returns:
        An array of dict's (pid is optional) e.g.
        <key>displayName</key>
        <string>Phone</string>
        <key>executablePath</key>
        <string>/Applications/MobilePhone.app/MobilePhone</string>
        <key>isFrontApp</key>
        <false/>
        <key>pid</key>
        <integer>87</integer>
        '''
        retval = ''
        os.write(self.s, u'ping') # XXX we need to prod it somehow
        # Read the plist response until the service closes the stream.
        # NOTE(review): os.write(fd, u'...') and str-accumulation of
        # os.read() output are Python 2 idioms; this module predates Py3.
        while True:
            data = os.read(self.s, 4096)
            if len(data) == 0:
                break
            retval += data
        return dict_from_plist_encoding(retval, kCFPropertyListXMLFormat_v1_0)
    def disconnect(self):
        os.close(self.s)
class GDB(object):
    u'''Drives a local Apple cross-gdb session against a debugserver on the
    device: resolves the remote app path, builds symbol path substitutions,
    generates the gdb init script and launches gdb'''
    def __init__(self, dev, device_support_path, local_path, remote_path=None):
        self.dev = dev
        self._file = None
        self._substitutions = []
        self._runcmds = u''
        if device_support_path is None:
            device_support_path = dev.find_device_support_path()
        self._set_file(local_path, remote_path)
        # add standard substitutions
        # (one per top-level folder of the device-support Symbols cache)
        root = os.path.join(device_support_path, u'Symbols')
        for f in os.listdir(root):
            if os.path.isdir(os.path.join(root, f)):
                self._add_substitution(
                    u'/' + f,
                    posixpath.join(root, f)
                )
    def find_gdb(self):
        # Hard-coded Xcode location of the ARM cross-gdb binary.
        return u'/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/usr/libexec/gdb/gdb-arm-apple-darwin'
    def _set_debugserver_fd(self, fd):
        # now gdb's remote protocol requires us to setup a unix socket; when you
        # specify 'target remote-mobile <unix socket path>' it then opens this
        # and reads a control message; containing the fd to use to actually
        # talk to the debugserver. It then tkes this and does a standard
        # 'target remote-OSX filedesc:<fd>' command.
        #
        # As I'm lazy I'm just going to pass it the fd directly
        self._debugserver_fd = fd
    def _get_bundleid(self, local_path):
        # open the app and get its id
        f = open(os.path.join(local_path, u'Info.plist'), u'rb')
        plist = f.read()
        f.close()
        info = dict_from_plist_encoding(plist)
        return info[u'CFBundleIdentifier']
    def _set_file(self, local_path, remote_path=None):
        """Record (local, remote) program paths; when remote_path is not
        given, look it up on the device by the app's bundle id."""
        import pprint
        if remote_path is None:
            # we dont know where its gone; this should only apply for
            # non-jailbroken devices i.e. where you can only debug apps
            # thus we can get the local appid, then lookup where on the device
            # it is ... simples
            bundleid = self._get_bundleid(local_path)
            ip = InstallationProxy(self.dev)
            apps = ip.lookup_applications()
            ip.disconnect()
            #pprint.pprint(apps)
            for app in apps:
                if app[u'CFBundleIdentifier'] == bundleid:
                    remote_path = app[u'Path']
                    break
            if remote_path is None:
                raise RuntimeError(
                    u'Application %s, not installed on device' % bundleid
                )
        else:
            # we want the remote path to be the directory; for a .app thats
            # not a problem - but for a native macho we need adjust
            remote_path = posixpath.split(remote_path)[0]
        self._file = (local_path, remote_path)
        # we also want to add a substitution for our enclosing folder
        if local_path[-1] == u'/' or local_path[-1] == u'\\':
            local_path = local_path[:-1]
        local_folder = os.path.dirname(local_path)
        if remote_path[-1] == u'/' or remote_path[-1] == u'\\':
            remote_path = remote_path[:-1]
        remote_folder = posixpath.dirname(remote_path)
        self._add_substitution(local_folder, remote_folder)
    def _add_substitution(self, local_folder, remote_folder):
        self._substitutions.append((local_folder, remote_folder))
        # we can get errors if the remote folder is in /private/var - its
        # normally refered to by /var. So add another substitution if need be.
        # there may be other cases of this but this it the only one I've seen
        if remote_folder.startswith(u'/private/var'):
            remote_folder = remote_folder.replace(u'/private/var', u'/var/')
            self._substitutions.append((local_folder, remote_folder))
    def _get_initial_cmds(self):
        """Assemble the full gdb init script: base setup, the recorded
        path substitutions, the remote-target connection, run/attach
        commands, then shared-library load rules."""
        retval = u'''
set auto-raise-load-levels 1
set mi-show-protections off
set trust-readonly-sections 1
set inferior-auto-start-dyld 0
set minimal-signal-handling 1
set env NSUnbufferedIO YES
set sharedlibrary check-uuids on
set sharedlibrary load-rules \\".*\\" \\".*\\" container
set shlib-path-substitutions'''
        # add all the path substitutions
        for s in self._substitutions:
            retval += u' "%s" "%s"' % s
        retval += u'''
set remote max-packet-size 4096
set remote executable-directory %s
set remote noack-mode 1
target remote-macosx filedesc: %u
mem 0x1000 0x3fffffff cache
mem 0x40000000 0xffffffff none
mem 0x00000000 0x0fff none
''' % (self._file[1], self._debugserver_fd)
        retval += self._runcmds
        retval += u'''
set minimal-signal-handling 0
set inferior-auto-start-cfm off
set sharedLibrary load-rules dyld ".*libobjc.*" all dyld ".*CoreFoundation.*" all dyld ".*Foundation.*" all dyld ".*libSystem.*" all dyld ".*AppKit.*" all dyld ".*PBGDBIntrospectionSupport.*" all dyld ".*/usr/lib/dyld.*" all dyld ".*CarbonDataFormatters.*" all dyld ".*libauto.*" all dyld ".*CFDataFormatters.*" all dyld "/System/Library/Frameworks\\\\\\\\|/System/Library/PrivateFrameworks\\\\\\\\|/usr/lib" extern dyld ".*" all exec ".*" all
sharedlibrary apply-load-rules all
set inferior-auto-start-dyld 1
'''
        #print(retval)
        return retval
    def set_run(self, args=None):
        # we specify file when running; we can't when doing attach
        runcmd = u'file "%s"\nrun' % (self._file[0])
        if args is not None:
            for arg in args:
                runcmd += u'"%s"' % arg
        runcmd += u'\n'
        self._runcmds = runcmd
    def set_attach(self, pid):
        # XXX figure out what we should use instead of file - perhaps exec-file?
        self._runcmds = u'attach %s' % pid
    def run(self):
        """Start debugserver, write the init script to a temp file, run
        gdb until it exits, then clean up."""
        # create the temp file and fill it with the init commands
        cmdfd, path = tempfile.mkstemp()
        # start debug server
        dbg = DebugServer(self.dev)
        self._set_debugserver_fd(dbg.s)
        os.write(cmdfd, self._get_initial_cmds())
        os.close(cmdfd)
        # start gdb
        os.system(self.find_gdb() + u' --arch armv7 -q -x "%s"' % path)
        # cleanup
        dbg.disconnect()
        os.unlink(path)
os.unlink(path)
def register_argparse_debugserver(cmdargs):
    """Register the 'debug' sub-command (applist / gdb) on *cmdargs*,
    an argparse sub-parsers object."""
    import argparse
    import sys
    import imagemounter
    def load_developer_dmg(args, dev):
        # Ensure the developer disk image is mounted (non-advanced mode
        # only); the debugserver services only exist once it is mounted.
        if not args.advanced:
            # we only load in non-advanced mode
            try:
                # we're doing this as, for some reason, the checking load image
                # does isn;t very good - so if we don;t we end up transfering
                # the image every time; which is slow and generates tonnes of
                # log messages
                applist = DebugAppList(dev)
                applist.disconnect()
                # it's already loaded
            except:
                # its not ... so find and load the disk image
                # NOTE(review): bare except also hides unrelated failures.
                im = imagemounter.ImageMounter(dev)
                imagepath = None
                if args.device_support_path:
                    imagepath = dev.find_developer_disk_image_path(
                        args.device_support_path.decode(u'utf-8')
                    )
                im.mount(imagepath)
                im.disconnect()
    def cmd_applist(args, dev):
        # Print a pid / front-flag / name / path table of installed apps,
        # with column widths computed from the data.
        load_developer_dmg(args, dev)
        applist = DebugAppList(dev)
        al = applist.get_applist()
        applist.disconnect()
        rows = []
        colmax = [0, 0, 0]
        for app in al:
            # pid ('-' when the app is not running)
            pid = u'-'
            try:
                # XXX why does hasattr not work properly on this?
                pid = u'%u' % app[u'pid']
            except:
                pass
            if len(pid) > colmax[0]:
                colmax[0] = len(pid)
            # foreground
            foreground = u' '
            if app[u'isFrontApp']:
                foreground = u'*'
            # name
            name = app[u'displayName']
            if len(name) > colmax[1]:
                colmax[1] = len(name)
            # path
            path = app[u'executablePath']
            if len(path) > colmax[2]:
                colmax[2] = len(path)
            rows.append([pid, foreground, name, path])
        for row in rows:
            print(
                row[0].rjust(colmax[0]) + u' ' +
                row[1] + u' ' +
                row[2].ljust(colmax[1]) + u' ' +
                row[3].ljust(colmax[2])
            )
    def cmd_gdb(args, dev):
        # Launch gdb: attach to a pid when -p is given, else spawn anew.
        load_developer_dmg(args, dev)
        remote = None
        if args.remote is not None:
            remote = args.remote.decode(u'utf-8')
        gdb = GDB(
            dev,
            args.device_support_path.decode(u'utf-8'),
            args.program.decode(u'utf-8'),
            remote
        )
        if args.p:
            gdb.set_attach(int(args.p)) # attach
        else:
            gdb.set_run() # spawn
        gdb.run()
    debugparser = cmdargs.add_parser(
        u'debug',
        help=u'debugging commands; utilising debugserver in the developer .dmg'
    )
    debugparser.add_argument(
        u'-s',
        metavar=u'support-path',
        dest=u'device_support_path',
        help=u'specify a custom device support folder; instead of finding the default'
    )
    debugcmd = debugparser.add_subparsers()
    # applist command
    applistcmd = debugcmd.add_parser(
        u'applist',
        help=u'lists applications; which is front and pid if running'
    )
    applistcmd.set_defaults(func=cmd_applist)
    # gdb command
    gdbcmd = debugcmd.add_parser(
        u'gdb',
        help=u'launches gdb; connected to the device'
    )
    gdbcmd.add_argument(
        u'-p',
        metavar=u'pid',
        help=u'if specified we attempt to connect to this process rather than start a new instance'
    )
    gdbcmd.add_argument(
        u'program',
        help=u'local path to the program to debug; the file must already be on the device'
    )
    gdbcmd.add_argument(
        u'remote',
        nargs=u'?',
        help=u'if the program is a plain mach-o rather than a .app you also need to specify where on the device the file resides'
    )
    gdbcmd.set_defaults(func=cmd_gdb)
| 2.0625 | 2 |
python/testData/copyPaste/ReplaceSelection.after.py | jnthn/intellij-community | 2 | 12762802 | <filename>python/testData/copyPaste/ReplaceSelection.after.py
a = 1
b = 1
a = 1
b = 1
| 1.203125 | 1 |
Documentation/Scripts/generateSwagger.py | asaaki/ArangoDB | 15 | 12762803 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
### @brief creates swagger json files from doc headers of rest files
###
### find files in
### arangod/RestHandler/*.cpp
### js/actions/api-*.js
###
### @usage generateSwagger.py < RestXXXX.cpp > restSwagger.json
###
### @file
###
### DISCLAIMER
###
### Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
### Copyright holder is triAGENS GmbH, Cologne, Germany
###
### @author Dr. <NAME>
### @author <NAME>
### @author Copyright 2014, triAGENS GmbH, Cologne, Germany
################################################################################
import sys, re, json, string, os
rc = re.compile  # shorthand used by every pattern in this script
MS = re.M | re.S  # multi-line + dot-matches-newline, the default flag set here
################################################################################
### @brief swagger
################################################################################
swagger = {
    'apiVersion': '0.1',
    'swaggerVersion': '1.1',
    'basePath': '/',
    'apis': []
}

################################################################################
### @brief operation
###
### mutable module state: the swagger operation currently being assembled
################################################################################

operation = {}

################################################################################
### @brief C_FILE
###
### True once an @file marker is seen, i.e. the input is a C/C++ source whose
### doc comments are framed with '///' lines
################################################################################

C_FILE = False

################################################################################
### @brief DEBUG
###
### when True, state transitions are traced on stderr
################################################################################

DEBUG = False
################################################################################
### @brief trim_text
################################################################################
def trim_text(txt):
    """Strip trailing spaces and tabs from *txt* (also just before a final newline)."""
    return re.sub(r"[ \t]+$", "", txt)
################################################################################
### @brief parameters
###
### suche die erste {
### suche die letzten }
### gib alles dazwis<NAME>
################################################################################
def parameters(line):
    """Return the text between the first '{' and the last '}' of *line*,
    with back-tick markup rendered as literal {...} boundaries."""
    _, _, rest = line.partition('{')
    inner, _, _ = rest.rpartition('}')
    return BackTicks(inner, wordboundary=['{', '}'])
################################################################################
### @brief BackTicks
###
### `word` -> <b>word</b>
################################################################################
def BackTicks(txt, wordboundary = ['<em>','</em>']):
    """Replace `word` spans with wordboundary-wrapped markup (default <em>word</em>)."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)\`(.*?)\`([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    return pattern.sub('\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3', txt)
################################################################################
### @brief AsteriskItalic
###
### *word* -> <b>word</b>
################################################################################
def AsteriskItalic(txt, wordboundary = ['<em>','</em>']):
    """Replace *word* spans with wordboundary-wrapped markup (default <em>word</em>)."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)\*(.*?)\*([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    return pattern.sub('\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3', txt)
################################################################################
### @brief AsteriskBold
###
### **word** -> <b>word</b>
################################################################################
def AsteriskBold(txt, wordboundary = ['<em>','</em>']):
    """Replace **word** spans with wordboundary-wrapped markup (default <em>word</em>)."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)\*\*(.*?)\*\*([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    return pattern.sub('\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3', txt)
################################################################################
### @brief FA
###
### @FA{word} -> <b>word</b>
################################################################################
def FA(txt, wordboundary = ['<b>','</b>']):
    """Replace @FA{word} markers with wordboundary-wrapped markup (default <b>word</b>)."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)@FA\{(.*?)\}([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    return pattern.sub('\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3', txt)
################################################################################
### @brief FN
###
### @FN{word} -> <b>word</b>
################################################################################
def FN(txt, wordboundary = ['<b>','</b>']):
    """Replace @FN{word} markers with wordboundary-wrapped markup (default <b>word</b>).

    Unlike FA, a trailing delimiter character is required (no end-of-string match)."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)@FN\{(.*?)\}([<\s\.\),:;'"?!/-])""", re.M | re.S)
    return pattern.sub('\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3', txt)
################################################################################
### @brief LIT
###
### @LIT{word} -> <b>word</b>
################################################################################
def LIT(txt, wordboundary = ['<b>','</b>']):
    """Replace @LIT{word} markers with wordboundary-wrapped markup (default <b>word</b>)."""
    pattern = re.compile(r"""([\(\s'/">]|^)@LIT\{(.*?)\}([<\s\.\),:;'"?!/-])""", re.M | re.S)
    return pattern.sub('\\1' + wordboundary[0] + '\\2' + wordboundary[1] + '\\3', txt)
################################################################################
### @brief Typegraphy
################################################################################
def Typography(txt):
    """Strip the comment framing from a doc line and render its inline markup
    (back-ticks, asterisks, @FA/@FN/@LIT) as HTML."""
    # drop the leading '/// ' in C sources, and the trailing newline in any case
    if C_FILE:
        txt = txt[4:-1]
    else:
        txt = txt[0:-1]
    # apply the inline transforms in their original, order-sensitive sequence
    for transform in (BackTicks, AsteriskItalic, AsteriskBold, FN, LIT, FA):
        txt = transform(txt)
    # no way to find out the correct link for Swagger,
    # so replace all @ref elements with just "the manual"
    txt = re.compile(r"""@ref [a-zA-Z0-9]+""", re.M | re.S).sub("the manual", txt)
    return txt
################################################################################
### @brief InitializationError
################################################################################
class InitializationError(Exception): pass
################################################################################
### @brief StateMachine
################################################################################
class StateMachine:
    """Minimal state machine: each state is a callable taking `cargo` and
    returning (next_state, cargo); end states are invoked once, then run stops."""

    def __init__(self):
        self.handlers = []
        self.startState = None
        self.endStates = []

    def add_state(self, handler, end_state=0):
        """Register a state callable; pass end_state=1 to mark it terminal."""
        self.handlers.append(handler)
        if end_state:
            self.endStates.append(handler)

    def set_start(self, handler):
        """Select the state that run() begins with."""
        self.startState = handler

    def run(self, cargo=None):
        """Run from the start state until an end state is reached.

        Raises InitializationError when no start state or no end state has
        been configured, and RuntimeError when a handler returns a state
        that was never registered.
        """
        # NOTE: the original used the Python-2-only `raise Cls, "msg"` syntax,
        # which is a SyntaxError on Python 3; the call form below works on both.
        if not self.startState:
            raise InitializationError(
                "must call .set_start() before .run()")
        if not self.endStates:
            raise InitializationError(
                "at least one state must be an end_state")
        handler = self.startState
        while 1:
            (newState, cargo) = handler(cargo)
            if newState in self.endStates:
                newState(cargo)
                break
            elif newState not in self.handlers:
                raise RuntimeError("Invalid target %s" % newState)
            else:
                handler = newState
################################################################################
### @brief Regexen
################################################################################
class Regexen:
    """Pre-compiled regular expressions for every annotation the parser knows."""

    def __init__(self):
        # attribute name -> raw pattern; compiled once per instance
        patterns = {
            'DESCRIPTION_LI': r'^-\s.*$',
            'DESCRIPTION_SP': r'^\s\s.*$',
            'DESCRIPTION_BL': r'^\s*$',
            'EMPTY_LINE': r'^\s*$',
            'END_EXAMPLE_ARANGOSH_RUN': r'.*@END_EXAMPLE_ARANGOSH_RUN',
            'EXAMPLES': r'.*@EXAMPLES',
            'EXAMPLE_ARANGOSH_RUN': r'.*@EXAMPLE_ARANGOSH_RUN{',
            'FILE': r'.*@file',
            'RESTBODYPARAM': r'.*@RESTBODYPARAM',
            'RESTDESCRIPTION': r'.*@RESTDESCRIPTION',
            'RESTDONE': r'.*@RESTDONE',
            'RESTHEADER': r'.*@RESTHEADER{',
            'RESTHEADERPARAM': r'.*@RESTHEADERPARAM{',
            'RESTHEADERPARAMETERS': r'.*@RESTHEADERPARAMETERS',
            'RESTQUERYPARAM': r'.*@RESTQUERYPARAM{',
            'RESTQUERYPARAMETERS': r'.*@RESTQUERYPARAMETERS',
            'RESTRETURNCODE': r'.*@RESTRETURNCODE{',
            'RESTRETURNCODES': r'.*@RESTRETURNCODES',
            'RESTURLPARAM': r'.*@RESTURLPARAM{',
            'RESTURLPARAMETERS': r'.*@RESTURLPARAMETERS',
            'NON_COMMENT': r'^[^/].*',
        }
        for name, pattern in patterns.items():
            setattr(self, name, re.compile(pattern))
################################################################################
### @brief checks for end of comment
################################################################################
def check_end_of_comment(line, r):
    """True once the current doc comment is finished: for C sources that is the
    first non-'/' line, for JS sources an explicit @RESTDONE marker."""
    return r.NON_COMMENT.match(line) if C_FILE else r.RESTDONE.match(line)
################################################################################
### @brief next_step
################################################################################
def next_step(fp, line, r):
    """Map the current input line to the parser state that should handle it.

    Returns (state, (fp, line)), or (None, None) when no annotation matched."""
    global operation
    if not line:
        return eof, (fp, line)
    if check_end_of_comment(line, r):
        return skip_code, (fp, line)
    # order matters and mirrors the original elif chain exactly
    dispatch = (
        (r.EXAMPLE_ARANGOSH_RUN, example_arangosh_run),
        (r.RESTBODYPARAM, restbodyparam),
        (r.RESTDESCRIPTION, restdescription),
        (r.RESTHEADER, restheader),
        (r.RESTHEADERPARAM, restheaderparam),
        (r.RESTHEADERPARAMETERS, restheaderparameters),
        (r.RESTQUERYPARAM, restqueryparam),
        (r.RESTQUERYPARAMETERS, restqueryparameters),
        (r.RESTRETURNCODE, restreturncode),
        (r.RESTRETURNCODES, restreturncodes),
        (r.RESTURLPARAM, resturlparam),
        (r.RESTURLPARAMETERS, resturlparameters),
    )
    for regex, state in dispatch:
        if regex.match(line):
            return state, (fp, line)
    if r.EXAMPLES.match(line):
        operation['examples'] = ""  # reset before collecting example text
        return examples, (fp, line)
    return None, None
################################################################################
### @brief generic handler
################################################################################
def generic_handler(cargo, r, message):
    """Consume lines until the next annotation and dispatch to its state.

    `message` is only used for debug tracing when DEBUG is set."""
    global DEBUG
    if DEBUG:
        # py2/py3-safe replacement for the py2-only `print >> sys.stderr`
        sys.stderr.write(message + '\n')
    (fp, last) = cargo
    while 1:
        (next, c) = next_step(fp, fp.readline(), r)
        if next:
            return next, c
################################################################################
### @brief generic handler with description
################################################################################
def generic_handler_desc(cargo, r, message, op, para, name):
    """Accumulate free-form description text into para[name] until the next
    annotation starts, converting the doc markup to HTML on the way.

    cargo   -- (file, last_line) tuple threaded through the state machine
    r       -- compiled Regexen bundle
    message -- state name, traced on stderr when DEBUG is set
    op      -- key in the global `operation` dict to append `para` to
               (falsy to skip appending, e.g. when para IS the operation)
    para    -- dict being filled in
    name    -- key inside `para` that receives the accumulated text
    """
    global DEBUG, C_FILE, operation
    if DEBUG:
        # py2/py3-safe replacement for the py2-only `print >> sys.stderr`
        sys.stderr.write(message + '\n')
    (fp, last) = cargo
    inLI = False  # currently inside a list item
    inUL = False  # currently inside the generated <ul> wrapper
    while 1:
        line = fp.readline()
        (next, c) = next_step(fp, line, r)
        if next:
            # next annotation reached: finalize the accumulated text
            para[name] = trim_text(para[name])
            if op:
                operation[op].append(para)
            return next, c
        if C_FILE and line[0:4] == "////":
            continue  # skip doxygen banner lines in C sources
        line = Typography(line)
        if r.DESCRIPTION_LI.match(line):
            line = "<li>" + line[2:]
            inLI = True
        elif inLI and r.DESCRIPTION_SP.match(line):
            line = line[2:]  # continuation line of a list item
        elif inLI and r.DESCRIPTION_BL.match(line):
            line = ""  # blank line inside a list
        else:
            inLI = False
        if not inUL and inLI:
            line = " <ul class=\"swagger-list\">" + line
            inUL = True
        elif inUL and not inLI:
            line = "</ul> " + line
            inUL = False
        if not inLI and r.EMPTY_LINE.match(line):
            line = "<br><br>"  # paragraph break
        para[name] += line + ' '
################################################################################
### @brief restheader
################################################################################
def restheader(cargo, r=Regexen()):
    """Open a new swagger operation from a @RESTHEADER{method path, summary}
    annotation and register it under the global api list."""
    global swagger, operation
    (fp, last) = cargo
    fields = parameters(last).split(',')
    (method, path) = fields[0].split()
    summary = fields[1]
    words = summary.split()
    # camel-case nickname derived from the summary words
    nickname = words[0] + ''.join(word.capitalize() for word in words[1:])
    operation = {
        'httpMethod': method,
        'nickname': nickname,
        'parameters': [],
        'summary': summary,
        'notes': '',
        'examples': '',
        'errorResponses': []
    }
    swagger['apis'].append({
        'path': FA(path, wordboundary=['{', '}']),
        'operations': [operation]
    })
    return generic_handler(cargo, r, "resturlparameters")
################################################################################
### @brief resturlparameters
################################################################################
def resturlparameters(cargo, r=Regexen()):
    """Section header @RESTURLPARAMETERS: nothing to record, scan forward."""
    return generic_handler(cargo, r, "resturlparameters")
################################################################################
### @brief resturlparam
################################################################################
def resturlparam(cargo, r=Regexen()):
    """Parse a @RESTURLPARAM{name,type,flag} annotation into a swagger
    path parameter and collect its description text."""
    (fp, last) = cargo
    fields = parameters(last).split(',')
    required = 'true' if fields[2] == 'required' else 'false'
    para = {
        'name': fields[0],
        'paramType': 'path',
        'description': '',
        'dataType': fields[1].capitalize(),
        'required': required
    }
    return generic_handler_desc(cargo, r, "resturlparam", 'parameters', para, 'description')
################################################################################
### @brief restqueryparameters
################################################################################
def restqueryparameters(cargo, r=Regexen()):
    """Section header @RESTQUERYPARAMETERS: nothing to record, scan forward."""
    return generic_handler(cargo, r, "restqueryparameters")
################################################################################
### @brief restheaderparameters
################################################################################
def restheaderparameters(cargo, r=Regexen()):
    """Section header @RESTHEADERPARAMETERS: nothing to record, scan forward."""
    return generic_handler(cargo, r, "restheaderparameters")
################################################################################
### @brief restheaderparameters
################################################################################
def restheaderparam(cargo, r=Regexen()):
    """Parse a @RESTHEADERPARAM{name,type} annotation into a swagger
    header parameter and collect its description text."""
    (fp, last) = cargo
    fields = parameters(last).split(',')
    para = {
        'paramType': 'header',
        'dataType': fields[1].capitalize(),
        'name': fields[0],
        'description': ''
    }
    return generic_handler_desc(cargo, r, "restheaderparam", 'parameters', para, 'description')
################################################################################
### @brief restbodyparam
################################################################################
def restbodyparam(cargo, r=Regexen()):
    """Parse a @RESTBODYPARAM{name,type,flag} annotation into a swagger
    body parameter and collect its description text."""
    (fp, last) = cargo
    fields = parameters(last).split(',')
    required = 'true' if fields[2] == 'required' else 'false'
    para = {
        'name': fields[0],
        'paramType': 'body',
        'description': '',
        'dataType': fields[1].capitalize(),
        'required': required
    }
    return generic_handler_desc(cargo, r, "restbodyparam", 'parameters', para, 'description')
################################################################################
### @brief restqueryparam
################################################################################
def restqueryparam(cargo, r=Regexen()):
    """Parse a @RESTQUERYPARAM{name,type,flag} annotation into a swagger
    query parameter and collect its description text."""
    (fp, last) = cargo
    fields = parameters(last).split(',')
    required = 'true' if fields[2] == 'required' else 'false'
    para = {
        'name': fields[0],
        'paramType': 'query',
        'description': '',
        'dataType': fields[1].capitalize(),
        'required': required
    }
    return generic_handler_desc(cargo, r, "restqueryparam", 'parameters', para, 'description')
################################################################################
### @brief restdescription
################################################################################
def restdescription(cargo, r=Regexen()):
    """Collect @RESTDESCRIPTION text straight into the operation's 'notes'."""
    return generic_handler_desc(cargo, r, "restdescription", None, operation, 'notes')
################################################################################
### @brief restreturncodes
################################################################################
def restreturncodes(cargo, r=Regexen()):
    """Section header @RESTRETURNCODES: nothing to record, scan forward."""
    return generic_handler(cargo, r, "restreturncodes")
################################################################################
### @brief restreturncode
################################################################################
def restreturncode(cargo, r=Regexen()):
    """Parse a @RESTRETURNCODE{code} annotation and collect its reason text
    into the operation's errorResponses list."""
    (fp, last) = cargo
    returncode = {'code': parameters(last), 'reason': ''}
    return generic_handler_desc(cargo, r, "restreturncode", 'errorResponses', returncode, 'reason')
################################################################################
### @brief examples
################################################################################
def examples(cargo, r=Regexen()):
    """Collect @EXAMPLES text into the operation's 'examples' field."""
    return generic_handler_desc(cargo, r, "examples", None, operation, 'examples')
################################################################################
### @brief example_arangosh_run
################################################################################
def example_arangosh_run(cargo, r=Regexen()):
    """Inline a pre-generated arangosh example file into the operation, then
    skip input until the matching @END_EXAMPLE_ARANGOSH_RUN marker."""
    global DEBUG, C_FILE
    if DEBUG:
        # py2/py3-safe replacement for the py2-only `print >> sys.stderr`
        sys.stderr.write("example_arangosh_run\n")
    fp, last = cargo
    # new examples code TODO should include for each example own object in json file
    examplepath = os.path.join(os.path.dirname(__file__),
                               '../Examples/' + parameters(last) + '.generated')
    # `with` guarantees the example file is closed (the original leaked the handle)
    with open(examplepath) as examplefile:
        operation['examples'] += '<br><br><pre><code class="json">'
        operation['examples'] += examplefile.read()
    operation['examples'] += '</code></pre><br>'
    line = ""
    while not r.END_EXAMPLE_ARANGOSH_RUN.match(line):
        line = fp.readline()
        if not line:
            return eof, (fp, line)
    return examples, (fp, line)
################################################################################
### @brief eof
################################################################################
def eof(cargo):
    """Terminal state: emit the accumulated swagger structure as JSON on stdout."""
    global DEBUG, C_FILE
    if DEBUG:
        # py2/py3-safe replacement for the py2-only `print >> sys.stderr`
        sys.stderr.write("eof\n")
    # print(...) with a single argument behaves identically on Python 2 and 3
    print(json.dumps(swagger, indent=4, separators=(',', ': ')))
################################################################################
### @brief error
################################################################################
def error(cargo):
    """Terminal state for unparsable input: report the offending line on stderr."""
    global DEBUG, C_FILE
    if DEBUG:
        sys.stderr.write("error\n")
    # BUG FIX: the original referenced an undefined global `line` (NameError);
    # the offending line travels in the cargo tuple instead.
    (fp, last) = cargo
    sys.stderr.write('Unidentifiable line:\n' + last)
################################################################################
### @brief comment
################################################################################
def comment(cargo, r=Regexen()):
    """Scan a doc comment for annotations, dispatching to the matching state."""
    global DEBUG, C_FILE
    if DEBUG:
        # py2/py3-safe replacement for the py2-only `print >> sys.stderr`
        sys.stderr.write("comment\n")
    (fp, last) = cargo
    while 1:
        line = fp.readline()
        if not line:
            return eof, (fp, line)
        if r.FILE.match(line):
            # @file marks a C/C++ source: comments then end at non-'/' lines
            C_FILE = True
        next, c = next_step(fp, line, r)
        if next:
            return next, c
################################################################################
### @brief skip_code
###
### skip all non comment lines
################################################################################
def skip_code(cargo, r=Regexen()):
    """Skip over non-comment lines until the next doc comment begins."""
    global DEBUG, C_FILE
    if DEBUG:
        # py2/py3-safe replacement for the py2-only `print >> sys.stderr`
        sys.stderr.write("skip_code\n")
    (fp, last) = cargo
    if not C_FILE:
        # JS sources have no '////' framing: treat everything as comment context
        return comment((fp, last), r)
    while 1:
        line = fp.readline()
        if not line:
            return eof, (fp, line)
        if not r.NON_COMMENT.match(line):
            return comment((fp, line), r)
################################################################################
### @brief main
################################################################################
if __name__ == "__main__":
    # wire up the state machine: every parser state must be registered
    # before run(), otherwise transitions into it raise RuntimeError
    automat = StateMachine()
    automat.add_state(comment)
    automat.add_state(eof, end_state=1)
    automat.add_state(error, end_state=1)
    automat.add_state(example_arangosh_run)
    automat.add_state(examples)
    automat.add_state(skip_code)
    automat.add_state(restbodyparam)
    automat.add_state(restdescription)
    automat.add_state(restheader)
    automat.add_state(restheaderparam)
    automat.add_state(restheaderparameters)
    automat.add_state(restqueryparam)
    automat.add_state(restqueryparameters)
    automat.add_state(restreturncode)
    automat.add_state(restreturncodes)
    automat.add_state(resturlparam)
    automat.add_state(resturlparameters)
    automat.set_start(skip_code)
    # reads the annotated source from stdin and prints swagger JSON on stdout
    automat.run((sys.stdin, ''))
## -----------------------------------------------------------------------------
## --SECTION-- END-OF-FILE
## -----------------------------------------------------------------------------
## Local Variables:
## mode: outline-minor
## outline-regexp: "^\\(### @brief\\|## --SECTION--\\|# -\\*- \\)"
## End:
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 1.40625 | 1 |
ctm_saas_client/models/zoo_keeper.py | tadinve/ctm_python_client | 0 | 12762804 | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.30
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_saas_client.configuration import Configuration
class ZooKeeper(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # serialization metadata consumed by to_dict() and the generated API client
    swagger_types = {
        'zookeeper_id': 'int',
        'zookeeper_server_host': 'str',
        'zookeeper_admin_server_port': 'int',
        'zookeeper_client_port': 'int',
        'zookeeper_leader_port': 'int',
        'zookeeper_leader_election_port': 'int'
    }

    # python attribute name -> JSON field name in the wire format
    attribute_map = {
        'zookeeper_id': 'zookeeperId',
        'zookeeper_server_host': 'zookeeperServerHost',
        'zookeeper_admin_server_port': 'zookeeperAdminServerPort',
        'zookeeper_client_port': 'zookeeperClientPort',
        'zookeeper_leader_port': 'zookeeperLeaderPort',
        'zookeeper_leader_election_port': 'zookeeperLeaderElectionPort'
    }

    def __init__(self, zookeeper_id=None, zookeeper_server_host=None, zookeeper_admin_server_port=None, zookeeper_client_port=None, zookeeper_leader_port=None, zookeeper_leader_election_port=None, _configuration=None):  # noqa: E501
        """ZooKeeper - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        # private backing fields for the properties declared below
        self._zookeeper_id = None
        self._zookeeper_server_host = None
        self._zookeeper_admin_server_port = None
        self._zookeeper_client_port = None
        self._zookeeper_leader_port = None
        self._zookeeper_leader_election_port = None
        self.discriminator = None

        # only assign values that were actually provided, so unset fields
        # stay None and can be distinguished from explicit values
        if zookeeper_id is not None:
            self.zookeeper_id = zookeeper_id
        if zookeeper_server_host is not None:
            self.zookeeper_server_host = zookeeper_server_host
        if zookeeper_admin_server_port is not None:
            self.zookeeper_admin_server_port = zookeeper_admin_server_port
        if zookeeper_client_port is not None:
            self.zookeeper_client_port = zookeeper_client_port
        if zookeeper_leader_port is not None:
            self.zookeeper_leader_port = zookeeper_leader_port
        if zookeeper_leader_election_port is not None:
            self.zookeeper_leader_election_port = zookeeper_leader_election_port

    @property
    def zookeeper_id(self):
        """Gets the zookeeper_id of this ZooKeeper.  # noqa: E501

        zookeeper Id  # noqa: E501

        :return: The zookeeper_id of this ZooKeeper.  # noqa: E501
        :rtype: int
        """
        return self._zookeeper_id

    @zookeeper_id.setter
    def zookeeper_id(self, zookeeper_id):
        """Sets the zookeeper_id of this ZooKeeper.

        zookeeper Id  # noqa: E501

        :param zookeeper_id: The zookeeper_id of this ZooKeeper.  # noqa: E501
        :type: int
        """

        self._zookeeper_id = zookeeper_id

    @property
    def zookeeper_server_host(self):
        """Gets the zookeeper_server_host of this ZooKeeper.  # noqa: E501

        zookeeper Server Host  # noqa: E501

        :return: The zookeeper_server_host of this ZooKeeper.  # noqa: E501
        :rtype: str
        """
        return self._zookeeper_server_host

    @zookeeper_server_host.setter
    def zookeeper_server_host(self, zookeeper_server_host):
        """Sets the zookeeper_server_host of this ZooKeeper.

        zookeeper Server Host  # noqa: E501

        :param zookeeper_server_host: The zookeeper_server_host of this ZooKeeper.  # noqa: E501
        :type: str
        """

        self._zookeeper_server_host = zookeeper_server_host

    @property
    def zookeeper_admin_server_port(self):
        """Gets the zookeeper_admin_server_port of this ZooKeeper.  # noqa: E501

        zookeeper Admin Server Port  # noqa: E501

        :return: The zookeeper_admin_server_port of this ZooKeeper.  # noqa: E501
        :rtype: int
        """
        return self._zookeeper_admin_server_port

    @zookeeper_admin_server_port.setter
    def zookeeper_admin_server_port(self, zookeeper_admin_server_port):
        """Sets the zookeeper_admin_server_port of this ZooKeeper.

        zookeeper Admin Server Port  # noqa: E501

        :param zookeeper_admin_server_port: The zookeeper_admin_server_port of this ZooKeeper.  # noqa: E501
        :type: int
        """

        self._zookeeper_admin_server_port = zookeeper_admin_server_port

    @property
    def zookeeper_client_port(self):
        """Gets the zookeeper_client_port of this ZooKeeper.  # noqa: E501

        zookeeper Client Port  # noqa: E501

        :return: The zookeeper_client_port of this ZooKeeper.  # noqa: E501
        :rtype: int
        """
        return self._zookeeper_client_port

    @zookeeper_client_port.setter
    def zookeeper_client_port(self, zookeeper_client_port):
        """Sets the zookeeper_client_port of this ZooKeeper.

        zookeeper Client Port  # noqa: E501

        :param zookeeper_client_port: The zookeeper_client_port of this ZooKeeper.  # noqa: E501
        :type: int
        """

        self._zookeeper_client_port = zookeeper_client_port

    @property
    def zookeeper_leader_port(self):
        """Gets the zookeeper_leader_port of this ZooKeeper.  # noqa: E501

        zookeeper Leader Port  # noqa: E501

        :return: The zookeeper_leader_port of this ZooKeeper.  # noqa: E501
        :rtype: int
        """
        return self._zookeeper_leader_port

    @zookeeper_leader_port.setter
    def zookeeper_leader_port(self, zookeeper_leader_port):
        """Sets the zookeeper_leader_port of this ZooKeeper.

        zookeeper Leader Port  # noqa: E501

        :param zookeeper_leader_port: The zookeeper_leader_port of this ZooKeeper.  # noqa: E501
        :type: int
        """

        self._zookeeper_leader_port = zookeeper_leader_port

    @property
    def zookeeper_leader_election_port(self):
        """Gets the zookeeper_leader_election_port of this ZooKeeper.  # noqa: E501

        zookeeper Leader Election Port  # noqa: E501

        :return: The zookeeper_leader_election_port of this ZooKeeper.  # noqa: E501
        :rtype: int
        """
        return self._zookeeper_leader_election_port

    @zookeeper_leader_election_port.setter
    def zookeeper_leader_election_port(self, zookeeper_leader_election_port):
        """Sets the zookeeper_leader_election_port of this ZooKeeper.

        zookeeper Leader Election Port  # noqa: E501

        :param zookeeper_leader_election_port: The zookeeper_leader_election_port of this ZooKeeper.  # noqa: E501
        :type: int
        """

        self._zookeeper_leader_election_port = zookeeper_leader_election_port

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # recursively serialize nested swagger models (anything with to_dict)
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # generator boilerplate: only relevant if the model subclassed dict
        if issubclass(ZooKeeper, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ZooKeeper):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ZooKeeper):
            return True

        return self.to_dict() != other.to_dict()
| 1.4375 | 1 |
baelfire/tests/test_parrented.py | socek/baelfire | 0 | 12762805 | from mock import MagicMock
from baelfire.parrented import parrented
class TestParrented(object):
    """Unit tests for the `parrented` decorator: a decorated method should be
    delegated to the parent object's same-named method whenever obj.parent is
    set, and fall back to its own implementation otherwise."""

    @property
    def parented(self):
        # sample method used as the decoration target; it returns its receiver
        # so the tests can tell which object actually handled the call
        def method(self):
            return self
        return method

    def test_parrented_method_without_parent(self):
        """
        parrented should return own method if no parrent set.
        """
        obj = MagicMock()
        obj.parent = None
        method = parrented(self.parented)
        assert method(obj) == obj

    def test_parrented_method_with_parent(self):
        """
        parrented should return parent method if parrent is set.
        """
        parent = MagicMock()
        obj = MagicMock()
        obj.parent = parent
        method = parrented(self.parented)
        # the parent's same-named method is invoked with the child as argument
        assert method(obj) == parent.method.return_value
        parent.method.assert_called_once_with(obj)

    def test_parrented_method_property(self):
        """
        parrented should return parent property.
        """
        parent = MagicMock()
        # non-callable attribute: presumably returned as-is — TODO confirm
        # against baelfire.parrented's implementation
        parent.method = 15
        obj = MagicMock()
        obj.parent = parent
        method = parrented(self.parented)
        assert method(obj) == parent.method
| 3.25 | 3 |
smsaero/__init__.py | sheregeda/SMSAero | 6 | 12762806 | <reponame>sheregeda/SMSAero
#!/usr/bin/env python
# coding: UTF-8
import json
import time
import requests
import hashlib
import json
from datetime import datetime
from urllib.parse import urljoin
class SmsAeroError(Exception):
    """Base class for every error raised by the SmsAero client."""
class SmsAeroHTTPError(SmsAeroError):
    """Raised when the HTTP transport to the SMS Aero gate fails."""
class SmsAero(object):
    """Small client for the SMS Aero HTTP gate.

    Every call POSTs to the gate, asks for a JSON answer and returns the
    decoded response dict.  SmsAeroError is raised when the gate rejects a
    request or reports missing credits; SmsAeroHTTPError on transport errors.
    """

    URL_GATE = 'http://gate.smsaero.ru/'
    SIGNATURE = 'NEWS'
    DIGITAL = 0
    TYPE_SEND = 2

    def __init__(self, user, passwd, url_gate=URL_GATE, signature=SIGNATURE,
                 digital=DIGITAL, type_send=TYPE_SEND):
        self.user = user
        self.url_gate = url_gate
        self.signature = signature
        self.digital = digital
        self.type_send = type_send
        self.session = requests.session()
        # the gate expects the MD5 hex digest of the password, not clear text
        m = hashlib.md5(passwd.encode())
        self.passwd = m.hexdigest()

    @staticmethod
    def _attach_date(data, date):
        """Add an optional scheduled-send timestamp to the request payload.

        Raises SmsAeroError when `date` is set but not a datetime instance."""
        if date is not None:
            if isinstance(date, datetime):
                data['date'] = int(time.mktime(date.timetuple()))
            else:
                raise SmsAeroError('param `date` is not datetime object')

    def _request(self, selector, data):
        """POST `data` (plus auth fields) to `selector` and decode the answer."""
        data.update({
            'user': self.user,
            # BUG FIX: this value had been replaced by an invalid placeholder;
            # the gate authenticates with the MD5 digest stored in __init__
            'password': self.passwd,
            'answer': 'json',
        })
        url = urljoin(self.url_gate, selector)
        try:
            response = self.session.post(url, data=data)
        except requests.RequestException as err:
            raise SmsAeroHTTPError(err)
        if not response.status_code == 200:
            raise SmsAeroHTTPError('response status code is not 200')
        return self._check_response(response.text)

    def _check_response(self, content):
        """Decode the gate's JSON reply and turn error markers into exceptions."""
        try:
            response = json.loads(content)
            if 'result' in response and response['result'] == u'reject':
                raise SmsAeroError(response['reason'])
            elif 'result' in response and response['result'] == u'no credits':
                raise SmsAeroError(response['result'])
            return response
        except ValueError:
            # not JSON at all: the gate answers plain text for some errors
            if 'incorrect language' in content:
                raise SmsAeroError("incorrect language in '...' use \
the cyrillic or roman alphabet.")
            else:
                raise SmsAeroError('unexpected format is received')

    def send(self, to, text, date=None, signature=SIGNATURE,
             digital=DIGITAL, type_send=TYPE_SEND):
        """Send `text` to a single phone number `to`, optionally scheduled."""
        data = {
            'from': signature,
            'digital': digital,
            'type_send': type_send,
            'to': to,
            'text': text,
        }
        self._attach_date(data, date)
        return self._request('/send/', data)

    def sendtogroup(self, group, text, date=None, signature=SIGNATURE,
                    digital=DIGITAL, type_send=TYPE_SEND):
        """Send `text` to every subscriber of contact group `group`."""
        data = {
            'from': signature,
            'digital': digital,
            'type_send': type_send,
            'group': group,
            'text': text,
        }
        self._attach_date(data, date)
        return self._request('/sendtogroup/', data)

    def status(self, id):
        """Delivery status of message `id`."""
        return self._request('/status/', {'id': id})

    def checksending(self, id):
        """Status of a bulk sending job `id`."""
        return self._request('/checksending/', {'id': id})

    def balance(self):
        """Current account balance."""
        return self._request('/balance/', {})

    def checktarif(self):
        """Current tariff information."""
        return self._request('/checktarif/', {})

    def senders(self):
        """List of registered sender signatures."""
        return self._request('/senders/', {})

    def sign(self, sign):
        """Request registration of a new sender signature."""
        return self._request('/sign/', {'sign': sign})

    def checkgroup(self):
        """List of contact groups."""
        return self._request('/checkgroup/', {})

    def addgroup(self, group):
        """Create contact group `group`."""
        return self._request('/addgroup/', {'group': group})

    def delgroup(self, group):
        """Delete contact group `group`."""
        return self._request('/delgroup/', {'group': group})

    def addphone(self, phone, group=None):
        """Add `phone` to the contact list, optionally into `group`."""
        data = {'phone': phone} if group is None \
            else {'phone': phone, 'group': group}
        return self._request('/addphone/', data)

    def delphone(self, phone, group=None):
        """Remove `phone` from the contact list (or from `group` only)."""
        data = {'phone': phone} if group is None \
            else {'phone': phone, 'group': group}
        return self._request('/delphone/', data)

    def addblacklist(self, phone):
        """Add `phone` to the blacklist."""
        return self._request('/addblacklist/', {'phone': phone})
| 2.5 | 2 |
scripts/socket-server.py | botcs/ppcu-entry | 4 | 12762807 | # Echo server program
import socket
import pickle  # NOTE(review): unused in this script; kept deliberately.

HOST = ''      # Symbolic name meaning the local host (all interfaces).
PORT = 50007   # Arbitrary non-privileged port.

# Echo server: handle one client at a time, echoing received bytes back.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # Allow quick restarts without "Address already in use" errors.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((HOST, PORT))
    s.listen(1)
    while True:
        conn, addr = s.accept()
        # BUG FIX: the connection was never closed if recv()/send() raised;
        # the context manager guarantees cleanup.
        with conn:
            print('Connected by', addr)
            while True:
                data = conn.recv(1024)
                if not data:
                    break
                # sendall() guarantees the whole buffer is written;
                # send() may perform a partial write.
                conn.sendall(data)
| 3.09375 | 3 |
backend/serve.py | piotrgredowski/async-page-download | 0 | 12762808 | #!/usr/bin/env python3
from flask import Flask
from views import jobs
from utils import Config
from jobs_queue import queue
def register_blueprints(app):
    """Attach all view blueprints to *app* (currently only the jobs API,
    mounted under /api/jobs)."""
    app.register_blueprint(jobs.blueprint, url_prefix="/api/jobs")
def make_app(cfg_path):
    """Build the Flask application.

    Loads settings from the YAML file at *cfg_path* and attaches the shared
    jobs queue so view functions can reach it via the app object.
    """
    app = Flask("AsyncPage")
    app.cfg = Config()
    app.cfg.load_from_yaml(cfg_path)
    # Expose the module-level jobs queue on the app instance.
    app.queue = queue
    register_blueprints(app)
    return app
def main():
    """Entry point: build the app from config.yml and run the dev server
    with host/port/debug taken from the loaded configuration."""
    app = make_app("config.yml")
    app.run(host=app.cfg.get("app.server.host"),
            port=app.cfg.get("app.server.port"),
            debug=app.cfg.get("app.server.debug"))
if __name__ == "__main__":
main()
| 2.265625 | 2 |
rpirobot/robot_modules.py | climbus/RPiRobot | 0 | 12762809 | import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
class Led(object):
    """RGB led driven via three PWM GPIO pins.

    Channel values are inverted (``reverse``) before use, i.e. the drive is
    active-low (the original note says "katoda" wiring -- TODO confirm the
    exact LED wiring on the board).
    """

    color = (0, 0, 0)  # Current (R, G, B) target, each channel 0-255.
    __gpio_module__ = GPIO

    def __init__(self, red_pin, green_pin, blue_pin):
        """Configure the three channel pins as 100 Hz PWM outputs, duty 0."""
        self.gpio = self.__gpio_module__
        self.red_pin = red_pin
        self.green_pin = green_pin
        self.blue_pin = blue_pin
        self.gpio.setup(red_pin, self.gpio.OUT)
        self.gpio.setup(green_pin, self.gpio.OUT)
        self.gpio.setup(blue_pin, self.gpio.OUT)
        self.red = self.gpio.PWM(red_pin, 100)
        self.green = self.gpio.PWM(green_pin, 100)
        self.blue = self.gpio.PWM(blue_pin, 100)
        self.red.start(0)
        self.green.start(0)
        self.blue.start(0)

    def __del__(self):
        """Stop the PWM channels when the object is collected."""
        self.red.stop()
        self.green.stop()
        self.blue.stop()

    def set_color(self, color):
        """Store the RGB color as an (R, G, B) tuple, each channel 0-255."""
        self.color = color

    def on(self):
        """Apply the stored color to the PWM channels."""
        self.red.ChangeDutyCycle(self.prepare_data(self.reverse(self.color[0])))
        self.green.ChangeDutyCycle(self.prepare_data(self.reverse(self.color[1])))
        self.blue.ChangeDutyCycle(self.prepare_data(self.reverse(self.color[2])))

    def off(self):
        """Turn the led off.

        BUG FIX: previously the raw ``reverse(0)`` value (255) was passed
        straight to ``ChangeDutyCycle``, which only accepts 0-100; scale it
        through ``prepare_data`` exactly as ``on()`` does.
        """
        duty = self.prepare_data(self.reverse(0))
        self.red.ChangeDutyCycle(duty)
        self.green.ChangeDutyCycle(duty)
        self.blue.ChangeDutyCycle(duty)

    @staticmethod
    def reverse(color):
        """Invert a 0-255 channel value for the active-low led wiring."""
        return 255 - color

    @staticmethod
    def prepare_data(val):
        """Scale a 0-255 channel value to the 0-100 duty-cycle range."""
        return round((100 * val) / 255)
class Motor(object):
    """DC motor driven through an H-bridge: one PWM enable pin plus two
    direction input pins."""

    __gpio_module__ = GPIO

    def __init__(self, enable_pin, input1_pin, input2_pin, correction=1.0):
        """Configure the bridge pins.

        correction: multiplier applied to every requested speed (lets two
        mismatched motors be trimmed to run at the same effective speed).
        """
        self.gpio = self.__gpio_module__
        self.enable_pin = enable_pin
        self.input1_pin = input1_pin
        self.input2_pin = input2_pin
        self.gpio.setup(enable_pin, self.gpio.OUT)
        self.gpio.setup(input1_pin, self.gpio.OUT)
        self.gpio.setup(input2_pin, self.gpio.OUT)
        self.enable = self.gpio.PWM(enable_pin, 100)
        self.enable.start(0)
        self.correction = correction

    def _run(self, input1, input2, speed):
        """Set direction pins and apply *speed* (0-100) to the enable PWM.

        BUG FIX: backward() defaulted speed to None, which previously hit
        ``speed < 0`` and crashed with an opaque TypeError; validate first.
        """
        if speed is None or speed < 0 or speed > 100:
            raise TypeError("Speed must be between 0 and 100")
        self.gpio.output(self.input1_pin, input1)
        self.gpio.output(self.input2_pin, input2)
        self.enable.ChangeDutyCycle(speed * self.correction)

    def forward(self, speed):
        """Run motor forward at *speed* (0-100)."""
        self._run(1, 0, speed)

    def backward(self, speed=None):
        """Run motor backward at *speed* (0-100)."""
        self._run(0, 1, speed)

    def stop(self):
        """Stop the motor (zero duty cycle)."""
        self.enable.ChangeDutyCycle(0)
class Button(object):
    """Push-button input with edge detection (is_pressed) and
    press-and-hold detection (is_hold)."""

    __gpio_module__ = GPIO
    # Timestamp of when the current press was first observed (None = idle).
    time_set_status = None
    # Seconds the button must stay pressed before is_hold() fires.
    hold_time = 3

    def __init__(self, pin):
        """Configure *pin* as a GPIO input; starts in the released state."""
        self.gpio = self.__gpio_module__
        self.pin = pin
        self.gpio.setup(pin, self.gpio.IN)
        self.status = 0

    def is_pressed(self):
        """Return 1 only on the press transition (0 -> 1 edge).

        Returns 0 while the button stays pressed, and also 0 on the release
        transition (the new status itself is 0 then).
        """
        new_status = self.gpio.input(self.pin)
        if self.status != new_status:
            self.status = new_status
            return self.status
        else:
            return 0

    def is_hold(self):
        """Return 1 each time the button has been held for hold_time seconds.

        The timer restarts after firing, so a continuous hold fires roughly
        every hold_time seconds; releasing the button resets the timer.
        """
        status = self.gpio.input(self.pin)
        if status == 1:
            if not self.time_set_status:
                # First sample of this press: start timing.
                self.time_set_status = time.time()
            if time.time() - self.time_set_status > self.hold_time:
                # Held long enough; restart the window and report the hold.
                self.time_set_status = time.time()
                return 1
        else:
            self.time_set_status = None
        return 0
| 3.234375 | 3 |
ciukune/core/apps.py | an-otter-world/ciukune | 0 | 12762810 | <filename>ciukune/core/apps.py
"""Tovarich core app."""
from typing import Optional
from os.path import dirname
from ciukune.core.utils.apps import CiukuneAppConfig
from ciukune.core.utils.apps import GrapheneType
class AppConfig(CiukuneAppConfig):
"""AppConfig implementation for ciukune core."""
name = 'ciukune.core'
label = 'ciukune'
path = dirname(__file__)
@classmethod
def get_query(cls) -> Optional[GrapheneType]:
"""Get GraphQL query type for this Ciukune plugin."""
# pylint: disable=import-outside-toplevel
from ciukune.core.graphql import CoreQuery
return CoreQuery
@classmethod
def get_mutation(cls) -> Optional[GrapheneType]:
"""Get GraphQL mutation for this Ciukune plugin."""
# pylint: disable=import-outside-toplevel
from ciukune.core.graphql import CoreMutation
return CoreMutation
| 1.984375 | 2 |
_python/raspa_eel.py | luizeleno/pyjupiter | 0 | 12762811 | <reponame>luizeleno/pyjupiter
import json

import raspa_curso as rc

# (acronym, course code suffix, habilitation) for each EEL programme,
# scraped in this fixed order.
COURSES = [
    ('EF', '301', '0'),
    ('EM', '202', '0'),
    ('EA', '251', '0'),
    ('EB', '152', '0'),
    ('EP', '352', '4'),
    ('EQD', '052', '1'),
    ('EQN', '052', '4'),
]

for curso, codigo, hab in COURSES:
    url = (
        'https://uspdigital.usp.br/jupiterweb/listarGradeCurricular'
        f'?codcg=88&codcur=88{codigo}&codhab={hab}&tipo=N'
    )
    print(url)
    dados = rc.scrape_curso(url)
    # Persist the scraped curriculum for this course as JSON.
    with open(f'{curso}.json', 'w') as f:
        json.dump(dados, f)
| 2.5 | 2 |
NoteBooks/Curso de Ciencia de datos/Data_Vizualitation/line_charts.py | Alejandro-sin/Learning_Notebooks | 1 | 12762812 | from matplotlib import pyplot as plt
# US nominal GDP (billions of dollars), sampled each decade.
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]

# Line chart: years on the x-axis, GDP on the y-axis.
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
plt.title("Nominal GDP")
# BUG FIX: axis label typo "Billons" -> "Billions".
plt.ylabel("Billions of dollars")
plt.show()
| 3.75 | 4 |
Exec/testing/run_tests.py | darylbond/cerberus | 5 | 12762813 | <reponame>darylbond/cerberus<filename>Exec/testing/run_tests.py
#!/usr/bin/env python3
"""Run every test directory that provides a check.py via its local ``run``
script and report the failures at the end."""
import os
from subprocess import Popen, PIPE

tests = os.listdir(".")
failed = []
for test in tests:
    if os.path.isfile(test):
        continue
    local_files = os.listdir(test)
    if "check.py" not in local_files:
        continue
    print("running: ", test, flush=True)
    # BUG FIX: without stdout=PIPE/stderr=PIPE, communicate() returns
    # (None, None), so the failure report printed nothing useful.
    p = Popen("cd %s; sh run" % test, shell=True, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate()
    rc = p.returncode
    # BUG FIX: use != 0 so deaths by signal (negative return codes) are
    # also treated as failures.
    if rc != 0:
        print("failed:\n", err, "\n", output)
        failed.append({"test": test, "output": output, "error": err, "rc": rc})
    print("done", flush=True)

print("Completed with {} failed tests:".format(len(failed)))
for fail in failed:
    print(" ", fail["test"])
| 2.328125 | 2 |
day14/main.py | AXVin/aoc2021 | 0 | 12762814 | from collections import Counter, defaultdict
from utils import *
from copy import copy
with open("input.txt", "r") as file:
data = file.read()
@test(1588)
def part1(data):
    """AoC 2021 day 14 part 1: run 10 pair-insertion steps literally, then
    return (most common element count) - (least common element count)."""
    template, instruction_lines = data.strip().split("\n\n")
    # Map each pair to its replacement prefix: first char + inserted char.
    # The pair's second char is emitted by the *next* pair (or the tail).
    instructions = {}
    for line in instruction_lines.strip().split('\n'):
        pair, insert = line.strip().split(' -> ')
        instructions[pair] = pair[0] + insert
    template = template.strip()

    for _ in range(10):
        new_template = ''
        for i, char_a in enumerate(template):
            char_b = template[i + 1] if len(template) > i + 1 else None
            if char_b is None:
                # Last character has no pair partner; copy it through.
                new_template += char_a
                continue
            new_template += instructions.get(char_a + char_b, char_a)
        template = new_template

    # BUG FIX (hygiene): removed leftover debug print of pair counts.
    counts = Counter(template)
    return max(counts.values()) - min(counts.values())
@test(2188189693529)
def part2(data):
    """AoC 2021 day 14 part 2: 40 insertion steps tracked as pair counts
    (the literal string would be astronomically long)."""
    template_str, instruction_lines = data.strip().split("\n\n")
    # Each pair AB with rule "AB -> C" splits into the two pairs AC and CB.
    instructions = {}
    for line in instruction_lines.strip().split('\n'):
        pair, insert = line.strip().split(' -> ')
        instructions[pair] = (pair[0] + insert, insert + pair[1])

    stripped = template_str.strip()
    template = defaultdict(int)
    for a, b in zip(stripped, stripped[1:]):
        template[a + b] += 1
    template = dict(template)

    for _ in range(40):
        new_template = defaultdict(int)
        for key, value in template.items():
            if key in instructions:
                for new_key in instructions[key]:
                    new_template[new_key] += value
            else:
                new_template[key] = value
        template = dict(new_template)

    # Count each element once via its pair's first character; the final
    # character of the template never starts a pair, so add it back.
    # (Hygiene: removed leftover debug prints of the intermediate state.)
    counts = defaultdict(int)
    for key, value in template.items():
        counts[key[0]] += value
    counts[stripped[-1]] += 1

    return max(counts.values()) - min(counts.values())
print("Part 1:", "\u001b[36;1m", part1(data), "\u001b[0m")
print("Part 2:", "\u001b[36;1m", part2(data), "\u001b[0m")
| 3.015625 | 3 |
libraries/python/universal_utilities/files/useful_file_operations.py | utarsuno/quasar_source | 7 | 12762815 | # coding=utf-8
"""This module, useful_file_operations, simply contains lots of functions for file + directory operations."""
# Needed for copying files.
from shutil import copyfile
# Needed for running regular expressions.
import re
# Needed for system level operations.
import os
# Has super useful file + directory operations.
from pathlib import Path
# Used for advanced IDE typing.
from typing import List
# Used for recursively traversing directories.
import glob
# Needed for calculating the md5_checksum of files.
import hashlib
# Needed for running shell commands.
from libraries.universal_code.system_abstraction import bash_interactive as bash
# Needed for utility debugging calls. Such as termination on error with exception thrown.
from libraries.universal_code import debugging as dbg
# Used for copying files and other operations such as deleting directories.
import shutil
# Needed for compression.
from PIL import Image
# GLOBAL TODO (s):
# Add more safety checks on all functions.
# Create automated QA tests for all these functions!
# Automatically log in DB the results + time-taken for all QA tests!
''' ___ ___ ___ __ ___ __ __
| | | | | | | \ / |__ | | |\ | / ` | | / \ |\ | /__`
\__/ | | |___ | | | | \__/ | \| \__, | | \__/ | \| .__/
'''
def _is_valid_path_parameter(path: str) -> bool:
    """Validate that *path* is a usable (non-empty string) path argument.

    Returns True for a non-empty str. NOTE(review): despite the bool return
    annotation, invalid input does not return False -- dbg.raise_exception
    is expected to raise, so the trailing ``return False`` is effectively
    unreachable.
    """
    if path is not None and type(path) is str and path != '':
        return True
    # Existence check deliberately disabled: a path may be valid before the
    # file/directory exists.
    #if os.path.exists(path):
    #	return True
    dbg.raise_exception('The provided path {' + str(path) + '} of type {' + str(type(path)) + '} is not valid!')
    return False
''' __ __ ___ __ ___ __ __ ___ __
| \ | |__) |__ / ` | / \ |__) | |__ /__`
|__/ | | \ |___ \__, | \__/ | \ | |___ .__/ ______________________________________________________________ '''
# ------------------------------------------------ O P E R A T I O N S ------------------------------------------------
def directory_op_create(path: str) -> None:
    """Creates the directory at the provided path.

    Intermediate directories are created as needed; raises FileExistsError
    if the leaf directory already exists (os.makedirs default behaviour).
    """
    os.makedirs(path)
def directory_op_delete(path: str) -> None:
    """Recursively deletes the directory at the provided path and all of
    its contents."""
    shutil.rmtree(path)
def directory_op_copy(path_source: str, path_destination: str) -> None:
    """Recursively copies the source directory tree to the destination.

    The destination directory must not already exist (shutil.copytree
    default behaviour).
    """
    shutil.copytree(path_source, path_destination)
# --------------------------------------------------- S E T T E R S ---------------------------------------------------
# --------------------------------------------------- G E T T E R S ---------------------------------------------------
def directory_get_does_exist(path: str) -> bool:
    """Return True when *path* exists on disk and is a directory."""
    return os.path.exists(path) and os.path.isdir(path)
def directory_get_is_directory(path: str) -> bool:
    """Return True when the (validated) *path* points to a directory."""
    # _is_valid_path_parameter raises on bad input; the False fallback is
    # kept for interface compatibility.
    return os.path.isdir(path) if _is_valid_path_parameter(path) else False
def directory_get_basename(path: str) -> str:
    """Return the name of the last directory in *path*.

    A trailing '/' is stripped; if the path ends in a file name (contains
    a '.'), that final component is dropped first.
    """
    trimmed = path
    if trimmed.endswith('/'):
        trimmed = trimmed[:-1]
    elif '.' in trimmed:
        # Drop the file-name component so only directories remain.
        trimmed = trimmed[:trimmed.rfind('/')]
    return trimmed[trimmed.rfind('/') + 1:]
def directory_get_all_internal_directory_paths(path: str, recursively=False) -> list:
    """Returns all directory paths found inside the provided directory.

    The root *path* itself is excluded from the result. When
    recursively=True, nested directories at any depth are included.
    """
    directory_paths = []
    for full_path in glob.glob(path + '/**', recursive=recursively):
        # Ignore files, only look at directories.
        if not file_get_is_file(full_path):
            directory_paths.append(full_path)
    if path in directory_paths:
        directory_paths.remove(path)
    return directory_paths
def directory_get_all_internal_file_paths(path: str, recursively=False) -> List[str]:
    """Returns a list of all file paths found inside the provided directory.

    When recursively=True, files at any depth are included. NOTE(review):
    the ``return []`` branch is effectively unreachable because
    _is_valid_path_parameter raises on invalid input.
    """
    if _is_valid_path_parameter(path):
        file_paths = []
        for full_path in glob.glob(path + '/**', recursive=recursively):
            if not directory_get_is_directory(full_path):
                file_paths.append(full_path)
        return file_paths
    return []
'''___ ___ __
|__ | | |__ /__`
| | |___ |___ .__/ __________________________________________________________________________________________ '''
# ------------------------------------------------ O P E R A T I O N S ------------------------------------------------
def file_op_delete(path: str) -> None:
    """Deletes the file at *path* (raises FileNotFoundError if missing)."""
    os.remove(path)
def file_op_copy(path_source: str, path_destination: str) -> None:
    """Copies the source file's contents to the destination path
    (metadata such as permissions is not copied -- shutil.copyfile)."""
    copyfile(path_source, path_destination)
def file_op_convert_image_to_webp(path_source: str, path_destination:str) -> None:
    """Converts the provided PNG or JPG file to a compressed WebP format.

    BUG FIX: the original body only computed ``is_png`` and performed no
    conversion at all; this implements the conversion via Pillow.
    """
    is_png = '.png' in path_source
    image = Image.open(path_source)
    # PNGs may carry transparency; JPGs are flattened to RGB.
    image = image.convert('RGBA') if is_png else image.convert('RGB')
    image.save(path_destination, 'WEBP', quality=85)
def file_op_convert_png_to_compressed_jpg(path_source: str, path_destination=None) -> None:
    """Generates a compressed JPG file from the provided PNG file.

    NOTE(review): an intermediate '<name>.jpg' is written alongside the
    source and never cleaned up; path_destination defaulting to None will
    fail in Image.save -- callers are expected to always supply it.
    """
    jpg = path_source.replace('.png', '.jpg')
    Image.open(path_source).convert('RGB').save(jpg)
    Image.open(jpg).save(path_destination, quality=85, optimize=True, progressive=False)
def file_op_compress_image(path_source: str, path_destination=None) -> None:
    """Compresses the provided JPG or PNG image into *path_destination*.

    PNGs are re-saved with maximum zlib compression (level 9); JPGs are
    re-encoded at quality 85 with baseline (non-progressive) encoding.
    """
    is_png = '.png' in path_source
    image = Image.open(path_source)
    if is_png:
        image.save(path_destination, quality=85, optimize=True, compress_level=9)
    else:
        image.save(path_destination, quality=85, optimize=True, progressive=False)
def file_op_append_files_content(source_path: str, append_file_path: str) -> None:
    """Appends the contents of *append_file_path* onto *source_path*."""
    with open(append_file_path, 'r') as source_handle:
        payload = source_handle.read()
    with open(source_path, 'a') as target_handle:
        target_handle.write(payload)
def file_op_replace_text(path: str, text_to_find, text_to_replace_with) -> None:
    """Replaces every occurrence of *text_to_find* in the file with
    *text_to_replace_with*, rewriting the file in place."""
    with open(path) as handle:
        contents = handle.read()
    updated = contents.replace(text_to_find, text_to_replace_with)
    with open(path, 'w') as handle:
        handle.write(updated)
def file_op_create_or_override(path: str, file_text) -> None:
    """Creates (or overwrites) the file at *path* with *file_text*.

    file_text may be a single string or a list of strings, which are
    concatenated in order without any separator.
    """
    # isinstance instead of type()==list, and str.join instead of an
    # O(n^2) concatenation loop.
    raw_text = ''.join(file_text) if isinstance(file_text, list) else file_text
    with open(path, 'w+') as file_handler:
        file_handler.write(raw_text)
def file_op_replace_line_from_text_match(path: str, text_to_match, line_to_replace_with) -> None:
    """Replaces every line containing *text_to_match* with
    *line_to_replace_with* (a trailing newline is appended if absent)."""
    with open(path) as handle:
        original_lines = list(handle)
    replacement = line_to_replace_with
    if not replacement.endswith('\n'):
        replacement += '\n'
    updated_lines = [
        replacement if text_to_match in line else line
        for line in original_lines
    ]
    with open(path, 'w') as handle:
        handle.writelines(updated_lines)
# --------------------------------------------------- S E T T E R S ---------------------------------------------------
# --------------------------------------------------- G E T T E R S ---------------------------------------------------
def file_get_size_in_bytes(path: str) -> int:
    """Return the size of the file at *path* in bytes."""
    return os.path.getsize(path)
def file_get_is_file(path: str) -> bool:
    """Determines if the (validated) path points to a regular file.

    NOTE(review): the ``return False`` branch is effectively unreachable
    because _is_valid_path_parameter raises on invalid input.
    """
    if _is_valid_path_parameter(path):
        return os.path.isfile(path)
    return False
def file_get_extensions(path: str) -> List[str]:
    """Extracts all file extensions from *path*.

    e.g. 'archive.tar.gz' -> ['.tar', '.gz']; returns [] when the path has
    no extension.
    """
    if _is_valid_path_parameter(path):
        return Path(path).suffixes
    return []
def file_get_basename(path: str) -> str:
    """Extracts the final path component (works with '/' or '\\' separators).

    Returns '' when no component can be found (e.g. an empty path).
    """
    # Last run of non-separator characters, optionally followed by one
    # trailing separator at the end of the string.
    match = re.search(r'[^\\/]+(?=[\\/]?$)', path)
    return match.group(0) if match else ''
def file_get_contents_as_lines(path: str) -> list:
    """Return the file's contents as a list of lines (newlines preserved)."""
    with open(path, 'r') as file_handler:
        return file_handler.readlines()
def file_get_contents_as_string(path: str) -> str:
    """Return the entire file content as a single string.

    FIX: the return annotation previously claimed ``list`` although the
    function returns a str; the line-by-line concatenation loop is replaced
    with an equivalent single read.
    """
    with open(path, 'r') as file_handler:
        return file_handler.read()
def file_get_sha256_checksm(path, block_size=65536):
    """Return the SHA-256 hex digest of the file at *path*.

    (The misspelled name is kept: it is the public interface.)
    """
    digest = hashlib.sha256()
    with open(path, 'rb') as handle:
        while True:
            chunk = handle.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def file_get_md5_checksum(path, block_size=2 ** 20):
    """Return the MD5 hex digest of the file at *path*, or None on I/O error.

    FIXES: the file handle was never closed (now a context manager), and a
    bare ``except:`` silently swallowed every exception (narrowed to OSError,
    which IOError aliases in Python 3).
    """
    md5 = hashlib.md5()
    try:
        with open(path, 'rb') as handle:
            for data in iter(lambda: handle.read(block_size), b''):
                md5.update(data)
    except OSError:
        print('File \'' + path + '\' not found!')
        return None
    return md5.hexdigest()
################################################################################
# Temporary on hold.
'''
# "This module makes available standard errno system symbols. The value of each symbol is the corresponding integer value. The names and descriptions are borrowed from linux/include/errno.h, which should be pretty all-inclusive."
# List of error definitions : https://docs.python.org/3.1/library/errno.html
#import errno
# TODO : Create unit tests (but deleting unit tests will be required as well).
def create_file_if_it_does_not_exist(path: str, list_of_text) -> bool:
"""Creates the file with the provided path and list of strings that makeup the file. Returns a boolean indicating if successful.
:param path : The full path of the file.
:param list_of_text : A list of lines to compose the file of OR a string to be decomposed into a list of strings split by the '\n' character."""
if not _is_valid_path_parameter(path):
return False
# TODO : Add error checking to make sure that list_of_text is either string or a list of strings. But the debugging module needs to be completed first.
# if
# Thanks to stackoverflow post : https://stackoverflow.com/questions/10978869/safely-create-a-file-if-and-only-if-it-does-not-exist-with-python
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
try:
file_handler = os.open(path, flags)
except OSError as e:
if e.errno == errno.EEXIST: # Failed as the file already exists.
pass
else: # Something unexpected went wrong so re-raise the exception.
raise
else: # No exception, so the file must have been created successfully.
with os.fdopen(file_handler, 'w') as file_object:
# Using 'os.fdopen' converts the handle to an object that acts like a regular Python file object, and the 'with' context manager means the file will be automatically closed when we're done with it.
for line in list_of_text:
'''
# TODOS:!!!
'''
def get_file_last_extension(path: str) -> str:
"""Extracts the last extension from the provided path (if it exists, returns '' otherwise)."""
if _is_valid_path_parameter(path):
return Path(path).suffix
return ''
def download_file_from_url_to_path(url, save_path):
"""This will download a file from the provided URL into the provided save path.
:param url : The URL to download the file from.
:param save_path : The location to place the file.
:return: Void."""
make_any_missing_directories(save_path)
urllib.request.urlretrieve(url, save_path)
def get_file_as_binary_data(file_path):
"""Returns the the provided file as binary data.
:param file_path : The full path to the file to get binary data from.
:return: The raw binary of the provided file."""
return open(file_path, 'rb')
def does_file_exist(file_path):
"""Checks if the file exists at the provided file path.
:param file_path : The provided file path to check for a file for.
:return : Boolean indicating if a file exists at the provided file path or not."""
if os.path.exists(file_path):
return os.path.isfile(file_path)
return False
def create_text_file(file_path, content):
file = open(file_path, 'w')
for line in content.split('\n'):
file.write(line + '\n')
file.close()
def create_csv_file(file_path, content):
"""Creates the CSV file."""
lines = []
if type(content) == str:
lines = content.split('\n')
content = lines
with open(file_path, 'w+') as file_handler:
for l in content:
file_handler.write(l + '\n')
# DIRECTORY STUFF BELOW!
def get_all_directory_paths_from_directory(directory_path):
"""Returns all the directory paths from the directory path provided.
:param directory_path : The directory path to get all the directory paths from.
:return : A list of strings that each map to a full directory path for all directories in this directory."""
directory_paths = []
for full_path in glob.glob(directory_path + '/**', recursive=True):
# Ignore files, only look at directories.
if not is_file(full_path):
directory_paths.append(full_path)
return directory_paths
def get_all_sub_directory_paths_from_directory(directory_path):
"""Returns all the directory paths from the directory path provided.
:param directory_path : The directory path to get all the directory paths from.
:return : A list of strings that each map to a full directory path for all directories in this directory."""
directory_paths = []
for full_path in glob.glob(directory_path + '/**', recursive=False):
# Ignore files, only look at directories.
if not is_file(full_path):
directory_paths.append(full_path)
return directory_paths
# Inspiration/source for this function : https://stackoverflow.com/questions/4187564/recursive-dircmp-compare-two-directories-to-ensure-they-have-the-same-files-and
def are_two_directories_the_same(directory_path_0, directory_path_1):
"""Compares two directories for equality. Will compare file contents.
:param directory_path_0 : The first directory to compare against.
:param directory_path_1 : The second directory to compare against.
:return: Boolean indicating if the two directories are the same or not."""
compared = filecmp.dircmp(directory_path_0, directory_path_1)
if compared.left_only or compared.right_only or compared.diff_files or compared.funny_files:
return False
for subdir in compared.common_dirs:
if not are_two_directories_the_same(os.path.join(directory_path_0, subdir), os.path.join(directory_path_1, subdir)):
return False
return True
def delete_all_files_in_directory(directory_path):
"""Deletes all the files located in this directory. Including those in sub-directories."""
all_files = get_all_file_paths_from_directory(directory_path)
for f in all_files:
delete_file(f)
def get_all_non_empty_directory_paths_from_directory(directory_path):
"""Returns all the directory paths that contain at least one entity from the directory path provided.
:param directory_path : The root directory path to get all directory paths from.
:return: A list of strings that each map to a directory path (for a non-empty directory) for all directories in this directory."""
directory_paths = []
for full_path in glob.glob(directory_path + '/**', recursive=True):
# Ignore files, only look at directories.
if not is_file(full_path):
# Ignore empty directories.
if len(os.listdir(full_path)) > 0:
directory_paths.append(full_path)
return directory_paths
def make_any_missing_directories(path):
"""Make any directories that do not exist for the provided file path.
:param path : The path to ensure that all needed parent directories exist."""
if '.' in path:
ending = path.replace(os.path.dirname(path), '')
if '.' in ending:
pathlib.Path(os.path.dirname(path)).mkdir(parents=True, exist_ok=True)
else:
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
def get_easy_to_read_size_of_directory(directory):
"""Returns the size of the directory converted to the most logical unit.
:return: A string representation of the size of the provided directory."""
return str(humanize.naturalsize(sum(os.path.getsize(x) for x in iglob(directory + '/**'))))
def get_raw_size_as_bytes_of_directory(directory):
"""Returns the "RAWWWWWWWW"-(Gordan Ramsey) size of the directory.
:return: A string representation of the size of the provided directory."""
return str(sum(os.path.getsize(x) for x in iglob(directory + '/**')))
# ZIP stuff
def get_all_file_names_in_zip_directory(zip_file_path) -> List[str]:
"""Returns a list of all the file names inside of a zipped directory (without unzipping the directory)."""
file_names = []
output = bash_runner.run_bash_command_and_get_output(['unzip', '-l', zip_file_path])
zip_file_name = get_file_basename(zip_file_path).replace('.zip', '')
for l in output.split('\n'):
if zip_file_name in l:
sections = l.split()
if len(sections) == 4:
if sections[0] != '0' and 'Thumbs.db' not in sections[3]:
file_names.append(sections[3].replace(zip_file_name + '/', ''))
return file_names
'''
| 2.359375 | 2 |
edr_explorer/explorer.py | informatics-lab/edr_explorer | 0 | 12762816 | import ipywidgets as widgets
import cartopy.crs as ccrs
import geoviews as gv
import holoviews as hv
import numpy as np
import panel as pn
import param
from shapely.geometry import Polygon as sPolygon, LineString as sLineString
from .interface import EDRInterface
from .lookup import CRS_LOOKUP
class EDRExplorer(param.Parameterized):
"""
A `Panel` dashboard from which you can explore the data presented by an EDR Server.
"""
# Metadata widgets.
coll_uri = widgets.Text(placeholder='Specify an EDR Server...', description='Server')
coll = widgets.Dropdown(options=[], description='Collections', disabled=True)
locations = widgets.Dropdown(options=[], description='Locations', disabled=True)
datasets = widgets.SelectMultiple(options=[], description="Datasets", disabled=True)
start_time = widgets.Dropdown(options=[], description='Start Date', disabled=True)
end_time = widgets.Dropdown(options=[], description='End Date', disabled=True)
start_z = widgets.Dropdown(options=[], description='Z Lower', disabled=True)
end_z = widgets.Dropdown(options=[], description='Z Upper', disabled=True)
# Error display widgets.
connect_error_box = widgets.HTML("", layout=widgets.Layout(display="none"))
data_error_box = widgets.HTML("", layout=widgets.Layout(display="none"))
# Plot control widgets.
pc_times = widgets.SelectionSlider(options=[""], description="Timestep", disabled=True)
pc_zs = widgets.SelectionSlider(options=[""], description="Z Level", disabled=True)
pc_params = widgets.Dropdown(options=[], description="Parameter", disabled=True)
use_colours = pn.widgets.Checkbox(name="Use supplied colours", disabled=True)
use_levels = pn.widgets.Checkbox(name="Use supplied levels", disabled=True)
# Parameters for triggering plot updates.
_data_key = param.String("")
_colours = param.Boolean(use_colours.value)
_levels = param.Boolean(use_levels.value)
cmap = param.String("viridis")
alpha = param.Magnitude(0.85)
# Buttons.
connect_button = widgets.Button(description="Connect")
submit_button = widgets.Button(description="Submit", disabled=True)
dataset_button = widgets.Button(
description="Get Dataset",
disabled=True,
layout=widgets.Layout(top="-0.5rem")
)
# Lists and boxes aggregating multiple widgets.
wlist = [coll, locations, datasets, start_time, end_time, start_z, end_z] # Metadata widgets.
pwlist = [pc_times, pc_zs, pc_params] # Plot widgets.
pchecklist = [use_colours, use_levels]
wbox = widgets.VBox(wlist)
pwbox = pn.Row(pn.Column(*pwlist[:2]), pwlist[-1], pn.Column(*pchecklist))
# Map projection code
web_mercator_epsg = "EPSG:3857"
    def __init__(self, server_address=None):
        """
        Set up a new `Panel` dashboard to use to explore the data presented by an
        EDR Server. This constructs an instance of `.interface.EDRInterface` to submit
        requests to the EDR Server on the dashboard's behalf and displays results from
        these requests in the dashboard.

        Optionally pass the hostname of an EDR server via `server_address`. If specified,
        this value will pre-populate the `Server` field of the interface.

        """
        self.server_address = server_address
        if self.server_address is not None:
            # Pre-populate the `Server` text widget with the supplied host.
            self.coll_uri.value = self.server_address
        super().__init__()

        # Class properties (interface and retrieved dataset start unset).
        self._edr_interface = None
        self._dataset = None
        # Placeholder labels shown when a collection has no t/z coordinates.
        self._no_t = "No t values in collection"
        self._no_z = "No z values in collection"

        # Plot: a DynamicMap re-rendered by make_plot on parameter changes.
        self.plot = gv.DynamicMap(self.make_plot)

        # Button click bindings.
        self.connect_button.on_click(self._load_collections)
        self.submit_button.on_click(self._request_plot_data)
        self.dataset_button.on_click(self._get_dataset)

        # Watches on widgets: keep dependent dropdowns/sliders and the plot
        # in sync with user selections.
        self.coll.observe(self._populate_contents_callback, names='value')
        self.start_time.observe(self._filter_end_time, names='value')
        self.start_z.observe(self._filter_end_z, names='value')
        self.pc_times.observe(self._plot_change, names='value')
        self.pc_zs.observe(self._plot_change, names='value')
        self.pc_params.observe(self._plot_change, names='value')
        self.use_colours.param.watch(self._checkbox_change, "value", onlychanged=True)
        self.use_levels.param.watch(self._checkbox_change, "value", onlychanged=True)

        # Items for geometry-based queries (area polygons / corridor paths
        # drawn on the map -- populated by _query_tools).
        self._area_poly = None
        self._corridor_path = None
        self._area_stream = None
        self._corridor_stream = None
        self._query_tools()
@property
def edr_interface(self):
    """Handle to the `.interface.EDRInterface` used for EDR Server requests."""
    interface = self._edr_interface
    return interface
@edr_interface.setter
def edr_interface(self, value):
    """Replace the `.interface.EDRInterface` instance used for server requests."""
    self._edr_interface = value
@property
def dataset(self):
    """
    Well-known Python data object holding all the data represented by the
    current state of the dashboard's select widgets.
    """
    result = self._dataset
    return result
@dataset.setter
def dataset(self, value):
    """Store the most recently constructed dataset object."""
    self._dataset = value
@property
def layout(self):
    """
    Construct a layout of `Panel` objects to produce the EDR explorer dashboard.
    To view the dashboard:
        explorer = EDRExplorer()
        explorer.layout
    The layout is composed of two main elements:
      * a set of selector widgets in a column on the left that define the values passed
        in queries to the EDR Server via the `.interface.EDRInterface` instance
      * a plot on the right that displays graphical results from queries submitted to the
        EDR Server via the `.interface.EDRInterface` instance
    There are some extra elements too:
      * the widgets column on the left contains three buttons:
        * one to connect to the server at the URI specified in the `Server` text field widget,
        * one to submit a query to the EDR Server via the `.interface.EDRInterface` instance
          based on the values set in the selector widgets, and
        * one to request and return to the user all the data referenced by the current state
          of the dashboard's select widgets as a well-known Python data object (such as an Iris cube).
      * the widgets column on the left also contains two fields for displaying error messages
        when connecting to or retrieving data from the EDR Server. These are hidden by
        default and are made visible when there is a relevant error message to display. Once
        the error has been resolved the field will become hidden again.
      * the plot area on the right contains two plot control widgets to select specific data
        from queries submitted to the EDR Server to show on the plot.
      * the plot areas on the right also contains two checkboxes to select whether or not to
        show data on the plot rendered using colours and levels supplied in the query response.
    """
    # Server URI entry + connect button, with a (normally hidden) error readout.
    connect_row = pn.Row(
        pn.Column(self.coll_uri, self.connect_error_box),
        self.connect_button
    )
    # Query-control widgets with their own error readout, plus the two buttons.
    control_widgets = pn.Column(self.wbox, self.data_error_box)
    buttons = pn.Column(self.submit_button, self.dataset_button)
    control_row = pn.Row(control_widgets, buttons, align=("end", "start"))
    control_col = pn.Column(connect_row, control_row)
    # Web-map tiles underlay the dynamic data plot.
    tiles = gv.tile_sources.Wikipedia.opts(width=800, height=600)
    plot = tiles * self.plot
    plot_col = pn.Column(plot, self.pwbox)
    return pn.Row(control_col, plot_col).servable()
def _populate_error_box(self, error_box_ref, errors):
    """Show *errors* in the named error-box widget, or hide the box when
    *errors* is the empty string."""
    hidden_layout = widgets.Layout(
        display="none",
        visibility="hidden",
        border="none",
    )
    shown_layout = widgets.Layout(
        border="2px solid #dc3545",
        padding="0.05rem 0.5rem",
        margin="0 0.25rem 0 5.625rem",
        width="70%",
        overflow="auto",
        display="flex",
    )
    box = getattr(self, error_box_ref)
    box.value = errors
    box.layout = hidden_layout if errors == "" else shown_layout
def _load_collections(self, event):
    """
    Callback when the `connect` button is clicked.
    Set up the EDR interface instance and connect to the server's collections.
    """
    # Reset all widgets/errors before (re)connecting.
    self._clear_controls()
    server_loc = self.coll_uri.value
    self.edr_interface = EDRInterface(server_loc)
    error_box = "connect_error_box"
    if self.edr_interface.errors is None:
        # Independent check to see if we can clear the error box.
        self._populate_error_box(error_box, "")
    if self.edr_interface.json is not None and self.edr_interface.errors is None:
        # The only state in which the controls can be populated and enabled.
        # Options are (title, id) pairs so the widget shows titles but
        # reports IDs as values.
        self.coll.options = [(ct, cid) for (cid, ct) in zip(self.edr_interface.collection_ids, self.edr_interface.collection_titles)]
        self.coll.value = self.edr_interface.collection_ids[0]
        self._enable_controls()
    elif self.edr_interface.errors is not None:
        # We have known errors to show.
        self._populate_error_box(error_box, self.edr_interface.errors)
    else:
        # Something else has gone wrong, which we need to show.
        self._populate_error_box(error_box, "UnspecifiedError")
def _enable_controls(self):
"""Enable query control widgets in the left column."""
for widget in self.wlist:
widget.disabled = False
self.submit_button.disabled = False
def _clear_controls(self):
    """Clear state of all control and error display widgets and disable them."""
    for widget in self.wlist + self.pwlist:
        widget.disabled = True
        # Each widget type has a different notion of "empty" state.
        if isinstance(widget, widgets.SelectMultiple):
            widget.options = ("",)
            widget.value = ("",)
        elif isinstance(widget, widgets.SelectionSlider):
            widget.options = ("",)
            widget.value = ""
        else:
            widget.options = []
            widget.value = None
    # Un-tick and disable the plot-styling checkboxes.
    for box in self.pchecklist:
        box.value = False
        box.disabled = True
    self.submit_button.disabled = True
    self.dataset_button.disabled = True
    # Hide both error readouts.
    self._populate_error_box("connect_error_box", "")
    self._populate_error_box("data_error_box", "")
def _check_enable_checkboxes(self):
"""
Check if we can enable the checkboxes to specify the plot should
use colours and levels specified in the data JSON. This is only
possible if this information is present in the data JSON.
"""
box_disabled = self.edr_interface.data_handler.get_colours(self.pc_params.value) is None
for box in self.pchecklist:
box.disabled = box_disabled
def _checkbox_change(self, event):
"""
Bind a change in a checkbox to the relevant param object to trigger
a plot update.
"""
name = event.obj.name
if "colour" in name:
self._colours = event.new
elif "level" in name:
self._levels = event.new
def _enable_plot_controls(self):
"""Enable plot control widgets for updating the specific data shown on the plot."""
for widget in self.pwlist:
widget.disabled = False
self.dataset_button.disabled = False
self._check_enable_checkboxes()
def _populate_contents_callback(self, change):
    """
    Populate the options and values attributes of all the left column query control
    widgets when a collection provided by the EDR Server is specified.
    """
    collection_id = change["new"]
    if collection_id is not None:
        # Parameters and locations.
        self._populate_params(collection_id)
        locs = self.edr_interface.get_locations(collection_id)
        self.locations.options = locs
        # Times. Fall back to a sentinel entry when there is no time axis.
        if self.edr_interface.has_temporal_extent(collection_id):
            times = self.edr_interface.get_temporal_extent(collection_id)
        else:
            times = [self._no_t]
        self.start_time.options = times
        self.end_time.options = times
        # Vertical levels. Same sentinel fallback as for times.
        if self.edr_interface.has_vertical_extent(collection_id):
            zs = self.edr_interface.get_vertical_extent(collection_id)
        else:
            zs = [self._no_z]
        self.start_z.options = zs
        self.end_z.options = zs
def _populate_params(self, collection_id):
"""
Populate the `Datasets` widget with a descriptive list (names and units) of
the parameters provided by the selected collection.
"""
params_dict = self.edr_interface.get_collection_parameters(collection_id)
options = []
for k, v in params_dict.items():
choice = f'{v["label"].replace("_", " ").title()} ({v["units"]})'
options.append((choice, k))
self.datasets.options = options
def _filter_end_time(self, change):
"""
Only show end datetimes in the `End Date` widget that are later than
the value selected in the `Start Date` widget.
"""
start_time_selected = change["new"]
if start_time_selected is not None:
# Avoid errors when clearing widget state.
times = self.start_time.options
sel_idx = times.index(start_time_selected)
self.end_time.options = times[sel_idx:]
def _filter_end_z(self, change):
"""
Only show end vertical values in the `End Z` widget that are greater than
the value selected in the `Start Z` widget.
"""
start_z_selected = change["new"]
if start_z_selected is not None:
# Avoid errors when clearing widget state.
zs = self.start_z.options
sel_idx = zs.index(start_z_selected)
self.end_z.options = zs[sel_idx:]
def _get_dataset(self, _):
    """
    Callback when the `get dataset` button is clicked.
    Request from the EDR Server all data represented by the current states of
    the select widgets and provide this data as a well-known Python data
    object (such as an Iris Cube).
    """
    # XXX somewhere we should check if the server supports `Cube` queries,
    # and preferentially use that if available.
    from .dataset import make_dataset
    collection_id = self.coll.value
    params = self.edr_interface.get_collection_parameters(collection_id)
    keys = self.datasets.value
    # Map selected parameter IDs to their human-readable labels.
    names_dict = {k: v["label"] for k, v in params.items() if k in keys}
    dataset = make_dataset(self.edr_interface.data_handler, names_dict)
    self.dataset = dataset
def _geometry_stream_data(self, query_name):
"""
Return the data attribute of the holoviews stream referenced by `query_name`.
"""
ref = f"_{query_name}_stream"
geom_stream = getattr(self, ref)
return geom_stream.data
def _geometry_query_is_defined(self, query_name):
"""
Determine whether a geometry specified by `query_name` has been defined.
We determine this by checking if all the values in its x and y coords
are 0 - if they are, we assume it's in its default state and thus
undefined.
"""
data = self._geometry_stream_data(query_name)
return all(data["xs"][0]) and all(data["ys"][0])
def _hv_stream_to_wkt(self, query_name):
    """
    Convert the data points in the geometry specified by `query_name` to
    the appropriate Shapely geometry, and return the WKT string representation
    of the geometry. Returns ``(wkt_or_None, errors_or_None)``.
    """
    # An "area" query maps to a polygon; everything else to a line string.
    constructor = sPolygon if query_name == "area" else sLineString
    data = self._geometry_stream_data(query_name)
    xpoints, ypoints = np.array(data["xs"][0]), np.array(data["ys"][0])
    # Points are drawn on web-mercator tiles; reproject them to WGS84
    # lon/lat before building the geometry.
    wgs84_points = ccrs.PlateCarree().transform_points(
        ccrs.Mercator(), xpoints, ypoints
    )
    result = None
    errors = None
    try:
        geom = constructor(wgs84_points)
    except ValueError:
        # Shapely rejects degenerate inputs (e.g. too few points).
        errors = f"Invalid {query_name!r} geometry provided"
    else:
        result = geom.wkt
    return result, errors
def _request_plot_data(self, _):
    """
    Callback when the `submit` button is clicked.
    This makes a get data request to the EDR Server via the
    `.interface.EDRInterface` instance.
    """
    # Get selection widgets state for request.
    coll_id = self.coll.value
    param_names = self.datasets.value
    locations = self.locations.value
    start_date = self.start_time.value
    end_date = self.end_time.value
    start_z = self.start_z.value
    end_z = self.end_z.value
    # Define common query parameters. Sentinel values mean "no such axis",
    # in which case the parameter is omitted from the query entirely.
    query_params = {"crs": "EPSG:4326"}
    if start_date != self._no_t:
        query_params["datetime"] = "/".join([start_date, end_date])
    if start_z != self._no_z:
        query_params["z"] = [start_z, end_z]
    # Set query type: a drawn geometry takes precedence; otherwise fall back
    # to a locations query using the selected location IDs.
    query_type = None
    errors = None
    query_types = ["area", "corridor"]
    for qtype in query_types:
        if self._geometry_query_is_defined(qtype):
            print(f"Query type: {qtype}")
            query_type = qtype
            coords, errors = self._hv_stream_to_wkt(query_type)
            if coords is not None:
                query_params["coords"] = coords
    if query_type is None:
        query_type = "locations"
        query_params["loc_id"] = locations
    # Request dataset.
    self.edr_interface.query(coll_id, query_type, param_names, **query_params)
    # Collect coords and query errors, if present.
    all_errors = []
    if errors is not None:
        all_errors.append(errors)
    if self.edr_interface.errors is not None:
        all_errors.append(self.edr_interface.errors)
    if len(all_errors):
        self.edr_interface.errors = "\n".join(all_errors)
    error_box = "data_error_box"
    if self.edr_interface.errors is None:
        # Independent check to see if we can clear the error box.
        self._populate_error_box(error_box, "")
    if self.edr_interface.data_handler is not None and self.edr_interface.errors is None:
        # Generate and enable the plot controls.
        if self.edr_interface.has_temporal_extent(coll_id):
            plot_control_times = list(self.edr_interface.data_handler.coords["t"])
        else:
            plot_control_times = [self._no_t]
        self.pc_times.options = plot_control_times
        self.pc_times.value = plot_control_times[0]
        if self.edr_interface.has_vertical_extent(coll_id):
            plot_control_zs = list(self.edr_interface.data_handler.coords["z"])
        else:
            plot_control_zs = [self._no_z]
        self.pc_zs.options = plot_control_zs
        self.pc_zs.value = plot_control_zs[0]
        # Restrict the plot parameter selector to the queried parameters.
        plot_control_params = list(param_names)
        self.pc_params.options = list(filter(lambda o: o[1] in plot_control_params, self.datasets.options))
        self.pc_params.value = plot_control_params[0]
        self._enable_plot_controls()
    elif self.edr_interface.errors is not None:
        self._populate_error_box(error_box, self.edr_interface.errors)
    else:
        self._populate_error_box(error_box, "Uncaught error (data retrieval)")
def _plot_change(self, _):
    """
    Helper function to capture changes from either plot control widget
    and trigger an update of the plot.
    """
    param = self.pc_params.value
    t = self.pc_times.value
    z = self.pc_zs.value
    can_request_data = False
    self._check_enable_checkboxes()
    # Build the coordinate-value dict for the data key; only real
    # (non-sentinel, non-empty) t/z selections contribute.
    value_dict = {}
    if t not in (None, "", self._no_t):
        value_dict.update({"t": t})
        can_request_data = True
    if z not in (None, "", self._no_z):
        value_dict.update({"z": z})
        can_request_data = True
    if param is not None and can_request_data:
        # Assigning `_data_key` triggers `make_plot` via @param.depends.
        self._data_key = self.edr_interface.data_handler.make_key(param, value_dict)
def _query_tools(self):
    """Create the holoviews drawing tools (and their streams) used to define
    area and corridor geometry queries on the plot."""
    # Placeholder geometries; an all-zero geometry means "not yet drawn".
    self._area_poly = hv.Polygons(
        [[(0, 0), (0, 0)]]
    ).opts(
        line_color="gray", line_width=1.5, line_alpha=0.75,
        fill_color="gray", fill_alpha=0.3,
    )
    self._corridor_path = hv.Path(
        [[(0, 0), (0, 0)]]
    ).opts(
        color="gray", line_width=2, line_alpha=0.75,
    )
    # PolyDraw streams capture the user's drawn coordinates.
    self._area_stream = hv.streams.PolyDraw(
        source=self._area_poly,
        num_objects=1,
        tooltip="Area Query Tool"
    )
    self._corridor_stream = hv.streams.PolyDraw(
        source=self._corridor_path,
        num_objects=1,
        tooltip="Corridor Query Tool"
    )
@param.depends('_data_key', '_colours', '_levels', 'cmap', 'alpha')
def make_plot(self):
    """Show data from a data request to the EDR Server on the plot."""
    # Default: an invisible placeholder image so the DynamicMap always has
    # something to render before any data has been requested.
    showable = gv.Image(
        ([-8, -1], [53, 58], [[0, 0], [0, 0]]),  # Approximate UK extent.
        crs=CRS_LOOKUP["WGS_1984"],
    ).opts(alpha=0.0)
    if self._data_key != "":
        dataset = self.edr_interface.data_handler[self._data_key]
        opts = {"cmap": self.cmap, "alpha": self.alpha, "colorbar": True}
        colours = self.edr_interface.data_handler.get_colours(self.pc_params.value)
        if colours is not None:
            # Server-supplied colour metadata; apply only what the user
            # has opted into via the checkboxes.
            opts.update({"clim": (colours["vmin"], colours["vmax"])})
            if self.use_colours.value:
                opts["cmap"] = colours["colours"]
            if self.use_levels.value:
                opts["color_levels"] = colours["values"]
        error_box = "data_error_box"
        if self.edr_interface.data_handler.errors is None:
            # Independent check to see if we can clear the data error box.
            self._populate_error_box(error_box, "")
        if dataset is not None and self.edr_interface.data_handler.errors is None:
            showable = dataset.to(gv.Image, ['longitude', 'latitude']).opts(**opts)
        elif self.edr_interface.data_handler.errors is not None:
            self._populate_error_box(
                error_box,
                self.edr_interface.data_handler.errors
            )
        else:
            self._populate_error_box(
                error_box,
                "Unspecified error (plotting)"
            )
    # Overlay the (possibly empty) drawing-tool geometries on the image.
    return showable * self._area_poly * self._corridor_path
API/src/main/resources/images/_backup/_color/color.sikuli/color.py | MiguelDomingues/SikuliX1 | 1,746 | 12762817 | reg = Region(106,108,370,160)
img1 = "source_activate.jpg"
img2 = "source_activated.jpg"
button = "buttonactivate.png"
"""
m = find(button)
m.highlight(2)
exit()
"""
ib = Finder(Image.create(button))
ib.find(button)
print "button:", ib.next().getScore()
ib = Finder(Image.create(img1))
ib.find(button)
print "img1:", ib.next().getScore()
ib = Finder(Image.create(img2))
ib.find(button)
print "img2:", ib.next().getScore()
"""
print "button:", Image(button).find(button)
print "img1:", Image(img1).find(button)
print "img2:", Image(img2).find(button)
"""
| 2.625 | 3 |
Tracker_Development/Sequitr_Package_Scripts/lineage.py | The-Kristina/CellComp | 7 | 12762818 | <filename>Tracker_Development/Sequitr_Package_Scripts/lineage.py<gh_stars>1-10
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: Sequitr
# Purpose: Sequitr is a small, lightweight Python library for common image
# processing tasks in optical microscopy, in particular, single-
# molecule imaging, super-resolution or time-lapse imaging of cells.
# Sequitr implements fully convolutional neural networks for image
# segmentation and classification. Modelling of the PSF is also
# supported, and the library is designed to integrate with
# BayesianTracker.
#
# Authors: <NAME> (arl) <EMAIL>
#
# License: See LICENSE.md
#
# Created: 23/03/2018
#-------------------------------------------------------------------------------
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Track fate code marking an apoptotic cell; used by the plotter to style
# tree leaves. NOTE(review): assumed to match the fate labels produced by
# the `tracker` module -- confirm against that module's definitions.
FATE_APOPTOSIS = 5
import sys
# NOTE(review): path hack so the `Tracker_Development` package resolves when
# run from this directory; prefer a proper package install.
sys.path.append("../")
from Tracker_Development.Sequitr_Package_Scripts import tracker
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
class LineageTreeNode(object):
    """A node of a binary lineage tree, wrapping a single Track.

    Args:
        track: the Track object held by this node
        depth: depth of the node in the binary tree (should be > parent)
        root: whether this node is the root of its tree

    Properties:
        left, right: pointers to the child nodes (None when absent)
        leaf: True unless both children are present
        children: the [left, right] nodes, or [] for a leaf
        ID: the wrapped track's ID
        start, end: first and last frame of the wrapped track
    """

    def __init__(self, track=None, depth=0, root=False):
        assert(isinstance(root, bool))
        assert(depth >= 0)
        self.root = root
        self.left = None
        self.right = None
        self.track = track
        self.depth = depth

    @property
    def leaf(self):
        # A node is a leaf unless both children have been assigned.
        has_both_children = all([self.left, self.right])
        return not has_both_children

    @property
    def children(self):
        """Return the child nodes (empty list for a leaf)."""
        return [] if self.leaf else [self.left, self.right]

    @property
    def ID(self):
        return self.track.ID

    @property
    def start(self):
        """First frame of the wrapped track."""
        return self.track.n[0]

    @property
    def end(self):
        """Last frame of the wrapped track."""
        return self.track.n[-1]

    def to_dict(self):
        """Convert the whole tree (from this node onward) to a dictionary."""
        return tree_to_dict(self)
def tree_to_dict(root):
    """Convert a tree (from *root* onward) to a JSON-compatible dictionary.

    Traverses the tree recursively and returns a nested dictionary suitable
    for `json.dump`. The representation looks like:

        {
          "name": "1",
          "children": [
            {"name": "2"},
            {"name": "3"}
          ]
        }

    Args:
        root: a root LineageTreeNode

    Returns:
        a dictionary representation of the tree.
    """
    assert(isinstance(root, LineageTreeNode))
    tree = {"name": str(int(root.ID))}
    # NOTE: a dead commented-out block that attached extra per-track metadata
    # (x, y, t, cell_type, fate, label) was removed here; reinstate from
    # version control if that metadata is ever needed in the JSON output.
    if root.children:
        tree["children"] = [tree_to_dict(root.left), tree_to_dict(root.right)]
    return tree
def export_tree_to_json(tree, filename):
    """Write a lineage-tree dictionary to *filename* as indented JSON.

    See https://en.wikipedia.org/wiki/JSON for details of the format.
    TODO(arl): proper type checking here
    """
    import json

    assert(isinstance(tree, dict))
    assert(isinstance(filename, str))
    serialised = json.dumps(tree, indent=2, separators=(',', ': '))
    with open(filename, 'w') as json_file:
        json_file.write(serialised)
def export_all_trees(trees, export_dir):
    """Export every tree dictionary in *trees* to its own JSON file.

    Files are written to *export_dir* as ``tree_<index>_<rootname>.json``.

    Args:
        trees: iterable of tree dictionaries (as built by `tree_to_dict`)
        export_dir: directory the JSON files are written into
    """
    import os  # NOTE: `os` is not imported at this module's top level.

    for index, tree in enumerate(trees):
        # BUG FIX: the filename template was previously never formatted, so
        # every tree overwrote the single literal file "tree_{}_{}.json".
        tree_fn = "tree_{}_{}.json".format(index, tree.get("name", ""))
        export_tree_to_json(tree, os.path.join(export_dir, tree_fn))
class LineageTree(object):
    """ LineageTree
    Build a lineage tree from track objects.
    Args:
        tracks: a list of Track objects, typically imported from a json/xml file
    Methods:
        get_track_by_ID: return the track object with the corresponding ID
        create: create the lineage trees by performing a BFS
        plot: plot the tree/trees
    Notes:
        Need to update plotting and return other stats from the trees
    """

    def __init__(self, tracks):
        assert(isinstance(tracks, list))
        if not all([isinstance(trk, tracker.Track) for trk in tracks]):
            raise TypeError('Tracks should be of type Track')
        # sort the tracks by the starting frame
        self.tracks = sorted(tracks, key=lambda trk:trk.n[0], reverse=False)

    def get_track_by_ID(self, ID):
        """ return the track object with the corresponding ID """
        # NOTE(review): raises IndexError if the ID is unknown.
        return [t for t in self.tracks if t.ID==ID][0]

    def create(self):
        """ build the lineage tree """
        used = []
        self.trees = []
        # iterate over the tracks and add them into the growing binary trees
        for trk in self.tracks:
            if trk not in used:
                # TODO(arl): confirm that this is a root node, i.e. the parent
                # ID should be the same as the track ID or None
                if trk.ID != trk.parent and trk.parent is not None:
                    print ("Error with trk {}".format(trk.ID))
                root = LineageTreeNode(track=trk, root=True)
                used.append(trk)
                if trk.children:
                    # follow the tree here (breadth-first traversal)
                    queue = [root]
                    while len(queue) > 0:
                        q = queue.pop(0)
                        children = q.track.children
                        if children:
                            # make the left node, then the right
                            left_track = self.get_track_by_ID(children[0])
                            right_track = self.get_track_by_ID(children[1])
                            # set the children of the current node
                            d = q.depth + 1 # next level from parent
                            q.left = LineageTreeNode(track=left_track, depth=d)
                            q.right = LineageTreeNode(track=right_track, depth=d)
                            # append the left and right children to the queue
                            queue.append(q.left)
                            queue.append(q.right)
                            # flag as used, do not need to revisit
                            used.append(left_track)
                            used.append(right_track)
                # append the root node
                self.trees.append(root)
        return self.trees

    def plot(self):
        """ plot the trees """
        plotter = LineageTreePlotter()
        for t in self.trees:
            plotter.plot([t])

    @staticmethod
    def from_xml(filename, cell_type=None):
        """ create a lineage tree from an XML file """
        tracks = tracker.read_XML(filename, cell_type=cell_type)
        return LineageTree(tracks)
class LineageTreePlotter(object):
    """ Plotter for lineage trees.

       o-----------X
       |
    o--------o     o--------------
    |        |     |
    |        o------o
    ----o           |
    |               o--------------
    |
    o------------------------------ etc...

    Notes:
        This is ugly, and needs cleaning up!
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """ Reset the position iterator """
        # Running horizontal offset so successive trees are drawn side by side.
        self.y = 0

    def plot(self, tree):
        """Draw one tree (passed as a single-element list of root nodes) onto
        the current matplotlib axes using a breadth-first traversal."""
        queue, marked, y_pos = [], [], []
        #put the start vertex into the queue, and the marked list
        queue.append(tree[0])
        marked.append(tree[0])
        y_pos.append(0)
        # store the line coordinates that need to be plotted
        line_list = []
        text_list = []
        marker_list = []
        # now step through
        while len(queue) > 0:
            # pop the root from the tree
            node = queue.pop(0)
            y = y_pos.pop(0)
            # draw the root of the tree
            line_list.append(([y,y], [node.start,node.end]))
            marker_list.append((y, node.start,'k.'))
            # mark if this is an apoptotic tree
            if node.leaf:
                if node.track.fate == FATE_APOPTOSIS:
                    # Apoptotic leaves get a red cross and red label.
                    marker_list.append((y, node.end, 'rx'))
                    text_list.append((y, node.end, str(node.ID), 'r'))
                else:
                    marker_list.append((y, node.end, 'ks'))
                    text_list.append((y, node.end, str(node.ID), 'k'))
            if tree[0].ID == node.ID:
                # Label the root of the whole tree in blue.
                text_list.append((y, node.start, str(node.ID), 'b'))
            for child in node.children:
                if child not in marked:
                    # mark the children
                    marked.append(child)
                    queue.append(child)
                    # calculate the depth modifier (branch spacing halves
                    # with each generation)
                    depth_mod = 2./(2.**(node.depth-1.))
                    if child == node.children[0]:
                        y_pos.append(y+depth_mod)
                    else:
                        y_pos.append(y-depth_mod)
                    # plot a linking line to the children
                    line_list.append(([y, y_pos[-1]], [node.end, child.start]))
                    marker_list.append((y, node.end,'go'))
                    text_list.append((y_pos[-1],
                                      child.end-(child.end-child.start)/2.,
                                      str(child.ID), 'k'))
        # now that we have traversed the tree, calculate the span
        tree_span = []
        for line in line_list:
            tree_span.append(line[0][0])
            tree_span.append(line[0][1])
        min_x = min(tree_span)
        max_x = max(tree_span)
        # now do the plotting
        y_offset = self.y - min_x + 1
        for line in line_list:
            x = line[0]
            y = line[1]
            plt.plot([xx+y_offset for xx in x],y,'k-')
        # markers
        for marker in marker_list:
            plt.plot(marker[0]+y_offset,marker[1],marker[2])
        # labels
        for txt_label in text_list:
            plt.text(txt_label[0]+y_offset-0.1,
                     txt_label[1]-0.1, txt_label[2], fontsize=8,
                     path_effects=[PathEffects.withStroke(linewidth=1,foreground='w')],
                     color=txt_label[3])
        # update the position for next round
        self.y = y_offset + max_x + 1
if __name__ == "__main__":
    # No command-line behaviour; this module is import-only.
    pass
wrapper_function_for_job_arr.py | kjaehnig/GAT_SEBs | 0 | 12762819 | <reponame>kjaehnig/GAT_SEBs
import lightkurve as lk
import astropy.table as astab
import pandas as pd
import numpy as np
import astropy
import sys
from astropy.coordinates import SkyCoord
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from tqdm import tqdm
import warnings
import astropy.table as astab
from astropy.io import fits
from optparse import OptionParser
import helper_functions as hf
warnings.filterwarnings('ignore',
message="WARNING (theano.tensor.opt): Cannot construct a scalar test value from a test value with no size:"
)
import os
import pickle as pk
import pymc3 as pm
import pymc3_ext as pmx
import aesara_theano_fallback.tensor as tt
from celerite2.theano import terms, GaussianProcess
from pymc3.util import get_default_varnames, get_untransformed_name, is_transformed_name
import theano
import exoplanet as xo
import arviz as az
from corner import corner
def wrapper_function(index=0,
                     mf=2,
                     nt=1000,
                     nd=500,
                     nc=2,
                     sf=5,
                     ns=5):
    """Run the PyMC3 model for one TIC system, selected by job-array index.

    Args:
        index: position in the hard-coded `tic_systems_of_interest` list.
        mf: multiplicative factor applied to multivariate prior variances.
            NOTE(review): defaults to 2 here but the command-line option
            defaults to 1 -- confirm which is intended.
        nt: number of tuning draws.
        nd: number of posterior sample draws.
        nc: number of MCMC chains.
        sf: sparsification factor applied to the light curve.
        ns: number of sigma used for the isochrones BinMod distributions.
    """
    # TIC IDs this job array can process; `index` selects one of them.
    tic_systems_of_interest = [
        28159019,
        272074664,
        20215452,
        99254945,
        144441148,
        169820068,
        126232983,
        164458426,
        164527723,
        165453878,
        258108067,
        271548206,
        365204192
    ]
    # Imported lazily: the model module pulls in pymc3/theano, which is
    # slow and heavy to import at module load time.
    from pymc3_ind_model_rusty import load_construct_run_pymc3_model
    load_construct_run_pymc3_model(TIC_TARGET=tic_systems_of_interest[index],
                                   mult_factor=mf,
                                   Ntune=nt,
                                   Ndraw=nd,
                                   chains=nc,
                                   sparse_factor=sf,
                                   nsig=ns)
# Command-line options mirror `wrapper_function`'s keyword arguments so the
# parsed option namespace can be splatted straight into the call below.
result = OptionParser()
result.add_option('--index', dest='index', default=0, type='int',
                  help='indice of tic system array (defaults to 0)')
result.add_option("--mf", dest='mf', default=1, type='int',
                  help='multiplicative factor by which to increase multivariate prior variances (default: 1)')
result.add_option("--nt", dest="nt", default=1000, type='int',
                  help="number of tuning draws to perform during sampling (default: 1000)")
result.add_option("--nd", dest="nd", default=500, type='int',
                  help="number of sample draws to perform during sampling (default: 500)")
result.add_option("--nc", dest='nc', default=2, type='int',
                  help='number of chains to run during sampling (default: 2)')
result.add_option("--sf", dest='sf', default=5, type='int',
                  help='how sparse to make the lightcurve data before running pymc3 (default: 5)')
result.add_option("--ns", dest='ns', default=5, type='int',
                  help='number of sigma to consider in constructing isochrones BinMod distributions (default: 5)')

if __name__ == "__main__":
    # `opt.__dict__` maps option dest names to parsed values, matching the
    # keyword arguments of `wrapper_function`.
    opt,arguments = result.parse_args()
    wrapper_function(**opt.__dict__)
pastepwn/analyzers/phonenumberanalyzer.py | palaparthi/pastepwn | 0 | 12762820 | # -*- coding: utf-8 -*-
from .regexanalyzer import RegexAnalyzer
class PhoneNumberAnalyzer(RegexAnalyzer):
    """Analyzer to match international phone numbers."""

    def __init__(self, action):
        """Build the phone-number regex and register it with the base analyzer.

        Args:
            action: the action to execute when a paste matches.
        """
        # Country calling codes, adapted from:
        # https://stackoverflow.com/questions/2113908/what-regular-expression-will-match-valid-international-phone-numbers
        # Raw strings are used so regex escapes such as \+ and \d are not
        # (deprecatedly) interpreted as Python string escape sequences.
        country_code_regex = r"\+(9[976]\d|8[987530]\d|6[987]\d|5[90]\d|42\d|3[875]\d|2[98654321]\d|9[8543210]|8[6421]|6[6543210]|5[87654321]|4[987654310]|3[9643210]|2[70]|7|1)"
        # National significant number: 2-17 digits, each optionally preceded
        # by non-word separators (spaces, dashes, dots, ...). Adapted from
        # the same stackoverflow answer.
        national_number = r"(\W*\d){2,17}"
        # Anchor to start/end-of-string or whitespace so numbers embedded in
        # longer tokens are not matched.
        regex = r"(^|\s)" + country_code_regex + r"\s*" + national_number + r"($|\s)"
        super().__init__(action, regex)
| 3.421875 | 3 |
private_data.py | Yzma-Robotics/smart-banking-app | 2 | 12762821 | <gh_stars>1-10
#!/usr/bin/python
# SECURITY NOTE (review): credentials must not be hard-coded in source
# control -- load these from environment variables or a secrets store,
# and rotate any values that were ever committed.
admin_email="<EMAIL>"
admin_password="<PASSWORD>"
| 0.941406 | 1 |
tests/test_create.py | estenssoros/sqlwriter | 0 | 12762822 | <filename>tests/test_create.py<gh_stars>0
# -*- coding: utf-8 -*-
import unittest
from utils import DBRouter
class TestCreatePostgres(unittest.TestCase):
    """Smoke-test table creation against a live Postgres connection.

    Requires a reachable Postgres instance configured in `utils.DBRouter`.
    """

    def setUp(self):
        # DBRouter yields (cursor, connection) pairs keyed by flavour name.
        self.db = DBRouter('postgres')

    def test_create_table(self):
        curs, conn = self.db['postgres']
        # Drop any leftover table so the test is repeatable.
        curs.execute('DROP TABLE IF EXISTS test')
        conn.commit()
        sql = '''
        CREATE TABLE test (
            id SERIAL
            , astring VARCHAR(50)
            , aninteger INTEGER
            , afloat FLOAT
            , adate DATE
            , adatetime TIMESTAMP WITHOUT TIME ZONE
        )
        '''
        curs.execute(sql)
        conn.commit()

    def tearDown(self):
        self.db.close()
class TestCreateMySQL(unittest.TestCase):
    """Smoke-test database and table creation against a live MySQL server."""

    def setUp(self):
        self.db = DBRouter('mysql')

    def test_create_dbmysql(self):
        # Runs before the table test (unittest orders methods alphabetically).
        curs, conn = self.db['mysql']
        curs.execute('CREATE DATABASE IF NOT EXISTS sqlwriter')
        conn.commit()

    def test_create_tablemysql(self):
        curs, conn = self.db['mysql']
        curs.execute('DROP TABLE IF EXISTS sqlwriter.test')
        conn.commit()
        # NOTE(review): the DROP targets `sqlwriter.test` but the CREATE is
        # unqualified -- this relies on the connection's default schema being
        # `sqlwriter`; confirm.
        sql = '''
        CREATE TABLE test (
            id SERIAL
            , astring VARCHAR(50)
            , aninteger INTEGER
            , afloat FLOAT
            , adate DATE
            , adatetime DATETIME
        )
        '''
        curs.execute(sql)
        conn.commit()

    def tearDown(self):
        self.db.close()
class TestCreateMsSQL(unittest.TestCase):
    """Smoke-test database and table creation against a live SQL Server."""

    def setUp(self):
        self.db = DBRouter('mssql')

    def test_create_dbmssql(self):
        _, conn = self.db['mssql']
        # Autocommit so CREATE/DROP DATABASE run outside an explicit
        # transaction (SQL Server disallows them inside one).
        conn.autocommit(True)
        curs = conn.cursor()
        curs.execute("IF EXISTS(select * from sys.databases where name='sqlwriter') DROP DATABASE sqlwriter")
        conn.commit()
        curs.execute('CREATE DATABASE sqlwriter')
        conn.commit()

    def test_create_tablemssql(self):
        curs, conn = self.db['mssql']
        curs.execute("IF OBJECT_ID('sqlwriter.dbo.test') IS NOT NULL DROP TABLE sqlwriter.dbo.test")
        conn.commit()
        sql = '''
        CREATE TABLE sqlwriter.dbo.test (
            id INT IDENTITY(1,1)
            , astring NVARCHAR(50)
            , aninteger INT
            , afloat FLOAT
            , adate DATE
            , adatetime DATETIME
        )
        '''
        curs.execute(sql)
        conn.commit()

    def tearDown(self):
        self.db.close()
if __name__ == '__main__':
    # Run the connection-dependent creation tests directly.
    unittest.main()
| 2.921875 | 3 |
taxcalc/tests/accuracy.py | jlyons871/Tax-Calculator | 0 | 12762823 | <reponame>jlyons871/Tax-Calculator<filename>taxcalc/tests/accuracy.py
import numpy as np
import pandas as pd
import h5py as h5
import os
import os.path as op
def main(sas_output_path, py_output_dir, rerun=False):
    '''Compute the per-variable difference between SAS and Python output.

    SAS output is read from *sas_output_path* (an HDF5 file) and Python
    output from the CSV files contained in *py_output_dir*. The per-variable
    error totals are written to ``errors_by_variable.csv``.

    If *rerun* is anything but False, taxcalc's translation module is
    imported and its Test function run, regenerating the Python output.
    '''
    if rerun:
        cwd = os.getcwd()
        import translation
        # BUG FIX: previously referenced the undefined name
        # `python_output_dir`; the parameter is `py_output_dir`.
        os.chdir(py_output_dir)
        translation.Test(True)
        os.chdir(cwd)
    # BUG FIX: previously referenced the undefined name `sas_codes_path`;
    # the parameter is `sas_output_path`.
    gold_std = h5.File(sas_output_path)
    errors = compute_error(gen_file_paths(py_output_dir), gold_std)
    errors.columns = ['Error']
    # NOTE(review): `DataFrame.sort` is the legacy pandas API used throughout
    # this (Python 2 era) module; it was removed in modern pandas.
    errors.sort(columns='Error', ascending=False, inplace=True)
    errors.to_csv('errors_by_variable.csv',
                  index_label='Variable'
                  )
def compute_error(files_to_look_at, gold_std):
    '''Sum the absolute per-taxpayer differences between Python and SAS output.

    Walks every CSV file in *files_to_look_at*; for each column whose name
    starts with "c" (a c-code variable) and has not been seen before, the
    total of |python - sas| is accumulated against the matching entry in
    *gold_std*. Returns a one-column DataFrame indexed by variable name.
    '''
    totals = {}
    for csv_path in files_to_look_at:
        frame = pd.read_csv(csv_path)
        for column in frame:
            if not column.lower().startswith('c'):
                continue
            if column in totals:
                # First file containing a c-code variable wins.
                continue
            difference = frame[column] - gold_std[column]
            totals[column] = np.absolute(difference).sum()
    return pd.DataFrame.from_dict(totals, orient='index')
def mismatching_records(gold_std, variable, py_out_dir='py_output'):
    '''
    Given gold_std as a mapping of variable names to their values according to
    SAS and the name of one particular variable, as well as a folder containing
    the CSVs produced by our Python script, attempt to return the indices
    (taxpayer records) where the SAS values for `variable` do not match its
    Python values.
    '''
    # First find the Python values for this variable.
    py_answer = None
    for file_path in gen_file_paths(py_out_dir):
        temp = pd.read_csv(file_path)
        if variable in temp:
            print('Found in file: {}'.format(file_path))
            py_answer = temp[variable]
            break
    # BUG FIX: `if not py_answer:` raised "truth value of a Series is
    # ambiguous" whenever the variable WAS found; test identity with None.
    if py_answer is None:
        print('Variable was not found, returning None. Beware!')
        return py_answer
    mismatches = np.array(np.absolute(py_answer - gold_std[variable]) > 0)
    if not mismatches.any():
        print('No cells satisfied the condition, returning series as is')
        return mismatches
    all_indices = np.arange(len(mismatches))
    return all_indices[mismatches]
def report_accuracy(sas_codes, indx, python_output_dir='py_output'):
    """Write ``accuracy.csv`` comparing SAS and Python values for one taxpayer.

    :param sas_codes: mapping of variable name -> SAS values per taxpayer
    :param indx: row index identifying the taxpayer record
    :param python_output_dir: folder holding the Python-generated CSVs
    """
    py_values = {}
    for csv_path in gen_file_paths(python_output_dir):
        frame = pd.read_csv(csv_path)
        # Row ``indx`` holds this taxpayer's values for the file's variables.
        py_values.update(frame.iloc[indx].to_dict())
    paired = merge_dicts(sas_codes, indx, py_values)
    pd.DataFrame.from_dict(paired, orient='index').to_csv('accuracy.csv')
def gen_file_paths(dir_name, filter_func=None):
    """Build full paths for every entry inside ``dir_name``.

    :param dir_name: directory whose entries are listed
    :type dir_name: string
    :param filter_func: optional predicate applied to each full path
    :type filter_func: None by default, function if passed
    :returns: tuple of paths, or a filtered sequence when filter_func is given
    """
    entries = os.listdir(dir_name)
    full_paths = tuple(op.join(dir_name, entry) for entry in entries)
    if filter_func:
        return filter(filter_func, full_paths)
    return full_paths
def merge_dicts(sas_codes, taxpayer, python_codes):
    """Pair each SAS variable's value for ``taxpayer`` with its Python value.

    Python's copies of the variable names are occasionally lowercase, so the
    lowercase spelling is tried as a fallback; when neither spelling exists
    the Python side is recorded as an empty string.
    """
    combined = {}
    for variable, sas_values in sas_codes.items():
        sas_taxpayer = sas_values[taxpayer]
        lowered = variable.lower()
        if variable in python_codes:
            combined[variable] = (sas_taxpayer, python_codes[variable])
        elif lowered in python_codes:
            combined[lowered] = (sas_taxpayer, python_codes[lowered])
        else:
            combined[variable] = (sas_taxpayer, '')
    return combined
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser('Testing script')
    parser.add_argument('sas_codes_path',
                        help='path to HDF5 file with SAS codes')
    parser.add_argument('py_out_dir',
                        help='path to folder with Python-generated values')
    parser.add_argument('-r', dest='rerun', action='store_true',
                        help=('pass this flag to regenerate Python data'))
    cmd_input = parser.parse_args()
    # BUG FIX: the positional argument is stored as ``py_out_dir``;
    # ``cmd_input.py_output_dir`` raised AttributeError at startup.
    main(cmd_input.sas_codes_path, cmd_input.py_out_dir, cmd_input.rerun)
| 2.90625 | 3 |
django/urls/base.py | Jhvcc/CustomizeDjango | 0 | 12762824 | <reponame>Jhvcc/CustomizeDjango
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @FileName :base.py
# @Author :Lowell
# @Time :2022/3/30 13:00
from asgiref.local import Local
_prefixes = Local()
def set_script_prefix(prefix):
    """
    Set the script prefix for the current thread/async context, normalizing
    it to always end with a slash.
    """
    if not prefix.endswith("/"):
        prefix += "/"
    # BUG FIX: get_script_prefix() reads ``_prefixes.value``; the prefix was
    # stored under ``values`` and therefore never seen by the getter.
    _prefixes.value = prefix
def get_script_prefix():
    """
    Return the script prefix recorded for the current thread/async context,
    defaulting to "/" when none has been set.
    """
    return getattr(_prefixes, "value", "/")
| 2.046875 | 2 |
setup.py | seigfried1/pdftoaudiobook | 2 | 12762825 | <reponame>seigfried1/pdftoaudiobook
from setuptools import setup, find_packages
# The README doubles as the long description shown on PyPI.
with open('README.md') as readme_file:
    README = readme_file.read()

# Runtime dependencies required by the converter.
install_requires = [
    'gTTS>=2.1.1',
    'PyPDF2>=1.26.0'
]

# Static package metadata passed straight through to setuptools.
setup_args = dict(
    name='pdfToAudioBookConverter',
    version='0.0.1',
    description='This package converts a pdf file to audiobooks',
    long_description_content_type="text/markdown",
    long_description=README,
    license='MIT',
    packages=find_packages(),
    author='<NAME>',
    author_email='<EMAIL>',
    keywords=['Audiobooks', 'pdf', 'audiobookconverter'],
    url='https://github.com/seigfried1/pdftoaudiobook',
)

if __name__ == '__main__':
    setup(**setup_args, install_requires=install_requires)
flytrap/base/response.py | flytrap/flytrap-base | 0 | 12762826 | <filename>flytrap/base/response.py<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created by flytrap
import json
from django.http import HttpResponse
class SimpleResponse(HttpResponse):
    """HttpResponse that JSON-encodes a dict payload.

    Raises a generic Exception when ``data`` is not a dict.
    """

    def __init__(self, data, status=200, *args, **kwargs):
        if not isinstance(data, dict):
            raise Exception("data format error")
        body = json.dumps(data)
        super(SimpleResponse, self).__init__(body, status=status, *args, **kwargs)
| 2.40625 | 2 |
test/test.py | Maximvda/pysmappee | 0 | 12762827 | <reponame>Maximvda/pysmappee
from pysmappee.mqtt import SmappeeLocalMqtt
from pysmappee.smappee import Smappee

# Serial number of the Smappee monitor under test.
serial = '5010004089'

# Start the local MQTT transport for this device and block until its
# configuration has been received (see start_and_wait_for_config).
smappee_mqtt = SmappeeLocalMqtt(serial)
#smappee_mqtt = SmappeeLocalMqtt()
smappee_mqtt.start_and_wait_for_config()

# Use the MQTT transport as the API backend and load the locally known
# service location(s).
smappee = Smappee(api=smappee_mqtt, serialnumber=serial)
smappee.load_local_service_location()

print(smappee.service_locations.values())

# Dump every actuator and its type for each service location.
for service_location in smappee.service_locations.values():
    for actuator_id, actuator in service_location.actuators.items():
        print(actuator_id, actuator)
        print(actuator.type)
| 2.15625 | 2 |
frontend/migrations/0001_initial.py | kbilak/DPS-Web-App | 0 | 12762828 | <reponame>kbilak/DPS-Web-App
# Generated by Django 3.2.8 on 2021-11-09 23:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the frontend app: warehouse Item and blog Post."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Inventory item; note ``id`` is an IntegerField assigned by callers,
        # not an AutoField.
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('name', models.CharField(max_length=200)),
                ('category', models.CharField(choices=[('E', 'Elektryczne'), ('CH', 'Chemiczne'), ('M', 'Malarskie')], max_length=3)),
                ('quantity', models.IntegerField()),
                ('price', models.IntegerField()),
                ('author', models.CharField(max_length=100)),
            ],
        ),
        # Blog post with a project/published workflow; ordered newest-first.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('slug', models.SlugField(max_length=250, unique_for_date='publish')),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('project', 'Project'), ('published', 'Published')], default='project', max_length=15)),
                ('image', models.ImageField(blank=True, null=True, upload_to='./static/img')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-publish',),
            },
        ),
    ]
RubiksCube/envs/cube_env.py | alseambusher/gym-rubiksCube | 0 | 12762829 | <reponame>alseambusher/gym-rubiksCube
import gym
import numpy as np
from gym import spaces
from RubiksCube.MagicCube.code.cube import Cube
import imageio
class CubeEnv(gym.Env):
    """Gym environment wrapping MagicCube's ``Cube`` (an N-per-side Rubik's cube)."""

    metadata = {
        'render.modes': ['human', 'rgb_array', 'ansi']
    }

    def __init__(self, n, randomize=0):
        """
        Create an environment
        :param n: number of blocks on a side of the cube
        :param randomize: Number of random steps to be taken from the solved position
        """
        self.cube = Cube(n)
        self.transitions = []  # actions taken since the last reset
        self.max_steps = 25  # routed through the ``max_steps`` property setter below
        # Solved sticker layout, used by the ``solved`` property.
        self.solved_cube_stickers = np.copy(self.cube.stickers)
        self.randomize = randomize
        self.cube.randomize(self.randomize)
        # Snapshot of the (possibly scrambled) starting position for reset().
        self.initial_cube_stickers = np.copy(self.cube.stickers)
        # NOTE(review): these modes are already present in ``metadata`` above;
        # extend() appends duplicates to the class-level list.
        self.metadata['render.modes'].extend(["human", "rgb_array", "ansi"])
        # Enumerate every discrete action as a (face, layer, direction) move.
        action_count = 0
        self.actions = {}
        for f in self.cube.facedict.keys():
            for l in range(self.cube.N - 1):
                for d in [-1, 1]:
                    self.actions[action_count] = (f, l, d)
                    action_count += 1
        self.action_space = spaces.Discrete(len(self.actions))
        # self.action_space = spaces.Discrete(len(self.cube.facedict) * (self.cube.N - 1) * 2)
        # Observation: one integer (0..5, a face color) per sticker.
        self.observation_space = spaces.Box(low=np.array([0]*(n**2*6)),
                                            high=np.array([5]*(n**2*6)))

    def step(self, action):
        # One cube move per step; reward 1 only when the cube is solved.
        assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
        self.transitions.append(action)
        self.cube.move(*self._action_to_args(action))
        reward = 1 if self.solved else 0
        # Episode ends when solved or after ``max_steps`` recorded moves.
        done = reward or len(self.transitions) >= self.max_steps
        info = {}
        return self.state, reward, done, info

    def reset(self, skip_randomize=False):
        # Restore the recorded starting stickers; optionally re-scramble.
        self.cube.stickers = np.copy(self.initial_cube_stickers)
        self.transitions.clear()
        if not skip_randomize:
            self.cube.randomize(self.randomize)
        return self.state

    def render(self, mode='human'):
        """
        Renders the environment
        :param mode: human, rgb_array and ansi supported
        """
        if mode == "human":
            self.cube.render(flat=False).show()
        if mode == "rgb_array":
            fig = self.cube.render(flat=False)
            fig.canvas.draw()
            w, h = fig.canvas.get_width_height()
            # NOTE(review): np.fromstring is deprecated for binary data;
            # np.frombuffer is the modern equivalent here.
            buf = np.fromstring (fig.canvas.tostring_rgb(), dtype=np.uint8)
            buf = buf.reshape((h, w, 3))
            return buf
        if mode == "ansi":
            return self.cube.stickers.tostring()

    def render_transitions(self, mode='human', filename='out.mp4'):
        # Replay the recorded actions, writing one video frame per move.
        if mode == "human":
            moves = self.transitions[:]
            # NOTE(review): skip_randomize=False re-scrambles the cube, so with
            # randomize > 0 the replayed moves may not start from the state in
            # which they were recorded -- confirm this is intended.
            self.reset(skip_randomize=False)
            with imageio.get_writer(filename, fps=1) as video:
                video.append_data(self.render(mode="rgb_array"))
                for action in moves:
                    self.step(action)
                    video.append_data(self.render(mode="rgb_array"))

    def _action_to_args(self, action):
        # Map a discrete action index to Cube.move's (face, layer, direction).
        return self.actions[action]

    def readable_moves(self):
        # Human-readable (face, layer, direction) tuples for this episode.
        return [self._action_to_args(action) for action in self.transitions]

    @property
    def state(self):
        # Flat 1-D view of the sticker array (matches observation_space).
        return self.cube.stickers.flatten()

    @property
    def solved(self):
        return np.array_equal(self.cube.stickers, self.solved_cube_stickers)

    @property
    def max_steps(self):
        return self.max_steps_value

    @max_steps.setter
    def max_steps(self, value):
        self.max_steps_value = value
if __name__ == "__main__":
env = CubeEnv(4, 0)
for i in range(24):
state, reward, done, info = env.step(env.action_space.sample())
print(env.solved)
print(state, reward, done, info)
print(env.readable_moves())
env.render_episodes()
# env.reset()
# print(env.solved)
| 2.5625 | 3 |
python/DrawImage.py | Joiner12/TimeVisual | 0 | 12762830 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 10:35:53 2021
@author: Peace4Lv
"""
from pyecharts.components import Image
from pyecharts.options import ComponentTitleOpts
from os import path
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
# Use a CJK-capable font so the Chinese event labels render, and keep the
# minus sign displayable with that font.
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
def DrawImage(imgUrl="../html/pic/horizontalLine.png", **kw):
image = Image()
# check file exists
if not path.isfile(imgUrl):
imgUrl = r"https://gitee.com/RiskyJR/pic-bed/raw/master/comm-timeline-graphic-1024x380.png"
image.add(
src=imgUrl,
# image align center should modify outside
style_opts={
"style": "margin-top: 20px;text-align: center;width:1800px;height:900px;"},
)
image.set_global_opts(
title_opts=ComponentTitleOpts(title="Time Line")
)
image.render("../html/imageTest.html")
print("horizontal line image finished...\n")
return image
def UpdateTimeLineImage(startTick_x=['2021-08-09 09:00:00', '2021-08-09 09:45:00',
                                     '2021-08-09 11:11:00', '2021-08-09 14:30:00',
                                     '2021-08-09 15:18:00',
                                     '2021-08-09 16:40:00', '2021-08-09 17:19:00'],
                        eventName_x=['开会', '发票', 'visual-code', '舆情分析',
                                     'AOA-Paper', 'AOA-Paper', 'visual-code'],
                        eventLast_x=[30, 78, 33, 47, 69, 39, 15], *k, **kw):
    """Draw a horizontal daily time line image with matplotlib.

    :param startTick_x: event start times as '%Y-%m-%d %H:%M:%S' strings
    :param eventName_x: event labels (parallel to startTick_x)
    :param eventLast_x: event durations in minutes (parallel to startTick_x)
    :returns: path of the generated image file
    """
    # BUG FIX: a comma was missing between '#68D5AE' and '#6FD0DB', so the
    # two adjacent string literals were silently concatenated into a single
    # bogus color entry ('#68D5AE#6FD0DB').
    colors = ['#E5562D', '#E0A459', '#CFBE65', '#A8CF65', '#6FD67D', '#68D5AE',
              '#6FD0DB', '#5294D0', '#595CD0', '#9E59D0', '#D05994']
    # datetime-str→datetime→baseline→gap
    # Create the base bar from 5am to 1am
    startTick_t = [datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
                   for x in startTick_x]
    # NOTE(review): the baseline day is taken from the *second* event
    # (startTick_t[1]); confirm this should not be startTick_t[0].
    zeroTick_t = datetime.strptime(datetime.strftime(
        startTick_t[1], "%Y-%m-%d")+" 05:00:00", "%Y-%m-%d %H:%M:%S")
    endTick_t = zeroTick_t+timedelta(hours=19)
    eventName = eventName_x
    eventLast = eventLast_x
    # Alternate label heights above/below the bar to reduce overlaps.
    levels = np.array([-5, 5, -3, 3, -1, 1])
    fig, ax = plt.subplots(figsize=(36, 36*0.5625),
                           facecolor='#D6D7C5', dpi=500)
    # Width of the base bar in minutes (x axis unit is minutes since 5am).
    baseGapMin = (endTick_t-zeroTick_t).total_seconds()/60
    ax.set(facecolor="#D6D7C5")
    ax.broken_barh(
        [(0, baseGapMin)], (-1/2, 1), alpha=.5,
        facecolors='#ace9e8', edgecolors='white', lw=4, capstyle='round')
    ax.set_ylim(-8, 8)
    # set as page background image no need title
    # ax.set_title('Daily Time Line', fontsize=60, color='white')
    for ii, (iname, itick, ieventLast) in enumerate(zip(eventName, startTick_t, eventLast)):
        barhColor = colors[ii % 4]
        level = levels[ii % 6]
        vert = 'top' if level < 0 else 'bottom'
        # tickTemp = datetime.strptime(itick, "%Y-%m-%d %H:%M:%S")
        curPointX = (itick-zeroTick_t).total_seconds()/60
        curPointX_M = curPointX + ieventLast/2  # marker sits mid-event
        ax.scatter(curPointX_M, 0, s=100, facecolor='w',
                   edgecolor=barhColor, zorder=9999)
        # a line up to the text
        ax.plot((curPointX_M, curPointX_M), (0, level), c='white', alpha=.5)
        # text
        itickStr = datetime.strftime(itick, "%m-%d %H:%M")
        itext = iname+"\n"+itickStr+"|"+str(ieventLast)
        textInstance = ax.text(
            curPointX_M, level, itext,
            horizontalalignment='center', verticalalignment=vert, fontsize=20,
            fontfamily='Microsoft YaHei')
        textInstance.set_bbox(
            dict(boxstyle="round", alpha=0.5, color='#C3EAE9'))
        # broken_bar
        ax.broken_barh([(curPointX, ieventLast)], (-1/2, 1),
                       facecolors=barhColor, edgecolors='white', lw=4)
    # Remove components for a cleaner look
    plt.setp((ax.get_yticklabels() + ax.get_yticklines() +
              list(ax.spines.values())), visible=False)
    plt.setp((ax.get_xticklabels() + ax.get_xticklines() +
              list(ax.spines.values())), visible=False)
    plt.xlabel(startTick_t[int(len(startTick_t)/2)].strftime("%Y-%m-%d")+' Time Line',
               loc='left', fontsize=30, fontfamily='Microsoft YaHei', color='white')
    plt.ylabel('Update:'+datetime.now().strftime("%Y-%m-%d"),
               loc='bottom', fontsize=30, fontfamily='Microsoft YaHei', color='white')
    if True:  # debug toggle: save to file instead of showing interactively
        imageFile = r'../html/pic/timeline.jpg'
        plt.savefig(imageFile, dpi=400, bbox_inches='tight')
        print('image generated', imageFile)
        return imageFile
    else:
        plt.show()
if __name__ == "__main__":
UpdateTimeLineImage()
# DrawImage()
| 2.171875 | 2 |
about/migrations/0001_initial.py | Arlefreak/api.afk | 0 | 12762831 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-18 22:52
from __future__ import unicode_literals
import about.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the about app: orderable Entry with attached Images."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Text entry with a manual ordering column and auto-managed dates.
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False)),
                ('name', models.CharField(max_length=140)),
                ('slug', models.SlugField(editable=False)),
                ('text', models.TextField()),
                ('dateCreated', models.DateField(auto_now_add=True)),
                ('dateUpdated', models.DateField(auto_now=True)),
                ('notes', models.TextField(blank=True)),
            ],
            options={
                'ordering': ['order', 'name'],
            },
        ),
        # Image rows hang off an Entry; upload path comes from about.models.
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False)),
                ('publish', models.BooleanField(default=False)),
                ('name', models.CharField(max_length=140)),
                ('caption', models.CharField(blank=True, max_length=140)),
                ('image', models.ImageField(upload_to=about.models.imageLocation)),
                ('dateCreated', models.DateField(auto_now_add=True)),
                ('entry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='about.Entry')),
            ],
            options={
                'ordering': ['order', 'dateCreated'],
            },
        ),
    ]
| 1.742188 | 2 |
penchmark/_core.py | Ruzzz/penchmark | 0 | 12762832 | import re
import statistics
import sys
from collections import defaultdict
from sys import getsizeof
from timeit import default_timer
from types import BuiltinFunctionType, FunctionType, MethodType
from typing import Any, Dict, Iterable, Optional, Tuple, Union
from penchmark._defs import (
AnyCallee,
AnyInData,
ByDataReport,
CallableAny,
Report,
ReportItem,
Summary,
SummaryItem,
)
class Estimator:
    """Measures the wall-clock time of repeatedly applying a callee to data."""

    class Elapsed:
        """Context manager capturing the duration of its ``with`` block."""

        __slots__ = '_start', 'dx'

        FLOAT_FMT = '.3f'

        def __init__(self):
            self._start = 0
            self.dx = 0

        def __enter__(self):
            self._start = default_timer()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.dx = default_timer() - self._start

        def __call__(self, fmt=None) -> Union[float, str]:
            """Return the measured duration, formatted when ``fmt`` is given."""
            if fmt is None:
                return self.dx
            return format(self.dx, fmt)

    def __call__(self,
                 callee: CallableAny,
                 data: Any,
                 count_of_call: int,
                 expected: Any = None) -> float:
        """Time ``count_of_call`` invocations of ``callee(data)``.

        When ``expected`` is given, each result is checked against it.
        """
        with Estimator.Elapsed() as stopwatch:
            for _ in range(count_of_call):
                outcome = callee(data)
                if expected is not None:
                    assert outcome == expected
        return stopwatch()
class ByDataSummary:
    """Aggregates per-data ratio reports into a mean/median summary per callee.

    A callee that ever fails (an invalid report item) is blacklisted and its
    previously collected ratios are discarded.
    """

    def __init__(self):
        self.by_data_ratios = defaultdict(list)  # callee name -> ratios
        self._with_errors = set()  # callees excluded after a failure

    def __call__(self, by_data_report: ByDataReport):
        """Fold one per-data report group into the running aggregates."""
        for item in by_data_report:
            name = item.callee_name
            if name in self._with_errors:
                continue
            if item.valid:
                self.by_data_ratios[name].append(item.ratio)
                continue
            # First failure: blacklist and drop anything gathered so far.
            self._with_errors.add(name)
            self.by_data_ratios.pop(name, None)

    def calc_summary(self) -> Summary:
        """Return SummaryItems for all healthy callees, sorted by median ratio."""
        summary = [
            SummaryItem(callee_name=name,
                        mean=statistics.mean(ratios),
                        median=statistics.median(ratios))
            for name, ratios in self.by_data_ratios.items()
        ]
        summary.sort(key=lambda entry: entry.median)
        return summary
class NameGenerator:
def __init__(self, module_as_prefix=True):
self.module_as_prefix = module_as_prefix
self._name_counters = defaultdict(int)
self._cache = {} # type: Dict[object, str]
def __call__(self, obj: CallableAny):
ret = self._cache.get(obj, None)
if ret is not None:
return ret
ret = self.scan_name(obj)
if not ret:
ret = 'callable'
if ret in self._name_counters:
count = self._name_counters[ret] + 1
ret = ret + '-' + str(count)
self._name_counters[ret] = count
else:
self._name_counters[ret] = 1
self._cache[obj] = ret
return ret
_REGEXS = [
re.compile('<bound method (.+) of <.*>>'),
re.compile('<function (.+) at .*'),
re.compile('<built-in function (.+)>'),
]
@classmethod
def scan_name(cls, x: object):
ret = None
if isinstance(x, (BuiltinFunctionType, FunctionType, MethodType)):
ret = str(x)
for regex in cls._REGEXS:
m = regex.match(ret)
if m:
ret = m[1]
break
if '<lambda>' in ret:
ret = 'lambda'
if '<locals>.' in ret:
ret = ret[ret.find('<locals>.') + 9:]
elif hasattr(x, '__name__'):
ret = x.__name__ # type: ignore
if not ret:
# special cases
# - functools.partial
s = repr(x)
if s.startswith('functools'):
ret = s[:s.find('(')]
if not ret and hasattr(x, '__class__'):
ret = x.__class__.__name__ # type: ignore
if ret and hasattr(x, '__module__') and x.__module__ and x.__module__ != '__main__':
ret = x.__module__ + '.' + ret
return ret
def benchmark(callees: Iterable[AnyCallee],
              dataset: Iterable[AnyInData],
              *,
              count_factor=1.0,
              estimator=None,
              summary=None,
              name_generator=None,
              verbose=True) -> Tuple[Report, Optional[Union[Summary, Any]]]:
    """
    Run every callee against every dataset entry and rank them by elapsed time.

    :param callees: callables or (name, callable) pairs
    :param dataset: (name, data, count_of_call[, expected]) tuples
    :param count_factor: multiplier applied to each entry's count_of_call
    :param estimator: Default is Estimator()
    :param summary: None, False or summary object, default is ByDataSummary()
    :param name_generator: names anonymous callees; default is NameGenerator()
    :param verbose: print progress to stdout
    :return: (per-data report dict, summary result or None)
    """
    # pylint: disable=too-many-branches, too-many-arguments, too-many-locals
    if not estimator:
        estimator = Estimator()
    if summary is None:
        summary = ByDataSummary()
    if not name_generator:
        name_generator = NameGenerator()
    ret = {}
    for data_name, data, count_of_call, *data_expected in dataset:
        # Optional 4th tuple element is the expected result for validation.
        expected = data_expected[0] if data_expected else None
        count_of_call = round(count_of_call * count_factor)
        if count_of_call <= 0:
            # Scaled down to nothing -- skip this dataset entry entirely.
            continue
        if verbose:
            print(data_name, 'count of call:', count_of_call, 'size of data:', getsizeof(data))
        group = []
        for callee_data in callees:
            if not callable(callee_data):
                callee_name, callee = callee_data
            else:
                callee_name, callee = name_generator(callee_data), callee_data
            if verbose:
                print(' -', callee_name)
            try:
                elapsed = estimator(callee, data, count_of_call, expected)
                ri = ReportItem(callee_name=callee_name, elapsed=elapsed)
            except Exception:  # pylint: disable=broad-except
                # A failing callee is recorded as invalid instead of aborting.
                ri = ReportItem(callee_name=callee_name)
            group.append(ri)
        # Fastest first; failed (invalid) callees sort to the end.
        group.sort(key=lambda x: x.elapsed if x.valid else sys.maxsize)
        first = group[0]
        if first.valid:
            # Ratios are relative to the fastest callee for this data entry.
            for item in group:
                if item == first:
                    item.ratio = 1.0
                elif item.valid:
                    item.ratio = item.elapsed / first.elapsed
        if summary:
            summary(group)
        ret[data_name] = group
        if verbose:
            print()
    return ret, summary.calc_summary() if summary else None
| 2.375 | 2 |
cirq-core/cirq/testing/repr_pretty_tester_test.py | Nexuscompute/Cirq | 0 | 12762833 | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq.testing
def test_fake_printer():
    """FakePrinter accumulates every text() call into ``text_pretty``."""
    printer = cirq.testing.FakePrinter()
    assert printer.text_pretty == ""
    printer.text("stuff")
    assert printer.text_pretty == "stuff"
    printer.text(" more")
    assert printer.text_pretty == "stuff more"
def test_assert_repr_pretty():
    """assert_repr_pretty matches the full pretty output, honoring ``cycle``."""

    class Single:
        def _repr_pretty_(self, p, cycle):
            if cycle:
                p.text("TestClass")
            else:
                p.text("I'm so pretty")

    cirq.testing.assert_repr_pretty(Single(), "I'm so pretty")
    cirq.testing.assert_repr_pretty(Single(), "TestClass", cycle=True)

    class Chunked:
        def _repr_pretty_(self, p, cycle):
            if cycle:
                p.text("TestClass")
                return
            p.text("I'm so pretty")
            p.text(" I am")

    cirq.testing.assert_repr_pretty(Chunked(), "I'm so pretty I am")
    cirq.testing.assert_repr_pretty(Chunked(), "TestClass", cycle=True)
def test_assert_repr_pretty_contains():
    """assert_repr_pretty_contains only needs a substring of the output."""

    class Single:
        def _repr_pretty_(self, p, cycle):
            if cycle:
                p.text("TestClass")
            else:
                p.text("I'm so pretty")

    cirq.testing.assert_repr_pretty_contains(Single(), "pretty")
    cirq.testing.assert_repr_pretty_contains(Single(), "Test", cycle=True)

    class Chunked:
        def _repr_pretty_(self, p, cycle):
            if cycle:
                p.text("TestClass")
                return
            p.text("I'm so pretty")
            p.text(" I am")

    cirq.testing.assert_repr_pretty_contains(Chunked(), "I am")
    cirq.testing.assert_repr_pretty_contains(Chunked(), "Class", cycle=True)
| 2.375 | 2 |
jd_cmd.py | cdle/PagerMaid_Plugins | 0 | 12762834 | """ PagerMaid module to handle jd command. """
from pagermaid import version
from pagermaid.listener import listener
from pagermaid.utils import lang, alias_command, obtain_message, client
@listener(is_plugin=False, outgoing=True, command=alias_command("jd_cmd"),
          description="解析 JD 口令",
          parameters="<JD 口令>")
async def jd_cmd(context):
    """Resolve a JD share command via api.jds.codes and edit the message
    with the decoded title, author and jump URL."""
    try:
        text = await obtain_message(context)
    except ValueError:
        return await context.edit("[jd_cmd] " + lang("msg_ValueError"))
    try:
        data = (await client.post("https://api.jds.codes/jCommand", json={"code": text})).json()
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed asyncio.CancelledError
        # (a BaseException since 3.8), preventing clean task cancellation.
        return await context.edit("[jd_cmd] 网络错误!")
    if data["code"] != 200:
        return await context.edit("[jd_cmd] 未找到 JD 口令!")
    try:
        data = data["data"]
        await context.edit(f"【jd_cmd】 [【{data['title']}】 - {data['userName']}]({data['jumpUrl']})")
    except KeyError:
        return await context.edit("[jd_cmd] 数据错误!")
| 2.234375 | 2 |
examples/benchmarks/echoserver.py | brmmm3/fastthreadpool | 31 | 12762835 | import sys
import argparse
import asyncio
import gc
import os
import fastthreadpool
from socket import socket, AF_UNIX, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR, IPPROTO_TCP, TCP_NODELAY
PRINT = 0
def pool_echo_server(address, unix, threads, size):
    """Blocking accept loop; each accepted client is handed to the pool."""
    family = AF_UNIX if unix else AF_INET
    listener = socket(family, SOCK_STREAM)
    listener.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    listener.bind(address)
    listener.listen(threads)
    if PRINT:
        print('Server listening at', address)
    with listener:
        while True:
            conn, peer = listener.accept()
            if PRINT:
                print('Connection from', peer)
            pool.submit(pool_echo_client, conn, size)
def pool_echo_client(client, size):
    """Echo worker: receive into a fixed ``size`` buffer and echo it back.

    Runs until the peer closes the connection (EOF) or a socket error
    occurs; the socket is always closed on exit.
    """
    try:
        client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    except (OSError, NameError):
        # TCP_NODELAY does not apply to unix sockets (or may be undefined).
        pass
    b = bytearray(size)
    bl = [b]
    with client:
        try:
            while True:
                nbytes = client.recvmsg_into(bl)[0]
                # BUG FIX: the byte count was ignored, so a peer disconnect
                # (0 bytes) left the loop spinning and re-sending the stale
                # buffer until the send finally errored out.
                if nbytes == 0:
                    break
                # NOTE(review): the whole fixed-size buffer is echoed even on
                # a partial read; the benchmark assumes ``size``-byte messages.
                client.sendall(b)
        except OSError:
            # Connection reset / broken pipe just ends this client.
            pass
    if PRINT:
        print('Connection closed')
async def echo_server(loop, address, unix):
    """Non-blocking accept loop; spawns one echo task per connection."""
    family = AF_UNIX if unix else AF_INET
    listener = socket(family, SOCK_STREAM)
    listener.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    listener.bind(address)
    listener.listen(16)
    listener.setblocking(False)
    if PRINT:
        print('Server listening at', address)
    with listener:
        while True:
            conn, peer = await loop.sock_accept(listener)
            if PRINT:
                print('Connection from', peer)
            loop.create_task(echo_client(loop, conn))
async def echo_client(loop, client):
    """Echo everything received on ``client`` until the peer sends EOF."""
    try:
        client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    except (OSError, NameError):
        pass
    with client:
        while True:
            chunk = await loop.sock_recv(client, 4096)
            if not chunk:
                break
            await loop.sock_sendall(client, chunk)
    if PRINT:
        print('Connection closed')
async def echo_client_streams(reader, writer):
    """Stream-based echo handler: read chunks and write them back until EOF."""
    sock = writer.get_extra_info('socket')
    try:
        sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    except (OSError, NameError):
        pass
    if PRINT:
        print('Connection from', sock.getpeername())
    while True:
        chunk = await reader.read(4096)
        if not chunk:
            break
        writer.write(chunk)
    if PRINT:
        print('Connection closed')
    writer.close()
class EchoProtocol(asyncio.Protocol):
    """Callback-based echo: whatever arrives is written straight back."""

    def connection_made(self, transport):
        # Keep the transport so data_received can write to it.
        self.transport = transport

    def connection_lost(self, exc):
        # Drop the reference; the transport is no longer usable.
        self.transport = None

    def data_received(self, data):
        self.transport.write(data)
async def print_debug(loop):
    """Periodically clear the terminal and dump event-loop debug info.

    Only scheduled when the loop exposes ``print_debug_info`` (a
    uvloop-specific API).
    """
    while True:
        print(chr(27) + "[2J")  # clear screen
        loop.print_debug_info()
        # BUG FIX: the explicit ``loop=`` argument was deprecated in Python
        # 3.8 and removed in 3.10; asyncio.sleep always uses the running
        # loop, so dropping it is backward-compatible.
        await asyncio.sleep(0.5)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--uvloop', default=False, action='store_true',
                        help='use uvloop instead of asyncio')
    parser.add_argument('--streams', default=False, action='store_true',
                        help='use asyncio/uvloop streams')
    parser.add_argument('--proto', default=False, action='store_true',
                        help='use asyncio/uvloop protocol')
    parser.add_argument('--pool', default=False, action='store_true',
                        help='use thread pool instead of asyncio/uvloop')
    parser.add_argument('--threads', default=os.cpu_count(), type=int,
                        help='number of parallel threads in case of thread pool')
    parser.add_argument('--bufsize', default=4096, type=int)
    parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
    parser.add_argument('--print', default=False, action='store_true')
    args = parser.parse_args()

    # 'file:<path>' selects a unix domain socket; otherwise host:port TCP.
    unix = False
    if args.addr.startswith('file:'):
        unix = True
        addr = args.addr[5:]
        if os.path.exists(addr):
            # Remove a stale socket file left over from a previous run.
            os.remove(addr)
    else:
        addr = args.addr.split(':')
        addr[1] = int(addr[1])
        addr = tuple(addr)

    print('serving on: {}'.format(addr))

    if args.pool:
        # Thread-pool mode: pool.join() blocks until interrupted.
        print(f"creating thread pool with {args.threads} threads")
        print(f"buffer size is {args.bufsize} bytes")
        pool = fastthreadpool.Pool(args.threads)
        pool.submit(pool_echo_server, addr, unix, args.threads, args.bufsize)
        pool.join()
        sys.exit(0)

    if args.uvloop:
        import uvloop
        loop = uvloop.new_event_loop()
        print('using uvloop')
    else:
        loop = asyncio.new_event_loop()
        print('using asyncio loop')
    asyncio.set_event_loop(loop)
    loop.set_debug(False)

    if args.print:
        PRINT = 1

    # uvloop-only periodic debug dump; disables per-connection logging.
    if hasattr(loop, 'print_debug_info'):
        loop.create_task(print_debug(loop))
        PRINT = 0

    if args.streams:
        if args.proto:
            print('cannot use --stream and --proto simultaneously')
            exit(1)
        print('using streams')
        # NOTE(review): the explicit ``loop=`` argument was removed from
        # asyncio.start_server()/start_unix_server() in Python 3.10.
        if unix:
            coro = asyncio.start_unix_server(echo_client_streams,
                                             addr, loop=loop)
        else:
            coro = asyncio.start_server(echo_client_streams,
                                        *addr, loop=loop)
        srv = loop.run_until_complete(coro)
    elif args.proto:
        if args.streams:
            # NOTE(review): unreachable -- the combination is rejected above.
            print('cannot use --stream and --proto simultaneously')
            exit(1)
        print('using simple protocol')
        if unix:
            coro = loop.create_unix_server(EchoProtocol, addr)
        else:
            coro = loop.create_server(EchoProtocol, *addr)
        srv = loop.run_until_complete(coro)
    else:
        print('using sock_recv/sock_sendall')
        loop.create_task(echo_server(loop, addr, unix))

    try:
        loop.run_forever()
    finally:
        if hasattr(loop, 'print_debug_info'):
            gc.collect()
            print(chr(27) + "[2J")  # clear screen before the final dump
            loop.print_debug_info()
        loop.close()
| 2.5625 | 3 |
rllib/evaluation/tests/test_postprocessing.py | mgelbart/ray | 22 | 12762836 | import numpy as np
import unittest
import ray
from ray.rllib.evaluation.postprocessing import adjust_nstep, discount_cumsum
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.test_utils import check
class TestPostprocessing(unittest.TestCase):
    """Unit tests for RLlib's in-place n-step trajectory adjustment."""

    @classmethod
    def setUpClass(cls) -> None:
        # One shared Ray instance for the whole test case.
        ray.init()

    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()

    def test_n_step_3(self):
        """Tests, whether n-step adjustments of trajectories work."""
        # n-step = 3
        gamma = 0.9
        obs = [1, 2, 3, 4, 5, 6, 7]
        actions = ["ac1", "ac2", "ac1", "ac1", "ac1", "ac2", "ac1"]
        rewards = [10.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0]
        dones = [0, 0, 0, 0, 0, 0, 1]
        next_obs = [2, 3, 4, 5, 6, 7, 8]
        batch = SampleBatch(
            {
                SampleBatch.OBS: obs,
                SampleBatch.ACTIONS: actions,
                SampleBatch.REWARDS: rewards,
                SampleBatch.DONES: dones,
                SampleBatch.NEXT_OBS: next_obs,
            }
        )
        # adjust_nstep mutates ``batch`` in place.
        adjust_nstep(3, gamma, batch)
        check(batch[SampleBatch.OBS], [1, 2, 3, 4, 5, 6, 7])
        check(
            batch[SampleBatch.ACTIONS],
            ["ac1", "ac2", "ac1", "ac1", "ac1", "ac2", "ac1"],
        )
        # next_obs is shifted n-1 steps ahead, clamped at the terminal obs.
        check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 8, 8, 8])
        check(batch[SampleBatch.DONES], [0, 0, 0, 0, 1, 1, 1])
        check(
            batch[SampleBatch.REWARDS], [91.0, 171.0, 271.0, 271.0, 271.0, 190.0, 100.0]
        )

    def test_n_step_4(self):
        """Tests, whether n-step adjustments of trajectories work."""
        # n-step = 4
        gamma = 0.99
        obs = np.arange(0, 7)
        actions = np.random.randint(-1, 3, size=(7,))
        check_actions = actions.copy()
        rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
        dones = [False, False, False, False, False, False, True]
        next_obs = np.arange(1, 8)
        batch = SampleBatch(
            {
                SampleBatch.OBS: obs,
                SampleBatch.ACTIONS: actions,
                SampleBatch.REWARDS: rewards,
                SampleBatch.DONES: dones,
                SampleBatch.NEXT_OBS: next_obs,
            }
        )
        adjust_nstep(4, gamma, batch)
        check(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])
        check(batch[SampleBatch.ACTIONS], check_actions)
        check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])
        check(batch[SampleBatch.DONES], [False, False, False, True, True, True, True])
        # Each reward becomes the discounted sum over the next (up to) 4 steps.
        check(
            batch[SampleBatch.REWARDS],
            [
                discount_cumsum(np.array(rewards[0:4]), gamma)[0],
                discount_cumsum(np.array(rewards[1:5]), gamma)[0],
                discount_cumsum(np.array(rewards[2:6]), gamma)[0],
                discount_cumsum(np.array(rewards[3:7]), gamma)[0],
                discount_cumsum(np.array(rewards[4:]), gamma)[0],
                discount_cumsum(np.array(rewards[5:]), gamma)[0],
                discount_cumsum(np.array(rewards[6:]), gamma)[0],
            ],
        )

    def test_n_step_malformed_dones(self):
        # Test bad input (trajectory has dones in middle).
        # Re-use same batch, but change dones.
        gamma = 1.0
        obs = np.arange(0, 7)
        actions = np.random.randint(-1, 3, size=(7,))
        rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
        next_obs = np.arange(1, 8)
        batch = SampleBatch(
            {
                SampleBatch.OBS: obs,
                SampleBatch.ACTIONS: actions,
                SampleBatch.REWARDS: rewards,
                SampleBatch.DONES: [False, False, True, False, False, False, True],
                SampleBatch.NEXT_OBS: next_obs,
            }
        )
        # A done flag before the trajectory's end must be rejected.
        self.assertRaisesRegex(
            AssertionError,
            "Unexpected done in middle",
            lambda: adjust_nstep(5, gamma, batch),
        )

    def test_n_step_very_short_trajectory(self):
        """Tests, whether n-step also works for very small trajectories."""
        gamma = 1.0
        obs = np.arange(0, 2)
        actions = np.random.randint(-100, 300, size=(2,))
        check_actions = actions.copy()
        rewards = [10.0, 100.0]
        next_obs = np.arange(1, 3)
        batch = SampleBatch(
            {
                SampleBatch.OBS: obs,
                SampleBatch.ACTIONS: actions,
                SampleBatch.REWARDS: rewards,
                SampleBatch.DONES: [False, False],
                SampleBatch.NEXT_OBS: next_obs,
            }
        )
        # n-step (3) is larger than the trajectory length (2).
        adjust_nstep(3, gamma, batch)
        check(batch[SampleBatch.OBS], [0, 1])
        check(batch[SampleBatch.ACTIONS], check_actions)
        check(batch[SampleBatch.DONES], [False, False])
        check(batch[SampleBatch.REWARDS], [10.0 + gamma * 100.0, 100.0])
        check(batch[SampleBatch.NEXT_OBS], [2, 2])

    def test_n_step_from_same_obs_source_array(self):
        """Tests, whether n-step also works on a shared obs/new-obs array."""
        gamma = 0.99
        # The underlying observation data. Both obs and next_obs will
        # be references into that same np.array.
        underlying_obs = np.arange(0, 8)
        obs = underlying_obs[:7]
        next_obs = underlying_obs[1:]
        actions = np.random.randint(-1, 3, size=(7,))
        check_actions = actions.copy()
        rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
        dones = [False, False, False, False, False, False, True]
        batch = SampleBatch(
            {
                SampleBatch.OBS: obs,
                SampleBatch.ACTIONS: actions,
                SampleBatch.REWARDS: rewards,
                SampleBatch.DONES: dones,
                SampleBatch.NEXT_OBS: next_obs,
            }
        )
        adjust_nstep(4, gamma, batch)
        check(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])
        check(batch[SampleBatch.ACTIONS], check_actions)
        check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])
        check(batch[SampleBatch.DONES], [False, False, False, True, True, True, True])
        check(
            batch[SampleBatch.REWARDS],
            [
                discount_cumsum(np.array(rewards[0:4]), gamma)[0],
                discount_cumsum(np.array(rewards[1:5]), gamma)[0],
                discount_cumsum(np.array(rewards[2:6]), gamma)[0],
                discount_cumsum(np.array(rewards[3:7]), gamma)[0],
                discount_cumsum(np.array(rewards[4:]), gamma)[0],
                discount_cumsum(np.array(rewards[5:]), gamma)[0],
                discount_cumsum(np.array(rewards[6:]), gamma)[0],
            ],
        )
if __name__ == "__main__":
    import pytest
    import sys
    # Run this file's tests directly through pytest when executed as a script.
    sys.exit(pytest.main(["-v", __file__]))
| 2.296875 | 2 |
lib/test_master.py | serl/topoblocktest | 0 | 12762837 | <gh_stars>0
import re
import os
import os.path
import pathlib
import json
import hashlib
import random
import itertools
from . import topologies
from . import tests
from . import analyze
from lib.bash import CommandBlock
from pydblite import Base
import warnings
# Render warnings as a single "Category: message" line (suppresses the
# default file/line-number noise).
warnings.formatwarning = lambda message, category, *a: '{}: {}\n'.format(category.__name__, message)
# All generated scripts, configs, counts and measurement files live here.
results_dir = 'results/'
def generate_combinations(constants, variables, skip_fn=lambda x: False):
    """Generate one test case per combination of the variable settings.

    Args:
        constants: dict of settings shared by every generated case.
        variables: dict mapping a setting name to an iterable of candidate
            values; the Cartesian product of those iterables is enumerated.
        skip_fn: predicate over the merged settings dict; combinations for
            which it returns True are not generated.
    """
    n = 0
    variable_names = tuple(variables.keys())
    for combination in itertools.product(*variables.values()):
        settings = constants.copy()
        settings.update(zip(variable_names, combination))
        # An empty settings dict would describe no test at all.
        # BUG FIX: the original used ``len(settings) is 0`` -- an identity
        # comparison on an int, which only works by accident of CPython's
        # small-int caching and raises a SyntaxWarning on modern Pythons.
        if not settings:
            continue
        if not skip_fn(settings):
            generate(**settings)
            n += 1
    print('Generated {} cases. Now go and run run_test.py in order to run them!'.format(n))
def generate(**settings):
    """Materialize one test case: build its topology script and write it to disk.

    The settings dict is hashed (SHA-1 of its sorted JSON form) to produce a
    stable, unique file name; a ``<hash>.config`` (the JSON settings) and an
    executable ``<hash>.sh`` (the test script) are written under results_dir.

    Returns:
        (settings_hash, script) for the generated case.
    """
    # Sort keys so the same settings always hash to the same file name.
    settings_json = json.dumps(settings, sort_keys=True)
    settings_hash = hashlib.sha1(settings_json.encode('utf-8')).hexdigest()
    # Look up the topology builder by name; it returns the machine/script
    # object plus the two namespace endpoints used by the iperf test.
    m, ns1, ns2 = getattr(topologies, settings['topology'])(**settings)
    settings['ns1'] = ns1
    settings['ns2'] = ns2
    settings['result_file'] = results_dir
    # if settings['collection'] is not None:
    #     settings['result_file'] += settings['collection'] + '/'
    settings['result_file'] += settings_hash
    # Assemble the full test script: preamble, topology setup, iperf run.
    script = tests.begin()
    script += m.get_script()
    script += getattr(tests, settings['iperf_name'])(**settings)
    with open(settings['result_file'] + '.config', 'w') as f:
        f.write(settings_json)
    with open(settings['result_file'] + '.sh', 'w') as f:
        f.write(str(script))
    # Make the generated script directly executable.
    os.chmod(settings['result_file'] + '.sh', 0o777)
    return settings_hash, script
def run_all(target_repetitions=0, dry_run=False, debug=False, recursion_limit=10):
    """Run every generated test script until each has ``target_repetitions`` runs.

    Scans results_dir for ``*.sh`` scripts and compares each script's
    ``.count`` file against the target. With ``target_repetitions == 0`` the
    target defaults to the current maximum count (equalizing all tests).
    Recurses after a full pass to pick up runs that failed to record a count.

    Returns:
        (number_of_runs_scheduled, forecast_time, target_repetitions)
    """
    # Scripts are cached on the function object so recursive calls (and
    # repeated invocations) do not re-read files from disk.
    if not hasattr(run_all, "scripts"):
        run_all.scripts = {}  # hash => CommandBlock instance
    to_run = []  # each script will appear N times, so to reach the target_repetitions
    p = pathlib.Path(results_dir)
    max_count = 0
    forecast_time = 0
    for script_file in p.glob('*.sh'):
        settings_hash = script_file.stem
        count = 0
        try:
            # The .count file records how many times this test already ran;
            # missing or corrupt files are treated as zero runs.
            with script_file.parent.joinpath(settings_hash + '.count').open() as count_fh:
                count = int(count_fh.read())
        except (FileNotFoundError, ValueError):
            pass
        max_count = max(max_count, count)
        needed_repetitions = target_repetitions - count
        if needed_repetitions > 0:
            with script_file.open() as script_fh:
                run_all.scripts[settings_hash] = CommandBlock() + script_fh.read()
            to_run.extend([settings_hash] * needed_repetitions)
            forecast_time += run_all.scripts[settings_hash].execution_time() * needed_repetitions
    # No explicit target: equalize everything up to the current maximum.
    if target_repetitions == 0 and max_count > 0:
        return run_all(max_count, dry_run, debug, recursion_limit)
    if not dry_run and len(to_run) > 0:
        random.shuffle(to_run)  # the order becomes unpredictable: I think it's a good idea
        for current, settings_hash in enumerate(to_run, start=1):
            script = run_all.scripts[settings_hash]
            print("Running {} ({}/{})...".format(settings_hash, current, len(to_run)))
            script.run(add_bash=settings_hash if debug else False)
        # Re-scan after the pass: any run that crashed before writing its
        # .count file will be retried, up to recursion_limit times.
        if recursion_limit <= 0:
            warnings.warn("Hit recursion limit. Some tests didn't run correctly!")
        else:
            run_all(target_repetitions, False, debug, recursion_limit - 1)
    return len(to_run), forecast_time, target_repetitions
def get_results_db(clear_cache=False, skip=None):
    """Return a pydblite database of all test results, (re)building the cache.

    The cache is rebuilt when ``clear_cache`` is True, the cache file does not
    exist, or results_dir is newer than the cache file.

    Args:
        clear_cache: force a rebuild of the on-disk cache.
        skip: optional collection of settings hashes to exclude.
            (BUG FIX: was a mutable default argument ``skip=[]``; the
            list was never mutated, but the pattern is unsafe.)

    Raises:
        analyze.AnalysisException: when a test's count, iostat and iperf
            measurement files disagree on the number of runs.
    """
    if skip is None:
        skip = []
    cache_file = 'cache/results.pdl'
    db = Base(cache_file)
    if clear_cache or not db.exists() or os.path.getmtime(cache_file) < os.path.getmtime(results_dir):
        warnings.warn('Rebuilding results cache...')
        columns = set()
        rows = []
        p = pathlib.Path(results_dir)
        for config_file in p.glob('*.config'):
            with config_file.open() as config_fh:
                settings_hash = config_file.stem
                row = json.loads(config_fh.read())
                if settings_hash in skip:
                    continue
                row['hash'] = settings_hash
                tests_count = analyze.count(config_file.parent, settings_hash)
                row['iostat_cpu'], len_cpu_values = analyze.iostat_cpu(config_file.parent, settings_hash)
                row['iperf_result'], len_iperf_values = getattr(analyze, row['iperf_name'])(config_file.parent, settings_hash, row)
                # Sanity check: every measurement source must report the same
                # number of runs for this test.
                if tests_count != len_cpu_values or tests_count != len_iperf_values:
                    raise analyze.AnalysisException('For test {}, mismatch in cardinality of tests between count ({}), iostat ({}) and iperf ({})'.format(settings_hash, tests_count, len_cpu_values, len_iperf_values), settings_hash)
                if len_iperf_values > 0:
                    # Warn when the worst-case Jain fairness drops below the
                    # usual acceptability threshold for this parallelism.
                    min_fairness = row['iperf_result']['fairness'][0] - row['iperf_result']['fairness'][1]
                    if min_fairness < (1 - 1 / (2 * row['parallelism'])):
                        warnings.warn('For test {}, fairness has a critical value: {}.'.format(settings_hash, row['iperf_result']['fairness']), RuntimeWarning)
                columns = columns | set(row.keys())
                rows.append(row)
        # Recreate the database with the union of all columns seen.
        db.create(*columns, mode='override')
        for r in rows:
            db.insert(**r)
        db.commit()
        warnings.warn('Results cache built.')
    else:
        warnings.warn('Reusing results cache.')
        db.open()
    return db
| 2.140625 | 2 |
bip_utils/algorand/mnemonic/algorand_mnemonic_utils.py | 3rdIteration/bip_utils | 0 | 12762838 | # Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for Algorand mnemonic utility classes."""
# Imports
from typing import List, Optional, Union
from bip_utils.algorand.mnemonic.algorand_mnemonic import AlgorandMnemonicConst
from bip_utils.utils.misc import CryptoUtils
class AlgorandMnemonicUtils:
    """Container for the Algorand mnemonic helper functions."""

    @staticmethod
    def ComputeChecksum(data_bytes: bytes) -> bytes:
        """
        Compute checksum.

        Args:
            data_bytes (bytes): Data bytes

        Returns:
            bytes: First CHECKSUM_BYTE_LEN bytes of SHA-512/256(data_bytes)
        """
        digest = CryptoUtils.Sha512_256(data_bytes)
        return digest[:AlgorandMnemonicConst.CHECKSUM_BYTE_LEN]

    @staticmethod
    def ComputeChecksumWordIndex(data_bytes: bytes) -> int:
        """
        Compute checksum word index.

        Args:
            data_bytes (bytes): Data bytes

        Returns:
            int: Computed checksum word index
        """
        checksum = AlgorandMnemonicUtils.ComputeChecksum(data_bytes)
        words_11bit = AlgorandMnemonicUtils.ConvertBits(checksum, 8, 11)
        # Converting from 8-bit to 11-bit groups never fails, so the result
        # cannot be None here.
        assert words_11bit is not None

        return words_11bit[0]

    @staticmethod
    def ConvertBits(data: Union[bytes, List[int]],
                    from_bits: int,
                    to_bits: int) -> Optional[List[int]]:
        """
        Perform bit conversion.

        Regroups the input values (list of integers or byte sequence) from
        ``from_bits``-wide groups into ``to_bits``-wide groups.

        Args:
            data (list[int] or bytes): Data to be converted
            from_bits (int)         : Number of bits to start from
            to_bits (int)           : Number of bits to end with

        Returns:
            list[int]: Converted values (each < 2^to_bits), or None when an
                input value is negative or does not fit in ``from_bits`` bits.
                A trailing partial group is emitted as-is.
        """
        out_mask = (1 << to_bits) - 1
        converted: List[int] = []
        accumulator = 0
        pending_bits = 0

        for value in data:
            # Reject values that are negative or too wide for from_bits.
            if value < 0 or (value >> from_bits):
                return None
            # Accumulate bits, emitting a group whenever enough are pending.
            accumulator |= value << pending_bits
            pending_bits += from_bits
            while pending_bits >= to_bits:
                converted.append(accumulator & out_mask)
                accumulator >>= to_bits
                pending_bits -= to_bits

        if pending_bits != 0:
            converted.append(accumulator & out_mask)

        return converted
| 1.890625 | 2 |
MyApp/handle.py | kemalsanli/YMGK2-DJANGO-API | 0 | 12762839 | <filename>MyApp/handle.py
from . import crypto
from . import xor
from . import kaydet
from . import hash
import os
import cv2
def ymgk2xor(path,ServerHash):
    """XOR-encrypt or XOR-decrypt the image at *path*.

    If a key image matching the file's hash exists under ``key/``, the image
    is decrypted with it (and the key deleted); otherwise a new key is derived
    from *ServerHash*, the image is encrypted, and the key is stored under the
    hash of the encrypted result. The output is written to ``temp/sonuc.png``.
    """
    # Read the selected image.
    gorsel = cv2.imread(path)
    # Compute the hash of the selected file.
    hashFile = hash.hashIt(path)
    # Create the working directories if they do not exist yet.
    if os.path.exists('key') == False:
        os.mkdir('key')
    if os.path.exists('temp') == False:
        os.mkdir('temp')
    # If a key image for this hash already exists under key/, we are decrypting.
    if os.path.exists(('key/{}.png'.format(hashFile))):
        # Read the key, XOR it with the image and save the plain result.
        key = cv2.imread(('key/{}.png'.format(hashFile)))
        sifresiz = xor.xor(gorsel, key)
        kaydet.kaydet(sifresiz,'temp/sonuc.png')
        # Delete the key once we are done with it.
        os.remove(('key/{}.png'.format(hashFile)))
    # Otherwise (no key present) we are encrypting.
    else:
        # Stretch the hash to enlarge the key-derivation input.
        populatedHash=hash.populateHash(ServerHash)
        # Convert the hex string to uint8 values.
        gelendeger=xor.hexToUint8(populatedHash)
        # Derive the random key-source values from the converted hash.
        keySource = crypto.randomsayi(gelendeger)
        # Build a key image; the original image is passed along so the key
        # matches its dimensions.
        anahtar = xor.anahtarOlustur(gorsel, keySource)
        # XOR with the key and save the encrypted result as a .png file.
        sifrelenmis = xor.xor(gorsel, anahtar)
        kaydet.kaydet(sifrelenmis,'temp/sonuc.png')
        # Store the key under the hash of the *encrypted* output so it can be
        # found again at decryption time.
        sifreliHash = hash.hashIt('temp/sonuc.png')
        kaydet.kaydet(anahtar,('key/{}.png'.format(sifreliHash)))
| 2.515625 | 3 |
MVC/snake_curses_game.py | eddieir/design_patterns | 0 | 12762840 | from snake import Snake
from snake_curses_view import SnakeCursesView
from game import Game
import time
import curses
class SnakeCursesGame:
    """Curses front-end wiring a Snake model to a Game board and a view."""

    def __init__(self):
        self.snake = Snake()
        self.game = Game(80,24)
        self.game.add_snake(self.snake)
        self.game.start()
        self.w = curses.initscr()
        self.w.nodelay(True)   # make getch() non-blocking
        self.w.keypad(True)    # translate arrow keys into curses.KEY_* codes
        # BUG FIX: curs_set is a curses module function, not a method of this
        # class -- the original ``self.curs_set(0)`` raised AttributeError.
        curses.curs_set(0)     # hide the terminal cursor
        self.view = SnakeCursesView(self.w,self.game)
        self.view.add_action_listener()

    def turn_action(self,direction):
        """Turn the snake in *direction* (invoked by the view's listener)."""
        self.snake.turn(direction)

    def run(self):
        """Main loop: draw, pause one tick, process input, advance the game."""
        while True:
            self.view.draw()
            self.w.refresh()
            time.sleep(0.1)  # ~10 frames per second
            self.view.undraw()
            ch = self.w.getch()
            if ch in [curses.KEY_UP, curses.KEY_DOWN,
                      curses.KEY_LEFT, curses.KEY_RIGHT]:
                self.view.get_key()
            elif ch !=-1:
                break  # any other key quits the game
            self.game.tick()
def main():
    """Run the game, always restoring the terminal state on exit."""
    try:
        game = SnakeCursesGame()
        game.run()
    finally:
        try:
            curses.endwin()
        # BUG FIX: was a bare ``except:``, which also swallows
        # KeyboardInterrupt/SystemExit raised during cleanup. endwin() can
        # legitimately fail if initscr() never succeeded -- nothing to restore.
        except Exception:
            pass

if __name__ == '__main__':
    main()
| 2.828125 | 3 |
parsing/views.py | playyard/infomate.club | 327 | 12762841 | <reponame>playyard/infomate.club<gh_stars>100-1000
from django.contrib.syndication.views import Feed
from parsing.telegram.parser import parse_channel
class TelegramChannelFeed(Feed):
    """RSS feed built from the parsed messages of a public Telegram channel."""

    FEED_ITEMS = 30

    def get_object(self, request, channel_name):
        """Parse the channel, honoring the ``size`` and ``only`` query params."""
        size_param = request.GET.get("size")
        limit = int(size_param) if size_param else self.FEED_ITEMS
        only_param = str(request.GET.get("only") or "")
        return parse_channel(channel_name, only_text=only_param == "text", limit=limit)

    def title(self, obj):
        """Feed title: the channel name."""
        return obj.name

    def items(self, obj):
        """Feed entries: the channel's parsed messages."""
        return obj.messages

    def link(self, obj):
        """Feed link: the channel URL."""
        return obj.url

    def item_title(self, item):
        """Entry title: the raw message text."""
        return item.text

    def item_description(self, item):
        """Entry body: optional photo tag followed by the message text."""
        parts = []
        if item.photo:
            parts.append(f"<img src=\"{item.photo}\"><br>")
        if item.text:
            parts.append(str(item.text))
        return "".join(parts)

    def item_link(self, item):
        """Entry link: the message URL."""
        return item.url

    def item_pubdate(self, item):
        """Entry publication date: the message creation time."""
        return item.created_at
| 2.328125 | 2 |
model.py | ZhuXiyue/598cv | 0 | 12762842 | import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
from util import PointNetSetAbstractionOrg, PointNetSetAbstraction,PointNetFeaturePropogation,FlowEmbedding,PointNetSetUpConv
class FlowNet3D(nn.Module):
    """FlowNet3D scene-flow network: hierarchical set abstraction on two
    point clouds, a flow-embedding mixing layer, and set-upconv decoding back
    to a per-point 3D flow vector.

    NOTE(review): inputs appear to be channels-first tensors, presumably
    (batch, 3, num_points) for both points and features, matching the final
    Conv1d layers -- confirm against the layer implementations in util.py.
    """

    def __init__(self,args):
        # ``args`` is accepted for interface compatibility but not read here.
        super(FlowNet3D,self).__init__()

        """
        self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.5, nsample=16, in_channel=3, mlp=[32,32,64], group_all=False)
        self.sa2 = PointNetSetAbstraction(npoint=256, radius=1.0, nsample=16, in_channel=64, mlp=[64, 64, 128], group_all=False)
        self.sa3 = PointNetSetAbstraction(npoint=64, radius=2.0, nsample=8, in_channel=128, mlp=[128, 128, 256], group_all=False)
        self.sa4 = PointNetSetAbstraction(npoint=16, radius=4.0, nsample=8, in_channel=256, mlp=[256,256,512], group_all=False)
        """
        # Encoder: four set-abstraction levels with growing radius and
        # shrinking point count (1024 -> 256 -> 64 -> 16 sampled points).
        self.sa1 = PointNetSetAbstractionOrg(npoint=1024, radius=0.5, nsample=16, in_channel=3, mlp=[32, 64, 128], group_all=False)
        self.sa2 = PointNetSetAbstraction(npoint=256, radius=1.0, nsample=16, in_channel=128, mlp=[128, 128, 128], group_all=False)
        self.sa3 = PointNetSetAbstraction(npoint=64, radius=2.0, nsample=8, in_channel=128, mlp=[128, 128, 128], group_all=False)
        self.sa4 = PointNetSetAbstraction(npoint=16, radius=4.0, nsample=8, in_channel=128+3, mlp=[128, 128, 128], group_all=False)

        # Mixes level-2 features of the two clouds to encode motion.
        self.fe_layer = FlowEmbedding(radius=10.0, nsample=64, in_channel = 128+3, mlp=[128, 128, 128], pooling='max', corr_func='concat')

        # Decoder: three set-upconv levels plus feature propagation back to
        # the full-resolution cloud.
        self.su1 = PointNetSetUpConv(nsample=8, radius=2.4, f1_channel = 128+3, f2_channel = 128+6, mlp=[], mlp2=[256, 256])
        self.su2 = PointNetSetUpConv(nsample=8, radius=1.2, f1_channel = 128+128+3+3, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])
        self.su3 = PointNetSetUpConv(nsample=8, radius=0.6, f1_channel = 128+3, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])
        self.fp = PointNetFeaturePropogation(in_channel = 256+3, mlp = [256, 256])

        # Per-point regression head producing the 3-channel scene flow.
        self.conv1 = nn.Conv1d(256, 128, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(128)
        self.conv2=nn.Conv1d(128, 3, kernel_size=1, bias=True)

    def forward(self, pc1, pc2, feature1, feature2):
        """Predict the scene flow from cloud 1 to cloud 2.

        Args:
            pc1, pc2: the two point clouds.
            feature1, feature2: per-point features of each cloud.

        Returns:
            sf: predicted per-point 3D flow for pc1.
        """
        # Shared encoder applied to both clouds (levels 1 and 2).
        l1_pc1, l1_feature1 = self.sa1(pc1, feature1)   # l1_pc1 128, l1_f1 128
        l2_pc1, l2_feature1 = self.sa2(l1_pc1, l1_feature1)   # l2_pc1 128+3, l2_f1 128+3

        l1_pc2, l1_feature2 = self.sa1(pc2, feature2)   # l2_pc1 128, l1_f2 128
        l2_pc2, l2_feature2 = self.sa2(l1_pc2, l1_feature2)   # l2_pc2 128+3, l2_f2 128+3

        # Flow embedding correlates the two clouds at level 2.
        _, l2_feature1_new = self.fe_layer(l2_pc1, l2_pc2, l2_feature1, l2_feature2)   # l2_f1n 128

        # Deeper abstraction of cloud 1 only (levels 3 and 4).
        l3_pc1, l3_feature1 = self.sa3(l2_pc1, l2_feature1_new)   # l3_pc1 l3_f1 128+6
        l4_pc1, l4_feature1 = self.sa4(l3_pc1, l3_feature1)   # l4_pc1 l4_f1 128+9

        # Decoder with skip connections from the corresponding encoder level.
        l3_fnew1 = self.su1(l3_pc1, l4_pc1, l3_feature1, l4_feature1)   # l3_fn1 = 256
        l2_fnew1 = self.su2(l2_pc1, l3_pc1, torch.cat([l2_feature1, l2_feature1_new], dim=1), l3_fnew1)
        l1_fnew1 = self.su3(l1_pc1, l2_pc1, l1_feature1, l2_fnew1)
        l0_fnew1 = self.fp(pc1, l1_pc1, feature1, l1_fnew1)

        # Regression head: 256 -> 128 -> 3 flow channels.
        x = F.relu(self.bn1(self.conv1(l0_fnew1)))
        sf = self.conv2(x)
        return sf
if __name__ == '__main__':
    import os
    import torch
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # Smoke test on random data.
    # NOTE(review): assumed layout is (batch, 3, num_points) for both points
    # and features, matching the Conv1d head -- confirm against util.py.
    points = torch.randn((8, 3, 2048))
    features = torch.randn((8, 3, 2048))
    # BUG FIX: __init__ requires an ``args`` argument (unused by its body) and
    # forward() takes (pc1, pc2, feature1, feature2); the original called
    # FlowNet3D() and model(input, input), both of which raise TypeError.
    # Also avoids shadowing the ``input`` builtin.
    model = FlowNet3D(None)
    output = model(points, points, features, features)
    print(output.size())
| 2.25 | 2 |
venv/lib/python3.8/site-packages/arch/__init__.py | YileC928/finm-portfolio-2021 | 0 | 12762843 | from arch._version import get_versions
from arch.univariate.mean import arch_model
from arch.utility import test
# Resolve the package version via versioneer, then remove the helper from the
# package namespace so it is not part of the public API.
__version__ = get_versions()["version"]
del get_versions
def doc() -> None:
    """Open the online arch documentation in the default web browser."""
    import webbrowser

    webbrowser.open("http://arch.readthedocs.org/en/latest/")
# Public API of the top-level arch package.
__all__ = ["arch_model", "__version__", "doc", "test"]
| 1.679688 | 2 |
crafting/crafting.py | Kehvarl/pcg | 1 | 12762844 | """
Crafting.py
Inspired by: Craft3.pdf by <NAME>
http://web.archive.org/web/20080211222642/http://www.cs.umd.edu/~jra/craft3.pdf
Intention: Implement a crafting system which can act as a suitable base for a procedurally-generated
society.
"""
# Crafting is the act of producing an object from resources by use of a recipe
# A Crafting Recipe must define Location, Participants, Resources, Tools, Time, and Results
# Example Craft structure from article
# Craft
# {
# int time;
# string resource_names;
# Craft_need *resource_list; // List of resources and quantities
# int num_resources;
# string skill_name;
# int skill_level;
# int percent_failure;
# string item_created_amount; // Allow for random number of items made.
# string item_created_name;
# int item_created_id;
# };
# Craft_need
# {
# int type;
# int in_object;
# int amount_needed;
# int *acceptable_objects;
# int num_acceptable_objects;
# int damage_amount;
# }
#
# Sample Recipe: Wood (1 to 10 logs)
# Tree Here
# Axe Wield
# Wood_Log Produce [1,10]
# TODO: Define Crafting Primitives
class Craft:
    """Placeholder for a crafting recipe (time, needs, failure rate, output).

    Not implemented yet: instantiation assigns the default field values and
    then raises NotImplementedError, exactly as the original stub did.
    """

    def __init__(self):
        self.time = 1              # periods required to perform the craft
        self.needs = {}            # resource requirements (see CraftNeeded)
        self.failure_percent = 10  # chance that the craft attempt fails
        self.create_amount = 10    # how many items a success produces
        self.create_id = 0         # id of the item produced
        raise NotImplementedError
class CraftNeeded:
    """Placeholder for one resource requirement of a Craft recipe.

    Not implemented yet: instantiation assigns the default field values and
    then raises NotImplementedError, exactly as the original stub did.
    """

    def __init__(self):
        self.type = 0       # kind of requirement
        self.in_object = 0  # container/object the resource must be in
        self.amount = 0     # quantity required
        raise NotImplementedError
| 3.75 | 4 |
gdplib/stranded_gas/model.py | Yunshan-Liu/gdplib | 3 | 12762845 | from __future__ import division
import os
import pandas as pd
from pyomo.environ import (
ConcreteModel, Constraint, Integers, NonNegativeReals, Objective, Param,
RangeSet, Set, SolverFactory, Suffix, TransformationFactory, Var, exp, log,
sqrt, summation, value
)
from .util import alphanum_sorted
from pyomo.environ import TerminationCondition as tc
def build_model():
    """Build the Pyomo GDP model for the stranded-gas modular-plant problem.

    Reads module/site/well/market data from ``data.xlsx`` (located next to
    this module) and returns a ConcreteModel with disjunctions for module-type
    existence, site activity and pipeline existence. The objective minimizes
    the negative discounted profit over a 15-year quarterly horizon.
    """
    m = ConcreteModel()
    m.BigM = Suffix(direction=Suffix.LOCAL)
    m.periods_per_year = Param(initialize=4, doc="Quarters per year")
    m.project_life = Param(initialize=15, doc="Years")
    m.time = RangeSet(0, m.periods_per_year *
                      m.project_life - 1, doc="Time periods")
    m.discount_rate = Param(initialize=0.08, doc="8%")
    m.learning_rate = Param(initialize=0.1, doc="Fraction discount for doubling of quantity")
    m.module_setup_time = Param(
        initialize=1, doc="1 quarter for module transfer")

    @m.Param(m.time)
    def discount_factor(m, t):
        # Per-period discounting at the quarterly-compounded annual rate.
        return (1 + m.discount_rate / m.periods_per_year) ** (-t / m.periods_per_year)

    # --- Input data: read every sheet of data.xlsx at once. ---
    xlsx_data = pd.read_excel(os.path.join(os.path.dirname(__file__), "data.xlsx"), sheet_name=None)
    module_sheet = xlsx_data['modules'].set_index('Type')
    m.module_types = Set(initialize=module_sheet.columns.tolist(),)

    @m.Param(m.module_types)
    def module_base_cost(m, mtype):
        return float(module_sheet[mtype]['Capital Cost [MM$]'])

    @m.Param(m.module_types, doc="Natural gas consumption per module of this type [MMSCF/d]")
    def unit_gas_consumption(m, mtype):
        return float(module_sheet[mtype]['Nat Gas [MMSCF/d]'])

    @m.Param(m.module_types, doc="Gasoline production per module of this type [kBD]")
    def gasoline_production(m, mtype):
        return float(module_sheet[mtype]['Gasoline [kBD]'])

    @m.Param(m.module_types, doc="Overall conversion of natural gas into gasoline per module of this type [kB/MMSCF]")
    def module_conversion(m, mtype):
        return float(module_sheet[mtype]['Conversion [kB/MMSCF]'])

    site_sheet = xlsx_data['sites'].set_index('Potential site')
    m.potential_sites = Set(initialize=site_sheet.index.tolist())
    m.site_pairs = Set(
        doc="Pairs of potential sites",
        initialize=m.potential_sites * m.potential_sites,
        filter=lambda _, x, y: not x == y)

    @m.Param(m.potential_sites)
    def site_x(m, site):
        return float(site_sheet['x'][site])

    @m.Param(m.potential_sites)
    def site_y(m, site):
        return float(site_sheet['y'][site])

    well_sheet = xlsx_data['wells'].set_index('Well')
    m.well_clusters = Set(initialize=well_sheet.index.tolist())

    @m.Param(m.well_clusters)
    def well_x(m, well):
        return float(well_sheet['x'][well])

    @m.Param(m.well_clusters)
    def well_y(m, well):
        return float(well_sheet['y'][well])

    # --- Well production profiles: monthly decay curve per scheduled well,
    # accumulated into one profile per well cluster. ---
    sched_sheet = xlsx_data['well-schedule']
    decay_curve = [1] + [3.69 * exp(-1.31 * (t + 1) ** 0.292) for t in range(m.project_life * 12)]
    well_profiles = {well: [0 for _ in decay_curve] for well in m.well_clusters}
    for _, well_info in sched_sheet.iterrows():
        start_time = int(well_info['Month'])
        # Shift the decay curve by the well's start month and scale to its max rate.
        prod = [0] * start_time + decay_curve[:len(decay_curve) - start_time]
        prod = [x * float(well_info['max prod [MMSCF/d]']) for x in prod]
        current_profile = well_profiles[well_info['well-cluster']]
        well_profiles[well_info['well-cluster']] = [val + prod[i] for i, val in enumerate(current_profile)]

    @m.Param(m.well_clusters, m.time, doc="Supply of gas from well cluster [MMSCF/day]")
    def gas_supply(m, well, t):
        # Average the monthly profile over the quarter.
        # NOTE(review): the slice [t*3:t*3+2] covers only 2 of the 3 months
        # of quarter t while dividing by 3 -- possible off-by-one; confirm.
        return sum(well_profiles[well][t * 3:t * 3 + 2]) / 3

    mkt_sheet = xlsx_data['markets'].set_index('Market')
    m.markets = Set(initialize=mkt_sheet.index.tolist())

    @m.Param(m.markets)
    def mkt_x(m, mkt):
        return float(mkt_sheet['x'][mkt])

    @m.Param(m.markets)
    def mkt_y(m, mkt):
        return float(mkt_sheet['y'][mkt])

    @m.Param(m.markets, doc="Gasoline demand [kBD]")
    def mkt_demand(m, mkt):
        return float(mkt_sheet['demand [kBD]'][mkt])

    m.sources = Set(initialize=m.well_clusters | m.potential_sites)
    m.destinations = Set(initialize=m.potential_sites | m.markets)

    @m.Param(m.sources, m.destinations, doc="Distance [mi]")
    def distance(m, src, dest):
        # Euclidean distance; coordinates come from the well, site or market
        # sheet depending on which set the endpoint belongs to.
        if src in m.well_clusters:
            src_x = m.well_x[src]
            src_y = m.well_y[src]
        else:
            src_x = m.site_x[src]
            src_y = m.site_y[src]
        if dest in m.markets:
            dest_x = m.mkt_x[dest]
            dest_y = m.mkt_y[dest]
        else:
            dest_x = m.site_x[dest]
            dest_y = m.site_y[dest]
        return sqrt((src_x - dest_x) ** 2 + (src_y - dest_y) ** 2)

    # --- Module inventory decision variables. ---
    m.num_modules = Var(
        m.module_types, m.potential_sites, m.time,
        doc="Number of active modules of each type at a site in a period",
        domain=Integers, bounds=(0, 50), initialize=1)
    m.modules_transferred = Var(
        m.module_types, m.site_pairs, m.time,
        doc="Number of module transfers initiated from one site to another in a period.",
        domain=Integers, bounds=(0, 15), initialize=0)
    m.modules_purchased = Var(
        m.module_types, m.potential_sites, m.time,
        doc="Number of modules of each type purchased for a site in a period",
        domain=Integers, bounds=(0, 30), initialize=1)

    m.pipeline_unit_cost = Param(doc="MM$/mile", initialize=2)

    @m.Param(m.time, doc="Module transport cost per mile [M$/100 miles]")
    def module_transport_distance_cost(m, t):
        return 50 * m.discount_factor[t]

    @m.Param(m.time, doc="Module transport cost per unit [MM$/module]")
    def module_transport_unit_cost(m, t):
        return 3 * m.discount_factor[t]

    @m.Param(m.time, doc="Stranded gas price [$/MSCF]")
    def nat_gas_price(m, t):
        return 5 * m.discount_factor[t]

    @m.Param(m.time, doc="Gasoline price [$/gal]")
    def gasoline_price(m, t):
        return 2.5 * m.discount_factor[t]

    @m.Param(m.time, doc="Gasoline transport cost [$/gal/100 miles]")
    def gasoline_tranport_cost(m, t):
        return 0.045 * m.discount_factor[t]

    m.gal_per_bbl = Param(initialize=42, doc="Gallons per barrel")
    m.days_per_period = Param(initialize=90, doc="Days in a production period")

    # --- Economies of mass production: learning factor per module type. ---
    m.learning_factor = Var(
        m.module_types,
        doc="Fraction of cost due to economies of mass production",
        domain=NonNegativeReals, bounds=(0, 1), initialize=1)

    @m.Disjunct(m.module_types)
    def mtype_exists(disj, mtype):
        # Cost shrinks by learning_rate for each doubling of units purchased.
        disj.learning_factor_calc = Constraint(
            expr=m.learning_factor[mtype] == (1 - m.learning_rate) ** (
                log(sum(m.modules_purchased[mtype, :, :])) / log(2)))
        m.BigM[disj.learning_factor_calc] = 1
        disj.require_module_purchases = Constraint(
            expr=sum(m.modules_purchased[mtype, :, :]) >= 1)

    @m.Disjunct(m.module_types)
    def mtype_absent(disj, mtype):
        disj.constant_learning_factor = Constraint(
            expr=m.learning_factor[mtype] == 1)

    @m.Disjunction(m.module_types)
    def mtype_existence(m, mtype):
        return [m.mtype_exists[mtype], m.mtype_absent[mtype]]

    @m.Expression(m.module_types, m.time, doc="Module unit cost [MM$/module]")
    def module_unit_cost(m, mtype, t):
        return m.module_base_cost[mtype] * m.learning_factor[mtype] * m.discount_factor[t]

    # --- Production and flow decision variables. ---
    m.production = Var(
        m.potential_sites, m.time,
        doc="Production of gasoline in a time period [kBD]",
        domain=NonNegativeReals, bounds=(0, 30), initialize=10)
    m.gas_consumption = Var(
        m.potential_sites, m.module_types, m.time,
        doc="Consumption of natural gas by each module type "
        "at each site in a time period [MMSCF/d]",
        domain=NonNegativeReals, bounds=(0, 250), initialize=50)
    m.gas_flows = Var(
        m.well_clusters, m.potential_sites, m.time,
        doc="Flow of gas from a well cluster to a site [MMSCF/d]",
        domain=NonNegativeReals, bounds=(0, 200), initialize=15)
    m.product_flows = Var(
        m.potential_sites, m.markets, m.time,
        doc="Product shipments from a site to a market in a period [kBD]",
        domain=NonNegativeReals, bounds=(0, 30), initialize=10)

    @m.Constraint(m.potential_sites, m.module_types, m.time)
    def consumption_capacity(m, site, mtype, t):
        return m.gas_consumption[site, mtype, t] <= (
            m.num_modules[mtype, site, t] * m.unit_gas_consumption[mtype])

    @m.Constraint(m.potential_sites, m.time)
    def production_limit(m, site, t):
        return m.production[site, t] <= sum(
            m.gas_consumption[site, mtype, t] * m.module_conversion[mtype]
            for mtype in m.module_types)

    @m.Expression(m.potential_sites, m.time)
    def capacity(m, site, t):
        return sum(
            m.num_modules[mtype, site, t] * m.unit_gas_consumption[mtype]
            * m.module_conversion[mtype] for mtype in m.module_types)

    @m.Constraint(m.potential_sites, m.time)
    def gas_supply_meets_consumption(m, site, t):
        return sum(m.gas_consumption[site, :, t]) == sum(m.gas_flows[:, site, t])

    @m.Constraint(m.well_clusters, m.time)
    def gas_supply_limit(m, well, t):
        return sum(m.gas_flows[well, site, t]
                   for site in m.potential_sites) <= m.gas_supply[well, t]

    @m.Constraint(m.potential_sites, m.time)
    def gasoline_production_requirement(m, site, t):
        return sum(m.product_flows[site, mkt, t]
                   for mkt in m.markets) == m.production[site, t]

    @m.Constraint(m.potential_sites, m.module_types, m.time)
    def module_balance(m, site, mtype, t):
        """Module inventory: previous stock plus arrivals minus departures."""
        # Purchases and inbound transfers become active only after the
        # module_setup_time delay.
        if t >= m.module_setup_time:
            modules_added = m.modules_purchased[
                mtype, site, t - m.module_setup_time]
            modules_transferred_in = sum(
                m.modules_transferred[
                    mtype, from_site, to_site, t - m.module_setup_time]
                for from_site, to_site in m.site_pairs if to_site == site)
        else:
            modules_added = 0
            modules_transferred_in = 0
        if t >= 1:
            existing_modules = m.num_modules[mtype, site, t - 1]
        else:
            existing_modules = 0
        modules_transferred_out = sum(
            m.modules_transferred[mtype, from_site, to_site, t]
            for from_site, to_site in m.site_pairs if from_site == site)

        return m.num_modules[mtype, site, t] == (
            existing_modules + modules_added
            + modules_transferred_in - modules_transferred_out)

    # --- Site activity disjunction: inactive sites have zero activity. ---
    @m.Disjunct(m.potential_sites)
    def site_active(disj, site):
        pass

    @m.Disjunct(m.potential_sites)
    def site_inactive(disj, site):
        disj.no_production = Constraint(
            expr=sum(m.production[site, :]) == 0)
        disj.no_gas_consumption = Constraint(
            expr=sum(m.gas_consumption[site, :, :]) == 0)
        disj.no_gas_flows = Constraint(
            expr=sum(m.gas_flows[:, site, :]) == 0)
        disj.no_product_flows = Constraint(
            expr=sum(m.product_flows[site, :, :]) == 0)
        disj.no_modules = Constraint(
            expr=sum(m.num_modules[:, site, :]) == 0)
        disj.no_modules_transferred = Constraint(
            expr=sum(
                m.modules_transferred[mtypes, from_site, to_site, t]
                for mtypes in m.module_types
                for from_site, to_site in m.site_pairs
                for t in m.time
                if from_site == site or to_site == site) == 0)
        disj.no_modules_purchased = Constraint(
            expr=sum(
                m.modules_purchased[mtype, site, t]
                for mtype in m.module_types for t in m.time) == 0)

    @m.Disjunction(m.potential_sites)
    def site_active_or_not(m, site):
        return [m.site_active[site], m.site_inactive[site]]

    # --- Pipeline existence disjunction per (well cluster, site) pair. ---
    @m.Disjunct(m.well_clusters, m.potential_sites)
    def pipeline_exists(disj, well, site):
        pass

    @m.Disjunct(m.well_clusters, m.potential_sites)
    def pipeline_absent(disj, well, site):
        disj.no_natural_gas_flow = Constraint(
            expr=sum(m.gas_flows[well, site, t] for t in m.time) == 0)

    @m.Disjunction(m.well_clusters, m.potential_sites)
    def pipeline_existence(m, well, site):
        return [m.pipeline_exists[well, site], m.pipeline_absent[well, site]]

    # Objective Function Construction
    @m.Expression(m.potential_sites, doc="MM$")
    def product_revenue(m, site):
        return sum(
            m.product_flows[site, mkt, t]  # kBD
            * 1000  # bbl/kB
            / 1E6  # $ to MM$
            * m.days_per_period
            * m.gasoline_price[t] * m.gal_per_bbl
            for mkt in m.markets
            for t in m.time)

    @m.Expression(m.potential_sites, doc="MM$")
    def raw_material_cost(m, site):
        return sum(
            m.gas_consumption[site, mtype, t] * m.days_per_period
            / 1E6  # $ to MM$
            * m.nat_gas_price[t]
            * 1000  # MMSCF to MSCF
            for mtype in m.module_types for t in m.time)

    @m.Expression(
        m.potential_sites, m.markets,
        doc="Aggregate cost to transport gasoline from a site to market [MM$]")
    def product_transport_cost(m, site, mkt):
        return sum(
            m.product_flows[site, mkt, t] * m.gal_per_bbl
            * 1000  # bbl/kB
            / 1E6  # $ to MM$
            * m.distance[site, mkt] / 100 * m.gasoline_tranport_cost[t]
            for t in m.time)

    @m.Expression(m.well_clusters, m.potential_sites, doc="MM$")
    def pipeline_construction_cost(m, well, site):
        # Incurred only when the pipeline's "exists" disjunct is selected.
        return (m.pipeline_unit_cost * m.distance[well, site]
                * m.pipeline_exists[well, site].indicator_var)

    # Module transport cost
    @m.Expression(m.site_pairs, doc="MM$")
    def module_relocation_cost(m, from_site, to_site):
        return sum(
            m.modules_transferred[mtype, from_site, to_site, t]
            * m.distance[from_site, to_site] / 100
            * m.module_transport_distance_cost[t]
            / 1E3  # M$ to MM$
            + m.modules_transferred[mtype, from_site, to_site, t]
            * m.module_transport_unit_cost[t]
            for mtype in m.module_types
            for t in m.time)

    @m.Expression(m.potential_sites, doc="MM$")
    def module_purchase_cost(m, site):
        return sum(
            m.module_unit_cost[mtype, t] * m.modules_purchased[mtype, site, t]
            for mtype in m.module_types
            for t in m.time)

    @m.Expression(doc="MM$")
    def profit(m):
        return (
            summation(m.product_revenue)
            - summation(m.raw_material_cost)
            - summation(m.product_transport_cost)
            - summation(m.pipeline_construction_cost)
            - summation(m.module_relocation_cost)
            - summation(m.module_purchase_cost)
        )

    # Minimize the negative profit (i.e. maximize discounted profit).
    m.neg_profit = Objective(expr=-m.profit)

    # Tightening constraints
    @m.Constraint(doc="Limit total module purchases over project span.")
    def restrict_module_purchases(m):
        return sum(m.modules_purchased[...]) <= 5

    @m.Constraint(m.site_pairs, doc="Limit transfers between any two sites")
    def restrict_module_transfers(m, from_site, to_site):
        return sum(m.modules_transferred[:, from_site, to_site, :]) <= 5

    return m
if __name__ == "__main__":
m = build_model()
# Restrict number of module types; A, R, S, U
# valid_modules = ['A500', 'A1000', 'A2000', 'A5000']
# valid_modules = ['A500', 'R500', 'A5000', 'R5000']
# valid_modules = ['U500', 'U5000']
# valid_modules = ['U100', 'U250']
# valid_modules = ['U1000']
# valid_modules = ['U500']
valid_modules = ['U250']
# valid_modules = ['U100']
for mtype in m.module_types - valid_modules:
m.gas_consumption[:, mtype, :].fix(0)
m.num_modules[mtype, :, :].fix(0)
m.modules_transferred[mtype, :, :, :].fix(0)
m.modules_purchased[mtype, :, :].fix(0)
m.mtype_exists[mtype].deactivate()
m.mtype_absent[mtype].indicator_var.fix(1)
| 2.1875 | 2 |
projectreport/__init__.py | whoopnip/project-report | 0 | 12762846 | <filename>projectreport/__init__.py
"""
A set of tools for describing software projects. Finds software projects, analyzes them,
and outputs reports.
"""
from projectreport.analyzer.project import Project
from projectreport.analyzer.ts.github import GithubAnalysis
from projectreport.config import DEFAULT_IGNORE_PATHS
from projectreport.finder.combine import CombinedFinder
from projectreport.finder.git import GitFinder
from projectreport.finder.js import JavaScriptPackageFinder
from projectreport.finder.python import PythonPackageFinder
from projectreport.report.report import Report
| 1.460938 | 1 |
src/Services/SlackCommandService.py | alchamp/slack-click-bot | 0 | 12762847 | <reponame>alchamp/slack-click-bot<gh_stars>0
import os
import src.Models.SlackCommandModel as SlackCommandModel
class SlackCommandService(object):
    """Loads Slack command definitions from JSON files on disk and exposes
    simple lookup accessors over the loaded commands.
    """

    def __init__(self, container):
        """Store the service container and compute the commands directory.

        Args:
            container: dependency/service container, kept for later use.
        """
        self._container = container
        # Join path components portably. The original code used a single
        # "commands\\slackcommands" component whose backslash only acted as a
        # path separator on Windows (and "\s" is an invalid escape sequence).
        self.directory = os.path.join(os.getcwd(), "commands", "slackcommands")
        # Maps command name (file name without extension) -> parsed command model.
        self.slackCommands = {}

    def LoadSlackCommands(self):
        """Parse every *.json file in the commands directory into the registry."""
        for filename in os.listdir(self.directory):
            if not filename.endswith(".json"):
                continue
            # Strip only the final extension so dotted names survive intact
            # (the original "split('.')[0]" truncated names like "a.b.json" to "a").
            name_without_extension = os.path.splitext(filename)[0]
            with open(os.path.join(self.directory, filename)) as f:
                self.slackCommands[name_without_extension] = (
                    SlackCommandModel.SlackCommandModel.Parse(
                        f.read(), name_without_extension))

    def GetAllCommandsNames(self):
        """Return a view of all loaded command names."""
        return self.slackCommands.keys()

    def GetAllCommands(self):
        """Return a view of all loaded command models."""
        return self.slackCommands.values()

    def GetSlackCommandByName(self, name):
        """Return the command model for *name*; raises KeyError if missing."""
        return self.slackCommands[name]

    def SlackCommandExistByName(self, name):
        """Return True if a command named *name* has been loaded."""
        return name in self.slackCommands
| 2.46875 | 2 |
tomes_tagger/lib/eaxs_to_tagged.py | JeremyGibson/xml_parser | 2 | 12762848 | <filename>tomes_tagger/lib/eaxs_to_tagged.py
#!/usr/bin/env python3
"""
This module contains a class for converting an EAXS file to a tagged EAXS document.
Todo:
* Do you need to support <ExtBodyContent> messages?
- I think "yes" and you'll need to append attributes/elements accordingly.
- I think _update_message() MIGHT be doing this already?
- Only one way to find out. :P
"""
# import modules.
import base64
import logging
import os
import quopri
import unicodedata
from lxml import etree
class EAXSToTagged():
    """ A class for converting an EAXS file to a tagged EAXS document.

    Example:
        >>> def html2text(html): ...  # any function converting HTML to plain text.
        >>> def text2nlp(text): ...  # any function converting plain text to NLP/NER output.
        >>> e2t = EAXSToTagged(html2text, text2nlp)
        >>> e2t.write_tagged(eaxs_file, "tagged.xml") # create "tagged.xml".
        >>> e2t.write_tagged(eaxs_file, "tagged.xml", split=True) # create one tagged XML file
        >>> # per message with the form "tagged_01.xml", etc.
        >>> e2t.write_tagged(eaxs_file, "tagged.xml", restrictions=[1,2]) # only output tagged
        >>> # versions of the first two messages.
        >>> e2t.write_tagged(eaxs_file, "tagged.xml", split=True, restrictions=[1,2],
        >>> inclusive=False) # output tagged versions of all but the first two messages.
    """

    def __init__(self, html_converter, nlp_tagger, charset="utf-8", buffered=False):
        """ Sets instance attributes.

        Args:
            - html_converter (function): Any function that accepts HTML text (str) as its
            only required argument and returns a plain text version (str).
            - nlp_tagger (function): Any function that accepts plain text (str) as its only
            required argument and returns an NER-tagged XML message (lxml.etree_Element) per
            ./nlp_to_xml.xsd.
            - charset (str): Encoding with which to update EAXS message content. This is also
            the encoding used to write a tagged EAXS file with the @self.write_tagged()
            method.
            - buffered (bool): Use True to write tagged EAXS files with buffering. Otherwise,
            use False. For more information, see:
            http://lxml.de/api/lxml.etree.xmlfile-class.html.
        """

        # set logger; suppress logging by default.
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(logging.NullHandler())

        # set attributes.
        self.html_converter = html_converter
        self.nlp_tagger = nlp_tagger
        self.charset = charset
        self.buffered = buffered

        # set namespace attributes.
        self.ncdcr_prefix = "ncdcr"
        self.ncdcr_uri = "https://github.com/StateArchivesOfNorthCarolina/tomes-eaxs"
        self.ns_map = {self.ncdcr_prefix : self.ncdcr_uri}

    @staticmethod
    def _legalize_xml_text(xtext):
        """ A static method that alters @xtext by replacing vertical tabs, form feeds, and
        carriage returns with line breaks and by removing control characters except for line
        breaks and tabs. This is so that @xtext can be written to XML without raising a
        ValueError.

        Args:
            - xtext (str): The text to alter.

        Returns:
            str: The return value.
        """

        # legalize @xtext.
        for ws in ["\f","\r","\v"]:
            xtext = xtext.replace(ws, "\n")
        xtext = "".join([char for char in xtext if unicodedata.category(char)[0] != "C" or
                         char in ("\t", "\n")])
        return xtext

    def _get_folder_name(self, message_el):
        """ Gets the folder name for a given <Message> element. Subfolders are preceded by
        their parent folder name and a forward slash, e.g. 'parent/child'.

        Args:
            message_el (lxml.etree._Element): An EAXS <Message> element.

        Returns:
            str: The return value.
        """

        # iterate through ancestors; collect all ancestral <Folder/Name> element values.
        folder_names = []
        for ancestor in message_el.iterancestors():
            if ancestor.tag == "{" + self.ncdcr_uri + "}Folder":
                # NOTE(review): "!= None" should be "is not None" (PEP 8); behavior is
                # unchanged here.
                name_el = ancestor.getchildren()[0]
                if name_el.tag == "{" + self.ncdcr_uri + "}Name" and name_el.text != None:
                    folder_names.insert(0, name_el.text)
            elif ancestor.tag == "{" + self.ncdcr_uri + "}Account":
                break

        # convert list to path-like string.
        folder_name = "/".join(folder_names)
        folder_name = folder_name.encode(self.charset).decode(self.charset, errors="ignore")
        return folder_name

    def _get_global_id(self, eaxs_file):
        """ Gets the <GlobalId> element value for the given @eaxs_file.

        Args:
            - eaxs_file (str): The filepath for the EAXS file.

        Returns:
            str: The return value.

        Raises:
            - TypeError: If the <GlobalId> is not found, indicating either a missing element
            or a namespace URI that doesn't match @self.ncdcr_uri.
        """

        # create placeholder for global identifier.
        global_id = None

        # find <GlobalId> element value and break immediately (to avoid memory spikes!).
        global_id_tag = "{" + self.ncdcr_uri + "}GlobalId"
        for event, element in etree.iterparse(eaxs_file, events=("end",), strip_cdata=False,
                tag=global_id_tag, huge_tree=True):
            global_id_el = element
            global_id = global_id_el.text
            element.clear()
            break

        # if needed, raise an exception.
        if global_id is None:
            msg = "Can't find <GlobalId> element."
            self.logger.error(msg)
            raise TypeError(msg)

        return global_id

    def _get_messages(self, eaxs_file):
        """ Gets all <Message> elements for the given @eaxs_file.

        Args:
            - eaxs_file (str): The filepath for the EAXS file.

        Returns:
            lxml.etree.iterparse: The return value.
        """

        # get generator for all <Message> elements.
        message_tag = "{" + self.ncdcr_uri + "}Message"
        messages = etree.iterparse(eaxs_file, events=("end",), strip_cdata=False,
                tag=message_tag, huge_tree=True)
        return messages

    def _get_message_id(self, message_el):
        """ Gets the <MessageId> element value for a given <Message> element.

        Args:
            message_el (lxml.etree._Element): An EAXS <Message> element.

        Returns:
            str: The return value.
        """

        # get <MessageId> element value; strip leading/trailing space.
        path = "{ns}:MessageId".format(ns=self.ncdcr_prefix)
        message_id = message_el.xpath(path, namespaces=self.ns_map)
        message_id = message_id[0].text.strip()
        return message_id

    def _get_message_data(self, message_el):
        """ Gets relevant element values from the given <Message> element, @message_el.

        Args:
            - message_el (lxml.etree._Element): An EAXS <Message> element.

        Returns:
            tuple: The return value.
            All items are strings.
            The first item is the message's <BodyContent/Content> element value.
            The second item is the <BodyContent/TransferEncoding> element value.
            The third item is the <ContentType> element value.
        """

        # set XPath for <SingleBody> elements, omitting attachments.
        path = "{ns}:MultiBody/{ns}:SingleBody[not(descendant::{ns}:Disposition)]"
        path = path.format(ns=self.ncdcr_prefix)

        # assume default values.
        content_text, transfer_encoding_text, content_type_text = "", "7-bit", "text/plain"

        # get all <SingleBody> elements via XPath.
        single_body_els = message_el.xpath(path, namespaces=self.ns_map)
        for single_body_el in single_body_els:

            # set @content_text.
            node = "{ns}:BodyContent/{ns}:Content".format(ns=self.ncdcr_prefix)
            content_el = single_body_el.xpath(node, namespaces=self.ns_map)
            if len(content_el) > 0 and content_el[0].text is not None:
                content_text = content_el[0].text
            else:
                content_text = ""

            # set @transfer_encoding_text.
            node = "{ns}:BodyContent/{ns}:TransferEncoding".format(ns=self.ncdcr_prefix)
            transfer_el = single_body_el.xpath(node, namespaces=self.ns_map)
            if len(transfer_el) > 0 and transfer_el[0].text is not None:
                transfer_encoding_text = transfer_el[0].text.lower()
            else:
                transfer_encoding_text = ""

            # set @content_type_text.
            node = "{ns}:ContentType".format(ns=self.ncdcr_prefix)
            content_type_el = single_body_el.xpath(node, namespaces=self.ns_map)
            if len(content_type_el) > 0 and content_type_el[0].text is not None:
                content_type_text = content_type_el[0].text.lower()
            else:
                content_type_text = ""

            # if the preferred plain/text message is found, break immediately.
            if content_type_text == "text/plain":
                break

        # return data as tuple.
        message_data = (content_text, transfer_encoding_text, content_type_text)
        return message_data

    def _tag_message(self, content_text, transfer_encoding_text, content_type_text):
        """ Tags a given <Message> element with a given text value (@content_text) and given
        @transfer_encoding_text and @content_type_text values.

        Args:
            - content_text (str): The text from which to extract NER tags via
            @self.nlp_tagger.
            - transfer_encoding_text (str): The message's transfer encoding value.
            - content_type_text (str): The message's content type value.

        Returns:
            tuple: The return value.
            The first item is an lxml.etree._Element: the tagged XML tree.
            The second item is a string: the original message stripped of HTML tags and/or
            Base64-decoded and/or decoded quoted-printable. If the message was unaltered,
            this value is None.
        """

        self.logger.info("Tagging <Message> element content.")

        # assume that @content_text will not be altered.
        is_stripped = False

        # if needed, Base64 decode @content_text.
        if transfer_encoding_text == "base64":
            self.logger.info("Decoding Base64 message content.")
            content_text = base64.b64decode(content_text)
            content_text = content_text.decode(self.charset, errors="backslashreplace")
            is_stripped = True

        # if needed, decode quoted-printable text.
        if transfer_encoding_text == "quoted-printable":
            self.logger.info("Decoding quoted-printable message content.")
            content_text = quopri.decodestring(content_text)
            content_text = content_text.decode(self.charset, errors="backslashreplace")
            is_stripped = True

        # if needed, convert HTML in @content_text to plain text.
        if content_type_text in ["text/html", "application/xml+html"]:
            self.logger.info("Converting HTML message content to plain text.")
            content_text = self.html_converter(content_text)
            is_stripped = True

        # get NER tags.
        self.logger.info("Tagging message content with NER.")
        tagged_el = self.nlp_tagger(content_text)

        # set value of stripped content.
        stripped_content = None
        if is_stripped:
            stripped_content = content_text

        return (tagged_el, stripped_content)

    def _update_message(self, message_el, folder_name):
        """ Updates a <Message> element's value with NER-tagged content. Affixes the
        @folder_name as a new attribute.

        Args:
            - message_el (lxml.etree._Element): An EAXS <Message> element.
            - folder_name (str): The name of the EAXS <Folder> element that contains the
            <Message> element.

        Returns:
            lxml.etree._Element: The return value.
            The updated <Message> element.
        """

        self.logger.info("Updating <Message> element tree.")

        # set new attributes and elements.
        try:
            message_el.set("ParentFolder", folder_name)
        except ValueError as err:
            # lxml rejects attribute values containing illegal XML characters.
            self.logger.error(err)
            self.logger.info("Cleaning @ParentFolder attribute value.")
            message_el.set("ParentFolder", self._legalize_xml_text(folder_name))
        message_el.set("Processed", "false")
        message_el.set("Record", "true")
        message_el.set("Restricted", "false")
        message_el.append(etree.Element("{" + self.ncdcr_uri + "}Restriction",
            nsmap=self.ns_map))

        # get relevant <Message> element data.
        message_data = self._get_message_data(message_el)
        content_text, transfer_encoding_text, content_type_text = message_data

        # if no viable <Content> sub-element exists, return the <Message> element.
        if content_text == "":
            self.logger.warning("Found empty message content; skipping message tagging.")
            return message_el

        # otherwise, get NER tags and a plain text version of the message body.
        tagged_content, stripped_content = self._tag_message(content_text,
                transfer_encoding_text, content_type_text)

        # if PII appears to exist in the message; update the @Restricted attribute.
        token_el = "{" + self.ncdcr_uri + "}Token"
        for element in tagged_content.iterchildren(tag=token_el):
            if "entity" not in element.attrib:
                continue
            if element.attrib["entity"][:4] == "PII.":
                self.logger.info("Found PII tag; updating messages's @Restricted attribute.")
                message_el.set("Restricted", "true")
                break

        # create a new <SingleBody> element.
        single_body_el = etree.Element("{" + self.ncdcr_uri + "}SingleBody",
            nsmap=self.ns_map)

        # create a new <TaggedContent> element; append it to the new <SingleBody> element.
        tagged_content_el = etree.Element("{" + self.ncdcr_uri + "}TaggedContent",
            nsmap=self.ns_map)
        tagged_content = etree.tostring(tagged_content, encoding=self.charset)
        tagged_content = tagged_content.decode(self.charset, errors="backslashreplace")
        try:
            tagged_content_el.text = etree.CDATA(tagged_content.strip())
        except ValueError as err:
            # CDATA cannot contain illegal XML characters; clean and retry.
            self.logger.error(err)
            self.logger.warning("Cleaning tagged content in order to write CDATA.")
            tagged_content = self._legalize_xml_text(tagged_content)
            tagged_content_el.text = etree.CDATA(tagged_content.strip())
        single_body_el.append(tagged_content_el)

        # if needed, append a plain text message body to the new <SingleBody> element.
        if stripped_content is not None:
            stripped_content_el = etree.Element("{" + self.ncdcr_uri +
                "}StrippedContent", nsmap=self.ns_map)
            try:
                stripped_content_el.text = etree.CDATA(stripped_content.strip())
            except ValueError as err:
                self.logger.error(err)
                self.logger.info("Cleaning stripped content in order to write CDATA.")
                stripped_content = self._legalize_xml_text(stripped_content)
                stripped_content_el.text = etree.CDATA(stripped_content.strip())
            single_body_el.append(stripped_content_el)

        # append the new <SingleBody> element to @message_el.
        multi_body_tag = "{ns}:MultiBody".format(ns=self.ncdcr_prefix)
        message_el.xpath(multi_body_tag, namespaces=self.ns_map)[0].append(single_body_el)

        return message_el

    def _get_tagged_messages(self, eaxs_file, total_messages, restrictions=[],
            inclusive=True):
        """ Tags <Message> elements in a given @eaxs_file.

        Args:
            - eaxs_file (str): The filepath for the EAXS file.
            - total_messages (int): The total number of <Message> elements in @eaxs_file.
            - restrictions (list): The position of the messages to exclusively tag OR those
            to skip from tagging. Note: the first message's value is 1. Leave this empty to
            tag all messages.
            - inclusive (bool): Use True to only tag messages whose position values are in
            @restrictions. Otherwise, use False to tag all messages except the ones listed in
            @restrictions. If @restrictions is empty, this value is ignored.

        Returns:
            generator: The return value.
            The yielded data is a tuple.
            The first item is a string, the zero-padded position of the given message (first =
            1).
            The second item is the <MessageId> value.
            The third item is the tagged lxml.etree._Element or None if the tagging workflow
            failed.
        """

        # NOTE(review): @restrictions uses a mutable default; it is never mutated here,
        # but a "None" default would be safer.
        self.logger.info("Tagging messages in EAXS file: {}".format(eaxs_file))

        # tag each <Message> element.
        message_index = 0
        for event, element in self._get_messages(eaxs_file):
            message_index += 1

            # if @restrictions is not empty; filter results as requested.
            if len(restrictions) != 0:
                msg = "Skipping message {} as requested.".format(message_index)
                if inclusive and message_index not in restrictions:
                    self.logger.info(msg)
                    continue
                elif not inclusive and message_index in restrictions:
                    self.logger.info(msg)
                    continue

            # get needed values from the message element.
            message_id = self._get_message_id(element)
            folder_name = self._get_folder_name(element)

            # tag the message.
            self.logger.info("Tagging message with id: {}".format(message_id))
            try:
                tagged_message = self._update_message(element, folder_name)
            except Exception as err:
                self.logger.error(err)
                self.logger.warning("Failed to complete tagging workflow.")
                tagged_message = None

            # report on progress.
            remaining_messages = total_messages - message_index
            self.logger.info("Processed {} of {} messages.".format(message_index,
                total_messages))
            if remaining_messages > 0:
                self.logger.info("Messages left to process: {}".format(remaining_messages))

            # yield the tagged message tuple.
            yield (message_index, message_id, tagged_message)

            # clear original @element (must follow yield!).
            element.clear()

        return

    def _write_xml(self, eaxs_file, tagged_eaxs_file, tagged_messages, global_id):
        """ Writes @tagged_eaxs_file as an XML file.

        Args:
            - eaxs_file (str): The filepath for the EAXS file.
            - tagged_eaxs_file (str): The filepath to which the tagged EAXS document will be
            written.
            - tagged_messages (generator): The tagged message tuple as returned by
            self._get_tagged_messages().
            - global_id (str): The value of self._get_global_id(@eaxs_file).

        Returns:
            list: The return value.
            The message indexes for messages that failed to finish the tagging process.

        Raises:
            - FileExistsError: If @tagged_eaxs_file already exists.
        """

        # raise error if @tagged_eaxs_file already exists.
        if os.path.isfile(tagged_eaxs_file):
            err = "Destination file '{}' already exists.".format(tagged_eaxs_file)
            self.logger.error(err)
            raise FileExistsError(err)

        # create placeholder for untagged messages.
        untagged_messages = []

        # open new @tagged_eaxs_file.
        with etree.xmlfile(tagged_eaxs_file, encoding=self.charset, close=True,
                buffered=self.buffered) as xfile:

            # write XML header to @xfile; register namespace information.
            xfile.write_declaration()
            etree.register_namespace(self.ncdcr_prefix, self.ncdcr_uri)

            # write root <Account> element; append tagged <Message> elements.
            account_tag = "{ns}:Account".format(ns=self.ncdcr_prefix)
            with xfile.element(account_tag, GlobalId=global_id, SourceEAXS=eaxs_file,
                    nsmap=self.ns_map):

                # write tagged message to file.
                for message_index, message_id, tagged_message in tagged_messages:

                    # if message wasn't tagged, append index to @untagged_messages.
                    if tagged_message is None:
                        untagged_messages.append(message_index)

                    # otherwise, write message.
                    else:
                        xfile.write(tagged_message)
                        tagged_message.clear()

        return untagged_messages

    def write_tagged(self, eaxs_file, tagged_eaxs_file, split=False, restrictions=[],
            inclusive=True):
        """ Converts an @eaxs_file to one or many tagged EAXS file/s.

        Args:
            - eaxs_file (str): The filepath for the EAXS file.
            - tagged_eaxs_file (str): The filepath that the tagged EAXS document will be
            written to. If @split is True, this value will have an underscore and the
            zero-padded position of each message placed before the file extension.
            - split (bool): Use True to create one tagged EAXS file per message. Otherwise,
            use False.
            - restrictions (list): The position of the messages to exclusively tag OR those
            to skip from tagging. Note: the first message's value is 1. Leave this empty to
            tag all messages.
            - inclusive (bool): Use True to only tag messages whose position values are in
            @restrictions. Otherwise, use False to tag all messages except the ones listed in
            @restrictions. If @restrictions is empty, this value is ignored.

        Returns:
            dict: The return value.
            The "total_messages" key's value is an int, the total number of messages in
            @eaxs_file. The "untagged_messages" key's value is a list of ints - the message
            indexes of <Message> elements that didn't make it through the tagging workflow.

        Raises:
            - FileNotFoundError: If @eaxs_file doesn't exist or if the containing folder for
            @tagged_eaxs_file doesn't exist.
            - ValueError: If a <Message> element is found with a namespace URI that doesn't
            match @self.ncdcr_uri.
        """

        # raise error if @eaxs_file doesn't exist.
        if not os.path.isfile(eaxs_file):
            err = "Can't find EAXS file: {}".format(eaxs_file)
            self.logger.error(err)
            raise FileNotFoundError(err)

        # raise error if containing folder for @tagged_eaxs_file does not exist.
        container = os.path.split(tagged_eaxs_file)[0]
        if container != "" and not os.path.isdir(container):
            err = "Destination folder '{}' does not exist.".format(container)
            self.logger.error(err)
            raise FileNotFoundError(err)

        self.logger.info("Finding number of messages in '{}'; this may take a while.".format(
            eaxs_file))

        # get count of <Message> elements.
        total_messages = 0
        for event, element in self._get_messages(eaxs_file):

            # test for the correct namespace URI.
            if self.ncdcr_uri not in element.nsmap.values():
                self.logger.warning("Namespace URI appears to be obsolete.")
                msg = "Expected namespace URI '{}' not found in namespace map: {}".format(
                    self.ncdcr_uri, element.nsmap)
                self.logger.error(msg)
                raise ValueError(msg)

            # augment message count.
            total_messages += 1
            element.clear()

        self.logger.info("Found {} messages.".format(total_messages))

        # get needed values for @eaxs_file.
        global_id = self._get_global_id(eaxs_file)
        source_eaxs = os.path.basename(eaxs_file)

        # launch generator to tag all messages.
        tagged_messages = self._get_tagged_messages(eaxs_file, total_messages, restrictions,
            inclusive)

        # create placeholder dict to return.
        results = {"total_messages": total_messages, "untagged_messages": []}

        # create function to write one file per message.
        def multi_file_writer():

            # determine padding length based on @total_messages.
            padding_length = 1 + len(str(total_messages))

            # lambda functions to return a padded version of @tagged_eaxs_file.
            pad_indx = lambda indx: "_" + str(indx).zfill(padding_length)
            pad_file = lambda fname, pad: pad.join(os.path.splitext(fname))

            # write one file per each message.
            for tagged_message in tagged_messages:
                msg_indx = tagged_message[0]
                fname = pad_file(tagged_eaxs_file, pad_indx(msg_indx))
                untagged = self._write_xml(source_eaxs, fname, [tagged_message], global_id)
                results["untagged_messages"] += untagged
            return results

        # create function to write only one file.
        def single_file_writer():
            untagged = self._write_xml(source_eaxs, tagged_eaxs_file, tagged_messages,
                global_id)
            results["untagged_messages"] = untagged
            return results

        # execute the appropriate function depending on the value of @split.
        if split:
            results = multi_file_writer()
        else:
            results = single_file_writer()

        return results
if __name__ == "__main__":
    # No CLI entry point; this module is meant to be imported as a library.
    pass
| 2.953125 | 3 |
essere_benessere/website/admin/AccountAdmin.py | entpy/eb-django | 0 | 12762849 | # -*- coding: utf-8 -*-
from website.models import Account, Promotion, Campaign
from website.forms import *
from django.conf.urls import patterns
from django.contrib import admin, messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, loader
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class AccountAdmin(admin.ModelAdmin):
# fileds in add/modify form
fields = (('first_name', 'last_name'), 'email', 'mobile_phone', 'birthday_date', ('receive_promotions', 'loyal_customer'))
# table list fields
list_display = ('email', 'mobile_phone', 'first_name', 'last_name', 'loyal_customer')
# URLs overwriting to add new admin views (with auth check and without cache)
def get_urls(self):
urls = super(AccountAdmin, self).get_urls()
my_urls = patterns('',
(r'^campaigns/step1/(?P<new_campaign>\d+)?$', self.admin_site.admin_view(self.create_promotion)),
(r'^campaigns/step2$', self.admin_site.admin_view(self.select_recipients)),
(r'^campaigns/step3/(?P<id_promotion>\d+)?$', self.admin_site.admin_view(self.campaign_review)),
(r'^code_validator$', self.admin_site.admin_view(self.code_validator)),
(r'^birthday_promo$', self.admin_site.admin_view(self.set_birthday_promo)),
)
# return custom URLs with default URLs
return my_urls + urls
def set_birthday_promo(self, request):
"""
Function to create/edit or remove a birthday promo
"""
promotion_obj = Promotion()
if request.method == 'POST':
# checking if birthday promo must be deleted
if request.POST.get("delete"):
promotion_obj.delete_birthday_promotion()
messages.add_message(request, messages.SUCCESS, 'Promozione compleanno eliminata correttamente')
logger.info('Promozione compleanno eliminata correttamente')
return HttpResponseRedirect('/admin/website/account/birthday_promo') # Redirect after POST
form = BirthdayPromotionForm(request.POST, request.FILES, instance=promotion_obj.get_birthday_promotion_instance())
if form.is_valid():
# saving birthday promo form (creting only instance without saving)
birthday_promo_obj = form.save(commit=False)
# setting promo type to birthday_promo before saving
birthday_promo_obj.promo_type = Promotion.PROMOTION_TYPE_BIRTHDAY["key"]
# saving instance into db
birthday_promo_obj.save()
logger.info('Promozione compleanno modificata correttamente')
messages.add_message(request, messages.SUCCESS, 'Promozione compleanno modificata correttamente')
return HttpResponseRedirect('/admin/website/account/birthday_promo') # Redirect after POST
else:
# empty or change birthday promo form
form = BirthdayPromotionForm(instance=promotion_obj.get_birthday_promotion_instance())
context = {
'adminform' : form,
'title': "Promozione compleanno",
'opts': self.model._meta,
'app_label': self.model._meta.app_label,
'has_change_permission': True,
'has_file_field' : True,
}
return render(request, 'admin/custom_view/birthday_promo.html', context)
def code_validator(self, request):
"""
Function to validate a coupon code
"""
can_redeem = False
promotion_details = {}
if request.method == 'POST':
form = ValidateCodeForm(request.POST)
# cancel operation
if (request.POST.get("cancel", "")):
messages.add_message(request, messages.WARNING, 'Operazione annullata.')
return HttpResponseRedirect('/admin/website/account/code_validator') # Redirect after POST
if form.is_valid():
post_code = request.POST.get("promo_code")
# retrieving promotion details
campaign_obj = Campaign()
# checking if code exists
if (not campaign_obj.check_code_validity(code=post_code, validity_check="exists")):
messages.add_message(request, messages.ERROR, 'Codice promozionale non esistente.')
return HttpResponseRedirect('/admin/website/account/code_validator') # Redirect after POST
# checking if code is not already validated
if (not campaign_obj.check_code_validity(code=post_code, validity_check="not_used")):
messages.add_message(request, messages.ERROR, 'Codice promozionale già validato.')
return HttpResponseRedirect('/admin/website/account/code_validator') # Redirect after POST
# checking if campaign is not expired
if (not campaign_obj.check_code_validity(code=post_code, validity_check="not_expired")):
messages.add_message(request, messages.ERROR, 'Codice promozionale scaduto.')
return HttpResponseRedirect('/admin/website/account/code_validator') # Redirect after POST
# user can redeem the code
can_redeem = True
# show promotion details
promotion_details = campaign_obj.get_campaign_details(campaign_code=post_code)
if (request.POST.get("redeem_code", "")):
# redeem code and redirect to success page
campaign_obj.redeem_code(post_code)
messages.add_message(request, messages.SUCCESS, 'Codice promozionale validato!')
return HttpResponseRedirect('/admin/website/account/code_validator') # Redirect after POST
else:
form = ValidateCodeForm() # An unbound form
context = {
'form' : form,
'redeem_code' : can_redeem,
'promotion_details' : promotion_details,
'title': "Validatore di codici",
'opts': self.model._meta,
'app_label': self.model._meta.app_label,
}
return render(request, 'admin/custom_view/code_validator.html', context)
# STEP 1
def create_promotion(self, request, new_campaign=False):
# retrieving id promotion from GET (if exists and saving into session)
if (new_campaign):
request.session['promotion_id'] = None
# 1: get add promotion form
try:
# get promo id from session
promotion_obj = None
promotion_obj = Promotion.objects.get(id_promotion=request.session['promotion_id'])
except (KeyError, Promotion.DoesNotExist):
# object doesn't exists
pass
if (request.method == 'POST'):
formset = PromotionForm(request.POST, request.FILES, instance=promotion_obj)
if formset.is_valid():
promo = formset.save()
logger.debug("nuova promozione (#" + str(promo.id_promotion) + ") salvata con successo")
# saving created promotion id into session
request.session['promotion_id'] = promo.id_promotion
# redirect to campaigns/step2
return HttpResponseRedirect('/admin/website/account/campaigns/step2') # Redirect after POST
else:
# retrieving a model form starting from model instance
formset = PromotionForm(instance=promotion_obj)
logger.debug("selected_contacts_list: " + str(formset))
# creating template context
context = {
'adminform' : formset,
'title': "Crea la promozione",
'opts': self.model._meta,
'app_label': self.model._meta.app_label,
}
return render(request, 'admin/custom_view/campaigns/step1.html', context)
# STEP 2
def select_recipients(self, request):
"""
Send campaign wizard view, a custom admin view to enable
sending promotion to a customer list
"""
# a campaign must exists before enter here
if (request.session['promotion_id'] is None):
return HttpResponseRedirect('/admin/website/account/campaigns/step1')
contact_list = Account.objects.filter(receive_promotions=True)
campaign_obj = Campaign()
paginator = Paginator(contact_list, 5)
working_id_promotion = request.session['promotion_id']
# retrieving new page number
if (request.POST.get('next', '')):
page = request.POST.get('next_page')
else:
page = request.POST.get('previously_page')
# retrieving old page number
old_viewed_page = request.POST.get('current_page')
"""1""" # set/unset campaign senders: saving checked and delete unchecked checkbox from form into db/session if POST exists
if(request.POST.get("select_senders_form_sent", "") and request.POST.get("current_page", "")):
selected_contacts = request.POST.getlist("contacts[]")
# retrieving checked list from current view (only checkbox that are shown from paginator current view)
senders_dictionary = campaign_obj.get_checkbox_dictionary(paginator.page(old_viewed_page), selected_contacts, "id_account")
# logger.error("senders dictionary: " + str(senders_dictionary))
# saving or removing checked/unchecked checkbox from db
campaign_obj.set_campaign_user(senders_dictionary, id_promotion=working_id_promotion)
"""2""" # retrieving all checked checkbox for current promotion
campaign_contacts_list = campaign_obj.get_account_list(id_promotion=working_id_promotion)
logger.debug("selected_contacts_list: " + str(campaign_contacts_list))
# retrieving paginator object
try:
contacts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
contacts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
contacts = paginator.page(paginator.num_pages)
"""3""" # creating template context
context = {
'contacts' : contacts,
'campaign_contacts_list' : campaign_contacts_list,
'id_promotion' : working_id_promotion,
'title': "Seleziona i destinatari",
'opts': self.model._meta,
'app_label': self.model._meta.app_label,
}
# send promotion (after the senders selection)
if (request.POST.get("next_step", "")):
return HttpResponseRedirect('/admin/website/account/campaigns/step3/' + str(working_id_promotion)) # Redirect after POST
# select senders list page
return render(request, 'admin/custom_view/campaigns/step2.html', context)
# STEP 3
    def campaign_review(self, request, id_promotion=None):
        """Admin view for step 3 of the campaign flow: review and send.

        Renders a read-only summary of the promotion (form + number of
        selected senders) and handles the two submit buttons:
          * ``edit_promotion`` -> redirect back to step 1 for re-editing,
          * ``send_promotion`` -> send the campaign, flash a success
            message, and redirect back to step 1.
        A promotion that was already sent (``status`` truthy) or an unknown
        ``id_promotion`` is redirected back to step 1.
        """
        try:
            # Load the promotion and build a form bound to it for display.
            promotion_obj = Promotion.objects.get(id_promotion=id_promotion)
            promo_form = PromotionForm(instance=promotion_obj)
            if (not promotion_obj.status):
                # User chose to go back and re-edit the promotion.
                if (request.POST.get("edit_promotion", "")):
                    return HttpResponseRedirect('/admin/website/account/campaigns/step1/') # Redirect after POST
                # User confirmed sending the promotion.
                if (request.POST.get("send_promotion", "")):
                    campaign_obj = Campaign()
                    logger.debug("invio la promozione #" + str(id_promotion))
                    campaign_obj.send_campaign(id_promotion=id_promotion)
                    # Flash a success message and redirect (message text is
                    # user-facing Italian, left untouched).
                    messages.add_message(request, messages.SUCCESS, 'Promozione inviata con successo!')
                    return HttpResponseRedirect('/admin/website/account/campaigns/step1/1')
                # Count the total senders selected for this campaign.
                campaign_obj = Campaign()
                total_senders = campaign_obj.count_campaign_senders(id_promotion=id_promotion)
                # Template context for the review page.
                context = {
                    'adminform' : promo_form,
                    'id_promotion' : id_promotion,
                    'total_senders' : total_senders,
                    'title': "Riepilogo della campagna",
                    'opts': self.model._meta,
                    'app_label': self.model._meta.app_label,
                }
                # Render the campaign review page.
                return render(request, 'admin/custom_view/campaigns/step3.html', context)
            else:
                # Promotion was already sent; nothing to review.
                return HttpResponseRedirect('/admin/website/account/campaigns/step1/') # Redirect after POST
        except (KeyError, Promotion.DoesNotExist):
            # No promotion with this id: id_promotion must exist to review.
            return HttpResponseRedirect('/admin/website/account/campaigns/step1/1') # Redirect after POST
# File: src/build/android/gyp/proguard.py (from goochen/naiveproxy)
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import re
import shutil
import sys
import tempfile
import zipfile
import dex
import dex_jdk_libs
from util import build_utils
from util import diff_utils
# (api_level, release suffix) pairs. The suffix is appended to
# 'org.chromium.base.annotations.VerifiesOn' in _CreateDynamicConfig to form
# the per-release @VerifiesOn* annotation class names.
_API_LEVEL_VERSION_CODE = [
    (21, 'L'),
    # NOTE(review): 'LolliopoMR1' looks misspelled ("LollipopMR1"), but the
    # string must match the Java annotation class name exactly
    # (VerifiesOnLolliopoMR1) -- confirm against the Java side before fixing.
    (22, 'LolliopoMR1'),
    (23, 'M'),
    (24, 'N'),
    (25, 'NMR1'),
    (26, 'O'),
    (27, 'OMR1'),
    (28, 'P'),
    (29, 'Q'),
    (30, 'R'),
]
_CHECKDISCARD_RE = re.compile(r'^\s*-checkdiscard[\s\S]*?}', re.MULTILINE)
_DIRECTIVE_RE = re.compile(r'^\s*-', re.MULTILINE)
def _ValidateAndFilterCheckDiscards(configs):
"""Check for invalid -checkdiscard rules and filter out -checkdiscards.
-checkdiscard assertions often don't work for test APKs and are not actually
helpful. Additionally, test APKs may pull in dependency proguard configs which
makes filtering out these rules difficult in GN. Instead, we enforce that
configs that use -checkdiscard do not contain any other rules so that we can
filter out the undesired -checkdiscard rule files here.
Args:
configs: List of paths to proguard configuration files.
Returns:
A list of configs with -checkdiscard-containing-configs removed.
"""
valid_configs = []
for config_path in configs:
with open(config_path) as f:
contents = f.read()
if _CHECKDISCARD_RE.search(contents):
contents = _CHECKDISCARD_RE.sub('', contents)
if _DIRECTIVE_RE.search(contents):
raise Exception('Proguard configs containing -checkdiscards cannot '
'contain other directives so that they can be '
'disabled in test APKs ({}).'.format(config_path))
else:
valid_configs.append(config_path)
return valid_configs
def _ParseOptions():
  """Parses and validates command-line arguments.

  Expands GN response files, registers all flags, and post-processes
  GN-list-valued arguments into Python lists. Feature-split arguments must
  come as parallel lists of equal length and include a 'base' feature.

  Returns:
    An argparse.Namespace with list-valued options already expanded.
  """
  args = build_utils.ExpandFileArgs(sys.argv[1:])
  parser = argparse.ArgumentParser()
  build_utils.AddDepfileOption(parser)
  parser.add_argument('--r8-path',
                      required=True,
                      help='Path to the R8.jar to use.')
  parser.add_argument(
      '--desugar-jdk-libs-json', help='Path to desugar_jdk_libs.json.')
  parser.add_argument('--input-paths',
                      action='append',
                      required=True,
                      help='GN-list of .jar files to optimize.')
  parser.add_argument('--desugar-jdk-libs-jar',
                      help='Path to desugar_jdk_libs.jar.')
  parser.add_argument('--desugar-jdk-libs-configuration-jar',
                      help='Path to desugar_jdk_libs_configuration.jar.')
  parser.add_argument('--output-path', help='Path to the generated .jar file.')
  parser.add_argument(
      '--proguard-configs',
      action='append',
      required=True,
      help='GN-list of configuration files.')
  parser.add_argument(
      '--apply-mapping', help='Path to ProGuard mapping to apply.')
  parser.add_argument(
      '--mapping-output',
      required=True,
      help='Path for ProGuard to output mapping file to.')
  parser.add_argument(
      '--extra-mapping-output-paths',
      help='GN-list of additional paths to copy output mapping file to.')
  parser.add_argument(
      '--classpath',
      action='append',
      help='GN-list of .jar files to include as libraries.')
  parser.add_argument(
      '--main-dex-rules-path',
      action='append',
      help='Path to main dex rules for multidex'
      '- only works with R8.')
  parser.add_argument(
      '--min-api', help='Minimum Android API level compatibility.')
  parser.add_argument(
      '--verbose', '-v', action='store_true', help='Print all ProGuard output')
  parser.add_argument(
      '--repackage-classes', help='Package all optimized classes are put in.')
  parser.add_argument(
      '--disable-outlining',
      action='store_true',
      help='Disable the outlining optimization provided by R8.')
  parser.add_argument(
      '--disable-checkdiscard',
      action='store_true',
      help='Disable -checkdiscard directives')
  parser.add_argument('--sourcefile', help='Value for source file attribute')
  parser.add_argument(
      '--force-enable-assertions',
      action='store_true',
      help='Forcefully enable javac generated assertion code.')
  parser.add_argument(
      '--feature-jars',
      action='append',
      help='GN list of path to jars which comprise the corresponding feature.')
  parser.add_argument(
      '--dex-dest',
      action='append',
      dest='dex_dests',
      help='Destination for dex file of the corresponding feature.')
  parser.add_argument(
      '--feature-name',
      action='append',
      dest='feature_names',
      help='The name of the feature module.')
  parser.add_argument('--warnings-as-errors',
                      action='store_true',
                      help='Treat all warnings as errors.')
  parser.add_argument('--show-desugar-default-interface-warnings',
                      action='store_true',
                      help='Enable desugaring warnings.')
  parser.add_argument(
      '--stamp',
      help='File to touch upon success. Mutually exclusive with --output-path')
  parser.add_argument('--desugared-library-keep-rule-output',
                      help='Path to desugared library keep rule output file.')

  diff_utils.AddCommandLineFlags(parser)
  options = parser.parse_args(args)

  # Feature splits write one dex per feature, so a single --output-path makes
  # no sense; a stamp (or expectation file) stands in as the build output.
  if options.feature_names:
    if options.output_path:
      parser.error('Feature splits cannot specify an output in GN.')
    if not options.actual_file and not options.stamp:
      parser.error('Feature splits require a stamp file as output.')
  elif not options.output_path:
    parser.error('Output path required when feature splits aren\'t used')

  # NOTE(review): --r8-path is required=True above, so this branch appears
  # unreachable -- confirm before removing.
  if options.main_dex_rules_path and not options.r8_path:
    parser.error('R8 must be enabled to pass main dex rules.')

  # Expand GN-list-valued flags into real Python lists.
  options.classpath = build_utils.ParseGnList(options.classpath)
  options.proguard_configs = build_utils.ParseGnList(options.proguard_configs)
  options.input_paths = build_utils.ParseGnList(options.input_paths)
  options.extra_mapping_output_paths = build_utils.ParseGnList(
      options.extra_mapping_output_paths)

  if options.feature_names:
    if 'base' not in options.feature_names:
      parser.error('"base" feature required when feature arguments are used.')
    if len(options.feature_names) != len(options.feature_jars) or len(
        options.feature_names) != len(options.dex_dests):
      parser.error('Invalid feature argument lengths.')

    options.feature_jars = [
        build_utils.ParseGnList(x) for x in options.feature_jars
    ]

  return options
class _DexPathContext(object):
  """Bookkeeping for one dex output module (the base module or a feature)."""

  def __init__(self, name, output_path, input_jars, work_dir):
    """Creates (and mkdirs) a per-module staging dir under work_dir."""
    self.name = name
    self.input_paths = input_jars
    self._final_output_path = output_path
    self.staging_dir = os.path.join(work_dir, name)
    os.mkdir(self.staging_dir)

  def CreateOutput(self, has_imported_lib=False, keep_rule_output=None):
    """Moves the dex files staged by R8 into their final location.

    Writes either a single .dex file or an uncompressed .jar of dex files,
    depending on the extension of the configured output path.
    """
    dex_files = build_utils.FindInDirectory(self.staging_dir)
    if not dex_files:
      raise Exception('Missing dex outputs in {}'.format(self.staging_dir))

    if not self._final_output_path.endswith('.dex'):
      # Add to .jar using Python rather than having R8 output to a .zip
      # directly in order to disable compression of the .jar, saving ~500ms.
      tmp_jar_output = self.staging_dir + '.jar'
      build_utils.DoZip(dex_files, tmp_jar_output, base_dir=self.staging_dir)
      shutil.move(tmp_jar_output, self._final_output_path)
      return

    # Single-.dex output: JDK library desugaring would force a second file,
    # and R8 must have produced exactly one dex.
    if has_imported_lib:
      raise Exception(
          'Trying to create a single .dex file, but a dependency requires '
          'JDK Library Desugaring (which necessitates a second file).'
          'Refer to %s to see what desugaring was required' % keep_rule_output)
    if len(dex_files) != 1:
      raise Exception('Expected exactly 1 dex file output, found: {}'.format(
          '\t'.join(dex_files)))
    shutil.move(dex_files[0], self._final_output_path)
def _OptimizeWithR8(options,
                    config_paths,
                    libraries,
                    dynamic_config_data,
                    print_stdout=False):
  """Invokes R8 to optimize/dex the inputs and writes the mapping file.

  Builds the R8 command line (base module plus any feature splits), runs it,
  optionally dexes desugar_jdk_libs, moves the staged outputs into place, and
  copies the proguard mapping to options.mapping_output with R8's comment
  lines stripped.

  Args:
    options: Parsed command-line options (see _ParseOptions).
    config_paths: Paths of proguard config files to pass via --pg-conf.
    libraries: Classpath jars passed via --lib.
    dynamic_config_data: Extra config text generated from flags, or falsy.
    print_stdout: Whether to echo R8's stdout.
  """
  with build_utils.TempDir() as tmp_dir:
    if dynamic_config_data:
      # Materialize the flag-derived rules so R8 can read them like any other
      # config file.
      tmp_config_path = os.path.join(tmp_dir, 'proguard_config.txt')
      with open(tmp_config_path, 'w') as f:
        f.write(dynamic_config_data)
      config_paths = config_paths + [tmp_config_path]

    tmp_mapping_path = os.path.join(tmp_dir, 'mapping.txt')
    # If there is no output (no classes are kept), this prevents this script
    # from failing.
    build_utils.Touch(tmp_mapping_path)

    tmp_output = os.path.join(tmp_dir, 'r8out')
    os.mkdir(tmp_output)

    # One _DexPathContext per module: 'base' is always present; the rest are
    # feature splits.
    feature_contexts = []
    if options.feature_names:
      for name, dest_dex, input_paths in zip(
          options.feature_names, options.dex_dests, options.feature_jars):
        feature_context = _DexPathContext(name, dest_dex, input_paths,
                                          tmp_output)
        if name == 'base':
          base_dex_context = feature_context
        else:
          feature_contexts.append(feature_context)
    else:
      base_dex_context = _DexPathContext('base', options.output_path,
                                         options.input_paths, tmp_output)

    cmd = [
        build_utils.JAVA_PATH,
        '-Dcom.android.tools.r8.allowTestProguardOptions=1',
    ]
    if options.disable_outlining:
      # BUG FIX: this flag previously had a leading space inside the string
      # (' -D...'), which made the JVM receive a malformed argument instead of
      # setting the system property.
      cmd += ['-Dcom.android.tools.r8.disableOutlining=1']
    cmd += [
        '-Xmx1G',
        '-cp',
        options.r8_path,
        'com.android.tools.r8.R8',
        '--no-data-resources',
        '--output',
        base_dex_context.staging_dir,
        '--pg-map-output',
        tmp_mapping_path,
    ]

    if options.desugar_jdk_libs_json:
      cmd += [
          '--desugared-lib',
          options.desugar_jdk_libs_json,
          '--desugared-lib-pg-conf-output',
          options.desugared_library_keep_rule_output,
      ]

    if options.min_api:
      cmd += ['--min-api', options.min_api]

    if options.force_enable_assertions:
      cmd += ['--force-enable-assertions']

    for lib in libraries:
      cmd += ['--lib', lib]

    for config_file in config_paths:
      cmd += ['--pg-conf', config_file]

    if options.main_dex_rules_path:
      for main_dex_rule in options.main_dex_rules_path:
        cmd += ['--main-dex-rules', main_dex_rule]

    # Each feature only contributes jars not already claimed by an earlier
    # module (earlier features / base win ties).
    module_input_jars = set(base_dex_context.input_paths)
    for feature in feature_contexts:
      feature_input_jars = [
          p for p in feature.input_paths if p not in module_input_jars
      ]
      module_input_jars.update(feature_input_jars)
      for in_jar in feature_input_jars:
        cmd += ['--feature', in_jar, feature.staging_dir]

    cmd += base_dex_context.input_paths
    # Add any extra input jars to the base module (e.g. desugar runtime).
    extra_jars = set(options.input_paths) - module_input_jars
    cmd += sorted(extra_jars)

    try:
      stderr_filter = dex.CreateStderrFilter(
          options.show_desugar_default_interface_warnings)
      build_utils.CheckOutput(cmd,
                              print_stdout=print_stdout,
                              stderr_filter=stderr_filter,
                              fail_on_output=options.warnings_as_errors)
    except build_utils.CalledProcessError as err:
      # Re-raise with a pointer to the debugging docs appended.
      debugging_link = ('\n\nR8 failed. Please see {}.'.format(
          'https://chromium.googlesource.com/chromium/src/+/HEAD/build/'
          'android/docs/java_optimization.md#Debugging-common-failures\n'))
      raise build_utils.CalledProcessError(err.cwd, err.args,
                                           err.output + debugging_link)

    base_has_imported_lib = False
    if options.desugar_jdk_libs_json:
      # Dex desugar_jdk_libs into the next free classesN.dex slot of the base
      # module.
      existing_files = build_utils.FindInDirectory(base_dex_context.staging_dir)
      jdk_dex_output = os.path.join(base_dex_context.staging_dir,
                                    'classes%d.dex' % (len(existing_files) + 1))
      base_has_imported_lib = dex_jdk_libs.DexJdkLibJar(
          options.r8_path, options.min_api, options.desugar_jdk_libs_json,
          options.desugar_jdk_libs_jar,
          options.desugar_jdk_libs_configuration_jar,
          options.desugared_library_keep_rule_output, jdk_dex_output,
          options.warnings_as_errors)

    base_dex_context.CreateOutput(base_has_imported_lib,
                                  options.desugared_library_keep_rule_output)
    for feature in feature_contexts:
      feature.CreateOutput()

    with open(options.mapping_output, 'w') as out_file, \
        open(tmp_mapping_path) as in_file:
      # Mapping files generated by R8 include comments that may break
      # some of our tooling so remove those (specifically: apkanalyzer).
      out_file.writelines(l for l in in_file if not l.startswith('#'))
def _CombineConfigs(configs, dynamic_config_data, exclude_generated=False):
ret = []
# Sort in this way so //clank versions of the same libraries will sort
# to the same spot in the file.
def sort_key(path):
return tuple(reversed(path.split(os.path.sep)))
for config in sorted(configs, key=sort_key):
if exclude_generated and config.endswith('.resources.proguard.txt'):
continue
ret.append('# File: ' + config)
with open(config) as config_file:
contents = config_file.read().rstrip()
# Fix up line endings (third_party configs can have windows endings).
contents = contents.replace('\r', '')
# Remove numbers from generated rule comments to make file more
# diff'able.
contents = re.sub(r' #generated:\d+', '', contents)
ret.append(contents)
ret.append('')
if dynamic_config_data:
ret.append('# File: //build/android/gyp/proguard.py (generated rules)')
ret.append(dynamic_config_data)
ret.append('')
return '\n'.join(ret)
def _CreateDynamicConfig(options):
  """Returns proguard config text derived purely from command-line flags.

  Includes sourcefile renaming, applymapping, repackaging, and the
  per-Android-release @VerifiesOn* keep rules: releases above min-api get
  their annotated methods kept out-of-line; releases at or below min-api get
  allowobfuscation member rules.
  """
  rules = []
  if options.min_api and not options.r8_path:
    # R8 adds this option automatically, and uses -assumenosideeffects instead
    # (which ProGuard doesn't support doing).
    rules.append("""\
-assumevalues class android.os.Build$VERSION {
  public static final int SDK_INT return %s..9999;
}""" % options.min_api)

  if options.sourcefile:
    rules.append("-renamesourcefileattribute '%s' # OMIT FROM EXPECTATIONS" %
                 options.sourcefile)
  if options.apply_mapping:
    rules.append("-applymapping '%s'" % os.path.abspath(options.apply_mapping))
  if options.repackage_classes:
    rules.append("-repackageclasses '%s'" % options.repackage_classes)

  min_api = int(options.min_api) if options.min_api else 0
  for api_level, version_code in _API_LEVEL_VERSION_CODE:
    annotation_name = 'org.chromium.base.annotations.VerifiesOn' + version_code
    if api_level <= min_api:
      # Release is guaranteed available: keep members but allow obfuscation.
      rules.append("""\
-keepclassmembers,allowobfuscation class ** {
  @%s <methods>;
}""" % annotation_name)
    else:
      # Release may be missing at runtime: keep annotated methods verifiable.
      rules.append('-keep @interface %s' % annotation_name)
      rules.append("""\
-if @%s class * {
  *** *(...);
}
-keep,allowobfuscation class <1> {
  *** <2>(...);
}""" % annotation_name)
  return '\n'.join(rules)
def _VerifyNoEmbeddedConfigs(jar_paths):
failed = False
for jar_path in jar_paths:
with zipfile.ZipFile(jar_path) as z:
for name in z.namelist():
if name.startswith('META-INF/proguard/'):
failed = True
sys.stderr.write("""\
Found embedded proguard config within {}.
Embedded configs are not permitted (https://crbug.com/989505)
""".format(jar_path))
break
if failed:
sys.exit(1)
def _ContainsDebuggingConfig(config_str):
debugging_configs = ('-whyareyoukeeping', '-whyareyounotinlining')
return any(config in config_str for config in debugging_configs)
def _MaybeWriteStampAndDepFile(options, inputs):
  """Touches the stamp file (if any) and writes the depfile (if any).

  The depfile's target is the stamp when one is configured, otherwise the
  main output path.
  """
  target = options.stamp if options.stamp else options.output_path
  if options.stamp:
    build_utils.Touch(options.stamp)
  if options.depfile:
    build_utils.WriteDepfile(options.depfile, target, inputs=inputs)
def main():
  """Entry point: parse flags, merge configs, run R8, write outputs."""
  options = _ParseOptions()

  libraries = []
  for p in options.classpath:
    # TODO(bjoyce): Remove filter once old android support libraries are gone.
    # Fix for having Library class extend program class dependency problem.
    if 'com_android_support' in p or 'android_support_test' in p:
      continue
    # If a jar is part of input no need to include it as library jar.
    if p not in libraries and p not in options.input_paths:
      libraries.append(p)
  _VerifyNoEmbeddedConfigs(options.input_paths + libraries)

  proguard_configs = options.proguard_configs
  if options.disable_checkdiscard:
    proguard_configs = _ValidateAndFilterCheckDiscards(proguard_configs)

  # ProGuard configs that are derived from flags.
  dynamic_config_data = _CreateDynamicConfig(options)

  # Merge file configs with the flag-derived rules for expectation checking
  # and for detecting debugging directives.
  merged_configs = _CombineConfigs(
      proguard_configs, dynamic_config_data, exclude_generated=True)
  print_stdout = _ContainsDebuggingConfig(merged_configs) or options.verbose

  if options.expected_file:
    diff_utils.CheckExpectations(merged_configs, options)
    if options.only_verify_expectations:
      # Expectation-only mode: record deps and skip running R8 entirely.
      build_utils.WriteDepfile(options.depfile,
                               options.actual_file,
                               inputs=options.proguard_configs)
      return

  _OptimizeWithR8(options, proguard_configs, libraries, dynamic_config_data,
                  print_stdout)

  # After ProGuard / R8 has run:
  for output in options.extra_mapping_output_paths:
    shutil.copy(options.mapping_output, output)

  inputs = options.proguard_configs + options.input_paths + libraries
  if options.apply_mapping:
    inputs.append(options.apply_mapping)

  _MaybeWriteStampAndDepFile(options, inputs)
# Entry point for command-line use.
if __name__ == '__main__':
  main()
| 2.109375 | 2 |