| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
OpenAttack/exceptions/classifier.py | zangy17/OpenAttack | 10 | 12773451 | from ..exception import AttackException
class ClassifierNotSupportException(AttackException):
pass
| 1.234375 | 1 |
Project_Codev0.1/Class-diagram_Classes/class Reccomendation.py | cyberseihis/Wallsource | 0 | 12773452 | class Reccomendation:
    def __init__(self, input_text):
        self.text = input_text
    def get_text(self):
        # plain getter for the stored text (the descriptor protocol's
        # __get__ does not apply here and self.name never existed)
        return self.text
| 2.546875 | 3 |
scripts/linux/merge_gamelists.py | LatorreDev/recalbox | 0 | 12773453 | import argparse
import glob
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
class Merger:
    def __init__(self):
        self._filelist = []
    def prettify(self, elem):
        # serialize to text (not bytes) so the str.replace() calls below work
        xmlstr = ET.tostring(elem, encoding='unicode').replace('\n', '')
        while "> " in xmlstr:
            xmlstr = xmlstr.replace("> ", ">")
        while " <" in xmlstr:
            xmlstr = xmlstr.replace(" <", "<")
        reparsed = minidom.parseString(xmlstr)
        # toprettyxml with an encoding argument returns utf-8 bytes
        result = reparsed.toprettyxml(indent='  ', newl='\n', encoding="utf-8")
        return result
def read(self, inputfolder):
for f in glob.glob(os.path.join(inputfolder, "*_gamelist.xml")):
print("Found {}".format(f))
self._filelist.append(f)
def write(self, outputfile):
# Read
utree = ET.Element("gameList")
for f in self._filelist:
print("Loading {}".format(f))
for se in ET.parse(f).getroot():
utree.append(se)
# Write
print("Writing {}".format(outputfile))
        with open(outputfile, "wb") as f:
            f.write(self.prettify(utree))  # prettify() already returns utf-8 bytes
# Cleanup
for f in self._filelist:
print("Removing {}".format(f))
os.remove(f)
if __name__ == '__main__':
# Parse command line
parser = argparse.ArgumentParser(description='Gamelist merger')
parser.add_argument("-i", "--input", help="Input foldet where to collect *_gamelist.xml", type=str, required=True)
parser.add_argument("-o", "--output", help="Output gamelist file", type=str, required=True)
#parser.add_argument("--port", help="This system is a port, not a regular system", action="store_true", required=False)
args = parser.parse_args()
merger = Merger()
merger.read(args.input)
merger.write(args.output)
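# Hypothetical invocation (paths are assumptions, not taken from the project):
#   python merge_gamelists.py -i /recalbox/share/roms/snes -o gamelist.xml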
| 2.953125 | 3 |
tests.py | wrapp/pymetricslog | 0 | 12773454 | import json
from io import StringIO
from unittest.mock import ANY
from metricslog import Logger
class TestMetricLogObserver(object):
def setup(self):
self.out = StringIO()
self.service = 'myservice'
self.hostname = 'host-01'
self.log = Logger(self.out, service=self.service, hostname=self.hostname)
def test_emit(self):
metric = 'user_created'
value = 10.0
self.log.emit(metric, value)
self.assert_output({
"timestamp": ANY,
"service": self.service,
"hostname": self.hostname,
"metric": metric,
"value": value
})
def test_emit_with_default_value(self):
metric = 'user_created'
self.log.emit(metric)
self.assert_output({
"timestamp": ANY,
"service": self.service,
"hostname": self.hostname,
"metric": metric,
"value": 1.0
})
    def get_output(self):
        self.out.seek(0)  # rewind the buffer before reading it back
        return json.loads(self.out.read())
def assert_output(self, expected):
actual = self.get_output()
assert actual == expected, repr(actual)
| 2.5 | 2 |
src/docconvert/cli.py | cbillingham/docconvert | 8 | 12773455 | """Commandline script for docconvert."""
import argparse
import logging
import os
import subprocess
import sys
from six.moves import input
from . import configuration
from . import core
from . import parser
from . import writer
_LOGGER = logging.getLogger(__name__)
def setup_logger(verbose=False):
"""Setup basic logging handler for console feedback.
Args:
verbose (bool): If true, sets level of logger to logging.INFO,
else leaves handler at default of logging.WARNING.
"""
level = logging.INFO if verbose else logging.WARNING
log_format = "%(name)s:%(levelname)s: %(message)s"
logging.basicConfig(format=log_format, level=level)
def is_git_repository(path):
"""Checks if path is in a git repository.
Args:
path (str): The path to check.
Returns:
bool: Whether the path is a git repository.
"""
if os.path.isfile(path):
path = os.path.dirname(path)
with open(os.devnull, "wb") as devnull:
proc = subprocess.Popen(
["git", "rev-parse", "--is-inside-work-tree"],
cwd=path,
stdout=devnull,
stderr=devnull,
)
proc.wait()
return proc.returncode == 0
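# Hypothetical quick check (the path is an assumption):
#   is_git_repository("/home/user/project")  # True inside a git work tree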
def run():
"""Parses arguments and calls core convert function."""
arg_parser = argparse.ArgumentParser(prog="docconvert")
arg_parser.add_argument("source", help="The directory or file to convert.")
arg_parser.add_argument(
"-i",
"--input",
help="Input docstring style. (default: guess)",
type=parser.InputStyle,
choices=list(parser.InputStyle),
)
arg_parser.add_argument(
"-o",
"--output",
help="Output docstring style to convert to. (default: google)",
type=writer.OutputStyle,
choices=list(writer.OutputStyle),
)
arg_parser.add_argument(
"--in-place",
help="Write the changes to the input file instead of printing diffs.",
action="store_true",
)
arg_parser.add_argument(
"-c", "--config", help="Location of configuration file to use."
)
arg_parser.add_argument(
"-t",
"--threads",
type=int,
default=0,
help="Number of threads to use. (default: cpu count)",
)
arg_parser.add_argument(
"-v", "--verbose", action="store_true", help="Log more information."
)
args = arg_parser.parse_args()
setup_logger(verbose=args.verbose)
source = os.path.abspath(os.path.expanduser(args.source))
if not os.path.exists(source):
_LOGGER.error("Path does not exist: %s", source)
        return 1  # non-zero exit status; sys.exit(run()) propagates this
if not is_git_repository(source):
_LOGGER.warning(
"This directory is not under git control. "
"Continuing will overwrite files."
)
answer = input("Are you sure you would like to proceed? [y/n] ")
if answer.lower() not in ("y", "yes"):
_LOGGER.warning("Exiting without converting.")
return
config = configuration.DocconvertConfiguration.create_default()
if args.config:
config_path = os.path.abspath(os.path.expanduser(args.config))
if not os.path.exists(config_path):
_LOGGER.error("Config path does not exist: %s", config_path)
            return 1  # non-zero exit status; sys.exit(run()) propagates this
config.update_from_json(config_path)
# Override config values if specified directly with flags
if args.input:
config.input_style = args.input
if args.output:
config.output_style = args.output
diffs = core.convert(source, args.threads, config, args.in_place)
if diffs and not args.in_place:
for diff in diffs:
for line in diff:
sys.stdout.write(line)
if __name__ == "__main__":
sys.exit(run())
| 2.734375 | 3 |
solutions/python/2018/deceptiveTransmission.py | lucifer1198/Codesignal | 2 | 12773456 | """
Due to recent advances in our cryptographic technology (i.e. the invention of the state-of-the-art Caesar Cipher decoder),
the opposition has been on the losing side. As their attack plans were leaked and used against them, they suffered heavy losses.
Many soldiers from the enemy side defected and the war seemed like it would end soon.
However, just when you thought everything was over, the enemy was able to come up with a new cipher.
With their newly secure communication channels in place, the enemy is now moving again.
Can you crack the enemy code? Time is of the essence. The outcome of the war lies in your hands.
Example
For enemyMessage = "This is HQ. Commence operation COBRA. Send all units to attack the north gate at 2145", the output should be
deceptiveTransmission(enemyMessage) = "ATTACK SOUTH GATE".
How the actual message was encrypted is not clear to us, but we found this note in an enemy base: Pqbdxkldoxmev / Yxzlk'p zfmebo
"""
def deceptiveTransmission(enemyMessage):
    # The original was code-golfed: `m, = eval(dir()[0])` grabbed the sole
    # argument and the body sat at module level, which is not valid Python
    # outside the CodeSignal harness; it is unpacked into a regular function.
    m = list(enemyMessage)
    s = ''
    while m:
        t = 1
        # NB: this loop string was probably a longer run of spaces originally
        # (whitespace runs are collapsed in this extraction); with a five-space
        # string, each output character would decode five parity bits into
        # A-Z or a space via the chr() expression below.
        for _ in ' ':
            i, j, *m = m
            t += t + ord(i) % 2
        s += chr(-~t % 59 + 32)
    return s.strip()
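# Expected behaviour per the problem statement (subject to the loop-width caveat above):
# deceptiveTransmission("This is HQ. Commence operation COBRA. Send all units to attack the north gate at 2145")
# should yield "ATTACK SOUTH GATE"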
| 3.3125 | 3 |
donate/donate/web_form/donation_form/donation_form.py | smriti-ps/Donation-portal | 0 | 12773457 | from __future__ import unicode_literals
import frappe
def get_context(context):
# do your magic here
    context.title = "Donation Form"
    context.Introduction = "Kindly fill the form"
| 1.5 | 2 |
plugins/polio/migrations/0020_fix_statuses.py | BLSQ/iaso-copy | 29 | 12773458 | from django.db import migrations
OLD_STATUS_TO_NEW = {"PENDING": "TO_SUBMIT", "ONGOING": "SUBMITTED", "FINISHED": "APPROVED", None: None}
VALID_STATUSES = ["APPROVED", "TO_SUBMIT", "SUBMITTED", "REVIEWED", None]
def convert(old):
return OLD_STATUS_TO_NEW.get(old, old)
def fix_campaign_status(apps, schema_editor):
Campaign = apps.get_model("polio", "Campaign")
for c in Campaign.objects.all():
c.risk_assessment_status = convert(c.risk_assessment_status)
c.budget_status = convert(c.budget_status)
c.save()
for c in Campaign.objects.all():
if c.budget_status not in VALID_STATUSES:
raise Exception(f"Still invalid budget_status {c} {c.budget_status}")
if c.risk_assessment_status not in VALID_STATUSES:
raise Exception(f"Still invalid risk_assessment_status {c} {c.risk_assessment_status}")
class Migration(migrations.Migration):
dependencies = [
("polio", "0019_auto_20210715_1751"),
]
operations = [
migrations.RunPython(fix_campaign_status),
]
| 1.945313 | 2 |
tunacell/filters/cells.py | LeBarbouze/tunacell | 0 | 12773459 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines filters for Cell instances
"""
from __future__ import print_function
import copy
import numpy as np
from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND
from tunacell.base.datatools import multiplicative_increments
from tunacell.base.observable import Observable
class FilterCell(FilterGeneral):
"General class for filtering cell objects (reader.Cell instances)"
_type = 'CELL'
class FilterCellAny(FilterCell):
"Class that does not filter anything."
def __init__(self):
self.label = 'Always True' # short description for human readers
return
def func(self, cell):
return True
class FilterData(FilterCell):
"""Default filter test only if cell exists and cell.data non empty."""
def __init__(self):
self.label = 'Cell Has Data'
return
def func(self, cell):
boo = False
if cell is not None:
boo = cell.data is not None and len(cell.data) > 0
return boo
class FilterCellIDparity(FilterCell):
"""Test whether identifier is odd or even"""
def __init__(self, parity='even'):
self.parity = parity
self.label = 'Cell identifier is {}'.format(parity)
return
def func(self, cell):
# test if even
try:
even = int(cell.identifier) % 2 == 0
if self.parity == 'even':
return even
elif self.parity == 'odd':
return not even
else:
raise ValueError("Parity must be 'even' or 'odd'")
except ValueError as ve:
print(ve)
return False
class FilterCellIDbound(FilterCell):
"""Test class"""
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.label = '{} <= cellID < {}'.format(lower_bound, upper_bound)
return
def func(self, cell):
return bounded(int(cell.identifier),
self.lower_bound, self.upper_bound)
class FilterHasParent(FilterCell):
"""Test whether a cell has an identified parent cell"""
def __init__(self):
self.label = 'Cell Has Parent'
return
def func(self, cell):
boo = False
if cell.parent:
boo = True
return boo
class FilterDaughters(FilterCell):
"Test whether a given cell as at least one daughter cell"
def __init__(self, daughter_min=1, daughter_max=2):
label = 'Number of daughter cell(s): '
label += '{0} <= n_daughters <= {1}'.format(daughter_min, daughter_max)
self.label = label
self.lower_bound = daughter_min
self.upper_bound = daughter_max + 1 # lower <= x < upper
return
def func(self, cell):
return bounded(len(cell.childs),
lower_bound=self.lower_bound,
upper_bound=self.upper_bound)
class FilterCompleteCycle(FilterCell):
"Test whether a cell has a given parent and at least one daughter."
def __init__(self, daughter_min=1):
label = 'Cell cycle complete'
        label += ' (with at least {} daughter cell(s))'.format(daughter_min)
self.daughter_min = daughter_min
self.label = label
return
def func(self, cell):
filt_parent = FilterHasParent()
filt_daughter = FilterDaughters(daughter_min=self.daughter_min)
return filt_parent(cell) and filt_daughter(cell)
class FilterCycleFrames(FilterCell):
"""Check whether cell has got a minimal number of datapoints."""
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
        label = 'Number of registered frames: '
label += '{0} <= n_frames <= {1}'.format(self.lower_bound,
self.upper_bound)
self.label = label
return
def func(self, cell):
# check whether data exists
boo = False
filtData = FilterData()
if filtData.func(cell):
boo = bounded(len(cell.data),
lower_bound=self.lower_bound,
upper_bound=self.upper_bound
)
return boo
class FilterCycleSpanIncluded(FilterCell):
"""Check that cell cycle time interval is within valid bounds."""
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = '{} <= Initial frame and Final frame < {}'.format(lower_bound,
upper_bound)
self.label = label
return
def func(self, cell):
boo = False
filtData = FilterData()
if filtData(cell):
boo = included(cell.data['time'],
lower_bound=self.lower_bound,
upper_bound=self.upper_bound)
return boo
class FilterTimeInCycle(FilterCell):
"""Check that tref is within cell birth and division time"""
def __init__(self, tref=0.):
self.tref = tref
label = 'birth/first time <= {} < division/last time'.format(tref)
self.label = label
return
def func(self, cell):
boo = False
filtData = FilterData()
if filtData(cell):
if cell.birth_time is not None:
lower = cell.birth_time
else:
lower = cell.data['time'][0]
if cell.division_time is not None:
upper = cell.division_time
else:
upper = cell.data['time'][-1]
boo = lower <= self.tref < upper
return boo
class FilterObservableBound(FilterCell):
"""Check that a given observable is bounded.
Parameters
----------
obs : Observable instance
observable that will be tested for bounds
works only for continuous observable (mode='dynamics')
tref : float (default None)
Time of reference at which to test dynamics observable value
lower_bound : float (default None)
upper_bound : float (default None)
"""
    def __init__(self, obs=None, tref=None,
                 lower_bound=None, upper_bound=None):
        if obs is None:
            obs = Observable(name='undefined')  # avoid a shared default instance
        self.obs_to_test = obs  # observable to be tested
self._obs = [obs, ] # hidden to be computed at for filtering purpose
self.tref = tref
# code below is commented because func is able to deal with arrays
# if obs.mode == 'dynamics' and tref is None:
# msg = 'For dynamics mode, this filter needs a valid tref (float)'
# raise ValueError(msg)
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = '{} <= {}'.format(lower_bound, obs.name)
if tref is not None:
label += ' (t={})'.format(tref)
label += ' < {}'.format(upper_bound)
self.label = label
return
def func(self, cell):
import collections
boo = False
if self.tref is not None:
filt = FilterAND(FilterData(),
FilterTimeInCycle(tref=self.tref))
else:
filt = FilterData()
label = self.obs_to_test.label
if filt(cell):
# retrieve data
array = cell._sdata[label] # two cases: array, or single value
raw_time = cell.data['time']
if len(raw_time) > 1:
dt = np.amin(raw_time[1:] - raw_time[:-1])
else:
dt = cell.container.period
if array is None:
return False
if isinstance(array, collections.Iterable):
if self.tref is None:
# data may be one value (for cycle observables), or array
boo = bounded(array[label], self.lower_bound, self.upper_bound)
else:
# find data closest to tref (-> round to closest time)
# for now return closest time to tref
index = np.argmin(np.abs(array['time'] - self.tref))
# check that it's really close:
if np.abs(array['time'][index] - self.tref) < dt:
value = array[label][index]
boo = bounded(value, self.lower_bound, self.upper_bound)
# otherwise it's a number
else:
boo = bounded(array, self.lower_bound, self.upper_bound)
return boo
# useless ?
class FilterLengthIncrement(FilterCell):
"Check increments are bounded."
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
        label = 'Length increments between two successive frames: '
        label += '{0} <= delta_length <= {1}'.format(self.lower_bound,
                                                     self.upper_bound)
        self.label = label
        return
def func(self, cell):
boo = False
filtData = FilterData()
if filtData(cell):
ell = np.array(cell.data['length'])
incr = multiplicative_increments(ell)
lower = bounded(np.amin(incr), lower_bound=self.lower_bound)
upper = bounded(np.amax(incr), upper_bound=self.upper_bound)
boo = lower and upper
return boo
class FilterSymmetricDivision(FilterCell):
"""Check that cell division is (roughly) symmetric.
Parameters
----------
raw : str
column label of raw observable to test for symmetric division
(usually one of 'length', 'area'). This quantity will be approximated
"""
def __init__(self, raw='area', lower_bound=0.4, upper_bound=0.6):
self.raw = raw
# Observable to be computed: raw at birth, raw at division
# hidden _obs because not part of parameters, but should be computed
self._obs = [Observable(raw=raw, scale='log', mode='birth', timing='b'),
Observable(raw=raw, scale='log', mode='division',
timing='d')]
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = 'Symmetric division filter:'
ratio_str = '(daughter cell {})/(mother cell {})'.format(raw, raw)
label += ' {} <= {} <= {}'.format(self.lower_bound,
ratio_str,
self.upper_bound)
# label += 'OR (in case mother cell data is missing) '
# label += '{0} <= (daughter cell area)/(sister cell area) <= {1}\
# '.format(self.lower_bound/self.upper_bound,
# self.upper_bound/self.lower_bound)
self.label = label
return
def func(self, cell):
boo = False
filtData = FilterData()
if cell.parent is None:
# birth is not reported, impossible to test, cannot exclude from data
boo = True
else:
if filtData(cell):
csize = cell._sdata[self._obs[0].label]
if filtData(cell.parent):
psize = cell.parent._sdata[self._obs[1].label]
boo = bounded(csize/psize,
lower_bound=self.lower_bound,
upper_bound=self.upper_bound
)
else:
# parent exists, but without data.
# this is a weird scenario, that should not exist
# TODO: warn user?
# but we can check with sibling
sibs = copy.deepcopy(cell.parent.childs)
for item in sibs:
if item.identifier == cell.identifier:
sibs.remove(item)
if sibs:
if len(sibs) > 1:
from ..base.cell import CellChildsError
raise CellChildsError('>2 daughters')
sib = sibs[0] # there should be only one cell
if sib.data is not None:
                            sibsize = sib._sdata[self._obs[0].label]  # .label is an attribute, not a method
boo = bounded(csize/sibsize,
lower_bound=self.lower_bound,
upper_bound=self.upper_bound
)
else:
boo = True # sibling cell: no data, accept this cell
else:
boo = True # no sibling, accept this cell
return boo
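# A minimal composition sketch (the bound and the `cell` object are illustrative):
# keep_full_cycles = FilterAND(FilterData(), FilterCompleteCycle(daughter_min=1))
# accepted = keep_full_cycles(cell)  # filters are callable on Cell instances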
| 2.890625 | 3 |
sprezz/views/oauth2.py | oohlaf/sprezz | 1 | 12773460 | import json
import logging
from aiohttp import web
log = logging.getLogger(__name__)
class AuthorizeView(web.View):
async def get(self):
log.info('Requested URL: %s', self.request.path_qs)
log.info('Requested Remote: %s', self.request.remote)
        if self.request.can_read_body:
            # request.json() already returns the parsed body; a second
            # json.loads() pass on the parsed object would raise a TypeError
            parsed = await self.request.json()
            dump = json.dumps(parsed, indent=2)
            log.info('JSON body: %s', dump)
return web.Response(text="Welcome")
class TokenView(web.View):
async def get(self):
log.info('Requested URL: %s', self.request.path_qs)
log.info('Requested Remote: %s', self.request.remote)
        if self.request.can_read_body:
            # request.json() already returns the parsed body; see AuthorizeView
            parsed = await self.request.json()
            dump = json.dumps(parsed, indent=2)
            log.info('JSON body: %s', dump)
return web.Response(text="Welcome")
| 2.546875 | 3 |
tests/unit/test_dist.py | olegklimov/DeepSpeed | 6,728 | 12773461 | import torch
import torch.distributed as dist
from common import distributed_test
import pytest
@distributed_test(world_size=3)
def test_init():
assert dist.is_initialized()
assert dist.get_world_size() == 3
assert dist.get_rank() < 3
# Demonstration of pytest's parameterization
@pytest.mark.parametrize('number,color', [(1138, 'purple')])
def test_dist_args(number, color):
"""Outer test function with inputs from pytest.mark.parametrize(). Uses a distributed
helper function.
"""
@distributed_test(world_size=2)
def _test_dist_args_helper(x, color='red'):
assert dist.get_world_size() == 2
assert x == 1138
assert color == 'purple'
"""Ensure that we can parse args to distributed_test decorated functions. """
_test_dist_args_helper(number, color=color)
@distributed_test(world_size=[1, 2, 4])
def test_dist_allreduce():
x = torch.ones(1, 3).cuda() * (dist.get_rank() + 1)
sum_of_ranks = (dist.get_world_size() * (dist.get_world_size() + 1)) // 2
result = torch.ones(1, 3).cuda() * sum_of_ranks
dist.all_reduce(x)
assert torch.all(x == result)
| 2.796875 | 3 |
SWIM-Executables/Unix/pyinstaller-2.0/swim3/render.py | alexsigaras/SWIM | 3 | 12773462 | # -*- coding: utf-8 -*-
#############################################
## (C)opyright by <NAME> ##
## All rights reserved ##
#############################################
__version__ = "$Revision: 176 $"
__author__ = "$Author: kgrodzicki $"
__date__ = "$Date: 2011-01-15 10:11:47 +0100 (Fr, 15 July 2011) $"
"""
HTML/CSS to PDF converter
Test background image generation on the `portrait` and `landscape`
page.
"""
from cookbook import HTML2PDF
import sys, os
if __name__ == "__main__":
xhtml = open(sys.argv[1])
try:
filename = sys.argv[2]
if not len(filename):
raise Exception
except:
filename = sys.argv[1].split('.')[0] + '.pdf'
HTML2PDF(xhtml.read(), filename)
os.remove(sys.argv[1])
# def render(html_fn, filename):
# #print html_fn
# #xhtml = open(html_fn)
# #print xhtml.read()
# HTML2PDF(html_fn, filename) | 2.765625 | 3 |
30 Days of Code/Day 10 Binary Numbers.py | MasterZesty/HackerRank-Solutions | 2 | 12773463 | #!/bin/python3
import math
import os
import random
import re
import sys
def dec_to_base(num,base): #Maximum base - 36
base_num = ""
while num>0:
dig = int(num%base)
if dig<10:
base_num += str(dig)
else:
base_num += chr(ord('A')+dig-10) #Using uppercase letters
num //= base
base_num = base_num[::-1] #To reverse the string
return base_num
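# Examples: dec_to_base(10, 2) -> '1010'; dec_to_base(255, 16) -> 'FF'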
if __name__ == '__main__':
n = int(input().strip())
base_no = dec_to_base(n,2)
print(max(map(len,base_no.split('0'))))
| 3.484375 | 3 |
util/config.py | benvenutto/wrangle-and-analyze-data | 0 | 12773464 | import yaml
def read_creds(conf_path):
with open(conf_path, 'r') as cf:
config = yaml.load(cf, Loader=yaml.FullLoader)
return config
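# Hypothetical usage (the path and key names are assumptions):
# creds = read_creds('config/api_keys.yaml')
# consumer_key = creds['consumer_key']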
| 2.015625 | 2 |
helper/parameter_generator.py | mrikitoku/LightGBM | 0 | 12773465 | import os
def GetParameterInfos(config_hpp):
is_inparameter = False
parameter_group = None
cur_key = None
cur_info = {}
keys = []
member_infos = []
with open(config_hpp) as config_hpp_file:
for line in config_hpp_file:
if "#pragma region Parameters" in line:
is_inparameter = True
elif "#pragma region" in line and "Parameters" in line:
cur_key = line.split("region")[1].strip()
keys.append(cur_key)
member_infos.append([])
elif '#pragma endregion' in line:
if cur_key is not None:
cur_key = None
elif is_inparameter:
is_inparameter = False
elif cur_key is not None:
line = line.strip()
if line.startswith("//"):
tokens = line.split("//")[1].split("=")
key = tokens[0].strip()
val = '='.join(tokens[1:]).strip()
if key not in cur_info:
if key == "descl2":
cur_info["desc"] = []
else:
cur_info[key] = []
if key == "desc":
cur_info["desc"].append(["l1", val])
elif key == "descl2":
cur_info["desc"].append(["l2", val])
else:
cur_info[key].append(val)
elif line:
has_eqsgn = False
tokens = line.split("=")
if len(tokens) == 2:
if "default" not in cur_info:
cur_info["default"] = [tokens[1][:-1].strip()]
has_eqsgn = True
tokens = line.split()
cur_info["inner_type"] = [tokens[0].strip()]
if "name" not in cur_info:
if has_eqsgn:
cur_info["name"] = [tokens[1].strip()]
else:
cur_info["name"] = [tokens[1][:-1].strip()]
member_infos[-1].append(cur_info)
cur_info = {}
return (keys, member_infos)
def GetNames(infos):
names = []
for x in infos:
for y in x:
names.append(y["name"][0])
return names
def GetAlias(infos):
pairs = []
for x in infos:
for y in x:
if "alias" in y:
name = y["name"][0]
alias = y["alias"][0].split(',')
for name2 in alias:
pairs.append([name2.strip(), name])
return pairs
def SetOneVarFromString(name, type, checks):
ret = ""
univar_mapper = {"int": "GetInt", "double": "GetDouble", "bool": "GetBool", "std::string": "GetString"}
if "vector" not in type:
ret += " %s(params, \"%s\", &%s);\n" % (univar_mapper[type], name, name)
if len(checks) > 0:
for check in checks:
ret += " CHECK(%s %s);\n" % (name, check)
ret += "\n"
else:
ret += " if (GetString(params, \"%s\", &tmp_str)) {\n" % (name)
type2 = type.split("<")[1][:-1]
if type2 == "std::string":
ret += " %s = Common::Split(tmp_str.c_str(), ',');\n" % (name)
else:
ret += " %s = Common::StringToArray<%s>(tmp_str, ',');\n" % (name, type2)
ret += " }\n\n"
return ret
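# For instance, SetOneVarFromString("learning_rate", "double", [">0.0"])
# (a hypothetical input) emits the C++ lines:
#   GetDouble(params, "learning_rate", &learning_rate);
#   CHECK(learning_rate >0.0);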
def GenParameterCode(config_hpp, config_out_cpp):
keys, infos = GetParameterInfos(config_hpp)
names = GetNames(infos)
alias = GetAlias(infos)
str_to_write = "/// This file is auto generated by LightGBM\\helper\\parameter_generator.py\n"
str_to_write += "#include<LightGBM/config.h>\nnamespace LightGBM {\n"
# alias table
str_to_write += "std::unordered_map<std::string, std::string> Config::alias_table({\n"
for pair in alias:
str_to_write += " {\"%s\", \"%s\"}, \n" % (pair[0], pair[1])
str_to_write += "});\n\n"
# names
str_to_write += "std::unordered_set<std::string> Config::parameter_set({\n"
for name in names:
str_to_write += " \"%s\", \n" % (name)
str_to_write += "});\n\n"
# from strings
str_to_write += "void Config::GetMembersFromString(const std::unordered_map<std::string, std::string>& params) {\n"
str_to_write += " std::string tmp_str = \"\";\n"
for x in infos:
for y in x:
if "[doc-only]" in y:
continue
type = y["inner_type"][0]
name = y["name"][0]
checks = []
if "check" in y:
checks = y["check"]
tmp = SetOneVarFromString(name, type, checks)
str_to_write += tmp
# tails
str_to_write += "}\n\n"
str_to_write += "std::string Config::SaveMembersToString() const {\n"
str_to_write += " std::stringstream str_buf;\n"
for x in infos:
for y in x:
if "[doc-only]" in y:
continue
type = y["inner_type"][0]
name = y["name"][0]
if "vector" in type:
if "int8" in type:
str_to_write += " str_buf << \"[%s: \" << Common::Join(Common::ArrayCast<int8_t, int>(%s),\",\") << \"]\\n\";\n" % (name, name)
else:
str_to_write += " str_buf << \"[%s: \" << Common::Join(%s,\",\") << \"]\\n\";\n" % (name, name)
else:
str_to_write += " str_buf << \"[%s: \" << %s << \"]\\n\";\n" % (name, name)
# tails
str_to_write += " return str_buf.str();\n"
str_to_write += "}\n\n"
str_to_write += "}\n"
with open(config_out_cpp, "w") as config_out_cpp_file:
config_out_cpp_file.write(str_to_write)
if __name__ == "__main__":
config_hpp = os.path.join(os.path.pardir, 'include', 'LightGBM', 'config.h')
config_out_cpp = os.path.join(os.path.pardir, 'src', 'io', 'config_auto.cpp')
GenParameterCode(config_hpp, config_out_cpp)
| 2.703125 | 3 |
mysql_communicator/__init__.py | alexisljn/bluetooth-ipssi | 0 | 12773466 | import mysql.connector
class connector:
def __init__(self):
self.db = mysql.connector.connect(
host="localhost",
user="scannr",
password="<PASSWORD>",
database="scannr",
)
self.cursor = self.db.cursor()
def commit(self):
self.db.commit()
def insert_scan(self, mac_address, device_name, rssi, scanned_by_device):
sql = "INSERT INTO scan (mac_address, device_name, rssi, scanned_by_device) VALUES (%s, %s, %s, %s)"
values = (mac_address, device_name, rssi, scanned_by_device)
self.cursor.execute(sql, values)
self.commit()
def get_latest_scan_for_each_mac_address(self):
sql = "select mac_address, device_name, rssi, scan_date from scan s where is_deleted = 0 and not exists (select 1 from scan s1 where is_deleted = 0 and s1.mac_address = s.mac_address and s1.scan_date > s.scan_date) order by scan_date desc"
self.cursor.execute(sql)
columns = [col[0] for col in self.cursor.description]
rows = [dict(zip(columns, row)) for row in self.cursor.fetchall()]
return rows
def get_names_of_mac_address(self, mac_address):
sql = "select distinct device_name from scan s where mac_address = %s and is_deleted = 0"
values = (mac_address, )
self.cursor.execute(sql, values)
names = []
for name in self.cursor.fetchall():
names.append(name[0])
return names
def get_scans_of_mac_address(self, mac_address):
sql = "select id, device_name, scan_date, rssi, scanned_by_device from scan s where mac_address = %s and is_deleted = 0 order by scan_date desc"
values = (mac_address, )
self.cursor.execute(sql, values)
columns = [col[0] for col in self.cursor.description]
rows = [dict(zip(columns, row)) for row in self.cursor.fetchall()]
return rows
def delete_scan(self, scan_id):
sql = "update scan set is_deleted = 1 where id = %s"
values = (scan_id, )
self.cursor.execute(sql, values)
self.commit()
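# A minimal usage sketch (MAC address and device names are illustrative):
# db = connector()
# db.insert_scan("AA:BB:CC:DD:EE:FF", "some-phone", -60, "raspberry-1")
# latest = db.get_latest_scan_for_each_mac_address()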
| 3.109375 | 3 |
tests/conftest.py | Informasjonsforvaltning/organization-page-bffe | 0 | 12773467 | """Conftest module."""
from asyncio import AbstractEventLoop
import datetime
import os
import time
from typing import Any
from unittest.mock import Mock
from dotenv import load_dotenv
import pytest
from pytest_mock import MockFixture
import requests
from requests.exceptions import ConnectionError
from fdk_organization_bff import create_app
load_dotenv()
HOST_PORT = int(os.environ.get("HOST_PORT", "8080"))
MOCKED_DATE = datetime.date(2021, 4, 10)
def is_responsive(url: Any) -> Any:
"""Return true if response from service is 200."""
url = f"{url}/ready"
try:
response = requests.get(url)
if response.status_code == 200:
time.sleep(2) # sleep extra 2 sec
return True
except ConnectionError:
return False
@pytest.fixture(scope="session")
def docker_service(docker_ip: Any, docker_services: Any) -> Any:
"""Ensure that HTTP service is up and responsive."""
# `port_for` takes a container port and returns the corresponding host port
port = docker_services.port_for("fdk-organization-bff", HOST_PORT)
url = "http://{}:{}".format(docker_ip, port)
docker_services.wait_until_responsive(
timeout=30.0, pause=0.1, check=lambda: is_responsive(url)
)
return url
@pytest.fixture(scope="session")
def docker_compose_file(pytestconfig: Any) -> Any:
"""Override default location of docker-compose.yml file."""
return os.path.join(str(pytestconfig.rootdir), "./", "docker-compose.yml")
@pytest.mark.integration
@pytest.fixture(scope="function")
def client(loop: AbstractEventLoop, aiohttp_client: Any) -> Any:
"""Return an aiohttp client for testing."""
return loop.run_until_complete(
aiohttp_client(loop.run_until_complete(create_app()))
)
@pytest.fixture
def mock_datetime(mocker: MockFixture) -> Mock:
"""Mock datetime."""
mock = mocker.patch("datetime.date")
mock.today.return_value = MOCKED_DATE
return mock
| 2.125 | 2 |
setup.py | kartik-hegde/fate | 0 | 12773468 | from setuptools import setup, find_packages
setup(name='fate', version='1.0', packages=find_packages()) | 1.140625 | 1 |
reporting/data.py | pranavrajpal/mypyc-benchmarks | 0 | 12773469 | from typing import NamedTuple, List, Dict, Set, Tuple
from datetime import datetime
import os
import re
import sys
import glob
import subprocess
from reporting.common import (
get_hardware_id, get_os_version, get_c_compiler_version, CC, DATA_DIR, SOURCE_DIRS
)
def write_csv_header(fnam: str) -> None:
with open(fnam, "w") as f:
f.write("Timestamp,Runtime (s),Runtime (stddev),Mypy commit," +
"Benchmark commit,Python version,Hardware,OS,C compiler\n")
def write_csv_line(fnam: str,
benchmark: str,
timestamp: datetime,
runtime: float,
stdev: float,
mypy_commit: str,
benchmark_commit: str) -> None:
if not os.path.exists(fnam):
write_csv_header(fnam)
with open(fnam, "a") as f:
f.write("%s,%.6f,%.6f,%s,%s,%s,%s,%s,%s\n" % (
timestamp,
runtime,
stdev,
mypy_commit,
benchmark_commit,
sys.version.split()[0],
get_hardware_id(),
get_os_version(),
'%s %s' % (CC, get_c_compiler_version(CC)),
))
class DataItem(NamedTuple):
benchmark: str
timestamp: datetime
runtime: float
stdev_percent: float
mypy_commit: str
benchmark_commit: str
python_version: str
hardware_id: str
os_version: str
def read_csv(fnam: str) -> List[DataItem]:
benchmark = os.path.basename(fnam)
benchmark = benchmark.partition('.csv')[0]
benchmark = benchmark.partition('-cpython')[0]
with open(fnam) as f:
lines = f.readlines()
lines = lines[1:]
result = []
for line in lines:
fields = line.split(',')
item = DataItem(
benchmark=benchmark,
timestamp=datetime.fromisoformat(fields[0]),
runtime=float(fields[1]),
stdev_percent=float(fields[2]),
mypy_commit=fields[3],
benchmark_commit=fields[4],
python_version=fields[5],
hardware_id=fields[6],
os_version=fields[7],
)
result.append(item)
return result
class BenchmarkData(NamedTuple):
# Data about interpreted baseline runs (benchmark name as key)
baselines: Dict[str, List[DataItem]]
# Data about each compiled benchmark run (benchmark name as key)
runs: Dict[str, List[DataItem]]
# These benchmarks are microbenchmarks
microbenchmarks: Set[str]
# Dict from benchmark name to (source .py file path, line number)
source_locations: Dict[str, Tuple[str, int]]
def load_data(mypy_repo: str, data_repo: str) -> BenchmarkData:
"""Load all benchmark data from csv files."""
baselines = {}
runs = {}
files = glob.glob(os.path.join(data_repo, DATA_DIR, '*.csv'))
for fnam in files:
benchmark = os.path.basename(fnam)
benchmark, _, _ = benchmark.partition('.csv')
benchmark, suffix, _ = benchmark.partition('-cpython')
items = read_csv(fnam)
if suffix:
baselines[benchmark] = items
else:
runs[benchmark] = items
microbenchmarks = get_microbenchmark_names()
source_locations = get_source_locations()
return BenchmarkData(baselines, runs, microbenchmarks, source_locations)
def get_benchmark_names() -> Set[str]:
"""Get names of all benchmarks (normal and microbenchmarks)."""
result = set()
data = subprocess.check_output(['python', 'runbench.py', '--list']).decode('ascii')
for line in data.splitlines():
result.add(line.split()[0])
return result
def get_microbenchmark_names() -> Set[str]:
result = set()
data = subprocess.check_output(['python', 'runbench.py', '--list']).decode('ascii')
for line in data.splitlines():
if '(micro)' in line:
result.add(line.split()[0])
return result
def get_source_locations() -> Dict[str, Tuple[str, int]]:
result = {}
for src_dir in SOURCE_DIRS:
for fnam in glob.glob('%s/*.py' % src_dir):
with open(fnam) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if line.strip().startswith('@benchmark'):
for j, line2 in enumerate(lines[i + 1 : i + 10]):
line2 = line2.strip()
m = re.match('def +([a-zA-Z_0-9]+)', line2)
if m:
result[m.group(1)] = (fnam, i + 2 + j)
return result
def sort_data_items(items: List[DataItem], commit_order: Dict[str, int]) -> List[DataItem]:
"""Sort data items by age of mypy commit, from recent to old."""
return sorted(items, key=lambda x: commit_order[x.mypy_commit])
def find_baseline(baselines: List[DataItem], run: DataItem) -> DataItem:
"""Find the corresponding baseline measurement for a benchmark run."""
for item in baselines:
if (item.python_version == run.python_version
and item.hardware_id == run.hardware_id
and item.os_version == run.os_version):
return item
assert False, "No baseline found for %r" % (run,)
# Override the default significance levels for benchmarks that aren't very noisy.
significant_percent_change_overrides = {
'sieve': 3.0,
'str_methods_2': 3.0,
'str_format': 10.0,
'str_methods': 10.0,
'matrix_multiply': 10.0,
}
def significant_percent_change(benchmark: str, is_microbenchmark: bool) -> float:
if benchmark in significant_percent_change_overrides:
return significant_percent_change_overrides[benchmark]
elif is_microbenchmark:
# Microbenchmark measurements are noisy. By default, only
# consider 15% change as significant.
return 15.0
else:
# Other benchmarks are less noisy.
return 3.0
def is_significant_percent_change(benchmark: str,
delta_percentage: float,
is_microbenchmark: bool) -> bool:
return abs(delta_percentage) >= significant_percent_change(benchmark, is_microbenchmark)
| 2.390625 | 2 |
count-primes.py | mengzhuo/my-leetcode-solution | 0 | 12773470 | """
https://leetcode.com/problems/count-primes/
"""
class Solution:
# @param {integer} n
# @return {integer}
def countPrimes(self, n):
if n <=2:
return 0
if n == 3:
return 1
count = n-2
        d = [True for i in range(n)]  # range() for Python 3 (was xrange)
        for i in range(2, int(n**0.5)+1):
if d[i]:
j = 2
while i*j < n:
if d[i*j]:
d[i*j] = False
count -=1
j +=1
return count
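# e.g. Solution().countPrimes(10) == 4  (primes below 10: 2, 3, 5, 7)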
| 3.46875 | 3 |
app/apply/views.py | spbso/so_rest | 0 | 12773471 | import logging
from core.authentication import VKAuthentication
from core.models import Boec, UserApply
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from reversion.views import RevisionMixin
from .serializers import ApplySerializer
logger = logging.getLogger(__name__)
class UserApplyView(viewsets.ViewSet):
"Manage user applies"
authentication_classes = (VKAuthentication,)
permission_classes = (IsAuthenticated,)
@action(
methods=["get"],
detail=False,
)
def get_own_apply(self, request):
        try:
            UserApply.objects.get(vk_id=request.user.vk_id)
            return Response({}, status=status.HTTP_200_OK)
        except UserApply.DoesNotExist:
            msg = _("User apply doesn't exist")
return Response({"error": msg}, status=status.HTTP_404_NOT_FOUND)
@action(
methods=["post"],
detail=False,
)
def apply(self, request):
serializer = ApplySerializer(data=request.data)
if serializer.is_valid():
instance = serializer.save()
Boec.objects.create(
first_name=instance.first_name,
last_name=instance.last_name,
middle_name=instance.middle_name,
date_of_birth=instance.date_of_birth,
vk_id=instance.vk_id,
for_development=True,
)
return Response(serializer.data)
else:
            logger.error("Apply validation failed: %s", serializer.errors)
return Response({"error": "Validation"}, status=status.HTTP_400_BAD_REQUEST)
| 2.125 | 2 |
tests/unit/test_app.py | davidjsherman/repo2docker | 1,047 | 12773472 | import pytest
from tempfile import TemporaryDirectory
from unittest.mock import patch
import docker
import escapism
from repo2docker.app import Repo2Docker
from repo2docker.__main__ import make_r2d
from repo2docker.utils import chdir
def test_find_image():
images = [{"RepoTags": ["some-org/some-repo:latest"]}]
with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
instance = FakeDockerClient.return_value
instance.images.return_value = images
r2d = Repo2Docker()
r2d.output_image_spec = "some-org/some-repo"
assert r2d.find_image()
instance.images.assert_called_with()
def test_dont_find_image():
images = [{"RepoTags": ["some-org/some-image-name:latest"]}]
with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
instance = FakeDockerClient.return_value
instance.images.return_value = images
r2d = Repo2Docker()
r2d.output_image_spec = "some-org/some-other-image-name"
assert not r2d.find_image()
instance.images.assert_called_with()
def test_image_name_remains_unchanged():
# if we specify an image name, it should remain unmodified
with TemporaryDirectory() as src:
app = Repo2Docker()
argv = ["--image-name", "a-special-name", "--no-build", src]
app = make_r2d(argv)
app.start()
assert app.output_image_spec == "a-special-name"
def test_image_name_contains_sha1(repo_with_content):
upstream, sha1 = repo_with_content
app = Repo2Docker()
# force selection of the git content provider by prefixing path with
# file://. This is important as the Local content provider does not
# store the SHA1 in the repo spec
argv = ["--no-build", "file://" + upstream]
app = make_r2d(argv)
app.start()
assert app.output_image_spec.endswith(sha1[:7])
def test_local_dir_image_name(repo_with_content):
upstream, sha1 = repo_with_content
app = Repo2Docker()
argv = ["--no-build", upstream]
app = make_r2d(argv)
app.start()
assert app.output_image_spec.startswith(
"r2d" + escapism.escape(upstream, escape_char="-").lower()
)
def test_build_kwargs(repo_with_content):
upstream, sha1 = repo_with_content
argv = [upstream]
app = make_r2d(argv)
app.extra_build_kwargs = {"somekey": "somevalue"}
with patch.object(docker.APIClient, "build") as builds:
builds.return_value = []
app.build()
builds.assert_called_once()
args, kwargs = builds.call_args
assert "somekey" in kwargs
assert kwargs["somekey"] == "somevalue"
def test_run_kwargs(repo_with_content):
upstream, sha1 = repo_with_content
argv = [upstream]
app = make_r2d(argv)
app.extra_run_kwargs = {"somekey": "somevalue"}
with patch.object(docker.DockerClient, "containers") as containers:
app.start_container()
containers.run.assert_called_once()
args, kwargs = containers.run.call_args
assert "somekey" in kwargs
assert kwargs["somekey"] == "somevalue"
def test_root_not_allowed():
with TemporaryDirectory() as src, patch("os.geteuid") as geteuid:
geteuid.return_value = 0
argv = [src]
        with pytest.raises(SystemExit) as exc:
            app = make_r2d(argv)
        assert exc.value.code == 1  # the assert must run after the with-block exits
with pytest.raises(ValueError):
app = Repo2Docker(repo=src, run=False)
app.build()
app = Repo2Docker(repo=src, user_id=1000, user_name="jovyan", run=False)
app.initialize()
with patch.object(docker.APIClient, "build") as builds:
builds.return_value = []
app.build()
builds.assert_called_once()
def test_dryrun_works_without_docker(tmpdir, capsys):
with chdir(tmpdir):
with patch.object(docker, "APIClient") as client:
client.side_effect = docker.errors.DockerException("Error: no Docker")
app = Repo2Docker(dry_run=True)
app.build()
captured = capsys.readouterr()
assert "Error: no Docker" not in captured.err
def test_error_log_without_docker(tmpdir, capsys):
with chdir(tmpdir):
with patch.object(docker, "APIClient") as client:
client.side_effect = docker.errors.DockerException("Error: no Docker")
app = Repo2Docker()
with pytest.raises(SystemExit):
app.build()
captured = capsys.readouterr()
assert "Error: no Docker" in captured.err
| 2.25 | 2 |
ocrapi/common.py | pengyang486868/PY-read-Document | 0 | 12773473 | from aip import AipOcr
config = {
'appId': '16804814',
'apiKey': 'mVCRu8AmAcTdFSGaDtoRki53',
'secretKey': '<KEY>'
}
client = AipOcr(**config)
def get_file_content(file):
with open(file, 'rb') as fp:
return fp.read()
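# A minimal usage sketch (the image path is an assumption):
# result = client.basicGeneral(get_file_content('scan.jpg'))
# for item in result.get('words_result', []):
#     print(item['words'])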
| 2.46875 | 2 |
tests/html_generation_tests.py | JoshBarr/python-mammoth | 1 | 12773474 | from nose.tools import istest, assert_equal
from mammoth.html_generation import HtmlGenerator, satisfy_html_path
from mammoth import html_paths
@istest
def generates_empty_string_when_newly_created():
generator = HtmlGenerator()
assert_equal("", generator.html_string())
@istest
def html_escapes_text():
generator = HtmlGenerator()
generator.text("<")
assert_equal("<", generator.html_string())
@istest
def self_closing_tag_is_self_closing():
generator = HtmlGenerator()
generator.self_closing("br")
assert_equal("<br />", generator.html_string())
@istest
def all_elements_are_closed_by_end_all():
generator = HtmlGenerator()
generator.start("p")
generator.start("span")
generator.text("Hello!")
generator.end_all()
assert_equal("<p><span>Hello!</span></p>", generator.html_string())
@istest
def elements_with_no_text_are_not_generated():
generator = HtmlGenerator()
generator.start("p")
generator.start("span")
generator.end_all()
assert_equal("", generator.html_string())
@istest
def elements_with_empty_string_text_are_not_generated():
generator = HtmlGenerator()
generator.start("p")
generator.start("span")
generator.text("")
generator.end_all()
assert_equal("", generator.html_string())
@istest
def self_closing_tag_can_have_attributes():
generator = HtmlGenerator()
generator.self_closing("br", {"data-blah": "42"})
assert_equal('<br data-blah="42" />', generator.html_string())
@istest
def attribute_values_are_escaped():
generator = HtmlGenerator()
generator.self_closing("br", {"data-blah": "<"})
assert_equal('<br data-blah="<" />', generator.html_string())
@istest
def opening_tag_can_have_attributes():
generator = HtmlGenerator()
generator.start("p", {"data-blah": "42"})
generator.text("Hello!")
generator.end()
assert_equal('<p data-blah="42">Hello!</p>', generator.html_string())
@istest
def appending_another_html_generator_does_nothing_if_empty():
generator = HtmlGenerator()
generator.start("p")
generator.append(HtmlGenerator())
assert_equal('', generator.html_string())
@istest
def appending_another_html_generator_writes_out_elements_if_other_generator_is_not_empty():
generator = HtmlGenerator()
generator.start("p")
other = HtmlGenerator()
other.text("Hello!")
generator.append(other)
assert_equal('<p>Hello!', generator.html_string())
@istest
class SatisfyPathTests(object):
@istest
def plain_elements_are_generated_to_satisfy_plain_path_elements(self):
generator = HtmlGenerator()
path = html_paths.path([html_paths.element(["p"])])
satisfy_html_path(generator, path)
generator.text("Hello!")
assert_equal('<p>Hello!', generator.html_string())
@istest
def only_missing_elements_are_generated_to_satisfy_plain_path_elements(self):
generator = HtmlGenerator()
generator.start("blockquote")
generator.text("Hello")
path = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"])])
satisfy_html_path(generator, path)
generator.text("there")
assert_equal('<blockquote>Hello<p>there', generator.html_string())
@istest
def mismatched_elements_are_closed_to_satisfy_plain_path_elements(self):
generator = HtmlGenerator()
generator.start("blockquote")
generator.start("span")
generator.text("Hello")
path = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"])])
satisfy_html_path(generator, path)
generator.text("there")
assert_equal('<blockquote><span>Hello</span><p>there', generator.html_string())
@istest
def fresh_element_matches_nothing(self):
generator = HtmlGenerator()
generator.start("blockquote")
generator.start("p")
generator.text("Hello")
path = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"], fresh=True)])
satisfy_html_path(generator, path)
generator.text("there")
assert_equal('<blockquote><p>Hello</p><p>there', generator.html_string())
@istest
def attributes_are_generated_when_satisfying_elements(self):
generator = HtmlGenerator()
path = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
satisfy_html_path(generator, path)
generator.text("Hello")
assert_equal('<p class="tip">Hello', generator.html_string())
@istest
def elements_do_not_match_if_class_names_do_not_match(self):
generator = HtmlGenerator()
generator.start("p", {"class": "help"})
generator.text("Help")
path = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
satisfy_html_path(generator, path)
generator.text("Tip")
assert_equal('<p class="help">Help</p><p class="tip">Tip', generator.html_string())
@istest
def class_names_match_if_they_are_the_same(self):
generator = HtmlGenerator()
generator.start("p", {"class": "tip"})
generator.text("Help")
path = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
satisfy_html_path(generator, path)
generator.text("Tip")
assert_equal('<p class="tip">HelpTip', generator.html_string())
| 2.59375 | 3 |
admin/getActiveDirectory.py | KleeGroup-BaseCamp/klee-office | 0 | 12773475 | #!/home/dev/anaconda2/bin/python
# -*- coding: utf-8 -*-
import json  # JSONify search results
import sys  # sys.exit()
from ldap3 import Server, Connection, NTLM, ALL  # LDAP bind and search primitives
######################################################
with open('../config/config-ldap.json') as data_file:
settings = json.load(data_file)
Serv = settings['url']
BaseDN = settings['baseDN']
BaseDNDesactives = settings['BaseDNDesactives']
User = settings['username']
Password = settings['password']
#######################################################
base_dn = BaseDN
base_dn_desactives = BaseDNDesactives
server = Server(Serv, get_info=ALL)
con = Connection(server, user=BaseDN + '\\' + User, password=Password, authentication=NTLM, return_empty_attributes=True)
attributes_des = ['cn', 'mail']
attributes = ['cn', 'mail', 'department', 'company', 'physicalDeliveryOfficeName']
if not con.bind():
print('error in bind',con.result)
else:
try:
results=con.search(search_base=base_dn, search_filter='(&(objectClass=person))' ,attributes=attributes)
if results:
print(base_dn)
with open('../api/data/KleeGroup.json', 'w') as json_file:
text=[]
for x in con.entries:
if x.mail.value:
mail=x.mail.value.encode('utf-8')
else:
mail=""
if x.department.value:
dep=x.department.value.encode('utf-8')
else:
dep=""
if x.company.value:
comp=x.company.value.encode('utf-8')
else:
comp=""
if x.physicalDeliveryOfficeName.value:
if x.physicalDeliveryOfficeName.value.encode('utf-8')=='La Boursidière':
office='La Boursidière : aucun'
else:
office=x.physicalDeliveryOfficeName.value.encode('utf-8')
else:
office=""
text.append([x.entry_dn.encode('utf-8'),{'cn':[x.cn.value.encode('utf-8')],'mail':[mail],'department':[dep],'company':[comp],'physicalDeliveryOfficeName':[office]}])
json.dump(text, json_file, ensure_ascii=False)
json_file.close()
except IOError:
print("error")
try:
results=con.search(search_base=base_dn_desactives, search_filter='(objectClass=person)' ,attributes=attributes_des)
if results:
print(base_dn_desactives.encode('utf-8'))
with open('../api/data/KleeGroup_Desactives.json', 'w') as json_file:
text=[]
for x in con.entries:
if x.mail.value:
mail=x.mail.value.encode('utf-8')
else:
mail=""
text.append([x.entry_dn.encode('utf-8'),{'cn':[x.cn.value.encode('utf-8')],'mail':[mail]}])
json.dump(text, json_file, ensure_ascii=False)
json_file.close()
except IOError:
print("error")
con.unbind()
sys.exit()
| 2.296875 | 2 |
backend-chat/app.py | bayuajinurmnsh/chat_apps | 0 | 12773476 | from flask import Flask, request, jsonify
from datetime import datetime
import operations as ops
app = Flask(__name__)
@app.route('/users', methods=['GET'])
def get_all_user():
data = ops.get_users()
return jsonify({'users': data}), 200
@app.route('/register', methods=['POST'])
def register_user():
data = request.get_json()
created_date = datetime.now()
# check if data is valid or not
valid_data = ops.valid_user_data(data)
if valid_data:
insert_data = ops.register(data, created_date)
if insert_data == 'exists':
return jsonify({'message': 'Username Already Exists', 'status': 'failed'}), 500
if insert_data == False:
return jsonify({'message': 'Failed to Insert Data', 'status': 'failed'}), 500
return jsonify({'message': 'Data has been insert', 'status': 'success'}), 200
else:
return jsonify({'message': 'Data not valid', 'status': 'invalid'}), 500
if __name__ == "__main__":
app.run(debug=True)
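# Hypothetical smoke test once the server is running (payload fields depend on ops.valid_user_data):
#   curl -X POST http://localhost:5000/register -H "Content-Type: application/json" \
#        -d '{"username": "alice", "password": "secret"}'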
| 2.84375 | 3 |
Jasper/__init__.py | Rhodolite/Gem.py.Other | 0 | 12773477 | #
# Copyright (c) 2018 <NAME>. All rights reserved.
#
| 0.769531 | 1 |
codegen/subexpr_elim.py | gballard/fast-matmul | 23 | 12773478 | # Copyright (c) 2014-2015, Sandia Corporation
# All rights reserved.
#
# This file is part of fast-matmul and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause.
import convert
import sys
import gen
def all_pairs(col):
''' Return all pairs in column, where the column refers to the rank multiply.
col is an array of coefficients. '''
for i in range(len(col)):
for j in range(i + 1, len(col)):
if col[i] != '0' and col[j] != '0':
yield ((col[i], col[j]), (i, j))
def can_substitute(val_pair1, val_pair2):
''' Returns true if and only if the two pairs can be replaced by a single multiply.
val1_pair1 and val_pair2 are pairs of coefficients
'''
try:
val1a = float(val_pair1[0])
val1b = float(val_pair1[1])
val2a = float(val_pair2[0])
val2b = float(val_pair2[1])
except:
return False
# Can't match zeros
if val2a == 0.0 or val2b == 0.0:
return False
scale1 = val1b / val1a
scale2 = val2b / val2a
if abs(scale1 - scale2) <= 1e-15:
return True
def find_subexpression(pair, col):
''' Find occurence of the pair subexpression in the column.
pair is of the form ((coeff1, coeff2), (ind1, ind2)) where coeff
is a coefficient and ind is an index
col is an array of coefficients such that we can access col[ind1], col[ind2]
'''
ind1, ind2 = pair[1]
curr_val = (col[ind1], col[ind2])
return can_substitute(pair[0], curr_val)
def transpose(coeffs):
''' Given a list of rows, return a list of columns. '''
return [[x[i] for x in coeffs] for i in range(len(coeffs[0]))]
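# e.g. transpose([[1, 2, 3], [4, 5, 6]]) -> [[1, 4], [2, 5], [3, 6]]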
def eliminate(coeff_set):
cols = transpose(coeff_set)
match_dict = {}
for i, col in enumerate(cols):
for pair in all_pairs(col):
for j in range(i + 1, len(cols)):
if find_subexpression(pair, cols[j]):
# Hash the mapping by subcomputation (which multiply) where the subexpression originates
# and the index pair of the coefficients.
key = (i, pair[1][0], pair[1][1])
if key in match_dict:
match_dict[key].append(j)
else:
match_dict[key] = [j]
remove_duplicates(match_dict)
return sorted(match_dict.items(), key=lambda keyval: keyval[0])
def remove_duplicates(match_dict):
''' Remove duplicate eliminations from the match dictionary. '''
    sorted_matches = sorted(match_dict.keys())  # dict.iterkeys() is Python 2 only
# Delete duplicates that overlap in both entries
keys_to_delete = []
for key in sorted_matches:
if len(match_dict[key]) > 1:
for col_ind in match_dict[key][:-1]:
keys_to_delete.append((col_ind, key[1], key[2]))
for key in keys_to_delete:
if key in match_dict:
del match_dict[key]
def update_coeffs(coeff_set, elim_info):
''' Given the eliminations, create the new coefficients. '''
def zero_col(col_ind, coeff_ind1, coeff_ind2):
coeff_set[coeff_ind1][col_ind] = '0'
coeff_set[coeff_ind2][col_ind] = '0'
def get_scale(index, sub):
''' Get the scale factor for the substitution matrix. '''
coeff_ind1, coeff_ind2 = index[1:3]
col_ind = index[0]
val1a = float(coeff_set[coeff_ind1][col_ind])
val1b = float(coeff_set[coeff_ind1][sub])
val2a = float(coeff_set[coeff_ind2][col_ind])
val2b = float(coeff_set[coeff_ind2][sub])
if 0.0 in [val1a, val1b, val2a, val2b]:
return 0.0
scale1 = val1b / val1a
scale2 = val2b / val2a
assert(abs(scale1 - scale2) <= 1e-15)
return scale1
rank = len(coeff_set[0])
num_entries = len(coeff_set)
save_coeff_set = convert.replicate(coeff_set)
all_sub_coeffs = []
num_subs = 0
for index, subs in elim_info:
new_coeff_line = ['0'] * rank
# In the multiply where the subexpression originates, replace with new matrix.
new_coeff_line[index[0]] = '1'
have_subbed_flag = False
for sub in subs:
# Replace the add with the substitute matrix.
scale = get_scale(index, sub)
if scale != 0.0:
new_coeff_line[sub] = scale
zero_col(sub, index[1], index[2])
num_subs += 1
have_subbed_flag = True
if have_subbed_flag:
zero_col(index[0], index[1], index[2])
coeff_set.append(new_coeff_line)
sub_coeffs = ['0'] * num_entries
sub_coeffs[index[1]] = save_coeff_set[index[1]][index[0]]
sub_coeffs[index[2]] = save_coeff_set[index[2]][index[0]]
all_sub_coeffs.append(sub_coeffs)
return all_sub_coeffs, num_subs
def num_nonzero(coeffs):
''' Returns the total number of nonzero in the coefficients. '''
return sum([1 for coeff_set in coeffs for coeff_line in coeff_set \
for coeff in coeff_line if gen.is_nonzero(coeff)])
def main():
try:
coeff_file = sys.argv[1]
dims = tuple([int(d) for d in sys.argv[2].split(',')])
        print('Reading coefficients for %d x %d x %d matrix' % dims)
except:
raise Exception('USAGE: python subexpr_elim.py coeff_file m,k,n')
coeffs = convert.read_coeffs(coeff_file)
    print('nnz = ', num_nonzero(coeffs))
A_elim = eliminate(coeffs[0])
B_elim = eliminate(coeffs[1])
# Transpose the C coefficients
C_coeffs = transpose(coeffs[2])
C_elim = eliminate(C_coeffs)
A_subs, num_subs_A = update_coeffs(coeffs[0], A_elim)
B_subs, num_subs_B = update_coeffs(coeffs[1], B_elim)
C_subs, num_subs_C = update_coeffs(C_coeffs, C_elim)
C_coeffs = transpose(C_coeffs)
total_elim = num_subs_A + num_subs_B + num_subs_C
    print('Eliminating %d non-zeros' % total_elim)
    new_nonzeros = int(coeff_file.split('-')[-1]) - total_elim
    new_name = '-'.join(coeff_file.split('-')[:-1]) + '-%d' % (new_nonzeros)
    print('Writing information to ' + new_name)
with open(new_name, 'w') as out_file:
out_file.write('# Eliminated version of %s\n' % coeff_file)
def write_coeff_set(coeff_set):
''' Write the coefficient set for a single matrix (A, B, or C). '''
def pretty_print(value):
if float(int(float(value))) == float(value):
return str(int(float(value)))
else:
return str(value)
for entry in coeff_set:
# Single (i, j) entry of a single matrix.
out_file.write(' '.join([pretty_print(val) for val in entry]) + '\n')
if len(coeff_set) == 0:
out_file.write('\n')
write_coeff_set(coeffs[0])
out_file.write('#\n')
write_coeff_set(coeffs[1])
out_file.write('#\n')
write_coeff_set(C_coeffs)
out_file.write('# Substitution information\n')
write_coeff_set(A_subs)
out_file.write('#\n')
write_coeff_set(B_subs)
out_file.write('#\n')
write_coeff_set(C_subs)
if __name__ == '__main__':
main()
| 2.96875 | 3 |
scripts/csv_to_paper_plots/4wayROC/preprocess.py | iqbal-lab-org/paper_pandora2020_analyses | 0 | 12773479 | <reponame>iqbal-lab-org/paper_pandora2020_analyses
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import os
# In[2]:
# read df
import sys
four_way_df = pd.read_csv("../pandora1_paper_analysis_output_4_way/ROC_data_old_and_new_basecall.tsv", sep="\t")
# In[3]:
# add new columns
four_way_df["methylation_aware"] = four_way_df["tool"].apply(lambda tool: "yes" if tool.startswith("pandora_NEW_BASECALL") else "no")
four_way_df["local_assembly"] = four_way_df["tool"].apply(lambda tool: "yes" if "withdenovo" in tool else "no")
# In[4]:
# save csv
four_way_df.to_csv("ROC_data_old_and_new_basecall.R_data.csv")
| 2.21875 | 2 |
networkapi/api_pools/tests/pool_v3_test.py | brunodevel/GloboNetworkAPI | 0 | 12773480 | # -*- coding: utf-8 -*-
import json
import logging
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
log = logging.getLogger(__name__)
class PoolTestV3Case(NetworkApiTestCase):
maxDiff = None
def setUp(self):
self.client = Client()
def tearDown(self):
pass
def execute_some_put_verify_error(self, name_file):
# update
response = self.client.put(
'/api/v3/pool/1/',
data=json.dumps(self.load_json_file(name_file)),
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
self.assertEqual(400, response.status_code,
'Status code should be 400 and was %s' % response.status_code)
def execute_some_put_verify_success(self, name_file):
# update
response = self.client.put(
'/api/v3/pool/1/',
data=json.dumps(self.load_json_file(name_file)),
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
self.assertEqual(200, response.status_code,
'Status code should be 200 and was %s' % response.status_code)
# get the updated data
response = self.client.get(
'/api/v3/pool/1/',
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
# check that the data was updated
self.assertEqual(
json.dumps(self.load_json_file(name_file), sort_keys=True),
json.dumps(response.data, sort_keys=True),
'jsons should be the same'
)
self.assertEqual(200, response.status_code,
'Status code should be 200 and was %s' % response.status_code)
def execute_some_post_verify_error(self, name_file):
# delete
self.client.delete(
'/api/v3/pool/1/',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
# insert
response = self.client.post(
'/api/v3/pool/',
data=json.dumps(self.load_json_file(name_file)),
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
self.assertEqual(400, response.status_code,
'Status code should be 400 and was %s' % response.status_code)
# try to get the data
response = self.client.get(
'/api/v3/pool/1/',
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
# test if data were not inserted
self.assertEqual(500, response.status_code,
'Status code should be 500 and was %s' % response.status_code)
def execute_some_post_verify_success(self, name_file):
# delete
self.client.delete(
'/api/v3/pool/1/',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
# try to get the data
response = self.client.get(
'/api/v3/pool/1/',
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
# confirm that no such pool exists before inserting
self.assertEqual(500, response.status_code,
'Status code should be 500 and was %s' % response.status_code)
# insert
response = self.client.post(
'/api/v3/pool/',
data=json.dumps(self.load_json_file(name_file)),
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
self.assertEqual(201, response.status_code,
'Status code should be 201 and was %s' % response.status_code)
id_pool = response.data[0]['id']
# get data inserted
response = self.client.get(
'/api/v3/pool/%s/' % id_pool,
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
data = response.data
del data['server_pools'][0]['id']
# test if data were inserted
self.assertEqual(
json.dumps(self.load_json_file(name_file), sort_keys=True),
json.dumps(data, sort_keys=True),
'jsons should be the same'
)
self.assertEqual(200, response.status_code,
'Status code should be 200 and was %s' % response.status_code)
def test_put_valid_file(self):
""" test_put_valid_file"""
self.execute_some_put_verify_success(
'api_pools/tests/json/put/test_pool_put_valid_file.json')
def test_put_out_of_range_port(self):
""" test_put_out_of_range_port"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_out_of_range_port.json')
def test_put_negative_port(self):
""" test_put_negative_port"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_negative_port.json')
def test_put_float_port(self):
""" test_put_float_port"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_float_port.json')
def test_put_zero_port(self):
""" test_put_zero_port"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_zero_port.json')
def test_put_string_port(self):
""" test_put_string_port"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_string_port.json')
def test_put_float_environment(self):
""" test_put_float_environment"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_float_environment.json')
def test_put_string_environment(self):
""" test_put_string_environment"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_string_environment.json')
def test_put_zero_environment(self):
""" test_put_zero_environment"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_zero_environment.json')
def test_put_negative_environment(self):
""" test_put_negative_environment"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_negative_environment.json')
def test_put_integer_name_servicedownaction(self):
""" test_put_integer_name_servicedownaction"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_integer_name_servicedownaction.json')
def test_put_invalid_healthcheck_type(self):
""" test_put_invalid_healthcheck_type"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_invalid_healthcheck_type.json')
def test_put_invalid_destination(self):
""" test_put_invalid_destination"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_invalid_destination.json')
def test_put_negative_default_limit(self):
""" test_put_negative_default_limit"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_negative_default_limit.json')
def test_put_integer_lb_method(self):
""" test_put_integer_lb_method"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_integer_lb_method.json')
def test_put_string_id_servicedownaction(self):
""" test_put_string_id_servicedownaction"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_string_id_servicedownaction.json')
def test_put_zero_id_servicedownaction(self):
""" test_put_zero_id_servicedownaction"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_zero_id_servicedownaction.json')
def test_put_negative_id_servicedownaction(self):
""" test_put_negative_id_servicedownaction"""
self.execute_some_put_verify_error(
'api_pools/tests/json/put/test_pool_put_negative_id_servicedownaction.json')
def test_post_valid_file(self):
""" test_post_valid_file"""
self.execute_some_post_verify_success(
'api_pools/tests/json/post/test_pool_post_valid_file.json')
def test_post_out_of_range_port(self):
""" test_post_out_of_range_port"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_out_of_range_port.json')
def test_post_negative_port(self):
""" test_post_negative_port"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_negative_port.json')
def test_post_float_port(self):
""" test_post_float_port"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_float_port.json')
def test_post_zero_port(self):
""" test_post_zero_port"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_zero_port.json')
def test_post_string_port(self):
""" test_post_string_port"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_string_port.json')
def test_post_float_environment(self):
""" test_post_float_environment"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_float_environment.json')
def test_post_string_environment(self):
""" test_post_string_environment"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_string_environment.json')
def test_post_zero_environment(self):
""" test_post_zero_environment"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_zero_environment.json')
def test_post_negative_environment(self):
""" test_post_negative_environment"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_negative_environment.json')
def test_post_integer_name_servicedownaction(self):
""" test_post_integer_name_servicedownaction"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_integer_name_servicedownaction.json')
def test_post_invalid_healthcheck_type(self):
""" test_post_invalid_healthcheck_type"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_invalid_healthcheck_type.json')
def test_post_invalid_destination(self):
""" test_post_invalid_destination"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_invalid_destination.json')
def test_post_negative_default_limit(self):
""" test_post_negative_default_limit"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_negative_default_limit.json')
def test_post_integer_lb_method(self):
""" test_post_integer_lb_method"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_integer_lb_method.json')
def test_post_string_id_servicedownaction(self):
""" test_post_string_id_servicedownaction"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_string_id_servicedownaction.json')
def test_post_zero_id_servicedownaction(self):
""" test_post_zero_id_servicedownaction"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_zero_id_servicedownaction.json')
def test_post_negative_id_servicedownaction(self):
""" test_post_negative_id_servicedownaction"""
self.execute_some_post_verify_error(
'api_pools/tests/json/post/test_pool_post_negative_id_servicedownaction.json')
def test_valid_post_after_equals_valid_put(self):
""" test_valid_post_after_equals_valid_put"""
# try to get the data
response = self.client.get(
'/api/v3/pool/1/',
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
# confirm the pool exists before updating it
self.assertEqual(200, response.status_code,
'Status code should be 200 and was %s' % response.status_code)
response = self.client.put(
'/api/v3/pool/1/',
data=json.dumps(self.load_json_file(
'api_pools/tests/json/test_pool_put_and_post.json')),
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
self.assertEqual(200, response.status_code,
'Status code should be 200 and was %s' % response.status_code)
response = self.client.post(
'/api/v3/pool/',
data=json.dumps(self.load_json_file(
'api_pools/tests/json/test_pool_put_and_post.json')),
content_type='application/json',
HTTP_AUTHORIZATION=self.get_http_authorization('test'))
self.assertEqual(400, response.status_code,
'Status code should be 400 and was %s' % response.status_code)
| 2.265625 | 2 |
ethereum/experimental/__init__.py | IIIIllllIIIIllllIIIIllllIIIIllllIIIIll/pyethereum | 11 | 12773481 | <gh_stars>10-100
from ethereum.experimental import pruning_trie, refcount_db
| 1.171875 | 1 |
parser.py | DanielOjalvo/execview | 0 | 12773482 | #!/usr/bin/env python3
'''
designed for cronjobs where /build and /sources are mount on the system.
They need to be mounted under the same parent directory.
e.g. /mnt/seadev/build for /build
/mnt/seadev/sources for /sources
'''
import psycopg2
import argparse
import re
import os
import sys
import inspect
import traceback
sys.path.append(os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) ) + '/dynamic/')
sys.path.append(os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) ) + '/static/')
sys.path.append(os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) ) + '/library/')
import dynamic_parser
import static_parser
import dependency_resolver
from library import *
def get_fullversion_str(dynamic_path, version):
#import pdb; pdb.set_trace()
# bldnum file approach
fullver_str = ""
try:
with open(dynamic_path + "/bldnum", "r") as f:
# read the raw string form bldnum (e.g., 3754.0)
buildnum_raw = f.readline().strip()
# match with regex (just in case there are other unwanted info appended)
bn_regex = r"(\d+).0"
bn_regex_match = re.search(bn_regex, buildnum_raw)
# form the build number. (e.g., 3754.0 => 0.0.3754)
build_bldnum = "0.0." + bn_regex_match.group(1)
print("DHNO: Reached here when getting re")
# version: hotfix processing
if "-hf" in version:
version = version.split("-hf", 1)[0]
# ISO filename extract approach
for f in os.listdir(dynamic_path):
# find the ISO
#print("DHNO: listing directory")
#print("DHNO: f is", f)
if os.path.splitext(f)[-1].lower() == ".iso":
# define the regex to match version + buildnum
fn_regex = r"(" + version + r"[-.](.+?)).iso"
fn_regex_match = re.search(fn_regex, f)
print(fn_regex_match.groups())
fullver_str = fn_regex_match.group(1)
# build number from ISO
build_isoname = fn_regex_match.group(2)
# complain on mismatch, proceed with build numbers in ISO filename
if build_isoname != build_bldnum:
utility.terminal_msg(1, "Either the build number in bldnum is incorrect or the build serializing is not following the standard fashion. " + \
"\n\t bldnum: {0} ; ISO filename: {1}".format(buildnum_raw, build_isoname))
except FileNotFoundError as e:
print("Error in finding dynamic path bldnum.")
print(e.strerror)
fullver_str = ""
return fullver_str
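# Example (illustrative): for an ISO named "13.1.1-0.0.3754.iso" and
# version "13.1.1", fn_regex captures fullver_str = "13.1.1-0.0.3754" and
# build_isoname = "0.0.3754", which is then checked against bldnum.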
def version_exist(prod, vers):
# establish connection
conn = psycopg2.connect(utility.get_conn_str())
# init variable
res = (0,)
# with connect enables auto-commit. (otherwise do conn.commit() manually)
with conn:
with conn.cursor() as cur:
sql = "SELECT count(*) FROM versions JOIN products ON versions.prod_id = products.prod_id WHERE products.product = '{}' AND versions.version = '{}';".format(prod, vers + "%")
cur.execute(sql)
# print real query generated by psycopg2
print(cur.query)
try:
res = cur.fetchone()
except psycopg2.ProgrammingError:
res = (0,) # Error
# close connection
conn.close()
try:
if res[0] > 0:
utility.terminal_msg(2, "Product %s %s is already parsed into database.".format(prod, vers))
return True
# when certain version not found in database
else:
return False
except Exception as e:
utility.terminal_msg(0, "Error occurred during querying product %s %s with error message: %s".format(prod, vers, e))
def iterate_seadev(args):
# add/edit paths here for new products supported or on change of seadev directory structure
# retrieve mount point
mnt_path = args.seadev
# Only add this many isos to the database
iso_load_limit = args.limit_upload
isos_uploaded = 0
# check if the mount point exists or not
# add os.path.ismount(mnt_path) check when PD figures out a way to mount / (currently impossible with CIFS as well due to different access policies imposed to different subdirs)
if os.path.isdir(mnt_path):
mnt_path = utility.dir_formatting(mnt_path)
else:
utility.terminal_msg(0, "Mount point not found or not a valid one.")
# iterate over all products specified in path_dict
for prod in path_dict:
dynamic_base_path = mnt_path + "/build/" + path_dict.get(prod)
dynamic_trailing_path = "dist/release"
# check if path provided can be found in file system
if os.path.exists(dynamic_base_path) and os.path.isdir(dynamic_base_path):
# iterate over all versions
for ver_dir in os.listdir(dynamic_base_path):
# form the complete path for dynamic parsing
dynamic_path = os.path.join(dynamic_base_path, ver_dir, dynamic_trailing_path)
# check if build path exist
if os.path.exists(dynamic_path) and os.path.isdir(dynamic_path):
static_base_path = mnt_path + "/sources/" + path_dict.get(prod)
static_trailing_path = "release/logs"
# form the complete path for static parsing
static_path = os.path.join(static_base_path, ver_dir, static_trailing_path)
# set product
args.product_name = prod
# remove the "v" from the directory name and set version. (e.g., v13.1.1 => version number 13.1.1)
#args.version_number = get_fullversion_str(dynamic_path, ver_dir[1:])
args.version_number = ver_dir[1:]
# if no version number found (due to no iso found), skip
if not args.version_number:
continue
try:
# check if sources path also exist and whether the version is seen in DB
if os.path.exists(static_path) and os.path.isdir(static_path) and not version_exist(args.product_name,
args.version_number):
if (iso_load_limit and (isos_uploaded >= iso_load_limit)):
#We've uploaded enough isos for now. Time to break out
break
isos_uploaded += 1
# log message
utility.terminal_msg(2, "Processing {} {} from seadev path.".format(args.product_name, args.version_number))
# run parser
args = dynamic_parser.wrapper(args, 2)
static_parser.wrapper(args, 2)
dependency_resolver.resolve_deps(args.product_name, args.version_number)
except Exception as e:
with open("~/parser_exception_log", "a") as f:
f.write("************FOUND EXCEPTION**********\n")
f.write("Error parsing version: %s\n" % args.version_number)
f.write("Static path calculated %s\n" % os.fsdecode(args.static_path))
f.write("Dynamic path %s\n" % os.fsdecode(dynamic_path))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback, file=sys.stdout)
traceback.print_exception(exc_type, exc_value, exc_traceback, file=f)
continue
terminal_msg(2, "Completed uploading {} isos to {} at {}.".format(args.isos_uploaded, utility.get_database_name(), utility.get_database_url()))
if __name__ == "__main__":
p = argparse.ArgumentParser()
# used
p.add_argument("-pc", "--processes", metavar="<amount>", type=int, default=5,
help = "The number of processes to spawn that can be utilized to examine rpm files. " + \
"(default 10, suggested threshold x where x <= how many GBs of RAM available)")
p.add_argument("-o", "--output-directory", metavar = "<path>", type = str, default = "./output/",
help = "A directory to place the output. <cwd>/output is created if not specified.")
p.add_argument("-sd", "--seadev", metavar = "<mount path>", type = str, required = True,
help = "The mount point if running on or mounted with the seadev machine or any UNIX box that shares the same source code/ISO directory structure as seadev.")
p.add_argument("-c", "--clean-output-directory", action = "store_true",
help = "Cleanup the output directory before writing to it.")
p.add_argument("-w", "--wipe-program-output", action = "store_true",
help = "Remove the output directories and files after the whole process finishes.")
p.add_argument("-l", "--limit-upload", metavar="<limit>", type=int, default=0,
help = "Only upload <limit> new isos to the database.")
# not used, but necessary for compatibility with dynamic/static parsers
p.add_argument("-i", "--iso", type=str,
help = "[*] Please do not assign any value. This option will not take affect in this script.")
p.add_argument("-d", "--directory", type=str,
help = "[*] Please do not assign any value. This option will not take affect in this script.")
p.add_argument("-f", "--file", type = str,
help = "[*] Please do not assign any value. This option will not take affect in this script.")
p.add_argument("-p", "--product-name", type = str,
help = "[*] Please do not assign any value. This option will not take affect in this script.")
p.add_argument("-v", "--version-number", type = str,
help = "[*] Please do not assign any value. This option will not take affect in this script.")
args = p.parse_args()
args.isos_uploaded = 0
iterate_seadev(args)
| 2.046875 | 2 |
pi/emo_reco/helpers/utils/captchahelper.py | danielbrenners/buzz-lightyear | 17 | 12773483 | <reponame>danielbrenners/buzz-lightyear
# import the necessary packages
import imutils
import cv2
def preprocess(image, width, height):
# grab the dimensions of the image, then initialize
# the padding values
(h, w) = image.shape[:2]
# if the width is greater than the height then resize along
# the width
if w > h:
image = imutils.resize(image, width=width)
# otherwise, the height is greater than the width so resize
# along the height
else:
image = imutils.resize(image, height=height)
# determine the padding values for the width and height to
# obtain the target dimensions
padW = int((width - image.shape[1]) / 2.0)
padH = int((height - image.shape[0]) / 2.0)
# pad the image then apply one more resizing to handle any
# rounding issues
image = cv2.copyMakeBorder(image, padH, padH, padW, padW,
cv2.BORDER_REPLICATE)
image = cv2.resize(image, (width, height))
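# usage sketch (hypothetical file path): pad-and-resize a character crop
# to the fixed input size expected by a CNN, e.g.
#   char = cv2.imread("char.png")
#   char = preprocess(char, 28, 28)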
# return the pre-processed image
return image | 3 | 3 |
dnnv/properties/transformers/remove_ifthenelse.py | samysweb/dnnv | 5 | 12773484 | <filename>dnnv/properties/transformers/remove_ifthenelse.py<gh_stars>1-10
from __future__ import annotations
from ..expressions import And, IfThenElse, Implies, Not, Or
from .base import GenericExpressionTransformer
class RemoveIfThenElse(GenericExpressionTransformer):
def __init__(self, form="dnf"):
super().__init__()
# `form` provides a hint on how to efficiently format the IfThenElse replacement expression
self.form = form
def visit_IfThenElse(self, expression: IfThenElse) -> And:
condition = self.visit(expression.condition)
t_expr = self.visit(expression.t_expr)
f_expr = self.visit(expression.f_expr)
if self.form == "dnf":
return Or(And(condition, t_expr), And(Not(condition), f_expr))
return And(Implies(condition, t_expr), Implies(Not(condition), f_expr))
def visit_Not(self, expression):
form = self.form
self.form = "cnf" if form == "dnf" else "dnf"
result = super().generic_visit(expression)
self.form = form
return result
__all__ = ["RemoveIfThenElse"]
| 2.328125 | 2 |
models/student.py | EugeneJoe/Peri_Planner | 0 | 12773485 | <reponame>EugeneJoe/Peri_Planner
#!/usr/bin/python3
"""This module defines a Student Class"""
import sqlalchemy
from sqlalchemy import Column, String, ForeignKey
from sqlalchemy.orm import relationship, backref
from os import getenv
import models
from models.base_model import BaseModel, Base
from models.log import LessonLog
class Student(BaseModel, Base):
"""
This class defines a student through various attributes
Attributes:
first_name (str): student's first name
last_name (str): student's last name
activity (str): their instrument, or sport/game they play, or language
user_id (str): associated user's user id
lesson_logs (list): a list of lesson logs for this student
"""
if models.storage_t == 'db':
__tablename__ = 'students'
first_name = Column(String(128), nullable=False)
last_name = Column(String(128), nullable=True)
activity = Column(String(128), nullable=True)
user_id = Column(String(60), ForeignKey('users.id'), nullable=False)
lesson_logs = relationship("LessonLog", backref="student")
user = relationship("User", backref="students")
else:
first_name = ""
last_name = ""
activity = ""
user_id = ""
lesson_logs = []
def __init__(self, *args, **kwargs):
"""Initialize a new student"""
super().__init__(*args, **kwargs)
if self.first_name and self.last_name:
self.fullname = self.first_name + " " + self.last_name
if models.storage_t != "db":
@property
def lesson_logs(self):
"""Getter for the list of lesson logs for the student"""
# build the list locally: referencing self.lesson_logs here would
# re-enter this property and recurse forever
all_lessons = models.storage.all(LessonLog)
return [lesson for lesson in all_lessons.values() if lesson.student_id == self.id]
| 3.203125 | 3 |
isobmff/sinf.py | kentoku24/isobmff | 6 | 12773486 | <filename>isobmff/sinf.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from .box import Box
from .box import FullBox
from .box import Quantity
from .box import read_int
from .box import read_string
class ProtectionSchemeInfoBox(Box):
box_type = 'sinf'
is_mandatory = False
quantity = Quantity.ONE_OR_MORE
class OriginalFormatBox(Box):
box_type = 'frma'
is_mandatory = True
quantity = Quantity.EXACTLY_ONE
def __init__(self, size):
super().__init__(size=size)
self.data_format = None
def read(self, file):
self.data_format = read_int(file, 4)
class SchemeTypeBox(FullBox):
box_type = 'schm'
is_mandatory = False
quantity = Quantity.ZERO_OR_ONE
def __init__(self, size):
super().__init__(size=size)
self.scheme_type = None
self.scheme_version = None
self.scheme_uri = None
def read(self, file):
self.scheme_type = read_int(file, 4)
self.scheme_version = read_int(file, 4)
if self.flags & 0b1:
self.scheme_uri = read_string(file)
class SchemeInformationBox(Box):
box_type = 'schi'
is_mandatory = False
quantity = Quantity.ZERO_OR_ONE
| 2.421875 | 2 |
oi/Contest/self/2017-1/network/gen.py | Riteme/test | 3 | 12773487 | <gh_stars>1-10
#!/usr/bin/env pypy
import sys
from random import *
if len(sys.argv) < 3:
print("Usage: %s [N] [Q]" % sys.argv[0])
exit(-1)
N = int(sys.argv[1])
Q = int(sys.argv[2])
print("%s %s" % (N, Q))
for i in range(0, Q):
u = randint(1, N)
v = randint(1, N)
t = randint(1, Q - i)
print("%s %s %s" % (u, v, t))
| 2.90625 | 3 |
marketplaces/apps/market_community/urls.py | diassor/CollectorCity-Market-Place | 135 | 12773488 | <gh_stars>100-1000
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url('^$', 'market_community.views.overview', name='market_community'),
url(r'^overview/$', 'market_community.views.overview', name='community_overview'),
url(r'^forums/$', 'market_community.views.forums', name='community_forums'),
url(r'^blogs/$', 'market_community.views.blogs', name='community_blogs'),
url(r'^faq/$', 'market_community.views.faq', name='community_faq'),
url(r'^profiles/$', 'market_community.views.profiles', name='community_profiles'),
url(r'^profiles/(?P<letter>[\w]+)/$', 'market_community.views.profiles_list', name='community_profiles_list'),
)
| 1.53125 | 2 |
src/main/resources/templates/c/gcc/test/testUtils/Utils.py | TheZoker/Artemis | 58 | 12773489 | import os
import select
import signal
from datetime import datetime
from io import TextIOWrapper
from pty import openpty
from pwd import getpwnam, struct_passwd
from subprocess import Popen
from termios import ONLCR, tcgetattr, TCSANOW, tcsetattr
from threading import Thread
from time import sleep
from typing import Any, Dict, List, Optional
def studSaveStrComp(ref: str, other: str, strip: bool = True, ignoreCase: bool = True, ignoreNonAlNum=True):
"""
Student-safe comparison between strings.
Converts both to lowercase, strips them, and removes all non-alphanumeric
characters before comparison.
"""
# Strip:
if strip:
ref = ref.strip()
other = other.strip()
# Convert to lower
if ignoreCase:
ref = ref.lower()
other = other.lower()
# Remove all non alphanumeric chars:
if ignoreNonAlNum:
ref = "".join(c for c in ref if c.isalnum())
other = "".join(c for c in other if c.isalnum())
# print("Ref: {}\nOther:{}".format(ref, other))
return ref == other
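# Example: studSaveStrComp("Hello, World!", "  hello world ") evaluates to
# True, since both strings normalize to "helloworld".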
def recursive_chmod(path: str, mode: int):
"""
Recursively changes file permissions.
"""
os.chmod(path, mode)
# print("CHMOD: {}".format(path))
f: str
for f in os.listdir(path):
f = os.path.join(path, f)
if os.path.isdir(f):
recursive_chmod(f, mode)
else:
os.chmod(f, mode)
# print("CHMOD: {}".format(f))
# Limit for stdout in chars.
# Should prevent to much output on artemis if for example there is a loop in a tree.
# By default the stdout limit is disabled:
__stdoutLimitEnabled: bool = False
def resetStdoutLimit(limit: int = 15000):
"""
Resets the stdout limit to the given value (default = 15,000 chars).
"""
global stdoutCharsLeft # Required since we want to modify stdoutCharsLeft
stdoutCharsLeft = limit
def setStdoutLimitEnabled(enabled: bool):
"""
Enables or disables the stdout limit.
Does not reset the remaining character count!
"""
global __stdoutLimitEnabled
__stdoutLimitEnabled = enabled
def __printStdout(text: str):
"""
Prints the given text to stdout.
Only if there are still enough chars in stdoutCharsLeft left.
Else will not print anything.
"""
global stdoutCharsLeft # Required since we want to modify stdoutCharsLeft
if not __stdoutLimitEnabled:
print(text)
elif stdoutCharsLeft > 0:
if stdoutCharsLeft >= len(text):
print(text)
else:
print(text[:stdoutCharsLeft] + "...")
stdoutCharsLeft -= len(text)
if stdoutCharsLeft <= 0:
print("[STDOUT LIMIT REACHED]".center(50, "="))
# A cache of all that the tester has been writing to stdout:
testerOutputCache: List[str] = list()
def clearTesterOutputCache():
"""
Clears the testerOutputCache.
"""
testerOutputCache.clear()
def getTesterOutput():
"""
Returns the complete tester output as a single string.
"""
return "\n".join(testerOutputCache)
startTime: datetime = datetime.now()
def __getCurSeconds():
"""
Returns the total seconds passed, since the tester started as a string with a precision of two digits.
"""
seconds: float = (datetime.now() - startTime).total_seconds()
return str(round(seconds, 2))
def __getCurDateTimeStr():
"""
Returns the current date and time string (e.g. 11.10.2019_17:02:33)
"""
return datetime.now().strftime("%d.%m.%Y_%H:%M:%S")
def printTester(text: str, addToCache: bool = True):
"""
Prints the given string with the '[T]: ' tag in front.
Should be used instead of print() to make it easier for students
to determine what came from the tester and what from their program.
"""
msg: str = f"[{__getCurSeconds()}][T]: {text}"
__printStdout(msg)
if addToCache:
testerOutputCache.append(msg)
def printProg(text: str, addToCache: bool = True):
"""
Prints the given string with the '[P]: ' tag in front.
Should be used instead of print() to make it easier for students
to determine what came from the tester and what from their program.
"""
msg: str = f"[{__getCurSeconds()}][P]: {text.rstrip()}"
__printStdout(msg)
if addToCache:
testerOutputCache.append(msg)
def shortenText(text: str, maxNumChars: int):
"""
Shortens the given text to a maximum number of chars.
If there are more chars than specified in maxNumChars,
it will append: "\n[And {} chars more...]".
"""
if len(text) > maxNumChars:
s: str = f"\n[And {len(text) - maxNumChars} chars more...]"
l: int = maxNumChars - len(s)
if l > 0:
return f"{text[:l]}{s}"
else:
printTester(f"Unable to limit output to {maxNumChars} chars! Not enough space.", False)
return ""
return text
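# Example: shortenText("a" * 200, 50) returns exactly 50 characters,
# ending with the note "\n[And 150 chars more...]".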
class ReadCache(Thread):
"""
Helper class that makes sure we only get one line (separated by '\n')
if we read multiple lines at once.
"""
__cacheList: List[str]
__cacheFile: TextIOWrapper
__outFd: int
__outSlaveFd: int
def __init__(self, filePath: str):
Thread.__init__(self)
self.__cacheList = []
self.__cacheFile = open(filePath, "w")
# Emulate a terminal:
self.__outFd, self.__outSlaveFd = openpty()
self.start()
def fileno(self):
return self.__outFd
def join(self, timeout: float = None):
try:
os.close(self.__outFd)
except OSError as e:
printTester(f"Closing stdout FD failed with: {e}")
try:
os.close(self.__outSlaveFd)
except OSError as e:
printTester(f"Closing stdout slave FD failed with: {e}")
Thread.join(self, timeout)
@staticmethod
def __isFdValid(fd: int):
try:
os.stat(fd)
except OSError:
return False
return True
@staticmethod
def __decode(data: bytes):
"""
Tries to decode the given string as UTF8.
In case this fails, it will fall back to ASCII encoding.
Returns the decoded result.
---
data: bytes
The data that should be decoded.
"""
try:
return data.decode("utf8", "replace")
except UnicodeDecodeError as e:
printTester(f"Failed to decode line as utf8. Using ascii ecoding - {e}")
return data.decode("ascii", "replace")
def run(self):
pollObj = select.poll()
pollObj.register(self.__outSlaveFd, select.POLLIN)
while self.__isFdValid(self.__outSlaveFd):
try:
for fd, mask in pollObj.poll(100):
if fd != self.__outSlaveFd:
continue
if mask & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
return
if mask & select.POLLIN:
data: bytes = os.read(self.__outSlaveFd, 4096)
dataStr: str = self.__decode(data)
try:
self.__cacheFile.write(dataStr)
except UnicodeEncodeError:
printTester("Invalid ASCII character read. Skipping line...")
continue
self.__cacheFile.flush()
self.__cache(dataStr)
printProg(dataStr)
except OSError:
break
def canReadLine(self):
return len(self.__cacheList) > 0
def __cache(self, data: str):
self.__cacheList.extend(data.splitlines(True))
def readLine(self):
if self.canReadLine():
return self.__cacheList.pop(0)
return ""
class PWrap:
"""
A wrapper for "Popen".
"""
cmd: List[str]
prog: Optional[Popen]
cwd: str
__stdinFd: int
__stdinMasterFd: int
__stdOutLineCache: ReadCache
__stdErrLineCache: ReadCache
__terminatedTime: Optional[datetime]
def __init__(self, cmd: List[str], stdoutFilePath: str = "/tmp/stdout.txt", stderrFilePath: str = "/tmp/stderr.txt", cwd: Optional[str] = None):
self.cmd = cmd
self.prog = None
self.cwd: str = os.getcwd() if cwd is None else cwd
self.stdout = open(stdoutFilePath, "wb")
self.stderr = open(stderrFilePath, "wb")
self.__stdOutLineCache = ReadCache(stdoutFilePath)
self.__stdErrLineCache = ReadCache(stderrFilePath)
self.__terminatedTime = None
def __del__(self):
try:
os.close(self.__stdinFd)
except OSError as e:
printTester(f"Closing stdin FD failed with: {e}")
except AttributeError:
pass
try:
os.close(self.__stdinMasterFd)
except OSError as e:
printTester(f"Closing stdin master FD failed with: {e}")
except AttributeError:
pass
def start(self, userName: Optional[str] = None):
"""
Starts the process and sets all file descriptors to nonblocking.
---
userName: Optional[str] = None
In case the userName is not None, the process will be executed as the given userName.
This requires root privileges and you have to ensure the user has the required rights to access all resources (files).
"""
# Emulate a terminal for stdin:
self.__stdinMasterFd, self.__stdinFd = openpty()
# Transform "\r\n" to '\n' for data send to stdin:
tsettings: List[Any] = tcgetattr(self.__stdinFd)
tsettings[1] &= ~ONLCR
tcsetattr(self.__stdinFd, TCSANOW, tsettings)
if userName is not None:
# Check for root privileges:
self.__checkForRootPrivileges()
# Prepare environment:
pwRecord: struct_passwd = getpwnam(userName)
env: Dict[str, str] = os.environ.copy()
env["HOME"] = pwRecord.pw_dir
env["LOGNAME"] = pwRecord.pw_name
env["USER"] = pwRecord.pw_name
env["PWD"] = self.cwd
printTester(f"Starting process as: {pwRecord.pw_name}")
# Start the actual process:
self.prog = Popen(
self.cmd,
stdout=self.__stdOutLineCache.fileno(),
stdin=self.__stdinMasterFd,
stderr=self.__stdErrLineCache.fileno(),
universal_newlines=True,
cwd=self.cwd,
env=env,
preexec_fn=self.__demote(pwRecord.pw_uid, pwRecord.pw_gid, pwRecord.pw_name),
)
else:
# Start the actual process:
self.prog = Popen(
self.cmd,
stdout=self.__stdOutLineCache.fileno(),
stdin=self.__stdinMasterFd,
stderr=self.__stdErrLineCache.fileno(),
universal_newlines=True,
cwd=self.cwd,
preexec_fn=os.setsid,
) # Make sure we store the process group id
def __demote(self, userUid: int, userGid: int, userName: str):
"""
Returns a call, demoting the calling process to the given user, UID and GID.
"""
def result():
# self.__printIds("Starting demotion...") # Will print inside the new process and reports via the __stdOutLineCache
os.initgroups(userName, userGid)
os.setuid(userUid)
# self.__printIds("Finished demotion.") # Will print inside the new process and reports via the __stdOutLineCache
return result
@staticmethod
def __checkForRootPrivileges():
"""
Checks if the current process has root permissions.
Fails if not.
"""
if os.geteuid() != 0:
raise PermissionError("The tester has to be executed as root to be able to switch users!")
def __printIds(self, msg: str):
printTester(f"uid, gid = {os.getuid()}, {os.getgid()}; {msg}")
def __readLine(self, lineCache: ReadCache, blocking: bool):
"""
Reads a single line from the given ReadCache and returns it.
---
blocking:
When set to True will only return if the process terminated or we read a non empty string.
"""
while blocking:
if not lineCache.canReadLine():
if not self.hasTerminated():
sleep(0.1)
else:
break
else:
line: str = lineCache.readLine()
return line
return ""
def readLineStdout(self, blocking: bool = True):
"""
Reads a single line from the processes stdout and returns it.
---
blocking:
When set to True will only return if the process terminated or we read a non empty string.
"""
return self.__readLine(self.__stdOutLineCache, blocking)
def canReadLineStdout(self):
"""
Returns whether there is a line from the processes stdout that can be read.
"""
return self.__stdOutLineCache.canReadLine()
def readLineStderr(self, blocking: bool = True):
"""
Reads a single line from the processes stderr and returns it.
---
blocking:
When set to True will only return if the process terminated or we read a non empty string.
"""
return self.__readLine(self.__stdErrLineCache, blocking)
def canReadLineStderr(self):
"""
Returns whether there is a line from the processes stderr that can be read.
"""
return self.__stdErrLineCache.canReadLine()
def writeStdin(self, data: str):
"""
Writes the given data string to the processes stdin.
"""
os.write(self.__stdinFd, data.encode())
printTester(f"Wrote: {data}")
def hasTerminated(self):
"""
Returns whether the process has terminated.
"""
if self.prog is None:
return True
# Make sure we wait 1.0 seconds after the process has terminated to
# make sure all the output arrived:
elif self.prog.poll() is not None:
if self.__terminatedTime:
if (datetime.now() - self.__terminatedTime).total_seconds() > 1.0:
return True
else:
self.__terminatedTime = datetime.now()
return False
def getReturnCode(self):
"""
Returns the returncode of the terminated process else None.
"""
return self.prog.returncode
def waitUntilTerminationReading(self, secs: float = -1):
"""
Waits until termination of the process and tries to read until either
the process terminated or the timeout occurred.
Returns True if the process terminated before the timeout occurred,
else False.
---
secs:
The timeout in seconds. Values < 0 result in infinity.
"""
start: datetime = datetime.now()
while True:
if self.hasTerminated():
return True
elif 0 <= secs <= (datetime.now() - start).total_seconds():
return False
self.readLineStdout(False)
sleep(0.1)
def kill(self, signal: int = signal.SIGKILL):
"""
Sends the given signal to the complete process group started by the process.
Returns True if the process existed and had to be killed. Else False.
---
signal:
The signal that should be sent to the process group started by the process.
"""
# Send a signal to the complete process group:
try:
os.killpg(os.getpgid(self.prog.pid), signal)
return True
except ProcessLookupError:
printTester("No need to kill process. Process does not exist any more.")
return False
def cleanup(self):
"""
Should be called once the execution has terminated.
Will join the stdout and stderr reader threads.
"""
self.__stdOutLineCache.join()
self.__stdErrLineCache.join()
def getPID(self):
return self.prog.pid
| 2.28125 | 2 |
2.Insertion Sort, Merge Sort/sort.py | zuhaalfaraj/Introduction-to-Algorithms | 1 | 12773490 | <gh_stars>1-10
def insertionSort(lst):
# a -inf sentinel at index 0 lets the inner loop stop without a bounds check
# (float('-inf') avoids depending on numpy, which is only imported in __main__)
lst = [float('-inf')] + lst
for i in range(2, len(lst)):
t = lst[i]
k = i - 1
while t < lst[k]:
lst[k + 1] = lst[k]
k -= 1
lst[k + 1] = t
lst = lst[1:]
return lst
def mergeSort(arr):
def merge(L, R):
i = j = k = 0
while i < len(L) and j < len(R):
if L[i] < R[j]:
arr[k] = L[i]
i += 1
else:
arr[k] = R[j]
j += 1
k += 1
# copy any leftover elements; without these loops the sort drops values
while i < len(L):
arr[k] = L[i]
i += 1
k += 1
while j < len(R):
arr[k] = R[j]
j += 1
k += 1
return arr
if len(arr) > 1:
mid = len(arr) // 2
L = arr[:mid]
R = arr[mid:]
mergeSort(L)
mergeSort(R)
arr = merge(L, R)
return arr
if __name__=='__main__':
import time
import numpy as np
lst= [4,2,1,5,3,10,2,33,100,23,3,44,3,2323,12,45,23,55,234]
#lst= np.asarray(lst)
#lst = np.random.rand(700)
#lst= list(lst)
t1= time.time()
y= insertionSort(lst)
t2 = time.time()
x= mergeSort(lst)
t3 = time.time()
tim1= t3-t2
tim2= t2-t1
#print(x)
print("mergeSort time:", tim1)
print("insertionSort time:", tim2)
print("difference:", tim2 - tim1)
print([3]+[2,3,4])
| 3.375 | 3 |
nicos/devices/datasinks/file.py | ebadkamil/nicos | 12 | 12773491 | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Base file data sink class for NICOS."""
from nicos.core.data import DataSink
from nicos.core.params import Param, intrange, listof, none_or, subdir
TEMPLATE_DESC = '''Templates must contain percent-style placeholders
(e.g. ``%(proposal)s_%(pointcounter)08d``) with the following keys:
* counters:
- ``(type)counter`` for globally unique counters
- ``(type)propcounter`` for unique counters within a proposal
- ``(type)samplecounter`` for unique counters within a sample directory \
(for many instruments, there is no separate sample directory, so this \
counter is the same as the propcounter)
- ``(type)number`` for the dataset's number within its parent
``type`` is the dataset type, e.g. ``point`` or ``scan``.
* proposal info from the experiment (e.g. ``proposal`` for the prop. number)
* all devices and parameters (e.g. ``dev1`` for the value of dev1 and
``dev1.param`` for a parameter)
'''
class FileSink(DataSink):
"""Base class for sinks that save data into files."""
parameters = {
'subdir': Param('Filetype specific subdirectory name',
type=subdir, mandatory=False, default=''),
'filenametemplate': Param('List of templates for data file names '
'(will be hardlinked), can contain '
'subdirectories',
ext_desc=TEMPLATE_DESC, type=listof(str),
default=['%(pointcounter)08d.dat'],
settable=False, prefercache=False),
'filemode': Param('File access rights after closing the file, '
"if set to 'none' (default) the OS defaults "
'will be used',
type=none_or(intrange(0o000, 0o777),)),
}
| 1.640625 | 2 |
Model Codes/RetrieveEdit/github_eval.py | lvyiwei1/StylePTB | 36 | 12773492 | # -*- coding: utf-8 -*-
import paths
import os
#os.environ['COPY_EDIT_DATA'] = paths.data_dir
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import sys
def set_output_encoding(encoding='utf-8'):
import sys
import codecs
'''When piping to the terminal, python knows the encoding needed, and
sets it automatically. But when piping to another program (for example,
| less), python can not check the output encoding. In that case, it
is None. What I am doing here is to catch this situation for both
stdout and stderr and force the encoding'''
current = sys.stdout.encoding
if current is None :
sys.stdout = codecs.getwriter(encoding)(sys.stdout)
current = sys.stderr.encoding
if current is None :
sys.stderr = codecs.getwriter(encoding)(sys.stderr)
#Note - we need this or else the program crashes due to a utf-8 error when trying to pipe the outputs to a text file.
#set_output_encoding()
from gtd.utils import Config
from editor_code.copy_editor.edit_training_run import EditTrainingRun
from editor_code.copy_editor.context_vae_training_run import ContextVAETrainingRun
from editor_code.copy_editor.editor import EditExample
from editor_code.copy_editor.vocab import HardCopyDynamicVocab
from gtd.utils import bleu
print os.environ['COPY_EDIT_DATA']
# no-profile
profile = False
config = Config.from_file('editor_code/configs/editor/github.txt')
src_dir_noret = os.environ['COPY_EDIT_DATA']+'/edit_runs/0' #for codalab
src_dir_vae = os.environ['COPY_EDIT_DATA']+'/edit_runs/1' #for codalab
load_expt_noret = EditTrainingRun(config, src_dir_noret)
load_expt_vae = ContextVAETrainingRun(config, src_dir_vae)
import numpy as np
edit_model_noret = load_expt_noret.editor
examples = load_expt_noret._examples
from gtd.utils import chunks
from tqdm import tqdm
eval_num = 500
beam_list_noret, edit_traces_noret = edit_model_noret.edit(examples.test[0:eval_num])
import numpy as np
def eval_batch_noret(ex):
edit_model_noret.copy_index=0
editor_input = edit_model_noret.preprocess(ex)
train_decoder = edit_model_noret.train_decoder
encoder_output, enc_loss = edit_model_noret.encoder(editor_input.encoder_input)
vocab_probs = edit_model_noret.train_decoder.vocab_probs(encoder_output, editor_input.train_decoder_input)
token_list = editor_input.train_decoder_input.target_words.split()
base_vocab = edit_model_noret.base_vocab
unk_idx = base_vocab.word2index(base_vocab.UNK)
all_ranks_noret = [ [] for _ in range(len(ex))]
position = 0
for token, vout in zip(token_list, vocab_probs):
target_idx = token.values.data.cpu().numpy()
target_mask = token.mask.data.cpu().numpy()
in_vocab_id = target_idx[:,0]
copy_token_id = target_idx[:, 1]
vocab_matrix = vout.data.cpu().numpy()
for i in range(len(in_vocab_id)):
voc_vec = vocab_matrix[i,:].copy()
voc_vec_rest = voc_vec.copy()
voc_vec_rest[copy_token_id[i]]=0
voc_vec_rest[in_vocab_id[i]] = 0
if in_vocab_id[i] == unk_idx:
gold_rank = np.sum(voc_vec_rest >= voc_vec[copy_token_id[i]])
else:
gold_rank = np.sum(voc_vec_rest >= voc_vec[copy_token_id[i]] + voc_vec[in_vocab_id[i]])
if target_mask[i] == 1.0:
all_ranks_noret[i].append(gold_rank)
position+=1
del token_list
del vocab_probs
return all_ranks_noret
all_ranks_noret = []
for chunk in tqdm(chunks(examples.test[0:eval_num],16), total=eval_num/16):
all_ranks_noret.extend(eval_batch_noret(chunk))
###
# base retriever.
import gtd.retrieval_func as rf
lsh, dict = rf.make_hash(examples.train)
output_index = rf.grab_nbs(examples.test[0:eval_num], lsh, dict)
ret_pred = rf.generate_predictions(examples.train, output_index)
def agree_vec(ref, targ):
rank_vec = []
for i in range(max(len(ref),len(targ))):
if i < len(targ) and i < len(ref):
agree_ind = ref[i] == targ[i]
rank_vec.append((1.0-agree_ind)*100.0)
else:
rank_vec.append(100.0)
return rank_vec
all_ranks_ret_fixed = []
for i in range(eval_num):
all_ranks_ret_fixed.append(agree_vec(examples.test[i].target_words, ret_pred[i]))
###
# vae retriever
new_vecs_joint, new_vecs_ctx = load_expt_vae.editor.get_vectors(examples.train)
held_vecs_joint, held_vecs_ctx = load_expt_vae.editor.get_vectors(examples.test[0:eval_num])
lshout = rf.make_hash_from_vec(new_vecs_ctx)
output_index = rf.grab_nbs_from_vec(held_vecs_ctx, lshout)
ret_pred_ctxvae = rf.generate_predictions(examples.train, output_index)
all_ranks_ret_vae = []
for i in range(eval_num):
all_ranks_ret_vae.append(agree_vec(examples.test[i].target_words, ret_pred_ctxvae[i]))
####
# eval code
import itertools
def rle(inarray):
""" run length encoding. Partial credit to R rle function.
Handles arrays of multiple datatypes, including non-NumPy inputs.
returns: tuple (runlengths, startpositions, values) """
ia = np.asarray(inarray) # force numpy
n = len(ia)
if n == 0:
return (None, None, None)
else:
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
i = np.append(np.where(y), n - 1) # must include last element position
z = np.diff(np.append(-1, i)) # run lengths
p = np.cumsum(np.append(0, z))[:-1] # positions
return (z, p, ia[i])
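# Example: rle(np.array([1, 1, 0, 0, 0, 1])) returns
# (array([2, 3, 1]), array([0, 2, 5]), array([1, 0, 1])):
# run lengths, start positions, and the value of each run.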
def avg_runlen(rankin, cut):
tmp = []
for rank in rankin:
if sum(np.array(rank) <= cut) > 0:
rlevals = rle(np.array(rank) <= cut)
match_pr = rlevals[0][rlevals[2]] / float(np.sum(rlevals[0])) #probability of picking each run
expect_dist = (rlevals[0][rlevals[2]]+1.0)/2.0 #expected run length over each run (if we sample uniformly)
elen = np.sum(np.array(expect_dist)*np.array(match_pr))
tmp.append(elen)
else:
tmp.append(0)
return tmp
def correct_runlen(rankin, cut):
tmp = []
for rank in rankin:
rlevals = rle(np.array(rank) <= cut)
if np.sum(rlevals[2])>0:
tmp.append(np.max(rlevals[0][rlevals[2]]))
#if rlevals[2][0]:
# tmp.append(rlevals[0][0])
else:
tmp.append(0)
return tmp
eval_fns = [lambda x: np.mean(avg_runlen(x, 1)), lambda x: np.mean(avg_runlen(x, 5)), lambda x: np.mean(avg_runlen(x, 10)),
lambda x: np.mean(correct_runlen(x, 1)), lambda x: np.mean(correct_runlen(x, 5)), lambda x: np.mean(correct_runlen(x, 10))]
methods = [all_ranks_noret, all_ranks_ret_fixed, all_ranks_ret_vae]
all_eval = [[fn(method) for method in methods] for fn in eval_fns]
print 'method table: s2s, ret_fixed, ret_context_only'
print np.array(all_eval)
import regex as re
def tokenize_for_bleu_eval(code):
code = re.sub(r'([^A-Za-z0-9_])', r' \1 ', code)
code = re.sub(r'([a-z])([A-Z])', r'\1 \2', code)
code = re.sub(r'\s+', ' ', code)
code = code.replace('"', '`')
code = code.replace('\'', '`')
tokens = [t for t in code.split(' ') if t]
return tokens
import editdistance
gen2_out = []
ret_fix_out = []
ret_vae_out = []
for i in range(len(edit_traces_noret)):
ret_fix_out.append(ret_pred[i])
ret_vae_out.append(ret_pred_ctxvae[i])
gen2 = beam_list_noret[i][0]
gen2_out.append(gen2)
def btok(x,y):
return bleu(tokenize_for_bleu_eval(' '.join(x)),tokenize_for_bleu_eval(' '.join(y)))
print 'BLEU'
orig_out = [trace.example.target_words for trace in edit_traces_noret]
blist = [btok(gen2_out[i], orig_out[i]) for i in range(len(gen2_out))]
print 's2s'
print np.mean(blist)
blist_ret_fix = [btok(ret_fix_out[i], orig_out[i]) for i in range(len(gen2_out))]
print 'fixed ret'
print np.mean(blist_ret_fix)
blist_vae = [btok(ret_vae_out[i], orig_out[i]) for i in range(len(gen2_out))]
print 'vae ret'
print np.mean(blist_vae)
| 2.3125 | 2 |
Plan 001/Variables/ejercicio_2_antes.py | LookThisCode/Python-Basic-001 | 0 | 12773493 | __author__ = 'nickbortolotti'
"""
Part 1
1. Use an array that stores 2 strings and 2 integer values
2. Display the element at position 1
Display positions 0 through 2
Display from position 2 onward
Display from position 1 in reverse
Display the last element of the array, reading it in reverse
"""
| 2.484375 | 2 |
ndj_pipeline/data_checks.py | ndjenkins85/ndj_cookie | 1 | 12773494 | # -*- coding: utf-8 -*-
# Copyright © 2021 by <NAME>. All rights reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Schemas and assertions for raw data inputs.
Schema checks are useful to standardize certain information in raw data including:
* Clean column names (lowercase, underscore)
* Ensure an understanding of primary keys of data (uniqueness)
* Set data types of columns, i.e. boolean->int, datetime, string, nullable integer
* Pandera schema checks that lock in assumptions about a data file. This helps
to quickly assess whether a file has changed, or whether a similar file is the same or different.
Some examples:
* Contains exact list of columns (no more no less)
* Nullable column
* Data type correct
* Data range checks (must not be 0 or less, must contain only these values)
Schemas can often be re-applied to similar data files, i.e. tabs of an excel, or train/test data.
"""
import logging
from pathlib import Path
import pandas as pd
from pandera import io
from ndj_pipeline import utils
def check_titanic() -> pd.DataFrame:
"""Data schema and typing validations.
Returns:
Loaded pandas dataframe with typing and schema checks.
"""
# Standardize column names
input_path = Path("data", "titanic.csv")
logging.info(f"Loading data from {input_path}")
df = pd.read_csv(input_path)
df = df.rename(columns=utils.clean_column_names(df)) # type: ignore
# Checks for duplicates
assert df.shape == df.drop_duplicates().shape
# Recoding of string variables
df["sex"] = df["sex"].replace({"male": 1, "female": 0}).astype("Int64")
# Full expressive list of variables, assumptions and questions
schema_path = Path("schemas", "titanic.yaml")
with open(schema_path, "r") as f:
pandera_schema_check = io.from_yaml(f)
df = pandera_schema_check.validate(df)
logging.info("Validation checks passed")
return df
| 1.765625 | 2 |
tests/datasets/test_gcc_data.py | fishmingyu/cogdl | 6 | 12773495 | <reponame>fishmingyu/cogdl
from cogdl.datasets import build_dataset
from cogdl.utils import build_args_from_dict
def test_gcc_kdd_icdm():
args = build_args_from_dict({"dataset": "kdd_icdm"})
assert args.dataset == "kdd_icdm"
dataset = build_dataset(args)
data = dataset.data
assert data[0].edge_index[0].shape[0] == 17316
assert data[1].edge_index[0].shape[0] == 10846
if __name__ == "__main__":
test_gcc_kdd_icdm()
| 2.078125 | 2 |
src/quiche/pal/pal_lift_37.py | riswords/quiche | 0 | 12773496 | from ast import (
Module,
Suite,
FunctionDef,
AsyncFunctionDef,
Assign,
AnnAssign,
For,
AsyncFor,
With,
AsyncWith,
Num,
Str,
Bytes,
NameConstant,
ExtSlice,
arguments,
arg,
)
from quiche.pal.pal_block import (
PALIdentifier,
PALLeaf,
PALPrimitive,
StmtBlock,
ExprBlock,
SliceBlock,
ArgBlock,
WithItemBlock,
)
from quiche.pal.pal_lifter import PALLifter
class PALLift37(PALLifter):
def visit_Module(self, node: Module) -> Module:
# Short-circuit: assume if the body is a StmtBlock, it's already
# been transformed. NOTE: may need to revisit this later if this
# transform is applied to fix-up after other modifications.
if isinstance(node.body, StmtBlock):
return node
self.generic_visit(node)
return Module(body=StmtBlock(node.body))
def visit_Suite(self, node: Suite) -> Suite:
if isinstance(node.body, StmtBlock):
return node
self.generic_visit(node)
return Suite(body=StmtBlock(node.body))
def visit_FunctionDef(self, node: FunctionDef) -> FunctionDef:
if isinstance(node.body, StmtBlock):
return node
self.generic_visit(node)
return FunctionDef(
name=PALIdentifier(node.name),
args=node.args,
body=StmtBlock(node.body),
decorator_list=ExprBlock(node.decorator_list),
returns=node.returns,
)
def visit_AsyncFunctionDef(self, node: AsyncFunctionDef) -> AsyncFunctionDef:
if isinstance(node.body, StmtBlock):
return node
self.generic_visit(node)
return AsyncFunctionDef(
name=PALIdentifier(node.name),
args=node.args,
body=StmtBlock(node.body),
decorator_list=ExprBlock(node.decorator_list),
returns=node.returns,
)
def visit_Assign(self, node: Assign) -> Assign:
if isinstance(node.targets, ExprBlock):
return node
self.generic_visit(node)
return Assign(targets=ExprBlock(node.targets), value=node.value)
def visit_AnnAssign(self, node: AnnAssign) -> AnnAssign:
if isinstance(node.simple, PALPrimitive):
return node
self.generic_visit(node)
return AnnAssign(target=node.target, value=node.value, simple=PALPrimitive[int](node.simple))
def visit_For(self, node: For) -> For:
if isinstance(node.body, StmtBlock):
return node
self.generic_visit(node)
return For(
target=node.target,
iter=node.iter,
body=StmtBlock(node.body),
orelse=StmtBlock(node.orelse),
)
def visit_AsyncFor(self, node: AsyncFor) -> AsyncFor:
if isinstance(node.body, StmtBlock):
return node
self.generic_visit(node)
return AsyncFor(
target=node.target,
iter=node.iter,
body=StmtBlock(node.body),
orelse=StmtBlock(node.orelse),
)
def visit_With(self, node: With) -> With:
if isinstance(node.items, WithItemBlock):
return node
self.generic_visit(node)
return With(items=WithItemBlock(node.items), body=StmtBlock(node.body))
def visit_AsyncWith(self, node: AsyncWith) -> AsyncWith:
if isinstance(node.items, WithItemBlock):
return node
self.generic_visit(node)
return AsyncWith(
items=WithItemBlock(node.items), body=StmtBlock(node.body)
)
# EXPRESSIONS
def visit_Num(self, node: Num) -> PALLeaf[complex]:
return PALLeaf[complex](type(node.n).__name__, Num, node.n)
def visit_Str(self, node: Str) -> PALLeaf[str]:
return PALLeaf[str]("str", Str, node.s)
def visit_Bytes(self, node: Bytes) -> PALLeaf[bytes]:
return PALLeaf[bytes]("bytes", Bytes, node.s)
def visit_NameConstant(self, node: NameConstant) -> PALLeaf:
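        # NameConstant covers True/False/None; None is also labeled "bool" here.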
return PALLeaf[bool]("bool", NameConstant, node.value)
def visit_ExtSlice(self, node: ExtSlice) -> ExtSlice:
if isinstance(node.dims, SliceBlock):
return node
self.generic_visit(node)
return ExtSlice(dims=SliceBlock(node.dims))
def visit_arguments(self, node: arguments) -> arguments:
if isinstance(node.args, ArgBlock):
return node
self.generic_visit(node)
return arguments(
args=ArgBlock(node.args),
vararg=node.vararg,
kwonlyargs=ArgBlock(node.kwonlyargs),
kw_defaults=ExprBlock(node.kw_defaults),
kwarg=node.kwarg,
defaults=ExprBlock(node.defaults),
)
def visit_arg(self, node: arg) -> arg:
if isinstance(node.arg, PALIdentifier):
return node
self.generic_visit(node)
return arg(arg=PALIdentifier(node.arg), annotation=node.annotation)
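# A minimal usage sketch (assumption: PALLifter behaves like an
# ast.NodeTransformer, so .visit() dispatches to the visit_* methods above):
#
#   import ast
#   tree = ast.parse("def f(x):\n    return x + 1\n")
#   lifted = PALLift37().visit(tree)   # lifted.body becomes a StmtBlock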
| 2.265625 | 2 |
error_output.py | mertyigit/INEEDopt | 15 | 12773497 |
def error_output(number_of_parameters):
    """Write the optimized error value into the params file next to the corresponding parameter indicator"""
    # number_of_parameters is currently unused but kept for API compatibility
    try:
        file2 = open("parameters", "r+")
        file = open("fort.13", "r")
        try:
            file.seek(0)
            error = file.read(12)
            file2.seek(20)
            file2.write(" " + error.rjust(18))
        finally:
            file.close()
            file2.close()
    except IOError:
        pass
def error_output_bf(number_of_parameters, parameters_file_name):
    """Write the optimized total error value into the params file next to the corresponding parameter indicator"""
    error_bf = None
    try:
        file2 = open(parameters_file_name, "a+")
        file = open("fort.13", "r")
        try:
            file.seek(0)
            error_bf = file.read(12)
            file2.write("\n TOTAL ERROR: " + error_bf.rjust(18))
        finally:
            file.close()
            file2.close()
    except IOError:
        pass
    return error_bf | 3.34375 | 3 |
Trading arbitrage strategy/strategies/arbitrage.py | iCoder333/Trading-arbitrage-strategy | 4 | 12773498 | <reponame>iCoder333/Trading-arbitrage-strategy
"""Provides the concrete arbitrage trading class"""
| 1.46875 | 1 |
magmap/io/yaml_io.py | clifduhn/magellanmapper | 0 | 12773499 | <reponame>clifduhn/magellanmapper
# YAML Input/Output
# Author: <NAME>, 2020
"""YAML file format input/output."""
import yaml
from magmap.io import libmag
def load_yaml(path, enums=None):
"""Load a YAML file with support for multiple documents and Enums.
Args:
path (str): Path to YAML file.
enums (dict): Dictionary mapping Enum names to Enum classes; defaults
to None. If a key or value in the YAML file matches an Enum name
followed by a period, the corresponding Enum will be used.
Returns:
List[dict]: Sequence of parsed dictionaries for each document within
a YAML file.
"""
def parse_enum_val(val):
if isinstance(val, str):
val_split = val.split(".")
if len(val_split) > 1 and val_split[0] in enums:
# replace with the corresponding Enum class
val = enums[val_split[0]][val_split[1]]
return val
def parse_enum(d):
# recursively parse Enum keys and values within nested dictionaries
out = {}
for key, val in d.items():
if isinstance(val, dict):
# recursively parse nested dictionaries
val = parse_enum(val)
elif libmag.is_seq(val):
# parse vals within lists
val = [parse_enum_val(v) for v in val]
else:
# parse a single val
val = parse_enum_val(val)
key = parse_enum_val(key)
out[key] = val
return out
with open(path) as yaml_file:
# load all documents into a generator
docs = yaml.load_all(yaml_file, Loader=yaml.FullLoader)
data = []
for doc in docs:
if enums:
doc = parse_enum(doc)
data.append(doc)
return data
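# Usage sketch (hypothetical Enum and file name): the string "Color.RED" in a
# YAML document would be parsed back into the Color.RED member.
#
#   from enum import Enum
#   class Color(Enum):
#       RED = 1
#   docs = load_yaml("settings.yaml", enums={"Color": Color})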
| 2.953125 | 3 |
paper/tested_configs/solov2_light_448_r50_fpn_coco.py | anion0278/SOLO | 0 | 12773500 | num_classes = 81
# model settings
model = dict(
type='SOLOv2',
#pretrained='torchvision://resnet50', # The backbone weights will be overwritten when using load_from or resume_from.
# https://github.com/open-mmlab/mmdetection/issues/7817#issuecomment-1108503826
backbone=dict(
type='ResNet',
depth=50,
in_channels = 3,
num_stages=4,
out_indices=(0, 1, 2, 3), # C2, C3, C4, C5
frozen_stages=-1, # -1 is unfrozen, 0 -> C1 is frozen, 1 - C1, C2 are frozen and so on
style='pytorch'),
# norm_eval = True # true by default, "you're fine-tuning to minimize training, it's typically best to keep batch normalization frozen"
# https://stackoverflow.com/questions/63016740/why-its-necessary-to-frozen-all-inner-state-of-a-batch-normalization-layer-when
    # required for training from scratch
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
bbox_head=dict(
type='SOLOv2Head',
num_classes=num_classes,
in_channels=256,
stacked_convs=2,
seg_feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)),
sigma=0.2,
num_grids=[40, 36, 24, 16, 12],
ins_out_channels=128,
loss_ins=dict(
type='DiceLoss',
use_sigmoid=True,
loss_weight=3.0),
loss_cate=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0)),
mask_feat_head=dict(
type='MaskFeatHead',
in_channels=256,
out_channels=128,
start_level=0,
end_level=3,
num_classes=128,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
)
# training and testing settings
train_cfg = dict()
test_cfg = dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
update_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100)
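# Note: kernel/sigma/update_thr parameterize SOLOv2's Matrix NMS score decay;
# nms_pre caps the candidate masks considered before suppression and
# max_per_img caps the detections kept per image.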
| 1.867188 | 2 |
hetsage/data.py | vakker/HetSAGE | 0 | 12773501 | <filename>hetsage/data.py
from typing import List, NamedTuple, Optional, Tuple
import networkx as nx
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torch_sparse import SparseTensor
from tqdm import tqdm
tensor = torch.FloatTensor
def get_props(g):
return {
data['nodetype']: list(data.get('properties', {}).keys())
for n, data in g.nodes(data=True)
}
def featurize(g, target_node, target_prop, include_target_label=True):
g = nx.convert_node_labels_to_integers(g)
edge_cats = set()
for n1, n2, data in tqdm(g.edges(data=True)):
edge_cats = edge_cats.union({data['label']})
edge_cats = sorted(list(edge_cats))
edge_feats = []
for n1, n2, k, data in tqdm(g.edges(data=True, keys=True)):
        edge_feats.append((data['label'] == np.array(edge_cats)).astype(int))  # np.int was removed from modern NumPy
edge_feats = tensor(np.stack(edge_feats))
features = {}
for n, data in tqdm(g.nodes(data=True)):
nt = data['nodetype']
if nt in features:
if data.get('prop', {}).keys() != features[nt]['prop'].keys():
raise ValueError('Inconsistent prop keys')
if data.get('single_cat', {}).keys() != features[nt]['single_cat'].keys():
raise ValueError('Inconsistent prop keys')
if data.get('multi_cat', {}).keys() != features[nt]['multi_cat'].keys():
raise ValueError('Inconsistent prop keys')
for p, v in data.get('prop', {}).items():
if v < features[nt]['prop'][p]['min']:
features[nt]['prop'][p]['min'] = v
if v > features[nt]['prop'][p]['max']:
features[nt]['prop'][p]['max'] = v
for p, v in data.get('single_cat', {}).items():
features[nt]['single_cat'][p].add(v)
for p, v in data.get('multi_cat', {}).items():
features[nt]['multi_cat'][p].update(v)
else:
prop = {p: {'min': v, 'max': v} for p, v in data.get('prop', {}).items()}
single_cat = {p: {v} for p, v in data.get('single_cat', {}).items()}
multi_cat = {p: set(v) for p, v in data.get('multi_cat', {}).items()}
features[nt] = {
'prop': prop,
'single_cat': single_cat,
'multi_cat': multi_cat,
}
for nt in features:
for pc, cats in features[nt]['single_cat'].items():
features[nt]['single_cat'][pc] = sorted(list(cats))
for pc, cats in features[nt]['multi_cat'].items():
features[nt]['multi_cat'][pc] = sorted(list(cats))
features[nt].update({'x_in': [], 'x_out': [], 'y': [], 'n_ids': []})
target_nodes = []
targets = []
for n, data in tqdm(g.nodes(data=True)):
nt = data['nodetype']
prop_keys = set(features[nt]['prop'].keys())
single_cat_keys = set(features[nt]['single_cat'].keys())
multi_cat_keys = set(features[nt]['multi_cat'].keys())
if nt == target_node:
prop_keys = prop_keys - {target_prop}
single_cat_keys = single_cat_keys - {target_prop}
multi_cat_keys = multi_cat_keys - {target_prop}
nd = data['nodetype']
prop = data.get('prop', {})
single_cats = data.get('single_cat', {})
multi_cats = data.get('multi_cat', {})
x_p = get_prop(prop, prop_keys, features[nt]['prop'])
x_sc = get_cat(single_cats, single_cat_keys, features[nt]['single_cat'])
x_mc = get_cat(multi_cats, multi_cat_keys, features[nt]['multi_cat'])
if nd == target_node:
if target_prop in features[nt]['prop']:
y = get_prop(prop, [target_prop], features[nt]['prop'])
elif target_prop in features[nt]['single_cat']:
y = get_cat(single_cats, [target_prop], features[nt]['single_cat'])
elif target_prop in features[nt]['multi_cat']:
y = get_cat(multi_cats, [target_prop], features[nt]['multi_cat'])
else:
raise ValueError(f'{target_prop} is not a property')
target_nodes.append(n)
targets.append(torch.nonzero(torch.tensor(y) == 1, as_tuple=False).squeeze())
else:
y = []
if include_target_label:
g.nodes[n]['x_in'] = get_tensor(np.concatenate([x_p, x_sc, x_mc, y]))
else:
g.nodes[n]['x_in'] = get_tensor(np.concatenate([x_p, x_sc, x_mc]))
g.nodes[n]['x_out'] = get_tensor(np.concatenate([x_p, x_sc, x_mc]))
g.nodes[n]['y'] = torch.tensor(y)
features[nt]['x_in'].append(g.nodes[n]['x_in'])
features[nt]['x_out'].append(g.nodes[n]['x_out'])
features[nt]['y'].append(g.nodes[n]['y'])
features[nt]['n_ids'].append(torch.tensor(n))
for nt in features:
features[nt]['x_in'] = torch.stack(features[nt]['x_in'])
features[nt]['x_out'] = torch.stack(features[nt]['x_out'])
features[nt]['y'] = torch.stack(features[nt]['y'])
features[nt]['n_ids'] = torch.stack(features[nt]['n_ids'])
target_nodes = torch.LongTensor(target_nodes)
targets = torch.stack(targets)
return g, target_nodes, targets, features, edge_feats
def get_tensor(np_arr):
if np_arr.shape[0] > 0:
return tensor(np_arr)
return tensor([0])
def get_cat(cats, cat_keys, all_cats):
x_c = []
for k in cat_keys:
if not isinstance(cats[k], list):
cs = [cats[k]]
else:
cs = cats[k]
c_ = []
for i, c in enumerate(all_cats[k]):
if c in cs:
c_.append(1)
else:
c_.append(0)
x_c.append(c_)
if x_c:
return np.concatenate(x_c)
return np.array(x_c)
def get_prop(props, prop_keys, all_props, normalize=True):
x_p = np.array([
norm(props[k], all_props[k]['min'], all_props[k]['max']) if normalize else props[k]
for k in prop_keys
])
return x_p
def norm(v, v_min, v_max):
    # min-max scale v into [0, 1]; constant features (v_min == v_max) map to 0.0
    if v_max == v_min:
        return 0.0
    return (v - v_min) / (v_max - v_min)
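# e.g. norm(5.0, 0.0, 10.0) == 0.5, and a constant feature falls back to 0.0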
class NeighborSampler(torch.utils.data.DataLoader):
def __init__(self,
edge_index: torch.Tensor,
edge_features: torch.Tensor,
sizes: List[int],
node_idx: Optional[torch.Tensor] = None,
num_nodes: Optional[int] = None,
flow: str = "source_to_target",
**kwargs):
N = int(edge_index.max() + 1) if num_nodes is None else num_nodes
adj = SparseTensor(row=edge_index[0],
col=edge_index[1],
value=edge_features,
sparse_sizes=(N, N),
is_sorted=False)
adj = adj.t() if flow == 'source_to_target' else adj
self.adj = adj
if node_idx is None:
node_idx = torch.arange(N)
elif node_idx.dtype == torch.bool:
node_idx = node_idx.nonzero(as_tuple=False).view(-1)
self.sizes = sizes
self.flow = flow
assert self.flow in ['source_to_target', 'target_to_source']
super(NeighborSampler, self).__init__(node_idx.tolist(), collate_fn=self.sample, **kwargs)
def sample(self, batch):
if not isinstance(batch, torch.Tensor):
batch = torch.tensor(batch)
batch_size: int = len(batch)
n_id_offset = 0
n_id_map = []
        edge_indices = [[] for _ in self.sizes]
e_feats = [[] for _ in self.sizes]
for target_id in batch:
n_id = target_id.unsqueeze(dim=0)
n_id_targets = []
for i, size in enumerate(self.sizes):
n_id_targets.append(n_id)
adj, n_id = self.adj.sample_adj(n_id, size, replace=False)
if self.flow == 'source_to_target':
adj = adj.t()
row, col, e_feat = adj.coo()
row += n_id_offset
col += n_id_offset
size = adj.sparse_sizes()
edge_index = torch.stack([row, col], dim=0)
                edge_indices[i].append(edge_index)
e_feats[i].append(e_feat)
is_target = n_id == target_id
n_id_layers = torch.zeros_like(n_id) # * len(n_id_targets)
for i, targets in enumerate(reversed(n_id_targets)):
id_in_layer = [idx for idx, n in enumerate(n_id) if n in targets]
n_id_layers[id_in_layer] = i + 1
n_id_map.append(torch.stack([n_id, is_target, n_id_layers], dim=1))
n_id_offset += len(n_id)
n_id_map = torch.cat(n_id_map)
_, sorted_idx = torch.sort(n_id_map[:, 2], descending=True)
_, sorted_idx_inv = torch.sort(sorted_idx)
n_id_map = n_id_map[sorted_idx]
adjs = []
target_size = len(batch)
for i, size in enumerate(self.sizes):
            edge_index = torch.cat(edge_indices[i], dim=-1)
edge_index = reindex(sorted_idx, sorted_idx_inv, edge_index)
e_feat = torch.cat(e_feats[i], dim=0)
M = edge_index[0].max().item() + 1
N = edge_index[1].max().item() + 1
size = (M, N)
adjs.append(Adj(edge_index, e_feat, size, target_size))
target_size = M
return batch_size, n_id_map, adjs[::-1]
def __repr__(self):
return '{}(sizes={})'.format(self.__class__.__name__, self.sizes)
def reindex(idx_map, idx_map_inv, edge_index):
edge_reindex = torch.ones_like(edge_index) * -1
for i in range(edge_reindex.shape[1]):
edge_reindex[0, i] = idx_map_inv[edge_index[0, i]]
edge_reindex[1, i] = idx_map_inv[edge_index[1, i]]
return edge_reindex
class DataManager:
def __init__(self,
graph_file,
target,
include_target_label=True,
neighbor_sizes=[20, 20],
batch_size=200,
workers=1,
target_node_lim=None,
seed=0):
# load graph
        g = nx.read_gpickle(graph_file)
self.neighbor_steps = len(neighbor_sizes)
# featurize
target_node, target_prop = target.split(':')
self.target_node = target_node
self.g, self.target_nodes, self.targets, self.node_features, self.edge_feats = featurize(
g, target_node, target_prop, include_target_label)
edge_idx = torch.tensor(list(self.g.edges)).t().contiguous()
self.targets_sparse = SparseTensor(row=self.target_nodes,
col=torch.zeros_like(self.target_nodes),
value=self.targets)
self.node_map = torch.zeros((len(self.g.nodes), 2), dtype=torch.long)
for i, _ in enumerate(self.node_map):
for node_type_id, (node_type, node_props) in enumerate(self.node_features.items()):
idx = torch.nonzero(i == node_props['n_ids'], as_tuple=False)
if idx.shape[0] == 1:
idx = idx.squeeze(dim=-1)
self.node_map[i, 0] = node_type_id
self.node_map[i, 1] = idx
if target_node_lim:
k = target_node_lim
else:
k = self.target_nodes.size(0) + 1
# perm = torch.randperm(self.target_nodes.size(0))
# subset_idx = perm[:k]
# last_tng_id = int(0.8 * subset_idx.size(0))
# tng_idx, _ = torch.sort(subset_idx[:last_tng_id])
# val_idx, _ = torch.sort(subset_idx[last_tng_id:])
splits = train_test_split(self.target_nodes[:k],
self.targets[:k],
stratify=self.targets[:k],
test_size=0.2,
random_state=seed)
self.tng_target_nodes, self.val_target_nodes, self.tng_targets, self.val_targets = splits
# self.tng_target_nodes = self.target_nodes[tng_idx]
# self.tng_targets = self.targets[tng_idx]
# self.val_target_nodes = self.target_nodes[val_idx]
# self.val_targets = self.targets[val_idx]
# tng_edge_idx = self.filter_edge_index(edge_idx, self.val_target_nodes)
unique_targets, target_counts = torch.unique(self.targets, return_counts=True)
self.target_weights = 1 / target_counts.float()
self.target_weights /= self.target_weights.sum()
print('Data stats', len(self.targets), 100 * target_counts / float(len(self.targets)))
print('Tng len', len(self.tng_targets))
print('Val len', len(self.val_targets))
self.tng_loader = NeighborSampler(
# tng_edge_idx,
edge_idx,
self.edge_feats,
node_idx=self.tng_target_nodes,
sizes=neighbor_sizes,
batch_size=batch_size,
shuffle=True,
num_workers=workers,
pin_memory=True,
drop_last=False,
)
self.val_loader = NeighborSampler(
edge_idx,
self.edge_feats,
node_idx=self.val_target_nodes,
sizes=neighbor_sizes,
batch_size=batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True,
drop_last=False,
)
self.graph_info = {
'in_nodes': {},
}
for node_type, node_props in self.node_features.items():
self.graph_info['in_nodes'][node_type] = {'in_size': node_props['x_in'].shape[1]}
self.graph_info['target_node'] = {
'in_size': self.node_features[target_node]['x_out'].shape[1],
'out_size': self.node_features[target_node]['y'].shape[1]
}
self.graph_info['edges'] = {'in_size': self.edge_feats.shape[1]}
def filter_edge_index(self, edge_idx, node_idx):
mask = [
i for i, edge in enumerate(edge_idx.t())
if not (edge[0] in node_idx or edge[1] in node_idx)
]
mask = torch.LongTensor(mask)
return edge_idx[:, mask]
def get_targets(self, n_id):
_, _, value = self.targets_sparse[n_id].coo()
return value
def get_id_map(self, node_id):
node_map = {
node_type: {
'x': [],
'h_id': []
}
for node_type, node_props in self.node_features.items()
}
node_map.update({'target': {'x': [], 'h_id': []}})
nodes = [
torch.tensor(range(len(node_id))).unsqueeze(-1),
node_id[:, 1].unsqueeze(-1),
self.node_map[node_id[:, 0]],
]
nodes = torch.cat(nodes, dim=1)
for node_type_id, node_type in enumerate(list(self.node_features.keys())):
idx = (nodes[:, 1] == 1) & (nodes[:, 2] == node_type_id)
if torch.any(idx):
node_map['target']['x'] = self.node_features[node_type]['x_out'][nodes[idx, 3]]
node_map['target']['h_id'] = nodes[idx, 0]
idx = (nodes[:, 1] != 1) & (nodes[:, 2] == node_type_id)
if torch.any(idx):
node_map[node_type]['x'] = self.node_features[node_type]['x_in'][nodes[idx, 3]]
node_map[node_type]['h_id'] = nodes[idx, 0]
node_map_out = {}
for node_type, n_map in node_map.items():
if len(node_map[node_type]['x']) == 0:
continue
node_map_out[node_type] = NodeMap(
node_map[node_type]['x'],
node_map[node_type]['h_id'].squeeze(),
)
return node_map_out
class NodeMap(NamedTuple):
x: torch.Tensor
h_id: torch.Tensor
def to(self, *args, **kwargs):
return NodeMap(
self.x.to(*args, **kwargs),
self.h_id.to(*args, **kwargs),
)
class Adj(NamedTuple):
edge_index: torch.Tensor
edge_features: torch.Tensor
size: Tuple[int, int]
target_size: int
def to(self, *args, **kwargs):
return Adj(
self.edge_index.to(*args, **kwargs),
self.edge_features.to(*args, **kwargs),
self.size,
self.target_size
)
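# Hypothetical end-to-end sketch (the graph path and "NodeType:prop" target
# string below are illustrative only):
#
#   dm = DataManager("graph.gpickle", target="Person:label",
#                    neighbor_sizes=[10, 10], batch_size=64)
#   for batch_size, n_id_map, adjs in dm.tng_loader:
#       node_map = dm.get_id_map(n_id_map)              # per-type features
#       target_ids = n_id_map[n_id_map[:, 1] == 1, 0]   # original node ids
#       y = dm.get_targets(target_ids)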
| 2.234375 | 2 |
models/train_classifier.py | MoustafaaAshraf/Disaster-Response-Message-Classification | 0 | 12773502 | <filename>models/train_classifier.py
# importing needed libraries
import sys
import pandas as pd
import re
from sqlalchemy import create_engine
import pickle
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
def load_data(database_filepath):
'''
loading the dataframe from the database file
:param database_filepath: location of the saved dataframe
:return:
        X: Series of the messages used as model input
        y: DataFrame of the multi-label targets used to train the model
        category_names: a list of the names of the multi-label target columns
'''
# establishing the connection with the database
engine = create_engine('sqlite:///' + database_filepath)
# reading the model from the SQL database
df = pd.read_sql('etltable', engine)
# Slicing the dataframe to acquire the targets and labels
X = df.loc[:, 'message']
y = df.iloc[:, 4:]
# Extracting the names of the target columns
category_names = y.columns
return X, y, category_names
def tokenize(text):
'''
turing plain text into cleaned list of tokens for cleaned words
:param text: a plain text message
:return: clean_tokens: a list of cleaned tokens from the text, cleaned from punctuations, stopwords and words are
lemmatized
'''
# Removing the punctuation from text by keeping only a->z, A->Z and 0-9 characters from the text
text = re.sub(r'[^a-zA-Z0-9]', ' ', text)
# Splitting the text into tokens/words through the word_tokenize method from nltk library
tokens = word_tokenize(text)
# list comprehension checking every word in the list and choosing the words which are not stop words
tokens = [w for w in tokens if w not in stopwords.words('english')]
    # Instantiating an empty list for cleaned tokens
    clean_tokens = []
    # Lemmatizer reducing words to their dictionary form (uses the 'wordnet' corpus downloaded above)
    lemmatizer = nltk.WordNetLemmatizer()
    # looping through tokens
    for tok in tokens:
        # lower-casing, stripping spaces and lemmatizing each word
        clean_tok = lemmatizer.lemmatize(tok.lower().strip())
        # adding the token to the cleaned_tokens list
        clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
'''
creating a pipeline for modelling the database of categories and labels
:return: model to be trained using grid search
'''
pipeline = Pipeline([
# creating a count vectorizer using the tokens provided by a list
('vect', CountVectorizer(tokenizer=tokenize)),
# Providing the tfidf transformer from sklearn
('tfidf', TfidfTransformer()),
# an instance of multi-class classifier based on RandomForestClassifier
('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators=50)))
])
parameters = {'clf__estimator__max_depth': [10, None],
'clf__estimator__min_samples_leaf': [5, 10]}
grid = GridSearchCV(pipeline, parameters)
return grid
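# Note on the grid keys above: "clf__estimator__max_depth" routes through the
# Pipeline step named "clf" and then through MultiOutputClassifier's wrapped
# estimator, i.e. it tunes the underlying RandomForestClassifier.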
def evaluate_model(model, X_test, Y_test, category_names):
'''
a method used for evaluating the performance of the model by printing the metrics for classification
:param model: pipeline for preprocessing and modelling
    :param X_test: a dataframe of the messages (features) used to make predictions
    :param Y_test: a dataframe of the true target labels
:param category_names: a list of the names of targets used for training the model
'''
# Predicting the classification prediction of the created model
preds = model.predict(X_test)
# Iterating through the columns of the testing columns and comparing them to the predicted classification
for idx, col in enumerate(category_names):
# Printing the classification metrics
print(col, classification_report(Y_test.iloc[:, idx], preds[:, idx]))
def save_model(model, model_filepath):
'''
Saving a file of the trained model
:param model: trained model
:param model_filepath: location of the trained model file
'''
# Creating a pathfile to save the model
model_pkl = open(model_filepath, 'wb')
# Saving the model into the created location
pickle.dump(model, model_pkl)
# Closing
model_pkl.close()
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | 3.078125 | 3 |
src/apps/configs/api/serializers.py | MatsuiLin101/aprp | 2 | 12773503 | from rest_framework.serializers import (
ModelSerializer,
)
from apps.configs.models import (
Config,
AbstractProduct,
Chart,
Type,
Source,
Unit,
)
class ChartSerializer(ModelSerializer):
class Meta:
model = Chart
fields = '__all__'
class TypeSerializer(ModelSerializer):
class Meta:
model = Type
fields = '__all__'
class SourceSerializer(ModelSerializer):
class Meta:
model = Source
fields = '__all__'
class ConfigSerializer(ModelSerializer):
class Meta:
model = Config
fields = '__all__'
class AbstractProductSerializer(ModelSerializer):
class Meta:
model = AbstractProduct
fields = '__all__'
class UnitSerializer(ModelSerializer):
class Meta:
model = Unit
fields = '__all__'
| 2.09375 | 2 |
src/multi2convai/pipelines/base.py | inovex/multi2convai | 7 | 12773504 | from abc import ABC, abstractmethod
from typing import Any
class BasePipelineConfig:
"""Base config."""
def __init__(self, *args, **kwargs):
pass
class BasePipeline(ABC):
"""Abstract base class for end-to-end pipelines.
Args:
config (:class:`~BasePipelineConfig`): pipeline configuration
"""
def __init__(self, config: BasePipelineConfig):
self.config = config
@abstractmethod
def setup(self) -> None:
"""Performs initial setups of the pipeline. Intended to contain all heavy operations that need to be executed
once while setting up, e.g. loading models into memory.
"""
raise NotImplementedError
@abstractmethod
def run(self, *args, **kwargs) -> Any:
"""Runs pipeline end-to-end. Child class needs to overwrite with specific interfaces for input and output."""
raise NotImplementedError
@abstractmethod
def get_metadata(self) -> dict:
"""Receives the currently loaded model as metadata."""
raise NotImplementedError
@abstractmethod
def cleanup(self) -> None:
"""Resets parameters assigned during runtime."""
raise NotImplementedError
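# A minimal concrete sketch (hypothetical class, not shipped with the package):
class _EchoPipeline(BasePipeline):
    """Toy pipeline that echoes its input and illustrates the required hooks."""
    def setup(self) -> None:
        self._model = "loaded"  # stand-in for an expensive one-time model load
    def run(self, text: str) -> str:
        return text
    def get_metadata(self) -> dict:
        return {"model": self._model}
    def cleanup(self) -> None:
        self._model = None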
| 3.421875 | 3 |
maml/compressed_sensing/compressed_sensing.py | epyzerknapp/maml-bsd | 2 | 12773505 | <reponame>epyzerknapp/maml-bsd
__author__ = 'epyzerknapp'
import numpy as np
import cPickle as pkl
from maml.compressed_sensing import twist_solver as twist
from copy import deepcopy
from maml.distances.tanimoto import tanimoto_similarity
"""
With compressed sensing we are looking to locate the maximum compression which we can use to reduce the dimensionality
of the feature representation. For each candidate feature size, we calculate the reconstruction error and stop once it
is small enough or has converged.
"""
def compress(X, desired_feature_size, A=None, saveA=True, Afilename='compression_matrix.pkl'):
"""
Compress a matrix of inputs using compressed sensing
:param X: inputs, numpy array
:param desired_feature_size: size of compressed features, int
:param A: transformation matrix, numpy array, default=None
:param saveA: dump the transformation matrix to a pickle, default=True
:param Afilename: name of the pickle which is dumped if saveA is True
:return: matrix of compressed inputs, numpy array
"""
m, n = X.shape
    if A is None:  # truth-testing a numpy array raises ValueError, so compare with None
        A = np.random.rand(desired_feature_size, m)
        if saveA:
            with open(Afilename, 'wb') as f:
                pkl.dump(A, f)  # pickle.dump expects a file object, not a path string
b = np.dot(A, X)
return A, b.T
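# Shape sketch: for X of shape (m, n) and A of shape (k, m), b = A.dot(X) has
# shape (k, n), so compress() returns (A, b.T) with b.T of shape (n, k),
# i.e. one k-dimensional compressed feature vector per column of X.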
def solve(b, A, tolA=1e-7, tolD=1e-9, verbose=False, **kwargs):
"""
    Solves b = A x for x (i.e. x = A^-1 b) to reconstruct the signal/inputs
:param b: compressed inputs, numpy array
:param A:transformation matrix, numpy array
:param tolA: tolerance for Twist in A
:param tolD: tolerance for Twist in D
:param verbose: print level
:param kwargs: contains options for the TwIST minimizer
    :return: dictionary of results with keys:
'lambdas', 'lambdas_debias', 'objective', 'times', 'debias_start', 'max_svd'
"""
solver = twist.TwistSolver(tolA=tolA, tolD=tolD, verbose=verbose, **kwargs)
values = solver.solve(b, A)
labels = 'lambdas', 'lambdas_debias', 'objective', 'times', 'debias_start', 'max_svd'
results = dict((label, value) for label, value in zip(labels, values))
return results
def calc_error(X, lambdas):
"""
Calculates reconstruction error for a compression
:param X: uncompressed inputs
:param lambdas: expansion coefficients, from the TwIST solver
:return: error dictionary
"""
reconstructed_inputs = lambdas
reconstructed_inputs[reconstructed_inputs>0.5] = 1
reconstructed_inputs[reconstructed_inputs<0.5] = 0
errors = dict()
errors['l2_norm'] = np.linalg.norm(X - reconstructed_inputs, ord=2)/ np.linalg.norm(X, ord=2)
errors['l_inf'] = np.linalg.norm(reconstructed_inputs - X, ord=np.Inf)
errors['frobenius'] = np.linalg.norm(reconstructed_inputs - X, ord=None)
errors['tanimoto'] = 1 - tanimoto_similarity(X, reconstructed_inputs)
return errors
def optimize_compression(X, feature_size_min=10, feature_size_max=200, reconstruction_tolerance=1e-02,
error_convergence=None, error_type='l2_norm', epsilon=1):
"""
This routine finds the best compression size which satisfies either a given reconstruction error, or a convergence
in the reconstruction error wrt compression size.
:param X: Uncompressed features
:param feature_size_min: minimum size of compressed feature (i.e maximum compression factor)
:param feature_size_max: maximum size of compressed feature (i.e. minimum compression factor)
:param reconstruction_tolerance: the minimium reconstruction error to terminate the search
:param error_convergence: a convergence in the error which would terminate the search, None for ignore
:param error_type: the method for calculating the error can be
* 'l2_norm' : Matrix -> 2-norm (largest sing. value) Vector -> sum(abs(x)**2)**(1./2)
* 'l_inf' : Matrix -> max(sum(abs(x), axis=1)) Vector -> max(abs(x))
* 'frobenius' : Matrix -> ||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2} Vector -> 2-norm
    :return: (A, b, dim, errors): compression matrix, compressed features, chosen feature size, and the error history
"""
dim = feature_size_min
a, b = compress(X, dim, saveA=False)
converged = False
res = solve(b, a, verbose=False, weight=1e-03, miniter=100, maxiter=2000)
try:
error = calc_error(X, res['lambdas'])
old_error = error[error_type]
except KeyError:
raise StandardError('Error type {} not recognized'.format(error_type))
errors = [error[error_type]]
while not converged:
a, b = compress(X, dim, saveA=False)
res = solve(b, a, verbose=False, weight=5e-03, miniter=100, maxiter=2000)
error = calc_error(X, res['lambdas'])[error_type]
errors.append(error)
grad = error - old_error
print dim, error, grad
if error < reconstruction_tolerance:
converged = True
elif grad < error_convergence:
converged = True
old_dim = deepcopy(dim)
old_error = deepcopy(error)
dim = dim+5
if dim > feature_size_max:
dim = old_dim
converged = True
return a, b, dim, errors
if __name__ == '__main__':
import hickle as hkl
import pylab as pl
import seaborn as sns
sns.set_context('poster')
sns.set_style('darkgrid')
sns.set_palette('Set2')
data = hkl.load('/home/epyzerknapp/Projects/columbus/playground/cep_timing_test/cep_test_50k.hkl')
inputs = data['512_morgans_r2'][:100].T
a, b, dim, errors = optimize_compression(inputs, error_type='tanimoto')
samples = range(0, len(errors))
samples = [x*5 for x in samples]
pl.plot(samples, errors)
ax = pl.gca()
ax.set_xticklabels(np.arange(0,204,5))
pl.ylabel('Tanimoto Reconstruction Error')
pl.xlabel('Number of features')
pl.show()
| 2.28125 | 2 |
5200_flask_app/app/forms.py | F-Zainab/SWArch | 0 | 12773506 | #Form classes can be declared here and imported into the routes view
#More to come soon
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class TestForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
| 2.515625 | 3 |
tempdir/login-app.py | Kennethvalerio212/design-app | 0 | 12773507 | <reponame>Kennethvalerio212/design-app
# Add to this file for the sample app lab
from flask import Flask
from flask import request
from flask import render_template
login = Flask(__name__)
@login.route("/")
def main():
return render_template("login.html")
if __name__ == "__main__":
login.run(host="0.0.0.0", port=5050) | 2.46875 | 2 |
analysis_vis/scripts/EpistasisCanSeqs.py | arubenstein/deep_seq | 0 | 12773508 | #!/usr/bin/env python
"""Create edges and nodes from a list of sequences that are a given hamming distance apart"""
import argparse
from general_seq import conv
from general_seq import seq_IO
def find_intermediates(starting_seq, ending_seq):
muts = [ (ch2, ch1!=ch2) for ch1, ch2 in zip(starting_seq, ending_seq) ]
list_inter = []
for ind, (mut, is_mut) in enumerate(muts):
if is_mut:
list_inter.append(starting_seq[0:ind] + mut + starting_seq[ind+1:])
return list_inter
def get_inter_fitness(starting_seq, ending_seq, dict_fitnesses):
list_inter = find_intermediates(starting_seq, ending_seq)
list_fit = [ dict_fitnesses.get(i) for i in list_inter ]
return list_fit
def calc_epi(list_fit, ending_fit):
n_list_fit = []
for item in list_fit:
if item == 1000:
fit = 1
elif item == 10:
fit = 0.5
elif item == 1:
fit = 0
n_list_fit.append(fit)
if ending_fit == 1000:
fit = 1
elif ending_fit == 10:
fit = 0.5
elif ending_fit == 1:
fit = 0
epi = (fit - 1.0) - sum([f - 1.0 for f in n_list_fit ])
return epi
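# Worked example: with the {1000: 1.0, 10: 0.5, 1: 0.0} fitness mapping above,
# two functional single mutants (1000, 1000) and a non-functional double (1)
# give epi = (0 - 1) - ((1 - 1) + (1 - 1)) = -1.0, i.e. negative epistasis.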
def main(list_sequence_names, output_prefix, canonical_file):
list_sequences = [] #list of list of sequences, where each item represents a label
extended_list_sequences = [] #flat list of sequences
labels = [] #labels for list_sequences
canonical_seqs = seq_IO.read_sequences(canonical_file)
for [filename, label] in list_sequence_names:
sequences = seq_IO.read_sequences(filename, additional_params=True, ind_type={1:float})
list_sequences.append(sequences)
extended_list_sequences.extend(sequences[:])
labels.append(label)
dict_sequences = { seq : fitness for (seq, fitness) in extended_list_sequences }
for canonical_seq in canonical_seqs:
mut_func = { "Both_Functional" : [], "Both_Nonfunctional" : [], "One_Functional" : [] }
mut_nonfunc = { "Both_Functional" : [], "Both_Nonfunctional" : [], "One_Functional" : [] }
outfile_epi = '%s_%s_epi.csv' % (output_prefix, canonical_seq)
epi_out = open(outfile_epi,"w")
print canonical_seq
epi = {}
double_mut = [ seq for seq in extended_list_sequences if conv.hamdist(canonical_seq, seq[0]) == 2 ]
for seq_fit in extended_list_sequences:
seq = seq_fit[0]
fit = seq_fit[1]
mut_dict = mut_func if fit == 1000 else mut_nonfunc
list_fit = get_inter_fitness(canonical_seq, seq, dict_sequences)
if len(list_fit) <= 1:
continue
if all(list_fit):
if seq_fit in double_mut:
sum_fit = sum(list_fit)
print sum_fit
if sum_fit == 2000:
mut_dict["Both_Functional"].append((canonical_seq, seq))
elif sum_fit == 0:
mut_dict["Both_Nonfunctional"].append((canonical_seq, seq))
elif sum_fit == 1000:
mut_dict["One_Functional"].append((canonical_seq, seq))
epi[seq] = (calc_epi(list_fit, fit),list_fit+[fit])
epi_out.write("Total Double Mutants,%s\n" % (len(double_mut)))
for label, list_muts in mut_func.items():
for (can, seq) in list_muts:
epi_out.write("End Functional,%s,%s,%s\n" % (label,can,seq) )
for label, list_muts in mut_nonfunc.items():
for (can, seq) in list_muts:
epi_out.write("End Functional,%s,%s,%s\n" % (label,can,seq) )
epi_out.write("\n".join(["{0},{1},{2}".format(seq,epi,",".join([str(f) for f in fits])) for seq, (epi,fits) in epi.items()] ) )
epi_out.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument ('--sequence_list', '-d', nargs=2, action='append', help="text file which contains sequences and the label you want to use for the set")
parser.add_argument ('--output_prefix', help='output file prefix')
parser.add_argument ('--canonical_file', help='file of canonical_sequences')
args = parser.parse_args()
main(args.sequence_list, args.output_prefix, args.canonical_file)
| 2.765625 | 3 |
sync.py | vadniks/MultiDandyBot_Client | 0 | 12773509 | """
MIT License
Copyright (c) 2022 <NAME> (https://github.com/vadniks)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Callable, List, Tuple
import requests as rq
import json
from time import sleep
from threading import Thread
sid = -1
pid = 0
name: str
level = 0
_HOST = 'http://127.0.0.1:5000'
_THRESHOLD = 0.5 # seconds
_waiterThread: Thread
_canWaitForServer = True
solo = True
_isReady = False
def connect(_name: str, script: str) -> bool:
global sid, pid, name
name = _name
try:
rsp: rq.Response = rq.post(f'{_HOST}/new',
json={'name': name, 'script': script, 'level': level})
except Exception: return False
if rsp.status_code == 200:
if (sid := int(json.loads(rsp.text)['sid'])) == -1:
return False
pid = int(json.loads(rsp.text)['pid'])
return True
# id name status
def checkForPlayers() -> List[Tuple[int, str, bool]] | None:
try:
rsp: rq.Response = rq.get(f'{_HOST}/chk/{pid}')
except Exception: return None
if rsp.status_code != 200:
return None
else:
a = json.loads(rsp.text)
_list = [(int(i[0]), i[1], bool(i[2])) for i in a]
return _list
def waitForPlayers(onWait: Callable, onFinish: Callable) -> Callable: # stop
global _waiterThread, _canWaitForServer, _isReady
def checkStatus(players: List[Tuple[int, str, bool]]) -> bool:
result = True
for i in players: result = result and i[2]
return result
def wait():
while _canWaitForServer:
if (players := checkForPlayers()) is not None:
if len(players) > 0 and checkStatus(players) and _isReady:
onFinish(players)
break
else:
onWait(players)
else:
onWait(None)
sleep(_THRESHOLD)
_waiterThread = Thread(target=wait)
_waiterThread.daemon = True
_waiterThread.start()
return endWaiter
def endWaiter():
global _canWaitForServer
_canWaitForServer = False
_waiterThread.join()
def quitt():
try: rq.post(f'{_HOST}/qt/{pid}')
except Exception: pass
# id name level x y gold
def tracePlayers() -> List[Tuple[int, str, int, int, int, int]] | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/trc/{sid}/{pid}')
except Exception: return None
if rsp.status_code != 200: return None
jsn = json.loads(rsp.text)
_list = []
for i in jsn:
_list.append((int(i[0]), i[1], int(i[2]), int(i[3]),
int(i[4]), int(i[5])))
return _list
def updatePlayer(lvl: int, x: int, y: int, goldAmount: int):
if solo: return
try:
rq.post(f'{_HOST}/upd/{pid}',
json={'level': lvl, 'x': x, 'y': y, 'gold': goldAmount})
except Exception: pass
# x y
def updateBoard(goldTakenFrom: Tuple[int, int]):
if solo: return
try: rq.post(f'{_HOST}/brd/{sid}/{level}',
json={'pid': pid, 'gtf_x': goldTakenFrom[0], 'gtf_y': goldTakenFrom[1]})
except Exception: pass
# pid x y
def traceBoard() -> List[Tuple[int, int, int]] | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/trc_b/{sid}/{pid}/{level}')
except Exception: return None
if rsp.status_code != 200: return None
jsn = json.loads(rsp.text)
_list = []
for i in jsn:
_list.append((int(i[0]), int(i[1]), int(i[2])))
return _list
def getCurrentGoldAmountOnBoard() -> int | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/gld/{sid}/{level}')
except Exception: return None
if rsp.status_code != 200: return None
return int(rsp.text)
# name score
def getSavedPlayers() -> List[Tuple[str, int]] | None:
try: rsp: rq.Response = rq.get(f'{_HOST}/db',
json={'mode': 'select', 'pid': pid})
except Exception: return None
if rsp.status_code != 200: return None
jsn = json.loads(rsp.text)
_list = []
[_list.append((i[0], int(i[1]))) for i in jsn]
return _list
def saveCurrentPlayerResult():
if solo: return
    try: rq.post(f'{_HOST}/db',  # the response is not used here
                 json={'mode': 'insert', 'pid': pid})
except Exception: pass
def notifyPlayerIsReady():
global _isReady
_isReady = True
try: rq.post(f'{_HOST}/rd/{pid}')
except Exception: pass
def hasPlayerLeft(pid: int) -> bool | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/hpl/{pid}/{sid}')
except Exception: return None
if rsp.status_code != 200: return None
    # assumes the endpoint returns the literal text "True"/"False";
    # bool(rsp.text) would be True for any non-empty body, including "False"
    return rsp.text.strip().lower() == 'true'
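# Hypothetical client flow sketch (the player name and script string below are
# illustrative; the server endpoints above must be running at _HOST):
#
#   if connect("alice", script="bot-v1"):
#       stop = waitForPlayers(onWait=lambda ps: None,
#                             onFinish=lambda ps: print("starting", ps))
#       notifyPlayerIsReady()
#       updatePlayer(lvl=level, x=0, y=0, goldAmount=0)
#       quitt()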
| 2.359375 | 2 |
5-capstone/src/spark/core/builder.py | natanascimento/data-engineer-udacity | 1 | 12773510 | from pyspark.sql import SparkSession
class SparkSessionBuilder:
@staticmethod
def build():
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0,") \
.master("local[*]") \
.getOrCreate()
return spark | 2.328125 | 2 |
hydra/example0.py | futabato/mlops_demo | 0 | 12773511 | import hydra
from omegaconf import DictConfig
@hydra.main(config_path="conf", config_name="config.yaml")
def main(cfg: DictConfig) -> None:
print(cfg)
if __name__ == "__main__":
main()
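# Assumed layout (illustrative): a conf/config.yaml next to this script, e.g.
#   db:
#     host: localhost
# which Hydra loads and prints as the DictConfig above.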
| 1.773438 | 2 |
catalog/testing/logged_unittest.py | eoss-cloud/madxxx_catalog_api | 0 | 12773512 | <gh_stars>0
import logging
import sys
import unittest
class LogMyTestCase(type):
def __new__(cls, name, bases, dct):
# if the TestCase already provides setUp, wrap it
if 'setUp' in dct:
setUp = dct['setUp']
else:
setUp = lambda self: None
def wrappedSetUp(self):
# for hdlr in self.logger.handlers:
# self.logger.removeHandler(hdlr)
self.hdlr = logging.StreamHandler(sys.stdout)
self.logger.addHandler(self.hdlr)
setUp(self)
dct['setUp'] = wrappedSetUp
# same for tearDown
if 'tearDown' in dct:
tearDown = dct['tearDown']
else:
tearDown = lambda self: None
def wrappedTearDown(self):
tearDown(self)
self.logger.removeHandler(self.hdlr)
dct['tearDown'] = wrappedTearDown
# return the class instance with the replaced setUp/tearDown
return type.__new__(cls, name, bases, dct)
class LoggedTestCase(unittest.TestCase):
__metaclass__ = LogMyTestCase
logger = logging.getLogger()
logger.setLevel(logging.INFO) # or whatever you prefer
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
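# Usage sketch: subclasses get a stdout handler attached and removed around
# each test by the metaclass-wrapped setUp/tearDown, e.g.
#
#   class MyTest(LoggedTestCase):
#       def test_something(self):
#           self.logger.info("visible on stdout during this test")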
| 2.625 | 3 |
PIDS_parse/Logic2plot.py | charlieromano/FIUBA_PIDS2020 | 0 | 12773513 | <filename>PIDS_parse/Logic2plot.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
Path='/home/charlieromano/Documents/Academico/CESE/Proyecto2020/TallerCastelar/Mediciones/traza/dataOut'
dt = pd.read_csv(Path+'/Wave_bin.txt', header=34, sep='\t', index_col=0)
dt.head()
dt.columns
dt.dtypes
# replace ' ' with ' '
# create df from dt with the data channels
df = dt[['A0','A1']]
# replace 'U' with NaN --> None
df = df.replace('U', np.nan)  # or .replace('-', {0: None})
df['A1'].plot()
plt.show()
# TO DO:
# cast to binary if needed
# convert the index to a time series
# adjust the timestep and condense the data
# User Story
# I want to read the frequency of the data signal
# I want to read the data in hex
| 2.84375 | 3 |
local-lighttpd/uci.py | vaginessa/OpenWireless | 1 | 12773514 | <gh_stars>1-10
"""
Override implementation for routerapi/uci.py when running local-lighttpd.
Instead of calling out to /sbin/uci, which probably doesn't exist, store
name-value pairs in a JSON file in OVERRIDE_ETC.
"""
import json
import os
import common
uci_path = os.path.join(common.get_etc(), 'uci.json')
try:
with open(uci_path) as f:
data = json.loads(f.read())
except IOError:
data = {}
def get(name):
validate(name)
return data.get(name)
def set(name, value):
validate(name)
validate(value)
data[name] = value
def commit(namespace):
with open(uci_path, 'w') as f:
f.write(json.dumps(data))
def validate(string):
if len(string) > 200:
raise Exception('String input to UCI too long.')
if string.find('\00') != -1:
raise Exception('Invalid input: contains null bytes.')
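# Usage sketch mirroring the /sbin/uci semantics this stub emulates (note that
# commit() persists the whole store regardless of the namespace argument):
#
#   set("wireless.radio0.channel", "11")
#   commit("wireless")
#   assert get("wireless.radio0.channel") == "11"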
| 2.46875 | 2 |
String/Easy/firstUniqueString.py | pavi-ninjaac/leetcode | 0 | 12773515 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 22:57:42 2020
@author: ninjaac
"""
"""
Given a string, find the first non-repeating character in it and return its index. If it doesn't exist, return -1.
Examples:
s = "leetcode"
return 0.
s = "loveleetcode"
return 2.
"""
from collections import Counter
class Solution:
@staticmethod
def firstUniqChar(s):
if s=="":
return -1
if s==" ":
return -1
freq=Counter(s)
print(freq.items())
for let,c in freq.items():
if c==1:
return s.index(let)
return -1
print(Solution().firstUniqChar(s='leetcode')) | 3.84375 | 4 |
apps/mh2/mh2mhx.py | teddydragoone/makehuman1.0.0alpha7 | 2 | 12773516 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehuman.org/
**Code Home Page:** http://code.google.com/p/makehuman/
**Authors:** <NAME>
**Copyright(c):** MakeHuman Team 2001-2009
**Licensing:** GPL3 (see also http://sites.google.com/site/makehumandocs/licensing)
**Coding Standards:** See http://sites.google.com/site/makehumandocs/developers-guide
Abstract
--------
Redirect to new MHX location
"""
import sys
import os
mhxPath = os.path.realpath('./shared/mhx')
if mhxPath not in sys.path:
sys.path.append(mhxPath)
import mhx_main
def exportMhx(human, filename, options=None):
mhx_main.exportMhx(human, filename, options)
| 1.914063 | 2 |
src/main.py | jk96491/C-COMA | 10 | 12773517 | <reponame>jk96491/C-COMA
import numpy as np
import torch as th
from utils.logging import get_logger
import random
from run import run
import config_util as cu
'''
Algorithm selection guide (use the file name under config/algs as-is):
to run QMIX   -> 'QMIX'
to run C-COMA -> 'C-COMA'
'''
if __name__ == '__main__':
logger = get_logger()
algorithm = 'C-COMA'
minigame = 'Dynamic_env_Training'
config = cu.config_copy(cu.get_config(algorithm, minigame))
random_Seed = random.randrange(0, 16546)
np.random.seed(random_Seed)
th.manual_seed(random_Seed)
config['env_args']['seed'] = random_Seed
run(config, logger) | 2.03125 | 2 |
decrypt.py | cjntifo/pentest-tools | 0 | 12773518 | #!/usr/bin/python
#
# Gpprefdecrypt - Decrypt the password of local users added via Windows 2008 Group Policy Preferences.
#
# This tool decrypts the cpassword attribute value embedded in the Groups.xml file stored in the domain controller's Sysvol share.
#
# Updated by <NAME>
# Edited to run with Python 3.x
#
import sys, codecs
from Crypto.Cipher import AES
from base64 import b64decode
def decrypt(cpassword):
# Init the key
# From MSDN: http://msdn.microsoft.com/en-us/library/2c15cbf0-f086-4c74-8b70-1f2fa45dd4be%28v=PROT.13%29#endNote2
#key = """
#4e 99 06 e8 <KEY> fa f4 93 10 62 0f fe e8
#f4 96 e8 06 cc 05 79 90 20 9b 09 a4 33 b6 6c 1b
#""".replace(" ","").replace("\n","").replace("\n","")
key = ("<KEY>"
"<KEY>")
decode_hex = codecs.getdecoder("hex_codec")
key = decode_hex(key)[0]
#print("decoded key " + str(key))
# Add padding to the base64 string and decode it
    cpassword += "=" * ((4 - len(cpassword) % 4) % 4)
password = b64decode(cpassword)
# Decrypt the password
o = AES.new(key, AES.MODE_CBC, ("\x00" * 16).encode("utf8")).decrypt(password)
#iv = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
#o = AES.new(key, AES.MODE_CBC, iv).decrypt(password)
# Print it
print (o.decode('utf-16'))
    return o.decode('utf-16')
if(len(sys.argv) != 2):
print ("Usage: decrypt.py <cpassword>")
sys.exit(0)
cpassword = sys.argv[1]
try:
decrypt(cpassword)
except:
print("Input correct cpassword format...") | 2.6875 | 3 |
test/basic/conf.py | fpoirotte/sphinxcontrib-paradoxy | 1 | 12773519 | <gh_stars>1-10
from os import path
extensions = ['sphinxcontrib.paradoxy']
exclude_patterns = ['_build']
version = '1.2.3'
paradoxy = {
'example': (
'http://example.com/doc/',
path.join(path.dirname(path.abspath('.')), 'doxygen', 'Example.xml')
),
}
| 1.359375 | 1 |
app/movie/tests/test_tags_api.py | evanmcpheron/Django-starter | 0 | 12773520 | <gh_stars>0
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from movie.serializers import TagSerializer
TAGS_URL = reverse('movie:tag-list')
class PublicTagsApiTest(TestCase):
    # test the publicly available tags API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTest(TestCase):
    # test the tags API for an authenticated user
def setUp(self):
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'123456'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
# test retrieving tags
Tag.objects.create(user=self.user, name='Comedy')
Tag.objects.create(user=self.user, name='Horror')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
        # test that returned tags are limited to the authenticated user
user2 = get_user_model().objects.create_user(
'<EMAIL>',
'123456'
)
Tag.objects.create(user=user2, name='Comedy')
tag = Tag.objects.create(user=self.user, name='Drama')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
| 2.5 | 2 |
civil/models.py | AeroYoung/PyCSE | 0 | 12773521 | <reponame>AeroYoung/PyCSE
from math import floor
from django.db.models import Sum
from django.db import models
from users.models import User
class Category(models.Model):
category_name = models.CharField(
primary_key=True,
unique=True,
max_length=200,
verbose_name='题型大类'
)
def __str__(self):
return self.category_name
def category_num(self):
total = self.topic_set.aggregate(nums=Sum('topic_num'))
return total['nums']
category_num.admin_order_field = 'category_name'
category_num.short_description = '题量'
class Meta:
verbose_name = '题型大类'
verbose_name_plural = '题型大类'
class Topic(models.Model):
category = models.ForeignKey(Category, on_delete=models.CASCADE)
topic_name = models.CharField(
primary_key=True,
unique=True,
max_length=200,
verbose_name='题型小类'
)
topic_num = models.PositiveIntegerField(
default=1,
verbose_name='题量',
)
single_score = models.FloatField(
default=1.0,
verbose_name='分值',
)
def __str__(self):
if self.topic_name == self.category.category_name:
return self.topic_name
else:
return self.category.category_name + '-' + self.topic_name
def total_score(self):
return round(self.single_score * self.topic_num, 1)
def rated_time_span(self):
return floor(self.total_score() * Practice.SCORE_PER_MIN)
rated_time_span.admin_order_field = 'practice_num'
rated_time_span.short_description = '额定耗时'
total_score.admin_order_field = 'single_score'
total_score.short_description = '总分'
def probability(self):
total = Topic.objects.aggregate(nums=Sum('topic_num'))
total_num = total['nums']
return self.topic_num / total_num
def probability_str(self):
return "%.2f%%" % (self.probability() * 100)
probability_str.admin_order_field = 'topic_num'
probability_str.short_description = '概率'
class Meta:
verbose_name = '题型小类'
verbose_name_plural = '题型小类'
class Practice(models.Model):
    SCORE_PER_MIN = 120 / 150  # minutes per point (despite the name): a 120-minute exam is worth 150 points in total
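    # e.g. a topic worth 10 points is rated floor(10 * 120 / 150) = 8 minutes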
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='做题人')
topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
practice_date = models.DateTimeField('做题日期')
practice_num = models.PositiveIntegerField(
default=1,
verbose_name='题量',
)
time_span = models.PositiveIntegerField(
default=1,
verbose_name='耗时',
)
error_num = models.PositiveIntegerField(
default=0,
verbose_name='错误',
)
def correct_num(self):
if self.practice_num > self.error_num:
return self.practice_num - self.error_num
else:
return 0
correct_num.admin_order_field = 'practice_num'
correct_num.short_description = '正确'
def total_score(self):
return self.practice_num * self.topic.single_score
total_score.admin_order_field = 'practice_num'
total_score.short_description = '总分'
def rated_time_span(self):
return floor(self.total_score() * self.SCORE_PER_MIN)
rated_time_span.admin_order_field = 'practice_num'
rated_time_span.short_description = '额定耗时'
def rest_time(self):
if self.time_span < self.rated_time_span():
return self.rated_time_span() - self.time_span
else:
return 0
rest_time.admin_order_field = 'practice_num'
rest_time.short_description = '剩余时间'
def time_info(self):
return str(self.time_span) + '/' + str(self.rated_time_span())
time_info.admin_order_field = 'practice_num'
time_info.short_description = '耗时/额定耗时'
def score_without_time(self):
return self.correct_num() * self.topic.single_score
score_without_time.admin_order_field = 'practice_num'
score_without_time.short_description = '得分(忽略超时)'
def score(self):
if self.time_span <= self.rated_time_span():
return self.correct_num() * self.topic.single_score
else:
rate = self.rated_time_span() / self.time_span
return rate * self.correct_num() * self.topic.single_score
score.admin_order_field = 'practice_num'
score.short_description = '得分(额定耗时)'
def probability_without_time(self):
return self.score_without_time() / self.total_score()
def probability_without_time_str(self):
return "%.2f%%" % (self.probability_without_time() * 100)
probability_without_time_str.admin_order_field = 'practice_num'
probability_without_time_str.short_description = '得分率(忽略超时)'
def probability(self):
return self.score() / self.total_score()
def probability_str(self):
return "%.2f%%" % (self.probability() * 100)
probability_str.admin_order_field = 'practice_num'
probability_str.short_description = '得分率(额定耗时)'
def error_probability(self):
return self.error_num / self.practice_num
def error_probability_str(self):
return "%.2f%%" % (self.error_probability() * 100)
error_probability_str.admin_order_field = 'practice_num'
error_probability_str.short_description = '错误率'
def weight(self):
return round(self.topic.total_score() * (1 - self.probability()), 2)
weight.admin_order_field = 'practice_num'
weight.short_description = '影响权重'
def topic_str(self):
return self.topic.__str__() + '(' + str(self.topic.total_score()) + ')'
topic_str.admin_order_field = 'practice_num'
topic_str.short_description = '题型(分值)'
def __str__(self):
return self.topic.topic_name + '(' + self.probability_str() + ')'
class Meta:
verbose_name = '单项练习'
verbose_name_plural = '单项练习'
class PracticeStatistic(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='做题人')
topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
def total_num(self):
total = self.user.practice_set.filter(topic=self.topic).aggregate(nums=Sum('practice_num'))
if total['nums'] is None:
return 0.0
else:
return total['nums']
total_num.admin_order_field = 'topic'
total_num.short_description = '题量'
def total_time(self):
total = self.user.practice_set.filter(topic=self.topic).aggregate(nums=Sum('time_span'))
if total['nums'] is None:
return 0.0
else:
return total['nums']
total_time.admin_order_field = 'topic'
total_time.short_description = '耗时'
def total_error_num(self):
total = self.user.practice_set.filter(topic=self.topic).aggregate(nums=Sum('error_num'))
if total['nums'] is None:
return 0.0
else:
return total['nums']
total_error_num.admin_order_field = 'topic'
total_error_num.short_description = '错误'
def total_correct_num(self):
if self.total_num() > self.total_error_num():
return self.total_num() - self.total_error_num()
else:
return 0
total_correct_num.admin_order_field = 'topic'
total_correct_num.short_description = '正确'
def total_score(self):
return self.total_num() * self.topic.single_score
total_score.admin_order_field = 'topic'
total_score.short_description = '总分'
def total_rated_time(self):
return floor(self.total_score() * Practice.SCORE_PER_MIN)
total_rated_time.admin_order_field = 'topic'
total_rated_time.short_description = '额定耗时'
def rest_time(self):
if self.total_time() < self.total_rated_time():
return self.total_rated_time() - self.total_time()
else:
return 0
rest_time.admin_order_field = 'topic'
rest_time.short_description = '剩余时间'
def time_info(self):
return str(self.total_time()) + '/' + str(self.total_rated_time())
time_info.admin_order_field = 'topic'
time_info.short_description = '耗时/额定耗时'
def score_without_time(self):
return self.total_correct_num() * self.topic.single_score
score_without_time.admin_order_field = 'topic'
score_without_time.short_description = '得分(忽略超时)'
def score(self):
if self.total_time() <= self.total_rated_time():
return self.total_correct_num() * self.topic.single_score
else:
rate = self.total_rated_time() / self.total_time()
return rate * self.total_correct_num() * self.topic.single_score
score.admin_order_field = 'topic'
score.short_description = '得分(额定耗时)'
def probability_without_time(self):
total_score = self.total_score()
if total_score != 0:
            return self.score_without_time() / total_score
else:
return 0.0
def probability_without_time_str(self):
return "%.2f%%" % (self.probability_without_time() * 100)
probability_without_time_str.admin_order_field = 'topic'
probability_without_time_str.short_description = '得分率(忽略超时)'
def probability(self):
total_score = self.total_score()
if total_score != 0:
            return self.score() / total_score
else:
return 0.0
def probability_str(self):
return "%.2f%%" % (self.probability() * 100)
probability_str.admin_order_field = 'topic'
probability_str.short_description = '得分率(额定耗时)'
def error_probability(self):
total_num = self.total_num()
if total_num != 0:
            return self.total_error_num() / total_num
else:
return 0.0
def error_probability_str(self):
return "%.2f%%" % (self.error_probability() * 100)
error_probability_str.admin_order_field = 'topic'
error_probability_str.short_description = '错误率'
def weight(self):
return round(self.topic.total_score() * (1 - self.probability()), 2)
weight.admin_order_field = 'topic'
weight.short_description = '影响权重'
def topic_str(self):
        return str(self.topic) + '(' + str(self.topic.total_score()) + ')'
topic_str.admin_order_field = 'topic'
topic_str.short_description = '题型(分值)'
def __str__(self):
return self.topic.topic_name + '(' + self.probability_str() + ')'
class Meta:
verbose_name = '单项练习统计'
verbose_name_plural = '单项练习统计'
| 2.3125 | 2 |
scripts/mk_symbols_file.py | DavidLegg/StockSim | 1 | 12773522 | from glob import glob
import os, sys, re
USAGE = """Usage: mk_symbols_file [PATTERN]...
Make resources/symbols.txt, using the files matching given PATTERN(s).
Each PATTERN must contain exactly one "*" wildcard,
for the location in the path of the symbol.
Earlier patterns have precedence over later ones.
Examples:
python3 mk_symbols_file.py resources/*_daily_bars.csv
"""
CORRUPTED_FILENAME = 'resources/corrupted_files.txt'
OUTPUT_FILENAME = 'resources/symbols.txt'
TIME_COL_NAME = 'Unix Timestamp'
PRICE_COL_NAME = 'Close'
GARBAGE_THRESH = 5
def main():
try:
patterns = sys.argv[1:]
assert len(patterns) > 0, 'Must provide at least 1 pattern'
if patterns[0].lower() in {'-h', '--help'}:
print(USAGE)
return
assert all(p.count('*') == 1 for p in patterns), 'Every pattern must have exactly 1 "*" wildcard'
except Exception as e:
print(e)
print(USAGE)
return
print('Finding files...')
# Get a dictionary of pattern -> matching filenames
fns = {p:glob(p) for p in patterns}
print('Extracting symbol list...')
# Flatten and filter list of symbols, extracted from filenames using regex
symbols = {re.fullmatch(p.replace('*', '(.*)'), x).group(1) for p,xs in fns.items() for x in xs}
with open(CORRUPTED_FILENAME) as f:
symbols.difference_update({line.strip() for line in f})
total = len(symbols)
print('Found {} symbols. Processing for start & end times... Progress -{:5.1f}%'.format(total, 0.0), end='', flush=True)
with open(OUTPUT_FILENAME, "w") as f_out:
print('Symbol,Start Time,End Time', file=f_out)
for i,sym in enumerate(symbols):
for p in patterns:
fn = p.replace('*', sym)
start,end = findStartEnd(fn)
if start is not None and end is not None:
break
if start is None or end is None:
print('Error reading data files for {}'.format(sym))
else:
print('{},{},{}'.format(sym, start, end), file=f_out)
print("\b\b\b\b\b\b{:5.1f}%".format(100.0 * i / total), end='', flush=True)
print(' - Done.\nMade symbols file "{}" successfully.'.format(OUTPUT_FILENAME))
def findStartEnd(fn):
try:
timeCol = None
priceCol = None
start = None
end = None
price = None
with open(fn) as f:
while timeCol is None or priceCol is None:
parts = next(f).strip().split(',')
try:
timeCol = parts.index(TIME_COL_NAME)
except ValueError:
pass
try:
priceCol = parts.index(PRICE_COL_NAME)
except ValueError:
pass
while start is None:
try:
start = int( next(f).strip().split(',')[timeCol] )
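                    # Timestamps above ~1e10 are in milliseconds; convert to seconds.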
if start > 10000000000:
start = start // 1000
except (ValueError, IndexError):
pass
for line in f:
parts = line.strip().split(',')
try:
end = int( parts[timeCol] )
if end > 10000000000:
end = end // 1000
except (ValueError, IndexError):
pass
try:
                    newPrice = float(parts[priceCol])  # Close is typically decimal; int() rejects values like "123.45"
if price is not None and (newPrice > GARBAGE_THRESH*price or newPrice*GARBAGE_THRESH < price):
# Data is bad, return none
return None,None
price = newPrice
except (ValueError, IndexError):
pass
return start,end
except KeyboardInterrupt:
exit(1)
    except Exception:
        # Any other parse/IO failure marks the file as unusable.
        return None, None
if __name__ == '__main__':
main()
| 3.609375 | 4 |
students/k3342/practical_works/Zangieva Veronika/project/auto/auto/apps/owners/migrations/0003_delete_ownersmodel.py | TonikX/ITMO_ICT_-WebProgramming_2020 | 10 | 12773523 | # Generated by Django 3.0.4 on 2020-05-30 01:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('owners', '0002_ownersmodel'),
]
operations = [
migrations.DeleteModel(
name='OwnersModel',
),
]
| 1.53125 | 2 |
2020/Day-22/Crab_Combat/example.py | sreekesari-vangeepuram/aoc-2020 | 1 | 12773524 | #!/usr/bin/env python
from typing import List, Tuple
def play_space_cards(p1: List[int], p2: List[int]) -> Tuple[str, List[int]]:
b1, b2 = 0, 0 # buffer spaces for both players to space their cards
    while len(p1) != 0 and len(p2) != 0:
b1, b2 = p1.pop(0), p2.pop(0)
if b1 > b2:
p1.extend([b1, b2])
else:
p2.extend([b2, b1])
if len(p1) != 0:
return "Player_1", p1
return "Player_2", p2
def count_score(winner_deck: List[int]) -> int:
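    # The top card is worth len(deck), decreasing by one down to 1 for the bottom card.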
accumulator = 0
for card, multiplier in zip(winner_deck, list(reversed(range(1, len(winner_deck)+1)))):
accumulator += card * multiplier
return accumulator
decks = open("sample.txt").read().strip().split("\n\n")
player_1 = list(map(int, decks[0].split("\n")[1:]))
player_2 = list(map(int, decks[1].split("\n")[1:]))
winner, winner_deck = play_space_cards(player_1, player_2)
print(f"Combat: {winner} won with score {count_score(winner_deck)}!")
| 3.765625 | 4 |
contextaware.py | yogeshagrawal50/Movie-Recommendation-System | 2 | 12773525 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 18:04:26 2020
@author: hp
"""
import pandas as pd
import numpy as np
ratings= pd.read_csv('ratings.csv')
movies= pd.read_csv(r'movies.csv' )
ts = ratings['timestamp']
ts = pd.to_datetime(ts, unit = 's').dt.hour
movies['hours'] = ts
merged = ratings.merge(movies, left_on = 'movieId' , right_on = 'movieId', suffixes = ['_user',''])
merged = merged[['userId', 'movieId','genres','hours']]
merged = pd.concat([merged,merged['genres'].str.get_dummies(sep = '|')], axis = 1)
del merged['genres']
del merged['(no genres listed)']
def activateuserprofile(userId):
userprofile = merged.loc[merged['userId'] == userId]
del userprofile ['userId']
del userprofile['movieId']
userprofile = userprofile.groupby(['hours'], as_index = False, sort =True).sum()
userprofile.iloc[:,1:20] = userprofile.iloc[:,1:20].apply(lambda x:(x - np.min(x))/(np.max(x)-np.min(x)),axis = 1)
return(userprofile)
activeuser = activateuserprofile(30)
recommend = pd.read_csv(r'recommend.csv')
del merged['userId']  # 'rating' was already dropped by the column subset above
merged = merged.drop_duplicates()
user_pref = recommend.merge(merged, left_on = 'movieId' , right_on = 'movieId', suffixes = ['_user',''])
# Collapse the hourly genre profile to one mean preference vector before the dot product
product = np.dot(user_pref.iloc[:, 2:21].values, activeuser.iloc[:, 1:20].mean(axis=0).values)
preferences = np.stack((user_pref['movieId'], product), axis =-1)
df = pd.DataFrame(preferences, columns = ['movieId', 'preferences'])
result = df.sort_values(['preferences'], ascending = False).iloc[0:10]  # top-10 recommendations
| 2.546875 | 3 |
CS3/0600_neural networks_handwriting_recognition/numpy_nnet/erikdelange/linear.py | nealholt/python_programming_curricula | 7 | 12773526 | # A neural network which approximates linear function y = 2x + 3.
# The network has 1 layer with 1 node, which has 1 input (and a bias).
# As there is no activation effectively this node is a linear function.
# After +/- 10.000 iterations W should be close to 2 and B should be close to 3.
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(formatter={"float": "{: 0.3f}".format}, linewidth=np.inf)
np.random.seed(1)
X = np.array([[0], [1], [2], [3], [4]]) # X = input (here: 5 values)
Y = 2 * X + 3 # Y = output: y = 2x + 3 (as many values as there are X's)
W = np.random.normal(scale=0.1, size=(1, 1)) # layer: (1, 1) = 1 node with 1 input
B = np.random.normal(scale=0.1, size=(1, 1)) # bias: (1, 1) = for 1 node (and by definition only 1 bias value per node)
learning_rate = 0.001
iterations = 10000
error = []
print("initial :", "W =", W, "B =", B, "(random initialization)")
m = X.shape[0]
for _ in range(iterations):
# forward pass
a = W.dot(X.T) + B
# back propagation
da = a - Y.T # da = error
dz = da # no activation
dw = dz.dot(X) / m
db = np.sum(dz, axis=1, keepdims=True) / m
W -= learning_rate * dw
B -= learning_rate * db
error.append(np.average(da ** 2))
print("result :", "W =", W, "B =", B, "(after {} iterations)".format(iterations))
print("expected: W = 2, B = 3")
plt.plot(range(iterations), error)
plt.title("MSE (mean squared error)")
plt.xlabel("training iterations")
plt.ylabel("mse")
plt.show()
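
# Illustrative sanity check: predict y for a new x with the learned parameters;
# with W close to 2 and B close to 3 this should print roughly 13.
x_new = np.array([[5.0]])
print("prediction for x = 5:", (W.dot(x_new.T) + B).ravel())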
| 3.921875 | 4 |
app/repositories/register.py | JulienBalestra/enjoliver | 33 | 12773527 | <reponame>JulienBalestra/enjoliver
from repositories.machine_discovery_repo import DiscoveryRepository
from repositories.machine_schedule_repo import ScheduleRepository
from repositories.machine_state_repo import MachineStateRepository
from repositories.user_interface_repo import UserInterfaceRepository
from smartdb import SmartDatabaseClient
class RepositoriesRegister:
def __init__(self, smart: SmartDatabaseClient):
self.discovery = DiscoveryRepository(smart)
self.machine_state = MachineStateRepository(smart)
self.user_interface = UserInterfaceRepository(smart)
self.machine_schedule = ScheduleRepository(smart)
| 2.140625 | 2 |
tools/check-license-header.py | Just-maple/go2sky | 1 | 12773528 | # Licensed to SkyAPM org under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. SkyAPM org licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
ignored_chars = '//\n \t'
ignored_paths = [
"reporter/grpc"
]
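# Collapse the reference header into one space-separated string (comment markers
# and whitespace stripped) so file headers can be compared with a simple prefix check.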
license_header = ' '.join(
[
line.strip(ignored_chars) for line in """
// Licensed to SkyAPM org under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. SkyAPM org licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
""".splitlines()
]
).strip(ignored_chars)
def walk_through_dir(d) -> bool:
checked = True
for root, sub_dirs, files in os.walk(d):
ignored = False
for ignored_path in ignored_paths:
            if ignored_path in root:
ignored = True
break
if ignored:
continue
for filename in files:
if not filename.endswith(".go"):
continue
file_path = os.path.join(root, filename)
with open(file_path, 'r') as f:
header = ' '.join([line.strip(ignored_chars) for line in f.readlines() if line.startswith('//')]).strip()
print('%s license header in file: %s' % ('✅' if header.startswith(license_header) else '❌', file_path))
checked &= header.startswith(license_header)
return checked
if __name__ == "__main__":
if not walk_through_dir("./"):
sys.exit(1)
| 1.84375 | 2 |
vc_scripts/parallel_build.py | TimSVector/azure | 0 | 12773529 | #
# The MIT License
#
# Copyright 2020 Vector Informatik, GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#parallel_build.py
from __future__ import unicode_literals
from __future__ import print_function
import sys, os, subprocess, argparse, glob, shutil
from pprint import pprint
import pdb, time
from datetime import timedelta
from io import open
from vector.apps.DataAPI.vcproject_api import VCProjectApi
from threading import Thread, Lock, Semaphore
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
VCD = os.environ['VECTORCAST_DIR']
class ParallelBuild(object):
def __init__(self):
self.manageProject = None
self.lock = Lock()
parser = argparse.ArgumentParser()
parser.add_argument('--project', '-p', help='Manager Project Name')
parser.add_argument('--dryrun', help='Dry Run without build/execute', action="store_true")
parser.add_argument('--jobs', '-j', help='Number of concurrent jobs', default="1")
parser.add_argument('--verbose', help='Dry Run without build/execute', action="store_true")
parser.add_argument('--ci', help='Use CI Licenses', action="store_true", default = False)
args = parser.parse_args()
if args.ci:
self.useCI = " --ci "
else:
self.useCI = ""
try:
self.manageProject = os.environ['VCV_ENVIRONMENT_FILE']
except:
self.manageProject = args.project
self.dryrun = args.dryrun
if self.manageProject is None:
print ("\n** Use either --project [Manage Project Name] or enviroment variable VCV_ENVIRONMENT_FILE to specify the manage project name")
sys.exit()
if not os.path.isfile(self.manageProject) and not os.path.isfile(self.manageProject + ".vcm"):
            raise IOError(self.manageProject + ' does not exist')
if args.verbose:
self.verbose = True
else:
self.verbose = False
self.mpName = self.manageProject.replace(".vcm","")
self.reportName = os.path.basename(self.manageProject).replace(".vcm","")
self.buildSemaphore = Semaphore(int(args.jobs))
print ("Disabling range check globally")
try:
self.api = VCProjectApi(self.manageProject)
except:
statusCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --status"
self.runManageCmd(statusCmd)
self.api = VCProjectApi(self.manageProject)
self.oldRangeCheck = self.api.project.options["enums"]["RANGE_CHECK"][0]
self.api.close()
buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --config=RANGE_CHECK=NONE"
self.runManageCmd(buildCmd)
self.api = VCProjectApi(self.manageProject)
def __enter__(self):
return self
def __exit__(self, exct_type, exce_value, traceback):
self.api.close()
print ("Clearing disable of range check globally")
buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI +" --config=RANGE_CHECK="+self.oldRangeCheck
self.runManageCmd(buildCmd)
build_log_data = ""
for file in glob.glob("build*.log"):
build_log_data += " ".join(open(file,"r").readlines())
os.remove(file)
try:
open("complete_build.log","w", encoding="utf-8").write(unicode(build_log_data))
        except (NameError, TypeError):  # py3 has no unicode(); py2 open() has no encoding=
open("complete_build.log","w").write(build_log_data)
print(build_log_data)
def th_Print (self, str):
self.lock.acquire()
print (str)
self.lock.release()
def runManageCmd(self, cmd, env = None):
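        # With an environment, capture Manage output in a per-environment build log;
        # otherwise stream it straight to the console.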
if self.verbose:
self.th_Print (cmd)
if self.dryrun:
return
if env:
logName = "build_" + self.reportName + "_" + env.compiler.name + "_" + env.testsuite.name + "_" + env.name + ".log"
build_log = open(logName,"w")
process = subprocess.Popen(cmd, shell=True, stdout=build_log, stderr=build_log)
process.wait()
build_log.close()
else:
process = subprocess.Popen(cmd, shell=True)
process.wait()
def build_env(self,env):
if not self.verbose:
self.th_Print ("Building: " + env.compiler.name + "/" + env.testsuite.name + "/" + env.name)
buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --build --level " + env.compiler.name + "/" + env.testsuite.name + " --environment " + env.name
self.runManageCmd(buildCmd,env)
self.buildSemaphore.release()
def doit(self):
buildingList = []
for env in self.api.Environment.all():
if env.system_tests:
print("Building System Test: " + env.compiler.name + "/" + env.testsuite.name + "/" + env.name)
buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --build --level " + env.compiler.name + "/" + env.testsuite.name + " --environment " + env.name
self.runManageCmd(buildCmd,env)
continue
self.buildSemaphore.acquire()
t = Thread(target=self.build_env,args=[env])
t.daemon = True # thread dies with the program
t.start()
buildingList.append(t)
checkThreads = True
while checkThreads:
checkThreads = False
for t in buildingList:
if t.is_alive():
time.sleep(1)
checkThreads = True
break
if __name__ == '__main__':
with ParallelBuild() as parallel_build:
parallel_build.doit()
| 1.765625 | 2 |
gameresources/config.py | RoW171/GameResources | 0 | 12773530 | __author__ = "Robin 'r0w' Weiland"
__date__ = "2019-05-07"
__version__ = "0.0.0"
from configparser import ConfigParser
from io import StringIO, TextIOWrapper
from warnings import warn
class Section:
_config = None
_name = str()
def __getattribute__(self, item): return super(Section, self).__getattribute__(item)
__getitem__ = __getattribute__
def __setattr__(self, key, value):
try:
if key not in ('_config', '_name'): self._config.saveEntry(self._name, key, value)
super(Section, self).__setattr__(key, value)
except (AttributeError,):
            raise NotImplementedError('adding options will be possible in the future')  # TODO: add option into section here
__setitem__ = __setattr__
def __contains__(self, item): return item in self.__dict__
class Config(ConfigParser):
def __init__(self, path, **kwargs):
self._path = path
super(Config, self).__init__(**kwargs)
self(self._path)
self.load()
def __call__(self, nFile=None):
if isinstance(nFile, (StringIO, TextIOWrapper,)):
self.read_file(nFile)
self._path = nFile.name
if isinstance(nFile, str):
self.read(nFile)
self._path = nFile
return self
def __repr__(self): return str(self._path)
__str__ = __repr__
def __getattribute__(self, item): return super(Config, self).__getattribute__(item)
__getitem__ = __getattribute__
def __setattr__(self, key, value):
try: super(Config, self).__setattr__(key, value)
except (AttributeError,):
            raise NotImplementedError('adding sections will be possible in the future')  # TODO: add sections here
__setitem__ = __setattr__
def __contains__(self, item): return item in self.__dict__
def load(self):
for section in self.sections():
s = Section()
s._config = self
s._name = section
for option in self.options(section):
s.__dict__[option] = self.loadEntry(section, option)
self.__dict__[section] = s
def loadEntry(self, section, option, fallback=None, datatype=str, subdatatype=str, chunksize=None):
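        # Convert the stored string back to `datatype`: list/tuple values are
        # comma-separated (optionally regrouped into chunksize-tuples) and bools
        # are stored as "0"/"1"; any failure warns and returns the fallback.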
try:
loadedItem = self.get(section, option, fallback=fallback)
if datatype == list or datatype == tuple:
loadedItem = loadedItem.replace('(', '').replace(')', '')
if loadedItem == '': return datatype([])
loadedItem = datatype(map(subdatatype, loadedItem.split(', ')))
if chunksize is not None: loadedItem = datatype((zip(*[iter(loadedItem)] * chunksize)))
elif datatype == bool: loadedItem = bool(int(loadedItem))
else: loadedItem = datatype(loadedItem)
return loadedItem
        except Exception as e:
            warn(f'failed loading {section}:{option}; returned fallback; {type(e).__name__}')
return fallback
def save(self):
with open(self._path, 'w') as configfile: self.write(configfile)
def saveEntry(self, section, option, value, saveToFile=True):
        if isinstance(value, (list, tuple,)): value = ', '.join(map(str, value))
elif type(value) == bool: value = str(int(value))
self.set(section, option, str(value))
if saveToFile: self.save()
if __name__ == '__main__':
from pathlib import Path
c = Config(Path(r'C:\Users\robin\Documents\Private\Python\GameResources\testing\config.ini').open('r'))
print(c.gameplay.player_speed)
# c.gameplay['player-speed'] = 4
| 2.265625 | 2 |
examples/input.py | cloudagon/prey | 0 | 12773531 | #!/usr/bin/env prey
async def main():
word = input("Give me a word: ")
await x(f"echo {word}")
| 2.609375 | 3 |
mods/Commands.py | waffle620/fagyhal | 494 | 12773532 | import asyncio
import discord
import sys
from discord.ext import commands
from utils import checks
from mods.cog import Cog
class Commands(Cog):
def __init__(self, bot):
super().__init__(bot)
self.cursor = bot.mysql.cursor
self.escape = bot.escape
@commands.group(pass_context=True, aliases=['setprefix', 'changeprefix'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def prefix(self, ctx, *, txt:str=None):
"""Change the Bots Prefix for the Server"""
if txt is None:
sql = "SELECT prefix FROM `prefix` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
sql_channel = "SELECT prefix FROM `prefix_channel` WHERE server={0} AND channel={1}"
sql_channel = sql_channel.format(ctx.message.server.id, ctx.message.channel.id)
result = self.cursor.execute(sql).fetchall()
result2 = self.cursor.execute(sql_channel).fetchall()
if len(result) == 0:
server_prefix = '.'
else:
server_prefix = result[0]['prefix']
if len(result2) == 0:
channel_prefix = None
else:
channel_prefix = result2[0]['prefix']
msg = "Server Prefix: `{0}`\n".format(server_prefix)
if channel_prefix != None:
msg += "**Current** Channel Prefix: `{0}`".format(channel_prefix)
await self.bot.say(msg)
return
sql = "INSERT INTO `prefix` (`server`, `prefix`, `id`) VALUES (%s, %s, %s)"
update_sql = "UPDATE `prefix` SET prefix={0} WHERE server={1}"
update_sql = update_sql.format(self.escape(txt), ctx.message.server.id)
check = "SELECT server FROM `prefix` WHERE server={0}"
check = check.format(ctx.message.server.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
self.cursor.execute(sql, (ctx.message.server.id, txt, ctx.message.author.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for the server\n".format(txt))
else:
self.cursor.execute(update_sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for the server".format(txt))
@prefix.command(pass_context=True, name='channel', no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def _prefix_channel(self, ctx, *, txt:str):
"""Change the Bots Prefix for the current Channel"""
channel = ctx.message.channel
for c in ctx.message.channel_mentions:
channel = c
txt = txt.replace(channel.mention, '').replace('#'+channel.name, '')
sql = "INSERT INTO `prefix_channel` (`server`, `prefix`, `channel`, `id`) VALUES (%s, %s, %s, %s)"
update_sql = "UPDATE `prefix_channel` SET prefix={0} WHERE server={1} AND channel={2}"
update_sql = update_sql.format(self.escape(txt), ctx.message.server.id, channel.id)
check = "SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"
check = check.format(ctx.message.server.id, channel.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
self.cursor.execute(sql, (ctx.message.server.id, txt, channel.id, ctx.message.author.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for {1}".format(txt, channel.mention))
else:
self.cursor.execute(update_sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for {1}".format(txt, channel.mention))
@prefix.command(pass_context=True, name='reset', no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def _prefix_reset(self, ctx, what:str=None, channel:discord.Channel=None):
"""Reset All Custom Set Prefixes For the Bot"""
if what is None or what == "server":
sql = "DELETE FROM `prefix` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
check = "SELECT * FROM `prefix` WHERE server={0}"
check = check.format(ctx.message.server.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Current server does **not** have a custom prefix set!")
return
else:
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":exclamation: **Reset server prefix**\nThis does not reset channel prefixes, run \"all\" after reset to reset all prefixes *or* \"channels\" to reset all custom channel prefixes.")
elif what == "channel":
if channel is None:
channel = ctx.message.channel
sql = "DELETE FROM `prefix_channel` WHERE server={0} AND channel={1}"
sql = sql.format(ctx.message.server.id, channel.id)
check = "SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"
check = check.format(ctx.message.server.id, channel.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: {0} does **not** have a custom prefix Set!\nMention the channel after \"reset channel\" for a specific channel.".format(channel.mention))
return
else:
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":exclamation: Reset {0}'s prefix!\nThis does **not** reset all custom channel prefixes, \"reset channels\" to do so.".format(channel.mention))
return
elif what == "channels":
sql = "DELETE FROM `prefix_channel` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
check = "SELECT * FROM `prefix_channel` WHERE server={0}"
check = check.format(ctx.message.server.id)
result = self.cursor.execute(check).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Server does **not** reset a custom prefix set for any channel!\nMention the channel after \"reset channel\" for a specific channel.")
return
else:
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":exclamation: Reset all channels custom prefixes!")
return
elif what == "all" or what == "everything":
sql = "DELETE FROM `prefix_channel` WHERE server={0}"
sql = sql.format(ctx.message.server.id)
sql2 = "DELETE FROM `prefix` WHERE server={0}"
sql2 = sql2.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.execute(sql2)
self.cursor.commit()
await self.bot.say(":warning: Reset all custom server prefix settings!")
return
else:
await self.bot.say(":no_entry: Invalid Option\nOptions: `server, channel, channels, all/everything`")
good_commands = ['command', 'blacklist', 'help', 'invite']
async def command_toggle(self, t:str, ctx, cmd:str, user=None, msg=True):
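        # Toggle one command for the given scope (server/channel/user/role/global):
        # insert a command_blacklist row to disable it, delete the row to re-enable it.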
try:
if cmd in self.good_commands:
                await self.bot.send_message(ctx.message.channel, ':no_entry: You cannot disable command: `{0}`!'.format(cmd))
return
if t == 'server':
sql = "SELECT * FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"
sql = sql.format(ctx.message.server.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`) VALUES (%s, %s, %s)'
self.cursor.execute(sql, (cmd, "server", ctx.message.server.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}`.'.format(cmd))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"
sql = sql.format(ctx.message.server.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}`.'.format(cmd))
elif t == 'channel':
channel = user
sql = "SELECT * FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"
sql = sql.format(ctx.message.server.id, channel.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `channel`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (cmd, "channel", ctx.message.server.id, channel.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for channel {1}.'.format(cmd, channel.mention))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"
sql = sql.format(ctx.message.server.id, channel.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for channel {1}.'.format(cmd, channel.mention))
elif t == 'user':
sql = "SELECT * FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND command={2}"
sql = sql.format(ctx.message.server.id, user.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `user`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (cmd, "user", ctx.message.server.id, user.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for user `{1}`.'.format(cmd, user))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND command={2}"
sql = sql.format(ctx.message.server.id, user.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for user `{1}`.'.format(cmd, user))
elif t == 'role':
role = user
sql = "SELECT * FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"
sql = sql.format(ctx.message.server.id, role.id, self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `role`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (cmd, "role", ctx.message.server.id, role.id))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for role {1}.'.format(cmd, role.mention))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"
sql = sql.format(ctx.message.server.id, role.id, self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for role {1}.'.format(cmd, role.mention))
elif t == 'global':
sql = "SELECT * FROM `command_blacklist` WHERE type='global' AND command={0}"
sql = sql.format(self.escape(cmd))
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
sql = 'INSERT INTO `command_blacklist` (`command`, `type`) VALUES (%s, %s)'
self.cursor.execute(sql, (cmd, "global"))
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':globe_with_meridians: Disabled command `{0}` globally.'.format(cmd))
else:
sql = "DELETE FROM `command_blacklist` WHERE type='global' AND command={0}"
sql = sql.format(self.escape(cmd))
self.cursor.execute(sql)
self.cursor.commit()
if msg:
await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` globally.'.format(cmd))
else:
return
except Exception as e:
await self.bot.send_message(ctx.message.channel, str(e))
async def module_command_toggle(self, module, t:str, ctx):
try:
count = 0
disabled = []
for command in self.bot.commands:
if self.bot.commands[command].module == module and command not in disabled:
count += 1
cmd = str(self.bot.commands[command].name)
await self.command_toggle(t, ctx, cmd, msg=False)
await asyncio.sleep(0.21)
disabled.append(command)
return count
except Exception as e:
await self.bot.send_message(ctx.message.channel, str(e))
async def get_modules(self):
modules = []
for module in sys.modules:
if module.startswith('mods.'):
if module == 'mods.Repl' or module == 'mods.Stats' or module == 'mods.Commands':
continue
mod = module.replace('mods.', '')
modules.append(mod)
return modules
@commands.group(pass_context=True, invoke_without_command=True, aliases=['commands', 'cmd'], no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command(self, ctx, cmd:str):
"""Toggle a command for the server"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('server', ctx, cmd)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='toggle', aliases=['enable', 'disable'], pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def cmd_toggle(self, ctx, cmd:str):
"""Server wide Command Toggle"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('server', ctx, cmd)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='user', pass_context=True, aliases=['member'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_user(self, ctx, cmd:str, user:discord.User=None):
"""Toggle Command for a user"""
if user is None:
user = ctx.message.author
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('user', ctx, cmd, user)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='role', pass_context=True, aliases=['rank'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_role(self, ctx, cmd:str, role:discord.Role):
"""Toggle Command for a role"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('role', ctx, cmd, role)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='channel', pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_channel(self, ctx, cmd:str, chan:discord.Channel=None):
"""Toggle Command for a channel"""
if chan is None:
chan = ctx.message.channel
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('channel', ctx, cmd, chan)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.command(name='global', pass_context=True, invoke_without_command=True)
@checks.is_owner()
async def command_toggle_global(self, ctx, cmd:str):
"""Toggle command globally"""
if cmd in self.bot.commands:
cmd = str(self.bot.commands[cmd])
await self.command_toggle('global', ctx, cmd)
else:
await self.bot.say(':no_entry: `Command does not exist.`')
@command.group(name='module', pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_module(self, ctx, module:str, chan:discord.Channel=None):
"""Toggle a bot command module"""
try:
mod = sys.modules['mods.{0}'.format(module)]
except KeyError:
modules = await self.get_modules()
await self.bot.say(':no_entry: Invalid Module\n**Modules**\n`{0}`'.format(', '.join(modules)))
return
if chan:
count = await self.module_command_toggle(mod, 'channel', ctx)
else:
count = await self.module_command_toggle(mod, 'server', ctx)
await self.bot.say(':white_check_mark: Disabled **{0}** commands in module `{1}`.'.format(count, module))
@command_toggle_module.command(name='list', pass_context=True, invoke_without_command=True)
async def command_toggle_module_list(self, ctx):
modules = await self.get_modules()
await self.bot.say(':information_source: **Modules**\n`{0}`'.format(', '.join(modules)))
@command.command(name='all', pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def command_toggle_all(self, ctx):
sql = 'SELECT COUNT(*) FROM `command_blacklist` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
count = str(self.cursor.execute(sql).fetchall()[0]['COUNT(*)'])
sql = 'DELETE FROM `command_blacklist` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(':white_check_mark: Enabled **{0}** server command(s).'.format(count))
@command.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
async def command_list(self, ctx):
sql = 'SELECT * FROM `command_blacklist` WHERE server={0} OR type="global"'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(':no_entry: Server does **not** have any commands blacklisted.')
return
msg = ''
for s in result:
if s['type'] == 'global':
                msg += ':globe_with_meridians: Command Disabled Globally: `{0}`\n'.format(s['command'])
elif s['type'] == 'server':
msg += ':desktop: Command Disabled on Server: `{0}`\n'.format(s['command'])
elif s['type'] == 'channel':
msg += ':arrow_right: Command Disabled in <#{0}>: `{1}`\n'.format(s['channel'] ,s['command'])
elif s['type'] == 'role':
msg += ':eight_spoked_asterisk: Command Disabled for <@&{0}>: `{1}`\n'.format(s['role'], s['command'])
elif s['type'] == 'user':
user = discord.utils.get(self.bot.get_all_members(), id=str(s['user']))
if user is None:
user = '<@{0}> (Not Found)'.format(s['user'])
msg += ':bust_in_silhouette: Command Disabled for **{0}**: `{1}`\n'.format(user, s['command'])
await self.bot.say(':white_check_mark: **Commands Disabled**\n'+msg)
def setup(bot):
bot.add_cog(Commands(bot)) | 2.484375 | 2 |
year2017/day03/test_solver.py | Sebaestschjin/advent-of-code | 0 | 12773533 | <reponame>Sebaestschjin/advent-of-code
import pytest
from assertpy import assert_that
import year2017.day03.reader as reader
import year2017.day03.solver as solver
@pytest.mark.parametrize('puzzle, expected',
[(1, 0),
(12, 3),
(23, 2),
(1024, 31),
])
def test_examples_a(puzzle, expected):
result = solver.solve_a(puzzle)
assert_that(result).is_equal_to(expected)
@pytest.mark.solution
def test_solution_a():
result = solver.solve_a(reader.read())
assert_that(result).is_equal_to(430)
@pytest.mark.parametrize('puzzle, expected',
[(1, 2),
(2, 4),
(3, 4),
(4, 5),
(5, 10),
])
def test_examples_b(puzzle, expected):
result = solver.solve_b(puzzle)
assert_that(result).is_equal_to(expected)
@pytest.mark.solution
def test_solution_b():
result = solver.solve_b(reader.read())
assert_that(result).is_equal_to(312453)
| 2.484375 | 2 |
codemon/CodemonFetch.py | Enigmage/codemon | 0 | 12773534 | import requests
import re
import os
import itertools
from bs4 import BeautifulSoup as beSo
from clint.textui import colored
def check_structure(name, basedir):
# Check if the question folder exists for the name passed.
status = True
if not os.path.exists(os.path.join(basedir, f'{name}')) or not \
os.path.exists(os.path.join(basedir, f'{name}',f'{name}.in')) or not \
os.path.exists(os.path.join(basedir, f'{name}', f'{name}.op')):
status = False
return status
def fetch_tests(file_list, contestName):
try:
basedir = os.path.join(os.getcwd(), contestName) if not os.path.basename(os.getcwd()) == contestName else os.getcwd()
contest_number = ''.join(re.findall(r'\d+', contestName))
if not len(contest_number):
print(colored.red("Invalid contest number."))
return
load_page = requests.get(f"https://codeforces.com/contest/{contest_number}/problems")
soup = beSo(load_page.content, 'html.parser')
tests = soup.findAll("div", attrs={"class":"sample-tests"})
if(len(tests) == 0):
print(colored.red("Invalid contest number."))
else:
print("Fetching sample test cases...")
for file_name, test in zip(file_list, tests):
# Check if proper directory structure exists, if not generate error.
correct_dir_structure = True
if(check_structure(file_name, basedir)):
# Add inputs to .in files
for t in test.findAll("div", attrs={"class":"input"}):
inp = t.pre.contents
with open(os.path.join(basedir, f'{file_name}' , f'{file_name}.in'), 'a') as f:
for i in range(len(inp)):
if str(inp[i]) in ('<br>', '<br/>'):
# Make sure separate testcases are separated by a newline
f.write('\n\n') if i == len(inp)-1 else f.write('\n')
continue
f.write(inp[i])
# Add outputs to .op files
for t in test.findAll("div", attrs={"class":"output"}):
outp = t.pre.contents
with open(os.path.join(basedir, f'{file_name}' , f'{file_name}.op'), 'a') as f:
for o in range(len(outp)):
if str(outp[o]) in ('<br>', '<br/>'):
# Make sure separate testcases are separated by a newline
f.write('\n\n') if o == len(outp)-1 else f.write('\n')
continue
f.write(outp[o])
else:
correct_dir_structure = False
break
print("Sample test cases added." if correct_dir_structure else
colored.red(f"Failed to add sample test cases: Incorrect directory structure !!"))
# In case of any error with scraping, display warning.
except:
print(colored.red("There was some error fetching the tests !!"))
| 3.015625 | 3 |
pyactiviti/pyactiviti.py | matrixise/pyactiviti | 3 | 12773535 | # -*- coding: utf-8 -*-
import requests
import json
from requests.status_codes import codes
from .exceptions import (
UserAlreadyExists,
NotFound,
UserNotFound,
GroupNotFound,
GroupMissingID,
UserMissingID,
GroupUpdatedSimultaneous,
UserAlreadyMember,
UserUpdatedSimultaneous,
DeploymentNotFound
)
USERS_FIELDS = [
'id', 'firstName', 'lastNAme', 'email', 'firstNameLike',
'lastNameLike', 'emailLike', 'memberOfGroup', 'potentialStarter',
'sort'
]
GROUPS_FIELDS = [
'id', 'name', 'type', 'nameLike', 'member', 'potentialStarter', 'sort'
]
DEPLOYMENTS_FIELDS = [
'name', 'nameLike', 'category', 'categoryNotEquals', 'tenantId',
'tenantIdLike', 'withoutTenantId', 'sort'
]
def check_parameters(fields, args):
arguments = {}
for item in fields:
value = args.pop(item, None)
if value:
arguments[item] = value
return arguments
class Activiti(object):
def __init__(self, endpoint, auth=('kermit', 'kermit')):
self.endpoint = endpoint
self.auth = auth
self.session = requests.Session()
self.session.auth = self.auth
self.session.headers.update({'content-type': 'application/json'})
user_url = lambda self, id: self.users_url(id)
group_url = lambda self, id: self.groups_url(id)
def user_exists(self, login):
response = self._get(self.user_url(login))
return response.status_code == codes.ok
def get_user(self, login):
response = self._get(self.user_url(login))
if response.status_code == codes.ok:
return response.json()
raise UserNotFound()
def users_url(self, *args):
return self._to_endpoint('identity', 'users', *args)
def users(self, **parameters):
params = check_parameters(USERS_FIELDS, parameters)
response = self._get(self.users_url(), params=params)
if response.status_code == codes.ok:
return response.json()
raise NotImplementedError()
def get_users_member_of(self, group):
return self.users(memberOfGroups=group)
def create_user(self, login, email, password, firstname=None, lastname=None):
user = {
'id': login,
'email': email,
'password': password,
'firstName': firstname or '',
'lastName': lastname or ''
}
response = self._post(self.users_url(), user)
if response.status_code == codes.created:
return response.json()
elif response.status_code == codes.conflict:
raise UserAlreadyExists(response.json()['exception'])
elif response.status_code == codes.bad_request:
raise UserMissingID()
return response.status_code == codes.created
def user_update(self, user_id, values=None):
response = self._put(self.user_url(user_id), values=values)
if response.status_code == codes.ok:
return response.json()
elif response.status_code == codes.not_found:
raise UserNotFound()
elif response.status_code == codes.conflict:
raise UserUpdatedSimultaneous()
def delete_user(self, login):
response = self._delete(self.user_url(login))
if response.status_code == codes.no_content:
return True
elif response.status_code == codes.not_found:
raise UserNotFound()
def groups_url(self, *args):
return self._to_endpoint('identity', 'groups', *args)
    def get_group(self, group_id):
        # Mirror get_user(): return the group payload or raise on a missing group.
        response = self._get(self.group_url(group_id))
        if response.status_code == codes.ok:
            return response.json()
        elif response.status_code == codes.not_found:
            raise GroupNotFound()
        raise NotImplementedError()
def groups(self, **parameters):
params = check_parameters(GROUPS_FIELDS, parameters)
response = self._get(self.groups_url(), params=params)
if response.status_code == codes.ok:
return response.json()
raise NotImplementedError()
def group_update(self, group_id, values=None):
response = self._put(self.group_url(group_id), values=values)
if response.status_code == codes.ok:
return response.json()
elif response.status_code == codes.not_found:
raise GroupNotFound()
elif response.status_code == codes.conflict:
raise GroupUpdatedSimultaneous()
def create_group(self, id, name, type):
values = dict(id=id, name=name, type=type)
response = self._post(self.groups_url(), values)
if response.status_code == codes.created:
return response.json()
elif response.status_code == codes.bad_request:
raise GroupMissingID()
def delete_group(self, group_id):
response = self._delete(self.group_url(group_id))
if response.status_code == codes.no_content:
return True
elif response.status_code == codes.not_found:
raise GroupNotFound()
def group_add_member(self, group_id, user_id):
values = {
'userId': user_id,
}
response = self._post(
self._to_endpoint('identity', 'groups', group_id, 'members'),
values=values
)
if response.status_code == codes.created:
return response.json()
elif response.status_code == codes.not_found:
raise GroupNotFound()
elif response.status_code == codes.conflict:
raise UserAlreadyMember()
def group_remove_member(self, group_id, user_id):
response = self._delete(
self._to_endpoint('identity', 'groups', group_id, 'members', user_id)
)
if response.status_code == codes.no_content:
return True
elif response.status_code == codes.not_found:
raise NotFound()
def process_definitions(self):
response = self._get('/repository/process-definitions')
return json.loads(response.content)
def _delete(self, service):
return self.session.delete(service)
def _post(self, service, values=None):
if values:
values = json.dumps(values)
return self.session.post(service, data=values)
def _get(self, service, params=None):
return self.session.get(service, params=params)
def _put(self, service, values=None):
if values:
values = json.dumps(values)
return self.session.put(service, data=values)
def _to_endpoint(self, *args):
return '/'.join([self.endpoint, 'service'] + list(str(arg) for arg in args))
def start_process_by_key(self, key, variables=None):
if variables is None:
variables = {}
variables = [
{'name': _key, 'value': value}
            for _key, value in variables.items()
]
values = {
'processDefinitionKey': key,
'businessKey': 'business%s' % key,
'variables': variables,
}
return self._post('/runtime/process-instances', values)
def get_user_task_list(self, user, process=None):
url = '/runtime/tasks?involvedUser=%s' % (user,)
if process:
url += '&processDefinitionKey=%s' % (process,)
response = self._get(url)
return json.loads(response.content)
def get_task_form(self, task_id):
response = self._get('/form/form-data?taskId=%s' % (task_id,))
return json.loads(response.content)
def submit_task_form(self, task_id, properties=None):
if properties is None:
properties = {}
properties = [
{'id': _key, 'value': value}
            for _key, value in properties.items()
]
values = {
'taskId': task_id,
'properties': properties,
}
return self._post('/form/form-data', values)
# Keep the backward-compatibility
submitTaskForm = submit_task_form
getTaskForm = get_task_form
startProcessByKey = start_process_by_key
getUserTaskList = get_user_task_list
def deployments_url(self, *args):
return self._to_endpoint('repository', 'deployments', *args)
def deployment_url(self, deployment_id):
return self.deployments_url(deployment_id)
def deployments(self, **parameters):
response = self._get(self.deployments_url(), params=parameters)
if response.status_code == codes.ok:
return response.json()
raise NotImplementedError()
def get_deployment(self, deployment_id):
response = self._get(self.deployment_url(deployment_id))
if response.status_code == codes.ok:
return response.json()
elif response.status_code == codes.not_found:
raise DeploymentNotFound()
raise NotImplementedError()
# def create_deployment(self, files):
# response = self.session.post(self.deployments_url(), files=files)
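

# Illustrative usage (endpoint and credentials below are assumptions, not part of the library):
# activiti = Activiti('http://localhost:8080/activiti-rest', auth=('kermit', 'kermit'))
# if not activiti.user_exists('jdoe'):
#     activiti.create_user('jdoe', 'jdoe@example.com', 'secret')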
| 2.265625 | 2 |
txweb/tests/test_static_file_resource.py | devdave/txWeb | 0 | 12773536 | <filename>txweb/tests/test_static_file_resource.py
from txweb.web_site import WebSite
from twisted.web.server import NOT_DONE_YET
from twisted.web.static import File
from twisted.web.resource import getChildForRequest
from .helper import MockRequest
from pathlib import Path
"""
Reference tests for how I can hack around File.
One complication is that File resource has isLeaf set to 0/False so it's not meant to
be rendered directly but instead through getChild
"""
def test_serves_a_file():
license = Path(__file__).parent / "fixture" / "static" / "LICENSE.txt" # type: Path
resource = File(str(license))
request = MockRequest(["LICENSE.txt"], "/LICENSE.txt")
response = resource.render(request)
    expected = license.read_bytes()
    assert response == NOT_DONE_YET
    assert len(request.written) == 1
    actual = request.written[0]
    assert len(actual) == len(expected)
def test_serves_a_directory():
license = Path(__file__).parent / "fixture" / "static" / "LICENSE.txt" # type: Path
request = MockRequest(["irrelevant","past","path"], "license.txt")
file_resource = File(str(license.parent))
dir_resource = file_resource.getChild("license.txt", request)
response = dir_resource.render(request)
    expected = license.read_bytes()
    assert response == NOT_DONE_YET
    assert len(request.written) == 1
    actual = request.written[0]
    assert len(actual) == len(expected)
| 2.25 | 2 |
tools/demo.py | YuxinZou/vedacls | 1 | 12773537 | import os
import sys
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from vedacls.runner import InferenceRunner
from vedacls.utils import Config
def parse_args():
parser = argparse.ArgumentParser(description='Demo')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument('inp', type=str, help='input video path')
    parser.add_argument('--json_pth', type=str, help='json file path', default=None)
parser.add_argument('--save_pth', type=str, default=None, help='video output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
inference_cfg = cfg['inference']
common_cfg = cfg.get('common')
runner = InferenceRunner(inference_cfg, common_cfg)
runner.load_checkpoint(args.checkpoint)
# labels, scores = runner.inference(args.inp)
# print(labels)
# print(scores)
    if args.save_pth is None:
        args.save_pth = os.path.basename(args.inp)
    print(args.save_pth)
if args.json_pth:
runner.plot_v3(args.inp, args.json_pth, args.save_pth)
else:
runner.plot(args.inp, args.save_pth)
if __name__ == '__main__':
main()
| 2.453125 | 2 |
pubg_python/__init__.py | adamgeraldy/pubg-python | 0 | 12773538 | <reponame>adamgeraldy/pubg-python
from .base import PUBG # noqa
from .domain.base import ( # noqa
Filter,
Shard,
)
__all__ = [
'PUBG',
'Filter',
'Shard',
]
| 1.453125 | 1 |
code/server/odis/tests/entities/test_base.py | michalporeba/odis | 5 | 12773539 | import pytest
from odis.entities import *
from django.db import models
class Sut():
class States(models.TextChoices):
FIRST = ('one', 'the first state')
SECOND = ('two', 'the second state')
THIRD = ('three', 'the third state')
SHORTER = ('s', 'two part tuple')
ANY = ('any', 'from any')
state = None
def to_second(self, *args, **kwargs):
self.state = Sut.States.SECOND
def to_third(self, *args, **kwargs):
self.state = Sut.States.THIRD
def to_any(self, *args, **kwargs):
self.state = Sut.States.ANY
def do(self, action: str, *args, **kwargs):
transitions = {
'progress': [
[Sut.States.FIRST, self.to_second],
[Sut.States.SECOND, self.to_third]
],
'third': [[
Sut.States.FIRST,
Sut.States.SECOND,
self.to_third
], [
Sut.States.THIRD,
None #NoOp
]],
'test_any': self.to_any
}
guard_state_transition(transitions, action, self.state, *args, **kwargs)
def test_progress_action():
sut = Sut()
sut.state = Sut.States.FIRST
sut.do('progress')
assert sut.state == Sut.States.SECOND
sut.do('progress')
assert sut.state == Sut.States.THIRD
with pytest.raises(StateTransitionError):
sut.do('progress')
def test_third_action():
sut = Sut()
sut.state = Sut.States.FIRST
sut.do('third')
assert sut.state == Sut.States.THIRD
sut.do('third')
assert sut.state == Sut.States.THIRD
def test_invalid_operation():
sut = Sut()
sut.state = Sut.States.FIRST
with pytest.raises(UndefinedActionError):
sut.do('undefined')
def test_simple_always_available_operation():
sut = Sut()
sut.do('test_any')
assert sut.state == Sut.States.ANY | 2.296875 | 2 |
enthought/pyface/i_about_dialog.py | enthought/etsproxy | 3 | 12773540 | # proxy module
from pyface.i_about_dialog import *
| 1.085938 | 1 |
version/utils.py | Kramoule/happypanda | 9 | 12773541 | #"""
#This file is part of Happypanda.
#Happypanda is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 2 of the License, or
#any later version.
#Happypanda is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Happypanda. If not, see <http://www.gnu.org/licenses/>.
#"""
import datetime
import os
import subprocess
import sys
import logging
import zipfile
import hashlib
import shutil
import uuid
import re
import scandir
import rarfile
import json
import send2trash
import functools
import time
from PyQt5.QtGui import QImage, qRgba
from PIL import Image,ImageChops
try:
import app_constants
from database import db_constants
except:
from . import app_constants
from .database import db_constants
log = logging.getLogger(__name__)
log_i = log.info
log_d = log.debug
log_w = log.warning
log_e = log.error
log_c = log.critical
IMG_FILES = ('.jpg','.bmp','.png','.gif', '.jpeg')
ARCHIVE_FILES = ('.zip', '.cbz', '.rar', '.cbr')
FILE_FILTER = '*.zip *.cbz *.rar *.cbr'
IMG_FILTER = '*.jpg *.bmp *.png *.jpeg'
rarfile.PATH_SEP = '/'
rarfile.UNRAR_TOOL = app_constants.unrar_tool_path
if not app_constants.unrar_tool_path:
FILE_FILTER = '*.zip *.cbz'
ARCHIVE_FILES = ('.zip', '.cbz')
class GMetafile:
def __init__(self, path=None, archive=''):
self.metadata = {
"title":'',
"artist":'',
"type":'',
"tags":{},
"language":'',
"pub_date":'',
"link":'',
"info":'',
}
self.files = []
if path is None:
return
if archive:
zip = ArchiveFile(archive)
c = zip.dir_contents(path)
for x in c:
if x.endswith(app_constants.GALLERY_METAFILE_KEYWORDS):
self.files.append(open(zip.extract(x), encoding='utf-8'))
else:
for p in scandir.scandir(path):
if p.name in app_constants.GALLERY_METAFILE_KEYWORDS:
self.files.append(open(p.path, encoding='utf-8'))
if self.files:
self.detect()
else:
log_d('No metafile found...')
def _eze(self, fp):
if not fp.name.endswith('.json'):
return
j = json.load(fp, encoding='utf-8')
eze = ['gallery_info', 'image_api_key', 'image_info']
# eze
if all(x in j for x in eze):
log_i('Detected metafile: eze')
ezedata = j['gallery_info']
t_parser = title_parser(ezedata['title'])
self.metadata['title'] = t_parser['title']
self.metadata['type'] = ezedata['category']
for ns in ezedata['tags']:
self.metadata['tags'][ns.capitalize()] = ezedata['tags'][ns]
self.metadata['tags']['default'] = self.metadata['tags'].pop('Misc', [])
self.metadata['artist'] = self.metadata['tags']['Artist'][0].capitalize()\
if 'Artist' in self.metadata['tags'] else t_parser['artist']
self.metadata['language'] = ezedata['language']
d = ezedata['upload_date']
# should be zero padded
d[1] = int("0" + str(d[1])) if len(str(d[1])) == 1 else d[1]
d[3] = int("0" + str(d[1])) if len(str(d[1])) == 1 else d[1]
self.metadata['pub_date'] = datetime.datetime.strptime("{} {} {}".format(d[0], d[1], d[3]), "%Y %m %d")
l = ezedata['source']
self.metadata['link'] = 'http://' + l['site'] + '.org/g/' + str(l['gid']) + '/' + l['token']
return True
def _hdoujindler(self, fp):
"HDoujin Downloader"
if fp.name.endswith('info.txt'):
log_i('Detected metafile: HDoujin text')
lines = fp.readlines()
if lines:
for line in lines:
splitted = line.split(':', 1)
if len(splitted) > 1:
other = splitted[1].strip()
if not other:
continue
l = splitted[0].lower()
if "title" == l:
self.metadata['title'] = other
if "artist" == l:
self.metadata['artist'] = other.capitalize()
if "tags" == l:
self.metadata['tags'].update(tag_to_dict(other))
if "description" == l:
self.metadata['info'] = other
if "circle" in l:
if not "group" in self.metadata['tags']:
self.metadata['tags']['group'] = []
self.metadata['tags']['group'].append(other.strip().lower())
if "url" == l:
self.metadata['link'] = other
return True
        ## Doesn't work for some reason... too lazy to debug
#elif fp.name.endswith('info.json'):
# log_i('Detected metafile: HDoujin json')
# j = json.load(fp, encoding='utf-8')
# j = j['manga_info']
# self.metadata['title'] = j['title']
# for n, a in enumerate(j['artist']):
# at = a
# if not n+1 == len(j['artist']):
# at += ', '
# self.metadata['artist'] += at
# tags = {}
# for x in j['tags']:
# ns = 'default' if x == 'misc' else x.capitalize()
# tags[ns] = []
        #        for y in j['tags'][x]:
# tags[ns].append(y.strip().lower())
# self.metadata['tags'] = tags
# self.metadata['link'] = j['url']
# self.metadata['info'] = j['description']
# for x in j['circle']:
# if not "group" in self.metadata['tags']:
# self.metadata['tags']['group'] = []
# self.metadata['tags']['group'].append(x.strip().lower())
# return True
def detect(self):
for fp in self.files:
with fp:
z = False
for x in [self._eze, self._hdoujindler]:
try:
if x(fp):
z = True
break
except Exception:
log.exception('Error in parsing metafile')
continue
if not z:
log_i('Incompatible metafiles found')
def update(self, other):
self.metadata.update((x, y) for x, y in other.metadata.items() if y)
def apply_gallery(self, gallery):
log_i('Applying metafile to gallery')
if self.metadata['title']:
gallery.title = self.metadata['title']
if self.metadata['artist']:
gallery.artist = self.metadata['artist']
if self.metadata['type']:
gallery.type = self.metadata['type']
if self.metadata['tags']:
gallery.tags = self.metadata['tags']
if self.metadata['language']:
gallery.language = self.metadata['language']
if self.metadata['pub_date']:
gallery.pub_date = self.metadata['pub_date']
if self.metadata['link']:
gallery.link = self.metadata['link']
if self.metadata['info']:
gallery.info = self.metadata['info']
return gallery
def backup_database(db_path=db_constants.DB_PATH):
log_i("Perfoming database backup")
date = "{}".format(datetime.datetime.today()).split(' ')[0]
base_path, name = os.path.split(db_path)
backup_dir = os.path.join(base_path, 'backup')
if not os.path.isdir(backup_dir):
os.mkdir(backup_dir)
db_name = "{}-{}".format(date, name)
current_try = 0
orig_db_name = db_name
while current_try < 50:
if current_try:
db_name = "{}({})-{}".format(date, current_try, orig_db_name)
try:
dst_path = os.path.join(backup_dir, db_name)
if os.path.exists(dst_path):
raise ValueError
shutil.copyfile(db_path, dst_path)
break
except ValueError:
current_try += 1
log_i("Database backup perfomed: {}".format(db_name))
return True
def get_date_age(date):
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(n, s):
'''Add "s" if it's plural'''
if n == 1:
return "1 %s" % s
elif n > 1:
return "%d %ss" % (n, s)
    def q_n_r(a, b):
        '''Return quotient and remainder'''
        # floor division keeps the quotient integral under Python 3
        return a // b, a % b
class PrettyDelta:
def __init__(self, dt):
now = datetime.datetime.now()
delta = now - dt
self.day = delta.days
self.second = delta.seconds
self.year, self.day = q_n_r(self.day, 365)
self.month, self.day = q_n_r(self.day, 30)
self.hour, self.second = q_n_r(self.second, 3600)
self.minute, self.second = q_n_r(self.second, 60)
def format(self):
for period in ['year', 'month', 'day', 'hour', 'minute', 'second']:
n = getattr(self, period)
if n > 0.9:
return formatn(n, period)
return "0 second"
return PrettyDelta(date).format()
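# Illustrative results (hedged; exact wording comes from formatn above):
#   get_date_age(now - 26 hours)   -> '1 day'
#   get_date_age(now - 90 seconds) -> '1 minute'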
def all_opposite(*args):
"Returns true if all items in iterable evaluae to false"
for iterable in args:
for x in iterable:
if x:
return False
return True
def update_gallery_path(new_path, gallery):
"Updates a gallery's chapters path"
for chap in gallery.chapters:
head, tail = os.path.split(chap.path)
if gallery.path == chap.path:
chap.path = new_path
elif gallery.path == head:
chap.path = os.path.join(new_path, tail)
gallery.path = new_path
return gallery
def move_files(path, dest='', only_path=False):
"""
Move files to a new destination. If dest is not set,
imported_galleries_def_path will be used instead.
"""
if not dest:
dest = app_constants.IMPORTED_GALLERY_DEF_PATH
if not dest:
return path
f = os.path.split(path)[1]
new_path = os.path.join(dest, f)
if not only_path:
log_i("Moving to: {}".format(new_path))
    if new_path == os.path.join(*os.path.split(path)): # need to unpack to make sure we get the correct sep
return path
if not os.path.exists(new_path):
app_constants.TEMP_PATH_IGNORE.append(os.path.normcase(new_path))
if not only_path:
new_path = shutil.move(path, new_path)
else:
return path
return new_path
def check_ignore_list(key):
k = os.path.normcase(key)
if os.path.isdir(key) and 'Folder' in app_constants.IGNORE_EXTS:
return False
_, ext = os.path.splitext(key)
if ext in app_constants.IGNORE_EXTS:
return False
for path in app_constants.IGNORE_PATHS:
p = os.path.normcase(path)
if p in k:
return False
return True
def gallery_text_fixer(gallery):
regex_str = app_constants.GALLERY_DATA_FIX_REGEX
if regex_str:
try:
valid_regex = re.compile(regex_str)
except re.error:
return None
if not valid_regex:
return None
def replace_regex(text):
new_text = re.sub(regex_str, app_constants.GALLERY_DATA_FIX_REPLACE, text)
return new_text
if app_constants.GALLERY_DATA_FIX_TITLE:
gallery.title = replace_regex(gallery.title)
if app_constants.GALLERY_DATA_FIX_ARTIST:
gallery.artist = replace_regex(gallery.artist)
return gallery
def b_search(data, key):
if key:
lo = 0
hi = len(data) - 1
while hi >= lo:
mid = lo + (hi - lo) // 2
if data[mid] < key:
lo = mid + 1
elif data[mid] > key:
hi = mid - 1
else:
return data[mid]
return None
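# Illustrative binary search over a sorted sequence (falls through to None
# when the key is absent or falsy):
#   b_search([1, 3, 5, 8], 5) -> 5
#   b_search([1, 3, 5, 8], 4) -> None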
def generate_img_hash(src):
"""
Generates sha1 hash based on the given bytes.
Returns hex-digits
"""
chunk = 8129
sha1 = hashlib.sha1()
buffer = src.read(chunk)
log_d("Generating hash")
while len(buffer) > 0:
sha1.update(buffer)
buffer = src.read(chunk)
return sha1.hexdigest()
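# Sketch of typical usage (any binary file-like object exposing .read() works):
#   with open(img_path, 'rb') as f:
#       digest = generate_img_hash(f)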
class ArchiveFile():
"""
Work with archive files, raises exception if instance fails.
namelist -> returns a list with all files in archive
extract <- Extracts one specific file to given path
open -> open the given file in archive, returns bytes
close -> close archive
"""
zip, rar = range(2)
def __init__(self, filepath):
self.type = 0
try:
if filepath.endswith(ARCHIVE_FILES):
if filepath.endswith(ARCHIVE_FILES[:2]):
self.archive = zipfile.ZipFile(os.path.normcase(filepath))
b_f = self.archive.testzip()
self.type = self.zip
elif filepath.endswith(ARCHIVE_FILES[2:]):
self.archive = rarfile.RarFile(os.path.normcase(filepath))
b_f = self.archive.testrar()
self.type = self.rar
# test for corruption
if b_f:
log_w('Bad file found in archive {}'.format(filepath.encode(errors='ignore')))
raise app_constants.CreateArchiveFail
else:
log_e('Archive: Unsupported file format')
raise app_constants.CreateArchiveFail
except:
log.exception('Create archive: FAIL')
raise app_constants.CreateArchiveFail
def namelist(self):
filelist = self.archive.namelist()
return filelist
def is_dir(self, name):
"""
Checks if the provided name in the archive is a directory or not
"""
if not name:
return False
if not name in self.namelist():
log_e('File {} not found in archive'.format(name))
raise app_constants.FileNotFoundInArchive
if self.type == self.zip:
if name.endswith('/'):
return True
elif self.type == self.rar:
info = self.archive.getinfo(name)
return info.isdir()
return False
def dir_list(self, only_top_level=False):
"""
Returns a list of all directories found recursively. For directories not in toplevel
a path in the archive to the diretory will be returned.
"""
if only_top_level:
if self.type == self.zip:
return [x for x in self.namelist() if x.endswith('/') and x.count('/') == 1]
elif self.type == self.rar:
potential_dirs = [x for x in self.namelist() if x.count('/') == 0]
return [x.filename for x in [self.archive.getinfo(y) for y in potential_dirs] if x.isdir()]
else:
if self.type == self.zip:
return [x for x in self.namelist() if x.endswith('/') and x.count('/') >= 1]
elif self.type == self.rar:
return [x.filename for x in self.archive.infolist() if x.isdir()]
def dir_contents(self, dir_name):
"""
Returns a list of contents in the directory
An empty string will return the contents of the top folder
"""
if dir_name and not dir_name in self.namelist():
log_e('Directory {} not found in archive'.format(dir_name))
raise app_constants.FileNotFoundInArchive
if not dir_name:
if self.type == self.zip:
con = [x for x in self.namelist() if x.count('/') == 0 or \
(x.count('/') == 1 and x.endswith('/'))]
elif self.type == self.rar:
con = [x for x in self.namelist() if x.count('/') == 0]
return con
if self.type == self.zip:
dir_con_start = [x for x in self.namelist() if x.startswith(dir_name)]
return [x for x in dir_con_start if x.count('/') == dir_name.count('/') and \
(x.count('/') == dir_name.count('/') and not x.endswith('/')) or \
(x.count('/') == 1 + dir_name.count('/') and x.endswith('/'))]
elif self.type == self.rar:
return [x for x in self.namelist() if x.startswith(dir_name) and \
x.count('/') == 1 + dir_name.count('/')]
return []
def extract(self, file_to_ext, path=None):
"""
Extracts one file from archive to given path
Creates a temp_dir if path is not specified
Returns path to the extracted file
"""
if not path:
path = os.path.join(app_constants.temp_dir, str(uuid.uuid4()))
os.mkdir(path)
if not file_to_ext:
return self.extract_all(path)
else:
if self.type == self.zip:
membs = []
for name in self.namelist():
if name.startswith(file_to_ext) and name != file_to_ext:
membs.append(name)
temp_p = self.archive.extract(file_to_ext, path)
for m in membs:
self.archive.extract(m, path)
elif self.type == self.rar:
temp_p = os.path.join(path, file_to_ext)
self.archive.extract(file_to_ext, path)
return temp_p
def extract_all(self, path=None, member=None):
"""
Extracts all files to given path, and returns path
If path is not specified, a temp dir will be created
"""
if not path:
path = os.path.join(app_constants.temp_dir, str(uuid.uuid4()))
os.mkdir(path)
        if member:
            self.archive.extractall(path, member)
        else:
            # only fall back to a full extraction when no members were given
            self.archive.extractall(path)
return path
def open(self, file_to_open, fp=False):
"""
Returns bytes. If fp set to true, returns file-like object.
"""
if fp:
return self.archive.open(file_to_open)
else:
return self.archive.open(file_to_open).read()
def close(self):
self.archive.close()
def check_archive(archive_path):
"""
Checks archive path for potential galleries.
Returns a list with a path in archive to galleries
if there is no directories
"""
try:
zip = ArchiveFile(archive_path)
except app_constants.CreateArchiveFail:
return []
if not zip:
return []
galleries = []
zip_dirs = zip.dir_list()
def gallery_eval(d):
con = zip.dir_contents(d)
if con:
gallery_probability = len(con)
for n in con:
if not n.lower().endswith(IMG_FILES):
gallery_probability -= 1
if gallery_probability >= (len(con) * 0.8):
return d
if zip_dirs: # There are directories in the top folder
# check parent
r = gallery_eval('')
if r:
galleries.append('')
for d in zip_dirs:
r = gallery_eval(d)
if r:
galleries.append(r)
zip.close()
else: # all pages are in top folder
if isinstance(gallery_eval(''), str):
galleries.append('')
zip.close()
return galleries
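# Illustrative return values (zip archives; names are placeholders):
#   images only in the top folder         -> ['']
#   images split into per-chapter folders -> ['ch1/', 'ch2/', ...]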
def recursive_gallery_check(path):
"""
Recursively checks a folder for any potential galleries
Returns a list of paths for directories and a list of tuples where first
index is path to gallery in archive and second index is path to archive.
Like this:
["C:path/to/g"] and [("path/to/g/in/a", "C:path/to/a")]
"""
gallery_dirs = []
gallery_arch = []
found_paths = 0
for root, subfolders, files in scandir.walk(path):
if files:
for f in files:
if f.endswith(ARCHIVE_FILES):
arch_path = os.path.join(root, f)
for g in check_archive(arch_path):
found_paths += 1
gallery_arch.append((g, arch_path))
if not subfolders:
if not files:
continue
gallery_probability = len(files)
for f in files:
if not f.lower().endswith(IMG_FILES):
gallery_probability -= 1
if gallery_probability >= (len(files) * 0.8):
found_paths += 1
gallery_dirs.append(root)
log_i('Found {} in {}'.format(found_paths, path).encode(errors='ignore'))
return gallery_dirs, gallery_arch
def today():
"Returns current date in a list: [dd, Mmm, yyyy]"
_date = datetime.date.today()
day = _date.strftime("%d")
month = _date.strftime("%b")
year = _date.strftime("%Y")
return [day, month, year]
def external_viewer_checker(path):
check_dict = app_constants.EXTERNAL_VIEWER_SUPPORT
viewer = os.path.split(path)[1]
for x in check_dict:
allow = False
for n in check_dict[x]:
if viewer.lower() in n.lower():
allow = True
break
if allow:
return x
def open_chapter(chapterpath, archive=None):
is_archive = True if archive else False
if not is_archive:
chapterpath = os.path.normpath(chapterpath)
temp_p = archive if is_archive else chapterpath
custom_args = app_constants.EXTERNAL_VIEWER_ARGS
send_folder_t = '{$folder}'
send_image_t = '{$file}'
send_folder = True
if app_constants.USE_EXTERNAL_VIEWER:
send_folder = True
if custom_args:
if send_folder_t in custom_args:
send_folder = True
elif send_image_t in custom_args:
send_folder = False
def find_f_img_folder():
filepath = os.path.join(temp_p, [x for x in sorted([y.name for y in scandir.scandir(temp_p)])\
if x.lower().endswith(IMG_FILES) and not x.startswith('.')][0]) # Find first page
return temp_p if send_folder else filepath
def find_f_img_archive(extract=True):
zip = ArchiveFile(temp_p)
if extract:
app_constants.NOTIF_BAR.add_text('Extracting...')
t_p = os.path.join('temp', str(uuid.uuid4()))
os.mkdir(t_p)
if is_archive or chapterpath.endswith(ARCHIVE_FILES):
if os.path.isdir(chapterpath):
t_p = chapterpath
elif chapterpath.endswith(ARCHIVE_FILES):
zip2 = ArchiveFile(chapterpath)
f_d = sorted(zip2.dir_list(True))
if f_d:
f_d = f_d[0]
t_p = zip2.extract(f_d, t_p)
else:
t_p = zip2.extract('', t_p)
else:
t_p = zip.extract(chapterpath, t_p)
else:
zip.extract_all(t_p) # Compatibility reasons.. TODO: REMOVE IN BETA
if send_folder:
filepath = t_p
else:
filepath = os.path.join(t_p, [x for x in sorted([y.name for y in scandir.scandir(t_p)])\
if x.lower().endswith(IMG_FILES) and not x.startswith('.')][0]) # Find first page
filepath = os.path.abspath(filepath)
else:
if is_archive or chapterpath.endswith(ARCHIVE_FILES):
con = zip.dir_contents('')
f_img = [x for x in sorted(con) if x.lower().endswith(IMG_FILES) and not x.startswith('.')]
if not f_img:
log_w('Extracting archive.. There are no images in the top-folder. ({})'.format(archive))
return find_f_img_archive()
filepath = os.path.normpath(archive)
else:
app_constants.NOTIF_BAR.add_text("Fatal error: Unsupported gallery!")
raise ValueError("Unsupported gallery version")
return filepath
try:
try: # folder
filepath = find_f_img_folder()
except NotADirectoryError: # archive
try:
if not app_constants.EXTRACT_CHAPTER_BEFORE_OPENING and app_constants.EXTERNAL_VIEWER_PATH:
filepath = find_f_img_archive(False)
else:
filepath = find_f_img_archive()
except app_constants.CreateArchiveFail:
log.exception('Could not open chapter')
app_constants.NOTIF_BAR.add_text('Could not open chapter. Check happypanda.log for more details.')
return
except FileNotFoundError:
log.exception('Could not find chapter {}'.format(chapterpath))
app_constants.NOTIF_BAR.add_text("Chapter does no longer exist!")
return
except IndexError:
log.exception('No images found: {}'.format(chapterpath))
app_constants.NOTIF_BAR.add_text("No images found in chapter!")
return
if send_folder_t in custom_args:
custom_args = custom_args.replace(send_folder_t, filepath)
elif send_image_t in custom_args:
custom_args = custom_args.replace(send_image_t, filepath)
else:
custom_args = filepath
try:
app_constants.NOTIF_BAR.add_text('Opening chapter...')
if not app_constants.USE_EXTERNAL_VIEWER:
if sys.platform.startswith('darwin'):
subprocess.call(('open', custom_args))
elif os.name == 'nt':
os.startfile(custom_args)
elif os.name == 'posix':
subprocess.call(('xdg-open', custom_args))
else:
ext_path = app_constants.EXTERNAL_VIEWER_PATH
viewer = external_viewer_checker(ext_path)
if viewer == 'honeyview':
if app_constants.OPEN_GALLERIES_SEQUENTIALLY:
subprocess.call((ext_path, custom_args))
else:
subprocess.Popen((ext_path, custom_args))
else:
if app_constants.OPEN_GALLERIES_SEQUENTIALLY:
subprocess.check_call((ext_path, custom_args))
else:
subprocess.Popen((ext_path, custom_args))
except subprocess.CalledProcessError:
app_constants.NOTIF_BAR.add_text("Could not open chapter. Invalid external viewer.")
log.exception('Could not open chapter. Invalid external viewer.')
except:
app_constants.NOTIF_BAR.add_text("Could not open chapter for unknown reasons. Check happypanda.log!")
log_e('Could not open chapter {}'.format(os.path.split(chapterpath)[1]))
def get_gallery_img(gallery_or_path, chap_number=0):
"""
Returns a path to image in gallery chapter
"""
archive = None
if isinstance(gallery_or_path, str):
path = gallery_or_path
else:
path = gallery_or_path.chapters[chap_number].path
if gallery_or_path.is_archive:
archive = gallery_or_path.path
# TODO: add chapter support
try:
name = os.path.split(path)[1]
except IndexError:
name = os.path.split(path)[0]
is_archive = True if archive or name.endswith(ARCHIVE_FILES) else False
real_path = archive if archive else path
img_path = None
if is_archive:
try:
log_i('Getting image from archive')
zip = ArchiveFile(real_path)
temp_path = os.path.join(app_constants.temp_dir, str(uuid.uuid4()))
os.mkdir(temp_path)
if not archive:
f_img_name = sorted([img for img in zip.namelist() if img.lower().endswith(IMG_FILES) and not img.startswith('.')])[0]
else:
f_img_name = sorted([img for img in zip.dir_contents(path) if img.lower().endswith(IMG_FILES) and not img.startswith('.')])[0]
img_path = zip.extract(f_img_name, temp_path)
zip.close()
except app_constants.CreateArchiveFail:
img_path = app_constants.NO_IMAGE_PATH
elif os.path.isdir(real_path):
log_i('Getting image from folder')
first_img = sorted([img.name for img in scandir.scandir(real_path) if img.name.lower().endswith(tuple(IMG_FILES)) and not img.name.startswith('.')])
if first_img:
img_path = os.path.join(real_path, first_img[0])
if img_path:
return os.path.abspath(img_path)
else:
log_e("Could not get gallery image")
def tag_to_string(gallery_tag, simple=False):
"""
    Takes gallery tags and converts them to a string; returns the string.
if simple is set to True, returns a CSV string, else a dict-like string
"""
assert isinstance(gallery_tag, dict), "Please provide a dict like this: {'namespace':['tag1']}"
string = ""
if not simple:
for n, namespace in enumerate(sorted(gallery_tag), 1):
if len(gallery_tag[namespace]) != 0:
if namespace != 'default':
string += namespace + ":"
# find tags
if namespace != 'default' and len(gallery_tag[namespace]) > 1:
string += '['
for x, tag in enumerate(sorted(gallery_tag[namespace]), 1):
# if we are at the end of the list
if x == len(gallery_tag[namespace]):
string += tag
else:
string += tag + ', '
if namespace != 'default' and len(gallery_tag[namespace]) > 1:
string += ']'
# if we aren't at the end of the list
if not n == len(gallery_tag):
string += ', '
else:
for n, namespace in enumerate(sorted(gallery_tag), 1):
if len(gallery_tag[namespace]) != 0:
if namespace != 'default':
string += namespace + ","
# find tags
for x, tag in enumerate(sorted(gallery_tag[namespace]), 1):
# if we are at the end of the list
if x == len(gallery_tag[namespace]):
string += tag
else:
string += tag + ', '
# if we aren't at the end of the list
if not n == len(gallery_tag):
string += ', '
return string
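# Illustrative example (simple=False; order follows the sorted() calls above):
#   tag_to_string({'Artist': ['alice', 'bob'], 'default': ['color']})
#   -> 'Artist:[alice, bob], color'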
def tag_to_dict(string, ns_capitalize=True):
"Receives a string of tags and converts it to a dict of tags"
namespace_tags = {'default':[]}
level = 0 # so we know if we are in a list
buffer = ""
stripped_set = set() # we only need unique values
for n, x in enumerate(string, 1):
if x == '[':
level += 1 # we are now entering a list
if x == ']':
level -= 1 # we are now exiting a list
if x == ',': # if we meet a comma
# we trim our buffer if we are at top level
            if level == 0:
# add to list
stripped_set.add(buffer.strip())
buffer = ""
else:
buffer += x
elif n == len(string): # or at end of string
buffer += x
# add to list
stripped_set.add(buffer.strip())
buffer = ""
else:
buffer += x
def tags_in_list(br_tags):
"Receives a string of tags enclosed in brackets, returns a list with tags"
unique_tags = set()
tags = br_tags.replace('[', '').replace(']','')
tags = tags.split(',')
for t in tags:
if len(t) != 0:
unique_tags.add(t.strip().lower())
return list(unique_tags)
unique_tags = set()
for ns_tag in stripped_set:
splitted_tag = ns_tag.split(':')
# if there is a namespace
if len(splitted_tag) > 1 and len(splitted_tag[0]) != 0:
if splitted_tag[0] != 'default':
if ns_capitalize:
namespace = splitted_tag[0].capitalize()
else:
namespace = splitted_tag[0]
else:
namespace = splitted_tag[0]
tags = splitted_tag[1]
# if tags are enclosed in brackets
if '[' in tags and ']' in tags:
tags = tags_in_list(tags)
tags = [x for x in tags if len(x) != 0]
# if namespace is already in our list
if namespace in namespace_tags:
for t in tags:
# if tag not already in ns list
if not t in namespace_tags[namespace]:
namespace_tags[namespace].append(t)
else:
# to avoid empty strings
namespace_tags[namespace] = tags
else: # only one tag
if len(tags) != 0:
if namespace in namespace_tags:
namespace_tags[namespace].append(tags)
else:
namespace_tags[namespace] = [tags]
else: # no namespace specified
tag = splitted_tag[0]
if len(tag) != 0:
unique_tags.add(tag.lower())
if len(unique_tags) != 0:
for t in unique_tags:
namespace_tags['default'].append(t)
return namespace_tags
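# Illustrative inverse of tag_to_string (list order inside a namespace may
# vary, since sets are used internally):
#   tag_to_dict('artist:[alice, bob], color')
#   -> {'default': ['color'], 'Artist': ['alice', 'bob']}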
import re as regex
def title_parser(title):
"Receives a title to parse. Returns dict with 'title', 'artist' and language"
log_d("Parsing title: {}".format(title))
#If title is not absolute, then it's not a pathname and we allow a "/" inside it
if(os.path.isabs(title)):
title = os.path.basename(title)
title = " ".join(title.split())
# if '/' in title:
# try:
# title = os.path.split(title)[1]
# if not title:
# title = title
# except IndexError:
# pass
for x in ARCHIVE_FILES:
if title.endswith(x):
title = title[:-len(x)]
parsed_title = {'title':"", 'artist':"", 'language':""}
try:
a = regex.findall('((?<=\[) *[^\]]+( +\S+)* *(?=\]))', title)
assert len(a) != 0
try:
artist = a[0][0].strip()
except IndexError:
artist = ''
parsed_title['artist'] = artist
try:
assert a[1]
lang = app_constants.G_LANGUAGES + app_constants.G_CUSTOM_LANGUAGES
for x in a:
l = x[0].strip()
l = l.lower()
l = l.capitalize()
if l in lang:
parsed_title['language'] = l
break
else:
parsed_title['language'] = app_constants.G_DEF_LANGUAGE
except IndexError:
parsed_title['language'] = app_constants.G_DEF_LANGUAGE
t = title
for x in a:
t = t.replace(x[0], '')
t = t.replace('[]', '')
final_title = t.strip()
parsed_title['title'] = final_title
except AssertionError:
parsed_title['title'] = title
return parsed_title
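# Illustrative parse (assumes 'English' is listed in app_constants.G_LANGUAGES):
#   title_parser('[alice] My Gallery [English].zip')
#   -> {'title': 'My Gallery', 'artist': 'alice', 'language': 'English'}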
import webbrowser
def open_web_link(url):
if not url:
return
try:
webbrowser.open_new_tab(url)
except:
log_e('Could not open URL in browser')
def open_path(path, select=''):
""
try:
if sys.platform.startswith('darwin'):
subprocess.Popen(['open', path])
elif os.name == 'nt':
if select:
subprocess.Popen(r'explorer.exe /select,"{}"'.format(os.path.normcase(select)), shell=True)
else:
os.startfile(path)
elif os.name == 'posix':
subprocess.Popen(('xdg-open', path))
else:
app_constants.NOTIF_BAR.add_text("I don't know how you've managed to do this.. If you see this, you're in deep trouble...")
log_e('Could not open path: no OS found')
except:
app_constants.NOTIF_BAR.add_text("Could not open specified location. It might not exist anymore.")
log_e('Could not open path')
def open_torrent(path):
if not app_constants.TORRENT_CLIENT:
open_path(path)
else:
subprocess.Popen([app_constants.TORRENT_CLIENT, path])
def delete_path(path):
"Deletes the provided recursively"
s = True
if os.path.exists(path):
error = ''
if app_constants.SEND_FILES_TO_TRASH:
try:
send2trash.send2trash(path)
except:
log.exception("Unable to send file to trash")
error = 'Unable to send file to trash'
else:
try:
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
except PermissionError:
error = 'PermissionError'
except FileNotFoundError:
pass
if error:
p = os.path.split(path)[1]
log_e('Failed to delete: {}:{}'.format(error, p))
            app_constants.NOTIF_BAR.add_text('An error occurred while trying to delete: {}'.format(error))
s = False
return s
def regex_search(a, b, override_case=False, args=[]):
"Looks for a in b"
if a and b:
try:
if not app_constants.Search.Case in args or override_case:
if regex.search(a, b, regex.IGNORECASE):
return True
else:
if regex.search(a, b):
return True
except regex.error:
pass
return False
def search_term(a, b, override_case=False, args=[]):
"Searches for a in b"
if a and b:
if not app_constants.Search.Case in args or override_case:
b = b.lower()
a = a.lower()
if app_constants.Search.Strict in args:
if a == b:
return True
else:
if a in b:
return True
return False
def get_terms(term):
"Dividies term into pieces. Returns a list with the pieces"
# some variables we will use
pieces = []
piece = ''
    quote_level = 0
bracket_level = 0
brackets_tags = {}
current_bracket_ns = ''
end_of_bracket = False
blacklist = ['[', ']', '"', ',']
for n, x in enumerate(term):
# if we meet brackets
if x == '[':
bracket_level += 1
brackets_tags[piece] = set() # we want unique tags!
current_bracket_ns = piece
elif x == ']':
bracket_level -= 1
end_of_bracket = True
        # if we meet a double quote
        if x == '"':
            if quote_level > 0:
                quote_level -= 1
            else:
                quote_level += 1
        # if we meet a whitespace, comma or end of term and are not inside double quotes
        if (x == ' ' or x == ',' or n == len(term) - 1) and quote_level == 0:
# if end of term and x is allowed
if (n == len(term) - 1) and not x in blacklist and x != ' ':
piece += x
if piece:
if bracket_level > 0 or end_of_bracket: # if we are inside a bracket we put piece in the set
end_of_bracket = False
if piece.startswith(current_bracket_ns):
piece = piece[len(current_bracket_ns):]
if piece:
try:
brackets_tags[current_bracket_ns].add(piece)
except KeyError: # keyerror when there is a closing bracket without a starting bracket
pass
else:
pieces.append(piece) # else put it in the normal list
piece = ''
continue
# else append to the buffers
if not x in blacklist:
            if quote_level > 0: # we want to include everything inside double quotes
piece += x
elif x != ' ':
piece += x
# now for the bracket tags
for ns in brackets_tags:
for tag in brackets_tags[ns]:
ns_tag = ns
            # if they want to exclude this tag
if tag[0] == '-':
if ns_tag[0] != '-':
ns_tag = '-' + ns
tag = tag[1:] # remove the '-'
# put them together
ns_tag += tag
# done
pieces.append(ns_tag)
return pieces
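# Illustrative split (quoted phrases stay intact, bracketed namespace tags are
# expanded; the order of bracket-derived pieces may vary, since a set is used):
#   get_terms('tag:[a, -b] "two words" title')
#   -> ['two words', 'title', 'tag:a', '-tag:b']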
def image_greyscale(filepath):
"""
Check if image is monochrome (1 channel or 3 identical channels)
"""
log_d("Checking if img is monochrome: {}".format(filepath))
im = Image.open(filepath).convert("RGB")
if im.mode not in ("L", "RGB"):
return False
if im.mode == "RGB":
rgb = im.split()
if ImageChops.difference(rgb[0],rgb[1]).getextrema()[1] != 0:
return False
if ImageChops.difference(rgb[0],rgb[2]).getextrema()[1] != 0:
return False
return True
def PToQImageHelper(im):
"""
The Python Imaging Library (PIL) is
Copyright © 1997-2011 by Secret Labs AB
Copyright © 1995-2011 by <NAME>
"""
def rgb(r, g, b, a=255):
"""(Internal) Turns an RGB color into a Qt compatible color integer."""
# use qRgb to pack the colors, and then turn the resulting long
# into a negative integer with the same bitpattern.
return (qRgba(r, g, b, a) & 0xffffffff)
def align8to32(bytes, width, mode):
"""
converts each scanline of data from 8 bit to 32 bit aligned
"""
bits_per_pixel = {
'1': 1,
'L': 8,
'P': 8,
}[mode]
# calculate bytes per line and the extra padding if needed
bits_per_line = bits_per_pixel * width
full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)
bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)
extra_padding = -bytes_per_line % 4
# already 32 bit aligned by luck
if not extra_padding:
return bytes
new_data = []
for i in range(len(bytes) // bytes_per_line):
new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line] + b'\x00' * extra_padding)
return b''.join(new_data)
data = None
colortable = None
# handle filename, if given instead of image name
if hasattr(im, "toUtf8"):
# FIXME - is this really the best way to do this?
if str is bytes:
im = unicode(im.toUtf8(), "utf-8")
else:
im = str(im.toUtf8(), "utf-8")
if isinstance(im, (bytes, str)):
im = Image.open(im)
if im.mode == "1":
format = QImage.Format_Mono
elif im.mode == "L":
format = QImage.Format_Indexed8
colortable = []
for i in range(256):
colortable.append(rgb(i, i, i))
elif im.mode == "P":
format = QImage.Format_Indexed8
colortable = []
palette = im.getpalette()
for i in range(0, len(palette), 3):
colortable.append(rgb(*palette[i:i+3]))
elif im.mode == "RGB":
data = im.tobytes("raw", "BGRX")
format = QImage.Format_RGB32
elif im.mode == "RGBA":
try:
data = im.tobytes("raw", "BGRA")
except SystemError:
# workaround for earlier versions
r, g, b, a = im.split()
im = Image.merge("RGBA", (b, g, r, a))
format = QImage.Format_ARGB32
else:
raise ValueError("unsupported image mode %r" % im.mode)
# must keep a reference, or Qt will crash!
__data = data or align8to32(im.tobytes(), im.size[0], im.mode)
return {
'data': __data, 'im': im, 'format': format, 'colortable': colortable
}
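# Sketch of consuming the returned dict with PyQt5 (hedged example; relies on
# the standard QImage(bytes, width, height, format) constructor):
#   d = PToQImageHelper(pil_image)
#   qimg = QImage(d['data'], d['im'].size[0], d['im'].size[1], d['format'])
#   if d['colortable']:
#       qimg.setColorTable(d['colortable'])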
def make_chapters(gallery_object):
chap_container = gallery_object.chapters
path = gallery_object.path
metafile = GMetafile()
try:
log_d('Listing dir...')
con = scandir.scandir(path) # list all folders in gallery dir
log_i('Gallery source is a directory')
log_d('Sorting')
chapters = sorted([sub.path for sub in con if sub.is_dir() or sub.name.endswith(ARCHIVE_FILES)]) #subfolders
# if gallery has chapters divided into sub folders
if len(chapters) != 0:
log_d('Chapters divided in folders..')
for ch in chapters:
chap = chap_container.create_chapter()
chap.title = title_parser(ch)['title']
chap.path = os.path.join(path, ch)
metafile.update(GMetafile(chap.path))
chap.pages = len([x for x in scandir.scandir(chap.path) if x.name.lower().endswith(IMG_FILES)])
else: #else assume that all images are in gallery folder
chap = chap_container.create_chapter()
chap.title = title_parser(os.path.split(path)[1])['title']
chap.path = path
metafile.update(GMetafile(path))
chap.pages = len([x for x in scandir.scandir(path) if x.name.lower().endswith(IMG_FILES)])
except NotADirectoryError:
if path.endswith(ARCHIVE_FILES):
gallery_object.is_archive = 1
log_i("Gallery source is an archive")
archive_g = sorted(check_archive(path))
for g in archive_g:
chap = chap_container.create_chapter()
chap.path = g
chap.in_archive = 1
metafile.update(GMetafile(g, path))
arch = ArchiveFile(path)
chap.pages = len(arch.dir_contents(g))
arch.close()
metafile.apply_gallery(gallery_object)
def timeit(func):
@functools.wraps(func)
def newfunc(*args, **kwargs):
startTime = time.time()
func(*args, **kwargs)
elapsedTime = time.time() - startTime
print('function [{}] finished in {} ms'.format(
func.__name__, int(elapsedTime * 1000)))
return newfunc
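# Example use of the decorator (illustrative):
#   @timeit
#   def load_galleries():
#       ...
#   load_galleries()  # prints "function [load_galleries] finished in ... ms"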
def makedirs_if_not_exists(folder):
"""Create directory if not exists.
Args:
folder: Target folder.
"""
if not os.path.isdir(folder):
os.makedirs(folder)
def lookup_tag(tag):
"Issues a tag lookup on preferred site"
assert isinstance(tag, str), "str not " + str(type(tag))
# remove whitespace at edges and replace whitespace with +
tag = tag.strip().lower().replace(' ', '+')
url = app_constants.DEFAULT_EHEN_URL
if not url.endswith('/'):
url += '/'
if not ':' in tag:
tag = 'misc:' + tag
url += 'tag/' + tag
open_web_link(url) | 1.992188 | 2 |
pyramid_swagger/__init__.py | hiromu/pyramid_swagger | 61 | 12773542 | <filename>pyramid_swagger/__init__.py
# -*- coding: utf-8 -*-
"""
Import this module to add the validation tween to your pyramid app.
"""
from __future__ import absolute_import
import pyramid
from pyramid_swagger.api import build_swagger_20_swagger_schema_views
from pyramid_swagger.api import register_api_doc_endpoints
from pyramid_swagger.ingest import get_swagger_schema
from pyramid_swagger.ingest import get_swagger_spec
from pyramid_swagger.renderer import PyramidSwaggerRendererFactory
from pyramid_swagger.tween import get_swagger_versions
from pyramid_swagger.tween import SWAGGER_12
from pyramid_swagger.tween import SWAGGER_20
def includeme(config):
"""
:type config: :class:`pyramid.config.Configurator`
"""
settings = config.registry.settings
swagger_versions = get_swagger_versions(settings)
# for rendering /swagger.yaml
config.add_renderer(
'yaml', 'pyramid_swagger.api.YamlRendererFactory',
)
# Add the SwaggerSchema to settings to make it available to the validation
# tween and `register_api_doc_endpoints`
settings['pyramid_swagger.schema12'] = None
settings['pyramid_swagger.schema20'] = None
# Store under two keys so that 1.2 and 2.0 can co-exist.
if SWAGGER_12 in swagger_versions:
settings['pyramid_swagger.schema12'] = get_swagger_schema(settings)
if SWAGGER_20 in swagger_versions:
settings['pyramid_swagger.schema20'] = get_swagger_spec(settings)
config.add_tween(
"pyramid_swagger.tween.validation_tween_factory",
under=pyramid.tweens.EXCVIEW
)
config.add_renderer('pyramid_swagger', PyramidSwaggerRendererFactory())
if settings.get('pyramid_swagger.enable_api_doc_views', True):
if SWAGGER_12 in swagger_versions:
register_api_doc_endpoints(
config,
settings['pyramid_swagger.schema12'].get_api_doc_endpoints())
if SWAGGER_20 in swagger_versions:
register_api_doc_endpoints(
config,
build_swagger_20_swagger_schema_views(config),
base_path=settings.get('pyramid_swagger.base_path_api_docs', ''))
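# Typical wiring from an application's main(), shown as a hedged sketch using
# Pyramid's standard include mechanism:
#   config = Configurator(settings=settings)
#   config.include('pyramid_swagger')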
| 2.171875 | 2 |
supervisoragent/eventlistener.py | silverfernsys/supervisoragent | 0 | 12773543 | #!/usr/bin/env python
import sys
import socket
import logging
import json
class EventListener():
def __init__(self):
self.logger = logging.getLogger('Event Listener')
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def write_stdout(self, s):
# only eventlistener protocol messages may be sent to stdout
sys.stdout.write(s)
sys.stdout.flush()
def write_stderr(self, s):
sys.stderr.write(s)
sys.stderr.flush()
def start(self):
# Connect the socket to the port where the server is listening
server_address = '/run/supervisoragent.sock'
        self.logger.info('Attempting to connect to {0}'.format(server_address))
try:
self.socket.connect(server_address)
except socket.error as error:
self.logger.error(error)
sys.exit(1)
while 1:
# transition from ACKNOWLEDGED to READY
self.write_stdout('READY\n')
# read header line
line = sys.stdin.readline()
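            # Illustrative supervisord header line (field values vary per event):
            #   "ver:3.0 server:supervisor serial:21 pool:listener poolserial:10 eventname:PROCESS_STATE_RUNNING len:58"
            # The dict built below maps field names to their string values.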
# read event payload and send to socket
# don't forget the new-line character
headers = dict([x.split(':') for x in line.split()])
raw_data = sys.stdin.read(int(headers['len']))
data = dict([x.split(':') for x in raw_data.split()])
self.logger.info(raw_data)
try:
response = {}
response['name'] = data['processname']
response['group'] = data['groupname']
response['from_state'] = data['from_state']
response['eventname'] = headers['eventname']
response['statename'] = headers['eventname'].split('_')[2]
try:
response['pid'] = int(data['pid'])
except:
response['pid'] = None
json_str = json.dumps(response)
self.socket.sendall('LENGTH:{0}\n'.format(len(json_str)))
self.socket.sendall(json_str)
self.logger.info(json_str)
except Exception as e:
self.logger.error(e)
# transition from READY to ACKNOWLEDGED
self.write_stdout('RESULT 2\nOK')
def main():
format = '%(asctime)s::%(levelname)s::%(name)s::%(message)s'
logging.basicConfig(filename='/tmp/eventlistener.log',
format=format, level=logging.DEBUG)
event_listener = EventListener()
event_listener.start()
if __name__ == '__main__':
main()
| 2.8125 | 3 |
convert.py | HitkoDev/siamesenetwork-tensorflow | 0 | 12773544 | import tensorflow as tf
from model2 import model
flags = tf.compat.v1.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', '', 'Model to restore')
model.load_weights(FLAGS.model, by_name=True, skip_mismatch=True)
model.save('converted.h5')
| 2.1875 | 2 |
metric_plot.py | GravYong/scalarized_ns | 0 | 12773545 | import numpy as np
import matplotlib.pyplot as plt
from numpy import log10 as lg
from numpy import pi as pi
from scipy.interpolate import interp1d as sp_interp1d
from scipy.integrate import odeint
from scipy.integrate import ode
import warnings
import timeit
import scipy.optimize as opt
from matplotlib import cm
from astropy import constants as const
from astropy import units as u
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
G=const.G.cgs.value
c=const.c.cgs.value
Ms=const.M_sun.cgs.value
hbar=const.hbar.cgs.value
m_n=const.m_n.cgs.value
km=10**5
import matplotlib.font_manager as font_manager
plt.rcParams['xtick.labelsize'] = 25
plt.rcParams['ytick.labelsize'] = 25
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.major.size'] = 8
plt.rcParams['ytick.major.size'] = 8
plt.rcParams['xtick.minor.size'] = 4
plt.rcParams['ytick.minor.size'] = 4
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['axes.labelpad'] = 8.0
plt.rcParams['figure.constrained_layout.h_pad'] = 0
plt.rcParams['text.usetex'] = True
plt.rc('text', usetex=True)
plt.rcParams['font.sans-serif'] = ['Times New Roman']
plt.tick_params(axis='both', which='minor', labelsize=18)
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
names1= ['m14','m14_5_001','m14_5_1', 'm14_10_001','m14_10_1']
names2=['m20','m20_5_001', 'm20_10_001','m20_10_1']
colors=['black', 'c', 'g', 'orange', 'red', 'black', 'c','orange','red']
linestyle=['-', ':', '-.', '-', '--' ,'-' ,'--' , '-.' ,':']
labels=[r'\rm GR',r'$\xi=5,\,\, a=0.01$', r'$\xi=5,\,\, a=1$',r'$\xi=10,\,\, a=0.01$',r'$\xi=10,\,\, a=1$',r'\rm GR',r'$\xi=5,\,\, a=0.01$',
r'$\xi=10,\,\, a=0.01$',r'$\xi=10,\,\, a=1$']
fig, axs = plt.subplots(2, 2,figsize=(15,12),sharex=True, sharey='row')
plt.subplots_adjust(hspace=0.0)
plt.subplots_adjust(wspace=0)
axs[0,0].yaxis.set_minor_locator(MultipleLocator(0.25/5))
axs[1,0].yaxis.set_minor_locator(MultipleLocator(0.2/5))
axs[0,0].xaxis.set_minor_locator(MultipleLocator(10/5))
for i in range(len(names1)):
data1 = np.genfromtxt('data/'+'sol_'+ 'ap4_'+names1[i]+'.txt')
R, gtt, grr= data1[:,0]/10**5, data1[:,1], data1[:, 2]
axs[1,0].plot(R,gtt,linewidth=2, color=colors[i],linestyle=linestyle[i])
axs[1,0].grid(alpha=0.6)
axs[1,0].set_ylabel(r'$ -g_{tt}$', fontsize=30)
axs[0,0].plot(R,grr,linewidth=2, color=colors[i],linestyle=linestyle[i],label=labels[i])
axs[0,0].grid(alpha=0.6)
axs[0,0].set_ylabel(r'$ g_{rr}$', fontsize=30)
axs[0,0].legend(fontsize=25, frameon=False,loc=(0.37,0.27))
sub_axes = plt.axes([.3, .18, .20, .18])
sub_axes.plot(R,gtt,linewidth=2, color=colors[i],linestyle=linestyle[i])
sub_axes.set_ylim(0.67,0.725)
sub_axes.set_xlim(13.4,14.6)
# sub_axes.set_xticks([10,11,12])
# sub_axes.grid(alpha=0.8)
sub_axes.yaxis.set_minor_locator(MultipleLocator(0.02/5))
sub_axes.xaxis.set_minor_locator(MultipleLocator(0.5/5))
for j in range(len(names2)):
data2 = np.genfromtxt('data/'+'sol_'+ 'ap4_'+names2[j]+'.txt')
R, gtt, grr= data2[:,0]/10**5, data2[:,1], data2[:, 2]
axs[1,1].plot(R,gtt,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5])
axs[1,1].grid(alpha=0.6)
axs[0,1].plot(R,grr,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5],label=labels[j+5])
axs[0,1].grid(alpha=0.6)
axs[0,1].legend(fontsize=25, frameon=False,loc=(0.37,0.4))
sub_axes = plt.axes([.69, .18, .19, .16])
sub_axes.plot(R,gtt,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5])
sub_axes.set_xlim(13.4,14.6)
sub_axes.set_ylim(0.53,0.59)
# sub_axes.set_yticks([6,8,10])
sub_axes.set_yticks([0.54,0.56,0.58])
# sub_axes.grid(alpha=0.8)
sub_axes.yaxis.set_minor_locator(MultipleLocator(0.02/5))
sub_axes.xaxis.set_minor_locator(MultipleLocator(0.5/5))
fig.text(0.48, 0.04, r'$r\,[\rm km]$' ,fontsize=30)
# fig.text(0.7, 0.04, r'$r\,[\rm km]$' ,fontsize=30)
axs[1,0].set_ylim(0.14,0.95)
axs[0,0].set_ylim(0.97,2.35)
axs[0,0].set_xlim(-1,43)
fig.text(0.28, 0.84, r'$M=1.4M_{\odot}$' ,fontsize=25)
fig.text(0.66, 0.84, r'$M=2M_{\odot}$' ,fontsize=25)
plt.savefig("ap41.pdf", format='pdf', bbox_inches="tight")
plt.show()
| 1.851563 | 2 |
labTwo/config/__init__.py | kotskon/patrec19 | 0 | 12773546 | class ToolConfig:
    def __init__(self):
self.one = './data/onetwothree1.wav'
self.two = './data/onetwothree8.wav'
self.digits_path = './data/digits'
| 2.390625 | 2 |
conv_split_miniImagenet.py | arslan-chaudhry/orthog_subspace | 17 | 12773547 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split miniImageNET 100 experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import random
import datetime
import collections
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_split_miniImagenet
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check
from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'RESNET-S', 'RESNET-B', 'VGG']
ARCH = 'RESNET-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'S-GEM', 'A-GEM', 'FTR_EXT', 'PNN', 'ER-Reservoir', 'ER-Ringbuffer', 'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP', 'ER-SUBSPACE-GP'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 13
IMG_HEIGHT = 84
IMG_WIDTH = 84
IMG_CHANNELS = 3
TOTAL_CLASSES = 100 # Total number of classes in the dataset
VISUALIZE_IMPORTANCE_MEASURE = False
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './split_miniImagenet_results'
RESNET18_miniImageNET10_CHECKPOINT = './resnet-18-pretrained-miniImagenet10/model.ckpt-19999'
DATA_FILE = 'miniImageNet_Dataset/miniImageNet_full.pickle'
## Evaluation options
## Task split
NUM_TASKS = 10
MULTI_TASK = False
PROJECTION_RANK = 50
GRAD_CHECK = False
QR = False
SVB = False
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split miniImagenet experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--maintain-orthogonality", action="store_true",
help="If option is chosen then weights will be projected to Steifel manifold.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Total size of episodic memory.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--num-tasks", type=int, default=NUM_TASKS,
help="Number of tasks.")
parser.add_argument("--subspace-share-dims", type=int, default=0,
help="Number of dimensions to share across tasks.")
parser.add_argument("--data-file", type=str, default=DATA_FILE,
help="miniImageNet data file.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
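# Illustrative invocation (flags as defined above; the data file path is the
# default and may need adjusting):
#   python conv_split_miniImagenet.py --imp-method A-GEM --num-tasks 20 \
#       --train-single-epoch --data-file miniImageNet_Dataset/miniImageNet_full.pickle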
def train_task_sequence(model, sess, datasets, args):
"""
    Train and evaluate the LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
if model.imp_method in {'A-GEM', 'ER-Ringbuffer', 'ER-Reservoir', 'ER-SUBSPACE', 'ER-SUBSPACE-GP'}:
use_episodic_memory = True
else:
use_episodic_memory = False
batch_size = args.batch_size
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
random.seed(args.random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
        classes_per_task = TOTAL_CLASSES // args.num_tasks
total_classes = classes_per_task * model.num_tasks
if args.online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = args.mem_size * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
nd_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
if COUNT_VIOLATONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
proj_matrices = generate_projection_matrix(model.num_tasks, feature_dim=model.subspace_proj.get_shape()[0], share_dims=args.subspace_share_dims, qr=QR)
# Check the sanity of the generated matrices
unit_test_projection_matrices(proj_matrices)
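        # Each proj_matrices[t] is expected to act as an orthogonal projector,
        # i.e. np.allclose(P.dot(P), P), with projectors of different tasks
        # overlapping only in the shared dimensions (presumably what
        # unit_test_projection_matrices verifies above).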
# TODO: Temp for gradients check
prev_task_grads = []
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
pnn_train_phase[task] = True
pnn_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
# If not in the cross validation mode then concatenate the train and validation sets
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels += task_labels[task]
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if args.train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if args.cross_validate_mode:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = args.train_iters
# Set the mask only once before starting the training for the task
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
# Randomly suffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode and not MULTI_TASK:
if (iters <= 20) or (iters > 20 and iters % 50 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
ftask.append(fbatch)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
elif model.imp_method in {'A-GEM', 'ER-Ringbuffer'}:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
else:
# Set the output labels over which the model needs to be trained
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
if args.train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+residual], model.y_[task]: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5, model.learning_rate: args.learning_rate}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True, model.learning_rate: args.learning_rate}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
residual = batch_size
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5, model.learning_rate: args.learning_rate}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True, model.learning_rate: args.learning_rate}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PROJ-SUBSPACE-GP':
if task == 0:
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
# Compute gradient in \perp space
logit_mask[:] = 0
for tt in range(task):
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.train_phase] = False
feed_dict[model.subspace_proj] = np.eye(proj_matrices[task].shape[0]) - proj_matrices[task]
sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute gradient in P space and train
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.train_phase] = True
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train_gp, model.gp_total_loss], feed_dict=feed_dict)
reg = 0.0
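# Sketch of the decomposition above (hedged; assuming train_gp combines the two
# stored gradients in the usual way): any gradient g splits exactly as
#   g = P g + (I - P) g,
# where P = proj_matrices[task] is the task-subspace projector (P = P^T, P^2 = P).
# The first sess.run stores the orthogonal component (I - P) g on the previous-task
# labels; the second computes P g on the current task and trains on a combination
# of the two.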
elif model.imp_method == 'SUBSPACE-PROJ':
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
if args.maintain_orthogonality:
_, loss = sess.run([model.train_stiefel, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
feed_dict[model.output_mask] = logit_mask
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method in {'EWC', 'M-EWC'}:
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
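# Hedged note: set_running_fisher above presumably folds the accumulated
# temporary Fisher into a running estimate via an exponential moving average,
# e.g. F_running <- fisher_ema_decay * F_running + (1 - fisher_ema_decay) * F_tmp,
# after which reset_tmp_fisher clears the accumulator for the next window.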
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
nd_logit_mask[:] = 0
for tt in range(task):
nd_logit_mask[tt][task_labels[tt]] = 1.0
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True, model.learning_rate: args.learning_rate}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
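# Sketch of the A-GEM constraint applied by train_subseq_tasks below (assuming
# the standard rule from Chaudhry et al., 2019): with g the current-task gradient
# and g_ref the reference gradient stored above,
#   if dot(g, g_ref) < 0:  g_tilde = g - (dot(g, g_ref) / dot(g_ref, g_ref)) * g_ref
#   else:                  g_tilde = g
# i.e. the update never increases the average loss on the sampled memories.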
# Compute the gradient for current task and project if need be
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
if COUNT_VIOLATONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
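# Hedged note on RWALK (Chaudhry et al., 2018): the per-parameter importance
# combines the running Fisher with a path-integral score accumulated online,
# roughly s += delta_loss / (0.5 * F * delta_theta^2 + eps); this is what
# update_big_omega_riemann above is expected to consolidate every
# fisher_update_after steps.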
elif model.imp_method == 'ER-Reservoir':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
labels_in_the_batch = np.unique(np.nonzero(er_train_y_batch)[1])
logit_mask[:] = 0
for tt in range(task+1):
if any(c_lab == t_lab for t_lab in task_labels[tt] for c_lab in labels_in_the_batch):
logit_mask[task_labels[tt]] = 1.0
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.learning_rate: args.learning_rate}
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Reservoir update
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
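# Sketch of the reservoir rule implemented by update_reservior (assuming the
# standard Algorithm-R update): for the t-th example seen so far,
#   if t < episodic_mem_size:  store it in slot t
#   else: j = randint(0, t);   if j < episodic_mem_size: overwrite slot j
# so every example seen so far stays in memory with equal probability M/t.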
elif model.imp_method == 'ER-Ringbuffer':
# Sample Bn U Bm
mem_filled_so_far = episodic_filled_counter if (episodic_filled_counter <= episodic_mem_size) else episodic_mem_size
er_mem_indices = np.arange(mem_filled_so_far) if (mem_filled_so_far <= args.eps_mem_batch) else np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0) # TODO: Check if for task 0 the first arg is empty
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
# Set the logit masks
nd_logit_mask[:] = 0
for tt in range(task+1):
nd_logit_mask[tt][task_labels[tt]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.learning_rate: args.learning_rate}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = float(er_train_x_batch.shape[0])
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
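# Hedged note: update_fifo_buffer is assumed to keep args.mem_size slots per
# class (tracked via count_cls) and to overwrite the oldest entry for a class
# once its slots are full, so memory holds the most recent examples per class.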
elif model.imp_method == 'ER-SUBSPACE':
# Zero out all the grads
sess.run([model.reset_er_subspace_grads])
if task > 0:
# Randomly pick a task to replay
tt = np.squeeze(np.random.choice(np.arange(task), 1, replace=False))
mem_offset = tt*args.mem_size*classes_per_task
er_mem_indices = np.arange(mem_offset, mem_offset+args.mem_size*classes_per_task)
np.random.shuffle(er_mem_indices)
er_train_x_batch = episodic_images[er_mem_indices]
er_train_y_batch = episodic_labels[er_mem_indices]
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.task_id: task+1, model.learning_rate: args.learning_rate}
logit_mask[:] = 0
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[tt]
sess.run(model.accum_er_subspace_grads, feed_dict=feed_dict)
# Train on the current task
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.task_id: task+1, model.learning_rate: args.learning_rate}
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
if args.maintain_orthogonality:
if SVB:
_, _, loss = sess.run([model.train_er_subspace, model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
# Every few iterations bound the singular values
if iters % 20 == 0:
sess.run(model.update_weights_svb)
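# Hedged note on SVB (singular value bounding, Jia et al., 2017): update_weights_svb
# presumably SVDs each weight matrix W = U S V^T and clips the singular values to a
# narrow band around 1, e.g. S_ii <- clip(S_ii, 1/(1+eps), 1+eps), keeping layers
# near-orthogonal without a hard Stiefel constraint.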
else:
_, loss = sess.run([model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
sess.run(model.train_stiefel, feed_dict={model.learning_rate: args.learning_rate})
else:
_, _, loss = sess.run([model.train_er_subspace, model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
elif model.imp_method == 'ER-SUBSPACE-GP':
# Zero out all the grads
sess.run([model.reset_er_subspace_gp_grads])
feed_dict = {model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.task_id: task+1, model.learning_rate: args.learning_rate, model.train_phase: True}
if task > 0:
# Randomly pick a task to replay
tt = np.squeeze(np.random.choice(np.arange(task), 1, replace=False))
mem_offset = tt*args.mem_size*classes_per_task
er_mem_indices = np.arange(mem_offset, mem_offset+args.mem_size*classes_per_task)
np.random.shuffle(er_mem_indices)
er_train_x_batch = episodic_images[er_mem_indices]
er_train_y_batch = episodic_labels[er_mem_indices]
logit_mask[:] = 0
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.x] = er_train_x_batch
feed_dict[model.y_] = er_train_y_batch
# Compute the gradient in the \perp space
#feed_dict[model.subspace_proj] = np.eye(proj_matrices[tt].shape[0]) - proj_matrices[tt]
#sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute the gradient in P space and store the gradient
feed_dict[model.subspace_proj] = proj_matrices[tt]
sess.run(model.accum_er_subspace_grads, feed_dict=feed_dict)
# Train on the current task
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
if task == 0:
feed_dict[model.x] = train_x[offset:offset+residual]
feed_dict[model.y_] = train_y[offset:offset+residual]
else:
# Sample Bn U Bm
mem_filled_so_far = episodic_filled_counter if (episodic_filled_counter <= episodic_mem_size) else episodic_mem_size
er_mem_indices = np.arange(mem_filled_so_far) if (mem_filled_so_far <= args.eps_mem_batch) else np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0) # TODO: Check if for task 0 the first arg is empty
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict[model.x] = er_train_x_batch
feed_dict[model.y_] = er_train_y_batch
# Compute the gradient in the \perp space
feed_dict[model.subspace_proj] = np.eye(proj_matrices[task].shape[0]) - proj_matrices[task]
sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute the gradient in P space and store the gradient
feed_dict[model.x] = train_x[offset:offset+residual]
feed_dict[model.y_] = train_y[offset:offset+residual]
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train_er_gp, model.gp_total_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
#print('Step {:d}\t CE: {:.3f}\t Reg: {:.9f}\t TL: {:.3f}'.format(iters, entropy, reg, loss))
#print('Step {:d}\t Reg: {:.9f}\t TL: {:.3f}'.format(iters, reg, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
if model.imp_method == 'SUBSPACE-PROJ' and GRAD_CHECK:
# TODO: Compute the average gradient of the task at \theta^*: Could be done as running average (no need for extra passes?)
bbatch_size = 100
grad_sum = []
for iiters in range(train_x.shape[0] // bbatch_size):
offset = iiters * bbatch_size
feed_dict = {model.x: train_x[offset:offset+bbatch_size], model.y_: train_y[offset:offset+bbatch_size],
model.keep_prob: 1.0, model.train_phase: False, model.learning_rate: args.learning_rate}
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
#feed_dict[model.subspace_proj] = proj_matrices[task]
projection_dict = {proj: proj_matrices[proj.get_shape()[0]][task] for proj in model.subspace_proj}
feed_dict.update(projection_dict)
feed_dict[model.mem_batch_size] = bbatch_size # match the batch size actually fed here
grad_vars, train_vars = sess.run([model.reg_gradients_vars, model.trainable_vars], feed_dict=feed_dict)
for v in range(len(train_vars)):
if iiters == 0:
grad_sum.append(grad_vars[v][0])
else:
# Incremental mean over batches: m_k = m_{k-1} + (g_k - m_{k-1}) / (k + 1)
grad_sum[v] += (grad_vars[v][0] - grad_sum[v]) / (iiters + 1)
prev_task_grads.append(grad_sum)
if use_episodic_memory:
episodic_filled_counter += args.mem_size * classes_per_task
if model.imp_method == 'A-GEM':
if COUNT_VIOLATONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates (Fisher / importance scores, etc.)
# Don't calculate the task updates for the last task
if (task < (len(task_labels) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
print('Task: {}, Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
ftask = np.array(ftask)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, task_labels, task, classes_per_task=classes_per_task, projection_matrices=proj_matrices)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
print('Task: {}, Acc: {}'.format(task, ftask))
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, test_tasks, task, classes_per_task=0, projection_matrices=None):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
final_acc = np.zeros(model.num_tasks)
if model.imp_method in {'PNN', 'A-GEM', 'ER-Ringbuffer'}:
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
if MEASURE_PERF_ON_EPS_MEMORY:
for tt, labels in enumerate(test_tasks):
# Multi-head evaluation setting
logit_mask[:] = 0
logit_mask[labels] = 1.0
mem_offset = tt*SAMPLES_PER_CLASS*classes_per_task
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task], model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_[tt]: task_test_labels, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
elif model.imp_method in {'A-GEM', 'ER-Ringbuffer'}:
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(logit_mask_dict)
#if model.imp_method in {'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP'}:
if False:
feed_dict[model.subspace_proj] = projection_matrices[tt]
#feed_dict[model.subspace_proj] = np.eye(projection_matrices[tt].shape[0])
#projection_dict = {proj: projection_matrices[proj.get_shape()[0]][tt] for proj in model.subspace_proj}
#feed_dict.update(projection_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
else:
logit_mask[:] = 0
logit_mask[labels] = 1.0
#for ttt in range(task+1):
# logit_mask[test_tasks[ttt]] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
if model.imp_method in {'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP', 'ER-SUBSPACE-GP'}:
feed_dict[model.subspace_proj] = projection_matrices[tt]
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
print('Log directory %s created!'%(args.log_dir))
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_miniImageNET',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "SPLIT_miniImageNET_META_%s_%s_%r_%s-"%(args.imp_method, str(args.synap_stgth).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the task labels from the total number of tasks and full label space
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = args.num_tasks - K_FOR_CROSS_VAL
# Load the split miniImagenet dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_miniImagenet(data_labs, args.data_file)
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
learning_rate = tf.placeholder(dtype=tf.float32, shape=())
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif args.optim == 'MOMENTUM':
#base_lr = tf.constant(args.learning_rate)
#learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(learning_rate, OPT_MOMENTUM)
# Create the model / construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, learning_rate, network_arch=args.arch)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs, task_labels_dataset = train_task_sequence(model, sess, datasets, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean, acc_std = average_acc_stats_across_runs(runs, model.imp_method)
fgt_mean, fgt_std = average_fgt_stats_across_runs(runs, model.imp_method)
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_miniImageNET_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
f.write('ARCH: {} \t LR: {} \t LAMBDA: {} \t ACC: {}\n'.format(args.arch, args.learning_rate, args.synap_stgth, acc_mean[-1,:].mean()))
else:
f.write('ORTHO:{} \t SVB:{}\t NUM_TASKS: {} \t MEM_SIZE: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t SHARED_SUBSPACE:{}, \t ACC: {} (+-{})\t Fgt: {} (+-{})\t QR:{}\t Time: {}\n'.format(args.maintain_orthogonality, SVB, args.num_tasks, args.mem_size, args.arch, args.learning_rate,
args.synap_stgth, args.subspace_share_dims, acc_mean, acc_std, fgt_mean, fgt_std, QR, str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
| 1.875 | 2 |
app/api/v2/models/party_model.py | mogoria/Project-politico | 0 | 12773548 | <filename>app/api/v2/models/party_model.py
from app.utils.database.model import Model
class Party(Model):
table_name = "parties"
columns = ('name', 'logourl', 'hqaddress')
def __init__(self, name, logourl, hqaddress):
super().__init__()
self.name = name
self.logourl = logourl
self.hqaddress = hqaddress
@classmethod
def get_all_parties(cls):
parties = cls.select_all(table_name=cls.table_name,
columns=cls.columns)
return parties
@classmethod
def get_party_by_name(cls, name):
party = cls.select_one(table_name=cls.table_name,
criteria={'column': 'name', 'value': name})
return party
@classmethod
def get_party_id_from_name(cls, name):
party_id = cls.select_one(table_name=cls.table_name, columns=['id'],
criteria={'column': 'name', 'value': name})
return party_id.get('id')
def add_party(self):
party_details = [self.name, self.logourl, self.hqaddress]
self.insert(self.table_name, self.columns, party_details)
return dict(zip(self.columns, party_details))
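# Usage sketch (hypothetical values; assumes the Model base class is wired to a
# live database connection):
#
#   party = Party(name='Unity', logourl='http://example.com/logo.png',
#                 hqaddress='Nairobi')
#   created = party.add_party()            # -> {'name': 'Unity', ...}
#   Party.get_party_by_name('Unity')       # -> single row for that party
#   Party.get_all_parties()                # -> list of all party rows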
| 2.703125 | 3 |
athena/edu/views.py | antonyryan/athena-backend-Django- | 5 | 12773549 | <reponame>antonyryan/athena-backend-Django-<filename>athena/edu/views.py<gh_stars>1-10
from rest_framework import viewsets
from athena.authentication.permissions import (
IsAdmin,
IsStudentAndReadOnly,
IsTeacher,
IsTutor,
)
from .serializers import (
Speciality,
SpecialitySerializer,
StudentGroup,
StudentGroupSerializer,
Subject,
SubjectSerializer,
)
class SubjectViewSet(viewsets.ModelViewSet):
queryset = Subject.objects.all()
serializer_class = SubjectSerializer
permission_classes = (IsStudentAndReadOnly | IsTutor | IsTeacher | IsAdmin,)
class SpecialityViewSet(viewsets.ModelViewSet):
queryset = Speciality.objects.all()
serializer_class = SpecialitySerializer
permission_classes = (IsStudentAndReadOnly | IsTutor | IsTeacher | IsAdmin,)
class StudentGroupViewSet(viewsets.ModelViewSet):
queryset = StudentGroup.objects.all()
serializer_class = StudentGroupSerializer
permission_classes = (IsStudentAndReadOnly | IsTutor | IsTeacher | IsAdmin,)
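# Note: the `|` above uses DRF's permission composition (available since DRF 3.9):
# the composed class grants access if any one operand's check passes.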
| 2.046875 | 2 |
scripts/bandpasses/LFI_bandpass_check.py | ACTCollaboration/tilec | 1 | 12773550 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from tilec.fg import dBnudT,get_mix
"""
Compute various conversion factors for the Planck LFI bandpasses.
"""
TCMB = 2.726 # Kelvin
TCMB_uK = 2.726e6 # micro-Kelvin
hplanck = 6.626068e-34 # MKS
kboltz = 1.3806503e-23 # MKS
clight = 299792458.0 # MKS
clight_cmpersec = 2.99792458*1.e10 #speed of light in cm/s
N_freqs = 3
LFI_freqs = []
LFI_freqs.append('030')
LFI_freqs.append('044')
LFI_freqs.append('070')
LFI_freqs_GHz = np.array([30.0, 44.0, 70.0])
LFI_files = []
for i in range(N_freqs): # range (not xrange) for Python 3 compatibility
print("----------")
print(LFI_freqs[i])
LFI_files.append('../data/LFI_BANDPASS_F'+LFI_freqs[i]+'_reformat.txt')
LFI_loc = np.loadtxt(LFI_files[i])
# check norm, i.e., make sure response is unity for CMB
LFI_loc_GHz = LFI_loc[:,0]
LFI_loc_trans = LFI_loc[:,1]
print("CMB norm = ", np.trapz(LFI_loc_trans, LFI_loc_GHz))
# compute K_CMB -> y_SZ conversion
print("K_CMB -> y_SZ conversion: ", np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6, LFI_loc_GHz) / np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6*get_mix(LFI_loc_GHz,'tSZ')/TCMB_uK, LFI_loc_GHz) / TCMB)
# compute K_CMB -> MJy/sr conversion [IRAS convention, alpha=-1 power-law SED]
print("K_CMB -> MJy/sr conversion [IRAS convention, alpha=-1 power-law SED]: ", np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6, LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) * 1.e20)
# compute color correction from IRAS to "dust" (power-law with alpha=4)
print("MJy/sr color correction (power-law, alpha=-1 to alpha=4): ", np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_loc_GHz/LFI_freqs_GHz[i])**4.0, LFI_loc_GHz))
# compute color correction from IRAS to modified blackbody with T=13.6 K, beta=1.4 (to compare to results at https://wiki.cosmos.esa.int/planckpla2015/index.php/UC_CC_Tables )
print("MJy/sr color correction (power-law alpha=-1 to MBB T=13.6 K/beta=1.4): ", np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_loc_GHz/LFI_freqs_GHz[i])**(1.4+3.) * (np.exp(hplanck*LFI_freqs_GHz[i]*1.e9/(kboltz*13.6))-1.)/(np.exp(hplanck*LFI_loc_GHz*1.e9/(kboltz*13.6))-1.), LFI_loc_GHz))
print("----------")
| 2.21875 | 2 |