| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
bin/gpio-syslog-daemon.py
|
Ricapar/ta-splunk-perimeter-security
| 0
|
12779651
|
#!/usr/bin/env python
import os
import sys
import time
import datetime
import logging
import RPi.GPIO as GPIO
from daemon import Daemon
from socket import gethostname
rpiPins = [
[ 4, 17, 21, 22, 18, 23, 24, 25 ],
[ 4, 17, 27, 22, 18, 23, 24, 25 ],
[ 4, 17, 27, 22, 5, 6, 13, 19, 26, 18, 23, 24, 25, 12, 16, 20, 21 ]
]
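# Pick the pin list that matches the detected board revision (RPi.GPIO reports RPI_REVISION as 1, 2 or 3).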
gpioPins = rpiPins[GPIO.RPI_REVISION - 1]
# Set up logging
logger = logging.getLogger("SplunkPerimeterSecurity")
logger.setLevel(logging.INFO)
# Log to /var/log
handler = logging.FileHandler('/var/log/splunk-perimeter-security')
handler.setLevel(logging.INFO)
# Make the logs kinda look like syslog
formatter = logging.Formatter('%(asctime)s app=\"Splunk Perimeter Security\" src_host="'+gethostname()+'" %(message)s', '%b %e %H:%M:%S')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
class SplunkPerimeterSecurity(Daemon):
# The RaspberryPi has had a few different revisions,
# each with their own GPIO pin configurations.
# There are a few more pins that can be used for GPIO, but
# they also serve other purposes, such as serial TX/RX
# and SPI
def zone_changed(self, pin):
pinStatus = "closed" if GPIO.input(pin) else "open"
zone = gpioPins.index(pin)
logger.info("type=alert subject=\"Zone State Changed\" src=ZONE%02d src_category=zone_trigger pin=%d body=%s " % ( zone, pin, pinStatus))
def setup_zones(self):
GPIO.setmode(GPIO.BCM)
for zone, pin in enumerate(gpioPins):
logger.info('type=event severity=informational src=ZONE%02d src_category=startup pin=%d subject="Configuring GPIO pin" body=\"Configuring GPIO pin\"' % ( zone, pin ))
GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.add_event_detect(
pin,
GPIO.BOTH,
callback = self.zone_changed,
bouncetime = 200
)
def output_status(self):
outputStr = ""
for zone, pin in enumerate(gpioPins):
pinStatus = "closed" if GPIO.input(pin) else "open"
logger.info("type=event severity=informational src=ZONE%02d src_category=zone pin=%d body=%s" % ( zone, pin, pinStatus))
def run(self):
try:
logger.info('type=event severity=informational src_category=startup event=init body="SplunkPerimeterSecurity is starting up"')
self.setup_zones()
logger.info('type=event severity=informational src_category=startup event=init body="SplunkPerimeterSecurity is ready"')
while True:
self.output_status()
time.sleep(60)
except:
            print(sys.exc_info())
logger.critical('action=shutdown severity=critical event=shutdown body="SplunkPerimeterSecurity monitoring is SHUTTING DOWN" exception="%s"' % ( sys.exc_info()[0]) )
sys.exit(1)
if __name__ == "__main__":
daemon = SplunkPerimeterSecurity('/var/run/splunk-perimeter-security.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'status' == sys.argv[1]:
daemon.status()
else:
sys.stdout.write("Unknown command\n")
sys.exit(2)
sys.exit(0)
else:
sys.stdout.write("Usage: %s start | stop | restart | status\n" % sys.argv[0])
sys.exit(2)
| 2.71875
| 3
|
python/comparatist/utils/jl.py
|
tkf/comparatist
| 0
|
12779652
|
import os
import numpy
import julia
jlbase = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, os.path.pardir, os.path.pardir,
'julia')
os.environ['JULIA_LOAD_PATH'] = jlbase + (
':' + os.environ['JULIA_LOAD_PATH']
if 'JULIA_LOAD_PATH' in os.environ
else ''
)
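# Cache a single Julia runtime handle; starting the embedded Julia interpreter is slow, so it is reused.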
JL = None
def getjulia():
global JL
if not JL:
JL = julia.Julia()
return JL
def jlprepare(module, name, **kwds):
jl = getjulia()
jlrun = jl.eval("""
import {module}
function(; opts...)
{module}.prepare(:{name}; opts...)
end
""".format(module=module, name=name))(**kwds)
def run():
kwds = jlrun()
for k, v in kwds.items():
if isinstance(v, numpy.ndarray):
kwds[k] = v.T
return kwds
return run
def make_prepare(module, **kwds):
return lambda name: jlprepare(module, name, **kwds)
| 2.359375
| 2
|
model_trainer/basic_trainer.py
|
NeverendingNotification/nnlibs
| 0
|
12779653
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 11:25:37 2018
@author: nn
"""
class BaseTrainer:
def __init__(self, trainer_setting):
self.setting = trainer_setting
def make_model(self, loader, is_train=True):
raise NotImplementedError()
def get_losses(self, inputs, models, loss_params):
raise NotImplementedError()
def get_trainer(self, inputs, models, losses):
raise NotImplementedError()
def get_evaluator(self, inputs, models):
raise NotImplementedError()
def initialize_training(self, loader):
raise NotImplementedError()
def check_run(self, key, func):
if key in self.setting:
func(**self.setting[key])
def check_blank_run(self, key, func):
params = self.setting[key] if key in self.setting else {}
func(**params)
def make_graph(self, loader, is_train):
inputs, models = self.make_model(loader, is_train=is_train)
self.inputs = inputs
self.models = models
if is_train:
losses = self.get_losses(inputs, models, self.setting["loss_params"])
trainers = self.get_trainer(inputs, models, losses)
self.losses = losses
self.trainers = trainers
else:
losses = self.get_losses(inputs, models, self.setting["loss_params"])
# trainers = self.get_trainer(inputs, models, losses)
self.evaluator = self.get_evaluator(inputs, models)
self.initialize_training(loader)
def train(self, loader, epochs, batch_size=32):
raise NotImplementedError()
def evaluate(self, loader, eval_params):
raise NotImplementedError()
| 2.46875
| 2
|
pipeline_tools/tests/test_http_requests_manager.py
|
HumanCellAtlas/pipeline-tools
| 5
|
12779654
|
import os
from pipeline_tools.shared import http_requests
from pipeline_tools.tests.http_requests_manager import HttpRequestsManager
class TestHttpRequestsManager(object):
def test_enter_creates_directory(self):
with HttpRequestsManager() as temp_dir:
assert os.path.isdir(temp_dir) is True
def test_exit_deletes_directory(self):
with HttpRequestsManager() as temp_dir:
temp_dir_name = temp_dir
assert os.path.isdir(temp_dir_name) is True
assert os.path.isdir(temp_dir) is False
def test_enter_sets_environment_vars(self):
with HttpRequestsManager() as temp_dir:
assert http_requests.HTTP_RECORD_DIR in os.environ
assert os.environ[http_requests.HTTP_RECORD_DIR] == temp_dir
assert http_requests.RECORD_HTTP_REQUESTS in os.environ
assert os.environ[http_requests.RECORD_HTTP_REQUESTS] == 'true'
assert http_requests.RETRY_MAX_TRIES in os.environ
assert os.environ[http_requests.RETRY_MAX_TRIES] == '3'
assert http_requests.RETRY_MAX_INTERVAL in os.environ
assert os.environ[http_requests.RETRY_MAX_INTERVAL] == '10'
assert http_requests.RETRY_TIMEOUT in os.environ
assert os.environ[http_requests.RETRY_TIMEOUT] == '1'
assert http_requests.RETRY_MULTIPLIER in os.environ
assert os.environ[http_requests.RETRY_MULTIPLIER] == '0.01'
assert http_requests.INDIVIDUAL_REQUEST_TIMEOUT in os.environ
assert os.environ[http_requests.INDIVIDUAL_REQUEST_TIMEOUT] == '1'
def test_exit_deletes_environment_var(self):
with HttpRequestsManager() as temp_dir:
pass
assert http_requests.HTTP_RECORD_DIR not in os.environ
assert http_requests.RECORD_HTTP_REQUESTS not in os.environ
assert http_requests.RETRY_MAX_TRIES not in os.environ
assert http_requests.RETRY_MAX_INTERVAL not in os.environ
assert http_requests.RETRY_TIMEOUT not in os.environ
assert http_requests.RETRY_MULTIPLIER not in os.environ
assert http_requests.INDIVIDUAL_REQUEST_TIMEOUT not in os.environ
| 2.234375
| 2
|
algorithm/enigma_smarter_crack_turing.py
|
alphaPhantm/Privacy-and-security-SRP
| 1
|
12779655
|
from collections import deque
from random import random
def str2num(zeichenkette):
return [ord(c) - 65 for c in zeichenkette]
walzen_r = ['EKMFLGDQVZNTOWYHXUSPAIBRCJ', # I
'AJDKSIRUXBLHWTMCQGZNPYFVOE', # II
'BDFHJLCPRTXVZNYEIWGAKMUSQO', # III
'ESOVPZJAYQUIRHXLNFTGKDCMWB', # IV
'VZBRGITYUPSDNHLXAWMJQOFECK', # V
'JPGVOUMFYQBENHZRDKASXLICTW', # VI
'NZJHGRCXMYSWBOUFAIVLPEKQDT', # VII
'FKQHTLXOCBJSPDZRAMEWNIUYGV'] # VIII
walzen_r = [deque(str2num(zeile)) for zeile in walzen_r]
walzen_l = deque(range(26))
UKWs = ['<KEY>', # UKW A
'YRUHQSLDPXNGOKMIEBFZCWVJAT', # UKW B
'FVPJIAOYEDRZXWGCTKUQSBNMHL'] # UKW C
UKWs = [str2num(zeile) for zeile in UKWs]
kerbenKat = "Q E V J Z ZM ZM ZM"
kerbenKat = [str2num(zeile) for zeile in kerbenKat.split()]
class Walze():
def __init__(self, nr, w_pos, r_pos):
self.w_pos = w_pos
self.r_pos = r_pos
self.verdr_r = walzen_r[nr].copy()
self.verdr_l = walzen_l.copy()
self.kerben = kerbenKat[nr]
self.setup()
def setup(self):
offset = self.r_pos - self.w_pos
self.verdr_l.rotate(offset)
self.verdr_r.rotate(offset)
self.kerben = [(k - self.r_pos) % 26 for k in self.kerben]
def click(self):
self.verdr_l.rotate(-1)
self.verdr_r.rotate(-1)
def schaltung(self):
return self.verdr_l[0] in self.kerben
class Enigma():
def __init__(self):
self.walzen = []
self.ukw = []
self.steckerbr = {}
def setup(self, nr_ukw, nr_walzen, w_pos, r_pos, paare_steckerbr):
for i, nr in enumerate(nr_walzen):
wpos = ord(w_pos[i]) - 65
rpos = r_pos[i] - 1
self.walzen.append(Walze(nr - 1, wpos, rpos))
self.ukw = UKWs[nr_ukw - 1]
for a, b in paare_steckerbr.split():
self.steckerbr[ord(a) - 65] = ord(b) - 65
self.steckerbr[ord(b) - 65] = ord(a) - 65
def rotiere(self):
links, mitte, rechts = self.walzen
if mitte.schaltung():
mitte.click()
links.click()
elif rechts.schaltung():
mitte.click()
rechts.click()
def umwandeln(e, text):
u_text = ""
text = text.upper()
for c in text:
c = ord(c) - 65
        if c < 0 or c > 25: continue  # skip any character that is not A-Z
e.rotiere()
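        # One key press: the rotors have just stepped; the signal now goes
        # plugboard -> rotors (right to left) -> reflector -> rotors (left to right) -> plugboard.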
c = e.steckerbr.get(c, c)
for w in reversed(e.walzen):
c = w.verdr_r[c]
c = w.verdr_l.index(c)
c = e.ukw[c]
for w in e.walzen:
c = w.verdr_l[c]
c = w.verdr_r.index(c)
c = e.steckerbr.get(c, c)
u_text += chr(c + 65)
return u_text
def run(ukw, walze1, walze2, walze3, walzenPos, ringPosW1, ringPosW2, ringPosW3, steckerbrett, text):
ukw = int(ukw)
walze1 = int(walze1)
walze2 = int(walze2)
walze3 = int(walze3)
ringPosW1 = int(ringPosW1)
ringPosW2 = int(ringPosW2)
ringPosW3 = int(ringPosW3)
walzen = [walze1, walze2, walze3]
ringPos = [ringPosW1, ringPosW2, ringPosW3]
enigma = Enigma()
enigma.setup(ukw, walzen, walzenPos, ringPos, steckerbrett)
erg = umwandeln(enigma, text)
return erg
def bruteforce(text, word, steckerbrett):
import time
word = word.upper()
walzen_pos = []
for a in range(26):
for b in range(26):
for c in range(26):
walzen_pos.append(chr(a + 65) + chr(b + 65) + chr(c + 65))
ergC = []
wordC = []
for c in word:
wordC.append(c)
start = time.time()
for ukw in range(1, 4):
for walze1 in range(1, 9):
for walze2 in range(1, 9):
for walze3 in range(1, 9):
for pos in walzen_pos:
for ringPosW1 in range(1, 27):
for ringPosW2 in range(1, 27):
for ringPosW3 in range(1, 27):
erg = run(ukw, walze1, walze2, walze3, pos, ringPosW1, ringPosW2, ringPosW3,
steckerbrett, text)
ergC = []
for c in erg:
ergC.append(c)
for i in range(len(ergC) - len(wordC) + 1):
z = ergC[i: len(wordC) + i]
s = "".join(z)
if s == word:
end = time.time()
time = end - start
print(i)
print("UKW", ukw)
print("walze 1", walze1)
print("walze 2", walze2)
print("walze 3", walze3)
print("walzenPos", pos)
print("walzeRing1", ringPosW1)
print("walzeRing2", ringPosW2)
print("walzeRing3", ringPosW3)
print(time)
print("Finised")
exit(0)
walzen_pos = []
for a in range(26):
for b in range(26):
for c in range(26):
walzen_pos.append(chr(a + 65) + chr(b + 65) + chr(c + 65))
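# Rejewski/Turing-style indicator analysis: every doubled three-letter message key is encrypted
# at a fixed ground setting, recording which letter appears at position 4 for each letter at
# position 1 (likewise 5/2 and 6/3). The cycle lengths of these permutations depend on the rotor
# order and settings but not on the plugboard, which is what findLoop() below extracts.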
one = {}
two = {}
tre = {}
for i, c in enumerate(walzen_pos):
enigma = Enigma()
enigma.setup(2, [2, 1, 3], "AAA", [ord("E")-65, ord("Q")-65, ord("V")-65], "")
text = umwandeln(enigma, c + c)
one[text[0]] = text[3]
two[text[1]] = text[4]
tre[text[2]] = text[5]
def findLoop(dict):
seen = []
lens = []
for i in range(26):
key = chr(i + 65)
if not key in seen:
len = 0
org_key = key
while True:
len += 1
seen.append(key)
key = dict[key]
if key == org_key:
break
lens.append(len)
return lens
print(findLoop(one))
print(findLoop(two))
print(findLoop(tre))
| 2.765625
| 3
|
tools/clang/scripts/generate_compdb.py
|
zipated/src
| 2,151
|
12779656
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Helper for generating compile DBs for clang tooling. On non-Windows platforms,
this is pretty straightforward. On Windows, the tool does a bit of extra work to
integrate the content of response files, force clang tooling to run in clang-cl
mode, etc.
"""
import argparse
import json
import os
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
tool_dir = os.path.abspath(os.path.join(script_dir, '../pylib'))
sys.path.insert(0, tool_dir)
from clang import compile_db
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p',
required=True,
help='Path to build directory')
args = parser.parse_args()
  print(json.dumps(compile_db.GenerateWithNinja(args.p)))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 2.0625
| 2
|
rev1/scripts/bom_csv_multi.py
|
bzzzm/commodity-hw
| 0
|
12779657
|
"""
@package
A simple way to generate separate CSV BOM files for multiple suppliers in Kicad.
Heavily inspired by https://github.com/wokwi/kicad-jlcpcb-bom-plugin .
This could be made much more efficient than it is, but I didn't really bother with performance for my
hobby-sized BOMs.
Feel free to modify the `config` variable to fit your needs before you copy this file into the KiCad scripts dir.
I recommend that you also check the code; things are commented and you may need to change them to fit your needs.
To "install" the plugin, copy this Python file into your KiCad plugins dir (/usr/share/kicad/plugins on my
Ubuntu 20 with KiCad 5.1 from Snap).
Command line:
python "pathToFile/bom_csv_multi.py" "%I" "%O"
Config sample:
{
'suppliers': [
{
'name': 'JLCPCB',
'field_name': 'LCSC',
'header': ['Comment', 'Designator', 'Quantity', 'Footprint', 'LCSC Part #'],
'data': ['value', 'refs', 'quantity', 'footprint', 'pn']
},
{ ... }
]
}
This script comes with absolutely no warranty!
"""
import kicad_netlist_reader
import csv
import sys
# The configuration for multiple suppliers. Please make sure you fill at least 4 fields (name, field_name, header, data)
config = {
'suppliers': [
{
'name': 'JLCPCB',
'field_name': 'LCSC',
'header': ['Comment', 'Designator', 'Quantity', 'Footprint', 'LCSC Part #'],
'data': ['value', 'refs', 'quantity', 'footprint', 'pn']
},
{
'name': 'Farnell',
'field_name': 'Farnell',
'header': ['Part Number', 'Quantity', 'Description'],
'data': ['pn', 'quantity', 'value']
},
{
'name': 'OptimusDigital',
'field_name': 'Optimus',
'header': ['Part Number', 'Quantity', 'Description'],
'data': ['pn', 'quantity', 'value']
},
# do not remove this config line, it is a placeholder for components
# without a supplier field.
{
'name': 'Empty',
'field_name': '__missing',
'header': ['Refs', 'Value', 'FP'],
'data': ['refs', 'value', 'footprint']
}
],
'ignore_field': 'Ignore'
}
def create_row(comp, field, refs, quantity):
"""
    Gets a component and its supplier field and creates a CSV row for the corresponding BOM.
    :param comp: Kicad component
    :param field: Supplier field key
    :param refs: A list of references for the identical components in the group
    :param quantity: How many components
    :return: list, in the same format as supplier['data']
"""
out = []
# get the supplier from config
sup = [s for s in config['suppliers'] if s['field_name'] == field][0]
# generate the row
# (this should be done in a different way, but it works for now)
for col in sup['data']:
if col == 'value':
out.append(comp.getValue())
continue
if col == 'refs':
out.append(",".join(refs))
continue
if col == 'footprint':
out.append(comp.getFootprint().split(':')[1])
continue
if col == 'pn':
out.append(comp.getField(field))
continue
if col == "quantity":
out.append(quantity)
continue
return out
def extract_boms(net):
"""
Iterates over component groups, gets the first component in the group
and appends the output dict for each supplier, based on the `data` key.
:param net: Kicad netlist
:return: dict of boms: {field_name: list(csv_rows)}
"""
# extract possible fields (only enabled suppliers)
fields = [x['field_name'] for x in config['suppliers'] if x.get('enabled', True)]
# output format
out = dict()
for field in fields:
out[field] = []
# iterate component groups
for group in net.groupComponents():
# how many components in the group
quant = len(group)
# references
refs = [r.getRef() for r in group]
# get the first component in the group and create the csv row for each supplier
comp = group[0]
# ignore group in case requested
if comp.getField(config.get('ignore_field')) != "":
continue
# get supplier fields in component
sup_fields = [v for v in fields if v in comp.getFieldNames()]
        # create a csv row for each supplier field
# if no supplier field is found, add the component in the missing csv
if len(sup_fields) > 0:
for field in sup_fields:
row = create_row(comp, field, refs, quant)
out[field].append(row)
else:
out['__missing'].append([", ".join(refs), comp.getValue(), comp.getFootprint()])
return out
def write_csv(field, comps):
"""
:param field: Supplier `field_name`
:param comps: List of components (generated by extract_boms())
:return:
"""
# get the supplier from config
sup = [s for s in config['suppliers'] if s['field_name'] == field][0]
# generate the path from sys.argv
path = "{}-{}-bom.csv".format(sys.argv[2], sup['name'])
# write the CSV file (header and the component list)
with open(path, 'w') as f:
out = csv.writer(f)
out.writerow(sup['header'])
out.writerows(comps)
f.close()
def main():
"""
Main function
:return: None
"""
# read the netlist
net = kicad_netlist_reader.netlist(sys.argv[1])
# extract boms from netlist
boms = extract_boms(net)
# write boms to csv files
for sup, comps in boms.items():
num_comps = len(comps)
if num_comps > 0:
write_csv(sup, comps)
print("> Generated CSV BOM for supplier with key `{}` "
"with a total of {} unique components.".format(sup, num_comps))
else:
print("> Ignoring supplier with key `{}`, no components found.".format(sup))
# we got so far, nice
sys.exit(0)
if __name__ == '__main__':
main()
| 2.140625
| 2
|
exercicios/ex030.py
|
thiago5171/python.
| 1
|
12779658
|
"""
Write a program that shows an on-screen countdown to the fireworks going off,
counting from 10 down to 0, with a one-second pause between numbers.
"""
# import sleep so we can pause between prints
from time import sleep
print("contagem regressiva para os fogos!!!!")
for a in range(10, -1, -1):  # include 0, as the exercise asks
print(a)
sleep(1)
print("FOGOS")
| 3.359375
| 3
|
mc_launcher_core/web/install.py
|
tfff1OFFICIAL/mc_launcher_core
| 2
|
12779659
|
"""
All the web requests related to installing a version of Minecraft
"""
import os.path
import logging
import platform
import shutil
import unpack200
from urllib.error import URLError, HTTPError
from mc_launcher_core.exceptions import HashMatchError
from mc_launcher_core.util import extract_file_to_directory, java_esque_string_substitutor, is_os_64bit, get_url_filename, do_get_library, extract_xz_to_file
from mc_launcher_core.web.util import chunked_file_download, verify_sha1, get_sha1_hash
MINECRAFT_VERSIONS_ROOT = "https://s3.amazonaws.com/Minecraft.Download/versions"
logger = logging.getLogger(__name__)
system = platform.system().lower()
def save_minecraft_jar(mcversion, path, hash=None, raise_on_hash_mismatch=False):
"""
Downloads and saves the Minecraft.jar (from Mojang source) into path
:param mcversion: string, e.g. "1.7.10", "18w14b"
:param path: string, absolute path to the location where this file should be saved
:param hash: string, sha1 hash of the Jar file
:param raise_on_hash_mismatch: bool
:return: None
"""
url = "{0}/{1}/{1}.jar".format(MINECRAFT_VERSIONS_ROOT, mcversion)
attempt_count = 0
while (not os.path.isfile(path) or os.path.getsize(path) == 0 or (hash is not None and not verify_sha1(path, hash))) and attempt_count <= 4:
logger.info("Downloading Minecraft.jar from URL: {}... (attempt: {})".format(url, attempt_count))
chunked_file_download(url, path)
attempt_count += 1
if not os.path.isfile(path) or os.path.getsize(path) == 0:
logging.critical("Failed to download Minecraft.jar")
raise Exception("Minecraft.jar not downloading correctly (file is either 0 bytes or non-existent)")
with open(path, 'rb') as f:
h = get_sha1_hash(f)
if hash is not None and h != hash: # hashes don't match!!!
logger.critical("Failed to download minecraft.jar. Hash of file: '{}' doesn't match expected hash: '{}'".format(
h,
hash
))
if raise_on_hash_mismatch:
raise HashMatchError("minecraft.jar", "Hashes don't match. Expected: '{}' but got '{}'".format(h, hash))
def save_minecraft_lib(lib, libdir, nativesdir, raise_on_hash_mismatch=False):
"""
Save a specific Minecraft lib
:param lib: dict, library JSON format
:param libdir: string
:param nativesdir: string, where to put natives
:param raise_on_hash_mismatch: bool, whether to raise an exception when hashes don't match
:return: None
"""
logger.info("Checking library: {}".format(lib["name"]))
'''if lib.get("clientreq") is True:
# this is old-style and required for the client
old_style_library_saver(lib)
continue
elif lib.get("serverreq") is not None or lib.get("downloads") is None:
# old-style but we don't need to download it
continue'''
if not do_get_library(lib.get("rules")):
logger.info("No need to download.")
return
native_classifier_to_download = None
logger.debug("Checking for natives...")
if lib.get("natives"):
logger.info("Checking for natives for {}bit system".format(("64" if is_os_64bit() else "32")))
# this library has natives attached to it
native_classifier_to_download = lib["natives"].get(system)
if native_classifier_to_download is not None:
native_classifier_to_download = java_esque_string_substitutor(
native_classifier_to_download,
arch=("64" if is_os_64bit() else "32")
)
logger.info("Found native")
if native_classifier_to_download is not None:
filepath = os.path.join(
nativesdir,
get_url_filename(lib["downloads"]["classifiers"][native_classifier_to_download]["path"]) # file name
)
logger.debug("Downloading native to: '{}'".format(filepath))
os.makedirs(os.path.dirname(filepath), exist_ok=True)
chunked_file_download(
lib["downloads"]["classifiers"][native_classifier_to_download]["url"],
filepath
)
if not verify_sha1(filepath, lib["downloads"]["classifiers"][native_classifier_to_download]["sha1"]):
logger.warning("Hashes don't match. Expected: {}".format(lib["downloads"]["classifiers"][native_classifier_to_download]["sha1"]))
if raise_on_hash_mismatch:
raise HashMatchError(lib, "Failed to download native as hashes don't match!")
logger.debug("download complete")
if lib.get("extract"):
exclude_from_extract = lib["extract"].get("exclude")
logger.debug("extracting files...")
# extract the file
extract_file_to_directory(
filepath,
os.path.dirname(filepath),
exclude_from_extract
)
# clean up afterwards
os.remove(filepath)
logger.debug("done")
if lib["downloads"].get("artifact"):
filepath = os.path.join(
libdir,
*lib["downloads"]["artifact"]["path"].split("/")
)
if lib.get("fu_existence_guaranteed") in (None, False):
logger.debug("Checking if need to download artifact to: {}".format(filepath))
if not os.path.isfile(filepath):
# get that file, cos it's not there yet
using_alt_url = False
os.makedirs(os.path.dirname(filepath), exist_ok=True)
logger.info(
"Downloading artifact from: {} to: {}".format(lib["downloads"]["artifact"]["url"], filepath))
try:
chunked_file_download(
lib["downloads"]["artifact"]["url"],
filepath
)
except HTTPError:
if lib["downloads"]["artifact"].get("fu_alt_url"):
# download from alt URL
using_alt_url = True
chunked_file_download(
lib["downloads"]["artifact"]["fu_alt_url"],
filepath
)
if lib["downloads"]["artifact"].get("sha1") is not None: # let's verify this file
if not verify_sha1(filepath, lib["downloads"]["artifact"]["sha1"]):
logger.warning("library file at: {} sha1 hash doesn't match".format(
lib["downloads"]["artifact"]["sha1"]
))
if raise_on_hash_mismatch:
                            raise HashMatchError(lib)
logger.info("download complete")
if lib.get("extract") and lib["extract"].get("fu_xz_unpack") and (
not using_alt_url or lib["extract"].get("fu_xz_unpack_on_alt_url")):
logger.debug("unzipping .pack.xz file...")
if os.path.isfile(filepath + ".pack.xz"):
os.remove(filepath + ".pack.xz")
if os.path.isfile(filepath + ".pack"):
os.remove(filepath + ".pack")
os.rename(filepath, filepath + ".pack.xz")
extract_xz_to_file(
filepath + ".pack.xz",
filepath + ".pack"
)
os.remove(filepath + ".pack.xz")
logger.debug("Unzipped, unpacking...")
unpack200.unpack(
filepath + ".pack",
filepath,
remove_source=True
)
logger.debug("done")
def save_minecraft_asset(asset, assetname, assetsdir, raise_on_hash_mismatch=False):
"""
Downloads an asset into the correct locations
:param asset: dict
:param assetsdir: string
:param assetname: string, name of asset
:param raise_on_hash_mismatch: bool, whether to raise if the hash doesn't match
:return: None
"""
MINECRAFT_RESOURCES_ROOT = "https://resources.download.minecraft.net/"
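    # Assets are stored content-addressed: objects/<first two hex chars of the hash>/<full hash>.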
path = (asset["hash"][:2], asset["hash"])
filepath = os.path.join(
assetsdir,
"objects",
*path
)
url = MINECRAFT_RESOURCES_ROOT + "/".join(path)
logger.debug("Downloading Asset from: {} to: {}".format(url, filepath))
# download file
if not os.path.isfile(filepath):
chunked_file_download(
url,
filepath
)
# check hash
if not verify_sha1(filepath, asset["hash"]):
logger.warning("Hash for asset doesn't match. Expected: {}".format(asset["hash"]))
if raise_on_hash_mismatch:
raise HashMatchError(asset, type="asset")
# copy file
legacy_path = os.path.join(
assetsdir,
"virtual",
"legacy",
*assetname.split("/")
)
if not os.path.isfile(legacy_path):
logger.debug("Copying from: {} to legacy path: {}".format(filepath, legacy_path))
os.makedirs(os.path.dirname(legacy_path), exist_ok=True)
shutil.copyfile(filepath, legacy_path)
| 2.671875
| 3
|
src/main/models.py
|
HammudElHammud/DjangoProject
| 0
|
12779660
|
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
from django.utils.safestring import mark_safe
class Main(models.Model):
STATUS = (
('True', 'Evet'),
('False', 'Hayir'),
)
name = models.CharField(default='', max_length=40)
status = models.CharField(default='', max_length=40, choices=STATUS)
title = models.CharField(max_length=15)
about = RichTextUploadingField()
keyword = models.TextField(max_length=10000)
description = models.TextField(max_length=10000)
company = models.TextField(max_length=10000)
smtpserver = models.CharField(max_length=44)
smtpemail = models.CharField(max_length=44)
smtpPassword = models.CharField(max_length=150)
smtpPort = models.CharField(max_length=150)
pagefa = models.CharField(max_length=150)
pagetw = models.CharField(max_length=150)
pageyt = models.CharField(max_length=105)
pageLink = models.CharField(max_length=150)
pageTe = models.CharField(max_length=20,default=0)
icon = models.ImageField(blank=True,upload_to='images/')
name_set = models.CharField(max_length=20,default='-')
def __str__(self):
return self.name_set + " ||" +str(self.pk)
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
phone = models.CharField(blank=True,max_length=20)
address = models.CharField(blank=True,max_length=202)
city = models.CharField(blank=True,max_length=20)
country= models.CharField(blank=True,max_length=20)
image = models.ImageField(upload_to="images/profile_images/", blank=True)
def __str__(self):
return self.user.username
@property
def use_name(self):
        return self.user.username
@property
def image_tag(self):
        if not self.image:  # an empty ImageField is falsy, not None
return ''
self.image.short_description = 'Image'
return mark_safe('<img src="{}" width="50" height="50" />'.format(self.image.url))
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('phone', 'address', 'city', 'country','image')
class FAQ(models.Model):
STATUS = (
('True', 'Evet'),
('False', 'Hayir'),
)
orderNumber = models.IntegerField()
question = models.CharField(default='', max_length=150)
answer = models.TextField(max_length=1000)
status = models.CharField(default='', max_length=40, choices=STATUS)
create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.question
| 2.109375
| 2
|
Exercises/Conditions/Aumento_salarial.py
|
LuisAdolfoAlves/Learning-Python
| 0
|
12779661
|
print('-=-' * 20)
print('ANALISADOR DE TRIANGULOS')
print('-=-' * 20)
p = float(input('Primeiro segmento: '))
s = float(input('Segundo segmento: '))
t = float(input('Terceiro segmento: '))
# Triangle inequality: a triangle is possible only if every segment is shorter than the sum of the other two.
triangulo = p < s + t and s < p + t and t < p + s
if triangulo:
    print('Os segmentos acima PODEM formar um triângulo.')
else:
    print('Os segmentos acima NAO PODEM formar um triangulo')
| 4
| 4
|
TPC Gateway.py
|
dangerousbeak/tpc
| 0
|
12779662
|
#!/usr/bin/python
from game import Game, State
from racing import Racing
from quiet import QuietAttract
from songs import Songs
game = Game({
"quiet": QuietAttract,
"racing": Racing,
"songs": Songs,
})
try:
if game.buttons.back:
game.play("racing")
else:
game.play("quiet")
except KeyboardInterrupt:
print("Quit.")
finally:
game.cleanup()
| 2.421875
| 2
|
PatchMatch.py
|
WArushrush/An-Application-of-Image-Inpainting-and-Completion
| 3
|
12779663
|
import numpy as np
from PIL import Image
import time
import cv2
global img
global point1, point2
global min_x, min_y, width, height, max_x, max_y
def on_mouse(event, x, y, flags, param):
global img, point1, point2, min_x, min_y, width, height, max_x, max_y
img2 = img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button pressed
point1 = (x, y)
cv2.circle(img2, point1, 10, (0, 255, 0), 2)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging with the left button held down
cv2.rectangle(img2, point1, (x, y), (255, 0, 0), 2)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_LBUTTONUP:  # left mouse button released
point2 = (x, y)
cv2.rectangle(img2, point1, point2, (0, 0, 255), 2)
cv2.imshow('image', img2)
min_y = min(point1[0], point2[0])
min_x = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
max_x = min_x + height
max_y = min_y + width
def overlap_restricted_area(x, y, patch_size, min_x, max_x, min_y, max_y):
dx0 = dy0 = patch_size // 2
minx1 = x - dx0
miny1 = y - dy0
maxx1 = x + dx0
maxy1 = y + dy0
minx2 = min_x
miny2 = min_y
maxx2 = max_x
maxy2 = max_y
minx = max(minx1, minx2)
miny = max(miny1, miny2)
maxx = min(maxx1, maxx2)
maxy = min(maxy1, maxy2)
if minx > maxx or miny > maxy:
return False
else:
return True
def cal_distance(a, b, A_padding, B, p_size):
p = p_size // 2
patch_a = A_padding[a[0]:a[0] + p_size, a[1]:a[1] + p_size, :]
patch_b = B[b[0] - p:b[0] + p + 1, b[1] - p:b[1] + p + 1, :]
temp = patch_b - patch_a
num = np.sum(1 - np.int32(np.isnan(temp)))
dist = np.sum(np.square(np.nan_to_num(temp))) / num
return dist
def cal_alpha(dis, gamma=2.0):
return gamma ** (-dis)
def reconstruction(f, A, B, p_size, dist, min_x, max_x, min_y, max_y, itter):
A_h = np.size(A, 0)
A_w = np.size(A, 1)
B_h = np.size(B, 0)
B_w = np.size(B, 1)
temp = np.zeros_like(A)
p = p_size // 2
for i in range(A_h):
for j in range(A_w):
cnt = 0
ans = np.zeros(3)
for m in range(-p, p + 1, 1):
for n in range(-p, p + 1, 1):
if not ((0 <= i + m < A_h) and (0 <= j + n < A_w)):
continue
if not ((0 <= f[i + m][j + n][0] - m < B_h) and (0 <= f[i + m][j + n][1] - n < B_w)):
continue
if overlap_restricted_area(f[i + m][j + n][0] - m, f[i + m][j + n][1] - n, p_size, min_x, max_x,
min_y,
max_y):
continue
alpha = cal_alpha(dis=dist[i + m, j + n])
cnt += alpha
ans += alpha * B[f[i + m][j + n][0] - m, f[i + m][j + n][1] - n, :]
temp[i, j, :] = ans / cnt
tmp = np.copy(B)
# temp = cv2.GaussianBlur(temp, (3, 3), 0)
tmp[min_x:min_x + A_h, min_y:min_y + A_w, :] = temp
# Image.fromarray(tmp).show()
return tmp, temp
def initialization(A, B, f, p_size, min_x, max_x, min_y, max_y, create_f=False):
A_h = np.size(A, 0)
A_w = np.size(A, 1)
B_h = np.size(B, 0)
B_w = np.size(B, 1)
p = p_size // 2
# A_padding = np.ones([A_h+p*2, A_w+p*2, 3]) * np.nan
A_padding = B[min_x - p:min_x + A_h + p, min_y - p:min_y + A_w + p, :]
A_padding[p:A_h + p, p:A_w + p, :] = A
random_B_r = np.random.randint(p, B_h - p, [A_h, A_w])
random_B_c = np.random.randint(p, B_w - p, [A_h, A_w])
for i in range(A_h):
for j in range(A_w):
while overlap_restricted_area(random_B_r[i][j], random_B_c[i][j], p_size, min_x, max_x, min_y, max_y):
random_B_r[i][j] = np.random.randint(p, B_h - p)
random_B_c[i][j] = np.random.randint(p, B_w - p)
if create_f:
f = np.zeros([A_h, A_w], dtype=object)
dist = np.zeros([A_h, A_w])
for i in range(A_h):
for j in range(A_w):
a = np.array([i, j])
if create_f:
b = np.array([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)
f[i, j] = b
else:
b = np.array([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)
if (i % 2 == 0) or (j % 2 == 0):
f[i, j] = b
else:
b = f[i, j]
dist[i, j] = cal_distance(a, b, A_padding, B, p_size)
return f, dist, A_padding
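# PatchMatch core: propagation() reuses good matches from already-visited neighbours (the scan
# direction alternates between odd and even iterations), while random_search() tries candidates
# in windows that shrink geometrically (by `alpha`) around the current best match.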
def propagation(f, a, dist, A_padding, B, p_size, is_odd, min_x, max_x, min_y, max_y):
A_h = np.size(A_padding, 0) - p_size + 1
A_w = np.size(A_padding, 1) - p_size + 1
# print(A_h, A_w)
x = a[0]
y = a[1]
if is_odd:
d_left = dist[max(x - 1, 0), y]
d_up = dist[x, max(y - 1, 0)]
d_current = dist[x, y]
idx = np.argmin(np.array([d_current, d_left, d_up]))
if idx == 1 and (not overlap_restricted_area(f[max(x - 1, 0), y][0] + 1, f[max(x - 1, 0), y][1], p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[max(x - 1, 0), y]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
if idx == 2 and (not overlap_restricted_area(f[x, max(y - 1, 0)][0], f[x, max(y - 1, 0)][1] + 1, p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[x, max(y - 1, 0)]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
else:
# print(dist.shape)
# print(min(x + 1, A_h - 1), y)
d_right = dist[min(x + 1, A_h - 1), y]
d_down = dist[x, min(y + 1, A_w - 1)]
d_current = dist[x, y]
idx = np.argmin(np.array([d_current, d_right, d_down]))
if idx == 1 and (
not overlap_restricted_area(f[min(x + 1, A_h - 1), y][0] - 1, f[min(x + 1, A_h - 1), y][1], p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[min(x + 1, A_h - 1), y]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
if idx == 2 and (
not overlap_restricted_area(f[x, min(y + 1, A_w - 1)][0], f[x, min(y + 1, A_w - 1)][1] - 1, p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[x, min(y + 1, A_w - 1)]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
def random_search(f, a, dist, A_padding, B, p_size, min_x, max_x, min_y, max_y, alpha=0.5):
x = a[0]
y = a[1]
B_h = np.size(B, 0)
B_w = np.size(B, 1)
p = p_size // 2
i = 4
search_h = B_h * alpha ** i
search_w = B_w * alpha ** i
b_x = f[x, y][0]
b_y = f[x, y][1]
while search_h > 1 and search_w > 1:
search_min_r = max(b_x - search_h, p)
search_max_r = min(b_x + search_h, B_h - p)
random_b_x = np.random.randint(search_min_r, search_max_r)
search_min_c = max(b_y - search_w, p)
search_max_c = min(b_y + search_w, B_w - p)
random_b_y = np.random.randint(search_min_c, search_max_c)
search_h = B_h * alpha ** i
search_w = B_w * alpha ** i
b = np.array([random_b_x, random_b_y])
d = cal_distance(a, b, A_padding, B, p_size)
if d < dist[x, y] and (not overlap_restricted_area(b[0], b[1], p_size, min_x, max_x, min_y, max_y)):
dist[x, y] = d
f[x, y] = b
i += 1
def NNS(img, ref, p_size, itr, f, dist, img_padding, min_x, max_x, min_y, max_y):
A_h = np.size(img, 0)
A_w = np.size(img, 1)
# print(A_h, A_w)
# print(img_padding.shape)
for itr in range(1, itr + 1):
if itr % 2 == 0:
for i in range(A_h - 1, -1, -1):
for j in range(A_w - 1, -1, -1):
a = np.array([i, j])
propagation(f, a, dist, img_padding, ref, p_size, False, min_x, max_x, min_y, max_y)
random_search(f, a, dist, img_padding, ref, p_size, min_x, max_x, min_y, max_y)
else:
for i in range(A_h):
for j in range(A_w):
a = np.array([i, j])
propagation(f, a, dist, img_padding, ref, p_size, True, min_x, max_x, min_y, max_y)
random_search(f, a, dist, img_padding, ref, p_size, min_x, max_x, min_y, max_y)
print("iteration: %d" % (itr))
return f
def upsample_nnf(nnf):
temp = np.zeros((nnf.shape[0], nnf.shape[1], 3))
for x in range(nnf.shape[0]):
for y in range(nnf.shape[1]):
temp[x][y] = [nnf[x][y][0], nnf[x][y][1], 0]
# img = np.zeros(shape=(size, size, 2), dtype=np.int)
# small_size = nnf.shape[0]
aw_ratio = 2 # ((size) // small_size)
ah_ratio = 2 # ((size) // small_size)
temp = cv2.resize(temp, None, fx=aw_ratio, fy=aw_ratio, interpolation=cv2.INTER_NEAREST)
imge = np.zeros(shape=(temp.shape[0], temp.shape[1], 2), dtype=np.int)
for i in range(temp.shape[0]):
for j in range(temp.shape[1]):
pos = temp[i, j]
imge[i, j] = pos[0] * aw_ratio, pos[1] * ah_ratio
return imge
padding_size = [15, 15, 13, 9, 5, 2]
# padding_size = [9, 7, 5, 3, 3, 2]
iter_arr = [2, 2, 16, 40, 64, 64]
def main(img_path):
# img_path = 'IMAGE/face.jpg'
global img
img = cv2.imread(img_path)
cv2.namedWindow('image')
cv2.setMouseCallback('image', on_mouse)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# print(min_x, min_y, height, width)
global_min_x = min_x
global_min_y = min_y
global_max_x = max_x
global_max_y = max_y
# img = np.array(Image.open("./cup_a.jpg"))
origin_ref = np.array(Image.open(img_path))
# ref = cv2.pyrDown(origin_ref, (np.size(origin_ref, 0)//2, np.size(origin_ref, 1)//2))
# Image.fromarray(ref).show()
itr = 4
start = time.time()
# origin_img = origin_ref[min_x: max_x + 1, min_y:max_y + 1, :]
# img = cv2.resize(origin_img, None, fx=2 ** (-4), fy=2 ** (-4), interpolation=cv2.INTER_NEAREST)
f = 0
depth = 3
for l in range(depth, -1, -1):
p_size = padding_size[l]
gmin_x = global_min_x // (2 ** l)
gmin_y = global_min_y // (2 ** l)
gmax_x = global_max_x // (2 ** l)
gmax_y = global_max_y // (2 ** l)
# print(origin_ref.shape)
# ref = cv2.resize(origin_ref, None, fx=2 ** (-l), fy=2 ** (-l), interpolation=cv2.INTER_LINEAR)
ref = origin_ref
for kk in range(l):
ref = cv2.pyrDown(ref, (np.size(origin_ref, 0) // 2, np.size(origin_ref, 1) // 2))
# print(ref.shape)
# print(gmin_x, gmin_y, gmax_x, gmax_y)
# !!!!!!!!!
img = ref[gmin_x: gmax_x + 1, gmin_y:gmax_y + 1, :]
# !!!!!!!!!
if l == depth:
# img = ref[gmin_x: gmax_x + 1, gmin_y:gmax_y + 1, :]
# img = np.zeros([gmax_x - gmin_x + 1, gmax_y - gmin_y + 1, 3])
# !!!!!!!!!!
# img = np.random.randint(0, 256, size=(gmax_x - gmin_x + 1, gmax_y - gmin_y + 1, 3), dtype=np.uint8)
# !!!!!!!!!!
# print(np.shape(img)[0] // 4)
f, dist, img_padding = initialization(img, ref, f, p_size, gmin_x, gmax_x, gmin_y, gmax_y, create_f=True)
else:
# print(img.shape)
fake, dist, img_padding = initialization(img, ref, f, p_size, gmin_x, gmax_x, gmin_y, gmax_y,
create_f=False)
# Image.fromarray(ref).show()
# Image.fromarray(img).show()
# print(img.shape)
# print(img_padding.shape)
for itter in range(iter_arr[l]):
f = NNS(img, ref, p_size, itr, f, dist, img_padding, gmin_x, gmax_x, gmin_y, gmax_y)
end = time.time()
print(end - start)
print(l, itter + 1, '/', iter_arr[l])
tmp, img = reconstruction(f, img, ref, p_size, dist, gmin_x, gmax_x, gmin_y, gmax_y, itter)
# if itter == iter_arr[l] - 1:
# Image.fromarray(tmp).show()
# img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
# Image.fromarray(img).show()
img = cv2.pyrUp(img, (np.size(img, 0) * 2, np.size(img, 1) * 2))
f = upsample_nnf(f)
# Image.fromarray(img).show()
tmp = Image.fromarray(tmp)
tmp.save("temp.jpg")
return "temp.jpg"
if __name__ == '__main__':
img_path = 'D://project//Image_Completion//IMAGE//face.jpg'
# img_path = 'D://project//Image_Completion//IMAGE//birds.jpg'
while True:
img_path = main(img_path)
| 2.734375
| 3
|
generativepy/tween.py
|
LloydTao/generativepy
| 58
|
12779664
|
# Author: <NAME>
# Created: 2019-01-25
# Copyright (C) 2018, <NAME>
# License: MIT
import math
class Tween():
'''
Tweening class for scalar values
Initial value is set on construction.
wait() maintains the current value for the requested number of frames
pad() similar to wait, but pads until the total length of the tween is the required size.
    set() sets a new current value, and adds it for the requested number of frames (which can be zero)
to() moves linearly from the current value to the supplied value. The first frame added will have the current value,
the last frame added will have the new value, with values spaced evenly in between. The final value will be set as
the new current value.
    You can use get(n) to get the nth frame, or alternatively you can use tween[n]. The built-in len() function can be
    used to find the sequence length. Tweens are iterable, so they can be used with for loops etc.
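    A minimal usage sketch (numbers chosen only for illustration):
        tween = Tween(0).wait(5).to(10, 10).set(3, 5).pad(30)
        len(tween)   # 30
        tween[0]     # 0    (initial value held during wait())
        tween[14]    # 10.0 (end of the to() ramp)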
'''
def __init__(self, value=0):
self.check_value(value, None)
self.frames = []
self.previous = value
self.nextFrame = 0
def wait(self, count):
self.check_count(count)
self.frames.extend([self.previous for i in range(count)])
return self
def pad(self, final_length):
self.check_count(final_length)
required = final_length - len(self.frames)
if required > 0:
self.frames.extend([self.previous for i in range(required)])
return self
def set(self, value, count=0):
self.check_value(value, self.previous)
self.check_count(count)
self.frames.extend([value for i in range(count)])
self.previous = value
return self
def to(self, value, count):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
factor = (i + 1) / count
self.frames.append(self.previous + factor * (value - self.previous))
self.previous = value
return self
def ease(self, value, count, ease_function):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
factor = ease_function((i + 1) / count)
self.frames.append(self.previous + factor * (value - self.previous))
self.previous = value
return self
def get(self, frame):
if frame >= len(self.frames):
return self.previous
return self.frames[frame]
def __getitem__(self, key):
return self.get(key)
def __next__(self):
if self.nextFrame >= len(self.frames):
raise StopIteration()
frame = self.get(self.nextFrame)
self.nextFrame += 1
return frame
def __iter__(self):
return self
def check_value(self, value, previous):
if (not isinstance(value, (int, float))) or isinstance(value, bool):
raise ValueError('Numeric value required')
def check_index(self, value):
if not isinstance(value, int):
raise ValueError('Integer value required')
def check_count(self, value):
if not isinstance(value, int) or value < 0:
raise ValueError('Non-negative integer value required')
def __len__(self):
return len(self.frames)
class TweenVector(Tween):
'''
Tweening class for vector quantities.
    Similar to Tween, but the values are vector quantities (i.e. tuples or lists), such as (x, y) positions or
(r, g, b, a) colours.
The vector quantities must have at least 1 element, but normally it will be 2 or more. Every value added must have
the same length as the initial value, for example if you start with an (x, y) value, every new value must also
    have 2 dimensions.
'''
def __init__(self, value=(0, 0)):
Tween.__init__(self, value)
def to(self, value, count):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
nextvalue = []
factor = (i + 1) / count
for a, b in zip(self.previous, value):
nextvalue.append(a + factor * (b - a))
self.frames.append(nextvalue)
self.previous = value
return self
def ease(self, value, count, ease_function):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
nextvalue = []
factor = ease_function((i + 1) / count)
for a, b in zip(self.previous, value):
nextvalue.append(a + factor * (b - a))
self.frames.append(nextvalue)
self.previous = value
return self
def check_value(self, value, previous):
try:
if len(value) <= 0:
raise ValueError('Vectors of rank 0 are not supported')
if previous and len(value) != len(self.previous):
raise ValueError('All values must be vectors of equal rank')
        except TypeError:
            raise ValueError('Sequence value required')
def ease_linear():
return lambda x: x
def ease_in_harm():
return lambda x: 1 + math.sin(math.pi * (x / 2 - 0.5))
def ease_out_harm():
return lambda x: math.sin(math.pi * x / 2)
def ease_in_out_harm():
return lambda x: 0.5 + 0.5 * math.sin(math.pi * (x - 0.5))
def ease_in_elastic():
return lambda x: math.sin(2.25 * 2 * math.pi * (x)) * pow(2, 10 * (x - 1))
def ease_out_elastic():
return lambda x: 1 - math.sin(2.25 * 2 * math.pi * (1 - x)) * pow(2, -10 * x)
def ease_in_out_elastic():
def fn(x):
if x < 0.5:
f = 2 * x
return 0.5 * (math.sin(2.25 * 2 * math.pi * f) * pow(2, 10 * (f - 1)))
else:
f = (2 * x - 1)
return 0.5 * (1 - math.sin(2.25 * 2 * math.pi * (1 - f)) * pow(2, -10 * f)) + 0.5
return fn
def ease_in_back():
return lambda x: x * x * x - x * math.sin(x * math.pi)
def ease_out_back():
def fn(x):
f = (1 - x)
return 1 - (f * f * f - f * math.sin(f * math.pi))
return fn
def ease_in_out_back():
def fn(x):
if x < 0.5:
f = 2 * x
return 0.5 * (f * f * f - f * math.sin(f * math.pi))
else:
f = (1 - (2 * x - 1))
return 0.5 * (1 - (f * f * f - f * math.sin(f * math.pi))) + 0.5
return fn
# Basic bounce function used by the bounce easing functions.
# Don't use this function directly, use the ease_*_bounce functions instead.
def _bounce(x):
if x < 4 / 11.0:
return (121 * x * x) / 16.0
elif x < 8 / 11.0:
return (363 / 40.0 * x * x) - (99 / 10.0 * x) + 17 / 5.0
elif x < 9 / 10.0:
return (4356 / 361.0 * x * x) - (35442 / 1805.0 * x) + 16061 / 1805.0
else:
return (54 / 5.0 * x * x) - (513 / 25.0 * x) + 268 / 25.0
def ease_in_bounce():
return lambda x: 1 - _bounce(1 - x)
def ease_out_bounce():
return lambda x: _bounce(x)
def ease_in_out_bounce():
def fn(x):
if x < 0.5:
return 0.5 * (1 - _bounce(1 - x * 2))
else:
return 0.5 * _bounce(x * 2 - 1) + 0.5
return fn
| 3.953125
| 4
|
mod_ngarn/connection.py
|
hotkit/mod-ngarn
| 3
|
12779665
|
import json
import os
import asyncpg
async def get_connection():
PGDBNAME = os.getenv("PGDBNAME")
PGHOST = os.getenv("PGHOST")
PGPASSWORD = <PASSWORD>("PGPASSWORD")
PGUSER = os.getenv("PGUSER")
cnx = await asyncpg.connect(
        user=PGUSER, password=PGPASSWORD, database=PGDBNAME, host=PGHOST
)
await cnx.set_type_codec(
"jsonb", encoder=json.dumps, decoder=json.loads, schema="pg_catalog"
)
await cnx.set_type_codec(
"json", encoder=json.dumps, decoder=json.loads, schema="pg_catalog"
)
return cnx
class DBConnection:
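    """Async context manager variant of get_connection(): use as `async with DBConnection() as cnx:`."""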
async def __aenter__(self):
PGDBNAME = os.getenv("PGDBNAME")
PGHOST = os.getenv("PGHOST")
PGPASSWORD = <PASSWORD>("<PASSWORD>")
PGUSER = os.getenv("PGUSER")
self.cnx = await asyncpg.connect(
            user=PGUSER, password=PGPASSWORD, database=PGDBNAME, host=PGHOST
)
await self.cnx.set_type_codec(
"jsonb", encoder=json.dumps, decoder=json.loads, schema="pg_catalog"
)
await self.cnx.set_type_codec(
"json", encoder=json.dumps, decoder=json.loads, schema="pg_catalog"
)
return self.cnx
async def __aexit__(self, exc_type, exc, tb):
await self.cnx.close()
| 2.546875
| 3
|
data_admin_examples/example1/urls.py
|
love1900905/frepple-data-admin
| 7
|
12779666
|
from django.conf.urls import url
from . import views
from . import serializers
# Automatically add these URLs when the application is installed
autodiscover = True
urlpatterns = [
# Grid views
url(
r"^data/example1/location/$",
views.LocationList.as_view(),
name="example1_location_changelist",
),
url(
r"^data/example1/customer/$",
views.CustomerList.as_view(),
name="example1_customer_changelist",
),
url(
r"^data/example1/demand/$",
views.DemandList.as_view(),
name="example1_demand_changelist",
),
url(
r"^data/example1/item/$",
views.ItemList.as_view(),
name="example1_item_changelist",
),
# REST API framework
url(r"^api/example1/location/$", serializers.LocationAPI.as_view()),
url(r"^api/example1/customer/$", serializers.CustomerAPI.as_view()),
url(r"^api/example1/demand/$", serializers.DemandAPI.as_view()),
url(r"^api/example1/item/$", serializers.ItemAPI.as_view()),
url(r"^api/example1/location/(?P<pk>(.+))/$", serializers.LocationdetailAPI.as_view()),
url(r"^api/example1/customer/(?P<pk>(.+))/$", serializers.CustomerdetailAPI.as_view()),
url(r"^api/example1/demand/(?P<pk>(.+))/$", serializers.DemanddetailAPI.as_view()),
url(r"^api/example1/item/(?P<pk>(.+))/$", serializers.ItemdetailAPI.as_view()),
]
| 2.1875
| 2
|
src/utils/money_api.py
|
Bemesko/Dolar-Canadense-Bipolar
| 3
|
12779667
|
import requests
class MoneyAPI():
def __init__(self):
self.API_URL = "https://economia.awesomeapi.com.br/json/all/CAD"
        self.SUCCESS_STATUS_CODE = 200
def request_money(self):
resp = requests.get(self.API_URL)
        if resp.status_code != self.SUCCESS_STATUS_CODE:
raise Exception # ('GET /tasks/ {}'.format(resp.status_code))
dollar_info = resp.json()["CAD"]
        # add a field holding just the hours and minutes
check_time = dollar_info["create_date"].split(" ")[1].split(":")
dollar_info["check_time"] = f"{check_time[0]}:{check_time[1]}"
return dollar_info
if __name__ == "__main__":
money = MoneyAPI()
print(money.request_money()["check_time"])
| 3.265625
| 3
|
project/server/main/tasks.py
|
dataesr/harvest-theses
| 0
|
12779668
|
import time
import datetime
import os
import requests
from project.server.main.feed import harvest_and_insert
from project.server.main.logger import get_logger
logger = get_logger(__name__)
def create_task_harvest(arg):
collection_name = arg.get('collection_name')
if collection_name:
harvest_and_insert(collection_name)
| 2.140625
| 2
|
tests/test_bitvector.py
|
devjsc/ledger-api-py
| 0
|
12779669
|
import unittest
from fetchai.ledger.bitvector import BitVector
class BitVectorSerialisationTests(unittest.TestCase):
def test_empty(self):
bits = BitVector()
self.assertEqual(len(bits), 0)
self.assertEqual(bits.byte_length, 0)
def test_sets(self):
bits = BitVector(8)
bits.set(3, 1)
bits.set(6, 1)
bits.set(7, 1)
self.assertEqual(len(bits), 8)
self.assertEqual(bits.byte_length, 1)
self.assertEqual(bits.as_hex(), 'c8')
def test_gets(self):
bits = BitVector.from_hex_string('c8')
self.assertEqual(bits.get(0), 0)
self.assertEqual(bits.get(1), 0)
self.assertEqual(bits.get(2), 0)
self.assertEqual(bits.get(3), 1)
self.assertEqual(bits.get(4), 0)
self.assertEqual(bits.get(5), 0)
self.assertEqual(bits.get(6), 1)
self.assertEqual(bits.get(7), 1)
def test_equal(self):
bits1 = BitVector.from_hex_string('c8')
bits2 = BitVector.from_hex_string('c8')
self.assertEqual(bits1, bits2)
def test_copy_construct(self):
bits1 = BitVector.from_hex_string('c8')
bits2 = BitVector(bits1)
self.assertEqual(bits1, bits2)
def test_not_equal(self):
bits1 = BitVector.from_hex_string('c8')
bits2 = BitVector.from_hex_string('c9c8')
self.assertFalse(bits1 == bits2)
def test_binary(self):
bits = BitVector.from_bytes(bytes([0x1f]), 8)
self.assertEqual('00011111', bits.as_binary())
| 2.53125
| 3
|
buddy/groupme_util.py
|
gc-13/studybuddy
| 0
|
12779670
|
from django.conf import settings
import requests, json
from .models import User, Course, StudyGroup, StudyRequest
GROUPME_ACCESS_TOKEN = settings.GROUPME_ACCESS_TOKEN
def creategroupme(title):
groupme_name = "Study Group for - {}".format(title)
groupme_params = '{"name": "'+str(groupme_name)+'", "share":"true"}'
posturl = "https://api.groupme.com/v3/groups?token={}".format(GROUPME_ACCESS_TOKEN)
groupme_response = requests.post(posturl,
groupme_params).json()
groupme_id = groupme_response['response']['group_id']
groupme_shareurl = groupme_response['response']['share_url']
return(groupme_name, groupme_id, groupme_shareurl)
| 2.28125
| 2
|
MySQL/insert_data_to_table.py
|
arjunjanamatti/pymongo_practise
| 0
|
12779671
|
import mysql.connector
from mysql.connector.errors import Error
database_name = 'practise_db'
user_name = 'aj'
password = '<PASSWORD>'
host_address = 'localhost'
try:
### CONNECT TO THE DATABASE
mydb = mysql.connector.connect(
host = 'localhost',
user = user_name,
passwd = password,
        database = database_name
)
print('Successfully connected to database: {}'.format(mydb))
### ADDING DATA TO THE TABLE METHOD# 1
cur = mydb.cursor()
insertquery = '''
INSERT INTO namelist(First_name, Last_name, Email) VALUES ('nayan', 'no_name', '<EMAIL>')
'''
cur.execute(insertquery)
mydb.commit()
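    # Method 2 below passes the values separately via %s placeholders, letting the connector
    # escape them; this is the safer pattern when values come from untrusted input.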
### ADDING DATA TO THE TABLE METHOD# 2
cur = mydb.cursor()
insertquery_2 = '''
INSERT INTO namelist(First_name, Last_name, Email) VALUES (%s, %s, %s)
'''
value = ('gayathri', 'no_name', '<EMAIL>')
cur.execute(insertquery_2, value)
mydb.commit()
mydb.close()
except Error as e:
print('Error: {}'.format(e))
| 3.296875
| 3
|
tests/test_data.py
|
deep-voice/soundbay
| 7
|
12779672
|
from hydra.experimental import compose, initialize
from random import randint
from random import seed
from soundbay.data import ClassifierDataset
import numpy as np
def test_dataloader() -> None:
seed(1)
with initialize(config_path="../soundbay/conf"):
# config is relative to a module
cfg = compose(config_name="runs/main")
data_loader = ClassifierDataset(cfg.data.train_dataset.data_path, cfg.data.train_dataset.metadata_path,
augmentations=cfg.data.train_dataset.augmentations,
augmentations_p=cfg.data.train_dataset.augmentations_p,
preprocessors=cfg.data.train_dataset.preprocessors)
assert data_loader.metadata.shape[1] == 5 # make sure metadata has 5 columns
assert data_loader.metadata.shape[0] > 0 # make sure metadata is not empty
data_size = data_loader.metadata.shape[0]
value = randint(0, data_size)
sample = data_loader[value]
assert np.issubdtype(sample[1], np.integer)
if 'spectrogram' in cfg.data.train_dataset.preprocessors:
assert len(sample[0].shape) == 3
if 'utils.LibrosaMelSpectrogram' in cfg.data.train_dataset.preprocessors.spectrogram._target_:
assert sample[0].shape[1] == cfg.data.train_dataset.preprocessors.spectrogram.n_mels
else:
assert sample[0].shape[1] == (cfg.data.train_dataset.preprocessors.spectrogram.n_fft // 2 + 1)
else:
assert sample[0].shape[1] == 1
| 2.109375
| 2
|
delivery_bot.py
|
zaleksandrne/delivery_bot
| 0
|
12779673
|
# -*- coding: utf-8 -*-
import json, os, requests
from dotenv import load_dotenv
from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import (CallbackContext, CallbackQueryHandler,
CommandHandler, Filters, MessageHandler, Updater)
load_dotenv()
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')
CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')
DELLIN_KEY = os.getenv('DELLIN_KEY')
DELLIN_ID = os.getenv('DELLIN_ID')
URL_DELLIN_CALC = os.getenv('URL_DELLIN_CALC')
URL_DELLIN_KLADR = os.getenv('URL_DELLIN_KLADR')
URL_SBER = os.getenv('URL_SBER')
URL_GLAVDOSTAVKA = os.getenv('URL_GLAVDOSTAVKA')
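# Per-user conversation state: which step the user is on plus the departure/arrival cities collected so far.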
USERS = {}
bot = Bot(TELEGRAM_TOKEN)
updater = Updater(TELEGRAM_TOKEN)
def start(update, context):
USERS[update.effective_user.id] = {
'progress': 1,
'derival': '',
'arrival': ''
}
bot.send_message(update.effective_message.chat.id,
'Введите город отправления посылки'
)
def progress(update, context):
if USERS[update.effective_user.id]['progress'] == 1:
return city(update, context)
elif USERS[update.effective_user.id]['progress'] == 2:
return result(update, context)
def city(update: Update, context: CallbackContext):
USERS[update.effective_user.id]['derival'] = update['message']['text']
USERS[update.effective_user.id]['progress'] = 2
bot.send_message(update.effective_message.chat.id,
'Введите город получения посылки'
)
def result(update: Update, context: CallbackContext):
USERS[update.effective_user.id]['arrival'] = update['message']['text']
derival = USERS[update.effective_user.id]['derival'].lower()
arrival = USERS[update.effective_user.id]['arrival'].lower()
derival_dellin = requests.post(
URL_DELLIN_KLADR,
json={"appkey": DELLIN_KEY,
"q": derival,
"limit": 1}
)
arrival_dellin = requests.post(
URL_DELLIN_KLADR,
json={"appkey": DELLIN_KEY,
"q": arrival,
"limit": 1}
)
try:
derival_dellin = derival_dellin.json().get('cities')[0]['code']
arrival_dellin = arrival_dellin.json().get('cities')[0]['code']
except IndexError:
del USERS[update.effective_user.id]
keyboard = [[InlineKeyboardButton(
'Новый расчет',
callback_data='new'
)]]
reply_markup = InlineKeyboardMarkup(keyboard)
        bot.send_message(
            update.effective_message.chat.id,
            'Ошибка в названии города. Попробуйте еще.',
            reply_markup=reply_markup
        )
        return  # without valid city codes the rest of the handler cannot run
dellin = requests.post(
URL_DELLIN_CALC,
json={"appkey": DELLIN_KEY,
"sessionID": DELLIN_ID,
"derival": {"city": derival_dellin},
"arrival": {"city": arrival_dellin}
}
)
with open('sber_cities.json', 'r', encoding='utf-8') as g:
sber_cities = json.load(g)
derival_sber = [city['kladr_id'] for city in sber_cities \
if city.get('name').lower() == derival][0]
arrival_sber = [city['kladr_id'] for city in sber_cities \
if city.get('name').lower() == arrival][0]
sber = requests.post(
URL_SBER,
json={"id": "JsonRpcClient.js",
"jsonrpc": "2.0",
"method": "calculateShipping",
"params": {
"stock": True,
"kladr_id_from": derival_sber,
"kladr_id": arrival_sber,
"length": 50,
"width": 35,
"height": 35,
"weight": 5,
"cod": 0,
"declared_cost": 0,
"courier": "sberlogistics"
}
}
)
sber = sber.json()['result']['methods'][0]
with open('glav_cities.json', 'r', encoding='utf-8') as g:
GLAV_CITIES = json.load(g)
derival_glav = [city['id'] for city in GLAV_CITIES \
if city.get('name', '').lower() == derival][0]
arrival_glav = [city['id'] for city in GLAV_CITIES \
if city.get('name', '').lower() == arrival][0]
glavdostavka = requests.post(
URL_GLAVDOSTAVKA + f'&depPoint={derival_glav}&arrPoint={arrival_glav}'
)
price_glavdostavka = glavdostavka.json()['price']
dellin = dellin.json()['data']['terminals_standard']
price_dellin = dellin['price']
period_dellin = dellin['period_to']
price_sber = sber['cost']['total']['sum']
period_sber = sber['max_days']
del USERS[update.effective_user.id]
keyboard = [[InlineKeyboardButton('Новый расчет', callback_data='new')]]
reply_markup = InlineKeyboardMarkup(keyboard)
derival = derival[0].upper() + derival[1:]
arrival = arrival[0].upper() + arrival[1:]
bot.send_message(update.effective_message.chat.id,
f'Стоимость и сроки доставки посылки с габаритами '
f'не превышающими 0.5х0.35х0.35(м) и массой не более 5кг '
f'из города {derival} в город {arrival} '
f'(от терминала до терминала):\n\n'
f'Деловые линии: {price_dellin} руб. '
f'До {period_dellin} дней.\n'
f'СберЛогистика: {price_sber} руб. '
f'До {period_sber} дней.\n'
f'ГлавДоставка: {price_glavdostavka} руб',
reply_markup=reply_markup
)
def button(update: Update, context: CallbackContext):
start(update, context)
def main():
start_handler = CommandHandler('start', start)
updater.dispatcher.add_handler(start_handler)
updater.dispatcher.add_handler(CallbackQueryHandler(button))
updater.dispatcher.add_handler(MessageHandler(Filters.text, progress))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
| 2.296875
| 2
|
src/megapy/pin.py
|
aakash-sahai/megapy
| 0
|
12779674
|
<filename>src/megapy/pin.py
from arduino import ArduinoConnection, ArduinoObject
class DigitalPin(ArduinoObject):
def __init__(self, conn, pin, mode='input'):
ArduinoObject.__init__(self, conn, 'dp' + str(pin), 'pin digital')
self._pin = pin
self._mode = mode
super(DigitalPin, self).create("{} {}".format(mode, pin))
def _get_value(self):
return int(super(DigitalPin, self).get("value"))
def _set_value(self, value):
return super(DigitalPin, self).set("value", value)
def _get_pin(self):
return self._pin
def _get_mode(self):
return self._mode
value = property(_get_value, _set_value)
pin = property(_get_pin)
mode = property(_get_mode)
class AnalogPin(ArduinoObject):
def __init__(self, conn, pin):
ArduinoObject.__init__(self, conn, 'ap' + str(pin), 'pin analog')
self._pin = pin
super(AnalogPin, self).create("input {}".format(pin))
def _get_value(self):
return int(super(AnalogPin, self).get("value"))
def _get_pin(self):
return self._pin
value = property(_get_value)
pin = property(_get_pin)
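# Illustrative usage sketch (not part of the original module). It assumes an
# already-established ArduinoConnection instance named `conn`; the connection
# constructor arguments and the pin numbers below are assumptions, not part
# of this file.
#
#   led = DigitalPin(conn, 13, mode='output')
#   led.value = 1              # drive the pin high
#   button = DigitalPin(conn, 7, mode='input')
#   print(button.value)        # read the digital state
#   sensor = AnalogPin(conn, 0)
#   print(sensor.value)        # read the analog value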
| 3.28125
| 3
|
pysuru/tests/conftest.py
|
rcmachado/pysuru
| 0
|
12779675
|
# coding: utf-8
from pytest import fixture
@fixture
def tsuru_apps_list():
return """[
{
"name": "app1-dev",
"cname": ["app1-dev.cname.example.com"],
"ip": "app1-dev.example.com"
},
{
"name": "app1-prod",
"cname": [],
"ip": "app1-prod.example.com"
},
{
"name": "app2-dev",
"cname": [],
"ip": "app2-dev.example.com"
}
]"""
| 2.015625
| 2
|
dev_Hand.py
|
lpeletan/Poker_Monte-Carlo
| 0
|
12779676
|
<filename>dev_Hand.py
import poker as p
d = p.Deck.standard_52_card_deck()
print(d)
for c in d:
print(repr(c), c)
h = p.Hand([d['2c']])
print(h._strength)
print(h.strength)
# h = p.Hand.best_from_cards(d.cards)
# print(h)
| 2.5625
| 3
|
pycmp/ast/relational.py
|
aeroshev/CMP
| 0
|
12779677
|
from .lhs_rhs_node import LhsRhsNode
from .node import Node
class GreaterRelationalNode(LhsRhsNode):
"""Greater relational object node"""
__slots__ = ("lhs", "rhs")
def __init__(self, lhs: Node, rhs: Node) -> None:
super().__init__(lhs, rhs)
class GreaterEqualRelationalNode(LhsRhsNode):
"""Greater or equal relational object node"""
__slots__ = ("lhs", "rhs")
def __init__(self, lhs: Node, rhs: Node) -> None:
super().__init__(lhs, rhs)
class LowerRelationalNode(LhsRhsNode):
"""Lower object node"""
__slots__ = ("lhs", "rhs")
def __init__(self, lhs: Node, rhs: Node) -> None:
super().__init__(lhs, rhs)
class LowerEqualRelationalNode(LhsRhsNode):
"""Lower or equal object node"""
__slots__ = ("lhs", "rhs")
def __init__(self, lhs: Node, rhs: Node) -> None:
super().__init__(lhs, rhs)
| 3.109375
| 3
|
03 Programming Contest/shoffee.py
|
thinkofmia/Team-MIA-Shopee-Code-League-2021
| 0
|
12779678
|
<gh_stars>0
#!/bin/python3
import math
import os
import random
import re
import sys
from itertools import combinations
#
# Complete the 'findShoffee' function below.
#
# The function is expected to return an INTEGER: the number of non-empty
# coffee bean combinations whose average preference is at least the
# expectation value K.
# The function accepts two STRINGs as parameters.
#
def findShoffee(coffeeBeanAndExpectation, preferenceValue):
# Write your code here
coffeeBeanAndExpectation = coffeeBeanAndExpectation.split(' ')
N = int(coffeeBeanAndExpectation[0])
K = int(coffeeBeanAndExpectation[1])
preferenceValue = preferenceValue.split(' ')
preferences = []
for i in preferenceValue:
preferences.append(int(i))
result = []
for a in range(len(preferences)+1):
result.append(list(combinations(preferences, a)))
resultClean = [item for sublist in result for item in sublist]
resultClean.pop(0)
resultClean = list(dict.fromkeys(resultClean))
another = []
for i in resultClean:
myList = list(i)
sums = 0
for j in myList:
sums += j
another.append(sums / len(myList))
count = 0
for i in another:
if i >= K:
count += 1
return count
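# Worked example (illustrative, not part of the original submission): with
# coffeeBeanAndExpectation = "3 5" and preferenceValue = "1 7 5", the
# non-empty combinations whose average preference is at least 5 are
# {7}, {5} and {7, 5}, so findShoffee returns 3.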
if __name__ == '__main__':
coffeeBeanAndExpectation = input()
preferenceValue = input()
result = findShoffee(coffeeBeanAndExpectation, preferenceValue)
print(result)
| 3.578125
| 4
|
leetcode/python/Q0054_Spiral_Matrix.py
|
lisuizhe/algorithm
| 2
|
12779679
|
class Solution(object):
def spiralOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
ans = []
if not matrix:
return ans
r1, r2 = 0, len(matrix) - 1
c1, c2 = 0, len(matrix[0]) - 1
while c1 <= c2 and r1 <= r2:
for c in range(c1, c2 + 1):
ans.append(matrix[r1][c])
for r in range(r1 + 1, r2 + 1):
ans.append(matrix[r][c2])
if r1 < r2 and c1 < c2:
for c in range(c2 - 1, c1, -1):
ans.append(matrix[r2][c])
for r in range(r2, r1, -1):
ans.append(matrix[r][c1])
r1 += 1
r2 -= 1
c1 += 1
c2 -= 1
return ans
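# Illustrative usage (not part of the original solution): the traversal walks
# the matrix layer by layer, clockwise from the top-left corner.
if __name__ == '__main__':
    demo = [[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]
    print(Solution().spiralOrder(demo))  # expected: [1, 2, 3, 6, 9, 8, 7, 4, 5]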
| 3.25
| 3
|
scripts/batch-reverse-complement.py
|
cherrytrees-kpu/kpu-agc-project-scripts
| 0
|
12779680
|
<gh_stars>0
from Bio import Seq, SeqIO
import argparse
import pathlib
def parse_args():
parser = argparse.ArgumentParser(description="Script to quickly generate reverse complement fastas")
parser.add_argument(
'seq_path',
type=pathlib.Path,
action='store',
help='Path to sequence files'
)
args = parser.parse_args()
return args.seq_path
def reverse_complement_to_fasta(seq_file_path):
    """Write the reverse complement of a single fasta file next to the input."""
    seq_file = SeqIO.read(seq_file_path, 'fasta')
    gene = seq_file.id
    rev_comp_seq = seq_file.seq.reverse_complement()
    output_file_path = seq_file_path.with_name(f'{gene}_REV.fasta')
    with open(output_file_path, 'w') as output_file:
        output_file.write(
            f'>{gene}\n{str(rev_comp_seq)}'
        )
def main(seq_path):
    if seq_path.is_dir() is True:
        #Loop over all fasta files in the directory
        for seq_file_path in seq_path.glob('*.fasta'):
            reverse_complement_to_fasta(seq_file_path)
    else:
        reverse_complement_to_fasta(seq_path)
if __name__ == '__main__':
seq_path = parse_args()
main(seq_path)
| 2.921875
| 3
|
packages/attitude.pkg/providers.py
|
GrahamCobb/maemo-mud-builder
| 0
|
12779681
|
<filename>packages/attitude.pkg/providers.py
#
# Provider information sources for `Attitude' - a false horizon display using
# accelerometer information. (c) <NAME> 2009
# Released under the Artistic Licence
import os.path
from math import sin, cos, pi
class Dummy:
"""One of the simplest providers: returns dead-on, flat."""
def position(self):
#return (0, 0, -1000) # Back down
#return (0, 0, 1000) # Front down
#return (-1000, 0, 0) # Right edge down
#return (1000, 0, 0) # Left edge down
#return (0, -1000, 0) # Bottom edge down
return (-500, -500, 0) # Bottom right down
class Demo:
"""A demonstration provider which will take the user on a tour through
the air."""
x = 0.0
y = 0.0
z = 0.0
def position(self):
self.x += 0.1
self.y += 0.04
self.z += 0.03
return (sin(self.x) * 350,
sin(self.y) * 400 - 100,
sin(self.z) * 450)
class NokiaAccelerometer:
"""An accelerometer provider which actually reads an RX-51's
accelerometers, based on http://wiki.maemo.org/Accelerometers"""
    ACCELEROMETER_PATH = '/sys/class/i2c-adapter/i2c-3/3-001d/coord'
    def position(self):
        f = open(self.ACCELEROMETER_PATH, 'r')
        coords = [int(w) for w in f.readline().split()]
        f.close()
        return coords
    @classmethod
    def available(cls):
        return os.path.isfile(cls.ACCELEROMETER_PATH)
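# Illustrative selection sketch (not part of the original module): prefer the
# real accelerometer when its sysfs node is present, otherwise fall back to
# the Demo provider.
if __name__ == '__main__':
    provider = NokiaAccelerometer() if NokiaAccelerometer.available() else Demo()
    print(provider.position())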
| 3.21875
| 3
|
_lib/wordpress_office_processor.py
|
himedlooff/cfgov-refresh
| 0
|
12779682
|
import sys
import json
import os.path
import requests
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page':current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_office(post)
def process_office(item):
item['_id'] = item['slug']
custom_fields = item['custom_fields']
# get intro text & subscribe form data from custom fields
for attr in ['intro_text', 'intro_subscribe_form', 'related_contact']:
if attr in custom_fields:
item[attr] = custom_fields[attr][0]
# build top story dict
top_story = {}
for attr in ['top_story_head', 'top_story_desc']:
if attr in custom_fields:
top_story[attr] = custom_fields[attr][0]
# convert top story links into a proper list
top_story_links = []
for x in xrange(0,5):
key = 'top_story_links_%s' % x
if key in custom_fields:
top_story_links.append(custom_fields[key])
if top_story_links:
top_story['top_story_links'] = top_story_links
if top_story:
item['top_story'] = top_story
# create list of office resource dicts
item['resources'] = []
for x in xrange(0,4):
resource = {}
fields = ['head', 'desc', 'icon', 'link']
for field in fields:
field_name = 'resource_%s_%s' % (str(x), field)
if field_name in custom_fields and custom_fields[field_name][0] != '':
if field == 'link':
resource[field] = custom_fields[field_name]
else:
resource[field] = custom_fields[field_name][0]
if resource:
item['resources'].append(resource)
return item
| 2.671875
| 3
|
zombie.py
|
mohaninit/bigga
| 0
|
12779683
|
import os
CMD = 'docker ps'
os.system(CMD)
ls = os.popen(CMD).read().split('\n')[1:-1]
zombies = []
for line in ls:
container, image = line.split()[:2]
if 'bigga' not in image and ':' not in image:
print(container, image)
zombies.append(container)
print("Zombies: ", " ".join(zombies))
# docker kill <zombies>
# docker system prune
| 3
| 3
|
mastermind_api/game/tests/test_models.py
|
manuelmamut/mastermind
| 0
|
12779684
|
from django.test import TestCase
from ..models import Game
class TestGame(TestCase):
"""This is the test for the Game model"""
def setUp(self):
Game.objects.create(codemaker="User 1",
codebreaker="User 2",
peg_1="R",
peg_2="R",
peg_3="R",
peg_4="R")
def test_game_info_creation(self):
game = Game.objects.get(codemaker="User 1")
self.assertEqual(game.codebreaker, "User 2")
self.assertEqual(game.peg_1, "R")
self.assertEqual(game.peg_2, "R")
self.assertEqual(game.peg_3, "R")
self.assertEqual(game.peg_4, "R")
self.assertEqual(game.tries_number, 12)
self.assertEqual(game.open_game, True)
| 2.859375
| 3
|
plant_vs_zoomie_game_normal03.py
|
ChengzhuLi/plantwarzombie
| 4
|
12779685
|
<gh_stars>1-10
import pygame
import os
from Peashooter import Peashooter
from SunFlower import SunFlower
from WallNut import WallNut
from Sun import Sun
from Zombie import Zombie
pygame.init()
backgd_size = (1200, 600)
screen = pygame.display.set_mode(backgd_size)
pygame.display.set_caption('plant_vs_zoomie')
bg_img_path = 'material/images/background1.jpg'
bg_img_obj = pygame.image.load(bg_img_path).convert_alpha()
sunbank_img_path = 'material/images/SunBack.png'
sunbank_img_obj = pygame.image.load(sunbank_img_path).convert_alpha()
text = '900'
sun_font = pygame.font.SysFont('arial',25)
sun_num_surface = sun_font.render(text,True,(0,0,0))
peashooter = Peashooter()
sunflower = SunFlower()
wallnut = WallNut()
zombie = Zombie()
spriteGroup = pygame.sprite.Group()
spriteGroup.add(peashooter)
spriteGroup.add(sunflower)
spriteGroup.add(wallnut)
spriteGroup.add(zombie)
# sunList = []
clock = pygame.time.Clock()
def main():
running = True
index = 0
while running:
if index >= 130:
index = 0
clock.tick(20)
        # spawn a sun every 2 seconds
if index % 40 == 0:
sun = Sun(sunflower.rect)
spriteGroup.add(sun)
screen.blit(bg_img_obj,(0,0))
screen.blit(sunbank_img_obj,(250,0))
screen.blit(sun_num_surface,(300,5))
spriteGroup.update(index)
spriteGroup.draw(screen)
index+=1
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.display.update()
if __name__ == '__main__':
main()
| 2.640625
| 3
|
multinet/api/apps.py
|
multinet-app/multinet-api
| 0
|
12779686
|
from django.apps import AppConfig
class ApiConfig(AppConfig):
name = 'multinet.api'
verbose_name = 'Multinet: Api'
| 1.1875
| 1
|
togglcmder/toggl/caching.py
|
yatesjr/toggl-cmder
| 3
|
12779687
|
import sqlite3
from sqlite3 import IntegrityError
import logging
from typing import List
from datetime import datetime
from togglcmder.toggl.types.workspace import Workspace
from togglcmder.toggl.builders.workspace_builder import WorkspaceBuilder
from togglcmder.toggl.types.time_entry import TimeEntry
from togglcmder.toggl.builders.time_entry_builder import TimeEntryBuilder
from togglcmder.toggl.types.user import User
from togglcmder.toggl.builders.user_builder import UserBuilder
from togglcmder.toggl.types.tag import Tag
from togglcmder.toggl.builders.tag_builder import TagBuilder
from togglcmder.toggl.types.project import Project
from togglcmder.toggl.builders.project_builder import ProjectBuilder
class Caching(object):
WORKSPACE_TABLE = '''
CREATE TABLE IF NOT EXISTS workspaces (
name TEXT NOT NULL,
identifier INTEGER PRIMARY KEY,
last_updated TIMESTAMP NOT NULL
)
'''
PROJECT_TABLE = '''
CREATE TABLE IF NOT EXISTS projects (
name TEXT NOT NULL,
color INTEGER,
last_updated TIMESTAMP NOT NULL,
created TIMESTAMP NOT NULL,
identifier INTEGER PRIMARY KEY,
workspace_identifier INTEGER NOT NULL,
FOREIGN KEY (workspace_identifier) REFERENCES workspaces (identifier) ON DELETE CASCADE
)
'''
TAG_TABLE = '''
CREATE TABLE IF NOT EXISTS tags (
name TEXT,
identifier INTEGER PRIMARY KEY,
workspace_identifier INTEGER,
FOREIGN KEY (workspace_identifier) REFERENCES workspaces (identifier) ON DELETE CASCADE
)
'''
TIME_ENTRY_TABLE = '''
CREATE TABLE IF NOT EXISTS time_entries (
description TEXT,
start_time TIMESTAMP NOT NULL,
stop_time TIMESTAMP,
duration INTEGER,
identifier INTEGER PRIMARY KEY,
project_identifier INTEGER,
workspace_identifier INTEGER NOT NULL,
last_updated TIMESTAMP,
FOREIGN KEY (project_identifier) REFERENCES projects (identifier) ON DELETE CASCADE,
FOREIGN KEY (workspace_identifier) REFERENCES workspaces (identifier)
)
'''
TIME_ENTRY_TAG_JUNCTION_TABLE = '''
CREATE TABLE IF NOT EXISTS time_entry_tags (
tag_identifier INTEGER NOT NULL,
time_entry_identifier INTEGER NOT NULL,
FOREIGN KEY (tag_identifier) REFERENCES tags (identifier) ON DELETE CASCADE,
FOREIGN KEY (time_entry_identifier) REFERENCES time_entries (identifier) ON DELETE CASCADE
)
'''
USER_TABLE = '''
CREATE TABLE IF NOT EXISTS users (
name TEXT,
api_token TEXT,
identifier INTEGER PRIMARY KEY,
last_updated TIMESTAMP NOT NULL
)
'''
def __init__(self, *, cache_name: str = "cache.db"):
self.__connection = sqlite3.connect(cache_name)
self.__connection.set_trace_callback(logging.getLogger(__name__).debug)
self.__cursor = self.__connection.cursor()
self.__cursor.execute("PRAGMA foreign_keys = 1")
self.__connection.commit()
self.__cursor.execute(Caching.WORKSPACE_TABLE)
self.__cursor.execute(Caching.PROJECT_TABLE)
self.__cursor.execute(Caching.TAG_TABLE)
self.__cursor.execute(Caching.TIME_ENTRY_TABLE)
self.__cursor.execute(Caching.TIME_ENTRY_TAG_JUNCTION_TABLE)
self.__cursor.execute(Caching.USER_TABLE)
self.__connection.commit()
self.__workspaces: List[Workspace] = []
self.__projects: List[Project] = []
self.__tags: List[Tag] = []
self.__time_entries: List[TimeEntry] = []
def __del__(self):
self.__connection.close()
def update_workspace_cache(self, workspaces: List[Workspace]) -> int:
insert_sql = '''
INSERT INTO workspaces
(name, identifier, last_updated) VALUES
(?, ?, ?)
'''
update_sql = '''
UPDATE workspaces SET name=?, last_updated=?
WHERE identifier=?
'''
for workspace in workspaces:
try:
self.__cursor.execute(
insert_sql, (workspace.name,
workspace.identifier,
workspace.last_updated.timestamp()))
except IntegrityError:
self.__cursor.execute(
update_sql, (workspace.name,
workspace.last_updated.timestamp(),
workspace.identifier))
self.__connection.commit()
return self.__cursor.rowcount
def retrieve_workspace_cache(self) -> List[Workspace]:
sql = '''
SELECT name, identifier, last_updated FROM workspaces
'''
self.__cursor.execute(sql)
results = self.__cursor.fetchall()
if results:
return [
WorkspaceBuilder()
.name(result[0])
.identifier(result[1])
.last_updated(epoch=result[2]).build()
for result in results
            ]
        return []
def update_user_cache(self, user: User) -> int:
insert_sql = '''
INSERT INTO users
(name, api_token, identifier, last_updated) VALUES
(?, ?, ?, ?)
'''
update_sql = '''
UPDATE users SET name=?, api_token=?, last_updated=?
WHERE identifier=?
'''
try:
self.__cursor.execute(
insert_sql, (user.name,
user.api_token,
user.identifier,
user.last_updated.timestamp()))
except IntegrityError:
self.__cursor.execute(
update_sql, (user.name,
user.api_token,
user.last_updated.timestamp(),
user.identifier))
self.__connection.commit()
return self.__cursor.rowcount
def retrieve_user_cache(self) -> User:
sql = '''
SELECT name, api_token, identifier, last_updated FROM users
'''
self.__cursor.execute(sql)
results = self.__cursor.fetchone()
if results:
return UserBuilder()\
.name(results[0])\
.api_token(results[1])\
.identifier(results[2])\
.last_updated(epoch=results[3]).build()
def update_project_cache(self, projects: List[Project]) -> int:
insert_sql = '''
INSERT INTO projects
(name, color, last_updated, created, identifier, workspace_identifier) VALUES
(?, ?, ?, ?, ?, ?)
'''
update_sql = '''
UPDATE projects
SET name=?, color=?, last_updated=?, workspace_identifier=?
WHERE identifier=?
'''
for project in projects:
try:
self.__cursor.execute(
insert_sql, (project.name,
project.color.value,
project.last_updated.timestamp(),
project.created.timestamp() if project.created else datetime.now().timestamp(),
project.identifier,
project.workspace_identifier))
except IntegrityError:
self.__cursor.execute(
update_sql, (project.name,
project.color.value,
project.last_updated.timestamp(),
project.workspace_identifier,
project.identifier))
self.__connection.commit()
return self.__cursor.rowcount
def retrieve_project_cache(self) -> List[Project]:
sql = '''
SELECT name, color, last_updated, created, identifier, workspace_identifier FROM projects
'''
self.__cursor.execute(sql)
results = self.__cursor.fetchall()
if results:
return [
ProjectBuilder()
.name(result[0])
.color(result[1])
.last_updated(epoch=result[2])
.created(epoch=result[3])
.identifier(result[4])
.workspace_identifier(result[5]).build()
for result in results
            ]
        return []
def remove_project_from_cache(self, project: Project) -> None:
sql = '''
DELETE FROM projects
WHERE identifier=?
'''
self.__cursor.execute(sql, (project.identifier,))
self.__connection.commit()
def update_tag_cache(self, tags: List[Tag]) -> int:
insert_sql = '''
INSERT INTO tags
(name, identifier, workspace_identifier) VALUES
(?, ?, ?)
'''
update_sql = '''
UPDATE tags
SET name=?, workspace_identifier=?
WHERE identifier=?
'''
rows_affected = 0
for tag in tags:
try:
self.__cursor.execute(
insert_sql, (tag.name,
tag.identifier,
tag.workspace_identifier))
except IntegrityError:
self.__cursor.execute(
update_sql, (tag.name,
tag.workspace_identifier,
tag.identifier))
rows_affected += self.__cursor.rowcount
self.__connection.commit()
return rows_affected
def retrieve_tag_cache(self) -> List[Tag]:
sql = """
SELECT name, identifier, workspace_identifier FROM tags
"""
self.__cursor.execute(sql)
results = self.__cursor.fetchall()
if results:
return [
TagBuilder()
.name(result[0])
.identifier(result[1])
.workspace_identifier(result[2]).build()
for result in results
            ]
        return []
def remove_tag_from_cache(self, tag: Tag) -> None:
tag_removal_sql = '''
DELETE FROM tags
WHERE identifier=?
'''
self.__cursor.execute(tag_removal_sql, (tag.identifier,))
join_table_removal_sql = '''
DELETE FROM time_entry_tags
WHERE tag_identifier=?
'''
self.__cursor.execute(join_table_removal_sql, (tag.identifier,))
self.__connection.commit()
def __retrieve_time_entry_tags_join(self, time_entry_identifier: int) -> List[tuple]:
sql = '''
SELECT name, tag_identifier, time_entry_identifier
FROM time_entry_tags
INNER JOIN tags ON time_entry_tags.tag_identifier = tags.identifier
WHERE time_entry_identifier=?
'''
self.__cursor.execute(sql, (time_entry_identifier,))
return self.__cursor.fetchall()
def __retrieve_time_entry_tags(self, time_entry_identifier: int) -> List[tuple]:
sql = '''
SELECT tag_identifier, time_entry_identifier
FROM time_entry_tags
WHERE time_entry_identifier=?
'''
self.__cursor.execute(sql, (time_entry_identifier,))
return self.__cursor.fetchall()
    def __check_existing(self, tags: List[int], time_entry_identifier: int) -> List[int]:
        # returns the tag identifiers that are not yet linked to this time entry
        existing_time_entry_tags = self.__retrieve_time_entry_tags(time_entry_identifier)
        if len(tags) == 0 or len(existing_time_entry_tags) == 0:
            return tags
        # existing rows are (tag_id, time_entry_id) tuples; keep only the new
        # tag identifiers so the caller does not insert duplicate links
        existing_ids = {row[0] for row in existing_time_entry_tags}
        return [tag_id for tag_id in tags if tag_id not in existing_ids]
def update_time_entry_cache(self, time_entries: List[TimeEntry]) -> int:
insert_sql = '''
INSERT INTO time_entries
(description, start_time, stop_time, duration, identifier,
project_identifier, workspace_identifier, last_updated) VALUES
(?, ?, ?, ?, ?, ?, ?, ?)
'''
update_sql = '''
UPDATE time_entries
SET description=?,
start_time=?,
stop_time=?,
duration=?,
project_identifier=?,
workspace_identifier=?,
last_updated=?
WHERE identifier=?
'''
insert_time_entry_tag_sql = '''
INSERT INTO time_entry_tags
(tag_identifier, time_entry_identifier)
VALUES (?, ?)
'''
tag_rows = self.retrieve_tag_cache()
rows_affected = 0
for time_entry in time_entries:
try:
self.__cursor.execute(
insert_sql, (time_entry.description,
time_entry.start_time.timestamp(),
None if not time_entry.stop_time else time_entry.stop_time.timestamp(),
time_entry.duration,
time_entry.identifier,
time_entry.project_identifier,
time_entry.workspace_identifier,
time_entry.last_updated.timestamp()))
except IntegrityError:
self.__cursor.execute(
update_sql, (time_entry.description,
time_entry.start_time.timestamp(),
None if not time_entry.stop_time else time_entry.stop_time.timestamp(),
time_entry.duration,
time_entry.project_identifier,
time_entry.workspace_identifier,
time_entry.last_updated.timestamp(),
time_entry.identifier))
rows_affected += self.__cursor.rowcount
tag_ids = []
if time_entry.tags:
for tag in time_entry.tags:
for tag_row in tag_rows:
if tag == tag_row.name:
tag_ids.append(tag_row.identifier)
break
for tag_id in self.__check_existing(tag_ids, time_entry.identifier):
self.__cursor.execute(
insert_time_entry_tag_sql,
(tag_id,
time_entry.identifier))
self.__connection.commit()
return rows_affected
def retrieve_time_entry_cache(self) -> List[TimeEntry]:
time_entry_sql = """
SELECT description,
start_time,
stop_time,
duration,
identifier,
project_identifier,
workspace_identifier,
last_updated
FROM time_entries
"""
time_entries = []
self.__cursor.execute(time_entry_sql)
results = self.__cursor.fetchall()
for result in results:
tag_results = self.__retrieve_time_entry_tags_join(result[4])
builder = TimeEntryBuilder()\
.description(result[0])\
.start_time(epoch=result[1])\
.stop_time(epoch=result[2])\
.duration(result[3])\
.identifier(result[4])\
.project_identifier(result[5])\
.workspace_identifier(result[6])\
.last_updated(epoch=result[7])\
.tags([tag_result[0] for tag_result in tag_results])
time_entries.append(builder.build())
return time_entries
def remove_time_entry_from_cache(self, time_entry: TimeEntry) -> None:
entry_removal_sql = '''
DELETE FROM time_entries
WHERE identifier=?
'''
self.__cursor.execute(entry_removal_sql, (time_entry.identifier,))
joined_entry_removal_sql = '''
DELETE FROM time_entry_tags
WHERE time_entry_identifier=?
'''
self.__cursor.execute(joined_entry_removal_sql, (time_entry.identifier,))
self.__connection.commit()
def get_workspace_identifier(self, workspace_name: str) -> int:
sql = """
SELECT identifier
FROM workspaces
WHERE name=?
"""
self.__cursor.execute(sql, (workspace_name,))
return self.__cursor.fetchone()[0]
def get_project_identifier(self, project_name: str) -> int:
sql = """
SELECT identifier
FROM projects
WHERE name=?
"""
self.__cursor.execute(sql, (project_name,))
return self.__cursor.fetchone()[0]
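# Illustrative usage sketch (not part of the original module). It assumes an
# in-memory SQLite database and that WorkspaceBuilder accepts the same calls
# used by retrieve_workspace_cache above (name, identifier, last_updated with
# an epoch keyword), converting the epoch to a datetime internally.
if __name__ == '__main__':
    cache = Caching(cache_name=':memory:')
    demo_workspace = (WorkspaceBuilder()
                      .name('demo workspace')
                      .identifier(1)
                      .last_updated(epoch=datetime.now().timestamp())
                      .build())
    cache.update_workspace_cache([demo_workspace])
    print(cache.retrieve_workspace_cache())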
| 2.25
| 2
|
baudelaire/log.py
|
juliendoutre/baudelaire
| 0
|
12779688
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
def init_logger() -> None:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
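# Illustrative usage (not part of the original module): call init_logger once
# at start-up, then log through the standard logging module.
if __name__ == "__main__":
    init_logger()
    logging.getLogger(__name__).info("logger initialized")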
| 2.125
| 2
|
data_exploration/england_wales_risk_plots.py
|
riskyviz/webapp
| 0
|
12779689
|
<filename>data_exploration/england_wales_risk_plots.py
import csv
from visigoth.utils.geojson import GeojsonReader
from visigoth import Diagram
from visigoth.map_layers import KDE, Hexbin, Geoimport, Cartogram
from visigoth.utils.colour import ContinuousPalette
from visigoth.containers import Map, Sequence
from visigoth.utils.colour import DiscretePalette
from visigoth.utils.marker import MarkerManager
from visigoth.common import Text
gjr = GeojsonReader()
# population-weighted centroids
# wget https://opendata.arcgis.com/datasets/b0a6d8a3dc5d4718b3fd62c548d60f81_0.geojson
centroids = {} # msoa => (short-name,lon,lat)
(points,lines,polys) = gjr.extract("b0a6d8a3dc5d4718b3fd62c548d60f81_0.geojson")
for (properties,multipoints) in points:
msoa = properties["msoa11cd"]
sname = properties["msoa11nm"]
lon = multipoints[0][0]
lat = multipoints[0][1]
centroids[msoa] = (sname,lon,lat)
dataset = []
f = open("../data_ingest/latest_scores.csv","r")
r = csv.reader(f)
row = 0
for line in r:
row += 1
if row > 1:
msoa = line[0]
score = float(line[1])
(sname,lon,lat) = centroids[msoa]
risk_band = "unknown"
if score < 0.71:
risk_band = "low"
elif score < 3.71:
risk_band = "moderate"
else:
risk_band = "high"
dataset.append({"msoa":msoa,"lat":lat,"lon":lon,"score":score,"risk_band":risk_band,"name":sname})
gi = Geoimport("nuts1.json",polygon_style=lambda p:{"fill":"none"}) # https://github.com/martinjc/UK-GeoJSON/blob/master/json/eurostat/ew/nuts1.json
d = Diagram()
s = Sequence(orientation="horizontal")
# KDE Plot first
p1 = ContinuousPalette(colourMap=["#0000FF00","yellow","red"],withIntervals=False)
k = KDE(dataset,bandwidth=10000,nr_samples_across=80,lon="lon",lat="lat",colour="score",contour_bands=20,palette=p1)
m1 = Map(width=1024, boundaries=((-6,50),(2,56)))
m1.add(gi)
m1.add(k)
s.add(Sequence().add(Text("Heat Map")).add(m1))
# Hex bin
p2 = ContinuousPalette(colourMap=["#0000FF00","yellow","red"],withIntervals=False)
h = Hexbin(dataset,colour="score",nr_bins_across=80,lon="lon",lat="lat",palette=p2,stroke_width=0.5)
m2 = Map(width=1024, boundaries=((-6,50),(2,56)))
m2.add(gi)
m2.add(h)
s.add(Sequence().add(Text("Binned Plot")).add(m2))
# Cartogram
p3 = DiscretePalette()
p3.addColour("low","green").addColour("moderate","orange").addColour("high","red")
mm = MarkerManager()
mm.setDefaultRadius(5)
cg = Cartogram(dataset,marker_manager=mm,iterations=100,lon="lon",lat="lat",colour="risk_band",palette=p3,f2=1)
m3 = Map(width=1024,boundaries=((-6,50),(2,56)))
m3.add(gi)
m3.add(cg)
s.add(Sequence().add(Text("Cartogram")).add(m3))
d.add(Text("Covid-19 Estimated Rate - 18th July - National Maps"))
d.add(s)
html = d.draw(format="html")
f = open("england_wales_risk_plots.html", "w")
f.write(html)
f.close()
| 2.484375
| 2
|
tests/test_distancefunctions.py
|
jplalor/flowpm
| 0
|
12779690
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 18:24:41 2020
@author: Denise
"""
import numpy as np
from flowpm.tfbackground import dchioverda, rad_comoving_distance,a_of_chi as a_of_chi_tf, transverse_comoving_distance as trans_comoving_distance,angular_diameter_distance as ang_diameter_distance
from numpy.testing import assert_allclose
from nbodykit.cosmology import Planck15 as cosmo
cosmo1={"w0":-1.0,
"wa":0.0,
"H0":100,
"h":cosmo.h,
"Omega0_b":cosmo.Omega0_b,
"Omega0_c":cosmo.Omega0_cdm,
"Omega0_m":cosmo.Omega0_m,
"Omega0_k":0.0,
"Omega0_de":cosmo.Omega0_lambda,
"n_s":cosmo.n_s,
"sigma8":cosmo.sigma8}
def test_radial_comoving_distance():
""" This function tests the function computing the radial comoving distance.
"""
a = np.logspace(-2, 0.0)
z=1/a-1
radial =rad_comoving_distance(cosmo1,a)
radial_astr=cosmo.comoving_distance(z)
assert_allclose(radial,radial_astr,rtol=1e-2)
def test_transverse_comoving_distance():
"""This function test the function computing the Transverse comoving distance in [Mpc/h] for a given scale factor
"""
a = np.logspace(-2, 0.0)
z=1/a-1
trans_tf=trans_comoving_distance(cosmo1,a)
trans_astr=cosmo.comoving_transverse_distance(z)
assert_allclose(trans_tf, trans_astr, rtol=1e-2)
def test_angular_diameter_distance():
"""This function test the function computing the Angular diameter distance in [Mpc/h] for a given scale factor
"""
a=np.logspace(-2,0.0)
z=1/a-1
angular_diameter_distance_astr=cosmo.angular_diameter_distance(z)
angular_diameter_distance_tf=ang_diameter_distance(cosmo1,a)
assert_allclose(angular_diameter_distance_tf, angular_diameter_distance_astr,rtol=1e-2)
# =============================================================================
# Here we use the nbodykit function that computes comoving_distance as a function of a/z to
# build a new a-of-chi function by interpolation using a scipy interpolation function.
# Then we compare this function with our a_of_chi function.
# =============================================================================
from scipy import interpolate
def a_of_chi(z):
r"""Computes the scale factor for corresponding (array) of radial comoving
distance by reverse linear interpolation.
Parameters:
-----------
cosmo: Cosmology
Cosmological parameters
chi: array-like
radial comoving distance to query.
Returns:
--------
a : array-like
Scale factors corresponding to requested distances
"""
a=1/(1+z)
cache_chi=cosmo.comoving_distance(z)
return interpolate.interp1d(cache_chi, a, kind='cubic')
def test_a_of_chi():
"""This function test the function computing the scale factor for corresponding (array) of radial comoving
distance by reverse linear interpolation
"""
a = np.logspace(-2, 0.0, 512)
z=1/a-1
chi = np.geomspace(500, 8000, 50)
aofchi_tf=a_of_chi_tf(cosmo1,chi)
f=a_of_chi(z)
aofchi_astr=f(chi)
assert_allclose(aofchi_tf,aofchi_astr,rtol=1e-2)
| 2.15625
| 2
|
listaMetodit.py
|
anolsa/listenandrepeat-praat
| 0
|
12779691
|
<gh_stars>0
def f1Lista(formanttiLista):
lista = [int(i.split(",")[0]) for i in formanttiLista]
return lista
def f2Lista(formanttiLista):
lista = [int(i.split(",")[1]) for i in formanttiLista]
return lista
def laskeSuhteet(formanttiLista, f1min, f2min, f1range, f2range):
lista2 = f1Lista(formanttiLista)
lista3 = f2Lista(formanttiLista)
f1suhde = [(i - f1min)/f1range for i in lista2]
f2suhde = [(i - f2min)/f2range for i in lista3]
suhteet = []
for i in range(0,len(f1suhde)):
suhteet.append((float(f1suhde[i]), float(f2suhde[i])))
return suhteet
def laskeSuhteetPari(formanttiTuple, f1min, f2min, f1range, f2range):
f1suhde = (formanttiTuple[0] - f1min)/f1range
f2suhde = (formanttiTuple[1] - f2min)/f2range
suhteet = (f1suhde, f2suhde)
return suhteet
def suhdeListat(sanakirja):
aakkoset = sanakirja.keys()
f1suhteet = [sanakirja[i][0] for i in aakkoset]
f2suhteet = [sanakirja[i][1] for i in aakkoset]
return (f1suhteet, f2suhteet)
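# Illustrative example (not part of the original script). Formant strings are
# "F1,F2" pairs; laskeSuhteet scales them into the given F1/F2 ranges.
if __name__ == "__main__":
    demo = ["300,2200", "500,1500"]
    print(laskeSuhteet(demo, f1min=250, f2min=600, f1range=500, f2range=1800))
    # expected: [(0.1, 0.888...), (0.5, 0.5)]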
| 2.890625
| 3
|
fixGanExperment/model.py
|
okingjerryo/modelExperiment
| 0
|
12779692
|
<reponame>okingjerryo/modelExperiment<filename>fixGanExperment/model.py
'''
sunkejia
GAN network
'''
import tensorflow as tf
import tensorflow.contrib.slim as slim
def inst_norm(inputs, epsilon=1e-3, suffix=''):
"""
Assuming TxHxWxC dimensions on the tensor, will normalize over
the H,W dimensions. Use this before the activation layer.
This function borrows from:
http://r2rt.com/implementing-batch-normalization-in-tensorflow.html
Note this is similar to batch_normalization, which normalizes each
neuron by looking at its statistics over the batch.
    :param inputs:
input tensor of NHWC format
"""
# Create scale + shift. Exclude batch dimension.
stat_shape = inputs.get_shape().as_list()
print(stat_shape)
scale = tf.get_variable('INscale' + suffix,
initializer=tf.ones(stat_shape[3]))
shift = tf.get_variable('INshift' + suffix,
initializer=tf.zeros(stat_shape[3]))
    # batch norm with axes=[0,1,2] produces statistics of shape 1 * C, while instance norm produces B * C
inst_means, inst_vars = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)
# Normalization
inputs_normed = (inputs - inst_means) / tf.sqrt(inst_vars + epsilon)
# Perform trainable shift.
output = scale * inputs_normed + shift
return output
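# Illustrative usage sketch (not part of the original module; assumes a
# TensorFlow 1.x graph where variables may be created in the current scope):
#
#   with tf.variable_scope('demo'):
#       x = tf.placeholder(tf.float32, [None, 32, 32, 64])
#       y = inst_norm(x, suffix='_demo')   # normalizes each sample over H, W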
def dense_block(layer_input, kernel_size, k, filiter_num, scopename):
    # TODO: DenseNet modification
pass
def netG_encoder_gamma_32(image_input, reuse=False):
'''
    08-04 removed the line reshape layer
:param image_input:
:param reuse:
:return:
'''
with tf.variable_scope('generator', reuse=reuse) as vs:
if reuse:
vs.reuse_variables()
kernel_size = [3, 3]
filter_num = 64
imageshape = image_input.get_shape().as_list()[1]
print(imageshape)
with tf.variable_scope('encoding'):
            # currently lrelu is used here; it should really be elu, remember to swap it later
with slim.arg_scope([slim.conv2d], normalizer_fn=inst_norm, activation_fn=tf.nn.elu, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.02)):
# 32
net = slim.conv2d(image_input, filter_num, kernel_size, normalizer_fn=None, scope='conv1')
net = slim.conv2d(net, filter_num * 2, kernel_size, scope='conv2')
# 16
net = slim.conv2d(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='conv3')
net = slim.conv2d(net, filter_num * 4, kernel_size, scope='conv4')
# 8
net = slim.conv2d(net, filter_num * 4, stride=2, kernel_size=kernel_size, scope='conv6')
net = slim.conv2d(net, filter_num * 6, kernel_size, scope='conv7')
# 4
net = slim.conv2d(net, filter_num * 6, stride=2, kernel_size=kernel_size, scope='conv9')
net = slim.conv2d(net, filter_num * 8, kernel_size, scope='conv10')
net = tf.reshape(slim.flatten(net),
[-1, 1, 1, int(imageshape / 8) * int(imageshape / 8) * filter_num * 8], name='fc1')
logits = slim.fully_connected(net, 1024, activation_fn=None, normalizer_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
scope='bottleneck')
        output = logits  # 1024-dimensional vector
        return output
def netG_deconder_gamma_32(feature, output_channel, reuse=False):
'''
    01-02 instance norm
@brief:
feature:1*1*320+13+50
pose:1*1*13r
noise:1*1*50
'''
with tf.variable_scope('generator', reuse=reuse):
kernel_size = [3, 3]
filter_num = 64
with tf.variable_scope('decoding') as vs:
if reuse:
vs.reuse_variables()
with slim.arg_scope([slim.conv2d_transpose], activation_fn=tf.nn.elu, normalizer_fn=inst_norm,
padding='SAME', weights_initializer=tf.truncated_normal_initializer(stddev=0.02)):
                # first arrange the vector into a 4*4*(filter_num*8) tensor  # slim.batch_norm
fc1 = slim.fully_connected(feature, 4 * 4 * filter_num * 8, activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
scope='fc1')
                # reshape the vector to [n, 4, 4, filter_num * 8]
inputs_img = tf.reshape(fc1, [-1, 4, 4, filter_num * 8])
# print 'inputs_img',inputs_img.shape
# 4
net = slim.conv2d_transpose(inputs_img, filter_num * 8, kernel_size, scope='deconv01')
net = slim.conv2d_transpose(net, filter_num * 6, kernel_size, scope='deconv02')
# 8
net = slim.conv2d_transpose(net, filter_num * 6, stride=2, kernel_size=kernel_size, scope='deconv2')
net = slim.conv2d_transpose(net, filter_num * 4, kernel_size, scope='deconv3')
# 16
net = slim.conv2d_transpose(net, filter_num * 4, stride=2, kernel_size=kernel_size, scope='deconv5')
net = slim.conv2d_transpose(net, filter_num * 2, kernel_size, scope='deconv6')
# 32
net = slim.conv2d_transpose(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='deconv8')
net = slim.conv2d_transpose(net, filter_num, kernel_size, scope='deconv9')
            # why does it only work when this is placed outside?
net = slim.conv2d_transpose(net, output_channel, kernel_size, activation_fn=tf.nn.tanh, normalizer_fn=None,
scope='deconv13', weights_initializer=tf.contrib.layers.xavier_initializer())
output = net
return output
def netD_discriminator_adloss_32(image_input, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse) as vs:
kernel_size = [3, 3]
filter_num = 64
imageshape = image_input.get_shape().as_list()[1]
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm, activation_fn=tf.nn.elu, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.02)):
# 224/96/32
net = slim.conv2d(image_input, filter_num, kernel_size, normalizer_fn=None, scope='conv1')
net = slim.conv2d(net, filter_num * 2, kernel_size, scope='conv2')
# 112/48/16
net = slim.conv2d(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='conv3')
net = slim.conv2d(net, filter_num * 2, kernel_size, scope='conv4')
# 56/24/8
net = slim.conv2d(net, filter_num * 4, stride=2, kernel_size=kernel_size, scope='conv6')
net = slim.conv2d(net, filter_num * 4, kernel_size, scope='conv7')
# 28/12/4
net = slim.conv2d(net, filter_num * 6, stride=2, kernel_size=kernel_size, scope='conv9')
net = slim.conv2d(net, filter_num * 6, kernel_size, scope='conv10')
# 14/6/2
net = slim.conv2d(net, filter_num * 8, stride=2, kernel_size=kernel_size, scope='conv12')
net = slim.conv2d(net, filter_num * 8, kernel_size, scope='conv13')
avgpool = slim.pool(net, [int(imageshape / 16), int(imageshape / 16)], stride=int(imageshape / 32),
pooling_type="AVG", scope='avgpool')
adlogits = slim.fully_connected(slim.flatten(avgpool), 1, activation_fn=None, normalizer_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
scope='ad_soft')
return adlogits
def netG_encoder_gamma(image_input, reuse=False):
'''
    08-04 removed the line reshape layer
:param image_input:
:param reuse:
:return:
'''
with tf.variable_scope('generator', reuse=reuse) as vs:
if reuse:
vs.reuse_variables()
kernel_size = [3, 3]
filter_num = 32
imageshape = image_input.get_shape().as_list()[1]
print(imageshape)
with tf.variable_scope('encoding'):
            # currently lrelu is used here; it should really be elu, remember to swap it later
with slim.arg_scope([slim.conv2d], normalizer_fn=inst_norm, activation_fn=tf.nn.elu, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.02)):
# 224
net = slim.conv2d(image_input, filter_num, kernel_size, normalizer_fn=None, scope='conv1')
net = slim.conv2d(net, filter_num * 2, kernel_size, scope='conv2')
# 112
net = slim.conv2d(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='conv3')
net = slim.conv2d(net, filter_num * 2, kernel_size, scope='conv4')
# 56
net = slim.conv2d(net, filter_num * 4, stride=2, kernel_size=kernel_size, scope='conv6')
net = slim.conv2d(net, filter_num * 3, kernel_size, scope='conv7')
# 28
net = slim.conv2d(net, filter_num * 6, stride=2, kernel_size=kernel_size, scope='conv9')
net = slim.conv2d(net, filter_num * 4, kernel_size, scope='conv10')
# 14
net = slim.conv2d(net, filter_num * 8, stride=2, kernel_size=kernel_size, scope='conv12')
net = slim.conv2d(net, filter_num * 6, kernel_size, scope='conv13')
                # after average pooling this should be a 1*1*320 tensor
# 7
net = slim.conv2d(net, filter_num * 10, stride=2, kernel_size=kernel_size, scope='conv15')
net = slim.conv2d(net, filter_num * 8, kernel_size, scope='conv16')
net = tf.reshape(slim.flatten(net),
[-1, 1, 1, int(imageshape / 32) * int(imageshape / 32) * filter_num * 8])
logits = slim.fully_connected(net, 512, activation_fn=None, normalizer_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
scope='bottleneck')
        output = logits  # 512-dimensional vector
        return output
def netG_deconder_gamma(feature, output_channel, reuse=False):
'''
    01-02 instance norm
@brief:
feature:1*1*320+13+50
pose:1*1*13r
noise:1*1*50
'''
with tf.variable_scope('generator', reuse=reuse):
kernel_size = [3, 3]
filter_num = 32
with tf.variable_scope('decoding') as vs:
if reuse:
vs.reuse_variables()
with slim.arg_scope([slim.conv2d_transpose], activation_fn=tf.nn.elu, normalizer_fn=inst_norm,
padding='SAME', weights_initializer=tf.truncated_normal_initializer(stddev=0.02)):
                # first arrange the vector into a 3*3*(filter_num*8) tensor  # slim.batch_norm
fc1 = slim.fully_connected(feature, 3 * 3 * filter_num * 8, activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
scope='fc1')
                # reshape the vector to [n, 3, 3, 256]
inputs_img = tf.reshape(fc1, [-1, 3, 3, 256])
# print 'inputs_img',inputs_img.shape
# 7
net = slim.conv2d(inputs_img, filter_num * 8, kernel_size, scope='deconv01')
net = slim.conv2d(net, filter_num * 10, kernel_size, scope='deconv02')
# 14
net = slim.conv2d_transpose(net, filter_num * 10, stride=2, kernel_size=kernel_size, scope='deconv03')
net = slim.conv2d_transpose(net, filter_num * 6, kernel_size, scope='deconv0')
# 28
net = slim.conv2d_transpose(net, filter_num * 8, stride=2, kernel_size=kernel_size, scope='deconv2')
net = slim.conv2d_transpose(net, filter_num * 4, kernel_size, scope='deconv3')
# 56
net = slim.conv2d_transpose(net, filter_num * 6, stride=2, kernel_size=kernel_size, scope='deconv5')
net = slim.conv2d_transpose(net, filter_num * 3, kernel_size, scope='deconv6')
# 112
net = slim.conv2d_transpose(net, filter_num * 4, stride=2, kernel_size=kernel_size, scope='deconv8')
net = slim.conv2d_transpose(net, filter_num * 2, kernel_size, scope='deconv9')
# 224
net = slim.conv2d_transpose(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='deconv11')
net = slim.conv2d_transpose(net, filter_num * 1, kernel_size, scope='deconv12')
            # why does it only work when this is placed outside?
net = slim.conv2d_transpose(net, output_channel, kernel_size, activation_fn=tf.nn.tanh, normalizer_fn=None,
scope='deconv13', weights_initializer=tf.contrib.layers.xavier_initializer())
output = net
return output
def netD_discriminator_adloss(image_input, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse) as vs:
kernel_size = [3, 3]
filter_num = 32
imageshape = image_input.get_shape().as_list()[1]
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm, activation_fn=tf.nn.elu, padding='SAME',
weights_initializer=tf.truncated_normal_initializer(
stddev=0.02)):
# 224/96/32
net = slim.conv2d(image_input, filter_num, kernel_size, normalizer_fn=None, scope='conv1')
net = slim.conv2d(net, filter_num * 2, kernel_size, scope='conv2')
# 112/48/16
net = slim.conv2d(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='conv3')
net = slim.conv2d(net, filter_num * 2, kernel_size, scope='conv4')
# 56/24/8
net = slim.conv2d(net, filter_num * 4, stride=2, kernel_size=kernel_size, scope='conv6')
net = slim.conv2d(net, filter_num * 4, kernel_size, scope='conv7')
# 28/12/4
net = slim.conv2d(net, filter_num * 6, stride=2, kernel_size=kernel_size, scope='conv9')
net = slim.conv2d(net, filter_num * 6, kernel_size, scope='conv10')
# 14/6/2
net = slim.conv2d(net, filter_num * 8, stride=2, kernel_size=kernel_size, scope='conv12')
net = slim.conv2d(net, filter_num * 8, kernel_size, scope='conv13')
# two path -feature -W Omegapredict_r_label
            # after average pooling this should be a 1*1*320 tensor
# 7/3
# net = slim.conv2d(net, filter_num * 10, stride=2, kernel_size=kernel_size, scope='conv15')
net = slim.conv2d(net, filter_num * 10, kernel_size, scope='conv16')
avgpool = slim.pool(net, [int(imageshape / 32), int(imageshape / 32)], stride=int(imageshape / 32),
pooling_type="AVG", scope='avgpool')
adlogits = slim.fully_connected(slim.flatten(avgpool), 1, activation_fn=None, normalizer_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
scope='ad_soft')
return adlogits
def netG_Unet_decoder_gamma_32(feature, output_channel, reuse=False):
with tf.variable_scope('generator', reuse=reuse):
kernel_size = [3, 3]
filter_num = 32
with tf.variable_scope('decoding') as vs:
if reuse:
vs.reuse_variables()
with slim.arg_scope([slim.conv2d_transpose], activation_fn=tf.nn.elu, normalizer_fn=inst_norm,
padding='SAME', weights_initializer=tf.truncated_normal_initializer(stddev=0.02)):
                # first arrange the vector into a 4*4*(filter_num*8) tensor  # slim.batch_norm
fc1 = slim.fully_connected(feature, 4 * 4 * filter_num * 8, activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
scope='fc1')
                # reshape the vector to [n, 4, 4, filter_num * 8]
inputs_img = tf.reshape(fc1, [-1, 4, 4, filter_num * 8])
                # U-Net modification
fc1_en = slim.get_variables_by_name('fc1', 'encoding')[0]
tf.concat([fc1_en, inputs_img], axis=3, name='defc1')
# print 'inputs_img',inputs_img.shape
# 4
net = slim.conv2d(inputs_img, filter_num * 8, kernel_size, scope='deconv01')
net = slim.conv2d(net, filter_num * 6, kernel_size, scope='deconv02')
# 8
net = slim.conv2d_transpose(net, filter_num * 3, stride=2, kernel_size=kernel_size, scope='deconv2')
net = slim.conv2d_transpose(net, filter_num * 4, kernel_size, scope='deconv3')
# 16
net = slim.conv2d_transpose(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='deconv5')
net = slim.conv2d_transpose(net, filter_num * 2, kernel_size, scope='deconv6')
# 32
net = slim.conv2d_transpose(net, filter_num * 2, stride=2, kernel_size=kernel_size, scope='deconv8')
net = slim.conv2d_transpose(net, filter_num, kernel_size, scope='deconv9')
                # why does it only work when this is placed outside?
net = slim.conv2d_transpose(net, output_channel, kernel_size, activation_fn=tf.nn.tanh, normalizer_fn=None,
scope='deconv13', weights_initializer=tf.contrib.layers.xavier_initializer())
output = net
return output
| 2.484375
| 2
|
src/components/kankeiforms/coloring_types.py
|
BigJerBD/Kankei-Backend
| 0
|
12779693
|
from components.kankeiforms.shown_properties import DEFAULT_SHOWN_PROPERTIES
DEFAULT_COLORING_TYPES = [name for name, scope in DEFAULT_SHOWN_PROPERTIES["nodes"]]
| 1.289063
| 1
|
tests/graphics/RETAINED_INDEXED.py
|
qbektrix/pyglet
| 1
|
12779694
|
<reponame>qbektrix/pyglet<gh_stars>1-10
#!/usr/bin/env python
"""Tests vertex list drawing using indexed data.
"""
import unittest
import pyglet
from graphics_common import GraphicsIndexedGenericTestCase, get_feedback, GL_TRIANGLES
__noninteractive = True
class GraphicsIndexedVertexListTestCase(GraphicsIndexedGenericTestCase, unittest.TestCase):
def get_feedback(self, data):
vertex_list = pyglet.graphics.vertex_list_indexed(self.n_vertices, self.index_data, *data)
return get_feedback(lambda: vertex_list.draw(GL_TRIANGLES))
if __name__ == '__main__':
unittest.main()
| 2.25
| 2
|
sky/migrations/0007_auto_20180224_1120.py
|
eethan1/IMnight2018_Backend
| 0
|
12779695
|
<filename>sky/migrations/0007_auto_20180224_1120.py
# Generated by Django 2.0 on 2018-02-24 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sky', '0006_auto_20180224_1118'),
]
operations = [
migrations.AlterField(
model_name='news',
name='label',
field=models.SlugField(),
),
]
| 1.5
| 2
|
DR_Warnings/server/web_server/warning_web_server/static/program/client.py
|
TseSteven/SureSide
| 0
|
12779696
|
<reponame>TseSteven/SureSide
# Example of embedding CEF Python browser using wxPython library.
# This example has a top menu and a browser widget without navigation bar.
# Tested configurations:
# - wxPython 4.0 on Windows/Mac/Linux
# - wxPython 3.0 on Windows/Mac
# - wxPython 2.8 on Linux
# - CEF Python v66.0+
import wx
import wx.adv
from wx.lib.pubsub import pub
from cefpython3 import cefpython as cef
import platform
import sys
import os
import configparser
from kombu import Connection, Exchange, Queue, binding
from kombu.mixins import ConsumerMixin
import traceback
import time
import threading
from urllib.request import urlretrieve
import json
import random
icon_file = os.path.join(os.path.abspath(os.path.dirname(__file__)),"icon.ico")
class Worker(ConsumerMixin):
def __init__(self, connection, queues, print_enable):
self.connection = connection
self.queues = queues
self.print_enable = print_enable
print('Listening.....')
def get_consumers(self, Consumer, channel):
return [Consumer(queues=self.queues,
callbacks=[self.on_message])]
def on_message(self, body, message):
print('Got message: {0}'.format(body))
data = body
message.ack()
if 'web_url' in data:
web_url = data['web_url']
print('Web URL: '+ web_url)
wx.CallAfter(pub.sendMessage, "msg_update", msg=web_url)
if 'pdf_url' in data and 'msg_id' in data:
msg_id = data['msg_id']
pdf_url = data['pdf_url']
print('PDF URL: '+ pdf_url)
if self.print_enable == 'yes':
try:
filename = msg_id + '.pdf'
pdf_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),"pdf")
file_path = os.path.join(pdf_path, filename)
urlretrieve(pdf_url, file_path)
time.sleep(3)
os.startfile(file_path, "print")
except:
print(traceback.format_exc())
pass
class Kombu_Receive_Thread(threading.Thread):
def __init__(self, settings_dict):
threading.Thread.__init__(self)
self.settings_dict = settings_dict
self.channel = None
def run(self):
#if self.stop_flag == True:
#break
server_ip = self.settings_dict['server_ip']
os.environ['NO_PROXY'] = server_ip
topic = self.settings_dict['topic']
print_enable = self.settings_dict['print_enable']
exchange = Exchange("warning", type="topic")
        if topic == '#':
            binding_keys = ['#']
        elif ',' in topic:
            binding_keys = topic.split(',')
        else:
            binding_keys = [topic]
binding_list = []
for binding_key in binding_keys:
binding_list.append(binding(exchange, routing_key=binding_key.strip()))
queues = [Queue('', exchange=exchange, bindings=binding_list)]
if ',' in server_ip:
ip_list = server_ip.split(',')
else:
ip_list = [server_ip]
print(ip_list)
primary_ip = ip_list[0].strip()
alternates_ip = []
if len(ip_list)>1:
for item in ip_list[1:]:
alternates_ip.append('amqp://rad:rad@{}:5672//'.format(item.strip()))
with Connection('amqp://rad:rad@{}:5672//'.format(primary_ip), alternates=alternates_ip, failover_strategy='round-robin', heartbeat=4) as conn:
try:
self.worker = Worker(conn, queues, print_enable)
self.worker.run()
except:
print(traceback.format_exc())
pass
def stop(self):
print('kombu thread stopped!')
self.worker.should_stop = True
self.join()
def create_menu_item(menu, label, func):
item = wx.MenuItem(menu, -1, label)
menu.Bind(wx.EVT_MENU, func, id=item.GetId())
menu.Append(item)
return item
class SettingDialog(wx.Dialog):
def __init__(self, parent, title, settings_dict):
        super(SettingDialog, self).__init__(parent, title = title,size = (400,400))
        self.settings_dict = settings_dict
panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
label_server_ip = wx.StaticText(panel, -1, "Server IP Address")
hbox0.Add(label_server_ip, 1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
self.txt_ctrl_server_ip = wx.TextCtrl(panel, size=(250, -1))
self.txt_ctrl_server_ip.SetValue(settings_dict['server_ip'])
hbox0.Add(self.txt_ctrl_server_ip,1,wx.EXPAND|wx.ALIGN_RIGHT|wx.ALL,5)
vbox.Add(hbox0)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
lblList = ['Yes', 'No']
self.rbox = wx.RadioBox(panel,label = 'Enable Printing?', pos = (80,10), size=(400, -1), choices = lblList , majorDimension = 1, style = wx.RA_SPECIFY_ROWS)
if settings_dict['print_enable'] == 'yes':
self.rbox.SetSelection(0)
else:
self.rbox.SetSelection(1)
hbox1.Add(self.rbox, 1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
vbox.Add(hbox1)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
label_topic = wx.StaticText(panel, -1, "Message Queue Topics")
hbox2.Add(label_topic,1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)
self.txt_ctrl_topic = wx.TextCtrl(panel,size = (250,200),style = wx.TE_MULTILINE)
self.txt_ctrl_topic.SetValue(settings_dict['topic'])
hbox2.Add(self.txt_ctrl_topic,1,wx.EXPAND|wx.ALIGN_RIGHT|wx.ALL,5)
vbox.Add(hbox2)
vbox.AddStretchSpacer(2)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
self.ok_btn = wx.Button(panel, -1, "OK")
self.cancel_btn = wx.Button(panel, -1, "Cancel")
hbox3.AddStretchSpacer(4)
hbox3.Add(self.ok_btn,1,wx.EXPAND|wx.ALIGN_RIGHT|wx.ALL,5)
hbox3.Add(self.cancel_btn,1,wx.EXPAND|wx.ALIGN_RIGHT|wx.ALL,5)
vbox.Add(hbox3)
panel.SetSizer(vbox)
self.Centre()
self.setup_icon()
self.Show()
self.Fit()
self.ok_btn.Bind(wx.EVT_BUTTON,self.OnOKClicked)
self.cancel_btn.Bind(wx.EVT_BUTTON,self.OnCancelClicked)
def setup_icon(self):
icon = wx.Icon()
icon.CopyFromBitmap(wx.Bitmap(icon_file, wx.BITMAP_TYPE_ANY))
self.SetIcon(icon)
def OnOKClicked(self,event):
parser = configparser.ConfigParser()
parser.read('setting.ini')
server_ip = self.txt_ctrl_server_ip.GetValue().replace(' ', '')
topic = self.txt_ctrl_topic.GetValue().replace(' ', '')
parser.set('Networking', 'server_ip', server_ip)
parser.set('Networking', 'topic', topic)
if self.rbox.GetSelection()==0:
parser.set('Networking', 'print_enable', 'yes')
else:
parser.set('Networking', 'print_enable', 'no')
with open('setting.ini', 'w') as configfile: # save
parser.write(configfile)
wx.MessageBox('Please restart application to enable new settings!', 'Info', wx.OK | wx.ICON_INFORMATION)
self.EndModal(wx.ID_OK)
wx.CallAfter(self.Destroy)
def OnCancelClicked(self,event):
self.EndModal(wx.ID_CANCEL)
wx.CallAfter(self.Destroy)
    def GetSettings(self):
        return self.settings_dict
class TaskBarIcon(wx.adv.TaskBarIcon):
def __init__(self, frame):
self.frame = frame
super(TaskBarIcon, self).__init__()
self.set_icon(icon_file)
self.Bind(wx.adv.EVT_TASKBAR_LEFT_DOWN, self.on_left_down)
self.settings_dict = self.read_config()
self.kombu_thread = Kombu_Receive_Thread(self.settings_dict)
self.kombu_thread.start()
def read_config(self):
parser = configparser.ConfigParser()
parser.read('setting.ini')
if parser.has_section('Networking') == False:
parser.add_section('Networking')
if parser.has_option('Networking','server_ip') == False:
parser.set('Networking', 'server_ip', '10.0.2.15')
if parser.has_option('Networking','print_enable') == False:
parser.set('Networking', 'print_enable', 'no')
if parser.has_option('Networking','topic') == False:
parser.set('Networking', 'topic', 'POHCT.delay_upload')
with open('setting.ini', 'w') as configfile:
parser.write(configfile)
parser.read('setting.ini')
server_ip = parser.get('Networking','server_ip')
topic = parser.get('Networking', 'topic')
print_enable = parser.get('Networking','print_enable')
settings_dict = {'server_ip':server_ip, 'topic':topic, 'print_enable':print_enable}
return settings_dict
def CreatePopupMenu(self):
menu = wx.Menu()
create_menu_item(menu, 'Version. 20191021', None)
menu.AppendSeparator()
create_menu_item(menu, 'Settings', self.on_setting)
menu.AppendSeparator()
create_menu_item(menu, 'Exit', self.on_exit)
return menu
def set_icon(self, path):
icon = wx.Icon(path)
self.SetIcon(icon, 'NTWC DR Warning')
def on_left_down(self, event):
print ('Tray icon was left-clicked.')
def on_setting(self, event):
setting_dlg = SettingDialog(None,'Application settings', self.settings_dict)
setting_dlg.ShowModal()
def on_exit(self, event):
wx.CallAfter(self.Destroy)
self.frame.Close()
self.kombu_thread.stop()
# class App(wx.App):
# def OnInit(self):
# frame=wx.Frame(None)
# self.SetTopWindow(frame)
# TaskBarIcon(frame)
# return True
# Platforms
WINDOWS = (platform.system() == "Windows")
LINUX = (platform.system() == "Linux")
MAC = (platform.system() == "Darwin")
if MAC:
try:
# noinspection PyUnresolvedReferences
from AppKit import NSApp
except ImportError:
print("[wxpython.py] Error: PyObjC package is missing, "
"cannot fix Issue #371")
print("[wxpython.py] To install PyObjC type: "
"pip install -U pyobjc")
sys.exit(1)
# Configuration
WIDTH = 900
HEIGHT = 640
# Globals
g_count_windows = 0
def main():
pdf_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),"pdf")
if not os.path.exists(pdf_path):
os.makedirs(pdf_path)
check_versions()
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
settings = {}
switches = {"proxy-server": "direct://",}
if MAC:
# Issue #442 requires enabling message pump on Mac
# and calling message loop work in a timer both at
# the same time. This is an incorrect approach
# and only a temporary fix.
settings["external_message_pump"] = True
if WINDOWS:
# noinspection PyUnresolvedReferences, PyArgumentList
cef.DpiAware.EnableHighDpiSupport()
cef.Initialize(settings=settings, switches=switches)
app = CefApp(False)
app.MainLoop()
del app # Must destroy before calling Shutdown
if not MAC:
# On Mac shutdown is called in OnClose
cef.Shutdown()
def check_versions():
print("[wxpython.py] CEF Python {ver}".format(ver=cef.__version__))
print("[wxpython.py] Python {ver} {arch}".format(
ver=platform.python_version(), arch=platform.architecture()[0]))
print("[wxpython.py] wxPython {ver}".format(ver=wx.version()))
# CEF Python version requirement
assert cef.__version__ >= "66.0", "CEF Python v66.0+ required to run this"
def scale_window_size_for_high_dpi(width, height):
"""Scale window size for high DPI devices. This func can be
called on all operating systems, but scales only for Windows.
If scaled value is bigger than the work area on the display
then it will be reduced."""
if not WINDOWS:
return width, height
(_, _, max_width, max_height) = wx.GetClientDisplayRect().Get()
# noinspection PyUnresolvedReferences
(width, height) = cef.DpiAware.Scale((width, height))
if width > max_width:
width = max_width
if height > max_height:
height = max_height
return width, height
class MainFrame(wx.Frame):
def __init__(self, url):
self.browser = None
# Must ignore X11 errors like 'BadWindow' and others by
# installing X11 error handlers. This must be done after
        # wx was initialized.
if LINUX:
cef.WindowUtils.InstallX11ErrorHandlers()
global g_count_windows
g_count_windows += 1
if WINDOWS:
# noinspection PyUnresolvedReferences, PyArgumentList
print("[wxpython.py] System DPI settings: %s"
% str(cef.DpiAware.GetSystemDpi()))
if hasattr(wx, "GetDisplayPPI"):
print("[wxpython.py] wx.GetDisplayPPI = %s" % wx.GetDisplayPPI())
print("[wxpython.py] wx.GetDisplaySize = %s" % wx.GetDisplaySize())
print("[wxpython.py] MainFrame declared size: %s"
% str((WIDTH, HEIGHT)))
size = scale_window_size_for_high_dpi(WIDTH, HEIGHT)
print("[wxpython.py] MainFrame DPI scaled size: %s" % str(size))
wx.Frame.__init__(self, parent=None, id=wx.ID_ANY,
title='NTWC DR Warning!', size=size)
# wxPython will set a smaller size when it is bigger
# than desktop size.
print("[wxpython.py] MainFrame actual size: %s" % self.GetSize())
self.setup_icon()
self.Bind(wx.EVT_CLOSE, self.OnClose)
# Set wx.WANTS_CHARS style for the keyboard to work.
# This style also needs to be set for all parent controls.
self.browser_panel = wx.Panel(self, style=wx.WANTS_CHARS)
self.browser_panel.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.browser_panel.Bind(wx.EVT_SIZE, self.OnSize)
if MAC:
# Make the content view for the window have a layer.
# This will make all sub-views have layers. This is
# necessary to ensure correct layer ordering of all
# child views and their layers. This fixes Window
# glitchiness during initial loading on Mac (Issue #371).
NSApp.windows()[0].contentView().setWantsLayer_(True)
if LINUX:
# On Linux must show before embedding browser, so that handle
# is available (Issue #347).
self.Show()
# In wxPython 3.0 and wxPython 4.0 on Linux handle is
# still not yet available, so must delay embedding browser
# (Issue #349).
if wx.version().startswith("3.") or wx.version().startswith("4."):
wx.CallLater(100, self.embed_browser, url)
else:
# This works fine in wxPython 2.8 on Linux
self.embed_browser(url)
else:
self.embed_browser(url)
self.SetWindowStyle(wx.DEFAULT_FRAME_STYLE|wx.STAY_ON_TOP)
self.Show()
self.Maximize(True)
def setup_icon(self):
icon = wx.Icon()
icon.CopyFromBitmap(wx.Bitmap(icon_file, wx.BITMAP_TYPE_ANY))
self.SetIcon(icon)
def embed_browser(self, url):
window_info = cef.WindowInfo()
(width, height) = self.browser_panel.GetClientSize().Get()
assert self.browser_panel.GetHandle(), "Window handle not available"
window_info.SetAsChild(self.browser_panel.GetHandle(),
[0, 0, width, height])
self.browser = cef.CreateBrowserSync(window_info,
url=url)
self.browser.SetClientHandler(FocusHandler())
def OnSetFocus(self, _):
if not self.browser:
return
if WINDOWS:
cef.WindowUtils.OnSetFocus(self.browser_panel.GetHandle(),
0, 0, 0)
self.browser.SetFocus(True)
def OnSize(self, _):
if not self.browser:
return
if WINDOWS:
cef.WindowUtils.OnSize(self.browser_panel.GetHandle(),
0, 0, 0)
elif LINUX:
(x, y) = (0, 0)
(width, height) = self.browser_panel.GetSize().Get()
self.browser.SetBounds(x, y, width, height)
self.browser.NotifyMoveOrResizeStarted()
def OnClose(self, event):
print("[wxpython.py] OnClose called")
if not self.browser:
# May already be closing, may be called multiple times on Mac
return
if MAC:
# On Mac things work differently, other steps are required
self.browser.CloseBrowser()
self.clear_browser_references()
self.Destroy()
global g_count_windows
g_count_windows -= 1
if g_count_windows == 0:
cef.Shutdown()
wx.GetApp().ExitMainLoop()
# Call _exit otherwise app exits with code 255 (Issue #162).
# noinspection PyProtectedMember
os._exit(0)
else:
# Calling browser.CloseBrowser() and/or self.Destroy()
            # in OnClose may cause app crash on some platforms in
# some use cases, details in Issue #107.
self.browser.ParentWindowWillClose()
event.Skip()
self.clear_browser_references()
def clear_browser_references(self):
# Clear browser references that you keep anywhere in your
# code. All references must be cleared for CEF to shutdown cleanly.
self.browser = None
class FocusHandler(object):
def OnGotFocus(self, browser, **_):
# Temporary fix for focus issues on Linux (Issue #284).
if LINUX:
print("[wxpython.py] FocusHandler.OnGotFocus:"
" keyboard focus fix (Issue #284)")
browser.SetFocus(True)
class CefApp(wx.App):
def __init__(self, redirect):
self.timer = None
self.timer_id = 1
self.is_initialized = False
super(CefApp, self).__init__(redirect=redirect)
def OnPreInit(self):
super(CefApp, self).OnPreInit()
# On Mac with wxPython 4.0 the OnInit() event never gets
# called. Doing wx window creation in OnPreInit() seems to
# resolve the problem (Issue #350).
if MAC and wx.version().startswith("4."):
print("[wxpython.py] OnPreInit: initialize here"
" (wxPython 4.0 fix)")
self.initialize()
def OnInit(self):
self.initialize()
return True
def initialize(self):
if self.is_initialized:
return
self.is_initialized = True
self.create_timer()
frame=wx.Frame(None)
self.SetTopWindow(frame)
TaskBarIcon(frame)
pub.subscribe(self.on_receive_mg, "msg_update")
def on_receive_mg(self, msg):
browser_frame = MainFrame(msg)
#self.SetTopWindow(browser_frame)
browser_frame.Show()
def create_timer(self):
# See also "Making a render loop":
# http://wiki.wxwidgets.org/Making_a_render_loop
# Another way would be to use EVT_IDLE in MainFrame.
self.timer = wx.Timer(self, self.timer_id)
self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
self.timer.Start(10) # 10ms timer
def on_timer(self, _):
cef.MessageLoopWork()
def OnExit(self):
self.timer.Stop()
return 0
if __name__ == '__main__':
main()
| 2.25
| 2
|
src/shotgun_io.py
|
shotgunsoftware/shotgun_io
| 2
|
12779697
|
#!/usr/bin/env python
# ---------------------------------------------------------------------------------------------
# Copyright (c) 2009-2016, Shotgun Software Inc
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the name of the Shotgun Software Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Provides interaction with Shotgun specific for render queue integrations.
ShotgunIO facilitates the creation and updating of Version entities in Shotgun.
It manages communication for listing entities in Shotgun for constructing job
submission UIs, handles input and output formatting, and validation for creating
and updating Versions in Shotgun with render job specific information.
"""
import sys
import os
import socket
import optparse
import ConfigParser
import logging
import re
import glob
try:
import simplejson as json
except ImportError:
import json
# shotgun_io modules
try:
import io_entity_queries
except Exception, e:
msg = "There is a problem in your entity_query_config module: %s" % e
logging.error(msg)
sys.exit(1)
import io_input
import io_output
try:
import shotgun_api3
except ImportError, e:
msg = "%s:\nThe Shotgun API is not installed. To install it, download the "\
"latest version from https://github.com/shotgunsoftware/python-api, "\
"extract the package and place it in this directory "\
"or somewhere in your PYTHONPATH (more info on where to install modules "\
"at http://docs.python.org/tutorial/modules.html#the-module-search-path)" % (e)
logging.error(msg)
sys.exit(1)
__author__ = "KP"
__copyright__ = "Copyright 2016, Shotgun Software"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD"
__version__ = "1.0.0"
__maintainer__ = "KP"
__email__ = "<EMAIL>"
__status__ = "Production"
# LOGGING
# ====================================
class NullHandler(logging.Handler):
"""a NOP Logging handler that does nothing.
"""
def emit(self, record):
pass
def handle(self, record):
pass
logging.basicConfig(level=logging.INFO,
format="%(filename)s %(levelname)s: %(message)s" )
# shut up shotgun_api3 log messages
logging.getLogger('shotgun_api3').propagate = False
logging.getLogger('shotgun_api3').addHandler(NullHandler())
# setup our log
logging.getLogger(__name__)
# don't hang things up for more than 10 seconds if we can't reach Shotgun
socket.setdefaulttimeout(10)
# CONSTANTS (don't change these)
# ====================================
VERSION_STATUS_SUBMITTED = 0
VERSION_STATUS_IP = 1
VERSION_STATUS_COMPLETE = 2
VERSION_STATUS_FAILED = 3
VERSION_STATUS_ABORTED = 4
# minimum fields required to create a Version in Shotgun.
REQUIRED_SG_VERSION_FIELDS = ['code','project','user']
NON_ENTITY_LIST_VALUES = ['entities']
class ShotgunIOError(Exception):
"""Base for all ShotgunIO Errors"""
pass
class ShotgunIO(object):
"""Factory for ShotgunIO instance.
A wrapper around the Shotgun API for creating and updating Versions and
managing Shotgun communication specific to render queue workflow.
Provides an easy way to get lists of entities from Shotgun for populating
submit UI menus. Manages the input and output formatting of data as well as
validation for creating and updating Versions in Shotgun.
Can be imported and instantiated directly or can be called standalone via
commandline. If a 'custom_module' is defined in shotgun_io.conf, attempts to
import the specified file and instantiate the ShotgunIOCustom class instead.
:param config: ConfigParser instance holding configuration options loaded
from shotgun_io.conf
:returns: ShotgunIO instance
"""
def __new__(klass, **kwargs):
# load config
config = None
config_paths = [os.path.abspath( os.path.dirname(__file__) ) +
"/shotgun_io.conf", '/etc/shotgun_io.conf']
config = ShotgunConfigParser()
config.read(config_paths)
if not config:
raise ShotgunIOError('shotgun_io.conf config file not found. ' \
'Searched %s' % (config_paths))
# if no custom module is defined return the base class instance
custom_module_str = config.get('advanced', 'custom_module')
if not custom_module_str:
return ShotgunIOBase(config)
else:
# attempt to load the custom module if enabled
try:
custom_module = __import__(custom_module_str)
except ImportError:
logging.warning("custom module '%s' specified in config not "\
"found. Using the default ShotgunIO class instead.")
return ShotgunIOBase(config)
else:
return custom_module.ShotgunIOCustom(config)
class ShotgunIOBase(object):
"""A wrapper around the Shotgun API for creating and updating Versions and
managing Shotgun communication specific to render queue workflow.
Provides an easy way to get lists of entities from Shotgun for populating
submit UI menus. Manages the input and output formatting of data as well as
validation for creating and updating Versions in Shotgun
Can be imported and instantiated directly or can be called standalone via
commandline.
:param config: ConfigParser instance holding configuration options loaded
from shotgun_io.conf
:returns: ShotgunIOBase instance
"""
def __init__(self, config):
self._config = config
# shotgun server info
self.shotgun_url = self._config.get('shotgun', 'url')
self.shotgun_script = self._config.get('shotgun', 'script_name')
self.shotgun_key = self._config.get('shotgun', 'application_key')
self._sg = None
# version numbering options
self.version_numbering = self._config.get('version_values',
'version_numbering')
self.version_number_format = self._config.get('version_values',
'version_number_format',
1)
# input and output validation and formatting
self.input = io_input.InputDefault(self._config)
self.output = io_output.OutputDefault()
def _shotgun_connect(self):
"""Instantiate Shotgun API for connecting to the Shotgun server and
assign to this object's _sg variable.
If instance already exists, no action is taken. Nothing is returned
by this method.
:raises: :class:`ShotgunIOError` on failure
:todo: test this with the Shotgun JSON API.
"""
if self._sg:
return
try:
self._sg = shotgun_api3.Shotgun(self.shotgun_url,
self.shotgun_script, self.shotgun_key)
except Exception, e:
raise ShotgunIOError("Unable to connect to Shotgun: %s" % e)
def _validate_list_option(self, entity_type):
"""Validate the entity type requested is in the list of configured
entity types.
:returns: True if valid. False if invalid.
:rtype: `bool`
"""
if (entity_type in io_entity_queries.entity_queries.keys() or
entity_type in NON_ENTITY_LIST_VALUES):
return True
else:
return False
def _get_version_schema(self):
"""Internal method for introspecting the Version schema
:returns: dictionary of all fields on the Version entity in the
default Shotgun API format
:rtype: `dict`
"""
self._shotgun_connect()
try:
schema = self._sg.schema_field_read('Version')
except Exception, e:
raise ShotgunIOError("Error getting Version schema: %s" % (e))
return schema
def get_workflow(self):
"""Returns the current configured workflow option as defined in the conf
file.
:returns: name of the workflow currently enabled
:rtype: `str`
>>> io.get_workflow()
task
"""
w = self._config.get('advanced','workflow')
return w
def get_config(self):
"""Returns the current config option settings as defined in the conf
file.
:returns: Dictionary of current config settings as key value pairs
:rtype: `dict`
"""
conf = {}
for section in self._config.sections():
conf[section] = {}
for item in self._config.items(section, 1):
conf[section][item[0]] = item[1]
out = self.output.format_output('config', conf)
return out
def load_env_variables(self):
"""checks local environment variables for existance of SG_IO_* values
and returns formatted key/value pairs.
export SG_IO_USER='{"type":"HumanUser", "id":123, "login":"kp"}'
export SG_IO_TASK='{"type":"Task", "id":234, "content":"Anim", "project":{"type":"Project","id":345,"name":"Demo Project"},"entity":{"type":"Shot","id":456,"name":"0010_0001"}}'
export SG_IO_VERSION_NAME='010_0001 / anim / kp'
export SG_IO_PROJECT='{"type":"Project", "id":123, "name":"Demo Project"}'
export SG_IO_ENTITY='{"type":"Shot", "id":123, "name":"010_0001"}'
:returns: Dictionary of key/value pairs representing Shotgun info
:rtype: `dict`
Example::
>>> io.load_env_variables()
{
'user': {'type':'HumanUser', 'id':123},
'task': {
'type':'Task',
'id':234,
'content':'Anim',
'project': {'type':'Project', 'id':567},
'entity': {'type':'Shot', 'id':678}
},
'name': '010_0001 / anim / kp',
'project': {'type':'Project', 'id':345, 'name':'Demo Project'},
'entity': {'type':'Shot', 'id':456, 'name':'0010_0001'}
}
"""
job_info = {}
if os.environ.has_key("SG_IO_USER"):
job_info['user'] = json.loads(os.environ.get('SG_IO_USER'))
if os.environ.has_key("SG_IO_TASK"):
job_info['task'] = json.loads(os.environ.get('SG_IO_TASK'))
if os.environ.has_key("SG_IO_VERSION_NAME"):
job_info['name'] = os.environ.get('SG_IO_VERSION_NAME')
if os.environ.has_key("SG_IO_PROJECT"):
job_info['project'] = json.loads(os.environ.get('SG_IO_PROJECT'))
if os.environ.has_key("SG_IO_ENTITY"):
job_info['shot'] = json.loads(os.environ.get('SG_IO_ENTITY'))
# if os.environ.has_key("SG_IO_IMAGE_PATH"):
job_info['image_path'] = os.environ.get('SG_IO_IMAGE_PATH')
out = self.output.format_output('env_variables', job_info)
return out
def validate_user(self, username):
"""Checks if given username string is a valid active user in Shotgun.
If the username is active and valid, returns the id of the HumanUser
entity in Shotgun. If the username is invalid, returns None.
>>> io.validate_user("zoe")
42
>>> io.validate_user("franny")
shotgun_io.py ERROR: User 'franny' is invalid.
:param username: the login to lookup in Shotgun
:type username: `str`
:returns: Shotgun id for the user or None if username wasn't found
:rtype: `int` or `NoneType`
:raises: :class:`ShotgunIOError` if the Shotgun query fails
"""
self._shotgun_connect()
        # copy the configured filters so repeated calls don't accumulate login filters
        filters = io_entity_queries.validate_user['filters'] + [
            ['login', 'is', username]]
try:
user = self._sg.find_one(
io_entity_queries.validate_user['entity_type'],
                filters,
io_entity_queries.validate_user['fields'])
except Exception, e:
ShotgunIOError("Error validating user in Shotgun: %s" % (e))
out = self.output.format_output('user', user)
return out
def get_version_fields(self):
"""Returns a dict of Version fields whose values are editable
categorized by field type.
        This may be useful for checking that field names specified during setup
        or configuration of the integration are valid, when done interactively.
:returns: dictionary of the editable fields for the Version entity in
Shotgun grouped by field type
:rtype: `dict`
:raises: :class:`ShotgunIOError` if the Shotgun query fails
:todo: The filtering could be more intelligent at filtering out
additional fields that don't make sense
Example::
>>> io.get_version_fields()
{'checkbox': ['sg_frames_have_slate', 'sg_movie_has_slate'],
'date': ['sg_render_datestamp'],
'date_time': ['sg_render_timestamp'],
'entity': ['entity',
'project',
'sg_task',
'task_template',
'user'],
'float': ['sg_frames_aspect_ratio', 'sg_movie_aspect_ratio'],
'list': ['sg_version_type'],
'multi_entity': ['notes',
'playlists',
'sg_storyboard_link',
'task_sg_versions_tasks',
'tasks'],
'number': ['frame_count',
'sg_avg_frame_time',
'sg_first_frame',
'sg_last_frame',
'sg_total_render_time'],
'status_list': ['sg_status_list'],
'tag_list': ['tag_list'],
'text': ['code',
'description',
'frame_range',
'sg_department',
'sg_job_id',
'sg_path_to_frames',
'sg_path_to_movie'],
'url': ['sg_link_to_frames',
'sg_link_to_movie',
'sg_uploaded_movie']}
"""
self._shotgun_connect()
sorted_fields = {}
try:
fields = self._sg.schema_field_read('Version')
except Exception, e:
raise ShotgunIOError("Error retrieving list of Version fields from"\
" Shotgun: %s" % (e))
# sort fields by data_type. remove fields that aren't editable.
for fn, fv in fields.iteritems():
if fv['editable']['value']:
try:
sorted_fields[fv['data_type']['value']].append(fn)
except KeyError:
sorted_fields[fv['data_type']['value']]=[]
sorted_fields[fv['data_type']['value']].append(fn)
# sort the field names in each type
for t, f in sorted_fields.iteritems():
f.sort()
out = self.output.format_output('version_fields', sorted_fields)
return out
def get_version_status_values(self):
"""Returns a list of valid status values for the Version Status field
in Shotgun
:returns: list of short codes for the configured status field in Shotgun
:rtype: `list`
:raises: :class:`ShotgunIOError` if the Shotgun query fails
>>> io.get_version_status_values()
['na', 'queued', 'ren', 'rev', 'vwd', 'fail']
"""
self._shotgun_connect()
try:
status_field = self._sg.schema_field_read(
'Version','sg_status_list')
except Exception, e:
logging.error("Error retrieving list of valid status values for "\
"Versions from Shotgun: %s" % (e))
out = self.output.format_output('version_statuses',
status_field['sg_status_list']['properties']['valid_values']['value'])
return out
def get_version_name_templates(self):
"""Return a list of the Version name templates defined in the config.
The first entry in the list is the default. If the first
entry is blank, then there is no default set.
Allows tokens like ${project} or ${shot} that can be used
for string replacement in the submit UI.
:returns: list of Version name templates defined in the config
:rtype: `list`
>>> print io.get_version_name_templates()
['', '${project}_${shot}_${task}', ' ${shot}_${task} ${jobid}']
"""
template_list = []
template_list = self._config.get(
'version_values',
'version_name_templates').split(',')
out = self.output.format_output('version_name_templates', template_list)
return out
def get_entities(
self, entity_type, project_id=None, user_id=None, no_format=False):
"""Retrieve a list of entities of `entity_type` from Shotgun
The config file holds several settings for how this query is executed
including filters, fields returned, sorting options, and whether the
project_id and user_id is required. These settings are controlled by
the studio to match their workflow. Vendors can assume that the
settings will define the details for the studio and shouldn't
concern themselves with them.
:param entity_type: type of entities to query Shotgun for
:type entity_type: `str`
:param project_id: id of the project to limit the query to. This is
required for certain entity types (like Shots and Assets)
:type project_id: `int`
:param user_id: id of the user to limit the query to. This is
required for certain entity types (like Tasks)
:type user_id: `int`
:param no_format: used internally
:type no_format: `bool`
:returns: list of Shotgun entity dictionaries matching the default
settings defined in the config
:rtype: `list`
:raises: :class:`ShotgunIOError` if the validation fails or the Shotgun
query fails
Example::
>>> io.get_entities("tasks", user_id=55)
[{'content': 'Anm',
'entity': {'id': 860, 'name': 'bunny_010_0010', 'type': 'Shot'},
'id': 557,
'project': {'id': 65, 'name': 'Demo Animation Project', 'type': 'Project'},
'type': 'Task'}]
"""
if not self._validate_list_option(entity_type):
raise ShotgunIOError("entity_type value '%s' is invalid. Valid "\
"options are %s" %
(entity_type,
io_entity_queries.entity_queries.keys()+NON_ENTITY_LIST_VALUES))
entities = []
# advanced workflow: potentially run multiple queries
if entity_type == "entities":
for t in io_entity_queries.advanced_workflow_entities:
entities += self.get_entities(t, project_id=project_id,
no_format=True)
else:
            # copy the configured filters so repeated calls don't accumulate extra filters
            filters = list(io_entity_queries.entity_queries[entity_type]['filters'])
# check if we need to inject project or user filters
if io_entity_queries.entity_queries[entity_type]['project_required']:
if project_id is None:
raise ShotgunIOError("'project_id' is a required parameter"\
" for getting '%s'" % entity_type)
filters.append(
['project', 'is', {'type':'Project', 'id': project_id}]
)
if io_entity_queries.entity_queries[entity_type]['user_required']:
if user_id is None:
raise ShotgunIOError("'user_id' is a required parameter "\
"for getting '%s'" % entity_type)
if entity_type == 'tasks':
filters.append(
['task_assignees', 'is',
{'type':'HumanUser', 'id':user_id}])
elif entity_type == 'projects':
# we could add a filter for Projects the user is linked to
# but if this is overridden by permissions, we have no way
# of knowing that from the API. So when we do, add that
# logic here.
pass
self._shotgun_connect()
try:
entities = self._sg.find(
io_entity_queries.entity_queries[entity_type]['entity_type'],
filters=filters,
fields=io_entity_queries.entity_queries[entity_type]['fields'],
order=io_entity_queries.entity_queries[entity_type]['order'])
except Exception, e:
raise ShotgunIOError("Error retrieving %s list from Shotgun: "\
"%s" % (entity_type, e))
else:
# in a recursive call to combine entity lookups we'll do the
# formatting when we're all done
if no_format:
return entities
out = self.output.format_output(entity_type, entities)
return out
def get_entities_from_file_path(self, scene_path):
# """Attemps to match a Project and Shot name automatically from the scene
# file path based on regex definitions in the shotgun_io config.
# This method is a good candidate for overriding in
# :class:`ShotgunIOCustom` since there's often varying logic that needs to
# be implemented for each studio.
# Currently just returns strings for project and shot which can then
# be used to lookup their entity ids in Shotgun. Returns None if no match
# was found.
# .. warning:: This method is not implemented
# :param scene_path: full path to the scene path
# :type entity_type: `str`
# :raises: NotImplementedError
# :returns: tuple with project and shot
# :rtype: `tuple`
# """
raise NotImplementedError("This method isn't implemented")
ret = None
for r in self.scenefile_path_regexes:
result = re.match(r, scene_path)
if result:
project, shot = result.groups()
if project and shot:
ret = (project, shot)
out = self.output.format_output('scenepath_parse', ret)
return out
def get_next_version_number(self, entity, task=None):
"""Calculates the next Version number based on the pending Version
that is about to be created.
The logic used depends on what the setting for ``version_numbering`` is
in the config.
.. note:: If the config specifies Task-based numbering and no Task
is provided, it will fall back on global numbering.
:param entity: Shotgun entity the Version is for. The format is the
standard Shotgun entity hash format with a required 'type' and 'id'
key in the dictionary. Eg. ``{'type':'Shot', 'id':123}``
:type entity: `dict`
        :param task: Task entity required for Task-based or Pipeline Step-based
numbering. Format is the standard Shotgun entity hash format with
a required 'type' and 'id' key in the dictionary. Eg.
``{'type':'Task', 'id':456}``
:type task: `dict` or `None`
:returns: the next Version number for the Version contained in this
instance
:rtype: `int`
"""
self._shotgun_connect()
next_number = 1
step = None
filters = [['entity', 'is', entity]]
        # the configured Task field name is needed for both task- and step-based numbering
        tf = self._config.get('version_fields', 'task')
        # task-based numbering: add task filter
        if self.version_numbering == 'task':
if task:
filters.append([tf, 'is', task])
# pipeline step-based numbering: add step filter
elif self.version_numbering == 'pipeline_step':
# lookup pipeline step for provided task and append appropriate
# filter
if task:
result = self._sg.find_one(
'Task',
[['id', 'is', task['id']]],
['step'])
if result:
step = result['step']
filters.append(['%s.Task.step' % tf, 'is', step])
result = self._sg.find("Version", filters, ['code'],
order=[{'field_name':'content','direction':'asc'}])
for v in result:
match = re.search('v(\d*$)',v['code'])
if match and len(match.groups())==1:
version_number = int(match.group(1))
if version_number >= next_number:
next_number = version_number+1
return next_number
def create_version(self, version_data):
"""create new Version entity in Shotgun from the provided Version data
If the ``version_data`` is a JSON `str`, this will automatically
        perform JSON decoding prior to validation, and translate the data to the
        expected Shotgun API format based on the integration environment.
:param version_data: structured data representing field/value pairs for
creating the new Version. Input formats can vary depending on
implementation. Eg. this can be a `str` JSON string or a Python
`dict`
:type version_data: `str` or `dict`
:raises: :class:`ShotgunIOError` if validation fails or Shotgun query
fails.
"""
ret = {}
self.input.action = 'create'
self.input._version_schema = self._get_version_schema()
# turn JSON string into valid dictionary hash
self.input.extract_version_data(version_data)
        # check required fields (sometimes project and shot will be part of the
        # Task; that case still needs handling)
for f in REQUIRED_SG_VERSION_FIELDS:
if not f in self.input.shotgun_input:
raise ShotgunIOError("Unable to create Version in Shotgun. "\
"Missing required Shotgun field '%s'" % f)
# append next version number formatted as defined
if self.version_numbering:
entity = self.input.shotgun_input.get(
self._config.get('version_fields', 'shot'))
task = self.input.shotgun_input.get(
self._config.get('version_fields', 'task'))
version_number = self.get_next_version_number(entity, task)
if version_number:
self.input.shotgun_input['code'] += self.version_number_format \
% (version_number)
# create version
self._shotgun_connect()
try:
version = self._sg.create('Version', self.input.shotgun_input)
ret.update(version)
except Exception, e:
raise ShotgunIOError("Error creating Version in Shotgun: %s" % e)
else:
if self.input.thumbnail_path is not None:
result = self.upload_thumbnail(version['id'],
self.input.thumbnail_path)
if result:
ret['thumbnail_id'] = result
if self.input.movie_upload is not None:
result = self.upload_movie(version['id'],
self.input.movie_upload)
if result:
ret.update(result)
return self.output.format_output('version_create', ret)
def update_version(self, version_data):
"""Update existing Version entity in Shotgun from the provided Version
data.
If the ``version_data`` is a JSON `str`, this will automatically
        perform JSON decoding prior to validation, and translate the data to the
        expected Shotgun API format based on the integration environment.
:param version_data: structured data representing field/value pairs for
creating the new Version. Input formats can vary depending on
implementation. Eg. this can be a `str` JSON string or a Python
`dict`. Requires ``id`` key with integer value that
corresponds to the id of the Version entity to update in Shotgun.
:type version_data: `str` or `dict`
:raises: :class:`ShotgunIOError` if validation fails or Shotgun query
fails.
"""
ret = {}
self.input.action = 'update'
self.input._version_schema = self._get_version_schema()
self.input.extract_version_data(version_data)
# check required fields
if 'id' not in self.input.shotgun_input:
raise ShotgunIOError("Unable to update Version in Shotgun. Missing"\
" required field 'version_id' in Version hash")
# extract Version id and remove it from the dict
version_id = self.input.shotgun_input['id']
del(self.input.shotgun_input['id'])
# check if we are uploading a thumbnail
if self.input.thumbnail_path is not None:
result = self.upload_thumbnail(version_id,
self.input.thumbnail_path)
if result:
ret['thumbnail_id'] = result
# check if we are uploading a movie
if self.input.movie_upload is not None:
            result = self.upload_movie(version_id, self.input.movie_upload)
if result:
ret.update(result)
# check if we have anything else to update
if len(self.input.shotgun_input) == 0:
return ret
# update version
self._shotgun_connect()
try:
version = self._sg.update('Version',
version_id, self.input.shotgun_input)
ret.update(version)
except Exception, e:
raise ShotgunIOError("Error updating Version in Shotgun: %s" % e)
return self.output.format_output('version_update', ret)
def upload_thumbnail(self, version_id, frames_path):
"""Upload file located at thumb_path as thumbnail for Version with
version_id
"""
self._shotgun_connect()
print "uploading thumbnail from %s" % frames_path
# /Users/kp/Documents/shotgun/maya/horse_gallop/frames/horseGallop_final.iff.#
# replace frame syntax with regex
pattern = re.sub('[#@]', '[0-9]', frames_path)
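        # e.g. the example path above ending in ".iff.#" becomes ".iff.[0-9]";
        # each '#' or '@' placeholder turns into a one-digit character class
        # that glob can match against the rendered frames.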
# lookup output frames that match our pattern
files = glob.glob(pattern)
# nothing matched? We should generate an error here
if len(files) == 0:
logging.error("no matching frames were found to upload thumbnail. "\
"searched %s" % pattern)
return False
print "found frames"
frame_choice = self._config.get('version_values', 'thumbnail_frame')
if frame_choice == 'first':
thumb_frame = files[0]
elif frame_choice == 'last':
thumb_frame = files[-1]
else:
# middle frame is the default
thumb_frame = files[ len(files)/2 ]
        # check that thumb_frame exists and is readable.
if not os.path.exists(thumb_frame):
raise ShotgunIOError("Error uploading thumbnail '%s' to Shotgun "\
"for Version %d: thumbnail path not found." % \
(thumb_frame, version_id))
return False
print "found thumbnail frame %s" % thumb_frame
# upload thumbnail
try:
result = self._sg.upload_thumbnail('Version', version_id, thumb_frame)
except Exception, e:
raise ShotgunIOError("Error uploading thumbnail '%s' to Shotgun "\
"for Version %d: %s" % \
(thumb_frame, version_id, e))
return False
print "done"
return self.output.format_output("thumbnail_id", result)
def upload_movie(self, version_id, movie_path):
"""Upload file located at movie_path to Version with
version_id
"""
self._shotgun_connect()
# check that movie_path exists and is readable.
if not os.path.exists(movie_path):
raise ShotgunIOError("Error uploading movie '%s' to Shotgun "\
"for Version %d: movie path not found." % \
(movie_path, version_id))
return False
# upload movie to movie field
movie_field = self._config.get("version_fields","movie_path")
try:
result = self._sg.upload('Version', version_id, movie_path,
movie_field)
except Exception, e:
raise ShotgunIOError("Error uploading thumbnail '%s' to Shotgun "\
"for Version %d: %s" % \
(movie_path, version_id, e))
return False
return self.output.format_output("movie_upload", {movie_field: result})
def delete_version(self, version_id):
"""Delete existing Version entity in Shotgun with the provided Version
id.
Deletes (or 'retires' in Shotgun lingo) the Version with id
``version_id``. Only useful for cases where a user cancels or dumps
a render job and the associated Version in Shotgun would be irrelevant.
Deleting the Version ensures there is no extra cruft lying around.
:param version_id: Shotgun id of the Version to be deleted.
:type version_data: `int`
:returns: `True` if the Version was deleted. `False` if no Version with
id ``version_id`` exists
.. note:: if the ``version_id`` does not exist, the Shotgun API spits
out an error response to STDERR
"""
self._shotgun_connect()
if not isinstance(version_id, int):
raise ShotgunIOError('version_id must be an integer')
try:
result = self._sg.delete('Version', version_id)
except shotgun_api3.Fault, e:
raise ShotgunIOError('Error deleting Version #%d: %s' \
% (version_id, e))
else:
return self.output.format_output("version_delete", result)
def process_logfiles(self, logfiles):
raise NotImplementedError("processing logfiles will be enabled before "\
"release at least to not generate errors.")
class ShotgunIoOptionParser(optparse.OptionParser):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
subclassed to make exit code 1 instead of 0.
"""
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(1, "%s: error: %s\n" % (self.get_prog_name(), msg))
"""override format_description to not strip newlines
"""
def format_description(self, formatter):
return self.get_description()
class ShotgunConfigParser(ConfigParser.SafeConfigParser):
# def __init__(self):
# super(ShotgunConfigParser, self).__init__()
def getlist(self, section, option, sep=",", chars=None):
"""Return a list from a ConfigParser option. By default, split on a
comma and strip whitespaces. Do not interpolate any % characters
"""
return [chunk.strip(chars)
for chunk in self.get(section, option, 1).split(sep)]
def check_options(option, opt_str, value, parser):
"""does simple check for valid option combinations when called from the
command line
This is not a very robust method but does some basic checking to ensure
that more than one action isn't being attempted at the same time.
"""
exclusive_options = [
'list', 'fields', 'create_version', 'update_version', 'statuses',
'logfiles', 'templates', 'getconfig', 'delete_version','env_vars'
]
boolean_options = [
'fields', 'statuses', 'templates', 'getconfig', 'workflow','env_vars'
]
for o in exclusive_options:
if getattr(parser.values, o):
raise ShotgunIOError("%s option cannot be used when '--%s' option "\
"has already been specified" % (opt_str, o))
if option.dest in boolean_options:
setattr(parser.values, option.dest, True)
else:
setattr(parser.values, option.dest, value)
def read_options():
"""defines options when shotgun_io is called from the commandline
"""
# options
usage = "USAGE: %prog [options]\nTry %prog --help for more information"
version_string = "v%s (%s)" % (__version__, __status__)
full_version_string = "%prog " + version_string
description = "%prog provides a simplified wrapper around the Shotgun API to facilitate integrating render queues with Shotgun.\n\n"+full_version_string
parser = ShotgunIoOptionParser(usage=usage, version=full_version_string, description=description)
parser.add_option("-l", "--list", type="string", default=None, action="callback", callback=check_options, help="retrieve a list of entities from Shotgun")
parser.add_option("-p", "--project_id", type="int", default=None, help="required Project id when using '-l (--list)' option with entities that are Project specific (assets, shots, etc.)")
parser.add_option("-u", "--user_id", type="int", default=None, help="required HumanUser id when using '-l (--list) tasks'")
parser.add_option("-n", "--validate_user", type="string", default=None, help="test whether provided username is valid - returns user id and username when valid, nothing when invalid")
parser.add_option("-C", "--create_version", type="string", default=None, action="callback", callback=check_options, help="create Version in Shotgun. Value must be a valid JSON encoded hash with required keys")
parser.add_option("-U", "--update_version", type="string", default=None, action="callback", callback=check_options, help="update Version in Shotgun. Value must be a valid JSON encoded hash with at least a version_id key (for the Version to update)")
parser.add_option("-D", "--delete_version", type="int", default=None, action="callback", callback=check_options, help="delete Version in Shotgun. Value is an integer representing the Shotgun id of the Version to be deleted")
parser.add_option("-f", "--fields", dest="fields", action="callback", callback=check_options, help="return a list of valid fields and field types on the Version entity for storing information.")
parser.add_option("-t", "--statuses", dest="statuses", action="callback", callback=check_options, help="return a list of valid status values for Versions")
parser.add_option("-x", "--logfiles", type="string", default=None, action="callback", callback=check_options, help="path to logfiles for processing")
parser.add_option("-v", "--version_id", type="int", default=None, help="Shotgun Version id required when using '-x (--logfiles)' option to process logfiles")
parser.add_option("-m", "--templates", dest="templates", action="callback", callback=check_options, help="return a list of Version name templates defined in shotgun_io.conf")
parser.add_option("-w", "--workflow", dest="workflow", action="callback", callback=check_options, help="return the current workflow setting defined in the config (default is 'task')")
parser.add_option("-e", "--env", dest="env_vars", action="callback", callback=check_options, help="returns preset Shotgun vars if they are already decided by context")
parser.add_option("--getconfig", dest="getconfig", action="callback", callback=check_options, help="display the current config values from shotgun_io.conf")
return parser
def main():
"""Handles execution of shotgun_io when called from the command line
"""
parser = read_options()
(options, args) = parser.parse_args()
io = ShotgunIO()
io.input = io_input.InputCmdLine(io._config)
io.output = io_output.OutputCmdLine()
if options.env_vars:
print io.load_env_variables()
# list entities
elif options.list:
if not io._validate_list_option(options.list) and options.list not in NON_ENTITY_LIST_VALUES:
raise ShotgunIOError("--list value '%s' is invalid. Valid "\
"options are %s" %
(options.list,
io_entity_queries.entity_queries.keys()+NON_ENTITY_LIST_VALUES))
if (options.list == 'entities' or \
io_entity_queries.entity_queries[options.list]['project_required']) and \
options.project_id == None:
raise ShotgunIOError("-l (--list) option '%s' requires a project id: -p (--project_id)" % options.list)
elif options.list in ['tasks'] and options.user_id == None:
raise ShotgunIOError("-l (--list) option '%s' requires a user id: -u (--user_id)" % options.list)
else:
print io.get_entities(options.list, project_id=options.project_id, user_id=options.user_id)
# validate username
elif options.validate_user:
out = io.validate_user(options.validate_user)
if not out:
raise ShotgunIOError("User '%s' is invalid." % options.validate_user)
print out
# list fields
elif options.fields:
print io.get_version_fields()
# list valid status values
elif options.statuses:
print io.get_version_status_values()
# create Version in Shotgun
elif options.create_version:
print io.create_version(options.create_version)
# update Version in Shotgun
elif options.update_version:
print io.update_version(options.update_version)
    # delete Version in Shotgun (no output)
elif options.delete_version:
io.delete_version(options.delete_version)
# process logfiles
elif options.logfiles:
if options.version_id is None:
raise ShotgunIOError("-x (--logfiles) option also requires a Version id: -v (--version_id)")
print io.process_logfiles(options.logfiles)
# list version name templates
elif options.templates:
print io.get_version_name_templates()
# get config vals
elif options.getconfig:
print io.get_config()
# get workflow
elif options.workflow:
print io.get_workflow()
else:
        parser.print_help()
if len(sys.argv) > 1:
raise ShotgunIOError("invalid option combination '%s'. The '%s' "\
"switch probably requires another switch to be provided.\n\n"\
"Try '%s --help' to see the valid options" %
(sys.argv[1],sys.argv[1],os.path.basename(__file__)))
else:
parser.print_usage(sys.stderr)
raise ShotgunIOError("At least one option is required. No options specified")
# list options here
if __name__ == '__main__':
# executed only when called from the command line
try:
main()
# runtime errors
except ShotgunIOError, e:
logging.error(e)
sys.exit(1)
# validation errors
except ValueError, e:
logging.error(e)
sys.exit(1)
# configuration errors
except ConfigParser.NoOptionError, e:
logging.error('There is a problem with your configuration file. '\
'You are missing a required option. %s' % e)
sys.exit(1)
except ConfigParser.NoSectionError, e:
logging.error('There is a problem with your configuration file. '\
'You are missing a section header (it should look like '\
'[section_header]). %s' % e)
sys.exit(1)
# import errors
except ImportError, e:
logging.error('You are missing required Python modules: %s' % e)
sys.exit(1)
# unimplemented errors
except NotImplementedError, e:
logging.error('This command is not yet implemented: %s' % e)
sys.exit(0)
sys.exit(0)
| 1.210938
| 1
|
src/feature/commands/Export.py
|
junhg0211/Condictbot
| 0
|
12779698
|
<filename>src/feature/commands/Export.py
from os import mkdir
from os.path import exists
from discord import Message, File
from feature.commands.Command import Command
from util.constants import COMMAND_IDENTIFIER, WORK_END_EMOJI
from util.util import is_dm_channel, get_language, pickle_to_json
class Export(Command):
head: str = 'export'
emoji: str = ':arrow_down:'
description_path: str = 'command.export.description'
usage_paths: tuple = ((f'{COMMAND_IDENTIFIER}{head} <dictionary name>', 'command.export.usage.dictionary_name'),)
admin_only: bool = False
def __init__(self, tobcidnock):
super().__init__()
self.tobcidnock = tobcidnock
async def operate(self, message: Message):
name = self.remove_head(message.content)
if is_dm_channel(message.channel):
base_directory = f'./res/dictionary/dm/{message.author.id}'
else:
base_directory = f'./res/dictionary/{message.channel.guild.id}'
dictionary_path = f'{base_directory}/{name}.pickle'
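        # e.g. ./res/dictionary/<guild id>/<name>.pickle for guild channels,
        # or ./res/dictionary/dm/<user id>/<name>.pickle for DMs.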
if not exists(base_directory) or not exists(dictionary_path):
await message.channel.send('{} {} {}'.format(
self.emoji,
get_language(self.tobcidnock, message.author)['command']['export']['operate']['no_dictionary'].format(
name
),
WORK_END_EMOJI
))
return
await message.channel.send(file=File(pickle_to_json(dictionary_path)))
| 2.265625
| 2
|
verboselib/__init__.py
|
oblalex/verboselib
| 3
|
12779699
|
<gh_stars>1-10
from .core import *
from .helpers import *
from .translations import *
| 1.054688
| 1
|
main.py
|
TotoAfreeca/Neural-Network
| 1
|
12779700
|
<gh_stars>1-10
import sys
from PyQt5.QtWidgets import QDialog, QTabWidget, QHBoxLayout, QSpinBox, QLabel, QDoubleSpinBox, QTextEdit, QRadioButton, \
QFileDialog, QErrorMessage, \
QApplication, QPushButton, QVBoxLayout, QLineEdit, QFormLayout, QWidget, QPlainTextEdit
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from PyQt5 import QtCore, QtGui
# Plotting
import matplotlib.pyplot as plt
import pandas as pd
import networkx as nx
from DataFormatter import DataFormatter
import time
from networkx.drawing.nx_agraph import graphviz_layout
from NeuralNetwork import NeuralNetwork
from Functions import sigmoid_unipolar_function, sigmoid_unipolar_prime, tanh, tanh_prime
import numpy as np
class Window(QTabWidget):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
np.set_printoptions(suppress=True)
np.set_printoptions(linewidth=np.inf)
# Tabs
self.create_tab = QWidget()
self.addTab(self.create_tab, "Create")
self.train_tab = QWidget()
self.addTab(self.train_tab, "Train")
self.error_tab = QWidget()
self.addTab(self.error_tab, "Error")
self.summary_tab = QWidget()
self.addTab(self.summary_tab, "Summary")
self.error_list = []
self.test_error = []
# Create Tab
#train tab
self.learning_rate = QDoubleSpinBox()
self.learning_rate.setRange(0.0001, 0.9999)
self.learning_rate.setValue(0.1)
self.learning_rate.setSingleStep(0.01)
self.learning_rate.setDecimals(4)
self.epochs_number = QSpinBox()
self.epochs_number.setRange(1, 100000)
self.epochs_number.setValue(100)
self.epochs_number.setSingleStep(1)
self.max_error = QDoubleSpinBox()
self.max_error.setRange(0.0001, 0.9999)
self.max_error.setValue(0.09)
self.epoch_sum = 0
self.epoch_label = QLabel("Epoch: ")
self.error_label = QLabel("Error: ")
self.canvas_train = FigureCanvas(plt.figure(1))
self.stop = False
self.stop_button = QPushButton("Stop")
self.stop_button.clicked.connect(self.change_stop)
self.randomize_button = QPushButton('Initialize weights')
self.randomize_button.clicked.connect(self.randomize)
self.train_by_steps_button = QPushButton('Train ' + self.epochs_number.text() + ' epochs')
#error tab
self.epoch_label_error = QLabel("Epoch: ")
self.error_label_error = QLabel("Error: ")
self.error_figure = plt.figure(num=2, figsize=(100, 100))
self.canvas_error = FigureCanvas(self.error_figure)
self.stop = False
self.stop_button_error = QPushButton("Stop")
self.stop_button_error.clicked.connect(self.change_stop)
self.randomize_button_error = QPushButton('Initialize weights')
self.randomize_button_error.clicked.connect(self.randomize)
self.train_by_steps_button_error = QPushButton('Train ' + self.epochs_number.text() + ' epochs')
#summary tab
self.summary = QPlainTextEdit()
self.get_summary_button = QPushButton("Predict test & get summary")
self.get_summary_button.clicked.connect(self.write_summary)
self.train_tab_ui()
self.create_tab_ui()
self.error_tab_ui()
self.summary_tab_ui()
def create_tab_ui(self):
regex = r"^(\s*(\+)?\d+(?:\.\d)?\s*,\s*)+(-|\+)?\d+(?:\.\d+)?\s*$"
#regex = r"/^(\s*|\d+)$/"
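        # The active pattern above accepts a comma-separated list of two or
        # more (optionally signed/decimal) numbers, e.g. "4, 8, 3".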
validator = QtGui.QRegExpValidator(QtCore.QRegExp(regex), self)
self.layers_line_edit = QLineEdit()
self.layers_line_edit.textEdited.connect(self.layers_number_change)
self.layers_line_edit.setValidator(validator)
self.file_button = QPushButton("Read file", self)
self.file_button.clicked.connect(self.open_file_dialog)
self.file_text_edit = QLineEdit()
self.file_text_edit.setReadOnly(True)
self.network_exists = False
self.input_size = 0
self.output_size = 0
self.unipolar = QRadioButton("Unipolar sigmoid function")
self.unipolar.toggled.connect(lambda: self.function_select(self.unipolar))
self.unipolar.setChecked(True)
self.tanh = QRadioButton("Hyperbolic tangent function")
self.tanh.toggled.connect(lambda: self.function_select(self.tanh))
self.activation_function = sigmoid_unipolar_function
self.activation_prime = sigmoid_unipolar_prime
self.figure = plt.figure(num=1, figsize=(100, 100))
self.canvas_create = FigureCanvas(self.figure)
toolbar_create = NavigationToolbar(self.canvas_create, self)
data_form = QFormLayout()
network_data = QHBoxLayout()
network_data.addWidget(self.layers_line_edit)
data_form.addRow('Coma separated layers sizes:', network_data)
file_data = QHBoxLayout()
file_data.addWidget(self.file_button)
file_data.addWidget(self.file_text_edit)
functions_data = QHBoxLayout()
functions_data.addWidget(self.unipolar)
functions_data.addWidget(self.tanh)
data_form.addRow(functions_data)
data_form.addRow(file_data)
data_form.addRow(toolbar_create)
network_plot_create = QHBoxLayout()
network_plot_create.addWidget(QLabel(""))
network_plot_create.addWidget(self.canvas_create)
network_plot_create.addWidget(QLabel(""))
data_form.addRow(network_plot_create)
button = QPushButton('CREATE')
button.clicked.connect(self.create_network)
data_form.addRow(button)
self.epochs_number.editingFinished.connect(self.epochs_number_edited)
self.layer_sizes = []
self.create_tab.setLayout(data_form)
def train_tab_ui(self):
toolbar_train = NavigationToolbar(self.canvas_train, self)
train_form = QFormLayout()
train_data = QHBoxLayout()
train_data.addWidget(self.learning_rate)
train_data.addWidget(QLabel('Expected error'))
train_data.addWidget(self.max_error)
train_data.addWidget(QLabel('No. epochs'))
train_data.addWidget(self.epochs_number)
train_form.addRow('Learning rate:', train_data)
train_form.addRow(toolbar_train)
epoch_row = QHBoxLayout()
epoch_row.addWidget(self.epoch_label)
epoch_row.addWidget(self.error_label)
train_form.addRow(epoch_row)
network_plot_train = QHBoxLayout()
network_plot_train.addWidget(self.canvas_train)
train_form.addRow(network_plot_train)
buttons = QHBoxLayout()
buttons.addWidget(self.randomize_button)
buttons.addWidget(self.stop_button)
self.train_by_steps_button.clicked.connect(self.gui_train)
buttons.addWidget(self.train_by_steps_button)
train_form.addRow(buttons)
self.train_tab.setLayout(train_form)
def error_tab_ui(self):
error_form = QFormLayout()
toolbar_error = NavigationToolbar(self.canvas_error, self)
error_form.addRow(toolbar_error)
epoch_row = QHBoxLayout()
epoch_row.addWidget(self.epoch_label_error)
epoch_row.addWidget(self.error_label_error)
error_form.addRow(epoch_row)
error_plot = QHBoxLayout()
error_form.addWidget(self.canvas_error)
error_form.addRow(error_plot)
buttons = QHBoxLayout()
buttons.addWidget(self.randomize_button_error)
buttons.addWidget(self.stop_button_error)
self.train_by_steps_button_error.clicked.connect(self.gui_train_error)
buttons.addWidget(self.train_by_steps_button_error)
error_form.addRow(buttons)
self.error_tab.setLayout(error_form)
def summary_tab_ui(self):
summary_form = QFormLayout()
summary_form.addWidget(self.summary)
summary_form.addWidget(self.get_summary_button)
self.summary_tab.setLayout(summary_form)
def function_select(self, b):
if b.text() == "Unipolar sigmoid function":
if b.isChecked() == True:
self.activation_function = sigmoid_unipolar_function
self.activation_prime = sigmoid_unipolar_prime
else:
self.activation_function = tanh
self.activation_prime = tanh_prime
if b.text() == "Hyperbolic tangent function":
if b.isChecked() == True:
self.activation_function = tanh
self.activation_prime = tanh_prime
else:
self.activation_function = sigmoid_unipolar_function
self.activation_prime = sigmoid_unipolar_prime
def open_file_dialog(self):
dialog = QFileDialog.getOpenFileName(self, 'Open file')
if dialog[0].endswith('.csv'):
formatter = DataFormatter(dialog[0])
self.x_train, self.y_train = formatter.get_training_set()
self.x_test, self.y_test = formatter.get_test_set()
self.input_size, self.output_size = formatter.get_sizes()
self.file_text_edit.setText(dialog[0])
else:
error_dialog = QErrorMessage()
error_dialog.showMessage('Please select csv file.')
error_dialog.exec_()
def layers_number_change(self):
if self.layers_line_edit.text():
self.layer_sizes = [int(val) for val in self.layers_line_edit.text().split(",") if val and val not in '0+ ']
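            # e.g. "4, 8, 3" becomes [4, 8, 3]; empty chunks and bare
            # '0'/'+'/' ' entries are dropped before conversion.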
print(self.layer_sizes)
def create_network(self):
self.epoch_sum = 0
self.error_list = []
self.test_error = []
self.network = NeuralNetwork()
if self.input_size > 0 and self.output_size > 0:
self.network.create_layers(self.input_size, self.output_size, self.layer_sizes, self.activation_function,
self.activation_prime)
self.timer = QtCore.QTimer()
self.timer.setInterval(100)
self.timer.timeout.connect(self.plot_error)
self.plot_network(self.canvas_create)
self.plot_network(self.canvas_train)
else:
error_dialog = QErrorMessage()
error_dialog.showMessage('Please load the file first.')
error_dialog.exec_()
def plot_error(self):
if not self.stop:
plt.figure(2)
self.error_figure.clear()
test1 = np.arange(self.epoch_sum)
test2 = self.error_list
test3 = self.test_error
plt.plot(test1, test2, label='train')
#plt.plot(test1, test3, label='test')
plt.xlabel("Epoch")
plt.ylabel("Mean squared error (MSE)")
plt.legend(loc='upper right')
plt.grid()
self.update_labels()
self.canvas_error.draw()
    # Probably the ugliest piece of code I've ever written; it could probably be vectorized somehow.
def plot_network(self, canvas):
plt.figure(1)
self.figure.clear()
G = nx.DiGraph()
# input layer to first layer edges
ed = []
for i in range(0, self.network.layers[0].input_size):
vertex_name = 'x'+str(i+1)
for j in range(0, self.network.layers[0].output_size):
ed.append([vertex_name, 'h'+str(1)+str(j+1), np.round(self.network.layers[0].weights[i, j], 3)])
for layer_number in range(1, len(self.network.layers)):
prev_layer_size = self.network.layers[layer_number-1].output_size
for i in range(0, prev_layer_size):
vertex_name = 'h'+str(layer_number)+str(i+1)
for j in range(0, self.network.layers[layer_number].output_size):
if layer_number == len(self.network.layers)-1:
ed.append([vertex_name, 'OUT'+str(j+1), np.round(self.network.layers[layer_number].weights[i, j], 3)])
else:
ed.append([vertex_name, 'h' + str(layer_number + 1) + str(j + 1),
np.round(self.network.layers[layer_number].weights[i, j], 3)])
# ed = insert(ed, 2,list(np.around(self.network.layers[0].weights, 3).flatten()) , axis=1)
# print(ed)
# ed = [['x1', 4, -1],
# ['x1', 5, -1],
# ['x2', 4, -1],
# ['x2', 5, -1],
# ['x3', 4, -1],
# ['x3', 5, 10],
# [4, 3, -1],
# [5, 3, 100]]
G.add_weighted_edges_from(ed)
pos = graphviz_layout(G, prog='dot', args="-Grankdir=LR")
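        # 'dot' with rankdir=LR lays the graph out left to right, so the
        # input, hidden and output layers appear as successive columns.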
nx.draw(G, with_labels=True, pos=pos, font_weight='bold')
edge_labels = nx.get_edge_attributes(G, 'weight')
nx.draw_networkx_edge_labels(G, pos=pos, font_weight='bold', label_pos=0.85, edge_labels=edge_labels)
self.update_labels()
self.canvas_train.draw()
self.figure
self.canvas_create.draw()
def gui_train(self):
self.stop = False
self.timer.start()
QApplication.processEvents()
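        # Train one epoch at a time so the UI stays responsive: record the
        # epoch's MSE, redraw the network plots, and stop early once the
        # error target is reached or the Stop button is pressed.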
for i in range(int(self.epochs_number.value())):
if(self.stop != True):
self.network.train(self.x_train,
self.y_train,
epochs=1,
learning_rate=float(self.learning_rate.value()))
self.epoch_sum += 1
self.error_list.append(np.round(self.network.err, 5))
#self.test_error.append(np.round(self.network.calculate_test_mse(self.x_test, self.y_test), 5))
self.update_labels()
self.plot_network(self.canvas_create)
self.plot_network(self.canvas_train)
QApplication.processEvents()
if self.network.err <= self.max_error.value():
break
else:
self.plot_error()
self.plot_network(self.canvas_create)
self.plot_network(self.canvas_train)
break
self.stop = True
self.update_labels()
self.plot_network(self.canvas_create)
self.plot_network(self.canvas_train)
self.timer.stop()
def gui_train_error(self):
self.stop = False
self.timer.start()
QApplication.processEvents()
for i in range(int(self.epochs_number.value())):
            if not self.stop:
self.network.train(self.x_train,
self.y_train,
epochs=1,
learning_rate=float(self.learning_rate.value()))
self.epoch_sum += 1
self.error_list.append(np.round(self.network.err, 5))
#self.test_error.append(np.round(self.network.calculate_test_mse(self.x_test, self.y_test), 5))
if self.network.err <= self.max_error.value():
self.plot_error()
break
QApplication.processEvents()
self.stop = True
self.plot_network(self.canvas_create)
self.plot_network(self.canvas_train)
def update_labels(self):
self.epoch_label.setText("Epoch: " + str(self.epoch_sum))
self.error_label.setText("Error: " + str(np.round(self.network.err, 5)))
self.epoch_label_error.setText("Epoch: " + str(self.epoch_sum))
self.error_label_error.setText("Error: " + str(np.round(self.network.err, 5)))
def epochs_number_edited(self):
self.train_by_steps_button.setText('Train ' + self.epochs_number.text() + ' epochs')
self.train_by_steps_button_error.setText('Train ' + self.epochs_number.text() + ' epochs')
def randomize(self):
self.stop = True
self.epoch_sum = 0
self.error_list = []
self.test_error = []
self.network.randomize_layers()
self.plot_network(self.canvas_create)
self.plot_network(self.canvas_train)
self.canvas_error.draw()
self.timer.stop()
def write_summary(self):
self.summary.clear()
self.summary.appendPlainText("Finished learning")
self.summary.appendPlainText("Epochs: " + str(self.epoch_sum))
self.summary.appendPlainText("Train error: " + str(self.network.err))
self.summary.appendPlainText("Test error: " + str(self.network.calculate_test_mse(self.x_test, self.y_test)))
self.summary.appendPlainText("TEST RESULTS BELOW")
self.summary.appendPlainText("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
for i in range(len(self.x_test)):
predicted = np.round(self.network.predict(self.x_test[i]), 3)
self.summary.appendPlainText(str(i))
self.summary.appendPlainText(str(predicted))
self.summary.appendPlainText(str(self.y_test[i]))
    def sigmoid_unipolar_function(self, x):
        # Numerically stable unipolar sigmoid: exponentials are only ever taken of
        # non-positive values, so large |x| cannot overflow np.exp.
        pos_mask = (x >= 0)
        neg_mask = (x < 0)
        z = np.zeros_like(x)
        z[pos_mask] = np.exp(-x[pos_mask])
        z[neg_mask] = np.exp(x[neg_mask])
        # For x >= 0 this computes 1 / (1 + e^-x); for x < 0 it computes e^x / (1 + e^x).
        top = np.ones_like(x)
        top[neg_mask] = z[neg_mask]
        return top / (1 + z)
def sigmoid_unipolar_prime(self, z):
return self.sigmoid_unipolar_function(z) * (1 - self.sigmoid_unipolar_function(z))
def tanh_function(self, x):
return np.tanh(x)
def tanh_prime(self, x):
return 1 - np.tanh(x) ** 2
def change_stop(self):
self.stop = True
def window():
app = QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec_())
window()
| 1.984375
| 2
|
modules/photons_transport/comms/base.py
|
Djelibeybi/photons
| 51
|
12779701
|
<reponame>Djelibeybi/photons
from photons_transport.errors import FailedToFindDevice, StopPacketStream
from photons_transport.comms.receiver import Receiver
from photons_transport.comms.writer import Writer
from photons_app.errors import TimedOut, FoundNoDevices, RunErrors, BadRunWithResults
from photons_app import helpers as hp
from photons_protocol.packets import Information
from photons_protocol.messages import Messages
from photons_transport import catch_errors
import binascii
import logging
import asyncio
import random
import struct
import json
log = logging.getLogger("photons_transport.comms")
class FakeAck:
represents_ack = True
__slots__ = ["source", "sequence", "target", "serial", "Information"]
def __init__(self, source, sequence, target, serial, addr):
self.serial = serial
self.target = target
self.source = source
self.sequence = sequence
self.Information = Information(remote_addr=addr)
def __or__(self, kls):
return kls.Payload.Meta.protocol == 1024 and kls.Payload.represents_ack
def __repr__(self):
return f"<ACK source: {self.source}, sequence: {self.sequence}, serial: {self.serial}>"
class Found:
def __init__(self):
self.found = {}
def clone(self):
found = self.__class__()
for serial, services in self.found.items():
found[serial] = dict(services)
return found
def cleanse_serial(self, serial):
if isinstance(serial, str):
serial = binascii.unhexlify(serial)
return serial[:6]
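    # Illustrative note (the serial below is an assumption, not taken from this file):
    # cleanse_serial("d073d5000001") returns the raw 6-byte target
    # b"\xd0\x73\xd5\x00\x00\x01", which is what self.found is keyed on.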
@property
def serials(self):
return sorted([binascii.hexlify(target).decode() for target in self.found])
def __len__(self):
return len(self.found)
def __getitem__(self, serial):
return self.found[self.cleanse_serial(serial)]
def __setitem__(self, serial, value):
serial = self.cleanse_serial(serial)
self.found[serial] = value
def __delitem__(self, serial):
serial = self.cleanse_serial(serial)
del self.found[serial]
def __contains__(self, serial):
serial = self.cleanse_serial(serial)
return serial in self.found and self.found[serial]
def __eq__(self, other):
return self.found == other.found
def __bool__(self):
return bool(self.found)
def borrow(self, other_found, session):
if isinstance(other_found, Found):
for target in other_found:
if target not in self:
self[target] = {}
for service, transport in other_found[target].items():
if service not in self[target]:
self[target][service] = transport.clone_for(session)
async def remove_lost(self, found_now):
found_now = [self.cleanse_serial(serial) for serial in found_now]
for target in list(self):
if target not in found_now:
if target in self:
for transport in self[target].values():
try:
await transport.close()
except Exception as error:
log.error(hp.lc("Failed to close transport", error=error))
if target in self:
del self[target]
def __iter__(self):
return iter(self.found)
def __repr__(self):
services = json.dumps(
{
binascii.hexlify(t).decode(): ",".join(repr(s) for s in services.keys())
for t, services in self.found.items()
}
)
return f"<FOUND: {services}>"
def timeout_task(task, errf, serial):
"""Used to cancel sending a messages and record a timed out exception"""
if not task.done():
if not errf.done():
errf.set_exception(TimedOut("Waiting for reply to a packet", serial=serial))
task.cancel()
class Sender(hp.AsyncCMMixin):
def __init__(self, session, msg, reference, **kwargs):
self.msg = msg
self.kwargs = kwargs
self.session = session
self.reference = reference
if "session" in self.kwargs:
self.kwargs.pop("session")
self.script = self.session.transport_target.script(msg)
self.StopPacketStream = StopPacketStream
@hp.memoized_property
def gen(self):
return self.script.run(self.reference, self.session, **self.kwargs)
def __await__(self):
return (yield from self.all_packets().__await__())
async def all_packets(self):
results = []
try:
async for pkt in self:
results.append(pkt)
except asyncio.CancelledError:
raise
except RunErrors as error:
raise BadRunWithResults(results=results, _errors=error.errors)
except Exception as error:
raise BadRunWithResults(results=results, _errors=[error])
else:
return results
def __aiter__(self):
return self.stream_packets()
async def stream_packets(self):
async for pkt in self.gen:
yield pkt
async def start(self):
self.catcher = catch_errors(self.kwargs.get("error_catcher"))
self.kwargs["error_catcher"] = self.catcher.__enter__()
return self
async def finish(self, exc_typ=None, exc=None, tb=None):
if hasattr(self, "_gen"):
try:
await hp.stop_async_generator(self.gen, name="GenCatch::finish[stop_gen]", exc=exc)
except StopPacketStream:
pass
if exc_typ is not asyncio.CancelledError:
try:
self.catcher.__exit__(None, None, None)
except asyncio.CancelledError:
raise
except Exception as error:
raise error from None
if exc_typ is StopPacketStream:
return True
class Communication:
_merged_options_formattable = True
def __init__(self, target):
self.transport_target = target
self.found = Found()
self.stop_fut = hp.ChildOfFuture(
self.transport_target.final_future, name=f"{type(self).__name__}.__init__|stop_fut|"
)
self.receiver = Receiver()
self.received_data_tasks = hp.TaskHolder(
self.stop_fut, name=f"{type(self).__name__}.__init__|received_data_tasks|"
)
self.make_plans = __import__("photons_control.planner").planner.make_plans
self.setup()
def setup(self):
pass
def __call__(self, msg, reference=None, **kwargs):
return Sender(self, msg, reference, **kwargs)
@hp.memoized_property
def gatherer(self):
return __import__("photons_control.planner").planner.Gatherer(self)
async def finish(self, exc_typ=None, exc=None, tb=None):
self.stop_fut.cancel()
for serial in self.found.serials:
try:
await self.forget(serial)
except Exception as error:
log.error(hp.lc("Failed to close transport", error=error, serial=serial))
await self.received_data_tasks.finish(exc_typ, exc, tb)
@hp.memoized_property
def source(self):
"""Return us a source to use for our packets"""
return random.randrange(1, 1 << 32)
def seq(self, target):
"""Create the next sequence for this target"""
if not hasattr(self, "_seq"):
self._seq = {}
if target not in self._seq:
self._seq[target] = 0
self._seq[target] = (self._seq[target] + 1) % pow(2, 8)
return self._seq[target]
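    # Illustrative note: sequences are tracked per target and wrap modulo 256,
    # so the first call for a target returns 1 and the call after 255 returns 0.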
async def forget(self, serial):
if serial not in self.found:
return
services = self.found[serial]
del self.found[serial]
for service, transport in services.items():
try:
await transport.close()
except asyncio.CancelledError:
raise
except Exception as error:
log.exception(
hp.lc("Failed to close transport", service=service, error=error, serial=serial)
)
async def add_service(self, serial, service, **kwargs):
new = await self.make_transport(serial, service, kwargs)
if serial not in self.found:
self.found[serial] = {}
existing = self.found[serial].get(service)
if existing != new:
if existing:
try:
await existing.close()
except asyncio.CancelledError:
raise
except Exception as error:
log.error(
hp.lc(
"Failed to close old transport",
service=service,
error=error,
serial=serial,
)
)
self.found[serial][service] = new
async def find_devices(self, *, ignore_lost=False, raise_on_none=False, **kwargs):
"""Hook for finding devices"""
kwargs["ignore_lost"] = ignore_lost
kwargs["raise_on_none"] = raise_on_none
found, _ = await self.find_specific_serials(None, **kwargs)
return found
async def find_specific_serials(
self, serials, ignore_lost=False, raise_on_none=False, **kwargs
):
kwargs["ignore_lost"] = ignore_lost
kwargs["raise_on_none"] = raise_on_none
found = await self._find_specific_serials(serials, **kwargs)
missing = [] if serials is None else [serial for serial in serials if serial not in found]
if missing:
log.error(hp.lc("Didn't find some devices", missing=missing))
return found, missing
async def _find_specific_serials(
self, serials, ignore_lost=False, raise_on_none=False, timeout=60, **kwargs
):
found_now = await self._do_search(serials, timeout, **kwargs)
if not ignore_lost:
await self.found.remove_lost(found_now)
if serials is None and not found_now:
if raise_on_none:
raise FoundNoDevices()
else:
log.error(hp.lc("Didn't find any devices"))
return self.found
async def _do_search(self, serials, timeout, **kwargs):
raise NotImplementedError()
async def make_transport(self, serial, service, kwargs):
raise NotImplementedError()
async def choose_transport(self, packet, services):
raise NotImplementedError()
async def make_broadcast_transport(self, broadcast):
raise NotImplementedError()
def retry_gaps(self, packet, transport):
raise NotImplementedError()
async def broadcast(self, packet, broadcast, **kwargs):
kwargs["transport"] = await self.make_broadcast_transport(broadcast)
kwargs["is_broadcast"] = True
return await self.send_single(packet, **kwargs)
async def send_single(
self, original, packet, *, timeout, no_retry=False, broadcast=False, connect_timeout=10
):
transport, is_broadcast = await self._transport_for_send(
None, packet, original, broadcast, connect_timeout
)
retry_gaps = self.retry_gaps(original, transport)
writer = Writer(
self,
transport,
self.receiver,
original,
packet,
retry_gaps,
did_broadcast=is_broadcast,
connect_timeout=connect_timeout,
)
results = []
unlimited = False
if hasattr(original, "Meta"):
unlimited = original.Meta.multi == -1
async def wait_for_remainders(tick_fut, streamer_fut):
try:
await hp.wait_for_all_futures(tick_fut)
finally:
if unlimited and any(result.wait_for_result() for result in results):
await hp.wait_for_first_future(*results)
streamer_fut.cancel()
tick_fut = hp.ChildOfFuture(
self.stop_fut,
name=f"SendPacket({original.pkt_type, packet.serial})::send_single[tick_fut]",
)
streamer_fut = hp.ChildOfFuture(
self.stop_fut,
name=f"SendPacket({original.pkt_type, packet.serial})::send_single[streamer_fut]",
)
retry_ticker = retry_gaps.retry_ticker(
name=f"{type(self).__name__}({type(transport).__name__})::retry_ticker"
)
with tick_fut, streamer_fut:
async with hp.ResultStreamer(
streamer_fut, name=f"SendPacket({original.pkt_type, packet.serial}).send_single"
) as streamer:
await streamer.add_generator(
retry_ticker.tick(tick_fut, timeout),
context="tick",
on_done=lambda res: tick_fut.cancel(),
)
await streamer.add_coroutine(wait_for_remainders(tick_fut, streamer_fut))
streamer.no_more_work()
async for result in streamer:
if not result.successful:
try:
raise result.value
finally:
del result
if result.context == "tick":
if not no_retry or not results:
result = await writer()
results.append(result)
await streamer.add_task(result, context="write")
elif result.context == "write":
return result.value
raise TimedOut(
"Waiting for reply to a packet",
serial=packet.serial,
sent_pkt_type=packet.pkt_type,
source=packet.source,
sequence=packet.sequence,
)
async def _transport_for_send(self, transport, packet, original, broadcast, connect_timeout):
is_broadcast = bool(broadcast)
if transport is None and (is_broadcast or packet.target is None):
is_broadcast = True
transport = await self.make_broadcast_transport(broadcast or True)
if transport is None:
if packet.serial not in self.found:
raise FailedToFindDevice(serial=packet.serial)
transport = await self.choose_transport(original, self.found[packet.serial])
await transport.spawn(original, timeout=connect_timeout)
return transport, is_broadcast
def sync_received_data(self, *args, **kwargs):
return self.received_data_tasks.add(self.received_data(*args, **kwargs))
async def received_data(self, data, addr, allow_zero=False):
"""What to do when we get some data"""
if type(data) is bytes:
log.debug(hp.lc("Received bytes", bts=binascii.hexlify(data).decode()))
try:
protocol_register = self.transport_target.protocol_register
protocol, pkt_type, Packet, PacketKls, data = Messages.get_packet_type(
data, protocol_register
)
if protocol == 1024 and pkt_type == 45:
if isinstance(data, bytes):
source = struct.unpack("<I", data[4:8])[0]
target = data[8:16]
sequence = data[23]
else:
source = data.source
target = data.target
sequence = data.sequence
serial = binascii.hexlify(target[:6]).decode()
pkt = FakeAck(source, sequence, target, serial, addr)
else:
if PacketKls is None:
PacketKls = Packet
if isinstance(data, PacketKls):
pkt = data.clone()
else:
pkt = PacketKls.create(data)
except Exception as error:
log.exception(error)
else:
await self.receiver.recv(pkt, addr, allow_zero=allow_zero)
| 2.140625
| 2
|
homeassistant/components/solax/const.py
|
PiotrMachowski/core
| 3
|
12779702
|
"""Constants for the solax integration."""
DOMAIN = "solax"
| 1.007813
| 1
|
scripts/WIPS2015/incidentCases_RtEstimation_dataExport.py
|
eclee25/flu-SDI-exploratory-age
| 3
|
12779703
|
<reponame>eclee25/flu-SDI-exploratory-age
#!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 2/3/15
###Function: Export incident ILI cases by week for total population in all service places
###Import data: SQL_export/OR_allweeks.csv, SQL_export/totalpop.csv
###Command Line: python
##############################################
### notes ###
### packages/modules ###
import csv
from datetime import date
## local modules ##
import functions_v5 as fxn
### data structures ###
### functions ###
def export_totalILIcases(csv_incidence):
dict_ILIage_week, dict_wk, dict_ILI_week = {},{},{}
for row in csv_incidence:
week = row[1]
Sun_dt = date(int(week[:4]), int(week[5:7]), int(week[8:]))
wk, seas, _ = fxn.SDIweek(Sun_dt) # Thu date, season, wknum
dict_ILIage_week[(wk, str(row[2]))] = float(row[3])
dict_wk[wk] = seas
for wk in sorted(dict_wk):
seas = dict_wk[wk]
cases = sum([dict_ILIage_week.get((wk, age), 0) for age in ['C', 'A', 'O']])
dict_ILI_week[(seas, wk)] = cases
return dict_ILI_week
### data files ###
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks.csv','r')
# incid = csv.reader(incidin, delimiter=',')
incid787in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient_zip787.csv', 'r')
incid787 = csv.reader(incid787in, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
### program ###
d_ILI_week = export_totalILIcases(incid787)
for s in ps:
filename = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/EpiEstim_totalILI_allLocs_787_S%s.csv' %(s)
d_ILI_week_subset = dict((k, d_ILI_week[k]) for k in d_ILI_week if k[0]==s)
dummyweeks = sorted([key[1] for key in d_ILI_week_subset])
with open(filename, 'a') as f:
for k,v in sorted(d_ILI_week_subset.iteritems()):
f.write(str(k[0])) # season number
f.write(',')
f.write(str(k[1])) # week
f.write(',')
f.write(str(v)) # new cases
f.write('\n')
| 2.28125
| 2
|
utility_scripts/configureCMK.py
|
jjk-dev/aws-qnabot
| 197
|
12779704
|
<reponame>jjk-dev/aws-qnabot
import boto3
from botocore.config import Config
import argparse
import json
import base64
import sys
parser = argparse.ArgumentParser(description='Uses a specified CMK to encrypt QnABot Lambdas and Parameter Store settings')
parser.add_argument("region", help="AWS Region")
parser.add_argument("stack_arn", help="the arn of the QnABot CloudFormation Stack")
parser.add_argument("cmk_arn", help="the ARN of the Customer Master Key to use for encryption")
parser.add_argument("target_s3_bucket", help="the Name of the S3 bucket to use for server access logs")
args = type('', (), {})()
args = parser.parse_args()
client_config = Config(
region_name = args.region
)
lambda_client = boto3.client('lambda', config=client_config)
iam_client = boto3.client('iam', config=client_config)
role_paginator = iam_client.get_paginator('list_role_policies')
kms_client = boto3.client("kms", config=client_config)
cloudformation_client = boto3.client('cloudformation', config=client_config)
ssm_client = boto3.client('ssm', config=client_config)
s3_client = boto3.client('s3', config=client_config)
ddb_client = boto3.client('dynamodb', config=client_config)
sts_client = boto3.client('sts', config=client_config)
kinesis_client = boto3.client('firehose', config=client_config)
policy_name = "CMKPolicy4"
policy_document = {
"Version":"2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Action":[
"kms:Decrypt",
"kms:Encrypt",
"kms:GenerateDataKey"
],
"Resource":args.cmk_arn
}
]
}
cmk_roles_logical_ids = [
'S3AccessRole',
'FirehoseESS3Role',
'AdminRole',
'ExportRole',
'ImportRole',
'ApiGatewayRole',
'ESCognitoRole',
'KibanaRole',
]
cmk_roles_physical_ids = []
def assign_role(role_name):
role_iterator = role_paginator.paginate(
RoleName=role_name,
PaginationConfig={
'MaxItems': 1000,
'PageSize': 1000
}
)
print(f"Updating role {role_name}...")
cmk_policy_exists = False
for role in role_iterator:
if policy_name in role["PolicyNames"]:
cmk_policy_exists = True
break
if not cmk_policy_exists:
iam_client.put_role_policy(RoleName=role_name, PolicyName = policy_name,PolicyDocument=json.dumps(policy_document))
def put_key_policy (stackname,roles):
response = kms_client.get_key_policy(KeyId = args.cmk_arn, PolicyName='default')
policy = response['Policy'].replace("\n","")
policy = json.loads(policy)
caller_identity = sts_client.get_caller_identity()
new_statement = []
for statement in policy["Statement"]:
if(statement["Sid"] != stackname):
new_statement.append(statement)
policy["Statement"] = new_statement
formatted_roles = []
for role in roles:
formatted_roles.append(f"arn:aws:iam::{caller_identity['Account']}:role/{role}")
policy["Statement"].append(
{
"Sid": stackname,
"Effect": "Allow",
"Principal": {
"AWS": formatted_roles
},
"Action": [
"kms:Encrypt",
"kms:Decrypt",
"kms:GenerateDataKey"
],
"Resource": "*"
}
)
print(f"Updating policy for key {args.cmk_arn}")
kms_client.put_key_policy(
KeyId = args.cmk_arn,
PolicyName = "default",
Policy = json.dumps(policy)
)
print(f"Policy for key {args.cmk_arn} updated.")
def process_stacks(stackname):
paginator = cloudformation_client.get_paginator('list_stack_resources')
response_iterator = paginator.paginate(
StackName=stackname,
PaginationConfig={
            'MaxItems': 10000,
}
)
for response in response_iterator:
lambda_resources = filter(lambda x: x["ResourceType"] == "AWS::Lambda::Function",response["StackResourceSummaries"])
for lambda_func in lambda_resources:
lambda_client.update_function_configuration(FunctionName=lambda_func["PhysicalResourceId"],KMSKeyArn=args.cmk_arn)
print(f"Updated function {lambda_func['PhysicalResourceId']} in stack {stackname}")
lambda_configuration = lambda_client.get_function_configuration(FunctionName=lambda_func["PhysicalResourceId"])
role_name = lambda_configuration["Role"].split("/")[-1]
assign_role(role_name)
ssm_parameters = filter(lambda x: x["ResourceType"] == "AWS::SSM::Parameter",response["StackResourceSummaries"])
for parameter in ssm_parameters:
parameter_name = parameter["PhysicalResourceId"]
parameter_response = ssm_client.get_parameter(
Name=parameter_name,
WithDecryption=True
)
parameter_value = parameter_response['Parameter']['Value']
            description = parameter_response['Parameter']["Description"] if "Description" in parameter_response['Parameter'] else ""
ssm_client.put_parameter(
Name=parameter_name,
Description=description,
Value=parameter_value,
Type='SecureString',
KeyId=args.cmk_arn,
Overwrite=True,
)
s3_buckets = filter(lambda x: x["ResourceType"] == "AWS::S3::Bucket",response["StackResourceSummaries"])
for bucket in s3_buckets:
s3_client.put_bucket_encryption(
Bucket=bucket["PhysicalResourceId"],
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': args.cmk_arn
}
},
]
}
)
print(f"Encryption set for {bucket['PhysicalResourceId']}")
s3_client.put_bucket_logging(
Bucket=bucket["PhysicalResourceId"],
BucketLoggingStatus={
'LoggingEnabled': {
'TargetBucket': args.target_s3_bucket,
'TargetPrefix': bucket["PhysicalResourceId"] + '/'
}
}
)
print(f"Access Logs set for {bucket['PhysicalResourceId']}")
ddb_tables = filter(lambda x: x["ResourceType"] == "AWS::DynamoDB::Table",response["StackResourceSummaries"])
for table in ddb_tables:
table_description = ddb_client.describe_table(TableName = table["PhysicalResourceId"])
if('SSEDescription' not in table_description["Table"] or 'KMSMasterKeyArn' not in table_description["Table"]['SSEDescription'] or table_description["Table"]['SSEDescription']['KMSMasterKeyArn']!= args.cmk_arn ):
ddb_client.update_table(
TableName = table["PhysicalResourceId"],
SSESpecification ={
'Enabled': True,
'SSEType': 'KMS',
'KMSMasterKeyId': args.cmk_arn
}
)
kinesis_streams = filter(lambda x: x["ResourceType"] == "AWS::KinesisFirehose::DeliveryStream",response["StackResourceSummaries"])
for stream in kinesis_streams:
stream_response = kinesis_client.describe_delivery_stream(
DeliveryStreamName=stream["PhysicalResourceId"])
if('KeyType' not in stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']
or ( stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']['KeyType'] != "CUSTOMER_MANAGED_CMK"
and stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']['KeyARN'] != args.cmk_arn)):
kinesis_client.start_delivery_stream_encryption(
DeliveryStreamName=stream["PhysicalResourceId"],
DeliveryStreamEncryptionConfigurationInput={
'KeyARN': args.cmk_arn,
'KeyType': 'CUSTOMER_MANAGED_CMK'})
role_resources = filter(lambda x: 'LambdaRole' in x["LogicalResourceId"] or x["LogicalResourceId"] in cmk_roles_logical_ids , response["StackResourceSummaries"])
for role_resource in role_resources:
print(f"role_resource: {role_resource['PhysicalResourceId']}")
cmk_roles_physical_ids.append(role_resource["PhysicalResourceId"])
assign_role(role_resource["PhysicalResourceId"])
process_stacks(args.stack_arn)
paginator = cloudformation_client.get_paginator('list_stack_resources')
response_iterator = paginator.paginate(
StackName=args.stack_arn,
PaginationConfig={
'MaxItems': 10000,
}
)
for response in response_iterator:
stacks = filter(lambda x: x["ResourceType"] == "AWS::CloudFormation::Stack",response["StackResourceSummaries"])
for stack in stacks:
print(f"Processing stack {stack['PhysicalResourceId']}")
process_stacks(stack["PhysicalResourceId"])
put_key_policy(args.stack_arn,cmk_roles_physical_ids)
| 2.15625
| 2
|
boto3_exceptions/discovery.py
|
siteshen/boto3_exceptions
| 2
|
12779705
|
import boto3
exceptions = boto3.client('discovery').exceptions
AuthorizationErrorException = exceptions.AuthorizationErrorException
ConflictErrorException = exceptions.ConflictErrorException
InvalidParameterException = exceptions.InvalidParameterException
InvalidParameterValueException = exceptions.InvalidParameterValueException
OperationNotPermittedException = exceptions.OperationNotPermittedException
ResourceInUseException = exceptions.ResourceInUseException
ResourceNotFoundException = exceptions.ResourceNotFoundException
ServerInternalErrorException = exceptions.ServerInternalErrorException
| 1.851563
| 2
|
rewx/core.py
|
akrk1986/re-wx
| 0
|
12779706
|
<reponame>akrk1986/re-wx
"""
https://medium.com/@sweetpalma/gooact-react-in-160-lines-of-javascript-44e0742ad60f
"""
import functools
import wx
from inspect import isclass
from rewx.dispatch import mount, update
from rewx.widgets import mount as _mount
from rewx.widgets import update as _update
mount.merge_registries(_mount._registry)
update.merge_registries(_update._registry)
def wsx(f):
def convert(spec: list):
type, props, *children = spec
return create_element(type, props, children=list(map(convert, children)))
# being used as a decorator
if callable(f):
@functools.wraps(f)
def inner(*args, **kwargs):
result = f(*args, **kwargs)
return convert(result)
return inner
else:
return convert(f)
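# Illustrative usage sketch (a minimal example; the widget keys 'block' and
# 'statictext' are taken from the __main__ demo below, everything else is assumed):
#
#     @wsx
#     def view(props):
#         return ['block', {},
#                 ['statictext', {'value': 'hello'}]]
#
# is equivalent to create_element('block', {}, children=[
#     create_element('statictext', {'value': 'hello'})]).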
def create_element(type, props, children=None):
element = {
'type': type,
'props': props
}
if children:
if not isinstance(children, list):
raise Exception('Children must be a list!')
element['props']['children'] = children
return element
def updatewx(instance, props):
if isinstance(instance, wx.StaticText):
instance: wx.StaticText = instance
if props.get('on_click'):
instance.Unbind(wx.EVT_LEFT_DOWN)
instance.Unbind(wx.EVT_LEFT_DCLICK)
instance.Bind(wx.EVT_LEFT_DOWN, props.get('on_click'))
instance.Bind(wx.EVT_LEFT_DCLICK, props.get('on_click'))
else:
instance.Unbind(wx.EVT_LEFT_DCLICK)
instance.Unbind(wx.EVT_LEFT_DOWN)
instance.SetLabel(props.get('value', ''))
elif isinstance(instance, wx.Panel):
instance: wx.Panel = instance
sizer: wx.BoxSizer = instance.GetSizer()
sizer.SetOrientation(props.get('orient', wx.VERTICAL))
return instance
def patch(dom: wx.Window, vdom):
parent = dom.GetParent()
try:
# if parent:
# parent.Freeze()
if not isclass(vdom['type']):
# because stateless functions are just opaque wrappers
# they have no relevant diffing logic -- there is no
# associated top-level WX element produced from a SFC, only
# their inner contents matter. As such, we evaluate it and
# push the result back into `patch`
return patch(dom, vdom['type'](vdom['props']))
if isclass(vdom['type']) and issubclass(vdom['type'], Component):
return Component.patch_component(dom, vdom)
elif not isinstance(dom, vdom['type']):
for child in dom.GetChildren():
dom.RemoveChild(child)
child.Destroy()
dom.Destroy()
newdom = render(vdom, parent)
elif isinstance(dom, vdom['type']):
update(vdom, dom)
pool = {f'__index_{index}': child for index, child in enumerate(dom.GetChildren())}
for index, child in enumerate(vdom['props'].get('children', [])):
key = f'__index_{index}'
if key in pool:
patch(pool[key], child)
del pool[key]
else:
# TODO: this IS the addition case, right?
# why would I need this removeChild line..?
if key in pool:
# if we're adding something new to the
# tree, it won't be present in the pool
parent.RemoveChild(pool[key])
# TODO: need to understand this case more
# if we're not updating, we're adding
# in which case.. why doesn't this fall to the
# `dom` instance..?
inst = render(child, dom)
if dom.GetSizer():
dom.GetSizer().Add(
inst,
child['props'].get('proportion', 0),
child['props'].get('flag', 0),
child['props'].get('border', 0)
)
# any keys which haven't been removed in the
# above loop represent wx.Objects which are no longer
# part of the virtualdom and should thus be removed.
for key, orphan in pool.items():
dom.RemoveChild(orphan)
orphan.Destroy()
newdom = dom
else:
raise Exception("unexpected case!")
p = parent
while p:
p.Layout()
p = p.GetParent()
return newdom
finally:
# TODO: we sometimes call parent.Thaw() when
# parent isn't frozen. I think this has something
# to do with the child removal case. Not sure tho
# if parent and parent.IsFrozen():
# parent.Thaw()
pass
class Component:
def __init__(self, props):
self.props = props
self.state = None
# this gets set dynamically once mounted / instantiated
self.base = None
@classmethod
def render_component(cls, vdom, parent=None):
if cls.__name__ == vdom['type'].__name__:
instance = vdom['type'](vdom['props'])
instance.base = render(instance.render(), parent)
instance.base._instance = instance
instance.base._key = vdom['props'].get('key', None)
instance.component_did_mount()
return instance.base
else:
# TODO: what are the cases where this would be hit..?
return render(vdom['type'](vdom['props']), parent)
@classmethod
def patch_component(cls, dom, vdom):
parent = dom.GetParent()
# TODO: is any of this right..?
if hasattr(dom, '_instance') and type(dom._instance).__name__ == vdom['type'].__name__:
return patch(dom, dom._instance.render())
if cls.__name__ == vdom['type'].__name__:
return cls.render_component(vdom, parent)
else:
return patch(dom, vdom['type'](vdom['props']))
def component_did_mount(self):
pass
def component_will_unmount(self):
print('gooodbye!')
pass
def render(self):
return None
def set_state(self, next_state):
prev_state = self.state
self.state = next_state
p = self.base
        while p.GetParent() is not None:
p = p.GetParent()
p.Freeze()
patch(self.base, self.render())
p.Thaw()
def render(element, parent):
if isclass(element['type']) and issubclass(element['type'], wx.Object):
instance = mount(element, parent)
if element['props'].get('ref'):
element['props'].get('ref').update_ref(instance)
for child in element['props'].get('children', []):
sizer = instance.GetSizer()
if not sizer:
render(child, instance)
else:
sizer.Add(
render(child, instance),
child['props'].get('proportion', 0),
child['props'].get('flag', 0),
child['props'].get('border', 0)
)
return instance
elif type(element['type']) == type:
return element['type'].render_component(element, parent)
elif callable(element['type']):
# stateless functional component
return render(element['type'](element['props']), parent)
else:
# TODO: rest of this message
raise TypeError(f'''
An unknown type ("{element['type']}") was supplied as a renderable
element.
''')
class Ref:
def __init__(self):
self.instance = None
def update_ref(self, instance):
self.instance = instance
if __name__ == '__main__':
statictext = wx.StaticText
foo_elm = create_element('block', {}, children=[
create_element('statictext', {'value': 'Hey there, world!'}),
create_element('statictext', {'value': 'Hey there, again!'}),
create_element('block', {'orient': wx.HORIZONTAL}, children=[
create_element('statictext', {'value': 'One'}),
create_element('statictext', {'value': ' and Two!'}),
])
])
foo_elm1 = create_element('block', {}, children=[
create_element('statictext', {'value': 'One'}),
create_element('statictext', {'value': 'Two'})
])
foo_elm2 = create_element('block', {'orient': wx.HORIZONTAL}, children=[
create_element('statictext', {'value': 'Two'}),
create_element('statictext', {'value': 'One'}),
])
# foo_elm3 = create_element(Foo, {'item1': 'HELLOOOOO'})
# foo_elm4 = create_element(Bar, {})
#
# foo_elm5 = create_element(Bar, {'item1': 'HELLOOOOO'})
# foo_elm6 = create_element(Foo, {'item1': 'BYeeeee'})
# basic_app('My Hello App', foo_elm)
app = wx.App()
import wx.lib.inspection
wx.lib.inspection.InspectionTool().Show()
frame = wx.Frame(None, title='Test re-wx')
frame.SetSize((570, 520))
thing = render(create_element(statictext, {'label': 'Two'}), frame)
# thing = patch(thing, foo_elm6)
# t = Thread(target=andthen, args=(thing, foo_elm6))
# t.start()
box = wx.BoxSizer(wx.VERTICAL)
box.Add(thing, 1, wx.EXPAND)
frame.SetSizer(box)
frame.Show()
# frame.Fit()
for child in frame.GetChildren():
for ccc in child.GetChildren():
for cc in ccc.GetChildren():
cc.Layout()
ccc.Layout()
child.Layout()
app.MainLoop()
| 2.09375
| 2
|
apps/projetos/apps/sentinela/migrations/0006_auto_20210220_1547.py
|
mequetrefe-do-subtroco/web_constel
| 1
|
12779707
|
<reponame>mequetrefe-do-subtroco/web_constel<filename>apps/projetos/apps/sentinela/migrations/0006_auto_20210220_1547.py
# Generated by Django 3.0.7 on 2021-02-20 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sentinela', '0005_sentinelacontratos_status_sentinela'),
]
operations = [
migrations.AlterField(
model_name='sentinelacontratos',
name='tipo',
field=models.CharField(max_length=50),
),
]
| 1.257813
| 1
|
vmware_nsx/plugins/nsx_p/availability_zones.py
|
yebinama/vmware-nsx
| 0
|
12779708
|
<filename>vmware_nsx/plugins/nsx_p/availability_zones.py
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from vmware_nsx.common import availability_zones as common_az
from vmware_nsx.common import config
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.plugins.common_v3 import availability_zones as v3_az
from vmware_nsxlib.v3 import exceptions as nsx_lib_exc
from vmware_nsxlib.v3 import nsx_constants
LOG = log.getLogger(__name__)
DEFAULT_NAME = common_az.DEFAULT_NAME + 'p'
class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
def get_az_opts(self):
return config.get_nsxp_az_opts(self.name)
def init_defaults(self):
# use the default configuration
self.metadata_proxy = cfg.CONF.nsx_p.metadata_proxy
self.dhcp_profile = cfg.CONF.nsx_p.dhcp_profile
self.native_metadata_route = cfg.CONF.nsx_p.native_metadata_route
self.default_overlay_tz = cfg.CONF.nsx_p.default_overlay_tz
self.default_vlan_tz = cfg.CONF.nsx_p.default_vlan_tz
self.default_tier0_router = cfg.CONF.nsx_p.default_tier0_router
self.dns_domain = cfg.CONF.nsx_p.dns_domain
self.nameservers = cfg.CONF.nsx_p.nameservers
self.edge_cluster = cfg.CONF.nsx_p.edge_cluster
def _init_default_resource(self, nsxpolicy, resource_api, config_name,
filter_list_results=None,
auto_config=False,
is_mandatory=True,
search_scope=None):
# NOTE(annak): we may need to generalize this for API calls
# requiring path ids
name_or_id = getattr(self, config_name)
if not name_or_id:
if auto_config:
                # If the field is not specified, the system will auto-configure
                # it when only a single resource is present
resources = resource_api.list()
if filter_list_results:
resources = filter_list_results(resources)
if len(resources) == 1:
return resources[0]['id']
if is_mandatory:
if self.is_default():
raise cfg.RequiredOptError(config_name,
group=cfg.OptGroup('nsx_p'))
else:
msg = (_("No %(res)s provided for availability "
"zone %(az)s") % {
'res': config_name,
'az': self.name})
raise nsx_exc.NsxPluginException(err_msg=msg)
return None
try:
# Check if the configured value is the ID
resource_api.get(name_or_id, silent=True)
return name_or_id
except nsx_lib_exc.ResourceNotFound:
# Search by tags
if search_scope:
resource_type = resource_api.entry_def.resource_type()
resource_id = nsxpolicy.get_id_by_resource_and_tag(
resource_type,
search_scope,
name_or_id)
if resource_id:
return resource_id
# Check if the configured value is the name
resource = resource_api.get_by_name(name_or_id)
if resource:
return resource['id']
# Resource not found
if self.is_default():
raise cfg.RequiredOptError(config_name,
group=cfg.OptGroup('nsx_p'))
else:
msg = (_("Could not find %(res)s %(id)s for availability "
"zone %(az)s") % {
'res': config_name,
'id': name_or_id,
'az': self.name})
raise nsx_exc.NsxPluginException(err_msg=msg)
def translate_configured_names_to_uuids(self, nsxpolicy, nsxlib=None,
search_scope=None):
super(NsxPAvailabilityZone, self).translate_configured_names_to_uuids(
nsxpolicy)
self._default_overlay_tz_uuid = self._init_default_resource(
nsxpolicy, nsxpolicy.transport_zone, 'default_overlay_tz',
auto_config=True, is_mandatory=True,
filter_list_results=lambda tzs: [
tz for tz in tzs if tz['tz_type'].startswith('OVERLAY')],
search_scope=search_scope)
self._default_vlan_tz_uuid = self._init_default_resource(
nsxpolicy, nsxpolicy.transport_zone, 'default_vlan_tz',
auto_config=True, is_mandatory=False,
filter_list_results=lambda tzs: [
tz for tz in tzs if tz['tz_type'].startswith('VLAN')],
search_scope=search_scope)
self._default_tier0_router = self._init_default_resource(
nsxpolicy, nsxpolicy.tier0, 'default_tier0_router',
auto_config=True, is_mandatory=True,
search_scope=search_scope)
self._edge_cluster_uuid = self._init_default_resource(
nsxpolicy, nsxpolicy.edge_cluster, 'edge_cluster',
auto_config=True, is_mandatory=False,
search_scope=search_scope)
# Init dhcp config from policy or MP
self.use_policy_dhcp = False
if (nsxpolicy.feature_supported(
nsx_constants.FEATURE_NSX_POLICY_DHCP)):
try:
self._policy_dhcp_server_config = self._init_default_resource(
nsxpolicy, nsxpolicy.dhcp_server_config, 'dhcp_profile',
auto_config=False, is_mandatory=False,
search_scope=search_scope)
if self._policy_dhcp_server_config:
self.use_policy_dhcp = True
except Exception:
# Not found. try as MP profile
pass
self._native_dhcp_profile_uuid = None
if not self.use_policy_dhcp and nsxlib:
self._translate_dhcp_profile(nsxlib, search_scope=search_scope)
self.use_policy_md = False
if (nsxpolicy.feature_supported(
nsx_constants.FEATURE_NSX_POLICY_MDPROXY)):
# Try to initialize md-proxy from the policy
try:
self._native_md_proxy_uuid = self._init_default_resource(
nsxpolicy, nsxpolicy.md_proxy, 'metadata_proxy',
auto_config=True, is_mandatory=True,
search_scope=search_scope)
LOG.info("NSX-P az using policy MD proxy: %s",
self._native_md_proxy_uuid)
self.use_policy_md = True
except Exception:
LOG.info("NSX-P az could not use policy MD proxy. Using MP "
"one instead")
if not self.use_policy_md:
# Try to initialize md-proxy from the MP
if nsxlib:
self._translate_metadata_proxy(
nsxlib, search_scope=search_scope)
LOG.info("NSX-P az using MP MD proxy: %s",
self._native_md_proxy_uuid)
else:
self._native_md_proxy_uuid = None
class NsxPAvailabilityZones(common_az.ConfiguredAvailabilityZones):
default_name = DEFAULT_NAME
def __init__(self):
default_azs = cfg.CONF.default_availability_zones
super(NsxPAvailabilityZones, self).__init__(
cfg.CONF.nsx_p.availability_zones,
NsxPAvailabilityZone,
default_availability_zones=default_azs)
self.non_default_dns_domain = self.dns_domain_configured_non_default()
def dns_domain_configured_non_default(self):
for az in self.availability_zones.values():
if az.dns_domain and az.dns_domain != cfg.CONF.nsx_p.dns_domain:
return True
return False
| 1.859375
| 2
|
inbox/constants.py
|
future-haus/django-inbox
| 0
|
12779709
|
<reponame>future-haus/django-inbox
from functools import lru_cache
from django_enumfield import enum
class MessageMedium(enum.Enum):
APP_PUSH = 1
EMAIL = 2
SMS = 3
WEB_PUSH = 4
__labels__ = {
APP_PUSH: 'App Push',
EMAIL: 'Email',
SMS: 'SMS',
WEB_PUSH: 'Web Push'
}
@classmethod
@lru_cache(maxsize=None)
def keys(cls):
return [item[0].lower() for item in cls.items()]
class MessageLogStatus(enum.Enum):
NEW = 1
QUEUED = 2
SENT = 3
NOT_SENDABLE = 4
FAILED = 5
__labels__ = {
NEW: 'New',
QUEUED: 'Queued',
SENT: 'Sent',
NOT_SENDABLE: 'Not Sendable',
FAILED: 'Failed',
}
class MessageLogStatusReason(enum.Enum):
MISSING_TEMPLATE = 101
MISSING_ID = 102
NOT_VERIFIED = 201
PREFERENCE_OFF = 301
SEND_AT_NOT_IN_RANGE = 401
__labels__ = {
MISSING_TEMPLATE: 'Missing template',
MISSING_ID: 'Missing id',
NOT_VERIFIED: 'Not verified',
PREFERENCE_OFF: 'Pref off',
SEND_AT_NOT_IN_RANGE: 'Send at not in range'
}
| 2.015625
| 2
|
scripts/pre-commit.py
|
rcy1314/RSSerpent
| 0
|
12779710
|
#!/usr/bin/env python
import os
import subprocess
import sys
from pathlib import Path
basedir = Path(__file__).parent.parent
os.chdir(basedir)
deps = {
"flake8": [
"darglint",
"flake8-bugbear",
"flake8-builtins",
"flake8-comprehensions",
"flake8-datetimez",
"flake8-debugger",
"flake8-docstrings",
"flake8-eradicate",
"flake8-print",
"flake8-too-many",
"pep8-naming",
"tryceratops",
],
"mypy": [
"arrow",
"httpx",
"hypothesis",
"importlib-metadata",
"pydantic",
"pytest",
"pytest-asyncio",
"starlette",
"types-dataclasses",
],
}
if __name__ == "__main__":
subprocess.call(["pip", "install", "-U", *deps[sys.argv[1]]])
exit(subprocess.call([sys.argv[1], "."]))
| 1.945313
| 2
|
ufss/HLG/base_class.py
|
peterarose/UFSS
| 8
|
12779711
|
import yaml
import os
import numpy as np
class DataOrganizer:
def __init__(self,parameter_file_path):
self.base_path = parameter_file_path
self.load_params()
def load_params(self):
params_file = os.path.join(self.base_path,'params.yaml')
with open(params_file) as yamlstream:
self.params = yaml.load(yamlstream,Loader=yaml.SafeLoader)
def get_closest_index_and_value(self,value,array):
index = np.argmin(np.abs(array - value))
value = array[index]
return index, value
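    # Illustrative example (values are assumptions): for value=2.7 and
    # array=np.array([1.0, 2.0, 3.0]) this returns (2, 3.0), the index and
    # value of the closest entry.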
| 2.859375
| 3
|
flask_pancake/views.py
|
arthurio/flask-pancake
| 4
|
12779712
|
from flask import Blueprint, abort, current_app, render_template, request
from flask.json import jsonify
from jinja2 import TemplateNotFound
from .constants import EXTENSION_NAME
from .extension import FlaskPancake
bp = Blueprint("pancake", __name__, template_folder="templates")
def aggregate_data(ext: FlaskPancake):
if ext.group_funcs:
group_ids = list(ext.group_funcs.keys())
flags = [
{
"name": flag.name,
"default": flag.default,
"is_active": flag.is_active_globally(),
"groups": {
group_id: {
object_id: flag.is_active_group(
group_id=group_id, object_id=object_id
)
for object_id in func.get_candidate_ids()
}
for group_id, func in ext.group_funcs.items()
},
}
for flag in ext.flags.values()
]
else:
group_ids = []
flags = [
{
"name": flag.name,
"default": flag.default,
"is_active": flag.is_active_globally(),
"groups": {},
}
for flag in ext.flags.values()
]
samples = [
{"name": sample.name, "default": sample.default, "value": sample.get()}
for sample in ext.samples.values()
]
switches = [
{
"name": switch.name,
"default": switch.default,
"is_active": switch.is_active(),
}
for switch in ext.switches.values()
]
return {
"name": ext.name,
"group_ids": group_ids,
"flags": flags,
"samples": samples,
"switches": switches,
}
def aggregate_is_active_data(ext: FlaskPancake):
flags = [
{"name": flag.name, "is_active": flag.is_active()}
for flag in ext.flags.values()
]
samples = [
{"name": sample.name, "is_active": sample.is_active()}
for sample in ext.samples.values()
]
switches = [
{"name": switch.name, "is_active": switch.is_active()}
for switch in ext.switches.values()
]
return {
"flags": flags,
"samples": samples,
"switches": switches,
}
@bp.route("/overview", defaults={"pancake": EXTENSION_NAME})
@bp.route("/overview/<pancake>")
def overview(pancake):
ext = current_app.extensions.get(pancake)
if ext is None or not isinstance(ext, FlaskPancake):
return "Unknown", 404
context = aggregate_data(ext)
if request.accept_mimetypes.accept_html:
try:
return render_template("flask_pancake/overview.html", **context)
except TemplateNotFound: # pragma: no cover
abort(404)
else:
return jsonify(context)
@bp.route("/status", defaults={"pancake": EXTENSION_NAME})
@bp.route("/status/<pancake>")
def status(pancake):
ext = current_app.extensions.get(pancake)
if ext is None or not isinstance(ext, FlaskPancake):
return "Unknown", 404
context = aggregate_is_active_data(ext)
return jsonify(context)
| 2.21875
| 2
|
models/__init__.py
|
Jay2020-01/TextureGAN--Flask
| 5
|
12779713
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import os
import os.path as osp
def save_network(model, network_label, epoch, iteration, args):
dataset = args.data_path.split(os.sep)[-1]
save_filename = "{0}_net_{1}_{2}_{3}.pth".format(network_label, args.model, epoch, iteration)
model_save_dir = osp.join(args.save_dir, dataset)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
save_path = os.path.join(model_save_dir, save_filename)
model_state = {
'state_dict': model.cpu().state_dict(),
'epoch': epoch,
'iteration': iteration,
'model': args.model,
'color_space': args.color_space,
'batch_size': args.batch_size,
'dataset': dataset,
'image_size': args.image_size
}
torch.save(model_state, save_path)
model.cuda()
print("Saved {0} at epoch: {1}, iter: {2}".format(network_label, epoch, iteration))
def load_network(model, network_label, epoch, iteration, args):
dataset = args.data_path.split(os.sep)[-1]
save_filename = "{0}_net_{1}_{2}_{3}.pth".format(network_label, args.model, epoch, iteration)
# model_save_dir = osp.join(args.load_dir, dataset)
save_path = osp.join(args.load_dir, save_filename)
model_state = torch.load(save_path)
if "state_dict" in model_state:
model.load_state_dict(model_state["state_dict"])
else:
model.load_state_dict(model_state)
model_state = {
'state_dict': model.cpu().state_dict(),
'epoch': epoch,
'iteration': iteration,
'model': args.model,
'color_space': args.color_space,
'batch_size': args.batch_size,
'dataset': dataset,
'image_size': args.image_size
}
model.cuda(device_id=args.gpu)
print('Loaded {0} from epoch: {1} itr: {2}'.format(network_label, epoch, args.load))
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1 or classname.find('InstanceNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_norm_layer(norm_type):
if norm_type == 'batch':
norm_layer = nn.BatchNorm2d
elif norm_type == 'instance':
norm_layer = nn.InstanceNorm2d
else:
print('normalization layer [%s] is not found' % norm_type)
return norm_layer
def define_G(input_nc, output_nc, ngf, norm='batch', use_dropout=False, gpu_ids=[]):
netG = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
if len(gpu_ids) > 0:
netG.cuda(device_id=gpu_ids[0])
netG.apply(weights_init)
return netG
def define_D(input_nc, ndf, norm='batch', use_sigmoid=False, gpu_ids=[]):
netD = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
if use_gpu:
netD.cuda(device_id=gpu_ids[0])
netD.apply(weights_init)
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
# Defines the GAN loss which uses either LSGAN or the regular GAN.
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor.cuda())
# TODO define forward() for GANLoss?
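# Illustrative usage sketch (variable names are assumptions): with a discriminator
# netD the criterion is typically used as
#     criterion = GANLoss(use_lsgan=True)
#     loss_d = criterion(netD(real), True) + criterion(netD(fake.detach()), False)
# the ones/zeros target tensor is built lazily to match the prediction's size.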
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[]):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),
norm_layer(ngf, affine=True),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1),
norm_layer(ngf * mult * 2, affine=True),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2), affine=True),
nn.ReLU(True)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, x):
if self.gpu_ids and isinstance(x.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
else:
return self.model(x)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
conv_block = []
p = 0
assert(padding_type == 'zero')
p = 1
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim, affine=True),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim, affine=True)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the PatchGAN discriminator.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
kw = 4
padw = int(np.ceil((kw-1)/2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,
padding=padw), norm_layer(ndf * nf_mult,
affine=True), nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,
padding=padw), norm_layer(ndf * nf_mult,
affine=True), nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, x):
if len(self.gpu_ids) and isinstance(x.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
else:
return self.model(x)
class GramMatrix(nn.Module):
def forward(self, input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
features = input.view(a, b, c * d) # resize F_XL into \hat F_XL
G = torch.bmm(features, features.transpose(1, 2)) # compute the gram product
# normalize the values of the gram matrix
# by dividing by the number of element in each feature maps.
return G.div(b * c * d)
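    # Illustrative note (shapes are assumptions): an input of shape (1, 64, 32, 32)
    # yields a (1, 64, 64) Gram matrix normalised by b * c * d, which is typically
    # compared with nn.MSELoss for style/texture losses.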
class FeatureExtractor(nn.Module):
# Extract features from intermediate layers of a network
def __init__(self, submodule, extracted_layers):
super(FeatureExtractor, self).__init__()
self.submodule = submodule
self.extracted_layers = extracted_layers
def forward(self, x):
outputs = []
for name, module in self.submodule._modules.items():
x = module(x)
if name in self.extracted_layers:
outputs += [x]
return outputs + [x]
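    # Illustrative note: forward() returns the activations of every submodule whose
    # name appears in extracted_layers, followed by the final output, so the result
    # has one entry per matched layer plus one for the last layer's output.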
| 2.4375
| 2
|
climate/denv-2w.py
|
williamcaicedo/morbidityPrediction
| 0
|
12779714
|
<filename>climate/denv-2w.py
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
def show_prediction_curve(clf, cv, X, y):
fig = plt.figure()
all_predictions = []
for train,test in cv.split(X):
clf.fit(X[train], y[train])
predictions = clf.predict(X[test])
all_predictions = np.append(all_predictions, predictions)
all_predictions = y_scaler.inverse_transform(all_predictions)
y = y_scaler.inverse_transform(y)
r2 = metrics.r2_score(y, all_predictions)
print("R^2: {0}".format(r2))
print("Explained Variance: {0}".format(
metrics.explained_variance_score(y, all_predictions)))
mae = metrics.mean_absolute_error(y, all_predictions)
print("Mean Absolute Error: {0}".format(mae))
#fig = plt.figure()
plt.plot(all_predictions, label='predicted')
plt.plot(y, label='observed')
#plt.xticks(range(validation_size), Y_train_weeks[training_size:], rotation='vertical', size='small')
plt.title('Gaussian KRR dengue model (MAE: {0:.3f})'.format(mae))
plt.legend(loc="upper right")
plt.tight_layout()
#fig.savefig('Gaussian Kernel Ridge Regression Cartagena (climatic).png', format='png', dpi=500)
plt.show()
weeks_before = -5
f = open("datos_clima.csv")
data = np.loadtxt(f, delimiter=',')
train_data = data[:321, :]
test_data = data[321:, :]
x_scaler = StandardScaler()
y_scaler = StandardScaler()
X_train = train_data[:weeks_before, [3, 6]]
y_train = np.roll(train_data[:, 0], weeks_before)[:weeks_before]
X_train = x_scaler.fit_transform(X_train)
y_train = y_scaler.fit_transform(y_train)
print(len(X_train))
print(len(y_train))
model = KernelRidge(kernel='rbf', gamma=0.1)
cv = KFold(n_splits=5)
grid_search = GridSearchCV(model, cv=5, n_jobs=-1,
param_grid={"alpha": np.linspace(1e-15, 1, 100),
"gamma": np.linspace(1e-15, 5, 100)})
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)
model.set_params(**{'alpha': grid_search.best_params_['alpha'], 'gamma': grid_search.best_params_['gamma']})
show_prediction_curve(model, cv, X_train, y_train)
| 2.859375
| 3
|
src/clustering_hyperparameters/dataset/suite.py
|
mishra-sid/clustering_hyperparameters
| 2
|
12779715
|
from .loaders.loader import DatasetLoader
from omegaconf import OmegaConf
class DatasetSuite:
def __init__(self, name, cache_dir, datasets):
self.name = name
self.cache_dir = cache_dir
self.datasets = datasets
def fetch_and_cache_dataset(self, dataset_index):
loader_type = self.datasets[int(dataset_index)].pop('loader')
loader_cls = DatasetLoader.by_name(loader_type)
loader = loader_cls(**self.datasets[int(dataset_index)])
loader.fetch_and_cache(self.cache_dir)
@classmethod
def fetch_and_cache_from_cfg(cls, cfg, dataset_index):
resolved_cfg = OmegaConf.to_container(cfg, resolve=True)
suite = cls(resolved_cfg['name'], resolved_cfg['cache_dir'], resolved_cfg['datasets'])
suite.fetch_and_cache_dataset(dataset_index)
| 2.28125
| 2
|
fiber/__init__.py
|
leukeleu/django-fiber-multilingual
| 0
|
12779716
|
<gh_stars>0
__version__ = '1.2-multilingual'
| 1.039063
| 1
|
hand.py
|
shin-sforzando/PAC2020-RPS
| 0
|
12779717
|
from enum import Enum
class Hand(Enum):
G = "グー"
C = "チョキ"
P = "パー"
| 2.671875
| 3
|
app/grandchallenge/cases/image_builders/__init__.py
|
Tommos0/grand-challenge.org
| 0
|
12779718
|
from collections import namedtuple
ImageBuilderResult = namedtuple(
"ImageBuilderResult",
("consumed_files", "file_errors_map", "new_images", "new_image_files"),
)
| 1.710938
| 2
|
setup.py
|
Christoph-Raab/log-parser
| 0
|
12779719
|
<reponame>Christoph-Raab/log-parser<filename>setup.py
from setuptools import setup
setup(
name='ams-recruiting-4',
version='0.1.0',
packages=['app', 'app.tests'],
author='<NAME>',
description='Log Parser to parse a log and find top 10 hosts and http status code stats for first 7 days of july'
)
| 1.273438
| 1
|
src/create_train_valid_dataset.py
|
HuyVu0508/psychgenerator
| 0
|
12779720
|
<reponame>HuyVu0508/psychgenerator
import pandas as pd
from tqdm import tqdm
import numpy as np
import argparse
import random
# main function
def main():
### input arguments
print("Reading arguments.")
parser = argparse.ArgumentParser()
# important arguments
    parser.add_argument("--messages_csv", default=None, type=str, required=True,
                        help="Input file containing the messages used to create train/validate files for psychgenerator.")
parser.add_argument("--estimated_scores_csv", default=None, type=str, required=True,
help="Input file containing estimated scores which is used to create train/validate files for psychgenerator.")
parser.add_argument("--output", default=None, type=str, required=True,
help="Folder to contain created train/validate files for psychgenerator.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
args = parser.parse_args()
# input
print("Reading files.")
outcome_message_file = args.estimated_scores_csv
outcome_message_df = pd.read_csv(outcome_message_file, header=0, index_col=0)
outcome_list = list(outcome_message_df.columns[0:])
outcome_message_df['message_id'] = outcome_message_df.index
outcome_message_df['message_id'] = outcome_message_df['message_id'].astype(str)
outcome_message_df = outcome_message_df[["message_id"] + outcome_list]
outcome_message_df.columns = ['message_id'] + outcome_list
messages_file = args.messages_csv
messages_df = pd.read_csv(messages_file, header=0, index_col=None)
messages_df['message_id'] = messages_df['message_id'].astype(str)
# output
dataset_train_path = args.output + "/estimated_scores_train.csv"
dataset_valid_path = args.output + "/estimated_scores_valid.csv"
# print out results
print(outcome_message_df.head())
print(messages_df.head())
print("len(outcome_message_df): " + str(len(outcome_message_df)))
print("len(messages_df): " + str(len(messages_df)))
# processing messages
print("Processing messages.")
messages_df["message"] = messages_df["message"].str.lower()
# remove all non-alphabet => check this message_id: messages_df.loc[[item.startswith("today is the day you have made, i will rejoice and") for item in messages_df['message']]].message
## messages_df.loc[73470]
from string import printable
keep_index = []
for item in messages_df['message']:
key = True
for x in item:
if ord(x)>127:
key = False
break
keep_index.append(key)
messages_df = messages_df.loc[keep_index]
# replace line break "\n" with " "
messages_df['message'] = [str(item).replace("\\n"," ") for item in messages_df['message']]
# remove links http
messages_df = messages_df.loc[[True if ('http:' not in str(item)) else False for item in messages_df['message']]]
    # remove messages shorter than L characters
    L = 5
    messages_df = messages_df.loc[[True if len(item.strip(" ")) > L else False for item in messages_df['message']]]
print(messages_df.head())
# set index
print("Setting indexes.")
outcome_message_df.index = outcome_message_df['message_id']
messages_df['index'] = messages_df['message_id']
messages_df.index = messages_df['index']
print("outcome_message_df.index: ")
print(outcome_message_df.head())
print("messages_df.index: ")
print(messages_df.head())
# filter messages_df to include only messages that are in outcome_message_df
messages_df = messages_df.loc[[True if item in outcome_message_df.index else False for item in messages_df['message_id']]]
# merge data
print("Merging data.")
dataset_df = messages_df[["index", "message_id", "message"]]
for outcome in outcome_list:
print(outcome_message_df.loc[dataset_df["message_id"]][outcome].values)
dataset_df[outcome] = outcome_message_df.loc[dataset_df["message_id"]][outcome].values
dataset_df = dataset_df.fillna(0)
print(dataset_df.head())
# assign message_id as index
outcome_message_column = outcome_list
dataset_df = dataset_df[['index', 'message'] + outcome_message_column]
dataset_df.columns = ['message_id', 'message'] + outcome_message_column
# shuffle data or not
print("Shuffling data (or not).")
random.seed(args.seed)
dataset_df = dataset_df.sample(frac = 1)
print(dataset_df.head())
# divide to train/validate
print("Dividing to train/validate partition.")
n_train = int(0.9*len(dataset_df))
dataset_train_df = dataset_df[:n_train]
dataset_valid_df = dataset_df[n_train:]
print("len(dataset_train_df): " + str(len(dataset_train_df)))
print("len(dataset_valid_df): " + str(len(dataset_valid_df)))
# save to file
dataset_train_df.to_csv(dataset_train_path, header=True, index=False)
dataset_valid_df.to_csv(dataset_valid_path, header=True, index=False)
if __name__ == "__main__":
main()
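# --- Example invocation (added for illustration; the file names below are hypothetical). ---
#   python create_train_valid_dataset.py \
#       --messages_csv data/messages.csv \
#       --estimated_scores_csv data/estimated_scores.csv \
#       --output data \
#       --seed 42
# This writes estimated_scores_train.csv and estimated_scores_valid.csv (a 90/10 split)
# into the --output folder, as done in main() above.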
| 2.90625
| 3
|
delete_organism.py
|
diogo1790team/inphinity_DM
| 1
|
12779721
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 24 08:57:47 2018
@author: Diogo
"""
# NOTE: this script does not take into consideration the data for the datasets
from objects_new.Couples_new import *
from objects_new.Proteins_new import *
from objects_new.Gene_new import *
from objects_new.Protein_dom_new import *
from objects_new.Contigs_new import *
from objects_new.Organisms_new import *
from objects_new.WholeDNA_new import *
from objects_new.Strains_new import *
organism_id = 10608
#Organism
organism_obj = Organism.get_organism_by_id(organism_id)
print(organism_obj)
#couples
list_couples = Couple.get_all_couples_by_bacterium(organism_id)
print(len(list_couples))
#list genes
list_genes = Gene.get_all_Genes_by_organism_id(organism_id)
print(len(list_genes))
#list prots
list_proteins = Protein.get_all_Proteins_by_organism_id(organism_id)
print(len(list_proteins))
#list_prot_doms
try:
list_prot_doms = ProteinDom.get_all_protein_domain_by_protein_id(list_proteins[0].id_protein)
print(len(list_prot_doms))
except:
print("no proteins")
#list contigs
try:
list_contig = Contig.get_all_Contigs_bi_whole_DNA_id(organism_obj.fk_whole_genome)
print(len(list_contig))
except:
print("no whole contigs")
#whole dna
try:
whole_dna_obj = WholeDNA.get_whole_dna_by_id(organism_obj.fk_whole_genome)
print(whole_dna_obj)
except:
print("No whole dna")
########### delete part ############
Couple.remove_couple_by_id_bacterium(organism_obj.id_organism)
Gene.delete_gene_from_id_organism(organism_obj.id_organism)
for protein in list_proteins:
ProteinDom.remove_prot_dom_by_protein_id(protein.id_protein)
Protein.remove_protein_by_its_id(protein.id_protein)
Contig.remove_contig_by_FK_whole_dna(whole_dna_obj.id_wholeDNA)
Organism.remove_organism_by_id(organism_id)
Strain.remove_strain_by_id(organism_obj.fk_strain)
WholeDNA.remove_whole_dna_by_id(whole_dna_obj.id_wholeDNA)
| 2.578125
| 3
|
runme.py
|
ipendlet/SD2_DRP_ML
| 6
|
12779722
|
"""ESCALATE Capture
Main point of entry for EscalateCAPTURE
"""
import os
import sys
import ast
import xlrd
import logging
import argparse as ap
from log import init
from capture import specify
from capture import devconfig
from utils import globals, data_handling
def escalatecapture(rxndict, vardict):
"""Point of entry into the data pipeline
Manages processing calls to specify, generate, and prepare --> leads to execute
:param rxndict: dictionary of Excel-specified params
:param vardict: dictionary of dev-specified params
:return None:
"""
modlog = logging.getLogger('capture.escalatecapture')
modlog.info("Initializing specify")
specify.datapipeline(rxndict, vardict)
def linkprocess(linkfile):
"""TODO: what was this going to be for?"""
return
def build_rxndict(rxnvarfile):
"""Read Template file and return a dict representation
    The rxndict is a mapping of Variables => Values (column B => column D) in the
    uncommented rows of the reaction Excel file
:param rxnvarfile: path to excel file containing reaction specification
:return rxndict: dictionary representation of reaction specification
"""
rxndict = {}
varfile = rxnvarfile
wb = xlrd.open_workbook(varfile)
sheet = wb.sheet_by_name('WF1')
for i in range(sheet.nrows):
commentval = sheet.cell(i, 0).value
if commentval == '#':
continue
else:
cell_dict_value = sheet.cell(i, 3).value
cell_dict_id = sheet.cell(i, 1).value
cell_dict_type = sheet.cell(i, 4).value
            if cell_dict_id == "":
                continue  # skip rows without a variable name
if cell_dict_type == 'list':
rxndict[cell_dict_id] = ast.literal_eval(cell_dict_value)
else:
rxndict[cell_dict_id.strip()] = cell_dict_value
# cannot use globals.get_lab() here since it has not been set
# if rxndict['lab'] == 'MIT_PVLab':
# data_handling.get_user_actions(rxndict, sheet)
return rxndict
if __name__ == "__main__":
parser = ap.ArgumentParser(description='Generate experimental run data')
parser.add_argument('Variables', type=str,
help='Target xls file containing run information specified by the user\
format should be "filename.xlsx"')
parser.add_argument('-s', '--ss', default=0, type=int, choices=[0, 1, 2],
                        help='0 - generate quasi-random experiments, 1 - generate stateset\
for exp_1 user specified reagents, 2 - generate prototype run for\
exp_1 user specified reagents')
parser.add_argument('-d', '--debug', default=0, type=int, choices=[0,1,2],
help='0 - complete run generation and upload to google drive,\
1 - retain all tables from gdrive & keep runtime content,\
2 - full offline debugging (no uploading)')
args = parser.parse_args()
challengeproblem = args.ss
rxndict = build_rxndict(args.Variables)
rxndict['challengeproblem'] = challengeproblem
# vardict will hold variables configured by developers
vardict = {
'exefilename': args.Variables,
'challengeproblem': challengeproblem,
'debug': args.debug,
'lab': rxndict['lab']
}
if not os.path.exists('./localfiles'):
os.mkdir('./localfiles')
globals.set_lab(rxndict['lab'])
init.runuidgen(rxndict, vardict)
loggerfile = init.buildlogger(rxndict)
rxndict['logfile'] = loggerfile
# log the initial state of the run
init.initialize(rxndict, vardict)
# TODO: >>>> insert variable tests here <<<<
escalatecapture(rxndict, vardict)
    if vardict['debug'] == 0: # if not debugging
if os.path.exists("./mycred.txt"):
os.remove("./mycred.txt")
if os.path.exists("./capture/user_cli_variables.py"):
os.remove("./capture/user_cli_variables.py")
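# --- Example invocation (added for illustration; the workbook name is hypothetical). ---
#   python runme.py my_run.xlsx --ss 0 --debug 2
# --ss selects the generation mode (0/1/2) and --debug controls upload/cleanup behaviour,
# as defined by the argparse options above.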
| 2.75
| 3
|
ditto/tickets/api/views.py
|
Kvoti/ditto
| 0
|
12779723
|
<reponame>Kvoti/ditto<gh_stars>0
from django.shortcuts import get_object_or_404
from rest_framework import generics, response
from rest_framework.decorators import api_view
from .. import models
from . import serializers
class TicketList(generics.ListAPIView):
serializer_class = serializers.ViewTicketSerializer
#TODO permission_classes
def get_queryset(self):
return models.Ticket.objects.manageable(self.request.user)
@api_view(['POST'])
def claim(request, pk):
ticket = get_object_or_404(
models.Ticket.objects.claimable(request.user),
pk=pk)
ticket.claim(request.user)
# TODO what message to return here?
return response.Response('Ticket claimed')
@api_view(['POST'])
def resolve(request, pk):
ticket = get_object_or_404(
request.user.assigned_tickets.unresolved(),
pk=pk
)
ticket.resolve(request.user)
# TODO what message to return here?
return response.Response('Ticket resolved')
| 2.015625
| 2
|
cse_helpers.py
|
Steve-Hawk/nrpytutorial
| 0
|
12779724
|
""" CSE Partial Factorization and Post-Processing
The following script will perform partial factorization on SymPy expressions,
which should occur before common subexpression elimination (CSE) to prevent the
identification of undesirable patterns, and perform post-processing on the
resulting replaced/reduced expressions after the CSE procedure was applied.
"""
# Author: <NAME>
# Email: <EMAIL>
from SIMDExprTree import ExprTree
import sympy as sp
# Input: expr_list = single SymPy expression or list of SymPy expressions
# prefix = string prefix for variable names (i.e. replacement symbols)
# declare = declare negative one symbol (i.e. _NegativeOne_)
# factor = perform partial factorization (excluding negative one)
# Output: modified SymPy expression(s) where all integers and rationals were replaced
# with temporary placeholder variables that allow for partial factorization
def cse_preprocess(expr_list, prefix='', declare=False, factor=True, debug=False):
if not isinstance(expr_list, list):
expr_list = [expr_list]
def expand(a, n):
if n == 2: return sp.Mul(a, a, evaluate=False)
elif n > 2: return sp.Mul(expand(a, n - 1), a, evaluate=False)
return sp.Pow(expand(a, -n), -1, evaluate=False)
_NegativeOne_ = sp.Symbol(prefix + '_NegativeOne_')
map_sym_to_rat, map_rat_to_sym = {}, {}
for i, expr in enumerate(expr_list):
tree = ExprTree(expr)
# Expand power function, preventing replacement of exponent argument
for subtree in tree.preorder(tree.root):
subexpr = subtree.expr
if subexpr.func == sp.Pow:
exponent = subtree.children[1].expr
if exponent.func == sp.Integer and abs(exponent) > 1:
subtree.expr = expand(*subexpr.args)
tree.build(subtree, clear=True)
# Search through expression tree for integers/rationals
for subtree in tree.preorder():
subexpr = subtree.expr
if isinstance(subexpr, sp.Rational) and subexpr != sp.S.NegativeOne:
# If rational < 0, factor out negative and declare positive rational
sign = 1 if subexpr >= 0 else -1
subexpr *= sign
# Check whether rational was already declared, otherwise declare rational
try: repl = map_rat_to_sym[subexpr]
except KeyError:
p, q = subexpr.p, subexpr.q
var_name = prefix + '_Rational_' + str(p) + '_' + str(q) \
if q != 1 else prefix + '_Integer_' + str(p)
repl = sp.Symbol(var_name)
map_sym_to_rat[repl], map_rat_to_sym[subexpr] = subexpr, repl
subtree.expr = repl * sign
if sign < 0: tree.build(subtree, clear=True)
# If declare == True, then declare symbol for -1 or extracted negative
elif declare and subexpr == sp.S.NegativeOne:
try: subtree.expr = map_rat_to_sym[sp.S.NegativeOne]
except KeyError:
repl = _NegativeOne_
map_sym_to_rat[repl], map_rat_to_sym[subexpr] = subexpr, repl
subtree.expr = repl
# If exponent was replaced with symbol (usually -1), then back-substitute
for subtree in tree.preorder(tree.root):
subexpr = subtree.expr
if subexpr.func == sp.Pow:
exponent = subtree.children[1].expr
if exponent.func == sp.Symbol:
subtree.children[1].expr = map_sym_to_rat[exponent]
expr = tree.reconstruct()
# If factor == True, then perform partial factoring
if factor:
# Handle the separate case of function arguments
for subtree in tree.preorder():
if isinstance(subtree.expr, sp.Function):
for var in map_sym_to_rat:
if var != _NegativeOne_:
child = subtree.children[0]
child.expr = sp.collect(child.expr, var)
child.children.clear()
expr = tree.reconstruct()
# Perform partial factoring on the expression(s)
for var in map_sym_to_rat:
if var != _NegativeOne_:
expr = sp.collect(expr, var)
# If debug == True, then back-substitute everything and check difference
if debug:
def lookup_rational(arg):
if arg.func == sp.Symbol:
try: arg = map_sym_to_rat[arg]
except KeyError: pass
return arg
debug_tree = ExprTree(expr)
for subtree in debug_tree.preorder():
subexpr = subtree.expr
if subexpr.func == sp.Symbol:
subtree.expr = lookup_rational(subexpr)
debug_expr = tree.reconstruct()
expr_diff = expr - debug_expr
if sp.simplify(expr_diff) != 0:
raise Warning('Expression Difference: ' + str(expr_diff))
expr_list[i] = expr
if len(expr_list) == 1:
expr_list = expr_list[0]
return expr_list, map_sym_to_rat
# Input: cse_output = output from SymPy CSE with tuple format: (list of ordered pairs that
# contain substituted symbols and their replaced expressions, reduced SymPy expression)
# Output: output from SymPy CSE where postprocessing, such as back-substitution of addition/product
# of symbols, has been applied to the replaced/reduced expression(s)
def cse_postprocess(cse_output):
replaced, reduced = cse_output
i = 0
while i < len(replaced):
sym, expr = replaced[i]
# Search through replaced expressions for negative symbols
if (expr.func == sp.Mul and len(expr.args) == 2 and \
any((arg.func == sp.Symbol) for arg in expr.args) and \
any((arg == sp.S.NegativeOne or '_NegativeOne_' in str(arg)) for arg in expr.args)):
for k in range(i + 1, len(replaced)):
if sym in replaced[k][1].free_symbols:
replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
for k in range(len(reduced)):
if sym in reduced[k].free_symbols:
reduced[k] = reduced[k].subs(sym, expr)
# Remove the replaced expression from the list
replaced.pop(i)
if i != 0: i -= 1
# Search through replaced expressions for addition/product of 2 or less symbols
if ((expr.func == sp.Add or expr.func == sp.Mul) and 0 < len(expr.args) < 3 and \
all((arg.func == sp.Symbol or arg.is_integer or arg.is_rational) for arg in expr.args)) or \
(expr.func == sp.Pow and expr.args[0].func == sp.Symbol and expr.args[1] == 2):
sym_count = 0 # Count the number of occurrences of the substituted symbol
for k in range(len(replaced) - i):
# Check if the substituted symbol appears in the replaced expressions
if sym in replaced[i + k][1].free_symbols:
for arg in sp.preorder_traversal(replaced[i + k][1]):
if arg.func == sp.Symbol and str(arg) == str(sym):
sym_count += 1
for k in range(len(reduced)):
# Check if the substituted symbol appears in the reduced expression
if sym in reduced[k].free_symbols:
for arg in sp.preorder_traversal(reduced[k]):
if arg.func == sp.Symbol and str(arg) == str(sym):
sym_count += 1
# If the number of occurrences of the substituted symbol is 2 or less, back-substitute
if 0 < sym_count < 3:
for k in range(i + 1, len(replaced)):
if sym in replaced[k][1].free_symbols:
replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
for k in range(len(reduced)):
if sym in reduced[k].free_symbols:
reduced[k] = reduced[k].subs(sym, expr)
# Remove the replaced expression from the list
replaced.pop(i); i -= 1
i += 1
return replaced, reduced
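# --- Minimal usage sketch (added for illustration; not part of the original module). ---
# It chains cse_preprocess -> sympy.cse -> cse_postprocess on a toy expression, assuming
# the SIMDExprTree module imported above is available.
def _cse_demo():
    x, y = sp.symbols('x y')
    expr = sp.Rational(1, 3)*x**2 + sp.Rational(1, 3)*y**2 - x*y
    pre_expr, sym_map = cse_preprocess(expr, prefix='FD', factor=True)
    replaced, reduced = cse_postprocess(sp.cse(pre_expr))
    return replaced, reduced, sym_map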
| 2.984375
| 3
|
baiduocr.py
|
wangtonghe/hq-answer-assist
| 119
|
12779725
|
# coding=utf-8
from aip import AipOcr
import re
opt_aux_word = ['《', '》']
def get_file_content(file):
with open(file, 'rb') as fp:
return fp.read()
def image_to_str(name, client):
image = get_file_content(name)
text_result = client.basicGeneral(image)
print(text_result)
result = get_question_and_options(text_result)
return result
def init_baidu_ocr(baidu_ocr_config):
app_id, api_key, secret_key = baidu_ocr_config
client = AipOcr(app_id, api_key, secret_key)
return client
# {'words_result': [{'words': '11.代表作之一是《蒙娜丽莎的眼'},
# {'words': '泪》的歌手是?'}, {'words': '林志颖'},
# {'words': '林志炫'}, {'words': '林志玲'}],
# 'log_id': 916087026228727188, 'words_result_num': 5}
def get_question_and_options(text):
if 'error_code' in text:
print('请确保百度OCR配置正确')
exit(-1)
if text['words_result_num'] == 0:
return '', []
result_arr = text['words_result']
option_arr = []
question_str = ''
question_obj, options_obj = get_question(result_arr)
for question in question_obj:
word = question['words']
        word = re.sub(r'^\d+\.*', '', word)
question_str += word
for option in options_obj:
word = option['words']
if word.startswith('《'):
word = word[1:]
if word.endswith('》'):
word = word[:-1]
print(word)
option_arr.append(word)
print(question_str)
print(option_arr)
return question_str, option_arr
# First split the question and options at '?'; if there is no question mark, split by index instead
def get_question(result_arr):
result_num = len(result_arr)
index = -1
question_obj, options_obj = [], []
for i, result in enumerate(result_arr):
if '?' in result['words']:
index = i
break
if index > -1:
question_obj = result_arr[:index + 1]
options_obj = result_arr[index + 1:]
return question_obj, options_obj
else:
        # Empirically: 4 results mean a 1-line question, 5 or 6 mean a 2-line question, 8 or more mean the answer-reveal screen
if result_num <= 4:
question_obj = result_arr[:1]
options_obj = result_arr[1:]
elif result_num == 5:
question_obj = result_arr[:2]
options_obj = result_arr[2:]
elif result_num == 6: # 暂时
question_obj = result_arr[:2]
options_obj = result_arr[2:]
elif result_num == 7 or result_num == 8:
question_obj = result_arr[:3]
options_obj = result_arr[3:]
return question_obj, options_obj
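# --- Self-contained parsing sketch (added for illustration; not part of the original script). ---
# It feeds get_question_and_options() a mock OCR response shaped like the sample commented
# above image_to_str(), so no Baidu credentials are required.
def _demo_parse():
    mock_result = {
        'words_result': [
            {'words': '11.代表作之一是《蒙娜丽莎的眼'},
            {'words': '泪》的歌手是?'},
            {'words': '林志颖'},
            {'words': '林志炫'},
            {'words': '林志玲'}],
        'words_result_num': 5}
    return get_question_and_options(mock_result)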
| 2.734375
| 3
|
Document Summarization/engine.py
|
ShivamRajSharma/PyTorch
| 5
|
12779726
|
<reponame>ShivamRajSharma/PyTorch<filename>Document Summarization/engine.py
import torch
import torch.nn as nn
from tqdm import tqdm
def loss_fn(predicted, target, pad_idx):
return nn.CrossEntropyLoss(ignore_index=pad_idx)(predicted, target)
def train_fn(model, dataloader, optimizer, scheduler, device, pad_idx):
model.train()
running_loss = 0
for num_steps, data in tqdm(enumerate(dataloader), total=len(dataloader)):
for p in model.parameters():
p.grad = None
text = data["text_idx"].to(device)
headlines = data["headline_idx"].to(device)
predicted = model(text, headlines[:, :-1])
predicted = predicted.view(-1, predicted.shape[-1])
headlines = headlines[:, 1:].reshape(-1)
loss = loss_fn(predicted, headlines, pad_idx)
running_loss += loss.item()
loss.backward()
optimizer.step()
scheduler.step()
epoch_loss = running_loss/len(dataloader)
return epoch_loss
def eval_fn(model, dataloader, device, pad_idx):
model.eval()
running_loss = 0
with torch.no_grad():
for num_steps, data in tqdm(enumerate(dataloader), total=len(dataloader)):
text = data["text_idx"].to(device)
headlines = data["headline_idx"].to(device)
predicted = model(text, headlines[:, :-1])
predicted = predicted.view(-1, predicted.shape[-1])
headlines = headlines[:, 1:].reshape(-1)
loss = loss_fn(predicted, headlines, pad_idx)
running_loss += loss.item()
epoch_loss = running_loss/len(dataloader)
return epoch_loss
| 2.296875
| 2
|
basicsr/demo.py
|
ACALJJ32/BasicSR_ACALJJ32
| 2
|
12779727
|
import torch, os
from os import path as osp
from math import ceil
import sys
from yaml import load
from basicsr.data import build_dataloader, build_dataset
from basicsr.utils.options import parse_options
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
import cv2
from copy import deepcopy
import os.path as osp
from torch.nn.parallel import DataParallel, DistributedDataParallel
from basicsr.archs.edvr_arch import EDVR
def chop_forward(model, inp, shave=8, min_size=120000):
    # This part divides the input into 4 overlapping patches and stitches the results back together
b, n, c, h, w = inp.size()
h_half, w_half = h // 2, w // 2
h_size, w_size = h_half + shave, w_half + shave
mod_size = 4
if h_size % mod_size:
        h_size = ceil(h_size/mod_size)*mod_size # ceil() rounds up, keeping the patch size a multiple of mod_size
if w_size % mod_size:
w_size = ceil(w_size/mod_size)*mod_size
inputlist = [
inp[:, :, :, 0:h_size, 0:w_size],
inp[:, :, :, 0:h_size, (w - w_size):w],
inp[:, :, :, (h - h_size):h, 0:w_size],
inp[:, :, :, (h - h_size):h, (w - w_size):w]]
if w_size * h_size < min_size:
outputlist = []
for i in range(4):
with torch.no_grad():
input_batch = inputlist[i]
output_batch = model(input_batch)
outputlist.append(output_batch)
else:
outputlist = [
chop_forward(model, patch) \
for patch in inputlist]
scale=4
h, w = scale * h, scale * w
h_half, w_half = scale * h_half, scale * w_half
h_size, w_size = scale * h_size, scale * w_size
shave *= scale
with torch.no_grad():
output_ht = Variable(inp.data.new(b, c, h, w))
output_ht[:, :, 0:h_half, 0:w_half] = outputlist[0][:, :, 0:h_half, 0:w_half]
output_ht[:, :, 0:h_half, w_half:w] = outputlist[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
output_ht[:, :, h_half:h, 0:w_half] = outputlist[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
output_ht[:, :, h_half:h, w_half:w] = outputlist[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
return output_ht
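# --- Illustrative smoke test for chop_forward (added for clarity; not part of the original file). ---
# The toy model below is hypothetical: it simply 4x-upsamples the centre frame, mimicking the
# (B, N, C, H, W) -> (B, C, 4H, 4W) contract that chop_forward expects from the real EDVR model.
class _ToySR(nn.Module):
    def forward(self, x):
        b, n, c, h, w = x.size()
        return F.interpolate(x[:, n // 2], scale_factor=4, mode='nearest')
def _chop_forward_demo():
    frames = torch.rand(1, 5, 3, 64, 64)  # (batch, frames, channels, height, width)
    return chop_forward(_ToySR(), frames).shape  # expected: torch.Size([1, 3, 256, 256])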
def demo_pipeline(root_path):
# parse options, set distributed setting, set ramdom seed
opt, args = parse_options(root_path, is_train=False)
print("video path: ",args.video_path)
video_name = osp.basename(args.video_path).split(".")[0]
torch.backends.cudnn.benchmark = True
# create test dataset and dataloader
test_loaders = []
for _, dataset_opt in sorted(opt['datasets'].items()):
test_set = build_dataset(dataset_opt)
test_loader = build_dataloader(
test_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
test_loaders.append(test_loader)
# create model
model_config = opt['network_g']
    _ = model_config.pop("type", "Unknown")
model = EDVR(**model_config)
device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
model = model.to(device=device)
param_key='params'
    load_net = torch.load(opt["path"].get("pretrain_network_g", "Unknown"), map_location=lambda storage, loc: storage)
find_unused_parameters = opt.get('find_unused_parameters', False)
model = DistributedDataParallel(
model, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters)
# load weights
if param_key is not None:
if param_key not in load_net and 'params' in load_net:
param_key = 'params'
load_net = load_net[param_key]
for k, v in deepcopy(load_net).items():
load_net['module.' + k] = v
load_net.pop(k)
model.load_state_dict(load_net, strict=True)
model.eval()
# set min size
min_size = 921599
# test clips
for test_loader in test_loaders:
for idx, data in enumerate(test_loader):
frame_name = "{:08d}.png".format(idx)
frame_name = osp.join("sr_video", video_name, frame_name)
if osp.exists(frame_name): continue
height, width = data.size()[-2:]
if height * width < min_size:
output = model(data)
else:
output = chop_forward(model, data)
            print("imwrite {:08d}.png. | total: {}".format(idx, len(test_loader)))
output = torch.squeeze(output.data.cpu(), dim=0).clamp(0,1).permute(1,2,0).numpy()
cv2.imwrite(frame_name, cv2.cvtColor(output*255, cv2.COLOR_BGR2RGB), [cv2.IMWRITE_PNG_COMPRESSION, 0])
if __name__ == '__main__':
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
demo_pipeline(root_path)
| 2.078125
| 2
|
python/aether/src/tests/scratch_tests.py
|
MoysheBenRabi/setp
| 1
|
12779728
|
<gh_stars>1-10
t = bytearray([65]*3)
print(t[0:2])
| 1.632813
| 2
|
commerce/__init__.py
|
PragmaticMates/django-commerce
| 4
|
12779729
|
<gh_stars>1-10
default_app_config = 'commerce.apps.Config'
| 1.1875
| 1
|
tasks/EPAM/pytasks/task04-03.py
|
AleksNeStu/projects
| 2
|
12779730
|
<reponame>AleksNeStu/projects<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'AleksNeStu'
# Task04-03 (not mandatory):
# Create **very** simple [ORM] using data descriptors like in `Subtask 1` and [SQLite]3 python module to store fields in Data Base.
# After creating instances of model all fields mast be stored in SQLite DB.
# Example:
# ```python
# >>> from ormapi import Model, BirthdayField, NameField, PhoneField
# >>> class Person(Model):
# __table__ = "persons"
# ... name = NameField()
# ... birthday = BirthdayField()
# ... phone = PhoneField()
# ...
# >>> p = Person() # New row in table *persons* is created with default values for fields.
# >>> p.name = "Aleks" # Cell updated with new value.
# >>> # Or you can create a special method to save (commit) the values to DB like below.
# >>> p.phone = "375 25 5443322" # Not yet stored in DB.
# >>> p.save() # All changes committed to DB.
# ```
# Addition info:
# [ORM]: https://en.wikipedia.org/wiki/Object-relational_mapping
# [SQLite]: https://en.wikipedia.org/wiki/SQLite
# Input
from EPAM.ormapi import Model, BirthdayField, NameField, PhoneField
class Person(Model):
# [ORM] used data descriptors like in `Subtask 1` and [SQLite]3 python module to store fields in Data Base
__table__ = "persons"
name = NameField()
birthday = BirthdayField()
phone = PhoneField()
p = Person() # New row in table *persons* is created with default values for fields
p.name = "Aleks" # Cell updated with new value
# Create a special method to save (commit) the values to DB like below
p.phone = "375 25 5443322" # Not yet stored in DB
p.save() # All changes committed to DB
| 3.84375
| 4
|
baduk/command/remove_dead_stones.py
|
sarcoma/Baduk
| 3
|
12779731
|
<filename>baduk/command/remove_dead_stones.py
from baduk.command.command_types import UndoableCommand
class RemoveDeadStones(UndoableCommand):
def __init__(self, groups: set, dead_stone_groups: list):
self.groups = groups
self.group = None
self.dead_stone_groups = dead_stone_groups
def execute(self):
if len(self.dead_stone_groups) > 0:
for dead_stone_group in self.dead_stone_groups:
dead_stone_group.capture_stones()
self.groups.remove(dead_stone_group)
def undo(self):
for dead_stone_group in self.dead_stone_groups:
dead_stone_group.replace_captured_stones()
self.groups.add(dead_stone_group)
| 2.640625
| 3
|
bindings/pydeck-carto/tests/test_credentials.py
|
ehtick/deck.gl
| 0
|
12779732
|
from pydeck_carto import load_carto_credentials
def test_load_carto_credentials(requests_mock):
requests_mock.post(
"https://auth.carto.com/oauth/token", text='{"access_token":"asdf1234"}'
)
creds = load_carto_credentials("tests/fixtures/mock_credentials.json")
assert creds == {
"apiVersion": "v3",
"apiBaseUrl": "https://api.carto.com",
"accessToken": "asdf1234",
}
| 2.421875
| 2
|
fewshot/data/samplers/fewshot_sampler.py
|
sebamenabar/oc-fewshot-public
| 18
|
12779733
|
"""Regular few-shot episode sampler.
Author: <NAME> (<EMAIL>)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from fewshot.data.registry import RegisterSampler
from fewshot.data.samplers.incremental_sampler import IncrementalSampler
@RegisterSampler('fewshot')
class FewshotSampler(IncrementalSampler):
"""Standard few-shot learning sampler."""
def __init__(self, seed):
super(FewshotSampler, self).__init__(seed)
def sample_episode_classes(self, n, nshot=1, **kwargs):
"""See EpisodeSampler for documentation."""
return super(FewshotSampler, self).sample_episode_classes(
n, nshot_min=nshot, nshot_max=nshot)
| 2.171875
| 2
|
sirepo/pkcli/rcscon.py
|
mkeilman/sirepo
| 49
|
12779734
|
# -*- coding: utf-8 -*-
"""Wrapper to run RCSCON from the command line.
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo import simulation_db
from sirepo.template import sdds_util
from sirepo.template import template_common
import numpy as np
import py.path
import sirepo.template.rcscon as template
def run(cfg_dir):
template_common.exec_parameters()
template.extract_report_data(
py.path.local(cfg_dir),
simulation_db.read_json(template_common.INPUT_BASE_NAME),
)
def run_background(cfg_dir):
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
if data.report == 'elegantAnimation':
return _run_elegant_simulation(cfg_dir)
template_common.exec_parameters()
def _build_arrays():
sigma = sdds_util.read_sdds_pages(
'run_setup.sigma.sdds',
['s', 's1', 's12', 's2', 's3', 's34', 's4', 's5', 's56', 's6'],
)
errors = _error_values()
inputs = []
outputs = []
k = 0
for i in range(len(errors)):
for j in range(int(len(sigma.s) / len(errors))):
initial_index = k - j
inputs.append([
errors[i, 1], errors[i, 2], sigma.s[k],
sigma.s1[initial_index], sigma.s12[initial_index], sigma.s2[initial_index],
sigma.s3[initial_index], sigma.s34[initial_index], sigma.s4[initial_index],
sigma.s5[initial_index], sigma.s56[initial_index], sigma.s6[initial_index],
])
outputs.append([
sigma.s1[k], sigma.s12[k], sigma.s2[k],
sigma.s3[k], sigma.s34[k], sigma.s4[k],
sigma.s5[k], sigma.s56[k], sigma.s6[k],
])
k+=1
return np.asarray(inputs), np.asarray(outputs)
def _error_values():
pages = sdds_util.read_sdds_pages(
'error_control.error_log.sdds',
['ElementParameter', 'ParameterValue'],
True)
res = []
for page in range(len(pages.ElementParameter)):
values = PKDict()
for idx in range(len(pages.ElementParameter[page])):
p = pages.ElementParameter[page][idx]
v = pages.ParameterValue[page][idx]
if p not in values:
values[p] = []
values[p].append(v)
res.append(
[page, np.mean(np.asarray(values.PHASE)), np.sum(np.asarray(values.VOLT))],
)
return np.asarray(res)
def _run_elegant_simulation(cfg_dir):
import sirepo.pkcli.elegant
sirepo.pkcli.elegant.run_elegant()
inputs, outputs = _build_arrays()
common = [
's1', 's12', 's2',
's3', 's34', 's4',
's5', 's56', 's6',
]
in_cols = ['average phase', 'total volts', 'position']
in_header = ','.join(in_cols + ['initial ' + x for x in common])
out_header = ','.join(common)
np.savetxt('inputs.csv', inputs, delimiter=',', comments='', header=in_header)
np.savetxt('outputs.csv', outputs, delimiter=',', comments='', header=out_header)
| 1.867188
| 2
|
src/gat.py
|
simoneazeglio/DeepInf
| 258
|
12779735
|
<reponame>simoneazeglio/DeepInf
#!/usr/bin/env python
# encoding: utf-8
# File Name: gat.py
# Author: <NAME>
# Create Time: 2017/12/18 21:40
# TODO:
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from gat_layers import BatchMultiHeadGraphAttention
class BatchGAT(nn.Module):
def __init__(self, pretrained_emb, vertex_feature, use_vertex_feature,
n_units=[1433, 8, 7], n_heads=[8, 1],
dropout=0.1, attn_dropout=0.0, fine_tune=False,
instance_normalization=False):
super(BatchGAT, self).__init__()
self.n_layer = len(n_units) - 1
self.dropout = dropout
self.inst_norm = instance_normalization
if self.inst_norm:
self.norm = nn.InstanceNorm1d(pretrained_emb.size(1), momentum=0.0, affine=True)
# https://discuss.pytorch.org/t/can-we-use-pre-trained-word-embeddings-for-weight-initialization-in-nn-embedding/1222/2
self.embedding = nn.Embedding(pretrained_emb.size(0), pretrained_emb.size(1))
self.embedding.weight = nn.Parameter(pretrained_emb)
self.embedding.weight.requires_grad = fine_tune
n_units[0] += pretrained_emb.size(1)
self.use_vertex_feature = use_vertex_feature
if self.use_vertex_feature:
self.vertex_feature = nn.Embedding(vertex_feature.size(0), vertex_feature.size(1))
self.vertex_feature.weight = nn.Parameter(vertex_feature)
self.vertex_feature.weight.requires_grad = False
n_units[0] += vertex_feature.size(1)
self.layer_stack = nn.ModuleList()
for i in range(self.n_layer):
# consider multi head from last layer
f_in = n_units[i] * n_heads[i - 1] if i else n_units[i]
self.layer_stack.append(
BatchMultiHeadGraphAttention(n_heads[i], f_in=f_in,
f_out=n_units[i + 1], attn_dropout=attn_dropout)
)
def forward(self, x, vertices, adj):
emb = self.embedding(vertices)
if self.inst_norm:
emb = self.norm(emb.transpose(1, 2)).transpose(1, 2)
x = torch.cat((x, emb), dim=2)
if self.use_vertex_feature:
vfeature = self.vertex_feature(vertices)
x = torch.cat((x, vfeature), dim=2)
bs, n = adj.size()[:2]
for i, gat_layer in enumerate(self.layer_stack):
x = gat_layer(x, adj) # bs x n_head x n x f_out
if i + 1 == self.n_layer:
x = x.mean(dim=1)
else:
x = F.elu(x.transpose(1, 2).contiguous().view(bs, n, -1))
x = F.dropout(x, self.dropout, training=self.training)
return F.log_softmax(x, dim=-1)
| 2.0625
| 2
|
homeassistant/components/philips_js/__init__.py
|
elyobelyob/core
| 0
|
12779736
|
<gh_stars>0
"""The Philips TV integration."""
import asyncio
from datetime import timedelta
import logging
from typing import Any, Callable, Dict, Optional
from haphilipsjs import ConnectionFailure, PhilipsTV
from homeassistant.components.automation import AutomationActionType
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_VERSION, CONF_HOST
from homeassistant.core import Context, HassJob, HomeAssistant, callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN
PLATFORMS = ["media_player"]
LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Philips TV component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Philips TV from a config entry."""
tvapi = PhilipsTV(entry.data[CONF_HOST], entry.data[CONF_API_VERSION])
coordinator = PhilipsTVDataUpdateCoordinator(hass, tvapi)
await coordinator.async_refresh()
hass.data[DOMAIN][entry.entry_id] = coordinator
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class PluggableAction:
"""A pluggable action handler."""
_actions: Dict[Any, AutomationActionType] = {}
def __init__(self, update: Callable[[], None]):
"""Initialize."""
self._update = update
def __bool__(self):
"""Return if we have something attached."""
return bool(self._actions)
@callback
def async_attach(self, action: AutomationActionType, variables: Dict[str, Any]):
"""Attach a device trigger for turn on."""
@callback
def _remove():
del self._actions[_remove]
self._update()
job = HassJob(action)
self._actions[_remove] = (job, variables)
self._update()
return _remove
async def async_run(
self, hass: HomeAssistantType, context: Optional[Context] = None
):
"""Run all turn on triggers."""
for job, variables in self._actions.values():
hass.async_run_hass_job(job, variables, context)
class PhilipsTVDataUpdateCoordinator(DataUpdateCoordinator[None]):
"""Coordinator to update data."""
api: PhilipsTV
def __init__(self, hass, api: PhilipsTV) -> None:
"""Set up the coordinator."""
self.api = api
def _update_listeners():
for update_callback in self._listeners:
update_callback()
self.turn_on = PluggableAction(_update_listeners)
async def _async_update():
try:
await self.hass.async_add_executor_job(self.api.update)
except ConnectionFailure:
pass
super().__init__(
hass,
LOGGER,
name=DOMAIN,
update_method=_async_update,
update_interval=timedelta(seconds=30),
request_refresh_debouncer=Debouncer(
hass, LOGGER, cooldown=2.0, immediate=False
),
)
| 2.078125
| 2
|
src/pyorc/predicates.py
|
anilreddypuresoftware/pyorc
| 0
|
12779737
|
<reponame>anilreddypuresoftware/pyorc
import enum
from typing import Any, Optional
from .enums import TypeKind
class Operator(enum.IntEnum):
NOT = 0
OR = 1
AND = 2
EQ = 3
LT = 4
LE = 5
class Predicate:
def __init__(self, operator: Operator, left, right) -> None:
self.values = (operator, left, right)
def __or__(self, other) -> "Predicate":
self.values = (Operator.OR, self.values, other.values)
return self
def __and__(self, other) -> "Predicate":
self.values = (Operator.AND, self.values, other.values)
return self
def __invert__(self) -> "Predicate":
self.values = (Operator.NOT, self.values)
return self
class PredicateColumn:
def __init__(
self,
type_kind: TypeKind,
name: Optional[str] = None,
index: Optional[int] = None,
precision: Optional[int] = None,
scale: Optional[int] = None,
) -> None:
if not TypeKind.has_value(type_kind) or type_kind in (
TypeKind.BINARY,
TypeKind.LIST,
TypeKind.MAP,
TypeKind.UNION,
TypeKind.STRUCT,
):
raise TypeError("Invalid type for PredicateColumn: %s" % type_kind)
self.type_kind = type_kind
if self.type_kind == TypeKind.DECIMAL and (precision is None or scale is None):
raise ValueError("Both precision and scale must be set for Decimal type")
if name is not None and index is not None:
raise TypeError("Only one of the name or index parameter must be given")
if name is not None and not isinstance(name, str):
raise TypeError("Name parameter must be string")
if index is not None and not isinstance(index, int):
raise TypeError("Index parameter must be int")
self.name = name
self.index = index
self.precision = precision if precision is not None else 0
self.scale = scale if scale is not None else 0
def __eq__(self, other: Any) -> Predicate:
return Predicate(Operator.EQ, self, other)
def __ne__(self, other: Any) -> Predicate:
return ~Predicate(Operator.EQ, self, other)
def __lt__(self, other: Any) -> Predicate:
return Predicate(Operator.LT, self, other)
def __le__(self, other: Any) -> Predicate:
return Predicate(Operator.LE, self, other)
def __gt__(self, other: Any) -> Predicate:
return ~Predicate(Operator.LE, self, other)
def __ge__(self, other: Any) -> Predicate:
return ~Predicate(Operator.LT, self, other)
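# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# The column name "id" is a hypothetical example, and it assumes TypeKind.INT is a valid
# member of the TypeKind enum imported above.
def _example_predicate() -> Predicate:
    col = PredicateColumn(TypeKind.INT, name="id")
    # Operator overloads build a predicate tree: (id > 5) AND NOT (id == 7).
    return (col > 5) & ~(col == 7)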
| 2.859375
| 3
|
scripts/robot_position.py
|
DharminB/moving_obstacle_gazebo
| 0
|
12779738
|
<reponame>DharminB/moving_obstacle_gazebo<filename>scripts/robot_position.py
#! /usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from gazebo_msgs.msg import ModelStates
class RobotPosition(object):
    """Get the robot's position from /gazebo/model_states and republish it on /robot_position."""
_robot_name = "youbot"
def __init__(self):
self.robot_pose_publisher = rospy.Publisher('robot_position', PoseStamped, queue_size=5)
self.gazebo_pose_subscriber = rospy.Subscriber('gazebo/model_states', ModelStates, self.callback_function)
rospy.loginfo("Initiated node")
def callback_function(self, msg):
pose = PoseStamped()
pose.header.stamp = rospy.Time.now()
if self._robot_name in msg.name :
index = msg.name.index(self._robot_name)
pose.pose = msg.pose[index]
self.robot_pose_publisher.publish(pose)
if __name__ == '__main__':
rospy.init_node('robot_position')
RobotPosition()
rospy.spin()
| 2.46875
| 2
|
main.py
|
realtechsupport/c-plus-r
| 2
|
12779739
|
<reponame>realtechsupport/c-plus-r
#!/usr/bin/env python3
# main.py
# Catch & Release
# Flask interface for linux computers
# experiments in knowledge documentation; with an application to AI for ethnobotany
# spring 2020
# tested on ubuntu 18 LTS, kernel 5.3.0; Mac OS Catalina
#-------------------------------------------------------------------------------
#1. start virtual env
#2. launch the program
# python3 main.py ubuntu firefox debug, or
# python3 main.py mac chromium no-debug, for example.
# OS: ubuntu or mac. Browsers: chromium or firefox.
# issue: can not get flask-caching to work properly
# solution: Classic Cache Killer for Chrome; after install check options (right click, enable at start)
#------------------------------------------------------------------------------
import sys, os, time, shutil, glob
import eventlet, json
import random, threading, webbrowser
from flask import Flask, flash, current_app, send_file, render_template, request, redirect, url_for, session, send_from_directory
from flask_socketio import SocketIO, emit
from werkzeug.utils import secure_filename
from stt import *
from av_helper import *
from inputs import *
from utilities import *
from similarities import *
from pyt_utilities import *
app = Flask(__name__, template_folder="templates")
cwd = app.root_path
t_dir = cwd + '/tmp/'; a_dir = cwd + '/anotate/'
s_dir = cwd + '/static/'; i_dir = cwd + '/images/'
m_dir = cwd + '/models/'; c_dir = cwd + '/collection/'
r_dir = cwd + '/results/'; ar_dir = cwd + '/archive/'
f_dir = cwd + '/find/'; cl_dir = cwd + '/classify/'
te_dir = cwd + '/tests/'
dirs = [t_dir, a_dir, s_dir, i_dir, m_dir, c_dir, r_dir, ar_dir, f_dir, cl_dir, te_dir]
for dir in dirs:
if not os.path.exists(dir):
os.makedirs(dir)
app.config['SECRET_KEY'] = 'you-will-not-guess'
app.config['TMP'] = t_dir; app.config['STATIC'] = s_dir
app.config['ANOTATE'] = a_dir; app.config['IMAGES'] = i_dir
app.config['MODELS'] = m_dir; app.config['COLLECTION'] = c_dir
app.config['RESULTS'] = r_dir; app.config['FIND'] = f_dir
app.config['ARCHIVE'] = ar_dir; app.config['CLASSIFY'] = cl_dir
app.config['TESTS'] = te_dir
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
socketio = SocketIO(app, async_mode="eventlet")
#------------------------------------------------------------------------------
@app.route('/')
@app.route('/index')
def index():
session.clear()
formats = ('*.json', '*.webm', '*.wav', '*.mp4', '*.MP4', '*.txt', '*.zip', '*.prof', '*.mkv', '*.jpg', '*.csv', '*.pth')
locations = ('STATIC', 'ANOTATE', 'TMP', 'IMAGES', 'RESULTS')
exception = 'voiceover'
try:
removefiles(app, formats, locations, exception)
except:
pass
if not os.path.exists(i_dir):
os.makedirs(i_dir)
template = 'index.html'
return render_template(template)
#------------------------------------------------------------------------------
@app.route('/inputview', methods=['GET', 'POST'])
def inputview():
form = GetTextinputs()
template = 'inputview.html'
results = []
searchresults = []
revsource = ''
filename = ''
upl = False
if (request.method == 'POST'):
if("view" in request.form):
temp = session.get('s_filename', None)
if(temp == ''):
s_filename = ''
else:
s_filename = temp
file = request.files['vid']
filename = secure_filename(file.filename).lower()
revsource = os.path.join(app.config['STATIC'], filename)
if(s_filename == None):
print('no file yet..')
pass
elif((s_filename.split('.')[0]) == filename.split('.')[0]):
m = os.path.join(app.config['STATIC'], s_filename)
if(os.path.isfile(m)):
upl = True;
print('file already uploaded')
if(upl == False):
print('.... uploading .....')
file.save(revsource)
videoformat = (filename.split('.')[1]).lower()
print('this is the videoformat: ', videoformat)
session['s_filename'] = filename
#------------------------------------------
elif("capture" in request.form):
temp = session.get('s_filename', None)
if(temp == ''):
s_filename = ''
else:
s_filename = temp
file = request.files['vid']
filename = secure_filename(file.filename).lower()
revsource = os.path.join(app.config['STATIC'], filename)
if(s_filename == None):
print('no file yet..')
pass
elif((s_filename.split('.')[0]) == filename.split('.')[0]):
m = os.path.join(app.config['STATIC'], s_filename)
if(os.path.isfile(m)):
upl = True;
print('file already uploaded')
if(upl == False):
print('.... uploading .....')
file.save(revsource)
videoformat = (filename.split('.')[1]).lower()
print('this is the videoformat: ', videoformat)
destination = os.path.join(app.config['TMP'], filename)
shutil.copyfile(revsource, destination)
session['s_filename'] = filename
s_h = 0
s_m = form.s_m.data; s_s = form.s_s.data
e_h = 0
e_m = form.e_m.data; e_s = form.e_s.data
start_time = s_s + 60*s_m + 3600*s_h
end_time = e_s + 60*e_m + 3600*e_h
duration = end_time - start_time
start = seconds_to_hms(start_time); end = seconds_to_hms(end_time)
if('searchterm' in form.search.data):
searchterm = ''
else:
searchterm = form.search.data
try:
auth_file = request.files['auth']
auth_filename = secure_filename(auth_file.filename).lower()
authsource = os.path.join(app.config['STATIC'], auth_filename)
auth_file.save(authsource)
except:
print('no credential file selected - cannot capture text without valid [.json] access.\n\n')
return redirect(url_for('inputview'))
#now get the text from the set segment
os.chdir(app.config['TMP'])
maxattempts = 5
results, searchresults = extract_text(app, destination, form.lang.data, start_time, duration, form.chunk.data, form.conf.data, maxattempts, searchterm, authsource)
print('\n finished extracting text from video..\n')
#session variables limited to 4kb !!
session['s_results'] = results; session['s_searchresults'] = searchresults
for line in results:
print (line)
template = 'outputview.html'
return redirect(url_for('outputview'))
else:
results = None
searchresults = None
return(render_template(template, form=form, result=results, sresult=searchresults, showvideo=filename))
#-------------------------------------------------------------------------------
@app.route('/outputview', methods=['GET', 'POST'])
def outputview():
form = DownloadInputs()
template = 'outputview.html'
s_results = session.get('s_results', None)
s_searchresults = session.get('s_searchresults', None)
s_filename = session.get('s_filename', None)
if (request.method == 'POST'):
print('downloading s2tlog results..')
template = 'index.html'
if("download" in request.form):
resultspath = os.path.join(current_app.root_path, app.config['TMP'])
for name in glob.glob(resultspath + '*s2tlog*'):
log = name
return send_file(log, as_attachment=True)
return render_template(template, form=form, result=s_results, sresult=s_searchresults, showvideo=s_filename)
#-------------------------------------------------------------------------------
@app.route('/checkimagesview', methods=['GET', 'POST'])
def checkimagesview():
form = Checkimagesinputs()
current_video = ''
category = ''
dfiles = ''
key = ''
wordcollection = []
images = ''
template = 'checkimagesview.html'
key = session.get('s_key', None)
wordcollection = session.get('s_wordcollection', None)
videoname = session.get('s_videoname', None)
current_video = videoname.split('/')[-1]
category = session.get('s_category', None)
if((len(wordcollection) == 0) and (category == '')):
        print('\n\nNO MATCHES ... try again\n\n')
return render_template(template, form=form, category = key, videoname = current_video, images = images)
#use the first of the actually detected words when there are more than one
if(key != ''):
if(len(wordcollection) > 0):
key = wordcollection[0]
if(key == ''):
images = os.listdir(app.config['IMAGES'] + category)
else:
images = os.listdir(app.config['IMAGES'] + key)
lastentry_d = 0; firstentry_s = 0
if (request.method == 'POST'):
ssim_min = form.ssim_min.data;
lum_max = form.lum_max.data;
lum_min = form.lum_min.data;
if("add" in request.form):
if(key == ''):
destination = os.path.join(app.config['COLLECTION'], category)
else:
destination = os.path.join(app.config['COLLECTION'], key)
if not os.path.exists(destination):
os.makedirs(destination)
#find the highest number in the destination and the lowest in the source
dfiles = glob.glob(destination + '/*.jpg')
if(key == ''):
source = os.path.join(app.config['IMAGES'], category)
else:
source = os.path.join(app.config['IMAGES'], key)
sfiles = glob.glob(source + '/*.jpg')
try:
dfiles = [i.split('/')[-1] for i in dfiles]
dfiles = [i.split('.')[0] for i in dfiles]
dfiles = sorted([int(i) for i in dfiles])
lastentry_d = dfiles[-1]
except:
lastentry_d = 0
try:
ssfiles = [i.split('/')[-1] for i in sfiles]
ssfiles = [i.split('.')[0] for i in ssfiles]
ssfiles = sorted([int(i) for i in ssfiles])
firstentry_s = ssfiles[0]
except:
firstentry_s = 0
if(firstentry_s < lastentry_d):
                print('first entry in source smaller than last entry in destination ... renaming source images')
rename_all(source, lastentry_d)
#copy the files
rfiles = glob.glob(source + '/*.jpg')
rfiles = sorted(rfiles)
for file in rfiles:
shutil.copy(file, destination)
print('COPIED images to collection')
elif("divergent" in request.form):
print('removing fuzzy, over and underexposed images...')
try:
images2remove = session.get('s_images2remove', None)
imlist = json.loads(images2remove)
if(key == ''):
im_loc = os.path.join(app.config['IMAGES'], category) + '/'
else:
im_loc = os.path.join(app.config['IMAGES'], key) + '/'
im_ref = imlist[-1]
nbad = remove_fuzzy_over_under_exposed(im_ref, im_loc, images, ssim_min, lum_max, lum_min)
print('removed ' + str(nbad) + ' images...')
except:
print('no images selected to create reference...')
elif("delete" in request.form):
print('removing highlighted images...')
try:
images2remove = session.get('s_images2remove', None)
imlist = json.loads(images2remove)
for im in imlist:
if(key == ''):
im_s = os.path.join(app.config['IMAGES'], category, im)
else:
im_s = os.path.join(app.config['IMAGES'], key, im)
print(im_s)
try:
os.remove(im_s)
except:
print('image already removed')
except:
print('no images selected for removal...')
elif("remove" in request.form):
try:
                print('deleting the entire collection!')
shutil.rmtree(app.config['COLLECTION'])
except:
pass
if not os.path.exists(app.config['COLLECTION']):
os.makedirs(app.config['COLLECTION'])
elif("archive" in request.form):
#shutil.make_archive(app.config['COLLECTION'], 'zip', app.config['COLLECTION'])... works from commandline...
zfile = 'collection.zip'; timezone = 'America/New_York'
stamped_zfile = create_timestamp(zfile, timezone)
zipit(app.config['COLLECTION'], stamped_zfile)
source = os.path.join(app.config['COLLECTION'], stamped_zfile)
destination = os.path.join(app.config['ARCHIVE'], stamped_zfile)
shutil.move(source, destination)
elif("context" in request.form):
pass
elif("share" in request.form):
pass
#left click on mouse collects the images
else:
try:
imagenames = request.form['data']
session['s_images2remove'] = imagenames
except ValueError:
pass
if(key == ''):
return render_template(template, form=form, category = category, videoname = current_video, images = images)
else:
return render_template(template, form=form, category = key, videoname = current_video, images = images)
#-------------------------------------------------------------------------------
@app.route('/checkimagesview/<filename>')
def send_image(filename):
category = session.get('s_category', None)
key = session.get('s_key', None)
wordcollection = []
wordcollection = session.get('s_wordcollection', None)
#use the first of the detected words, when there are more than one
if(key != ''):
if(len(wordcollection) > 0):
key = wordcollection[0]
#print('Revised key: ', key)
if(key == ''):
location = 'images/' + category
else:
location = 'images/' + key
return send_from_directory(location, filename)
#-------------------------------------------------------------------------------
@app.route('/labelimagesview', methods=['GET', 'POST'])
def labelimagesview():
form = VideoLabelInputs()
revsource = ''
videoname = ''
file = ''
category = ''
key = ''
wordcollection = []
template = 'labelimagesview.html'
if (request.method == 'POST'):
if("load" in request.form):
file = request.files['vid']
videoname = secure_filename(file.filename).lower()
revsource = os.path.join(app.config['STATIC'], videoname)
file.save(revsource)
elif("bulk" in request.form):
print('...labelling bulk category')
framerate = form.framerate.data
file = request.files['vid']
videoname= secure_filename(file.filename)
videonamepath = os.path.join(app.config['TMP'], videoname)
file.save(videonamepath)
category = form.folder.data
savepath = os.path.join(app.config['IMAGES'], category) + '/'
create_images_from_video(savepath, category, videonamepath, framerate)
print('FINISHED bulk saving')
elif("audio" in request.form):
print('\n...labelling with keys')
key = form.label.data
language = form.lang.data
print('searching for this term and language: ', key, language)
try:
file = request.files['auth']
auth = secure_filename(file.filename)
authsource = os.path.join(app.config['STATIC'], auth)
file.save(authsource)
except:
print('no credential file selected - cannot capture text without a valid [.json] credential ')
return redirect(url_for('labelimagesview'))
file = request.files['vid']
videoname= secure_filename(file.filename)
videonamepath = os.path.join(app.config['TMP'], videoname)
file.save(videonamepath)
nimages = form.nimages.data
tconfidence = form.conf.data
maxattempts = 5
wordcollection = label_images(app, videonamepath, language, authsource, key, maxattempts, nimages, tconfidence)
print('FINISHED labelling by keys')
session['s_videoname'] = videoname
session['s_category'] = category
session['s_key'] = key
session['s_wordcollection'] = wordcollection
return render_template(template, form=form, showvideo=videoname)
#-------------------------------------------------------------------------------
@app.route('/testclassifiers', methods=['GET', 'POST'])
def testclassifiers():
form = TestClassifiers()
choice = ''; input = ''; images = ''; files = ''; c_classifier = ''
result = ''; moreresults = ''; tp_vals = []
template = 'testclassifiers.html'
if (request.method == 'POST'):
testcollection = form.testcollection.data
print('\n\ntest collection is: ', testcollection)
location = os.path.join(app.config['FIND'], testcollection)
zfile = os.path.join(app.config['FIND'], testcollection) + '.zip'
try:
path, dirs, files = next(os.walk(location))
except:
pass
if(len(files) == 0):
if(testcollection == 'bali26samples'):
archive = bali26_samples_zip
print('downloading the samples...')
wget.download(archive, zfile)
shutil.unpack_archive(zfile, app.config['FIND'], 'zip')
os.remove(zfile)
else:
#here other archives
archive = bali26_samples_zip
images = os.listdir(location)
if("display" in request.form):
session['s_testcollection'] = testcollection
#display = True; session['s_choices'] = ''
elif(("classify" in request.form) and (session.get('s_choices', None) != '' )):
classifier = form.classifier.data
session['s_testcollection'] = testcollection
if(testcollection != 'bali26samples'):
print('\nother collections not yet ready...\n\n')
return redirect(url_for('testclassifiers'))
if(testcollection == 'bali26samples'):
class_names = bali26_class_names
if(classifier == 'bali26_resnet152'):
archive = bali26_resnet152
elif(classifier == 'bali26_resnet152_np'):
archive = bali26_resnet152_notpretrained
elif(classifier == 'bali26_resnext50'):
archive = bali26_resnext50
elif(classifier == 'bali26_resnext50_np'):
archive = bali26_resnext50_notpretrained
elif(classifier == 'bali26_alexnet'):
archive = bali26_alexnet
elif(classifier == 'bali26_alexnet_np'):
archive = bali26_alexnet_notpretrained
else:
archive = bali26_alexnet
classifier = classifier + '.pth'
print('selected classifier: ', classifier)
path, dirs, files = next(os.walk(app.config['MODELS']))
if(classifier in files):
print('already have the matching classifier...\n')
pass
else:
print('getting the matching classifier...\n')
modelname = archive.split('/')[-1]
wget.download(archive, (os.path.join(app.config['MODELS'], modelname)))
try:
tchoices = session.get('s_choices', None)
choice = json.loads(tchoices)
image_path = os.path.join(app.config['FIND'], testcollection, choice)
pclassifier = os.path.join(app.config['MODELS'], classifier)
processor='cpu'; tk=3; threshold=90;
model = load_checkpoint(pclassifier, processor)
predictions, percentage, outcategory = predict_image(image_path, model, predict_transform, class_names, tk, processor)
tp_indeces = predictions[1].tolist()[0]
for k in tp_indeces:
tp_vals.append(class_names[k])
input = 'selected image: ' + choice
c_classifier = 'selected classifier: ' + classifier
result = 'best prediction: ' + outcategory + ' (with confidence level ' + percentage + '%)'
moreresults = 'top three predictions: ' + str(tp_vals)
except:
print('exception encountered ... click display images, pick an image (with left mouse button) before you classify ... ')
return redirect(url_for('testclassifiers'))
elif("context" in request.form):
images = ''
#left click on mouse collects the images
else:
try:
imagenames = request.form['data']
session['s_choices'] = imagenames
except:
print('no image selected to classify')
pass
return render_template(template, form=form, images=images, result=result, moreresults=moreresults, classifier=c_classifier, input=input)
#-------------------------------------------------------------------------------
@app.route('/testclassifiers/<filename>')
def classify_image(filename):
testcollection = session.get('s_testcollection', None)
location = os.path.join(app.config['FIND'], testcollection)
return send_from_directory(location, filename)
#-------------------------------------------------------------------------------
@app.route('/showinfoview')
def showinfoview():
template = 'showinfoview.html'
return render_template(template)
#-------------------------------------------------------------------------------
@app.route('/prepareview', methods=['GET', 'POST'])
def prepareview():
form = PrepareInputs()
template = 'prepareview.html'
chunkresult = '...'
filename = ''
if (request.method == 'POST'):
if("samples" in request.form):
zfile = os.path.join(app.config['TESTS'], 'tests.zip')
lzfile = 'tests.zip'
path, dirs, files = next(os.walk(app.config['TESTS']))
if(lzfile in files):
print('\nalready downloaded the sample data..')
pass
else:
try:
wget.download(tests_zip, zfile)
shutil.unpack_archive(zfile, app.config['TESTS'], 'zip')
except:
print('can not get the samples...')
return redirect(url_for('prepareview'))
elif("chunk" in request.form):
chunksize = form.chunk.data
try:
file = request.files['vid']
filename = secure_filename(file.filename).lower()
#print(filename, chunksize)
destination = os.path.join(app.config['TMP'], filename)
file.save(destination)
location = app.config['TMP']
nfiles = chunk_large_videofile(destination, chunksize, location)
chunkresult = 'result: ' + str(nfiles) + ' files of max ' + str(chunksize) + ' min...'
print('\nfinished chunking...')
except:
print('Something went wrong...file less than 1 min long? No file chosen? Supported formats are .webm and .mp4 only. Try again...')
return redirect(url_for('prepareview'))
else:
print('Exception - please retry')
return render_template(template, form=form, result=chunkresult)
#-------------------------------------------------------------------------------
@app.route('/audioanotate', methods=['GET', 'POST'])
def audioanotate():
form = AnotateInputs()
template = 'audioanotate.html'
filename = ''
cut_video = ''
session['s_filename_w'] = ''
upl = False
if (request.method == 'POST'):
if("load" in request.form):
print('in recording section')
#mic = form.mic.data
mic_d = form.mic_device.data
mic_c = form.mic_card.data
cardn = mic_c; devicen = mic_d
try:
file = request.files['vid']
filename = secure_filename(file.filename).lower()
print(filename)
revsource = os.path.join(app.config['STATIC'], filename)
ansource = os.path.join(app.config['ANOTATE'], filename)
file.save(revsource)
time.sleep(1)
copyfile(revsource, ansource)
except:
print('please select a video...')
return redirect(url_for('audioanotate'))
#cardn, devicen = mic_info(mic)
print('selected mic card and device: ', cardn, devicen)
session['cardn'] = cardn
session['devicen'] = devicen
session['s_filename'] = filename
elif("remove" in request.form):
print('removing old voice-over assets')
tpatterns = ('*.webm', '*.wav', '*.mp4', '*.MP4', '*.mkv')
tlocations = ('STATIC', 'ANOTATE', 'TMP')
texception = 'nothing'
try:
removefiles(app, tpatterns, tlocations, texception)
except:
return redirect(url_for('audioanotate'))
elif("micinfo" in request.form):
result = mic_device_info()
print('\n\n Microphone hardware available: ', result)
elif("check" in request.form):
#mic = form.mic.data
mic_d = form.mic_device.data
mic_c = form.mic_card.data
cardn = mic_c; devicen = mic_d
print('selected mic card and device:', mic_c, mic_d)
#cardn, devicen = mic_info(mic)
session['cardn'] = cardn
session['devicen'] = devicen
dur = 3;
output = os.path.join(app.config['TMP'], 'audiocheck.wav')
print('recording 3 seconds of audio for system check... ')
record_and_playback(dur, cardn, devicen, output)
return redirect(url_for('audioanotate'))
if (request.method == 'POST'):
if("segment" in request.form):
print('in SEGMENT section')
sa_h = 0; sa_m = form.sa_m.data; sa_s = form.sa_s.data
ea_h = 0; ea_m = form.ea_m.data; ea_s = form.ea_s.data
start_cut = sa_s + 60*sa_m + 3600*sa_h
end_cut = ea_s + 60*ea_m + 3600*ea_h
duration = end_cut - start_cut
session['start_cut'] = start_cut
session['end_cut'] = end_cut
video = session.get('s_filename')
try:
cut_video = get_segment(video, start_cut, end_cut)
except:
print('something wrong - did you set start and end time correctly?')
return redirect(url_for('audioanotate'))
session['s_filename_w'] = cut_video
session['s_filename'] = cut_video
videoformat = (video.split('.')[1]).lower()
if(videoformat == 'mp4'):
cut_video = convert_mp4_to_webm_rt(cut_video)
print('conversion of segment from .mp4 to .webm finished.....')
session['s_filename'] = cut_video
source = os.path.join(app.config['ANOTATE'], cut_video)
destination = os.path.join(app.config['STATIC'], cut_video)
copyfile(source, destination)
filename = cut_video
print('voiceover file now in anotate folder.\n')
return render_template(template, form=form, showvideo=filename)
#-------------------------------------------------------------------------------
@socketio.on('event')
def handle_my_custom_namespace_event(jsondata):
print('received json: ' + str(jsondata))
@socketio.on('response')
def handle_response(jsondata):
card = session.get('cardn')
device = session.get('devicen')
vid_display = session.get('s_filename')
video = session.get('s_filename_w')
#print('In the handle response... the display video is now: ', vid_display)
st = session.get('start_cut')
end = session.get('end_cut')
#get data from the user
(k,v), = jsondata.items()
os.chdir(app.config['ANOTATE'])
if(k == 'start'):
if (session.get('nrecordings') == None):
session['nrecordings'] = 1
else:
#print('KEY, VALUE: ', k,v)
duration = get_video_length(video)
print('actual video duration: ', duration)
newaudio = 'naudio_' + video.split('.')[0] + '.wav'
voiceover_recording(duration, card, device, newaudio)
#remove noise if necessary ...crecording = cleanrecording(output)
vformat = video.split('.')[1]
if (vformat == 'mp4'):
updated_vformat = '.mp4'
else:
updated_vformat = '.mkv'
combination = 'voiceover_' + str(st) + '-' + str(end) + '_' + video.split('.')[0] + updated_vformat
result = combine_recordingvideo(newaudio, video, combination)
print(result)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
if(len(sys.argv) < 3):
print('\nplease provide OS, browser and debug choice when you start the program.')
print('\'python3 main.py ubuntu firefox debug\', or \'python3 main.py mac chrome no-debug\' for example.\n')
print('OS: Ubuntu or MAC. Ubuntu browsers: chromium or firefox; MAC only chrome: \'python3 main.py mac chrome no-debug\' \n')
sys.exit(2)
else:
try:
osy = sys.argv[1]
browser = sys.argv[2]
debug_mode = sys.argv[3]
print('\n> operating system: ', osy)
print('> browser: ', browser)
print('> mode: ', debug_mode)
except:
print('... using default ubuntu and chromium in debug mode ...')
osy = 'ubuntu'
browser = 'chromium-browser'
debug_mode = 'debug'
port = 5000
url = "http://127.0.0.1:{0}".format(port)
#Two browsers supported in ubuntu; only one (Chrome) in macOS
if('firefox' in browser):
browser = 'firefox'
else:
browser = 'chromium-browser'
if('ubuntu' in osy):
threading.Timer(1.25, lambda: webbrowser.get(browser).open(url) ).start()
else:
#launch Chrome on macOS
threading.Timer(1.25, lambda: webbrowser.get('open -a /Applications/Google\ Chrome.app %s').open(url)).start()
#Chromium, if installed...
#threading.Timer(1.25, lambda: webbrowser.get('open -a /Applications/Chromium.app %s').open(url)).start()
if(debug_mode == 'debug'):
socketio.run(app, port=port, debug=True)
else:
socketio.run(app, port=port, debug=False)
#-------------------------------------------------------------------------------
| 1.835938
| 2
|
netbox/dcim/migrations/0093_auto_20200205_0157.py
|
sharknasuhorse/netbox
| 0
|
12779740
|
# Generated by Django 2.2.10 on 2020-02-05 01:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0092_fix_rack_outer_unit'),
]
operations = [
migrations.AddField(
model_name='device',
name='cpus',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='device',
name='disk',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='device',
name='memory',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
| 1.703125
| 2
|
tensorflow/lite/micro/examples/magic_wand_flourish/train/LRF/lr_finder.py
|
nancibles/tensorflow
| 0
|
12779741
|
<gh_stars>0
from tensorflow.keras.callbacks import Callback
import tensorflow.keras.backend as K
import numpy as np
import matplotlib.pyplot as plt
class LRFinder(Callback):
"""
Up-to date version: https://github.com/WittmannF/LRFinder
Example of usage:
from keras.models import Sequential
from keras.layers import Flatten, Dense
from keras.datasets import fashion_mnist
!git clone https://github.com/WittmannF/LRFinder.git
from LRFinder.keras_callback import LRFinder
# 1. Input Data
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
mean, std = X_train.mean(), X_train.std()
X_train, X_test = (X_train-mean)/std, (X_test-mean)/std
# 2. Define and Compile Model
model = Sequential([Flatten(),
Dense(512, activation='relu'),
Dense(10, activation='softmax')])
model.compile(loss='sparse_categorical_crossentropy', \
metrics=['accuracy'], optimizer='sgd')
# 3. Fit using Callback
lr_finder = LRFinder(min_lr=1e-4, max_lr=1)
model.fit(X_train, y_train, batch_size=128, callbacks=[lr_finder], epochs=2)
"""
def __init__(self, min_lr, max_lr, mom=0.9, stop_multiplier=None,
reload_weights=True, batches_lr_update=5):
self.min_lr = min_lr
self.max_lr = max_lr
self.mom = mom
self.reload_weights = reload_weights
self.batches_lr_update = batches_lr_update
if stop_multiplier is None:
self.stop_multiplier = -20 * self.mom / 3 + 10  # 4 if mom=0.9, 10 if mom=0
else:
self.stop_multiplier = stop_multiplier
def on_train_begin(self, logs={}):
p = self.params
try:
n_iterations = p['epochs'] * p['samples'] // p['batch_size']
except:
n_iterations = p['steps'] * p['epochs']
self.learning_rates = np.geomspace(self.min_lr, self.max_lr, \
num=n_iterations // self.batches_lr_update + 1)
self.losses = []
self.iteration = 0
self.best_loss = 0
if self.reload_weights:
self.model.save_weights('tmp.hdf5')
def on_batch_end(self, batch, logs={}):
loss = logs.get('loss')
if self.iteration != 0: # Make loss smoother using momentum
loss = self.losses[-1] * self.mom + loss * (1 - self.mom)
if self.iteration == 0 or loss < self.best_loss:
self.best_loss = loss
if self.iteration % self.batches_lr_update == 0: # Evaluate each lr over batches_lr_update batches (default 5)
if self.reload_weights:
self.model.load_weights('tmp.hdf5')
lr = self.learning_rates[self.iteration // self.batches_lr_update]
K.set_value(self.model.optimizer.lr, lr)
self.losses.append(loss)
if loss > self.best_loss * self.stop_multiplier: # Stop criteria
self.model.stop_training = True
self.iteration += 1
def on_train_end(self, logs=None):
if self.reload_weights:
self.model.load_weights('tmp.hdf5')
plt.figure(figsize=(12, 6))
plt.plot(self.learning_rates[:len(self.losses)], self.losses)
plt.xlabel("Learning Rate")
plt.ylabel("Loss")
plt.xscale('log')
plt.show()
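# A minimal tf.keras usage sketch, mirroring the docstring example above
# (the model, data and learning-rate bounds are illustrative assumptions):
#
#   lr_finder = LRFinder(min_lr=1e-5, max_lr=1e-1)
#   model.fit(x_train, y_train, batch_size=128, epochs=2, callbacks=[lr_finder])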
| 2.765625
| 3
|
cpmpy/solvers/gurobi.py
|
vishalbelsare/cpmpy
| 0
|
12779742
|
<gh_stars>0
#!/usr/bin/env python
"""
Interface to the python 'gurobi' package
Requires that the 'gurobipy' python package is installed:
$ pip install gurobipy
as well as the Gurobi bundled binary packages, downloadable from:
https://www.gurobi.com/
In contrast to other solvers in this package, Gurobi is not free to use and requires an active licence
You can read more about available licences at https://www.gurobi.com/downloads/
===============
List of classes
===============
.. autosummary::
:nosignatures:
CPM_gurobi
==============
Module details
==============
"""
from .solver_interface import SolverInterface, SolverStatus, ExitStatus
from ..expressions.core import *
from ..expressions.variables import _BoolVarImpl, NegBoolView, _IntVarImpl, _NumVarImpl, intvar
from ..transformations.flatten_model import flatten_constraint, flatten_objective, get_or_make_var
from ..transformations.get_variables import get_variables
from ..transformations.linearize import linearize_constraint, only_positive_bv
from ..transformations.reification import only_bv_implies
try:
import gurobipy as gp
except ImportError as e:
pass
class CPM_gurobi(SolverInterface):
"""
Interface to Gurobi's API
Requires that the 'gurobipy' python package is installed:
$ pip install gurobipy
See detailed installation instructions at:
https://support.gurobi.com/hc/en-us/articles/360044290292-How-do-I-install-Gurobi-for-Python-
Creates the following attributes:
user_vars: set(), variables in the original (non-transformed) model,
for reverse mapping the values after `solve()`
cpm_status: SolverStatus(), the CPMpy status after a `solve()`
grb_model: object, gurobi's model object (gurobipy.Model)
_varmap: dict(), maps cpmpy variables to native solver variables
"""
@staticmethod
def supported():
# try to import the package
try:
import gurobipy as gp
return True
except ImportError as e:
return False
def __init__(self, cpm_model=None, subsolver=None):
"""
Constructor of the native solver object
Arguments:
- cpm_model: a CPMpy Model()
"""
if not self.supported():
raise Exception(
"CPM_gurobi: Install the python package 'gurobipy' and make sure your licence is activated!")
import gurobipy as gp
# initialise the native gurobi model object
self._GRB_env = gp.Env()
self._GRB_env.setParam("OutputFlag", 0)
self._GRB_env.start()
self.grb_model = gp.Model(env=self._GRB_env)
# initialise everything else and post the constraints/objective
# it is sufficient to implement __add__() and minimize/maximize() below
super().__init__(name="gurobi", cpm_model=cpm_model)
def solve(self, time_limit=None, solution_callback=None, **kwargs):
"""
Call the gurobi solver
Arguments:
- time_limit: maximum solve time in seconds (float, optional)
- kwargs: any keyword argument, sets parameters of solver object
Arguments that correspond to solver parameters:
Examples of gurobi supported arguments include:
- Threads : int
- MIPFocus: int
- ImproveStartTime : bool
- FlowCoverCuts: int
For a full list of gurobi parameters, please visit https://www.gurobi.com/documentation/9.5/refman/parameters.html#sec:Parameters
"""
import gurobipy as gp
from gurobipy import GRB
if time_limit is not None:
self.grb_model.setParam("TimeLimit", time_limit)
# call the solver, with parameters
for param, val in kwargs.items():
self.grb_model.setParam(param, val)
_ = self.grb_model.optimize(callback=solution_callback)
grb_objective = self.grb_model.getObjective()
is_optimization_problem = grb_objective.size() != 0 # TODO: check if better way to do this...
grb_status = self.grb_model.Status
# new status, translate runtime
self.cpm_status = SolverStatus(self.name)
self.cpm_status.runtime = self.grb_model.runtime
# translate exit status
if grb_status == GRB.OPTIMAL and not is_optimization_problem:
self.cpm_status.exitstatus = ExitStatus.FEASIBLE
elif grb_status == GRB.OPTIMAL and is_optimization_problem:
self.cpm_status.exitstatus = ExitStatus.OPTIMAL
elif grb_status == GRB.INFEASIBLE:
self.cpm_status.exitstatus = ExitStatus.UNSATISFIABLE
else: # another?
raise NotImplementedError(
f"Translation of gurobi status {grb_status} to CPMpy status not implemented") # a new status type was introduced, please report on github
# TODO: what about interrupted solves? Gurobi can return sub-optimal values too
# True/False depending on self.cpm_status
has_sol = self._solve_return(self.cpm_status)
# translate solution values (of user vars only)
if has_sol:
# fill in variable values
for cpm_var in self.user_vars:
solver_val = self.solver_var(cpm_var).X
if cpm_var.is_bool():
cpm_var._value = solver_val >= 0.5
else:
cpm_var._value = int(solver_val)
# set _objective_value
if is_optimization_problem:
self.objective_value_ = grb_objective.getValue()
return has_sol
def solver_var(self, cpm_var):
"""
Creates solver variable for cpmpy variable
or returns from cache if previously created
"""
from gurobipy import GRB
if is_num(cpm_var):
return cpm_var
# special case, negative-bool-view
# work directly on var inside the view
if isinstance(cpm_var, NegBoolView):
raise Exception("Negative literals should not be part of any equation. See /transformations/linearize for more details")
# create if it does not exist
if not cpm_var in self._varmap:
if isinstance(cpm_var, _BoolVarImpl):
revar = self.grb_model.addVar(vtype=GRB.BINARY, name=cpm_var.name)
elif isinstance(cpm_var, _IntVarImpl):
revar = self.grb_model.addVar(cpm_var.lb, cpm_var.ub, vtype=GRB.INTEGER, name=str(cpm_var))
else:
raise NotImplementedError("Not a known var {}".format(cpm_var))
self._varmap[cpm_var] = revar
# return from cache
return self._varmap[cpm_var]
def objective(self, expr, minimize=True):
"""
Post the expression to optimize to the solver.
'objective()' can be called multiple times, only the last one is used.
(technical side note: any constraints created during conversion of the objective
are permanently posted to the solver)
"""
from gurobipy import GRB
# make objective function non-nested
(flat_obj, flat_cons) = (flatten_objective(expr))
self += flat_cons # add potentially created constraints
self.user_vars.update(get_variables(flat_obj))
obj = self._make_numexpr(flat_obj)
if minimize:
self.grb_model.setObjective(obj, sense=GRB.MINIMIZE)
else:
self.grb_model.setObjective(obj, sense=GRB.MAXIMIZE)
def _make_numexpr(self, cpm_expr):
"""
Turns a numeric CPMpy 'flat' expression into a solver-specific
numeric expression
Used especially to post an expression as objective function
"""
import gurobipy as gp
if is_num(cpm_expr):
return cpm_expr
# decision variables, check in varmap
if isinstance(cpm_expr, _NumVarImpl): # _BoolVarImpl is subclass of _NumVarImpl
return self.solver_var(cpm_expr)
# sum
if cpm_expr.name == "sum":
return gp.quicksum(self.solver_vars(cpm_expr.args))
# wsum
if cpm_expr.name == "wsum":
return gp.quicksum(w * self.solver_var(var) for w, var in zip(*cpm_expr.args))
raise NotImplementedError("gurobi: Not a know supported numexpr {}".format(cpm_expr))
def __add__(self, cpm_con):
"""
Post a (list of) CPMpy constraints(=expressions) to the solver
Note that we don't store the constraints in a cpm_model,
we first transform the constraints into primitive constraints,
then post those primitive constraints directly to the native solver
:param cpm_con CPMpy constraint, or list thereof
:type cpm_con (list of) Expression(s)
"""
# add new user vars to the set
self.user_vars.update(get_variables(cpm_con))
# apply transformations, then post internally
# expressions have to be linearized to fit in MIP model. See /transformations/linearize
cpm_cons = flatten_constraint(cpm_con)
cpm_cons = only_bv_implies(cpm_cons)
cpm_cons = linearize_constraint(cpm_cons)
cpm_cons = only_positive_bv(cpm_cons)
for con in cpm_cons:
self._post_constraint(con)
return self
def _post_constraint(self, cpm_expr):
"""
Post a primitive CPMpy constraint to the native solver API
What 'primitive' means depends on the solver capabilities,
more specifically on the transformations applied in `__add__()`
Solvers do not need to support all constraints.
"""
from gurobipy import GRB
# Comparisons: only numeric ones as 'only_bv_implies()' has removed the '==' reification for Boolean expressions
# numexpr `comp` bvar|const
if isinstance(cpm_expr, Comparison):
lhs, rhs = cpm_expr.args
rvar = self.solver_var(rhs)
# TODO: this should become a transformation!!
if cpm_expr.name != '==' and not is_num(lhs) and \
not isinstance(lhs, _NumVarImpl) and \
not lhs.name == "sum" and \
not lhs.name == "wsum":
# functional globals only exist for equality in gurobi
# example: min(x) > 10 :: min(x) == aux, aux > 10
# create the equality and overwrite lhs with auxiliary (will handle appropriate bounds)
(lhs, cons) = get_or_make_var(lhs)
self += cons
# all but '==' now only have as lhs: const|ivar|sum|wsum
# translate ivar|sum|wsum so they can be posted directly below
if isinstance(lhs, _NumVarImpl):
lhs = self.solver_var(lhs) # Case could be omitted -> handled in _make_numexpr
elif isinstance(lhs, Operator) and (lhs.name == 'sum' or lhs.name == 'wsum'):
# a BoundedLinearExpression LHS, special case, like in objective
lhs = self._make_numexpr(lhs)
# assumes that gurobi accepts sum(x) >= y without further simplification
# post the comparison
if cpm_expr.name == '<=':
return self.grb_model.addLConstr(lhs, GRB.LESS_EQUAL, rvar)
elif cpm_expr.name == '<':
raise Exception(f"{cpm_expr} should have been linearized, see /transformations/linearize.py")
elif cpm_expr.name == '>=':
return self.grb_model.addLConstr(lhs, GRB.GREATER_EQUAL, rvar)
elif cpm_expr.name == '>':
raise Exception(f"{cpm_expr} should have been linearized, see /transformations/linearize.py")
elif cpm_expr.name == '!=':
raise Exception(f"{cpm_expr} should have been linearized, see /transformations/linearize.py")
elif cpm_expr.name == '==':
if not isinstance(lhs, Expression):
# base cases: const|ivar|sum|wsum with prepped lhs above
return self.grb_model.addLConstr(lhs, GRB.EQUAL, rvar)
elif lhs.name == 'mul':
assert len(lhs.args) == 2, "Gurobi only supports multiplication with 2 variables"
a, b = self.solver_vars(lhs.args)
self.grb_model.setParam("NonConvex", 2)
return self.grb_model.addConstr(a * b == rvar)
elif lhs.name == 'div':
if not isinstance(lhs.args[1], _NumVarImpl):
a, b = self.solver_vars(lhs.args)
return self.grb_model.addLConstr(a / b, GRB.EQUAL, rvar)
raise Exception("Gurobi only supports division by constants")
# General constraints
# rvar should be a variable, not a constant
if not isinstance(rhs, _NumVarImpl):
rvar = self.solver_var(intvar(lb=rhs, ub=rhs))
if lhs.name == "and" or lhs.name == "or":
raise Exception(f"{cpm_expr} should have been linearized, see /transformations/linearize.py")
elif lhs.name == 'min':
return self.grb_model.addGenConstrMin(rvar, self.solver_vars(lhs.args))
elif lhs.name == 'max':
return self.grb_model.addGenConstrMax(rvar, self.solver_vars(lhs.args))
elif lhs.name == 'abs':
return self.grb_model.addGenConstrAbs(rvar, self.solver_var(lhs.args[0]))
elif lhs.name == 'pow':
x, a = self.solver_vars(lhs.args)
assert a == 2, "Only support quadratic constraints"
assert not isinstance(a, _NumVarImpl), f"Gurobi only supports power expressions with positive exponents."
return self.grb_model.addGenConstrPow(x, rvar, a)
raise NotImplementedError(
"Not a know supported gurobi left-hand-side '{}' {}".format(lhs.name, cpm_expr))
elif isinstance(cpm_expr, Operator) and cpm_expr.name == "->":
# Indicator constraints
# Take form bvar -> sum(x,y,z) >= rvar
cond, sub_expr = cpm_expr.args
assert isinstance(cond, _BoolVarImpl), f"Implication constraint {cpm_expr} must have BoolVar as lhs"
assert isinstance(sub_expr, Comparison), "Implication must have linear constraints on right hand side"
if isinstance(cond, NegBoolView):
cond, bool_val = self.solver_var(cond._bv), False
else:
cond, bool_val = self.solver_var(cond), True
lhs, rhs = sub_expr.args
if isinstance(lhs, _NumVarImpl) or lhs.name == "sum" or lhs.name == "wsum":
lin_expr = self._make_numexpr(lhs)
else:
raise Exception(f"Unknown linear expression {lhs} on right side of indicator constraint: {cpm_expr}")
if sub_expr.name == "<=":
return self.grb_model.addGenConstrIndicator(cond, bool_val, lin_expr, GRB.LESS_EQUAL, self.solver_var(rhs))
if sub_expr.name == ">=":
return self.grb_model.addGenConstrIndicator(cond, bool_val, lin_expr, GRB.GREATER_EQUAL, self.solver_var(rhs))
if sub_expr.name == "==":
return self.grb_model.addGenConstrIndicator(cond, bool_val, lin_expr, GRB.EQUAL, self.solver_var(rhs))
# Global constraints
else:
self += cpm_expr.decompose()
return
raise NotImplementedError(cpm_expr) # if you reach this... please report on github
def solveAll(self, display=None, time_limit=None, solution_limit=None, **kwargs):
"""
Compute all solutions and optionally display the solutions.
This is the generic implementation, solvers can overwrite this with
a more efficient native implementation
Arguments:
- display: either a list of CPMpy expressions, OR a callback function, called with the variables after value-mapping
default/None: nothing displayed
- time_limit: stop after this many seconds (default: None)
- solution_limit: stop after this many solutions (default: None)
- any other keyword argument
Returns: number of solutions found
"""
if time_limit is not None:
self.grb_model.setParam("TimeLimit", time_limit)
if solution_limit is None:
raise Exception(
"Gurobi does not support searching for all solutions. If you really need all solutions, try setting solution limit to a large number and set time_limit to be not None.")
# Force gurobi to keep searching in the tree for optimal solutions
self.grb_model.setParam("PoolSearchMode", 2)
self.grb_model.setParam("PoolSolutions", solution_limit)
for param, val in kwargs.items():
self.grb_model.setParam(param, val)
# Solve the model
self.grb_model.optimize()
solution_count = self.grb_model.SolCount
for i in range(solution_count):
# Specify which solution to query
self.grb_model.setParam("SolutionNumber", i)
# Translate solution to variables
for cpm_var in self.user_vars:
solver_val = self.solver_var(cpm_var).Xn
if cpm_var.is_bool():
cpm_var._value = solver_val >= 0.5
else:
cpm_var._value = int(solver_val)
# Translate objective
if self.grb_model.getObjective().size() != 0: # TODO: check if better way to do this...
self.objective_value_ = self.grb_model.getObjective().getValue()
if display is not None:
if isinstance(display, Expression):
print(display.value())
elif isinstance(display, list):
print([v.value() for v in display])
else:
display() # callback
# Reset pool search mode to default
self.grb_model.setParam("PoolSearchMode", 0)
return solution_count
| 1.84375
| 2
|
3.py
|
inwk6312fall2018/model-open-book-quiz-rohit391
| 0
|
12779743
|
<gh_stars>0
def deal_cards(self, number: int):
for _ in range(0, number):
for player in self.players:
card = self.deck.draw()
player.hand.append(card)
print("Dealt {} to player {}".format(card, player))
# Calling the method bare like this raises a TypeError: it needs an instance
# that provides `self.players` and `self.deck`, plus a `number` argument,
# e.g. game.deal_cards(5) on a suitable game object (illustrative name).
# deal_cards()
| 2.96875
| 3
|
tests/test_sdpb.py
|
alepiazza/pycftboot
| 0
|
12779744
|
<gh_stars>0
import unittest
import shutil
import os
import subprocess
import filecmp
from symengine.lib.symengine_wrapper import RealMPFR
from pycftboot import SdpbDocker, SdpbBinary, SdpbSingularity
from pycftboot.constants import prec
DIR = 'test_output'
def have_binary(bin_name):
bin_in_path = shutil.which(bin_name)
return bin_in_path is not None and os.path.isfile(bin_in_path)
def check_directory_equal(dir1, dir2):
"""https://stackoverflow.com/questions/4187564/recursively-compare-two-directories-to-ensure-they-have-the-same-files-and-subdi
"""
dirs_cmp = filecmp.dircmp(dir1, dir2)
if len(dirs_cmp.left_only) > 0 or len(dirs_cmp.right_only) > 0 or \
len(dirs_cmp.funny_files) > 0:
return False
(_, mismatch, errors) = filecmp.cmpfiles(
dir1, dir2, dirs_cmp.common_files, shallow=False)
if len(mismatch) > 0 or len(errors) > 0:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
if not check_directory_equal(new_dir1, new_dir2):
return False
return True
class TestSdpb(unittest.TestCase):
def setUp(self):
self.volume = DIR
os.makedirs(self.volume, exist_ok=True)
if have_binary('docker'):
self.s = SdpbDocker(volume=self.volume)
elif have_binary('singularity'):
self.s = SdpbSingularity(volume=self.volume)
elif have_binary('sdpb'):
self.s = SdpbBinary()
else:
raise RuntimeError("Nor docker or sdpb found")
self.input_xml = f"{DIR}/test.xml"
self.s.set_option("sdpDir", f"{DIR}/test_pvm2sdp")
self.s.set_option("outDir", f"{DIR}/test_sdpb")
self.s.set_option("procsPerNode", 1)
def tearDown(self):
shutil.rmtree(self.volume)
@unittest.skipUnless(have_binary('docker'), "No docker")
def test_sdpb_docker_run_command(self):
command = 'echo "running in docker"'.split()
out_docker = self.s.run_command(command)
out_shell = subprocess.run(command, capture_output=True, check=True, text=True)
self.assertEqual(out_docker.__dict__, out_shell.__dict__)
self.s.run_command(f"touch '{DIR}/test'")
self.assertTrue(os.path.isfile(f'{DIR}/test'))
self.s.run_command(f"rm '{DIR}/test'")
self.assertFalse(os.path.isfile(f'{DIR}/test'))
@unittest.skipUnless(have_binary('singularity'), "No singularity")
def test_sdpb_singularity_run_command(self):
command = 'echo "running in singularity"'.split()
out_docker = self.s.run_command(command)
out_shell = subprocess.run(command, capture_output=True, check=True, text=True)
self.assertEqual(out_docker.__dict__, out_shell.__dict__)
self.s.run_command(f"touch '{DIR}/test'")
self.assertTrue(os.path.isfile(f'{DIR}/test'))
self.s.run_command(f"rm '{DIR}/test'")
self.assertFalse(os.path.isfile(f'{DIR}/test'))
def test_sdpb_sdpb(self):
shutil.copy('tests/input/test.xml', self.input_xml)
self.s.pvm2sdp_run(self.input_xml, self.s.get_option("sdpDir"))
self.assertTrue(filecmp.cmp(f'{DIR}/test_pvm2sdp', 'tests/check_output/test_pvm2sdp'))
self.s.run()
self.assertTrue(check_directory_equal(f'{DIR}/test_pvm2sdp.ck', 'tests/check_output/test_pvm2sdp.ck'))
self.assertTrue(check_directory_equal(f'{DIR}/test_sdpb', 'tests/check_output/test_sdpb'))
def test_sdpb_read_sdpb_output(self):
shutil.copy('tests/input/test.xml', self.input_xml)
self.s.pvm2sdp_run(self.input_xml, self.s.get_option("sdpDir"))
self.s.run()
output = self.s.read_output(self.s.get_option("outDir"))
expected = {
'terminateReason': "found primal-dual optimal solution",
'primalObjective': RealMPFR('1.84026576313204924668804017173055420056358532030282556465761906133430166726537336826049865612094019021116018862947817214304719196101000427864203352107112262936760692514062283196788975004021011672107', prec),
'dualObjective': RealMPFR('1.84026576313204924668804017172924388084784907020307957926406455972756967820389551729116356865203683721324847695046740812192888147479629469781056654543846872510659962749879756855722780845863763393790', prec),
'dualityGap': RealMPFR('3.56013718775636270149999059635335050723442743109168831293885607041894974620853522385695694898029533051814224367926288833813664613892167772499464803601004077512494651654320924529335252052851986330126e-31', prec),
'primalError': RealMPFR('3.02599720266600806524000028915989450062982004925818215153860399689135685232954868045195617059093162777053090078176088450807518064138449768061426525659372084875033647651339829218635516934714536708774e-213', prec),
'dualError': RealMPFR('4.46281245187788768570163247269790454899928515696183592865699887013584285017886599785053051433102982386812845275899249791404197108679379367457100621059380942879597102272199695686175968991580692060326e-209', prec),
'Solver runtime': float(0),
'y': [RealMPFR('-1.84026576313204924668804017172924388084784907020307957926406455972756967820389551729116356865203683721324847695046740812192888147479629469781056654543846872510659962749879756855722780845863763393790', prec)]
}
expected = {k: str(v) for k, v in expected.items()}
output = {k: str(v) for k, v in output.items()}
self.assertEqual(output, expected)
if __name__ == '__main__':
unittest.main()
| 2.328125
| 2
|
src/Services/errorHandlers.py
|
CarolineYao/SentimentAnalysisProject
| 2
|
12779745
|
<filename>src/Services/errorHandlers.py
from flask import json, request, jsonify, make_response
import werkzeug
exception_to_error_code = {
"BadRequest": 400
}
def _get_exception_error_code(e):
exception_type = type(e).__name__
return exception_to_error_code[exception_type]
def _compute_error_json(e):
return {
"code": _get_exception_error_code(e),
"message": e.description
}
def handle_bad_request(e):
return make_response(_compute_error_json(e), _get_exception_error_code(e))
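# A minimal registration sketch (assumes a Flask `app` object created elsewhere;
# shown for illustration only):
#
#   from werkzeug.exceptions import BadRequest
#   app.register_error_handler(BadRequest, handle_bad_request)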
| 2.75
| 3
|
tracker/forms.py
|
amin-da71/Benbb96
| 0
|
12779746
|
from django import forms
from django.core.exceptions import ValidationError
from django_select2.forms import Select2MultipleWidget
from tracker.models import Track, Tracker
class TrackerForm(forms.ModelForm):
class Meta:
model = Tracker
fields = ('nom', 'icone', 'color')
widgets = {
'nom': forms.TextInput(attrs={'class': 'form-control'})
}
class TrackForm(forms.ModelForm):
datetime = forms.DateTimeField(
required=True,
input_formats=['%Y-%m-%dT%H:%M']
)
class Meta:
model = Track
fields = ('commentaire', 'datetime')
widgets = {
'tracker': forms.HiddenInput(),
'commentaire': forms.TextInput(attrs={'placeholder': 'Commentaire facultatif', 'class': 'form-control'})
}
labels = {'commentaire': 'Ajouter un nouveau track'}
class SelectTrackersForm(forms.Form):
trackers = forms.ModelMultipleChoiceField(
label='Sélectionner des trackers à comparer',
queryset=Tracker.objects.all(),
widget=Select2MultipleWidget(attrs={'class': 'form-control'})
)
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['trackers'].queryset = user.profil.trackers.all()
def clean_trackers(self):
trackers = self.cleaned_data.get('trackers')
if len(trackers) < 2:
raise ValidationError('Veuillez sélectionner au minimum 2 trackers.')
return trackers
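# A minimal usage sketch for the user-scoped form (the surrounding view code is
# illustrative, not part of this module):
#
#   form = SelectTrackersForm(request.user, data=request.POST)
#   if form.is_valid():
#       trackers = form.cleaned_data['trackers']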
| 2.375
| 2
|
app.py
|
annewieggers/sqlalchemy-challenge
| 0
|
12779747
|
import datetime as dt
from datetime import datetime
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy import inspect
from dateutil.relativedelta import relativedelta
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(bind=engine)
# Flask Setup
app = Flask(__name__)
# Flask Routes
# @app.route("/") - List all routes that are available.
@app.route("/")
def home_page():
"""List all routes."""
return (
f"All Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start-end<br/>"
)
# /api/v1.0/precipitation
# Convert the query results to a dictionary using date as the key and prcp as the value.
# Return the JSON representation of your dictionary.
@app.route("/api/v1.0/precipitation/")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all precipitation amounts"""
# Retrieve precipitation data
precip = (session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date))
session.close()
# Convert the query results to a dictionary using date as the key and prcp as the value.
prcp_list = []
for result in precip:
prcp_dict = {}
prcp_dict["date"] = result[0]
prcp_dict["prcp"] = result[1]
prcp_list.append(prcp_dict)
return jsonify(prcp_list)
# /api/v1.0/stations Return a JSON list of stations from the dataset.
@app.route("/api/v1.0/stations/")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all active stations"""
# Query all active stations
results_stations = session.query(Station).all()
#session.close()
list_stations = []
for station in results_stations:
station_dict = {}
station_dict["id"] = station.id
station_dict["station"] = station.station
station_dict["name"] = station.name
station_dict["latitude"] = station.latitude
station_dict["longitude"] = station.longitude
station_dict["elevation"] = station.elevation
list_stations.append(station_dict)
return jsonify(list_stations)
# /api/v1.0/tobs
# Query the dates and temperature observations of the most active station for the last year of data.
# Return a JSON list of temperature observations (TOBS) for the previous year.
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all tobs"""
# Determine the last date and year ago
latest_date = (session.query(Measurement.date).order_by(Measurement.date.desc()).first())
latest = latest_date[0]
# Calculate the date 1 year ago from the last data point in the database
latest = dt.datetime.strptime(latest, '%Y-%m-%d')
latest = latest.date()
year_ago = latest - relativedelta(days=365)
# Determine active stations and order by most active
active_stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
# Identify most active station
most_active = active_stations[0][0]
# Query the dates and temperature observations of the most active station for the last year of data.
temp_data = session.query(Measurement.date, Measurement.tobs). filter(Measurement.date >= year_ago).filter(Measurement.station==most_active).all()
session.close()
# Return a list of all tobs
all_tobs = []
for tob in temp_data:
tobs_dict = {}
tobs_dict["date"] = tob.date
tobs_dict["tobs"] = tob.tobs
all_tobs.append(tobs_dict)
return jsonify(all_tobs)
# /api/v1.0/<start>
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
@app.route("/api/v1.0/start")
def start():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Start date"""
# Design a query to retrieve the last 12 months of precipitation data and plot the results
latest_date = (session.query(Measurement.date).order_by(Measurement.date.desc()).first())
latest = latest_date[0]
# Calculate the date 1 year ago from the last data point in the database
latest = dt.datetime.strptime(latest, '%Y-%m-%d')
latest = latest.date()
year_ago = latest - relativedelta(days=365)
active_stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
most_active = active_stations[0][0]
results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.station==most_active).filter(Measurement.date >= year_ago).all()
session.close()
return jsonify(results)
# /api/v1.0/<api/v1.0/start-end
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
@app.route("/api/v1.0/start-end")
def start_end():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Start - End date"""
# Design a query to retrieve the last 12 months of precipitation data and plot the results
latest_date = (session.query(Measurement.date).order_by(Measurement.date.desc()).first())
latest = latest_date[0]
# Calculate the date 1 year ago from the last data point in the database
latest = dt.datetime.strptime(latest, '%Y-%m-%d')
latest = latest.date()
year_ago = latest - relativedelta(days=365)
active_stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
most_active = active_stations[0][0]
results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.station==most_active).filter(Measurement.date >= year_ago).filter(Measurement.date <= latest).all()
session.close()
return jsonify(results)
if __name__ == '__main__':
app.run(debug=True)
| 3.03125
| 3
|
lib/improver/tests/argparser/test_ArgParser.py
|
TomekTrzeciak/improver
| 0
|
12779748
|
<filename>lib/improver/tests/argparser/test_ArgParser.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for argparser.ArgParser."""
import os
import unittest
from unittest.mock import patch
from improver.argparser import ArgParser
# We might one day want to move this up to a more central place.
class QuietTestCase(unittest.TestCase):
"""A subclass of unittest.TestCase which prevents writing to stderr and
calling of sys.exit."""
@classmethod
def setUpClass(cls):
"""Patch the class by redirecting stderr to /dev/null, and disabling
calls to sys.exit. Currently used to prevent
ArgumentParser.parse_args() from writing its output to the screen and
exiting early when using unittest discover."""
cls.file_handle = open(os.devnull, 'w')
cls.stderr_patch = patch('sys.stderr', cls.file_handle)
cls.exit_patch = patch('sys.exit')
cls.stderr_patch.start()
cls.exit_patch.start()
@classmethod
def tearDownClass(cls):
"""Stop the patches which redirect stderr to /dev/null and prevents
sys.exit from being called."""
cls.file_handle.close()
cls.stderr_patch.stop()
cls.exit_patch.stop()
class Test_init(QuietTestCase):
"""Test the __init__ method."""
def test_create_argparser_with_no_arguments(self):
"""Test that creating an ArgParser with no arguments has no
arguments."""
compulsory_arguments = {}
# it doesn't matter what the centralized arguments are, because we
# select None of them - we only need to patch the COMPULSORY_ARGUMENTS
# to ensure there are none of them
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
parser = ArgParser(central_arguments=None, specific_arguments=None)
args = parser.parse_args()
args = vars(args).keys()
self.assertEqual(len(args), 0)
def test_create_argparser_only_compulsory_arguments(self):
"""Test that creating an ArgParser with only compulsory arguments
adds only the compulsory arguments."""
compulsory_arguments = {'foo': (['--foo'], {})}
# it doesn't matter what the centralized arguments are, because we
# select None of them - only patch COMPULSORY_ARGUMENTS so we know
# what to expect
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
parser = ArgParser(central_arguments=None, specific_arguments=None)
args = parser.parse_args()
args = vars(args).keys()
self.assertCountEqual(args, ['foo'])
def test_create_argparser_fails_with_unknown_centralized_argument(self):
"""Test that we raise an exception when attempting to retrieve
centralized arguments which are not in the centralized argument dictionary."""
centralized_arguments = {'foo': (['--foo'], {})}
central_args_to_fetch = ('missing_central_arg',)
# patch the CENTRALIZED_ARGUMENTS so we know that `missing_central_arg`
# is not there, and we can raise an exception
with patch('improver.argparser.ArgParser.CENTRALIZED_ARGUMENTS',
centralized_arguments):
with self.assertRaises(KeyError):
ArgParser(central_arguments=central_args_to_fetch,
specific_arguments=None)
def test_create_argparser_only_centralized_arguments(self):
"""Test that creating an ArgParser with only centralized arguments
adds only the selected centralized arguments."""
compulsory_arguments = {}
centralized_arguments = {'foo': (['--foo'], {})}
# patch the COMPULSORY_ARGUMENTS to an empty dict (so there are none)
# and patch CENTRALIZED_ARGUMENTS so we know that `foo` can be selected
# from it
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
with patch('improver.argparser.ArgParser.CENTRALIZED_ARGUMENTS',
centralized_arguments):
parser = ArgParser(central_arguments=['foo'],
specific_arguments=None)
args = parser.parse_args()
args = vars(args).keys()
self.assertCountEqual(args, ['foo'])
def test_create_argparser_only_specific_arguments(self):
"""Test that creating an ArgParser with only specific arguments
adds only the specific arguments."""
compulsory_arguments = {}
specific_arguments = [(['--foo'], {})]
# it doesn't matter what the centralized arguments are, because we
# select None of them - patch the COMPULSORY_ARGUMENTS to be an empty
# dict so that we don't add any of them
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
parser = ArgParser(central_arguments=None,
specific_arguments=specific_arguments)
args = parser.parse_args()
args = vars(args).keys()
self.assertCountEqual(args, ['foo'])
def test_create_argparser_compulsory_and_centralized_arguments(self):
"""Test that creating an ArgParser with compulsory and centralized
arguments adds both of these and no others."""
compulsory_arguments = {'foo': (['--foo'], {})}
centralized_arguments = {'bar': (['--bar'], {})}
# patch the COMPULSORY_ARGUMENTS so we know that `foo` exists
# and the CENTRALIZED_ARGUMENTS so we know that `bar` exists.
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
with patch('improver.argparser.ArgParser.CENTRALIZED_ARGUMENTS',
centralized_arguments):
parser = ArgParser(central_arguments=['bar'],
specific_arguments=None)
args = parser.parse_args()
args = vars(args).keys()
self.assertCountEqual(args, ['foo', 'bar'])
def test_create_argparser_compulsory_and_specific_arguments(self):
"""Test that creating an ArgParser with compulsory and specific
arguments adds both of these and no others."""
compulsory_arguments = {'foo': (['--foo'], {})}
specific_arguments = [(['--bar'], {})]
# it doesn't matter what the centralized arguments are, because we
# select None of them - patch only the COMPULSORY_ARGUMENTS so we know
# that `foo` is added from here
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
parser = ArgParser(central_arguments=None,
specific_arguments=specific_arguments)
args = parser.parse_args()
args = vars(args).keys()
self.assertCountEqual(args, ['foo', 'bar'])
def test_create_argparser_all_arguments(self):
"""Test that creating an ArgParser with compulsory, centralized and
specific arguments adds the arguments from all 3 collections."""
compulsory_arguments = {'foo': (['--foo'], {})}
centralized_arguments = {'bar': (['--bar'], {})}
specific_arguments = [(['--baz'], {})]
# patch both the COMPULSORY_ARGUMENTS and CENTRALIZED_ARGUMENTS, so
# that `foo` and `bar` are added from these (respectively)
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
with patch('improver.argparser.ArgParser.CENTRALIZED_ARGUMENTS',
centralized_arguments):
parser = ArgParser(central_arguments=['bar'],
specific_arguments=specific_arguments)
args = parser.parse_args()
args = vars(args).keys()
self.assertCountEqual(args, ['foo', 'bar', 'baz'])
def test_argparser_compulsory_args_has_profile(self):
"""Test that creating an ArgParser instance with the compulsory
arguments adds the profiling options."""
expected_profile_options = ['profile', 'profile_file']
parser = ArgParser(central_arguments=None, specific_arguments=None)
args = parser.parse_args()
args = vars(args).keys()
self.assertCountEqual(args, expected_profile_options)
class Test_add_arguments(QuietTestCase):
"""Test the add_arguments method."""
def test_adding_multiple_arguments(self):
"""Test that we can successfully add multiple arguments to the
ArgParser."""
# we will not actually pass anything in, so the Namespace will receive
# the defaults (if any) - only check the keys of the Namespace derived
# dictionary
args_to_add = [(['--foo'], {}),
(['--bar', '--b'], {})]
expected_namespace_keys = ['foo', 'bar'] # + compulsory...
# explicitly pass nothing in - will only have compulsory arguments
# and the ones we added...
parser = ArgParser(central_arguments=None,
specific_arguments=None)
parser.add_arguments(args_to_add)
result_args = parser.parse_args()
result_args = vars(result_args).keys()
# we could also add compulsory arguments to expected_namespace_keys
# and then assertCountEqual - (order unimportant), but this
# is unnecessary - just use loop:
# (or we could patch compulsory arguments to be an empty dictionary)
for expected_arg in expected_namespace_keys:
self.assertIn(expected_arg, result_args)
def test_adding_argument_with_defined_kwargs_dict(self):
"""Test that we can successfully add an argument to the ArgParser,
when the argspec contained kwargs."""
# length of argspec is 2...
args_to_add = [(['--foo'], {'default': 1})]
expected_arg = 'foo'
parser = ArgParser(central_arguments=None,
specific_arguments=None)
parser.add_arguments(args_to_add)
result_args = parser.parse_args()
result_args = vars(result_args).keys()
self.assertIn(expected_arg, result_args)
def test_adding_argument_with_defined_kwargs_dict_has_default(self):
"""Test that we can successfully add an argument to the ArgParser,
when the argspec contained kwargs, and that the default value is
captured."""
args_to_add = [(['--one'], {'default': 1})]
parser = ArgParser(central_arguments=None,
specific_arguments=None)
parser.add_arguments(args_to_add)
result_args = parser.parse_args()
# `--one` was not passed in, so we pick up the default - let's check
# they agree...
self.assertEqual(1, result_args.one)
def test_adding_single_argument_with_unexpected_length_argspec(self):
"""Test that attempting to add an argument to the ArgParser when
the wrong format argspec raises an exception."""
# length of argspec is 3 - this is unexpected
args_to_add = [(['--foo'], 'bar', {})]
parser = ArgParser(central_arguments=None,
specific_arguments=None)
with self.assertRaises(AttributeError):
parser.add_arguments(args_to_add)
def test_adding_empty_argument_list_does_nothing(self):
"""Test that attempting to add an empty list of argspecs to the
ArgParser does not add any new arguments."""
args_to_add = []
# add a specific (optional) argument - ensures that even if there are
# no compulsory arguments, we have something...
# adding arguments after calling parse_args/args will do nothing, so
# instead create 2 instances:
parser1 = ArgParser(central_arguments=None,
specific_arguments=[[['--optional'], {}]])
parser2 = ArgParser(central_arguments=None,
specific_arguments=[[['--optional'], {}]])
parser2.add_arguments(args_to_add)
self.assertEqual(parser1.parse_args(), parser2.parse_args())
class Test_parse_args(QuietTestCase):
"""Test the parse_args method."""
def test_profile_is_called_when_enabled(self):
"""Test that calling parse_args enables profiling when the --profile
option is added."""
# temporarily patch compulsory args so that profiling is enabled by
# default
compulsory_arguments = {'profile': (
['--profile'],
{'default': True}),
'profile_file': (
['--profile-file'],
{'default': None})}
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
with patch('improver.argparser.profile_hook_enable') as \
mock_profile:
parser = ArgParser(central_arguments=None,
specific_arguments=None)
parser.parse_args()
self.assertEqual(mock_profile.call_count, 1)
def test_profile_is_not_called_when_disabled(self):
"""Test that calling parse_args does not enable profiling when the
--profile option is not added."""
# temporarily patch compulsory args so that profiling is disabled by
# default
compulsory_arguments = {'profile': (
['--profile'],
{'default': False}),
'profile_file': (
['--profile-file'],
{'default': None})}
with patch('improver.argparser.ArgParser.COMPULSORY_ARGUMENTS',
compulsory_arguments):
with patch('improver.argparser.profile_hook_enable') as \
mock_profile:
parser = ArgParser(central_arguments=None,
specific_arguments=None)
parser.parse_args()
self.assertEqual(mock_profile.call_count, 0)
# inherit from only TestCase - we want to explicitly catch the SystemExit
class Test_wrong_args_error(unittest.TestCase):
"""Test the wrong_args_error method."""
def test_error_raised(self, args='foo', method='bar'):
"""Test that an exception is raised containing the args and method."""
msg = ("Method: {} does not accept arguments: {}".format(
method, args))
# argparser will write to stderr independently of SystemExit
with open(os.devnull, 'w') as file_handle:
with patch('sys.stderr', file_handle):
with self.assertRaises(SystemExit, msg=msg):
ArgParser().wrong_args_error(args, method)
if __name__ == '__main__':
unittest.main()
| 1.710938
| 2
|
RL_based_ATSC/single-intersection/SALT/rl/agents/__init__.py
|
sue04206/traffic-signal-optimization
| 6
|
12779749
|
from __future__ import absolute_import
from .dqn import Learner, get_state_1d, get_state_2d
| 1.109375
| 1
|
hub_app/views/auth.py
|
passiopeia/passiopeia-hub
| 0
|
12779750
|
<filename>hub_app/views/auth.py
"""
Views for authentication
"""
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout, authenticate, login
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect, render
from django.utils.translation import gettext_lazy as _
from django.urls import reverse_lazy
from django.views import View
from hub_app.forms.auth import UsernamePasswordOtpForm
from hub_app.navlib.next_url import get_next
class LogoutView(View):
"""
Logout View
"""
http_method_names = ['get']
@staticmethod
def _logout(request):
logout(request)
messages.add_message(request, messages.SUCCESS, _('Logout successful.'))
return redirect(settings.LOGOUT_REDIRECT_URL)
def get(self, request: HttpRequest) -> HttpResponse:
"""
Just handle the GET request
"""
return LogoutView._logout(request)
class LoginView(View):
"""
Login View
"""
http_method_names = ['get', 'post']
template_name = 'hub_app/auth/login.html'
content_type = 'text/html'
def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
If the user is already logged in, they are logged out first. This is enforced here.
"""
if request.user.is_authenticated:
logout(request)
return redirect(reverse_lazy('ha:auth:login'), permanent=False)
return super(LoginView, self).dispatch(request, *args, **kwargs)
def _show_form(self, request: HttpRequest, form: UsernamePasswordOtpForm = UsernamePasswordOtpForm()):
"""
Send the form to the client
"""
return render(request, self.template_name, {
'form': form,
'next_url': get_next(request),
}, content_type=self.content_type)
def get(self, request: HttpRequest) -> HttpResponse:
"""
On GET-Request, only send the form
"""
return self._show_form(request)
def post(self, request: HttpRequest) -> HttpResponse:
"""
Handle the Login
"""
form = UsernamePasswordOtpForm(request.POST)
if not form.is_valid():
return self._show_form(request, form)
data = form.clean()
user = authenticate(
request,
username=data['username'],
password=data['password'],
one_time_pw=data['otp']
)
if user is None:
form.add_error(None, _('Username or password wrong, or one time password invalid.'))
return self._show_form(request, form)
login(request, user)
messages.add_message(request, messages.SUCCESS, _('Hey %(user)s, welcome to Passiopeia Hub!') % {
'user': user.first_name
})
next_url = get_next(request)
if next_url is None:
next_url = settings.LOGIN_REDIRECT_URL
return redirect(next_url, permanent=False)
| 2.375
| 2
|