hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cbb1f2e05a98843b34d5e931f7f14a6abe1ca529 | 3,634 | py | Python | bin/vigilance-server.py | EricMountain/weather_risk_metrics | f5962da2547efd7d6bfef6c73d4d5a5b5edbcae6 | [
"MIT"
] | null | null | null | bin/vigilance-server.py | EricMountain/weather_risk_metrics | f5962da2547efd7d6bfef6c73d4d5a5b5edbcae6 | [
"MIT"
] | null | null | null | bin/vigilance-server.py | EricMountain/weather_risk_metrics | f5962da2547efd7d6bfef6c73d4d5a5b5edbcae6 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from prometheus_client import start_http_server, Gauge
import urllib.request
import random
from datetime import datetime
import re
import time
# When True, parse a local XML fixture instead of fetching vigimeteo.com.
test = False
# Risk names indexed by (phenomene code - 1), matching the vigimeteo XML feed.
risks = ["vent violent", "pluie-inondation", "orages", "inondation", "neige-verglas", "canicule", "grand-froid", "avalanches", "vagues-submersion"]
# Maps a (dept, risk, startZ, endZ) tuple to the round in which it was last set
cache = {}
# Prometheus gauges: one series per individual event (full), and one
# aggregated series per (dept, risk) holding the worst current level.
gauge_full = Gauge('meteorological_risk_full', 'Weather risk', ['dept', 'risk', 'startZ', 'endZ'])
gauge = Gauge('meteorological_risk', 'Weather risk', ['dept', 'risk'])
def getTimeHash():
    """Return a minute-granularity integer derived from the current local time.

    Used only as a cache-busting URL query parameter, so the exact value is
    irrelevant; it just needs to change every minute.
    """
    now = datetime.now()
    minutes_of_day = now.hour * 60 + now.minute
    return (now.year * 365 + now.month * 30 + now.day) * 24 * 60 + minutes_of_day
def getStream():
    """Open the vigimeteo XML feed (or a local fixture when `test` is set).

    Returns a readable stream of XML lines, or None if the download failed.
    The time-hash query parameter defeats intermediate HTTP caching.
    """
    # Explicit import: `urllib.error` is otherwise only reachable through an
    # implementation detail of importing `urllib.request`.
    import urllib.error

    url = "http://www.vigimeteo.com/data/NXFR49_LFPW_.xml?{}".format(getTimeHash())
    if test:
        return open('test/jaune-vent-violent+littoral-vagues.xml')
    try:
        return urllib.request.urlopen(url)
    except urllib.error.URLError as e:
        print(f'Error fetching URL: {e}')
        return None
def getVigilanceData():
    """Parse PHENOMENE entries from the vigimeteo feed.

    Returns a list of dicts with keys dept/risk/level/start/end
    (all string values; start/end are YYYYMMDDHHMMSS UTC timestamps).
    Returns an empty list when the feed could not be fetched.
    """
    pattern = re.compile(
        r'<PHENOMENE departement="(?P<dept>\w+)" phenomene="(?P<risk>\d+)"'
        r' couleur="(?P<level>\d)" dateDebutEvtTU="(?P<start>\d{14})"'
        r' dateFinEvtTU="(?P<end>\d{14})"/>'
    )
    stream = getStream()
    if stream is None:
        return []
    entries = []
    for raw_line in stream:
        # urlopen yields bytes; the test fixture yields str.
        if isinstance(raw_line, bytes):
            raw_line = raw_line.decode('utf-8')
        match = pattern.match(raw_line)
        if match is not None:
            entries.append(match.groupdict())
    return entries
def latestVigilanceMetrics(gauge, cacheRound):
    """Refresh both Prometheus gauges from the current vigimeteo feed.

    gauge: the (dept, risk) gauge, updated with the worst level per pair.
    cacheRound: identifier of this polling round; every (dept, risk, start,
    end) key seen in the feed is stamped with it so checkDeadCacheEntries()
    can later zero out entries that disappeared from the feed.

    Note: the previous signature used the *classes* `Gauge` and `int` as
    default values (apparently intended as annotations); those defaults were
    unusable, so the parameters are now simply required.
    """
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    # Worst level seen so far per (dept, risk) in this round.
    deptRiskLevelMap = {}
    for result in getVigilanceData():
        # An event whose end date has passed no longer poses a risk.
        level = int(result['level']) if result['end'] > now else 0
        risk = risks[int(result['risk']) - 1]  # feed codes are 1-based
        dept = result['dept']
        key = (dept, risk, result['start'], result['end'])
        cache[key] = cacheRound
        gauge_full.labels(dept=dept, risk=risk, startZ=result['start'], endZ=result['end']).set(level)
        # Only raise (never lower) the aggregated per-(dept, risk) level.
        if (dept, risk) not in deptRiskLevelMap or level > deptRiskLevelMap[(dept, risk)]:
            deptRiskLevelMap[(dept, risk)] = level
            gauge.labels(dept=dept, risk=risk).set(level)
        print(f'{key!r} --> {level}, added to cache with round {cacheRound}')
def checkDeadCacheEntries(gauge, cacheRound):
    '''
    Checks if a particular combination has been dropped from the output
    produced by vigimeteo. We need to zero these entries else they will stay stuck
    at whatever their last value was.

    NOTE: cache keys carry four labels (dept, risk, startZ, endZ) which only
    exist on the module-level `gauge_full`; calling labels() with those four
    labels on the two-label `gauge` passed by the caller raises ValueError.
    Stale series are therefore zeroed on `gauge_full`. The `gauge` parameter
    is retained for call-site compatibility.
    '''
    # Snapshot items() so we can delete from the cache while iterating.
    for key, lastRound in list(cache.items()):
        if lastRound != cacheRound:
            print(f'{key!r} --> {0}, deleting cache entry')
            gauge_full.labels(dept=key[0], risk=key[1], startZ=key[2], endZ=key[3]).set(0)
            del cache[key]
if __name__ == '__main__':
    # Expose the metrics endpoint, then poll vigimeteo once per hour.
    start_http_server(9696)
    round_index = 0
    while True:
        # Alternate between 0 and 1 so stale cache entries can be detected.
        round_index = 1 - round_index
        print(f'Starting new round… (index {round_index})')
        latestVigilanceMetrics(gauge, round_index)
        checkDeadCacheEntries(gauge, round_index)
        print('Round completed.')
        time.sleep(3600)
| 34.609524 | 171 | 0.625206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,165 | 0.320407 |
cbb2c9ec40013427f4fe502f0cc420d2699e4241 | 2,058 | py | Python | examples/tutorial/paging_exception_force.py | Wlgen/force-riscv | 9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8 | [
"Apache-2.0"
] | null | null | null | examples/tutorial/paging_exception_force.py | Wlgen/force-riscv | 9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8 | [
"Apache-2.0"
] | null | null | null | examples/tutorial/paging_exception_force.py | Wlgen/force-riscv | 9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from PageFaultSequence import PageFaultSequence
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from riscv.ModifierUtils import PageFaultModifier
# This test verifies recovery from a page fault on a branch operation.
class MainSequence(PageFaultSequence):
    """Generates jump/load/store instructions that can trigger RISC-V page faults."""

    def __init__(self, gen_thread, name=None):
        super().__init__(gen_thread, name)
        # Jump, load and store instructions that may fault during translation.
        self._mInstrList = (
            "JAL##RISCV",
            "JALR##RISCV",
            "LD##RISCV",
            "SD##RISCV",
            "LW##RISCV",
            "SW##RISCV",
            "LH##RISCV",
            "SH##RISCV",
            "LB##RISCV",
            "SB##RISCV",
        )
        # exception code for instr, load, storeamo page fault
        self._mExceptionCodes = [12, 13, 15]
        self._mExceptionSubCodes = {}  # N/A

    # Create an instance of the appropriate page fault modifier.
    def createPageFaultModifier(self):
        return PageFaultModifier(
            self.genThread, self.getGlobalState("AppRegisterWidth")
        )

    # Return the tuple of instructions to choose from to trigger a page fault.
    def getInstructionList(self):
        return self._mInstrList

    # Return exception codes that can be generated from faulting instructions.
    def getExceptionCodes(self):
        return self._mExceptionCodes
# Entry points consumed by the FORCE-RISCV test harness.
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| 33.737705 | 89 | 0.677357 | 1,103 | 0.535957 | 0 | 0 | 0 | 0 | 0 | 0 | 1,064 | 0.517007 |
cbb2f8c56eb07a52d75952fa30eba4aacd7d35ed | 2,104 | py | Python | test.py | letri6tdn/PythonScript_URLshortener | 2d0c80b411b3bdf51ae5446b9bc1080ad54c4215 | [
"MIT"
] | null | null | null | test.py | letri6tdn/PythonScript_URLshortener | 2d0c80b411b3bdf51ae5446b9bc1080ad54c4215 | [
"MIT"
] | null | null | null | test.py | letri6tdn/PythonScript_URLshortener | 2d0c80b411b3bdf51ae5446b9bc1080ad54c4215 | [
"MIT"
] | null | null | null | # main.py
#----------------------------------------------------------------------#
#
#
#
#----------------------------------------------------------------------#
from math import floor
from sqlite3 import OperationalError
import string, sqlite3
from urllib.parse import urlparse
import http.server
import socketserver
from flask import Flask, request, render_template, redirect
#Assuming urls.db is in your app root folder
def table_check():
    """Create the WEB_URL table in urls.db if it does not already exist.

    An OperationalError from an already-existing table is ignored.

    Fix: AUTOINCREMENT is only valid on an INTEGER PRIMARY KEY column in
    SQLite; with the previous "INT" type the statement always raised
    OperationalError (silently swallowed), so the table was never created.
    """
    create_table = """
        CREATE TABLE WEB_URL(
        ID INTEGER PRIMARY KEY AUTOINCREMENT,
        URL TEXT NOT NULL
        );
        """
    with sqlite3.connect('urls.db') as conn:
        cursor = conn.cursor()
        try:
            cursor.execute(create_table)
        except OperationalError:
            # Table already exists — nothing to do.
            pass
# Base62 Encoder and Decoder
def toBase62(num, b=62):
    """Encode a non-negative integer in base `b` (1..62) using the alphabet
    [a-z][A-Z][0-9]; zero encodes as 'a'.

    Returns the int 0 for an out-of-range base, mirroring the original
    contract.
    """
    if b <= 0 or b > 62:
        return 0
    base = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    # divmod on ints avoids the float rounding that floor(num / b) introduced
    # for values >= 2**53, which silently produced wrong encodings.
    q, r = divmod(num, b)
    res = base[r]
    while q:
        q, r = divmod(q, b)
        res = base[r] + res
    return res
def toBase10(num, b=62):
    """Decode a string produced by toBase62 back into an integer."""
    digits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    value = 0
    for ch in num:
        value = value * b + digits.find(ch)
    return value
# WSGI application serving the shortener redirects.
app = Flask(__name__)
# REDIRECTING
@app.route('/<short_url>')
def redirect_short_url(short_url):
    """Resolve a short code to its stored URL and redirect the client.

    Falls back to a hard-coded URL when the code is unknown.
    """
    row_id = toBase10(short_url)
    redirect_url = 'https://marvelapp.com/asdf'  # fallback target
    with sqlite3.connect('urls.db') as conn:
        cursor = conn.cursor()
        # Parameterized query: never splice values into SQL with string
        # formatting, even ones that look numeric.
        result_cursor = cursor.execute(
            "SELECT URL FROM WEB_URL WHERE ID=?", (row_id,)
        )
        try:
            redirect_url = result_cursor.fetchone()[0]
        except Exception as e:
            # fetchone() returns None for a missing ID; keep the fallback.
            print(e)
    return redirect(redirect_url)
if __name__ == '__main__':
    # This code checks whether database table is created or not
    table_check()
    # Serve the shortener locally; debug=True enables the reloader.
    app.run(port = 8000, debug=True)
| 25.349398 | 75 | 0.568916 | 0 | 0 | 0 | 0 | 565 | 0.268536 | 0 | 0 | 727 | 0.345532 |
cbb30457af6425432e38e401ec2a806f28ae68ab | 6,968 | py | Python | SimpleHTTPSAuthServer.py | oza6ut0ne/SimpleHTTPSAuthServer | 084e5c0cb686e8c41ba1f911a6bc8939f2e24a59 | [
"MIT"
] | null | null | null | SimpleHTTPSAuthServer.py | oza6ut0ne/SimpleHTTPSAuthServer | 084e5c0cb686e8c41ba1f911a6bc8939f2e24a59 | [
"MIT"
] | null | null | null | SimpleHTTPSAuthServer.py | oza6ut0ne/SimpleHTTPSAuthServer | 084e5c0cb686e8c41ba1f911a6bc8939f2e24a59 | [
"MIT"
] | null | null | null | import base64
import os
import random
import re
import socket
import sys
import ssl
import string
if sys.version_info[0] == 2:
from BaseHTTPServer import HTTPServer as Server
from SimpleHTTPServer import SimpleHTTPRequestHandler as Handler
from SocketServer import ThreadingMixIn
from httplib import UNAUTHORIZED
from itertools import izip_longest as zip_longest
elif sys.version_info[0] == 3:
from http.server import HTTPServer as Server
from http.server import SimpleHTTPRequestHandler as Handler
from socketserver import ThreadingMixIn
from http.client import UNAUTHORIZED
from itertools import zip_longest
# Environment variables that seed default credentials/keys for the CLI.
ENV_USERS = 'SIMPLE_HTTPS_USERS'
ENV_PASSWORDS = 'SIMPLE_HTTPS_PASSWORDS'
ENV_KEYS = 'SIMPLE_HTTPS_KEYS'
class AuthHandler(Handler):
    """SimpleHTTPRequestHandler guarded by HTTP Basic authentication.

    Accepted credentials are the base64("user:password") strings stored on
    ``self.server.keys``; an empty list disables authentication entirely.
    """
    def send_auth_request(self):
        # 401 response asking the client for Basic credentials.
        self.send_response(UNAUTHORIZED)
        self.send_header('WWW-Authenticate', 'Basic realm=\"Authorization Required\"')
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def do_GET(self):
        if not self.server.keys:
            # NOTE(review): the __main__ guards mean file contents are only
            # served when this module runs as a script; importers get the
            # auth verdict (True/False) without serving — confirm intended.
            if __name__ == '__main__':
                Handler.do_GET(self)
            return True
        auth_header = self.headers.get('Authorization')
        if auth_header is None:
            # No credentials supplied yet: challenge the client.
            self.send_auth_request()
            self.wfile.write('<h1>Authorization Required</h1>'.encode())
            print('no auth header received')
            return False
        elif auth_header[len('Basic '):] in self.server.keys:
            # Credentials match one of the registered base64 keys.
            if __name__ == '__main__':
                Handler.do_GET(self)
            return True
        else:
            # Wrong credentials: challenge again and log the attempt.
            self.send_auth_request()
            self.wfile.write('<h1>Authorization Required</h1>'.encode())
            auth = re.sub('^Basic ', '', auth_header)
            print('Authentication failed! %s' % base64.b64decode(auth).decode())
            return False
    def super_get(self):
        # Serve the request unconditionally (bypasses the __main__ guard).
        Handler.do_GET(self)
class HTTPSAuthServer(Server):
    """HTTPServer with optional Basic-auth keys and optional TLS.

    ``keys`` holds base64("user:password") strings accepted by AuthHandler;
    an empty list disables authentication.
    """

    def __init__(self, server_address, RequestHandlerClass=AuthHandler,
                 bind_and_activate=True):
        Server.__init__(
            self, server_address, RequestHandlerClass, bind_and_activate)
        self.keys = []
        self.servercert = None
        self.cacert = None
        self.protocol = 'HTTP'
        self.certreqs = ssl.CERT_NONE

    def set_auth(self, users=None, passwords=None, keys=None):
        """Register accepted credentials.

        users/passwords are paired up (padded with ''); keys are pre-encoded
        base64("user:password") strings appended as-is. Passing nothing at
        all clears every key, i.e. disables authentication.
        """
        if not (users or passwords or keys):
            self.keys = []
            return
        if keys is not None:
            self.keys += keys
        if users is not None or passwords is not None:
            accounts = zip_longest(
                users or [''], passwords or [''], fillvalue=''
            )
            for user, password in accounts:
                self.keys.append(
                    base64.b64encode((user + ':' + password).encode()).decode()
                )

    def set_certs(self, servercert=None, cacert=None):
        """Enable TLS with the given server certificate; optionally require
        client certificates signed by ``cacert``.

        Fixes: (1) the socket is only wrapped when a server certificate is
        actually supplied — wrapping a server-side socket without one raises
        ValueError, which broke plain-HTTP mode; (2) the deprecated
        ``ssl.wrap_socket`` helper (removed in Python 3.12) is replaced by an
        SSLContext.
        """
        self.servercert = servercert
        self.cacert = cacert
        if servercert is None:
            return  # plain HTTP — nothing to wrap
        self.protocol = 'HTTPS'
        if cacert is not None:
            self.certreqs = ssl.CERT_REQUIRED
        context = ssl.SSLContext(
            getattr(ssl, 'PROTOCOL_TLS_SERVER', ssl.PROTOCOL_SSLv23))
        context.load_cert_chain(servercert)
        context.verify_mode = self.certreqs
        if cacert is not None:
            context.load_verify_locations(cacert)
        self.socket = context.wrap_socket(self.socket, server_side=True)

    def server_bind(self):
        # Accept both IPv4 and IPv6 on an IPv6 wildcard bind; the expected
        # failure on IPv4-only sockets/platforms is ignored (was a bare
        # except, which also swallowed KeyboardInterrupt and friends).
        try:
            self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
        except (OSError, AttributeError):
            pass
        return Server.server_bind(self)

    def serve_forever(self, poll_interval=0.5):
        """Announce the listening address and serve until Ctrl-C."""
        if self.servercert is None:
            print('No server certificate is specified. HTTPS is disabled.')
        elif self.cacert is not None:
            print('CA certificate is specified. '
                  'Client certificate authentication is enabled.')
        sockname = self.socket.getsockname()
        print('Serving {} on {} port {} ...'.format(
            self.protocol, sockname[0], sockname[1])
        )
        try:
            Server.serve_forever(self, poll_interval)
        except KeyboardInterrupt:
            pass
class ThreadedHTTPSAuthServer(ThreadingMixIn, HTTPSAuthServer):
    # Handle each request in its own daemon thread so one slow client cannot
    # block the server, and worker threads die with the main process.
    daemon_threads = True
def serve_https(bind=None, port=8000, users=None, passwords=None, keys=None,
                servercert=None, cacert=None, threaded=False,
                HandlerClass=AuthHandler):
    """Configure and run an (optionally threaded, optionally TLS) server."""
    server_class = ThreadedHTTPSAuthServer if threaded else HTTPSAuthServer
    # Resolve the bind address first so the IPv4/IPv6 family is picked
    # automatically from the address string.
    family, _, _, _, sockaddr = socket.getaddrinfo(
        bind, port, 0, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)[0]
    server_class.address_family = family
    httpd = server_class(sockaddr, HandlerClass)
    httpd.set_auth(users, passwords, keys)
    httpd.set_certs(servercert, cacert)
    httpd.serve_forever()
def random_string(length):
    """Return `length` characters drawn from letters, digits and punctuation.

    Uses the OS CSPRNG via random.SystemRandom: these strings are used as
    login credentials, for which the default Mersenne-Twister generator is
    predictable. SystemRandom keeps the same interface on Python 2 and 3.
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
def split_or_none(val):
    """Split a space-separated string into a list; pass None straight through."""
    return None if val is None else val.split(' ')
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='An HTTPS server with Basic authentication '
                    'and client certificate authentication',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('port', nargs='?', type=int, default=8000)
    parser.add_argument('-b', '--bind', metavar='ADDRESS')
    parser.add_argument('-t', '--threaded', action='store_true')
    # Credentials default to the SIMPLE_HTTPS_* environment variables.
    parser.add_argument('-u', '--users', nargs='*',
                        default=split_or_none(os.getenv(ENV_USERS)))
    parser.add_argument('-p', '--passwords', nargs='*',
                        default=split_or_none(os.getenv(ENV_PASSWORDS)))
    parser.add_argument('-k', '--keys', nargs='*',
                        default=split_or_none(os.getenv(ENV_KEYS)))
    parser.add_argument('-r', '--random', type=int)
    parser.add_argument('-s', '--servercert')
    parser.add_argument('-c', '--cacert')
    parser.add_argument('-d', '--docroot')
    args = parser.parse_args()
    # Resolve certificate paths before the chdir below changes the cwd.
    if args.servercert is not None:
        args.servercert = os.path.abspath(args.servercert)
    if args.cacert is not None:
        args.cacert = os.path.abspath(args.cacert)
    if args.docroot is not None:
        print('Set docroot to %s' % args.docroot)
        os.chdir(args.docroot)
    # -r N generates a throwaway N-character username/password pair.
    if args.random is not None:
        args.users = [random_string(args.random)]
        args.passwords = [random_string(args.random)]
        print('Generated username and password -> {} : {}'.format(
            args.users[0], args.passwords[0])
        )
    serve_https(args.bind, args.port, args.users, args.passwords,
                args.keys, args.servercert, args.cacert, args.threaded)
cbb368bf34a52b4d1ae95c69d88a1beadca9817c | 903 | py | Python | image/__init__.py | navroz-lamba/Image-Classifier-using-VGG-19-CNN | baceb95297b95be19d7e024eac50d5f754d26fa7 | [
"MIT"
] | 1 | 2020-12-18T18:45:00.000Z | 2020-12-18T18:45:00.000Z | image/__init__.py | navroz-lamba/Image-Classifier-using-VGG-19-CNN | baceb95297b95be19d7e024eac50d5f754d26fa7 | [
"MIT"
] | null | null | null | image/__init__.py | navroz-lamba/Image-Classifier-using-VGG-19-CNN | baceb95297b95be19d7e024eac50d5f754d26fa7 | [
"MIT"
] | null | null | null | import flask
from flask import Flask, url_for
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
# instantiating a class object
app = Flask(__name__)
# Path to the serialized VGG-19 Keras model, relative to the app root.
model_path = 'vgg19.h5'
# load the model
model = load_model(model_path)
# model._make_predict_function()
# preprocessing function
def model_predict(img_path, model):
    """Run the model on one image file and return the raw prediction array.

    The image is resized to the 224x224 input expected by VGG-19, converted
    to an array, given a leading batch dimension and preprocessed before
    inference.
    """
    loaded = image.load_img(img_path, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(loaded), axis=0)
    batch = preprocess_input(batch)
    return model.predict(batch)
from image import routes | 27.363636 | 93 | 0.746401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 283 | 0.3134 |
cbb3e7b69b02ac60b8543485427f81f9ba135f8f | 5,419 | py | Python | interface/tasks.py | cni-iisc/campus-rakshak-simulator-app | ef30a1fb57b72d25534945526fdb77d158ad16c1 | [
"Apache-2.0"
] | 1 | 2021-07-29T10:33:26.000Z | 2021-07-29T10:33:26.000Z | interface/tasks.py | cni-iisc/campus-rakshak-simulator-app | ef30a1fb57b72d25534945526fdb77d158ad16c1 | [
"Apache-2.0"
] | null | null | null | interface/tasks.py | cni-iisc/campus-rakshak-simulator-app | ef30a1fb57b72d25534945526fdb77d158ad16c1 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from .helper import convert, run_aggregate_sims
from django.core.files import File
from celery import shared_task
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from anymail.exceptions import AnymailError
from config.celery import app
from .models import simulationParams, campusInstantiation
from io import StringIO
import json
import pandas as pd
from django.utils import timezone
import sys
import os
import billiard as multiprocessing
## logging
import logging
log = logging.getLogger('celery_log')
## Custom modules taken from submodule
from simulator.staticInst.campus_parse_and_instantiate import campus_parse
from simulator.staticInst.default_betas import default_betas
@app.task()
def run_instantiate(inputFiles):
    """Celery task: build campus agents/interaction-spaces from uploaded data.

    inputFiles is a JSON string whose table entries are rebuilt into pandas
    DataFrames, fed to campus_parse()/default_betas(), and the results saved
    onto the campusInstantiation row identified by inputFiles['objid'].
    Returns True on success, False on failure (status is updated either way).
    """
    inputFiles = json.loads(inputFiles)
    inputFiles['students'] = pd.DataFrame.from_dict(inputFiles['students'])
    df = pd.DataFrame.from_dict(inputFiles['class'])
    # faculty_id arrives as a string/float from JSON; the parser needs ints.
    df = df.astype({'faculty_id': int})
    inputFiles['class'] = df
    del df
    inputFiles['timetable'] = pd.DataFrame.from_dict(inputFiles['timetable'])
    inputFiles['staff'] = pd.DataFrame.from_dict(inputFiles['staff'])
    inputFiles['mess'] = pd.DataFrame.from_dict(inputFiles['mess'])
    inputFiles['common_areas'] = pd.DataFrame.from_dict(inputFiles['common_areas'])
    inputFiles['campus_setup'] = pd.DataFrame.from_dict(inputFiles['campus_setup'])
    campusSetupDf = pd.DataFrame(inputFiles['campus_setup'])
    try:
        # NOTE(review): campus_parse's third return value (transCoeff2) is
        # discarded in favour of default_betas() — confirm intended.
        individuals, interactionSpace, transCoeff2 = campus_parse(inputFiles)
        # print("\nPrinting Input Files\n")
        # print(inputFiles)
        # print(campusSetupDf)
        transCoeff = default_betas(campusSetupDf)
        # Serialize results to in-memory files and attach them to the model.
        indF = StringIO(json.dumps(individuals, default=convert))
        intF = StringIO(json.dumps(interactionSpace, default=convert))
        campusInstantiation.objects.filter(id=inputFiles['objid'])[0].agent_json.save('individuals.json', File(indF))
        campusInstantiation.objects.filter(id=inputFiles['objid'])[0].interaction_spaces_json.save('interaction_spaces.json', File(intF))
        campusInstantiation.objects.filter(id=inputFiles['objid']).update(
            trans_coeff_file = json.dumps(transCoeff, default=convert),
            status = 'Complete',
            created_on = timezone.now()
        )
        log.info(f"Instantiaion job {campusInstantiation.objects.filter(id=inputFiles['objid'])[0].inst_name.campus_name} was completed successfully.")
        del individuals, interactionSpace, transCoeff
        return True
    except Exception as e:
        # Mark the job failed so the UI does not show it as pending forever.
        campusInstantiation.objects.filter(id=inputFiles['objid']).update(
            status = 'Error',
            created_on = timezone.now()
        )
        log.error(f"Instantiaion job {campusInstantiation.objects.filter(id=inputFiles['objid'])[0].inst_name.campus_name} terminated abruptly with error {e} at {sys.exc_info()}.")
        return False
def run_cmd(prgCall):
    """Run one simulator invocation (used as a multiprocessing pool worker).

    prgCall is a (command_prefix, output_directory) tuple; the output
    directory is created if needed and appended as the command's final
    argument.
    """
    # Local imports keep this module import-light for the Celery worker.
    import shlex
    import subprocess

    print(prgCall)
    cmd_prefix, out_dir = prgCall
    # exist_ok avoids a race when several pool workers create directories
    # under the same parent concurrently.
    os.makedirs(out_dir, exist_ok=True)
    # Run without a shell: the directory name is passed as a real argv
    # element instead of being spliced into a shell command string.
    subprocess.run(shlex.split(cmd_prefix) + [out_dir], check=False)
@app.task()
def run_simulation(id, dirName, enable_testing, intv_name):
    """Celery task: run N iterations of the C++ simulator in parallel.

    id: primary key of the simulationParams row driving this run.
    dirName: instantiation directory holding the simulator inputs.
    enable_testing: adds the testing-protocol flags to the command line.
    intv_name: intervention file stem (./<intv_name>.json).
    Returns True when all iterations complete and results are aggregated.
    """
    obj = simulationParams.objects.filter(id=id)
    obj.update(status='Running')
    obj = obj[0]
    log.info(f"Simulation job { obj.simulation_name } is now running.")
    cmd = f"./simulator/cpp-simulator/drive_simulator --SEED_FIXED_NUMBER --INIT_FIXED_NUMBER_INFECTED { obj.init_infected_seed } --intervention_filename ./{intv_name}.json --NUM_DAYS { obj.days_to_simulate }"
    if(enable_testing):
        cmd += f" --ENABLE_TESTING --testing_protocol_filename ./testing_protocol.json"
    # The output directory is appended per-iteration by run_cmd.
    cmd += f" --input_directory { dirName } --output_directory "
    # One (command, output_dir) pair per simulation iteration.
    list_of_sims = [(cmd , f"{ dirName }/{obj.simulation_name.replace(' ', '_')}_{ intv_name }_id_{ i }") for i in range(obj.simulation_iterations)]
    # Leave one core free for the worker process itself.
    pool = multiprocessing.Pool(multiprocessing.cpu_count() - 1)
    r = pool.map_async(run_cmd, list_of_sims)
    r.wait()
    try:
        log.info(f" Running sims for {obj.simulation_name } are complete")
        simulationParams.objects.filter(id=id).update(
            output_directory=f"{ dirName }/{obj.simulation_name.replace(' ', '_')}_{ intv_name }",
            status='Complete',
            completed_at=timezone.now()
        )
        run_aggregate_sims(id)
        log.info(f"Simulation job { obj.simulation_name } is complete and the results are aggregated.")
        return True
    except Exception as e:
        # Mark the job failed so it is not stuck in 'Running' forever.
        simulationParams.objects.filter(id=id).update(
            status = 'Error',
            created_on = timezone.now()
        )
        log.error(f"Simulation job { obj.simulation_name } terminated abruptly with error {e} at {sys.exc_info()}.")
        return False
@shared_task(bind=True, max_retries=settings.CELERY_TASK_MAX_RETRIES)
def send_mail(self, recipient, subject, html_message, context, **kwargs):
    """Send a templated HTML email to a single recipient via Anymail.

    Retries the task (up to CELERY_TASK_MAX_RETRIES) when the email backend
    raises an AnymailError.
    """
    # Subject and body can't be empty. Empty string or space return index out of range error
    message = EmailMultiAlternatives(
        subject=subject,
        body=html_message,
        from_email=settings.DJANGO_DEFAULT_FROM_EMAIL,
        to=[recipient]
    )
    message.attach_alternative(" ", "text/html")
    message.merge_data = {
        recipient: context,
    }
    try:
        message.send()
    except AnymailError as e:
        # Fix: retry()'s first positional parameter is `args`, not the
        # exception — passing `e` positionally mangled the retried task's
        # arguments. Pass it as exc= so Celery records and re-raises it.
        self.retry(exc=e)
| 41.366412 | 209 | 0.704927 | 0 | 0 | 0 | 0 | 4,488 | 0.828197 | 0 | 0 | 1,575 | 0.290644 |
cbb42904bf11927aff25c0adb772e27632914a8b | 6,566 | py | Python | lightweight_mmm/media_transforms_test.py | google/lightweight_mmm | 9bbecbeb691e79019f920cf904d8cb8b918e6638 | [
"Apache-2.0"
] | 45 | 2022-02-10T09:04:16.000Z | 2022-03-29T23:54:16.000Z | lightweight_mmm/media_transforms_test.py | google/lightweight_mmm | 9bbecbeb691e79019f920cf904d8cb8b918e6638 | [
"Apache-2.0"
] | 2 | 2022-02-14T16:02:55.000Z | 2022-02-17T12:43:40.000Z | lightweight_mmm/media_transforms_test.py | google/lightweight_mmm | 9bbecbeb691e79019f920cf904d8cb8b918e6638 | [
"Apache-2.0"
] | 4 | 2022-02-16T12:21:25.000Z | 2022-03-31T16:20:45.000Z | # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for media_transforms."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from lightweight_mmm import media_transforms
class MediaTransformsTest(parameterized.TestCase):
  """Shape and zero-preservation tests for the media transform functions."""
  # carryover: output shape must match input for 2D (time, channel) and
  # 3D (time, channel, geo) media arrays.
  @parameterized.named_parameters([
      dict(
          testcase_name="2d_four_channels",
          data=np.ones((100, 4)),
          ad_effect_retention_rate=np.array([0.9, 0.8, 0.7, 1]),
          peak_effect_delay=np.array([0.9, 0.8, 0.7, 1]),
          number_lags=5),
      dict(
          testcase_name="2d_one_channel",
          data=np.ones((300, 1)),
          ad_effect_retention_rate=np.array([0.2]),
          peak_effect_delay=np.array([1]),
          number_lags=10),
      dict(
          testcase_name="3d_10channels_10geos",
          data=np.ones((100, 10, 10)),
          ad_effect_retention_rate=np.ones(10),
          peak_effect_delay=np.ones(10),
          number_lags=13),
      dict(
          testcase_name="3d_10channels_8geos",
          data=np.ones((100, 10, 8)),
          ad_effect_retention_rate=np.ones(10),
          peak_effect_delay=np.ones(10),
          number_lags=13),
  ])
  def test_carryover_produces_correct_shape(self, data,
                                            ad_effect_retention_rate,
                                            peak_effect_delay, number_lags):
    generated_output = media_transforms.carryover(data,
                                                  ad_effect_retention_rate,
                                                  peak_effect_delay,
                                                  number_lags)
    self.assertEqual(generated_output.shape, data.shape)
  # hill: saturation transform must also be shape-preserving.
  @parameterized.named_parameters([
      dict(
          testcase_name="2d_three_channels",
          data=np.ones((100, 3)),
          half_max_effective_concentration=np.array([0.9, 0.8, 0.7]),
          slope=np.array([2, 2, 1])),
      dict(
          testcase_name="2d_one_channels",
          data=np.ones((100, 1)),
          half_max_effective_concentration=np.array([0.9]),
          slope=np.array([5])),
      dict(
          testcase_name="3d_10channels_5geos",
          data=np.ones((100, 10, 5)),
          half_max_effective_concentration=np.expand_dims(np.ones(10), axis=-1),
          slope=np.expand_dims(np.ones(10), axis=-1)),
      dict(
          testcase_name="3d_8channels_10geos",
          data=np.ones((100, 8, 10)),
          half_max_effective_concentration=np.expand_dims(np.ones(8), axis=-1),
          slope=np.expand_dims(np.ones(8), axis=-1)),
  ])
  def test_hill_produces_correct_shape(self, data,
                                       half_max_effective_concentration, slope):
    generated_output = media_transforms.hill(
        data=data,
        half_max_effective_concentration=half_max_effective_concentration,
        slope=slope)
    self.assertEqual(generated_output.shape, data.shape)
  # adstock: both normalised and unnormalised variants preserve shape.
  @parameterized.named_parameters([
      dict(
          testcase_name="2d_five_channels",
          data=np.ones((100, 5)),
          lag_weight=np.array([0.2, 0.3, 0.8, 0.2, 0.1]),
          normalise=True),
      dict(
          testcase_name="2d_one_channels",
          data=np.ones((100, 1)),
          lag_weight=np.array([0.4]),
          normalise=False),
      dict(
          testcase_name="3d_10channels_5geos",
          data=np.ones((100, 10, 5)),
          lag_weight=np.expand_dims(np.ones(10), axis=-1),
          normalise=True),
      dict(
          testcase_name="3d_8channels_10geos",
          data=np.ones((100, 8, 10)),
          lag_weight=np.expand_dims(np.ones(8), axis=-1),
          normalise=True),
  ])
  def test_adstock_produces_correct_shape(self, data, lag_weight, normalise):
    generated_output = media_transforms.adstock(
        data=data, lag_weight=lag_weight, normalise=normalise)
    self.assertEqual(generated_output.shape, data.shape)
  def test_apply_exponent_safe_produces_correct_shape(self):
    # With no zeros in the data the safe exponent equals the plain power.
    data = jnp.arange(50).reshape((10, 5))
    exponent = jnp.full(5, 0.5)
    output = media_transforms.apply_exponent_safe(data=data, exponent=exponent)
    np.testing.assert_array_equal(x=output, y=data**exponent)
  def test_apply_exponent_safe_produces_same_exponent_results(self):
    # NOTE(review): despite the name, this only asserts the output shape.
    data = jnp.ones((10, 5))
    exponent = jnp.full(5, 0.5)
    output = media_transforms.apply_exponent_safe(data=data, exponent=exponent)
    self.assertEqual(output.shape, data.shape)
  def test_apply_exponent_safe_produces_non_nan_or_inf_grads(self):
    # Gradients must stay finite even when a data entry is exactly zero,
    # which is the case apply_exponent_safe exists to protect against.
    def f_safe(data, exponent):
      x = media_transforms.apply_exponent_safe(data=data, exponent=exponent)
      return x.sum()
    data = jnp.ones((10, 5))
    data = data.at[0, 0].set(0.)
    exponent = jnp.full(5, 0.5)
    grads = jax.grad(f_safe)(data, exponent)
    self.assertFalse(np.isnan(grads).any())
    self.assertFalse(np.isinf(grads).any())
  def test_adstock_zeros_stay_zeros(self):
    # All-zero media must remain zero after each transform.
    data = jnp.zeros((10, 5))
    lag_weight = jnp.full(5, 0.5)
    generated_output = media_transforms.adstock(
        data=data, lag_weight=lag_weight)
    np.testing.assert_array_equal(x=generated_output, y=data)
  def test_hill_zeros_stay_zeros(self):
    data = jnp.zeros((10, 5))
    half_max_effective_concentration = jnp.full(5, 0.5)
    slope = jnp.full(5, 0.5)
    generated_output = media_transforms.hill(
        data=data,
        half_max_effective_concentration=half_max_effective_concentration,
        slope=slope)
    np.testing.assert_array_equal(x=generated_output, y=data)
  def test_carryover_zeros_stay_zeros(self):
    data = jnp.zeros((10, 5))
    ad_effect_retention_rate = jnp.full(5, 0.5)
    peak_effect_delay = jnp.full(5, 0.5)
    generated_output = media_transforms.carryover(
        data=data,
        ad_effect_retention_rate=ad_effect_retention_rate,
        peak_effect_delay=peak_effect_delay)
    np.testing.assert_array_equal(x=generated_output, y=data)
if __name__ == "__main__":
  # Run the test suite via absl's test runner.
  absltest.main()
| 34.925532 | 80 | 0.646055 | 5,732 | 0.872982 | 0 | 0 | 3,654 | 0.556503 | 0 | 0 | 837 | 0.127475 |
cbb6cf178d364c3e00e075d9dd9ffee20b00a722 | 9,680 | py | Python | tests/plugins/inventory/test_simple_inventory.py | kyawmyomin/nornir | 4ae009ac894ec5b412c9859fe699b68747526255 | [
"Apache-2.0"
] | null | null | null | tests/plugins/inventory/test_simple_inventory.py | kyawmyomin/nornir | 4ae009ac894ec5b412c9859fe699b68747526255 | [
"Apache-2.0"
] | null | null | null | tests/plugins/inventory/test_simple_inventory.py | kyawmyomin/nornir | 4ae009ac894ec5b412c9859fe699b68747526255 | [
"Apache-2.0"
] | null | null | null | import os
from nornir.plugins.inventory import SimpleInventory
dir_path = os.path.dirname(os.path.realpath(__file__))
class Test:
    """End-to-end checks of SimpleInventory against the YAML fixtures in data/."""

    def test(self):
        """Loading the full fixture set yields exactly the expected inventory dict."""
        host_file = f"{dir_path}/data/hosts.yaml"
        group_file = f"{dir_path}/data/groups.yaml"
        defaults_file = f"{dir_path}/data/defaults.yaml"
        inv = SimpleInventory(host_file, group_file, defaults_file).load()
        # Golden-value comparison: any change to group inheritance / merging
        # logic, or to the fixture files, surfaces as a diff here.
        assert inv.dict() == {
            "defaults": {
                "connection_options": {
                    "dummy": {
                        "extras": {"blah": "from_defaults"},
                        "hostname": "dummy_from_defaults",
                        "password": None,
                        "platform": None,
                        "port": None,
                        "username": None,
                    }
                },
                "data": {
                    "my_var": "comes_from_defaults",
                    "only_default": "only_defined_in_default",
                },
                "hostname": None,
                "password": "docker",
                "platform": "linux",
                "port": None,
                "username": "root",
            },
            "groups": {
                "group_1": {
                    "connection_options": {},
                    "data": {"my_var": "comes_from_group_1", "site": "site1"},
                    "groups": ["parent_group"],
                    "hostname": None,
                    "name": "group_1",
                    "password": "from_group1",
                    "platform": "linux",
                    "port": None,
                    "username": "root",
                },
                "group_2": {
                    "connection_options": {},
                    "data": {"site": "site2"},
                    "groups": [],
                    "hostname": None,
                    "name": "group_2",
                    "password": "docker",
                    "platform": "linux",
                    "port": None,
                    "username": "root",
                },
                "group_3": {
                    "connection_options": {},
                    "data": {"site": "site2"},
                    "groups": [],
                    "hostname": None,
                    "name": "group_3",
                    "password": "docker",
                    "platform": "linux",
                    "port": None,
                    "username": "root",
                },
                "parent_group": {
                    "connection_options": {
                        "dummy": {
                            "extras": {"blah": "from_group"},
                            "hostname": "dummy_from_parent_group",
                            "password": None,
                            "platform": None,
                            "port": None,
                            "username": None,
                        },
                        "dummy2": {
                            "extras": {"blah": "from_group"},
                            "hostname": "dummy2_from_parent_group",
                            "password": None,
                            "platform": None,
                            "port": None,
                            "username": None,
                        },
                    },
                    "data": {"a_false_var": False, "a_var": "blah"},
                    "groups": [],
                    "hostname": None,
                    "name": "parent_group",
                    "password": "from_parent_group",
                    "platform": "linux",
                    "port": None,
                    "username": "root",
                },
            },
            "hosts": {
                "dev1.group_1": {
                    "connection_options": {
                        "dummy": {
                            "extras": {"blah": "from_host"},
                            "hostname": "dummy_from_host",
                            "password": None,
                            "platform": None,
                            "port": None,
                            "username": None,
                        },
                        "paramiko": {
                            "extras": {},
                            "hostname": None,
                            "password": "docker",
                            "platform": "linux",
                            "port": 65020,
                            "username": "root",
                        },
                    },
                    "data": {
                        "my_var": "comes_from_dev1.group_1",
                        "nested_data": {
                            "a_dict": {"a": 1, "b": 2},
                            "a_list": [1, 2],
                            "a_string": "asdasd",
                        },
                        "role": "www",
                        "www_server": "nginx",
                    },
                    "groups": ["group_1"],
                    "hostname": "localhost",
                    "name": "dev1.group_1",
                    "password": "a_password",
                    "platform": "eos",
                    "port": 65020,
                    "username": "root",
                },
                "dev2.group_1": {
                    "connection_options": {
                        "dummy2": {
                            "extras": None,
                            "hostname": None,
                            "password": None,
                            "platform": None,
                            "port": None,
                            "username": "dummy2_from_host",
                        },
                        "paramiko": {
                            "extras": {},
                            "hostname": None,
                            "password": "docker",
                            "platform": "linux",
                            "port": None,
                            "username": "root",
                        },
                    },
                    "data": {
                        "nested_data": {
                            "a_dict": {"b": 2, "c": 3},
                            "a_list": [2, 3],
                            "a_string": "qwe",
                        },
                        "role": "db",
                    },
                    "groups": ["group_1"],
                    "hostname": "localhost",
                    "name": "dev2.group_1",
                    "password": "from_group1",
                    "platform": "junos",
                    "port": 65021,
                    "username": "root",
                },
                "dev3.group_2": {
                    "connection_options": {
                        "nornir_napalm.napalm": {
                            "extras": {},
                            "hostname": None,
                            "password": None,
                            "platform": "mock",
                            "port": None,
                            "username": None,
                        }
                    },
                    "data": {"role": "www", "www_server": "apache"},
                    "groups": ["group_2"],
                    "hostname": "localhost",
                    "name": "dev3.group_2",
                    "password": "docker",
                    "platform": "linux",
                    "port": 65022,
                    "username": "root",
                },
                "dev4.group_2": {
                    "connection_options": {
                        "netmiko": {
                            "extras": {},
                            "hostname": "localhost",
                            "password": "docker",
                            "platform": "linux",
                            "port": None,
                            "username": "root",
                        },
                        "paramiko": {
                            "extras": {},
                            "hostname": "localhost",
                            "password": "docker",
                            "platform": "linux",
                            "port": None,
                            "username": "root",
                        },
                    },
                    "data": {"my_var": "comes_from_dev4.group_2", "role": "db"},
                    "groups": ["parent_group", "group_2"],
                    "hostname": "localhost",
                    "name": "dev4.group_2",
                    "password": "from_parent_group",
                    "platform": "linux",
                    "port": 65023,
                    "username": "root",
                },
                "dev5.no_group": {
                    "connection_options": {},
                    "data": {},
                    "groups": [],
                    "hostname": "localhost",
                    "name": "dev5.no_group",
                    "password": "docker",
                    "platform": "linux",
                    "port": 65024,
                    "username": "root",
                },
            },
        }

    def test_simple_inventory_empty(self):
        """Verify completely empty groups.yaml and defaults.yaml doesn't generate exception."""
        host_file = f"{dir_path}/data/hosts-nogroups.yaml"
        group_file = f"{dir_path}/data/groups-empty.yaml"
        defaults_file = f"{dir_path}/data/defaults-empty.yaml"
        inv = SimpleInventory(host_file, group_file, defaults_file).load()
        # One host defined in hosts-nogroups.yaml; empty group/default files
        # must produce empty structures rather than raising.
        assert len(inv.hosts) == 1
        assert inv.groups == {}
        assert inv.defaults.data == {}
| 39.510204 | 95 | 0.31219 | 9,557 | 0.987293 | 0 | 0 | 0 | 0 | 0 | 0 | 3,090 | 0.319215 |
cbb6d2fb3dae2334128d6c0086f42f31db307ebc | 5,016 | py | Python | main.py | shipwreckdev/cloudflare-reconciler | deefb016e5013ad95d59a9e7cc03ce42632e6757 | [
"Apache-2.0"
] | null | null | null | main.py | shipwreckdev/cloudflare-reconciler | deefb016e5013ad95d59a9e7cc03ce42632e6757 | [
"Apache-2.0"
] | null | null | null | main.py | shipwreckdev/cloudflare-reconciler | deefb016e5013ad95d59a9e7cc03ce42632e6757 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
import requests
import lib.cf as cf
import lib.aws as aws
import lib.gh as gh
version = "v1.0"
api_base = 'https://api.cloudflare.com/client/v4/zones'
api_key = os.getenv("CLOUDFLARE_API_KEY")
content_type = 'application/json'
email = os.getenv("CLOUDFLARE_EMAIL")
github_repo = "mygithuborg/myrepo"
# Parse for arguments.
parser = argparse.ArgumentParser(
description='Compare records in Cloudflare against endpoints in AWS and reconcile resources that do not match.')
parser.add_argument('--cluster', dest='cluster', type=str,
help='EKS cluster to use when looking for load balancers. Should be Name tag of cluster.', default='')
parser.add_argument('--dry-run', dest='dry_run',
help='Dry run mode. Records will not be reconciled in Cloudflare with this option.', action='store_true')
parser.add_argument('--domain', dest='domain', type=str,
help='Domain to evaluate. For example: foobar.com', default='')
parser.add_argument('--elb-tag', dest='elb_tag', type=str,
help='Tag key/value used to find load balancer in the form of key:value.', default='')
parser.add_argument('--issue', dest='create_issue',
help='Create a GitHub issue in the subject Terraform repository with relevant data.', action='store_true')
parser.add_argument('--records', dest='records', type=str,
help='Record to evaluate. For example: test - this would imply test.foobar.com', default='')
args = parser.parse_args()
# Establish argument variables.
cluster = args.cluster
create_issue = args.create_issue
dry_run = args.dry_run
domain = args.domain
elb_tag = args.elb_tag.split(':')
records = args.records.split(',')
headers = {
'X-Auth-Email': email,
'X-Auth-Key': api_key,
'Content-Type': content_type
}
zones = cf.GetZones(api_base, headers)
if dry_run:
print('Dry run enabled. Unsynchronized Cloudflare records will not be updated.')
print()
if __name__ == "__main__":
if domain != '' and elb_tag != [''] and cluster != '':
if domain in zones.keys():
if records != ['']:
for r in records:
print('Evaluating {}.{}'.format(r, domain))
print()
status = cf.GetExistingRecord(
api_base, domain, headers, r, zones[domain])
if type(status) is dict:
print('Cloudflare Record: ' + status['host'])
print('Cloudflare Record Type: ' +
status['record_type'])
print('Cloudflare Record Content: ' +
status['content'])
print()
current_elb_hostname = aws.ReturnHostname(cluster,
elb_tag[0], elb_tag[1])
print('ELB Hostname: ' + current_elb_hostname)
print()
if status['content'] == current_elb_hostname:
synchronized = True
else:
synchronized = False
print('Synchronized: ' + str(synchronized))
print()
if synchronized == False and dry_run == False:
action = cf.UpdateRecord(
api_base, headers, current_elb_hostname, status, zones[domain])
print(action)
if create_issue == True:
print()
gh.create_issue(
"[cloudflare-reconciler] Record {} needs to be updated in Cloudflare.".format(
status['host']),
"The Cloudflare record `{}` should be updated in Terraform to point to the endpoint `{}` which has recently changed.\n\nThis issue was automatically created by `cloudflare-reconciler {}`.".format(status['host'], current_elb_hostname, version))
elif synchronized == False and dry_run == True:
print(
'Record is not synchronized. Skipping update since dry run is enabled.')
else:
print('{} in Cloudflare zone {}'.format(status, domain))
else:
print(
'Please provide at least one record to evaluate for {}.'.format(domain))
else:
print('No matching zone found for domain {}.'.format(domain))
print()
print('Available Cloudflare zones: ')
for k in zones.keys():
print(k)
else:
print('Please provide a domain to evaluate, a key/value pair for isolating an AWS ELB, and an EKS cluster option.')
| 43.617391 | 279 | 0.546451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,759 | 0.350678 |
cbb79607ab88cfb6b0f8cd6f2a609f20a5508f65 | 867 | py | Python | app/helpers.py | QwertygidQ/phystech-backend | 657a5862d3bab91623551c7f4f6868cfdb1df4b8 | [
"MIT"
] | null | null | null | app/helpers.py | QwertygidQ/phystech-backend | 657a5862d3bab91623551c7f4f6868cfdb1df4b8 | [
"MIT"
] | null | null | null | app/helpers.py | QwertygidQ/phystech-backend | 657a5862d3bab91623551c7f4f6868cfdb1df4b8 | [
"MIT"
] | null | null | null | from flask import request, jsonify
from functools import wraps
from schema import SchemaError
def json_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not request.data or not request.json:
return jsonify({
'status': 'Error',
'reason': 'No JSON provided'
})
return func(*args, **kwargs)
return wrapper
def validate_schema(schema):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
schema.validate(request.json)
except SchemaError:
return jsonify({
'status': 'Error',
'reason': 'Invalid JSON provided'
})
return func(*args, **kwargs)
return wrapper
return decorator | 25.5 | 53 | 0.517878 | 0 | 0 | 0 | 0 | 600 | 0.692042 | 0 | 0 | 87 | 0.100346 |
cbbbcdd9a359c2d8bfbdd28aa61f833e1d9364df | 362 | py | Python | tests/__init__.py | zhester/jdi | 18d43331f15776a5ef94342ac85a24a083a3c7d7 | [
"BSD-2-Clause"
] | null | null | null | tests/__init__.py | zhester/jdi | 18d43331f15776a5ef94342ac85a24a083a3c7d7 | [
"BSD-2-Clause"
] | null | null | null | tests/__init__.py | zhester/jdi | 18d43331f15776a5ef94342ac85a24a083a3c7d7 | [
"BSD-2-Clause"
] | null | null | null | #=============================================================================
#
# JDI Unit Tests
#
#=============================================================================
"""
JDI Unit Tests
==============
Run all unit tests from project's root directory.
python -m unittest discover
python3 -m unittest discover
"""
__version__ = '0.0.0'
| 16.454545 | 78 | 0.345304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.928177 |
cbbe7b8eb169b6440ac5e6b29f5cb22280403941 | 13,380 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/course_grade.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/course_grade.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/grades/course_grade.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
CourseGrade Class
"""
from abc import abstractmethod
from collections import OrderedDict, defaultdict
from ccx_keys.locator import CCXLocator
from django.conf import settings
from lazy import lazy
from openedx.core.lib.grade_utils import round_away_from_zero
from xmodule import block_metadata_utils
from .config import assume_zero_if_absent
from .scores import compute_percent
from .subsection_grade import ZeroSubsectionGrade
from .subsection_grade_factory import SubsectionGradeFactory
class CourseGradeBase:
    """
    Base class for Course Grades.
    """
    def __init__(self, user, course_data, percent=0.0, letter_grade=None, passed=False, force_update_subsections=False):
        self.user = user
        self.course_data = course_data
        self.percent = percent
        self.passed = passed
        # Convert empty strings to None when reading from the table
        self.letter_grade = letter_grade or None
        self.force_update_subsections = force_update_subsections

    def __str__(self):
        return 'Course Grade: percent: {}, letter_grade: {}, passed: {}'.format(
            str(self.percent),
            self.letter_grade,
            self.passed,
        )

    @property
    def attempted(self):
        """
        Returns whether at least one problem was attempted
        by the user in the course.
        """
        # Base implementation: subclasses override (see CourseGrade.attempted).
        return False

    def subsection_grade(self, subsection_key):
        """
        Returns the subsection grade for the given subsection usage key.

        Raises `KeyError` if the course structure does not contain the key.

        If the course structure contains the key, this will always succeed
        (and return a grade) regardless of whether the user can access that section;
        it is up to the caller to ensure that the grade isn't
        shown to users that shouldn't be able to access it
        (e.g. a student shouldn't see a grade for an unreleased subsection);
        """
        # look in the user structure first and fallback to the collected;
        # however, we assume the state of course_data is intentional,
        # so we use effective_structure to avoid additional fetching
        subsection = (
            self.course_data.effective_structure[subsection_key]
            if subsection_key in self.course_data.effective_structure
            else self.course_data.collected_structure[subsection_key]
        )
        return self._get_subsection_grade(subsection)

    @lazy
    def graded_subsections_by_format(self):
        """
        Returns grades for the subsections in the course in
        a dict keyed by subsection format types.
        """
        subsections_by_format = defaultdict(OrderedDict)
        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                # Only graded subsections with a nonzero possible score count.
                if subsection_grade.graded:
                    graded_total = subsection_grade.graded_total
                    if graded_total.possible > 0:
                        subsections_by_format[subsection_grade.format][subsection_grade.location] = subsection_grade
        return subsections_by_format

    @lazy
    def chapter_grades(self):
        """
        Returns a dictionary of dictionaries.
        The primary dictionary is keyed by the chapter's usage_key.
        The secondary dictionary contains the chapter's
        subsection grades, display name, and url name.
        """
        course_structure = self.course_data.structure
        grades = OrderedDict()
        for chapter_key in course_structure.get_children(self.course_data.location):
            grades[chapter_key] = self._get_chapter_grade_info(course_structure[chapter_key], course_structure)
        return grades

    @lazy
    def subsection_grades(self):
        """
        Returns an ordered dictionary of subsection grades,
        keyed by subsection location.
        """
        subsection_grades = defaultdict(OrderedDict)
        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                subsection_grades[subsection_grade.location] = subsection_grade
        return subsection_grades

    @lazy
    def problem_scores(self):
        """
        Returns a dict of problem scores keyed by their locations.
        """
        problem_scores = {}
        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                problem_scores.update(subsection_grade.problem_scores)
        return problem_scores

    def chapter_percentage(self, chapter_key):
        """
        Returns the rounded aggregate weighted percentage for the given chapter.
        Raises:
            KeyError if the chapter is not found.
        """
        earned, possible = 0.0, 0.0
        chapter_grade = self.chapter_grades[chapter_key]
        for section in chapter_grade['sections']:
            earned += section.graded_total.earned
            possible += section.graded_total.possible
        return compute_percent(earned, possible)

    def score_for_module(self, location):
        """
        Calculate the aggregate weighted score for any location in the course.
        This method returns a tuple containing (earned_score, possible_score).

        If the location is of 'problem' type, this method will return the
        possible and earned scores for that problem. If the location refers to a
        composite module (a vertical or section ) the scores will be the sums of
        all scored problems that are children of the chosen location.
        """
        if location in self.problem_scores:
            score = self.problem_scores[location]
            return score.earned, score.possible
        # Not a leaf problem: recurse over children and sum their scores.
        children = self.course_data.structure.get_children(location)
        earned, possible = 0.0, 0.0
        for child in children:
            child_earned, child_possible = self.score_for_module(child)
            earned += child_earned
            possible += child_possible
        return earned, possible

    @lazy
    def grader_result(self):
        """
        Returns the result from the course grader.
        """
        course = self._prep_course_for_grading(self.course_data.course)
        return course.grader.grade(
            self.graded_subsections_by_format,
            generate_random_scores=settings.GENERATE_PROFILE_SCORES,
        )

    @property
    def summary(self):
        """
        Returns the grade summary as calculated by the course's grader.
        DEPRECATED: To be removed as part of TNL-5291.
        """
        # TODO(TNL-5291) Remove usages of this deprecated property.
        grade_summary = self.grader_result
        grade_summary['percent'] = self.percent
        grade_summary['grade'] = self.letter_grade
        return grade_summary

    @classmethod
    def get_subsection_type_graders(cls, course):
        """
        Returns a dictionary mapping subsection types to their
        corresponding configured graders, per grading policy.
        """
        course = cls._prep_course_for_grading(course)
        return {
            subsection_type: subsection_type_grader
            for (subsection_type_grader, subsection_type, _)
            in course.grader.subgraders
        }

    @classmethod
    def _prep_course_for_grading(cls, course):
        """
        Make sure any overrides to the grading policy are used.
        This is most relevant for CCX courses.

        Right now, we still access the grading policy from the course
        object. Once we get the grading policy from the BlockStructure
        this will no longer be needed - since BlockStructure correctly
        retrieves/uses all field overrides.
        """
        if isinstance(course.id, CCXLocator):
            # clean out any field values that may have been set from the
            # parent course of the CCX course.
            course._field_data_cache = {}  # pylint: disable=protected-access

            # this is "magic" code that automatically retrieves any overrides
            # to the grading policy and updates the course object.
            course.set_grading_policy(course.grading_policy)
        return course

    def _get_chapter_grade_info(self, chapter, course_structure):
        """
        Helper that returns a dictionary of chapter grade information.
        """
        chapter_subsection_grades = self._get_subsection_grades(course_structure, chapter.location)
        return {
            'display_name': block_metadata_utils.display_name_with_default(chapter),
            'url_name': block_metadata_utils.url_name_for_block(chapter),
            'sections': chapter_subsection_grades,
        }

    def _get_subsection_grades(self, course_structure, chapter_key):
        """
        Returns a list of subsection grades for the given chapter.
        """
        return [
            self._get_subsection_grade(course_structure[subsection_key], self.force_update_subsections)
            for subsection_key in _uniqueify_and_keep_order(course_structure.get_children(chapter_key))
        ]

    @abstractmethod
    def _get_subsection_grade(self, subsection, force_update_subsections=False):
        """
        Abstract method to be implemented by subclasses for returning
        the grade of the given subsection.
        """
        raise NotImplementedError
class ZeroCourseGrade(CourseGradeBase):
    """Course grade representing an all-zero grade.

    Used when no problems were attempted by the user in the course.
    """

    def _get_subsection_grade(self, subsection, force_update_subsections=False):
        # Every subsection resolves to a zero grade; nothing to update.
        zero_grade = ZeroSubsectionGrade(subsection, self.course_data)
        return zero_grade
class CourseGrade(CourseGradeBase):
    """
    Course Grade class when grades are updated or read from storage.
    """
    def __init__(self, user, course_data, *args, **kwargs):
        super().__init__(user, course_data, *args, **kwargs)
        # Factory used to create/update the per-subsection grades lazily.
        self._subsection_grade_factory = SubsectionGradeFactory(user, course_data=course_data)

    def update(self):
        """
        Updates the grade for the course. Also updates subsection grades
        if self.force_update_subsections is true, via the lazy call
        to self.grader_result.
        """
        # TODO update this code to be more functional and readable.
        # Currently, it is hard to follow since there are plenty of
        # side-effects. Once functional, force_update_subsections
        # can be passed through and not confusingly stored and used
        # at a later time.
        grade_cutoffs = self.course_data.course.grade_cutoffs
        self.percent = self._compute_percent(self.grader_result)
        self.letter_grade = self._compute_letter_grade(grade_cutoffs, self.percent)
        self.passed = self._compute_passed(grade_cutoffs, self.percent)
        return self

    @lazy
    def attempted(self):  # lint-amnesty, pylint: disable=invalid-overridden-method
        """
        Returns whether any of the subsections in this course
        have been attempted by the student.
        """
        # With assumed-zero grades enabled, every learner counts as attempted.
        if assume_zero_if_absent(self.course_data.course_key):
            return True

        for chapter in self.chapter_grades.values():
            for subsection_grade in chapter['sections']:
                if subsection_grade.all_total.first_attempted:
                    return True
        return False

    def _get_subsection_grade(self, subsection, force_update_subsections=False):
        if self.force_update_subsections:
            return self._subsection_grade_factory.update(subsection, force_update_subsections=force_update_subsections)
        else:
            # Pass read_only here so the subsection grades can be persisted in bulk at the end.
            return self._subsection_grade_factory.create(subsection, read_only=True)

    @staticmethod
    def _compute_percent(grader_result):
        """
        Computes and returns the grade percentage from the given
        result from the grader.
        """
        # Confused about the addition of .05 here?  See https://openedx.atlassian.net/browse/TNL-6972
        return round_away_from_zero(grader_result['percent'] * 100 + 0.05) / 100

    @staticmethod
    def _compute_letter_grade(grade_cutoffs, percent):
        """
        Computes and returns the course letter grade given the
        inputs, as defined in the grading_policy (e.g. 'A' 'B' 'C')
        or None if not passed.
        """
        letter_grade = None

        # Possible grades, sorted in descending order of score
        descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
        for possible_grade in descending_grades:
            if percent >= grade_cutoffs[possible_grade]:
                letter_grade = possible_grade
                break

        return letter_grade

    @staticmethod
    def _compute_passed(grade_cutoffs, percent):
        """
        Computes and returns whether the given percent value
        is a passing grade according to the given grade cutoffs.
        """
        nonzero_cutoffs = [cutoff for cutoff in grade_cutoffs.values() if cutoff > 0]
        success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None
        return success_cutoff and percent >= success_cutoff
def _uniqueify_and_keep_order(iterable):
return list(OrderedDict([(item, None) for item in iterable]).keys())
| 39.122807 | 120 | 0.67145 | 12,757 | 0.953438 | 0 | 0 | 6,595 | 0.4929 | 0 | 0 | 5,111 | 0.381988 |
cbbeb4b320b177296806f93013a315f5fa18949c | 1,385 | py | Python | horoscope.py | Srol/Slack-Horoscope-Bot | af88183ff9810cf1606496c11ed949c5b4c27773 | [
"MIT"
] | 2 | 2019-11-17T19:28:44.000Z | 2020-04-17T15:41:15.000Z | horoscope.py | Srol/Slack-Horoscope-Bot | af88183ff9810cf1606496c11ed949c5b4c27773 | [
"MIT"
] | null | null | null | horoscope.py | Srol/Slack-Horoscope-Bot | af88183ff9810cf1606496c11ed949c5b4c27773 | [
"MIT"
] | null | null | null | import os
import feedparser
import json
from flask import Flask, request
# Add your Slack token to the variable below.
SLACK_TOKEN = ""
url = ""
payload = {}
headers = {'content-type': 'application/json'}
app = Flask(__name__)
# this endpoint listens for incoming slash commands from Slack.
@app.route('/horos', methods=['POST'])
def horos():
if request.method == "POST" and request.form.get('token') == SLACK_TOKEN:
from_number = request.form.get('text')
from_number = from_number[11:]
channel = request.form.get('channel_name')
message = matchHoroscope(from_number)
payload = {
"text": message,
"channel": channel,
"username": "Star-Messenger",
}
return json.dumps(payload)
final = ""
signs = ["aries", "taurus", "gemini", "cancer", "leo", "virgo", "libra", "scorpio", "sagittarius", "capricorn", "aquarius", "pisces"]
# One half of the process of matching a horoscope with an actual reading.
def matchHoroscope(sign):
if sign.lower() in signs:
return getHoroscope(sign)
else:
return "The answer you seek is not written in the stars."
# This function pulls today's horoscope through the RSS feeds of FindYourFate.com
def getHoroscope(sign):
    """Fetch and return today's reading for *sign* from the FindYourFate RSS feed."""
    feed_url = 'http://www.findyourfate.com/rss/dailyhoroscope-feed.asp?sign=' + sign.title()
    first_entry = feedparser.parse(feed_url).entries[0]
    return first_entry['summary_detail']['value']
cbbecb922401274e31912c3ad07cb8e5c2af2c9e | 9,507 | py | Python | scripts/segmentation/PointNet/run_pointnet_segmentation.py | devskroy1/ForkedBrainSurfaceTK | 774035ab5eae6c0a40eb96eab43d489d3f722eaa | [
"MIT"
] | null | null | null | scripts/segmentation/PointNet/run_pointnet_segmentation.py | devskroy1/ForkedBrainSurfaceTK | 774035ab5eae6c0a40eb96eab43d489d3f722eaa | [
"MIT"
] | null | null | null | scripts/segmentation/PointNet/run_pointnet_segmentation.py | devskroy1/ForkedBrainSurfaceTK | 774035ab5eae6c0a40eb96eab43d489d3f722eaa | [
"MIT"
] | null | null | null | import os.path as osp
PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..')
import sys
sys.path.append(PATH_TO_ROOT)
import pickle
import time
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR
from models.pointnet.src.utils import get_id, save_to_log, get_comment, get_data_path, data
from models.pointnet.src.models.pointnet2_segmentation import Net
from models.pointnet.main.pointnet2_segmentation import train, test, perform_final_testing
# Global variables
all_labels = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8 , 9, 10, 11, 12, 13, 14, 15, 16, 17])
num_points_dict = {'original': 32492, '50': 16247, '90': None}

# NOTE(review): PATH_TO_ROOT is re-assigned here (now with a trailing '/')
# after the earlier bare assignment near the imports -- confirm intentional.
PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..') + '/'
PATH_TO_POINTNET = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..', 'models', 'pointnet') + '/'

if __name__ == '__main__':

    # NOTE(review): this num_workers is shadowed by the literal 2 passed to
    # data(...) below -- confirm which one should win.
    num_workers = 2

    local_features = ['corrected_thickness', 'curvature', 'sulcal_depth']
    global_features = None

    #################################################
    ########### EXPERIMENT DESCRIPTION ##############
    #################################################
    recording = True
    REPROCESS = True

    data_nativeness = 'native'
    data_compression = "5k"
    data_type = 'white'
    hemisphere = 'both'

    # data_nativeness = 'native'
    # data_compression = "20k"
    # data_type = 'white'
    # hemisphere = 'left'

    additional_comment = 'Baseline PointNet++ segmentn to compare with Randla-net segmentn'

    experiment_name = f'{data_nativeness}_{data_type}_{data_compression}_{hemisphere}_{additional_comment}'

    #################################################
    ############ EXPERIMENT DESCRIPTION #############
    #################################################

    # 1. Model Parameters
    ################################################
    lr = 0.001
    batch_size = 2
    gamma = 0.9875
    target_class = ""
    task = 'segmentation'
    ################################################

    ###### SPECIFY PATH TO YOUR DATA_SPLIT PICKLE #####
    # 2. Get the data splits indices
    with open(PATH_TO_POINTNET + 'src/names.pk', 'rb') as f:
        indices = pickle.load(f)

    # 4. Get experiment description
    comment = get_comment(data_nativeness, data_compression, data_type, hemisphere,
                          lr, batch_size, local_features, global_features, target_class)
    print('=' * 50 + '\n' + '=' * 50)
    print(comment)
    print('=' * 50 + '\n' + '=' * 50)

    ##### SPECIFY YOUR DATA_FOLDER AND FILES_ENDING #####
    # 5. Perform data processing.
    data_folder, files_ending = get_data_path(data_nativeness, data_compression, data_type, hemisphere=hemisphere)

    train_dataset, test_dataset, validation_dataset, train_loader, test_loader, val_loader, num_labels = data(
        data_folder,
        files_ending,
        data_type,
        target_class,
        task,
        REPROCESS,
        local_features,
        global_features,
        indices,
        batch_size,
        num_workers=2,
        data_nativeness=data_nativeness,
        data_compression=data_compression,
        hemisphere=hemisphere
    )

    # 6. Getting the number of features to adapt the architecture
    try:
        num_local_features = train_dataset[0].x.size(1)
    except:
        num_local_features = 0

    print(f'Unique labels found: {num_labels}')

    if not torch.cuda.is_available():
        print('You are running on a CPU.')

    # 7. Create the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Net(num_labels, num_local_features, num_global_features=None).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=gamma)

    id = '0'
    writer = None
    if recording:
        # 9. Save to log_record.txt
        log_descr = get_comment(data_nativeness, data_compression, data_type, hemisphere,
                                lr, batch_size, local_features, global_features, target_class,
                                log_descr=True)
        save_to_log(log_descr, prefix=experiment_name)
        id = str(int(get_id(prefix=experiment_name)) - 1)
        writer = SummaryWriter(PATH_TO_POINTNET + f'new_runs/{experiment_name}ID' + id)
        writer.add_text(f'{experiment_name} ID #{id}', comment)

    best_val_acc = 0
    best_val_iou = 0
    best_model_acc = 0
    best_model_iou = 0

    # 10. ====== TRAINING LOOP ======
    for epoch in range(1, 150):
        # 1. Start recording time
        start = time.time()

        # 2. Make a training step
        train(model, train_loader, epoch, device, optimizer, num_labels, writer, recording=recording)
        if recording:
            writer.add_scalar('Training Time/epoch', time.time() - start, epoch)

        # 3. Validate the performance after each epoch
        loss, acc, iou, mean_iou = test(model, val_loader, comment + 'val' + str(epoch), device, num_labels, writer, epoch=epoch, id=id,
                                        experiment_name=experiment_name, recording=recording)
        print('Epoch: {:02d}, Val Loss/nll: {}, Val Acc: {:.4f}'.format(epoch, loss, acc))
        scheduler.step()

        # 4. Record valiation metrics in Tensorboard
        if recording:
            # By Accuracy: checkpoint the best-accuracy model so far.
            if acc > best_val_acc:
                best_val_acc = acc
                best_model_acc = epoch
                torch.save(model.state_dict(),
                           PATH_TO_POINTNET + f'experiment_data/new/{experiment_name}-{id}/' + 'best_acc_model' + '.pt')

            # By Mean IoU: checkpoint the best-IoU model so far.
            if mean_iou > best_val_iou:
                best_val_iou = mean_iou
                best_model_iou = epoch
                torch.save(model.state_dict(),
                           PATH_TO_POINTNET + f'experiment_data/new/{experiment_name}-{id}/' + 'best_iou_model' + '.pt')

            writer.add_scalar('Loss/val_nll', loss, epoch)
            writer.add_scalar('Accuracy/val', acc, epoch)
            for label, value in enumerate(iou):
                writer.add_scalar('IoU{}/validation'.format(label), value, epoch)
                print('\t\tValidation Label {}: {}'.format(label, value))
        print('=' * 60)

    if recording:
        # save the last model
        torch.save(model.state_dict(), PATH_TO_POINTNET + f'experiment_data/new/{experiment_name}-{id}/' + 'last_model' + '.pt')

        loss_acc, acc_acc, iou_acc, mean_iou_acc, loss_iou, acc_iou, iou_iou, mean_iou_iou = perform_final_testing(model,
                                                                                                                   writer,
                                                                                                                   test_loader,
                                                                                                                   experiment_name,
                                                                                                                   comment,
                                                                                                                   id,
                                                                                                                   num_labels,
                                                                                                                   device,
                                                                                                                   best_model_acc,
                                                                                                                   best_model_iou,
                                                                                                                   recording=recording)
cbbf46aa2d552297fb3fb4b62e94db053b8ce1eb | 1,581 | py | Python | basic_auth/handler.py | andrei-shabanski/s3pypi | 48718149cf43d6e3252712d072d5b0de850bac55 | [
"MIT"
] | 249 | 2016-04-07T08:36:44.000Z | 2021-06-03T06:55:08.000Z | basic_auth/handler.py | andrei-shabanski/s3pypi | 48718149cf43d6e3252712d072d5b0de850bac55 | [
"MIT"
] | 58 | 2016-06-08T22:41:18.000Z | 2021-05-28T20:03:39.000Z | basic_auth/handler.py | AstrocyteResearch/s3pypi | cd8d02015772dcded3b3db1a588c89e174d10471 | [
"MIT"
] | 100 | 2016-06-23T23:28:23.000Z | 2021-03-12T15:09:29.000Z | import base64
import hashlib
import json
import logging
from dataclasses import dataclass
import boto3
log = logging.getLogger()
region = "us-east-1"
def handle(event: dict, context):
request = event["Records"][0]["cf"]["request"]
try:
authenticate(request["headers"])
except Exception as e:
log.error(repr(e))
return unauthorized
return request
def authenticate(headers: dict):
domain = headers["host"][0]["value"]
auth = headers["authorization"][0]["value"]
auth_type, creds = auth.split(" ")
if auth_type != "Basic":
raise ValueError("Invalid auth type: " + auth_type)
username, password = base64.b64decode(creds).decode().split(":")
user = get_user(domain, username)
if hash_password(password, user.password_salt) != user.password_hash:
raise ValueError("Invalid password for " + username)
@dataclass
class User:
username: str
password_hash: str
password_salt: str
def get_user(domain: str, username: str) -> User:
data = boto3.client("ssm", region_name=region).get_parameter(
Name=f"/s3pypi/{domain}/users/{username}",
WithDecryption=True,
)["Parameter"]["Value"]
return User(username, **json.loads(data))
def hash_password(password: str, salt: str) -> str:
return hashlib.sha1((password + salt).encode()).hexdigest()
unauthorized = dict(
status="401",
statusDescription="Unauthorized",
headers={
"www-authenticate": [
{"key": "WWW-Authenticate", "value": 'Basic realm="Login"'}
]
},
)
| 23.597015 | 73 | 0.648956 | 75 | 0.047438 | 0 | 0 | 86 | 0.054396 | 0 | 0 | 281 | 0.177736 |
cbc097bb33323fa8088310bd343222a9655bd612 | 11,674 | py | Python | misc/zip/Cura-master/plugins/CuraEngineBackend/ProcessSlicedLayersJob.py | criscola/G-Gen | 293d4f46cb40d7917a10a95921040a14a086efc1 | [
"MIT"
] | 1 | 2018-10-19T10:08:45.000Z | 2018-10-19T10:08:45.000Z | misc/zip/Cura-master/plugins/CuraEngineBackend/ProcessSlicedLayersJob.py | criscola/G-Gen | 293d4f46cb40d7917a10a95921040a14a086efc1 | [
"MIT"
] | null | null | null | misc/zip/Cura-master/plugins/CuraEngineBackend/ProcessSlicedLayersJob.py | criscola/G-Gen | 293d4f46cb40d7917a10a95921040a14a086efc1 | [
"MIT"
] | null | null | null | #Copyright (c) 2017 Ultimaker B.V.
#Cura is released under the terms of the LGPLv3 or higher.
import gc
from UM.Job import Job
from UM.Application import Application
from UM.Mesh.MeshData import MeshData
from UM.Preferences import Preferences
from UM.View.GL.OpenGLContext import OpenGLContext
from UM.Message import Message
from UM.i18n import i18nCatalog
from UM.Logger import Logger
from UM.Math.Vector import Vector
from cura.Scene.BuildPlateDecorator import BuildPlateDecorator
from cura.Scene.CuraSceneNode import CuraSceneNode
from cura.Settings.ExtruderManager import ExtruderManager
from cura import LayerDataBuilder
from cura import LayerDataDecorator
from cura import LayerPolygon
import numpy
from time import time
from cura.Settings.ExtrudersModel import ExtrudersModel
catalog = i18nCatalog("cura")
## Return a 4-tuple with floats 0-1 representing the html color code
#
# \param color_code html color code, i.e. "#FF0000" -> red
def colorCodeToRGBA(color_code):
if color_code is None:
Logger.log("w", "Unable to convert color code, returning default")
return [0, 0, 0, 1]
return [
int(color_code[1:3], 16) / 255,
int(color_code[3:5], 16) / 255,
int(color_code[5:7], 16) / 255,
1.0]
class ProcessSlicedLayersJob(Job):
def __init__(self, layers):
super().__init__()
self._layers = layers
self._scene = Application.getInstance().getController().getScene()
self._progress_message = Message(catalog.i18nc("@info:status", "Processing Layers"), 0, False, -1)
self._abort_requested = False
self._build_plate_number = None
## Aborts the processing of layers.
#
# This abort is made on a best-effort basis, meaning that the actual
# job thread will check once in a while to see whether an abort is
# requested and then stop processing by itself. There is no guarantee
# that the abort will stop the job any time soon or even at all.
def abort(self):
self._abort_requested = True
def setBuildPlate(self, new_value):
self._build_plate_number = new_value
def getBuildPlate(self):
return self._build_plate_number
def run(self):
Logger.log("d", "Processing new layer for build plate %s..." % self._build_plate_number)
start_time = time()
view = Application.getInstance().getController().getActiveView()
if view.getPluginId() == "SimulationView":
view.resetLayerData()
self._progress_message.show()
Job.yieldThread()
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
Application.getInstance().getController().activeViewChanged.connect(self._onActiveViewChanged)
# The no_setting_override is here because adding the SettingOverrideDecorator will trigger a reslice
new_node = CuraSceneNode(no_setting_override = True)
new_node.addDecorator(BuildPlateDecorator(self._build_plate_number))
# Force garbage collection.
# For some reason, Python has a tendency to keep the layer data
# in memory longer than needed. Forcing the GC to run here makes
# sure any old layer data is really cleaned up before adding new.
gc.collect()
mesh = MeshData()
layer_data = LayerDataBuilder.LayerDataBuilder()
layer_count = len(self._layers)
# Find the minimum layer number
# When using a raft, the raft layers are sent as layers < 0. Instead of allowing layers < 0, we
# instead simply offset all other layers so the lowest layer is always 0. It could happens that
# the first raft layer has value -8 but there are just 4 raft (negative) layers.
min_layer_number = 0
negative_layers = 0
for layer in self._layers:
if layer.id < min_layer_number:
min_layer_number = layer.id
if layer.id < 0:
negative_layers += 1
current_layer = 0
for layer in self._layers:
# Negative layers are offset by the minimum layer number, but the positive layers are just
# offset by the number of negative layers so there is no layer gap between raft and model
abs_layer_number = layer.id + abs(min_layer_number) if layer.id < 0 else layer.id + negative_layers
layer_data.addLayer(abs_layer_number)
this_layer = layer_data.getLayer(abs_layer_number)
layer_data.setLayerHeight(abs_layer_number, layer.height)
layer_data.setLayerThickness(abs_layer_number, layer.thickness)
for p in range(layer.repeatedMessageCount("path_segment")):
polygon = layer.getRepeatedMessage("path_segment", p)
extruder = polygon.extruder
line_types = numpy.fromstring(polygon.line_type, dtype="u1") # Convert bytearray to numpy array
line_types = line_types.reshape((-1,1))
points = numpy.fromstring(polygon.points, dtype="f4") # Convert bytearray to numpy array
if polygon.point_type == 0: # Point2D
points = points.reshape((-1,2)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
else: # Point3D
points = points.reshape((-1,3))
line_widths = numpy.fromstring(polygon.line_width, dtype="f4") # Convert bytearray to numpy array
line_widths = line_widths.reshape((-1,1)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
line_thicknesses = numpy.fromstring(polygon.line_thickness, dtype="f4") # Convert bytearray to numpy array
line_thicknesses = line_thicknesses.reshape((-1,1)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
line_feedrates = numpy.fromstring(polygon.line_feedrate, dtype="f4") # Convert bytearray to numpy array
line_feedrates = line_feedrates.reshape((-1,1)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
# Create a new 3D-array, copy the 2D points over and insert the right height.
# This uses manual array creation + copy rather than numpy.insert since this is
# faster.
new_points = numpy.empty((len(points), 3), numpy.float32)
if polygon.point_type == 0: # Point2D
new_points[:, 0] = points[:, 0]
new_points[:, 1] = layer.height / 1000 # layer height value is in backend representation
new_points[:, 2] = -points[:, 1]
else: # Point3D
new_points[:, 0] = points[:, 0]
new_points[:, 1] = points[:, 2]
new_points[:, 2] = -points[:, 1]
this_poly = LayerPolygon.LayerPolygon(extruder, line_types, new_points, line_widths, line_thicknesses, line_feedrates)
this_poly.buildCache()
this_layer.polygons.append(this_poly)
Job.yieldThread()
Job.yieldThread()
current_layer += 1
progress = (current_layer / layer_count) * 99
# TODO: Rebuild the layer data mesh once the layer has been processed.
# This needs some work in LayerData so we can add the new layers instead of recreating the entire mesh.
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
if self._progress_message:
self._progress_message.setProgress(progress)
# We are done processing all the layers we got from the engine, now create a mesh out of the data
# Find out colors per extruder
global_container_stack = Application.getInstance().getGlobalContainerStack()
manager = ExtruderManager.getInstance()
extruders = list(manager.getMachineExtruders(global_container_stack.getId()))
if extruders:
material_color_map = numpy.zeros((len(extruders), 4), dtype=numpy.float32)
for extruder in extruders:
position = int(extruder.getMetaDataEntry("position", default="0")) # Get the position
try:
default_color = ExtrudersModel.defaultColors[position]
except IndexError:
default_color = "#e0e000"
color_code = extruder.material.getMetaDataEntry("color_code", default=default_color)
color = colorCodeToRGBA(color_code)
material_color_map[position, :] = color
else:
# Single extruder via global stack.
material_color_map = numpy.zeros((1, 4), dtype=numpy.float32)
color_code = global_container_stack.material.getMetaDataEntry("color_code", default="#e0e000")
color = colorCodeToRGBA(color_code)
material_color_map[0, :] = color
# We have to scale the colors for compatibility mode
if OpenGLContext.isLegacyOpenGL() or bool(Preferences.getInstance().getValue("view/force_layer_view_compatibility_mode")):
line_type_brightness = 0.5 # for compatibility mode
else:
line_type_brightness = 1.0
layer_mesh = layer_data.build(material_color_map, line_type_brightness)
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
# Add LayerDataDecorator to scene node to indicate that the node has layer data
decorator = LayerDataDecorator.LayerDataDecorator()
decorator.setLayerData(layer_mesh)
new_node.addDecorator(decorator)
new_node.setMeshData(mesh)
# Set build volume as parent, the build volume can move as a result of raft settings.
# It makes sense to set the build volume as parent: the print is actually printed on it.
new_node_parent = Application.getInstance().getBuildVolume()
new_node.setParent(new_node_parent) # Note: After this we can no longer abort!
settings = Application.getInstance().getGlobalContainerStack()
if not settings.getProperty("machine_center_is_zero", "value"):
new_node.setPosition(Vector(-settings.getProperty("machine_width", "value") / 2, 0.0, settings.getProperty("machine_depth", "value") / 2))
if self._progress_message:
self._progress_message.setProgress(100)
if self._progress_message:
self._progress_message.hide()
# Clear the unparsed layers. This saves us a bunch of memory if the Job does not get destroyed.
self._layers = None
Logger.log("d", "Processing layers took %s seconds", time() - start_time)
def _onActiveViewChanged(self):
if self.isRunning():
if Application.getInstance().getController().getActiveView().getPluginId() == "SimulationView":
if not self._progress_message:
self._progress_message = Message(catalog.i18nc("@info:status", "Processing Layers"), 0, False, 0, catalog.i18nc("@info:title", "Information"))
if self._progress_message.getProgress() != 100:
self._progress_message.show()
else:
if self._progress_message:
self._progress_message.hide()
| 46.696 | 165 | 0.650677 | 10,407 | 0.891468 | 0 | 0 | 0 | 0 | 0 | 0 | 3,442 | 0.294843 |
cbc1c81f810a9f128eb409484181a04a43ba666d | 262 | py | Python | sts/metric.py | LostCow/KLUE | 73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77 | [
"MIT"
] | 18 | 2021-12-22T09:41:24.000Z | 2022-03-19T12:54:30.000Z | sts/metric.py | LostCow/KLUE | 73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77 | [
"MIT"
] | null | null | null | sts/metric.py | LostCow/KLUE | 73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77 | [
"MIT"
] | 4 | 2021-12-26T11:31:46.000Z | 2022-03-28T07:55:45.000Z | from datasets import load_metric
def compute_metrics(pred):
pearson = load_metric("pearsonr").compute
references = pred.label_ids
predictions = pred.predictions
metric = pearson(predictions=predictions, references=references)
return metric
| 26.2 | 68 | 0.763359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.038168 |
cbc2172760f370c3b4d7c60769a266040ec53d06 | 442 | py | Python | BITs/2014/Abdrahmanova_G_I/task_3_1.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | BITs/2014/Abdrahmanova_G_I/task_3_1.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | BITs/2014/Abdrahmanova_G_I/task_3_1.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | #Задача 3. Вариант 1.
#Напишите программу, которая выводит имя "Иво Ливи", и запрашивает его псевдоним. Программа должна сцеплять две эти строки и выводить полученную строку, разделяя имя и псевдоним с помощью тире.
name=input('Герой нашей сегодняшней программы - Иво Ливи. \nПод каким же именем мы знаем этого человека? ')
print('Ваш ответ: ', name)
print('Все верно: Иво Ливи - ', name)
input('Нажмите Enter')
#Abdrahmanova G. I.
#7.03.2016 | 55.25 | 193 | 0.757919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 664 | 0.927374 |
cbc43e11b971288afc6b07c65f700ea9583287dc | 3,523 | py | Python | musiquepy/data/db.py | vitormoura/musiquepy | 8065f85f216796d9c00a72ce4cc08cda8c461720 | [
"MIT"
] | null | null | null | musiquepy/data/db.py | vitormoura/musiquepy | 8065f85f216796d9c00a72ce4cc08cda8c461720 | [
"MIT"
] | null | null | null | musiquepy/data/db.py | vitormoura/musiquepy | 8065f85f216796d9c00a72ce4cc08cda8c461720 | [
"MIT"
] | null | null | null | import io
import logging
import os
import pathlib
from datetime import datetime
from typing import List
from musiquepy.data.errors import MusiquepyExistingUserError
from musiquepy.data.media import get_profile_pictures_dir
from musiquepy.data.model import (
Album, AlbumPhoto, Artist, MusicGenre, MusicTrack, User)
from sqlalchemy import select
from sqlalchemy.engine import Engine, ResultProxy
from sqlalchemy.orm.session import Session
class MusiquepyDB:
_engine: Engine
_session: Session
_log: logging.Logger
def __init__(self, engine: Engine) -> None:
self._engine = engine
self._log = logging.getLogger(__name__)
def connect(self):
self._session = Session(self._engine)
self._session.expire_on_commit = False
def close(self):
self._session.close()
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def create_user(self, name: str, email: str, password: str) -> User:
usr = self.get_user_by_email(email)
if usr is not None:
raise MusiquepyExistingUserError(
f"utilisateur existe déjà: {email}")
usr = User()
usr.email = email
usr.name = name
usr.password = password
usr.accept_marketing = 0
usr.active = 1
usr.created_at = int(datetime.now().timestamp())
usr.email_confirmed_at = None
self._session.add(usr)
self._session.commit()
return usr
def get_users(self) -> List[User]:
result: ResultProxy
result = self._session.execute(select(User))
return [row.User for row in result.fetchall()]
def get_user_by_id(self, id) -> User:
stmt = select(User).where(User.id == id)
return self._session.execute(stmt).scalar()
def get_user_by_email(self, email) -> User:
stmt = select(User).where(User.email == email)
return self._session.execute(stmt).scalar()
def get_user_profile_picture(self, user_id: int) -> io.IOBase:
pictures_path = get_profile_pictures_dir()
profile_pic_path = pathlib.Path(
pictures_path, f'user_{int(user_id)}.jpg')
if not profile_pic_path.exists():
profile_pic_path = pathlib.Path(pictures_path, 'default.jpg')
return io.FileIO(os.path.join(pictures_path, 'default.jpg'))
def get_genres(self) -> List[MusicGenre]:
stmt = select(MusicGenre).order_by(MusicGenre.description)
return self._session.execute(stmt).scalars().all()
def get_genre_by_id(self, id: int) -> MusicGenre:
stmt = select(MusicGenre).where(MusicGenre.id == id)
return self._session.execute(stmt).scalar()
def get_artist_by_id(self, id: int) -> Artist:
stmt = select(Artist).where(Artist.id == id)
return self._session.execute(stmt).scalar()
def get_music_tracks_by_genre(self, id_genre: int) -> List[MusicTrack]:
stmt = (
select(MusicTrack, Artist, Album)
.join(MusicTrack.album)
.join(Album.artist)
.join(Artist.genres)
.where(MusicGenre.id == id_genre)
)
result = self._session.execute(stmt)
return [row.MusicTrack for row in result.fetchall()]
def get_album_photo(self, id: int) -> AlbumPhoto:
stmt = (select(AlbumPhoto).where(AlbumPhoto.album_id == id))
return self._session.execute(stmt).scalar()
| 28.184 | 75 | 0.649446 | 3,079 | 0.873475 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.025248 |
cbc4abcb91f41f2e38663e2ea3967fca2c3cbd66 | 7,301 | py | Python | main.py | Drejky/Genetic-ML | 80076246d4a5e75834d95607d04ab0e7e0132c48 | [
"MIT"
] | null | null | null | main.py | Drejky/Genetic-ML | 80076246d4a5e75834d95607d04ab0e7e0132c48 | [
"MIT"
] | null | null | null | main.py | Drejky/Genetic-ML | 80076246d4a5e75834d95607d04ab0e7e0132c48 | [
"MIT"
] | null | null | null | import random
from copy import deepcopy, copy
import timeit
adrMask = 63 #mask to get our addr bits
cmdMask = 192 #mask to get our command bits
stepLim = 500 #Limited num of instructions
tourFlag = int(input("Input 1 for tournament, 0 for roulette"))
mixFlag = int(input("Input 1 for mixed children, 0 for halfnhalf"))
popLim = int(input("Input the ammount of individuals in each gen"))
#Commands:
#0 - increment
#64 - decrement
#128 - jump
#192 - print
uwu = [
['b', 'b', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'g', 'b', 'b'],
['b', 'b', 'g', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'b', 'b', 'g'],
['b', 'g', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'g', 'b', 'b'],
['b', 'b', 'b', 's', 'b', 'b', 'b']
]
#Finds number in 2D array
def findNum(arr, num):
for i in arr:
try:
j = i.index(num)
return j, arr.index(i)
except ValueError:
continue
#Counts the number of treasures in map
def getGc(arr):
c = 0
for i in arr:
for j in i:
if j == 'g':
c += 1
return c
startx, starty = findNum(uwu, 's')
gc = getGc(uwu)
class individual:
def __init__(self):
self.mem = []
self.fitness = 100
def randomize(self):
for i in range(64):
self.mem.append(random.randrange(0, 255))
#Opperations on memmory cells
def increment(x):
if(x == 255):
return 0
return x + 1
def decrement(x):
if(x == 0):
return 255
return x - 1
def addrUp(x):
if x == 63:
return 0
return x + 1
def getCom(x):
masked = x & 3
if masked == 0:
return 'H'
elif masked == 1:
return 'D'
elif masked == 2:
return 'P'
else:
return 'L'
#Checks if bot went out of bounds
def checkOutBounds(arr, x, y):
if y < 0 or y >= len(arr):
return True
elif x < 0 or x >= len(arr[0]):
return True
else:
return False
#Program for our individuals
def vm(x, prin):
grid = deepcopy(uwu)
stepCount = 0
addr = 0
posx = startx
posy = starty
while(stepCount < stepLim):
stepCount += 1
#Increment cell
if x.mem[addr] & cmdMask == 0:
x.mem[x.mem[addr] & adrMask] = increment(x.mem[x.mem[addr] & adrMask])
addr = addrUp(addr)
continue
#Decrement cell
elif x.mem[addr] & cmdMask == 64:
x.mem[x.mem[addr] & adrMask] = decrement(x.mem[x.mem[addr] & adrMask])
addr = addrUp(addr)
continue
#Jump address
elif x.mem[addr] & cmdMask == 128:
addr = x.mem[addr] & adrMask
continue
#Do a step
else:
x.fitness -= 1
masked = x.mem[x.mem[addr] & adrMask] & 3 #creating mask representing move direction
if masked == 0: #move up
posy -= 1
elif masked == 1: #move down
posy += 1
elif masked == 2: #move right
posx += 1
else: #move left
posx -= 1
if checkOutBounds(grid, posx, posy):
return x.fitness
elif grid[posy][posx] == 'g':
x.fitness += 100
grid[posy][posx] = 'b'
#If the prin flag is raised, print out individuals path
if prin:
print(getCom(x.mem[x.mem[addr] & adrMask]))
addr = addrUp(addr)
return x.fitness
#Breeding functions
def halfnhalf(par1, par2):
child = individual()
rand = random.randrange(64)
for i in range(rand):
child.mem.append(par1.mem[i])
for i in range(rand, 64):
child.mem.append(par2.mem[i])
return child
def mixChild(par1, par2):
child = individual()
for i in range(64):
if random.randrange(2) == 1:
child.mem.append(par1.mem[i])
else:
child.mem.append(par2.mem[i])
return child
#Selection functions
def roulete(gen):
sum = 0
rand = random.random()
last = 0
for i in gen:
sum += i.fitness
for i in gen:
if rand < (last + (i.fitness/sum)):
return i
else:
last += i.fitness/sum
def roulMakeChild(gen):
x = roulete(gen)
y = roulete(gen)
while(x == y):
y = roulete(gen)
if(mixFlag == 1):
return mixChild(x, y)
else:
return halfnhalf(x, y)
def tournament(gen):
x = gen[random.randrange(0, popLim)]
y = gen[random.randrange(0, popLim)]
while(x == y):
y = gen[random.randrange(0, popLim)]
if(x.fitness > y.fitness):
return x
else:
return y
def tournamentChild(gen):
x = tournament(gen)
y = tournament(gen)
while(x == y):
y = tournament(gen)
if(mixFlag == 1):
return mixChild(x, y)
else:
return halfnhalf(x, y)
#Mutation
def mutate(indi):
rand = random.random()
#print(rand)
if rand < 0.1:
return indi
elif rand > 0.9:
indi.randomize()
return indi
else:
for i in range(63):
if random.randrange(0,40) == 1:
indi.mem[i] = random.randrange(0, 255)
return indi
def main():
pop = 1
gen = []
newGen = []
genLim = int(input("Input the ammount of generations"))
#First individual set as elite
gen.append(individual())
gen[0].randomize()
gen[0].fitness = vm(deepcopy(gen[0]), 0)
elite = deepcopy(gen[0])
#Populating first generation
for i in range(1, popLim):
gen.append(individual())
gen[i].randomize()
gen[i].fitness = vm(deepcopy(gen[i]), 0)
if(gen[i].fitness > elite.fitness):
elite = deepcopy(gen[i])
#Creating new generations
while pop <= genLim:
print("=========================")
sumFit = 0
newGen.append(elite)
for i in range(1, popLim):
if(tourFlag):
newGen.append(tournamentChild(gen))
else:
newGen.append(roulMakeChild(gen))
newGen[i] = mutate(newGen[i])
newGen[i].fitness = vm(deepcopy(newGen[i]), 0)
#print(newGen[i].fitness)
if(newGen[i].fitness > elite.fitness):
elite = deepcopy(newGen[i])
sumFit += newGen[i].fitness
if(newGen[i].fitness > 100*gc):
print(pop)
vm(deepcopy(newGen[i]), 1)
return
print("Average {} gen: {}".format(pop, sumFit / popLim))
gen.clear()
gen = copy(newGen)
newGen.clear()
pop += 1
if pop == genLim:
if int(input("Write one for another 100 generations")):
genLim += 100
start = timeit.default_timer()
main()
stop = timeit.default_timer()
print("It took {:.4f} to run.".format(stop - start))
| 26.26259 | 99 | 0.490207 | 203 | 0.027804 | 0 | 0 | 0 | 0 | 0 | 0 | 1,134 | 0.155321 |
cbc5080d810af6158c768f9c9a1d4621bd817296 | 233 | py | Python | wordVerification.py | Androcks/SlideWord | 0ea3ff886ee142fa2211667d1bd9b73da66129e9 | [
"Apache-2.0"
] | null | null | null | wordVerification.py | Androcks/SlideWord | 0ea3ff886ee142fa2211667d1bd9b73da66129e9 | [
"Apache-2.0"
] | null | null | null | wordVerification.py | Androcks/SlideWord | 0ea3ff886ee142fa2211667d1bd9b73da66129e9 | [
"Apache-2.0"
] | null | null | null | import enchant
##Use our custom word list
wordList = enchant.request_pwl_dict("truncListWords.txt")
##Takes our string from the stage and checks it
def checkForWord(playerString) :
return wordList.check(playerString)
| 23.3 | 58 | 0.755365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.407725 |
cbc5cfd99ada9e09f7fc9fada68993cce224acc8 | 2,264 | py | Python | server/main/components/widgets_content.py | INRIM/forms-theme-italia | 45415a16e32c8c93ee8d234262149ed0635cf212 | [
"MIT"
] | null | null | null | server/main/components/widgets_content.py | INRIM/forms-theme-italia | 45415a16e32c8c93ee8d234262149ed0635cf212 | [
"MIT"
] | null | null | null | server/main/components/widgets_content.py | INRIM/forms-theme-italia | 45415a16e32c8c93ee8d234262149ed0635cf212 | [
"MIT"
] | null | null | null | from .widgets_base import WidgetsBase
class PageWidget(WidgetsBase):
def __init__(self, templates_engine, request, settings, schema={}, resource_ext=None, disabled=False, **kwargs):
super(PageWidget, self).__init__(templates_engine, request, **kwargs)
self.base_path = kwargs.get('base_path', "/")
self.page_api_action = kwargs.get('page_api_action', "/")
self.settings = settings
self.schema = schema
self.ext_resource = resource_ext
self.beforerows = []
self.afterrrows = []
self.disabled = disabled
def get_login_act(self, session):
return 'logout' if session.get('logged_in') else 'login'
def get_config(self, session: dict, **context):
today_date = self.dte.get_tooday_ui()
avatar = "/avatars/"
if session.get('logged_in'):
if session.get('name'):
user = session.get('name', "test")
else:
user = session.get('username', "Test")
avatar = session.get('avatar', "/avatars/")
else:
user = False
base_prj_data = {
"token": self.authtoken,
'app_name': self.settings.app_name,
'version': self.settings.app_version,
'env': "test",
'login_act': self.get_login_act(session),
'login_user': user,
'avatar': avatar,
'today_date': today_date,
"beforerows": self.beforerows,
"afterrrows": self.afterrrows,
"backtop": self.backtop,
"error": self.error,
"export_button": self.export_btn,
"rows": self.rows,
"request": self.request,
"base_path": self.base_path,
"page_api_action": self.page_api_action,
"logo_img_url": self.settings.logo_img_url
}
kwargs_def = {**context, **base_prj_data}
return kwargs_def
def render_page(self, template_name_or_list: str, session: dict, **context):
kwargs_def = self.get_config(session, **context)
return self.response_template(template_name_or_list, kwargs_def)
def render_custom(self, tmpname, cfg):
return self.render_template(f"{tmpname}", cfg)
| 37.114754 | 116 | 0.590989 | 2,223 | 0.98189 | 0 | 0 | 0 | 0 | 0 | 0 | 339 | 0.149735 |
cbc761dabe74f703096f63ce1ad894e6e2334799 | 1,000 | py | Python | app/pages/forms.py | MicroprocessorX069/Todo-web-app | c68f2e26eec1c89ab2257a22071133216c743ca0 | [
"Apache-2.0"
] | null | null | null | app/pages/forms.py | MicroprocessorX069/Todo-web-app | c68f2e26eec1c89ab2257a22071133216c743ca0 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:55:48.000Z | 2021-06-02T00:55:48.000Z | app/pages/forms.py | MicroprocessorX069/Todo-web-app | c68f2e26eec1c89ab2257a22071133216c743ca0 | [
"Apache-2.0"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from app.models import User
from flask import request
class CreateChallengeForm(FlaskForm):
name = StringField('Name', validators = [DataRequired()])
description = TextAreaField('About the challenge', validators = [Length(min = 0, max = 600)])
total_days = StringField('Total days', validators=[DataRequired()])
interval = StringField('Interval')
type = StringField('Type')
submit = SubmitField('Add challenge')
class EditChallengeForm(FlaskForm):
description = TextAreaField('About the challenge', validators = [Length(min = 0, max = 600)])
total_days = StringField('Total days', validators=[DataRequired()])
interval = StringField('Interval')
submit = SubmitField('Edit challenge')
def __init__(self, *args, **kwargs):
super(EditChallengeForm, self).__init__(*args, **kwargs)
| 43.478261 | 94 | 0.766 | 736 | 0.736 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.129 |
cbc875e1529c951ebc046e3930e392b9612e4907 | 1,302 | py | Python | study/hfppnetwork/hfppnetwork/sms/logginghelper.py | NASA-Tournament-Lab/CoECI-CMS-Healthcare-Fraud-Prevention | 4facd935920e77239c25323ca7e233cb899ba9f5 | [
"Apache-2.0"
] | 7 | 2015-07-15T06:47:16.000Z | 2020-10-17T20:51:09.000Z | study/hfppnetwork/hfppnetwork/sms/logginghelper.py | NASA-Tournament-Lab/CoECI-CMS-Healthcare-Fraud-Prevention | 4facd935920e77239c25323ca7e233cb899ba9f5 | [
"Apache-2.0"
] | null | null | null | study/hfppnetwork/hfppnetwork/sms/logginghelper.py | NASA-Tournament-Lab/CoECI-CMS-Healthcare-Fraud-Prevention | 4facd935920e77239c25323ca7e233cb899ba9f5 | [
"Apache-2.0"
] | 8 | 2017-01-30T02:27:01.000Z | 2021-04-21T04:15:48.000Z | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 TopCoder Inc., All Rights Reserved.
This is the module that defines logging helper methods
This module resides in Python source file logginghelper.py
Thread Safety:
It is thread safe because no module-level variable is used.
@author: TCSASSEMBLER
@version: 1.0
"""
import logging
def method_enter(logger, signature, params=None):
"""
This function is used to logging when enter method .
@param signature the method signature
@param params the method params
"""
logger.debug('Entering method %s', signature)
logger.debug('Input parameters:[%s]',('' if params is None else params))
def method_exit(logger, signature, ret=None):
"""
This function is used to logging when exit method.
@param signature the method signature
@param ret the method return value
"""
logger.debug('Exiting method %s', signature)
logger.debug('Output parameters:%s',ret)
def method_error(logger, signature, details):
"""
This function is used to logging when error happen.
@param signature the method signature
@param details the error details
"""
logger.error('Error in method %s', signature)
logger.error('Details:%s',details)
#log error stack
logger.exception('') | 31.756098 | 76 | 0.69278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 906 | 0.695853 |
cbca5e82b89e8b333ac4997324a7acc5ec0553d9 | 2,566 | py | Python | hw8/next_permutation.py | alexander-paskal/ece143-hw | 9e3d475cb44fd16f87879cb74dc9305d70805355 | [
"MIT"
] | 1 | 2022-02-02T07:30:20.000Z | 2022-02-02T07:30:20.000Z | hw8/next_permutation.py | alexander-paskal/ece143-hw | 9e3d475cb44fd16f87879cb74dc9305d70805355 | [
"MIT"
] | null | null | null | hw8/next_permutation.py | alexander-paskal/ece143-hw | 9e3d475cb44fd16f87879cb74dc9305d70805355 | [
"MIT"
] | null | null | null | """
Given a permutation of any length, generate the next permutation in lexicographic order.
For example, this are the permutations for [1,2,3] in lexicographic order.
# >>> list(it.permutations([1,2,3]))
[(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]
Then, your function next_permutation(t:tuple)->tuple should do the following
# >>> next_permutation((2,3,1))
(3,1,2)
Because (3,1,2) is the next permutation in lexicographic order. Here is another example:
# >>> next_permutation((0, 5, 2, 1, 4, 7, 3, 6))
(0, 5, 2, 1, 4, 7, 6, 3)
Your function should work for very long input tuples so the autograder will time-out if you
try to brute force your solution. The last permutation should wrap aruond to the first.
# >>> next_permutation((3,2,1,0))
(0, 1, 2, 3)
"""
def next_permutation(t: tuple) -> tuple:
"""
Prints the permutation of the input element t immediately following t in lexicographic order
:param t:
:return:
"""
assert isinstance(t, tuple)
for elem in t:
assert isinstance(elem, int)
assert len(t) == len(set(t))
RIGHT = -1
LEFT = RIGHT - 1
left = t[LEFT]
right = t[RIGHT]
while True:
if left < right:
if RIGHT == -1: # if first two:
l = list()
l.extend(t[:LEFT])
l.append(t[RIGHT])
l.append(t[LEFT])
return tuple(l)
else:
# pick the second highest, that's left
# add the rest of the elements in sorted order
l = list()
l.extend(t[:LEFT])
remaining = sorted(t[LEFT:])
second_highest = remaining.pop(remaining.index(left) + 1)
l.append(second_highest)
l.extend(sorted(remaining))
return tuple(l)
else: # left > right
try:
RIGHT -= 1
LEFT = RIGHT - 1
left = t[LEFT]
right = t[RIGHT]
except IndexError: # fully reversed list, return start condition
return tuple(sorted(t))
if __name__ == '__main__':
from itertools import permutations
##### Arguments
p = (1,2,3,4,5)
##### End Arguments
ps = permutations(p)
print("Start permutation:", p)
print("\nExpected | Actual | Matching")
print("-"*20)
for i in range(10):
p_n = next(ps)
print(p, p_n, p==p_n)
p = next_permutation(p)
| 27.591398 | 96 | 0.537412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,188 | 0.462977 |
cbcc9ac8c10c1c0b1c3e2bbf32ea484ec87b41ee | 329 | py | Python | 8/8.10/great_magician.py | singi2016cn/python-scaffold | 274e508d1919da67e599aa73be139800c043bce4 | [
"MIT"
] | null | null | null | 8/8.10/great_magician.py | singi2016cn/python-scaffold | 274e508d1919da67e599aa73be139800c043bce4 | [
"MIT"
] | null | null | null | 8/8.10/great_magician.py | singi2016cn/python-scaffold | 274e508d1919da67e599aa73be139800c043bce4 | [
"MIT"
] | null | null | null | # 魔术师
def show_magicians(magicians):
    """Print one introduction line per magician in *magicians*."""
    for name in magicians:
        print("magician's name is " + name)
def make_great(magicians):
    """Prefix every name in *magicians* with 'The Great ', in place.

    :param magicians: list of magician names; mutated in place.
    """
    # enumerate() replaces the manual counter and avoids the fragile pattern
    # of reassigning elements of the list while iterating over it directly.
    for i, name in enumerate(magicians):
        magicians[i] = 'The Great ' + name
# Build the roster, upgrade every name in place, then print the result.
magicians = ['singi', 'sunjun']
make_great(magicians)
show_magicians(magicians)
| 16.45 | 48 | 0.632219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.179104 |
cbce87157b7abadfa6bd754cf2488a033bb5028e | 4,889 | py | Python | scratchpad/voids_paper/bin/tests/test_smartvis.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | scratchpad/voids_paper/bin/tests/test_smartvis.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | scratchpad/voids_paper/bin/tests/test_smartvis.py | arshadzahangirchowdhury/TomoEncoders | 9c2b15fd515d864079f198546821faee5d78df17 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import h5py
import sys
import time
import seaborn as sns
import pandas as pd
sys.path.append('/home/atekawade/TomoEncoders/scratchpad/voids_paper/configs')
from params import model_path, get_model_params
sys.path.append('/home/atekawade/TomoEncoders/scratchpad/voids_paper')
from tomo_encoders.tasks.void_mapping import process_patches
from tomo_encoders.structures.voids import Voids
from tomo_encoders import DataFile
import cupy as cp
from tomo_encoders.neural_nets.surface_segmenter import SurfaceSegmenter
from tomo_encoders.tasks.void_mapping import coarse_segmentation
######## START GPU SETTINGS ############
########## SET MEMORY GROWTH to True ############
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
try:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
    # Invalid device or cannot modify virtual devices once initialized.
    # NOTE(review): bare except also swallows IndexError when no GPU is
    # present — presumably intentional as a "best effort", but confirm.
    pass
######### END GPU SETTINGS ############
model_tag = "M_a07"
model_names = {"segmenter" : "segmenter_Unet_%s"%model_tag}
model_params = get_model_params(model_tag)
# patch size
wd = 32
# guess surface parameters
# NOTE(review): b and b_K are passed to coarse_segmentation() below;
# presumably binning factors — confirm against that function's signature.
b = 4
b_K = 4
sparse_flag = True
pixel_res = 1.17
size_um = -1 # um
void_rank = 1
radius_around_void_um = 800.0 # um
blur_size = 0.5
# handy code for timing stuff
# st_chkpt = cp.cuda.Event(); end_chkpt = cp.cuda.Event(); st_chkpt.record()
# end_chkpt.record(); end_chkpt.synchronize(); t_chkpt = cp.cuda.get_elapsed_time(st_chkpt,end_chkpt)
# print(f"time checkpoint {t_chkpt/1000.0:.2f} secs")
## Output for vis
ply_lowres = '/home/atekawade/Dropbox/Arg/transfers/runtime_plots/lowres_full.ply'
ply_highres = '/home/atekawade/Dropbox/Arg/transfers/runtime_plots/highres_around_void_%i.ply'%void_rank
voids_highres = '/data02/MyArchive/aisteer_3Dencoders/tmp_data/voids_highres'
voids_lowres = '/data02/MyArchive/aisteer_3Dencoders/tmp_data/voids_lowres'
if __name__ == "__main__":
# initialize segmenter fCNN
fe = SurfaceSegmenter(model_initialization = 'load-model', \
model_names = model_names, \
model_path = model_path)
fe.test_speeds(128,n_reps = 5, input_size = (wd,wd,wd))
# read data and initialize output arrays
## to-do: ensure reconstructed object has dimensions that are a multiple of the (wd,wd,wd) !!
hf = h5py.File('/data02/MyArchive/aisteer_3Dencoders/tmp_data/projs_2k.hdf5', 'r')
projs = np.asarray(hf["data"][:])
theta = np.asarray(hf['theta'][:])
center = float(np.asarray(hf["center"]))
hf.close()
# make sure projection shapes are divisible by the patch width (both binning and full steps)
print("BEGIN: Read projection data from disk")
print(f'\tSTAT: shape of raw projection data: {projs.shape}')
##### BEGIN ALGORITHM ########
# guess surface
print(f"\nSTEP: visualize all voids with size greater than {size_um:.2f} um")
V_bin, rec_min_max = coarse_segmentation(projs, theta, center, b_K, b, blur_size)
voids_b = Voids().guess_voids(V_bin, b)
voids_b.select_by_size(size_um, pixel_size_um = pixel_res)
voids_b.sort_by_size(reverse = True)
surf = voids_b.export_void_mesh_with_texture("sizes")
surf.write_ply(ply_lowres)
# guess roi around a void
void_id = np.argsort(voids_b["sizes"])[-void_rank]
voids_b.select_around_void(void_id, radius_around_void_um, pixel_size_um = pixel_res)
print(f"\nSTEP: visualize voids in the neighborhood of void id {void_id} at full detail")
cp.fft.config.clear_plan_cache()
p_sel, r_fac = voids_b.export_grid(wd)
x_voids, p_voids = process_patches(projs, theta, center, fe, p_sel, rec_min_max)
# export voids
voids = Voids().import_from_grid(voids_b, x_voids, p_voids)
voids_b.write_to_disk(voids_lowres)
voids.write_to_disk(voids_highres)
surf = voids.export_void_mesh_with_texture("sizes")
surf.write_ply(ply_highres)
# # complete: save stuff
# Vp = np.zeros(p_voids.vol_shape, dtype = np.uint8)
# p_voids.fill_patches_in_volume(x_voids, Vp)
# ds_save = DataFile('/data02/MyArchive/aisteer_3Dencoders/tmp_data/test_y_pred', tiff = True, d_shape = Vp.shape, d_type = np.uint8, VERBOSITY=0)
# ds_save.create_new(overwrite=True)
# ds_save.write_full(Vp)
# Vp_mask = np.zeros(p_voids.vol_shape, dtype = np.uint8) # Save for illustration purposes the guessed neighborhood of the surface
# p_voids.fill_patches_in_volume(np.ones((len(p_voids),wd,wd,wd)), Vp_mask)
# ds_save = DataFile('/data02/MyArchive/aisteer_3Dencoders/tmp_data/test_y_surf', tiff = True, d_shape = Vp_mask.shape, d_type = np.uint8, VERBOSITY=0)
# ds_save.create_new(overwrite=True)
# ds_save.write_full(Vp_mask)
| 41.084034 | 155 | 0.720802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,467 | 0.504602 |
cbce8d593e80431df2ec54039449d1df714915f5 | 13,919 | py | Python | src/Python/flask/app.py | mwaseem75/iris-python-template | c8d75e22a6eee33e796d28f5508fd656a6ea58d6 | [
"MIT"
] | null | null | null | src/Python/flask/app.py | mwaseem75/iris-python-template | c8d75e22a6eee33e796d28f5508fd656a6ea58d6 | [
"MIT"
] | null | null | null | src/Python/flask/app.py | mwaseem75/iris-python-template | c8d75e22a6eee33e796d28f5508fd656a6ea58d6 | [
"MIT"
] | 1 | 2022-02-21T05:14:48.000Z | 2022-02-21T05:14:48.000Z | from flask import Flask, jsonify, Response, request, make_response, render_template
from definitions.passenger import Passenger
import json,util,io,random
import iris
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_svg import FigureCanvasSVG
from matplotlib.figure import Figure
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source; for anything
# beyond local development this should come from configuration/environment.
app.secret_key = "abc222"
# ----------------------------------------------------------------
### CRUD FOR TITANIC_TABLE.PASSENGER
# ----------------------------------------------------------------
@app.route("/")
def index():
content = util.get_dashboard_stats()
return render_template('index.html', content = content)
@app.route("/processes")
def processes():
iris.cls("Embedded.Utils").SetNameSpace("USER")
statement = iris.sql.exec(util.get_sql_stat("processes"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Processes"
fheading = "Currently runnung processes"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/messages")
def messages():
iris.cls("Embedded.Utils").SetNameSpace("USER")
statement = iris.sql.exec(util.get_sql_stat("messages"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Messages"
fheading = "Production Messages"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/securityusers")
def securityusers():
iris.cls("Embedded.Utils").SetNameSpace("%SYS")
statement = iris.sql.exec(util.get_sql_stat("securityusers"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Users"
fheading = "Security Users"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/securityapps")
def securityapps():
iris.cls("Embedded.Utils").SetNameSpace("%SYS")
statement = iris.sql.exec(util.get_sql_stat("securityapps"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Applications"
fheading = "Created Applications"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/elassert")
def elassert():
iris.cls("Embedded.Utils").SetNameSpace("USER")
statement = iris.sql.exec(util.get_sql_stat("elassert"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Assert"
fheading = "Event Log Assert"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/elerror")
def elerror():
iris.cls("Embedded.Utils").SetNameSpace("USER")
statement = iris.sql.exec(util.get_sql_stat("elerror"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Error"
fheading = "Event Log Error"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/elwarning")
def elwarning():
statement = iris.sql.exec(util.get_sql_stat("elwarning"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Warning"
fheading = "Event Log Warning"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/elinfo")
def elinfo():
statement = iris.sql.exec(util.get_sql_stat("elinfo"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Info"
fheading = "Event Log Info"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/eltrace")
def eltrace():
statement = iris.sql.exec(util.get_sql_stat("eltrace"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Trace"
fheading = "Event Log Trace"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
@app.route("/elevent")
def elevent():
statement = iris.sql.exec(util.get_sql_stat("elalert"))
df = statement.dataframe()
my_data=json.loads(df.to_json(orient="split"))["data"]
my_cols=[{"title": str(col)} for col in json.loads(df.to_json(orient="split"))["columns"]]
ftitle = "Alert"
fheading = "Event Log Alert"
content = util.get_sidebar_stats()
return render_template('tablesdata.html', ftitle = ftitle, fheading = fheading, my_data = my_data, my_cols = my_cols, content = content)
# GET all passengers
@app.route("/api/passengers")
def getAllPassengers():
payload = {}
payload['passengers'] = []
tp = {}
name = request.args.get('name')
currPage = request.args.get('currPage')
pageSize = request.args.get('pageSize')
if name is not None:
# If search by name
query = "SELECT ID FROM Titanic_Table.Passenger WHERE name %STARTSWITH ?"
rs = iris.sql.exec(query, name)
for i in rs:
# We create an iris object
tp = iris.ref(1)
# We get the json in a string
iris.cls("Titanic.Table.Passenger")._OpenId(i[0])._JSONExportToString(tp)
# We normalize the string to get it in python
tp = iris.cls("%String").Normalize(tp)
# We load the string in a dict
tp = json.loads(tp)
# We add the id
tp['passengerId'] = i[0]
payload['passengers'].append(tp)
else:
currPage = int(currPage) if currPage is not None else 1
pageSize = int(pageSize) if pageSize is not None else 10
tFrom = ((currPage -1 ) * pageSize)+1
tTo = tFrom + (pageSize-1)
query = """
SELECT * FROM
(
SELECT ID,
ROW_NUMBER() OVER (ORDER By ID ASC) rn
FROM Titanic_Table.Passenger
) tmp
WHERE rn between {} and {}
ORDER By ID ASC
""".format(tFrom,tTo)
rs = iris.sql.exec(query)
for i in rs:
# We create an iris object
tp = iris.ref(1)
# We get the json in a string
iris.cls("Titanic.Table.Passenger")._OpenId(i[0])._JSONExportToString(tp)
# We normalize the string to get it in python
tp = iris.cls("%String").Normalize(tp)
# We load the string in a dict
tp = json.loads(tp)
# We add the id
tp['passengerId'] = i[0]
payload['passengers'].append(tp)
# Getting the total number of passengers
rs = iris.sql.exec("SELECT COUNT(*) FROM Titanic_Table.Passenger")
payload['total'] = rs.__next__()[0]
payload['query'] = query
return jsonify(payload)
# POST a new passenger
@app.route("/api/passengers", methods=["POST"])
def createPassenger():
# Retreiving the data in request body
passenger = request.get_json()
query = "INSERT INTO Titanic_Table.Passenger (survived, pclass, name, sex, age, sibSp, parCh, ticket, fare, cabin, embarked) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
# Getting the new ID of the passenger
newId = int(iris.sql.exec("SELECT MAX(ID) FROM Titanic_Table.Passenger").__next__()[0]) + 1
try:
iris.sql.exec(query, passenger['survived'], passenger['pclass'], passenger['name'], passenger['sex'], passenger['age'], passenger['sibSp'], passenger['parCh'], passenger['ticket'], passenger['fare'], passenger['cabin'], passenger['embarked'])
except:
return make_response(
'Bad Request',
400
)
payload = {
'query': query,
'passengerId': newId
}
return jsonify(payload)
# GET passenger with id
@app.route("/api/passengers/<int:id>", methods=["GET"])
def getPassenger(id):
payload = {}
query = "SELECT * FROM Titanic_Table.Passenger WHERE ID = ?"
rs = iris.sql.exec(query, str(id))
try :
passenger = Passenger(rs.__next__()).__dict__
except:
return make_response(
'Not Found',
204
)
payload['passenger'] = passenger
payload['query'] = query
return jsonify(payload)
# PUT to update passenger with id
@app.route("/api/passengers/<int:id>", methods=["PUT"])
def updatePassenger(id):
# First, checking to see if the passenger exists
query = "SELECT ID FROM Titanic_Table.Passenger WHERE ID = ?"
rs = iris.sql.exec(query, str(id))
try :
rs.__next__()
except:
return make_response(
'Not Found',
204
)
# Updating
passenger = request.get_json()
query = "UPDATE Titanic_Table.Passenger SET survived = ?, pclass = ?, name = ?, sex = ?, age = ?, sibSp = ?, parCh = ?, ticket = ?, fare = ?, cabin = ?, embarked = ? WHERE ID = ?"
try:
iris.sql.exec(query, passenger['survived'], passenger['pclass'], passenger['name'], passenger['sex'], passenger['age'], passenger['sibSp'], passenger['parCh'], passenger['ticket'], passenger['fare'], passenger['cabin'], passenger['embarked'], id)
except:
return make_response(
'Bad Request',
400
)
payload = {
'query': query,
}
return jsonify(payload)
# DELETE passenger with id
@app.route("/api/passengers/<int:id>", methods=["DELETE"])
def deletePassenger(id):
payload = {}
query = "DELETE FROM Titanic_Table.Passenger WHERE ID = ?"
try:
iris.sql.exec(query, str(id))
except:
return make_response(
'Not Found',
204
)
payload['query'] = query
return jsonify(payload)
def getP():
    """Paginated passenger listing.

    NOTE(review): apparently dead code — no @app.route decorator, and it
    duplicates the pagination branch of getAllPassengers(). It also reads
    flask's `request`, so it only works inside a request context. Candidate
    for removal once confirmed unused.
    """
    payload = {}
    payload['passengers'] = []
    tp = {}
    name = request.args.get('name')
    currPage = request.args.get('currPage')
    pageSize = request.args.get('pageSize')
    currPage = int(currPage) if currPage is not None else 1
    pageSize = int(pageSize) if pageSize is not None else 10
    tFrom = ((currPage -1 ) * pageSize)+1
    tTo = tFrom + (pageSize-1)
    query = """
        SELECT * FROM
            (
                SELECT ID,
                ROW_NUMBER() OVER (ORDER By ID ASC) rn
                FROM Titanic_Table.Passenger
            ) tmp
        WHERE rn between {} and {}
        ORDER By ID ASC
        """.format(tFrom,tTo)
    rs = iris.sql.exec(query)
    for i in rs:
        # We create an iris object
        tp = iris.ref(1)
        # We get the json in a string
        iris.cls("Titanic.Table.Passenger")._OpenId(i[0])._JSONExportToString(tp)
        # We normalize the string to get it in python
        tp = iris.cls("%String").Normalize(tp)
        # We load the string in a dict
        tp = json.loads(tp)
        # We add the id
        tp['passengerId'] = i[0]
        payload['passengers'].append(tp)
    # Getting the total number of passengers
    rs = iris.sql.exec("SELECT COUNT(*) FROM Titanic_Table.Passenger")
    payload['total'] = rs.__next__()[0]
    payload['query'] = query
    return jsonify(payload)
@app.route("/matplot")
def matplot():
""" Returns html with the img tag for your plot.
"""
content = util.get_sidebar_stats()
num_x_points = int(request.args.get("num_x_points", 50))
return render_template('matplot.html', content = content, num_x_points = num_x_points)
@app.route("/matplot-as-image-<int:num_x_points>.png")
def plot_png(num_x_points=50):
""" renders the plot on the fly.
"""
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
x_points = range(num_x_points)
axis.plot(x_points, [random.randint(1, 30) for x in x_points])
output = io.BytesIO()
FigureCanvasAgg(fig).print_png(output)
return Response(output.getvalue(), mimetype="image/png")
# ----------------------------------------------------------------
### MAIN PROGRAM
# ----------------------------------------------------------------
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader — fine
    # for local development, must be disabled for any real deployment.
    app.run('0.0.0.0', port = "8080", debug=True)
cbce9d4c6da7ba7b0d4c2b3424496f644f1e7e6d | 10,390 | py | Python | unihan_db/importer.py | linnaea/unihan-db | da3730dce0c4aad425e7ab9ae1331762b45793f2 | [
"MIT"
] | null | null | null | unihan_db/importer.py | linnaea/unihan-db | da3730dce0c4aad425e7ab9ae1331762b45793f2 | [
"MIT"
] | null | null | null | unihan_db/importer.py | linnaea/unihan-db | da3730dce0c4aad425e7ab9ae1331762b45793f2 | [
"MIT"
] | null | null | null | from unihan_db.tables import (
UnhnLocation,
UnhnLocationkXHC1983,
UnhnReading,
kCantonese,
kCCCII,
kCheungBauer,
kCheungBauerIndex,
kCihaiT,
kDaeJaweon,
kDefinition,
kFenn,
kFennIndex,
kGSR,
kHanYu,
kHanyuPinlu,
kHanyuPinyin,
kHDZRadBreak,
kIICore,
kIICoreSource,
kUnihanCore2020,
kIRG_GSource,
kIRG_HSource,
kIRG_JSource,
kIRG_KPSource,
kIRG_KSource,
kIRG_MSource,
kIRG_TSource,
kIRG_USource,
kIRG_VSource,
kIRG_SSource,
kIRG_UKSource,
kIRGDaeJaweon,
kIRGHanyuDaZidian,
kIRGKangXi,
kMandarin,
kRSAdobe_Japan1_6,
kRSJapanese,
kRSKangXi,
kRSKanWa,
kRSKorean,
kRSUnicode,
kSBGY,
kTotalStrokes,
kXHC1983,
kTGHZ2013,
kSimplifiedVariant,
kTraditionalVariant,
kSpoofingVariant,
kZVariant,
kSemanticVariant,
kSpecializedSemanticVariant,
UnhnVariantSource,
SemanticVariantSource
)
def import_char(c, char): # NOQA: C901
    """Populate the ORM relationship collections on character row *c* from
    the expanded Unihan field dict *char*.

    For every ``k*`` property present in *char*, the matching table object
    is built and appended to the corresponding collection on *c*. The value
    shapes (scalar dict vs. list of dicts, nested 'locations'/'readings')
    mirror the unihan-etl expanded output — confirm against that package
    when fields change.
    """
    # Simple multi-valued string fields.
    if 'kDefinition' in char:
        for d in char['kDefinition']:
            c.kDefinition.append(kDefinition(definition=d))
    if 'kCantonese' in char:
        for d in char['kCantonese']:
            c.kCantonese.append(kCantonese(definition=d))
    if 'kCCCII' in char:
        for d in char['kCCCII']:
            c.kCCCII.append(kCCCII(hex=d))
    # Single-valued fields split by script (Simplified/Traditional).
    if 'kMandarin' in char:
        d = char['kMandarin']
        c.kMandarin.append(kMandarin(hans=d['zh-Hans'], hant=d['zh-Hant']))
    if 'kTotalStrokes' in char:
        d = char['kTotalStrokes']
        c.kTotalStrokes.append(kTotalStrokes(hans=d['zh-Hans'], hant=d['zh-Hant']))
    # Fields carrying nested dictionary locations and/or readings.
    if 'kHanyuPinyin' in char:
        for d in char['kHanyuPinyin']:
            k = kHanyuPinyin()
            for loc in d['locations']:
                k.locations.append(
                    UnhnLocation(
                        volume=loc['volume'],
                        page=loc['page'],
                        character=loc['character'],
                        virtual=loc['virtual'],
                    )
                )
            for reading in d['readings']:
                k.readings.append(UnhnReading(reading=reading))
            c.kHanyuPinyin.append(k)
    if 'kHanYu' in char:
        # One kHanYu row collects all locations (unlike kIRGHanyuDaZidian
        # below, which creates one row per location).
        k = kHanYu()
        for d in char['kHanYu']:
            k.locations.append(
                UnhnLocation(
                    volume=d['volume'],
                    page=d['page'],
                    character=d['character'],
                    virtual=d['virtual'],
                )
            )
        c.kHanYu.append(k)
    if 'kIRGHanyuDaZidian' in char:
        for d in char['kIRGHanyuDaZidian']:
            k = kIRGHanyuDaZidian()
            k.locations.append(
                UnhnLocation(
                    volume=d['volume'],
                    page=d['page'],
                    character=d['character'],
                    virtual=d['virtual'],
                )
            )
            c.kIRGHanyuDaZidian.append(k)
    if 'kXHC1983' in char:
        for d in char['kXHC1983']:
            k = kXHC1983()
            for loc in d['locations']:
                k.locations.append(
                    UnhnLocationkXHC1983(
                        page=loc['page'],
                        character=loc['character'],
                        entry=loc['entry'],
                        substituted=loc['substituted'],
                    )
                )
            k.readings.append(UnhnReading(reading=d['reading']))
            c.kXHC1983.append(k)
    if 'kTGHZ2013' in char:
        for d in char['kTGHZ2013']:
            k = kTGHZ2013()
            for loc in d['locations']:
                k.locations.append(
                    UnhnLocation(
                        page=loc['page'],
                        character=loc['character'],
                    )
                )
            k.readings.append(UnhnReading(reading=d['reading']))
            c.kTGHZ2013.append(k)
    if 'kCheungBauer' in char:
        for d in char['kCheungBauer']:
            k = kCheungBauer(
                radical=d['radical'], strokes=d['strokes'], cangjie=d['cangjie']
            )
            for reading in d['readings']:
                k.readings.append(UnhnReading(reading=reading))
            c.kCheungBauer.append(k)
    if 'kRSAdobe_Japan1_6' in char:
        for d in char['kRSAdobe_Japan1_6']:
            c.kRSAdobe_Japan1_6.append(
                kRSAdobe_Japan1_6(
                    type=d['type'],
                    cid=d['cid'],
                    radical=d['radical'],
                    strokes=d['strokes'],
                    strokes_residue=d['strokes-residue'],
                )
            )
    if 'kCihaiT' in char:
        for d in char['kCihaiT']:
            c.kCihaiT.append(
                kCihaiT(page=d['page'], row=d['row'], character=d['character'])
            )
    if 'kIICore' in char:
        for d in char['kIICore']:
            k = kIICore(priority=d['priority'])
            for s in d['sources']:
                k.sources.append(kIICoreSource(source=s))
            c.kIICore.append(k)
    if 'kUnihanCore2020' in char:
        for s in char['kUnihanCore2020']:
            c.kUnihanCore2020.append(kUnihanCore2020(source=s))
    # kDaeJaweon is a single dict, not a list, in the expanded structure.
    if 'kDaeJaweon' in char:
        k = kDaeJaweon()
        d = char['kDaeJaweon']
        k.locations.append(
            UnhnLocation(page=d['page'], character=d['character'], virtual=d['virtual'])
        )
        c.kDaeJaweon.append(k)
    if 'kIRGKangXi' in char:
        k = kIRGKangXi()
        for d in char['kIRGKangXi']:
            k.locations.append(
                UnhnLocation(
                    page=d['page'], character=d['character'], virtual=d['virtual']
                )
            )
        c.kIRGKangXi.append(k)
    if 'kIRGDaeJaweon' in char:
        k = kIRGDaeJaweon()
        for d in char['kIRGDaeJaweon']:
            k.locations.append(
                UnhnLocation(
                    page=d['page'], character=d['character'], virtual=d['virtual']
                )
            )
        c.kIRGDaeJaweon.append(k)
    if 'kFenn' in char:
        for d in char['kFenn']:
            c.kFenn.append(kFenn(phonetic=d['phonetic'], frequency=d['frequency']))
    if 'kHanyuPinlu' in char:
        for d in char['kHanyuPinlu']:
            c.kHanyuPinlu.append(
                kHanyuPinlu(phonetic=d['phonetic'], frequency=d['frequency'])
            )
    if 'kHDZRadBreak' in char:
        d = char['kHDZRadBreak']
        k = kHDZRadBreak(radical=d['radical'], ucn=d['ucn'])
        k.locations.append(
            UnhnLocation(
                volume=d['location']['volume'],
                page=d['location']['page'],
                character=d['location']['character'],
                virtual=d['location']['virtual'],
            )
        )
        c.kHDZRadBreak.append(k)
    if 'kSBGY' in char:
        for d in char['kSBGY']:
            k = kSBGY()
            k.locations.append(UnhnLocation(page=d['page'], character=d['character']))
            c.kSBGY.append(k)
    # Table-driven handling for structurally identical field families.
    rs_fields = ( # radical-stroke fields, since they're the same structure
        ('kRSUnicode', kRSUnicode, c.kRSUnicode),
        ('kRSJapanese', kRSJapanese, c.kRSJapanese),
        ('kRSKangXi', kRSKangXi, c.kRSKangXi),
        ('kRSKanWa', kRSKanWa, c.kRSKanWa),
        ('kRSKorean', kRSKorean, c.kRSKorean),
    )
    for f, model, column in rs_fields:
        if f in char:
            for d in char[f]:
                k = model(
                    radical=d['radical'],
                    strokes=d['strokes'],
                    simplified=d['simplified'],
                )
                column.append(k)
    irg_fields = ( # IRG, since they're the same structure
        ('kIRG_GSource', kIRG_GSource, c.kIRG_GSource),
        ('kIRG_HSource', kIRG_HSource, c.kIRG_HSource),
        ('kIRG_JSource', kIRG_JSource, c.kIRG_JSource),
        ('kIRG_KPSource', kIRG_KPSource, c.kIRG_KPSource),
        ('kIRG_KSource', kIRG_KSource, c.kIRG_KSource),
        ('kIRG_MSource', kIRG_MSource, c.kIRG_MSource),
        ('kIRG_TSource', kIRG_TSource, c.kIRG_TSource),
        ('kIRG_USource', kIRG_USource, c.kIRG_USource),
        ('kIRG_VSource', kIRG_VSource, c.kIRG_VSource),
        ('kIRG_SSource', kIRG_SSource, c.kIRG_SSource),
        ('kIRG_UKSource', kIRG_UKSource, c.kIRG_UKSource),
    )
    for f, model, column in irg_fields:
        if f in char:
            d = char[f]
            k = model(source=d['source'], location=d['location'])
            column.append(k)
    if 'kGSR' in char:
        for d in char['kGSR']:
            k = kGSR(set=d['set'], letter=d['letter'], apostrophe=d['apostrophe'])
            c.kGSR.append(k)
    if 'kCheungBauerIndex' in char:
        d = char['kCheungBauerIndex']
        k = kCheungBauerIndex()
        k.locations.append(
            UnhnLocation(
                page=d['location']['page'], character=d['location']['character']
            )
        )
        c.kCheungBauerIndex.append(k)
    if 'kFennIndex' in char:
        d = char['kFennIndex']
        k = kFennIndex()
        k.locations.append(
            UnhnLocation(
                page=d['location']['page'], character=d['location']['character']
            )
        )
        c.kFennIndex.append(k)
    # Variant fields: plain UCN references vs. variants carrying sources.
    simple_variant_fields = (
        ('kSimplifiedVariant', kSimplifiedVariant, c.kSimplifiedVariant),
        ('kTraditionalVariant', kTraditionalVariant, c.kTraditionalVariant),
        ('kSpoofingVariant', kSpoofingVariant, c.kSpoofingVariant),
    )
    for f, model, column in simple_variant_fields:
        if f in char:
            for d in char[f]:
                column.append(model(ucn=d))
    sourced_variant_fields = (
        ('kZVariant', kZVariant, c.kZVariant, UnhnVariantSource),
        ('kSemanticVariant', kSemanticVariant, c.kSemanticVariant, SemanticVariantSource),
        ('kSpecializedSemanticVariant', kSpecializedSemanticVariant, c.kSpecializedSemanticVariant, SemanticVariantSource),
    )
    for f, model, column, source_model in sourced_variant_fields:
        if f in char:
            for d in char[f]:
                m = model(ucn=d['ucn'])
                for s in d.get('sources', []):
                    m.sources.append(source_model(**s))
                column.append(m)
| 31.580547 | 123 | 0.528104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,813 | 0.174495 |
cbcfd99b4f40fab95fa519c4f1885786acde8bae | 2,373 | py | Python | sylwek/data_provider.py | fizykpl/Synerise2 | baf755004d62b3d14a0988bbe8a5add15f10d066 | [
"MIT"
] | null | null | null | sylwek/data_provider.py | fizykpl/Synerise2 | baf755004d62b3d14a0988bbe8a5add15f10d066 | [
"MIT"
] | null | null | null | sylwek/data_provider.py | fizykpl/Synerise2 | baf755004d62b3d14a0988bbe8a5add15f10d066 | [
"MIT"
] | null | null | null | import time
import numpy as np
from matplotlib import pyplot as plt
from sylwek.similarity_measure import SimilarityMeasure
from utils import mnist_reader
class DataProvider:
    """Loads the Fashion-MNIST data set and serves images by unique id.

    Every image receives a unique integer ``image_id`` (its insertion index,
    starting at 0); ``labels`` maps each class label to the list of image
    ids carrying that label.
    """

    def __init__(self):
        print("Init: Data Provider.")
        # Instance state. Previously these were mutable class-level
        # attributes, silently shared by every DataProvider instance.
        self.images = {}
        self.labels = {}
        self.next_id = -1
        # SimilarityMeasure keeps a reference to the dict, which is then
        # filled in place below.
        self.similarity_measure = SimilarityMeasure(self.images)
        # load data
        X_train, y_train = mnist_reader.load_mnist('../data/fashion', kind='train')
        X_test, y_test = mnist_reader.load_mnist('../data/fashion', kind='t10k')
        # Add images and labels
        self.add_image(X_train, y_train)
        self.add_image(X_test, y_test)

    def get_image(self, image_id):
        """Return the image stored under *image_id*, or None if unknown."""
        return self.images.get(image_id)

    def add_image(self, images, labels):
        """Register *images* together with their matching *labels*.

        :param images: sequence of image vectors
        :param labels: sequence of class labels, same length as *images*
        :raises ValueError: if the two sequences differ in length
            (previously this called exit(-1), killing the whole process).
        """
        if len(images) != len(labels):
            raise ValueError("Images and labels must be the same size")
        for index, image in enumerate(images):
            image_id = self._next_id()  # avoid shadowing the builtin `id`
            self.images[image_id] = image
            self.add_label(image_id, labels[index])

    def add_label(self, image_id, label):
        """Record that *image_id* carries class *label*."""
        # setdefault replaces the explicit "create empty list first" branch.
        self.labels.setdefault(label, []).append(image_id)

    def _next_id(self):
        """Return the next unique image id (sequential from 0)."""
        self.next_id += 1
        return self.next_id

    def show_image(self, image_id):
        """Display the 784-element (28x28) image vector as a picture.

        :param image_id: id of the image to display
        """
        data = np.resize(self.images[image_id], (28, 28))
        plt.imshow(data)
        plt.show()
if __name__ == "__main__":
dp = DataProvider()
exetime = int(round(time.time() * 1000))
id = 50
mi = dp.similarity_measure.most_similar(id, 3)
exetime = int(round(time.time() * 1000)) - exetime
print("total time {}[ms]".format( exetime))
print(mi)
# Show
dp.show_image(id)
for id in mi:
dp.show_image(id)
| 28.939024 | 115 | 0.604298 | 1,864 | 0.783852 | 0 | 0 | 0 | 0 | 0 | 0 | 564 | 0.237174 |
cbd13396135d0966c93e66018afe90dc4090a475 | 4,933 | py | Python | mmtbx/regression/fix_cablam/tst_one_resid_rotation.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | mmtbx/regression/fix_cablam/tst_one_resid_rotation.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/regression/fix_cablam/tst_one_resid_rotation.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | from __future__ import absolute_import, division, print_function
import iotbx.pdb
import mmtbx.model
from mmtbx.building.cablam_idealization import cablam_idealization, master_phil
import sys
import libtbx.load_env
pdb_str = """\
ATOM 2327 N GLY A 318 169.195 115.930 63.690 1.00216.32 N
ATOM 2328 CA GLY A 318 169.975 114.907 64.348 1.00193.16 C
ATOM 2329 C GLY A 318 169.246 113.598 64.539 1.00197.19 C
ATOM 2330 O GLY A 318 168.148 113.399 64.016 1.00193.16 O
ATOM 2331 N GLN A 319 169.849 112.700 65.308 1.00184.03 N
ATOM 2332 CA GLN A 319 169.232 111.415 65.589 1.00195.95 C
ATOM 2333 C GLN A 319 169.246 111.137 67.080 1.00193.64 C
ATOM 2334 O GLN A 319 168.185 111.047 67.708 1.00229.34 O
ATOM 2335 CB GLN A 319 169.941 110.308 64.822 1.00201.09 C
ATOM 2336 CG GLN A 319 169.719 110.407 63.336 1.00236.37 C
ATOM 2337 CD GLN A 319 168.255 110.312 62.966 1.00254.36 C
ATOM 2338 OE1 GLN A 319 167.506 109.520 63.536 1.00280.71 O
ATOM 2339 NE2 GLN A 319 167.836 111.126 62.007 1.00220.80 N
ATOM 2340 N ALA A 320 170.446 111.006 67.646 1.00140.99 N
ATOM 2341 CA ALA A 320 170.595 110.942 69.090 1.00197.51 C
ATOM 2342 C ALA A 320 169.906 109.734 69.704 1.00203.65 C
ATOM 2343 O ALA A 320 168.789 109.863 70.203 1.00242.54 O
ATOM 2344 CB ALA A 320 170.069 112.226 69.727 1.00240.45 C
ATOM 2345 N LYS A 321 170.554 108.566 69.662 1.00164.18 N
ATOM 2346 CA LYS A 321 169.963 107.306 70.104 1.00134.95 C
ATOM 2347 C LYS A 321 169.103 107.477 71.344 1.00134.95 C
ATOM 2348 O LYS A 321 167.904 107.194 71.302 1.00134.95 O
ATOM 2349 CB LYS A 321 171.040 106.265 70.421 1.00145.47 C
ATOM 2350 CG LYS A 321 171.950 105.868 69.279 1.00164.10 C
ATOM 2351 CD LYS A 321 171.197 105.229 68.138 1.00145.47 C
ATOM 2352 CE LYS A 321 172.173 104.777 67.070 1.00145.80 C
ATOM 2353 NZ LYS A 321 171.487 104.149 65.918 1.00145.93 N
ATOM 2354 N ARG A 322 169.682 107.900 72.454 1.00185.62 N
ATOM 2355 CA ARG A 322 168.888 108.089 73.652 1.00142.57 C
ATOM 2356 C ARG A 322 169.546 109.124 74.551 1.00128.56 C
ATOM 2357 O ARG A 322 170.758 109.341 74.474 1.00128.56 O
ATOM 2358 CB ARG A 322 168.719 106.769 74.369 1.00115.16 C
ATOM 2359 CG ARG A 322 167.669 106.817 75.430 1.00133.21 C
ATOM 2360 CD ARG A 322 167.578 105.605 76.270 1.00149.50 C
ATOM 2361 NE ARG A 322 168.665 105.482 77.219 1.00115.16 N
ATOM 2362 CZ ARG A 322 168.912 104.370 77.883 1.00115.16 C
ATOM 2363 NH1 ARG A 322 168.133 103.302 77.715 1.00116.90 N
ATOM 2364 NH2 ARG A 322 169.915 104.340 78.745 1.00115.16 N
ATOM 2365 N VAL A 323 168.740 109.783 75.382 1.00121.29 N
ATOM 2366 CA VAL A 323 169.198 110.884 76.220 1.00121.29 C
ATOM 2367 C VAL A 323 168.668 110.712 77.632 1.00139.86 C
ATOM 2368 O VAL A 323 167.480 110.430 77.835 1.00121.29 O
ATOM 2369 CB VAL A 323 168.795 112.246 75.654 1.00117.56 C
ATOM 2370 CG1 VAL A 323 168.912 113.310 76.710 1.00151.93 C
ATOM 2371 CG2 VAL A 323 169.721 112.603 74.534 1.00121.07 C
"""
def exercise_no_sidechains(prefix="tst_one_resid_rotation_no_sidechains"):
  """Run cablam idealization on a backbone-only (N/CA/C/O) copy of the test model."""
  inp = iotbx.pdb.input(lines=pdb_str.split('\n'), source_info=None)
  model = mmtbx.model.manager(model_input=inp)
  # Dump the starting coordinates for manual inspection.
  with open("%s_start.pdb" % prefix, 'w') as out:
    out.write(model.model_as_pdb())
  # Strip side chains: keep only the protein backbone atoms.
  backbone_sel = model.selection("name N or name CA or name C or name O")
  model = model.select(backbone_sel)
  ci = cablam_idealization(
      model=model,
      params=master_phil.extract().cablam_idealization,
      log=sys.stdout)
  pdb_txt = model.model_as_pdb()
def exercise_yes_sidechains(prefix="tst_one_resid_rotation_yes_sidechains"):
  """Run cablam idealization on the test model with side chains left in place."""
  inp = iotbx.pdb.input(lines=pdb_str.split('\n'), source_info=None)
  model = mmtbx.model.manager(model_input=inp)
  # Dump the starting coordinates for manual inspection.
  with open("%s_start.pdb" % prefix, 'w') as out:
    out.write(model.model_as_pdb())
  ci = cablam_idealization(
      model=model,
      params=master_phil.extract().cablam_idealization,
      log=sys.stdout)
  pdb_txt = model.model_as_pdb()
if __name__ == '__main__':
  # Both exercises need the optional 'probe' module; skip gracefully otherwise.
  if libtbx.env.has_module(name="probe"):
    exercise_no_sidechains()
    exercise_yes_sidechains()
  else:
    print("Skipping: probe not configured")
| 59.433735 | 107 | 0.598419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,770 | 0.764241 |
cbd272df4b8ee10d7b3a479337364711ca636afe | 9,835 | py | Python | mindhome_alpha/erpnext/stock/doctype/batch/test_batch.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/stock/doctype/batch/test_batch.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/stock/doctype/batch/test_batch.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.exceptions import ValidationError
import unittest
from erpnext.stock.doctype.batch.batch import get_batch_qty, UnableToSelectBatchError, get_batch_no
from frappe.utils import cint, flt
from erpnext.accounts.doctype.purchase_invoice.test_purchase_invoice import make_purchase_invoice
from erpnext.stock.get_item_details import get_item_details
class TestBatch(unittest.TestCase):
    """Integration tests for Batch creation/selection on ERPNext stock documents.

    These tests hit the live frappe database: they create items, batches,
    purchase receipts, stock entries and delivery notes, and assert on the
    batch quantities maintained by erpnext.stock.doctype.batch.batch.
    """

    def test_item_has_batch_enabled(self):
        # Saving a Batch for an item without 'Has Batch No' must fail validation.
        self.assertRaises(ValidationError, frappe.get_doc({
            "doctype": "Batch",
            "name": "_test Batch",
            "item": "_Test Item"
        }).save)

    @classmethod
    def make_batch_item(cls, item_name):
        """Create a stock item with batch tracking and automatic batch creation.

        NOTE(review): returns None when the item already exists (the `return`
        is inside the `if`), and frappe.db.exists is called with a single
        argument instead of (doctype, name) — confirm both are intended.
        """
        from erpnext.stock.doctype.item.test_item import make_item
        if not frappe.db.exists(item_name):
            return make_item(item_name, dict(has_batch_no = 1, create_new_batch = 1, is_stock_item=1))

    def test_purchase_receipt(self, batch_qty = 100):
        '''Test automated batch creation from Purchase Receipt'''
        self.make_batch_item('ITEM-BATCH-1')

        receipt = frappe.get_doc(dict(
            doctype='Purchase Receipt',
            supplier='_Test Supplier',
            company='_Test Company',
            items=[
                dict(
                    item_code='ITEM-BATCH-1',
                    qty=batch_qty,
                    rate=10,
                    warehouse= 'Stores - _TC'
                )
            ]
        )).insert()
        receipt.submit()

        # Submitting the receipt must auto-create a batch holding the full qty.
        self.assertTrue(receipt.items[0].batch_no)
        self.assertEqual(get_batch_qty(receipt.items[0].batch_no,
            receipt.items[0].warehouse), batch_qty)

        # Returned so other tests can reuse the receipt/batch (e.g. delivery tests).
        return receipt

    def test_stock_entry_incoming(self):
        '''Test batch creation via Stock Entry (Work Order)'''
        self.make_batch_item('ITEM-BATCH-1')

        stock_entry = frappe.get_doc(dict(
            doctype = 'Stock Entry',
            purpose = 'Material Receipt',
            company = '_Test Company',
            items = [
                dict(
                    item_code = 'ITEM-BATCH-1',
                    qty = 90,
                    t_warehouse = '_Test Warehouse - _TC',
                    cost_center = 'Main - _TC',
                    rate = 10
                )
            ]
        ))

        stock_entry.set_stock_entry_type()
        stock_entry.insert()
        stock_entry.submit()

        # Material Receipt must also auto-assign a batch with the received qty.
        self.assertTrue(stock_entry.items[0].batch_no)
        self.assertEqual(get_batch_qty(stock_entry.items[0].batch_no, stock_entry.items[0].t_warehouse), 90)

    def test_delivery_note(self):
        '''Test automatic batch selection for outgoing items'''
        batch_qty = 15
        receipt = self.test_purchase_receipt(batch_qty)
        item_code = 'ITEM-BATCH-1'

        delivery_note = frappe.get_doc(dict(
            doctype='Delivery Note',
            customer='_Test Customer',
            company=receipt.company,
            items=[
                dict(
                    item_code=item_code,
                    qty=batch_qty,
                    rate=10,
                    warehouse=receipt.items[0].warehouse
                )
            ]
        )).insert()
        delivery_note.submit()

        # shipped from FEFO batch
        self.assertEqual(
            delivery_note.items[0].batch_no,
            get_batch_no(item_code, receipt.items[0].warehouse, batch_qty)
        )

    def test_delivery_note_fail(self):
        '''Test automatic batch selection for outgoing items'''
        # Requesting more than the available batch qty must make insert() raise.
        receipt = self.test_purchase_receipt(100)
        delivery_note = frappe.get_doc(dict(
            doctype = 'Delivery Note',
            customer = '_Test Customer',
            company = receipt.company,
            items = [
                dict(
                    item_code = 'ITEM-BATCH-1',
                    qty = 5000,
                    rate = 10,
                    warehouse = receipt.items[0].warehouse
                )
            ]
        ))
        self.assertRaises(UnableToSelectBatchError, delivery_note.insert)

    def test_stock_entry_outgoing(self):
        '''Test automatic batch selection for outgoing stock entry'''

        batch_qty = 16
        receipt = self.test_purchase_receipt(batch_qty)
        item_code = 'ITEM-BATCH-1'

        stock_entry = frappe.get_doc(dict(
            doctype='Stock Entry',
            purpose='Material Issue',
            company=receipt.company,
            items=[
                dict(
                    item_code=item_code,
                    qty=batch_qty,
                    s_warehouse=receipt.items[0].warehouse,
                )
            ]
        ))

        stock_entry.set_stock_entry_type()
        stock_entry.insert()
        stock_entry.submit()

        # assert same batch is selected
        self.assertEqual(
            stock_entry.items[0].batch_no,
            get_batch_no(item_code, receipt.items[0].warehouse, batch_qty)
        )

    def test_batch_split(self):
        '''Test batch splitting'''
        receipt = self.test_purchase_receipt()
        from erpnext.stock.doctype.batch.batch import split_batch

        # Move 22 units out of the receipt's batch (100) into a new batch.
        new_batch = split_batch(receipt.items[0].batch_no, 'ITEM-BATCH-1', receipt.items[0].warehouse, 22)

        self.assertEqual(get_batch_qty(receipt.items[0].batch_no, receipt.items[0].warehouse), 78)
        self.assertEqual(get_batch_qty(new_batch, receipt.items[0].warehouse), 22)

    def test_get_batch_qty(self):
        '''Test getting batch quantities by batch_numbers, item_code or warehouse'''
        self.make_batch_item('ITEM-BATCH-2')
        self.make_new_batch_and_entry('ITEM-BATCH-2', 'batch a', '_Test Warehouse - _TC')
        self.make_new_batch_and_entry('ITEM-BATCH-2', 'batch b', '_Test Warehouse - _TC')

        # Per-item query returns one dict per batch; each entry received 90 units.
        self.assertEqual(get_batch_qty(item_code = 'ITEM-BATCH-2', warehouse = '_Test Warehouse - _TC'),
            [{'batch_no': u'batch a', 'qty': 90.0}, {'batch_no': u'batch b', 'qty': 90.0}])

        self.assertEqual(get_batch_qty('batch a', '_Test Warehouse - _TC'), 90)

    def test_total_batch_qty(self):
        # batch_qty on the Batch doc must track stock entries, including cancellation.
        self.make_batch_item('ITEM-BATCH-3')
        existing_batch_qty = flt(frappe.db.get_value("Batch", "B100", "batch_qty"))
        stock_entry = self.make_new_batch_and_entry('ITEM-BATCH-3', 'B100', '_Test Warehouse - _TC')

        current_batch_qty = flt(frappe.db.get_value("Batch", "B100", "batch_qty"))
        self.assertEqual(current_batch_qty, existing_batch_qty + 90)

        stock_entry.cancel()
        # Cancelling the entry must restore the previous batch_qty.
        current_batch_qty = flt(frappe.db.get_value("Batch", "B100", "batch_qty"))
        self.assertEqual(current_batch_qty, existing_batch_qty)

    @classmethod
    def make_new_batch_and_entry(cls, item_name, batch_name, warehouse):
        '''Make a new stock entry for given target warehouse and batch name of item'''

        if not frappe.db.exists("Batch", batch_name):
            batch = frappe.get_doc(dict(
                doctype = 'Batch',
                item = item_name,
                batch_id = batch_name
            )).insert(ignore_permissions=True)
            batch.save()

        # Receive 90 units of the item into the named batch.
        stock_entry = frappe.get_doc(dict(
            doctype = 'Stock Entry',
            purpose = 'Material Receipt',
            company = '_Test Company',
            items = [
                dict(
                    item_code = item_name,
                    qty = 90,
                    t_warehouse = warehouse,
                    cost_center = 'Main - _TC',
                    rate = 10,
                    batch_no = batch_name,
                    allow_zero_valuation_rate = 1
                )
            ]
        ))

        stock_entry.set_stock_entry_type()
        stock_entry.insert()
        stock_entry.submit()

        return stock_entry

    def test_batch_name_with_naming_series(self):
        # Force naming-series batch IDs, then verify a deleted batch name is
        # reissued to the next batch (series counter is not advanced twice).
        stock_settings = frappe.get_single('Stock Settings')
        use_naming_series = cint(stock_settings.use_naming_series)

        if not use_naming_series:
            frappe.set_value('Stock Settings', 'Stock Settings', 'use_naming_series', 1)

        batch = self.make_new_batch('_Test Stock Item For Batch Test1')
        batch_name = batch.name

        self.assertTrue(batch_name.startswith('BATCH-'))

        batch.delete()
        batch = self.make_new_batch('_Test Stock Item For Batch Test2')

        self.assertEqual(batch_name, batch.name)

        # reset Stock Settings
        if not use_naming_series:
            frappe.set_value('Stock Settings', 'Stock Settings', 'use_naming_series', 0)

    def make_new_batch(self, item_name, batch_id=None, do_not_insert=0):
        """Create (optionally without inserting) a Batch for a fresh batch item.

        NOTE(review): make_batch_item returns None when the item already
        exists, which would make `item.name` below raise AttributeError —
        confirm each call site uses a not-yet-existing item name.
        """
        batch = frappe.new_doc('Batch')
        item = self.make_batch_item(item_name)
        batch.item = item.name

        if batch_id:
            batch.batch_id = batch_id

        if not do_not_insert:
            batch.insert()

        return batch

    def test_batch_wise_item_price(self):
        """Batch-specific Item Prices must win over the item-level price in get_item_details."""
        if not frappe.db.get_value('Item', '_Test Batch Price Item'):
            frappe.get_doc({
                'doctype': 'Item',
                'is_stock_item': 1,
                'item_code': '_Test Batch Price Item',
                'item_group': 'Products',
                'has_batch_no': 1,
                'create_new_batch': 1
            }).insert(ignore_permissions=True)

        # batch1/batch2 get batch-specific prices; batch3 gets an item-level price.
        batch1 = create_batch('_Test Batch Price Item', 200, 1)
        batch2 = create_batch('_Test Batch Price Item', 300, 1)
        batch3 = create_batch('_Test Batch Price Item', 400, 0)

        args = frappe._dict({
            "item_code": "_Test Batch Price Item",
            "company": "_Test Company with perpetual inventory",
            "price_list": "_Test Price List",
            "currency": "_Test Currency",
            "doctype": "Sales Invoice",
            "conversion_rate": 1,
            "price_list_currency": "_Test Currency",
            "plc_conversion_rate": 1,
            "customer": "_Test Customer",
            "name": None
        })

        #test price for batch1
        args.update({'batch_no': batch1})
        details = get_item_details(args)
        self.assertEqual(details.get('price_list_rate'), 200)

        #test price for batch2
        args.update({'batch_no': batch2})
        details = get_item_details(args)
        self.assertEqual(details.get('price_list_rate'), 300)

        #test price for batch3
        args.update({'batch_no': batch3})
        details = get_item_details(args)
        self.assertEqual(details.get('price_list_rate'), 400)
def create_batch(item_code, rate, create_item_price_for_batch):
    """Receive stock of *item_code* via a purchase invoice and return the new batch.

    Also creates an Item Price at *rate*: batch-specific when
    *create_item_price_for_batch* is truthy, batch-independent otherwise.
    """
    invoice = make_purchase_invoice(company="_Test Company",
        warehouse= "Stores - _TC", cost_center = "Main - _TC", update_stock=1,
        expense_account ="_Test Account Cost for Goods Sold - _TC", item_code=item_code)

    batch = frappe.db.get_value('Batch', {'item': item_code, 'reference_name': invoice.name})

    # Attach the price either to the batch just created, or to no batch at all.
    price_batch = batch if create_item_price_for_batch else None
    create_price_list_for_batch(item_code, price_batch, rate)

    return batch
def create_price_list_for_batch(item_code, batch, rate):
    """Create an Item Price row in '_Test Price List'.

    Args:
        item_code: item the price applies to.
        batch: batch number for a batch-specific price, or None for a
            batch-independent price.
        rate: the price list rate to record.
    """
    # Bug fix: item_code was previously ignored — the price was always created
    # for the hard-coded item '_Test Batch Price Item'. All existing callers
    # pass exactly that item, so honouring the parameter is backward-compatible.
    frappe.get_doc({
        'doctype': 'Item Price',
        'item_code': item_code,
        'price_list': '_Test Price List',
        'batch_no': batch,
        'price_list_rate': rate
    }).insert()
def make_new_batch(**kwargs):
    """Create and return a Batch for ``batch_id``/``item_code`` kwargs.

    If a batch with that id already exists, fetch and return the existing one.
    """
    params = frappe._dict(kwargs)
    doc = frappe.get_doc({
        "doctype": "Batch",
        "batch_id": params.batch_id,
        "item": params.item_code,
    })
    try:
        batch = doc.insert()
    except frappe.DuplicateEntryError:
        # Batch left over from an earlier run — reuse it.
        batch = frappe.get_doc("Batch", params.batch_id)
    return batch
cbd3598d91ff793b9cf6d9fdbee232e97be7c505 | 35,768 | py | Python | morphological_analysis_v4.py | rivernuthead/DoD_analysis | b06219d4026e89a9b9f1e8939010a63612750c80 | [
"MIT"
] | null | null | null | morphological_analysis_v4.py | rivernuthead/DoD_analysis | b06219d4026e89a9b9f1e8939010a63612750c80 | [
"MIT"
] | null | null | null | morphological_analysis_v4.py | rivernuthead/DoD_analysis | b06219d4026e89a9b9f1e8939010a63612750c80 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 16:02:58 2022
@author: erri
"""
import os
import numpy as np
import math
from morph_quantities_func_v2 import morph_quantities
import matplotlib.pyplot as plt
# SINGLE RUN NAME
run = 'q07_1'
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
# Step between surveys
DoD_delta = 1
# Base length in terms of columns. If the windows dimensions are channel width
# multiples, the windows_length_base is 12 columns
windows_length_base = 12
window_mode = 1
'''
windows_mode:
0 = fixed windows (all the channel)
1 = expanding window
2 = floating fixed windows (WxW, Wx2W, Wx3W, ...) without overlapping
3 = floating fixed windows (WxW, Wx2W, Wx3W, ...) with overlapping
'''
plot_mode = 2
'''
plot_mode:
1 = only summary plot
2 = all single DoD plot
'''
# Parameters
# Survey pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
W = 0.6 # Width [m]
d50 = 0.001
NaN = -999
# Setup working directory and source DoDs folder for the current run.
home_dir = os.getcwd()
DoDs_folder = os.path.join(home_dir, 'DoDs', 'DoD_'+run)

# Collect the names of the filtered DoD rasters whose survey step matches
# DoD_delta. File names look like 'DoD_s<j>-s<i>_filt_nozero_rst.txt', so the
# two survey indices sit at fixed character positions 5 and 8.
DoDs_name_array = [] # List the file's name of the DoDs with step of delta_step
for f in sorted(os.listdir(DoDs_folder)):
    if f.endswith('_filt_nozero_rst.txt') and f.startswith('DoD_'):
        # int() instead of eval(): identical result for digit characters, but
        # does not execute arbitrary expressions taken from file names.
        delta = int(f[5]) - int(f[8])
        if delta == DoD_delta:
            DoDs_name_array = np.append(DoDs_name_array, f)

# Initialize overall arrays
dep_vol_w_array_all = []
sco_vol_w_array_all = []
# Loop over the DoDs with step of delta_step
for f in DoDs_name_array:
DoD_name = f
print(f)
DoD_path = os.path.join(DoDs_folder,DoD_name)
DoD_filt_nozero = np.loadtxt(DoD_path, delimiter='\t')
# DoD length
DoD_length = DoD_filt_nozero.shape[1]*px_x/1000 # DoD length [m]
dim_x = DoD_filt_nozero.shape[1]
# Initialize array
# Define total volume matrix, Deposition matrix and Scour matrix
DoD_vol = np.where(np.isnan(DoD_filt_nozero), 0, DoD_filt_nozero) # Total volume matrix
DoD_vol = np.where(DoD_vol==NaN, 0, DoD_vol)
dep_DoD = (DoD_vol>0)*DoD_vol # DoD of only deposition data
sco_DoD = (DoD_vol<0)*DoD_vol # DoD of only scour data
# Active pixel matrix:
act_px_matrix = np.where(DoD_vol!=0, 1, 0) # Active pixel matrix, both scour and deposition
act_px_matrix_dep = np.where(dep_DoD != 0, 1, 0) # Active deposition matrix
act_px_matrix_sco = np.where(sco_DoD != 0, 1, 0) # Active scour matrix
# Initialize array for each window dimension
###################################################################
# MOVING WINDOWS ANALYSIS
###################################################################
array = DoD_filt_nozero
W=windows_length_base
mean_array_tot = []
std_array_tot= []
window_boundary = np.array([0,0])
x_data_tot=[]
tot_vol_array=[] # Tot volume
tot_vol_mean_array=[]
tot_vol_std_array=[]
sum_vol_array=[] # Sum of scour and deposition volume
dep_vol_array=[] # Deposition volume
sco_vol_array=[] # Scour volume
morph_act_area_array=[] # Total active area array
morph_act_area_dep_array=[] # Deposition active area array
morph_act_area_sco_array=[] # Active active area array
act_width_mean_array=[] # Total active width mean array
act_width_mean_dep_array=[] # Deposition active width mean array
act_width_mean_sco_array=[] # Scour active width mean array
if window_mode == 1:
# With overlapping
for w in range(1, int(math.floor(array.shape[1]/W))+1): # W*w is the dimension of every possible window
# Initialize arrays that stock data for each window position
x_data=[]
tot_vol_w_array = []
sum_vol_w_array = []
dep_vol_w_array = []
sco_vol_w_array =[]
morph_act_area_w_array = []
morph_act_area_dep_w_array = []
morph_act_area_sco_w_array = []
act_width_mean_w_array = []
act_width_mean_dep_w_array = []
act_width_mean_sco_w_array = []
act_thickness_w_array = []
act_thickness_dep_w_array = []
act_thickness_sco_w_array = []
for i in range(0, array.shape[1]+1):
if i+w*W <= array.shape[1]:
window = array[:, i:W*w+i]
boundary = np.array([i,W*w+i])
window_boundary = np.vstack((window_boundary, boundary))
x_data=np.append(x_data, w)
# Calculate morphological quantities
tot_vol, sum_vol, dep_vol, sco_vol, morph_act_area, morph_act_area_dep, morph_act_area_sco, act_width_mean, act_width_mean_dep, act_width_mean_sco, act_thickness, act_thickness_dep, act_thickness_sco = morph_quantities(window)
# Append single data to array
# For each window position the calculated parameters will be appended to _array
tot_vol_w_array=np.append(tot_vol_w_array, tot_vol)
sum_vol_w_array=np.append(sum_vol_w_array, sum_vol)
dep_vol_w_array=np.append(dep_vol_w_array, dep_vol)
sco_vol_w_array=np.append(sco_vol_w_array, sco_vol)
morph_act_area_w_array=np.append(morph_act_area_w_array, morph_act_area)
morph_act_area_dep_w_array=np.append(morph_act_area_dep_w_array, morph_act_area_dep)
morph_act_area_sco_w_array=np.append(morph_act_area_sco_w_array, morph_act_area_sco)
act_width_mean_w_array=np.append(act_width_mean_w_array, act_width_mean)
act_width_mean_dep_w_array=np.append(act_width_mean_dep_w_array, act_width_mean_dep)
act_width_mean_sco_w_array=np.append(act_width_mean_sco_w_array, act_width_mean_sco)
act_thickness_w_array=np.append(act_thickness_w_array, act_thickness)
act_thickness_dep_w_array=np.append(act_thickness_dep_w_array, act_thickness_dep)
act_thickness_sco_w_array=np.append(act_thickness_sco_w_array, act_thickness_sco)
# For each window dimension w*W,
x_data_tot=np.append(x_data_tot, np.nanmean(x_data)) # Append one value of x_data
tot_vol_mean_array=np.append(tot_vol_mean_array, np.nanmean(tot_vol_w_array)) # Append the tot_vol_array mean
tot_vol_std_array=np.append(tot_vol_std_array, np.nanstd(tot_vol_w_array)) # Append the tot_vol_array mean
# sum_vol_array=
# dep_vol_array=
# sco_vol_array=
# morph_act_area_array=
# morph_act_area_dep_array=
# morph_act_area_sco_array=
# act_width_mean_array=
# act_width_mean_dep_array=
# act_width_mean_sco_array=
# Slice window boundaries array to delete [0,0] when initialized
window_boundary = window_boundary[1,:]
if window_mode == 2:
# Without overlapping
for w in range(1, int(math.floor(array.shape[1]/W))+1): # W*w is the dimension of every possible window
mean_array = []
std_array= []
x_data=[]
for i in range(0, array.shape[1]+1):
if W*w*(i+1) <= array.shape[1]:
window = array[:, W*w*i:W*w*(i+1)]
boundary = np.array([W*w*i,W*w*(i+1)])
window_boundary = np.vstack((window_boundary, boundary))
mean = np.nanmean(window)
std = np.nanstd(window)
mean_array = np.append(mean_array, mean)
std_array = np.append(std_array, std)
x_data=np.append(x_data, w)
mean_array_tot = np.append(mean_array_tot, np.nanmean(mean_array))
std_array_tot= np.append(std_array_tot, np.nanstd(std_array)) #TODO check this
x_data_tot=np.append(x_data_tot, np.nanmean(x_data))
# Slice window boundaries array to delete [0,0] when initialized
window_boundary = window_boundary[1,:]
if window_mode == 3:
# Increasing window dimension keeping still the upstream cross section
mean_array = []
std_array= []
x_data=[]
for i in range(0, array.shape[1]+1):
if W*(i+1) <= array.shape[1]:
window = array[:, 0:W*(i+1)]
boundary = np.array([0,W*(i+1)])
window_boundary = np.vstack((window_boundary, boundary))
mean = np.nanmean(window)
std = np.nanstd(window)
mean_array = np.append(mean_array, mean)
std_array = np.append(std_array, std)
x_data=np.append(x_data, i)
mean_array_tot = np.append(mean_array_tot, np.nanmean(mean_array))
std_array_tot= np.append(std_array_tot, np.nanstd(std_array)) #TODO check this
x_data_tot=np.append(x_data_tot, np.nanmean(x_data))
# Slice window boundaries array to delete [0,0] when initialized
window_boundary = window_boundary[1,:]
# # TODO Go on with this section
# if windows_mode == 1:
# # Define x_data for plots
# x_data = np.linspace(W,dim_x,math.floor(DoD_length/W))*px_x/1e03
# for n in range(1,math.floor(DoD_length/W)+1):
# w_cols = n*round(W/(px_x/1000)) # Window analysis length in number of columns
# w_len = round(n*W,1) # Window analysis lenght im meter [m]
# # Define total volume matrix, Deposition matrix and Scour matrix
# DoD_vol_w = DoD_vol[:,0:w_cols] # Total volume matrix
# dep_DoD_w = dep_DoD[:,0:w_cols] # DoD of only deposition data
# sco_DoD_w = sco_DoD[:,0:w_cols] # DoD of only scour data
# # Define active pixel matrix
# act_px_matrix_w = act_px_matrix[:,0:w_cols] # Active pixel matrix, both scour and deposition
# act_px_matrix_dep_w = act_px_matrix_dep[:,0:w_cols] # Active deposition matrix
# act_px_matrix_sco_w = act_px_matrix_sco[:,0:w_cols] # Active scour matrix
# # Calculate principal quantities:
# # Volumes
# tot_vol_w = np.sum(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
# sum_vol_w = np.sum(np.abs(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
# dep_vol_w = np.sum(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
# sco_vol_w = np.sum(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# # Areas:
# morph_act_area_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
# morph_act_area_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
# morph_act_area_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# # Widths:
# act_width_mean_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total mean active width [%] - Wact/W
# act_width_mean_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition mean active width [%] - Wact/W
# act_width_mean_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour mean active width [%] - Wact/W
# # Thicknesses:
# act_thickness_w = sum_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
# act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
# act_thickness_sco_w = sco_vol_w/act_width_mean_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# # Append all values in arrays
# tot_vol_w_array = np.append(tot_vol_w_array, tot_vol_w)
# sum_vol_w_array = np.append(sum_vol_w_array, sum_vol_w)
# dep_vol_w_array = np.append(dep_vol_w_array, dep_vol_w)
# sco_vol_w_array = np.append(sco_vol_w_array, sco_vol_w)
# morph_act_area_w_array = np.append(morph_act_area_w_array, morph_act_area_w)
# morph_act_area_dep_w_array = np.append(morph_act_area_dep_w_array, morph_act_area_dep_w)
# morph_act_area_sco_w_array = np.append(morph_act_area_sco_w_array, morph_act_area_sco_w)
# act_width_mean_w_array = np.append(act_width_mean_w_array, act_width_mean_w)
# act_width_mean_dep_w_array = np.append(act_width_mean_dep_w_array, act_width_mean_dep_w)
# act_width_mean_sco_w_array = np.append(act_width_mean_sco_w_array, act_width_mean_sco_w)
# act_thickness_w_array = np.append(act_thickness_w_array, act_thickness_w)
# act_thickness_dep_w_array = np.append(act_thickness_dep_w_array, act_thickness_dep_w)
# act_thickness_sco_w_array = np.append(act_thickness_sco_w_array, act_thickness_sco_w)
# if plot_mode ==2:
# # Plots
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, dep_vol_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, sco_vol_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_width_mean_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active width actW/W [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_thickness_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Longitudinal coordinate [m]')
# axs.set_ylabel('Active thickness [mm]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# # Fixed window without overlapping
# if windows_mode == 2:
# # Calculate the number of suitable windows in the channel length
# c_array = []
# W_cols = int(W/px_x*1e03)
# for i in range(1, round(dim_x/W_cols)):
# c = math.floor(dim_x/(W_cols*i))
# if c*W_cols*i<=dim_x:
# c_array = np.append(c_array, c)
# else:
# pass
# # Define the components of the slicing operation (exclude the first one)
# f_cols_array = [0,0]
# x_data = [] # X data for the plot
# n = 0 # Initialize variable count
# for m in range(0,len(c_array)):
# # m is the window dimension in columns
# n+=1
# for i in range(1,(math.floor(dim_x/(W_cols*(m+1)))+1)):
# f_cols = [round(W_cols*(m+1)*(i-1), 1), round(W_cols*(m+1)*(i),1)]
# f_cols_array = np.vstack((f_cols_array, f_cols))
# x_data = np.append(x_data, n)
# x_data = (x_data)*W
# # Resize f_cols_array
# f_cols_array = f_cols_array[1:]
# for p in range(0, f_cols_array.shape[0]): # Loop over all the available window
# w_len = (f_cols_array[p,1] - f_cols_array[p,0])*px_x/1e03 # Define the window lwgth
# # Define total volume matrix, Deposition matrix and Scour matrix
# DoD_vol_w = DoD_vol[:, f_cols_array[p,0]:f_cols_array[p,1]] # Total volume matrix
# dep_DoD_w = dep_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only deposition data
# sco_DoD_w = sco_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only scour data
# # Define active pixel matrix
# act_px_matrix_w = act_px_matrix[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active pixel matrix, both scour and deposition
# act_px_matrix_dep_w = act_px_matrix_dep[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active deposition matrix
# act_px_matrix_sco_w = act_px_matrix_sco[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active scour matrix
# # Calculate principal quantities:
# # Volumes
# tot_vol_w = np.sum(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
# sum_vol_w = np.sum(np.abs(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
# dep_vol_w = np.sum(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
# sco_vol_w = np.sum(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# # Areas:
# morph_act_area_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
# morph_act_area_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
# morph_act_area_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# # Widths:
# act_width_mean_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total mean active width [%] - Wact/W
# act_width_mean_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition mean active width [%] - Wact/W
# act_width_mean_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour mean active width [%] - Wact/W
# # Thicknesses:
# act_thickness_w = sum_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
# act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
# act_thickness_sco_w = sco_vol_w/act_width_mean_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# # Append all values in arrays
# tot_vol_w_array = np.append(tot_vol_w_array, tot_vol_w)
# sum_vol_w_array = np.append(sum_vol_w_array, sum_vol_w)
# dep_vol_w_array = np.append(dep_vol_w_array, dep_vol_w)
# sco_vol_w_array = np.append(sco_vol_w_array, sco_vol_w)
# morph_act_area_w_array = np.append(morph_act_area_w_array, morph_act_area_w)
# morph_act_area_dep_w_array = np.append(morph_act_area_dep_w_array, morph_act_area_dep_w)
# morph_act_area_sco_w_array = np.append(morph_act_area_sco_w_array, morph_act_area_sco_w)
# act_width_mean_w_array = np.append(act_width_mean_w_array, act_width_mean_w)
# act_width_mean_dep_w_array = np.append(act_width_mean_dep_w_array, act_width_mean_dep_w)
# act_width_mean_sco_w_array = np.append(act_width_mean_sco_w_array, act_width_mean_sco_w)
# act_thickness_w_array = np.append(act_thickness_w_array, act_thickness_w)
# act_thickness_dep_w_array = np.append(act_thickness_dep_w_array, act_thickness_dep_w)
# act_thickness_sco_w_array = np.append(act_thickness_sco_w_array, act_thickness_sco_w)
# if plot_mode ==2:
# # Plots
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, dep_vol_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, sco_vol_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_width_mean_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active width actW/W [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_thickness_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active thickness [mm]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# # Fixed window with overlapping
# if windows_mode == 3:
# # Calculate the number of suitable windows in the channel length
# c_array = []
# W_cols = int(W/px_x*1e03) # Minimum windows length WxW dimension in columns
# for i in range(1, math.floor(dim_x/W_cols)+1): # per each windows analysis WxWi
# c = dim_x - W_cols*i
# c_array = np.append(c_array, c) # Contains the number of windows for each dimension WxW*i
# else:
# pass
# f_cols_array = [0,0]
# x_data = []
# n = 0
# for m in range(1,int(dim_x/W_cols)+1):
# w_length = m*W_cols # Analysis windows length
# # print(w_length)
# n+=1
# for i in range(0,dim_x): # i is the lower limit of the analysis window
# low_lim = i # Analisys window lower limit
# upp_lim = i + w_length # Analisys window upper limit
# if upp_lim<=dim_x:
# # print(low_lim, upp_lim)
# # print(i+w_length)
# f_cols = [low_lim, upp_lim] # Lower and upper boundary of the analysis window
# f_cols_array = np.vstack((f_cols_array, f_cols))
# x_data = np.append(x_data, n)
# else:
# pass
# x_data = x_data*W
# # Resize f_cols_array
# f_cols_array = f_cols_array[1:]
# for p in range(0, f_cols_array.shape[0]):
# w_len = (f_cols_array[p,1] - f_cols_array[p,0])*px_x/1e03 # Define the window length
# # print()
# # print(f_cols_array[p,:])
# # print(w_len)
# # Define total volume matrix, Deposition matrix and Scour matrix
# DoD_vol_w = DoD_vol[:, f_cols_array[p,0]:f_cols_array[p,1]] # Total volume matrix
# dep_DoD_w = dep_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only deposition data
# sco_DoD_w = sco_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only scour data
# # Define active pixel matrix
# act_px_matrix_w = act_px_matrix[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active pixel matrix, both scour and deposition
# act_px_matrix_dep_w = act_px_matrix_dep[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active deposition matrix
# act_px_matrix_sco_w = act_px_matrix_sco[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active scour matrix
# # Calculate principal quantities:
# # Volumes
# tot_vol_w = np.sum(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
# sum_vol_w = np.sum(np.abs(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
# dep_vol_w = np.sum(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
# sco_vol_w = np.sum(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# # Areas:
# morph_act_area_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
# morph_act_area_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
# morph_act_area_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# # Widths:
# act_width_mean_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total mean active width [%] - Wact/W
# act_width_mean_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition mean active width [%] - Wact/W
# act_width_mean_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour mean active width [%] - Wact/W
# # Thicknesses:
# act_thickness_w = sum_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
# act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
# act_thickness_sco_w = sco_vol_w/act_width_mean_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# # Append all values in arrays
# tot_vol_w_array = np.append(tot_vol_w_array, tot_vol_w)
# sum_vol_w_array = np.append(sum_vol_w_array, sum_vol_w)
# dep_vol_w_array = np.append(dep_vol_w_array, dep_vol_w)
# sco_vol_w_array = np.append(sco_vol_w_array, sco_vol_w)
# morph_act_area_w_array = np.append(morph_act_area_w_array, morph_act_area_w)
# morph_act_area_dep_w_array = np.append(morph_act_area_dep_w_array, morph_act_area_dep_w)
# morph_act_area_sco_w_array = np.append(morph_act_area_sco_w_array, morph_act_area_sco_w)
# act_width_mean_w_array = np.append(act_width_mean_w_array, act_width_mean_w)
# act_width_mean_dep_w_array = np.append(act_width_mean_dep_w_array, act_width_mean_dep_w)
# act_width_mean_sco_w_array = np.append(act_width_mean_sco_w_array, act_width_mean_sco_w)
# act_thickness_w_array = np.append(act_thickness_w_array, act_thickness_w)
# act_thickness_dep_w_array = np.append(act_thickness_dep_w_array, act_thickness_dep_w)
# act_thickness_sco_w_array = np.append(act_thickness_sco_w_array, act_thickness_sco_w)
# if plot_mode ==2:
# # Plots
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, dep_vol_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, sco_vol_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_width_mean_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active width actW/W [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_thickness_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active thickness [mm]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# if f == DoDs_name_array[0]:
# dep_vol_w_array_all = np.transpose(np.array(dep_vol_w_array))
# sco_vol_w_array_all = np.transpose(np.array(sco_vol_w_array))
# else:
# pass
# dep_vol_w_array_all = np.vstack((dep_vol_w_array_all,dep_vol_w_array))
# dep_vol_mean = np.mean(dep_vol_w_array_all, axis=0)
# dep_vol_std = np.std(dep_vol_w_array_all, axis=0)
# sco_vol_w_array_all = np.vstack((sco_vol_w_array_all,sco_vol_w_array))
# sco_vol_mean = np.mean(sco_vol_w_array_all, axis=0)
# sco_vol_std = np.std(sco_vol_w_array_all, axis=0)
# if windows_mode==2:
# # Loop to define the windows to clusterize data
# array = [0]
# num=0
# for n in range(0,len(c_array)):
# num += c_array[n]
# array = np.append(array, num) # Clusterize window dimension
# dep_vol_mean = []
# sco_vol_mean = []
# dep_vol_std = []
# sco_vol_std = []
# x_data_full = x_data
# x_data = []
# for n in range(0, len(array)-1):
# x_data = np.append(x_data, x_data_full[int(array[n])])
# for n in f_cols_array:
# dep_vol_mean = np.append(dep_vol_mean, np.mean(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_mean = np.append(sco_vol_mean, np.mean(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# dep_vol_std = np.append(dep_vol_std, np.std(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_std = np.append(sco_vol_std, np.std(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# # To finish
# if windows_mode == 3:
# # Loop to define the windows to clusterize data
# array = [0]
# num=0
# for n in range(0,len(c_array)):
# num += c_array[n]
# array = np.append(array, num) # Clusterize window dimension
# dep_vol_mean = []
# sco_vol_mean = []
# dep_vol_std = []
# sco_vol_std = []
# x_data_full = x_data
# x_data = []
# for n in range(0, len(array)-1):
# # low_lim = int(f_cols_array[n,0])
# # upp_lim = int(f_cols_array[n,1])
# x_data = np.append(x_data, round(x_data_full[int(array[n])+n],1))
# # dep_vol_mean = np.append(dep_vol_mean, np.mean(dep_vol_w_array_all[:,low_lim:upp_lim]))
# # sco_vol_mean = np.append(sco_vol_mean, np.mean(sco_vol_w_array_all[:,low_lim:upp_lim]))
# # dep_vol_std = np.append(dep_vol_std, np.std(dep_vol_w_array_all[:,low_lim:upp_lim]))
# # sco_vol_std = np.append(sco_vol_std, np.std(sco_vol_w_array_all[:,low_lim:upp_lim]))
# dep_vol_mean = np.append(dep_vol_mean, np.mean(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_mean = np.append(sco_vol_mean, np.mean(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# dep_vol_std = np.append(dep_vol_std, np.std(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_std = np.append(sco_vol_std, np.std(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# # print(int(array[n]),int(array[n+1]))
# # TODO To finish
# fig3, axs = plt.subplots(2,1,dpi=80, figsize=(10,6), sharex=True, tight_layout=True)
# fig3.suptitle(run + ' - Volume')
# axs[0].errorbar(x_data, sco_vol_mean, sco_vol_std, linestyle='--', marker='^', color='red')
# # axs[0].set_ylim(bottom=0)
# axs[0].set_title('Scour')
# # axs[0].set_xlabel()
# axs[0].set_ylabel('Scour volume V/(L*W*d50) [-]')
# axs[1].errorbar(x_data, dep_vol_mean, dep_vol_std, linestyle='--', marker='^', color='blue')
# axs[1].set_ylim(bottom=0)
# axs[1].set_title('Deposition')
# axs[1].set_xlabel('Analysis window length [m]')
# axs[1].set_ylabel('Deposition volume V/(L*W*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'dep_scour.png'), dpi=200)
# plt.show()
# # # Plots
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, dep_vol_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show()
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, sco_vol_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show()
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, act_width_mean_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Active width actW/W [-]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show()
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, act_thickness_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Active thickness [mm]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show() | 50.87909 | 246 | 0.605374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27,700 | 0.774435 |
cbd426fc2cf477bb0eedd50f6dab71f0530cee3c | 1,987 | py | Python | SimulationLego/laptop.py | MajesticKhan/Reinforcement-Learning | c488e547afe571b47446be76abf6f5c80e8e6be5 | [
"MIT"
] | 4 | 2022-03-30T00:45:29.000Z | 2022-03-30T00:45:49.000Z | SimulationLego/laptop.py | MajesticKhan/Reinforcement-Learning | c488e547afe571b47446be76abf6f5c80e8e6be5 | [
"MIT"
] | null | null | null | SimulationLego/laptop.py | MajesticKhan/Reinforcement-Learning | c488e547afe571b47446be76abf6f5c80e8e6be5 | [
"MIT"
] | null | null | null | #--------------------------------------------------------Import libraries
import pickle
import socket
import struct
import cv2
from stable_baselines import PPO2
import numpy as np
import imageio
#--------------------------------------------------------Establiosh connection
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("RASPBERRY PI address",1235))
#--------------------------------------------------------Read model
model = PPO2.load("model_output/model_final.zip")
#--------------------------------------------------------Establish initial varibles to hold information
data = bytearray()
info = s.recv(4)
length = struct.unpack(">L", info[:4])[0]
#--------------------------------------------------------Initialize
# initializes arrays to hold images for GIF
images_O = []
cv2.namedWindow('frame')
cv2.resizeWindow('frame', 256,256)
try:
while True:
# Capture the bytes being sent
while len(data) < length:
data.extend(s.recv(4096))
# Convert to BGR TO RGB
frame = cv2.cvtColor(cv2.imdecode(np.frombuffer(data[:length],dtype=np.uint8),1),cv2.COLOR_BGR2RGB)
# add raw and transformed images
images_O.append(frame)
# Given state, predict action
action, _ = model.predict(frame, deterministic=True)
# send action
s.sendall(pickle.dumps(action))
# Set up to get new image
data = data[length:]
data.extend(s.recv(4))
length = struct.unpack(">L", data[:4])[0]
data = data[4:]
# Show image on display
# Convert transformed image to BGR so CV2 can show image correctly
cv2.imshow('frame',cv2.cvtColor(frame,cv2.COLOR_RGB2BGR))
if cv2.waitKey(1) & 0xFF == ord('q'):
s.close()
break
finally:
s.close()
# convert untransformed images to gif
imageio.mimsave('Lego_camera_view.gif', [np.array(img) for i, img in enumerate(images_O) if i % 2 == 0], fps=20) | 32.048387 | 116 | 0.560141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 815 | 0.410166 |
cbd784fa49d1aeb99aa20877269a15eeae7feeb6 | 502 | py | Python | examples/py/async-generator-multiple-tickers.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | [
"MIT"
] | 24,910 | 2017-10-27T21:41:59.000Z | 2022-03-31T23:08:57.000Z | examples/py/async-generator-multiple-tickers.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | [
"MIT"
] | 8,201 | 2017-10-28T10:19:28.000Z | 2022-03-31T23:49:37.000Z | examples/py/async-generator-multiple-tickers.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | [
"MIT"
] | 6,632 | 2017-10-28T02:53:24.000Z | 2022-03-31T23:20:14.000Z | # -*- coding: utf-8 -*-
import asyncio
import ccxt.async_support as ccxt
async def poll(tickers):
    """Yield ``(symbol, ticker)`` pairs forever, cycling through *tickers*.

    Fetches each ticker from Kraken in turn and sleeps for the exchange's
    advertised rate limit between requests.
    """
    request_count = 0
    kraken = ccxt.kraken()
    while True:
        symbol = tickers[request_count % len(tickers)]
        yield (symbol, await kraken.fetch_ticker(symbol))
        request_count += 1
        await asyncio.sleep(kraken.rateLimit / 1000)
async def main():
    """Print each (symbol, ticker) pair produced by the polling generator."""
    symbols = ['BTC/USD', 'ETH/BTC', 'BTC/EUR']
    async for symbol, ticker in poll(symbols):
        print(symbol, ticker)
# Drive the async consumer to completion (runs until interrupted).
# NOTE(review): asyncio.get_event_loop() is deprecated on newer Pythons;
# asyncio.run(main()) is the modern equivalent -- confirm target version.
asyncio.get_event_loop().run_until_complete(main())
| 21.826087 | 74 | 0.629482 | 0 | 0 | 246 | 0.49004 | 0 | 0 | 368 | 0.733068 | 50 | 0.099602 |
cbd7e3697d362246ff728b1ea5a7f422a2d3dc8c | 3,198 | py | Python | sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_generated/v7_2_preview/models/__init__.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_generated/v7_2_preview/models/__init__.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 2 | 2020-03-03T23:11:13.000Z | 2020-03-30T18:50:55.000Z | sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_generated/v7_2_preview/models/__init__.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Attributes
from ._models_py3 import Error
from ._models_py3 import FullBackupOperation
from ._models_py3 import KeyVaultError
from ._models_py3 import Permission
from ._models_py3 import RestoreOperation
from ._models_py3 import RestoreOperationParameters
from ._models_py3 import RoleAssignment
from ._models_py3 import RoleAssignmentCreateParameters
from ._models_py3 import RoleAssignmentFilter
from ._models_py3 import RoleAssignmentListResult
from ._models_py3 import RoleAssignmentProperties
from ._models_py3 import RoleAssignmentPropertiesWithScope
from ._models_py3 import RoleDefinition
from ._models_py3 import RoleDefinitionFilter
from ._models_py3 import RoleDefinitionListResult
from ._models_py3 import SASTokenParameter
from ._models_py3 import SelectiveKeyRestoreOperation
from ._models_py3 import SelectiveKeyRestoreOperationParameters
except (SyntaxError, ImportError):
from ._models import Attributes # type: ignore
from ._models import Error # type: ignore
from ._models import FullBackupOperation # type: ignore
from ._models import KeyVaultError # type: ignore
from ._models import Permission # type: ignore
from ._models import RestoreOperation # type: ignore
from ._models import RestoreOperationParameters # type: ignore
from ._models import RoleAssignment # type: ignore
from ._models import RoleAssignmentCreateParameters # type: ignore
from ._models import RoleAssignmentFilter # type: ignore
from ._models import RoleAssignmentListResult # type: ignore
from ._models import RoleAssignmentProperties # type: ignore
from ._models import RoleAssignmentPropertiesWithScope # type: ignore
from ._models import RoleDefinition # type: ignore
from ._models import RoleDefinitionFilter # type: ignore
from ._models import RoleDefinitionListResult # type: ignore
from ._models import SASTokenParameter # type: ignore
from ._models import SelectiveKeyRestoreOperation # type: ignore
from ._models import SelectiveKeyRestoreOperationParameters # type: ignore
# Explicit public API of this generated models package: the names exported by
# a star import. Must stay in sync with the imports above.
__all__ = [
    'Attributes',
    'Error',
    'FullBackupOperation',
    'KeyVaultError',
    'Permission',
    'RestoreOperation',
    'RestoreOperationParameters',
    'RoleAssignment',
    'RoleAssignmentCreateParameters',
    'RoleAssignmentFilter',
    'RoleAssignmentListResult',
    'RoleAssignmentProperties',
    'RoleAssignmentPropertiesWithScope',
    'RoleDefinition',
    'RoleDefinitionFilter',
    'RoleDefinitionListResult',
    'SASTokenParameter',
    'SelectiveKeyRestoreOperation',
    'SelectiveKeyRestoreOperationParameters',
]
| 45.042254 | 94 | 0.735147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,149 | 0.359287 |
cbd972da2a2d979e028c1ffafee8336e9a3e9560 | 447 | py | Python | fitbox/consultas/views.py | ravellys/fitbox | 3ca61bf4d27f47dd7fabb1301afdf5c79284f68b | [
"MIT"
] | null | null | null | fitbox/consultas/views.py | ravellys/fitbox | 3ca61bf4d27f47dd7fabb1301afdf5c79284f68b | [
"MIT"
] | 1 | 2020-10-29T20:03:11.000Z | 2020-10-29T20:03:11.000Z | fitbox/consultas/views.py | ravellys/fitbox | 3ca61bf4d27f47dd7fabb1301afdf5c79284f68b | [
"MIT"
] | 2 | 2020-10-27T16:46:18.000Z | 2020-11-04T02:29:59.000Z | # from django.shortcuts import render
# Create your views here.
from django.urls import reverse_lazy
from django.views.generic import CreateView
from fitbox.consultas.forms import ConsultaForm
from fitbox.consultas.models import Consulta
class ConsultaCreateView(CreateView):
    """Django class-based view that renders and saves the Consulta form."""

    # Template used both for the empty form (GET) and for re-display on
    # validation errors.
    template_name = "consultas/cadastro_consulta.html"
    model = Consulta
    form_class = ConsultaForm
    # On success, redirect back to the same registration page.
    # NOTE(review): the URL namespace is "consulta" (singular) while the app
    # directory is "consultas" -- confirm this matches the urls.py namespace.
    success_url = reverse_lazy("consulta:cadastro_consulta")
| 27.9375 | 60 | 0.805369 | 204 | 0.456376 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.277405 |
cbd9e2d20a7a2ee57818c7e4187d5b9ba2868f18 | 598 | py | Python | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/base.py | drgarcia1986/cookiecutter-muffin | 7aa861787b4280477a726da99cf9de4047b01d91 | [
"MIT"
] | 3 | 2016-06-24T21:14:37.000Z | 2017-03-07T05:36:33.000Z | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/base.py | drgarcia1986/cookiecutter-muffin | 7aa861787b4280477a726da99cf9de4047b01d91 | [
"MIT"
] | null | null | null | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/base.py | drgarcia1986/cookiecutter-muffin | 7aa861787b4280477a726da99cf9de4047b01d91 | [
"MIT"
] | null | null | null | import os
STATIC_FOLDERS = (
'{{cookiecutter.repo_name}}/common/static',
'{{cookiecutter.repo_name}}/users/static',
)
# Muffin Plugins
PLUGINS = (
'muffin_jinja2',
'muffin_peewee',
'muffin_session',
)
# Plugins configurations
SESSION_SECRET = 'SecretHere'
SESSION_LOGIN_URL = '/users/signin/'
JINJA2_TEMPLATE_FOLDERS = (
'{{cookiecutter.repo_name}}/common/templates',
'{{cookiecutter.repo_name}}/public/templates',
'{{cookiecutter.repo_name}}/users/templates'
)
PEEWEE_CONNECTION = os.environ.get('DATABASE_URL', 'sqlite:///{{cookiecutter.repo_name}}.sqlite')
| 22.148148 | 97 | 0.70903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.652174 |
cbda5badb8318614b5c5a7f8c3574fc8ff467460 | 6,905 | py | Python | rpyc/core/netref.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | rpyc/core/netref.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | rpyc/core/netref.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | """
NetRef - transparent network references implementation.
"""
import sys
import inspect
import types
import cPickle as pickle
from rpyc.core import consts
# Attribute names that are always resolved locally on the proxy object itself
# (never forwarded over the connection): the proxy's own state plus the
# special names Python's machinery touches during normal operation.
_local_netref_attrs = frozenset([
    '____conn__', '____oid__', '__class__', '__cmp__', '__del__', '__delattr__',
    '__dir__', '__doc__', '__getattr__', '__getattribute__', '__hash__',
    '__init__', '__metaclass__', '__module__', '__new__', '__reduce__',
    '__reduce_ex__', '__repr__', '__setattr__', '__slots__', '__str__',
    '__weakref__', '__dict__', '__members__', '__methods__',
])
# Builtin types for which netref classes are created eagerly at import time
# (see the builtin_classes_cache loop at the bottom of this module).
# NOTE: this module targets Python 2 -- InstanceType, ClassType, file, long,
# basestring, unicode and xrange do not exist on Python 3.
_builtin_types = [
    type, object, types.InstanceType, types.ClassType, bool, complex, dict,
    file, float, int, list, long, slice, str, basestring, tuple, unicode,
    str, set, frozenset, Exception, types.NoneType, types.DictProxyType,
    types.BuiltinFunctionType, types.GeneratorType, types.MethodType,
    types.CodeType, types.FrameType, types.TracebackType, xrange,
    types.ModuleType, types.FunctionType,
    type(int.__add__), # wrapper_descriptor
    type((1).__add__), # method-wrapper
    type(iter([])), # listiterator
    type(iter(())), # tupleiterator
    type(iter(xrange(10))), # rangeiterator
    type(iter(set())), # setiterator
]
# Maps (type name, module name) -> builtin type; used by class_factory to
# resolve a remote type name back to the matching local builtin.
_normalized_builtin_types = dict(((t.__name__, t.__module__), t)
    for t in _builtin_types)
def syncreq(proxy, handler, *args):
    """Perform a synchronous request through *proxy* and return the result.

    The connection callable and remote object id are read with
    ``object.__getattribute__`` so the netref's own attribute machinery
    (which would itself trigger remote requests) is bypassed.
    """
    raw_getattr = object.__getattribute__
    get_conn = raw_getattr(proxy, "____conn__")
    remote_oid = raw_getattr(proxy, "____oid__")
    return get_conn().sync_request(handler, remote_oid, *args)
def asyncreq(proxy, handler, *args):
    """Perform an asynchronous request through *proxy*, returning an
    AsyncResult.

    Like :func:`syncreq`, the proxy's connection callable and object id are
    fetched via ``object.__getattribute__`` to avoid recursing into the
    netref's remote attribute lookup.
    """
    raw_getattr = object.__getattribute__
    get_conn = raw_getattr(proxy, "____conn__")
    remote_oid = raw_getattr(proxy, "____oid__")
    return get_conn().async_request(handler, remote_oid, *args)
class NetrefMetaclass(type):
    """Metaclass whose only job is giving netref classes a readable repr."""
    __slots__ = ()

    def __repr__(self):
        # Omit the module qualifier when __module__ is empty/falsy.
        if not self.__module__:
            return "<netref class '%s'>" % (self.__name__,)
        return "<netref class '%s.%s'>" % (self.__module__, self.__name__)
class BaseNetref(object):
    """The base netref object, from which all netref classes derive.

    An instance stores only a callable yielding its connection
    (``____conn__``) and the remote object id (``____oid__``); every other
    attribute access is forwarded to the remote side via syncreq/asyncreq.
    """
    # Python 2 metaclass declaration: gives netref classes the custom repr
    # defined by NetrefMetaclass.
    __metaclass__ = NetrefMetaclass
    __slots__ = ["____conn__", "____oid__", "__weakref__"]
    def __init__(self, conn, oid):
        # Both names are in _local_netref_attrs, so __setattr__ stores them
        # locally instead of forwarding them to the remote object.
        self.____conn__ = conn
        self.____oid__ = oid
    def __del__(self):
        try:
            # Tell the remote side this reference was dropped (fire-and-forget).
            asyncreq(self, consts.HANDLE_DEL)
        except Exception:
            # raised in a destructor, most likely on program termination,
            # it's safe to ignore all exceptions here
            pass
    def __getattribute__(self, name):
        # Serve local/special attributes from this object itself; forward
        # everything else to the remote object with a synchronous request.
        if name in _local_netref_attrs:
            if name == "__class__":
                cls = object.__getattribute__(self, "__class__")
                if cls is None:
                    # class_factory could not resolve the class locally;
                    # fetch it from the remote side on first use.
                    cls = self.__getattr__("__class__")
                return cls
            elif name == "__doc__":
                # The docstring of interest is the remote object's, not the
                # netref class's own.
                return self.__getattr__("__doc__")
            elif name == "__members__": # for Python < 2.6
                return self.__dir__()
            else:
                return object.__getattribute__(self, name)
        elif name == "__call__": # IronPython issue #10
            return object.__getattribute__(self, "__call__")
        else:
            return syncreq(self, consts.HANDLE_GETATTR, name)
    def __getattr__(self, name):
        # Fallback lookup: always ask the remote side.
        return syncreq(self, consts.HANDLE_GETATTR, name)
    def __delattr__(self, name):
        if name in _local_netref_attrs:
            object.__delattr__(self, name)
        else:
            syncreq(self, consts.HANDLE_DELATTR, name)
    def __setattr__(self, name, value):
        if name in _local_netref_attrs:
            object.__setattr__(self, name, value)
        else:
            syncreq(self, consts.HANDLE_SETATTR, name, value)
    def __dir__(self):
        return list(syncreq(self, consts.HANDLE_DIR))
    # support for metaclasses
    def __hash__(self):
        return syncreq(self, consts.HANDLE_HASH)
    def __cmp__(self, other):
        # Python 2 comparison protocol; the comparison runs remotely.
        return syncreq(self, consts.HANDLE_CMP, other)
    def __repr__(self):
        return syncreq(self, consts.HANDLE_REPR)
    def __str__(self):
        return syncreq(self, consts.HANDLE_STR)
    # support for pickle
    def __reduce_ex__(self, proto):
        # Pickle by value: the remote side serializes the actual object and
        # the resulting bytes are unpickled locally on load.
        return pickle.loads, (syncreq(self, consts.HANDLE_PICKLE, proto),)
def _make_method(name, doc):
name = str(name) # IronPython issue #10
if name == "__call__":
def __call__(_self, *args, **kwargs):
kwargs = tuple(kwargs.items())
return syncreq(_self, consts.HANDLE_CALL, args, kwargs)
__call__.__doc__ = doc
return __call__
else:
def method(_self, *args, **kwargs):
kwargs = tuple(kwargs.items())
return syncreq(_self, consts.HANDLE_CALLATTR, name, args, kwargs)
method.__name__ = name
method.__doc__ = doc
return method
def inspect_methods(obj):
    """returns a list of (method name, docstring) tuples of all the methods of
    the given object"""
    methods = {}
    attrs = {}
    if isinstance(obj, type):
        # don't forget the darn metaclass
        mros = list(reversed(type(obj).__mro__)) + list(reversed(obj.__mro__))
    else:
        mros = reversed(type(obj).__mro__)
    # Walk the MRO base-first so more-derived definitions overwrite the
    # inherited ones in attrs.
    for basecls in mros:
        attrs.update(basecls.__dict__)
    # Python 2 dict iteration (iteritems). Keep every callable attribute
    # whose name is not handled locally by the netref machinery.
    for name, attr in attrs.iteritems():
        if name not in _local_netref_attrs and hasattr(attr, "__call__"):
            methods[name] = inspect.getdoc(attr)
    return methods.items()
def class_factory(clsname, modname, methods):
    """Create a netref class mirroring a remote class.

    ``methods`` is an iterable of (name, docstring) pairs; each becomes a
    forwarding proxy method unless its name must be handled locally. The
    ``__class__`` slot is resolved to a matching local class when one can be
    found, and left as None (resolved lazily per instance) otherwise.
    """
    clsname = str(clsname) # IronPython issue #10
    modname = str(modname) # IronPython issue #10
    namespace = {"__slots__": ()}
    for method_name, docstring in methods:
        method_name = str(method_name) # IronPython issue #10
        if method_name not in _local_netref_attrs:
            namespace[method_name] = _make_method(method_name, docstring)
    namespace["__module__"] = modname
    # Try to resolve the corresponding local class: first a loaded module
    # with a matching attribute, then the table of normalized builtins.
    local_module = sys.modules.get(modname)
    if local_module is not None and hasattr(local_module, clsname):
        namespace["__class__"] = getattr(local_module, clsname)
    elif (clsname, modname) in _normalized_builtin_types:
        namespace["__class__"] = _normalized_builtin_types[clsname, modname]
    else:
        # To be resolved by the instance on first __class__ access.
        namespace["__class__"] = None
    return type(clsname, (BaseNetref,), namespace)
# Eagerly build and cache netref classes for every builtin type, keyed by
# (type name, module name), so common objects never need a class_factory
# round-trip at runtime.
builtin_classes_cache = {}
for cls in _builtin_types:
    builtin_classes_cache[cls.__name__, cls.__module__] = class_factory(
        cls.__name__, cls.__module__, inspect_methods(cls))
| 39.232955 | 84 | 0.625344 | 2,696 | 0.390442 | 0 | 0 | 0 | 0 | 0 | 0 | 1,463 | 0.211875 |
cbdac3877bab597aa23c31ff7320e50f74d4b377 | 381 | py | Python | hpc/config_manager.py | PRIDE-Toolsuite/trackhub-creator | ade2cfafeaad95088664caecacb783b501c170aa | [
"Apache-2.0"
] | null | null | null | hpc/config_manager.py | PRIDE-Toolsuite/trackhub-creator | ade2cfafeaad95088664caecacb783b501c170aa | [
"Apache-2.0"
] | null | null | null | hpc/config_manager.py | PRIDE-Toolsuite/trackhub-creator | ade2cfafeaad95088664caecacb783b501c170aa | [
"Apache-2.0"
] | null | null | null | #
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 11-09-2017 11:10
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
Configuration Manager for this HPC Module
"""
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| 23.8125 | 114 | 0.695538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.897172 |
cbdd50556efcc7c8ce43ff47982d04d269c61a42 | 3,230 | py | Python | exampleScripts/basicClusterHoehnerExampleData.py | StSchulze/pyGCluster | 92bb6855faa9e40582326cabfb0323eace72d4b3 | [
"MIT"
] | 7 | 2015-01-30T19:47:31.000Z | 2019-12-19T04:06:59.000Z | exampleScripts/basicClusterHoehnerExampleData.py | StSchulze/pyGCluster | 92bb6855faa9e40582326cabfb0323eace72d4b3 | [
"MIT"
] | 1 | 2016-02-22T08:55:39.000Z | 2016-02-25T11:08:01.000Z | exampleScripts/basicClusterHoehnerExampleData.py | StSchulze/pyGCluster | 92bb6855faa9e40582326cabfb0323eace72d4b3 | [
"MIT"
] | 6 | 2017-04-01T13:29:50.000Z | 2019-12-19T04:06:58.000Z | #!/usr/bin/env python
"""
Testscript to demonstrate functionality of pyGCluster
This script imports the data of Hoehner et al. (2013) and executes pyGCluster
with 250,000 iterations of resampling. pyGCluster will evoke 4 threads
(if possible), which each require approx. 1.5GB RAM. Please make sure you have
enough RAM available (4 threads in all require approx. 6GB RAM).
Duration will be approx. 2 hours to complete 250,000 iterations on 4 threads.
Usage::
./basicClusterHoehnerExampleData.py <pathToExampleFile>
If this script is executed in folder pyGCluster/exampleScripts, the command would be::
./basicClusterHoehnerExampleData.py ../exampleFiles/hoehner_dataset.csv
The results are saved in ".../pyGCluster/exampleScripts/hoehner_example_run/".
"""
from __future__ import print_function
import sys
import os
import csv
import multiprocessing
import pyGCluster # dependencies are NumPy, SciPy, optionally fastcluster and rpy2
def main():
    """Parse the CSV named in sys.argv[1] and cluster it with pyGCluster.

    The CSV must have an 'identifier' column plus '<condition>__MEAN' /
    '<condition>__STD' column pairs. Results are written to a
    'hoehner_example_run/' directory next to this script.
    """
    pyGCluster_dir = os.path.split( sys.argv[ 0 ] )[ 0 ]
    ## parse data
    # data maps identifier -> { condition: (mean, std) }
    data = dict()
    with open( sys.argv[ 1 ] ) as fin:
        reader = csv.DictReader( fin, delimiter = ',' )
        conditions = set()
        for row in reader:
            if not conditions:
                # Derive the condition names once from the first row's
                # headers (strip '__MEAN'/'__STD', drop 'identifier').
                conditions = set( [ _.split( '__' )[ 0 ] for _ in row.keys() ] ) - set( [ 'identifier' ] )
            data[ row[ 'identifier' ] ] = dict()
            for condition in conditions:
                mean = float( row[ '{0}__MEAN'.format( condition ) ] )
                std = float( row[ '{0}__STD'.format( condition ) ] )
                data[ row[ 'identifier' ] ][ condition ] = ( mean, std )
    working_dir = os.path.join( pyGCluster_dir, 'hoehner_example_run/' )
    if not os.path.exists( working_dir ):
        os.mkdir( working_dir )
    print( '[ INFO ] ... the results of the example script are saved in "{0}".\n'.format( working_dir ) )
    # Cap the worker count at what the machine actually offers.
    cpus_2_use = 4
    if multiprocessing.cpu_count() < cpus_2_use:
        print( '[ INFO ] 4 threads are not available -> re-sampling is performed with only {0} thread(s) (this increases calculation time approx. proportional).'.format( multiprocessing.cpu_count() ) )
        cpus_2_use = multiprocessing.cpu_count()
    cluster = pyGCluster.Cluster( data = data, working_directory = working_dir, verbosity_level = 2 )
    print( "[ INFO ] pyGCluster will evoke 4 threads (if possible), which each require approx. 1.5GB RAM. Please make sure you have enough RAM available (4 threads in all require approx. 6GB RAM)." )
    print( "[ INFO ] It will take approx. 2 hours to complete 250,000 iterations on 4 threads." )
    # NOTE(review): iter_max is 10,000 here although the module docstring and
    # the INFO messages above advertise 250,000 iterations -- confirm which
    # value is intended.
    cluster.do_it_all(
        distances = [ 'euclidean', 'correlation' ],
        linkages = [ 'complete', 'average', 'ward' ],
        iter_max = 10000,
        cpus_2_use = cpus_2_use,
        min_value_4_expression_map = -3,
        max_value_4_expression_map = 3,
        threshold_4_the_lowest_max_freq = 0.005
    )
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        # No input CSV supplied: print the usage docstring and quit.
        print(__doc__)
        exit()
    # Invoke the freeze_support function for Windows-based systems.
    # sys.getwindowsversion only exists on Windows, so probe for it with
    # hasattr instead of the previous bare `except:`, which also swallowed
    # KeyboardInterrupt and SystemExit.
    if hasattr(sys, 'getwindowsversion'):
        multiprocessing.freeze_support()
    main()
| 36.704545 | 199 | 0.662539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,538 | 0.476161 |
cbdd5fbfea56b41fd5be10c9535f73893f760618 | 6,767 | py | Python | fpga_interchange/convert.py | gatecat/python-fpga-interchange | ddae5bcea8524ffcea61a3558f1971b0b54cf165 | [
"0BSD"
] | 27 | 2020-10-27T19:07:19.000Z | 2021-11-14T17:11:49.000Z | fpga_interchange/convert.py | gatecat/python-fpga-interchange | ddae5bcea8524ffcea61a3558f1971b0b54cf165 | [
"0BSD"
] | 61 | 2020-10-08T17:59:12.000Z | 2021-10-01T07:53:53.000Z | fpga_interchange/convert.py | gatecat/python-fpga-interchange | ddae5bcea8524ffcea61a3558f1971b0b54cf165 | [
"0BSD"
] | 12 | 2020-10-07T22:24:32.000Z | 2021-11-18T21:40:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Utilities for converting between file formats using string options.
This file provides a main function that allows conversions between supported
formats, and selecting subsets of the schemas where possible.
"""
import argparse
import json
# Treat Rapidyaml and PyYAML as optional dependencies.
try:
import ryml as _ryml
from fpga_interchange.rapidyaml_support import to_rapidyaml, from_rapidyaml
RAPIDYAML_INSTALLED = True
except ImportError as e:
RAPIDYAML_IMPORT_ERROR = e
RAPIDYAML_INSTALLED = False
try:
import yaml as _yaml
from yaml import CSafeLoader as _SafeLoader, CDumper as _Dumper
PYYAML_INSTALLED = True
except ImportError as e:
PYYAML_IMPORT_ERROR = e
PYYAML_INSTALLED = False
from fpga_interchange.interchange_capnp import Interchange, read_capnp_file, write_capnp_file
from fpga_interchange.json_support import to_json, from_json
from fpga_interchange.yaml_support import to_yaml, from_yaml
# Schema subsets selectable on the command line.
SCHEMAS = ('device', 'logical', 'physical')
# Supported serialization formats ('yaml' uses rapidyaml, 'pyyaml' uses PyYAML).
FORMATS = ('json', 'yaml', 'capnp', 'pyyaml')
def get_ryml():
    """Return the imported ryml module, raising if rapidyaml is unavailable."""
    if not RAPIDYAML_INSTALLED:
        # TODO: https://github.com/SymbiFlow/python-fpga-interchange/issues/11
        raise RuntimeError(
            'Rapidyaml failed import, "yaml" support not available.\n\nImport error:\n{}\n\nInstall with "pip install git+https://github.com/litghost/rapidyaml.git@fixup_python_packaging#egg=rapidyaml&subdirectory=api/python"'
            .format(RAPIDYAML_IMPORT_ERROR))
    return _ryml
def get_pyyaml():
    """Return (yaml module, SafeLoader, Dumper), raising if PyYAML is missing."""
    if not PYYAML_INSTALLED:
        raise RuntimeError(
            'PyYAML failed import, "pyyaml" support not available.\n\nImport error:\n{}\n\nInstall with "pip install pyyaml"'
            .format(PYYAML_IMPORT_ERROR))
    return _yaml, _SafeLoader, _Dumper
def follow_path(schema_root, path):
    """ Walk attribute names in *path* starting from schema_root.

    Returns the schema reached after following every element of *path*
    via getattr; an empty path returns schema_root itself.
    """
    node = schema_root
    for attribute_name in path:
        node = getattr(node, attribute_name)
    return node
def read_format_to_message(message, input_format, in_f):
    """Deserialize ``in_f`` into an existing capnp ``message`` builder.

    ``input_format`` selects the parser: 'json', 'yaml' (rapidyaml) or
    'pyyaml'.  ``in_f`` must be a binary file containing UTF-8 text.
    """
    if input_format == 'json':
        data = json.loads(in_f.read().decode('utf-8'))
        from_json(message, data)
    elif input_format == 'yaml':
        ryml = get_ryml()
        tree = ryml.parse(in_f.read().decode('utf-8'))
        from_rapidyaml(message, tree)
    elif input_format == 'pyyaml':
        yaml, SafeLoader, _ = get_pyyaml()
        data = yaml.load(in_f.read().decode('utf-8'), Loader=SafeLoader)
        from_yaml(message, data)
    else:
        assert False, 'Invalid input format {}'.format(input_format)
def read_format(schema, input_format, in_f):
    """Read a serialized file into a capnp message Builder.

    schema: capnp schema describing the message.
    input_format (str): one of 'capnp', 'json', 'yaml', 'pyyaml'.
    in_f (file-like): binary file containing the serialized data.

    Returns a capnp message Builder of the given schema.
    """
    if input_format == 'capnp':
        return read_capnp_file(schema, in_f).as_builder()
    if input_format in ('json', 'yaml', 'pyyaml'):
        message = schema.new_message()
        read_format_to_message(message, input_format, in_f)
        return message
    assert False, 'Invalid input format {}'.format(input_format)
def write_format(message, output_format, out_f):
    """Serialize a capnp ``message`` to ``out_f`` in ``output_format``.

    message: capnp Builder object to serialize.
    output_format (str): one of 'capnp', 'json', 'yaml', 'pyyaml'.
    out_f (file-like): binary file to write the serialized data to.
    """
    if output_format == 'capnp':
        write_capnp_file(message, out_f)
    elif output_format == 'json':
        text = json.dumps(to_json(message.as_reader()), indent=2)
        out_f.write(text.encode('utf-8'))
    elif output_format == 'yaml':
        ryml = get_ryml()
        # ``strings`` keeps the backing string data alive while emitting.
        strings, tree = to_rapidyaml(message.as_reader())
        text = ryml.emit(tree)
        out_f.write(text.encode('utf-8'))
    elif output_format == 'pyyaml':
        yaml, _, Dumper = get_pyyaml()
        text = yaml.dump(to_yaml(message.as_reader()), sort_keys=False, Dumper=Dumper)
        out_f.write(text.encode('utf-8'))
    else:
        assert False, 'Invalid output format {}'.format(output_format)
def get_schema(schema_dir, schema, schema_path=None):
    """Return a capnp schema given a schema directory and schema type.

    schema_dir (str): directory containing the capnp schema files.
    schema (str): one of 'device', 'logical' or 'physical'.
    schema_path (str): optional '.'-separated path to a nested schema;
        when omitted, the default root struct for the type is used.

    Returns the selected capnp schema object.
    """
    container = Interchange(schema_dir)
    roots = {
        'device': container.device_resources_schema,
        'logical': container.logical_netlist_schema,
        'physical': container.physical_netlist_schema,
    }
    # Sanity check: every advertised schema type has a root entry.
    for name in SCHEMAS:
        assert name in roots
    if schema_path is not None:
        leaves = schema_path.split('.')
    else:
        leaves = {
            'device': ['Device'],
            'logical': ['Netlist'],
            'physical': ['PhysNetlist'],
        }[schema]
    return follow_path(roots[schema], leaves)
def main():
    """Command line entry point.

    Reads a serialized interchange file, converts it between the supported
    formats (capnp/json/yaml/pyyaml) and writes the result to the output
    path.  ``--schema_path`` optionally selects a nested schema.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--schema_dir', required=True)
    parser.add_argument('--schema', required=True, choices=SCHEMAS)
    parser.add_argument('--input_format', required=True, choices=FORMATS)
    parser.add_argument('--output_format', required=True, choices=FORMATS)
    parser.add_argument('--schema_path')
    parser.add_argument('input')
    parser.add_argument('output')
    args = parser.parse_args()
    schema = get_schema(args.schema_dir, args.schema, args.schema_path)
    # The input is fully read (and closed) before the output file is created.
    with open(args.input, 'rb') as in_f:
        message = read_format(schema, args.input_format, in_f)
    with open(args.output, 'wb') as out_f:
        write_format(message, args.output_format, out_f)
if __name__ == "__main__":
    main()
| 32.37799 | 226 | 0.685385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,357 | 0.348308 |
cbddc035a6a6d8c78e66f93a222b27dafd4be1e5 | 3,602 | py | Python | sharpy/plans/tactics/speed_mining.py | DuncanDHall/sharpy-sc2 | 7a47a7538ad99214e3f0288b6213cac882551180 | [
"MIT"
] | null | null | null | sharpy/plans/tactics/speed_mining.py | DuncanDHall/sharpy-sc2 | 7a47a7538ad99214e3f0288b6213cac882551180 | [
"MIT"
] | null | null | null | sharpy/plans/tactics/speed_mining.py | DuncanDHall/sharpy-sc2 | 7a47a7538ad99214e3f0288b6213cac882551180 | [
"MIT"
] | null | null | null | from typing import List, Dict
from sc2.ids.ability_id import AbilityId
from sc2.position import Point2
from sc2.unit import Unit
from sc2.units import Units
from sharpy.interfaces import IZoneManager
from sharpy.managers.core.roles import UnitTask
from sharpy.plans.acts import ActBase
from sharpy.sc2math import get_intersections
MINING_RADIUS = 1.325
class SpeedMining(ActBase):
    """ Make worker mine faster perhaps? """
    def __init__(self, enable_on_return=True, enable_on_mine=True) -> None:
        """Create the act.

        enable_on_return: boost workers that are carrying resources back.
        enable_on_mine: boost workers that are on their way to minerals.
        """
        super().__init__()
        self.enable_on_return = enable_on_return
        self.enable_on_mine = enable_on_mine
        # Optimal pre-move point for each mineral field, keyed by the
        # field's position.  Filled once in calculate_targets().
        self.mineral_target_dict: Dict[Point2, Point2] = {}
    async def start(self, knowledge: "Knowledge"):
        # Precompute per-mineral-field move targets once at startup.
        await super().start(knowledge)
        self.calculate_targets()
    async def execute(self) -> bool:
        # Nothing to do without townhalls or with both optimizations off.
        if len(self.ai.townhalls) < 1 or (not self.enable_on_return and not self.enable_on_mine):
            return True
        workers = self.get_mineral_workers()
        self.speedmine(workers)
        return True
    def get_mineral_workers(self) -> Units:
        """Return gathering workers that are not working a vespene geyser."""
        def miner_filter(unit: Unit) -> bool:
            # Skip workers carrying vespene or whose order targets a
            # vespene-bearing unit.
            if unit.is_carrying_vespene:
                return False
            if unit.order_target is not None and isinstance(unit.order_target, int):
                target_unit = self.cache.by_tag(unit.order_target)
                if target_unit is not None and target_unit.has_vespene:
                    return False
            return True
        units = self.roles.all_from_task(UnitTask.Gathering).filter(miner_filter)
        return units
    def speedmine(self, workers: Units):
        for worker in workers:
            self.speedmine_single(worker)
    def speedmine_single(self, worker: Unit):
        """Queue a short move immediately followed by a SMART command so the
        worker skips part of its approach (the intended speed-up)."""
        townhall = self.ai.townhalls.closest_to(worker)
        if self.enable_on_return and worker.is_returning and len(worker.orders) == 1:
            target: Point2 = townhall.position
            target = target.towards(worker, townhall.radius + worker.radius)
            # Only intervene inside the distance window where this helps.
            if 0.75 < worker.distance_to(target) < 2:
                worker.move(target)
                worker(AbilityId.SMART, townhall, True)
            return
        if (
            self.enable_on_mine
            and not worker.is_returning
            and len(worker.orders) == 1
            and isinstance(worker.order_target, int)
        ):
            mf = self.cache.by_tag(worker.order_target)
            if mf is not None and mf.is_mineral_field:
                target = self.mineral_target_dict.get(mf.position)
                if target and 0.75 < worker.distance_to(target) < 2:
                    worker.move(target)
                    worker(AbilityId.SMART, mf, True)
    def calculate_targets(self):
        """For every mineral field, store the point MINING_RADIUS away from
        the field towards the nearest expansion center; when a neighbouring
        field's radius overlaps, snap to an intersection point of the two
        circles instead."""
        zone_manager = self.knowledge.get_required_manager(IZoneManager)
        zones = zone_manager.expansion_zones
        centers: List[Point2] = []
        for zone in zones:
            centers.append(zone.center_location)
        for mf in self.ai.mineral_field:
            target: Point2 = mf.position
            center = target.closest(centers)
            target = target.towards(center, MINING_RADIUS)
            close = self.ai.mineral_field.closer_than(MINING_RADIUS, target)
            for mf2 in close:
                if mf2.tag != mf.tag:
                    points = get_intersections(mf.position, MINING_RADIUS, mf2.position, MINING_RADIUS)
                    if len(points) == 2:
                        target = center.closest(points)
            self.mineral_target_dict[mf.position] = target
cbdf9ff14b79b043642bf4e92833f21eebe9c6f0 | 621 | py | Python | build_automation/content_management/migrations/0046_auto_20200228_1432.py | SolarSPELL-Main/DLMS | 7e799246c55f5a64fa236567c411d1c2a2c4f38f | [
"MIT"
] | null | null | null | build_automation/content_management/migrations/0046_auto_20200228_1432.py | SolarSPELL-Main/DLMS | 7e799246c55f5a64fa236567c411d1c2a2c4f38f | [
"MIT"
] | 9 | 2020-01-15T21:33:16.000Z | 2021-06-10T22:13:28.000Z | build_automation/content_management/migrations/0046_auto_20200228_1432.py | SolarSPELL-Main/DLMS | 7e799246c55f5a64fa236567c411d1c2a2c4f38f | [
"MIT"
] | 3 | 2019-11-16T03:54:48.000Z | 2021-09-10T18:53:20.000Z | # Generated by Django 2.1.3 on 2020-02-28 21:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces Content's old 'collections' field with a single nullable
    # 'collection' foreign key; SET_NULL on delete keeps content rows when
    # their collection is removed.
    dependencies = [
        ('content_management', '0045_auto_20200228_1345'),
    ]
    operations = [
        # Drop the previous 'collections' field.
        migrations.RemoveField(
            model_name='content',
            name='collections',
        ),
        # Add the new single-collection relation.
        migrations.AddField(
            model_name='content',
            name='collection',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='content_management.Collection'),
        ),
    ]
| 25.875 | 129 | 0.63124 | 495 | 0.797101 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.267311 |
cbe454fc38406286a1921bff834cf81ed53fb858 | 12,700 | py | Python | pycudadecon/deconvolution.py | zbarry/pycudadecon | 9259a579a3bb2122a2251beb5b910ba2e5828ad3 | [
"MIT"
] | 46 | 2019-03-04T04:54:39.000Z | 2022-02-28T04:44:27.000Z | pycudadecon/deconvolution.py | zbarry/pycudadecon | 9259a579a3bb2122a2251beb5b910ba2e5828ad3 | [
"MIT"
] | 18 | 2019-03-08T04:31:23.000Z | 2022-02-07T06:43:15.000Z | pycudadecon/deconvolution.py | zbarry/pycudadecon | 9259a579a3bb2122a2251beb5b910ba2e5828ad3 | [
"MIT"
] | 9 | 2019-03-06T03:07:59.000Z | 2022-03-11T17:02:21.000Z | import os
from fnmatch import fnmatch
from typing import Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
from typing_extensions import Literal
from . import lib
from .otf import TemporaryOTF
from .util import PathOrArray, _kwargs_for, imread
def rl_cleanup():
    """Release GPU buffer and cleanup after deconvolution
    Call this before program quits to release global GPUBuffer d_interpOTF.
    - Resets any bleach corrections
    - Removes OTF from GPU buffer
    - Destroys cuFFT plan
    - Releases GPU buffers
    """
    # Thin pass-through to the compiled library's cleanup routine.
    return lib.RL_cleanup()
def rl_init(
    rawdata_shape: Tuple[int, int, int],
    otfpath: str,
    dzdata: float = 0.5,
    dxdata: float = 0.1,
    dzpsf: float = 0.1,
    dxpsf: float = 0.1,
    deskew: float = 0,
    rotate: float = 0,
    width: int = 0,
):
    """Prepare the GPU for deconvolution of data with a given shape and OTF.

    Sets up the cuFFT plan used by :func:`pycudadecon.rl_decon`; call
    :func:`pycudadecon.rl_cleanup` when finished.

    Parameters
    ----------
    rawdata_shape : Tuple[int, int, int]
        (nz, ny, nx) shape of the raw data.
    otfpath : str
        Path to the OTF TIF file.
    dzdata, dxdata : float
        Z-step and XY pixel size of the data (defaults 0.5 and 0.1).
    dzpsf, dxpsf : float
        Z-step and XY pixel size of the OTF (defaults 0.1 and 0.1).
    deskew : float
        Deskew angle; non-zero deskews before deconvolution.
    rotate : float
        Rotation angle around Y applied after deconvolution when non-zero.
    width : int
        Output width after deskewing; 0 means do not crop.

    Examples
    --------
    >>> rl_init(im.shape, otfpath)
    >>> decon_result = rl_decon(im)
    >>> rl_cleanup()
    """
    shape_z, shape_y, shape_x = rawdata_shape
    lib.RL_interface_init(
        shape_x,
        shape_y,
        shape_z,
        dxdata,
        dzdata,
        dxpsf,
        dzpsf,
        deskew,
        rotate,
        width,
        otfpath.encode(),
    )
def rl_decon(
    im: np.ndarray,
    background: Union[int, Literal["auto"]] = 80,
    n_iters: int = 10,
    shift: int = 0,
    save_deskewed: bool = False,
    output_shape: Optional[Tuple[int, int, int]] = None,
    napodize: int = 15,
    nz_blend: int = 0,
    pad_val: float = 0.0,
    dup_rev_z: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """Run Richardson-Lucy deconvolution on a single 3D volume.

    The GPU must already be initialized with :func:`pycudadecon.rl_init`
    (or via :class:`pycudadecon.RLContext`).

    Parameters
    ----------
    im : np.ndarray
        3D volume to deconvolve (converted to uint16 if necessary).
    background : int or 'auto'
        Background level subtracted before deconvolution; 'auto' uses the
        median of the last Z plane.  Default 80.
    n_iters : int
        Number of RL iterations, default 10.
    shift : int
        If deskewed, extra X shift of the output (positive shifts left).
    save_deskewed : bool
        Also return the deskewed raw data.
    output_shape : tuple of int, optional
        ZYX shape of the result; autodetected from the library when None.
        Mostly useful inside a :class:`pycudadecon.RLContext`.
    napodize : int
        Number of edge pixels to soften, default 15.
    nz_blend : int
        Number of top/bottom sections blended to reduce axial ringing.
    pad_val : float
        Value used to pad the image when deskewing.
    dup_rev_z : bool
        Duplicate a reversed stack prior to decon to reduce axial ringing.

    Returns
    -------
    np.ndarray or 2-tuple of np.ndarray
        The deconvolved volume, or ``(decon_result, deskew_result)`` when
        ``save_deskewed`` is True.

    Raises
    ------
    ValueError
        If ``im`` is not 3D, or ``output_shape`` does not have length 3.
    """
    if im.ndim != 3:
        raise ValueError("Only 3D arrays supported")
    nz, ny, nx = im.shape
    if output_shape is None:
        output_shape = (lib.get_output_nz(), lib.get_output_ny(), lib.get_output_nx())
    elif len(output_shape) != 3:
        raise ValueError("Decon output shape must have length==3")
    result = np.empty(tuple(output_shape), dtype=np.float32)
    # The library always receives a deskew buffer; a 1-element dummy is
    # used when the deskewed data is not being kept.
    if save_deskewed:
        deskewed = np.empty_like(result)
    else:
        deskewed = np.empty(1, dtype=np.float32)
    # The library expects 16-bit input.
    if not np.issubdtype(im.dtype, np.uint16):
        im = im.astype(np.uint16)
    if isinstance(background, str) and background == "auto":
        background = np.median(im[-1])
    rescale = False  # not sure if this works yet...
    if not im.flags["C_CONTIGUOUS"]:
        im = np.ascontiguousarray(im)
    lib.RL_interface(
        im,
        nx,
        ny,
        nz,
        result,
        deskewed,
        background,
        rescale,
        save_deskewed,
        n_iters,
        shift,
        napodize,
        nz_blend,
        pad_val,
        dup_rev_z,
    )
    if save_deskewed:
        return result, deskewed
    return result
def quickDecon(image: np.ndarray, otfpath: str, **kwargs):
    """One-shot helper: init the GPU, deconvolve ``image``, then clean up.

    Not currently used...
    """
    rl_init(image.shape, otfpath, **_kwargs_for(rl_init, kwargs))
    deconvolved = rl_decon(image, **_kwargs_for(rl_decon, kwargs))
    lib.RL_cleanup()
    return deconvolved
class RLContext:
    """Context manager to setup the GPU for RL decon

    Takes care of handing the OTF to the GPU, preparing a cuFFT plan,
    and cleaning up after decon.  Internally, this calls :func:`rl_init`,
    stores the shape of the expected output volume after any deskew/decon,
    then calls :func:`rl_cleanup` when exiting the context.

    For parameters, see :func:`rl_init`.

    Examples
    --------
    >>> with RLContext(data.shape, otfpath, dz) as ctx:
    ...     result = rl_decon(data, ctx.out_shape)
    """

    def __init__(
        self,
        rawdata_shape: Tuple[int, int, int],
        otfpath: str,
        dzdata: float = 0.5,
        dxdata: float = 0.1,
        dzpsf: float = 0.1,
        dxpsf: float = 0.1,
        deskew: float = 0,
        rotate: float = 0,
        width: int = 0,
    ):
        # Collect the rl_init() arguments explicitly rather than with
        # locals(), which would silently break if a temporary variable
        # were ever added to this function.
        self.kwargs = {
            "rawdata_shape": rawdata_shape,
            "otfpath": otfpath,
            "dzdata": dzdata,
            "dxdata": dxdata,
            "dzpsf": dzpsf,
            "dxpsf": dxpsf,
            "deskew": deskew,
            "rotate": rotate,
            "width": width,
        }
        # Populated by __enter__ with the ZYX shape of the decon output.
        self.out_shape: Optional[Tuple[int, int, int]] = None

    def __enter__(self):
        """Setup the context and return self; ``out_shape`` is then available."""
        rl_init(**self.kwargs)
        self.out_shape = (lib.get_output_nz(), lib.get_output_ny(), lib.get_output_nx())
        return self

    def __exit__(self, typ, val, traceback):
        # exit receives a tuple with any exceptions raised during processing
        # Returning None (falsy) propagates those exceptions after the GPU
        # buffers have been released.
        lib.RL_cleanup()
# alias
rl_context = RLContext
def _yield_arrays(
images: Union[PathOrArray, Sequence[PathOrArray]], fpattern="*.tif"
) -> Iterator[np.ndarray]:
"""Yield arrays from an array, path, or sequence of either.
Parameters
----------
images : Union[PathOrArray, Sequence[PathOrArray]]
an array, path, or sequence of either
fpattern : str, optional
used to filter files in a directory, by default "*.tif"
Yields
-------
Iterator[np.ndarray]
Arrays (read from paths if necessary)
Raises
------
OSError
If a directory is provided and no files match fpattern.
"""
if isinstance(images, np.ndarray):
yield images
elif isinstance(images, str):
if os.path.isfile(images):
yield imread(images)
elif os.path.isdir(images):
imfiles = [f for f in os.listdir(images) if fnmatch(f, fpattern)]
if not len(imfiles):
raise OSError(
'No files matching pattern "{}" found in directory: {}'.format(
fpattern, images
)
)
for fpath in imfiles:
yield imread(os.path.join(images, fpath))
else:
for item in images:
yield from _yield_arrays(item)
def decon(
    images: Union[PathOrArray, Sequence[PathOrArray]],
    psf: PathOrArray,
    fpattern: str = "*.tif",
    **kwargs
) -> Union[np.ndarray, List[np.ndarray]]:
    """Deconvolve an image or images with a PSF or OTF file.
    If `images` is a directory, use the `fpattern` argument to select files
    by filename pattern.
    Parameters
    ----------
    images : str, np.ndarray, or sequence of either
        The array, filepath, directory, or list/tuple thereof to deconvolve
    psf : str or np.ndarray
        a filepath of a PSF or OTF file, or a 3D numpy PSF array. Function will
        auto-detect whether the file is a 3D PSF or a filepath representing a 2D
        complex OTF.
    fpattern : str, optional
        Filepattern to use when a directory is provided in the `images` argument,
        by default `*.tif`
    ** kwargs
        All other kwargs must be valid for either :func:`rl_init` or :func:`rl_decon`.
    Returns
    -------
    np.ndarray or list of array
        The deconvolved image(s)
    Raises
    ------
    ValueError
        If save_deskewed is True and deskew is unset or 0
    IOError
        If a directory is provided as input and ``fpattern`` yields no files
    NotImplementedError
        If ``psf`` is provided as a complex, 2D numpy array (OTFs can only be
        provided as filenames created with :func:`pycudadecon.make_otf`)
    Examples
    --------
    deconvolve a 3D TIF volume with a 3D PSF volume (e.g. a single bead stack)
    >>> result = decon('/path/to/image.tif', '/path/to/psf.tif')
    deconvolve all TIF files in a specific directory that match a certain
    `filename pattern <https://docs.python.org/3.6/library/fnmatch.html>`_,
    (in this example, all TIFs with the string '560nm' in their name)
    >>> result = decon(
    ...     '/directory/with/images', '/path/to/psf.tif', fpattern='*560nm*.tif'
    ... )
    deconvolve a list of images, provided either as np.ndarrays, filepaths,
    or directories
    >>> imarray = tifffile.imread('some_other_image.tif')
    >>> inputs = ['/directory/with/images', '/path/to/image.tif', imarray]
    >>> result = decon(inputs, '/path/to/psf.tif', fpattern='*560nm*.tif')
    """
    if kwargs.get("save_deskewed"):
        # Two checks with distinct messages: an explicit deskew=0, and a
        # missing/falsy deskew value.  Both are invalid with save_deskewed.
        if kwargs.get("deskew", 1) == 0:
            raise ValueError("Cannot use save_deskewed=True with deskew=0")
        if not kwargs.get("deskew"):
            raise ValueError("Must set deskew != 0 when using save_deskewed=True")
    # Split kwargs between GPU initialization and per-volume deconvolution.
    init_kwargs = _kwargs_for(rl_init, kwargs)
    decon_kwargs = _kwargs_for(rl_decon, kwargs)
    out = []
    with TemporaryOTF(psf, **kwargs) as otf:
        arraygen = _yield_arrays(images, fpattern)
        # first, assume that all of the images are the same shape...
        # in which case we can prevent a lot of GPU IO
        # grab and store the shape of the first item in the generator
        next_im = next(arraygen)
        shp = next_im.shape
        with RLContext(shp, otf.path, **init_kwargs) as ctx:
            while True:
                out.append(
                    rl_decon(next_im, output_shape=ctx.out_shape, **decon_kwargs)
                )
                try:
                    next_im = next(arraygen)
                    # here we check to make sure that the images are still the same
                    # shape... if not, we'll continue below
                    if next_im.shape != shp:
                        break
                except StopIteration:
                    next_im = None
                    break
        # if we had a shape mismatch, there will still be images left to process
        # process them the slow way here...
        if next_im is not None:
            # One fresh GPU context (cuFFT plan) per remaining image.
            for imarray in [next_im, *arraygen]:
                with RLContext(imarray.shape, otf.path, **init_kwargs) as ctx:
                    out.append(
                        rl_decon(imarray, output_shape=ctx.out_shape, **decon_kwargs)
                    )
    # Mirror the input container: a multi-item list/tuple returns a list,
    # anything else returns just the single (first) result.
    if isinstance(images, (list, tuple)) and len(images) > 1:
        return out
    else:
        return out[0]
| 31.127451 | 88 | 0.613228 | 1,414 | 0.111339 | 1,289 | 0.101496 | 0 | 0 | 0 | 0 | 7,162 | 0.563937 |
cbe6b0eea93a98eae975c5a09c446c6929e03ea6 | 816 | py | Python | deep_learning_with_tensorFlow/Chapter05/p11302.py | pearpai/TensorFlow-action | 264099d933988532ed59eaf0f2ad495d40ede4d2 | [
"Apache-2.0"
] | 3 | 2018-06-07T07:15:00.000Z | 2018-10-09T07:59:50.000Z | deep_learning_with_tensorFlow/Chapter05/p11302.py | pearpai/TensorFlow-action | 264099d933988532ed59eaf0f2ad495d40ede4d2 | [
"Apache-2.0"
] | null | null | null | deep_learning_with_tensorFlow/Chapter05/p11302.py | pearpai/TensorFlow-action | 264099d933988532ed59eaf0f2ad495d40ede4d2 | [
"Apache-2.0"
] | 4 | 2017-04-23T05:30:41.000Z | 2018-09-27T07:13:37.000Z | # coding=utf-8
import tensorflow as tf
v = tf.Variable(0, dtype=tf.float32, name='v3')
# Before the moving-average model is declared there is only one variable,
# so this loop prints just that variable's name.
for variables in tf.global_variables():
    print(variables.name)
ema = tf.train.ExponentialMovingAverage(0.99)
# Apply the moving average to every global variable.
maintain_averages_op = ema.apply(tf.global_variables())
# After the moving-average model is declared, TensorFlow automatically
# creates a shadow variable (".../ExponentialMovingAverage").  The loop
# below therefore prints both the original variable and its shadow.
for variables in tf.global_variables():
    print(variables.name)
saver = tf.train.Saver()
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    sess.run(tf.assign(v, 10))
    sess.run(maintain_averages_op)
    # Saving stores both variables: the original and its shadow
    # moving-average variable.
    saver.save(sess, "Saved_model/model2.ckpt")
    print(sess.run([v, ema.average(v)]))
| 29.142857 | 55 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.425253 |
1dab593015997d8eba3dbafe4c8fcbe0f12b7e14 | 35 | py | Python | build/lib/zorb/models/__init__.py | varunranga/zorb | ffad98d15c3200eafc1b10c68860ce34ebf78f62 | [
"MIT"
] | 3 | 2021-05-13T16:28:39.000Z | 2022-02-18T23:10:35.000Z | src/zorb/models/__init__.py | varunranga/zorb | ffad98d15c3200eafc1b10c68860ce34ebf78f62 | [
"MIT"
] | null | null | null | src/zorb/models/__init__.py | varunranga/zorb | ffad98d15c3200eafc1b10c68860ce34ebf78f62 | [
"MIT"
] | null | null | null | from .Sequential import Sequential
| 17.5 | 34 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1dae9ce676e6495c35381a8c5c2d8862347c3bd0 | 28 | py | Python | oarepo_whitenoise/__init__.py | oarepo/oarepo-whitenoise | e72a875edc2a8db3c7805c65ed7e73927d20f056 | [
"MIT"
] | null | null | null | oarepo_whitenoise/__init__.py | oarepo/oarepo-whitenoise | e72a875edc2a8db3c7805c65ed7e73927d20f056 | [
"MIT"
] | null | null | null | oarepo_whitenoise/__init__.py | oarepo/oarepo-whitenoise | e72a875edc2a8db3c7805c65ed7e73927d20f056 | [
"MIT"
] | 1 | 2021-06-28T12:21:33.000Z | 2021-06-28T12:21:33.000Z | """Nothing here, really."""
| 14 | 27 | 0.607143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.964286 |
1daff2d7a44c6c02cc6f43b03cc6ccde6dd9165f | 1,748 | py | Python | assets/src/ba_data/python/bastd/mapdata/rampage.py | Benefit-Zebra/ballistica | eb85df82cff22038e74a2d93abdcbe9cd755d782 | [
"MIT"
] | 317 | 2020-04-04T00:33:10.000Z | 2022-03-28T01:07:09.000Z | assets/src/ba_data/python/bastd/mapdata/rampage.py | Alshahriah/ballistica | 326f6677a0118667e93ce9034849622ebef706fa | [
"MIT"
] | 315 | 2020-04-04T22:33:10.000Z | 2022-03-31T22:50:02.000Z | assets/src/ba_data/python/bastd/mapdata/rampage.py | Alshahriah/ballistica | 326f6677a0118667e93ce9034849622ebef706fa | [
"MIT"
] | 97 | 2020-04-04T01:32:17.000Z | 2022-03-16T19:02:59.000Z | # Released under the MIT License. See LICENSE for details.
#
# This file was automatically generated from "rampage.ma"
# pylint: disable=all
points = {}
# noinspection PyDictCreation
boxes = {}
# Map geometry exported from the Maya scene.  Each ``boxes`` entry is three
# concatenated 3-tuples -- (center) + (0.0, 0.0, 0.0) + (size); the middle
# tuple is always zero here (presumably rotation -- confirm against the map
# loader).  ``points`` entries are either a bare (x, y, z) position or a
# position followed by a second 3-tuple (presumably a spawn-area spread).
boxes['area_of_interest_bounds'] = (0.3544110667, 5.616383286,
                                    -4.066055072) + (0.0, 0.0, 0.0) + (
                                        19.90053969, 10.34051135, 8.16221072)
boxes['edge_box'] = (0.3544110667, 5.438284793, -4.100357672) + (
    0.0, 0.0, 0.0) + (12.57718032, 4.645176013, 3.605557343)
points['ffa_spawn1'] = (0.5006944438, 5.051501304,
                        -5.79356326) + (6.626174027, 1.0, 0.3402012662)
points['ffa_spawn2'] = (0.5006944438, 5.051501304,
                        -2.435321368) + (6.626174027, 1.0, 0.3402012662)
points['flag1'] = (-5.885814199, 5.112162255, -4.251754911)
points['flag2'] = (6.700855451, 5.10270501, -4.259912982)
points['flag_default'] = (0.3196701116, 5.110914413, -4.292515158)
boxes['map_bounds'] = (0.4528955042, 4.899663734, -3.543675157) + (
    0.0, 0.0, 0.0) + (23.54502348, 14.19991443, 12.08017448)
points['powerup_spawn1'] = (-2.645358507, 6.426340583, -4.226597191)
points['powerup_spawn2'] = (3.540102796, 6.549722855, -4.198476335)
points['shadow_lower_bottom'] = (5.580073911, 3.136491026, 5.341226521)
points['shadow_lower_top'] = (5.580073911, 4.321758709, 5.341226521)
points['shadow_upper_bottom'] = (5.274539479, 8.425373402, 5.341226521)
points['shadow_upper_top'] = (5.274539479, 11.93458162, 5.341226521)
points['spawn1'] = (-4.745706238, 5.051501304,
                    -4.247934288) + (0.9186962739, 1.0, 0.5153189341)
points['spawn2'] = (5.838590388, 5.051501304,
                    -4.259627405) + (0.9186962739, 1.0, 0.5153189341)
| 52.969697 | 77 | 0.638444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 391 | 0.223684 |
1db107f2c1e2018df51dd16f8e25f706a3117779 | 7,945 | py | Python | PyFlow/UI/Widgets/GraphEditor_ui.py | dlario/PyFlow | b53b9d14b37aa586426d85842c6cd9a9c35443f2 | [
"MIT"
] | null | null | null | PyFlow/UI/Widgets/GraphEditor_ui.py | dlario/PyFlow | b53b9d14b37aa586426d85842c6cd9a9c35443f2 | [
"MIT"
] | null | null | null | PyFlow/UI/Widgets/GraphEditor_ui.py | dlario/PyFlow | b53b9d14b37aa586426d85842c6cd9a9c35443f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'e:/GIT/PyFlow/PyFlow/UI/Widgets\GraphEditor_ui.ui',
# licensing of 'e:/GIT/PyFlow/PyFlow/UI/Widgets\GraphEditor_ui.ui' applies.
#
# Created: Sat May 4 12:25:24 2019
# by: pyside2-uic running on PySide2 5.12.0
#
# WARNING! All changes made in this file will be lost!
from Qt import QtCompat, QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(863, 543)
MainWindow.setDocumentMode(True)
MainWindow.setDockNestingEnabled(True)
MainWindow.setDockOptions(QtWidgets.QMainWindow.AllowNestedDocks|QtWidgets.QMainWindow.AllowTabbedDocks|QtWidgets.QMainWindow.AnimatedDocks)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_3.setContentsMargins(1, 1, 1, 1)
self.gridLayout_3.setObjectName("gridLayout_3")
self.SceneWidget = QtWidgets.QWidget(self.centralwidget)
self.SceneWidget.setObjectName("SceneWidget")
self.gridLayout = QtWidgets.QGridLayout(self.SceneWidget)
self.gridLayout.setSpacing(2)
self.gridLayout.setContentsMargins(1, 1, 1, 1)
self.gridLayout.setObjectName("gridLayout")
self.widgetCurrentGraphPath = QtWidgets.QWidget(self.SceneWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widgetCurrentGraphPath.sizePolicy().hasHeightForWidth())
self.widgetCurrentGraphPath.setSizePolicy(sizePolicy)
self.widgetCurrentGraphPath.setObjectName("widgetCurrentGraphPath")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widgetCurrentGraphPath)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.layoutGraphPath = QtWidgets.QHBoxLayout()
self.layoutGraphPath.setSpacing(2)
self.layoutGraphPath.setContentsMargins(-1, 0, -1, 0)
self.layoutGraphPath.setObjectName("layoutGraphPath")
self.horizontalLayout_3.addLayout(self.layoutGraphPath)
self.gridLayout.addWidget(self.widgetCurrentGraphPath, 1, 0, 1, 1)
self.SceneLayout = QtWidgets.QGridLayout()
self.SceneLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.SceneLayout.setObjectName("SceneLayout")
self.gridLayout.addLayout(self.SceneLayout, 4, 0, 1, 1)
self.CompoundPropertiesWidget = QtWidgets.QWidget(self.SceneWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.CompoundPropertiesWidget.sizePolicy().hasHeightForWidth())
self.CompoundPropertiesWidget.setSizePolicy(sizePolicy)
self.CompoundPropertiesWidget.setObjectName("CompoundPropertiesWidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.CompoundPropertiesWidget)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(-1, 0, -1, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_2 = QtWidgets.QLabel(self.CompoundPropertiesWidget)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.leCompoundName = QtWidgets.QLineEdit(self.CompoundPropertiesWidget)
self.leCompoundName.setObjectName("leCompoundName")
self.horizontalLayout_2.addWidget(self.leCompoundName)
self.label = QtWidgets.QLabel(self.CompoundPropertiesWidget)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
self.leCompoundCategory = QtWidgets.QLineEdit(self.CompoundPropertiesWidget)
self.leCompoundCategory.setObjectName("leCompoundCategory")
self.horizontalLayout_2.addWidget(self.leCompoundCategory)
self.gridLayout_2.addLayout(self.horizontalLayout_2, 0, 0, 1, 1)
self.gridLayout.addWidget(self.CompoundPropertiesWidget, 2, 0, 1, 1)
self.gridLayout_3.addWidget(self.SceneWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 863, 26))
self.menuBar.setObjectName("menuBar")
MainWindow.setMenuBar(self.menuBar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.dockWidgetNodeView = QtWidgets.QDockWidget(MainWindow)
self.dockWidgetNodeView.setMinimumSize(QtCore.QSize(200, 113))
self.dockWidgetNodeView.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea|QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
self.dockWidgetNodeView.setObjectName("dockWidgetNodeView")
self.dockWidgetContents = QtWidgets.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.verticalLayout = QtWidgets.QVBoxLayout(self.dockWidgetContents)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.scrollArea = QtWidgets.QScrollArea(self.dockWidgetContents)
self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 198, 475))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.propertiesLayout = QtWidgets.QVBoxLayout()
self.propertiesLayout.setObjectName("propertiesLayout")
self.verticalLayout_5.addLayout(self.propertiesLayout)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.dockWidgetNodeView.setWidget(self.dockWidgetContents)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidgetNodeView)
self.toolBar.addSeparator()
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Install the translatable UI strings on the widgets built in setupUi.

        Auto-generated Qt Designer/pyside-uic boilerplate: each call wraps a
        literal in QtCompat.translate so Qt's translation system can swap it
        for a localized string at runtime.
        """
        MainWindow.setWindowTitle(QtCompat.translate("MainWindow", "PyFlow", None, -1))
        self.label_2.setText(QtCompat.translate("MainWindow", "Name:", None, -1))
        self.label.setText(QtCompat.translate("MainWindow", "Category:", None, -1))
        self.toolBar.setWindowTitle(QtCompat.translate("MainWindow", "toolBar", None, -1))
        self.dockWidgetNodeView.setWindowTitle(QtCompat.translate("MainWindow", "PropertyView", None, -1))
| 60.648855 | 148 | 0.746633 | 7,539 | 0.948899 | 0 | 0 | 0 | 0 | 0 | 0 | 852 | 0.107237 |
1db11b3357bbf28fcc5549063a419c83717d2922 | 995 | py | Python | computer_wordle.py | AntonKueltz/computer-wordle-client | 5cd88d779f529de2c19b82d599ff478476cb0feb | [
"MIT"
] | null | null | null | computer_wordle.py | AntonKueltz/computer-wordle-client | 5cd88d779f529de2c19b82d599ff478476cb0feb | [
"MIT"
] | null | null | null | computer_wordle.py | AntonKueltz/computer-wordle-client | 5cd88d779f529de2c19b82d599ff478476cb0feb | [
"MIT"
] | 2 | 2022-02-03T01:36:51.000Z | 2022-02-07T20:52:36.000Z | #!/usr/bin/env python3
import api
from urllib.parse import urljoin
GREEN = 'G'
YELLOW = 'Y'
GRAY = '.'
with open('wordlist.txt') as wordlist_file:
wordlist = tuple(line.strip() for line in wordlist_file)
class Game:
    """Client-side handle for one Computer Wordle session on the server."""

    def __init__(self):
        """Start a fresh game on the server and cache its ids and first hint."""
        created = api.start_new_game()
        self._game_id = created['game_id']
        self._public_game_id = created['public_game_id']
        self._current_hint = created['hint']

    def current_hint(self):
        """Return the most recently received hint."""
        return self._current_hint

    def guess(self, guess):
        """Submit *guess*; return the server response plus the next hint, if any."""
        reply = api.make_guess(self._game_id, guess)
        outcome = {'guess_response': reply['response']}
        if 'next_hint' in reply:
            # The server sends a new hint while the game is still in progress.
            self._current_hint = reply['next_hint']
            outcome['next_hint'] = reply['next_hint']
        return outcome

    def status(self):
        """Return the server-side status of this game."""
        return api.get_game_status(self._game_id)

    def url(self):
        """Return the public URL at which this game can be viewed."""
        return urljoin(api.BASE_URL, f'/game/{self._public_game_id}/')
| 26.184211 | 70 | 0.654271 | 780 | 0.78392 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.178894 |
1db1d266c16124792ba1fb342841f354a3bc0a8d | 1,450 | py | Python | src/validataclass/validators/date_validator.py | binary-butterfly/validataclass | 65b07973c66cb4b00404037207405cc4c9580e12 | [
"MIT"
] | null | null | null | src/validataclass/validators/date_validator.py | binary-butterfly/validataclass | 65b07973c66cb4b00404037207405cc4c9580e12 | [
"MIT"
] | 20 | 2021-11-08T15:58:38.000Z | 2022-03-02T16:56:15.000Z | src/validataclass/validators/date_validator.py | binary-butterfly/validataclass | 65b07973c66cb4b00404037207405cc4c9580e12 | [
"MIT"
] | null | null | null | """
validataclass
Copyright (c) 2021, binary butterfly GmbH and contributors
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
from datetime import date
from typing import Any
from .string_validator import StringValidator
from validataclass.exceptions import InvalidDateError
__all__ = [
'DateValidator',
]
class DateValidator(StringValidator):
    """
    Validator that accepts date strings in "YYYY-MM-DD" format (e.g. "2021-01-31")
    and converts them to `datetime.date` objects.

    Currently no parameters are supported.

    Examples:

    ```
    DateValidator()
    ```

    See also: `TimeValidator`, `DateTimeValidator`

    Valid input: Valid dates in YYYY-MM-DD format as `str`
    Output: `datetime.date`
    """

    def __init__(self):
        """
        Create a `DateValidator`. No parameters.
        """
        # The base StringValidator (with no options) performs the string type check.
        super().__init__()

    def validate(self, input_data: Any) -> date:
        """
        Validate input as a date string and convert it to a `datetime.date`.
        """
        date_string = super().validate(input_data)
        try:
            # date.fromisoformat accepts exactly the "YYYY-MM-DD" form.
            return date.fromisoformat(date_string)
        except ValueError:
            raise InvalidDateError()
| 25.438596 | 109 | 0.658621 | 1,080 | 0.744828 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.604138 |
1db1f7c927cfaedb78f62fb3695b46b8ba415747 | 1,810 | py | Python | shimmer/apps/SimpleAccelGyro/simpleAccelGyroRecv.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | 1 | 2020-02-28T20:35:09.000Z | 2020-02-28T20:35:09.000Z | shimmer/apps/SimpleAccelGyro/simpleAccelGyroRecv.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | shimmer/apps/SimpleAccelGyro/simpleAccelGyroRecv.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | #!/usr/bin/python
import sys, os, struct, array, time
try:
import tos
except ImportError:
import posix
sys.path = [os.path.join(posix.environ['TOSROOT'], 'support', 'sdk', 'python')] + sys.path
import tos
if len(sys.argv) < 2:
print "no device specified"
print "example:"
print " simpleAccelGyroRecv.py /dev/ttyUSB5"
else:
try:
ser = tos.Serial(sys.argv[1], 115200)
am = tos.AM(ser)
except:
print "ERROR: Unable to initialize serial port connection to", sys.argv[1]
sys.exit(-1)
try:
while True:
packet = am.read(timeout=5)
if packet:
if len(packet.data) < 2:
print "skipping truncated packet"
pass
elif len(packet.data) != 100:
print packet.data
else:
count = int(packet.data[1]) + (int(packet.data[0]<<8))
for i in range(2,86,14):
accelx = int(packet.data[i + 1] << 8) + int(packet.data[i])
accely = int(packet.data[i + 3] << 8) + int(packet.data[i + 2])
accelz = int(packet.data[i + 5] << 8) + int(packet.data[i + 4])
gyrox = int(packet.data[i + 7] << 8) + int(packet.data[i + 6])
gyroy = int(packet.data[i + 9] << 8) + int(packet.data[i + 8])
gyroz = int(packet.data[i + 11] << 8) + int(packet.data[i + 10])
rawbatt = int(packet.data[i + 13] << 8) + int(packet.data[i + 12])
batt = rawbatt / 4095.0 * 3.0 * 2.0
print "%d %d %d %d %d %d %d %d %5.3f" % (packet.source, count, accelx, accely, accelz, gyrox, gyroy, gyroz, batt)
sys.stdout.flush()
except KeyboardInterrupt:
print "All done"
| 36.938776 | 131 | 0.508287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.133702 |
1db266d927adb6c005db3a33cf6739b15458b37f | 597 | py | Python | astronat/core/__init__.py | nstarman/astronat | 9e1f41c6de1ca6adbd2bf99414a4c9b61838abf6 | [
"BSD-3-Clause"
] | 1 | 2020-11-20T18:25:26.000Z | 2020-11-20T18:25:26.000Z | astronat/core/__init__.py | nstarman/astronat | 9e1f41c6de1ca6adbd2bf99414a4c9b61838abf6 | [
"BSD-3-Clause"
] | 3 | 2020-09-09T06:10:20.000Z | 2020-09-16T05:56:10.000Z | astronat/core/__init__.py | nstarman/astronat | 9e1f41c6de1ca6adbd2bf99414a4c9b61838abf6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# see LICENSE.rst
"""Basic Astronomy Functions.
.. todo::
change this to C / pyx. whatever astropy's preferred C thing is.
"""
__author__ = ""
# __copyright__ = "Copyright 2018, "
# __credits__ = [""]
# __license__ = ""
# __version__ = "0.0.0"
# __maintainer__ = ""
# __email__ = ""
# __status__ = "Production"
# __all__ = [
# ""
# ]
##############################################################################
# IMPORTS
# BUILT IN
# THIRD PARTY
# PROJECT-SPECIFIC
##############################################################################
# END
| 15.710526 | 78 | 0.440536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 553 | 0.926298 |
1db2abb5c5f56c7175445d7b83c28b635555e35d | 28,922 | py | Python | CHEBYSHEV/TVB_Method/cheb_class.py | mtmoncur/RootFinding | 8d7c8f8d4fda158c83ff1a605f330db14b56ecc7 | [
"MIT"
] | null | null | null | CHEBYSHEV/TVB_Method/cheb_class.py | mtmoncur/RootFinding | 8d7c8f8d4fda158c83ff1a605f330db14b56ecc7 | [
"MIT"
] | null | null | null | CHEBYSHEV/TVB_Method/cheb_class.py | mtmoncur/RootFinding | 8d7c8f8d4fda158c83ff1a605f330db14b56ecc7 | [
"MIT"
] | null | null | null | import numpy as np
import itertools
from numpy.polynomial import chebyshev as cheb
"""
Module for defining the class of Chebyshev polynomials, as well as various related
classes and methods, including:
Classes:
-------
Polynomial: Superclass for MultiPower and MultiCheb. Contains methods and
attributes applicable to both subclasses
MultiCheb: Chebyshev polynomials in arbitrary dimension.
Term: Terms are just tuples of exponents with the degrevlex ordering
Methods:
--------
match_poly_dimensions(polys): Matches the dimensions of a list of polynomials.
mon_combos_highest(mon, numLeft): Find all the monomials of a given degree and
returns them. Works recursively.
mon_combos(mon, numLeft): Finds all the monomials _up to_ a given degree and
returns them. Works recursively.
sort_polys_by_degree(polys): Sorts the polynomials by their degree.
get_var_list(dim): Return a list of tuples corresponding to the variables
[x_1, x_2, ..., x_n]. The tuple for x_1 is (1,0,0,...,0), and for x_i
the 1 is in the ith slot.
slice_bottom(arr):Gets the nd slices needed to slice an array into the bottom
corner of another. There is probably a better (vectorized) way to do this.
slice_top(arr): Construct a list of slices needed to put an array into the upper
left corner of another. There is probably a better way to do this.
match_size(a,b): Reshape two coefficient ndarrays to have the same shape.
    makePolyCoeffMatrix(inputString): Take a string representation of a polynomial
        and return its coefficient matrix. Useful for constructing polynomials of
        high degree or dimension without building the matrix by hand.
"""
class Polynomial(object):
    '''
    Superclass for MultiPower and MultiCheb. Contains methods and attributes
    that are applicable to both subclasses.

    Attributes
    ----------
    coeff
        The coefficient matrix represented in the object.
    dim
        The number of dimensions of the coefficient matrix
    order
        Ordering type given as a string
    shape
        The shape of the coefficient matrix
    lead_term
        The polynomial term with the largest total degree
    degree
        The total degree of the lead_term
    lead_coeff
        The coeff of the lead_term

    Parameters
    ----------
    coeff : ndarray or str
        Coefficients of the polynomial, or a string parsable by
        makePolyCoeffMatrix.
    order : string
        Term order; only 'degrevlex' is implemented.
    lead_term : Tuple
        Default is None. Accepts tuple or tuple-like inputs.
    clean_zeros : bool
        Default is True. If True, all extra rows, columns, etc of all zeroes are
        removed from the matrix of coefficients.

    Methods
    -------
    clean_coeff
        Removes extra rows, columns, etc of zeroes from end of matrix of coefficients
    update_lead_term
        Finds the lead_term of a polynomial
    __call__
        Evaluates a polynomial at a certain point.
    __eq__
        Checks if two polynomials are equal.
    __ne__
        Checks if two polynomials are not equal.
    '''
    def __init__(self, coeff, order='degrevlex', lead_term=None, clean_zeros=True):
        '''
        order : string
            Term order to use for the polynomial. degrevlex is default.
            Currently no other order is implemented.
        '''
        if isinstance(coeff, np.ndarray):
            self.coeff = coeff
        elif isinstance(coeff, str):
            self.coeff = makePolyCoeffMatrix(coeff)
        else:
            raise ValueError('coeff must be an np.array or a string!')
        if clean_zeros:
            self.clean_coeff()
        self.dim = self.coeff.ndim
        self.order = order
        self.jac = None  # gradient cache, filled lazily by subclasses
        self.shape = self.coeff.shape
        if lead_term is None:
            self.update_lead_term()
        else:
            self.lead_term = tuple(lead_term)
            self.degree = sum(self.lead_term)
            self.lead_coeff = self.coeff[self.lead_term]

    def clean_coeff(self):
        """
        Remove 0s on the outside of the coeff matrix. Acts in place.
        """
        for axis in range(self.coeff.ndim):
            change = True
            while change:
                change = False
                if self.coeff.shape[axis] == 1:
                    # Cannot shrink this axis below length 1.
                    continue
                axisCount = 0
                slices = list()
                # Build an indexer selecting the last hyperplane along `axis`.
                for i in self.coeff.shape:
                    if axisCount == axis:
                        s = slice(i - 1, i)
                    else:
                        s = slice(0, i)
                    slices.append(s)
                    axisCount += 1
                # NumPy requires a tuple indexer here: indexing an ndarray with
                # a *list* of slices was deprecated in NumPy 1.15 and removed
                # in later releases.
                if np.sum(abs(self.coeff[tuple(slices)])) == 0:
                    self.coeff = np.delete(self.coeff, -1, axis=axis)
                    change = True

    def update_lead_term(self):
        """
        Update the lead term of the polynomial (largest nonzero term under
        the degrevlex order). Sets lead_term, degree and lead_coeff.
        """
        non_zeros = list()
        for i in zip(*np.where(self.coeff != 0)):
            non_zeros.append(Term(i))
        if len(non_zeros) != 0:
            self.lead_term = max(non_zeros).val
            self.degree = sum(self.lead_term)
            self.lead_coeff = self.coeff[self.lead_term]
        else:
            # Zero polynomial: no lead term, degree conventionally -1.
            self.lead_term = None
            self.lead_coeff = 0
            self.degree = -1

    def __call__(self, point):
        '''
        Evaluate the polynomial at 'point'. This method is overridden
        by the MultiPower and MultiCheb classes, so this definition only
        checks if the polynomial can be evaluated at the given point.

        Parameters
        ----------
        point : array-like
            the point at which to evaluate the polynomial

        Raises
        ------
        ValueError
            If the point's dimension does not match the polynomial's.
        '''
        if len(point) != len(self.coeff.shape):
            raise ValueError('Cannot evaluate polynomial in {} variables at point {}'\
            .format(self.dim, point))

    def grad(self, point):
        '''
        Evaluates the gradient of the polynomial at 'point'. This method is
        overridden by the MultiPower and MultiCheb classes, so this definition only
        checks if the polynomial can be evaluated at the given point.

        Parameters
        ----------
        point : array-like
            the point at which to evaluate the polynomial

        Raises
        ------
        ValueError
            If the point's dimension does not match the polynomial's.
        '''
        if len(point) != len(self.coeff.shape):
            raise ValueError('Cannot evaluate polynomial in {} variables at point {}'\
            .format(self.dim, point))

    def __eq__(self, other):
        '''
        Check if coeff matrices of 'self' and 'other' are the same.
        '''
        if self.shape != other.shape:
            return False
        return np.allclose(self.coeff, other.coeff)

    def __ne__(self, other):
        '''
        Check if coeff matrices of 'self' and 'other' are not the same.
        '''
        return not (self == other)
###############################################################################
#### MULTI_CHEB ###############################################################
class MultiCheb(Polynomial):
    """
    A Chebyshev polynomial.

    Attributes
    ----------
    coeff: ndarray
        A tensor of coefficients whose i_1,...,i_{dim} entry
        corresponds to the coefficient of the term
        T_{i_1}(x_1)...T_{i_{dim}}(x_{dim})
    dim:
        The number of variables, dimension of polynomial.
    order: string
        Term order
    shape: tuple of ints
        The shape of the coefficient array.
    lead_term:
        The term with the largest total degree.
    degree: int
        The total degree of lead_term.
    lead_coeff
        The coefficient of the lead_term.

    Parameters
    ----------
    coeff : list(terms**dim) or np.array ([terms,] * dim)
        coefficents in given ordering.
    order : string
        Term order for Groebner calculations. Default = 'degrevlex'
    lead_term : list
        The index of the current leading coefficent. If None, this is computed at initialization.
    clean_zeros: boolean
        If True, strip off any rows or columns of zeros on the outside of the coefficient array.

    Methods
    -------
    __add__
        Add two MultiCheb polynomials.
    __sub__
        Subtract two MultiCheb polynomials.
    mon_mult
        Multiply a MultiCheb monomial by a MultiCheb polynomial.
    __call__
        Evaluate a MultiCheb polynomial at a point.
    """
    def __init__(self, coeff, order='degrevlex', lead_term=None, clean_zeros=True):
        super(MultiCheb, self).__init__(coeff, order, lead_term, clean_zeros)

    def __add__(self, other):
        '''
        Addition of two MultiCheb polynomials.

        Parameters
        ----------
        other : MultiCheb

        Returns
        -------
        MultiCheb
            The sum of the coeff of self and coeff of other.
        '''
        if self.shape != other.shape:
            new_self, new_other = match_size(self.coeff, other.coeff)
        else:
            new_self, new_other = self.coeff, other.coeff
        return MultiCheb(new_self + new_other, clean_zeros=False)

    def __sub__(self, other):
        '''
        Subtraction of two MultiCheb polynomials.

        Parameters
        ----------
        other : MultiCheb

        Returns
        -------
        MultiCheb
            The coeff values are the result of self.coeff - other.coeff.
        '''
        if self.shape != other.shape:
            new_self, new_other = match_size(self.coeff, other.coeff)
        else:
            new_self, new_other = self.coeff, other.coeff
        return MultiCheb((new_self - (new_other)), clean_zeros=False)

    @staticmethod
    def _fold_in_i_dir(coeff_array, dim, fdim, size_in_fdim, fold_idx):
        """Find coeffs corresponding to T_|m-n| (referred to as 'folding' in
        some of this documentation) when multiplying a monomial times
        a Chebyshev polynomial.

        Multiplying the monomial T_m(x_i) times T_n(x_i) gives

        (T_{m+n}(x_i) + T_{|n-m|}(x_i))/2

        So multipying T_m(x_i) times polynomial P with coefficients
        in coeff_array results in a new coefficient array sol that has
        coeff_array in the bottom right corner plus a 'folded' copy of
        coeff_array in locations corresponding to |n-m|. This method
        returns the folded part (not dividing by 2)

        Parameters
        ----------
        coeff_array : ndarray
            coefficients of the polynomial.
        dim : int
            The number of dimensions in coeff_array.
        fdim : int
            The dimension being folded ('i' in the explanation above)
        size_in_fdim : int
            The size of the solution matrix in the dimension being folded.
        fold_idx : int
            The index to fold around ('m' in the explanation above)

        Returns
        -------
        sol : ndarray

        """
        if fold_idx == 0:
            return coeff_array

        target = np.zeros_like(coeff_array)  # Array of zeroes in which to insert
                                             # the new values.

        ## Compute the n-m part for n >= m
        # slice source and target in the dimension of interest (i = fdim)
        target_slice = slice(0, size_in_fdim - fold_idx, None)  # n-m for n>=m
        source_slice = slice(fold_idx, size_in_fdim, None)      # n for n>=m

        # indexers have a slice index for every dimension.
        source_indexer = [slice(None)] * dim
        source_indexer[fdim] = source_slice
        target_indexer = [slice(None)] * dim
        target_indexer[fdim] = target_slice

        # NumPy requires tuple indexers; indexing with a list of slices was
        # deprecated in NumPy 1.15 and later removed.
        target[tuple(target_indexer)] = coeff_array[tuple(source_indexer)]

        ## Compute the m-n part for n < m
        # slice source and target in the dimension of interest (i = fdim)
        target_slice = slice(fold_idx, 0, -1)        # m-n for n < m
        source_slice = slice(None, fold_idx, None)   # n for n < m

        # indexers have a slice index for every dimension.
        source_indexer = [slice(None)] * dim
        source_indexer[fdim] = source_slice
        target_indexer = [slice(None)] * dim
        target_indexer[fdim] = target_slice

        # Add the appropriately indexed source to the target
        target[tuple(target_indexer)] += coeff_array[tuple(source_indexer)]

        return target

    @staticmethod
    def _mon_mult1(coeff_array, monom, mult_idx):
        """
        Monomial multiply in one dimension, that is, T_m(x_i) * P(x_1,...,x_n),
        where P is a Chebyshev polynomial and T_m(x_i) is a Chebyshev monomial
        in the lone variable x_i.

        Parameters
        ----------
        coeff_array : array_like
            Coefficients of a Chebyshev polynomial (denoted P above).
        monom : tuple of ints
            Index of the form (0,0,...,0,m,0...,0) of a
            monomial of one variable
        mult_idx : int
            The location (denoted i above) of the non-zero value in monom.

        Returns
        -------
        ndarray
            Coeffs of the new polynomial T_m(x_i)*P.
        """
        # p1 holds the T_{m+n} contribution: coeff_array shifted into the
        # bottom corner of a larger array.
        p1 = np.zeros(coeff_array.shape + monom)
        p1[tuple(slice_bottom(coeff_array))] = coeff_array

        largest_idx = [i - 1 for i in coeff_array.shape]
        new_shape = [max(i, j) for i, j in
                     itertools.zip_longest(largest_idx, monom, fillvalue=0)]
        if coeff_array.shape[mult_idx] <= monom[mult_idx]:
            # Pad coeff_array so the fold below has room.
            add_a = [i - j for i, j in itertools.zip_longest(new_shape, largest_idx, fillvalue=0)]
            add_a_list = np.zeros((len(new_shape), 2))
            # change the second column to the values of add_a.
            add_a_list[:, 1] = add_a
            # use add_a_list to pad the polynomial appropriately.
            coeff_array = np.pad(coeff_array, add_a_list.astype(int), 'constant')

        number_of_dim = coeff_array.ndim
        shape_of_self = coeff_array.shape

        # T_{|n-m|} contribution.
        if monom[mult_idx] != 0:
            coeff_array = MultiCheb._fold_in_i_dir(coeff_array, number_of_dim,
                                                   mult_idx, shape_of_self[mult_idx],
                                                   monom[mult_idx])
        if p1.shape != coeff_array.shape:
            monom = [i - j for i, j in zip(p1.shape, coeff_array.shape)]
            result = np.zeros(np.array(coeff_array.shape) + monom)
            result[tuple(slice_top(coeff_array))] = coeff_array
            coeff_array = result
        Pf = p1 + coeff_array
        return .5 * Pf

    def mon_mult(self, monom, return_type='Poly'):
        """
        Multiply a Chebyshev polynomial by a monomial

        Parameters
        ----------
        monom : tuple of ints
            The index of the monomial to multiply self by.
        return_type : str
            If 'Poly' then returns a polynomial object.

        Returns
        -------
        MultiCheb object if return_type is 'Poly'.
        ndarray if return_type is "Matrix".

        """
        coeff_array = self.coeff
        monom_zeros = np.zeros(len(monom), dtype=int)
        # Multiply in one variable at a time.
        for i in range(len(monom)):
            monom_zeros[i] = monom[i]
            coeff_array = MultiCheb._mon_mult1(coeff_array, monom_zeros, i)
            monom_zeros[i] = 0
        if return_type == 'Poly':
            return MultiCheb(coeff_array, lead_term=self.lead_term + np.array(monom), clean_zeros=False)
        elif return_type == 'Matrix':
            return coeff_array

    def __call__(self, point):
        '''
        Evaluate the polynomial at 'point'.

        Parameters
        ----------
        point : array-like
            point at which to evaluate the polynomial

        Returns
        -------
        c : complex
            value of the polynomial at the given point
        '''
        super(MultiCheb, self).__call__(point)

        c = self.coeff
        n = len(c.shape)
        # Contract one coefficient axis per coordinate.
        c = cheb.chebval(point[0], c)
        for i in range(1, n):
            c = cheb.chebval(point[i], c, tensor=False)
        return c

    def grad(self, point):
        '''
        Evaluates the gradient of the polynomial at the given point.

        Parameters
        ----------
        point : array-like
            the point at which to evaluate the polynomial

        Returns
        -------
        out : ndarray
            Gradient of the polynomial at the given point.
        '''
        super(MultiCheb, self).__call__(point)

        out = np.empty(self.dim, dtype="complex_")
        if self.jac is None:
            # Cache the coefficient tensors of the partial derivatives.
            jac = list()
            for i in range(self.dim):
                jac.append(cheb.chebder(self.coeff, axis=i))
            self.jac = jac
        spot = 0
        for i in self.jac:
            out[spot] = chebvalnd(point, i)
            spot += 1

        return out
###############################################################################
def chebvalnd(x, c):
    """
    Evaluate an n-dimensional Chebyshev coefficient tensor at a point.

    Parameters
    ----------
    x : array-like
        Point to evaluate at.
    c : ndarray
        Tensor of Chebyshev coefficients.

    Returns
    -------
    float
        Value of the Chebyshev polynomial at x.
    """
    point = np.array(x)
    ndim = len(c.shape)
    # Contract the first coefficient axis, then one remaining axis per coordinate.
    values = cheb.chebval(point[0], c)
    for axis in range(1, ndim):
        values = cheb.chebval(point[axis], values, tensor=False)
    return values
def polyList(deg,dim,Type = 'random'):
    """
    Creates random polynomials for root finding.

    Parameters
    ----------
    deg : int
        Desired degree of the polynomials.
    dim : int
        Desired number of dimensions for the polynomials
    Type : str
        Either 'random' (uniform floats in [0,1)) or 'int' (integers in [-10,10)).

    Returns
    ----------
    polys : list
        MultiCheb polynomial objects that are used to test the root finding.
    """
    # A degree-deg polynomial needs deg+1 coefficients per axis.
    deg += 1
    polys = []
    if Type == 'random':
        for i in range(dim):
            polys.append(np.random.random_sample(deg*np.ones(dim, dtype = int)))
    elif Type == 'int':
        Range = 10
        for i in range(dim):
            polys.append(np.random.randint(-Range,Range,deg*np.ones(dim, dtype = int)))
    # Zero out every coefficient whose total degree is >= deg so the tensors
    # represent polynomials of total degree deg-1 (the requested degree).
    for i,j in np.ndenumerate(polys[0]):
        if np.sum(i) >= deg:
            for h in range(len(polys)):
                polys[h][i] = 0
    # Wrap the coefficient tensors as MultiCheb objects.
    for i in range(len(polys)):
        polys[i] = MultiCheb(polys[i])
    return polys
############# Cheb Utils ####################3
class TVBError(RuntimeError):
    """Error type for the TVB method.

    NOTE(review): not raised anywhere in this module; presumably raised by
    other modules of the TVB_Method package — confirm against callers.
    """
    pass
class Term(object):
    '''
    A monomial represented as a tuple of exponents, compared with the
    degrevlex ordering by default.
    '''
    def __init__(self, val):
        self.val = tuple(val)

    def __repr__(self):
        return str(self.val) + ' with degrevlex order'

    def __lt__(self, other, order='degrevlex'):
        '''
        Compare two terms under the given monomial order.
        '''
        if order == 'degrevlex':  # Graded Reverse Lexographical Order
            # Graded: lower total degree sorts first.
            if sum(self.val) != sum(other.val):
                return sum(self.val) < sum(other.val)
            # Tie-break: at the rightmost differing exponent, the LARGER
            # exponent belongs to the smaller term.
            for a, b in zip(reversed(self.val), reversed(other.val)):
                if a != b:
                    return a > b
            return False
        elif order == 'lexographic':  # Lexographical Order
            for a, b in zip(self.val, other.val):
                if a != b:
                    return a < b
            return False
        elif order == 'grlex':  # Graded Lexographical Order
            if sum(self.val) != sum(other.val):
                return sum(self.val) < sum(other.val)
            for a, b in zip(self.val, other.val):
                if a != b:
                    return a < b
            return False
def match_poly_dimensions(polys):
    '''Matches the dimensions of a list of polynomials.

    Polynomials of lower dimension are re-initialized with their coefficient
    tensor reshaped with leading length-1 axes, so every returned polynomial
    has the same number of dimensions.

    Parameters
    ----------
    polys : list
        Polynomials of possibly different dimensions.

    Returns
    -------
    new_polys : list
        The same polynomials but of the same dimensions.
    '''
    target_dim = max(p.dim for p in polys)
    matched = list()
    for p in polys:
        if p.dim != target_dim:
            # Prepend singleton axes until the dimensions agree.
            padded_shape = [1] * (target_dim - p.dim) + list(p.shape)
            p.__init__(p.coeff.reshape(padded_shape))
        matched.append(p)
    return matched
def mon_combos_highest(mon, numLeft, spot=0):
    '''Recursively list all monomials of exactly the given degree.

    Like mon_combos, but only the monomials whose total degree equals the
    requested degree are returned.

    Parameters
    ----------
    mon : list
        A list of zeros whose length is the dimension of the desired monomials.
        Mutated as the recursion proceeds.
    numLeft : int
        The degree still to be distributed among the remaining slots.
    spot : int
        The slot currently being filled; grows with each recursion level.

    Returns
    -------
    list
        All exponent lists of the requested total degree.
    '''
    found = list()
    if len(mon) == spot + 1:
        # Last slot: it must absorb all of the remaining degree.
        mon[spot] = numLeft
        found.append(mon.copy())
        return found
    if numLeft == 0:
        # No degree left to distribute; the monomial is complete.
        found.append(mon.copy())
        return found
    scratch = mon.copy()  # one copy, reused across the loop below
    for d in range(numLeft + 1):
        scratch[spot] = d
        found += mon_combos_highest(scratch, numLeft - d, spot + 1)
    return found
def mon_combos(mon, numLeft, spot=0):
    '''Recursively list all monomials up to the given degree.

    Parameters
    ----------
    mon : list
        A list of zeros whose length is the dimension of the desired monomials.
        Mutated as the recursion proceeds.
    numLeft : int
        The maximum degree still available for the remaining slots.
    spot : int
        The slot currently being filled; grows with each recursion level.

    Returns
    -------
    list
        All exponent lists with total degree at most the requested degree.
    '''
    found = list()
    if len(mon) == spot + 1:
        # Last slot: take every remaining degree from 0 up to numLeft.
        for d in range(numLeft + 1):
            mon[spot] = d
            found.append(mon.copy())
        return found
    if numLeft == 0:
        # No degree left to distribute; the monomial is complete.
        found.append(mon.copy())
        return found
    scratch = mon.copy()  # one copy, reused across the loop below
    for d in range(numLeft + 1):
        scratch[spot] = d
        found += mon_combos(scratch, numLeft - d, spot + 1)
    return found
def sort_polys_by_degree(polys, ascending=True):
    '''Sorts the polynomials by their degree.

    Parameters
    ----------
    polys : list
        A list of polynomials (each must have a ``degree`` attribute).
    ascending : bool
        Defaults to True. If True the polynomials are returned in order of
        ascending degree, otherwise in order of descending degree.

    Returns
    -------
    list
        The same polynomials, sorted by degree.
    '''
    # np.argsort is kept (rather than sorted(key=...)) to preserve the
    # original tie-breaking behavior exactly.
    order = np.argsort([poly.degree for poly in polys])
    by_degree = [polys[idx] for idx in order]
    return by_degree if ascending else by_degree[::-1]
def makePolyCoeffMatrix(inputString):
    '''
    Takes a string representation of a polynomial and returns its coefficient
    matrix. Useful for constructing polynomials of high degree or dimension
    without building the matrix by hand.

    All strings must be of the following syntax. Ex. '3x0^2+2.1x1^2*x2+-14.73x0*x2^3'

    1. There can be no spaces.
    2. All monomials must be seperated by a '+'. If the coefficient of the monomial is negative then the '-' sign
       should come after the '+'. This is not needed for the first monomial.
    3. All variables inside a monomial are seperated by a '*'.
    4. The power of a variable in a monomial is given folowing a '^' sign.

    NOTE(review): first_x is not defined in this module; presumably it is
    defined elsewhere and returns the index of the first variable token in a
    monomial string — confirm against the full package.
    '''
    matrixSpots = list()
    coefficients = list()
    for monomial in inputString.split('+'):
        # Everything before the first variable is the (possibly empty) coefficient.
        coefficientString = monomial[:first_x(monomial)]
        if coefficientString == '-':
            coefficient = -1
        elif coefficientString == '':
            coefficient = 1
        else:
            coefficient = float(coefficientString)
        # The rest of the monomial is '*'-separated variable factors.
        mons = monomial[first_x(monomial):].split('*')
        matrixSpot = [0]
        for mon in mons:
            stuff = mon.split('^')
            if len(stuff) == 1:
                power = 1  # no '^' means power 1
            else:
                power = int(stuff[1])
            if stuff[0] == '':
                varDegree = -1  # empty factor (constant monomial)
            else:
                varDegree = int(stuff[0][1:])  # variable index after the 'x'
            if varDegree != -1:
                # Grow the exponent list so index varDegree exists, then set it.
                if len(matrixSpot) <= varDegree:
                    matrixSpot = np.append(matrixSpot, [0]*(varDegree - len(matrixSpot)+1))
                matrixSpot[varDegree] = power
        matrixSpots.append(matrixSpot)
        coefficients.append(coefficient)
    #Pad the matrix spots so they are all the same length.
    length = max(len(matrixSpot) for matrixSpot in matrixSpots)
    for i in range(len(matrixSpots)):
        matrixSpot = matrixSpots[i]
        if len(matrixSpot) < length:
            matrixSpot = np.append(matrixSpot, [0]*(length - len(matrixSpot)))
            matrixSpots[i] = matrixSpot
    # The matrix must be one larger than the largest exponent in each variable.
    matrixSize = np.maximum.reduce([matrixSpot for matrixSpot in matrixSpots])
    matrixSize = matrixSize + np.ones_like(matrixSize)
    matrixSize = matrixSize[::-1] #So the variables are in the right order.
    matrix = np.zeros(matrixSize)
    for i in range(len(matrixSpots)):
        matrixSpot = matrixSpots[i][::-1] #So the variables are in the right order.
        coefficient = coefficients[i]
        matrix[tuple(matrixSpot)] = coefficient
    return matrix
def match_size(a, b):
    '''
    Matches the shape of two ndarrays by zero-padding each into the upper-left
    corner of an array of the elementwise-maximum shape.

    Parameters
    ----------
    a, b : ndarray
        Arrays whose size is to be matched.

    Returns
    -------
    a, b : ndarray
        Arrays of equal shape, zero-padded copies of the inputs.
    '''
    new_shape = np.maximum(a.shape, b.shape)

    # Tuple indexers are required: NumPy deprecated (1.15) and later removed
    # indexing with a list of slices, which the old slice_top-based code used.
    a_new = np.zeros(new_shape)
    a_new[tuple(slice(0, d) for d in a.shape)] = a
    b_new = np.zeros(new_shape)
    b_new[tuple(slice(0, d) for d in b.shape)] = b
    return a_new, b_new
def slice_top(arr):
    '''Construct the indexer needed to put an array into the upper left
    corner of another.

    Parameters
    ----------
    arr : ndarray
        The array of interest.

    Returns
    -------
    slices : tuple of slice
        One ``slice(0, n)`` per dimension of ``arr``. Returned as a tuple
        (previously a list) because NumPy requires tuple indexers for
        multidimensional indexing; indexing with a list of slices was
        deprecated in NumPy 1.15 and later removed. All known callers use
        the result only as an ndarray index, so a tuple is a drop-in
        replacement.
    '''
    return tuple(slice(0, n) for n in arr.shape)
def slice_bottom(arr):
    ''' Gets the n-d slices needed to slice an array into the bottom
    corner of another.

    Parameters
    ----------
    arr : ndarray
        The array of interest.

    Returns
    -------
    slices : tuple of slice
        One ``slice(-n, None)`` per dimension of ``arr``. Returned as a
        tuple (previously a list) because NumPy requires tuple indexers for
        multidimensional indexing; indexing with a list of slices was
        deprecated in NumPy 1.15 and later removed. All known callers use
        the result only as an ndarray index, so a tuple is a drop-in
        replacement.
    '''
    return tuple(slice(-n, None) for n in arr.shape)
def get_var_list(dim):
    '''Return the exponent tuples for the variables [x_1, ..., x_dim].

    The tuple for x_1 is (1,0,...,0); in general x_i's tuple has the 1 in
    the i-th slot and 0 everywhere else.
    '''
    return [tuple(int(col == row) for col in range(dim)) for row in range(dim)]
| 31.817382 | 118 | 0.577069 | 17,081 | 0.590588 | 0 | 0 | 0 | 0 | 0 | 0 | 15,902 | 0.549824 |
1db3723a72818283ab89562ed96cdc5069ece039 | 5,072 | py | Python | 18-Make-and-build/42-Latex/run_45-Copy-latex-typo3-stuff.py | marble/Toolchain_RenderDocumentation | 1b206c0478b7418a628233e9e1fd36eeb4224185 | [
"MIT"
] | null | null | null | 18-Make-and-build/42-Latex/run_45-Copy-latex-typo3-stuff.py | marble/Toolchain_RenderDocumentation | 1b206c0478b7418a628233e9e1fd36eeb4224185 | [
"MIT"
] | 9 | 2016-09-05T19:24:57.000Z | 2018-12-05T16:11:55.000Z | 18-Make-and-build/42-Latex/run_45-Copy-latex-typo3-stuff.py | marble/Toolchain_RenderDocumentation | 1b206c0478b7418a628233e9e1fd36eeb4224185 | [
"MIT"
] | 2 | 2017-04-08T10:12:48.000Z | 2020-08-14T13:10:42.000Z | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import shutil
import stat
import sys
import tct
from os.path import exists as ospe, join as ospj
from tct import deepget
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
workdir = params['workdir']
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Helper functions
# --------------------------------------------------
def lookup(D, *keys, **kwdargs):
    """Fetch a nested value from dict *D* via tct.deepget and record the
    (keys, result) pair in the module-level loglist for later inspection."""
    result = deepget(D, *keys, **kwdargs)
    loglist.append((keys, result))
    return result
# ==================================================
# define
# --------------------------------------------------
copied_latex_resources = []
run_latex_make_sh_file = None
xeq_name_cnt = 0

# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
    loglist.append('CHECK PARAMS')
    make_latex = lookup(milestones, 'make_latex', default=None)
    if not make_latex:
        # Bugfix: this line was 'CONTINUE == -2' — a comparison whose result
        # was discarded — so "nothing to do" never actually disabled the
        # following sections. Assign the sentinel, mirroring the pattern
        # used for the bad-params case below.
        CONTINUE = -2
        reason = 'Nothing to do'

if exitcode == CONTINUE:
    build_latex = lookup(milestones, 'build_latex', default=None)
    builder_latex_folder = lookup(milestones, 'builder_latex_folder', default=None)
    latex_contrib_typo3_folder = lookup(milestones,
                                        'latex_contrib_typo3_folder',
                                        default=None)
    # All three milestones are required for this step to make sense.
    if not (1
            and build_latex
            and builder_latex_folder
            and latex_contrib_typo3_folder):
        CONTINUE = -2
        reason = 'Bad params or nothing to do'

if exitcode == CONTINUE:
    loglist.append('PARAMS are ok')
else:
    loglist.append('Bad PARAMS or nothing to do')
# ==================================================
# work
# --------------------------------------------------
# Verify the source folder with the TYPO3 LaTeX contrib files exists.
if exitcode == CONTINUE:
    if not os.path.isdir(latex_contrib_typo3_folder):
        exitcode = 22
        reason = 'Folder does not exist'

# Copy the whole contrib folder into the LaTeX builder folder.
if exitcode == CONTINUE:
    foldername = os.path.split(latex_contrib_typo3_folder)[1]
    destpath = ospj(builder_latex_folder, foldername)
    shutil.copytree(latex_contrib_typo3_folder, destpath)

# Write an executable run-make.sh wrapper that points TEXINPUTS at the
# copied texmf_typo3 folder before running make.
if exitcode == CONTINUE:
    run_latex_make_sh_file = ospj(builder_latex_folder, 'run-make.sh')
    f2text = (
        "#!/bin/bash\n"
        "\n"
        "# This is run-make.sh\n"
        "\n"
        'scriptdir=$( cd $(dirname "$0") ; pwd -P )'
        "\n"
        "# cd to this dir\n"
        "pushd \"$scriptdir\" >/dev/null\n"
        "\n"
        "# set environment var pointing to the folder and run make\n"
        "TEXINPUTS=::texmf_typo3 make\n"
        "\n"
        "popd >/dev/null\n"
        "\n"
    )
    with open(run_latex_make_sh_file, 'w') as f2:
        f2.write(f2text)
    # Add the executable bits for user, group and other.
    file_permissions = (os.stat(run_latex_make_sh_file).st_mode | stat.S_IXUSR
                        | stat.S_IXGRP
                        | stat.S_IXOTH)
    os.chmod(run_latex_make_sh_file, file_permissions)

# Patch the generated Makefile so latexmk runs non-interactively.
# A pristine copy is kept as Makefile.original (only on first run).
if exitcode == CONTINUE:
    makefile_path = ospj(builder_latex_folder, 'Makefile')
    makefile_original_path = makefile_path + '.original'
    if ospe(makefile_path) and not ospe(makefile_original_path):
        shutil.copy2(makefile_path, makefile_original_path)
    with open(makefile_path, 'rb') as f1:
        data = f1.read()
    # NOTE(review): the file is read in binary mode but re.subn is given str
    # patterns — fine on Python 2 (which this script targets, see the
    # __future__ imports), would raise TypeError on Python 3. Confirm before
    # porting.
    data, cnt = re.subn("LATEXMKOPTS[ ]*=[ ]*\n", "\n\n\n\nLATEXMKOPTS = -interaction=nonstopmode\n\n\n\n\n", data)
    if cnt:
        with open(makefile_path, 'wb') as f2:
            f2.write(data)
# ==================================================
# Set MILESTONE
# --------------------------------------------------
# NOTE(review): copied_latex_resources is initialized to [] above and never
# appended to anywhere in this script, so this milestone is currently dead.
if copied_latex_resources:
    result['MILESTONES'].append({'copied_latex_resources':
                                 copied_latex_resources})

if run_latex_make_sh_file:
    result['MILESTONES'].append({'run_latex_make_sh_file':
                                 run_latex_make_sh_file})

# ==================================================
# save result
# --------------------------------------------------
# Persist loglist, milestones and the outcome for the toolchain runner.
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)

# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
| 30.371257 | 115 | 0.536475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,844 | 0.363565 |
1db591283c587148c660788ee47bee65cf843d14 | 693 | py | Python | src/tact/server/websocket.py | wabain/tact | bd95608bebc640e47f31f6d0a403108fe998188d | [
"MIT"
] | null | null | null | src/tact/server/websocket.py | wabain/tact | bd95608bebc640e47f31f6d0a403108fe998188d | [
"MIT"
] | null | null | null | src/tact/server/websocket.py | wabain/tact | bd95608bebc640e47f31f6d0a403108fe998188d | [
"MIT"
] | null | null | null | """Base definitions for the websocket interface"""
import json
from abc import ABC, abstractmethod
class WebsocketConnectionLost(Exception):
    """Raised when a websocket connection drops before an operation completes."""
class AbstractWSManager(ABC):
    """Abstract manager for websocket connections.

    Concrete subclasses supply the transport (`_send_serialized`, `close`);
    `send` holds the shared JSON-serialization logic.
    """

    async def send(self, conn_id: str, msg: dict) -> None:
        """Serialize *msg* as compact JSON and deliver it on *conn_id*.

        Fix: this method was decorated ``@abstractmethod`` even though it
        carries the shared implementation, which forced every subclass to
        re-implement it and discarded the serialization below. Serialization
        is kept in a separate method primarily for test convenience.
        """
        serialized = json.dumps(msg, separators=(',', ':'))
        await self._send_serialized(conn_id, serialized)

    @abstractmethod
    async def _send_serialized(self, conn_id: str, msg: str) -> None:
        """Transport-specific delivery of an already-serialized message."""
        raise NotImplementedError

    @abstractmethod
    async def close(self, conn_id: str):
        """Close the connection identified by *conn_id*."""
        raise NotImplementedError
| 27.72 | 69 | 0.695527 | 588 | 0.848485 | 0 | 0 | 492 | 0.709957 | 432 | 0.623377 | 130 | 0.18759 |
1db65875e73e38c64b16e13b346efb8fec1b7649 | 10,312 | py | Python | wiimatch/match.py | mcara/wiimatch | c6126a6148588d60dbed68c46a7da14f71410f7f | [
"BSD-3-Clause"
] | 2 | 2019-08-14T12:46:05.000Z | 2021-03-30T18:17:47.000Z | wiimatch/match.py | mcara/wiimatch | c6126a6148588d60dbed68c46a7da14f71410f7f | [
"BSD-3-Clause"
] | 4 | 2019-08-07T11:37:34.000Z | 2019-08-21T16:54:56.000Z | wiimatch/match.py | mcara/wiimatch | c6126a6148588d60dbed68c46a7da14f71410f7f | [
"BSD-3-Clause"
] | 2 | 2019-08-05T16:44:14.000Z | 2019-08-21T13:13:21.000Z | """
A module that provides main API for optimal (LSQ) "matching" of weighted
N-dimensional image intensity data using (multivariate) polynomials.
:Author: Mihai Cara (contact: help@stsci.edu)
:License: :doc:`../LICENSE`
"""
import numpy as np
from .lsq_optimizer import build_lsq_eqs, pinv_solve, rlu_solve
__all__ = ['match_lsq']
SUPPORTED_SOLVERS = ['RLU', 'PINV']
def match_lsq(images, masks=None, sigmas=None, degree=0,
              center=None, image2world=None, center_cs='image',
              ext_return=False, solver='RLU'):
    r"""
    Compute coefficients of (multivariate) polynomials that once subtracted
    from input images would provide image intensity matching in the least
    squares sense.

    Parameters
    ----------
    images : list of numpy.ndarray
        A list of 1D, 2D, etc. `numpy.ndarray` data array whose "intensities"
        must be "matched". All arrays must have identical shapes.

    masks : list of numpy.ndarray, None
        A list of `numpy.ndarray` arrays of same length as ``images``.
        Non-zero mask elements indicate valid data in the corresponding
        ``images`` array. Mask arrays must have identical shape to that of
        the arrays in input ``images``. Default value of `None` indicates that
        all pixels in input images are valid.

    sigmas : list of numpy.ndarray, None
        A list of `numpy.ndarray` data array of same length as ``images``
        representing the uncertainties of the data in the corresponding array
        in ``images``. Uncertainty arrays must have identical shape to that of
        the arrays in input ``images``. The default value of `None` indicates
        that all pixels will be assigned equal weights.

    degree : iterable, int
        A list of polynomial degrees for each dimension of data arrays in
        ``images``. The length of the input list must match the dimensionality
        of the input images. When a single integer number is provided, it is
        assumed that the polynomial degree in each dimension is equal to
        that integer.

    center : iterable, None, optional
        An iterable of length equal to the number of dimensions in
        ``image_shape`` that indicates the center of the coordinate system
        in **image** coordinates when ``center_cs`` is ``'image'`` otherwise
        center is assumed to be in **world** coordinates (when ``center_cs``
        is ``'world'``). When ``center`` is `None` then ``center`` is
        set to the middle of the "image" as ``center[i]=image_shape[i]//2``.
        If ``image2world`` is not `None` and ``center_cs`` is ``'image'``,
        then supplied center will be converted to world coordinates.

    image2world : function, None, optional
        Image-to-world coordinates transformation function. This function
        must be of the form ``f(x,y,z,...)`` and accept a number of arguments
        `numpy.ndarray` arguments equal to the dimensionality of images.

    center_cs : {'image', 'world'}, optional
        Indicates whether ``center`` is in image coordinates or in world
        coordinates. This parameter is ignored when ``center`` is set to
        `None`: it is assumed to be `False`. ``center_cs`` *cannot be*
        ``'world'`` when ``image2world`` is `None` unless ``center`` is `None`.

    ext_return : bool, optional
        Indicates whether this function should return additional values besides
        optimal polynomial coefficients (see ``bkg_poly_coeff`` return value
        below) that match image intensities in the LSQ sense. See **Returns**
        section for more details.

    solver : {'RLU', 'PINV'}, optional
        Specifies method for solving the system of equations.

    Returns
    -------
    bkg_poly_coeff : numpy.ndarray
        When ``nimages`` is `None`, this function returns a 1D `numpy.ndarray`
        that holds the solution (polynomial coefficients) to the system.

        When ``nimages`` is **not** `None`, this function returns a 2D
        `numpy.ndarray` that holds the solution (polynomial coefficients)
        to the system. The solution is grouped by image.

    a : numpy.ndarray
        A 2D `numpy.ndarray` that holds the coefficients of the linear system
        of equations. This value is returned only when ``ext_return``
        is `True`.

    b : numpy.ndarray
        A 1D `numpy.ndarray` that holds the free terms of the linear system of
        equations. This value is returned only when ``ext_return`` is `True`.

    coord_arrays : list
        A list of `numpy.ndarray` coordinate arrays each of ``image_shape``
        shape. This value is returned only when ``ext_return`` is `True`.

    eff_center : tuple
        A tuple of coordinates of the effective center as used in generating
        coordinate arrays. This value is returned only when ``ext_return``
        is `True`.

    coord_system : {'image', 'world'}
        Coordinate system of the coordinate arrays and returned ``center``
        value. This value is returned only when ``ext_return`` is `True`.

    Notes
    -----
    :py:func:`match_lsq` builds a system of linear equations

    .. math::
        a \cdot c = b

    whose solution :math:`c` is a set of coefficients of (multivariate)
    polynomials that represent the "background" in each input image (these are
    polynomials that are "corrections" to intensities of input images) such
    that the following sum is minimized:

    .. math::
        L = \sum^N_{n,m=1,n \neq m} \sum_k
        \frac{\left[I_n(k) - I_m(k) - P_n(k) + P_m(k)\right]^2}
        {\sigma^2_n(k) + \sigma^2_m(k)}.

    In the above equation, index :math:`k=(k_1,k_2,...)` labels a position
    in input image's pixel grid [NOTE: all input images share a common
    pixel grid].

    "Background" polynomials :math:`P_n(k)` are defined through the
    corresponding coefficients as:

    .. math::
        P_n(k_1,k_2,...) = \sum_{d_1=0,d_2=0,...}^{D_1,D_2,...}
        c_{d_1,d_2,...}^n \cdot k_1^{d_1} \cdot k_2^{d_2} \cdot \ldots .

    Coefficients :math:`c_{d_1,d_2,...}^n` are arranged in the vector :math:`c`
    in the following order:

    .. math::
        (c_{0,0,\ldots}^1,c_{1,0,\ldots}^1,\ldots,c_{0,0,\ldots}^2,
        c_{1,0,\ldots}^2,\ldots).

    :py:func:`match_lsq` returns coefficients of the polynomials that
    minimize *L*.

    Examples
    --------
    >>> import wiimatch
    >>> import numpy as np
    >>> im1 = np.zeros((5, 5, 4), dtype=float)
    >>> cbg = 1.32 * np.ones_like(im1)
    >>> ind = np.indices(im1.shape, dtype=float)
    >>> im3 = cbg + 0.15 * ind[0] + 0.62 * ind[1] + 0.74 * ind[2]
    >>> mask = np.ones_like(im1, dtype=np.int8)
    >>> sigma = np.ones_like(im1, dtype=float)
    >>> wiimatch.match.match_lsq([im1, im3], [mask, mask], [sigma, sigma],
    ... degree=(1,1,1), center=(0,0,0))  # doctest: +FLOAT_CMP
    array([[-6.60000000e-01, -7.50000000e-02, -3.10000000e-01,
            -6.96331881e-16, -3.70000000e-01, -1.02318154e-15,
            -5.96855898e-16,  2.98427949e-16],
           [ 6.60000000e-01,  7.50000000e-02,  3.10000000e-01,
             6.96331881e-16,  3.70000000e-01,  1.02318154e-15,
             5.96855898e-16, -2.98427949e-16]])

    """
    # Fail fast on an unsupported solver. Previously any unrecognized value
    # silently fell through to the PINV branch.
    if solver not in SUPPORTED_SOLVERS:
        raise ValueError("'solver' must be one of {}".format(SUPPORTED_SOLVERS))

    # check that all images have the same shape:
    shapes = set()
    for im in images:
        shapes.add(im.shape)
    if len(shapes) > 1:
        raise ValueError("All images must have identical shapes.")

    nimages = len(images)
    ndim = len(images[0].shape)

    # check that the number of good pixel mask arrays matches the numbers
    # of input images, and if 'masks' is None - set all of them to True.
    # NOTE: 'dtype=bool'/'dtype=float' replace the 'np.bool'/'np.float'
    # aliases, which were deprecated in NumPy 1.20 and removed in 1.24.
    if masks is None:
        masks = [np.ones_like(images[0], dtype=bool) for i in images]
    else:
        if len(masks) != nimages:
            raise ValueError("Length of masks list must match the length of "
                             "the image list.")

        # 'shapes' keeps accumulating so masks must also match image shapes.
        for m in masks:
            shapes.add(m.shape)
        if len(shapes) > 1:
            raise ValueError("Shape of each mask array must match the shape "
                             "of input images.")

    # make a copy of the masks since we might modify these masks later
    masks = [m.copy() for m in masks]

    # check that the number of sigma arrays matches the numbers
    # of input images, and if 'sigmas' is None - set all of them to 1:
    if sigmas is None:
        sigmas = [np.ones_like(images[0], dtype=float) for i in images]
    else:
        if len(sigmas) != nimages:
            raise ValueError("Length of sigmas list must match the length of "
                             "the image list.")

        for s in sigmas:
            shapes.add(s.shape)
        if len(shapes) > 1:
            raise ValueError("Shape of each sigma array must match the shape "
                             "of input images.")

    # check that 'degree' has the same length as the number of dimensions
    # in image arrays:
    if hasattr(degree, '__iter__'):
        if len(degree) != ndim:
            raise ValueError("The length of 'degree' parameter must match "
                             "the number of image dimensions.")
        degree = tuple([int(d) for d in degree])

    else:
        intdeg = int(degree)
        degree = tuple([intdeg for i in range(ndim)])

    # check that 'center' has the same length as the number of dimensions
    # in image arrays:
    if hasattr(center, '__iter__'):
        if len(center) != ndim:
            raise ValueError("The length of 'center' parameter must match "
                             "the number of image dimensions.")

    elif center is not None:
        center = tuple([center for i in range(ndim)])

    # build the system of equations:
    a, b, coord_arrays, eff_center, coord_system = build_lsq_eqs(
        images,
        masks,
        sigmas,
        degree,
        center=center,
        image2world=image2world,
        center_cs=center_cs
    )

    # solve the system:
    if solver == 'RLU':
        bkg_poly_coef = rlu_solve(a, b, nimages)
    else:
        # PINV needs a tolerance scaled to the image dtype's precision.
        tol = np.finfo(images[0].dtype).eps**(2.0 / 3.0)
        bkg_poly_coef = pinv_solve(a, b, nimages, tol)

    if ext_return:
        return bkg_poly_coef, a, b, coord_arrays, eff_center, coord_system

    else:
        return bkg_poly_coef
| 39.060606 | 79 | 0.624903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,021 | 0.777832 |
1db68436f9947e25427cf7df6c8bf3c1b129ddd8 | 2,705 | py | Python | tests/test_utils.py | plus3it/WatchMaker | 0b9d773642709e5b2a2f4a9aac4d682917901b72 | [
"Apache-2.0"
] | 39 | 2017-03-07T15:39:47.000Z | 2022-02-03T00:51:33.000Z | tests/test_utils.py | plus3it/WatchMaker | 0b9d773642709e5b2a2f4a9aac4d682917901b72 | [
"Apache-2.0"
] | 851 | 2016-06-01T00:35:46.000Z | 2022-03-28T12:03:17.000Z | tests/test_utils.py | eemperor/watchmaker | a007297bd4549e064019d8212ca8cb4da4636686 | [
"Apache-2.0"
] | 55 | 2016-06-01T14:15:46.000Z | 2021-12-23T14:31:26.000Z | # -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name,protected-access
"""Salt worker main test module."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import watchmaker.utils
try:
from unittest.mock import patch
except ImportError:
from mock import patch
@patch('os.path.exists', autospec=True)
@patch('shutil.rmtree', autospec=True)
@patch('shutil.copytree', autospec=True)
def test_copytree_no_force(mock_copy, mock_rm, mock_exists):
    """Test that copytree results in correct calls without force option."""
    src = 'aba51e65-afd2-5020-8117-195f75e64258'
    dst = 'f74d03de-7c1d-596f-83f3-73748f2e238f'

    # Both the default call and an explicit force=False behave identically:
    # the tree is copied and neither the existence check nor the removal
    # of the destination ever happens.
    for kwargs in ({}, {'force': False}):
        watchmaker.utils.copytree(src, dst, **kwargs)
        mock_copy.assert_called_with(src, dst)
        assert mock_rm.call_count == 0
        assert mock_exists.call_count == 0
@patch('os.path.exists', autospec=True)
@patch('shutil.rmtree', autospec=True)
@patch('shutil.copytree', autospec=True)
def test_copytree_force(mock_copy, mock_rm, mock_exists):
    """Test that copytree results in correct calls with force option."""
    src = '44b6df59-db6f-57cb-a570-ccd55d782561'
    dst = '72fe7962-a7af-5f2f-899b-54798bc5e79f'

    watchmaker.utils.copytree(src, dst, force=True)

    # With force=True the destination must be checked for, removed,
    # and then the tree copied.
    mock_exists.assert_called_with(dst)
    mock_rm.assert_called_with(dst)
    mock_copy.assert_called_with(src, dst)
def test_clean_none():
    """Check string 'None' conversion to None."""
    # Any spelling of "none" (and None itself) is normalized to a falsy value.
    for value in ('None', 'none', None):
        assert not watchmaker.utils.clean_none(value)

    # Anything else passes through unchanged.
    assert watchmaker.utils.clean_none('not none') == 'not none'
@patch('os.path.exists', autospec=True)
@patch('watchmaker.utils.copytree', autospec=True)
@patch("os.walk", autospec=True)
def test_copy_subdirectories(mock_os, mock_copy, mock_exists):
    """Test that copy_subdirectories executes expected calls."""
    random_src = '580a9176-20f6-4f64-b77a-75dbea14d74f'
    random_dst = '6538965c-5131-414a-897f-b01f7dfb6c2b'

    # Destination does not exist, so the subdirectory should be copied.
    mock_exists.return_value = False

    # Simulated os.walk output: one subdirectory '87a2a74d' containing two
    # files. NOTE(review): os.walk yields 3-tuples (dirpath, dirnames,
    # filenames) but the first entry here only has two elements — confirm
    # copy_subdirectories only consumes the fields present.
    # .__iter__() turns the list into a one-shot iterator, like a real walk.
    mock_os.return_value = [
        ('580a9176-20f6-4f64-b77a-75dbea14d74f', ('87a2a74d',)),
        ('580a9176-20f6-4f64-b77a-75dbea14d74f/87a2a74d', (),
         ('6274fd83', '1923c65a')),
    ].__iter__()

    watchmaker.utils.copy_subdirectories(random_src, random_dst, None)

    # Exactly one subdirectory tree should be copied.
    assert mock_copy.call_count == 1
| 37.569444 | 75 | 0.737893 | 0 | 0 | 0 | 0 | 2,050 | 0.757856 | 0 | 0 | 880 | 0.325323 |
1db85436a1e7560cd1e4c27e632933b72b6c79d7 | 18,256 | py | Python | shop/src/transaction_logic.py | gryan12/anonymous_ecommerce | 9c46589f6005fd0f7433e024e0d705fdd6038e53 | [
"Apache-2.0"
] | null | null | null | shop/src/transaction_logic.py | gryan12/anonymous_ecommerce | 9c46589f6005fd0f7433e024e0d705fdd6038e53 | [
"Apache-2.0"
] | null | null | null | shop/src/transaction_logic.py | gryan12/anonymous_ecommerce | 9c46589f6005fd0f7433e024e0d705fdd6038e53 | [
"Apache-2.0"
] | null | null | null | import logging
import random
import time
import src.support.outbound_routing as ob
from src.support.creds import build_cred, build_proof_request, build_schema, build_credential_proposal, build_proof_proposal
import src.support.settings as config
# This file containst the functions that perform transaction-specific
# calls, building the needed requests and sending them to the aries agents
# Schema names used across the purchase flow; each corresponds to a
# credential registered by one of the register_*_schema helpers below.
CRED_NAMES = [
    "payment_agreement",
    "payment_credential",
    "package_cred",
    "received_package",
]
##User
#User -> Vendor
def send_payment_agreement_proposal(product_id):
    """User -> Vendor: propose a payment-agreement credential for *product_id*.

    Records *product_id* on the agent state if not already set, then sends
    an aries issue-credential proposal over the current connection.
    """
    if not config.agent_data.product_id:
        config.agent_data.update_product_id(product_id)
    proposal = {
        "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview",
        "attributes": [
            {
                "name": "product_id",
                "value": product_id
            },
        ]
    }

    offer_json = build_credential_proposal(
        config.agent_data.current_connection,
        comment="request for payment agreement credential",
        schema_name="payment agreement",
        prop_schema=proposal
    )
    resp = ob.send_cred_proposal(offer_json)
    return resp
def send_payment_agreement_cred_offer(conn_id, creddef_id, product_id, value=None, endpoint="placeholder_endpoint"):
    """Vendor -> User: offer a payment-agreement credential.

    The offer carries the payment endpoint, price (*value*), product id and
    a current timestamp; the built preview is cached on agent state.
    """
    logging.debug("Issue credential to user")
    # NOTE(review): stray debug print — the rest of this module uses
    # logging.debug for this purpose.
    print("value is : ", value, " product_id is: ", product_id)
    builder = build_cred(creddef_id)
    builder.with_attribute({"payment_endpoint": endpoint}) \
        .with_attribute({"timestamp": str(int(time.time()))}) \
        .with_attribute({"amount": value}) \
        .with_attribute({"product_id": product_id}) \
        .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \
        .with_conn_id(conn_id)

    offer_req = builder.build_offer("purchase request")
    config.agent_data.previews[creddef_id] = builder.build_preview()
    return ob.send_cred_offer(offer_req)
#User -> Bank
def propose_proof_of_payment_agreement(connection_id, cred_def_id):
    """User -> Bank: propose to prove possession of the payment agreement."""
    proposal = build_proof_proposal(
        "proof_of_payment_agreement"
    ).withAttribute(
        "payment_endpoint",
        cred_def_id,
    ).withAttribute(
        "amount",
        cred_def_id
    ).withAttribute(
        "timestamp",
        cred_def_id
    ).build(connection_id, comment="proof of payment agreement")
    return ob.send_proof_proposal(proposal)

#User -> Vendor
def propose_proof_of_payment(connection_id, cred_def_id=None):
    """User -> Vendor: propose to prove that payment was made."""
    proposal = build_proof_proposal(
        "proof_of_payment"
    ).withAttribute(
        "transaction_no",
        cred_def_id,
    ).withAttribute(
        "timestamp",
        cred_def_id
    ).build(connection_id, comment="wanna prove payhment")
    return ob.send_proof_proposal(proposal)

def refuse_payment_agreement(conn_id, creddef_id):
    """Placeholder: vendor-side refusal of a payment agreement (unimplemented)."""
    #todo: return a problem report if vendor cant/wont sell
    return None
def request_proof_of_payment_agreement(creddef_id = None):
    """Request proof of the payment agreement (endpoint, timestamp, amount)
    restricted to *creddef_id*, over the current connection.

    Returns an error dict when no credential definition id is supplied.
    """
    if not creddef_id:
        return {"error": "no creddef id"}
    builder = build_proof_request(name="proof of payment agreement", version="1.0")
    req = builder.withAttribute(
        "payment_endpoint",
        restrictions=[{"cred_def_id": creddef_id}]
    ).withAttribute(
        "timestamp",
        restrictions=[{"cred_def_id": creddef_id}]
    ).withAttribute(
        "amount",
        restrictions=[{"cred_def_id": creddef_id}]
    ).with_conn_id(config.agent_data.current_connection).build()

    return ob.send_proof_request(req)
#### Stage 2: Payment;
#Bank -> User
def send_payment_cred_offer(conn_id, creddef_id):
    """Bank -> User: offer a payment credential with a fresh transaction no.

    The generated transaction number is also stored on the agent state.
    """
    transaction_no = gen_transaction_id()
    config.agent_data.transaction_no = transaction_no
    logging.debug("Issue credential to user")
    builder = build_cred(creddef_id)
    builder.with_attribute({"transaction_no": transaction_no}) \
        .with_attribute({"timestamp": str(int(time.time()))}) \
        .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \
        .with_conn_id(conn_id)

    offer_req = builder.build_offer("payment credential issuance")
    config.agent_data.previews[creddef_id] = builder.build_preview()
    return ob.send_cred_offer(offer_req)
#stage 3: proving payment
#Vendor -> User
def request_proof_of_payment(creddef_id = None, presex_id=None):
    """Vendor -> User: request proof of payment (transaction no + timestamp).

    Falls back to the payment cred-def stored on agent state when
    *creddef_id* is not given; errors out if neither is available.
    """
    if not creddef_id:
        if not config.agent_data.payment_creddef:
            return {"error": "no creddef id"}
        else:
            creddef_id = config.agent_data.payment_creddef

    builder = build_proof_request(name="proof of payment", version="1.0")
    req = builder.withAttribute(
        "transaction_no",
        restrictions=[{"cred_def_id": creddef_id}]
    ).withAttribute(
        "timestamp",
        restrictions=[{"cred_def_id": creddef_id}]
    ).with_conn_id(config.agent_data.current_connection).build()

    return ob.send_proof_request(req, presex_id)
##### PROOF PACKAGE AT SHIPPING SERVICE ######
#Vendor -> User
def propose_proof_of_dispatch(connection_id, cred_def_id):
    """Vendor -> User: propose proof that the package was dispatched."""
    proposal = build_proof_proposal(
        "proof_of_dispatch"
    ).withAttribute(
        "package_no",
        cred_def_id,
    ).withAttribute(
        "timestamp",
        cred_def_id
    ).build(connection_id, comment="Package is at shipping service")
    return ob.send_proof_proposal(proposal)
#User -> Vendor
def request_proof_of_dispatch(creddef_id=None, presex_id=None):
    """User -> Vendor: request proof of dispatch (package no + timestamp).

    Falls back to the payment cred-def stored on agent state when
    *creddef_id* is not given; errors out if neither is available.
    """
    if not creddef_id:
        # Bugfix: this previously read ``config.payment_creddef``, but the
        # attribute lives on ``config.agent_data`` (see the assignment in
        # register_payment_schema and the identical fallback in
        # request_proof_of_payment); the old code raised AttributeError.
        if not config.agent_data.payment_creddef:
            return {"error": "no creddef id"}
        else:
            creddef_id = config.agent_data.payment_creddef

    builder = build_proof_request(name="proof of dispatch", version="1.0")
    req = builder.withAttribute(
        "timestamp",
        restrictions=[{"cred_def_id": creddef_id}]
    ).withAttribute(
        "package_no",
        restrictions=[{"cred_def_id": creddef_id}]
    ).with_conn_id(config.agent_data.current_connection).build()

    return ob.send_proof_request(req, presex_id)
##############################################
####END Stage 2
####START Stage 3: Package ownership
#Vendor -> user
def send_package_cred_offer(conn_id, creddef_id):
    """Vendor -> User: offer a package credential with a fresh package number.

    The generated package number is stored on agent state; the credential
    marks the package as dispatched to the shipping service.
    """
    logging.debug("Issue credential to user")
    package_no = gen_package_no()
    config.agent_data.update_package_no(package_no)
    builder = build_cred(creddef_id)
    builder.with_attribute({"package_no": package_no}) \
        .with_attribute({"timestamp": str(int(time.time()))}) \
        .with_attribute({"status": "dispatched_to_shipping_service"}) \
        .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \
        .with_conn_id(conn_id)

    offer_req = builder.build_offer("package credential issuance")
    config.agent_data.previews[creddef_id] = builder.build_preview()
    return ob.send_cred_offer(offer_req)
#User -> Shipper
# todo self attest address
def propose_proof_of_ownership(conn_id, creddef_id):
    """User -> Shipper: propose proof of package ownership.

    'shipping_address' carries no cred-def restriction, i.e. it is
    self-attested by the prover.
    """
    builder = build_proof_proposal("proof of package ownership")
    req = builder.withAttribute(
        "package_no",
        cred_def_id=creddef_id
    ).withAttribute(
        "timestamp",
        cred_def_id=creddef_id
    ).withAttribute(
        "shipping_address",
    ).build(conn_id, comment="proof of package ownership")
    return ob.send_proof_proposal(req)

#Shipper -> User
def request_proof_of_ownership(creddef_id):
    """Shipper -> User: request proof of package ownership.

    'shipping_address' is requested without restrictions (self-attested).
    """
    builder = build_proof_request(name="proof of package ownership", version="1.0")
    req = builder.withAttribute(
        "package_no",
        restrictions=[{"cred_def_id": creddef_id}]
    ).withAttribute(
        "timestamp",
        restrictions=[{"cred_def_id": creddef_id}]
    ).withAttribute(
        "shipping_address",
    ).with_conn_id(config.agent_data.current_connection).build()

    return ob.send_proof_request(req)
####END Stage 3
####START Stage 4: receipt of package
#Shipper -> Vendor
def send_package_receipt_cred_offer(conn_id, creddef_id, package_no):
    """Shipper -> Vendor: offer a receipt credential for *package_no*."""
    logging.debug("Issue receipt credential to vendor")
    builder = build_cred(creddef_id)
    builder.with_attribute({"package_no": package_no}) \
        .with_attribute({"timestamp": str(int(time.time()))}) \
        .with_type("did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview") \
        .with_conn_id(conn_id)

    offer_req = builder.build_offer("package-receipt credential issuance")
    config.agent_data.previews[creddef_id] = builder.build_preview()
    return ob.send_cred_offer(offer_req)

#User -> Vendor
def request_proof_of_receipt():
    """User -> Vendor: request proof that the shipper received the package.

    Restrictions are keyed on the shipper's issuer DID rather than a
    credential definition id.
    """
    builder = build_proof_request(name="proof of shipped package", version="1.0")
    req = builder.withAttribute(
        "package_no",
        restrictions=[{"issuer_did":config.agent_data.shipper_did}]
    ).withAttribute(
        "timestamp",
        restrictions=[{"issuer_did":config.agent_data.shipper_did}]
    ).withAttribute(
        "status",
        restrictions=[{"issuer_did": config.agent_data.shipper_did}]
    ).with_conn_id(config.agent_data.current_connection).build()

    return ob.send_proof_request(req)

#Vendor -> User
def propose_proof_of_package_status(connection_id, cred_def_id=None):
    """Vendor -> User: propose proof of the package's current status."""
    proposal = build_proof_proposal(
        "proof_of_package_status"
    ).withAttribute(
        "package_no",
        cred_def_id=cred_def_id,
    ).withAttribute(
        "timestamp",
        cred_def_id=cred_def_id,
    ).build(connection_id, comment="Package is at shipping service")
    return ob.send_proof_proposal(proposal)
##helper
def register_schema(name, version, attrs, revocation=False):
    """Register a schema and matching credential definition on the ledger.

    Returns (schema_id, creddef_id); the creddef id is also stored on
    agent state. NOTE(review): the local 'id' shadows the builtin.
    """
    schema = build_schema(name, version, attrs)
    resp = ob.register_schema(schema)
    id = resp["schema_id"]
    creddef = {"schema_id": id, "support_revocation": revocation}
    resp = ob.register_creddef(creddef)
    creddef_id = resp["credential_definition_id"]
    config.agent_data.creddef_id = creddef_id
    return id, creddef_id
## need a way of keeping track who is for what
def get_creddefid(schema_name):
    """Return the credential-definition id of the first stored credential
    whose schema id contains *schema_name*, or None when there is none.
    """
    credentials = ob.get_credentials()
    results = credentials["results"]
    # Consistency fix: the original getters used bare print() for this
    # debug output; the rest of the module uses the logging module.
    logging.debug("stored credentials: %s", results)
    matching = [cred for cred in results if schema_name in cred["schema_id"]]
    if matching:
        return matching[0]["cred_def_id"]
    return None

def get_agreement_creddefid():
    """Cred-def id of the stored payment-agreement credential, or None."""
    return get_creddefid("payment_agreement")

def get_payment_creddefid():
    """Cred-def id of the stored payment credential, or None."""
    return get_creddefid("payment_credential")

def get_package_creddefid():
    """Cred-def id of the stored package credential, or None."""
    return get_creddefid("package_cred")
def register_payment_agreement_schema(url):
    """Register the payment-agreement schema + cred-def via the admin *url*.

    Returns (schema_id, creddef_id) and caches the creddef id on agent state.
    NOTE(review): if ob.register_creddef returns a falsy value, the final
    'resp[...]' subscription raises — confirm the intended failure mode.
    """
    schema_name = "payment_agreement"
    schema = {
        "schema_name": schema_name,
        "schema_version": "1.0",
        "attributes": ["amount", "timestamp", "payment_endpoint", "product_id"]
    }
    response = ob.post(url + "/schemas", data=schema)
    id = response["schema_id"]
    creddef = {"schema_id": id, "support_revocation": False}
    resp = ob.register_creddef(creddef)
    if resp:
        config.agent_data.creddef_id = resp["credential_definition_id"]
        config.agent_data.creddefs[schema_name] = resp["credential_definition_id"]
        # The f-string here contains no placeholders; the %s args are filled
        # in by logging's lazy formatting.
        logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"])
    return id, resp["credential_definition_id"]
#schema reg
def register_payment_schema(url):
    """Register the payment-credential schema + cred-def via the admin *url*.

    The resulting creddef id is cached as agent_data.payment_creddef.
    NOTE(review): these three register_* functions are near-duplicates of
    register_payment_agreement_schema and could share a helper.
    """
    schema = {
        "schema_name": "payment_credential",
        "schema_version": "1.0",
        "attributes": ["transaction_no", "timestamp"]
    }
    response = ob.post(url + "/schemas", data=schema)
    id = response["schema_id"]
    creddef = {"schema_id": id, "support_revocation": False}
    resp = ob.register_creddef(creddef)
    if resp:
        # NOTE(review): stray debug print; rest of module uses logging.
        print(resp)
        config.agent_data.creddef_id = resp["credential_definition_id"]
        config.agent_data.payment_creddef = resp["credential_definition_id"]
        logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"])
    return id, resp["credential_definition_id"]

def register_package_schema(url):
    """Register the package-credential schema + cred-def via the admin *url*."""
    schema_name = "package_cred"
    schema = {
        "schema_name": schema_name,
        "schema_version": "1.0",
        "attributes": ["package_no", "timestamp", "status"]
    }
    response = ob.post(url + "/schemas", data=schema)
    id = response["schema_id"]
    creddef = {"schema_id":id, "support_revocation": False}
    resp = ob.register_creddef(creddef)
    if resp:
        config.agent_data.creddef_id = resp["credential_definition_id"]
        config.agent_data.creddefs[schema_name] = resp["credential_definition_id"]
        logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"])
    return id, resp["credential_definition_id"]

def register_receipt_schema(url):
    """Register the package-receipt schema + cred-def via the admin *url*."""
    schema_name = "received_package"
    schema = {
        "schema_name": schema_name,
        "schema_version": "1.0",
        "attributes": ["package_no", "timestamp"]
    }
    response = ob.post(url + "/schemas", data=schema)
    id = response["schema_id"]
    creddef = {"schema_id": id, "support_revocation": False}
    resp = ob.register_creddef(creddef)
    if resp:
        config.agent_data.creddef_id = resp["credential_definition_id"]
        config.agent_data.creddefs[schema_name] = resp["credential_definition_id"]
        logging.debug(f"Registered schema with id: %s, and creddef_id: %s", id, resp["credential_definition_id"])
    return id, resp["credential_definition_id"]
def get_schema_name(creddef):
    """Resolve a credential-definition id to its schema name.

    Returns False when either the cred-def or its schema cannot be fetched.
    """
    resp = ob.get_creddef(creddef)
    if not resp:
        return False
    schema_id = resp["credential_definition"]["schemaId"]
    resp = ob.get_schema(schema_id)
    if not resp:
        return False
    return resp["schema"]["name"]
#####VALIDATORS#####
def is_credential_stored(name):
    """Return True when any stored credential's schema id contains *name*."""
    results = ob.get_credentials()["results"]
    return any(name in cred["schema_id"] for cred in results)
def is_proof_validated(schema_name, proof_name=None, ex_id=None):
    """Return True if any verified presentation-exchange record restricts an
    attribute to a cred-def whose schema name equals *schema_name*.

    NOTE(review): 'verified' is compared against the string "true" — the
    agent appears to report it as a string; confirm against the agent API.
    *proof_name* and *ex_id* are currently unused.
    """
    proof_records = ob.get_pres_ex_records()
    results = proof_records["results"]
    if results:
        for result in results:
            if "verified" in result:
                if result["verified"] == "true":
                    attrs = result["presentation_request"]["requested_attributes"]
                    # Walk each requested attribute's restrictions looking for
                    # a cred-def that resolves to the wanted schema name.
                    for attr in attrs:
                        for attrname in attrs[attr]:
                            if attrname == "restrictions":
                                restrictions = attrs[attr][attrname]
                                for restriction in restrictions:
                                    if "cred_def_id" in restriction:
                                        name = get_schema_name(restriction["cred_def_id"])
                                        if name == schema_name:
                                            return True
    return False

def get_proof_validated(schema_name, proof_name=None, ex_id=None):
    """Duplicate of is_proof_validated.

    NOTE(review): despite the 'get_' name this returns a bool, byte-for-byte
    identical to is_proof_validated — likely a copy-paste left unfinished
    (it presumably was meant to return the matching record).
    """
    proof_records = ob.get_pres_ex_records()
    results = proof_records["results"]
    if results:
        for result in results:
            if "verified" in result:
                if result["verified"] == "true":
                    attrs = result["presentation_request"]["requested_attributes"]
                    for attr in attrs:
                        for attrname in attrs[attr]:
                            if attrname == "restrictions":
                                restrictions = attrs[attr][attrname]
                                for restriction in restrictions:
                                    if "cred_def_id" in restriction:
                                        name = get_schema_name(restriction["cred_def_id"])
                                        if name == schema_name:
                                            return True
    return False
# Helper function.
# Returns True if a presentation proposal referencing a cred-def whose
# schema name equals @schema_name has been received.
def have_receieved_proof_proposal(schema_name=None):
    """Check presentation-exchange records for a received proposal whose
    cred-def restriction resolves to *schema_name*.

    The (misspelled) function name is kept unchanged because callers
    elsewhere depend on it.
    """
    records = ob.get_pres_ex_records()["results"]
    if not records:
        return False
    for record in records:
        if record["state"] != "proposal_received":
            continue
        proposal = record["presentation_proposal_dict"]["presentation_proposal"]
        for attribute in proposal["attributes"]:
            if "cred_def_id" not in attribute:
                continue
            if get_schema_name(attribute["cred_def_id"]) == schema_name:
                return True
    return False
# Helper function.
# Returns the value string of the credential attribute called @name in the
# given aries issue-credential message, or False if it is absent.
def get_cred_attr_value(name, offer):
    """Extract a named attribute value from an issue-credential offer."""
    proposal = offer["credential_proposal_dict"]["credential_proposal"]
    return next(
        (attr["value"] for attr in proposal["attributes"] if attr["name"] == name),
        False,
    )
def get_cred_attrs(offer):
    """Return the full attribute list from an issue-credential offer."""
    proposal_wrapper = offer["credential_proposal_dict"]
    return proposal_wrapper["credential_proposal"]["attributes"]
def gen_package_no(n=7):
    """Return a random n-digit package number as a string (no leading zero)."""
    return str(random.randint(10 ** (n - 1), 10 ** n - 1))
def gen_product_id(n=4):
    """Return a random n-digit product id as a string (no leading zero)."""
    return str(random.randint(10 ** (n - 1), 10 ** n - 1))
def gen_transaction_id(n=5):
    """Return a random transaction id: 't_id_' followed by n digits."""
    return "t_id_" + str(random.randint(10 ** (n - 1), 10 ** n - 1))
def parse_payment_endpoint(data):
    # NOTE(review): looks unfinished -- the lookup result is discarded and
    # the function implicitly returns None.  An empty attribute name also
    # never matches anything in get_cred_attr_value.  TODO: confirm the
    # intended behaviour with the author.
    get_cred_attr_value("", data)
| 35.51751 | 124 | 0.663453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,021 | 0.275033 |
1db911440c02b6ae5ac0e07ff149aaf4416a4ce3 | 2,843 | py | Python | pymath/renders.py | Lafrite/pyMath | 9888e12661d07754568493ab47b5d896349d3784 | [
"Apache-2.0"
] | null | null | null | pymath/renders.py | Lafrite/pyMath | 9888e12661d07754568493ab47b5d896349d3784 | [
"Apache-2.0"
] | null | null | null | pymath/renders.py | Lafrite/pyMath | 9888e12661d07754568493ab47b5d896349d3784 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from .render import Render
from .fraction import Fraction
from .generic import first_elem, last_elem
__all__ = ['post2in_fix', 'tex_render', 'txt_render']
# ------------------------
# A infix to postfix list convertor
# Operator/token maps for rebuilding an infix token list from a postfix
# one; join=False keeps the result as a token list instead of a string.
p2i_infix = {"+": "+", "-": "-", "*": "*", "/" : "/", ":": ":", "^":"^"}  # binary infix operators (identity mapping)
p2i_postfix = {}  # no operators need postfix-specific handling here
p2i_other = {"(": "(", ")": ")"}  # grouping tokens
post2in_fix = Render(p2i_infix, p2i_postfix, p2i_other, join = False)
# ------------------------
# A console render
def txtMult(op1,op2):
    """Text render for the * operator.

    The explicit "*" token is omitted when the product is unambiguous:
    a number on the left followed by a purely alphabetic string, or a
    parenthesised factor adjoining another parenthesis.

    NOTE(review): ``left_ends_paren`` (computed from op1) is combined with
    ``left_is_number`` even though the original docstring lists the
    "number followed by '('" case -- behaviour kept unchanged; confirm
    against the ``generic`` helpers.
    """
    left_is_number = type(op1) in (int, Fraction)
    right_is_letters = type(op2) == str and op2.isalpha()
    right_opens_paren = first_elem(op2) == "("
    left_ends_paren = last_elem(op1) == ")"
    implicit = (
        (left_is_number and (right_is_letters or left_ends_paren))
        or (right_opens_paren and left_ends_paren)
    )
    if implicit:
        return [op1, op2]
    return [op1, "*", op2]
# Console (plain-text) renderer configuration.
txt_infix = {"+": "+", "-": "-", "*": txtMult, "/" : "/", ":":":", "^":"^"}  # '*' delegates to txtMult for implicit products
txt_postfix = {}  # no postfix-rendered operators in text mode
txt_other = {"(": "(", ")": ")"}  # grouping tokens
txt_render = Render(txt_infix, txt_postfix, txt_other)
# ------------------------
# A latex render
def texSlash(op1, op2):
    """Tex render for the / operator.

    Wraps the operands in \\frac, first stripping one redundant pair of
    outer parentheses from each non-numeric operand.
    """
    def strip_outer_parens(operand):
        # Only non-numeric (sequence-like) operands can carry parentheses.
        if not Render.isNumerande(operand) and operand[0] == "(" and operand[-1] == ")":
            return operand[1:-1]
        return operand

    numerator = strip_outer_parens(op1)
    denominator = strip_outer_parens(op2)
    return ["\\frac{", numerator, "}{", denominator, "}"]
def texFrac(frac):
    """Tex render for a Fraction: a \\frac{num}{denom} token list."""
    numerator = str(frac._num)
    denominator = str(frac._denom)
    return ["\\frac{", numerator, "}{", denominator, "}"]
def texMult(op1,op2):
    """Tex render for the * operator.

    The explicit "\\times" token is omitted when the product is
    unambiguous: a number on the left followed by a purely alphabetic
    string, or a parenthesised factor adjoining another parenthesis.
    Mirrors txtMult; only the emitted operator token differs.
    """
    left_is_number = type(op1) in (int, Fraction)
    right_is_letters = type(op2) == str and op2.isalpha()
    right_opens_paren = first_elem(op2) == "("
    left_ends_paren = last_elem(op1) == ")"
    implicit = (
        (left_is_number and (right_is_letters or left_ends_paren))
        or (right_opens_paren and left_ends_paren)
    )
    if implicit:
        return [op1, op2]
    return [op1, "\\times", op2]
# LaTeX renderer configuration.
tex_infix = {"+": " + ", "-": " - ", "*": texMult , ":": ":", "^":"^"}  # '*' delegates to texMult for implicit products
tex_postfix = {"/": texSlash}  # division becomes a \frac, built postfix-style
tex_other = {"(": "(", ")": ")"}  # grouping tokens
tex_type_render = {str:str, int: str, Fraction: texFrac}  # per-type leaf renderers
tex_render = Render(tex_infix, tex_postfix, tex_other, type_render = tex_type_render)
if __name__ == '__main__':
    # Ad-hoc manual smoke test: render a small postfix expression with the
    # LaTeX renderer.  The commented lines exercise the other renderers.
    #exp = [2, 5, '^', 1, '-', 3, 4, '*', ':']
    #print(txt_render(exp))
    #exp = [2, 5, '^', 1, '-', 3, 4, '*', '/', 3, 5, '/', ':']
    exp = [2, -3, "*"]
    print(tex_render(exp))
    #exp = [2, 5, '^', 1, '-', 3, 4, '*', '/', 3, '+']
    #print(post2in_fix(exp))
# -----------------------------
# Reglages pour 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
| 27.336538 | 85 | 0.500528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,065 | 0.374604 |
1db923da6962bff441110d6f89fea97b0bd0f082 | 1,813 | py | Python | examples/wide_and_deep_example.py | meta-soul/MetaSpore | e6fbc12c6a3139df76c87215b16f9dba65962ec7 | [
"Apache-2.0"
] | 32 | 2022-03-30T10:24:00.000Z | 2022-03-31T16:19:15.000Z | examples/wide_and_deep_example.py | meta-soul/MetaSpore | e6fbc12c6a3139df76c87215b16f9dba65962ec7 | [
"Apache-2.0"
] | null | null | null | examples/wide_and_deep_example.py | meta-soul/MetaSpore | e6fbc12c6a3139df76c87215b16f9dba65962ec7 | [
"Apache-2.0"
] | 3 | 2022-03-30T10:28:57.000Z | 2022-03-30T11:37:39.000Z | #
# To run locally, execute:
#
# spark-submit --master local[2] wide_and_deep_example.py
#
S3_ROOT_DIR = 's3://{YOUR_S3_BUCKET}/{YOUR_S3_PATH}/'
batch_size = 100
worker_count = 1
server_count = 1
import metaspore as ms
spark = ms.spark.get_session(batch_size=batch_size,
worker_count=worker_count,
server_count=server_count,
)
sc = spark.sparkContext
with spark:
module = ms.nn.WideAndDeepModule(
wide_column_name_path=S3_ROOT_DIR + 'demo/schema/column_name_demo.txt',
wide_combine_schema_path=S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt',
deep_sparse_column_name_path=S3_ROOT_DIR + 'demo/schema/column_name_demo.txt',
deep_sparse_combine_schema_path=S3_ROOT_DIR + 'demo/schema/combine_schema_demo.txt',
)
model_out_path = S3_ROOT_DIR + 'demo/output/dev/model_out/'
estimator = ms.PyTorchEstimator(module=module,
worker_count=worker_count,
server_count=server_count,
model_out_path=model_out_path,
input_label_column_index=0)
train_dataset_path = S3_ROOT_DIR + 'demo/data/train/day_0_0.001_train.csv'
train_dataset = ms.input.read_s3_csv(spark, train_dataset_path, delimiter='\t')
model = estimator.fit(train_dataset)
test_dataset_path = S3_ROOT_DIR + 'demo/data/test/day_0_0.001_test.csv'
test_dataset = ms.input.read_s3_csv(spark, test_dataset_path, delimiter='\t')
result = model.transform(test_dataset)
result.show(5)
import pyspark
evaluator = pyspark.ml.evaluation.BinaryClassificationEvaluator()
test_auc = evaluator.evaluate(result)
print('test_auc: %g' % test_auc)
| 37.770833 | 92 | 0.665196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 395 | 0.217871 |
1db9945d0a006ee0eef48ac4ca55f27881406e8a | 1,213 | py | Python | O3/_10_bias_variance_tradeoff/bias_variance_tradeoff.py | ShAlireza/ML-Tries | 4516be7a3275c9bdedd7bd258800be384b6b34f0 | [
"MIT"
] | null | null | null | O3/_10_bias_variance_tradeoff/bias_variance_tradeoff.py | ShAlireza/ML-Tries | 4516be7a3275c9bdedd7bd258800be384b6b34f0 | [
"MIT"
] | null | null | null | O3/_10_bias_variance_tradeoff/bias_variance_tradeoff.py | ShAlireza/ML-Tries | 4516be7a3275c9bdedd7bd258800be384b6b34f0 | [
"MIT"
] | null | null | null | """The bias-variance tradeoff
Often, researchers use the terms "bias" and "variance" or "bias-
variance tradeoff" to describe the performance of a model—that
is, you may stumble upon talks, books, or articles where people
say that a model has a "high variance" or "high bias." So, what
does that mean? In general, we might say that "high variance"
is proportional to overfitting and "high bias" is proportional to
underfitting.
In the context of machine learning models, variance measures
the consistency (or variability) of the model prediction for
classifying a particular example if we retrain the model multiple
times, for example, on different subsets of the training dataset.
We can say that the model is sensitive to the randomness
in the training data. In contrast, bias measures how far off
the predictions are from the correct values in general if we
rebuild the model multiple times on different training datasets;
bias is the measure of the systematic error that is not due
to randomness.
Accurate definitions can be found in below link:
https://sebastianraschka.com/pdf/lecture-notes/stat479fs18/08_eval-intro_notes.pdf
"""
| 46.653846 | 82 | 0.751031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,214 | 0.999177 |
1db9aba6d555320304b7768349db8870cdc9782b | 496 | py | Python | pytronlinks/__init__.py | wahyubram82/PytronAI | 52953461727cda6f60fedf802b748352095ba6ec | [
"BSD-3-Clause"
] | 2 | 2016-12-04T03:03:40.000Z | 2018-05-21T15:28:45.000Z | pytronlinks/__init__.py | wahyubram82/PytronAI | 52953461727cda6f60fedf802b748352095ba6ec | [
"BSD-3-Clause"
] | 2 | 2019-02-24T17:04:03.000Z | 2021-04-12T13:00:08.000Z | pytronlinks/__init__.py | wahyubram82/PytronAI | 52953461727cda6f60fedf802b748352095ba6ec | [
"BSD-3-Clause"
] | 6 | 2016-11-20T15:08:29.000Z | 2020-07-08T12:36:30.000Z | # -*- coding: UTF-8 -*-
"""
Pytron - Links Mark II Interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Links for Python
:copyright: (c) 2016 by Scott Doucet / aka: traBpUkciP.
:license: BSD, see LICENSE for more details.
"""
from .client import *
# Package metadata.
__copyright__ = 'Copyright 2016 by traBpUkciP'
__version__ = '0.3.9'
__license__ = 'BSD'
__author__ = 'traBpUkciP'
__email__ = 'duroktar@gmail.com'
__source__ = 'https://github.com/Duroktar/PytronAI/'
# Public API: only Client is re-exported from .client via star import.
__all__ = [
    'Client',
]
| 18.37037 | 59 | 0.612903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.721774 |
1db9f8268801e37ed09678dd7bdfdf9e618fd8da | 2,324 | py | Python | tests/test_nestedprops.py | atviriduomenys/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 2 | 2019-03-14T06:41:14.000Z | 2019-03-26T11:48:14.000Z | tests/test_nestedprops.py | sirex/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 44 | 2019-04-05T15:52:45.000Z | 2022-03-30T07:41:33.000Z | tests/test_nestedprops.py | sirex/spinta | 77a10e201f8cdc63143fce7996fd0898acb1ff58 | [
"MIT"
] | 1 | 2019-04-01T09:54:27.000Z | 2019-04-01T09:54:27.000Z | import pytest
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_update_object(model, app):
    """PATCHing a nested object merges the patch into the stored value."""
    app.authmodel(model, ['insert', 'patch', 'getone'])
    initial_payload = {
        'status': 'ok',
        'sync': {
            'sync_revision': '1',
            'sync_resources': [
                {
                    'sync_id': '2',
                    'sync_source': 'report'
                }
            ]
        }
    }
    response = app.post(f'/{model}', json=initial_payload)
    assert response.status_code == 201, response.json()
    created = response.json()
    obj_id = created['_id']
    revision = created['_revision']

    # Patch only sync_revision; sync_resources must survive untouched.
    response = app.patch(f'/{model}/{obj_id}', json={
        '_revision': revision,
        'sync': {
            'sync_revision': '3'
        }
    })
    assert response.status_code == 200, response.json()
    revision = response.json()['_revision']

    response = app.get(f'/{model}/{obj_id}')
    assert response.status_code == 200, response.json()
    assert response.json()['sync'] == {
        'sync_revision': '3',
        'sync_resources': [
            {
                'sync_id': '2',
                'sync_source': 'report'
            }
        ]
    }
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_update_object_array(model, app):
    """PATCHing a nested array replaces it while keeping sibling keys."""
    app.authmodel(model, ['insert', 'patch', 'getone'])
    initial_payload = {
        'status': 'ok',
        'sync': {
            'sync_revision': '1',
            'sync_resources': [
                {
                    'sync_id': '2',
                    'sync_source': 'report'
                }
            ]
        }
    }
    response = app.post(f'/{model}', json=initial_payload)
    assert response.status_code == 201, response.json()
    created = response.json()
    obj_id = created['_id']
    revision = created['_revision']

    # Patch only sync_resources; sync_revision must survive untouched.
    response = app.patch(f'/{model}/{obj_id}', json={
        '_revision': revision,
        'sync': {
            'sync_resources': [{
                'sync_id': '3',
                'sync_source': 'troper'
            }],
        }
    })
    assert response.status_code == 200, response.json()
    revision = response.json()['_revision']

    response = app.get(f'/{model}/{obj_id}')
    assert response.status_code == 200, response.json()
    assert response.json()['sync'] == {
        'sync_revision': '1',
        'sync_resources': [
            {
                'sync_id': '3',
                'sync_source': 'troper'
            }
        ]
    }
1dba0895c59eec6cd5b80fa99e70f9f9b7bf6507 | 1,979 | py | Python | dockerpty/__init__.py | tedivm/dockerpty | f8d17d893c6758b7cc25825e99f6b02202632a97 | [
"Apache-2.0"
] | 129 | 2015-01-19T15:02:47.000Z | 2022-03-28T07:46:46.000Z | dockerpty/__init__.py | tedivm/dockerpty | f8d17d893c6758b7cc25825e99f6b02202632a97 | [
"Apache-2.0"
] | 52 | 2015-01-01T11:22:23.000Z | 2021-03-29T14:11:10.000Z | dockerpty/__init__.py | tedivm/dockerpty | f8d17d893c6758b7cc25825e99f6b02202632a97 | [
"Apache-2.0"
] | 71 | 2015-02-07T16:25:27.000Z | 2022-02-06T02:34:18.000Z | # dockerpty.
#
# Copyright 2014 Chris Corbyn <chris@w3style.co.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
    """
    Present the PTY of the container inside the current process.

    Thin convenience wrapper: builds a RunOperation for the container and
    hands it to a PseudoTerminal to drive.
    """
    operation = RunOperation(
        client,
        container,
        interactive=interactive,
        stdout=stdout,
        stderr=stderr,
        stdin=stdin,
        logs=logs,
    )
    terminal = PseudoTerminal(client, operation)
    terminal.start()
def exec_command(
        client, container, command, interactive=True, stdout=None, stderr=None, stdin=None):
    """
    Run provided command via exec API in provided container.

    Creates the exec instance first, then attaches a PseudoTerminal to it.
    """
    exec_id = exec_create(client, container, command, interactive=interactive)
    operation = ExecOperation(
        client,
        exec_id,
        interactive=interactive,
        stdout=stdout,
        stderr=stderr,
        stdin=stdin,
    )
    terminal = PseudoTerminal(client, operation)
    terminal.start()
def start_exec(client, exec_id, interactive=True, stdout=None, stderr=None, stdin=None):
    """
    Attach to an already-created exec instance and present its PTY.

    Unlike exec_command(), the caller supplies an existing *exec_id*
    (e.g. obtained from exec_create()) instead of a command to run.
    """
    operation = ExecOperation(client, exec_id,
                              interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)
    PseudoTerminal(client, operation).start()
| 38.803922 | 97 | 0.71905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 901 | 0.45528 |
1dbcfb725076b81e8d7c56d6791f6316aa039467 | 3,639 | py | Python | django_documents_tools/utils.py | pik-software/django-documents-tools | 57b3083175d2562af635f0cdde74a681248a444e | [
"BSD-3-Clause"
] | 1 | 2019-12-07T15:51:08.000Z | 2019-12-07T15:51:08.000Z | django_documents_tools/utils.py | pik-software/django-documents-tools | 57b3083175d2562af635f0cdde74a681248a444e | [
"BSD-3-Clause"
] | 9 | 2019-12-06T10:53:08.000Z | 2020-07-06T06:47:45.000Z | django_documents_tools/utils.py | pik-software/django-documents-tools | 57b3083175d2562af635f0cdde74a681248a444e | [
"BSD-3-Clause"
] | null | null | null | import os
from collections import Counter
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.utils.deconstruct import deconstructible
from django_documents_tools.exceptions import (
BusinessEntityCreationIsNotAllowedError)
from django_documents_tools.manager import setattrs
from django_documents_tools.settings import tools_settings
def get_change_attachment_file_path(instance, file_name):
    """Build an upload path of the form <app_label>/<model_name>/<file_name>."""
    meta = instance._meta  # noqa: protected-access
    return os.path.join(meta.app_label, meta.model_name, file_name)
def check_subclass(base, original):
    """Ensure *base* is a subclass of *original*.

    Raises:
        TypeError: if *base* does not subclass *original*.  TypeError is
            more precise than the bare ``Exception`` raised previously and
            remains backward compatible with callers catching Exception.
    """
    if not issubclass(base, original):
        raise TypeError(
            f'{base.__name__} must be subclass of {original.__name__}')
def validate_change_attrs(model, change, attrs):
    """Validate *attrs* for a change model by applying them to the documented
    business entity and running Django's full_clean() on it.

    The entity is refreshed from the database afterwards, so validation
    never persists any of the tentative attribute values.
    """
    documented_model_field = model._documented_model_field # noqa: protected-access
    documented_model = model._meta.get_field( # noqa: protected-access
        documented_model_field).remote_field.model
    if change and getattr(change, documented_model_field):
        # Existing change already linked to a business entity: validate the
        # would-be state of that entity.
        documented_instance = getattr(change, documented_model_field)
        kwargs = change.get_changes()
        document_fields = attrs.get('document_fields')
        if change.snapshot and document_fields:
            # Overlay the incoming attrs (restricted to document_fields)
            # on top of the snapshot state instead of the raw changes.
            changes = {}
            for field_name in document_fields:
                if field_name in attrs.keys():
                    changes[field_name] = attrs[field_name]
            kwargs = {**change.snapshot.state, **changes}
        setattrs(documented_instance, **kwargs)
        documented_instance.full_clean()
        # Undo the in-memory mutation made purely for validation.
        documented_instance.refresh_from_db()
    else:
        # New (unsaved) change: build it from attrs to derive the changes.
        change = model(**attrs)
        documented_instance = getattr(change, documented_model_field)
        kwargs = change.get_changes()
        if documented_instance:
            setattrs(documented_instance, **kwargs)
            documented_instance.full_clean()
            documented_instance.refresh_from_db()
        elif tools_settings.CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED:
            # No entity yet, but one would be auto-created: validate a
            # throwaway instance of the documented model.
            new_documented = documented_model(**kwargs)
            new_documented.full_clean()
@deconstructible
class LimitedChoicesValidator:
    """Validate that an iterable contains only allowed fields, each at most once.

    Raises django's ValidationError on the first unknown field, then on the
    most frequent duplicated field, mirroring the original behaviour.
    """

    def __init__(self, allowed_fields):
        self.allowed_fields = allowed_fields

    def __call__(self, value):
        for field in value:
            if field not in self.allowed_fields:
                raise ValidationError(f'Unknown field `{field}`.')
        occurrences = Counter(value)
        for field, count in occurrences.most_common():
            if count > 1:
                raise ValidationError(f'Found duplicate field `{field}`.')
        return True
def apply_change_receiver(sender, **kwargs):
    """Signal receiver: once a change's document leaves draft state, make sure
    a business entity exists and re-apply all changes effective today.

    Raises:
        BusinessEntityCreationIsNotAllowedError: if no entity exists and
            auto-creation is disabled in settings.
    """
    change = kwargs['instance']
    if not change.document_is_draft:
        new_documented = getattr(change, change._documented_model_field) # noqa: pylint==protected-access
        creation = tools_settings.CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED
        if new_documented is None and creation:
            # Auto-create the entity and link it back onto the change.
            new_documented = change.apply_new()
            setattr(change, change._documented_model_field, new_documented) # noqa: pylint==protected-access
            change.save(update_fields=[change._documented_model_field]) # noqa: pylint==protected-access
        elif new_documented is None and not creation:
            raise BusinessEntityCreationIsNotAllowedError()
        # Re-apply every change effective as of today, then persist without
        # re-triggering document application (avoids recursion).
        applicable_date = timezone.now().date()
        new_documented.changes.apply_to_object(date=applicable_date)
        new_documented.save(apply_documents=False)
        change.refresh_from_db()
| 38.712766 | 109 | 0.71091 | 473 | 0.129981 | 0 | 0 | 490 | 0.134652 | 0 | 0 | 339 | 0.093157 |
1dbddad7e6aa4ab9f3b4b98398e41967db617fa0 | 2,989 | py | Python | database/models.py | zdresearch/Nettacker | 6d1653df01fee06ac6906a3cb1beebed39c166ef | [
"Apache-2.0"
] | 884 | 2020-09-26T01:12:09.000Z | 2022-03-31T07:39:23.000Z | database/models.py | GeauxWeisbeck4/Nettacker | 0d6a907f4528b42ff6460c8a1e58f73ae768b38e | [
"Apache-2.0"
] | 185 | 2018-04-18T12:16:27.000Z | 2020-09-25T10:37:23.000Z | database/models.py | GeauxWeisbeck4/Nettacker | 0d6a907f4528b42ff6460c8a1e58f73ae768b38e | [
"Apache-2.0"
] | 236 | 2020-09-26T22:19:29.000Z | 2022-03-30T08:21:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (Column,
Integer,
Text,
DateTime)
Base = declarative_base()
class Report(Base):
    """
    This class defines the table schema of the reports table. Any changes to the reports table need to be done here.
    """
    __tablename__ = 'reports'
    id = Column(Integer, primary_key=True, autoincrement=True)
    date = Column(DateTime)  # when the scan report was generated
    scan_unique_id = Column(Text)  # correlates the report with its scan events
    report_path_filename = Column(Text)  # filesystem path of the written report
    options = Column(Text)  # serialized scan options used for this run

    def __repr__(self):
        """
        returns a printable representation of the object of the class Report
        """
        return "<Report(id={0}, scan_unique_id={1}, date={2}, report_path_filename={3})>".format(
            self.id,
            self.scan_unique_id,
            self.date,
            self.report_path_filename
        )
class TempEvents(Base):
    """
    This class defines the table schema of the temp_events table. Any changes to the temp_events table need to be done here.
    """
    __tablename__ = 'temp_events'
    id = Column(Integer, primary_key=True, autoincrement=True)
    date = Column(DateTime)  # when the event was recorded
    target = Column(Text)  # scanned host/target
    module_name = Column(Text)  # module that produced the event
    scan_unique_id = Column(Text)  # correlates the event with its scan run
    event_name = Column(Text)
    port = Column(Text)
    event = Column(Text)
    data = Column(Text)  # raw/serialized event payload

    def __repr__(self):
        """
        returns a printable representation of the object of the class TempEvents
        """
        # NOTE(review): the repr label says "scan_events" although this model
        # maps to temp_events -- kept as-is since it is only a display string.
        return '''
        <scan_events(id={0}, target={1}, date={2}, module_name={3}, scan_unqiue_id={4},
        port={5}, event={6}, data={7})>
        '''.format(
            self.id,
            self.target,
            self.date,
            self.module_name,
            self.scan_unique_id,
            self.port,
            self.event,
            self.data
        )
class HostsLog(Base):
    """
    This class defines the table schema of the scan_events table (the class
    keeps its historical HostsLog name). Any changes to the scan_events table
    need to be done here.
    """
    __tablename__ = 'scan_events'
    id = Column(Integer, primary_key=True, autoincrement=True)
    date = Column(DateTime)  # when the event was recorded
    target = Column(Text)  # scanned host/target
    module_name = Column(Text)  # module that produced the event
    scan_unique_id = Column(Text)  # correlates the event with its scan run
    port = Column(Text)
    event = Column(Text)
    json_event = Column(Text)  # JSON-serialized event payload

    def __repr__(self):
        """
        returns a printable representation of the object of the class HostsLog
        """
        return '''
        <scan_events(id={0}, target={1}, date={2}, module_name={3}, scan_unqiue_id={4},
        port={5}, event={6}, json_event={7})>
        '''.format(
            self.id,
            self.target,
            self.date,
            self.module_name,
            self.scan_unique_id,
            self.port,
            self.event,
            self.json_event
        )
1dbf1d60e9c6955adb19358c161e8059577649ad | 12,712 | py | Python | umigame/plotting.py | penguinwang96825/Umigame | 98d647ab6f40df08fe31d6b3bc444afe229a914e | [
"Apache-2.0"
] | null | null | null | umigame/plotting.py | penguinwang96825/Umigame | 98d647ab6f40df08fe31d6b3bc444afe229a914e | [
"Apache-2.0"
] | null | null | null | umigame/plotting.py | penguinwang96825/Umigame | 98d647ab6f40df08fe31d6b3bc444afe229a914e | [
"Apache-2.0"
] | 1 | 2021-11-01T14:35:32.000Z | 2021-11-01T14:35:32.000Z | import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import empyrical as ep
from matplotlib.ticker import FuncFormatter
def plot_returns(returns,
                 live_start_date=None,
                 ax=None,
                 **kwargs):
    """
    Plots raw returns over time.

    Backtest returns are in green, and out-of-sample (live trading)
    returns are in red.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    live_start_date : datetime, optional
        The date when the strategy began live trading, after
        its backtest period. This date should be normalized.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    ax.set_label('')
    ax.set_ylabel('Returns')

    if live_start_date is not None:
        # Split the series at the live-trading boundary and colour the
        # in-sample (backtest) part green, out-of-sample part red.
        live_start_date = ep.utils.get_utc_timestamp(live_start_date)
        is_returns = returns.loc[returns.index < live_start_date]
        oos_returns = returns.loc[returns.index >= live_start_date]
        # Fix: **kwargs was documented but never forwarded; pass it through
        # to pandas' plot so callers can customise the line.
        is_returns.plot(ax=ax, color='g', **kwargs)
        oos_returns.plot(ax=ax, color='r', **kwargs)
    else:
        returns.plot(ax=ax, color='g', **kwargs)

    return ax
def plot_monthly_returns_dist(returns, ax=None, **kwargs):
    """
    Plots a distribution of monthly returns.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    def percentage(x, pos):
        """
        Adds percentage sign to plot ticks.
        """
        return '%.0f%%' % x

    # Fix: pass the FuncFormatter directly.  The original wrapped it in a
    # second FuncFormatter, which only worked because FuncFormatter
    # instances happen to be callable.
    ax.xaxis.set_major_formatter(FuncFormatter(percentage))
    ax.tick_params(axis='x', which='major')

    monthly_ret_table = ep.aggregate_returns(returns, 'monthly')

    # Histogram of monthly returns in percent.
    ax.hist(
        100 * monthly_ret_table,
        color='orangered',
        alpha=0.80,
        bins=20,
        **kwargs)

    # Mark the mean monthly return and the zero line.
    ax.axvline(
        100 * monthly_ret_table.mean(),
        color='gold',
        linestyle='--',
        lw=4,
        alpha=1.0)

    ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)
    ax.legend(['Mean'], frameon=True, framealpha=0.5, loc="upper right")
    ax.set_ylabel('Number of months')
    ax.set_xlabel('Returns')
    ax.set_title("Distribution of monthly returns")
    return ax
def plot_annual_returns(returns, ax=None, **kwargs):
    """
    Plots a bar graph of returns by year.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    def percentage(x, pos):
        """
        Adds percentage sign to plot ticks.
        """
        return '%.0f%%' % x

    # Fix: pass the FuncFormatter directly.  The original wrapped it in a
    # second FuncFormatter, which only worked because FuncFormatter
    # instances happen to be callable.
    ax.xaxis.set_major_formatter(FuncFormatter(percentage))
    ax.tick_params(axis='x', which='major')

    ann_ret_df = pd.DataFrame(
        ep.aggregate_returns(
            returns,
            'yearly'))

    # Mark the mean annual return.
    ax.axvline(
        100 *
        ann_ret_df.values.mean(),
        color='steelblue',
        linestyle='--',
        lw=4,
        alpha=0.7)
    # Horizontal bars, most recent year at the top.
    (100 * ann_ret_df.sort_index(ascending=False)
     ).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)

    ax.axvline(0.0, color='black', linestyle='-', lw=3)
    ax.set_ylabel('Year')
    ax.set_xlabel('Returns')
    ax.set_title("Annual returns")
    ax.legend(['Mean'], frameon=True, framealpha=0.5, loc="upper right")
    return ax
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):
    """
    Plots a heatmap of returns by month.

    Rows are years, columns are months; cell values are monthly returns
    in percent, annotated and centred on zero.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to seaborn's heatmap.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    monthly_table = ep.aggregate_returns(returns, 'monthly').unstack().round(3)
    sns.heatmap(
        monthly_table.fillna(0) * 100.0,
        annot=True,
        annot_kws={"size": 9},
        alpha=1.0,
        center=0.0,
        cbar=False,
        cmap=matplotlib.cm.RdYlGn,
        ax=ax,
        **kwargs)
    ax.set_ylabel('Year')
    ax.set_xlabel('Month')
    ax.set_title("Monthly returns (%)")
    return ax
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
    """
    Plots cumulative returns highlighting top drawdown periods.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        Amount of top drawdowns periods to plot (default 10).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    def two_dec_places(x, pos):
        """
        Adds 1/100th decimal to plot ticks.
        """
        return '%.2f' % x

    # Fix: pass the FuncFormatter directly.  The original wrapped it in a
    # second FuncFormatter, which only worked because FuncFormatter
    # instances happen to be callable.
    ax.yaxis.set_major_formatter(FuncFormatter(two_dec_places))

    df_cum_rets = ep.cum_returns(returns, starting_value=1.0)
    df_drawdowns = gen_drawdown_table(returns, top=top)

    df_cum_rets.plot(ax=ax, **kwargs)

    lim = ax.get_ylim()
    colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
    # Shade each drawdown from its peak to its recovery; an unrecovered
    # drawdown is shaded through the last observation.
    for i, (peak, recovery) in df_drawdowns[['Peak date', 'Recovery date']].iterrows():
        if pd.isnull(recovery):
            recovery = returns.index[-1]
        ax.fill_between((peak, recovery),
                        lim[0],
                        lim[1],
                        alpha=.4,
                        color=colors[i])
    ax.set_ylim(lim)
    ax.set_title('Top %i drawdown periods' % top)
    ax.set_ylabel('Cumulative returns')
    ax.legend(['Portfolio'], loc='upper left', frameon=True, framealpha=0.5)
    ax.set_xlabel('')
    return ax
def plot_drawdown_underwater(returns, ax=None, **kwargs):
    """
    Plots how far underwaterr returns are over time, or plots current
    drawdown vs. date.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    def percentage(x, pos):
        """
        Adds percentage sign to plot ticks.
        """
        return '%.0f%%' % x

    # Fix: pass the FuncFormatter directly.  The original wrapped it in a
    # second FuncFormatter, which only worked because FuncFormatter
    # instances happen to be callable.
    ax.yaxis.set_major_formatter(FuncFormatter(percentage))

    # Drawdown (in percent, negative) relative to the running maximum of
    # the cumulative return curve.
    df_cum_rets = ep.cum_returns(returns, starting_value=1.0)
    running_max = np.maximum.accumulate(df_cum_rets)
    underwater = -100 * ((running_max - df_cum_rets) / running_max)
    (underwater).plot(ax=ax, kind='area', color='coral', alpha=0.7, **kwargs)
    ax.set_ylabel('Drawdown')
    ax.set_title('Underwater plot')
    ax.set_xlabel('')
    return ax
def gen_drawdown_table(returns, top=10):
    """
    Places top drawdowns in a table.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    df_drawdowns : pd.DataFrame
        Information about top drawdowns.
    """
    df_cum = ep.cum_returns(returns, 1.0)
    drawdown_periods = get_top_drawdowns(returns, top=top)
    df_drawdowns = pd.DataFrame(index=list(range(top)),
                                columns=['Net drawdown in %',
                                         'Peak date',
                                         'Valley date',
                                         'Recovery date',
                                         'Duration'])

    for i, (peak, valley, recovery) in enumerate(drawdown_periods):
        if pd.isnull(recovery):
            # Drawdown still open: no recovery date, no duration.
            df_drawdowns.loc[i, 'Duration'] = np.nan
        else:
            # Duration counted in business days ('B'), peak to recovery.
            df_drawdowns.loc[i, 'Duration'] = len(pd.date_range(peak,
                                                                recovery,
                                                                freq='B'))
        df_drawdowns.loc[i, 'Peak date'] = (peak.to_pydatetime()
                                            .strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Valley date'] = (valley.to_pydatetime()
                                              .strftime('%Y-%m-%d'))
        # A float recovery is the NaN sentinel from get_max_drawdown_underwater.
        if isinstance(recovery, float):
            df_drawdowns.loc[i, 'Recovery date'] = recovery
        else:
            df_drawdowns.loc[i, 'Recovery date'] = (recovery.to_pydatetime()
                                                    .strftime('%Y-%m-%d'))
        df_drawdowns.loc[i, 'Net drawdown in %'] = (
            (df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100

    # Convert the formatted date strings back to proper datetime columns.
    df_drawdowns['Peak date'] = pd.to_datetime(df_drawdowns['Peak date'])
    df_drawdowns['Valley date'] = pd.to_datetime(df_drawdowns['Valley date'])
    df_drawdowns['Recovery date'] = pd.to_datetime(
        df_drawdowns['Recovery date'])

    return df_drawdowns
def get_top_drawdowns(returns, top=10):
    """
    Finds top drawdowns, sorted by drawdown amount.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        The amount of top drawdowns to find (default 10).

    Returns
    -------
    drawdowns : list
        List of drawdown peaks, valleys, and recoveries. See get_max_drawdown.
    """
    returns = returns.copy()
    df_cum = ep.cum_returns(returns, 1.0)
    running_max = np.maximum.accumulate(df_cum)
    # Rolling drawdown: 0 at running highs, negative while underwater.
    underwater = df_cum / running_max - 1

    drawdowns = []
    # Repeatedly take the deepest remaining drawdown and remove its span
    # from the underwater series so the next iteration finds the next one.
    for _ in range(top):
        peak, valley, recovery = get_max_drawdown_underwater(underwater)
        # Slice out draw-down period
        if not pd.isnull(recovery):
            underwater.drop(underwater[peak: recovery].index[1:-1],
                            inplace=True)
        else:
            # drawdown has not ended yet
            underwater = underwater.loc[:peak]
        drawdowns.append((peak, valley, recovery))
        # Stop early once nothing is underwater any more.
        if ((len(returns) == 0)
                or (len(underwater) == 0)
                or (np.min(underwater) == 0)):
            break

    return drawdowns
def get_max_drawdown_underwater(underwater):
    """
    Locate the peak, valley and recovery dates of the deepest drawdown in
    an 'underwater' (precomputed rolling drawdown) series.

    Parameters
    ----------
    underwater : pd.Series
        Underwater returns (rolling drawdown) of a strategy; zero where
        the equity curve is at a running high, negative otherwise.

    Returns
    -------
    peak : datetime
        Last date at a running high before the deepest trough.
    valley : datetime
        Date of the deepest trough.
    recovery : datetime or NaN
        First date back at a running high after the trough; NaN if the
        drawdown has not yet recovered.
    """
    valley = underwater.idxmin()  # end of the period
    before_valley = underwater[:valley]
    # Last running-high (zero) date preceding the trough.
    peak = before_valley[before_valley == 0].index[-1]
    after_valley = underwater[valley:]
    recovered_dates = after_valley[after_valley == 0].index
    # First running-high date after the trough, or NaN if never recovered.
    recovery = recovered_dates[0] if len(recovered_dates) else np.nan
    return peak, valley, recovery
1dc11d56c934455980afcc662ca3cdb1b2a2b283 | 154 | py | Python | anuvaad-etl/anuvaad-extractor/document-processor/block-segmenter/src/routes/__init__.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | anuvaad-etl/anuvaad-extractor/document-processor/block-segmenter/src/routes/__init__.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | anuvaad-etl/anuvaad-extractor/document-processor/block-segmenter/src/routes/__init__.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | from .routes import Bolck_Segmenter_BLUEPRINT
from .routes import Bolck_Segmenter_BLUEPRINT_WF
#from .documentstructure import DOCUMENTSTRUCTURE_BLUEPRINT | 51.333333 | 59 | 0.902597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.383117 |
1dc23bb69793b971dce241628dbacc086c1774ec | 9,672 | py | Python | pysnmp/HPN-ICF-8021PAE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/HPN-ICF-8021PAE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/HPN-ICF-8021PAE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HPN-ICF-8021PAE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-8021PAE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:24:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
hpnicfRhw, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfRhw")
dot1xPaePortNumber, = mibBuilder.importSymbols("IEEE8021-PAE-MIB", "dot1xPaePortNumber")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, Bits, Counter64, Unsigned32, Counter32, ObjectIdentity, iso, NotificationType, MibIdentifier, Gauge32, IpAddress, TimeTicks, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Bits", "Counter64", "Unsigned32", "Counter32", "ObjectIdentity", "iso", "NotificationType", "MibIdentifier", "Gauge32", "IpAddress", "TimeTicks", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "MacAddress", "DisplayString")
hpnicfpaeExtMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6))
hpnicfpaeExtMib.setRevisions(('2001-06-29 00:00',))
if mibBuilder.loadTexts: hpnicfpaeExtMib.setLastUpdated('200106290000Z')
if mibBuilder.loadTexts: hpnicfpaeExtMib.setOrganization('')
hpnicfpaeExtMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1))
hpnicfdot1xPaeSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1))
hpnicfdot1xPaeAuthenticator = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2))
hpnicfdot1xAuthQuietPeriod = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1, 1), Unsigned32().clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xAuthQuietPeriod.setStatus('current')
hpnicfdot1xAuthTxPeriod = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1, 2), Unsigned32().clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xAuthTxPeriod.setStatus('current')
hpnicfdot1xAuthSuppTimeout = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1, 3), Unsigned32().clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xAuthSuppTimeout.setStatus('current')
hpnicfdot1xAuthServerTimeout = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1, 4), Unsigned32().clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xAuthServerTimeout.setStatus('current')
hpnicfdot1xAuthMaxReq = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1, 5), Unsigned32().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xAuthMaxReq.setStatus('current')
hpnicfdot1xAuthReAuthPeriod = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1, 6), Unsigned32().clone(3600)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xAuthReAuthPeriod.setStatus('current')
hpnicfdot1xAuthMethod = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("chap", 1), ("pap", 2), ("eap", 3))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xAuthMethod.setStatus('current')
hpnicfdot1xAuthConfigExtTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1), )
if mibBuilder.loadTexts: hpnicfdot1xAuthConfigExtTable.setStatus('current')
hpnicfdot1xAuthConfigExtEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1), ).setIndexNames((0, "IEEE8021-PAE-MIB", "dot1xPaePortNumber"))
if mibBuilder.loadTexts: hpnicfdot1xAuthConfigExtEntry.setStatus('current')
hpnicfdot1xpaeportAuthAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xpaeportAuthAdminStatus.setStatus('current')
hpnicfdot1xpaeportControlledType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("port", 1), ("mac", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xpaeportControlledType.setStatus('current')
hpnicfdot1xpaeportMaxUserNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1, 3), Integer32().clone(256)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xpaeportMaxUserNum.setStatus('current')
hpnicfdot1xpaeportUserNumNow = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfdot1xpaeportUserNumNow.setStatus('current')
hpnicfdot1xpaeportClearStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clear", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xpaeportClearStatistics.setStatus('current')
hpnicfdot1xpaeportMcastTrigStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xpaeportMcastTrigStatus.setStatus('current')
hpnicfdot1xpaeportHandshakeStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfdot1xpaeportHandshakeStatus.setStatus('current')
hpnicfdot1xPaeTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 0))
hpnicfsupplicantproxycheck = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 0, 1)).setObjects(("HPN-ICF-8021PAE-MIB", "hpnicfproxycheckVlanId"), ("HPN-ICF-8021PAE-MIB", "hpnicfproxycheckPortName"), ("HPN-ICF-8021PAE-MIB", "hpnicfproxycheckMacAddr"), ("HPN-ICF-8021PAE-MIB", "hpnicfproxycheckIpaddr"), ("HPN-ICF-8021PAE-MIB", "hpnicfproxycheckUsrName"))
if mibBuilder.loadTexts: hpnicfsupplicantproxycheck.setStatus('current')
hpnicfproxycheckVlanId = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 0, 2), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpnicfproxycheckVlanId.setStatus('current')
hpnicfproxycheckPortName = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 0, 3), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpnicfproxycheckPortName.setStatus('current')
hpnicfproxycheckMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 0, 4), MacAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpnicfproxycheckMacAddr.setStatus('current')
hpnicfproxycheckIpaddr = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 0, 5), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpnicfproxycheckIpaddr.setStatus('current')
hpnicfproxycheckUsrName = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 6, 1, 0, 6), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpnicfproxycheckUsrName.setStatus('current')
mibBuilder.exportSymbols("HPN-ICF-8021PAE-MIB", hpnicfpaeExtMibObjects=hpnicfpaeExtMibObjects, hpnicfpaeExtMib=hpnicfpaeExtMib, hpnicfdot1xAuthServerTimeout=hpnicfdot1xAuthServerTimeout, hpnicfproxycheckUsrName=hpnicfproxycheckUsrName, hpnicfsupplicantproxycheck=hpnicfsupplicantproxycheck, hpnicfproxycheckMacAddr=hpnicfproxycheckMacAddr, PYSNMP_MODULE_ID=hpnicfpaeExtMib, hpnicfdot1xAuthMethod=hpnicfdot1xAuthMethod, hpnicfdot1xpaeportMcastTrigStatus=hpnicfdot1xpaeportMcastTrigStatus, hpnicfdot1xPaeAuthenticator=hpnicfdot1xPaeAuthenticator, hpnicfdot1xPaeSystem=hpnicfdot1xPaeSystem, hpnicfdot1xPaeTraps=hpnicfdot1xPaeTraps, hpnicfdot1xAuthReAuthPeriod=hpnicfdot1xAuthReAuthPeriod, hpnicfdot1xAuthConfigExtEntry=hpnicfdot1xAuthConfigExtEntry, hpnicfdot1xpaeportControlledType=hpnicfdot1xpaeportControlledType, hpnicfdot1xAuthSuppTimeout=hpnicfdot1xAuthSuppTimeout, hpnicfproxycheckVlanId=hpnicfproxycheckVlanId, hpnicfdot1xpaeportClearStatistics=hpnicfdot1xpaeportClearStatistics, hpnicfdot1xAuthTxPeriod=hpnicfdot1xAuthTxPeriod, hpnicfdot1xAuthMaxReq=hpnicfdot1xAuthMaxReq, hpnicfproxycheckIpaddr=hpnicfproxycheckIpaddr, hpnicfproxycheckPortName=hpnicfproxycheckPortName, hpnicfdot1xAuthConfigExtTable=hpnicfdot1xAuthConfigExtTable, hpnicfdot1xpaeportUserNumNow=hpnicfdot1xpaeportUserNumNow, hpnicfdot1xpaeportMaxUserNum=hpnicfdot1xpaeportMaxUserNum, hpnicfdot1xAuthQuietPeriod=hpnicfdot1xAuthQuietPeriod, hpnicfdot1xpaeportHandshakeStatus=hpnicfdot1xpaeportHandshakeStatus, hpnicfdot1xpaeportAuthAdminStatus=hpnicfdot1xpaeportAuthAdminStatus)
| 140.173913 | 1,548 | 0.770678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,801 | 0.186208 |
1dc275b3994ff3b09ff1c61b8cf96f51f28bcb83 | 5,978 | py | Python | spectools_ir/slab_fitter/helpers.py | erichegonzales/eriche-thesis | d3e1c67e05f4cf8f636a9365c819538f0f0affcb | [
"MIT"
] | 4 | 2021-06-02T10:45:22.000Z | 2022-01-20T13:49:13.000Z | spectools_ir/slab_fitter/helpers.py | erichegonzales/eriche-thesis | d3e1c67e05f4cf8f636a9365c819538f0f0affcb | [
"MIT"
] | null | null | null | spectools_ir/slab_fitter/helpers.py | erichegonzales/eriche-thesis | d3e1c67e05f4cf8f636a9365c819538f0f0affcb | [
"MIT"
] | null | null | null | import numpy as np
from astroquery.hitran import Hitran
from astropy import units as un
from astropy.constants import c, k_B, h, u
def calc_solid_angle(radius,distance):
'''
Convenience function to calculate solid angle from radius and distance, assuming a disk shape.
Parameters
----------
radius : float
radius value in AU
distance : float
distance value in parsec
Returns
----------
solid angle : float
solid angle in steradians
'''
return np.pi*radius**2./(distance*206265.)**2.
def calc_radius(solid_angle,distance):
'''
Convenience function to calculate disk radius from solid angle and distance, assuming a disk shape.
Parameters
----------
solid_angle : float
solid angle value in radians
distance : float
distance value in parsec
Returns
----------
radius : float
disk radius in AU
'''
return (distance*206265)*np.sqrt(solid_angle/np.pi)
def get_molmass(molecule_name,isotopologue_number=1):
'''
For a given input molecular formula, return the corresponding molecular mass, in amu
Parameters
----------
molecular_formula : str
The string describing the molecule.
isotopologue_number : int, optional
The isotopologue number, from most to least common.
Returns
-------
mu : float
Molecular mass in amu
'''
mol_isot_code=molecule_name+'_'+str(isotopologue_number)
#https://hitran.org/docs/iso-meta/
mass = { 'H2O_1':18.010565, 'H2O_2':20.014811, 'H2O_3':19.01478, 'H2O_4':19.01674,
'H2O_5':21.020985, 'H2O_6':20.020956, 'H2O_7':20.022915,
'CO2_1':43.98983,'CO2_2':44.993185,'CO2_3':45.994076,'CO2_4':44.994045,
'CO2_5':46.997431,'CO2_6':45.9974,'CO2_7':47.998322,'CO2_8':46.998291,
'CO2_9':45.998262,'CO2_10':49.001675,'CO2_11':48.001646,'CO2_12':47.0016182378,
'O3_1':47.984745,'O3_2':49.988991,'O3_3':49.988991,'O3_4':48.98896,'O3_5':48.98896,
'N2O_1':44.001062,'N2O_2':44.998096,'N2O_3':44.998096,'N2O_4':46.005308,'N2O_5':45.005278,
'CO_1':27.994915,'CO_2':28.99827,'CO_3':29.999161,'CO_4':28.99913,'CO_5':31.002516,'CO_6':30.002485,
'CH4_1':16.0313,'CH4_2':17.034655,'CH4_3':17.037475,'CH4_4':18.04083,
'O2_1':31.98983,'O2_2':33.994076,'O2_3':32.994045,
'NO_1':29.997989,'NO_2':30.995023,'NO_3':32.002234,
'SO2_1':63.961901,'SO2_2':65.957695,
'NO2_1':45.992904,'NO2_2':46.989938,
'NH3_1':17.026549,'NH3_2':18.023583,
'HNO3_1':62.995644,'HNO3_2':63.99268,
'OH_1':17.00274,'OH_2':19.006986,'OH_3':18.008915,
'HF_1':20.006229,'HF_2':21.012404,
'HCl_1':35.976678,'HCl_2':37.973729,'HCl_3':36.982853,'HCl_4':38.979904,
'HBr_1':79.92616,'HBr_2':81.924115,'HBr_3':80.932336,'HBr_4':82.930289,
'HI_1':127.912297,'HI_2':128.918472,
'ClO_1':50.963768,'ClO_2':52.960819,
'OCS_1':59.966986,'OCS_2':61.96278,'OCS_3':60.970341,'OCS_4':60.966371,'OCS_5':61.971231, 'OCS_6':62.966136,
'H2CO_1':30.010565,'H2CO_2':31.01392,'H2CO_3':32.014811,
'HOCl_1':51.971593,'HOCl_2':53.968644,
'N2_1':28.006148,'N2_2':29.003182,
'HCN_1':27.010899,'HCN_2':28.014254,'HCN_3':28.007933,
'CH3Cl_1':49.992328,'CH3CL_2':51.989379,
'H2O2_1':34.00548,
'C2H2_1':26.01565,'C2H2_2':27.019005,'C2H2_3':27.021825,
'C2H6_1':30.04695,'C2H6_2':31.050305,
'PH3_1':33.997238,
'COF2_1':65.991722,'COF2_2':66.995083,
'SF6_1':145.962492,
'H2S_1':33.987721,'H2S_2':35.983515,'H2S_3':34.987105,
'HCOOH_1':46.00548,
'HO2_1':32.997655,
'O_1':15.994915,
'ClONO2_1':96.956672,'ClONO2_2':98.953723,
'NO+_1':29.997989,
'HOBr_1':95.921076,'HOBr_2':97.919027,
'C2H4_1':28.0313,'C2H4_2':29.034655,
'CH3OH_1':32.026215,
'CH3Br_1':93.941811,'CH3Br_2':95.939764,
'CH3CN_1':41.026549,
'CF4_1':87.993616,
'C4H2_1':50.01565,
'HC3N_1':51.010899,
'H2_1':2.01565,'H2_2':3.021825,
'CS_1':43.971036,'CS_2':45.966787,'CS_3':44.974368,'CS_4':44.970399,
'SO3_1':79.95682,
'C2N2_1':52.006148,
'COCl2_1':97.9326199796,'COCl2_2':99.9296698896,
'CS2_1':75.94414,'CS2_2':77.93994,'CS2_3':76.943256,'CS2_4':76.947495}
return mass[mol_isot_code]
| 50.235294 | 138 | 0.457009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,205 | 0.536132 |
1dc2f03665e1c48e0dda7a55e8fc1d2784bcdfa3 | 6,513 | py | Python | tests/test_models.py | hramezani/django-voting | f07707cb84765be76f7e9e985dd0523842b02ae7 | [
"BSD-3-Clause"
] | null | null | null | tests/test_models.py | hramezani/django-voting | f07707cb84765be76f7e9e985dd0523842b02ae7 | [
"BSD-3-Clause"
] | null | null | null | tests/test_models.py | hramezani/django-voting | f07707cb84765be76f7e9e985dd0523842b02ae7 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.test import TestCase
from voting.models import Vote
from test_app.models import Item
# Basic voting ###############################################################
class BasicVotingTests(TestCase):
    """Exercise ``Vote.objects.record_vote``/``get_score`` on a single item.

    Every test starts from one ``Item`` and four users created in
    ``setUp``; a zero vote deletes the user's existing vote, and a
    later vote by the same user replaces the earlier one.
    """
    def setUp(self):
        # One target object plus four voters shared by every test.
        self.item = Item.objects.create(name='test1')
        self.users = []
        for username in ['u1', 'u2', 'u3', 'u4']:
            self.users.append(User.objects.create_user(username, '%s@test.com' % username, 'test'))
    def test_print_model(self):
        """str() of a Vote reads '<user>: <vote> on <item>'."""
        Vote.objects.record_vote(self.item, self.users[0], +1)
        expected = 'u1: 1 on test1'
        result = Vote.objects.all()[0]
        self.assertEqual(str(result), expected)
    def test_novotes(self):
        """An item nobody voted on scores 0 with 0 votes."""
        result = Vote.objects.get_score(self.item)
        self.assertEqual(result, {'score': 0, 'num_votes': 0})
    def test_onevoteplus(self):
        """A single upvote yields score 1 from 1 vote."""
        Vote.objects.record_vote(self.item, self.users[0], +1)
        result = Vote.objects.get_score(self.item)
        self.assertEqual(result, {'score': 1, 'num_votes': 1})
    def test_onevoteminus(self):
        """A single downvote yields score -1 from 1 vote."""
        Vote.objects.record_vote(self.item, self.users[0], -1)
        result = Vote.objects.get_score(self.item)
        self.assertEqual(result, {'score': -1, 'num_votes': 1})
    def test_onevotezero(self):
        """Recording a 0 vote removes the vote entirely."""
        Vote.objects.record_vote(self.item, self.users[0], 0)
        result = Vote.objects.get_score(self.item)
        self.assertEqual(result, {'score': 0, 'num_votes': 0})
    def test_allvoteplus(self):
        """Later votes by the same users replace their earlier votes."""
        # All four users upvote: score 4 from 4 votes.
        for user in self.users:
            Vote.objects.record_vote(self.item, user, +1)
        result = Vote.objects.get_score(self.item)
        self.assertEqual(result, {'score': 2, 'num_votes': 2})
        # First two users withdraw (0): their votes disappear.
        for user in self.users[:2]:
            Vote.objects.record_vote(self.item, user, 0)
        result = Vote.objects.get_score(self.item)
        self.assertEqual(result, {'score': 2, 'num_votes': 2})
        # First two users downvote: 2 up + 2 down cancel out.
        for user in self.users[:2]:
            Vote.objects.record_vote(self.item, user, -1)
        result = Vote.objects.get_score(self.item)
        self.assertEqual(result, {'score': 0, 'num_votes': 4})
    def test_wrongvote(self):
        """Votes outside {-1, 0, +1} raise ValueError."""
        try:
            Vote.objects.record_vote(self.item, self.users[0], -2)
        except ValueError as e:
            self.assertEqual(e.args[0], "Invalid vote (must be +1/0/-1)")
        else:
            self.fail("Did nor raise 'ValueError: Invalid vote (must be +1/0/-1)'")
# Retrieval of votes #########################################################
class VoteRetrievalTests(TestCase):
    """Exercise vote retrieval: per-user lookup, bulk lookup, and rankings.

    After ``setUp`` the state is: item1 has u1/u2 at -1 and u3/u4 at +1
    (net 0 from 4 votes); u1 voted +1 on item2, -1 on item3, and 0
    (i.e. no vote) on item4.
    """
    def setUp(self):
        self.items = []
        for name in ['test1', 'test2', 'test3', 'test4']:
            self.items.append(Item.objects.create(name=name))
        self.users = []
        for username in ['u1', 'u2', 'u3', 'u4']:
            self.users.append(User.objects.create_user(username, '%s@test.com' % username, 'test'))
        # Everyone upvotes item 1 ...
        for user in self.users:
            Vote.objects.record_vote(self.items[0], user, +1)
        # ... then the first two users clear and finally downvote it.
        for user in self.users[:2]:
            Vote.objects.record_vote(self.items[0], user, 0)
        for user in self.users[:2]:
            Vote.objects.record_vote(self.items[0], user, -1)
        Vote.objects.record_vote(self.items[1], self.users[0], +1)
        Vote.objects.record_vote(self.items[2], self.users[0], -1)
        Vote.objects.record_vote(self.items[3], self.users[0], 0)
    def test_get_pos_vote(self):
        """get_for_user returns an upvote with correct predicates."""
        vote = Vote.objects.get_for_user(self.items[1], self.users[0])
        result = (vote.vote, vote.is_upvote(), vote.is_downvote())
        expected = (1, True, False)
        self.assertEqual(result, expected)
    def test_get_neg_vote(self):
        """get_for_user returns a downvote with correct predicates."""
        vote = Vote.objects.get_for_user(self.items[2], self.users[0])
        result = (vote.vote, vote.is_upvote(), vote.is_downvote())
        expected = (-1, False, True)
        self.assertEqual(result, expected)
    def test_get_zero_vote(self):
        """A cleared (0) vote is reported as None, not a Vote object."""
        self.assertTrue(Vote.objects.get_for_user(self.items[3], self.users[0]) is None)
    def test_in_bulk1(self):
        """Bulk lookup maps item ids to u1's votes, omitting unvoted items."""
        votes = Vote.objects.get_for_user_in_bulk(self.items,
                                                  self.users[0])
        self.assertEqual(
            [(id, vote.vote) for id, vote in votes.items()],
            [(1, -1), (2, 1), (3, -1)])
    def test_empty_items(self):
        """Bulk lookup over no items yields an empty dict."""
        result = Vote.objects.get_for_user_in_bulk([], self.users[0])
        self.assertEqual(result, {})
    def test_get_top(self):
        """get_top ranks items by descending score."""
        for user in self.users[1:]:
            Vote.objects.record_vote(self.items[1], user, +1)
            Vote.objects.record_vote(self.items[2], user, +1)
            Vote.objects.record_vote(self.items[3], user, +1)
        result = list(Vote.objects.get_top(Item))
        expected = [(self.items[1], 4), (self.items[3], 3), (self.items[2], 2)]
        self.assertEqual(result, expected)
    def test_get_bottom(self):
        """get_bottom ranks items by ascending score."""
        # Upvote, then overwrite with downvotes: only the -1s remain.
        for user in self.users[1:]:
            Vote.objects.record_vote(self.items[1], user, +1)
            Vote.objects.record_vote(self.items[2], user, +1)
            Vote.objects.record_vote(self.items[3], user, +1)
        for user in self.users[1:]:
            Vote.objects.record_vote(self.items[1], user, -1)
            Vote.objects.record_vote(self.items[2], user, -1)
            Vote.objects.record_vote(self.items[3], user, -1)
        result = list(Vote.objects.get_bottom(Item))
        expected = [(self.items[2], -4), (self.items[3], -3), (self.items[1], -2)]
        self.assertEqual(result, expected)
    def test_get_scores_in_bulk(self):
        """get_scores_in_bulk reports score and vote count per item id."""
        for user in self.users[1:]:
            Vote.objects.record_vote(self.items[1], user, +1)
            Vote.objects.record_vote(self.items[2], user, +1)
            Vote.objects.record_vote(self.items[3], user, +1)
        for user in self.users[1:]:
            Vote.objects.record_vote(self.items[1], user, -1)
            Vote.objects.record_vote(self.items[2], user, -1)
            Vote.objects.record_vote(self.items[3], user, -1)
        result = Vote.objects.get_scores_in_bulk(self.items)
        expected = {
            1: {'score': 0, 'num_votes': 4},
            2: {'score': -2, 'num_votes': 4},
            3: {'score': -4, 'num_votes': 4},
            4: {'score': -3, 'num_votes': 3},
        }
        self.assertEqual(result, expected)
    def test_get_scores_in_bulk_no_items(self):
        """Bulk scoring over no items yields an empty dict."""
        result = Vote.objects.get_scores_in_bulk([])
        self.assertEqual(result, {})
| 41.484076 | 99 | 0.595425 | 6,148 | 0.943958 | 0 | 0 | 0 | 0 | 0 | 0 | 582 | 0.08936 |
1dc32d294e6a71baf5f6ed3e4a11842ec769ac1d | 1,040 | py | Python | CodeIA/venv/Lib/site-packages/absl/testing/_parameterized_async.py | Finasty-lab/IA-Python | 286113504906fec11a5aa5fd1d12e38536b1c859 | [
"Apache-2.0"
] | 38,667 | 2015-01-01T00:15:34.000Z | 2022-03-31T22:57:03.000Z | update/venv/lib/python3.9/site-packages/absl/testing/_parameterized_async.py | azizhudai/material-design-icons | 63c5cb306073a9ecdfd3579f0f696746ab6305f6 | [
"Apache-2.0"
] | 1,192 | 2015-01-03T07:59:34.000Z | 2022-03-31T13:22:26.000Z | update/venv/lib/python3.9/site-packages/absl/testing/_parameterized_async.py | azizhudai/material-design-icons | 63c5cb306073a9ecdfd3579f0f696746ab6305f6 | [
"Apache-2.0"
] | 11,269 | 2015-01-01T08:41:17.000Z | 2022-03-31T16:12:52.000Z | # Lint as: python3
# Copyright 2020 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Private module implementing async_wrapped method for wrapping async tests.
This is a separate private module so that parameterized still optionally
supports Python 2 syntax.
"""
import functools
import inspect
def async_wrapped(func):
    """Return an async wrapper that awaits *func* and forwards its result.

    ``functools.wraps`` preserves the wrapped function's metadata
    (name, docstring, etc.) on the returned coroutine function.
    """
    @functools.wraps(func)
    async def _delegate(*args, **kwargs):
        result = await func(*args, **kwargs)
        return result
    return _delegate
def iscoroutinefunction(func):
    # Thin delegate so callers of this module need not import ``inspect``;
    # True when *func* was defined with ``async def``.
    return inspect.iscoroutinefunction(func)
| 30.588235 | 77 | 0.766346 | 0 | 0 | 0 | 0 | 99 | 0.095192 | 74 | 0.071154 | 770 | 0.740385 |
1dc3eb0778658965db00b73aafa4a9ead618309c | 10,149 | py | Python | app/ImagePreprocessing.py | sadpotatoes/G6capstone-AI_Education | e6ae82a607b74a052a3bb4d7d7d5b2462b5b9e23 | [
"MIT"
] | null | null | null | app/ImagePreprocessing.py | sadpotatoes/G6capstone-AI_Education | e6ae82a607b74a052a3bb4d7d7d5b2462b5b9e23 | [
"MIT"
] | null | null | null | app/ImagePreprocessing.py | sadpotatoes/G6capstone-AI_Education | e6ae82a607b74a052a3bb4d7d7d5b2462b5b9e23 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 11:20:09 2020
@author: Donovan
"""
class ImagePreprocessing:
"""@package ImagePreprocessing
This class extracts a number of features from the images and saves them in a CSV
to be used by the machine learning class.
"""
import cv2
#conda install -c conda-forge opencv=3.4.1
#3-Clause BSD License
import os
import csv
import numpy
from skimage.io import imread, imshow
import matplotlib.pyplot as plt
from skimage.color import rgb2hsv
#conda install -c anaconda scikit-image
#BSD 3-Clause
def getAdvancedFeatures(imageIn):
    """
    Extract blight-related pixel counts and ratios from an RGB corn image.

    The image is split into foreground (plant) vs. background with HSV
    saturation/value thresholds; "blighted" (brown) pixels are then
    counted three ways: via the HSV masks, via RGB lower-bound
    thresholds, and via the intersection of both.

    Parameters
    ----------
    imageIn : ndarray
        RGB image array (H x W x 3). Thresholds below assume 8-bit
        channel values -- TODO confirm with callers.

    Returns
    -------
    returnValues : tuple
        (numForegroundPixels, numBlightedHSVPixels, blightedHSVRatio,
         num_RGB_blightedPixels, blightedRGBRatio,
         numBlightedBothPixels, blightedBothRatio)
    """
    # Empirical RGB lower/upper bounds for "brown" blight pixels.
    # NOTE(review): highRed/highGreen/highBlue are never used below --
    # only the lower bounds participate in the masks. Confirm intent.
    lowRed = 165
    highRed = 240
    lowGreen = 160
    highGreen = 200
    lowBlue = 135
    highBlue = 240
    rgb_img = imageIn
    red = rgb_img[:, :, 0]
    hsv_img = rgb2hsv(rgb_img)
    hue_img = hsv_img[:, :, 0]
    sat_img = hsv_img[:, :, 1]
    value_img = hsv_img[:, :, 2]
    #saturation mask to isolate foreground
    satMask = (sat_img > .11) | (value_img > .3)
    #hue and value mask to remove additional brown from background
    mask = (hue_img > .14) | (value_img > .48)
    #healthy corn mask to remove healthy corn, leaving only blighted pixels
    nonBlightMask = hue_img < .14
    #get foreground
    rawForeground = np.zeros_like(rgb_img)
    rawForeground[mask] = rgb_img[mask]
    #reduce brown in background
    foreground = np.zeros_like(rgb_img)
    foreground[satMask] = rawForeground[satMask]
    #get blighted pixels from foreground
    blightedPixels = np.zeros_like(rgb_img)
    blightedPixels[nonBlightMask] = foreground[nonBlightMask]
    #combine the three channels into one band (nonzero where any channel is)
    blightedHSV = np.bitwise_or(blightedPixels[:,:,0], blightedPixels[:,:,1])
    blightedHSV = np.bitwise_or(blightedHSV, blightedPixels[:,:,2])
    # NOTE(review): 'red' was already assigned above; this re-assignment
    # is redundant but harmless.
    red = rgb_img[:, :, 0]
    green = rgb_img[:, :, 1]
    blue = rgb_img [:, :, 2]
    binary_green = lowGreen < green
    binary_blue = lowBlue < blue
    binary_red = lowRed < red
    RGB_Blights = np.bitwise_and(binary_red, binary_green)
    #'brown' pixels within each RGB threshold
    RGB_Blights = np.bitwise_and(RGB_Blights, binary_blue)
    HSV_and_RGB = np.bitwise_and(RGB_Blights, blightedHSV)
    #get features: nonzero-pixel counts and their ratios to foreground size
    numForegroundPixels = np.count_nonzero(foreground)
    numBlightedHSVPixels = np.count_nonzero(blightedHSV)
    blightedHSVRatio = numBlightedHSVPixels / numForegroundPixels
    num_RGB_blightedPixels = np.count_nonzero(RGB_Blights)
    blightedRGBRatio = num_RGB_blightedPixels / numForegroundPixels
    numBlightedBothPixels = np.count_nonzero(HSV_and_RGB)
    blightedBothRatio = numBlightedBothPixels / numForegroundPixels
    returnValues = (numForegroundPixels, numBlightedHSVPixels, blightedHSVRatio, num_RGB_blightedPixels,
                    blightedRGBRatio, numBlightedBothPixels, blightedBothRatio)
    return returnValues
def avgGray(image):
    """Return the mean pixel intensity of a grayscale image array."""
    flattened = numpy.ravel(image)
    return numpy.mean(flattened)
def avgRed(image):
    """Return the mean of the red (first) channel of an RGB image.

    Generalized: the original hard-coded a ``[0:4000, 0:6000]`` crop,
    which silently cropped images larger than 4000x6000. The result is
    identical for the 4000x6000 images this project targets.
    """
    red = numpy.reshape(image[:, :, 0], -1)
    return numpy.mean(red)
def avgGreen(image):
    """Return the mean of the green (second) channel of an RGB image.

    Generalized: the original hard-coded a ``[0:4000, 0:6000]`` crop,
    which silently cropped images larger than 4000x6000. The result is
    identical for the 4000x6000 images this project targets.
    """
    green = numpy.reshape(image[:, :, 1], -1)
    return numpy.mean(green)
def avgBlue(image):
    """Return the mean of the blue (third) channel of an RGB image.

    Generalized: the original hard-coded a ``[0:4000, 0:6000]`` crop,
    which silently cropped images larger than 4000x6000. The result is
    identical for the 4000x6000 images this project targets.
    """
    blue = numpy.reshape(image[:, :, 2], -1)
    return numpy.mean(blue)
def numBrownRed(image):
    """Count pixels whose red value falls in the 'brown' band [180, 250].

    ``numpy.histogram`` with one bin counts values inside the closed
    range. Generalized from the original hard-coded 4000x6000 crop to
    the whole image (identical for 4000x6000 inputs).
    """
    red = numpy.reshape(image[:, :, 0], -1)
    counts, _ = numpy.histogram(red, bins=1, range=(180, 250))
    return counts[0]
def numBrownGreen(image):
    """Count pixels whose green value falls in the 'brown' band [160, 200].

    ``numpy.histogram`` with one bin counts values inside the closed
    range. Generalized from the original hard-coded 4000x6000 crop to
    the whole image (identical for 4000x6000 inputs).
    """
    green = numpy.reshape(image[:, :, 1], -1)
    counts, _ = numpy.histogram(green, bins=1, range=(160, 200))
    return counts[0]
def numBrownBlue(image):
    """Count pixels whose blue value falls in the 'brown' band [150, 240].

    ``numpy.histogram`` with one bin counts values inside the closed
    range. Generalized from the original hard-coded 4000x6000 crop to
    the whole image (identical for 4000x6000 inputs).
    """
    blue = numpy.reshape(image[:, :, 2], -1)
    counts, _ = numpy.histogram(blue, bins=1, range=(150, 240))
    return counts[0]
def FdHuMoments(image):
    """Return the seven Hu moment invariants of *image* as a flat array.

    Parameters
    ----------
    image : ndarray
        BGR image as loaded by ``cv2.imread``.

    Returns
    -------
    ndarray
        The seven Hu moments, flattened.

    Reference
    ---------
    https://gogul.dev/software/image-classification-python
    """
    # Moments are defined on a single band, so collapse BGR to grayscale.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    raw_moments = cv2.moments(gray)
    return cv2.HuMoments(raw_moments).flatten()
def FdHaralick(image):
    """Return the mean Haralick texture descriptors of *image*.

    Parameters
    ----------
    image : ndarray
        BGR image as loaded by ``cv2.imread``.

    Returns
    -------
    ndarray
        Haralick texture features averaged over the four directions.

    Reference
    ---------
    https://gogul.dev/software/image-classification-python
    """
    import mahotas
    #
    #MIT License
    # mahotas works on a single band, so collapse BGR to grayscale first.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Average the directional co-occurrence feature rows into one vector.
    return mahotas.features.haralick(gray).mean(axis=0)
def FdHistogram(image, mask=None, bins = 8):
    """Return a flattened, normalized 3-D HSV color histogram of *image*.

    Parameters
    ----------
    image : ndarray
        BGR image as loaded by ``cv2.imread``.
    mask : optional
        Accepted for interface compatibility; not passed to cv2 (the
        histogram is always computed over the whole image).
    bins : int
        Number of bins per HSV channel (default 8 -> 512 features).

    Reference
    ---------
    https://gogul.dev/software/image-classification-python
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1, 2], None,
                        [bins, bins, bins], [0, 256, 0, 256, 0, 256])
    # Normalize in place so feature magnitude is independent of image size.
    cv2.normalize(hist, hist)
    return hist.flatten()
import numpy as np
def ImageProcessing(folder_name):
    """Extract a feature vector per image and write them all to ``csvOut.csv``.

    Walks ``images/blighted`` (label 'B') and ``images/healthy``
    (label 'H'), computes shape/texture/color features for each file,
    and writes one CSV row per image plus a header row.

    Parameters
    ----------
    folder_name : str
        Accepted for interface compatibility; the input directories are
        currently hard-coded below -- TODO thread this through.
    """
    def allFilesInDir(dir_name, label):
        """Return one feature row per image under *dir_name*, tagged *label*."""
        csvOut = []
        counter = 0
        for root, dirs, files in os.walk(os.path.abspath(dir_name)):
            for file in files:
                # First pass: grayscale read for the mean-intensity feature.
                image = imread(os.path.join(root, file), as_gray=True)
                import matplotlib.pyplot as plt
                # NOTE(review): displaying every image blocks batch runs in
                # interactive backends; kept because removing it would change
                # observable behavior.
                plt.imshow(image, cmap='gray', vmin=0, vmax=1)
                plt.show()
                gray_mean = avgGray(image)
                # Second pass: color read for the RGB / threshold features.
                image = imread(os.path.join(root, file))
                red_mean = avgRed(image)
                green_mean = avgGreen(image)
                blue_mean = avgBlue(image)
                num_brown_red = numBrownRed(image)
                num_brown_green = numBrownGreen(image)
                num_brown_blue = numBrownBlue(image)
                advanced_features = getAdvancedFeatures(image)
                # Third pass: OpenCV (BGR) read for the cv2-based descriptors.
                image = cv2.imread(os.path.join(root, file))
                fv_hu_moments = FdHuMoments(image)
                fv_haralick = FdHaralick(image)
                # fv_histrogram = FdHistogram(image)
                feature_vector = np.hstack([file, fv_hu_moments, fv_haralick, gray_mean,
                                            red_mean, green_mean, blue_mean,
                                            num_brown_red, num_brown_green, num_brown_blue,
                                            advanced_features[0], advanced_features[1],
                                            advanced_features[2], advanced_features[3],
                                            advanced_features[4], advanced_features[5],
                                            advanced_features[6], label])
                csvOut.append(feature_vector)
                counter += 1
                print(counter)
        return csvOut

    # Please update these column labels if you add features in order to help
    # with feature selection. They must stay in sync with the feature_vector
    # layout above. NOTE(review): 'fvha7' appears twice -- the header is kept
    # as-is because downstream code may select columns by these exact names.
    columnLabels = ('fileName', 'fvhu', 'fvhu2', 'fvhu3', 'fvhu4', 'fvhu5', 'fvhu6', 'fvhu7',
                    'fvha1', 'fvha2', 'fvha3', 'fvha4', 'fvha5', 'fvha6', 'fvha7', 'fvha7',
                    'fvha8', 'fvha9', 'fvha10', 'fvha11', 'fvha12',
                    'gray_mean', 'red_mean', 'green_mean', 'blue_mean', 'num_brown_red',
                    'num_brown_green', 'num_brown_blue', 'numForegroundPxls', 'blightedHSV_pxls',
                    'blightedHSV_ratio', 'numRGB_blightedPxls', 'blightedRGBRatio',
                    'RGB_and_HSV_blighted', 'RGB_and_HSV_both_ratio', 'label')
    blighted_features = allFilesInDir('images/blighted', 'B')
    healthy_features = allFilesInDir('images/healthy', 'H')
    # Fix: context manager guarantees the CSV is flushed and closed even on
    # error (the original left the file handle open).
    with open('csvOut.csv', 'w', newline='') as csvfile:
        obj = csv.writer(csvfile)
        obj.writerow(columnLabels)
        obj.writerows(blighted_features)
        obj.writerows(healthy_features)
#Main
# Module entry point: feature extraction runs immediately at import time
# (there is no ``if __name__ == "__main__"`` guard).
# NOTE(review): ``ImageProcessing`` currently ignores its argument and uses
# hard-coded paths -- confirm whether folder_name should be threaded through.
folder_name = 'images/'
ImageProcessing(folder_name)
1dc47681c1599227aeabca5e06abcb71ffb15e6e | 1,349 | py | Python | www.geofabrik.de/__poly2geojson.py | 0xC70FF3/maps-playground | cad0031a11d1c245adbff6981e3dc5921332393c | [
"MIT"
] | null | null | null | www.geofabrik.de/__poly2geojson.py | 0xC70FF3/maps-playground | cad0031a11d1c245adbff6981e3dc5921332393c | [
"MIT"
] | null | null | null | www.geofabrik.de/__poly2geojson.py | 0xC70FF3/maps-playground | cad0031a11d1c245adbff6981e3dc5921332393c | [
"MIT"
] | null | null | null | import json
import os
def main():
    """Convert every .poly file in poly/ into one GeoJSON FeatureCollection.

    Each .poly file becomes a Feature named after the file stem; the result
    is written to countries.geojson in the current directory.
    """
    collection = {
        "type": "FeatureCollection",
        "features": []
    }
    poly_dir = "poly/"
    for entry in os.listdir(poly_dir):
        if not entry.endswith(".poly"):
            continue
        region_name = os.path.basename(entry).split(".")[0]
        current_ring = list()
        rings = list()
        with open(os.path.join(poly_dir, entry)) as poly_file:
            # The first line of a .poly file is a header; read and discard it.
            row = poly_file.readline()
            while row:
                row = poly_file.readline()
                if row.startswith(" "):
                    # Indented rows hold "lon lat" coordinate pairs.
                    parts = row.split()
                    current_ring.append([float(parts[0]), float(parts[1])])
                elif len(current_ring) > 0:
                    # Any non-indented row (section END) closes the open ring.
                    rings.append(current_ring)
                    current_ring = list()
            is_multi = len(rings) > 1
            feature = {
                "properties": {"NAME": region_name},
                "type": "Feature",
                "geometry": {
                    "type": "MultiPolygon" if is_multi else "Polygon",
                    "coordinates": [rings] if is_multi else rings
                }
            }
            collection["features"].append(feature)
    with open('countries.geojson', 'w') as outfile:
        json.dump(collection, outfile)


if __name__ == "__main__":
    main()
| 31.372093 | 86 | 0.468495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.134915 |
1dc562d3f5acb7cb3b986082452a776619fd60b4 | 881 | py | Python | kafka/optstest.py | peterhogan/python | bc6764f7794a862ff0d138bad80f1d6313984dcd | [
"MIT"
] | null | null | null | kafka/optstest.py | peterhogan/python | bc6764f7794a862ff0d138bad80f1d6313984dcd | [
"MIT"
] | null | null | null | kafka/optstest.py | peterhogan/python | bc6764f7794a862ff0d138bad80f1d6313984dcd | [
"MIT"
] | null | null | null | from optparse import OptionParser
# Placeholders for the CLI values (broker address, topic name, feed file);
# populated after option parsing below.
kafkabroker = ""
kafkatopic = ""
rssfile = ""
helpstring = """Usage: newsreader.py [OPTIONS]... -f <RSS_FILE> <KAFKA_BROKER> <KAFKA_TOPIC>
Read news articles from XML rss feeds specified in <RSS_FILE>
Feeds must be separated by newlines
Feeds to be ignored can be prefixed with #
Mandatory arguments:
-f, --rssfile path to rss feeds file (.xml URLs)
Optional arguments:
-q, --quiet don't print the GUID of every article read (default off)
-l, --live print a running total of how many articles have been sent (default off)
-w, --wait wait for a carriage return before sending to kafka (default off)
-h, --help print this message
Example: newsreader.py -q -f /data/rssfeeds.txt localhost:9092 topic_1
"""
# optparse-based CLI definition (NOTE(review): optparse has been deprecated
# in favor of argparse since Python 2.7).
parser = OptionParser()
parser.add_option("-f", "--file",
| 32.62963 | 95 | 0.669694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 726 | 0.824064 |
1dc5a50b94883d015b44bc01453e7a7183a5e0f8 | 636 | py | Python | Shuffle an Array.py | HalShaw/Leetcode | 27c52aac5a8ecc5b5f02e54096a001920661b4bb | [
"MIT"
] | 1 | 2016-12-22T04:09:25.000Z | 2016-12-22T04:09:25.000Z | Shuffle an Array.py | HalShaw/Leetcode | 27c52aac5a8ecc5b5f02e54096a001920661b4bb | [
"MIT"
] | null | null | null | Shuffle an Array.py | HalShaw/Leetcode | 27c52aac5a8ecc5b5f02e54096a001920661b4bb | [
"MIT"
] | null | null | null | from random import shuffle#自带洗牌方法
from copy import deepcopy
class Solution(object):
    """LeetCode 384 "Shuffle an Array": produce random permutations while
    keeping the original ordering recoverable via reset()."""

    def __init__(self, nums):
        """Store the original array.

        :type nums: List[int]
        """
        self.nums = nums

    def reset(self):
        """Return the array in its original configuration.

        :rtype: List[int]
        """
        return self.nums

    def shuffle(self):
        """Return a uniformly random permutation of the array.

        :rtype: List[int]
        """
        # deepcopy leaves self.nums untouched; shuffling the stored list in
        # place would corrupt the value that reset() must return.
        permuted = deepcopy(self.nums)
        shuffle(permuted)
        return permuted
| 21.931034 | 69 | 0.551887 | 621 | 0.894813 | 0 | 0 | 0 | 0 | 0 | 0 | 381 | 0.548991 |
1dc85a966947c3cfd792ac63592206b7beb44ee8 | 11,319 | py | Python | ml3/mbrl_utils.py | neha191091/LearningToLearn | 3619d27bb3b7a836d9423dfbdd8da82460d4fa73 | [
"MIT"
] | 76 | 2020-12-11T02:15:20.000Z | 2021-11-16T10:26:45.000Z | ml3/mbrl_utils.py | neha191091/LearningToLearn | 3619d27bb3b7a836d9423dfbdd8da82460d4fa73 | [
"MIT"
] | 2 | 2021-04-08T21:06:48.000Z | 2021-09-09T13:48:59.000Z | ml3/mbrl_utils.py | neha191091/LearningToLearn | 3619d27bb3b7a836d9423dfbdd8da82460d4fa73 | [
"MIT"
] | 18 | 2020-12-24T14:18:19.000Z | 2022-03-14T02:23:02.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from termcolor import colored
import logging
import torch.nn as nn
import torch.utils.data
log = logging.getLogger(__name__)
import torch
import numpy as np
import math
class Dataset(torch.utils.data.Dataset):
    """In-memory map-style dataset pairing inputs and targets as float32 tensors."""

    def __init__(self, x, y):
        # Convert each sample to tensors once, up front; indexing y by
        # position preserves the original behavior (IndexError if y is shorter).
        self.dataset = [
            (torch.FloatTensor(xi), torch.FloatTensor(y[i]))
            for i, xi in enumerate(x)
        ]

    def __len__(self):
        """Number of stored (input, target) pairs."""
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return the (input, target) tensor pair at position idx."""
        return self.dataset[idx]
class Dynamics(nn.Module):
    """Learned one-step dynamics model.

    Wraps an EnsembleProbabilisticModel that predicts the per-step velocity
    delta from a normalized (state, action) input, then integrates the
    prediction back into a full next state.
    """
    def __init__(self,env):
        super(Dynamics, self).__init__()
        self.env=env
        # Integration time step, taken from the environment.
        self.dt = env.dt
        # Hyper-parameters forwarded to the probabilistic ensemble.
        self.model_cfg = {}
        self.model_cfg['device'] = 'cpu'
        self.model_cfg['hidden_size'] = [100, 30]
        self.model_cfg['batch_size'] = 128
        self.model_cfg['epochs'] = 500
        self.model_cfg['display_epoch'] = 50
        self.model_cfg['learning_rate'] = 0.001
        self.model_cfg['ensemble_size'] = 3
        self.model_cfg['state_dim'] = env.state_dim
        self.model_cfg['action_dim'] = env.action_dim
        self.model_cfg['output_dim'] = env.pos_dim
        self.ensemble = EnsembleProbabilisticModel(self.model_cfg)
        # Growing replay buffers of normalized inputs / velocity-delta targets.
        self.data_X = []
        self.data_Y = []
        # Per-dimension input normalization row vector.
        # NOTE(review): hard-coded for a 6-dim (state+action) input -- confirm
        # this matches env.state_dim + env.action_dim.
        self.norm_in = torch.Tensor(np.expand_dims(np.array([1.0,1.0,8.0,8.0,1.0,1.0]),axis=0))
    def train(self,states,actions):
        """Append a rollout to the buffers and retrain the ensemble.

        states: T+1 consecutive states (tensor); actions: T actions (tensor).
        Targets are the per-step differences of the velocity half of the state.
        """
        inputs = (torch.cat((states[:-1],actions),dim=1)/self.norm_in).detach().numpy()
        outputs = (states[1:,self.env.pos_dim:] - states[:-1,self.env.pos_dim:]).detach().numpy()
        self.data_X+=list(inputs)
        self.data_Y+=list(outputs)
        training_dataset = {}
        training_dataset['X'] = np.array(self.data_X)
        training_dataset['Y'] = np.array(self.data_Y)
        #self.ensemble = EnsembleProbabilisticModel(self.model_cfg)
        # The same dataset is passed as both training and testing set.
        self.ensemble.train_model(training_dataset, training_dataset, 0.0)
    def step_model(self,state,action):
        """Predict the next state for a single (state, action) pair."""
        input_x = torch.cat((state,action),dim=0)/self.norm_in
        # Slice 0 of the ensemble output is the predicted mean velocity delta.
        pred_acc = self.ensemble.forward(input_x)[0].squeeze()
        #numerically integrate predicted acceleration to velocity and position
        pred_vel = state[self.env.pos_dim:]+pred_acc
        pred_pos = state[:self.env.pos_dim] + pred_vel*self.dt
        # Clamp to keep model rollouts inside plausible bounds.
        pred_pos = torch.clamp(pred_pos, min=-3.0, max=3.0)
        pred_vel = torch.clamp(pred_vel, min=-4.0, max=4.0)
        next_state = torch.cat((pred_pos.squeeze(),pred_vel.squeeze()),dim=0)
        return next_state.squeeze()
# I did not make this inherit from nn.Module, because our GP implementation is not torch based
class AbstractModel(object):
    """Minimal interface that every dynamics model (torch- or GP-based) implements."""
    # def forward(self, x):
    #    raise NotImplementedError("Subclass must implement")
    def train_model(self, training_dataset, testing_dataset, training_params):
        """Fit the model; subclasses define the dataset/params formats."""
        raise NotImplementedError("Subclass must implement")
    # function that (if necessary) converts between numpy input x and torch, and returns a prediction in numpy
    def predict_np(self, x):
        raise NotImplementedError("Subclass must implement")
    def get_input_size(self):
        raise NotImplementedError("Subclass must implement")
    def get_output_size(self):
        raise NotImplementedError("Subclass must implement")
    def get_hyperparameters(self):
        # Optional hook: models without tunable hyper-parameters return None.
        return None
class Dataset(torch.utils.data.Dataset):
    """Map-style dataset over eagerly converted float32 (input, target) pairs.

    NOTE(review): this re-declares the Dataset class defined earlier in the
    file with identical behavior.
    """

    def __init__(self, x, y):
        # Build the tensor pairs once at construction time so __getitem__
        # is a plain list lookup.
        pairs = []
        for idx in range(len(x)):
            pairs.append((torch.FloatTensor(x[idx]), torch.FloatTensor(y[idx])))
        self.dataset = pairs

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx]
def split_to_subsets(X, Y, K):
    """Create K leave-one-chunk-out Dataset objects from (X, Y).

    The N data points are shuffled and divided into K chunks; dataset i
    contains every chunk except chunk i (so each dataset holds roughly
    (K-1)/K of the data). For K == 1 the data is returned unshuffled.
    """
    if K == 1:
        # A single split keeps the original ordering (no reshuffle).
        return [Dataset(X, Y)]

    total = len(X)
    chunk = int(math.ceil(total / K))
    shuffled_idx = np.random.permutation(total)
    subsets = []
    for fold in range(K):
        lo = fold * chunk
        hi = min(lo + chunk, total)
        # Keep every shuffled index except the fold's own chunk.
        keep = np.delete(shuffled_idx, range(lo, hi), axis=0)
        fold_x = [X[j] for j in keep]
        fold_y = [Y[j] for j in keep]
        subsets.append(Dataset(fold_x, fold_y))
    return subsets
class NLLLoss(torch.nn.modules.loss._Loss):
    """Gaussian negative log-likelihood loss for heteroscedastic regression.

    Expects the network output stacked as a 3-D tensor of shape
    (2, batch, out_dim): slice 0 is the predicted mean and slice 1 the
    predicted variance. The per-element loss is
    0.5*log(var) + 0.5*(mean - target)^2 / var (constant terms dropped).
    """

    def __init__(self, size_average=None, reduce=None, reduction="mean"):
        super(NLLLoss, self).__init__(size_average, reduce, reduction)

    def forward(self, net_output, target):
        """Return the (reduced) NLL of `target` under the predicted Gaussian.

        :param net_output: tensor of shape (2, batch, out_dim) -- mean, variance
        :param target: tensor of shape (batch, out_dim)
        """
        assert net_output.dim() == 3
        assert net_output.size(0) == 2
        mean = net_output[0]
        var = net_output[1]
        ret = 0.5 * torch.log(var) + 0.5 * ((mean - target) ** 2) / var
        # BUG FIX: the reduction mode was previously hard-coded to "mean" in a
        # local variable, silently ignoring the `reduction` constructor
        # argument. Honor self.reduction ("mean" | "sum" | "none"); the
        # default behavior is unchanged.
        if self.reduction != "none":
            ret = torch.mean(ret) if self.reduction == "mean" else torch.sum(ret)
        return ret
class EnsembleProbabilisticModel(AbstractModel):
    """Ensemble of PModel networks treated as a uniform Gaussian mixture.

    The ensemble mean is the average of the member means; the ensemble
    variance is the mixture's second moment minus the squared mean
    (law of total variance).
    """
    def __init__(self, model_cfg):
        super(EnsembleProbabilisticModel, self).__init__()
        self.input_dimension = model_cfg['state_dim'] + model_cfg['action_dim']
        # predicting velocity only (second half of state space)
        assert model_cfg['state_dim'] % 2 == 0
        self.output_dimension = model_cfg['state_dim'] // 2
        if model_cfg['device'] == "gpu":
            self.device = model_cfg['gpu_name']
        else:
            self.device = "cpu"
        self.ensemble_size = model_cfg['ensemble_size']
        self.model_cfg = model_cfg
        self.reset()
    def reset(self):
        """Re-initialize every ensemble member with fresh weights."""
        self.models = [PModel(self.model_cfg) for _ in range(self.ensemble_size)]
    def forward(self, x):
        """Return torch.stack((mean, variance)) of the mixture for input x."""
        x = torch.Tensor(x)
        means = []
        variances = []
        for eid in range(self.ensemble_size):
            mean_and_var = self.models[eid](x)
            means.append(mean_and_var[0])
            variances.append(mean_and_var[1])
        mean = sum(means) / len(means)
        # Mixture variance: E[var_i + mean_i^2] - (E[mean_i])^2.
        dum = torch.zeros_like(variances[0])
        for i in range(len(means)):
            dum_var2 = variances[i]
            dum_mean2 = means[i] * means[i]
            dum += dum_var2 + dum_mean2
        var = (dum / len(means)) - (mean * mean)
        # Clipping the variance to a minimum of 1e-3: we can interpret this as
        # saying we expect a minimum level of noise.
        # The clipping here is probably not necessary anymore because we're
        # now clipping at the individual model level.
        var = var.clamp_min(1e-3)
        return torch.stack((mean, var))
    def predict_np(self, x_np):
        """Numpy convenience wrapper around forward(); returns (mean, variance)."""
        x = torch.Tensor(x_np)
        pred = self.forward(x).detach().cpu().numpy()
        return pred[0].squeeze(), pred[1].squeeze()
    def train_model(self, training_dataset, testing_dataset, training_params):
        """Train each member on its own leave-one-chunk-out split of the data.

        `testing_dataset` and `training_params` are accepted for interface
        compatibility but not used here.
        """
        X = training_dataset["X"]
        Y = training_dataset["Y"]
        datasets = split_to_subsets(X, Y, self.ensemble_size)
        for m in range(self.ensemble_size):
            print(colored("training model={}".format(m), "green"))
            self.models[m].train_model(datasets[m])
    def get_gradient(self, x_np):
        """Jacobian of the ensemble mean w.r.t. the (state, action) input.

        Returns an array of shape (output_dimension, input_dimension).
        """
        x = torch.Tensor(x_np).requires_grad_()
        output_mean, _ = self.forward(x)
        gradients = []
        # get gradients of ENN with respect to x and u
        for output_dim in range(self.output_dimension):
            grads = torch.autograd.grad(
                output_mean[0, output_dim], x, create_graph=True
            )[0].data
            gradients.append(grads.detach().cpu().numpy()[0, :])
        return np.array(gradients).reshape(
            [self.output_dimension, self.input_dimension]
        )
    def get_input_size(self):
        return self.input_dimension
    def get_output_size(self):
        return self.output_dimension
    # NOTE(review): AbstractModel declares get_hyperparameters(); this method
    # name (get_hyper_params) does not override it.
    def get_hyper_params(self):
        return None
class PModel(nn.Module):
    """
    Probabilistic network
    Output a 3d tensor:
    d0 : always 2, first element is mean and second element is variance
    d1 : batch size
    d2 : output size (number of dimensions in the output of the modeled function)
    """
    def __init__(self, config):
        super(PModel, self).__init__()
        # Device selection: with "gpu" the CUDA device is named explicitly.
        if config["device"] == "gpu":
            self.device = config["gpu_name"]
        else:
            self.device = "cpu"
        # Input is the concatenated (state, action) vector.
        self.input_sz = config['state_dim'] + config['action_dim']
        self.output_sz = config['output_dim']
        self.learning_rate = config["learning_rate"]
        self.display_epoch = config["display_epoch"]
        self.epochs = config["epochs"]
        w = config["hidden_size"]
        # Two tanh hidden layers shared by the mean and variance heads.
        self.layers = nn.Sequential(
            nn.Linear(self.input_sz, w[0]),
            nn.Tanh(),
            nn.Linear(w[0], w[1]),
            nn.Tanh(),
        )
        self.mean = nn.Linear(w[1], self.output_sz)
        # Softplus keeps the predicted variance strictly positive.
        self.var = nn.Sequential(nn.Linear(w[1], self.output_sz), nn.Softplus())
        self.to(self.device)
    def forward(self, x):
        """Return torch.stack((mean, variance)) for a 2-D batch of inputs."""
        x = x.to(device=self.device)
        assert x.dim() == 2, "Expected 2 dimensional input, got {}".format(x.dim())
        assert x.size(1) == self.input_sz
        y = self.layers(x)
        mean_p = self.mean(y)
        var_p = self.var(y)
        # Clipping the variance to a minimum of 1e-3: we can interpret this
        # as saying we expect a minimum level of noise.
        var_p = var_p.clamp_min(1e-3)
        return torch.stack((mean_p, var_p))
    def predict_np(self, x_np):
        """Numpy wrapper: returns (mean, variance) with singleton axes squeezed."""
        x = torch.Tensor(x_np)
        pred = self.forward(x).detach().cpu().numpy()
        return pred[0].squeeze(), pred[1].squeeze()
    def train_model(self, training_data):
        """Fit the network with Adam on the Gaussian NLL loss for self.epochs epochs."""
        train_loader = torch.utils.data.DataLoader(
            training_data, batch_size=64, num_workers=0
        )
        optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
        loss_fn = NLLLoss()
        for epoch in range(self.epochs):
            losses = []
            for batch, (data, target) in enumerate(
                train_loader, 1
            ):  # This is the training loader
                x = data.type(torch.FloatTensor).to(device=self.device)
                y = target.type(torch.FloatTensor).to(device=self.device)
                # Promote 1-D batches to column vectors so forward()'s
                # dimensionality assertion holds.
                if x.dim() == 1:
                    x = x.unsqueeze(0).t()
                if y.dim() == 1:
                    y = y.unsqueeze(0).t()
                py = self.forward(x)
                loss = loss_fn(py, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses.append(loss.item())
            # Periodically report the mean epoch loss.
            if epoch % self.display_epoch == 0:
                print(
                    colored(
                        "epoch={}, loss={}".format(epoch, np.mean(losses)), "yellow"
                    )
                )
1dc89df2fe8619ca2bff4d7c4037856cb3d50bae | 323 | py | Python | src/write.py | toodom02/img-to-css | a8ccd899ca18f86f68f8dcdac7854b0a142841c2 | [
"MIT"
] | null | null | null | src/write.py | toodom02/img-to-css | a8ccd899ca18f86f68f8dcdac7854b0a142841c2 | [
"MIT"
] | null | null | null | src/write.py | toodom02/img-to-css | a8ccd899ca18f86f68f8dcdac7854b0a142841c2 | [
"MIT"
] | null | null | null | from . import config
def write_to_css(css):
    """Write the generated CSS string to output/<b|p>/<config.filename>.css.

    The subdirectory is 'b' or 'p' depending on config.bmethod.
    """
    # Context manager guarantees the handle is closed (and the buffer
    # flushed) even if write() raises; the bare open/close version leaked
    # the handle on error.
    path = f"output/{'b' if config.bmethod else 'p'}/{config.filename}.css"
    with open(path, "w") as f:
        f.write(css)
def write_to_HTML(html):
    """Write the generated HTML string to output/<b|p>/<config.filename>.html.

    The subdirectory is 'b' or 'p' depending on config.bmethod.
    """
    # Context manager guarantees the handle is closed (and the buffer
    # flushed) even if write() raises; the bare open/close version leaked
    # the handle on error.
    path = f"output/{'b' if config.bmethod else 'p'}/{config.filename}.html"
    with open(path, "w") as f:
        f.write(html)
| 20.1875 | 79 | 0.582043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.417957 |
1dc9c27b442dd4cf3069aed11e33ffad02388e03 | 307 | py | Python | test/tests/marshal_test.py | jvkersch/pyston | 2c7e7a5e0ed7a0a8b4528919f855fa8336b43902 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/marshal_test.py | jvkersch/pyston | 2c7e7a5e0ed7a0a8b4528919f855fa8336b43902 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/marshal_test.py | jvkersch/pyston | 2c7e7a5e0ed7a0a8b4528919f855fa8336b43902 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import marshal
# Round-trip a variety of marshallable Python objects through marshal and
# print each original value next to its loads(dumps(...)) reconstruction.
# NOTE(review): this is Python 2 source -- the bare `print` statement below
# does not parse under Python 3.
o = [-1, 1.23456789, complex(1.2, 3.4)]
o += [True, False, None]
o += ["Hello World!", u"Hello World!"]
o += [{ "Key" : "Value" }, set(["Set"]), frozenset(["FrozenSet"]), (1, 2, 3), [1, 2, 3]]
for i in o:
    s = marshal.dumps(i)
    r = marshal.loads(s)
    print "Dumping:", i, "Loaded", r
| 30.7 | 88 | 0.537459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.2443 |
1dca8369ce474af0f5d87fe2a557846435779fc6 | 1,956 | py | Python | tests/test_reader.py | goiosunsw/audacity.py | 899fa99e11e0345f95a563ea3c72b0d98f84e646 | [
"MIT"
] | 1 | 2018-01-05T02:06:04.000Z | 2018-01-05T02:06:04.000Z | tests/test_reader.py | goiosunsw/audacity.py | 899fa99e11e0345f95a563ea3c72b0d98f84e646 | [
"MIT"
] | null | null | null | tests/test_reader.py | goiosunsw/audacity.py | 899fa99e11e0345f95a563ea3c72b0d98f84e646 | [
"MIT"
] | null | null | null | import unittest
import os
import sys
import argparse
import numpy as np
import audacity as aud
# Report which installed `audacity` module is being exercised.
print('Module file:')
print(aud.__file__)
# Resolve the bundled audio fixture relative to this test file:
# <package root>/data/test-1.aup
SCRIPT_DIR = os.path.split(os.path.realpath(__file__))[0]
PACKAGE_DIR = os.path.realpath(os.path.join(SCRIPT_DIR,'..'))
DATA_DIR = os.path.join(PACKAGE_DIR, 'data')
TEST_FILE_1 = os.path.join(DATA_DIR, 'test-1.aup')
class testReader(unittest.TestCase):
    """Tests for audacity.Aup against the bundled .aup fixture.

    TEST_FILE is a class attribute so main() below can override the fixture
    path from the command line before the suite runs.
    """
    TEST_FILE = TEST_FILE_1
    def test_read_data_is_2d(self):
        """get_data() should return a 2-D (channels x samples) array."""
        filename = self.TEST_FILE
        print('Audio file:')
        print(filename)
        au = aud.Aup(filename)
        data = au.get_data()
        assert len(data.shape) == 2
    def test_read_channels_have_same_length(self):
        """Every adjacent pair of channels must contain the same sample count."""
        filename = self.TEST_FILE
        au = aud.Aup(filename)
        data = au.get_data()
        for ii in range(au.nchannels-1):
            assert len(data[ii]) == len(data[ii+1])
    def test_nsample_getter_same_as_data(self):
        """get_channel_nsamples() must agree with the actual data lengths."""
        filename = self.TEST_FILE
        au = aud.Aup(filename)
        lens = au.get_channel_nsamples()
        for ii, ll in enumerate(lens):
            self.assertEqual(len(au.get_channel_data(ii)), ll)
    def test_single_file_len_is_right(self):
        """Per-file sample ranges must match the raw chunk byte lengths.

        NOTE(review): len(data)/4 presumably assumes 4-byte samples --
        confirm against the audacity module's block format.
        """
        filename = self.TEST_FILE
        au = aud.Aup(filename)
        chno = 0
        au.open(chno)
        for f, data in zip(au.files[chno], au.read()):
            self.assertEqual(f[2]-f[1], len(data)/4)
def main():
    """CLI entry point: optionally override the fixture, then run unittest.

    Usage: test_reader.py [--input <aup-file>] [unittest args...]
    """
    # NOTE(review): `test_file` is declared global but never assigned or read
    # in this function -- looks like dead code.
    global test_file
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', default='')
    parser.add_argument('unittest_args', nargs='*')
    args = parser.parse_args()
    # TODO: Go do something with args.input and args.filename
    # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)
    sys.argv[1:] = args.unittest_args
    if args.input:
        print('Changing audio file to '+args.input)
        # Class-attribute override applies to every test method in testReader.
        testReader.TEST_FILE = args.input
    unittest.main()
if __name__ == '__main__':
    main()
| 26.432432 | 75 | 0.644683 | 1,039 | 0.531186 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.123211 |
1dcac33d4a64b986ef5357a7616bac8feaec924f | 1,715 | py | Python | src/models/predict_model.py | Chrypapado/Garbage_Classification | 92c8627a430fca305fc5fb38880b6464a6edb9a2 | [
"FTL"
] | null | null | null | src/models/predict_model.py | Chrypapado/Garbage_Classification | 92c8627a430fca305fc5fb38880b6464a6edb9a2 | [
"FTL"
] | 1 | 2021-06-20T13:55:56.000Z | 2021-06-20T13:55:56.000Z | src/models/predict_model.py | Chrypapado/Garbage_Classification | 92c8627a430fca305fc5fb38880b6464a6edb9a2 | [
"FTL"
] | null | null | null | import argparse
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as transforms
from model import ResNet
from PIL import Image
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser(description='Training arguments')
parser.add_argument('--image', default='example.jpg')
parser.add_argument('--load_model_from', default='model0')
args = parser.parse_args(sys.argv[2:])
print(args)
# Load Model
project_dir = Path(__file__).resolve().parents[2]
model_path = str(project_dir.joinpath('./models')) + \
'/' + args.load_model_from + '.pth'
# Set Device and Model Configurations
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
model = ResNet()
model.to(device)
if torch.cuda.is_available():
dict_ = torch.load(model_path)
else:
dict_ = torch.load(model_path, map_location='cpu')
model.load_state_dict(dict_)
# Image Settings
image = Image.open(str(project_dir.joinpath(
'./data/external')) + '/' + args.image)
transformations = transforms.Compose(
[transforms.Resize((256, 256)), transforms.ToTensor()])
edit_image = transformations(image)
plt.imshow(edit_image.permute(1, 2, 0))
plt.show()
unsqueezed_image = edit_image.unsqueeze(0)
device_image = unsqueezed_image.to(device, non_blocking=True)
modeled_image = model(device_image)
prob, preds = torch.max(modeled_image, dim=1)
# Prediction
classes = ['Glass', 'Paper', 'Cardboard', 'Plastic', 'Metal', 'Trash']
classes[preds[0].item()]
print("Predicted image: ", classes[preds[0].item()])
| 35.729167 | 75 | 0.683382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.169679 |
1dcbbd2677c54b75d412f37ba6ff6de126f5a9fa | 248 | py | Python | tests/test_lightcurve.py | moemyself3/lightcurator | 0848435a170fea1d8979a416068ad88f7d8012a2 | [
"MIT"
] | 2 | 2019-03-20T15:11:22.000Z | 2020-05-31T01:55:03.000Z | tests/test_lightcurve.py | moemyself3/lightcurator | 0848435a170fea1d8979a416068ad88f7d8012a2 | [
"MIT"
] | 18 | 2019-03-20T06:42:17.000Z | 2021-01-24T04:57:08.000Z | tests/test_lightcurve.py | moemyself3/lightcurator | 0848435a170fea1d8979a416068ad88f7d8012a2 | [
"MIT"
] | null | null | null | import unittest
from lightcurator import lightcurve as lc
class TestLightcuratorMethods(unittest.TestCase):
    """Unit tests for lightcurator.lightcurve."""
    def test_matchcat(self):
        # Expects matchcat(23) == 1; the intended contract of matchcat is not
        # visible here -- TODO confirm against the lightcurve module.
        cc = lc.matchcat(23)
        self.assertEqual(cc, 1)
if __name__ == '__main__':
    unittest.main()
| 20.666667 | 49 | 0.709677 | 140 | 0.564516 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.040323 |
1dccc5743e94de53f95b5ccf3e289dd5436b798d | 17,756 | py | Python | baselines/deepq/assembly/data/plot_result.py | DengYuelin/baselines-assembly | d40171845349395f0ed389d725873b389b08f94f | [
"MIT"
] | 1 | 2022-03-23T02:35:05.000Z | 2022-03-23T02:35:05.000Z | baselines/deepq/assembly/data/plot_result.py | DengYuelin/baselines-assembly | d40171845349395f0ed389d725873b389b08f94f | [
"MIT"
] | null | null | null | baselines/deepq/assembly/data/plot_result.py | DengYuelin/baselines-assembly | d40171845349395f0ed389d725873b389b08f94f | [
"MIT"
] | 3 | 2018-12-20T10:10:57.000Z | 2020-08-07T10:12:57.000Z | # -*- coding: utf-8 -*-
"""
# @Time : 24/10/18 2:40 PM
# @Author : ZHIMIN HOU
# @FileName: plot_result.py
# @Software: PyCharm
# @Github : https://github.com/hzm2016
"""
import collections
import matplotlib.pyplot as plt
import numpy as np
import pickle
import copy as cp
from baselines.deepq.assembly.src.value_functions import *
"""=================================Plot result====================================="""
# YLABEL = ['$F_x(N)$', '$F_y(N)$', '$F_z(N)$', '$M_x(Nm)$', '$M_y(Nm)$', '$M_z(Nm)$']
YLABEL = ['$F_x$(N)', '$F_y$(N)', '$F_z$(N)', '$M_x$(Nm)', '$M_y$(Nm)', '$M_z$(Nm)']
Title = ["X axis force", "Y axis force", "Z axis force",
"X axis moment", "Y axis moment", "Z axis moment"]
High = np.array([40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5])
Low = np.array([-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5])
scale = np.array([40, 40, 40, 5, 5, 5])
"""================================================================================="""
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
def plot(result_path):
    """Plot the six force/torque channels of a saved search result.

    :param result_path: path to a .npy array of shape (steps, >=6), one
        column per force/moment component (ordered as in YLABEL).
    """
    plt.figure(figsize=(15, 15), dpi=100)
    plt.title('Search Result')
    prediction_result = np.load(result_path)
    # BUG FIX: the original wrapped this in `for i in range(len(prediction_result))`
    # without ever using `i`, re-drawing all six full columns once per time
    # step. Each column only needs a single plot call; the figure is visually
    # identical (the legend labels the first six lines either way).
    for j in range(6):
        plt.plot(prediction_result[:, j])
        plt.ylabel(YLABEL[j])
        plt.xlabel('steps')
    plt.legend(YLABEL)
    plt.show()
def plot_force_and_moment(path_2, path_3):
    """Plot the first 100 force/torque samples and pose deviations.

    :param path_2: .npy file of force/torque rows [Fx, Fy, Fz, Mx, My, Mz]
    :param path_3: .npy file of pose rows [x, y, z, rx, ry, rz]
    """
    V_force = np.load(path_2)
    V_state = np.load(path_3)
    plt.figure(figsize=(15, 10), dpi=100)
    plt.title("Search Result of Force", fontsize=20)
    plt.plot(V_force[:100])
    plt.xlabel("Steps", fontsize=20)
    plt.ylabel("F(N)", fontsize=20)
    plt.legend(labels=['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'], loc='best', fontsize=20)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.figure(figsize=(15, 10), dpi=100)
    plt.title("Search Result of State", fontsize=20)
    # Subtract what appears to be a nominal reference pose so the plot shows
    # deviations -- TODO confirm these constants against the physical setup.
    plt.plot(V_state[:100] - [539.88427, -38.68679, 190.03184, 179.88444, 1.30539, 0.21414])
    plt.xlabel("Steps", fontsize=20)
    plt.ylabel("Coordinate", fontsize=20)
    plt.legend(labels=['x', 'y', 'z', 'rx', 'ry', 'rz'], loc='best', fontsize=20)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.show()
def plot_reward(reward_path):
    """Plot per-episode reward from a .npy file, skipping the first entry."""
    episode_rewards = np.load(reward_path)
    print(episode_rewards[0])
    plt.figure(figsize=(15, 15), dpi=100)
    plt.title('Episode Reward')
    # The first stored value is dropped; the x-axis is the episode index.
    xs = np.arange(len(episode_rewards) - 1)
    ys = np.array(episode_rewards[1:])
    plt.plot(xs, ys)
    plt.ylabel('Episode Reward')
    plt.xlabel('Episodes')
    plt.show()
def plot_raw_data(path_1):
    """Segment a recorded log at rows where data[i, 1] == 0 and overlay the
    six force/torque channels of each segment in a 2x3 subplot grid.

    NOTE(review): data[i, 1] == 0 appears to mark a segment/episode boundary
    -- confirm against the logging code that produced the file.
    """
    data = np.load(path_1)
    # Column 0 of each row holds the 12-dim measurement vector.
    force_m = np.zeros((len(data), 12))
    plt.figure(figsize=(20, 20), dpi=100)
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2)
    plt.title("True Data")
    for j in range(len(data)):
        force_m[j] = data[j, 0]
    # k tracks the end index of the previous segment.
    k = -1
    for i in range(len(data)):
        if data[i, 1] == 0:
            print("===========================================")
            line = force_m[k+1:i+1]
            print(line)
            k = i
            for j in range(6):
                plt.subplot(2, 3, j + 1)
                plt.plot(line[:, j])
                # plt.plot(line[:, 0])
                if j == 1:
                    plt.ylabel(YLABEL[j], fontsize=17.5)
                    plt.xlabel('steps', fontsize=20)
                    plt.xticks(fontsize=15)
                    plt.yticks(fontsize=15)
                else:
                    plt.ylabel(YLABEL[j], fontsize=20)
                    plt.xlabel('steps', fontsize=20)
                    plt.xticks(fontsize=15)
                    plt.yticks(fontsize=15)
        # NOTE(review): incrementing a for-loop variable has no effect in
        # Python -- this statement is dead code.
        i += 1
def plot_continuous_data(path):
    """Plot the six (de-normalized) force/torque traces in a 2x3 grid and
    save the figure to raw_data.pdf.

    :param path: .npy log whose rows store the 12-dim vector in column 0.
    """
    raw_data = np.load(path)
    plt.figure(figsize=(20, 15))
    plt.title('Episode Reward')
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.22)
    # plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2)
    data = np.zeros((len(raw_data), 12))
    for j in range(len(raw_data)):
        data[j] = raw_data[j, 0]
    for j in range(6):
        plt.subplot(2, 3, j + 1)
        # Rescale the normalized trace back to physical units.
        plt.plot(data[:, j]*scale[j], linewidth=2.5)
        # plt.ylabel(YLABEL[j], fontsize=18)
        if j>2:
            plt.xlabel('steps', fontsize=30)
        plt.title(YLABEL[j], fontsize=30)
        plt.xticks(fontsize=25)
        plt.yticks(fontsize=25)
    # plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.2)
    plt.savefig('raw_data.pdf')
    plt.show()
def compute_true_return(path):
    """Compute discounted step counts from a logged trajectory and plot a slice.

    Rows of the .npy file unpack as (observation, action, done,
    action_probability). With cumulant 1 and gamma 0.99 (0 at termination),
    the backward recursion at each episode end yields the discounted number
    of steps remaining.

    NOTE(review): non-terminal steps append a placeholder 0 to all_G AND the
    terminal branch appends clock+1 backward values, so all_G grows by more
    than one entry per episode step -- confirm the intended alignment.
    """
    raw_data = np.load(path)
    # print(raw_data)
    clock = 0
    G = 0.
    past_gammas = []
    past_cumulants = []
    all_G = []
    for i in range(len(raw_data)):
        observation, action, done, action_probability = raw_data[i]
        # Discount is 0.99 within an episode and 0 at termination.
        if done == False:
            gamma = 0.99
        else:
            gamma = 0.
        past_gammas.append(gamma)
        past_cumulants.append(1)
        if done == False:
            clock += 1
            G = 0
            all_G.append(cp.deepcopy(G))
        else:
            print('clock', clock)
            # Backward recursion: G_j = gamma_j * G_{j+1} + c_j.
            for j in reversed(range(0, clock + 1)):
                G *= past_gammas[j]
                G += past_cumulants[j]
                all_G.append(cp.deepcopy(G))
            clock = 0
            past_cumulants = []
            past_gammas = []
    print(len(raw_data))
    plt.figure(figsize=(20, 15))
    plt.plot(all_G[300:400])
    plt.show()
    return all_G
# Plot the true prediction and true value
def plot_different_gamma_data(path):
    """Compare UDE traces against GTD(0) predictions for four discount
    factors (one 2x2 subplot per gamma) from a pickled results dict.

    The pickle maps (learner, metric) tuples to {value-function-name: series}
    dictionaries; samples before step 600 are discarded.
    """
    f = open(path, 'rb')
    titles = ['$\gamma = 0.4$', '$\gamma = 0.8$', '$\gamma = 0.96$', '$\gamma =1.0$']
    # true_data = compute_true_return('prediction_result_different_gamma.npy')
    # f = open('../data/learning_result_policy', 'rb')
    # plot_value_functions = ['Move down Fy', 'Move down Fx', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
    plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
    # plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
    raw_data = pickle.load(f)
    plt.figure(figsize=(20, 15))
    plt.tight_layout(pad=3, w_pad=1., h_pad=0.5)
    plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23)
    # legend = sorted([key for key in plot_value_functions.keys()])
    # print(legend)
    # print(value_functions.keys())
    for j, key in enumerate(plot_value_functions):
        plt.subplot(2, 2, j + 1)
        # print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
        # plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[:], linewidth=2.5)
        # plt.plot(true_data[300:])
        plt.plot(np.array(raw_data[('GTD(0)', 'UDE')][key])[600:], linewidth=2.75)
        # print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
        # plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5)
        # print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
        plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75)
        # plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
        # plt.legend('True value', 'Prediction value')
        plt.title(titles[j], fontsize=30)
        # Only the bottom row gets an x-axis label.
        if j > 1:
            plt.xlabel('steps', fontsize=30)
        plt.ylabel('Number of steps', fontsize=30)
        plt.xticks(fontsize=25)
        plt.yticks(fontsize=25)
    # plt.savefig('different_gamma.pdf')
    plt.show()
# Plot the true prediction and true value
def chinese_plot_different_gamma_data(path):
    """Same comparison as plot_different_gamma_data but with Chinese axis
    labels, high-dpi output, and the figure saved to
    ./figure/pdf/chinese_different_gamma.pdf instead of shown interactively.
    """
    f = open(path, 'rb')
    titles = ['$\gamma = 0.4$', '$\gamma = 0.8$', '$\gamma = 0.96$', '$\gamma =1.0$']
    # true_data = compute_true_return('prediction_result_different_gamma.npy')
    # f = open('../data/learning_result_policy', 'rb')
    # plot_value_functions = ['Move down Fy', 'Move down Fx', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
    plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
    # plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
    raw_data = pickle.load(f)
    plt.figure(figsize=(20, 12), dpi=1000)
    plt.tight_layout(pad=3, w_pad=1., h_pad=0.5)
    plt.subplots_adjust(left=0.08, bottom=0.12, right=0.98, top=0.95, wspace=0.23, hspace=0.33)
    # legend = sorted([key for key in plot_value_functions.keys()])
    # print(legend)
    # print(value_functions.keys())
    for j, key in enumerate(plot_value_functions):
        plt.subplot(2, 2, j + 1)
        # print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
        # plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[:], linewidth=2.5)
        # plt.plot(true_data[300:])
        plt.plot(np.array(raw_data[('GTD(0)', 'UDE')][key])[600:], linewidth=2.75)
        # print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
        # plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5)
        # print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
        plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75)
        # plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
        # plt.legend('True value', 'Prediction value')
        plt.title(titles[j], fontsize=36)
        # Only the bottom row gets the (Chinese) x-axis label.
        if j > 1:
            plt.xlabel('搜索步数', fontsize=36)
        plt.ylabel('预测周期', fontsize=36)
        plt.xticks([0, 50, 100, 150, 200], fontsize=36)
        plt.yticks(fontsize=36)
    plt.savefig('./figure/pdf/chinese_different_gamma.pdf')
    # plt.show()
def chinese_plot_compare_raw_data(path1, path2):
    """Overlay the first 100 steps of two recorded runs per channel
    (path1 dashed red, path2 solid blue) and save the 2x3 figure to
    ./figure/pdf/chinese_raw_data.pdf.
    """
    raw_data = np.load(path1)
    raw_data_1 = np.load(path2)
    plt.figure(figsize=(20, 12), dpi=1000)
    plt.title('Episode Reward')
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.95, wspace=0.33, hspace=0.15)
    # Column 0 of each row holds the 12-dim measurement vector.
    data = np.zeros((len(raw_data), 12))
    for j in range(len(raw_data)):
        data[j] = raw_data[j, 0]
    data_1 = np.zeros((len(raw_data_1), 12))
    for j in range(len(raw_data_1)):
        data_1[j] = raw_data_1[j, 0]
    for j in range(6):
        plt.subplot(2, 3, j + 1)
        plt.plot(data[:100, j], linewidth=2.5, color='r', linestyle='--')
        plt.plot(data_1[:100, j], linewidth=2.5, color='b')
        # plt.ylabel(YLABEL[j], fontsize=18)
        # Only the bottom row gets the (Chinese) x-axis label.
        if j>2:
            plt.xlabel('搜索步数', fontsize=38)
        plt.title(YLABEL[j], fontsize=38)
        plt.xticks(fontsize=38)
        plt.yticks(fontsize=38)
    # plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.2)
    plt.savefig('./figure/pdf/chinese_raw_data.pdf')
    # plt.show()
# Plot the true prediction and true value
def chinese_plot_different_policy_data(path, name):
    """Plot rescaled GTD(1) UDE vs. Prediction traces for the six force/torque
    value functions (2x3 grid, Chinese axis labels) and save the figure to
    ./figure/pdf/chinese_<name>.pdf.
    """
    f = open(path, 'rb')
    # true_data = compute_true_return('prediction_result_different_gamma.npy')
    # f = open('../data/learning_result_policy', 'rb')
    plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
    # plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
    # plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
    raw_data = pickle.load(f)
    plt.figure(figsize=(20, 12), dpi=1000)
    plt.title('Episode Reward')
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.95, wspace=0.33, hspace=0.25)
    # plt.subplots_adjust(left=0.1, bottom=0.12, right=0.98, top=0.94, wspace=0.23, hspace=0.33)
    # legend = sorted([key for key in plot_value_functions.keys()])
    # print(legend)
    # print(value_functions.keys())
    for j, key in enumerate(plot_value_functions):
        plt.subplot(2, 3, j + 1)
        # print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
        # plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[400:]*scale[j], linewidth=2.5)
        # plt.plot(true_data[300:])
        # Traces are rescaled by scale[j] back to physical units; samples
        # before step 1000 are discarded.
        plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:]*scale[j], linewidth=2.5)
        # print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
        # plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5, color='r')
        # print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
        plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:]*scale[j], linewidth=2.5)
        # plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
        # plt.legend('True value', 'Prediction value')
        plt.title(YLABEL[j], fontsize=38)
        # Only the bottom row gets the (Chinese) x-axis label.
        if j > 2:
            plt.xlabel('搜索步数', fontsize=38)
        plt.xticks([0, 50, 100, 150, 200], fontsize=38)
        plt.yticks(fontsize=38)
    plt.savefig('./figure/pdf/chinese_' + name +'.pdf')
    # plt.show()
# Plot the true prediction and true value
def plot_different_policy_data(path):
    """Plot UDE vs. prediction curves for the six 'Move down' force/torque
    value functions stored in a pickled result file and save them as a
    2x3-subplot PDF.

    Args:
        path: pickle file mapping (algorithm, signal) keys -- e.g.
              ('GTD(1)', 'UDE') -- to dicts of per-value-function series.
    """
    plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz',
                            'Move down Mx', 'Move down My', 'Move down Mz']
    # close the handle as soon as the data is loaded (it previously leaked)
    with open(path, 'rb') as f:
        raw_data = pickle.load(f)
    plt.figure(figsize=(20, 12), dpi=1000)
    plt.title('Episode Reward')
    plt.tight_layout(pad=3, w_pad=1.0, h_pad=1.0)
    plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23)
    for j, key in enumerate(plot_value_functions):
        plt.subplot(2, 3, j + 1)
        # drop the first 1000 samples (burn-in); `scale` is a module-level
        # per-axis scaling factor -- assumed defined earlier in this file
        plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:] * scale[j], linewidth=2.5)
        plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:] * scale[j], linewidth=2.5)
        plt.title(YLABEL[j], fontsize=30)
        if j > 2:
            # x-axis label only on the bottom row of subplots
            plt.xlabel('steps', fontsize=30)
        plt.xticks([0, 50, 100, 150, 200], fontsize=25)
        plt.yticks(fontsize=25)
    plt.savefig('./figure/pdf/chinese_different_policies_b.pdf')
if __name__ == "__main__":
    # Script entry point.  The commented-out calls below are a record of
    # earlier experiments/plots and are intentionally retained.
    # force = np.load('./search_force.npy')
    # state = np.load('./search_state.npy')
    # print(np.max(force, axis=0))
    # print(np.min(force, axis=0))
    # print(np.max(state, axis=0))
    # print(np.min(state, axis=0))
    # plot('./search_state.npy')
    # plot('./search_force.npy')
    # plot_reward('./episode_rewards.npy')
    # data = np.load('prediction_result.npy')
    # print(data[:, 2])
    # plot_continuous_data('prediction_result_different_gamma_six_force.npy')
    # f = open('../data/learning_result', 'rb')
    # y = pickle.load(f)
    # data = y[('GTD(1)', 'Hindsight Error')]['Move down Fz']
    # print(data)
    # plt.figure(figsize=(15, 15), dpi=100)
    # plt.title('Search Result')
    #
    # plt.plot(data)
    # plt.ylabel(YLABEL[0])
    # plt.xlabel('steps')
    # plt.legend(YLABEL)
    # plt.show()
    # compute_true_return('prediction_result_different_gamma.npy')
    # plot_true_data('learning_result_six_force_gamma_0.9')
    # plot_true_data('learning_result_different_gamma')
    # plot_different_gamma_data('learning_result_different_policy')
    """=============================== plot different policy ===================================== """
    # plot_different_policy_data('learning_result_six_force_gamma_0.9')
    # chinese_plot_different_policy_data('learning_result_six_force_gamma_0.9')
    # plot_different_policy_data('learning_result_different_policy_new_3')
    # Active step: render the off-policy comparison figure for the 'new_3'
    # result set under the output name 'off_policy_3'.
    chinese_plot_different_policy_data('learning_result_different_policy_new_3', 'off_policy_3')
    # chinese_plot_different_policy_data('learning_result_different_policy')
    # chinese_plot_different_policy_data('learning_result_different_policy')
    """=============================== plot different gamma ======================================== """
    # plot_different_gamma_data('learning_result_different_gamma_new')
    # chinese_plot_different_gamma_data('learning_result_different_gamma_new')
1dcf47342d8ac127388b110b95bb6666da395ea7 | 895 | py | Python | repacolors/palette/__init__.py | dyuri/repacolors | 4556efeb262529dde4586dad78ac7ff64d4dedf5 | [
"MIT"
] | 1 | 2020-02-29T17:05:06.000Z | 2020-02-29T17:05:06.000Z | repacolors/palette/__init__.py | dyuri/repacolors | 4556efeb262529dde4586dad78ac7ff64d4dedf5 | [
"MIT"
] | null | null | null | repacolors/palette/__init__.py | dyuri/repacolors | 4556efeb262529dde4586dad78ac7ff64d4dedf5 | [
"MIT"
] | null | null | null | from repacolors import ColorScale
from .colorbrewer import PALETTES as CBPALETTES
# Registry of palettes available to get_palette()/get_scale(): two built-in
# RYB colour wheels plus every ColorBrewer palette.  Keys are lower-case
# palette names, values are lists of hex colour strings.
PALETTES = {
    "ryb": ["#fe2713", "#fd5307", "#fb9900", "#fabc00", "#fefe34", "#d1e92c", "#66b032", "#0492ce", "#0347fe", "#3e01a4", "#8600af", "#a7194b"],
    "rybw3": ["#FE2712", "#FC600A", "#FB9902", "#FCCC1A", "#FEFE33", "#B2D732", "#66B032", "#347C98", "#0247FE", "#4424D6", "#8601AF", "#C21460"],
    **CBPALETTES
}
def get_palette(name: str):
    """Return the colour list registered under `name` (case-insensitive).

    Raises:
        KeyError: if no palette with that name exists.
    """
    # lower-case once and look up EAFP-style (the original lowered the name
    # twice and did a redundant membership test first)
    key = name.lower()
    try:
        return PALETTES[key]
    except KeyError:
        raise KeyError(f"'{name}' palette not found") from None
def get_scale(name: str, *args, **kwargs) -> ColorScale:
    """Build a ColorScale from the palette called `name`, labelling the
    scale with that name (overriding any caller-supplied `name`)."""
    options = dict(kwargs)
    options["name"] = name
    return ColorScale(get_palette(name), *args, **options)
def demo(width: int = 80):
    """Print a two-row colour swatch for every registered palette,
    prefixed with the (left-padded) palette name."""
    for title, colors in PALETTES.items():
        swatch = ColorScale(colors)
        print(f"{title:12s}", end="")
        swatch.print(width=width, height=2, border=0)
| 30.862069 | 146 | 0.607821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.310615 |
1dcff6dd57331f977b210a82d3eb44c337f97160 | 765 | py | Python | src/util.py | sgtpepperpt/cowrie-qemu | bf03e1a87af24c53290fd626efd7a5865ae0aa2f | [
"BSD-3-Clause"
] | 1 | 2019-06-05T08:33:52.000Z | 2019-06-05T08:33:52.000Z | src/util.py | sgtpepperpt/cowrie-qemu | bf03e1a87af24c53290fd626efd7a5865ae0aa2f | [
"BSD-3-Clause"
] | null | null | null | src/util.py | sgtpepperpt/cowrie-qemu | bf03e1a87af24c53290fd626efd7a5865ae0aa2f | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2019 Guilherme Borges <guilhermerosasborges@gmail.com>
# See the COPYRIGHT file for more information
import subprocess
import time
def ping(guest_ip):
    """Return True if `guest_ip` answers a single ICMP echo request.

    Runs the system `ping` binary; stdout/stderr are captured and discarded.
    """
    # pass '-c' and its value as separate argv entries -- the original
    # combined token '-c 1' only parsed correctly by getopt accident
    out = subprocess.run(['ping', '-c', '1', guest_ip], capture_output=True)
    return out.returncode == 0
def nmap_ssh(guest_ip):
    """Return True if nmap reports the SSH port open on `guest_ip`.

    '-PN' skips host discovery; requires the `nmap` binary on PATH.
    """
    # '-p' and its port spec as separate argv entries (was a single
    # '-p ssh' token, relying on lenient option parsing)
    out = subprocess.run(['nmap', guest_ip, '-PN', '-p', 'ssh'], capture_output=True)
    return out.returncode == 0 and b'open' in out.stdout
def read_file(file_name):
    """Return the entire contents of a text file as one string."""
    with open(file_name, 'r') as handle:
        contents = handle.read()
    return contents
def generate_mac_ip(guest_id):
    """Derive a deterministic (MAC, IP) pair for guest number `guest_id`.

    The last MAC octet is the id in hex and the IP comes from the
    192.168.150.0/24 range, so only ids 1-254 are representable (the old
    'TODO support more' silently produced invalid addresses outside it).

    Returns:
        (mac, ip) tuple of strings.
    Raises:
        ValueError: if guest_id is outside 1-254.
    """
    if not 1 <= guest_id <= 254:
        raise ValueError('guest_id must be between 1 and 254, got {0}'.format(guest_id))
    mac = 'aa:bb:cc:dd:ee:' + format(guest_id, '02x')
    ip = '192.168.150.' + str(guest_id)
    return mac, ip
def now():
    """Return the current Unix timestamp (seconds since epoch, float)."""
    return time.time()
| 23.181818 | 83 | 0.665359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.269281 |
1dd03130b6b9f8c9a6d3932e46dd12cf5f030098 | 65,125 | py | Python | library/load_partial.py | binaryburn/pan-fca | 77f8b47d21a7e5a11b6de0bc7b051837c02b770f | [
"Apache-2.0"
] | 26 | 2019-02-08T16:16:01.000Z | 2021-05-03T22:42:02.000Z | library/load_partial.py | binaryburn/pan-fca | 77f8b47d21a7e5a11b6de0bc7b051837c02b770f | [
"Apache-2.0"
] | 10 | 2019-02-26T14:22:23.000Z | 2021-09-23T23:23:16.000Z | library/load_partial.py | binaryburn/pan-fca | 77f8b47d21a7e5a11b6de0bc7b051837c02b770f | [
"Apache-2.0"
] | 43 | 2019-02-09T13:46:45.000Z | 2020-10-06T08:38:58.000Z | # -*- coding: utf-8 -*-
## Developed by Uriah Bojorquez
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox, QSizePolicy, QDialog
from PyQt5.QtGui import QColor, QPalette
from PyQt5 import QtCore
from qt_iron_skillet_ui import Ui_Dialog as IRONSKILLET
from qt_config_ui_panorama import Ui_Dialog as PANORAMA
from qt_config_ui import Ui_Dialog as PANOS
from qt_lcp_ui import Ui_MainWindow
from lxml import etree as lxml
from functools import partial
import collections
import webbrowser
import requests
import paramiko
import tempfile
import socket
import sys
import re
import os
# suppress warnings from requests library
requests.packages.urllib3.disable_warnings()
##############################################
# IRON SKILLET
##############################################
class IronSkillet(QtCore.QThread):
    """Worker thread that merges Iron Skillet day-one configuration
    elements from a saved config file via repeated 'load config partial'
    API calls.

    Emits `values_iron_skillet` after every processed element (API result,
    response text, error and a progress-bar increment) and `done` once all
    selected elements have been handled.

    NOTE: the original element tables ended most assignments with a stray
    trailing comma, storing 1-tuples instead of command strings (it only
    worked because requests url-encodes sequence values); plain strings
    are built now.
    """

    values_iron_skillet = QtCore.pyqtSignal(dict)  # per-element progress
    done = QtCore.pyqtSignal(bool)                 # fired once at the end

    def __init__(self, elements, os, api, url, file, from_vsys, to_vsys, parent=None):
        """
        Args:
            elements: ordered mapping of element name -> bool (process it?)
            os: 'Panorama' or a PAN-OS model string
            api: API key for the target device
            url: https://<host>/api endpoint
            file: saved configuration file to load from
            from_vsys / to_vsys: vsys names substituted into PAN-OS xpaths
        """
        super(IronSkillet, self).__init__(parent)
        self.isRunning = True
        self.elements = elements
        self.os = os
        self.api = api
        self.url = url
        self.file = file
        self.from_vsys = from_vsys
        self.to_vsys = to_vsys

    def run(self):
        """Build the per-element commands for the platform, execute each
        selected one and emit progress after every API call."""
        progress = {
            'element': None,
            'result': None,
            'response': None,
            'error': None,
            'pb_value': 0,
            'done': False
        }
        # shared 'load config partial ... mode merge' payload
        template = ("<load><config><partial><from>{file}</from>"
                    "<from-xpath>{src}</from-xpath><to-xpath>{dst}</to-xpath>"
                    "<mode>merge</mode></partial></config></load>")
        localhost = "/config/devices/entry[@name='localhost.localdomain']"
        # PANORAMA: every element merges from and to the same xpath
        if self.os == 'Panorama':
            same = {
                'system': localhost + '/deviceconfig/system',
                'settings': localhost + '/deviceconfig/setting',
                'log settings': '/config/panorama/log-settings',
                'template': localhost + '/template',
                'device group': localhost + '/device-group',
                'shared': '/config/shared',
                'log collector': localhost + '/log-collector-group',
            }
            xpath_pairs = {key: (xp, xp) for key, xp in same.items()}
            step = 14.29  # 7 elements -> ~100% progress
        # PAN-OS: tag/address map from_vsys -> to_vsys; the rest use fixed paths
        else:
            vsys = localhost + "/vsys/entry[@name='{0}']"
            src_vsys = vsys.format(self.from_vsys)
            dst_vsys = vsys.format(self.to_vsys)
            vsys1 = vsys.format('vsys1')
            xpath_pairs = {
                'log settings': ('/config/shared/log-settings',) * 2,
                'tag': (src_vsys + '/tag', dst_vsys + '/tag'),
                'system': (localhost + '/deviceconfig/system',) * 2,
                'settings': (localhost + '/deviceconfig/setting',) * 2,
                'address': (src_vsys + '/address', dst_vsys + '/address'),
                'external list': (vsys1 + '/external-list',) * 2,
                'profiles': (vsys1 + '/profiles',) * 2,
                'profile group': (vsys1 + '/profile-group',) * 2,
                'rulebase': (vsys1 + '/rulebase',) * 2,
                'zone protection': (localhost + '/network/profiles/zone-protection-profile',) * 2,
                'reports': ('/config/shared/reports',) * 2,
                'report group': ('/config/shared/report-group',) * 2,
                'email schedule': ('/config/shared/email-scheduler',) * 2,
            }
            step = 7.7  # 13 elements -> ~100% progress
        for key in self.elements.keys():
            if not self.elements[key]:
                continue
            src, dst = xpath_pairs[key]
            values = {
                'type': 'op',
                'key': self.api,
                'cmd': template.format(file=self.file, src=src, dst=dst)
            }
            progress['result'], progress['response'], progress['error'] = self._api_request(values)
            progress['element'] = key
            progress['pb_value'] += step
            progress['done'] = False
            self.values_iron_skillet.emit(progress)
        self.done.emit(True)

    ##############################################
    # API REQUEST
    ##############################################
    def _api_request(self, values):
        """POST `values` to the device API.

        Returns (result, response, error): result is True on success,
        False on connection failure and None on timeout.
        """
        try:
            return True, requests.post(self.url, values, verify=False, timeout=10).text, None
        except requests.exceptions.ConnectionError as error_api:
            return False, 'Error connecting to {ip} - Check IP Address'.format(ip=self.url), error_api
        except requests.exceptions.Timeout as error_timeout:
            return None, 'Connection to {ip} timed out, please try again'.format(ip=self.url), error_timeout
##############################################
# API REQUEST
##############################################
class APIRequest(QtCore.QThread):
    """Worker thread that executes one operational API command and
    publishes the outcome through the `api_values` signal."""

    api_values = QtCore.pyqtSignal(dict)

    def __init__(self, api, url, cmd, parent=None):
        super(APIRequest, self).__init__(parent)
        self.isRunning = True
        self.api = api
        self.url = url
        self.cmd = cmd

    def run(self):
        """Fire the stored command at the device and emit the result."""
        payload = {
            'type': 'op',
            'key': self.api,
            'cmd': self.cmd
        }
        outcome = {'result': None, 'response': None, 'error': None}
        outcome['result'], outcome['response'], outcome['error'] = self._api_request(payload)
        self.api_values.emit(outcome)

    ##############################################
    # API REQUEST
    ##############################################
    def _api_request(self, values):
        """POST `values`; return (result, response, error) where result is
        True on success, False on connection error, None on timeout."""
        try:
            return True, requests.post(self.url, values, verify=False, timeout=10).text, None
        except requests.exceptions.ConnectionError as conn_err:
            return False, 'Error connecting to {ip} - Check IP Address'.format(ip=self.url), conn_err
        except requests.exceptions.Timeout as timeout_err:
            return None, 'Connection to {ip} timed out, please try again'.format(ip=self.url), timeout_err
##############################################
# SETUP SSH
##############################################
class SetupSSH(QtCore.QThread):
    """Worker thread that SSHes to the device and collects the list of
    saved configuration files (by tab-completing 'show config saved' on the
    PAN-OS CLI), emitting the raw CLI output through `output`."""

    output = QtCore.pyqtSignal(str)

    def __init__(self, ip, user, password, parent=None):
        super(SetupSSH, self).__init__(parent)
        self.is_running = True
        self.ip = ip
        self.user = user
        self.password = password

    def run(self):
        # trailing TAB asks the CLI for completions, i.e. the saved files
        self.command = 'show config saved \t'
        output = self.connect_driver()
        self.output.emit(output)

    ##############################################################################
    # SETUP SSH
    ##############################################################################
    def setup_ssh(self):
        """Open a paramiko SSH connection.

        Returns the connected client, or False if the connection failed
        (after reporting the error to the user).
        """
        conn = paramiko.SSHClient()
        conn.load_system_host_keys()
        conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            conn.connect(self.ip, username=self.user, password=self.password)
        except paramiko.SSHException as e:
            # NOTE(review): showing a QMessageBox from a worker thread is
            # unsafe in Qt -- this should be signalled to the GUI thread
            self.show_critical_error(['Error connecting to {ip}'.format(ip=self.ip), e])
            return False
        return conn

    ##############################################################################
    # EXECUTE SSH COMMAND
    ##############################################################################
    def execute_shh(self, ssh_connection):
        """Run self.command in an interactive shell and collect output until
        the completion listing marker '<value>' appears."""
        shell = ssh_connection.invoke_shell()
        # busy-wait until the shell banner is ready
        while not shell.recv_ready():
            pass
        shell.send('set cli pager off\n')
        while not shell.recv_ready():
            pass
        shell.send(self.command)
        prompt = ''
        results = ''
        while '<value>' not in prompt:
            results += shell.recv(4096).decode('utf-8')
            prompt = results.strip()
        return results

    ##############################################################################
    # DRIVER FUNCTION
    ##############################################################################
    def connect_driver(self):
        """Connect, run the command and return its output.

        Returns an empty string when the SSH connection could not be
        established -- previously the False returned by setup_ssh() fell
        through to execute_shh() and crashed the thread.
        """
        ssh_obj = self.setup_ssh()
        if not ssh_obj:
            return ''
        ssh_data = self.execute_shh(ssh_obj)
        ssh_obj.close()
        return ssh_data

    ##############################################
    # SHOW CRITICAL ERROR
    ##############################################
    def show_critical_error(self, message_list):
        """Display [message, error] in a critical dialog; Abort closes,
        Retry sets the error flag."""
        message = '''
        <p>
        {message}
        <br>
        Error: {error}
        </p>
        '''.format(message=message_list[0], error=message_list[1])
        result = QMessageBox.critical(self, 'ERROR', message, QMessageBox.Abort, QMessageBox.Retry)
        # Abort
        if result == QMessageBox.Abort:
            # NOTE(review): QThread has no close(); this line would raise if
            # reached -- kept as-is to avoid changing error-path behavior
            self.close()
        # Retry
        else:
            # set error flag to True -- implies error
            self._flag_error = True
            return
##############################################
# FILL COMBO BOXES
##############################################
class ToComboBoxes(QtCore.QThread):
    """Worker thread that fetches the saved running configuration so the
    GUI can populate its combo boxes; the outcome arrives via the
    `combo_box_values` signal."""

    combo_box_values = QtCore.pyqtSignal(dict)

    def __init__(self, api, url, parent=None):
        super(ToComboBoxes, self).__init__(parent)
        self.is_running = True
        self.api = api
        self.url = url

    def run(self):
        """Request 'show config saved running-config.xml' and emit the result."""
        request = {
            'type': 'op',
            'key': self.api,
            'cmd': '<show><config><saved>running-config.xml</saved></config></show>'
        }
        outcome = {'result': None, 'response': None, 'error': None}
        outcome['result'], outcome['response'], outcome['error'] = self.api_request(request)
        self.combo_box_values.emit(outcome)

    ##############################################
    # API REQUEST
    ##############################################
    def api_request(self, values):
        """POST `values`; return (result, response, error) with True/False/
        None marking success / connection error / timeout respectively."""
        try:
            return True, requests.post(self.url, values, verify=False, timeout=10).text, None
        except requests.exceptions.ConnectionError as conn_err:
            return False, 'Error connecting to {ip} - Check IP Address'.format(ip=self.url), conn_err
        except requests.exceptions.Timeout as timeout_err:
            return None, 'Connection to {ip} timed out, please try again'.format(ip=self.url), timeout_err
##############################################
# CONNECT THREAD
##############################################
class ConnectThread(QtCore.QThread):
    """Worker thread that validates the connection form (IP/hostname,
    username, password) and requests an API key; the populated result dict
    is always emitted via `connect_values`."""

    connect_values = QtCore.pyqtSignal(dict)  # define new Signal

    def __init__(self, ip, user, password, parent=None):
        super(ConnectThread, self).__init__(parent)
        self.is_running = True
        self.ip = ip
        self.user = user
        self.password = password

    def run(self):
        """Validate each field, then fetch an API key if everything is
        usable, and emit the collected `connect_values` dict."""
        self.valid = False
        connect_values = {
            'api': None,
            'ip': None,
            'user': None,
            'password': None,
            'url': None,
            'result': None,
            'response': None,
            'error': None
        }
        # IP -- must resolve (literal address or DNS name)
        try:
            socket.gethostbyname(self.ip)
        except socket.gaierror as os_error_ip:
            print(os_error_ip)
            connect_values['response'] = "Unable to Connect or Invalid IP address given"
            connect_values['error'] = os_error_ip
        else:
            self.url = 'https://{ip}/api'.format(ip=self.ip)
            connect_values['ip'] = self.ip
            connect_values['url'] = self.url
        # Username -- non-empty and not all whitespace
        try:
            # was 'len(self.user) is 0': identity comparison against an int
            # literal only worked by CPython small-int accident; use ==
            if self.user.isspace() or len(self.user) == 0:
                raise AttributeError('invalid username')
        except AttributeError as error_user:
            connect_values['response'] = 'The Username field is blank'
            connect_values['error'] = error_user
        else:
            connect_values['user'] = self.user
            # Password -- same check as the username
            try:
                if self.password.isspace() or len(self.password) == 0:
                    raise AttributeError('invalid password')
            except AttributeError as error_password:
                connect_values['response'] = 'The Password field is blank'
                connect_values['error'] = error_password
            else:
                connect_values['password'] = self.password
                # all fields valid
                self.valid = True
        # also require the url to be set: previously a bad IP with valid
        # credentials still called keygen() with self.url unset, raising an
        # uncaught AttributeError so the signal was never emitted
        if self.valid and connect_values['url'] is not None:
            try:
                # get API key
                connect_values['result'], connect_values['response'], connect_values['error'] = self.keygen()
                if connect_values['result']:
                    connect_values['api'] = connect_values['response']
            except lxml.XMLSyntaxError as error_xml:
                connect_values['response'] = "Is this a FW/Panorama IP/Hostname?"
                connect_values['error'] = error_xml
        # emit signal values
        self.connect_values.emit(connect_values)

    ##############################################
    # API DRIVER
    ##############################################
    def keygen(self):
        """Request an API key from the device.

        Returns (result, response, error): on success the response is the
        key text, otherwise a human-readable error message.
        """
        values = {'type': 'keygen', 'user': self.user, 'password': self.password}
        result, response, error = self.api_request(values)
        try:
            # if API call was successful
            if result is True:
                return True, lxml.fromstring(response).find('.//key').text, None
            # if timeout
            elif result is None:
                raise requests.Timeout
            # if connection error
            else:
                raise ValueError('check IP address')
        # connection error
        except (IndexError, ValueError) as error_keygen:
            return False, 'Error obtaining API key from {ip}'.format(ip=self.ip), error_keygen
        # if error raised finding <key> in response
        except AttributeError:
            return False, 'Error obtaining API key from {ip}'.format(ip=self.ip), 'check credentials'
        # if timeout
        except requests.Timeout as error_timeout:
            return False, 'Connection to {ip} timed out, please try again'.format(ip=self.ip), error_timeout

    ##############################################
    # API REQUEST
    ##############################################
    def api_request(self, values):
        """POST `values` to the device API; return (result, response, error)
        with True/False/None for success / connection error / timeout."""
        try:
            return True, requests.post(self.url, values, verify=False, timeout=10).text, None
        except requests.exceptions.ConnectionError as error_api:
            return False, 'Error connecting to {ip} - Check IP Address'.format(ip=self.ip), error_api
        except requests.exceptions.Timeout as error_timeout:
            return None, 'Connection to {ip} timed out, please try again'.format(ip=self.ip), error_timeout

    ##############################################
    # SHOW CRITICAL ERROR
    ##############################################
    def show_critical_error(self, message_list):
        """Display [message, error] in a critical dialog; Abort closes,
        Retry sets the error flag."""
        message = '''
        <p>
        {message}
        <br>
        Error: {error}
        </p>
        '''.format(message=message_list[0], error=message_list[1])
        result = QMessageBox.critical(self, 'ERROR', message, QMessageBox.Abort, QMessageBox.Retry)
        # Abort
        if result == QMessageBox.Abort:
            self.close()
        # Retry
        else:
            # set error flag to True -- implies error
            self._flag_error = True
            return
##############################################
# MAIN WINDOW
##############################################
class LoadPartialMainWindow(QMainWindow):
def __init__(self):
# setup main window
QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# variables
self._is_output = '<html>'
self._model = None
self._api = None
self._from = None
self._to = None
self._from_file = None
self._load_config_partial = '<load><config><partial><from>{file}</from><from-xpath>{xpath_from}{obj_from}</from-xpath><to-xpath>{xpath_to}{obj_to}</to-xpath><mode>merge</mode></partial></config></load>'
# iron skillet variables
self._iron_skillet_panos = collections.OrderedDict()
self._iron_skillet_panos['log settings'] = True
self._iron_skillet_panos['tag'] = True
self._iron_skillet_panos['system'] = True
self._iron_skillet_panos['settings'] = True
self._iron_skillet_panos['address'] = True
self._iron_skillet_panos['external list'] = True
self._iron_skillet_panos['profiles'] = True
self._iron_skillet_panos['profile group'] = True
self._iron_skillet_panos['rulebase'] = True
self._iron_skillet_panos['zone protection'] = True
self._iron_skillet_panos['reports'] = True
self._iron_skillet_panos['report group'] = True
self._iron_skillet_panos['email schedule'] = True
self._iron_skillet_panorama = collections.OrderedDict()
self._iron_skillet_panorama['system'] = True,
self._iron_skillet_panorama['settings'] = True,
self._iron_skillet_panorama['log settings'] = True,
self._iron_skillet_panorama['template'] = True,
self._iron_skillet_panorama['device group'] = True,
self._iron_skillet_panorama['shared'] = True,
self._iron_skillet_panorama['log collector'] = True
##############################################
# BUTTON EVENTS/TRIGGERS
##############################################
self.ui.button_iron_skillet.clicked.connect(self._iron_skillet)
self.ui.button_quit.clicked.connect(self.close)
self.ui.button_connect.clicked.connect(self._connect)
self.ui.button_import.clicked.connect(self._import)
self.ui.button_ao.clicked.connect(partial(self._load_objects, 'address'))
self.ui.button_ag.clicked.connect(partial(self._load_objects, 'address-group'))
self.ui.button_so.clicked.connect(partial(self._load_objects, 'service'))
self.ui.button_sg.clicked.connect(partial(self._load_objects, 'service-group'))
self.ui.button_tags.clicked.connect(partial(self._load_objects, 'tag'))
self.ui.button_security.clicked.connect(partial(self._load_rulebase, 'security'))
self.ui.button_nat.clicked.connect(partial(self._load_rulebase, 'nat'))
self.ui.button_reports.clicked.connect(partial(self._load_reports, 'reports'))
self.ui.button_report_groups.clicked.connect(partial(self._load_reports, 'report-group'))
##############################################
# COMBO BOX EVENTS
##############################################
self.ui.combo_file.currentIndexChanged.connect(self._update_file_selected)
self.ui.combo_from_dg.currentIndexChanged.connect(self._reset_flags_buttons)
self.ui.combo_from_rulebase.currentIndexChanged.connect(self._reset_flags_buttons)
self.ui.combo_to_dg.currentIndexChanged.connect(self._reset_flags_buttons)
self.ui.combo_to_rulebase.currentIndexChanged.connect(self._reset_flags_buttons)
self.ui.combo_from_vsys.currentIndexChanged.connect(self._reset_flags_buttons)
self.ui.combo_to_vsys.currentIndexChanged.connect(self._reset_flags_buttons)
############################################################################
# LOAD REPORTS
############################################################################
def _load_reports(self, obj):
self.obj = obj
self.ui.progress_bar.setValue(0)
self.ui.label_status.setText('Merging {obj}...'.format(obj=obj))
# validate user input
if self._validate_user_input() is not True:
return
self._xpath_from = None
self._xpath_to = None
#########################################
# FROM
#########################################
# build xpaths - if Panoramma
if len(self.ui.combo_from_dg.currentText()) > 1:
# shared
if self.ui.combo_from_dg.currentText() == 'Shared':
self._xpath_from = '/config/shared/'
# DG
else:
self._xpath_from = "/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{dg}']/".format(dg=self.ui.combo_from_dg.currentText())
# PAN-OS
else:
self._xpath_from = '/config/shared/'
#########################################
# TO
#########################################
# Panorama
if len(self.ui.combo_to_dg.currentText()) > 1:
# Shared
if self.ui.combo_to_dg.currentText() == 'Shared':
self._xpath_to = '/config/shared/'
# DG
else:
self._xpath_to = "/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{dg}']/".format(dg=self.ui.combo_to_dg.currentText())
# PAN-OS
else:
self._xpath_to = '/config/shared/'
# build out load config partial command
cmd = self._load_config_partial.format(
file=self._from_file,
xpath_from=self._xpath_from,
xpath_to=self._xpath_to,
obj_from=self.obj,
obj_to=self.obj
)
cmd_output = 'load config partial from {file} from-xpath {xpath_from}{obj_from} to-xpath {xpath_to}{obj_to} mode merge'.format(
file=self._from_file,
xpath_from=self._xpath_from,
xpath_to=self._xpath_to,
obj_from=self.obj,
obj_to=self.obj
)
# output to text browser
self.ui.text_out.clear()
self.ui.text_out.append('> Type: <b><font color="yellow">{type}</font></b>'.format(type=self.obj))
self.ui.text_out.append('> Executing the following command...')
self.ui.text_out.append('\n')
self.ui.text_out.append(cmd_output)
self.ui.text_out.append('\n')
self.ui.progress_bar.setValue(50)
self.connect_api_thread = APIRequest(parent=None, api=self._api, url=self._url, cmd=cmd)
self.connect_api_thread.start()
self.connect_api_thread.api_values.connect(self._connect_values_thread)
############################################################################
# IRON SKILLET
############################################################################
def _iron_skillet(self):
# check user input
if not self._validate_user_input():
return
# check parameters are not null
if self._from_file== 'Select a File' or len(self.ui.combo_file.currentText()) < 1:
self._show_critical_error(['Error!', 'Select a "From" file'])
return
self.ui.progress_bar.setValue(0)
self.ui.label_status.setText('Iron Skillet...')
self.ui.text_out.clear()
self.ui.text_out.append('> Starting Iron Skillet...')
d = QDialog()
ui = PANORAMA() if self._model == 'Panorama' else PANOS()
ui.setupUi(d)
d.show()
resp = d.exec_()
self._process_iron_skillet = True if resp == QDialog.Accepted else False
# if they hit 'cancel' -- break out of function
if not self._process_iron_skillet:
return
# PANORAMA
if self._model == 'Panorama':
self._iron_skillet_panorama['system'] = False if not ui.checkbox_system.isChecked() else True
self._iron_skillet_panorama['settings'] = False if not ui.checkbox_setting.isChecked() else True
self._iron_skillet_panorama['log settings'] = False if not ui.checkbox_log.isChecked() else True
self._iron_skillet_panorama['template'] = False if not ui.checkbox_template.isChecked() else True
self._iron_skillet_panorama['device group'] = False if not ui.checkbox_dg.isChecked() else True
self._iron_skillet_panorama['shared'] = False if not ui.checkbox_shared.isChecked() else True
self._iron_skillet_panorama['log collector'] = False if not ui.checkbox_log_collector.isChecked() else True
self.connect_thread_iron_skillet = IronSkillet(parent=None, elements=self._iron_skillet_panorama, os='Panorama', api=self._api, url=self._url, file=self._from_file, from_vsys=self.ui.combo_from_vsys.currentText(), to_vsys=self.ui.combo_to_vsys.currentText())
self.connect_thread_iron_skillet.start()
self.connect_thread_iron_skillet.values_iron_skillet.connect(self._iron_skillet_output)
self.connect_thread_iron_skillet.done.connect(self._print_iron_skillet)
# PAN-OS
else:
self._iron_skillet_panos['address'] = False if not ui.checkbox_address.isChecked() else True
self._iron_skillet_panos['email schedule'] = False if not ui.checkbox_email_schedule.isChecked() else True
self._iron_skillet_panos['external list'] = False if not ui.checkbox_ext_list.isChecked() else True
self._iron_skillet_panos['log settings'] = False if not ui.checkbox_log.isChecked() else True
self._iron_skillet_panos['profile group'] = False if not ui.checkbox_profile_group.isChecked() else True
self._iron_skillet_panos['profiles'] = False if not ui.checkbox_profiles.isChecked() else True
self._iron_skillet_panos['report group'] = False if not ui.checkbox_report_groups.isChecked() else True
self._iron_skillet_panos['reports'] = False if not ui.checkbox_reports.isChecked() else True
self._iron_skillet_panos['rulebase'] = False if not ui.checkbox_rulebase.isChecked() else True
self._iron_skillet_panos['settings'] = False if not ui.checkbox_setting.isChecked() else True
self._iron_skillet_panos['system'] = False if not ui.checkbox_system.isChecked() else True
self._iron_skillet_panos['tag'] = False if not ui.checkbox_tag.isChecked() else True
self._iron_skillet_panos['zone protection'] = False if not ui.checkbox_zone_protection.isChecked() else True
self.connect_thread_iron_skillet = IronSkillet(parent=None, elements=self._iron_skillet_panos, os='PAN-OS', api=self._api, url=self._url, file=self._from_file, from_vsys=self.ui.combo_from_vsys.currentText(), to_vsys=self.ui.combo_to_vsys.currentText())
self.connect_thread_iron_skillet.start()
self.connect_thread_iron_skillet.values_iron_skillet.connect(self._iron_skillet_output)
self.connect_thread_iron_skillet.done.connect(self._print_iron_skillet)
##############################################
# IRON SKILLET OUTPUT
##############################################
def _iron_skillet_output(self, output):
element = '> {}'.format(output['element'])
self.ui.text_out.append(element)
self._is_output += '<h2>{}</h2><ul>'.format(element)
xml = lxml.fromstring(output['response'])
for line in xml.xpath('.//line'):
response = ' > {}'.format(line.text)
self.ui.text_out.append(response)
self._is_output += '<li>{}</li>'.format(line.text)
self.ui.text_out.append('')
self._is_output += '</ul>'
self.ui.progress_bar.setValue(output['pb_value'])
##############################################
# IRON SKILLET DONE
##############################################
def _print_iron_skillet(self, flag):
self._is_output += '</html>'
self.ui.progress_bar.setValue(100)
d = QDialog()
ui = IRONSKILLET()
ui.setupUi(d)
resp = d.exec_()
if resp == QDialog.Accepted:
tmp = tempfile.NamedTemporaryFile(delete=True)
path = tmp.name + '.html'
f = open(path, 'w')
f.write('<html><body>{}</body></html>'.format(self._is_output))
f.close()
webbrowser.open('file://' + path)
##############################################
# RESET FLAGS
##############################################
def _reset_flags(self):
self._flag_tags = False
self._flag_address_objects = False
self._flag_address_groups = False
self._flag_service_objects = False
self._flag_service_groups = False
self._flag_connect_success = False
    ##############################################
    # RESET FLAGS and BUTTONS
    ##############################################
    def _reset_flags_buttons(self):
        """Reset both the merge-tracking flags and the action-button colors."""
        self._reset_flags()
        self._reset_button_color()
##############################################
# CONNECT
##############################################
def _connect(self):
# get/set IP and credentials (validate parameters)
valid = False
self.ui.progress_bar.setValue(0)
self.ui.label_status.setText('Connecting, retrieving running-config, loading saved configs...')
# clear all combo boxes
self.ui.text_out.clear()
self.ui.combo_to_dg.clear()
self.ui.combo_from_dg.clear()
self.ui.combo_to_vsys.clear()
self.ui.combo_from_vsys.clear()
self.ui.combo_from_rulebase.clear()
self.ui.combo_to_rulebase.clear()
# reset all flags & flags
self._reset_flags_buttons()
####
self.connect_thread = ConnectThread(parent=None, ip=self.ui.line_ip.text(), user=self.ui.line_user.text(), password=self.ui.line_password.text())
self.connect_thread.start()
self.connect_thread.connect_values.connect(self._set_connect_values)
    ##############################################
    # SET CONNECT VALUES
    ##############################################
    def _set_connect_values(self, values):
        """Handle the ConnectThread result dict.

        On success: cache the connection details, mark the Connect button
        green, then start the follow-up workers (system info, To-combo fill,
        SSH saved-config listing). On failure: mark the button red and pop a
        critical error dialog.
        """
        if values['result']:
            # cache connection details for later API/SSH calls
            self._api = values['api']
            self._ip = values['ip']
            self._user = values['user']
            self._password = values['password']
            self._url = values['url']
            self.ui.button_connect.setStyleSheet('background-color: green; color:white;')
            self.ui.button_connect.setText('Connected to: {ip}'.format(ip=self._ip))
            self.ui.progress_bar.setValue(25)
            # trigger functions to fill combo boxes
            self._system_info()
            self.connect_thread_combo_boxes = ToComboBoxes(parent=None, api=self._api, url=self._url)
            self.connect_thread_combo_boxes.start()
            self.connect_thread_combo_boxes.combo_box_values.connect(self._fill_to_combo_boxes)
            # list the saved configs over SSH in the background
            self.connect_thread_setup_ssh = SetupSSH(parent=None, ip=self._ip, user=self._user, password=self._password)
            self.connect_thread_setup_ssh.start()
            self.connect_thread_setup_ssh.output.connect(self._load_saved_configs)
        else:
            self.ui.button_connect.setStyleSheet('background-color: red; color:white;')
            self.ui.button_connect.setText('Connection Error: {ip}'.format(ip=values['ip']))
            self._show_critical_error([values['response'], values['error']])
    ##############################################
    # IMPORT CONFIG
    ##############################################
    def _import(self):
        """Upload a local XML config to the device via the import API.

        Prompts for a file, POSTs it to the ?type=import endpoint, and on
        success re-runs the SSH saved-config listing so the new file shows up
        in the "From" combo box.
        """
        # if model isn't set (no successful connect yet), return
        if self._model is None:
            return
        # prompt user for import file
        self._import_file, _ = QFileDialog.getOpenFileName(parent=None, caption="Import Local Config (XML)", directory=os.getcwd(), filter="XML files (*.xml)")
        # if cancelled
        if len(self._import_file) == 0:
            return
        self.ui.progress_bar.setValue(0)
        self.ui.progress_bar.setValue(10)
        self.ui.label_status.setText('Importing {file}'.format(file=self._import_file))
        try:
            # NOTE(review): verify=False disables TLS certificate checking —
            # presumably deliberate for self-signed firewall certs; confirm.
            with open(self._import_file, 'rb') as f:
                response = requests.post(
                    url=self._url + '/?type=import&category=configuration',
                    data={'key': self._api},
                    files={'file': f},
                    verify=False,
                    timeout=10).content
        # if any errors - display error message and return
        except (requests.ConnectionError, requests.ConnectTimeout, requests.HTTPError) as error_requests:
            self._show_critical_error(['Importing File', error_requests])
            return
        else:
            self.ui.progress_bar.setValue(50)
            # check if successful, if so, reload saved config files
            if lxml.fromstring(response).get('status') == 'success':
                self.ui.text_out.clear()
                self.ui.text_out.append('> {file} has been successfully imported!'.format(file=self._import_file))
                self.ui.text_out.append('> Refreshing...')
                self.ui.progress_bar.setValue(100)
                # refresh the saved-config listing over SSH
                self.connect_thread_setup_ssh = SetupSSH(parent=None, ip=self._ip, user=self._user, password=self._password)
                self.connect_thread_setup_ssh.start()
                self.connect_thread_setup_ssh.output.connect(self._load_saved_configs)
            # if error, return
            else:
                self._show_critical_error(['Importing File', 'Unable to load {file}'.format(file=self._import_file)])
                return
##############################################
# VALIDATE USER INPUT
##############################################
def _validate_user_input(self):
# did user select a file?
if self.ui.combo_file.currentText() == 'Select a File':
self._show_critical_error(['Error!', 'You must select a file!'])
return
# check if FROM Vsys and DG are None
if len(self.ui.combo_from_vsys.currentText()) < 1 and len(self.ui.combo_from_dg.currentText()) < 1:
return False
# check if TO Vsys and DG are None
if len(self.ui.combo_to_vsys.currentText()) < 1 and len(self.ui.combo_to_dg.currentText()) < 1:
return False
# validate model has been set
if self._model is not None:
return True
else:
return False
##############################################
# BUILD XPATH
##############################################
def _build_xpath(self):
panorama = "/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{dg}']/"
shared = '/config/shared/'
vsys = "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='{vsys}']/"
# init and clear out xpaths
self._xpath_to = None
self._xpath_from = None
#########################################
# FROM
#########################################
# Panorama
if len(self.ui.combo_from_dg.currentText()) > 1 and len(self.ui.combo_from_rulebase.currentText()) > 1:
# shared
if self.ui.combo_from_dg.currentText() == 'Shared':
try:
self._xpath_from = shared
except KeyError:
self._show_critical_error(['Input Error', 'Select a valid "From" Rulebase'])
return
# device group
else:
try:
self._xpath_from = panorama.format(dg=self.ui.combo_from_dg.currentText())
except KeyError:
self._show_critical_error(['Input Error', 'Select a valid "From" Rulebase'])
return
# PAN-OS (VSYS)
elif len(self.ui.combo_from_vsys.currentText()) > 1:
self._xpath_from = vsys.format(vsys=self.ui.combo_from_vsys.currentText())
#########################################
# TO
#########################################
# Panorama
if len(self.ui.combo_to_dg.currentText()) > 1 and len(self.ui.combo_to_rulebase.currentText()) > 1:
# shared
if self.ui.combo_to_dg.currentText() == 'Shared':
try:
self._xpath_to = shared
except KeyError:
self._show_critical_error(['Input Error', 'Select a valid "TO" Rulebase'])
return
# device group
else:
try:
self._xpath_to = panorama.format(dg=self.ui.combo_to_dg.currentText())
except KeyError:
self._show_critical_error(['Input Error', 'Select a valid "TO" Rulebase'])
return
# PAN-OS (VSYS)
elif len(self.ui.combo_to_vsys.currentText()) > 1:
self._xpath_to = vsys.format(vsys=self.ui.combo_to_vsys.currentText())
    ##############################################
    # LOAD RULEBASE
    ##############################################
    def _load_rulebase(self, rule):
        """Merge one rulebase type from the saved config into the running config.

        Validates the selections, warns (non-blocking) when objects/groups
        have not been merged yet, builds the from/to xpaths and rulebase
        paths, then runs `load config partial ... mode merge` on a background
        APIRequest thread whose result is handled by
        _connect_rule_values_thread.
        """
        self.rule = rule
        self.ui.progress_bar.setValue(0)
        self.ui.label_status.setText('Merging {rule}...'.format(rule=self.rule))
        # validate user input
        if self._validate_user_input() is not True:
            return
        # valid rulebase?
        if self.ui.combo_from_rulebase.currentText() == 'Select Rulebase' or self.ui.combo_to_rulebase.currentText() == 'Select Rulebase':
            self._show_critical_error(['Error!', 'To/From Rulebase not selected'])
            return
        # should I prompt an info dialog?
        info = False
        info_msg = '<b>The following Objects/Groups have NOT been loaded:</b><ul>'
        # list every object/group category that has not been merged yet
        if self._flag_tags is not True:
            info_msg += '<li>Tags</li>'
            info = True
        if self._flag_address_objects is not True:
            info_msg += '<li>Address Objects</li>'
            info = True
        if self._flag_address_groups is not True:
            info_msg += '<li>Address Groups</li>'
            info = True
        if self._flag_service_objects is not True:
            info_msg += '<li>Service Objects</li>'
            info = True
        if self._flag_service_groups is not True:
            info_msg += '<li>Service Groups</li>'
            info = True
        info_msg += '</ul>The command to add "{rule}" policies will still be executed; make sure to add the above Objects/Groups before committing (if necessary).<br>'.format(rule=rule)
        # prompt if True -- the merge still proceeds, this is only a warning
        if info:
            # QMessageBox.information(self, 'Info', info_msg, QMessageBox.Ok)
            mbox = QMessageBox(self)
            mbox.setText(self.tr('Warning!'))
            mbox.setInformativeText(info_msg)
            mbox.resize(400, 200)
            mbox.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
            mbox.show()
        # build xpaths
        self._build_xpath()
        # map the combo-box label to the config node name
        from_rulebase = ''
        to_rulebase = ''
        rulebase = {
            'Pre Rulebase': 'pre-rulebase',
            'Post Rulebase': 'post-rulebase'
        }
        # from rulebase: Panorama uses pre/post-rulebase, PAN-OS plain rulebase
        if len(self.ui.combo_from_rulebase.currentText()) > 1:
            from_rulebase += '{x}/{y}'.format(x=rulebase[self.ui.combo_from_rulebase.currentText()], y=self.rule)
        else:
            from_rulebase = 'rulebase/' + self.rule
        # to rulebase
        if len(self.ui.combo_to_rulebase.currentText()) > 1:
            to_rulebase += '{x}/{y}'.format(x=rulebase[self.ui.combo_to_rulebase.currentText()], y=self.rule)
        else:
            to_rulebase = 'rulebase/' + self.rule
        # build out load config partial command (XML API form)
        cmd = self._load_config_partial.format(
            file=self._from_file,
            xpath_from=self._xpath_from,
            xpath_to=self._xpath_to,
            obj_from=from_rulebase,
            obj_to=to_rulebase
        )
        # human-readable CLI equivalent shown in the output pane
        cmd_output = 'load config partial from {file} from-xpath {xpath_from}{obj_from} to-xpath {xpath_to}{obj_to} mode merge'.format(
            file=self._from_file,
            xpath_from=self._xpath_from,
            xpath_to=self._xpath_to,
            obj_from=from_rulebase,
            obj_to=to_rulebase
        )
        # output to text browser
        self.ui.text_out.clear()
        self.ui.text_out.append('> Type: <b><font color="yellow">{rule}</font></b>'.format(rule=self.rule))
        self.ui.text_out.append('> Executing the following command...')
        self.ui.text_out.append('\n')
        self.ui.text_out.append(cmd_output)
        self.ui.text_out.append('\n')
        self.ui.progress_bar.setValue(50)
        self.connect_api_rule_thread = APIRequest(parent=None, api=self._api, url=self._url, cmd=cmd)
        self.connect_api_rule_thread.start()
        self.connect_api_rule_thread.api_values.connect(self._connect_rule_values_thread)
##############################################
# CONNECT VALUES THREAD - RULES
##############################################
def _connect_rule_values_thread(self, values):
# convert to XML
values['response'] = lxml.fromstring(values['response'])
# if successful
if values['response'].get('status') == 'success':
self.ui.text_out.append('> <b>Status: <font color="green">{status}</font></b>'.format(status=values['response'].get('status')))
self.ui.text_out.append('> {msg}'.format(msg=values['response'].xpath('.//line')[0].text))
self._set_button_backgroud('green', self.rule)
# if unsuccessful
else:
self.ui.text_out.append('> <b>Status: <font color="red">{status}</font></b>'.format(status=values['response'].get('status')))
self.ui.text_out.append('> {msg}'.format(msg=values['response'].xpath('.//line')[0].text))
self._set_button_backgroud('red', self.rule)
self.ui.progress_bar.setValue(100)
    ##############################################
    # ADDRESS OBJECTS
    ##############################################
    def _load_objects(self, obj):
        """Merge one object/group category (tag, address, address-group,
        service, service-group) from the saved config into the running config.

        Warns (non-blocking) when the load order looks wrong (e.g. groups
        before their member objects), records the category's flag, then runs
        `load config partial` on a background APIRequest thread handled by
        _connect_values_thread.
        """
        self.obj = obj
        self.ui.progress_bar.setValue(0)
        self.ui.label_status.setText('Merging {obj}...'.format(obj=self.obj))
        # validate user input
        if self._validate_user_input() is not True:
            return
        # display names used in the warning dialog ('tag' never needs one)
        obj_message = {
            'address': 'Address Objects',
            'address-group': 'Address Groups',
            'service': 'Service Objects',
            'service-group': 'Service Groups'
        }
        # should I prompt an info dialog?
        info = False
        info_msg = '<b>The following has been detected:</b><ul>'
        # check flags and warn user about load-order issues
        if self.obj != 'tag' and self._flag_tags is False:
            info = True
            info_msg += '<li>{obj} are being loaded before Tags</li>'.format(obj=obj_message[self.obj])
        # address groups before address objects?
        if self.obj == 'address-group' and self._flag_address_objects is False:
            info = True
            info_msg += '<li>Address Groups are being loaded before Address Objects</li>'
        # service groups before service objects?
        if self.obj == 'service-group' and self._flag_service_objects is False:
            info = True
            info_msg += '<li>Service Groups are being loaded before Service Objects</li>'
        # prompt if True -- the merge still proceeds, this is only a warning
        if info:
            info_msg += '</ul>The command to add "{obj}" will still be executed; make sure to add the above Objects before committing (if necessary).<br>'.format(obj=obj_message[obj])
            mbox = QMessageBox(self)
            mbox.setText(self.tr('Warning!'))
            mbox.setInformativeText(info_msg)
            mbox.resize(400, 200)
            mbox.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
            mbox.show()
        # record that this category has been merged (drives later warnings)
        if self.obj == 'tag':
            self._flag_tags = True
        elif self.obj == 'address':
            self._flag_address_objects = True
        elif self.obj == 'address-group':
            self._flag_address_groups = True
        elif self.obj == 'service':
            self._flag_service_objects = True
        elif self.obj == 'service-group':
            self._flag_service_groups = True
        # build out the xpaths
        self._build_xpath()
        # build out load config partial command (XML API form)
        cmd = self._load_config_partial.format(
            file=self._from_file,
            xpath_from=self._xpath_from,
            xpath_to=self._xpath_to,
            obj_from=self.obj,
            obj_to=self.obj
        )
        # human-readable CLI equivalent shown in the output pane
        cmd_output = 'load config partial from {file} from-xpath {xpath_from}{obj_from} to-xpath {xpath_to}{obj_to} mode merge'.format(
            file=self._from_file,
            xpath_from=self._xpath_from,
            xpath_to=self._xpath_to,
            obj_from=self.obj,
            obj_to=self.obj
        )
        # output to text browser
        self.ui.text_out.clear()
        self.ui.text_out.append('> Type: <b><font color="yellow">{type}</font></b>'.format(type=obj))
        self.ui.text_out.append('> Executing the following command...')
        self.ui.text_out.append('\n')
        self.ui.text_out.append(cmd_output)
        self.ui.text_out.append('\n')
        self.ui.progress_bar.setValue(50)
        self.connect_api_thread = APIRequest(parent=None, api=self._api, url=self._url, cmd=cmd)
        self.connect_api_thread.start()
        self.connect_api_thread.api_values.connect(self._connect_values_thread)
##############################################
# CONNECT VALUES THREAD - OBJECTS/GROUPS
##############################################
def _connect_values_thread(self, values):
if values['result'] is True:
# convert to XML
values['response'] = lxml.fromstring(values['response'])
else:
self._show_critical_error(['Slow Down!', values['response']])
return
# if successful
if values['response'].get('status') == 'success':
self.ui.text_out.append('> <b>Status: <font color="green">{status}</font></b>'.format(status=values['response'].get('status')))
self.ui.text_out.append('> {msg}'.format(msg=values['response'].xpath('.//line')[0].text))
self._set_button_backgroud('green', self.obj)
# if unsuccessful
else:
self.ui.text_out.append('> <b>Status: <font color="red">{status}</font></b>'.format(status=values['response'].get('status')))
self.ui.text_out.append('> {msg}'.format(msg=values['response'].xpath('.//line')[0].text))
self._set_button_backgroud('red', self.obj)
self.ui.progress_bar.setValue(100)
##############################################
# SET BUTTON BACKGROUND COLOR
##############################################
def _set_button_backgroud(self, color, button):
if button == 'tag':
self.ui.button_tags.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'address':
self.ui.button_ao.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'address-group':
self.ui.button_ag.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'service':
self.ui.button_so.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'service-group':
self.ui.button_sg.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'security':
self.ui.button_security.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'nat':
self.ui.button_nat.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'reports':
self.ui.button_reports.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
elif button == 'report-group':
self.ui.button_report_groups.setStyleSheet('background-color: {color}; color:white;'.format(color=color))
##############################################
# RESET BUTTON COLOR
##############################################
def _reset_button_color(self):
self.ui.button_tags.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_ao.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_ag.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_so.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_sg.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_security.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_nat.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_reports.setStyleSheet('color: white; background-color: rgb(53,53,53);')
self.ui.button_report_groups.setStyleSheet('color: white; background-color: rgb(53,53,53);')
# self.ui.button_applications.setStyleSheet('color: white; background-color: rgb(53,53,53);')
# self.ui.button_application_groups.setStyleSheet('color: white; background-color: rgb(53,53,53);')
    ##############################################
    # API REQUEST
    ##############################################
    def _update_file_selected(self):
        """
        Get the selected Saved Config over the API and parse it.

        Clears the "From" DG/VSYS/rulebase combo boxes and the merge flags,
        fetches `show config saved <name>`, then populates the "From" combo
        boxes: VSYS entries when no device-group is present (PAN-OS), or
        'Shared' + device groups and the Pre/Post rulebase choices (Panorama).
        """
        # nothing selected (placeholder or empty) -> nothing to do
        if self.ui.combo_file.currentText() == 'Select a File' or len(self.ui.combo_file.currentText()) < 1:
            return
        self.ui.progress_bar.setValue(0)
        self.ui.label_status.setText('Updating File selected...')
        self._from_file = self.ui.combo_file.currentText()
        # clear from DG and from VSYS
        self.ui.combo_from_dg.clear()
        self.ui.combo_from_vsys.clear()
        self.ui.combo_from_rulebase.clear()
        # reset all flags (a new file means nothing has been merged yet)
        self._flag_tags = False
        self._flag_address_objects = False
        self._flag_address_groups = False
        self._flag_service_objects = False
        self._flag_service_groups = False
        # reset button colors
        self._reset_button_color()
        # get config file from selected text
        values = {
            'type': 'op',
            'key': self._api,
            'cmd': '<show><config><saved>{name}</saved></config></show>'.format(name=self._from_file),
        }
        result, response, error = self._api_request(values)
        # if successful, keep the parsed config; otherwise mark it unusable
        if result is True and lxml.fromstring(response).get('status') == 'success':
            self._from_config = lxml.fromstring(response)
            self.ui.text_out.clear()
            self.ui.text_out.append('> "{name}" has been loaded!'.format(name=self._from_file))
        else:
            self.ui.text_out.clear()
            self.ui.text_out.append('> Error loading "{name}". Please try again.'.format(name=self._from_file))
            self.ui.text_out.append('> {error}'.format(error=error))
            self._from_config = False
        # if config obtained
        if self._from_config is not False:
            # presence of a device-group node distinguishes Panorama configs
            dg_exists = self._from_config.find('.//device-group')
            # PAN-OS - no Device Groups found
            if dg_exists is None:
                self.ui.text_out.append('> No Device Groups found; assuming PAN-OS config.')
                self.ui.text_out.append('> <b><font color="green">Select VSYS if necessary.</font></b>')
                # populate from VSYS
                self._vsys = []
                for vsys in self._from_config.xpath('//config/devices/entry/vsys/entry'):
                    self._vsys.append(vsys.get('name'))
                self.ui.combo_from_vsys.addItems(self._vsys)
                self.ui.combo_from_rulebase.clear()
            # Panorama - populate from DG
            else:
                self.ui.text_out.append('> Device Groups detected!')
                self.ui.text_out.append('> <b><font color="green">Please select To/From DG...</font></b>')
                self.ui.text_out.append('> <b><font color="green">Please select Pre/Post Rulebase...</font></b>')
                self._from_device_groups = ['Shared']
                for dg in self._from_config.xpath('//config/devices/entry/device-group/entry'):
                    self._from_device_groups.append(dg.get('name'))
                self.ui.combo_from_dg.addItems(self._from_device_groups)
                self.ui.combo_from_rulebase.addItems(['Select Rulebase', 'Pre Rulebase', 'Post Rulebase'])
        self.ui.progress_bar.setValue(100)
##############################################
# SYSTEM INFO
##############################################
def _system_info(self):
"""
Show System Info: set status bar and get device info
"""
values = {'type': 'op', 'cmd': '<show><system><info></info></system></show>', 'key': self._api}
result, respose, error = self._api_request(values)
# get device info
if result:
root = lxml.fromstring(respose)
model = root.findtext('.//model')
self._device = root.findtext('.//devicename')
self._sw = root.findtext('.//sw-version')
# set status bar
self.ui.statusbar.showMessage('{m:12} {x:5}|{x:5} {d:15} {x:5}|{x:5} {s:8}'.format(m=model, d=self._device, s=self._sw, x=''))
self.ui.progress_bar.setValue(50)
if model == 'Panorama' or model == 'M-500' or model == 'M-100':
self._model = 'Panorama'
else:
self._model = 'FW'
##############################################
# FILL TO COMBO BOXES
##############################################
def _fill_to_combo_boxes(self, values):
"""
Sets the "TO" combo boxes: DG and/or VSYS and Pre/Post Rulebase
"""
# if succssful
if values['result'] is True and lxml.fromstring(values['response']).get('status') == 'success':
self._running_config = lxml.fromstring(values['response'])
self.ui.text_out.append('> "{name}" has been loaded!'.format(name='running-config.xml'))
else:
self.ui.text_out.clear()
self.ui.text_out.append('> Error loading "{name}". Please try again.'.format(name='running-config.xml'))
self.ui.text_out.append('> {error}'.format(error=values['error']))
self._from_config = False
# Panorama - set To DG
if self._model == 'Panorama':
self._to_device_groups = ['Shared']
for dg in self._running_config.xpath('//config/devices/entry/device-group/entry'):
self._to_device_groups.append(dg.get('name'))
self.ui.combo_to_dg.addItems(self._to_device_groups)
self.ui.combo_to_rulebase.addItems(['Select Rulebase', 'Pre Rulebase', 'Post Rulebase'])
# PAN-OS - set To VSYS
else:
self._to_vsys = []
for vsys in self._running_config.xpath('//config/devices/entry/vsys/entry'):
self._to_vsys.append(vsys.get('name'))
self.ui.combo_to_vsys.addItems(self._to_vsys)
self.ui.combo_to_rulebase.clear()
self.ui.progress_bar.setValue(75)
##############################################
# LOAD SAVED CONFIG
##############################################
def _load_saved_configs(self, output):
"""
Establish SSH connection to FW and get a list of Saved Configs
Update "From" file combo box
"""
lines = output.splitlines()
self._files = []
# update text out
self.ui.text_out.append('> Saved Config Files have been loaded!')
self.ui.text_out.append('> <b><font color="green">Please select a file...</font></b>')
for l in lines:
if re.search(r'(.+)(\dK$)', l):
self._files.append(l.split()[0])
# update file combo box (FROM file)
self._files.insert(0, 'Select a File')
self.ui.combo_file.clear()
self.ui.combo_file.addItems(self._files)
self.ui.progress_bar.setValue(100)
    ##############################################
    # API REQUEST
    ##############################################
    def _api_request(self, values):
        """
        API request driver.

        Returns a (result, payload, error) triple:
          * (True, response-text, None) on success,
          * (False, message, exc) when the connection fails,
          * (None, message, exc) on timeout — callers here only check
            truthiness (or `is True`), so None and False are treated alike.

        NOTE(review): verify=False disables TLS certificate checking —
        presumably deliberate for self-signed firewall certs; confirm.
        """
        try:
            return True, requests.post(self._url, values, verify=False, timeout=10).text, None
        except requests.exceptions.ConnectionError as error_api:
            return False, 'Error connecting to {ip} - Check IP Address'.format(ip=self._ip), error_api
        except requests.exceptions.Timeout as error_timeout:
            return None, 'Connection to {ip} timed out, please try again'.format(ip=self._ip), error_timeout
##############################################
# SHOW ERROR
##############################################
def _show_critical_error(self, message_list):
message = '''
<p>
{message}
<br>
Error: {error}
</p>
'''.format(message=message_list[0], error=message_list[1])
result = QMessageBox.critical(self, 'ERROR', message, QMessageBox.Abort, QMessageBox.Retry)
# Abort
if result == QMessageBox.Abort:
self.close()
# Retry
else:
# set error flag to True -- implies error
self._flag_error = True
return
############################################################################
# MAIN
############################################################################
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle('Fusion')
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, QtCore.Qt.white)
palette.setColor(QPalette.Base, QColor(15, 15, 15))
palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
palette.setColor(QPalette.ToolTipBase, QtCore.Qt.white)
palette.setColor(QPalette.ToolTipText, QtCore.Qt.white)
palette.setColor(QPalette.Text, QtCore.Qt.white)
palette.setColor(QPalette.Button, QColor(53, 53, 53))
palette.setColor(QPalette.ButtonText, QtCore.Qt.white)
palette.setColor(QPalette.BrightText, QtCore.Qt.red)
palette.setColor(QPalette.Highlight, QColor(25, 193, 255).lighter())
palette.setColor(QPalette.HighlightedText, QtCore.Qt.black)
app.setPalette(palette)
main = LoadPartialMainWindow()
main.show()
sys.exit(app.exec_()) | 42.845395 | 432 | 0.572468 | 62,657 | 0.962104 | 0 | 0 | 0 | 0 | 0 | 0 | 22,620 | 0.347332 |
1dd06073161d1a6fc8153bce35244468454bcf1e | 3,173 | py | Python | lab_2/find_lcs_length_optimized_test.py | alenashuvar/2020-2-level-labs | aa5185fae19b386c741faa8dcff3424872642090 | [
"MIT"
] | 3 | 2020-09-05T16:27:19.000Z | 2021-03-12T12:08:00.000Z | lab_2/find_lcs_length_optimized_test.py | alenashuvar/2020-2-level-labs | aa5185fae19b386c741faa8dcff3424872642090 | [
"MIT"
] | 100 | 2020-09-06T16:36:23.000Z | 2020-12-12T06:18:47.000Z | lab_2/find_lcs_length_optimized_test.py | alenashuvar/2020-2-level-labs | aa5185fae19b386c741faa8dcff3424872642090 | [
"MIT"
] | 62 | 2020-09-06T10:49:43.000Z | 2021-09-10T07:07:53.000Z | """
Tests find_lcs_optimized function
"""
import timeit
import unittest
from memory_profiler import memory_usage
from lab_2.main import find_lcs_length_optimized, tokenize_big_file
class FindLcsOptimizedTest(unittest.TestCase):
    """
    Checks for find_lcs_optimized function
    """
    def test_find_lcs_length_optimized_ideal_case(self):
        """
        Tests that find_lcs_length_optimized
        works just fine and not fails with big text
        """
        # both corpora are truncated to the same size so the runs are comparable
        sentence_tokens_first_text = tokenize_big_file('lab_2/data.txt')[:30000]
        sentence_tokens_second_text = tokenize_big_file('lab_2/data_2.txt')[:30000]
        plagiarism_threshold = 0.0001
        actual = find_lcs_length_optimized(sentence_tokens_first_text,
                                           sentence_tokens_second_text,
                                           plagiarism_threshold)
        # NOTE(review): reference_lcs (3899) is only printed while almost_equal
        # (3910) is what gets asserted — confirm which value is authoritative
        reference_lcs = 3899
        almost_equal = 3910
        print(f"Actual find_lcs_length_optimized function lcs is {actual}")
        print(f"Reference find_lcs_length_optimized function lcs is {reference_lcs}")
        self.assertTrue(actual)
        self.assertEqual(almost_equal, actual)
    def test_find_lcs_length_optimized_quickest_time(self):
        """
        Tests that find_lcs_length_optimized
        works faster than time reference
        """
        # reference timing with a 10% tolerance margin
        reference = 353.6632048700001 * 1.1
        sentence_tokens_first_text = tokenize_big_file('lab_2/data.txt')[:30000]
        sentence_tokens_second_text = tokenize_big_file('lab_2/data_2.txt')[:30000]
        plagiarism_threshold = 0.0001
        start_time = timeit.default_timer()
        find_lcs_length_optimized(sentence_tokens_first_text,
                                  sentence_tokens_second_text,
                                  plagiarism_threshold)
        end_time = timeit.default_timer()
        actual = end_time - start_time
        print(f"Actual find_lcs_length_optimized function running time is: {actual}")
        print(f"Reference find_lcs_length_optimized function running time is: {reference}")
        self.assertGreater(reference, actual)
    def test_find_lcs_length_optimized_lowest_memory(self):
        """
        Tests that find_lcs_length_optimized
        works efficiently than given memory reference
        """
        # reference memory footprint (MiB) with a 10% tolerance margin
        reference = 65.69129527698863 * 1.1
        sentence_tokens_first_text = tokenize_big_file('lab_2/data.txt')[:30000]
        sentence_tokens_second_text = tokenize_big_file('lab_2/data_2.txt')[:30000]
        plagiarism_threshold = 0.0001
        # memory_usage samples the process every `interval` seconds while the
        # call runs; the mean of those samples is compared to the reference
        actual_memory = memory_usage((find_lcs_length_optimized,
                                      (sentence_tokens_first_text,
                                       sentence_tokens_second_text,
                                       plagiarism_threshold)),
                                     interval=2)
        actual = sum(actual_memory)/len(actual_memory)
        print(f'Actual find_lcs_length_optimized function memory consuming is: {actual}')
        print(f'Reference find_lcs_length_optimized function memory consuming is: {reference}')
        self.assertGreater(reference, actual)
1dd0b855c9e3e5a1470c944f9d3f6ea76ae0c1de | 754 | py | Python | second/app.py | angelsoffury/microservices-basic | ad33b9be82e68b0363f6da676ca1d9e9c71b8f0e | [
"BSD-Source-Code"
] | 1 | 2022-01-20T16:36:35.000Z | 2022-01-20T16:36:35.000Z | second/app.py | angelsoffury/microservices-basic | ad33b9be82e68b0363f6da676ca1d9e9c71b8f0e | [
"BSD-Source-Code"
] | null | null | null | second/app.py | angelsoffury/microservices-basic | ad33b9be82e68b0363f6da676ca1d9e9c71b8f0e | [
"BSD-Source-Code"
] | null | null | null | from flask import Flask, jsonify, render_template
import requests
app = Flask(__name__)
@app.route('/')
def intro():
    """Landing page: fetch greeting info from the backend root endpoint and render it."""
    backend_reply = requests.get("http://backend-service:6002/")
    greeting = backend_reply.json()["info"]
    return render_template('index.html', data=greeting, title='Hello!')
@app.route('/info')
def info():
    """Proxy the backend's /info endpoint and return its raw info string."""
    backend_reply = requests.get("http://backend-service:6002/info")
    payload = backend_reply.json()
    return payload["info"]
@app.route('/cake')
def cake():
    """Render the page using data produced by the backend's /maker endpoint."""
    backend_reply = requests.get("http://backend-service:6002/maker")
    maker_info = backend_reply.json()["info"]
    return render_template('index.html', data=maker_info, title='Basic Microservice')
# Run the Flask development server when this file is executed directly.
# NOTE(review): port is passed as the string '5001'; Flask accepts this,
# but an int is conventional — confirm before changing.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port='5001', debug=True)
| 23.5625 | 79 | 0.656499 | 0 | 0 | 0 | 0 | 574 | 0.761273 | 0 | 0 | 211 | 0.279841 |
1dd48b3bcb89fa1bccfb199439da3963a4087136 | 56,257 | py | Python | autokernel/autokernel.py | oddlama/autokernel | cd165cbc9467385c356d4a3d31b524a40d574edd | [
"MIT"
] | 49 | 2020-04-09T14:36:05.000Z | 2022-03-19T12:57:54.000Z | autokernel/autokernel.py | oddlama/autokernel | cd165cbc9467385c356d4a3d31b524a40d574edd | [
"MIT"
] | 1 | 2021-08-09T19:11:00.000Z | 2021-11-10T17:12:17.000Z | autokernel/autokernel.py | oddlama/autokernel | cd165cbc9467385c356d4a3d31b524a40d574edd | [
"MIT"
] | 4 | 2020-04-16T19:59:34.000Z | 2021-12-17T12:57:37.000Z | import autokernel.kconfig
import autokernel.config
import autokernel.lkddb
import autokernel.node_detector
import autokernel.symbol_tracking
from autokernel import __version__
from autokernel import log
from autokernel import util
from autokernel.symbol_tracking import set_value_detect_conflicts
import argparse
import glob
import grp
import gzip
import kconfiglib
import os
import pwd
import re
import shutil
import stat
import subprocess
import sys
import tempfile
from datetime import datetime, timezone
from pathlib import Path
def check_program_exists(exe):
    """Abort with a fatal error unless the executable `exe` can be located via PATH."""
    if shutil.which(exe) is not None:
        return
    log.die("Missing program '{}'. Please ensure that it is installed.".format(exe))
def check_execution_environment(args):
    """
    Checks that some required external programs exist, and some miscellaneous things.

    Most importantly, verifies that no component of the autokernel configuration
    path is writable by anyone other than the current user or root, because the
    configuration can contain commands that autokernel will execute.
    """
    check_program_exists('uname')
    check_program_exists('mount')
    check_program_exists('umount')
    check_program_exists('make')
    cur_uid = os.geteuid()
    with autokernel.config.config_file_path(args.autokernel_config, warn=True) as config_file:
        def _die_writable_config_by(component, name):
            # Abort: a writable path component would allow command injection.
            log.die("Refusing to run, because the path '{0}' is writable by {1}. This allows {1} to replace the configuration '{2}' and thus inject commands.".format(component, name, config_file))
        if not config_file.exists():
            log.die("Configuration file '{}' does not exist!".format(config_file))
        # Ensure that the config file has the correct mode, to prevent command-injection by other users.
        # No component of the path may be modifiable by anyone else but the current user (or root).
        config_path = config_file.resolve()
        for component in [config_path] + [p for p in config_path.parents]:
            st = component.stat()
            # Owner-writable by a different, non-root user
            if st.st_uid != cur_uid and st.st_uid != 0 and st.st_mode & stat.S_IWUSR:
                _die_writable_config_by(component, 'user {} ({})'.format(st.st_uid, pwd.getpwuid(st.st_uid).pw_name))
            # Group-writable by a non-root group
            if st.st_gid != 0 and st.st_mode & stat.S_IWGRP:
                _die_writable_config_by(component, 'group {} ({})'.format(st.st_gid, grp.getgrgid(st.st_gid).gr_name))
            # World-writable
            if st.st_mode & stat.S_IWOTH:
                _die_writable_config_by(component, 'others')
def replace_common_vars(args, p):
    """
    Expand the common placeholder tokens ({KERNEL_DIR}, {KERNEL_VERSION},
    {UNAME_ARCH}, {ARCH}) in the given path-like value and return the result
    as a string.
    """
    replacements = {
        '{KERNEL_DIR}': args.kernel_dir,
        '{KERNEL_VERSION}': autokernel.kconfig.get_kernel_version(args.kernel_dir),
        '{UNAME_ARCH}': autokernel.kconfig.get_uname_arch(),
        '{ARCH}': autokernel.kconfig.get_arch(),
    }
    result = str(p)
    for token, value in replacements.items():
        result = result.replace(token, value)
    return result
def has_proc_config_gz():
    """
    Return True if the running kernel exposes its configuration
    at /proc/config.gz.
    """
    return Path("/proc/config.gz").is_file()
def unpack_proc_config_gz():
    """
    Decompress /proc/config.gz into a NamedTemporaryFile and return the
    (still open) temporary file object; the caller owns its lifetime.
    """
    unpacked = tempfile.NamedTemporaryFile()
    with gzip.open("/proc/config.gz", "rb") as compressed:
        shutil.copyfileobj(compressed, unpacked)
    return unpacked
def kconfig_load_file_or_current_config(kconfig, config_file):
    """
    Load a kernel .config into the given kconfig object. When config_file is
    falsy, the running kernel's /proc/config.gz is unpacked and used instead.
    """
    if not config_file:
        log.info("Applying kernel config from '/proc/config.gz'")
        with unpack_proc_config_gz() as tmp:
            kconfig.load_config(os.path.realpath(tmp.name))
        return
    log.info("Applying kernel config from '{}'".format(config_file))
    kconfig.load_config(os.path.realpath(config_file))
def generated_by_autokernel_header():
    """Return a one-line comment header carrying the current UTC timestamp."""
    timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
    return "# Generated by autokernel on {}\n".format(timestamp)
def vim_config_modeline_header():
    """Return a vim modeline comment configuring filetype and indentation."""
    modeline = "# vim: set ft=ruby ts=4 sw=4 sts=-1 noet:"
    return modeline + "\n"
def apply_autokernel_config(args, kconfig, config):
    """
    Applies the given autokernel configuration to a freshly loaded kconfig object,
    and returns gathered extra information such as the resulting kernel cmdline
    (a list of kernel command line parameters collected via add_cmdline).
    """
    log.info("Applying autokernel configuration")
    # Build cmdline on demand
    kernel_cmdline = []
    # Reset symbol_changes
    autokernel.symbol_tracking.symbol_changes.clear()
    # Resolves a symbol by name, aborting with an error if it does not exist
    def get_sym(stmt):
        # Get the kconfig symbol, and change the value
        try:
            return kconfig.syms[stmt.sym_name]
        except KeyError:
            log.die_print_error_at(stmt.at, "symbol '{}' does not exist".format(stmt.sym_name))
    # Asserts that the symbol has the given value
    def assert_symbol(stmt):
        if not stmt.assert_condition.evaluate(kconfig):
            if stmt.message:
                log.die_print_error_at(stmt.at, "assertion failed: {}".format(stmt.message))
            else:
                log.die_print_error_at(stmt.at, "assertion failed")
    # Sets a symbol's value and asserts that there are no conflicting double assignments
    def set_symbol(stmt):
        # Get the kconfig symbol, and change the value
        sym = get_sym(stmt)
        value = stmt.value
        if not autokernel.kconfig.symbol_can_be_user_assigned(sym):
            log.die_print_error_at(stmt.at, "symbol {} can't be user-assigned".format(sym.name))
        # Skip assignment if value is already pinned and the statement is in try mode.
        if stmt.has_try and sym in autokernel.symbol_tracking.symbol_changes:
            log.verbose("skipping {} {}".format(autokernel.kconfig.value_to_str(value), sym.name))
            return
        # Environment variable references are resolved just before assignment
        if util.is_env_var(value):
            value = util.resolve_env_variable(stmt.at, value)
        if not set_value_detect_conflicts(sym, value, stmt.at):
            log.die_print_error_at(stmt.at, "invalid value {} for symbol {}".format(autokernel.kconfig.value_to_str(value), sym.name))
        # The assignment may silently not take effect (e.g. blocked by deps);
        # verify the symbol now reports the requested value.
        if sym.str_value != value:
            if not stmt.has_try:
                # Only throw an error if it wasn't a try
                log.die_print_error_at(stmt.at, "symbol assignment failed: {} from {} → {}".format(
                    sym.name,
                    autokernel.kconfig.value_to_str(sym.str_value),
                    autokernel.kconfig.value_to_str(value)))
            else:
                log.verbose("failed try set {} {} (symbol is currently not assignable to the chosen value)".format(autokernel.kconfig.value_to_str(stmt.value), sym.name))
    # Visit all module nodes and apply configuration changes
    visited = set()
    def visit(module):
        # Ensure we visit only once
        if module.name in visited:
            return
        visited.add(module.name)
        def stmt_use(stmt):
            # Recurse into the referenced module
            visit(stmt.module)
        def stmt_merge(stmt):
            # Merge an external kernel config file without replacing existing values
            filename = replace_common_vars(args, stmt.filename)
            log.verbose("Merging external kconf '{}'".format(filename))
            kconfig.load_config(os.path.realpath(filename), replace=False)
            # Assert that there are no conflicts
            for sym in autokernel.symbol_tracking.symbol_changes:
                sc = autokernel.symbol_tracking.symbol_changes[sym]
                if sym.str_value != sc.value:
                    autokernel.symbol_tracking.die_print_conflict(stmt.at, 'merge', sym, sym.str_value, sc)
        def stmt_assert(stmt):
            assert_symbol(stmt)
        def stmt_set(stmt):
            set_symbol(stmt)
        def stmt_add_cmdline(stmt):
            kernel_cmdline.append(stmt.param)
        # Dispatch table: statement class -> handler
        dispatch_stmt = {
            autokernel.config.ConfigModule.StmtUse: stmt_use,
            autokernel.config.ConfigModule.StmtMerge: stmt_merge,
            autokernel.config.ConfigModule.StmtAssert: stmt_assert,
            autokernel.config.ConfigModule.StmtSet: stmt_set,
            autokernel.config.ConfigModule.StmtAddCmdline: stmt_add_cmdline,
        }
        def conditions_met(stmt):
            # True only if every condition attached to the statement evaluates truthy
            for condition in stmt.conditions:
                if not condition.evaluate(kconfig):
                    return False
            return True
        for stmt in module.all_statements_in_order:
            # Ensure all attached conditions are met for the statement.
            if conditions_met(stmt):
                dispatch_stmt[stmt.__class__](stmt)
    # Visit the root node and apply all symbol changes
    visit(config.kernel.module)
    log.verbose(" Changed {} symbols".format(len(autokernel.symbol_tracking.symbol_changes)))
    # Lastly, invalidate all non-assigned symbols to process new default value conditions
    for sym in kconfig.unique_defined_syms:
        if sym.user_value is None:
            sym._invalidate() # pylint: disable=protected-access
    return kernel_cmdline
def execute_command(args, name, cmd, _replace_vars):
    """
    Run the configured command `cmd` (after expanding each argument with
    _replace_vars), aborting on a non-zero exit status. A no-op when the
    command list is empty.
    """
    if not cmd.value:
        return
    command = [_replace_vars(args, part) for part in cmd.value]
    log.info("Executing {}: [{}]".format(name, ', '.join("'{}'".format(i) for i in command)))
    try:
        # Replace variables in command and run it
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        log.die("{} failed with code {}. Aborting.".format(name, e.returncode))
def main_setup(args):
    """
    Main function for the 'setup' command.

    Creates args.setup_dir (which must not exist yet) and populates it with
    the default configuration shipped in autokernel.contrib.etc.
    """
    log.info("Setting up autokernel configuration at '{}'".format(args.setup_dir))
    setup_dir = Path(args.setup_dir)
    if setup_dir.exists():
        log.die("Refusing to setup: directory '{}' exists".format(args.setup_dir))
    # Create the configuration owner-only (0700/0600): the config may contain
    # commands, see check_execution_environment.
    saved_umask = os.umask(0o077)
    setup_dir.mkdir()
    modules_d_dir = setup_dir / 'modules.d'
    modules_d_dir.mkdir()
    import autokernel.contrib.etc as etc
    import autokernel.contrib.etc.modules_d as modules_d
    # Copy every shipped *.conf resource into the new directory tree
    for i in util.resource_contents(etc):
        if i.endswith('.conf'):
            with (setup_dir / i).open('w') as f:
                f.write(util.read_resource(i, pkg=etc))
    for i in util.resource_contents(modules_d):
        if i.endswith('.conf'):
            with (modules_d_dir / i).open('w') as f:
                f.write(util.read_resource(i, pkg=modules_d))
    os.umask(saved_umask)
    log.info("A default configuration has been installed")
    log.info("You might want to edit it now.")
def main_check_config(args):
    """
    Main function for the 'check' command.

    Generates a kernel config from the autokernel configuration and diffs it
    against either a given config file (args.compare_config) or, by default,
    the running kernel's /proc/config.gz.
    """
    if args.compare_config:
        if not args.compare_kernel_dir:
            args.compare_kernel_dir = args.kernel_dir
        kname_cmp = "'{}'".format(args.compare_config)
    else:
        if not has_proc_config_gz():
            log.die("This kernel does not expose /proc/config.gz. Please provide the path to a valid config file manually.")
        if not args.compare_kernel_dir:
            # Use /usr/src/linux-{kernel_version} as the directory.
            running_kver = subprocess.run(['uname', '-r'], check=True, stdout=subprocess.PIPE).stdout.decode().strip().splitlines()[0]
            args.compare_kernel_dir = os.path.join('/usr/src/linux-{}'.format(running_kver))
            try:
                check_kernel_dir(args.compare_kernel_dir)
            except argparse.ArgumentTypeError:
                log.die("Could not find sources for running kernel (version {}) in '{}', use --check_kernel_dir to specify it manually.".format(running_kver, args.compare_kernel_dir))
        kname_cmp = 'running kernel'
    log.info("Comparing {} against generated config".format(kname_cmp))
    # Load configuration file
    config = autokernel.config.load_config(args.autokernel_config)
    # Load symbols from Kconfig and apply the autokernel configuration
    kconfig_gen = autokernel.kconfig.load_kconfig(args.kernel_dir)
    apply_autokernel_config(args, kconfig_gen, config)
    # Load symbols from the comparison Kconfig
    kconfig_cmp = autokernel.kconfig.load_kconfig(args.compare_kernel_dir)
    # Load the given config file or the current kernel's config
    kconfig_load_file_or_current_config(kconfig_cmp, args.compare_config)
    # READABILITY FIX: the ANSI color sequences are spelled with explicit
    # \033 escapes instead of raw ESC control bytes embedded in the source.
    indicator_del = log.color("\033[31m-\033[m", "-")
    indicator_add = log.color("\033[32m+\033[m", "+")
    indicator_mod = log.color("\033[33m~\033[m", "~")
    log.info("Comparing existing config (left) against generated config (right)")
    log.info(" ({}) symbol was removed".format(indicator_del))
    log.info(" ({}) symbol is new".format(indicator_add))
    log.info(" ({}) symbol value changed".format(indicator_mod))
    gen_syms = [s.name for s in kconfig_gen.unique_defined_syms]
    cmp_syms = [s.name for s in kconfig_cmp.unique_defined_syms]
    def intersection(a, b):
        # Order-preserving intersection (keeps the order of `a`)
        return [i for i in a if i in b]
    def comprehension(a, b):
        # Order-preserving set difference (keeps the order of `a`)
        return [i for i in a if i not in b]
    common_syms = intersection(gen_syms, set(cmp_syms))
    common_syms_set = set(common_syms)
    only_gen_syms = comprehension(gen_syms, common_syms_set)
    only_cmp_syms = comprehension(cmp_syms, common_syms_set)
    supress_new, supress_del, supress_chg = (args.suppress_columns or (False, False, False))
    # Symbols only present in the generated config
    if not supress_new:
        for sym in only_gen_syms:
            sym_gen = kconfig_gen.syms[sym]
            print(indicator_add + " {} {}".format(
                autokernel.kconfig.value_to_str(sym_gen.str_value),
                sym))
    # Symbols only present in the comparison config
    if not supress_del:
        for sym in only_cmp_syms:
            sym_cmp = kconfig_cmp.syms[sym]
            print(indicator_del + " {} {}".format(
                autokernel.kconfig.value_to_str(sym_cmp.str_value),
                sym))
    # Symbols present in both but with different values
    if not supress_chg:
        for sym in common_syms:
            sym_gen = kconfig_gen.syms[sym]
            sym_cmp = kconfig_cmp.syms[sym]
            if sym_gen.str_value != sym_cmp.str_value:
                print(indicator_mod + " {} → {} {}".format(
                    autokernel.kconfig.value_to_str(sym_cmp.str_value),
                    autokernel.kconfig.value_to_str(sym_gen.str_value),
                    sym))
def main_generate_config(args, config=None):
    """
    Main function for the 'generate_config' command.

    Evaluates the autokernel configuration against the kernel's Kconfig tree
    and writes the resulting kernel .config (default: {KERNEL_DIR}/.config).
    """
    log.info("Generating kernel configuration")
    # Parse the autokernel configuration unless the caller already did
    if not config:
        config = autokernel.config.load_config(args.autokernel_config)
    # Default the output path when none was requested
    if not getattr(args, 'output', None):
        args.output = os.path.join(args.kernel_dir, '.config')
    # Evaluate the configuration against a fresh Kconfig tree
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    apply_autokernel_config(args, kconfig, config)
    # Persist the resulting symbol values
    kconfig.write_config(
        filename=args.output,
        header=generated_by_autokernel_header(),
        save_old=False)
    log.info("Configuration written to '{}'".format(args.output))
def clean_kernel_dir(args):
    """Run 'make distclean' in the kernel tree, aborting on failure."""
    result = subprocess.run(['make', 'distclean'], cwd=args.kernel_dir)
    if result.returncode != 0:
        log.die("'make distclean' failed in {} with code {}".format(args.kernel_dir, result.returncode))
def build_kernel(args):
    """Run 'make' in the kernel tree, aborting on failure."""
    result = subprocess.run(['make'], cwd=args.kernel_dir)
    if result.returncode != 0:
        log.die("'make' failed in {} with code {}".format(args.kernel_dir, result.returncode))
def build_initramfs(args, config, modules_prefix, initramfs_output):
    """
    Run the configured initramfs build command and move its produced archive
    to initramfs_output inside the kernel tree.
    """
    log.info("Building initramfs")

    def _expand(cmd_args, value):
        # Common placeholders first, then the initramfs-specific ones.
        expanded = replace_common_vars(cmd_args, value)
        if '{MODULES_PREFIX}' in expanded:
            if modules_prefix is None:
                log.die("A variable used {MODULES_PREFIX}, but kernel module support is disabled!")
            expanded = expanded.replace('{MODULES_PREFIX}', modules_prefix)
        return expanded.replace('{INITRAMFS_OUTPUT}', initramfs_output)

    # Execute initramfs build_command
    execute_command(args, 'initramfs.build_command', config.initramfs.build_command, _expand)

    if config.initramfs.build_output:
        produced = _expand(args, config.initramfs.build_output.value)
        try:
            # Move the output file as stated in the configuration to the kernel tree
            shutil.move(produced, initramfs_output)
        except IOError as e:
            log.die("Could not copy initramfs from '{}' to '{}': {}".format(produced, initramfs_output, str(e)))
def install_modules(args, prefix="/"):
    """
    Run 'make modules_install' with INSTALL_MOD_PATH set to `prefix`,
    using the conventional 022 umask, and abort on failure.
    """
    # Use correct 022 umask when installing modules
    previous_umask = os.umask(0o022)
    completed = subprocess.run(['make', 'modules_install', 'INSTALL_MOD_PATH=' + prefix], cwd=args.kernel_dir, stdout=None)
    if completed.returncode != 0:
        log.die("'make modules_install INSTALL_MOD_PATH={}' failed in {} with code {}".format(prefix, args.kernel_dir, completed.returncode))
    os.umask(previous_umask)
def main_build(args, config=None):
    """
    Main function for the 'build' command.

    Generates the kernel configuration, builds the kernel (twice when a
    builtin initramfs must be packed into the image), and optionally builds
    an initramfs via the configured external generator.
    """
    if not config:
        # Load configuration file
        config = autokernel.config.load_config(args.autokernel_config)
    # Set umask for build
    saved_umask = os.umask(config.build.umask.value)
    # Execute pre hook
    execute_command(args, 'build.hooks.pre', config.build.hooks.pre, replace_common_vars)
    # Clean the kernel dir, if the user wants that
    if args.clean:
        log.info("Cleaning kernel directory")
        clean_kernel_dir(args)
    kernel_version = autokernel.kconfig.get_kernel_version(args.kernel_dir)
    # Config output is "{KERNEL_DIR}/.config.autokernel"
    config_output = os.path.join(args.kernel_dir, '.config.autokernel')
    # Initramfs basename "initramfs-{KERNEL_VERSION}.cpio"
    # The .cpio suffix is crucial, as the kernel makefile requires it to detect initramfs archives
    initramfs_basename = 'initramfs-{}.cpio'.format(kernel_version)
    # Initramfs output is "{KERNEL_DIR}/initramfs-{KERNEL_VERSION}.cpio"
    initramfs_output = os.path.join(args.kernel_dir, initramfs_basename)
    # Load symbols from Kconfig
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    sym_cmdline_bool = kconfig.syms['CMDLINE_BOOL']
    sym_cmdline = kconfig.syms['CMDLINE']
    sym_initramfs_source = kconfig.syms['INITRAMFS_SOURCE']
    sym_modules = kconfig.syms['MODULES']
    # Set some defaults
    sym_cmdline_bool.set_value('y')
    sym_cmdline.set_value('')
    sym_initramfs_source.set_value('{INITRAMFS}')
    # Apply autokernel configuration; returns collected add_cmdline parameters
    kernel_cmdline = apply_autokernel_config(args, kconfig, config)
    def _build_kernel():
        # Write configuration to file
        kconfig.write_config(
            filename=config_output,
            header=generated_by_autokernel_header(),
            save_old=False)
        # Copy file to .config, which may get changed by the makefiles
        shutil.copyfile(config_output, os.path.join(args.kernel_dir, '.config'))
        # Build the kernel
        build_kernel(args)
    def set_cmdline():
        # Fold the collected cmdline parameters into CMDLINE/CMDLINE_BOOL
        kernel_cmdline_str = ' '.join(kernel_cmdline)
        has_user_cmdline_bool = sym_cmdline_bool in autokernel.symbol_tracking.symbol_changes
        has_user_cmdline = sym_cmdline in autokernel.symbol_tracking.symbol_changes
        if has_user_cmdline_bool and sym_cmdline_bool.str_value == 'n':
            # The user has explicitly disabled the builtin commandline,
            # so there is no need to set it.
            pass
        else:
            sym_cmdline_bool.set_value('y')
            # Issue a warning, if a custom cmdline does not contain "{CMDLINE}", and we have gathered add_cmdline options.
            # BUGFIX: str has no .contains() method — the original call
            # `sym_cmdline.str_value.contains('{CMDLINE}')` raised AttributeError
            # whenever CMDLINE had been set manually. Use the `in` operator.
            if has_user_cmdline and '{CMDLINE}' not in sym_cmdline.str_value and len(kernel_cmdline) > 0:
                log.warn("CMDLINE was set manually and doesn't contain a '{CMDLINE}' token, although add_cmdline has also been used.")
            if has_user_cmdline:
                sym_cmdline.set_value(sym_cmdline.str_value.replace('{CMDLINE}', kernel_cmdline_str))
            else:
                sym_cmdline.set_value(kernel_cmdline_str)
    def check_initramfs_source(sym_initramfs_source):
        has_user_initramfs_source = sym_initramfs_source in autokernel.symbol_tracking.symbol_changes
        # It is an error to explicitly set INITRAMFS_SOURCE, if our initramfs is set to builtin.
        if has_user_initramfs_source \
                and config.initramfs.enabled \
                and config.initramfs.builtin \
                and autokernel.symbol_tracking.symbol_changes[sym_initramfs_source].reason == 'explicitly set':
            log.die("INITRAMFS_SOURCE was set manually, although a custom initramfs should be built and integrated into the kernel.")
    # Set CMDLINE_BOOL and CMDLINE
    set_cmdline()
    # Preprocess INITRAMFS_SOURCE
    check_initramfs_source(sym_initramfs_source)
    # Kernel build pass #1
    log.info("Building kernel")
    # On the first pass, disable all initramfs sources
    sym_initramfs_source.set_value('')
    # Start the build process
    _build_kernel()
    # Build the initramfs, if enabled
    if config.initramfs.enabled:
        with tempfile.TemporaryDirectory() as tmppath:
            if sym_modules.str_value != 'n':
                # Temporarily install modules so the initramfs generator has access to them
                log.info("Copying modules into temporary directory")
                tmp_modules_prefix = os.path.join(tmppath, 'modules')
                install_modules(args, prefix=tmp_modules_prefix)
            else:
                tmp_modules_prefix = None
            # Build the initramfs
            build_initramfs(args, config, tmp_modules_prefix, initramfs_output)
        # Pack the initramfs into the kernel if desired
        if config.initramfs.builtin:
            log.info("Rebuilding kernel to pack external resources")
            # On the second pass, we enable the initramfs cpio archive, which is now in the kernel_dir
            sym_initramfs_source.set_value(initramfs_basename)
            # Rebuild the kernel to pack the new images
            _build_kernel()
    # Execute post hook
    execute_command(args, 'build.hooks.post', config.build.hooks.post, replace_common_vars)
    os.umask(saved_umask)
def main_install(args, config=None):
    """
    Main function for the 'install' command.

    Mounts configured mountpoints, installs modules/bzImage/config/initramfs
    to their configured targets (rotating existing files to *.old[.N]), purges
    outdated versioned files, and unmounts what it mounted.
    """
    if not config:
        # Load configuration file
        config = autokernel.config.load_config(args.autokernel_config)
    # Use correct umask when installing
    saved_umask = os.umask(config.install.umask.value)
    # Mount all configured mountpoints that are not mounted yet;
    # remember them so they can be unmounted again at the end.
    new_mounts = []
    for i in config.install.mount:
        if not os.access(i, os.R_OK):
            log.die("Permission denied on accessing '{}'. Aborting.".format(i))
        if not os.path.ismount(i):
            log.info("Mounting {}".format(i))
            new_mounts.append(i)
            try:
                subprocess.run(['mount', '--', i], check=True)
            except subprocess.CalledProcessError as e:
                log.die("Could not mount '{}', mount returned code {}. Aborting.".format(i, e.returncode))
    # Check mounts
    for i in config.install.mount + config.install.assert_mounted:
        if not os.access(i, os.R_OK):
            log.die("Permission denied on accessing '{}'. Aborting.".format(i))
        if not os.path.ismount(i):
            log.die("'{}' is not mounted. Aborting.".format(i))
    # Execute pre hook
    execute_command(args, 'install.hooks.pre', config.install.hooks.pre, replace_common_vars)
    kernel_version = autokernel.kconfig.get_kernel_version(args.kernel_dir)
    target_dir = replace_common_vars(args, config.install.target_dir)
    # Config output is "{KERNEL_DIR}/.config.autokernel"
    config_output = os.path.join(args.kernel_dir, '.config.autokernel')
    # Initramfs basename "initramfs-{KERNEL_VERSION}.cpio"
    # The .cpio suffix is cruical, as the kernel makefile requires it to detect initramfs archives
    initramfs_basename = 'initramfs-{}.cpio'.format(kernel_version)
    # Initramfs output is "{KERNEL_DIR}/initramfs-{KERNEL_VERSION}.cpio"
    initramfs_output = os.path.join(args.kernel_dir, initramfs_basename)
    # bzImage output
    bzimage_output = os.path.join(args.kernel_dir, 'arch', autokernel.kconfig.get_uname_arch(), 'boot/bzImage')
    def _purge_old(path):
        # Remove all but the newest `keep_old`+1 versioned instances of `path`.
        # `path` must contain exactly one {KERNEL_VERSION} token to be purged.
        keep_old = config.install.keep_old.value
        # Disable purging on negative count
        if keep_old < 0:
            return
        # Disable purging for non versionated paths
        if not '{KERNEL_VERSION}' in path:
            return
        tokens = path.split('{KERNEL_VERSION}')
        if len(tokens) > 2:
            log.warn("Cannot purge path with more than one {{KERNEL_VERSION}} token: '{}'".format(path))
            return
        re_semver = re.compile(r'^[\d\.]+\d')
        def _version_sorter(i):
            # Sort key: (numeric kernel version, creation time)
            suffix = i[len(tokens[0]):]
            basename = suffix.split('/')[0]
            st = os.stat(i)
            try:
                time_create = st.st_birthtime
            except AttributeError:
                # st_birthtime is unavailable on this platform; fall back to mtime
                time_create = st.st_mtime
            semver = re_semver.match(basename).group()
            val = autokernel.config.semver_to_int(semver)
            return val, time_create
        escaped_kv = re.escape('{KERNEL_VERSION}')
        # matches from {KERNEL_VERSION} until first / exclusive in an regex escaped path
        match_basename = re.compile(re.escape(escaped_kv) + r"(.+?(?=\\\/|$)).*$")
        # derive regex to check if a valid semver is contained and prefix and suffix are given
        re_match_valid_paths = re.compile('^' + match_basename.sub(lambda m: r'[0-9]+(\.[0-9]+(\.[0-9]+)?)?(-[^\/]*)?' + m.group(1) + r'.*$', re.escape(path)))
        # matches from {KERNEL_VERSION} until first / exclusive in a normal path
        re_replace_wildcard = re.compile(escaped_kv + r"[^\/]*")
        # replace {KERNEL_VERSION}-* component with *
        wildcard_path = re_replace_wildcard.sub('*', glob.escape(path))
        # sort out paths that don't contain valid semvers
        valid_globbed = [i for i in glob.glob(wildcard_path) if re_match_valid_paths.match(i)]
        for i in sorted(valid_globbed, key=_version_sorter)[:-(keep_old + 1)]:
            # For security, we will not call rmtree on a path that doesn't end with a slash,
            # or if the realpath has less then two slash characters in it.
            # Otherwise we only call unlink
            if i[-1] == '/' and os.path.realpath(i).count('/') >= 2:
                try:
                    shutil.rmtree(i)
                except OSError as e:
                    log.warn("Could not remove {}: {}".format(i, str(e)))
            else:
                try:
                    os.unlink(i)
                except IOError as e:
                    log.warn("Could not remove {}: {}".format(i, str(e)))
    def _move_to_old(path):
        # Rotate `path` to `path.old`, or `path.old.N` with the next free N.
        re_old_suffix = re.compile(r'^.*\.old(\.\d+)?\/*$')
        dst = path + '.old'
        highest_num = -1
        for i in glob.glob(glob.escape(dst) + '*'):
            m = re_old_suffix.match(i)
            old_num = int((m.group(1) or '.0')[1:]) if m else 0
            if highest_num < old_num:
                highest_num = old_num
        if highest_num >= 0:
            dst += ".{:d}".format(highest_num + 1)
        shutil.move(path, dst)
    def _install(name, src, target_var):
        # Copy `src` to target_dir/<expanded target_var>, rotating an existing
        # destination and purging outdated versions afterwards.
        # If the target is disabled, return.
        if not target_var:
            return
        # Figure out destination, and move existing filed if necessary
        dst = os.path.join(target_dir, replace_common_vars(args, target_var))
        if os.path.exists(dst):
            _move_to_old(dst)
        # Create directory if it doesn't exist
        Path(os.path.dirname(dst)).mkdir(parents=True, exist_ok=True)
        log.info("Installing {:<11s} {}".format(name + ':', dst))
        # Install target file
        shutil.copyfile(src, dst)
        # Purge old files
        _purge_old(os.path.join(target_dir, str(target_var)))
    # Move target_dir, if it is dynamic
    if '{KERNEL_VERSION}' in str(config.install.target_dir) and os.path.exists(target_dir):
        _move_to_old(os.path.realpath(target_dir))
    # Load symbols from Kconfig
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    sym_modules = kconfig.syms['MODULES']
    # Install modules
    if config.install.modules_prefix and sym_modules.str_value != 'n':
        modules_prefix = str(config.install.modules_prefix)
        modules_prefix_with_lib = os.path.join(modules_prefix, "lib/modules")
        modules_dir = os.path.join(modules_prefix_with_lib, kernel_version)
        if os.path.exists(modules_dir):
            _move_to_old(os.path.realpath(modules_dir))
        log.info("Installing modules: {}".format(modules_prefix_with_lib))
        install_modules(args, prefix=modules_prefix)
        _purge_old(modules_prefix_with_lib + "/{KERNEL_VERSION}/")
    # Install targets
    _install('bzimage', bzimage_output, config.install.target_kernel)
    _install('config', config_output, config.install.target_config)
    if config.initramfs.enabled:
        _install('initramfs', initramfs_output, config.install.target_initramfs)
    # Purge old target_dirs (will only be done if it is dynamic)
    _purge_old(str(config.install.target_dir) + '/')
    # Execute post hook
    execute_command(args, 'install.hooks.post', config.install.hooks.post, replace_common_vars)
    # Undo what we have mounted
    for i in reversed(new_mounts):
        log.info("Unmounting {}".format(i))
        try:
            subprocess.run(['umount', '--', i], check=True)
        except subprocess.CalledProcessError as e:
            # Best-effort: failing to unmount is not fatal
            log.warn("Could not umount '{}' (returned {})".format(i, e.returncode))
    # Restore old umask
    os.umask(saved_umask)
def main_build_all(args):
    """
    Main function for the 'all' command: builds the kernel and installs it,
    sharing one parsed configuration between both steps.
    """
    log.info("Started full build")
    # Parse the configuration once and hand it to both stages
    parsed_config = autokernel.config.load_config(args.autokernel_config)
    main_build(args, parsed_config)
    main_install(args, parsed_config)
class Module():
    """
    A module consists of dependencies (other modules) and option assignments.
    """
    def __init__(self, name):
        """Create an empty module with the given name."""
        # Public identifier of this module
        self.name = name
        # Modules this module depends on
        self.deps = []
        # (symbol, value) pairs to assign
        self.assignments = []
        # (symbol, value) pairs that must already hold
        self.assertions = []
        # Modules that depend on this one (filled in by _create_reverse_deps)
        self.rev_deps = []
def check_config_against_detected_modules(kconfig, modules, differences_only):
    """
    Print every detected option next to its current kconfig value, in
    dependency order, so the options can be applied from top to bottom.

    With differences_only, options whose current value already matches the
    desired value are omitted.
    """
    log.info("Here are the detected options with both current and desired value.")
    log.info("The output format is: [current] OPTION_NAME = desired")
    log.info("HINT: Options are ordered by dependencies, i.e. applying")
    # TYPO FIX in user-facing message: "buttom" -> "bottom"
    log.info(" them from top to bottom will work")
    if differences_only:
        log.info("Detected options (differences only):")
    else:
        log.info("Indicators: (=) same, (~) changed")
        log.info("Detected options:")
    visited = set()
    visited_opts = set()
    if differences_only:
        indicator_same = ""
        indicator_changed = ""
    else:
        # READABILITY FIX: ANSI color sequences are spelled with explicit
        # \033 escapes instead of raw ESC control bytes in the source.
        indicator_same = log.color('\033[32m=\033[m', '=')
        indicator_changed = log.color('\033[33m~\033[m', '~')
    def visit_opt(opt, new_value):
        # Ensure we visit only once
        if opt in visited_opts:
            return
        visited_opts.add(opt)
        sym = kconfig.syms[opt]
        changed = sym.str_value != new_value
        if changed:
            print(indicator_changed + " {} → {} {}".format(autokernel.kconfig.value_to_str(sym.str_value), autokernel.kconfig.value_to_str(new_value), sym.name))
        else:
            if not differences_only:
                print(indicator_same + " {} {}".format(autokernel.kconfig.value_to_str(sym.str_value), sym.name))
    def visit(m):
        # Ensure we visit only once
        if m in visited:
            return
        visited.add(m)
        # First visit all dependencies
        for d in m.deps:
            visit(d)
        # Then print all assignments
        for a, v in m.assignments:
            visit_opt(a, v)
    # Visit all modules
    for m in modules:
        visit(modules[m])
class KernelConfigWriter:
    """
    Writes modules to the given file in kernel config format.
    """
    def __init__(self, file):
        """Bind the output file and emit the generated-by and vim modeline headers."""
        self.file = file
        self.file.write(generated_by_autokernel_header())
        self.file.write(vim_config_modeline_header())
    def write_module(self, module):
        """
        Serialize one module as kernel .config lines.

        Exact tristate values (n/m/y) are written bare; any other value is
        quoted. Assertions become '# REQUIRES' comments and reverse
        dependencies '# required by' comments. Modules with neither
        assignments nor assertions produce no output.
        """
        if len(module.assignments) == len(module.assertions) == 0:
            return
        content = ""
        for d in module.rev_deps:
            content += "# required by {}\n".format(d.name)
        content += "# module {}\n".format(module.name)
        for a, v in module.assignments:
            # BUGFIX: `v in "nmy"` was a substring test, so v == "" or
            # multi-character values like "ny" matched and were written
            # unquoted. Only the exact tristate values may be unquoted.
            if v in ('n', 'm', 'y'):
                content += "CONFIG_{}={}\n".format(a, v)
            else:
                content += "CONFIG_{}=\"{}\"\n".format(a, v)
        for o, v in module.assertions:
            content += "# REQUIRES {} {}\n".format(o, v)
        self.file.write(content)
class ModuleConfigWriter:
    """
    Writes modules to the given file in the module config format.
    """
    def __init__(self, file):
        """Bind the output file and write the standard file headers."""
        self.file = file
        self.file.write(generated_by_autokernel_header())
        self.file.write(vim_config_modeline_header())
    def write_module(self, module):
        """Serialize one module as an autokernel `module { ... }` block."""
        pieces = []
        # Reverse dependencies are emitted as leading comments
        pieces.extend("# required by {}\n".format(dep.name) for dep in module.rev_deps)
        pieces.append("module {} {{\n".format(module.name))
        pieces.extend("\tuse {};\n".format(dep.name) for dep in module.deps)
        pieces.extend("\tset {} {};\n".format(sym, val) for sym, val in module.assignments)
        # Assertions are written commented out
        pieces.extend("\t#assert {} == {};\n".format(sym, val) for sym, val in module.assertions)
        pieces.append("}\n\n")
        self.file.write("".join(pieces))
class ModuleCreator:
    """
    Builds a graph of autokernel modules from kconfig symbols.

    Modules are created recursively: enabling a symbol may require enabling
    its dependencies first, each of which gets its own module. A synthetic
    "select all" module depends on every explicitly selected module so that
    a single module can pull in the whole generated configuration.
    """
    def __init__(self, module_prefix=''):
        # All created modules, keyed by module name.
        self.modules = {}
        # Cache: kconfig symbol -> module, so each symbol is processed once.
        self.module_for_sym = {}
        # Synthetic module depending on all explicitly selected modules.
        self.module_select_all = Module('module_select_all')
        # Prefix prepended to every generated module name (e.g. 'detected_').
        self.module_prefix = module_prefix
    def _create_reverse_deps(self):
        """
        Rebuilds every module's rev_deps list from the deps lists.
        """
        # Clear rev_deps
        for m in self.modules:
            self.modules[m].rev_deps = []
        self.module_select_all.rev_deps = []
        # Fill in reverse dependencies for all modules
        for m in self.modules:
            for d in self.modules[m].deps:
                d.rev_deps.append(self.modules[m])
        # Fill in reverse dependencies for select_all module
        for d in self.module_select_all.deps:
            d.rev_deps.append(self.module_select_all)
    def _add_module_for_option(self, sym):
        """
        Recursively adds a module for the given option,
        until all dependencies are satisfied.

        Returns the new module, or False if the symbol's dependencies can
        never be satisfied.
        """
        mod = Module(self.module_prefix + "config_{}".format(sym.name.lower()))
        # Find dependencies if needed
        needs_deps = not kconfiglib.expr_value(sym.direct_dep)
        if needs_deps:
            req_deps = autokernel.kconfig.required_deps(sym)
            if req_deps is False:
                # Dependencies can never be satisfied. The module should be skipped.
                log.warn("Cannot satisfy dependencies for {}".format(sym.name))
                return False
        if not autokernel.kconfig.symbol_can_be_user_assigned(sym):
            # If we cannot assign the symbol, we add an assertion instead.
            mod.assertions.append((sym.name, 'y'))
        else:
            mod.assignments.append((sym.name, 'y'))
        if needs_deps:
            for d, v in req_deps:
                if v:
                    depm = self.add_module_for_sym(d)
                    if depm is False:
                        return False
                    mod.deps.append(depm)
                else:
                    # BUGFIX: the assignability check must be made on the
                    # dependency symbol d (which is being set to 'n'), not on
                    # sym, which was already handled above.
                    if autokernel.kconfig.symbol_can_be_user_assigned(d):
                        mod.assignments.append((d.name, 'n'))
                    else:
                        mod.assertions.append((d.name, 'n'))
        self.modules[mod.name] = mod
        return mod
    def add_module_for_sym(self, sym):
        """
        Adds a module for the given symbol (and its dependencies).

        Results are cached per symbol; returns False if the symbol's
        dependencies are unsatisfiable.
        """
        if sym in self.module_for_sym:
            return self.module_for_sym[sym]
        # Create a module for the symbol, if it doesn't exist already
        mod = self._add_module_for_option(sym)
        if mod is False:
            return False
        self.module_for_sym[sym] = mod
        return mod
    def select_module(self, mod):
        """
        Makes the synthetic select-all module depend on the given module.
        """
        self.module_select_all.deps.append(mod)
    def add_external_module(self, mod):
        """
        Registers a module that was created outside of this class.
        """
        self.modules[mod.name] = mod
    def _write_detected_modules(self, f, output_type, output_module_name):
        """
        Writes the collected modules to a file / stdout, in the requested output format.
        """
        if output_type == 'kconf':
            writer = KernelConfigWriter(f)
        elif output_type == 'module':
            writer = ModuleConfigWriter(f)
        else:
            log.die("Invalid output_type '{}'".format(output_type))
        # Set select_all name
        self.module_select_all.name = output_module_name
        # Fill in reverse dependencies for all modules
        self._create_reverse_deps()
        visited = set()
        def visit(m):
            # Ensure we visit only once
            if m in visited:
                return
            visited.add(m)
            writer.write_module(m)
        # Write all modules in topological order
        for m in self.modules:
            visit(self.modules[m])
        # Lastly, write "select_all" module, if it has been used
        if len(self.module_select_all.deps) > 0:
            writer.write_module(self.module_select_all)
    def write_detected_modules(self, args):
        """
        Writes all modules in the requested format to args.output, or to
        stdout when no output file is given.
        """
        # Write all modules in the given format to the given output file / stdout
        if args.output:
            try:
                with open(args.output, 'w') as f:
                    self._write_detected_modules(f, args.output_type, args.output_module_name)
                log.info("Module configuration written to '{}'".format(args.output))
            except IOError as e:
                log.die(str(e))
        else:
            self._write_detected_modules(sys.stdout, args.output_type, args.output_module_name)
def detect_modules(kconfig):
    """
    Detects required options for the current system organized into modules.
    Any option with dependencies will also be represented as a module.

    Returns the ModuleCreator holding all created modules; its
    module_select_all member is a special module which selects all detected
    modules as dependencies.
    """
    log.info("Detecting kernel configuration for local system")
    log.info("HINT: It might be beneficial to run this while using a very generic")
    log.info("      and modular kernel, such as the default kernel on Arch Linux.")
    # Monotonic counter giving detected-node modules a stable numeric prefix.
    local_module_count = 0
    def next_local_module_id():
        """
        Returns the next id for a local module
        """
        nonlocal local_module_count
        i = local_module_count
        local_module_count += 1
        return i
    module_creator = ModuleCreator(module_prefix='detected_')
    def add_module_for_detected_node(node, opts):
        """
        Adds a module for the given detected node, depending on one module
        per related kconfig option. Returns the new module, or None if any
        option has unsatisfiable dependencies.
        """
        mod = Module("{:04d}_{}".format(next_local_module_id(), node.get_canonical_name()))
        for o in opts:
            try:
                sym = kconfig.syms[o]
            except KeyError:
                # The database may reference symbols unknown to this kernel tree.
                log.warn("Skipping unknown symbol {}".format(o))
                continue
            m = module_creator.add_module_for_sym(sym)
            if m is False:
                log.warn("Skipping module {} (unsatisfiable dependencies)".format(mod.name))
                return None
            mod.deps.append(m)
        module_creator.add_external_module(mod)
        return mod
    # Load the configuration database
    config_db = autokernel.lkddb.Lkddb()
    # Inspect the current system
    detector = autokernel.node_detector.NodeDetector()
    # Try to find detected nodes in the database
    log.info("Matching detected nodes against database")
    # Flatten all detected nodes, sorted for consistent output between runs
    all_nodes = []
    for detector_node in detector.nodes:
        all_nodes.extend(detector_node.nodes)
    all_nodes.sort(key=lambda x: x.get_canonical_name())
    # Find options in database for each detected node
    for node in all_nodes:
        opts = config_db.find_options(node)
        if len(opts) > 0:
            # If there are options for the node in the database,
            # add a module for the detected node and its options
            mod = add_module_for_detected_node(node, opts)
            if mod:
                # Select the module in the global selector module
                module_creator.select_module(mod)
    return module_creator
def main_detect(args):
    """
    Main function for the 'detect' command.
    """
    # Check if we should write a config or report differences.
    # args.check_config defaults to 0 when --check is absent and is None when
    # --check is given without an argument, so '!= 0' means --check was used.
    check_only = args.check_config != 0
    # Assert that --check is not used together with --type
    if check_only and args.output_type:
        log.die("--check and --type are mutually exclusive")
    # Assert that --check is not used together with --output
    if check_only and args.output:
        log.die("--check and --output are mutually exclusive")
    # Assert that --differences is only used together with --check
    if not check_only and args.check_differences:
        log.die("--differences cannot be used without --check")
    # Determine the config file to check against, if applicable.
    if check_only:
        if args.check_config:
            log.info("Checking generated config against '{}'".format(args.check_config))
        else:
            if not has_proc_config_gz():
                log.die("This kernel does not expose /proc/config.gz. Please provide the path to a valid config file manually.")
            log.info("Checking generated config against currently running kernel")
    # Ensure that some required external programs are installed
    check_program_exists('find')
    check_program_exists('findmnt')
    # Load symbols from Kconfig
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    # Detect system nodes and create modules
    module_creator = detect_modules(kconfig)
    if check_only:
        # Load the given config file or the current kernel's config
        kconfig_load_file_or_current_config(kconfig, args.check_config)
        # Check all detected symbols' values and report them
        check_config_against_detected_modules(kconfig, module_creator.modules, differences_only=args.check_differences)
    else:
        # Add fallback for output type.
        if not args.output_type:
            args.output_type = 'module'
        # Allow - as an alias for stdout
        if args.output == '-':
            args.output = None
        # Write all modules in the given format to the given output file / stdout
        module_creator.write_detected_modules(args)
def get_sym_by_name(kconfig, sym_name):
    """
    Look up a kconfig symbol by name; an optional CONFIG_ prefix is accepted
    and stripped. Aborts via log.die if the symbol does not exist.
    """
    prefix = 'CONFIG_'
    name = sym_name[len(prefix):] if sym_name.startswith(prefix) else sym_name
    if name in kconfig.syms:
        return kconfig.syms[name]
    log.die("Symbol '{}' does not exist".format(name))
def main_info(args):
    """
    Main function for the 'info' command.

    Prints the kconfiglib representation of every requested symbol.
    """
    # Load symbols from the kernel's Kconfig tree
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    for symbol_name in args.config_symbols:
        symbol = get_sym_by_name(kconfig, symbol_name)
        log.info("Information for {}:".format(symbol.name))
        print(symbol)
def main_revdeps(args):
    """
    Main function for the 'revdeps' command.

    Prints every symbol that depends on each requested symbol.
    """
    # Load symbols from the kernel's Kconfig tree
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    for symbol_name in args.config_symbols:
        symbol = get_sym_by_name(kconfig, symbol_name)
        log.info("Dependents for {}:".format(symbol.name))
        # kconfiglib stores dependents in a private attribute
        for dependent in symbol._dependents:  # pylint: disable=protected-access
            print(dependent)
def main_satisfy(args):
    """
    Main function for the 'satisfy' command.

    Builds the modules required to enable each given symbol and writes them
    in the selected output format.
    """
    # Load symbols from the kernel's Kconfig tree
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    # Unless -g/--global was given, base the dependency calculation on the
    # configuration that results from applying the autokernel config.
    if not args.dep_global:
        config = autokernel.config.load_config(args.autokernel_config)
        apply_autokernel_config(args, kconfig, config)
    # Create a module (plus dependency modules) for every requested symbol
    module_creator = ModuleCreator()
    for symbol_name in args.config_symbols:
        sym = get_sym_by_name(kconfig, symbol_name)
        mod = module_creator.add_module_for_sym(sym)
        if mod is False:
            log.warn("Skipping {} (unsatisfiable dependencies)".format(sym.name))
            continue
        module_creator.select_module(mod)
    # Default the output format, and map '-' to stdout, before writing
    if not args.output_type:
        args.output_type = 'module'
    if args.output == '-':
        args.output = None
    module_creator.write_detected_modules(args)
def check_file_exists(value):
    """
    Argparse type checker: ensure the given path refers to an existing file.
    Returns the path unchanged, or raises ArgumentTypeError.
    """
    if os.path.isfile(value):
        return value
    raise argparse.ArgumentTypeError("'{}' is not a file".format(value))
def check_kernel_dir(value):
    """
    Argparse type checker: ensure the given path is a kernel source tree,
    i.e. a directory that contains a top-level Kconfig file.
    Returns the path unchanged, or raises ArgumentTypeError.
    """
    if not os.path.isdir(value):
        raise argparse.ArgumentTypeError("'{}' is not a directory".format(value))
    kconfig_file = os.path.join(value, 'Kconfig')
    if not os.path.exists(kconfig_file):
        raise argparse.ArgumentTypeError("'{}' is not a valid kernel directory, as it does not contain a Kconfig file".format(value))
    return value
def suppress_columns_list(value):
    """
    Argparse type checker: parse a comma separated list of column suppression
    flags into a (suppress_new, suppress_del, suppress_chg) bool tuple.
    Raises ArgumentTypeError on unknown tokens.
    """
    aliases_new = ['new', 'n']
    aliases_del = ['del', 'd']
    aliases_chg = ['changed', 'chg', 'c']
    valid_values = aliases_new + aliases_del + aliases_chg
    suppress_new = False
    suppress_del = False
    suppress_chg = False
    for token in value.split(','):
        if token in aliases_new:
            suppress_new = True
        elif token in aliases_del:
            suppress_del = True
        elif token in aliases_chg:
            suppress_chg = True
        else:
            raise argparse.ArgumentTypeError("'{}' is not a valid suppression type. Must be one of [{}]".format(token, ', '.join(["'{}'".format(v) for v in valid_values])))
    return (suppress_new, suppress_del, suppress_chg)
class ArgumentParserError(Exception):
    """Raised instead of exiting when the argument parser hits an error."""
class ThrowingArgumentParser(argparse.ArgumentParser):
    """
    ArgumentParser variant that raises ArgumentParserError on parse errors
    instead of printing to stderr and exiting the process.
    """
    def error(self, message):
        # Let the caller decide how to report the failure.
        raise ArgumentParserError(message)
def autokernel_main():
    """
    Parses options and dispatches control to the correct subcommand function.

    Builds the top-level argument parser plus one subparser per command
    (setup, check, generate-config, build, install, all, info, revdeps,
    satisfy, detect); each subparser stores its handler in args.func.
    """
    parser = ThrowingArgumentParser(description="Autokernel is a kernel configuration management tool. For more information please refer to the documentation (https://autokernel.oddlama.org). If no mode is given, 'autokernel --help' will be executed.")
    subparsers = parser.add_subparsers(title="commands",
            description="Use 'autokernel command --help' to view the help for any command.",
            metavar='command')
    # General options
    parser.add_argument('-K', '--kernel-dir', dest='kernel_dir', default='/usr/src/linux', type=check_kernel_dir,
        help="The kernel directory to operate on. The default is /usr/src/linux.")
    parser.add_argument('-C', '--config', dest='autokernel_config', default=None, type=check_file_exists,
        help="The autokernel configuration file to use. Default is to use '/etc/autokernel/autokernel.conf' or an internal fallback if the default path doesn't exist.")
    parser.add_argument('--no-color', dest='use_color', action='store_false',
        help="Disables coloring in normal output.")
    parser.add_argument('--version', action='version',
        version='%(prog)s {version}'.format(version=__version__))
    # Output options (mutually exclusive verbosity levels)
    output_options = parser.add_mutually_exclusive_group()
    output_options.add_argument('-q', '--quiet', dest='quiet', action='store_true',
        help="Disables any additional output except for errors, and output from tools.")
    output_options.add_argument('-v', '--verbose', dest='verbose', action='store_true',
        help="Enables verbose output.")
    # Setup
    parser_setup = subparsers.add_parser('setup', help='Setup a default configuration in /etc/autokernel if the directory does not exist yet.')
    parser_setup.add_argument('-d', '--dir', dest='setup_dir', default='/etc/autokernel',
        help="The directory to copy the default configuration to. The default is /etc/autokernel.")
    parser_setup.set_defaults(func=main_setup)
    # Check
    parser_check = subparsers.add_parser('check', help="Reports differences between the config that will be generated by autokernel, and the given config file. If no config file is given, the script will try to load the current kernel's configuration from '/proc/config.gz'.")
    parser_check.add_argument('-c', '--compare-config', dest='compare_config', type=check_file_exists,
        help="The .config file to compare the generated configuration against.")
    parser_check.add_argument('-k', '--compare-kernel-dir', dest='compare_kernel_dir', type=check_kernel_dir,
        help="The kernel directory for the given comparison config.")
    parser_check.add_argument('--suppress', dest='suppress_columns', type=suppress_columns_list,
        help="Comma separated list of columns to suppress. 'new' or 'n' supresses new symbols, 'del' or 'd' suppresses removed symbols, 'changed', 'chg' or 'c' supresses changed symbols.")
    parser_check.set_defaults(func=main_check_config)
    # Config generation options
    parser_generate_config = subparsers.add_parser('generate-config', help='Generates the kernel configuration file from the autokernel configuration.')
    parser_generate_config.add_argument('-o', '--output', dest='output',
        help="The output filename. An existing configuration file will be overwritten. The default is '{KERNEL_DIR}/.config'.")
    parser_generate_config.set_defaults(func=main_generate_config)
    # Build options
    parser_build = subparsers.add_parser('build', help='Generates the configuration, and then builds the kernel (and initramfs if required) in the kernel tree.')
    parser_build.add_argument('-c', '--clean', dest='clean', action='store_true',
        help="Clean the kernel tree before building")
    parser_build.set_defaults(func=main_build)
    # Installation options
    parser_install = subparsers.add_parser('install', help='Installs the finished kernel, modules and other resources on the system.')
    parser_install.set_defaults(func=main_install)
    # Full build options
    parser_all = subparsers.add_parser('all', help='First builds and then installs the kernel.')
    parser_all.add_argument('-c', '--clean', dest='clean', action='store_true',
        help="Clean the kernel tree before building")
    parser_all.set_defaults(func=main_build_all)
    # Show symbol infos
    parser_info = subparsers.add_parser('info', help='Displays information for the given symbols')
    parser_info.add_argument('config_symbols', nargs='+',
        help="A list of configuration symbols to show infos for")
    parser_info.set_defaults(func=main_info)
    # Show symbol reverse dependencies
    parser_revdeps = subparsers.add_parser('revdeps', help='Displays all symbols that somehow depend on the given symbol')
    parser_revdeps.add_argument('config_symbols', nargs='+',
        help="A list of configuration symbols to show revdeps for")
    parser_revdeps.set_defaults(func=main_revdeps)
    # Single config module generation options
    parser_satisfy = subparsers.add_parser('satisfy', help='Generates required modules to enable the given symbol')
    parser_satisfy.add_argument('-g', '--global', action='store_true', dest='dep_global',
        help="Report changes solely based on kernel default instead of basing the on the current autokernel configuration")
    parser_satisfy.add_argument('-t', '--type', choices=['module', 'kconf'], dest='output_type',
        help="Selects the output type. 'kconf' will output options in the kernel configuration format. 'module' will output a list of autokernel modules to reflect the necessary configuration.")
    parser_satisfy.add_argument('-m', '--module-name', dest='output_module_name', default='rename_me',
        help="The name of the generated module, which will enable all given options (default: 'rename_me').")
    parser_satisfy.add_argument('-o', '--output', dest='output',
        help="Writes the output to the given file. Use - for stdout (default).")
    parser_satisfy.add_argument('config_symbols', nargs='+',
        help="The configuration symbols to generate modules for (including dependencies)")
    parser_satisfy.set_defaults(func=main_satisfy)
    # Config detection options
    parser_detect = subparsers.add_parser('detect', help='Detects configuration options based on information gathered from the running system')
    # NOTE: nargs='?' with default=0 makes check_config a tri-state sentinel:
    # 0 = --check absent, None = --check given without a file, str = file path.
    # main_detect relies on this to distinguish "no check" from "check current".
    parser_detect.add_argument('-c', '--check', nargs='?', default=0, dest='check_config', type=check_file_exists,
        help="Instead of outputting the required configuration values, compare the detected options against the given kernel configuration and report the status of each option. If no config file is given, the script will try to load the current kernel's configuration from '/proc/config.gz'.")
    parser_detect.add_argument('-d', '--differences', dest='check_differences', action='store_true',
        help="Requires --check. Only report options when the suggested value differs from the current value.")
    parser_detect.add_argument('-t', '--type', choices=['module', 'kconf'], dest='output_type',
        help="Selects the output type. 'kconf' will output options in the kernel configuration format. 'module' will output a list of autokernel modules to reflect the necessary configuration.")
    parser_detect.add_argument('-m', '--module-name', dest='output_module_name', default='local',
        help="The name of the generated module, which will enable all detected options (default: 'local').")
    parser_detect.add_argument('-o', '--output', dest='output',
        help="Writes the output to the given file. Use - for stdout (default).")
    parser_detect.set_defaults(func=main_detect)
    try:
        args = parser.parse_args()
    except ArgumentParserError as e:
        log.die(str(e))
    # Set logging options
    log.set_verbose(args.verbose)
    log.set_quiet(args.quiet)
    log.set_use_color(args.use_color)
    if 'func' not in args:
        # Fallback to --help.
        parser.print_help()
    elif args.func is main_setup:
        # Check if we have chosen 'setup', which is special
        # as it has no previous requirements and will not
        # open any configuration files.
        main_setup(args)
    else:
        # Initialize important environment variables
        autokernel.kconfig.initialize_environment()
        # Assert that some required programs exist
        check_execution_environment(args)
        # Execute the mode's function
        args.func(args)
def main():
    """
    Entry point: run autokernel_main and turn fatal errors into log.die calls.
    """
    try:
        autokernel_main()
    except PermissionError as err:
        log.die(str(err))
    except Exception: # pylint: disable=broad-except
        # Show the full traceback for unexpected errors before aborting.
        import traceback
        traceback.print_exc()
        log.die("Aborted because of previous errors")
# Script entry point: dispatch to main() when executed directly.
if __name__ == '__main__':
    main()
| 40.914182 | 297 | 0.654087 | 6,708 | 0.119226 | 0 | 0 | 0 | 0 | 0 | 0 | 19,322 | 0.343423 |
1dd75c9d95a741f7dad3e09aacf0d3d73196b181 | 19,007 | py | Python | game.py | henryboisdequin/Tower-Defense-Game | 687f509da179678f205b53705abec4b8ad31b188 | [
"MIT"
] | null | null | null | game.py | henryboisdequin/Tower-Defense-Game | 687f509da179678f205b53705abec4b8ad31b188 | [
"MIT"
] | null | null | null | game.py | henryboisdequin/Tower-Defense-Game | 687f509da179678f205b53705abec4b8ad31b188 | [
"MIT"
] | null | null | null | from Enemy.bosses import *
from Enemy.desert_enemies import *
from Enemy.field_enemies import *
from Enemy.graveyard_enemies import *
from Enemy.magic_enemies import *
from Enemy.moon_enemies import *
from Enemy.winter_enemies import *
from Enemy.fire_enemies import *
from menu import VerticalMenu
from Buildings.archer import ArcherTower
from Buildings.support import DamageTower, RangeTower, StoneTower
from button import PlayGameBtn, MusicBtn, PlayPauseBtn
import random
import pygame
import time
# --- Window / pygame setup ---------------------------------------------------
# The window is sized to match the scaled background image.
# NOTE(review): os and the tower/menu/button classes reach this module through
# the wildcard imports above; an explicit "import os" would be more robust.
bg = pygame.image.load("Game/Backgrounds/graveyard_bg.png")
bg = pygame.transform.scale(bg, (1350, 700))
width = bg.get_width()
height = bg.get_height()
pygame.init()
pygame.font.init()
win = pygame.display.set_mode((width, height))
pygame.display.set_caption("Tower Defense Game")

# --- Background music and the sound on/off button art ------------------------
music = pygame.mixer_music.load('Game/music3.mp3')
pygame.mixer_music.play(-1)  # -1 loops the track forever
play = pygame.transform.scale(pygame.image.load("Game/Utils/button_sound.png"), (100, 100))
pause = pygame.transform.scale(pygame.image.load("Game/Utils/button_sound_off.png"), (100, 100))

# --- Play / pause button art -------------------------------------------------
play2 = pygame.transform.scale(pygame.image.load("Game/Utils/button_start.png"), (100, 100))
pause2 = pygame.transform.scale(pygame.image.load("Game/Utils/button_pause.png"), (100, 100))

# --- Shop icons for the purchasable towers -----------------------------------
buy_archer = pygame.transform.scale(
    pygame.image.load(os.path.join("Game/Shop", "ico_7.png")).convert_alpha(), (75, 75))
buy_damage = pygame.transform.scale(
    pygame.image.load(os.path.join("Game/Shop", "ico_4.png")).convert_alpha(), (75, 75))
buy_range = pygame.transform.scale(
    pygame.image.load(os.path.join("Game/Buildings", "14.png")).convert_alpha(), (75, 75))
buy_stone = pygame.transform.scale(
    pygame.image.load(os.path.join("Game/Shop", "ico_9.png")).convert_alpha(), (75, 75))
attack_tower_names = ["archer"]
support_tower_names = ["range", "damage", "stone"]
side_img = pygame.transform.scale(
    pygame.image.load(os.path.join("Game/Shop/", "window_1.png")).convert_alpha(), (120, 500))

# Frame clock
clock = pygame.time.Clock()

# Waypoints (x, y) the enemies walk along, from off-screen left to off-screen right.
path = [(-10, 477), (0, 477), (171, 481), (315, 528), (464, 529), (631, 532), (802, 532),
        (846, 358), (673, 323), (533, 301), (513, 209), (491, 118), (661, 102), (826, 99),
        (1003, 100), (1179, 97), (1346, 96)]

# Wave table: each row lists how many of each enemy type to spawn, in order
# [Skeleton, Monster, Bat, Goblin, SnowMan, Knight, MaskedMan, Yeti, Tree,
#  Golem, Guard, SuperBoss] (12 enemy types, 30 waves + bonus rounds).
waves = [
    [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 2],  # for testing
    [20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # wave 1
    [30, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # wave 2
    [30, 20, 10, 0, 0, 0, 0, 0, 0, 0, 0],  # wave 3
    [50, 40, 20, 5, 0, 0, 0, 0, 0, 0, 0],  # wave 4
    [100, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0],  # wave 5
    [0, 0, 0, 0, 30, 0, 0, 1, 0, 0, 0],  # wave 6 (winter special)
    [100, 40, 30, 20, 10, 2, 0, 0, 0, 0, 0],  # wave 7
    [100, 100, 50, 50, 30, 10, 0, 0, 0, 0, 0],  # wave 8
    [100, 100, 75, 75, 40, 20, 5, 0, 0, 0, 0],  # wave 9
    [0, 0, 0, 0, 0, 0, 0, 10, 10, 7, 7],  # wave 10 (boss round)
    [150, 100, 100, 100, 50, 50, 20, 0, 0, 0, 0],  # wave 11
    [150, 150, 150, 150, 40, 40, 40, 0, 0, 0, 0],  # wave 12
    [200, 200, 150, 150, 50, 50, 50, 0, 0, 0, 0],  # wave 13
    [200, 200, 150, 150, 50, 50, 50, 1, 1, 0, 0],  # wave 14
    [200, 200, 200, 200, 100, 75, 75, 2, 2, 1, 1],  # wave 15
    [200, 200, 200, 200, 100, 100, 100, 2, 2, 2, 2],  # wave 16
    [1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # wave 17
    [300, 200, 200, 200, 150, 150, 100, 3, 3, 2, 2],  # wave 18
    [300, 200, 200, 200, 200, 200, 150, 4, 4, 4, 4],  # wave 19
    [0, 0, 0, 0, 0, 0, 0, 12, 12, 10, 10],  # wave 20 (boss round)
    [400, 300, 300, 300, 300, 300, 200, 5, 5, 5, 5],  # wave 21
    [400, 300, 300, 300, 300, 300, 300, 0, 0, 0, 0],  # wave 22
    [1300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # wave 23
    [500, 300, 300, 300, 300, 300, 300, 5, 5, 5, 5],  # wave 24
    [100, 300, 300, 300, 300, 300, 300, 10, 10, 7, 7],  # wave 25
    [500, 400, 400, 400, 400, 400, 400, 7, 7, 7, 7],  # wave 26
    [0, 1300, 100, 0, 0, 0, 0, 0, 0, 0, 0],  # wave 27
    [600, 500, 500, 500, 500, 500, 500, 6, 6, 6, 6],  # wave 28
    [1700, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # wave 29
    [0, 0, 0, 0, 0, 0, 0, 50, 50, 50, 50],  # wave 30 (last before bonus)
    [2000, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0],  # bonus 1
    [700, 500, 500, 500, 500, 500, 500, 10, 10, 10, 10],  # bonus 2
    [0, 0, 0, 0, 0, 0, 0, 100, 100, 100, 100, 1],  # bonus 3
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5],  # bonus 4
    [0, 0, 0, 0, 0, 0, 0, 20, 20, 20, 20, 5]
]
def point_to_line(tower):
    """
    Placement check stub: decide whether a tower may be placed relative to
    the enemy path. Distance checking is not implemented, so placement is
    always allowed.
    :param tower: Tower class object
    :return: tuple (True, tower). NOTE(review): callers only use the result
             in a boolean context, so the (always truthy) tuple behaves
             like the documented Bool.
    """
    return (True, tower)
class MainLoop:
    """
    Top level game object: owns all game state (money, lives, wave progress,
    towers, enemies) and runs the start screen, the main game loop and the
    win/lose screens.
    """
    def __init__(self):
        self.clicks = []
        self.running = True          # main loop keeps iterating while True
        self.in_start = True         # start screen keeps iterating while True
        self.money = 3000
        self.lives = 30
        self.wave = 1
        self.enemies = []
        self.path = path
        self.bg = bg
        self.timer = time.time()     # timestamp of the last enemy spawn
        self.paused = True
        self.selected_tower = None
        self.attack_towers = []
        self.support_towers = []
        # Side menu holding the four purchasable towers and their prices
        self.menu = VerticalMenu(width - side_img.get_width() + 70, 250, side_img)
        self.menu.add_btn(buy_archer, "buy_archer", 500)
        self.menu.add_btn(buy_damage, "buy_damage", 1000)
        self.menu.add_btn(buy_range, "buy_range", 1000)
        self.menu.add_btn(buy_stone, "buy_stone", 1500)
        self.moving_object = None    # tower currently being placed, if any
        # Copy the wave row so consuming it never mutates the global template
        self.curr_wave = waves[self.wave][:]
        self.pl_pa_btn = PlayPauseBtn(play2, pause2, 120, 600)
        self.music_btn = MusicBtn(play, pause, 20, 600)
        self.music = True
        self.font = pygame.font.SysFont("comicsans", 40, bold=True)
        self.msg_mode = False
        self.msg_clicked = False
        self.lose = False
        self.win = False
    def start_screen(self):
        """
        Start screen for tower defense game. Runs until the window is closed
        (exits) or the mouse is clicked (starts the game).
        :return: None
        """
        while self.in_start:
            for ev in pygame.event.get():
                # BUGFIX: compare the event *type* against pygame.QUIT;
                # comparing the event object itself is always False.
                if ev.type == pygame.QUIT:
                    pygame.quit()
                    return
            # Title
            logo = pygame.image.load("Game/Start Screen/logo2.png")
            win.blit(self.bg, (0, 0))
            win.blit(logo, (850 - width // 2, 400 - height // 2))
            # Show characters for a e s t h e t i c s
            en = pygame.image.load("Game/Enemies/Bosses/Golem/0_boss_run_000.png")
            win.blit(en, (450 - width // 2, 370 - height // 2))
            en2 = pygame.image.load("Game/Enemies/Bosses/Guard/0_boss_run_000.png")
            en2 = pygame.transform.flip(en2, True, False)
            win.blit(en2, (1150 - width // 2, 400 - height // 2))
            # Button to play
            btn1 = pygame.image.load("Game/Start Screen/button_play.png")
            start_screen_btn = PlayGameBtn(btn1, 1150 - width // 2, 720 - height // 2)
            start_screen_btn.draw(win)
            # NOTE(review): any left click starts the game, not only a click
            # on the play button -- confirm this is intended.
            if pygame.mouse.get_pressed()[0] == 1:
                self.in_start = False
                self.msg_mode = True
                break
            pygame.display.update()
        # BUGFIX: enter the main loop exactly once. The original called
        # self.main() inside the loop and then again after it, restarting
        # the entire game after game over.
        self.main()
    def add_tower(self, name):
        """
        Starts placement of a newly bought tower at the mouse position.
        :param name: shop identifier, e.g. 'buy_archer'
        :return: None
        """
        x, y = pygame.mouse.get_pos()
        name_list = ["buy_archer", "buy_damage", "buy_range", "buy_stone"]
        object_list = [ArcherTower(x, y), DamageTower(x, y), RangeTower(x, y), StoneTower(x, y)]
        try:
            obj = object_list[name_list.index(name)]
            self.moving_object = obj
            obj.moving = True
        except ValueError as err:
            # Unknown shop identifier; narrowed from bare Exception since
            # only list.index can raise here.
            print(f"[ERROR]: {str(err)}.")
    def enemy_wave(self):
        """
        Advances the wave state: spawns one enemy of the first remaining type
        of the current wave, or moves to the next wave once the field is clear.
        :return: list
        """
        if sum(self.curr_wave) == 0:
            if len(self.enemies) == 0:
                self.wave += 1
                # BUGFIX: copy the wave row; the original aliased the global
                # waves list and consumed it in place, corrupting the
                # template for any subsequent run.
                self.curr_wave = waves[self.wave][:]
                self.paused = True
        else:
            wave_enemies = [Skeleton(), PurpleMonster(), Bat(), HammerGoblin(), SnowMan(), Knight(), MaskedMan(),
                            Yeti(), Tree(), Golem(), Guard(), SuperBoss()]
            for x in range(len(self.curr_wave)):
                if self.curr_wave[x] != 0:
                    self.enemies.append(wave_enemies[x])
                    self.curr_wave[x] = self.curr_wave[x] - 1
                    break
    def redraw_game_window(self):
        """
        Draws everything needed for the game onto the screen.
        :return: None
        """
        win.blit(self.bg, (0, 0))  # background
        # Buttons
        self.music_btn.draw(win)
        self.pl_pa_btn.draw(win)
        # draw placement rings while a tower is being placed
        if self.moving_object:
            for tower in self.attack_towers:
                tower.draw_placement(win)
            for tower in self.support_towers:
                tower.draw_placement(win)
            self.moving_object.draw_placement(win)
        # draw attack towers
        for tw in self.attack_towers:
            tw.draw(win)
        # draw support towers
        for tw in self.support_towers:
            tw.draw(win)
        # redraw selected tower (on top, with its upgrade menu)
        if self.selected_tower:
            self.selected_tower.draw(win)
        # draw moving object
        if self.moving_object:
            self.moving_object.draw(win)
        # draw menu
        self.menu.draw(win)
        # Lives Left
        life = pygame.image.load("Game/Utils/heart.png")
        # NOTE(review): scale() returns a new surface; this discarded call
        # has no effect, so the heart is drawn unscaled. Assign or drop it.
        pygame.transform.scale(life, (70, 70))
        lives = self.font.render(str(self.lives), 2, (255, 255, 255))
        win.blit(lives, (1300, 20))
        win.blit(life, (1260, 15))
        # Money Left
        money = pygame.image.load("Game/Utils/star.png")
        # NOTE(review): same discarded scale() result as above.
        pygame.transform.scale(money, (70, 70))
        money_text = self.font.render(str(self.money), 2, (255, 255, 255))
        win.blit(money_text, (1160, 20))
        win.blit(money, (1110, 15))
        # Wave Number
        background = pygame.image.load("Game/Utils/table_2.png")
        background = pygame.transform.scale(background, (150, 100))
        txt = self.font.render(f"Wave #{str(self.wave)}", 2, (0, 0, 0))
        win.blit(background, (10, 10))
        win.blit(txt, (16, 34))
        # draws enemies
        for en in self.enemies:
            en.draw(win)
        pygame.display.update()
    def lose_screen(self):
        """
        End screen shown after all lives are lost. Runs until the window is
        closed, then shuts pygame down.
        :return: None
        """
        while self.lose:
            for ev in pygame.event.get():
                # BUGFIX: leave the while-loop on QUIT by clearing the flag;
                # the original only broke out of the event for-loop.
                if ev.type == pygame.QUIT:
                    self.lose = False
            # Title
            logo = pygame.image.load("Game/Start Screen/logo2.png")
            win.blit(self.bg, (0, 0))
            win.blit(logo, (850 - width // 2, 400 - height // 2))
            # You lose logo
            lose_img = pygame.image.load("Game/Utils/header_failed.png")
            win.blit(lose_img, (450, 370))
            # Best wave
            background = pygame.image.load("Game/Utils/table.png")
            background = pygame.transform.scale(background, (250, 100))
            txt = self.font.render(f"Best Wave #{str(self.wave)}", 2, (255, 255, 255))
            win.blit(background, (520, 570))
            win.blit(txt, (533, 606))
            pygame.display.update()
        # Stop the outer game loop as well before tearing pygame down.
        self.running = False
        pygame.quit()
    def win_screen(self):
        """
        End screen shown if the player completes all the levels. Runs until
        the window is closed, then shuts pygame down.
        :return: None
        """
        while self.win:
            for ev in pygame.event.get():
                # BUGFIX: leave the while-loop on QUIT by clearing the flag.
                if ev.type == pygame.QUIT:
                    self.win = False
            # Title
            logo = pygame.image.load("Game/Start Screen/logo2.png")
            win.blit(self.bg, (0, 0))
            win.blit(logo, (850 - width // 2, 400 - height // 2))
            # You win logo
            win_img = pygame.image.load("Game/Utils/header_win.png")
            win.blit(win_img, (450, 370))
            # BUGFIX: present the frame; the original never called
            # pygame.display.update() here, so the win screen stayed blank.
            pygame.display.update()
        self.running = False
        pygame.quit()
    def main(self):
        """
        Main loop of the game: input handling, tower placement, enemy waves,
        win/lose conditions and drawing.
        :return: None
        """
        while self.running:
            clock.tick(700)
            pos = pygame.mouse.get_pos()
            # check for moving object: follow the mouse and color the
            # placement rings red when overlapping an existing tower
            if self.moving_object:
                self.moving_object.move_tower(pos[0], pos[1])
                tower_list = self.attack_towers[:] + self.support_towers[:]
                collide = False
                for tower in tower_list:
                    if tower.collide_other_tower(self.moving_object):
                        collide = True
                        tower.place_color = (255, 0, 0, 100)
                        self.moving_object.place_color = (255, 0, 0, 100)
                    else:
                        tower.place_color = (0, 0, 255, 100)
                if not collide:
                    self.moving_object.place_color = (0, 0, 255, 100)
            # Music Button & Playing Music
            if pygame.mouse.get_pressed()[0] == 1 or pygame.mouse.get_pressed()[1] == 1 or \
                    pygame.mouse.get_pressed()[2] == 1:
                if self.music_btn.clicked(pos[0], pos[1]):
                    self.music = not self.music
                    self.music_btn.music = self.music
                if self.music:
                    pygame.mixer_music.unpause()
                else:
                    pygame.mixer_music.pause()
            # Main event loop
            for ev in pygame.event.get():
                if ev.type == pygame.QUIT:
                    # BUGFIX: stop via the running flag; the original called
                    # pygame.quit() mid-frame and then crashed on the next
                    # draw call. The final pygame.quit() below cleans up.
                    self.running = False
                    break
                if ev.type == pygame.MOUSEBUTTONUP:
                    # if you're moving an object and click, try to place it
                    if self.moving_object:
                        not_allowed = False
                        tower_list = self.attack_towers[:] + self.support_towers[:]
                        for tower in tower_list:
                            if tower.collide_other_tower(self.moving_object):
                                not_allowed = True
                        if not not_allowed and point_to_line(self.moving_object):
                            if self.moving_object.name in attack_tower_names:
                                self.attack_towers.append(self.moving_object)
                            elif self.moving_object.name in support_tower_names:
                                self.support_towers.append(self.moving_object)
                        self.moving_object.moving = False
                        self.moving_object = None
                    else:
                        # look if you click on side menu
                        side_menu_button = self.menu.get_clicked(pos[0], pos[1])
                        if side_menu_button:
                            cost = self.menu.get_item_cost(side_menu_button)
                            if self.money >= cost:
                                self.money -= cost
                                self.add_tower(side_menu_button)
                        # look if you clicked the selected tower's upgrade menu
                        btn_clicked = None
                        if self.selected_tower:
                            btn_clicked = self.selected_tower.menu.get_clicked(pos[0], pos[1])
                            if btn_clicked:
                                cost = self.selected_tower.get_upgrade_cost()
                                if self.money >= cost:
                                    self.money -= cost
                                    self.selected_tower.upgrade()
                        if not btn_clicked:
                            # look if you clicked on an attack tower
                            for tw in self.attack_towers:
                                if tw.click(pos[0], pos[1]):
                                    tw.selected = True
                                    self.selected_tower = tw
                                else:
                                    tw.selected = False
                            # look if you clicked on support tower
                            for tw in self.support_towers:
                                if tw.click(pos[0], pos[1]):
                                    tw.selected = True
                                    self.selected_tower = tw
                                else:
                                    tw.selected = False
            # Play Pause
            if pygame.mouse.get_pressed()[0] == 1 or pygame.mouse.get_pressed()[1] == 1 or \
                    pygame.mouse.get_pressed()[2] == 1:
                if self.pl_pa_btn.clicked(pos[0], pos[1]):
                    self.paused = not self.paused
                    self.pl_pa_btn.paused = self.paused
            # If lose the game: reset state and show the lose screen
            if self.lives <= 0:
                self.lose = True
                self.lives = 15
                self.money = 2000
                self.enemies = []
                self.support_towers = []
                self.attack_towers = []
                print("[END] You Lose, no more lives!")
                self.lose_screen()
                # BUGFIX: lose_screen has already shut pygame down; do not
                # keep drawing with a dead display.
                return
            # If you beat the game
            if self.wave == 34:
                self.win = True
                self.lives = 15
                self.money = 2000
                self.enemies = []
                self.support_towers = []
                self.attack_towers = []
                print("[END] You Win, congrats!")
                self.win_screen()
                # BUGFIX: win_screen has already shut pygame down.
                return
            # Generate and handle enemies (spawn at a small random interval)
            if not self.paused:
                if time.time() - self.timer >= random.randrange(1, 6) / 3:
                    self.timer = time.time()
                    self.enemy_wave()
            if not self.paused:
                # Move enemies; any that walked off the left edge cost a life
                en_to_del = []
                for en in self.enemies:
                    en.move()
                    if en.x < -15:
                        en_to_del.append(en)
                for enemy in en_to_del:
                    self.lives -= 1
                    self.enemies.remove(enemy)
                # loop through attack towers (attacking earns money)
                for tw in self.attack_towers:
                    self.money += tw.attack(self.enemies)
                # loop through support towers (buff the attack towers)
                for tw in self.support_towers:
                    tw.support(self.attack_towers)
            self.redraw_game_window()
        pygame.quit()
| 37.637624 | 119 | 0.50755 | 14,001 | 0.736623 | 0 | 0 | 0 | 0 | 0 | 0 | 3,157 | 0.166097 |
1dd7cafd4dc5d45bf6df84b8e8c9c05df721a1fb | 1,177 | py | Python | zendesk_tickets/client.py | ibrechin/django-zendesk-tickets | f4651836d6a0aaad46fcd90229e482bf4673e9ac | [
"MIT"
] | 4 | 2017-04-12T08:11:07.000Z | 2022-01-26T16:30:52.000Z | zendesk_tickets/client.py | ibrechin/django-zendesk-tickets | f4651836d6a0aaad46fcd90229e482bf4673e9ac | [
"MIT"
] | 6 | 2015-12-07T12:13:49.000Z | 2022-03-21T12:19:10.000Z | zendesk_tickets/client.py | ibrechin/django-zendesk-tickets | f4651836d6a0aaad46fcd90229e482bf4673e9ac | [
"MIT"
] | 2 | 2021-04-11T06:31:40.000Z | 2021-07-26T06:20:33.000Z | import json
from urllib.parse import urljoin
from django.conf import settings
import requests
def get_ticket_endpoint():
    """Return the absolute URL of Zendesk's ticket-creation REST endpoint."""
    endpoint_path = '/api/v2/tickets.json'
    return urljoin(settings.ZENDESK_BASE_URL, endpoint_path)
def zendesk_auth():
    """Return the HTTP basic-auth pair for Zendesk API-token authentication.

    Zendesk's token scheme expects the username suffixed with "/token" and the
    API token in place of a password.
    """
    token_user = '{username}/token'.format(username=settings.ZENDESK_API_USERNAME)
    return token_user, settings.ZENDESK_API_TOKEN
def create_ticket(subject, tags, ticket_body, requester_email=None, custom_fields=None):
    """Create a new Zendesk ticket via the REST API.

    When ``requester_email`` is given, the ticket is attributed to that sender
    (the local part of the address becomes the display name); otherwise the
    configured ``ZENDESK_REQUESTER_ID`` is used. Raises ``requests.HTTPError``
    if Zendesk rejects the request.
    """
    ticket = {
        'subject': subject,
        'comment': {'body': ticket_body},
        'group_id': settings.ZENDESK_GROUP_ID,
        'tags': tags,
        'custom_fields': list(custom_fields or ()),
    }
    if requester_email:
        sender_name = 'Sender: %s' % requester_email.split('@')[0]
        ticket['requester'] = {'name': sender_name, 'email': requester_email}
    else:
        ticket['requester_id'] = settings.ZENDESK_REQUESTER_ID
    response = requests.post(
        get_ticket_endpoint(),
        data=json.dumps({'ticket': ticket}),
        auth=zendesk_auth(),
        headers={'content-type': 'application/json'},
    )
    response.raise_for_status()
| 26.155556 | 88 | 0.627018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.203059 |
1dd87ba8ad5a67a4277d8da50db47d17748918d9 | 2,208 | py | Python | pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/RequestApproval.py | amvasudeva/rapidata | 7b6e984d24866f5cf474847cf462ac628427cf48 | [
"Apache-2.0"
] | null | null | null | pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/RequestApproval.py | amvasudeva/rapidata | 7b6e984d24866f5cf474847cf462ac628427cf48 | [
"Apache-2.0"
] | 7 | 2020-06-30T23:14:35.000Z | 2021-08-02T17:08:05.000Z | pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/RequestApproval.py | amvasudeva/rapidata | 7b6e984d24866f5cf474847cf462ac628427cf48 | [
"Apache-2.0"
] | null | null | null | #
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS
# FOR A PARTICULAR PURPOSE. THIS CODE AND INFORMATION ARE NOT SUPPORTED BY XEBIALABS.
#
import sys, string, time
import com.xhaus.jyson.JysonCodec as json
from servicenow.ServiceNowClient import ServiceNowClient
if servicenowServer is None:
print "No server provided."
sys.exit(1)
if tableName is None:
print "No tableName provided."
sys.exit(1)
if content is None:
print "No content provided."
sys.exit(1)
if shortDescription is None:
print "No shortDescription provided."
sys.exit(1)
if description is None:
print "No description provided."
sys.exit(1)
snClient = ServiceNowClient.create_client(servicenowServer, username, password)
contentJSON = content % (shortDescription, description)
sysId = None
content = content % (shortDescription, description)
print "Sending content %s" % content
try:
data = snClient.create_record( tableName, content )
print "Returned DATA = %s" % (data)
print json.dumps(data, indent=4, sort_keys=True)
sysId = data["sys_id"]
Ticket = data["number"]
print "Created %s in Service Now." % (sysId)
print "Created %s in Service Now." % (Ticket)
except Exception, e:
exc_info = sys.exc_info()
traceback.print_exception( *exc_info )
print e
print snClient.print_error( e )
print "Failed to create record in Service Now"
sys.exit(1)
isClear = False
while ( not isClear ):
try:
data = snClient.get_change_request(tableName, sysId)
status = data["approval"]
print "Found %s in Service Now as %s" % (data['number'], status)
if "approved" == status:
approval = False
isClear = True
print "ServiceNow approval received."
ticket = data["number"]
elif "rejected" == status:
print "Failed to get approval from ServiceNow"
sys.exit(1)
else:
time.sleep(5)
except:
print json.dumps(data, indent=4, sort_keys=True)
print "Error finding status for %s" % statusField
# End try
# End While
| 27.6 | 98 | 0.688859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 754 | 0.341486 |
1dd93c15d98d6b6545270d6a317fbfaaacd7b210 | 1,238 | py | Python | migrations/versions/5f810254fdd4_initial_migration7.py | moha-abdul/minute-pitch | 4f36d09c61f24fdb983eef2a5b00dc925c34464d | [
"MIT"
] | null | null | null | migrations/versions/5f810254fdd4_initial_migration7.py | moha-abdul/minute-pitch | 4f36d09c61f24fdb983eef2a5b00dc925c34464d | [
"MIT"
] | null | null | null | migrations/versions/5f810254fdd4_initial_migration7.py | moha-abdul/minute-pitch | 4f36d09c61f24fdb983eef2a5b00dc925c34464d | [
"MIT"
] | null | null | null | """Initial Migration7
Revision ID: 5f810254fdd4
Revises: c0a6d11f2e37
Create Date: 2018-09-17 20:20:59.265902
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5f810254fdd4'  # unique id of this migration
down_revision = 'c0a6d11f2e37'  # parent revision this migration builds on
branch_labels = None  # no named version branches
depends_on = None  # no cross-branch dependencies
def upgrade():
    """Remove the roles system and add a nullable comment column to pitches.

    The foreign key (and the users.role_id column holding it) must be dropped
    before the roles table itself: dropping a table that is still referenced
    by a foreign-key constraint fails on PostgreSQL and most other backends,
    so the auto-generated order (table first) is corrected here.
    """
    op.add_column('pitches', sa.Column('comment', sa.String(length=255), nullable=True))
    op.drop_constraint('users_role_id_fkey', 'users', type_='foreignkey')
    op.drop_column('users', 'role_id')
    op.drop_table('roles')
def downgrade():
    """Recreate the roles table and users.role_id link; drop pitches.comment.

    The roles table has to exist before the users.role_id foreign key that
    references it can be created, so the auto-generated order (foreign key
    before table) is corrected here.
    """
    op.create_table(
        'roles',
        sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id', name='roles_pkey'),
    )
    op.add_column('users', sa.Column('role_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('users_role_id_fkey', 'users', 'roles', ['role_id'], ['id'])
    op.drop_column('pitches', 'comment')
| 31.74359 | 98 | 0.693053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 559 | 0.451535 |
1dda73b8c7b716967c6538d6b9f6cbcc77ed0163 | 625 | py | Python | config/api_router.py | Kubiniet/Mailing-messages | 14810eaa6bef7895a22b2f5e435e9db410772e1e | [
"MIT"
] | 1 | 2022-03-30T14:24:10.000Z | 2022-03-30T14:24:10.000Z | config/api_router.py | Kubiniet/Mailing-messages | 14810eaa6bef7895a22b2f5e435e9db410772e1e | [
"MIT"
] | null | null | null | config/api_router.py | Kubiniet/Mailing-messages | 14810eaa6bef7895a22b2f5e435e9db410772e1e | [
"MIT"
] | null | null | null | from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from message_service.mailing.api.views import (
ClientsViewSet,
MailingViewSet,
MessageViewSet,
)
from message_service.users.api.views import UserViewSet
# DefaultRouter additionally serves a browsable API root view, which is useful
# during development only; production gets the slimmer SimpleRouter.
router = DefaultRouter() if settings.DEBUG else SimpleRouter()

# Endpoint registrations: URL prefix -> viewset.
router.register("users", UserViewSet)
router.register("clients", ClientsViewSet, basename="clients")
router.register("mailing", MailingViewSet, basename="mailing-list")
router.register("messages", MessageViewSet, basename="message")

app_name = "api"
urlpatterns = router.urls
| 26.041667 | 67 | 0.7808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.1152 |
1ddaabf5260925241474b1020f7b0e3d597421f5 | 126 | py | Python | fts/backends/xapian.py | filwaitman/django-fts | af9f10d760e543288c21a8f4239288c2e415b9e9 | [
"BSD-3-Clause"
] | null | null | null | fts/backends/xapian.py | filwaitman/django-fts | af9f10d760e543288c21a8f4239288c2e415b9e9 | [
"BSD-3-Clause"
] | 9 | 2020-02-11T23:38:20.000Z | 2022-03-11T23:16:29.000Z | 3rdparty/odeoncg-django-fts-odeon-9ea3a64/fts/backends/unported/xapian.py | cltrudeau/django-yacon | d462c88cf98bf8eef50a0696b265fa28dfdb40eb | [
"MIT"
] | 2 | 2017-09-10T11:27:51.000Z | 2019-12-28T00:12:58.000Z | from fts.backends.base import InvalidFtsBackendError
raise InvalidFtsBackendError("Xapian FTS backend not yet implemented") | 42 | 70 | 0.849206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.31746 |
1ddc7b1c9df94d1d6c3913b9a8bd616213efe883 | 525 | py | Python | Lab2/Exercise3.py | alex99q/python3-lab-exercises | dfa96908f589d2e03ce3a21a3326fd7365d62fcb | [
"MIT"
] | null | null | null | Lab2/Exercise3.py | alex99q/python3-lab-exercises | dfa96908f589d2e03ce3a21a3326fd7365d62fcb | [
"MIT"
] | null | null | null | Lab2/Exercise3.py | alex99q/python3-lab-exercises | dfa96908f589d2e03ce3a21a3326fd7365d62fcb | [
"MIT"
] | null | null | null | import random
# Number-guessing game: the player guesses a secret number in [1, 10] with
# "Too high!"/"Too low!" hints; the final score counts *distinct* guesses.
secret_num = random.randrange(1, 11, 1)
player_num = int(input("Guess a number from 1 to 10: "))
list_of_player_nums = [player_num]  # distinct guesses made so far
while player_num != secret_num:
    # Inside the loop the guess is always wrong, so exactly one hint applies.
    print("Too high!" if player_num > secret_num else "Too low!")
    player_num = int(input("Guess again: "))
    if player_num not in list_of_player_nums:
        list_of_player_nums.append(player_num)
print(f"You won with {len(list_of_player_nums)} tries!")
1ddcb5a365134a0fc6fdd220c36768b3059a205b | 363 | py | Python | rotor_tm_traj/traj/Optimization/entire_path/generate_poly.py | xl2623/RotorTM | 4ef88f1fdb2137ff7f6e7f0acbf9105b99773ed8 | [
"BSD-3-Clause"
] | 1 | 2022-01-10T13:43:11.000Z | 2022-01-10T13:43:11.000Z | rotor_tm_traj/traj/Optimization/entire_path/generate_poly.py | xl2623/RotorTM | 4ef88f1fdb2137ff7f6e7f0acbf9105b99773ed8 | [
"BSD-3-Clause"
] | null | null | null | rotor_tm_traj/traj/Optimization/entire_path/generate_poly.py | xl2623/RotorTM | 4ef88f1fdb2137ff7f6e7f0acbf9105b99773ed8 | [
"BSD-3-Clause"
] | 3 | 2022-01-21T03:04:38.000Z | 2022-01-25T15:05:31.000Z | #! /usr/bin/env python
from math import factorial
import numpy as np
# test passed
def generate_poly(max_exponent,max_diff,symbol):
f=np.zeros((max_diff+1, max_exponent+1), dtype=float)
for k in range(max_diff+1):
for i in range(max_exponent+1):
if (i - k) >= 0:
f[k,i] = factorial(i)*symbol**(i-k)/factorial(i-k)
else:
f[k,i] = 0
return f | 24.2 | 55 | 0.663912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.096419 |
1dde29d159ca50f530c3329ce64e235d58752ef0 | 3,389 | py | Python | tests/train.py | lbaret/pytorch_functions | fd6f6b167074acba9981f0dbbf3ad20e93444ff2 | [
"MIT"
] | 1 | 2021-04-17T13:33:58.000Z | 2021-04-17T13:33:58.000Z | tests/train.py | lbaret/pytorch_functions | fd6f6b167074acba9981f0dbbf3ad20e93444ff2 | [
"MIT"
] | null | null | null | tests/train.py | lbaret/pytorch_functions | fd6f6b167074acba9981f0dbbf3ad20e93444ff2 | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.utils.data import DataLoader
def train(model: nn.Module, optimizer: Optimizer, loss: nn.Module, train_loader: DataLoader,
          valid_loader: DataLoader = None, epochs: int = 100, gpu: int = None,
          score: list = None, scheduler=None, make_sigmoid=False, make_softmax=False) -> tuple:
    """
    Standard supervised training loop.

    :param model: torch ML model
    :param optimizer: torch optimizer algorithm
    :param loss: loss function (a callable nn.Module)
    :param train_loader: training set loader yielding (inputs, targets) pairs
    :param valid_loader: optional validation set loader; when given, a
        validation pass runs each epoch via the module-level ``valid`` helper
    :param epochs: number of epochs
    :param gpu: CUDA device index, or None to stay on CPU
    :param score: unused placeholder (scoring was never implemented)
    :param make_sigmoid: kept for interface compatibility; the thresholded
        labels it controlled were computed and then discarded, so it has no
        observable effect and the dead work was removed
    :param make_softmax: same as ``make_sigmoid``
    :param scheduler: optional learning-rate scheduler, stepped once per epoch
    :return: (train loss history, valid loss history) when ``valid_loader``
        is given, otherwise just the train loss history list
    """
    if gpu is not None:
        model = model.cuda(gpu)

    epochs_train_loss = []
    epochs_valid_loss = []
    for ep in range(epochs):
        # model.train() flips the training flag recursively; the original
        # assignment `model.training = True` did not reach submodules such
        # as Dropout or BatchNorm.
        model.train()
        all_losses = []
        for i, (inputs, targets) in enumerate(train_loader):
            if gpu is not None:
                inputs = inputs.cuda(gpu)
                targets = targets.float().cuda(gpu)

            predictions = model(inputs).squeeze()
            err = loss(predictions, targets)

            # Machine is learning
            err.backward()
            optimizer.step()
            optimizer.zero_grad()

            # Store a plain float: detaching here frees the autograd graph on
            # CPU as well (the original detached only on GPU and then crashed
            # converting grad-requiring tensors to numpy).
            all_losses.append(float(err.detach()))
            if gpu is not None:
                torch.cuda.empty_cache()

            print(f'\rBatch : {i + 1} / {len(train_loader)} - Loss : {all_losses[-1]:.2e}',
                  end='')

        # Mean training loss for the epoch (NaN for an empty loader instead
        # of a crash).
        train_loss = sum(all_losses) / len(all_losses) if all_losses else float('nan')
        epochs_train_loss.append(train_loss)

        if scheduler is not None:
            scheduler.step()

        # Validation step
        if valid_loader is not None:
            # `valid` is expected to be defined alongside this function.
            valid_loss = valid(model, loss, valid_loader, gpu)
            epochs_valid_loss.append(valid_loss)
            print(f'\rEpoch : {ep + 1} - Train Loss : {train_loss:.2e} - '
                  f'Valid Loss : {valid_loss:.2e}')
        else:
            print(f'\rEpoch : {ep + 1} - Train Loss : {train_loss:.2e}')

    if valid_loader is not None:
        return epochs_train_loss, epochs_valid_loss
    return epochs_train_loss
# Scratch notebook cell: sanity-check that torch.vstack concatenates two
# column tensors along dim 0 (15 + 9 rows -> shape (24, 1)).
a = torch.randn(15, 1)
b = torch.randn(9, 1)
torch.vstack((a, b)).shape
| 29.215517 | 95 | 0.553851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 981 | 0.289466 |