Dataset schema (one entry per column: name, dtype, observed range; "nullable" marks columns flagged ⌀ in the source):

- hexsha: string, length 40
- size: int64, 5 .. 2.06M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 .. 248
- max_stars_repo_name: string, length 5 .. 125
- max_stars_repo_head_hexsha: string, length 40 .. 78
- max_stars_repo_licenses: list, length 1 .. 10
- max_stars_count: int64, 1 .. 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 3 .. 248
- max_issues_repo_name: string, length 5 .. 125
- max_issues_repo_head_hexsha: string, length 40 .. 78
- max_issues_repo_licenses: list, length 1 .. 10
- max_issues_count: int64, 1 .. 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 3 .. 248
- max_forks_repo_name: string, length 5 .. 125
- max_forks_repo_head_hexsha: string, length 40 .. 78
- max_forks_repo_licenses: list, length 1 .. 10
- max_forks_count: int64, 1 .. 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 5 .. 2.06M
- avg_line_length: float64, 1 .. 1.02M
- max_line_length: int64, 3 .. 1.03M
- alphanum_fraction: float64, 0 .. 1
- count_classes: int64, 0 .. 1.6M
- score_classes: float64, 0 .. 1
- count_generators: int64, 0 .. 651k
- score_generators: float64, 0 .. 1
- count_decorators: int64, 0 .. 990k
- score_decorators: float64, 0 .. 1
- count_async_functions: int64, 0 .. 235k
- score_async_functions: float64, 0 .. 1
- count_documentation: int64, 0 .. 1.04M
- score_documentation: float64, 0 .. 1

Each record below is rendered as a compact header (hexsha, size, ext, lang; repo path/name/head/licenses, which are identical across the stars/issues/forks variants unless noted; star, issue, and fork counts with their event date ranges), followed by the file content and a stats line (counts with scores in parentheses).
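
For orientation, a hedged sketch of how rows with this schema might be filtered. Only the column names come from the header above; the storage format and the file name are assumptions.

```python
import pandas as pd

# Hypothetical: a local dump of this table (format and filename assumed).
df = pd.read_parquet("python_rows.parquet")

# Select well-documented Python files via the score_documentation column.
docs = df[(df["lang"] == "Python") & (df["score_documentation"] > 0.5)]
print(docs[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```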

---
hexsha: c5160250b4498c1f1e7cd89943e80a080c1c9214 | size: 689 | ext: py | lang: Python
path: Base/__init__.py | repo: jasrub/panorama-worker | head: 35083d4e46b7c15e33ef352562bd7889634dcebc | licenses: ["MIT"]
stars: 2 (2017-05-30T13:38:44.000Z .. 2020-06-08T08:27:32.000Z) | issues: null | forks: null
content:
import os
import json
import ConfigParser
import logging.config
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# load the shared settings file
settings_file_path = os.path.join(base_dir, 'config', 'settings.config')
settings = ConfigParser.ConfigParser()
settings.read(settings_file_path)
# set up logging
with open(os.path.join(base_dir, 'config', 'logging.json'), 'r') as f:
logging_config = json.load(f)
logging.config.dictConfig(logging_config)
log = logging.getLogger(__name__)
log.info("---------------------------------------------------------------------------")
requests_logger = logging.getLogger('requests')
requests_logger.setLevel(logging.INFO)

stats: avg_line_length 32.809524, max_line_length 87, alphanum_fraction 0.69521
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 184 (0.267054)
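
The module above hands config/logging.json to logging.config.dictConfig. The real file is not shown in this record; this is a minimal sketch of a compatible payload (formatter and handler names are assumptions):

```python
import logging.config

# Hypothetical minimal equivalent of config/logging.json above.
logging_config = {
    "version": 1,
    "formatters": {
        "plain": {"format": "%(asctime)s %(levelname)s %(name)s: %(message)s"}
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "plain"}
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}
logging.config.dictConfig(logging_config)
logging.getLogger(__name__).info("logging configured")
```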

---
hexsha: c5160d08bb96e67a6e7e528e268fa0a4a2d6dfb2 | size: 2,462 | ext: py | lang: Python
path: src/main.py | repo: Grant-Steinfeld/python-ubi-openshift | head: 82fb2d4a4093a5b67c68a3443da23532f59a230c | licenses: ["Apache-2.0"]
stars: 7 (2020-04-21T21:23:39.000Z .. 2022-02-16T11:09:58.000Z) | issues: 3 (2020-02-18T21:57:04.000Z .. 2020-03-26T20:37:22.000Z) | forks: 13 (2020-04-27T19:56:43.000Z .. 2022-03-31T03:53:22.000Z)
content:
from flask import Flask
from flask_restplus import Api, Resource, fields
from services.serviceHandler import convertCurrency, getCurrencyExchangeRates
from services.countryCurrencyCodeHandler import (
getCountryAndCurrencyCode,
getCurrencyNameAndCode,
)
app = Flask(__name__)
api = Api(
app,
version="1.0.0",
title="Bee Travels Currency Data Service",
description="This is a microservice that handles currency exchange rate data for Bee Travels",
)
currencyNS = api.namespace(
"Currency",
description="Operations associated with currency exchange rate conversions",
)
currencyNameOrCurrencyCode = api.model(
"currencyNameOrCurrencyCode",
{
"currencyCode": fields.String(
required=False, description="3 letter currency code"
),
"country": fields.String(required=False, description="country name"),
},
)
@currencyNS.route("/")
class CurrencyList(Resource):
"""Shows a list of currency ex rates"""
def get(self):
return getCurrencyExchangeRates()
# /currency/{currencyFromAmount}/{currencyFromCode}/{currencyToCode}
# /currency/10/EUR/USD
@currencyNS.route("/<int:currencyFromAmount>/<currencyFromCode>/<currencyToCode>")
@currencyNS.response(404, "Currency Code not found")
@currencyNS.param("currencyFromAmount", "currency to convert from value (float)")
@currencyNS.param("currencyFromCode", "currency (3 character code) to convert from")
@currencyNS.param("currencyToCode", "currency (3 character code) to convert to")
class Currency(Resource):
def get(self, currencyFromAmount, currencyFromCode, currencyToCode):
result = convertCurrency(
float(currencyFromAmount), currencyFromCode, currencyToCode
)
return {"result": result}
@currencyNS.route("/search")
@currencyNS.response(404, "Currency Code not found")
class Search(Resource):
@currencyNS.doc("search_currency_meta")
@currencyNS.expect(currencyNameOrCurrencyCode)
@currencyNS.marshal_with(currencyNameOrCurrencyCode, code=201)
def post(self):
if "currencyCode" in api.payload:
return getCountryAndCurrencyCode(api.payload["currencyCode"])
elif "country" in api.payload:
return getCurrencyNameAndCode(api.payload["country"])
else:
api.abort(400, "Pass in either the currencyCode or country name")
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True, port=7878)

stats: avg_line_length 31.974026, max_line_length 98, alphanum_fraction 0.718522
       classes 902 (0.366369), generators 0 (0), decorators 1,391 (0.564988), async_functions 0 (0), documentation 867 (0.352153)
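
Assuming the service above is running locally on the port passed to app.run(), its routes could be exercised like this (a sketch, not part of the repo):

```python
import requests

BASE = "http://localhost:7878"  # port taken from app.run() above

# All exchange rates (the CurrencyList resource).
print(requests.get(f"{BASE}/Currency/").json())

# Convert 10 EUR to USD via the path-parameter route.
print(requests.get(f"{BASE}/Currency/10/EUR/USD").json())

# Look up a currency by country name via the search route.
print(requests.post(f"{BASE}/Currency/search", json={"country": "France"}).json())
```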

---
hexsha: c517a4c10d04e5d45c2f649fb106b2a711638d2d | size: 6,344 | ext: py | lang: Python
path: orchestration/run/BrokerActions.py | repo: pjk25/RabbitTestTool | head: c0b9e820f079d14d516185f2790371380e190d6c | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import sys
import io
import subprocess
import threading
import time
import uuid
import os.path
import requests
import json
from random import randint
from UniqueConfiguration import UniqueConfiguration
from CommonConfiguration import CommonConfiguration
from printer import console_out
class BrokerActions:
def __init__(self, deployer):
self._action_status = dict()
self._deployer = deployer
self.actor = "BROKER_ACTIONS"
def wait_for_msg_trigger(self, configurations, common_conf, trigger_at):
# iterate over configurations
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
            # iterate over the unique configurations under this tag
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
console_out(self.actor, f"Checking message total on node {unique_conf.node_number}")
broker_ip = self.get_broker_ip(unique_conf.technology, unique_conf.node_number, common_conf.run_tag, common_conf.key_pair)
msg_total = 0
while(msg_total < trigger_at):
msg_total = self.get_cluster_message_total(broker_ip, common_conf.username, common_conf.password)
console_out(self.actor, f"Trigger at {trigger_at}. Currently {msg_total} messages on node {unique_conf.node_number}")
time.sleep(10)
console_out(self.actor, f"Reached msg trigger on node {unique_conf.node_number}")
def restart_all_brokers(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(node), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
status_id = f"{unique_conf.technology}{node}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{node}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def restart_one_broker(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(unique_conf.node_number), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
    def restart_broker(self, technology, node, common_conf):
        # callers pass a CommonConfiguration; take run_tag/key_pair from it
        status_id = technology + node
        exit_code = subprocess.call(["bash", "restart-broker.sh",
                        common_conf.key_pair,
                        node,
                        common_conf.run_tag,
                        technology])
if exit_code != 0:
console_out(self.actor, f"Restart of broker on node {node} failed with exit code {exit_code}")
self._action_status[status_id] = "failed"
else:
self._action_status[status_id] = "success"
def get_broker_ip(self, technology, node, run_tag, key_pair):
broker_ip = ""
attempts = 0
while broker_ip == "" and attempts < 3:
attempts += 1
process = subprocess.Popen(["bash", "get_broker_ip.sh",
key_pair,
node,
run_tag,
technology], stdout=subprocess.PIPE)
for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
if not line:
break
if line.startswith("BROKER_IP="):
broker_ip = line.rstrip().replace("BROKER_IP=","")
break
if broker_ip == "":
time.sleep(5)
return broker_ip
def get_cluster_message_total(self, broker_ip, username, password):
res = requests.get(f"http://{broker_ip}:15672/api/overview",
auth=(username,password))
overview_json = res.json()
queue_totals = overview_json["queue_totals"]
if "messages" in queue_totals:
return queue_totals["messages"]
else:
return 0

stats: avg_line_length 42.577181, max_line_length 144, alphanum_fraction 0.589061
       classes 6,057 (0.95476), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 975 (0.153689)
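
The restart methods above follow a fan-out/join pattern: one thread per broker node, start them all, join them all, then inspect a shared status dict. The same pattern reduced to a standalone sketch (node names are illustrative):

```python
import threading

status = {}

def restart(node):
    # stand-in for the restart-broker.sh call made by restart_broker above
    status[node] = "success"

threads = [threading.Thread(target=restart, args=(n,))
           for n in ("rabbitmq1", "rabbitmq2", "rabbitmq3")]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(status)  # every node should report 'success'
```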

---
hexsha: c5198d8481c8a0970f981fde506e8ae0b90aab1f | size: 1,763 | ext: py | lang: Python
path: bin/wls_users.py | repo: rstyczynski/wls-tools | head: 292a39a3f7af7b9d7d4c4849618d6789daae9b58 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#!$BEA_HOME/oracle_common/common/bin/wlst.sh
import sys
import getopt

# default values
admin_name = 'AdminServer'
admin_address = 'localhost'
admin_port = 7001
admin_protocol = 't3'
delimiter = ','
admin_url = admin_protocol + "://" + admin_address + ":" + str(admin_port)
def usage():
print "dump_users [-s|--server -p|--port] [-u|--url] [-d|--delimiter]"
try:
    opts, args = getopt.getopt( sys.argv[1:], 's:p:u:d:h', ['server=','port=','url=','delimiter=','help'] )
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
    if opt in ('-h', '--help'):
usage()
sys.exit(2)
elif opt in ('-s', '--server'):
admin_name = arg
elif opt in ('-p', '--port'):
admin_port = arg
admin_url = admin_protocol + "://" + admin_address + ":" + str(admin_port)
elif opt in ('-u', '--url'):
admin_url = arg
elif opt in ('-d', '--delimiter'):
delimiter = arg
else:
usage()
sys.exit(2)
connect(url=admin_url, adminServerName=admin_name)
# do work
from weblogic.management.security.authentication import UserReaderMBean
from weblogic.management.security.authentication import GroupReaderMBean
realm = cmo.getSecurityConfiguration().getDefaultRealm()
authProvider = realm.getAuthenticationProviders()
print delimiter.join(['admin_url', 'group', 'user'])
for i in authProvider:
if isinstance(i,GroupReaderMBean):
groupReader = i
cursor = i.listGroups("*",0)
while groupReader.haveCurrent(cursor):
group = groupReader.getCurrentName(cursor)
usergroup = i.listAllUsersInGroup(group,"*",0)
for user in usergroup:
                print delimiter.join([admin_url, group, user])
groupReader.advance(cursor)
groupReader.close(cursor)
#
disconnect()
exit()

stats: avg_line_length 27.546875, max_line_length 101, alphanum_fraction 0.642087
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 325 (0.184345)
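
For reference, the corrected option-parsing pattern from the script above as a standalone Python 3 sketch (the WLST original is Python 2 and relies on the wlst.sh environment):

```python
import getopt
import sys

def main(argv):
    try:
        opts, _args = getopt.getopt(argv, 's:p:u:d:h',
                                    ['server=', 'port=', 'url=', 'delimiter=', 'help'])
    except getopt.GetoptError as err:
        print(err)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print("usage: wls_users.py [-s server] [-p port] [-u url] [-d delimiter]")
            sys.exit(0)
        print(opt, arg)  # remaining options would set the connection defaults

main(sys.argv[1:])
```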

---
hexsha: c51b416cfe1486d20ea86dd385bdcfa1be5f1bbe | size: 1,082 | ext: py | lang: Python
path: scripts/drop_low_coverage.py | repo: godzilla-but-nicer/SporeLoss | head: 8159a628e5f17191254583c053891070ba3d6e7f | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import numpy as np
from Bio import AlignIO, Seq
# parameter to determine the maximum missing proportion that we keep
missing_thresh = 0.4
# load the alignments and turn them into a numpy array
alignments = AlignIO.read(snakemake.input[0], 'fasta')
align_arr = np.array([list(rec) for rec in alignments])
# get a list of missing values per base
missing_bases = []
# iterate over the whole alignment counting missing bases
for base in range(align_arr.shape[1]):
missing = 0
for seq in range(align_arr.shape[0]):
if alignments[seq, base] not in ['A', 'T', 'G', 'C']:
missing += 1
missing_bases.append(missing)
# calculate the proportion of missing bases for each column
missing_prop = np.array([m / align_arr.shape[0] for m in missing_bases])
align_arr = align_arr[:, missing_prop < missing_thresh]
for r, rec in enumerate(alignments):
joined_seq = ''.join(align_arr[r])
    print(joined_seq[:10])  # sanity check: first 10 bases of the trimmed record
rec.seq = Seq.Seq(joined_seq)
with open(snakemake.output[0], 'w') as fout:
AlignIO.write(alignments, fout, 'fasta')

stats: avg_line_length 31.823529, max_line_length 72, alphanum_fraction 0.711645
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 308 (0.284658)
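
The core of the script above is a per-column missing-data mask. A toy, self-contained illustration of that step:

```python
import numpy as np

# Three aligned sequences; '-' and 'N' count as missing.
aln = np.array([list("ATG-"), list("A--N"), list("ATGC")])

# Fraction of non-ACGT characters per alignment column.
missing_prop = (~np.isin(aln, list("ATGC"))).mean(axis=0)

# Keep only columns below the 0.4 threshold used above:
# the last column (2/3 missing) is dropped.
print(aln[:, missing_prop < 0.4])
```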

---
hexsha: c51b5610a93a01c7edaae445a44f41f8aa36b738 | size: 626 | ext: py | lang: Python
path: get_proc_users.py | repo: dangtrinhnt/gem | head: bc53cf19d3541542e4c14c24b5fb186432e91c45 | licenses: ["Apache-2.0"]
stars: null | issues: 44 (2019-11-18T20:15:35.000Z .. 2021-07-27T20:26:38.000Z) | forks: null
content:
#! /usr/bin/env python
import sys
from commons import *
def print_proc_users(csv_path, condition_number):
csv_dat = get_dict_data_from_csv_file(csv_path)
if csv_dat:
print "Processing user with condition %s\n" % condition_number
for email in csv_dat:
num = str_to_num(email['src']) % 10
if num in condition_number or condition_number[0]==-1:
print "%s to %s\n" % (email['src'], email['dest'])
if __name__ == "__main__":
csv_path = sys.argv[1]
if sys.argv[2] == 'all':
condition_number = [-1]
else:
condition_number = map(int, sys.argv[2].split(','))
print_proc_users(csv_path, condition_number)

stats: avg_line_length 24.076923, max_line_length 64, alphanum_fraction 0.699681
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 105 (0.167732)

---
hexsha: c51c826bcad9a887b4123c7790037f70e652cfae | size: 1,250 | ext: py | lang: Python
path: fix_ccJSON.py | repo: boada/wmh | head: f2abe5ff2aeeae6eebab2e8c40803b3fcec9ac3a | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import pandas as pd
import sys
def fix(lists):
df = pd.read_json(lists)
df2 = pd.DataFrame([p for p1 in df.players for p in p1])
df2['theme1'] = ''
df2['theme2'] = ''
    # fill the theme columns; missing keys become 'None', non-dict entries are skipped
for i, l in df2.list2.iteritems():
try:
df2.theme2.iloc[i] = l['theme']
except KeyError:
df2.theme2.iloc[i] = 'None'
except TypeError:
continue
for i, l in df2.list1.iteritems():
try:
df2.theme1.iloc[i] = l['theme']
except KeyError:
df2.theme1.iloc[i] = 'None'
except TypeError:
continue
for i, l in df2.list2.iteritems():
try:
df2.list2.iloc[i] = l['list']
except KeyError:
continue
except TypeError:
continue
for i, l in df2.list1.iteritems():
try:
df2.list1.iloc[i] = l['list']
except KeyError:
continue
except TypeError:
continue
df2.to_json('fixed.json')
if __name__ == "__main__":
fix(sys.argv[1])

stats: avg_line_length 24.038462, max_line_length 60, alphanum_fraction 0.508
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 87 (0.0696)

---
hexsha: c51deae5ca13775e3c2ef4b77c14c0ca5e33d193 | size: 1,184 | ext: py | lang: Python
path: 01-Lesson-Plans/03-Python-Pandas/1/Activities/12-functions-02/Unsolved/functions-02.py | repo: tatianegercina/FinTech | head: b40687aa362d78674e223eb15ecf14bc59f90b62 | licenses: ["ADSL"]
stars: 1 (2021-04-13T07:14:34.000Z .. 2021-04-13T07:14:34.000Z) | issues: 2 (2021-06-02T03:14:19.000Z .. 2022-02-11T23:21:24.000Z) | forks: 1 (2021-05-07T13:26:50.000Z .. 2021-05-07T13:26:50.000Z)
content:
# Define a function "warble" that takes in a string as an argument, adds " arglebargle" to the end of it, and returns the result.
# Print the result of calling your "warble" function with the argument "hello".
# Define a function "wibble" that takes a string as an argument, prints the argument, prepends "wibbly " to the argument, and returns the result
# Print the result of calling your "wibble" function with the argument "bibbly"
# Define a function "print_sum" that takes in two numbers as arguments and prints the sum of those two numbers.
# Define a function "return_sum" that takes in two numbers as arguments and returns the sum of those two numbers
# Using either "return_sum" and no mathematical operators, define a function "triple_sum" that takes in 3 arguments and returns the sum of those 3 numbers.
# Define a function "dance_party" that takes in a string as an argument, that prints "dance!", updates the string from calling "wibble" function with that argument, updates the string from calling "warble" function with that argument, returns the updated string
# Print the result of calling your "dance_party" function with your name as the argument

stats: avg_line_length 43.851852, max_line_length 261, alphanum_fraction 0.771115
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 1,158 (0.978041)
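
A minimal sketch of solutions to the prompts above (the file is the "Unsolved" half of the exercise; all names come from its comments):

```python
def warble(s):
    return s + " arglebargle"

print(warble("hello"))

def wibble(s):
    print(s)
    return "wibbly " + s

print(wibble("bibbly"))

def print_sum(a, b):
    print(a + b)

def return_sum(a, b):
    return a + b

def triple_sum(a, b, c):
    # no mathematical operators: chain return_sum instead
    return return_sum(return_sum(a, b), c)

def dance_party(s):
    print("dance!")
    s = wibble(s)
    s = warble(s)
    return s

print(dance_party("Ada"))  # "Ada" stands in for "your name"
```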

---
hexsha: c51e6be205213ab9c3f0f822b11808c56b8e2982 | size: 1,003 | ext: py | lang: Python
path: Section 6 - Modular Programming/Green eggs and ham v4.py | repo: gitjot/python-for-lccs | head: a8a4ae8847abbc33361f80183c06d57b20523382 | licenses: ["CC0-1.0"]
stars: 10 (2020-02-14T14:28:15.000Z .. 2022-02-02T18:44:11.000Z) | issues: null | forks: 8 (2020-03-25T09:27:42.000Z .. 2021-11-03T15:24:38.000Z)
content:
# Event: LCCS Python Fundamental Skills Workshop
# Date: Dec 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: To find (and fix) two syntax errors
# A program to display Green Eggs and Ham (v4)
def showChorus():
print()
print("I do not like green eggs and ham.")
print("I do not like them Sam-I-am.")
print()
def showVerse1():
print("I do not like them here or there.")
print("I do not like them anywhere.")
print("I do not like them in a house")
print("I do not like them with a mouse")
def displayVerse2():
print("I do not like them in a box")
print("I do not like them with a fox")
print("I will not eat them in the rain.")
print("I will not eat them on a train")
# Program execution starts here
showChorus()
showVerse1()     # fixed SYNTAX ERROR 1: the call was displayVerse1(), which does not exist
showChorus()
displayVerse2()  # fixed SYNTAX ERROR 2: the call was showVerse2(), which does not exist
showChorus()

stats: avg_line_length 30.393939, max_line_length 75, alphanum_fraction 0.658026
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 689 (0.686939)

---
hexsha: c51e94a6e708f618911c4ecc6deceed3e193e44e | size: 1,107 | ext: py | lang: Python
path: internal/handlers/singapore.py | repo: fillingthemoon/cartogram-web | head: 58b645bca0c22b9bccdb2a5a8213a5a24a7e5958 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
def get_name(self):
return "Singapore (by Region)"
def get_gen_file(self):
return "{}/singapore_map_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)
def validate_values(self, values):
if len(values) != 5:
return False
for v in values:
if type(v) != float:
return False
return True
def gen_area_data(self, values):
return """1 {} CENTRAL REGION
2 {} EAST REGION
3 {} NORTH REGION
4 {} NORTH-EAST REGION
5 {} WEST REGION""".format(*values)
def expect_geojson_output(self):
return True
    def csv_to_area_string_and_colors(self, csvfile):
        return self.order_by_example(
            csv.reader(csvfile), "Region", 0, 1, 2, 3,
            ["CENTRAL REGION", "EAST REGION", "NORTH REGION",
             "NORTH-EAST REGION", "WEST REGION"],
            [0.0 for i in range(0, 5)],
            {"CENTRAL REGION": "1", "EAST REGION": "2", "NORTH REGION": "3",
             "NORTH-EAST REGION": "4", "WEST REGION": "5"})

stats: avg_line_length 29.918919, max_line_length 292, alphanum_fraction 0.641373
       classes 1,049 (0.947606), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 332 (0.29991)

---
hexsha: c51f18d40b89343f5d2cfddd15750839af888439 | size: 1,247 | ext: py | lang: Python
path: code.py | repo: aashray18521/parallelModifiedGrepPython | head: afad79662e59e1e6fc5f491ba988995a312dc205 | licenses: ["MIT"]
stars: null | issues: 3 (2020-11-23T15:37:43.000Z .. 2020-11-23T15:38:51.000Z) | forks: null
content:
import multiprocessing
import os
import time
rootdir = input()
keyword = input()
batch_size = 1
def try_multiple_operations(file_path):
try:
with open(file_path, "rb") as f: # open the file for reading
for line in f: # use: for i, line in enumerate(f) if you need line numbers
try:
line = line.decode("utf-8") # try to decode the contents to utf-8
except ValueError: # decoding failed, skip the line
continue
if keyword in line: # if the keyword exists on the current line...
print(file_path) # print the file path
except (IOError, OSError): # ignore read and permission errors
pass
def walk_dirs(directory, batch_size):
walk_dirs_generator = os.walk(directory)
for dirname, subdirectories, filenames in walk_dirs_generator:
for i in range(0, len(filenames), batch_size):
the_queue.put(os.path.join(dirname, filenames[i]))
def worker_main(queue):
    while True:
        item = queue.get(True)
        try_multiple_operations(item)

if __name__ == '__main__':
    the_queue = multiprocessing.Queue()
    walk_dirs(rootdir, batch_size)
    the_pool = multiprocessing.Pool(3, worker_main, (the_queue,))
    # The workers loop forever, so wait for the queue to drain before
    # stopping the pool (Queue.empty() is approximate, which is fine here).
    while not the_queue.empty():
        time.sleep(1)
    the_pool.terminate()
    the_pool.join()

stats: avg_line_length 31.974359, max_line_length 87, alphanum_fraction 0.644747
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 268 (0.214916)
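
A hedged alternative to the hand-rolled pool above: concurrent.futures shuts its workers down automatically, which sidesteps the never-returning worker_main loop (keyword and root directory are hardcoded here for illustration):

```python
import os
from concurrent.futures import ProcessPoolExecutor

def scan(path, keyword=b"TODO"):
    """Return path if any line contains keyword, else None."""
    try:
        with open(path, "rb") as f:
            for line in f:
                if keyword in line:
                    return path
    except OSError:
        pass
    return None

if __name__ == "__main__":
    files = [os.path.join(d, name)
             for d, _, names in os.walk(".") for name in names]
    with ProcessPoolExecutor(max_workers=3) as pool:
        for hit in pool.map(scan, files):
            if hit:
                print(hit)
```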

---
hexsha: c51f3d08b27846ef7d07616f6d207a8d88638159 | size: 1,316 | ext: py | lang: Python
path: flask_youku/__init__.py | repo: xiaoyh121/program | head: 6826f024cce7a4250a1dab8dba145c1f0d713286 | licenses: ["Apache-2.0"]
stars: 176 (2016-12-11T03:24:41.000Z .. 2021-12-10T11:44:37.000Z) | issues: 4 (2018-02-07T03:31:13.000Z .. 2021-12-25T13:03:49.000Z) | forks: 76 (2016-11-13T08:57:38.000Z .. 2021-12-25T12:02:05.000Z)
content:
from flask import Blueprint, Markup
from flask import render_template
class Youku(object):
"""Flask-Youku extents."""
def __init__(self, app=None, **kwargs):
"""Init Flask-Youku's instance via app object"""
if app:
self.init_app(app)
def init_app(self, app):
"""Init Flask-Youku's instance via app object"""
self.register_blueprint(app)
# Create the Jinja function `youku`
app.add_template_global(youku)
def register_blueprint(self, app):
"""Register the youku blueprint into app object."""
module = Blueprint(
'youku',
__name__,
template_folder='templates')
app.register_blueprint(module)
return module
class Video(object):
"""Receive the youku_id to rendering the video.html"""
def __init__(self, video_id, cls='youku'):
self.video_id = video_id
self.cls = cls
def render(self, *args, **kwargs):
return render_template(*args, **kwargs)
@property
def html(self):
"""Tag the HTML as security string."""
return Markup(
self.render('youku/video.html', video=self))
def youku(*args, **kwargs):
"""Define the Jinja function."""
video = Video(*args, **kwargs)
return video.html

stats: avg_line_length 24.830189, max_line_length 59, alphanum_fraction 0.609422
       classes 1,115 (0.847264), generators 0 (0), decorators 157 (0.119301), async_functions 0 (0), documentation 375 (0.284954)
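
A usage sketch for the extension above, assuming the package is importable as flask_youku, ships a youku/video.html template in its blueprint, and is given a hypothetical video id:

```python
from flask import Flask, render_template_string
from flask_youku import Youku

app = Flask(__name__)
Youku(app)  # registers the blueprint and the `youku` Jinja global

@app.route('/')
def index():
    # `youku` was made available to templates via add_template_global above.
    return render_template_string("<h1>Demo</h1>{{ youku('HYPOTHETICAL_ID') }}")
```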

---
hexsha: c51fefbd501d6ac95a99920e7040a7192440ef23 | size: 26,061 | ext: py | lang: Python
path: main.py | repo: DasAnish/TutorMatch | head: 1b2cf3a71e859f519d645dc33edf72a975661066 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-09-19T15:00:59.000Z .. 2021-09-19T15:00:59.000Z)
content:
from backend import Backend, Tutor, Parent
from kivy.app import App
from kivy.base import Builder
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.properties import ObjectProperty
from kivy.core.window import Window
from kivy.uix.image import Image
from kivy.config import Config
from kivy.graphics import *
from kivy.animation import *
from kivy.graphics import RoundedRectangle
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.slider import Slider
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.popup import Popup
from backend import Backend, Match, Level
import os
#Builder.load_file("kivyFiles/main.kv")
photoHeight = 550
photoWidth = 340
parent = {'username':'kelvincfleung', 'password':'hello123', 'fname':'Kelvin', 'lname':'Leung1', 'rateMin':10, 'rateMax':20, 'subject':'maths', 'level':1}
parentObj = Parent('61467ec2c2c5a2e917994d69')
parentObj.updateInfo(parent)
def getOrDefault(dictionary, key, default):
if key in dictionary:
return dictionary[key]
else:
return default
class PersonSingleTon:
__instance = None
@staticmethod
def getInstance():
if PersonSingleTon.__instance is None:
PersonSingleTon()
return PersonSingleTon.__instance
def __init__(self):
if PersonSingleTon.__instance is not None:
raise Exception("Singleton: PersonSingleton")
PersonSingleTon.__instance = self
self.person = parentObj
self.isTutor = False
def AddTextWithBack(widget, string, pos):
if string is None or string.strip(" ") == "":
return 0, None
string = str(string)
with widget.canvas:
Color(0.95, 0.95, 0.95)
back = RoundedRectangle(pos=pos, size=(0, 0))
label = Label(text=string, pos=(pos[0]-40, pos[1]+3), color=(0, 0, 0), halign="left")
label.texture_update()
back.size = (label.texture_size[0] + 20, label.texture_size[1] + 10)
label.size[1] = label.texture.size[1]
label.pos = (label.pos[0] + label.texture.size[0] / 2, label.pos[1] - back.size[1])
back.pos = (back.pos[0], back.pos[1] - back.size[1])
widget.add_widget(label)
return back.size[1], label
class ChangePageButton(Button):
def __init__(self, PM, page, pos, size, source, color=(1, 1, 1), **kwargs):
super(ChangePageButton, self).__init__(**kwargs)
self.PM = PM
self.page = page
self.pos = pos
self.size = size
self.color = color
self.background_normal = source
self.background_down = source.replace(".png", "") + "Down" + ".png"
self.bind(on_press=self.pressed)
def pressed(self, instance):
self.PM.goToPage(self.page)
class FadeBetweenButton(Button):
def __init__(self, images, pos, size, **kwargs):
super(FadeBetweenButton, self).__init__(**kwargs)
self.faded = 0
self.images = images
self.pos = pos
self.size = size
self.opacity = 0
self.bind(on_press=self.pressed)
def pressed(self, instance):
if (self.faded == 0):
Animation(opacity=0.5, duration=0.4).start(self.images[0])
Animation(opacity=1, duration=0.4).start(self.images[1])
else:
Animation(opacity=1, duration=0.4).start(self.images[0])
Animation(opacity=0, duration=0.4).start(self.images[1])
self.faded = (self.faded + 1 ) % 2
class AcceptCardButton(Button):
def __init__(self, page, source, pos, size, **kwargs):
super(AcceptCardButton, self).__init__(**kwargs)
self.background_normal = source
self.background_down = source.replace(".png", "") + "Down" + ".png"
self.page = page
self.pos = pos
self.size = size
self.bind(on_press=self.pressed)
self.tutorObj = None
    def pressed(self, instance):
        # send the match request, then advance to the next card
        parentObj = PersonSingleTon.getInstance().person
        match = Match(parentObj, self.tutorObj)
        Backend.sendLike(match)
        self.page.nextCard()
class RejectCardButton(Button):
def __init__(self, page, source, pos, size, **kwargs):
super(RejectCardButton, self).__init__(**kwargs)
self.background_normal = source
self.background_down = source.replace(".png", "") + "Down" + ".png"
self.page = page
self.pos = pos
self.size = size
self.bind(on_press=self.pressed)
self.tutorObj = None
def pressed(self, instance):
self.page.nextCard()
class AcceptRequestButton(Button):
def __init__(self, page, request, label, source, pos, size, match, **kwargs):
super(AcceptRequestButton, self).__init__(**kwargs)
self.background_normal = source
self.background_down = source.replace(".png", "") + "Down" + ".png"
self.pos = pos
self.size = size
self.bind(on_press=self.pressed)
self.request = request
self.page = page
self.label = label
self.match = match
def pressed(self, instance):
self.page.remove_widget(self.request)
self.page.requestInfo.remove(self.label.text)
self.page.updateRequests()
# Confirm tutoring
Backend.accept(self.match)
# accept match
tutorMatchesPage.updateMatches()
class RejectRequestButton(Button):
def __init__(self, page, request, label, source, pos, size, match, **kwargs):
super(RejectRequestButton, self).__init__(**kwargs)
self.background_normal = source
self.background_down = source.replace(".png", "") + "Down" + ".png"
self.pos = pos
self.size = size
self.bind(on_press=self.pressed)
self.request = request
self.label = label
self.page = page
self.match = match
def pressed(self, instance):
self.page.remove_widget(self.request)
self.page.requestInfo.remove(self.label.text)
self.page.updateRequests()
# Reject tutoring
# reject match
Backend.reject(self.match)
tutorMatchesPage.updateMatches()
class Card(Widget):
def __init__(self, image, **kwargs):
super(Card, self).__init__(**kwargs)
self.image = image
class SignInPage(Widget):
def __init__(self, **kwargs):
super(SignInPage, self).__init__(**kwargs)
class ParentHomePage(Widget):
def __init__(self, **kwargs):
super(ParentHomePage, self).__init__(**kwargs)
#self.cards = [["images/kelvin1.png", ["Kelvin Leung", "BA Mathematics, Cambridge",
# "Tutors in:\n- Maths,\n- Physics,\n- Computer science",
# "For GCSE & A-Level students", "£30+/hr"]],
# ["images/businessMan.png", ["Coolvin Leung", "PhD Mathematics, Cambridge",
# "Tutors in: \n- Nothing", "£'a lot'/hr"]]]
self.card = None
# Yes/no buttons
self.noButton = RejectCardButton(self, "images/noButton.png", (20, 100), (70, 70))
self.yesButton = AcceptCardButton(self, "images/yesButton.png", (Window.width-90, 100), (70, 70))
#self.noButton = Button(pos=(20, 100), size=(70, 70), background_normal="images/noButton.png",
# background_down="images/noButtonDown.png")
#self.yesButton = Button(pos=(Window.width-90, 100), size=(70, 70), background_normal="images/yesButton.png",
# background_down="images/yesButtonDown.png")
self.nextTutor = Backend.nextTutor()
self.card = self.nextCard()
def nextCard(self):
# next tutor function
nextItem = next(self.nextTutor, None)
self.yesButton.tutorObj = nextItem
self.noButton.tutorObj = nextItem
        showYesNo = True
if not nextItem:
#: Handle end of cards
info = []
image = "images/businessMan.png"
print("no cards left")
showYesNo = False
#pass
        else:
            print(nextItem.id, nextItem.fname)
info = [f"{nextItem.fname} {nextItem.lname}",
nextItem.qualification,
f"Tutors in:\n" +'\n'.join(nextItem.subject),
f"£{nextItem.rateMin}+/hr"]
image = "images/kelvin1.png"
# Image formatting
img = Image(source=image, allow_stretch=True, pos=(10, 80),
size=(photoWidth, photoHeight))
if img.texture_size[1]/img.texture_size[0] > photoHeight/photoWidth:
img.texture = img.texture.get_region(0, (img.texture_size[1] - img.texture_size[0] * photoHeight/photoWidth)/2, img.texture_size[0], img.texture_size[0] * photoHeight/photoWidth)
else:
img.texture = img.texture.get_region((img.texture_size[0] - img.texture_size[1] * photoWidth/photoHeight) / 2, 0, img.texture_size[1] * photoWidth/photoHeight, img.texture_size[1])
card = Card(img)
if (self.card != None):
card.pos = (Window.width - photoWidth, self.card.pos[1])
else:
card.pos = (0, 0)
card.add_widget(img)
card.add_widget(Image(source="images/gradient.png", pos=(10, 80), size=(photoWidth, photoHeight)))
card.add_widget(Image(source="images/border.png", pos=(10, 80), size=(photoWidth, photoHeight)))
# Info formatting
infoLabels = Widget(pos=(0, 0))
startPos = (20, 600)
pad = 20
for string in info:
height, label = AddTextWithBack(infoLabels, string, startPos)
if (label is not None):
startPos = (startPos[0], startPos[1] - height - pad)
infoLabels.opacity = 0
card.add_widget(infoLabels)
# Tap to fade
fadeButton = FadeBetweenButton([img, infoLabels], img.pos, img.size)
card.add_widget(fadeButton)
self.add_widget(card)
self.remove_widget(self.card)
#if (self.card != None):
#print("hello")
#Animation(pos=(self.card.pos[0], self.card.pos[1]), duration=0.4).start(card)
#Animation(size=(0, 0), duration=0.4).start(self.card)
#self.remove_widget(self.card)
self.remove_widget(self.noButton)
self.add_widget(self.noButton)
self.remove_widget(self.yesButton)
self.add_widget(self.yesButton)
return card
class TutorHomePage(Widget):
def __init__(self, **kwargs):
super(TutorHomePage, self).__init__(**kwargs)
global tutorHomePage
tutorHomePage = self
self.add_widget(Label(text="Requests", color=(0, 0, 0), pos=(60, 550), font_size="40sp"))
self.requests = []
#: get requested matched
self.requestInfo = []
self.listOfMatches = []
#["Villar\nKS3 Mathematics, 5/hr", "Kiln\nKS2 English, £600/hr", "Das\nGCSE Spanish, £60/hr",
# "Samuels\nA-Level Chemistry, £30/hr"]
self.updateRequests()
def updateRequestsInfo(self):
personObj = PersonSingleTon.getInstance()
self.listOfMatches = Backend.getMatchesTutor(personObj.person, Match.REQUESTED)
def convertMatchToString(match):
output = ''
parent = match.parent
output += parent.lname + "\n"
if parent.level == Level.ALEVEL:
output += 'A-Level, '
elif parent.level == Level.GCSE:
output += 'GCSE, '
elif parent.level == Level.KS3:
output += 'KS3, '
elif parent.level == Level.KS2:
output += 'KS2, '
output += f"£{parent.rateMax}/hr"
return output
self.requestInfo = [convertMatchToString(i) for i in self.listOfMatches]
print(self.requestInfo)
def updateRequests(self):
self.updateRequestsInfo()
        for request in self.requests:
            self.remove_widget(request)
        self.requests = []  # reset so the indices below line up with requestInfo
pad = 10
startPos = (20, 550)
for i in range(0, len(self.requestInfo)):
self.requests.append(Widget(pos=(0, 0)))
height, label = AddTextWithBack(self.requests[i], self.requestInfo[i], startPos)
self.requests[i].add_widget(AcceptRequestButton(self, self.requests[i], label, "images/smallYesButton.png",
(Window.width - 115, startPos[1] - 50), (50, 50),
self.listOfMatches[i]))
self.requests[i].add_widget(RejectRequestButton(self, self.requests[i], label, "images/smallNoButton.png",
(Window.width - 60, startPos[1] - 50), (50, 50),
self.listOfMatches[i]))
startPos = (startPos[0], startPos[1] - height - pad)
self.add_widget(self.requests[i])
# KELVIN GO HERE
#360*640
class ParentProfile(Widget):
def __init__(self, **kwargs):
person = PersonSingleTon.getInstance().person
super(ParentProfile, self).__init__(**kwargs)
self.grid = GridLayout(cols=2, pos=(0,80), size=(360,520), spacing=(0,10), padding=(0,0,20,0))
self.profileLabel = Label(text="Profile Picture:", color=(0, 0, 0), font_size="18sp",width=90, size_hint_y=6)
self.profilepicture = Button(background_normal='images/kelvin1.png')
self.profilepicture.bind(on_press=self.pictureChanger)
self.usernameLabel = Label(text="Username:", color=(0, 0, 0), font_size="18sp",width=90)
self.usernameText = TextInput(text=person.username,background_color=(.95, .95, .95, 1))
self.passwordLabel = Label(text="Password:", color=(0, 0, 0), font_size="18sp",width=90)
self.passwordText = TextInput(text=person.password, background_color=(.95, .95, .95, 1))
self.phoneNumLabel = Label(text="Phone:", color=(0, 0, 0), font_size="18sp",width=90)
self.phoneNumText = TextInput(text=str(person.phoneNum), background_color=(.95, .95, .95, 1))
self.fnameLabel = Label(text="First Name:", color=(0, 0, 0), font_size="18sp",width=90)
self.fnameText = TextInput(text=person.fname, background_color=(.95, .95, .95, 1))
self.lnameLabel = Label(text="Last Name:", color=(0, 0, 0), font_size="18sp",width=90)
self.lnameText = TextInput(text=person.lname, background_color=(.95, .95, .95, 1))
self.subjectLabel = Label(text="Subject:", color=(0, 0, 0), font_size="18sp",width=90)
self.subjectText = GridLayout(cols=2)
self.subjectbtn1 = ToggleButton(text='Maths', group='subject', )
self.subjectbtn2 = ToggleButton(text='English', group='subject', state='down')
self.rateMinLabel = Label(text="Minimum Rate:", color=(0, 0, 0), font_size="18sp",width=90)
self.rateMinText = GridLayout(cols=1)
self.rateMinTextSlider = Slider(value_track= True, min=0, max=100, step=1, value=person.rateMin,
value_track_color=(0,0.5,0.5,0.7))
self.rateMinTextSlider.bind(value=self.onValueMin)
self.rateMinTextText = Label(text='£'+str(self.rateMinTextSlider.value), color=(0, 0, 0), font_size="18sp")
self.rateMaxLabel = Label(text="Maximum Rate:", color=(0, 0, 0), font_size="18sp",width=90)
self.rateMaxText = GridLayout(cols=1)
self.rateMaxTextSlider = Slider(value_track= True, min=0, max=100, step=1, value=person.rateMax,
value_track_color=(0,0.5,0.5,0.7))
self.rateMaxTextText = Label(text='£'+str(self.rateMaxTextSlider.value), color=(0, 0, 0), font_size="18sp")
self.rateMaxTextSlider.bind(value=self.onValueMax)
self.levelLabel = Label(text="Tutee Level:", color=(0, 0, 0), font_size="18sp",width=90)
self.levelText = GridLayout(cols=2)
self.levelbtn1 = ToggleButton(text='GCSE', group='level', )
self.levelbtn2 = ToggleButton(text='A-LEVEL', group='level', state='down')
self.add_widget(self.grid)
self.grid.add_widget(self.profileLabel)
self.grid.add_widget(self.profilepicture)
self.grid.add_widget(self.usernameLabel)
self.grid.add_widget(self.usernameText)
self.grid.add_widget(self.passwordLabel)
self.grid.add_widget(self.passwordText)
self.grid.add_widget(self.phoneNumLabel)
self.grid.add_widget(self.phoneNumText)
self.grid.add_widget(self.fnameLabel)
self.grid.add_widget(self.fnameText)
self.grid.add_widget(self.lnameLabel)
self.grid.add_widget(self.lnameText)
self.grid.add_widget(self.subjectLabel)
self.grid.add_widget(self.subjectText)
self.subjectText.add_widget(self.subjectbtn1)
self.subjectText.add_widget(self.subjectbtn2)
self.grid.add_widget(self.rateMinLabel)
self.grid.add_widget(self.rateMinText)
self.rateMinText.add_widget(self.rateMinTextSlider)
self.rateMinText.add_widget(self.rateMinTextText)
self.grid.add_widget(self.rateMaxLabel)
self.grid.add_widget(self.rateMaxText)
self.rateMaxText.add_widget(self.rateMaxTextSlider)
self.rateMaxText.add_widget(self.rateMaxTextText)
self.grid.add_widget(self.levelLabel)
self.grid.add_widget(self.levelText)
self.levelText.add_widget(self.levelbtn1)
self.levelText.add_widget(self.levelbtn2)
    def pictureChanger(self, instance):
popup = Popup(title='Change your profile', size_hint=(None, None), size=(300, 400), auto_dismiss=False)
temp_cont = GridLayout(cols=1,spacing=(0,20), padding=(5,0,5,0))
text = Label(text='Please enter the path of your new picture.')
text_input = TextInput(text='')
btn_choice = GridLayout(cols=2,size_hint_y=0.4)
btn1 = Button(text='Confirm')
btn2 = Button(text='Cancel')
btn_choice.add_widget(btn1)
btn_choice.add_widget(btn2)
temp_cont.add_widget(text)
temp_cont.add_widget(text_input)
temp_cont.add_widget(btn_choice)
        btn1.bind(on_press=lambda instance: self.pictureChangerCheck(popup, text_input))
btn2.bind(on_press=popup.dismiss)
popup.content = temp_cont
popup.open()
@staticmethod
def pictureChangerCheck(popup, check):
if os.path.exists(check.text):
imageKey = Backend.getImageKey(check.text)
tutor: Tutor = PersonSingleTon.getInstance().person
tutor.picture = imageKey
imageBytes = Backend.getImageBytes(imageKey)
with open('images/temp.png', 'wb') as f:
f.write(imageBytes)
popup.dismiss()
def onValueMin(self, instance, value):
self.rateMinTextText.text = '£' + str(int(value))
def onValueMax(self, instance, value):
self.rateMaxTextText.text = '£' + str(int(value))
class TutorProfile(Widget):
def __init__(self, **kwargs):
super(TutorProfile, self).__init__(**kwargs)
class ParentMatches(Widget):
def __init__(self, **kwargs):
super(ParentMatches, self).__init__(**kwargs)
global parentMatchesPage
parentMatchesPage = self
self.add_widget(Label(text="Tutors", color=(0, 0, 0), pos=(40, 550), font_size="40sp"))
self.matches = []
# TODO: get matched parents
# self.matchInfo = [
# "Kelvin Leung\nBA Mathematics, Cambridge\nTutors in:\n- Maths,\n- Physics,\n- Computer science"
# "\n£30+/hr\n\nContact at:\n077777888999, leung@gmail.com"]
self.updateMatches()
def updateMatchInfo(self):
parent = PersonSingleTon.getInstance().person
listOfMatches = Backend.getMatchesParent(parent, Match.ACCEPTED)
def matchToString(match: Match):
tutor = match.tutor
subjects = '-' + '\n-'.join(tutor.subject)
output = (f"{tutor.fname} {tutor.lname}\n"
f"{tutor.qualification}\n"
f"Tutors in:\n{subjects}\n"
f"£{tutor.rateMin}+/hr\n\n"
f"Contact at:\n{tutor.phoneNum}")
return output
self.matchInfo = [matchToString(m) for m in listOfMatches]
print(self.matchInfo)
def updateMatches(self):
self.updateMatchInfo()
        for match in self.matches:
            self.remove_widget(match)
        self.matches = []  # reset so the indices below line up with matchInfo
pad = 10
startPos = (20, 550)
for i in range(0, len(self.matchInfo)):
self.matches.append(Widget(pos=(0, 0)))
height, label = AddTextWithBack(self.matches[i], self.matchInfo[i], startPos)
# self.matches[i].add_widget(AcceptRequestButton(self, self.matches[i], label, "images/smallYesButton.png",
# (Window.width - 115, startPos[1] - 50), (50, 50)))
# self.matches[i].add_widget(RejectRequestButton(self, self.matches[i], label, "images/smallNoButton.png",
# (Window.width - 60, startPos[1] - 50), (50, 50)))
startPos = (startPos[0], startPos[1] - height - pad)
self.add_widget(self.matches[i])
class TutorMatches(Widget):
def __init__(self, **kwargs):
super(TutorMatches, self).__init__(**kwargs)
global tutorMatchesPage
tutorMatchesPage = self
self.add_widget(Label(text="Tutees", color=(0, 0, 0), pos=(40, 550), font_size="40sp"))
self.matches = []
# TODO: get matched tutors
self.matchInfo = ["Villar\nKS3 Mathematics, £5/hr\n\nContact at:\n077777888999, villar@gmail.com", "Kiln\nKS2 English, £600/hr\n\nContact at:\n077777888999, kiln@gmail.com",
"Das\nGCSE Spanish, £60/hr\n\nContact at:\n077777888999, das@gmail.com", "Samuels\nA-Level Chemistry, £30/hr\n\nContact at:\n077777888999, samuels@gmail.com"]
self.updateMatches()
def updateMatchInfo(self):
tutor = PersonSingleTon.getInstance().person
listOfMatches = Backend.getMatchesTutor(tutor, Match.ACCEPTED)
def matchToString(match):
parent = match.parent
if parent.level == Level.ALEVEL:
level = 'A-Level'
elif parent.level == Level.GCSE:
level = 'GCSE'
elif parent.level == Level.KS3:
level = 'KS3'
else:
level = 'KS2'
output = (f"{parent.lname}\n"
f"{level} {parent.subject}, £{parent.rateMax}/hr\n\n"
f"Contact at:\n"
f"{parent.phoneNum}")
return output
self.matchInfo = [matchToString(m) for m in listOfMatches]
print(self.matchInfo)
def updateMatches(self):
self.updateMatchInfo()
print("match info: ", self.matchInfo)
        for match in self.matches:
            self.remove_widget(match)
        self.matches = []  # reset so the indices below line up with matchInfo
pad = 10
startPos = (20, 550)
for i in range(0, len(self.matchInfo)):
self.matches.append(Widget(pos=(0, 0)))
height, label = AddTextWithBack(self.matches[i], self.matchInfo[i], startPos)
#self.matches[i].add_widget(AcceptRequestButton(self, self.matches[i], label, "images/smallYesButton.png",
# (Window.width - 115, startPos[1] - 50), (50, 50)))
#self.matches[i].add_widget(RejectRequestButton(self, self.matches[i], label, "images/smallNoButton.png",
# (Window.width - 60, startPos[1] - 50), (50, 50)))
startPos = (startPos[0], startPos[1] - height - pad)
self.add_widget(self.matches[i])
class PageManager(Widget):
HOME = 0
PROFILE = 2
MATCHES = 4
def __init__(self, **kwargs):
super(PageManager, self).__init__(**kwargs)
Window.size = (360, 640)
        self.isTutor = False  # isinstance(person, Tutor)
self.size = (360, 640)
self.currentPage = 0 + self.isTutor
self.pages = [ParentHomePage(), TutorHomePage(), ParentProfile(), TutorProfile(), ParentMatches(), TutorMatches()]
with self.canvas:
self.bgCanvas = Rectangle(pos=(0, 0), size=(self.width, self.height))#70))
self.add_widget(self.pages[self.currentPage])
self.homeButton = ChangePageButton(self, PageManager.HOME + self.isTutor, (10, 15), (50, 50), "images/homeButton.png")
self.add_widget(self.homeButton)
self.matchesButton = ChangePageButton(self, PageManager.MATCHES + self.isTutor, (Window.width/2 - 25, 15), (50, 50), "images/starButton.png")
self.add_widget(self.matchesButton)
self.profileButton = ChangePageButton(self, PageManager.PROFILE + self.isTutor, (self.width - 60, 15), (50, 50), "images/profileButton.png")
self.add_widget(self.profileButton)
def goToPage(self, page):
self.remove_widget(self.pages[self.currentPage])
self.currentPage = page + self.isTutor
self.add_widget(self.pages[self.currentPage])
def updateUser(self, person):
PersonSingleTon.getInstance().person = person
self.isTutor = isinstance(person, Tutor)
self.goToPage(0)
tutorHomePage.updateRequests()
tutorMatchesPage.updateMatches()
parentMatchesPage.updateMatches()
#self.currentPage += self.isTutor
class MainApp(App):
def build(self):
return PageManager()
if __name__ == '__main__':
Config.set('graphics', 'width', '360')
Config.set('graphics', 'height', '640')
Config.set('graphics', 'resizable', False)
MainApp().run()

stats: avg_line_length 40.784038, max_line_length 192, alphanum_fraction 0.610683
       classes 23,993 (0.920012), generators 0 (0), decorators 578 (0.022163), async_functions 0 (0), documentation 4,352 (0.166878)
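
PersonSingleTon above is a conventional singleton with a getInstance accessor. Its core, reduced to a standalone sketch:

```python
class Singleton:
    _instance = None

    @classmethod
    def get_instance(cls):
        # create on first access, then always hand back the same object
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

assert Singleton.get_instance() is Singleton.get_instance()
```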

---
hexsha: c520331bf38c88e653e41aa4b2d7c402d30d7649 | size: 374 | ext: py | lang: Python
path: routes/routes.py | repo: aryan9600/SimpleMath-Flask | head: 855120ba7e7f36435045840ab1c6672308fae7e5 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from flask import Blueprint, request
router = Blueprint("router", __name__)
@router.route("/check")
def check():
return "Congratulations! Your app works. :)"
@router.route("/add", methods=["POST"])
def add():
    # form values arrive as strings; cast before adding, otherwise + concatenates
    first_number = float(request.form['FirstNumber'])
    second_number = float(request.form['SecondNumber'])
    result = first_number + second_number
    return str(result)

stats: avg_line_length 22, max_line_length 48, alphanum_fraction 0.697861
       classes 0 (0), generators 0 (0), decorators 291 (0.778075), async_functions 0 (0), documentation 92 (0.245989)
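
The blueprint above can be checked without a running server by registering it on an app and using Flask's test client (a sketch; the import path assumes the repo layout shown in the record header):

```python
from flask import Flask
from routes.routes import router

app = Flask(__name__)
app.register_blueprint(router)

client = app.test_client()
print(client.get("/check").data)
print(client.post("/add", data={"FirstNumber": "2", "SecondNumber": "3"}).data)  # b'5.0'
```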

---
hexsha: c5203ec4fd880de88723d9ad07ee74058b1d23cf | size: 1,592 | ext: py | lang: Python
path: configs/repdet/repdet_repvgg_b1g2_nanopan_nanohead_1x_coco.py | repo: karthiksharma98/mmdetection | head: 295145d41a74598db98a037224f0f82c074f3fff | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
_base_ = [
'../_base_/models/repdet_repvgg_pafpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_poly.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='RepDet',
pretrained='/data/kartikes/repvgg_models/repvgg_b1g2.pth',
backbone=dict(
type='RepVGG',
arch='B1g2',
out_stages=[1, 2, 3, 4],
activation='ReLU',
last_channel=1024,
deploy=False),
neck=dict(
type='NanoPAN',
in_channels=[128, 256, 512, 1024],
out_channels=256,
num_outs=5,
start_level=1,
add_extra_convs='on_input'),
bbox_head=dict(
type='NanoDetHead',
num_classes=80,
in_channels=256,
stacked_convs=2,
feat_channels=256,
share_cls_reg=True,
reg_max=10,
norm_cfg=dict(type='BN', requires_grad=True),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
)
optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001)
data = dict(
samples_per_gpu=4,
workers_per_gpu=2)
find_unused_parameters = True
runner = dict(type='EpochBasedRunner', max_epochs=12)

stats: avg_line_length 28.945455, max_line_length 74, alphanum_fraction 0.594849
       classes 0 (0), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 364 (0.228643)
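
Configs like the one above are consumed through mmcv's Config loader in mmdetection; a sketch (the file path comes from this record's header):

```python
from mmcv import Config

cfg = Config.fromfile(
    'configs/repdet/repdet_repvgg_b1g2_nanopan_nanohead_1x_coco.py')
print(cfg.model.backbone.arch)  # B1g2
print(cfg.optimizer.lr)         # 0.025
```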

---
hexsha: c5206e72ad25192f5a2ed7316aa7ced0c3105161 | size: 436 | ext: py | lang: Python
path: tests/test_calculate_branch.py | repo: ivergara/python-abc | head: b5bb87b80315f8e5ecd2d6f35b7208f0a7df9c3a | licenses: ["Unlicense"]
stars: 2 (2021-07-25T20:12:21.000Z .. 2021-07-25T21:19:23.000Z) | issues: 1 (2021-12-28T22:07:05.000Z .. 2021-12-28T22:07:05.000Z) | forks: 1 (2021-12-07T19:53:45.000Z .. 2021-12-07T19:53:45.000Z)
content:
import pytest
from tests import assert_source_returns_expected
BRANCH_CASES = [
# Call
('print("hello world")', 'b | print("hello world")'),
# Await
("await noop()", "b | await noop()"),
# Class instantiation
("Noop()", "b | Noop()"),
]
@pytest.mark.parametrize("source,expected", BRANCH_CASES)
def test_branch(capsys, source, expected):
    # assert the helper's result; without `assert`, the `is True` check is a no-op
    assert assert_source_returns_expected(capsys, source, expected) is True

stats: avg_line_length 22.947368, max_line_length 68, alphanum_fraction 0.669725
       classes 0 (0), generators 0 (0), decorators 169 (0.387615), async_functions 0 (0), documentation 151 (0.34633)

---
hexsha: c52074b71855ef72867102bc5564df2ba1896c19 | size: 4,619 | ext: py | lang: Python
path: client/src/obc.py | repo: estcube/telemetry-forwarding-client (forks row: estcube/Telemetry-Forwarding-Client) | head: be659c8dd8e4bd26d1d1974d63f90acffd150e34 | licenses: ["MIT"]
stars: 3 (2020-06-11T12:34:25.000Z .. 2020-09-16T12:06:32.000Z) | issues: 57 (2020-09-16T09:11:04.000Z .. 2022-02-28T01:32:13.000Z) | forks: null
content:
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Obc(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.reserved = self._io.read_bits_int(1) != 0
self.internal_flash = self._io.read_bits_int(1) != 0
self.internal_sram = self._io.read_bits_int(1) != 0
self.qspi_flash1 = self._io.read_bits_int(1) != 0
self.qspi_flash2 = self._io.read_bits_int(1) != 0
self.fmc_mram = self._io.read_bits_int(1) != 0
self.spi_fram1 = self._io.read_bits_int(1) != 0
self.spi_fram2 = self._io.read_bits_int(1) != 0
self.spi_fram3 = self._io.read_bits_int(1) != 0
self.io_expander = self._io.read_bits_int(1) != 0
self.fmc_mram_temp_sensor = self._io.read_bits_int(1) != 0
self.qspi_flash_temp_sensor = self._io.read_bits_int(1) != 0
self.io_expander_temp_sensor = self._io.read_bits_int(1) != 0
self.rtc = self._io.read_bits_int(1) != 0
self.current_adc = self._io.read_bits_int(1) != 0
self.aocs1_gyro1 = self._io.read_bits_int(1) != 0
self.aocs1_gyro2 = self._io.read_bits_int(1) != 0
self.aocs1_magnet = self._io.read_bits_int(1) != 0
self.aocs1_acc = self._io.read_bits_int(1) != 0
self.aocs1_temp = self._io.read_bits_int(1) != 0
self.aocs2_gyro1 = self._io.read_bits_int(1) != 0
self.aocs2_gyro2 = self._io.read_bits_int(1) != 0
self.aocs2_magnet = self._io.read_bits_int(1) != 0
self.aocs2_acc = self._io.read_bits_int(1) != 0
self.aocs2_temp = self._io.read_bits_int(1) != 0
self.payload_bus = self._io.read_bits_int(1) != 0
self.icp1_bus = self._io.read_bits_int(1) != 0
self.icp2_bus = self._io.read_bits_int(1) != 0
self.reaction1 = self._io.read_bits_int(1) != 0
self.reaction2 = self._io.read_bits_int(1) != 0
self.reaction3 = self._io.read_bits_int(1) != 0
self.oscillator = self._io.read_bits_int(1) != 0
self.err_mcu = self._io.read_bits_int(1) != 0
self.err_internal_flash = self._io.read_bits_int(1) != 0
self.err_internal_sram = self._io.read_bits_int(1) != 0
self.err_qspi_flash1 = self._io.read_bits_int(1) != 0
self.err_qspi_flash2 = self._io.read_bits_int(1) != 0
self.err_fmc_mram = self._io.read_bits_int(1) != 0
self.err_spi_fram1 = self._io.read_bits_int(1) != 0
self.err_spi_fram2 = self._io.read_bits_int(1) != 0
self.err_spi_fram3 = self._io.read_bits_int(1) != 0
self.err_io_expander = self._io.read_bits_int(1) != 0
self.err_mram_temp = self._io.read_bits_int(1) != 0
self.err_qspi_flash_temp = self._io.read_bits_int(1) != 0
self.err_io_expander_temp = self._io.read_bits_int(1) != 0
self.err_rtc = self._io.read_bits_int(1) != 0
self.err_current_adc = self._io.read_bits_int(1) != 0
self.err_aocs1_gyro1 = self._io.read_bits_int(1) != 0
self.err_aocs1_gyro2 = self._io.read_bits_int(1) != 0
self.err_aocs1_magnet = self._io.read_bits_int(1) != 0
self.err_aocs1_acc = self._io.read_bits_int(1) != 0
self.err_aocs1_temp = self._io.read_bits_int(1) != 0
self.err_aocs2_gyro1 = self._io.read_bits_int(1) != 0
self.err_aocs2_gyro2 = self._io.read_bits_int(1) != 0
self.err_aocs2_magnet = self._io.read_bits_int(1) != 0
self.err_aocs2_acc = self._io.read_bits_int(1) != 0
self.err_aocs2_temp = self._io.read_bits_int(1) != 0
self.err_payload_bus = self._io.read_bits_int(1) != 0
self.err_icp1_bus = self._io.read_bits_int(1) != 0
self.err_icp2_bus = self._io.read_bits_int(1) != 0
self.err_reaction1 = self._io.read_bits_int(1) != 0
self.err_reaction2 = self._io.read_bits_int(1) != 0
self.err_reaction3 = self._io.read_bits_int(1) != 0
self.err_oscillator = self._io.read_bits_int(1) != 0
self._io.align_to_byte()
self.fmc_mram_temp = self._io.read_u1()
self.qspi_fram_temp = self._io.read_u1()
self.io_expander_temp = self._io.read_u1()

stats: avg_line_length 52.488636, max_line_length 118, alphanum_fraction 0.657285
       classes 4,213 (0.912102), generators 0 (0), decorators 0 (0), async_functions 0 (0), documentation 185 (0.040052)
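
The generated class reads 64 one-bit status/error flags (8 bytes) followed by three one-byte temperatures, 11 bytes per frame. Parsing with the 0.7-era Kaitai runtime imported above looks like this (all-zero bytes as stand-in telemetry):

```python
from io import BytesIO
from kaitaistruct import KaitaiStream

raw = bytes(11)  # stand-in frame: 8 bytes of flags + 3 temperature bytes
frame = Obc(KaitaiStream(BytesIO(raw)))
print(frame.rtc, frame.err_oscillator, frame.fmc_mram_temp)
```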

---
hexsha: c522238afd1828d1190c7360573f7b8dc442a5a0 | size: 1,537 | ext: py | lang: Python
path: SourceWatch/buffer.py | repo: spezifanta/SourceWatch | head: aaf2cf1ba00015947689181daf77b80bde9b4feb | licenses: ["MIT"]
stars: 6 (2019-07-09T19:40:01.000Z .. 2022-01-24T12:01:37.000Z) | issues: null | forks: 1 (2020-11-07T13:06:58.000Z .. 2020-11-07T13:06:58.000Z)
content:
import io
import struct
class SteamPacketBuffer(io.BytesIO):
"""In-memory byte buffer."""
def __len__(self):
return len(self.getvalue())
def __repr__(self):
return '<PacketBuffer: {}: {}>'.format(len(self), self.getvalue())
def __str__(self):
return str(self.getvalue())
def read_byte(self):
return struct.unpack('<B', self.read(1))[0]
def write_byte(self, value):
self.write(struct.pack('<B', value))
def read_short(self):
return struct.unpack('<h', self.read(2))[0]
def write_short(self, value):
self.write(struct.pack('<h', value))
def read_float(self):
return struct.unpack('<f', self.read(4))[0]
def write_float(self, value):
self.write(struct.pack('<f', value))
def read_long(self):
return struct.unpack('<l', self.read(4))[0]
def write_long(self, value):
self.write(struct.pack('<l', value))
def read_long_long(self):
return struct.unpack('<Q', self.read(8))[0]
def write_long_long(self, value):
self.write(struct.pack('<Q', value))
    def read_string(self):
        # Read bytes until a NUL terminator (or end of buffer) is reached;
        # the original looped forever at EOF because read(1) returns b''.
        value = bytearray()
        while True:
            char = self.read(1)
            if char in (b'\x00', b''):
                break
            value.extend(char)
        return ''.join(chr(byte) for byte in value)
def write_string(self, value):
self.write(bytearray('{0}\x00'.format(value), 'utf-8'))
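# A minimal usage sketch (an assumption, not part of the original module):
# values written round-trip through the buffer in order from position zero.
if __name__ == '__main__':
    buf = SteamPacketBuffer()
    buf.write_long(-1)                        # Source query packets start with 0xFFFFFFFF
    buf.write_byte(0x54)                      # 'T', the A2S_INFO request type
    buf.write_string('Source Engine Query')
    buf.seek(0)
    assert buf.read_long() == -1
    assert buf.read_byte() == 0x54
    assert buf.read_string() == 'Source Engine Query'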
| 25.616667
| 74
| 0.573845
| 1,510
| 0.982433
| 0
| 0
| 0
| 0
| 0
| 0
| 160
| 0.104099
|
c52372bcbf3ae907ef32ccf5713d1759604af330
| 483
|
py
|
Python
|
scripts/npc/holyStone.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/npc/holyStone.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/npc/holyStone.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Holy Stone - Holy Ground at the Snowfield (3rd job)
questIDs = [1431, 1432, 1433, 1435, 1436, 1437, 1439, 1440, 1442, 1443, 1445, 1446, 1447, 1448]
hasQuest = False
for qid in questIDs:
if sm.hasQuest(qid):
hasQuest = True
break
if hasQuest:
if sm.sendAskYesNo("#b(A mysterious energy surrounds this stone. Do you want to investigate?)"):
sm.warpInstanceIn(910540000, 0)
else:
sm.sendSayOkay("#b(A mysterious energy surrounds this stone)#k")
| 30.1875
| 100
| 0.679089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.364389
|
c523effb8f36813f8d45730c0dbdd83679d7448e
| 16,256
|
py
|
Python
|
pyvmodule/expr.py
|
tanhongze/pyvmodule
|
b88cd35e57893024071306d238ce601341ce3bb4
|
[
"MIT"
] | null | null | null |
pyvmodule/expr.py
|
tanhongze/pyvmodule
|
b88cd35e57893024071306d238ce601341ce3bb4
|
[
"MIT"
] | null | null | null |
pyvmodule/expr.py
|
tanhongze/pyvmodule
|
b88cd35e57893024071306d238ce601341ce3bb4
|
[
"MIT"
] | 1
|
2020-01-20T07:25:40.000Z
|
2020-01-20T07:25:40.000Z
|
#-- coding:utf-8
from .ast import ASTNode
from .compute.value import expr_value_calc_funcs,expr_value_prop_funcs
from .compute.width import expr_width_calc_funcs,expr_width_fix_funcs
from .compute.width import expr_match_width,expr_calc_width
from .tools.utility import count_one
import warnings
__all__ = ['Mux','Concatenate','Expr','wrap_expr',
'BinaryOperator',
'ConstExpr','Hexadecimal','Decimal','Octal','Binary']
def wrap_expr(expr):
if isinstance(expr,Expr):return expr
elif isinstance(expr,int):return Hexadecimal(expr)
else:raise TypeError('Cannot convert "%s" object into "Expr".'%type(expr))
def propagated(func):
def propagated_func(*args):
res = func(*args)
return res if res is None else res._prop_value()
return propagated_func
class Expr(ASTNode):
max_cols_per_line = 120
@classmethod
def _need_split_line(cls,codes):
for lines in codes:
if len(lines)>1:return True
length = 0
for lines in codes:
for line in lines:
for word in line:length+=len(word)
return length>cls.max_cols_per_line
def _generate(self,indent=0,p_precedence=99):return self._expr_generate_funcs[self.typename](self,indent,p_precedence)
def _calc_width(self):self._width_calc_func(self)
def _fix_width(self,expr):return self._width_fix_func(self,expr)
def _prop_value(self):return self._value_prop_func(self)
@property
def typename(self):return self._typename
@typename.setter
def typename(self,typename):
assert isinstance(typename,str)
self._typename = typename
self._width_calc_func = expr_width_calc_funcs[typename]
self._width_fix_func = expr_width_fix_funcs [typename]
self._value_calc_func = expr_value_calc_funcs[typename]
self._value_prop_func = expr_value_prop_funcs[typename]
@property
def lhs(self):return self.childs[1]
@lhs.setter
def lhs(self,subexpr):self.childs[1] = wrap_expr(subexpr)
@property
def rhs(self):return self.childs[0]
@rhs.setter
def rhs(self,subexpr):self.childs[0] = wrap_expr(subexpr)
@property
def cond(self):return self.childs[2]
@cond.setter
def cond(self,cond):
cond = wrap_expr(cond)
cond._fix_width(1)
self.childs[2] = cond
def __init__(self):raise NotImplementedError()
def __int__(self):return self._value_calc_func(self)
def __len__(self):
if self.width is None:raise ValueError('Getting width of width-free expr "%s".'%str(self))
if self.width <= 0 :raise ValueError('Found negative width in "%s".'%str(self))
return self.width
@property
def _is_constant(self):return False
@property
def length(self):return 1
def _wrap_constant(self,value):return Hexadecimal(value,width=self.width)
@staticmethod
def _hex_value(*args,**kwargs):return Hexadecimal(*args,**kwargs)
@staticmethod
def _is_constant_value(expr,value):return isinstance(expr,(int,ConstExpr)) and int(expr)==value
@staticmethod
    def _is_expr_typename(obj,typename):return isinstance(obj,Expr) and obj._typename == typename
@propagated
def __mul__(self,rhs):return self if rhs is None else Concatenate(self,rhs)
@propagated
def __pos__(self):return self
@propagated
def __pow__(self,rhs):
if rhs ==0:return None
else:return Replicate(self,rhs)
@propagated
def __lt__ (self,rhs):return BinaryOperator('<',self,rhs)
@propagated
def __gt__ (self,rhs):return BinaryOperator('>',self,rhs)
@propagated
def __le__ (self,rhs):return BinaryOperator('<=',self,rhs)
@propagated
def __ge__ (self,rhs):return BinaryOperator('>=',self,rhs)
@propagated
def __add__(self,rhs):return AddOperator(self,rhs)
@propagated
def __sub__(self,rhs):return BinaryOperator('-',self,rhs)
@propagated
def __and__(self,rhs):return AndOperator(self,rhs)
@propagated
def __or__ (self,rhs):return OrOperator(self,rhs)
@propagated
def __xor__(self,rhs):return XorOperator(self,rhs)
@propagated
def __invert__(self):return UnaryOperator('~',self)
@propagated
def __neg__(self):return UnaryOperator(' -',self)
@propagated
def __lshift__(self,rhs):return BinaryOperator('<<',self,rhs)
@propagated
def __rshift__(self,rhs):return BinaryOperator('>>',self,rhs)
@propagated
def __floordiv__(self,rhs):return BinaryOperator('==',self,rhs)
@propagated
def validif(self,cond):return ValidIf(cond,self)
@propagated
def mux(self,lhs,rhs):return Mux(self,lhs,rhs)
@propagated
def multiply_operate(self,rhs):return MulOperator(self,rhs)
@propagated
def divide_operate(self,rhs):return DivOperator(self,rhs)
@propagated
def module_operate(self,rhs):return ModOperator(self,rhs)
@propagated
def equal_to(self,rhs):return BinaryOperator('==',self,rhs)
@propagated
def not_equal_to(self,rhs):return BinaryOperator('!=',self,rhs)
@propagated
def reduce_or(self):return UnaryOperator(' |',self)
@propagated
def reduce_and(self):return UnaryOperator(' &',self)
@propagated
def reduce_xor(self):return UnaryOperator(' ^',self)
def __getitem__(self,key):raise SyntaxError('Invalid fetch "[%s]" from expr "%s".'%(str(key),str(self)))
def __rpow__(self,lhs):return wrap_expr(lhs)**self
def __rmul__(self,lhs):return self if lhs is None else wrap_expr(lhs)*self
def __radd__(self,lhs):return wrap_expr(lhs)+self
def __rsub__(self,lhs):return wrap_expr(lhs)-self
def __rand__(self,lhs):return wrap_expr(lhs)&self
def __ror__(self,lhs) :return wrap_expr(lhs)|self
def __rxor__(self,lhs):return wrap_expr(lhs)^self
def __rfloordiv__(self,lhs):return wrap_expr(lhs)//self
@staticmethod
def full_adder_c(a,b,c):
return a&b|a&c|b&c
@staticmethod
def full_adder_s(a,b,c):
return a^b^c
def _set_default(self,typename,n_childs=0):
self.comments = []
self.typename = typename
self.childs = [None]*ASTNode._expr_n_childs[typename]
self.value = None
self.width = None
def _connect_port(self,m,p):
if p.io!='input':raise KeyError('Assigning "%s" to %s port "%s"'%(str(self),p.io,str(p)))
self._fix_width(p)
class UnaryOperator(Expr):
def __init__(self,typename,rhs):
self._set_default(typename)
self.rhs = rhs
self._calc_width()
class BinaryOperator(Expr):
def __init__(self,typename,lhs,rhs):
self._set_default(typename)
self.rhs = rhs
self.lhs = lhs
self._calc_width()
class Mux(Expr):
def __init__(self,cond,lhs,rhs):
self._set_default('?:')
self.rhs = rhs
self.lhs = lhs
self.cond = cond
self._calc_width()
class MultilineAlignOperator(Expr):
@property
def _display_as_long(self):return self._display_as_long_val
@_display_as_long.setter
def _display_as_long(self,as_long):
if not isinstance(as_long,bool):raise TypeError('Type of "long" should be bool')
self._display_as_long_val = as_long
class AssociativeOperator(MultilineAlignOperator):
def _merge_childs(self,other):
other = wrap_expr(other)
if other._typename==self._typename:
self._display_as_long|=other._display_as_long
self.childs.extend(other.childs)
else:self.childs.append(other)
def __init__(self,typename,lhs,rhs,long=False):
self._display_as_long = long
self._set_default(typename)
lhs = wrap_expr(lhs)
rhs = wrap_expr(rhs)
expr_calc_width(lhs,rhs)
self._merge_childs(lhs)
self._merge_childs(rhs)
self._calc_width()
class OrOperator(AssociativeOperator):
def __init__(self,lhs,rhs,long=False):AssociativeOperator.__init__(self,'|',lhs,rhs,long)
def __ior__(self,other):
expr_calc_width(self,other)
self._merge_childs(other)
return self
class AndOperator(AssociativeOperator):
def __init__(self,lhs,rhs,long=False):AssociativeOperator.__init__(self,'&',lhs,rhs,long)
def __iand__(self,other):
expr_calc_width(self,other)
self._merge_childs(other)
return self
class AddOperator(AssociativeOperator):
def __init__(self,lhs,rhs,long=False):AssociativeOperator.__init__(self,'+',lhs,rhs,long)
def __iadd__(self,other):
expr_calc_width(self,other)
self._merge_childs(other)
return self
class XorOperator(AssociativeOperator):
def __init__(self,lhs,rhs,long=False):AssociativeOperator.__init__(self,'^',lhs,rhs,long)
def __ixor__(self,other):
expr_calc_width(self,other)
self._merge_childs(other)
return self
def fix_slice(key,width):
start = (0 if key.step is None else key.stop-key.step) if key.start is None else key.start
    stop = (width if key.step is None else key.start+key.step) if key.stop is None else key.stop
width = stop - start
return start,stop,width
class Concatenate(AssociativeOperator):
def _extract_childs(self,args):
for arg in args:
if isinstance(arg,(tuple,list)):self._extract_childs(arg)
else:self._merge_childs(arg)
def __init__(self,*args,long=False):
self._display_as_long = long
self._set_default('{}')
self._extract_childs(args)
self._calc_width()
def __setitem__(self,key,val):
if not isinstance(key,slice):raise TypeError(type(key))
expr_match_width(val,len(self))
start,stop,width = fix_slice(key,len(self))
base = 0
for expr in self.childs:
if stop < base or start > base + len(expr):continue
if start > base:
expr[start-base:] = val[:base-start+len(expr)]
elif stop<base +len(expr):
expr[:stop-base] = val[-(stop-base):]
else:
expr[:] = val[base-start::len(expr)]
base += len(expr)
class ValidIf(BinaryOperator):
def __init__(self,lhs,rhs):
self._set_default('validif')
self.rhs = rhs
self.lhs = lhs
self._calc_width()
class MulOperator(BinaryOperator):
def __init__(self,lhs,rhs):
self._set_default('*')
self.rhs = rhs
self.lhs = lhs
self._calc_width()
class DivOperator(BinaryOperator):
def __init__(self,lhs,rhs):
self._set_default('/')
self.rhs = rhs
self.lhs = lhs
self._calc_width()
class ModOperator(BinaryOperator):
def __init__(self,lhs,rhs):
self._set_default('%')
self.rhs = rhs
self.lhs = lhs
self._calc_width()
class Replicate(UnaryOperator):
@property
def count(self):return self._count
@count.setter
def count(self,count):
count=int(count)
self._count = count
if count<=0:raise ValueError('Invalid replicate "%s".'%self)
def __init__(self,rhs,count):
self._set_default('{{}}')
self.rhs = rhs
self.count = count
self._calc_width()
class ConstExpr(Expr):
_radix_fmtstrs = {
16:lambda width,value:("%d'h{:0>%dx}"%(width,(width+3)//4)).format(value),
10:lambda width,value:("%d'd{:0>d}" % width ).format(value),
8 :lambda width,value:("%d'o{:0>%do}"%(width,(width+2)//3)).format(value),
2 :lambda width,value:("%d'b{:0>%db}"%(width, width )).format(value)}
@property
def radix(self):return self._radix
@radix.setter
def radix(self,radix):
if radix not in {2,8,10,16}:raise ValueError('Invalid radix.')
self._radix = radix
self._radix_fmtstr = self._radix_fmtstrs[radix]
    @staticmethod
    def _convert_str(value):
        # Pack a character string into an integer, one byte per character.
        # The original referenced an undefined name `x` instead of `value`.
        y = 0
        for ch in value:
            c = ord(ch)
            if c>=256 or c<0:raise RuntimeError('Charset Error')
            y = (y<<8)|c
        return y
@property
def value(self):return self._value
@value.setter
def value(self,value):
if value is None:self._value = value
else:
if isinstance(value,str):self._value = self._convert_str(value)
else:self._value = int(value)
if not self._width is None:self._value&=(1<<self._width)-1
@property
def width(self):return self._width
@width.setter
def width(self,width):
if not isinstance(width,int):raise TypeError(type(width),width)
if width<=0:raise ValueError('Constant value with non-positive width.')
self._width = width
if not self._value is None:self._value&=(1<<self._width)-1
@property
def _driven(self):return 0 if self._value is None else self._constant
@property
def _constant(self):return -1 if self._width is None else (1<<self._width)-1
@property
def _is_constant(self):return True
def _set_default(self,typename,n_childs=0):
self.comments = []
self.childs = [None]*n_childs
self.value = None
self.width = None
def __init__(self,value,width=None,radix=10):
self.typename = 'const'
self._value = 0
self._width = None
if not width is None:self.width = width
self.radix = radix
self.value = value
def __getitem__(self,key):
if isinstance(key,slice):
for a in {'start','stop','step'}:
if not isinstance(getattr(key,a),(int,type(None))):
raise SyntaxError('Invalid fetch format from constant expression.')
start = 0 if key.start is None else key.start
if not key.step is None:return Hexadecimal(int(self)>>start,width=key.step)
elif not key.stop is None:return Hexadecimal(int(self)>>start,width=key.stop-start)
return Hexadecimal(int(self)>>start,width=self.width-start)
elif isinstance(key,(int,ConstExpr)):
loc = int(key)
if loc<0:loc += len(self)
return Binary((self.value>>loc)&1,width=1)
elif isinstance(key,Expr):
n = 1<<len(key)
v = self.value
m = count_one(v)
if m==0:return Binary(0,width=1)
elif m==n:return Binary(1,width=1)
else:
if m<=(n>>1):
expr = 0
for i in range(n):
if ((v>>i)&1)==1:expr|=key//i
else:
expr = 1
for i in range(n):
if ((v>>i)&1)==0:expr&=~(key//i)
return expr
else:raise TypeError(type(key))
def __str__(self):
width = self.width
value = self.value
if value is None:
if width is None:return "'bz"
else:return "%d'bz"%width
if width is None:
if value<0:warnings.warn('Negative value without width declared.')
return str(value)
result = self._radix_fmtstr(width,value)
return result
def __int__(self):return self.value
def __eq__(self,other):
if isinstance(other,ConstExpr):return self.width==other.width and int(self)==int(other)
elif isinstance(other,int):return self.width is None and int(self)==other
else:return False
def __hash__(self):return int(self)+(0 if self.width is None else self.width)
def Hexadecimal(x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=16)
def Binary (x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=2 )
def Octal (x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=8 )
def Decimal (x,width=None):
if width==0:return None
else:return ConstExpr(x,width=width,radix=10)
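# An illustrative sketch (not part of the original module). It assumes the
# compute tables imported above define the 'const' entry, exactly as the
# ConstExpr constructor already requires:
#
#   >>> c = Hexadecimal(0xAB, width=8)
#   >>> str(c)
#   "8'hab"
#   >>> int(c)
#   171
#   >>> str(c[0:4])      # low nibble, re-wrapped as a 4-bit constant
#   "4'hb"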
| 40.237624
| 123
| 0.62906
| 14,690
| 0.903666
| 0
| 0
| 5,562
| 0.342151
| 0
| 0
| 713
| 0.043861
|
c529b4e8440b64034ec82bd0b0da8014712c8c78
| 13,936
|
py
|
Python
|
Common_3/Tools/ForgeShadingLanguage/generators/d3d.py
|
divecoder/The-Forge
|
e882fbc000b2915b52c98fe3a8c791930490dd3c
|
[
"Apache-2.0"
] | 3,058
|
2017-10-03T01:33:22.000Z
|
2022-03-30T22:04:23.000Z
|
Common_3/Tools/ForgeShadingLanguage/generators/d3d.py
|
juteman/The-Forge
|
e882fbc000b2915b52c98fe3a8c791930490dd3c
|
[
"Apache-2.0"
] | 157
|
2018-01-26T10:18:33.000Z
|
2022-03-06T10:59:23.000Z
|
Common_3/Tools/ForgeShadingLanguage/generators/d3d.py
|
juteman/The-Forge
|
e882fbc000b2915b52c98fe3a8c791930490dd3c
|
[
"Apache-2.0"
] | 388
|
2017-12-21T10:52:32.000Z
|
2022-03-31T18:25:49.000Z
|
""" GLSL shader generation """
from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace
from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl
import os, sys, importlib, re
from shutil import copyfile
def pssl(fsl, dst, rootSignature=None):
return d3d(fsl, dst, pssl=True, d3d12=False, rootSignature=rootSignature)
def prospero(fsl, dst):
return d3d(fsl, dst, pssl=True, prospero=True)
def xbox(fsl, dst, rootSignature=None):
return d3d(fsl, dst, xbox=True, d3d12=True, rootSignature=rootSignature)
def d3d12(fsl, dst):
return d3d(fsl, dst, d3d12=True)
def scarlett(fsl, dst, rootSignature=None):
return xbox(fsl, dst, rootSignature)
def d3d(fsl, dst, pssl=False, prospero=False, xbox=False, rootSignature=None, d3d12=False):
shader = getShader(fsl, dst)
shader_src = getHeader(fsl)
if not (d3d12 or pssl or xbox):
shader_src += ['#define DIRECT3D11\n']
if prospero:
import prospero
pssl = prospero
shader_src += ['#define PROSPERO\n']
shader_src += prospero.preamble()
elif pssl:
import orbis
pssl = orbis
shader_src += ['#define ORBIS\n']
shader_src += orbis.preamble()
if xbox:
import xbox
shader_src += ['#define XBOX\n']
shader_src += xbox.preamble()
if d3d12:
shader_src += ['#define DIRECT3D12\n']
shader_src += ['#define STAGE_', shader.stage.name, '\n']
if shader.enable_waveops:
shader_src += ['#define ENABLE_WAVEOPS()\n']
# directly embed d3d header in shader
header_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'includes', 'd3d.h')
    with open(header_path) as header_file:
        header_lines = header_file.readlines()
shader_src += header_lines + ['\n']
nonuniformresourceindex = None
    # tessellation
pcf_returnType = None
# for SV_PrimitiveID usage in pixel shaders, generate a pass-through gs
passthrough_gs = False
if pssl and shader.stage == Stages.FRAG:
for dtype, dvar in shader.flat_args:
if getMacroName(dtype).upper() == 'SV_PRIMITIVEID':
passthrough_gs = True
if prospero:
prospero.gen_passthrough_gs(shader, dst.replace('frag', 'geom'))
else:
orbis.gen_passthrough_gs(shader, dst.replace('frag', 'geom'))
last_res_decl = 0
explicit_res_decl = None
srt_resources = { descriptor_set.name: [] for descriptor_set in DescriptorSets }
srt_free_resources = []
srt_references = []
defineLoc = len(shader_src)
parsing_struct = None
skip_semantics = False
struct_elements = []
srt_redirections = set()
for line in shader.lines:
def get_uid(name):
return name + '_' + str(len(shader_src))
        # don't process commented lines
if line.strip().startswith('//'):
shader_src += [line]
continue
if is_groupshared_decl(line):
dtype, dname = getMacro(line)
basename = getArrayBaseName(dname)
shader_src += ['#define srt_'+basename+' '+basename+'\n']
if not pssl:
line = 'groupshared '+dtype+' '+dname+';\n'
else:
line = 'thread_group_memory '+dtype+' '+dname+';\n'
if 'DECLARE_RESOURCES' in line:
explicit_res_decl = len(shader_src) + 1
line = '//' + line
if line.strip().startswith('STRUCT(') or line.strip().startswith('CBUFFER(') or line.strip().startswith('PUSH_CONSTANT('):
parsing_struct = getMacro(line)
struct_name = parsing_struct[0]
struct_elements = []
if pssl and 'PUSH_CONSTANT' in line:
skip_semantics = True
macro = get_uid(struct_name)
shader_src += ['#define ', macro, '\n']
srt_free_resources += [(macro, pssl.declare_rootconstant(struct_name))]
if pssl and 'CBUFFER' in line:
skip_semantics = True
res_freq = parsing_struct[1]
macro = get_uid(struct_name)
shader_src += ['#define ', macro, '\n']
if 'rootcbv' in struct_name:
srt_free_resources += [(macro, pssl.declare_cbuffer(struct_name))]
else:
srt_resources[res_freq] += [(macro, pssl.declare_cbuffer(struct_name))]
if parsing_struct and line.strip().startswith('DATA('):
data_decl = getMacro(line)
if skip_semantics or data_decl[-1] == 'None':
line = get_whitespace(line) + data_decl[0] + ' ' + data_decl[1] + ';\n'
if pssl and type(parsing_struct) is not str:
basename = getArrayBaseName(data_decl[1])
macro = 'REF_' + get_uid(basename)
shader_src += ['#define ', macro, '\n']
init, ref = pssl.declare_element_reference(shader, parsing_struct, data_decl)
shader_src += [*init, '\n']
srt_redirections.add(basename)
struct_elements += [(macro, ref)]
srt_references += [(macro, (init, ref))]
shader_src += [line]
continue
if parsing_struct and '};' in line:
# if this shader is the receiving end of a passthrough_gs, insert the necessary inputs
if passthrough_gs and shader.struct_args[0][0] == parsing_struct:
shader_src += ['\tDATA(FLAT(uint), PrimitiveID, TEXCOORD8);\n']
shader_src += [line]
skip_semantics = False
if type(parsing_struct) is not str:
last_res_decl = len(shader_src)+1
parsing_struct = None
continue
resource_decl = None
if line.strip().startswith('RES('):
resource_decl = getMacro(line)
last_res_decl = len(shader_src)+1
if pssl and resource_decl:
# shader_src += ['// ', line.strip(), '\n']
_, res_name, res_freq, _, _ = resource_decl
basename = getArrayBaseName(res_name)
macro = get_uid(basename)
# shader_src += ['#define ', macro, ' //', line.strip(), '\n']
shader_src += ['#define ', macro, '\n']
srt_resources[res_freq] += [(macro, pssl.declare_resource(resource_decl))]
# macro = 'REF_' + macro
# shader_src += ['#define ', macro, '\n']
init, ref = pssl.declare_reference(shader, resource_decl)
shader_src += [*init, '\n']
srt_references += [(macro, (init, ref))]
srt_redirections.add(basename)
last_res_decl = len(shader_src)+1
# continue
if 'TESS_VS_SHADER(' in line and prospero:
vs_filename = getMacro(line).strip('"')
vs_fsl_path = os.path.join(os.path.dirname(fsl), vs_filename)
ls_vs_filename = 'ls_'+vs_filename.replace('.fsl', '')
vs_pssl = os.path.join(os.path.dirname(dst), ls_vs_filename)
d3d(vs_fsl_path, vs_pssl, pssl=True, prospero=True)
shader_src += [
'#undef VS_MAIN\n',
'#define VS_MAIN vs_main\n',
'#include "', ls_vs_filename, '"\n'
]
continue
if '_MAIN(' in line and shader.stage == Stages.TESC and prospero:
shader_src += pssl.insert_tesc('vs_main')
if '_MAIN(' in line and shader.returnType:
if shader.returnType not in shader.structs:
if shader.stage == Stages.FRAG:
if not 'SV_DEPTH' in shader.returnType.upper():
line = line[:-1] + ': SV_TARGET\n'
else:
line = line[:-1] + ': SV_DEPTH\n'
if shader.stage == Stages.VERT:
line = line[:-1] + ': SV_POSITION\n'
# manually transform Type(var) to Type var (necessary for DX11/fxc)
if '_MAIN(' in line:
for dtype, var in shader.struct_args:
line = line.replace(dtype+'('+var+')', dtype + ' ' + var)
for dtype, dvar in shader.flat_args:
sem = getMacroName(dtype).upper()
innertype = getMacro(dtype)
ldtype = line.find(dtype)
line = line[:ldtype]+innertype+line[ldtype+len(dtype):]
l0 = line.find(' '+dvar, ldtype) + len(dvar)+1
line = line[:l0]+' : '+sem+line[l0:]
# if this shader is the receiving end of a passthrough_gs, get rid of the PrimitiveID input
if passthrough_gs:
for dtype, dvar in shader.flat_args:
if 'SV_PRIMITIVEID' in dtype.upper():
upper_line = line.upper()
l0 = upper_line.find('SV_PRIMITIVEID')
l1 = upper_line.rfind(',', 0, l0)
line = line.replace(line[l1: l0+len('SV_PRIMITIVEID')], '')
if pssl:
for dtype, darg in shader.flat_args:
if 'SV_INSTANCEID' in dtype.upper():
shader_src += pssl.set_indirect_draw()
if '_MAIN(' in line and (pssl or xbox) and rootSignature:
l0 = rootSignature.find('SrtSignature')
l1 = rootSignature.find('{', l0)
srt_name = rootSignature[l0: l1].split()[-1]
res_sig = 'RootSignature' if xbox else 'SrtSignature'
shader_src += ['[', res_sig, '(', srt_name, ')]\n', line]
continue
# if 'INIT_MAIN' in line:
# if pssl:
# shader_src += ['\tinit_global_references();\n']
if 'INIT_MAIN' in line and shader.returnType:
# mName = getMacroName(shader.returnType)
# mArg = getMacro(shader.returnType)
# line = line.replace('INIT_MAIN', '{} {}'.format(mName, mArg))
line = get_whitespace(line)+'//'+line.strip()+'\n'
# if this shader is the receiving end of a passthrough_gs, copy the PrimitiveID from GS output
if passthrough_gs:
for dtype, dvar in shader.flat_args:
if 'SV_PRIMITIVEID' in dtype.upper():
shader_src += ['uint ', dvar, ' = ', shader.struct_args[0][1], '.PrimitiveID;\n']
if 'BeginNonUniformResourceIndex(' in line:
index, max_index = getMacro(line), None
assert index != [], 'No index provided for {}'.format(line)
if type(index) == list:
max_index = index[1]
index = index[0]
nonuniformresourceindex = index
if pssl:
shader_src += pssl.begin_nonuniformresourceindex(nonuniformresourceindex, max_index)
continue
else:
line = '#define {0} NonUniformResourceIndex({0})\n'.format(nonuniformresourceindex)
if 'EndNonUniformResourceIndex()' in line:
assert nonuniformresourceindex, 'EndNonUniformResourceIndex: BeginNonUniformResourceIndex not called/found'
if pssl:
shader_src += pssl.end_nonuniformresourceindex(nonuniformresourceindex)
continue
else:
line = '#undef {}\n'.format(nonuniformresourceindex)
nonuniformresourceindex = None
        elif re.match(r'\s*RETURN', line):
if shader.returnType:
line = line.replace('RETURN', 'return ')
else:
line = line.replace('RETURN()', 'return')
        # tessellation
if shader.pcf and shader.pcf in line and not pcf_returnType:
loc = line.find(shader.pcf)
pcf_returnType = line[:loc].strip()
# line = getMacroName(pcf_returnType) + ' ' + line[loc:]
for dtype, dvar in shader.pcf_arguments:
if not 'INPUT_PATCH' in dtype and not 'OUTPUT_PATCH' in dtype:
line = line.replace(dtype, getMacro(dtype))
line = line.replace(dvar, dvar+': '+getMacroName(dtype))
        if pcf_returnType and re.match(r'\s*PCF_INIT', line):
# line = line.replace('PCF_INIT', getMacroName(pcf_returnType) + ' ' + getMacro(pcf_returnType))
line = line.replace('PCF_INIT', '')
if pcf_returnType and 'PCF_RETURN' in line:
line = line.replace('PCF_RETURN', 'return ')
# line = line.replace('PCF_RETURN', '{ return ' + getMacro(pcf_returnType) + ';}')
if 'INDIRECT_DRAW(' in line:
if pssl:
shader_src += pssl.set_indirect_draw()
line = '//' + line
if 'SET_OUTPUT_FORMAT(' in line:
if pssl:
shader_src += pssl.set_output_format(getMacro(line))
line = '//' + line
if 'PS_ZORDER_EARLYZ(' in line:
if xbox:
shader_src += xbox.set_ps_zorder_earlyz()
line = '//' + line
shader_src += [line]
if pssl:
if explicit_res_decl:
last_res_decl = explicit_res_decl
        if last_res_decl > 0: # skip srt altogether if no declared resources or not requested
srt = pssl.gen_srt(srt_resources, srt_free_resources, srt_references)
            with open(dst + '.srt.h', 'w') as srt_file:
                srt_file.write(srt)
shader_src.insert(last_res_decl, '\n#include \"' + os.path.basename(dst) + '.srt.h\"\n')
# insert root signature at the end (not sure whether that will work for xbox)
if rootSignature and pssl:
shader_src += [_line+'\n' for _line in rootSignature.splitlines()]# + shader.lines
if rootSignature and xbox:
shader_src += rootSignature + ['\n']# + shader.lines
    with open(dst, 'w') as dst_file:
        dst_file.writelines(shader_src)
return 0
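# A hypothetical invocation sketch (the paths are assumptions, not part of
# this module): compile one FSL source into a Direct3D 12 shader.
#
#   d3d12('shaders/fill_buffers.comp.fsl', 'out/fill_buffers.comp.hlsl')
#
# The pssl/xbox entry points additionally expect the platform preamble
# modules (orbis, prospero, xbox) to be importable, as the imports above show.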
| 39.703704
| 130
| 0.564581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,748
| 0.197187
|
c52ada24bea0c59c6a12c8a2a1dea577b379a815
| 1,673
|
py
|
Python
|
test/relationships/test_minhash.py
|
bateman-research/search-sifter
|
78b05beac5ca21862d2773609dc4b9395a4982a5
|
[
"MIT"
] | 1
|
2020-07-20T13:20:00.000Z
|
2020-07-20T13:20:00.000Z
|
test/relationships/test_minhash.py
|
bateman-research/search-sifter
|
78b05beac5ca21862d2773609dc4b9395a4982a5
|
[
"MIT"
] | null | null | null |
test/relationships/test_minhash.py
|
bateman-research/search-sifter
|
78b05beac5ca21862d2773609dc4b9395a4982a5
|
[
"MIT"
] | null | null | null |
import pytest
import searchsifter.relationships.minhash as mh
import searchsifter.relationships.jaccard as jc
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1, 2}, {2, 3}, 1/3),
({1}, {2}, 0),
({1}, {1}, 1)
])
def test_jaccard(a, b, result):
assert jc.jaccard(a, b) == result
assert jc.jaccard(b, a) == result
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1}, {1, 2}, 1),
({1, 2}, {2, 3}, 0.5),
({1}, {2}, 0),
({1}, {1}, 1),
])
def test_jaccard_containment(a, b, result):
assert jc.jaccard_containment(a, b) == result
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1, 2}, {2, 3}, 1/3),
({1}, {2}, 0),
({1}, {1}, 1)
])
def test_minhash(a, b, result):
s, t = mh.signature(a, 5), mh.signature(b, 5)
assert mh.minhash(s, t, 5) == result
assert mh.minhash(t, s, 5) == result
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1}, {1, 2}, 1),
({1, 2}, {2, 3}, 0.5),
({1}, {2}, 0),
({1}, {1}, 1),
])
def test_minhash_containment(a, b, result):
s, t = mh.signature(a, 5), mh.signature(b, 5)
assert mh.minhash_containment(s, t) == result
@pytest.fixture
def a():
return set(range(1, 100))
@pytest.fixture
def b():
return set(range(50, 100))
@pytest.fixture
def c():
return set(range(75, 100))
def test_intersection(a, b, c):
assert mh.intersection_signature(a, b) == set(range(50, 100))
assert mh.intersection_signature(a, b, c) == set(range(75, 100))
def test_union(a, b):
assert mh.union_signature(a, b, 100) == a
assert len(mh.union_signature(a, b, 20)) == 20
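# An illustrative sketch (an assumption, not one of the tests above): with a
# larger signature the MinHash estimate approaches the exact Jaccard index.
#
#   big_a, big_b = set(range(1000)), set(range(500, 1500))
#   sig_a, sig_b = mh.signature(big_a, 200), mh.signature(big_b, 200)
#   estimate = mh.minhash(sig_a, sig_b, 200)   # close to 1/3, not exact
#   exact = jc.jaccard(big_a, big_b)           # exactly 1/3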
| 22.306667
| 68
| 0.550508
| 0
| 0
| 0
| 0
| 1,252
| 0.748356
| 0
| 0
| 56
| 0.033473
|
c52c02d266fb08aaf3f326c61fa1e270518102e0
| 121
|
py
|
Python
|
cartographer/utils/collections.py
|
Patreon/cartographer
|
fe5c03decf01c9f7894bb9cf1f839af435143527
|
[
"Apache-2.0"
] | 29
|
2016-03-30T00:53:42.000Z
|
2022-03-02T23:45:12.000Z
|
cartographer/utils/collections.py
|
Patreon/cartographer
|
fe5c03decf01c9f7894bb9cf1f839af435143527
|
[
"Apache-2.0"
] | 20
|
2016-04-19T18:34:05.000Z
|
2022-02-14T14:18:33.000Z
|
cartographer/utils/collections.py
|
Patreon/cartographer
|
fe5c03decf01c9f7894bb9cf1f839af435143527
|
[
"Apache-2.0"
] | 5
|
2016-04-28T00:44:24.000Z
|
2019-10-26T08:09:17.000Z
|
def filter_dict(dictionary_to_filter):
return dict((k, v) for k, v in dictionary_to_filter.items() if v is not None)
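# Example (not part of the original module):
#   filter_dict({'a': 1, 'b': None, 'c': 0})  ->  {'a': 1, 'c': 0}
# Falsy-but-present values such as 0 or '' are kept; only None is dropped.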
| 40.333333
| 81
| 0.743802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c52efabf8d8724ff1df4180be0a678f90bbcc559
| 1,672
|
py
|
Python
|
tests/integration/test_between_tags.py
|
liorbass/pydriller
|
26e6b594102e1f0a3e1029c5389fedec3cc55471
|
[
"Apache-2.0"
] | 583
|
2018-04-09T09:48:47.000Z
|
2022-03-23T17:27:10.000Z
|
tests/integration/test_between_tags.py
|
liorbass/pydriller
|
26e6b594102e1f0a3e1029c5389fedec3cc55471
|
[
"Apache-2.0"
] | 195
|
2018-05-25T08:10:58.000Z
|
2022-03-29T09:28:37.000Z
|
tests/integration/test_between_tags.py
|
liorbass/pydriller
|
26e6b594102e1f0a3e1029c5389fedec3cc55471
|
[
"Apache-2.0"
] | 134
|
2018-04-10T12:57:34.000Z
|
2022-03-29T13:40:35.000Z
|
# Copyright 2018 Davide Spadini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pydriller.repository import Repository
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
def test_between_revisions():
from_tag = 'tag1'
to_tag = 'tag3'
lc = list(Repository('test-repos/tags',
from_tag=from_tag,
to_tag=to_tag).traverse_commits())
assert len(lc) == 5
assert '6bb9e2c6a8080e6b5b34e6e316c894b2ddbf7fcd' == lc[0].hash
assert 'f1a90b8d7b151ceefd3e3dfc0dc1d0e12b5f48d0' == lc[1].hash
assert '4638730126d40716e230c2040751a13153fb1556' == lc[2].hash
assert 'a26f1438bd85d6b22497c0e5dae003812becd0bc' == lc[3].hash
assert '627e1ad917a188a861c9fedf6e5858b79edbe439' == lc[4].hash
def test_multiple_repos_with_tags():
from_tag = 'tag2'
to_tag = 'tag3'
repos = [
'test-repos/tags',
'test-repos/tags',
'test-repos/tags'
]
lc = list(Repository(path_to_repo=repos,
from_tag=from_tag,
to_tag=to_tag).traverse_commits())
assert len(lc) == 9
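# A hedged variant (an assumption, not one of the tests above): the tag
# boundaries can also be used individually, e.g. everything up to tag3:
#
#   lc = list(Repository('test-repos/tags', to_tag='tag3').traverse_commits())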
| 34.122449
| 91
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 909
| 0.54366
|
c52f836bfe409a72332984d1519b1c551dfb66b2
| 847
|
py
|
Python
|
tests/modules/command/button/test_wa_url_parameter.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
tests/modules/command/button/test_wa_url_parameter.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
tests/modules/command/button/test_wa_url_parameter.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from moceansdk.modules.command.button.wa_url_parameter_button import (
WaUrlParameterButton,
)
class TestWaUrlParameter(TestCase):
def test_type(self):
self.assertEqual(WaUrlParameterButton().type(), "url")
def test_setter_return(self):
obj = WaUrlParameterButton()
self.assertIsInstance(obj.set_url_parameter("url"), WaUrlParameterButton)
def test_is_required_key_set(self):
required_key = ["url_parameter"]
obj = WaUrlParameterButton()
self.assertEqual(obj.required_key(), required_key)
def test_request_data(self):
params = {
"url_parameter": "url",
"type": "url",
}
obj = WaUrlParameterButton()
obj.set_url_parameter("url")
self.assertEqual(obj.get_request_data(), params)
| 27.322581
| 81
| 0.670602
| 714
| 0.842975
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.072019
|
c52fa39e205177e471e16b57a23781f02f1d2a0d
| 7,345
|
py
|
Python
|
2019/day21_input.py
|
coingraham/adventofcode
|
52b5b3f049242881285d0c2704f44cc1ee2a821e
|
[
"MIT"
] | 5
|
2020-12-04T04:30:17.000Z
|
2021-11-12T11:26:22.000Z
|
2019/day21_input.py
|
coingraham/adventofcode
|
52b5b3f049242881285d0c2704f44cc1ee2a821e
|
[
"MIT"
] | null | null | null |
2019/day21_input.py
|
coingraham/adventofcode
|
52b5b3f049242881285d0c2704f44cc1ee2a821e
|
[
"MIT"
] | null | null | null |
input_data = """109,2050,21101,0,966,1,21101,13,0,0,1106,0,1378,21101,20,0,0,1105,1,1337,21101,0,27,0,1105,1,1279,1208,1,65,748,1005,748,73,1208,1,79,748,1005,748,110,1208,1,78,748,1005,748,132,1208,1,87,748,1005,748,169,1208,1,82,748,1005,748,239,21101,0,1041,1,21102,1,73,0,1105,1,1421,21101,0,78,1,21101,1041,0,2,21102,88,1,0,1106,0,1301,21101,0,68,1,21102,1041,1,2,21102,1,103,0,1106,0,1301,1101,0,1,750,1105,1,298,21102,1,82,1,21102,1,1041,2,21102,1,125,0,1106,0,1301,1102,1,2,750,1106,0,298,21101,0,79,1,21102,1041,1,2,21102,147,1,0,1105,1,1301,21102,84,1,1,21102,1,1041,2,21101,162,0,0,1106,0,1301,1101,0,3,750,1105,1,298,21102,1,65,1,21101,1041,0,2,21101,184,0,0,1106,0,1301,21102,76,1,1,21102,1041,1,2,21101,199,0,0,1106,0,1301,21101,75,0,1,21102,1,1041,2,21101,0,214,0,1105,1,1301,21102,221,1,0,1106,0,1337,21101,10,0,1,21101,0,1041,2,21101,236,0,0,1106,0,1301,1106,0,553,21102,1,85,1,21101,1041,0,2,21101,254,0,0,1106,0,1301,21102,1,78,1,21101,0,1041,2,21102,269,1,0,1106,0,1301,21102,276,1,0,1105,1,1337,21102,1,10,1,21101,1041,0,2,21102,291,1,0,1106,0,1301,1102,1,1,755,1105,1,553,21102,32,1,1,21102,1041,1,2,21101,313,0,0,1105,1,1301,21102,320,1,0,1105,1,1337,21102,1,327,0,1106,0,1279,1202,1,1,749,21102,1,65,2,21102,1,73,3,21101,0,346,0,1105,1,1889,1206,1,367,1007,749,69,748,1005,748,360,1102,1,1,756,1001,749,-64,751,1106,0,406,1008,749,74,748,1006,748,381,1101,-1,0,751,1105,1,406,1008,749,84,748,1006,748,395,1101,0,-2,751,1105,1,406,21102,1100,1,1,21102,1,406,0,1105,1,1421,21102,32,1,1,21101,0,1100,2,21101,421,0,0,1105,1,1301,21101,0,428,0,1106,0,1337,21101,435,0,0,1105,1,1279,2102,1,1,749,1008,749,74,748,1006,748,453,1102,-1,1,752,1105,1,478,1008,749,84,748,1006,748,467,1101,-2,0,752,1105,1,478,21101,1168,0,1,21101,0,478,0,1105,1,1421,21102,485,1,0,1105,1,1337,21101,0,10,1,21101,0,1168,2,21102,500,1,0,1105,1,1301,1007,920,15,748,1005,748,518,21102,1,1209,1,21101,0,518,0,1105,1,1421,1002,920,3,529,1001,529,921,529,101,0,750,0,1001,529,1,537,1002,751,1,0,1001,537,1,545,1001,752,0,0,1001,920,1,920,1105,1,13,1005,755,577,1006,756,570,21102,1,1100,1,21102,1,570,0,1106,0,1421,21101,987,0,1,1105,1,581,21101,1001,0,1,21101,0,588,0,1105,1,1378,1101,758,0,594,101,0,0,753,1006,753,654,21001,753,0,1,21102,610,1,0,1105,1,667,21102,0,1,1,21101,621,0,0,1106,0,1463,1205,1,647,21101,0,1015,1,21102,1,635,0,1106,0,1378,21102,1,1,1,21101,646,0,0,1106,0,1463,99,1001,594,1,594,1105,1,592,1006,755,664,1101,0,0,755,1106,0,647,4,754,99,109,2,1102,726,1,757,22102,1,-1,1,21102,9,1,2,21102,1,697,3,21101,692,0,0,1106,0,1913,109,-2,2105,1,0,109,2,101,0,757,706,2101,0,-1,0,1001,757,1,757,109,-2,2105,1,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,255,63,191,159,95,127,223,0,163,166,217,200,238,34,117,94,155,62,55,60,69,46,103,172,98,186,252,79,107,56,171,214,241,220,175,87,61,70,53,113,232,250,246,245,249,174,86,253,78,108,236,137,244,102,162,84,243,213,126,111,77,212,156,158,222,219,35,239,116,120,190,47,100,221,198,118,205,136,185,187,227,123,119,110,121,43,189,143,188,109,138,177,233,57,226,170,202,248,237,152,196,92,114,167,168,229,234,125,157,169,242,59,182,247,99,216,142,42,183,173,106,39,215,207,201,49,115,54,204,76,71,124,178,181,199,38,179,231,228,85,122,154,50,197,139,218,140,58,153,235,206,251,254,184,203,101,68,93,51,230,141,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,20,73,110,112,117,116,32,105,110,115,116,114,117,99,116,105,111,110,115,58,10,13,10,87,97,108,107,105,110,103,46,46,46,10,10,13,10,82,117,110,110,105,110,103
,46,46,46,10,10,25,10,68,105,100,110,39,116,32,109,97,107,101,32,105,116,32,97,99,114,111,115,115,58,10,10,58,73,110,118,97,108,105,100,32,111,112,101,114,97,116,105,111,110,59,32,101,120,112,101,99,116,101,100,32,115,111,109,101,116,104,105,110,103,32,108,105,107,101,32,65,78,68,44,32,79,82,44,32,111,114,32,78,79,84,67,73,110,118,97,108,105,100,32,102,105,114,115,116,32,97,114,103,117,109,101,110,116,59,32,101,120,112,101,99,116,101,100,32,115,111,109,101,116,104,105,110,103,32,108,105,107,101,32,65,44,32,66,44,32,67,44,32,68,44,32,74,44,32,111,114,32,84,40,73,110,118,97,108,105,100,32,115,101,99,111,110,100,32,97,114,103,117,109,101,110,116,59,32,101,120,112,101,99,116,101,100,32,74,32,111,114,32,84,52,79,117,116,32,111,102,32,109,101,109,111,114,121,59,32,97,116,32,109,111,115,116,32,49,53,32,105,110,115,116,114,117,99,116,105,111,110,115,32,99,97,110,32,98,101,32,115,116,111,114,101,100,0,109,1,1005,1262,1270,3,1262,20101,0,1262,0,109,-1,2105,1,0,109,1,21102,1288,1,0,1106,0,1263,21002,1262,1,0,1102,1,0,1262,109,-1,2106,0,0,109,5,21101,0,1310,0,1106,0,1279,22102,1,1,-2,22208,-2,-4,-1,1205,-1,1332,21201,-3,0,1,21102,1332,1,0,1106,0,1421,109,-5,2105,1,0,109,2,21102,1,1346,0,1106,0,1263,21208,1,32,-1,1205,-1,1363,21208,1,9,-1,1205,-1,1363,1106,0,1373,21102,1370,1,0,1106,0,1279,1106,0,1339,109,-2,2106,0,0,109,5,2102,1,-4,1386,20101,0,0,-2,22101,1,-4,-4,21102,1,0,-3,22208,-3,-2,-1,1205,-1,1416,2201,-4,-3,1408,4,0,21201,-3,1,-3,1105,1,1396,109,-5,2105,1,0,109,2,104,10,21201,-1,0,1,21102,1436,1,0,1105,1,1378,104,10,99,109,-2,2106,0,0,109,3,20002,594,753,-1,22202,-1,-2,-1,201,-1,754,754,109,-3,2105,1,0,109,10,21101,5,0,-5,21101,1,0,-4,21102,1,0,-3,1206,-9,1555,21101,3,0,-6,21101,0,5,-7,22208,-7,-5,-8,1206,-8,1507,22208,-6,-4,-8,1206,-8,1507,104,64,1105,1,1529,1205,-6,1527,1201,-7,716,1515,21002,0,-11,-8,21201,-8,46,-8,204,-8,1105,1,1529,104,46,21201,-7,1,-7,21207,-7,22,-8,1205,-8,1488,104,10,21201,-6,-1,-6,21207,-6,0,-8,1206,-8,1484,104,10,21207,-4,1,-8,1206,-8,1569,21102,0,1,-9,1105,1,1689,21208,-5,21,-8,1206,-8,1583,21101,1,0,-9,1106,0,1689,1201,-5,716,1588,21001,0,0,-2,21208,-4,1,-1,22202,-2,-1,-1,1205,-2,1613,21201,-5,0,1,21101,1613,0,0,1105,1,1444,1206,-1,1634,22101,0,-5,1,21102,1,1627,0,1106,0,1694,1206,1,1634,21101,0,2,-3,22107,1,-4,-8,22201,-1,-8,-8,1206,-8,1649,21201,-5,1,-5,1206,-3,1663,21201,-3,-1,-3,21201,-4,1,-4,1106,0,1667,21201,-4,-1,-4,21208,-4,0,-1,1201,-5,716,1676,22002,0,-1,-1,1206,-1,1686,21101,0,1,-4,1106,0,1477,109,-10,2106,0,0,109,11,21102,1,0,-6,21102,1,0,-8,21102,0,1,-7,20208,-6,920,-9,1205,-9,1880,21202,-6,3,-9,1201,-9,921,1725,20101,0,0,-5,1001,1725,1,1733,20102,1,0,-4,22101,0,-4,1,21102,1,1,2,21101,9,0,3,21102,1,1754,0,1106,0,1889,1206,1,1772,2201,-10,-4,1766,1001,1766,716,1766,21002,0,1,-3,1105,1,1790,21208,-4,-1,-9,1206,-9,1786,21201,-8,0,-3,1105,1,1790,21202,-7,1,-3,1001,1733,1,1795,21001,0,0,-2,21208,-2,-1,-9,1206,-9,1812,21201,-8,0,-1,1105,1,1816,22101,0,-7,-1,21208,-5,1,-9,1205,-9,1837,21208,-5,2,-9,1205,-9,1844,21208,-3,0,-1,1106,0,1855,22202,-3,-1,-1,1105,1,1855,22201,-3,-1,-1,22107,0,-1,-1,1106,0,1855,21208,-2,-1,-9,1206,-9,1869,22102,1,-1,-8,1105,1,1873,22102,1,-1,-7,21201,-6,1,-6,1105,1,1708,21202,-8,1,-10,109,-11,2105,1,0,109,7,22207,-6,-5,-3,22207,-4,-6,-2,22201,-3,-2,-1,21208,-1,0,-6,109,-7,2106,0,0,0,109,5,1202,-2,1,1912,21207,-4,0,-1,1206,-1,1930,21101,0,0,-4,21202,-4,1,1,22101,0,-3,2,21102,1,1,3,21102,1949,1,0,1106,0,1954,109,-5,2106,0,0,109,6,21207,-4,1,-1,1206,-1,1977,22207,-5,-3,-1,1206,-1,1977,22102,1,-5,-5,1105,1,2045,21201,-5,0,1,21201,-4,-1,2,2
1202,-3,2,3,21101,1996,0,0,1105,1,1954,21201,1,0,-5,21102,1,1,-2,22207,-5,-3,-1,1206,-1,2015,21101,0,0,-2,22202,-3,-2,-3,22107,0,-4,-1,1206,-1,2037,21202,-2,1,1,21102,1,2037,0,106,0,1912,21202,-3,-1,-3,22201,-5,-3,-5,109,-6,2105,1,0"""
| 7,345
| 7,345
| 0.687543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,332
| 0.99823
|
c52fa5fe46da648c33fa7618314fa8e93cc98a14
| 10,860
|
py
|
Python
|
src/python/pants/backend/go/target_types.py
|
Eric-Arellano/pants
|
aaa9756bc4f2cc97bb97851a4295a0de85f374b1
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/go/target_types.py
|
Eric-Arellano/pants
|
aaa9756bc4f2cc97bb97851a4295a0de85f374b1
|
[
"Apache-2.0"
] | 12
|
2022-01-06T23:20:22.000Z
|
2022-03-17T05:06:37.000Z
|
src/python/pants/backend/go/target_types.py
|
Eric-Arellano/pants
|
aaa9756bc4f2cc97bb97851a4295a0de85f374b1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Sequence
from pants.core.goals.package import OutputPathField
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import GlobExpansionConjunction, GlobMatchErrorBehavior, PathGlobs
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
Dependencies,
InvalidFieldException,
InvalidTargetException,
MultipleSourcesField,
StringField,
StringSequenceField,
Target,
)
from pants.option.global_options import FilesNotFoundBehavior
class GoImportPathField(StringField):
alias = "import_path"
help = (
"Import path in Go code to import this package.\n\n"
"This field should not be overridden; use the value from target generation."
)
required = True
value: str
# -----------------------------------------------------------------------------------------------
# `go_mod` target generator
# -----------------------------------------------------------------------------------------------
class GoModSourcesField(MultipleSourcesField):
alias = "_sources"
default = ("go.mod", "go.sum")
expected_num_files = range(1, 3) # i.e. 1 or 2.
@property
def go_mod_path(self) -> str:
return os.path.join(self.address.spec_path, "go.mod")
@property
def go_sum_path(self) -> str:
return os.path.join(self.address.spec_path, "go.sum")
def validate_resolved_files(self, files: Sequence[str]) -> None:
super().validate_resolved_files(files)
if self.go_mod_path not in files:
raise InvalidFieldException(
f"The {repr(self.alias)} field in target {self.address} must include "
f"{self.go_mod_path}, but only had: {list(files)}\n\n"
f"Make sure that you're declaring the `{GoModTarget.alias}` target in the same "
"directory as your `go.mod` file."
)
invalid_files = set(files) - {self.go_mod_path, self.go_sum_path}
if invalid_files:
raise InvalidFieldException(
f"The {repr(self.alias)} field in target {self.address} must only include "
f"`{self.go_mod_path}` and optionally {self.go_sum_path}, but had: "
f"{sorted(invalid_files)}\n\n"
f"Make sure that you're declaring the `{GoModTarget.alias}` target in the same "
f"directory as your `go.mod` file and that you don't override the `{self.alias}` "
"field."
)
# TODO: This field probably shouldn't be registered.
class GoModDependenciesField(Dependencies):
alias = "_dependencies"
# TODO(#12953): generalize this?
class GoModPackageSourcesField(StringSequenceField, AsyncFieldMixin):
alias = "package_sources"
default = ("**/*.go", "**/*.s")
help = (
"What sources to generate `go_first_party_package` targets for.\n\n"
"Pants will generate one target per matching directory.\n\n"
"Pants does not yet support some file types like `.c` and `.h` files, along with cgo "
"files. If you need to use these files, please open a feature request at "
"https://github.com/pantsbuild/pants/issues/new/choose so that we know to "
"prioritize adding support."
)
def _prefix_glob_with_address(self, glob: str) -> str:
if glob.startswith("!"):
return f"!{os.path.join(self.address.spec_path, glob[1:])}"
return os.path.join(self.address.spec_path, glob)
def path_globs(self, files_not_found_behavior: FilesNotFoundBehavior) -> PathGlobs:
error_behavior = files_not_found_behavior.to_glob_match_error_behavior()
return PathGlobs(
(self._prefix_glob_with_address(glob) for glob in self.value or ()),
conjunction=GlobExpansionConjunction.any_match,
glob_match_error_behavior=error_behavior,
description_of_origin=(
f"{self.address}'s `{self.alias}` field"
if error_behavior != GlobMatchErrorBehavior.ignore
else None
),
)
class GoModTarget(Target):
alias = "go_mod"
core_fields = (
*COMMON_TARGET_FIELDS,
GoModDependenciesField,
GoModSourcesField,
GoModPackageSourcesField,
)
help = (
"A first-party Go module (corresponding to a `go.mod` file).\n\n"
"Generates `go_first_party_package` targets for each directory from the "
"`package_sources` field, and generates `go_third_party_package` targets based on "
"the `require` directives in your `go.mod`.\n\n"
"If you have third-party packages, make sure you have an up-to-date `go.sum`. Run "
"`go mod tidy` directly to update your `go.mod` and `go.sum`."
)
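# A hypothetical BUILD file sketch (an assumption, not part of this module),
# showing the single declaration the help text above describes:
#
#   go_mod(
#       name="mod",
#       package_sources=["**/*.go", "!**/*_test.go"],
#   )
#
# One go_first_party_package target is then generated per matching directory,
# plus go_third_party_package targets for each `require` directive in go.mod.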
# -----------------------------------------------------------------------------------------------
# `go_first_party_package` target
# -----------------------------------------------------------------------------------------------
class GoFirstPartyPackageSourcesField(MultipleSourcesField):
expected_file_extensions = (".go", ".s")
class GoFirstPartyPackageDependenciesField(Dependencies):
pass
class GoFirstPartyPackageSubpathField(StringField, AsyncFieldMixin):
alias = "subpath"
help = (
"The path from the owning `go.mod` to this package's directory, e.g. `subdir`.\n\n"
"This field should not be overridden; use the value from target generation."
)
required = True
value: str
@property
def full_dir_path(self) -> str:
"""The full path to this package's directory, relative to the build root."""
# NB: The `spec_path` points to the `go_mod` target used to generate the
# `go_first_party_package` target.
assert self.address.is_generated_target
go_mod_path = self.address.spec_path
if not self.value:
return go_mod_path
return os.path.join(go_mod_path, self.value)
class GoFirstPartyPackageTarget(Target):
alias = "go_first_party_package"
core_fields = (
*COMMON_TARGET_FIELDS,
GoImportPathField,
GoFirstPartyPackageSubpathField,
GoFirstPartyPackageDependenciesField,
GoFirstPartyPackageSourcesField,
)
help = (
"A Go package (corresponding to a directory with `.go` files).\n\n"
"You should not explicitly create this target in BUILD files. Instead, add a `go_mod` "
"target where you have your `go.mod` file, which will generate "
"`go_first_party_package` targets for you."
)
def validate(self) -> None:
if not self.address.is_generated_target:
raise InvalidTargetException(
f"The `{self.alias}` target type should not be manually created in BUILD "
f"files, but it was created for {self.address}.\n\n"
"Instead, add a `go_mod` target where you have your `go.mod` file, which will "
f"generate `{self.alias}` targets for you."
)
# -----------------------------------------------------------------------------------------------
# `go_third_party_package` target
# -----------------------------------------------------------------------------------------------
class GoThirdPartyPackageDependenciesField(Dependencies):
pass
class GoThirdPartyModulePathField(StringField):
alias = "module_path"
help = (
"The module path of the third-party module this package comes from, "
"e.g. `github.com/google/go-cmp`.\n\n"
"This field should not be overridden; use the value from target generation."
)
required = True
value: str
class GoThirdPartyModuleVersionField(StringField):
alias = "version"
help = (
"The version of the third-party module this package comes from, e.g. `v0.4.0`.\n\n"
"This field should not be overridden; use the value from target generation."
)
required = True
value: str
class GoThirdPartyPackageTarget(Target):
alias = "go_third_party_package"
core_fields = (
*COMMON_TARGET_FIELDS,
GoThirdPartyPackageDependenciesField,
GoThirdPartyModulePathField,
GoThirdPartyModuleVersionField,
GoImportPathField,
)
help = (
"A package from a third-party Go module.\n\n"
"You should not explicitly create this target in BUILD files. Instead, add a `go_mod` "
"target where you have your `go.mod` file, which will generate "
"`go_third_party_package` targets for you.\n\n"
"Make sure that your `go.mod` and `go.sum` files include this package's module."
)
def validate(self) -> None:
if not self.address.is_generated_target:
raise InvalidTargetException(
f"The `{self.alias}` target type should not be manually created in BUILD "
f"files, but it was created for {self.address}.\n\n"
"Instead, add a `go_mod` target where you have your `go.mod` file, which will "
f"generate `{self.alias}` targets for you based on the `require` directives in "
f"your `go.mod`."
)
# -----------------------------------------------------------------------------------------------
# `go_binary` target
# -----------------------------------------------------------------------------------------------
class GoBinaryMainPackageField(StringField, AsyncFieldMixin):
alias = "main"
help = (
"Address of the `go_first_party_package` with the `main` for this binary.\n\n"
"If not specified, will default to the `go_first_party_package` for the same "
"directory as this target's BUILD file. You should usually rely on this default."
)
value: str
@dataclass(frozen=True)
class GoBinaryMainPackage:
address: Address
@dataclass(frozen=True)
class GoBinaryMainPackageRequest(EngineAwareParameter):
field: GoBinaryMainPackageField
def debug_hint(self) -> str:
return self.field.address.spec
class GoBinaryDependenciesField(Dependencies):
# This is only used to inject a dependency from the `GoBinaryMainPackageField`. Users should
# add any explicit dependencies to the `go_package`.
alias = "_dependencies"
class GoBinaryTarget(Target):
alias = "go_binary"
core_fields = (
*COMMON_TARGET_FIELDS,
OutputPathField,
GoBinaryMainPackageField,
GoBinaryDependenciesField,
RestartableField,
)
help = "A Go binary."
| 36.813559
| 98
| 0.617127
| 8,931
| 0.822376
| 0
| 0
| 927
| 0.085359
| 0
| 0
| 5,169
| 0.475967
|
c52fbd848e1acb3cd166434a5aa79fb5ec3b969e
| 9,623
|
py
|
Python
|
iris/src/iris/main.py
|
headma5ter/wall-e
|
da7624cd58ee3e61b847af6a389cc919e1f2a8d1
|
[
"MIT"
] | null | null | null |
iris/src/iris/main.py
|
headma5ter/wall-e
|
da7624cd58ee3e61b847af6a389cc919e1f2a8d1
|
[
"MIT"
] | null | null | null |
iris/src/iris/main.py
|
headma5ter/wall-e
|
da7624cd58ee3e61b847af6a389cc919e1f2a8d1
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
import matplotlib.lines as lines
from statistics import mode, StatisticsError
from csv import QUOTE_ALL
import pandas as pd
import pathlib
import json
from iris import logger
from iris import config
from iris import classifier
from iris.helpers.utils import log_function # TODO: change to ceres
COLUMN_NAMES = ["w", "x", "y", "z"]
@log_function
def read_data(
csv_path: pathlib.Path = None, serial_path: pathlib.Path = None
) -> (pd.DataFrame, dict):
"""
Read in either raw CSV data or pickled data.
:param csv_path: path to CSV data
:param serial_path: path to pickled/JSON data
    :return: pd.DataFrame (CSV or pickled input) or dict (JSON input)
"""
if serial_path is not None:
ext = serial_path.suffix
if ext == ".pkl":
# Read in centroids serialized file
return pd.read_pickle(serial_path)
elif ext == ".json":
# Read in mapping serialized file
with open(serial_path) as f:
return {int(k): v for k, v in json.load(f).items()}
else:
msg = f"Unknown file extension ({serial_path})"
logger.error(msg)
raise ValueError(msg)
# Read in default CSV file
return pd.read_csv(
csv_path, low_memory=True, header=None, names=COLUMN_NAMES + ["classification"],
)
@log_function
def classify_clusters(
df: pd.DataFrame, initial_centroids: pd.DataFrame = None
) -> (pd.DataFrame, pd.DataFrame):
"""
Send raw data to classifier.py to be run through the
k-means algorithm in order to cluster the data points.
:param df: raw data
:param initial_centroids: centroids from training data (when applicable)
:return: (finalized data, centroids for testing data)
"""
initial_centroids = (
initial_centroids.to_numpy() if initial_centroids is not None else None
)
# Initiate algo class
all_clusters = classifier.LloydsAlgorithm(
number_of_clusters=config.clusters,
data_set=df[df.columns[:-1]].to_numpy(),
initial_centroids=initial_centroids,
)
while (
not all_clusters.is_optimized()
and all_clusters.iteration <= all_clusters.max_iterations
):
# Update centroids with their new center of mass
all_clusters.update_centroids()
# Assign data points to their closest centroid
all_clusters.assign_clusters()
# Increase the increment counter by one
all_clusters.increment_iteration()
merged_df = df.join(
pd.DataFrame(all_clusters.clusters, columns=["cluster"], dtype=int)
)
centroids_df = pd.DataFrame(all_clusters.centroids, columns=COLUMN_NAMES)
return merged_df, centroids_df
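# A minimal driving sketch (hypothetical data, not part of this module); the
# column layout matches COLUMN_NAMES + ["classification"] used by read_data:
#
#   frame = pd.DataFrame(
#       [[5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
#        [6.7, 3.0, 5.2, 2.3, "Iris-virginica"]],
#       columns=COLUMN_NAMES + ["classification"],
#   )
#   clustered, centroids = classify_clusters(frame)
#   # `clustered` gains an integer "cluster" column; `centroids` has one row
#   # per cluster with the COLUMN_NAMES coordinates.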
@log_function
def serialize_data(data_obj: (pd.DataFrame, dict), file_path: pathlib.Path) -> None:
"""
Pickle training data.
:param data_obj: Dataframe (post-algorithm) or dict (post-mapping)
:param file_path: where to write serialized data
"""
if not file_path.parent.is_dir():
msg = f"The indicated path cannot be found; perhaps a parent folder is missing? ({file_path})"
logger.error(msg)
raise FileNotFoundError(msg)
if isinstance(data_obj, dict):
with open(file_path, "w") as f:
json.dump(data_obj, f)
else:
data_obj.to_pickle(file_path)
@log_function
def map_cluster_to_species(df: pd.DataFrame) -> dict:
"""
Finds the most common species linked to each cluster
value (for plotting, as well as sanity-checking).
:param df: data (after running algorithm)
:return: dict
"""
cluster_map = {
row["cluster"]: list(df[df["cluster"] == row["cluster"]]["classification"])
for _, row in df.iterrows()
}
try:
cluster_map = {int(k): mode(v) for k, v in cluster_map.items()}
except StatisticsError as e:
msg = f"Error finding unique mappings for clusters ({e})"
logger.error(msg)
raise ValueError(msg)
if set(cluster_map.values()) != set(df["classification"].unique()):
logger.warn("Not all classifications are mapped")
cluster_map.update(
{
cluster: "UNMAPPED"
for cluster in range(config.clusters)
if cluster not in cluster_map
}
)
return cluster_map
@log_function
def write_to_csv(df: pd.DataFrame, file_path: pathlib.Path) -> None:
"""
Write final testing dataframe to file.
:param df: testing df (post-algorithm)
:param file_path: file path
"""
if not file_path.parent.is_dir():
msg = f"The indicated path cannot be found; perhaps a parent folder is missing? ({file_path})"
logger.error(msg)
raise FileNotFoundError(msg)
df.to_csv(file_path, index=False, quoting=QUOTE_ALL)
@log_function
def plot_clusters(df: pd.DataFrame, cluster_map: dict) -> None:
"""
Create a plot containing (NxM - k) plots, showing
the relationship between each parameter and the
clusters contained in each sub-data set.
:param df: the data after being classified
:param cluster_map: mapping to convert cluster to Iris species
"""
variants = [
col
for col in df.columns.tolist()
if col not in ("cluster", "classification", "color", "model_classification")
]
fig, ax = plt.subplots(
nrows=len(variants), ncols=len(variants), figsize=[12.0, 12.0], squeeze=True
)
color_map = {
k: plt.get_cmap("Dark2")((k + 1) / config.clusters)
for k in range(config.clusters)
}
df["color"] = df["cluster"].apply(lambda x: color_map[x])
    for row_idx, axes_row in enumerate(ax):
        for col_idx, _ in enumerate(axes_row):
x_var = variants[col_idx]
y_var = variants[row_idx]
curr_plot = ax[row_idx][col_idx]
if row_idx == col_idx:
curr_plot.text(
0.5,
0.5,
f"{x_var.upper()}",
ha="center",
va="center",
fontsize="xx-large",
label="",
)
curr_plot.get_xaxis().set_visible(False)
curr_plot.get_yaxis().set_visible(False)
else:
curr_plot.scatter(df[x_var], df[y_var], c=df["color"])
fig.suptitle(f"Iris Classification ({config.stage} data)", fontsize="xx-large")
fig.tight_layout()
fig.subplots_adjust(top=0.93)
handles = list()
labels = list()
for classification, color in {
cluster_map[cluster]: color for cluster, color in color_map.items()
}.items():
handles.append(
lines.Line2D(list(), list(), marker="o", color=color, linestyle="none")
)
labels.append(classification)
plt.legend(handles=handles, labels=labels)
if config.save:
plt.savefig(config.plot_path)
plt.show()
@log_function
def map_species_onto_data(df: pd.DataFrame, cluster_map: dict) -> pd.DataFrame:
"""
Add species names to the final dataframe.
:param df: dataframe (after running through algorithm)
:param cluster_map: mapping data to go from cluster to species
:return: pd.DataFrame
"""
df["model_classification"] = df["cluster"].map(cluster_map)
return df
@log_function
def calculate_statistics(df: pd.DataFrame, cluster_map: dict) -> None:
"""
Calculates accuracy of model.
:param df: dataframe (after running through algorithm)
:param cluster_map: mapping data to go from cluster to species
"""
num_correct = len(df[df["classification"] == df["cluster"].map(cluster_map)])
total_num = len(df)
logger.info(f"Accuracy: {'{:.1%}'.format(num_correct / total_num)} (N={total_num})")
if __name__ == "__main__":
# Get relevant paths
data_path = getattr(config, f"{config.stage}_data_path")
centroid_path = config.centroid_serial_path
mapping_path = config.mapping_serial_path
centroids = None
mapping = dict()
if config.stage == "testing":
if not centroid_path.is_file() or not mapping_path.is_file():
            logger.warning(
"No training data to be read -- could result in poor model performance"
)
else:
# Get centroids and species mapping from training data
centroids = read_data(serial_path=centroid_path)
mapping = read_data(serial_path=mapping_path)
# Classify data set
data = read_data(csv_path=data_path)
data, centroids = classify_clusters(data, initial_centroids=centroids)
if config.stage == "training":
# Map species to cluster
mapping = map_cluster_to_species(data)
if config.serialize:
# Save data
serialize_data(centroids, config.centroid_serial_path)
serialize_data(mapping, config.mapping_serial_path)
# Add the model's species classification to data
data = map_species_onto_data(data, mapping)
if config.save:
# Save testing results to files
write_to_csv(data, config.results_path)
if config.visualize:
# Plot data
plot_clusters(data, mapping)
calculate_statistics(data, mapping)
logger.info(f"Process complete\n\t{config.summary}")
| 32.731293
| 103
| 0.618622
| 0
| 0
| 0
| 0
| 7,625
| 0.792372
| 0
| 0
| 3,048
| 0.316741
|
c530c5e8d4407688c79bec94a667aec813211585
| 2,328
|
py
|
Python
|
data_loader/util.py
|
lixiaoyu0575/physionet_challenge2020_pytorch
|
39b5aeeead440eaa88d6fdaf4a8a70c15373e062
|
[
"MIT"
] | 1
|
2021-05-24T08:09:30.000Z
|
2021-05-24T08:09:30.000Z
|
data_loader/util.py
|
lixiaoyu0575/physionet_challenge2020_pytorch
|
39b5aeeead440eaa88d6fdaf4a8a70c15373e062
|
[
"MIT"
] | null | null | null |
data_loader/util.py
|
lixiaoyu0575/physionet_challenge2020_pytorch
|
39b5aeeead440eaa88d6fdaf4a8a70c15373e062
|
[
"MIT"
] | null | null | null |
from scipy.io import loadmat
import numpy as np
import os
import torch
from torch.utils.data import Dataset, TensorDataset
from torchvision import transforms
# Find unique classes.
def get_classes(input_directory, filenames):
classes = set()
for filename in filenames:
with open(filename, 'r') as f:
for l in f:
if l.startswith('#Dx'):
tmp = l.split(': ')[1].split(',')
for c in tmp:
classes.add(c.strip())
return sorted(classes)
# Load challenge data.
def load_challenge_data(label_file, data_dir):
file = os.path.basename(label_file)
name, ext = os.path.splitext(file)
with open(label_file, 'r') as f:
header = f.readlines()
mat_file = file.replace('.hea', '.mat')
x = loadmat(os.path.join(data_dir, mat_file))
recording = np.asarray(x['val'], dtype=np.float64)
return recording, header, name
# Customed TensorDataset
class CustomTensorDataset(Dataset):
"""TensorDataset with support of transforms.
"""
def __init__(self, *tensors, transform=None, p=0.5):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
self.transform = transform
self.p = p
def __getitem__(self, index):
x = self.tensors[0][index]
        torch.randn(1)  # draw-and-discard: advances the global RNG state
if self.transform:
if torch.rand(1) >= self.p:
x = self.transform(x)
y = self.tensors[1][index]
return x, y
def __len__(self):
return self.tensors[0].size(0)
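# Usage sketch (illustration only; the tensors and transform below are
# hypothetical): per __getitem__ above, the transform fires when
# torch.rand(1) >= p, i.e. with probability (1 - p).
def _custom_tensor_dataset_sketch():
    from torch.utils.data import DataLoader

    def flip(t):
        return torch.flip(t, dims=[-1])   # reverse along the time axis

    x = torch.randn(8, 12, 5000)              # e.g. 12-lead recordings
    y = torch.randint(0, 2, (8, 9)).float()   # multi-label targets
    ds = CustomTensorDataset(x, y, transform=flip, p=0.5)
    return next(iter(DataLoader(ds, batch_size=4)))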
class CustomTensorListDataset(Dataset):
"""TensorDataset with support of transforms.
"""
def __init__(self, *tensors_list, transform=None):
self.tensors_list = tensors_list
self.transform = transform
def __getitem__(self, index):
x = self.tensors_list[0][index]
if self.transform:
            torch.randn(1)  # draw-and-discard: advances the global RNG state
            if torch.rand(1) >= 0.5:  # transform probability hard-coded to 0.5 here
x = self.transform(x)
y = self.tensors_list[1][index]
return x, y
def __len__(self):
return len(self.tensors_list[0])
def custom_collate_fn(batch):
data = [item[0].unsqueeze(0) for item in batch]
target = [item[1].unsqueeze(0) for item in batch]
return [data, target]
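# Usage sketch (illustration only): custom_collate_fn keeps variable-length
# recordings in Python lists instead of stacking them into one tensor, so a
# single batch can mix different sequence lengths. The data is hypothetical.
def _collate_sketch():
    from torch.utils.data import DataLoader
    xs = [torch.randn(12, n) for n in (3000, 5000)]   # unequal lengths
    ys = [torch.zeros(9), torch.ones(9)]
    ds = CustomTensorListDataset(xs, ys)
    loader = DataLoader(ds, batch_size=2, collate_fn=custom_collate_fn)
    return next(iter(loader))   # [list of (1, 12, n)], [list of (1, 9)]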
| 28.390244
| 78
| 0.608247
| 1,191
| 0.511598
| 0
| 0
| 0
| 0
| 0
| 0
| 207
| 0.088918
|
c53329de3ea7d9bd985ca5fb1b5d8d143c4eb7ac
| 2,191
|
py
|
Python
|
pyrez/exceptions.py
|
EthanHicks1/Pyrez
|
022d62ae893594c2ddcd7fac5e740c693fd4fd54
|
[
"MIT"
] | null | null | null |
pyrez/exceptions.py
|
EthanHicks1/Pyrez
|
022d62ae893594c2ddcd7fac5e740c693fd4fd54
|
[
"MIT"
] | null | null | null |
pyrez/exceptions.py
|
EthanHicks1/Pyrez
|
022d62ae893594c2ddcd7fac5e740c693fd4fd54
|
[
"MIT"
] | null | null | null |
class CustomException(Exception):
    def __init__(self, *args, **kwargs):
        # pass the arguments straight through; forwarding self as well would
        # duplicate it into self.args
        super().__init__(*args, **kwargs)
    def __str__(self):
        return str(self.args[0])
class DeprecatedException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class DailyLimitException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class InvalidArgumentException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class IdOrAuthEmptyException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class NotFoundException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class NotSupported(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class SessionLimitException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class WrongCredentials(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class PaladinsOnlyException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class SmiteOnlyException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class RealmRoyaleOnlyException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class PlayerNotFoundException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class GetMatchPlayerDetailsException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class UnexpectedException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
class RequestErrorException(CustomException):
def __init__(self, *args, **kwargs):
return super().__init__(*args, **kwargs)
| 42.960784
| 54
| 0.700593
| 2,175
| 0.992697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c538cf5b43e938d74b89e921d97d1ef0493292ec
| 317
|
py
|
Python
|
solutions/binarysearch.io/hard/collecting-coins/main.py
|
zwliew/ctci
|
871f4fc957be96c6d0749d205549b7b35dc53d9e
|
[
"MIT"
] | 4
|
2020-11-07T14:38:02.000Z
|
2022-01-03T19:02:36.000Z
|
solutions/binarysearch.io/hard/collecting-coins/main.py
|
zwliew/ctci
|
871f4fc957be96c6d0749d205549b7b35dc53d9e
|
[
"MIT"
] | 1
|
2019-04-17T06:55:14.000Z
|
2019-04-17T06:55:14.000Z
|
solutions/binarysearch.io/hard/collecting-coins/main.py
|
zwliew/ctci
|
871f4fc957be96c6d0749d205549b7b35dc53d9e
|
[
"MIT"
] | null | null | null |
class Solution:
    def solve(self, matrix):
        from functools import lru_cache

        @lru_cache(None)
        def dp(i, j):
            if i < 0 or j < 0:
                return 0
            return max(dp(i - 1, j), dp(i, j - 1)) + matrix[i][j]

        return dp(len(matrix) - 1, len(matrix[0]) - 1)
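# Worked example (illustration only): for matrix = [[0, 2], [5, 1]],
# dp(1, 1) = max(dp(0, 1), dp(1, 0)) + matrix[1][1] = max(2, 5) + 1 = 6,
# i.e. the best monotone down/right path collects 0 -> 5 -> 1.
assert Solution().solve([[0, 2], [5, 1]]) == 6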
| 31.7
| 66
| 0.498423
| 15
| 0.047319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c53adeb9103721e86c1c98d5836be2d9b0c044bf
| 12,074
|
py
|
Python
|
Python 3/First_steps_on_machine_learning/Maze_using_Bellman_equation/Test_Maze.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
Python 3/First_steps_on_machine_learning/Maze_using_Bellman_equation/Test_Maze.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
Python 3/First_steps_on_machine_learning/Maze_using_Bellman_equation/Test_Maze.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | 1
|
2020-08-19T17:25:22.000Z
|
2020-08-19T17:25:22.000Z
|
import pygame, sys, maze_builder, random
class Maze(object):
def __init__(self, width, height, grid_length, penalizacion = 0.9): # width and height of the window and the grid size (x, y) so there would be a maximum number of nodes which would be x*y
super(Maze, self).__init__()
pygame.init()
self.width = width
self.height = height
self.screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("TEST MAZE")
self.background = (0, 0, 0)
self.nodes = {} # (x, y):maze_builder.Node
self.nodeRadius = 4
self.grid_length = grid_length
self.node_width = width/grid_length[0]
self.node_height = height/grid_length[1]
self.penalizacion = penalizacion
def display(self):
""" Draw the objects on the screen. """
self.screen.fill(self.background)
for position, node in self.nodes.items():
# i get the square for the node
pygame.draw.rect(self.screen, node.floor_color, (int(node.position[0])*self.node_width, int(node.position[1])*self.node_height, self.node_width, self.node_height), 0) # this is the floor
# now lets draw the walls
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width, int(node.position[1])*self.node_height, self.node_width*0.1, self.node_height*0.1), 0) # top left corner
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width+self.node_width*0.9, int(node.position[1])*self.node_height, self.node_width*0.1, self.node_height*0.1), 0) # top right corner
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width+self.node_width*0.9, int(node.position[1])*self.node_height+self.node_height*0.9, self.node_width*0.1, self.node_height*0.1), 0) # bottom right corner
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width, int(node.position[1])*self.node_height+self.node_height*0.9, self.node_width*0.1, self.node_height*0.1), 0) # bottom left corner
# taking wall width equal to 10% of the node size
if not node.up: # top wall
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width+self.node_width*0.09, int(node.position[1])*self.node_height, self.node_width*0.82, self.node_height*0.1), 0)
if not node.right: # right wall
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width+self.node_width*0.9, int(node.position[1])*self.node_height+self.node_height*0.09, self.node_width*0.1, self.node_height*0.82), 0)
if not node.down: # bottom wall
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width+self.node_width*0.09, int(node.position[1])*self.node_height+self.node_height*0.9, self.node_width*0.82, self.node_height*0.1), 0)
if not node.left: # left wall
pygame.draw.rect(self.screen, node.wall_color, (int(node.position[0])*self.node_width, int(node.position[1])*self.node_height+self.node_height*0.09, self.node_width*0.1, self.node_height*0.82), 0)
pygame.draw.rect(self.screen, self.runner.color, (self.runner.position[0]*self.node_width+(self.node_width/2-self.runner.width/2), self.runner.position[1]*self.node_height+(self.node_height/2-self.runner.height/2), self.runner.width, self.runner.height), 0)
def move_runner(self, direction):
if getattr(self.nodes[self.runner.position], direction):
if direction == "up":
self.runner.position = (self.runner.position[0], self.runner.position[1]-1)
if direction == "down":
self.runner.position = (self.runner.position[0], self.runner.position[1]+1)
if direction == "right":
self.runner.position = (self.runner.position[0]+1, self.runner.position[1])
if direction == "left":
self.runner.position = (self.runner.position[0]-1, self.runner.position[1])
print("{0}".format(self.nodes[self.runner.position].value + self.nodes[self.runner.position].reward))
def run(self):
"""Create a pygame screen until it is closed."""
key_to_function = {
pygame.K_LEFT: (lambda x: x.move_runner("left")),
pygame.K_RIGHT: (lambda x: x.move_runner("right")),
pygame.K_UP: (lambda x: x.move_runner("up")),
pygame.K_DOWN: (lambda x: x.move_runner("down"))
}
done = False
while not done:
keys = pygame.key.get_pressed()
for key in key_to_function.keys():
if keys[key]:
key_to_function[key](self)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
pygame.time.delay(100)
self.display()
pygame.display.flip()
pygame.quit()
return done
def add_node(self, node):
type = node.special
if type == "start":
self.start = node.position
if type == "end":
self.end = node.position
self.nodes[node.position] = node
def add_runner(self, runner):
if runner.width == "relative":
runner.width = self.node_width*0.4
if runner.height == "relative":
runner.height = self.node_height*0.4
if runner.position == None:
runner.position = self.start
self.runner = runner
# def action(self, state, action):
# if action == "up":
# return(self.nodes[(state.position[0], state.position[1]-1)].reward)
# if action == "down":
# return(self.nodes[(state.position[0], state.position[1]+1)].reward)
# if action == "left":
# return(self.nodes[(state.position[0]-1, state.position[1])].reward)
# if action == "right":
# return(self.nodes[(state.position[0]+1, state.position[1])].reward)
# def get_value(self, state, coming_from=(0, 0)):
# movements = {
# "u":(0, -1),
# "d":(0, 1),
# "l":(-1, 0),
# "r":(1, 0)
# }
# if state == self.end:
# return self.nodes[state].reward + self.penalizacion*self.nodes[state].value
# values = []
# for action in self.nodes[state].actions:
# if movements[action] != coming_from:
# print("being at {0} and going {1} gives {2} reward and makes this node have a value of:".format(state, action, self.nodes[move(state, movements[action])].reward))
# if self.nodes[move(state, movements[action])].value != 0:
# next_value = self.nodes[move(state, movements[action])].value
# else:
# next_value = self.get_value(self.nodes[move(state, movements[action])].position, (-movements[action][0], -movements[action][1]))
#
# value = self.nodes[move(state, movements[action])].reward + self.penalizacion*next_value
# print(value)
# values.append(value)
#
# self.nodes[state].value = max(values)
# return self.nodes[state].value
# def work_out_values(self, state):
# movements = {
# "u":(0, -1),
# "d":(0, 1),
# "l":(-1, 0),
# "r":(1, 0)
# }
#
# for action in self.nodes[state].actions:
# values = []
# unknown = ""
# if self.nodes[state].value != 0:
# values.append( self.nodes[state].value )
# else:
# unknown += action
# if values.length() != 0:
# next_value = max(values)
# else:
# # next_value = self.get_value()
# unknown[0]
# print("being at {0} and going {1} gives {2} reward and makes this node have a value of:".format(state, action, self.nodes[move(state, movements[action])].reward))
# if self.nodes[move(state, movements[action])].value < self.nodes[state].reward + self.penalizacion*next_value:
# self.nodes[move(state, movements[action])].value = self.nodes[state].reward + self.penalizacion*self.nodes[state].value
# if self.nodes[move(state, movements[action])].position != self.start:
# self.get_value(self.nodes[move(state, movements[action])].position)
#
def solve_random(self):
done = False
movements = {
1: "left", # left
2: "right", # right
3: "up", # up
4: "down" # down
}
number_to_function = {
1: (lambda x: x.move_runner("left")), # left
2: (lambda x: x.move_runner("right")), # right
3: (lambda x: x.move_runner("up")), # up
4: (lambda x: x.move_runner("down")) # down
}
while not done:
pygame.time.delay(1000) # for debug reasons it will do one step
# per second
movement = random.randint(1, 4)
# key_to_function[movement](self)
while not getattr(self.nodes[self.runner.position], movements[movement]):
movement = random.randint(1, 4)
self.move_runner(movements[movement])
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
pygame.time.delay(100)
self.display()
pygame.display.flip()
if self.runner.position == self.end:
done = True
pygame.quit()
return done
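# Illustration only (a sketch, not part of the original class): the Bellman
# update that the commented-out get_value()/work_out_values() methods above
# circle around, run on a hypothetical three-state corridor; `penalizacion`
# plays the role of the discount factor gamma.
def _bellman_sketch(gamma=0.9, sweeps=50):
    rewards = {0: 0.0, 1: 0.0, 2: 1.0}   # state 2 is the goal
    moves = {0: [1], 1: [0, 2], 2: []}   # allowed transitions
    value = {s: 0.0 for s in rewards}
    for _ in range(sweeps):
        for s, nxt in moves.items():
            if nxt:  # V(s) = max over s' of [R(s') + gamma * V(s')]
                value[s] = max(rewards[n] + gamma * value[n] for n in nxt)
    return value  # converges to {0: 0.9, 1: 1.0, 2: 0.0}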
class Maze_runner(object):
"""docstring for Maze_runner."""
def __init__(self, position=None, color=(255, 255, 255), width="relative", height="relative"):
        super(Maze_runner, self).__init__()
self.position = position
self.color = color
self.width = width
self.height = height
def move(a=(0, 0), b=(0, 0)):
a = (a[0] + b[0], a[1] + b[1])
return(a)
runner = Maze_runner()
# s p p
# b g
# e p p
maze = Maze( 1000, 1000, (3, 3))
test_node_00 = maze_builder.Node(position=(0, 0), right = True, special="start")
test_node_10 = maze_builder.Node(position=(1, 0), left = True, down = True, right = True)
test_node_20 = maze_builder.Node(position=(2, 0), down = True, left = True)
test_node_11 = maze_builder.Node(position=(1, 1), down = True, up = True, right = True, special="bad")
test_node_21 = maze_builder.Node(position=(2, 1), left = True, up=True, down=True, special="good")
test_node_22 = maze_builder.Node(position=(2, 2), up = True, left = True)
test_node_12 = maze_builder.Node(position=(1, 2), up = True, left = True, right = True)
test_node_02 = maze_builder.Node(position=(0, 2), right = True, special="end")
maze.add_node(test_node_00)
maze.add_node(test_node_20)
maze.add_node(test_node_10)
maze.add_node(test_node_11)
maze.add_node(test_node_21)
maze.add_node(test_node_22)
maze.add_node(test_node_12)
maze.add_node(test_node_02)
maze.add_runner(runner)
# maze.work_out_values(maze.end)
# maze.get_value(maze.start)
maze.run()
# maze.solve_random()
# x x
# x x
# maze = Maze( 1000, 1000, (2, 2))
# test_node_00 = maze_builder.Node(position=(0, 0), right = True, up = True, special="start")
# test_node_10 = maze_builder.Node(position=(1, 0), left = True, down = True)
# test_node_11 = maze_builder.Node(position=(1, 1), up = True, left = True)
# test_node_01 = maze_builder.Node(position=(0, 1), right = True, down = True, special="end")
# maze.add_node(test_node_00)
# maze.add_node(test_node_10)
# maze.add_node(test_node_11)
# maze.add_node(test_node_01)
# maze.add_runner(runner)
# maze.run()
| 46.79845
| 265
| 0.595826
| 10,282
| 0.851582
| 0
| 0
| 0
| 0
| 0
| 0
| 4,117
| 0.340981
|
c53b663532da343a9e761b6ebf1b05f4670a34a6
| 13,679
|
py
|
Python
|
powderday/nebular_emission/abund.py
|
mccbc/powderday
|
604b4a242216db0e93dc2e50a77bc20dc5cfb10f
|
[
"BSD-3-Clause"
] | null | null | null |
powderday/nebular_emission/abund.py
|
mccbc/powderday
|
604b4a242216db0e93dc2e50a77bc20dc5cfb10f
|
[
"BSD-3-Clause"
] | null | null | null |
powderday/nebular_emission/abund.py
|
mccbc/powderday
|
604b4a242216db0e93dc2e50a77bc20dc5cfb10f
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as InterpUS
from powderday.nebular_emission.cloudy_tools import sym_to_name
"""
------------------------------------------------------------------------------------------
From cloudyfsps written by Nell Byler.
(Source https://github.com/nell-byler/cloudyfsps/blob/master/cloudyfsps/nebAbundTools.py
retrieved in October 2019)
------------------------------------------------------------------------------------------
"""
def getNebAbunds(set_name, logZ, dust=True, re_z=False, **kwargs):
"""
neb_abund.get_abunds(set_name, logZ, dust=True, re_z=False)
set_name must be 'dopita', 'newdopita', 'cl01' or 'yeh'
"""
allowed_names = ['dopita', 'newdopita', 'cl01', 'yeh',
'varyNO', 'gutkin', 'UVbyler', 'varyCO']
if set_name in allowed_names:
return eval('{}({}, dust={}, re_z={})'.format(set_name, logZ, dust, re_z))
else:
raise IOError(allowed_names)
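# Usage sketch (illustration only; the function is not called this way
# anywhere in this module):
def _abund_usage_sketch():
    abund = getNebAbunds('dopita', -0.5, dust=True)
    # each entry of elem_strs is a Cloudy 'element abundance <name> <value> log' line
    return abund.solarstr, abund.elem_strs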
class abundSet(object):
def __init__(self, set_name, logZ):
"""
overarching class for abundance sets.
abundSet('dopita', 0.0)
"""
self.logZ = logZ
self.abund_0 = load_abund(set_name)
self.depl = load_depl(set_name)
self.calcSpecial()
self.calcFinal()
self.inputStrings()
def calcSpecial(self):
return
def calcFinal(self):
return
def inputStrings(self):
self.solarstr = 'abundances {} {}'.format(self.solar, self.grains)
elem_strs = []
names = sym_to_name()
for key in self.abund_0.keys():
elm = names[key]
abund = self.__getattribute__(key)
# if hasattr(self, 're_z'):
# if key != 'He':
# abund -= self.re_z
outstr = 'element abundance {0} {1:.2f} log'.format(elm, abund)
elem_strs.append(outstr)
self.__setattr__('elem_strs', elem_strs)
return
class dopita(abundSet):
solar = 'old solar 84'
def __init__(self, logZ, dust=True, re_z=False):
"""
Dopita+2001: old solar abundances = 0.019
ISM grains
"""
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
if re_z:
self.re_z = logZ
else:
self.re_z = 0.0
abundSet.__init__(self, 'dopita', logZ)
def calcSpecial(self):
"""
piece-wise function for nitrogen abund (step-function)
functional form for helium
"""
def calc_N(logZ):
if logZ <= -0.63:
return -4.57 + logZ
else:
return -3.94 + (2.0 * logZ)
def calc_He(logZ):
return np.log10(0.08096 + (0.02618 * (10.0 ** logZ)))
self.__setattr__('He', calc_He(self.logZ))
self.__setattr__('N', calc_N(self.logZ) + self.depl['N'])
return
def calcFinal(self):
"""
apply depletions and scale with logZ
"""
[self.__setattr__(key, val + self.logZ + self.depl[key])
for key, val in self.abund_0.items() if not hasattr(self, key)]
return
class newdopita(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
"""
Abundances from Dopita (2013)
        Solar Abundances from Grevesse 2010 - z= 0.013
includes smooth polynomial for N/O, C/O relationship
functional form for He(z)
new depletion factors
ISM grains
"""
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z = re_z
abundSet.__init__(self, 'newdopita', logZ)
def calcSpecial(self):
def calc_He(logZ):
return np.log10(0.0737 + (0.024 * (10.0 ** logZ)))
def calc_CNO(logZ):
oxy = np.array([7.39, 7.50, 7.69, 7.99, 8.17,
8.39, 8.69, 8.80, 8.99, 9.17, 9.39])
nit = np.array([-6.61, -6.47, -6.23, -5.79, -5.51,
-5.14, -4.60, -4.40, -4.04, -3.67, -3.17])
car = np.array([-5.58, -5.44, -5.20, -4.76, -4.48,
-4.11, -3.57, -3.37, -3.01, -2.64, -2.14])
O = self.abund_0['O'] + logZ
C = float(InterpUS(oxy, car, k=1)(O + 12.0))
N = float(InterpUS(oxy, nit, k=1)(O + 12.0))
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val + self.depl[key])
for key, val in zip(['C', 'N', 'O'], [C, N, O])]
return
def calcFinal(self):
[self.__setattr__(key, val + self.logZ + self.depl[key])
for key, val in self.abund_0.items() if not hasattr(self, key)]
return
class UVbyler(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
"""
Abundances from Dopita (2013)
        Solar Abundances from Grevesse 2010 - z= 0.013
New fit for N/O, C/O relationship
functional form for He(z)
new depletion factors
ISM grains
"""
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z = re_z
abundSet.__init__(self, 'UVbyler', logZ)
def calcSpecial(self):
def calc_He(logZ):
return np.log10(0.0737 + (0.024 * (10.0 ** logZ)))
def calc_CNO(logZ):
O = self.abund_0['O'] + logZ
# C = np.log10((1.0*10.**O)*(10.**-1.1 + 10.**(2.96 + O)))
C = np.log10((10. ** O) * (10. ** -0.7 + 10. ** (4.8 + 1.45 * O)))
# N = np.log10((1.0*10.**O)*(10.**-1.8 + 10.**(2.2 + O)))
# N = np.log10((10.**O)*(10.**-1.5 + 10.**(2.5 + 1.2*O)))
N = np.log10((1.0 * 10. ** O) * (10. ** -1.55 + 10. ** (2.3 + 1.1 * O)))
# N = -4.81 + logZ if logZ <= -0.3 else -4.51 + 2.0*logZ
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val + self.depl[key])
for key, val in zip(['C', 'N', 'O'], [C, N, O])]
return
def calcFinal(self):
[self.__setattr__(key, val + self.logZ + self.depl[key])
for key, val in self.abund_0.items() if not hasattr(self, key)]
return
class gutkin(abundSet):
solar = 'GASS10'
def __init__(self, logZ, dust=True, re_z=False):
"""
Gutkin+2016
PARSEC metallicity (Bressan+2012)
        based on Grevesse & Sauval (1998) and Caffau+2011
"""
if dust:
self.grains = 'no grains\ngrains ISM'
else:
self.grains = 'no grains'
self.re_z = re_z
abundSet.__init__(self, 'gutkin', logZ)
def calcSpecial(self):
def calc_He(logZ):
Z = (10. ** logZ) * 0.01524
Y = 0.2485 + 1.7756 * Z
X = 1. - Y - Z
return np.log10(Y / X / 4.)
def calc_CNO(logZ):
O = self.abund_0['O'] + logZ
N = np.log10((0.41 * 10. ** O) * (10. ** -1.6 + 10. ** (2.33 + O)))
C = self.abund_0['C'] + logZ
return C, N, O
self.__setattr__('He', calc_He(self.logZ))
C, N, O = calc_CNO(self.logZ)
[self.__setattr__(key, val)
for key, val in zip(['C', 'N', 'O'], [C, N, O])]
return
def calcFinal(self):
[self.__setattr__(key, val)
for key, val in self.abund_0.items() if not hasattr(self, key)]
return
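# Worked example (illustration only): the helium scaling in gutkin.calcSpecial
# at solar metallicity (logZ = 0):
#   Z = 0.01524, Y = 0.2485 + 1.7756*Z ~ 0.2756, X = 1 - Y - Z ~ 0.7092,
#   He/H = Y / X / 4 ~ 0.0971, log10(He/H) ~ -1.01,
# which recovers the He entry of the 'gutkin' table in load_abund below.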
def load_abund(set_name):
if set_name == 'dopita':
adict = dict(He=-1.01,
C=-3.44,
N=-3.95,
O=-3.07,
Ne=-3.91,
Mg=-4.42,
Si=-4.45,
S=-4.79,
Ar=-5.44,
Ca=-5.64,
Fe=-4.33,
F=-7.52,
Na=-5.69,
Al=-5.53,
P=-6.43,
Cl=-6.73,
K=-6.87,
Ti=-6.96,
Cr=-6.32,
Mn=-6.47,
Co=-7.08,
Ni=-5.75,
Cu=-7.73,
Zn=-7.34)
elif set_name == 'newdopita':
adict = dict(He=-1.01,
C=-3.57,
N=-4.60,
O=-3.31,
Ne=-4.07,
Na=-5.75,
Mg=-4.40,
Al=-5.55,
Si=-4.49,
S=-4.86,
Cl=-6.63,
Ar=-5.60,
Ca=-5.66,
Fe=-4.50,
Ni=-5.78,
F=-7.44,
P=-6.59,
K=-6.97,
Cr=-6.36,
Ti=-7.05,
Mn=-6.57,
Co=-7.01,
Cu=-7.81,
Zn=-7.44)
elif set_name == 'UVbyler':
adict = dict(He=-1.01,
C=-3.57,
N=-4.17,
O=-3.31,
Ne=-4.07,
Na=-5.75,
Mg=-4.40,
Al=-5.55,
Si=-4.49,
S=-4.86,
Cl=-6.63,
Ar=-5.60,
Ca=-5.66,
Fe=-4.50,
Ni=-5.78,
F=-7.44,
P=-6.59,
K=-6.97,
Cr=-6.36,
Ti=-7.05,
Mn=-6.57,
Co=-7.01,
Cu=-7.81,
Zn=-7.44)
elif set_name == 'gutkin':
adict = dict(He=-1.01,
C=-3.53,
N=-4.32,
O=-3.17,
F=-7.47,
Ne=-4.01,
Na=-5.70,
Mg=-4.45,
Al=-5.56,
Si=-4.48,
P=-6.57,
S=-4.87,
Cl=-6.53,
Ar=-5.63,
K=-6.92,
Ca=-5.67,
Sc=-8.86,
Ti=-7.01,
V=-8.03,
Cr=-6.36,
Mn=-6.64,
Fe=-4.51,
Co=-7.11,
Ni=-5.78,
Cu=-7.82,
Zn=-7.43)
return adict
def load_depl(set_name):
if set_name == 'dopita':
ddict = dict(C=-0.30,
N=-0.22,
O=-0.22,
Ne=0.0,
Mg=-0.70,
Si=-1.0,
S=0.0,
Ar=0.0,
Ca=-2.52,
Fe=-2.0,
F=0.0,
Na=0.0,
Al=0.0,
P=0.0,
Cl=0.0,
K=0.0,
Ti=0.0,
Cr=0.0,
Mn=0.0,
Co=0.0,
Ni=0.0,
Cu=0.0,
Zn=0.0)
elif set_name == 'newdopita':
ddict = dict(He=0.00,
C=-0.30,
N=-0.05,
O=-0.07,
Ne=0.00,
Na=-1.00,
Mg=-1.08,
Al=-1.39,
Si=-0.81,
S=0.00,
Cl=-1.00,
Ar=0.00,
Ca=-2.52,
Fe=-1.31,
Ni=-2.00,
F=0.0,
P=0.0,
K=0.0,
Cr=0.0,
Ti=0.0,
Mn=0.0,
Co=0.0,
Cu=0.0,
Zn=0.0)
elif set_name == 'UVbyler':
ddict = dict(He=0.00,
C=-0.30,
N=-0.05,
O=-0.07,
Ne=0.00,
Na=-1.00,
Mg=-1.08,
Al=-1.39,
Si=-0.81,
S=0.00,
Cl=-1.00,
Ar=0.00,
Ca=-2.52,
Fe=-1.31,
Ni=-2.00,
F=0.0,
P=0.0,
K=0.0,
Cr=0.0,
Ti=0.0,
Mn=0.0,
Co=0.0,
Cu=0.0,
Zn=0.0)
elif set_name == 'gutkin':
ddict = dict(He=0.00,
Li=-0.8,
C=-0.30,
O=-0.15,
Na=-0.60,
Mg=-0.70,
Al=-1.70,
Si=-1.00,
Cl=-0.30,
Ca=-2.52,
Fe=-2.00,
Ni=-1.40)
return ddict
| 31.159453
| 90
| 0.370934
| 6,739
| 0.492653
| 0
| 0
| 0
| 0
| 0
| 0
| 2,295
| 0.167775
|
c53b92a47fb947f6f8b829b01647aa8c055f8973
| 644
|
py
|
Python
|
character/migrations/0004_alter_character_alignment.py
|
scottBowles/dnd
|
a1ef333f1a865d51b5426dc4b3493e8437584565
|
[
"MIT"
] | null | null | null |
character/migrations/0004_alter_character_alignment.py
|
scottBowles/dnd
|
a1ef333f1a865d51b5426dc4b3493e8437584565
|
[
"MIT"
] | null | null | null |
character/migrations/0004_alter_character_alignment.py
|
scottBowles/dnd
|
a1ef333f1a865d51b5426dc4b3493e8437584565
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-08-12 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('character', '0003_alter_character_id'),
]
operations = [
migrations.AlterField(
model_name='character',
name='alignment',
field=models.CharField(blank=True, choices=[('LG', 'Lawful Good'), ('NG', 'Neutral Good'), ('CG', 'Chaotic Good'), ('LN', 'Lawful Neutral'), ('N', 'True Neutral'), ('CN', 'Chaotic Neutral'), ('LE', 'Lawful Evil'), ('NE', 'Neutral Evil'), ('CE', 'Chaotic Evil')], max_length=2, null=True),
),
]
| 33.894737
| 300
| 0.591615
| 551
| 0.85559
| 0
| 0
| 0
| 0
| 0
| 0
| 269
| 0.417702
|
c53bcf309d42be5b0611b4932b04593b5fb3c79b
| 818
|
py
|
Python
|
graphs_trees/check_balance/test_check_balance.py
|
filippovitale/interactive-coding-challenges
|
8380a7aa98618c3cc9c0271c30bd320937d431ad
|
[
"Apache-2.0"
] | null | null | null |
graphs_trees/check_balance/test_check_balance.py
|
filippovitale/interactive-coding-challenges
|
8380a7aa98618c3cc9c0271c30bd320937d431ad
|
[
"Apache-2.0"
] | null | null | null |
graphs_trees/check_balance/test_check_balance.py
|
filippovitale/interactive-coding-challenges
|
8380a7aa98618c3cc9c0271c30bd320937d431ad
|
[
"Apache-2.0"
] | 1
|
2019-12-13T12:57:44.000Z
|
2019-12-13T12:57:44.000Z
|
from nose.tools import assert_equal
class TestCheckBalance(object):
def test_check_balance(self):
node = Node(5)
insert(node, 3)
insert(node, 8)
insert(node, 1)
insert(node, 4)
assert_equal(check_balance(node), True)
node = Node(5)
insert(node, 3)
insert(node, 8)
insert(node, 9)
insert(node, 10)
assert_equal(check_balance(node), False)
node = Node(3)
insert(node, 2)
insert(node, 1)
insert(node, 5)
insert(node, 4)
insert(node, 6)
insert(node, 7)
assert_equal(check_balance(node), False)
print('Success: test_check_balance')
def main():
test = TestCheckBalance()
test.test_check_balance()
if __name__ == '__main__':
main()
| 20.974359
| 48
| 0.570905
| 666
| 0.814181
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.047677
|
c53bd8529e678df43ecc3a88f38641a5587a1587
| 1,129
|
py
|
Python
|
D_predict.py
|
shanqu91/microseismic_event_detection_via_CNN
|
ff9f0de135d14741c057a2a78e1fd69db18ae1d2
|
[
"MIT"
] | null | null | null |
D_predict.py
|
shanqu91/microseismic_event_detection_via_CNN
|
ff9f0de135d14741c057a2a78e1fd69db18ae1d2
|
[
"MIT"
] | null | null | null |
D_predict.py
|
shanqu91/microseismic_event_detection_via_CNN
|
ff9f0de135d14741c057a2a78e1fd69db18ae1d2
|
[
"MIT"
] | 1
|
2021-10-05T08:41:15.000Z
|
2021-10-05T08:41:15.000Z
|
import keras
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from scipy import io
mat_contents = io.loadmat('Data/X_test_0.mat')
X_test_0 = mat_contents['X_test_0']
mat_contents = io.loadmat('Data/X_test_1.mat')
X_test_1 = mat_contents['X_test_1']
batch_size = 40
num_classes = 2
test_datasize, patch_rows, patch_cols = X_test_0.shape[0], X_test_0.shape[1], X_test_0.shape[2]
X_test_0 = X_test_0.reshape(test_datasize, patch_rows, patch_cols, 1)
test_datasize, patch_rows, patch_cols = X_test_1.shape[0], X_test_1.shape[1], X_test_1.shape[2]
X_test_1 = X_test_1.reshape(test_datasize, patch_rows, patch_cols, 1)
print('X_test_0 shape:', X_test_0.shape)
print('X_test_1 shape:', X_test_1.shape)
# load trained model
model = load_model('Data/trained_model.h5')
# prediction
Y_test_0 = model.predict(X_test_0, batch_size=batch_size, verbose=1)
Y_test_1 = model.predict(X_test_1, batch_size=batch_size, verbose=1)
io.savemat('Data/Y_test_0.mat', {'Y_test_0':Y_test_0})
io.savemat('Data/Y_test_1.mat', {'Y_test_1':Y_test_1})
| 35.28125
| 95
| 0.782108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.181577
|
c53d72e1616e580f62f88e5fc1f0a262cb103728
| 94
|
py
|
Python
|
app/db_manager/apps.py
|
PragmaticCoder/Linkedin-Analytics
|
a990b5cae02f0d758bc3123bde643d13a439efa3
|
[
"MIT"
] | 13
|
2018-07-31T15:37:47.000Z
|
2021-12-20T04:48:13.000Z
|
app/db_manager/apps.py
|
PragmaticCoder/Linkedin-Analytics
|
a990b5cae02f0d758bc3123bde643d13a439efa3
|
[
"MIT"
] | 25
|
2019-12-10T20:03:48.000Z
|
2022-03-11T23:26:11.000Z
|
app/db_manager/apps.py
|
PragmaticCoder/Linkedin-Analytics
|
a990b5cae02f0d758bc3123bde643d13a439efa3
|
[
"MIT"
] | 4
|
2020-03-24T20:13:50.000Z
|
2022-02-05T20:40:48.000Z
|
from django.apps import AppConfig
class DbManagerConfig(AppConfig):
name = 'db_manager'
| 15.666667
| 33
| 0.765957
| 57
| 0.606383
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.12766
|
c53d83148b42eaa02961efd8a515c82ec643034c
| 813
|
py
|
Python
|
examples/dialogs.py
|
tgolsson/appJar
|
5e2f8bff44e927e7c2bae17fccddc6dbf79952f0
|
[
"Apache-2.0"
] | 666
|
2016-11-14T18:17:40.000Z
|
2022-03-29T03:53:22.000Z
|
examples/dialogs.py
|
tgolsson/appJar
|
5e2f8bff44e927e7c2bae17fccddc6dbf79952f0
|
[
"Apache-2.0"
] | 598
|
2016-10-20T21:04:09.000Z
|
2022-03-15T22:44:49.000Z
|
examples/dialogs.py
|
tgolsson/appJar
|
5e2f8bff44e927e7c2bae17fccddc6dbf79952f0
|
[
"Apache-2.0"
] | 95
|
2017-01-19T12:23:58.000Z
|
2022-03-06T18:16:21.000Z
|
from appJar import gui
def press(btn):
if btn == "info": app.infoBox("Title Here", "Message here...")
if btn == "error": app.errorBox("Title Here", "Message here...")
if btn == "warning": app.warningBox("Title Here", "Message here...")
if btn == "yesno": app.yesNoBox("Title Here", "Message here...")
if btn == "question": app.questionBox("Title Here", "Message here...")
if btn == "ok": app.okBox("Title Here", "Message here...")
if btn == "retry": app.retryBox("Title Here", "Message here...")
if btn == "text": app.textBox("Title Here", "Message here...")
if btn == "number": app.numberBox("Title Here", "Message here...")
app=gui()
app.addButtons(["info", "error", "warning", "yesno", "question"], press)
app.addButtons(["ok", "retry", "text", "number"], press)
app.go()
| 45.166667
| 74
| 0.607626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 389
| 0.478475
|
c53d9c366f6302c3f4189f86bcaf5a05f084763e
| 19,136
|
py
|
Python
|
src_RealData/Nets/ObjectOriented.py
|
XYZsake/DRFNS
|
73fc5683db5e9f860846e22c8c0daf73b7103082
|
[
"MIT"
] | 42
|
2018-10-07T08:19:01.000Z
|
2022-02-08T17:41:24.000Z
|
src_RealData/Nets/ObjectOriented.py
|
XYZsake/DRFNS
|
73fc5683db5e9f860846e22c8c0daf73b7103082
|
[
"MIT"
] | 11
|
2018-12-22T00:15:46.000Z
|
2021-12-03T10:29:32.000Z
|
src_RealData/Nets/ObjectOriented.py
|
XYZsake/DRFNS
|
73fc5683db5e9f860846e22c8c0daf73b7103082
|
[
"MIT"
] | 14
|
2018-08-26T06:47:06.000Z
|
2021-07-24T11:52:58.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import os
from sklearn.metrics import confusion_matrix
from datetime import datetime
class ConvolutionalNeuralNetwork:
"""
Generic object for create DNN models.
This class instinciates all functions
needed for DNN operations.
"""
def __init__(
self,
LEARNING_RATE=0.01,
K=0.96,
BATCH_SIZE=1,
IMAGE_SIZE=28,
NUM_LABELS=10,
NUM_CHANNELS=1,
NUM_TEST=10000,
STEPS=2000,
LRSTEP=200,
DECAY_EMA=0.9999,
N_PRINT = 100,
LOG="/tmp/net",
SEED=42,
DEBUG=True,
WEIGHT_DECAY=0.00005,
LOSS_FUNC=tf.nn.l2_loss,
N_FEATURES=16):
self.LEARNING_RATE = LEARNING_RATE
self.K = K
self.BATCH_SIZE = BATCH_SIZE
self.IMAGE_SIZE = IMAGE_SIZE
self.NUM_LABELS = NUM_LABELS
self.NUM_CHANNELS = NUM_CHANNELS
self.N_FEATURES = N_FEATURES
# self.NUM_TEST = NUM_TEST
self.STEPS = STEPS
self.N_PRINT = N_PRINT
self.LRSTEP = LRSTEP
self.DECAY_EMA = DECAY_EMA
self.LOG = LOG
self.SEED = SEED
self.sess = tf.InteractiveSession()
self.sess.as_default()
self.var_to_reg = []
self.var_to_sum = []
self.init_vars()
self.init_model_architecture()
self.init_training_graph()
self.Saver()
self.DEBUG = DEBUG
self.loss_func = LOSS_FUNC
self.weight_decay = WEIGHT_DECAY
def regularize_model(self):
"""
Adds regularization to parameters of the model given LOSS_FUNC
"""
if self.DEBUG:
for var in self.var_to_sum + self.var_to_reg:
self.add_to_summary(var)
self.WritteSummaryImages()
for var in self.var_to_reg:
self.add_to_regularization(var)
def add_to_summary(self, var):
"""
Adds histogram for each parameter in var
"""
if var is not None:
tf.summary.histogram(var.op.name, var)
def add_to_regularization(self, var):
"""
Combines loss with regularization loss
"""
if var is not None:
self.loss = self.loss + self.weight_decay * self.loss_func(var)
def add_activation_summary(self, var):
"""
Add activation summary with information about sparsity
"""
if var is not None:
tf.summary.histogram(var.op.name + "/activation", var)
tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(self, grad, var):
"""
Add gradiant summary to summary
"""
if grad is not None:
tf.summary.histogram(var.op.name + "/gradient", grad)
def input_node_f(self):
"""
Input node, called when initialising the network
"""
return tf.placeholder(
tf.float32,
shape=(self.BATCH_SIZE, self.IMAGE_SIZE, self.IMAGE_SIZE, self.NUM_CHANNELS))
def label_node_f(self):
"""
Label node, called when initialising the network
"""
return tf.placeholder(
tf.float32,
shape=(self.BATCH_SIZE, self.IMAGE_SIZE, self.IMAGE_SIZE, 1))
def conv_layer_f(self, i_layer, w_var, strides, scope_name, padding="SAME"):
"""
Defining convolution layer
"""
with tf.name_scope(scope_name):
return tf.nn.conv2d(i_layer, w_var, strides=strides, padding=padding)
def relu_layer_f(self, i_layer, biases, scope_name):
"""
Defining relu layer
"""
with tf.name_scope(scope_name):
act = tf.nn.relu(tf.nn.bias_add(i_layer, biases))
self.var_to_sum.append(act)
return act
def weight_const_f(self, ks, inchannels, outchannels, stddev, scope_name, name="W", reg="True"):
"""
Defining parameter to give to a convolution layer
"""
with tf.name_scope(scope_name):
            K = tf.Variable(tf.truncated_normal([ks, ks, inchannels, outchannels],  # ks x ks filter
stddev=stddev,
seed=self.SEED))
self.var_to_reg.append(K)
self.var_to_sum.append(K)
return K
def weight_xavier(self, ks, inchannels, outchannels, scope_name, name="W"):
"""
        Initialises a convolution kernel for a convolution layer with Xavier initialisation
"""
xavier_std = np.sqrt( 1. / float(ks * ks * inchannels) )
return self.weight_const_f(ks, inchannels, outchannels, xavier_std, scope_name, name=name)
def biases_const_f(self, const, shape, scope_name, name="B"):
"""
        Initialises biases
"""
with tf.name_scope(scope_name):
b = tf.Variable(tf.constant(const, shape=[shape]), name=name)
self.var_to_sum.append(b)
return b
def max_pool(self, i_layer, ksize=[1,2,2,1], strides=[1,2,2,1],
padding="SAME", name="MaxPool"):
"""
Performs max pool operation
"""
return tf.nn.max_pool(i_layer, ksize=ksize, strides=strides,
padding=padding, name=name)
def BatchNorm(self, Input, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
"""
Performs batch normalisation.
Code taken from http://stackoverflow.com/a/34634291/2267819
"""
with tf.name_scope(scope):
init_beta = tf.constant(0.0, shape=[n_out])
beta = tf.Variable(init_beta, name="beta")
init_gamma = tf.random_normal([n_out], 1.0, 0.02)
gamma = tf.Variable(init_gamma)
batch_mean, batch_var = tf.nn.moments(Input, [0, 1, 2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=decay)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(Input, mean, var, beta, gamma, eps)
return normed
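    # Illustration only (a sketch, not part of the original model): the
    # arithmetic that tf.nn.batch_normalization performs above, written out
    # with NumPy for one NHWC batch.
    @staticmethod
    def _batchnorm_sketch():
        x = np.random.randn(4, 8, 8, 3)        # one NHWC batch
        mean = x.mean(axis=(0, 1, 2))          # per-channel moments
        var = x.var(axis=(0, 1, 2))
        beta, gamma, eps = 0.0, 1.0, 1e-5
        # normed = gamma * (x - mean) / sqrt(var + eps) + beta
        return gamma * (x - mean) / np.sqrt(var + eps) + beta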
def DropOutLayer(self, Input, scope="DropOut"):
"""
Performs drop out on the input layer
"""
with tf.name_scope(scope):
return tf.nn.dropout(Input, self.keep_prob) ##keep prob has to be defined in init_var
def init_vars(self):
"""
Initialises variables for the graph
"""
self.input_node = self.input_node_f()
self.train_labels_node = self.label_node_f()
self.conv1_weights = self.weight_xavier(5, self.NUM_CHANNELS, 8, "conv1/")
self.conv1_biases = self.biases_const_f(0.1, 8, "conv1/")
self.conv2_weights = self.weight_xavier(5, 8, 8, "conv2/")
self.conv2_biases = self.biases_const_f(0.1, 8, "conv2/")
self.conv3_weights = self.weight_xavier(5, 8, 8, "conv3/")
self.conv3_biases = self.biases_const_f(0.1, 8, "conv3/")
self.logits_weight = self.weight_xavier(1, 8, self.NUM_LABELS, "logits/")
self.logits_biases = self.biases_const_f(0.1, self.NUM_LABELS, "logits/")
self.keep_prob = tf.Variable(0.5, name="dropout_prob")
print('Model variables initialised')
def WritteSummaryImages(self):
"""
Image summary to add to the summary
"""
tf.summary.image("Input", self.input_node, max_outputs=4)
tf.summary.image("Label", self.train_labels_node, max_outputs=4)
tf.summary.image("Pred", tf.expand_dims(tf.cast(self.predictions, tf.float32), dim=3), max_outputs=4)
def init_model_architecture(self):
"""
Graph structure for the model
"""
self.conv1 = self.conv_layer_f(self.input_node, self.conv1_weights,
[1,1,1,1], "conv1/")
self.relu1 = self.relu_layer_f(self.conv1, self.conv1_biases, "conv1/")
self.conv2 = self.conv_layer_f(self.relu1, self.conv2_weights,
[1,1,1,1], "conv2/")
self.relu2 = self.relu_layer_f(self.conv2, self.conv2_biases, "conv2/")
self.conv3 = self.conv_layer_f(self.relu2, self.conv3_weights,
[1,1,1,1], "conv3/")
self.relu3 = self.relu_layer_f(self.conv3, self.conv3_biases, "conv3/")
self.last = self.relu3
print('Model architecture initialised')
def init_training_graph(self):
"""
Graph optimization part, here we define the loss and how the model is evaluated
"""
with tf.name_scope('Evaluation'):
self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1,1,1,1], scope_name="logits/")
self.predictions = tf.argmax(self.logits, axis=3)
with tf.name_scope('Loss'):
self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,
labels=tf.squeeze(tf.cast(self.train_labels_node, tf.int32), squeeze_dims=[3]),
name="entropy")))
tf.summary.scalar("entropy", self.loss)
with tf.name_scope('Accuracy'):
LabelInt = tf.squeeze(tf.cast(self.train_labels_node, tf.int64), squeeze_dims=[3])
CorrectPrediction = tf.equal(self.predictions, LabelInt)
self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
tf.summary.scalar("accuracy", self.accuracy)
with tf.name_scope('Prediction'):
self.TP = tf.count_nonzero(self.predictions * LabelInt)
self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)
with tf.name_scope('Precision'):
self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
tf.summary.scalar('Precision', self.precision)
with tf.name_scope('Recall'):
self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
tf.summary.scalar('Recall', self.recall)
with tf.name_scope('F1'):
num = tf.multiply(self.precision, self.recall)
dem = tf.add(self.precision, self.recall)
self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
tf.summary.scalar('F1', self.F1)
with tf.name_scope('MeanAccuracy'):
Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision) ,2)
tf.summary.scalar('Performance', self.MeanAcc)
#self.batch = tf.Variable(0, name = "batch_iterator")
self.train_prediction = tf.nn.softmax(self.logits)
self.test_prediction = tf.nn.softmax(self.logits)
tf.global_variables_initializer().run()
print('Computational graph initialised')
def error_rate(self, predictions, labels, iter):
"""
Operations to perform on the training prediction every N_PRINT iterations.
These values are printed to screen.
"""
predictions = np.argmax(predictions, 3)
labels = labels[:,:,:,0]
cm = confusion_matrix(labels.flatten(), predictions.flatten(), labels=[0, 1]).astype(np.float)
b, x, y = predictions.shape
total = b * x * y
        # sklearn's confusion_matrix: rows are true labels, columns predictions
        TP = cm[1, 1]
        TN = cm[0, 0]
        FP = cm[0, 1]
        FN = cm[1, 0]
acc = (TP + TN) / (TP + TN + FN + FP) * 100
precision = TP / (TP + FP)
acc1 = np.mean([precision, TN / (TN + FN)]) * 100
recall = TP / (TP + FN)
F1 = 2 * precision * recall / (recall + precision)
error = 100 - acc
return error, acc, acc1, recall * 100, precision * 100, F1 * 100
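    # Worked example (illustration only): with TP=40, TN=50, FP=10, FN=0 the
    # formulas above give
    #   acc       = (40 + 50) / 100             = 90%
    #   precision = 40 / (40 + 10)              = 0.8
    #   recall    = 40 / (40 + 0)               = 1.0
    #   F1        = 2 * 0.8 * 1.0 / (0.8 + 1.0) ~ 0.89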
def optimization(self, var_list):
"""
Defining the optimization method to solve the task
"""
with tf.name_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(self.learning_rate)
grads = optimizer.compute_gradients(self.loss, var_list=var_list)
if self.DEBUG:
for grad, var in grads:
self.add_gradient_summary(grad, var)
self.optimizer = optimizer.apply_gradients(grads, global_step=self.global_step)
def LearningRateSchedule(self, lr, k, epoch):
"""
Defines the learning rate
"""
with tf.name_scope('LearningRateSchedule'):
self.global_step = tf.Variable(0., trainable=False)
tf.add_to_collection('global_step', self.global_step)
if self.LRSTEP == "epoch/2":
decay_step = float(epoch) / (2 * self.BATCH_SIZE)
elif "epoch" in self.LRSTEP:
num = int(self.LRSTEP[:-5])
decay_step = float(num) * float(epoch) / self.BATCH_SIZE
else:
decay_step = float(self.LRSTEP)
self.learning_rate = tf.train.exponential_decay(
lr,
self.global_step,
decay_step,
k,
staircase=True)
tf.summary.scalar("learning_rate", self.learning_rate)
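    # Worked example (illustration only): with lr=0.01, k=0.96 and
    # decay_step=200, the staircase schedule above gives
    #   steps   0-199: 0.01 * 0.96**0 = 0.0100
    #   steps 200-399: 0.01 * 0.96**1 = 0.0096
    #   steps 400-599: 0.01 * 0.96**2 ~ 0.0092
    # i.e. learning_rate(step) = lr * k ** floor(step / decay_step).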
def Validation(self, DG_TEST, step):
"""
How the models validates on the test set.
"""
n_test = DG_TEST.length
n_batch = int(np.ceil(float(n_test) / self.BATCH_SIZE))
l, acc, F1, recall, precision, meanacc = 0., 0., 0., 0., 0., 0.
for i in range(n_batch):
Xval, Yval = DG_TEST.Batch(0, self.BATCH_SIZE)
feed_dict = {self.input_node: Xval,
self.train_labels_node: Yval}
l_tmp, acc_tmp, F1_tmp, recall_tmp, precision_tmp, meanacc_tmp, pred = self.sess.run([self.loss, self.accuracy, self.F1, self.recall, self.precision, self.MeanAcc, self.predictions], feed_dict=feed_dict)
l += l_tmp
acc += acc_tmp
F1 += F1_tmp
recall += recall_tmp
precision += precision_tmp
meanacc += meanacc_tmp
l, acc, F1, recall, precision, meanacc = np.array([l, acc, F1, recall, precision, meanacc]) / n_batch
summary = tf.Summary()
summary.value.add(tag="Test/Accuracy", simple_value=acc)
summary.value.add(tag="Test/Loss", simple_value=l)
summary.value.add(tag="Test/F1", simple_value=F1)
summary.value.add(tag="Test/Recall", simple_value=recall)
summary.value.add(tag="Test/Precision", simple_value=precision)
summary.value.add(tag="Test/Performance", simple_value=meanacc)
self.summary_test_writer.add_summary(summary, step)
print(' Validation loss: %.1f' % l)
        print(' Accuracy: %.1f%% \n acc1: %.1f%% \n recall: %.1f%% \n prec: %.1f%% \n f1: %.1f%% \n' % (acc * 100, meanacc * 100, recall * 100, precision * 100, F1 * 100))
self.saver.save(self.sess, self.LOG + '/' + "model.ckpt", global_step=self.global_step)
def Saver(self):
"""
Defining the saver, it will load if possible.
"""
print("Setting up Saver...")
self.saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(self.LOG)
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
print("Model restored...")
def ExponentialMovingAverage(self, var_list, decay=0.9999):
"""
Adding exponential moving average to increase performance.
This aggregates parameters from different steps in order to have
a more robust classifier.
"""
with tf.name_scope('ExponentialMovingAverage'):
ema = tf.train.ExponentialMovingAverage(decay=decay)
maintain_averages_op = ema.apply(var_list)
# Create an op that will update the moving averages after each training
# step. This is what we will use in place of the usual training op.
with tf.control_dependencies([self.optimizer]):
self.training_op = tf.group(maintain_averages_op)
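    # Illustration only (a sketch, not part of the original model): the
    # shadow-variable update that ema.apply() maintains for each parameter,
    # ignoring TensorFlow's optional bias correction.
    @staticmethod
    def _ema_sketch(decay=0.9999):
        shadow = 0.0
        for w in (1.0, 1.0, 1.0):   # successive values of one parameter
            # shadow <- decay * shadow + (1 - decay) * w
            shadow = decay * shadow + (1.0 - decay) * w
        return shadow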
def train(self, DGTrain, DGTest, saver=True):
"""
How the model should train.
"""
epoch = DGTrain.length
self.LearningRateSchedule(self.LEARNING_RATE, self.K, epoch)
trainable_var = tf.trainable_variables()
self.regularize_model()
self.optimization(trainable_var)
self.ExponentialMovingAverage(trainable_var, self.DECAY_EMA)
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
self.summary_test_writer = tf.summary.FileWriter(self.LOG + '/test',
graph=self.sess.graph)
self.summary_writer = tf.summary.FileWriter(self.LOG + '/train', graph=self.sess.graph)
merged_summary = tf.summary.merge_all()
steps = self.STEPS
for step in range(steps):
batch_data, batch_labels = DGTrain.Batch(0, self.BATCH_SIZE)
feed_dict = {self.input_node: batch_data,
self.train_labels_node: batch_labels}
# self.optimizer is replaced by self.training_op for the exponential moving decay
_, l, lr, predictions, s = self.sess.run(
[self.training_op, self.loss, self.learning_rate,
self.train_prediction, merged_summary],
feed_dict=feed_dict)
if step % self.N_PRINT == 0:
i = datetime.now()
                print(i.strftime('%Y/%m/%d %H:%M:%S: \n '))
                self.summary_writer.add_summary(s, step)
                error, acc, acc1, recall, prec, f1 = self.error_rate(predictions, batch_labels, step)
                print(' Step %d of %d' % (step, steps))
                print(' Learning rate: %.5f \n' % lr)
                print(' Mini-batch loss: %.5f \n Accuracy: %.1f%% \n acc1: %.1f%% \n recall: %.1f%% \n prec: %.1f%% \n f1: %.1f%% \n' %
                      (l, acc, acc1, recall, prec, f1))
self.Validation(DGTest, step)
| 38.272
| 215
| 0.570861
| 18,959
| 0.99075
| 0
| 0
| 0
| 0
| 0
| 0
| 3,595
| 0.187866
|
c53e560dfa34e9fcc79e711abf7084717bfce494
| 1,571
|
py
|
Python
|
flaskr/test/unit/webapp/test_change_light_color.py
|
UnibucProjects/SmartAquarium
|
6f3c16fb7a45218e763b46223568f6c3e5b66bfd
|
[
"MIT"
] | 6
|
2022-02-02T19:37:57.000Z
|
2022-02-03T15:12:32.000Z
|
flaskr/test/unit/webapp/test_change_light_color.py
|
UnibucProjects/SmartAquarium
|
6f3c16fb7a45218e763b46223568f6c3e5b66bfd
|
[
"MIT"
] | 18
|
2022-01-29T22:47:46.000Z
|
2022-02-03T15:30:28.000Z
|
flaskr/test/unit/webapp/test_change_light_color.py
|
UnibucProjects/SmartAquarium
|
6f3c16fb7a45218e763b46223568f6c3e5b66bfd
|
[
"MIT"
] | null | null | null |
import pytest

from app import create_app, create_rest_api
from db import get_db
@pytest.fixture
def client():
local_app = create_app()
create_rest_api(local_app)
client = local_app.test_client()
yield client
def get_max_aquarium_id():
light_data = get_db().execute(
'SELECT id, timestamp, default_mode, total_food_quantity'
' FROM aquarium'
' ORDER BY id DESC'
).fetchone()
return light_data['id']
def test_get_aquarium_light_color_invalid_id(client):
    with create_app().app_context():
        invalid_id = get_max_aquarium_id() + 1
        # client.get returns a response object, so name it accordingly
        response = client.get('/lightColor/' + str(invalid_id))
        assert response.status_code == 403


def test_get_aquarium_light_color_valid_id(client):
    with create_app().app_context():
        valid_id = get_max_aquarium_id()
        response = client.get('/lightColor/' + str(valid_id))
        assert response.status_code == 200


def test_change_light_color_valid_aquarium_id(client):
    color = 'test_color'
    with create_app().app_context():
        valid_id = get_max_aquarium_id()
        response = client.put('/lightColor/' + str(valid_id) + '?color=' + color)
        assert response.status_code == 200


def test_change_light_color_invalid_aquarium_id(client):
    color = 'test_color'
    with create_app().app_context():
        invalid_id = get_max_aquarium_id() + 1
        response = client.put('/lightColor/' + str(invalid_id) + '?color=' + color)
        assert response.status_code == 403
| 26.627119
| 78
| 0.706556
| 0
| 0
| 128
| 0.081477
| 144
| 0.091661
| 0
| 0
| 194
| 0.123488
|
c53e6e767c955b2bf53a179312e0dc8ac8e05972
| 4,293
|
py
|
Python
|
commands/inventory.py
|
zbylyrcxr/DennisMUD
|
cb9be389e3be3e267fd78b1520ed2902941742da
|
[
"MIT"
] | 2
|
2022-02-21T17:55:03.000Z
|
2022-02-22T06:25:04.000Z
|
commands/inventory.py
|
zbylyrcxr/DennisMUD
|
cb9be389e3be3e267fd78b1520ed2902941742da
|
[
"MIT"
] | 3
|
2022-02-09T18:18:29.000Z
|
2022-03-07T08:15:54.000Z
|
commands/inventory.py
|
zbylyrcxr/DennisMUD
|
cb9be389e3be3e267fd78b1520ed2902941742da
|
[
"MIT"
] | 1
|
2022-03-07T08:10:59.000Z
|
2022-03-07T08:10:59.000Z
|
#######################
# Dennis MUD #
# inventory.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
from lib.litnumbers import *
from lib.vigenere import *
import random
NAME = "inventory"
CATEGORIES = ["items"]
ALIASES = ["inv", "i"]
USAGE = "inventory"
DESCRIPTION = "List all of the items in your inventory."
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argc=0, awake=True):
return False
# Check if our inventory is empty.
if not console.user["equipment"]:
console.msg("You are not holding anything.")
    cursedinv = False
    for item in console.user["inventory"] + console.user["equipment"]:
        item = COMMON.check_item(NAME, console, item, reason=False)
        try:
            if item["cursed"]["cursetype"] == "invmess":
                cursedinv = True
                break
        except (KeyError, TypeError):
            # the item lookup failed or the item carries no curse record
            pass
mylang=console.database.user_by_name(console.user["name"])["lang"]
# Holding items
if console.user["equipment"]:
hitemlist=[]
for hitem in console.user["equipment"]:
hitem = console.database.item_by_id(hitem)
hitemname=hitem["name"]
hitemid=hitem["id"]
if cursedinv:
hitemname=encvigenere(hitemname, mylang)
hitemid=random.randint(1,100)
if console.user["builder"]["enabled"]: hitemlist.append("{0} (ID: {1})".format(COMMON.format_item(NAME, hitemname),hitemid))
else: hitemlist.append("{0}".format(COMMON.format_item(NAME, hitemname)))
hitemlist=' and '.join(hitemlist)
console.msg("You are holding {0}.".format(hitemlist))
# Check if our inventory is empty.
if not console.user["inventory"]:
console.msg("Your inventory is empty.")
# Enumerate our inventory.
itemcount = 0
for itemid in sorted(console.user["inventory"]):
# Lookup the target item and perform item checks.
thisitem = COMMON.check_item(NAME, console, itemid, reason=False)
# Uh oh, an item in our inventory doesn't actually exist.
if not thisitem:
console.log.error("Item referenced in user inventory does not exist: {user} :: {item}",
user=console.user["name"], item=itemid)
console.msg("{0}: ERROR: Item referenced in your inventory does not exist: {1}".format(NAME, itemid))
continue
# Show the item's name and ID.
hitemname=thisitem["name"]
if cursedinv:
hitemname=encvigenere(hitemname, mylang)
itemid=random.randint(1,100)
if console.user["builder"]["enabled"]: console.msg("{0} (ID: {1})".format(hitemname, itemid))
else: console.msg("{0}".format(hitemname))
# Keep count.
itemcount += 1
# Finished.
if itemcount>1:
console.msg("There are {0} items in your inventory.".format(int_to_en(itemcount)))
elif itemcount==1:
console.msg("There is one item in your inventory.".format(int_to_en(itemcount)))
else:
console.msg("There are no items in your inventory.")
return True
| 39.027273
| 136
| 0.642208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,126
| 0.495225
|
c53ebab62d8ce95d55ec92330a072c34d445b216
| 296
|
py
|
Python
|
tests/polynomials.py
|
mernst/cozy
|
d7b2c0ee575057dea4ebec201d579f0ecd785b1b
|
[
"Apache-2.0"
] | 188
|
2017-11-27T18:59:34.000Z
|
2021-12-31T02:28:33.000Z
|
tests/polynomials.py
|
mernst/cozy
|
d7b2c0ee575057dea4ebec201d579f0ecd785b1b
|
[
"Apache-2.0"
] | 95
|
2017-11-13T01:21:48.000Z
|
2020-10-30T06:38:14.000Z
|
tests/polynomials.py
|
mernst/cozy
|
d7b2c0ee575057dea4ebec201d579f0ecd785b1b
|
[
"Apache-2.0"
] | 16
|
2018-02-13T04:49:09.000Z
|
2021-02-06T13:26:46.000Z
|
import unittest
from cozy.polynomials import Polynomial
class TestPolynomials(unittest.TestCase):
def test_sorting(self):
self.assertLess(Polynomial([2019, 944, 95]), Polynomial([2012, 945, 95]))
self.assertGreater(Polynomial([2012, 945, 95]), Polynomial([2019, 944, 95]))
| 29.6
| 84
| 0.712838
| 237
| 0.800676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c53ef504f8c908892ab80122b5998f9150c4ae18
| 823
|
py
|
Python
|
presenters/calculator_presenter.py
|
RamonWill/portfolio-management-project
|
ac8ce313f8d62f09810fc1da19d6b252f193871b
|
[
"MIT"
] | 14
|
2020-01-01T04:59:06.000Z
|
2022-02-08T06:48:21.000Z
|
presenters/calculator_presenter.py
|
linhvien/portfolio-management-project
|
ac8ce313f8d62f09810fc1da19d6b252f193871b
|
[
"MIT"
] | null | null | null |
presenters/calculator_presenter.py
|
linhvien/portfolio-management-project
|
ac8ce313f8d62f09810fc1da19d6b252f193871b
|
[
"MIT"
] | 8
|
2020-10-15T06:52:37.000Z
|
2021-10-04T06:44:36.000Z
|
from custom_objects import FinanceCalculator
from tkinter import messagebox
class CalculationsPresenter(object):
def __init__(self, view):
self.view = view
def convert_price(self, price):
try:
converted_price = FinanceCalculator.decimal_to_treasury(price)
self.view.display_conversion(new_price=converted_price)
return None
        except (ValueError, IndexError):
            # Not a valid decimal price; fall through to the reverse conversion.
            pass
try:
converted_price = FinanceCalculator.treasury_to_decimal(price)
self.view.display_conversion(new_price=converted_price)
        except (ValueError, IndexError):
messagebox.showinfo(
message="An example of a valid price would be 108.50 or 108-16",
title="Invalid Price",
)
| 34.291667
| 80
| 0.647631
| 744
| 0.90401
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.085055
|
c53f341c44f58f7cf080b91299e6c06e76e614e8
| 1,877
|
py
|
Python
|
core/power_status_monitor.py
|
kangyifei/CloudSimPy
|
45912e7ea35086b67941624102e400cb22e549ab
|
[
"MIT"
] | null | null | null |
core/power_status_monitor.py
|
kangyifei/CloudSimPy
|
45912e7ea35086b67941624102e400cb22e549ab
|
[
"MIT"
] | null | null | null |
core/power_status_monitor.py
|
kangyifei/CloudSimPy
|
45912e7ea35086b67941624102e400cb22e549ab
|
[
"MIT"
] | null | null | null |
import json
class PowerStateMonitor(object):
def __init__(self, simulation):
self.simulation = simulation
self.env = simulation.env
self.event_file = simulation.event_file + "_power"
self.events = []
def __cal_machine_power(self):
machines = self.simulation.cluster.machines
sum = 0
for machine in machines:
power = 100 * machine.state['cpu_usage_percent'] + 2 * machine.state['memory_usage_percent']
sum += power
return sum
def __cal_cooling_equipment_power(self):
cooling_equipment = self.simulation.cluster.cooling_equipment
if ((cooling_equipment.state['inlet_temp'] - cooling_equipment.state['setting_temp']) < 0):
power = 0
else:
power = 100 * (cooling_equipment.state['inlet_temp'] - cooling_equipment.state['setting_temp'])
return power
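    # Worked example of the two power formulas above (state values are
    # hypothetical; added for illustration only):
    #   machine power: 100 * 0.5 (cpu) + 2 * 0.25 (memory) = 50.5
    #   cooling power: 100 * (25.0 inlet - 22.0 setting) = 300.0,
    #   and 0 whenever the inlet temperature is below the set point.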
def run(self):
machine_power_sum = 0
cooling_power_sum = 0
while not self.simulation.finished:
machine_power = round(self.__cal_machine_power(), 2)
cooling_power = round(self.__cal_cooling_equipment_power(), 2)
machine_power_sum += machine_power
cooling_power_sum += cooling_power
state = {
'timestamp': self.env.now,
'machine_power': machine_power,
'cooling_power': cooling_power
}
self.events.append(state)
yield self.env.timeout(1)
state = {
'timestamp': self.env.now,
'machine_power_sum': machine_power_sum,
'cooling_power_sum': cooling_power_sum
}
self.events.append(state)
self.__write_to_file()
def __write_to_file(self):
with open(self.event_file, 'w') as f:
json.dump(self.events, f, indent=4)
| 33.517857
| 107
| 0.606819
| 1,862
| 0.992009
| 839
| 0.44699
| 0
| 0
| 0
| 0
| 194
| 0.103356
|
c53f7e729f7148ea37a06ebe087c005b16755a1d
| 25,133
|
py
|
Python
|
maintest.py
|
thorsilver/ABM-for-social-care
|
3a47868d2881799980a3f9f24b78c66a31eda194
|
[
"MIT"
] | null | null | null |
maintest.py
|
thorsilver/ABM-for-social-care
|
3a47868d2881799980a3f9f24b78c66a31eda194
|
[
"MIT"
] | null | null | null |
maintest.py
|
thorsilver/ABM-for-social-care
|
3a47868d2881799980a3f9f24b78c66a31eda194
|
[
"MIT"
] | 1
|
2018-01-05T15:42:40.000Z
|
2018-01-05T15:42:40.000Z
|
from sim import Sim
import os
import cProfile
import pylab
import math
import matplotlib.pyplot as plt
import argparse
import json
import decimal
import numpy as np
def init_params():
"""Set up the simulation parameters."""
p = {}
## The basics: starting population and year, etc.
p['initialPop'] = 750
p['startYear'] = 1860
p['endYear'] = 2050
p['thePresent'] = 2012
p['statsCollectFrom'] = 1960
p['minStartAge'] = 20
p['maxStartAge'] = 40
p['verboseDebugging'] = False
p['singleRunGraphs'] = True
p['favouriteSeed'] = None
p['numRepeats'] = 1
p['loadFromFile'] = False
## Mortality statistics
p['baseDieProb'] = 0.0001
p['babyDieProb'] = 0.005
p['maleAgeScaling'] = 14.0
p['maleAgeDieProb'] = 0.00021
p['femaleAgeScaling'] = 15.5
p['femaleAgeDieProb'] = 0.00019
p['num5YearAgeClasses'] = 28
## Transitions to care statistics
p['baseCareProb'] = 0.0002
p['personCareProb'] = 0.0008
##p['maleAgeCareProb'] = 0.0008
p['maleAgeCareScaling'] = 18.0
##p['femaleAgeCareProb'] = 0.0008
p['femaleAgeCareScaling'] = 19.0
p['numCareLevels'] = 5
p['cdfCareTransition'] = [ 0.7, 0.9, 0.95, 1.0 ]
p['careLevelNames'] = ['none','low','moderate','substantial','critical']
p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 30.0, 80.0 ]
## Availability of care statistics
p['childHours'] = 5.0
p['homeAdultHours'] = 30.0
p['workingAdultHours'] = 25.0
p['retiredHours'] = 60.0
p['lowCareHandicap'] = 0.5
p['hourlyCostOfCare'] = 20.0
## Fertility statistics
p['growingPopBirthProb'] = 0.215
p['steadyPopBirthProb'] = 0.13
p['transitionYear'] = 1965
p['minPregnancyAge'] = 17
p['maxPregnancyAge'] = 42
## Class and employment statistics
p['numOccupationClasses'] = 3
p['occupationClasses'] = ['lower','intermediate','higher']
p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]
## Age transition statistics
p['ageOfAdulthood'] = 17
p['ageOfRetirement'] = 65
## Marriage and divorce statistics (partnerships really)
p['basicFemaleMarriageProb'] = 0.25
p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]
p['basicMaleMarriageProb'] = 0.3
p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0 ]
p['basicDivorceRate'] = 0.06
p['variableDivorce'] = 0.06
p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0 ]
## Leaving home and moving around statistics
p['probApartWillMoveTogether'] = 0.3
p['coupleMovesToExistingHousehold'] = 0.3
p['basicProbAdultMoveOut'] = 0.22
p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
p['basicProbSingleMove'] = 0.05
p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
p['basicProbFamilyMove'] = 0.03
p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]
p['agingParentsMoveInWithKids'] = 0.1
p['variableMoveBack'] = 0.1
## Description of the map, towns, and houses
p['mapGridXDimension'] = 8
p['mapGridYDimension'] = 12
p['townGridDimension'] = 40
p['numHouseClasses'] = 3
p['houseClasses'] = ['small','medium','large']
p['cdfHouseClasses'] = [ 0.6, 0.9, 5.0 ]
p['ukMap'] = [ [ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],
[ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],
[ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],
[ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],
[ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],
[ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],
[ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],
[ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],
[ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],
[ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]
p['mapDensityModifier'] = 0.6
p['ukClassBias'] = [
[ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
[ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],
[ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
[ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],
[ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],
[ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]
## Graphical interface details
p['interactiveGraphics'] = True
p['delayTime'] = 0.0
p['screenWidth'] = 1300
p['screenHeight'] = 700
p['bgColour'] = 'black'
p['mainFont'] = 'Helvetica 18'
p['fontColour'] = 'white'
p['dateX'] = 70
p['dateY'] = 20
p['popX'] = 70
p['popY'] = 50
p['pixelsInPopPyramid'] = 2000
p['careLevelColour'] = ['blue','green','yellow','orange','red']
p['houseSizeColour'] = ['brown','purple','yellow']
p['pixelsPerTown'] = 56
p['maxTextUpdateList'] = 22
return p
p = init_params()
#######################################################
## A basic single run
def basicRun(p):
s = Sim(p)
tax = s.run()
#######################################################
## Batch run (no graphics)
def batchRun(num):
p['interactiveGraphics'] = False
dataFile = open('batchRunData.txt','w')
for i in range ( 0, num ):
print "Doing batch run: ", i
taxList = []
s = Sim(p)
tax = s.run()
taxList.append(tax)
print "Social care cost per taxpayer: ", tax
dataFile.write(str(i) + "\t" + str(tax) + "\n")
dataFile.close()
#######################################################
## Retirement age run (no graphics)
def retireRun(reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
dataFile = open('retirementAgeData2.txt','w')
#p['ageingParentList'] = [50, 55, 65, 70, 75, 80]
for variableCare in p['ageingParentList']:
p['ageOfRetirement'] = variableCare
print "Trying retirement age: ", variableCare
taxList = []
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax = s.run()
taxList.append(tax)
print tax
dataFile.write(str(variableCare) + "\t" + str(i) + "\t" + str(tax) + "\n")
taxMeans.append(pylab.mean(taxList))
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
dataFile.close()
indices1 = pylab.arange(len(p['ageingParentList']))
taxFig = pylab.figure()
taxBar = taxFig.add_subplot(1,1,1)
taxBar.bar(indices1, taxMeans, facecolor='red',
align='center', yerr=taxSEs, ecolor='black')
taxBar.set_ylabel('Mean social care cost per taxpayer')
taxBar.set_xlabel('Age of retirement')
taxBar.set_xticks(indices1)
taxBar.set_xticklabels(p['ageingParentList'])
pylab.savefig('retirementAgeRunSet1.pdf')
pylab.show()
#######################################################
##runs for sensitivity analysis using GEM-SA
def gemRun(reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
dataFile = open('GEMSA data new.txt','a')
meansFile = open('GEMSA means new.txt', 'a')
outFile = open('GEMSA outputs new.txt', 'a')
# agingParentList = [ 0.0, 0.1, 0.2, 0.4 ]
# careProbList = [ 0.0004, 0.0008, 0.0012, 0.0016 ]
# retiredHoursList = [ 20.0, 30.0, 40.0, 60.0 ]
# retiredAgeList = [ 60.0 ]
# ageingParentList = [ 0.0, 0.1 ]
# careProbList = [ 0.0004 ]
# retiredHoursList = [ 20.0 ]
# retiredAgeList = [ 60.0 ]
for variableCare in p['ageingParentList']:
for variableProb in p['careProbList']:
for variableRetired in p['retiredHoursList']:
for variableAge in p['retiredAgeList']:
p['agingParentsMoveInWithKids'] = variableCare
p['personCareProb'] = variableProb
p['retiredHours'] = variableRetired
p['ageOfRetirement'] = variableAge
print "Trying parents-moving-in probability: ", variableCare
print "Trying person care probability: ", variableProb
print "Trying retired hours: ", variableRetired
print "Trying retirement age: ", variableAge
taxList = []
taxSum = 0.0
meansFile.write(str(variableCare) + "\t" + str(variableProb) + "\t" + str(variableRetired) + "\t" + str(variableAge) + "\n")
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
dataFile.write(str(seed) + "\t" + str(variableCare) + "\t" + str(variableProb) + "\t" + str(variableRetired) + "\t" + str(variableAge) + "\t" + str(tax) + "\n")
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
dataFile.close()
meansFile.close()
outFile.close()
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH
def sensitivityRun(runtype, ageingList, careList, retiredHList, retiredAList, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
dataFile = open(runtype + ' GEMSA data.txt','a')
meansFile = open(runtype + ' GEMSA means.txt', 'a')
outFile = open(runtype + ' GEMSA outputs.txt', 'a')
# agingParentList = [ 0.0, 0.1, 0.2, 0.4 ]
# careProbList = [ 0.0004, 0.0008, 0.0012, 0.0016 ]
# retiredHoursList = [ 20.0, 30.0, 40.0, 60.0 ]
# retiredAgeList = [ 60.0 ]
# ageingParentList = [ 0.0, 0.1 ]
# careProbList = [ 0.0004 ]
# retiredHoursList = [ 20.0 ]
# retiredAgeList = [ 60.0 ]
for run in xrange(len(ageingList)):
p['agingParentsMoveInWithKids'] = ageingList[run]
p['personCareProb'] = careList[run]
p['retiredHours'] = retiredHList[run]
p['ageOfRetirement'] = retiredAList[run]
print "Trying parents-moving-in probability: ", ageingList[run]
print "Trying person care probability: ", careList[run]
print "Trying retired hours: ", retiredHList[run]
print "Trying retirement age: ", retiredAList[run]
taxList = []
taxSum = 0.0
meansFile.write(str(ageingList[run]) + "\t" + str(careList[run]) + "\t" + str(retiredHList[run]) + "\t" + str(retiredAList[run]) + "\n")
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
dataFile.write(str(seed) + "\t" + str(ageingList[run]) + "\t" + str(careList[run]) + "\t" + str(retiredHList[run]) + "\t" + str(retiredAList[run]) + "\t" + str(tax) + "\n")
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
dataFile.close()
meansFile.close()
outFile.close()
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH
# def sensitivityLarge(runtype, ageingList, careList, retiredHList, retiredAList, baseDieList, babyDieList, personCareList, maleCareList, femaleCareList, \
# childHoursList, homeAdultList, workingAdultList, lowCareList, growingBirthList, basicDivorceList, variableDivorceList, basicMaleMarriageList, \
# basicFemaleMarriageList, probMoveList, moveHouseholdList, probMoveOutList, probMoveBackList, reps):
def sensitivityLarge(runtype, input_list, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
outFile = open(runtype + ' GEMSA outputs large.txt', 'a')
for run in xrange(len(input_list[0])):
print("Running simulation number {}...".format(run))
print("Number of reps: {}".format(reps))
sim_list = np.array(input_list)
print(sim_list)
p['agingParentsMoveInWithKids'] = sim_list[0,run]
print(p['agingParentsMoveInWithKids'])
p['personCareProb'] = sim_list[1,run]
p['retiredHours'] = sim_list[2,run]
p['ageOfRetirement'] = sim_list[3,run]
p['baseDieProb'] = sim_list[4,run]
p['babyDieProb'] = sim_list[5,run]
p['personCareProb'] = sim_list[6,run]
p['maleAgeCareScaling'] = sim_list[7,run]
p['femaleAgeCareScaling'] = sim_list[8,run]
p['childHours'] = sim_list[9,run]
p['homeAdultHours'] = sim_list[10,run]
p['workingAdultHours'] = sim_list[11,run]
p['lowCareHandicap'] = sim_list[12,run]
p['growingPopBirthProb'] = sim_list[13,run]
p['basicDivorceRate'] = sim_list[14,run]
p['variableDivorce'] = sim_list[15,run]
p['basicMaleMarriageProb'] = sim_list[16,run]
p['basicFemaleMarriageProb'] = sim_list[17,run]
p['probApartWillMoveTogether'] = sim_list[18,run]
p['coupleMovesToExistingHousehold'] = sim_list[19,run]
p['basicProbAdultMoveOut'] = sim_list[20,run]
p['variableMoveBack'] = sim_list[21,run]
taxList = []
taxSum = 0.0
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\n" + str(seed) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
outFile.close()
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH, 10 params
def sensitivityTenParams(runtype, input_list, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
outFile = open(runtype + ' GEMSA outputs.txt', 'a')
for run in xrange(len(input_list[0])):
print("Running simulation number {}...".format(run))
print("Number of reps: {}".format(reps))
sim_list = np.array(input_list)
print(sim_list)
p['agingParentsMoveInWithKids'] = sim_list[0,run]
p['baseCareProb'] = sim_list[1,run]
p['retiredHours'] = sim_list[2,run]
p['ageOfRetirement'] = sim_list[3,run]
p['personCareProb'] = sim_list[4,run]
p['maleAgeCareScaling'] = sim_list[5,run]
p['femaleAgeCareScaling'] = sim_list[6,run]
p['childHours'] = sim_list[7,run]
p['homeAdultHours'] = sim_list[8,run]
p['workingAdultHours'] = sim_list[9,run]
taxList = []
taxSum = 0.0
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\t" + str(seed) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
outFile.close()
#######################################################
# Recurrent neural network experiments -- 10 params, outputs recorded per year
def RNNOutputScenario(runtype, input_list, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
outFile = open(runtype + ' GEMSA outputs.txt', 'a')
outFile2 = open(runtype + ' yearly outputs.txt', 'a')
for run in xrange(len(input_list[0])):
print("Running simulation number {}...".format(run))
print("Number of reps: {}".format(reps))
sim_list = np.array(input_list)
#print(sim_list)
p['agingParentsMoveInWithKids'] = sim_list[0, run]
p['baseCareProb'] = sim_list[1, run]
p['retiredHours'] = sim_list[2, run]
p['ageOfRetirement'] = sim_list[3, run]
p['personCareProb'] = sim_list[4, run]
p['maleAgeCareScaling'] = sim_list[5, run]
p['femaleAgeCareScaling'] = sim_list[6, run]
p['childHours'] = sim_list[7, run]
p['homeAdultHours'] = sim_list[8, run]
p['workingAdultHours'] = sim_list[9, run]
taxList = []
taxSum = 0.0
for i in range(0, reps):
print i,
s = Sim(p)
tax, seed, carecost = s.run()
taxList.append(tax)
taxSum += tax
print tax
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum / reps) + "\t" + str(seed) + "\n")
outFile2.write(str(carecost) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
outFile.close()
outFile2.close()
#######################################################
## A profiling run; use import pstats then p = pstats.Stats('profile.txt') then p.sort_stats('time').print_stats(10)
#cProfile.run('s.run()','profile.txt')
#######################################################
## Parse command line arguments
def loadParamFile(file, dict):
"""
Given a JSON filename and a dictionary, return the dictionary with
the file's fields merged into it.
Example: if the initial dictionary is
dict['bobAge'] = 90 and dict['samAge']=20 and the JSON data is
{'age':{'bob':40, 'fred':35}}
the returned dictionary contains the following data values:
dict['bobAge'] = 40, dict['fredAge'] = 35, dict['samAge'] = 20
"""
    with open(file) as json_file:
        json_data = json_file.read()
    data = json.loads(json_data)
for group in data:
fields = data.get(group)
        if isinstance(fields, dict):
# Group of fields - create name from item and group
for item in fields:
name = item + group[:1].upper() + group[1:]
                value = data[group][item]
                dict[name] = value
else:
# Single data value - naming is assumed to be correct case
            dict[group] = fields
return dict
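# Illustration of the naming convention documented above. The data below is
# hypothetical and this helper is never called; it only mirrors the loop in
# loadParamFile on an inline dict instead of a JSON file.
def _loadParamFileExample():
    params = {'bobAge': 90, 'samAge': 20}
    # JSON equivalent: {"age": {"bob": 40, "fred": 35}}
    data = {'age': {'bob': 40, 'fred': 35}}
    for group in data:
        fields = data.get(group)
        for item in fields:
            # "bob" in group "age" becomes the flat key "bobAge"
            params[item + group[:1].upper() + group[1:]] = fields[item]
    assert params == {'bobAge': 40, 'fredAge': 35, 'samAge': 20}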
def loadCommandLine(dict):
    """Process the command line, loading a params file if one was given. The
    dict argument is augmented with data from the user-specified parameters
    file when present; otherwise it is returned unchanged."""
parser = argparse.ArgumentParser(
description='lives v1.0: complex social behaviour simulation.',
epilog='Example: "maintest.py -f test.json -n 3" --- run 3 sims with test.json\'s params',
formatter_class=argparse.RawTextHelpFormatter,
prog='lives',
usage='use "%(prog)s -h" for more information')
group = parser.add_mutually_exclusive_group()
parser.add_argument(
'-f', '--file',
help='parameters file in JSON format e.g. soylent.json')
group.add_argument(
'-n', '--num', metavar='N', type=int, default=0,
help='number of runs to carry out.')
group.add_argument('-r', '--retire', metavar='R', type=int, default=0,
help='retirement batch, number of iterations.')
group.add_argument('-g', '--gem', metavar='G', type=int, default=0,
help='GEM-SA batch for sensitivity analysis, number of iterations.')
group.add_argument('-l', '--lptau', metavar='L', type=int, default=0,
help='sensitivity analysis batch with LPtau sampling.')
group.add_argument('-m', '--maximin', metavar='M', type=int, default=0,
help='sensitivity analysis batch with maximin latin hypercube sampling.')
group.add_argument('-b', '--bigly', metavar='B', type=int, default=0,
help='bigly sensitivity analysis batch with maximin latin hypercube sampling.')
group.add_argument('-t', '--tenparams', metavar='T', type=int, default=0,
help='10 parameter sensitivity analysis batch with maximin latin hypercube sampling.')
group.add_argument('-c', '--recurrent', metavar='C', type=int, default=0,
help='10 parameter time-series run for RNN.')
args = parser.parse_args()
print("~ Filename: {}".format(args.file))
print("~ Number: {}".format(args.num))
print("~ Retire: {}".format(args.retire))
print("~ GEM-SA: {}".format(args.gem))
print("~ LPtau: {}".format(args.lptau))
print("~ Maximin: {}".format(args.maximin))
print("~ Big SA: {}".format(args.bigly))
print("~ Ten Params: {}".format(args.tenparams))
print("~ Ten Params RNN: {}".format(args.recurrent))
if args.file:
#agingParentList = json.load(retireList, parse_float=decimal.Decimal)
res = loadParamFile (args.file, dict)
print ("p = {}".format(dict))
basicRun(dict)
elif args.num >= 1:
batchRun(args.num)
elif args.retire:
p['ageingParentList'] = []
res = loadParamFile('retire.json', dict)
print("List = {}".format(dict))
retireRun(args.retire)
elif args.gem:
p['ageingParentList'] = []
p['careProbList'] = []
p['retiredHoursList'] = []
p['retiredAgeList'] = []
res = loadParamFile('gem.json', dict)
print("List = {}".format(dict))
gemRun(args.gem)
elif args.lptau:
sim_array = np.genfromtxt('lptau-4params.txt', delimiter=' ')
sim_list = list(sim_array.T)
# print(sim_list)
ageingParentSettings = sim_list[0]
careProbSettings = sim_list[1]
retiredHoursSettings = sim_list[2]
retiredAgeSettings = sim_list[3]
# print(ageingParentSettings)
# print(careProbSettings)
# print(retiredHoursSettings)
# print(retiredAgeSettings)
sensitivityRun('LPtau', ageingParentSettings, careProbSettings, retiredHoursSettings, retiredAgeSettings, args.lptau)
elif args.maximin:
sim_array = np.genfromtxt('latinhypercube-4params.txt', delimiter=' ')
sim_list = list(sim_array.T)
# print(sim_list)
ageingParentSettings = sim_list[0]
careProbSettings = sim_list[1]
retiredHoursSettings = sim_list[2]
retiredAgeSettings = sim_list[3]
# print(ageingParentSettings)
# print(careProbSettings)
# print(retiredHoursSettings)
# print(retiredAgeSettings)
sensitivityRun('Maximin', ageingParentSettings, careProbSettings, retiredHoursSettings, retiredAgeSettings, args.maximin)
elif args.bigly:
sim_array = np.genfromtxt('latinhypercube-22params.txt', delimiter=' ')
sim_list = list(sim_array.T)
#print(sim_list)
np.savetxt('hypercube22_GEMSA_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
sensitivityLarge('hypercube22', sim_list, args.bigly)
elif args.tenparams:
sim_array = np.genfromtxt('LPtau-10params.txt', delimiter=' ')
sim_list = list(sim_array.T)
#print(sim_list)
np.savetxt('lptau10_GEMSA_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
sensitivityTenParams('lptau10', sim_list, args.tenparams)
elif args.recurrent:
sim_array = np.genfromtxt('lptau10round2_GEMSA_inputs.csv', delimiter=',')
sim_list = list(sim_array.T)
print(sim_list)
np.savetxt('lptau10_recurrent_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
RNNOutputScenario('LPtauRNN', sim_list, args.recurrent)
else:
basicRun(p)
return dict
# Load the default values, overwriting and adding to the initial p values
loadParamFile("default.json", p)
# Load values based upon the command line file passed (if any).
loadCommandLine (p)
#print ("p = {}".format(p))
| 40.08453
| 188
| 0.56018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,567
| 0.380655
|
c53f83b4724adf9f9dc5fc23447830899cf93a99
| 2,427
|
py
|
Python
|
mainapp/views.py
|
MelqonHovhannisyan/weather
|
455ce90fd480efb6c05002a53ed478fa4014e84b
|
[
"MIT"
] | null | null | null |
mainapp/views.py
|
MelqonHovhannisyan/weather
|
455ce90fd480efb6c05002a53ed478fa4014e84b
|
[
"MIT"
] | null | null | null |
mainapp/views.py
|
MelqonHovhannisyan/weather
|
455ce90fd480efb6c05002a53ed478fa4014e84b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from .serializers import WeatherSerializer
import requests
import json
import math
import os
import yaml
from rest_framework.decorators import action
from django.conf import settings
def api_docs(request):
"""
Base API Docs endpoint function for the Swagger
"""
file = open(os.path.join(settings.BASE_DIR, 'api.yaml'), encoding='utf8')
spec = yaml.safe_load(file.read())
return render(request, template_name="swagger_base.html", context={'data': json.dumps(spec)})
class WeatherViewSet(ViewSet):
"""
General ViewSet for Weather API
"""
serializer_class = WeatherSerializer
@action(methods=['get'], detail=False, url_path=r'(?P<city>[\w-]+)/', url_name='get_weather')
def get(self, request, *args, **kwargs):
data = {'city': kwargs.get(
'city', None), 'days': request.GET.get('days', 1)}
serializer = WeatherSerializer(data=data)
if serializer.is_valid():
weather = []
data = {}
response = json.loads(requests.get(
f'{settings.BASE_WEATHER_API_URL}forecast.json?key={settings.WEATHER_API_KEY}&q={serializer.data["city"]}&days={serializer.data["days"]}&aqi=no&alerts=no').content)
if "error" in response:
return Response(response['error']['message'],status=400)
data['location'] = response['location']['name']
for d in response['forecast']['forecastday']:
day = {
"date": d["date"],
"maximum": d["day"]["maxtemp_c"],
"minimum": d["day"]["mintemp_c"],
"average": d["day"]["avgtemp_c"]
}
hours = []
for hour in d['hour']:
hours.append(hour['temp_c'])
hours.sort()
                if len(hours) % 2 == 0:
                    middle = len(hours) // 2
                    # Median of an even count: mean of the two middle values
                    # (e.g. hours[11] and hours[12] for 24 hourly readings).
                    day['median'] = round(
                        (hours[middle - 1] + hours[middle]) / 2, 2)
                else:
                    day['median'] = round(hours[len(hours) // 2], 2)
weather.append(day)
data['weather'] = weather
return Response(data)
return Response(serializer.errors,status=400)
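# Illustration (hypothetical URL prefix and values): a request such as
#   GET /<api-prefix>/London/?days=2
# yields JSON shaped like
#   {"location": "London",
#    "weather": [{"date": "2022-01-01", "maximum": 8.1, "minimum": 2.3,
#                 "average": 5.0, "median": 4.9}, ...]}
# with every value depending on the upstream weather API response.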
| 37.338462
| 180
| 0.562835
| 1,808
| 0.744953
| 0
| 0
| 1,679
| 0.691801
| 0
| 0
| 565
| 0.232798
|
c53f9f1e1c994d952d8c3879b34114ccaf382fd6
| 5,420
|
py
|
Python
|
tests/test_backtrack.py
|
nisaruj/algorithms
|
1e03cd259c2d7ada113eb99843dcada9f20adf54
|
[
"MIT"
] | 6
|
2018-12-12T09:14:05.000Z
|
2019-04-29T22:07:28.000Z
|
tests/test_backtrack.py
|
nisaruj/algorithms
|
1e03cd259c2d7ada113eb99843dcada9f20adf54
|
[
"MIT"
] | null | null | null |
tests/test_backtrack.py
|
nisaruj/algorithms
|
1e03cd259c2d7ada113eb99843dcada9f20adf54
|
[
"MIT"
] | 1
|
2021-07-16T16:49:35.000Z
|
2021-07-16T16:49:35.000Z
|
from algorithms.backtrack import (
add_operators,
permute,
permute_iter,
anagram,
array_sum_combinations,
unique_array_sum_combinations,
combination_sum,
find_words,
pattern_match,
)
import unittest
from algorithms.backtrack.generate_parenthesis import *
class TestAddOperator(unittest.TestCase):
def test_add_operators(self):
# "123", 6 -> ["1+2+3", "1*2*3"]
s = "123"
target = 6
self.assertEqual(add_operators(s, target), ["1+2+3", "1*2*3"])
# "232", 8 -> ["2*3+2", "2+3*2"]
s = "232"
target = 8
self.assertEqual(add_operators(s, target), ["2+3*2", "2*3+2"])
s = "123045"
target = 3
answer = ['1+2+3*0*4*5',
'1+2+3*0*45',
'1+2-3*0*4*5',
'1+2-3*0*45',
'1-2+3+0-4+5',
'1-2+3-0-4+5',
'1*2+3*0-4+5',
'1*2-3*0-4+5',
'1*23+0-4*5',
'1*23-0-4*5',
'12+3*0-4-5',
'12-3*0-4-5']
self.assertEqual(add_operators(s, target), answer)
class TestPermuteAndAnagram(unittest.TestCase):
def test_permute(self):
perms = ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
self.assertEqual(perms, permute("abc"))
def test_permute_iter(self):
it = permute_iter("abc")
perms = ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
for i in range(len(perms)):
self.assertEqual(perms[i], next(it))
    def test_anagram(self):
self.assertTrue(anagram('apple', 'pleap'))
self.assertFalse(anagram("apple", "cherry"))
class TestArrayCombinationSum(unittest.TestCase):
def test_array_sum_combinations(self):
A = [1, 2, 3, 3]
B = [2, 3, 3, 4]
C = [2, 3, 3, 4]
target = 7
answer = [[1, 2, 4], [1, 3, 3], [1, 3, 3], [1, 3, 3],
[1, 3, 3], [1, 4, 2], [2, 2, 3], [2, 2, 3],
[2, 3, 2], [2, 3, 2], [3, 2, 2], [3, 2, 2]]
answer.sort()
self.assertListEqual(sorted(array_sum_combinations(A, B, C, target)), answer)
def test_unique_array_sum_combinations(self):
A = [1, 2, 3, 3]
B = [2, 3, 3, 4]
C = [2, 3, 3, 4]
target = 7
answer = [(2, 3, 2), (3, 2, 2), (1, 2, 4),
(1, 4, 2), (2, 2, 3), (1, 3, 3)]
answer.sort()
self.assertListEqual(sorted(unique_array_sum_combinations(A, B, C, target)), answer)
class TestCombinationSum(unittest.TestCase):
def check_sum(self, nums, target):
if sum(nums) == target:
return (True, nums)
else:
return (False, nums)
def test_combination_sum(self):
candidates1 = [2, 3, 6, 7]
target1 = 7
answer1 = [
[2, 2, 3],
[7]
]
self.assertEqual(combination_sum(candidates1, target1), answer1)
candidates2 = [2, 3, 5]
target2 = 8
answer2 = [
[2, 2, 2, 2],
[2, 3, 3],
[3, 5]
]
self.assertEqual(combination_sum(candidates2, target2), answer2)
class TestFindWords(unittest.TestCase):
def test_normal(self):
board = [
['o', 'a', 'a', 'n'],
['e', 't', 'a', 'e'],
['i', 'h', 'k', 'r'],
['i', 'f', 'l', 'v']
]
words = ["oath", "pea", "eat", "rain"]
self.assertEqual(find_words(board, words).sort(),
['oath', 'eat'].sort())
def test_none(self):
board = [
['o', 'a', 'a', 'n'],
['e', 't', 'a', 'e'],
['i', 'h', 'k', 'r'],
['i', 'f', 'l', 'v']
]
words = ["chicken", "nugget", "hello", "world"]
self.assertEqual(find_words(board, words), [])
def test_empty(self):
board = []
words = []
self.assertEqual(find_words(board, words), [])
def test_uneven(self):
board = [
['o', 'a', 'a', 'n'],
['e', 't', 'a', 'e']
]
words = ["oath", "pea", "eat", "rain"]
self.assertEqual(find_words(board, words), ['eat'])
def test_repeat(self):
board = [
['a', 'a', 'a'],
['a', 'a', 'a'],
['a', 'a', 'a']
]
words = ["a", "aa", "aaa", "aaaa", "aaaaa"]
self.assertTrue(len(find_words(board, words)) == 5)
class TestPatternMatch(unittest.TestCase):
def test_pattern_match(self):
pattern1 = "abab"
string1 = "redblueredblue"
pattern2 = "aaaa"
string2 = "asdasdasdasd"
pattern3 = "aabb"
string3 = "xyzabcxzyabc"
self.assertTrue(pattern_match(pattern1, string1))
self.assertTrue(pattern_match(pattern2, string2))
self.assertFalse(pattern_match(pattern3, string3))
class TestGenerateParenthesis(unittest.TestCase):
def test_generate_parenthesis(self):
self.assertEqual(generate_parenthesis_v1(2), ['()()', '(())'])
self.assertEqual(generate_parenthesis_v1(3), ['()()()', '()(())', '(())()', '(()())', '((()))'])
self.assertEqual(generate_parenthesis_v2(2), ['(())', '()()'])
self.assertEqual(generate_parenthesis_v2(3), ['((()))', '(()())', '(())()', '()(())', '()()()'])
if __name__ == '__main__':
unittest.main()
| 28.983957
| 104
| 0.474908
| 5,058
| 0.93321
| 0
| 0
| 0
| 0
| 0
| 0
| 798
| 0.147232
|
c542862715caa74d2fd3f0e9e9fcab1cbbe24d4a
| 284
|
py
|
Python
|
syncless/wscherry.py
|
irr/python-labs
|
43bb3a528c151653b2be832c7ff13240a10e18a4
|
[
"Apache-2.0"
] | 4
|
2015-11-25T09:06:44.000Z
|
2019-12-11T21:35:21.000Z
|
syncless/wscherry.py
|
irr/python-labs
|
43bb3a528c151653b2be832c7ff13240a10e18a4
|
[
"Apache-2.0"
] | null | null | null |
syncless/wscherry.py
|
irr/python-labs
|
43bb3a528c151653b2be832c7ff13240a10e18a4
|
[
"Apache-2.0"
] | 2
|
2015-11-25T09:19:38.000Z
|
2016-02-26T03:54:06.000Z
|
import sys
sys.path.append("/usr/lib/python2.7/site-packages")
import redis
_r = redis.Redis(host='localhost', port=6379, db=0)
import cherrypy
class Test(object):
def index(self):
_r.incr("/")
return "OK!"
index.exposed = True
cherrypy.quickstart(Test())
| 17.75
| 51
| 0.661972
| 107
| 0.376761
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.18662
|
c544eb603d7c0e4860f104e7e494d3ae3bdfe615
| 538
|
py
|
Python
|
server.py
|
celinekeisja/jobmonitorservice
|
aaf56dd198c1275439a0f5ed27617fb458f715ac
|
[
"MIT"
] | null | null | null |
server.py
|
celinekeisja/jobmonitorservice
|
aaf56dd198c1275439a0f5ed27617fb458f715ac
|
[
"MIT"
] | null | null | null |
server.py
|
celinekeisja/jobmonitorservice
|
aaf56dd198c1275439a0f5ed27617fb458f715ac
|
[
"MIT"
] | 1
|
2019-11-11T10:26:42.000Z
|
2019-11-11T10:26:42.000Z
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from config import db
import config
app = config.connex_app
app.add_api('swagger.yml')
@app.route('/')
def home():
return 'homepage here'
@app.route("/job")
@app.route("/job/<string:job_id>")
def job(job_id=""):
return 'result of job_id'
migrate = Migrate(app=app, db=db)
manager = Manager(app=app)
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
# app.run(host='localhost', port=5000, debug=True)
| 20.692308
| 54
| 0.711896
| 0
| 0
| 0
| 0
| 157
| 0.291822
| 0
| 0
| 143
| 0.265799
|
c54618a73487992c76ea8d3ae910cd85c832a27e
| 4,939
|
py
|
Python
|
website/addons/figshare/views/config.py
|
harrismendell/osf.io
|
e2727b1bb2aaa7de494f941be08cb3e9305ae624
|
[
"Apache-2.0"
] | null | null | null |
website/addons/figshare/views/config.py
|
harrismendell/osf.io
|
e2727b1bb2aaa7de494f941be08cb3e9305ae624
|
[
"Apache-2.0"
] | null | null | null |
website/addons/figshare/views/config.py
|
harrismendell/osf.io
|
e2727b1bb2aaa7de494f941be08cb3e9305ae624
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from website.util import web_url_for
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_valid_project
)
from ..api import Figshare
from ..utils import options_to_hgrid
###### AJAX Config
@must_be_logged_in
@must_be_valid_project
@must_have_addon('figshare', 'node')
def figshare_config_get(node_addon, auth, **kwargs):
"""API that returns the serialized node settings."""
return {
'result': serialize_settings(node_addon, auth.user),
}, http.OK
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('figshare', 'node')
@must_be_addon_authorizer('figshare')
def figshare_config_put(node_addon, auth, **kwargs):
"""View for changing a node's linked figshare folder."""
fields = request.json.get('selected', {})
node = node_addon.owner
node_addon.update_fields(fields, node, auth)
return {
'result': {
'linked': {
'title': fields.get('title') or '',
'id': fields.get('id') or None,
'type': fields.get('type') or None
},
'urls': serialize_urls(node_addon)
},
'message': 'Successfully updated settings.',
}, http.OK
@must_have_permission('write')
@must_have_addon('figshare', 'node')
def figshare_import_user_auth(auth, node_addon, **kwargs):
"""Import figshare credentials from the currently logged-in user to a node.
"""
user = auth.user
user_addon = user.get_addon('figshare')
if user_addon is None or node_addon is None:
raise HTTPError(http.BAD_REQUEST)
node_addon.authorize(user_addon, save=True)
return {
'result': serialize_settings(node_addon, user),
'message': 'Successfully imported access token from profile.',
}, http.OK
@must_have_permission('write')
@must_have_addon('figshare', 'node')
@must_not_be_registration
def figshare_deauthorize(auth, node_addon, **kwargs):
node_addon.deauthorize(auth=auth, save=True)
return {}
def serialize_settings(node_settings, current_user, client=None):
"""View helper that returns a dictionary representation of a
FigshareNodeSettings record. Provides the return value for the
figshare config endpoints.
"""
current_user_settings = current_user.get_addon('figshare')
user_settings = node_settings.user_settings
user_has_auth = current_user_settings is not None and current_user_settings.has_auth
user_is_owner = user_settings is not None and (
user_settings.owner._primary_key == current_user._primary_key
)
valid_credentials = True
if user_settings:
client = client or Figshare.from_settings(user_settings)
articles, status = client.articles(node_settings)
if status == 401:
valid_credentials = False
result = {
'nodeHasAuth': node_settings.has_auth,
'userHasAuth': user_has_auth,
'userIsOwner': user_is_owner,
'urls': serialize_urls(node_settings),
'validCredentials': valid_credentials,
}
if node_settings.has_auth:
# Add owner's profile URL
result['urls']['owner'] = web_url_for('profile_view_id',
uid=user_settings.owner._primary_key)
result['ownerName'] = user_settings.owner.fullname
# Show available projects
linked = node_settings.linked_content or {'id': None, 'type': None, 'title': None}
result['linked'] = linked
return result
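# Shape of the dictionary assembled above (values illustrative only):
# {
#     'nodeHasAuth': True, 'userHasAuth': True, 'userIsOwner': True,
#     'validCredentials': True,
#     'urls': {...},             # see serialize_urls below
#     'ownerName': 'Jane Doe',   # ownerName/linked only when the node has auth
#     'linked': {'id': None, 'type': None, 'title': None},
# }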
def serialize_urls(node_settings):
node = node_settings.owner
urls = {
'config': node.api_url_for('figshare_config_put'),
'deauthorize': node.api_url_for('figshare_deauthorize'),
'auth': node.api_url_for('figshare_oauth_start'),
'importAuth': node.api_url_for('figshare_import_user_auth'),
'options': node.api_url_for('figshare_get_options'),
'folders': node.api_url_for('figshare_get_options'),
'files': node.web_url_for('collect_file_trees'),
# Endpoint for fetching only folders (including root)
'contents': node.api_url_for('figshare_hgrid_data_contents'),
'settings': web_url_for('user_addons')
}
return urls
@must_be_valid_project
@must_have_addon('figshare', 'node')
def figshare_get_options(node_addon, **kwargs):
options = Figshare.from_settings(node_addon.user_settings).get_options()
# TODO: Fix error handling
if options == 401 or not isinstance(options, list):
raise HTTPError(http.BAD_REQUEST)
# self.user_settings.remove_auth()
# push_status_message(messages.OAUTH_INVALID)
else:
node = node_addon.owner
return options_to_hgrid(node, options) or []
| 33.371622
| 90
| 0.692853
| 0
| 0
| 0
| 0
| 2,284
| 0.462442
| 0
| 0
| 1,333
| 0.269893
|
c546f1f9e36c1fc60824e9adb3e2de4e63364611
| 2,290
|
py
|
Python
|
orquesta/utils/dictionary.py
|
igcherkaev/orquesta
|
2baa66d33f53cb04b660b3ce284a52d478ecc528
|
[
"Apache-2.0"
] | 85
|
2018-07-26T04:29:49.000Z
|
2022-03-31T10:47:50.000Z
|
orquesta/utils/dictionary.py
|
igcherkaev/orquesta
|
2baa66d33f53cb04b660b3ce284a52d478ecc528
|
[
"Apache-2.0"
] | 149
|
2018-07-27T22:36:45.000Z
|
2022-03-31T10:54:32.000Z
|
orquesta/utils/dictionary.py
|
igcherkaev/orquesta
|
2baa66d33f53cb04b660b3ce284a52d478ecc528
|
[
"Apache-2.0"
] | 24
|
2018-08-07T13:37:41.000Z
|
2021-12-16T18:12:43.000Z
|
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
def merge_dicts(left, right, overwrite=True):
if left is None:
return right
if right is None:
return left
for k, v in six.iteritems(right):
if k not in left:
left[k] = v
else:
left_v = left[k]
if isinstance(left_v, dict) and isinstance(v, dict):
merge_dicts(left_v, v, overwrite=overwrite)
elif overwrite:
left[k] = v
return left
def get_dict_value(obj, path, raise_key_error=False):
item = obj
traversed = ""
for key in path.split("."):
if not isinstance(item, dict) and traversed != path:
raise TypeError("Value of '%s' is not typeof dict." % traversed)
traversed += "." + key if len(traversed) > 0 else key
if key not in item and raise_key_error:
raise KeyError("Key '%s' does not exist." % traversed)
item = item.get(key, None)
if item is None:
break
return item
def set_dict_value(obj, path, value, raise_key_error=False, insert_null=True):
if not insert_null and value is None:
return
item = obj
traversed = ""
for key in path.split("."):
if not isinstance(item, dict) and traversed != path:
raise TypeError("Value of '%s' is not typeof dict." % traversed)
traversed += "." + key if len(traversed) > 0 else key
if key not in item and raise_key_error:
raise KeyError("Key '%s' does not exist." % traversed)
if traversed == path:
item[key] = value
else:
if key not in item:
item[key] = {}
item = item[key]
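if __name__ == "__main__":
    # Usage sketch added for illustration; not part of the original module.
    obj = {}
    set_dict_value(obj, "a.b.c", 1)          # builds the nested dicts
    assert obj == {"a": {"b": {"c": 1}}}
    assert get_dict_value(obj, "a.b.c") == 1
    assert merge_dicts({"x": {"y": 1}}, {"x": {"z": 2}}) == {
        "x": {"y": 1, "z": 2}}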
| 27.590361
| 78
| 0.609607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 711
| 0.31048
|
c5492165678226fd4bb43b85234e7a9ca9ccd6ce
| 4,983
|
py
|
Python
|
leetcode_py.py
|
HuangJingGitHub/PracMakePert_py
|
1e947b0804fdcd50b2a3afc0af9d824cb55978cd
|
[
"Apache-2.0"
] | 2
|
2019-05-28T15:04:20.000Z
|
2019-05-28T15:04:22.000Z
|
leetcode_py.py
|
HuangJingGitHub/PracMakePert_py
|
1e947b0804fdcd50b2a3afc0af9d824cb55978cd
|
[
"Apache-2.0"
] | null | null | null |
leetcode_py.py
|
HuangJingGitHub/PracMakePert_py
|
1e947b0804fdcd50b2a3afc0af9d824cb55978cd
|
[
"Apache-2.0"
] | 1
|
2019-08-30T06:06:33.000Z
|
2019-08-30T06:06:33.000Z
|
class solutionLeetcode_3:
    def lengthOfLongestSubstring(self, s: str) -> (int, str):
        if not s:
            return 0, ""
        left = 0
        start = 0  # start index of the best window seen so far
        lookup = set()
        n = len(s)
        max_len = 0
        cur_len = 0
        for i in range(n):
            cur_len += 1
            while s[i] in lookup:
                lookup.remove(s[left])
                left += 1
                cur_len -= 1
            if cur_len > max_len:
                max_len = cur_len
                start = left  # remember where the longest substring begins
            lookup.add(s[i])
        longestSubstring = s[start:start + max_len]
        return max_len, longestSubstring
class solutionLeetcode_4:
    def findMedianSortedArrays(self, nums1, nums2):
        nums1.extend(nums2)
        nums1.sort()
        if len(nums1) % 2 == 0:
            # Even count: the median is the mean of the two middle values.
            return sum(nums1[len(nums1)//2 - 1:len(nums1)//2 + 1])/2  # // is int division.
        else:
            return nums1[(len(nums1)-1)//2]
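# Examples added for illustration:
#   solutionLeetcode_4().findMedianSortedArrays([1, 3], [2])     -> 2
#   solutionLeetcode_4().findMedianSortedArrays([1, 2], [3, 4])  -> 2.5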
class solutionLeetcode_5:
def longestPalindrome(self, s):
str_length = len(s)
max_length = 0
start = 0
for i in range(str_length):
if i - max_length >= 1 and s[i - max_length - 1:i + 2] == s[i - max_length - 1:i+2][::-1]:
start = i - max_length - 1
max_length += 2
continue
if i - max_length >= 0 and s[i - max_length:i + 2] == s[i - max_length:i + 2][::-1]:
start = i - max_length
max_length += 1
return s[start:start + max_length + 1]
class solutionLeetcode_6:
def convert(self, s:str, numRows:int) -> list:
if numRows < 2:
return s
res = ["" for _ in range(numRows)] # "" stands for string
i, flag = 0, -1
for c in s:
res[i] += c
if i == 0 or i == numRows - 1:
flag = -flag
i += flag
return "".join(res)
class solutionLeetcode_7:
def reverse(self, x) -> int:
s = str(x)[::-1].strip('-') # use extended slices to reverse a string.
if int(s) < 2**31:
if x >= 0:
return int(s)
else:
return 0 - int(s)
return 0
class solutionLeetcode_8:
def myAtoi(self, str: str) -> int:
validChar = ['-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
validNumber = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
if len(str) == 0:
return 0
startIndex = -1
endIndex = 0
initialChar = True
lastEntryValid = True
stringChar = ''
for i in range(len(str)):
if str[i] == ' ' and initialChar:
continue
if (str[i] not in validChar) and initialChar:
return 0
if (str[i] in validChar) and initialChar:
initialChar = False
startIndex = i
if i == len(str) - 1:
if str[i] in ['-', '+']:
return 0
else:
return int(str)
if (i < len(str) - 1) and (str[i + 1] not in validNumber):
if str[i] in ['-', '+']:
return 0
else:
return int(str[i])
continue
if (str[i] not in validNumber) and (not initialChar):
endIndex = i
lastEntryValid = False
break
if startIndex == -1:
return 0
if lastEntryValid:
endIndex = len(str)
numberStr = str[startIndex:endIndex]
resultNumber = int(numberStr)
if resultNumber >= 2 ** 31:
return 2 ** 31 - 1
elif resultNumber <= -2 ** 31:
return -2 ** 31
else:
return resultNumber
class solutionLeetcode_10:
def isMatch(self, text: str, pattern: str) -> bool:
if not pattern:
return not text
firstMatch = bool(text) and pattern[0] in {text[0], '.'}
if len(pattern) >= 2 and pattern[1] == '*':
return self.isMatch(text, pattern[2:]) or (firstMatch and self.isMatch(text[1:], pattern))
else:
return firstMatch and self.isMatch(text[1:], pattern[1:])
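# Usage sketch (two classic LeetCode 10 cases, added for illustration):
#   solutionLeetcode_10().isMatch("aab", "c*a*b")               # True
#   solutionLeetcode_10().isMatch("mississippi", "mis*is*p*.")  # False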
class solutionLeetcode_11:
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
maxArea = 0
left = 0
right = len(height) - 1
while left < right:
area = (right - left) * min(height[left], height[right])
if maxArea < area:
maxArea = area
if height[left] < height[right]:
left += 1
else:
                right -= 1
return maxArea
| 31.14375
| 102
| 0.439896
| 4,960
| 0.995384
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.049769
|
c549524dfb308c9a530339a9a6c6add82b8d8653
| 9,114
|
py
|
Python
|
examples/twisted/websocket/auth_persona/server.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 1,670
|
2015-10-12T15:46:22.000Z
|
2022-03-30T22:12:53.000Z
|
examples/twisted/websocket/auth_persona/server.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 852
|
2015-10-16T22:11:03.000Z
|
2022-03-27T07:57:01.000Z
|
examples/twisted/websocket/auth_persona/server.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 790
|
2015-10-15T08:46:12.000Z
|
2022-03-30T12:22:13.000Z
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
import json
import urllib
import Cookie
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
import autobahn
from autobahn.util import newid, utcnow
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
class PersonaServerProtocol(WebSocketServerProtocol):
"""
WebSocket server protocol that tracks WebSocket connections using HTTP cookies,
and authenticates WebSocket connections using Mozilla Persona.
"""
def onConnect(self, request):
# This is called during the initial WebSocket opening handshake.
protocol, headers = None, {}
# our cookie tracking ID
self._cbtid = None
# see if there already is a cookie set ..
if 'cookie' in request.headers:
try:
cookie = Cookie.SimpleCookie()
cookie.load(str(request.headers['cookie']))
except Cookie.CookieError:
pass
else:
if 'cbtid' in cookie:
cbtid = cookie['cbtid'].value
if cbtid in self.factory._cookies:
self._cbtid = cbtid
log.msg("Cookie already set: %s" % self._cbtid)
# if no cookie is set, create a new one ..
if self._cbtid is None:
self._cbtid = newid()
maxAge = 86400
cbtData = {'created': utcnow(),
'authenticated': None,
'maxAge': maxAge,
'connections': set()}
self.factory._cookies[self._cbtid] = cbtData
# do NOT add the "secure" cookie attribute! "secure" refers to the
# scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
log.msg("Setting new cookie: %s" % self._cbtid)
# add this WebSocket connection to the set of connections
# associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)
# accept the WebSocket connection, speaking subprotocol `protocol`
# and setting HTTP headers `headers`
return (protocol, headers)
def onOpen(self):
# This is called when initial WebSocket opening handshake has
# been completed.
# see if we are authenticated ..
authenticated = self.factory._cookies[self._cbtid]['authenticated']
if not authenticated:
# .. if not, send authentication request
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_REQUIRED'}))
else:
# .. if yes, send info on authenticated user
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATED', 'email': authenticated}))
def onClose(self, wasClean, code, reason):
# This is called when WebSocket connection is gone
# remove this connection from list of connections associated with
# same cookie
self.factory._cookies[self._cbtid]['connections'].remove(self)
# if list gets empty, possibly do something ..
if not self.factory._cookies[self._cbtid]['connections']:
log.msg("All connections for {} gone".format(self._cbtid))
def onMessage(self, payload, isBinary):
# This is called when we receive a WebSocket message
if not isBinary:
msg = json.loads(payload)
if msg['cmd'] == 'AUTHENTICATE':
                # The client performed its Mozilla Persona authentication
                # and now wants to verify the assertion and log in.
assertion = msg.get('assertion')
audience = msg.get('audience')
# To verify the authentication, we need to send a HTTP/POST
# to Mozilla Persona. When successful, Persona will send us
# back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "tobias.oberstein@gmail.com",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url="https://verifier.login.persona.org/verify",
method='POST',
postdata=body,
headers=headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
if res['status'] == 'okay':
# Mozilla Persona successfully authenticated the user
# remember the user's email address. this marks the cookie as
# authenticated
self.factory._cookies[self._cbtid]['authenticated'] = res['email']
# inform _all_ WebSocket connections of the successful auth.
msg = json.dumps({'cmd': 'AUTHENTICATED', 'email': res['email']})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
log.msg("Authenticated user {}".format(res['email']))
else:
log.msg("Authentication failed: {}".format(res.get('reason')))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': res.get('reason')}))
self.sendClose()
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': str(err.value)}))
self.sendClose()
d.addCallbacks(done, error)
elif msg['cmd'] == 'LOGOUT':
# user wants to logout ..
if self.factory._cookies[self._cbtid]['authenticated']:
self.factory._cookies[self._cbtid]['authenticated'] = False
# inform _all_ WebSocket connections of the logout
msg = json.dumps({'cmd': 'LOGGED_OUT'})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
else:
log.msg("unknown command {}".format(msg))
class PersonaServerFactory(WebSocketServerFactory):
"""
WebSocket server factory with cookie/sessions map.
"""
protocol = PersonaServerProtocol
def __init__(self, url):
WebSocketServerFactory.__init__(self, url)
# map of cookies
self._cookies = {}
if __name__ == '__main__':
log.startLogging(sys.stdout)
print("Running Autobahn|Python {}".format(autobahn.version))
# our WebSocket server factory
factory = PersonaServerFactory("ws://127.0.0.1:8080")
# we serve static files under "/" ..
root = File(".")
# .. and our WebSocket server under "/ws" (note that Twisted uses
# bytes for URIs)
resource = WebSocketResource(factory)
root.putChild(b"ws", resource)
# run both under one Twisted Web Site
site = Site(root)
site.log = lambda _: None # disable any logging
reactor.listenTCP(8080, site)
reactor.run()
| 36.456
| 115
| 0.585473
| 6,750
| 0.740619
| 0
| 0
| 0
| 0
| 0
| 0
| 4,244
| 0.465657
|
c54a392610a02b36eccf6f7a462a2e02a2aa190a
| 1,681
|
py
|
Python
|
src/ggrc_risks/models/risk.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_risks/models/risk.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_risks/models/risk.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from sqlalchemy.ext.declarative import declared_attr
from ggrc import db
from ggrc.access_control.roleable import Roleable
from ggrc.fulltext.mixin import Indexed
from ggrc.models.associationproxy import association_proxy
from ggrc.models import mixins
from ggrc.models.deferred import deferred
from ggrc.models.object_document import PublicDocumentable
from ggrc.models.object_person import Personable
from ggrc.models import reflection
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState
class Risk(Roleable, HasObjectState, mixins.CustomAttributable, Relatable,
Personable, PublicDocumentable,
mixins.LastDeprecatedTimeboxed, mixins.BusinessObject,
Indexed, db.Model):
__tablename__ = 'risks'
# Overriding mixin to make mandatory
@declared_attr
def description(cls): # pylint: disable=no-self-argument
return deferred(db.Column(db.Text, nullable=False), cls.__name__)
risk_objects = db.relationship(
'RiskObject', backref='risk', cascade='all, delete-orphan')
objects = association_proxy('risk_objects', 'object', 'RiskObject')
_api_attrs = reflection.ApiAttributes(
'risk_objects',
reflection.Attribute('objects', create=False, update=False),
)
_aliases = {
"document_url": None,
"document_evidence": None,
"status": {
"display_name": "State",
"mandatory": False,
"description": "Options are: \n {}".format('\n'.join(
mixins.BusinessObject.VALID_STATES))
}
}
| 33.62
| 78
| 0.732302
| 1,021
| 0.607377
| 0
| 0
| 144
| 0.085663
| 0
| 0
| 392
| 0.233195
|
c54a493288773ae7775619c9f9c08446cac8b3d2
| 1,191
|
py
|
Python
|
booknlp/common/calc_coref_metrics.py
|
ishine/booknlp
|
2b42ccd40dc2c62097308398d4e08f91ecab4177
|
[
"MIT"
] | 539
|
2021-11-22T16:29:40.000Z
|
2022-03-30T17:50:58.000Z
|
booknlp/common/calc_coref_metrics.py
|
gxxu-ml/booknlp
|
2b42ccd40dc2c62097308398d4e08f91ecab4177
|
[
"MIT"
] | 6
|
2021-12-12T18:21:49.000Z
|
2022-03-30T20:51:40.000Z
|
booknlp/common/calc_coref_metrics.py
|
gxxu-ml/booknlp
|
2b42ccd40dc2c62097308398d4e08f91ecab4177
|
[
"MIT"
] | 44
|
2021-11-22T07:22:50.000Z
|
2022-03-25T20:02:26.000Z
|
import subprocess, re, sys
def get_coref_score(metric, path_to_scorer, gold=None, preds=None):
output=subprocess.check_output(["perl", path_to_scorer, metric, preds, gold]).decode("utf-8")
output=output.split("\n")[-3]
matcher=re.search("Coreference: Recall: \(.*?\) (.*?)% Precision: \(.*?\) (.*?)% F1: (.*?)%", output)
if matcher is not None:
recall=float(matcher.group(1))
precision=float(matcher.group(2))
f1=float(matcher.group(3))
return recall, precision, f1
def get_conll(path_to_scorer, gold=None, preds=None):
bcub_r, bcub_p, bcub_f=get_coref_score("bcub", path_to_scorer, gold, preds)
muc_r, muc_p, muc_f=get_coref_score("muc", path_to_scorer, gold, preds)
ceaf_r, ceaf_p, ceaf_f=get_coref_score("ceafe", path_to_scorer, gold, preds)
print("bcub:\t%.1f" % bcub_f)
print("muc:\t%.1f" % muc_f)
print("ceaf:\t%.1f" % ceaf_f)
avg=(bcub_f + muc_f + ceaf_f)/3.
print("Average F1: %.1f" % (avg))
# Generate Latex table
# print("%.1f&%.1f&%.1f&%.1f" % (bcub_f, muc_f, ceaf_f, avg))
return bcub_f, avg
if __name__ == "__main__":
goldFile=sys.argv[1]
predFile=sys.argv[2]
scorer=sys.argv[3]
bcub_f, avg=get_conll(scorer, gold=goldFile, preds=predFile)
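# Hedged usage sketch (file names are hypothetical, not from the original):
#   python calc_coref_metrics.py gold.conll system.conll scorer.pl
# where argv[1] is the gold CoNLL file, argv[2] the system output and argv[3]
# the reference coreference scorer; the script prints bcub, muc and ceafe F1
# plus their unweighted average.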
| 31.342105
| 102
| 0.686818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 258
| 0.216625
|
c54b7ed70bd070a66a466b8ee7706f4673635759
| 16,878
|
py
|
Python
|
apps/core/test.py
|
zjjott/html
|
68429832d8b022602915a267a62051f4869f430f
|
[
"MIT"
] | null | null | null |
apps/core/test.py
|
zjjott/html
|
68429832d8b022602915a267a62051f4869f430f
|
[
"MIT"
] | null | null | null |
apps/core/test.py
|
zjjott/html
|
68429832d8b022602915a267a62051f4869f430f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from __future__ import unicode_literals
from tornado.testing import AsyncTestCase
from apps.core.models import (ModelBase,
_get_master_engine,
_get_slave_engine)
from tornado.options import options
from apps.core.urlutils import urlpattens
from apps.auth.views import LoginHandler
from apps.views import IndexHandler
from apps.core.datastruct import QueryDict, lru_cache
from simplejson import loads
from tornado.testing import AsyncHTTPTestCase, gen_test
from apps.core.httpclient import (RESTfulAsyncClient, SessionClient)
from apps.core.crypto import get_random_string
from tornado.web import URLSpec
import re
from tornado.web import Application
from apps.core.cache.base import CacheBase, cache as cache_proxy
from tornado.gen import sleep
from mock import patch
from apps.core.timezone import now
from concurrent.futures import ThreadPoolExecutor
import thread
# 这样不会清掉数据库哈
options.testing = True # 这个太关键了所以不用mock,下边的base_url改掉了
class EngineTest(AsyncTestCase):
"""测试是否已经是测试用的sql连接了"""
contexts = None
def setUp(self):
if self.contexts is None:
self.contexts = []
o = patch.object(options.mockable(), 'sql_connection',
b"sqlite:///")
self.contexts.append(o)
for context in self.contexts:
context.__enter__()
super(EngineTest, self).setUp()
engine = _get_master_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
engine = _get_slave_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
def tearDown(self):
engine = _get_master_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
engine = _get_slave_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
for context in self.contexts:
context.__exit__()
super(EngineTest, self).tearDown()
def test_engine(self):
engine = _get_master_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
engine = _get_slave_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
class BaseTestCase(EngineTest):
contexts = None
@staticmethod
def _parse_cookie(cookie_line):
return cookie_line.split(";")[0]
def reverse_url(self, url_name, *args):
return self.get_url(self._app.reverse_url(url_name, *args))
def setUp(self):
if self.contexts is None:
self.contexts = []
o = patch.object(options.mockable(), 'base_url',
b"/")
self.contexts.append(o)
super(BaseTestCase, self).setUp()
engine = _get_master_engine()
self.assertEqual(engine.driver, "pysqlite")
ModelBase.metadata.create_all(engine)
def tearDown(self):
# 用sqlite 内存数据库不需要删除,主要是为了本地文件而搞的
engine = _get_master_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
engine = _get_slave_engine()
self.assertEqual(str(engine.url), options.test_db)
self.assertEqual(engine.driver, "pysqlite")
ModelBase.metadata.drop_all(engine)
super(BaseTestCase, self).tearDown()
class UrlTestCase(BaseTestCase, AsyncHTTPTestCase):
def get_app(self):
url = urlpattens('test',
[
("/login/", LoginHandler, None, "login"),
("/callback", LoginHandler, None, "callback"),
]
)
return Application(url)
def test_reverse(self):
self.assertEqual(self._app.reverse_url("test:login"), "/test/login/")
def test_urlpatten_with_prefex(self):
url = urlpattens('user',
[
("/login/", LoginHandler),
("/callback", LoginHandler),
]
)
root_url = [(r"/", IndexHandler)]
new_urls = root_url + url
self.assertEqual(new_urls[2].regex,
re.compile(r"/user/callback$"))
def test_urlpatten_without_prefex(self):
url = urlpattens('',
[
("/login/", LoginHandler),
("/callback", LoginHandler),
]
)
root_url = [(r"/", IndexHandler)]
new_urls = root_url + url
self.assertEqual(new_urls[1].regex,
URLSpec(r"/login/", LoginHandler).regex)
def test_urlpatten_radd(self):
url = urlpattens('',
[
("/login/", LoginHandler),
("/callback", LoginHandler),
]
)
root_url = [(r"/", IndexHandler)]
new_urls = url + root_url # 换顺序
self.assertEqual(new_urls[0].regex,
URLSpec(r"/login/", LoginHandler).regex)
class DataStructTestCase(EngineTest):
def test_urlencode_safe(self):
q = QueryDict({})
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode("/"), 'next=/a%26b/')
def test_urlencode_unicode(self):
q = QueryDict({})
q['next'] = '啊'
self.assertEqual(q.urlencode(), 'next=%E5%95%8A')
def test_urlencode_list(self):
q = QueryDict({})
q['next'] = ['1', "2"]
self.assertEqual(q.urlencode(), 'next=1&next=2')
def test_lru(self):
store = dict(zip("abcd", range(4)))
@lru_cache(2)
def somefunc(arg):
return store[arg]
self.assertEqual(somefunc("a"), 0)
self.assertEqual(somefunc("b"), 1)
cache_info = somefunc.cache_info()
self.assertEqual(cache_info.misses, 2)
self.assertEqual(cache_info.hits, 0)
self.assertEqual(somefunc("a"), 0)
self.assertEqual(somefunc("b"), 1)
cache_info = somefunc.cache_info()
self.assertEqual(cache_info.misses, 2)
self.assertEqual(cache_info.hits, 2)
somefunc.cache_clear()
self.assertEqual(somefunc("a"), 0)
self.assertEqual(somefunc("b"), 1)
cache_info = somefunc.cache_info()
self.assertEqual(cache_info.misses, 2)
self.assertEqual(cache_info.hits, 0)
self.assertEqual(somefunc("c"), 2)
self.assertEqual(somefunc("d"), 3)
cache_info = somefunc.cache_info()
self.assertEqual(cache_info.misses, 4)
self.assertEqual(cache_info.hits, 0)
def test_lru_nosize(self):
store = dict(zip("abcd", range(4)))
@lru_cache(None)
def somefunc(arg):
return store[arg]
self.assertEqual(somefunc("a"), 0)
self.assertEqual(somefunc("b"), 1)
cache_info = somefunc.cache_info()
self.assertEqual(cache_info.misses, 2)
self.assertEqual(cache_info.hits, 0)
class ClientTestCase(BaseTestCase, AsyncHTTPTestCase):
def get_app(self):
from main import make_app
return make_app()
def test_get(self):
client = RESTfulAsyncClient()
url = self.get_url("/api/")
client.get(url,
{"a": "b", "c": 1},
callback=self.stop)
response = self.wait()
response = loads(response.body)["data"]
self.assertItemsEqual(response['query'], {
"a": "b",
"c": "1"
})
def test_post(self):
client = RESTfulAsyncClient()
url = self.get_url("/api/")
response = client.post(url,
{"a": "b", "c": ["1", 3, 4]},
callback=self.stop)
# self.assertEqual()
response = self.wait()
response = loads(response.body)["data"]
self.assertItemsEqual(response['form'], {
"a": "b",
"c": ["1", 3, 4]
})
def test_put(self):
client = RESTfulAsyncClient()
url = self.get_url("/api/")
response = client.put(url,
{"a": "b", "c": ["1", 3, 4]},
callback=self.stop)
response = self.wait()
response = loads(response.body)["data"]
self.assertItemsEqual(response['form'], {
"a": "b",
"c": ["1", 3, 4]
})
def test_delete(self):
client = RESTfulAsyncClient()
url = self.get_url("/api/")
response = client.delete(url,
callback=self.stop)
response = self.wait()
self.assertEqual(response.code, 200)
class TestCrypto(EngineTest):
def test_random(self):
self.assertEqual(len(get_random_string(12)), 12)
self.assertEqual(len(get_random_string(20)), 20)
self.assertNotEqual(get_random_string(12), get_random_string(12))
class TestTimeUtils(EngineTest):
def test_now(self):
dt = now()
self.assertIsNotNone(dt.tzinfo)
class TestSessionClient(AsyncHTTPTestCase, BaseTestCase):
def get_app(self):
from main import make_app
return make_app()
def get_http_client(self):
return SessionClient(io_loop=self.io_loop)
def test_single_instance(self):
new_client = SessionClient(io_loop=self.io_loop)
self.assertEqual(id(new_client), id(self.http_client))
self.assertEqual(id(new_client.cookiejar),
id(self.http_client.cookiejar))
def test_session(self):
url = self.get_url("/api/")
self.http_client.get(url, callback=self.stop)
response = self.wait()
# 第一次请求有Set-Cookie头
self.assertEquals(response.code, 200)
self.assertIn("Set-Cookie", response.headers)
url = self.get_url("/api/")
self.http_client.get(url, callback=self.stop)
response = self.wait()
# 第二次响应头就没有Set-Cookie了
self.assertNotIn("Set-Cookie", response.headers)
self.assertIn("cookie", response.request.headers)
# 外部请求影响速度,不测了
# self.http_client.get("http://httpbin.org/get",
# callback=self.stop)
# response = self.wait()
# self.assertNotIn("cookie", response.request.headers)
class MemoryCacheTestCase(EngineTest):
contexts = None
def setUp(self):
if self.contexts is None:
self.contexts = []
o = patch.object(options.mockable(), 'cache_engine',
"apps.core.cache.memory.MemoryCache")
self.contexts.append(o)
super(MemoryCacheTestCase, self).setUp()
@gen_test
def test_get(self):
CacheBase.configure(
"apps.core.cache.memory.MemoryCache", io_loop=self.io_loop)
cache = CacheBase(self.io_loop)
value = yield cache.get("key_not_exist")
self.assertEqual(value, None)
@gen_test
def test_set(self):
CacheBase.configure(
"apps.core.cache.memory.MemoryCache", io_loop=self.io_loop)
cache = CacheBase(self.io_loop)
yield cache.set("somekey", 1)
value = yield cache.get("somekey")
self.assertEqual(value, 1)
@gen_test
def test_size_set(self):
CacheBase.configure(
"apps.core.cache.memory.MemoryCache", io_loop=self.io_loop,
defaults={"max_size": 2})
cache = CacheBase()
yield cache.set("somekey", 1)
yield cache.set("somekey2", 2)
yield cache.set("somekey3", 3)
value = yield cache.get("somekey")
self.assertEqual(value, None)
@gen_test
def test_size_lru(self):
CacheBase.configure(
"apps.core.cache.memory.MemoryCache", io_loop=self.io_loop,
defaults={"max_size": 2})
cache = CacheBase()
yield cache.set("somekey", 1)
yield cache.set("somekey2", 2)
# yield cache.set("somekey3", 3)
value = yield cache.get("somekey")
self.assertEqual(value, 1)
yield cache.set("somekey3", 3) # somekey2被挤出
value = yield cache.get("somekey")
self.assertEqual(value, 1)
value = yield cache.get("somekey2")
self.assertEqual(value, None)
@gen_test
def test_timeout(self):
CacheBase.configure(
"apps.core.cache.memory.MemoryCache", io_loop=self.io_loop,
defaults={"max_size": 2})
cache = CacheBase()
yield cache.set("somekey", 1, 1)
yield cache.set("somekey2", 2, 2)
yield sleep(2)
self.assertNotIn("somekey", cache._cache)
self.assertNotIn("somekey", cache)
@gen_test
def test_proxy(self):
o = patch.object(options.mockable(),
'cache_options',
{"max_size": 2})
o.__enter__()
self.contexts.append(o)
o = patch.object(options.mockable(),
'cache_engine',
"apps.core.cache.memory.MemoryCache")
o.__enter__()
self.contexts.append(o)
yield cache_proxy.set("somekey", 1, 1)
yield cache_proxy.set("somekey2", 2, 2)
yield sleep(2)
self.assertNotIn("somekey", cache_proxy._cache)
self.assertNotIn("somekey", cache_proxy)
class A(object):
def __init__(self, arg):
self.arg = arg
class RedisCacheTest(BaseTestCase):
# teardown怎么清掉呢。。。。
@gen_test
def test_get(self):
CacheBase.configure("apps.core.cache.redis.RedisCache",
defaults=options.cache_options)
cache = CacheBase(self.io_loop)
value = yield cache.get("key_not_exist")
self.assertEqual(value, None)
@gen_test
def test_set(self):
CacheBase.configure("apps.core.cache.redis.RedisCache",
defaults=options.cache_options)
cache = CacheBase(self.io_loop)
yield cache.set("testkey", "value")
value = yield cache.get("testkey",)
self.assertEqual(value, "value")
yield cache.delete("testkey")
value = yield cache.get("testkey",)
self.assertEqual(value, None)
@gen_test
def test_set_object(self):
CacheBase.configure("apps.core.cache.redis.RedisCache",
defaults=options.cache_options)
cache = CacheBase(self.io_loop)
obj = A(123123)
yield cache.set("testkey", obj)
value = yield cache.get("testkey",)
self.assertEqual(isinstance(value, A), True)
self.assertEqual(value.arg, 123123)
yield cache.delete("testkey")
value = yield cache.get("testkey",)
self.assertEqual(value, None)
@gen_test
def test_set_dict(self):
CacheBase.configure("apps.core.cache.redis.RedisCache",
defaults=options.cache_options)
cache = CacheBase(self.io_loop)
obj = {"asd": 123, "zxc": "qwe"}
yield cache.set("testkey", obj)
value = yield cache.get("testkey",)
self.assertEqual(isinstance(value, dict), True)
self.assertItemsEqual(value, {"asd": 123, "zxc": "qwe"})
yield cache.delete("testkey")
value = yield cache.get("testkey",)
self.assertEqual(value, None)
@gen_test
def test_bin(self):
CacheBase.configure("apps.core.cache.redis.RedisCache",
defaults=options.cache_options)
cache = CacheBase(self.io_loop)
obj = {"asd": 123, "zxc": u"啊"}
yield cache.set("testkey", obj)
value = yield cache.get("testkey",)
self.assertItemsEqual(value, {"asd": 123, "zxc": u"啊"})
self.assertTrue(isinstance(value["zxc"], unicode))
obj = {"asd": 123, "zxc": b"\x00\x01\x02"}
yield cache.set("testkey2", obj)
value = yield cache.get("testkey2",)
self.assertTrue(isinstance(value["zxc"], bytes))
self.assertEquals(value["zxc"], b"\x00\x01\x02")
class ExecutorTestCase(EngineTest):
def user_pow(self, *args):
self.assertNotEqual(thread.get_ident(), self.father_id)
return pow(*args)
def test_thread_db(self):
self.father_id = thread.get_ident()
with ThreadPoolExecutor(max_workers=4) as exectors:
future = exectors.map(self.user_pow, range(5), range(5))
self.assertItemsEqual(list(future), [1, 1, 4, 27, 256])
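# Hedged note (assumption, not in the original file): this suite targets
# Python 2 (it imports `thread` and relies on `unicode`/`assertItemsEqual`),
# so it would be run with something like `python2 -m unittest apps.core.test`;
# the RedisCacheTest cases additionally expect a reachable redis configured
# through options.cache_options.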
| 33.159136
| 77
| 0.583185
| 15,999
| 0.935614
| 5,004
| 0.292632
| 5,391
| 0.315263
| 0
| 0
| 2,289
| 0.13386
|
c54bfc8137e477b8a93b0291e14e014c3954ee65
| 622
|
py
|
Python
|
docker/aws/update_event_mapping.py
|
uk-gov-mirror/nationalarchives.tdr-jenkins
|
1bcbee009d4384a777247039d44b2790eba34caa
|
[
"MIT"
] | null | null | null |
docker/aws/update_event_mapping.py
|
uk-gov-mirror/nationalarchives.tdr-jenkins
|
1bcbee009d4384a777247039d44b2790eba34caa
|
[
"MIT"
] | 34
|
2020-02-03T14:20:42.000Z
|
2022-01-26T09:22:09.000Z
|
docker/aws/update_event_mapping.py
|
uk-gov-mirror/nationalarchives.tdr-jenkins
|
1bcbee009d4384a777247039d44b2790eba34caa
|
[
"MIT"
] | 1
|
2021-04-11T07:11:53.000Z
|
2021-04-11T07:11:53.000Z
|
import sys
from sessions import get_session
account_number = sys.argv[1]
stage = sys.argv[2]
function_name = sys.argv[3]
version = sys.argv[4]
function_arn = f'arn:aws:lambda:eu-west-2:{account_number}:function:{function_name}'
boto_session = get_session(account_number, "TDRJenkinsLambdaRole" + stage.capitalize())
client = boto_session.client("lambda")
event_mappings = client.list_event_source_mappings()['EventSourceMappings']
uuid = list(filter(lambda x: x['FunctionArn'].startswith(function_arn), event_mappings))[0]['UUID']
client.update_event_source_mapping(UUID=uuid, FunctionName=function_arn + ":" + version)
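# Hedged usage sketch (the argument values are hypothetical):
#   python update_event_mapping.py 123456789012 intg my-lambda 4
# i.e. account number, stage, function name, version; this repoints the first
# event source mapping whose function ARN matches the assembled
# arn:aws:lambda:eu-west-2:... prefix at the given published version.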
| 41.466667
| 99
| 0.786174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.228296
|
c54c0437171dca7cbeb276eabca7979dd5dce208
| 2,202
|
py
|
Python
|
src/python/compressao_huffman.py
|
willisnou/Algoritmos-e-Estruturas-de-Dados
|
b70a2f692ccae948576177560e3628b9dece5aee
|
[
"MIT"
] | 653
|
2015-06-07T14:45:40.000Z
|
2022-03-25T17:31:58.000Z
|
src/python/compressao_huffman.py
|
willisnou/Algoritmos-e-Estruturas-de-Dados
|
b70a2f692ccae948576177560e3628b9dece5aee
|
[
"MIT"
] | 64
|
2017-10-29T10:53:37.000Z
|
2022-03-14T23:49:18.000Z
|
src/python/compressao_huffman.py
|
willisnou/Algoritmos-e-Estruturas-de-Dados
|
b70a2f692ccae948576177560e3628b9dece5aee
|
[
"MIT"
] | 224
|
2015-06-07T14:46:00.000Z
|
2022-03-25T17:36:46.000Z
|
# Huffman tree
class node:
def __init__(self, freq, symbol, left=None, right=None):
        # symbol frequency
self.freq = freq
        # symbol (character)
self.symbol = symbol
        # node to the left of the current node
self.left = left
        # node to the right of the current node
self.right = right
        # tree direction (0/1)
self.huff = ''
# Utility function that prints the Huffman
# codes for all symbols in the Huffman
# tree that is about to be built
def printNodes(node, val=''):
    # Huffman code for the current node
newVal = val + str(node.huff)
    # if the node is not a leaf of the
    # tree, walk down into it until a
    # leaf is reached
if(node.left):
printNodes(node.left, newVal)
if(node.right):
printNodes(node.right, newVal)
    # if the node is a leaf of the tree,
    # print its Huffman code
if(not node.left and not node.right):
print(f"{node.symbol} -> {newVal}")
# characters for the Huffman tree
chars = ['a', 'b', 'c', 'd', 'e', 'f']
# character frequencies
freq = [5, 9, 12, 13, 16, 45]
# list holding the unused nodes
nodes = []
if __name__ == '__main__':
    # convert characters and frequencies into
    # Huffman tree nodes
for x in range(len(chars)):
nodes.append(node(freq[x], chars[x]))
while len(nodes) > 1:
        # sort all nodes in ascending order
        # based on their frequency
nodes = sorted(nodes, key=lambda x: x.freq)
        # pick the two smallest nodes
left = nodes[0]
right = nodes[1]
        # assign a direction value to these nodes
        # (right or left)
left.huff = 0
right.huff = 1
        # combine the two smallest nodes into a new
        # parent node for them
newNode = node(
left.freq +
right.freq,
left.symbol +
right.symbol,
left,
right)
        # remove the two nodes and add the parent
        # node as a new node among the others
nodes.remove(left)
nodes.remove(right)
nodes.append(newNode)
    # Huffman tree is ready!
printNodes(nodes[0])
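# Hedged check (codes derived by hand from the frequencies above, not printed
# in the original): for freq = [5, 9, 12, 13, 16, 45] the script prints
#   f -> 0, c -> 100, d -> 101, a -> 1100, b -> 1101, e -> 111
# so the most frequent symbol receives the shortest code, as expected.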
| 24.741573
| 60
| 0.584469
| 383
| 0.170071
| 0
| 0
| 0
| 0
| 0
| 0
| 1,018
| 0.452043
|
c54db6fb5167c6cfc8f323c48a3a8c66fab835af
| 8,927
|
py
|
Python
|
optiga.py
|
boraozgen/personalize-optiga-trust
|
2a158d9fb6cba2bfabce8f5eecb38bc7b81f5bc8
|
[
"MIT"
] | 6
|
2019-09-27T13:16:29.000Z
|
2021-04-19T22:00:49.000Z
|
optiga.py
|
boraozgen/personalize-optiga-trust
|
2a158d9fb6cba2bfabce8f5eecb38bc7b81f5bc8
|
[
"MIT"
] | 2
|
2020-07-10T12:40:59.000Z
|
2020-08-13T09:26:15.000Z
|
optiga.py
|
boraozgen/personalize-optiga-trust
|
2a158d9fb6cba2bfabce8f5eecb38bc7b81f5bc8
|
[
"MIT"
] | 7
|
2019-08-23T09:20:52.000Z
|
2021-06-14T15:01:14.000Z
|
import argparse
import json
import base64
import hashlib
import sys
import binascii
from optigatrust.util.types import *
from optigatrust.pk import *
from optigatrust.x509 import *
private_key_slot_map = {
'second': KeyId.ECC_KEY_E0F1,
'0xE0E1': KeyId.ECC_KEY_E0F1,
'0xE0F1': KeyId.ECC_KEY_E0F1,
'third': KeyId.ECC_KEY_E0F2,
'0xE0E2': KeyId.ECC_KEY_E0F2,
'0xE0F2': KeyId.ECC_KEY_E0F2,
'fourth': KeyId.ECC_KEY_E0F3,
'0xE0E3': KeyId.ECC_KEY_E0F3,
'0xE0F3': KeyId.ECC_KEY_E0F3,
'five': KeyId.RSA_KEY_E0FC,
'0xE0FC': KeyId.RSA_KEY_E0FC,
'six': KeyId.RSA_KEY_E0FD,
'0xE0FD': KeyId.RSA_KEY_E0FD
}
certificate_slot_map = {
'second': ObjectId.USER_CERT_1,
'0xE0E1': ObjectId.USER_CERT_1,
'0xE0F1': ObjectId.USER_CERT_1,
'third': ObjectId.USER_CERT_2,
'0xE0E2': ObjectId.USER_CERT_2,
'0xE0F2': ObjectId.USER_CERT_2,
'fourth': ObjectId.USER_CERT_3,
'0xE0E3': ObjectId.USER_CERT_3,
'0xE0F3': ObjectId.USER_CERT_3,
'0xE0E8': ObjectId.TRUST_ANCHOR_1,
'0xE0EF': ObjectId.TRUST_ANCHOR_2
}
object_slot_map = {
'0xf1d0': ObjectId.DATA_TYPE1_0,
'0xf1d1': ObjectId.DATA_TYPE1_1,
'0xf1d2': ObjectId.DATA_TYPE1_2,
'0xf1d3': ObjectId.DATA_TYPE1_3,
'0xf1d4': ObjectId.DATA_TYPE1_4,
'0xf1d5': ObjectId.DATA_TYPE1_5,
'0xf1d6': ObjectId.DATA_TYPE1_6,
'0xf1d7': ObjectId.DATA_TYPE1_7,
'0xf1d8': ObjectId.DATA_TYPE1_8,
'0xf1d9': ObjectId.DATA_TYPE1_9,
'0xf1da': ObjectId.DATA_TYPE1_A,
'0xf1db': ObjectId.DATA_TYPE1_B,
'0xf1dc': ObjectId.DATA_TYPE1_C,
'0xf1dd': ObjectId.DATA_TYPE1_D,
'0xf1de': ObjectId.DATA_TYPE1_E,
'0xf1e0': ObjectId.DATA_TYPE2_0,
'0xf1e1': ObjectId.DATA_TYPE2_1
}
allowed_object_ids = [
# Certificate Slots
'0xe0e0', '0xe0e1', '0xe0e2', '0xe0e3',
# Trust Anchor Slots
'0xe0e8', '0xe0ef',
# Arbitrary Data Objects
'0xf1d0', '0xf1d1', '0xf1d2', '0xf1d3', '0xf1d4', '0xf1d5', '0xf1d6', '0xf1d7',
'0xf1d8', '0xf1d9', '0xf1da', '0xf1db', '0xf1dc', '0xf1dd', '0xf1de',
'0xf1e0', '0xf1e1'
]
def _break_apart(f, sep, step):
return sep.join(f[n:n + step] for n in range(0, len(f), step))
def parse_csr(_args):
if not _args.csr:
raise IOError('--csr command is used, but no config file provided. Exit.')
with open(_args.csr, "r") as csr_config:
try:
cfg = json.load(csr_config)
cfg = cfg['csr_config']
if not _args.quiet or _args.verbose:
print("\nYour configuration is following:\n{0}".format(json.dumps(cfg, sort_keys=True, indent=4)))
if 'certificate_info' not in cfg:
raise IOError("Your CSR configuration file should have a certificate_info field. Check out the example")
if 'key_info' not in cfg:
raise IOError("Your CSR configuration file should have a key_info field. Check out the example")
if 'signature_info' not in cfg:
raise IOError("Your CSR configuration file should have a signature_info field. Check out the example")
except json.JSONDecodeError as err:
raise IOError("The config file incorrectly composed. Parser error. "
"Unformated Message from parser: {0}".format(err.msg))
if _args.slot:
if _args.slot not in private_key_slot_map:
raise ValueError("--slot has been used with wrong argument, allowed values {0}, you used {1}".
format(private_key_slot_map, _args.slot))
_key_id_slot = private_key_slot_map[_args.slot]
else:
if cfg['key_info']['parameters']['slot'] not in private_key_slot_map:
raise ValueError("--slot has been used with wrong argument, allowed values {0}, you used {1}".
format(private_key_slot_map, cfg['key_info']['parameters']['slot']))
_key_id_slot = private_key_slot_map[cfg['key_info']['parameters']['slot']]
if cfg['key_info']['algorithm_id'] == 'ec':
key = ecc.generate_keypair(cfg['key_info']['parameters']['curve'], _key_id_slot)
elif cfg['key_info']['algorithm_id'] == 'rsa':
key = rsa.generate_keypair(cfg['key_info']['parameters']['key_size'], _key_id_slot)
else:
raise ValueError("unsupported algorithm_id, allowed values 'rsa', or 'ec', you used {0}".
format(cfg['key_info']['algorithm_id']))
builder = csr.Builder(cfg['certificate_info'], key)
_csr_request = base64.b64encode(builder.build(key).dump())
csr_fingerprint_sha1 = hashlib.sha1(_csr_request).hexdigest()
csr_request = '-----BEGIN CERTIFICATE REQUEST-----\n'
csr_request += _break_apart(_csr_request.decode(), '\n', 64)
csr_request += '\n-----END CERTIFICATE REQUEST-----'
with open(csr_fingerprint_sha1 + ".csr", "w+") as csr_file:
csr_file.write(csr_request)
_return_value = {
"filename": csr_fingerprint_sha1 + ".csr",
"public_key": binascii.hexlify(bytearray(key.pkey)).decode()
}
if _args.query:
if _args.query[0] not in _return_value:
raise ValueError("The query argument is not within the available values. Available {0}, you gave {1}".
format(_return_value.keys(), _args.query))
return_value = _return_value[_args.query[0]]
else:
return_value = _return_value
sys.stdout.write(str(return_value))
sys.stdout.flush()
sys.exit(0)
def parse_write(_args):
if not _args.write:
raise IOError('--write command is used, but no data file provided. Exit.')
if not _args.slot:
_id = 'second'
else:
_id = _args.slot
if _id not in certificate_slot_map:
raise ValueError("--id has been used with wrong argument, allowed values {0}, you used {1}".
format(certificate_slot_map, _id))
_certificate_slot = certificate_slot_map[_id]
with open(_args.write, "r") as datafile:
data = datafile.read()
if not _args.quiet or _args.verbose:
print("Your are going to write the following file:\n{0}".format(data))
cert.write_new(data, _certificate_slot)
if not _args.quiet or _args.verbose:
print("Certificate has been written")
'''
#################################################################################################################
'''
parser = argparse.ArgumentParser(description="Communicate with your OPTIGA(TM) Trust sample")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("--query", nargs=1, metavar='QUERY_ARGUMENT',
help="Define the query argument you want to extract from the output")
parser.add_argument("--csr", metavar='CONFIG_FILE',
help="Instructs the script to generate a Certificate Signing Request."
"Give the script the configuration file for your CSR (fields like Common Name, "
"AWS IoT Thing Name, etc)")
parser.add_argument("--write", metavar='DATA_TO_WRITE', help="Write provided data to the chip.")
parser.add_argument("--read",
metavar='OBJECT_ID',
choices=allowed_object_ids,
help="Certificate Slots: 0xe0e0-0xe0e3\n"
"Trust Anchor slots: 0xe0e8 and 0xe0ef\n"
"100 bytes: 0xf1d0-0xf1de\n"
"1500 bytes: 0xf1e0, 0xf1e1")
parser.add_argument("--slot",
choices=[
# They all mean the same
'second', '0xe0e1', '0xe0f1',
'third', '0xe0e2', '0xe0f2',
'fourth', '0xe0e3', '0xe0f3',
'five', '0xe0fc', 'six', '0xe0fd',
'0xE0E8', '0xE0EF'
],
help="Use one the predefined slots; e.g. second, 0xe0e1, or 0xe0f1, they all mean the same")
parser.add_argument("--id",
metavar='OBJECT_ID',
choices=allowed_object_ids,
help="USe to define which ID to use with your write command \n"
"Certificate Slots: 0xe0e0-0xe0e3\n"
"Trust Anchor slots: 0xe0e8 and 0xe0ef\n"
"100 bytes: 0xf1d0-0xf1de\n"
"1500 bytes: 0xf1e0, 0xf1e1")
args = parser.parse_args()
if args.csr:
parse_csr(args)
sys.exit(0)
if args.write:
parse_write(args)
sys.exit(0)
else:
parser.print_help()
sys.exit(0)
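# Hedged usage sketch (file names are hypothetical):
#   python optiga.py --csr csr_config.json --slot second --query filename
#   python optiga.py --write device.pem --slot second
# The first call builds a CSR from the JSON config, generates a keypair in
# the chosen slot and prints the resulting file name; the second writes a
# PEM certificate into the corresponding certificate slot.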
| 39.325991
| 121
| 0.596729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,123
| 0.349838
|
c54de03fd28e53eb54540b034a2e8a1f2994146a
| 3,532
|
py
|
Python
|
graph_test.py
|
MathewMacDougall/Two-Faced-Type
|
53fae81a151fd0689ac7328dda6b3e984c9a42e9
|
[
"MIT"
] | null | null | null |
graph_test.py
|
MathewMacDougall/Two-Faced-Type
|
53fae81a151fd0689ac7328dda6b3e984c9a42e9
|
[
"MIT"
] | 25
|
2020-11-15T05:30:23.000Z
|
2020-12-12T22:03:35.000Z
|
graph_test.py
|
MathewMacDougall/Two-Faced-Type
|
53fae81a151fd0689ac7328dda6b3e984c9a42e9
|
[
"MIT"
] | null | null | null |
import unittest
from graph import Graph
class TestGraph(unittest.TestCase):
def test_create_graph_simple(self):
graph = Graph()
graph.add_edge(0, 1)
graph.add_edge(1, 2)
graph.add_edge(2, 0)
graph.add_edge(2, 0) # Test double edges don't make a difference
self.assertEqual(graph.all_vertices(), {0, 1, 2})
self.assertCountEqual(graph.get_adjacency_list()[0], [1, 2])
self.assertCountEqual(graph.get_adjacency_list()[1], [0, 2])
self.assertCountEqual(graph.get_adjacency_list()[2], [0, 1])
def test_create_graph_complex(self):
graph = Graph()
graph.add_edge(0, 1)
graph.add_edge(1, 2)
graph.add_edge(2, 3)
graph.add_edge(2, 3)
graph.add_edge(2, 4)
graph.add_edge(1, 4)
graph.add_edge(4, 5)
graph.add_edge(5, 0)
self.assertEqual(graph.all_vertices(), {0, 1, 2, 3, 4, 5})
self.assertCountEqual(graph.get_adjacency_list()[0], [1, 5])
self.assertCountEqual(graph.get_adjacency_list()[1], [0, 2, 4])
self.assertCountEqual(graph.get_adjacency_list()[2], [1, 4, 3])
self.assertCountEqual(graph.get_adjacency_list()[3], [2])
self.assertCountEqual(graph.get_adjacency_list()[4], [1, 2, 5])
self.assertCountEqual(graph.get_adjacency_list()[5], [4, 0])
def test_remove_vertex(self):
graph = Graph()
graph.add_edge(0, 1)
graph.add_edge(1, 2)
graph.add_edge(2, 3)
graph.add_edge(2, 3)
graph.add_edge(2, 4)
graph.add_edge(1, 4)
graph.add_edge(4, 5)
graph.add_edge(5, 0)
self.assertEqual(graph.all_vertices(), {0, 1, 2, 3, 4, 5})
print(len(graph.all_vertices()))
graph.remove_vertex(0)
print(len(graph.all_vertices()))
self.assertEqual(graph.all_vertices(), {1, 2, 3, 4, 5})
self.assertCountEqual(graph.get_adjacency_list()[1], [2, 4])
self.assertCountEqual(graph.get_adjacency_list()[2], [1, 4, 3])
self.assertCountEqual(graph.get_adjacency_list()[3], [2])
self.assertCountEqual(graph.get_adjacency_list()[4], [1, 2, 5])
self.assertCountEqual(graph.get_adjacency_list()[5], [4])
graph.remove_vertex(4)
self.assertEqual(graph.all_vertices(), {1, 2, 3, 5})
self.assertCountEqual(graph.get_adjacency_list()[1], [2])
self.assertCountEqual(graph.get_adjacency_list()[2], [1, 3])
self.assertCountEqual(graph.get_adjacency_list()[3], [2])
self.assertCountEqual(graph.get_adjacency_list()[5], [])
def test_is_connected_with_connected_graph(self):
graph = Graph()
graph.add_edge(0, 1)
graph.add_edge(1, 2)
graph.add_edge(2, 3)
graph.add_edge(2, 3)
graph.add_edge(2, 4)
graph.add_edge(1, 4)
graph.add_edge(4, 5)
graph.add_edge(5, 0)
self.assertEqual(graph.all_vertices(), {0, 1, 2, 3, 4, 5})
self.assertTrue(graph.is_connected())
def test_is_connected_with_disconnected_graph(self):
graph = Graph()
graph.add_edge(0, 1)
graph.add_edge(1, 2)
graph.add_edge(2, 3)
graph.add_edge(2, 3)
graph.add_edge(2, 4)
graph.add_edge(1, 4)
graph.add_edge(4, 5)
graph.add_edge(5, 0)
self.assertEqual(graph.all_vertices(), {0, 1, 2, 3, 4, 5})
graph.remove_vertex(2)
self.assertFalse(graph.is_connected())
if __name__ == '__main__':
unittest.main()
| 34.627451
| 72
| 0.610136
| 3,441
| 0.974236
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.015006
|
c5508e61b45a9bd59041d4ba0c8bea652aa09b89
| 2,033
|
py
|
Python
|
cfnbootstrap/construction_errors.py
|
roberthutto/aws-cfn-bootstrap
|
801a16802a931fa4dae0eba4898fe1ccdb304924
|
[
"Apache-2.0"
] | null | null | null |
cfnbootstrap/construction_errors.py
|
roberthutto/aws-cfn-bootstrap
|
801a16802a931fa4dae0eba4898fe1ccdb304924
|
[
"Apache-2.0"
] | null | null | null |
cfnbootstrap/construction_errors.py
|
roberthutto/aws-cfn-bootstrap
|
801a16802a931fa4dae0eba4898fe1ccdb304924
|
[
"Apache-2.0"
] | 3
|
2017-02-10T13:14:38.000Z
|
2018-09-20T01:04:20.000Z
|
#==============================================================================
# Copyright 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
class BuildError(Exception):
"""
Base exception for errors raised while building
"""
pass
class NoSuchConfigSetError(BuildError):
"""
    Exception signifying no config set with the specified name exists
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class NoSuchConfigurationError(BuildError):
"""
    Exception signifying no configuration with the specified name exists
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class CircularConfigSetDependencyError(BuildError):
"""
Exception signifying circular dependency in configSets
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class ToolError(BuildError):
"""
Exception raised by Tools when they cannot successfully change reality
Attributes:
msg - a human-readable error message
code - an error code, if applicable
"""
def __init__(self, msg, code=None):
self.msg = msg
self.code = code
def __str__(self):
        if self.code:
return '%s (return code %s)' % (self.msg, self.code)
else:
return self.msg
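# Hedged usage sketch (the command and return code are hypothetical):
#   raise ToolError("yum install failed", code=1)
# renders via __str__ above as "yum install failed (return code 1)".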
| 28.236111
| 79
| 0.619774
| 1,243
| 0.611412
| 0
| 0
| 0
| 0
| 0
| 0
| 1,261
| 0.620266
|
c5514806a6a0a0953948700c69152edf438355ea
| 2,798
|
py
|
Python
|
tests/test_variable.py
|
snc2/tequila
|
6767ced9215408f7d055c22df7a66ccd610b00fb
|
[
"MIT"
] | 1
|
2021-01-11T18:40:47.000Z
|
2021-01-11T18:40:47.000Z
|
tests/test_variable.py
|
snc2/tequila
|
6767ced9215408f7d055c22df7a66ccd610b00fb
|
[
"MIT"
] | 1
|
2020-05-08T13:34:33.000Z
|
2021-12-06T06:12:37.000Z
|
tests/test_variable.py
|
snc2/tequila
|
6767ced9215408f7d055c22df7a66ccd610b00fb
|
[
"MIT"
] | null | null | null |
import pytest
from tequila import numpy as np
from tequila.circuit.gradient import grad
from tequila.objective.objective import Objective, Variable
import operator
def test_nesting():
a = Variable(name='a')
variables = {a: 3.0}
b = a + 2 - 2
c = (b * 5) / 5
d = -(-c)
e = d ** 0.5
f = e ** 2
assert np.isclose(a(variables), f(variables))
def test_gradient():
a = Variable(name='a')
variables = {a: 3.0}
b = a + 2 - 2
c = (b * 5) / 5
d = -(-c)
assert grad(d, a)(variables) == 1.0
def test_equality():
a = Variable('a')
b = Variable('a.')
assert a != b
def test_transform_update():
a = Variable('a')
b = Variable('a.')
t = Objective(transformation=operator.add, args=[a, b])
variables = {a: 8, b: 1, a: 9, "c": 17}
assert np.isclose(float(t(variables)), 10.0)
@pytest.mark.parametrize('gradvar', ['a', 'b', 'c', 'd', 'e', 'f'])
def test_exotic_gradients(gradvar):
    # a and b will fail with autograd, but not with jax
a = Variable('a')
b = Variable('b')
c = Variable('c')
d = Variable('d')
e = Variable('e')
f = Variable('f')
variables = {a: 2.0, b: 3.0, c: 4.0, d: 5.0, e: 6.0, f: 7.0}
t = c * a ** b + b / c - Objective(args=[c], transformation=np.cos) + f / (d * e) + a * Objective(args=[d],
transformation=np.exp) / (
f + b) + Objective(args=[e], transformation=np.tanh) + Objective(args=[f], transformation=np.sinc)
g = grad(t, gradvar)
if gradvar == 'a':
assert np.isclose(g(variables) , c(variables) * b(variables) * (a(variables) ** (b(variables) - 1.)) + np.exp(d(variables)) / (f(variables) + b(variables)))
if gradvar == 'b':
assert np.isclose(g(variables) , (c(variables) * a(variables) ** b(variables)) * np.log(a(variables)) + 1. / c(variables) - a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)) ** 2.0)
if gradvar == 'c':
assert np.isclose(g(variables) , a(variables) ** b(variables) - b(variables) / c(variables) ** 2. + np.sin(c(variables)))
if gradvar == 'd':
assert np.isclose(g(variables) , -f(variables) / (np.square(d(variables)) * e(variables)) + a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)))
if gradvar == 'e':
assert np.isclose(g(variables), 2. / (1. + np.cosh(2 * e(variables))) - f(variables) / (d(variables) * e(variables) ** 2.))
if gradvar == 'f':
assert np.isclose(g(variables) , 1. / (d(variables) * e(variables)) - a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)) ** 2. + np.cos(np.pi * f(variables)) / f(variables) - np.sin(np.pi * f(variables)) / (np.pi * f(variables) ** 2.))
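# Hedged note (assumption): with tequila and pytest installed this module is
# run as `pytest tests/test_variable.py -q`; test_exotic_gradients checks one
# hand-derived analytic gradient per parametrized variable.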
| 39.408451
| 260
| 0.550751
| 0
| 0
| 0
| 0
| 1,939
| 0.692995
| 0
| 0
| 131
| 0.046819
|
c55176ac699f36bb549a798358fd9868f0da10c3
| 7,649
|
py
|
Python
|
getnear/tseries.py
|
edwardspeyer/getnear
|
746f3cedc1aed6166423f54d32e208017f660b38
|
[
"MIT"
] | null | null | null |
getnear/tseries.py
|
edwardspeyer/getnear
|
746f3cedc1aed6166423f54d32e208017f660b38
|
[
"MIT"
] | null | null | null |
getnear/tseries.py
|
edwardspeyer/getnear
|
746f3cedc1aed6166423f54d32e208017f660b38
|
[
"MIT"
] | null | null | null |
from getnear.config import Tagged, Untagged, Ignore
from getnear.logging import info
from lxml import etree
import re
import requests
import telnetlib
def connect(hostname, *args, **kwargs):
url = f'http://{hostname}/'
html = requests.get(url).text
doc = etree.HTML(html)
for title in doc.xpath('//title'):
        if re.match(r'NETGEAR GS\d+T', title.text):
return TSeries(hostname, *args, **kwargs)
class TSeries:
def __init__(self, hostname, password='password', old_password='password', debug=False):
info('connecting')
self.t = telnetlib.Telnet(hostname, 60000)
if debug:
self.t.set_debuglevel(2)
info('entering admin mode')
self.admin_mode()
info('logging in')
if self.login(password):
return
else:
info('trying old password')
self.admin_mode()
if self.login(old_password):
info('changing password')
self.change_password(old_password, password)
else:
raise Exception('login failed')
def admin_mode(self):
self.t.read_until(b'please wait ...')
self.t.write(b'admin\n')
def login(self, password):
self.t.read_until(b'Password:')
self.t.write(password.encode('ascii'))
self.t.write(b'\n')
_, _, match = self.t.expect([b'>', b'Applying'])
if b'Applying' in match:
return False
self.t.write(b'enable\n\n')
self.t.read_until(b'#')
return True
def exit(self):
# Leave "enable" mode
self.t.write(b'exit\n')
self.t.read_until(b'>')
self.t.write(b'logout\n')
def get_current_config(self):
# (ports, pvids, {vlan_id -> {U, T, _, _...})
ports_pvids = dict(self.get_port_pvids())
ports = tuple(sorted(ports_pvids))
pvids = tuple(ports_pvids[p] for p in ports)
vlans = {}
vlan_ids = set(pvids) | set(self.get_vlan_ids())
for vlan_id in vlan_ids:
port_map = dict(self.get_vlan(vlan_id))
membership = tuple(port_map[p] for p in ports)
vlans[vlan_id] = membership
return (ports, pvids, vlans)
def get_vlan_ids(self):
self.t.write(b'show vlan brief\n')
output = self.page().decode(errors='ignore')
for line in output.splitlines():
fields = line.split()
if fields and fields[0].isnumeric():
yield int(fields[0])
def get_vlan(self, vlan_id):
self.t.write(f'show vlan {vlan_id}\n'.encode())
for line in self.paged_table_body():
fields = line.split(maxsplit=3)
interface_port, current = fields[0:2]
interface, port = map(int, interface_port.split('/'))
if interface == 0:
port = int(interface_port.split('/')[1])
is_included = current == 'Include'
is_tagged = 'Tagged' in line
if is_tagged:
state = Tagged
elif is_included:
state = Untagged
else:
state = Ignore
yield port, state
def get_port_pvids(self):
self.t.write(b'show vlan port all\n')
for line in self.paged_table_body():
fields = line.split()
interface_port, pvid_s = fields[0:2]
interface, port = map(int, interface_port.split('/'))
if interface == 0:
pvid = int(pvid_s)
yield port, pvid
def set_port_pvid(self, port, vlan_id):
self.do_configure_interface(port, f'vlan pvid {vlan_id}')
def set_port_vlan_tagging(self, port, vlan_id, is_tagged):
if is_tagged:
command = f'vlan tagging {vlan_id}'
else:
command = f'no vlan tagging {vlan_id}'
self.do_configure_interface(port, command)
def set_port_vlan_participation(self, port, vlan_id, is_included):
if is_included:
command = f'vlan participation include {vlan_id}'
else:
command = f'vlan participation exclude {vlan_id}'
self.do_configure_interface(port, command)
def add_vlan(self, vlan_id):
self.do_vlan_database(f'vlan {vlan_id}')
def delete_vlan(self, vlan_id):
self.do_vlan_database(f'no vlan {vlan_id}')
def do_configure_interface(self, port, command):
self.t.write(b'configure\n')
self.t.read_until(b'#')
self.t.write(f'interface 0/{port}\n'.encode())
self.t.read_until(b'#')
self.t.write((command + '\n').encode())
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
def do_vlan_database(self, command):
self.t.write(b'vlan database\n')
self.t.read_until(b'#')
self.t.write((command + '\n').encode())
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
def change_password(self, password_old, password_new):
# TODO For this to work, we have to leave "enable" mode. It would be
# better if all other commands entererd enable mode instead. More
# verbose, but less confusing. Maybe have a cursor to remember which
# mode we are in?
self.t.write(b'exit\n')
self.t.read_until(b'>')
self.t.write(b'passwd\n')
self.t.read_until(b'Enter old password:')
self.t.write((password_old + '\n').encode())
self.t.read_until(b'Enter new password:')
self.t.write((password_new + '\n').encode())
self.t.read_until(b'Confirm new password:')
self.t.write((password_new + '\n').encode())
self.t.read_until(b'Password Changed!')
        self.t.write(b'enable\n\n')  # Double newline
self.t.read_until(b'#')
def paged_table_body(self):
output = self.page().decode(errors='ignore')
in_body = False
for line in output.splitlines():
if line.strip() == '':
in_body = False
if in_body:
yield line
if line and line[0:4] == '----':
in_body = True
def page(self):
result = b''
while True:
index, _, output = self.t.expect([
                rb'--More-- or \(q\)uit',
b'#'
])
result += output
if index == 0:
self.t.write(b'\n')
else:
break
return result
def sync(self, config):
ports, pvids, vlans = config
vlan_ids = set(pvids) | set(vlans)
for vlan_id in sorted(vlan_ids):
info(f'adding vlan {vlan_id}')
self.add_vlan(vlan_id)
for port, pvid in zip(ports, pvids):
info(f'setting port {port} to PVID {pvid}')
self.set_port_pvid(port, pvid)
for vlan_id, membership in vlans.items():
info(f'vlan {vlan_id}')
for port, status in zip(ports, membership):
if status == Ignore:
info(f' port {port} off')
self.set_port_vlan_participation(port, vlan_id, False)
else:
is_tagged = status == Tagged
symbol = 'T' if is_tagged else 'U'
info(f' port {port} {symbol}')
self.set_port_vlan_participation(port, vlan_id, True)
self.set_port_vlan_tagging(port, vlan_id, is_tagged)
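# Hedged usage sketch (hostname, ports and VLAN layout are hypothetical):
#   switch = connect('192.168.0.239', password='secret')
#   ports = (1, 2, 3, 4)
#   pvids = (1, 1, 10, 10)
#   vlans = {1: (Untagged, Untagged, Ignore, Ignore),
#            10: (Tagged, Ignore, Untagged, Untagged)}
#   switch.sync((ports, pvids, vlans))
#   switch.exit()
# Each vlans value lists one Tagged/Untagged/Ignore state per port, in the
# same order as `ports`.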
| 34.61086
| 92
| 0.552098
| 7,216
| 0.943391
| 1,683
| 0.220029
| 0
| 0
| 0
| 0
| 1,319
| 0.172441
|
c5524c8d02f3aef3cff31c032990bb8d482aaf1e
| 16,945
|
py
|
Python
|
Tests/subset/svg_test.py
|
ThomasRettig/fonttools
|
629f44b8cc4ed768088b952c9e600190685a90fc
|
[
"Apache-2.0",
"MIT"
] | 2,705
|
2016-09-27T10:02:12.000Z
|
2022-03-31T09:37:46.000Z
|
Tests/subset/svg_test.py
|
ThomasRettig/fonttools
|
629f44b8cc4ed768088b952c9e600190685a90fc
|
[
"Apache-2.0",
"MIT"
] | 1,599
|
2016-09-27T09:07:36.000Z
|
2022-03-31T23:04:51.000Z
|
Tests/subset/svg_test.py
|
ThomasRettig/fonttools
|
629f44b8cc4ed768088b952c9e600190685a90fc
|
[
"Apache-2.0",
"MIT"
] | 352
|
2016-10-07T04:18:15.000Z
|
2022-03-30T07:35:01.000Z
|
from string import ascii_letters
import textwrap
from fontTools.misc.testTools import getXML
from fontTools import subset
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.subset.svg import NAMESPACES, ranges
import pytest
etree = pytest.importorskip("lxml.etree")
@pytest.fixture
def empty_svg_font():
glyph_order = [".notdef"] + list(ascii_letters)
pen = TTGlyphPen(glyphSet=None)
pen.moveTo((0, 0))
pen.lineTo((0, 500))
pen.lineTo((500, 500))
pen.lineTo((500, 0))
pen.closePath()
glyph = pen.glyph()
glyphs = {g: glyph for g in glyph_order}
fb = FontBuilder(unitsPerEm=1024, isTTF=True)
fb.setupGlyphOrder(glyph_order)
fb.setupCharacterMap({ord(c): c for c in ascii_letters})
fb.setupGlyf(glyphs)
fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
fb.setupHorizontalHeader()
fb.setupOS2()
fb.setupPost()
fb.setupNameTable({"familyName": "TestSVG", "styleName": "Regular"})
svg_table = newTable("SVG ")
svg_table.docList = []
fb.font["SVG "] = svg_table
return fb.font
def new_svg(**attrs):
return etree.Element("svg", {"xmlns": NAMESPACES["svg"], **attrs})
def _lines(s):
return textwrap.dedent(s).splitlines()
@pytest.mark.parametrize(
"gids, retain_gids, expected_xml",
[
# keep four glyphs in total, don't retain gids, which thus get remapped
(
"2,4-6",
False,
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph1" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="3" startGlyphID="3">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph3" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
# same four glyphs, but we now retain gids
(
"2,4-6",
True,
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="5" startGlyphID="5">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph5" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="6" startGlyphID="6">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph6" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
],
)
def test_subset_single_glyph_per_svg(
empty_svg_font, tmp_path, gids, retain_gids, expected_xml
):
font = empty_svg_font
svg_docs = font["SVG "].docList
for i in range(1, 11):
svg = new_svg()
etree.SubElement(svg, "path", {"id": f"glyph{i}", "d": f"M{i},{i}"})
svg_docs.append((etree.tostring(svg).decode(), i, i))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={gids}",
"--retain_gids" if retain_gids else "--no-retain_gids",
]
)
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
# This contains a bunch of cross-references between glyphs, paths, gradients, etc.
# Note the path coordinates are completely made up and not meant to be rendered.
# We only care about the tree structure, not its visual content.
COMPLEX_SVG = """\
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
<path id="p1" d="M3,3"/>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph3">
<use xlink:href="#p1"/>
</g>
<use id="glyph4" xlink:href="#glyph1" x="10"/>
<use id="glyph5" xlink:href="#glyph2" y="-10"/>
<g id="glyph6">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
<g id="group1">
<g id="glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph7">
<path d="M4,4"/>
</g>
<g id="glyph8">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
<path id="M6,6"/>
</g>
<path d="M7,7"/>
</g>
<g id="glyph9">
<use xlink:href="#p2"/>
</g>
<g id="glyph10">
<use xlink:href="#p3"/>
</g>
</g>
<g id="glyph11">
<path d="M7,7" fill="url(#rg4)"/>
</g>
<g id="glyph12">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
"""
@pytest.mark.parametrize(
"subset_gids, expected_xml",
[
# we only keep gid=2, with 'glyph2' defined inside 'glyph1': 'glyph2'
# is renamed 'glyph1' to match the new subset indices, and the old 'glyph1'
# is kept (as it contains 'glyph2') but renamed '.glyph1' to avoid clash
(
"2",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id=".glyph1">
<g id="glyph1">
<path d="M0,0"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
# we keep both gid 1 and 2: the glyph elements' ids stay as they are (only the
# range endGlyphID change); a gradient is kept since it's referenced by glyph1
(
"1,2",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# both gid 3 and 6 refer (via <use xlink:href="#...") to path 'p1', which
# is thus kept in <defs>; the glyph ids and range start/end are renumbered.
"3,6",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path id="p1" d="M3,3"/>
</defs>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<g id="glyph2">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph4' uses the whole 'glyph1' element (translated); we keep the latter
# renamed to avoid clashes with new gids
"3-4",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<path id="p1" d="M3,3"/>
</defs>
<g id=".glyph1">
<g id=".glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<use id="glyph2" xlink:href="#.glyph1" x="10"/>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph9' uses a path 'p2' defined inside 'glyph7', the latter is excluded
# from our subset, thus gets renamed '.glyph7'; an unrelated element with
# same id=".glyph7" doesn't clash because it was dropped.
# Similarly 'glyph10' uses path 'p3' defined inside 'glyph8', also excluded
# from subset and prefixed with '.'. But since an id=".glyph8" is already
# used in the doc, we append a .{digit} suffix to disambiguate.
"9,10",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="group1">
<g id=".glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph8.1">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p2"/>
</g>
<g id="glyph2">
<use xlink:href="#p3"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph11' uses gradient 'rg4' which inherits from 'rg3', which inherits
# from 'rg2', etc.
"11",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
</defs>
<g id="glyph1">
<path d="M7,7" fill="url(#rg4)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph12' contains a style attribute with inline CSS declarations that
# contains references to a gradient fill and a clipPath: we keep those
"12",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
],
)
def test_subset_svg_with_references(
empty_svg_font, tmp_path, subset_gids, expected_xml
):
font = empty_svg_font
font["SVG "].docList.append((COMPLEX_SVG, 1, 12))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={subset_gids}",
"--pretty-svg",
]
)
subset_font = TTFont(subset_path)
if expected_xml is not None:
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
else:
assert "SVG " not in subset_font
def test_subset_svg_empty_table(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append((etree.tostring(svg).decode(), 1, 1))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# there's no gid=2 in SVG table, drop the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
def test_subset_svg_missing_glyph(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append(
(
etree.tostring(svg).decode(),
1,
# the range endGlyphID=2 declares two glyphs however our svg contains
# only one glyph element with id="glyph1", the "glyph2" one is absent.
            # Technically this would be invalid according to the OT-SVG spec.
2,
)
)
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# make sure we don't crash when we don't find the expected "glyph2" element
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=1"])
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == [
'<svgDoc endGlyphID="1" startGlyphID="1">',
' <![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><rect id="glyph1" x="1" y="2"/></svg>]]>',
"</svgDoc>",
]
# ignore the missing gid even if included in the subset; in this test case we
# end up with an empty svg document--which is dropped, along with the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
@pytest.mark.parametrize(
"ints, expected_ranges",
[
((), []),
((0,), [(0, 0)]),
((0, 1), [(0, 1)]),
((1, 1, 1, 1), [(1, 1)]),
((1, 3), [(1, 1), (3, 3)]),
((4, 2, 1, 3), [(1, 4)]),
((1, 2, 4, 5, 6, 9, 13, 14, 15), [(1, 2), (4, 6), (9, 9), (13, 15)]),
],
)
def test_ranges(ints, expected_ranges):
assert list(ranges(ints)) == expected_ranges
| 34.652352
| 110
| 0.477781
| 0
| 0
| 0
| 0
| 12,448
| 0.734612
| 0
| 0
| 11,761
| 0.694069
|
c552f157bcec716a7f87d20bd21cf1b7b813d8da
| 211
|
py
|
Python
|
models/dl-weights.py
|
diegoinacio/object-detection-flask-opencv
|
bc012e884138e9ead04115b8550e833bed134074
|
[
"MIT"
] | 16
|
2020-03-01T07:35:35.000Z
|
2022-02-01T16:34:24.000Z
|
models/dl-weights.py
|
girish008/Real-Time-Object-Detection-Using-YOLOv3-OpenCV
|
6af4c550f6128768b646f5923af87c2f654cd1bd
|
[
"MIT"
] | 6
|
2020-02-13T12:50:24.000Z
|
2022-02-02T03:22:30.000Z
|
models/dl-weights.py
|
girish008/Real-Time-Object-Detection-Using-YOLOv3-OpenCV
|
6af4c550f6128768b646f5923af87c2f654cd1bd
|
[
"MIT"
] | 8
|
2020-06-22T10:23:58.000Z
|
2022-01-14T21:17:50.000Z
|
"""
This script downloads the weight file
"""
import requests
URL = "https://pjreddie.com/media/files/yolov3.weights"
r = requests.get(URL, allow_redirects=True)
open('yolov3_t.weights', 'wb').write(r.content)
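# Hedged alternative (assumption, not in the original): for a weight file of
# this size, streaming avoids holding the whole body in memory:
#   with requests.get(URL, stream=True) as r:
#       with open('yolov3_t.weights', 'wb') as f:
#           for chunk in r.iter_content(chunk_size=1 << 20):
#               f.write(chunk)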
| 23.444444
| 55
| 0.739336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.549763
|
c55412d74acd62e5e8c97c0f510ea4a9a80e5595
| 1,786
|
py
|
Python
|
utim-esp32/modules/utim/utilities/process_device.py
|
connax-utim/utim-micropython
|
23c30f134af701a44a8736b09c8c201e13760d18
|
[
"Apache-2.0"
] | null | null | null |
utim-esp32/modules/utim/utilities/process_device.py
|
connax-utim/utim-micropython
|
23c30f134af701a44a8736b09c8c201e13760d18
|
[
"Apache-2.0"
] | null | null | null |
utim-esp32/modules/utim/utilities/process_device.py
|
connax-utim/utim-micropython
|
23c30f134af701a44a8736b09c8c201e13760d18
|
[
"Apache-2.0"
] | null | null | null |
"""
Subprocessor for device messages
"""
import logging
from ..utilities.tag import Tag
from ..workers import device_worker_forward
from ..workers import device_worker_startup
from ..utilities.address import Address
from ..utilities.status import Status
from ..utilities.data_indexes import SubprocessorIndex
_SubprocessorIndex = SubprocessorIndex()
logger = logging.Logger('utilities.process_device')
class ProcessDevice(object):
"""
Subprocessor for device messages
"""
def __init__(self, utim):
"""
Initialization of subprocessor for device messages
"""
self.__utim = utim
def process(self, data):
"""
Process device message
:param data: array [source, destination, status, body]
:return: same as input
"""
logger.info('Starting device processing')
        # holds the data being processed; returned once the loop below finishes
res = data
while (res[_SubprocessorIndex.status] is not Status.STATUS_TO_SEND and
res[_SubprocessorIndex.status] is not Status.STATUS_FINALIZED and
res[_SubprocessorIndex.source] is Address.ADDRESS_DEVICE):
command = res[_SubprocessorIndex.body][0:1]
if command == Tag.INBOUND.DATA_TO_PLATFORM:
res = device_worker_forward.process(self.__utim, res)
elif command == Tag.INBOUND.NETWORK_READY:
res = device_worker_startup.process(self.__utim, res)
else:
res[_SubprocessorIndex.status] = Status.STATUS_FINALIZED
if (res[_SubprocessorIndex.status] is Status.STATUS_TO_SEND or
res[_SubprocessorIndex.status] is Status.STATUS_FINALIZED):
break
return res
| 33.074074
| 80
| 0.663494
| 1,379
| 0.772116
| 0
| 0
| 0
| 0
| 0
| 0
| 425
| 0.237962
|
c556608e317003e7eff23a5318cc565b380cac29
| 174
|
py
|
Python
|
TrainAndTest/Fbank/LSTMs/__init__.py
|
Wenhao-Yang/DeepSpeaker-pytorch
|
99eb8de3357c85e2b7576da2a742be2ffd773ead
|
[
"MIT"
] | 8
|
2020-08-26T13:32:56.000Z
|
2022-01-18T21:05:46.000Z
|
TrainAndTest/Fbank/LSTMs/__init__.py
|
Wenhao-Yang/DeepSpeaker-pytorch
|
99eb8de3357c85e2b7576da2a742be2ffd773ead
|
[
"MIT"
] | 1
|
2020-07-24T17:06:16.000Z
|
2020-07-24T17:06:16.000Z
|
TrainAndTest/Fbank/LSTMs/__init__.py
|
Wenhao-Yang/DeepSpeaker-pytorch
|
99eb8de3357c85e2b7576da2a742be2ffd773ead
|
[
"MIT"
] | 5
|
2020-12-11T03:31:15.000Z
|
2021-11-23T15:57:55.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: __init__.py.py
@Time: 2020/3/27 10:43 AM
@Overview:
"""
| 14.5
| 26
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 170
| 0.977011
|
c556a7e2e7f0a44508e2fef82666c7378cbf88cf
| 226
|
py
|
Python
|
ninja/security/__init__.py
|
lsaavedr/django-ninja
|
caa182007368bb0fed85b184fb0583370e9589b4
|
[
"MIT"
] | null | null | null |
ninja/security/__init__.py
|
lsaavedr/django-ninja
|
caa182007368bb0fed85b184fb0583370e9589b4
|
[
"MIT"
] | null | null | null |
ninja/security/__init__.py
|
lsaavedr/django-ninja
|
caa182007368bb0fed85b184fb0583370e9589b4
|
[
"MIT"
] | null | null | null |
from ninja.security.apikey import APIKeyQuery, APIKeyCookie, APIKeyHeader
from ninja.security.http import HttpBearer, HttpBasicAuth
def django_auth(request):
if request.user.is_authenticated:
return request.user
| 28.25
| 73
| 0.800885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c5596189b90b1ffb040eef3bc2ba25c968d94c71
| 1,160
|
py
|
Python
|
migrations/versions/66d4be40bced_add_attribute_to_handle_multiline_.py
|
eubr-bigsea/limonero
|
54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5
|
[
"Apache-2.0"
] | 1
|
2018-01-01T20:35:43.000Z
|
2018-01-01T20:35:43.000Z
|
migrations/versions/66d4be40bced_add_attribute_to_handle_multiline_.py
|
eubr-bigsea/limonero
|
54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5
|
[
"Apache-2.0"
] | 37
|
2017-02-24T17:07:25.000Z
|
2021-09-02T14:49:19.000Z
|
migrations/versions/66d4be40bced_add_attribute_to_handle_multiline_.py
|
eubr-bigsea/limonero
|
54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5
|
[
"Apache-2.0"
] | 2
|
2019-11-05T13:45:45.000Z
|
2020-11-13T22:02:37.000Z
|
"""Add attribute to handle multiline information
Revision ID: 66d4be40bced
Revises: 6a809295d586
Create Date: 2018-05-16 12:13:32.023450
"""
import sqlalchemy as sa
from alembic import op
from limonero.migration_utils import is_sqlite
# revision identifiers, used by Alembic.
revision = '66d4be40bced'
down_revision = '6a809295d586'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
if is_sqlite():
with op.batch_alter_table('data_source') as batch_op:
batch_op.add_column(sa.Column('is_multiline', sa.Boolean(), nullable=False, server_default='0'))
else:
op.add_column('data_source',
sa.Column('is_multiline', sa.Boolean(), nullable=False,
default=0))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
if is_sqlite():
with op.batch_alter_table('data_source') as batch_op:
batch_op.drop_column('is_multiline')
else:
op.drop_column('data_source', 'is_multiline')
# ### end Alembic commands ###
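# Hedged sketch, not part of the migration: `is_sqlite` is a limonero helper
# whose implementation is not shown here; a typical dialect check under
# Alembic looks like the stand-in below (illustrative only, and usable only
# inside a running migration context).
def _is_sqlite_sketch():
    # op.get_bind() returns the connection Alembic is migrating over.
    return op.get_bind().dialect.name == 'sqlite'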
| 29.74359
| 108
| 0.668103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 503
| 0.433621
|
c559db70f1ddb6f54d717326e423cfde57c7f2af
| 247
|
py
|
Python
|
config.py
|
kxxoling/horus
|
a3c4b6c40a1064fffa595976f10358178dd65367
|
[
"MIT"
] | null | null | null |
config.py
|
kxxoling/horus
|
a3c4b6c40a1064fffa595976f10358178dd65367
|
[
"MIT"
] | null | null | null |
config.py
|
kxxoling/horus
|
a3c4b6c40a1064fffa595976f10358178dd65367
|
[
"MIT"
] | null | null | null |
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
SQLITE = 'db.sqlite3'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, SQLITE) + '?check_same_thread=False'
| 22.454545
| 100
| 0.740891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.291498
|
c55a4b523b8ba2e29366bb76e2448cbced42c61f
| 4,372
|
py
|
Python
|
tests/test_lp_problem.py
|
LovisAnderson/flipy
|
bde898e46e34cdfba39cecb75586fa3f4d816520
|
[
"Apache-2.0"
] | null | null | null |
tests/test_lp_problem.py
|
LovisAnderson/flipy
|
bde898e46e34cdfba39cecb75586fa3f4d816520
|
[
"Apache-2.0"
] | null | null | null |
tests/test_lp_problem.py
|
LovisAnderson/flipy
|
bde898e46e34cdfba39cecb75586fa3f4d816520
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from flipy.lp_problem import LpProblem
from flipy.lp_objective import LpObjective, Maximize
from flipy.lp_variable import LpVariable
from flipy.lp_expression import LpExpression
from flipy.lp_constraint import LpConstraint
from io import StringIO
@pytest.fixture
def problem():
return LpProblem('test_problem')
@pytest.fixture
def expression(x):
return LpExpression(name='test_expr', expression={x: 998}, constant=8)
@pytest.mark.usefixtures('problem', 'x')
class TestLpProblem(object):
def test_init(self):
problem = LpProblem('test_problem')
assert problem.lp_objective is None
assert len(problem.lp_constraints) == 0 and isinstance(problem.lp_constraints, dict)
assert len(problem.lp_variables) == 0 and isinstance(problem.lp_variables, dict)
def test_add_variable(self, problem, x):
problem.add_variable(x)
assert problem.lp_variables == {'x': x}
with pytest.raises(Exception) as e:
problem.add_variable('123')
assert e.value.args == ('123 is not an LpVariable',)
x2 = LpVariable('x')
with pytest.raises(Exception) as e:
problem.add_variable(x2)
assert e.value.args == ('LP variable name x conflicts with an existing LP variable',)
def test_set_objective(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8)
problem.set_objective(objective)
assert problem.lp_objective == objective
with pytest.raises(Exception) as e:
problem.set_objective(objective)
assert e.value.args == ('LP objective is already set',)
assert x.obj_coeff == 998
def test_add_constraint(self, problem, x):
rhs = LpExpression('rhs', {x: 1})
lhs = LpExpression('lhs', {x: 1}, 2)
constraint = LpConstraint(rhs, 'geq', lhs, 'constraint')
problem.add_constraint(constraint)
assert problem.lp_constraints[constraint.name] == constraint
assert problem.lp_variables[x.name] == x
constraint = LpConstraint(lhs, 'geq', rhs, 'constraint')
with pytest.raises(Exception) as e:
problem.add_constraint(constraint)
assert e.value.args == ('LP constraint name %s conflicts with an existing LP constraint' % constraint.name,)
with pytest.raises(Exception) as e:
problem.add_constraint(10)
assert e.value.args == ('%s is not an LpConstraint' % 10,)
def test_write(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8)
rhs = LpExpression('rhs', {x: 1})
lhs = LpExpression('lhs', {}, -2)
constraint = LpConstraint(rhs, 'geq', lhs, 'constraint')
problem.add_constraint(constraint)
problem.set_objective(objective)
buffer = StringIO()
problem.write_lp(buffer)
flipy_string = buffer.getvalue()
assert flipy_string == '\\* test_problem *\\\nMinimize\nminimize_cpm: 998 x + 8\nSubject To\nconstraint: x >= -2\nBounds\nx <= 10\nEnd'
def test_write_slack(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8, sense=Maximize)
rhs = LpExpression('rhs', {x: 1})
lhs = LpExpression('lhs', {}, -2)
constraint = LpConstraint(rhs, 'leq', lhs, 'constraint', True, 100)
problem.add_constraint(constraint)
problem.set_objective(objective)
buffer = StringIO()
problem.write_lp(buffer)
flipy_string = buffer.getvalue()
assert flipy_string == '\\* test_problem *\\\nMaximize\nminimize_cpm: 998 x - 100 constraint_slack_variable + 8\nSubject To\nconstraint: - constraint_slack_variable + x <= -2\nBounds\nx <= 10\nEnd'
def test_write_with_empty_constraint(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8, sense=Maximize)
constraint = LpConstraint(LpExpression('lhs', {x: 0}), 'leq', LpExpression('rhs', {}), 'constraint')
problem.add_constraint(constraint)
problem.set_objective(objective)
buffer = StringIO()
problem.write_lp(buffer)
flipy_string = buffer.getvalue()
assert flipy_string == '\\* test_problem *\\\nMaximize\nminimize_cpm: 998 x + 8\nSubject To\nBounds\nx <= 10\nEnd'
| 41.638095
| 205
| 0.665599
| 3,883
| 0.888152
| 0
| 0
| 4,100
| 0.937786
| 0
| 0
| 825
| 0.188701
|
c55a6c83c0c4deda47ef169a2a79ced739a7f4c8
| 106
|
py
|
Python
|
src/invoice_medicine/apps.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/invoice_medicine/apps.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/invoice_medicine/apps.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.apps import AppConfig
class InvoiceMedicineConfig(AppConfig):
name = 'invoice_medicine'
| 17.666667
| 39
| 0.792453
| 69
| 0.650943
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.169811
|
c55b87c8df2b77ae553d466bad5d103ac2336d62
| 5,893
|
py
|
Python
|
tests/components/tectonics/test_listric_kinematic_extender.py
|
amanaster2/landlab
|
ea17f8314eb12e3fc76df66c9b6ff32078caa75c
|
[
"MIT"
] | 257
|
2015-01-13T16:01:21.000Z
|
2022-03-29T22:37:43.000Z
|
tests/components/tectonics/test_listric_kinematic_extender.py
|
amanaster2/landlab
|
ea17f8314eb12e3fc76df66c9b6ff32078caa75c
|
[
"MIT"
] | 1,222
|
2015-02-05T21:36:53.000Z
|
2022-03-31T17:53:49.000Z
|
tests/components/tectonics/test_listric_kinematic_extender.py
|
amanaster2/landlab
|
ea17f8314eb12e3fc76df66c9b6ff32078caa75c
|
[
"MIT"
] | 274
|
2015-02-11T19:56:08.000Z
|
2022-03-28T23:31:07.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 08:42:24 2021
@author: gtucker
"""
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_raises
from landlab import HexModelGrid, RadialModelGrid, RasterModelGrid
from landlab.components import Flexure, ListricKinematicExtender
def test_hangingwall_nodes():
"""Test the correct identification of hangingwall nodes."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(grid, fault_location=2500.0)
assert_array_equal(
extender._hangwall, [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20]
)
def test_subsidence_and_horiz_shift():
"""Test that elev subsides then shifts after 2 time steps."""
grid = RasterModelGrid((3, 7), xy_spacing=2500.0)
topo = grid.add_zeros("topographic__elevation", at="node")
extender = ListricKinematicExtender(
grid, extension_rate=0.01, fault_location=2500.0
)
# Run long enough to extend by half a grid cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -1404.156819, -910.66907, -590.616478, -383.045648, -248.425118],
)
# Now extend another half cell, so cumulative extension is one cell and
# elevations should get shifted by one cell
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[0.0, 0.0, -3514.477461, -2808.313638, -1821.338140, -1181.232956, -766.091296],
)
# Another step, and this time the hangingwall edge has moved by one cell,
# so the first 3 cells in this row should not further subside.
extender.run_one_step(dt=125000.0)
assert_array_almost_equal(
topo[7:14],
[
0.0,
0.0,
-3514.477461,
-3718.982708,
-2411.954617,
-1564.278603,
-1014.516414,
],
)
def test_with_hex_grid():
grid = HexModelGrid((5, 5), node_layout="rect")
grid.add_zeros("topographic__elevation", at="node")
ListricKinematicExtender(grid)
ListricKinematicExtender(grid, fault_location=2.0)
grid = HexModelGrid((5, 5), node_layout="rect", orientation="vertical")
grid.add_zeros("topographic__elevation", at="node")
assert_raises(NotImplementedError, ListricKinematicExtender, grid)
def test_with_flexure():
"""Test integrating with flexure."""
crust_density = 2700.0 # density of crustal column, kg/m3
dx = 2500.0 # grid spacing, m
dt = 125000.0 # time step, y
upper_crust_base_depth = 10000.0 # m
grid = RasterModelGrid((3, 7), xy_spacing=dx)
topo = grid.add_zeros("topographic__elevation", at="node")
load = grid.add_zeros("lithosphere__overlying_pressure_increment", at="node")
thickness = grid.add_zeros("upper_crust_thickness", at="node")
upper_crust_base = grid.add_zeros("upper_crust_base__elevation", at="node")
extender = ListricKinematicExtender(
grid,
extension_rate=0.01,
fault_location=2500.0,
track_crustal_thickness=True,
)
flexer = Flexure(grid, eet=5000.0, method="flexure")
deflection = grid.at_node["lithosphere_surface__elevation_increment"]
topo[
grid.x_of_node <= 7500.0
] = 1000.0 # this will force thickness to be 1 km greater at left
upper_crust_base[:] = -upper_crust_base_depth
thickness[:] = topo - upper_crust_base
unit_wt = crust_density * flexer.gravity
load[:] = unit_wt * thickness # loading pressure
# Get the initial deflection, which we'll need to calculate total current
# deflection
flexer.update()
init_deflection = deflection.copy()
# Run extension for half a grid cell. Elevations change, but thickness
# doesn't, so deflection should not change. We should be able to recover
# elevation from:
#
# topo = thickness + crust base - (deflection + subsidence)
#
extender.run_one_step(dt=dt)
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
net_deflection[7:14],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
)
test_topo = thickness + upper_crust_base - (net_deflection + extender._cum_subs)
assert_array_almost_equal(topo, test_topo)
# Now extend for another half cell, which should force a shift. The
# cumulative subsidence will be subtracted from the thickness field,
# representing thinning as the hangingwall slides to the "right". This
# will cause net upward isostatic deflection.
extender.run_one_step(dt=dt)
load[:] = unit_wt * thickness
flexer.update()
net_deflection = deflection - init_deflection
assert_array_almost_equal(
thickness[7:14],
[
11000.0,
11000.0,
8191.686362, # greatest subsidence: lost nearly 3 km
9178.66186,
9818.767044, # thicker because shifted (only lost <200 m)
9233.908704,
9503.149763,
],
)
assert_array_almost_equal(
net_deflection[7:14],
[
-59.497362,
-65.176276,
-69.222531,
-70.334462,
-68.608952,
-64.912352,
-59.743080,
],
)
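# Hedged numeric sketch of the bookkeeping asserted above: surface elevation
# is recovered as thickness plus crust-base elevation minus the total
# lowering (isostatic deflection plus cumulative kinematic subsidence). The
# numbers below are made up for illustration, not taken from the component.
def _flexure_bookkeeping_sketch():
    import numpy as np
    thickness = np.array([11000.0, 10000.0])
    crust_base = np.array([-10000.0, -10000.0])
    deflection = np.array([50.0, 40.0])
    subsidence = np.array([0.0, 1400.0])
    topo = thickness + crust_base - (deflection + subsidence)
    assert np.allclose(topo, [950.0, -1440.0])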
def test_error_handling():
radial_grid = RadialModelGrid(
n_rings=1, nodes_in_first_ring=8
) # , xy_of_center=(0., 0.))
assert_raises(TypeError, ListricKinematicExtender, radial_grid)
hex_grid = HexModelGrid((3, 3))
assert_raises(TypeError, ListricKinematicExtender, hex_grid)
grid = RasterModelGrid((3, 7))
grid.add_zeros("topographic__elevation", at="node")
assert_raises(
KeyError, ListricKinematicExtender, grid, track_crustal_thickness=True
)
| 33.293785
| 88
| 0.659426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,730
| 0.293569
|
c55c22b2cd2bfee50cd66031731dbde75ccb7354
| 22,485
|
py
|
Python
|
unbalanced_dataset/under_sampling.py
|
designer357/IGBB
|
89a60ec38fa9dab54175c24c347ee43232825504
|
[
"MIT"
] | 1
|
2021-08-20T17:14:28.000Z
|
2021-08-20T17:14:28.000Z
|
unbalanced_dataset/under_sampling.py
|
designer357/IGBB
|
89a60ec38fa9dab54175c24c347ee43232825504
|
[
"MIT"
] | null | null | null |
unbalanced_dataset/under_sampling.py
|
designer357/IGBB
|
89a60ec38fa9dab54175c24c347ee43232825504
|
[
"MIT"
] | 1
|
2018-09-13T23:26:23.000Z
|
2018-09-13T23:26:23.000Z
|
from __future__ import print_function
from __future__ import division
import numpy as np
from numpy import logical_not, ones
from numpy.random import seed, randint
from numpy import concatenate
from random import sample
from collections import Counter
from .unbalanced_dataset import UnbalancedDataset
class UnderSampler(UnbalancedDataset):
"""
Object to under sample the majority class(es) by randomly picking samples
with or without replacement.
"""
def __init__(self,
ratio=1.,
random_state=None,
replacement=True,
verbose=True):
"""
:param ratio:
The ratio of majority elements to sample with respect to the number
of minority cases.
:param random_state:
Seed.
:return:
underx, undery: The features and target values of the under-sampled
data set.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self,
ratio=ratio,
random_state=random_state,
verbose=verbose)
self.replacement = replacement
def resample(self):
"""
        Randomly pick samples from the majority class(es), with or without
        replacement, and return the under-sampled features and targets.
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
        # Loop over the other classes, picking samples at random
for key in self.ucd.keys():
# If the minority class is up, skip it
if key == self.minc:
continue
# Set the ratio to be no more than the number of samples available
if self.ratio * self.ucd[self.minc] > self.ucd[key]:
num_samples = self.ucd[key]
else:
num_samples = int(self.ratio * self.ucd[self.minc])
# Pick some elements at random
seed(self.rs)
if self.replacement:
indx = randint(low=0, high=self.ucd[key], size=num_samples)
else:
indx = sample(range((self.y == key).sum()), num_samples)
# Concatenate to the minority class
underx = concatenate((underx, self.x[self.y == key][indx]), axis=0)
undery = concatenate((undery, self.y[self.y == key][indx]), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
class TomekLinks(UnbalancedDataset):
"""
Object to identify and remove majority samples that form a Tomek link with
minority samples.
"""
def __init__(self, verbose=True):
"""
No parameters.
:return:
Nothing.
"""
UnbalancedDataset.__init__(self, verbose=verbose)
def resample(self):
"""
:return:
Return the data with majority samples that form a Tomek link
removed.
"""
from sklearn.neighbors import NearestNeighbors
# Find the nearest neighbour of every point
nn = NearestNeighbors(n_neighbors=2)
nn.fit(self.x)
nns = nn.kneighbors(self.x, return_distance=False)[:, 1]
# Send the information to is_tomek function to get boolean vector back
if self.verbose:
print("Looking for majority Tomek links...")
links = self.is_tomek(self.y, nns, self.minc, self.verbose)
if self.verbose:
print("Under-sampling "
"performed: " + str(Counter(self.y[logical_not(links)])))
# Return data set without majority Tomek links.
return self.x[logical_not(links)], self.y[logical_not(links)]
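# Hedged, self-contained sketch of the Tomek-link test delegated to
# `is_tomek` above (which lives in UnbalancedDataset and is not shown here):
# sample i and its nearest neighbour j form a Tomek link when they are
# mutual nearest neighbours with different labels; only the majority member
# is flagged for removal.
def _tomek_links_sketch(y, nns, minority_label):
    links = [False] * len(y)
    for i, j in enumerate(nns):
        if nns[j] == i and y[i] != y[j] and y[i] != minority_label:
            links[i] = True
    return links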
class ClusterCentroids(UnbalancedDataset):
"""
Experimental method that under samples the majority class by replacing a
cluster of majority samples by the cluster centroid of a KMeans algorithm.
This algorithm keeps N majority samples by fitting the KMeans algorithm
    with N clusters to the majority class and using the coordinates of the N
cluster centroids as the new majority samples.
"""
def __init__(self, ratio=1, random_state=None, verbose=True, **kwargs):
"""
:param kwargs:
Arguments the user might want to pass to the KMeans object from
scikit-learn.
:param ratio:
The number of cluster to fit with respect to the number of samples
in the minority class.
N_clusters = int(ratio * N_minority_samples) = N_maj_undersampled.
:param random_state:
Seed.
:return:
Under sampled data set.
"""
UnbalancedDataset.__init__(self, ratio=ratio,
random_state=random_state,
verbose=verbose)
self.kwargs = kwargs
def resample(self):
"""
        Replace each majority class by the centroids of a fitted KMeans.
        :return:
            The under-sampled features and target values.
"""
# Create the clustering object
from sklearn.cluster import KMeans
kmeans = KMeans(random_state=self.rs)
kmeans.set_params(**self.kwargs)
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
        # Loop over the other classes, replacing each by cluster centroids
for key in self.ucd.keys():
# If the minority class is up, skip it.
if key == self.minc:
continue
# Set the number of clusters to be no more than the number of
# samples
if self.ratio * self.ucd[self.minc] > self.ucd[key]:
n_clusters = self.ucd[key]
else:
n_clusters = int(self.ratio * self.ucd[self.minc])
# Set the number of clusters and find the centroids
kmeans.set_params(n_clusters=n_clusters)
kmeans.fit(self.x[self.y == key])
centroids = kmeans.cluster_centers_
# Concatenate to the minority class
underx = concatenate((underx, centroids), axis=0)
undery = concatenate((undery, ones(n_clusters) * key), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
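# Hedged standalone sketch of the centroid idea documented above: fit KMeans
# with n_clusters equal to the number of majority samples to keep and retain
# only the cluster centres (illustrative, not the class API; expects a 2-D
# numpy array).
def _centroid_undersample_sketch(x_majority, n_keep, random_state=None):
    from sklearn.cluster import KMeans
    km = KMeans(n_clusters=n_keep, random_state=random_state)
    return km.fit(x_majority).cluster_centers_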
class NearMiss(UnbalancedDataset):
"""
An implementation of NearMiss.
See the original paper: NearMiss - "kNN Approach to Unbalanced Data
Distributions: A Case Study involving Information Extraction" by Zhang
et al. for more details.
"""
def __init__(self, ratio=1., random_state=None,
version=1, size_ngh=3, ver3_samp_ngh=3,
verbose=True, **kwargs):
"""
:param version:
Version of the NearMiss to use. Possible values
are 1, 2 or 3. See the original paper for details
about these different versions.
:param size_ngh:
Size of the neighbourhood to consider to compute the
average distance to the minority point samples.
:param ver3_samp_ngh:
            The NearMiss-3 algorithm starts with a re-sampling phase. This
            parameter corresponds to the number of neighbours selected to
            create the subset in which the selection will be performed.
:param **kwargs:
Parameter to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, ratio=ratio,
random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
# Check that the version asked is implemented
if not (version == 1 or version == 2 or version == 3):
            raise ValueError('UnbalancedData.NearMiss: there are only 3 '
                             'versions available with parameter version=1/2/3')
self.version = version
self.size_ngh = size_ngh
self.ver3_samp_ngh = ver3_samp_ngh
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# For each element of the current class, find the set of NN
# of the minority class
from sklearn.neighbors import NearestNeighbors
# Call the constructor of the NN
nn_obj = NearestNeighbors(n_neighbors=self.size_ngh, **self.kwargs)
        # Fit the minority class since we want to know the distance
        # to these points
nn_obj.fit(self.x[self.y == self.minc])
        # Loop over the other classes, selecting samples by distance
for key in self.ucd.keys():
# If the minority class is up, skip it
if key == self.minc:
continue
# Set the ratio to be no more than the number of samples available
if self.ratio * self.ucd[self.minc] > self.ucd[key]:
num_samples = self.ucd[key]
else:
num_samples = int(self.ratio * self.ucd[self.minc])
# Get the samples corresponding to the current class
sub_samples_x = self.x[self.y == key]
sub_samples_y = self.y[self.y == key]
if self.version == 1:
# Find the NN
dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
n_neighbors=self.size_ngh)
# Select the right samples
sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
num_samples,
key,
sel_strategy='nearest')
elif self.version == 2:
# Find the NN
dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
n_neighbors=self.y[self.y == self.minc].size)
# Select the right samples
sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
num_samples,
key,
sel_strategy='nearest')
elif self.version == 3:
# We need a new NN object to fit the current class
nn_obj_cc = NearestNeighbors(n_neighbors=self.ver3_samp_ngh,
**self.kwargs)
nn_obj_cc.fit(sub_samples_x)
# Find the set of NN to the minority class
dist_vec, idx_vec = nn_obj_cc.kneighbors(self.x[self.y == self.minc])
# Create the subset containing the samples found during the NN
# search. Linearize the indexes and remove the double values
idx_vec = np.unique(idx_vec.reshape(-1))
# Create the subset
sub_samples_x = sub_samples_x[idx_vec, :]
sub_samples_y = sub_samples_y[idx_vec]
# Compute the NN considering the current class
dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
n_neighbors=self.size_ngh)
sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
num_samples,
key,
sel_strategy='farthest')
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
def __SelectionDistBased__(self,
dist_vec,
num_samples,
key,
sel_strategy='nearest'):
# Compute the distance considering the farthest neighbour
dist_avg_vec = np.sum(dist_vec[:, -self.size_ngh:], axis=1)
# Sort the list of distance and get the index
if sel_strategy == 'nearest':
sort_way = False
elif sel_strategy == 'farthest':
sort_way = True
else:
raise ValueError('Unbalanced.NearMiss: the sorting can be done '
'only with nearest or farthest data points.')
sorted_idx = sorted(range(len(dist_avg_vec)),
key=dist_avg_vec.__getitem__,
reverse=sort_way)
# Select the desired number of samples
sel_idx = sorted_idx[:num_samples]
return self.x[self.y == key][sel_idx], self.y[self.y == key][sel_idx]
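# Hedged sketch of the NearMiss-1 rule implemented above: rank each majority
# sample by the distance to its size_ngh nearest minority neighbours (the
# class sums the distances; summing and averaging give the same ordering)
# and keep the num_samples closest. Illustrative only; expects 2-D numpy
# arrays.
def _nearmiss1_sketch(x_min, x_maj, size_ngh, num_samples):
    from sklearn.neighbors import NearestNeighbors
    nn = NearestNeighbors(n_neighbors=size_ngh).fit(x_min)
    dist, _ = nn.kneighbors(x_maj)
    order = dist.sum(axis=1).argsort()
    return x_maj[order[:num_samples]]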
class CondensedNearestNeighbour(UnbalancedDataset):
"""
    An implementation of Condensed Nearest Neighbour.
    See the original paper: CNN - "Addressing the Curse of Imbalanced Training
    Sets: One-Sided Selection" by Kubat et al. for more details.
"""
def __init__(self, random_state=None,
size_ngh=1, n_seeds_S=1, verbose=True,
**kwargs):
"""
        :param size_ngh:
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.
        :param n_seeds_S:
            Number of samples to extract in order to build the set S.
        :param **kwargs:
            Parameters to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
self.size_ngh = size_ngh
self.n_seeds_S = n_seeds_S
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# Import the K-NN classifier
from sklearn.neighbors import KNeighborsClassifier
        # Loop over the other classes, condensing each in turn
for key in self.ucd.keys():
# If the minority class is up, skip it
if key == self.minc:
continue
# Randomly get one sample from the majority class
maj_sample = sample(self.x[self.y == key],
self.n_seeds_S)
# Create the set C
C_x = np.append(self.x[self.y == self.minc],
maj_sample,
axis=0)
C_y = np.append(self.y[self.y == self.minc],
[key] * self.n_seeds_S)
# Create the set S
S_x = self.x[self.y == key]
S_y = self.y[self.y == key]
# Create a k-NN classifier
knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
**self.kwargs)
# Fit C into the knn
knn.fit(C_x, C_y)
# Classify on S
pred_S_y = knn.predict(S_x)
# Find the misclassified S_y
sel_x = np.squeeze(S_x[np.nonzero(pred_S_y != S_y), :])
sel_y = S_y[np.nonzero(pred_S_y != S_y)]
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
class OneSidedSelection(UnbalancedDataset):
"""
An implementation of One-Sided Selection.
See the original paper: OSS - "Addressing the Curse of Imbalanced Training
    Sets: One-Sided Selection" by Kubat et al. for more details.
"""
def __init__(self, random_state=None,
size_ngh=1, n_seeds_S=1, verbose=True,
**kwargs):
"""
        :param size_ngh:
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.
        :param n_seeds_S:
            Number of samples to extract in order to build the set S.
        :param **kwargs:
            Parameters to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
self.size_ngh = size_ngh
self.n_seeds_S = n_seeds_S
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# Import the K-NN classifier
from sklearn.neighbors import KNeighborsClassifier
        # Loop over the other classes, condensing each in turn
for key in self.ucd.keys():
# If the minority class is up, skip it
if key == self.minc:
continue
# Randomly get one sample from the majority class
maj_sample = sample(self.x[self.y == key],
self.n_seeds_S)
# Create the set C
C_x = np.append(self.x[self.y == self.minc],
maj_sample,
axis=0)
C_y = np.append(self.y[self.y == self.minc],
[key] * self.n_seeds_S)
# Create the set S
S_x = self.x[self.y == key]
S_y = self.y[self.y == key]
# Create a k-NN classifier
knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
**self.kwargs)
# Fit C into the knn
knn.fit(C_x, C_y)
# Classify on S
pred_S_y = knn.predict(S_x)
# Find the misclassified S_y
sel_x = np.squeeze(S_x[np.nonzero(pred_S_y != S_y), :])
sel_y = S_y[np.nonzero(pred_S_y != S_y)]
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
from sklearn.neighbors import NearestNeighbors
# Find the nearest neighbour of every point
nn = NearestNeighbors(n_neighbors=2)
nn.fit(underx)
nns = nn.kneighbors(underx, return_distance=False)[:, 1]
# Send the information to is_tomek function to get boolean vector back
if self.verbose:
print("Looking for majority Tomek links...")
links = self.is_tomek(undery, nns, self.minc, self.verbose)
if self.verbose:
print("Under-sampling "
"performed: " + str(Counter(undery[logical_not(links)])))
# Return data set without majority Tomek links.
return underx[logical_not(links)], undery[logical_not(links)]
class NeighbourhoodCleaningRule(UnbalancedDataset):
"""
    An implementation of the Neighbourhood Cleaning Rule.
See the original paper: NCL - "Improving identification of difficult small
classes by balancing class distribution" by Laurikkala et al. for more details.
"""
def __init__(self, random_state=None,
size_ngh=3, verbose=True, **kwargs):
"""
        :param size_ngh:
            Size of the neighbourhood to consider in order to make
            the comparison between each sample and its NN.
        :param **kwargs:
            Parameters to use for the Nearest Neighbours.
"""
# Passes the relevant parameters back to the parent class.
UnbalancedDataset.__init__(self, random_state=random_state,
verbose=verbose)
# Assign the parameter of the element of this class
self.size_ngh = size_ngh
self.kwargs = kwargs
def resample(self):
"""
"""
# Start with the minority class
underx = self.x[self.y == self.minc]
undery = self.y[self.y == self.minc]
# Import the k-NN classifier
from sklearn.neighbors import NearestNeighbors
# Create a k-NN to fit the whole data
nn_obj = NearestNeighbors(n_neighbors=self.size_ngh)
# Fit the whole dataset
nn_obj.fit(self.x)
idx_to_exclude = []
        # Loop over all the classes to collect the indices to exclude
for key in self.ucd.keys():
# Get the sample of the current class
sub_samples_x = self.x[self.y == key]
# Get the samples associated
idx_sub_sample = np.nonzero(self.y == key)[0]
# Find the NN for the current class
nnhood_idx = nn_obj.kneighbors(sub_samples_x, return_distance=False)
# Get the label of the corresponding to the index
nnhood_label = (self.y[nnhood_idx] == key)
# Check which one are the same label than the current class
# Make an AND operation through the three neighbours
nnhood_bool = np.logical_not(np.all(nnhood_label, axis=1))
            # For the minority class, exclude the majority samples found in its neighbourhood
if key == self.minc:
# Get the index to exclude
idx_to_exclude += nnhood_idx[np.nonzero(nnhood_label[np.nonzero(nnhood_bool)])].tolist()
else:
# Get the index to exclude
idx_to_exclude += idx_sub_sample[np.nonzero(nnhood_bool)].tolist()
# Create a vector with the sample to select
sel_idx = np.ones(self.y.shape)
sel_idx[idx_to_exclude] = 0
# Get the samples from the majority classes
sel_x = np.squeeze(self.x[np.nonzero(sel_idx), :])
sel_y = self.y[np.nonzero(sel_idx)]
underx = concatenate((underx, sel_x), axis=0)
undery = concatenate((undery, sel_y), axis=0)
if self.verbose:
print("Under-sampling performed: " + str(Counter(undery)))
return underx, undery
| 34.806502
| 104
| 0.558417
| 22,162
| 0.985635
| 0
| 0
| 0
| 0
| 0
| 0
| 8,359
| 0.371759
|
c55c65db8051ee9fdf9ceac3a9490b6f81b381e7
| 931
|
py
|
Python
|
wikipron/extract/cmn.py
|
Alireza-Sampour/wikipron
|
ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d
|
[
"Apache-2.0"
] | 1
|
2021-08-01T20:31:27.000Z
|
2021-08-01T20:31:27.000Z
|
wikipron/extract/cmn.py
|
Alireza-Sampour/wikipron
|
ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d
|
[
"Apache-2.0"
] | null | null | null |
wikipron/extract/cmn.py
|
Alireza-Sampour/wikipron
|
ac821c5d0a7d70e7e700f45f9d01b2dfb4ecae9d
|
[
"Apache-2.0"
] | null | null | null |
"""Word and pron extraction for (Mandarin) Chinese."""
import itertools
import typing
import requests
from wikipron.extract.default import yield_pron, IPA_XPATH_SELECTOR
if typing.TYPE_CHECKING:
from wikipron.config import Config
from wikipron.typing import Iterator, Word, Pron, WordPronPair
# Select pron from within this li
_PRON_XPATH_TEMPLATE = """
//div[@class="vsHide"]
//ul
//li[(a[@title="w:Mandarin Chinese"])]
"""
def yield_cmn_pron(
request: requests.Response, config: "Config"
) -> "Iterator[Pron]":
for li_container in request.html.xpath(_PRON_XPATH_TEMPLATE):
yield from yield_pron(li_container, IPA_XPATH_SELECTOR, config)
def extract_word_pron_cmn(
word: "Word", request: requests.Response, config: "Config"
) -> "Iterator[WordPronPair]":
words = itertools.repeat(word)
prons = yield_cmn_pron(request, config)
yield from zip(words, prons)
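# Hedged illustration of the repeat/zip pairing used above: zip() stops at
# the shorter iterable, so pairing an endless repeat of the word with the
# finite pron generator yields exactly one (word, pron) pair per extracted
# pronunciation. The strings below are made-up examples.
def _pairing_sketch():
    words = itertools.repeat("word")
    prons = iter(["pron1", "pron2"])
    return list(zip(words, prons))
assert _pairing_sketch() == [("word", "pron1"), ("word", "pron2")]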
| 25.162162
| 71
| 0.71536
| 0
| 0
| 461
| 0.495166
| 0
| 0
| 0
| 0
| 247
| 0.265306
|
c55ca719e407ecd982eeb52d8e27fa9690f85669
| 420
|
py
|
Python
|
iis/tests/test_e2e.py
|
tcpatterson/integrations-core
|
3692601de09f8db60f42612b0d623509415bbb53
|
[
"BSD-3-Clause"
] | null | null | null |
iis/tests/test_e2e.py
|
tcpatterson/integrations-core
|
3692601de09f8db60f42612b0d623509415bbb53
|
[
"BSD-3-Clause"
] | null | null | null |
iis/tests/test_e2e.py
|
tcpatterson/integrations-core
|
3692601de09f8db60f42612b0d623509415bbb53
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.dev.testing import requires_py3
from datadog_checks.iis import IIS
@pytest.mark.e2e
@requires_py3
def test_e2e_py3(dd_agent_check, aggregator, instance):
aggregator = dd_agent_check(instance)
aggregator.assert_service_check('iis.windows.perf.health', IIS.CRITICAL)
| 26.25
| 76
| 0.797619
| 0
| 0
| 0
| 0
| 205
| 0.488095
| 0
| 0
| 131
| 0.311905
|
c560c444067061f2f72e5a0dd18c1c1230d2f961
| 1,174
|
py
|
Python
|
scripts/utils/param_grid_to_files.py
|
bagustris/emotion
|
5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36
|
[
"MIT"
] | 3
|
2020-11-03T14:54:22.000Z
|
2021-04-12T12:23:10.000Z
|
scripts/utils/param_grid_to_files.py
|
bagustris/emotion
|
5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36
|
[
"MIT"
] | null | null | null |
scripts/utils/param_grid_to_files.py
|
bagustris/emotion
|
5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36
|
[
"MIT"
] | 2
|
2020-12-03T06:21:59.000Z
|
2021-01-16T04:47:12.000Z
|
from pathlib import Path
import click
import yaml
from sklearn.model_selection import ParameterGrid
from ertk.utils import PathlibPath, get_arg_mapping
@click.command()
@click.argument("param_grid", type=PathlibPath(exists=True, dir_okay=False))
@click.argument("output", type=Path)
@click.option("--format", help="Format string.")
def main(param_grid: Path, output: Path, format: str):
"""Creates a new parameters YAML file in the OUTPUT directory for
each combination of parameters in the PARAM_GRID file. The names of
the files will be formatted according to the --format parameter if
given, or else assigned a number starting from 1.
"""
grid = get_arg_mapping(param_grid)
output.mkdir(exist_ok=True, parents=True)
for i, params in enumerate(ParameterGrid(grid)):
if format:
filename = format.format(**params)
else:
filename = f"params_{i:02d}"
if not filename.endswith(".yaml"):
filename += ".yaml"
with open(output / filename, "w") as fid:
yaml.dump(params, fid)
print(f"Wrote {output / filename}.")
if __name__ == "__main__":
main()
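# Hedged usage sketch, separate from the CLI above: a hypothetical grid file
# mapping "lr" to [0.1, 0.01] and "layers" to [2] expands to two parameter
# mappings, so two files would be written (params_00.yaml and params_01.yaml
# under the fallback naming).
def _grid_sketch():
    grid = {"lr": [0.1, 0.01], "layers": [2]}
    # sklearn iterates sorted keys, varying the last-sorted key fastest
    return list(ParameterGrid(grid))
# _grid_sketch() == [{'layers': 2, 'lr': 0.1}, {'layers': 2, 'lr': 0.01}]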
| 32.611111
| 76
| 0.67632
| 0
| 0
| 0
| 0
| 977
| 0.832198
| 0
| 0
| 389
| 0.331346
|
c5610fccc549a5c9d69f6c3b166e598fbe0653b9
| 6,172
|
py
|
Python
|
mangabee_parsers.py
|
ta-dachi/mangaget
|
4ef39df0a6cceb2817d3bd0ad4d8290b8f576341
|
[
"MIT"
] | null | null | null |
mangabee_parsers.py
|
ta-dachi/mangaget
|
4ef39df0a6cceb2817d3bd0ad4d8290b8f576341
|
[
"MIT"
] | null | null | null |
mangabee_parsers.py
|
ta-dachi/mangaget
|
4ef39df0a6cceb2817d3bd0ad4d8290b8f576341
|
[
"MIT"
] | null | null | null |
from html.parser import HTMLParser
class mangabeeSearchParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.inLink = False
self.lastTag = None
self.lastClass = None
self.urls = [] # Where we store our results
def handle_starttag(self, tag, attrs):
if (tag == 'div'):
attrs = dict(attrs)
self.lastTag = 'div'
if (attrs.get('class') == 'nde'):
self.inLink = True
self.lastClass ='nde'
if (self.lastClass == 'nde'):
if (tag == 'div'):
attrs = dict(attrs)
if (attrs.get('class') == 'cvr'):
self.lastClass ='cvr'
if (self.lastTag == 'div' and tag == 'a' and self.lastClass == 'cvr'):
self.lastTag = 'a'
attrs = dict(attrs) # example output: {'href': 'http://www.mangabee.com/Tokyo_Ghoul/'}
self.urls.append( attrs.get('href') ) #['http://www.mangabee.com/Tokyo_Ghoul', ...]
def handle_endtag(self, tag):
if (tag == 'div'):
self.inLink = False
self.lastTag = None
self.lastClass = None
def handle_data(self, data):
pass
class mangabeeSetupParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.inLink = False
self.lastTag = None
self.lastClass = None
self.pages = []
self.chapters = []
self.src = []
self.first_occurrence_chapters = False
self.first_occurrence_pages = False
def handle_starttag(self, tag, attrs):
if (tag == 'select'): # The tag with pages data.
self.inLink = True
attrs = dict(attrs)
self.lastTag = 'select'
if (attrs.get('class') == 'cbo_wpm_pag'):
self.lastClass = 'cbo_wpm_pag'
if (tag == 'option' and self.lastClass == 'cbo_wpm_pag'):
self.inLink = True
self.lastTag = 'option'
if (tag == 'select'): # The tag with chapter data.
self.inLink = True
attrs = dict(attrs)
self.lastTag = 'select'
if (attrs.get('class') == 'cbo_wpm_chp'):
self.lastClass = 'cbo_wpm_chp'
if (tag == 'img'): # Wade through html to find img tag.
self.inLink = True
attrs = dict(attrs) # The tag with image data and location.
self.lastTag = 'img'
if (attrs.get('class') == 'manga-page'): # Found tag with manga image.
self.lastClass = 'manga-page'
self.src.append(attrs.get('src')) # Add example src. Need lots of string manipulation to generate image urls
def handle_endtag(self, tag):
if (tag == 'select' and self.lastClass =='cbo_wpm_chp'): # The tag with chapter data.
self.inLink = False
self.lastTag = None
self.lastClass = None
self.first_occurrence_chapters = True # Chapter selection occurs twice so, only add chapters once.
if (tag == 'select' and self.lastClass =='cbo_wpm_pag'): # The tag with chapter data.
self.inLink = False
self.lastTag = None
self.lastClass = None
self.first_occurrence_pages = True # Chapter selection occurs twice so, only add chapters once.
if (tag == 'img'): # The tag with image data and location.
self.inLink = False
self.lastTag = None
self.lastClass = None
def handle_data(self, data):
if (self.lastClass == 'cbo_wpm_chp' and self.first_occurrence_chapters == False):
self.chapters.append(data)
if (self.lastClass == 'cbo_wpm_pag' and self.lastTag == 'option' and self.first_occurrence_pages == False):
self.pages.append(data)
class mangabeeHTMLGetImageUrls(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.inLink = False
self.lastTag = None
self.lastClass = None
self.page_numbers = []
self.second_occurrence_pages = False
def handle_starttag(self, tag, attrs):
if (tag == 'select'): # The tag with pages data.
self.inLink = True
attrs = dict(attrs)
self.lastTag = 'select'
if (attrs.get('class') == 'cbo_wpm_pag'):
self.lastClass = 'cbo_wpm_pag'
if (tag == 'option' and self.lastClass == 'cbo_wpm_pag' and self.second_occurrence_pages == False):
self.inLink = True
self.lastTag = 'option'
attrs = dict(attrs)
self.page_numbers.append(attrs.get('value'))
if (tag == 'div'):
self.inLink = True
attrs = dict(attrs)
self.lastTag = 'div'
if (attrs.get('class') == 'clr'):
self.lastClass = 'clr'
def handle_endtag(self, tag):
if (tag == 'select'): # The tag with chapter data.
self.inLink = False
self.lastTag = None
self.lastClass = None
if (self.lastClass == 'clr'):
self.second_occurrence_pages = True
def handle_data(self, data):
pass
class mangabeeHTMLGetImageSrcs(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.inLink = False
self.lastTag = None
self.lastClass = None
self.src = None
def handle_starttag(self, tag, attrs):
if (tag == 'img'): # The tag with pages data.
self.inLink = True
attrs = dict(attrs)
self.lastTag = 'section'
if (attrs.get('class') == 'manga-page'):
self.lastClass = 'manga-page'
if (tag == 'img' and self.lastClass == 'manga-page'):
self.inLink = True
self.lastTag = 'img'
attrs = dict(attrs)
self.src = attrs.get('src')
def handle_endtag(self, tag):
if (tag == 'img'):
self.inLink = False
self.lastTag = None
self.lastClass = None
def handle_data(self, data):
pass
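# Hedged usage sketch: HTMLParser subclasses are driven with feed(). The
# fragment below is made up to match the markup shapes the search parser
# expects; it is not actual mangabee output.
def _search_demo():
    parser = mangabeeSearchParser()
    parser.feed('<div class="nde"><div class="cvr">'
                '<a href="http://example.com/Tokyo_Ghoul/"></a></div></div>')
    return parser.urls
assert _search_demo() == ['http://example.com/Tokyo_Ghoul/']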
| 35.883721
| 124
| 0.545366
| 6,128
| 0.992871
| 0
| 0
| 0
| 0
| 0
| 0
| 1,203
| 0.194913
|
c5630abd4f13c6d9b9fd911d42b444b3c07c02dd
| 1,831
|
py
|
Python
|
bme280/reader.py
|
budrom/dht2eleasticsearch
|
286974c0f4096ae3fb2f1f700b761051b09c47cf
|
[
"MIT"
] | null | null | null |
bme280/reader.py
|
budrom/dht2eleasticsearch
|
286974c0f4096ae3fb2f1f700b761051b09c47cf
|
[
"MIT"
] | null | null | null |
bme280/reader.py
|
budrom/dht2eleasticsearch
|
286974c0f4096ae3fb2f1f700b761051b09c47cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os
import threading
import sys
from Adafruit_BME280 import *
from datetime import datetime
from elasticsearch import Elasticsearch
def readSensor():
# Timestamp for report
timestamp = datetime.utcnow()
# Recursively initiate next reading in a minute
threading.Timer(60-float(datetime.utcnow().strftime('0.%f')), readSensor).start()
degrees = sensor.read_temperature() + t_compensation
pascals = sensor.read_pressure()
pressure = pascals / 100 * 0.75006375541921
humidity = sensor.read_humidity()
report = { 'timestamp': timestamp, 'sensor': 'bme280', 'temperature': degrees, 'humidity': humidity, 'pressure': pressure }
if es_host:
send2es(report)
else:
print("Time UTC: {}\tt={:0.2f} h={:0.2f} p={:0.2f}".format(timestamp, degrees, humidity, pressure))
def send2es(data):
""" Initiate connection to Elasticsearch and send data as a single document.
data - dictionary/JSON to be sent
"""
i = 'metrics_{}'.format(datetime.now().strftime('%m.%y'))
es.index(index=i, doc_type='measurement', body=data)
if __name__ == "__main__":
print("Script started")
try:
es_host = os.environ['ELASTICSEARCH_URL']
es_user = os.environ['ELASTICSEARCH_USER']
es_pass = os.environ['ELASTICSEARCH_PASSWORD']
es = Elasticsearch(es_host, http_auth=(es_user, es_pass))
except KeyError:
es_host = None
try:
t_compensation = float(os.environ['T_COMPENSATION'])
except KeyError:
t_compensation = 0
sensor = BME280(t_mode=BME280_OSAMPLE_2,
p_mode=BME280_OSAMPLE_8,
h_mode=BME280_OSAMPLE_1,
filter=BME280_FILTER_16,
address=0x76)
threading.Timer(60-float(datetime.utcnow().strftime('%S.%f')), readSensor).start()
print("Waiting for next minute to start loop...")
| 31.033898
| 125
| 0.688695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 502
| 0.274167
|
c565a1b2f5a20a17f2045f38225c4233abda30b9
| 456
|
py
|
Python
|
tests/web/backend_pytest.py
|
brumar/eel-for-transcrypt
|
28cf5e0aa55a3c885b63d79d1ffae1370be644d2
|
[
"MIT"
] | 1
|
2019-12-31T13:53:05.000Z
|
2019-12-31T13:53:05.000Z
|
tests/web/backend_pytest.py
|
brumar/eel-for-transcrypt
|
28cf5e0aa55a3c885b63d79d1ffae1370be644d2
|
[
"MIT"
] | 1
|
2021-11-15T17:48:03.000Z
|
2021-11-15T17:48:03.000Z
|
tests/web/backend_pytest.py
|
brumar/eel-for-transcrypt
|
28cf5e0aa55a3c885b63d79d1ffae1370be644d2
|
[
"MIT"
] | null | null | null |
import eel_for_transcrypt as eel
from web.common import InventoryItem
@eel.expose
def i_return_the_same(anything):
return anything
@eel.expose
def a_generator(mn, mx):
yield from range(mn, mx)
@eel.expose
@eel.apply_factories(inputfactory=InventoryItem)
def return_a_dataclass(datac: InventoryItem):
assert isinstance(datac, InventoryItem)
return datac
@eel.expose
def sluggish(timeout):
eel.sleep(timeout)
return True
| 16.888889
| 48
| 0.756579
| 0
| 0
| 53
| 0.116228
| 369
| 0.809211
| 0
| 0
| 0
| 0
|
c565d028bd9c69f1d86bec597f86ad7c3dad14ce
| 6,989
|
py
|
Python
|
tests/test_polygon.py
|
tilezen/mapbox-vector-tile
|
4e3a65a6f98c317048266260b8e7aac705e31e6f
|
[
"MIT"
] | 121
|
2016-07-14T00:44:54.000Z
|
2022-03-19T00:49:14.000Z
|
tests/test_polygon.py
|
tilezen/mapbox-vector-tile
|
4e3a65a6f98c317048266260b8e7aac705e31e6f
|
[
"MIT"
] | 53
|
2016-07-05T14:35:06.000Z
|
2021-05-20T22:31:02.000Z
|
tests/test_polygon.py
|
tilezen/mapbox-vector-tile
|
4e3a65a6f98c317048266260b8e7aac705e31e6f
|
[
"MIT"
] | 34
|
2016-07-27T23:45:05.000Z
|
2022-01-02T20:37:58.000Z
|
# -*- coding: utf-8 -*-
"""
Tests for vector_tile/polygon.py
"""
import unittest
from mapbox_vector_tile.polygon import make_it_valid
from shapely import wkt
import os
class TestPolygonMakeValid(unittest.TestCase):
def test_dev_errors(self):
test_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(test_dir, 'errors.wkt')) as fh:
for line in fh:
geom = wkt.loads(line)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertTrue(fixed.area > 0.9 * abs(geom.area))
def test_multipolygon_with_flipped_ring(self):
geom = wkt.loads("""MULTIPOLYGON(
((0 0, 0 4, 4 4, 4 0, 0 0), (1 1, 1 3, 3 3, 3 1, 1 1)),
((5 0, 9 0, 9 4, 5 4, 5 0), (6 1, 6 3, 8 3, 8 1, 6 1))
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(24, fixed.area)
def test_polygon_self_touching(self):
geom = wkt.loads("""POLYGON(
(1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(21, fixed.area)
def test_polygon_self_touching_inner(self):
geom = wkt.loads("""POLYGON(
(-1 -1, -1 6, 6 6, 6 -1, -1 -1),
(1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(28, fixed.area)
def test_polygon_inners_touching(self):
geom = wkt.loads("""POLYGON(
(0 0, 6 0, 6 6, 0 6, 0 0),
(1 1, 1 3, 3 3, 3 1, 1 1),
(3 3, 3 5, 5 5, 5 3, 3 3)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(28, fixed.area)
def test_polygon_inner_touching_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 3 0, 3 3, 0 3, 0 0),
(1 1, 2 3, 2 1, 1 1)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(8, fixed.area)
def test_polygon_two_inners_touching_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 6 0, 6 3, 0 3, 0 0),
(1 1, 2 3, 2 1, 1 1),
(4 1, 5 3, 5 1, 4 1)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(16, fixed.area)
def test_polygon_inners_touching_colinear(self):
geom = wkt.loads("""POLYGON(
(0 0, 6 0, 6 6, 0 6, 0 0),
(1 1, 1 3, 3 4, 3 1, 1 1),
(3 2, 3 5, 5 5, 5 3, 3 2)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(26, fixed.area)
def test_polygon_inner_colinear_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 3 0, 3 3, 0 3, 0 0),
(1 1, 1 3, 2 3, 2 1, 1 1)
)""")
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(7, fixed.area)
def test_polygon_many_inners_touching(self):
geom = wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 1, 1 2, 3 2, 1 1),
(3 1, 3 3, 4 1, 3 1),
(2 2, 1 4, 2 4, 2 2),
(2 3, 4 4, 4 3, 2 3)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(21, fixed.area)
def test_polygon_inner_spike(self):
geom = wkt.loads("""POLYGON(
(0 0, 3 0, 3 4, 0 4, 0 0),
(1 1, 1 3, 2 3, 2 2, 1 2, 2 2, 2 1, 1 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(10, fixed.area)
def test_polygon_disconnected_inner(self):
geom = wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 1, 1 2, 2 2, 1 1),
(2 1, 2 2, 3 2, 2 1),
(3 1, 3 2, 4 2, 3 1),
(1 2, 1 3, 2 3, 1 2),
(2 2, 2 3, 3 3, 2 2),
(3 2, 3 3, 4 3, 3 2),
(1 3, 1 4, 2 4, 1 3),
(2 3, 2 4, 3 4, 2 3),
(3 3, 3 4, 4 4, 3 3)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(20.5, fixed.area)
def test_polygon_disconnected_outer(self):
geom = wkt.loads("""POLYGON(
(0 0, 4 0, 4 3, 3 3, 3 2, 2 3, 1 2, 1 3, 0 3, 0 0),
(1 1, 1 2, 3 2, 3 1, 1 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(9, fixed.area)
def test_polygon_ring_of_inners(self):
geom = wkt.loads("""POLYGON(
(0 0, 4 0, 4 4, 0 4, 0 0),
(1 1, 1 2, 2 1, 1 1),
(1 2, 1 3, 2 3, 1 2),
(2 3, 3 3, 3 2, 2 3),
(2 1, 3 2, 3 1, 2 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(14, fixed.area)
def test_polygon_ring_of_inners_2(self):
geom = wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 3, 1 4, 2 4, 1 3),
(3 3, 4 3, 4 2, 3 3),
(1 1, 1 2, 2 1, 1 1),
(1 2, 1 3, 2 3, 1 2),
(2 3, 3 3, 3 2, 2 3),
(2 1, 3 2, 3 1, 2 1)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
self.assertEquals(22, fixed.area)
def test_polygon_inners_crossing_outer(self):
geom = wkt.loads("""POLYGON (
(2325 1015, 2329 1021, 2419 1057, 2461 944, 2369 907, 2325 1015),
(2329 1012, 2370 909, 2457 944, 2417 1050, 2329 1012),
(2410 1053, 2410 1052, 2412 1053, 2411 1054, 2410 1053),
(2378 1040, 2378 1039, 2379 1040, 2379 1041, 2378 1040),
(2369 1037, 2370 1036, 2371 1036, 2371 1038, 2369 1037),
(2361 1034, 2362 1033, 2363 1033, 2363 1034, 2361 1034),
(2353 1031, 2354 1029, 2355 1030, 2354 1031, 2353 1031),
(2337 1024, 2338 1023, 2339 1023, 2338 1025, 2337 1024)
)""")
self.assertFalse(geom.is_valid)
fixed = make_it_valid(geom)
self.assertTrue(fixed.is_valid)
# different versions of GEOS hit this bug in slightly different ways,
# meaning that some inners get included and some don't, depending on
# the version. therefore, we need quite a wide range of acceptable
# answers.
#
# the main part of this polygon (outer - largest inner) has area 1551,
# and the smaller inners sum up to area 11, so we'll take +/-6 from
# 1545.
self.assertAlmostEqual(1545, fixed.area, delta=6)
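# Hedged aside, outside the test class: a common generic fallback for invalid
# polygons is shapely's buffer(0), which make_it_valid improves upon for the
# inner/outer edge cases exercised above. The bowtie below only illustrates
# that baseline trick, not this library's algorithm.
def _buffer0_demo():
    bowtie = wkt.loads('POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))')
    fixed = bowtie.buffer(0)
    return bowtie.is_valid, fixed.is_valid  # (False, True)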
| 34.945
| 78
| 0.529976
| 6,817
| 0.97539
| 0
| 0
| 0
| 0
| 0
| 0
| 3,154
| 0.451281
|
c567553f0cf12169873a1f6859559b2967a6ea7a
| 275
|
py
|
Python
|
snake_debug.py
|
xlrobotics/PPOC-balance-bot
|
41dae4b2bbfce94ed04841fa9ba122eb57459e5a
|
[
"MIT"
] | 3
|
2020-11-10T01:45:35.000Z
|
2021-09-27T11:39:06.000Z
|
snake_debug.py
|
xlrobotics/PPOC-balance-bot
|
41dae4b2bbfce94ed04841fa9ba122eb57459e5a
|
[
"MIT"
] | null | null | null |
snake_debug.py
|
xlrobotics/PPOC-balance-bot
|
41dae4b2bbfce94ed04841fa9ba122eb57459e5a
|
[
"MIT"
] | 2
|
2020-01-25T17:26:33.000Z
|
2021-02-16T16:39:38.000Z
|
import gym
# from stable_baselines import DQN as deepq
from stable_baselines import A2C as ac
from stable_baselines.common.policies import MlpLnLstmPolicy
import snake_bot
if __name__ == '__main__':
env = gym.make("snakebot-v0")
env.debug_mode()
exit(0)
| 27.5
| 61
| 0.741818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.243636
|
c5675771c49be7e9f7d6d764c6141228f78fdc9d
| 2,179
|
py
|
Python
|
easy/572_subtree_of_another_tree.py
|
niki4/leetcode_py3
|
794f560a09a8950da21bd58ea222e0c74449ffa6
|
[
"MIT"
] | null | null | null |
easy/572_subtree_of_another_tree.py
|
niki4/leetcode_py3
|
794f560a09a8950da21bd58ea222e0c74449ffa6
|
[
"MIT"
] | null | null | null |
easy/572_subtree_of_another_tree.py
|
niki4/leetcode_py3
|
794f560a09a8950da21bd58ea222e0c74449ffa6
|
[
"MIT"
] | null | null | null |
"""
Given the roots of two binary trees root and subRoot, return true if there is a subtree of root with the same structure
and node values of subRoot and false otherwise.
A subtree of a binary tree tree is a tree that consists of a node in tree and all of this node's descendants.
The tree tree could also be considered as a subtree of itself.
Example 1:
      3 (root)
     / \
    4   5        4 (subRoot)
   / \          / \
  1   2        1   2
Input: root = [3,4,5,1,2], subRoot = [4,1,2]
Output: true
Example 2:
      3 (root)
     / \
    4   5        4 (subRoot)
   / \          / \
  1   2        1   2
     /
    0
Input: root = [3,4,5,1,2,null,null,null,null,0], subRoot = [4,1,2]
Output: false
Constraints:
The number of nodes in the root tree is in the range [1, 2000].
The number of nodes in the subRoot tree is in the range [1, 1000].
-10^4 <= root.val <= 10^4
-10^4 <= subRoot.val <= 10^4
"""
from tools.binary_tree import TreeNode
class Solution:
"""
Depth-first search (DFS) to compare all nodes from s (as a starting point) with t.
Runtime: 152 ms, faster than 57.48% of Python3
Memory Usage: 15.2 MB, less than 45.92% of Python3
Time complexity: O(|s| * |t|) where s and t are number of nodes in the related trees.
"""
def dfs_is_identical(self, root1: TreeNode, root2: TreeNode) -> bool:
if root1 is None or root2 is None:
return root1 == root2 # True if both are None, False otherwise
return (root1.val == root2.val and
self.dfs_is_identical(root1.left, root2.left) and
self.dfs_is_identical(root1.right, root2.right))
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
if t is None: # subtree completely traversed at this point and all nodes matches within s
return True
if s is None: # either no match found or tree s has smaller size than subtree t
return False
if self.dfs_is_identical(s, t):
return True
return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
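# Hedged usage sketch with a minimal stand-in node: the repo's TreeNode lives
# in tools.binary_tree and is not shown here, so only .val/.left/.right are
# assumed. Builds Example 1 and checks the expected result.
class _Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right
def _demo():
    root = _Node(3, _Node(4, _Node(1), _Node(2)), _Node(5))
    sub = _Node(4, _Node(1), _Node(2))
    return Solution().isSubtree(root, sub)
assert _demo() is True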
| 33.523077
| 119
| 0.592015
| 1,118
| 0.513079
| 0
| 0
| 0
| 0
| 0
| 0
| 1,494
| 0.685636
|
c567629ea21a15f16d30ea7895f7a40e8e344679
| 80,085
|
py
|
Python
|
pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 7
|
2020-04-14T09:41:17.000Z
|
2021-08-06T09:38:19.000Z
|
pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | null | null | null |
pyeccodes/defs/grib2/localConcepts/cnmc/name_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 3
|
2020-04-30T12:44:48.000Z
|
2020-12-15T08:40:26.000Z
|
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
discipline = h.get_l('discipline')
parameterCategory = h.get_l('parameterCategory')
parameterNumber = h.get_l('parameterNumber')
instrumentType = h.get_l('instrumentType')
satelliteSeries = h.get_l('satelliteSeries')
scaledValueOfCentralWaveNumber = h.get_l('scaledValueOfCentralWaveNumber')
satelliteNumber = h.get_l('satelliteNumber')
typeOfGeneratingProcess = h.get_l('typeOfGeneratingProcess')
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 83333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 1250000 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 1666666 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 625000:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and scaledValueOfCentralWaveNumber == 2000000 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
scaledValueOfFirstFixedSurface = h.get_l('scaledValueOfFirstFixedSurface')
typeOfFirstFixedSurface = h.get_l('typeOfFirstFixedSurface')
scaleFactorOfFirstFixedSurface = h.get_l('scaleFactorOfFirstFixedSurface')
typeOfStatisticalProcessing = h.get_l('typeOfStatisticalProcessing')
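        # Editorial note: the generated code fetches keys lazily, in batches,
        # immediately before the first branches that test them: satellite
        # keys above, level and statistics keys here.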
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 198 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2:
return 'calibrated forecast, wind speed (gust)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 198 and typeOfStatisticalProcessing == 1:
return 'calibrated forecast, large-scale snowfall rate w.e.'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 198 and typeOfStatisticalProcessing == 1:
return 'calibrated forecast, total precipitation rate'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2 and scaledValueOfFirstFixedSurface == 10:
return 'smoothed forecast, wind speed (gust)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and typeOfFirstFixedSurface == 106:
return 'smoothed forecast, soil temperature'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfGeneratingProcess == 197 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'smoothed forecast, large-scale snowfall rate w.e.'
typeOfSecondFixedSurface = h.get_l('typeOfSecondFixedSurface')
scaledValueOfSecondFixedSurface = h.get_l('scaledValueOfSecondFixedSurface')
scaleFactorOfSecondFixedSurface = h.get_l('scaleFactorOfSecondFixedSurface')
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 0 and scaledValueOfSecondFixedSurface == 400 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaleFactorOfSecondFixedSurface == -2:
return 'smoothed forecast, cloud cover high'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfSecondFixedSurface == 800 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaleFactorOfSecondFixedSurface == -2 and scaledValueOfFirstFixedSurface == 400:
return 'smoothed forecast, cloud cover medium'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 1 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaledValueOfFirstFixedSurface == 800:
return 'smoothed forecast, cloud cover low'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 1 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 197:
return 'smoothed forecast, total cloud cover'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 197 and typeOfStatisticalProcessing == 1:
return 'smoothed forecast, total precipitation rate'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0:
return 'smoothed forecast, v comp. of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 10:
return 'smoothed forecast, u comp. of wind'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, dew point temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 3 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, minimum temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, maximum temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0:
return 'smoothed forecast, temperature'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and instrumentType == 207:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 82644 and satelliteNumber == 72:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 54:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 54:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 54:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and satelliteSeries == 331 and satelliteNumber == 54 and instrumentType == 205:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 54 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and satelliteSeries == 331 and satelliteNumber == 53 and instrumentType == 205:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 53:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and satelliteNumber == 53 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 53 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 52:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 16 and satelliteNumber == 52 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. radiance cloudy'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 15 and instrumentType == 205 and satelliteSeries == 331 and satelliteNumber == 52:
return 'Synth. Sat. brightness temperature clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 14 and satelliteNumber == 52 and instrumentType == 205 and satelliteSeries == 331:
return 'Synth. Sat. brightness temperature cloudy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 196 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of kinetic energy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 196 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of kinetic energy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 8 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of vert.velocity (pressure)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 8 and typeOfGeneratingProcess == 199 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference FG-AN of vert.velocity (pressure)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of temperature'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of relative humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1 and typeOfGeneratingProcess == 199 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference FG-AN of relative humidity'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of geopotential'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of geopotential'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of v-component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of v-component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 200:
return 'Monthly Mean of RMS of difference IA-AN of u-component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfGeneratingProcess == 199 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference FG-AN of u-component of wind'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 1 and typeOfGeneratingProcess == 200 and typeOfStatisticalProcessing == 5:
return 'Monthly Mean of RMS of difference IA-AN of pressure reduced to MSL'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 1 and typeOfStatisticalProcessing == 5 and typeOfGeneratingProcess == 199:
return 'Monthly Mean of RMS of difference FG-AN of pressure reduced to MSL'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 199 and typeOfFirstFixedSurface == 1:
return 'modified cloud cover for media'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 198 and typeOfFirstFixedSurface == 1:
return 'modified cloud depth for media'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 7:
return 'Icing Grade (1=LGT,2=MOD,3=SEV)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 13 and typeOfFirstFixedSurface == 1:
return 'Ceiling'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 3:
            return 'equivalent potential temperature'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 3 and typeOfFirstFixedSurface == 1:
return 'KO index'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 0 and typeOfFirstFixedSurface == 107 and scaleFactorOfFirstFixedSurface == -2:
            return 'pressure of an isentropic surface'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 14 and typeOfFirstFixedSurface == 107:
            return 'isentropic potential vorticity'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 25 and typeOfFirstFixedSurface == 1:
return 'weather interpretation (WMO)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 26 and typeOfFirstFixedSurface == 1:
            return 'Konv.-U-Grenze-nn: height of the convection base above sea level'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 10:
return 'absolute vorticity advection'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 8 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'storm relative helicity'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 1 and typeOfFirstFixedSurface == 105:
return 'wind shear'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 51:
return 'UV_Index_Maximum_W UV_Index clouded (W), daily maximum'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 23 and typeOfFirstFixedSurface == 1:
return 'Gravity wave dissipation (vertical integral)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 23 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Gravity wave dissipation (vertical integral)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 194 and typeOfFirstFixedSurface == 1:
return 'v-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 194 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'v-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'u-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 193 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'u-momentum flux due to SSO-effects'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 227:
return 'Ba140 - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 226:
return 'Ba140 - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 225:
            return 'Air concentration of Barium 140'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 224:
return 'I131o - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 223:
return 'I131o - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 222:
return 'I131o - concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 221:
return 'I131g - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 220:
return 'Xe133 - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 219:
return 'I131g - concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 218:
return 'Xe133 - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 217:
return 'Xe133 - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 216:
return 'Air concentration of Xenon 133 (Xe133 - concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 215:
return 'TRACER - wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 214:
return 'TRACER - dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 213:
return 'TRACER - concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 212:
return 'Kr85-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 211:
return 'Kr85-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 210:
return 'Air concentration of Krypton 85 (Kr85-concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 209:
return 'Zr95-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 208:
return 'Zr95-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 207:
return 'Air concentration of Zirconium 95 (Zr95-concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 206:
return 'Te132-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 205:
return 'Te132-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 204:
return 'Air concentration of Tellurium 132 (Te132-concentration)'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 203:
return 'Cs137-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 202:
return 'Cs137-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 201:
return 'Cs137-concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 200:
return 'I131-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 199:
return 'I131-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 198:
return 'I131-concentration'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 197:
return 'Sr90-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 196:
return 'Sr90-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 195:
return 'Air concentration of Strontium 90'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 194:
return 'Ru103-wet deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 193:
return 'Ru103-dry deposition'
if discipline == 0 and parameterCategory == 18 and parameterNumber == 192:
return 'Air concentration of Ruthenium 103 (Ru103- concentration)'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 1 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Ozone Mixing Ratio'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 194 and typeOfFirstFixedSurface == 1:
            return 'Delay of the GPS signal through dry atmos.'
        if discipline == 0 and parameterCategory == 15 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
            return 'Delay of the GPS signal through wet atmos.'
        if discipline == 0 and parameterCategory == 15 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
            return 'Delay of the GPS signal through the (total) atm.'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 200 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Friction velocity'
if discipline == 0 and parameterCategory == 191 and parameterNumber == 2 and typeOfFirstFixedSurface == 1:
return 'geographical longitude'
if discipline == 0 and parameterCategory == 191 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'geographical latitude'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'Coriolis parameter'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 208:
return 'water vapor flux'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 207 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'tendency of specific humidity'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 196 and typeOfStatisticalProcessing == 0:
return 'Sea salt aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 196:
return 'Sea salt aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 195 and typeOfStatisticalProcessing == 0:
return 'Black carbon aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 195:
return 'Black carbon aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 194 and typeOfStatisticalProcessing == 0:
return 'Organic aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 194:
return 'Organic aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 193 and typeOfStatisticalProcessing == 0:
return 'Total soil dust aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 193:
return 'Total soil dust aerosol'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 192 and typeOfStatisticalProcessing == 0:
return 'Total sulfate aerosol (12M)'
if discipline == 0 and parameterCategory == 13 and parameterNumber == 192:
return 'Total sulfate aerosol'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'ratio of monthly mean NDVI (normalized differential vegetation index) to annual maximum'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 192 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'ratio of monthly mean NDVI (normalized differential vegetation index) to annual maximum'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 31 and typeOfStatisticalProcessing == 2:
return 'normalized differential vegetation index (NDVI)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 31:
return 'normalized differential vegetation index'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 30 and typeOfFirstFixedSurface == 1:
return 'deciduous forest'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 29 and typeOfFirstFixedSurface == 1:
return 'evergreen forest'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfSecondFixedSurface == 100 and scaleFactorOfFirstFixedSurface == -2 and typeOfStatisticalProcessing == 7 and scaleFactorOfSecondFixedSurface == -2 and scaledValueOfFirstFixedSurface == 10:
return 'variance of soil moisture content (10-100)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfSecondFixedSurface == 10 and scaleFactorOfFirstFixedSurface == -2 and typeOfStatisticalProcessing == 7 and scaleFactorOfSecondFixedSurface == -2 and scaledValueOfFirstFixedSurface == 0:
return 'variance of soil moisture content (0-10)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 7:
            return 'Orography + land-sea distribution'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 28 and typeOfStatisticalProcessing == 3 and typeOfFirstFixedSurface == 1:
return 'Min Leaf area index'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 28 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 2:
return 'Max Leaf area index'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 4 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 3:
            return 'Plant covering degree in the quiescent phase'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 4 and typeOfStatisticalProcessing == 2 and typeOfFirstFixedSurface == 1:
return 'Plant covering degree in the vegetation phase'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'vertically integrated ozone content (climatological)'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'height of ozone maximum (climatological)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 32 and typeOfFirstFixedSurface == 1:
return 'root depth of vegetation'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 28 and typeOfFirstFixedSurface == 1:
return 'Leaf area index'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Soil Type'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 196 and typeOfFirstFixedSurface == 1:
return 'surface emissivity'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 22 and typeOfFirstFixedSurface == 1:
return 'Slope of sub-gridscale orography'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 21 and typeOfFirstFixedSurface == 1:
return 'Angle of sub-gridscale orography'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 24 and typeOfFirstFixedSurface == 1:
return 'Anisotropy of sub-gridscale orography'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 1:
return 'Standard deviation of sub-grid scale orography'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 195 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'meridional wind tendency due to subgrid scale oro.'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 194 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'zonal wind tendency due to subgrid scale oro.'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and typeOfStatisticalProcessing == 6 and typeOfGeneratingProcess == 7:
            return 'analysis error (standard deviation), v-comp. of wind'
        if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfStatisticalProcessing == 6 and typeOfGeneratingProcess == 7:
            return 'analysis error (standard deviation), u-comp. of wind'
        if discipline == 0 and parameterCategory == 3 and parameterNumber == 5 and typeOfGeneratingProcess == 7 and typeOfStatisticalProcessing == 6:
            return 'analysis error (standard deviation), geopotential (gpm)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 199:
return 'total directional spread'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 198:
return 'total Tm2 period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 197:
return 'total Tm1 period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 196:
return 'total wave mean period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 195:
return 'total wave peak period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 194:
return 'swell peak period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 194 and typeOfFirstFixedSurface == 101:
return 'swell mean period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 193:
return 'wind sea peak period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 193 and typeOfFirstFixedSurface == 101:
return 'wind sea mean period'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 192:
return 'total wave direction'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 206 and typeOfFirstFixedSurface == 1:
return 'moisture convergence for Kuo-type closure'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 6 and typeOfFirstFixedSurface == 1:
return 'Convective Available Potential Energy'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 205 and typeOfFirstFixedSurface == 1:
            return 'Mass flux at convective cloud base'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 195:
return 'residuum of soil moisture'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 194:
return 'total forcing at soil surface'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 193:
return 'total transpiration from all soil layers'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 192:
return 'sum of contributions to evaporation'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 193 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Effective transmissivity of solar radiation'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'unknown'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 1 and typeOfFirstFixedSurface == 10:
return 'Base reflectivity (cmax)'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 1 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Base reflectivity'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Base reflectivity'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 8 and typeOfFirstFixedSurface == 1:
return 'sea Ice Temperature'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 16 and typeOfFirstFixedSurface == 1:
return 'Minimal Stomatal Resistance'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 18 and typeOfFirstFixedSurface == 1:
return 'Snow temperature (top of snow)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 13 and typeOfFirstFixedSurface == 1:
return 'Plant Canopy Surface Water'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 22 and typeOfFirstFixedSurface == 106 and scaleFactorOfFirstFixedSurface == -2:
return 'soil ice content (multilayers)'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106:
return 'Column-integrated Soil Moisture (multilayers)'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106:
return 'Soil Temperature (multilayers)'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 192:
return 'Air concentration of Ruthenium 103'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2:
return 'maximum Wind 10m'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 3 and typeOfFirstFixedSurface == 1:
return 'mixed layer depth'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 19 and typeOfFirstFixedSurface == 1:
return 'Turbulent transfer coefficient for heat (and Moisture)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 29 and typeOfFirstFixedSurface == 1:
            return 'Turbulent transfer coefficient for momentum'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 20 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Turbulent diffusion coefficient for heat (and moisture)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 31 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
            return 'Turbulent diffusion coefficient for momentum'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 11 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Turbulent Kinetic Energy'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 196 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Kinetic Energy'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 192 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Tendency of turbulent kinetic energy'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 24:
            return 'Convective turbulent kinetic energy'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 7 and typeOfFirstFixedSurface == 192:
return 'Convective Inhibition, mean layer'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 6 and typeOfFirstFixedSurface == 192:
return 'Convective Available Potential Energy, mean layer'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 7 and typeOfFirstFixedSurface == 193:
return 'Convective Inhibition, most unstable'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 6 and typeOfFirstFixedSurface == 193:
return 'Convective Available Potential Energy, most unstable'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 193 and typeOfFirstFixedSurface == 1:
return 'supercell detection index 2 (only rot. up drafts)'
if discipline == 0 and parameterCategory == 7 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'supercell detection index 1 (rot. up+down drafts)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Pressure perturbation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 61 and typeOfFirstFixedSurface == 1:
return 'Snow density'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 75 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Graupel (snow pellets) precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 75 and typeOfFirstFixedSurface == 1:
return 'Graupel (snow pellets) precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 202 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'tendency of specific cloud ice content due to grid scale precipitation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 203:
return 'Fresh snow factor (weighting function for albedo indicating freshness of snow)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 201 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'tendency of specific cloud liquid water content due to grid scale precipitation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 200 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
            return 'Specific humidity tendency due to grid scale precipitation'
        if discipline == 0 and parameterCategory == 0 and parameterNumber == 193 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
            return 'Temperature tendency due to grid scale precipitation'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 66 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'snow amount, grid-scale plus convective'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 65 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'rain amount, grid-scale plus convective'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 76 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Convective rain rate (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 55 and typeOfFirstFixedSurface == 1:
return 'Convective snowfall rate water equivalent'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 76 and typeOfFirstFixedSurface == 1:
return 'Convective rain rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 77 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Large scale rain rate (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfFirstFixedSurface == 1:
return 'Large scale snowfall rate water equivalent'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 77 and typeOfFirstFixedSurface == 1:
return 'Large scale rain rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 196 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
            return 'Specific content of precipitation particles (needed for water loading)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 199 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'tendency of specific cloud ice content due to convection'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 198 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
            return 'Tendency of specific cloud liquid water content due to convection'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 204:
return 'Height of snow fall limit'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 200 and typeOfFirstFixedSurface == 4:
return 'height of 0 degree celsius level code 0,3,6 ?'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 196 and typeOfFirstFixedSurface == 1:
return 'height of top of dry convection'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 193 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'meridional wind tendency due to convection'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'zonal wind tendency due to convection'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
            return 'Specific humidity tendency due to convection'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 192 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Temperature tendency due to convection'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 195 and typeOfFirstFixedSurface == 1:
return 'top index (vertical level) of main convective cloud (i)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 194 and typeOfFirstFixedSurface == 1:
return 'base index (vertical level) of main convective cloud (i)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 27 and typeOfFirstFixedSurface == 3:
return 'Height of Convective Cloud Top (i)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 26 and typeOfFirstFixedSurface == 2:
return 'Height of Convective Cloud Base (i)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 195 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'specific cloud water content, convective cloud'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 193 and typeOfFirstFixedSurface == 3:
return 'cloud top above msl, shallow convection'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 192 and typeOfFirstFixedSurface == 2:
return 'cloud base above msl, shallow convection'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 194 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'subgridscale cloud ice'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 193 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'subgrid scale cloud water'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 192 and typeOfFirstFixedSurface == 1:
return 'vertical integral of divergence of total water content (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 78 and typeOfFirstFixedSurface == 1:
return 'Total Column integrated water (all components incl. precipitation)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 74:
            return 'Total column integrated graupel'
        if discipline == 0 and parameterCategory == 1 and parameterNumber == 32 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
            return 'Graupel'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 46:
return 'Total column integrated snow'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 45:
return 'Total column integrated rain'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 25 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Snow mixing ratio'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 24 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Rain mixing ratio'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 82 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Cloud Ice Mixing Ratio'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 22 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Cloud Mixing Ratio'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 14 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Non-Convective Cloud Cover, grid scale'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Cloud cover'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 195 and typeOfFirstFixedSurface == 1:
return 'Stomatal Resistance'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 24 and typeOfStatisticalProcessing == 1:
return 'Sunshine'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 194 and typeOfStatisticalProcessing == 0 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106:
return 'Latent heat flux from plants'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 193 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Latent heat flux from bare soil'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 192 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Thermal radiation heating rate'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 192 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Solar radiation heating rate'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 10 and typeOfFirstFixedSurface == 1:
return 'Photosynthetically active radiation'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 10 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Photosynthetically active radiation (m) (at the surface)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 18 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Momentum Flux, V-Component (m)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 17 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Momentum Flux, U-Component (m)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 11 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Sensible Heat Net Flux (m)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 10 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Latent Heat Net Flux (m)'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 0:
return 'Net long wave radiation flux'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 0:
return 'Net long wave radiation flux (m) (on the model top)'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 0:
return 'Net short wave radiation flux'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 0:
return 'Net short wave radiation flux (m) (on the model top)'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 1:
return 'Net long wave radiation flux'
if discipline == 0 and parameterCategory == 5 and parameterNumber == 5 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Net long wave radiation flux (m) (at the surface)'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 1:
return 'Net short wave radiation flux'
if discipline == 0 and parameterCategory == 4 and parameterNumber == 9 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 0:
return 'Net short wave radiation flux (m) (at the surface)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 9:
return 'Mean period of swell waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 8:
return 'Significant height of swell waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 7:
return 'Direction of swell waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 6:
return 'Mean period of wind waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 5:
return 'Significant height of wind waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 4:
return 'Direction of wind waves'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 3:
return 'Significant height of combined wind waves and swell'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'sea Ice Thickness'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
            return 'Sea Ice Cover (0=free, 1=cover)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 5 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfSecondFixedSurface == -2 and typeOfStatisticalProcessing == 1 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 10:
return 'Water Runoff (s)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 5 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and typeOfStatisticalProcessing == 1 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 190 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfSecondFixedSurface == -2:
return 'Water Runoff (10-190)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 5 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and typeOfStatisticalProcessing == 1 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfSecondFixedSurface == -2:
return 'Water Runoff (10-100)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 4 and typeOfFirstFixedSurface == 1:
return 'Plant cover'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfSecondFixedSurface == -2 and scaleFactorOfFirstFixedSurface == -2:
return 'Column-integrated Soil Moisture (2) 10-100cm'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfSecondFixedSurface == -2 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 10:
return 'Column-integrated Soil Moisture (1) 0 -10 cm'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 20 and typeOfFirstFixedSurface == 106 and typeOfSecondFixedSurface == 106 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 190 and scaledValueOfFirstFixedSurface == 100 and scaleFactorOfSecondFixedSurface == -2:
return 'Column-integrated Soil Moisture'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfFirstFixedSurface == -2:
return 'Soil Temperature'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 9 and scaleFactorOfFirstFixedSurface == -2:
return 'Soil Temperature'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 41:
return 'Soil Temperature (41 cm depth)'
if discipline == 2 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfFirstFixedSurface == 106 and scaledValueOfFirstFixedSurface == 36:
return 'Soil Temperature ( 36 cm depth, vv=0h)'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 1 and typeOfStatisticalProcessing == 0 and typeOfFirstFixedSurface == 1:
return 'Albedo (in short-wave)'
if discipline == 0 and parameterCategory == 19 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Albedo (in short-wave)'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Surface Roughness length Surface Roughness'
if discipline == 2 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Land Cover (1=land, 0=sea)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'Large-Scale snowfall rate water equivalent (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 55 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'Convective Snowfall rate water equivalent (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 69 and typeOfFirstFixedSurface == 1:
return 'Total Column-Integrated Cloud Water'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 400 and scaledValueOfFirstFixedSurface == 0 and scaleFactorOfSecondFixedSurface == -2:
return 'Cloud Cover (0 - 400 hPa)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 400 and scaleFactorOfSecondFixedSurface == -2 and scaleFactorOfFirstFixedSurface == -2 and scaledValueOfSecondFixedSurface == 800:
return 'Cloud Cover (400 - 800 hPa)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and scaledValueOfFirstFixedSurface == 800 and scaleFactorOfFirstFixedSurface == -2 and typeOfSecondFixedSurface == 1 and typeOfFirstFixedSurface == 100:
return 'Cloud Cover (800 hPa - Soil)'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 2 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Convective Cloud Cover'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 1 and typeOfFirstFixedSurface == 1:
return 'Total Cloud Cover'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 11 and typeOfFirstFixedSurface == 1:
return 'Snow Depth'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 60 and typeOfFirstFixedSurface == 1:
return 'Snow depth water equivalent'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 37 and typeOfStatisticalProcessing == 1:
return 'Convective Precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 54 and typeOfStatisticalProcessing == 1:
return 'Large-Scale Precipitation rate'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'Total Precipitation rate (S)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 70 and typeOfFirstFixedSurface == 1:
return 'Total Column-Integrated Cloud Ice'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 79 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 1:
return 'Evaporation (s)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 64 and typeOfFirstFixedSurface == 1:
return 'Total column integrated water vapour'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1:
return 'Relative Humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 1 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and scaleFactorOfFirstFixedSurface == 0:
return '2m Relative Humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 0 and scaleFactorOfFirstFixedSurface == 0:
return 'Specific Humidity'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 0 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2:
return 'Specific Humidity (2m)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Specific Humidity (S)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 9:
return 'Vertical Velocity (Geometric) (w)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 8:
return 'Vertical Velocity (Pressure) ( omega=dp/dt )'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3:
return 'V component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10:
return 'V component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2:
return 'U component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10:
return 'U component of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 1 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Wind speed (SP)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 1 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10 and scaleFactorOfFirstFixedSurface == 0:
return 'Wind speed (SP_10M)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 0 and typeOfFirstFixedSurface == 105 and scaleFactorOfFirstFixedSurface == 0:
return 'Wind Direction (DD)'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 0 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 10:
return 'Wind Direction (DD_10M)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 2:
return 'Wave spectra (3)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 1:
return 'Wave spectra (2)'
if discipline == 10 and parameterCategory == 0 and parameterNumber == 0:
return 'Wave spectra (1)'
if discipline == 0 and parameterCategory == 15 and parameterNumber == 6 and typeOfFirstFixedSurface == 1 and typeOfStatisticalProcessing == 2:
return 'Radar spectra (1)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 0 and scaleFactorOfFirstFixedSurface == 0:
return '2m Dew Point Temperature (AV)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 3 and scaleFactorOfFirstFixedSurface == 0:
return 'Min 2m Temperature (i)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 2 and scaleFactorOfFirstFixedSurface == 0:
return 'Max 2m Temperature (i)'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0:
return 'Temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 9 and typeOfFirstFixedSurface == 103 and scaledValueOfFirstFixedSurface == 2 and typeOfStatisticalProcessing == 0 and scaleFactorOfFirstFixedSurface == 0:
return 'Climat. temperature, 2m Temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Temperature (G)'
if discipline == 0 and parameterCategory == 14 and parameterNumber == 2 and typeOfFirstFixedSurface == 1:
return 'Total Column Integrated Ozone'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 6 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Geometric Height of the layer limits above sea level(NN)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 6 and typeOfFirstFixedSurface == 1:
return 'Geometric Height of the earths surface above sea level'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4:
return 'Geopotential'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and scaleFactorOfFirstFixedSurface == 0 and typeOfFirstFixedSurface == 105:
return 'Geopotential (full lev)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 4 and typeOfFirstFixedSurface == 1:
return 'Geopotential (S)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 2 and typeOfFirstFixedSurface == 1:
return 'Pressure Tendency (S)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 1 and typeOfFirstFixedSurface == 101:
return 'Pressure Reduced to MSL'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 0:
return 'Pressure'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 0 and typeOfFirstFixedSurface == 1:
return 'Pressure (S) (not reduced)'
is_s2s = h.get_l('is_s2s')
subCentre = h.get_l('subCentre')
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 2 and is_s2s == 1 and typeOfFirstFixedSurface == 103 and subCentre == 102:
return '2 metre dewpoint temperature'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and typeOfFirstFixedSurface == 103 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 2:
return '2 metre dewpoint temperature'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 0 and subCentre == 102 and is_s2s == 1:
return 'Sea ice area fraction'
if discipline == 10 and parameterCategory == 2 and parameterNumber == 0:
return 'Sea ice area fraction'
return wrapped
| 66.7375
| 355
| 0.700893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14,194
| 0.177237
|
c56762e2edaef44daca6ab74ffdc3c598a3d259d
| 2,038
|
py
|
Python
|
perspective_transform.py
|
shengchen-liu/CarND-Advanced_Lane_Finding
|
e23a3f5021e59f3acef4e8fec48537fffab0f1b3
|
[
"MIT"
] | null | null | null |
perspective_transform.py
|
shengchen-liu/CarND-Advanced_Lane_Finding
|
e23a3f5021e59f3acef4e8fec48537fffab0f1b3
|
[
"MIT"
] | null | null | null |
perspective_transform.py
|
shengchen-liu/CarND-Advanced_Lane_Finding
|
e23a3f5021e59f3acef4e8fec48537fffab0f1b3
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import matplotlib.pyplot as plt
from calibration_utils import calibrate_camera, undistort
import glob
import matplotlib.image as mpimg
import pickle
from threshold import binarize
def perspective_transform(img, verbose=False):
"""
Execute perspective transform
"""
img_size = (img.shape[1], img.shape[0])
# algorithm to automatically pick?
    # https://knowledge.udacity.com/questions/22331
src = np.float32(
[[200, 720],
[1100, 720],
[595, 450],
[685, 450]])
dst = np.float32(
[[300, 720],
[980, 720],
[300, 0],
[980, 0]])
m = cv2.getPerspectiveTransform(src, dst)
m_inv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(img, m, img_size, flags=cv2.INTER_LINEAR)
unwarped = cv2.warpPerspective(warped, m_inv, (warped.shape[1], warped.shape[0]), flags=cv2.INTER_LINEAR) # DEBUG
if verbose:
f, axarray = plt.subplots(1, 2)
f.set_facecolor('white')
axarray[0].set_title('Before perspective transform')
axarray[0].imshow(img, cmap='gray')
for point in src:
axarray[0].plot(*point, '.')
axarray[1].set_title('After perspective transform')
axarray[1].imshow(warped, cmap='gray')
for point in dst:
axarray[1].plot(*point, '.')
for axis in axarray:
axis.set_axis_off()
plt.show()
return warped, m, m_inv
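# Hedged sanity check (not part of the original pipeline): confirms that the
# hand-picked src quad really maps onto dst under the computed homography.
# The point values are the same assumptions used in perspective_transform().
def check_homography():
    src = np.float32([[200, 720], [1100, 720], [595, 450], [685, 450]])
    dst = np.float32([[300, 720], [980, 720], [300, 0], [980, 0]])
    m = cv2.getPerspectiveTransform(src, dst)
    # cv2.perspectiveTransform expects points shaped (1, N, 2)
    mapped = cv2.perspectiveTransform(src.reshape(1, -1, 2), m).reshape(-1, 2)
    assert np.allclose(mapped, dst, atol=1e-3)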
if __name__ == '__main__':
with open('calibrate_camera.p', 'rb') as f:
save_dict = pickle.load(f)
mtx = save_dict['mtx']
dist = save_dict['dist']
# show result on test images
for test_img in glob.glob('test_images/*.jpg'):
img = cv2.imread(test_img)
img_undistorted = undistort(img, mtx, dist, verbose=False)
img_binary = binarize(img_undistorted, verbose=False)
img_birdeye, M, Minv = perspective_transform(cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2RGB), verbose=True)
| 29.114286
| 118
| 0.628557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 315
| 0.154563
|
c5681f32ba0443d6943fe18106423ebafc204c78
| 12,733
|
py
|
Python
|
epgrefresh/src/plugin.py
|
builder08/enigma2-plugins_2
|
f8f08b947e23c1c86b011492a7323125774c3482
|
[
"OLDAP-2.3"
] | null | null | null |
epgrefresh/src/plugin.py
|
builder08/enigma2-plugins_2
|
f8f08b947e23c1c86b011492a7323125774c3482
|
[
"OLDAP-2.3"
] | null | null | null |
epgrefresh/src/plugin.py
|
builder08/enigma2-plugins_2
|
f8f08b947e23c1c86b011492a7323125774c3482
|
[
"OLDAP-2.3"
] | null | null | null |
from __future__ import print_function
# for localized messages
from . import _, NOTIFICATIONDOMAIN
# Config
from Components.config import config, ConfigYesNo, ConfigNumber, ConfigSelection, \
ConfigSubsection, ConfigClock, ConfigYesNo, ConfigInteger, NoSave
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from Tools.BoundFunction import boundFunction
from boxbranding import getImageDistro
from Components.SystemInfo import SystemInfo
from Components.NimManager import nimmanager
# Error-print
from traceback import print_exc
from sys import stdout
# Calculate default begin/end
from time import time, localtime, mktime
now = localtime()
begin = mktime((
	now.tm_year, now.tm_mon, now.tm_mday, 7, 30,
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
end = mktime((
	now.tm_year, now.tm_mon, now.tm_mday, 20, 0,
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
#Configuration
config.plugins.epgrefresh = ConfigSubsection()
config.plugins.epgrefresh.enabled = ConfigYesNo(default=False)
config.plugins.epgrefresh.begin = ConfigClock(default=int(begin))
config.plugins.epgrefresh.end = ConfigClock(default=int(end))
config.plugins.epgrefresh.interval_seconds = ConfigNumber(default=120)
config.plugins.epgrefresh.delay_standby = ConfigNumber(default=10)
config.plugins.epgrefresh.inherit_autotimer = ConfigYesNo(default=False)
config.plugins.epgrefresh.afterevent = ConfigYesNo(default=False)
config.plugins.epgrefresh.force = ConfigYesNo(default=False)
config.plugins.epgrefresh.skipProtectedServices = ConfigSelection(choices=[
("bg_only", _("Background only")),
("always", _("Foreground also")),
], default="bg_only"
)
config.plugins.epgrefresh.enablemessage = ConfigYesNo(default=True)
config.plugins.epgrefresh.wakeup = ConfigYesNo(default=False)
config.plugins.epgrefresh.lastscan = ConfigNumber(default=0)
config.plugins.epgrefresh.parse_autotimer = ConfigSelection(choices=[
("always", _("Yes")),
("never", _("No")),
("bg_only", _("Background only")),
("ask_yes", _("Ask default Yes")),
("ask_no", _("Ask default No")),
], default="never"
)
config.plugins.epgrefresh.erase = ConfigYesNo(default=False)
adapter_choices = [("main", _("Main Picture"))]
if SystemInfo.get("NumVideoDecoders", 1) > 1:
adapter_choices.append(("pip", _("Picture in Picture")))
adapter_choices.append(("pip_hidden", _("Picture in Picture (hidden)")))
if len(nimmanager.nim_slots) > 1:
adapter_choices.append(("record", _("Fake recording")))
config.plugins.epgrefresh.adapter = ConfigSelection(choices=adapter_choices, default="main")
config.plugins.epgrefresh.show_in_extensionsmenu = ConfigYesNo(default=False)
config.plugins.epgrefresh.show_run_in_extensionsmenu = ConfigYesNo(default=True)
if getImageDistro() in ("openatv", "openvix",):
config.plugins.epgrefresh.show_in_plugins = ConfigYesNo(default=False)
else:
config.plugins.epgrefresh.show_in_plugins = ConfigYesNo(default=True)
config.plugins.epgrefresh.show_help = ConfigYesNo(default=True)
config.plugins.epgrefresh.wakeup_time = ConfigInteger(default=-1)
config.plugins.epgrefresh.showadvancedoptions = NoSave(ConfigYesNo(default=False))
# convert previous parameters
config.plugins.epgrefresh.background = ConfigYesNo(default=False)
if config.plugins.epgrefresh.background.value:
config.plugins.epgrefresh.adapter.value = "pip_hidden"
config.plugins.epgrefresh.background.value = False
config.plugins.epgrefresh.save()
config.plugins.epgrefresh.interval = ConfigNumber(default=2)
if config.plugins.epgrefresh.interval.value != 2:
config.plugins.epgrefresh.interval_seconds.value = config.plugins.epgrefresh.interval.value * 60
config.plugins.epgrefresh.interval.value = 2
config.plugins.epgrefresh.save()
#pragma mark - Help
try:
from Components.Language import language
from Plugins.SystemPlugins.MPHelp import registerHelp, XMLHelpReader
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, fileExists
lang = language.getLanguage()[:2]
HELPPATH = resolveFilename(SCOPE_PLUGINS, "Extensions/EPGRefresh")
if fileExists(HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"):
helpfile = HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"
else:
helpfile = HELPPATH + "/mphelp.xml"
reader = XMLHelpReader(helpfile)
epgrefreshHelp = registerHelp(*reader)
except Exception as e:
print("[EPGRefresh] Unable to initialize MPHelp:", e, "- Help not available!")
epgrefreshHelp = None
#pragma mark -
# Notification-Domain
# Q: Do we really need this or can we do this better?
from Tools import Notifications
try:
Notifications.notificationQueue.registerDomain(NOTIFICATIONDOMAIN, _("EPGREFRESH_NOTIFICATION_DOMAIN"), deferred_callable=True)
except Exception as e:
EPGRefreshNotificationKey = ""
#print("[EPGRefresh] Error registering Notification-Domain:", e)
# Plugin
from EPGRefresh import epgrefresh
from EPGRefreshService import EPGRefreshService
# Plugins
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
#pragma mark - Workaround for unset clock
from enigma import eDVBLocalTimeHandler
def timeCallback(isCallback=True):
"""Time Callback/Autostart management."""
thInstance = eDVBLocalTimeHandler.getInstance()
if isCallback:
# NOTE: this assumes the clock is actually ready when called back
# this may not be true, but we prefer silently dying to waiting forever
thInstance.m_timeUpdated.get().remove(timeCallback)
elif not thInstance.ready():
thInstance.m_timeUpdated.get().append(timeCallback)
return
epgrefresh.start()
# Autostart
def autostart(reason, **kwargs):
if reason == 0 and "session" in kwargs:
session = kwargs["session"]
epgrefresh.session = session
if config.plugins.epgrefresh.enabled.value:
# check if box was woken up by a timer, if so, check if epgrefresh set this timer
if session.nav.wasTimerWakeup() and abs(config.plugins.epgrefresh.wakeup_time.getValue() - time()) <= 360:
# if box is not in idle mode, do that
from Screens.Standby import Standby, inStandby
if not inStandby:
from Tools import Notifications
Notifications.AddNotificationWithID("Standby", Standby)
timeCallback(isCallback=False)
elif reason == 1:
epgrefresh.stop()
def getNextWakeup():
# Return invalid time if not automatically refreshing
if not config.plugins.epgrefresh.enabled.value or \
not config.plugins.epgrefresh.wakeup.value:
setConfigWakeupTime(-1)
return -1
now = localtime()
begin = int(mktime(
(now.tm_year, now.tm_mon, now.tm_mday,
config.plugins.epgrefresh.begin.value[0],
config.plugins.epgrefresh.begin.value[1],
0, now.tm_wday, now.tm_yday, now.tm_isdst)
))
# todays timespan has not yet begun
if begin > time():
setConfigWakeupTime(begin)
return begin
# otherwise add 1 day
setConfigWakeupTime(begin + 86400)
return begin + 86400
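# Hedged worked example of the rollover above (times made up): with a refresh
# window beginning 07:30 and the current time 09:00, begin <= time(), so the
# next wakeup is begin + 86400 seconds, i.e. 07:30 on the following day.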
def setConfigWakeupTime(value):
config.plugins.epgrefresh.wakeup_time.value = value
config.plugins.epgrefresh.save()
# Mainfunction
def main(session, **kwargs):
try:
from EPGRefreshConfiguration import EPGRefreshConfiguration
session.openWithCallback(
doneConfiguring,
EPGRefreshConfiguration
)
except:
print("[EPGRefresh] Error while Opening EPGRefreshConfiguration")
print_exc(file=stdout)
def forceRefresh(session, **kwargs):
epgrefresh.forceRefresh(session)
def stopRunningRefresh(session, **kwargs):
epgrefresh.stopRunningRefresh(session)
def showPendingServices(session, **kwargs):
epgrefresh.showPendingServices(session)
def doneConfiguring(session, needsRestart):
if needsRestart:
session.openWithCallback(boundFunction(restartGUICB, session), MessageBox,
_("To apply your Changes the GUI has to be restarted.\nDo you want to restart the GUI now?"),
MessageBox.TYPE_YESNO, timeout=30)
else:
_startAfterConfig(session)
def restartGUICB(session, answer):
if answer is True:
session.open(TryQuitMainloop, 3)
else:
_startAfterConfig(session)
def _startAfterConfig(session):
if config.plugins.epgrefresh.enabled.value:
if not epgrefresh.isRunning():
epgrefresh.start(session)
# Eventinfo
def eventinfo(session, servicelist, **kwargs):
ref = session.nav.getCurrentlyPlayingServiceReference()
if not ref:
return
sref = ref.toString()
# strip all after last :
pos = sref.rfind(':')
if pos != -1:
sref = sref[:pos + 1]
epgrefresh.services[0].add(EPGRefreshService(str(sref), None))
# XXX: we need this helper function to identify the descriptor
# Extensions menu
def extensionsmenu(session, **kwargs):
main(session, **kwargs)
extSetupDescriptor = PluginDescriptor(_("EPG-Refresh_SetUp"), description=_("Automatically refresh EPG"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=extensionsmenu, needsRestart=False)
extRunDescriptor = PluginDescriptor(_("EPG-Refresh_Refresh now"), description=_("Start EPGrefresh immediately"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=forceRefresh, needsRestart=False)
extStopDescriptor = PluginDescriptor(_("EPG-Refresh_Stop Refresh"), description=_("Stop Running EPG-refresh"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=stopRunningRefresh, needsRestart=False)
extPendingServDescriptor = PluginDescriptor(_("EPG-Refresh_Pending Services"), description=_("Show the pending Services for refresh"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=showPendingServices, needsRestart=False)
extPluginDescriptor = PluginDescriptor( name=_("EPGRefresh"), description=_("Automatically refresh EPG"), where=PluginDescriptor.WHERE_PLUGINMENU, fnc=main, icon="EPGRefresh.png", needsRestart=False)
def AdjustExtensionsmenu(enable, PlugDescriptor):
if enable:
if PlugDescriptor not in plugins.getPlugins(PlugDescriptor.where):
plugins.addPlugin(PlugDescriptor)
else:
try:
plugins.removePlugin(PlugDescriptor)
except ValueError as ve:
if PlugDescriptor != extRunDescriptor:
print("[EPGRefresh] AdjustExtensionsmenu got confused, tried to remove non-existant plugin entry... ignoring.")
def housekeepingExtensionsmenu(configentry, force=False):
if force or (epgrefresh != None and not epgrefresh.isRunning()):
PlugDescriptor = None
if configentry == config.plugins.epgrefresh.show_in_plugins:
PlugDescriptor = extPluginDescriptor
elif configentry == config.plugins.epgrefresh.show_in_extensionsmenu:
PlugDescriptor = extSetupDescriptor
elif configentry == config.plugins.epgrefresh.show_run_in_extensionsmenu:
PlugDescriptor = extRunDescriptor
#if PlugDescriptor != None:
if PlugDescriptor is not None:
AdjustExtensionsmenu(configentry.value, PlugDescriptor)
config.plugins.epgrefresh.show_in_plugins.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
config.plugins.epgrefresh.show_in_extensionsmenu.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
config.plugins.epgrefresh.show_run_in_extensionsmenu.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
def menu_main(menuid, **kwargs):
if getImageDistro() in ("openvix", "openatv", "openspa", "openhdf"):
if menuid != "epg":
return []
else:
return []
return [(_("EPGRefresh"), main, "epgrefresh", None)]
def Plugins(**kwargs):
# NOTE: this might be a little odd to check this, but a user might expect
# the plugin to resume normal operation if installed during runtime, but
# this is not given if the plugin is supposed to run in background (as we
# won't be handed the session which we need to zap). So in turn we require
	# a restart if, and only if, we're installed during runtime AND running in
# background. To improve the user experience in this situation, we hide
# all references to this plugin.
needsRestart = config.plugins.epgrefresh.enabled.value and not plugins.firstRun
list = [
PluginDescriptor(
name="EPGRefresh",
where=[
PluginDescriptor.WHERE_AUTOSTART,
PluginDescriptor.WHERE_SESSIONSTART
],
fnc=autostart,
wakeupfnc=getNextWakeup,
needsRestart=needsRestart,
),
PluginDescriptor(
name=_("add to EPGRefresh"),
where=PluginDescriptor.WHERE_EVENTINFO,
fnc=eventinfo,
needsRestart=needsRestart,
),
]
list.append(PluginDescriptor(name=_("EPGRefresh"),
description=_("Automatically refresh EPG"),
where=PluginDescriptor.WHERE_MENU,
fnc=menu_main))
if config.plugins.epgrefresh.show_in_extensionsmenu.value:
extSetupDescriptor.needsRestart = needsRestart
list.append(extSetupDescriptor)
if config.plugins.epgrefresh.show_run_in_extensionsmenu.value:
extRunDescriptor.needsRestart = needsRestart
list.append(extRunDescriptor)
if config.plugins.epgrefresh.show_in_plugins.value:
extPluginDescriptor.needsRestart = needsRestart
list.append(extPluginDescriptor)
return list
| 35.766854
| 224
| 0.783162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,589
| 0.20333
|
c56a7f8daf694ac42476f66c3d71841a3dd5c679
| 29,709
|
py
|
Python
|
ongoing/prescriptors/bandit/bandit_prescriptor.py
|
bradyneal/covid-xprize-comp
|
d515f58b009a0a3e2421bc83e7ac893f3c3a1ece
|
[
"Apache-2.0"
] | null | null | null |
ongoing/prescriptors/bandit/bandit_prescriptor.py
|
bradyneal/covid-xprize-comp
|
d515f58b009a0a3e2421bc83e7ac893f3c3a1ece
|
[
"Apache-2.0"
] | null | null | null |
ongoing/prescriptors/bandit/bandit_prescriptor.py
|
bradyneal/covid-xprize-comp
|
d515f58b009a0a3e2421bc83e7ac893f3c3a1ece
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import os
from copy import deepcopy
import datetime
import pickle
import time
import copy
import sys
# NOTE: the original os.system('export PYTHONPATH=...') only modified a
# throwaway subshell and never affected this process; extend sys.path instead.
sys.path.insert(0, os.getcwd())
from ongoing.prescriptors.base import BasePrescriptor, PRED_CASES_COL, CASES_COL, NPI_COLUMNS, NPI_MAX_VALUES
import ongoing.prescriptors.base as base
from bandit import CCTSB
# np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
TMP_PRED_FILE_NAME = os.path.join(ROOT_DIR, 'tmp_predictions_for_prescriptions', 'preds.csv')
TMP_PRESCRIPTION_FILE = os.path.join(ROOT_DIR, 'tmp_prescription.csv')
MODEL_FILE = os.path.join(ROOT_DIR, 'bandits.pkl')
# Number of iterations of training for the bandit.
# Each iteration presents the bandit with a new context.
# Each iteration trains the bandit for the entire prediction window.
NB_ITERATIONS = 2
EXPLORE_ITERATIONS = 1
CHOICE = 'fixed'
# Number of days the prescriptors will look at in the past.
# Larger values here may make convergence slower, but give
# prescriptors more context. The number of inputs of each neat
# network will be NB_LOOKBACK_DAYS * (NPI_COLUMNS + 1) + NPI_COLUMNS.
# The '1' is for previous case data, and the final NPI_COLUMNS
# is for IP cost information.
# NB_LOOKBACK_DAYS = 14
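# Hedged worked example of the input-size formula above, using the commented
# default: with NB_LOOKBACK_DAYS = 14 and 12 NPI columns, each network would
# take 14 * (12 + 1) + 12 = 194 inputs.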
# Number of countries to use for training. Again, lower numbers
# here will make training faster, since there will be fewer
# input variables, but could potentially miss out on useful info.
# NB_EVAL_COUNTRIES = 10
# Range of days the prescriptors will be evaluated on.
# To save time during training, this range may be significantly
# shorter than the maximum days a prescriptor can be evaluated on.
# EVAL_START_DATE = '2020-08-01'
# EVAL_END_DATE = '2020-08-02'
# Number of prescriptions to make per country.
# This can be set based on how many solutions in PRESCRIPTORS_FILE
# we want to run and on time constraints.
NB_PRESCRIPTIONS = 10
# OBJECTIVE_WEIGHTS = [0.01, 0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 0.99]
OBJECTIVE_WEIGHTS = [0.5, 1.0]
LOAD = True
class Bandit(BasePrescriptor):
def __init__(self,
seed=base.SEED,
# eval_start_date=EVAL_START_DATE,
# eval_end_date=EVAL_END_DATE,
# nb_eval_countries=NB_EVAL_COUNTRIES,
nb_prescriptions=NB_PRESCRIPTIONS,
# nb_lookback_days=NB_LOOKBACK_DAYS,
hist_df=None,
start_date=None,
end_date=None,
verbose=True,
load=True):
super().__init__(seed=seed)
# self.eval_start_date = pd.to_datetime(eval_start_date, format='%Y-%m-%d')
# self.eval_end_date = pd.to_datetime(eval_end_date, format='%Y-%m-%d')
self.eval_start_date = None
self.eval_end_date = None
self.load = load
# self.nb_eval_countries = nb_eval_countries
# self.nb_lookback_days = nb_lookback_days
self.nb_prescriptions = nb_prescriptions
# self.action_duration = action_duration
# self.config_file = config_file
# self.prescriptors_file = prescriptors_file
self.hist_df = hist_df
self.verbose = verbose
self.bandits = {}
def get_predictions(self, start_date_str, end_date_str, pres_df):
start_date = pd.to_datetime(start_date_str)
last_known_date = self.predictor.df['Date'].max()
if last_known_date < pd.to_datetime(self.hist_df['Date'].min()) - np.timedelta64(1, 'D'):
# append prior NPIs to the prescripted ones because the predictor will need them
prior_ips_df = self.hist_df[(self.hist_df['Date'] > last_known_date) & (self.hist_df['Date'] < start_date)]
            prior_ips_df = prior_ips_df[pres_df.columns]  # .columns is a property, not a callable
ips_df = pres_df.append(prior_ips_df)
else:
ips_df = pres_df
# print('start_date_str :', start_date_str)
# print('ips_df : ', ips_df)
# generate the predictions
pred_df = self.predictor.predict(start_date_str, end_date_str, ips_df)
return pred_df
def fit(self, hist_df=None):
if hist_df is not None:
self.hist_df = hist_df
        if self.load:
print('loading bandit')
with open(MODEL_FILE, 'rb') as f:
self.bandits = pickle.load(f)
return
# eval_geos = self.choose_eval_geos()
# eval_geos = list(self.hist_df.groupby('GeoID').max()['ConfirmedCases'].sort_values(
# ascending=False).index)
eval_geos = ['Canada']
if self.verbose:
print("Bandit will be evaluated on the following geos:", eval_geos)
past_cases, past_ips = self.prep_past_ips_cases(eval_geos)
start_date = '2021-01-01'
end_date = '2021-02-01'
self.eval_start_date = pd.to_datetime(start_date, format='%Y-%m-%d')
self.eval_end_date = pd.to_datetime(end_date, format='%Y-%m-%d')
# Compute prescribed stringency incrementally
stringency = {date : {geo: 0. for geo in eval_geos}
for date in pd.date_range(self.eval_start_date, self.eval_end_date)}
geo_costs = self.prep_geo_costs(eval_geos) #dummy call to get size
context_size = len(next(iter(geo_costs.values())))
# predictor_df_bkp = self.predictor.df.copy()
for weight in OBJECTIVE_WEIGHTS:
# self.bandits is a dict of dicts [weight][geo]
self.bandits[weight] = {}
# Initialize a bandit for each weight and geo
for geo in eval_geos:
self.bandits[weight][geo] = CCTSB(
N=[i + 1 for i in NPI_MAX_VALUES.values()], #assumed max val + zero
K=len(NPI_MAX_VALUES),
C=context_size,
alpha_p=1,
nabla_p=1,
w = weight,
choice=CHOICE)
rewards = []
for t in range(NB_ITERATIONS):
predictor_df_bkp = self.predictor.df.copy()
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# forget all data before eval_start_date
self.predictor.df = self.predictor.df[
(self.predictor.df['Date'] < self.eval_start_date)
& (self.predictor.df['GeoID'].isin(eval_geos))]
#prepare costs for all geos
df_dict = self.prep_prescription_dict()
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
geo_costs = self.prep_geo_costs(eval_geos)
date_str = date.strftime("%Y-%m-%d")
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for geo in eval_geos:
bandit = self.bandits[weight][geo]
if geo == eval_geos[0]:
bandit.verbose = True
X_costs = geo_costs[geo]
bandit.observe(X_costs)
if t < EXPLORE_ITERATIONS:
bandit.choice = 'random'
else:
bandit.choice = 'fixed'
prescribed_ips = bandit.act() # gets prescriptions
# print(prescribed_ips)
# Add it to prescription dictionary
self.add_pres_to_dict(df_dict, date_str, geo, prescribed_ips)
# Calculate stringency
stringency[date][geo] = self.calc_stringency(X_costs,
prescribed_ips)
# Once predictions are made for all geos,
# Create dataframe from prescriptions
pres_df = pd.DataFrame(df_dict)
pres_df = base.add_geo_id(pres_df)
# Make batch predictions with prescriptions for all geos
pred_df = self.get_predictions(date_str, date_str, pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
# update each geo's bandit based on predictions
for geo in eval_geos:
bandit = self.bandits[weight][geo]
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# calculate reward before appending to df
reward = eval_past_cases[geo][-1] / (np.max([0.1,geo_pred[PRED_CASES_COL].values[0][0]]))
# reward = 1 if eval_past_cases[geo][-1] > (np.max([0.1,geo_pred[PRED_CASES_COL].values[0][0]])) else 0
# print('reward : ', reward)
# print('eval_past_cases[geo][-1] : ', eval_past_cases[geo][-1])
# print('(np.max([0.1,geo_pred[PRED_CASES_COL].values[0]])) : ', np.max([0.1,geo_pred[PRED_CASES_COL].values[0]]))
# print('[0.1,geo_pred[PRED_CASES_COL].values[0]] : ', [0.1,geo_pred[PRED_CASES_COL].values[0][0]])
if geo == eval_geos[0]:
bandit.update(r_past=eval_past_cases[geo][-1], r_present=(np.max([0.1,geo_pred[PRED_CASES_COL].values[0][0]])), s=stringency[date][geo], w=weight, verbose=True)
else:
bandit.update(r_past=eval_past_cases[geo][-1], r_present=(np.max([0.1,geo_pred[PRED_CASES_COL].values[0][0]])), s=stringency[date][geo], w=weight)
# Append predictions and prescriptions to past data
self.append_pres_pred_to_df(eval_past_cases, eval_past_ips, geo,
geo_pres, geo_pred)
new_pred_df = new_pred_df.merge(new_pres_df, on=['CountryName', 'RegionName', 'GeoID', 'Date'], how='left')
new_pred_df = new_pred_df.rename(columns={'PredictedDailyNewCases': 'NewCases'})
new_pred_df['ConfirmedCases'] = np.nan
new_pred_df['Population'] = np.nan
# for geo in eval_geos: # INEFFICIENT: there should be a way to avoid this loop using pandas functions
# temp_df = self.predictor.df[(self.predictor.df['GeoID'] == geo) & (self.predictor.df['Date'] == date - np.timedelta64(1,'D'))]
# new_cases = new_pred_df[new_pred_df['GeoID'] == geo]['NewCases'].to_numpy()
# new_pred_df.loc[new_pred_df['GeoID'] == geo, 'ConfirmedCases'] = np.cumsum(new_cases) + temp_df['ConfirmedCases'].to_numpy()
# new_pred_df.loc[new_pred_df['GeoID'] == geo, 'Population'] = temp_df.iloc[0]['Population']
# if geo == eval_geos[0]:
# print('temp_df : ', temp_df)
# print('New Cases : ', new_pred_df[new_pred_df['GeoID'] == geo]['NewCases'])
# print('New pred df : ', new_pred_df)
# print('self.predictor.df : ', self.predictor.df)
temp_df = self.predictor.df[self.predictor.df['Date'] == date - np.timedelta64(1,'D')]
new_cases = new_pred_df['NewCases']
                    # column assignments; ``.loc['ConfirmedCases']`` would write a row label
                    # (assumes new_pred_df and temp_df list the same geos in the same order)
                    new_pred_df['ConfirmedCases'] = new_cases.to_numpy() + temp_df['ConfirmedCases'].to_numpy()
                    new_pred_df['Population'] = temp_df['Population'].to_numpy()
self.predictor.df = self.predictor.df.append(new_pred_df, ignore_index=True)
# we need to compute the PredictionRatio since this is used as input for the predictor
                    # INEFFICIENT: there should be a way to compute these quantities only for the new dates
self.predictor.df['SmoothNewCases'] = self.predictor.df.groupby('GeoID')['NewCases'].rolling(
7, center=False).mean().fillna(0).reset_index(0, drop=True)
self.predictor.df['CaseRatio'] = self.predictor.df.groupby('GeoID').SmoothNewCases.pct_change(
).fillna(0).replace(np.inf, 0) + 1
self.predictor.df['ProportionInfected'] = self.predictor.df['ConfirmedCases'] / self.predictor.df['Population']
self.predictor.df['PredictionRatio'] = self.predictor.df['CaseRatio'] / (1 - self.predictor.df['ProportionInfected'])
print('Iteration ' + str(t) + ' done.')
pres_df = pd.DataFrame(df_dict)
pres_df.to_csv('inspection.csv')
for geo in eval_geos:
self.bandits[weight][geo].clear_update_hist()
# restore the predictor historical data after evaluating the genome
self.predictor.df = predictor_df_bkp
mean_rewards = np.mean(self.bandits[weight]['Canada'].rewards, axis=0)
self.bandits[weight]['Canada'].rewards = []
rewards.append(mean_rewards[0:4])
# print('Weight ' + str(weight) + ' done.')
# np.savetxt('rewards_cumulative_' + str(weight) + '_' + str(self.bandits[weight]['Canada'].choice),
# rewards,
# fmt='%1.10f')
# indiv_rewards = self.bandits[weight]['Canada'].rewards
# np.savetxt('rewards_' + str(weight) + '_' + CHOICE,
# indiv_rewards,
# fmt='%1.10f')
# with open('bandits.pkl', 'wb') as f:
# pickle.dump(self.bandits, f)
return
def prescribe(self,
start_date_str,
end_date_str,
prior_ips_df,
cost_df):
        if self.load:
with open(MODEL_FILE, 'rb') as f:
self.bandits = pickle.load(f)
start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')
geos = prior_ips_df['GeoID'].unique()
eval_stringency = {date : {geo: 0. for geo in geos}
for date in pd.date_range(start_date, end_date)}
if self.verbose:
print("Bandit will be evaluated on the following geos:", geos)
# Restrict it to dates before the start_date
df = self.hist_df[self.hist_df['Date'] <= start_date]
# Create past case data arrays for all geos
past_cases = {}
for geo in geos:
geo_df = df[df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
# Create past ip data arrays for all geos
past_ips = {}
for geo in geos:
geo_df = prior_ips_df[prior_ips_df['GeoID'] == geo]
past_ips[geo] = np.array(geo_df[NPI_COLUMNS])
self.fill_missing_data(prior_ips_df, start_date, geos, df, past_cases)
# Load IP costs to condition prescriptions
geo_costs = self.prep_geo_costs(eval_geos=geos,
costs_provided=True, cost_df=cost_df)
# Generate prescriptions iteratively, feeding resulting
# predictions from the predictor back into the prescriptor.
prescription_dfs = []
for weight in OBJECTIVE_WEIGHTS:
for geo in geos:
start_time = time.time()
self.bandits[weight][geo] = copy.deepcopy(self.bandits[weight]['Canada'])
for idx, weight in enumerate(OBJECTIVE_WEIGHTS):
current_date = start_date
predictor_df_bkp = self.predictor.df.copy()
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Set up dictionary for keeping track of prescription
df_dict = self.prep_prescription_dict()
# forget all data after start_date
self.predictor.df = self.predictor.df[
(self.predictor.df['Date'] < start_date)
& (self.predictor.df['GeoID'].isin(geos))]
while current_date <= end_date:
date_str = current_date.strftime("%Y-%m-%d")
# Get prescription for all regions
for geo in geos:
# only Canada bandit is trained
bandit = self.bandits[weight][geo]
X_costs = geo_costs[geo]
bandit.observe(X_costs)
prescribed_ips = bandit.act()
# Add it to prescription dictionary
if current_date > end_date:
break
self.add_pres_to_dict(df_dict, date_str, geo, prescribed_ips)
# Calculate stringency
eval_stringency[current_date][geo] = self.calc_stringency(X_costs,
prescribed_ips)
# Create dataframe from prescriptions
pres_df = pd.DataFrame(df_dict)
pres_df = base.add_geo_id(pres_df)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(date_str, date_str, pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
# make sure we haven't passed the end date
if current_date > end_date:
break
for geo in geos:
bandit = self.bandits[weight][geo]
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# calculate reward before appending to df
reward = eval_past_cases[geo][-1] / (np.max([0.1,geo_pred[PRED_CASES_COL].values[0][0]]))
# print(geo, reward)
self.append_pres_pred_to_df(eval_past_cases, eval_past_ips,
geo, geo_pres, geo_pred)
bandit.update(eval_past_cases[geo][-1], (np.max([0.1,geo_pred[PRED_CASES_COL].values[0][0]])), eval_stringency[current_date][geo], weight)
# Move on to next date
new_pred_df = new_pred_df.merge(new_pres_df, on=['CountryName', 'RegionName', 'GeoID', 'Date'], how='left')
new_pred_df = new_pred_df.rename(columns={'PredictedDailyNewCases': 'NewCases'})
new_pred_df['ConfirmedCases'] = np.nan
new_pred_df['Population'] = np.nan
            for geo in geos:  # INEFFICIENT: there should be a way to avoid this loop using pandas functions
temp_df = self.predictor.df[(self.predictor.df['GeoID'] == geo) & (self.predictor.df['Date'] == current_date - np.timedelta64(1,'D'))]
new_cases = new_pred_df[new_pred_df['GeoID'] == geo]['NewCases'].to_numpy()
# print('New Cases : ', np.cumsum(new_cases))
# print('Confirmed Cases : ', temp_df['ConfirmedCases'])
new_pred_df.loc[new_pred_df['GeoID'] == geo, 'ConfirmedCases'] = np.cumsum(new_cases) + temp_df['ConfirmedCases'].to_numpy()
new_pred_df.loc[new_pred_df['GeoID'] == geo, 'Population'] = temp_df.iloc[0]['Population']
self.predictor.df = self.predictor.df.append(new_pred_df, ignore_index=True)
# we need to compute the PredictionRatio since this is used as input for the predictor
            # INEFFICIENT: there should be a way to compute these quantities only for the new dates
self.predictor.df['SmoothNewCases'] = self.predictor.df.groupby('GeoID')['NewCases'].rolling(
7, center=False).mean().fillna(0).reset_index(0, drop=True)
self.predictor.df['CaseRatio'] = self.predictor.df.groupby('GeoID').SmoothNewCases.pct_change(
).fillna(0).replace(np.inf, 0) + 1
self.predictor.df['ProportionInfected'] = self.predictor.df['ConfirmedCases'] / self.predictor.df['Population']
self.predictor.df['PredictionRatio'] = self.predictor.df['CaseRatio'] / (1 - self.predictor.df['ProportionInfected'])
current_date += pd.DateOffset(days=1)
pres_df['PrescriptionIndex'] = idx
prescription_dfs.append(pres_df)
pres_df = pd.DataFrame(df_dict)
for geo in geos:
self.bandits[weight][geo].clear_update_hist()
self.predictor.df = predictor_df_bkp
print('Weight ' + str(weight) + ' done.')
prescription_df = pd.concat(prescription_dfs)
prescription_df = prescription_df.drop(columns='GeoID')
return prescription_df
def append_pres_pred_to_df(self, eval_past_cases, eval_past_ips, geo,
geo_pres, geo_pred):
"""
Append prescriptions and predictions to eval_past_cases and
eval_past_ips.
        The appended prescriptions and predictions are used the next day in
        the context fed to the bandit.
"""
# Append prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for
ip_col in NPI_COLUMNS]).reshape(1,-1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
if len(geo_pred) != 0:
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
def calc_stringency(self, X_costs, prescribed_ips):
"""
Calculate stringency. This calculation could include division by
the number of IPs and/or number of geos, but that would have
no effect on the ordering of candidate solutions.
        Input:
        - X_costs: array of per-NPI stringency costs for one geo;
        - prescribed_ips: dict of prescribed NPI intensities.
        """
stringency = np.dot(X_costs,np.array(list(prescribed_ips.values())))
return stringency
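    # Hedged illustration (not part of the pipeline): a tiny worked example of
    # the dot product above, with made-up costs and intensities.
    @staticmethod
    def _stringency_example():
        X_costs = np.array([1.0, 0.5])             # per-NPI stringency costs (assumed)
        prescribed_ips = {'npi_a': 2, 'npi_b': 3}  # prescribed intensities (assumed)
        # stringency = 1.0*2 + 0.5*3 = 3.5
        assert np.dot(X_costs, np.array(list(prescribed_ips.values()))) == 3.5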
def add_pres_to_dict(self, df_dict, date_str, geo, prescribed_ips):
"""
Add prescribed NPIs to the dict of prescriptions.
Input:
- df_dict: a dict of prescriptions, see prep_prescription_dict();
- date_str: a string representing the date for which a
prescription was made;
- geo: a GeoID for which the prescription was made;
- prescribed_ips: An array indicating the intensity of each
intervention in the prescription (0 to N).
Output:
- None. Appends to each list in df_dict.
"""
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
if region_name == 'nan':
region_name = np.nan
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ips[prescribed_ip])
def get_context_size(self, eval_past_cases, eval_past_ips, geo_costs):
"""
Artifact function for when context included past cases and ips.
DO NOT USE.
Calculates context size needed for Bandit.
Each of the inputs' first elements is obtained to get length.
Context currently includes:
- geo_costs[geo]: an array of costs for NPIs for a specific GeoID;
- previous day's cases: the last element of eval_past_cases;
- previous day's IPS: the last element of eval_past_IPS.
"""
eval_past_ips_len = len(next(iter(eval_past_ips.values()))[-1]) * NB_LOOKBACK_DAYS
eval_past_cases_len = len(next(iter(eval_past_cases.values()))[-1]) * NB_LOOKBACK_DAYS
geo_costs_len = len(next(iter(geo_costs.values())))
context_size = eval_past_ips_len + eval_past_cases_len + geo_costs_len
return context_size
def prep_prescription_dict(self):
"""
Prepares a dict for prescriptions that will be turned into a df
fed to the BasePredictor's `get_predictions()`.
Input: None
Output: a dict where keys are column names and values are lists.
"""
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
return df_dict
def prep_geo_costs(self, eval_geos, costs_provided=False, cost_df=None):
"""
Prepares costs for each intervention (the "weights") for each GeoID.
Input: eval_geos, a list of GeoIDs for which costs are desired.
Output: geo_costs, a dict:
- each key is a GeoID
- each value is an array of size len(NPI_COLUMNS), so 12 usually,
which represents the stringency cost associated with each
Non-Pharmaceutical Intervention (NPI). These values should sum to
12 (To be verified).
"""
        if not costs_provided:
cost_df = base.generate_costs(self.hist_df, mode='random')
cost_df = base.add_geo_id(cost_df)
# Separate costs by geo
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = costs[NPI_COLUMNS].values[0]
geo_costs[geo] = cost_arr
return geo_costs
def prep_past_ips_cases(self, eval_geos):
"""
Separate past cases and past ips data for each eval geo.
Input: eval_geos, a list of GeoIDs used for evaluation.
Output: past_cases, past_ips, eval_past_cases, eval_past_ips
Dictionaries where each key is a GeoID and each value is:
- an array of past case values, or;
- an array of past interventions plans (IPs) represented by
arrays indicating the intensity of each intervention (0 to N).
"""
past_cases = {}
past_ips = {}
for geo in eval_geos:
geo_df = self.hist_df[self.hist_df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
past_ips[geo] = np.array(geo_df[NPI_COLUMNS])
return past_cases, past_ips
def choose_eval_geos(self):
"""
As a heuristic, use the top NB_EVAL_COUNTRIES w.r.t. ConfirmedCases
so far as the geos for evaluation.
Input: None. Uses self.hist, which is part of __init__
output: a list of GeoIDs.
"""
assert self.nb_eval_countries, "need to uncomment nb_eval_countries"
eval_geos = list(self.hist_df.groupby('GeoID').max()['ConfirmedCases'].sort_values(
ascending=False).head(self.nb_eval_countries).index)
if self.verbose:
print("Bandit will be evaluated on the following geos:", eval_geos)
return eval_geos
def fill_missing_data(self, prior_ips_df, start_date, geos, df, past_cases):
"""
Fill in any missing case data before start_date using predictor given
past_ips_df. Note that the following assumes that the df returned by
prepare_historical_df() has the same final date for all regions. This
has been true so far, but relies on it being true for the Oxford data
csv loaded by prepare_historical_df().
"""
last_historical_data_date_str = df['Date'].max()
last_historical_data_date = pd.to_datetime(last_historical_data_date_str,
format='%Y-%m-%d')
if last_historical_data_date + pd.Timedelta(days=1) < start_date:
if self.verbose:
print("Filling in missing data...")
missing_data_start_date = last_historical_data_date + pd.Timedelta(days=1)
missing_data_start_date_str = datetime.datetime.strftime(missing_data_start_date,
format='%Y-%m-%d')
missing_data_end_date = start_date - pd.Timedelta(days=1)
missing_data_end_date_str = datetime.datetime.strftime(missing_data_end_date,
format='%Y-%m-%d')
pred_df = self.get_predictions(missing_data_start_date_str,
missing_data_end_date_str,
prior_ips_df)
pred_df = base.add_geo_id(pred_df)
for geo in geos:
geo_df = pred_df[pred_df['GeoID'] == geo].sort_values(by='Date')
pred_cases_arr = np.array(geo_df[PRED_CASES_COL])
past_cases[geo] = np.append(past_cases[geo], pred_cases_arr)
elif self.verbose:
print("No missing data.")
if __name__ == '__main__':
prescriptor = Bandit(seed=42)
output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, os.pardir, 'prescriptions')
ofile_path = os.path.abspath(os.path.join(output_dir, 'bandit_evaluate.csv'))
print(ofile_path)
print()
prescriptor.evaluate(output_file_path=ofile_path)
| 46.492958
| 188
| 0.586321
| 27,272
| 0.917971
| 0
| 0
| 0
| 0
| 0
| 0
| 10,902
| 0.36696
|
c56aa8051395c03cfefdb6b4c31ba197b3b0d2c8
| 1,876
|
py
|
Python
|
examples/server.py
|
zaibon/tcprouter
|
7e9d2590e1b1d9d984ac742bd82fcbcf3d42b3ef
|
[
"BSD-3-Clause"
] | 5
|
2019-05-30T23:36:05.000Z
|
2019-10-10T21:37:53.000Z
|
examples/server.py
|
zaibon/tcprouter
|
7e9d2590e1b1d9d984ac742bd82fcbcf3d42b3ef
|
[
"BSD-3-Clause"
] | 7
|
2019-06-12T11:55:46.000Z
|
2019-11-18T22:53:06.000Z
|
examples/server.py
|
xmonader/eltcprouter
|
b3435733d102c2435e9f62aa469d34c475cc31bd
|
[
"BSD-3-Clause"
] | 1
|
2021-01-05T20:09:51.000Z
|
2021-01-05T20:09:51.000Z
|
from gevent import monkey; monkey.patch_all()
import logging
from gevent.server import StreamServer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Receiver(object):
""" Interface for a receiver - mimics Twisted's protocols
"""
def __init__(self):
self.socket = None
self.address = None
def connection_made(self, socket, address):
self.socket = socket
self.address = address
def connection_lost(self):
pass
def line_received(self, line):
pass
def send_line(self, line):
self.socket.sendall(line + b'\n')
class EchoReceiver(Receiver):
""" A basic implementation of a receiver which echoes back every line it
receives.
"""
def line_received(self, line):
self.send_line(line)
def Handler(receiver_class):
""" A basic connection handler that applies a receiver object to each
connection.
"""
def handle(socket, address):
logger.info('Client (%s) connected', address)
receiver = receiver_class()
receiver.connection_made(socket, address)
try:
f = socket.makefile()
while True:
line = f.readline().strip()
if line == "":
break
logger.info('Received line from client: %s', line)
receiver.line_received(line.encode())
logger.info('Client (%s) disconnected.', address)
except Exception as e:
logger.exception(e)
finally:
try:
f.close()
receiver.connection_lost()
except:
pass
return handle
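# Hedged companion snippet (not part of the original example): a minimal TLS
# client for manual testing of the echo server below. Verification is disabled
# because server.crt is self-signed; the host value is an assumption.
def demo_client(host='127.0.0.1', port=9092):
    import socket
    import ssl
    cctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    cctx.check_hostname = False       # self-signed cert, no hostname to match
    cctx.verify_mode = ssl.CERT_NONE  # skip chain verification (test only)
    with cctx.wrap_socket(socket.create_connection((host, port))) as s:
        s.sendall(b'hello\n')
        print(s.recv(1024))           # expect b'hello\n' echoed back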
server = StreamServer(('0.0.0.0', 9092), Handler(EchoReceiver), keyfile='server.key', certfile='server.crt')
logger.info('Server running')
server.serve_forever()
| 25.351351
| 108
| 0.601812
| 641
| 0.341684
| 0
| 0
| 0
| 0
| 0
| 0
| 389
| 0.207356
|
c56c42d080d6ecfdd85da2bc93ed1de36bb3b713
| 9,289
|
py
|
Python
|
starter.py
|
device42/DOQL_scripts_examples
|
55cdf3868768cb4f609011575b1051d7a69c19c5
|
[
"Apache-2.0"
] | 7
|
2017-10-25T13:54:18.000Z
|
2022-01-25T16:16:53.000Z
|
starter.py
|
RomanNyschuk/DOQL_scripts_examples
|
1ec20426dcbe586c9b93ec77002a048c6563dca6
|
[
"Apache-2.0"
] | 2
|
2018-11-19T18:17:35.000Z
|
2020-10-09T19:38:53.000Z
|
starter.py
|
RomanNyschuk/DOQL_scripts_examples
|
1ec20426dcbe586c9b93ec77002a048c6563dca6
|
[
"Apache-2.0"
] | 6
|
2018-10-18T14:39:08.000Z
|
2021-04-15T19:06:01.000Z
|
# encoding: utf-8
import os
import ssl
import sys
import csv
import json
import time
import base64
from datetime import datetime
from datetime import timedelta
try:
import pyodbc
except ImportError:
pass
# PYTHON 2 FALLBACK #
try:
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from io import StringIO
python = 3
except ImportError:
from urllib import urlencode
from urllib2 import urlopen, Request
from StringIO import StringIO
reload(sys)
sys.setdefaultencoding('utf8')
python = 2
# PYTHON 2 FALLBACK #
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
global _debug
_debug = True
def _post(url, query, options):
# PYTHON 2 FALLBACK #
if python == 3:
base64string = base64.b64encode(bytes('%s:%s' % (options['username'], options['password']), 'utf-8'))
post_data = bytes(urlencode({
"query": query,
"header": "yes"
}), 'utf-8')
else:
base64string = base64.b64encode('%s:%s' % (options['username'], options['password']))
post_data = urlencode({
"query": query,
"header": "yes"
})
# PYTHON 2 FALLBACK #
request = Request(url, post_data)
request.add_header("Authorization", "Basic %s" % base64string.decode("utf-8"))
request.get_method = lambda: 'POST'
r = urlopen(request, context=ctx)
body = r.read()
r.close()
if _debug:
msg = 'Status code: %s' % str(r.code)
print('\n\t----------- POST FUNCTION -----------')
print('\t' + url)
print('\t' + msg)
print('\tQuery: ' + query)
print('\t------- END OF POST FUNCTION -------\n')
return body
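# Hedged equivalence sketch (illustration only): the request _post() builds
# amounts to
#   curl -k -u user:pass --data-urlencode 'query=<DOQL>' --data 'header=yes' \
#        https://<host>/services/data/v1.0/query/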
def get_list_from_csv(text):
f = StringIO(text.decode("utf-8"))
list_ = []
dict_reader = csv.DictReader(f, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True, dialect='excel')
for item in dict_reader:
list_.append(item)
return list_, [x for x in dict_reader.fieldnames]
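# Hedged self-test (an illustration, not part of the original script): shows
# the row/column shape get_list_from_csv() produces for a made-up payload.
def _csv_example():
    rows, cols = get_list_from_csv(b'name,os\n"host1","linux"\n')
    assert rows == [{'name': 'host1', 'os': 'linux'}]
    assert cols == ['name', 'os']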
def doql_call(config, query):
limit = 0
query['query'] = ' '.join(query['query'].split())
# prepare date-filtered query
if query['date'] and query['date']['column'] and query['date']['days_limit']:
index = None
where_index = query['query'].find('where')
order_index = query['query'].find('order by')
if where_index > 0:
index = where_index + 6
query['query'] = query['query'][:index] + " %s > current_date - interval '%s day' and " % (query['date']['column'], query['date']['days_limit']) + query['query'][index:]
elif order_index > 0:
index = order_index
query['query'] = query['query'][:index] + " where %s > current_date - interval '%s day' " % (query['date']['column'], query['date']['days_limit']) + query['query'][index:]
if query['output_format'] == 'csv' or query['output_format'] == 'json':
if query['offset']:
page = 0
_next = True
while _next:
doql_offset = page * query['offset']
doql_limit = query['offset']
if query['limit'] and query['limit'] > query['offset']:
if (doql_offset + query['offset']) > query['limit']:
doql_limit = query['limit'] - doql_offset
else:
if query['limit']:
doql_limit = query['limit']
doql_query = query['query'] + ' LIMIT %s OFFSET %s' % (doql_limit, doql_offset)
res = _post(
'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
'username': config['username'],
'password': config['password']
}
)
csv_list, field_order = get_list_from_csv(res)
if query['output_format'] == 'csv':
file = open('%s_%s_%s.csv' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S"), page + 1 ), 'w+')
file.write(res.decode("utf-8"))
elif query['output_format'] == 'json':
file = open('%s_%s_%s.json' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S"), page + 1), 'w+')
file.write(json.dumps(csv_list, indent=4, sort_keys=True))
if doql_limit != query['offset'] or len(csv_list) != query['offset'] or (doql_offset + doql_limit) == query['limit'] :
break
page += 1
else:
if query['limit']:
doql_query = query['query'] + ' LIMIT %s ' % query['limit']
else:
doql_query = query['query']
res = _post(
'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
'username': config['username'],
'password': config['password']
}
)
csv_list, field_order = get_list_from_csv(res)
if query['output_format'] == 'csv':
file = open('%s_%s.csv' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S")), 'w+')
                file.write(res.decode("utf-8"))
elif query['output_format'] == 'json':
file = open('%s_%s.json' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S")), 'w+')
file.write(json.dumps(csv_list, indent=4, sort_keys=True))
file.close()
elif query['output_format'] == 'database':
if query['offset']:
page = 0
_next = True
while _next:
doql_offset = page * query['offset']
doql_limit = query['offset']
if query['limit'] and query['limit'] > query['offset']:
if (doql_offset + query['offset']) > query['limit']:
doql_limit = query['limit'] - doql_offset
else:
if query['limit']:
doql_limit = query['limit']
doql_query = query['query'] + ' LIMIT %s OFFSET %s' % (doql_limit, doql_offset)
res = _post(
'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
'username': config['username'],
'password': config['password']
}
)
csv_list, field_order = get_list_from_csv(res)
cnxn = pyodbc.connect(query['connection_string'], autocommit=True)
conn = cnxn.cursor()
for record in csv_list:
                    # some special cases for strange DOQL responses (that may break databases such as MySQL)
query_str = "INSERT INTO %s (%s) VALUES (%s)" % (query['table'], ','.join(field_order), ','.join([str("'%s'" % record[x][:-1].replace("'", "\\'")) if record[x].endswith('\\') else str("'%s'" % record[x].replace("'", "\\'")) for x in record]))
conn.execute(query_str)
print("Added %s records" % len(csv_list))
if doql_limit != query['offset'] or len(csv_list) != query['offset'] or (doql_offset + doql_limit) == query['limit'] :
conn.close()
break
page += 1
else:
if query['limit']:
doql_query = query['query'] + ' LIMIT %s ' % query['limit']
else:
doql_query = query['query']
res = _post(
'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
'username': config['username'],
'password': config['password']
}
)
csv_list, field_order = get_list_from_csv(res)
cnxn = pyodbc.connect(query['connection_string'], autocommit=True)
conn = cnxn.cursor()
for record in csv_list:
                # some special cases for strange DOQL responses (that may break databases such as MySQL)
query_str = "INSERT INTO %s (%s) VALUES (%s)" % (query['table'], ','.join(field_order), ','.join([str("'%s'" % record[x][:-1].replace("'", "\\'")) if record[x].endswith('\\') else str("'%s'" % record[x].replace("'", "\\'")) for x in record]))
conn.execute(query_str)
print("Added %s records" % len(csv_list))
conn.close()
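# The expected shape of the two input files, inferred from the accesses in
# doql_call() and main(); all values below are hypothetical examples:
#
#     settings.json:
#         {"host": "device42.example.com",
#          "username": "api_user",
#          "password": "secret"}
#
#     query.json:
#         {"query": "select * from view_device_v1",
#          "date": {"column": "last_edited", "days_limit": 30},
#          "output_format": "csv",
#          "output_filename": "devices",
#          "offset": 1000,
#          "limit": 0,
#          "connection_string": "DSN=mydb",
#          "table": "devices"}
#
# "output_format" may be "csv", "json" or "database"; "connection_string"
# and "table" are only read for the "database" case, and a falsy "offset"
# or "limit" disables paging or the overall row cap respectively.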
def main():
try:
with open('settings.json') as data_file:
config = json.load(data_file)
except IOError:
        print('File "settings.json" doesn\'t exist.')
sys.exit()
try:
with open(sys.argv[1]) as data_file:
query = json.loads(data_file.read().replace('\n', '').replace(" ", ' '))
except IOError:
        print('File "%s" doesn\'t exist.' % sys.argv[1])
sys.exit()
doql_call(config, query)
if __name__ == "__main__":
if len(sys.argv) < 2:
print('Please use "python starter.py query.json".')
sys.exit()
main()
print('Done!')
sys.exit()
| 34.531599
| 262
| 0.518247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,300
| 0.247605
|
c56c7f75c3a3c13e0936e45885df9754bb813e14
| 5,001
|
py
|
Python
|
docs/conf.py
|
donatelli01/donatelli_documentations
|
6bf851014a96cd54c16d7d56b5677b081ca0d4e3
|
[
"CC-BY-4.0"
] | null | null | null |
docs/conf.py
|
donatelli01/donatelli_documentations
|
6bf851014a96cd54c16d7d56b5677b081ca0d4e3
|
[
"CC-BY-4.0"
] | null | null | null |
docs/conf.py
|
donatelli01/donatelli_documentations
|
6bf851014a96cd54c16d7d56b5677b081ca0d4e3
|
[
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys, os
sys.path.insert(0, os.path.abspath('extensions'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig',
'epub2', 'mobi', 'autoimage', 'code_example']
todo_include_todos = True
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = []
add_function_parentheses = True
#add_module_names = True
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
project = u'Music for Geeks and Nerds'
copyright = u'2012, Pedro Kroger'
version = ''
release = ''
# -- Options for HTML output ---------------------------------------------------
html_theme = 'book'
html_theme_path = ['themes']
html_title = "Music for Geeks and Nerds"
#html_short_title = None
#html_logo = None
#html_favicon = None
html_static_path = ['_static']
html_domain_indices = False
html_use_index = False
html_show_sphinx = False
htmlhelp_basename = 'MusicforGeeksandNerdsdoc'
html_show_sourcelink = False
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'papersize': '',
'fontpkg': '',
'fncychap': '',
'maketitle': '\\cover',
'pointsize': '',
'preamble': '',
'releasename': "",
'babel': '',
'printindex': '',
'fontenc': '',
'inputenc': '',
'classoptions': '',
'utf8extra': '',
}
latex_additional_files = ["mfgan-bw.sty", "mfgan.sty", "_static/cover.png"]
latex_documents = [
('index', 'music-for-geeks-and-nerds.tex', u'Music for Geeks and Nerds',
u'Pedro Kroger', 'manual'),
]
latex_show_pagerefs = False
latex_domain_indices = False
latex_use_modindex = False
#latex_logo = None
#latex_show_urls = False
# -- Options for Epub output ---------------------------------------------------
epub_title = u'Music for Geeks and Nerds'
epub_author = u'Pedro Kroger'
epub_publisher = u'Pedro Kroger'
epub_copyright = u'2012, Pedro Kroger'
epub_theme = 'epub2'
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ("_static/cover.png", "epub-cover.html")
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',
'_static/jquery.js', '_static/searchtools.js', '_static/underscore.js',
'_static/basic.css', 'search.html', '_static/websupport.js']
# The depth of the table of contents in toc.ncx.
epub_tocdepth = 2
# Allow duplicate toc entries.
epub_tocdup = False
# -- Options for Mobi output ---------------------------------------------------
mobi_theme = "mobi"
mobi_title = u'Music for Geeks and Nerds'
mobi_author = u'Pedro Kroger'
mobi_publisher = u'Pedro Kroger'
mobi_copyright = u'2012, Pedro Kroger'
# The scheme of the identifier. Typical schemes are ISBN or URL.
#mobi_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#mobi_identifier = ''
# A unique identification for the text.
#mobi_uid = ''
mobi_cover = "_static/cover.png"
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#mobi_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#mobi_post_files = []
# A list of files that should not be packed into the mobi file.
mobi_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',
'_static/jquery.js', '_static/searchtools.js', '_static/underscore.js',
'_static/basic.css', 'search.html', '_static/websupport.js']
# The depth of the table of contents in toc.ncx.
mobi_tocdepth = 2
# Allow duplicate toc entries.
mobi_tocdup = False
mobi_add_visible_links = False
# -- Options for Code Examples output ---------------------------------------------------
code_example_dir = "code-example"
code_add_python_path = ["../py"]
################################################################################
def setup(app):
from sphinx.util.texescape import tex_replacements
    tex_replacements += [(u'♮', u'$\\natural$'),
                         (u'ē', u'\\=e'),
                         (u'♩', u'\\quarternote'),
                         (u'↑', u'$\\uparrow$'),
                         ]
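# Note: the setup() hook above extends Sphinx's LaTeX escape table so the
# Unicode music symbols used in the text map to LaTeX macros; \quarternote
# is assumed to be defined by one of the custom style files listed in
# latex_additional_files.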
| 28.577143
| 89
| 0.642072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,463
| 0.691494
|
c56cd8896de2c6a2a8be34144a21660a056501d9
| 17,031
|
py
|
Python
|
sapphire/simulation.py
|
alexanderzimmerman/sapphire
|
1236000d201b8ff44296b0428ef31e5ff0e6078f
|
[
"MIT"
] | 10
|
2019-04-26T16:23:49.000Z
|
2022-02-01T22:44:29.000Z
|
sapphire/simulation.py
|
alexanderzimmerman/sapphire
|
1236000d201b8ff44296b0428ef31e5ff0e6078f
|
[
"MIT"
] | 35
|
2018-12-10T08:55:59.000Z
|
2019-03-21T10:48:57.000Z
|
sapphire/simulation.py
|
alexanderzimmerman/sapphire
|
1236000d201b8ff44296b0428ef31e5ff0e6078f
|
[
"MIT"
] | 4
|
2019-04-11T16:49:48.000Z
|
2021-03-15T00:58:09.000Z
|
"""Provides a class for constructing simulations based on Firedrake.
Simulations proceed forward in time by solving
a sequence of Initial Boundary Values Problems (IBVP's).
Using the Firedrake framework,
the PDE's are discretized in space with Finite Elements (FE).
The symbolic capabilities of Firedrake are used to
automatically implement backward difference formula (BDF) time
discretizations and to automatically linearize nonlinear problems
with Newton's method.
Nonlinear and linear solvers are provided by PETSc
and are accessed via the Firedrake interface.
This module imports `firedrake` as `fe` and its documentation writes
`fe` instead of `firedrake`.
"""
import typing
import pathlib
import ufl
import firedrake as fe
import sapphire.time_discretization
import sapphire.output
class Simulation:
"""A PDE-based simulation using the Firedrake framework.
The PDE's are discretized in space using finite elements
and in time using backward difference formulas.
Implementing a simulation requires at least instantiating this
class and calling the instance's `run` method.
"""
def __init__(self,
solution: fe.Function,
time: float = 0.,
time_stencil_size: int = 2,
timestep_size: float = 1.,
quadrature_degree: int = None,
solver_parameters: dict = {
"snes_type": "newtonls",
"snes_monitor": None,
"ksp_type": "preonly",
"pc_type": "lu",
"mat_type": "aij",
"pc_factor_mat_solver_type": "mumps"},
output_directory_path: str = "output/",
fieldnames: typing.Iterable[str] = None):
"""
Instantiating this class requires enough information to fully
        specify the FE spatial discretization, weak form residual,
boundary conditions, and initial values. All of these required
arguments are Firedrake objects used according to Firedrake
conventions.
Backward Difference Formula time discretizations are
automatically implemented. To use a different time
discretization, inherit this class and redefine
`time_discrete_terms`.
Args:
solution: Solution for a single time step.
As a `fe.Function`, this also defines the
mesh, element, and solution function space.
time: The initial time.
time_stencil_size: The number of solutions at
discrete times used for approximating time derivatives.
This also determines the number of stored solutions.
Must be greater than zero.
Defaults to 2. Set to 1 for steady state problems.
Increase for higher-order time accuracy.
timestep_size: The size of discrete time steps.
Defaults to 1.
Higher order time discretizations are assumed to use
a constant time step size.
quadrature_degree: The quadrature degree used for
numerical integration.
Defaults to `None`, in which case Firedrake will
automatically choose a suitable quadrature degree.
solver_parameters: The solver parameters dictionary
which Firedrake uses to configure PETSc.
output_directory_path: String that will be converted
to a Path where output files will be written.
Defaults to "output/".
fieldnames: A list of names for the components of `solution`.
Defaults to `None`.
These names can be used when indexing solutions that are split
either by `firedrake.split` or `firedrake.Function.split`.
If not `None`, then the `dict` `self.solution_fields` will be created.
The `dict` will have two items for each field,
containing the results of either splitting method.
The results of `firedrake.split` will be suffixed with "_ufl".
"""
assert(time_stencil_size > 0)
self.fieldcount = len(solution.split())
if fieldnames is None:
            fieldnames = ["w_{}".format(i) for i in range(self.fieldcount)]
assert(len(fieldnames) == self.fieldcount)
self.fieldnames = fieldnames
self.solution = solution
self.time = fe.Constant(time)
self.solution_space = self.solution.function_space()
self.mesh = self.solution_space.mesh()
self.unit_vectors = unit_vectors(self.mesh)
self.element = self.solution_space.ufl_element()
self.timestep_size = fe.Constant(timestep_size)
self.quadrature_degree = quadrature_degree
self.dx = fe.dx(degree = self.quadrature_degree)
self.solver_parameters = solver_parameters
initial_values = self.initial_values()
if initial_values is not None:
self.solution = self.solution.assign(initial_values)
# States for time dependent simulation and checkpointing
self.solutions = [self.solution,]
self.times = [self.time,]
self.state = {
"solution": self.solution,
"time": self.time,
"index": 0}
self.states = [self.state,]
for i in range(1, time_stencil_size):
self.solutions.append(fe.Function(self.solution))
self.times.append(fe.Constant(self.time - i*timestep_size))
self.states.append({
"solution": self.solutions[i],
"time": self.times[i],
"index": -i})
# Continuation helpers
self.backup_solution = fe.Function(self.solution)
# Mixed solution indexing helpers
self.solution_fields = {}
self.solution_subfunctions = {}
self.test_functions = {}
self.time_discrete_terms = {}
self.solution_subspaces = {}
for name, field, field_pp, testfun, timeterm in zip(
fieldnames,
fe.split(self.solution),
self.solution.split(),
fe.TestFunctions(self.solution_space),
time_discrete_terms(
solutions = self.solutions,
timestep_size = self.timestep_size)):
self.solution_fields[name] = field
self.solution_subfunctions[name] = field_pp
self.test_functions[name] = testfun
self.time_discrete_terms[name] = timeterm
self.solution_subspaces[name] = self.solution_space.sub(
fieldnames.index(name))
# Output controls
self.output_directory_path = pathlib.Path(output_directory_path)
self.output_directory_path.mkdir(parents = True, exist_ok = True)
self.vtk_solution_file = None
self.plotvars = None
self.snes_iteration_count = 0
def run(self,
endtime: float,
write_checkpoints: bool = True,
write_vtk_solutions: bool = False,
write_plots: bool = False,
write_initial_outputs: bool = True,
endtime_tolerance: float = 1.e-8,
solve: typing.Callable = None) \
-> (typing.List[fe.Function], float):
"""Run simulation forward in time.
Args:
endtime (float): Run until reaching this time.
            write_checkpoints (bool): Write checkpoints if True.
write_vtk_solutions (bool): Write solutions to VTK if True.
write_plots (bool): Write plots if True.
Writing the plots to disk can in some cases dominate
the processing time. Additionally, much more data
is generated, requiring more disk storage space.
write_initial_outputs (bool): Write for initial values
before solving the first time step. Default to True.
You may want to set this to False if, for example, you
are calling `run` repeatedly with later endtimes.
In such a case, the initial values are the same as
the previously computed solution, and so they should
not be written again.
endtime_tolerance (float): Allows endtime to be only
approximately reached. This is larger than a
typical floating point comparison tolerance
because errors accumulate between timesteps.
solve (callable): This is called to solve each time step.
By default, this will be set to `self.solve`.
"""
if write_initial_outputs:
self.write_outputs(
headers = True,
checkpoint = write_checkpoints,
vtk = write_vtk_solutions,
plots = write_plots)
if solve is None:
solve = self.solve
while self.time.__float__() < (endtime - endtime_tolerance):
self.states = self.push_back_states()
self.time = self.time.assign(self.time + self.timestep_size)
self.state["index"] += 1
self.solution = solve()
print("Solved at time t = {}".format(self.time.__float__()))
self.write_outputs(
headers = False,
checkpoint = write_checkpoints,
vtk = write_vtk_solutions,
plots = write_plots)
return self.states
def solve(self) -> fe.Function:
"""Set up the problem and solver, and solve.
        The problem and solver are set up just in time (JIT), ensuring
        they are up-to-date before calling the solver.
All compiled objects are cached, so the JIT problem and solver
setup does not have any significant performance overhead.
"""
problem = fe.NonlinearVariationalProblem(
F = self.weak_form_residual(),
u = self.solution,
bcs = self.dirichlet_boundary_conditions(),
J = fe.derivative(self.weak_form_residual(), self.solution))
solver = fe.NonlinearVariationalSolver(
problem = problem,
nullspace = self.nullspace(),
solver_parameters = self.solver_parameters)
solver.solve()
self.snes_iteration_count += solver.snes.getIterationNumber()
return self.solution
def weak_form_residual(self):
        raise NotImplementedError("This method must be redefined by the derived class.")
def initial_values(self):
return None
def dirichlet_boundary_conditions(self):
return None
def nullspace(self):
return None
def push_back_states(self) -> typing.List[typing.Dict]:
"""Push back states, including solutions, times, and indices.
Sufficient solutions are stored for the time discretization.
Advancing the simulation forward in time requires re-indexing
the solutions and times.
"""
for i in range(len(self.states[1:])):
rightstate = self.states[-1 - i]
leftstate = self.states[-2 - i]
rightstate["index"] = leftstate["index"]
for key in "solution", "time":
# Set values of `fe.Function` and `fe.Constant`
# with their `assign` methods.
rightstate[key] = rightstate[key].assign(leftstate[key])
return self.states
def postprocess(self) -> 'Simulation':
""" This is called by `write_outputs` before writing.
Redefine this to add post-processing.
"""
return self
    def kwargs_for_writeplots(self) -> dict:
        """Return kwargs needed for `sapphire.output.writeplots`.
By default, no plots are made.
This must be redefined to return a dict
        if `run` is called with `write_plots = True`.
"""
return None
def write_checkpoint(self):
sapphire.output.write_checkpoint(
states=self.states,
dirpath=self.output_directory_path,
filename="checkpoints")
def write_outputs(self,
headers: bool,
checkpoint: bool = True,
vtk: bool = False,
plots: bool = False):
"""Write all outputs.
This creates or appends the CSV report,
writes the latest solution, and plots (in 1D/2D case).
Redefine this to control outputs.
Args:
            headers (bool): Write header line to report if True.
You may want to set this to False, for example, if the
header has already been written.
checkpoint (bool): Write checkpoint if True.
vtk (bool): Write solution to VTK if True.
plots (bool): Write plots if True.
"""
self = self.postprocess()
sapphire.output.report(sim = self, write_header = headers)
if checkpoint:
self.write_checkpoint()
if vtk:
if self.vtk_solution_file is None:
vtk_solution_filepath = self.output_directory_path.joinpath(
"solution").with_suffix(".pvd")
self.vtk_solution_file = fe.File(str(vtk_solution_filepath))
sapphire.output.write_solution_to_vtk(
sim = self, file = self.vtk_solution_file)
if plots:
if self.mesh.geometric_dimension() < 3:
sapphire.output.writeplots(
**self.kwargs_for_writeplots(),
time = self.time.__float__(),
time_index = self.state["index"],
outdir_path = self.output_directory_path)
elif self.mesh.geometric_dimension() == 3:
# This could be done with VTK and PyVista, but VTK can be a
# difficult dependency. It may be best to run a separate
# program for generating 3D plots from the solution files.
raise NotImplementedError()
def unit_vectors(mesh) -> typing.Tuple[ufl.tensors.ListTensor]:
"""Returns the mesh's spatial unit vectors in each dimension.
Args:
mesh (fe.Mesh): The mesh for the spatial discretization.
"""
dim = mesh.geometric_dimension()
return tuple([fe.unit_vector(i, dim) for i in range(dim)])
def time_discrete_terms(
solutions: typing.List[fe.Function],
timestep_size: fe.Constant) \
-> typing.Union[
ufl.core.operator.Operator,
typing.List[ufl.core.operator.Operator]]:
"""Returns backward difference time discretization.
    The backward difference formula's stencil size is determined by the
number of solutions provided, i.e. `len(solutions)`.
    For example, if `len(solutions) == 3`, then the second-order BDF2
method will be used, because it involves solutions at three
discrete times.
The return type depends on whether or not the solution is based on
a mixed finite element. For mixed finite elements, a list of time
discrete terms will be returned, each item corresponding to one of
the sub-elements of the mixed element. Otherwise, a single term
will be returned.
"""
"""
The return type design choice was made, rather than always
returning a list (e.g. with only one item if not using a mixed
element), so that it would be more intuitive when not using mixed
elements.
"""
"""
This implementation assumes constant time step size.
Variable time step sizes change the BDF formula
for all except first order.
"""
time_discrete_terms = [
sapphire.time_discretization.bdf(
[fe.split(solutions[n])[i] for n in range(len(solutions))],
timestep_size = timestep_size)
for i in range(len(solutions[0].split()))]
return time_discrete_terms
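# For reference, the standard constant-step BDF terms this produces
# (solutions[0] is the newest solution u_n):
#
#     len(solutions) == 2:  du/dt ~ (u_n - u_{n-1}) / dt                    (BDF1)
#     len(solutions) == 3:  du/dt ~ (3*u_n - 4*u_{n-1} + u_{n-2}) / (2*dt)  (BDF2)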
| 36.391026
| 86
| 0.575245
| 14,333
| 0.841583
| 0
| 0
| 0
| 0
| 0
| 0
| 8,140
| 0.477952
|
c56d0b93bb067141c9ac8d852c7ba2ad1f8b703b
| 16,389
|
py
|
Python
|
lookmlint/lookmlint.py
|
kingfink/lookmlint
|
5fd76328b3ad6917e649a28abed05f64707422b6
|
[
"Apache-2.0"
] | null | null | null |
lookmlint/lookmlint.py
|
kingfink/lookmlint
|
5fd76328b3ad6917e649a28abed05f64707422b6
|
[
"Apache-2.0"
] | 1
|
2020-02-25T16:01:31.000Z
|
2020-02-25T16:01:31.000Z
|
lookmlint/lookmlint.py
|
kingfink/lookmlint
|
5fd76328b3ad6917e649a28abed05f64707422b6
|
[
"Apache-2.0"
] | null | null | null |
from collections import Counter
import json
import os
import re
import subprocess
import attr
import yaml
@attr.s
class ExploreView(object):
data = attr.ib(repr=False)
explore = attr.ib(init=False, repr=False)
name = attr.ib(init=False, repr=True)
source_view = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
self.from_view_name = self.data.get('from')
self.view_name = self.data.get('view_name')
self.join_name = self.data.get('_join')
self.explore = self.data['_explore']
self.sql_on = self.data.get('sql_on')
self.view_label = self.data.get('view_label')
self.name = self._first_existing([self.view_name, self.join_name, self.explore])
def _first_existing(self, values):
return next(v for v in values if v is not None)
def source_view_name(self):
priority = [self.from_view_name, self.view_name, self.join_name, self.explore]
return self._first_existing(priority)
def display_label(self):
priority = [
self.view_label,
self.source_view.label,
self.name.replace('_', ' ').title(),
]
return self._first_existing(priority)
def contains_raw_sql_ref(self):
if not self.sql_on:
return False
raw_sql_words = [
w
for line in self.sql_on.split('\n')
for w in line.split()
# not a comment line
if not line.replace(' ', '').startswith('--')
# doesn't contain lookml syntax
and not '${' in w and not '}' in w
# not a custom function with newlined args
and not w.endswith('(')
# contains one period
and w.count('.') == 1
# doesn't contain noqa
and not '#noqa' in line
]
return len(raw_sql_words) > 0
@attr.s
class Explore(object):
data = attr.ib(repr=False)
label = attr.ib(init=False)
model = attr.ib(init=False)
name = attr.ib(init=False)
views = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
self.name = self.data.get('_explore')
self.label = self.data.get('label')
self.model = self.data.get('_model')
joined_views = [ExploreView(j) for j in self.data.get('joins', [])]
self.views = [ExploreView(self.data)] + joined_views
def display_label(self):
return self.label if self.label else self.name.replace('_', ' ').title()
def view_label_issues(self, acronyms=[], abbreviations=[]):
results = {}
for v in self.views:
issues = label_issues(v.display_label(), acronyms, abbreviations)
if issues == []:
continue
results[v.display_label()] = issues
return results
def duplicated_view_labels(self):
c = Counter(v.display_label() for v in self.views)
return {label: n for label, n in c.items() if n > 1}
@attr.s
class Model(object):
data = attr.ib(repr=False)
explores = attr.ib(init=False, repr=False)
included_views = attr.ib(init=False, repr=False)
name = attr.ib(init=False)
def __attrs_post_init__(self):
includes = self.data.get('include', [])
if isinstance(includes, str):
includes = [includes]
self.included_views = [i[: -len('.view')] for i in includes]
self.explores = [Explore(e) for e in self.data['explore'].values() if isinstance(e, dict)]
self.name = self.data['_model']
def explore_views(self):
return [v for e in self.explores for v in e.views]
def unused_includes(self):
# if all views in a project are imported into a model,
# don't suggest any includes are unused
if self.included_views == ['*']:
return []
explore_view_sources = [v.source_view.name for v in self.explore_views()]
return sorted(list(set(self.included_views) - set(explore_view_sources)))
def explore_label_issues(self, acronyms=[], abbreviations=[]):
results = {}
for e in self.explores:
issues = label_issues(e.display_label(), acronyms, abbreviations)
if issues == []:
continue
results[e.display_label()] = issues
return results
@attr.s
class View(object):
data = attr.ib(repr=False)
name = attr.ib(init=False)
label = attr.ib(init=False)
dimensions = attr.ib(init=False, repr=False)
dimension_groups = attr.ib(init=False, repr=False)
measures = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
self.name = self.data['_view']
self.label = self.data.get('label')
self.dimensions = [Dimension(d) for d in self.data.get('dimension', {}).values() if isinstance(d, dict)]
self.measures = [Measure(m) for m in self.data.get('measure', {}).values() if isinstance(m, dict)]
self.dimension_groups = [
DimensionGroup(dg) for dg in self.data.get('dimension_group', {}).values() if isinstance(dg, dict)
]
self.fields = self.dimensions + self.dimension_groups + self.measures
self.extends = [v.strip('*') for v in self.data.get('extends', [])]
self.sql_table_name = self.data.get('sql_table_name')
self.derived_table_sql = None
if 'derived_table' in self.data:
self.derived_table_sql = self.data['derived_table']['sql']
def field_label_issues(self, acronyms=[], abbreviations=[]):
results = {}
for f in self.fields:
if f.is_hidden:
continue
issues = label_issues(f.display_label(), acronyms, abbreviations)
if issues == []:
continue
results[f.display_label()] = issues
return results
def has_primary_key(self):
return any(d.is_primary_key for d in self.dimensions)
def has_sql_definition(self):
return self.sql_table_name is not None or self.derived_table_sql is not None
def derived_table_contains_semicolon(self):
return self.derived_table_sql is not None and ';' in self.derived_table_sql
def derived_table_contains_select_star(self):
        return self.derived_table_sql is not None and len(re.findall(r'(?:[^/])(\*)(?:[^/])', self.derived_table_sql)) > 0 and '#noqa:select-star' not in self.derived_table_sql
@attr.s
class Dimension(object):
data = attr.ib(repr=False)
name = attr.ib(init=False, repr=True)
type = attr.ib(init=False)
label = attr.ib(init=False)
description = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
self.name = self.data['_dimension']
self.type = self.data.get('type', 'string')
self.label = self.data.get('label')
self.description = self.data.get('description')
self.sql = self.data.get('sql')
self.is_primary_key = self.data.get('primary_key') is True
self.is_hidden = self.data.get('hidden') is True
def display_label(self):
return self.label if self.label else self.name.replace('_', ' ').title()
@attr.s
class DimensionGroup(object):
data = attr.ib(repr=False)
name = attr.ib(init=False, repr=True)
type = attr.ib(init=False)
label = attr.ib(init=False)
description = attr.ib(init=False, repr=False)
timeframes = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
self.name = self.data['_dimension_group']
self.type = self.data.get('type', 'string')
self.label = self.data.get('label')
self.description = self.data.get('description')
self.sql = self.data.get('sql')
self.timeframes = self.data.get('timeframes')
self.is_hidden = self.data.get('hidden') is True
def display_label(self):
return self.label if self.label else self.name.replace('_', ' ').title()
@attr.s
class Measure(object):
data = attr.ib(repr=False)
name = attr.ib(init=False, repr=True)
type = attr.ib(init=False)
label = attr.ib(init=False)
description = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
self.name = self.data['_measure']
self.type = self.data.get('type')
self.label = self.data.get('label')
self.description = self.data.get('description')
self.sql = self.data.get('sql')
self.is_hidden = self.data.get('hidden') is True
self.drill_fields = self.data.get('drill_fields', [])
self.tags = self.data.get('tags', [])
def display_label(self):
return self.label if self.label else self.name.replace('_', ' ').title()
def has_drill_fields(self):
return len(self.drill_fields) > 0 or self.type in ["number", "percent_of_previous", "percent_of_total"] or self.is_hidden or '#noqa:drill-fields' in self.tags
@attr.s
class LookML(object):
lookml_json_filepath = attr.ib()
data = attr.ib(init=False, repr=False)
models = attr.ib(init=False, repr=False)
views = attr.ib(init=False, repr=False)
def __attrs_post_init__(self):
with open(self.lookml_json_filepath) as f:
self.data = json.load(f)
model_dicts = [self._model(mn) for mn in self._model_file_names()]
self.models = [Model(m) for m in model_dicts]
view_dicts = [self._view(vn) for vn in self._view_file_names()]
self.views = [View(v) for v in view_dicts]
# match explore views with their source views
for m in self.models:
for e in m.explores:
for ev in e.views:
source_view = next(
v for v in self.views if v.name == ev.source_view_name()
)
ev.source_view = source_view
def _view_file_names(self):
return sorted(self.data['file']['view'].keys())
def _view(self, view_file_name):
return list(self.data['file']['view'][view_file_name]['view'].values())[0]
def _model_file_names(self):
return sorted(self.data['file']['model'].keys())
def _model(self, model_file_name):
return self.data['file']['model'][model_file_name]['model'][model_file_name]
def mismatched_view_names(self):
results = {}
for vf in self._view_file_names():
v = View(self._view(vf))
if v.name != vf:
results[vf] = v.name
return results
def all_explore_views(self):
explore_views = []
for m in self.models:
explore_views += m.explore_views()
return explore_views
def unused_view_files(self):
view_names = [v.name for v in self.views]
explore_view_names = [v.source_view.name for v in self.all_explore_views()]
extended_views = [exv for v in self.views for exv in v.extends]
return sorted(
list(set(view_names) - set(explore_view_names) - set(extended_views))
)
def read_lint_config(repo_path):
# read .lintconfig.yml
full_path = os.path.expanduser(repo_path)
config_filepath = os.path.join(full_path, '.lintconfig.yml')
acronyms = []
abbreviations = []
if os.path.isfile(config_filepath):
with open(config_filepath) as f:
            config = yaml.safe_load(f)
acronyms = config.get('acronyms', acronyms)
abbreviations = config.get('abbreviations', abbreviations)
lint_config = {'acronyms': acronyms, 'abbreviations': abbreviations}
return lint_config
def parse_repo(full_path):
cmd = (
f'cd {full_path} && '
'lookml-parser --input="*.lkml" --whitespace=2 > /tmp/lookmlint.json'
)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output, error = process.communicate()
def lookml_from_repo_path(repo_path):
full_path = os.path.expanduser(repo_path)
parse_repo(full_path)
lkml = LookML('/tmp/lookmlint.json')
return lkml
def label_issues(label, acronyms=[], abbreviations=[]):
def _contains_bad_acronym_usage(label, acronym):
words = label.split(' ')
# drop plural 's' from words
if not acronym.lower().endswith('s'):
words = [w if not w.endswith('s') else w[:-1] for w in words]
return any(acronym.upper() == w.upper() and w == w.title() for w in words)
def _contains_bad_abbreviation_usage(label, abbreviation):
return any(abbreviation.lower() == k.lower() for k in label.split(' '))
acronyms_used = [
a.upper() for a in acronyms if _contains_bad_acronym_usage(label, a)
]
abbreviations_used = [
a.title() for a in abbreviations if _contains_bad_abbreviation_usage(label, a)
]
return acronyms_used + abbreviations_used
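# Illustrative behaviour of the checks above:
#
#     label_issues('Total Arr', acronyms=['ARR'])        -> ['ARR']
#     label_issues('Total ARR', acronyms=['ARR'])        -> []
#     label_issues('Qty On Hand', abbreviations=['qty']) -> ['Qty']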
def lint_labels(lkml, acronyms, abbreviations):
# check for acronym and abbreviation issues
explore_label_issues = {}
for m in lkml.models:
issues = m.explore_label_issues(acronyms, abbreviations)
if issues != {}:
explore_label_issues[m.name] = issues
explore_view_label_issues = {}
for m in lkml.models:
for e in m.explores:
issues = e.view_label_issues(acronyms, abbreviations)
if issues != {}:
if m.name not in explore_view_label_issues:
explore_view_label_issues[m.name] = {}
explore_view_label_issues[m.name][e.name] = issues
field_label_issues = {}
for v in lkml.views:
issues = v.field_label_issues(acronyms, abbreviations)
if issues != {}:
field_label_issues[v.name] = issues
# create overall labels issues dict
label_issues = {}
if explore_label_issues != {}:
label_issues['explores'] = explore_label_issues
if explore_view_label_issues != {}:
label_issues['explore_views'] = explore_view_label_issues
if field_label_issues != {}:
label_issues['fields'] = field_label_issues
return label_issues
def lint_duplicate_view_labels(lkml):
issues = {}
for m in lkml.models:
for e in m.explores:
dupes = e.duplicated_view_labels()
if dupes == {}:
continue
if m.name not in issues:
issues[m.name] = {}
if e.name not in issues[m.name]:
issues[m.name][e.name] = dupes
return issues
def lint_sql_references(lkml):
# check for raw SQL field references
raw_sql_refs = {}
for m in lkml.models:
for e in m.explores:
for v in e.views:
if not v.contains_raw_sql_ref():
continue
if m.name not in raw_sql_refs:
raw_sql_refs[m.name] = {}
if e.name not in raw_sql_refs[m.name]:
raw_sql_refs[m.name][e.name] = {}
raw_sql_refs[m.name][e.name][v.name] = v.sql_on
return raw_sql_refs
def lint_view_primary_keys(lkml):
# check for missing primary keys
views_missing_primary_keys = [v.name for v in lkml.views if not v.has_primary_key()]
return views_missing_primary_keys
def lint_missing_drill_fields(lkml):
# check for measures missing drill fields
measures_missing_drill_fields = []
for v in lkml.views:
measures_missing_drill_fields += [(v.name, m.name) for m in v.measures if not m.has_drill_fields()]
return sorted(list(set(measures_missing_drill_fields)))
def lint_unused_includes(lkml):
# check for unused includes
unused_includes = {
m.name: m.unused_includes() for m in lkml.models if m.unused_includes() != []
}
return unused_includes
def lint_unused_view_files(lkml):
# check for unused view files
unused_view_files = lkml.unused_view_files()
return unused_view_files
def lint_missing_view_sql_definitions(lkml):
return [
v.name
for v in lkml.views
if not v.has_sql_definition()
and v.extends == []
and any(f.sql and '${TABLE}' in f.sql for f in v.fields)
]
def lint_semicolons_in_derived_table_sql(lkml):
return [v.name for v in lkml.views if v.derived_table_contains_semicolon()]
def lint_select_star_in_derived_table_sql(lkml):
return [v.name for v in lkml.views if v.derived_table_contains_select_star()]
def lint_mismatched_view_names(lkml):
return lkml.mismatched_view_names()
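# Minimal end-to-end sketch (hypothetical repo path); parse_repo() shells
# out to the `lookml-parser` CLI, which must be installed:
#
#     lkml = lookml_from_repo_path('~/repos/my-lookml-project')
#     config = read_lint_config('~/repos/my-lookml-project')
#     issues = lint_labels(lkml, config['acronyms'], config['abbreviations'])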
| 34.430672
| 175
| 0.626945
| 10,779
| 0.657697
| 0
| 0
| 10,843
| 0.661602
| 0
| 0
| 1,491
| 0.090976
|
c56d395d346db6cdbf6e9c0543fb7e6ccd0a31e0
| 4,306
|
py
|
Python
|
book-code/numpy-ml/numpy_ml/utils/testing.py
|
yangninghua/code_library
|
b769abecb4e0cbdbbb5762949c91847a0f0b3c5a
|
[
"MIT"
] | null | null | null |
book-code/numpy-ml/numpy_ml/utils/testing.py
|
yangninghua/code_library
|
b769abecb4e0cbdbbb5762949c91847a0f0b3c5a
|
[
"MIT"
] | null | null | null |
book-code/numpy-ml/numpy_ml/utils/testing.py
|
yangninghua/code_library
|
b769abecb4e0cbdbbb5762949c91847a0f0b3c5a
|
[
"MIT"
] | null | null | null |
"""Utilities for writing unit tests"""
import numbers
import numpy as np
#######################################################################
# Assertions #
#######################################################################
def is_symmetric(X):
"""Check that an array `X` is symmetric along its main diagonal"""
return np.allclose(X, X.T)
def is_symmetric_positive_definite(X):
    """Check that a matrix `X` is symmetric and positive-definite."""
if is_symmetric(X):
try:
# if matrix is symmetric, check whether the Cholesky decomposition
# (defined only for symmetric/Hermitian positive definite matrices)
# exists
np.linalg.cholesky(X)
return True
except np.linalg.LinAlgError:
return False
return False
def is_stochastic(X):
    """True if each row of `X` is a valid probability distribution"""
    msg = "Each row of the array should sum to 1"
assert len(X[X < 0]) == len(X[X > 1]) == 0, msg
assert np.allclose(np.sum(X, axis=1), np.ones(X.shape[0])), msg
return True
def is_number(a):
"""Check that a value `a` is numeric"""
return isinstance(a, numbers.Number)
def is_one_hot(x):
"""Return True if array `x` is a binary array with a single 1"""
msg = "Matrix should be one-hot binary"
assert np.array_equal(x, x.astype(bool)), msg
assert np.allclose(np.sum(x, axis=1), np.ones(x.shape[0])), msg
return True
def is_binary(x):
"""Return True if array `x` consists only of binary values"""
msg = "Matrix must be binary"
assert np.array_equal(x, x.astype(bool)), msg
return True
#######################################################################
# Data Generators #
#######################################################################
def random_one_hot_matrix(n_examples, n_classes):
"""Create a random one-hot matrix of shape (`n_examples`, `n_classes`)"""
X = np.eye(n_classes)
X = X[np.random.choice(n_classes, n_examples)]
return X
def random_stochastic_matrix(n_examples, n_classes):
"""Create a random stochastic matrix of shape (`n_examples`, `n_classes`)"""
X = np.random.rand(n_examples, n_classes)
X /= X.sum(axis=1, keepdims=True)
return X
def random_tensor(shape, standardize=False):
"""
Create a random real-valued tensor of shape `shape`. If `standardize` is
True, ensure each column has mean 0 and std 1.
"""
offset = np.random.randint(-300, 300, shape)
X = np.random.rand(*shape) + offset
if standardize:
eps = np.finfo(float).eps
X = (X - X.mean(axis=0)) / (X.std(axis=0) + eps)
return X
def random_binary_tensor(shape, sparsity=0.5):
"""
Create a random binary tensor of shape `shape`. `sparsity` is a value
between 0 and 1 controlling the ratio of 0s to 1s in the output tensor.
"""
return (np.random.rand(*shape) >= (1 - sparsity)).astype(float)
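# Illustrative usage (shapes and values are arbitrary):
#
#     X = random_stochastic_matrix(8, 3)               # each row sums to 1
#     assert is_stochastic(X)
#     B = random_binary_tensor((4, 4), sparsity=0.25)  # ~25% entries are 1
#     assert is_binary(B)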
def random_paragraph(n_words, vocab=None):
"""
Generate a random paragraph consisting of `n_words` words. If `vocab` is
not None, words will be drawn at random from this list. Otherwise, words
will be sampled uniformly from a collection of 26 Latin words.
"""
if vocab is None:
vocab = [
"at",
"stet",
"accusam",
"aliquyam",
"clita",
"lorem",
"ipsum",
"dolor",
"dolore",
"dolores",
"sit",
"amet",
"consetetur",
"sadipscing",
"elitr",
"sed",
"diam",
"nonumy",
"eirmod",
"duo",
"ea",
"eos",
"erat",
"est",
"et",
"gubergren",
]
return [np.random.choice(vocab) for _ in range(n_words)]
#######################################################################
# Custom Warnings #
#######################################################################
class DependencyWarning(RuntimeWarning):
pass
| 29.902778
| 80
| 0.510683
| 49
| 0.011379
| 0
| 0
| 0
| 0
| 0
| 0
| 2,153
| 0.5
|
c56da321682df09ceea1c41371b833fb49044e9e
| 1,373
|
py
|
Python
|
test/test_googleoauth2.py
|
GallopLabs/libsaas
|
80b2d51b81a769eacafc3847cc33700ac80e66fc
|
[
"MIT"
] | null | null | null |
test/test_googleoauth2.py
|
GallopLabs/libsaas
|
80b2d51b81a769eacafc3847cc33700ac80e66fc
|
[
"MIT"
] | null | null | null |
test/test_googleoauth2.py
|
GallopLabs/libsaas
|
80b2d51b81a769eacafc3847cc33700ac80e66fc
|
[
"MIT"
] | null | null | null |
import unittest
from libsaas.executors import test_executor
from libsaas.services import googleoauth2
class GoogleOauth2TestCase(unittest.TestCase):
def setUp(self):
self.executor = test_executor.use()
self.executor.set_response(b'{}', 200, {})
self.service = googleoauth2.GoogleOAuth2('id', 'secret')
def expect(self, method=None, uri=None, params=None):
if method is not None:
self.assertEqual(method, self.executor.request.method)
if uri is not None:
self.assertEqual(self.executor.request.uri,
self.service.APIROOT + uri)
if params is not None:
self.assertEqual(self.executor.request.params, params)
def test_access_token(self):
params = {'client_id': 'id',
'client_secret': 'secret',
'grant_type': 'authorization_code',
'code': 'code',
'redirect_uri': 'uri'}
self.service.access_token('code', 'uri')
self.expect('POST', '/token', params)
def test_refresh_token(self):
params = {'client_id': 'id',
'client_secret': 'secret',
'grant_type': 'refresh_token',
'refresh_token': 'token'}
self.service.refresh_token('token')
self.expect('POST', '/token', params)
| 32.690476
| 66
| 0.584122
| 1,267
| 0.922797
| 0
| 0
| 0
| 0
| 0
| 0
| 251
| 0.182811
|
c56df3f7bc34ea2a6465e6d328eeae9b03525f21
| 5,654
|
py
|
Python
|
post_office/migrations/0001_initial.py
|
carrerasrodrigo/django-post_office
|
0257a39f9f2d20c1a42c58e8fd4dfaf591221132
|
[
"MIT"
] | null | null | null |
post_office/migrations/0001_initial.py
|
carrerasrodrigo/django-post_office
|
0257a39f9f2d20c1a42c58e8fd4dfaf591221132
|
[
"MIT"
] | null | null | null |
post_office/migrations/0001_initial.py
|
carrerasrodrigo/django-post_office
|
0257a39f9f2d20c1a42c58e8fd4dfaf591221132
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import post_office.fields
import post_office.validators
import post_office.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file', models.FileField(upload_to=post_office.models.get_upload_path)),
('name', models.CharField(help_text=b'The original filename', max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BackendAccess',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=250)),
('host', models.CharField(max_length=500)),
('port', models.IntegerField()),
('username', models.CharField(max_length=250)),
('password', models.CharField(max_length=250)),
('use_tsl', models.BooleanField(default=False)),
('backend_class', models.CharField(max_length=500, null=True, blank=True)),
('limit_min', models.IntegerField(default=0)),
('total_sent_last_min', models.IntegerField(default=0)),
('last_time_sent', models.IntegerField(default=0)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('from_email', models.CharField(max_length=254, validators=[post_office.validators.validate_email_with_name])),
('to', post_office.fields.CommaSeparatedEmailField(blank=True)),
('cc', post_office.fields.CommaSeparatedEmailField(blank=True)),
('bcc', post_office.fields.CommaSeparatedEmailField(blank=True)),
('subject', models.CharField(max_length=255, blank=True)),
('message', models.TextField(blank=True)),
('html_message', models.TextField(blank=True)),
('status', models.PositiveSmallIntegerField(blank=True, null=True, db_index=True, choices=[(0, b'sent'), (1, b'failed'), (2, b'queued')])),
('priority', models.PositiveSmallIntegerField(blank=True, null=True, choices=[(0, b'low'), (1, b'medium'), (2, b'high'), (3, b'now')])),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('last_updated', models.DateTimeField(auto_now=True, db_index=True)),
('scheduled_time', models.DateTimeField(db_index=True, null=True, blank=True)),
('headers', jsonfield.fields.JSONField(null=True, blank=True)),
('context', jsonfield.fields.JSONField(null=True, blank=True)),
('backend_access', models.ForeignKey(blank=True, to='post_office.BackendAccess', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EmailTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b"e.g: 'welcome_email'", max_length=255)),
('description', models.TextField(help_text=b'Description of this template.', blank=True)),
('subject', models.CharField(blank=True, max_length=255, validators=[post_office.validators.validate_template_syntax])),
('content', models.TextField(blank=True, validators=[post_office.validators.validate_template_syntax])),
('html_content', models.TextField(blank=True, validators=[post_office.validators.validate_template_syntax])),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True)),
('status', models.PositiveSmallIntegerField(choices=[(0, b'sent'), (1, b'failed')])),
('exception_type', models.CharField(max_length=255, blank=True)),
('message', models.TextField()),
('email', models.ForeignKey(related_name='logs', editable=False, to='post_office.Email')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='email',
name='template',
field=models.ForeignKey(blank=True, to='post_office.EmailTemplate', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='attachment',
name='emails',
field=models.ManyToManyField(related_name='attachments', to='post_office.Email'),
preserve_default=True,
),
]
| 49.596491
| 155
| 0.583658
| 5,439
| 0.961974
| 0
| 0
| 0
| 0
| 0
| 0
| 803
| 0.142023
|
c56e81e80b9caed3db5600ddbb8cc958f425902d
| 3,890
|
py
|
Python
|
ic_gan/data_utils/store_kmeans_indexes.py
|
ozcelikfu/IC-GAN_fMRI_Reconstruction
|
31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c
|
[
"MIT"
] | null | null | null |
ic_gan/data_utils/store_kmeans_indexes.py
|
ozcelikfu/IC-GAN_fMRI_Reconstruction
|
31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c
|
[
"MIT"
] | null | null | null |
ic_gan/data_utils/store_kmeans_indexes.py
|
ozcelikfu/IC-GAN_fMRI_Reconstruction
|
31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Store dataset indexes of datapoints selected by k-means algorithm."""
from argparse import ArgumentParser
import numpy as np
import os
import h5py as h5
import faiss
def main(args):
if args["which_dataset"] == "imagenet":
dataset_name_prefix = "ILSVRC"
im_prefix = "IN"
elif args["which_dataset"] == "coco":
dataset_name_prefix = "COCO"
im_prefix = "COCO"
else:
dataset_name_prefix = args["which_dataset"]
im_prefix = args["which_dataset"]
# HDF5 filename
filename = os.path.join(
args["data_root"],
"%s%s_feats_%s_%s.hdf5"
% (
dataset_name_prefix,
args["resolution"],
args["feature_extractor"],
args["backbone_feature_extractor"],
),
)
# Load features
print("Loading features %s..." % (filename))
with h5.File(filename, "r") as f:
features = f["feats"][:]
features = np.array(features)
# Normalize features
features /= np.linalg.norm(features, axis=1, keepdims=True)
feat_dim = 2048
# k-means
print("Training k-means with %i centers..." % (args["kmeans_subsampled"]))
kmeans = faiss.Kmeans(
feat_dim,
args["kmeans_subsampled"],
niter=100,
verbose=True,
gpu=args["gpu"],
min_points_per_centroid=200,
spherical=False,
)
kmeans.train(features.astype(np.float32))
# Find closest instances to each k-means cluster
print("Finding closest instances to centers...")
index = faiss.IndexFlatL2(feat_dim)
index.add(features.astype(np.float32))
D, closest_sample = index.search(kmeans.centroids, 1)
    net_str = (
        "rn50"
        if args["backbone_feature_extractor"] == "resnet50"
        else args["backbone_feature_extractor"]
    )
stored_filename = "%s_res%i_%s_%s_kmeans_k%i" % (
im_prefix,
args["resolution"],
net_str,
args["feature_extractor"],
args["kmeans_subsampled"],
)
np.save(
os.path.join(args["data_root"], stored_filename),
{"center_examples": closest_sample},
)
print(
"Instance indexes resulting from a subsampling based on k-means have been saved in file %s!"
% (stored_filename)
)
if __name__ == "__main__":
parser = ArgumentParser(
description="Storing cluster indexes for k-means-based data subsampling"
)
parser.add_argument(
"--resolution",
type=int,
default=64,
help="Data resolution (default: %(default)s)",
)
parser.add_argument(
"--which_dataset", type=str, default="imagenet", help="Dataset choice."
)
parser.add_argument(
"--data_root",
type=str,
default="data",
help="Default location where data is stored (default: %(default)s)",
)
parser.add_argument(
"--feature_extractor",
type=str,
default="classification",
choices=["classification", "selfsupervised"],
help="Choice of feature extractor",
)
parser.add_argument(
"--backbone_feature_extractor",
type=str,
default="resnet50",
choices=["resnet50"],
help="Choice of feature extractor backbone",
)
parser.add_argument(
"--kmeans_subsampled",
type=int,
default=-1,
help="Number of k-means centers if using subsampled training instances"
" (default: %(default)s)",
)
parser.add_argument(
"--gpu",
action="store_true",
default=False,
help="Use faiss with GPUs (default: %(default)s)",
)
args = vars(parser.parse_args())
main(args)
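# Example invocation (hypothetical data layout); the precomputed feature
# HDF5 file must already exist under --data_root:
#
#     python store_kmeans_indexes.py --which_dataset imagenet \
#         --resolution 64 --feature_extractor selfsupervised \
#         --kmeans_subsampled 1000 --gpu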
| 29.029851
| 100
| 0.607198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,587
| 0.407969
|
c570fd6a05953760ae560c4fbed0f8ac9f2fd02d
| 100
|
py
|
Python
|
src/cattrs/errors.py
|
aha79/cattrs
|
50ba769c8349f5891b157d2bb7f06602822ac0a3
|
[
"MIT"
] | null | null | null |
src/cattrs/errors.py
|
aha79/cattrs
|
50ba769c8349f5891b157d2bb7f06602822ac0a3
|
[
"MIT"
] | null | null | null |
src/cattrs/errors.py
|
aha79/cattrs
|
50ba769c8349f5891b157d2bb7f06602822ac0a3
|
[
"MIT"
] | null | null | null |
from cattr.errors import StructureHandlerNotFoundError
__all__ = ["StructureHandlerNotFoundError"]
| 25
| 54
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.31
|
c5744b17de40e44fcacba60862bc64a6577cf8bb
| 4,873
|
py
|
Python
|
plugins/funcs.py
|
prxpostern/URLtoTG003
|
b41ef5e756193798d8f92ccaa55c0fd7ab5ef931
|
[
"MIT"
] | null | null | null |
plugins/funcs.py
|
prxpostern/URLtoTG003
|
b41ef5e756193798d8f92ccaa55c0fd7ab5ef931
|
[
"MIT"
] | null | null | null |
plugins/funcs.py
|
prxpostern/URLtoTG003
|
b41ef5e756193798d8f92ccaa55c0fd7ab5ef931
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from main import Config
from pyrogram import filters
from pyrogram import Client
#from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from urllib.parse import quote_plus, unquote
import math, os, time, datetime, aiohttp, asyncio, mimetypes, logging
from helpers.download_from_url import download_file, get_size
from helpers.file_handler import send_to_transfersh_async, progress
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from helpers.display_progress import progress_for_pyrogram, humanbytes
from helpers.tools import execute
from helpers.ffprobe import stream_creator
from helpers.thumbnail_video import thumb_creator
from helpers.url_uploader import leecher2
from helpers.video_renamer import rnv2
from helpers.audio_renamer import rna2
from helpers.file_renamer import rnf2
from helpers.vconverter import to_video2
from helpers.media_info import cinfo2
from helpers.link_info import linfo2
logger = logging.getLogger(__name__)
HELP_TXT = """
A simple Telegram bot to
upload files from **direct**, **Google Drive**, and **YouTube** links,
convert document media to streamable video,
and rename audio/video/document files.
/upload : reply to your URL.
`http://aaa.bbb.ccc/ddd.eee` | **fff.ggg**
or
`http://aaa.bbb.ccc/ddd.eee`
/c2v : reply to your document to convert it into streamable video.
/rnv : reply to your video. Example:
`/rnv | videoname`
/rna : reply to your audio. Use \"`-`\" to keep a field unchanged.
`/rna | audioname | title | artists`
`/rna | audioname`
`/rna | - | title`
`/rna | - | - | artists`
/rnf : reply to your document. Example:
`/rnf | filename.ext`
"""
@Client.on_message(filters.command(["start"]))
async def start(client , m):
"""Send a message when the command /start is issued."""
    await m.reply_text(text="Hi!\n\nSee /help for more info.")
@Client.on_message(filters.command(["help"]))
async def help(client , m):
"""Send a message when the command /help is issued."""
    await m.reply_text(text=HELP_TXT)
@Client.on_message(filters.private & filters.command(["rnv"]))
async def rnv1(client , u):
if u.from_user.id in Config.AUTH_USERS:
await rnv2(client , u)
elif not Config.AUTH_USERS:
await rnv2(client , u)
else:
        await u.reply_text(text="Sorry! You can't use this bot.\n\nDeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
return
@Client.on_message(filters.private & filters.command(["rna"]))
async def rna1(client , u):
if u.from_user.id in Config.AUTH_USERS:
await rna2(client , u)
elif not Config.AUTH_USERS:
await rna2(client , u)
else:
        await u.reply_text(text="Sorry! You can't use this bot.\n\nDeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
return
@Client.on_message(filters.private & filters.command(["rnf"]))
async def rnf1(client , u):
if u.from_user.id in Config.AUTH_USERS:
await rnf2(client , u)
elif not Config.AUTH_USERS:
await rnf2(client , u)
else:
        await u.reply_text(text="Sorry! You can't use this bot.\n\nDeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
return
@Client.on_message(filters.private & filters.command(["c2v"]))
async def to_video1(client , u):
if u.from_user.id in Config.AUTH_USERS:
await to_video2(client , u)
elif not Config.AUTH_USERS:
await to_video2(client , u)
else:
        await u.reply_text(text="Sorry! You can't use this bot.\n\nDeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
return
@Client.on_message(filters.private & (filters.audio | filters.document | filters.video))
async def cinfo1(client , m):
await cinfo2(client , m)
@Client.on_message(filters.private & filters.incoming & filters.text & (filters.regex('^(ht|f)tp*')))
async def linfo1(client , m):
await linfo2(client , m)
@Client.on_message(filters.private & filters.command(["upload"]))
async def leecher1(client , u):
if u.from_user.id in Config.AUTH_USERS:
await leecher2(client , u)
elif not Config.AUTH_USERS:
await leecher2(client , u)
else:
        await u.reply_text(text="Sorry! You can't use this bot.\n\nDeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
return
| 36.916667
| 195
| 0.692592
| 0
| 0
| 0
| 0
| 3,013
| 0.618305
| 2,402
| 0.49292
| 1,637
| 0.335933
|
c574ba0d5085fcc10f94dc14bafe60401b5587a7
| 2,304
|
py
|
Python
|
git_code_debt/repo_parser.py
|
cclauss/git-code-debt
|
6ced089857d3ccda4a00d274e85d7f26de0bdefd
|
[
"MIT"
] | null | null | null |
git_code_debt/repo_parser.py
|
cclauss/git-code-debt
|
6ced089857d3ccda4a00d274e85d7f26de0bdefd
|
[
"MIT"
] | null | null | null |
git_code_debt/repo_parser.py
|
cclauss/git-code-debt
|
6ced089857d3ccda4a00d274e85d7f26de0bdefd
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import contextlib
import shutil
import subprocess
import tempfile
from git_code_debt.util.iter import chunk_iter
from git_code_debt.util.subprocess import cmd_output
Commit = collections.namedtuple('Commit', ('sha', 'date'))
Commit.blank = Commit('0' * 40, 0)
COMMIT_FORMAT = '--format=%H%n%ct'
class RepoParser(object):
def __init__(self, git_repo):
self.git_repo = git_repo
self.tempdir = None
@contextlib.contextmanager
def repo_checked_out(self):
assert not self.tempdir
self.tempdir = tempfile.mkdtemp(suffix='temp-repo')
try:
subprocess.check_call((
'git', 'clone',
'--no-checkout', '--quiet', '--shared',
self.git_repo, self.tempdir,
))
yield
finally:
shutil.rmtree(self.tempdir)
self.tempdir = None
def get_commit(self, sha):
output = cmd_output(
'git', 'show', COMMIT_FORMAT, sha, cwd=self.tempdir,
)
sha, date = output.splitlines()[:2]
return Commit(sha, int(date))
def get_commits(self, since_sha=None):
"""Returns a list of Commit objects.
Args:
since_sha - (optional) A sha to search from
"""
assert self.tempdir
cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]
if since_sha:
commits = [self.get_commit(since_sha)]
cmd.append('{}..HEAD'.format(since_sha))
else:
commits = []
cmd.append('HEAD')
output = cmd_output(*cmd, cwd=self.tempdir)
for sha, date in chunk_iter(output.splitlines(), 2):
commits.append(Commit(sha, int(date)))
return commits
def get_original_commit(self, sha):
assert self.tempdir
output = cmd_output(
'git', 'show', sha, cwd=self.tempdir, encoding=None,
)
return output
def get_commit_diff(self, previous_sha, sha):
assert self.tempdir
output = cmd_output(
'git', 'diff', previous_sha, sha, '--no-renames',
cwd=self.tempdir, encoding=None,
)
return output
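# Minimal usage sketch (hypothetical repo path):
#
#     parser = RepoParser('/path/to/repo.git')
#     with parser.repo_checked_out():
#         commits = parser.get_commits()
#         diff = parser.get_commit_diff(commits[0].sha, commits[-1].sha)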
| 26.790698
| 74
| 0.588542
| 1,903
| 0.825955
| 423
| 0.183594
| 454
| 0.197049
| 0
| 0
| 315
| 0.136719
|
c574d6290b0c40bcbc5696cd5ebb36152641b976
| 215
|
py
|
Python
|
func_one.py
|
FoxProklya/Step-Python
|
67514509655e552fc5adcc7963b971ef6f0bb46a
|
[
"MIT"
] | null | null | null |
func_one.py
|
FoxProklya/Step-Python
|
67514509655e552fc5adcc7963b971ef6f0bb46a
|
[
"MIT"
] | null | null | null |
func_one.py
|
FoxProklya/Step-Python
|
67514509655e552fc5adcc7963b971ef6f0bb46a
|
[
"MIT"
] | null | null | null |
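# Piecewise function:
#   f(x) = 1 - (x + 2)^2   for x <= -2
#   f(x) = -x/2            for -2 < x <= 2
#   f(x) = (x - 2)^2 + 1   for x > 2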
def f(x):
if x <= -2:
f = 1 - (x + 2)**2
return f
if -2 < x <= 2:
f = -(x/2)
return f
if 2 < x:
f = (x - 2)**2 + 1
return f
x = int(input())
print(f(x))
| 14.333333
| 26
| 0.316279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|