blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c785d2e4b3ac49ac959fd7e958407cf973ca6e7c
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/erpnext/2017/4/product.py
|
d7afc3bfba6e3210089c690952f76794b1c68de6
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 3,344
|
py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, fmt_money, flt
from erpnext.shopping_cart.cart import _get_cart_quotation
from erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings \
import is_cart_enabled, get_shopping_cart_settings, show_quantity_in_website
from erpnext.accounts.doctype.pricing_rule.pricing_rule import get_pricing_rule_for_item
@frappe.whitelist(allow_guest=True)
def get_product_info(item_code):
    """Return price, stock and cart-quantity info for one website item.

    :param item_code: Item code of the website product.
    :return: dict with price, stock_qty, in_stock, uom, qty and
        show_stock_qty keys; empty dict when the shopping cart is disabled.
    """
    if not is_cart_enabled():
        return {}

    cart_quotation = _get_cart_quotation()
    # variants inherit warehouse/price from their template item
    template_item_code = frappe.db.get_value("Item", item_code, "variant_of")
    stock_status = get_qty_in_stock(item_code, template_item_code)

    price = get_price(item_code, template_item_code, cart_quotation.selling_price_list)
    if price:
        # format using the original currency code before replacing it below
        price["formatted_price"] = fmt_money(price["price_list_rate"], currency=price["currency"])
        if cint(frappe.db.get_default("hide_currency_symbol")):
            price["currency"] = ""
        else:
            price["currency"] = (frappe.db.get_value("Currency", price.currency, "symbol")
                or price.currency) or ""

    # quantity already placed in the logged-in user's cart
    qty = 0
    if frappe.session.user != "Guest":
        cart_items = cart_quotation.get({"item_code": item_code})
        if cart_items:
            qty = cart_items[0].qty

    return {
        "price": price,
        "stock_qty": stock_status.stock_qty,
        "in_stock": stock_status.in_stock,
        "uom": frappe.db.get_value("Item", item_code, "stock_uom"),
        "qty": qty,
        "show_stock_qty": show_quantity_in_website()
    }
def get_qty_in_stock(item_code, template_item_code):
    """Return stock availability for an item's website warehouse.

    Falls back to the template item's warehouse for variants.

    :param item_code: Item code to check.
    :param template_item_code: template of the item when it is a variant.
    :return: frappe._dict with in_stock (0/1) and stock_qty (raw SQL rows,
        or 0 when no website warehouse is configured).
    """
    # Bug fix: stock_qty and in_stock were previously unbound (NameError)
    # when no website warehouse was configured; default to "out of stock".
    in_stock = 0
    stock_qty = 0

    warehouse = frappe.db.get_value("Item", item_code, "website_warehouse")
    if not warehouse and template_item_code and template_item_code != item_code:
        warehouse = frappe.db.get_value("Item", template_item_code, "website_warehouse")

    if warehouse:
        stock_qty = frappe.db.sql("""select actual_qty from tabBin where
			item_code=%s and warehouse=%s""", (item_code, warehouse))
        if stock_qty:
            in_stock = 1 if stock_qty[0][0] > 0 else 0

    return frappe._dict({"in_stock": in_stock, "stock_qty": stock_qty})
def get_price(item_code, template_item_code, price_list, qty=1):
    """Fetch the Item Price row for an item on a price list and apply any
    shopping-cart pricing rule.

    Falls back to the template item's price for variants. Returns the
    (possibly discounted) first matching price row, or None when there is
    no price list or no Item Price record.
    """
    if not price_list:
        return None

    cart_settings = get_shopping_cart_settings()

    price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"],
        filters={"price_list": price_list, "item_code": item_code})
    if not price:
        # variant without its own price: fall back to the template item
        price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"],
            filters={"price_list": price_list, "item_code": template_item_code})
    if not price:
        return None

    pricing_rule = get_pricing_rule_for_item(frappe._dict({
        "item_code": item_code,
        "qty": qty,
        "transaction_type": "selling",
        "price_list": price_list,
        "customer_group": cart_settings.default_customer_group,
        "company": cart_settings.company,
        "conversion_rate": 1,
        "for_shopping_cart": True
    }))

    if pricing_rule:
        if pricing_rule.pricing_rule_for == "Discount Percentage":
            price[0].price_list_rate = flt(
                price[0].price_list_rate * (1.0 - (pricing_rule.discount_percentage / 100.0)))
        if pricing_rule.pricing_rule_for == "Price":
            price[0].price_list_rate = pricing_rule.price_list_rate

    return price[0]
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
4fed06b375f7b7273187a58d91f4dbef56e11511
|
061053268be2ec664f141d26399075476782fb71
|
/lib/commands/list_graphcool_events.py
|
d838b76427e23805db3b76761efb6cb70bf71a41
|
[] |
no_license
|
ChristChurchMayfair/ccm_talks_tools
|
7ebf83a295c1bb588534e84fb5f12188c42e34ce
|
10df7b6d3e875bda19b28db601a48827ef37796b
|
refs/heads/master
| 2020-03-30T02:51:44.617377
| 2019-02-06T10:49:29
| 2019-02-06T10:49:29
| 150,654,977
| 0
| 0
| null | 2019-02-06T10:49:30
| 2018-09-27T22:28:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
import json
import click
import yaml
from graphqlclient import GraphQLClient
from lib.graphql_queries import list_speakers, list_events
from lib.model.event import Event
from lib.model.speaker import Speaker
@click.command()
@click.option('--graphcoolcredsfile', default=".graphcoolcreds.yml", help='A file containing Graphcool credentials.')
@click.option('--graphcoolserviceid', default="cjkqvvoxy2pyy0175cdmdy1mz", help='The Graphcool service id to query.')
@click.option('--count', default=200, help='The number of events to show')
def list_graphcool_events(graphcoolcredsfile, graphcoolserviceid, count):
    """Print one line per event fetched from a Graphcool service."""
    # yaml.safe_load avoids arbitrary-object construction on untrusted YAML
    # (plain yaml.load without a Loader also raises TypeError on PyYAML >= 6);
    # the with-block ensures the credentials file handle is closed.
    with open(graphcoolcredsfile) as creds_file:
        graphcool_creds = yaml.safe_load(creds_file)
    client = GraphQLClient('https://api.graph.cool/simple/v1/{}'.format(graphcoolserviceid))
    client.inject_token(graphcool_creds['graphcooltoken'])
    event_list_results = json.loads(client.execute(list_events(), {"number": count}))
    if event_list_results['data']:
        event_data = event_list_results['data']['allEvents']
        events_list = map(lambda event: Event.fromGraphCoolData(event), event_data)
        for event in events_list:
            print(event.one_line())
    else:
        print("No data returned!")
|
[
"tduckering@apple.com"
] |
tduckering@apple.com
|
5482761634d54af947e99daecefee877583a2354
|
482ed16cd1c8d721e98a9c460555802f7cce8906
|
/run-tests/t215.py
|
27cadff3837c3630b1d0c5d1e14f0d436502eb85
|
[
"MIT"
] |
permissive
|
forkcodeaiyc/skulpt_parser
|
ea2347b2a452476854cf03412474fae63bca31c0
|
dd592e9b91bcbbe0c5cfdb5c2da0fb5ae604a428
|
refs/heads/master
| 2023-09-04T11:32:09.760317
| 2021-10-11T22:58:18
| 2021-10-11T22:58:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
def wee(waa, woo=False, wii=True):
    """Echo the arguments back as a 4-tuple tagged with "OK"."""
    return ("OK", waa, woo, wii)


# Exercise positional, mixed, and keyword-only call styles.
for combo in (
    wee("stuff"),
    wee("stuff", "dog"),
    wee("stuff", "dog", "cat"),
    wee("stuff", wii="lamma"),
    wee(wii="lamma", waa="pocky"),
    wee(wii="lamma", waa="pocky", woo="blorp"),
):
    print(combo)
|
[
"albert-jan.nijburg@babylonhealth.com"
] |
albert-jan.nijburg@babylonhealth.com
|
84b131edd610e480863f8b33c7cc96c4e6f6c99f
|
0c7b52370ab1ee5c908e3aa9d1e4cdef72582273
|
/app/handlers/base_handler.py
|
f6c294c2f4d749774f2da27738fae67e958f7788
|
[] |
no_license
|
aakanksha4695/Discussion-Board-Api
|
ff9a0ec13a8ec304cee3ae7ba483554c6a03f28b
|
08c09f332e637a2e74b7af7b28b91c585e635ec3
|
refs/heads/master
| 2022-10-07T20:32:09.648546
| 2019-12-17T20:59:43
| 2019-12-17T20:59:43
| 228,700,392
| 0
| 0
| null | 2022-09-16T18:15:03
| 2019-12-17T20:57:56
|
Python
|
UTF-8
|
Python
| false
| false
| 430
|
py
|
import logging
from flask_restful import Resource
from flask import request
from app import db
from app import flask_app
from app.config import Config
from flask import g
logger = logging.getLogger(__name__)
class BaseHandler(Resource):
    """Common base for the API resources; standardizes JSON responses."""

    def return_json(self, status=200, msg="Success"):
        """Build a (payload, http_status) pair in the standard envelope."""
        payload = {"status": status, "msg": msg}
        return payload, status
class Ping(BaseHandler):
    """Health-check endpoint: GET answers with the default success envelope."""

    def get(self):
        """Handle GET by returning the standard 200/Success response."""
        return self.return_json()
|
[
"ekwinder.singh@mswipe.com"
] |
ekwinder.singh@mswipe.com
|
9cd162832c1cf32bcfebf3df9a3d6104f144388f
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/create_dns_name_req.py
|
c1802c0c024f9ba5a8177adbe67381df209d2c32
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,100
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateDnsNameReq:
    """Request body model for creating a DNS name.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'dns_type': 'str'
    }

    attribute_map = {
        'dns_type': 'dns_type'
    }

    def __init__(self, dns_type=None):
        """CreateDnsNameReq

        The model defined in huaweicloud sdk

        :param dns_type: Domain name type; currently only ``private`` is supported.
        :type dns_type: str
        """
        self._dns_type = None
        self.discriminator = None
        self.dns_type = dns_type

    @property
    def dns_type(self):
        """Gets the dns_type of this CreateDnsNameReq.

        Domain name type; currently only ``private`` is supported.

        :return: The dns_type of this CreateDnsNameReq.
        :rtype: str
        """
        return self._dns_type

    @dns_type.setter
    def dns_type(self, dns_type):
        """Sets the dns_type of this CreateDnsNameReq.

        Domain name type; currently only ``private`` is supported.

        :param dns_type: The dns_type of this CreateDnsNameReq.
        :type dns_type: str
        """
        self._dns_type = dns_type

    def to_dict(self):
        """Serialize the model into a plain dict, masking sensitive fields."""
        def _convert(item):
            # recurse into nested SDK models, pass primitives through
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for name, _ in six.iteritems(self.openapi_types):
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {key: _convert(val) for key, val in value.items()}
            else:
                result[name] = "****" if name in self.sensitive_list else value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when the other object is a CreateDnsNameReq with equal state."""
        if not isinstance(other, CreateDnsNameReq):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
e6ea0e58b0ba51b6bedac8cd7d7b771f1d5db5b1
|
fbb3a5eb78b636f9a676e7b06b59e7f89a9d4cad
|
/nonlinear_solvers/steepest_descent.py
|
5a84be513445970672297ebea4a762bdfec576a3
|
[] |
no_license
|
prhorn/python_machine_learning
|
b1e6f30ce874c86e8ea1cc935a681851ce7a82f1
|
3cd13a4d1beec8d580a1396539ab22eb49f9925f
|
refs/heads/master
| 2021-01-10T04:38:06.388478
| 2016-02-21T21:57:37
| 2016-02-21T21:57:37
| 44,710,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,142
|
py
|
import numpy as np
import sys
import math
from line_search import line_search
#simple steepest descent with line search
class steepest_descent:
    """Simple steepest-descent minimizer with line search.

    Attributes:
        comment        description of the most recent update
        error          current RMS gradient (convergence measure)
        the_problem    object providing problem-specific operations: value(),
                       gradient(), update(), tolerance,
                       ls_origin_to_current_pos(), move_to_ls_origin()
        iprint         integer controlling printing
        value          current value of the objective function
        ls             the line_search object
        ls_param_pref  integer selecting line search parameters
        iteration      current iteration count (tested against 0 to avoid
                       redundant work on the first step)
        direction      current search direction
    """

    def __init__(self, the_problem_, ls_param_pref_=2, iprint_=0):
        """Store the problem object, build the line search, and reset state."""
        self.the_problem = the_problem_
        self.iprint = iprint_
        self.ls_param_pref = ls_param_pref_
        self.ls = line_search(self.ls_param_pref, self.iprint)
        self.reset()

    def reset(self):
        """Reset all iteration state and start a fresh line search."""
        self.ls.new_line()
        self.error = 777.77  # sentinel: no step taken yet
        self.value = 777.77
        self.comment = " "
        self.iteration = 0
        # Bug fix: np.float was a deprecated alias for the builtin float and
        # was removed in NumPy 1.24; use float directly (dtype is float64).
        self.direction = np.array([], dtype=float)

    def next_step(self):
        """Take one steepest-descent / line-search step.

        :return: True if converged (RMS gradient below the_problem.tolerance
            or an empty gradient), otherwise False after updating position.
        """
        # get objective function value and gradient at current position
        self.value = self.the_problem.value()  # returns float
        grad = self.the_problem.gradient()  # returns np.array
        # empty gradient should trigger instant convergence
        if grad.size == 0:
            self.error = 0.0
            self.the_problem.ls_origin_to_current_pos()  # to old orbs
            return True
        # see if we have converged (RMS of the gradient)
        self.error = math.sqrt(np.dot(grad, grad) / float(grad.size))
        if self.error < self.the_problem.tolerance:
            # set the origin of the line search to the current position
            # (make all variables in the_problem consistent) and declare victory
            self.the_problem.ls_origin_to_current_pos()
            return True
        self.comment = "Line Search Step"
        if self.iteration == 0:
            # decide the first search direction; we are at the line search origin
            self.direction = -1.0 * grad
            self.comment = "New Steepest Descent Direction"
            self.the_problem.ls_origin_to_current_pos()
        # see if we are done along the current search direction, defining the
        # univariate function phi(alpha)
        phi_prime = np.dot(self.direction, grad)
        done_this_dir, next_alpha = self.ls.next_step(self.value, phi_prime)
        # NOTE(review): the original `if (done_this_dir):` test was disabled
        # in favor of `if (True)`, so a new steepest-descent direction is
        # taken on every iteration; kept as-is to preserve behavior.
        if True:
            # we have either satisfied the Wolfe conditions at the current
            # position or deemed the search fruitless; either way next_alpha
            # corresponds to our current position. Get a new search direction.
            self.direction = -1.0 * grad
            self.comment = "New Steepest Descent Direction"
            # reset the line search
            self.ls.new_line()
            # set the current position as the origin of the next line search
            self.the_problem.ls_origin_to_current_pos()
            # we need to make a move so that we have an update this iteration
            # (we already know this point is not the global answer);
            # next_step for iteration 0 of the line search always returns
            # False, but calling it saves the gradient and value at the origin
            phi_prime = np.dot(self.direction, grad)  # same gradient, new direction
            done_this_dir, next_alpha = self.ls.next_step(self.value, phi_prime)
        # update our position: go to the origin of the line search ...
        self.the_problem.move_to_ls_origin()
        # ... and apply the suggested step length along the search direction
        disp = self.direction * next_alpha
        self.the_problem.update(disp)
        self.iteration = self.iteration + 1
        return False
|
[
"prhorn@stardust.local"
] |
prhorn@stardust.local
|
3e4b152c1448db9193873505bc8740f5238ade39
|
f465fdcc962a00b59a29f16d12bea5b781ca22aa
|
/final/comparison_plot.py
|
7f885f324fa87b64a43f772c60fa05426fd8ceba
|
[] |
no_license
|
PyaePhyoKhant/RL-with-open-AI-gym
|
4c509229c199756d0722fd7bc0b707f102316186
|
21e247992a5283b531fd9914db0cd3e805ee5ae6
|
refs/heads/master
| 2020-03-21T16:30:09.760949
| 2018-10-06T10:22:34
| 2018-10-06T10:22:34
| 138,773,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
import matplotlib.pyplot as plt


def _read_scores(path):
    """Read one float score per line from *path*."""
    with open(path, 'r') as f:
        return [float(line) for line in f]


def _plot_comparison(without_path, with_path, phase, out_path):
    """Plot without/with-optimization score curves, save and show the figure.

    :param without_path: text file of scores without optimization
    :param with_path: text file of scores with optimization
    :param phase: label inserted into the title ('training' or 'testing')
    :param out_path: PNG file the figure is saved to
    """
    plt.plot(_read_scores(without_path), 'b', label='without optimization')
    plt.plot(_read_scores(with_path), 'r', label='with optimization')
    plt.legend()
    plt.title('Effect of optimization ({})'.format(phase))
    plt.xlabel('episode')
    plt.ylabel('score')
    plt.savefig(out_path)
    plt.show()


# The original script repeated this read/plot sequence verbatim for the
# training and testing result files; the duplication is factored out above.
_plot_comparison('without_optimize_training.txt', 'with_optimize_training.txt',
                 'training', 'result_training.png')
_plot_comparison('without_optimize_testing.txt', 'with_optimize_testing.txt',
                 'testing', 'result_testing.png')
|
[
"lanmadawsmith@gmail.com"
] |
lanmadawsmith@gmail.com
|
a8cedb3cad0f4917561f56577e9d04b2b2c023e2
|
4d717d2c8147036483dce1cdd131b27d9e912e19
|
/file_test2.py
|
fe9e4891d91e9695f41229e3c049b187d7dd8cea
|
[] |
no_license
|
yRxf/python
|
e21fea4bd8ba60aba32a06da9ea7c8e040fb34b4
|
4442a473851c5bf07c280c72ed3c3caadcbfbe62
|
refs/heads/master
| 2022-11-24T06:39:09.723809
| 2020-06-30T16:00:45
| 2020-06-30T16:00:45
| 258,064,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
class file():
    """Helper that opens a named file for writing.

    A missing file is created; for an existing file the user is asked
    interactively whether the current content should be truncated.
    """

    def __init__(self, filename):
        self.filename = filename
        # placeholder until file_write() opens the real handle
        self.file_ = ''

    def file_write(self):
        """Open self.filename for writing and return the open handle.

        Missing file -> create ('w'). Existing file -> ask the user whether
        to append ('a') or truncate ('w').
        """
        try:
            # probe for existence by opening (EAFP); closes immediately
            with open(self.filename) as self.file_:
                pass
        except FileNotFoundError:
            self.file_ = open(self.filename, 'w')
            return self.file_
        else:
            flag = input("存在文件是否需要清空内容(Y/N)")
            if 'N' == flag:
                self.file_ = open(self.filename, 'a')
                return self.file_
            else:
                self.file_ = open(self.filename, 'w')
                return self.file_

    def __del__(self):
        # Bug fix: file_ starts out as the string '' (which has no close()),
        # so destroying an instance whose file_write() was never called used
        # to raise AttributeError inside __del__. Only close real handles.
        if hasattr(self.file_, 'close'):
            self.file_.close()
            print("文件已关闭!")
# Demo: open (or create) test1.txt via the helper class and write to it.
# NOTE(review): if test1.txt already exists, file_write() blocks on input().
file_ = file("test1.txt")
file_f = file_.file_write()
file_f.write("123")
|
[
"noreply@github.com"
] |
yRxf.noreply@github.com
|
84bfb6b46fb5bd9b3d3218a3ec40b3cfe8cbb87e
|
272e8dce35a6331684d5941d664ac24d1140da86
|
/src/main_naive_bayes.py
|
c1892fd6d596a909e55b4e18efa065aad30f07d3
|
[] |
no_license
|
BrainBroader/IMDbClassifier
|
1ebdf27318224af317b7618f8900bb7c4af35779
|
29279a3e15cd77f118a61fad13b5a00351a96aaf
|
refs/heads/master
| 2023-02-27T00:55:42.401249
| 2021-02-04T21:04:11
| 2021-02-04T21:04:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,889
|
py
|
import sys
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from file import read_data
from models.naive_bayes_clf import MultinomialNaiveBayes
from metrics import accuracy, precision_recall, f1
def main():
    """Train and evaluate a Multinomial Naive Bayes classifier on IMDb data.

    Expects sys.argv[1] to be a directory containing train\\ and test\\
    sub-directories (Windows-style separators, as in the original).
    Produces learning-curve, precision-recall and F1 plots.
    """
    train_path = sys.argv[1] + '\\train\\'
    test_path = sys.argv[1] + '\\test\\'

    # load training data
    print(f'[INFO] - Loading training data from {train_path}')
    res = read_data(train_path)
    train_data = res[0]
    train_target = res[1]
    print(f'[INFO] - Total train data: {len(train_data)}')

    print(f'[INFO] - Loading testing data from {test_path}')
    res = read_data(test_path)
    test_data = res[0]
    test_target = res[1]
    print(f'[INFO] - Total test data: {len(test_data)}')

    # 10% of training data will go to developer data set
    print(f'[INFO] - Splitting training data into training data and developer data (keeping 10% for training data)')
    res = train_test_split(train_data, train_target, test_size=0.1)
    train_data = res[0]
    train_target = res[2]
    print(f'[INFO] - Total training data after split {len(train_data)}')
    dev_data = res[1]
    dev_target = res[3]
    print(f'[INFO] - Total developer data {len(dev_data)}')

    nb = MultinomialNaiveBayes()
    accuracy_train = []
    accuracy_test = []
    # learning-curve fractions; defined once instead of repeating the literal
    # here and in both plt.plot calls below
    train_sizes = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    # fraction of features used by the classifier (loop-invariant, hoisted)
    feature_size = 0.007
    probabilities = None  # probabilities of the last test-set prediction
    for counter, train_size in enumerate(train_sizes, start=1):
        print(f'\n[INFO] - Iteration No.{counter} (using {int(train_size*100)}% of 90% of train data).')
        if train_size != 1.0:
            res = train_test_split(train_data, train_target, train_size=train_size, shuffle=False)
            fold_data = res[0]
            fold_target = res[2]
        else:
            # train_test_split rejects train_size=1.0, so use the full set
            fold_data = train_data
            fold_target = train_target

        print(f'[INFO] - Fitting Multinomial Naive Bayes classifier using {feature_size*100:.1f}% of features...')
        nb.fit(fold_data, fold_target, feature_size)

        print(f'[INFO] - Predicting with Multinomial Naive Bayes classifier using train data...')
        nb_targets, _ = nb.predict(fold_data)
        accuracy_score = accuracy(fold_target, nb_targets)
        accuracy_train.append(accuracy_score)
        print(f'[INFO] - Accuracy: {accuracy_score}')

        print(f'[INFO] - Predicting with Multinomial Naive Bayes classifier using developer data...')
        nb_targets, _ = nb.predict(dev_data)
        accuracy_score = accuracy(dev_target, nb_targets)
        print(f'[INFO] - Accuracy: {accuracy_score}')

        print(f'[INFO] - Predicting with Multinomial Naive Bayes classifier using test data...')
        nb_targets, probabilities = nb.predict(test_data)
        accuracy_score = accuracy(test_target, nb_targets)
        accuracy_test.append(accuracy_score)
        print(f'[INFO] - Accuracy: {accuracy_score}')

    learning_curves_plot = plt.figure(1)
    plt.plot(train_sizes, accuracy_train, label='train')
    plt.plot(train_sizes, accuracy_test, label='test')
    plt.title('Learning Curves (Multinomial Naive Bayes)')
    plt.legend(loc='lower right')
    plt.xlabel('Number of Train Data')
    plt.ylabel('Accuracy')

    precision_recall_plot = plt.figure(2)
    average_precision, average_recall, thresholds = precision_recall(probabilities, test_target, 10)
    plt.step(average_recall, average_precision, where='post')
    plt.title('Precision-Recall Curve (Multinomial Naive Bayes)')
    plt.xlabel('Recall')
    plt.ylabel('Precision')

    f1_plot = plt.figure(3)
    f1_score = f1(average_precision, average_recall)
    plt.plot(thresholds, f1_score)
    plt.title('F1 Curve (Multinomial Naive Bayes)')
    plt.xlabel('Thresholds')
    plt.ylabel('F1 Measure')

    plt.show()


if __name__ == '__main__':
    main()
|
[
"lambroslntz15@gmail.com"
] |
lambroslntz15@gmail.com
|
9947406052c6a4c55532c5c35f899f462225b503
|
0a47f736deacb7e8d55adb807575617f873c4787
|
/simulate_place_cell_Type_A_shifted_inh.py
|
01b6da3aedcc2bbbb2bad0518da135201676bacd
|
[] |
no_license
|
domni/CA1Sim
|
9bd23c746c325d3387b54303dccc0dcbb4f5ba1f
|
37b90d15e7eec7073736460ac0f5ab26ad97b70c
|
refs/heads/master
| 2021-01-15T15:23:47.726695
| 2016-07-18T02:18:02
| 2016-07-18T02:18:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,920
|
py
|
__author__ = 'milsteina'
from specify_cells import *
from plot_results import *
import random
import sys
"""
In this version of the simulation, phase precession of CA3 inputs is implemented using the method from Chadwick et al.,
Elife, 2015, which uses a circular gaussian with a phase sensitivity factor that effectively compresses the range of
phases within each theta cycle that each input is active, which will reduce jitter across within-cycle input sequences.
In this version of the simulation, every inhibitory component is shifted by 180 degrees to examine the effect on phase
precession.
"""
morph_filename = 'EB2-late-bifurcation.swc'
#mech_filename = '020516 altered km2 rinp - ampa nmda_kin5'
mech_filename = '043016 Type A - km2_NMDA_KIN5_Pr'
# Optional positional command-line arguments; each falls back to a default.
# sys.argv[1]: RNG seed controlling synapse placement
if len(sys.argv) > 1:
    synapses_seed = int(sys.argv[1])
else:
    synapses_seed = 1
# sys.argv[2]: number of excitatory synapses
if len(sys.argv) > 2:
    num_exc_syns = int(sys.argv[2])
else:
    num_exc_syns = 3000
# sys.argv[3]: number of inhibitory synapses
if len(sys.argv) > 3:
    num_inh_syns = int(sys.argv[3])
else:
    num_inh_syns = 500
# whether to modulate the peak rate of all inhibitory inputs (0 = no, 1 = out of field at track start, 2 = in field)
# input_field_width)
if len(sys.argv) > 4:
    mod_inh = int(sys.argv[4])
else:
    mod_inh = 0
# the synaptic AMPAR conductances at in-field inputs are multiplied by a factor with this value at the peak of the
# field, and decays with cosine spatial modulation away from the field
if len(sys.argv) > 5:
    mod_weights = float(sys.argv[5])
else:
    mod_weights = 2.5
# allows parallel computation of multiple trials for the same spines with the same peak_locs, but with different
# input spike trains and stochastic synapses for each trial
if len(sys.argv) > 6:
    trial_seed = int(sys.argv[6])
else:
    trial_seed = 0
# Unique output file name encoding timestamp, pid and all run parameters.
# NOTE(review): datetime and os appear to come from the star imports above -- confirm.
rec_filename = 'output'+datetime.datetime.today().strftime('%m%d%Y%H%M')+'-pid'+str(os.getpid())+'-seed'+\
               str(synapses_seed)+'-e'+str(num_exc_syns)+'-i'+str(num_inh_syns)+'-mod_inh'+str(mod_inh)+\
               '-shift_inh_'+str(mod_weights)+'_'+str(trial_seed)
def get_dynamic_theta_phase_force(phase_ranges, peak_loc, input_field_duration, stim_t, dt):
    """
    Expects a list of tuples containing times and phases relative to peak_loc and the non-modulated phase preference
    (zero degrees). Returns a waveform of phase vs time.
    :param phase_ranges: list of tuple (ms, degrees)
    :param peak_loc: center of the input field (ms)
    :param input_field_duration: width of the input field (ms)
    :param stim_t: time axis of the stimulus (ms)
    :param dt: time step (ms)
    :return: :class: 'np.array'
    """
    start_phase_val = phase_ranges[0][1] * 2. * np.pi / 360.  # convert degrees to radians
    end_phase_val = phase_ranges[-1][1] * 2. * np.pi / 360.  # convert degrees to radians
    phase_force = np.ones_like(stim_t) * start_phase_val
    # build the piecewise phase gradient across consecutive (time, phase) pairs
    phase_gradient = np.array([])
    for i in range(len(phase_ranges) - 1):
        t0 = phase_ranges[i][0]
        t1 = phase_ranges[i + 1][0]
        phase0 = phase_ranges[i][1] * 2. * np.pi / 360.  # convert degrees to radians
        phase1 = phase_ranges[i + 1][1] * 2. * np.pi / 360.
        del_t = t1 - t0
        del_phase = phase1 - phase0
        if abs(del_phase) > 0.:
            del_phase = del_phase / del_t * dt
            this_range_piece = np.arange(phase0, phase1, del_phase)
        else:
            # Bug fix: np.ones requires an integer shape; del_t / dt is a
            # float, which raises TypeError on modern NumPy. int() matches
            # the truncation older NumPy applied to float shapes.
            this_range_piece = np.ones(int(del_t / dt)) * phase0
        phase_gradient = np.append(phase_gradient, this_range_piece)
    if stim_t[0] <= peak_loc - input_field_duration * 0.5 <= stim_t[-1]:
        # field onset is inside the stimulus window: anchor gradient at onset
        phase_start = np.where(peak_loc - input_field_duration * 0.5 >= stim_t)[0]
        # NOTE(review): np.any() on an index array is False when the only
        # index is 0, skipping the assignment in that edge case -- confirm.
        if np.any(phase_start):
            phase_start = phase_start[-1]
            phase_end = min(len(stim_t), phase_start + len(phase_gradient))
            phase_force[:phase_start] = start_phase_val
            phase_force[phase_start:phase_end] = phase_gradient[:phase_end - phase_start]
            phase_force[phase_end:] = end_phase_val
    elif stim_t[0] <= peak_loc + input_field_duration * 0.5 <= stim_t[-1]:
        # only the field offset is inside the window: anchor gradient at offset
        phase_end = np.where(peak_loc + input_field_duration * 0.5 >= stim_t)[0]
        if np.any(phase_end):
            phase_end = phase_end[-1]
            phase_start = max(0, phase_end - len(phase_gradient))
            phase_force[:phase_start] = start_phase_val
            phase_force[phase_start:phase_end] = phase_gradient[-(phase_end - phase_start):]
            phase_force[phase_end:] = end_phase_val
    return phase_force
def run_trial(simiter):
    """
    Run one simulation trial: build inhomogeneous-Poisson excitatory and
    inhibitory spike trains, play them into their synapses, run the cell
    simulation, and write all trains plus the cell's spike output to
    <rec_filename>-working.hdf5.

    Relies on module-level state configured in the script body below
    (sim, cell, stim_t, stim_exc_syns, stim_inh_syns, peak_locs, mod_inh, ...).

    :param simiter: int; trial index, also used to seed the local random generator
    """
    local_random.seed(simiter)
    # one global theta phase offset shared by all inputs in this trial
    global_phase_offset = local_random.uniform(-np.pi, np.pi)
    with h5py.File(data_dir+rec_filename+'-working.hdf5', 'a') as f:
        f.create_group(str(simiter))
        f[str(simiter)].create_group('train')
        f[str(simiter)].create_group('inh_train')
        f[str(simiter)].attrs['phase_offset'] = global_phase_offset / 2. / np.pi * global_theta_cycle_duration
    if mod_inh > 0:
        # window during which a subset of inhibitory inputs will be silenced
        if mod_inh == 1:
            mod_inh_start = int(track_equilibrate / dt)
        elif mod_inh == 2:
            mod_inh_start = int((track_equilibrate + modulated_field_center - 0.3 * input_field_duration) / dt)
        sim.parameters['mod_inh_start'] = stim_t[mod_inh_start]
        mod_inh_stop = mod_inh_start + int(inhibitory_manipulation_duration * input_field_duration / dt)
        sim.parameters['mod_inh_stop'] = stim_t[mod_inh_stop]
    index = 0
    for group in stim_exc_syns:
        for i, syn in enumerate(stim_exc_syns[group]):
            # the stochastic sequence used for each synapse is unique for each trial,
            # up to 1000 input spikes per spine
            if excitatory_stochastic:
                syn.randObj.seq(rand_exc_seq_locs[group][i]+int(simiter*1e3))
            # spatial (place-field) rate envelope for this input
            gauss_force = excitatory_peak_rate[group] * np.exp(-((stim_t - peak_locs[group][i]) / gauss_sigma)**2.)
            if group in excitatory_precession_range:
                # phase-precessing input: preferred theta phase varies across the field
                phase_force = get_dynamic_theta_phase_force(excitatory_precession_range[group], peak_locs[group][i],
                                                            input_field_duration, stim_t, stim_dt)
                theta_force = np.exp(excitatory_theta_phase_tuning_factor[group] * np.cos(phase_force +
                                     excitatory_theta_phase_offset[group] - 2. * np.pi * stim_t /
                                     global_theta_cycle_duration + global_phase_offset))
            else:
                theta_force = np.exp(excitatory_theta_phase_tuning_factor[group] *
                                     np.cos(excitatory_theta_phase_offset[group] - 2. * np.pi * stim_t /
                                            global_theta_cycle_duration + global_phase_offset))
            # rescale the theta modulation to the range [1 - depth, 1]
            theta_force -= np.min(theta_force)
            theta_force /= np.max(theta_force)
            theta_force *= excitatory_theta_modulation_depth[group]
            theta_force += 1. - excitatory_theta_modulation_depth[group]
            stim_force = np.multiply(gauss_force, theta_force)
            train = get_inhom_poisson_spike_times(stim_force, stim_t, dt=stim_dt, generator=local_random)
            # shift spike times past the equilibration periods before playback
            syn.source.play(h.Vector(np.add(train, equilibrate + track_equilibrate)))
            with h5py.File(data_dir+rec_filename+'-working.hdf5', 'a') as f:
                f[str(simiter)]['train'].create_dataset(str(index), compression='gzip', compression_opts=9, data=train)
                f[str(simiter)]['train'][str(index)].attrs['group'] = group
                f[str(simiter)]['train'][str(index)].attrs['index'] = syn.node.index
                f[str(simiter)]['train'][str(index)].attrs['type'] = syn.node.parent.parent.type
                f[str(simiter)]['train'][str(index)].attrs['peak_loc'] = peak_locs[group][i]
            index += 1
    index = 0
    for group in stim_inh_syns:
        for syn in stim_inh_syns[group]:
            # inhibitory inputs are theta-modulated but carry no place field
            inhibitory_theta_force = np.exp(inhibitory_theta_phase_tuning_factor[group] *
                                            np.cos(inhibitory_theta_phase_offset[group] - 2. * np.pi * stim_t /
                                                   global_theta_cycle_duration + global_phase_offset))
            inhibitory_theta_force -= np.min(inhibitory_theta_force)
            inhibitory_theta_force /= np.max(inhibitory_theta_force)
            inhibitory_theta_force *= inhibitory_theta_modulation_depth[group]
            inhibitory_theta_force += 1. - inhibitory_theta_modulation_depth[group]
            inhibitory_theta_force *= inhibitory_peak_rate[group]
            if mod_inh > 0 and group in inhibitory_manipulation_fraction and syn in manipulated_inh_syns[group]:
                # silence this input for the duration of the manipulation window
                inhibitory_theta_force[mod_inh_start:mod_inh_stop] = 0.
            train = get_inhom_poisson_spike_times(inhibitory_theta_force, stim_t, dt=stim_dt,
                                                  generator=local_random)
            syn.source.play(h.Vector(np.add(train, equilibrate + track_equilibrate)))
            with h5py.File(data_dir+rec_filename+'-working.hdf5', 'a') as f:
                f[str(simiter)]['inh_train'].create_dataset(str(index), compression='gzip', compression_opts=9,
                                                            data=train)
                f[str(simiter)]['inh_train'][str(index)].attrs['group'] = group
                f[str(simiter)]['inh_train'][str(index)].attrs['index'] = syn.node.index
                f[str(simiter)]['inh_train'][str(index)].attrs['loc'] = syn.loc
                f[str(simiter)]['inh_train'][str(index)].attrs['type'] = syn.node.type
            index += 1
    sim.run(v_init)
    with h5py.File(data_dir+rec_filename+'-working.hdf5', 'a') as f:
        sim.export_to_file(f, simiter)
        if excitatory_stochastic:
            # record which stochastic release events succeeded, per excitatory synapse
            f[str(simiter)].create_group('successes')
            index = 0
            for group in stim_exc_syns:
                for syn in stim_exc_syns[group]:
                    f[str(simiter)]['successes'].create_dataset(str(index), compression='gzip', compression_opts=9,
                                                                data=np.subtract(syn.netcon('AMPA_KIN').get_recordvec().to_python(),
                                                                                 equilibrate + track_equilibrate))
                    index += 1
        # save the spike output of the cell, removing the equilibration offset
        f[str(simiter)].create_dataset('output', compression='gzip', compression_opts=9,
                                       data=np.subtract(cell.spike_detector.get_recordvec().to_python(),
                                                        equilibrate + track_equilibrate))
# --- simulation parameters ---------------------------------------------------
NMDA_type = 'NMDA_KIN5'
equilibrate = 250.  # time to steady-state
global_theta_cycle_duration = 150.  # (ms)
input_field_width = 20  # (theta cycles per 6 standard deviations)
input_field_duration = input_field_width * global_theta_cycle_duration
track_length = 2.5  # field widths
track_duration = track_length * input_field_duration
track_equilibrate = 2. * global_theta_cycle_duration
duration = equilibrate + track_equilibrate + track_duration  # input_field_duration
# peak rates (per group) and theta modulation of the excitatory input populations
excitatory_peak_rate = {'CA3': 40., 'ECIII': 40.}
excitatory_theta_modulation_depth = {'CA3': 0.7, 'ECIII': 0.7}
# From Chadwick et al., ELife 2015
excitatory_theta_phase_tuning_factor = {'CA3': 0.8, 'ECIII': 0.8}
# CA3 inputs phase-precess across the place field; no entry for ECIII
excitatory_precession_range = {}
excitatory_precession_range['CA3'] = [(-input_field_duration*0.5, 180.), (-input_field_duration*0.35, 180.),
                                      (input_field_duration*0.35, -180.), (input_field_duration*0.5, -180.)]  # (ms, degrees)
excitatory_theta_phase_offset = {}
excitatory_theta_phase_offset['CA3'] = 165. / 360. * 2. * np.pi  # radians
excitatory_theta_phase_offset['ECIII'] = 0. / 360. * 2. * np.pi  # radians
excitatory_stochastic = 1  # flag gating the stochastic-release code paths below
# fraction of each inhibitory population eligible for silencing during the mod_inh window
inhibitory_manipulation_fraction = {'perisomatic': 0.325, 'axo-axonic': 0.325, 'apical dendritic': 0.325,
                                    'distal apical dendritic': 0.325, 'tuft feedback': 0.325}
inhibitory_manipulation_duration = 0.6  # Ratio of input_field_duration
inhibitory_peak_rate = {'perisomatic': 40., 'axo-axonic': 40., 'apical dendritic': 40., 'distal apical dendritic': 40.,
                        'tuft feedforward': 40., 'tuft feedback': 40.}
inhibitory_theta_modulation_depth = {'perisomatic': 0.5, 'axo-axonic': 0.5, 'apical dendritic': 0.5,
                                     'distal apical dendritic': 0.5, 'tuft feedforward': 0.5, 'tuft feedback': 0.5}
inhibitory_theta_phase_tuning_factor = {'perisomatic': 0.6, 'axo-axonic': 0.6, 'apical dendritic': 0.6,
                                        'distal apical dendritic': 0.6, 'tuft feedforward': 0.6, 'tuft feedback': 0.6}
inhibitory_precession_range = {}  # intentionally empty: no precession entries for inhibition
# preferred theta phase per interneuron class (radians)
inhibitory_theta_phase_offset = {}
inhibitory_theta_phase_offset['perisomatic'] = 315. / 360. * 2. * np.pi  # Like PV+ Basket
inhibitory_theta_phase_offset['axo-axonic'] = 225. / 360. * 2. * np.pi  # Vargas et al., ELife, 2014
inhibitory_theta_phase_offset['apical dendritic'] = 20. / 360. * 2. * np.pi  # Like PYR-layer Bistratified
inhibitory_theta_phase_offset['distal apical dendritic'] = 0. / 360. * 2. * np.pi  # Like SR/SLM Border Cells
inhibitory_theta_phase_offset['tuft feedforward'] = 160. / 360. * 2. * np.pi  # Like Neurogliaform
inhibitory_theta_phase_offset['tuft feedback'] = 20. / 360. * 2. * np.pi  # Like SST+ O-LM
stim_dt = 0.02
dt = 0.02
v_init = -67.
syn_types = ['AMPA_KIN', NMDA_type]
local_random = random.Random()
# choose a subset of synapses to stimulate with inhomogeneous poisson rates
# NOTE(review): synapses_seed is defined earlier in the file (not visible here)
local_random.seed(synapses_seed)
# --- build the cell and collect candidate synapses ---------------------------
# NOTE(review): this script uses Python 2 idioms (.next(), np.sum(dict.values()),
# map/range treated as lists below) -- confirm the target interpreter.
cell = CA1_Pyr(morph_filename, mech_filename, full_spines=True)
cell.set_terminal_branch_na_gradient()
cell.insert_inhibitory_synapses_in_subset()

# locate the primary trunk bifurcation and the distal trunk node that gives rise to the tuft
trunk_bifurcation = [trunk for trunk in cell.trunk if cell.is_bifurcation(trunk, 'trunk')]
if trunk_bifurcation:
    trunk_branches = [branch for branch in trunk_bifurcation[0].children if branch.type == 'trunk']
    # get where the thickest trunk branch gives rise to the tuft
    trunk = max(trunk_branches, key=lambda node: node.sec(0.).diam)
    # NOTE(review): .next() is Python 2 generator syntax; next(...) under Python 3
    trunk = (node for node in cell.trunk if cell.node_in_subtree(trunk, node) and 'tuft' in (child.type
             for child in node.children)).next()
else:
    trunk_bifurcation = [node for node in cell.trunk if 'tuft' in (child.type for child in node.children)]
    trunk = trunk_bifurcation[0]

all_exc_syns = {sec_type: [] for sec_type in ['basal', 'trunk', 'apical', 'tuft']}
all_inh_syns = {sec_type: [] for sec_type in ['soma', 'ais', 'basal', 'trunk', 'apical', 'tuft']}
stim_exc_syns = {'CA3': [], 'ECIII': []}
stim_inh_syns = {'perisomatic': [], 'axo-axonic': [], 'apical dendritic': [], 'distal apical dendritic': [],
                 'tuft feedforward': [], 'tuft feedback': []}
stim_successes = []
peak_locs = {'CA3': [], 'ECIII': []}

# place synapses in trunk for inheritance of mechanisms (for testing)
# NOTE(review): this branch is unreachable -- 'trunk' is always a key of all_exc_syns above
if 'trunk' not in all_exc_syns:
    for node in cell.trunk:
        for spine in node.spines:
            syn = Synapse(cell, spine, syn_types, stochastic=excitatory_stochastic)
# place synapses in every spine
for sec_type in all_exc_syns:
    for node in cell.get_nodes_of_subtype(sec_type):
        for spine in node.spines:
            syn = Synapse(cell, spine, syn_types, stochastic=excitatory_stochastic)
            all_exc_syns[sec_type].append(syn)
cell.init_synaptic_mechanisms()
# collate inhibitory synapses
for sec_type in all_inh_syns:
    for node in cell.get_nodes_of_subtype(sec_type):
        for syn in node.synapses:
            if 'GABA_A_KIN' in syn._syn:
                all_inh_syns[sec_type].append(syn)

sim = QuickSim(duration, cvode=0, dt=0.01)
sim.parameters['equilibrate'] = equilibrate
sim.parameters['track_equilibrate'] = track_equilibrate
sim.parameters['global_theta_cycle_duration'] = global_theta_cycle_duration
sim.parameters['input_field_duration'] = input_field_duration
sim.parameters['track_length'] = track_length
sim.parameters['duration'] = duration
sim.parameters['stim_dt'] = stim_dt
sim.append_rec(cell, cell.tree.root, description='soma', loc=0.)
sim.append_rec(cell, trunk_bifurcation[0], description='proximal_trunk', loc=1.)
sim.append_rec(cell, trunk, description='distal_trunk', loc=1.)
spike_output_vec = h.Vector()
cell.spike_detector.record(spike_output_vec)

# get the fraction of total spines contained in each sec_type
total_exc_syns = {sec_type: len(all_exc_syns[sec_type]) for sec_type in ['basal', 'trunk', 'apical', 'tuft']}
fraction_exc_syns = {sec_type: float(total_exc_syns[sec_type]) / float(np.sum(total_exc_syns.values())) for sec_type in
                     ['basal', 'trunk', 'apical', 'tuft']}
# --- randomly sample synapses to stimulate, proportionally per sec_type ------
# NOTE(review): num_exc_syns and num_inh_syns are defined earlier in the file
# (not visible here)
for sec_type in all_exc_syns:
    for i in local_random.sample(range(len(all_exc_syns[sec_type])), int(num_exc_syns*fraction_exc_syns[sec_type])):
        syn = all_exc_syns[sec_type][i]
        # tuft synapses model ECIII input; everything else models CA3 input
        if sec_type == 'tuft':
            stim_exc_syns['ECIII'].append(syn)
        else:
            stim_exc_syns['CA3'].append(syn)

# get the fraction of inhibitory synapses contained in each sec_type
total_inh_syns = {sec_type: len(all_inh_syns[sec_type]) for sec_type in ['soma', 'ais', 'basal', 'trunk', 'apical',
                                                                         'tuft']}
fraction_inh_syns = {sec_type: float(total_inh_syns[sec_type]) / float(np.sum(total_inh_syns.values())) for sec_type in
                     ['soma', 'ais', 'basal', 'trunk', 'apical', 'tuft']}
num_inh_syns = min(num_inh_syns, int(np.sum(total_inh_syns.values())))
for sec_type in all_inh_syns:
    for i in local_random.sample(range(len(all_inh_syns[sec_type])), int(num_inh_syns*fraction_inh_syns[sec_type])):
        syn = all_inh_syns[sec_type][i]
        # assign each sampled inhibitory synapse to an interneuron class based
        # on its position in the dendritic tree
        if syn.node.type == 'tuft':
            if cell.is_terminal(syn.node):
                # GABAergic synapses on terminal tuft branches are about 25% feedforward
                group = local_random.choice(['tuft feedforward', 'tuft feedback', 'tuft feedback', 'tuft feedback'])
            else:
                # GABAergic synapses on intermediate tuft branches are about 50% feedforward
                group = local_random.choice(['tuft feedforward', 'tuft feedback'])
        elif syn.node.type == 'trunk':
            distance = cell.get_distance_to_node(cell.tree.root, syn.node, syn.loc)
            if distance <= 50.:
                group = 'perisomatic'
            elif distance <= 150.:
                group = local_random.choice(['apical dendritic', 'apical dendritic', 'distal apical dendritic'])
            else:
                group = local_random.choice(['apical dendritic', 'distal apical dendritic', 'distal apical dendritic'])
        elif syn.node.type == 'basal':
            distance = cell.get_distance_to_node(cell.tree.root, syn.node, syn.loc)
            group = 'perisomatic' if distance <= 50. and not cell.is_terminal(syn.node) else 'apical dendritic'
        elif syn.node.type == 'soma':
            group = 'perisomatic'
        elif syn.node.type == 'apical':
            # distance is measured to the origin of the dendrite, not the synapse itself
            distance = cell.get_distance_to_node(cell.tree.root, cell.get_dendrite_origin(syn.node), loc=1.)
            if distance <= 150.:
                group = local_random.choice(['apical dendritic', 'apical dendritic', 'distal apical dendritic'])
            else:
                group = local_random.choice(['apical dendritic', 'distal apical dendritic', 'distal apical dendritic'])
        elif syn.node.type == 'ais':
            group = 'axo-axonic'
        stim_inh_syns[group].append(syn)
stim_t = np.arange(-track_equilibrate, track_duration, dt)
gauss_sigma = global_theta_cycle_duration * input_field_width / 3. / np.sqrt(2.)  # contains 99.7% gaussian area

# evenly tile the (padded) track with excitatory place-field centers
rand_exc_seq_locs = {}
for group in stim_exc_syns:
    rand_exc_seq_locs[group] = []
    if stim_exc_syns[group]:
        peak_locs[group] = np.arange(-0.75 * input_field_duration, (0.75 + track_length) * input_field_duration,
                                     (1.5 + track_length) * input_field_duration / int(len(stim_exc_syns[group])))
        peak_locs[group] = peak_locs[group][:len(stim_exc_syns[group])]

for group in stim_exc_syns:
    for syn in stim_exc_syns[group]:
        #peak_loc = local_random.uniform(-0.75 * input_field_duration, (0.75 + track_length) * input_field_duration)
        #peak_locs.append(peak_loc)
        if excitatory_stochastic:
            # record stochastic release successes for this synapse
            success_vec = h.Vector()
            stim_successes.append(success_vec)
            syn.netcon('AMPA_KIN').record(success_vec)
            rand_exc_seq_locs[group].append(syn.randObj.seq())
        # if syn.node.parent.parent not in [rec['node'] for rec in sim.rec_list]:
        #     sim.append_rec(cell, syn.node.parent.parent)
        # sim.append_rec(cell, syn.node, object=syn.target('AMPA_KIN'), param='_ref_i', description='i_AMPA')
        # sim.append_rec(cell, syn.node, object=syn.target(NMDA_type), param='_ref_i', description='i_NMDA')
        # remove this synapse from the pool, so that additional "modulated" inputs
        # can be selected from those that remain
        all_exc_syns[syn.node.parent.parent.type].remove(syn)

# rand_inh_seq_locs = [] will need this when inhibitory synapses become stochastic
# stim_inh_successes = [] will need this when inhibitory synapses become stochastic

# modulate the weights of inputs with peak_locs along this stretch of the track
modulated_field_center = track_duration * 0.6
cos_mod_weight = {}
# NOTE(review): mod_weights is defined earlier in the file (not visible here)
peak_mod_weight = mod_weights
tuning_amp = (peak_mod_weight - 1.) / 2.
tuning_offset = tuning_amp + 1.

for group in stim_exc_syns:
    # raised-cosine weight envelope centered on modulated_field_center;
    # weights outside the envelope are clamped to 1.
    this_cos_mod_weight = tuning_amp * np.cos(2. * np.pi / (input_field_duration * 1.2) * (peak_locs[group] -
                                              modulated_field_center)) + tuning_offset
    left = np.where(peak_locs[group] >= modulated_field_center - input_field_duration * 1.2 / 2.)[0][0]
    right = np.where(peak_locs[group] > modulated_field_center + input_field_duration * 1.2 / 2.)[0][0]
    cos_mod_weight[group] = np.array(this_cos_mod_weight)
    cos_mod_weight[group][:left] = 1.
    cos_mod_weight[group][right:] = 1.
    # shuffle peak_locs and weights together so synapse assignment is randomized
    # NOTE(review): range/map behave as lists only under Python 2
    peak_locs[group] = list(peak_locs[group])
    cos_mod_weight[group] = list(cos_mod_weight[group])
    indexes = range(len(peak_locs[group]))
    local_random.shuffle(indexes)
    peak_locs[group] = map(peak_locs[group].__getitem__, indexes)
    cos_mod_weight[group] = map(cos_mod_weight[group].__getitem__, indexes)
    for i, syn in enumerate(stim_exc_syns[group]):
        syn.netcon('AMPA_KIN').weight[0] = cos_mod_weight[group][i]

# pre-select the subset of inhibitory synapses to silence during the mod_inh window
manipulated_inh_syns = {}
for group in inhibitory_manipulation_fraction:
    num_syns = int(len(stim_inh_syns[group]) * inhibitory_manipulation_fraction[group])
    manipulated_inh_syns[group] = local_random.sample(stim_inh_syns[group], num_syns)

# NOTE(review): trial_seed is defined earlier in the file (not visible here)
run_trial(trial_seed)

# promote the working file to its final name once the trial completes
if os.path.isfile(data_dir+rec_filename+'-working.hdf5'):
    os.rename(data_dir+rec_filename+'-working.hdf5', data_dir+rec_filename+'.hdf5')
|
[
"neurosutras@gmail.com"
] |
neurosutras@gmail.com
|
348545a60d077bff0734073fbbc179fd101744d3
|
c153228c39667875b9bed57359b2fd6cc7869bee
|
/socialite/account/urls.py
|
b9e42e351cca938bf17f73c15660193867af281e
|
[] |
no_license
|
prakashkumarbhanja/Being_Coder_Social_Media
|
d4563b775094d5c0fd8fcd2eee7c3720f241ca93
|
d9d59080cd83823c3bcf20da0aa13a4081add089
|
refs/heads/master
| 2022-12-11T21:02:05.499950
| 2020-08-29T18:08:30
| 2020-08-29T18:08:30
| 291,322,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
from django.urls import path
from . import views
from .views import *
# URL routes for the account app: sign-up, email activation, login, logout.
urlpatterns = [
    path('', views.sign_up_view, name='sign_up'),
    # <uidb64>/<token> are supplied by the activation link sent by email
    path('activate/<uidb64>/<token>', views.activate, name='activate'),
    path('login/', views.login_view, name='login_view'),
    path('logout/', views.logout_request, name='logout'),
]
|
[
"pkbhanja9@gmail.com"
] |
pkbhanja9@gmail.com
|
2bfb1039b383a20788b3244975774fcd31c2f006
|
ae67d8257d5800eb627a443e86329b3a8db97324
|
/apps/models/base.py
|
4c972e333c7b2f985717e43ff0e29391b3d75ee8
|
[] |
no_license
|
young-tim/flask-project-directory
|
de5df3f67d5c450bd9c799c9a87dec145cad85df
|
57918e9fa1ff75a0def457692c2f13b984f477aa
|
refs/heads/main
| 2023-08-20T14:45:56.854769
| 2021-09-16T03:24:09
| 2021-09-16T03:24:09
| 406,994,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
#encoding: utf-8
from apps.app import db
from apps.configs.db_config import DB_CONFIG
from datetime import datetime
import time
import copy
from sqlalchemy.ext.declarative import declared_attr
from apps.app import weblog
class BaseModel(db.Model):
    """
    Abstract base class shared by all database models.
    """
    # With __abstract__ = True, Flask-SQLAlchemy does not create a table for
    # this class itself; it only contributes shared columns and behavior to
    # the subclasses that inherit from it.
    __abstract__ = True

    # Optionally force a charset on every table:
    # __table_args__ = {
    #     'mysql_charset': DB_CONFIG['mysql']['charset']
    # }

    @declared_attr
    def __tablename__(cls):
        # Derive the table name from the class name: CamelCase -> snake_case,
        # strip a trailing "_model" suffix, and prepend the configured prefix.
        # NOTE(review): names already containing "_" are used verbatim (no
        # lowercasing) -- confirm this asymmetry is intended.
        if "_" in cls.__name__:
            name = cls.__name__
        else:
            name = ''.join([('_' + ch.lower()) if ch.isupper() else ch
                            for ch in cls.__name__]).strip('_')
        if name[-6:] == "_model":
            name = name[:-6]
        _table_prefix = DB_CONFIG['mysql']['prefix']
        return _table_prefix + name if _table_prefix else name

    # Every table automatically carries creation/update timestamps.
    # NOTE(review): create_time stores a numeric epoch (time.time) while
    # update_time stores a DateTime -- confirm the mixed representation is intended.
    create_time = db.Column(db.Integer, default=time.time, comment='创建时间')
    update_time = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now, comment='更新时间')

    def toDict(self):
        """
        Return this row's column values as a plain dict.

        Usage:
            r = model.query.……
            r.toDict()             # dict of column values
            r.toDict().__str__()   # string form of the same data

        datetime values are formatted as "%Y-%m-%d %H:%M:%S"; the SQLAlchemy
        bookkeeping attribute and update_time are stripped from the result.
        """
        # Deep-copy so the deletions below cannot mutate the live instance
        # __dict__ (which includes "_sa_instance_state").
        c_dict = copy.deepcopy(self.__dict__)
        for (k, v) in c_dict.items():
            # Format datetime contents as [Y-m-d H:M:S]; replacing values
            # (not adding/removing keys) during iteration is safe.
            if isinstance(v, datetime):
                c_dict[k] = v.strftime("%Y-%m-%d %H:%M:%S")
        try:
            if "_sa_instance_state" in c_dict:
                del c_dict["_sa_instance_state"]
            # if "create_time" in c_dict:
            #     del c_dict["create_time"]
            if "update_time" in c_dict:
                del c_dict["update_time"]
        except Exception as e:
            # Best-effort cleanup: log and still return what we have.
            weblog.err(e)
        return c_dict
|
[
"610951468@qq.com"
] |
610951468@qq.com
|
e2fb9eb7e8c014f32f0fba90b3e576ba02f8cbcb
|
105f03c81499f1c640cc64be753f952edbd7f1d3
|
/ica_vs_pca.py
|
df97864fb1226441481b257735799e0931bf0ddb
|
[] |
no_license
|
rghiglia/ML_Process
|
4ee390dc0bce257f96c91749588813c0293fd53f
|
b5907553934182a2f104118bf4a52806044ddae4
|
refs/heads/master
| 2020-04-02T21:19:52.768389
| 2016-06-06T13:50:44
| 2016-06-06T13:50:44
| 60,531,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,843
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 23 14:27:31 2016

@author: rghiglia

Blind-source-separation demo: mix three known source signals, then compare
how FastICA and PCA recover them, plotting all four signal sets.
"""

from sklearn.decomposition import FastICA, PCA
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)

s1 = np.sin(2 * time)  # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time))  # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time)  # Signal 3: saw tooth signal

S = np.c_[s1, s2, s3]  # sources as columns: shape (n_samples, 3)
S += 0.2 * np.random.normal(size=S.shape)  # Add noise

S /= S.std(axis=0)  # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]])  # Mixing matrix
X = np.dot(S, A.T)  # Generate observations

# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X)  # Reconstruct signals
A_ = ica.mixing_  # Get estimated mixing matrix

# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)

# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X)  # Reconstruct signals based on orthogonal components

###############################################################################
# Plot results

plt.figure()

models = [X, S, S_, H]
names = ['Observations (mixed signal)',
         'True Sources',
         'ICA recovered signals',
         'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']

# one stacked subplot per signal set; each of the three channels gets its own color
for ii, (model, name) in enumerate(zip(models, names), 1):
    plt.subplot(4, 1, ii)
    plt.title(name)
    for sig, color in zip(model.T, colors):
        plt.plot(sig, color=color)

plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
|
[
"rghigliaus@yahoo.com"
] |
rghigliaus@yahoo.com
|
e0dcb6d9ac235830ca55f46d9da077afa615e8ac
|
2a45507fff25c42cad05b52d83d011fea0909be5
|
/Gfg/Python/max_subtree_sum.py
|
03da2ec5d40505ce5dcd227ca6b784673614061c
|
[
"MIT"
] |
permissive
|
Sharayu1071/Daily-Coding-DS-ALGO-Practice
|
dc8256e76d43952f679236df904f597908fbda13
|
2c424b33a1385085f97b98d6379d6cd9cc71b1bd
|
refs/heads/main
| 2023-08-30T17:49:44.312613
| 2021-10-03T04:21:21
| 2021-10-03T04:21:21
| 412,973,714
| 3
| 0
|
MIT
| 2021-10-03T04:18:20
| 2021-10-03T04:18:19
| null |
UTF-8
|
Python
| false
| false
| 1,643
|
py
|
# Find the maximum possible subtree sum in a binary tree.
class newNode:
    """A binary-tree node holding a key and two child links."""

    def __init__(self, key):
        """Create a leaf node: both children start out as None."""
        self.key = key
        self.left = None
        self.right = None
def findLargestSubtreeSumUtil(root, ans):
    """Return the sum of the subtree rooted at *root* via post-order traversal.

    The running maximum over all subtree sums is accumulated in ans[0]
    (a single-element list used as a mutable out-parameter).
    """
    # An empty subtree contributes nothing to its parent's sum.
    if root == None:
        return 0
    # Sum this node's key with both child subtree sums.
    left_sum = findLargestSubtreeSumUtil(root.left, ans)
    right_sum = findLargestSubtreeSumUtil(root.right, ans)
    subtree_sum = root.key + left_sum + right_sum
    # Keep the best subtree sum seen anywhere in the tree so far.
    ans[0] = max(ans[0], subtree_sum)
    # Hand this subtree's total back to the parent.
    return subtree_sum
def findLargestSubtreeSum(root):
    """Return the maximum subtree sum in the tree rooted at *root*.

    An empty tree yields 0.
    """
    # No tree, no subtree sums.
    if root == None:
        return 0
    # Seed the accumulator with a very small sentinel, then let the
    # recursive helper raise it to the best subtree sum found.
    best = [-999999999999]
    findLargestSubtreeSumUtil(root, best)
    return best[0]
if __name__ == '__main__':
    # Build the sample tree drawn below and print its maximum subtree sum.
    # Subtree sums are {4, 5, 7, -6, 2, -1, 7}, so the expected output is 7.
    root = newNode(1)
    root.left = newNode(-2)
    root.right = newNode(3)
    root.left.left = newNode(4)
    root.left.right = newNode(5)
    root.right.left = newNode(-6)
    root.right.right = newNode(2)
    print(findLargestSubtreeSum(root))
#
#            1
#          /   \
#         /     \
#       -2       3
#       / \     / \
#      /   \   /   \
#     4     5 -6    2
|
[
"noreply@github.com"
] |
Sharayu1071.noreply@github.com
|
8fb85eaebfe108f00c14e927e0c083bf43f087eb
|
ce1339e4d87087a1fe417c81dd40b9e686dbe455
|
/server_tools/tf_serving_tools/tfserving-python-predict-client/predict_client/prod_client.py
|
9af3d398dc141aeef127920fa15f74a64fb9bfbb
|
[] |
no_license
|
vovdlbezgod/Remake_app_union
|
09bcfd645b190b68885386050f56d127c67f99f1
|
9551d1a77baf6c3ab0f0a01c0324ba331ed47b88
|
refs/heads/master
| 2020-04-02T10:45:35.139746
| 2018-10-23T20:54:13
| 2018-10-23T20:54:13
| 154,353,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,396
|
py
|
import logging
import time
import grpc
from grpc import RpcError
from predict_client.pbs.prediction_service_pb2 import PredictionServiceStub
from predict_client.pbs.predict_pb2 import PredictRequest
from predict_client.util import predict_response_to_dict, make_tensor_proto
class ProdClient:
    """Minimal gRPC client for a TensorFlow Serving PredictionService endpoint."""

    def __init__(self, host, model_name, model_version):
        """
        :param host: str; 'host:port' of the serving endpoint
        :param model_name: str; name under which the model is served
        :param model_version: int; version to request; values <= 0 leave the
            version unset so the server chooses
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        self.host = host
        self.model_name = model_name
        self.model_version = model_version

    def predict(self, request_data, request_timeout=10):
        """
        Issue one Predict RPC and return the response tensors as a dict.

        :param request_data: iterable of dicts, each with keys 'data',
            'in_tensor_name' and 'in_tensor_dtype' describing one input tensor
        :param request_timeout: number; RPC deadline in seconds
        :return: dict built from the PredictResponse, or {} if the RPC failed
        """
        self.logger.info('Sending request to tfserving model')
        self.logger.info('Host: {}'.format(self.host))
        self.logger.info('Model name: {}'.format(self.model_name))
        self.logger.info('Model version: {}'.format(self.model_version))

        # Create gRPC client and request
        # NOTE(review): a fresh channel and stub are built on every call;
        # consider reusing them if this becomes a hot path.
        t = time.time()
        channel = grpc.insecure_channel(self.host)
        self.logger.debug('Establishing insecure channel took: {}'.format(time.time() - t))

        t = time.time()
        stub = PredictionServiceStub(channel)
        self.logger.debug('Creating stub took: {}'.format(time.time() - t))

        t = time.time()
        request = PredictRequest()
        self.logger.debug('Creating request object took: {}'.format(time.time() - t))

        request.model_spec.name = self.model_name

        # Only pin an explicit version when one was requested.
        if self.model_version > 0:
            request.model_spec.version.value = self.model_version

        t = time.time()
        for d in request_data:
            tensor_proto = make_tensor_proto(d['data'], d['in_tensor_dtype'])
            request.inputs[d['in_tensor_name']].CopyFrom(tensor_proto)
        self.logger.debug('Making tensor protos took: {}'.format(time.time() - t))

        try:
            t = time.time()
            predict_response = stub.Predict(request, timeout=request_timeout)
            self.logger.debug('Actual request took: {} seconds'.format(time.time() - t))

            predict_response_dict = predict_response_to_dict(predict_response)

            keys = [k for k in predict_response_dict]
            self.logger.info('Got predict_response with keys: {}'.format(keys))

            return predict_response_dict
        except RpcError as e:
            # Degrade gracefully: log the failure and return an empty result.
            self.logger.error(e)
            self.logger.error('Prediction failed!')
            return {}
|
[
"vovdl18@gmail.com"
] |
vovdl18@gmail.com
|
419f90bb9c54bfbb271d10af8b3907e86dc3a234
|
4cf652ee4168f6f728d4ad86a9df13fd3431fd98
|
/DynamicProgramming/decode-ways.py
|
a5d6a4d2f81026074a86f9fea0127dccb1846078
|
[] |
no_license
|
jadenpadua/Foundation
|
73940b73720a3e84f46502797a02fa19117b653c
|
042a83177e8cce7291c9b31b54b3d71e4d4c9696
|
refs/heads/master
| 2023-03-07T03:20:21.546843
| 2021-02-16T23:16:43
| 2021-02-16T23:16:43
| 286,326,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
# Example input: "262314" decodes in 8 distinct ways.
class Solution:
    """Count decodings of a digit string where "1"->'A' ... "26"->'Z'."""

    def numDecodings(self, s: str) -> int:
        """Return the number of ways to decode *s*.

        Bottom-up DP over suffixes, O(n) time and O(1) space. The original
        memoized DFS recursed once per character and therefore raised
        RecursionError for inputs longer than Python's recursion limit;
        this iteration is otherwise result-identical (including "" -> 1
        and any suffix starting with '0' -> 0).
        """
        n = len(s)
        ways_next = 1        # dp[i+1]: ways to decode the suffix starting at i+1 (dp[n] = 1)
        ways_after_next = 0  # dp[i+2]
        for i in range(n - 1, -1, -1):
            if s[i] == '0':
                # '0' cannot start any code.
                ways_here = 0
            elif i + 2 <= n and int(s[i:i + 2]) <= 26:
                # Take one digit, or two digits when they form 10..26.
                ways_here = ways_next + ways_after_next
            else:
                ways_here = ways_next
            ways_after_next, ways_next = ways_next, ways_here
        return ways_next
|
[
"noreply@github.com"
] |
jadenpadua.noreply@github.com
|
23d41afbe285e3d491f5b965f4b5f165813d1d6f
|
fa0bd730981a4a7333e7858c03e2a16c75e9cf5c
|
/Chapter 2/GradientTape.py
|
118f3d58651d9a1ed6815b956d3dd4185b3865bc
|
[
"MIT"
] |
permissive
|
PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras
|
4cb5f7249dcd1efe6ea5a5263fb862240ce303bb
|
e23d2b4a4292386b70977473805acb2f93ef16ca
|
refs/heads/master
| 2023-02-13T04:04:57.531730
| 2023-02-07T19:23:47
| 2023-02-07T19:23:47
| 228,759,428
| 311
| 214
|
MIT
| 2021-06-01T14:06:06
| 2019-12-18T04:42:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
# Demonstrates tf.GradientTape with persistent=True: the same tape can be
# queried for multiple gradients before being released.
import tensorflow as tf

x = tf.constant(4.0)
with tf.GradientTape(persistent=True) as g:
    # constants are not tracked automatically; watch x explicitly
    g.watch(x)
    y = x * x
    z = y * y
dz_dx = g.gradient(z, x)  # 256.0 (dz/dx = 4*x^3 at x = 4)
dy_dx = g.gradient(y, x)  # 8.0 (dy/dx = 2*x at x = 4)
print (dz_dx)
print (dy_dx)
del g  # Drop the reference to the tape
|
[
"noreply@github.com"
] |
PacktPublishing.noreply@github.com
|
a088e6636c0718cc3f84476e63b19665801f7bc7
|
f12dff30e08582b4f586ce466df52614356731a8
|
/pysdss/astro/miscutils.py
|
db0e762f1f142381febfec7fa20b904e8f8cde7c
|
[] |
no_license
|
astrometry/pysdss
|
042e7d6c7ef82c141f265049d38c2ce490b27998
|
c6a42e48ff2dbcb240ff420043303dc39a077097
|
refs/heads/master
| 2021-01-25T08:54:17.267765
| 2012-03-16T14:44:32
| 2012-03-16T14:44:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
__all__ = ['lanczos_filter', 'get_overlapping_region']
from numpy import sin, atleast_1d, zeros, logical_and, pi
def lanczos_filter(order, x):
    """Evaluate the Lanczos windowed-sinc kernel of the given order at *x*.

    Returns an array the same length as *x*: exactly 1 at x == 0, the
    product of the two sinc terms strictly inside (-order, order), and 0
    everywhere outside that open interval (including at x == +/-order).
    """
    x = atleast_1d(x)
    inside = logical_and(x > -order, x < order)
    nonzero = logical_and(inside, x != 0.)
    result = zeros(len(x), float)
    scaled = pi * x[nonzero]
    result[nonzero] = order * sin(scaled) * sin(scaled / order) / scaled**2
    # the kernel's removable singularity at the origin evaluates to 1
    result[x == 0] = 1.
    return result
def get_overlapping_region(xlo, xhi, xmin, xmax):
    """Clip the integer coordinate range [xlo, xhi] to the bounds [xmin, xmax].

    Useful when cutting a region out of an image: given the desired cutout
    [xlo, xhi] and the image bounds [xmin, xmax], returns a pair of slices --
    the first selects the in-bounds coordinates, the second selects the
    corresponding region within the desired cutout. Inputs are inclusive
    ranges; the returned slices follow Python's half-open convention.
    Returns ([], []) when either range is empty or they do not overlap.
    """
    # Degenerate or disjoint ranges produce an empty result.
    if xlo > xmax or xhi < xmin or xlo > xhi or xmin > xmax:
        return ([], [])
    lo = max(xlo, xmin)
    hi = min(xhi, xmax)
    # Offsets of the clipped span within the requested cutout.
    cut_lo = lo - xlo
    cut_hi = cut_lo + (hi - lo)
    return (slice(lo, hi + 1), slice(cut_lo, cut_hi + 1))
|
[
"dan@danfm.ca"
] |
dan@danfm.ca
|
a24c69b1d7767d079c5476801ab29df309b46c07
|
9d64be935284ba6f370f59d273cb0a6185a6cb83
|
/testers/legacy/analyser.py
|
d9766ebc8cb2950367f66d30331986ac1da2e7d2
|
[] |
no_license
|
DillonSteyl/SCIE1000
|
8848168f40acf3f788cbab096e60de9de6914398
|
6d6b123a07659c80aba8b7c6668ee745bc03a767
|
refs/heads/master
| 2022-04-12T08:10:43.436631
| 2020-03-21T03:21:25
| 2020-03-21T03:21:25
| 114,079,107
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,615
|
py
|
from abc import ABCMeta, abstractmethod
import ast
class ListGeneratingNodeVisitor(ast.NodeVisitor):
    """Records the order in which AST nodes are entered and exited.

    Traversal relies on the default ast.NodeVisitor.generic_visit, which
    recursively visits every child; this subclass brackets each node's
    visit with a VISIT event immediately before descending and a LEAVE
    event immediately after (i.e. after all of its children).

    Class Attributes:
        VISIT (constant): Marks the start of a node visit.
        LEAVE (constant): Marks the end of a node visit.

    Attributes:
        events ([constant, ast.AST]): Chronological list of (event, node)
            pairs, where event is one of VISIT or LEAVE.
    """

    VISIT = 'VISIT'
    LEAVE = 'LEAVE'

    def __init__(self):
        self.events = []

    def generic_visit(self, node):
        # Bracket the recursive descent with matching VISIT/LEAVE markers.
        self.events.append((self.VISIT, node))
        super().generic_visit(node)
        self.events.append((self.LEAVE, node))
class CodeAnalyser(metaclass=ABCMeta):
"""
Abstract base class for analysis of student code.
This class must not be instantiated. Subclasses must override ._analyse
Provides basic logic for analysing student code using a subclass of
ast.NodeVisitor, as well as for recording the errors and warnings that may
be generated in the analysis.
Attributes:
visitor (ast.NodeVisitor): The code visitor used in the analysis.
errors ([str]): The error messages logged by the CodeAnalyser subclass,
in the order encountered.
warnings ([str]): The warning messages logged by the CodeAnalyser
subclass, in the order encountered.
"""
def __init__(self, visitor_class):
"""
Initialise a new CodeAnalyser object.
Args:
visitor_class (ast.NodeVisitor class): The type of NodeVisitor to use
when analysing the student code. Note that this argument must be
a class object, *not an instance of that class*.
"""
self.visitor = visitor_class()
self.errors = []
self.warnings = []
def add_error(self, message):
"""
Add the given error message to the list of errors.
Args:
message (str): The error message to add.
"""
self.errors.append(message)
def add_warning(self, message):
"""
Add the given warning message to the list of warnings.
Args:
message (str): The warning message to add.
"""
self.warnings.append(message)
def analyse(self, text):
"""
Analyse the given student code text.
Analysis will be performed using the visitor class given as an argument
to the constructor. This method ensures that all nodes are visited in
the appropriate order, but does not attempt to log any errors or
warnings that may be encountered.
Defers to ._analyse() for detailed, problem-specific analysis.
Args:
text (str): The code to analyse.
"""
# build up an ordered list of nodes in the default manner
tree = ast.parse(text)
list_generating_visitor = ListGeneratingNodeVisitor()
list_generating_visitor.visit(tree)
# visit each node in turn with our visitor (which will not recurse)
handle_event = {
ListGeneratingNodeVisitor.VISIT: self.visitor.visit,
ListGeneratingNodeVisitor.LEAVE: self.visitor.leave,
}
for event, node in list_generating_visitor.events:
assert event in handle_event, 'Unknown event: {}'.format(event)
handle_event[event](node)
# defer detailed analysis to subclasses
self._analyse()
@abstractmethod
def _analyse(self):
"""
Analyse the given student code text.
This method performs detailed, problem-specific analysis on the student
code. It must be overriden by subclasses.
Subclasses should make use of the visitor attribute (self.visitor) in
order to identify errors and warnings, then add them through calls to
.add_error() and .add_warning() respectively.
"""
pass
def check_for_errors(self, text):
"""
Check whether the given student code has any compile errors.
This method is only designed to detect compile errors which can be
highlighted in the GUI. If an exception is raised during compilation
which does not have an associated line number, it will be ignored.
Those exceptions will be dealt with during the testing phase.
Args:
text (str): The code to analyse.
Returns:
The line number associated with exception, if any.
None if no exception was raised, or if an exception with no
associated line number was encountered.
"""
try:
compile(text, '<student_code>', 'exec')
except Exception as e:
message = '{}: {}'.format(type(e).__name__, e)
self.add_error(message)
return getattr(e, 'lineno', None)
return None
|
[
"noreply@github.com"
] |
DillonSteyl.noreply@github.com
|
0ff5d1e469d8d66ead2d6bc49463d537bab08800
|
ceb80a32832da1a884cb84e811a9645834b95e52
|
/HW12/q2.py
|
1e886efe498da92ec9537142a30c3d8611fe7724
|
[] |
no_license
|
tobyclh/DataAnalysis
|
5183dbea25b395a7c3c4fd20ed12ceb677171ada
|
5076762cfb322a8b805c403fe519fcd9251d2aca
|
refs/heads/master
| 2020-05-22T21:24:00.163303
| 2019-07-17T19:15:02
| 2019-07-17T19:15:02
| 186,524,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
import numpy as np
import matplotlib
from scipy import linalg
from sklearn.neighbors import NearestNeighbors
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
np.random.seed(1)
def data_generation(n=1000):
a = 3. * np.pi * np.random.rand(n)
x = np.stack(
[a * np.cos(a), 30. * np.random.random(n), a * np.sin(a)], axis=1)
return a, x
def knn(xs, k=4):
"""reall slow implmentation of KNN"""
knn = []
for i, x in enumerate(xs):
dis = ((x - xs)**2).sum(1)
indices = np.argpartition(dis, k+1)[:k+1]
indices = indices.tolist()
indices.remove(i) #remove self
# assert len(indices) == k
knn.append(indices)
return knn
def knn2W(knn):
n_data = len(knn)
W = np.zeros([n_data, n_data])
for i, indices in enumerate(knn):
for idx in indices:
W[i, idx] = 1
W[idx, i] = 1
return W
def LapEig(x, d=2):
indices = knn(x)
W = knn2W(indices)
D = np.diag(W.sum(1))
L = D - W
eigval, eigvec = linalg.eig(L, D)
eigen_pairs = [[np.abs(eigval[i]),eigvec[:, i]] for i in range(len(eigval))]
eigen_pairs = sorted(eigen_pairs,key=lambda k: k[0], reverse=False)
w = np.stack([eig[1].real for eig in eigen_pairs], axis=-1)
z = w[:, 1:d+1]
return z
def visualize(x, z, a):
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.scatter3D(x[:, 0], x[:, 1], x[:, 2], c=a, marker='o')
ax = fig.add_subplot(1, 2, 2)
ax.scatter(z[:, 1], z[:, 0], c=a, marker='o')
fig.savefig('lecture10-h2.png')
n = 1000
a, x = data_generation(n)
z = LapEig(x)
visualize(x, z, a)
|
[
"tobyclh@gmail.com"
] |
tobyclh@gmail.com
|
29a032a73485b1163361b061988e861f443e6fe3
|
3638ead3dae764c53ac3461b2798777020c9a5c8
|
/clear.py
|
da0bb0eed129034b3d94f1d7fb02054f26dc0b2c
|
[
"MIT"
] |
permissive
|
joaomateusferr/VinusIOT
|
e254e7de5496245791dcf7a4cdbc49216b800c8f
|
a7c814c8e1d8c8d41f62f6e3e94c52c40340ecdb
|
refs/heads/master
| 2020-07-10T16:30:16.347587
| 2019-11-15T20:27:19
| 2019-11-15T20:27:19
| 204,311,490
| 0
| 0
|
MIT
| 2019-11-15T20:27:21
| 2019-08-25T15:14:09
|
Python
|
UTF-8
|
Python
| false
| false
| 347
|
py
|
import os
Adafruit_Python_DHT = os.path.isdir("/home/pi/Adafruit_Python_DHT")
if(Adafruit_Python_DHT):
os.system("sudo rm -r Adafruit_Python_DHT")
requests = os.path.isdir("/home/pi/requests")
if(requests):
os.system("sudo rm -r requests")
VinusIOT = os.path.isdir("/home/pi/VinusIOT")
if(VinusIOT):
os.system("sudo rm -r VinusIOT")
|
[
"47743004+joaomateusferr@users.noreply.github.com"
] |
47743004+joaomateusferr@users.noreply.github.com
|
a8672eb3759257dc5b6339db5794aad45a31b90d
|
14b8cf0b67104b53534678b8c0e9525ace4714ff
|
/crypto/der.py
|
6eb0ff04e8441522548d0db667a161832e5252a8
|
[] |
no_license
|
bhfwg/py_learn
|
bb11898fd81f653643fc61949f43df751d317fcb
|
eca9da748bada67357961d1581d8ec890a3385f8
|
refs/heads/master
| 2020-03-27T15:01:25.881792
| 2018-06-05T01:36:26
| 2018-06-05T01:36:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
from base64 import b64decode, b64encode
pubkey = 'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQChixw4y0BDtlufNiwby9UTpampVdduYgBmCRdwJKfY/SPe/jGIdbmq1FONZiVBYArcfkVt4sDZpQ4Qh8nmNhU1kwOXYnehmPUVaWLo5lhd+OsGHbE+P6ZzvSG8f8R/BNK5uHSucC2mwsqG5nmfCwTLLaCnr4uu+EahTvDqW6AhMQIDAQAB'
msg = "test" * 1000
keyDER = b64decode(pubkey)
keyPub = RSA.importKey(keyDER)
cipher = Cipher_PKCS1_v1_5.new(keyPub)
cipher_text = cipher.encrypt(msg.encode()) # ValueError: Plaintext is too long
emsg = b64encode(cipher_text)
print emsg
# ValueError: RSA modulus length must be a multiple of 256 and >= 1024
key = RSA.generate(1024)
binPrivKey = key.exportKey('DER')
binPubKey = key.publickey().exportKey('DER')
print repr(binPubKey)
print b64encode(binPubKey)
privKeyObj = RSA.importKey(binPrivKey)
pubKeyObj = RSA.importKey(binPubKey)
msg = "attack at dawn"
emsg = pubKeyObj.encrypt(msg, 'x')[0]
dmsg = privKeyObj.decrypt(emsg)
assert(msg == dmsg)
|
[
"penomivy@gmail.com"
] |
penomivy@gmail.com
|
44192f20563759fc0e1431c454a41ff4dcff7a25
|
bc14fd37a1bfabdd3ffddf1e41432dd3eb20d8a0
|
/week2'2/avg.py
|
5306247045d4b459944a0d62a58d62b41d29a999
|
[] |
no_license
|
acedit/Python_kurs
|
2c4590e13c481275ac801f377aace5efb9638ddc
|
70fbcc9b10c8909cdc39acfdce277acbc7f5d71c
|
refs/heads/master
| 2020-05-27T08:25:08.837344
| 2015-03-19T19:33:13
| 2015-03-19T19:33:13
| 30,834,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
n = input("Enter count of numbers: ")
n = int(n)
suma=0
count = 1
numbers = []
while count <= n:
number = input("Enter number: ")
number = int(number)
suma+=number
numbers = numbers + [number]
count += 1
print(suma/len(numbers))
|
[
"pepi_hristov_pepi@yahoo.com"
] |
pepi_hristov_pepi@yahoo.com
|
4326dfdf7dea88b766d066453ef45457bb26713d
|
bd162255a73da3c989a2a426e9082ac295d16cb5
|
/database_setup.py
|
68942c9f6838668efd426e0447a39d9a6d4dc91c
|
[] |
no_license
|
AngaKoko/ResturantMenu
|
05533f448fe7525682afc4aeee621b47bed7fdcd
|
5789ed7c6e4905765d2481b6a9e236fb4c531760
|
refs/heads/master
| 2021-03-21T12:33:48.954540
| 2020-03-19T19:07:52
| 2020-03-19T19:07:52
| 247,293,183
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
"""
Creating a database using SQLAlchemy
"""
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Restaurant(Base):
__tablename__ = "restaurant"
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class MenuItem(Base):
__tablename__ = "menu_item"
name = Column(String(80), nullable=False)
id = Column(Integer, primary_key = True)
description = Column(String(250))
price = Column(String(8))
course = Column(String(250))
restaurant_id = Column(Integer, ForeignKey("restaurant.id"))
restaurant = relationship(Restaurant)
"""
#We added this serialize function to be able to send JSON objects in a serializable format
"""
@property
def serialize(self):
return {
'name' : self.name,
'description' : self.description,
'id' : self.id,
'price' : self.price,
'course' : self.course,
}
##Bottom of file ##
engine = create_engine("sqlite:///restaurantmenu.db")
Base.metadata.create_all(engine)
|
[
"angakoko@gmail.com"
] |
angakoko@gmail.com
|
ee8433beee796db5c42f604e999bed54d92cc03c
|
7eee289273350f7aa1b2bd43d85c81b58ef0b2c0
|
/parametrization_clean/domain/cost/factory.py
|
dd3a0b6abe92f9c1f965b12be7860d2200191c01
|
[
"MIT"
] |
permissive
|
chemshift/parametrization_clean
|
abe65687e3f4c30749c070b2ef195b9f871674be
|
702243d87c2045cf8155f3c18134665871f3b170
|
refs/heads/master
| 2023-01-19T06:48:59.088564
| 2020-11-30T02:07:43
| 2020-11-30T02:07:43
| 318,717,953
| 1
| 0
|
MIT
| 2020-12-05T06:22:49
| 2020-12-05T06:22:48
| null |
UTF-8
|
Python
| false
| false
| 2,218
|
py
|
#!/usr/bin/env python
"""Factory for error calculation algorithms allowed for usage."""
# Standard library
# 3rd party packages
# Local source
from parametrization_clean.domain.cost.strategy import IErrorStrategy
from parametrization_clean.domain.cost.reax_error import ReaxError
class ErrorFactory:
"""Factory class for creating error calculator algorithm executor - RegistryHolder design pattern.
Classes that implement IErrorStrategy can be registered and utilized through this factory's registry.
"""
REGISTRY = {}
"""Internal registry for available error calculation methods. Users can specify from one of the
`algorithm_name` strings available in the dictionary, mapping `algorithm_name` to the corresponding class
implementing that algorithm.
For example, "reax_error" maps to the ReaxFF error calculation algorithm;
users can specify the `error_strategy` in the user config.json file to use this algorithm.
"""
@classmethod
def register(cls, algorithm_name: str, error_calculator_class):
"""Register an error calculation strategy with a string key. Useful for abstraction and dynamic retrieval
of different algorithms in configuration file. Using this factory, one can easily implement an error
calculation algorithm (ex. MyErrorCalculatorClass) that follows IErrorStrategy, then use
"ErrorFactory.register('my_error_calculator_class')"
to generate a corresponding string reference for that error calculation strategy.
Parameters
----------
algorithm_name: str
Name that one wishes to assign to the designated `error_calculator_class`/algorithm.
error_calculator_class
Class that one wishes to associate/register with `algorithm_name`.
Returns
-------
error_calculator_class
Same as the `error_calculator_class` input parameter.
"""
cls.REGISTRY[algorithm_name] = error_calculator_class
return error_calculator_class
@classmethod
def create_executor(cls, algorithm_name: str) -> IErrorStrategy:
return cls.REGISTRY[algorithm_name]
ErrorFactory.register('reax_error', ReaxError)
|
[
"daksha@udel.edu"
] |
daksha@udel.edu
|
8287eb504530db503172a26a6e6c1d45ac01b508
|
9b0885f74c8814940995e2ce93f9657b5dd27053
|
/RecipeIndexer/asgi.py
|
88386e92be74ad8a8262daf8eb98c56161cfe986
|
[] |
no_license
|
zbala-hcttp/RecipeIndexer
|
d6128990ac1b58f410dfd957648987edaf7e5d5b
|
f1d93884e951eff455d150c4f58112066a42ab25
|
refs/heads/master
| 2023-06-03T11:49:01.177711
| 2021-06-14T18:58:59
| 2021-06-14T18:58:59
| 375,285,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
ASGI config for RecipeIndexer project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RecipeIndexer.settings')
application = get_asgi_application()
|
[
"zeynepbala@cs.hacettepe.edu.tr"
] |
zeynepbala@cs.hacettepe.edu.tr
|
1a0bcaaa40a7c024a9b69dedab6868c612874779
|
83bb3205275d29370dcd3412eb3405ac781639e8
|
/tiny_hands_pac/settings/dev.py
|
add43287f607e4d8dfe372223fac742425904d81
|
[
"MIT"
] |
permissive
|
DonaldTrumpHasTinyHands/tiny_hands_pac
|
faba72b9ab3292fae2b47dd9bae16001680c661d
|
3d7e5daeddbee218cb96f3b88c7c7010620225fd
|
refs/heads/master
| 2021-01-10T18:05:35.250290
| 2016-04-10T18:19:27
| 2016-04-10T18:19:27
| 54,299,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
from .base import *
DEBUG = True
COMPRESS_ENABLED = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
INTERNAL_IPS = ('127.0.0.1',)
INSTALLED_APPS += (
'django_medusa',
'wagtail.contrib.wagtailmedusa',
)
# Medusa settings
MEDUSA_RENDERER_CLASS = 'django_medusa.renderers.DiskStaticSiteRenderer'
MEDUSA_DEPLOY_DIR = os.path.join(PROJECT_ROOT, 'static_build')
SENDFILE_BACKEND = 'sendfile.backends.simple'
SECRET_KEY = '7nn(g(lb*8!r_+cc3m8bjxm#xu!q)6fidwgg&$p$6a+alm+eex'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'trumphands',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Process all tasks synchronously.
# Helpful for local development and running tests
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_ALWAYS_EAGER = True
try:
from .local import *
except ImportError:
pass
|
[
"kyle@iMac.local"
] |
kyle@iMac.local
|
7a493ec5c519f9f2e251053b66144d92882fdced
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2248/60632/250045.py
|
801f8ff34211ca750969b1f39e96a2896a52a968
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
n = int(input())
a = int(input())
b = int(input())
i = min(a, b)
while True:
if i % a == 0 or i % b == 0:
n -= 1
if n == 0:
break
i += 1
print(i % 1000000007)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e2640614eb61fa5fa1cfffbf09575959a8801efb
|
bc9c9689b3a9c2fa48816479bd9771d7729cdc3e
|
/week-02/day-03/student_filter.py
|
a11a918b32169f7e8e7a129a37235b502a3ef3bb
|
[] |
no_license
|
green-fox-academy/Atis0505
|
565666d0edd78406e1f1eff237a8e67d966411c6
|
63939f821805aa2a5525d8eed55e1c8ab2945a4f
|
refs/heads/master
| 2021-09-09T18:42:45.937310
| 2018-03-18T23:28:42
| 2018-03-18T23:28:42
| 102,183,981
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
students = [
{'name': 'Rezso', 'age': 9.5, 'candies': 2},
{'name': 'Gerzson', 'age': 10, 'candies': 1},
{'name': 'Aurel', 'age': 7, 'candies': 3},
{'name': 'Zsombor', 'age': 12, 'candies': 5}
]
# create a function that takes a list of students and prints:
# - Who has got more candies than 4 candies
# create a function that takes a list of students and prints:
# - how many candies they have on average
def moreCandies():
for n in range(len(students)):
if students[n]['candies'] > 4:
print(students[n]['name'])
def averageCandies():
sum = 0
for n in range(len(students)):
sum += students[n]['candies']
print(sum/n)
moreCandies()
averageCandies()
|
[
"attilakorom2014@gmail.com"
] |
attilakorom2014@gmail.com
|
bb482e9fdeaf3186505098e7ddea360fa6436e09
|
b611968ae2d700928c4b16f973efc2e902a21157
|
/hydroplants/urls.py
|
ecd75faee38187dc07edcf885a5fef01bdacfa8f
|
[] |
no_license
|
nediola/hydroponics
|
d8b8a94b5735e2a9965df9c8743a3fef513fd15a
|
ff479b91ff679e1f35b0af1f263419f47175bbad
|
refs/heads/master
| 2016-09-06T04:32:12.676341
| 2015-05-21T19:25:30
| 2015-05-21T19:25:30
| 34,571,963
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('mainpageapp.urls')),
url(r'^base/', include('baseapp.urls')),
url(r'^robot/', include('robotapp.urls')),
]
urlpatterns += staticfiles_urlpatterns()
|
[
"nediola@yandex.ru"
] |
nediola@yandex.ru
|
3511652857ae32d0e92cf540a1687e1f59a0dffe
|
b2894d71703f744bc84fd34141187e8fef11ccbf
|
/distributed/spill.py
|
2c849c2447e40fbc9bd7ea7b3aed251daecdb635
|
[] |
no_license
|
vero-so/distributed
|
6919221712cc6f8082918580cd5db383b2ecee88
|
7d1401a915d8612cbd35244f96f4ec6d95b38dec
|
refs/heads/main
| 2023-09-05T06:24:34.794774
| 2021-11-19T20:35:45
| 2021-11-19T20:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,441
|
py
|
from __future__ import annotations
from collections.abc import Hashable, Mapping
from functools import partial
from typing import Any
from zict import Buffer, File, Func
from .protocol import deserialize_bytes, serialize_bytelist
from .sizeof import safe_sizeof
class SpillBuffer(Buffer):
"""MutableMapping that automatically spills out dask key/value pairs to disk when
the total size of the stored data exceeds the target
"""
spilled_by_key: dict[Hashable, int]
spilled_total: int
def __init__(self, spill_directory: str, target: int):
self.spilled_by_key = {}
self.spilled_total = 0
storage = Func(
partial(serialize_bytelist, on_error="raise"),
deserialize_bytes,
File(spill_directory),
)
super().__init__(
{},
storage,
target,
weight=self._weight,
fast_to_slow_callbacks=[self._on_evict],
slow_to_fast_callbacks=[self._on_retrieve],
)
@property
def memory(self) -> Mapping[Hashable, Any]:
"""Key/value pairs stored in RAM. Alias of zict.Buffer.fast.
For inspection only - do not modify directly!
"""
return self.fast
@property
def disk(self) -> Mapping[Hashable, Any]:
"""Key/value pairs spilled out to disk. Alias of zict.Buffer.slow.
For inspection only - do not modify directly!
"""
return self.slow
@staticmethod
def _weight(key: Hashable, value: Any) -> int:
return safe_sizeof(value)
def _on_evict(self, key: Hashable, value: Any) -> None:
b = safe_sizeof(value)
self.spilled_by_key[key] = b
self.spilled_total += b
def _on_retrieve(self, key: Hashable, value: Any) -> None:
self.spilled_total -= self.spilled_by_key.pop(key)
def __setitem__(self, key: Hashable, value: Any) -> None:
self.spilled_total -= self.spilled_by_key.pop(key, 0)
super().__setitem__(key, value)
if key in self.slow:
# value is individually larger than target so it went directly to slow.
# _on_evict was not called.
b = safe_sizeof(value)
self.spilled_by_key[key] = b
self.spilled_total += b
def __delitem__(self, key: Hashable) -> None:
self.spilled_total -= self.spilled_by_key.pop(key, 0)
super().__delitem__(key)
|
[
"noreply@github.com"
] |
vero-so.noreply@github.com
|
a94dec148b8a6d339b6c8f79ce166515411d8346
|
ad64e96dca44c213999a763e285b8f9bbb394dd8
|
/appu/audio.py
|
2332dd14ef0cdd48e75f870ed660c791b6b30721
|
[
"MIT"
] |
permissive
|
ifosch/appu
|
c84127a6ca6d81a041fcc5e5977cc099cd4a4b33
|
2c6dabf8971f97696cf229de14bec50a2ead79e9
|
refs/heads/master
| 2022-01-12T22:13:52.969705
| 2022-01-06T17:09:14
| 2022-01-06T17:09:14
| 121,063,665
| 1
| 0
| null | 2018-02-10T23:28:16
| 2018-02-10T23:28:15
| null |
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
import re
import requests
from pydub import AudioSegment
from pydub.effects import normalize
def download_file(mp3_file_name, file_type):
"""
This check if is a url and download the file
in files directory with podcast.mp3 filename.
"""
remotefile = requests.get(
mp3_file_name,
headers={"User-Agent": "Wget/1.19.4 (linux-gnu)"})
# Set different file name if is jingle or podcast file.
result_file = "files/{}.mp3".format(file_type)
with open(result_file, 'wb') as output:
output.write(remotefile.content)
return result_file
def load_mp3(mp3_file_name, file_type='podcast'):
"""
This tries to load the audio from a named mp3 file.
It checks the filename has mp3 extension.
"""
url_pattern = re.compile('^http[s]://')
if url_pattern.match(mp3_file_name):
mp3_file_name = download_file(mp3_file_name, file_type)
if not mp3_file_name.lower().endswith('.mp3'):
raise SystemExit(
'Incorrect audio file format. The file must have .mp3 extension'
)
return AudioSegment.from_mp3(mp3_file_name)
def get_jingles(song_file_name):
"""
This function returns both starting and ending
jingles.
"""
song = load_mp3(song_file_name, "jingle")
return song[:20000], song[-40000:]
def glue_tracks(tracks):
"""
This functions glues all tracks in a single one,
using the specified fade for each track, and
returns the resulting audio.
"""
final = tracks[0][0]
for audio, fade in tracks[1:]:
final = final.append(audio, crossfade=fade)
return final
def normalize_audio(podcast_file):
"""
This function normalize track
"""
return normalize(podcast_file, headroom=-1.5)
|
[
"natx@y10k.ws"
] |
natx@y10k.ws
|
ad8404f4b341e1cc47ea3105fc118199e5f35a2a
|
cb6ed04c02a1cc3f292aa38ce63c34d95fb4dfaa
|
/TUPRO2.5/k-means.py
|
a30119fa50d36a51281b57494659291256dc558c
|
[] |
no_license
|
feresyan/AI-Tupro
|
372ba1df3d25a092dbdfa34ae90906c937d141a6
|
51d37f218bc77cb9c1c87b1b589bc225e71c47fa
|
refs/heads/master
| 2020-03-28T22:58:30.533172
| 2018-12-31T07:13:48
| 2018-12-31T07:13:48
| 149,268,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,025
|
py
|
# --- FERO RESYANTO 1301154318 IF-39-10 ---
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import random
import math
style.use("ggplot")
array_train = []
array_test = []
array_data_train = [] #MEMBUAT ARRAY UNTUK MENYIMPAN DATA TRAINING
array_data_test = [] #MEMBUAT ARRAY UNTUK DATA TEST
# --- MEMBUKA FILE DATA-TRAIN DAN MENYIMPAN KEDALAM VARIABEL ---
text_file = open("TrainsetTugas2.txt")
array_train = text_file.read().split()
i=0
while i <= (len(array_train)-1) :
array = []
for l in range (2) :
array.append(float(array_train[i]))
i+=1
array_data_train.append(array)
array_data_train=np.array(array_data_train)
# print(array_data_train)
# MEMASUKAN DATA ARRAY KE DALAM VARIABEL UNTUK MEMBUAT SCATTER PLOT --
# x = array_data_train[:,0]
# y = array_data_train[:,1]
# plt.scatter(x,y,color='blue')
# plt.title("Persebaran Data Train")
# plt.show()
# --- MEMBUKA FILE DATA-TEST DAN MENYIMPAN KEDALAM VARIABEL --
text_file = open("TestsetTugas2.txt")
array_test = text_file.read().split()
i=0
while i <= (len(array_test)-1) :
array = []
for l in range (2) :
array.append(float(array_test[i]))
i+=1
array_data_test.append(array)
array_data_test=np.array(array_data_test)
# MEMASUKAN DATA ARRAY KE DALAM VARIABEL UNTUK MEMBUAT SCATTER SCATTER PLOT --
# x = array_data_test[:,0]
# y = array_data_test[:,1]
# plt.scatter(x,y,color='green')
# plt.show()
# -- MENGINISIASI RANDOM CENTROID --
centroid1 = random.choice(array_data_train)
centroid2 = random.choice(array_data_train)
centroid3 = random.choice(array_data_train)
centroid4 = random.choice(array_data_train)
centroid5 = random.choice(array_data_train)
cluster = [0 for i in range(len(array_data_train))]
# -- EUCLIDIAN DISTANCE --
# -- MELAKUKAN ALGORITMA K-MEANS SEBANYAK ITERASI --
for iterasi in range(13):
for i in range(len(array_data_train)):
jarak1 = math.sqrt((math.pow((centroid1[0] - array_data_train[i][0]), 2)) + (
math.pow((centroid1[1] - array_data_train[i][1]), 2)))
jarak2 = math.sqrt((math.pow((centroid2[0] - array_data_train[i][0]), 2)) + (
math.pow((centroid2[1] - array_data_train[i][1]), 2)))
jarak3 = math.sqrt((math.pow((centroid3[0] - array_data_train[i][0]), 2)) + (
math.pow((centroid3[1] - array_data_train[i][1]), 2)))
jarak4 = math.sqrt((math.pow((centroid4[0] - array_data_train[i][0]), 2)) + (
math.pow((centroid4[1] - array_data_train[i][1]), 2)))
jarak5 = math.sqrt((math.pow((centroid5[0] - array_data_train[i][0]), 2)) + (
math.pow((centroid5[1] - array_data_train[i][1]), 2)))
#-- MENENTUKAN MINIMUM DISTANCE --
if jarak1 < jarak2 and jarak1 < jarak3 and jarak1 < jarak4 and jarak1 < jarak5 :
cluster[i] = 1
elif jarak2 < jarak1 and jarak2 < jarak3 and jarak2 < jarak4 and jarak2 < jarak5 :
cluster[i] = 2
elif jarak3 < jarak1 and jarak3 < jarak2 and jarak3 < jarak4 and jarak3 < jarak5 :
cluster[i] = 3
elif jarak4 < jarak1 and jarak4 < jarak2 and jarak4 < jarak3 and jarak4 < jarak5 :
cluster[i] = 4
elif jarak5 < jarak1 and jarak5 < jarak2 and jarak5 < jarak3 and jarak5 < jarak4 :
cluster[i] = 5
centro1 = [0.0, 0.0]
centro2 = [0.0, 0.0]
centro3 = [0.0, 0.0]
centro4 = [0.0, 0.0]
centro5 = [0.0, 0.0]
total1 = 0
total2 = 0
total3 = 0
total4 = 0
total5 = 0
# -- MENENTUKAN CENTROID BARU --
for i in range(len(array_data_train)):
if (cluster[i] == 1):
centro1[0] = array_data_train[i][0] + centro1[0]
centro1[1] = array_data_train[i][1] + centro1[1]
total1 = total1 + 1
elif (cluster[i] == 2):
centro2[0] = array_data_train[i][0] + centro2[0]
centro2[1] = array_data_train[i][1] + centro2[1]
total2 = total2 + 1
elif (cluster[i] == 3):
centro3[0] = array_data_train[i][0] + centro3[0]
centro3[1] = array_data_train[i][1] + centro3[1]
total3 = total3 + 1
elif (cluster[i] == 4):
centro4[0] = array_data_train[i][0] + centro4[0]
centro4[1] = array_data_train[i][1] + centro4[1]
total4 = total4 + 1
elif (cluster[i] == 5):
centro5[0] = array_data_train[i][0] + centro5[0]
centro5[1] = array_data_train[i][1] + centro5[1]
total5 = total5 + 1
centro1[0] = centro1[0] / total1
centro1[1] = centro1[1] / total1
centro2[0] = centro2[0] / total2
centro2[1] = centro2[1] / total2
centro3[0] = centro3[0] / total3
centro3[1] = centro3[1] / total3
centro4[0] = centro4[0] / total4
centro4[1] = centro4[1] / total4
centro5[0] = centro5[0] / total5
centro5[1] = centro5[1] / total5
centroid1 = [centro1[0], centro1[1]]
centroid2 = [centro2[0], centro2[1]]
centroid3 = [centro3[0], centro3[1]]
centroid4 = [centro4[0], centro4[1]]
centroid5 = [centro5[0], centro5[1]]
print(centroid1,centroid2,centroid3,centroid4,centroid5)
#-- MENAMPILKAN HASIL ALGORITMA K-MEANS TERHADAP DATA TRAIN PADA SCATTER PLOT --
plt.scatter(array_data_train[:,0],array_data_train[:,1],c=cluster)
plt.scatter(centroid1[0],centroid1[1],c='r')
plt.scatter(centroid2[0],centroid2[1],c='r')
plt.scatter(centroid3[0],centroid3[1],c='r')
plt.scatter(centroid4[0],centroid4[1],c='r')
plt.scatter(centroid5[0],centroid5[1],c='r')
plt.title('Data Train')
plt.show()
# -- MELAKUKAN ALGORITMA K-MEANS SEBANYAK ITERASI UNTUK SETIAP DATA TEST --
cluster_test = [0 for i in range(len(array_data_test))]
for iterasi in range(13):
for i in range(len(array_data_test)):
jarak1 = math.sqrt((math.pow((centroid1[0] - array_data_test[i][0]), 2)) + (
math.pow((centroid1[1] - array_data_test[i][1]), 2)))
jarak2 = math.sqrt((math.pow((centroid2[0] - array_data_test[i][0]), 2)) + (
math.pow((centroid2[1] - array_data_test[i][1]), 2)))
jarak3 = math.sqrt((math.pow((centroid3[0] - array_data_test[i][0]), 2)) + (
math.pow((centroid3[1] - array_data_test[i][1]), 2)))
jarak4 = math.sqrt((math.pow((centroid4[0] - array_data_test[i][0]), 2)) + (
math.pow((centroid4[1] - array_data_test[i][1]), 2)))
jarak5 = math.sqrt((math.pow((centroid5[0] - array_data_test[i][0]), 2)) + (
math.pow((centroid5[1] - array_data_test[i][1]), 2)))
#-- MINIMUM DISTANCE --
if jarak1 < jarak2 and jarak1 < jarak3 and jarak1 < jarak4 and jarak1 < jarak5 :
cluster_test[i] = 1
elif jarak2 < jarak1 and jarak2 < jarak3 and jarak2 < jarak4 and jarak2 < jarak5 :
cluster_test[i] = 2
elif jarak3 < jarak1 and jarak3 < jarak2 and jarak3 < jarak4 and jarak3 < jarak5 :
cluster_test[i] = 3
elif jarak4 < jarak1 and jarak4 < jarak2 and jarak4 < jarak3 and jarak4 < jarak5 :
cluster_test[i] = 4
elif jarak5 < jarak1 and jarak5 < jarak2 and jarak5 < jarak3 and jarak5 < jarak4 :
cluster_test[i] = 5
#-- MENAMPILKAN HASIL ALGORITMA K-MEANS TERHADAP DATA TEST PADA SCATTER PLOT --
plt.scatter(array_data_test[:,0],array_data_test[:,1],c=cluster_test)
plt.scatter(centroid1[0],centroid1[1],c='r')
plt.scatter(centroid2[0],centroid2[1],c='r')
plt.scatter(centroid3[0],centroid3[1],c='r')
plt.scatter(centroid4[0],centroid4[1],c='r')
plt.scatter(centroid5[0],centroid5[1],c='r')
plt.title('Data Test')
plt.show()
# with open('hasil_data_test.txt', 'w') as file:
# file.write(" -- Klaster Hasil Prediksi Data Test -- "+'\n')
# file.write(" -- Fero Resyanto 1301154318 IF-39-10 -- "+'\n\n')
# for loop in range(len(array_data_test)):
# file.write(str(array_data_test[loop])+' = '+str(cluster_test[loop])+'\n')
|
[
"feresyan@gmail.com"
] |
feresyan@gmail.com
|
54e601ebf3697249bf0b2afad8187f7eebac4479
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/same_eye/child/woman/bad_day_or_child/child/old_day.py
|
da6619113a204fd5ce1fd7ce9571bcdf80c4ec4c
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
#! /usr/bin/env python
def right_group(str_arg):
public_work(str_arg)
print('company_and_own_hand')
def public_work(str_arg):
print(str_arg)
if __name__ == '__main__':
right_group('child')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
0cc866e1f93de105ce2c8f843a0d3923b010f56a
|
fd07d6a65cb7fc39d108fb4c6b795019c3ae0158
|
/KaggleProjects/PredictingHeartFailure/src/data/PlotPrecisionRecall.py
|
cdeb4178188547cfbb04806f8fbb3b600f2da5c5
|
[] |
no_license
|
Aisling-C/DataScienceWork
|
82983d46c2f34e8bea982131fd08fc052f4c8e6a
|
1e00945ce64e6515b6d794d450cf9ba290da93a2
|
refs/heads/main
| 2023-07-27T04:39:25.498154
| 2021-09-01T20:10:58
| 2021-09-01T20:10:58
| 359,247,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
def Plot_PR(Model, X_train, y_train, X_test, y_test, title, auc, precision_recall_curve, np, plt, figsz):
np.seterr(divide='ignore', invalid='ignore')
plt.subplots(figsize=figsz)
# Plot no skill line
plt.plot([0, 1], [0, 0], linestyle='--', label='No Skill', c='orange')
# Train Data
yhat = Model.predict_proba(X_train)
yhat = yhat[:, 1]
precision, recall, thresholds = precision_recall_curve(y_train, yhat)
fscore = (2 * precision * recall) / (precision + recall)
ix = np.argmax(fscore)
plt.scatter(recall, precision, marker='.', color='red', label='Train', s=10)
plt.scatter(recall[ix], precision[ix], marker='o', color='darkred', label='Best Train', s=100)
auc_train = round(auc(recall, precision), 3)
prob=thresholds[ix]
# Print Statement
print('Best Train Threshold=%f, F-Score=%.3f' % (thresholds[ix], fscore[ix]))
# Test Data
yhat = Model.predict_proba(X_test)
yhat = yhat[:, 1]
precision, recall, thresholds = precision_recall_curve(y_test, yhat)
fscore = (2 * precision * recall) / (precision + recall)
ix = np.argmax(fscore)
plt.scatter(recall, precision, marker='.', color='blue', label='Test', s=10)
plt.scatter(recall[ix], precision[ix], marker='^', color='darkblue', label='Best Test', s=100)
auc_test = round(auc(recall, precision), 3)
# Print Statements
print('Best Test Threshold=%f, F-Score=%.3f' % (thresholds[ix], fscore[ix]))
print('Train AUC: {} Test AUC: {}'.format(auc_train, auc_test))
# General Plotting
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall Curve')
plt.legend()
plt.title(title)
plt.show()
return prob
|
[
"amcasey177@gmail.com"
] |
amcasey177@gmail.com
|
c116410549f3163872e603eed3a4ac4b39ce91c7
|
9725c13591c1a78b2e1eaf28b742556ecabfbf17
|
/HAR_DATASET/models_code/latem.py
|
626fda65e9f4c4108b56bace13db4a3d21d0919d
|
[] |
no_license
|
amanshreshtha1998/Zero-Shot-Learning-on-Sensor-Data
|
3803789a9fa653a490054021b2993200706aa888
|
9c575fc72d28b93775e7d5468a9dafa2da1c848b
|
refs/heads/master
| 2020-08-26T11:51:22.118377
| 2019-10-23T08:37:07
| 2019-10-23T08:37:07
| 216,988,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,755
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 07:09:04 2018
@author: RAJDEEP PAL

LatEm (Latent Embeddings) zero-shot learning on HAR sensor features.
Loads per-class feature CSVs and the class-attribute matrix used below.
"""
import os
import pandas as pd
import numpy as np
from numpy.linalg import inv
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score, recall_score, f1_score
import itertools
import matplotlib.pyplot as plt
# Experiment constants: 6 activity classes, 300-d attribute vectors.
n_cls = 6
all_cls = [0, 1, 2, 3, 4, 5]
n_att = 300
seed = 0  # NOTE(review): declared but never fed to np.random — runs are not reproducible
#%% LOAD DATASET
# One CSV per activity class; each row is a feature vector for one window.
directory = 'F:/year 3/zsl/HAR_DATASET/extracted_features/final_extracted_features_32'
arr = os.listdir(directory)
#feature_names = ['maxX', 'minX', 'avgX', 'stdX', 'slopeX', 'zcrX', 'maxY','minY','avgY','stdY', 'slopeY', 'zcrY', 'maxZ', 'minZ', 'avgZ', 'stdZ', 'slopeZ', 'zcrZ', 'maxACC', 'minACC', 'avgACC', 'stdACC', 'XYcorr', 'YZcorr','ZXcorr', 'energy']
#feature_names = ['0', '1', '2', '3', '4', '5', '6','7','8','9', '10', '11', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24','25', '26', '27', '28', '29', '30', '31']
feature_names = list(range(0, 25))
path = directory+'/'+arr[0]
data = []
for file_name in arr:
    path = directory + '/' + file_name
    df = pd.read_csv(path, index_col = False, names = feature_names)
    data.append(df)
    print (df.shape)
print (data[0])
#%% ACTIVITY ATTRIBUTE MATRIX - GLOVE
# F:\year 2\hpg\project\activity_attribute_matrix.csv
# Columns are class names; each column is that class's attribute embedding.
class_names = ['walking', 'walking_upstairs', 'walking_downstairs', 'sitting', 'standing', 'laying']
aam = pd.read_csv('F:/year 3/zsl/HAR_DATASET/activity_attribute_matrix300.csv')
print (aam.shape)
#%%
def argmaxOverMatrices(x, s, W):
    """Score x against attribute vector s under every latent map in W.

    Computes x^T @ W[k] @ s for each k and returns the pair
    (best_score, best_index) for the map that maximises it.
    """
    n_maps = len(W)
    score = np.zeros(n_maps)
    best_score = -1e12  # sentinel lower than any realistic score
    best_idx = -1
    for k in range(n_maps):
        score[k] = np.dot(np.matmul(x.T, W[k]), s)
        if score[k] > best_score:
            best_score, best_idx = score[k], k
    return (best_score, best_idx)
#%%
def get_mapping(ts_cls):
    """Train LatEm latent mappings W on all classes except *ts_cls*.

    Builds the training matrix X, attribute matrix S and labels from the
    module globals `data`, `aam`, `class_names`, `all_cls`, then runs the
    LatEm ranking-SGD update and returns the dict of K weight matrices.
    """
    X = pd.DataFrame()
    S = pd.DataFrame()
    labels = pd.DataFrame()
    #print (ts_cls)
    # Training classes = everything not held out for zero-shot testing.
    tr_cls = [x for x in all_cls if (x not in ts_cls)]
    #print (tr_cls)
    #print (tr_cls)
    for i, cls in enumerate(tr_cls):
        #print ('class', cls)
        df = data[cls]
        attribute_vec = aam[class_names[cls]]
        m1, d = df.shape
        y = np.ones((m1, 1)) * cls
        #y[:, i] = 1
        y_df = pd.DataFrame(y)
        #Y = Y.append(y_df, ignore_index = True)
        # print (m)
        S = S.join(attribute_vec, how = 'right')
        X = X.append(df, ignore_index = True)
        labels = labels.append(y_df, ignore_index = True)
    X = np.array(X)
    S = np.array(S)
    labels = np.array(labels)
    (m, d) = X.shape
    (a, z) = S.shape
    #print (m, d)
    #print (a, z)
    #print (labels.shape)
    n_train = X.shape[0]
    #print (n_train)
    n_class = S.shape[1]
    # Initialization: K latent maps, scaled by 1/sqrt(d).
    W = {}
    K = 10
    for i in range(0,K):
        W[i] = 1.0/np.sqrt(X.shape[1]) * np.random.rand(X.shape[1], S.shape[0])
    n_epoch = 100
    i=0
    alpha = 0.05  # SGD learning rate
    # SGD
    # NOTE(review): the inner loop starts at 1, so perm[0] is skipped
    # every epoch — presumably unintentional; confirm.
    for e in range(0,n_epoch):
        perm = np.random.permutation(n_train)
        for i in range(1,n_train):
            # A random image from a row
            ni = perm[i]
            best_j = -1
            # Allocate the ground truths to picked_y
            picked_y = labels[ni]
            # If they're same
            while(picked_y==labels[ni]):
                # Randomly generate again until those are different
                random_index = np.random.randint(n_class)
                picked_y = tr_cls[random_index]
            # If those are different
            # Random labeling
            picked_y = random_index
            x = X[ni, :].T.reshape(d, 1)
            # Column of S holding the ground-truth class attributes.
            col = tr_cls.index( int(labels[ni]) )
            # NOTE(review): the while-loop rejects by class id but picked_y is
            # then reset to a column index — when ids and indices differ this
            # can still collide with the true class; verify.
            if (picked_y == col):
                print ('corrrect', picked_y, col)
            [max_score, best_j] = argmaxOverMatrices(x, s=S[:,picked_y], W=W)
            # Grounded truth labeling
            #print (S[:, col] )
            [best_score_yi, best_j_yi] = argmaxOverMatrices(x, S[:,col], W)
            #print (col)
            #print ( S[:, col].shape , S[:, picked_y].shape)
            # Ranking hinge update: push the true class above the wrong one.
            if(max_score + 1 > best_score_yi):
                if(best_j==best_j_yi):
                    W[best_j] = W[best_j] - alpha * np.matmul(x,(S[:,picked_y] - S[:,col]).reshape(1, n_att))
                else:
                    W[best_j] = W[best_j] - alpha * np.matmul(x , S[:,picked_y].reshape(1, n_att))
                    W[best_j_yi] = W[best_j_yi] + alpha * np.matmul(x , S[:,col].reshape(1, n_att) )
    return W
#%%
def evaluate(X, W, S, ts_cls, true_cls):
    """Predict a class index for each row of X under the LatEm model.

    For each latent map W[k], scores every sample against every column
    of S, keeps the per-sample best map, and returns the winning class
    indices as a float array of length X.shape[0].
    (`ts_cls` and `true_cls` are accepted for interface compatibility
    but not used here.)
    """
    n_samples = X.shape[0]
    n_maps = len(W)
    per_map_best = np.zeros((n_maps, n_samples))
    per_map_label = np.zeros((n_maps, n_samples))
    for k in range(n_maps):
        # Similarity of every sample to every class under map k.
        sim = np.matmul(np.matmul(X, W[k]), S)
        per_map_best[k, :] = np.amax(sim, axis=1)
        per_map_label[k, :] = np.argmax(sim, axis=1)
    # For each sample, take the label from whichever map scored highest.
    winning_map = np.argmax(per_map_best, axis=0)
    y_pred = np.zeros(n_samples)
    for i in range(n_samples):
        y_pred[i] = per_map_label[winning_map[i], i]
    return y_pred
#print (data[0].shape)
#%% LEAVE 2 CLASS OUT CROSS VALIDATION
# For every pair (i, j) of classes: train on the other four, then test
# zero-shot classification between i and j. Accumulates binary metrics
# and a pooled multi-class confusion matrix.
a = 0
p = 0
r = 0
f1 = 0
count = 0
y_macro_true = pd.DataFrame()
y_macro_pred = pd.DataFrame()
y_macro_true = np.array(y_macro_true)
y_macro_pred = np.array(y_macro_pred)
for i in range(0, n_cls):
    X_test_i = data[i]
    for j in range(i+1, n_cls):
        y_true = pd.DataFrame()
        y_pred = pd.DataFrame()
        y_true = np.array(y_true)
        y_pred = np.array(y_pred)
        X_test_j = data[j]
        #X_test.append(X_test_j, ignore_index = True)
        count += 1
        ts_cls = [i, j]
        #print (ts_cls)
        V = get_mapping(ts_cls)
        # Attribute matrix restricted to the two held-out classes.
        S = pd.DataFrame()
        for cls in ts_cls:
            attribute_vec = aam[class_names[cls]]
            S = S.join(attribute_vec, how = 'right')
        S = np.array(S)
        # Class i samples get binary label 0, class j samples label 1.
        pred = evaluate(X_test_i, V, S, ts_cls, 0)
        #print (i, pred)
        y_pred = np.append(y_pred, pred)
        y_true = np.append(y_true, np.zeros(pred.shape, dtype = int))
        pred = evaluate(X_test_j, V, S, ts_cls, 1)
        y_pred = np.append(y_pred, pred)
        y_true = np.append(y_true, np.ones(pred.shape, dtype = int))
        #print (j, pred)
        a += accuracy_score(y_true, y_pred)
        p += precision_score(y_true, y_pred)
        r += recall_score(y_true, y_pred)
        f1 += f1_score(y_true, y_pred)
        # Remap binary labels back to the original class ids so the
        # pooled arrays support a 6-class report below.
        y_true[y_true == 0] = i
        y_true[y_true == 1] = j
        y_pred[y_pred == 0] = i
        y_pred[y_pred == 1] = j
        y_macro_true = np.append(y_macro_true, y_true)
        y_macro_pred = np.append(y_macro_pred, y_pred)
        print (count)
#accuracy = accuracy / (n_cls - 1)
#att_acc = att_acc / count
#precision = precision / count
#recall = recall / count
# Averages over all C(6,2)=15 class pairs.
print ( a/count, p/count, r/count, f1/count )
#print (accuracy)
#print (accuracy.mean(axis = 0) * 100)
#print (att_acc)
# THE END
#%%
# Pooled multi-class report over every pair's remapped predictions.
y_true = y_macro_true
y_pred = y_macro_pred
print (y_true.shape, y_pred.shape, accuracy_score(y_true, y_pred))
print (classification_report(y_true, y_pred, target_names = class_names))
cnf = confusion_matrix(y_true, y_pred)
print (cnf)
#%%
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Oranges):
    """Render confusion matrix *cm* as a colour-mapped image.

    Tick labels come from the module-level `class_names`; each cell is
    annotated with its count.
    """
    plt.figure(figsize = (15,15))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(cm.shape[1])
    plt.xticks(tick_marks, rotation=45)
    ax = plt.gca()
    ax.set_xticklabels(class_names)
    ax.set_yticklabels(class_names)
    plt.yticks(tick_marks)
    # Switch annotation colour at half the max count so text stays legible.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.1f'),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    #plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Plot the pooled confusion matrix from the cross-validation above.
np.set_printoptions(precision=1)
fig, ax = plt.subplots()
plot_confusion_matrix(cnf)
#%%
# Quick sanity check: train one split and inspect the mapping shapes.
ts_cls = [0, 3]
W = get_mapping(ts_cls)
K = len(W)
for i in range(0, K):
    print (W[i].shape)
#%%
# Scratch cell: demonstrates that the SGD loop starting at index 1
# never visits perm[0].
n_train = 5
perm = np.random.permutation(n_train) # PEMUTATION = [0 3 1 4 2]
print (perm)
for i in range(1,n_train):
    print (i)
#%%
def complete():
    """Train LatEm on ALL classes (no hold-out) and report accuracy.

    Largely duplicates get_mapping() but with n_epoch=15, alpha=0.01,
    and an in-function evaluation that computes mean per-class accuracy
    via sparse indicator matrices. Returns the learned mappings W.
    """
    from scipy import sparse
    X = pd.DataFrame()
    S = pd.DataFrame()
    labels = pd.DataFrame()
    #print (ts_cls)
    tr_cls = all_cls
    #print (tr_cls)
    #print (tr_cls)
    for i, cls in enumerate(tr_cls):
        #print ('class', cls)
        df = data[cls]
        attribute_vec = aam[class_names[cls]]
        m1, d = df.shape
        y = np.ones((m1, 1)) * cls
        #y[:, i] = 1
        y_df = pd.DataFrame(y)
        #Y = Y.append(y_df, ignore_index = True)
        # print (m)
        S = S.join(attribute_vec, how = 'right')
        X = X.append(df, ignore_index = True)
        labels = labels.append(y_df, ignore_index = True)
    X = np.array(X)
    S = np.array(S)
    labels = np.array(labels)
    (m, d) = X.shape
    (a, z) = S.shape
    #print (m, d)
    #print (a, z)
    #print (labels.shape)
    n_train = X.shape[0]
    #print (n_train)
    n_class = S.shape[1]
    # Initialization
    W = {}
    K = 10
    for i in range(0,K):
        W[i] = 1.0/np.sqrt(X.shape[1]) * np.random.rand(X.shape[1], S.shape[0])
    n_epoch = 15
    i=0
    alpha = 0.01
    # SGD
    for e in range(0,n_epoch):
        perm = np.random.permutation(n_train)
        for i in range(1,n_train):
            # A random image from a row
            ni = perm[i]
            best_j = -1
            # Allocate the ground truths to picked_y
            picked_y = labels[ni]
            # If they're same
            while(picked_y==labels[ni]):
                # Randomly generate again until those are different
                random_index = np.random.randint(n_class)
                picked_y = tr_cls[random_index]
            # If those are different
            # Random labeling
            picked_y = random_index
            x = X[ni, :].T.reshape(d, 1)
            col = tr_cls.index( int(labels[ni]) )
            if (picked_y == col):
                print ('wrong', picked_y, col)
            [max_score, best_j] = argmaxOverMatrices(x, s=S[:,picked_y], W=W)
            # Grounded truth labeling
            #print (S[:, col] )
            [best_score_yi, best_j_yi] = argmaxOverMatrices(x, S[:,col], W)
            #print (col)
            #print ( S[:, col].shape , S[:, picked_y].shape)
            # Ranking hinge update (same rule as get_mapping).
            if(max_score + 1 > best_score_yi):
                if(best_j==best_j_yi):
                    W[best_j] = W[best_j] - alpha * np.matmul(x,(S[:,picked_y] - S[:,col]).reshape(1, n_att))
                else:
                    W[best_j] = W[best_j] - alpha * np.matmul(x , S[:,picked_y].reshape(1, n_att))
                    W[best_j_yi] = W[best_j_yi] + alpha * np.matmul(x , S[:,col].reshape(1, n_att) )
    # ---- Evaluation on the training data itself ----
    all_scores = []
    n_samples = m
    # NOTE(review): n_class is overwritten to 14 here although only 6
    # activity classes exist — verify against the original LatEm code.
    n_class = 14
    K = len(W)
    scores = {}
    max_scores = np.zeros((K,n_samples))
    tmp_label = np.zeros((K,n_samples))
    for j in range(K):
        projected_X = np.matmul(X , W[j])
        scores[j] = np.matmul(projected_X, S)
        # Maxima along the second axis
        [max_scores[j,:], tmp_label[j,:]] = [np.amax(scores[j], axis = 1),np.argmax(scores[j],axis=1)+1]
    # Maxima between Ws: Weight
    [best_scores, best_idx] = [np.amax(max_scores, axis=0),np.argmax(max_scores,axis=0)]
    predict_label = np.zeros(n_samples)
    for i in range(n_samples):
        predict_label[i] = tmp_label[best_idx[i],i]
    # ground truth labels
    # NOTE(review): the data vector np.repeat(1, n_class) has n_class
    # entries but the index arrays have n_samples — confirm this builds
    # the intended indicator matrix.
    label_mat = sparse.csr_matrix((np.repeat(1,n_class),(np.squeeze(labels.reshape(1,m))-1,np.arange(n_samples))),shape=(n_class,n_samples))
    predict_mat = sparse.csr_matrix((np.repeat(1,n_class),(predict_label-1,np.arange(n_samples))),shape=(n_class,n_samples))
    # predicted labels
    conf_mat = sparse.csr_matrix.dot(label_mat,np.transpose(predict_mat))
    conf_mat_diag = sparse.csr_matrix.diagonal(conf_mat)
    # a kind of classes
    n_per_class = np.squeeze(np.array(np.sum(sparse.csr_matrix.transpose(label_mat),0)))
    # mean class accuracy
    mean_class_accuracy = np.sum(conf_mat_diag / n_per_class) / n_class
    print (mean_class_accuracy)
    return W
#%%
a = complete()
|
[
"aman.shreshtha.ece16@itbhu.ac.in"
] |
aman.shreshtha.ece16@itbhu.ac.in
|
c82cb77e6050347ad0b411272ad1fa32bc6d46fc
|
913fcaa8e193ae7541e1711e91f26d43a660f631
|
/blog/urls.py
|
532f9b102cdc01c61be9135672b17c068a01c4df
|
[] |
no_license
|
Brian10257/fabico
|
af3fd24d3f7544e0c82009fd394a0b6a32109ff7
|
fe9ff6e71f8bd09a7e87b41cddf89cdf4be32b21
|
refs/heads/master
| 2021-02-10T13:12:00.764732
| 2020-05-01T12:57:48
| 2020-05-01T12:57:48
| 244,385,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
from django.urls import path
from . import views
# URL routes for the blog app.
urlpatterns = [
    # Post list view.
    path('blog/', views.BlogList.as_view(), name = 'blog_post'),
    # Post detail, addressed by slug.
    path('<slug>/', views.blog_detail, name = 'blog_detail'),
]
|
[
"47368577+Brian10257@users.noreply.github.com"
] |
47368577+Brian10257@users.noreply.github.com
|
8f9c60e39b2117f7b30856ca4162d699fd5efccb
|
d570ec5184acda8fb3c283c19c58c97c6588c9a3
|
/pymon/pymon.py
|
68b4acfdbc04560b913d2b682765c7f6420713f9
|
[
"MIT"
] |
permissive
|
vishalpentakota/pymon
|
86f120ce451f3ecc56f83e16cc5c957daaa9e20a
|
864cc25d2b592b6118f090c62c8138b97ddb18ad
|
refs/heads/master
| 2020-11-30T00:26:14.992591
| 2016-07-20T19:36:21
| 2016-07-20T19:36:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
#!/usr/bin/env python
# Entry point: delegates all work to the package's run() function.
from lib.run import run
if __name__ == "__main__":
    run()
|
[
"curtis99877@gmail.com"
] |
curtis99877@gmail.com
|
51672d53fe9a21047bf76edc02739efeac589421
|
768a3b04ba1f99284460b6d8c10a5d8016acf371
|
/reptile/example/test/test.py
|
8c688bc25fd2e4ac2d6516e346f78d6c37d6a09b
|
[] |
no_license
|
qiualu/GrowProject
|
26777cf6c36bb8e106c3951792d199ee75f6732d
|
c015d67cb9b71be579f6bd122e83be878f2016ff
|
refs/heads/master
| 2021-09-05T14:04:57.661275
| 2018-01-28T14:54:11
| 2018-01-28T14:54:11
| 110,330,494
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
import os
# Print the current working directory and the path of its 'img' subfolder.
print(os.getcwd())
print(os.path.join(os.getcwd(),'img'))
|
[
"1101938064@qq.com"
] |
1101938064@qq.com
|
797700159fc90dbd457a9eaca90349244cf4b0a3
|
13295a4fb8afc22618b85ff46fc528c932ade795
|
/summarizer.py
|
544aa6c0caa3c05cebbef8de703edb22e8b83810
|
[] |
no_license
|
AdlyTempleton/Exploring-Sentence-Vector-Spaces-Through-Automatic-Summarization
|
38b0ddf73ae47ac9e57b1f186027e283d86fcfd7
|
2a3dfd167b6f341732a2920305d66b3153932141
|
refs/heads/master
| 2020-04-10T15:17:24.929124
| 2018-12-10T02:17:46
| 2018-12-10T02:17:46
| 161,104,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,066
|
py
|
import functools
import nltk
import numpy as np
from nltk.stem.porter import *
from sklearn.decomposition import PCA
import summarizer_modules
# nltk.download('punkt')
# nltk.download('brown')
def keywords(v, str2vec, selector, wc, args):
    """Select keyword phrases from corpus *v* and return them stemmed.

    Mirrors summarize() but operates on v.candidates (phrases) instead of
    sentences. Mutates *args* in place (sets 'semeval' and 'docvec').
    """
    stemmer = PorterStemmer()
    args['semeval'] = True
    matched_phrases = [(phrase, str2vec(phrase)) for phrase in v.candidates]
    # matched_phrases = [(phrase, emb) for phrase, emb in zip(v.candidates, v.emb[str2vec.__name__])]
    # Check for zero-vectors
    matched_phrases = [x for x in matched_phrases if not np.linalg.norm(x[1]) == 0.0]
    vecs = [x[1] for x in matched_phrases]
    # Document vector: mean of phrase vectors, or embedding of the full text.
    if 'docvec_avg' in args and args['docvec_avg']:
        args['docvec'] = np.mean(np.stack(vecs, axis=1), axis=0)
    else:
        args['docvec'] = str2vec(' '.join(v.text))
    summary = selector(matched_phrases, v.text, str2vec, wc, args)
    # Trim the final phrase to keep the summary at 100 words.
    # NOTE(review): if the summary is already <= 100 words, excess_words <= 0
    # and [:-excess_words] truncates the last phrase incorrectly — verify.
    excess_words = len(' '.join(summary).split()) - 100
    summary[-1] = ' '.join(summary[-1].split()[:-excess_words])
    print(len(' '.join(summary).split()))
    # assert len(summary) == len(set(summary))
    return [stemmer.stem(phrase) for phrase in summary]
def summarize(v, str2vec, selector, wc, args):
    """
    Creates a summary of a document
    :param v: A Corpus object
    :param str2vec: A function which turns documents into vectors
    With the following ordered parameters: string, args
    :param selector:
    A function which, given sentence embeddings, selects a summary
    With the following ordered parameters: matched_sents, document, str2vec, wc, args
    matched_sents: A list of tuples, (sentence, embedding)
    document: A document, as a list of sentences
    str2vec: A str2vec function, as above
    wc: The maximum word count of the summary
    args: Additional model-specific parameters
    :param wc: The maximum word count of the summary
    :return: A summary, as a list of sentences
    """
    matched_sents = []
    # Clone args, allow modification (ie str2vec_arora_true) to args from functions
    args = dict(args)
    # Add additional data
    args['text'] = v.text
    args['v'] = v
    # Vector for sentences: reuse cached embeddings when available.
    if str2vec.__name__ in v.emb.keys() and not args.get('skip_cache', False):
        matched_sents = [(sent, emb) for sent, emb in zip(v.text, v.emb[str2vec.__name__])]
    else:
        matched_sents = [(sent, str2vec(sent, args)) for sent in v.text]
    # Check for zero-vectors
    matched_sents = [x for x in matched_sents if not np.linalg.norm(x[1]) == 0.0]
    vecs = [x[1] for x in matched_sents]
    # PCA transform: optionally project all embeddings, and wrap str2vec
    # so later calls are projected the same way.
    if 'pca' in args and args['pca']:
        pca = PCA(n_components=.5, whiten=True)
        vecs = [x[1] for x in matched_sents]
        pca.fit(vecs)
        vecs = pca.transform(vecs)
        matched_sents = [(sent, trans) for ((sent, vec), trans) in zip(matched_sents, vecs)]
        str2vec = wrap_for_pca(str2vec, pca)
    if 'docvec_avg' in args and args['docvec_avg']:
        args['docvec'] = summarizer_modules.normalizeVec(np.mean(np.stack(vecs, axis=1), axis=1))
    else:
        if str2vec.__name__ in v.emb.keys() and not args.get('skip_cache', False):
            args['docvec'] = v.emb[str2vec.__name__][-1]
        else:
            args['docvec'] = str2vec(' '.join(v.text), args)
    # Check for duplicates
    summary = selector(matched_sents, v.text, str2vec, wc, args)
    #Cut to 100 words
    # NOTE(review): if the summary is already <= 100 words, excess_words <= 0
    # and the slice [:-excess_words] empties/garbles the last sentence — verify.
    excess_words = len(' '.join(summary).split()) - 100
    summary[-1] = ' '.join(summary[-1].split()[:-excess_words])
    #assert len(summary) == len(set(summary))
    return summary
def wrap_for_pca(str2vec, pca):
    # Return a str2vec-compatible callable whose output is additionally
    # projected through the fitted *pca* (delegates to str2vec_from_pca).
    return functools.partial(str2vec_from_pca, str2vec, pca)
def str2vec_from_pca(str2vec, pca, s, args=None):
    """Embed string *s* with *str2vec* and project it through a fitted PCA.

    The original signature used a mutable default (``args={}``), which is
    shared across calls; ``None`` with an in-function fallback is
    behaviourally identical for callers but safe.
    """
    if args is None:
        args = {}
    vec = str2vec(s, args)
    # PCA.transform expects a 2-D (n_samples, n_features) array.
    vec = vec.reshape(1, -1)
    return pca.transform(vec)
if __name__ == "__main__":
    # Minimal smoke entry point: read a document and tokenize it.
    raw = open('input.txt').read()
    # Split sentences
    sents = nltk.sent_tokenize(raw)
    # Frequency distribution from brown corpus
    # Load google news word vectors
    print("Loaded data")
|
[
"adlytempleton@gmail.com"
] |
adlytempleton@gmail.com
|
9fce78c33599b043ab1153393dec36264b5bc2e4
|
f6705b45ee5577890f88a1c35cae8919644a74af
|
/docs/conf.py
|
2e06c2b343f8b43d0f780b92cf7c7a56216b89f8
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
LTHODAVDOPL/TensorNetwork
|
f51530ba8495718b2bf1c080903f5b67c918506a
|
3fdad23e3730c097960b20c8b35e6ebc9162fa56
|
refs/heads/master
| 2021-01-05T10:09:09.811264
| 2020-02-13T06:54:58
| 2020-02-13T06:54:58
| 240,987,377
| 1
| 0
|
Apache-2.0
| 2020-02-17T00:12:12
| 2020-02-17T00:12:11
| null |
UTF-8
|
Python
| false
| false
| 1,739
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'TensorNetwork'
copyright = '2019, The TensorNetwork Authors'
author = 'The TensorNetwork Authors'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Root document of the toctree.
master_doc = 'index'
# Bare `name` references resolve as Python objects.
default_role = 'py:obj'
|
[
"noreply@github.com"
] |
LTHODAVDOPL.noreply@github.com
|
e7159178feb44294ca65f2950df97849910ba3bd
|
fcd8bd855aeba3da232a50d5877d8867a3a6e99d
|
/unilogin/urls.py
|
cde27b5c4d20eb90534801b6ffff0eb39bf5b313
|
[] |
no_license
|
youngrok/actiontrac
|
89f05b6d04e32bcb9cb2db35099b60dca51022b3
|
11d0ac2e8f1fefd93a9fcf767dcfaa98443b3f43
|
refs/heads/master
| 2020-05-17T11:07:44.131946
| 2014-04-28T10:01:12
| 2014-04-28T10:01:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from django.conf.urls import patterns, include, url
from djangox.route import discover_controllers
from django.contrib import admin
# Register all installed apps' admin modules.
admin.autodiscover()
urlpatterns = patterns('',
    # python-social-auth login/complete/disconnect routes.
    url('', include('social.apps.django_app.urls', namespace='social')),
    # Auto-routed controllers for this app.
    (r'', discover_controllers('unilogin.controllers')),
)
|
[
"pak.youngrok@gmail.com"
] |
pak.youngrok@gmail.com
|
2ee6997660492e3b2557d99cd0e6ab109b6f461e
|
5fa7f016dbcc9f23bf5676c9988e14fb13852231
|
/Beden Kitle İndeksi Kolay Hesaplama Programı.py
|
83adcfa1bd236ec6763b4032781e6445a9abd580
|
[] |
no_license
|
ibrahimek76/Python-le-Kodlama-Denemelerim
|
cb9a6241ecacc39a1d54a8c913886a0918d8420a
|
aaadbbdc8543d2851b27833b50c49387bf2bd03d
|
refs/heads/main
| 2023-05-27T04:24:17.308005
| 2021-06-08T07:30:48
| 2021-06-08T07:30:48
| 374,916,340
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
# Simple Body Mass Index (BMI) calculator.
# Spec (from the original assignment comments):
#   BMI < 18.5            -> "Zayıf"        (underweight)
#   18.5 <= BMI < 25      -> "Normal"
#   25 <= BMI < 30        -> "Fazla Kilolu" (overweight)
#   BMI >= 30             -> "Obez"         (obese)
#
# Fixes vs the original:
#   * `int and float(...)` was a no-op boolean expression; plain float() suffices.
#   * BMI must use height in metres; the original divided by cm**2.
#   * The classification compared raw weight `kilo` instead of the BMI.
boy = float(input("Boyunuzu (cm Cinsinden)Giriniz: "))  # height in centimetres
kilo = float(input("Kilonuzu Giriniz:"))                # weight in kilograms
# Convert cm -> m before squaring.
bki = kilo / ((boy / 100) ** 2)
print("Beden Kitle İndeksiniz:", bki)
if (bki < 18.5):
    print("Zayıf ")
elif (bki >= 18.5 and bki < 25):
    print("Normal")
elif (bki >= 25 and bki < 30):
    print("Fazla Kilolu")
else:
    print("Obez")
|
[
"noreply@github.com"
] |
ibrahimek76.noreply@github.com
|
37b94195e8a6c4a0741d7aa6055f129c1a300786
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/08/42/15.py
|
74bfb998ed676426e202d073b69f4fc578a5ccf5
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
import psyco
psyco.full()
def solve(N, M, A):
    """Find lattice points (0,0), (x1,y1), (x2,y2) inside an N x M box
    whose doubled triangle area |x1*y2 - x2*y1| equals A.

    Returns the six coordinates as a space-separated string, or
    "IMPOSSIBLE" when no such triangle exists.
    """
    coords = range(N + 1), range(M + 1)
    for x1 in coords[0]:
        for y1 in coords[1]:
            for x2 in coords[0]:
                for y2 in coords[1]:
                    # Cross product magnitude = twice the triangle area.
                    if abs(x1 * y2 - x2 * y1) == A:
                        return "%d %d %d %d %d %d" % (0, 0, x1, y1, x2, y2)
    return "IMPOSSIBLE"
if __name__ == '__main__':
    # NOTE: Python 2 driver (print statement, raw_input).
    # First line: number of cases; each case line: N M A.
    C = input()
    for case in range(C):
        N, M, A = map(int, raw_input().split(' '))
        print "Case #%d:" % (case + 1), solve(N, M, A)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
ed46f8558aa62b977b86517ac53a60d2af75abf0
|
1f5860d45720bdf798c74308ba37c40d6f3e3bd7
|
/info_search_system/config.py
|
d18a7ca128c0957faabf91aa7ebdc07783eeac8c
|
[] |
no_license
|
Chinoholo0807/Information-Search-System
|
f3f5564b499fcdacc8a6fcd248a71b8042aee81e
|
a55b293ec64334175d3ec3f26ae5db0d5b6e6f99
|
refs/heads/main
| 2023-06-17T02:41:33.257822
| 2021-07-12T05:51:25
| 2021-07-12T05:51:25
| 385,136,610
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
# Topic the search system indexes (one of the values listed below).
data_folder = 'business' # info search system's topic
# business
# culture
# science
# Folder the spider writes scraped documents into.
# NOTE(review): data_folder is 'business' while the spider targets
# 'science' — confirm the mismatch is intentional.
spider_doc_folder = 'science' # spider save folder
topic = 'Sci' # spider topic
# Culture
# Business
# China
# World
# Sports
# Sci
|
[
"likamylike@gmail.com"
] |
likamylike@gmail.com
|
fc3999335a4e17cc41dd8ed64041dbec4b3af4bf
|
9b4fe9c2693abc6ecc614088665cbf855971deaf
|
/103.binary-tree-zigzag-level-order-traversal.py
|
644efcffd6fdfe44a408863524ee55c7b14dff78
|
[
"MIT"
] |
permissive
|
windard/leeeeee
|
e795be2b9dcabfc9f32fe25794878e591a6fb2c8
|
0dd67edca4e0b0323cb5a7239f02ea46383cd15a
|
refs/heads/master
| 2022-08-12T19:51:26.748317
| 2022-08-07T16:01:30
| 2022-08-07T16:01:30
| 222,122,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
#
# @lc app=leetcode id=103 lang=python
#
# [103] Binary Tree Zigzag Level Order Traversal
#
# https://leetcode.com/problems/binary-tree-zigzag-level-order-traversal/description/
#
# algorithms
# Medium (40.48%)
# Likes: 957
# Dislikes: 59
# Total Accepted: 222.2K
# Total Submissions: 530.4K
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# Given a binary tree, return the zigzag level order traversal of its nodes'
# values. (ie, from left to right, then right to left for the next level and
# alternate between).
#
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
#
# 3
# / \
# 9 20
# / \
# 15 7
#
#
#
# return its zigzag level order traversal as:
#
# [
# [3],
# [20,9],
# [15,7]
# ]
#
#
#
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x       # payload value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution(object):
    def zigzagLevelOrder(self, root):
        """Return the zigzag level-order traversal of a binary tree.

        Level 0 reads left-to-right, level 1 right-to-left, and so on.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        result = []
        frontier = [root]
        left_to_right = True
        while frontier:
            values = []
            next_frontier = []
            for node in frontier:
                # Direction only affects where the value is placed.
                if left_to_right:
                    values.append(node.val)
                else:
                    values.insert(0, node.val)
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            result.append(values)
            left_to_right = not left_to_right
            frontier = next_frontier
        return result
# if __name__ == "__main__":
# s = Solution()
# head = TreeNode(3)
# head.left = TreeNode(9)
# head.right = TreeNode(20)
# head.right.left = TreeNode(15)
# head.right.right = TreeNode(7)
# print s.zigzagLevelOrder(head)
|
[
"windard@qq.com"
] |
windard@qq.com
|
b6a8c082903c4779163d196b5ce46348b58a9d92
|
be06759270d816171bc576f973fb536e216aef9a
|
/BioInformatics/NumberToPattern.py
|
9f5be0f6725c83c796aa4356e112f83e27f5b16a
|
[] |
no_license
|
espaciomore/my-code-kata
|
6d6fbeda8ea75813e1c57d45ae1382207e2197fa
|
6c8e1987648350c880e8ab8a038c69608c680cab
|
refs/heads/master
| 2020-12-10T00:31:45.023012
| 2020-10-12T19:35:07
| 2020-10-12T19:35:07
| 18,149,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
from NumberToSymbol import *
def NumberToPattern(index, k):
    """Convert lexicographic *index* back into a length-*k* DNA pattern.

    Recursively peels off the last base-4 digit; each digit maps to a
    nucleotide via NumberToSymbol.
    """
    if k == 1:
        return NumberToSymbol(index)
    quotient, last_digit = divmod(index, 4)
    # Prefix of length k-1 followed by the symbol for the final digit.
    return NumberToPattern(quotient, k - 1) + NumberToSymbol(last_digit)
|
[
"manuel.cerda@introhive.com"
] |
manuel.cerda@introhive.com
|
48bb6753a68c5f833fd7da6923fc77257f9ab6ce
|
2391b76356cfab7ee4802fa029b2407fae529bfa
|
/main.py
|
771139557fc3968ff9ecdf110eb0f3fbd13d5d6b
|
[] |
no_license
|
LencoDigitexer/SomeCloud
|
28f4f56bfb8c91a2c719bf428bda8204517314ee
|
8e1b642c116d1d300521a7577bb5671b23520f19
|
refs/heads/master
| 2020-08-28T15:32:35.240617
| 2019-10-26T16:57:14
| 2019-10-26T16:57:14
| 217,740,911
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
from telethon import TelegramClient, events, sync, connection
# Requests used to create a private channel.
from telethon.tl.functions.channels import CreateChannelRequest, CheckUsernameRequest, UpdateUsernameRequest
from telethon.tl.types import InputChannel, InputPeerChannel
import re  # used to find the channel in the dialog list
# These example values won't work. You must get your own api_id and
# api_hash from https://my.telegram.org, under API Development.
# NOTE(review): credentials and the proxy secret are hard-coded in source —
# they should be moved to config/environment and the committed ones revoked.
api_id = 713781
api_hash = '0c51c4c50d0587d53526c7ee082b3e65'
HaveChannel = False
client = TelegramClient(
    'session_name',
    api_id,
    api_hash,
    # Use one of the available connection modes.
    # Normally, this one works with most proxies.
    connection=connection.ConnectionTcpMTProxyRandomizedIntermediate,
    # Then, pass the proxy details as a tuple:
    # (host name, port, proxy secret)
    #
    # If the proxy has no secret, the secret must be:
    # '00000000000000000000000000000000'
    proxy=('tg-3.rknsosatb.pw', 443, 'dde99993ad3d7146fcf8f3baa789cc62ac')
)
client.start()
def search_channel():
    """Look for the '///SomeCloud///' channel among the user's dialogs
    and create it as a broadcast channel when it does not exist.

    Bug fix: the original assigned `HaveChannel` inside the function,
    which made the name local throughout the body — when the channel was
    not found, `if HaveChannel:` raised UnboundLocalError. A local flag
    initialised up front restores the intended behaviour.
    """
    have_channel = False
    for dialog in client.iter_dialogs():
        allDialog = dialog.name + "\n"
        #print(allDialog)
        if re.search("///SomeCloud///", allDialog):
            have_channel = True
    if have_channel:
        print("Канал уже создан")
    else:
        print("Надо создать канал")
        createdPrivateChannel = client(CreateChannelRequest("///SomeCloud///","FileCloud",megagroup=False))
        print("Канал успешно создан")
|
[
"noreply@github.com"
] |
LencoDigitexer.noreply@github.com
|
4a4b7eaede1687a8380d65b67a911646ed5032d5
|
8336cbd226bd9eebe1472e168c681a09daa6703f
|
/rareserverapi/migrations/0003_auto_20201116_1646.py
|
073a8791b03bb64ec2bbdebd0f5e777b3b6f6576
|
[] |
no_license
|
jmskinne/Rare-API-TalkingHeads
|
6d99bdcc4f8ef4d5f1e35a31ca96e1663e770d95
|
f869c789f046fa2b60c5658d4d27296cc4eff5e3
|
refs/heads/main
| 2023-01-20T03:33:10.053942
| 2020-11-20T16:44:08
| 2020-11-20T16:44:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
# Generated by Django 3.1.3 on 2020-11-16 16:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds a shared 'tagging' related_name to PostTag's post/tag FKs."""
    dependencies = [
        ('rareserverapi', '0002_auto_20201112_2129'),
    ]
    operations = [
        migrations.AlterField(
            model_name='posttag',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagging', to='rareserverapi.post'),
        ),
        migrations.AlterField(
            model_name='posttag',
            name='tag',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagging', to='rareserverapi.tag'),
        ),
    ]
|
[
"brett.derrington@gmail.com"
] |
brett.derrington@gmail.com
|
09c6da67040cb98994b4f39f265db4655729e137
|
8d17108960e95ff21c8b659419295f0b4e0cd754
|
/sms/models.py
|
cb2d34473d086604777af67df929f2f663bdda57
|
[] |
no_license
|
pawaranand/SchoolMS-Assignment
|
456acf55dd13caab6f243e559efad2f8be6d207a
|
9ff912844656fce22980b6250a99c13e6a6fbe2c
|
refs/heads/master
| 2020-04-13T10:31:54.315132
| 2019-01-02T07:17:49
| 2019-01-02T07:17:49
| 163,143,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,926
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Create your models here.
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils import timezone
import datetime
from django.utils.translation import ugettext as _
class User(AbstractUser):
    # Role flags layered onto Django's auth user.
    # NOTE(review): nothing here enforces that exactly one flag is True —
    # confirm with the registration flow.
    is_student = models.BooleanField(default=False)
    is_teacher = models.BooleanField(default=False)
    is_parent = models.BooleanField(default=False)
class UserType(models.Model):
    # Free-form type name doubling as the primary key.
    user_type = models.CharField(max_length=100, primary_key=True)
GENDER_CHOICES = (
(1, _("Male")),
(2, _("Female"))
)
class Parent(models.Model):
    """Guardian profile linked one-to-one to an auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    first_name = models.CharField(max_length=100, null=True)
    last_name = models.CharField(max_length=100, null=True)
    gender = models.IntegerField(choices=GENDER_CHOICES, default=1)
    email = models.EmailField(max_length=100, null=True)
    address = models.CharField(max_length=200, null=True)
    city = models.CharField(max_length=100, null=True)
    def __str__(self):
        # Human-readable name, falling back to a generic label.
        if self.first_name and not self.last_name:
            return self.first_name
        elif self.first_name and self.last_name:
            return self.first_name + ' ' + self.last_name
        else:
            return 'Parent'
class Student(models.Model):
    """Student profile linked one-to-one to an auth User, optionally to a Parent."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    first_name = models.CharField(max_length=100, null=True)
    last_name = models.CharField(max_length=100, null=True)
    email = models.EmailField(max_length=100, null=True)
    gender = models.IntegerField(choices=GENDER_CHOICES, default=1)
    address = models.CharField(max_length=200, null=True)
    city = models.CharField(max_length=100, null=True)
    parent = models.ForeignKey(Parent, on_delete=models.CASCADE,null=True , blank=True)
    def __str__(self):
        # Human-readable name, falling back to a generic label.
        if self.first_name and not self.last_name:
            return self.first_name
        elif self.first_name and self.last_name:
            return self.first_name + ' ' + self.last_name
        else:
            return 'Student'
    def get_unanswered_questions(self, exam):
        # Questions of *exam* with no answer log from this student,
        # ordered by question text.
        # NOTE(review): relies on a related name 'exam_question_logs'
        # declared on a model outside this file chunk — verify.
        answered_questions = self.exam_question_logs \
            .filter(exam=exam) \
            .values_list('question__pk', flat=True)
        questions = exam.questions.exclude(pk__in=answered_questions).order_by('question_text')
        return questions
class Teacher(models.Model):
    """Teacher profile linked one-to-one to an auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
    first_name = models.CharField(max_length=100, null=True)
    last_name = models.CharField(max_length=100, null=True)
    email = models.EmailField(max_length=100, null=True)
    address = models.CharField(max_length=200, null=True)
    gender = models.IntegerField(choices=GENDER_CHOICES, default=1)
    city = models.CharField(max_length=100, null=True)
    qualification = models.CharField(max_length=100, null=True)
    def __str__(self):
        # Human-readable name, falling back to a generic label.
        if self.first_name and not self.last_name:
            return self.first_name
        elif self.first_name and self.last_name:
            return self.first_name + ' ' + self.last_name
        else:
            return 'Teacher'
class Department(models.Model):
    """An academic department that groups Courses."""

    department_name = models.CharField(max_length=100)
    description = models.CharField(max_length=200, null=True)

    def __str__(self):
        # Falls back to the literal 'Department' when the name is empty/unset.
        return self.department_name if self.department_name else 'Department'
class Course(models.Model):
    """A course offered under a Department."""

    course = models.CharField(max_length=100)
    # NOTE(review): the name suggests department description, but this field
    # lives on Course — presumably it describes the course; confirm before
    # renaming (a rename would require a schema migration).
    dept_description = models.CharField(max_length=200, null=True)
    department = models.ForeignKey(Department, on_delete=models.CASCADE)

    def __str__(self):
        # Falls back to the literal 'Course' when the name is empty/unset.
        return self.course if self.course else 'Course'
class Subject(models.Model):
    """A subject taught within a Course (reverse accessor: course.subjects)."""

    subject = models.CharField(max_length=100)
    subject_description = models.CharField(max_length=200, null=True)
    course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='subjects')

    def __str__(self):
        # Falls back to the literal 'Subject' when the name is empty/unset.
        return self.subject if self.subject else 'Subject'
class Exam(models.Model):
    """An exam attached to a Subject, created by a User (reverse: subject.exams)."""

    exam_name = models.CharField(max_length=100, null=True)
    description = models.CharField(max_length=200, null=True, blank=True)
    subject = models.ForeignKey(Subject, on_delete=models.CASCADE, related_name='exams')
    created_by = models.ForeignKey(User, on_delete=models.CASCADE, null=True, related_name='exams')

    def __str__(self):
        # Falls back to the literal 'Exam' when the name is empty/unset.
        return self.exam_name if self.exam_name else 'Exam'
class Question(models.Model):
    """A single question belonging to an Exam (reverse: exam.questions)."""

    question_text = models.CharField(max_length=200)
    exam = models.ForeignKey(Exam, on_delete=models.CASCADE, null=True, related_name='questions')
    pub_date = models.DateTimeField('date published', auto_now_add=True)
    weightage = models.IntegerField(default=5)

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """Return True when pub_date is within the last 24 hours and not in
        the future."""
        now = timezone.now()
        one_day_ago = now - datetime.timedelta(days=1)
        return one_day_ago <= self.pub_date <= now

    # Django-admin list display metadata for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class StudentCourse(models.Model):
    """Join table enrolling a Student in a Course (reverse: student.student_courses)."""

    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    student = models.ForeignKey(Student, on_delete=models.CASCADE,related_name='student_courses')
class ExamResult(models.Model):
    """Overall outcome of one student's attempt at one exam."""

    exam = models.ForeignKey(Exam, on_delete=models.CASCADE,related_name='exam_results')
    student = models.ForeignKey(Student, on_delete=models.CASCADE,related_name='exam_results')
    score = models.FloatField(null=True)  # null until evaluated is set
    evaluated = models.BooleanField(default=False)  # flipped once grading completes
    date = models.DateTimeField(auto_now_add=True)  # row creation timestamp
class ExamLog(models.Model):
    """Per-question record of a student's answer within an exam attempt
    (reverse accessors: exam.exam_question_logs, student.exam_question_logs)."""

    exam = models.ForeignKey(Exam, on_delete=models.CASCADE, related_name='exam_question_logs')
    student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name='exam_question_logs')
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    # presumably a copy of Question.weightage at answer time — TODO confirm
    weightage = models.IntegerField(default=5)
    score = models.FloatField(null=True)  # null until evaluated is set
    evaluated = models.BooleanField(default=False)
    # FIX: original used default=False on a CharField; Django coerces the
    # boolean to the literal string "False" for rows saved without an answer.
    # An empty string is the correct "no answer yet" default for a text column.
    answer = models.CharField(max_length=200, default='')
    date = models.DateTimeField(auto_now_add=True)  # row creation timestamp
class Attendance(models.Model):
    """One attendance record for a student on a given date, marked by a teacher
    (reverse accessor: student.student_attendances)."""

    student = models.ForeignKey(Student, on_delete=models.CASCADE,related_name='student_attendances')
    attendance_date = models.DateField('attendance date')
    attendance_status = models.BooleanField('Present',default=False)  # True = present
    reason_for_absentee = models.CharField(max_length=200, null=True, blank=True)  # only meaningful when absent
    attendance_marked_by = models.ForeignKey(Teacher, on_delete=models.CASCADE)
|
[
"apawar@mamar.sa"
] |
apawar@mamar.sa
|
8d92f8b0764cd51243d968a2fbd1ff3ac8889794
|
ef7f6f1193d76668882bf2e7d0ef742d40ed52be
|
/editor/lib/themes/darkstyle/pyside_style_rc.py
|
2180ed5b2282436c45ca205301f2a338e2b4a57d
|
[
"MIT"
] |
permissive
|
brucelevis/juma-editor
|
b5eca857ae1c27f414b5949656841bc775ef3721
|
125720f7386f9f0a4cd3466a45c883d6d6020e33
|
refs/heads/master
| 2022-04-12T17:28:34.848062
| 2019-09-15T09:13:56
| 2019-09-15T09:13:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80,981
|
py
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Fri Jun 17 13:35:54 2016
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = b"\x00\x00c\xa6/*\x0a * The MIT License (MIT)\x0a *\x0a * Copyright (c) <2013-2014> <Colin Duquesnoy>\x0a *\x0a * Permission is hereby granted, free of charge, to any person obtaining a copy\x0a * of this software and associated documentation files (the \x22Software\x22), to deal\x0a * in the Software without restriction, including without limitation the rights\x0a * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\x0a * copies of the Software, and to permit persons to whom the Software is\x0a * furnished to do so, subject to the following conditions:\x0a\x0a * The above copyright notice and this permission notice shall be included in\x0a * all copies or substantial portions of the Software.\x0a\x0a * THE SOFTWARE IS PROVIDED \x22AS IS\x22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\x0a * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\x0a * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\x0a * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\x0a * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\x0a * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\x0a * THE SOFTWARE.\x0a */\x0a\x0aQProgressBar:horizontal {\x0a border: 1px solid #3A3939;\x0a text-align: center;\x0a padding: 1px;\x0a background: #201F1F;\x0a}\x0aQProgressBar::chunk:horizontal {\x0a background-color: qlineargradient(spread:reflect, x1:1, y1:0.545, x2:1, y2:0, stop:0 rgba(28, 66, 111, 255), stop:1 rgba(37, 87, 146, 255));\x0a}\x0a\x0aQToolTip\x0a{\x0a border: 1px solid #3A3939;\x0a background-color: rgb(90, 102, 117);;\x0a color: white;\x0a padding: 1px;\x0a opacity: 200;\x0a}\x0a\x0aQWidget\x0a{\x0a color: silver;\x0a background-color: #302F2F;\x0a selection-background-color:#5285a6;\x0a selection-color: black;\x0a background-clip: border;\x0a border-image: none;\x0a outline: 0;\x0a}\x0a\x0aQWidget:item:hover\x0a{\x0a 
background-color: #78879b;\x0a color: black;\x0a}\x0a\x0aQWidget:item:selected\x0a{\x0a background-color: #5285a6;\x0a}\x0a\x0aQCheckBox\x0a{\x0a spacing: 5px;\x0a outline: none;\x0a color: #bbb;\x0a margin-bottom: 2px;\x0a}\x0a\x0aQCheckBox:disabled\x0a{\x0a color: #777777;\x0a}\x0aQCheckBox::indicator,\x0aQGroupBox::indicator\x0a{\x0a width: 18px;\x0a height: 18px;\x0a}\x0aQGroupBox::indicator\x0a{\x0a margin-left: 2px;\x0a}\x0a\x0aQCheckBox::indicator:unchecked,\x0aQCheckBox::indicator:unchecked:hover,\x0aQGroupBox::indicator:unchecked,\x0aQGroupBox::indicator:unchecked:hover\x0a{\x0a image: url(:/qss_icons/rc/checkbox_unchecked.png);\x0a}\x0a\x0aQCheckBox::indicator:unchecked:focus,\x0aQCheckBox::indicator:unchecked:pressed,\x0aQGroupBox::indicator:unchecked:focus,\x0aQGroupBox::indicator:unchecked:pressed\x0a{\x0a border: none;\x0a image: url(:/qss_icons/rc/checkbox_unchecked_focus.png);\x0a}\x0a\x0aQCheckBox::indicator:checked,\x0aQCheckBox::indicator:checked:hover,\x0aQGroupBox::indicator:checked,\x0aQGroupBox::indicator:checked:hover\x0a{\x0a image: url(:/qss_icons/rc/checkbox_checked.png);\x0a}\x0a\x0aQCheckBox::indicator:checked:focus,\x0aQCheckBox::indicator:checked:pressed,\x0aQGroupBox::indicator:checked:focus,\x0aQGroupBox::indicator:checked:pressed\x0a{\x0a border: none;\x0a image: url(:/qss_icons/rc/checkbox_checked_focus.png);\x0a}\x0a\x0aQCheckBox::indicator:indeterminate,\x0aQCheckBox::indicator:indeterminate:hover,\x0aQCheckBox::indicator:indeterminate:pressed\x0aQGroupBox::indicator:indeterminate,\x0aQGroupBox::indicator:indeterminate:hover,\x0aQGroupBox::indicator:indeterminate:pressed\x0a{\x0a image: url(:/qss_icons/rc/checkbox_indeterminate.png);\x0a}\x0a\x0aQCheckBox::indicator:indeterminate:focus,\x0aQGroupBox::indicator:indeterminate:focus\x0a{\x0a image: url(:/qss_icons/rc/checkbox_indeterminate_focus.png);\x0a}\x0a\x0aQCheckBox::indicator:checked:disabled,\x0aQGroupBox::indicator:checked:disabled\x0a{\x0a image: 
url(:/qss_icons/rc/checkbox_checked_disabled.png);\x0a}\x0a\x0aQCheckBox::indicator:unchecked:disabled,\x0aQGroupBox::indicator:unchecked:disabled\x0a{\x0a image: url(:/qss_icons/rc/checkbox_unchecked_disabled.png);\x0a}\x0a\x0aQRadioButton\x0a{\x0a spacing: 5px;\x0a outline: none;\x0a color: #bbb;\x0a margin-bottom: 2px;\x0a}\x0a\x0aQRadioButton:disabled\x0a{\x0a color: #777777;\x0a}\x0aQRadioButton::indicator\x0a{\x0a width: 21px;\x0a height: 21px;\x0a}\x0a\x0aQRadioButton::indicator:unchecked,\x0aQRadioButton::indicator:unchecked:hover\x0a{\x0a image: url(:/qss_icons/rc/radio_unchecked.png);\x0a}\x0a\x0aQRadioButton::indicator:unchecked:focus,\x0aQRadioButton::indicator:unchecked:pressed\x0a{\x0a border: none;\x0a outline: none;\x0a image: url(:/qss_icons/rc/radio_unchecked_focus.png);\x0a}\x0a\x0aQRadioButton::indicator:checked,\x0aQRadioButton::indicator:checked:hover\x0a{\x0a border: none;\x0a outline: none;\x0a image: url(:/qss_icons/rc/radio_checked.png);\x0a}\x0a\x0aQRadioButton::indicator:checked:focus,\x0aQRadioButton::indicato::menu-arrowr:checked:pressed\x0a{\x0a border: none;\x0a outline: none;\x0a image: url(:/qss_icons/rc/radio_checked_focus.png);\x0a}\x0a\x0aQRadioButton::indicator:indeterminate,\x0aQRadioButton::indicator:indeterminate:hover,\x0aQRadioButton::indicator:indeterminate:pressed\x0a{\x0a image: url(:/qss_icons/rc/radio_indeterminate.png);\x0a}\x0a\x0aQRadioButton::indicator:checked:disabled\x0a{\x0a outline: none;\x0a image: url(:/qss_icons/rc/radio_checked_disabled.png);\x0a}\x0a\x0aQRadioButton::indicator:unchecked:disabled\x0a{\x0a image: url(:/qss_icons/rc/radio_unchecked_disabled.png);\x0a}\x0a\x0a\x0aQMenuBar\x0a{\x0a background-color: #302F2F;\x0a color: silver;\x0a}\x0a\x0aQMenuBar::item\x0a{\x0a background: transparent;\x0a}\x0a\x0aQMenuBar::item:selected\x0a{\x0a background: transparent;\x0a border: 1px solid #3A3939;\x0a}\x0a\x0aQMenuBar::item:pressed\x0a{\x0a border: 1px solid #3A3939;\x0a background-color: #5285a6;\x0a 
color: black;\x0a margin-bottom:-1px;\x0a padding-bottom:1px;\x0a}\x0a\x0aQMenu\x0a{\x0a border: 1px solid #3A3939;\x0a color: silver;\x0a margin: 2px;\x0a}\x0a\x0aQMenu::icon\x0a{\x0a margin: 5px;\x0a}\x0a\x0aQMenu::item\x0a{\x0a padding: 5px 30px 5px 30px;\x0a margin-left: 5px;\x0a border: 1px solid transparent; /* reserve space for selection border */\x0a}\x0a\x0aQMenu::item:selected\x0a{\x0a color: black;\x0a}\x0a\x0aQMenu::separator {\x0a height: 2px;\x0a background: lightblue;\x0a margin-left: 10px;\x0a margin-right: 5px;\x0a}\x0a\x0aQMenu::indicator {\x0a width: 18px;\x0a height: 18px;\x0a}\x0a\x0a/* non-exclusive indicator = check box style indicator\x0a (see QActionGroup::setExclusive) */\x0aQMenu::indicator:non-exclusive:unchecked {\x0a image: url(:/qss_icons/rc/checkbox_unchecked.png);\x0a}\x0a\x0aQMenu::indicator:non-exclusive:unchecked:selected {\x0a image: url(:/qss_icons/rc/checkbox_unchecked_disabled.png);\x0a}\x0a\x0aQMenu::indicator:non-exclusive:checked {\x0a image: url(:/qss_icons/rc/checkbox_checked.png);\x0a}\x0a\x0aQMenu::indicator:non-exclusive:checked:selected {\x0a image: url(:/qss_icons/rc/checkbox_checked_disabled.png);\x0a}\x0a\x0a/* exclusive indicator = radio button style indicator (see QActionGroup::setExclusive) */\x0aQMenu::indicator:exclusive:unchecked {\x0a image: url(:/qss_icons/rc/radio_unchecked.png);\x0a}\x0a\x0aQMenu::indicator:exclusive:unchecked:selected {\x0a image: url(:/qss_icons/rc/radio_unchecked_disabled.png);\x0a}\x0a\x0aQMenu::indicator:exclusive:checked {\x0a image: url(:/qss_icons/rc/radio_checked.png);\x0a}\x0a\x0aQMenu::indicator:exclusive:checked:selected {\x0a image: url(:/qss_icons/rc/radio_checked_disabled.png);\x0a}\x0a\x0aQMenu::right-arrow {\x0a margin: 5px;\x0a image: url(:/qss_icons/rc/right_arrow.png)\x0a}\x0a\x0a\x0aQWidget:disabled\x0a{\x0a color: #404040;\x0a background-color: #302F2F;\x0a}\x0a\x0aQAbstractItemView\x0a{\x0a alternate-background-color: #232222;\x0a color: silver;\x0a border: 1px 
solid #3A3939;\x0a border-radius: 2px;\x0a padding: 1px;\x0a}\x0a\x0a/*QWidget:focus, QMenuBar:focus\x0a{\x0a border: 1px solid #78879b;\x0a}*/\x0a\x0aQTabWidget:focus, QCheckBox:focus, QRadioButton:focus, QSlider:focus\x0a{\x0a border: none;\x0a}\x0a\x0aQLineEdit\x0a{\x0a background-color: #201F1F;\x0a padding: 2px;\x0a border-style: solid;\x0a border: 1px solid #3A3939;\x0a border-radius: 2px;\x0a color: silver;\x0a}\x0a\x0aQGroupBox {\x0a border:1px solid #3A3939;\x0a border-radius: 2px;\x0a margin-top: 20px;\x0a}\x0a\x0aQGroupBox::title {\x0a subcontrol-origin: margin;\x0a subcontrol-position: top center;\x0a padding-left: 10px;\x0a padding-right: 10px;\x0a padding-top: 10px;\x0a}\x0a\x0aQAbstractScrollArea\x0a{\x0a border-radius: 2px;\x0a border: 1px solid #3A3939;\x0a background-color: transparent;\x0a}\x0a\x0aQScrollBar:horizontal\x0a{\x0a height: 15px;\x0a margin: 3px 15px 3px 15px;\x0a border: 1px transparent #2A2929;\x0a border-radius: 2px;\x0a background-color: #2A2929;\x0a}\x0a\x0aQScrollBar::handle:horizontal\x0a{\x0a background-color: #605F5F;\x0a min-width: 5px;\x0a border-radius: 2px;\x0a}\x0a\x0aQScrollBar::add-line:horizontal\x0a{\x0a margin: 0px 3px 0px 3px;\x0a border-image: url(:/qss_icons/rc/right_arrow_disabled.png);\x0a width: 10px;\x0a height: 10px;\x0a subcontrol-position: right;\x0a subcontrol-origin: margin;\x0a}\x0a\x0aQScrollBar::sub-line:horizontal\x0a{\x0a margin: 0px 3px 0px 3px;\x0a border-image: url(:/qss_icons/rc/left_arrow_disabled.png);\x0a height: 10px;\x0a width: 10px;\x0a subcontrol-position: left;\x0a subcontrol-origin: margin;\x0a}\x0a\x0aQScrollBar::add-line:horizontal:hover,QScrollBar::add-line:horizontal:on\x0a{\x0a border-image: url(:/qss_icons/rc/right_arrow.png);\x0a height: 10px;\x0a width: 10px;\x0a subcontrol-position: right;\x0a subcontrol-origin: margin;\x0a}\x0a\x0a\x0aQScrollBar::sub-line:horizontal:hover, QScrollBar::sub-line:horizontal:on\x0a{\x0a border-image: url(:/qss_icons/rc/left_arrow.png);\x0a height: 
10px;\x0a width: 10px;\x0a subcontrol-position: left;\x0a subcontrol-origin: margin;\x0a}\x0a\x0aQScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal\x0a{\x0a background: none;\x0a}\x0a\x0a\x0aQScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal\x0a{\x0a background: none;\x0a}\x0a\x0aQScrollBar:vertical\x0a{\x0a background-color: #2A2929;\x0a width: 15px;\x0a margin: 15px 3px 15px 3px;\x0a border: 1px transparent #2A2929;\x0a border-radius: 4px;\x0a}\x0a\x0aQScrollBar::handle:vertical\x0a{\x0a background-color: #605F5F;\x0a min-height: 5px;\x0a border-radius: 4px;\x0a}\x0a\x0aQScrollBar::sub-line:vertical\x0a{\x0a margin: 3px 0px 3px 0px;\x0a border-image: url(:/qss_icons/rc/up_arrow_disabled.png);\x0a height: 10px;\x0a width: 10px;\x0a subcontrol-position: top;\x0a subcontrol-origin: margin;\x0a}\x0a\x0aQScrollBar::add-line:vertical\x0a{\x0a margin: 3px 0px 3px 0px;\x0a border-image: url(:/qss_icons/rc/down_arrow_disabled.png);\x0a height: 10px;\x0a width: 10px;\x0a subcontrol-position: bottom;\x0a subcontrol-origin: margin;\x0a}\x0a\x0aQScrollBar::sub-line:vertical:hover,QScrollBar::sub-line:vertical:on\x0a{\x0a border-image: url(:/qss_icons/rc/up_arrow.png);\x0a height: 10px;\x0a width: 10px;\x0a subcontrol-position: top;\x0a subcontrol-origin: margin;\x0a}\x0a\x0a\x0aQScrollBar::add-line:vertical:hover, QScrollBar::add-line:vertical:on\x0a{\x0a border-image: url(:/qss_icons/rc/down_arrow.png);\x0a height: 10px;\x0a width: 10px;\x0a subcontrol-position: bottom;\x0a subcontrol-origin: margin;\x0a}\x0a\x0aQScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical\x0a{\x0a background: none;\x0a}\x0a\x0a\x0aQScrollBar::add-page:vertical, QScrollBar::sub-page:vertical\x0a{\x0a background: none;\x0a}\x0a\x0aQTextEdit\x0a{\x0a background-color: #201F1F;\x0a color: silver;\x0a border: 1px solid #3A3939;\x0a}\x0a\x0aQPlainTextEdit\x0a{\x0a background-color: #201F1F;\x0a selection-color: white;\x0a selection-background-color: #565648;\x0a 
color: white;\x0a border-radius: 2px;\x0a border: 1px solid #3A3939;\x0a}\x0a\x0aQHeaderView::section\x0a{\x0a background-color: #3A3939;\x0a color: silver;\x0a padding-left: 4px;\x0a border: 1px solid #6c6c6c;\x0a}\x0a\x0aQSizeGrip {\x0a image: url(:/qss_icons/rc/sizegrip.png);\x0a width: 12px;\x0a height: 12px;\x0a}\x0a\x0a\x0aQMainWindow::separator\x0a{\x0a background-color: #302F2F;\x0a color: white;\x0a padding-left: 4px;\x0a spacing: 2px;\x0a border: 1px dashed #3A3939;\x0a}\x0a\x0aQMainWindow::separator:hover\x0a{\x0a\x0a background-color: #787876;\x0a color: white;\x0a padding-left: 4px;\x0a border: 1px solid #3A3939;\x0a spacing: 2px;\x0a}\x0a\x0a\x0aQMenu::separator\x0a{\x0a height: 1px;\x0a background-color: #3A3939;\x0a color: white;\x0a padding-left: 4px;\x0a margin-left: 10px;\x0a margin-right: 5px;\x0a}\x0a\x0a\x0aQFrame\x0a{\x0a border-radius: 2px;\x0a border: 1px solid #444;\x0a}\x0a\x0aQFrame[frameShape=\x220\x22]\x0a{\x0a border-radius: 2px;\x0a border: 1px transparent #444;\x0a}\x0a\x0aQStackedWidget\x0a{\x0a border: 1px transparent black;\x0a}\x0a\x0aQToolBar {\x0a border: 1px transparent #393838;\x0a background: 1px solid #302F2F;\x0a font-weight: bold;\x0a}\x0a\x0aQToolBar::handle:horizontal {\x0a image: url(:/qss_icons/rc/Hmovetoolbar.png);\x0a}\x0aQToolBar::handle:vertical {\x0a image: url(:/qss_icons/rc/Vmovetoolbar.png);\x0a}\x0aQToolBar::separator:horizontal {\x0a image: url(:/qss_icons/rc/Hsepartoolbar.png);\x0a}\x0aQToolBar::separator:vertical {\x0a image: url(:/qss_icons/rc/Vsepartoolbars.png);\x0a}\x0a\x0aQPushButton\x0a{\x0a color: silver;\x0a background-color: #424141;\x0a border-width: 1px;\x0a border-color: #4A4949;\x0a border-style: solid;\x0a padding-top: 5px;\x0a padding-bottom: 5px;\x0a padding-left: 5px;\x0a padding-right: 5px;\x0a border-radius: 2px;\x0a outline: none;\x0a\x0a /* fixes some glitches + a bit of space between buttons */\x0a margin: 1px;\x0a}\x0a\x0aQPushButton:disabled\x0a{\x0a background-color: #302f2f;\x0a 
border-width: 1px;\x0a border-color: #3A3939;\x0a border-style: solid;\x0a padding-top: 5px;\x0a padding-bottom: 5px;\x0a padding-left: 10px;\x0a padding-right: 10px;\x0a /*border-radius: 2px;*/\x0a color: #454545;\x0a}\x0a\x0aQPushButton:focus {\x0a background-color: #5285a6;\x0a color: white;\x0a}\x0a\x0aQComboBox\x0a{\x0a selection-background-color: #5285a6;\x0a background-color: #201F1F;\x0a border-style: solid;\x0a border: 1px solid #3A3939;\x0a border-radius: 2px;\x0a padding: 2px;\x0a min-width: 75px;\x0a}\x0a\x0aQPushButton:checked{\x0a background-color: #4A4949;\x0a border-color: #6A6969;\x0a}\x0a\x0aQComboBox:hover,QPushButton:hover,QAbstractSpinBox:hover,QLineEdit:hover,QTextEdit:hover,QPlainTextEdit:hover,QAbstractView:hover,QTreeView:hover\x0a{\x0a border: 1px solid #78879b;\x0a color: silver;\x0a}\x0a\x0aQComboBox:on\x0a{\x0a background-color: #626873;\x0a padding-top: 3px;\x0a padding-left: 4px;\x0a selection-background-color: #4a4a4a;\x0a}\x0a\x0aQComboBox QAbstractItemView\x0a{\x0a background-color: #201F1F;\x0a border-radius: 2px;\x0a border: 1px solid #444;\x0a selection-background-color: #5285a6;\x0a}\x0a\x0aQComboBox::drop-down\x0a{\x0a subcontrol-origin: padding;\x0a subcontrol-position: top right;\x0a width: 15px;\x0a\x0a border-left-width: 0px;\x0a border-left-color: darkgray;\x0a border-left-style: solid;\x0a border-top-right-radius: 3px;\x0a border-bottom-right-radius: 3px;\x0a}\x0a\x0aQComboBox::down-arrow\x0a{\x0a image: url(:/qss_icons/rc/down_arrow_disabled.png);\x0a}\x0a\x0aQComboBox::down-arrow:on, QComboBox::down-arrow:hover,\x0aQComboBox::down-arrow:focus\x0a{\x0a image: url(:/qss_icons/rc/down_arrow.png);\x0a}\x0a\x0aQPushButton:pressed\x0a{\x0a background-color: #484846;\x0a}\x0a\x0aQAbstractSpinBox {\x0a padding-top: 2px;\x0a padding-bottom: 2px;\x0a border: 1px solid #3A3939;\x0a background-color: #201F1F;\x0a color: silver;\x0a border-radius: 2px;\x0a min-width: 75px;\x0a}\x0a\x0aQAbstractSpinBox:up-button\x0a{\x0a 
background-color: transparent;\x0a/* subcontrol-origin: border;\x0a subcontrol-position: center left;*/\x0a}\x0a\x0aQAbstractSpinBox:down-button\x0a{\x0a background-color: transparent;\x0a/* subcontrol-origin: border;\x0a subcontrol-position: center left;*/\x0a}\x0a\x0aQAbstractSpinBox::up-arrow,QAbstractSpinBox::up-arrow:disabled,QAbstractSpinBox::up-arrow:off {\x0a image: url(:/qss_icons/rc/up_arrow_disabled.png);\x0a width: 10px;\x0a height: 10px;\x0a}\x0aQAbstractSpinBox::up-arrow:hover\x0a{\x0a image: url(:/qss_icons/rc/up_arrow.png);\x0a}\x0a\x0a\x0aQAbstractSpinBox::down-arrow,QAbstractSpinBox::down-arrow:disabled,QAbstractSpinBox::down-arrow:off\x0a{\x0a image: url(:/qss_icons/rc/down_arrow_disabled.png);\x0a width: 10px;\x0a height: 10px;\x0a}\x0aQAbstractSpinBox::down-arrow:hover\x0a{\x0a image: url(:/qss_icons/rc/down_arrow.png);\x0a}\x0a\x0a\x0aQLabel\x0a{\x0a border: 0px solid black;\x0a color: silver;\x0a}\x0a\x0aQTabWidget{\x0a border: 1px transparent black;\x0a}\x0a\x0aQTabWidget::pane {\x0a border: 1px solid #444;\x0a border-radius: 2px;\x0a padding: 2px;\x0a}\x0a\x0aQTabBar\x0a{\x0a qproperty-drawBase: 0;\x0a left: 5px; /* move to the right by 5px */\x0a}\x0a\x0aQTabBar:focus\x0a{\x0a border: 0px transparent black;\x0a}\x0a\x0aQTabBar::close-button {\x0a image: url(:/qss_icons/rc/close.png);\x0a background: transparent;\x0a}\x0a\x0aQTabBar::close-button:hover\x0a{\x0a image: url(:/qss_icons/rc/close-hover.png);\x0a background: transparent;\x0a}\x0a\x0aQTabBar::close-button:pressed {\x0a image: url(:/qss_icons/rc/close-pressed.png);\x0a background: transparent;\x0a}\x0a\x0a/* TOP TABS */\x0aQTabBar::tab:top {\x0a color: #b1b1b1;\x0a border: 1px solid #4A4949;\x0a border-bottom: 1px transparent black;\x0a background-color: #424141;\x0a padding: 5px;\x0a border-top-left-radius: 2px;\x0a border-top-right-radius: 2px;\x0a}\x0a\x0aQTabBar::tab:top:!selected\x0a{\x0a color: #b1b1b1;\x0a background-color: #201F1F;\x0a border: 1px transparent #4A4949;\x0a 
border-bottom: 1px transparent #4A4949;\x0a border-top-left-radius: 0px;\x0a border-top-right-radius: 0px;\x0a}\x0a\x0aQTabBar::tab:top:!selected:hover {\x0a background-color: #48576b;\x0a}\x0a\x0a/* BOTTOM TABS */\x0aQTabBar::tab:bottom {\x0a color: #b1b1b1;\x0a border: 1px solid #4A4949;\x0a border-top: 1px transparent black;\x0a background-color: #424141;\x0a padding: 5px;\x0a border-bottom-left-radius: 2px;\x0a border-bottom-right-radius: 2px;\x0a}\x0a\x0aQTabBar::tab:bottom:!selected\x0a{\x0a color: #b1b1b1;\x0a background-color: #201F1F;\x0a border: 1px transparent #4A4949;\x0a border-top: 1px transparent #4A4949;\x0a border-bottom-left-radius: 0px;\x0a border-bottom-right-radius: 0px;\x0a}\x0a\x0aQTabBar::tab:bottom:!selected:hover {\x0a background-color: #78879b;\x0a}\x0a\x0a/* LEFT TABS */\x0aQTabBar::tab:left {\x0a color: #b1b1b1;\x0a border: 1px solid #4A4949;\x0a border-left: 1px transparent black;\x0a background-color: #424141;\x0a padding: 5px;\x0a border-top-right-radius: 2px;\x0a border-bottom-right-radius: 2px;\x0a}\x0a\x0aQTabBar::tab:left:!selected\x0a{\x0a color: #b1b1b1;\x0a background-color: #201F1F;\x0a border: 1px transparent #4A4949;\x0a border-right: 1px transparent #4A4949;\x0a border-top-right-radius: 0px;\x0a border-bottom-right-radius: 0px;\x0a}\x0a\x0aQTabBar::tab:left:!selected:hover {\x0a background-color: #48576b;\x0a}\x0a\x0a\x0a/* RIGHT TABS */\x0aQTabBar::tab:right {\x0a color: #b1b1b1;\x0a border: 1px solid #4A4949;\x0a border-right: 1px transparent black;\x0a background-color: #424141;\x0a padding: 5px;\x0a border-top-left-radius: 2px;\x0a border-bottom-left-radius: 2px;\x0a}\x0a\x0aQTabBar::tab:right:!selected\x0a{\x0a color: #b1b1b1;\x0a background-color: #201F1F;\x0a border: 1px transparent #4A4949;\x0a border-right: 1px transparent #4A4949;\x0a border-top-left-radius: 0px;\x0a border-bottom-left-radius: 0px;\x0a}\x0a\x0aQTabBar::tab:right:!selected:hover {\x0a background-color: #48576b;\x0a}\x0a\x0aQTabBar 
QToolButton::right-arrow:enabled {\x0a image: url(:/qss_icons/rc/right_arrow.png);\x0a }\x0a\x0a QTabBar QToolButton::left-arrow:enabled {\x0a image: url(:/qss_icons/rc/left_arrow.png);\x0a }\x0a\x0aQTabBar QToolButton::right-arrow:disabled {\x0a image: url(:/qss_icons/rc/right_arrow_disabled.png);\x0a }\x0a\x0a QTabBar QToolButton::left-arrow:disabled {\x0a image: url(:/qss_icons/rc/left_arrow_disabled.png);\x0a }\x0a\x0a\x0aQDockWidget {\x0a border: 1px solid #403F3F;\x0a titlebar-close-icon: url(:/qss_icons/rc/close.png);\x0a titlebar-normal-icon: url(:/qss_icons/rc/undock.png);\x0a}\x0a\x0aQDockWidget::title\x0a{\x0a background-color: #353434;\x0a text-align: center;\x0a height: 10px;\x0a}\x0a\x0aQDockWidget::close-button, QDockWidget::float-button {\x0a border: 1px solid transparent;\x0a border-radius: 2px;\x0a background: transparent;\x0a}\x0a\x0aQDockWidget::close-button:hover, QDockWidget::float-button:hover {\x0a background: rgba(255, 255, 255, 10);\x0a}\x0a\x0aQDockWidget::close-button:pressed, QDockWidget::float-button:pressed {\x0a padding: 1px -1px -1px 1px;\x0a background: rgba(255, 255, 255, 10);\x0a}\x0a\x0aQTreeView, QListView\x0a{\x0a border: 1px solid #444;\x0a background-color: #201F1F;\x0a}\x0a\x0aQTreeView:branch:selected, QTreeView:branch:hover\x0a{\x0a background: url(:/qss_icons/rc/transparent.png);\x0a}\x0a\x0aQTreeView::branch:has-siblings:!adjoins-item {\x0a border-image: url(:/qss_icons/rc/transparent.png);\x0a}\x0a\x0aQTreeView::branch:has-siblings:adjoins-item {\x0a border-image: url(:/qss_icons/rc/transparent.png);\x0a}\x0a\x0aQTreeView::branch:!has-children:!has-siblings:adjoins-item {\x0a border-image: url(:/qss_icons/rc/transparent.png);\x0a}\x0a\x0aQTreeView::branch:has-children:!has-siblings:closed,\x0aQTreeView::branch:closed:has-children:has-siblings {\x0a image: url(:/qss_icons/rc/branch_closed.png);\x0a}\x0a\x0aQTreeView::branch:open:has-children:!has-siblings,\x0aQTreeView::branch:open:has-children:has-siblings {\x0a image: 
url(:/qss_icons/rc/branch_open.png);\x0a}\x0a\x0aQTreeView::branch:has-children:!has-siblings:closed:hover,\x0aQTreeView::branch:closed:has-children:has-siblings:hover {\x0a image: url(:/qss_icons/rc/branch_closed-on.png);\x0a }\x0a\x0aQTreeView::branch:open:has-children:!has-siblings:hover,\x0aQTreeView::branch:open:has-children:has-siblings:hover {\x0a image: url(:/qss_icons/rc/branch_open-on.png);\x0a }\x0a\x0aQListView::item:!selected:hover, QListView::item:!selected:hover, QTreeView::item:!selected:hover {\x0a background: rgba(0, 0, 0, 0);\x0a outline: 0;\x0a color: #FFFFFF\x0a}\x0a\x0aQListView::item:selected:hover, QListView::item:selected:hover, QTreeView::item:selected:hover {\x0a background: #5285a6;\x0a color: #FFFFFF;\x0a}\x0a\x0aQSlider::groove:horizontal {\x0a border: 1px solid #3A3939;\x0a height: 8px;\x0a background: #201F1F;\x0a margin: 2px 0;\x0a border-radius: 2px;\x0a}\x0a\x0aQSlider::handle:horizontal {\x0a background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1,\x0a stop: 0.0 silver, stop: 0.2 #a8a8a8, stop: 1 #727272);\x0a border: 1px solid #3A3939;\x0a width: 14px;\x0a height: 14px;\x0a margin: -4px 0;\x0a border-radius: 2px;\x0a}\x0a\x0aQSlider::groove:vertical {\x0a border: 1px solid #3A3939;\x0a width: 8px;\x0a background: #201F1F;\x0a margin: 0 0px;\x0a border-radius: 2px;\x0a}\x0a\x0aQSlider::handle:vertical {\x0a background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0.0 silver,\x0a stop: 0.2 #a8a8a8, stop: 1 #727272);\x0a border: 1px solid #3A3939;\x0a width: 14px;\x0a height: 14px;\x0a margin: 0 -4px;\x0a border-radius: 2px;\x0a}\x0a\x0aQToolButton {\x0a background-color: transparent;\x0a border: 1px transparent #4A4949;\x0a border-radius: 2px;\x0a margin: 3px;\x0a padding: 3px;\x0a}\x0a\x0aQToolButton[popupMode=\x221\x22] { /* only for MenuButtonPopup */\x0a padding-right: 20px; /* make way for the popup button */\x0a border: 1px transparent #4A4949;\x0a border-radius: 5px;\x0a}\x0a\x0aQToolButton[popupMode=\x222\x22] { /* 
only for InstantPopup */\x0a padding-right: 10px; /* make way for the popup button */\x0a border: 1px transparent #4A4949;\x0a}\x0a\x0a\x0aQToolButton:hover, QToolButton::menu-button:hover {\x0a background-color: transparent;\x0a border: 1px solid #78879b;\x0a}\x0a\x0aQToolButton:checked, QToolButton:pressed,\x0a QToolButton::menu-button:pressed {\x0a background-color: #4A4949;\x0a border: 1px solid #78879b;\x0a}\x0a\x0a/* the subcontrol below is used only in the InstantPopup or DelayedPopup mode */\x0aQToolButton::menu-indicator {\x0a image: url(:/qss_icons/rc/down_arrow.png);\x0a top: -7px; left: -2px; /* shift it a bit */\x0a}\x0a\x0a/* the subcontrols below are used only in the MenuButtonPopup mode */\x0aQToolButton::menu-button {\x0a border: 1px transparent #4A4949;\x0a border-top-right-radius: 6px;\x0a border-bottom-right-radius: 6px;\x0a /* 16px width + 4px for border = 20px allocated above */\x0a width: 16px;\x0a outline: none;\x0a}\x0a\x0aQToolButton::menu-arrow {\x0a image: url(:/qss_icons/rc/down_arrow.png);\x0a}\x0a\x0aQToolButton::menu-arrow:open {\x0a top: 1px; left: 1px; /* shift it a bit */\x0a border: 1px solid #3A3939;\x0a}\x0a\x0aQPushButton::menu-indicator {\x0a subcontrol-origin: padding;\x0a subcontrol-position: bottom right;\x0a left: 8px;\x0a}\x0a\x0aQTableView\x0a{\x0a border: 1px solid #444;\x0a gridline-color: #6c6c6c;\x0a background-color: #201F1F;\x0a}\x0a\x0a\x0aQTableView, QHeaderView\x0a{\x0a border-radius: 0px;\x0a}\x0a\x0aQTableView::item:pressed, QListView::item:pressed, QTreeView::item:pressed {\x0a background: #78879b;\x0a color: #FFFFFF;\x0a}\x0a\x0aQTableView::item:selected:active, QTreeView::item:selected:active, QListView::item:selected:active {\x0a background: #5285a6;\x0a color: #FFFFFF;\x0a}\x0a\x0a\x0aQHeaderView\x0a{\x0a border: 1px transparent;\x0a border-radius: 2px;\x0a margin: 0px;\x0a padding: 0px;\x0a}\x0a\x0aQHeaderView::section {\x0a background-color: #3A3939;\x0a color: silver;\x0a padding: 4px;\x0a border: 1px 
solid #6c6c6c;\x0a border-radius: 0px;\x0a text-align: center;\x0a}\x0a\x0aQHeaderView::section::vertical::first, QHeaderView::section::vertical::only-one\x0a{\x0a border-top: 1px solid #6c6c6c;\x0a}\x0a\x0aQHeaderView::section::vertical\x0a{\x0a border-top: transparent;\x0a}\x0a\x0aQHeaderView::section::horizontal::first, QHeaderView::section::horizontal::only-one\x0a{\x0a border-left: 1px solid #6c6c6c;\x0a}\x0a\x0aQHeaderView::section::horizontal\x0a{\x0a border-left: transparent;\x0a}\x0a\x0a\x0aQHeaderView::section:checked\x0a {\x0a color: white;\x0a background-color: #5A5959;\x0a }\x0a\x0a /* style the sort indicator */\x0aQHeaderView::down-arrow {\x0a image: url(:/qss_icons/rc/down_arrow.png);\x0a}\x0a\x0aQHeaderView::up-arrow {\x0a image: url(:/qss_icons/rc/up_arrow.png);\x0a}\x0a\x0a\x0aQTableCornerButton::section {\x0a background-color: #3A3939;\x0a border: 1px solid #3A3939;\x0a border-radius: 2px;\x0a}\x0a\x0aQToolBox {\x0a padding: 3px;\x0a border: 1px transparent black;\x0a}\x0a\x0aQToolBox::tab {\x0a color: #b1b1b1;\x0a background-color: #424141;\x0a border: 1px solid #4A4949;\x0a border-bottom: 1px transparent #424141;\x0a border-top-left-radius: 5px;\x0a border-top-right-radius: 5px;\x0a}\x0a\x0a QToolBox::tab:selected { /* italicize selected tabs */\x0a font: italic;\x0a background-color: #424141;\x0a border-color: #5285a6;\x0a }\x0a\x0aQStatusBar::item {\x0a border: 1px solid #3A3939;\x0a border-radius: 2px;\x0a }\x0a\x0a\x0aQFrame[height=\x223\x22], QFrame[width=\x223\x22] {\x0a background-color: #444;\x0a}\x0a\x0a\x0aQSplitter::handle {\x0a border: 1px dashed #3A3939;\x0a}\x0a\x0aQSplitter::handle:hover {\x0a background-color: #787876;\x0a border: 1px solid #3A3939;\x0a}\x0a\x0aQSplitter::handle:horizontal {\x0a width: 1px;\x0a}\x0a\x0aQSplitter::handle:vertical {\x0a height: 1px;\x0a}\x0a\x00\x00\x03\xac\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x03)IDATX\x85\xed\x95Oh\x5cU\x14\xc6\x7f\xe7e\x88d\xda\xc6\xbd\xa9\x94HW\xb6\x91:(\xae\xd3M\xc5\x0aM@fc\xda7/%\xcdF\x07\xd1$\x8e\xae\xb2P\xa8I\xddd\x99\xc2\xbc\x19\xd3n\x9e S\xc1\xe2\x9f\x85u\x1b\xfc\xd3\xa4\x15\x91RJpJ\xd7%3$\xcd\xe0\xfb\x5c\xbc7M\x90\xbc7\x1d\xe9\xce\xf9V\xf7\xcfw\xce\xfd\xee9\xe7\x9e\x0b=\xf4\xf0\x7f\x87uC\x0e\x82\xa0\x7f\xab\xd1\x18\x97\xd9\x98A\x0e\x18\x8a\xb7\xea\x98\xfd*\xa8e\xb3\xd9Z>\x9f\xdfy\xea\x02\xaa\xe5\xf2[\x98-\x00\xc3\x06\xb7\x047dV\x07p\xc2p\x08\xb3Q\xc1\x08p\xd7`\xee\x9c\xe7}\xf5T\x04\x04A\xd0\xb7\xd5l.\x00\xef\x1b|kaX:{\xfe\xfc\xda~\x5c\xdf\xf7O8p\x118\x05,\xde\xdb\xd8(\xcd\xcf\xcf\x87i\xfe3\x9d\x04\xc4\x87\xbf'i\xd6\x9d\x9c\xbc\x94\xc6\xf5<\xef&\xf0z\xd5\xf7g\x81\x8b\xc3G\x8e\x00\xcc\xa5\xd9\xa4F \x0e\xfb\x97f6s\xaeP\xf8\x1c`ii\xe9\x99\xc1\xc1\xc1i\x93\xde&\x0a9&\xad\xcb\xec\xea\xc3\xcd\xcd\xe5b\xb1\xf8\x08\xa0R\xa9\xcc\x99\xf4\x99\x03\xe3g=\xaf\xd6\xb5\x80 \x08\xfa\xb7\x9b\xcd?$\xfd\xe9NN\xbe\x01p\xe5\xf2\xe5\xc3a&s=\xceu\x0881=\x1a\x9b\xad\xf7\xb5Z\xa7'\xa6\xa6\xea\x00\x15\xdf\xff\xde\xcc\x86\x07\xb2\xd9cI\x85\xe9\xec\xb7\x08\xb0\xd5h\x8c\x0b^p\xa4\x8f\xda7\x0f3\x99\xeb2;\xbe\x8fm{<\xf2w&\xf3M\x10\x04\xfd\x00\xe68\x1f\x22\x1d\xddn6\xcf$\x9d\x93(@fc\xc0Z\xbb\xe0\x9e=t\xe8\x82`\x04)9m\xd1\xdeK[\x8d\xc6\x05\x00\xd7u\x7f\xc3\xec6\xd0\xbd\x00\x83\x9cI?\xedY\x9a \x0au:\xa4\xd0\x22n{\xfe\xa3\xe0\x95\xae\x05`\xf6\x5c\xfb\x9d\xc78\x96\xca\xdf\xb5s\x14q\xdb\xb8\x8f\xd9P\x12=\xd5\xa1\xcc\xba\xea\x94\xfb\xea\x01CJ\x8c\x5c\xb2\x00\xe9\x81I\x87\xf7\xac\xfc\xce\x13\xa6@p\xfb\xf14\xba\xfd\x83\xee\x05\x98\xfd\x8c\xd9\xe8\x9e\x95+\xa9\xfc];\xc7\xe0\xea\xae\x1e\x9d\x04V\xbb\x16 
\xa8!\x1d\xf7}\xff\x04\xc0\xc3\xcd\xcde\xcc\xd61S\xca\xe1\x02n\x0e\x1c<\xb8\x0c\xb0R.\xe7\x0c^D\xfa\xbak\x01\xd9l\xb6\x06\xdc\x8d{;\xc5b\xf1Q_\xabu\x1a\xb8\x15Sv\xd3\xd1\xce\xb1\xb4\x86\xe3\xbc\x99\xcf\xe7w$Y\x18}^w\xb6[\xadk]\x0b\xc8\xe7\xf3;8\xce,p*\xee\xedLLM\xd5\x07\xb2\xd9W\x91\xde\x95\xb4\x0a4\x81\xa6`\xd5\xcc\xde\x198p\xe05\xd7u\xef\x03T}\xbf\x04\x9c\x94\xd9\xcc\xf4\xf4t+\xe9\x9c\x8eU^\xf5\xfd\x05\xe0\x03\xa0\xe4z\xdeb'\xbe$\xab\xfa~\xc9\xcc>\x01\x16]\xcf+\xa5\xf1;\x16\xd5\xbd\x8d\x8d\x92\xa4K\xc0B\xd5\xf7\xbf\xabV\xab/'qW\xca\xe5\xdc\x17\x95\xca\x0ff\xf6)\xd1w\xfcq'\xffO\xfc\xceW|\x7f,4[D:\x1a\xb7\xd7\x1b\x82\xbfb'\xcf#\x8d\x125\xa0;2\x9b)\x14\x0a\x89\x85\xf7\x9f\x04\xc0\xe3\x1f\xf2\x8c`\x0c\xc8a\x16\xf5\x09\xa9n\xf0\x8b\xa4\xdav\xabu--\xe7=\xf4\xd0\xc3\xbf\xf1\x0fx\xe5N\xf2\x11\xe4iB\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02J\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdf\x04\x19\x10\x14\x1a8\xc77\xd0\x00\x00\x00\x1diTXtComment\x00\x00\x00\x00\x00Created with GIMPd.e\x07\x00\x00\x01\xaeIDATx\xda\xed\x9bI\x92\xc3 \x0cE#]\xdc\xf6\xc9\xd3\xbb\xaeT\x06&\xe9\x7f\x09\x8c\xd6]2\xef!h \xf0x\xec\xd8\xb1\xe3\xce!\xcc\x8f\x9d\xe7\xf9l\xfc;YB@+p\xa4\x10\xc9\x0a\xcd\x92!\xb3\x80\xa3D\xc8\x8c\xf0\x9e\x12dFpO\x112;\xbcU\x82\xcc\x0en\x15!+\xc1\x8fH\x90\xd5\xe0{%\xe8^\x0a/\xd8\xfb=U 
V\xf8\xe38\xfes\x5c\xd7E\x11\xf5\xfa\xcd\xdawk\x12\xd4\xbba\xef\x8dC\xc3[C\x11\xa5\x8f\x920\x92\xb7\xc6\xa0\xa8q\xef-\xc1\x92\xaf\xc4b\x1e\x02\xa5\xf1\xe7%\xa1\x94\xc7:\xef\x88W\xef\xa3\x1a\xe9\x99\xf7\xdb\x84\xe86\x09\x22*\x01\xd9\xf3\x90\xff\x02\x9e\x12\x18\xf0_\x87\x80\xc7\xa2\xc7\xdax$\xfc\xfb0\x80,\x85-\x95\xc0\xeay\xf8^`D\x02\x1b\x1e\xbe\x19\xea\x91\x10\x01\xff1\x07\xa06=586\xfc\xeb<@\xd9\x0e\x8f\xce\x09\x8c\xcd\x15\xed<\xa0\x17\x86\xb5\xb3\xa4\x1e\x88\xb4B\xb1\xe0\xe9\x02Z\xe0\x98\xf0!\x02,\xeb\x80\xe9\x05\xb4\xc21%h6x\xb6\x04\x8d\x86g\x9c'\x84\x0ah\x81\x8f\x94\x00\xd9\x0d\x8e\xf6<cQD\xd9\x0d\x8e\xc2DT\x82f\x1a\xf3\x11\x124\x13|\x84\x04\xb7CQ\xc4\x18\xf6\xce\x07=\x14EL`\x8cJ\xd0\xac\xf0,\x09R(\x97g4\xbc\xe7w~\xfdH\x1ar&\x98!_U\x80\xe5\xe6\x15\xaa\xb1\xa3yK,\x9a\xbd\xe7\xd1\xf9\xcd\x17$\xb2G\xad\x92\xf7\x15\x99\x8ed\xfb\x96\xd8\x8a\xb1/J\x0e$\xbf\xefU\xd9\xcc\x22h\x97\xa53J\x08\xb9.\x9fE\x82\xf5\xd1\xc4~2\x03h\xd8=\x1fM!eL\xf5l\xceC\x08\xf3\xe1\xe4\x8e\xbb\xc7\x1f\xfe\x88Z\xe2\xcd\xef\x1cI\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xac\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x07\x00\x00\x00?\x08\x06\x00\x00\x00,{\xd2\x13\x00\x00\x00\x06bKGD\x00\xb3\x00y\x00y\xdc\xddS\xfc\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdf\x04\x19\x10.\x14\xfa\xd6\xc4\xae\x00\x00\x009IDAT8\xcbc` \x06\xc4\xc7\xc73\xc4\xc7\xc7\xa3\x881aS\x84S\x12\xaf\xce\x91(\xc9\x82\xc4\xfe\x8f\xc4f\x1c\x0d\xa1Q\xc9Q\xc9QI|\x05\x06\xe3h\x08\x91*I>\x00\x00\x88K\x04\xd39.\x90?\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xb6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x18\x00\x00\x00\x11\x08\x06\x00\x00\x00\xc7xl0\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b,\x0d\x1fC\xaa\xe1\x00\x00\x006IDAT8\xcbc` 
\x01,Z\xb4\xe8\xff\xa2E\x8b\xfe\x93\xa2\x87\x89\x81\xc6`\xd4\x82\x11`\x01#\xa9\xc9t\xd0\xf9\x80\x85\x1cMqqq\x8c\xa3\xa9h\xd4\x82ad\x01\x001\xb5\x09\xec\x1fK\xb4\x15\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02B\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x06bKGD\x00\xb3\x00y\x00y\xdc\xddS\xfc\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdf\x04\x19\x10\x17;_\x83tM\x00\x00\x00\x1diTXtComment\x00\x00\x00\x00\x00Created with GIMPd.e\x07\x00\x00\x01\xa6IDATx\xda\xed\x9b\xdb\x0e\xc3 \x0cC\x9bh\xff\xdd\xf6\xcb\xb7\xb7i\x9avIK\xec\x98B^7Q|p(\x85\xb0,3f\xcc\x189\x8c\xf9\xb0m\xdb\xee\xc1\xff\xd9%\x00D\x05W\x021U\xd1,\x18\xd6\x8bp\x14\x08\xebQ|&\x04\xebQx&\x08\xeb]|+\x04\xeb]x+\x08\xbb\x92\xf83\x10\xecj\xe2\x8fB\xb8Uvr]\xd7g'\xf7}/\x01lU\xa3\xff*\x1e\x05!\xe2\x02S\x11_\x05\xc1+m\x7f\xe6wj\x0ad\x8f\xfe\x11q\x99N\xf8\xe5\x02S\x14\xcf\x84\xe0\xd5\xb6\xff%\x92\x91\x0e\x86\x1e\xfd\xa8x\xc6\xc4\xf8\xc9\x05\xae2\xf2UNp%\xdbW@0\x84\xfd[\xed\x8cL\x87\xf74p\x85\x91\xaft\x82\xab\x89gCpE\xf1L\x08\x96\x91\xff\xe8WXv\xfb\xaf\xf3\x80+\x8e<\xd3\x09\xae.\x1e\x0d\xc1{\x10\x8f\x84\xe0\xccN*\xb6O]\x07(\xb6\xefj9\xc9N;W\xcbI\xf6\x9c\xe3\xc8\x9c\xcc\x82\x80\x9cpS\xe6\x00$\x04\xf4\xdb&\xf5k0\xbb\xb3\x08\xf1\xd0\xaf\xc1L'\xb0\xd6\x19\xd4u@\x14\x02s\x91\x05\xd9\x11j\x81\xc0^aB7E\x8f\x8aA\x8b\xa7o\x8a\x1eqB\xc5\xb7\x05\x1c@\x14B\x95\xf8\xaf)\x90\x99\x06-\xeb\x81\xcb\x9c\x0c\x9d\x11\xc3\xaa\x17\xa0\x1e\x8eF\x9d\xc0<\x22\xa7\x1f\x8f\xff\x13\xc7\xae\x14))\x90\xf8\xe6\x04\x84\xf8\x7f\x05\x12e%2\xef\x10*\xc4\x87\x01 !\xa0\x22Z%\xe6\xcb\xe01\x0b%O4>n\xa9\xac2\x08Z\xb1\xb4\x22\x84\x92ry\x15\x08\xad\x97&\xe6\x95\x19@\xc7\xc6\xbc4\x85\x84\xd1\xd5\xb5\xb9\x0c \xcc\x8b\x933F\x8f\x07S!r\xe7\x176+c\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02\xd8\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x02UIDATX\x85\xed\x95MOSQ\x10\x86\x9f\xb9\x1a\x12\xefO\x10\x0d\xc1\xb0\x12M\xb0\xf1\x0f\xc0\x06\xe3\x06HLw\xd0\x0f\x16l\x8d\x01,\xaeXh\x82\x05\xff\xc2=\xad\xec\xae\x89\x16W~,\xc4\xad\xf1\x8bhb\x0c!\xa4\xb1\x86?\xd0\x86\x86&}]\xb4!\xc6p[.\xb0\xb3\xefv\xe6\xcc<g\xce\x99\x19\xe8\xa9\xa7\xff]\x16\xc79\x0c\xc3\xbe\xfdjuJf\x93\x06\x09\xa0\xbfm\xaa`\xf6YP\xf2}\xbf\x94L&\x0f\xce\x1c\xa0\x18\x04w0\xcb\x03\x83\x06\xdf\x04\x9b2\xab\x00x\xcdf?f\xa3\x82\xeb\xc0\x8e\xc1\xe2L&\xf3\xfcL\x00\xc20<\xb7_\xab\xe5\x81{\x06\xaf\xac\xd9\xccM\xcf\xcen\x1d\xe5\xeb\x9c\x1b\xf1`\x05\x18\x07Vw\xcb\xe5\xdc\xf2\xf2r\xb3S\xfc\xf3\xdd\x00\xda\xc9\xefJZHe\xb3k\x9d|3\x99\xccW\xe0V\xd1\xb9\x05`ep`\x00`\xb1\xd3\x99\x8e\x15h\x97\xfd\x99\x99\xcd\xcf\xa4\xd3O\xba\xc1\xfe\xadB\xa1\xb0h\xd2c\x0f\xa6\xa63\x99Rl\x800\x0c\xfb\xea\xb5\xda\x0fI?S\xd9\xec\xed8\xc9\x0f!\x9c{cf\x83\x17|\x7f8\xeaczQ\x87\xf7\xab\xd5)\xc1\x15OZ:Ir\x00\xf3\xbc\xfbHC\xf5Zm\x22\xca'\x12@f\x93\xc0V\xd4\x87;\x8eR\xa9\xd4\x17\xcc\xbe\x03\xf1\x01\x0c\x12&\xbd?i\xf2CI\xef\x047\xa3\xcc\xd1]`vQP95\x00\xfc\xc6\xac?\xca\x18Y\x01\x00\x99\xc5\x9a\x94G\xc9\xc0\x90\x22gA4\x80\xb4g\xd2\xa5\xd3\x02\xa8u\xfb\xbd({\xa7'\xf8\x08\x8c\x9e\x1a@\x1a3\xf8\x10e\x8f\xee\x02(!]s\xce\x8d\x9c4\xf9z\x10$\x0c\xae\x22\xbd\x8c\x0d\xe0\xfb~\x09\xd8i\xcf\xf6\xd8\x92d\xcd\xd6\xf2\xda\xae7\x1a\x1b\xb1\x01\x92\xc9\xe4\x01\x9e\xb7\x00\x8c\xb7g{,\x15\x9d\xcb\x01c2\x9b\x9f\x9b\x9bk\xc4\x06\x00H\xa5R/\x80U`\xe5\xb8\x10\x92\xac\x10\x04Kf\xf6\x10\xc8\xa7\xd3\xe9\xc8\xf2w\x05\x00\xd8-\x97s\x92\xd6\x80|\xd1\xb9\xd7\xc5b\xf1F\x94\xefz\x10$\x9e\x16\x0ao\xcd\xec\x11\xadu\xfc\xa0[\xfcc\xf7\xf9\xbas\x93M\xb3U\xa4\xa1\xf6x\xdd\x14\xfcj\x07\xb9\x8c4\x0a\x0c\x03\xdb2\x9b\xefv\xf3\xd8\x00p\xb8!'\x04\x93@\x02\xb3\xd6\x9c\x90*\x06\x9f$\x95\xea\x8d\xc6F\xa77\xef\xa9\xa7\x9e\xfe\xd5\x1f>\xd4
\xefD\x0d\xbc\xffe\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\x9f\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x14\x1f\xf9#\xd9\x0b\x00\x00\x00#IDAT\x08\xd7c`\xc0\x0d\xe6|\x80\xb1\x18\x91\x05R\x04\xe0B\x08\x15)\x02\x0c\x0c\x8c\xc8\x02\x08\x95h\x00\x00\xac\xac\x07\x90Ne4\xac\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x01\xd0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01MIDATX\x85\xed\xd7MN\xc2@\x18\xc6\xf1\xff[\x08\x08\xea\x01\xd0+\x88\x09[\xcf!\xbb\xca\xd8\x1aI\xe0>bBBiI\x97x\x0c\xd7\x84p\x07q\xef\x07\x02\x81\xd7\x85\xd4\x10\xc0\xdd\x10\x13\xed\xb3\x9b\xc9\x9by~\x93n:\xf0\xdf#\x9bk\xcf\x98k\xa0\x01\x94\x81\x03K=\x1f\xc0HDZA\x18F\x80\xee\x02\x88gL\x08\xd4\x80)0\x00^-\x01\x8e\x80\x0a\x90\x07\xba\xdd(\xbaI\x10\xdf\x00\xcf\x18\x0f\x08\x04\x1e\xb3\x8bE\xb5\x1d\xc7cK\xe5\x00\xd4]\xb74w\x9c>\x22\x17\x02&\x88\xa2\x1e\x80\xb36\xd3\x00\xa6K\x91K\xdb\xe5\x00\xed8\x1eK6[\x05f*\xd2L\xf6\xd7\x01g\xc0 \x0c\xc3g\xdb\xe5I\x82 xBd\x80jy\x17\xa0\x80\xea\xfb\xbe\xca\xbf\xb3\x5c\xbe\x01\xc5]\x80_I\x0aH\x01) 
\x05\xa4\x80\x14\x90\x02R\xc0:`\x82H\xf1\xc7Ik\x8d\xce!0\xd9\x02(\x8c\x80J\xdduK\xfb\xea\xae\xd5j\xa7\xa8V\x80\xe1\x16\xc0\x11\xb9\x07\xf2\xf3L\xe6\xc1\xf7\xfd\x93}\x94gD\xfa@NEZ\xc9\xfe\xe6\xc3\xa4\x03x\xc0l\xf5\xf7\xfab\xa5]\xe4xu\xf3\x9cB'\x8c\xa2[6\x1f&\xc9\xa8o\xcc\x95\x8a4Q=\x07\x0aV\x00_\xdf|\x88\xea]\xb7\xd7\x8b-\x9d\xf9G\xf2\x09>pdA\x95\x87\xdfi\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xc3\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x0b\x07\x09.7\xffD\xe8\xf0\x00\x00\x00\x1diTXtComment\x00\x00\x00\x00\x00Created with GIMPd.e\x07\x00\x00\x00'IDATx\xda\xed\xc1\x01\x0d\x00\x00\x00\xc2\xa0\xf7Om\x0e7\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80w\x03@@\x00\x01\xafz\x0e\xe8\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x01\xd0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01MIDATX\x85\xed\x97;N\xc3@\x14\x00\xe7EQ\xc2\xf7\x00\x81+\x00R\xeeB\xca\x8d\xedX\x14p\x1fBe\x99\x8d)\xc3\x1dh\xa8\xa3(w \xf4|B>\xf2\xa3p\x8c\x8cL\xb9\x16\x12x*[Zyf%\x17\xef\xc1\x7fG\x8a/\xaa*6\x8e\xfd\x86\xc8\xa5\xc2)\xb0\xe3\xc8\xf3!0\x03\x86\xc6\xf7\xad\x88h)@U%\x89\xe3[\x15\xe9\x03K`\x82\xc8\xab\x13\xbd\xea\x01\xd0\x05\xda\x88\xc4}\xcf\x0b\xf3\x88f~\xc6\xc6\xb1/\x99\xfc\xb1\xd1l\xf6\x8c1s'\xf2-I\x92t\xd2\xcdf\x8cj`\xad}\x00F\x00\x8d\xfc@C\xe4\x12X\xa6p\xeeZ\x0e`\x8c\x99o\xd2\xb4\x07\xacD\xf5\xea\xcb\x9b?(\x9c\x00\x93 
\x08\x9e]\xcbs\xc20|\x02&d\xff\xd7\xf7\x00`\x17x\xafJ^\xe0\x0d\xd8\xfb)\xe0W\xa8\x03\xea\x80:\xa0\x0e\xa8\x03\xea\x80:\xa0\x0e(\x06,(L*\x15\xb2\xbfu\x95\x02f@7I\x92NUfk\xed1\xd9x>-\x05\x08\xdc\x00\xedt\xbd\xbe\x8f\xa2\xe8\xa8\x12y\x9a\x8e\x81\x96\xc0\xb0\xe0\xcdPU\x19Y\x1b\xa1\x1a\x00+\xb2\xc5\xe4\xc5\x89]\xf5\x90\xec\xe6-\x85\xc8\xf3\xfd\x8b|1)\xaff\xd6\x9a\xed\xdc~F6)\xbb`\x01LQ\xbd\xf6\x06\x83;G\xdf\xfc#|\x02\x90\xc4u0\xa38\xd1\xd4\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xef\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00Q\x00\x00\x00:\x08\x06\x00\x00\x00\xc8\xbc\xb5\xaf\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b*2\xff\x7f Z\x00\x00\x00oIDATx\xda\xed\xd0\xb1\x0d\x000\x08\x03A\xc8\xa0\x0c\xc7\xa2I\xcf\x04(\xba/]Y\x97\xb1\xb4\xee\xbes\xab\xaa\xdc\xf8\xf5\x84 B\x84(\x88\x10!B\x14D\x88\x10!\x0a\x22D\x88\x10\x05\x11\x22D\x88\x82\x08\x11\x22DA\x84\x08Q\x10!B\x84(\x88\x10!B\x14D\x88\x10!\x0a\x22D\x88\x10\x05\x11\x22D\x88\x82\x08\x11\x22DA\x84\x08Q\x10!B\xfc\xaa\x07\x12U\x04tV\x9e\x9eT\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02V\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdf\x04\x19\x10\x14-\x80z\x92\xdf\x00\x00\x00\x1diTXtComment\x00\x00\x00\x00\x00Created with GIMPd.e\x07\x00\x00\x01\xbaIDATx\xda\xed\x9b[\x92\x02!\x0cEM\x16\xa6\x1b\xd0\xd5\x8e\x1b\xd0\x8d\xe9\x9fe9\xda<\x92{\x13h\xf2=\x95\xe6\x1c\x1eC\x10\x0e\x87\x15+V\xec9\x84\xf9\xb1\xbf\xe3\xf1Q\xf3w\x97\xfb]\xa6\x10P\x0b\x1c)D\xb2B\xb3d\xc8(\xe0(\x112\x22\xbc\xa7\x04\x19\x11\xdcS\x84\x8c\x0eo\x95 
\xa3\x83[E\xc8L\xf0=\x12d6\xf8V\x09\xba\xb6\xc2\x13\xf6~\xcb(\x10+\xfc\xf9v{\xe5\xb8\x9eN\x14Q\xef\xdf,}\xb7$A\xbd\x1b\xf6\xd984\xbc5\x141\xf4Q\x12z\xf2\x96\x18\x145\xef\xbd%X\xf2m\xb1\x98\xa7\xc0\xd6\xfc\xf3\x92\xb0\x95\xc7\xba\xee\x88W\xef\xa3\x1a\xe9\x99\xf7\xdb\x82\xe8\xb6\x08\x22F\x02\xb2\xe7!\xff\x05<%0\xe0\xbfN\x01\x8fM\x8f\xb5\xf1H\xf8\xcfi\x00\xd9\x0a[F\x02\xab\xe7\xe1\xb5@\x8f\x046<\xbc\x18j\x91\x10\x01\xffo\x0d@\x15=%86\xfc\xfb:@)\x87{\xd7\x04FqE;\x0fh\x85aU\x96\xd4\x03\x91Z(\x16<]@\x0d\x1c\x13>D\x80e\x1f0\xbc\x80Z8\xa6\x04\xcd\x06\xcf\x96\xa0\xd1\xf0\x8c\xf3\x84P\x015\xf0\x91\x12 \xd5`o\xcf36E\x94j\xb0\x17&b$h\xa69\x1f!A3\xc1GHp;\x14E\xcca\xef|\xd0CQ\xc4\x02\xc6\x18\x09\x9a\x15\x9e%\xe1g\x82\xdai\xc0\xaa\xe7\xad\xdf\xf9\xf5#i\xc8\x99`\x86|E\x01\x96\x9bW\xa8\xc6\xf6\xe6\xddb\xd1\xec=\x8f\xceo\xbe \x91=J#y]\x91\xa9M\xb6n\x89M\x1a\xeb\xa2dk\xf2]_\x95\xcd,\x82vY:\xa3\x84\x90\xeb\xf2Y$X\x1fM\xac'3\xde\x0d\xdb\xed\xa3)\xa4\x8c\xa1\x9e\xcdy\x08a>\x9c\x5c\xb1\xf7x\x02Q\xa0Z\x91w\xd2\x02#\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x01\xec\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01iIDATX\x85\xed\x97;N\xc3@\x10\x86\xbf\xb1\xa2\x84\xe7\x01\x02W\x00\xa4\xdc\x85\x94\x8e\xedD\x14p\x1fBe-\x1bS\x86;\xd0PGQ\xee@\xe8y\x84<\xe4\xa1p\x8c\x8c,%\x056\x05\xf8\xafv\xb5#\x7f\x9f\xad\x95<\x03\xff=\x92\xdd\xa8\xaaXc|G\xe4R\xe1\x14\xd8)\x88\xf3!0\x01\xfa\xae\xef[\x11\xd1\x9c\x80\xaaJd\xcc\xad\x8at\x8090B\xe4\xb5\x10\xbc\xea\x01\xd0\x02\x1a\x88\x98\x8e\xe7\xf5R\x89ZZc\x8d\xf1%\x81?:\xb5Z\xdbu\xddi!\xf0u\xa2(j\xc6\xab\xd5\x10\xd5\xc0Z\xfb\x00\x0c\x00\x9c\xb4\xc0\x11\xb9\x04\xe61\x9c\x17\x0d\x07p]w\xba\x8a\xe36\xb0\x10\xd5\xab/n\xbaP8\x01FA\x10<\x17\x0dO\xd3\xeb\xf5\x9e\x80\x11\xc9\xfd\xfa.\x00\xec\x02\xefe\xc13y\x03\xf6\xd2MmC!\x00\xd6\x18\xddV\xb3)^\x10\xc8\xa6sg\xd3\xe1o\xa4\x12\xa8\x04*\x81J\xa0\x12\xa8\x04*\x81\xad\xfd\xc0\xb6\xff\xf9O\x93\xfd\x0232\x9dJ\x89\xd9_\xb3r\x02\x13\xa0\x15EQ\xb3,\xb2\xb5\xf6\x98\xa4=\x1f\xe7\x04\x04n\x80F\xbc\x5c\xde\x87axT\x0a<\x8e\x87@]\xa0\x9f\xe1&QU\x19X\x1b\xa2\x1a\x00\x0b\x92\xc1\xe4\xa5\x10\xba\xea!\xc9\x9b\xd7\x15B\xcf\xf7/\xd2\xc1$?\x9aY\xeb\xae\xfb\xf63\x92N\xb9\x88\xcc\x801\xaa\xd7^\xb7{W\xd03\xffH>\x01\xac\x18zV\x83\xd7\xe8n\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1d\x00\xb0\xd55\xa3\x00\x00\x00*IDAT\x08\xd7c`\xc0\x06\xfe\x9fg``B0\xa1\x1c\x08\x93\x81\x81\x09\xc1d``b``4D\xe2 
s\x19\x90\x8d@\x02\x00d@\x09u\x86\xb3\xad\x9c\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\x96\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\x00\x00\x00\x02bKGD\x00\xd3\xb5W\xa0\x5c\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x0b\x07\x0c\x0d\x1bu\xfe1\x99\x00\x00\x00'IDAT\x08\xd7e\x8c\xb1\x0d\x00\x00\x08\x83\xe0\xff\xa3up\xb1\xca\xd4\x90Px\x08U!\x14\xb6Tp\xe6H\x8d\x87\xcc\x0f\x0d\xe0\xf0\x08\x024\xe2+\xa7\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1c\x1f$\xc6\x09\x17\x00\x00\x00$IDAT\x08\xd7c`@\x05\xff\xcf\xc3XL\xc8\x5c&dY&d\xc5p\x0e\xa3!\x9c\xc3h\x88a\x1a\x0a\x00\x00m\x84\x09u7\x9e\xd9#\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa5\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\x9cS4\xfc]\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b\x02\x04m\x98\x1bi\x00\x00\x00)IDAT\x08\xd7c`\xc0\x00\x8c\x0c\x0c\xff\xcf\xa3\x08\x18220 
\x0b2\x1a200B\x98\x10AFC\x14\x13P\xb5\xa3\x01\x00\xd6\x10\x07\xd2/H\xdfJ\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xbb\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00?\x00\x00\x00\x07\x08\x06\x00\x00\x00\xbfv\x95\x1f\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x095+U\xcaRj\x00\x00\x00;IDAT8\xcbc`\x18\x05#\x130\x12\xa3\xa8\xbe}*%v\xfc\xa7\x97;\xd1\xc1\xaa\xa5s\x18\xae_9\x8fS\x9ei4\xe6\x09\x00M\x1d\xc3!\x19\xf3\x0c\x0c\x0cxc~\x14\x8cT\x00\x00id\x0b\x05\xfdkX\xca\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xe4\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x006\x00\x00\x00\x0a\x08\x06\x00\x00\x00\xff\xfd\xad\x0b\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\x7f\x00\x87\x00\x95\xe6\xde\xa6\xaf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x09*+\x98\x90\x5c\xf4\x00\x00\x00dIDATH\xc7c\xfc\xcf0<\x01\x0b\xa5\x064\xb4O\x85\x87\xcd\xaa\xa5s\x18\xae]9\xcfH+5\x14y\xcc\xd8\xc8\x88$\x03|\x89\xd0O-5\x84\xc0\xd9s\xe7\xe0l&\x86\x91\x92\x14\x91}MTR\x0cM&\xa8\x9fZjF\x93\xe2hR\x1c\x82I\x91\x91\xd2zLK\xc7\x10\xc5\x08l\xc54\xb5\xd4\xd0\xd5c\x83\x15\x00\x00z0J\x09q\xea-n\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xe0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00Q\x00\x00\x00:\x08\x06\x00\x00\x00\xc8\xbc\xb5\xaf\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b)\x1c\x08\x84~V\x00\x00\x00`IDATx\xda\xed\xd9\xb1\x0d\x00 
\x08\x00AqP\x86cQ\xed\x8d\x85%\x89w\xa5\x15\xf9HE\x8c\xa6\xaaj\x9do\x99\x19\x1dg\x9d\x03\x11E\x14\x11\x11E\x14QDD\x14QD\x11\x11QD\x11EDD\x11E\x14\x11\x11E\x14\xf1[\xd1u\xb0\xdb\xdd\xd9O\xb4\xce\x88(\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf6\xcei\x07\x1e\xe99U@\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02\xf8\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x02uIDATX\x85\xed\x96\xcdN\x13Q\x18\x86\x9f\xaf\x15\xd22x\x03VMiX\x89\xa6?\xf1\x06 &\x1a7\x94\x84\xd9\xb63\xc4\x0b0F\x104Q\x16.H\xd1\xb8rC\xb4t\xd8\x92\x98\xe2\xca\xb8\x117,\x8c\xda6\x12\xc0\x10@\x03\x86\x0b\xc0T\xa3q>\x17\xb4\xd1D\xa6e\x0a;\xfbl\xbf\xf7\x9c\xf7I\xe6\xcc\x99\x816m\xfew\xc4O\xd84\xcd\xce\xeepxHD\xd2@J!\x02\x80\xea\x0e\x22\xef\x05\x8a{\xd5jq~~\xfe\xc7\xb1\x0b\xd8\x99\xcc\xb0\x8a\xe4\x04z\x80\x0f\xa2\xba\xa8\x22;\xb5q\x04\xe8\x07.\x00\x1b*2V(\x14\x9e\x1d\x8b\x80i\x9a\xc1\x93\x86\x91S\xd5\x1b\x02/\x08\x06\xc7\xf3\xf9|\xe5\xa0\xaceY\x09\x81)T/\xab\xeat4\x16\x1b\x9f\x9c\x9ct\x1b\xed\x7f\xa2\x99@\xad\xfc:0\x9aw\x9c\x07\x8d\xb2\x85B\xa1\x0c\x5c\x19\xb1\xacQ`\xea\xd3\xe6&\xc0X\xa35\xc1FC;\x93\x19\x06\x1e\x09\x8c\xce:\xce\xc3f\xb2uJ\xe5\xf2R2\x91\xf8.\x22\xf7\x12\xc9d\xa5\x5c.\xafye=\x1f\x81i\x9a\x9d\xdd]]\xab\xc0\xc7Y\xc7\xb9z\xd8\xf2\xbf\xb1\xb3\xd9\x97@\xcf\xd7j\xb5\xcf\xeb`\x06\xbc\x16w\x87\xc3C@L\x82\xc1\x89V\xca\x01\x02\xaa\xb7\x80^\xc30\x06=3^\x03\x11I\xa3Z\xf1:p\x87\xe1\xe9\xdc\x5c\x09XF\xd5\xbf\x00\x90B\xe4u\xab\xe5uD\xf5\x95\xa8^\xf4-\xa0pJ\xfe\xbc\xe7-\xe3\xc2\x17D\x22\xbe\x05\x00T\xd5\xd7My`A 
\xfb\x1e\xfe\x05vE\xf5\xf4Q\x05T5\x82\xean+\x02oU\xa4\xff\xa8\x02\xc0\x80\xc0\x1b\xdf\x02\x02E\xe0\xbceY\x89V\x9bm\xdbN\x01\xe7\x14\x9e\xfb\x16\xd8\xabV\x8b\xc0\x86\xc0T\x8b\xfd\x22\xae\x9b\x03\xd6;B\xa1\x05\xaf\x90\xe7U\xbc\xb2\xb2\xf2+\x15\x8fo\x03wR\xc9d\xb5T./\xf9i\xb7\xb3\xd9\x09\xe0\x9a\xc0\xc8\x93|~\xd5\xb7\x00@\xa9RYK\xc4\xe3\x06p7\x95L~;\xa4\x84\xd4\xca\xef\x8b\xc8t\xdeq\x1e7\x0a7\xfd\x1aFc\xb1\xf1\xcf[[\xaa\xaa9+\x9b\xbd\x14T\x1d\xaf\xddp\xff`\xdbvJ\x5c7\xa70 \x22\xb9\xb3\xd1\xe8\xed\xa6\xb6\xcd\x02u,\xcbJ\x8b\xea4\xd0\x0b,\x03\x8b\xc0vm|\x86\xfd\x1f\x92>`]\xe0f\xdeq<\x0f^K\x02\xb0\xff\x854\x0ccP\x5c7\x8dH\x0a\xa8\xdf\x13;\x0a\xefD\xb5\xd8\x11\x0a-\xcc\xcc\xcc\xfc\xf4\xb3o\x9b6\xff7\xbf\x01J7\xdd\xdd\x8c\xf1\x82j\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\x93\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\x00\x00\x00\x02bKGD\x00\xd3\xb5W\xa0\x5c\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x0b\x07\x0c\x0c+J<0t\x00\x00\x00$IDAT\x08\xd7c`@\x05\xff\xff\xc3XL\xc8\x5c&dY&d\xc5p\x0e##\x9c\xc3\xc8\x88a\x1a\x0a\x00\x00\x9e\x14\x0a\x05+\xca\xe5u\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\x9cS4\xfc]\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b\x1b\x0e\x16M[o\x00\x00\x00*IDAT\x08\xd7c`\xc0\x00\x8c\x0c\x0cs> \x0b\xa4\x08020 \x0b\xa6\x08000B\x98\x10\xc1\x14\x01\x14\x13P\xb5\xa3\x01\x00\xc6\xb9\x07\x90]f\x1f\x83\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\x81\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x10\x00\x00\x00\x10\x01\x03\x00\x00\x00%=m\x22\x00\x00\x00\x06PLTE\x00\x00\x00\xae\xae\xaewk\xd6-\x00\x00\x00\x01tRNS\x00@\xe6\xd8f\x00\x00\x00)IDATx^\x05\xc0\xb1\x0d\x00 
\x08\x04\xc0\xc3X\xd8\xfe\x0a\xcc\xc2p\x8cm(\x0e\x97Gh\x86Uq\xda\x1do%\xba\xcd\xd8\xfd5\x0a\x04\x1b\xd6\xd9\x1a\x92\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xdc\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x10\x00\x00\x00@\x08\x06\x00\x00\x00\x13}\xf7\x96\x00\x00\x00\x06bKGD\x00\xb3\x00y\x00y\xdc\xddS\xfc\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdf\x04\x19\x10-\x19\xafJ\xeb\xd0\x00\x00\x00\x1diTXtComment\x00\x00\x00\x00\x00Created with GIMPd.e\x07\x00\x00\x00@IDATX\xc3\xed\xce1\x0a\x00 \x0c\x03@\xf5\xa3}[_\xaaS\xc1\xc9\xc5E\xe42\x05\x1a\x8e\xb6v\x99^%\x22f\xf5\xcc\xec\xfb\xe8t\x1b\xb7\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf06\xf0A\x16\x0bB\x08x\x15WD\xa2\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x01\xe3\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01`IDATX\x85\xed\xd7;NBa\x10\x86\xe1w@E\xbc,\x00\xdd\x82\x98\x90\x00F\x05W!\xa5\x17b,\xb0C\x12]\x83\x0d\xd8\x88\x15\xdeb\x89{09`\x14IN\x08{\x10{/\x08\x08c\xa1\x87@\xa0<\xc4D\xcf\xd7\xfdS\xfc\xdfS\xce\xc0\x7f\x8f\xf4\xbdT%\x92y\xd8\x16\x95\x04\x82\x1f\x98\xb4\xa9\xe7\x03\xa5\x0a\x925\xf6C\x97\x88\xe8 @U\xd6\x8eK\x17\xaal\x02\x0d\x01S\xd1W;\xda\x05\x99Q\x08\x00\x1e\x90s#\x19\xda\xb1\x10]@$}\x1f\x17\xe4\x0c\xb4\x88\x8c\xc5\x8cd\xb0fG\xb9\x95h\xa6\xecC\xdby`E\x95\xadBj\xe9\x0a\xc0\xd5U\xaa$\x80\x86\xfb\xd3\xb5nw9\x80\x91\x0c\xd6:\xadV\x0ch\x8a\xb0g\xcd\xbb\x00\x84\x05\x01\xf3\xf6 \xfclw\xb9\x95\xe2a\xe4\x090\x01\xff 
\x00\xbc\x0a\xef\xa3*\xef\xc9\x1b05\x0c\xf0+q\x00\x0e\xc0\x018\x00\x07\xe0\x00\x1c\x80\x03\xe8\x05\xd4\xa5gS\x19a\xa6\x81\xfa\x10\x80V\x15\x02\xd1L\xd97\xaa\xe6\xe5\xf4\xdd<\x10\x10\xa8\x0c\x02\xd4u\x0ax\xd0\xf6\xcd\xeaQan\x14\xe5\xe3\xb8\xf3\xc0DG4k\xcd\xfb\x0e\x93h\xe61\x07\x1a\x07\x9a\x80\x09\xfabO\xbd\xcc\xf2}\x98L(\xe4\x0a\xc9\xf0\xee\xc0ab!\x22\xe9\xd2\xc6\xcf\xde\xbe\x08x\xed\x01P\x17\xa8\xa8\xca\x89\x91\x0a_\xdb\xf4\xe7\x1f\xc9\x17\xa4)p#\xfc\x8b\x13\x87\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02V\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdf\x04\x19\x10\x15\x00\xdc\xbe\xff\xeb\x00\x00\x00\x1diTXtComment\x00\x00\x00\x00\x00Created with GIMPd.e\x07\x00\x00\x01\xbaIDATx\xda\xed\x9b[\x92\x02!\x0cEM\xd67.H\x17\xa0\x0b\xd2\xfd\xe9\x9fe9\xda<\x92{\x13h\xf2=\x95\xe6\x1c\x1eC\x10\x0e\x87\x15+V\xec9\x84\xf9\xb1\xdb\xe9\xf4\xa8\xf9\xbb\xe3\xf5*S\x08\xa8\x05\x8e\x14\x22Y\xa1Y2d\x14p\x94\x08\x19\x11\xdeS\x82\x8c\x08\xee)BF\x87\xb7J\x90\xd1\xc1\xad\x22d&\xf8\x1e\x092\x1b|\xab\x04][\xe1\x09{\xbfe\x14\x88\x15\xfe\xefry\xe5\xb8\x9f\xcf\x14Q\xef\xdf,}\xb7$A\xbd\x1b\xf6\xd984\xbc5\x141\xf4Q\x12z\xf2\x96\x18\x145\xef\xbd%X\xf2m\xb1\x98\xa7\xc0\xd6\xfc\xf3\x92\xb0\x95\xc7\xba\xee\x88W\xef\xa3\x1a\xe9\x99\xf7\xdb\x82\xe8\xb6\x08\x22F\x02\xb2\xe7!\xff\x05<%0\xe0\xbfN\x01\x8fM\x8f\xb5\xf1H\xf8\xcfi\x00\xd9\x0a[F\x02\xab\xe7\xe1\xb5@\x8f\x046<\xbc\x18j\x91\x10\x01\xffo\x0d@\x15=%86\xfc\xfb:@)\x87{\xd7\x04FqE;\x0fh\x85aU\x96\xd4\x03\x91Z(\x16<]@\x0d\x1c\x13>D\x80e\x1f0\xbc\x80Z8\xa6\x04\xcd\x06\xcf\x96\xa0\xd1\xf0\x8c\xf3\x84P\x015\xf0\x91\x12 \xd5`o\xcf36E\x94j\xb0\x17&b$h\xa69\x1f!A3\xc1GHp;\x14E\xcca\xef|\xd0CQ\xc4\x02\xc6\x18\x09\x9a\x15\x9e%\xe1g\x82\xdai\xc0\xaa\xe7\xad\xdf\xf9\xf5#i\xc8\x99`\x86|E\x01\x96\x9bW\xa8\xc6\xf6\xe6\xddb\xd1\xec=\x8f\xceo\xbe 
\x91=J#y]\x91\xa9M\xb6n\x89M\x1a\xeb\xa2dk\xf2]_\x95\xcd,\x82vY:\xa3\x84\x90\xeb\xf2Y$X\x1fM\xac'3\xde\x0d\xdb\xed\xa3)\xa4\x8c\xa1\x9e\xcdy\x08a>\x9c\x5c\xb1\xf7x\x02G\xb0[\x07:D>\x01\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1f\x0d\xfcR+\x9c\x00\x00\x00$IDAT\x08\xd7c`@\x05s>\xc0XL\xc8\x5c&dY&d\xc5pN\x8a\x00\x9c\x93\x22\x80a\x1a\x0a\x00\x00)\x95\x08\xaf\x88\xac\xba4\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03\xcc\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x03IIDATX\x85\xed\x96\xcdk\x5cU\x18\xc6\x7f\xcf\x9d\x99\x98\xe9d\x16\xd2\x9d\xa9\x92\x0e\xa1\x0b\xd3\xd8v\xf0\x1fh\x11\x14+4\x81\xdeU\xca\xcc\xbd\xa5T\x5c\x04Dm:\xd5M\x16.\xe2DW\xb3\x1b\xeax\xa7\x18\xb2\x08\xc8T\xb0\x88\x1b\xeb\xc6\x85h\xf3US\xa4\xb4U\x9aRp%\x990\xa56\xb9\xaf\x8b\xf9h\xc1\xcc\x0cS\xbak\x9e\xdd9\xe79\xef\xfb\xbb\xef}\xef9\x17v\xb5\xab\xe7]\xea\xc5\xec\xban\xdf@<>.i\x0cH\x1b\x0c\x02`\xb6\x8etMP\xa9\xd6j\x95\x85\x85\x85\x7f\x9f9\x80\x9f\xc9\x9c4)/\xd8\x0f\xac\xca\xec\xaaI\xeb\x8d\xe5A\xe0(0\x0a\xdc2i*\x08\x82o\x9e\x09\x80\xeb\xba\x91d\x22\x917\xb3\x0f\x04\xdf\x13\x89\xe4J\xa5\xd2\xf2N^\xcf\xf3\x0e\x0bf0{\xd3\xccf\x87R\xa9\xdc\xf4\xf4t\xd8)~\xb4\x1b@#\xf9\xfb\xc0\xb9R\xb9\xfcy'o\x10\x04K\xc0[\xa7=\xef\x1c0\xf3\xe7\xed\xdb\x00S\x9d\xf6t\xac\x80\x9f\xc9\x9cDZ\x10|T*\x97\xbf\x00\x98\x9c\x9c|asc\xe3]\x83\x09\xd5K\x0ef+\xe68s\xc9d\xb2X(\x14\x1e\x02\xf8\xd9\xec\x14\xf0\x99I\xe3A\x10Tz\x06p]\xb7o`\xcf\x9e\x1b\xc0\x1f_\x95\xcbo\x03\x9c\x99\x98\xd8\xb7\x1d\x8b]\xc1l\x14\x08\x01\x
a7a\x0f\x01G\xb0\xe2lm\x1d\xbf87\xb7\xde\x80\xf8\x01\xd8\xbfY\xab\x8d\xb4kLg\xa7I\x80\x81x|\x1cH)\x12\xb9\xd0|\xf2\xedX\xec\x8a\x99\x1d\xdca\xaf\xd3\xa0\x18\x0d\xa3\xd1\xef\x5c\xd7\xed\x03p\xcc\xce\x03\xc3\x89D\xe2D\xbb<m\x01$\x8da\xb6\xdcl\xb8j\xb5z\x16\xb3Qu\xa8\x9a@\x06\xaf\x0d\xc4\xe3g\x01\xbe\xbcti\x11\xb8\x8eY\xef\x00@\x1a\xe9\xa7\xd6\xc8\xec\x14\xf5Rw\x96\x14\x02\xa7ZC\xb3\x1fe\xf6z\xcf\x00\x06/\xe9\xf1w\x8e`\xa4\x0bp\x13\xd4\x914\xd2\x1c\x86p\x0fi\xb0g\x80z,\xeb\xe9\xa4\xdc1\x81\xe3\x88\x0e\x95\xeb\x04p_f\xfbZ0\xf0{\xa7@-Ia\x08\xd7[\xfb\xcc\x061\xbb\xff4\x00\xbf\x9at\xf4\x89\xc0_w\xf173:2\x9b{b\xe6\x98\xe0\x97\x9e\x01\x04\x15\xe0\xa0\xe7y\x87\x01\x92\xc9dQ\xb0b`ms\x83!-m>xP\x04\xf0}?\x0d\xbcj\xf0m\xcf\x00\xd5Z\xad\x02\xdc\x12\xcc\x00\x14\x0a\x85\x87\xce\xd6\xd6q\x07V\x1b\x96\xc7\xaf\xa3\xde\xf9HZ\xde\x0e\xc3w\x1a\x87\x8e\x14\x86y\xe0f\xac\xbf\xffr\xbb<\x91v\x0bkkk\xdb\xe9C\x87\xee\x02\x9f\xa4\x8f\x1c\xa9-.-\xfd|muuc\xf8\xc0\x81R_4\xfa\xb7I{\x05/\x02\x8f\x0c\x16\x1d\x98\xd9\xac\xd5\xde\x9b\x9f\x9f\xff\x07\xc0\xcff/\x00g\x04\xa7/\x96J7\xda\xe5\xe9\xda\xe5^&\x93\x97\xf4\xa1\xa4\x5c)\x08f\xbb\xf9\x01\xf9\xd9l\x0e\xf8T\xd2l)\x08r\x9d\xcc]o\xc3\xa1T*\xf7\xd7\x9d;ffy/\x9b}#b\x96k\x9cp\xff\x93\xef\xfbi\x85a\xde\xe0\x98\xa4\xfc+CC\x1fw\xa5\xedfh\xca\xf3\xbc1\x99\xcd\x02\xc3\xd4?\xb3\xab\xc0\xdd\xc6\xf2\xcb\xd4\x7fHF\x80\x9b\x8d\xdb\xb3m\xe3=\x15\x00\xd4o\xc8D\x22qBa8\x86\x94\x06\x9a\xe7\xc4\xba\xc1o2\xab\xc4\xfa\xfb/\x17\x8b\xc5G\xbd\xc4\xdd\xd5\xae\x9eo\xfd\x07\xb0\xd0<\xea\x1c\xa0\xa5_\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x15;\xdc;\x0c\x9b\x00\x00\x00*IDAT\x08\xd7c`\xc0\x00\x8c\x0c\x0cs> \x0b\xa4\x08020 
\x0b\xa6\x08000B\x98\x10\xc1\x14\x01\x14\x13P\xb5\xa3\x01\x00\xc6\xb9\x07\x90]f\x1f\x83\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\x9cS4\xfc]\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b\x1b)\xb3G\xee\x04\x00\x00\x00$IDAT\x08\xd7c`@\x05s>\xc0XL\xc8\x5c&dY&d\xc5pN\x8a\x00\x9c\x93\x22\x80a\x1a\x0a\x00\x00)\x95\x08\xaf\x88\xac\xba4\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x01\xed\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01jIDATX\x85\xed\x97\xcbN\xc2@\x14\x86\xbfC\x08x}\x00\xf4\x15\xd4\x84w\x91ei\x0bq\xa1\xef#\xae\x9aq\xa8K|\x077\xae\x09\xe1\x1d\xc4\xbd\x17\xe4\x92\x1e\x17\xa5\xa6\x06\xd8\x98!\x18\xed\xbf\x9av&\xfd\xbeN\xa6\xcd9\xf0\xdf#\xf9\x0bU\x15kLP\x12\xb9T8\x05v\x1cq>\x04\x86@\xc7\x0b\x02+\x22\xba$\xa0\xaa\x12\x1bs\xab\x22M`\x02\xf4\x11yu\x82W=\x00\xea@\x15\x11\xd3\xf4\xfdv&Q\xce\xd6Xc\x02I\xe1\x8f\xa5r\xb9\xe1y\xde\xc8\x09|\x918\x8ek\xc9|\xdeC5\xb4\xd6>\x00]\x80R\xb6\xa0$r\x09L\x128w\x0d\x07\xf0<o4O\x92\x060\x15\xd5\xab/n6P8\x01\xfaa\x18>\xbb\x86gi\xb7\xdbO@\x9f\xf4|}\x17\x00v\x81\xf7M\xc1sy\x03\xf6V\x09l%\x85\xc0\xd6\x05\xca\xeb&\xac1\xban\xee'\xf1\xc3PV\xdd\xdf\xfa\x0e\x14\x02\x85@!\xb0\xf6?\xb0\xee\xbbu\x9d\xad\xef@!\xf0\xab\x04\xc6\xe4*\x95\x0df\x7f\xc1Z\x12\x18\x02\xf58\x8ek\x9b\x22[k\x8fI\xcb\xf3\xc1\x92\x80\xc0\x0dPMf\xb3\xfb(\x8a\x8e6\x02O\x92\x1eP\x11\xe8\xe4\xb8iTU\xba\xd6F\xa8\x86\xc0\x94\xb41yqBW=$}\xf3\x8aB\xe4\x07\xc1E\xd6\x98,\xb7f\xd6z\x8b\xba\xfd\x8c\xb4Rv\x9110@\xf5\xdao\xb5\xee\x1c=\xf3\x8f\xe4\x13\xfb6zV\x11\xde\xcf\xd8\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\xa6\x89PNG\x0d\
x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1f \xb9\x8dw\xe9\x00\x00\x00*IDAT\x08\xd7c`\xc0\x06\xe6|```B0\xa1\x1c\x08\x93\x81\x81\x09\xc1d``b`H\x11@\xe2 s\x19\x90\x8d@\x02\x00#\xed\x08\xafd\x9f\x0f\x15\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02\xd4\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x02QIDATX\x85\xed\x96AKTQ\x14\xc7\x7f\xe7\x8d\xb8\xd0&0wi\x84\xe1\xaa)\x90A\xc7\x92^\xa0\x1b\xa1\x8d\x0a\xf5\x19Z;3\xda\xd8j\x16A6\x83\xf3\xbe\x87A\x8d\xad\xc2M\xf6\x14\xf4\x0d\x99H\x0e\x11\xe2\xaa\x11\xdb\x184\xa8\x0b\xc3wZ\xccH\x10\xf3t\xee\xe8\xae\xf9o\xef9\xfc\x7f\xf7\xdc{\xcf=\xd0TS\xff\xbb\xc4$8\x92.\xb6v\x86\x0f'T\x18\x07\x8d\x02]\xd5\xa5\x12\xcag\x11\xc9\xef\x97\xdb\xf3\xc5t\xe4\xf8\xd2\x01lg\xed1*\x19\xa0\x07\xe4\x0b\xaaKX\x94\x00D\xb5K\xb1\x86A\xef\x22\xec\x082\xedN\xc6\xde\x5c\x0a\xc0\x93\xf9\xf9\xd0\x8f\xdd\x9b\x19\x948\xf0^\x95\xd4Jbp\xb3V\xec\x90S\xe8\x0b\xf9:\x8b0\x0ad\x97\xcb\xb1\x14i\xf1\xeb\xdddM\xd9\x8e7g\xe7\xbc\x93\x87\xceZ\xb2\xee\x9c\x9c7e\xe7\xbc\x13;\xe7e\xce\x8b=\xb3\x02\xd5\xb2\xbf\x16$\xe9\xc6cs\xf5\x02Tr\xbdi\x94W\x08\x13\xcb\x93\x83yc\x80H\xba\xd8z\xed\xea\xc1WA\xbf\xb9\xf1{\x8fL\xccO\xf5\xc0),\x8aj\xcf\xcf\xf2\x95H\xd0\xc5\xb4\x82\x92;\xc3\x87\x13\xc0-_e\xa6\x11s\x00\xcb\x97g@oG\xf8`,0&h\xa1\xf2\xd4\xd8\x0c\xbap\xf5\xc8M\x0cl\xa8\xb2%`\x0e\x00\x1a\x15\xf4c\xa3\xe6\xa7\x12\xf8\x80\xd0\xdf\x00\x00\xd7\x15)]\x14@a\x97\xbf\x0d\xcb\x08\x00\xc4\xacS\xd64\x10\x11 
\xb0\x17\x9c\x05\xb0\x87O\xf7E\x01\x14\xed\x02\xf6\xcc\x01\x94O\x0a\xc3\x17\x05\x00F\x80\x821\x80\x88\xe4E\xb83\xe4\x14\xfa\x1au\xb6\x9d\xd5(p\x1b\xd1w\xc6\x00\xfb\xe5\xf6<\xc2N\xc8\xd7\xd9\x86\xdcU\x05\xb52\xc0\xf6Q[\xcb\x821@1\x1d9Ve\x0aa\xd4\xceyS\xa6\xfev\xceK\x01#\xa2~r\xfdi\xffoc\x00\x80\x95\xf8\xe0[ \x0b\xcc\xd6\x0d\xa1*\xf6\xdc\xda\x0c\x22/D\xc8\xb8\x89\xfb\x81\xe5\x87z\xe6\x81\xb4Zv\xb8\xf0\x12a\x1aX\x14\xb5Rnb`\xa3V\xa8\xed\xacF\xabe\x1f\x11!\xe3\xfe\x8a=?\xef;6\x18H\xbcq\x94,\xd0\xab\xca\x96\x08K\x08\xdf\x01PnPy1\x11`[\xd4O\x9e\xb7sc\x00\xa8\xfc\x90\x1d\xe1\x831\xaa#\x99 \xdd\x15\x7f-\x89\xca:\x96\xe6\x8f\xdaZ\x16\xce:\xf3\xa6\x9aj\xea_\xfd\x01\xd3\x1c\xd9\x7f^\xb93\xcd\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02\x00\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01}IDATX\x85\xed\x97;N\x02Q\x14\x86\xbf\x83(>\x17\xa0nAMHxD\x9dq\x15Z\xfa\x8a\xb1\xd0\x0eHt\x0d6`\xa3V>c\xa9{0\x194\x82$\x84\xb8\x07\xb5\xf7\x81\x82p,t\x08\x04\xc3\x14\xceX\xe8|\xdd\xbd\xe7\xe6\xfe_ns\xcf\x81\xff\x8e\xb4\xacT\xc5\xc8\xe4\x96De\x0da\x1c\xe8u)\xe7\x15\xe5\x16d\xd7JF\x8f\x11\xd1v\x01U\x99\xd9\xce\x1f\xa9\xb2\x00\xbc\x09\x14\x15}r#]\x90A\x850\x10\x029\xb4\x12\xd1\x15[\xa2!`\xa4\xaf\x97\x059\x00\xbdD\x82sV\x22r\xefF\xb8\x8d\x99)\x0c\xa3\xb53`J\x95\xc5l*~\x02\x10hX\xaa\xac\x01o]\xef\x81Y\xb7\xc3\x01\xacD\xe4\xbe^\xad\xce\x01\x15\x11\xd6\xed\xfd\x86\x00\xc2\x98@\xf1b#\xf6\xe0v\xb8\xcd\xe5\xa6q\x07\x14\x81\xf1v\x01\xe8Sx\xf1*\xbc\x89g\xa0\xdf^\x04\x9dN\x9b\xe9\x9c:\x9d\xe9\x84\x95\x8cK\xa7z\xa0S\xf17\xf0\x05|\x01_\xc0\x17\xf0\x05|\x01_\xc0\xb1\x1fp\xfa\xcf\x7fJ\xf3\x0b\x94\xa5\xa9S\xf1\x90\x01\xa0\xfc\x8d\x80\xde*\x84\xcdLa\xd8\xab\xe4\xc9\xf4\xd5(\x10\x16(\xb5\x0bh`\x0f\x08\xa1\xb5\xf3\xe9\xad\xec\x88\x17\xe1\xddt\x9d\x01=u\xd1]{\xbfe0137\xfb\xa0\xcb@\x05(\x82>\xba
\x13/C|\x0e&=\x0a\xfb\xd9Dl\xb5m0\xb1%\x8ct~\xfe\xabo\x9f\x00\xfa\xdc\x11\xa0,PR\x95\x1d+\x15;u\xe9\xce?\xc2\x07\xd1\xbcu\x94\xcf\xbc\x8d\xf9\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03\xa5\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x03\x22IDATX\x85\xed\x96MlTU\x14\xc7\x7f\xe7\x0d\xa9\x09\xcc\x90Pv\xb6\xc6``\xe3\xa3\x864\xf4\xc3\xc6g\xa4\x1b\xa2\x98@\x13]\xc9\x1a6\xda\x84~Y\x5c\xcd\xce:\xa43\x09\xcb\xaee\x83\x89\x19L\x04\xc3\xc6:\x98\xb4o\x22bK'\xc64\xac\x9c\x067\x94t\x98\x92P:\xef\xef\xe2M\xa75\x99\xe9\xccCv\xf4\xbf\xba\xe7\xbds\xef\xf9\xdds\xee\x17\xeciO\xaf\xba,\x8a\xb3\x9b,\xb4\x1dN\xac\x0f\xc98\x07\xea\x06:\xaa\xbf\x8a\x88\xdf\xcd,\xfb\xa8t [H\xba\x1b/\x1d\xc0\xcb\xcc\x7f\x82,\x05\x1c\x01\xbb\x8f4\x8bC\x11\xc0\xa4\x0e\xe1\x9c\x02ua<0l\x22w\xa9\xf7\xfb\x97\x02\xf0\xe9\xf5\xeb\xb1\x7fV\xdeL!F\x80\x9f$&\x7f\x1d\xed[\xa8\xe7;\x90\xc9\x9f\x88\x05\x9a\xc28\x0d\x5c\xb9S\xea\x9d$iA\xab\x93\xac+/\xe3O{i\xbf\xf2~f~\xac\xe5>i\x7f\xdcK\xfb\x15/\xed\xa7\x9a\xf9\xee\x9a\x81j\xda\xbf3l,7\xd2;\x0d\xf0\xe1\xd5\xe5\xd7\x9e<\x7f|\xd1\xe03Y\xd0\x15\x0eb\x8b\x18\xd7\xe2\xb1\xf6\x99[\xc3\xc7\x9eU\xc1'\x10\xdf`\x0c\xdd\xb9\xd4\x97\x8d\x0c\xe0&\x0bm\xed\x07\xcb\x7f\x1a\xfa+7\xd2\xff\x11\xc0\x07W\xe7;+\x9b\xceMP\x17X\x00r\xaa\xc3\x84mc1\x16\xd3\x99\xd9\xe1\xfe\x22\xc0{\x99\xfcm\x93\x8e\xac\x96\xe2n\xa3\x85\xe94\x028\x9cX\x1f\x02\xde\x0ad\x97\xb7f^\xd9tnb:\x1ezhG\xdfZ\xbb\xab\xb2\xc9\x8fn\xb2\xd0\x06\xe0\x04\xf6%p\xf4P\xa2|\xb6Q\x9c\x86\x00\xe1Vcak\xc1\x95+\xab\x17@]h\x97\xb2\x09\x03{\xa7\xfd`\xf9\x02@n\xb4\xe7\x9e\xc4\x92At\x00P\xb7\xa1_jf`\xe7\xc3T\xef.A\x00\x9c\xdf\xb2\x0d~\xc68\xf9\x02\x00\xbc.\xacX\xb3L\xee\x7f\xd3^_\x06\x0e\xc8\xdd\x01\xb4\xc2\xf6\x81\x15\x09\x00,\xdaIY7\x80\x99\x11f%2\xc0C\x02:k\x96\xac\xd0j\x09$\x96\xb6mu\x00\x0f\xa3\x03\x88\xdf\x04\xa7\
xb6=\xf5m\xab%0\xb3k;>\x0d\x02\xf9\xc8\x00f\x965\xe3\xf8@&\x7f\x02 \x1ek\x9f\xc1X\xc4\xd0.\xd1%\xe3\x8f\xd5R|\x06\xc0\xcb\xccu\x03oc\xfa!2\xc0\xa3\xd2\x81,\xc6\x83X\xa0)\x80[\xc3\xc7\x9e\xc5b:\x03\xdc\xafF\xab\x95\xa3\xba\xf2\x11,TT\xf9\xb8\x90t7\x90\x0c9)`\xf9\xe9\xfe}7\x22\x03\x14\x92\xee\x86\xc48\xc6i/\xed\x8f\x03\xcc\x0e\xf7\x17W\xd7\xe2=\xc0\x17R\x90\x07\xd6\x81u\xa4\xbc\x99>\x7f\xbc\x16\xef\x9b\x1b\x19X\x01\xf0\xd2\xfe$0h\x0a\xc6\xee^<\xf9\xbcQ\x9c\xa6\xf2\xd2~\xaaz\xb1\x8c\xb7\xd4A2oz\xferx\x81\xf9S\xcd\xdc\x9bo\xb3\xa4\x1c/\x91\xff\x1ac\x02\xb8mr&s\xa3=\xf7\xea\xc2f\xe6\xba\xabi\x1f4#\x95[\xeb\xfd\xaa\xd9u\x1c\xe1A\xe2\x9fC\x5c\x01\x8eJ,\x991\x8b\xf17\x00\xe2\x0d\xc2\x1d\xe3\x02\xcb\xa6`,7\xfan\xc3\x85\xf7B\x00\x10\xde\x90\x87\x12\xe5\xb3T\x9fd\x86u\x86\xf1U4\xd9]\x1ce\x9f\xee\xdfw\xe3\x7f\xd5|O{z\xe5\xf4/\x95?G\xacm\xe50s\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02\x02\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01\x7fIDATX\x85\xed\x97\xcbJBQ\x14\x86\xbfe\xa5\xd9\xe5\x01\xacW\xc8@(\xa3\xd2\x9e\x22\x87\xdd\x88\x0663\xa1\x9e\xa1\x896\xa9F]iX\xef\x10\x1c\x8d\xb4@\xa2w\xc8\xe6]\xac,W\x83:\xa2\x1c\xcf$\xb6\x18u\xfe\xd9^\x1b\xf6\xf7\xb1`o\xf6\x82\xff\x1eiZ\xa9J,[X\x14\x95$B\x18\xe85\xc4yA\xb9\x05\xd9\xb1\xd6\xc6\x8f\x10Q\xa7\x80\xaa\xccl\x15\x0fU\x99\x07^\x05J\x8a>\x9a\xa0\x0b2\xa0\x10\x01\x02 \x07Vj|\xd9\x96\xa8\x0b\xc42\x97K\x82\xec\x83\xe6\x91\xee\x84\x95\x1a+\x9b\x80\xdb\x89g\xafC\xe8\xc7)0\xa5\xcaB.=q\x0c\xe0\xab[\xaa$\x81\xd7\xaew\xdf\xaci8\x80\x95\x1a+\xd7\xaa\xd5\x04\xf0&\xc2\xaa]\xaf\x0b 
\x8c\x08\x94\xce\xd7\xa3\xf7\xa6\xe1v\xf2\x1b\xb1;\xa0\x04\x84\x9d\x02\x10Txn\x17\xbc!O@_+\x81\x8e\xc4\x13\xe8\xb8@\xb7\xdbF<SP\xb7\xbd\x9f\xc4Z\x9b\x90V\xf5\x8ew\xc0\x13\xf0\x04<\x01\xd7w\xc0\xed\xde\x9aN\xc7;\xe0\x09\xfc*\x81\x8a4\xfcT\xda\x98~\xa0\xd2B@o\x15\x22\xf1\xecu\xa8]\xe4\xc9\xcc\xc50\x10\x11\xb8q\x0a\xa8o\x17\x08\xa0\x1fg\xd3\x9b\xb9\xa1v\xc0{\xe8:\x05\xfc5\xd1\x1d\xbb\xde4\x98\xc4\xb3W{\xa0K\xc0\x1bP\x02}0\x83\x97A\xbe\x06\x13\xbf\xc2^.\x15]q\x0c&\xb6D,S\x9c\xfb\xfe\xb7\x8f\x02A3\x02T\x04nTe\xdbJGO\x0c\x9d\xf9G\xf2\x09\xb5\xbdu\x94\xee\x91\xe8\xbe\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x00\x9e\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x15\x0f\xfd\x8f\xf8.\x00\x00\x00\x22IDAT\x08\xd7c`\xc0\x0d\xfe\x9f\x87\xb1\x18\x91\x05\x18\x0d\xe1BH*\x0c\x19\x18\x18\x91\x05\x10*\xd1\x00\x00\xca\xb5\x07\xd2v\xbb\xb2\xc5\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x01\xeb\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01hIDATX\x85\xed\x97MN\xc2@\x18\x86\x9f\xaf\x10\x14\xd4\x03\xa0W\x10\x13\xb6\x9eCv\xc8X\x8c&p\x1f11\xa1tH\x97x\x0c\xd7\xc4x\x07q\xef\x0f\x02\x91\xcf\x85\x94 \xa0,\x1c\x5ch\xdf\xddL\xdf\xf4y\xa6M\xd3\x19\xf8\xef\x91\xf9\xb1o\xcc\x09P\x03\x0a\xc0\xa6#\xce+p'\x22\x8d 
\x0c-\xa0\xcb\x04\xc47&\x04*\xc0\x00\xe8\x02O\x8e\x04\xb6\x81\x22\xb0\x01\xb4Z\xd6\x9e\xc6\x12S\x01\xdf\x18\x1f\x08\x04n\xd2oo\xa5\xab(\xea9\x82\x03p^.\xe7G\x9e\xd7A\xe4P\xc0\x04\xd6\xb6\x01\xbc\x99N\x0d\x18\x8cE\x8e\x5c\xc3\x01\xae\xa2\xa8'\xe9t\x09\x18\xaaH=\x9e\x9f\x15\xd8\x07\xbaa\x18>\xb8\x86\xc7\x09\x82\xe0\x1e\x91.\xaa\x85e\x02YT_\xd6\x05\x9ff<~\x06r\xf10\xbd\xaa\xef\x1b\xa3\xab:\xdf\xa5e\xed\xfc\x97\xf6)\xdew\x17\x7f#\x89@\x22\x90\x08$\x02\x89@\x22\x90\x08\xac\xdc\x0f\xac\xfa\x9f\xff4\xb3O\xa0\x8fH\xee\xcb\xa63\xa2\xb7\x05\xf4\x17\x04\x14\xee\x80\xe2y\xb9\x9c_\x17\xbbR\xa9\xec\xa1Z\x04n\x17\x04<\x91K`c\x94J]W\xab\xd5\xddu\xc0S\x22\x1d \xa3\x22\x8dx~\xfe`\xd2\x04|`8\xd9\xbd>:\xa1\x8b\xecLV\x9eQh\x86\xd6\x9e1\x7f0\x89\xabUc\x8eU\xa4\x8e\xea\x01\x90u\x22\xf0\xf1\xceoQ\xbdh\xb5\xdb\x91\xa3{\xfe\x91\xbc\x03\x16qj'Dt\xfeO\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = b"\x00\x09\x09_\x97\x13\x00q\x00s\x00s\x00_\x00i\x00c\x00o\x00n\x00s\x00\x08\x08\x92\xac\xbe\x00d\x00a\x00r\x00k\x00s\x00k\x00i\x00n\x00\x09\x00(\xad#\x00s\x00t\x00y\x00l\x00e\x00.\x00q\x00s\x00s\x00\x02\x00\x00\x07\x83\x00r\x00c\x00\x11\x0a\xe5l\x07\x00r\x00a\x00d\x00i\x00o\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00.\x00p\x00n\x00g\x00\x09\x06\x98\x83'\x00c\x00l\x00o\x00s\x00e\x00.\x00p\x00n\x00g\x00\x11\x08\x8cj\xa7\x00H\x00s\x00e\x00p\x00a\x00r\x00t\x00o\x00o\x00l\x00b\x00a\x00r\x00.\x00p\x00n\x00g\x00\x1a\x01!\xebG\x00s\x00t\x00y\x00l\x00e\x00s\x00h\x00e\x00e\x00t\x00-\x00b\x00r\x00a\x00n\x00c\x00h\x00-\x00m\x00o\x00r\x00e\x00.\x00p\x00n\x00g\x00\x0a\x05\x95\xde'\x00u\x00n\x00d\x00o\x00c\x00k\x00.\x00p\x00n\x00g\x00\x13\x08\xc8\x96\xe7\x00r\x00a\x00d\x00i\x00o\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00.\x00p\x00n\x00g\x00\x15\x0f\xf3\xc0\x07\x00u\x00p\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\x00\x1f\x0a\xae'G\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\x00\x0f\x0c\xe2hg\x00t\x00r\x00a\x00n\x00s\x00p\x00a\x00r\x00e\x00n\x00t\x00.\x00p\x00n\x00g\x00\x16\x01u\xcc\x87\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00.\x00p\x00n\x00g\x00\x14\x0b\xc5\xd7\xc7\x00s\x00t\x00y\x00l\x00e\x00s\x00h\x00e\x00e\x00t\x00-\x00v\x00l\x00i\x00n\x00e\x00.\x00p\x00n\x00g\x00\x11\x08\x90\x94g\x00c\x00l\x00o\x00s\x00e\x00-\x00p\x00r\x00e\x00s\x00s\x00e\x00d\x00.\x00p\x00n\x00g\x00\x14\x07\xec\xd1\xc7\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00.\x00p\x00n\x00g\x00\x0e\x0e\xde\xfa\xc7\x00l\x00e\x00f\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\x00\x12\x07\x8f\x9d'\x00b\x00r\x00a\x00n\x00c\x00h\x00_\x00o\x00p\x00e\x00n\x00-\x00o\x00n\x00.\x00p\x00n\x00g\x00\x0f\x02\x9f\x05\x87\x00r\x00i\x00g\x00h\x00t\x
00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\x00\x0e\x04\xa2\xfc\xa7\x00d\x00o\x00w\x00n\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\x00\x11\x08\xc4j\xa7\x00V\x00s\x00e\x00p\x00a\x00r\x00t\x00o\x00o\x00l\x00b\x00a\x00r\x00.\x00p\x00n\x00g\x00\x10\x01\x07J\xa7\x00V\x00m\x00o\x00v\x00e\x00t\x00o\x00o\x00l\x00b\x00a\x00r\x00.\x00p\x00n\x00g\x00\x19\x08>\xcc\x07\x00s\x00t\x00y\x00l\x00e\x00s\x00h\x00e\x00e\x00t\x00-\x00b\x00r\x00a\x00n\x00c\x00h\x00-\x00e\x00n\x00d\x00.\x00p\x00n\x00g\x00\x1c\x01\xe0J\x07\x00r\x00a\x00d\x00i\x00o\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\x00\x14\x06^,\x07\x00b\x00r\x00a\x00n\x00c\x00h\x00_\x00c\x00l\x00o\x00s\x00e\x00d\x00-\x00o\x00n\x00.\x00p\x00n\x00g\x00\x0f\x06S%\xa7\x00b\x00r\x00a\x00n\x00c\x00h\x00_\x00o\x00p\x00e\x00n\x00.\x00p\x00n\x00g\x00\x0c\x06A@\x87\x00s\x00i\x00z\x00e\x00g\x00r\x00i\x00p\x00.\x00p\x00n\x00g\x00\x10\x01\x00\xca\xa7\x00H\x00m\x00o\x00v\x00e\x00t\x00o\x00o\x00l\x00b\x00a\x00r\x00.\x00p\x00n\x00g\x00\x1c\x08?\xdag\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\x00\x0f\x01\xf4\x81G\x00c\x00l\x00o\x00s\x00e\x00-\x00h\x00o\x00v\x00e\x00r\x00.\x00p\x00n\x00g\x00\x18\x03\x8e\xdeg\x00r\x00i\x00g\x00h\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\x00\x1a\x0e\xbc\xc3g\x00r\x00a\x00d\x00i\x00o\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\x00\x17\x0c\xabQ\x07\x00d\x00o\x00w\x00n\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\x00\x11\x0b\xda0\xa7\x00b\x00r\x00a\x00n\x00c\x00h\x00_\x00c\x00l\x00o\x00s\x00e\x00d\x00.\x00p\x00n\x00g\x00\x1a\x01\x87\xaeg\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00i\x00n\x00d\x00e\x00t\x00e\x00r\x00m\x00i\x00n\x00a\x00t\x00e\x00.\x00p\x00n\x00g\x00\x17\x0c
e\xce\x07\x00l\x00e\x00f\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\x00\x19\x0bYn\x87\x00r\x00a\x00d\x00i\x00o\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\x00\x1a\x05\x11\xe0\xe7\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\x00\x17\x0f\x1e\x9bG\x00r\x00a\x00d\x00i\x00o\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\x00 \x09\xd7\x1f\xa7\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00i\x00n\x00d\x00e\x00t\x00e\x00r\x00m\x00i\x00n\x00a\x00t\x00e\x00_\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\x00\x0c\x06\xe6\xe6g\x00u\x00p\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\x00\x1d\x09\x07\x81\x07\x00c\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x18\x00\x02\x00\x00\x00\x01\x00\x00\x00+\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00F\x00\x02\x00\x00\x00'\x00\x00\x00\x04\x00\x00\x04L\x00\x00\x00\x00\x00\x01\x00\x00\x84a\x00\x00\x03@\x00\x00\x00\x00\x00\x01\x00\x00}\xd3\x00\x00\x00\xb8\x00\x00\x00\x00\x00\x01\x00\x00jX\x00\x00\x01\xd0\x00\x00\x00\x00\x00\x01\x00\x00sr\x00\x00\x05\xa0\x00\x00\x00\x00\x00\x01\x00\x00\x8fD\x00\x00\x03\x9e\x00\x00\x00\x00\x00\x01\x00\x00\x7f\x9f\x00\x00\x04\xb0\x00\x00\x00\x00\x00\x01\x00\x00\x87(\x00\x00\x02\xd2\x00\x00\x00\x00\x00\x01\x00\x00{\xc7\x00\x00\x04\xd4\x00\x00\x00\x00\x00\x01\x00\x00\x89\x82\x00\x00\x02\xf6\x00\x00\x00\x00\x00\x01\x00\x00|k\x00\x00\x06F\x00\x00\x00\x00\x00\x01\x00\x00\x94\xb7\x00\x00\x00\xf2\x00\x00\x00\x00\x00\x01\x00\x00k\x12\x00\x00\x04.\x00\x00\x00\x00\x00\x01\x00\x00\x83\xdc\x00\x00\x04\x0a\x00\x00\x00\x00\x00\x01\x00\x00\x832\x00\x00\x03\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x82\x9b\x00\x00\x00x\x00\x00\x00\x00\x00\x01\x00\x00gZ\x00\x00\x06\xfa\x00\x00\x00\x00\x00\x01\x00\x00\x9cj\x00\x00\x02\xa8\x00\x00\x00\x00\x00\x01\x00\x00{-\x00\x00\x02X\x00\x00\x00\x00\x00\x01\x00\x00x\x93\x00\x00\x03f\x00\x00\x00\x00\x00\x01\x00\x00~\xbb\x00\x00\x04r\x00\x00\x00\x00\x00\x01\x00\x00\x85A\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00i\xa8\x00\x00\x020\x00\x00\x00\x00\x00\x01\x00\x00v9\x00\x00\x03\x18\x00\x00\x00\x00\x00\x01\x00\x00}\x14\x00\x00\x01\x0c\x00\x00\x00\x00\x00\x01\x00\x00mX\x00\x00\x07\x18\x00\x00\x00\x00\x00\x01\x00\x00\x9d\x0c\x00\x00\x06\xb4\x00\x00\x00\x00\x00\x01\x00\x00\x9ad\x00\x00\x01h\x00\x00\x00\x00\x00\x01\x00\x00p\xd7\x00\x00\x00P\x00\x00\x00\x00\x00\x01\x00\x00c\xaa\x00\x00\x06\x0e\x00\x00\x00\x00\x00\x01\x00\x00\x91\xdf\x00\x00\x02\x02\x00\x00\x00\x00\x00\x01\x00\x00uF\x00\x00\x05x\x00\x00\x00\x00\x00\x01\x00\x00\x8e\xa0\x00\x00\x05\xda\x00\x00\x00\x00\x00\x01\x00\x00\x915\x00\x00\x05D\x00\x00\x00\x00\x00\x01\x00\x00\x8d
\xf6\x00\x00\x01\xac\x00\x00\x00\x00\x00\x01\x00\x00r\xab\x00\x00\x05\x0a\x00\x00\x00\x00\x00\x01\x00\x00\x8a&\x00\x00\x02\x86\x00\x00\x00\x00\x00\x01\x00\x00z\x83\x00\x00\x06\x80\x00\x00\x00\x00\x00\x01\x00\x00\x96\xbb\x00\x00\x018\x00\x00\x00\x00\x00\x01\x00\x00p4\x00\x00\x00.\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00"
def qInitResources():
    """Register the embedded Qt resource data (icons/stylesheet) with Qt's
    resource system so ':/...' paths resolve to the byte blobs above."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    """Unregister the embedded Qt resource data again (mirror of qInitResources)."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

# pyrcc-generated modules register their resources as an import side effect.
qInitResources()
|
[
"razielsun@gmail.com"
] |
razielsun@gmail.com
|
e2ca12699ef4867c8f0038004a8a54d12992063f
|
9fcca5389a6a888c9a36f623139da2a0590be150
|
/heydayzdiary/migrations/0033_remove_day_entry_bed_time.py
|
36f96aa0f47f1b5603aba21a150194a8eac0f70b
|
[] |
no_license
|
aramicon/heydayzdiary
|
1914261a081d7f1b870fef3aee8ad73bbd346299
|
85cd90090decbea74a3e2e017b6b590378f613a0
|
refs/heads/master
| 2022-03-19T22:54:35.062260
| 2022-02-21T20:01:47
| 2022-02-21T20:01:47
| 104,363,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-21 13:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``bed_time`` column from the ``day_entry`` model.

    Auto-generated by Django 1.11.4 ``makemigrations``; only documentation has
    been added -- the dependency/operation data must stay as generated so the
    migration graph remains consistent.
    """

    # Must apply after 0032, which this migration builds on.
    dependencies = [
        ('heydayzdiary', '0032_auto_20170921_1350'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='day_entry',
            name='bed_time',
        ),
    ]
|
[
"aramicon@gmail.com"
] |
aramicon@gmail.com
|
d412f5d4d9f45f53b4d4cb7f89a0e4bfb0ab0e95
|
cbbbc1cfb846209c64a8ee77a07deb460bb73610
|
/backend/inventory/venv/Lib/site-packages/ibm_db_tests/test_040_FetchTuple.py
|
0ad776f870cf373451429e75aec9ae851c4d10cc
|
[
"Apache-2.0"
] |
permissive
|
SmartPracticeschool/SBSPS-Challenge-925-Optimized-warehouse-management-of-perishable-goods-for-a-food-delivery-company
|
dc7a07404ac7c29c645a326435786a4317384c3f
|
ffc57ba59a2525ceac245aa8f13d30009b14d6f2
|
refs/heads/master
| 2023-07-19T20:43:27.099710
| 2020-07-19T20:56:45
| 2020-07-19T20:56:45
| 270,385,655
| 4
| 1
| null | 2023-07-13T07:04:44
| 2020-06-07T17:50:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import sys
import unittest
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
    """ibm_db regression test 040: fetch one row with fetch_tuple()."""

    def test_040_FetchTuple(self):
        """Run run_test_040 and diff its stdout against the per-server
        expected blocks (__LUW_EXPECTED__ etc.) at the bottom of this file."""
        obj = IbmDbTestFunctions()
        obj.assert_expect(self.run_test_040)

    def run_test_040(self):
        """Create a one-row ``animals`` table, fetch the row as a tuple,
        print each column, then roll the transaction back."""
        conn = ibm_db.connect(config.database, config.user, config.password)
        ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)

        # Drop the test table, in case it exists
        drop = 'DROP TABLE animals'
        try:
            result = ibm_db.exec_immediate(conn, drop)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the drop stays best-effort.
            pass

        # Create the test table
        create = 'CREATE TABLE animals (id INTEGER, breed VARCHAR(32), name CHAR(16), weight DECIMAL(7,2))'
        result = ibm_db.exec_immediate(conn, create)

        insert = "INSERT INTO animals values (0, 'cat', 'Pook', 3.2)"
        ibm_db.exec_immediate(conn, insert)

        stmt = ibm_db.exec_immediate(conn, "select * from animals")
        onerow = ibm_db.fetch_tuple(stmt)
        for element in onerow:
            print(element)

        ibm_db.rollback(conn)
#__END__
#__LUW_EXPECTED__
#0
#cat
#Pook
#3.20
#__ZOS_EXPECTED__
#0
#cat
#Pook
#3.20
#__SYSTEMI_EXPECTED__
#0
#cat
#Pook
#3.20
#__IDS_EXPECTED__
#0
#cat
#Pook
#3.20
|
[
"jtaniha@gmail.com"
] |
jtaniha@gmail.com
|
670592f7a6691261829166472f8bd4d08d841f30
|
3fcdeb9d0f447b6f8af0f98c712345f768535fca
|
/image_classifier/data.py
|
c885ac5e8b0b1bd0d778b78c51260ec785793197
|
[] |
no_license
|
jl-massey/DSND_Term1
|
983b219ecad4de5092ed1690615dd46add9cfa51
|
994aac5af878e0d28c82621e15bf4be090e4780c
|
refs/heads/master
| 2020-03-22T17:50:01.396571
| 2018-09-25T15:28:25
| 2018-09-25T15:28:25
| 140,419,124
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,119
|
py
|
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
from utils import get_max_workers
def count_classes(data_dir):
    """Return how many class sub-directories ImageFolder discovers.

    :param data_dir: Image directory (e.g. the training split root)
    :return: Number of classes found
    """
    dataset = ImageFolder(data_dir)
    return len(dataset.classes)
def class_to_idx(data_dir):
    """Return ImageFolder's mapping from class name to integer index.

    :param data_dir: Image directory (e.g. the training split root)
    :return: dict mapping class-directory name -> class index
    """
    folder = ImageFolder(data_dir)
    return folder.class_to_idx
def get_transform(arch, training=True):
    """Build the torchvision transform pipeline for a model architecture.

    Inception-family models expect 299px crops, everything else 224px.
    Training pipelines add rotation/flip augmentation around the shared
    resize-and-crop core; both variants end with ToTensor + ImageNet
    normalization.

    :param arch: Model architecture name.
    :param training: True if the transform is for a training dataset.
    :return: torchvision ``transforms.Compose`` pipeline
    """
    cropsize = 299 if arch.startswith('inception') else 224

    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    steps = []
    if training:
        steps.append(transforms.RandomRotation(30))
    steps.append(transforms.Resize(cropsize + 32))
    steps.append(transforms.CenterCrop(cropsize))
    if training:
        steps.append(transforms.RandomHorizontalFlip())
    steps.append(transforms.ToTensor())
    steps.append(normalize)
    return transforms.Compose(steps)
def get_dataloader(img_dir, arch, batch_size, training=True):
    """Create a DataLoader over an image-folder dataset.

    :param img_dir: str: path to the directory containing class directories
    :param arch: str: valid model architecture being used
    :param batch_size: Number of images to return in a batch
    :param training: bool: True if the data loader is for training data
    :return: torch.utils.data.DataLoader
    """
    transform = get_transform(arch, training)
    dataset = ImageFolder(root=img_dir, transform=transform)
    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=training,  # shuffle only while training
        num_workers=get_max_workers(),
    )
|
[
"jim.massey@team.telstra.com"
] |
jim.massey@team.telstra.com
|
fa3dc974ee0583d90f7d7261a7d65e39e99cdf5f
|
2ea093309f6637c03566d2af11401417e2fca8fa
|
/server.py
|
f28751fe31e18ff13826277a2bcd709c0d675b20
|
[] |
no_license
|
quildm/Assignment-Great-Number-Game
|
820444ff239dd79b28d393885ff2ccc90a0fffd5
|
73a39704ee2bc213f88ecd016e4d06083f5e0612
|
refs/heads/master
| 2020-12-02T07:45:18.491212
| 2017-07-10T00:43:43
| 2017-07-10T00:43:43
| 96,720,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from flask import Flask, flash, render_template, request, redirect, session
import random
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
def setSession():
    """Store a fresh secret number (random int, 1-100 inclusive) in the session."""
    session['num'] = random.randint(1,100)
@app.route('/')
def index():
    """Render the game page, seeding the secret number on first visit."""
    if 'num' not in session:
        setSession()
    # Debug aid: reveals the secret number in the server console.
    print (session['num'])
    return render_template('index.html')
@app.route('/guess', methods=['POST'])
def checkNumber():
    """Handle a guess submission, then redirect back to the game page."""
    # NOTE(review): the submitted guess is never read or compared here --
    # presumably the comparison happens in the template/client or is
    # unfinished; confirm against the form in index.html.
    if 'num' not in session:
        setSession()
    # Debug aid: reveals the secret number in the server console.
    print (session['num'])
    return redirect('/')
@app.route('/reset', methods=['GET', 'POST'])
def reset():
    """Regenerate the secret number and send the player back to the game.

    Bug fix: the original contained a duplicated nested ``def reset():``
    header, which wrapped the real body inside an inner function that was
    never called -- the route neither reset the number nor returned the
    redirect. The stray inner def is removed.
    """
    setSession()
    return redirect('/')
# Start Flask's built-in development server; debug=True enables the
# interactive debugger and auto-reloader (development use only).
app.run(debug=True)
|
[
"quildm@ymail.com"
] |
quildm@ymail.com
|
a689cb07002456071c67dd35e848176c59018407
|
ec8146beeb2bb86186cfdd4c7087fa01f8ac7a45
|
/src/Snakemake/rules/Alignment/GATK.smk
|
765be9953bc364826edf96f92957adc77ea6e3a9
|
[] |
no_license
|
jonca79/TSO500_GATK4
|
3d28c4e9031604b2caa7a5033a4119ecb464703e
|
e45eea3620ea15aa192ed4db6467182366966eb8
|
refs/heads/master
| 2023-01-29T23:39:10.862496
| 2020-12-08T12:51:03
| 2020-12-08T12:51:03
| 297,880,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,704
|
smk
|
# GATK3 base-quality recalibration (BQSR) + indel realignment workflow:
# recal table -> recalibrated bam -> per-chromosome scatter -> realign ->
# merge. Shell command strings are left exactly as written; only comments
# have been added.

# Chromosomes used to scatter the realignment; bams are split per chromosome
# and merged again by Merge_bam_gatk3.
chrom_list = ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY']


# BQSR step 1: build the recalibration table from known dbSNP sites,
# restricted to the capture bed.
rule GATK_recal_step1:
    input:
        bam = "bam/{sample}-sort-cumi.bam",
        bai = "bam/{sample}-sort-cumi.bam.bai",
        dbsnp = "/data/ref_genomes/hg19/variation/dbsnp_138.vcf.gz", #config
        bed = config["bed"]["bedfile"],
        ref = config["reference"]["ref"]
    output:
        grp = "bam/{sample}-sort-cumi-recal.grp"
    params:
        "--interval_set_rule INTERSECTION -U LENIENT_VCF_PROCESSING --read_filter BadCigar --read_filter NotPrimaryAlignment"
    log:
        "logs/gatk3/recal_step1_{sample}.log"
    singularity:
        config["singularity"]["gatk3"]
    threads:
        10
    shell:
        "(java -jar -Xms1000m -Xmx50960m /usr/GenomeAnalysisTK.jar -T BaseRecalibrator -nct {threads} -I {input.bam} -o {output.grp} -R {input.ref} --knownSites {input.dbsnp} -L {input.bed} {params}) &> {log}"


# BQSR step 2: apply the recalibration table (PrintReads -BQSR).
rule GATK_recal_step2:
    input:
        grp = "bam/{sample}-sort-cumi-recal.grp",
        bam = "bam/{sample}-sort-cumi.bam",
        bai = "bam/{sample}-sort-cumi.bam.bai",
        ref = config["reference"]["ref"]
    output:
        bam = "bam/{sample}-sort-cumi-recal.bam"
    params:
        "-jdk_deflater -jdk_inflater -U LENIENT_VCF_PROCESSING --read_filter BadCigar --read_filter NotPrimaryAlignment"
    log:
        "logs/gatk3/recal_step2_{sample}.log"
    singularity:
        config["singularity"]["gatk3"]
    threads:
        2
    shell:
        "(java -jar -Xms1000m -Xmx91728m /usr/GenomeAnalysisTK.jar -T PrintReads -nct {threads} -R {input.ref} -I {input.bam} -BQSR {input.grp} -o {output.bam} {params}) &> {log}"


# Scatter: extract one chromosome from the recalibrated bam and index it.
rule Split_bam_realign:
    input:
        bam = "bam/{sample}-sort-cumi-recal.bam",
        #bai = "DNA_bam/{sample}-ready.bam.bai"
        # vcf = "Results/DNA/{sample}/vcf/{sample}-ensemble.final.no.introns.vcf.gz"
    output:
        bam = "bam/realign_temp/{sample}-sort-cumi-recal.{chr}.bam",
        bai = "bam/realign_temp/{sample}-sort-cumi-recal.{chr}.bam.bai"
    log:
        "logs/gatk3/split_bam_realign_{sample}-sort-cumi-recal-{chr}.log"
    singularity:
        config["singularity"]["samtools"]
    shell:
        "(samtools view -b {input.bam} {wildcards.chr} > {output.bam} && samtools index {output.bam}) &> {log}"


# Realignment step 1: find intervals needing indel realignment
# (RealignerTargetCreator) using Mills/1000G known indels.
rule GATK_realign_step1:
    input:
        bam = "bam/realign_temp/{sample}-sort-cumi-recal.{chr}.bam",
        bai = "bam/realign_temp/{sample}-sort-cumi-recal.{chr}.bam.bai",
        ref = config["reference"]["ref"],
        indels = "/data/ref_genomes/hg19/variation/Mills_and_1000G_gold_standard.indels.vcf.gz" #config
    output:
        intervals = "bam/{sample}-sort-cumi-recal-realign.{chr}.intervals"
    params:
        "--interval_set_rule INTERSECTION -L {chr} -l INFO -U LENIENT_VCF_PROCESSING --read_filter BadCigar --read_filter NotPrimaryAlignment"
    log:
        "logs/gatk3/realign_step1_{sample}_{chr}.log"
    singularity:
        config["singularity"]["gatk3"]
    shell:
        "(java -jar -Xms500m -Xmx3500m /usr/GenomeAnalysisTK.jar -T RealignerTargetCreator -R {input.ref} -I {input.bam} --known {input.indels} -o {output.intervals} {params}) &> {log}"


# Realignment step 2: perform the realignment (IndelRealigner) over the
# intervals from step 1.
rule GATK_realign_step2:
    input:
        bam = "bam/realign_temp/{sample}-sort-cumi-recal.{chr}.bam",
        bai = "bam/realign_temp/{sample}-sort-cumi-recal.{chr}.bam.bai",
        ref = config["reference"]["ref"],
        indels = "/data/ref_genomes/hg19/variation/Mills_and_1000G_gold_standard.indels.vcf.gz", #config
        intervals = "bam/{sample}-sort-cumi-recal-realign.{chr}.intervals",
    output:
        bam = "bam/realign_temp/{sample}-sort-cumi-recal-realign.{chr}.bam"
    params:
        "-L {chr} -U LENIENT_VCF_PROCESSING --read_filter BadCigar --read_filter NotPrimaryAlignment"
    log:
        "logs/gatk3/realign_step2_{sample}_{chr}.log"
    singularity:
        config["singularity"]["gatk3"]
    shell:
        "(java -jar -Xms909m -Xmx6363m /usr/GenomeAnalysisTK.jar -T IndelRealigner -R {input.ref} -I {input.bam} --targetIntervals {input.intervals} --knownAlleles {input.indels} -o {output.bam} {params}) &> {log}"


# Gather: merge the per-chromosome realigned bams into the final ready bam.
rule Merge_bam_gatk3:
    input:
        bams = expand("bam/realign_temp/{{sample}}-sort-cumi-recal-realign.{chr}.bam", chr=chrom_list)
    output:
        bam = "DNA_bam/{sample}-ready.bam",
        bai = "DNA_bam/{sample}-ready.bam.bai"
    log:
        "logs/gatk3/merge_bam_{sample}.log"
    singularity:
        config["singularity"]["samtools"]
    shell:
        "(samtools merge {output.bam} {input.bams} && samtools index {output.bam}) &> {log}"
|
[
"jonas.almlof@igp.uu.se"
] |
jonas.almlof@igp.uu.se
|
6fab7f8926a85095669ce77bdb87d63bc747815d
|
201b42947d38be70cd0985fb3fc46bd6068ca2f8
|
/virtual_env/libs/mysql-connector/python3/mysql/connector/protocol.py
|
fccc6da7a8056753c07f1d0606e1733f16a1d8af
|
[
"MIT",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-or-later",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rcosnita/fantastico
|
d4fade25a10231f50b62fff6b41d814815dd4b62
|
81c8590556baa9e1148071b7835d74b1efada561
|
refs/heads/master
| 2021-03-12T21:55:03.260192
| 2020-07-28T07:16:39
| 2020-07-28T07:16:39
| 9,280,607
| 3
| 1
|
MIT
| 2020-07-28T07:16:40
| 2013-04-07T17:52:51
|
Python
|
UTF-8
|
Python
| false
| false
| 9,971
|
py
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementing the MySQL Client/Server protocol
"""
import struct
from decimal import Decimal
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from .constants import (FieldFlag, ServerCmd)
from . import (errors, utils)
class MySQLProtocol(object):
    """Builder/parser for packets of the MySQL client/server wire protocol.

    ``make_*`` methods return ``bytes`` payloads ready to be framed and sent;
    ``parse_*`` methods take a raw packet (including its 4-byte header) and
    return plain Python structures.

    Fixes in this revision (parse_statistics only): the two bare ``except:``
    clauses are narrowed so KeyboardInterrupt/SystemExit are not swallowed,
    and the label/value split uses maxsplit=1 so a value containing ``:`` no
    longer causes a spurious unpack failure. All other methods are unchanged.
    """

    def _scramble_password(self, passwd, seed):
        """Scramble a password ready to send to MySQL.

        Implements the 4.1+ auth scramble:
        SHA1(passwd) XOR SHA1(seed + SHA1(SHA1(passwd))).
        Returns a 20-byte bytes object; raises InterfaceError on failure.
        """
        hash4 = None
        try:
            hash1 = sha1(passwd).digest()
            hash2 = sha1(hash1).digest()  # Password as found in mysql.user()
            hash3 = sha1(seed + hash2).digest()
            xored = [h1 ^ h3 for (h1, h3) in zip(hash1, hash3)]
            hash4 = struct.pack('20B', *xored)
        except Exception as e:
            raise errors.InterfaceError('Failed scrambling password; %s' % e)
        return hash4

    def _prepare_auth(self, usr, pwd, db, flags, seed):
        """Prepare elements of the authentication packet.

        Returns (username, password, database) as NUL-terminated byte
        strings; empty/None values become a lone b'\\x00'. The password is
        length-prefixed (20) and scrambled with the server seed.
        """
        if usr is not None and len(usr) > 0:
            _username = usr.encode('utf-8') + b'\x00'
        else:
            _username = b'\x00'

        if pwd is not None and len(pwd) > 0:
            _password = utils.int1store(20) +\
                self._scramble_password(pwd.encode('utf-8'), seed)
        else:
            _password = b'\x00'

        if db is not None and len(db):
            _database = db.encode('utf-8') + b'\x00'
        else:
            _database = b'\x00'

        return (_username, _password, _database)

    def make_auth(self, seed, username=None, password=None, database=None,
                  charset=33, client_flags=0, max_allowed_packet=1073741824):
        """Make a MySQL Authentication packet (Handshake Response)."""
        if not seed:
            raise errors.ProgrammingError('Seed missing')

        auth = self._prepare_auth(username, password, database,
                                  client_flags, seed)
        # Layout: flags(4) + max packet(4) + charset(1) + 23 filler bytes +
        # user + scrambled password + database.
        data = utils.int4store(client_flags) +\
            utils.int4store(max_allowed_packet) +\
            utils.int1store(charset) +\
            b'\x00' * 23 + auth[0] + auth[1] + auth[2]
        return data

    def make_auth_ssl(self, charset=33, client_flags=0,
                      max_allowed_packet=1073741824):
        """Make a SSL authentication packet (the short SSL Request header)."""
        return utils.int4store(client_flags) +\
            utils.int4store(max_allowed_packet) +\
            utils.int1store(charset) +\
            b'\x00' * 23

    def make_command(self, command, argument=None):
        """Make a MySQL packet containing a command byte plus optional argument."""
        data = utils.int1store(command)
        if argument is not None:
            data += argument
        return data

    def make_change_user(self, seed, username=None, password=None,
                         database=None, charset=33, client_flags=0):
        """Make a MySQL packet with the Change User command."""
        if not seed:
            raise errors.ProgrammingError('Seed missing')

        auth = self._prepare_auth(username, password, database,
                                  client_flags, seed)
        data = utils.int1store(ServerCmd.CHANGE_USER) +\
            auth[0] + auth[1] + auth[2] + utils.int2store(charset)
        return data

    def parse_handshake(self, packet):
        """Parse a MySQL Handshake-packet into a dict.

        The 20-byte scramble is split across the packet (8 bytes, then 12
        more after the capability/status fields) and is reassembled here.
        """
        res = {}
        (packet, res['protocol']) = utils.read_int(packet[4:], 1)
        (packet, res['server_version_original']) = utils.read_string(
            packet, end=b'\x00')
        (packet, res['server_threadid']) = utils.read_int(packet, 4)
        (packet, res['scramble']) = utils.read_bytes(packet, 8)
        packet = packet[1:]  # Filler 1 * \x00
        (packet, res['capabilities']) = utils.read_int(packet, 2)
        (packet, res['charset']) = utils.read_int(packet, 1)
        (packet, res['server_status']) = utils.read_int(packet, 2)
        packet = packet[13:]  # Filler 13 * \x00
        (packet, scramble_next) = utils.read_bytes(packet, 12)
        res['scramble'] += scramble_next
        return res

    def parse_ok(self, packet):
        """Parse a MySQL OK-packet (header byte 0x00) into a dict."""
        if not packet[4] == 0:
            raise errors.InterfaceError("Failed parsing OK packet.")

        ok = {}
        try:
            (packet, ok['field_count']) = utils.read_int(packet[4:], 1)
            (packet, ok['affected_rows']) = utils.read_lc_int(packet)
            (packet, ok['insert_id']) = utils.read_lc_int(packet)
            (packet, ok['server_status']) = utils.read_int(packet, 2)
            (packet, ok['warning_count']) = utils.read_int(packet, 2)
            if packet:
                (packet, ok['info_msg']) = utils.read_lc_string(packet)
                ok['info_msg'] = ok['info_msg'].decode('utf-8')
        except ValueError as err:
            raise errors.InterfaceError(
                "Failed parsing OK packet ({})".format(err))
        return ok

    def parse_column_count(self, packet):
        """Parse a MySQL packet with the number of columns in result set."""
        return utils.read_lc_int(packet[4:])[1]

    def parse_column(self, packet):
        """Parse a MySQL column-packet into a DB-API description tuple."""
        column = {}
        (packet, column['catalog']) = utils.read_lc_string(packet[4:])
        (packet, column['db']) = utils.read_lc_string(packet)
        (packet, column['table']) = utils.read_lc_string(packet)
        (packet, column['org_table']) = utils.read_lc_string(packet)
        (packet, column['name']) = utils.read_lc_string(packet)
        (packet, column['org_name']) = utils.read_lc_string(packet)
        packet = packet[1:]  # filler 1 * \x00
        (packet, column['charset']) = utils.read_int(packet, 2)
        (packet, column['length']) = utils.read_int(packet, 4)
        (packet, column['type']) = utils.read_int(packet, 1)
        (packet, column['flags']) = utils.read_int(packet, 2)
        (packet, column['decimal']) = utils.read_int(packet, 1)
        packet = packet[2:]  # filler 2 * \x00

        return (
            column['name'].decode('utf-8'),
            column['type'],
            None,  # display_size
            None,  # internal_size
            None,  # precision
            None,  # scale
            ~column['flags'] & FieldFlag.NOT_NULL,  # null_ok
            column['flags'],  # MySQL specific
        )

    def parse_eof(self, packet):
        """Parse a MySQL EOF-packet (warning count + status flags)."""
        res = {}
        packet = packet[1:]  # disregard the first checking byte
        (packet, res['warning_count']) = utils.read_int(packet[4:], 2)
        (packet, res['status_flag']) = utils.read_int(packet, 2)
        return res

    def parse_statistics(self, packet):
        """Parse the COM_STATISTICS reply into a {label: number} dict."""
        errmsg = "Failed getting COM_STATISTICS information"
        res = {}
        # Information is separated by 2 spaces
        pairs = packet[4:].split(b'\x20\x20')
        for pair in pairs:
            try:
                # Split on the first colon only; the original used maxsplit=2,
                # which made any value containing ':' an unpack failure.
                (lbl, val) = [v.strip() for v in pair.split(b':', 1)]
            except ValueError:
                # Narrowed from a bare except: only a failed unpack is expected.
                raise errors.InterfaceError(errmsg)

            # It's either an integer or a decimal
            lbl = lbl.decode('utf-8')
            try:
                res[lbl] = int(val)
            except ValueError:
                try:
                    res[lbl] = Decimal(val.decode('utf-8'))
                except (ValueError, ArithmeticError):
                    # Decimal() signals bad input via InvalidOperation
                    # (an ArithmeticError subclass).
                    raise errors.InterfaceError(
                        "{} ({}:{}).".format(errmsg, lbl, val))
        return res

    def read_text_result(self, sock, count=1):
        """Read MySQL text result

        Reads all or given number of rows from the socket.

        Returns a tuple with 2 elements: a list with all rows and
        the EOF packet.
        """
        rows = []
        eof = None
        rowdata = None
        i = 0
        while True:
            if eof is not None:
                break
            if i == count:
                break
            packet = sock.recv()
            # 0xffffff payload length means the row spans multiple packets;
            # keep receiving and concatenating until a shorter packet arrives.
            if packet[0:3] == b'\xff\xff\xff':
                data = packet[4:]
                packet = sock.recv()
                while packet[0:3] == b'\xff\xff\xff':
                    data += packet[4:]
                    packet = sock.recv()
                if packet[4] == 254:
                    eof = self.parse_eof(packet)
                else:
                    data += packet[4:]
                rowdata = utils.read_lc_string_list(data)
            elif packet[4] == 254:
                # 0xfe header byte marks the EOF packet ending the result set.
                eof = self.parse_eof(packet)
                rowdata = None
            else:
                eof = None
                rowdata = utils.read_lc_string_list(packet[4:])
            if eof is None and rowdata is not None:
                rows.append(rowdata)
                i += 1

        return (rows, eof)
|
[
"radu.cosnita@gmail.com"
] |
radu.cosnita@gmail.com
|
b533703afa1112639dbb9060e7a0a0afc775db88
|
9e549ee54faa8b037f90eac8ecb36f853e460e5e
|
/venv/lib/python3.6/site-packages/pylint/test/functional/abstract_method_py2.py
|
1e4f318581d93261321a92d5a5c4eb5bc52d85be
|
[
"MIT"
] |
permissive
|
aitoehigie/britecore_flask
|
e8df68e71dd0eac980a7de8c0f20b5a5a16979fe
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
refs/heads/master
| 2022-12-09T22:07:45.930238
| 2019-05-15T04:10:37
| 2019-05-15T04:10:37
| 177,354,667
| 0
| 0
|
MIT
| 2022-12-08T04:54:09
| 2019-03-24T00:38:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
"""Test abstract-method warning."""
from __future__ import print_function
# pylint: disable=missing-docstring, no-init, no-self-use
# pylint: disable=too-few-public-methods, useless-object-inheritance
import abc
class Abstract(object):
def aaaa(self):
"""should be overridden in concrete class"""
raise NotImplementedError()
def bbbb(self):
"""should be overridden in concrete class"""
raise NotImplementedError()
class AbstractB(Abstract):
"""Abstract class.
this class is checking that it does not output an error msg for
unimplemeted methods in abstract classes
"""
def cccc(self):
"""should be overridden in concrete class"""
raise NotImplementedError()
class Concrete(Abstract): # [abstract-method]
"""Concrete class"""
def aaaa(self):
"""overidden form Abstract"""
class Structure(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __iter__(self):
pass
@abc.abstractmethod
def __len__(self):
pass
@abc.abstractmethod
def __contains__(self, _):
pass
@abc.abstractmethod
def __hash__(self):
pass
# +1: [abstract-method, abstract-method, abstract-method]
class Container(Structure):
def __contains__(self, _):
pass
# +1: [abstract-method, abstract-method, abstract-method]
class Sizable(Structure):
def __len__(self):
pass
# +1: [abstract-method, abstract-method, abstract-method]
class Hashable(Structure):
__hash__ = 42
# +1: [abstract-method, abstract-method, abstract-method]
class Iterator(Structure):
def keys(self):
return iter([1, 2, 3])
__iter__ = keys
class AbstractSizable(Structure):
@abc.abstractmethod
def length(self):
pass
__len__ = length
class GoodComplexMRO(Container, Iterator, Sizable, Hashable):
pass
# +1: [abstract-method, abstract-method, abstract-method]
class BadComplexMro(Container, Iterator, AbstractSizable):
pass
|
[
"aitoehigie@gmail.com"
] |
aitoehigie@gmail.com
|
00b8c07cc013ddb3f58ae153db09568888b28e2a
|
99441588c7d6159064d9ce2b94d3743a37f85d33
|
/cloud_convertor/scripts/print_diagram.py
|
b581bfdb63fcdcaadbebddf365488aec5cf569ad
|
[] |
no_license
|
YZT1997/robolab_project
|
2786f8983c4b02040da316cdd2c8f9bb73e2dd4c
|
a7edb588d3145356566e9dcc37b03f7429bcb7d6
|
refs/heads/master
| 2023-09-02T21:28:01.280464
| 2021-10-14T02:06:35
| 2021-10-14T02:06:35
| 369,128,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,991
|
py
|
#!/usr/bin/env python
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
# 数据格式定义:time,car_speed,reel_speed,cb_speed,reel_current,cm7290_current,cb_current
if __name__ == '__main__':
car_speed = np.load("m234_car_speed.npy", allow_pickle=True)
reel_speed = np.load("m234_reel_speed.npy", allow_pickle=True)
cb_speed = np.load("m234_cb_speed.npy", allow_pickle=True)
pf_speed = np.load("m234_pf_speed.npy", allow_pickle=True)
reel_current = np.load("m234_reel_current.npy", allow_pickle=True)
cb_current = np.load("m234_cb_current.npy", allow_pickle=True)
pf_current = np.load("m234_pf_current.npy", allow_pickle=True)
cm7290_current = np.load("m234_cm7290_current.npy", allow_pickle=True)
# x1 = [1.2,3.2,5.5,7.3,9.5]
# y1 = [10,10,10,10,10]
# x2 = [2.4,4.2,6.6,8.3,10.4]
# y2 = [11,11,11,11,11]
# plt.plot(x1,y1,'r')
# plt.plot(x2,y2,'b')
# plt.show()
# reel_speed = reel_speed[0:2225, ...]
# reel_speed = np.insert(reel_speed, 657, [159.42242, 0], 0)
# cb_speed = cb_speed[0:2225, ...]
# cb_speed = np.delete(cb_speed, (1666, 1915, 2154), 0)
# cb_speed = np.delete(cb_speed, 656, 0)
draw_what = 'speed' # speed or current
if draw_what == 'speed':
fig1 = plt.figure(1)
ax1 = plt.subplot(211)
plt.plot(reel_speed[..., 0], reel_speed[..., 1], 'g', label='reel_speed')
plt.plot(cb_speed[..., 0], cb_speed[..., 1], 'b', label='cb_speed')
plt.plot(pf_speed[..., 0], pf_speed[..., 1], 'r', label='pf_speed')
plt.title('Control motors based on car speed.\npriority and tracking', fontsize=30)
# 设置坐标刻度大小
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# 设置坐标标签字体大小
ax1.set_xlabel('time s', fontsize=20)
ax1.set_ylabel('speed n/min', fontsize=20)
# 设置图例字体大小
ax1.legend(loc='center right', fontsize=20)
ax2 = plt.subplot(212, sharex=ax1)
plt.step(car_speed[..., 0], car_speed[..., 1], 'o', where='post', label='car_speed')
# 设置坐标标签字体大小
ax2.set_xlabel('time s', fontsize=20)
ax2.set_ylabel('speed m/s', fontsize=20)
# 设置坐标刻度大小
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# 设置图例字体大小
ax2.legend(loc='center right', fontsize=20)
# plt.show()
# ax11 = fig1.add_axes([0.2, 0.65, 0.15, 0.15]) # inside axes
# ax11.plot(reel_speed[650:670, 0], reel_speed[650:670, 1], 'go-', label='reel_speed')
# ax11.plot(cb_speed[650:670, 0], cb_speed[650:670, 1], 'bo-', label='cb_speed')
# ax11.plot(pf_speed[650:670, 0], pf_speed[650:670, 1], 'bo-', label='pf_speed')
# ax11.set_xlabel('time s')
# ax11.set_ylabel('speed m/s')
# ax11.set_title('Zoom in start point.')
# # 去掉边框
# ax11.spines['top'].set_visible(False)
# ax11.spines['right'].set_visible(False)
# # ax11.spines['bottom'].set_visible(False)
# # ax11.spines['left'].set_visible(False)
plt.show()
elif draw_what == 'current':
# fig2 = plt.figure(2)
# ax3 = fig2.add_subplot(1, 1, 1)
# ax3.plot(reel_speed[650:670, 0], reel_speed[650:670, 1], 'go-', label='reel_speed')
# ax3.plot(cb_speed[650:670, 0], cb_speed[650:670, 1], 'bo-', label='cb_speed')
# ax3.plot(pf_speed[650:670, 0], pf_speed[650:670, 1], 'bo-', label='pf_speed')
# # 设置坐标刻度大小
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# # 设置坐标标签字体大小
# ax3.set_xlabel('time s', fontsize=20)
# ax3.set_ylabel('speed n/min', fontsize=20)
# # 设置图例字体大小
# ax3.legend(fontsize=20)
# # plt.show()
fig3 = plt.figure(2)
ax4 = plt.subplot(411)
plt.plot(reel_current[..., 0], (reel_current[..., 1] + 1.8)*8, 'r', label='reel_current')
# plt.plot(cm7290_current[..., 0], cm7290_current[..., 1], 'y', label='cm7290_current')
plt.title('Control motors based on car speed.\nCurrent compared monitoring', fontsize=30)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# 设置坐标标签字体大小
ax4.set_ylabel('current A', fontsize=20)
# 设置图例字体大小
ax4.legend(fontsize=20, loc='upper right')
# 设置坐标刻度大小
ax5 = plt.subplot(412)
plt.plot(cb_current[..., 0], cb_current[..., 1]+0.4, 'g', label='cb_current')
# plt.plot(pf_current[..., 0], pf_current[..., 1]+0.4, 'b', label='pf_current')
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# 设置坐标标签字体大小
ax5.set_xlabel('time s', fontsize=20)
ax5.set_ylabel('current A', fontsize=20)
# 设置图例字体大小
ax5.legend(fontsize=20, loc='upper right')
ax6 = plt.subplot(413)
# plt.plot(cb_current[..., 0], cb_current[..., 1]+0.4, 'g', label='cb_current')
plt.plot(pf_current[..., 0], pf_current[..., 1]+0.4, 'b', label='pf_current')
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# 设置坐标标签字体大小
ax6.set_xlabel('time s', fontsize=20)
ax6.set_ylabel('current A', fontsize=20)
# 设置图例字体大小
ax6.legend(fontsize=20, loc='upper right')
ax7 = plt.subplot(414, sharex=ax4)
ax7.step(car_speed[..., 0], car_speed[..., 1], 'o', where='post', label='car_speed')
# 设置坐标刻度大小
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# 设置坐标标签字体大小
ax7.set_xlabel('time s', fontsize=20)
ax7.set_ylabel('current A', fontsize=20)
# 设置图例字体大小
ax7.legend(fontsize=20, loc='upper right')
plt.show()
|
[
"yangzt_0943@163.com"
] |
yangzt_0943@163.com
|
ae04fd859cfd830caef54076d10043f80449f504
|
2e3c4540e4470d1fc6043e8abf3c3e8b87b4c5ab
|
/oldstuff/python/bounce/box.py
|
28624db2ec65b832b5f46c05f8da4d250ca5fae4
|
[
"MIT"
] |
permissive
|
renowncoder/bcherry
|
ce521655bab2b79b07c1db02a272f569dcfaf4bc
|
5d2f1144dbdbf35d6284018fa2c9e24ec5cecec6
|
refs/heads/master
| 2021-12-04T15:31:50.046428
| 2015-03-26T22:42:37
| 2015-03-26T22:42:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
import pygame
from pygame.locals import *
class Box:
def __init__(self,screen,size,velocities,background,boxcolor):
self.screen = screen
screensize = self.screen.get_size()
self.screenwidth = screensize[0]
self.screenheight = screensize[1]
#Position
#Box will start roughly middle
self.x = screensize[0]/2
self.y = screensize[1]/2
self.width = size[0]
self.height = size[1]
#Velocity
self.vx = velocities[0]
self.vy = velocities[1]
self.bgcolor = background
self.boxcolor = boxcolor
self.rect = pygame.rect.Rect(self.x,self.y,self.width,self.height)
def draw(self):
#erase
pygame.draw.rect(self.screen,self.bgcolor,self.rect)
#update pos or reverse
#check for collision:
nx,ny = self.x+self.vx,self.y+self.vy
bound_x = nx + self.width
bound_y = ny + self.height
if((bound_x >= self.screenwidth) or (nx <= 0)):
self.vx *= -1 * 0.9
else:
self.x = nx
if((bound_y >= self.screenheight) or (ny <= 0)):
self.vy *= -1 * 0.9
else:
self.y = ny
#Draw new box
self.rect = pygame.rect.Rect(nx,ny,self.width,self.height)
pygame.draw.rect(self.screen,self.boxcolor,self.rect)
def setV(self,x,y):
self.vx = x
self.vy = y
def setBackgroundColor(self,color):
self.bgcolor = color
def setBoxColor(self, color):
self.boxcolor = color
|
[
"bcherry@gmail.com"
] |
bcherry@gmail.com
|
851b32b04b3b4b1a121d96f19aa3e85cf5aaf47a
|
326da863d53a4a05703424c6ab1cc17773967d52
|
/MacroSystem/words.py
|
e7db6041645a074d9530ebf2fe60150030625e95
|
[] |
no_license
|
rantaoca/voice-typing
|
0e54174ccd555eb92d664118f7d0cf804a6105d8
|
6ecc5961b042d3dea2770c10f17266779dccd434
|
refs/heads/master
| 2021-09-09T14:24:30.680927
| 2018-03-17T01:41:51
| 2018-03-17T01:41:51
| 114,822,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
# module for dictating words and basic sentences
#
# (based on the multiedit module from dragonfly-modules project)
# (heavily modified)
# (the original copyright notice is reproduced below)
#
# (c) Copyright 2008 by Christo Butcher
# Licensed under the LGPL, see <http://www.gnu.org/licenses/>
#
import aenea
import aenea.misc
import aenea.vocabulary
import aenea.configuration
import aenea.format
from aenea import (
AeneaContext,
AppContext,
Alternative,
CompoundRule,
Dictation,
DictList,
DictListRef,
Grammar,
IntegerRef,
Literal,
ProxyAppContext,
MappingRule,
NeverContext,
Repetition,
RuleRef,
Sequence
)
from aenea import (
Key,
Text
)
lastFormatRuleLength = 0
lastFormatRuleWords = []
class NopeFormatRule(CompoundRule):
spec = ('nope')
def value(self, node):
global lastFormatRuleLength
print "erasing previous format of length", lastFormatRuleLength
return Key('backspace:' + str(lastFormatRuleLength))
class ReFormatRule(CompoundRule):
spec = ('that was [upper | natural] ( proper | camel | rel-path | abs-path | score | sentence | '
'scope-resolve | jumble | dotword | dashword | natword | snakeword | brooding-narrative)')
def value(self, node):
global lastFormatRuleWords
words = lastFormatRuleWords
words = node.words()[2:] + lastFormatRuleWords
print words
uppercase = words[0] == 'upper'
lowercase = words[0] != 'natural'
if lowercase:
words = [word.lower() for word in words]
if uppercase:
words = [word.upper() for word in words]
words = [word.split('\\', 1)[0].replace('-', '') for word in words]
if words[0].lower() in ('upper', 'natural'):
del words[0]
function = getattr(aenea.format, 'format_%s' % words[0].lower())
formatted = function(words[1:])
global lastFormatRuleLength
lastFormatRuleLength = len(formatted)
return Text(formatted)
class FormatRule(CompoundRule):
spec = ('[upper | natural] ( proper | camel | rel-path | abs-path | score | sentence | '
'scope-resolve | jumble | dotword | dashword | natword | snakeword | brooding-narrative) [<dictation>] [bomb]')
extras = [Dictation(name='dictation')]
def value(self, node):
words = node.words()
print "format rule:", words
uppercase = words[0] == 'upper'
lowercase = words[0] != 'natural'
if lowercase:
words = [word.lower() for word in words]
if uppercase:
words = [word.upper() for word in words]
words = [word.split('\\', 1)[0].replace('-', '') for word in words]
if words[0].lower() in ('upper', 'natural'):
del words[0]
bomb = None
if 'bomb' in words:
bomb_point = words.index('bomb')
if bomb_point+1 < len(words):
bomb = words[bomb_point+1 : ]
words = words[ : bomb_point]
function = getattr(aenea.format, 'format_%s' % words[0].lower())
formatted = function(words[1:])
global lastFormatRuleWords
lastFormatRuleWords = words[1:]
global lastFormatRuleLength
lastFormatRuleLength = len(formatted)
# empty formatted causes problems here
print " ->", formatted
if bomb != None:
return Text(formatted) + Mimic(' '.join(bomb))
else:
return Text(formatted)
|
[
"rantaoca@google.com"
] |
rantaoca@google.com
|
bdea0d65f8b29b588e3a614f1d2946e88b2d8783
|
d789682dabe3b10106f9fea2d23ca09cb97f1e39
|
/src/api/hello/world/root.py
|
2b839bbb1dea88e46b383616f14ea8a544565534
|
[] |
no_license
|
PyBackendBoilerplate/micro-service
|
210feab9c4faa0ac9974b6a23d5710f620ab5b63
|
47664ae8148fbc23adca94da42634b53584bc373
|
refs/heads/master
| 2023-01-11T23:36:04.368522
| 2020-11-11T17:30:26
| 2020-11-11T17:30:26
| 284,282,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
"""APIs implementation.
Implementing the route's RESTFul API.
To attach route handlers functions to their routes in the relevant openapi yaml file, use this:
x-openapi-router-controller: [module path after src].[python module name (without extension)]
operationId: Route handler function name
Example:
x-openapi-router-controller: api.hello.world.root
operationId: root
"""
from datetime import datetime
def root() -> str:
now = datetime.now()
formatted_now = now.strftime('%A, %d %B, %Y at %X')
content = f"Hello, World! It's {formatted_now}"
return content
|
[
"Nusnus@users.noreply.github.com"
] |
Nusnus@users.noreply.github.com
|
fcae963673fb7a9a3e0704f4c5a45bf3d6f647f9
|
ebed5ccdbcc951b2891eafa59b97611366d7856b
|
/env.py
|
7350e656249bd8489ad01dde1a6bda7c0b48fbb4
|
[] |
no_license
|
Upasna29/hvac
|
b149c04c04401bfaedbdc85d46c7844032bce593
|
5a8843b51014632eba144ddd5c40dadfaa7f4b19
|
refs/heads/master
| 2021-01-23T22:00:08.881741
| 2017-06-05T22:11:19
| 2017-06-05T22:11:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
import os
from os.path import join, dirname
from dotenv import load_dotenv
MANDATORY_ENV_VARIABLES = ["DATABASE_URL", "SERIAL_PORT"]
environment = {}
class EnvironmentSetupError(Exception):
pass
try:
dotenv_path = join(dirname(__file__), '.env')
if not load_dotenv(dotenv_path):
raise EnvironmentSetupError("Missing dotenv file in root directory")
for variable in MANDATORY_ENV_VARIABLES:
if os.environ.has_key(variable):
environment[variable] = os.environ.get(variable)
else:
raise EnvironmentSetupError("Environment variable " + variable + " must be defined")
except Exception as e:
print "EnvironmentSetupException:", e
|
[
"brandonfujii2018@u.northwestern.edu"
] |
brandonfujii2018@u.northwestern.edu
|
0cf24761ef1e6b829ad1497b0e4181665a36f0dc
|
d0dd94cd9caf70731d3d95623752e0439e73c244
|
/Homework/Homework_4/test5.py
|
7fed87fbc918f09a165301d9155c87ac449ce16d
|
[] |
no_license
|
kratika1008/DS501_Introduction_To_DataScience
|
9499cd7fe7b40f3ed4dfdd65d1181a2734be10ec
|
b083f0c28ee7ee2624b3539868b497fb82da72f3
|
refs/heads/main
| 2023-02-18T06:08:27.539545
| 2021-01-19T05:42:04
| 2021-01-19T05:42:04
| 330,815,446
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,970
|
py
|
from problem5 import *
import numpy as np
import sys
'''
Unit test 5:
This file includes unit tests for problem5.py.
'''
#-------------------------------------------------------------------------
def test_python_version():
''' ----------- Problem 5 (20 points in total)---------------------'''
assert sys.version_info[0]==3 # require python 3 (instead of python 2)
#-------------------------------------------------------------------------
def test_compute_D():
'''(4 points) compute_D '''
#-------------------------------
# an example adjacency matrix (3 nodes)
A = np.array([[0., 1., 0.],
[1., 0., 1.],
[0., 1., 0.]])
# call the function
D = compute_D(A)
# test whether or not D is a numpy matrix
assert type(D) == np.ndarray
# true answer
D_true = np.mat([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 1.]])
# test the result
assert np.allclose(D,D_true)
#-------------------------------
# an example adjacency matrix
for _ in range(20):
n = np.random.randint(3,20)
A = np.random.random((n,n))
np.fill_diagonal(A,0.)
A = (A + A.T)/2 # symmetric
A[A>=0.5]=1.0
A[A<0.5]=0.
# call the function
D = compute_D(A)
d = np.diagonal(D)
# test the result
assert np.allclose(d.sum(), A.sum())
assert np.allclose(d.sum(), D.sum())
i = np.random.randint(n)
assert np.allclose(d[i], D[i].sum())
#-------------------------------------------------------------------------
def test_compute_L():
'''(4 points) compute_L '''
#-------------------------------
# an example adjacency matrix (3 nodes)
A = np.array([[0., 1., 0.],
[1., 0., 1.],
[0., 1., 0.]])
D = np.array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 1.]])
# call the function
L = compute_L(D,A)
# true answer
L_true = np.array([[ 1.,-1., 0.],
[-1., 2.,-1.],
[ 0.,-1., 1.]])
# test the result
assert np.allclose(L,L_true)
#-------------------------------
# an example adjacency matrix
for _ in range(20):
n = np.random.randint(3,20)
A = np.random.random((n,n))
np.fill_diagonal(A,0.)
A = (A + A.T)/2 # symmetric
A[A>=0.5]=1.0
A[A<0.5]=0.
D = compute_D(A)
# call the function
L = compute_L(D,A)
d = np.diagonal(L)
# test the result
assert np.allclose(d.sum(), A.sum())
assert np.allclose(L.sum(), 0)
assert np.allclose(L.sum(0), np.zeros(n))
# whether L is symmetric
assert np.allclose(L,L.T)
#-------------------------------------------------------------------------
def test_find_e2():
'''(4 points) find_e2'''
L = np.array([[1., -1.],
[-1., 1.]])
e = find_e2(L)
assert type(e) == np.ndarray
assert e.shape == (2,)
e_true = np.array([-0.70710678, 0.70710678])
assert np.allclose(e,e_true,atol=1e-2) or np.allclose(e,-e_true,atol=1e-2)
L= np.diag((1, 0, 3))
e = find_e2(L)
e_true= np.array([1,0,0])
assert np.allclose(e,e_true,atol=1e-2) or np.allclose(e,-e_true,atol=1e-2)
L= np.diag((1, 1e-5,3))
e = find_e2(L)
e_true= np.array([1,0,0])
assert np.allclose(e,e_true,atol=1e-2) or np.allclose(e,-e_true,atol=1e-2)
L= np.diag((1, 1e-3, 1e-5,3))
e = find_e2(L)
e_true= np.array([0,1,0,0])
assert np.allclose(e,e_true,atol=1e-2) or np.allclose(e,-e_true,atol=1e-2)
#-------------------------------------------------------------------------
def test_compute_x():
'''(4 points) compute_x'''
e2 = np.array([0.7, -0.7])
x = compute_x(e2)
assert type(x) == np.ndarray
assert x.shape == (2,)
assert np.allclose(x, [1.,0.])
e2 = np.array([0.7, -0.7, 0.2, -0.2])
x = compute_x(e2)
assert x.shape == (4,)
assert np.allclose(x, [1.,0.,1.,0.])
e2 = np.array([0.7, 0., 0.2, -0.2])
x = compute_x(e2)
assert type(x) == np.ndarray
assert x.shape == (4,)
assert np.allclose(x, [1.,0.,1.,0.])
#-------------------------------------------------------------------------
def test_spectral_clustering():
'''(4 points) spectral clustering'''
#-------------------------------
# an example adjacency matrix (2 groups with a link between the two groups)
A = np.array([[0., 1., 1., 0., 0., 0.],
[1., 0., 1., 0., 0., 0.],
[1., 1., 0., 1., 0., 0.],
[0., 0., 1., 0., 1., 1.],
[0., 0., 0., 1., 0., 1.],
[0., 0., 0., 1., 1., 0.]])
# make sure matrix A is symmetric
assert np.allclose(A, A.T)
# call the function
x = spectral_clustering(A)
# test the correctness of the result
assert np.allclose([0,0,0,1,1,1],x) or np.allclose([1,1,1,0,0,0],x)
#-------------------------------
# test on random matrix
for _ in range(20):
n1 = np.random.randint(3,20)
n2 = np.random.randint(3,20)
A1 = np.random.random((n1,n1))*100
A2 = np.random.random((n2,n2))*100
np.fill_diagonal(A1,0.)
np.fill_diagonal(A2,0.)
A = np.bmat([[A1,np.zeros((n1,n2))],
[np.zeros((n2,n1)),A2]])
A = (A + A.T)/2 # symmetric
A[A>=0.5]=1.0
A[A<0.5]=0.
i = np.random.randint(n1)
j = np.random.randint(n2) + n1
A[i,j] = 0.001
A[j,i] = 0.001
A =np.asarray(A)
# call the function
x = spectral_clustering(A)
x_true1 = np.asarray(np.bmat([np.zeros(n1), np.ones(n2)]))
x_true2 = np.asarray(np.bmat([np.ones(n1), np.zeros(n2)]))
assert np.allclose(x_true1,x) or np.allclose(x_true2,x)
|
[
"agrawalkratika1008@gmail.com"
] |
agrawalkratika1008@gmail.com
|
f81d2a4fe7b181380bced5758ec0db043b5e59c4
|
47ef6997d03f4d5c921c83cc09aef1dfc6828e2c
|
/zeus/report/__init__.py
|
b327b709b4289c1d32150f648d091454c7653e54
|
[
"MIT"
] |
permissive
|
huawei-noah/xingtian
|
620c9f245183d636e0a65659fd99a984397ecbd4
|
e4ef3a1c92d19d1d08c3ef0e2156b6fecefdbe04
|
refs/heads/master
| 2023-09-03T01:10:21.768245
| 2022-03-21T03:39:39
| 2022-03-21T03:39:39
| 287,759,621
| 308
| 91
|
MIT
| 2023-09-12T11:33:22
| 2020-08-15T14:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 218
|
py
|
from .report_server import ReportServer
from .report_client import ReportClient
from .record import ReportRecord
from .share_memory import ShareMemory
from .nsga_iii import NonDominatedSorting, SortAndSelectPopulation
|
[
"hustqj@126.com"
] |
hustqj@126.com
|
fa1dceff710c0975174ff918b25c703bd7067e05
|
ff66ebe88066241b1e783f4792d6f52d378f612d
|
/FTVM_TestAgent/tests/L1_tests/test1.py
|
7fde1c49852b46ca9fd99e984515908d9daf1f84
|
[] |
no_license
|
vaporting/FTVM_TA
|
17a82d7cffe22850c6e13b38fd336faef9f9fd4d
|
11dbec637e91cf23f999e2b21e98f82fb8831b03
|
refs/heads/master
| 2021-01-18T20:19:58.632216
| 2014-04-10T06:11:22
| 2014-04-10T06:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
import sys
import time
from testagent import TA_error
def run_test1(parser):
print "test1"
time.sleep(float("1.1"))
raise TA_error.Assert_Error("test1 assert exception")
|
[
"root@ting.(none)"
] |
root@ting.(none)
|
2695c05773efce23f447e4bda3a59255f398fe98
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03102/s335852298.py
|
dcc23fa4b8d4a4d7dcd9072373fd9795db300d2a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
MM = input().split()
N = int(MM[0])
M = int(MM[1])
C = int(MM[2])
count= 0
BB = input().split()
for i in range(N):
AA = input().split()
total = C
for i,j in zip(BB,AA):
total += int(i)*int(j)
if total >0:
count +=1
print(count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
61085640f1d05f5f93b920a600305bda58c4f77a
|
2dc3f73dcce0d9abc692421a6c4c981e7ac3ca96
|
/speech.py
|
3f67055992c25f3714810688d60dd6f0a572ca86
|
[] |
no_license
|
Gokhu18/FacialAnalysis-Industrial.App.Dev
|
cf002dc70a9c2e5a67924e159e0f8d7c721498bb
|
691ef2fee6f68de4963b70ce1593ad2a45a553fd
|
refs/heads/master
| 2020-11-28T03:27:48.263959
| 2019-08-18T20:27:36
| 2019-08-18T20:27:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
"""
Author :- Aman Altaf Multani Awan
Description:- Recognize speech using Google Speech Recognition
- Listen for the first phrase and extract it into audio data
- Using the library for performing the speech recognition with the support of several engines and API’s online and as well as offline.
"""
import speech_recognition as sr
r = sr.Recognizer()
with sr.Microphone as source:
print("Speak Anything: ")
audio = r.listen(source)
try:
text = r.recognize_google(audio)
print("You said : {}".format(text))
except LookupError: #Shows error if speech is unintelligible
print("Sorry could not recognize what you said")
|
[
"noreply@github.com"
] |
Gokhu18.noreply@github.com
|
ad19bca46f37a4f6b3d7b02d44c2d4cfb74167c6
|
15598a49312b573cd405875cee06205011645baa
|
/src/application_properties.py
|
8c27c193ff5e0f266be3b19567eed25805bcb07f
|
[] |
no_license
|
guziy/GevFit
|
f206635a0d7f84cd89b236ab0d712b755969f71e
|
93784b295170c91121a863cfc671d0a9ee3c67ea
|
refs/heads/master
| 2021-01-21T05:00:42.560690
| 2015-06-05T15:54:24
| 2015-06-05T15:54:24
| 2,045,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
__author__="huziy"
__date__ ="$26 mai 2010 12:23:59$"
import os
PROJECT_DIR = 'GevFit'
def set_current_directory():
dir = os.getcwd()
while not dir.endswith(PROJECT_DIR):
os.chdir('..')
dir = os.getcwd()
if __name__ == "__main__":
set_current_directory()
print(os.getcwd())
print("Hello World")
|
[
"guziy.sasha@gmail.com"
] |
guziy.sasha@gmail.com
|
8b99a459701591aae0eb07ccbe1fdd17235d54ab
|
c8e0f1ae2987f98770482cccbf36f0a6d50d6f26
|
/code/正则表达式/z_04.py
|
7a5712a32a1ba96855621a20cb6fb4ae1c246507
|
[] |
no_license
|
RemainderTime/python_primary
|
2dd97d4b03abc7f71b45317fd3a3435a64df7a01
|
fcea9d9fc951295ed6064cf37b76275f2af32fd6
|
refs/heads/master
| 2020-07-06T03:15:48.451004
| 2020-01-11T07:45:34
| 2020-01-11T07:45:34
| 202,870,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
#数量词
import re
a='5566dfd5fdg5gs26s0'
r=re.findall('[a-z]{2,6}',a)
s=re.findall('[a-z]{2,6}?',a)
#贪婪 与 非贪婪
#python 默认贪婪
#非贪婪 后面加问号
print(r)
print(s)
|
[
"remaindertime@gmail.com"
] |
remaindertime@gmail.com
|
227f85d54309bd8972ddedc21cce93b2c0bbd882
|
525d0d2973de75fb012bdbb6e57ebc30769e20dc
|
/ci/admin.py
|
df29bed63e55a0f1ffc399e4faaba5cdd49f920f
|
[] |
no_license
|
xuhshen/comci
|
9fb6d1605e0b5c3dae79f6891df8bfce9dc1dc7c
|
7af0dd13c86dd729e69e4be2d61c320984b3debc
|
refs/heads/master
| 2021-07-14T20:41:25.555379
| 2017-10-13T02:15:32
| 2017-10-13T02:15:32
| 106,767,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
from django.contrib import admin
# Register your models here.
from .models import *
class FeatureAdmin(admin.ModelAdmin):
filter_horizontal = ('task','module','params')
class UserdefcasesetAdmin(admin.ModelAdmin):
filter_horizontal = ('caseset',)
class UserdeftagsetAdmin(admin.ModelAdmin):
filter_horizontal = ('tagset',)
class TaskAdmin(admin.ModelAdmin):
filter_horizontal = ('depends',)
admin.site.register(Feature, FeatureAdmin)
admin.site.register(Featuretype)
admin.site.register(Param)
admin.site.register(FilterTables)
admin.site.register(Product)
admin.site.register(Stage)
admin.site.register(Status)
admin.site.register(Gearman)
admin.site.register(Repository)
admin.site.register(Tasktype)
admin.site.register(Task,TaskAdmin)
admin.site.register(Moduletype)
admin.site.register(Module)
admin.site.register(Build)
admin.site.register(Envvariable)
admin.site.register(Caseset)
admin.site.register(Casetag)
admin.site.register(Userdefcaseset,UserdefcasesetAdmin)
admin.site.register(Userdeftagset,UserdeftagsetAdmin)
admin.site.register(Featurebuilder)
admin.site.register(Key_tables)
|
[
"xuhui.shen@nokia-sbell.com"
] |
xuhui.shen@nokia-sbell.com
|
d510bc9641baefa468a873146d2e284f9a2037c5
|
b73f3bacc7f0ad3b46f9efa4f7312b6ac487e3f4
|
/网易云音乐爬虫/4、生成词云图.py
|
eb15a75106fb27b42fa457cce76a0f989db96b31
|
[] |
no_license
|
sitetianminghui/-
|
ff87817462319dd4d64455287952a35153a040d5
|
d413df0c96c1606c37bdd576513201af7b91dee5
|
refs/heads/master
| 2022-11-22T15:34:07.932612
| 2020-07-26T06:29:03
| 2020-07-26T06:29:03
| 282,589,229
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
# 第三部分:提取词组 , 用作字云图像
import jieba # 结巴分词:用于分开词 , 详情见结巴分词的功能.py
from wordcloud import WordCloud, ImageColorGenerator
import matplotlib.pyplot as plt
with open('TFBOYS歌词集.txt','r',encoding='utf-8')as f:
all_lyric = f.read() # 用空格将所有歌词的歌曲连接起来
wordlist_after_jieba = jieba.cut(all_lyric) # 将歌词分词
wl_space_split = "".join(wordlist_after_jieba) # 歌词拼接起来
# print(wl_space_split)
user_font = 'simhei.ttf' # 使用字体 , 使得支持中文
wc = WordCloud(font_path=user_font) # 设置 字体参数
my_word_cloud = wc.generate(wl_space_split) # 对歌词生成字云
plt.imshow(my_word_cloud)
plt.axis("off") # off(关闭) 坐标轴线(axis)
plt.show()
'''
# # 扩展:可选择用图片做背景生成字云
import numpy
from PIL import Image
# 读取图片将组成的RGB数值 , 转换成数组(array)
alice_coloring = numpy.array(Image.open("holmes.png"))
# print(alice_coloring)
# 背景颜色 最大字数 掩饰面具 最大字体 随机范围 字体
wc = WordCloud(background_color="white", max_words=2000, mask=alice_coloring, max_font_size=40, random_state=42, font_path=user_font)
my_word_cloud = wc.generate(wl_space_split)
# 图片 颜色 生成器 颜色数据
image_colors = ImageColorGenerator(alice_coloring)
# 重设颜色
plt.imshow(my_word_cloud.recolor(color_func=image_colors))
plt.imshow(my_word_cloud)
plt.axis("off")
'''
# plt.savefig('test2.tif', dpi=4000, bbox_inches='tight') # 保存图片
# plt.show()
|
[
"noreply@github.com"
] |
sitetianminghui.noreply@github.com
|
b7fddaae7a5c65b4fa97bc257fe714c5e63d9e00
|
37f029c1dd24b89b74dfa8ce0abcc41f83418c16
|
/blog/blog/urls.py
|
99c74bd03d241d9c23729fc9de7c87988b0ed177
|
[] |
no_license
|
HalfSugar1/blog
|
5a6c42b6225ab759cf5ea722672a869eb0a796c7
|
bb1334a8a574cf9660e37672775aeac52f673bc4
|
refs/heads/master
| 2022-12-10T04:22:15.906923
| 2019-10-17T12:14:26
| 2019-10-17T12:14:26
| 214,826,852
| 1
| 0
| null | 2022-12-08T05:22:54
| 2019-10-13T13:39:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,531
|
py
|
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path ,include
from article import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('article/',views.Article_List,name='Article_List'),
path('article_detail/<int:id>/',views.Article_detail,name='Article_detail'),
path('article_create/',views.Article_create,name='Article_create'),
path('article_delete/<int:id>/',views.Article_delete,name='Article_delete'),
path('article_safe_delete/<int:id>/',views.Article_safe_delete,name='Article_safe_delete'),
path('article_update/<int:id>/',views.Article_update,name='Article_update'),
path('control/',include('control.urls',namespace='control')),
path('password_reset/',include('password_reset.urls')),
]
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
[
"634115922@qq.com"
] |
634115922@qq.com
|
a83cc72bca4ad13eaf7047e632b08e1f1ef9aeff
|
c5aadc198b1ccd9630498245d3b414f1871290c6
|
/internet.py
|
00c7d7885ce63a9aed034246a3063d3c5f2ffd80
|
[] |
no_license
|
Arce213/Telecommunication_Brand_Arce213
|
e6d8bba92c04c62b0f08e6c4532c2ac2fb10237c
|
674dc8e034329491aeff0ff2b87ce10ff2d040d6
|
refs/heads/main
| 2023-06-04T06:03:15.023824
| 2021-06-14T20:06:37
| 2021-06-14T20:06:37
| 376,916,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import json
data = {}
data['clients'] = []
data['clients'].append({
'first_name': 'Sigrid',
'last_name': 'Mannock',
'age': 27,
'amount': 7.17})
data['clients'].append({
'first_name': 'Joe',
'last_name': 'Hinners',
'age': 31,
'amount': [1.90, 5.50]})
data['clients'].append({
'first_name': 'Theodoric',
'last_name': 'Rivers',
'age': 36,
'amount': 1.11})
with open('data.json', 'w') as file:
json.dump(data, file, indent=4)
|
[
"noreply@github.com"
] |
Arce213.noreply@github.com
|
13082ba6833f1270626fbce8bcb1f9b818b9a204
|
0fd92b7d882a1edb5542f6600bb177dcad67ed50
|
/powerful104/1676a.py
|
887a9fb2719458b35e1fa98069de68ec0ac3ac9e
|
[] |
no_license
|
alpha-kwhn/Baekjun
|
bce71fdfbbc8302ec254db5901109087168801ed
|
f8b4136130995dab78f34e84dfa18736e95c8b55
|
refs/heads/main
| 2023-08-02T11:11:19.482020
| 2021-03-09T05:34:01
| 2021-03-09T05:34:01
| 358,347,708
| 0
| 0
| null | 2021-04-15T17:56:14
| 2021-04-15T17:56:13
| null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
#1676번 오버플로우 신경안쓴 알고리즘
num = int(input())
fact=1
for i in range(num):
fact*=i+1
fact=list(str(fact))
fact.reverse()
ans=0
for i in fact:
if i!="0":
break
ans+=1
print(ans)
|
[
"noreply@github.com"
] |
alpha-kwhn.noreply@github.com
|
50a0e2505829ff003bd63cbfeea6d81747501061
|
934d09590409faf617e2652e7cb6e0ba1730b551
|
/Python/Raspberry Pi 3/Programs/RC/Test_Motor.py
|
c6b048e5a2b49e9ec8e0ae64514770aa29383ad0
|
[] |
no_license
|
TockThomas/stuffs
|
e72cf6dec7ce7b2396730375998433e6bfe4eecd
|
810ca0a6d45218679518357288e68daeb6e28c55
|
refs/heads/master
| 2023-03-19T09:17:29.909539
| 2021-02-19T21:29:51
| 2021-02-19T21:29:51
| 324,415,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
import RPi.GPIO as GPIO
import time
left = (7,8) # 7-; 8+
right = (9,10) # 9+; 10-
GPIO.setwarnings(False)
def init():
GPIO.setmode(GPIO.BCM)
GPIO.setup(7, GPIO.OUT)
GPIO.setup(8, GPIO.OUT)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(10, GPIO.OUT)
def forward(tf):
init()
GPIO.output(7, False)
GPIO.output(8, True)
GPIO.output(9, True)
GPIO.output(10, False)
time.sleep(tf)
GPIO.cleanup()
forward(1)
|
[
"thomas.schleicher@fastad.de"
] |
thomas.schleicher@fastad.de
|
6c7cc1c3c99e73c089cde2fbd85e29eaf020de03
|
3f46ad00ebe6ffd0ac059152246d1dcc757fe52d
|
/code3/geojson.py
|
a6fbe02447d9db0fa5fd95851402dda16f5fe85f
|
[] |
no_license
|
pkern001/pythonlearn
|
63540c0e695c3ac578e4fdf433dd7209731668bf
|
aeb21370672b1265e1c7656067a65e38a565a7de
|
refs/heads/master
| 2021-01-18T01:16:06.850690
| 2016-01-03T22:55:19
| 2016-01-03T22:55:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
import urllib.request, urllib.parse, urllib.error
import json
serviceurl = 'http://maps.googleapis.com/maps/api/geocode/json?'
while True:
address = input('Enter location: ')
if len(address) < 1 : break
url = serviceurl + urllib.parse.urlencode({'sensor':'false', 'address': address})
print('Retrieving', url)
uh = urllib.request.urlopen(url)
data = uh.read()
print('Retrieved',len(data),'characters')
try: js = json.loads(str(data))
except: js = None
if 'status' not in js or js['status'] != 'OK':
print('==== Failure To Retrieve ====')
print(data)
continue
print(json.dumps(js, indent=4))
lat = js["results"][0]["geometry"]["location"]["lat"]
lng = js["results"][0]["geometry"]["location"]["lng"]
print('lat',lat,'lng',lng)
location = js['results'][0]['formatted_address']
print(location)
|
[
"csev@umich.edu"
] |
csev@umich.edu
|
f6842097c4dbf40525c664c5fc0304cfb0d2e9ac
|
90a036e37f0cdad26bfab4d14da70d1706ade74c
|
/src/pt_utils.py
|
cf8df8c9bc660e07e28914a8d1c7b25c040812f3
|
[
"Apache-2.0"
] |
permissive
|
m3hrdadfi/fun-neural-style-transfer
|
0a0ab97d9a77b8fea771c25c90d2b90d995d908a
|
c03d6c94a9ea59327378e9d0606cafd4bf5ee900
|
refs/heads/master
| 2022-12-03T03:38:35.489680
| 2020-08-17T13:59:28
| 2020-08-17T13:59:28
| 286,227,631
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,841
|
py
|
import json
import os
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
def image_scale(image, resize=None, max_size=None):
"""Scale the image based on resize or maximum size ratio"""
if isinstance(max_size, int) and not isinstance(resize, tuple):
scale = max_size / max(image.size)
w, h = image.size
resize = (round(w * scale), round(h * scale))
image = image.resize(resize)
elif not isinstance(max_size, int) and isinstance(resize, tuple):
image = image.resize(resize)
return image
def load_image(image_path, resize=None, max_size=None):
"""Load and resize an image."""
image = Image.open(image_path).convert('RGB')
image = image_scale(image, resize, max_size)
return image
def load_image_to_tensor(image_path, resize=None, max_size=None):
"""Load and prepare image for PyTorch"""
# load image path to Pillow object
image = load_image(image_path, resize=None, max_size=None)
# transform object to tensor matched with pytorch structure
# normalization
# WxHxC to CxHxW
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
(0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)
) # normalization
])
# expand dimensions from 3 to 4
# CxHxW to 1xCxHxW
image = transform(image)[:3, :, :].unsqueeze(0)
return image
def load_image_from_tensor(tensor, pillow_able=True, resize=None, max_size=None):
"""Transform tensor image to array one"""
# detach the pytorch tensor
image = tensor.to('cpu').clone().detach()
# convert detached tensor to numpy and remove single-dim
image = image.numpy().squeeze()
# transform the structure from CxHxW to WxHxC
image = image.transpose(1, 2, 0)
# denormalization
image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
# limit the value between 0 and 1
image = image.clip(0, 1)
# convert image numpy to Pillow object
if pillow_able:
# transform from 0-1 to 0-255
image = Image.fromarray(np.uint8(image * 255.0))
# resize the image if it sets
image = image_scale(image, resize, max_size)
return image
def torch_device():
"""Specifies the GPU/CPU status of the resource"""
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
return device
def plot_result(p, a, x, figsize):
"""Plot the content, style, and the target side by side!"""
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=figsize)
ax1.imshow(load_image_from_tensor(p))
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
ax2.imshow(load_image_from_tensor(a))
ax2.get_xaxis().set_visible(False)
ax2.get_yaxis().set_visible(False)
ax3.imshow(load_image_from_tensor(x))
ax3.get_xaxis().set_visible(False)
ax3.get_yaxis().set_visible(False)
plt.show()
def cb_every_step(base_save_dir):
"""Callback sample for each step of transferring."""
if not isinstance(base_save_dir, str) or not len(base_save_dir) > 0:
def every_step(step, content, style, target, losses):
pass
return every_step
base_save_dir = str(base_save_dir)
os.makedirs(base_save_dir, exist_ok=True)
def every_step(step, content, style, target, losses):
save_dir = os.path.join(base_save_dir, str(step))
os.makedirs(save_dir, exist_ok=True)
target_path = os.path.join(save_dir, 'target.jpg')
target = load_image_from_tensor(target, pillow_able=True)
target.save(target_path, "JPEG")
return every_step
def cb_final_step(base_save_dir):
"""Callback sample for final step"""
if not isinstance(base_save_dir, str) or not len(base_save_dir) > 0:
def final_step(content, style, target, history):
pass
return final_step
base_save_dir = str(base_save_dir)
os.makedirs(base_save_dir, exist_ok=True)
def final_step(content, style, target, history):
content_path = os.path.join(base_save_dir, 'content.jpg')
style_path = os.path.join(base_save_dir, 'style.jpg')
target_path = os.path.join(base_save_dir, 'target.jpg')
history_path = os.path.join(base_save_dir, 'history.json')
content = load_image_from_tensor(content, pillow_able=True)
content.save(content_path, "JPEG")
style = load_image_from_tensor(style, pillow_able=True)
style.save(style_path, "JPEG")
target = load_image_from_tensor(target, pillow_able=True)
target.save(target_path, "JPEG")
with open(history_path, 'w') as f:
json.dump(history, f, indent=2)
return final_step
|
[
"m3hrdadfi@gmail.com"
] |
m3hrdadfi@gmail.com
|
43b31344c7354d423bb37f809d1e4e406894f4a2
|
e21d39d4079f05563a2bb7655ba1fa471464827a
|
/smoothie/app.py
|
0f37d91f6e568a82d2d108f20388c043b5e010cc
|
[] |
no_license
|
izhyk/smoothie
|
8e091aea958a830f5298b214d5aa7f89083a75d8
|
2af233ee4f3ddb722301fa669bbe2ff1dc62ce33
|
refs/heads/master
| 2020-04-22T04:00:33.813658
| 2019-02-21T16:07:32
| 2019-02-21T16:07:32
| 170,108,170
| 0
| 1
| null | 2019-02-21T16:07:34
| 2019-02-11T10:15:03
| null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
from pathlib import Path
from typing import Optional, List
import aiohttp_jinja2
import aiopg.sa
from aiohttp import web
import jinja2
from smoothie.routes import init_routes
from smoothie.utils.common import init_config
path = Path(__file__).parent
def init_jinja2(app: web.Application) -> None:
'''
Initialize jinja2 template for application.
'''
aiohttp_jinja2.setup(
app,
loader=jinja2.FileSystemLoader(str(path / 'templates'))
)
async def database(app: web.Application) -> None:
'''
A function that, when the server is started, connects to postgresql,
and after stopping it breaks the connection (after yield)
'''
config = app['config']['postgres']
engine = await aiopg.sa.create_engine(**config)
app['db'] = engine
yield
app['db'].close()
await app['db'].wait_closed()
def init_app(config: Optional[List[str]] = None) -> web.Application:
app = web.Application()
init_jinja2(app)
init_config(app, config=config)
init_routes(app)
app.cleanup_ctx.extend([
database,
])
return app
|
[
"chimamireme@gmail.com"
] |
chimamireme@gmail.com
|
3c619d8d4d1d9b6995230f78b03e4547f4dd70bf
|
2714ecf5f72fa47ae52adaec7a6ef5e4ca284991
|
/爬虫第1关---HTML基础/1.py
|
a19aad350759266fbacc3dfc6e6e17afe559758c
|
[] |
no_license
|
somw/python
|
905e47b031dae712283a9c7ca602d5c6cf42fa3b
|
b774ff9aa89de1e23c4672a12751682a2bae42d7
|
refs/heads/master
| 2020-04-28T19:42:59.548234
| 2019-08-07T08:47:42
| 2019-08-07T08:47:42
| 175,520,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
import requests
res = requests.get('https://localprod.pandateacher.com/python-manuscript/crawler-html/spider-men5.0.html')
res.encoding = 'utf-8'
aa = res.text
aaa = open('aaa.txt','a+')
aaa.write(aa)
aaa.close()
|
[
"somw@qq.com"
] |
somw@qq.com
|
449f06694c194d2b417097d8fa3d6682d7bf25b7
|
acc71b91ffddc511d70a9a772a1823921840dc97
|
/Python/unidade5/fundo/fundodeinvestimento.py
|
a18b9e81f701c46a7ced03207bddf955d20505fa
|
[] |
no_license
|
biel2k20/C-diguin-em-Python
|
e1a1f02b78ae5c0f76a2ffa8e397bcf8f95bd6e4
|
a528109c3d005502b8c427c783e0bdc9efb79e7d
|
refs/heads/main
| 2022-12-20T04:47:19.036194
| 2020-10-06T14:58:57
| 2020-10-06T14:58:57
| 301,755,016
| 0
| 1
| null | 2020-10-06T14:57:44
| 2020-10-06T14:29:57
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
#coding: utf-8
#Gabriel Dnatas Santos de Azevêdo
#Matrícula: 118210140
#Problema: Fundo de Investimento
cima = 0.0
cont = 0
media = 0
while True:
valor = float(raw_input())
if valor >= media:
cima += valor
cont += 1
media = cima / cont
else:
break
print 'Saldo total do FIS: R$%.2f.' % (cima)
print 'Média das contribuições: R$%.2f.' % (media)
|
[
"gabriel.dantas.azevedo@ccc.ufcg.edu.br"
] |
gabriel.dantas.azevedo@ccc.ufcg.edu.br
|
665a15d66af83ab0d0674bf08a0eb2ef07401c04
|
a0c6174652a793c6d5ff989fc0ce2845cee3259b
|
/test_employee_ut57.py
|
b8f6c74165415b9afc3436982257a9055c0ad9ce
|
[] |
no_license
|
rfvdgh/schafer_crs
|
1fb8c840904873d27cf8cb71a1a309ed4d572dc9
|
40edc9b4c6a2b5c5b1120acb394409418c381b8d
|
refs/heads/master
| 2022-12-21T14:34:25.939855
| 2020-09-28T15:18:34
| 2020-09-28T15:18:34
| 296,402,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,180
|
py
|
import unittest
from unittest.mock import patch
from employee_ut57 import Employee
class TestEmployee(unittest.TestCase):
# setUpClass and tearDownCLass gets run before and after everything, respectively
@classmethod
def setUpClass(cls):
print("setupClass")
@classmethod
def tearDownClass(cls):
print("teardownClass")
# setUp and tearDown will run for each test
def setUp(self):
print("setUp")
self.emp_1 = Employee("Corey", "Schafer", 50000)
self.emp_2 = Employee("Sue", "Smith", 60000)
def tearDown(self):
print("tearDown\n")
pass
def test_email(self):
print("test_email")
self.assertEqual(self.emp_1.email, "Corey.Schafer@email.com")
self.assertEqual(self.emp_2.email, "Sue.Smith@email.com")
self.emp_1.first = "John"
self.emp_2.first = "Jane"
self.assertEqual(self.emp_1.email, "John.Schafer@email.com")
self.assertEqual(self.emp_2.email, "Jane.Smith@email.com")
def test_fullname(self):
print("test_fullname")
self.emp_1.first = "John"
self.emp_2.first = "Jane"
self.assertEqual(self.emp_1.fullname, "John Schafer")
self.assertEqual(self.emp_2.fullname, "Jane Smith")
def test_apply_raise(self):
print("test_apply_raise")
self.emp_1.apply_raise()
self.emp_2.apply_raise()
self.assertEqual(self.emp_1.pay, 52500)
self.assertEqual(self.emp_2.pay, 63000)
def test_monthly_schedule(self):
with patch("employee_ut57.requests.get") as mocked_get:
mocked_get.return_value.ok = True
mocked_get.return_value.text = "Success"
schedule = self.emp_1.monthly_schedule("May")
mocked_get.assert_called_with("http://company.com/Schafer/May")
self.assertEqual(schedule, "Success")
mocked_get.return_value.ok = False
schedule = self.emp_2.monthly_schedule("June")
mocked_get.assert_called_with("http://company.com/Smith/June")
self.assertEqual(schedule, "Bad Response!")
if __name__ == "__main__":
unittest.main()
|
[
"banannas@tree.edu"
] |
banannas@tree.edu
|
6ae2a28c5acfdb9ec8b1cb68c85502e938148591
|
5388078ef91709078563e55fa4bbc6935d79ae1a
|
/file_input_output/demo_max_word.py
|
2356e272287d9cc61d96aeed04eb7f47b7e54adb
|
[] |
no_license
|
hanv698/PythonDjangoLuminar
|
a0b764efe67be40f08337593b69143fa602a06ac
|
e2fdec301b5f3c48e070fddcf715c3565435806d
|
refs/heads/master
| 2023-03-19T17:35:15.838189
| 2021-03-10T06:44:27
| 2021-03-10T06:44:27
| 327,817,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
f=open("demo","r")
dict={}
for lines in f:
words=lines.rstrip("\n").split(" ")
for word in words:
if(word not in dict):
dict[word]=1
else:
dict[word]+=1
maxi=sorted(dict,key=dict.get,reverse=True)
print(maxi[0],":",dict[maxi[0]])
|
[
"hanv698@gmail.com"
] |
hanv698@gmail.com
|
6108b99e4d996f51bfbbc836f3d19abb16fa8cde
|
073aa281d44b1212a54f3f0ed7ecea17ce4ed195
|
/lib/BoardMRAA.py
|
efffb0aee2195037f798349115f6dcfb8df92046
|
[] |
no_license
|
VeronaFabLabRepo/intelmaker16_openlogger
|
cede8706af6da20002d6abc26474be6eb18731fe
|
17100db76af534512f14cf0f61391a587bb80ad9
|
refs/heads/master
| 2020-07-07T06:10:11.643116
| 2016-11-08T20:07:47
| 2016-11-08T20:07:47
| 67,450,382
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
import Hardware
import mraa
import psutil
import os
import time
#Implementazione dell'hardware Intel Edison
class BoardMRAA(Hardware.Hardware):
def getInfo(self):
return "IntelEdison"
def getData(self):
return {
"date":time.strftime("%x"),
"time":time.strftime("%X")
}
def setData(self,data):
# data = "2016/08/2 21:45:00"
os.system("date +%T -s " + ('"'+str(data)+'"'))
os.system("hwclock -w")
def getSizeDb(self):
return {"size_db":(os.path.getsize('db/openlogger.db')/(1024*1024))} #ritorna in mb
def getCpuLoad(self):
return {"cpu":psutil.cpu_percent(interval=None,percpu=False)}
def getMemLoad(self):
return {"percent":psutil.virtual_memory().percent}
def getDiskLoad(self):
return {"percent":psutil.disk_usage('/').percent}
def getDigital(self,ioconfig):
x = mraa.Gpio(int(ioconfig.pin))
x.dir(mraa.DIR_IN)
return x.read()
def getAnalog(self,ioconfig):
x = mraa.Aio(int(ioconfig.pin))
return x.read()
|
[
"zamby.ing@gmail.com"
] |
zamby.ing@gmail.com
|
6804a11ea2a813351e7b51112737afdddd3ab834
|
772c0c955eee54bfa8f483c52491c490c130e4bf
|
/inputs1.py
|
4aa6db3f2b43946c02766ee5c0a1ee2e0a36328e
|
[] |
no_license
|
CGayatri/Python-Practice1
|
9bedd2beb3c2418ed7f6212ef2810b451a055fdf
|
96d184628c9187db10ee4f0951805d157628ca8e
|
refs/heads/master
| 2023-08-25T20:29:20.565673
| 2021-11-11T05:02:35
| 2021-11-11T05:02:35
| 426,872,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
### program - 1 to accept a string from keyboard
# accepting a string from keyboard
str = input("Enter a string: ")
print("U entered: ", str)
#Output:
'''
F:\PY>py inputs1.py
Enter a string: Gayatri Chaudhari
U entered: Gayatri Chaudhari
'''
|
[
"chaudharisimran1@gmail.com"
] |
chaudharisimran1@gmail.com
|
c5bcac69d4260a7eef9611be71c5f98aa7e41129
|
5944350c93efe682af2018dd07ec74205be05969
|
/Jumping on the clouds.py
|
2096c55cc9bc0654f91d6e95ced5faa649a704ee
|
[] |
no_license
|
Siddharth-IITH/HackerRank-Solutions
|
9fda99a6624eeebb9687c2fe4330c7900c080c6c
|
00b934cf69d9b2e610137a36337591432ecf61d4
|
refs/heads/master
| 2022-12-06T14:15:11.147242
| 2020-09-03T07:11:24
| 2020-09-03T07:11:24
| 283,832,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the jumpingOnClouds function below.
def jumpingOnClouds(c):
i=0
count=0
while(i<len(c)-1):
try:
if c[i+2]!=1:
i=i+2
else:
i=i+1
except:
count+=1
break
count+=1
return (count)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
c = list(map(int, input().rstrip().split()))
result = jumpingOnClouds(c)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"noreply@github.com"
] |
Siddharth-IITH.noreply@github.com
|
2b4fa7e0c6e142064366b2e43a6fae63213b0e1e
|
93b866284ca1ac29c5005555f2cb30454a0fb5cf
|
/Problems/700-Problem/Problem 700.py
|
513d24149c9e61e8f1f15aaa4ce5ac5be3a963b3
|
[] |
no_license
|
FrancoisdeFouchecour/Projet-Euler
|
c2b17d1e35fbd10a708ba3221825a62a17818382
|
0cf70457c0418264c2eff7cdd0e92a07b61ecb07
|
refs/heads/master
| 2021-12-25T05:44:08.054648
| 2021-11-27T21:47:42
| 2021-11-27T21:47:42
| 168,253,571
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
import time
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.getcwd())),'Utils'))
from arithm import bezout
problem_number = 700
problem_input = 1504170715041707
problem_input_modulo = 4503599627370517
#Solution
def solution(K, modulo):
sumation = K
Eulercoin, periode = K, modulo
while Eulercoin !=0:
periode = periode%Eulercoin
while Eulercoin >= periode:
Eulercoin -= periode
sumation += Eulercoin
return sumation
#Test & Result
fichier = open("Solution "+str(problem_number)+".txt", "w")
string = ""
begin_problem = time.time()
problem_value = solution(problem_input, problem_input_modulo)
end_problem = time.time()
problem_time = end_problem - begin_problem
string += "RESULT PROBLEM #"+str(problem_number)+"\n\n"
string += "Input: "+str(problem_input)+"\n"
string += "Output: "+str(problem_value)+"\n"
string += "Computation time: "+str(problem_time)+" sec\n"
string += "\n\n\nCurrent date & time: " + time.strftime("%c")
fichier.write(string)
fichier.close()
|
[
"francois.fouchecour@gmail.com"
] |
francois.fouchecour@gmail.com
|
6a1c0fef217a5a87defbe7ea2b9837d232dc7b92
|
56b4287af3bad2a7c5d56f5d150d41f445bf3e95
|
/util/text_connector/detectors.py
|
54d360a024cb5e506a3b5999387f0e27bb5767dd
|
[
"MIT"
] |
permissive
|
smartcai/cptn-crnn
|
32c9144df597f90306802ef51988083aa36297f8
|
5586d72dd513aad1fc55e6335cf38100af21be65
|
refs/heads/master
| 2021-05-24T17:17:37.156576
| 2019-07-23T03:32:04
| 2019-07-23T03:32:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
# coding:utf-8
import numpy as np
from util.bbox.nms import nms
from .text_connect_cfg import Config as TextLineCfg
from .text_proposal_connector import TextProposalConnector
from .text_proposal_connector_oriented import TextProposalConnector as TextProposalConnectorOriented
class TextDetector:
def __init__(self, DETECT_MODE="H"):
self.mode = DETECT_MODE
if self.mode == "H":
self.text_proposal_connector = TextProposalConnector()
elif self.mode == "O":
self.text_proposal_connector = TextProposalConnectorOriented()
def detect(self, text_proposals, scores, size):
# 删除得分较低的proposal
keep_inds = np.where(scores > TextLineCfg.TEXT_PROPOSALS_MIN_SCORE)[0]
text_proposals, scores = text_proposals[keep_inds], scores[keep_inds]
# 按得分排序
sorted_indices = np.argsort(scores.ravel())[::-1]
text_proposals, scores = text_proposals[sorted_indices], scores[sorted_indices]
# 对proposal做nms
keep_inds = nms(np.hstack((text_proposals, scores)), TextLineCfg.TEXT_PROPOSALS_NMS_THRESH)
text_proposals, scores = text_proposals[keep_inds], scores[keep_inds]
# 获取检测结果
text_recs = self.text_proposal_connector.get_text_lines(text_proposals, scores, size)
keep_inds = self.filter_boxes(text_recs)
return text_recs[keep_inds]
def filter_boxes(self, boxes):
heights = np.zeros((len(boxes), 1), np.float)
widths = np.zeros((len(boxes), 1), np.float)
scores = np.zeros((len(boxes), 1), np.float)
index = 0
for box in boxes:
heights[index] = (abs(box[5] - box[1]) + abs(box[7] - box[3])) / 2.0 + 1
widths[index] = (abs(box[2] - box[0]) + abs(box[6] - box[4])) / 2.0 + 1
scores[index] = box[8]
index += 1
return np.where((widths / heights > TextLineCfg.MIN_RATIO) & (scores > TextLineCfg.LINE_MIN_SCORE) &
(widths > (TextLineCfg.TEXT_PROPOSALS_WIDTH * TextLineCfg.MIN_NUM_PROPOSALS)))[0]
|
[
"xyh650209@163.com"
] |
xyh650209@163.com
|
a48a155a32f543452e418f1d4097f3748baa480d
|
803286daa5c0992b6ad0008676789d14178f465e
|
/CSElab2/quidditchStats.py
|
b23b439a373473412119b37a2ebc6a7895d77397
|
[] |
no_license
|
lchristopher99/CSE-Python
|
8bbd6f464336a11ce7dcb20e359cb35836434818
|
efb6b17d4eb514e602d65a3806d0872e310fd584
|
refs/heads/master
| 2020-04-02T15:56:26.185115
| 2019-02-04T00:45:49
| 2019-02-04T00:45:49
| 154,590,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
# Name: Logan Christopher Assigned: 9/12/18
#
# Course: CSE 1284 Section: 11 Date Due: 9/12/18
#
# File name: quidditchStats.py
#
# Program Description: Calculates statistics of a Quidditch match based off of user input.
print('The following is a quidditch stats calculator.')
print('Enter the match information below and the according statistcs will be generated.')
print()
teamA = input('Enter the name of the team who caught the golden snitch: ')
scoreA = input('What was the teams final score? ')
teamB = input('Enter the name of the other team: ')
scoreB = input('What was the teams final score? ')
length = input('Enter the length of the game in minutes: ')
print()
length = int(length)
scoreB = int(scoreB)
scoreA = int(scoreA)
if scoreA > 150:
scoreA = scoreA - 150
scoreA = scoreA / 10
gpmA = scoreA / length
print('First Team Statistics:')
print('------------------------------')
print('House: ', teamA)
print('Goals: ', scoreA)
print('Snitch: 1')
print('Goals per Minute: ', gpmA)
print()
scoreB = scoreB / 10
gpmB = scoreB / length
print('Second Team Statistics:')
print('------------------------------')
print('House: ', teamB)
print('Goals: ', scoreB)
print('Snitch: 0')
print('Goals per Minute: ', gpmB)
else:
print('Final score of the team that caught the snitch must be at least 150, due to the snitch being 150 points by itself.')
|
[
"logan.christopher@comcast.net"
] |
logan.christopher@comcast.net
|
39aafe89c7dec26bf248161d0d286f400253e5b9
|
5623c2115878a710f75e38d4bf2831afe18c5114
|
/Week3/Opdracht2/main.py
|
d3b0ac3f5541037dbd44a0dc9cf5d82414e93fe3
|
[] |
no_license
|
renedekluis/HBO-ICT_python_2B
|
126e90e2440db165aa7c6e582d02c09648fcace4
|
05854e6ac90c2945a784c98f1b54361f42c6e2b7
|
refs/heads/master
| 2021-01-12T07:00:33.936591
| 2017-01-17T16:14:57
| 2017-01-17T16:14:57
| 76,893,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
class ListNode:
def __init__(self,data,next_node):
self.data = data
self.next = next_node
def __repr__(self):
return str(self.data)
class MyCircularLinkedList:
"""
This class creates a looping list of nodes.
"""
def __init__(self):
self.tail = None
def __repr__(self):
"""
This prints the node_list.
Return
------
s : string
string of the node_list
Example
-------
>>> print(mylist)
>>> 5 -> 6
>>> print(myEmptyList)
>>> 'empty list'
"""
s = ''
if not self.tail:
return 'empty list'
current = self.tail.next
if current != None:
s = s + str(current)
current = current.next
while current != self.tail.next:
s = s + " -> " + str(current)
current = current.next
return s
def addLast(self,e):
"""
This function adds a node to the list.
Parameters
----------
e : integer
value to add
Example
-------
>>> addLast(5)
>>> addLast(6)
>>> node_list = 5 -> 6
"""
if not self.tail:
self.tail = ListNode(e,self.tail)
else:
self.tail.next = ListNode(e,self.tail.next)
self.tail = self.tail.next
if not self.tail.next:
self.tail.next = self.tail
def delete(self,e, current = None):
"""
This function removes a function from the node_list.
Parameters
----------
e : integer
value to be removed
Example
-------
>>> node_list = 5 -> 6 -> 7
>>> delete(6)
>>> node_list = 5 -> 7
"""
if not current:
current = self.tail
if current.next.data == e:
current.next = current.next.next
else:
self.delete(e,current.next)
if e == self.tail.data:
self.tail = None
mylist2 = MyCircularLinkedList()
print(mylist2)
mylist2.addLast(1)
mylist2.addLast(2)
mylist2.addLast(3)
mylist2.addLast(4)
mylist2.addLast(5)
print(mylist2)
mylist2.delete(1)
print(mylist2)
mylist2.delete(2)
print(mylist2)
mylist2.delete(3)
print(mylist2)
mylist2.delete(4)
print(mylist2)
mylist2.delete(5)
print(mylist2)
|
[
"rdekluis@hotmail.com"
] |
rdekluis@hotmail.com
|
512c0b680008db2acda2d2a77c4dadbb7d980b2b
|
f73d43d8139e9cc57ead5ece561a50540bc3a039
|
/DawnEng.py
|
4e73e5558a54f08d7bc2c9632fe091ccff6ea559
|
[] |
no_license
|
msohaibali/RssNews
|
5e96af6d1aa2844910389a0d94eee91db47b4c24
|
683a9ec433abfefe89b6a48577dd9ee2248c2727
|
refs/heads/master
| 2020-03-21T20:13:03.345962
| 2018-06-28T10:19:06
| 2018-06-28T10:19:06
| 138,994,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,392
|
py
|
import traceback
import arrow
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from pymongo import MongoClient
import xml.etree.ElementTree as ET
from selenium.webdriver.support.ui import WebDriverWait
import time
import dateutil.parser as parser
import redis
import datetime
tree = ET.parse('config.xml')
root = tree.getroot()
dbIP = ''
rIP = ''
db = ''
user1 = ''
password1 = ''
collName = ''
errCollName = ''
rPort = ''
shardIP = ''
for itm in root.findall('Project'):
dbIP = itm.find('MongoIP').text
shardIP = itm.find('ShardIP').text
rIP = itm.find('RedisIP').text
rPort = itm.find('RedisPort').text
db = itm.find('Database').text
user1 = itm.find('userName').text
password1 = itm.find('pass').text
collName = itm.find('collection').text
errCollName = itm.find('collection2').text
# Client = MongoClient(dbIP)
Client = MongoClient(shardIP)
db = Client[db]
# db.authenticate(user1, password1)
collection1 = db[collName]
collection3 = db[errCollName]
r = redis.StrictRedis(host=rIP, port=rPort)
# binary = FirefoxBinary('/usr/local/desktop/firefox')
# capabilities = webdriver.DesiredCapabilities().FIREFOX
# capabilities["-marionette"] = False
# binary = FirefoxBinary(r'/usr/bin/firefox')
# driver = webdriver.Firefox(firefox_binary=binary, capabilities=capabilities)
driver = webdriver.Firefox()
# Tags of Main Page Links
tag1 = "box story mb-sm-4 mb-2 pb-sm-4 pb-2"
tag2 = "box story mb-sm-4 mb-2 border--bottom pb-sm-4 pb-2"
tag3 = "box story mb-sm-4 mb-2 pb-sm-4 pb-2 mt-sm-4 mt-2 pull--top border--top pt-sm-4 pt-2 d-none d-sm-block"
tag4 = "box story mb-sm-4 mb-2 pb-sm-4 pb-2 mt-sm-4 mt-2 pull--top border--top pt-sm-4 pt-2 d-none d-md-block d-lg-none"
tag5 = "box story mb-4 border--bottom story--mustread"
tagMain = "box story mb-1 pb-2 border--bottom story--editors-pick"
tag = "story__title size-eleven text-center "
def error(err):
try:
source = "Dawn News English"
time = datetime.datetime.now()
time = time.isoformat()
time = arrow.get(time).datetime
collection3.insert([{"Error": err, "Source Name": source, "Time of Error": time, "Category": "National"}])
except:
pass
def implicitwait(timeout):
WebDriverWait(driver, timeout)
foxlinks = []
data = []
data.append(collection1.distinct('_id'))
def NewsContent(lnk):
wqt = ''
body = ''
title = ''
lnk = lnk.replace("https", "http")
for d26 in data:
for index in range(0, len(d26)):
if d26[index] == lnk:
print(d26[index])
wqt = "True"
break
else:
wqt = "False"
if wqt == "True":
print("This Link Already Exists")
else:
print("New Results Found")
driver.get(lnk)
time.sleep(3)
print(lnk)
soup = BeautifulSoup(driver.page_source, "html.parser")
try:
curr_post = driver.find_element_by_xpath('//html//body//div[2]//div//div[1]//main//div//div//article//div[1]//h2//a')
p_content1 = curr_post.text
title = p_content1
print(title)
except:
try:
curr_post = driver.find_element_by_xpath('//html//body//div[4]//h2//a')
p_content1 = curr_post.text
title = p_content1
print(title)
except:
print("Didn't found the Title")
pass
try:
p_content = ''
curr_post1 = driver.find_element_by_xpath('//html//body//div[2]//div//div[1]//main//div//div//article//div[2]//div')
targ_p1 = curr_post1.find_elements_by_xpath('//p')
for p in targ_p1:
p_content = p_content + p.text
body = p_content
body = body.replace("\n", "")
print(body)
except:
try:
p_content = ''
curr_post1 = soup.find('div', attrs={'class': 'tabs__pane active'})
targ_p1 = curr_post1.find_all('p')
for p in targ_p1:
p_content = p_content + p.text
body = p_content
body = body.replace("\n","")
print(body)
except:
print("Didn't found Body")
pass
try:
# Grabbing DATE
try:
global pubTime
dateDiv = driver.find_element_by_xpath('//html//body//div[2]//div//div[1]//main//div//div//article//div[1]//div[1]//span[3]')
pubTime = dateDiv.text
pubTime = pubTime.replace("Updated ","")
t = datetime.datetime.now().time()
tim = t.strftime('%H:%M:%S.%f')
pubTime = pubTime + "T" + tim
date = parser.parse(pubTime)
pubTime = date.isoformat()
pubTime = arrow.get(pubTime).datetime
print ("Published Date: ", pubTime)
except:
try:
dateDiv = driver.find_element_by_xpath('//html//body//div[4]//div[2]//span[3]//span[2]')
pubTime = dateDiv.text
date = parser.parse(pubTime)
pubTime = date.isoformat()
pubTime = arrow.get(pubTime).datetime
print("Published Date: ", pubTime)
except:
dateDiv = driver.find_element_by_xpath('//html//body//div[2]//div//div[1]//main//div//div//article//div[1]//div[2]//span[3]')
pubTime = dateDiv.text
pubTime = pubTime.replace("Updated ", "")
t = datetime.datetime.now().time()
tim = t.strftime('%H:%M:%S.%f')
pubTime = pubTime + "T" + tim
date = parser.parse(pubTime)
pubTime = date.isoformat()
pubTime = arrow.get(pubTime).datetime
print("Published Date: ", pubTime)
if title is '':
print("This is a Photo or Video, Content doesn't Exists!")
elif body is '':
print("This is a Photo or Video, Content doesn't Exists!")
elif pubTime is '':
print("This is a Photo or Video, Content doesn't Exists!")
else:
try:
collection1.insert([{"Type": "Predefined List", "Category": "National", "Language": "English",
"Source": "Dawn News English", "title": title, "body": body, "_id": lnk,
"published Time": pubTime}])
r.rpush('news_link', lnk)
except:
pass
except:
print ("Issues at Dumping Level")
pass
# Function for grabbing News links
def get_results():
    """Open the Dawn homepage and collect article links into `foxlinks`.

    The original repeated the same grab-and-append block once per article
    class.  Note the original XPaths compared @class against an *element*
    (``@class=tag1``, unquoted), which silently matched nothing; the class
    values are quoted here so the selectors actually work.

    `driver` and `foxlinks` are module-level names defined earlier.
    """
    url = "https://www.dawn.com/"
    driver.get(url)
    try:
        # One selector per article style, in the same order the original
        # hard-coded blocks ran.
        for css_class in ('tag1', 'tag2', 'tag3', 'tag4', 'tag5',
                          'tagMain', 'box story '):
            try:
                links = driver.find_elements_by_xpath(
                    "//article[@class='%s']//h2//a" % css_class)
            except Exception:
                # Fall back to any headline anchor if the lookup itself fails.
                links = driver.find_elements_by_xpath("//h2//a")
            for link in links:
                # Grabbing Links
                foxlinks.append(link.get_attribute("href"))
    except Exception as ex:
        print(ex)
        print("Issues in Links Grabbing")
    # Total number of Links Grabbed
    print("\nTotal Links Grabbed: ", len(foxlinks))
def main():
    """Collect homepage article links, then scrape each collected link.

    get_results(), NewsContent(), `foxlinks` and `driver` are module-level
    names defined earlier in this script.
    """
    time.sleep(1)  # brief pause before hitting the site
    get_results()
    for lnk in foxlinks:
        NewsContent(lnk)
    driver.close()
# Script entry: run the scraper once; on any failure log the traceback via
# error() and make sure the browser is closed.
try:
    main()
    print(len(foxlinks))
except:
    tb_err = traceback.format_exc()
    error(tb_err)
    driver.close()
    pass
|
[
"sohaibayub9@gmail.com"
] |
sohaibayub9@gmail.com
|
adf0636c17fb7050f4228e609a3fe4d8c9f97323
|
54f525ab1acc8f854c7a110f06086a94add93e16
|
/nlp-2/q3.py
|
394df0208ea2fdbaff3d883ab7429d1c50345351
|
[] |
no_license
|
edison0829/course_codes
|
799ee302d86aa4410c615e04bbfcf7d6e69a38d7
|
a8d6c34ce890250b62048ba22b93ed6acd64d298
|
refs/heads/master
| 2020-04-30T19:16:05.602708
| 2019-05-13T18:20:06
| 2019-05-13T18:20:06
| 177,033,521
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
#!/usr/bin/env python
import distsim

# Context-count vectors for words from the nytcounts.4k data file.
word_to_ccdict = distsim.load_contexts("nytcounts.4k")


def _print_nearest(word):
    """Print *word* followed by its ranked nearest neighbours.

    Neighbours are ranked by sparse cosine similarity over the context-count
    vectors; the query word itself is excluded from its own neighbour list.
    """
    print(word)
    nearest = distsim.show_nearest(word_to_ccdict, word_to_ccdict[word],
                                   set([word]), distsim.cossim_sparse)
    for rank, (neighbour, score) in enumerate(nearest, start=1):
        print("{}: {} ({})".format(rank, neighbour, score))


# One example per requested category: a name/location ('china'), a common
# noun ('human'), an adjective ('handsome'), a verb ('fight'), and another
# two words ('homes', 'cars').  The original repeated the loop six times and
# mixed Python-2 print statements with print() calls.
for query in ['china', 'human', 'handsome', 'fight', 'homes', 'cars']:
    _print_nearest(query)
|
[
"ruotianj@usc.edu"
] |
ruotianj@usc.edu
|
25922c7f5a7a5df86b7b350b3530783a603c1dea
|
969027d46b99cc5e54f8fd683efb6cf6aa0bc8c0
|
/config.py
|
61c3f1084ba7bee398eab5b2ed00249a39f86934
|
[] |
no_license
|
anshingy/xinjingzixun
|
719f538a600ae3b2cf2b424e9caf8610c74c0201
|
ad11a009a7290e32857179adae5f1fd0a9fcd736
|
refs/heads/master
| 2020-04-12T16:16:51.397909
| 2018-06-15T06:36:38
| 2018-06-15T06:36:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
from redis import StrictRedis
class Config(object):
    """Base Flask configuration shared by all environments."""
    # Debug mode; overridden by the environment subclasses below.
    DEBUG = None
    # Secret key (generated via os.urandom + base64).
    # NOTE(review): secret and DB credentials are hard-coded in source —
    # consider loading them from the environment.
    SECRET_KEY = 'k6fQDT/sHyZbrHiefRIESIvzo8LKQkrLYCui5glE2C0='
    # SQLAlchemy connection URL for the MySQL news database.
    SQLALCHEMY_DATABASE_URI = 'mysql://root:mysql@localhost/newsInfo'
    # Disable SQLAlchemy's change-tracking signal machinery.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Redis host/port.
    REDIS_IP = "127.0.0.1"
    REDIS_PORT = 6379
    # Store session data in redis.
    SESSION_TYPE = "redis"
    # Sign the session cookie.  Fixed: was SESSION__USE_SIGNER (double
    # underscore), a key Flask-Session never reads, so signing was off.
    SESSION_USE_SIGNER = True
    # Redis instance used to store sessions.
    SESSION_REDIS = StrictRedis(host=REDIS_IP, port=REDIS_PORT)
    # Session lifetime: one day, in seconds.
    PERMANENT_SESSION_LIFETIME = 86400
class Development(Config):
    # Development settings: debugging enabled.
    DEBUG = True
class Producetion(Config):
    # Production settings: debugging disabled.
    # NOTE(review): class name misspells "Production"; kept because the
    # `config` mapping at the bottom of this file references this identifier.
    DEBUG = False
# Map short mode names to their configuration classes.
config = {
    "dev":Development,
    "pro":Producetion
}
|
[
"henan_youngstar@163.com"
] |
henan_youngstar@163.com
|
673c03193725598c13eeb2a99e9775d727bc081f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03362/s814934853.py
|
0da7c3cf2ebcc6702e2881843c1c3712208b689d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from collections import defaultdict
import numpy
def sieve_of_eratosthenes(n):
    """Return all primes <= n in increasing order.

    Despite the name this is incremental trial filtering rather than a
    classic sieve: the odd candidates are repeatedly filtered by the most
    recently found prime, and the smallest survivor is promoted to the
    prime list.

    Fix: the original used ``dtype=numpy.int``, an alias removed in
    NumPy >= 1.24 (AttributeError at runtime); int64 keeps the same
    semantics on every platform.
    """
    primes = [2]
    cands = numpy.array(range(3, n + 1, 2), dtype=numpy.int64)
    while len(cands) != 0:
        prime = primes[-1]
        # Drop every remaining multiple of the newest prime.
        cands = cands[cands % prime != 0]
        if len(cands) == 0:
            break
        # The smallest survivor has no prime factor found so far -> prime.
        primes.append(int(cands[0]))
    return primes
def main():
    """Read n from stdin and print the first n primes that share a residue
    class mod 5.

    Primes below 55555 are bucketed by their value mod 5; as soon as any
    bucket reaches n entries it is printed space-separated and we return.
    """
    n = int(input())
    k = 5
    buckets = defaultdict(list)
    for p in sieve_of_eratosthenes(55555):
        residue = p % k
        buckets[residue].append(p)
        if len(buckets[residue]) >= n:
            print(' '.join(str(q) for q in buckets[residue]))
            return
# Entry point: only run when executed directly, not when imported.
if __name__=='__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f76cde7ae38998a40d36dafdd55813b58b0ea099
|
88122e9812c937196094aa11a7399f4e3f69baba
|
/change_attribute_xml.py
|
2e458e23c7d4bbb9bd38755f7bcc13e21e6d2166
|
[] |
no_license
|
ylltest/myscripts-github
|
d6c0383d43d92d7d70ec3c81f25b66f0c0146c07
|
0a79ce01c283d5fbb6032ed8d793bd14b0b00985
|
refs/heads/master
| 2020-04-04T18:04:59.479821
| 2018-12-12T09:38:50
| 2018-12-12T09:38:50
| 156,149,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
import os
import xml.etree.cElementTree as et
input_file_dir="C:\\Users\\Administrator.X36KKQ2UTQSEZ5O\\Desktop\\xmltest" # directory of the XML files to read
xml_dir="C:\\Users\\Administrator.X36KKQ2UTQSEZ5O\\Desktop\\xml-ok\\" # directory where the rewritten XML files are saved
def alter_file(file):
    """Set the text of every <name> element in *file* to 'person' and write
    the result into `xml_dir` under the same base name.

    Fix: the original read the output name from a module global `xmi_name`
    that the caller had to set before each call; the name is now derived
    from *file* itself, removing the hidden coupling.  Always returns None.
    """
    name = os.path.splitext(os.path.basename(file))[0]
    tree = et.parse(file)
    root = tree.getroot()
    for node in root.iter('name'):
        node.text = 'person'
    print(name)
    tree.write(xml_dir + name + '.xml')
# Convert every annotation file in the input directory.
for f in os.listdir(input_file_dir):
    # Kept for backward compatibility: the original alter_file() reads this
    # module global as the output file name.
    xmi_name = str(f[:-4])  # file name without its 4-character ".xml" suffix
    # Fix: the original tested `alter_file(...) == False` to break, but
    # alter_file always returns None, so the break could never fire; every
    # file is processed unconditionally.
    alter_file(input_file_dir + "\\" + f)
|
[
"llye@miivii.com"
] |
llye@miivii.com
|
a56f060dbc5c93ea4851fc4fcbcf2f11e8adcccd
|
73e580830119adcf9bd0cd74598353fb4f3b000b
|
/pyhelm/repo.py
|
3f6dcc0826f01f5246c4f91f33b86bc0b9d111f9
|
[
"Apache-2.0"
] |
permissive
|
HackToHell/pyhelm
|
9a79b2edeec11299c13cfc11769a824296840d6a
|
054729c4838a50b3395aa5f000701414d5314d77
|
refs/heads/master
| 2021-04-28T16:49:52.334759
| 2018-02-19T05:33:28
| 2018-02-19T05:33:28
| 122,023,000
| 0
| 0
| null | 2018-02-19T05:32:32
| 2018-02-19T05:32:32
| null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
import cStringIO
import itertools
import os
import pygit2
import requests
import shutil
import tarfile
import tempfile
import yaml
def repo_index(repo_url):
    """Download and parse the chart repository's index.

    Fetches <repo_url>/index.yaml and parses it with yaml.safe_load — the
    index comes from a remote, potentially untrusted server, and the
    original yaml.load could construct arbitrary Python objects from tags.

    NOTE(review): os.path.join would build a backslash "URL" on Windows —
    presumably this only ever runs on POSIX; confirm.
    """
    index_url = os.path.join(repo_url, 'index.yaml')
    index = requests.get(index_url)
    return yaml.safe_load(index.content)
def from_repo(repo_url, chart, version=None):
    """Download a chart tarball from a Helm repo and return its unpacked path.

    Looks *chart* up in the repo index, optionally pins *version*, then tries
    each listed URL in turn until one downloads and untars successfully.
    Returns the extracted chart directory, or None if every URL fails.
    Raises RuntimeError when the chart (or the requested version) is missing.
    """
    _tmp_dir = tempfile.mkdtemp(prefix='pyhelm-', dir='/tmp')
    index = repo_index(repo_url)
    if chart not in index['entries']:
        raise RuntimeError('Chart not found in repo')
    versions = index['entries'][chart]
    if version is not None:
        # Python-2 lazy filter: keep only entries matching the pinned version.
        versions = itertools.ifilter(lambda k: k['version'] == version,
                                     versions)
    try:
        # NOTE(review): string sort + [0] picks the *lexicographically
        # smallest* version, not the newest (e.g. '10.0.0' < '9.0.0') —
        # confirm whether the latest version was intended here.
        metadata = sorted(versions, key=lambda x: x['version'])[0]
        for url in metadata['urls']:
            fname = url.split('/')[-1]  # unused; kept as in original
            try:
                req = requests.get(url, stream=True)
                fobj = cStringIO.StringIO(req.content)
                tar = tarfile.open(mode="r:*", fileobj=fobj)
                tar.extractall(_tmp_dir)
                return os.path.join(_tmp_dir, chart)
            except:
                # NOTE(flaper87): Catch requests errors
                # and untar errors
                pass
    except IndexError:
        # sorted(...)[0] raised -> the (filtered) version list was empty.
        raise RuntimeError('Chart version %s not found' % version)
def git_clone(repo_url, branch='master'):
    """Clone *repo_url*, checking out *branch*, into a fresh /tmp directory.

    Returns the temporary directory path; the caller is responsible for
    removing it (see source_cleanup).
    """
    _tmp_dir = tempfile.mkdtemp(prefix='pyhelm-', dir='/tmp')
    pygit2.clone_repository(repo_url, _tmp_dir, checkout_branch=branch)
    return _tmp_dir
def source_cleanup(target_dir):
    """Recursively delete *target_dir* (a temp dir created by this module)."""
    shutil.rmtree(target_dir)
|
[
"flaper87@gmail.com"
] |
flaper87@gmail.com
|
e2259e34e5a91d547e72dd1c516a745cf64faa1e
|
212336dec0f4d17cfb430b2401acd845a8d9ef56
|
/string_basic.py
|
3e365fc6de61d286fcf670e77a26fa1383efe4bb
|
[] |
no_license
|
Mizoguchi-Yuhei/udemy-kame-python
|
bdde0054a7ef1f096e43689be450ab20d3063636
|
1d5ac821e8b5032fc602c5a5da22877adc0f2578
|
refs/heads/main
| 2023-07-11T12:07:22.849858
| 2021-08-13T15:00:23
| 2021-08-13T15:00:23
| 376,057,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
# Strings: basic literal forms and escape sequences.
print("Hello World!!")
print('1')  # digits inside quotes are still strings
print("I'm fine.")  # double quotes let a single quote appear as-is
print('I"m fine.')  # and single quotes let a double quote appear as-is
print("""
Hello world!!
How are you doing?
""")  # triple quotes keep embedded newlines
print("Hello \nworld")  # \n -> newline
print("Hello \tworld")  # \t -> tab
print("back slash n: \\n")  # \\ -> a literal backslash
print('I\'m fine')  # \' escapes a quote inside single quotes
print("hello" + "world" + "!!")  # + concatenates strings
|
[
"mizo.cb.fl@gmail.com"
] |
mizo.cb.fl@gmail.com
|
76fdbcb324ed6cc4886f7268ac013e349c82f725
|
88f76659804ae25947352c3c26310480f06cabea
|
/run_merge.py
|
e8afd510ca506400528e26c3a5d30bdf2c25ca69
|
[] |
no_license
|
92hoy/pandas_excel_merge
|
b0b2450efdb006ec641ba4fcc3a0bf36da1a95d0
|
a5d353014e321050b84dca26b671da8cf4a0fe26
|
refs/heads/main
| 2023-02-02T10:56:03.586190
| 2020-12-10T08:30:57
| 2020-12-10T08:30:57
| 319,853,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,037
|
py
|
import pandas as pd
from pandas import DataFrame
import openpyxl
import sys
########### use pandas ################
# Read the monitoring-network station sheet and keep rows whose SN != 0,
# sorted by SN descending.
df = pd.read_excel("./location_data/AI_TEST_201104.xlsx", sheet_name='03_자동측정망')
df1 = df['SN'] != 0
# column sort
df1 = df[df1].sort_values('SN', ascending=False)
h_list = []
equal_name_excel = []  # per station: [station_2016.xlsx, ..., station_2019.xlsx]
tmp_excel = []         # station names (column index 2) in SN order
set_excel = []         # per year: [stationA_year.xlsx, stationB_year.xlsx, ...]
start_year = 2016
end_year = 2020        # exclusive bound: years 2016..2019
for i in df1.values:
    river = []
    tmp_excel.append(i[2])
    for year in range(start_year, end_year):
        river.append('./excel_data/' + str(i[2]) + '_' + str(year) + '.xlsx')
        if year == end_year - 1:
            equal_name_excel.append(river)
# Transpose the station-major lists into year-major lists.
for year in range(start_year, end_year):
    tmp = []
    for i in tmp_excel:
        tmp.append('./excel_data/' + i + '_' + str(year) + '.xlsx')
    set_excel.append(tmp)
# g_df = pd.read_excel("./excel_data/가평_2016.xlsx")
# g_df1 = pd.read_excel("./excel_data/가평_2017.xlsx")
# p = pd.concat([g_df, g_df1], axis=1)
# print(p.head())
# Han River downstream -> upstream excel
# For each year, read every station's workbook and concatenate them
# side-by-side (axis=1) into one ./result/한강_<year>.xlsx file.
for set_excel_list, year in zip(set_excel, range(start_year, end_year)):
    pd_concat = []
    # print(set_excel_list)
    cnt = 0
    for i in set_excel_list:
        print(i)
        cnt += 1
        d_f = pd.read_excel(i)
        if cnt >1:
            # Keep the 'date' column only from the first station's sheet.
            d_f.pop('date')
        # Build the renamed column labels (suffix _<station position>).
        new_col_name = []
        for k in d_f.columns.values:
            new_col_name.append(k + '_' + str(cnt))
        # Apply the renamed columns.
        # d_f.rename(columns=dict(zip(d_f.columns.values, new_col_name)))
        d_f.columns = new_col_name
        pd_concat.append(d_f)
    kk = pd.concat(pd_concat, axis=1)
    kk.to_excel('./result/' + '한강_' + str(year) + '.xlsx', index=False)
# 연도 병합
# pd_concat = []
# for equal_name_excel_list, year, name in zip(equal_name_excel, range(start_year, end_year), df1.values):
# for i in equal_name_excel_list:
# d_f = pd.read_excel(i)
# pd_concat.append(d_f)
# kk = pd.concat(pd_concat, axis=0)
# kk.to_excel('./result/' + str(name[2]) + '_all.xlsx', index=False)
########### use python / openpyxl ################
# df = pd.read_excel("./location_data/AI_TEST_201104.xlsx", sheet_name='03_자동측정망')
# df1 = df['SN'] != 0
# # column sort
# df1 = df[df1].sort_values('SN')
# h_list = []
# equal_name_excel = []
# start_year=2016
# end_year=2020
# for i in df1.values:
# river=[]
# for year in range(start_year, end_year):
# river.append('./excel_data/'+str(i[2]) + '_' + str(year) + '.xlsx')
# if year == end_year-1:
# equal_name_excel.append(river)
#
# for i,k in zip(equal_name_excel,df1.values):
# excel_names = i
# excels = [pd.ExcelFile(name) for name in excel_names]
# frames = [x.parse(x.sheet_names[0], header=None,index_col=None) for x in excels]
# frames[1:] = [df[1:] for df in frames[2:]]
# combined = pd.concat(frames)
#
# #파일저장
# combined.to_excel("./result/"+k[2]+"_all.xlsx", header=False, index=False)
|
[
"ho_9209@naver.com"
] |
ho_9209@naver.com
|
a156d87e7889b89cfff4f16636c7edb787bc8b41
|
ca9ef917ecd5ba9615ab46d42d9409ccfa91d163
|
/step4c2.py
|
586870400c662f749c50bcd63de50c587523a01e
|
[] |
no_license
|
ssy248/Lidar
|
8936e09b0b7a1906a63d2d5c17d30f1bfb63d0b1
|
125de93e1c5c9af2f5edcef718831878f1aadae1
|
refs/heads/master
| 2021-12-17T20:03:32.223556
| 2021-12-09T01:47:18
| 2021-12-09T01:47:18
| 191,840,225
| 0
| 1
| null | 2020-07-25T21:45:55
| 2019-06-13T22:16:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,836
|
py
|
# STEP 4 C
# updated process from above
# Reads a Velodyne trajectory CSV and accumulates cell-to-cell transition
# counts in `trajcount1`, keyed via the coordinate index `invlookupdict`.
# NOTE(review): `invlookupdict` and `trajcount1` are not defined anywhere in
# this script — they must come from earlier notebook/session state; confirm.
#input
tfile ="april2019/2019-02-27-12-19-36_Velodyne-HDL-32-Data-BF1-CL1-Traj.csv"
import math
import csv
# Scalar state carried across CSV rows.
trajnum = 0   # trajectory id read from the current row
obnum = 1     # trajectory id of the object currently being tracked
irow =0       # running row counter (includes the header row)
outlier =0    # never updated below
#settimestamp={0}
#setx ={0}
#sety ={0}
#settimestamp.clear()
#setx.clear()
#sety.clear()
# change settimestamp to a normal array
# Pending points whose time gap was still < 0.05 s: three parallel lists of
# (timestamp, floor(x), floor(y)).
settimestamp= []
setx =[]
sety =[]
with open(tfile) as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        irow=irow+1
        trajnum = row[0]
        frameindex = row[17]
        # check first row
        if line_count==0:
            line_count=line_count+1
            continue
        # check first row of data
        if line_count==1:
            line_count=line_count+1
            prevrow = row
            prevx = float(prevrow[6])
            prevy = float(prevrow[7])
            pfx = math.floor(prevx)  # previous grid cell x (via floor)
            pfy = math.floor(prevy)  # previous grid cell y
            prevtimestamp = float(prevrow[2])
            continue
        # check if is vehicle
        # NOTE(review): csv.reader yields strings, so `vp == 1` is always
        # False — presumably `vp == '1'` was intended; confirm.
        vp = row[1]
        if vp == 1:
            continue
        # load current info
        currx =float(row[6])
        curry =float(row[7])
        fx = math.floor(currx)
        fy = math.floor(curry)
        # debug -6, 17
        if fx==-6 and fy==17:
            print("actual x:",currx)
            print("actual y:",curry)
            print("row",irow)
        timestamp = float(row[2])
        # compare current + previous timestamps
        ts = timestamp*0.000001   # microseconds -> seconds
        pts = prevtimestamp*0.000001
        diff = ts - pts
        # first check if in the same trajectory
        if obnum != trajnum:
            pfx = fx
            pfy= fy
            obnum = trajnum
            prevframe=frameindex
            prevtimestamp = timestamp
            # if in diff traj, delete settimestamp /clear
            settimestamp=[]
            setx= []
            sety=[]
            continue
        #print("frame:",frameindex)
        # check if there is difference
        if pfx==fx and pfy==fy:
            prevtimestamp = timestamp
            continue
        # check for zero valued timestamp
        if timestamp==0:
            prevtimestamp=0
            # set previous x, y
            pfx = fx
            pfy = fy
            continue
        # check for zero valued previous timestamp
        if prevtimestamp==0:
            prevtimestamp=timestamp
            pfx = fx
            pfy= fy
            continue
        # test the diff in set
        if len(settimestamp) != 0:
            #print some info
            #print("settimestamp at", irow)
            setlen = len(settimestamp)
            # NOTE(review): deleting from the lists while indexing with a
            # range computed up-front shifts later elements under the index,
            # and the range also never visits the last element (stops at
            # setlen-2); confirm whether that is intentional.
            for i in range(0, setlen-1):
                sts1 = settimestamp[i]
                pfx1 = setx[i]
                pfy1 = sety[i]
                #test current time stamp
                ts1 = sts1*0.000001
                #pts1 = prevtimestamp*0.000001
                diff1 = ts - ts1
                # if current distance greater than 0.15, discard
                if diff1 > 0.15:
                    #settimestamp.remove(sts1)
                    del settimestamp[i]
                    # also remove from setx and sety
                    del setx[i]
                    del sety[i]
                    continue
                if diff1< 0.05:
                    continue
                # if falls within 0.05 to 0.15
                pfx1 = setx[i]
                pfy1 = sety[i]
                fromi1 = invlookupdict[(pfx1,pfy1)]
                toi1 = invlookupdict[(fx, fy)]
                # debug -12, 29 and -6, 17
                if pfx1==-6 and pfy1 ==17:
                    print('to x:',fx)
                    print('to y:',fy)
                    print("row",irow)
                # change to not count when prev pt equals current pt
                mcount1 = trajcount1[(fromi1,toi1)]
                trajcount1[(fromi1, toi1)] = mcount1+1
                #print("from coord at", pfx1, pfy1)
                #print("to coord at", fx, fy)
        # add to the trajectory
        # higher than 0.15
        if diff > 0.15:
            # set previous values
            pfx = fx
            pfy = fy
            prevtimestamp = timestamp
            # clear the sets?
            continue
        # lower than 0.05
        if diff < 0.05:
            #print("settimestamp at", irow)
            #print("current time stamp", timestamp)
            #print("previous time stamp", prevtimestamp)
            # add to sets
            #prevtimestamp stays the same
            # pfx and pfy stay the same
            settimestamp.append(prevtimestamp)
            prevtimestamp =timestamp
            setx.append(pfx)
            pfx =fx
            sety.append(pfy)
            pfy = fy
            continue # continue
        # if falls within 0.05 to 0.15
        #print('current timestamp:',ts)
        # now save to the map(i,j)
        fromi = invlookupdict[(pfx,pfy)]
        #topt = []
        toi = invlookupdict[(fx,fy)]
        # debug -12, 29 and -6, 17
        if pfx==-6 and pfy==17:
            print('outer loop to x:',fx)
            print('outer loop to y:',fy)
            #print('real x val',currx)
            #print('real y val',curry)
            print("row",irow)
        mcount = trajcount1[(fromi,toi)]
        trajcount1[(fromi, toi)] = mcount+1
        # set previous
        pfx = fx
        pfy = fy
        #prevframe = frameindex
        #prevtrajnum= trajnum
        prevtimestamp = timestamp
        # condition break for testing
        #if irow > 10000:
        #    print
        #    print("fx", fx)
        #    print("fy", fy)
        #    break
|
[
"noreply@github.com"
] |
ssy248.noreply@github.com
|
ac66200d1dd1ca23fe2f3468e6f90795b56a4e74
|
cd63877cac79429599bf30b7ad916ff52b7df266
|
/zvt/api/account.py
|
89674a5576d0babd121511684721c472dc901db8
|
[] |
no_license
|
zfsamzfsam/zvt
|
2f803c3d0f4e5a4729b38fa316c007192e0a464f
|
f77f27d1ccd199a1b860ead71d4e6738438f804a
|
refs/heads/master
| 2020-05-30T22:43:22.894053
| 2019-05-31T03:16:55
| 2019-05-31T03:16:55
| 189,998,719
| 1
| 0
| null | 2019-06-03T12:15:05
| 2019-06-03T12:15:04
| null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
# -*- coding: utf-8 -*-
from zvt.api.common import get_data
from zvt.domain.account import SimAccount, Position, Order
def get_account(trader_name=None, return_type='df', start_timestamp=None, end_timestamp=None,
                filters=None, session=None, order=None, limit=None):
    """Query SimAccount records, optionally restricted to a single trader.

    When *trader_name* is given, an equality filter on
    SimAccount.trader_name is appended to any caller-supplied filters;
    every other argument is forwarded to get_data unchanged.
    """
    if trader_name:
        by_trader = SimAccount.trader_name == trader_name
        filters = filters + [by_trader] if filters else [by_trader]
    return get_data(data_schema=SimAccount, security_id=None, codes=None, level=None,
                    provider='zvt', columns=None, return_type=return_type,
                    start_timestamp=start_timestamp, end_timestamp=end_timestamp,
                    filters=filters, session=session, order=order, limit=limit)
def get_position(trader_name=None, return_type='df', start_timestamp=None, end_timestamp=None,
                 filters=None, session=None, order=None, limit=None):
    """Query Position records, optionally restricted to a single trader.

    When *trader_name* is given, an equality filter on
    Position.trader_name is appended to any caller-supplied filters;
    every other argument is forwarded to get_data unchanged.
    """
    if trader_name:
        by_trader = Position.trader_name == trader_name
        filters = filters + [by_trader] if filters else [by_trader]
    return get_data(data_schema=Position, security_id=None, codes=None, level=None,
                    provider='zvt', columns=None, return_type=return_type,
                    start_timestamp=start_timestamp, end_timestamp=end_timestamp,
                    filters=filters, session=session, order=order, limit=limit)
def get_orders(trader_name=None, return_type='df', start_timestamp=None, end_timestamp=None,
               filters=None, session=None, order=None, limit=None):
    """Query Order records, optionally restricted to a single trader.

    When *trader_name* is given, an equality filter on Order.trader_name
    is appended to any caller-supplied filters; every other argument is
    forwarded to get_data unchanged.
    """
    if trader_name:
        by_trader = Order.trader_name == trader_name
        filters = filters + [by_trader] if filters else [by_trader]
    return get_data(data_schema=Order, security_id=None, codes=None, level=None,
                    provider='zvt', columns=None, return_type=return_type,
                    start_timestamp=start_timestamp, end_timestamp=end_timestamp,
                    filters=filters, session=session, order=order, limit=limit)
if __name__ == '__main__':
    # Ad-hoc smoke test: dump all simulated accounts when run directly.
    print(get_account())
|
[
"5533061@qq.com"
] |
5533061@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.