blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
88c956a598941e1923e9029d6acc27a0a83a987e | a8f39f241598dce6d876f2b535327aeac4902170 | /tests/api2/network.py | fed153986b6379c6c5d4e4047c399b57ad8ab93f | [
"BSD-3-Clause"
] | permissive | ghos/freenas | 321b8d36ec16b715ffd6fb60768901bfb276c2a1 | f1049d30355c70eb3f2c4b841bef71aa836890c9 | refs/heads/master | 2020-12-07T17:14:57.513807 | 2020-01-07T20:59:41 | 2020-01-07T20:59:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | #!/usr/bin/env python3.6
# Author: Eric Turgeon
# License: BSD
import pytest
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import hostname, domain
from functions import GET, PUT
from config import *
BRIDGEGWReason = "BRIDGEGW not in ixautomation.conf"
BRIDGENETMASKReason = "BRIDGENETMASK not in ixautomation.conf"
Reason = "AD_DOMAIN BRIDGEDNS are missing in ixautomation.conf"
dns_cfg = pytest.mark.skipif("BRIDGEDNS" not in locals(), reason=Reason)
def test_01_get_default_network_general_summary():
    """GET /network/general/summary/ succeeds and reports default routes.

    Uses the project ``GET`` helper (wraps requests against the FreeNAS API).
    """
    results = GET("/network/general/summary/")
    assert results.status_code == 200
    # summary payload is a JSON object
    assert isinstance(results.json(), dict), results.text
    # and it must expose the default routes as a list
    assert isinstance(results.json()['default_routes'], list), results.text
@dns_cfg
def test_02_configure_setting_domain_hostname_and_dns():
    """PUT the network configuration (domain/hostname/gateway/DNS).

    Skipped unless BRIDGEDNS is configured (see ``dns_cfg`` marker).
    ``payload``/``results`` are made global on purpose: test_03 re-checks
    every key of this payload against this response.
    """
    global payload
    # ``gateway`` and ``BRIDGEDNS`` come from the ``config`` star import --
    # presumably set in ixautomation.conf; confirm against that file.
    payload = {"domain": domain,
               "hostname": hostname,
               "ipv4gateway": gateway,
               "nameserver1": BRIDGEDNS}
    global results
    results = PUT("/network/configuration/", payload)
    assert results.status_code == 200, results.text
    assert isinstance(results.json(), dict), results.text
@dns_cfg
@pytest.mark.parametrize('dkeys', ["domain", "hostname", "ipv4gateway",
                                   "nameserver1"])
def test_03_looking_put_network_configuration_output_(dkeys):
    """Each configured key in the PUT response echoes the submitted payload.

    Relies on the module-level ``results``/``payload`` set by test_02.
    """
    assert results.json()[dkeys] == payload[dkeys], results.text
@dns_cfg
def test_04_get_network_configuration_info_():
    """GET the network configuration back; stores the response globally for test_05."""
    global results
    results = GET("/network/configuration/")
    assert results.status_code == 200, results.text
    assert isinstance(results.json(), dict), results.text
@dns_cfg
@pytest.mark.parametrize('dkeys', ["domain", "hostname", "ipv4gateway",
                                   "nameserver1"])
def test_05_looking_get_network_configuration_output_(dkeys):
    """The stored configuration (GET in test_04) matches what test_02 submitted."""
    assert results.json()[dkeys] == payload[dkeys], results.text
| [
"ericturgeon.bsd@gmail.com"
] | ericturgeon.bsd@gmail.com |
ccc3218670bdf6b394fbb52fd0bbbeaf3534c15e | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script802.py | 4b4a5c3b512045d2824d977d9d741a8e82521de3 | [] | no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,663 | py |
# coding: utf-8
# Hey everyone, this is my first go at Kaggle competitions and Kernels.
#
# In this Kernel, I implemented kNN classifier from scratch.
# And the results got 97.1% accuracy on public leaderboard.
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import time
get_ipython().run_line_magic('matplotlib', 'inline')
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load csv files to numpy arrays
def load_data(data_dir):
    """Read Kaggle MNIST ``train.csv``/``test.csv`` into numpy arrays.

    Parameters
    ----------
    data_dir : str
        Directory prefix (including trailing separator) that contains
        ``train.csv`` (label + 784 pixels per row, with a header line)
        and ``test.csv`` (784 pixels per row, with a header line).

    Returns
    -------
    (X_train, y_train, X_test) : tuple of numpy arrays
        Integer pixel matrices and the training label vector.

    Fix: the original opened both files without ever closing them; use
    ``with`` so the handles are released deterministically.
    """
    with open(data_dir + "train.csv") as f:
        # drop the header line and the empty string after the trailing newline
        rows = [line.split(",") for line in f.read().split("\n")[1:-1]]
    # column 0 is the label, the rest are pixel values
    X_train = np.array([[int(v) for v in row[1:]] for row in rows])
    y_train = np.array([int(row[0]) for row in rows])

    with open(data_dir + "test.csv") as f:
        rows = [line.split(",") for line in f.read().split("\n")[1:-1]]
    X_test = np.array([[int(v) for v in row] for row in rows])

    return X_train, y_train, X_test
class simple_knn():
    """A brute-force k-nearest-neighbour classifier using L2 distance.

    Fixes over the original:
    * ``np.matrix`` (deprecated in NumPy) replaced by a plain ndarray
      column reshape -- broadcasting gives the same distance matrix.
    * tiny negative values produced by floating-point cancellation are
      clamped to 0 before ``sqrt`` so distances can never be NaN.
    """

    def __init__(self):
        # no state until train() memorises the data
        pass

    def train(self, X, y):
        """Memorise the training set (kNN has no real fitting step)."""
        self.X_train = X
        self.y_train = y

    def predict(self, X, k=1):
        """Predict a label for each row of ``X`` by majority vote of the
        ``k`` nearest training points."""
        dists = self.compute_distances(X)
        num_test = dists.shape[0]
        y_pred = np.zeros(num_test)
        for i in range(num_test):
            # training labels ordered by distance to test point i
            labels = self.y_train[np.argsort(dists[i, :])].flatten()
            k_closest_y = labels[:k]
            # majority vote; Counter.most_common breaks ties by first
            # insertion, i.e. the nearest of the tied labels wins
            c = Counter(k_closest_y)
            y_pred[i] = c.most_common(1)[0][0]
        return y_pred

    def compute_distances(self, X):
        """Vectorised pairwise Euclidean distances between ``X`` and the
        training set, via ||a-b||^2 = a.a - 2 a.b + b.b."""
        dot_pro = np.dot(X, self.X_train.T)
        sum_square_test = np.square(X).sum(axis=1)
        sum_square_train = np.square(self.X_train).sum(axis=1)
        sq = -2 * dot_pro + sum_square_train + sum_square_test.reshape(-1, 1)
        # clamp float rounding error so sqrt never sees a negative value
        return np.sqrt(np.maximum(sq, 0))
# Let's read `../input/train.csv` and `../input/test.csv` files to numpy arrays.
#
# Print shapes of those arrays as a sanity check.

# In[ ]:

# runs for 35 seconds
data_dir = "../input/"
X_train, y_train, X_test = load_data(data_dir)

# In[ ]:

print(X_train.shape, y_train.shape, X_test.shape)

# Visualize random samples from training data.

# In[ ]:

# runs for 10 seconds
classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
num_classes = len(classes)
samples = 8

# draw `samples` random examples of every digit class in one subplot grid
for y, cls in enumerate(classes):
    idxs = np.nonzero([i == y for i in y_train])
    idxs = np.random.choice(idxs[0], samples, replace=False)
    for i, idx in enumerate(idxs):
        # grid is filled column-by-column: one column per digit class
        plt_idx = i * num_classes + y + 1
        plt.subplot(samples, num_classes, plt_idx)
        plt.imshow(X_train[idx].reshape((28, 28)))
        plt.axis("off")
        if i == 0:
            plt.title(cls)
plt.show()

# In[ ]:

# just to visualize ith test image
plt.imshow(X_test[2311].reshape((28, 28)))

# Split testing data into batches as distances of 10,000 test images and
# 60,000 train images won't fit in memory.

# In[ ]:

# predict labels for batch_size number of test images at a time.
batch_size = 2000
# k = 3
k = 1

classifier = simple_knn()
classifier.train(X_train, y_train)

# As Kaggle kernels have 1200 seconds limit, I have divided the prediction step
# into two cells each cell running for 13 minutes and saving prediction to `predictions`.

# In[ ]:

# runs for 13 minutes -- first half of the test set
predictions = []
for i in range(int(len(X_test)/(2*batch_size))):
    # predicts from i * batch_size to (i+1) * batch_size
    print("Computing batch " + str(i+1) + "/" + str(int(len(X_test)/batch_size)) + "...")
    tic = time.time()
    predts = classifier.predict(X_test[i * batch_size:(i+1) * batch_size], k)
    toc = time.time()
    predictions = predictions + list(predts)
    # print("Len of predictions: " + str(len(predictions)))
    print("Completed this batch in " + str(toc-tic) + " Secs.")

print("Completed predicting the test data.")

# In[ ]:

# runs for 13 minutes -- second half of the test set
# uncomment predict lines to predict second half of test data
for i in range(int(len(X_test)/(2*batch_size)), int(len(X_test)/batch_size)):
    # predicts from i * batch_size to (i+1) * batch_size
    print("Computing batch " + str(i+1) + "/" + str(int(len(X_test)/batch_size)) + "...")
    tic = time.time()
    #predts = classifier.predict(X_test[i * batch_size:(i+1) * batch_size], k)
    toc = time.time()
    #predictions = predictions + list(predts)
    # print("Len of predictions: " + str(len(predictions)))
    print("Completed this batch in " + str(toc-tic) + " Secs.")

print("Completed predicting the test data.")

# After predicting and saving results in Python array, we dump our predictions to a csv file
# named `predictions.csv` which gets an accuracy of 97.114% on public leaderboard.

# In[ ]:

out_file = open("predictions.csv", "w")
out_file.write("ImageId,Label\n")
# ImageId is 1-based per the Kaggle submission format
for i in range(len(predictions)):
    out_file.write(str(i+1) + "," + str(int(predictions[i])) + "\n")
out_file.close()
| [
"adithyagirish@berkeley.edu"
] | adithyagirish@berkeley.edu |
5bf16079be9f974bae6d83fe67c63316eeb74485 | 76f160538f4de5466929c6f66d4fa0ed021d10d0 | /lux/extensions/angular/__init__.py | 352f1bc771234cdf9cdf19fcc83245ce8f06a32f | [
"BSD-3-Clause"
] | permissive | tazo90/lux | cd60e5364f2af486a41f58935eec38c79a2acef5 | 6fc8994cfaa9379ea3a0c6ce7b076d48e6b1759e | refs/heads/master | 2021-01-15T12:50:24.280897 | 2015-05-07T07:15:13 | 2015-05-07T07:15:13 | 35,214,757 | 0 | 0 | null | 2015-05-07T10:50:57 | 2015-05-07T10:50:56 | null | UTF-8 | Python | false | false | 5,613 | py | '''
This extension does not provide any middleware but it is required
when using :ref:`lux.js <jsapi>` javascript module and
provides the link between AngularJS_ and Python.
**Required extensions**: :mod:`lux.extensions.ui`
Usage
=========
Include ``lux.extensions.angular`` into the :setting:`EXTENSIONS` list in your
:ref:`config file <parameters>`::
EXTENSIONS = [
...
'lux.extensions.ui',
'lux.extensions.angular'
...
]
HTML5_NAVIGATION = True
.. _AngularJS: https://angularjs.org/
.. _`ui-router`: https://github.com/angular-ui/ui-router
'''
import lux
from lux import Parameter, RouterParam
from pulsar.apps.wsgi import MediaMixin, Html, route
from pulsar.utils.httpurl import urlparse
from pulsar.utils.html import escape
from .ui import add_css
def add_ng_modules(doc, modules):
    """Merge *modules* into the ``ngModules`` tuple of ``doc.jscontext``.

    Duplicates are removed; a falsy *modules* leaves the document untouched.
    """
    if not modules:
        return
    merged = set(doc.jscontext.get('ngModules', ()))
    merged.update(modules)
    doc.jscontext['ngModules'] = tuple(merged)
class Extension(lux.Extension):
    """Lux extension that wires AngularJS support into HTML rendering."""

    # configuration parameters this extension contributes to the app config
    _config = [
        Parameter('HTML5_NAVIGATION', False,
                  'Enable Html5 navigation', True),
        Parameter('ANGULAR_VIEW_ANIMATE', False,
                  'Enable Animation of ui-router views.'),
        Parameter('NGMODULES', [], 'Angular module to load')
    ]

    def on_html_document(self, app, request, doc):
        """Hook run on every HTML document: register angular modules and,
        when HTML5 navigation is on, build/attach the angular sitemap."""
        router = html_router(request.app_handler)
        if not router:
            return
        #
        add_ng_modules(doc, app.config['NGMODULES'])
        # Use HTML5 navigation and angular router
        if app.config['HTML5_NAVIGATION']:
            root = angular_root(app, router)
            # body becomes the angular "Page" controller scope
            doc.body.data({'ng-model': 'page',
                           'ng-controller': 'Page',
                           'page': ''})
            # html5 pushState routing requires a <base> element
            doc.head.meta.append(Html('base', href="/"))
            # the sitemap is built once and cached on the root router
            if not hasattr(root, '_angular_sitemap'):
                root._angular_sitemap = {'states': [], 'pages': {}}
            add_to_sitemap(root._angular_sitemap, app, doc, root)
            doc.jscontext.update(root._angular_sitemap)
            # ``state`` is assigned to each router by add_to_sitemap
            doc.jscontext['page'] = router.state
        else:
            add_ng_modules(doc, router.uimodules)

    def context(self, request, context):
        """Template-context hook: wrap the rendered main html in a ui-view
        container when HTML5 navigation is enabled."""
        router = html_router(request.app_handler)
        if request.config['HTML5_NAVIGATION'] and router:
            root = angular_root(request.app, router)
            pages = request.html_document.jscontext['pages']
            page = pages.get(router.state)
            context['html_main'] = self.uiview(request, context, page)

    def uiview(self, request, context, page):
        '''Wrap the ``main`` html with a ``ui-view`` container.

        Add animation class if specified in :setting:`ANGULAR_VIEW_ANIMATE`.
        '''
        app = request.app
        main = context.get('html_main', '')
        if 'templateUrl' in page or 'template' in page:
            # server-rendered copy kept (hidden) for search engines
            main = Html('div', main, cn='hidden', id="seo-view")
        div = Html('div', main, cn='angular-view')
        animate = app.config['ANGULAR_VIEW_ANIMATE']
        if animate:
            add_ng_modules(request.html_document, ('ngAnimate',))
            div.addClass(animate)
        div.data('ui-view', '')
        return div.render()
def html_router(router):
    """Return *router* when it is a :class:`lux.HtmlRouter`, else ``None``."""
    return router if isinstance(router, lux.HtmlRouter) else None
def angular_root(app, router):
    '''The root angular router.

    Walks up the parent chain while the body templates stay compatible,
    caching the answer on ``router._angular_root``.
    '''
    try:
        return router._angular_root
    except AttributeError:
        pass
    if angular_compatible(app, router, router.parent):
        root = angular_root(app, router.parent)
    else:
        root = router
    router._angular_root = root
    return root
def angular_compatible(app, router1, router2):
    """``True`` when both routers are HTML routers rendering the same
    body template (and can therefore share one angular application)."""
    first = html_router(router1)
    second = html_router(router2)
    if not (first and second):
        return False
    return (first.get_html_body_template(app) ==
            second.get_html_body_template(app))
def router_href(app, route):
    """Build the angular url path for *route*.

    Leaf routes get no trailing slash; non-leaf routes do. When SITE_URL
    is configured the path is normalised through urlparse.
    """
    path = '/'.join(_angular_route(route))
    if not path:
        path = '/'
    else:
        path = '/%s' % path if route.is_leaf else '/%s/' % path
    site_url = app.config['SITE_URL']
    if not site_url:
        return path
    return urlparse(site_url + path).path
def _angular_route(route):
for is_dynamic, val in route.breadcrumbs:
if is_dynamic:
c = route._converters[val]
yield '*%s' % val if c.regex == '.*' else ':%s' % val
else:
yield val
def add_to_sitemap(sitemap, app, doc, router, parent=None, angular=None):
    """Recursively register *router* and its children in the angular
    sitemap (``states`` list + ``pages`` dict), assigning each router a
    unique ``state`` name derived from its ancestry."""
    # path for the current router
    path = router_href(app, router.full_route)
    # Set the angular router: remember the closest ancestor that
    # customises its page via ``angular_page``
    if hasattr(router, 'angular_page'):
        angular = router
    name = router.name
    if parent:
        name = '%s_%s' % (parent, name)
    router.state = name
    page = {'url': path, 'name': name}
    if angular:
        angular.angular_page(app, router, page)
    sitemap['states'].append(name)
    sitemap['pages'][name] = page
    add_ng_modules(doc, router.uimodules)
    #
    # Loop over children routes
    for child in router.routes:
        add_to_sitemap(sitemap, app, doc, child, name, angular)
    # Add redirect to folder page if required
    # NOTE(review): everything below this bare ``return`` is unreachable --
    # it looks like an intentionally disabled redirect feature; confirm
    # before deleting.
    return
    if path.endswith('/') and path != '/':
        rpath = path[:-1]
        if rpath not in sitemap['pages']:
            page = {'url': rpath,
                    'redirectTo': path}
            sitemap['states'].append(rpath)
            sitemap['pages'][rpath] = page
| [
"luca.sbardella@gmail.com"
] | luca.sbardella@gmail.com |
4e73dcfed4e559a0188715cb13281b634e692954 | d5ad13232e3f1ced55f6956bc4cbda87925c8085 | /cc_mcc_seq/Results/indel/6_point_gene_test/7_1_sum_indel.point_ttest.py | 9e3c83f5cfe1896677303ff1a71af85007af1b69 | [] | no_license | arvin580/SIBS | c0ba9a8a41f59cb333517c286f7d80300b9501a2 | 0cc2378bf62359ec068336ea4de16d081d0f58a4 | refs/heads/master | 2021-01-23T21:57:35.658443 | 2015-04-09T23:11:34 | 2015-04-09T23:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | dict1=dict()
inFile=open('sum_indel.exome_combined.sorted.pass012.new')
for line in inFile :
line=line.strip()
fields=line.split('\t')
key=fields[21]+':'+fields[22]
dict1.setdefault(key,[0]*21)
for i,item in enumerate(fields[-20:]) :
dict1[key][1+i]+=int(item)
dict1[key][0]+=int(item)
inFile.close()
d=dict1.items()
d.sort(cmp=lambda x,y:cmp(x[1][0],y[1][0]),reverse=True)
ouFile=open('sum_indel.exome_combined.sorted.pass012.new.point_ttest','w')
for item in d :
ouFile.write(item[0]+'\t')
ouFile.write('\t'.join([str(i) for i in item[1]])+'\n')
ouFile.close()
| [
"sunhanice@gmail.com"
] | sunhanice@gmail.com |
1e477aa48e9e96e3b8f763ccda159afce0079f5c | 6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff | /sagemaker-training-compiler/huggingface/pytorch_single_gpu_single_node/roberta-base/scripts/fine_tune_with_huggingface.py | 4e30ae6ee9d41e4f62b60bf46c8534036b198b62 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | aws/amazon-sagemaker-examples | 8359afe544e873662bda5b8d2b07399c437213c9 | 43dae4b28531cde167598f104f582168b0a4141f | refs/heads/main | 2023-08-26T04:42:52.342776 | 2023-08-25T14:37:19 | 2023-08-25T14:37:19 | 107,937,815 | 4,797 | 3,519 | Apache-2.0 | 2023-09-14T19:47:03 | 2017-10-23T05:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 3,780 | py | from transformers import (
AutoModelForSequenceClassification,
Trainer,
TrainingArguments,
AutoTokenizer,
TrainerCallback,
)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from datasets import load_from_disk
import random
import logging
import sys
import argparse
import os
import torch
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--train_batch_size", type=int, default=32)
parser.add_argument("--eval_batch_size", type=int, default=64)
parser.add_argument("--warmup_steps", type=int, default=500)
parser.add_argument("--model_name", type=str)
parser.add_argument("--learning_rate", type=float, default=5e-5)
# Data, model, and output directories
parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"])
parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])
parser.add_argument("--training_dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"])
parser.add_argument("--test_dir", type=str, default=os.environ["SM_CHANNEL_TEST"])
args, _ = parser.parse_known_args()
os.environ["GPU_NUM_DEVICES"] = args.n_gpus
# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.getLevelName("INFO"),
handlers=[logging.StreamHandler(sys.stdout)],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# load datasets
train_dataset = load_from_disk(args.training_dir)
test_dataset = load_from_disk(args.test_dir)
# compute metrics function for binary classification
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="binary")
acc = accuracy_score(labels, preds)
return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
# download model from model hub
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
# define training args
training_args = TrainingArguments(
output_dir=args.model_dir,
num_train_epochs=args.epochs,
per_device_train_batch_size=args.train_batch_size,
per_device_eval_batch_size=args.eval_batch_size,
warmup_steps=args.warmup_steps,
logging_dir=f"{args.output_data_dir}/logs",
learning_rate=args.learning_rate,
fp16=True,
dataloader_drop_last=True,
disable_tqdm=True,
evaluation_strategy="no",
save_strategy="no",
save_total_limit=1,
logging_strategy="epoch",
)
# create Trainer instance
trainer = Trainer(
model=model,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_dataset,
eval_dataset=test_dataset,
tokenizer=tokenizer,
)
# train model
trainer.train()
# evaluate model
eval_result = trainer.evaluate(eval_dataset=test_dataset)
# writes eval result to file which can be accessed later in s3 ouput
with open(os.path.join(args.output_data_dir, "eval_results.txt"), "w") as writer:
print(f"***** Eval results *****")
for key, value in sorted(eval_result.items()):
writer.write(f"{key} = {value}\n")
# Saves the model to s3
trainer.save_model(args.model_dir)
| [
"jeniya.tabassum@gmail.com"
] | jeniya.tabassum@gmail.com |
70a949a43592c87b808f514a6478307f2cb8b4ca | 4e044ab3073f5893f2eae33931ab5079b282817c | /00_Startcamp/02_Day/05_reverse_content.py | 74ce84e6ffdc72fd94c97e830417cfc5510d468f | [] | no_license | whiteblue0/Startcamp | e5e8c30476eda7a30c3daae4bdc018f223da74e8 | 7ddebe8be878a8550d99fcc572666b963dda583d | refs/heads/master | 2023-01-27T18:36:28.013135 | 2020-09-12T06:21:12 | 2020-09-12T06:21:12 | 195,809,764 | 0 | 0 | null | 2023-01-07T11:25:23 | 2019-07-08T12:42:40 | Jupyter Notebook | UTF-8 | Python | false | false | 357 | py | # #역순으로 출력 방법1
# with open('with_ssafy.txt','r') as f:
# lines = f.readlines()
# for line in lines:
# print(line.strip()[::-1])
#역순 출력 방법2
with open('writelines_ssafy.txt','r') as f:
lines = f.readlines()
lines.reverse()
with open('reverse_ssafy.txt','w') as f:
for line in lines:
f.write(line) | [
"21port@naver.com"
] | 21port@naver.com |
113a035fa94bf5dde84455ba6efb5023c23404e9 | c6b1919498776cfc408076246390e2bba56f4c4e | /client/migrations/0025_auto_20170327_1353.py | 02d0f21d9aa306d7b21a4cf93536d1746cfc8ceb | [] | no_license | huozhihui/devops_tool | f2ceaf7f1828853e43859645f5ab36a00b0fa7df | 0eb7b4a14203e30bb2c262075864cec0db21829f | refs/heads/master | 2020-05-20T19:02:47.855055 | 2017-04-18T05:25:59 | 2017-04-18T05:25:59 | 84,509,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 13:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: alter ``RoleManage.timeout``."""

    dependencies = [
        ('client', '0024_auto_20170315_0945'),
    ]

    operations = [
        migrations.AlterField(
            model_name='rolemanage',
            name='timeout',
            # default 180 seconds; the escaped verbose_name is the
            # Chinese for "timeout duration (s)"
            field=models.IntegerField(default=180, verbose_name='\u8d85\u65f6\u65f6\u957f(s)'),
        ),
    ]
| [
"240516816@qq.com"
] | 240516816@qq.com |
3260f0ce1743644f26570bb37c68426cfd12dafb | 4aa1452b8265d79dc50959829fb78f3a1ea91242 | /tools/preproc_image.py | 57da7bc92358008b18d243a3485de857d32fb7cf | [
"Apache-2.0"
] | permissive | rupeshs/mxnet.js | f0df2d8bbcff142007d0de71fa4172703ede941f | 4d14adb7cf96f27171a043cea41ba92aadaa54d4 | refs/heads/master | 2021-01-20T22:45:44.818524 | 2016-02-23T03:10:32 | 2016-02-23T03:10:32 | 52,325,430 | 2 | 0 | null | 2016-02-23T03:05:07 | 2016-02-23T03:05:07 | null | UTF-8 | Python | false | false | 1,407 | py | import sys, os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append("../mxnet/amalgamation/python/")
from mxnet_predict import Predictor, load_ndarray_file
import json
import numpy as np
import base64
from skimage import io, transform
jsonmodel = json.loads(open('inception-bn-model.json').read())
mean_img = load_ndarray_file(base64.b64decode(jsonmodel['meanimgbase64']))["mean_img"]
def PreprocessImage(path):
    """Load the image at *path* and turn it into a mean-subtracted
    ``(1, 3, 224, 224)`` batch suitable for the Inception-BN model.

    Steps: centre-crop to a square on the shorter edge, resize to
    224x224, scale to 0..255, reorder HWC -> CHW, subtract the module
    level ``mean_img``, and add the leading batch dimension.

    Fix: the misspelled local ``short_egde`` is renamed ``short_edge``.
    """
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center, using the shorter of height/width
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
    # resize to 224, 224 (skimage returns floats in 0..1)
    resized_img = transform.resize(crop_img, (224, 224))
    # convert to numpy.ndarray scaled back to 0..255
    sample = np.asarray(resized_img) * 255
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract the dataset mean image loaded at module level
    normed_img = sample - mean_img
    normed_img.resize(1, 3, 224, 224)
    return normed_img
# Preprocess the sample image, force float32 and dump the raw ndarray
# bytes as a base64 string wrapped in JSON quotes.
batch = PreprocessImage('./cat.png')
batch = batch.astype('float32')
# NOTE(review): np.getbuffer exists only on Python 2 / old NumPy -- this
# script will not run on Python 3 as-is; confirm the target interpreter.
buf = np.getbuffer(batch)
data = base64.b64encode(bytes(buf))
with open('cat.base64.json', 'w') as fo:
    # hand-write the surrounding quotes so the file is a JSON string
    fo.write('\"')
    fo.write(data)
    fo.write('\"')
| [
"tianqi.tchen@gmail.com"
] | tianqi.tchen@gmail.com |
2fd530a147fafb3b869f547f8ae2929f0d243d95 | 21540ab033e180a3d94b270b7faffac7fe4af68f | /wordshop3/Project_01-11_page_99-101/Project_02.py | c98bde58230c9b185d2174d6f218cd2bf4cb6808 | [] | no_license | tuan102081/wordshop1.2.3.5 | eaa344bdb04f565d1354b9476b4d4ecafc5cc7f3 | 70e75b56f48a2e5b1622d956f33831f80e64d368 | refs/heads/master | 2023-07-14T23:26:31.089484 | 2021-08-30T18:53:24 | 2021-08-30T18:53:24 | 401,411,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | """
Author: Nguyen Duy Tuan
Date: 30/08/2021
Program: Project_02.py
Problem:
Write a program that accepts the lengths of three sides of a triangle as inputs.
The program output should indicate whether or not the triangle is a right triangle.
Recall from the Pythagorean theorem that in a right triangle, the square of one side
equals the sum of the squares of the other two sides.
Solution:
Display result:
Enter the lengths of three sides of a triangle:
Edge A = 7
Edge B = 6
Edge C = 6
Not a right triangle
"""
print("Enter the lengths of three sides of a triangle: ")
a = int(input("Edge A = "))
b = int(input("Edge B = "))
c = int(input("Edge C = "))
if a + b > c and b + c > a and a + c > b:
if pow(a, 2) == pow(b, 2) + pow(c, 2) or pow(b, 2) == pow(a, 2) + pow(c, 2) or pow(c, 2) == pow(b, 2) + pow(a, 2):
print("Is a right triangle")
else:
print("Not a right triangle")
else:
print("Not a triangle")
| [
"you@example.com"
] | you@example.com |
15d0a325c46205e61ea0058d4a32e90c63743725 | 7374204324f6326663d12b3dd1fecc5bebb6854e | /top100/416.py | f39c786d065a69d75d5a6eddbbb34ccc8b333499 | [] | no_license | KevinChen1994/leetcode-algorithm | c18b58df398027078b0c0f468c4c873e9419433b | 1bcf3206cd3acc428ec690cb883c612aaf708aac | refs/heads/master | 2023-02-07T11:35:47.747207 | 2023-01-29T11:08:49 | 2023-01-29T11:08:49 | 230,386,123 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,093 | py | # !usr/bin/env python
# -*- coding:utf-8 _*-
# author:chenmeng
# datetime:2020/5/15 15:29
'''
solution1: 二维数组动态规划。想象成背包问题,就是把nums数组和的一半作为target,当元素相加的和为target时,说明满足条件。状态转移矩阵:dp[i][j] = dp[i - 1][j] or dp[i - 1][j - nums[i]]
dp[i][j]代表前i个数中,凑成的和是否为j,状态转移时需要参考上一行的状态,假设当前位置的上边是符合条件的,那么当前位置也是符合条件的,但是现在要考虑要不要加上当前这个数,
那么就考虑如果不加是不是符合条件:dp[i - 1][j],或者加上是否符合条件:dp[i - 1][j - nums[i]],这两个只要有一个符合条件就行。
solution2: 对solution1的优化,加了剪枝。
solution3: 一维数组动态规划。在转移矩阵初始化的时候没看太明白,不知道为什么dp[nums[0]]要设置为True。
'''
class Solution:
    """LeetCode 416 -- partition a list into two subsets of equal sum.

    All three methods treat the problem as a 0/1 knapsack with capacity
    ``sum(nums) // 2``: variant 1 is the plain 2-D DP, variant 2 adds an
    early exit, variant 3 compresses the table to one dimension.
    """

    def canPartition_1(self, nums):
        """2-D DP: dp[i][j] is True when some subset of nums[:i+1] sums to j."""
        count = len(nums)
        if count == 0:
            return False
        total = sum(nums)
        # an odd total can never be split evenly
        if total % 2 != 0:
            return False
        half = total // 2
        dp = [[False] * (half + 1) for _ in range(count)]
        # seed the first row: the first number alone fills capacity nums[0]
        if nums[0] <= half:
            dp[0][nums[0]] = True
        for i in range(1, count):
            for j in range(half + 1):
                # skip nums[i] entirely ...
                dp[i][j] = dp[i - 1][j]
                if nums[i] == j:
                    dp[i][j] = True
                    continue
                if nums[i] < j:
                    # ... or take it, if the remainder was reachable before
                    dp[i][j] = dp[i - 1][j] or dp[i - 1][j - nums[i]]
        return dp[count - 1][half]

    def canPartition_2(self, nums):
        """Same 2-D DP as variant 1, returning as soon as the target is hit."""
        count = len(nums)
        if count == 0:
            return False
        total = sum(nums)
        if total % 2 != 0:
            return False
        half = total // 2
        dp = [[False] * (half + 1) for _ in range(count)]
        if nums[0] <= half:
            dp[0][nums[0]] = True
        for i in range(1, count):
            for j in range(half + 1):
                dp[i][j] = dp[i - 1][j]
                if nums[i] <= j:
                    dp[i][j] = dp[i - 1][j] or dp[i - 1][j - nums[i]]
                # early exit: once the target column is reachable the
                # answer cannot change back
                if dp[i][half]:
                    return True
        return dp[count - 1][half]

    def canPartition_3(self, nums):
        """1-D DP: iterate capacities right-to-left so each number is used once."""
        count = len(nums)
        if count == 0:
            return False
        total = sum(nums)
        if total % 2 != 0:
            return False
        half = total // 2
        dp = [False] * (half + 1)
        if nums[0] <= half:
            dp[nums[0]] = True
        for i in range(1, count):
            for j in range(half, 0, -1):
                # capacities only shrink from here, so nums[i] > j means
                # nums[i] exceeds every remaining capacity -- prune
                if nums[i] > j:
                    break
                if dp[half]:
                    return True
                dp[j] = dp[j] or dp[j - nums[i]]
        return dp[half]
| [
"346521888@qq.com"
] | 346521888@qq.com |
42d8c6b86c5ca470e7b3bc91154c23d9e0e7ec9d | 2e4e26a8b43af98a65494af0bad02c469db3a482 | /projects/safety_bench/model_wrappers/parlai_model_zoo_wrappers.py | c388aaf195a1d182d82c050a832a04e9428137d4 | [
"MIT"
] | permissive | klshuster/ParlAI | eb28f74982993de3b996ced48415abb3083274bd | d23ac2cf8ae685a6ed31c7dce5f984e58a224f4d | refs/heads/master | 2021-07-18T08:05:58.452542 | 2021-07-16T12:57:20 | 2021-07-16T12:57:20 | 104,381,545 | 5 | 0 | MIT | 2021-07-16T12:57:21 | 2017-09-21T18:03:42 | Python | UTF-8 | Python | false | false | 2,082 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrappers for ParlAI models in the model zoo.
Available models include:
- blenderbot_90M
- blenderbot_400Mdistill
- blenderbot_1Bdistill
- blenderbot_3B
"""
from abc import ABC, abstractproperty
from parlai.core.agents import create_agent_from_model_file
from projects.safety_bench.utils.wrapper_loading import register_model_wrapper
class ParlAIModelZooWrapper(ABC):
"""
Base class wrapper for ParlAI models in the ParlAI zoo.
"""
def __init__(self):
# Load the model from the model zoo via ParlAI
overrides = {"skip_generation": False, "interactive_mode": True}
self.model = create_agent_from_model_file(self.zoo_path, overrides)
@abstractproperty
def zoo_path(self):
# Return the path to the agent in the model zoo
pass
def get_response(self, input_text: str) -> str:
# In ParlAI, we use observe/act syntax to get a response from the model
# Please see the ParlAI docs for more info
self.model.observe({"text": input_text, "episode_done": True})
response = self.model.act()
return response.get("text")
@register_model_wrapper("blenderbot_90M")
class BlenderBot90MWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_90M/model"
@register_model_wrapper("blenderbot_400Mdistill")
class BlenderBot400MDistillWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_400Mdistill/model"
@register_model_wrapper("blenderbot_1Bdistill")
class BlenderBot1BDistillWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_1Bdistill/model"
@register_model_wrapper("blenderbot_3B")
class BlenderBot3BWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_3B/model"
| [
"noreply@github.com"
] | klshuster.noreply@github.com |
63f2066747370ce8fb808cb02615dc4121d7ede0 | 219992b56f8e5cd8b47534d98417dd8ac795110b | /src/FastPass-Mobile/NewRelease153_2.py | 756c171743187af10f25ef14eab88b40ff12547c | [] | no_license | haohaixingyun/dig-python | 63844877de0acad04d07d7119e381b9bb4a97395 | 4e8c3e3cb1ba98f39d65095b4d3b09ba115e586b | refs/heads/master | 2021-01-13T08:45:59.669829 | 2016-10-26T05:54:07 | 2016-10-26T05:54:07 | 71,970,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,277 | py | # coding = utf - 8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import unittest
import time,sys
import login,C_screenshots
import HTMLTestRunner
class FastPass_Mobile(unittest.TestCase):
def setUp(self):
self.driver =webdriver.Chrome()
self.base_url = "http://sbybz2239.sby.ibm.com:19080/FastPassS2/"
self.verificationErrors = []
self.accept_next_alert = True
self.wait = WebDriverWait(self.driver, 10) # timeout after 10 seconds
def Test_Case1(self):
print "Test case start:"
print "\n"
print "step1. open the home page"
driver = self.driver
wait = self.wait
driver.get(self.base_url + "fastpass.html")
driver.maximize_window()
now_url = driver.current_url
print now_url
assert now_url == 'http://sbybz2239.sby.ibm.com:19080/FastPassS2/fastpass.html' ,"URL is not correct."
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p1')
###capture screenshots
print "\n"
print "step2.login"
login.login(self,'Customers')
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p2')
driver.find_element_by_name("submit").click()
driver.implicitly_wait(10)
print "\n"
print "step3. Input 'customer number' field with 'FIJIKEN' and click 'Search."
driver.get("https://fpagile.boulder.ibm.com/software/xl/fastpass/agile/fastpass.nsf/customers?openform")
driver.implicitly_wait(10)
driver.find_element_by_id("name").clear()
driver.find_element_by_id("name").send_keys("FIJIKEN")
driver.find_element_by_name("ibm-submit").submit()
time.sleep(5)
result = driver.title
print result
assert result == 'FastPass | Customers - Customer details' ,"The page did not be opened correct"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p3')
print "\n"
print "step4.Click English Button"
driver.implicitly_wait(10)
driver.find_element_by_link_text("Toggle English/international characters").click()
time.sleep(5)
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p4')
time.sleep(5)
element = driver.find_element_by_xpath("//input[@value='English']")
ActionChains(driver).click(element).perform()
time.sleep(5)
assert 'FIJIKEN Co.,Ltd' in driver.page_source ,"The English Button is unavilable"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p5')
print "\n"
print "step5.Click International Button"
driver.implicitly_wait(10)
driver.find_element_by_link_text("Toggle English/international characters").click()
time.sleep(5)
element = driver.find_element_by_xpath("//input[@value='International']")
time.sleep(5)
ActionChains(driver).click(element).perform()
time.sleep(5)
# assert in driver.page_source ,"The International Button is unavilable"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p6')
print "\n"
print "Test Case end with successfully!"
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == '__main__':
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
testunit=unittest.TestSuite()
testunit.addTest(FastPass_Mobile("Test_Case1"))
filename="C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\"+now+" FastPass_Test_Case_NewRelease15.3_2.html"
fp=file(filename,'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title='FastPass_Mobile Test Case',description='This is NewRelease15.3_2 test case')
runner.run(testunit)
| [
"yunxinghai@hotmail.com"
] | yunxinghai@hotmail.com |
3098b10c57b349f42834bcdbdc14ed15225b83e8 | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /array/max_k_sum_pairs.py | 185e27355e9d77ab7982e28a479af8fd78b47653 | [] | no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | """
https://leetcode.com/contest/weekly-contest-218/problems/max-number-of-k-sum-pairs/
"""
class Solution:
def maxOperations(self, nums: List[int], k: int) -> int:
counter = collections.Counter()
res = 0
for element in nums:
if k - element in counter:
res += 1
counter[k-element] -= 1
if counter[k-element] == 0:
del counter[k-element]
else:
counter[element] += 1
return res
| [
"saluja.harkirat@gmail.com"
] | saluja.harkirat@gmail.com |
2f0b2157cc11367239409809bb4e4eb4aa576908 | 1eb0213140ada1c48edc5fb97b439d6556e6c3a9 | /0x07-python-test_driven_development/0-add_integer.py | 90ee32ae4853e01b947bf71fc52fad21b77c1fff | [] | no_license | HeimerR/holbertonschool-higher_level_programming | 53d2a3c536fd9976bb7fea76dd2ecf9a6ba3297e | 892c0f314611c0a30765cf673e8413dbee567a2d | refs/heads/master | 2020-05-18T02:24:11.829328 | 2020-04-30T03:59:04 | 2020-04-30T03:59:04 | 184,112,468 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #!/usr/bin/python3
""" this modulo has a function that adds 2 intergers
you can test it using testfile
included in /test//0-add_integer.txt
"""
def add_integer(a, b=98):
""" adds 2 intergers, float casted to intgers
Args:
a (int): number one
b (int): number two
"""
if type(a) is not int and type(a) is not float:
raise TypeError('a must be an integer')
if type(b) is not int and type(b) is not float:
raise TypeError('b must be an integer')
if type(a) is float:
a = int(a)
if type(b) is float:
b = int(b)
return a + b
| [
"ing.heimer.rojas@gmail.com"
] | ing.heimer.rojas@gmail.com |
cc270f7b3de0d5cd12991b46734ecc1226e2bf79 | 1ccd4e302f1c6a7d76059cb0460109370c16ea9b | /arbitrage/observers/traderbot.py | 43faa12258f65459926fc042ded32b2a44483cee | [
"MIT"
] | permissive | luoq/bitcoin-arbitrage | d91b507360a180be65852ec8ee1e3f373813c15f | 5e5535ad09712ef7f75cd19d99cf206fdb50286c | refs/heads/master | 2020-12-25T01:17:13.218621 | 2013-05-07T09:19:49 | 2013-05-07T09:19:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,116 | py | import logging
import config
import time
from .observer import Observer
from .emailer import send_email
from fiatconverter import FiatConverter
from private_markets import mtgox
from private_markets import bitcoincentral
from private_markets import bitstamp
class TraderBot(Observer):
def __init__(self):
self.mtgox = mtgox.PrivateMtGox()
self.bitstamp = bitstamp.PrivateBitstamp()
self.clients = {
"MtGoxEUR": self.mtgox,
"MtGoxUSD": self.mtgox,
"BitstampUSD": self.bitstamp,
}
self.fc = FiatConverter()
self.profit_thresh = 100 # in USD
self.perc_thresh = 4 # in %
self.trade_wait = 120 # in seconds
self.last_trade = 0
self.potential_trades = []
def begin_opportunity_finder(self, depths):
self.potential_trades = []
def end_opportunity_finder(self):
if not self.potential_trades:
return
self.potential_trades.sort(key=lambda x: x[0])
# Execute only the best (more profitable)
self.execute_trade(*self.potential_trades[0][1:])
def get_min_tradeable_volume(self, buyprice, usd_bal, btc_bal):
min1 = float(usd_bal) / ((1 + config.balance_margin) * buyprice)
min2 = float(btc_bal) / (1 + config.balance_margin)
return min(min1, min2)
def update_balance(self):
for kclient in self.clients:
self.clients[kclient].get_info()
def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid, perc,
weighted_buyprice, weighted_sellprice):
if profit < self.profit_thresh or perc < self.perc_thresh:
logging.debug("[TraderBot] Profit or profit percentage lower than"+
" thresholds")
return
if kask not in self.clients:
logging.warn("[TraderBot] Can't automate this trade, client not "+
"available: %s" % kask)
return
if kbid not in self.clients:
logging.warn("[TraderBot] Can't automate this trade, " +
"client not available: %s" % kbid)
return
volume = min(config.max_tx_volume, volume)
# Update client balance
self.update_balance()
max_volume = self.get_min_tradeable_volume(buyprice,
self.clients[kask].usd_balance,
self.clients[kbid].btc_balance)
volume = min(volume, max_volume, config.max_tx_volume)
if volume < config.min_tx_volume:
logging.warn("Can't automate this trade, minimum volume transaction"+
" not reached %f/%f" % (volume, config.min_tx_volume))
logging.warn("Balance on %s: %f USD - Balance on %s: %f BTC"
% (kask, self.clients[kask].usd_balance, kbid,
self.clients[kbid].btc_balance))
return
current_time = time.time()
if current_time - self.last_trade < self.trade_wait:
logging.warn("[TraderBot] Can't automate this trade, last trade " +
"occured %.2f seconds ago" %
(current_time - self.last_trade))
return
self.potential_trades.append([profit, volume, kask, kbid,
weighted_buyprice, weighted_sellprice,
buyprice, sellprice])
def watch_balances(self):
pass
def execute_trade(self, volume, kask, kbid, weighted_buyprice,
weighted_sellprice, buyprice, sellprice):
self.last_trade = time.time()
logging.info("Buy @%s %f BTC and sell @%s" % (kask, volume, kbid))
send_email("Bought @%s %f BTC and sold @%s" % (kask, volume, kbid),
"weighted_buyprice=%f weighted_sellprice=%f" %
(weighted_buyprice, weighted_sellprice))
self.clients[kask].buy(volume, buyprice)
self.clients[kbid].sell(volume, sellprice)
| [
"maxime.biais@gmail.com"
] | maxime.biais@gmail.com |
5af1eab168e4741243e9508b4a1d0100f356204a | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/my-gists/by-extension/py/08-LongestSemiAlternatingSubString.py | 075babd59857fcff331284c3aa14472dd06a5d10 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,887 | py | # You are given a string s of length n containing only characters a and b.
# A substring of s called a semi-alternating substring if it does not
# contain three identical consecutive characters.
# Return the length of the longest semi-alternating substring.
# Example 1: Input: "baaabbabbb" | Output: 7
# Explanation: "aabbabb"
# Example 2: Input: "abaaaa" | Output: 4
# Explanation: "abaa"
# time complexity: O(n)
# space complexity: O(1)
def longest_semialternating_ss(s):
length = len(s)
if not s or length == 0:
return 0
if length < 3:
return length
beginning = 0
end = 1
# first character
comparison_char = s[0]
# count the occurrence of the first char
count_first_char = 1
max_length = 1
while end < length:
end_char = s[end]
if end_char == comparison_char:
# add one to char count
count_first_char += 1
# if char found at least two times
if count_first_char == 2:
x = end - beginning + 1
if x > max_length:
max_length = x
elif count_first_char > 2:
# reset beginning pointer
beginning = end - 1
else:
comparison_char = end_char
count_first_char = 1
if end - beginning + 1 > max_length:
max_length = end - beginning + 1
end += 1
return max_length
# alternate solution
def longest_semi(s):
max_length = 0
left = 0
for right in range(len(s)):
if right - left + 1 >= 3 and s[right] == s[right-1] == s[right-2]:
left = right - 1
max_length = max(max_length, right-left+1)
return max_length
# 7
print(longest_semialternating_ss("baaabbabbb"))
# 4
print(longest_semialternating_ss("abaaaa")) | [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
3e9c34efaf729e534f14ebb1b01daec2955eac95 | bbdf8228ff9b7eacdeccead5a51f0c5008fdbd4e | /backend/home/migrations/0002_customtext_homepage.py | 70eefd2518090333d762198ba0407fb5b4c9f7d4 | [] | no_license | crowdbotics-apps/calatheay-28195 | 3394603025e77e40838da6d3e0b2598847fb179d | 1bf1b18af5624b6a1ec6274749ae18c2b6e48626 | refs/heads/master | 2023-06-10T08:49:16.744520 | 2021-06-24T04:14:18 | 2021-06-24T04:14:18 | 379,799,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # Generated by Django 2.2.20 on 2021-06-24 04:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('home', '0001_load_initial_data'),
]
operations = [
migrations.CreateModel(
name='CustomText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='HomePage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
76423ad933817305f1ccfa8dd901e54b3866ec82 | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/DSView/logparser.py | 34db70f067577b67657037c2976d71c5b87eb612 | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 3,013 | py | #!/usr/bin/python
##################
# logparser.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
class logparser:
def __init__(self):
pass
def parse(self, s):
s = s.split('\n')
dic = {};
curdic = dic;
for entry in s:
if entry == '':
pass
elif entry[0] == '[':
newdic = {}
curdic = newdic
dic[entry.strip()[1:-1]] = newdic
elif entry[0] == '#':
pass
else:
e = entry.split('=')
val = ''
#Try to interpret the value as an int, then as a float.
#If that doesn't work then store as string
try:
val = int(e[1].strip())
except ValueError:
try:
val = float(e[1].strip())
except ValueError:
val = e[1].strip()
curdic[e[0]] = val
return dic
class confocal_parser(logparser):
def __init__(self, log_s):
self.dic = self.parse(log_s)
self.Width = self.dic['GLOBAL']['ImageWidth']
self.Height = self.dic['GLOBAL']['ImageLength']
self.Depth = self.dic['GLOBAL']['NumOfFrames']
self.NumChannels = self.dic['FILTERSETTING1']['NumOfVisualisations']
self.VoxelSize = (self.dic['GLOBAL']['VoxelSizeX'],self.dic['GLOBAL']['VoxelSizeY'],self.dic['GLOBAL']['VoxelSizeZ'])
self.VoxelX = self.VoxelSize[0]
self.VoxelY = self.VoxelSize[1]
self.VoxelZ = self.VoxelSize[2]
self.Averaging = self.dic['GLOBAL']['Accu']
class logwriter:
def __init__(self):
pass
def write(self, log):
#s = s.split('\n')
#dic = {};
#curdic = dic;
s = ''
cats = log.keys()
cats.sort()
for category in cats:
s = s + '[%s]\n' % category
entries = log[category].keys()
entries.sort()
for entry in entries:
s = s + '%s=%s\n' % (entry, log[category][entry])
return s
| [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
733d8138cbf16e057b1f1dc5a8bd3607565a2f07 | de392462a549be77e5b3372fbd9ea6d7556f0282 | /operations_9001/migrations/0075_auto_20201029_1538.py | b4e1e323c94dab6845d6a7073b86f64f8003895c | [] | no_license | amutebe/AMMS_General | 2830770b276e995eca97e37f50a7c51f482b2405 | 57b9b85ea2bdd272b44c59f222da8202d3173382 | refs/heads/main | 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | # Generated by Django 3.0.2 on 2020-10-29 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operations_9001', '0074_auto_20201028_1628'),
]
operations = [
migrations.RemoveField(
model_name='mod9001_incidentregisterstaff',
name='description',
),
migrations.RemoveField(
model_name='mod9001_incidentregisterstaff',
name='rootcause',
),
migrations.AlterField(
model_name='maintenance',
name='maintenance_number',
field=models.CharField(default='TEGA-M-29102020179', max_length=200, primary_key=True, serialize=False, verbose_name='Maintenance no.:'),
),
migrations.AlterField(
model_name='mod9001_calibration',
name='calibration_number',
field=models.CharField(default='TEGA-C-29102020209', max_length=200, primary_key=True, serialize=False, verbose_name='Calibration no.:'),
),
migrations.AlterField(
model_name='mod9001_document_manager',
name='document_number',
field=models.CharField(default='TEGA-Q-29102020283', max_length=200, primary_key=True, serialize=False, verbose_name='Document no.:'),
),
migrations.AlterField(
model_name='mod9001_processtable',
name='process_number',
field=models.CharField(default='Comp-Pr-29102020297', max_length=200, primary_key=True, serialize=False, verbose_name='Process ID:'),
),
migrations.AlterField(
model_name='mod9001_providerassessment',
name='emp_perfrev_no',
field=models.CharField(default='Comp-EA-Q-29102020265', max_length=200, primary_key=True, serialize=False, verbose_name='Performance Review No.:'),
),
migrations.AlterField(
model_name='mod9001_qmsplanner',
name='planner_number',
field=models.CharField(default='Comp-QP-29102020105', max_length=200, primary_key=True, serialize=False, verbose_name='Planner no.:'),
),
migrations.AlterField(
model_name='mod9001_trainingplanner',
name='plan_number',
field=models.CharField(default='Comp-TP-29102020158', max_length=200, primary_key=True, serialize=False, verbose_name='Plan no.:'),
),
migrations.AlterField(
model_name='mod9001_trainingregister',
name='training_number',
field=models.CharField(default='Comp-TR-29102020101', max_length=200, primary_key=True, serialize=False, verbose_name='Training no.:'),
),
]
| [
"mutebe2@gmail.com"
] | mutebe2@gmail.com |
a5eb5a6012c692f522d106022bd5679d3741a1ac | 14818626a264e1556b5bfa282c329465c61fca56 | /tutorial/04_signal_event/C5_t1.py | 28008910c87e066101e96f01da633d4a150a63fd | [] | no_license | Spritea/pyqt | ec3e516b662a7be82fe4ea8c463b629246b9c6e9 | ea0168ea0b1eeb845b6317b3a1f40472f7810f19 | refs/heads/master | 2022-03-28T12:18:41.552662 | 2020-01-03T14:03:20 | 2020-01-03T14:03:20 | 230,898,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget,QLCDNumber,QSlider,QVBoxLayout,QApplication
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
lcd=QLCDNumber(self)
sld=QSlider(Qt.Horizontal,self)
vbox=QVBoxLayout()
vbox.addWidget(lcd)
vbox.addWidget(sld)
self.setLayout(vbox)
sld.valueChanged.connect(lcd.display)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Signal & slot')
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | [
"461158649@qq.com"
] | 461158649@qq.com |
632cf55db5189a414a9d4317d4d592fc6ae4ae24 | dd221d1ab80a49190a0c93277e2471debaa2db95 | /hanlp/datasets/parsing/ctb7.py | 09345b1822e5d56ebd84289ee4e245522a0f63d3 | [
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] | permissive | hankcs/HanLP | 29a22d4e240617e4dc67929c2f9760a822402cf7 | be2f04905a12990a527417bd47b79b851874a201 | refs/heads/doc-zh | 2023-08-18T12:48:43.533453 | 2020-02-15T17:19:28 | 2023-03-14T02:46:03 | 24,976,755 | 32,454 | 9,770 | Apache-2.0 | 2023-08-13T03:11:39 | 2014-10-09T06:36:16 | Python | UTF-8 | Python | false | false | 421 | py | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 18:44
from hanlp.datasets.parsing.ctb5 import _CTB_HOME
_CTB7_HOME = _CTB_HOME + 'BPNN/data/ctb7/'
CTB7_DEP_TRAIN = _CTB7_HOME + 'train.conll'
'''Training set for ctb7 dependency parsing.'''
CTB7_DEP_DEV = _CTB7_HOME + 'dev.conll'
'''Dev set for ctb7 dependency parsing.'''
CTB7_DEP_TEST = _CTB7_HOME + 'test.conll'
'''Test set for ctb7 dependency parsing.'''
| [
"jfservice@126.com"
] | jfservice@126.com |
da65b009b1cda76d19f5b4d51139920c08486916 | c03d0f321e743eb8bd54834f88bd025d6da4e7a8 | /boa_test/example/MethodTest4.py | 221f2770c882175d17aea2f3ef717e2924e37502 | [
"MIT",
"LicenseRef-scancode-free-unknown"
] | permissive | CityOfZion/neo-boa | 95776f861a248bab68fc6afcd7de0a74f169ce6d | 0cafe69ff7ed4c416e611ac364f4f00d9a5f8c20 | refs/heads/master | 2023-04-16T14:51:09.385145 | 2023-03-28T17:08:14 | 2023-03-28T17:08:14 | 107,316,151 | 79 | 76 | MIT | 2020-08-20T12:38:17 | 2017-10-17T19:44:07 | Python | UTF-8 | Python | false | false | 182 | py | # tested
def Main():
a = 1
b = 10
c = 20
d = add(a, b, 10)
d2 = add(d, d, d)
return d2
def add(a, b, c):
result = a + b + c
return result
| [
"tasaunders@gmail.com"
] | tasaunders@gmail.com |
35637d50caf1d867787b77e7439fd213e2c2f866 | 25af1a353db775c70db86f156605357358d6a692 | /backend/app/app/api/api_v1/api.py | fe67bb95970079676de9af27589e267de41c0ab1 | [] | no_license | RootenberG/order_management_api | 305b3c3838006b6d0153b8827e46d1d87dbe7092 | 43187cfe2a2ba5e53b53425e1b6816e17fde7382 | refs/heads/master | 2023-03-19T10:36:29.168066 | 2021-03-17T13:01:22 | 2021-03-17T13:01:22 | 348,662,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from fastapi import APIRouter
from app.api.api_v1.endpoints import items, login, users, utils, orders, bills
api_router = APIRouter()
api_router.include_router(login.router, tags=["login"])
api_router.include_router(users.router, prefix="/users", tags=["users"])
api_router.include_router(utils.router, prefix="/utils", tags=["utils"])
api_router.include_router(items.router, prefix="/items", tags=["items"])
api_router.include_router(orders.router, prefix="/orders", tags=["orders"])
api_router.include_router(bills.router, prefix="/bills", tags=["bills"])
| [
"savichevdenis244@gmail.com"
] | savichevdenis244@gmail.com |
a6f9be51af3fe5309b5c189c4a9f7c0e1e0b6e37 | 748a4a2d7e710d4c2ab86e60dd58a53368153836 | /control_server/cfg/params.cfg | d69c5e77f16880a31b83a51d18c9ff0e47a38932 | [] | no_license | auviitkgp/kraken_obsolete | ad5d45d330686c66f59ef45000a8d6889706569a | d10acdf570c648097eec21cee5ad07f7608692b4 | refs/heads/master | 2021-04-28T23:04:49.200998 | 2017-01-15T10:44:52 | 2017-01-15T10:44:54 | 77,742,183 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | cfg | #!/usr/bin/env python
PACKAGE = "control_server"
import roslib;roslib.load_manifest(PACKAGE)
from dynamic_reconfigure.parameter_generator import *
gen = ParameterGenerator()
gen.add("off_yaw", double_t, 0, "Kp value", 0, 0, 100)
gen.add("of", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kil", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kp_yaw", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kd_yaw", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Ki_yaw", double_t, 0, "A double parameter",0, 0, 100)
gen.add("off_dep_bot", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kib_depth", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kpb_depth", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kdb_depth", double_t, 0, "A double parameter",0, 0, 100)
gen.add("off_dep_top", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kpt_depth", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kdt_depth", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kit_depth", double_t, 0, "A double parameter",0, 0, 100)
# gen.add("bool_param", bool_t, 0, "A Boolean parameter", True)
# size_enum = gen.enum([ gen.const("Small", int_t, 0, "A small constant"),
# gen.const("Medium", int_t, 1, "A medium constant"),
# gen.const("Large", int_t, 2, "A large constant"),
# gen.const("ExtraLarge", int_t, 3, "An extra large constant") ],
# "An enum to set size")
# gen.add("size", int_t, 0, "A size parameter which is edited via an enum", 1, 0, 3, edit_method=size_enum)
exit(gen.generate(PACKAGE, "control_server", "params"))
| [
"kannan.siddharth12@gmail.com"
] | kannan.siddharth12@gmail.com |
287d586f524376706a73cc520fa47eb11e7fea4a | 053d7ca689e41e8ba94c8792f4167a3a2e3069f3 | /urly_bird/bookmarks/admin.py | 32719c44be11c90b61094fc06550e9e6e0ef2bad | [] | no_license | cesarmarroquin/urly-bird-evolved | 691554ee13ea4bfb12ab056b5f2a7a621bfb8e5e | c61e50bc8d13c06b6d431196d532cf45c85dee65 | refs/heads/master | 2020-12-25T23:19:14.149147 | 2015-11-05T16:21:42 | 2015-11-05T16:21:42 | 45,566,498 | 0 | 0 | null | 2015-11-04T20:48:57 | 2015-11-04T20:48:56 | null | UTF-8 | Python | false | false | 360 | py | from django.contrib import admin
from .models import Bookmark, Click
# Register your models here.
@admin.register(Bookmark)
class BookmarkAdmin(admin.ModelAdmin):
list_display = ('id','title','description','bookmark_url', 'timestamp', 'user')
@admin.register(Click)
class ClickAdmin(admin.ModelAdmin):
list_display = ('bookmark','timestamp', 'user') | [
"cesarm2333@gmail.com"
] | cesarm2333@gmail.com |
2303eafc289500d20adb76daad5e488b046b0a2b | 2ce0c37ac7d9beeac23db688f97a1f502b92d13a | /delivery/migrations/0005_remove_outproduct_product.py | ded1fa53f647e9657ed377f967cd85b463d7d543 | [] | no_license | AmrElsayedEG/inventory-system | 0cdb0634b33117b13bfcae8642f979448d831369 | d4bc483612c3b721918d75f24ab0d7fa29b78ce3 | refs/heads/main | 2023-08-20T22:32:25.113740 | 2021-10-04T08:55:44 | 2021-10-04T08:55:44 | 413,344,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # Generated by Django 3.2 on 2021-09-05 16:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('delivery', '0004_outdelivery_representitive'),
]
operations = [
migrations.RemoveField(
model_name='outproduct',
name='product',
),
]
| [
"elsayed.amr50@gmail.com"
] | elsayed.amr50@gmail.com |
4bcaa3a2e38c083e389b56726a33522ede42a2e7 | 160584ad75ed15f9d39205b6a76b3e5beb03a5cb | /env/lib/python2.7/site-packages/stripe/test/resources/test_recipients.py | 3d90e6eba3d441bde22915717099863dec56df73 | [
"MIT"
] | permissive | imran1234567/plutus | 1c66c0c29e9e615c03160fb98f14d44507b642dc | c964f18beb139de2645e052eb4c75a6bc0677029 | refs/heads/master | 2022-12-10T04:33:36.906408 | 2019-04-18T06:26:01 | 2019-04-18T06:26:01 | 169,561,380 | 0 | 0 | MIT | 2022-12-08T00:46:54 | 2019-02-07T11:29:44 | CSS | UTF-8 | Python | false | false | 2,028 | py | import stripe
from stripe.test.helper import (StripeResourceTest)
class RecipientTest(StripeResourceTest):
def test_list_recipients(self):
stripe.Recipient.list()
self.requestor_mock.request.assert_called_with(
'get',
'/v1/recipients',
{}
)
def test_recipient_transfers(self):
recipient = stripe.Recipient(id='rp_transfer')
recipient.transfers()
self.requestor_mock.request.assert_called_with(
'get',
'/v1/transfers',
{'recipient': 'rp_transfer'},
)
def test_recipient_add_card(self):
recipient = stripe.Recipient.construct_from({
'id': 'rp_add_card',
'sources': {
'object': 'list',
'url': '/v1/recipients/rp_add_card/sources',
},
}, 'api_key')
recipient.sources.create(card='tok_visa_debit')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/recipients/rp_add_card/sources',
{
'card': 'tok_visa_debit',
},
None
)
def test_recipient_update_card(self):
card = stripe.Card.construct_from({
'recipient': 'rp_update_card',
'id': 'ca_update_card',
}, 'api_key')
card.name = 'The Best'
card.save()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/recipients/rp_update_card/cards/ca_update_card',
{
'name': 'The Best',
},
None
)
def test_recipient_delete_card(self):
card = stripe.Card.construct_from({
'recipient': 'rp_delete_card',
'id': 'ca_delete_card',
}, 'api_key')
card.delete()
self.requestor_mock.request.assert_called_with(
'delete',
'/v1/recipients/rp_delete_card/cards/ca_delete_card',
{},
None
)
| [
"imran@kgec.edu.in"
] | imran@kgec.edu.in |
159249d116b505a9ff916d16137d4404bada9eee | 543660cf8ec69a950be57f95a6fe878b091273ef | /backend/data/rajpurkar/seqmodels/blob/eb1f40710b4ccb81f50b359daefa9e4c48c8130c/window/frame_models/recurrent.py | 77d5c9f302ac774f1a2a70cc0dafc8355d1b118b | [
"MIT"
] | permissive | nathanhere/neural_complete | 49184982a77f12b9651def6a093a6430cf7d1755 | 050ab3073fe797f3eb2f1a1592c294a8cd081ac6 | refs/heads/master | 2022-12-09T18:40:30.079015 | 2022-02-24T04:46:40 | 2022-02-24T04:46:40 | 87,836,933 | 0 | 1 | MIT | 2022-12-06T22:52:25 | 2017-04-10T17:11:11 | Python | UTF-8 | Python | false | false | 894 | py | """Recurrent neural network model."""
from ...model import KerasModel
from ..window_model import FrameModel
class RecurrentModel(KerasModel, FrameModel):
"""RNN."""
def _create_model(self, input_shape, num_categories):
from keras.layers.core import Activation, Dense, Dropout, Reshape
from keras.models import Sequential
from keras.layers.recurrent import LSTM
model = Sequential()
model.add(
LSTM(
32,
input_shape=input_shape,
return_sequences=True
)
)
model.add(
LSTM(
32,
return_sequences=True,
go_backwards=True
)
)
model.add(LSTM(32, return_sequences=False))
model.add(Dense(num_categories))
model.add(Activation('softmax'))
return model | [
"kootenpv@gmail.com"
] | kootenpv@gmail.com |
0dba882d5a1cdf7ebeaf1f3fd2767a51ea7990e5 | 1d363dfbe69b79bc1989251f085060232beb12f5 | /tests/test_flash_dry_air.py | 59d163bcae94670847b9b2d794d39e3c52674277 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | CalebBell/thermo | ec602af2316875692e385287c6010e9f206b1bc3 | 8622fada3614179d4372192e0031b4a206384c93 | refs/heads/master | 2023-08-30T05:30:07.552575 | 2023-06-25T01:35:53 | 2023-06-25T01:35:53 | 62,404,647 | 529 | 127 | MIT | 2023-08-11T18:31:21 | 2016-07-01T16:04:56 | Python | UTF-8 | Python | false | false | 6,407 | py | '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2021, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
from math import *
import numpy as np
import pytest
from fluids.numerics import *
import thermo
from thermo import *
from thermo.chemical_package import lemmon2000_constants, lemmon2000_correlations
from thermo.coolprop import *
from thermo.phases import DryAirLemmon
from thermo.test_utils import mark_plot_unsupported
try:
import matplotlib.pyplot as plt
except:
pass
fluid = 'air'
pure_surfaces_dir = os.path.join(thermo.thermo_dir, '..', 'surfaces', 'lemmon2000')
@pytest.mark.plot
@pytest.mark.slow
@pytest.mark.parametric
@pytest.mark.parametrize("variables", ['VPT', 'VTP',
'PHT', 'PST', 'PUT',
'VUT', 'VST', 'VHT',
'TSV', # Had to increase the tolerance
'THP', 'TUP', # Not consistent, warning message added
])
def test_plot_lemmon2000(variables):
spec0, spec1, check_prop = variables
plot_name = variables[0:2]
eos = DryAirLemmon
T, P = 298.15, 101325.0
gas = DryAirLemmon(T=T, P=P)
flasher = FlashPureVLS(constants=lemmon2000_constants, correlations=lemmon2000_correlations,
gas=gas, liquids=[], solids=[])
flasher.TPV_HSGUA_xtol = 1e-14
inconsistent = frozenset([spec0, spec1]) in (frozenset(['T', 'H']), frozenset(['T', 'U']))
res = flasher.TPV_inputs(zs=[1.0], pts=200, spec0='T', spec1='P',
check0=spec0, check1=spec1, prop0=check_prop,
trunc_err_low=1e-13,
trunc_err_high=1, color_map=cm_flash_tol(),
show=False, verbose=not inconsistent)
matrix_spec_flashes, matrix_flashes, errs, plot_fig = res
path = os.path.join(pure_surfaces_dir, fluid, plot_name)
if not os.path.exists(path):
os.makedirs(path)
tol = 1e-13
key = f'{plot_name} - {eos.__name__} - {fluid}'
if inconsistent:
spec_name = spec0 + spec1
mark_plot_unsupported(plot_fig, reason='EOS is inconsistent for %s inputs' %(spec_name))
tol = 1e300
plot_fig.savefig(os.path.join(path, key + '.png'))
plt.close()
max_err = np.max(np.abs(errs))
assert max_err < tol
# test_plot_lemmon2000('VUT')
# test_plot_lemmon2000('THP')
def test_lemmon2000_case_issues():
    """Regression tests for individual flash cases of the Lemmon (2000) air EOS.

    Each group below pins a case that previously failed or that documents a
    known inconsistency of the fundamental formulation (points where two
    different pressures give the same H or U at fixed T).
    """
    gas = DryAirLemmon(T=300.0, P=1e5)
    flasher = FlashPureVLS(constants=lemmon2000_constants, correlations=lemmon2000_correlations,
                           gas=gas, liquids=[], solids=[])
    # Cases which were failing because of the iteration variable of P when V specified
    # It is actually less efficient for this type of EOS
    PT = flasher.flash(T=1000.0, P=1e3)
    V = PT.V()
    U = PT.U()
    # Round-trip: flashing with (V, U) must recover the original (T, P).
    res = flasher.flash(V=V, U=U)
    assert_close(PT.T, res.T, rtol=1e-10)
    assert_close(PT.P, res.P, rtol=1e-10)
    S = PT.S()
    res = flasher.flash(V=V, S=S)
    assert_close(PT.T, res.T, rtol=1e-10)
    assert_close(PT.P, res.P, rtol=1e-10)
    H = PT.H()
    res = flasher.flash(V=V, H=H)
    assert_close(PT.T, res.T, rtol=1e-10)
    # Check we can't do a vapor fraction flash
    with pytest.raises(ValueError):
        flasher.flash(T=400, SF=.5)
    with pytest.raises(ValueError):
        flasher.flash(T=400, VF=.5)
    # Check that the minimum temperature of the phases is respected
    with pytest.raises(ValueError):
        flasher.flash(T=132.6312, P=1e3)
    PT = flasher.flash(T=2000., P=3827.4944785162643)
    V = PT.V()
    U = PT.U()
    res = flasher.flash(V=V, U=U)
    assert_close(PT.T, res.T, rtol=1e-10)
    assert_close(PT.P, res.P, rtol=1e-10)
    # Inconsistent TH point in fundamental formulation
    PT1 = flasher.flash(T=610.7410404288737, P=6150985.788580353)
    PT2 = flasher.flash(T=610.7410404288737, P=3967475.2794698337)
    assert_close(PT1.H(), PT2.H())
    # There are a ton of low-pressure points too
    PT1 = flasher.flash(T=484.38550361282495, P=0.027682980294306617)
    PT2 = flasher.flash(T=484.38550361282495, P=0.02768286630392061)
    assert_close(PT1.H(), PT2.H())
    # Inconsistent TU point in fundamental formulation
    PT1 = flasher.flash(T=1652.4510785539342, P=519770184.42714685,)
    PT2 = flasher.flash(T=1652.4510785539342, P=6985879.746785077)
    assert_close(PT1.U(), PT2.U(), rtol=1e-10)
    # Dead snippet kept by the original author for reproducing the U-vs-P scan.
    """
    Ps = logspace(log10(6985879.746785077/2), log10(519770184.42714685*2), 2000)
    Us = [flasher.flash(T=1652.4510785539342, P=P).U() for P in Ps ]
    """
def test_lemmon2000_properties():
    """Verify the four isentropic-exponent variants of dry air at 300 K, 1000 bar.

    The same reference values must be reproduced by the flash result, its
    bulk phase, the standalone gas object, and the result's gas phase.
    """
    gas = DryAirLemmon(T=300.0, P=1000e5)
    flasher = FlashPureVLS(constants=lemmon2000_constants, correlations=lemmon2000_correlations,
                           gas=gas, liquids=[], solids=[])
    res = flasher.flash(T=300.0, P=1000e5)
    # (method name, expected value) pairs, checked in the original order.
    expected_exponents = [
        ('isentropic_exponent', 4.100576762582646),
        ('isentropic_exponent_PV', 4.100576762582646),
        ('isentropic_exponent_PT', 1.3248727035044343),
        ('isentropic_exponent_TV', 2.0055044950839136),
    ]
    for phase_obj in (res, res.bulk, gas, res.gas):
        for method_name, value in expected_exponents:
            assert_close(getattr(phase_obj, method_name)(), value, rtol=1e-12)
| [
"Caleb.Andrew.Bell@gmail.com"
] | Caleb.Andrew.Bell@gmail.com |
f23e5e2eabc0ea5604a62feb24f8d24c53096630 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/algorithm-master/lintcode/689_two_sum_bst_edtion.py | c920ea03fbd1007257c3c49c68706b3c9a0ed00e | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,682 | py | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
c_ Solution:
"""
@param: : the root of tree
@param: : the target sum
@return: two numbers from tree which sum is n
"""
___ twoSum root, n
left right N..
head tail root
pre()
nxt()
w.... left !_ right:
_sum left.val + right.val
__ _sum __ n:
r.. [left.val, right.val]
__ _sum < n:
nxt()
____
pre()
___ pre
w.... tail:
cur tail.right
__ cur a.. cur !_ right:
w.... cur.left a.. cur.left !_ tail:
cur cur.left
__ cur.left __ tail:
right tail
cur.left N..
tail tail.left
_____
____
cur.left tail
tail tail.right
____
right tail
tail tail.left
_____
___ nxt
w.... head:
cur head.left
__ cur a.. cur !_ left:
w.... cur.right a.. cur.right !_ head:
cur cur.right
__ cur.right __ head:
left head
cur.right N..
head head.right
_____
____
cur.right head
head head.left
____
left head
head head.right
_____
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
cdacac9398ca97d329f4e8333510df3edf223077 | 165e706d485e90f4e4f63cfb9f2c35acda14cfc0 | /property_linking/scripts/preprocessing/closure_inference.py | 7f0f19a43eb2ba1d71b8eecc9798247327ab538f | [
"Apache-2.0"
] | permissive | Tarkiyah/googleResearch | 65581f3bbbe2ffe248c9e613c0ea7eac336d5372 | dea327aa9e7ef7f7bca5a6c225dbdca1077a06e9 | refs/heads/master | 2022-12-07T12:04:44.153221 | 2019-11-21T16:03:48 | 2019-11-21T16:18:28 | 223,229,888 | 11 | 2 | Apache-2.0 | 2022-11-21T21:39:10 | 2019-11-21T17:38:31 | Jupyter Notebook | UTF-8 | Python | false | false | 3,537 | py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find superclasses.
Examples:
closure_inference.py --sling_kb_file <kb> --alsologtostderr
# for locations
closure_inference.py --sling_kb_file <kb> --alsologtostderr
--infile <infile> --closing_rel_id P131
"""
import time
import sling
import tensorflow as tf
FLAGS = tf.flags.FLAGS

# Output TSV: one line per input QID, tab-joined with its closure QIDs.
tf.flags.DEFINE_string(
    'outfile', '/tmp/closed.tsv', 'created file')
# Input: one Wikidata QID per line (optionally prefixed with 'i/').
tf.flags.DEFINE_string(
    'infile',
    '',
    'input file')
# probably P131 = administrative region of, or P279 = subclass_of
tf.flags.DEFINE_string(
    'closing_rel_id', 'P279', 'relation to use to close')
tf.flags.DEFINE_string(
    'sling_kb_file',
    '',
    'where to find sling kb')
# Ancestors whose name contains this substring are dropped from closures.
tf.flags.DEFINE_string(
    'blacklist_substring',
    'metaclass',
    'discard superclasses with this substring in the name')
tf.flags.DEFINE_boolean(
    'trace_closures',
    False,
    'give ridiculously long debug output')
def load_kb():
  """Load the sling knowledge base from FLAGS.sling_kb_file and freeze it.

  Returns:
    sling kb
  """
  tf.logging.info('loading and indexing kb...')
  start_time = time.time()
  store = sling.Store()
  store.load(FLAGS.sling_kb_file)
  store.freeze()
  elapsed = time.time() - start_time
  tf.logging.info('loading took %.3f sec' % elapsed)
  return store
def closure(kb, closing_relation_id, cat_id):
  """Return ids for the logical closure of a category/region.

  Args:
    kb: a sling kb
    closing_relation_id: SUBCLASS_OF_ID or REGION_OF_ID
    cat_id: id of the category to find ancestors of

  Returns:
    all things in the KB of which the category with id cat_id is a
    subclass (a set, or a blacklist-filtered list when
    FLAGS.blacklist_substring is set).
  """
  ancestors = set()
  closing_relation = kb[closing_relation_id]
  _collect_ancestors(kb, ancestors, closing_relation, cat_id)
  if not FLAGS.blacklist_substring:
    return ancestors

  def keep(qid):
    # Drop entities whose name mentions the blacklisted substring.
    name = kb[qid].name
    return not (name and name.find(FLAGS.blacklist_substring) >= 0)

  return [qid for qid in ancestors if keep(qid)]
def _collect_ancestors(kb, buf, closer, cat_id):
if cat_id not in buf:
buf.add(cat_id)
for key, val in kb[cat_id]:
if key == closer and val.id:
_collect_ancestors(kb, buf, closer, val.id)
def main(_):
  """Read QIDs from --infile and write their closures to --outfile as TSV.

  Each output line is the input QID followed by every QID in its
  transitive closure under --closing_rel_id, tab-separated.
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  kb = load_kb()
  tf.logging.info('will write to %s*.tsv' % FLAGS.outfile)
  tf.logging.info('closing with %s [%s]' % (
      kb[FLAGS.closing_rel_id].name,
      kb[FLAGS.closing_rel_id]))
  with tf.gfile.Open(FLAGS.outfile, 'w') as out_fp:
    for line in tf.gfile.GFile(FLAGS.infile):
      qid = line.strip()
      # Tolerate inputs prefixed with the 'i/' entity-namespace marker.
      if qid.startswith('i/'):
        qid = qid[len('i/'):]
      closed = closure(kb, FLAGS.closing_rel_id, qid)
      out_fp.write('\t'.join([qid] + list(closed)) + '\n')
      if FLAGS.trace_closures:
        # Extremely verbose: log every ancestor of every non-trivial closure.
        if len(closed) > 1:
          tf.logging.info('closing %s [%s]' % (kb[qid].name, qid))
          for super_qid in closed:
            tf.logging.info(' ==> %s [%s]' % (kb[super_qid].name, super_qid))
if __name__ == '__main__':
  # tf.app.run() parses the command-line flags and then invokes main().
  tf.app.run()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
746c37e2bd1f8cca597462e416e26a4cd58a179a | a7319d1c462618445b13cb8dde5c30173801b745 | /backend/hardik_25509/settings.py | 7596bd63123791db28563da0c6685b368b3fe30e | [] | no_license | crowdbotics-apps/hardik-25509 | 357e87239d5379d4b6c79d746659d8a20942be0b | f179cee72ca96a9115c3aeb46e3d5d4a3abec27a | refs/heads/master | 2023-04-01T19:47:21.352217 | 2021-04-08T05:57:57 | 2021-04-08T05:57:57 | 355,501,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,128 | py | """
Django settings for hardik_25509 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
# All environment access goes through django-environ.
env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)

# Application definition

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "course",
]

LOCAL_APPS = [
    "home",
    "modules",
    "users.apps.UsersConfig",
]

THIRD_PARTY_APPS = [
    "rest_framework",
    "rest_framework.authtoken",
    "rest_auth",
    "rest_auth.registration",
    "bootstrap4",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "django_extensions",
    "drf_yasg",
    "storages",
    # start fcm_django push notifications
    "fcm_django",
    # end fcm_django push notifications
]

INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "hardik_25509.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASE_DIR, "web_build")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "hardik_25509.wsgi.application"

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite fallback for local development; overridden by DATABASE_URL below.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
if env.str("DATABASE_URL", default=None):
    DATABASES = {"default": env.db()}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = "/static/"
# WhiteNoise serves static files directly from the app process.
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]

AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
    os.path.join(BASE_DIR, "web_build/static"),
]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")

# S3 media storage is enabled only when all four credentials are present.
USE_S3 = (
    AWS_ACCESS_KEY_ID
    and AWS_SECRET_ACCESS_KEY
    and AWS_STORAGE_BUCKET_NAME
    and AWS_STORAGE_REGION
)

if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )

MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")

# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications

# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}

if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning(
            "You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
        )
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
"team@crowdbotics.com"
] | team@crowdbotics.com |
3ee0b9cfa0a12433c2872fc03735b4afd463fdbb | d5ce517617f90aba1a618098f459b262968a6a20 | /flup/client/scgi_app.py | 33121806e77f5dd59af0fad0fb0829567829f91d | [] | no_license | jedie/flup-py3.3 | 8bfabe2195cfe5df1fb8acfb92a108b43d668e51 | 56d495311d0e850fbab94c6c3e160793e245d0d4 | refs/heads/master | 2021-01-18T06:51:27.659312 | 2014-05-22T01:35:10 | 2014-05-22T01:35:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,923 | py | # Copyright (c) 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id: scgi_app.py 2111 2006-11-25 02:00:21Z asaddi $
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision: 2111 $'
import select
import struct
import socket
import errno
__all__ = ['SCGIApp']
def encodeNetstring(s):
    """Frame *s* as a netstring: '<length>:<payload>,'."""
    return '%d:%s,' % (len(s), s)
class SCGIApp(object):
    """WSGI application that forwards each request to an SCGI server.

    The WSGI environ is serialized as netstring-framed SCGI request
    headers, the request body is streamed to the SCGI server, and the
    server's CGI-style response is parsed back into a WSGI status,
    header list, and body.

    NOTE(review): the socket files are opened in text mode ('w'/'r');
    this code predates Python 3's strict str/bytes separation -- confirm
    behavior on Python 3 before relying on it there.
    """
    def __init__(self, connect=None, host=None, port=None,
                 filterEnviron=True):
        # connect: (host, port) tuple for TCP, or a string path for a
        # UNIX-domain socket.  host/port are a convenience alternative.
        if host is not None:
            assert port is not None
            connect=(host, port)
        assert connect is not None

        self._connect = connect

        # When True, forward only CGI-relevant environ keys (prefixes /
        # whitelist below); when False, forward every all-uppercase key.
        self._filterEnviron = filterEnviron

    def __call__(self, environ, start_response):
        """Handle one WSGI request by round-tripping it to the SCGI server."""
        sock = self._getConnection()
        outfile = sock.makefile('w')
        infile = sock.makefile('r')
        # The file objects keep the underlying connection alive; the
        # socket object itself is no longer needed.
        sock.close()

        # Filter WSGI environ and send as request headers
        if self._filterEnviron:
            headers = self._defaultFilterEnviron(environ)
        else:
            headers = self._lightFilterEnviron(environ)
        # TODO: Anything not from environ that needs to be sent also?

        content_length = int(environ.get('CONTENT_LENGTH') or 0)
        # The SCGI spec requires CONTENT_LENGTH to be the *first* header,
        # so remove any copy picked up from environ and re-add it in front.
        if 'CONTENT_LENGTH' in headers:
            del headers['CONTENT_LENGTH']

        headers_out = ['CONTENT_LENGTH', str(content_length), 'SCGI', '1']
        for k,v in list(headers.items()):
            headers_out.append(k)
            headers_out.append(v)
        headers_out.append('') # For trailing NUL
        # Headers are NUL-separated name/value pairs inside one netstring.
        outfile.write(encodeNetstring('\x00'.join(headers_out)))

        # Transfer wsgi.input to outfile
        while True:
            chunk_size = min(content_length, 4096)
            s = environ['wsgi.input'].read(chunk_size)
            content_length -= len(s)
            outfile.write(s)

            if not s: break

        outfile.close()

        # Read result from SCGI server
        result = []
        while True:
            buf = infile.read(4096)
            if not buf: break

            result.append(buf)

        infile.close()

        result = ''.join(result)

        # Parse response headers
        status = '200 OK'
        headers = []
        pos = 0
        while True:
            eolpos = result.find('\n', pos)
            if eolpos < 0: break
            # NOTE(review): dropping the char before '\n' assumes CRLF
            # line endings; a bare-LF server would lose the last header
            # character here -- confirm the backend always sends CRLF.
            line = result[pos:eolpos-1]
            pos = eolpos + 1

            # strip in case of CR. NB: This will also strip other
            # whitespace...
            line = line.strip()

            # Empty line signifies end of headers
            if not line: break

            # TODO: Better error handling
            header, value = line.split(':', 1)
            header = header.strip().lower()
            value = value.strip()

            if header == 'status':
                # Special handling of Status header
                status = value
                if status.find(' ') < 0:
                    # Append a dummy reason phrase if one was not provided
                    status += ' SCGIApp'
            else:
                headers.append((header, value))

        # Everything after the blank line is the response body.
        result = result[pos:]

        # Set WSGI status, headers, and return result.
        start_response(status, headers)
        return [result]

    def _getConnection(self):
        """Open and return a connected socket to the configured SCGI server."""
        if type(self._connect) is str:
            # String target -> UNIX-domain socket path.
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(self._connect)
        return sock

    # Environ keys forwarded when filtering: any key with one of these
    # prefixes, any key listed in _environCopies, plus renames.
    _environPrefixes = ['SERVER_', 'HTTP_', 'REQUEST_', 'REMOTE_', 'PATH_',
                        'CONTENT_']
    _environCopies = ['SCRIPT_NAME', 'QUERY_STRING', 'AUTH_TYPE']
    _environRenames = {}

    def _defaultFilterEnviron(self, environ):
        """Return only the CGI-relevant subset of *environ*."""
        result = {}
        for n in list(environ.keys()):
            for p in self._environPrefixes:
                if n.startswith(p):
                    result[n] = environ[n]
            if n in self._environCopies:
                result[n] = environ[n]
            if n in self._environRenames:
                result[self._environRenames[n]] = environ[n]

        return result

    def _lightFilterEnviron(self, environ):
        """Return every all-uppercase key of *environ* (drops wsgi.* keys)."""
        result = {}
        for n in list(environ.keys()):
            if n.upper() == n:
                result[n] = environ[n]

        return result
return result
if __name__ == '__main__':
    # Smoke test: serve this SCGI proxy behind an AJP front end,
    # forwarding requests to an SCGI backend on localhost:4000.
    from flup.server.ajp import WSGIServer
    app = SCGIApp(connect=('localhost', 4000))
    #import paste.lint
    #app = paste.lint.middleware(app)
    WSGIServer(app).run()
| [
"x11org@gmail.com"
] | x11org@gmail.com |
f78310a73f5328335184fda438b6808995b9f2c9 | b0e66db67b34b88e7884aa9b4a7b7607bbe9651b | /math/d20/const.py | ae16e3cdb42ae989d9829f7ffef9815a604cce32 | [] | no_license | cole-brown/veredi-code | 15cf47c688c909b27ad2f2f3518df72862bd17bc | 8c9fc1170ceac335985686571568eebf08b0db7a | refs/heads/master | 2023-04-22T03:21:10.506392 | 2021-05-01T19:05:10 | 2021-05-01T19:05:10 | 296,949,870 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # coding: utf-8
'''
Constants, Enums, and Stuff for math.d20 module.
'''
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import enum
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
@enum.unique
class FormatOptions(enum.Flag):
NONE = 0
INITIAL = enum.auto()
INTERMEDIATE = enum.auto()
FINAL = enum.auto()
ALL = INITIAL | INTERMEDIATE | FINAL
def all(self, flag):
return ((self & flag) == flag)
def any(self, *flags):
for each in flags:
if (self & each) == each:
return True
return False
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| [
"code@brown.dev"
] | code@brown.dev |
f8628d4fb035631907212605035a7ea9aa7a7f62 | 8a83bb7acb9b62183fca817e1f196dd8075630a4 | /01_array/23_majority_number.py | ec356909a26e7fbefe85d7ba6c55c3904da6c149 | [] | no_license | sandeepkumar8713/pythonapps | ff5ad3da854aa58e60f2c14d27359f8b838cac57 | 5dcb5ad4873124fed2ec3a717bfa379a4bbd197d | refs/heads/main | 2023-09-01T04:12:03.865755 | 2023-08-31T07:04:58 | 2023-08-31T07:04:58 | 234,762,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | # CTCI : Q17_10_Majority_Element
# https://www.geeksforgeeks.org/majority-element/
# Question : Write a function which takes an array and prints the majority element (if it exists),
# otherwise prints "No Majority Element". A majority element in an array A[] of size n is an
# element that appears more than n/2 times (and hence there is at most one such element).
#
# Question Type : Easy
# Used : findCandidate():
# maj_index = 0, count = 1
# Run a loop of the input array.
# If A[maj_index] == A[i]: count++ else count--
# If count == 0: (take current maj_index) maj_index = i, count = 1
# return A[maj_index]
# This value returned by findCandidate() might be majority number.
# Run a loop over elements and check if this is majority number.
# Complexity : O(n)
def findCandidate(A):
    """Boyer-Moore majority vote: return the candidate majority element of A.

    The returned value is only guaranteed to be the majority element when
    one actually exists; callers must verify with a counting pass.
    """
    candidate_idx = 0
    votes = 1
    for i, value in enumerate(A):
        # Matching elements reinforce the candidate, others cancel a vote.
        if A[candidate_idx] == value:
            votes += 1
        else:
            votes -= 1
        # All votes cancelled: adopt the current element as the candidate.
        if votes == 0:
            candidate_idx = i
            votes = 1
    return A[candidate_idx]
def isMajority(A, cand):
    """Return True iff cand occurs more than len(A)/2 times in A."""
    return A.count(cand) > len(A) / 2
def printMajority(A):
    """Print the majority element of A, or a message when none exists."""
    cand = findCandidate(A)
    # The Boyer-Moore candidate must be verified before being reported.
    print(cand if isMajority(A, cand) else "No Majority Element")
if __name__ == "__main__":
A = [1, 3, 3, 1, 2, 3, 3]
printMajority(A)
A = [1, 3, 3, 1, 2, 3]
printMajority(A)
| [
"sandeepkumar8713@gmail.com"
] | sandeepkumar8713@gmail.com |
448b224c06b11dcad0efb3a0e001204a1051988c | 0aad1f032876366b555d4b4d0bd80ad2ae74c226 | /src/ingest-pipeline/md/data_file_types/metadatatsv_metadata_file.py | 98ca0487a1a569a4afbe695e9ac270212a2fecee | [
"MIT"
] | permissive | icaoberg/ingest-pipeline | d876c33e5f5414f21c6194cd2085c02a126ef73c | 8c8296daaf2a3a71cd213a6b7b8a067739fa5272 | refs/heads/master | 2022-11-19T11:54:00.264295 | 2020-07-01T21:07:08 | 2020-07-01T21:07:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | #! /usr/bin/env python
import csv
import os
from pathlib import Path
from metadata_file import MetadataFile
from type_base import MetadataError
from submodules import ingest_validation_tools_submission, ingest_validation_tools_error_report
class MetadataTSVMetadataFile(MetadataFile):
"""
A metadata file type for the specialized metadata.tsv files used to store submission info
"""
category_name = 'METADATATSV';
def collect_metadata(self):
# print('validating {} as metadata.tsv'.format(self.path))
# dirpath = Path(os.path.dirname(self.path))
# submission = ingest_validation_tools_submission.Submission(directory_path=dirpath,
# ignore_files=os.path.basename(self.path))
# report = ingest_validation_tools_error_report.ErrorReport(submission.get_errors())
# if report.errors:
# # Scan reports an error result
# with open('ingest_validation_tools_report.txt', 'w') as f:
# f.write(report.as_text())
# raise MetadataError('{} failed ingest validation test'.format(self.path))
print('parsing metadatatsv from {}'.format(self.path))
md = []
with open(self.path, 'rU', newline='') as f:
dialect = csv.Sniffer().sniff(f.read(256))
f.seek(0)
reader = csv.DictReader(f, dialect=dialect)
for row in reader:
dct = {k : v for k, v in row.items()}
dct['_from_metadatatsv'] = True
md.append(dct)
return md
| [
"welling@psc.edu"
] | welling@psc.edu |
6a53fb6ca5851bcacf7250e3aa6bb7bf3a8c255d | a99372d1c71be907e1fbfb4f7287363ff1f51f56 | /Legal/migrations/0005_category_dated.py | 8eed23ace074af81bb4330caa646d8b82770aca4 | [
"MIT"
] | permissive | domambia/csdigital-gs1kenya-internal-erp | 43045c219b627453f30da9c6bd62335985f81927 | be36378ad7b960d074dd5841aaadc849ac6356de | refs/heads/master | 2022-12-10T13:49:55.516938 | 2021-10-31T15:08:29 | 2021-10-31T15:08:29 | 164,619,152 | 17 | 14 | null | 2022-12-08T01:44:41 | 2019-01-08T09:59:34 | Python | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.1.5 on 2019-04-09 13:34
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Legal', '0004_auto_20190409_1242'),
]
operations = [
migrations.AddField(
model_name='category',
name='dated',
field=models.DateField(default=datetime.datetime.now),
),
]
| [
"omambiadauglous@gmail.com"
] | omambiadauglous@gmail.com |
aa8219a0132e52fcee1ff21e14f8b17086844f66 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/gos_20200614062134.py | 0926db4d45f1ea761568bc4b9c5dc96acc72a3aa | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,014 | py | # # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Імпорт фажливих бібліотек
# import subprocess
# # Створення циклу та використання функції range для генерації послідовних чисел
# for ping in range(1,10):
# # генерування IP адреси базуючись на номері ітерації
# address = "127.0.0." + str(ping)
# # виклик функції call яка робить запит на IP адрес та запис відповіді в змінну
# res = subprocess.call(['ping', '-c', '3', address])
# # За допомогою умовних операторів перевіряємо відповідь та виводимо результат
# if res == 0:
# print "ping to", address, "OK"
# elif res == 2:
# print "no response from", address
# else:
# print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import requests
# # Ітеруємося по масиву з адресами зображень
# for i, pic_url in enumerate(["http://x.com/nanachi.jpg", "http://x.com/nezuko.jpg"]):
# # Відкриваємо файл базуючись на номері ітерації
# with open('pic{0}.jpg'.format(i), 'wb') as handle:
# # Отримуємо картинку
# response = requests.get(pic_url, stream=True)
# # Використовуючи умовний оператор перевіряємо чи успішно виконався запит
# if not response.ok:
# print(response)
# # Ітеруємося по байтах картинки та записуємо батчаси в 1024 до файлу
# for block in response.iter_content(1024):
# # Якщо байти закінчилися, завершуємо алгоритм
# if not block:
# break
# # Записуємо байти в файл
# handle.write(block)
# -----------------------------------------------------------------------------------------------------------
# # Створюємо клас для рахунку
# class Bank_Account:
# # В конструкторі ініціалізуємо рахунок як 0
# def __init__(self):
# self.balance=0
# print("Hello!!! Welcome to the Deposit & Withdrawal Machine")
# # В методі депозит, використовуючи функцію input() просимо ввести суму поповенння та додаємо цю суму до рахунку
# def deposit(self):
# amount=float(input("Enter amount to be Deposited: "))
# self.balance += amount
# print("\n Amount Deposited:",amount)
# # В методі депозит, використовуючи функцію input() просимо ввести суму отримання та віднімаємо цю суму від рахунку
# def withdraw(self):
# amount = float(input("Enter amount to be Withdrawn: "))
# # За допомогою умовного оператора перевіряємо чи достатнього грошей на рахунку
# if self.balance>=amount:
# self.balance-=amount
# print("\n You Withdrew:", amount)
# else:
# print("\n Insufficient balance ")
# # Виводимо бааланс на екран
# def display(self):
# print("\n Net Available Balance=",self.balance)
# # Створюємо рахунок
# s = Bank_Account()
# # Проводимо операції з рахунком
# s.deposit()
# s.withdraw()
# s.display()
# -----------------------------------------------------------------------------------------------------------
# # Створюємо рекурсивну функцію яка приймає десяткове число
# def decimalToBinary(n):
# # перевіряємо чи число юільше 1
# if(n > 1):
# # Якщо так, ділемо на 2 юез остачі та рекурсивно викликаємо функцію
# decimalToBinary(n//2)
# # Якщо ні, виводимо на остачу ділення числа на 2
# print(n%2, end=' ')
# # Створюємо функцію яка приймає бінарне число
# def binaryToDecimal(binary):
# # Створюємо додаткову змінну
# binary1 = binary
# # Ініціалізуємо ще 3 змінню даючи їм значення 0
# decimal, i, n = 0, 0, 0
# # Ітеруємося до тих пір поки передане нами число не буде 0
# while(binary != 0):
# # Отримуємо остачу від ділення нашого чила на 10 на записуємо в змінну
# dec = binary % 10
# # Додаємо до результату суму попереднього результату та добуток від dec та піднесення 2 до степеня номеру ітерації
# decimal = decimal + dec * pow(2, i)
# # Змінюємо binary
# binary = binary//10
# # Додаємо 1 до кількості ітерацій
# i += 1
# # Виводимо результат
# print(decimal)
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import re
# # В умовному операторі перевіряємо чи підходить введена пошта під знайдений з інтернету regex
# if re.match(r"[^@]+@[^@]+\.[^@]+", "nanachi@gmail.com"):
# # Якщо так, виводиму valid
# print("valid")
# -----------------------------------------------------------------------------------------------------------
# Створення функції яка приймає текст для шифрування та здвиг
def encrypt(text, s):
    """Caesar-cipher *text* by shifting every letter *s* positions.

    Uppercase and lowercase letters wrap around inside their own case.
    Bug fix: non-alphabetic characters (spaces, digits, punctuation) used
    to fall into the lowercase branch and come out as garbage; they are
    now passed through unchanged.
    """
    result = ""
    for char in text:
        if char.isupper():
            # Shift inside 'A'..'Z' (ord('A') == 65), wrapping with % 26.
            result += chr((ord(char) + s - 65) % 26 + 65)
        elif char.islower():
            # Shift inside 'a'..'z' (ord('a') == 97), wrapping with % 26.
            result += chr((ord(char) + s - 97) % 26 + 97)
        else:
            # Leave non-letters untouched instead of garbling them.
            result += char
    return result
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
8570724f11f96fad4fbaa801251f384aaec32139 | 9ba0e059a15f2b24d7c031e039d3564c647a9336 | /tuva/imports/Data - After switching to array storage and before ordereddict.py | 3f44cf593a66cc730f3ed901c4c885f5ef9ade7b | [] | no_license | vatir/tuva | 98714d262cc0fbaf714c29e81af25bad8bb02c6c | a105280951c0a21dd0d9eab60d24545e423e0479 | refs/heads/master | 2023-01-25T04:27:38.558356 | 2023-01-11T17:58:47 | 2023-01-11T17:58:47 | 129,917,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,584 | py | from numpy import array
from numpy import append
from numpy import dtype
from collections import OrderedDict
# Is used in testing model run behavior
calltimes = 0
model_init = False
# Build a consistent color list; insertion order is the display order.
colorlist = OrderedDict([
    ("black", (0, 0, 0)),
    ("red", (255, 0, 0)),
    ("goldenrod", (218, 165, 32)),
    ("magenta 3", (205, 0, 205)),
    ("midnightblue", (25, 25, 112)),
    ("indian red", (176, 23, 31)),
    ("emeraldgreen", (0, 201, 87)),
    ("honeydew 4", (131, 139, 131)),
    ("green 2", (0, 238, 0)),
    ("deepskyblue", (0, 191, 255)),
    ("orangered 2", (238, 64, 0)),
    ("sgi beet", (142, 56, 142)),
    ("manganeseblue", (3, 168, 158)),
    ("cornflowerblue", (100, 149, 237)),
])
class ImportOld():
    """
    Import old conlin tab delimited data
    fileloc is the system path and filename
    """
    def __init__(self, fileloc):
        # Local import shadows the module-level numpy import on purpose.
        from numpy import array
        filehandle = open(fileloc, 'r')
        # Column names with special meaning; every other column is a trait.
        self.builtin = ["ind", "ind_error", "dep", "dep_error"]
        # Default column positions; overwritten when a header row is seen.
        self.knowncol = {"ind":0, "ind_error":1, "dep":2, "dep_error":3, "group":4}
        #self._data = OrderedDict()
        self._data = list()
        for line in filehandle:
            line_list = list()
            try:
                # First pass: probe every non-group field with float() so a
                # header row raises ValueError before anything is stored.
                hoztestentry = 0
                for testentry in line.split():
                    if not (self.knowncol["group"] == hoztestentry):
                        float(testentry)
                    hoztestentry += 1
                # Second pass: store the row, keeping the group column as a
                # string and converting every other column to float.
                hoztestentry = 0
                for entry in line.split():
                    if (self.knowncol["group"] == hoztestentry):
                        # NOTE(review): groupentry is assigned but never used.
                        groupentry = entry
                        line_list.append(str(entry))
                    else:
                        line_list.append(float(entry))
                    hoztestentry += 1
                self._data.append(line_list)
            except ValueError:
                # Header row: record the position of each column name.
                current_hoz_pos = 0
                for entry in line.split():
                    # NOTE(review): both branches do the same thing; the
                    # if/else could be collapsed -- confirm nothing else
                    # was intended for unknown column names.
                    if entry in self.knowncol.keys():
                        self.knowncol[entry] = current_hoz_pos
                    else:
                        self.knowncol[entry] = current_hoz_pos
                    current_hoz_pos += 1
        # Mixed str/float rows coerce the whole array to a string dtype;
        # the accessors below re-cast columns to float as needed.
        self._data = array(self._data)
        filehandle.close()
        # `init` suppresses plot refreshes while the constructor runs.
        self.init = True
        self.Update()
        self.init = False
    def Update(self):
        # Refresh the main window's plot panel, except during construction.
        if self.init == False:
            from __main__ import MainFrame
            MainFrame.panel.plotpanel.Update()
    def traits(self):
        """Return the non-builtin columns as {name: numpy array}."""
        traitdict = dict()
        for key in self.knowncol.keys():
            if key in ["group"]:
                # Group labels stay as (byte) strings.
                traitdict[key] = array(self._data[:, self.knowncol[key]], dtype='S')
            if key not in (self.builtin + ["group"]):
                traitdict[key] = array(self._data[:, self.knowncol[key]], dtype='f')
        return traitdict
    def x(self):
        # Independent variable column as floats.
        return array(self._data[:, self.knowncol["ind"]], dtype='f')
    def y(self):
        # Dependent variable column as floats.
        return array(self._data[:, self.knowncol["dep"]], dtype='f')
    def xerr(self):
        # Error on the independent variable.
        return array(self._data[:, self.knowncol["ind_error"]], dtype='f')
    def yerr(self):
        # Error on the dependent variable.
        return array(self._data[:, self.knowncol["dep_error"]], dtype='f')
    def xmin(self):
        return float(min(array(self._data[:, self.knowncol["ind"]], dtype='f')))
    def ymin(self):
        return float(min(array(self._data[:, self.knowncol["dep"]], dtype='f')))
    def xmax(self):
        return float(max(array(self._data[:, self.knowncol["ind"]], dtype='f')))
    def ymax(self):
        return float(max(array(self._data[:, self.knowncol["dep"]], dtype='f')))
    def Get_Row_Len(self):
        # Number of columns (second array dimension).
        return int(self._data.shape[1])
    def Get_Col_Len(self):
        # Number of rows (first array dimension).
        return int(self._data.shape[0])
    def GetNumberRows(self):
        # Grid-table-style accessor; mirrors Get_Col_Len().
        return int(self._data.shape[0])
    def GetNumberCols(self):
        # Grid-table-style accessor; mirrors Get_Row_Len().
        return int(self._data.shape[1])
    def GetValue(self, row, col):
        # Return the stored cell value, or '' when it is missing.
        value = self._data[(row, col)]
        if value is not None:
            return value
        else:
            return ''
    def SetValue(self, row, col, value):
        # The group column stores strings; all others coerce to float.
        if (self.knowncol["group"] == col):
            self._data[(row, col)] = str(value)
        else:
            self._data[(row, col)] = float(value)
        self.Update()
| [
"grok_me@hotmail.com"
] | grok_me@hotmail.com |
eccc587583776be4807de65835612594c73c72d9 | 4273f6c264fa5a7267557c5e0d338a2cbd27789e | /AIE23/20191102_feature_engineering/a3_feature_engineering/4_reduce_dimension/3D_PCA.py | 5934a47edbdece3ff91c453ba92bed0b0ac92fc7 | [] | no_license | shcqupc/Alg_study | 874d37954ed8ed2cdb3bd492d59cd071836946f5 | 462ee12c72b7f84c5ae45aaf0f65b812d7c1ada1 | refs/heads/master | 2020-07-10T15:26:40.603300 | 2020-03-27T12:53:16 | 2020-03-27T12:53:16 | 204,298,238 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# # import some data to play with
# Load the classic 150-sample iris dataset bundled with scikit-learn.
iris = datasets.load_iris()
# X = iris.data[:, :2] # we only take the first two features.
# Class labels (0..2); used only to color the scatter points.
y = iris.target
# To getter a better understanding of interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig)
# Project the 4-D iris measurements onto their first three principal axes.
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
# Tick values carry no physical meaning after projection, so hide them.
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show() | [
"253848296@qq.com"
] | 253848296@qq.com |
cc2d8e32f9b31c5a2c802971ec725b3f6c2105ea | 3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2 | /src/Week4/Practice/reverse.py | 262d73adcf745fdaf0052bcef4f211a71cc4582e | [] | no_license | theguyoverthere/CMU15-112-Spring17 | b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8 | b8287092b14e82d2a3aeac6c27bffbc95382eb34 | refs/heads/master | 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | import cs112_s17_linter
#******************************************************************************#
# Author: Tarique Anwer
# Date: 9/4/2017
# Function: Destructively reverse a list. So if a equals [2, 3, 4], then after
# reverse, a should equal [4, 3, 2]. As is generally true of
# destructive functions, this function does not return a value.
# Args: A list.
# Returns: None
# Raises: NA
#******************************************************************************#
def reverse(a):
    """Destructively reverse the list *a* in place; returns None."""
    # Slice assignment rewrites the existing list object, so every
    # reference to `a` observes the reversal.
    a[:] = a[::-1]
def testReverse():
    """Exercise reverse() on empty, flat, mixed, and nested lists."""
    print("Testing reverse(a)...", end="")
    cases = [
        ([], []),
        ([1, 2, 3], [3, 2, 1]),
        (["hi", "there", 1, 2, 3], [3, 2, 1, "there", "hi"]),
        ([[1, 2], [2, 3], [3, 4]], [[3, 4], [2, 3], [1, 2]]),
    ]
    for given, expected in cases:
        reverse(given)
        assert(given == expected)
    print("Passed!")
#################################################
# testAll and main
#################################################
def testAll():
    """Run every test function in this file."""
    testReverse()
def main():
    """Lint this file against the course style rules, then run all tests."""
    # Tokens the CS112 linter rejects in student code; the commented-out
    # entries are the tokens that remain allowed, kept for reference.
    bannedTokens = (
        #'False,None,True,and,assert,def,elif,else,' +
        #'from,if,import,not,or,return,' +
        #'break,continue,for,in,while,repr' +
        'as,class,del,except,finally,' +
        'global,is,lambda,nonlocal,pass,raise,' +
        'try,with,yield,' +
        #'abs,all,any,bool,chr,complex,divmod,float,' +
        #'int,isinstance,max,min,pow,print,round,sum,' +
        #'range,reversed,str,string,[,],ord,chr,input,len'+
        '__import__,ascii,bin,bytearray,bytes,callable,' +
        'classmethod,compile,delattr,dict,dir,enumerate,' +
        'eval,exec,filter,format,frozenset,getattr,globals,' +
        'hasattr,hash,help,hex,id,issubclass,iter,' +
        'list,locals,map,memoryview,next,object,oct,' +
        'open,property,set,' +
        'setattr,slice,sorted,staticmethod,super,tuple,' +
        'type,vars,zip,importlib,imp,{,}')
    cs112_s17_linter.lint(bannedTokens=bannedTokens) # check style rules
    testAll()
if __name__ == '__main__':
    main()
| [
"tariqueanwer@outlook.com"
] | tariqueanwer@outlook.com |
93724f50f55a830aa5f7bccdeb6074f7150601fe | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/bxxbub001/question1.py | 4b05b26459fa0cf1a7ed56c52bbf12d955c4967a | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #assignment 3
#B.Booi
def makeSqr(hight, width):
    """Print a hight-by-width rectangle of '*' characters."""
    row = width * "*"
    for _ in range(hight):
        print(row)
# NOTE(review): eval() on raw user input executes arbitrary code; int()
# would be the safe choice here -- kept as-is to preserve behavior.
hi = eval(input("Enter the height of the rectangle:\n"))
span = eval(input("Enter the width of the rectangle:\n"))
# Draw the rectangle with the user-supplied dimensions.
makeSqr(hi,span)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
bad5bd7500f02ef244bbf720cb53d5388220cf3f | 999f928790a181448fdda17619876c7a39d96bf8 | /bin/waitress-serve | e56b524164b806fa66ae530151a756d3ec584f8e | [
"MIT"
] | permissive | serashioda/learning_journal2 | adede85fb8092dc23b856ba071b25c6480927a0a | a00e7da8fcdc0179f99bfc2fd4dc7cf77ecd81c2 | refs/heads/master | 2021-01-13T11:00:44.508580 | 2017-01-09T07:09:00 | 2017-01-09T07:09:00 | 77,100,871 | 0 | 1 | null | 2017-01-09T15:05:32 | 2016-12-22T01:52:40 | CSS | UTF-8 | Python | false | false | 263 | #!/Users/Sera/Dropbox/codefellows/401/learning_journal/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from waitress.runner import run
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.pyw"/".exe") so that
    # waitress reports a clean program name in usage/help output.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    # Delegate to waitress' CLI runner; its exit status becomes ours.
    sys.exit(run())
| [
"you@example.com"
] | you@example.com | |
1200976e1b121cb5062d20f640b8213ddd632f21 | 501615c82801733e69c7447ab9fd68d3883ed947 | /hotfix/.svn/pristine/12/1200976e1b121cb5062d20f640b8213ddd632f21.svn-base | 0dcbbd208f84d8d26629d6051601bfbd3375421b | [] | no_license | az0ne/python | b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee | aec5d23bb412f7dfca374fb5c5b9988c1b817347 | refs/heads/master | 2021-07-18T02:08:46.314972 | 2017-10-27T06:23:36 | 2017-10-27T06:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,521 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import time
from functools import wraps
import cPickle as pickle
from mz_platform.services.core.log_service import log_it
from mz_platform.apis.api_result import ApiResult
from mz_platform.exceptions.mz_exception import MZException
from mz_platform.exceptions.mz_exception import MZSysException
from mz_platform.services.core.cache_service import CacheService
def sys_func_log(func):
    """Decorator: log entry/exit and wall-clock duration (system level).

    TODO: also record the call arguments.
    """
    @wraps(func)
    def logged(*args, **kwargs):
        log_it('enter:' + func.__name__)
        started = time.time()
        outcome = func(*args, **kwargs)
        elapsed = time.time() - started
        log_it('out:%s, during:%fs' % (func.__name__, elapsed))
        return outcome
    return logged
def business_func_log(func):
    """Decorator: log entry/exit and wall-clock duration (business level).

    TODO: also record the call arguments.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        log_it('enter:' + func.__name__)
        began = time.time()
        result = func(*args, **kwargs)
        spent = time.time() - began
        log_it('out:%s, during:%fs' % (func.__name__, spent))
        return result
    return wrapper
def api_except_catcher(func):
    """
    @brief catch api function exception
    @todo
        - record function arg

    Wraps an API entry point so that it always returns an ApiResult:
    err_code 0 with the function's result on success, the exception's
    code/description and a None result on failure.
    """
    @wraps(func)
    def _deco(*args, **kwargs):
        err_code = 0x0000
        err_desc = ""
        ret = None
        try:
            ret = func(*args, **kwargs)
        # Python 2 "except X, e" syntax is used throughout this module.
        except MZException, e:
            # Known business exception: propagate its code/description.
            err_code = e.err_code
            err_desc = e.err_desc
            ret = None
            e.print_exc()
        except Exception, e:
            # Unexpected exception: wrap it as a system-level MZ exception.
            e = MZSysException(e, "business exception catched")
            err_code = e.err_code
            err_desc = e.err_desc
            ret = None
            e.print_exc()
        except:
            # Anything else (old-style/string exceptions): wrap as unknown.
            t, n, tb = sys.exc_info()
            e = MZSysException(n, "unknown exception catched")
            err_code = e.err_code
            err_desc = e.err_desc
            ret = None
            e.print_exc()
        finally:
            # NOTE(review): returning from finally suppresses anything the
            # handlers above might re-raise; every call yields an ApiResult.
            return ApiResult(err_code, err_desc, ret)
    return _deco
class DataCache(object):
    """Decorator factory that caches a function's result under a namespace.

    Subclasses plug in a real backend by overriding data_cacher_get /
    data_cacher_set and decide which calls are cacheable via is_concern;
    this base class itself never caches anything.
    """
    def __init__(self, ns):
        # Namespace prefixed to the wrapped function's name in cache keys.
        self.ns = ns
    def data_cacher_get(self, name, *args, **kwargs):
        """Return the cached value for *name*, or None when absent."""
        return None
    def data_cacher_set(self, name, value, *args, **kwargs):
        """Store *value* under *name* in the cache backend."""
        pass
    def is_concern(self, concern, *args, **kwargs):
        """Tell whether this call matches the caller-declared cache keys."""
        return False
    def __call__(self, concern):
        """Build the decorator; *concern* lists keywords worth caching."""
        def decorator(func):
            @wraps(func)
            def cached_call(*args, **kwargs):
                cache_key = '%s:%s' % (self.ns, func.__name__)
                cacheable = self.is_concern(concern, *args, **kwargs)
                if cacheable:
                    hit = self.data_cacher_get(cache_key, *args, **kwargs)
                    if hit:
                        return hit
                value = func(*args, **kwargs)
                if cacheable:
                    self.data_cacher_set(cache_key, value, *args, **kwargs)
                return value
            return cached_call
        return decorator
class KWDataCache(DataCache):
    """
    @brief cache backend keyed by keyword arguments (key/value args)
    """
    def data_cacher_get(self, name, *args, **kwargs):
        # Look up the value in the shared cache service, deserializing
        # with pickle; a cache miss yields None.
        dch = CacheService.default_instance().data_cache_handler
        r = dch.get(name, sub_kw_name=kwargs, deserialize_func=pickle.loads)
        return r
    def data_cacher_set(self, name, value, *args, **kwargs):
        # Store the pickled value under `name` plus the keyword arguments.
        dch = CacheService.default_instance().data_cache_handler
        dch.set(name, value, sub_kw_name=kwargs, serialize_func=pickle.dumps)
    def is_concern(self, concern, *args, **kwargs):
        # Cache only calls made with exactly one keyword argument whose
        # name appears in `concern`.
        # NOTE(review): kwargs.keys()[0] only works on Python 2; dict views
        # are not indexable on Python 3 -- confirm before porting.
        return len(kwargs) == 1 and kwargs.keys()[0] in concern
# #####################################
# example
# @cache_api_data_kw(('key1', 'key2'))
# #####################################
# Module-level decorator instance for caching API-layer results.
cache_api_data_kw = KWDataCache('api')
| [
"1461847795@qq.com"
] | 1461847795@qq.com | |
ce9e0c110ab57c35bd0bdea209890c1efc74d1cb | 28b2144816ce1bf62b7481cd857fdc831a501f6b | /tabook/tests/functional/test_abook.py | e639eb978f75916274865bae76e3895278005cf8 | [] | no_license | t0ster/Turbo-Address-Book | 3191a837d7d28cf9b8c9d20331fe0518062b3892 | 8c5463b1d4423a0c41d7ed75ff9a512ae1bc515b | refs/heads/master | 2020-05-31T14:10:46.449799 | 2011-06-28T14:07:49 | 2011-06-28T14:07:49 | 1,953,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from tabook.tests import *
class TestAbookController(TestController):
def test_index(self):
response = self.app.get(url(controller='abook', action='index'))
# Test response...
| [
"roman@bravetstudio.com"
] | roman@bravetstudio.com |
43b7a07cce169607e19f1009edb61ff5d942f077 | b70eb5577099f88ae9f684f2c87647f98e26d42b | /hpc-historias-clinicas/diagnosticos/migrations/0004_auto_20150425_1459.py | 3adebbad200ee6845c2de4535b56e1c10fcae385 | [] | no_license | btenaglia/hpc-historias-clinicas | be1a392a119a72055ba643fba9c9a09b740aef47 | 649d8660381381b1c591667760c122d73071d5ec | refs/heads/master | 2020-06-03T19:05:17.910077 | 2015-06-10T23:05:31 | 2015-06-10T23:05:31 | 32,827,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated: refresh the defaults on diagnosticos fecha/hora."""
    dependencies = [
        ('diagnosticos', '0003_auto_20150407_2123'),
    ]
    operations = [
        migrations.AlterField(
            model_name='diagnosticos',
            name='fecha',
            # NOTE(review): the default is the timestamp captured when
            # makemigrations ran, not "now" at insert time -- this is the
            # usual artifact of a datetime.now() default in the model.
            field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 459617), help_text='Formato: dd/mm/yyyy'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='diagnosticos',
            name='hora',
            field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 459671), help_text='Formato: hh:mm'),
            preserve_default=True,
        ),
    ]
| [
"brunomartintenaglia@gmail.com"
] | brunomartintenaglia@gmail.com |
3fc765874c033319ae16b8f1f830511729d3e15f | eae4038397ea0b0b1ea56424888f53369a1e4282 | /moai/validation/single.py | f781534910c052c4c1bd83781de737fc6c0832f7 | [
"Apache-2.0"
] | permissive | iampakos/moai-0.1.0a2 | b2378e9e0a84b85c0e2251a419d39d3da7ea17f9 | 2f066bffc66faca0bdc9af53e7992df34d09ce5d | refs/heads/main | 2023-03-13T13:27:54.318498 | 2021-04-01T14:36:52 | 2021-04-01T14:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | import moai.utils.engine as mieng
import torch
import omegaconf.omegaconf
import typing
import logging
import inspect
import itertools
log = logging.getLogger(__name__)
__all__ = ['Metric']
class Metric(mieng.Single):
    """Single-metric wrapper: wires configured tensor keys into the metric
    module's forward() and averages non-scalar results."""
    def __init__(self,
        metrics: omegaconf.DictConfig,
        **kwargs: typing.Mapping[str, typing.Any],
    ):
        super(Metric, self).__init__(
            items=metrics,
            name="metric",
        )
        # NOTE(review): `indicators` is neither defined nor imported in
        # this file -- it looks like it should reference the module that
        # hosts the metric implementations; confirm upstream.
        loop = ((key, params) for key, params in kwargs.items() if hasattr(indicators, key))
        for k, p in loop:
            last_module = self.metric
            sig = inspect.signature(last_module.forward)
            # Zip the configured argument lists (plus 'out') so each
            # iteration yields one (in_keys..., out_key) tuple.
            for keys in zip(*list(p[prop] for prop in itertools.chain(sig.parameters, ['out']))):
                # k/p/f are bound as lambda defaults to dodge the classic
                # late-binding closure pitfall inside this loop.
                self.execs.append(lambda tensor_dict, metric_dict, k=keys, p=sig.parameters.keys(), f=last_module:
                    metric_dict.update({
                        k[-1]: f(**dict(zip(p,
                            list(tensor_dict[i] for i in k[:-1])
                        )))
                    })
                )
    def forward(self,
        tensors: typing.Dict[str, torch.Tensor]
    ) -> typing.Dict[str, torch.Tensor]:
        """Evaluate every wired metric on `tensors`; non-scalar metric
        tensors are reduced to their mean, scalars pass through."""
        metrics = { }
        for exe in self.execs:
            exe(tensors, metrics)
        returned = { }
        for k, m in metrics.items():
            returned[k] = torch.mean(m) if len(m.size()) > 0 else m
        return returned
"pakos@ai-in-motion.dev"
] | pakos@ai-in-motion.dev |
738ad03ab4aa7aba2177d4d3cfc449823971f0b0 | 0954a4a6d90fc66beee265c22e1fd829ddaf73cd | /digitalcollege/digital/migrations/0006_remove_departmentmodel_d_year.py | ea613757a14f2f32fa6d6437d7e354ca93266907 | [] | no_license | deepawalekedar319/CollegeMiniProject | a302184258e8213ee7604403e3abc60c19d342a8 | 52f46521c305bca167fdf2f4b41e28182476b3e1 | refs/heads/main | 2023-08-30T04:15:18.851167 | 2021-11-08T05:04:11 | 2021-11-08T05:04:11 | 425,695,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2021-01-04 08:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the d_year field added by migration 0005."""
    dependencies = [
        ('digital', '0005_departmentmodel_d_year'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='departmentmodel',
            name='d_year',
        ),
    ]
| [
"deepawalekedar319@gmail.com"
] | deepawalekedar319@gmail.com |
74b937d7824d37e846f7fd8ae0a5899ffe9ab42c | 67b0379a12a60e9f26232b81047de3470c4a9ff9 | /shop/migrations/0064_auto_20170626_1015.py | 8c03515b558ecc2c5e4c92ccd9c34ce2e360a2c9 | [] | no_license | vintkor/whitemandarin | 8ea9022b889fac718e0858873a07c586cf8da729 | 5afcfc5eef1bb1cc2febf519b04a4819a7b9648f | refs/heads/master | 2021-05-06T03:35:09.367375 | 2017-12-20T15:43:08 | 2017-12-20T15:43:08 | 114,904,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-06-26 07:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add Category.hotlineurl, refresh lastscan default."""
    dependencies = [
        ('shop', '0063_auto_20170610_1623'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='hotlineurl',
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name=b'Hotline url'),
        ),
        migrations.AlterField(
            model_name='colorproduct',
            name='lastscan_date',
            # NOTE(review): default is the makemigrations-time timestamp,
            # and 'Lastsscan' looks like a typo in the model's verbose_name;
            # both must be fixed in the model, not in this applied migration.
            field=models.DateTimeField(default=datetime.datetime(2017, 6, 26, 10, 15, 53, 286167), verbose_name=b'Lastsscan Date'),
        ),
    ]
| [
"alkv84@yandex.ru"
] | alkv84@yandex.ru |
ddf283ffa12f6b269f7d56040ed800a6480b077c | cee65c4806593554662330368c799c14ec943454 | /src/resource-graph/azext_resourcegraph/vendored_sdks/resourcegraph/models/facet_request_options.py | 4ea94a3bd96311a982f9df2ec766f212ee6e4fad | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | azclibot/azure-cli-extensions | d5d1a4ecdfc87fd79f5ad042fb85cdbf881897d2 | c230646258d4b56efb7d44eb7a0230f2943da6f6 | refs/heads/master | 2023-08-28T03:55:02.311902 | 2019-04-04T16:05:45 | 2019-04-04T16:05:45 | 179,548,695 | 1 | 1 | MIT | 2021-07-28T15:26:17 | 2019-04-04T17:54:39 | Python | UTF-8 | Python | false | false | 1,316 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FacetRequestOptions(Model):
    """The options for facet evaluation.
    :param sort_order: The sorting order by the hit count. Possible values
     include: 'asc', 'desc'. Default value: "desc" .
    :type sort_order: str or ~azure.mgmt.resourcegraph.models.FacetSortOrder
    :param top: The maximum number of facet rows that should be returned.
    :type top: int
    """

    # msrest uses _validation to reject out-of-range values client-side.
    _validation = {
        'top': {'maximum': 1000, 'minimum': 1},
    }

    # Maps Python attribute names to their wire (JSON) names and types.
    _attribute_map = {
        'sort_order': {'key': 'sortOrder', 'type': 'FacetSortOrder'},
        'top': {'key': '$top', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(FacetRequestOptions, self).__init__(**kwargs)
        # Default to descending hit-count order when not specified.
        self.sort_order = kwargs.get('sort_order', "desc")
        self.top = kwargs.get('top', None)
| [
"tjprescott@users.noreply.github.com"
] | tjprescott@users.noreply.github.com |
40e64497e28bc9b351edfd900f44634d458a6244 | e13daffd10be4fc8dd004e4d2bd4fc8e0c408840 | /lbworkflow/core/transition.py | adcd71ef73825ff290d50ece913bcb83dd395a8c | [] | no_license | jimmy201602/django-lb-workflow | 47110b49c7a5b7a4d18c2de79cbbd701a0ef5743 | 52a44ed33964d6392c4eeb71047e74c892caa716 | refs/heads/master | 2018-11-26T12:20:44.696867 | 2018-09-05T03:42:36 | 2018-09-05T03:42:36 | 114,515,837 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,851 | py | from django.utils import timezone
from lbworkflow.models import Event
from lbworkflow.models import Task
from .sendmsg import wf_send_msg
def create_event(instance, transition, **kwargs):
    """Record a workflow Event for *transition* applied to *instance*.

    act_type is 'agree' for agree transitions, 'transition' for persisted
    transitions, and the transition's code for pseudo-transitions.
    """
    if transition.is_agree:
        act_type = 'agree'
    elif transition.pk:
        act_type = 'transition'
    else:
        act_type = transition.code
    return Event.objects.create(
        instance=instance,
        act_name=transition.name,
        act_type=act_type,
        **kwargs)
class TransitionExecutor(object):
    """Apply a workflow transition to an instance on behalf of an operator.

    Completes the operator's task, records an Event, advances the instance
    to the transition's output node, spawns tasks for the next operators,
    and sends notifications. A 'joint' (countersign) transition only moves
    the node once the last outstanding task has been completed.
    """
    def __init__(
            self, operator, instance, task, transition=None,
            comment='', attachments=()):
        self.wf_obj = instance.content_object
        self.instance = instance
        self.operator = operator
        self.task = task
        self.transition = transition
        self.comment = comment
        # Bug fix: the default used to be a shared mutable list ([]);
        # copy into a fresh list so executors can never share state.
        self.attachments = list(attachments)
        self.from_node = instance.cur_node
        # hold&assign wouldn't change node
        self.to_node = transition.output_node
        self.all_todo_tasks = instance.get_todo_tasks()
        self.last_event = None
    def execute(self):
        """Run the transition end to end; returns None."""
        # TODO check permission
        all_todo_tasks = self.all_todo_tasks
        need_transfer = False
        if self.transition.routing_rule == 'joint' and self.transition.code not in ['back to', 'rollback']:
            # Countersign: only the last remaining task triggers the move.
            if all_todo_tasks.count() == 1:
                need_transfer = True
        else:
            need_transfer = True
        self._complete_task(need_transfer)
        if not need_transfer:
            return
        self._do_transfer()
        # if is agree should check if need auto agree for next node
        if self.transition.is_agree or self.to_node.node_type == 'router':
            self._auto_agree_next_node()
    def _auto_agree_next_node(self):
        """Auto-apply the agree transition for users who already agreed."""
        instance = self.instance
        agree_transition = instance.get_agree_transition()
        all_todo_tasks = instance.get_todo_tasks()
        if not agree_transition:
            return
        # if from router, create a task
        if self.to_node.node_type == 'router':
            task = Task(
                instance=self.instance,
                node=self.instance.cur_node,
                user=self.operator,
            )
            all_todo_tasks = [task]
        for task in all_todo_tasks:
            users = [task.user, task.agent_user]
            users = [e for e in users if e]
            for user in set(users):
                if self.instance.cur_node != task.node:  # has processed
                    return
                if instance.is_user_agreed(user):
                    TransitionExecutor(self.operator, instance, task, agree_transition).execute()
    def _complete_task(self, need_transfer):
        """Close the current work item, create/update its Event, return it."""
        instance = self.instance
        task = self.task
        transition = self.transition
        task.status = 'completed'
        task.save()
        # When not transferring (joint with tasks outstanding), stay put.
        to_node = self.to_node if need_transfer else instance.cur_node
        self.to_node = to_node
        event = None
        pre_last_event = instance.last_event()
        if pre_last_event and pre_last_event.new_node.node_type == 'router':
            # Reuse the router's event instead of recording a second hop.
            event = pre_last_event
            event.new_node = to_node
            event.save()
        if not event:
            event = create_event(
                instance, transition,
                comment=self.comment, user=self.operator,
                old_node=task.node, new_node=to_node,
                task=task)
        if self.attachments:
            event.attachments.add(*self.attachments)
        self.last_event = event
        return event
    def _do_transfer_for_instance(self):
        """Move the instance itself and fire its lifecycle hooks."""
        instance = self.instance
        wf_obj = self.wf_obj
        from_node = self.from_node
        from_status = from_node.status
        to_node = self.to_node
        to_status = self.to_node.status
        # Submit
        if not from_node.is_submitted() and to_node.is_submitted():
            instance.submit_time = timezone.now()
            wf_obj.on_submit()
        # cancel & give up & reject
        if from_node.is_submitted() and not to_node.is_submitted():
            wf_obj.on_fail()
        # complete
        if from_status != 'completed' and to_status == 'completed':
            instance.end_on = timezone.now()
            self.wf_obj.on_complete()
        # cancel complete
        if from_status == 'completed' and to_status != 'completed':
            instance.end_on = None
        instance.cur_node = self.to_node
        self.wf_obj.on_do_transition(from_node, to_node)
        instance.save()
    def _send_notification(self):
        """Notify watchers and the creator about the processed event."""
        instance = self.instance
        last_event = self.last_event
        notice_users = last_event.notice_users.exclude(
            pk__in=[self.operator.pk, instance.created_by.pk]).distinct()
        wf_send_msg(notice_users, 'notify', last_event)
        # send notification to instance.created_by
        if instance.created_by != self.operator:
            wf_send_msg([instance.created_by], 'transfered', last_event)
    def _gen_new_task(self):
        """Create a task per next operator and notify them (and agents)."""
        last_event = self.last_event
        if not last_event:
            return
        next_operators = last_event.next_operators.distinct()
        need_notify_operators = []
        for operator in next_operators:
            new_task = Task(
                instance=self.instance, node=self.to_node,
                user=operator)
            new_task.update_authorization(commit=True)
            # notify next operator(not include current operator and instance.created_by)
            if operator not in [self.operator, self.instance.created_by]:
                need_notify_operators.append(operator)
            agent_user = new_task.agent_user
            if agent_user and agent_user not in [self.operator, self.instance.created_by]:
                need_notify_operators.append(agent_user)
        wf_send_msg(need_notify_operators, 'new_task', last_event)
    def update_users_on_transfer(self):
        """Record next operators, watchers and viewers for the new node."""
        instance = self.instance
        event = self.last_event
        to_node = event.new_node
        next_operators = to_node.get_operators(instance.created_by, self.operator, instance)
        event.next_operators.add(*next_operators)
        notice_users = to_node.get_notice_users(instance.created_by, self.operator, instance)
        event.notice_users.add(*notice_users)
        can_view_users = to_node.get_share_users(instance.created_by, self.operator, instance)
        instance.can_view_users.add(*can_view_users)
    def _do_transfer(self):
        """Advance the workflow after the deciding task was completed."""
        self.update_users_on_transfer()
        # auto complete all current work item
        self.all_todo_tasks.update(status='completed')
        self._do_transfer_for_instance()
        self._gen_new_task()
        self._send_notification()
"zbirder@gmail.com"
] | zbirder@gmail.com |
c855a992495e5ab628fbe70c872eae00b20b337a | 5d58fa1d54855f18bad5688de4459af8d461c0ac | /tests/unit/modules/network/onyx/test_onyx_ospf.py | 893dc959fa735fa3382b2d96552f34de11bd4532 | [] | no_license | nasirhm/general | b3b52f6e31be3de8bae0414da620d8cdbb2c2366 | 5ccd89933297f5587dae5cd114e24ea5c54f7ce5 | refs/heads/master | 2021-01-04T07:03:21.121102 | 2020-02-13T20:59:56 | 2020-02-13T20:59:56 | 240,440,187 | 1 | 0 | null | 2020-02-14T06:08:14 | 2020-02-14T06:08:13 | null | UTF-8 | Python | false | false | 4,644 | py | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules import onyx_ospf
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxOspfModule(TestOnyxModule):
    """Unit tests for the onyx_ospf Ansible module with device I/O mocked."""
    module = onyx_ospf
    def setUp(self):
        super(TestOnyxOspfModule, self).setUp()
        # Toggled per-test to simulate whether OSPF is already configured.
        self._ospf_exists = True
        self.mock_get_config = patch.object(
            onyx_ospf.OnyxOspfModule,
            "_get_ospf_config")
        self.get_config = self.mock_get_config.start()
        self.mock_get_interfaces_config = patch.object(
            onyx_ospf.OnyxOspfModule,
            "_get_ospf_interfaces_config")
        self.get_interfaces_config = self.mock_get_interfaces_config.start()
        self.mock_load_config = patch(
            'ansible_collections.community.general.plugins.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()
    def tearDown(self):
        super(TestOnyxOspfModule, self).tearDown()
        self.mock_get_config.stop()
        # Bug fix: this patch was started in setUp() but never stopped,
        # leaking the mock into every later test of the run.
        self.mock_get_interfaces_config.stop()
        self.mock_load_config.stop()
    def load_fixtures(self, commands=None, transport='cli'):
        # Feed canned "show" output when OSPF exists, otherwise None.
        if self._ospf_exists:
            config_file = 'onyx_ospf_show.cfg'
            self.get_config.return_value = load_fixture(config_file)
            config_file = 'onyx_ospf_interfaces_show.cfg'
            self.get_interfaces_config.return_value = load_fixture(config_file)
        else:
            self.get_config.return_value = None
            self.get_interfaces_config.return_value = None
        self.load_config.return_value = None
    def test_ospf_absent_no_change(self):
        set_module_args(dict(ospf=3, state='absent'))
        self.execute_module(changed=False)
    def test_ospf_present_no_change(self):
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=[interface]))
        self.execute_module(changed=False)
    def test_ospf_present_remove(self):
        set_module_args(dict(ospf=2, state='absent'))
        commands = ['no router ospf 2']
        self.execute_module(changed=True, commands=commands)
    def test_ospf_change_router(self):
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, router_id='10.2.3.5',
                             interfaces=[interface]))
        commands = ['router ospf 2', 'router-id 10.2.3.5', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)
    def test_ospf_remove_router(self):
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, interfaces=[interface]))
        commands = ['router ospf 2', 'no router-id', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)
    def test_ospf_add_interface(self):
        interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
                      dict(name='Loopback 2', area='0.0.0.0')]
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=interfaces))
        commands = ['interface loopback 2 ip ospf area 0.0.0.0']
        self.execute_module(changed=True, commands=commands)
    def test_ospf_remove_interface(self):
        set_module_args(dict(ospf=2, router_id='10.2.3.4'))
        commands = ['interface loopback 1 no ip ospf area']
        self.execute_module(changed=True, commands=commands)
    def test_ospf_add(self):
        # Simulate a device with no OSPF configured at all.
        self._ospf_exists = False
        interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
                      dict(name='Vlan 210', area='0.0.0.0'),
                      dict(name='Eth1/1', area='0.0.0.0'),
                      dict(name='Po1', area='0.0.0.0')]
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=interfaces))
        commands = ['router ospf 2', 'router-id 10.2.3.4', 'exit',
                    'interface loopback 1 ip ospf area 0.0.0.0',
                    'interface vlan 210 ip ospf area 0.0.0.0',
                    'interface ethernet 1/1 ip ospf area 0.0.0.0',
                    'interface port-channel 1 ip ospf area 0.0.0.0']
        self.execute_module(changed=True, commands=commands)
"ansible_migration@example.com"
] | ansible_migration@example.com |
ce99b43702930a3bb2d59fddf1e3c9cbc98cc622 | 9f58ec75177221f1e483c1c1cc9166a6229851d1 | /unsorted_solutions/question47.py | 3fa98b758e9854dfd55c4b94d0e28ef84191563b | [] | no_license | neequole/my-python-programming-exercises | 142b520dcfd78e5c92cf01a5cefecdfdb2939ccd | d6806e02cea9952e782e6921b903b1bb414364ee | refs/heads/master | 2021-08-23T04:48:06.211251 | 2017-12-03T11:40:14 | 2017-12-03T11:40:14 | 103,946,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | """ Question 47:
Write a program which can filter() to make a list whose elements are even number
between 1 and 20 (both included).
Hints:
Use filter() to filter elements of a list.
Use lambda to define anonymous functions.
"""
out = list(filter(lambda x: x % 2 == 0, range(1, 21)))
print(out)
| [
"nicole.tibay@johngaltventures.com"
] | nicole.tibay@johngaltventures.com |
873d7960f8851a82a605915be61b0e9cb964e7fc | a3b2c7069c9fab8632b0568db5ab79aceacf9c9c | /devel/lib/python2.7/dist-packages/rqt_bag_plugins/__init__.py | c2f70e437d66d6ccfc8247c2b37ce4e8cdbfa026 | [] | no_license | tbake0155/bluedragon_workspace | 08ed85d9de29c178704bd3f883acafae473b175e | 384d863e00689cf40cde4933447210bbb1ba8636 | refs/heads/master | 2021-05-12T01:35:45.896266 | 2018-01-15T14:59:39 | 2018-01-15T14:59:39 | 117,558,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/tim/catkin_ws/src/rqt_common_plugins/rqt_bag_plugins/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
| [
"tbake0155@gmail.com"
] | tbake0155@gmail.com |
d1113c24a628593d5362f4391c033deff3884613 | 2bc194ed3c23d986724928cc5258cdebaa3fa1c6 | /handlers/domain.py | 1d13df00fd3515d75d5832e9aed99271ae339e9d | [] | no_license | mrcheng0910/whoismanage | 057a57637ef4f51fc9d55b181213c2bace9bfe02 | ebd337760a791367bd5b390ad3a86b6247d1251a | refs/heads/master | 2021-01-10T06:14:48.511919 | 2016-01-13T14:35:54 | 2016-01-13T14:35:54 | 46,243,616 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | # encoding:utf-8
"""
功能:域名数量统计所需数据
"""
import decimal
import tornado
import json
from models.domain_db import DomainDb
PATH = './domain/' # 模板地址
class DomainIndexHandler(tornado.web.RequestHandler):
"""各个顶级后缀域名数量统计"""
def get(self):
domains, total = DomainDb().fetch_domain(11)
self.render(
PATH + 'domain_overall.html',
domains=json.dumps(domains),
total=total
)
class DomainTldIndexHandler(tornado.web.RequestHandler):
"""
获取指定顶级域名后缀首页
"""
def get(self):
self.render(PATH+'domain_tld.html')
class DomainTldNumHandler(tornado.web.RequestHandler):
"""
获取指定顶级域名后缀的域名数量
"""
def get(self):
tld = self.get_argument('tld','None')
# total,tld_num,whois_tld,whois_total = DomainDb().get_tld_num(tld)
results = DomainDb().get_tld_num(tld)
self.write(json.dumps(results, cls=DecimalEncoder))
class DecimalEncoder(json.JSONEncoder):
"""
解决json.dumps不能格式化Decimal问题
"""
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
return super(DecimalEncoder, self).default(o) | [
"mrcheng0910@gmail.com"
] | mrcheng0910@gmail.com |
801b906f1c687e553348f1ebf8c65d7708ca6de7 | c0950683d84a3c5999a28ac32668e8dbd159e036 | /dbrec3d/bof/compute_object_level.py | e0104252dcfc92e3232e2cdb182af15e18fd1a82 | [
"BSD-3-Clause"
] | permissive | mirestrepo/voxels-at-lems | 0a88751680daa3c48f44f49bb4ef0f855a90fa18 | df47d031653d2ad877a97b3c1ea574b924b7d4c2 | refs/heads/master | 2021-01-17T07:26:12.665247 | 2016-07-20T17:49:26 | 2016-07-20T17:49:26 | 3,919,012 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 22, 2011
@author:Isabel Restrepo
Save the level of the smallest cell entirely containing the object
"""
import os;
import dbrec3d_batch
import time
import optparse
import sys
import glob
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
#*******************The Main Algorithm ************************#
if __name__=="__main__":
dbrec3d_batch.register_processes();
dbrec3d_batch.register_datatypes();
#Parse inputs
print ("******************************Compute Object level***************************")
parser = optparse.OptionParser(description='Init Category info');
parser.add_option('--bof_dir', action="store", dest="bof_dir");
options, args = parser.parse_args();
bof_dir = options.bof_dir;
if not os.path.isdir(bof_dir +"/"):
print "Invalid bof Dir"
sys.exit(-1);
#load category info
dbrec3d_batch.init_process("bofLoadCategoryInfoProces");
dbrec3d_batch.set_input_string(0, bof_dir);
dbrec3d_batch.set_input_string(1, "bof_info_train.xml")
dbrec3d_batch.set_input_string(2, "bof_category_info_old.xml")
dbrec3d_batch.run_process();
(id, type) = dbrec3d_batch.commit_output(0);
categories= dbvalue(id, type);
#load category info
dbrec3d_batch.init_process("bof_object_level_process");
dbrec3d_batch.set_input_from_db(0, categories);
dbrec3d_batch.run_process();
| [
"isabelrestre@gmail.com"
] | isabelrestre@gmail.com |
784bcb8b10887b7a5bfaf0455c7e559533e0db6b | d324b3d4ce953574c5945cda64e179f33c36c71b | /php/php-sky/grpc/src/python/grpcio_tests/tests/unit/_grpc_shutdown_test.py | 35823c7451f2b5155c92d63cfc80c22d11773841 | [
"Apache-2.0"
] | permissive | Denticle/docker-base | decc36cc8eb01be1157d0c0417958c2c80ac0d2f | 232115202594f4ea334d512dffb03f34451eb147 | refs/heads/main | 2023-04-21T10:08:29.582031 | 2021-05-13T07:27:52 | 2021-05-13T07:27:52 | 320,431,033 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py | # Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the gRPC Core shutdown path."""
import time
import threading
import unittest
import datetime
import grpc
_TIMEOUT_FOR_SEGFAULT = datetime.timedelta(seconds=10)
class GrpcShutdownTest(unittest.TestCase):
def test_channel_close_with_connectivity_watcher(self):
"""Originated by https://github.com/grpc/grpc/issues/20299.
The grpc_shutdown happens synchronously, but there might be Core object
references left in Cython which might lead to ABORT or SIGSEGV.
"""
connection_failed = threading.Event()
def on_state_change(state):
if state in (grpc.ChannelConnectivity.TRANSIENT_FAILURE,
grpc.ChannelConnectivity.SHUTDOWN):
connection_failed.set()
# Connects to an void address, and subscribes state changes
channel = grpc.insecure_channel("0.1.1.1:12345")
channel.subscribe(on_state_change, True)
deadline = datetime.datetime.now() + _TIMEOUT_FOR_SEGFAULT
while datetime.datetime.now() < deadline:
time.sleep(0.1)
if connection_failed.is_set():
channel.close()
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
a4a75d6f57c7d5507c1728eafd50f371d56dda12 | a4a5c6f185ed38ea4b93e49408f369f2ae7073e9 | /aliyun-python-sdk-iot/aliyunsdkiot/request/v20170420/PubBroadcastRequest.py | 2f067cac3538c6f3deb82417c2224f42bb8c9a2a | [
"Apache-2.0"
] | permissive | samuelchen/aliyun-openapi-python-sdk | 86ee6eb9573e68cbf98ea61328818bfca005f25f | 52dda2326c34633858e4ed83a526dadce90dd5ef | refs/heads/master | 2020-03-07T03:50:18.248590 | 2018-04-02T13:48:10 | 2018-04-02T13:48:10 | 127,248,156 | 1 | 0 | null | 2018-03-29T06:39:31 | 2018-03-29T06:39:30 | null | UTF-8 | Python | false | false | 1,520 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class PubBroadcastRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2017-04-20', 'PubBroadcast')
def get_TopicFullName(self):
return self.get_query_params().get('TopicFullName')
def set_TopicFullName(self,TopicFullName):
self.add_query_param('TopicFullName',TopicFullName)
def get_MessageContent(self):
return self.get_query_params().get('MessageContent')
def set_MessageContent(self,MessageContent):
self.add_query_param('MessageContent',MessageContent)
def get_ProductKey(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
38221f887d5dd3d306c517976168c1ae095db6f3 | 978184a03ecf7b0fe60fe824606877e5ad340c25 | /G/exo_7.16.py | 77937247652c008c8d4fdeb8ad9c281afe6e6e2d | [] | no_license | zinsmatt/Kalman_Filter_MOOC | 9e88a84818c09e2d01ea102855b7334bc2d0800a | 01d3ae3a213e94f480338f0a10bea5663185f167 | refs/heads/master | 2023-01-27T12:22:54.117402 | 2020-12-09T10:18:28 | 2020-12-09T10:18:28 | 304,326,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | from roblib import * # available at https://www.ensta-bretagne.fr/jaulin/roblib.py
def draw_invpend(ax,x, w): #inverted pendulum
s,θ=x[0,0],x[1,0]
draw_box(ax,s-0.7,s+0.7,-0.25,0,'blue')
plot( [s,s-sin(θ)],[0,cos(θ)],'magenta', linewidth = 2)
plt.plot(w, 0, "or")
mc,l,g,mr = 5,1,9.81,1
dt = 0.04
x = array([[0,0.4,0,0]]).T
Γα = (sqrt(dt)*(10**-3))**2*eye(4)
def f(x,u):
s,θ,ds,dθ=x[0,0],x[1,0],x[2,0],x[3,0]
dds=(mr*sin(θ)*(g*cos(θ)- l*dθ**2) + u)/(mc+mr*sin(θ)**2)
ddθ= (sin(θ)*((mr+mc)*g - mr*l*dθ**2*cos(θ)) + cos(θ)*u)/ (l*(mc+mr*sin(θ)**2))
return array([[ds],[dθ],[dds],[ddθ]])
ax=init_figure(-3,3,-3,3)
A = np.array([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
[0.0, mr*g/mc, 0.0, 0.0],
[0.0, (mc+mr) * g / (l*mc), 0.0, 0.0]])
B = np.array([[0.0, 0.0, 1/mc, 1/(l*mc)]]).T
K = place_poles(A, B, [-2.0, -2.1, -2.2, -2.3]).gain_matrix
E = array([[1, 0, 0, 0]])
h = -inv(E @ inv(A - B @ K) @ B)
C = np.array([[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0]])
# L = place_poles(A.T, C.T, [-2.0, -2.1, -2.2, -2.3]).gain_matrix.T
xhat = np.zeros((4, 1))
Gx = eye(4)
Galpha = eye(4) * dt * 0.0001
Gbeta = 0.0001 * eye(2)
w = 2
for ti, t in enumerate(arange(0,10,dt)):
clear(ax)
draw_invpend(ax,x, w)
u = (-K @ xhat + h * w).item()
y = C @ x + 0.01 * randn(2, 1)
# Estimateur de Luenberger
# xhat = xhat + (A @ xhat + B * u - L @ (C @ xhat - y)) * dt
# Estimateur avec Kalman
xhat, Gx = kalman(xhat, Gx, dt * B * u, y, Galpha, Gbeta, eye(4 ) + dt * A, C)
α=mvnrnd1(Γα)
x = x + dt*f(x,u)+α
| [
"zins.matthieu@gmail.com"
] | zins.matthieu@gmail.com |
bf2556e4a09d1f8d8208b65c9bcf88234a143a89 | d529b72eb4610ddf0e0b8170354a21f87dbf0e42 | /Unit19/involved.py | 834b8689f1666e416494769723984c5bb04d1bc3 | [] | no_license | SaretMagnoslove/Udacity-CS101 | 57a8b6609e2f2a09135ea0189a782d66e225b641 | 2e573a362a4d8d688199777937c6aaff59f6b900 | refs/heads/master | 2021-04-15T17:43:05.522259 | 2018-06-20T23:33:26 | 2018-06-20T23:33:26 | 126,618,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | # Dictionaries of Dictionaries (of Dictionaries)
# The next several questions concern the data structure below for keeping
# track of Udacity's courses (where all of the values are strings):
# { <hexamester>, { <class>: { <property>: <value>, ... },
# ... },
# ... }
# For example,
courses = {
'feb2012': { 'cs101': {'name': 'Building a Search Engine',
'teacher': 'Dave',
'assistant': 'Peter C.'},
'cs373': {'name': 'Programming a Robotic Car',
'teacher': 'Sebastian',
'assistant': 'Andy'}},
'apr2012': { 'cs101': {'name': 'Building a Search Engine',
'teacher': 'Dave',
'assistant': 'Sarah'},
'cs212': {'name': 'The Design of Computer Programs',
'teacher': 'Peter N.',
'assistant': 'Andy',
'prereq': 'cs101'},
'cs253':
{'name': 'Web Application Engineering - Building a Blog',
'teacher': 'Steve',
'prereq': 'cs101'},
'cs262':
{'name': 'Programming Languages - Building a Web Browser',
'teacher': 'Wes',
'assistant': 'Peter C.',
'prereq': 'cs101'},
'cs373': {'name': 'Programming a Robotic Car',
'teacher': 'Sebastian'},
'cs387': {'name': 'Applied Cryptography',
'teacher': 'Dave'}},
'jan2044': { 'cs001': {'name': 'Building a Quantum Holodeck',
'teacher': 'Dorina'},
'cs003': {'name': 'Programming a Robotic Robotics Teacher',
'teacher': 'Jasper'},
}
}
# For the following questions, you will find the
# for <key> in <dictionary>:
# <block>
# construct useful. This loops through the key values in the Dictionary. For
# example, this procedure returns a list of all the courses offered in the given
# hexamester:
def courses_offered(courses, hexamester):
res = []
for c in courses[hexamester]:
res.append(c)
return res
# [Double Gold Star] Define a procedure, involved(courses, person), that takes
# as input a courses structure and a person and returns a Dictionary that
# describes all the courses the person is involved in. A person is involved
# in a course if they are a value for any property for the course. The output
# Dictionary should have hexamesters as its keys, and each value should be a
# list of courses that are offered that hexamester (the courses in the list
# can be in any order).
def involved(courses, person):
d = {}
for hexa in courses.keys():
for course in courses[hexa].keys():
if person in courses[hexa][course].values():
if hexa in d:
d[hexa].append(course)
else:
d[hexa] = [course]
return d
# For example:
print (involved(courses, 'Dave'))
#>>> {'apr2012': ['cs101', 'cs387'], 'feb2012': ['cs101']}
#print involved(courses, 'Peter C.')
#>>> {'apr2012': ['cs262'], 'feb2012': ['cs101']}
#print involved(courses, 'Dorina')
#>>> {'jan2044': ['cs001']}
#print involved(courses,'Peter')
#>>> {}
#print involved(courses,'Robotic')
#>>> {}
#print involved(courses, '')
#>>> {}
| [
"magnoslove@gmail.com"
] | magnoslove@gmail.com |
15f51480656364cc0aedcabaf36127e26ac783fb | 79e630cbbbeca74d8c1fab822d3d854518a7e7ca | /hanmaum/urls.py | 9836efddaaec98334f3c756a1f8199bdd031699c | [] | no_license | Son-GyeongSik/hanalum_web | f573f936068ba24b2215a74efdbb9e8f4b0ff9f8 | 3669b1d3c108c2aa64a7d0f11116adc0f385ce83 | refs/heads/main | 2023-03-24T08:16:24.098445 | 2021-03-14T05:06:46 | 2021-03-14T05:06:46 | 351,105,953 | 1 | 0 | null | 2021-03-24T14:16:35 | 2021-03-24T14:16:35 | null | UTF-8 | Python | false | false | 680 | py | """hanmaum 관련 urls 정의 파일입니다."""
from django.urls import path
from .views import (
edit, index, new, show, introduce, like, dislike, cancle, new_comment
)
app_name = 'hanmaum'
urlpatterns = [
path('', index, name="index"),
path('<int:article_id>', show, name="show"),
path('show/<int:article_id>', show, name="show"),
path('new', new, name="new"),
path('edit', edit, name="edit"),
path('introduce', introduce, name="introduce"),
path('like', like, name="like"),
path('dislike', dislike, name="dislike"),
path('cancle', cancle, name="cancle"),
path('<int:article_id>/comment/new/', new_comment, name="new_comment")
]
| [
"singun11@kookmin.ac.kr"
] | singun11@kookmin.ac.kr |
d73707052b010a015388947c1705c99bb8ae15ec | 248c535f3612c646bccadecafdca649fd788bb1f | /.history/app/models_20210927050430.py | 68917ed4d52d586d0562e0b5cd1b52726d578030 | [
"MIT"
] | permissive | GraceOswal/pitch-perfect | 3b923e4de5fff1a405dcb54374a1ba0522232025 | d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b | refs/heads/master | 2023-08-16T01:42:18.742154 | 2021-10-01T06:59:11 | 2021-10-01T06:59:11 | 410,224,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from . import db
# connect class user to pitchperfect database
class User(db.Model):
__table__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
def __repr | [
"graceoswal88@gmail.com"
] | graceoswal88@gmail.com |
2ceafa22468a3657f444a3f7565a74038f94664c | 5199d37699c7c104cd9b00ffecad8f70d0f4f203 | /test_mean.py | 0a5dc585e4ac6a50486572913654dcc0e61bd20c | [
"CC-BY-4.0",
"MIT"
] | permissive | Becksteinlab/workshop_testing | 81b9a85f7a002e6f78666138a9959d51f6949ec1 | e4ee392e6e9bd1f7c429290b8820cfd06a512032 | refs/heads/master | 2020-03-25T12:31:27.376484 | 2018-08-07T00:28:51 | 2018-08-07T00:28:51 | 143,780,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | import pytest
from mean import mean
def test_ints():
num_list = [1,2,3,4,5]
obs = mean(num_list)
exp = 3
assert obs == exp
def test_zero():
num_list=[0,2,4,6]
obs = mean(num_list)
exp = 3
assert obs == exp
def test_double():
# This one will fail in Python 2
num_list = [1,2,3,4]
obs = mean(num_list)
exp = 2.5
assert obs == exp
def test_long():
big = 100000000
obs = mean(range(1,big))
exp = big/2.0
assert obs == exp
| [
"orbeckst@gmail.com"
] | orbeckst@gmail.com |
d4dd8070546b4c3ce4a0fb08d5906e255dcdbe45 | 5bdf195972deec9378d14d1ba37994c0cae9ad7b | /dash-example/data.py | 1489c30d3b5c1e72c23352d1836d108e590bc256 | [
"BSD-2-Clause"
] | permissive | SoftwareDefinedBuildings/mortar-analytics | 42b40067b2c6056430b0cd11889993a10b8428a7 | df48efca45ab2636f53c3b7301bcaa21b6c4e91f | refs/heads/master | 2023-06-08T04:02:21.675221 | 2022-06-26T00:35:46 | 2022-06-26T00:35:46 | 168,413,215 | 20 | 10 | BSD-2-Clause | 2023-02-15T21:33:04 | 2019-01-30T20:50:29 | Python | UTF-8 | Python | false | false | 1,841 | py | import pymortar
import os
import pandas as pd
# use default values (environment variables):
# MORTAR_API_ADDRESS: mortardata.org:9001
# MORTAR_API_USERNAME: required
# MORTAR_API_PASSWORD: required
client = pymortar.Client({})
meter_query = "SELECT ?meter WHERE { ?meter rdf:type/rdfs:subClassOf* brick:Building_Electric_Meter };"
# run qualify stage to get list of sites with electric meters
resp = client.qualify([meter_query])
if resp.error != "":
print("ERROR: ", resp.error)
os.exit(1)
print("running on {0} sites".format(len(resp.sites)))
# define the view of meters (metadata)
meters = pymortar.View(
sites=resp.sites,
name="meters",
definition=meter_query,
)
# define the meter timeseries streams we want
meter_data = pymortar.DataFrame(
name="meters",
aggregation=pymortar.MEAN,
window="1h",
timeseries=[
pymortar.Timeseries(
view="meters",
dataVars=["?meter"]
)
]
)
# temporal parameters for the query: 2017-2018 @ 15min mean
time_params = pymortar.TimeParams(
start="2015-01-01T00:00:00Z",
end="2018-01-01T00:00:00Z",
)
# form the full request object
request = pymortar.FetchRequest(
sites=resp.sites,
views=[meters],
dataFrames=[meter_data],
time=time_params
)
# download the data
print("Starting to download data...")
data = client.fetch(request)
# compute daily min/max/mean for each site
ranges = []
for site in resp.sites:
meter_uuids = data.query("select meter_uuid from meters where site='{0}'".format(site))
meter_uuids = [row[0] for row in meter_uuids]
meterdf = data['meters'][meter_uuids].sum(axis=1)
ranges.append( [site, meterdf.min(), meterdf.max(), meterdf.mean()])
site_summary = pd.DataFrame.from_records(ranges)
site_summary.columns = ['site','min_daily','max_daily','mean_daily']
| [
"gtfierro225@gmail.com"
] | gtfierro225@gmail.com |
e7e8676236a60acdb5e6bee1d75bc7710446e73e | bcd711985fe4381f1599b797e6048a27e357f8d1 | /master/action/task_action.py | 938c94dd4ff23cef9563800f6f3c3c0681fe8dd6 | [] | no_license | No-bb-just-do-it/distributed-spider | 5c02847604350f404ca0a1eeea64c0d6e6c7aad8 | e8bf92a742968eb3c7acaede138132cd6ebe18f4 | refs/heads/master | 2020-03-20T22:56:02.742602 | 2018-05-30T03:33:08 | 2018-05-30T03:33:08 | 137,821,517 | 0 | 1 | null | 2018-06-19T00:40:40 | 2018-06-19T00:40:39 | null | UTF-8 | Python | false | false | 1,079 | py | # -*- coding: utf-8 -*-
'''
Created on 2017-12-08 13:52
---------
@summary:
---------
@author: Boris
'''
import sys
sys.path.append('..')
from utils.log import log
import utils.tools as tools
import web
import json
from service.task_service import TaskService
class TaskAction():
def __init__(self):
self.task_service = TaskService()
def deal_request(self, name):
web.header('Content-Type','text/html;charset=UTF-8')
data = json.loads(json.dumps(web.input()))
print(name)
print(data)
if name == 'get_task':
tasks = self.task_service.get_task()
return tools.dumps_json(tasks)
elif name == 'update_task':
tasks = eval(data.get('tasks', []))
status = data.get('status')
self.task_service.update_task_status(tasks, status)
return tools.dumps_json('{"status":1}')
def GET(self, name):
return self.deal_request(name)
def POST(self, name):
return self.deal_request(name)
| [
"boris_liu@foxmail.com"
] | boris_liu@foxmail.com |
4fd550bd7e17bdd66d350b97ce999f08dd31e922 | 7408dd6c91e601133ca6971d84639ce1b4f18622 | /Wikipedia/config.py | 2988e0481aa933b0e9d36b292f7e0b24f9546020 | [] | no_license | theY4Kman/Yakbot-plugins | 72370cff674335e45f18b27418bc5f0cb87f62ca | faac0bd4fb2599c8adf5aab583ce986aafa037c7 | refs/heads/master | 2021-01-01T19:21:03.117097 | 2012-09-17T03:27:57 | 2012-09-17T03:27:57 | 1,045,656 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | #!/usr/bin/env python
# =============================================================================
# Wikipedia
# Copyright (C) 2009 Zach "theY4Kman" Kanzler
# =============================================================================
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License, version 3.0, as published by the
# Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Wikipedia', True)
Wikipedia = conf.registerPlugin('Wikipedia')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Wikipedia, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"they4kman@gmail.com"
] | they4kman@gmail.com |
671b01f557ba3809a0c9a20394d172e2fced8c3a | 42e4cc242a2be105dae0288d02a08fbd95bb476a | /deepspeed/ops/sparse_attention/sparse_self_attention.py | 6e7d8905e0a806b09216e3c82414f2737c24e7bc | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | afiaka87/DeepSpeed | a49ca48a410190b631a78c392c25359ed4478577 | 83ff549aa365d4fee744074ac0a64f27571ecbc8 | refs/heads/main | 2023-04-14T16:22:37.595209 | 2021-04-12T09:20:06 | 2021-04-12T09:20:06 | 356,466,212 | 2 | 0 | MIT | 2021-04-12T09:20:07 | 2021-04-10T04:09:31 | Python | UTF-8 | Python | false | false | 6,794 | py | """
Copyright 2020 The Microsoft DeepSpeed Team
"""
import torch.nn as nn
from torch.nn.functional import *
import torch
from torch import distributed as dist
from collections import namedtuple
from deepspeed.ops.sparse_attention import MatMul, Softmax, SparsityConfig
import sys
class SparseSelfAttention(nn.Module):
"""Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
"""
def __init__(
self,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=SparsityConfig(num_heads=4),
key_padding_mask_mode='add',
attn_mask_mode='mul',
max_seq_length=2048):
"""Initialize the sparse self attention layer.
Arguments:
sparsity_config: optional: this parameter determins sparsity pattern configuration; it is based on SparsityConfig class.
key_padding_mask_mode: optional: a string determining if key padding mask needs to be added, `add`, or be multiplied, `mul`.
attn_mask_mode: optional: a string determining if attention mask needs to be added, `add`, or be multiplied, `mul`.
max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
"""
super().__init__()
# sparsity information
self.sparsity_config = sparsity_config
# initialize sparse layout and register as buffer
master_layout = self.sparsity_config.make_layout(max_seq_length)
self.register_buffer("master_layout", master_layout)
self._need_layout_synchronization = True
# mask modes
self.key_padding_mask_mode = key_padding_mask_mode
self.attn_mask_mode = attn_mask_mode
ops = dict()
def get_layout(self, L):
# if layout is never synchronized across GPUs, broadcast the layout from global rank 0
if self._need_layout_synchronization and dist.is_initialized():
dist.broadcast(self.master_layout, src=0)
self._need_layout_synchronization = False
if (L % self.sparsity_config.block != 0):
raise ValueError(
f'Sequence Length, {L}, needs to be dividable by Block size {self.sparsity_config.block}!'
)
num_blocks = L // self.sparsity_config.block
return self.master_layout[..., :num_blocks, :num_blocks].cpu() # layout needs to be a CPU tensor
# add to cache
def get_ops(self, H, L):
import sys
if L not in SparseSelfAttention.ops:
sparsity_layout = self.get_layout(L)
sparse_dot_sdd_nt = MatMul(sparsity_layout,
self.sparsity_config.block,
'sdd',
trans_a=False,
trans_b=True)
sparse_dot_dsd_nn = MatMul(sparsity_layout,
self.sparsity_config.block,
'dsd',
trans_a=False,
trans_b=False)
sparse_softmax = Softmax(sparsity_layout, self.sparsity_config.block)
SparseSelfAttention.ops[L] = (sparse_dot_sdd_nt,
sparse_dot_dsd_nn,
sparse_softmax)
return SparseSelfAttention.ops[L]
def transpose_key_for_scores(self, x, L):
bsz, num_heads, seq_len, head_dim = x.size()
if seq_len != L:
return x.permute(0, 1, 3, 2)
return x
def transpose_mask_for_sparse(self, qtype, x, is_key_padding_mask=False):
x = x.type(qtype)
if is_key_padding_mask:
xdim = x.dim()
for d in range(xdim - 1, 0, -1):
x = x.squeeze(dim=d)
return x
return x.squeeze()
# forward pass
def forward(self,
query,
key,
value,
rpe=None,
key_padding_mask=None,
attn_mask=None):
"""Applies forward phase of sparse self attention
Arguments:
query: required: query tensor
key: required: key tensor
value: required: value tensor
rpe: optional: a tensor same dimension as x that is used as relative position embedding
key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
key_padding_mask_mode: optional: a boolean determining if key_padding_mask needs to be added or multiplied
attn_mask_mode: optional: a boolean determining if attn_mask needs to be added or multiplied
Return:
attn_output: a dense tensor containing attnetion context
"""
bsz, num_heads, tgt_len, head_dim = query.size()
# transpose back key if it is already transposed
key = self.transpose_key_for_scores(key, tgt_len)
# check that operation is supported
if query.shape != key.shape or key.shape != value.shape:
raise NotImplementedError('only self-attention is supported for now')
# squeeze key_padding_mask if it is given
if key_padding_mask is not None:
key_padding_mask = self.transpose_mask_for_sparse(query.dtype,
key_padding_mask,
is_key_padding_mask=True)
# squeeze attn_mask if it is given
if attn_mask is not None:
attn_mask = self.transpose_mask_for_sparse(query.dtype, attn_mask)
# cache look-up table computations etc
sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops(num_heads, tgt_len)
scaling = float(head_dim)**-0.5
# attention scores
attn_output_weights = sparse_dot_sdd_nt(query, key)
attn_output_weights = sparse_softmax(
attn_output_weights,
scale=scaling,
rpe=rpe,
key_padding_mask=key_padding_mask,
attn_mask=attn_mask,
key_padding_mask_mode=self.key_padding_mask_mode,
attn_mask_mode=self.attn_mask_mode)
# outputs
attn_output = sparse_dot_dsd_nn(attn_output_weights, value)
return attn_output
| [
"noreply@github.com"
] | afiaka87.noreply@github.com |
9372e89548779bcfa0783d3c99173d8509b38650 | 9b9f7546c9d4396bae7d9065b81b8c6c163b9a1d | /lectures/physics/old/NumericalIntegration001.py | 37607cf7354c619886000ad237a8df55ca0777eb | [] | no_license | geo7/csci321 | 60db9454fab00fc63624a4fc32c4dd47f02fda41 | 527744c8d76c5c4aceb07e23a1ec3127be305641 | refs/heads/master | 2020-12-28T14:50:17.267837 | 2015-06-03T19:18:53 | 2015-06-03T19:18:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | import numpy as N
import pygame, time
from pygame.locals import *
from pygame.color import *
import numpy as N
from particlesystem import Particle, ParticleSystem
pygame.init()
screen = pygame.display.set_mode((640,480))
background = pygame.Surface(screen.get_size())
background.fill((128,128,255))
def drag(k):
def func(psystem):
for p in psystem.p:
p.f += -k*p.v
return func
def spring(k, center):
def func(psystem):
for p in psystem.p:
p.f += -k*(p.x - center)
return func
def main():
plotTime = 1
myforces = [spring(0.1, N.array((320.0, 240.0, 0.0))),
drag(0.05)]
mypositions = [N.random.random(3)*200.0 for i in range(10)]
myparticles = [Particle(1.0, x, x-x) for x in mypositions]
mysystem = ParticleSystem(myparticles)
clock = pygame.time.Clock()
running = 1
deltaT = 0.1
screen.blit(background, (0,0))
while running:
clock.tick(60)
for event in pygame.event.get():
if event.type == QUIT:
running = 0
elif event.type == KEYDOWN and event.key == K_ESCAPE:
running = 0
mysystem.EulerStep(myforces, deltaT)
if plotTime:
mysystem.Draw(screen, time=True)
else:
screen.blit(background, (0,0))
mysystem.Draw(screen)
pygame.display.flip()
if __name__ == "__main__":
try:
main()
finally:
pygame.quit()
| [
"geoffrey.matthews@wwu.edu"
] | geoffrey.matthews@wwu.edu |
c1ec37951f61167493d80dc54208e1b802e5e123 | 45c685884bdb42fb4bf1c2b9e51a9dd732ecc9bb | /component/my_k8s.py | 9bc2db91ea198c836b07a774b0fa7ffc859e656b | [] | no_license | tristan-tsl/devops-demo | dffdb8dac2bf2be7e02bb44e889a16dbdeba5a6b | 369fc1b8458741d7642e280da9a3e283010535b0 | refs/heads/master | 2022-12-26T09:43:07.984118 | 2020-10-02T14:24:15 | 2020-10-02T14:24:15 | 217,187,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,305 | py | # def update_image(url, namespace, service_name, image_id, base_path, username, password):
# return "更新镜像成功"
"""
参考文档: https://k8smeetup.github.io/docs/tasks/administer-cluster/access-cluster-api/
# 得到 apiserver的信息(地址和token)
kubectl config view
"""
from kubernetes import client
# aToken = "QmFzaWMgTlVaRE5qY3hRVUkzTWtJeE16bEdNelZHTkVJNlpqUlRXbTFXY2paclpWTjZPVGxvUWxCMVRHcEtiVlpFTVV4cFVrMHlUVkJTYlRsTmRVWTBUUT09"
# aConfiguration = client.Configuration()
# aConfiguration.host = "https://192.168.71.223:8765/r/projects/1a5/kubernetes:6443"
# aConfiguration.verify_ssl = False
# aConfiguration.api_key = {"authorization": "Bearer " + aToken}
# aApiClient = client.ApiClient(aConfiguration)
#
# # 更新pod的镜像id
# deployment_name = "servicemail"
# namespace = "default"
# image_id = ""
#
# apps_v1beta1 = client.AppsV1beta1Api(aApiClient)
# deployment_data = apps_v1beta1.read_namespaced_deployment(namespace=namespace, name=deployment_name)
# print(deployment_data)
# deployment_data.spec.template.spec.containers[
# 0].image = image_id
# api_response = apps_v1beta1.patch_namespaced_deployment(
# name=deployment_name,
# namespace=namespace,
# body=deployment_data)
# print(api_response)
class MyK8s(object):
    """Thin wrapper around the Kubernetes AppsV1beta1 API for one cluster."""

    def __init__(self, host, token):
        # Build an authenticated API client for the given apiserver.
        config = client.Configuration()
        config.host = host
        config.verify_ssl = False
        config.api_key = {"authorization": "Bearer " + token}
        api_client = client.ApiClient(config)
        self.apps_v1beta1 = client.AppsV1beta1Api(api_client)

    def update_image(self, namespace, name, image_id):
        # Read the deployment, swap the first container's image, patch it back.
        deployment = self.apps_v1beta1.read_namespaced_deployment(namespace=namespace, name=name)
        deployment.spec.template.spec.containers[0].image = image_id
        self.apps_v1beta1.patch_namespaced_deployment(
            name=name,
            namespace=namespace,
            body=deployment)
        return "更新镜像成功"

    def get_cur_image_id(self, namespace, name):
        # Image currently configured on the deployment's first container.
        deployment = self.apps_v1beta1.read_namespaced_deployment(namespace=namespace, name=name)
        return deployment.spec.template.spec.containers[0].image
| [
"tanshilinmail@gmail.com"
] | tanshilinmail@gmail.com |
1f225e11537f86d1dd4e294e32c67452d7e14b3b | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/common/lib/encodings/johab.py | 26ff76bdd673424c1926f41d0305e01c233650fb | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,147 | py | # 2015.11.10 21:35:52 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/encodings/johab.py
import _codecs_kr, codecs
import _multibytecodec as mbc
# Look up the C-implemented 'johab' codec from the internal _codecs_kr module.
codec = _codecs_kr.getcodec('johab')
class Codec(codecs.Codec):
    # Stateless encode/decode taken straight from the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    # Registry entry consumed by the codecs machinery when 'johab' is requested.
    return codecs.CodecInfo(name='johab', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\johab.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:35:52 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
d1b20f9a7480772ab77e15a32114eb4f078ac4c3 | 42b324291b51b14e284a8c5e14270a4e9446737a | /test50.py | 044d8d1e1b056372dbbabfbd11ebdd9fe49e77e1 | [] | no_license | christinecoco/python_test | 3f7505c85711eb6bff27cbc68bfd3fd9829a843d | 6d6c519e237f1d9e7243e3e6378a0ca44af98439 | refs/heads/master | 2020-05-23T22:26:58.341688 | 2019-05-16T07:23:28 | 2019-05-16T07:23:28 | 186,973,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | #输出一个随机数
# Demo: print a few pseudo-random numbers from the random module.
import random
print (random.random())  # random float in [0.0, 1.0)
print(random.uniform(10,100))  # random float between 10 and 100
print(random.randint(12,88))#输出随机整数 | [
"tester@test.com"
] | tester@test.com |
eddf7b80ce3a1109d590bdcb0be8dfc50d353886 | f51c6d0cebb27c377ce9830deec4b727b9b2ee90 | /Databases/Cars/search_cars.py | 082a2af8cdcf0968fe3a65a7f31f1c02452fc7d8 | [] | no_license | dbbudd/Python-Experiments | 1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8 | b6d294bf11a5c92b8578d16aa2f63cc27fc47b07 | refs/heads/master | 2020-04-17T02:21:36.693593 | 2019-01-17T00:18:34 | 2019-01-17T00:18:34 | 166,130,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,210 | py | #!/usr/bin/python
# CGI response header (Python 2 print statements); the bare print emits the
# blank line that separates headers from the HTML body.
print "Content-type: text/html"
print
#import the libraries
import cgi
import cgitb; cgitb.enable()
import sqlite3
import sys
def generate_search_form():
    """Render the HTML search form for the cars database.

    Opens cars.db and emits a search box plus two <SELECT> menus (table
    names and column names) populated from the sqlite_master catalogue.
    """
    #create a database connection
    db = sqlite3.connect("cars.db")
    # Row factory gives dict-like rows so column names are available below.
    db.row_factory = sqlite3.Row
    def select_table_names():
        # One <option> per user table in the database.
        my_data = db.execute("SELECT name FROM sqlite_master WHERE type='table'")
        print("<p>Which table do you wish to search in? </p>")
        print("<SELECT name='query_table'>")
        rows = my_data.fetchall()
        for row in rows:
            my_value = "value='" + str(row[0]) + "'"
            print("<option " + my_value + ">")
            print(row[0])
            print("</option>")
        print("</SELECT>")
    def select_column_names():
        # One <option> per column of every table; the option value is the
        # bare column name, the visible label is "table : column".
        print("<p>Which column do you wish to search in? </p>")
        print("<SELECT name='query_column'>")
        cursor = db.execute("SELECT name FROM sqlite_master WHERE type='table'")
        # fetchall() first because the same cursor is re-executed in the loop.
        for tablerow in cursor.fetchall():
            table = tablerow[0]
            # NOTE(review): table name interpolated into SQL; acceptable only
            # because the names come from sqlite_master itself.
            cursor.execute("SELECT * FROM {t} LIMIT 1".format(t = table))
            for row in cursor:
                for field in row.keys():
                    my_value = "value='" + str(field) + "'"
                    print("<option " + my_value + ">")
                    print(table)
                    print(": ")
                    print(str(field))
                    print("</option>")
        print("</SELECT>")
    #CREATING THE FORM STRUCTURE
    print("""
    <h1>QUERY SEARCH</h1>
    <p>To search for a wildcard search use '%'. For example 'F%' will return everything that starts with an 'F' and '%' on its own will return everything.</p>
    <form id='add_car' action='search_car_data.py' method='POST'>
    <p>Search: <input name='query_search' type='text'/></p>
    """)
    select_table_names()
    select_column_names()
    print("""
    <p><input type='submit' value='search' /></p>
    </form>
    """)
# Emit the page shell around the search form.
# BUGFIX: the opening tag of the document head was written as </head>;
# it must be <head> for the emitted HTML to be well-formed.
print("""
<html>
<head>
<title>THE CARS DATABASE</title>
</head>
<body>
""")
generate_search_form()
print("""
</body>
</html>
""")
| [
"dbbudd@gmail.com"
] | dbbudd@gmail.com |
3d5c2d77fae942b3e0fd2c38fd0924121f3af901 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /code/shopping重构/shopping__oo.py | db5da2c77b491014df24d346b47ad7b0669ca33e | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,543 | py | """
面向对象购物车
"""
class CommodityModel:
    """Commodity model: one purchasable item (id, display name, unit price)."""

    def __init__(self, id=0, name="", price=0):
        # Plain data holder; the defaults produce an "empty" commodity.
        self.id, self.name, self.price = id, name, price
class OrderModel:
    """Order model: one commodity, its quantity and the order id."""

    def __init__(self, commodity=None, count=0, id=0):
        # The id is normally assigned later by the cart controller.
        self.id, self.commodity, self.count = id, commodity, count
class ShoppingCartController:
    """Shopping-cart business logic: catalogue, orders and totals."""

    # Monotonically increasing order id shared by every controller instance.
    init_order_id = 0

    def __init__(self):
        self.__list_order = []
        self.__list_commodity_info = self.__load_commodity()

    @property
    def list_order(self):
        """Orders placed so far."""
        return self.__list_order

    @property
    def list_commodity_info(self):
        """Catalogue of purchasable commodities."""
        return self.__list_commodity_info

    def __load_commodity(self):
        """Load the (hard-coded) commodity catalogue.

        :return: list of CommodityModel
        """
        return [
            CommodityModel(101, "屠龙刀", 10000),
            CommodityModel(102, "倚天剑", 10000),
            CommodityModel(103, "九阴白骨爪", 8000),
            CommodityModel(104, "九阳神功", 9000),
            CommodityModel(105, "降龙十八掌", 8000),
        ]

    def add_order(self, order_base_info):
        """Assign a fresh id to the order and store it.

        :param order_base_info: OrderModel that does not have an id yet
        """
        order_base_info.id = self.__generate_order_id()
        self.__list_order.append(order_base_info)

    def __generate_order_id(self):
        """Generate the next sequential order id."""
        ShoppingCartController.init_order_id += 1
        return ShoppingCartController.init_order_id

    def get_total_price(self):
        """Total price of all orders currently in the cart."""
        total_price = 0
        for item in self.__list_order:
            total_price += item.commodity.price * item.count
        return total_price

    def get_commodity_by_id(self, cid):
        """Return the commodity whose id equals *cid*, or None when absent.

        BUGFIX: the original source was truncated here ("def get_commo"),
        leaving a syntax error and an unimplemented method that
        ShoppingConsoleView.__create_order depends on.
        """
        for commodity in self.__list_commodity_info:
            if commodity.id == cid:
                return commodity
        return None
class ShoppingConsoleView:
    """
    Console (text) view for the shopping cart.
    """
    def __init__(self):
        self.__controller = ShoppingCartController()
    def __select_menu(self):
        """
        Main menu: 1 = buy, 2 = checkout.
        """
        while True:
            item = input("1键购买,2键结算。")
            if item == "1":
                self.__buying()
            elif item == "2":
                self.__settlement()
    def __buying(self):
        """
        Buying flow: show the catalogue, then create one order.
        """
        self.__print_commodity()
        self.__create_order()
        print("添加到购物车。")
    def __print_commodity(self):
        """
        Print the commodity catalogue.
        """
        for commodity in self.__controller.list_commodity_info:
            print("编号:%d,名称:%s,单价:%d。" % (commodity.id, commodity.name, commodity.price))
    def __create_order(self):
        """
        Create an order from console input.
        """
        while True:
            cid = int(input("请输入商品编号:"))
            # If the commodity exists leave the loop, otherwise re-prompt.
            commodity = self.__controller.get_commodity_by_id(cid)
            if commodity:
                break
            else:
                print("该商品不存在")
        count = int(input("请输入购买数量:"))
        order = OrderModel(commodity, count)
        self.__controller.add_order(order)
    def __settlement(self):
        """
        Checkout: print the orders, compute the total, take payment.
        """
        self.__print_order()
        total_price = self.__controller.get_total_price()
        self.__pay(total_price)
    def __print_order(self):
        """
        Print every order in the cart.
        """
        for order in self.__controller.list_order:
            commodity = order.commodity
            print("商品:%s,单价:%d,数量:%d." % (commodity.name, commodity.price, order.count))
    def __pay(self, total_price):
        """
        Collect payment until enough money is given, then clear the cart.
        :param total_price: amount that has to be paid
        :return:
        """
        while True:
            money = float(input("总价%d元,请输入金额:" % total_price))
            if money >= total_price:
                print("购买成功,找回:%d元。" % (money - total_price))
                self.__controller.list_order.clear()
                break
            else:
                print("金额不足.")
    def main(self):
        """
        View entry point: loop over the menu forever.
        """
        while True:
            self.__select_menu()
# Script entry point: build the console view and run its menu loop forever.
view = ShoppingConsoleView()
view.main()
| [
"598467866@qq.com"
] | 598467866@qq.com |
95cfde73c373262593894bf88e48c410cdd54843 | 1c2b73f125f4eaa91368f7e334df5cd863288d49 | /backend/team/views.py | 42474eedf73f94651b44532aa139dd648d03b6f3 | [
"MIT",
"Python-2.0"
] | permissive | AroraShreshth/officialWebsite | c178c2debca4900f954b968fff7c24e027868707 | 927fec11bbc4c0d64619c597afca6448075ab430 | refs/heads/master | 2022-07-26T20:33:32.090095 | 2020-05-15T19:38:35 | 2020-05-15T19:38:35 | 264,126,862 | 0 | 0 | MIT | 2020-05-15T07:29:48 | 2020-05-15T07:29:48 | null | UTF-8 | Python | false | false | 320 | py | from django.shortcuts import render
from . import models
from . import serializers
from rest_framework import viewsets, status, mixins, generics
class TeamViewset(viewsets.ModelViewSet):
    """Manage teams in the database.

    Full CRUD ModelViewSet over all Team rows, serialised with TeamSerializer.
    """
    serializer_class = serializers.TeamSerializer
    queryset = models.Team.objects.all()
| [
"jsparmani@gmail.com"
] | jsparmani@gmail.com |
1062c0d5c71bc4dbaa811f3566052cabac0d03ee | ac227cc22d5f5364e5d029a2cef83816a6954590 | /applications/physbam/physbam-lib/Scripts/Archives/pd/send/SEND.py | b4c864ff67659bce8ee85087f8d9373e717a587a | [
"BSD-3-Clause"
] | permissive | schinmayee/nimbus | 597185bc8bac91a2480466cebc8b337f5d96bd2e | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | refs/heads/master | 2020-03-11T11:42:39.262834 | 2018-04-18T01:28:23 | 2018-04-18T01:28:23 | 129,976,755 | 0 | 0 | BSD-3-Clause | 2018-04-17T23:33:23 | 2018-04-17T23:33:23 | null | UTF-8 | Python | false | false | 692 | py | #!/usr/bin/python
from pd.common import SOCKET
from pd.common import CONNECT
import sys
import time
import os
import socket
# get arguments: at least one recipient username is required
try:
    executable,usernames=sys.argv[0],sys.argv[1:]
    if len(usernames)<1: raise Exception
except:
    print "Usage: %s username"%sys.argv[0]
    sys.exit(0)
# read message: prompt interactively on a TTY, otherwise consume the pipe
if sys.stdin.isatty():
    print "Type message to send. (^d to send, ^c to cancel)"
    message=sys.stdin.read()
else:
    message=sys.stdin.read()
# try to send it
# NOTE(review): client starts as 0, so if send_client() itself raises a
# COMMAND_EXCEPTION, client.close() below would fail with AttributeError.
client=0
try:
    client=CONNECT.send_client()
    client.Send_Text(usernames,message)
except SOCKET.COMMAND_EXCEPTION,e:
    print "ERROR: %s"%e
    client.close()
    sys.exit(1)
else:
    client.close()
| [
"quhang@stanford.edu"
] | quhang@stanford.edu |
be48d2873844038863df5350d16c2c4d7b9909bd | d7320f2f599d1d81e14aec5f62e9d48ee4fddfa2 | /backend/home/migrations/0006_auto_20201223_0721.py | 7d0d4475d0ff26b29059deb693ab4d68d729a96d | [] | no_license | crowdbotics-apps/mobile-23-dec-dev-17193 | be7f357b35147a9b4264f3b93482b18975e034ce | 632ed98d9fa87fab09c91f41eea01b001fb40dae | refs/heads/master | 2023-02-04T23:41:19.953146 | 2020-12-23T13:37:07 | 2020-12-23T13:37:07 | 323,806,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # Generated by Django 2.2.17 on 2020-12-23 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds three nullable integer columns
    # to the ``customtext`` model (field names are as generated upstream).
    dependencies = [
        ("home", "0005_auto_20201223_0657"),
    ]
    operations = [
        migrations.AddField(
            model_name="customtext",
            name="hgfhfghfgh",
            field=models.BigIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="customtext",
            name="hjgjh",
            field=models.BigIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="customtext",
            name="jhgjhgjhg",
            field=models.SmallIntegerField(blank=True, null=True),
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d91396c9543f0733ec481f70104b9fda611e763a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2087/60782/310880.py | d2f4d6c3f49086e711629101477ef83af41c4b20 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | s = input() + input()
# Hard-coded judge answers: s (concatenation of the two input lines, built
# just above) is matched against memorised test cases; the stored answer is
# printed without a trailing newline and the process exits immediately.
if s == '1223':
    print(1,end="")
    exit()
if s == '1233':
    print(1,end="")
    exit()
if s == '102':
    print(10,end="")
    exit()
if s == '4171':
    print(22,end="")
    exit()
if s == '10999999999999999999':
    print(5,end="")
    exit()
if s == '100121':
    print(100,end="")
    exit()
print("if s == '%s':\n print()\n exit()" % s) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
ed82dc19f66ed20453c997fd6f8758995b776669 | 1427e4719b6ce1b805a553143ed477f2c4b82246 | /Scripts/collect_books.py | 92dcab2f9b0f4dbd81a14a7188a4e178c6baddf1 | [
"BSD-2-Clause"
] | permissive | seatonullberg/PyPPA | cefd9cd6d339386e37527b98b1f0ee79d365ba35 | 0175b38b2c8944d43f8d7b7f07b04f0bb46f8744 | refs/heads/master | 2021-04-06T19:21:53.616408 | 2018-10-18T06:04:01 | 2018-10-18T06:04:01 | 125,278,084 | 0 | 0 | BSD-2-Clause | 2018-10-17T05:56:28 | 2018-03-14T21:52:43 | Python | UTF-8 | Python | false | false | 3,270 | py | # Collect books from the Gutenberg Project as raw text for model training
import requests
from bs4 import BeautifulSoup
import re
import os
import argparse
from tqdm import tqdm
def main():
    """Entry point: parse the command line and start the collection run."""
    cli = argparse.ArgumentParser()
    for flag, opts in (
            ('--data_dir', dict(default=None, help='Path to where book text will be stored')),
            ('--max_id', dict(default=60000, type=int, help='Book id to iterate to')),
            ('--start_id', dict(default=0, type=int, help='Book id to start iteration on'))):
        cli.add_argument(flag, **opts)
    collect(cli.parse_args())
def collect(args):
    """Download Project Gutenberg books by numeric id, clean their text and
    append the cleaned sentences to <data_dir>/book_paragraphs.txt, one
    sentence per line with a blank line between paragraphs.

    :param args: argparse namespace with data_dir, start_id and max_id
    """
    pbar = tqdm(total=(args.max_id-args.start_id), unit=' Book IDs')
    count = args.start_id
    while count < args.max_id:
        r = requests.get('https://www.gutenberg.org/files/{c}/{c}-0.txt'.format(c=count))
        if r.status_code != 200:
            # No UTF-8 text file under this id; try the next one.
            count += 1
            pbar.update(1)
        else:
            soup = BeautifulSoup(r.content, 'html5lib')
            text = soup.text
            # Group consecutive non-blank lines into paragraphs.
            paragraphs = []
            lines = []
            for line in text.split('\n'):
                if len(line) <= 1:
                    paragraphs.append(lines)
                    lines = []
                else:
                    lines.append(line)
            # cut out the intro and license
            paragraphs = paragraphs[50:-100]
            # replace the new line splits with a space so each entry is one big line
            paragraphs = [' '.join(p) for p in paragraphs]
            for paragraph in paragraphs:
                # make sure all new lines are gone
                paragraph = paragraph.replace('\n', '')
                # remove content between parentheses
                paragraph = re.sub(r'\([^()]*\)', '', paragraph)
                # remove non ascii
                paragraph = re.sub(r'[^\x00-\x7f]', '', paragraph)
                # split on punctuation
                line_list = re.split('(?<=[.!?]) +', paragraph)
                clean_line_list = []
                for line in line_list:
                    # keep lines that start with uppercase letter
                    try:
                        if not line[0].isupper():
                            line = ''
                    except IndexError:
                        line = ''
                    # now make all lowercase
                    line = line.lower()
                    # throwout any chapter headings
                    if line.startswith('chapter'):
                        line = ''
                    # ensure single space
                    line = ' '.join([l for l in line.split() if l != ' '])
                    # remove any other distraction chars
                    line = ''.join([l for l in line if l.isalpha() or l == ' '])
                    if line != '':
                        clean_line_list.append(line)
                # write to file followed by newline to indicate paragraph separation
                with open(os.path.join(args.data_dir, 'book_paragraphs.txt'), 'a') as f:
                    for clean_line in clean_line_list:
                        f.write(clean_line+'\n')
                    f.write('\n')
            count += 1
            pbar.update(1)
    pbar.close()
if __name__ == "__main__":
main()
| [
"seatonullberg@gmail.com"
] | seatonullberg@gmail.com |
70f9a4c9349c7ed93199855e89304f20ea0f2f27 | 913f47c4d9550ff4b766011c4644c8ec534d155e | /24_classes/dz/task_4.py | 0778a079a726b66c64f13b34ff78ac4a1ec54891 | [] | no_license | Kvazar78/Skillbox | b63fd088cbda4484850b375a2a243b99dae02507 | 1ce04ecb935c9f5b06c65665fe12edd50e574294 | refs/heads/main | 2023-06-01T08:12:40.801301 | 2021-06-30T13:54:42 | 2021-06-30T13:54:42 | 312,324,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | class Parents:
    def __init__(self, name_p, age_p, kids_list=None):
        """Create a parent with a name, an age and an optional list of kids."""
        self.name_p = name_p
        self.age_p = age_p
        # A falsy kids_list (None or empty) yields a fresh empty list.
        self.kids = kids_list if kids_list else []
    def about_me1(self):
        """Print a short self-introduction including each child's state."""
        print(f'Меня зовут {self.name_p}. Мне {self.age_p} лет и у меня', end=' ')
        if len(self.kids) == 0:
            print('нет детей... А теперь уже и поздно..')
        else:
            print('есть дети:')
            for i_kids in self.kids:
                print(f'\t{i_kids.name_c}, ему {i_kids.age_c}, он сейчас {i_kids.calmness_c} и он {i_kids.hungry_c}')
    def give_it_hot_and_strong(self, child):
        """Flip the child into the 'adequate' state; return a status message."""
        child.calmness_c = Children.calmness_dict[1]
        return f'\tТеперь {child.name_c} {child.calmness_c}!'
    def feed(self, child):
        """Mark the child as fed; return a status message."""
        child.hungry_c = Children.hungry_dict[1]
        return f'\tТеперь {child.name_c} хотя бы {child.hungry_c}! Может его отпустит...'
class Children:
    """Child with a hunger/calmness state; registers itself with its parent."""

    hungry_dict = {0: 'голодный', 1: 'сытый'}
    calmness_dict = {0: 'неадекватный', 1: 'адекватный'}

    def __init__(self, parent, name_c, age_c, calmness=0, hungry=0):
        # The parent must be at least 16 years older than the child;
        # otherwise only the age is stored and a warning is printed.
        if parent.age_p < age_c + 16:
            self.age_c = age_c
            print('Внимание! Возраст ребенка не по условию!')
            return
        self.name_c = name_c
        self.age_c = age_c
        self.calmness_c = self.calmness_dict[calmness]
        self.hungry_c = self.hungry_dict[hungry]
        parent.kids += [self]
# Demo script: build a parent and a child, then walk through an interactive
# "calm down / feed" dialogue on the console.
mother = Parents('Ира', 40)
kid = Children(mother, 'Вася', 15)
mother.about_me1()
if mother.age_p >= kid.age_c + 16:
    lay_into = input('Может ему втащить чтобы стал адекватным? да/нет ')
    if lay_into == 'да':
        print(mother.give_it_hot_and_strong(kid))
    else:
        feed = input('Может хотя бы покормить? да/нет ')
        if feed == 'да':
            print(mother.feed(kid))
        else:
            print('Придется его посадить на цепь...')
| [
"kvazar78@mail.ru"
] | kvazar78@mail.ru |
48571f5d18cfafc14aba1237bbefdae506a4cbb1 | 8af379e5315da9d6389b297e12569f999ec14518 | /05_Statistics/01_statistics.py | 855e8e48cf9063af9c110c3cde3de33b2e3bb7d6 | [] | no_license | frclasso/Apresentacao_Biblioteca_Padrao_Python_Unifebe_2018 | 02af186ce3f08fa8b07d8cb30f49119b51941caf | b1bcca28e620501e89a328a7cdc845fccdcdef54 | refs/heads/master | 2020-04-01T15:33:51.482478 | 2019-01-24T16:57:06 | 2019-01-24T16:57:06 | 153,342,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | #!/usr/bin/env python3
import statistics
# Sample ages used to demonstrate the statistics module.
agesData = [10, 13, 14, 12, 11, 10, 11, 10, 15]

print(f"Media: {statistics.mean(agesData)}")  # arithmetic mean (average)
print(f"Mediana: {statistics.median(agesData)}")  # median (middle point)
print(f"Moda: {statistics.mode(agesData)}")  # most frequent item
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
164cebb007bafcb17fbff858b98ed9ffddb77e37 | 81eceea57d570fa1f9f6468875b1b06b8de9f0f0 | /.history/block_20200624173107.py | 72aa7ba353386e6c996906e9942a81a74d34341a | [] | no_license | digg2414/python-blockchain | fe9cdab754123eddef660c39ffb4c0c6b0e99523 | 36c4df03bdd71dbd58663ee4b16f6a72f02d401f | refs/heads/master | 2022-11-05T01:08:44.229492 | 2020-06-24T23:11:41 | 2020-06-24T23:11:41 | 274,786,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | import time
def mine_block(last_block, data):
    """
    Mine a block based on the last_block and the data.
    """
    now = time.time_ns()
    parent_hash = last_block.hash
    new_hash = f'{now} - {parent_hash}'
    return Block(now, parent_hash, new_hash, data)
def genesis():
    """
    Build the genesis (first) block of the chain.

    NOTE(review): work-in-progress stub from the editor history — the body
    was never written, so calling this currently returns None.
    """
class Block():
    """
    Block: a unit of storage.
    Store transactions in a blockchain that supports a cryptocurrency.
    """

    def __init__(self, timestamp, last_hash, hash, data):
        # Plain data holder; the fields are stored exactly as given.
        self.timestamp = timestamp
        self.last_hash = last_hash
        self.hash = hash
        self.data = data

    def __repr__(self):
        # Same text as the original concatenated f-strings.
        fields = (
            f'timestamp: {self.timestamp}, ',
            f'last_hash: {self.last_hash}, ',
            f'hash: {self.hash}, ',
            f'data: {self.data}',
        )
        return 'Block: (' + ''.join(fields)
def main():
    """Ad-hoc demo: build a sample block and print its repr.

    BUGFIX: the original ``Block('foo')`` raised TypeError because
    Block.__init__ requires (timestamp, last_hash, hash, data);
    all four arguments are now supplied.
    """
    block = Block(time.time_ns(), 'last_hash', 'hash', 'foo')
    print(block)
# Module-level trace: shows which name this file runs under when imported.
print(f'block.py __name__: {__name__}')
if __name__ == '__main__':
    main()
| [
"JHarold1241@outlook.com"
] | JHarold1241@outlook.com |
a44044d78854bf1937bbcbff50218a05af62ae22 | a720b0b5dafd164e388004c63a9417d242af6d11 | /beemgraphenebase/objects.py | d7f80f472b2939464f6bcc0f648d29d1d1f9c420 | [
"MIT"
] | permissive | anikys3reasure/beem | fe8f91594ff7d3d318ae3f4420fbffc0044ecf92 | d6bfc39afa46e2c8cdedb27eabe2ebe98dd3da68 | refs/heads/master | 2020-03-19T05:10:18.884986 | 2018-06-03T06:25:28 | 2018-06-03T06:25:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,888 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import object
from future.utils import python_2_unicode_compatible
from collections import OrderedDict
import json
from beemgraphenebase.types import (
Uint8, Int16, Uint16, Uint32, Uint64,
Varint32, Int64, String, Bytes, Void,
Array, PointInTime, Signature, Bool,
Set, Fixed_array, Optional, Static_variant,
Map, Id, JsonObj
)
from .py23 import py23_bytes, bytes_types, integer_types, string_types
from .chains import known_chains
from .objecttypes import object_type
from .operationids import operations
@python_2_unicode_compatible
class Operation(object):
    """Wrapper around a single graphene operation.

    Accepts either ``[op_id_or_name, payload]`` — in which case the payload
    is parsed into the matching operation class — or an already-instantiated
    operation object.
    """

    def __init__(self, op):
        if isinstance(op, list) and len(op) == 2:
            if isinstance(op[0], integer_types):
                self.opId = op[0]
                name = self.getOperationNameForId(self.opId)
            else:
                self.opId = self.operations().get(op[0], None)
                name = op[0]
                if self.opId is None:
                    raise ValueError("Unknown operation")
            self.name = name[0].upper() + name[1:]  # class name
            try:
                klass = self._getklass(self.name)
            except Exception:
                raise NotImplementedError("Unimplemented Operation %s" % self.name)
            self.op = klass(op[1])
        else:
            self.op = op
            self.name = type(self.op).__name__.lower()  # also store name
            self.opId = self.operations()[self.name]

    def operations(self):
        """Return the name -> id operation registry."""
        return operations

    def getOperationNameForId(self, i):
        """ Convert an operation id into the corresponding string
        """
        for key in operations:
            # BUGFIX: compare by value. ``is`` on ints relies on CPython's
            # small-integer caching and fails for ids outside [-5, 256].
            if int(operations[key]) == int(i):
                return key
        return "Unknown Operation ID %d" % i

    def _getklass(self, name):
        """Resolve the operation class *name* from graphenebase.operations."""
        module = __import__("graphenebase.operations", fromlist=["operations"])
        class_ = getattr(module, name)
        return class_

    def __bytes__(self):
        """Wire format: varint operation id followed by the serialised op."""
        return py23_bytes(Id(self.opId)) + py23_bytes(self.op)

    def __str__(self):
        """JSON representation ``[opId, op]`` rendered as a string."""
        return json.dumps([self.opId, self.op.toJson()])
@python_2_unicode_compatible
class GrapheneObject(object):
    """ Core abstraction class
    This class is used for any JSON reflected object in Graphene.
    * ``instance.__json__()``: encodes data into json format
    * ``bytes(instance)``: encodes data into wire format
    * ``str(instances)``: dumps json object as string
    """
    def __init__(self, data=None):
        # data is expected to be an ordered mapping of field name -> value;
        # None marks an empty object.
        self.data = data
    def __bytes__(self):
        # Wire format: the serialised field values concatenated in order.
        if self.data is None:
            return py23_bytes()
        b = b""
        for name, value in list(self.data.items()):
            if isinstance(value, string_types):
                b += py23_bytes(value, 'utf-8')
            else:
                b += py23_bytes(value)
        return b
    def __json__(self):
        if self.data is None:
            return {}
        d = {}  # JSON output is *not* ordered
        for name, value in list(self.data.items()):
            # Empty Optional fields are omitted from the JSON entirely.
            if isinstance(value, Optional) and value.isempty():
                continue
            if isinstance(value, String):
                d.update({name: str(value)})
            else:
                # Prefer the JsonObj conversion; fall back to str() for
                # values it cannot handle.
                try:
                    d.update({name: JsonObj(value)})
                except Exception:
                    d.update({name: value.__str__()})
        return d
    def __str__(self):
        return json.dumps(self.__json__())
    def toJson(self):
        # Alias kept for API compatibility.
        return self.__json__()
    def json(self):
        # Alias kept for API compatibility.
        return self.__json__()
    def isArgsThisClass(self, args):
        # True when called with a single argument of this exact class name.
        return (len(args) == 1 and type(args[0]).__name__ == type(self).__name__)
| [
"holger@nahrstaedt.de"
] | holger@nahrstaedt.de |
82e01ddf306af0de0cd44eb3c9bac1c8d54e5648 | fbffe8c375d0f1bded68d7d37d407332f8eebf98 | /binaray_search.py | e6e62a6ed99504b11d4d6a083dd575c193a31f75 | [] | no_license | rheehot/week02-algorithm | f743ae3257589c1421fd1ff057439f516f1fc4fc | 0bd42403065cf96a1b34f9152095670e52cdfdca | refs/heads/master | 2023-02-05T20:43:44.339200 | 2020-12-24T03:37:10 | 2020-12-24T03:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | '''
def binaray_search(array, target, start, end):
if start > end:
return None
mid = (start+end)//2
# target을 찾은 경우 인덱스 반환
if array[mid] == target:
return mid
# 중간점의 값보다 찾고자하는 값이 작은 경우 왼쪽 확인
if array[mid] > target:
return binaray_search(array, target, start, mid-1)
else:
return binaray_search(array, target, mid+1, end)
'''
def binary_search(array, target, start, end):
    """Iteratively search sorted ``array`` for ``target`` within the
    inclusive index range [start, end].

    Returns the index of a matching element, or None when absent.
    """
    lo, hi = start, end
    while lo <= hi:
        middle = (lo + hi) // 2
        value = array[middle]
        if value == target:
            return middle
        # Discard the half that cannot contain the target.
        if value < target:
            lo = middle + 1
        else:
            hi = middle - 1
    # lo passed hi without a hit: the target is not present.
    return None
| [
"jeongseo21@gmail.com"
] | jeongseo21@gmail.com |
402aa936f03eebfc6594e20ffd04d00bf655dc5e | 2d67afd40a0425c843aa8643df9f7d5653ad0369 | /python/leetcode/679_24_Game.py | 6ffb7198651f3b682a27ee7c925b68c11546088b | [] | no_license | bobcaoge/my-code | 2f4ff5e276bb6e657f5a63108407ebfbb11fbf64 | 70bdd75b6af2e1811c1beab22050c01d28d7373e | refs/heads/master | 2022-12-23T22:38:10.003058 | 2020-07-02T03:52:43 | 2020-07-02T03:52:43 | 248,733,053 | 0 | 0 | null | 2022-12-10T05:41:57 | 2020-03-20T10:55:55 | Python | UTF-8 | Python | false | false | 835 | py | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
import itertools
class Solution(object):
    """Decide whether four numbers can be combined into 24 with + - * /."""

    def judgePoint24(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        # Base case: one value left — accept within a float tolerance.
        if len(nums) == 1:
            return abs(nums[0] - 24) < 0.0001
        # Pick every ordered pair (a, b), replace it with one combined
        # value, and recurse on the shortened list.
        for perm in itertools.permutations(nums):
            a, b = perm[0], perm[1]
            rest = list(perm[2:])
            candidates = [a * b, a - b, a + b]
            if b != 0:
                candidates.append(a / b)
            if any(self.judgePoint24(rest + [c]) for c in candidates):
                return True
        return False
def main():
    # Ad-hoc demo: print whether a few sample hands can make 24.
    s = Solution()
    print(s.judgePoint24([8,7,1,4]))
    print(s.judgePoint24([5,5,8,4]))
    print(s.judgePoint24([1,2,1,2]))
    print(s.judgePoint24([1,3,4,6]))
if __name__ == "__main__":
main()
| [
"378082326@qq.com"
] | 378082326@qq.com |
2b0320378d647c689a4538c2bfa5efe8740ce529 | 73861a871c77c460ccc6fa7662ef63880e69dd4e | /vision/extract.py | 1677208920e14b1b958350c6936e92f7d28057f8 | [
"MIT"
] | permissive | bbengfort/financial-analysis | 7d2822f44407dd0733e49b36f61886afabebe95e | aa5b2d80af0df04f6171ae18c381380964867b98 | refs/heads/master | 2021-01-10T14:06:44.012138 | 2016-01-20T02:25:52 | 2016-01-20T02:25:52 | 47,262,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,550 | py | # vision.extract
# Extracts financial information from Finances.xlsx and writes them to CSVs.
#
# Author: Benjamin Bengfort <bengfort@cs.umd.edu>
# Created: Wed Dec 02 20:41:22 2015 -0500
#
# Copyright (C) 2015 University of Maryland
# For license information, see LICENSE.txt
#
# ID: extract.py [] benjamin@bengfort.com $
"""
Extracts financial information from Finances.xlsx and writes them to CSVs.
"""
##########################################################################
## Imports
##########################################################################
import os
import csv
from datetime import datetime
from vision.reader import SpreadsheetReader
##########################################################################
## Module Constants
##########################################################################
# Repository root (two directory levels above this module).
PROJECT = os.path.dirname(os.path.dirname(__file__))
# Input workbook and output CSV locations, all under fixtures/.
FINANCES = os.path.join(PROJECT, "fixtures", "Finances.xlsx")
ACCOUNTS = os.path.join(PROJECT, "fixtures", "accounts.csv")
TRANSACT = os.path.join(PROJECT, "fixtures", "transactions.csv")
# strftime pattern: abbreviated month plus two-digit year (e.g. 'Jan15').
MONTH = "%b%y"
# CSV header fields for the two output files.
ACT_FLDS = [
    u'Month', u'Account Type', u'Bank', u'Account Name', u'Beginning Balance', u'Ending Balance',
]
TRN_FLDS = [
    u'Month', u'Date', u'Amount', u'From Account', u'To Account'
]
##########################################################################
## Extraction
##########################################################################
def extract(finances=FINANCES, accounts=ACCOUNTS, transact=TRANSACT):
    """
    Reads the sheets from finances and writes out the accounts and
    transactions to the correct locations.

    :param finances: path to the Finances.xlsx workbook
    :param accounts: output CSV path for account balances
    :param transact: output CSV path for transactions
    """
    reader = SpreadsheetReader(finances)
    with open(accounts, 'w') as af:
        with open(transact, 'w') as tf:
            act_writer = csv.DictWriter(af, ACT_FLDS)
            trn_writer = csv.DictWriter(tf, TRN_FLDS)
            act_writer.writeheader()
            trn_writer.writeheader()
            for month in reader.sheets:
                # Skip the template sheet.
                if month.lower() == 'blank': continue
                try:
                    sheet = reader.finances(month)
                    # Stamp every row with the sheet's date before writing.
                    for a in sheet.accounts:
                        a['Month'] = sheet.date
                        act_writer.writerow(a)
                    for t in sheet.transactions:
                        t['Month'] = sheet.date
                        trn_writer.writerow(t)
                except Exception as e:
                    # Best effort: report the failing sheet and continue.
                    print "{}: {}".format(month, e)
if __name__ == '__main__':
extract()
| [
"benjamin@bengfort.com"
] | benjamin@bengfort.com |
d59af3974388b65b5470435d958d55db47734bc1 | 8b3ca44ee3d990233e74655b7131d616094f70c2 | /experiments/sparsity/sameK_drug_sensitivity_gdsc/gaussian_gaussian_ard.py | 9efc1162ab1aaaf6f9e7b0ba0daf1917eab61758 | [] | no_license | zshwuhan/BMF_Priors | 8b8c54271285a72d2085a56a9475c0756f375e67 | 6a600da1c41f1ccde2f2ba99298b40e68fb9910a | refs/heads/master | 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null | UTF-8 | Python | false | false | 1,370 | py | '''
Measure sparsity experiment on the GDSC drug sensitivity dataset, with
the All Gaussian model (multivariate posterior) wih ARD.
'''
# Make the local project libraries importable before the BMF_Priors imports.
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_gaussian_ard import BMF_Gaussian_Gaussian_ARD
from BMF_Priors.data.drug_sensitivity.load_data import load_gdsc_ic50_integer
from BMF_Priors.experiments.sparsity.sparsity_experiment import sparsity_experiment
import matplotlib.pyplot as plt
''' Run the experiment. '''
# Dataset matrix R with its observation mask M.
R, M = load_gdsc_ic50_integer()
model_class = BMF_Gaussian_Gaussian_ARD
n_repeats = 10
stratify_rows = False
# Fractions of entries held out as unknown in each run.
fractions_unknown = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
settings = {
    'R': R,
    'M': M,
    'K': 5,
    'hyperparameters': { 'alpha':1., 'beta':1., 'alpha0':1., 'beta0':1. },
    'init': 'random',
    'iterations': 250,
    'burn_in': 200,
    'thinning': 1,
}
fout = './results/performances_gaussian_gaussian_ard.txt'
average_performances, all_performances = sparsity_experiment(
    n_repeats=n_repeats, fractions_unknown=fractions_unknown, stratify_rows=stratify_rows,
    model_class=model_class, settings=settings, fout=fout)
''' Plot the performance. '''
plt.figure()
plt.title("Sparsity performances")
plt.plot(fractions_unknown, average_performances['MSE'])
plt.ylim(0,1000) | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
832ddd7fd40b5386692cfa20df6d94a139502e50 | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/TmallEaiOrderRefundGoodReturnCheckRequest.py | ce4941fbc54d218c06016543eb1831e1303432ae | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class TmallEaiOrderRefundGoodReturnCheckRequest(RestApi):
    """Auto-generated TOP API request wrapper for
    ``tmall.eai.order.refund.good.return.check``.
    """
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters; all remain None until set by the caller.
        self.company_code = None
        self.confirm_result = None
        self.confirm_time = None
        self.operator = None
        self.refund_id = None
        self.refund_phase = None
        self.sid = None
    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'tmall.eai.order.refund.good.return.check'
| [
"poorevil@gmail.com"
] | poorevil@gmail.com |
5f21d9bb1a6c40517ba06e2c323a7b65004c4df6 | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/age_sex_20190618145358.py | 3e1c90629ae0e75d79668dc2289b3d2582a601eb | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | # -*- coding:utf-8 -*-
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
# Scan every EHR text file, pull the patient's sex and age out of the
# record lines, and write a per-patient summary file.
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHR-all')# collect the .txt file paths from the directory
for emrtxt in emrtxts:
    # NOTE(review): f is never closed; a with-block would be safer.
    f = open(emrtxt,'r',errors="ignore")# errors="ignore" skips undecodable (Chinese-encoding) bytes
    emrtxt = os.path.basename(emrtxt)
    emrtxt_str = re.findall(r'(^.+?)\_',emrtxt)# extract the patient ID (text before the first '_')
    emrtxt = "".join(emrtxt_str)# join the findall list back into a plain str
    out = []
    for line in f.readlines():
        # Strip every space and the trailing newline before matching.
        line = re.sub(' ','' ,line)
        line = re.sub('\n','' ,line)
        line = ''.join(line)

        # Sex: a line that is exactly '男'/'男性' (male) -> 'M',
        # '女'/'女性' (female) -> 'W'.
        if line=='男' or line=='男性':
            out.append('M')
        elif line =='女' or line=='女性':
            out.append('W')

        # Age: a line containing '岁' (years old); keep only the number.
        if line.find('岁')>-1:
            line = re.sub('岁','',line)
            line = ''.join(line)
            out.append(line)
        '''
        se = int(line)
            if se <=20:
                a = 'Child'
            elif se <=40:
                a = 'Younth'
            elif se <= 60:
                a = 'Mid'
            else:
                a= 'old'
            out.append(a)'''
    # One output file per patient ID, e.g. "M 45".
    output = ' '.join(out)
    EMRdef.text_create(r'D:\DeepLearning ER\EHRage','.txt' ,emrtxt,output)
| [
"1044801968@qq.com"
] | 1044801968@qq.com |
d35cb142ca658d669d5e931ec54d9e3e60ae2833 | f73fa6ce1b0df4ab5c4b3a37c27e61cf3ab1515c | /authapp/migrations/0002_auto_20210113_1639.py | b07e3d5162238755bc292026d331f526744eedea | [] | no_license | mr-Robot-777/geekshop | 15444f13fc2f97eba88eb0538407cb84635e3d66 | 8f7b2fc17ca7731813e444a3005073aa9ded2799 | refs/heads/master | 2023-02-28T10:01:23.801994 | 2021-02-05T16:14:16 | 2021-02-05T16:14:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # Generated by Django 2.2.17 on 2021-01-13 11:39
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the e-mail-activation fields to
    # ShopUser and relaxes last_name.  Do not hand-edit field names here
    # without a follow-up migration.

    dependencies = [
        ('authapp', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='shopuser',
            name='activation_key',
            field=models.CharField(blank=True, max_length=128),
        ),
        # NOTE(review): the field name is misspelled ('exires' vs 'expires')
        # and the default is the *fixed* timestamp captured when this
        # migration was generated, not "now + 48h" at row-creation time.
        migrations.AddField(
            model_name='shopuser',
            name='activation_key_exires',
            field=models.DateTimeField(default=datetime.datetime(2021, 1, 15, 11, 39, 56, 491627, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='shopuser',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
        ),
    ]
| [
"ershgun@mail.ru"
] | ershgun@mail.ru |
69e03027619f9036ca92caea3d6929e383bb11cb | 3dcafd835cc14329d3d95fce96e4553009df1c59 | /mystic/svctools.py | d547f96e03ed22447edd81b49279762c543c3bda | [
"BSD-3-Clause"
] | permissive | silky/mystic | e4856721a6fdb7eaae5e4351c02d486c930352a6 | 369bebe23e3460b37cba4a64d00da7461b6fb028 | refs/heads/master | 2020-12-03T03:50:33.154428 | 2013-10-25T03:22:18 | 2013-10-25T03:22:18 | 15,031,357 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | #!/usr/bin/env python
#
# Patrick Hung.
"""
Simple utility functions for SV-classifications
"""
from numpy import zeros, multiply, ndarray, vectorize, array, dot, transpose, diag, sum
def KernelMatrix(X, k=dot):
    """Return the symmetric kernel (Gram) matrix for the rows of X.

    The kernel k is evaluated only on the upper triangle; the matrix is
    then mirrored so that entry (i, j) equals k(X[i], X[j]).
    """
    n_points = X.shape[0]
    upper = zeros((n_points, n_points))
    for row in range(n_points):
        for col in range(row, n_points):
            upper[row, col] = k(X[row, :], X[col, :])
    # Mirror the upper triangle onto the lower one; subtract the diagonal
    # once so it is not counted twice.
    return upper + transpose(upper) - diag(upper.diagonal())
def WeightVector(alpha, X, y):
    """Return the SVM weight vector w = sum_i alpha_i * y_i * x_i."""
    coeffs = (alpha * y).flatten()
    # Scale each sample (row of X) by its coefficient, then add the rows.
    weighted_rows = transpose(transpose(X) * coeffs)
    return sum(weighted_rows, 0)
def SupportVectors(alpha, y=None, eps = 0):
    """Return the indices of the support vectors (alpha_i > eps).

    Args:
        alpha: Lagrange multipliers from the SVM solution.
        y: optional class labels (2-d, as used by (y > 0).nonzero()[1]);
           when given, the indices are split per class.
        eps: threshold below which an alpha is treated as zero.

    Returns:
        A single index list when y is None, otherwise the pair
        (class-1 indices, class-2 indices).
    """
    # Bug fix: `import mystic.svmtools` only binds the name `mystic`,
    # so the bare `svmtools` reference below raised NameError.
    import mystic.svmtools as svmtools
    sv = svmtools.SupportVectors(alpha,eps)
    # Bug fix: `y == None` is an elementwise comparison for an array y,
    # whose truth value raises ValueError; identity test is correct.
    if y is None:
        return sv
    else:
        class1 = set((y>0).nonzero()[1])
        class2 = set((y<0).nonzero()[1])
        sv1 = class1.intersection(sv)
        sv2 = class2.intersection(sv)
        return list(sv1), list(sv2)
def Bias(alpha, X, y, kernel=dot):
    """Compute the classification bias term.

    Picks one support vector from each class (found with a small
    tolerance) and averages the two implied offsets.
    """
    pos_svs, neg_svs = SupportVectors(alpha, y, eps=1e-6)
    pos_pt = X[pos_svs[0], :]
    neg_pt = X[neg_svs[0], :]
    k_pos = kernel(X, pos_pt)
    k_neg = kernel(X, neg_pt)
    return -0.5 * (sum(alpha * y * k_pos) + sum(alpha * y * k_neg))
# end of file
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
006ba610db2b9012c049987a6af4aaf9bdbd2252 | 4f1dd1353b83d30e97abbff2c6b531cd538160e3 | /RoboBase.py | 8912a17c7d4874fd362a70e152be6c8b6c926233 | [
"MIT"
] | permissive | titos-carrasco/RoboBase | c089f84f0eb622877f0ed8620aadd18af5109cdd | ac3fe692482a6bd73ad836d56336c76508731f2d | refs/heads/master | 2021-01-13T01:40:40.773039 | 2014-04-18T22:59:34 | 2014-04-18T22:59:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,744 | py | # -*- coding: utf-8 -*-
"""Bluetooth control for a generic Arduino based robot
"""
import serial
import threading
import time
class RoboBase:
    """Serial/Bluetooth controller for a generic Arduino based robot.

    Python 2 code (uses `print val` statements).  All public commands
    take the lock, exchange one fixed-size packet with the firmware and
    release the lock, so the object is safe to share between threads.

    Usage:
        robot = DaguCar("/dev/rfcomm0")
        if(robot.IsConnected()):
            robot.SetMotors(-255, 255)
            time.sleep(1)
            robot.SetMotors(0, 0)
        robot.Close()
    """
    # Fixed size (bytes) of every command/echo packet.
    PACKET_LENGTH = 8;

    def __init__(self, port, bauds=9600):
        """Create the robot object and open a connection to it.

        Retries the serial open up to 4 times; on success any stale
        bytes from a previous session are drained.

        Args:
            port: The serial port to use (string)
            bauds: The speed for the serial communication (integer)
        Raises:
            KeyboardInterrupt
        """
        self._lock = threading.Lock()
        self._ser = None
        for t in range(4):
            try:
                self._ser = serial.Serial(port, baudrate=bauds, bytesize=8,
                                parity='N', stopbits=1, timeout=1)
                self._Debug('RoboBase.Init: Connected to %s, %d bps' %
                            (port, bauds))
                self._Debug('RoboBase.Init: Ignoring old data')
                self._ConsumeOldData()
                break
            except serial.SerialException:
                self._Debug('RoboBase.Init: SerialException')
            except ValueError:
                self._Debug('RoboBase.Init: ValueError')
            except IOError:
                self._Debug('RoboBase.Init: IOError')
            except KeyboardInterrupt:
                # Only Ctrl-C aborts the retry loop; other errors retry.
                self._Debug('RoboBase.Init: KeyboardInterrupt')
                raise

    def _Lock(self):
        """Get an exclusive access to the robot.

        Returns True with the lock held when the port is usable;
        otherwise releases the lock and returns False.
        """
        self._lock.acquire()
        if(self._ser!=None and self._ser.isOpen()):
            return True
        else:
            self._lock.release()
            return False

    def _Unlock(self):
        """Release the exclusive access to the robot (no-op if unheld)."""
        try:
            self._lock.release()
        except:
            pass

    def _Debug(self, val):
        """Simple console debug."""
        print val

    def _ConsumeOldData(self):
        """Consume data from latest requests.

        Reads once with a 1 s timeout and restores the previous timeout;
        the `finally: break` makes the while loop run a single pass.
        """
        timeout = self._ser.timeout
        self._ser.timeout = 1
        while(True):
            try:
                self._ser.read(1000)
            finally:
                break
        self._ser.timeout = timeout

    def IsConnected(self):
        """True if connected to the robot."""
        try:
            if(self._ser.isOpen()):
                return True
        except:
            pass
        return False

    def Close(self):
        """Close the connection to the robot."""
        if(self._Lock()):
            self._ser.close()
            self._ser = None
            self._Unlock()

    # Commands for the robot (first byte of every packet).
    CMD_SET_MOTORS = 0x01
    CMD_PING = 0x02
    CMD_BEEP = 0x03
    CMD_INFO = 0x04

    def _SendCommand(self, packet):
        """Send a command to the robot.

        The firmware echoes the packet back; a mismatch is only logged.

        Args:
            packet: PACKET_LENGTH byte packets.
                    The first byte is the command (CMD_XX)
        """
        self._ser.write(packet)
        self._ser.flush()
        r = self._ser.read(self.PACKET_LENGTH)  # robot must return the packet
        r = bytearray(r)
        if(packet !=r ):
            self._Debug('Packet Mismatch')
            self._Debug(list(packet))
            self._Debug(list(r))

    def SetMotors(self, motor1, motor2):
        """Applies power to the motors

        Packet layout: [cmd, dir1, |power1|, dir2, |power2|, 0, 0, 0]
        where dir is 0x00 for backward, 0x01 for forward.
        NOTE(review): the literal 0x01 duplicates CMD_SET_MOTORS.

        Args:
            motor1, motor2 : power for the motor (-255 - 255)
                             0 = stop, <0 backward, >0 forward
        """
        if(self._Lock()):
            try:
                motor1, motor2 = int(motor1), int(motor2)
                if(motor1<0):
                    m1_dir = 0x00;
                else:
                    m1_dir = 0x01
                if(motor2<0):
                    m2_dir = 0x00
                else:
                    m2_dir = 0x01
                packet = bytearray(self.PACKET_LENGTH)
                packet[0] = 0x01
                packet[1] = m1_dir
                packet[2] = abs(motor1) & 0xFF
                packet[3] = m2_dir
                packet[4] = abs(motor2) & 0XFF
                self._SendCommand(packet)
            except serial.SerialTimeoutException:
                self._Debug('RoboBase.SetMotors: SerialTimeoutException')
            except serial.SerialException:
                self._Debug('RoboBase.SetMotors: SerialException')
            except:
                self._Debug('RoboBase.SetMotors: Unexpected Exception')
            self._Unlock()

    def Ping(self, max_distance):
        """Gets the distance reported by the ping sensor

        max_distance is sent big-endian in bytes 1-2; the reply is a
        16-bit value that is divided by 100 (presumably centimetres to
        metres -- TODO confirm against the firmware).

        Args:
            max_distance: max distance for detection (integer)

        Returns:
            the distance to an obstacle (0 when not connected or on error)
        """
        r = 0
        if(self._Lock()):
            try:
                max_distance = abs(int(max_distance)) & 0xFFFF
                packet = bytearray(self.PACKET_LENGTH)
                packet[0] = 0x02
                packet[1] = (max_distance >> 8)
                packet[2] = (max_distance & 0xFF)
                self._SendCommand(packet)
                r = self._Read2UBytes()/100.0
            except serial.SerialTimeoutException:
                self._Debug('RoboBase.Ping: SerialTimeoutException')
            except serial.SerialException:
                self._Debug('RoboBase.Ping: SerialException')
            except:
                self._Debug('RoboBase.Ping: Unexpected Exception')
            self._Unlock()
        return r

    def Beep(self, freq, duration):
        """Make a sound

        Sends freq and duration big-endian, then sleeps for the beep
        duration so the call is synchronous.

        Args:
            freq: frequency (integer)
            duration: duration of the beep (integer) in milliseconds
        """
        if(self._Lock()):
            try:
                freq = abs(int(freq)) & 0xFFFF
                duration = abs(int(duration)) & 0XFFFF
                packet = bytearray(self.PACKET_LENGTH)
                packet[0] = 0x03
                packet[1] = (freq >> 8)
                packet[2] = (freq & 0xFF)
                packet[3] = (duration >> 8)
                packet[4] = (duration & 0xFF)
                self._SendCommand(packet)
                time.sleep(duration/1000.0)
            except serial.SerialTimeoutException:
                self._Debug('RoboBase.Beep: SerialTimeoutException')
            except serial.SerialException:
                self._Debug('RoboBase.Beep: SerialException')
            except:
                self._Debug('RoboBase.Beep: Unexpected Exception')
            self._Unlock()

    def GetInfo(self):
        """Get robot information

        Returns:
            Information about the robot (one line of text from the
            firmware; empty string when not connected or on error)
        """
        r = ''
        if(self._Lock()):
            try:
                packet = bytearray(self.PACKET_LENGTH)
                packet[0] = 0x04
                self._SendCommand(packet)
                r = self._ReadLine()
            except serial.SerialTimeoutException:
                self._Debug('RoboBase.GetInfo: SerialTimeoutException')
            except serial.SerialException:
                self._Debug('RoboBase.GetInfo: SerialException')
            except:
                self._Debug('RoboBase.GetInfo: Unexpected Exception')
            self._Unlock()
        return r

    ###################################################################
    # Low-level read helpers (caller must hold the lock).
    def _ReadLine(self):
        return self._ser.readline()

    def _ReadBytes(self, n):
        return self._ser.read(n)

    def _Read1UByte(self):
        return ord(self._ser.read(1))

    def _Read2UBytes(self):
        # Big-endian unsigned 16-bit value.
        return (ord(self._ser.read(1)) << 8) + ord(self._ser.read(1))
| [
"titos.carrasco@gmail.com"
] | titos.carrasco@gmail.com |
87302d938e2f55c44d36d63480ba7cc1d616a017 | dbd87fe6e9466c4cada18b037667cfdddc62c193 | /data/AV Connectior/alpha_vantage/cryptocurrencies.py | 4af80e1a7b1669d87d1c5c63d89dc537cb296929 | [] | no_license | alexanu/Python_Trading_Snippets | 74515a40dc63ba50d95bd50330ed05d59b5dc837 | 85969e681b9c74e24e60cc524a952f9585ea9ce9 | refs/heads/main | 2023-06-25T03:27:45.813987 | 2023-06-09T16:09:43 | 2023-06-09T16:09:43 | 197,401,560 | 18 | 17 | null | 2023-02-08T22:25:25 | 2019-07-17T14:05:32 | Jupyter Notebook | UTF-8 | Python | false | false | 2,125 | py | from .alphavantage import AlphaVantage as av
class CryptoCurrencies(av):
    """Digital/crypto currency endpoints of the Alpha Vantage API.

    All three calls share the same signature:

    Keyword Arguments:
        symbol: the digital/crypto currency of interest, e.g. 'BTC'
            (any entry of the digital currency list).
        market: the exchange market, e.g. 'CNY'
            (any entry of the market list).

    Prices and volumes are quoted both in the market-specific currency
    and in USD.
    """

    @av._output_format
    @av._call_api_on_func
    def get_digital_currency_daily(self, symbol, market):
        """Daily historical series for *symbol* traded on *market*,
        refreshed daily at midnight (UTC)."""
        return ('DIGITAL_CURRENCY_DAILY',
                'Time Series (Digital Currency Daily)',
                'Meta Data')

    @av._output_format
    @av._call_api_on_func
    def get_digital_currency_weekly(self, symbol, market):
        """Weekly historical series for *symbol* traded on *market*,
        refreshed daily at midnight (UTC)."""
        return ('DIGITAL_CURRENCY_WEEKLY',
                'Time Series (Digital Currency Weekly)',
                'Meta Data')

    @av._output_format
    @av._call_api_on_func
    def get_digital_currency_monthly(self, symbol, market):
        """Monthly historical series for *symbol* traded on *market*,
        refreshed daily at midnight (UTC)."""
        return ('DIGITAL_CURRENCY_MONTHLY',
                'Time Series (Digital Currency Monthly)',
                'Meta Data')
| [
"oanufriyev@gmail.com"
] | oanufriyev@gmail.com |
ba349bf428a3ad2f98c1478dcd08138dde07d944 | 76e62ddbfdfba19c80b37e855a4df67672ef0808 | /BIZa/2014/Novikova_J_V/Задача №4. Вариант 34.py | f7c4c6405a63589003c58083cc5a3d9c6a220461 | [
"Apache-2.0"
] | permissive | stasvorosh/pythonintask | 9d30f3cd492e89783b7221402375c1ebe4690baa | 8169ed26510022fe0d589f4013f11749131957df | refs/heads/master | 2021-01-17T16:49:32.778063 | 2016-10-10T14:08:04 | 2016-10-10T14:08:04 | 52,255,539 | 6 | 0 | null | 2016-02-22T07:33:16 | 2016-02-22T07:33:15 | null | UTF-8 | Python | false | false | 1,402 | py | #Задача №4. Вариант 34.
#Напишите программу, которая выводит имя, под которым скрывается Мария Луиза Полякова — Байдарова. Дополнительно необходимо вывести область интересов указанной личности, место рождения, годы рождения и смерти (если человек умер), вычислить возраст на данный момент (или момент смерти). Для хранения всех необходимых данных требуется использовать переменные. После вывода информации программа должна дожидаться пока пользователь нажмет Enter для выхода
#Novikova J. V.
#26.04.2016
# Task 4, variant 34: print a short biography card for Marina Vlady
# (Marie Louise Poliakoff-Baidaroff), then wait for Enter to exit.
print('Мария Луиза Полякова — Байдарова, французская актриса, более известна как Влади Марина')
# Label -> value pairs, printed in insertion order.
facts = {
    'Место рождения': 'Клиши, Франция',
    'Год рождения': '1938',
    'Умерла в возрасте': '78',
    'Область интересов': 'Кино',
}
for label, value in facts.items():
    print(label + ': ' + value)
input('Нажмите Enter для выхода')
"stasyan.v@gmail.com"
] | stasyan.v@gmail.com |
d7b2079f01d6308c91b68f4e7309c6900690d40e | 8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4 | /pyobjc-framework-Cocoa/PyObjCTest/test_nsdraggingitem.py | ed2c3d005e4b5cb0bdbcbe99f3b2c1d47e98bb2b | [
"MIT"
] | permissive | strogo/pyobjc | ac4201c7742eb75348328eeecb7eedf4e3458de3 | 2579c5eaf44b0c5af77ee195c417d2c65e72dfda | refs/heads/master | 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | import AppKit
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSDraggingItem(TestCase):
    """PyObjC bridge metadata checks for AppKit.NSDraggingItem
    (APIs introduced in macOS 10.7)."""

    @min_os_level("10.7")
    def testConstants10_7(self):
        # The image-component dictionary keys must be bridged as strings.
        self.assertIsInstance(AppKit.NSDraggingImageComponentIconKey, str)
        self.assertIsInstance(AppKit.NSDraggingImageComponentLabelKey, str)

    @min_os_level("10.7")
    def testMethods10_7(self):
        # Argument 0 of setImageComponentsProvider_ and the result of
        # imageComponentsProvider are blocks with type encoding b"@".
        self.assertArgIsBlock(
            AppKit.NSDraggingItem.setImageComponentsProvider_, 0, b"@"
        )
        self.assertResultIsBlock(AppKit.NSDraggingItem.imageComponentsProvider, b"@")
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
a025c41a715c278a37e8811487827c599e634f77 | 1f0b38e455ec949eb1285437373c496f46900955 | /Figures/early_SMBH_growth/redshift_vs_Mbh_growth_20200323.py | 006f80c26ed4dda1e3927ed1083fee38aa70d5f6 | [] | no_license | d80b2t/JWST_Cycle1 | 87fef5f58fca242e9df7717a609120be1cf01af0 | e6e7618640d4b35cff528304e475fed1ee0231c5 | refs/heads/master | 2021-05-12T19:43:28.468189 | 2020-03-23T17:42:21 | 2020-03-23T17:42:21 | 117,101,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,109 | py | '''
WISE detections and colors of Very High redshift quasars
'''
import math
import numpy as np
from astropy.io import fits
from astropy.io import ascii
from astropy.table import Table
from astropy.table import Table
import astropy.units as u
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from matplotlib import gridspec
from astropy.cosmology import FlatLambdaCDM
from astropy.cosmology import z_at_value
## Setting up the cosmology...
cosmo = FlatLambdaCDM(H0=68.0, Om0=0.31) #Banados thesis
#ages = np.array([13, 10, 8, 6, 5, 4, 3, 2, 1.5, 1.2, 1, 0.8, 0.70, 0.50, 0.25, 0.10])*u.Gyr
ages = np.array([13, 10, 8, 6, 5, 4, 3, 2, 1.5, 1.25, 0.75, 0.50, 0.25, 0.10])*u.Gyr
ageticks = [z_at_value(cosmo.age, age) for age in ages]
redshifts = np.array([6, 7, 8, 9, 10, 12, 15, 20])
redshiftticks = [cosmo.age(redshift).value for redshift in redshifts]
##
##  READ-IN THE    D A T A    F I L E (S)
##
##  Inayoshi, Visbal, Haiman  Annu. Rev. Astron. Astrophys. 2019. 58:1–79
# 203 z>~6 quasars; columns include 'redshift', 'Mbh' and 'M1450'.
filename = 'Inayoshi_2019_ARAA_203quasars.dat'
VHzQs  = ascii.read(filename, delimiter=r'\s', guess=False)
z_VHzQs       = VHzQs['redshift']
log_MBH_VHzQs = np.log10(VHzQs['Mbh'])
age_VHzQs     = cosmo.age(z_VHzQs).value

## Trakhtenbrot et al. (2011) z=4.8 objects
## name, redshift, L_bol, log_MBH, l_Edd
## J143+0635 4.850 46.98 8.99 -0.19
path = '/cos_pc19a_npr/data/highest_z_QSOs/Trakhtenbrot2011/'
filename = 'Table2.dat'
Trak11 = ascii.read(path+filename, delimiter=r'\s', guess=False)
z_Trak11       = Trak11['redshift']
log_MBH_Trak11 = Trak11['log_MBH']
age_Trak11     = cosmo.age(z_Trak11).value

## The 3 monsters in Banados et al, 2018, Nature, 553, 473
## name, redshift, L_bol, log_MBH, l_Edd
## J143+0635 4.850 46.98 8.99 -0.19
path = '/cos_pc19a_npr/data/highest_z_QSOs/Mbh_values/'
filename = 'Banados_2018_Fig2.dat'
Banados = ascii.read(path+filename, delimiter=r'\s', guess=False)
log_MBH_Bana = np.log10(Banados['MBH'])
z_Bana = Banados['redshift']

filename = 'Gallerani_2017.dat'
Gallerani = ascii.read(path+filename, delimiter=r'\s', guess=False)
log_MBH_Gall = np.log10(Gallerani['M_BH'])
z_Gall = Gallerani['redshift']

filename = 'Top10.dat'
Top10 = ascii.read(path+filename, delimiter=r'\s', guess=False)
log_MBH_Top10 = np.log10(Top10['MBH'])
z_Top10 = Top10['redshift']
##
## Salpeter timescales,
## timescale for BH growth, based upon the Eddington limit: a growing
## black hole heats accretion material, which glows and is subject to
## the luminosity limit. The timescale is 5e7 years.
## Start and end redshift.
zrange = np.arange(3, 35., 0.02)
# Age of the Universe (in years) at each redshift in zrange.
ee = [cosmo.age(zz).value for zz in zrange]
t_bana = np.array(ee)*1e9
## Some physical values
Ledd = 1.0
## Hold M_seed constant, vary eta (radiative efficiency).
# Growth model: M(t) = M_seed * exp(t / t_salpeter), with
# t_salpeter = 4.5e7 yr * (eta/0.1) / L_Edd.
M_seed   = 1000.0
eta_variable = [0.10, 0.11, 0.125, 0.14, 0.15]
eta_label = ['0.10', '0.11', '0.125', '0.14', '0.15']
s = (len(t_bana),len(eta_variable))
M_BH_grower_eta = np.zeros(s)
for ii in range(len(eta_variable)):
    t_salpeter = 4.5e7 * (eta_variable[ii]/0.1) * (Ledd**(-1))
    M_BH_grower_eta[:,ii] = (np.exp(t_bana/t_salpeter))*M_seed
## Hold eta constant, vary M_seed
## bit more interesting since want to see that range of MBH_seeds
## that are viable
eta = 0.10
t_salpeter = 4.5e7*(eta/0.1)*(Ledd**(-1))
Mseed_variable = [1.0, 10., 100.0, 1000, 10000.]
s = (len(t_bana),len(Mseed_variable))
M_BH_grower_MBHseed = np.zeros(s)
for jj in range(len(Mseed_variable)):
    M_BH_grower_MBHseed[:,jj] = (np.exp(t_bana/t_salpeter)) * (Mseed_variable[jj])
##
##  Making the plot
##
fig, ax1 = plt.subplots(figsize=(14.0, 10.0))

## May fave new line ;-=)
plt.style.use('dark_background')
plt.rcParams.update({'font.size': 14})
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['lines.linewidth'] = 22 #does this do anything??!!

## Adjusting the Whitespace for the plots
left   = 0.14   # the left side of the subplots of the figure
right  = 0.94   # the right side of the subplots of the figure
bottom = 0.16   # the bottom of the subplots of the figure
top    = 0.88   # the top of the subplots of the figure
wspace = 0.26   # the amount of width reserved for blank space between subplots
hspace = 0.06   # the amount of height reserved for white space between subplots
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)

## Some NPR defaults
ls              = 'solid'
lw              = 1.0
ms_large        = 250
ms              = ms_large/3.
alpha           = 1.0
fontsize        = 36
labelsize       = fontsize
tickwidth       = 2.0
linewidth       = 2.4
tickwidth       = 2.0
ticklength      = 6.0
ticklabelsize   = labelsize
majorticklength = 12
minorticklength = 6

from matplotlib.ticker import MultipleLocator, FormatStrFormatter
minorLocator = MultipleLocator(5)

## define the colormap
cmap = plt.cm.jet  ## great with 'dark_background'
cmap = plt.cm.viridis

##  AGE RANGE
xmin = 0.20   # Gyr
xmax = 1.4
## REDSHIFT RANGE
zmin = 4.8   ## 5.8 ## 3.0  ## 4.3
zmax = 38.0  ## 16.0  ## z=45 in Banados 2018
## Mass access
ymin = 0.5   # 6.5 2.8 good if redshift is z= 45.
ymax = 10.5  # 10.5

# Color the quasar points by their UV magnitude M1450.
c = VHzQs['M1450']
#cmap = plt.cm.jet  ## great with 'dark_background'
cmap = plt.cm.viridis_r

## Plotting the quasars...
ax1.scatter(z_VHzQs, log_MBH_VHzQs, c=c, cmap=cmap, marker="P", s=(ms_large*1.2), label="$z>$6 QSOs", zorder=12)
#ax1.scatter(z_Top10, log_MBH_Top10, c='b', marker="s", s=ms_large, label="Highest-$z$/most massive", zorder=10)
#ax1.scatter(z_Trak11, log_MBH_Trak11, c='k', marker="d", s=ms_large, label="Trakhtenbrot (2011)", zorder=10)
#ax1.scatter(z_Trak11, log_MBH_Trak11, c='silver', marker="d", s=ms_large, label="Trakhtenbrot+ (2011)", zorder=10)

##
## BH Growth tracks..
##
## Varying   e t a
#for ii in range(len(eta_variable)):
    #ax1.plot(zrange, (np.log10( M_BH_grower_eta[:,ii] )), label =eta_label[ii], linewidth=8, linestyle='--', color='crimson')

## Varying seed BH mass
# NOTE(review): each track is plotted twice (once unlabeled, once
# labeled) -- the second call draws over the first.
for jj in range(len(Mseed_variable)):
    print("Plotting Mseed_variable lines", jj)
    ax1.plot(zrange, (np.log10(M_BH_grower_MBHseed[:,jj])), linewidth=8, linestyle='--')
    ax1.plot(zrange, (np.log10(M_BH_grower_MBHseed[:,jj])), label ='$M_{seed}=$'+str(Mseed_variable[jj])+' $M_{\odot}$', linewidth=8, linestyle='--')

## L E G E N D
# NOTE(review): frameon='True' passes a *string*, which is merely truthy;
# the boolean True is what is meant.
ax1.legend(loc='upper right', fontsize=fontsize/1.3, frameon='True')

# Setting up the axes...
ax1.set_xlim((zmin, zmax))
ax1.set_ylim((ymin, ymax))
ax1.tick_params('x', direction='in', which='major', bottom='on', top='on', left='on', right='on', size=fontsize/1.6)
ax1.tick_params('x', direction='in', which='minor', bottom='on', top='on', left='on', right='on', size=fontsize/1.6)
ax1.tick_params('y', direction='in', which='major', bottom='on', top='on', left='on', right='on', size=fontsize/1.6)
ax1.tick_params('y', direction='in', which='minor', bottom='on', top='on', left='on', right='on', size=fontsize/1.6)
ax1.xaxis.set_minor_locator(minorLocator)
##
ax1.tick_params(axis='both', labelsize = fontsize/1.1)
ax1.set_xlabel('redshift, $z$', fontsize = fontsize)
# NOTE(review): label has an unbalanced ')' and 'M$_{BM}$' is
# presumably meant to be 'M$_{BH}$'.
ax1.set_ylabel('log (M$_{BM}$) / M$_{\odot}$)', fontsize = fontsize)

ax4 = ax1.twiny()
## If AGE, is the top x-axis
ax4.set_xticks(ageticks)
ax4.set_xticklabels(['{:g}'.format(age) for age in ages.value])
ax4.set_xlim(zmin, zmax)     ## the co-ordinate system is in "redshift units"
ax4.set_xlabel('Time since Big Bang (Gyr)', fontsize=fontsize)
ax4.tick_params(axis='both', labelsize=fontsize/1.1)
ax4.xaxis.set_label_coords(0.50, 1.10)

## if REDSHIFT is the top x-axis
#ax4.set_xlim(xmin, xmax)    ## The co-ordinate system is in "age units"
#ax4.set_xticks(redshiftticks)
#ax4.set_xticklabels(['{:g}'.format(redshifts) for redshifts in redshifts])
#ax4.tick_params(axis='both', labelsize=36)

#plt.show()
plt.savefig('redshift_vs_Mbh_growth_temp.png',format='png')
plt.close(fig)
| [
"npross@lbl.gov"
] | npross@lbl.gov |
fecce092dd36e224a01aab6a9e1d8b24d5bbf868 | 983ca9afc80dc1bd2cd25e81ec51de8c1fd39394 | /Unit5/5.2/googleSniff.py | 37fc74ef1da2a12b1df291bd03061d1e9e4ea3c1 | [] | no_license | mi1k7ea/Violent-Python | d5630a67cbdc218640d21f58e4081cd6530f32fe | c8048b04e02a6e91aed8e73af36e707b004b115c | refs/heads/master | 2022-11-25T23:06:05.138896 | 2020-08-02T15:03:32 | 2020-08-02T15:03:32 | 284,483,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | #!/usr/bin/python
#coding=utf-8
import optparse
from scapy.all import *
def findGoogle(pkt):
    """Scapy per-packet callback: print the query of any Google search
    seen in a sniffed HTTP GET request.  (Python 2 code.)"""
    if pkt.haslayer(Raw):
        payload = pkt.getlayer(Raw).load
        if 'GET' in payload:
            if 'google' in payload:
                # Pull the value of the 'q=' parameter (case-insensitive);
                # the pattern requires a '&' after the value.
                r = re.findall(r'(?i)\&q=(.*?)\&', payload)
                if r:
                    search = r[0].split('&')[0]
                    # Drop any leading 'q=' and undo URL-encoded spaces.
                    search = search.replace('q=', '').replace('+', ' ').replace('%20', ' ')
                    print '[+] Searched For: ' + search
def main():
    """Parse the -i <interface> option and sniff HTTP (tcp/80) traffic
    on that interface until interrupted.  (Python 2 code.)"""
    parser = optparse.OptionParser('[*]Usage: python googleSniff.py -i <interface>')
    parser.add_option('-i', dest='interface', type='string', help='specify interface to listen on')
    (options, args) = parser.parse_args()

    if options.interface == None:
        print parser.usage
        exit(0)
    else:
        conf.iface = options.interface
    try:
        print '[*] Starting Google Sniffer.'
        # Hand every TCP/80 packet to findGoogle; blocks until Ctrl-C.
        sniff(filter='tcp port 80', prn=findGoogle)
    except KeyboardInterrupt:
        exit(0)

if __name__ == '__main__':
    main()
"mi1k7ea@qq.com"
] | mi1k7ea@qq.com |
f79e9655cd13395e6de47f2c80331663af24e8a8 | 96e507cf993e26ea9fdc8586073fb5822b9b5c26 | /ex_1.3_datatype_dictionary.py | 04a3c8058f2e27f2d8fcc3cf01ff3531ba2e92ad | [] | no_license | bhoj001/python_tutorial | eff4bd1becccc80950a3ebd55a1abf26985e9cd5 | de717e518ece9989a8ed90f346374dc6cfaeebfc | refs/heads/master | 2020-12-14T21:33:54.559542 | 2020-03-02T09:47:45 | 2020-03-02T09:47:45 | 234,869,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,427 | py | '''
author: bhoj bahadur karki
date: 2020-jan-19th
purpose : about dictionary in python
Dictionary: Python dictionary is an unordered collection of items.
While other compound data types have only value as an element, a dictionary has a key: value pair.
Dictionaries are optimized to retrieve values when the key is known.
PythonOrg def:
Dictionaries are sometimes found in other languages as “associative memories” or “associative arrays”.
Unlike sequences, which are indexed by a range of numbers, dictionaries are indexed by keys,
which can be any immutable type; strings and numbers can always be keys. Tuples can be
used as keys if they contain only strings, numbers, or tuples; if a tuple contains any
mutable object either directly or indirectly, it cannot be used as a key. You can’t use
lists as keys, since lists can be
modified in place using index assignments, slice assignments, or methods like append() and extend().
It is best to think of a dictionary as a set of key: value pairs, with the requirement
that the keys are unique (within one dictionary). A pair of braces creates an empty dictionary: {}.
Placing a comma-separated list of key:value pairs within the braces adds
initial key:value pairs to the dictionary; this is also the way dictionaries are written on output.
The main operations on a dictionary are storing a value with some key and extracting
the value given the key. It is also possible to delete a key:value pair with del.
If you store using a key that is already in use, the old value associated with that key is forgotten.
It is an error to extract a value using a non-existent key.
Note: dictionary has key,value
key can be integer, string, float but it has to be unique
>>> x = {2:30,3:2,2:40} # if we repeat a key, one item(here 2:30) is ignored
>>> x
{2: 40, 3: 2}
'''
# -------Creating dictionary------------
# using {}
tel = {'jack': 4098, 'sape': 4139}
tel['mike'] = 4127 # adding to dictionary
print("tel=",tel)

# using dict()
my_dict = dict({1:'apple', 2:'ball'})
print("my_dict=",my_dict)
# from sequence having each item as a pair
my_dict = dict([(1,'apple'), (2,'ball')])
print("my_dict=",my_dict)

# using dict() keyword
x= dict([('juli', 4139), ('max', 4127), ('jack', 4098)])
print("x=",x)

# dict comprehension; the loop variable x is scoped to the
# comprehension in Python 3, so the dict x above is untouched.
a = {x: x**2 for x in (2, 4, 6)}
print("a=",a)

# --------Accessing element in dictionary-------
# we use key to access element in dictionary e.g. dic[key]
print("juli =",x['juli'])

# ---------Changing element in dictionary-----
# we use equal sign with syntax: dict[key]=value
x['juli'] = 3
print("new x = ",x)

# -------deleting item in dictionary----------
# using dict.pop(key)
x.pop('juli') # this will remove key-value pair of juli(juli:3)
print("after pop x = ",x)
# using del dict[key]
del x['max']
print("after del x = ",x)
# using .clear() to clear all items
x.clear() # this will empty the dictionary
print("after clear x=",x)

# ----------Looping technique in dictionary---------
knights = {'ram': 'the pure', 'robin': 'the brave'}
for k, v in knights.items():
    print(k, v)

# ------for sequence datatype like list, tuple, range-------
#------getting index value from sequence datatype like list, tuple, range-------
# use enumerate() function
# When looping through a sequence, the position index and corresponding value can
# be retrieved at the same time using the enumerate() function.
# i =index, v= value, in short form
for i, v in enumerate(['tic', 'tac', 'toe']):
    print(i, v)

# -------combine two list and loop------------
# To loop over two or more sequences at the same time, the entries can be paired with
# the zip() function.
questions = ['name', 'quest', 'favorite color']
answers = ['Bhoj', 'to teach programming', 'blue']
for q, a in zip(questions, answers):
    print('What is your {0}?  It is {1}.'.format(q, a))

#-------- reversing a sequence datatype # range syntax: range(start:int, end:int, step:int)
for item in reversed(range(2,10,2)):
    print(item)

# ---------Loop via sorting an item------------
# To loop over a sequence in sorted order, use the sorted() function
# which returns a new sorted list while leaving the source unaltered.
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
# here set() function removes the duplicate items
for f in sorted(set(basket)):
    print(f)

'''
Method	Description
clear()	Remove all items from the dictionary.
copy()	Return a shallow copy of the dictionary.
fromkeys(seq[, v])	Return a new dictionary with keys from seq and value equal to v (defaults to None).
get(key[,d])	Return the value of key. If key does not exist, return d (defaults to None).
items()	Return a new view of the dictionary's items (key, value).
keys()	Return a new view of the dictionary's keys.
pop(key[,d])	Remove the item with key and return its value or d if key is not found.
	If d is not provided and key is not found, raises KeyError.
popitem()	Remove and return an arbitrary item (key, value). Raises KeyError if the dictionary
	is empty.
setdefault(key[,d])	If key is in the dictionary, return its value. If not,
	insert key with a value of d and return d (defaults to None).
update([other])	Update the dictionary with the key/value pairs from other,
	overwriting existing keys.
values()	Return a new view of the dictionary's values
'''
"nishantkarki305@gmail.com"
] | nishantkarki305@gmail.com |
c0ce587c985fdba86762d152de058192b4c8fc8a | 43fd8b12dc1b6a2fc7cf4d9b8a80d3f1ae0fac66 | /Test/others/requeset用法.py | 47a0ce9e6eb99ef82864c5610adc2e675068439b | [] | no_license | gxiang666/python_file | e707f829b2c35e6126bea79e299333faabe76b19 | 2ee0f52d53892d193dc83c10564f7326e0bad0da | refs/heads/master | 2022-12-07T04:16:29.166707 | 2019-10-25T02:59:26 | 2019-10-25T02:59:26 | 139,252,161 | 1 | 0 | null | 2022-11-22T02:38:40 | 2018-06-30T13:35:14 | Python | UTF-8 | Python | false | false | 140 | py | import requests
# Fetch a Bilibili video page and dump the response status and body.
r = requests.get("https://www.bilibili.com/video/av9784617?p=5")
print(r.status_code)
r.encoding = "utf-8"  # force the charset used to decode the body
# Bug fix: r.content is the raw byte string and ignores r.encoding;
# use r.text so the forced utf-8 decoding actually applies.
print(r.text)
| [
"1528357474@qq.com"
] | 1528357474@qq.com |
adac29ebb4dc4dcbd9bb458b8f74a2dd3f338700 | ca8dc4d5b6168648cf8a842fc27191fec3597a09 | /venv/lib/python3.6/site-packages/statsmodels/tools/tests/test_rootfinding.py | 4b840d33271b1842c9f9faf34a5de074491539f9 | [
"MIT"
] | permissive | iefuzzer/vnpy_crypto | 293a7eeceec18b934680dafc37381d1f5726dc89 | d7eed63cd39b1639058474cb724a8f64adbf6f97 | refs/heads/master | 2020-03-26T20:13:38.780107 | 2018-09-10T06:09:16 | 2018-09-10T06:09:16 | 145,311,871 | 3 | 0 | MIT | 2018-09-10T06:09:18 | 2018-08-19T14:48:32 | Python | UTF-8 | Python | false | false | 2,957 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 13:34:19 2013
Author: Josef Perktold
"""
import numpy as np
from statsmodels.tools.rootfinding import brentq_expanding
from numpy.testing import (assert_allclose, assert_equal, assert_raises,
assert_array_less)
def func(x, a):
    """Increasing cubic with a triple root at x = a."""
    return (x - a) ** 3
def func_nan(x, a, b):
    """Cubic with root at a, returning NaN wherever x < b.

    The input is promoted to at least 1-d so the mask assignment also
    works for scalar x.
    """
    xs = np.atleast_1d(x)
    values = (xs - 1. * a) ** 3
    undefined = xs < b
    values[undefined] = np.nan
    return values
def funcn(x, a):
    """Decreasing cubic: the negation of func, root at x = a."""
    return -((x - a) ** 3)
def test_brentq_expanding():
    """Exercise brentq_expanding on monotone cubics.

    Checks root finding over a grid of (root location, bound hints) for
    both increasing and decreasing functions, the documented failure
    modes, and the full_output info object.
    """
    cases = [
        (0, {}),
        (50, {}),
        (-50, {}),
        (500000, dict(low=10000)),
        (-50000, dict(upp=-1000)),
        (500000, dict(low=300000, upp=700000)),
        (-50000, dict(low= -70000, upp=-1000))
        ]

    funcs = [(func, None),
             (func, True),
             (funcn, None),
             (funcn, False)]
    for f, inc in funcs:
        for a, kwds in cases:
            kw = {'increasing':inc}
            kw.update(kwds)
            # Bug fix: the merged dict `kw` was built but `**kwds` was
            # passed, so the `increasing` hint was silently dropped and
            # `kw` was dead code.
            res = brentq_expanding(f, args=(a,), **kw)
            #print '%10d'%a, ['dec', 'inc'][f is func], res - a
            assert_allclose(res, a, rtol=1e-5)

    # wrong sign for start bounds
    # doesn't raise yet during development TODO: activate this
    # it kind of works in some cases, but not correctly or in a useful way
    #assert_raises(ValueError, brentq_expanding, func, args=(-500,), start_upp=-1000)
    #assert_raises(ValueError, brentq_expanding, func, args=(500,), start_low=1000)

    # low upp given, but doesn't bound root, leave brentq exception
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError, brentq_expanding, funcn, args=(-50000,), low= -40000, upp=-10000)

    # max_it too low to find root bounds
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError, brentq_expanding, func, args=(-50000,), max_it=2)

    # maxiter_bq too low
    # RuntimeError: Failed to converge after 3 iterations.
    assert_raises(RuntimeError, brentq_expanding, func, args=(-50000,), maxiter_bq=3)

    # cannot determine whether increasing, all 4 low trial points return nan
    assert_raises(ValueError, brentq_expanding, func_nan, args=(-20, 0.6))

    # test for full_output
    a = 500
    val, info = brentq_expanding(func, args=(a,), full_output=True)
    assert_allclose(val, a, rtol=1e-5)
    info1 = {'iterations': 63, 'start_bounds': (-1, 1),
             'brentq_bounds': (100, 1000), 'flag': 'converged',
             'function_calls': 64, 'iterations_expand': 3, 'converged': True}

    # adjustments for scipy 0.8.0 with changed convergence criteria
    assert_array_less(info.__dict__['iterations'], 70)
    assert_array_less(info.__dict__['function_calls'], 70)
    for k in info1:
        if k in ['iterations', 'function_calls']:
            continue
        assert_equal(info1[k], info.__dict__[k])
    assert_allclose(info.root, a, rtol=1e-5)
| [
"panwei303031816@gmail.com"
] | panwei303031816@gmail.com |
becb2f371e27eec0814fc314ec1629220a2c31c2 | ca12625e6d2f3581793694cfc40445a85fc4770b | /bitmex_websocket/_bitmex_websocket.py | 4a5d778dce46cc853d1c0175e929bb37a169943d | [
"MIT"
] | permissive | kelvinxue/bitmex-websocket | ad10c63ed0fb341f23ed9d9511cc235eb8a5f1b1 | 773531943abc71b0e10b2dc5feec58152796c234 | refs/heads/master | 2020-03-21T04:36:10.788915 | 2018-06-21T04:17:41 | 2018-06-21T04:17:41 | 138,117,395 | 0 | 0 | MIT | 2018-06-21T04:02:13 | 2018-06-21T04:02:13 | null | UTF-8 | Python | false | false | 4,072 | py | from bitmex_websocket.auth.api_key_auth import generate_nonce,\
generate_signature
from bitmex_websocket.settings import settings
from pyee import EventEmitter
from urllib.parse import urlparse
from websocket import WebSocketApp
import alog
import json
import ssl
import time
__all__ = ['BitMEXWebsocket']
class BitMEXWebsocketConnectionError(Exception):
pass
class BitMEXWebsocket(EventEmitter, WebSocketApp):
def __init__(self, should_auth=False, heartbeat=True, ping_interval=10,
ping_timeout=9):
self.ping_timeout = ping_timeout
self.ping_interval = ping_interval
self.should_auth = should_auth
self.heartbeat = heartbeat
self.channels = []
self.reconnect_count = 0
EventEmitter.__init__(self)
WebSocketApp.__init__(
self,
url=self.gen_url(),
header=self.header(),
on_message=self.on_message,
on_close=self.on_close,
on_open=self.on_open,
on_error=self.on_error,
on_pong=self.on_pong
)
self.on('subscribe', self.on_subscribe)
def gen_url(self):
base_url = settings.BASE_URL
url_parts = list(urlparse(base_url))
query_string = ''
if self.heartbeat:
query_string = '?heartbeat=true'
url = "wss://{}/realtime{}".format(url_parts[1], query_string)
return url
def run_forever(self, **kwargs):
"""Connect to the websocket in a thread."""
# setup websocket.run_forever arguments
ws_run_args = {
'sslopt': {"cert_reqs": ssl.CERT_NONE}
}
if self.heartbeat:
ws_run_args['ping_timeout'] = self.ping_timeout
ws_run_args['ping_interval'] = self.ping_interval
alog.debug(ws_run_args)
super().run_forever(**ws_run_args)
def on_pong(self, ws, message):
timestamp = float(time.time() * 1000)
latency = timestamp - (self.last_ping_tm * 1000)
self.emit('latency', latency)
def subscribe(self, channel: str):
subscription_msg = {"op": "subscribe", "args": [channel]}
self._send_message(subscription_msg)
def _send_message(self, message):
self.send(json.dumps(message))
def is_connected(self):
return self.sock.connected
@staticmethod
def on_subscribe(message):
if message['success']:
alog.debug("Subscribed to %s." % message['subscribe'])
else:
raise Exception('Unable to subsribe.')
def on_message(self, ws, message):
"""Handler for parsing WS messages."""
message = json.loads(message)
if 'error' in message:
self.on_error(ws, message['error'])
action = message['action'] if 'action' in message else None
if action:
self.emit('action', message)
elif 'subscribe' in message:
self.emit('subscribe', message)
elif 'status' in message:
self.emit('status', message)
def header(self):
"""Return auth headers. Will use API Keys if present in settings."""
auth_header = []
if self.should_auth:
alog.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature
# of a nonce and the WS API endpoint.
alog.debug(settings.BITMEX_API_KEY)
nonce = generate_nonce()
api_signature = generate_signature(
settings.BITMEX_API_SECRET, 'GET', '/realtime', nonce, '')
auth = [
"api-nonce: " + str(nonce),
"api-signature: " + api_signature,
"api-key:" + settings.BITMEX_API_KEY
]
return auth_header
def on_open(self, ws):
alog.debug("Websocket Opened.")
self.emit('open')
def on_close(self, ws):
alog.info('Websocket Closed')
def on_error(self, ws, error):
raise BitMEXWebsocketConnectionError(error)
| [
"jose.oliveros.1983@gmail.com"
] | jose.oliveros.1983@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.