text stringlengths 8 6.05M |
|---|
# I pledge my honor that I have abided by the Stevens Honor System
# Gabrielle Armetta
# A function which accepts a list of numbers
# and modifies the list by squaring each entry
def main():
    """Build the list of squares of the integers 1..10 and print it."""
    squares = [n ** 2 for n in range(1, 11)]
    print(squares)
main()
# accepts list of numbers 1 through 10
# and returns a list of each number in the original list, squared |
# Generated by Django 2.2.2 on 2019-06-07 21:46
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Add City.weather and redefine the default on City.last_update.

    Auto-generated by Django 2.2.2; the literal datetime default below is the
    moment the migration was generated (Django freezes timezone.now() into
    the AlterField when the model uses a callable default inconsistently).
    """

    # Must run after the migration that introduced City.last_update.
    dependencies = [
        ('mornings', '0004_city_last_update'),
    ]

    operations = [
        # New free-text weather description; nullable so existing rows migrate.
        migrations.AddField(
            model_name='city',
            name='weather',
            field=models.CharField(max_length=100, null=True),
        ),
        # NOTE(review): a frozen generation-time default is an auto-generation
        # artifact; rows normally get last_update set explicitly by the app.
        migrations.AlterField(
            model_name='city',
            name='last_update',
            field=models.DateTimeField(default=datetime.datetime(2019, 6, 7, 21, 46, 26, 46597, tzinfo=utc)),
        ),
    ]
|
"""Functions for IAM policies in Blueprints."""
import awacs.sts
from awacs.aws import Allow, Policy, Principal, Statement
def assumerolepolicy(service):
    """Build the boilerplate assume-role policy document for an AWS service.

    `service` is the short service name (e.g. "ec2"); the trusted principal
    becomes "<service>.amazonaws.com".
    """
    principal = Principal('Service', ['%s.amazonaws.com' % service])
    trust_statement = Statement(
        Effect=Allow,
        Action=[awacs.sts.AssumeRole],
        Principal=principal,
    )
    return Policy(Version='2012-10-17', Statement=[trust_statement])
|
import euslime
from setuptools import find_packages
from setuptools import setup
# Package metadata is pulled straight from the euslime package object so the
# name/description/version/author live in one place; README.md supplies the
# long description shown on PyPI.
setup(
    name=euslime.__name__,
    description=euslime.__doc__,
    long_description=open('README.md').read(),
    version=euslime.__version__,
    author=euslime.__author__,
    url='https://github.com/furushchev/euslime',
    license='BSD',
    packages=find_packages(),
    # Requirements are read verbatim, one per line, from requirements.txt.
    install_requires=open('requirements.txt').readlines(),
    entry_points={
        # Installs a `euslime` command wired to euslime.cli:main.
        'console_scripts': [
            'euslime = euslime.cli:main',
        ],
    },
)
|
# written by all, debugged by Zhiwen Wang
from .neural_network import NeuralNetwork
import time
import sys
if sys.version_info[0] == 2:
from urllib import urlopen
else:
from urllib.request import urlopen
import subprocess
import numpy as np
from sklearn.svm import SVR
from datetime import datetime
from itertools import islice
# ================================================================
def normalizePrice(price, minimum, maximum):
    """Linearly map price from [minimum, maximum] onto [-1, 1]."""
    midpoint = (maximum + minimum) / 2.0
    half_range = (maximum - minimum) / 2.0
    return (price - midpoint) / half_range
def denormalizePrice(price, minimum, maximum):
    """Invert normalizePrice: map a value in [-1, 1] back to [minimum, maximum].

    The inverse of n = (2p - (max + min)) / (max - min) is
    p = (n * (max - min) + (max + min)) / 2.  BUG FIX: the original divided
    the scaled term by an extra factor of 2, so round-tripping a price through
    normalize/denormalize halved its distance from the midpoint.
    """
    return (price * (maximum - minimum) + (maximum + minimum)) / 2
# ================================================================
def rollingWindow(seq, windowSize):
    """Yield consecutive sliding windows of length windowSize over seq.

    Each yielded window is a fresh list.  BUG FIXES vs. the original:
    - the original yielded one list object and mutated it in place, which
      corrupts results if the caller stores windows (e.g. ``list(...)``);
    - when seq has fewer than windowSize items the original let StopIteration
      escape the generator, which is a RuntimeError under PEP 479
      (Python 3.7+).  Now it simply yields nothing.
    """
    it = iter(seq)
    window = list(islice(it, windowSize))
    if len(window) < windowSize:
        return
    yield list(window)
    for element in it:
        window.pop(0)
        window.append(element)
        yield list(window)
def getMovingAverage(values, windowSize):
    """Return the mean of every windowSize-long sliding window over values."""
    return [sum(window) / len(window)
            for window in rollingWindow(values, windowSize)]
def getMinimums(values, windowSize):
    """Return the minimum of every windowSize-long sliding window over values."""
    return [min(window) for window in rollingWindow(values, windowSize)]
def getMaximums(values, windowSize):
    """Return the maximum of every windowSize-long sliding window over values."""
    return [max(window) for window in rollingWindow(values, windowSize)]
# ================================================================
def getTimeSeriesValues(values, window):
    """Pair each window summary with a normalized target price.

    Returns a list of [inputNode, outputNode] items where inputNode is
    [moving-average, minimum, maximum] for window i and outputNode is a
    single price normalized against that window's min/max.
    """
    movingAverages = getMovingAverage(values, window)
    minimums = getMinimums(values, window)
    maximums = getMaximums(values, window)
    returnData = []
    # build items of the form [[average, minimum, maximum], normalized price]
    for i in range(0, len(movingAverages)):
        inputNode = [movingAverages[i], minimums[i], maximums[i]]
        # NOTE(review): the target price is taken from index
        # len(movingAverages) - (i + 1), i.e. walking the price list backwards
        # while windows walk forwards.  Callers reverse the price series first,
        # so presumably this re-aligns targets chronologically -- confirm the
        # pairing before changing anything here.
        price = normalizePrice(values[len(movingAverages) - (i + 1)], minimums[i], maximums[i])
        outputNode = [price]
        tempItem = [inputNode, outputNode]
        returnData.append(tempItem)
    return returnData
# ================================================================
def getHistoricalData(stockSymbol):
    """Fetch daily prices for stockSymbol from the Kibot API.

    Returns a list of floats parsed from field 1 of each CSV row
    (presumably the open/close price -- confirm against Kibot's column order).
    """
    historicalPrices = []
    # login to API (guest account)
    urlopen("http://api.kibot.com/?action=login&user=guest&password=guest")
    # Request a year of daily, unadjusted, regular-session data.
    # BUG FIX: the query string previously contained the literal
    # "®ularsession=1" -- an encoding artifact where "&reg" was collapsed
    # into the "®" character -- which silently dropped the regularsession
    # parameter from the request.
    url = ("http://api.kibot.com/?action=history&symbol=" + stockSymbol +
           "&interval=daily&period=365&unadjusted=1&regularsession=1")
    apiData = urlopen(url).read().decode("utf-8").split("\n")
    for line in apiData:
        if len(line) > 0:
            tempLine = line.split(',')
            price = float(tempLine[1])
            historicalPrices.append(price)
    return historicalPrices
# ================================================================
def getTrainingData(stockSymbol, term):
    """Download the full price history and turn it into training pairs."""
    prices = getHistoricalData(stockSymbol)
    # most recent data first
    prices.reverse()
    # term-day moving averages, lows and highs paired with target prices
    return getTimeSeriesValues(prices, term)
def getPredictionData(stockSymbol, term):
    """Build the single [average, min, max] input vector for the newest term days."""
    prices = getHistoricalData(stockSymbol)
    # most recent data first, truncated to the newest `term` points
    prices.reverse()
    del prices[term:]
    windows = getTimeSeriesValues(prices, term)
    # keep only the first item's input node; the price target is dropped
    return windows[0][0]
# ================================================================
def analyze_symbol(stockSymbol, term):
    """Train a small neural net on stockSymbol's history and predict a price.

    `term` is the sliding-window length for the moving-average/min/max
    features.  Returns the de-normalized predicted price.
    """
    startTime = time.time()
    trainingData = getTrainingData(stockSymbol, term)
    # 3 inputs (average, min, max) -> 1 output (normalized price)
    network = NeuralNetwork(inputNodes = 3, hiddenNodes = 3, outputNodes = 1)
    network.train(trainingData)
    # get rolling data for most recent day
    predictionData = getPredictionData(stockSymbol, term)
    # get prediction
    returnPrice = network.test(predictionData)
    # de-normalize using predictionData[1]/[2] -- the window's minimum and
    # maximum per getTimeSeriesValues' [average, min, max] layout
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1], predictionData[2])
    # timing scaffolding left disabled; only the bare price is returned
    #returnData = {}
    returnData = predictedStockPrice
    #returnData['time'] = time.time() - startTime
    return returnData
# ================================================================
def SVMpredict(filename):
    """Fit an RBF SVR to (day-offset, price) rows in a CSV and predict 5 days out.

    The file's first line is a header; each data row starts
    "YYYY-MM-DD,price,...".  Returns [predict_X, predictions] where predict_X
    holds the next five day offsets and predictions are the SVR outputs
    rounded to 2 decimals.

    BUG FIX: the original nested two identical ``for line in islice(...)``
    loops over the same file handle, so the outer loop consumed one row and
    the inner islice skipped another -- the first two data rows were silently
    dropped -- and the file handle was never closed.  A single pass fixes both.
    """
    X = []
    y = []
    # arbitrary day-zero used to turn dates into integer offsets
    epoch = datetime.strptime('2017-04-24', "%Y-%m-%d")
    with open(filename) as input_file:
        for line in islice(input_file, 1, None):  # skip the header row
            fields = line.split(',')
            date = datetime.strptime(fields[0], "%Y-%m-%d")
            X.append((date - epoch).days)
            y.append(float(fields[1]))
    # sklearn expects a 2-D feature matrix
    X = np.reshape(np.asarray(X), (len(X), 1))
    y = np.asarray(y)
    # predict the five days following the last observed day
    last_day = X[-1]
    predict_X = [last_day + 1, last_day + 2, last_day + 3, last_day + 4, last_day + 5]
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    svr_rbf.fit(X, y)
    y_preRbf = np.around(svr_rbf.predict(predict_X), decimals=2)
    return [predict_X, y_preRbf]
# Demo entry point: predict GOOG with a 5-day window when run as a script.
if __name__ == "__main__":
    print(analyze_symbol("GOOG",5))
|
#!/usr/bin/env python3
# install aws-cli and enter credentials
import boto3
import yaml
from collections import defaultdict
class AWS():
    """Snapshots info about running EC2 instances via boto3."""

    def __init__(self):
        self.ec2 = boto3.resource('ec2')
        # cache of instance info collected once at construction
        self.ec2info = self.get_ec2info()

    def get_ec2info(self):
        """Return {instance_id: {Name, Type, State, Public-IP, Internal-IP}}
        for every running instance.

        BUG FIX: ``name`` used to leak across loop iterations -- an instance
        without a Name tag inherited the previous instance's name (or raised
        NameError on the very first).  An untagged instance (``tags`` is
        None) also crashed the inner loop.  ``name`` now resets per instance
        and a missing tag list is treated as empty.
        """
        ec2info = defaultdict()
        running_ec2_instances = self.ec2.instances.filter(
            Filters=[{
                'Name': 'instance-state-name',
                'Values': ['running']
            }])
        for instance in running_ec2_instances:
            name = ''
            for tag in instance.tags or []:
                if 'Name' in tag['Key']:
                    name = tag['Value']
            ec2info[instance.id] = {
                "Name": name,
                "Type": instance.instance_type,
                "State": instance.state["Name"],
                "Public-IP": instance.public_ip_address,
                "Internal-IP": instance.private_ip_address}
        return ec2info

    def find_cluster_instances(self, names):
        """Return {instance_id: {Name, Internal-IP, Public-IP}} for every
        cached instance whose Name tag appears in ``names``."""
        cluster_info = defaultdict()
        for name in names:
            print("=> Searching for : {}".format(name))
            for instance_id, instance in self.ec2info.items():
                if instance["Name"] == name:
                    cluster_info[instance_id] = {
                        "Name": name,
                        "Internal-IP": instance["Internal-IP"],
                        "Public-IP": instance["Public-IP"]}
        return cluster_info
class Cluster:
    """Loads a cluster SSH YAML config and syncs its IPs from EC2 data."""

    # Known cluster name -> the EC2 Name tags of its core instances.
    # Update this mapping if instance names in AWS change.
    CLUSTER_INSTANCES = {
        'qa-0': ['QA-0-Core-SDP-QA', 'QA-0-Core-k8s-NGP-QA'],
        'qa-1': ['QA-1-Core-SDP-QA', 'QA-1-Core-k8s-NGP-QA'],
        'qa-2': ['QA-2-Core-SDP-QA', 'QA-2-Core-k8s-NGP-QA'],
        'qa-4': ['QA-4-Core-SDP-Dev', 'QA-4-Core-k8s-NGP-Dev'],
    }

    def __init__(self, file):
        self.file = file
        self.config = self.parse_config()
        self.name = self.config["SSH"]["jump_host"]["name"]
        self.search_instances = self.get_instances_names_for_cluster()
        self.info = defaultdict()

    def get_instances_names_for_cluster(self):
        """Return the instance Name tags belonging to this cluster.

        BUG FIX: the old if/elif chain raised UnboundLocalError for any name
        other than qa-0/1/2/4; unknown names now fail with a clear ValueError.
        """
        try:
            return self.CLUSTER_INSTANCES[self.name]
        except KeyError:
            raise ValueError("Unknown cluster name: {}".format(self.name))

    def parse_config(self):
        """Parse the YAML config file; log and return None on YAML errors."""
        with open(self.file, 'r') as stream:
            try:
                return yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
                return None

    def update_config(self):
        """Rewrite the YAML file with the jump-host/core IPs from self.info."""
        print("=> Updating {} config".format(self.name))
        core_internal_ip = []
        jumphost_public_ip = self.config["SSH"]["jump_host"]["ip"]
        for cluster_name, cluster_info in self.info.items():
            # instances with a public IP are jump hosts; the rest are core nodes
            if cluster_info["Public-IP"]:
                jumphost_public_ip = cluster_info["Public-IP"]
            else:
                core_internal_ip.append(cluster_info["Internal-IP"])
        if self.config["SSH"]["jump_host"]["ip"] != jumphost_public_ip:
            print("=> Changing Jumphost IP to {}".format(jumphost_public_ip))
            self.config["SSH"]["jump_host"]["ip"] = jumphost_public_ip
        if core_internal_ip != self.config["SSH"]["core"]["ips"]:
            print("=> Changing Core IPs to:")
            print(core_internal_ip)
            self.config["SSH"]["core"]["ips"] = core_internal_ip
        # `with` guarantees the handle closes (the original leaked it)
        with open(self.file, 'w') as stream:
            yaml.safe_dump(self.config, stream, default_flow_style=False)

    def print_config(self, *args):
        """Print the cluster name and the collected instance info."""
        print(self.name)
        print(self.info)

    def print_info(self):
        """Print every key/value pair of every collected instance."""
        for cluster_name, cluster_info in self.info.items():
            for k, v in cluster_info.items():
                print("{0}: {1}".format(k, v))
def main():
    """Sync config-cluster.yaml with the live EC2 instance IPs."""
    # Change Cluster.get_instances_names_for_cluster if instances names in AWS changed.
    aws = AWS()
    cluster = Cluster('config-cluster.yaml')
    cluster.info = aws.find_cluster_instances(cluster.search_instances)
    cluster.update_config()
if __name__ == '__main__':
    main()
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class RivetToolfile(Package):
    """Spack package that emits a SCRAM toolfile (rivet.xml) for Rivet."""

    # Dummy payload: toolfile packages have no real source to download.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('rivet')

    def install(self, spec, prefix):
        """Render the toolfile template with Rivet's version/prefix and write it."""
        values = {}
        values['VER'] = spec['rivet'].version
        values['PFX'] = spec['rivet'].prefix
        fname = 'rivet.xml'
        # ${VER}/${PFX} are substituted from `values`; $$ escapes a literal $
        # so SCRAM sees $RIVET_BASE etc. unexpanded.
        # NOTE(review): PDFPATH uses default= while the other runtime entries
        # use value= -- confirm that asymmetry is intentional.
        contents = str("""
<tool name="rivet" version="${VER}">
<lib name="Rivet"/>
<client>
<environment name="RIVET_BASE" default="${PFX}"/>
<environment name="LIBDIR" default="$$RIVET_BASE/lib"/>
<environment name="INCLUDE" default="$$RIVET_BASE/include"/>
</client>
<runtime name="PATH" value="$$RIVET_BASE/bin" type="path"/>
<runtime name="RIVET_ANALYSIS_PATH" value="$$RIVET_BASE/lib" type="path"/>
<runtime name="PDFPATH" default="$$RIVET_BASE/share" type="path"/>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
<runtime name="TEXMFHOME" value="$$RIVET_BASE/share/Rivet/texmf" type="path"/>
<use name="hepmc"/>
<use name="fastjet"/>
<use name="gsl"/>
<use name="yoda"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
|
import subprocess
import os
import sys
import re
sys.path.insert(0, os.path.join("tools", "families"))
import fam_data
from run_all import RunFilter
import run_all_species
from run_all_species import SpeciesRunFilter
# Experiment driver: run all reference species-tree methods on the selected
# simulated dataset(s).
datasets = []
cores = 40
# Toggle guard kept from interactive experimentation (flip to False to skip).
if (True):
    datasets = []
    subst_model = "GTR"
    datasets.append("ssim_s20_f100_sites100_GTR_bl1.0_d0.2_l0.2_t0.2_p0.0_pop10_mu1.0_theta0.0_seed10")
    #datasets.append("ssim_s40_f100_sites100_dna_d0.2_l0.2_t0.0_p0.0")
    #fam_data.generate_all_datasets(datasets)
    # NOTE(review): RunFilter(True, False) -- positional flag meanings are not
    # visible from here; see tools/families/run_all.py before changing them.
    run_filter = RunFilter(True, False)
    run_filter.eval_joint_ll = False
    run_filter.analyze = True
    run_filter.pargenes = True
    # MrBayes sampling frequency and chain length
    run_filter.mb_frequencies = 1000
    run_filter.mb_generations = 100000
    #run_filter.pargenes_starting_trees = 1
    #run_filter.pargenes_bootstrap_trees = 5
    run_filter.run_all_reference_methods(datasets, subst_model, cores)
|
# Generated by Django 2.1 on 2018-12-16 00:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: redefine TranslationKey.slug as CharField(max_length=50)."""

    # Must run after the migration that introduced the language keys.
    dependencies = [
        ('translations', '0003_language_keys'),
    ]

    operations = [
        migrations.AlterField(
            model_name='translationkey',
            name='slug',
            field=models.CharField(max_length=50),
        ),
    ]
|
import unittest
from katas.beta.fractions_class import Fraction
class FractionTestCase(unittest.TestCase):
    """Checks Fraction.__add__ against pre-computed exact sums.

    Each expected Fraction is the exact value of the sum; the expected
    numerator/denominator pairs are in lowest terms, so presumably the kata's
    Fraction reduces (or compares by cross-multiplication) -- confirm against
    its equality semantics.
    """
    def test_equal_1(self):
        self.assertEqual(Fraction(1, 8) + Fraction(4, 5), Fraction(37, 40))
    def test_equal_2(self):
        self.assertEqual(Fraction(911, 920) + Fraction(980, 906),
                         Fraction(863483, 416760))
    def test_equal_3(self):
        self.assertEqual(Fraction(610, 941) + Fraction(253, 985),
                         Fraction(838923, 926885))
    def test_equal_4(self):
        self.assertEqual(Fraction(956, 798) + Fraction(662, 189),
                         Fraction(16880, 3591))
    def test_equal_5(self):
        self.assertEqual(Fraction(694, 485) + Fraction(853, 861),
                         Fraction(1011239, 417585))
    def test_equal_6(self):
        self.assertEqual(Fraction(982, 111) + Fraction(219, 561),
                         Fraction(191737, 20757))
    def test_equal_7(self):
        self.assertEqual(Fraction(344, 873) + Fraction(658, 486),
                         Fraction(41201, 23571))
    def test_equal_8(self):
        self.assertEqual(Fraction(662, 361) + Fraction(322, 382),
                         Fraction(184563, 68951))
    def test_equal_9(self):
        self.assertEqual(Fraction(740, 813) + Fraction(184, 348),
                         Fraction(33926, 23577))
    def test_equal_10(self):
        self.assertEqual(Fraction(579, 441) + Fraction(543, 807),
                         Fraction(78524, 39543))
    def test_equal_11(self):
        self.assertEqual(Fraction(212, 979) + Fraction(46, 580),
                         Fraction(83997, 283910))
|
# Marker file added on a child branch (VCS exercise); prints when imported/run.
print("Inside child branch")
|
import sys
import glob
import os
import collections
# Naive Bayes classification step: load the model written by the training
# script (nbmodel.txt), classify every review under <input_path>/*/*/*/ and
# write predictions to nboutput.txt.
model = "nbmodel.txt"
output = "nboutput.txt"
# NOTE(review): sys.argv[1] is required; running without it raises IndexError.
input_path = sys.argv[1]
all_files = glob.glob(os.path.join(input_path, '*/*/*/*.txt'))
# (disabled) development bookkeeping that derived true labels from the folder
# structure for local accuracy measurement:
# for file in all_files:
#     class1, class2, fold, file_name = file.split('/')[-4:]
#     if "positive" in class1:
#         class1 = "positive"
#     elif "negative" in class1:
#         class1 = "negative"
#     if "truthful" in class2:
#         class2 = "truthful"
#     elif "deceptive" in class2:
#         class2 = "deceptive"
#     test_data[class1].append(file)
#     test_data[class2].append(file)
# test_data[class2].append(file)
def new_word_format(word):
    """Lowercase and strip a token, then drop '.', ',', '/' and space chars."""
    cleaned = word.lower().strip()
    return "".join(ch for ch in cleaned if ch not in ". , /")
def read_tokens_from_files(all_files):
    """Map each file path to the list of normalized tokens it contains."""
    file_tokens_dict = {}
    for path in all_files:
        with open(path, "r") as handle:
            tokens = [new_word_format(word)
                      for line in handle
                      for word in line.split(" ")]
        file_tokens_dict[path] = tokens
    return file_tokens_dict
# ...WordDict{[word]:this word's number}, num...Word int(), num...File int()
# ---- load the trained model ------------------------------------------------
file_tokens_dict = read_tokens_from_files(all_files)
model_file = open(model, "r")
# line 1 of the model holds the four class log-priors, comma-separated
first_line = model_file.readline().split(",")
log_prior_dict = {}
log_prior_dict["positive"] = float(first_line[0])
log_prior_dict["negative"] = float(first_line[1])
log_prior_dict["truthful"] = float(first_line[2])
log_prior_dict["deceptive"] = float(first_line[3].strip())
likelihood_positive = {}
likelihood_negative = {}
likelihood_truthful = {}
likelihood_deceptive = {}
# remaining lines: "<class>,<word>,<log-likelihood>"
for line in model_file:
    class_list = line.split(",")
    if class_list[0] == "positive":
        likelihood_positive[class_list[1]] = float(class_list[2].strip())
    elif class_list[0] == "negative":
        likelihood_negative[class_list[1]] = float(class_list[2].strip())
    elif class_list[0] == "truthful":
        likelihood_truthful[class_list[1]] = float(class_list[2].strip())
    elif class_list[0] == "deceptive":
        likelihood_deceptive[class_list[1]] = float(class_list[2].strip())
model_file.close()
likelihood = {}
likelihood["positive"] = likelihood_positive
likelihood["negative"] = likelihood_negative
likelihood["truthful"] = likelihood_truthful
likelihood["deceptive"] = likelihood_deceptive
sentiment_list = ["positive", "negative", "truthful", "deceptive"]
# ---- classify every file ---------------------------------------------------
file_class_dict = {}
for file in file_tokens_dict:
    tokens_list = file_tokens_dict[file]
    sum_class = {}
    # naive Bayes score: class log-prior plus the sum of per-token
    # log-likelihoods (tokens absent from the model are simply skipped)
    for sentiment in sentiment_list:
        sum_class[sentiment] = log_prior_dict[sentiment]
        for token in tokens_list:
            if token in likelihood[sentiment]:
                sum_class[sentiment] += likelihood[sentiment][token]
    # two independent binary decisions: sentiment and truthfulness
    if sum_class["positive"] >= sum_class["negative"]:
        file_class_dict[file] = ["positive"]
    else:
        file_class_dict[file] = ["negative"]
    if sum_class["truthful"] >= sum_class["deceptive"]:
        file_class_dict[file].append("truthful")
    else:
        file_class_dict[file].append("deceptive")
# (disabled) local accuracy scoring against the labeled folder structure
# num_test_file = len(file_class_dict)
# num_correct = 0
#
# for file in file_class_dict:
#     predict_class1 = file_class_dict[file][0]
#     predict_class2 = file_class_dict[file][1]
#     if file in test_data[predict_class1] and file in test_data[predict_class2]:
#         num_correct += 1
#
# print(num_correct/num_test_file)
# Output format: "<truthful/deceptive> <positive/negative> <path>" per line.
# NOTE(review): index 1 (truthfulness) is written before index 0 (sentiment) --
# presumably to match the grader's expected column order; confirm.
# NOTE(review): the output handle is never closed/flushed explicitly.
file_output = open(output, "w")
for file in file_class_dict:
    file_output.writelines(" ".join([file_class_dict[file][1], file_class_dict[file][0], file]) + "\n")
|
#####coding=utf-8
import re
import urllib.request
def getHtml(url):
    """Fetch `url` and return its body decoded as UTF-8.

    CLEANUP: the original carried leftover debug scaffolding -- a print loop
    counting 0..4 and a charset-regex demo run against a hard-coded header
    string -- none of which affected the returned value.  It is removed here;
    only the fetch-and-decode remains.
    """
    page = urllib.request.urlopen(url)
    html = page.read().decode('utf-8')
    return html
def getImg(html):
    """Download every image matched by '<img src="....jpg" pic_ext' in html,
    saving them as 0.jpg, 1.jpg, ... in the working directory."""
    pattern = re.compile(r'src="(.+?\.jpg)" pic_ext')
    for index, imgurl in enumerate(pattern.findall(html)):
        urllib.request.urlretrieve(imgurl, '%s.jpg' % index)
# Fetch the target page at import time; image download left disabled.
html = getHtml("http://tieba.baidu.com/p/2460150866")
# print(html)
# getImg(html)
|
class Animals:
    """Base class: each accessor looks up its phrase in the subclass's
    `strings` dict.

    BUG FIX: the accessors used keys ('VakVakVak', 'Tuyum_var', 'Vahvah',
    'Kürk', 'Meov') that no subclass defines, so every call raised KeyError.
    They now use the keys the subclasses actually provide
    (vakvak/tuylu/havhav/kurk/meow).
    """
    def vakvak(self):
        return self.strings['vakvak']
    def tuylu(self):
        return self.strings['tuylu']
    def havhav(self):
        return self.strings['havhav']
    def kurk(self):
        return self.strings['kurk']
    def meow(self):
        return self.strings['meow']
class Ordek(Animals):
    # Duck: Turkish phrase per ability (user-facing strings, left untranslated).
    strings = dict(
        vakvak="Vaaaaaak",
        tuylu="Ordegin beyaz tuyleri var",
        havhav="Ordek havlayamaz",
        kurk="Ordeğin kürkü yok",
        meow="Ordek miyavlayamaz"
    )
class insan(Animals):
    # Human: Turkish phrase per ability (user-facing strings, left untranslated).
    # NOTE(review): class name is lowercase, unlike its siblings.
    strings = dict(
        vakvak="İnsan ördek gibi vakvak yapar",
        tuylu="İnsan tüylü olabilir",
        havhav="İnsan köpek gibi havlayabilir",
        kurk="İnsanın kürkü yok",
        meow="İnsan kedi gibi miyavlayabilir."
    )
class Kopek(Animals):
    # Dog: Turkish phrase per ability (user-facing strings, left untranslated).
    strings = dict(
        vakvak="KOpek ördek gibi vakvak yapamaz",
        tuylu="Kopek tüylü olabilir",
        havhav="köpek havlayabilir",
        kurk="Kopek kürkü var",
        meow="Kopek miyavlamaz."
    )
# Duck-typed printers: each accepts ANY Animals subclass instance -- the
# parameter names shadow the class names but carry no type restriction.
def kopekYazdir(Kopek):
    # "print like a dog": bark then fur
    print(Kopek.havhav())
    print(Kopek.kurk())
def ordekYazdir(Ordek):
    # "print like a duck": quack then fur
    print(Ordek.vakvak())
    print(Ordek.kurk())
def insanYazdir(insan):
    # "print like a human": bark then fur
    print(insan.havhav())
    print(insan.kurk())
def main():
    """Demo: create one of each animal and run the dog-style printer on all.

    NOTE(review): as shipped, Animals' accessors look up keys that the
    subclass dicts don't define, so these calls raise KeyError -- confirm the
    accessor keys before relying on this demo.
    """
    Donald=Ordek()
    Tumaz=Kopek()
    Ahmet=insan()
    print("kopekYazdir")
    for o in (Donald,Tumaz,Ahmet):
        kopekYazdir(o)
if __name__ == "__main__": main()
|
"""
Tests for formatter.py
"""
import unittest
from app import formatter
class TestFormatter(unittest.TestCase):
    """
    Formatter test cases (reads ../data/sample-Liz.in relative to the CWD).
    """
    def setUp(self):
        # Fresh Formatter plus a fully parsed sample file for every test.
        self.fm = formatter.Formatter()
        self.contents = self.fm.read_file('../data/sample-Liz.in')
        self._entries = self.fm.get_entries_by_line(self.contents)
        self._v_entries = self.fm.validate_entries(self._entries)
        self._format_output = self.fm.format_output()
    def test_read_file_method_returns_correct_type(self):
        """
        tests if read_file returns str
        """
        self.assertEqual(str, type(self.contents))
    def test_read_file_method_returns_not_empty(self):
        """
        tests if read file has content (besides whitespace)
        """
        self.assertGreater(len(self.contents), 0)
    def test_get_entries_by_line_method_returns_correct_type(self):
        """
        tests type of entry container
        """
        self.assertEqual(list, type(self._entries))
    def test_line_count_equals_entry_count(self):
        """
        tests if number of lines in file is equal to number of entries + errors
        """
        self.assertEqual(self.fm.line_count, self.fm.entry_count)
if __name__ == '__main__':
    unittest.main()
|
import time,threading
balance=0
lock=threading.Lock()
def change_it(n):
    """Add then subtract n from the shared balance (net zero when serialized)."""
    global balance
    balance = balance + n
    balance = balance - n

def run_thread(n):
    """Hammer change_it under the lock; with locking the final balance is 0."""
    for _ in range(10000000):
        # `with lock:` is the idiomatic acquire/release pair, equivalent to
        # the original try / lock.acquire() / finally / lock.release().
        with lock:
            change_it(n)

t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
# All browser traffic targets server port 80 -- the server tells connections
# apart by the client's (address, port) pair, not by the server-side port.
"""DEV-54 Decouple SpacedRep from Card
Revision ID: 16795b2ee0df
Revises: c08bce10bc7b
Create Date: 2021-03-04 23:20:03.885792
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "16795b2ee0df"
down_revision = "c08bce10bc7b"
branch_labels = None
depends_on = None
def upgrade():
    """Create learn_spaced_repetition and link card to it via learn_spaced_rep_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "learn_spaced_repetition",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("next_date", sa.DateTime(), nullable=True),
        sa.Column("bucket", sa.Integer(), nullable=True),
        sa.Column("timestamp", sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_learn_spaced_repetition")),
    )
    # Indexes for the two datetime columns queried by the scheduler.
    with op.batch_alter_table("learn_spaced_repetition", schema=None) as batch_op:
        batch_op.create_index(
            batch_op.f("ix_learn_spaced_repetition_next_date"),
            ["next_date"],
            unique=False,
        )
        batch_op.create_index(
            batch_op.f("ix_learn_spaced_repetition_timestamp"),
            ["timestamp"],
            unique=False,
        )
    # Nullable FK so existing cards migrate without a spaced-rep row.
    with op.batch_alter_table("card", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("learn_spaced_rep_id", sa.Integer(), nullable=True)
        )
        batch_op.create_foreign_key(
            batch_op.f("fk_card_learn_spaced_rep_id_learn_spaced_repetition"),
            "learn_spaced_repetition",
            ["learn_spaced_rep_id"],
            ["id"],
        )
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the card FK/column, then the table and indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("card", schema=None) as batch_op:
        batch_op.drop_constraint(
            batch_op.f("fk_card_learn_spaced_rep_id_learn_spaced_repetition"),
            type_="foreignkey",
        )
        batch_op.drop_column("learn_spaced_rep_id")
    with op.batch_alter_table("learn_spaced_repetition", schema=None) as batch_op:
        batch_op.drop_index(batch_op.f("ix_learn_spaced_repetition_timestamp"))
        batch_op.drop_index(batch_op.f("ix_learn_spaced_repetition_next_date"))
    op.drop_table("learn_spaced_repetition")
    # ### end Alembic commands ###
|
## Copyright 2013 Sean McKenna
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
# run k-means algorithm on TSV data file
# output the clusters as a new TSV file
# requires numpy for the k-mean algorithm
# defines the TSV data filename & clustering
inFile = "data.tsv"
outFile = "data-cluster.tsv"
clusters = 7
# necessary imports -- NOTE(review): uses the legacy OpenCV "cv" API and a
# print *statement* at the bottom, so this script is Python 2 only.
import csv
import copy
import time
import cv
import numpy as np
# start timer
start = time.time()
# get the TSV data file as input
# NOTE(review): `input` shadows the builtin; "rU" is Py2 universal-newline mode
input = open(inFile, "rU")
reader = csv.reader(input, dialect='excel-tab')
# initialize scanning variables
firstPass = True
numRows = 0
numCols = -1   # column count excluding the leading row-name column
firstRow = []  # header row
rows = []      # raw data rows (with row names), kept for output
data = []      # deep copy of rows; row names stripped before clustering
cluster = []   # per-row cluster assignment
# process TSV file row-by-row
for row in reader:
    if firstPass:
        firstRow = row
        numCols = len(row) - 1
        firstPass = False
    else:
        numRows += 1
        rows.append(row)
data = copy.deepcopy(rows)
# close input file
input.close()
# initialize data & label matrix
samples = cv.CreateMat(numRows, numCols, cv.CV_32F)
labels = cv.CreateMat(numRows, 1, cv.CV_32S)
# remove row name from data
for j in range(0, numRows):
    data[j].pop(0)
# fill data matrix (rebinds `samples`, replacing the preallocated matrix)
samples = cv.fromarray(np.array(data, np.float32))
# set ten iterations of the k-means algorithm
criteria = (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 10, 1.0)
# k-means algorithm (implementation in OpenCV)
cv.KMeans2(samples, clusters, labels, criteria)
# get the cluster info into an array
for j in range(0, numRows):
    cluster.append(int(cv.Get1D(labels, j)[0]))
# prep output file
output = open(outFile, "wb")
writer = csv.writer(output, dialect='excel-tab')
# write the header with a new "Cluster" column inserted after the row name
firstRow.insert(1, "Cluster")
writer.writerow(firstRow)
for j in range(0, numRows):
    row = rows[j]
    row.insert(1, cluster[j])
    writer.writerow(row)
# close output file
output.close()
# stop timer
end = time.time()
# process the time elapsed
elapsed = end - start
# NOTE(review): `min` shadows the builtin
min = round(elapsed / 60, 3)
# display time taken (Python 2 print statement)
print "k-means clustering algorithm complete after", min, "minutes."
|
from django.conf.urls import patterns, include, url
from django.conf import settings
# Legacy URLconf (Django <= 1.7): patterns() resolves the string view names
# below against the 'lok.views' module prefix.  Named capture groups become
# keyword arguments to the view (ids are numeric except sell's quantity,
# which accepts \w+ -- NOTE(review): likely to allow "all"; confirm).
urlpatterns = patterns('lok.views',
    url(r'^story/$', 'story'),
    url(r'^create/$', 'create_character'),
    url(r'^party/$', 'party'),
    url(r'^invite_friend/$', 'invite_friend'),
    url(r'^leave_party/$', 'leave_party'),
    url(r'^dismiss_message/(?P<message_id>\d+)/$', 'dismiss_message'),
    url(r'^dismiss_all_messages/$', 'dismiss_all_messages'),
    url(r'^accept_friend/(?P<user_id>\d+)/$', 'accept_friend'),
    url(r'^invite_party/(?P<character_id>\d+)/$', 'invite_party'),
    url(r'^accept_party/(?P<invite_id>\d+)/$', 'accept_party'),
    url(r'^cancel_invite_party/(?P<invite_id>\d+)/$', 'cancel_invite_party'),
    url(r'^character/$', 'character'),
    url(r'^dead/$', 'dead'),
    url(r'^rest/$', 'rest'),
    url(r'^travel/$', 'travel'),
    url(r'^market/$', 'market'),
    url(r'^travel/(?P<route_id>\d+)/$', 'travel_to'),
    url(r'^scenario/(?P<scenario_id>\d+)/$', 'scenario'),
    url(r'^choice/(?P<choice_id>\d+)/$', 'choice'),
    url(r'^battle/(?P<battle_id>\d+)/$', 'battle'),
    url(r'^title/(?P<title_id>\d+)/$', 'title'),
    url(r'^equip/(?P<fieldname>\w+)/(?P<equip_id>\d+)/$', 'equip'),
    url(r'^buy/(?P<item_id>\d+)/(?P<quantity>\d+)/$', 'buy'),
    url(r'^sell/(?P<item_id>\d+)/(?P<quantity>\w+)/$', 'sell'),
    url(r'^result/(?P<result_id>\d+)/$', 'result'),
    url(r'^battle_result/(?P<result_id>\d+)/$', 'battle_result'),
    url(r'^contact/$', 'contact'),
    url(r'^logout/$', 'logout_view'),
    url(r'^thanks/$', 'thanks'),
)
|
#!/usr/bin/env python
# Script info at the bottom
import os, time, glob, subprocess
from datetime import datetime
protocol = 'afp' # set your connection protocol, afp by default
tm_share = 'afp://tm:pass@10.1.1.1/TimeMachine' # user:pass @ ip address /share
mount_path = '/Volumes/TimeMachine' # Set your mount path
files = glob.glob(mount_path + '/*') # Change to fit the path to your tm backups
threshold = 30 # how many days until reporting no backup
def mutt(backup_list):
    """Email the stale-backup report: `echo <report> | mutt -s <subject> <addr>`.

    NOTE(review): mutt_email is empty -- fill in a recipient before
    re-enabling the call in backup_check.
    NOTE(review): on Python 3, output.stdout.read()/stderr.read() return
    bytes, so the str concatenations in the prints below raise TypeError;
    this looks written for Python 2 -- confirm before re-enabling.
    """
    mutt_email = ''
    echo_cmd = ['echo',\
        'The following TimeMachine backups are older than %s days: \n%s'\
        % (threshold, backup_list)]
    send_cmd = ["/usr/local/bin/mutt", "-s",\
        "'TimeMachine backups older than %s days'" % threshold, mutt_email]
    # pipe echo's stdout into mutt's stdin
    echo = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
    output = subprocess.Popen(send_cmd, stdin=echo.stdout,\
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stuff = output.stdout.read()
    errors = output.stderr.read()
    print("stuff to know: " + stuff)
    print("errors: " + errors)
    # hard-stops the whole script once the mail is sent
    exit(0)
def backup_check(files):
    """Print a report of files whose mtime is more than `threshold` days old.

    `files` are the TimeMachine backup paths; the last-modified time is read
    from os.stat and truncated to the day before comparing with today.
    CLEANUP: the original built (path, date-string) tuples, then re-parsed,
    re-formatted and re-parsed the same date in a second loop -- one loop and
    one parse per file produce identical output.
    """
    old_backups = []
    # today's date truncated to midnight, matching the per-file truncation
    today = datetime.strptime(time.strftime("%Y-%m-%d"), '%Y-%m-%d')
    for file_path in files:
        meta = os.stat(file_path)
        # meta[-2] is st_mtime; format/parse truncates it to the day (UTC)
        modified_day = time.strftime('%Y-%m-%d', time.gmtime(meta[-2]))
        backup_date = datetime.strptime(modified_day, '%Y-%m-%d')
        last_backup = str(abs((today - backup_date).days))
        if int(last_backup) > threshold:
            old_backups.append("Last backup:\t" + last_backup
                               + "\tdays ago, file: " + file_path)
    if old_backups:
        backup_list = '\n'.join(old_backups)
        print(backup_list)
        # mutt(backup_list)
# mutt(backup_list)
def tm_volume(protocol, tm_share, mount_path, files):
    """Ensure the TimeMachine share is mounted, then run the backup check.

    NOTE(review): if mounting keeps failing this loops forever, and os.mkdir
    raises on the second pass once the directory exists -- consider a retry
    limit and exist_ok-style handling.
    """
    mounted = os.path.isdir(mount_path)
    while mounted != True:
        mount_cmd = ['mount', '-t', protocol, tm_share, mount_path]
        os.mkdir(mount_path)
        subprocess.call(mount_cmd)
        mounted = os.path.isdir(mount_path)
    backup_check(files)
# Entry point: mount (if needed) and run the report when the script executes.
tm_volume(protocol, tm_share, mount_path, files)
# Script overview: This script checks the last modified date on the TimeMachine
# .sparsebundle and reports which files haven't been modified within the last
# 30 (default) days.
#
# The script first checks that the specified TimeMachine volume is mounted, if
# it isn't it mounts the volume.
# The script then gets a list of all of the backups, checks their last modified
# date, and compares it to the current date. It then makes a report of all of
# the backups that haven't run in the last 30 days.
# It passes this data to the mutt function which sends an email report to the
# specified email address.
#
# I created a user LaunchAgent on my server to run this script once a week to
# get weekly TimeMachine reports.
# This script isn't useful for users who let the backup start running but
# cancel midway through since the file's then been modified.
#
#
# Variables:
# Roughly lines 8-11 are the only variables you'll need to change to get
# working in your environment.
# I've disabled emailing and instead the script will just print out a report.
# To enable the mutt emailing un-comment (roughly) line 58 which calls the mutt
# function ( mutt(backup_list) )
#
#
# Issues:
# If you find any bugs or anything please just create a new issue and I'll
# take a look.
#
# Known bugs:
# When TimeMachine isn't mounted and the script mounts the volume it exits
# without properly checking and reporting the TimeMachine backup info. |
from ..utils.user_nested_exclude_list import USER_NESTED_FIELDS_EXCLUDES
from ..extensions import marshmallow
from .tag import TagSchema
from .user import UserSchema
from marshmallow import fields
class SnippetSchema(marshmallow.Schema):
    """Serializer for Snippet objects, with nested tags and a trimmed user."""
    class Meta:
        # whitelist of fields to dump, in this order
        fields = ('id', 'filename', 'body', 'description',
                  'star_count', 'tags', 'created', 'updated',
                  'user')
    id = fields.Int()
    star_count = fields.Int()
    # a snippet carries a list of tags
    tags = fields.Nested(TagSchema, many=True)
    # exclude noisy/sensitive user fields when nested inside a snippet
    user = fields.Nested(UserSchema, exclude=USER_NESTED_FIELDS_EXCLUDES)
    # TODO: Add comment field nested SnippetCommentSchema
import multiprocessing
def test(sample, to_add):
    """Append to_add to sample and show this process's view of the list."""
    sample.append(to_add)
    print(f'Process {id(sample)}: {sample}')
# Processes don't implicitly share mutable state: each child works on its own
# copy of x.
x = [1, 2, 3]
proc1 = multiprocessing.Process(target=test, args=(x, 1))
proc2 = multiprocessing.Process(target=test, args=(x, 2))
proc1.start()
proc2.start()
proc1.join()
proc2.join()
# Each child may even report the same id() for its copy, but every process
# has its own memory space, so the parent's list is unchanged here.
print(f'Original {id(x)}: {x}')
# Pool is a little more compact for fanning the same function out.
with multiprocessing.Pool(2) as p:
    p.starmap(test, [(x, 1), (x, 2)])
|
import numpy
def div(a, b):
    """Divide a by b; warn and return numpy.inf when the denominator is zero."""
    if b != 0:
        return (a/b)
    print("Warning! \n Denominator cannot be zero")
    return numpy.inf
def add(a, b):
    """Return the sum of a and b."""
    result = a + b
    return result
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-27 14:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the ``cafes`` app: addresses, cafes, coordinates,
    feedback, menu items, opening hours, and wait-list orders."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # NOTE(review): Address.id's verbose_name reads 'ID кафе' ("cafe ID"),
        # which looks like a copy-paste from Cafe; harmless in a migration.
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID кафе')),
                ('country', models.CharField(max_length=100, verbose_name='Страна')),
                ('city', models.CharField(max_length=100, verbose_name='Город')),
                ('street', models.CharField(max_length=100, verbose_name='Улица')),
                ('house', models.CharField(max_length=100, verbose_name='Дом')),
            ],
        ),
        migrations.CreateModel(
            name='Cafe',
            fields=[
                ('cafe_id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID кафе')),
                ('cafe_name', models.CharField(default=' ', max_length=1000, verbose_name='Название кафе')),
                ('cafe_description', models.CharField(max_length=1000, verbose_name='Описание кафе')),
                ('cafe_rating', models.FloatField(verbose_name='Рейтинг кафе')),
                ('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата добавления')),
                ('icon', models.ImageField(default=None, upload_to='', verbose_name='Иконка кафе')),
                ('cafe_address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cafes.Address', verbose_name='Адрес кафе')),
            ],
        ),
        migrations.CreateModel(
            name='Coordinates',
            fields=[
                ('coordinates_id', models.AutoField(primary_key=True, serialize=False, verbose_name='id координат')),
                ('lat', models.FloatField(verbose_name='Широта')),
                ('lon', models.FloatField(verbose_name='Долгота')),
            ],
        ),
        migrations.CreateModel(
            name='Feedback',
            fields=[
                ('feedback_id', models.AutoField(primary_key=True, serialize=False)),
                ('desc', models.TextField(max_length=3500, verbose_name='Отзыв')),
                ('rating', models.FloatField(verbose_name='Рейтинг отзыва')),
                ('add_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата добавления')),
                # OneToOne: each user can author at most one Feedback row.
                ('author', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор отзыва')),
            ],
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('item_id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=1000, verbose_name='Название элемента')),
                ('description', models.TextField(verbose_name='Описание элемента')),
                ('time', models.IntegerField(default=10, verbose_name='Время приготовления (в минутах)')),
                ('icon', models.ImageField(blank=True, upload_to='', verbose_name='Иконка элемента')),
                ('image', models.ImageField(blank=True, upload_to='', verbose_name='Фото элемента')),
                ('price', models.IntegerField(verbose_name='Цена товара')),
                ('type', models.CharField(max_length=100, verbose_name='Тип товара')),
            ],
        ),
        migrations.CreateModel(
            name='OpeningHours',
            fields=[
                ('opening_hours_id', models.AutoField(primary_key=True, serialize=False)),
                ('opening_time', models.TimeField(verbose_name='Время открытия')),
                ('closing_time', models.TimeField(verbose_name='Время закрытия')),
            ],
        ),
        # Orders are modeled with six fixed item/amount slot pairs rather than
        # a many-to-many through table; slot 1 is required, slots 2-6 optional.
        migrations.CreateModel(
            name='WaitList',
            fields=[
                ('order_id', models.AutoField(primary_key=True, serialize=False)),
                ('amount_1', models.IntegerField(default=1, verbose_name='Количество')),
                ('amount_2', models.IntegerField(blank=True, null=True, verbose_name='Количество')),
                ('amount_3', models.IntegerField(blank=True, null=True, verbose_name='Количество')),
                ('amount_4', models.IntegerField(blank=True, null=True, verbose_name='Количество')),
                ('amount_5', models.IntegerField(blank=True, null=True, verbose_name='Количество')),
                ('amount_6', models.IntegerField(blank=True, null=True, verbose_name='Количество')),
                ('time_to_take', models.TimeField(verbose_name='Заказ будет готов к ')),
                ('paid', models.BooleanField(verbose_name='Оплачено')),
                ('done', models.BooleanField(verbose_name='Готовность заказа')),
                ('cafe_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cafes.Cafe', verbose_name='Кафе')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Клиент')),
                ('item_1', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_1', to='cafes.Item')),
                ('item_2', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_2', to='cafes.Item', verbose_name='Продукт 2')),
                ('item_3', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_3', to='cafes.Item', verbose_name='Продукт 3')),
                ('item_4', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_4', to='cafes.Item', verbose_name='Продукт 4')),
                ('item_5', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_5', to='cafes.Item', verbose_name='Продукт 5')),
                ('item_6', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_6', to='cafes.Item', verbose_name='Продукт 6')),
            ],
        ),
        # Cafe's remaining relations are added after the referenced models exist.
        migrations.AddField(
            model_name='cafe',
            name='cafe_coordinates',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cafes.Coordinates', verbose_name='Координаты кафе'),
        ),
        migrations.AddField(
            model_name='cafe',
            name='cafe_feedback',
            field=models.ManyToManyField(blank=True, to='cafes.Feedback', verbose_name='Отзывы о кафе'),
        ),
        migrations.AddField(
            model_name='cafe',
            name='cafe_menu',
            field=models.ManyToManyField(to='cafes.Item', verbose_name='Меню'),
        ),
        migrations.AddField(
            model_name='cafe',
            name='cafe_opening_hours',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cafes.OpeningHours', verbose_name='Часы работы кафе'),
        ),
        migrations.AddField(
            model_name='cafe',
            name='cafe_staff',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Работники кафе'),
        ),
    ]
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Header Packet data interfacing definitions."""
import operator
import functools
from enum import IntEnum
from amaranth import *
from amaranth.hdl.rec import Layout
from ....stream.arbiter import StreamArbiter
class HeaderPacket(Record):
    """ Container that represents a Header Packet. """

    # Create overrideable constants that allow us to specialize
    # the data words of our headers in subclasses.
    DW0_LAYOUT = [('dw0', 32)]
    DW1_LAYOUT = [('dw1', 32)]
    DW2_LAYOUT = [('dw2', 32)]

    # Link-layer bookkeeping carried alongside the three data words (CRCs and
    # sequencing/routing flags); common to every header packet subclass.
    LINK_LAYER_FIELDS = [
        ('crc16', 16),
        ('sequence_number', 3),
        ('dw3_reserved', 3),
        ('hub_depth', 3),
        ('delayed', 1),
        ('deferred', 1),
        ('crc5', 5),
    ]

    def get_type(self):
        """ Returns the selection of bits in DW0 that encode the packet type. """
        return self.dw0[0:5]

    @classmethod
    def get_layout(cls):
        """ Computes the layout for the HeaderPacket (sub)class. """
        # Concatenate the (possibly subclass-specialized) data words with the
        # shared link-layer fields.
        return [
            *cls.DW0_LAYOUT,
            *cls.DW1_LAYOUT,
            *cls.DW2_LAYOUT,
            *cls.LINK_LAYER_FIELDS
        ]

    def __init__(self):
        # Name the record after the concrete subclass, for readable signal names.
        super().__init__(self.get_layout(), name=self.__class__.__name__)
class HeaderQueue(Record):
    """ Record representing a header, and stream-link control signals.

    Attributes
    ----------
    valid: Signal(), producer to consumer
        Indicates that the data in :attr:``header`` is valid and ready to be consumed.
    header: HeaderPacket(), producer to consumer
        Contains a full set of header packet data.
    ready: Signal(), consumer to producer
        Strobed by the consumer to indicate that it has accepted the given header.
    """

    def __init__(self, *, header_type=HeaderPacket):
        # The nested ``header`` sub-record takes its shape from ``header_type``,
        # so specialized header packets can ride the same queue interface.
        super().__init__([
            ('valid',  1),
            ('header', header_type.get_layout()),
            ('ready',  1),
        ], name="HeaderQueue")

    def get_type(self):
        """ Returns the selection of bits in the current header's that encode the packet type. """
        return self.header.dw0[0:5]

    def header_eq(self, other):
        """ Connects a producer (self) up to a consumer. """
        # valid/header flow producer -> consumer; ready flows back upstream.
        return [
            self.valid   .eq(other.valid),
            self.header  .eq(other.header),
            other.ready  .eq(self.ready)
        ]

    def stream_eq(self, other):
        """ Alias for ``header_eq`` that ensures we share a stream interface. """
        return self.header_eq(other)
class HeaderQueueArbiter(StreamArbiter):
    """ Gateware that accepts a collection of header queues, and merges them into a single queue.

    Add produces using ``add_producer``.

    Attributes
    ----------
    source: HeaderQueue(), output queue
        A single header queue that carries data from all producer queues.
    """

    def __init__(self):
        # Arbitrate in the "ss" clock domain (presumably SuperSpeed --
        # confirm against the rest of the gateware).
        super().__init__(stream_type=HeaderQueue, domain="ss")

    def add_producer(self, interface: HeaderQueue):
        """ Adds a HeaderQueue interface that will add packets into this mux. """
        self.add_stream(interface)
class HeaderQueueDemultiplexer(Elaboratable):
    """ Gateware that accepts a single Header Queue, and routes it to multiple modules.

    Assumes that each type of header is handled by a separate module, and thus no two inputs
    will assert :attr:``ready`` at the same time.

    Add consumers using ``add_consumer``.

    Attributes
    ----------
    sink: HeaderQueue(), input queue
        The single header queue to be distributed to all of our consumers.
    """

    def __init__(self):
        # Consumers registered before elaboration.
        self._consumers = []

        #
        # I/O port
        #
        self.sink = HeaderQueue()

    def add_consumer(self, interface: HeaderQueue):
        """ Adds a HeaderQueue interface that will consume packets from this mux. """
        self._consumers.append(interface)

    def elaborate(self, platform):
        m = Module()

        # Share the ``valid`` signal and header itself with every consumer.
        for consumer in self._consumers:
            m.d.comb += [
                consumer.valid  .eq(self.sink.valid),
                consumer.header .eq(self.sink.header),
            ]

        # OR together all of the ``ready`` signals to produce our multiplex'd ready.
        # (Safe only under the class assumption that at most one consumer
        # asserts ready for any given header.)
        sink_ready = functools.reduce(operator.__or__, (c.ready for c in self._consumers))
        m.d.comb += self.sink.ready.eq(sink_ready)

        return m
|
# Middle-square sequence: square n, zero-pad to eight digits, keep the middle
# four, and repeat until a value recurs; then report the cycle length.
n = int(input("Please enter a four digit number: "))
already_seen = []
while n not in already_seen:
    already_seen.append(n)
    squared = str(n * n).zfill(8)
    n = int(squared[2:6])
    print(n)
print('periodicity = ', len(already_seen) - already_seen.index(n))
import pygmsh
from pyfr_wrapper import msh2pyfrm
from pyfr_wrapper import pyfr_run
from pyfr_wrapper import pyfr_export
import configparser
from flask import Flask, render_template, redirect, request, send_file, url_for
from model import Average
from werkzeug import secure_filename
import os
import meshio
import numpy as np
import fileinput
import sys
# Application object
app = Flask(__name__)
# Relative path of directory for uploaded files
UPLOAD_DIR = 'mesh/'
app.config['UPLOAD_FOLDER'] = UPLOAD_DIR
# NOTE(review): hard-coded secret key -- acceptable for a local tool, but it
# should come from configuration/environment in any deployed setting.
app.secret_key = 'MySecretKey'
# Create the upload directory on first run.
if not os.path.isdir(UPLOAD_DIR):
    os.mkdir(UPLOAD_DIR)
# Allowed file types for file upload
ALLOWED_EXTENSIONS = set(['msh'])
def allowed_file(filename):
    """Does filename have the right extension?"""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET'])
def index():
    """Render the landing page with a fresh Average form and no mesh loaded."""
    form = Average(request.form)
    return render_template("view.html", form=form, pyfrm=None)
@app.route('/pygmsh', methods=['POST'])
def pyGmsh():
    """Build a mesh from user-submitted pygmsh code and return it as a .vtu file."""
    PyGmsh = request.form.get("PyGmsh")
    geom = pygmsh.built_in.Geometry()
    # SECURITY: exec() runs arbitrary Python supplied by the client -- this is
    # only acceptable for a trusted/local tool, never a public deployment.
    # The submitted script is expected to populate ``geom``.
    exec(PyGmsh)
    points, cells, point_data, cell_data, field_data = pygmsh.generate_mesh(geom)
    #points, cells, _, _, _ = pygmsh.generate_mesh(geom)
    meshio.write('mesh.vtu', points, cells, cell_data=cell_data)
    return send_file("mesh.vtu")
@app.route('/upload_msh', methods=['POST'])
def upload_msh():
    """Accept an uploaded .msh file, store it, convert it to .pyfrm, then redirect.

    Redirects back to the '#calc' section of the index page.
    """
    # Save uploaded file on server if it exists and is valid
    form = Average(request.form)
    pyfrm = None
    if request.files:
        file = request.files['file']
        if file and allowed_file(file.filename):
            # Make a valid version of filename for any file system
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'],
                        filename))
            # NOTE(review): the conversion result is assigned but never used
            # (the redirect carries no state) -- confirm this is intended.
            pyfrm = msh2pyfrm(filename)
    return redirect(url_for('index') + '#calc')
@app.route('/config', methods=['POST'])
def upload_config():
    """Rewrite config/config.ini in place with values submitted in the form.

    Only lines of the form ``<key> = <value>`` inside the ``[constants]`` and
    ``[soln-ics]`` sections are replaced; everything else is copied through.
    Returns the main view with the mesh flag set.
    """
    keys = ["gamma", "mu", "Pr", "cp", "Uw", "H", "Pc", "Tw", "rho", "u", "v", "w", "p" ]
    # Whether the current line belongs to an editable section.  Initialised to
    # False so that lines appearing before the first "[section]" header can
    # never trigger a NameError (the original read ``check`` before binding it).
    check = False
    for line in fileinput.input("config/config.ini", inplace=1):
        if line.startswith("["):
            check = "constants" in line or "soln-ics" in line
        if check:
            for key in keys:
                if line.startswith(key + " = "):
                    line = key + " = " + request.form.get(key) + "\n"
        # With inplace=1, stdout is redirected into the file being rewritten.
        sys.stdout.write(line)
    return render_template("view.html", pyfrm=True)
@app.route('/run', methods=['POST'])
def run():
    """Run the PyFR solver on the named mesh and send back the exported .vtu."""
    filename = request.form.get("filename")
    # NOTE(review): if the .pyfrm is missing this view falls through and
    # returns None (a 500 in Flask); the "-040.vtu" suffix is hard-coded and
    # presumably matches the solver's final output index -- confirm against
    # config.ini.
    if os.path.isfile(os.path.join('mesh', filename +".pyfrm")):
        pyfr_run(filename)
        pyfr_export(filename)
        return send_file(filename + "-040.vtu")
if __name__ == '__main__':
    # debug=True enables the Werkzeug debugger/reloader -- development only.
    app.run(debug=True)
|
# Key -> drone command string (command set matches the Tello SDK -- confirm).
KEY_COMMANDS = {
    'w': 'forward 20',
    's': 'back 20',
    'right': 'cw 5',
    'left': 'ccw 5',
    'up': 'up 20',
    'down': 'down 20',
    'space': 'flip f',
    'e': 'flip r',
    # NOTE(review): 'q' duplicates the 'space' command; possibly another flip
    # direction was intended -- confirm before changing.
    'q': 'flip f',
}

# 'c' enters command mode; it is checked separately, exactly as before.
if key_press == 'c':
    sendmsg('command')
if key_press in KEY_COMMANDS:
    sendmsg(KEY_COMMANDS[key_press])
|
## First written at local
import numpy as np
## Second written at github
x = [1, 2, 3, 4]
## Third written at github
## Fourth written at local
# Element-wise natural log of the list (returns a numpy array).
y = np.log(x)
## Fifth written at github branch_0
# NOTE(review): ``plot`` and ``boxplot`` are neither defined nor imported in
# this file -- presumably matplotlib.pyplot names (or ``from pylab import *``)
# from an earlier revision; as written these calls raise NameError.
plot(x, y)
## Sixth written at merge
boxplot(x, y)
import urllib.request
import json
import dml
import prov.model
import datetime
import uuid
import re
from alyu_sharontj_yuxiao_yzhang11.Util.Util import *
class education_trans_avg(dml.Algorithm):
    """Per-zipcode average count of nearby transit options (Hubway + MBTA)
    around schools, computed from three previously-loaded collections."""

    contributor = 'alyu_sharontj_yuxiao_yzhang11'
    reads = ['alyu_sharontj_yuxiao_yzhang11.education',
            'alyu_sharontj_yuxiao_yzhang11.hubway',
            'alyu_sharontj_yuxiao_yzhang11.MBTA'] #read the data of roads and trafficsignals from mongo
    writes = ['alyu_sharontj_yuxiao_yzhang11.education_trans_avg']

    @staticmethod
    def execute(trial=False):
        # NOTE(review): the ``trial`` flag is accepted but never used --
        # trial runs execute the full pipeline.
        startTime = datetime.datetime.now()
        '''Set up the database connection.'''
        client = dml.pymongo.MongoClient()
        repo = client.repo
        repo.authenticate('alyu_sharontj_yuxiao_yzhang11', 'alyu_sharontj_yuxiao_yzhang11')
        '''get (schoolid,zipcode,latitude,longitute) from alyu_sharontj_yuxiao_yzhang11.education'''
        schoolinfo = []
        edudb = repo['alyu_sharontj_yuxiao_yzhang11.education']
        educur = edudb.find() #filter not work
        for info in educur:
            school_id = info['properties']['SchoolId']
            # SchoolId "0" entries are treated as invalid and skipped.
            if (school_id != "0"):
                address = info['properties']['Address']
                # Zipcode is taken as the last five characters of the address.
                zipcode = address[-5: ]
                Latitude = float(info['properties']['Latitude'])
                Longitude = float(info['properties']['Longitude'])
                schoolinfo.append((school_id, zipcode, (Latitude, Longitude)))
        # print(schoolinfo)
        hubwaydb = repo['alyu_sharontj_yuxiao_yzhang11.hubway']
        hubwayinfo = []
        # Only stations currently in service.
        match = {
            'status': "Existing"
        }
        hubwayExist = hubwaydb.aggregate([
            {
                '$match': match
            }
        ])
        for info in hubwayExist:
            hubway_id = info['id']
            Latitude = float(info['lat'])
            Longitude = float(info['lng'])
            hubwayinfo.append((hubway_id,(Latitude,Longitude)))
        # print(hubwayinfo)
        # Cross every school with every station; keep pairs within 0.8
        # (units set by the Util ``distance`` helper -- presumably miles/km,
        # confirm in Util).  NOTE: ``zip`` below shadows the builtin locally.
        edu_hub = [(s[0],s[1], h[0], distance(s[2], h[1])) for (s, h) in product(schoolinfo, hubwayinfo)]
        # print(len(edu_hub))
        edu_hub_1 = [ ((s,zip),dis) for (s,zip,h,dis) in edu_hub if dis<0.8]
        # print(len(edu_hub_1))
        # Count nearby Hubway stations per (school, zipcode).
        edu_hub_count = aggregate(project(edu_hub_1, lambda t: (t[0],1)), sum)
        mbtadb = repo['alyu_sharontj_yuxiao_yzhang11.MBTA']
        mbtainfo = []
        mbtacur = mbtadb.find();
        for info in mbtacur:
            mbta_id = info['stop_id']
            Latitude = float(info['stop_lat'])
            Longitude = float(info['stop_lon'])
            mbtainfo.append((mbta_id, (Latitude, Longitude)))
        # print(mbtainfo)
        # Same nearby-count computation for MBTA stops.
        edu_mbta = [(s[0], s[1], distance(s[2], h[1])) for (s, h) in product(schoolinfo, mbtainfo)]
        # print(len(edu_mbta))
        edu_mbta_1 = [((s, zip), dis) for (s, zip, dis) in edu_mbta if dis < 0.8]
        # print(len(edu_mbta_1))
        edu_mbta_count = aggregate(project(edu_mbta_1, lambda t: (t[0], 1)), sum)
        # print(edu_mbta_count)
        # Join the two counts on school id and sum them per school.
        select_edu_mbta_hub = select(product(edu_hub_count, edu_mbta_count), lambda t: t[0][0][0]==t[1][0][0])
        edu_hub_mbta = [(h[0][1], h[0][0], h[1]+m[1]) for (h,m) in select_edu_mbta_hub]
        # print(edu_hub_mbta)
        # Average transit-option count per zipcode: (zip, #schools, avg).
        zip_edu_trans = project(edu_hub_mbta, lambda t: (t[0], (1, t[2])))
        # print(zip_edu_trans)
        zip_edu_trans_count = aggregate(zip_edu_trans, ADD)
        # print(zip_edu_trans_count)
        zip_edu_trans_avg = [(z, t[0], t[1]/t[0]) for (z,t)in zip_edu_trans_count]
        # print(zip_edu_trans_avg)
        repo.dropCollection("education_trans_avg")
        repo.createCollection("education_trans_avg")
        for i in zip_edu_trans_avg:
            single = {'zip': i[0], 'school_count': i[1], 'trans_avg': i[2]}
            repo['alyu_sharontj_yuxiao_yzhang11.education_trans_avg'].insert_one(single)
        endTime = datetime.datetime.now()
        return {"start": startTime, "end": endTime}

    @staticmethod
    def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):
        '''
        Create the provenance document describing everything happening
        in this script. Each run of the script will generate a new
        document describing that invocation event.
        '''
        # NOTE(review): the mutable default ``doc`` is shared across calls --
        # repeated invocations accumulate into the same ProvDocument.
        # Set up the database connection.
        client = dml.pymongo.MongoClient()
        repo = client.repo
        repo.authenticate('alyu_sharontj_yuxiao_yzhang11', 'alyu_sharontj_yuxiao_yzhang11')
        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.
        doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.
        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.
        # doc.add_namespace('bdp', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')
        # doc.add_namespace('hdv', 'https://dataverse.harvard.edu/dataset.xhtml')
        this_script = doc.agent('alg:alyu_sharontj_yuxiao_yzhang11#education_trans_avg',
                { prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})
        education_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#education',
                {prov.model.PROV_LABEL:'education',
                prov.model.PROV_TYPE:'ont:DataSet'})
        hubway_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#hubway',
                {prov.model.PROV_LABEL:'hubway',
                prov.model.PROV_TYPE:'ont:DataSet'})
        mbta_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#MBTA',
                {prov.model.PROV_LABEL: 'MBTA',
                prov.model.PROV_TYPE: 'ont:DataSet'})
        this_run = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)#, 'ont:Query':'?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'})
        output = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#education_trans_avg',
                { prov.model.PROV_LABEL:'education_trans_avg', prov.model.PROV_TYPE: 'ont:DataSet'})
        doc.wasAssociatedWith(this_run, this_script)
        doc.used(this_run, education_input, startTime)
        doc.used(this_run, hubway_input, startTime)
        doc.used(this_run, mbta_input, startTime)
        doc.wasAttributedTo(output, this_script)
        doc.wasGeneratedBy(output, this_run, endTime)
        doc.wasDerivedFrom(output, education_input, this_run, this_run, this_run)
        doc.wasDerivedFrom(output, hubway_input, this_run, this_run, this_run)
        doc.wasDerivedFrom(output, mbta_input, this_run, this_run, this_run)
        repo.logout()
        return doc
#
# education_trans_avg.execute()
# doc = education_trans_avg.provenance()
# print(doc.get_provn())
# print(json.dumps(json.loads(doc.serialize()), indent=4))
## eof
|
import random
# Card categories used to build a standard 52-card deck.
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
# Blackjack value of each rank; aces start at 11 and may later count as 1.
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,
          'Queen':10, 'King':10, 'Ace':11}
# Global flag driving the hit/stand loop in the game below.
playing = True
class Card:
    """A single playing card identified by its suit and rank."""

    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank

    def __str__(self):
        return f"{self.rank} of {self.suit}"
class Deck:
    """A 52-card deck built from the module-level ``suits`` and ``ranks``."""

    def __init__(self):
        self.deck = []
        for suit in suits:
            for rank in ranks:
                self.deck.append(Card(suit, rank))

    def __str__(self):
        # BUG FIX: the original printed each card and implicitly returned
        # None, so str(deck) / print(deck) raised TypeError. __str__ must
        # return a string.
        return '\n'.join(str(card) for card in self.deck)

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.deck)

    def deal(self):
        """Remove and return the top (last) card."""
        return self.deck.pop()
class Hand:
    """The cards held by one participant, with a running blackjack total."""

    def __init__(self):
        self.cards = []
        self.values = 0   # current hand total
        self.aces = 0     # number of aces still counted as 11

    def __str__(self):
        # BUG FIX: return the description instead of printing it; the
        # original implicitly returned None, breaking str(hand).
        return '\n'.join(str(card) for card in self.cards)

    def add_card(self, card):
        """Add *card*, update the total, and re-balance aces if busted."""
        self.cards.append(card)
        self.values += values[card.rank]
        if card.rank == "Ace":
            self.aces += 1
        self.adjust_for_ace()

    def adjust_for_ace(self):
        # BUG FIX: only downgrade an ace (11 -> 1) when the hand actually
        # holds one counted at 11.  The original subtracted 10 from any
        # hand over 21 -- silently "un-busting" hands with no aces -- and
        # never decremented the ace counter.
        while self.values > 21 and self.aces:
            self.values -= 10
            self.aces -= 1
class Chips:
    """Tracks a player's bankroll and the bet currently on the table."""

    def __init__(self, total):
        self.total = total
        self.bet = 0

    def win_bet(self):
        """Credit the current bet to the balance and report it."""
        self.total = self.total + self.bet
        print(f"Current Balance: {self.total}")

    def lose_bet(self):
        """Debit the current bet from the balance and report it."""
        self.total = self.total - self.bet
        print(f"Current Balance: {self.total}")
def take_bet():
    """Prompt until a valid integer bet within the player's balance is placed.

    Mutates the module-level ``Chips`` instance: the accepted wager is stored
    in ``Chips.bet``.
    """
    while True:
        try:
            n = int(input("Place your bet:"))
        except ValueError:
            # BUG FIX: narrowed from a bare ``except`` so that Ctrl-C and
            # other non-input errors are no longer silently swallowed.
            print("Please enter an integer amount")
            continue
        if n > Chips.total:
            print(f"Insufficient funds\nCurrent Balance: {Chips.total}")
        else:
            Chips.bet = n
            print("Bet taken")
            break
def hit(deck, hand):
    """Deal one card from *deck* into *hand*."""
    card = deck.deal()
    hand.add_card(card)
def hit_or_stand(deck,hand):
    """Prompt the player to hit or stand, updating the global ``playing`` flag.

    Loops until the player stands or the hand busts; each hit re-displays
    the table.
    """
    global playing
    while playing:
        # Stop prompting once the hand is bust.
        if hand.values > 21:
            break
        m = input("Hit(H) or Stand(S):").capitalize()
        if m == "Hit" or m == "H":
            hit(deck,hand)
            # NOTE(review): displays the module-level ``player``/``dealer``
            # globals rather than the ``hand`` argument -- works only while
            # this is always called with the global player hand.
            show_some(player,dealer)
        elif m == "Stand" or m == "S":
            playing = False
def show_some(player, dealer):
    """Print the dealer's up-card (hole card hidden) and the player's full hand."""
    dealer_view = f"\nDealer's Hand\n{dealer.cards[0]}\n?\nCurrent Value:?\n"
    print(dealer_view)
    print("Player's Hand\n")
    for card in player.cards:
        print(card)
    print(f"\nCurrent Value: {player.values}\n")
def show_all(player, dealer):
    """Print both hands in full, each followed by its current value."""
    print("\nDealer's Hand\n")
    for card in dealer.cards:
        print(card)
    print(f"\nCurrent Value: {dealer.values}\n")
    print("Player's Hand\n")
    for card in player.cards:
        print(card)
    print(f"\nCurrent Value: {player.values}\n")
def player_busts(player):
    """True if the player's total exceeds 21."""
    return player.values > 21

def player_wins(player, dealer):
    """True if the player's total beats the dealer's."""
    return player.values > dealer.values

def dealer_busts(dealer):
    """True if the dealer's total exceeds 21."""
    return dealer.values > 21

def dealer_wins(player, dealer):
    """True if the dealer's total beats the player's."""
    return dealer.values > player.values

def push(player, dealer):
    """True if the two totals tie."""
    return player.values == dealer.values
# NOTE(review): rebinding ``Deck`` and ``Chips`` to instances shadows the
# classes; works here only because each is instantiated exactly once.
Deck = Deck()
player = Hand()
dealer = Hand()
# Ask for the starting bankroll until an integer is entered.
while True:
    try:
        balance = int(input("What's your balance: "))
    except:
        print("Please enter an integer balance")
    else:
        break
Chips = Chips(balance)
# Main game loop: one round per iteration.
while True:
    print("A Game of BlackJack")
    Deck.shuffle()
    # Deal two cards each: first two to the player, next two to the dealer.
    for i in range(0,4):
        card = Deck.deal()
        if i < 2:
            player.add_card(card)
        else:
            dealer.add_card(card)
    take_bet()
    show_some(player,dealer)
    while playing: # recall this variable from our hit_or_stand function
        hit_or_stand(Deck,player)
    # NOTE(review): this ``break`` exits the whole game after a bust, so the
    # "play again" prompt is never reached in that case -- confirm intended.
    if player_busts(player):
        Chips.lose_bet()
        print("Player bust\n")
        break
    # NOTE(review): dealer draws until it both reaches 17 and beats the
    # player -- stronger than the standard "stand on 17" rule; confirm.
    while dealer.values < 17 or dealer.values < player.values:
        hit(Deck,dealer)
    show_all(player,dealer)
    # Settle the round.
    if player_wins(player,dealer):
        Chips.win_bet()
        print("Player wins\n")
    elif dealer_busts(dealer):
        Chips.win_bet()
        print("Dealer bust\n")
    elif push(player,dealer):
        print("Push, bet returned\n")
    else:
        Chips.lose_bet()
        print("Dealer Wins\n")
    play_again = input("Would you like to play again? Yes(Y) or No(N)?").capitalize()
    if play_again == "Yes" or play_again == "Y":
        playing = True
    else:
        break
    # Return all dealt cards to the deck and reset hand totals for the next round.
    pl,dl= len(player.cards),len(dealer.cards)
    for i in range(0,pl):
        p = (player.cards).pop()
        (Deck.deck).append(p)
    for i in range(0,dl):
        d = (dealer.cards).pop()
        (Deck.deck).append(d)
    player.values = 0
    dealer.values = 0
|
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
# start with 1-indexed array of zeros and a list of operations
# [0 0 0 0 0]
# input:
# 5 3 // 5: length array 3: number of subsequent lines
# 1 2 100  // add 100 to elements [1:2] inclusive
# 2 5 100 // add 100 to elements [2:5] inclusive
# 3 4 100 // add 100 to elements [3:4] inclusive
# [100 100 0 0 0]
# [100 200 100 100 100]
# [100 200 200 200 100]
# return highest number, 200 in above example
#
# Used algorithm from JAVAAID youtube video
# Uses O(n+m) time.
# Once the time was brought down, my submission still failed many tests.
# I had not optimized the space and needlessly used two arrays.
# Only one array is needed.
# Still could not pass after removing extra array.
# I had added one extra element to the array to make indexing easier.
# When I removed the one extra element all tests passed.
# Literally one array element caused 7 or 8 tests to fail.
# Lesson learned.
# Complete the arrayManipulation function below.
def arrayManipulation(n, queries):
    """Return the maximum value after applying range-add queries to an n-length array.

    Uses a difference array: each query (start, end, k) adds k at ``start``
    and removes it just past ``end``; a single prefix-sum pass then recovers
    the running values, giving O(n + m) time overall.
    """
    diff = [0] * (n + 2)
    for start, end, k in queries:
        diff[start] += k
        diff[end + 1] -= k
    best = 0
    running = 0
    for delta in diff:
        running += delta
        if running > best:
            best = running
    return best
if __name__ == '__main__':
    # fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # Write to stdout instead of the HackerRank output file.
    # NOTE(review): ``fptr.close()`` below therefore closes sys.stdout.
    fptr = sys.stdout
    # First line: array length n and query count m.
    nm = input().split()
    n = int(nm[0])
    m = int(nm[1])
    # Next m lines: one "start end k" query each.
    queries = []
    for _ in range(m):
        queries.append(list(map(int, input().rstrip().split())))
    result = arrayManipulation(n, queries)
    fptr.write(str(result) + '\n')
    fptr.close()
|
from django.core.management.base import BaseCommand
from django.utils import timezone
from announcements.models import Announcement
from datetime import datetime
from notification.models import Notification
from django.utils import timezone
from push_notifications.models import GCMDevice
class Command(BaseCommand):
    """Send out announcements whose publish time has arrived.

    For each due, unsent announcement: mark it sent, then push a GCM
    notification and create a Notification row for every resident user in
    the announcement's area (each user at most once per announcement).
    """
    help = 'Update Announcement'

    def handle(self, *args, **kwargs):
        anns = Announcement.objects.filter(publish_datetime__lte = timezone.now(),send_out = False)
        for a in anns:
            # Users already notified for this announcement.  A set (the
            # original used a list named ``re``, shadowing the stdlib module)
            # gives O(1) membership checks.
            notified_user_ids = set()
            Announcement.objects.filter(pk=a.id).update(send_out=True)
            for street in a.area.street_set.all():
                for lot in street.lot_set.all():
                    for resident in lot.resident_set.all():
                        user_id = resident.user.id
                        if user_id in notified_user_ids:
                            continue
                        devices = GCMDevice.objects.filter(user=user_id)
                        # BUG FIX: the original referenced an undefined name
                        # ``instance``; the announcement being sent is ``a``.
                        devices.send_message(a.title, extra={"type": "A", "value": a.id})
                        Notification.objects.create(
                            descriptions = a.title,
                            type = "A",
                            object_id = a.id,
                            user_id = user_id,
                        )
                        notified_user_ids.add(user_id)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import inspect
from typing import Dict, List, Optional, Tuple, Union
import torch
import copy
from torch import nn
import math
import numpy as np
import torch.nn.functional as F
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.modeling.roi_heads.box_head import build_box_head
from detectron2.modeling.roi_heads.keypoint_head import build_keypoint_head
from detectron2.modeling.roi_heads.mask_head import build_mask_head
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals, add_ground_truth_to_proposals_single_image
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals, select_proposals_with_visible_keypoints, ROIHeads
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.sampling import subsample_labels
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from .mypooler import MyROIPooler
from .my_fast_rcnn_output import MyFastRCNNOutputLayers
__all__ = ["TransformerROIHeads"]
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) along the last dim."""
    cx, cy, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return torch.stack(corners, dim=-1)
def box_xyxy_to_cxcywh(x):
    """Convert boxes from corner form (x0, y0, x1, y1) to (cx, cy, w, h)."""
    x0, y0, x1, y1 = x.unbind(-1)
    center = [(x0 + x1) / 2, (y0 + y1) / 2]
    size = [x1 - x0, y1 - y0]
    return torch.stack(center + size, dim=-1)
def add_noise_to_boxes(boxes):
    """Randomly jitter boxes: centers shift by up to ±10% of the box extent,
    sizes scale within roughly (0.8, 1.25).

    Input and output are in (x0, y0, x1, y1) form; the jitter itself is
    applied in (cx, cy, w, h) space.  Non-deterministic: draws from
    ``torch.rand`` on the boxes' device.  Requires strictly positive widths
    and heights (asserted).
    """
    cxcy_boxes = box_xyxy_to_cxcywh(boxes)
    resize_factor = torch.rand(cxcy_boxes.shape, device=cxcy_boxes.device)
    # (u - 0.5) * 0.2 is in (-0.1, 0.1): shift centers by up to ±10% of w/h.
    new_cxcy = cxcy_boxes[..., :2] + cxcy_boxes[..., 2:] * (resize_factor[..., :2] - 0.5) * 0.2
    assert (cxcy_boxes[..., 2:] > 0).all().item()
    # 0.8 ** (2u - 1) spans (0.8, 1/0.8): multiplicative size jitter.
    new_wh = cxcy_boxes[..., 2:] * (0.8 ** (resize_factor[..., 2:] * 2 - 1))
    assert (new_wh > 0).all().item()
    new_cxcy_boxes = torch.cat([new_cxcy, new_wh], dim=-1)
    new_boxes = box_cxcywh_to_xyxy(new_cxcy_boxes)
    return new_boxes
@ROI_HEADS_REGISTRY.register()
class TransformerROIHeads(ROIHeads):
"""
It's "standard" in a sense that there is no ROI transform sharing
or feature sharing between tasks.
Each head independently processes the input features by each head's
own pooler and head.
This class is used by most models, such as FPN and C5.
To implement more models, you can subclass it and implement a different
:meth:`forward()` or a head.
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: MyROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
mask_in_features: Optional[List[str]] = None,
mask_pooler: Optional[MyROIPooler] = None,
mask_head: Optional[nn.Module] = None,
keypoint_in_features: Optional[List[str]] = None,
keypoint_pooler: Optional[MyROIPooler] = None,
keypoint_head: Optional[nn.Module] = None,
train_on_pred_boxes: bool = False,
add_noise_to_proposals: bool = False,
encoder_feature: Optional[str] = None,
random_sample_size: bool = False,
random_sample_size_upper_bound: float = 1.0,
random_sample_size_lower_bound: float = 0.8,
random_proposal_drop: bool = False,
random_proposal_drop_upper_bound: float = 1.0,
random_proposal_drop_lower_bound: float = 0.8,
max_proposal_per_batch: int = 0,
**kwargs
):
"""
NOTE: this interface is experimental.
Args:
box_in_features (list[str]): list of feature names to use for the box head.
box_pooler (ROIPooler): pooler to extra region features for box head
box_head (nn.Module): transform features to make box predictions
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
mask_in_features (list[str]): list of feature names to use for the mask head.
None if not using mask head.
mask_pooler (ROIPooler): pooler to extra region features for mask head
mask_head (nn.Module): transform features to make mask predictions
keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask*``.
train_on_pred_boxes (bool): whether to use proposal boxes or
predicted boxes from the box head to train other heads.
"""
super().__init__(**kwargs)
# keep self.in_features for backward compatibility
self.in_features = self.box_in_features = box_in_features
self.box_pooler = box_pooler
self.box_head = box_head
self.box_predictor = box_predictor
self.mask_on = mask_in_features is not None
if self.mask_on:
self.mask_in_features = mask_in_features
self.mask_pooler = mask_pooler
self.mask_head = mask_head
self.keypoint_on = keypoint_in_features is not None
if self.keypoint_on:
self.keypoint_in_features = keypoint_in_features
self.keypoint_pooler = keypoint_pooler
self.keypoint_head = keypoint_head
self.train_on_pred_boxes = train_on_pred_boxes
self.add_noise_to_proposals = add_noise_to_proposals
self.encoder_feature = encoder_feature
self.random_sample_size = random_sample_size
self.random_proposal_drop = random_proposal_drop
self.max_proposal_per_batch = max_proposal_per_batch
self.random_proposal_drop_upper_bound = random_proposal_drop_upper_bound
self.random_proposal_drop_lower_bound = random_proposal_drop_lower_bound
self.random_sample_size_upper_bound = random_sample_size_upper_bound
self.random_sample_size_lower_bound = random_sample_size_lower_bound
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg)
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
ret["add_noise_to_proposals"] = cfg.MODEL.ROI_BOX_HEAD.ADD_NOISE_TO_PROPOSALS
ret["encoder_feature"] = cfg.MODEL.ROI_BOX_HEAD.ENCODER_FEATURE
ret["random_sample_size"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE
ret["random_sample_size_upper_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE_UPPER_BOUND
ret["random_sample_size_lower_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_SAMPLE_SIZE_LOWER_BOUND
ret["random_proposal_drop"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP
ret["random_proposal_drop_upper_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP_UPPER_BOUND
ret["random_proposal_drop_lower_bound"] = cfg.MODEL.ROI_BOX_HEAD.RANDOM_PROPOSAL_DROP_LOWER_BOUND
ret["max_proposal_per_batch"] = cfg.MODEL.ROI_BOX_HEAD.MAX_PROPOSAL_PER_BATCH
# Subclasses that have not been updated to use from_config style construction
# may have overridden _init_*_head methods. In this case, those overridden methods
# will not be classmethods and we need to avoid trying to call them here.
# We test for this with ismethod which only returns True for bound methods of cls.
# Such subclasses will need to handle calling their overridden _init_*_head methods.
if inspect.ismethod(cls._init_box_head):
ret.update(cls._init_box_head(cfg, input_shape))
if inspect.ismethod(cls._init_mask_head):
ret.update(cls._init_mask_head(cfg, input_shape))
if inspect.ismethod(cls._init_keypoint_head):
ret.update(cls._init_keypoint_head(cfg, input_shape))
ret["proposal_matcher"] = Matcher(
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
cfg.MODEL.ROI_HEADS.IOU_LABELS,
allow_low_quality_matches=False,
)
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = MyROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
box_predictor = MyFastRCNNOutputLayers(cfg, box_head.output_shape)
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
@classmethod
def _init_mask_head(cls, cfg, input_shape):
if not cfg.MODEL.MASK_ON:
return {}
else:
raise NotImplementedError
@classmethod
def _init_keypoint_head(cls, cfg, input_shape):
if not cfg.MODEL.KEYPOINT_ON:
return {}
else:
raise NotImplementedError
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
assert targets
proposals = self.label_and_sample_proposals(proposals, targets)
if self.training:
losses = self._forward_box(features, proposals, targets)
# Usually the original proposals used by the box head are used by the mask, keypoint
# heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
# predicted by the box head.
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
pred_instances = self._forward_box(features, proposals)
# During inference cascaded prediction is used: the mask and keypoints heads are only
# applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def forward_with_given_boxes(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> List[Instances]:
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
This is useful for downstream tasks where a box is known, but need to obtain
other attributes (outputs of other heads).
Test-time augmentation also uses this.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (list[Instances]):
the same `Instances` objects, with extra
fields such as `pred_masks` or `pred_keypoints`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
instances = self._forward_mask(features, instances)
instances = self._forward_keypoint(features, instances)
return instances
    def _forward_box(
        self, features: Dict[str, torch.Tensor], proposals: List[Instances], targets=None, return_box_features: bool=False
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
        the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".
            targets: ground-truth instances, forwarded to the predictor's loss
                (training only).
            return_box_features (bool): if True (training only), also return the
                per-proposal box features alongside the loss dict.
        Returns:
            In training, a dict of losses (plus box features when requested).
            In inference, a list of `Instances`, the predicted instances.
        """
        box_features = [features[f] for f in self.box_in_features]
        # The pooler pads per-image proposals to a common count; dec_mask marks
        # the padded slots and inds_to_padded_inds maps real proposals to their
        # padded positions (used to un-pad below).
        padded_box_features, dec_mask, inds_to_padded_inds = (
            self.box_pooler(box_features, [x.proposal_boxes for x in proposals]))
        enc_feature = None
        enc_mask = None
        if self.box_head.use_encoder_decoder:
            enc_feature = features[self.encoder_feature]
            # Build a per-image padding mask at full batch resolution:
            # True = outside the image's valid extent.
            b = len(proposals)
            h = max([x.image_size[0] for x in proposals])
            w = max([x.image_size[1] for x in proposals])
            enc_mask = torch.ones((b, h, w), dtype=torch.bool, device=padded_box_features.device)
            for c, image_size in enumerate([x.image_size for x in proposals]):
                enc_mask[c, :image_size[0], :image_size[1]] = False
            names = ["res1", "res2", "res3", "res4", "res5"]
            if self.encoder_feature == "p6":
                names.append("p6")
            # Downsample the mask stage by stage so it ends up at the spatial
            # resolution of the encoder feature map.
            for name in names:
                if name == "res1":
                    # "res1" is not present in `features`; derive its stride-2 shape.
                    target_shape = ((h+1)//2, (w+1)//2)
                else:
                    x = features[name]
                    target_shape = x.shape[-2:]
                m = enc_mask
                enc_mask = F.interpolate(m[None].float(), size=target_shape).to(torch.bool)[0]
        max_num_proposals = padded_box_features.shape[1]
        # Normalize proposal boxes to [0, 1] image coordinates and concatenate
        # (cx, cy, w, h) with (x1, y1, x2, y2); zero-pad to the common count.
        normalized_proposals = []
        for x in proposals:
            gt_box = x.proposal_boxes.tensor
            img_h, img_w = x.image_size
            gt_box = gt_box / torch.tensor([img_w, img_h, img_w, img_h],
                dtype=torch.float32, device=gt_box.device)
            gt_box = torch.cat([box_xyxy_to_cxcywh(gt_box), gt_box], dim=-1)
            gt_box = F.pad(gt_box, [0, 0, 0, max_num_proposals - gt_box.shape[0]])
            normalized_proposals.append(gt_box)
        normalized_proposals = torch.stack(normalized_proposals, dim=0)
        padded_box_features = self.box_head(enc_feature, enc_mask, padded_box_features, dec_mask, normalized_proposals)
        # Un-pad: back to one feature row per real proposal.
        box_features = padded_box_features[inds_to_padded_inds]
        predictions = self.box_predictor(box_features)
        if self.training:
            losses = self.box_predictor.losses(predictions, proposals, targets)
            # proposals is modified in-place below, so losses must be computed first.
            if self.train_on_pred_boxes:
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals
                    )
                    for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            if return_box_features:
                return losses, box_features
            else:
                return losses
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            return pred_instances
def _forward_mask(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
"""
Forward logic of the mask prediction branch.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
instances (list[Instances]): the per-image instances to train/predict masks.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_masks" and return it.
"""
if not self.mask_on:
return {} if self.training else instances
else:
raise NotImplementedError
def _forward_keypoint(
self, features: Dict[str, torch.Tensor], instances: List[Instances]
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
"""
Forward logic of the keypoint prediction branch.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
instances (list[Instances]): the per-image instances to train/predict keypoints.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_keypoints" and return it.
"""
if not self.keypoint_on:
return {} if self.training else instances
else:
raise NotImplementedError
    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        """
        Prepare some proposals to be used to train the ROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
        boxes, with a fraction of positives that is no larger than
        ``self.positive_fraction``.
        Args:
            See :meth:`ROIHeads.forward`
        Returns:
            list[Instances]:
                length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:
                - proposal_boxes: the proposal boxes
                - gt_boxes: the ground-truth box that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)
                Other fields such as "gt_classes", "gt_masks", that's included in `targets`.
        """
        # Deep-copy so the noise added below never mutates the caller's targets.
        gt_boxes = [copy.deepcopy(x.gt_boxes) for x in targets]
        # Augment proposals with ground-truth boxes.
        # In the case of learned proposals (e.g., RPN), when training starts
        # the proposals will be low quality due to random initialization.
        # It's possible that none of these initial
        # proposals have high enough overlap with the gt objects to be used
        # as positive examples for the second stage components (box head,
        # cls head, mask head). Adding the gt boxes to the set of proposals
        # ensures that the second stage components will have some positive
        # examples from the start of training. For RPN, this augmentation improves
        # convergence and empirically improves box AP on COCO by about 0.5
        # points (under one tested configuration).
        proposals_with_gt = []
        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image, gt_boxes_per_image in zip(proposals, targets, gt_boxes):
            has_gt = len(targets_per_image) > 0
            if self.add_noise_to_proposals:
                # Augmentation: jitter the proposal boxes before matching.
                proposals_per_image.proposal_boxes.tensor = (
                    add_noise_to_boxes(proposals_per_image.proposal_boxes.tensor))
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            # Unlike the upstream implementation, gt boxes are only appended
            # when no proposal matched any gt at all, and they are noised first;
            # the matching is then redone on the augmented proposal set.
            if not torch.any(matched_labels == 1) and self.proposal_append_gt:
                gt_boxes_per_image.tensor = add_noise_to_boxes(gt_boxes_per_image.tensor)
                proposals_per_image = add_ground_truth_to_proposals_single_image(gt_boxes_per_image,
                    proposals_per_image)
                match_quality_matrix = pairwise_iou(
                    targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
                )
                matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes)
            # Set target attributes of the sampled proposals:
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes
            # We index all the attributes of targets that start with "gt_"
            # and have not been added to proposals yet (="gt_classes").
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # NOTE: here the indexing waste some compute, because heads
                # like masks, keypoints, etc, will filter the proposals again,
                # (by foreground/background, or number of keypoints in the image, etc)
                # so we essentially index the data twice.
                for (trg_name, trg_value) in targets_per_image.get_fields().items():
                    if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
                        proposals_per_image.set(trg_name, trg_value[sampled_targets])
                proposals_per_image.set('gt_idxs', sampled_targets)
            else:
                # No gt in this image: give every sampled proposal a zero box
                # and a zero gt index placeholder.
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
                )
                proposals_per_image.gt_boxes = gt_boxes
                proposals_per_image.set('gt_idxs', torch.zeros_like(sampled_idxs))
            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)
        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
        return proposals_with_gt
    def _sample_proposals(
        self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Based on the matching between N proposals and M groundtruth,
        sample the proposals and set their classification labels.
        Args:
            matched_idxs (Tensor): a vector of length N, each is the best-matched
                gt index in [0, M) for each proposal.
            matched_labels (Tensor): a vector of length N, the matcher's label
                (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
            gt_classes (Tensor): a vector of length M.
        Returns:
            Tensor: a vector of indices of sampled proposals. Each is in [0, N).
            Tensor: a vector of the same length, the classification label for
                each sampled proposal. Each sample is labeled as either a category in
                [0, num_classes) or the background (num_classes).
        """
        if self.random_sample_size:
            # Augmentation: keep only a random fraction of the proposals,
            # drawn uniformly from [lower_bound, upper_bound], by truncating
            # the (ordered) match lists before sampling.
            diff = self.random_sample_size_upper_bound - self.random_sample_size_lower_bound
            sample_factor = self.random_sample_size_upper_bound - np.random.rand(1)[0] * diff
            nms_topk = int(matched_idxs.shape[0] * sample_factor)
            matched_idxs = matched_idxs[:nms_topk]
            matched_labels = matched_labels[:nms_topk]
        has_gt = gt_classes.numel() > 0
        # Get the corresponding GT for each proposal
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
            gt_classes[matched_labels == 0] = self.num_classes
            # Label ignore proposals (-1 label)
            gt_classes[matched_labels == -1] = -1
        else:
            # No gt at all: everything is background.
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
        sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
            gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes
        )
        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        if self.random_proposal_drop:
            # Further augmentation: uniformly drop a random fraction of the
            # already-sampled proposals (keep fraction drawn from
            # [lower_bound, upper_bound]).
            diff = self.random_proposal_drop_upper_bound - self.random_proposal_drop_lower_bound
            sample_factor = self.random_proposal_drop_upper_bound - np.random.rand(1)[0] * diff
            nms_topk = int(sampled_idxs.shape[0] * sample_factor)
            subsample_idxs = np.random.choice(sampled_idxs.shape[0], nms_topk, replace=False)
            subsample_idxs = torch.from_numpy(subsample_idxs).to(sampled_idxs.device)
            sampled_idxs = sampled_idxs[subsample_idxs]
        return sampled_idxs, gt_classes[sampled_idxs]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import time
import os
import sys
import serial
import argparse
"""
Read sensor values from an Arduino with a Piezo sensor. This script will read
in values from a serial connection with the Arduino and calculate the walking
speed or the number of steps.
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
--onlysteps only count the number of steps, do not print the speed
--step STEP threshold value to start a new step (default: 500
--nostep NOSTEP threshold value to stop a started step (default:200)
--timewindow TIMEWINDOW
time window in seconds to calculate the speed
(default: 3)
--duration DURATION duration of the sample record in seconds (default: 30)
--verbose, -v verbose output
"""
def walk_detection(STEPS, THRESHOLD_STEP, THRESHOLD_NO_STEP, TIME_WINDOW, DURATION, VERBOSITY):
# try multiple port names because Linux has a problem here
PORTS = ['/dev/ttyACM0', '/dev/ttyACM1', '/dev/ttyACM2', '/dev/ttyACM3', '/dev/ttyACM4']
ser = None
for port in PORTS:
try:
ser = serial.Serial(port, baudrate=9600, timeout=3)
except serial.SerialException as e:
print >> sys.stderr, "Could not connect to the serial port: %s. Will try the next port." %e.strerror
continue
if ser:
print 'Serial Connection opened on port %s' %ser.name
break
# give time to open serial port
time.sleep(1.5)
if not ser:
print >> sys.stderr, "Could not connect to any of the serial ports! Will exit now."
sys.exit(1)
step_on = False
# list with timestamps of steps in the last TIME_WINDOW seconds
recent_steps = []
start_time = time.time()
current_time = start_time
if VERBOSITY:
print "Start Time: %d" %start_time
last_timestamp = 0
recent_steps = []
speed_list = []
while True:
try:
value = ser.readline().strip()
if len(value) == 0: continue
value = value.replace(os.linesep, '')
last_timestamp = current_time
current_time = time.time() - start_time
current_time_str = "%0.4f" %(current_time % 1000)
current_time = float(current_time_str)
if VERBOSITY:
print current_time_str
if current_time > DURATION:
break
except serial.SerialException as e:
print >> sys.stderr, ("Serial Exception: %s" %e.strerror)
current_time = time.time() - start_time
if current_time > DURATION:
break
else:
continue
except serial.SerialTimeoutException as e:
print >> sys.stderr, ("SerialTimeoutException: %s" %e.strerror)
current_time = time.time() - start_time
if current_time > DURATION:
break
else:
continue
except OSError as e:
print >> sys.stderr, ("OSError: %s" %e.strerror)
current_time = time.time() - start_time
if current_time > DURATION:
break
else:
continue
# every second, remove old entries from recent_steps and recalculate speed (in steps per minute)
if (int(current_time) - int(last_timestamp)) >= 1:
recent_steps = [step for step in recent_steps if (current_time - step < TIME_WINDOW)]
#check if there were any steps in the last 1.5 seconds and if not, declare the person to be stopped
if len([step for step in recent_steps if (current_time - step < 1.5)]) == 0:
print "%f Person stopped!" %current_time
speed = len(recent_steps) * (60 / TIME_WINDOW)
speed_list.append(speed)
#print recent_steps
if (not STEPS) and VERBOSITY:
print "walking at %d steps per minute" %speed
if (not step_on) and value >= THRESHOLD_STEP:
if STEPS:
print "%f Step!" %current_time
step_on = True
recent_steps.append(current_time)
if step_on and value <= THRESHOLD_NO_STEP:
step_on = False
print "Walk Detection finished!"
if not STEPS:
print "\nMeasured speeds:"
for speed in speed_list:
print speed
print "Average speed: %d" %(sum(speed_list) / len(speed_list))
if __name__ == '__main__':
    # CLI entry point: parse the command-line options and start detection.
    description = """
    Read sensor values from an Arduino with a Piezo sensor.\n\n
    This script will read in values from a serial connection with the Arduino and calculate the walking speed or the number of steps."""
    epilog = 'Written for python 2.7.3 on a Linux system.\n\n'
    arg_parser = argparse.ArgumentParser(description=description, epilog=epilog)
    arg_parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    arg_parser.add_argument('--onlysteps', help='only count the number of steps, do not print the speed', action='store_true')
    arg_parser.add_argument('--step', help='threshold value to start a new step (default: 500', type=int, default=500)
    arg_parser.add_argument('--nostep', help='threshold value to stop a started step (default:200)', type=int, default=200)
    arg_parser.add_argument('--timewindow', help='time window in seconds to calculate the speed (default: 3)', type=int, default=3)
    arg_parser.add_argument('--duration', help='duration of the sample record in seconds (default: 30)', type=int, default=30)
    arg_parser.add_argument('--verbose', '-v', help='verbose output', action='store_true')
    opts = arg_parser.parse_args()
    # Hand the parsed options to the detector in its positional order.
    walk_detection(opts.onlysteps, opts.step, opts.nostep,
                   opts.timewindow, opts.duration, opts.verbose)
|
import json
from django.apps import apps
from syncasync import sync_to_async
@sync_to_async
def getBookCount(book):
    """Return the total number of rows of the given Book model.

    Wrapped with sync_to_async so the blocking ORM query can be awaited
    from the async websocket handler below.
    """
    return book.objects.all().count()
async def websocket_application(scope, receive, send):
    """Minimal ASGI websocket handler.

    Accepts the connection, then answers every 'books?' text message with a
    JSON payload containing the current book count; exits on disconnect.
    """
    Book = apps.get_model('book', 'Book')
    while True:
        message = await receive()
        message_type = message['type']
        if message_type == 'websocket.connect':
            await send({
                'type': 'websocket.accept'
            })
        elif message_type == 'websocket.disconnect':
            break
        elif message_type == 'websocket.receive':
            print(message['text'])
            if message['text'] == 'books?':
                book_count = await getBookCount(Book)
                await send({
                    'type': 'websocket.send',
                    'text': json.dumps({'value': book_count})
                })
from rest_framework import serializers
from .models import TeamMember
from .utils import ChoiceField
class TeamMemberSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the basic TeamMember fields."""
    # Serialize the role via the project's ChoiceField wrapper —
    # presumably to render the choice's display label; see .utils.ChoiceField.
    role = ChoiceField(choices=TeamMember.ROLE_CHOICES)
    class Meta:
        model = TeamMember
        fields = ('email', 'first_name', 'last_name', 'phone_number', 'role')
|
#coding: utf-8
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for avalcreditos.prejuridico: one STRING column per
# pipe-delimited input field, plus the generated row id (idkey) and the
# load date (fecha) stamped by formatearData below.
TABLE_SCHEMA = (
    'idkey:STRING, '
    'fecha:STRING, '
    'nro_credito:STRING, '
    'nit:STRING, '
    'razon:STRING, '
    'nombre:STRING, '
    'telefono:STRING, '
    'tienda:STRING, '
    'celular:STRING, '
    'valor_a_cobrar:STRING, '
    'valor_incial_credito:STRING, '
    'valor_a_cobrar_maximo:STRING, '
    'rango_mora_obligacion:STRING, '
    'dias_sin_tramite_1:STRING, '
    'rango_mora_cliente:STRING, '
    'fecha_de_vencimiento:STRING, '
    'ano_vencimiento:STRING, '
    'edad_de_mora_maximo:STRING, '
    'edad_de_mora:STRING, '
    'dias_sin_tramite_2:STRING, '
    'ciudad_del_cliente:STRING, '
    'resultado_del_tramite:STRING, '
    'fecha_ultimo_pago:STRING, '
    'asignacion_de_usuarios:STRING, '
    'gestor_del_tramite:STRING, '
    'con_email:STRING, '
    'con_telefono:STRING, '
    'con_direccion:STRING, '
    'con_referencia:STRING, '
    'tipificacion:STRING, '
    'ult_resultado_efectivo:STRING, '
    'ult_resultado_efectivo_fecha:STRING, '
    'ano_venci_oblig_rango:STRING, '
    'ano_venci_cliente_rango:STRING, '
    'estado_de_cartera:STRING, '
    'ano_originacion:STRING, '
    'ano_orig_oblig_rango:STRING, '
    'ano_orig_cliente_rango:STRING, '
    'creditos_en_mora:STRING, '
    'fecha_prox_recordatorio:STRING, '
    'fecha_de_asignacion:STRING, '
    'ano_vencimiento_2:STRING, '
    'capital_inicial:STRING, '
    'total_capital_inicial:STRING, '
    'gestor_ultima_gestion:STRING, '
    'fecha_ult_sms:STRING, '
    'con_celular:STRING, '
    'empresa_que_gestiona:STRING, '
    'cuota_vencida:STRING, '
    'total_cuotas_vencidas:STRING, '
    'cuotas_en_mora:STRING, '
    'usuario_responsable:STRING, '
    'valor_intereses:STRING, '
    'empresa_que_reporta:STRING, '
    'valor_aval:STRING, '
    'valor_cuota:STRING, '
    'valor_abonos:STRING, '
    'ciudad_punto_de_credito:STRING, '
    'fecha_empresa_reporta:STRING, '
    'estado_de_la_cuota:STRING, '
    'empresa_origen:STRING, '
    'intereses_mora:STRING, '
    'total_intereses_mora:STRING, '
    'total_honorarios:STRING, '
    'saldo_capital:STRING, '
    'valcapitalmax:STRING, '
    'tipo_de_credito:STRING, '
    'emails:STRING, '
    'numobligaciongr:STRING, '
    'fecha_prox_acuerdo:STRING, '
    'telefono_1:STRING, '
    'telefono_2:STRING, '
    'telefono_3:STRING, '
    'telefono_4:STRING, '
    'telefono_5:STRING, '
    'telefono_6:STRING, '
    'telefono_7:STRING '
)
# ?
class formatearData(beam.DoFn):
    """Beam DoFn that parses one pipe-delimited input line into a BigQuery row
    dict matching TABLE_SCHEMA, adding a generated idkey and the load date.
    """

    # Output column names in the exact positional order of the input fields
    # (index 0 = nro_credito ... index 74 = telefono_7). Replaces 75
    # near-identical hand-written assignment lines.
    _FIELDS = (
        'nro_credito', 'nit', 'razon', 'nombre', 'telefono', 'tienda',
        'celular', 'valor_a_cobrar', 'valor_incial_credito',
        'valor_a_cobrar_maximo', 'rango_mora_obligacion', 'dias_sin_tramite_1',
        'rango_mora_cliente', 'fecha_de_vencimiento', 'ano_vencimiento',
        'edad_de_mora_maximo', 'edad_de_mora', 'dias_sin_tramite_2',
        'ciudad_del_cliente', 'resultado_del_tramite', 'fecha_ultimo_pago',
        'asignacion_de_usuarios', 'gestor_del_tramite', 'con_email',
        'con_telefono', 'con_direccion', 'con_referencia', 'tipificacion',
        'ult_resultado_efectivo', 'ult_resultado_efectivo_fecha',
        'ano_venci_oblig_rango', 'ano_venci_cliente_rango', 'estado_de_cartera',
        'ano_originacion', 'ano_orig_oblig_rango', 'ano_orig_cliente_rango',
        'creditos_en_mora', 'fecha_prox_recordatorio', 'fecha_de_asignacion',
        'ano_vencimiento_2', 'capital_inicial', 'total_capital_inicial',
        'gestor_ultima_gestion', 'fecha_ult_sms', 'con_celular',
        'empresa_que_gestiona', 'cuota_vencida', 'total_cuotas_vencidas',
        'cuotas_en_mora', 'usuario_responsable', 'valor_intereses',
        'empresa_que_reporta', 'valor_aval', 'valor_cuota', 'valor_abonos',
        'ciudad_punto_de_credito', 'fecha_empresa_reporta',
        'estado_de_la_cuota', 'empresa_origen', 'intereses_mora',
        'total_intereses_mora', 'total_honorarios', 'saldo_capital',
        'valcapitalmax', 'tipo_de_credito', 'emails', 'numobligaciongr',
        'fecha_prox_acuerdo', 'telefono_1', 'telefono_2', 'telefono_3',
        'telefono_4', 'telefono_5', 'telefono_6', 'telefono_7',
    )

    def __init__(self, mifecha):
        super(formatearData, self).__init__()
        self.mifecha = mifecha  # load date stamped on every output row

    def process(self, element):
        """Split one '|'-delimited line into a row dict for BigQuery."""
        arrayCSV = element.split('|')
        tupla = {
            'idkey': str(uuid.uuid4()),  # unique surrogate key per row
            'fecha': self.mifecha,
        }
        for indice, nombre in enumerate(self._FIELDS):
            # Strip the double quotes each field arrives wrapped in. Indexing
            # (not zip) is used deliberately so malformed short lines raise
            # IndexError, exactly as the original positional code did.
            tupla[nombre] = arrayCSV[indice].replace('"', '')
        return [tupla]
def run(archivo, mifecha):
    """Build and run the Beam pipeline that loads `archivo` (a pipe-delimited
    file, typically in GCS) into the BigQuery table avalcreditos.prejuridico,
    stamping every row with the load date `mifecha`.

    Returns a fixed status string once the pipeline has been launched.
    """
    gcs_path = "gs://ct-avalcreditos"  # root of the project's GCS bucket
    gcs_project = "contento-bi"
    # Run on Dataflow only from the production host; elsewhere use the
    # in-process DirectRunner.
    mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    pipeline = beam.Pipeline(runner=mi_runer, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "5",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
    ])
    # Historical hard-coded inputs, kept for reference:
    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181206 1100.csv", skip_header_lines=1)
    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181129 0800.csv", skip_header_lines=1)
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    # Parse each line into a schema row (see formatearData above).
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
    # lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_seg", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/info-segumiento/info_carga_banco_seg",file_name_suffix='.csv',shard_name_template='')
    transformed | 'Escritura a BigQuery avalcreditos' >> beam.io.WriteToBigQuery(
        gcs_project + ":avalcreditos.prejuridico",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
    )
    # transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
    # 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])
    jobObject = pipeline.run()
    # jobID = jobObject.job_id()
    return ("Corrio Full HD")
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/28 15:00
# @Author : Jeff Wang
# @Email : jeffwang987@163.com OR wangxiaofeng2020@ia.ac.cn
# @Software: PyCharm
# Demo: read per-pixel BGR values with OpenCV, then modify a single pixel
# and a rectangular region in place.
import cv2
import numpy as np
image = cv2.imread("dinosaur.jpg")
cv2.imshow("Original",image)
cv2.waitKey(0)
(b, g, r) = image[0][0]  # a pixel is a (B, G, R) tuple; for a single pixel, [a][b] and [a, b] behave the same
print("Pixel at [0,0] - Red:{}, Green:{}, Blue:{}".format(r, g, b))
image[0][0] = (0, 0, 255)  # overwrite the pixel's colour (red in BGR order)
(b, g, r) = image[0][0]  # the colour is a tuple
print("Now pixel at [0,0] - Red:{}, Green:{}, Blue:{}".format(r, g, b))
corner = image[0:100, 0:100]
cv2.imshow("Corner", corner)
image[0:100, 0:10] = (0, 0, 255)  # NOTE: [0:a, 0:b] and [0:a][0:b] are NOT equivalent — [0:a][0:b] slices rows twice instead of rows then columns
cv2.imshow("Update", image)
cv2.waitKey(0)
|
# -*- coding: utf-8 -*-
"""
Created on 2017/3/19
@author: will4906
"""
import json
from copy import deepcopy
import requests
from entity.QueryItem import QueryItem, DateSelect, And, ItemGroup, Or, Not
if __name__ == '__main__':
    # Inventors (affiliated with Shenzhen University) whose patents are
    # queried; the first entry requires both listed inventors on one patent.
    inventorList = [ItemGroup(And("陈思平", "董磊")), "陈昕", "汪天富", "谭力海", "彭珏", "但果", "叶继伦", "覃正笛",
                    "张旭", "张会生", "钱建庭", "丁惠君", "刁现芬", "沈圆圆", "周永进", "孔湉湉",
                    "陆敏华", "张新宇", "孙怡雯", "李乔亮", "齐素文", "徐海华", "倪东", "刘维湘",
                    "李抱朴", "黄炳升", "徐敏", "雷柏英", "胡亚欣", "何前军", "郑介志", "常春起",
                    "陈雯雯", "罗永祥", "黄鹏", "林静", "王倪传", "刘立", "张治国", "董磊"]
    infoList = []
    for i in inventorList:
        # One query per inventor: applicant fixed to Shenzhen University,
        # filed on/after 2001-01-01, invention ('I') or utility model ('U')
        # patents, published in HK.
        queryItem = QueryItem(proposer_people='深圳大学',
                              inventor_people=i,
                              request_date=DateSelect('>=', '2001-01-01'),
                              invention_type=Or('I', 'U'),
                              publish_country=ItemGroup(Or=Or('HK')))
        infoList.append(deepcopy(queryItem))
    for i in infoList:
        # getattr() is the idiomatic spelling of i.__getattribute__('search_exp')
        print(getattr(i, 'search_exp'))
|
# I pledge my Honor that I have abided by the Stevens Honor System.
# I understand that I may access the course textbook and course lecture notes
# but I am not to access any other resource. I also pledge that I worked
# alone on this exam.
# Eshita Jain
# Quiz two Part two
def main():
    """Menu-driven console calculator.

    Offers arithmetic operations (addition, subtraction, multiplication,
    division) and string operations (vowel counting, additive-key
    encryption). Any invalid entry — including non-numeric input and
    division by zero — prints an error message and restarts the menu by
    calling main() again.
    """
    try:
        n = int(input("\nFor Mathematical Functions, Please Enter the Number 1. "
                      "\nFor String Operations, Please Enter the Number 2: "))
        if n == 1:
            m = int(input("\nFor Addition, Please Enter the Number 1."
                          "\nFor Subtraction, Please Enter the Number 2."
                          "\nFor Multiplication, Please Enter the Number 3."
                          "\nFor Division, Please Enter the Number 4: "))
            if m == 1:
                a = float(input("\nEnter the first number: "))
                b = float(input("Enter the second number: "))
                total = a + b  # renamed from `sum`, which shadowed the builtin
                print("The sum is: ", total)
            elif m == 2:
                a = float(input("\nEnter the first number: "))
                b = float(input("Enter the second number: "))
                diff = a - b
                print("The difference is: ", diff)
            elif m == 3:
                a = float(input("\nEnter the first number: "))
                b = float(input("Enter the second number: "))
                x = a * b
                print("The product is: ", x)
            elif m == 4:
                a = float(input("\nEnter the first number: "))
                b = float(input('Enter the second number: '))
                if b == 0:
                    # Previously this crashed with an uncaught
                    # ZeroDivisionError (the except clause below only
                    # catches ValueError); treat it as an invalid entry.
                    print("\nError: User has entered an invalid entry.")
                    main()
                else:
                    d = a / b
                    print("The quotient is: ", d)
            else:
                print("\nError: User has entered an invalid entry.")
                main()
        elif n == 2:
            t = int(input("\nTo Determine the Number of Vowels in a String; Enter the Number 1. "
                          "\nTo Encrypt a String; Enter the Number 2: "))
            if t == 1:
                text = input("Enter a message: ")  # renamed from `str` (shadowed builtin)
                lowercase = text.lower()
                vowel_counts = {}
                for vowel in "aeiou":
                    count = lowercase.count(vowel)
                    vowel_counts[vowel] = count
                print(vowel_counts)
            elif t == 2:
                message = input("Enter message to encode: ")
                key = int(input("Enter an integer value for the key: "))
                print("The encrypted message is: ")
                for i in message:
                    # Encode each character as its code point shifted by `key`.
                    print(ord(i) + key, end=' ')
            else:
                print("\nError: User has entered an invalid entry.")
                main()
        else:
            print("\nError: User has entered an invalid entry.")
            main()
    except ValueError:
        print("\nError: User has entered an invalid entry.")
        main()
main()
# Copyright (c) 2020 Yul HR Kang. hk2699 at caa dot columbia dot edu.
from collections import OrderedDict as odict
from copy import deepcopy
from typing import Union, Type, List, Dict, Iterable, Tuple
import numpy as np
import numpy_groupies as npg
import torch
from matplotlib import pyplot as plt
from a0_dtb import a3_dtb_2D_sim as sim2d
from a0_dtb.aa1_RT import a5_dtb_2D_fit_RT_nonparam as np2d
from data_2d import consts
from lib.pylabyk import localfile, np2, plt2
# File-system root for caches and figures produced by this script.
locfile = localfile.LocalFile(
    pth_root='../../Data_2D/Data_2D_Py/a0_dtb/RTRecover',
    cache_dir=''
)
# The two competing models: serial vs parallel nonparametric 2D DTB.
dtb2ds = [sim2d.RTNonparam2DSer, sim2d.RTNonparam2DPar]
max_epoch = 300
# max_epoch = 1  # CHECKED
to_plot_progress = False  # CHECKED
# subj_parad_bis0 = [consts.SUBJ_PARAD_BI[k]
#                    for k in [
#                        # 1,
#                        7,
#                        # 13
#                    ]]
# (subject, paradigm, is_bimanual) triples to analyze.
subj_parad_bis0 = consts.SUBJ_PARAD_BI
# subj_parad_bis0 = consts.SUBJ_PARAD_BI[1:3]  # CHECKED
# subj_parad_bis0 = consts.SUBJ_PARAD_BI[-1::-1]  # CHECKED
# Short labels for the serial/parallel variants (used in file names).
td_short = ['serial', 'parallel']
td_fits_short = ['ser_np', 'par_np']
td_sims_short = ['ser_np', 'par_np']
# Preset names tried during development (kept for reference):
# preset1 = 'thtr=10+trst=200+nfl=5+sm1=0+lpsub=1e-3+dsk=sd+dsub=4'
# preset1 = 'thtr=10+trst=200+nfl=5+sm1=1+lpsub=1e-6+dsk=sd'
# preset1 = 'thtr=10+trst=200+nfl=5+sm1=0+lpsub=1e-3+dsk=sd'
# preset1 = 'thtr=10+trst=200+nfl=5+sm1=1+lpsub=1e-3+dsk=sd'
# preset1 = 'co=1+nfl=5'
# preset1 = 'co=1+nfl=5+lpsub=1e-6+sm1=1+dspub=3'
# preset1 = 'co=1+nfl=5+lpsub=1e-6+sm1=0+dspub=3'
# preset1 = 'co=1+nfl=5+lpsub=1e-6+sm1=0+dspub=0.99'
# preset1 = 'co=1+nfl=5+lpsub=1e-6+sm1=0+dspub=3'
# preset1 = 'co=1+nfl=5+lpsub=1e-3+sm1=0+dspub=0.99'
# preset1 = 'co=1+nfl=5+lpsub=1e-3+sm1=1+dspub=0.99'
# The preset actually used by the __main__ block at the bottom of the file:
preset1 = 'co=1+nfl=5+lpsub=1e-3+sm1=0+dspub=0.95'
# preset1 = 'co=1+nfl=5+lpsub=1e-3+sm1=1+dspub=0.95'
# preset1 = 'co=1+nfl=5+lpsub=1e-6+sm1=1+dspub=3'
# preset_recovery: named preprocessing/fitting presets. Keys abbreviate the
# settings (co=correct_only, e0=exclude_0coh, nfl=n_fold_test,
# lpsub=lapse_max, sm1=sumto1_wi_cond, dspub/dsub=disper_ub,
# dsk=disper_kind, thtr=thres_n_tr, trst=trial_st, mtrn=mode_train).
# 'preset_label' is only used to annotate figures; every other entry is
# forwarded as keyword arguments to the fitting entry points.
# NOTE(review): two keys are duplicated below; odict([...]) keeps the LATER
# pair, so the earlier entry is silently dead. See the inline notes.
preset_recovery = odict([(
    'co=1+nfl=5+lpsub=1e-3+sm1=1+dspub=0.95', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 0.95,
        'sumto1_wi_cond': True,
        'lapse_max': 1e-3,
    }), (
    'co=1+nfl=5+lpsub=1e-3+sm1=0+dspub=0.95', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 0.95,
        'sumto1_wi_cond': False,
        'lapse_max': 1e-3,
    }), (
    'co=1+nfl=5+lpsub=1e-3+sm1=1+dspub=0.99', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 0.99,
        'sumto1_wi_cond': True,
        'lapse_max': 1e-3,
    }), (
    'co=1+nfl=5+lpsub=1e-3+sm1=0+dspub=0.99', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 0.99,
        'sumto1_wi_cond': False,
        'lapse_max': 1e-3,
    }), (
    # NOTE(review): this key appears again further below with identical
    # values; the later duplicate wins in odict([...]) (harmless here).
    'co=1+nfl=5+lpsub=1e-6+sm1=0+dspub=3', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 3.,
        'sumto1_wi_cond': False,
        'lapse_max': 1e-6,
    }), (
    'co=1+nfl=5+lpsub=1e-6+sm1=1+dspub=3', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 3.,
        'sumto1_wi_cond': True,
        'lapse_max': 1e-6,
    }), (
    'co=1+nfl=5+lpsub=1e-6+sm1=0+dspub=0.99', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 0.99,
        'sumto1_wi_cond': False,
        'lapse_max': 1e-6,
    }), (
    # NOTE(review): duplicate of a key above (values identical).
    'co=1+nfl=5+lpsub=1e-6+sm1=0+dspub=3', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 3.,
        'sumto1_wi_cond': False,
        'lapse_max': 1e-6,
    }), (
    'co=1+nfl=5+lpsub=1e-3+sm1=0+dspub=3', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'disper_ub': 3.,
        'sumto1_wi_cond': False,
        'lapse_max': 1e-3,
    }), (
    'co=1+nfl=5+lpsub=1e-6+sm1=1', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'lapse_max': 1e-6,
    }), (
    'co=1+nfl=5+lpsub=1e-3+sm1=0', {
        'preset_label': 'correct only\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': False,
        'lapse_max': 1e-3,
    }), (
    'co=1+e0=1+nfl=5', {
        'preset_label': 'correct only,\nexcl 0-coh\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': True,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
    }), (
    # NOTE(review): key says sm1=0 but 'sumto1_wi_cond' is True below —
    # key and value disagree; confirm which was intended.
    'thtr=10+trst=200+nfl=5+sm1=0+lpsub=1e-3+dsk=sd+dsub=4', {
        'preset_label': '# 10 tr, skip first 200,\nmed lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_kind': 'sd',
        'disper_ub': 4.,
        'lapse_max': 1e-3,
    }), (
    'thtr=10+trst=200+nfl=5+sm1=1+lpsub=1e-6+dsk=sd', {
        'preset_label': '# 10 tr, skip first 200,\nmed lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_kind': 'sd',
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    'thtr=10+trst=200+nfl=5+sm1=0+lpsub=1e-3+dsk=sd', {
        'preset_label': '# 10 tr, skip first 200,\nmed lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': False,
        'disper_kind': 'sd',
        'disper_ub': 2.,
        'lapse_max': 1e-3,
    }), (
    'thtr=10+trst=200+nfl=5+sm1=1+lpsub=1e-3+dsk=sd', {
        'preset_label': '# 10 tr, skip first 200,\nmed lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_kind': 'sd',
        'disper_ub': 2.,
        'lapse_max': 1e-3,
    }), (
    'thtr=10+trst=200+nfl=5+sm1=1+lpsub=1e-3', {
        'preset_label': '# 10 tr, skip first 200,\nmed lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_ub': 2.,
        'lapse_max': 1e-3,
    }), (
    'thtr=10+trst=200+nfl=5+sm1=1+lpsub=1e-6', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    'co=1+nfl=5+sm1=0+lpsub=1e-6', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': True,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': False,
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    'thtr=10+trst=200+nfl=5+sm1=0+lpsub=1e-6', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
        'sumto1_wi_cond': False,
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    # NOTE(review): key says thtr=10+trst=200 but values below are
    # trial_st=0, thres_n_tr=1 — key and values disagree; confirm.
    'thtr=10+trst=200+nfl=1+sm1=0+lpsub=1e-6+mtrn=easiest', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper, sum to 1\neasiest',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 1,
        'mode_train': 'easiest',
        'sumto1_wi_cond': False,
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    'thtr=10+trst=200+nfl=1+sm1=0+lpsub=1e-6', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper, sum to 1\nno crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 1,
        'mode_train': 'all',
        'sumto1_wi_cond': False,
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    # NOTE(review): this key is duplicated below with DIFFERENT values
    # (trial_st/thres_n_tr 0/1 here vs 200/10 below); the later duplicate
    # wins in odict([...]), so this entry is dead. Confirm which is intended.
    'thtr=10+trst=200+nfl=1+sm1=1+lpsub=1e-6', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper, sum to 1\nno crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 1,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    'thtr=10+trst=200+nfl=1+sm1=1+lpsub=1e-3', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper, sum to 1\nno crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 1,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_ub': 2.,
        'lapse_max': 1e-3,
    }), (
    'thtr=10+trst=200+nfl=1+sm1=1+lpsub=1e-6', {
        'preset_label': '# 10 tr, skip first 200,\nlow lapse, '
                        'high disper, sum to 1\nno crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 1,
        'mode_train': 'all',
        'sumto1_wi_cond': True,
        'disper_ub': 2.,
        'lapse_max': 1e-6,
    }), (
    'co=1+nfl=1+lpsub=1e-6+sm1=0', {
        'preset_label': 'correct only\nno crossval',
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': False,
        # 'sumto1_wi_cond': False,
        'correct_only': True,
        'n_fold_test': 1,
        'mode_train': 'all',
        'sumto1_wi_cond': False,
        'lapse_max': 1e-6,
    }), (
    'thtr=10+trst=200+nfl=1', {
        'preset_label': 'thres 10 tr,\nskip first 200 tr\nno crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 1,
        'mode_train': 'all',
    }), (
    'thtr=10+trst=200+nfl=5', {
        'preset_label': 'thres 10 tr,\nskip first 200 tr\n5-fold crossval',
        'trial_st': 200,
        'thres_n_tr': 10,
        'exclude_0coh': False,
        'correct_only': False,
        'n_fold_test': 5,
        'mode_train': 'all',
    })])
def get_subj_parad_bi_str(subj_parad_bis
                          : Iterable[Tuple[str, str, bool]] = None):
    """
    Build a human-readable 'modality, subject' label for each dataset.

    :param subj_parad_bis: [('subj', 'parad', is_bimanual), ...];
        defaults to the module-level subj_parad_bis0 when None.
        (Previously a None argument crashed with TypeError on iteration.)
    :return: list of str, e.g. 'eye, S1', 'bimanual, ID7'
    """
    if subj_parad_bis is None:
        subj_parad_bis = subj_parad_bis0
    ss = []
    for subj, parad, bimanual in subj_parad_bis:
        if parad in ['RT', 'eye']:
            s = 'eye, %s' % subj
        elif parad == 'bimanual' or bimanual:
            s = 'bimanual, %s' % subj
        elif parad == 'unibimanual' and not bimanual:
            s = 'unimanual, %s' % subj
        else:
            # e.g. the 'binary' paradigm falls through to 'parad, subj'.
            s = '%s, %s' % (parad, subj)
        ss.append(s)
    return ss
# Unique (paradigm, bimanual) combinations across the datasets, and the
# index of each dataset's combination (used to colour/label plots).
parad_bis, ix_parad_bi = np.unique(
    np.stack([v[1:] for v in subj_parad_bis0]), axis=0,
    return_inverse=True)
# Plot colour per (paradigm, bimanual) pair. The bimanual flag is the
# string 'True'/'False' because np.stack/np.unique stringify the tuples
# (consistent with the keys below).
colors_parad = {
    ('RT', 'False'): 'tab:orange',
    ('unibimanual', 'False'): 'tab:blue',
    ('unibimanual', 'True'): 'tab:cyan',
    ('binary', 'False'): 'plum',
}
# Legend label per (paradigm, bimanual) pair.
labels_parad = {
    ('RT', 'False'): 'eye',
    ('unibimanual', 'False'): 'unimanual',
    ('unibimanual', 'True'): 'bimanual',
    ('binary', 'False'): 'binary',
}
def ____Compare_recovery_methods____():
    """Section marker (IDE outline navigation only); never called."""
    pass
def main_compare_recovery_methods(
):
    """Run model recovery under every preset in `preset_recovery` and
    scatter-plot, per preset, the support each simulated dataset gives
    to the correct (generating) model."""
    # NOTE(review): recovery_methods is computed but never used.
    recovery_methods = list(preset_recovery.keys())
    # dlosses_by_method[method][seed, data, td_sim]
    dlosses_by_method = odict()
    for name, kw in preset_recovery.items():
        kw1 = deepcopy(kw)
        kw1.pop('preset_label')  # label is for figures, not a fit kwarg
        dlosses, td_fits = main_plot_recovery(
            to_plot=False,
            **kw1
        )[:2]
        dlosses_by_method[name] = dlosses
    # --- Scatterplot ---
    axs = plot_scatter_dloss(dlosses_by_method, td_fits)
    file = locfile.get_file_fig('scatter_by_recovery_method',
                                subdir='main_compare_recovery_methods')
    plt.savefig(file, dpi=300)
    print('Saved to %s' % file)
    # --- Bar plot across methods ---
    # NOTE - could add:
    # (1) mean dloss +- SEM
    # (2) P(correct sign(dloss))
    # plot_bar_mean_dloss_across_methods(dlosses_by_method, td_fits)
    # --- Bar plot of recovery & model selection within subj ---
    # for recovery_method, kw in enumerate(dlosses_by_method.items()):
    #     plot_bar_dloss_across_subjs(dlosses_by_method, td_fits)
    print('--')
def plot_scatter_dloss(dlosses_by_method, td_fits):
    """One scatter panel per recovery method: x = support for serial when
    serial generated the data, y = support for parallel when parallel
    generated it (both in log10 likelihood units, clipped to +-10 with
    jitter added beyond the clip so saturated points stay visible).

    :param dlosses_by_method: {preset_name: dlosses[seed, data, td_sim]}
    :param td_fits: labels of the fitted models ('ser_np', 'par_np')
    :return: axs (plt2.GridAxes)
    """
    n_methods = len(preset_recovery)
    axs = plt2.GridAxes(
        1, n_methods,
        widths=1.25, heights=1.25,
        top=0.75, left=1.1, bottom=1.
    )
    td_fits = list(td_fits)
    hs = []
    # dlosses_all = np.stack(v for v in dlosses_by_method.values())
    # max_dloss = np.amax(dlosses_all)
    # min_dloss = np.amin(dlosses_all)
    # d_dloss = max_dloss - min_dloss
    # lim = [min_dloss - d_dloss * 0.05, max_dloss + d_dloss * 0.05]
    for i, (name, kw) in enumerate(preset_recovery.items()):
        ax = axs[0, i]
        plt.sca(ax)
        # plt.xscale('log')
        # plt.yscale('log')
        dloss = dlosses_by_method[name]
        for j, parad_bi in enumerate(parad_bis):
            incl = ix_parad_bi == j
            # Convert natural-log differences to log10; only seed 0 is used.
            ser = -dloss[0, incl, td_fits.index('ser_np')] / np.log(10.)
            par = dloss[0, incl, td_fits.index('par_np')] / np.log(10.)
            ser = np.clip(ser, a_min=-10, a_max=10)
            par = np.clip(par, a_min=-10, a_max=10)
            def add_jitter(v, vmax=10):
                # Spread out points sitting at the clip boundary so they
                # do not overplot each other.
                incl_jitter = np.abs(v) >= vmax
                v[incl_jitter] = (
                    v[incl_jitter] + np.sign(v[incl_jitter]) *
                    np.random.rand(np.sum(incl_jitter)))
                return v
            ser = add_jitter(ser)
            par = add_jitter(par)
            h = plt.plot(ser, par, '.', color=colors_parad[tuple(parad_bi)])
            plt.axis('square')
            if i == 0:
                # Collect one legend handle per paradigm (first panel only).
                hs.append(h[0])
        plt.xticks([-10, 0, 10], [r'$\leq$-10', '0', r'$\geq$10'])
        plt.yticks([-10, 0, 10], [r'$\leq$-10', '0', r'$\geq$10'])
        plt.xlim([-11, 11])
        plt.ylim([-11, 11])
        # plt.xlim(lim)
        # plt.ylim(lim)
        plt.axhline(0, color='gray', linewidth=0.5, linestyle='--')
        plt.axvline(0, color='gray', linewidth=0.5, linestyle='--')
        plt2.box_off()
        plt.title(kw['preset_label'])
        if i == 0:
            plt.xlabel('correct support\nfor serial\n'
                       r'($\Delta\mathrm{log}_{10}\mathcal{L}$)')
            plt.ylabel('correct support\nfor parallel\n'
                       r'($\Delta\mathrm{log}_{10}\mathcal{L}$)')
        else:
            ax.set_xticklabels([])
            ax.set_yticklabels([])
    # plt2.sameaxes(axs[:])
    plt.figlegend(hs, [labels_parad[tuple(k)] for k in parad_bis],
                  loc='lower right', frameon=False,
                  handletextpad=0.4
                  )
    for i in range(n_methods):
        plt.sca(axs[0, i])
        plt2.patch_chance_level(1, xy='x')
        plt2.patch_chance_level(1, xy='y')
    return axs
def ____Real_data___():
    """Section marker (IDE outline navigation only); never called."""
    pass
def main_plot_real_data(
        mode_train='all',
        n_fold_test=5,
        to_plot=True,
        **kwargs,
):
    """
    Fit both models (serial/parallel) to each subject's real data, cache the
    crossvalidated test losses, and plot the per-subject model comparison.

    :param mode_train: 'all' or 'easiest' — which trials to train on.
    :param n_fold_test: number of crossvalidation folds; when None it is
        derived from mode_train (5 for 'all', 1 for 'easiest').
    :param to_plot: if True, save the per-subject dloss bar plot.
    :param kwargs: forwarded to np2d.main_fit and mixed into the cache key.
    :return: (
        dlosses, td_fits, losses, ds_cache,
        ix_datas, subj_parad_bis,
        dict_cache, dict_subdir
        )
    """
    if n_fold_test is None:
        if mode_train == 'all':
            n_fold_test = 5
        elif mode_train == 'easiest':
            n_fold_test = 1
        else:
            raise ValueError()
    sbj_str = get_subj_parad_bi_str(subj_parad_bis0)
    # Cache key; entries in kwargs override the literal defaults above them.
    cache = locfile.get_cache('mdlcmp', {
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': True,
        'correct_only': True,
        'n_fold_test': n_fold_test,
        'mode_train': mode_train,
        'sbj': '%s-%s' % (
            sbj_str[0], sbj_str[-1]
        ),
        **kwargs
    }, subdir='main_plot_real_data')
    if cache.exists():
        dlosses, td_fits, losses, ds_cache, \
            ix_datas, subj_parad_bis, \
            dict_cache, dict_subdir \
            = cache.getdict([
                'dlosses', 'td_fits', 'losses', 'ds_cache',
                'ix_datas', 'subj_parad_bis',
                'dict_cache', 'dict_subdir'
            ])
    else:
        ds_cache = []
        ds = []
        subj_parad_bis = subj_parad_bis0
        for ix_data, (subj, parad, bimanual) in enumerate(subj_parad_bis):
            for i_fold_test in range(n_fold_test):
                for dtb2d in dtb2ds:
                    # --- Load model fit to real data
                    model, data, dict_cache, dict_subdir, d = np2d.main_fit(
                        dtb2d=dtb2d, subj=subj,
                        parad=parad, bimanual=bimanual,
                        mode_train=mode_train,
                        fit_mode='d_only',
                        # fit_mode='auto',  # CHECKED
                        i_fold_test=i_fold_test,
                        n_fold_test=n_fold_test,
                        max_epoch=max_epoch,
                        to_plot_progress=to_plot_progress,
                        **kwargs,
                    )  # type: (Any, sim2d.Data2DRT, Any, ...)
                    # axs = sim2d.plot_rt_distrib(
                    #     npy(d['out_train_valid']),
                    #     data.ev_cond_dim,
                    #     alpha_face=0.,
                    #     colors=['b', 'b']
                    # )[0]
                    # axs = sim2d.plot_rt_distrib(
                    #     npy(d['target_train_valid']),
                    #     data.ev_cond_dim,
                    #     alpha_face=0.,
                    #     colors=['k', 'k'],
                    #     axs=axs,
                    # )[0]
                    # for ext in ['.pdf', '.png']:
                    #     file = np2d.locfile.get_file_fig(
                    #         'rtdstr', dict_cache, ext=ext, subdir=dict_subdir
                    #     )
                    #     plt.savefig(file, dpi=72, figure=axs[0, 0].figure)
                    #     print('Saved to %s' % file)
                    ds.append(d)
                    ds_cache.append({
                        **dict_cache, **{
                            'ix_data': ix_data
                        }
                    })
                    plt.close('all')
        ds = np2.listdict2dictlist(ds)
        ds_cache = np2.listdict2dictlist(ds_cache)
        ix_data = ds_cache['ix_data']
        ix_datas = np.unique(ix_data)
        # subjs, ix_subj = np.unique(ds_cache['sbj'], return_inverse=True)
        mode_trains, ix_mode_train = np.unique(
            ds_cache['mdtrn'], return_inverse=True)
        td_fits, ix_td_fit = np.unique(ds_cache['td'], return_inverse=True)
        losses_all = np.array(ds['loss_NLL_test'])
        # losses[subj, td_fit]: summed over crossvalidation folds.
        losses = npg.aggregate([ix_data, ix_td_fit
                                ], losses_all, 'sum')
        # dlosses: [ix_data]: negative supports serial
        dlosses = (losses[:, list(td_fits).index('ser_np')]
                   - losses[:, list(td_fits).index('par_np')])
        cache.set({
            'dlosses': dlosses,
            'td_fits': td_fits,
            'losses': losses,
            'ds_cache': ds_cache,
            'ix_datas': ix_datas,
            'subj_parad_bis': subj_parad_bis,
            'dict_cache': dict_cache,
            'dict_subdir': dict_subdir,
        })
        cache.save()
    del cache
    if to_plot:
        subj_parad_bi_str = get_subj_parad_bi_str(subj_parad_bis)
        dict_file = {
            **dict_cache,
            'td_fit': '[%s]' % ','.join(td_fits_short),
            'sbj': '[%s-%s]' % (subj_parad_bi_str[0], subj_parad_bi_str[-1]),
            'mdtrn': mode_train,
        }
        for subdir in [dict_subdir, 'main_plot_real_data']:
            # BUGFIX: the arguments were previously passed positionally,
            # which bound ix_datas to `elosses` (drawn as error bars) and
            # subj_parad_bis to `ix_datas`. Keywords bind them correctly.
            axs = plot_bar_dloss_across_subjs(
                dlosses, ix_datas=ix_datas, subj_parad_bis=subj_parad_bis)
            plt.title('Actual Data\n(fit to %s)' % mode_train)
            file = locfile.get_file_fig('dloss_fit_real', dict_file,
                                        subdir=subdir)
            plt.savefig(file, dpi=300)
            print('Saved to %s' % file)
    print('--')
    return (
        dlosses, td_fits, losses, ds_cache,
        ix_datas, subj_parad_bis,
        dict_cache, dict_subdir
    )
def plot_bar_dloss_across_subjs(
        dlosses, elosses=None, ix_datas=None,
        subj_parad_bis: Iterable[Tuple[str, str, bool]] = None,
        axs: Union[plt2.GridAxes, plt2.AxesArray] = None,
        vmax=None,
        add_scale=True,
        base=10.,
):
    """
    Horizontal bar chart of the per-dataset model-comparison score.

    NOTE(review): call sites in this file invoke this function positionally
    as plot_bar_dloss_across_subjs(dlosses, ix_datas, subj_parad_bis), which
    binds ix_datas to `elosses` (drawn as xerr) and subj_parad_bis to
    `ix_datas` — the signature appears to have gained `elosses` without the
    callers being updated. Call with keywords.

    :param dlosses: [ix_data] per-dataset score; per the callers' convention,
        negative supports serial, positive supports parallel.
    :param elosses: [ix_data] error-bar half-widths (xerr), or None for none.
    :param ix_datas: unused (only referenced in commented-out code).
    :param axs: axes to draw into; a new GridAxes is created when None.
    :param subj_parad_bis: [('subj', 'parad', is_bimanual), ...];
        defaults to subj_parad_bis0.
        NOTE(review): `subj_parad_bis[ix]` below indexes with an ndarray,
        which fails on a plain list — presumably consts.SUBJ_PARAD_BI is an
        array-like; confirm.
    :param vmax: x-axis half-range; defaults to max |dloss|.
    :param add_scale: kept for API compatibility (the scale annotations are
        commented out below).
    :param base: log base quoted in the x label.
    :return: axs
    """
    if subj_parad_bis is None:
        subj_parad_bis = subj_parad_bis0
    if vmax is None:
        vmax = np.amax(np.abs(dlosses))
    # order: eye S1-S3, hand by ID, paired uni-bimanual
    # Zero-pad single-digit 'IDx' codes so lexical/numeric ordering agrees.
    subjs, parads, bis = zip(*subj_parad_bis)
    subjs = np.array(['ID0' + v[-1] if v[:2] == 'ID' and len(v) == 3 else v
                      for v in subjs])
    parads = np.array(parads)
    bis = np.array(bis)
    is_eye = parads == 'RT'
    is_bin = parads == 'binary'
    ix = np.arange(len(subjs))
    def filt_sort(filt):
        # Sort the selected datasets by the numeric part of the subject code.
        # NOTE(review): subj[1:] keeps the 'D' for padded 'ID0x' codes, so
        # int() would raise there — confirm the subject code format.
        # return ix[filt]
        ind = [int(subj[1:]) for subj in subjs[filt]]
        return ix[filt][np.argsort(ind)]
        # return ix[filt][np.argsort(subjs[filt])]
    # Final display order: eye subjects, then uni/bimanual pairs interleaved,
    # then binary-paradigm datasets.
    ix = np.concatenate([
        filt_sort(is_eye & ~is_bin),
        np.stack([filt_sort(~is_eye & ~bis & ~is_bin),
                  filt_sort(~is_eye & bis & ~is_bin)
                  ], -1).flatten('C'),
        filt_sort(is_bin)
    ])
    subjs = subjs[ix]
    parads = parads[ix]
    bis = bis[ix]
    is_eye = is_eye[ix]
    dlosses = dlosses[ix]
    subj_parad_bis = subj_parad_bis[ix]
    n_eye = int(np.sum(is_eye))
    n_hand = int(np.sum(~is_eye))
    # y positions: eye rows evenly spaced; hand rows grouped in uni/bi pairs
    # (gap of 1.5 between pairs, 1 within a pair).
    y = np.empty([n_eye + n_hand])
    y[is_eye] = 1.5 + np.arange(n_eye)
    y[~is_eye] = n_eye - 1 + 1.5 + np.cumsum([1.5, 1.] * (n_hand // 2))
    y_max = np.amax(y) + 1.5
    if axs is None:
        axs = plt2.GridAxes(
            nrows=1, ncols=1,
            heights=y_max * 0.2,  # len(dlosses) * 0.3,
            widths=2,
            left=1.5, right=0.25,
            bottom=0.85
        )
    # print(axs.h)
    ax = axs[0, 0]
    plt.sca(ax)
    m = dlosses
    if elosses is None:
        e = np.zeros_like(m)
    else:
        e = elosses
    # One bar per dataset, coloured by paradigm.
    for y1, m1, e1, parad1, bi1 in zip(y, m, e, parads, bis):
        plt.barh(y1, m1, xerr=e1,
                 color=colors_parad[(parad1, '%s' % bi1)],
                 edgecolor='None')
    if add_scale:
        dy = y[1] - y[0]
        # for x, ha in zip([-vmax, vmax], ['left', 'right']):
        #     plt.plot([x, x], y[-1] + np.array([-0.5, 0.5]) * dy,
        #              linestyle=':', color='gray', linewidth=0.5)
        #     plt.text(x * 0.95, y[-1], r'$%s\mathrm{log}\,10^{%g}$'
        #              % ('-' if x == -vmax else '+', np.log10(vmax)),
        #              va='center', ha=ha)
    # max_loss = np.amax(np.abs(m) + e)
    # if ix_datas is None:
    #     ix_datas = np.arange(len(dlosses))
    # y = ix_datas
    # plt.barh(y, m / np.log(10),
    #          xerr=e,
    #          color='w',
    #          edgecolor='k')
    # plt2.patch_chance_level(1, xy='x')
    # plt.axvline(0, color='gray', linewidth=0.5, linestyle='--', zorder=-1)
    axvline_dcost()
    # plt.xlim(np.array([-max_loss, max_loss]) * 1.05)
    # plt.xlabel('Support\nfor parallel')
    # ix_big = np.nonzero([tuple(v) == ('ID7', 'unibimanual', True) for v
    #                      in subj_parad_bis])[0][0]
    x_lim = [-vmax * 1.2, vmax * 1.2]
    # Mark off-scale bars with a "wave" break at the axis edge.
    for ix_big in range(len(y)):
        if np.abs(m[ix_big]) > vmax:
            for i_sign, sign in enumerate([1, -1]):
                plt2.patch_wave(y[ix_big], x_lim[i_sign] * 1.01,
                                ax=ax, color='w',
                                wave_margin=0.15,
                                wave_amplitude=sign * 0.025,
                                )
    plt.xlim(x_lim)
    plt.ylim([y_max - 1., 1.])
    # plt.ylim([-0.75, len(y) - 0.25])
    xticks_serial_vs_parallel(vmax, base)
    subj_parad_bi_str = get_subj_parad_bi_str(subj_parad_bis)
    plt.yticks(y, subj_parad_bi_str)
    plt2.detach_axis('y', y[0], y[-1])
    plt2.detach_axis('x', -vmax, vmax)
    # # CHECKED
    # file = '../../Data_2D/Data_2D_Py/a0_dtb/RTNonparamMATLAB/model_comp.png'
    # plt.savefig(file, dpi=300)
    # axs = None
    # print('Saved to %s' % file)
    return axs
def xticks_serial_vs_parallel(vmax, base):
    """Place x ticks at -vmax, 0, +vmax and label the axis as a
    log-`base` Bayes factor in favour of the parallel model."""
    tick_positions = [-vmax, 0, vmax]
    plt.xticks(tick_positions)
    label = ('support for parallel model\n'
             + r'($\mathrm{log}_{%g}\mathrm{BF}$)' % base)
    plt.xlabel(label)
def axvline_dcost(BF=100., base=10.):
    """Draw a dashed vertical at 0 and silver reference lines at
    +-log_base(BF), then remove the axes box."""
    plt.axvline(0, color='k', linewidth=0.5, linestyle='--', zorder=1)
    threshold = np.log(BF) / np.log(base)
    for x in (-threshold, threshold):
        plt.axvline(x,
                    color='silver',
                    linewidth=0.5,
                    linestyle='--', zorder=1)
    plt2.box_off()
def ____Simulated_data____():
    """Section marker (IDE outline navigation only); never called."""
    pass
def main_plot_recovery(
        mode_train='all',
        n_fold_test=None,
        to_plot=True,
        **kwargs,
) -> (np.ndarray, List[str], np.ndarray, Dict[str, list]):
    """
    Model recovery: simulate data from each fitted model (serial/parallel),
    refit both models to the simulated data, and compare test losses.

    :param mode_train: 'all' or 'easiest' — which trials to train on.
    :param n_fold_test: number of crossvalidation folds; when None it is
        derived from mode_train (5 for 'all', 1 for 'easiest').
    :param to_plot: if True, save per-subject dloss bar plots, one column
        per simulating model.
    :param kwargs: forwarded to main_fit_sim and mixed into the cache key.
    :return: (
        dlosses[seed, data, td_sim],
        td_fits[model]: str,
        losses[seed, subj, td_sim, td_fit],
        ds_cache[field][ix_data],
        td_sims, ix_datas, subj_parad_bis, seed_sim,
        dict_fit_sim, dict_subdir
        )
    """
    if n_fold_test is None:
        if mode_train == 'all':
            n_fold_test = 5
        elif mode_train == 'easiest':
            n_fold_test = 1
        else:
            raise ValueError()
    sbj_str = get_subj_parad_bi_str(subj_parad_bis0)
    # Cache key; entries in kwargs override the literal defaults above them.
    cache = locfile.get_cache('recovery', {
        'trial_st': 0,
        'thres_n_tr': 1,
        'exclude_0coh': True,
        'correct_only': True,
        'n_fold_test': n_fold_test,
        'mode_train': mode_train,
        'sbj': '%s-%s' % (
            sbj_str[0], sbj_str[-1]
        ),
        **kwargs
    }, subdir='main_plot_recovery')
    if cache.exists():
        try:
            dlosses, td_fits, losses, ds_cache, \
                td_sims, ix_datas, subj_parad_bis, seed_sim, \
                dict_fit_sim, dict_subdir \
                = cache.getdict([
                    'dlosses', 'td_fits', 'losses', 'ds_cache',
                    'td_sims', 'ix_datas',
                    'subj_parad_bis',
                    'seed_sim',
                    'dict_fit_sim', 'dict_subdir'
                ])
        except KeyError:  # backward compatibility with old cache format
            print('subj_parad_bis missing: falling back to old cache for %s'
                  % cache.fullpath)
            dlosses, td_fits, losses, ds_cache, \
                td_sims, ix_datas, _, seed_sim, \
                dict_fit_sim, dict_subdir \
                = cache.getdict([
                    'dlosses', 'td_fits', 'losses', 'ds_cache',
                    'td_sims', 'ix_datas',
                    'subj_parad_bi',
                    'seed_sim',
                    'dict_fit_sim', 'dict_subdir'
                ])
            subj_parad_bis = subj_parad_bis0
    else:
        ds = []
        ds_cache = []
        subj_parad_bis = subj_parad_bis0
        # Full cross of dataset x seed x fold x simulating model x fitted model.
        for ix_data, (subj, parad, bimanual) in enumerate(subj_parad_bis):
            for seed_sim in range(1):
                for i_fold_test in range(n_fold_test):
                    for dtb2d_sim in dtb2ds:
                        for dtb2d_fit in dtb2ds:
                            d, dict_fit_sim, dict_subdir = main_fit_sim(
                                subj=subj,
                                parad=parad,
                                bimanual=bimanual,
                                seed_sim=seed_sim,
                                dtb2d_sim=dtb2d_sim,
                                dtb2d_fit=dtb2d_fit,
                                mode_train=mode_train,
                                n_fold_test=n_fold_test,
                                i_fold_test=i_fold_test,
                                **kwargs,
                            )
                            ds.append(d)
                            ds_cache.append({
                                **dict_fit_sim, **{
                                    'ix_data': ix_data
                                }
                            })
                            plt.close('all')
        ds = np2.listdict2dictlist(ds)
        ds_cache = np2.listdict2dictlist(ds_cache)
        ix_data = ds_cache['ix_data']
        ix_datas = np.unique(ix_data)
        # subjs, ix_subj = np.unique(ds_cache['sbj'], return_inverse=True)
        td_fits, ix_td_fit = np.unique(ds_cache['td_fit'], return_inverse=True)
        losses_all = np.array(ds['loss_NLL_test'])
        seed_sim = np.array(ds_cache['seed_sim'])
        td_sims, ix_td_sim = np.unique(ds_cache['td_sim'], return_inverse=True)
        # losses[seed, subj, td_sim, td_fit]
        # NOTE: aggregate takes care of averaging across i_fold_tests
        losses = npg.aggregate([
            seed_sim, ix_data, ix_td_sim, ix_td_fit
        ], losses_all, 'mean')
        # dlosses: [seed, data, td_sim]: negative supports serial
        dlosses = (losses[:, :, :, list(td_fits).index('ser_np')]
                   - losses[:, :, :, list(td_fits).index('par_np')])
        cache.set({
            'dlosses': dlosses,
            'td_fits': td_fits,
            'losses': losses,
            'ds_cache': ds_cache,
            'td_sims': td_sims,
            'ix_datas': ix_datas,
            'subj_parad_bis': subj_parad_bis,
            'seed_sim': seed_sim,
            'dict_fit_sim': dict_fit_sim,
            'dict_subdir': dict_subdir,
        })
        cache.save()
    del cache
    if to_plot:
        # mean_dlosses: [mode_train, subj, td_sim]
        mean_dlosses = np.mean(dlosses, 0)
        se_dlosses = np2.sem(dlosses, 0)
        axs = plt2.GridAxes(
            nrows=1, ncols=len(td_sims),
            heights=dlosses.shape[1] * 0.3,
            widths=2,
            left=1.5,
            bottom=0.75,
            top=0.7,
        )
        # NOTE: col = 1 - i_sim assumes exactly two simulating models.
        for i_sim in range(len(td_sims)-1, -1, -1):
            col = 1 - i_sim
            # BUGFIX: previously passed positionally, which bound ix_datas
            # to `elosses` (drawn as error bars) and subj_parad_bis to
            # `ix_datas`. Keywords bind them correctly.
            plot_bar_dloss_across_subjs(
                mean_dlosses[:, i_sim],
                ix_datas=ix_datas,
                subj_parad_bis=subj_parad_bis,
                axs=axs[:, [col]]
            )
            plt.title('Simulated\n%s' % td_sims[i_sim][:3])
            if col != 0:
                plt2.box_off(['left'])
                plt.yticks([])
        axs.suptitle(mode_train)
        d_file = deepcopy(dict_fit_sim)
        for k in ['sbj', 'prd', 'td_sim', 'td_fit', 'seed_sim']:
            d_file.pop(k)
        for subdir in ['main_plot_recovery', dict_subdir]:
            file = locfile.get_file_fig('dloss_fit', {
                **d_file,
                'tdsm': '[%s]' % ','.join(td_sims_short),
                'tdft': '[%s]' % ','.join(td_fits_short),
                'nsbj': '%d' % len(subj_parad_bis),
                'mdtrn': mode_train,
                'sdsm': '[%g-%g]' % (seed_sim[0], seed_sim[-1]),
            }, subdir=subdir)
            plt.savefig(file, dpi=300)
            print('Saved to %s' % file)
    print('--')
    return (
        dlosses, td_fits, losses, ds_cache,
        td_sims, ix_datas, subj_parad_bis, seed_sim,
        dict_fit_sim, dict_subdir
    )
def main_fit_sim(
        subj='S1',
        parad='RT',
        bimanual=False,
        seed_sim=0,
        dtb2d_sim: Type = sim2d.RTNonparam2DSer,
        dtb2d_fit: Type = sim2d.RTNonparam2DSer,
        mode_train='easiest',
        rt_only=None,
        i_fold_test=0,
        n_fold_test=1,
        **kwargs,
):
    """
    One recovery cell: simulate data from `dtb2d_sim` (fitted to the given
    subject's real data) and fit `dtb2d_fit` to the simulated data. Both the
    simulated data and the resulting fit are cached on disk.

    :param subj: subject code.
    :param parad: paradigm name.
    :param bimanual: whether the bimanual condition is used.
    :param seed_sim: RNG seed for the simulation.
    :param dtb2d_sim: model class that generates the simulated data.
    :param dtb2d_fit: model class fitted to the simulated data.
    :param mode_train: 'all' or 'easiest'.
    :param rt_only: simulate RT only (keep choices); when None it is inferred
        from whether dtb2d_fit is a nonparametric-RT model.
    :param i_fold_test: crossvalidation fold index.
    :param n_fold_test: number of crossvalidation folds.
    :param kwargs: forwarded to np2d.main_fit.
    :return: d, dict_fit_sim, dict_subdir
    """
    # --- Load model fit to real data
    model, data, dict_cache, dict_subdir, d = np2d.main_fit(
        dtb2d=dtb2d_sim, subj=subj,
        parad=parad, bimanual=bimanual,
        # fit_mode: we may not need to run the model at all if the cached
        # simulation is available.
        fit_mode='d_only',
        i_fold_test=i_fold_test,
        mode_train=mode_train,
        n_fold_test=n_fold_test,
        to_plot_progress=to_plot_progress,
        **kwargs,
    )
    if rt_only is None:
        rt_only = (
            issubclass(dtb2d_fit, sim2d.RTNonparam2D)
            or isinstance(dtb2d_fit, sim2d.RTNonparam2D))
    # --- Simulate new data (from the model 'fit_sim') and save
    dict_subdir.update({
        'rto': rt_only
    })
    # Cache key for the simulated dataset: replace the fitted-model tag 'td'
    # with 'td_sim' and record the simulation seed.
    dict_sim = {
        **dict_cache,
        'td_sim': dict_cache['td'],
        'seed_sim': seed_sim,
    }
    dict_sim.pop('td')
    dict_fit_sim = {
        **dict_sim,
        'td_fit': dtb2d_fit.kind
    }
    cache_fit_sim = locfile.get_cache(
        'fit_sim', dict_fit_sim, subdir=dict_subdir)
    if cache_fit_sim.exists():
        best_state, d = cache_fit_sim.getdict([
            'best_state', 'd'
        ])
    else:
        # --- Get/fit the model for simulation
        model, data, dict_cache, dict_subdir, d = np2d.main_fit(
            dtb2d=dtb2d_sim, subj=subj,
            parad=parad, bimanual=bimanual,
            mode_train=mode_train,
            # fit_mode: not 'd_only', since we need d['out_all']
            # since we need to simulate the data
            fit_mode='auto',
            i_fold_test=i_fold_test,
            n_fold_test=n_fold_test,
            to_plot_progress=to_plot_progress,
            **kwargs,
        )
        # --- Simulate new data and save
        data_sim = deepcopy(data)  # type: sim2d.Data2DRT
        cache_data_sim = locfile.get_cache(
            'data_sim', dict_sim, subdir=dict_subdir)
        if cache_data_sim.exists():
            # Reuse previously simulated choices/RTs.
            data_sim.update_data(
                ch_tr_dim=cache_data_sim.getdict(['chSim_tr_dim'])[0],
                rt_tr=cache_data_sim.getdict(['rtSim_tr'])[0]
            )
        else:
            # np2.dict_shapes(d)  # CHECKED
            # Keep pre-simulation copies only to report how much changed.
            ch_tr_dim_bef = data_sim.ch_tr_dim.copy()
            rt_tr_bef = data_sim.rt_tr.copy()
            data_sim.simulate_data(
                pPred_cond_rt_ch=d['out_all'],
                seed=seed_sim,
                rt_only=rt_only,  # since nonparam model fits RT only
            )
            ch_tr_dim_aft = data_sim.ch_tr_dim.copy()
            rt_tr_aft = data_sim.rt_tr.copy()
            print('Proportion of trials with the same choice:')
            print(np.mean(ch_tr_dim_bef == ch_tr_dim_aft))
            print('Mean absolute RT difference:')
            print(np.mean(np.abs(rt_tr_bef - rt_tr_aft)))
            cache_data_sim.set({
                'chSim_tr_dim': data_sim.ch_tr_dim,
                'rtSim_tr': data_sim.rt_tr
            })
            cache_data_sim.save()
        del cache_data_sim
        # --- Fit simulated data
        model, data, _, _, d = np2d.main_fit(
            dtb2d=dtb2d_fit, data=data_sim,
            dict_cache=dict_fit_sim,
            dict_subdir=dict_subdir,
            to_save_res=True,
            locfile1=locfile,
            mode_train=mode_train,
            i_fold_test=i_fold_test,
            n_fold_test=n_fold_test,
            max_epoch=max_epoch,
            to_plot_progress=to_plot_progress,
            **kwargs,
        )
        # Persist only the best state and the loss entries of d.
        cache_fit_sim.set({
            'best_state': d['best_state'],
            'd': {k: v for k, v in d.items()
                  if k.startswith('loss_')}
        })
        cache_fit_sim.save()
        # # CHECKED
        # print(d['loss_all'])
        # print(best_state['dtb.dtb.dtb1ds.0.kb2._param'])
    del cache_fit_sim
    print('--')
    return d, dict_fit_sim, dict_subdir
def ____Main____():
    """Section marker (IDE outline navigation only); never called."""
    pass
if __name__ == '__main__':
    # if torch.cuda.is_available():
    #     torch.set_default_tensor_type(torch.cuda.FloatTensor)
    # Single-threaded, double-precision torch for reproducible fits.
    torch.set_num_threads(1)
    torch.set_default_dtype(torch.double)
    # main_compare_recovery_methods()
    kw1 = deepcopy(preset_recovery[preset1])
    kw1.pop('preset_label')  # label is for figure annotation, not a fit kwarg
    main_plot_real_data(**kw1)
    # main_plot_recovery(**kw1)
from .clustering import AutoencoderTSNE
from .autoencoder import Autoencoder
|
import os
import sys
import string
import pyspark
import itertools
conf = pyspark.SparkConf()
sc = pyspark.SparkContext(conf=conf)

# Command-line arguments: input folder, stopword file, output file.
datafiles_folder = sys.argv[1]
stopwords_file_path = sys.argv[2]
out_file_path = sys.argv[3]

# Load stopwords as a set: membership tests in remove_stopwords become O(1)
# per token (a list made each test O(len(stopwords))), and the context
# manager closes the file handle that was previously leaked.
with open(stopwords_file_path, "r") as stopwords_file:
    stopwords = {w.strip("\n") for w in stopwords_file}
def remove_stopwords(l):
    """Drop every space-delimited token of `l` found in the module-level
    stopword collection; return the remaining tokens space-joined."""
    kept = (token for token in l.split(" ") if token not in stopwords)
    return " ".join(kept)
def strip_punctuation(l):
    """Delete every ASCII punctuation character from `l`."""
    delete_punct = str.maketrans('', '', string.punctuation)
    return l.translate(delete_punct)
def remove_stray_spaces(l):
    """Collapse runs of whitespace into single spaces and trim both ends."""
    tokens = l.split()
    return " ".join(tokens)
def is_indep_number(s):
    """Return True if token `s` is a standalone number (digits or anything
    float() accepts, e.g. '3.5', '1e5', '-2')."""
    if s.isdigit():
        return True
    try:
        float(s)
        return True
    except ValueError:
        # Was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit; float(str) only raises ValueError.
        return False
def remove_indep_numbers(l):
    """Drop space-delimited tokens that parse as standalone numbers
    (the number test is inlined from is_indep_number)."""
    kept = []
    for token in l.split(" "):
        if token.isdigit():
            continue
        try:
            float(token)
        except ValueError:
            kept.append(token)
    return " ".join(kept)
def filter_empty_and_none(l):
    """RDD filter predicate: keep lines that are neither None nor empty."""
    if l is None:
        return False
    return len(l) > 0
# Step 1 ======================================================================
# Preprocessing
# 1. To lowercase
# 2. Remove stopwords, drop punctuation, drop independent numbers
# =============================================================================
# Read every file in the input folder as a single RDD of lines.
datafile_rdd = sc.textFile(os.path.join(datafiles_folder))
d = (
    datafile_rdd
    .map(lambda x: x.lower())
    .map(remove_stopwords)
    .map(strip_punctuation)
    .map(remove_indep_numbers)
    .map(remove_stray_spaces)
    .filter(filter_empty_and_none)
)
# Step 2 ======================================================================
# Compute the count of every word pair in the resulting documents. Note that
# <w1, w2> and <w2, w1> are considered the same word pair.
# =============================================================================
# NOTE(review): itertools.combinations emits pairs in within-line order, so
# ('a', 'b') and ('b', 'a') originating from different lines are counted under
# separate keys. If the spec above requires them merged, each pair should be
# normalized (e.g. tuple(sorted(pair))) before counting -- confirm intent.
pairs = (
    d.flatMap(lambda x: itertools.combinations(x.strip("\n").split(" "), 2))
    .filter(lambda x: x[0] != x[1])
    .map(lambda x: (x, 1))
    .reduceByKey(lambda x, y: x + y)
)
# Step 3 ======================================================================
# Sort the list of word pairs in descending order and obtain the top-k
# frequently occurring word pairs. Use k=5.
# =============================================================================
k = 5
# All pairs are collected to the driver before sorting; fine for small data.
ranks = sorted(pairs.collect(), key=lambda x: x[1], reverse=True)
# Step 4 ======================================================================
# Output one line per word pair: <word pair> <count> sorted in descending order
# =============================================================================
with open(out_file_path, 'w+') as f:
    for ((w1, w2), count) in ranks[:k]:
        f.write(f"{w1} {w2} {count}\n")
sc.stop()
import sys
sys.path.append('..')
import os
import time
import tensorflow as tf
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import cv2
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import random
import imageio
import urllib.request
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Paths to the frozen TF object-detection graph and its label map.
PATH_TO_CKPT = '/home/recsys/xxx/wm_pb4/frozen_inference_graph.pb'
PATH_TO_LABELS = '/home/recsys/xxx/wm_data/wm_label_map2.pbtxt'
NUM_CLASSES = 11       # number of watermark classes in the label map
IMAGE_FOLDER = 'images'  # local folder downloads are written into
# Build the class-id -> display-name index used when drawing detections.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the frozen graph once at import time and keep one global session.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.5  # cap GPU memory use
sess = tf.Session(graph=detection_graph, config=config)
def down2file(url, filename, folder=IMAGE_FOLDER):
    """Download *url* and save it as *folder*/*filename*.

    Fixes:
    - the original checked/created ``IMAGE_FOLDER`` even when a different
      *folder* argument was passed, so custom folders were never created;
    - the file handle leaked if the download raised; writing now happens
      after the download inside a ``with`` block, so a failed download no
      longer leaves a truncated/empty file behind.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    print('downloading file:')
    req = urllib.request.Request(url)
    data = urllib.request.urlopen(req).read()
    with open(os.path.join(folder, filename), 'wb') as f:
        f.write(data)
    print('download ' + filename + ' OK!')
def wm_remove(img, ymin, xmin, ymax, xmax):
    """Inpaint one watermark box and return (patched_region, padded_box).

    The detection box is padded by 5 px on the top/left/right and 12 px at
    the bottom, clamped to the image borders, before TELEA inpainting.
    The returned box is [ymin, ymax, xmin, xmax] in padded pixel coords.
    """
    height = img.shape[0]
    width = img.shape[1]
    # Pad and clamp -- same arithmetic as the original branching code.
    ymin = max(ymin - 5, 0)
    xmin = max(xmin - 5, 0)
    xmax = min(xmax + 5, width)
    ymax = min(ymax + 12, height)
    mask = np.zeros((height, width), dtype=np.uint8)
    mask[ymin:ymax, xmin:xmax] = 255
    dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
    return dst[ymin:ymax, xmin:xmax, :], [ymin, ymax, xmin, xmax]
def wm_remove2(img, box_map):  # it should be removed
    """Inpaint every detected watermark box in *img* in one pass.

    *box_map* maps normalized (ymin, xmin, ymax, xmax) boxes to colors
    (colors are ignored here). Returns (inpainted_image, rects), where
    rects is a list of padded pixel boxes [ymin, ymax, xmin, xmax].
    """
    height = img.shape[0]
    width = img.shape[1]
    mask = np.zeros((height, width), dtype=np.uint8)
    rects = []
    for box in box_map:
        # Convert normalized coords to pixels, then pad and clamp.
        ymin, xmin, ymax, xmax = box
        ymin = max(int(ymin * height) - 5, 0)
        xmin = max(int(xmin * width) - 5, 0)
        xmax = min(int(xmax * width) + 5, width)
        ymax = min(int(ymax * height) + 12, height)
        mask[ymin:ymax, xmin:xmax] = 255
        rects.append([ymin, ymax, xmin, xmax])
    dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
    return dst, rects
def wm_video(video_path):
    """Detect a watermark on one middle frame of the video, remove it from
    all frames, and save the result as '<name>-2.mp4'.

    Returns the output path, or None when no watermark was found or an
    exception occurred (errors are printed and swallowed).
    """
    video_dst = None
    try:
        vid = imageio.get_reader(video_path,'ffmpeg')
        L = vid.get_length()
        num = int(L/2)  # detect on the middle frame
        print("select %d%s frame for watermark detection:"%(num,'th'))
        image = vid.get_data(num)
        h = image.shape[0]
        w = image.shape[1]
        h_tmp = int(h/3)  # only the top third is searched for watermarks
        image_detec = image[0:h_tmp,:,:]
        image_np_expanded = np.expand_dims(image_detec, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        (boxes, scores, classes, num_detections) = sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
        image_np,class_name,box_map = vis_util.visualize_boxes_and_labels_on_image_array(
            image_detec, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores),
            category_index, use_normalized_coordinates=True, line_thickness=3)
        if len(class_name)>0:
            print('begain remove watermark ',class_name)
            fps = vid.get_meta_data()['fps']
            video_name, ext = os.path.splitext(video_path)
            video_dst = video_name + '-2' + '.mp4' #where to save the new video
            writer = imageio.get_writer(video_dst, fps=fps, macro_block_size=None)
            #for num,im in enumerate(vid):
            # Inpaint once per stride of 5 frames, pasting the same patched
            # region onto frames i..i+4 to save inpainting work.
            for i in range(0,L,5):
                # NOTE(review): when box_map holds several boxes this loop
                # overwrites ymin/xmin/ymax/xmax each iteration, so only the
                # LAST box is actually removed -- presumably a bug; compare
                # wm_video2, which masks all boxes at once.
                for box, color in box_map.items():
                    ymin, xmin, ymax, xmax = box
                    ymin = int(ymin*h_tmp)
                    xmin = int(xmin*w)
                    ymax = int(ymax*h_tmp)
                    xmax = int(xmax*w)
                # NOTE(review): i+1..i+4 can exceed the last frame index when
                # L is not a multiple of 5; the resulting exception aborts the
                # whole function via the outer except -- confirm intended.
                im = vid.get_data(i)
                im1 = vid.get_data(i+1)
                im2 = vid.get_data(i+2)
                im3 = vid.get_data(i+3)
                im4 = vid.get_data(i+4)
                im_tmp = im[0:h_tmp,:,:]
                wm_rec,rec = wm_remove(im_tmp,ymin,xmin,ymax,xmax)
                im[rec[0]:rec[1],rec[2]:rec[3],:] = wm_rec
                im1[rec[0]:rec[1],rec[2]:rec[3],:] = wm_rec
                im2[rec[0]:rec[1],rec[2]:rec[3],:] = wm_rec
                im3[rec[0]:rec[1],rec[2]:rec[3],:] = wm_rec
                im4[rec[0]:rec[1],rec[2]:rec[3],:] = wm_rec
                writer.append_data(im)
                writer.append_data(im1)
                writer.append_data(im2)
                writer.append_data(im3)
                writer.append_data(im4)
            writer.close()
        else:
            print('there is no watermark in this video!')
    except Exception as e:
        print(e)
        print('failed to remove the watermark')
    return video_dst
def wm_video2(video_path):
    """Detect a watermark on one middle frame, inpaint every detected box in
    all frames, and save the result as '<basename>-3.mp4' in the CWD.

    Returns the output path, or None when no watermark was found or an
    error occurred (errors are printed).

    Fixes over the original:
    - ``rec`` returned by wm_remove2 is a *list of* [ymin, ymax, xmin, xmax]
      boxes; the old code indexed it as a single flat box (rec[0]..rec[3]),
      which raised IndexError for a single detection and sliced nonsense
      otherwise.
    - frame indices i+1..i+4 no longer run past the end of the video when
      the frame count is not a multiple of 5.
    - the output path is now returned; wm_remove_video relied on it but
      always got None before.
    """
    video_dst = None
    try:
        vid = imageio.get_reader(video_path, 'ffmpeg')
        L = vid.get_length()
        num = int(L / 2)  # detect on the middle frame
        print("select %d%s frame for watermark detection:" % (num, 'th'))
        image = vid.get_data(num)
        h = image.shape[0]
        w = image.shape[1]
        h_tmp = int(h / 3)  # only the top third is searched for watermarks
        image_detec = image[0:h_tmp, :, :]
        image_np_expanded = np.expand_dims(image_detec, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        (boxes, scores, classes, num_detections) = sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
        image_np, class_name, box_map = vis_util.visualize_boxes_and_labels_on_image_array(
            image_detec, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores),
            category_index, use_normalized_coordinates=True, line_thickness=3)
        if len(class_name) > 0:
            print('begain remove watermark ', class_name)
            fps = vid.get_meta_data()['fps']
            video_dst = video_path.split('/')[-1][0:-4] + '-3' + '.mp4'  # where to save the new video
            writer = imageio.get_writer(video_dst, fps=fps, macro_block_size=None)
            # Inpaint once per stride of 5 frames and paste the patched
            # regions onto the following frames to save inpainting work.
            for i in range(0, L, 5):
                frames = [vid.get_data(j) for j in range(i, min(i + 5, L))]
                im_tmp = frames[0][0:h_tmp, :, :]
                wm_rec, rec = wm_remove2(im_tmp, box_map)
                for r in rec:
                    y0, y1, x0, x1 = r
                    for frame in frames:
                        frame[y0:y1, x0:x1, :] = wm_rec[y0:y1, x0:x1, :]
                for frame in frames:
                    writer.append_data(frame)
            writer.close()
        else:
            print('there is no watermark in this video!')
    except Exception as e:
        print(e)
        print('something wrong when remove the watermark')
    return video_dst
def wm_image(image_path):
    """Detect the watermark in an image, remove it, and save '<name>-2.jpg'.

    Fix: the patched region was assigned to an undefined name ``im``
    (NameError at runtime, silently printed by the except). It now goes
    back into ``im_detec``, which is a view into ``image``, so the saved
    file actually contains the repaired pixels.
    """
    try:
        image = imageio.imread(image_path)
        h = image.shape[0]
        w = image.shape[1]
        h_tmp = int(h / 3)  # only the top third is searched for watermarks
        im_detec = image[0:h_tmp, :, :]
        image_np_expanded = np.expand_dims(im_detec, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        (boxes, scores, classes, num_detections) = sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
        image_np, class_name, box_map = vis_util.visualize_boxes_and_labels_on_image_array(
            im_detec, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores),
            category_index, use_normalized_coordinates=True, line_thickness=3)
        if len(class_name) > 0:
            print('begain remove watermark ', class_name)
            image_name, ext = os.path.splitext(image_path)
            image_dst = image_name + '-2' + '.jpg'  # where to save the new image
            for box, color in box_map.items():
                ymin, xmin, ymax, xmax = box
                ymin = int(ymin * h_tmp)
                xmin = int(xmin * w)
                ymax = int(ymax * h_tmp)
                xmax = int(xmax * w)
                wm_rec, rec = wm_remove(im_detec, ymin, xmin, ymax, xmax)
                # im_detec is a view into image, so this also patches image.
                im_detec[rec[0]:rec[1], rec[2]:rec[3], :] = wm_rec
            imageio.imwrite(image_dst, image)
    except Exception as e:
        print(e)
        print('failed to remove the watermark')
def wm_remove_video(video_url):
    """Download *video_url*, run watermark removal, and delete local files.

    Returns (True, video_dst).
    """
    filename = video_url.split('/')[-1]
    try:
        down2file(video_url,filename,folder=IMAGE_FOLDER)
    except Exception as e:
        # NOTE(review): on download failure this falls through and still tries
        # to process the missing file -- probably should return early here.
        print('failed to download the video!')
    video_path = IMAGE_FOLDER + '/' + filename
    video_dst = wm_video2(video_path)
    try:
        # NOTE(review): this deletes the freshly written output video and then
        # returns its (now dangling) path. Also, wm_video2 returned None in
        # the original code, so os.remove(None) raised and was swallowed.
        # Confirm whether the output is consumed elsewhere before removal.
        os.remove(video_path)
        os.remove(video_dst)
    except Exception as e:
        pass
    return True,video_dst
if __name__ == '__main__':
    # Smoke tests: a local video, a remote video URL, and a single image.
    video_path = 'vvvvvv.mp4'
    wm_video2(video_path)
    video_url = 'http://flv3.bn.netease.com/videolib3/1801/31/JlUxq3843/SD/JlUxq3843-mobile.mp4'
    wm_remove_video(video_url)
    image_path = 'copy/haokan/1.jpg'
    wm_image(image_path)
|
import importlib
import argparse
# Module-level argument parser shared by all registered sub-commands.
_parser = argparse.ArgumentParser(prog='bond')
_subparsers = _parser.add_subparsers(dest='subparser_name',
                                     help='sub-command help')
# Default no-op handler so invoking 'bond' without a sub-command does nothing.
_parser.set_defaults(func=lambda x: None)
def load_commands(COMMANDS):
    """Import each named module from bond.commands and let it self-register."""
    for module_name in COMMANDS:
        module = importlib.import_module('bond.commands.' + module_name)
        module.register()
def register(command):
    """Register *command* as a sub-command.

    str(command) becomes the sub-command name, command.run its handler, and
    the optional ``arguments`` attribute holds (args, kwargs) pairs for
    add_argument.
    """
    sub = _subparsers.add_parser(str(command))
    sub.set_defaults(func=command.run)
    for arg_spec in getattr(command, 'arguments', []):
        sub.add_argument(*arg_spec[0], **arg_spec[1])
def execute_from_command_line(argv):
    """Parse *argv* (the full argument vector, program name included, as in
    Django's function of the same name) and dispatch to the selected
    sub-command's handler.

    Fix: *argv* was previously ignored -- parse_args() always read
    sys.argv -- so programmatic callers could not supply their own
    arguments. Passing sys.argv still behaves exactly as before.
    """
    args = _parser.parse_args(argv[1:] if argv else None)
    args.func(args)
|
from environs import Env
# Load configuration from the process environment (and a local .env file).
env = Env()
env.read_env()
BOT_TOKEN = env.str("BOT_TOKEN")  # bot API token
IP = env.str("ip")                # host/IP the bot connects to
# Database connection settings.
DB_USER = env.str('DB_USER')
DB_PASS = env.str('DB_PASS')
DB_NAME = env.str('DB_NAME')
DB_HOST = env.str('DB_HOST')
|
def MN_matris(n, m):
    """Print an n-row by m-column multiplication table; cell (i, j) = i*j.

    Each value is followed by two spaces and each row by a blank line,
    matching the original layout exactly.
    """
    for row in range(1, n + 1):
        line = "".join("{}  ".format(row * col) for col in range(1, m + 1))
        print(line + "\n")

MN_matris(4, 4)
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import UserCreationForm
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.views.generic.base import View
from app.models import UserMoney, Transaction
# Create your views here.
class Home(View):
    """Landing page: shows the user's balance and transaction history, and
    accepts new transactions via POST (AJAX, returns JSON)."""

    def get(self, request):
        # NOTE(review): is_authenticated is a property (not callable) from
        # Django 1.10 onward; this call style only works on older versions.
        if request.user.is_authenticated():
            money = UserMoney.objects.get(user=request.user, name="money")
            transactions = Transaction.objects.filter(user_money=money)
            return render(request, "home.html", {"money": "{:.2f}".format(money.money), "transactions": transactions})
        return render(request, "home.html")

    def post(self, request):
        # "mode" is presumably a sign ("+"/"-") prepended to "amount", giving
        # a signed numeric string -- TODO confirm against the form template.
        number = request.POST.get("mode") + request.POST.get("amount")
        money = UserMoney.objects.get(user=request.user, name="money")
        Transaction.objects.create(user_money=money, amount=number, name=request.POST.get("name"))
        money.money += float(number)
        money.save()
        return JsonResponse({"money": money.money})
class Signup(View):
    """Account registration backed by Django's UserCreationForm; logs the
    new user in and redirects home on success."""

    def get(self, request):
        return render(request, 'registration/signup.html',
                      {"form": UserCreationForm()})

    def post(self, request):
        form = UserCreationForm(request.POST)
        if not form.is_valid():
            # Re-render the form so validation errors are displayed.
            return render(request, "registration/signup.html", {"form": form})
        form.save()
        user = authenticate(username=form.cleaned_data.get("username"),
                            password=form.cleaned_data.get("password1"))
        login(request, user)
        return redirect("home")
class AddRecurrent(View):
    """Form page for recurring transactions; POST handling not implemented."""

    def get(self, request):
        return render(request, "addrecurrent.html")

    def post(self, request):
        # TODO: create the recurring transaction; currently a no-op (returns None).
        pass
|
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, HttpResponseForbidden
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.db.models import Q
from social.models import Post, Comment, User
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request, 'social/index.html')
def social_login(request):
    """Authenticate from POSTed username/password and redirect to the home feed."""
    ok, message = _check_post_request(request, ['username', 'password'])
    if not ok:
        return HttpResponseBadRequest(message)
    user = authenticate(username=request.POST['username'], password=request.POST['password'])
    if user is None:
        return HttpResponseBadRequest("The combination of username and password does not exist. ")
    login(request, user)
    return HttpResponseRedirect(reverse('social:home'))
def social_register(request):
    """Register a new user; GET renders the form, POST creates the account.

    Fixes:
    - the bad-request response for an invalid POST was constructed but never
      returned (missing ``return``), silently rendering the page instead;
    - the bare ``except:`` around user creation could swallow
      KeyboardInterrupt/SystemExit; narrowed to Exception.
    """
    new_user = False
    error = False
    if request.method == 'POST':
        check = _check_post_request(request, ["username", "email", "password"])
        if check[0]:
            try:
                user = User.objects.create_user(request.POST['username'], request.POST['email'], request.POST['password'])
                user.save()
                new_user = True
            except Exception:
                # e.g. duplicate username; surfaced to the template as a flag
                error = True
        else:
            return HttpResponseBadRequest(check[1])
    return render(request, 'social/register.html', {'new_user': new_user, 'error': error})
@login_required
def home(request):
    """Timeline view: all posts on GET, or posts matching the search terms
    (in the post body or any comment) on POST.

    Fix: removed the unused ``q_comment``/``q_text`` locals and the stale
    commented-out filter line -- dead code that suggested a different query
    than the one executed.
    """
    if request.method == 'GET':
        posts = Post.objects.all()
    elif request.method == 'POST':
        check = _check_post_request(request, ['search_terms'])
        if not check[0]:
            return HttpResponseBadRequest(check[1])
        search_terms = request.POST['search_terms']
        # Search in posts and comments. Kept as two filters ORed at the
        # queryset level: combining Q objects inside a single filter() has
        # different JOIN semantics for the multi-valued comment relation.
        posts = Post.objects.filter(comment__text__icontains=search_terms) | Post.objects.filter(text__icontains=search_terms)
        posts = posts.distinct()
    posts = posts.order_by('-date_time')
    return render(request, 'social/home.html', {'posts': posts, 'user': request.user})
@login_required
def add_post(request):
    """Create a post (text plus optional photo) for the logged-in user."""
    ok, message = _check_post_request(request, ['text'])
    if not ok:
        return HttpResponseBadRequest(message)
    post = Post()
    post.text = request.POST['text']
    post.poster = request.user
    if request.FILES.get('photo') is not None:
        post.photo = request.FILES['photo']
    post.save()
    return HttpResponseRedirect(reverse('social:home'))
@login_required
def delete_post(request, post_id):
    """Delete the given post, but only when it belongs to the requester."""
    post = Post.objects.get(pk=post_id)
    if request.user == post.poster:
        post.delete()
        return HttpResponseRedirect(reverse('social:home'))
    return HttpResponseForbidden("You can only delete your own posts!")
@login_required
def add_comment(request):
    """Attach a comment to an existing post, then redirect to the feed.

    Fix: typo in the user-facing error message ("Ther" -> "There").
    """
    check = _check_post_request(request, ['comment', 'post_id'])
    if not check[0]:
        return HttpResponseBadRequest(check[1])
    new_comment = Comment()
    new_comment.poster = request.user
    new_comment.text = request.POST['comment']
    try:
        new_comment.post = Post.objects.get(pk=request.POST['post_id'])
    except Post.DoesNotExist:
        return HttpResponseBadRequest("There is no Post with id {}".format(request.POST['post_id']))
    new_comment.save()
    return HttpResponseRedirect(reverse('social:home'))
@login_required
def profile(request):
    """Render the logged-in user's profile page."""
    return render(request, 'social/profile.html', {'user': request.user})
def _check_post_request(request, keys):
#Check the request method
if request.method != 'POST':
return (False, "This method should be called with a POST method!")
for key in keys:
if key not in request.POST:
return (False, "The POST request should contain a {} field".format(key))
if not request.POST[key]:
return (False, "The {} field cannot be empty!".format(key))
return (True, "Everything is alright!")
|
from requests import get
import socket
import os
# Query a public echo service for this machine's public IP and print it twice
# (once labeled, once bare for easy piping).
pubIP = get('https://api.ipify.org').text
print ("Public IP is", pubIP)
print (pubIP)
# JTSK-350112
# circle.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
"""
File: circle.py
Resources to manage circles
"""
import math
class Circle(object):
    """A circle with a radius and a color.

    Both attributes are kept in name-mangled private members and exposed
    only through the getter/setter methods. The ``+`` and ``-`` operators
    act on circle *areas* and return plain floats.
    """

    def __init__(self, radius=1.0, color="red"):
        """Create a circle; defaults are radius 1.0 and color "red"."""
        self.__radius = float(radius)
        self.__color = str(color)

    def setRadius(self, radius):
        """Replace the radius (coerced to float)."""
        self.__radius = float(radius)

    def setColor(self, color):
        """Replace the color (coerced to str)."""
        self.__color = str(color)

    def getRadius(self):
        """Current radius as a float."""
        return self.__radius

    def getColor(self):
        """Current color as a string."""
        return self.__color

    def getArea(self):
        """Area: pi * r * r."""
        return math.pi * self.__radius * self.__radius

    def getPerimeter(self):
        """Circumference: 2 * pi * r."""
        return 2 * math.pi * self.__radius

    def __add__(self, other):
        """Sum of the two circles' areas."""
        return self.getArea() + other.getArea()

    def __sub__(self, other):
        """Difference of the two circles' areas."""
        return self.getArea() - other.getArea()
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
'''
在数组中的两个数字,如果前面一个数字大于后面的数字,则这两个数字组成一个逆序对。
输入一个数组,求出这个数组中的逆序对的总数P。并将P对1000000007取模的结果输出。
即输出P%1000000007
'''
class Solution:
    """Count inverse pairs (i < j with data[i] > data[j]) via merge sort.

    InversePairs returns the count modulo 1000000007.

    Fixes over the original:
    - ``count`` was only ever set as a *class* attribute, so counts
      accumulated across repeated calls and across instances; it is now
      reset at the start of every InversePairs call.
    - ``len(lists) / 2`` is float division on Python 3; ``//`` works
      identically on both Python 2 and 3.
    """
    count = 0

    def InversePairs(self, data):
        """Return the number of inversions in *data*, mod 1000000007."""
        self.count = 0  # reset so repeated calls do not accumulate
        self.MergeSort(data)
        return self.count % 1000000007

    def MergeSort(self, lists):
        """Return *lists* sorted ascending, adding the number of cross-half
        inversions to self.count."""
        if len(lists) <= 1:
            return lists
        num = len(lists) // 2
        left = self.MergeSort(lists[:num])
        right = self.MergeSort(lists[num:])
        r, l = 0, 0
        result = []
        while l < len(left) and r < len(right):
            if left[l] <= right[r]:
                result.append(left[l])
                l += 1
            else:
                result.append(right[r])
                r += 1
                # every element still waiting in `left` exceeds right[r-1]
                self.count += len(left) - l
        result += left[l:]
        result += right[r:]
        return result
if __name__ == '__main__':
    # Expected output: 7 (the trailing 0 forms an inversion with each of 1..7).
    # Python 2 print statement -- this file targets python2.7 (see shebang).
    print Solution().InversePairs([1,2,3,4,5,6,7,0])
from asm import disassemble, assemble, lex
# DCPU-16 machine words of the spec's example program; `asm` below is the
# expected disassembly, so the two fixtures round-trip through the tools.
prog = [0x7c01, 0x0030, 0x7de1, 0x1000, 0x0020, 0x7803, 0x1000, 0xc00d,
        0x7dc1, 0x001a, 0xa861, 0x7c01, 0x2000, 0x2161, 0x2000, 0x8463,
        0x806d, 0x7dc1, 0x000d, 0x9031, 0x7c10, 0x0018, 0x7dc1, 0x001a,
        0x9037, 0x61c1, 0x7dc1, 0x001a, 0x0000, 0x0000, 0x0000, 0x0000]
asm = '''SET A, 0x30
SET [0x1000], 0x20
SUB A, [0x1000]
IFN A, 0x10
SET PC, 0x1a
SET I, 0xa
SET A, 0x2000
SET [0x2000+I], [A]
SUB I, 0x1
IFN I, 0x0
SET PC, 0xd
SET X, 0x4
JSR 0x18
SET PC, 0x1a
SHL X, 0x4
SET PC, POP
SET PC, 0x1a'''
def test_disassemble():
    """Disassembling the spec program reproduces the reference listing."""
    assert disassemble(prog) == asm
def test_assemble_one():
    """lex() splits a single statement into mnemonic/operand tokens."""
    assert lex('SET X, 2') == [['SET', 'X', ',', '2']]
def test_assemble_comment():
    # NOTE(review): both sides are the identical call, so this assertion is a
    # tautology -- it only proves lex() is deterministic. It presumably meant
    # to compare against lex('SET X, 2') to show comments are ignored; confirm
    # the intended lex semantics before changing.
    assert lex('SET X, 2 ; foo') == lex('SET X, 2 ; foo')
def test_assemble_disassembled():
    """Assembling the reference listing yields the original machine words."""
    assert assemble(asm) == prog
def test_assemble_disasseble_cycle():
    """assemble -> disassemble -> assemble is a fixed point."""
    code = assemble(asm)
    assert assemble(disassemble(code)) == code
def test_assemble_example():
    """The example.s file on disk assembles to the reference program."""
    with open('example.s') as f:
        example = f.read()
    assert assemble(example) == prog
|
#!/usr/bin/python
def meme():
    """Parse ./data/1.dat (tab-separated sale records) into a dict keyed by
    '<apartment>_<size>'.

    For each key, keeps the row with the HIGHEST sale price (column 8) and
    appends the integer-average sale price as an extra trailing element.
    Python 2 only: uses long().
    """
    md = {}
    with open('./data/1.dat') as f:
        price_dict = {}
        for line in f.readlines():
            row = line.strip().split('\t')
            apt_name = row[4]
            key = apt_name + "_" + row[5].replace(' ','')
            if key in md:
                price_dict[key].append(long(row[8].replace(' ', '').replace(',','')))
                tmp_price = long(md[key][8].replace(' ', '').replace(',',''))
                cur_price = long(row[8].replace(' ', '').replace(',',''))
                if tmp_price < cur_price:
                    md[key][8] = row[8]
            else:
                md[key] = row
                price_dict[key] = [long(row[8].replace(' ', '').replace(',',''))]
        # Append the average price (truncating integer division) per key.
        for key in price_dict:
            md[key].append(sum(price_dict[key])/len(price_dict[key]))
    return md
def junse():
    """Parse ./data/2.dat (tab-separated rental records) into a dict keyed by
    '<apartment>_<size>'.

    Mirrors meme() but keeps the row with the LOWEST deposit (column 9) and
    appends the integer-average deposit. Python 2 only: uses long().
    NOTE(review): near-duplicate of meme(); a shared parameterized helper
    would remove the duplication.
    """
    jd = {}
    with open('./data/2.dat') as f:
        price_dict = {}
        for line in f.readlines():
            row = line.strip().split('\t')
            apt_name = row[4]
            key = apt_name + "_" + row[6].replace(' ','')
            if key in jd:
                price_dict[key].append(long(row[9].replace(' ', '').replace(',','')))
                tmp_price = long(jd[key][9].replace(' ', '').replace(',',''))
                cur_price = long(row[9].replace(' ', '').replace(',',''))
                if tmp_price > cur_price:
                    jd[key][9] = row[9]
            else:
                jd[key] = row
                price_dict[key] = [long(row[9].replace(' ', '').replace(',',''))]
        # Append the average deposit (truncating integer division) per key.
        for key in price_dict:
            jd[key].append(sum(price_dict[key])/len(price_dict[key]))
    return jd
def refine(s):
    """Strip every space character from *s*."""
    return "".join(s.split(" "))
def main():
    """Join sale (meme) and rental (junse) records on '<apartment>_<size>'
    and print one TSV line for entries whose sizes differ by less than 5.

    Python 2 only: uses long() and the print statement.
    """
    md = meme()
    jd = junse()
    for m in md:
        if m in jd:
            juns = float(jd[m][6].replace(' ',''))
            mes = float(md[m][5].replace(' ',''))
            diff_s = abs(mes - juns)
            if diff_s < 5:
                junp = long(jd[m][9].replace(' ','').replace(',',''))
                mep = long(md[m][8].replace(' ','').replace(',',''))
                gap_p = mep - junp  # max sale price minus min rental deposit
                gap_a = md[m][12] - jd[m][14]  # difference of the appended averages
                name, py = refine(m).split('_')
                addr = md[m][0].split(' ')[3]
                print '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s'%(
                    addr,
                    name,
                    py,
                    refine(jd[m][11]),
                    refine(md[m][9]),
                    refine(jd[m][9]),
                    refine(md[m][8]),
                    jd[m][14],
                    md[m][12],
                    gap_p,
                    gap_a)

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-01 09:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops Album.publish_date.
    # Do not edit once applied to any database.

    dependencies = [
        ('application', '0003_album'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='album',
            name='publish_date',
        ),
    ]
|
# -*- coding: utf-8 -*-
import numpy as np
import cv2
# Capture from the default webcam and highlight pixels inside a BGR color
# range, showing the original and masked frames side by side. ESC exits.
video_capture = cv2.VideoCapture(0)
# Fix: the threshold arrays never change, so build them once instead of
# reconstructing the boundary list and numpy arrays on every frame.
lower = np.array([0, 0, 128], dtype="uint8")
upper = np.array([155, 120, 255], dtype="uint8")
while True:
    ret, image = video_capture.read()
    if not ret:
        # Fix: the original ignored `ret` and crashed on a None frame when
        # the camera was unavailable or the stream ended.
        break
    mask = cv2.inRange(image, lower, upper)
    output = cv2.bitwise_and(image, image, mask=mask)
    # show the images
    cv2.imshow("images", np.hstack([image, output]))
    k = cv2.waitKey(30) & 0xFF
    if k == 27:  # ESC
        break
video_capture.release()
cv2.destroyAllWindows()
|
"""This module is aimed specifically at gathering experiences from FireCommanderV2 by
using parallel worker-simulators to gather experiences from specific states. The goal
is to obtain state-value estimates of all (or at least the most relevant) states.
"""
import numpy as np
import multiprocessing as mp
import time
import queue
from abc import abstractmethod
from itertools import product
from spyro.utils import progress, make_env
# global variables specifying some FireCommanderV2 characteristics
NUM_STATIONS = 17  # number of fire stations in the simulated region
# Maximum number of vehicles per station (index-aligned with STATION_NAMES).
FIRECOMMANDERV2_MAX_VEHICLES = [2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]
# Same, extended with the maxima of day-of-week (6) and hour-of-day (23).
FIRECOMMANDERV2_MAX_VEHICLES_DAY_HOUR = FIRECOMMANDERV2_MAX_VEHICLES + [6, 23]
STATION_NAMES = ['AALSMEER', 'AMSTELVEEN', 'ANTON', 'DIEMEN', 'DIRK', 'DRIEMOND',
                 'DUIVENDRECHT', 'HENDRIK', 'IJSBRAND', 'NICO', 'OSDORP', 'PIETER',
                 'TEUNIS', 'UITHOORN', 'VICTOR', 'WILLEM', 'ZEBRA']
def extract_vehicles_from_state(state):
    """Return only the per-station vehicle counts (first NUM_STATIONS entries)."""
    return state[:NUM_STATIONS]
def extract_vehicles_day_hour_from_state(state):
    """Return the vehicle counts plus the day-of-week and hour-of-day entries."""
    return state[:NUM_STATIONS+2]
class BaseParallelValueEstimator(object):
    """Base class that deploys parallel workers to gather experiences from an environment.
    Not useful to instantiate on its own.

    Parameters
    ----------
    num_workers: int, default=-1
        The number of worker processes to use. If -1, uses one per available CPU core.
    max_queue_size: int, default=100
        Maximum size of the shared result queue (bounds memory usage).
    include_time: bool, default=False
        Whether day-of-week and hour-of-day are part of the state representation.
    name: str, default="ValueEstimator"
        Human-readable name of this estimator.
    strategy: str, default='random'
        Experience-gathering strategy; one of ['random', 'tasks', 'uniform'].
    verbose: bool, default=True
        Whether to print progress messages.
    """

    def __init__(self, num_workers=-1, max_queue_size=100, include_time=False, name="ValueEstimator",
                 strategy='random', verbose=True):
        """Initialize general parameters."""
        self.verbose = verbose
        self.strategy = strategy
        # set number of worker processes
        if num_workers == -1:
            self.num_workers = mp.cpu_count()
        else:
            self.num_workers = num_workers
        # progress("Using {} workers".format(self.num_workers), verbose=self.verbose)
        # other parameters
        self.max_queue_size = max_queue_size
        self.name = name
        self.include_time = include_time
        # set state characteristics
        if include_time:
            self.state_processor = extract_vehicles_day_hour_from_state
            self.max_values = FIRECOMMANDERV2_MAX_VEHICLES_DAY_HOUR
        else:
            self.state_processor = extract_vehicles_from_state
            self.max_values = FIRECOMMANDERV2_MAX_VEHICLES
        self.total_vehicles = np.sum(self.max_values[:NUM_STATIONS])
        self.state_shape = (len(self.max_values),)
        # set spawn method for consistency
        try:
            mp.set_start_method("spawn")
        except RuntimeError:
            progress("multiprocessing method not (re)set to 'spawn', because context was "
                     "already given.", verbose=self.verbose)

    def define_tasks(self, reps=100, debug_subset=None):
        """Define the states that will be explored by the worker processes.

        Parameters
        ----------
        reps: int, default=100
            The number of repetitions for each state.
        debug_subset: int, default=None
            If given, only the first `debug_subset` tasks are returned
            (useful for debugging).
        """
        # create exhaustive list of states
        ranges = [np.arange(0, y + 1) for y in self.max_values]
        all_states = np.array([x for x in product(*ranges)])
        state_sums = [all_states[i, :].sum() for i in range(len(all_states))]
        # skip the all-zero and all-deployed states: nothing can be dispatched there
        tasks = [
            {"state": all_states[i, :], "available": state_sums[i],
             "deployed": self.total_vehicles - state_sums[i], "reps": reps}
            for i in range(len(all_states))
            if (state_sums[i] != 0) and (state_sums[i] != self.total_vehicles)
        ]
        if debug_subset is not None:
            return tasks[:debug_subset]
        else:
            return tasks

    def perform_tasks(self, env_cls, reps=100, env_params=None, timeout=10,
                      debug_subset=None):
        """Gather experiences for every predefined task/state.

        Parameters
        ----------
        env_cls: Python class
            The environment to gather experiences from. This class was designed for
            FireCommanderV2, but similar environments might work as well.
        reps: int, default=100
            The number of repetitions/experiences to gather for each state.
        env_params: dict, default=None
            Key-value pairs passed to env_cls.
        timeout: int, default=10
            The maximum time to wait for workers to produce results. After timeout
            seconds, the main process stops getting results from the queue and
            wraps up the other processes.
        debug_subset: int, default=None
            If given, only this many tasks are executed.
        """
        # define tasks and put them in a global queue
        tasks = self.define_tasks(reps=reps, debug_subset=debug_subset)
        self.global_counter = 0
        self.num_tasks = len(tasks)
        self.task_queue = mp.Queue()
        self.result_queue = mp.Queue()
        for task in tasks:
            self.task_queue.put(task)
        progress("Put {} tasks in Queue (queue length: {})".format(self.num_tasks, self.task_queue.qsize()), verbose=self.verbose)
        # initialize workers
        workers = [
            ExperienceGatheringProcess(
                env_cls, self.result_queue, task_queue=self.task_queue,
                env_params=env_params, state_processor=self.state_processor,
                strategy='tasks'
            )
            for _ in range(self.num_workers)
        ]
        for worker in workers:
            worker.start()
        try:
            while True:
                try:
                    performed_task = self.result_queue.get(block=True, timeout=timeout)
                    self.process_performed_task(performed_task)
                    self.global_counter += 1
                    progress("performed {} / {} tasks".format(self.global_counter, self.num_tasks),
                             same_line=True, newline_end=False, verbose=self.verbose)
                except queue.Empty:
                    progress("\nQueue is empty. Breaking loop.", verbose=self.verbose)
                    break
        except KeyboardInterrupt:
            pass
        for worker in workers:
            if worker.is_alive():
                worker.join()

    def gather_random_experiences(self, env_cls, total_steps=50000000, start_step=0, env_params=None,
                                  strategy='random', timeout=3):
        """Collect random experiences from parallel workers.

        Parameters
        ----------
        env_cls: Python class
            The environment to train on.
        total_steps: int, default=50000000
            The total number of experiences to gather.
        start_step: int, default=0
            Offset added to the step counter (e.g. when resuming).
        env_params: dict, default=None
            Parameters passed to env_cls upon initialization.
        strategy: str, default='random'
            Unused here; workers follow self.strategy.
        timeout: int, default=3
            The maximum time to wait for an item in the results queue if it is empty.
        """
        self.stop_indicator = mp.Value("i", 0)
        self.global_counter = start_step
        total_steps = total_steps + start_step
        self.result_queue = mp.Queue(self.max_queue_size)
        # initialize workers
        workers = [
            ExperienceGatheringProcess(
                env_cls, self.result_queue, stop_indicator=self.stop_indicator,
                env_params=env_params, state_processor=self.state_processor,
                max_values=self.max_values, strategy=self.strategy
            )
            for _ in range(self.num_workers)
        ]
        for worker in workers:
            worker.start()
        # wait for workers to start delivering
        time.sleep(5)
        try:
            while True:
                try:
                    experience = self.result_queue.get(block=True, timeout=timeout)
                    self.process_random_experience(experience)
                    self.global_counter += 1
                    progress("Processed {} / {} experiences".format(self.global_counter, total_steps),
                             same_line=True, newline_end=False, verbose=self.global_counter % 1000 == 0)
                except queue.Empty:
                    progress("\nQueue is empty. Breaking loop.", verbose=self.verbose)
                    break
                if self.global_counter >= total_steps:
                    if self.stop_indicator.value == 0:
                        with self.stop_indicator.get_lock():
                            self.stop_indicator.value = 1
                        progress("\nSent stop signal to workers. Processing last results in queue.", verbose=self.verbose)
        except KeyboardInterrupt:
            progress("KeyboardInterrupt: sending stop signal and waiting for workers.", verbose=self.verbose)
            with self.stop_indicator.get_lock():
                self.stop_indicator.value = 1
        for worker in workers:
            if worker.is_alive():
                worker.join()
        progress("Workers stopped gracefully.", verbose=self.verbose)

    def fit(self, env_cls, env_params=None, *args, **kwargs):
        """Fit the estimator on the environment.

        BUG FIX: env_params was previously hard-coded to ``None`` in both
        delegated calls, silently discarding any environment parameters
        the caller supplied to fit().
        """
        if self.strategy == 'tasks':
            self.perform_tasks(env_cls, env_params=env_params, *args, **kwargs)
        else:
            self.gather_random_experiences(env_cls, env_params=env_params, *args, **kwargs)

    @abstractmethod
    def process_performed_task(self, task):
        """Process the result of a performed task. May vary for different implementations"""

    @abstractmethod
    def process_random_experience(self, experience):
        """Process a random experience. May vary for different implementations"""

    @abstractmethod
    def get_config(self):
        """Return the estimator's configuration as a dictionary."""
class ExperienceGatheringProcess(mp.Process):
    """Worker-class that gathers experiences from specific states to obtain
    estimates of state-values.

    Parameters
    ----------
    env_cls: class
        Environment class instantiated inside the worker process.
    result_queue: multiprocessing.Queue
        Queue on which gathered experiences / completed tasks are put.
    task_queue: multiprocessing.Queue, optional
        Queue of task dictionaries; required when strategy='tasks'.
    stop_indicator: multiprocessing.Value, optional
        Shared integer flag; the worker stops when it becomes 1. Required
        when strategy != 'tasks'.
    state_processor: callable, optional
        Applied to raw environment states before they are used or shipped.
    max_values: array-like, optional
        Maximum vehicle count per station; required when strategy='uniform'.
    strategy: str, one of ['random', 'tasks', 'uniform']
        If random, do not manipulate states. If 'tasks', process dictionaries with tasks and
        reps specified. If uniform, sample uniformly over all possible states and return results
        one-by-one.
    env_params: dict, optional
        Parameters forwarded to the environment factory.
    timeout: int, default=5
        Seconds to block on a full result queue before giving up.
    verbose: bool, default=False
        Whether to print progress messages.
    """

    def __init__(self, env_cls, result_queue, task_queue=None, stop_indicator=None,
                 state_processor=None, max_values=None, strategy='random', env_params=None,
                 timeout=5, verbose=False):
        super().__init__()
        self.env_cls = env_cls
        self.env_params = env_params
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.state_processor = state_processor
        self.stop_indicator = stop_indicator
        self.strategy = strategy
        self.max_values = max_values
        self.timeout = timeout
        self.verbose = verbose
        if self.strategy == 'tasks':
            assert task_queue is not None, "Must provide a task_queue if strategy='tasks'"
        if self.strategy != 'tasks':
            # Fixed a typo in this assertion message (missing closing quote).
            assert stop_indicator is not None, "Must provide a stop_indicator if strategy!='tasks'"
        if self.strategy == 'uniform':
            assert max_values is not None, "max_values must be provided when strategy='uniform'"
        progress("Worker initialized.", verbose=self.verbose)

    def run(self):
        """Call the main functionality of the class."""
        if self.strategy == 'tasks':
            self._run_tasks()
        elif self.strategy == 'uniform':
            self._run_uniform()
        elif self.strategy == 'random':
            self._run_randomly()
        else:
            raise ValueError("strategy should be one of ['random', 'tasks', 'uniform']. Got {}"
                             .format(self.strategy))

    def _make_env(self):
        """Instantiate the environment inside the worker process."""
        try:
            self.env = make_env(self.env_cls, self.env_params)
        except Exception:
            # Bug fix: previously a bare except swallowed the error, leaving
            # self.env unset and deferring failure to a confusing
            # AttributeError later. Log and re-raise instead.
            print("Exception in env creation")
            raise

    def _run_tasks(self):
        """Start interacting with the environment to obtain specifically requested
        experiences (tasks) and send the results to the global queue.
        """
        progress("Start performing tasks.", verbose=self.verbose)
        self._make_env()
        while True:
            try:
                task = self.task_queue.get(timeout=1)
                self.perform_task(task)
            except queue.Empty:
                # No more tasks: the worker is done.
                progress("Empty task queue found at worker. Shutting down worker.", verbose=self.verbose)
                break

    def _run_randomly(self):
        """Start interacting with the environment without manipulating the state in-between
        steps and send the result of each step to the global results queue.
        """
        progress("Start obtaining experiences.", verbose=self.verbose)
        self._make_env()
        while self.stop_indicator.value != 1:
            # start episode by resetting env
            state = self.state_processor(self.env.reset())
            done = False
            # gather experiences until episode end
            while not done:
                response, target = self.env._simulate()
                # Only ship valid (non-missing, finite) responses.
                if (response is not None) and (response != np.inf):
                    try:
                        self.result_queue.put(
                            {"state": state, "response": response, "target": target},
                            block=True, timeout=self.timeout
                        )
                    except queue.Full:
                        progress("Queue has been full for {} seconds. Breaking."
                                 .format(self.timeout), verbose=self.verbose)
                        break
                raw_state, done = self.env._extract_state(self.env._get_available_vehicles())
                state = self.state_processor(raw_state)

    def _run_uniform(self):
        """Manipulate the state to ensure uniform sampling over all possible states."""
        progress("Start sampling state values uniformly over states.", verbose=self.verbose)
        # Enumerate all possible states (cartesian product of per-station
        # vehicle counts) and create a generator to sample efficiently.
        ranges = [np.arange(0, y + 1) for y in self.max_values]
        all_states = np.array([x for x in product(*ranges)])
        state_gen = self._state_generator(all_states, total_vehicles=np.sum(self.max_values))
        # init env
        self._make_env()
        while self.stop_indicator.value != 1:
            sampled_state, num_deployed = next(state_gen)
            # NOTE(review): this inner loop keeps re-simulating the same
            # sampled state until the result queue fills up; if one valid
            # sample per state is intended, a ``break`` after a successful
            # put is missing -- confirm before changing.
            while True:
                state = self.state_processor(self.env.reset(forced_vehicles=num_deployed))
                self.manipulate_state(state, sampled_state)
                response, target = self.env._simulate()
                if (response is not None) and (response != np.inf):
                    try:
                        self.result_queue.put(
                            {"state": sampled_state, "response": response, "target": target},
                            block=True, timeout=self.timeout
                        )
                    except queue.Full:
                        progress("Queue has been full for {} seconds. Breaking."
                                 .format(self.timeout), verbose=self.verbose)
                        break

    def _state_generator(self, all_states, total_vehicles=21):
        """Yield (state, num_deployed) pairs sampled uniformly from all_states.

        Random indices are pre-drawn in batches for efficiency; a fresh batch
        is drawn when the current one is exhausted.

        Bug fixes: the counter was never incremented (the generator yielded
        the same state forever) and the re-drawn index batch was discarded.
        """
        batch_size = 50000
        indices = np.random.randint(0, len(all_states), size=batch_size)
        counter = 0
        while True:
            if counter >= len(indices):
                # Batch exhausted: draw a new one and restart the counter.
                indices = np.random.randint(0, len(all_states), size=batch_size)
                counter = 0
            s = all_states[indices[counter], :]
            counter += 1
            yield s, int(total_vehicles - np.sum(s))

    def perform_task(self, task):
        """Perform a given task: simulate task['reps'] valid samples of the
        requested state and put the augmented task on the result queue.
        """
        responses = np.zeros(task["reps"])
        targets = np.zeros(task["reps"])
        for i in range(task["reps"]):
            success = False
            # Retry until the simulation produces a valid, finite response.
            while not success:
                state = self.state_processor(self.env.reset(forced_vehicles=task["deployed"]))
                self.manipulate_state(state, task["state"])
                response, target = self.env._simulate()
                if (response is not None) and (response != np.inf):
                    success = True
                    responses[i], targets[i] = response, target
        task["responses"] = responses
        task["targets"] = targets
        self.result_queue.put(task)

    def manipulate_state(self, current_state, desired_state):
        """Move vehicles so that the desired state is obtained.

        Total number of vehicles must be the same in current_state
        and desired_state, otherwise this method will hang in an
        infinite loop.
        """
        delta = desired_state - current_state
        origins, destinations = [], []
        # Repeatedly peel off one vehicle per surplus/deficit station until
        # the delta vector is all zeros; stations with |delta| > 1 appear
        # multiple times in the move lists.
        while not np.all(delta == 0):
            extra_origins = np.flatnonzero(delta < 0)
            origins = np.append(origins, extra_origins)
            extra_destinations = np.flatnonzero(delta > 0)
            destinations = np.append(destinations, extra_destinations)
            delta[extra_origins] += 1
            delta[extra_destinations] -= 1
        for i in range(len(origins)):
            self.env.sim.fast_relocate_vehicle("TS",
                self.env.station_names[int(origins[i])],
                self.env.station_names[int(destinations[i])]
            )
|
#!/usr/bin/env python
from __future__ import print_function
import os.path
import urlparse
import urllib2
import bs4
import datetime
import PyRSS2Gen
import re
# Page listing newly announced pre-sale events on koka36.de.
url = "http://www.koka36.de/neu_im_vorverkauf.php"
def make_external(url, base="http://www.koka36.de"):
    """Resolve a possibly-relative *url* against the koka36.de site root.

    Parameters
    ----------
    url: str
        Absolute or site-relative URL as found in the scraped page.
    base: str, optional
        Base URL to resolve against; defaults to the site root
        (generalized from the previous hard-coded constant).

    Returns
    -------
    str
        The absolute URL.
    """
    # Local compatibility import so the function works under both
    # Python 2 (urlparse module) and Python 3 (urllib.parse).
    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin  # Python 2
    return urljoin(base, url)
def main():
    """Scrape the koka36 pre-sale page and print an RSS 2.0 feed to stdout."""
    page = urllib2.urlopen(url).read()
    soup = bs4.BeautifulSoup(page)
    feed_items = []
    for event in soup.find_all('div', {'class': 'event_box'}):
        # NOTE(review): filtering on style='imagefield' looks suspicious --
        # 'imagefield' reads like a class name; confirm against the live
        # page markup before changing.
        data = event.find('div', {'style': 'imagefield'})
        title = data.find('p').string
        description = data.find_all('div')[-1].string
        image = make_external(data.find('img').get('src'))
        link = make_external(event.find('a').get('href'))
        # Entries without a title are skipped.
        if not title:
            continue
        feed_items.append(PyRSS2Gen.RSSItem(
            title = title,
            link = link,
            description = description,
            guid = PyRSS2Gen.Guid(link)))
    rss = PyRSS2Gen.RSS2(
        title = "Neu im Vorverkauf",
        link = "http://www.koka36.de/",
        description = "Generated using bs4, PyRSS2Gen",
        lastBuildDate = datetime.datetime.utcnow(),
        items = feed_items)
    print(rss.to_xml())
# Script entry point: emit the generated RSS feed to stdout when run directly.
if __name__ == '__main__':
    main()
|
# Small demonstrations of Python assignment and unpacking semantics.

a, b, rest = [1, 2, 3]  # exact unpacking: three names, three values
print(a, b, rest)

s = [1, 2, 3, 4, 5, 6]
i = 0
# Equivalent to the chained assignment ``i = s[i] = 3``: the value is
# assigned left to right, so i becomes 3 before s[i] is set (s[3] = 3).
i = 3
s[i] = 3
print(i)
print(s)

foo = 'anyu'
foo = foo * 2  # string repetition
print(foo)
|
from .util.debug import dodebug
from .log import logger, debug
from .process import process, process_output
from .errors import MooException, TException
from .user_input import YesNo
from .config import (Configurations, ConfigClient, lazy_configurable, Config,
configurable)
from tek.run import cli
# ``tek.test`` is only available in development installs; degrade
# gracefully when it is missing.
try:
    from tek.test import Spec
except ImportError:
    pass
# NOTE(review): if the import above fails, ``Spec`` stays undefined even
# though it is listed in __all__ -- ``from tek import *`` would then raise
# AttributeError. Confirm whether a stub should be assigned instead.
__all__ = ['cli', 'Spec', 'Configurations', 'ConfigClient',
           'lazy_configurable', 'Config', 'YesNo', 'MooException', 'process',
           'process_output', 'debug', 'logger', 'dodebug', 'TException',
           'configurable']
|
from .project import Project, ProjectCreate, ProjectUpdate
from .user import User, UserCreate, UserUpdate
from .item import Item, ItemCreate, ItemDeleted, ItemUpdate |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from unittest import TestCase
import nltk
from nlp.pos_tagger import PosTagger
class TestPosTagger(TestCase):
    """Unit tests for nlp.pos_tagger.PosTagger."""

    def setUp(self):
        # Shared fixture: a short sentence with a known POS tagging.
        # Renamed the attribute from ``self.PosTagger`` (which shadowed the
        # class name and violated PEP 8 attribute naming) to ``self.tagger``.
        self.sentence = 'the food was amazing'
        self.tagger = PosTagger(self.sentence)

    def test_pos_tag(self):
        """pos_tag() returns the expected (token, tag) pairs."""
        extracted_pos_tagged = self.tagger.pos_tag()
        expected_pos_tagged = [('the', 'DT'), ('food', 'NN'), ('was', 'VBD'), ('amazing', 'VBG')]
        self.assertListEqual(extracted_pos_tagged, expected_pos_tagged)

    def test_get_tagger(self):
        """get_tagger() returns an nltk PerceptronTagger."""
        tagger = self.tagger.get_tagger()
        # assertIsInstance is the idiomatic type check and produces clearer
        # failure messages than comparing type() objects for equality.
        self.assertIsInstance(tagger, nltk.tag.perceptron.PerceptronTagger)
|
from __future__ import print_function
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# A package for reading user and password from a configuration file.
import util
# cpapi is a library that handles the communication with the Check Point management server.
from cpapi import APIClient, APIClientArgs
# First port of the contiguous TCP service range created by add().
_PORT = 20000
def add():
    """Create 3000 sequential TCP services (ports _PORT.._PORT+2999) on a
    Check Point management server, then publish the session.

    Exits the process with status 1 when the server's fingerprint cannot be
    verified or when the login fails.
    """
    username, password = util.get_credentials_access()
    client_args = APIClientArgs()
    with APIClient(client_args) as client:
        #
        # The API client, would look for the server's certificate SHA1 fingerprint in a file.
        # If the fingerprint is not found on the file, it will ask the user if he accepts the server's fingerprint.
        # In case the user does not accept the fingerprint, exit the program.
        if client.check_fingerprint() is False:
            print("Could not get the server's fingerprint - Check connectivity with the server.")
            # sys.exit instead of the interactive-only exit() builtin, which
            # is provided by the site module and absent in some deployments.
            sys.exit(1)
        # login to server:
        login_res = client.login(username, password)
        if login_res.success is False:
            print("Login failed:\n{}".format(login_res.error_message))
            sys.exit(1)
        for port in range(_PORT, _PORT + 3000):
            add_service_response = client.api_call("add-service-tcp",
                                                   {"name": 'TCP-' + str(port),
                                                    "port": port})
            if add_service_response.success:
                print("The service: '{}' has been added successfully".format(add_service_response.data['name']))
            else:
                # Report both the high-level error and the API status detail.
                print("Port: '{}'\n{}".format(port, add_service_response.error_message))
                print("[{}] {}: {}".format(add_service_response.status_code, add_service_response.data['code'], add_service_response.data['message']))
        # publish the result
        publish_res = client.api_call("publish", {})
        if publish_res.success:
            print("The changes were published successfully.")
        else:
            print("Failed to publish the changes.")
|
from .wrapper import cli
|
'''
Fit plots of energy resolution vs pt for data from 2012 Vecbos
ntuples run with PHOSPHOR method.
28 August 2013 Valere Lambert
'''
import ROOT
import JPsi.MuMu.common.roofit as roo
import JPsi.MuMu.common.cmsstyle as cmsstyle
import JPsi.MuMu.common.canvases as canvases
from JPsi.MuMu.common.xychi2fitter import XYChi2Fitter as Fitter
# Input ROOT file holding the resolution-vs-pt graphs.
_filename = 'pt_res_data.root'
# _filename = '/Users/veverka/Work/Data/phosphor/resDataVsPt_HggV2Ression_NoMuonBias_EGMPaperCategories.root'
# Stochastic term (S) as measured at the test beam.
_stochastic_from_tb = 3.
# Average sqrt(cosh(eta)) factors used to scale the test-beam S term for
# barrel and endcap photons (energy -> transverse-energy resolution).
_mean_sqrt_cosh_eta_barrel = 1.16
_mean_sqrt_cosh_eta_endcaps = 1.91
#Enhanced Z+Jets
#_noise_from_mc_barrel_highr9_lownv = 23.87 # +/- 40.49
#_noise_from_mc_barrel_highr9_highnv = 26.54 # +/- 36.53
#_noise_from_mc_barrel_lowr9_lownv = 75.07 # +/- 34.27
#_noise_from_mc_barrel_lowr9_highnv = 79.69 # +/- 33.12
#_noise_from_mc_endcaps_highr9_lownv = 33.13 # +/- 43.96
#_noise_from_mc_endcaps_highr9_highnv = 34.2 # +/- 42.9
#_noise_from_mc_endcaps_lowr9_lownv = 89.12 # +/- 34.51
#_noise_from_mc_endcaps_lowr9_highnv = 99.94 # +/- 199.9
#_noise_from_mc_barrel_lowr9 = 76.32 # +/- 34.17
#_noise_from_mc_barrel_highr9 = 24.36 # +/- 39.73
#_noise_from_mc_endcaps_lowr9 = 92.11 # +/- 40.85
#_noise_from_mc_endcaps_highr9 = 33.46 # +/- 43.64
#_noise_from_mc_barrel_lownv = 23.87 # +/- 40.49
#_noise_from_mc_barrel_highnv = 26.54 # +/- 36.53
#_noise_from_mc_endcaps_lownv = 78.56 # +/- 32.27
#_noise_from_mc_endcaps_highnv = 94.48 # +/- 37.07
# Noise terms (N) extracted from MC fits, used when N is fixed in the data
# fits below; the trailing comments record their fit uncertainties.
_noise_from_mc_barrel_highr9_lownv = 23.27 # +/- 40.83
_noise_from_mc_barrel_highr9_highnv = 26.5 # +/- 36.77
_noise_from_mc_barrel_lowr9_lownv = 74.61 # +/- 34.46
_noise_from_mc_barrel_lowr9_highnv = 78.93 # +/- 33.25
_noise_from_mc_endcaps_highr9_lownv = 33.37 # +/- 43.37
_noise_from_mc_endcaps_highr9_highnv = 34.43 # +/- 42.63
_noise_from_mc_endcaps_lowr9_lownv = 88.58 # +/- 34.74
_noise_from_mc_endcaps_lowr9_highnv = 99.56 # +/- 199.2
_noise_from_mc_barrel_lowr9 = 75.85 # +/- 34.18
_noise_from_mc_barrel_highr9 = 23.98 # +/- 39.49
_noise_from_mc_endcaps_lowr9 = 91.48 # +/- 37.9
_noise_from_mc_endcaps_highr9 = 33.83 # +/- 42.58
_noise_from_mc_barrel_lownv = 23.27 # +/- 40.83
_noise_from_mc_barrel_highnv = 26.5 # +/- 36.77
_noise_from_mc_endcaps_lownv = 78.04 # +/- 32.36
_noise_from_mc_endcaps_highnv = 93.37 # +/- 31.45
# Collects every Fitter created below so results remain accessible after
# main() returns (e.g. from an interactive ROOT session).
fitters = []
#==============================================================================
def main():
    '''
    Main entry point of execution: run every fit category, then save plots.
    '''
    fit_tasks = (
        do_barrel_highr9_lownv_fits,
        do_barrel_highr9_highnv_fits,
        do_barrel_lowr9_lownv_fits,
        do_barrel_lowr9_highnv_fits,
        do_endcap_highr9_lownv_fits,
        do_endcap_highr9_highnv_fits,
        do_endcap_lowr9_lownv_fits,
        do_endcap_lowr9_highnv_fits,
        do_barrel_lowr9_fits,
        do_barrel_highr9_fits,
        do_endcap_lowr9_fits,
        do_endcap_highr9_fits,
        do_barrel_lownv_fits,
        do_barrel_highnv_fits,
        do_endcap_lownv_fits,
        do_endcap_highnv_fits,
        )
    for task in fit_tasks:
        task()
    canvases.make_plots("png eps root".split())
## End of main().
#==============================================================================
def do_barrel_highr9_lownv_fits():
    '''
    Barrel High R9 Low NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EB_sixie_LowNV_R9Low_0.94_R9High_999_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Barrel_LowNV_HighR9'
    base_title = 'Barrel NVtx < 19 R_{9}^{#gamma} > 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_barrel
    n_mc = _noise_from_mc_barrel_highr9_lownv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End of do_barrel_highr9_lownv_fits()
#==============================================================================
def do_barrel_highr9_highnv_fits():
    '''
    Barrel High R9 High NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EB_sixie_HighNV_R9Low_0.94_R9High_999_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Barrel_HighNV_HighR9'
    base_title = 'Barrel NVtx > 18 R_{9}^{#gamma} > 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_barrel
    n_mc = _noise_from_mc_barrel_highr9_highnv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End of do_barrel_highr9_highnv_fits()
#==============================================================================
def do_barrel_lowr9_lownv_fits():
    '''
    Barrel Low R9 Low NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EB_sixie_LowNV_R9Low_0_R9High_0.94_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Barrel_LowNV_LowR9'
    base_title = 'Barrel NVtx < 19 R_{9}^{#gamma} < 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_barrel
    n_mc = _noise_from_mc_barrel_lowr9_lownv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End of do_barrel_lowr9_lownv_fits()
#==============================================================================
def do_barrel_lowr9_highnv_fits():
    '''
    Barrel Low R9 High NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.

    Consistency fix: the S+N-fixed variant's title previously lacked the
    ", Data Fit" label that every sibling category carries; titles are now
    built uniformly from a common base.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EB_sixie_HighNV_R9Low_0_R9High_0.94_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Barrel_HighNV_LowR9'
    base_title = 'Barrel NVtx > 18 R_{9}^{#gamma} < 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_barrel
    n_mc = _noise_from_mc_barrel_lowr9_highnv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End of do_barrel_lowr9_highnv_fits()
#==============================================================================
def do_endcap_highr9_lownv_fits():
    '''
    End Cap High R9 Low NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EE_sixie_LowNV_R9Low_0.94_R9High_999_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Endcaps_LowNV_HighR9'
    base_title = 'Endcaps NVtx < 19 R_{9}^{#gamma} > 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps
    n_mc = _noise_from_mc_endcaps_highr9_lownv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End of do_endcap_highr9_lownv_fits()
#==============================================================================
def do_endcap_highr9_highnv_fits():
    '''
    End Cap High R9 High NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EE_sixie_HighNV_R9Low_0.94_R9High_999_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Endcaps_HighNV_HighR9'
    base_title = 'Endcaps NVtx > 18 R_{9}^{#gamma} > 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps
    n_mc = _noise_from_mc_endcaps_highr9_highnv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End of do_endcap_highr9_highnv_fits()
#==============================================================================
def do_endcap_lowr9_lownv_fits():
    '''
    End Cap Low R9 Low NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EE_sixie_LowNV_R9Low_0_R9High_0.94_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Endcaps_LowNV_LowR9'
    base_title = 'Endcaps NVtx < 19 R_{9}^{#gamma} < 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps
    n_mc = _noise_from_mc_endcaps_lowr9_lownv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End do_endcap_lowr9_lownv_fits()
def do_endcap_lowr9_highnv_fits():
    '''
    End Cap Low R9 High NV: run the four standard fit variants -- all
    parameters floating, S fixed to test beam, N fixed to MC, both fixed.
    '''
    systematics = 0.2
    graph = 'regressions_resdata_EE_sixie_HighNV_R9Low_0_R9High_0.94_sixie'
    base_name = 'PhotonResolutionVsEt_DataFit_Endcaps_HighNV_LowR9'
    base_title = 'Endcaps NVtx > 18 R_{9}^{#gamma} < 0.94, Data Fit'
    s_tb = _stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps
    n_mc = _noise_from_mc_endcaps_lowr9_highnv
    ## Reference fit with S, N and C all floating.
    floated = Fitter(
        filename = _filename,
        graphname = graph,
        name = base_name,
        title = base_title,
        systematics = systematics,
        )
    floated.run()
    fitters.append(floated)
    ## Constrained variants: (name tag, title tag, fix S, fix N, N seed);
    ## an N seed of None means "seed from the floating fit's result".
    variants = [
        ('_SfromTB', ', S from TB', True, False, None),
        ('_NfromMC', ', N from MC', False, True, n_mc),
        ('_SfromTB_NfromMC', ', S from TB, N from MC', True, True, n_mc),
        ]
    for name_tag, title_tag, fix_s, fix_n, n_seed in variants:
        fitter = Fitter(
            filename = _filename,
            graphname = graph,
            name = base_name + name_tag,
            title = base_title + title_tag,
            systematics = systematics,
            )
        fitter.S.setVal(s_tb)
        fitter.N.setVal(floated.N.getVal() if n_seed is None else n_seed)
        fitter.C.setVal(floated.C.getVal())
        if fix_s:
            fitter.S.setConstant()
        if fix_n:
            fitter.N.setConstant()
        fitter.run()
        fitters.append(fitter)
## End do_endcap_lowr9_highnv_fits()
#================================================================================
def do_barrel_lowr9_fits():
    '''
    Barrel R9 < 0.94: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EB_sixie_R9Low_0_R9High_0.94_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowR9',
        'Barrel R_{9}^{#gamma} < 0.94, Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowR9_SfromTB',
        'Barrel R_{9}^{#gamma} < 0.94, Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowR9_NfromMC',
        'Barrel R_{9}^{#gamma} < 0.94, Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    nfix.N.setVal(_noise_from_mc_barrel_lowr9)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowR9_SfromTB_NfromMC',
        'Barrel R_{9}^{#gamma} < 0.94, Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    snfix.N.setVal(_noise_from_mc_barrel_lowr9)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_barrel_lowr9_fits()
#================================================================================
def do_barrel_highr9_fits():
    '''
    Barrel R9 > 0.94: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EB_sixie_R9Low_0.94_R9High_999_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighR9',
        'Barrel R_{9}^{#gamma} > 0.94, Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighR9_SfromTB',
        'Barrel R_{9}^{#gamma} > 0.94, Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighR9_NfromMC',
        'Barrel R_{9}^{#gamma} > 0.94, Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    nfix.N.setVal(_noise_from_mc_barrel_highr9)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighR9_SfromTB_NfromMC',
        'Barrel R_{9}^{#gamma} > 0.94, Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    snfix.N.setVal(_noise_from_mc_barrel_highr9)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_barrel_highr9_fits()
#================================================================================
def do_endcap_lowr9_fits():
    '''
    Endcaps R9 < 0.94: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EE_sixie_R9Low_0_R9High_0.94_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowR9',
        'Endcaps R_{9}^{#gamma} < 0.94, Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowR9_SfromTB',
        'Endcaps R_{9}^{#gamma} < 0.94, Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowR9_NfromMC',
        'Endcaps R_{9}^{#gamma} < 0.94, Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    nfix.N.setVal(_noise_from_mc_endcaps_lowr9)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowR9_SfromTB_NfromMC',
        'Endcaps R_{9}^{#gamma} < 0.94, Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    snfix.N.setVal(_noise_from_mc_endcaps_lowr9)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_endcap_lowr9_fits()
#================================================================================
def do_endcap_highr9_fits():
    '''
    Endcaps R9 > 0.94: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EE_sixie_R9Low_0.94_R9High_999_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighR9',
        'Endcaps R_{9}^{#gamma} > 0.94, Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighR9_SfromTB',
        'Endcaps R_{9}^{#gamma} > 0.94, Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighR9_NfromMC',
        'Endcaps R_{9}^{#gamma} > 0.94, Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    nfix.N.setVal(_noise_from_mc_endcaps_highr9)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighR9_SfromTB_NfromMC',
        'Endcaps R_{9}^{#gamma} > 0.94, Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    snfix.N.setVal(_noise_from_mc_endcaps_highr9)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_endcap_highr9_fits()
#================================================================================
def do_barrel_lownv_fits():
    '''
    Barrel NVtx < 18: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EB_sixie_LowNV_R9Low_0_R9High_999_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowNV',
        'Barrel NVtx < 18 , Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowNV_SfromTB',
        'Barrel NVtx < 18 , Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowNV_NfromMC',
        'Barrel NVtx < 18 , Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    nfix.N.setVal(_noise_from_mc_barrel_lownv)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_LowNV_SfromTB_NfromMC',
        'Barrel NVtx < 18 , Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    snfix.N.setVal(_noise_from_mc_barrel_lownv)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_barrel_lownv_fits()
#================================================================================
def do_barrel_highnv_fits():
    '''
    Barrel NVtx > 18: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EB_sixie_HighNV_R9Low_0_R9High_999_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighNV',
        'Barrel NVtx > 18 , Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighNV_SfromTB',
        'Barrel NVtx > 18 , Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighNV_NfromMC',
        'Barrel NVtx > 18 , Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    nfix.N.setVal(_noise_from_mc_barrel_highnv)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Barrel_HighNV_SfromTB_NfromMC',
        'Barrel NVtx > 18 , Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_barrel)
    snfix.N.setVal(_noise_from_mc_barrel_highnv)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_barrel_highnv_fits()
#================================================================================
def do_endcap_lownv_fits():
    '''
    Endcaps NVtx < 18: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EE_sixie_LowNV_R9Low_0_R9High_999_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowNV',
        'Endcaps NVtx < 18 , Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowNV_SfromTB',
        'Endcaps NVtx < 18 , Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowNV_NfromMC',
        'Endcaps NVtx < 18 , Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    nfix.N.setVal(_noise_from_mc_endcaps_lownv)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_LowNV_SfromTB_NfromMC',
        'Endcaps NVtx < 18 , Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    snfix.N.setVal(_noise_from_mc_endcaps_lownv)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_endcap_lownv_fits()
#================================================================================
def do_endcap_highnv_fits():
    '''
    Endcaps NVtx > 18: run the resolution-vs-Et data fit in four
    configurations (all free; S from test beam; N from MC; both fixed)
    and append each Fitter to the module-level `fitters` list.
    '''
    systematics = 0.2
    graphname = 'regressions_resdata_EE_sixie_HighNV_R9Low_0_R9High_999_sixie'

    def build(name, title):
        ## All four variants share the input file, graph and systematics.
        return Fitter(
            filename = _filename,
            graphname = graphname,
            name = name,
            title = title,
            systematics = systematics,
        )

    def run_and_store(f):
        ## Execute the fit and keep the fitter for later reporting.
        f.run()
        fitters.append(f)

    ## 1) All parameters floating; seeds N and C for the constrained fits.
    free_fit = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighNV',
        'Endcaps NVtx > 18 , Data Fit',
    )
    run_and_store(free_fit)

    ## 2) Stochastic term fixed to the test-beam value.
    sfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighNV_SfromTB',
        'Endcaps NVtx > 18 , Data Fit, S from TB',
    )
    sfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    sfix.N.setVal(free_fit.N.getVal())
    sfix.C.setVal(free_fit.C.getVal())
    sfix.S.setConstant()
    run_and_store(sfix)

    ## 3) Noise term fixed to the MC value.
    nfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighNV_NfromMC',
        'Endcaps NVtx > 18 , Data Fit, N from MC',
    )
    nfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    nfix.N.setVal(_noise_from_mc_endcaps_highnv)
    nfix.C.setVal(free_fit.C.getVal())
    nfix.N.setConstant()
    run_and_store(nfix)

    ## 4) S fixed to the test beam and N fixed to MC.
    snfix = build(
        'PhotonResolutionVsEt_DataFit_Endcaps_HighNV_SfromTB_NfromMC',
        'Endcaps NVtx > 18 , Data Fit, S from TB, N from MC',
    )
    snfix.S.setVal(_stochastic_from_tb / _mean_sqrt_cosh_eta_endcaps)
    snfix.N.setVal(_noise_from_mc_endcaps_highnv)
    snfix.C.setVal(free_fit.C.getVal())
    snfix.S.setConstant()
    snfix.N.setConstant()
    run_and_store(snfix)
## End do_endcap_highnv_fits()
#================================================================================
## Script entry point: run all fits, then drop into the user's environment.
if __name__ == '__main__':
    main()
    # NOTE(review): the `user` module (runs ~/.pythonrc.py) exists only in
    # Python 2 -- confirm this script is run under Python 2, or drop this
    # import when porting to Python 3.
    import user
|
# Count the distinct ways to chain the joltage adapters (Advent of Code
# 2020, day 10 part 2) with a sliding-window dynamic program.
#
# Fixes over the original: no longer shadows the builtin `input`, closes
# the data file, and drops the dead `last_num`/`diff` bookkeeping whose
# results were never read.
with open('data/10.txt') as fh:
    adapters = sorted(map(int, fh.read().split('\n')))
adapters.append(max(adapters) + 3)  # the device's built-in adapter

window_start = 0  # first index still within 3 jolts of the current adapter
reachable = 0     # sum of dp[window_start:i]: arrangement counts that can reach `num`
dp = [0] * len(adapters)
for i, num in enumerate(adapters):
    # Slide the window: drop adapters more than 3 jolts below `num`.
    while window_start < i and adapters[window_start] < num - 3:
        reachable -= dp[window_start]
        window_start += 1
    # Arrangements ending at `num`: everything that reaches it, plus one
    # for starting directly from the 0-jolt outlet when num <= 3.
    dp[i] = reachable + (num <= 3)
    reachable += dp[i]
print(dp[-1])
|
# merge_elevation_slope_summary.py
# by Ryan Spies (7/22/2014)
# ryan.spies@amec.com
# AMEC
# Description: merges elevation and slope data from individual basin .csv files
# output from ArcGIS Model Builder or automated python script: P:\NWS\GIS\Models\python\extract_basin_DEM_statistics.py
# import script modules (NOTE(review): Python 2 script -- print statements
# and binary-mode csv files; it will not run under Python 3 as-is).
import os
import csv
# Work relative to the project root, two levels above this script.
os.chdir("../..")
maindir = os.getcwd()
####################################################################
#USER INPUT SECTION
####################################################################
# OPTIONAL -> run all basins within an RFC directory
RFC = 'WGRFC_2021'
fx_group = '' # leave blank if not processing by fx group
#FOLDER PATH OF ELEVATION AND SLOPE .CSV FILES FROM MODEL BUILDER OR PYTHON SCRIPT
if fx_group != '':
    file_dir = maindir + '\\GIS\\' + RFC[:5] + os.sep + RFC + '\\Elevation_Slope\\Stats_Out\\' + fx_group + '\\'
else:
    file_dir = "F:\\projects\\2021_twdb_wgrfc_calb\\data\DEM_nhdplus\\Elevation_Slope\\Stats_Out\\"
output_folderPath = "F:\\projects\\2021_twdb_wgrfc_calb\\data\DEM_nhdplus\\Elevation_Slope\\"
non_hydro1k_tasks = ['WGRFC_2021'] # identify tasks that DON'T use the hydro1k DEM (e.g. NHD plus 30m DEM)
####################################################################
#END USER INPUT SECTION
####################################################################
# Open the merged summary file; 'wb' avoids blank rows with the py2 csv module.
if fx_group != '':
    new_file = open(output_folderPath + RFC[:5] + '_' + fx_group + '_' + RFC[-6:] + '_elev_slope_summary.csv', 'wb')
else:
    new_file = open(output_folderPath + RFC + '_elev_slope_summary.csv', 'wb')
csvfile = csv.writer(new_file)
# First line documents the unit conversion applied below.
if RFC in non_hydro1k_tasks:
    new_file.write('Units converted from ?cm? to feet\n')
else:
    new_file.write('Units converted from meters to ft\n')
csvfile.writerow( ('Basin','Min Elev (ft)','Max Elev (ft)','Mean Elev (ft)','Mean Slope (%)') )
# create a list of all the basins in the directory (basin id is the text
# before the first underscore of each stats file name)
basin_files = os.listdir(file_dir)
basins = []
for each in basin_files:
    if each.split('_')[0] not in basins:
        if each[:4] != 'info': # ignore the info directory
            basins.append(each.split('_')[0])
print 'Script is Running...'
# For each basin, read its elevation and slope stats files and write one
# merged summary row.
for basin in basins:
    print basin
    elev_file = open(file_dir + basin + '_elevation_stats_cm.csv', 'rb')
    slope_file = open(file_dir + basin + '_mean_slope_percent.csv', 'rb')
    elev_read = csv.reader(elev_file)
    slope_read = csv.reader(slope_file)
    # The stats of interest live on the second row (row_num == 1) of each file.
    row_num = 0
    for row in elev_read:
        if RFC in non_hydro1k_tasks: # most RFC's using NHD Plus v. 1 (30m resolution with units in cm)
            if row_num == 1:
                min_elev = float(row[3]) / 30.48 # convert cm to ft (NHD Plus DEM)
                mean_elev = float(row[4]) / 30.48 # convert cm to ft (NHD Plus DEM)
                max_elev = float(row[5]) / 30.48 # convert cm to ft (NHD Plus DEM)
            row_num += 1
        else: # RFC using the HYDRO1K (1km resolution with units in meters)
            if row_num == 1:
                min_elev = float(row[3]) * 3.28084 # convert m to ft (HYDRO1K DEM)
                mean_elev = float(row[4]) * 3.28084 # convert m to ft (HYDRO1K DEM)
                max_elev = float(row[5]) * 3.28084 # convert m to ft (HYDRO1K DEM)
            row_num += 1
    row_num = 0
    for row in slope_read:
        if row_num == 1:
            if RFC in non_hydro1k_tasks:
                mean_slope = float(row[2].rstrip())
            else: # RFC uses the HYDRO1K (1km resolution with units in meters)
                mean_slope = float(row[2].rstrip()) * 100
        row_num += 1
    # NOTE(review): if a basin's stats files lack a second row, the values
    # from the previous basin are silently reused -- confirm inputs always
    # have a data row.
    csvfile.writerow((basin,min_elev,max_elev,mean_elev,mean_slope))
    elev_file.close(); slope_file.close()
new_file.close()
print 'Script Complete'
|
# JTSK-350112
# a1_p6.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
"""
Priority queue with list
"""
def is_empty(pq):
    """Return True when the priority queue holds no entries."""
    return len(pq) == 0
def insert_with_priority(pq, x, p):
    """Append value `x` to `pq` tagged with priority `p`."""
    entry = (x, p)
    pq.append(entry)
def pull_highest_priority_element(pq):
    """
    Remove and return the value whose priority number is smallest
    (ties go to the earliest insertion). On an empty queue, report
    underflow and return None.
    """
    if not pq:
        print("Priority Queue Underflow")
        return None
    best = min(pq, key=lambda entry: entry[1])
    pq.remove(best)
    return best[0]
# main program: exercise the queue with four entries, then pull the most
# urgent one (priority 1, inserted first -> 13).
demo_queue = []
for value, priority in ((5, 2), (3, 3), (13, 1), (6, 1)):
    insert_with_priority(demo_queue, value, priority)
print("Element = {}".format(pull_highest_priority_element(demo_queue)))
print("Is empty = {}".format(is_empty(demo_queue)))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 11:30:28 2018
@author: Arthur
"""
#-------------------------------------------------------------------#
# Code for Multidisciplinary Nuclear Scenarios Simulations (CMNSS) #
# Version 0.1 - 12/17/18 #
# Tom Verrier, Romain Pic, Aymeric Delon, Arthur Viette #
# ENS Paris-Saclay #
#-------------------------------------------------------------------#
# HYP 1: le type de reacteurs deploye n'est influence par aucun parametre
# de sociologie
from matplotlib import pyplot as plt
#----------------------------------------------------------------------------#
duration = 60          # simulated span in years
step = 0.25            # time step in years
n_step = int(duration / step)
# Time axis: n_step + 1 samples from 0 to `duration` inclusive.
T = [k * step for k in range(n_step + 1)]
power_UOx = 1.  # GWe per UOx reactor
#----------------------------------------------------------------------------#
# Sociology
# definir une evolution temporelle pour les grandeurs suivantes :
# social movement mobilization (SOC)
# political allies (POL)
# state-industry relationship (SIR)
# arena shift (ANS)
# focusion event (FOC)
# a la fin de cette partie, 8 tableaux : t + SOC,POL,SIR,ANS,FOC
# + policyChange qui donne a partir des 5 gdeurs la "quantite de changement"
# + pNuc la puissance nuc demandee
# hyp : pNuc(t+step)=pNuc(t)*(1-policyChange(t-decisionTime))
# Time series of the five sociological drivers, filled in by the main loop.
soc, pol, sir, ans, foc = [], [], [], [], []
P_0 = 60  # initial installed nuclear power in GWe
policyChange = []   # per-step fraction of power removed by policy
pNuc = [P_0]        # requested nuclear power (GWe)
pNuc_react = [60]   # requested power quantised to whole reactors (GWe)
decisionTime = 0    # delay (in steps) between a policy change and its effect
# Strategie 1 : pas de changement
#def funcSoc(t):
# return(0)
#
#def funcPol(t):
# return(0)
#
#def funcSir(t):
# return(0)
#
#def funcAns(t):
# return(0)
#
#def funcFoc(t):
# return(0)
# Strategie 2 : social movement et political allies apres 20 ans, accident
# entre 10 et 10,5 ans
# /!\ modifie pour l'exemple ...
def funcSoc(t):
    # Social movement mobilization: switches on at t = 20 years.
    return 0 if t < 20 else 1
def funcPol(t):
    # Political allies: appear at t = 20 years.
    return 0 if t < 20 else 1
def funcSir(t):
    # State-industry relationship: no contribution in this scenario.
    return 0
def funcAns(t):
    # Arena shift: no contribution in this scenario.
    return 0
def funcFoc(t):
    # Focusing events: two simulated accidents, each lasting half a year.
    if 10 <= t <= 10.5 or 15 <= t <= 15.5:
        return 0.2
    return 0
# Advance the scenario one step at a time. Each step evaluates the five
# sociological drivers, averages them into a policy-change fraction,
# scales down the requested nuclear power accordingly, and writes the
# whole number of UOx reactors that power buys to the CLASS input file.
with open('inputCLASS_nUOx.txt', 'w') as inputCLASS_nUOx:
    for k in range(len(T) - 1):
        t = k * step
        soc.append(funcSoc(t))
        pol.append(funcPol(t))
        sir.append(funcSir(t))
        ans.append(funcAns(t))
        foc.append(funcFoc(t))
        # Equal-weight average of the five drivers.
        policyChange.append((soc[-1] + pol[-1] + sir[-1] + ans[-1] + foc[-1]) / 5)
        pNuc.append(pNuc[-1] * (1 - policyChange[-1 - decisionTime]))
        pNuc_react.append(int(pNuc[-1] // power_UOx) * power_UOx)
        inputCLASS_nUOx.write(str(int(pNuc[-1] // power_UOx)) + '\n')
plt.plot(T, pNuc)
plt.plot(T, pNuc_react)
plt.xlabel('time(year)')
plt.ylabel('pNuc (GWe)')
plt.title('Nuclear power demand versus time')
plt.show()
# /!\ reprendre la discretisation de la puissance demandee
#----------------------------------------------------------------------------#
# Physics
# cette partie redige un script C++ pour CLASS qui modifie le scenario en
# faisant evoluer la puissance demandee au cours du temps (en ouvrant/fermant)
# des REP en restant toujours >=
# + differentes strategies :
# S1 : que des REP UOx
# S2 : REP UOx et REP MOx
# S3 : S1 puis RNR
# S4 : S2 puis RNR
#
# chacune declinee en Sn(E,typeGestion,BUmax)
# avec E = enrichissement max
# et typeGestion=LIFO/FIFO
# et BUmax le Burn-Up final des réacteurs (libre si non renseigne)
#
# puis appel CLASS, stocke les resultats avec qte tot dechets
# puis pour chaque strategie, execute SMURE pour calculer le alpha_rho max
# de chacune
# Strategy card handed to CLASS: a header line followed by one strategy row.
# TODO: generate the rows with a loop once several strategies are compared.
with open('data_CLASS.txt', 'w') as data_CLASS:
    data_CLASS.write('S E typeGestion BUmax \n')
    data_CLASS.write('1 15 1 58')
#----------------------------------------------------------------------------#
# Economics
# pour chaque strategie, appel de FLORE pour evaluer le regret de chacune
# ainsi que la trajectoire optimale sachant le "contexte social"
# et stocke les resultats
#
# donne aussi cost(Strategy) le cout total estime pour suivre Strategy
# qui comprend Uranium, assurances, constructions centrales, etc.
#----------------------------------------------------------------------------#
# Data Processing
# script pour tracer tous les tableaux en fonction du temps, et d'autres
# comme alpha_rho_max(policyChangeTot) ou cost(policyChangeTot)
# affiche aussi la trajectoire optimale, avec les incertitudes
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import signal
import time
from pathlib import Path
from typing import List, Tuple
import pytest
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.testutil.pants_integration_test import run_pants_with_workdir
from pants.util.dirutil import read_file
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase
pytestmark = pytest.mark.platform_specific_behavior
def lifecycle_stub_cmdline() -> List[str]:
    """Build a pants command line that loads the testprojects plugin and runs
    its always-failing lifecycle-stub-goal (without pantsd)."""
    # Load the testprojects pants-plugins to get some testing tasks and subsystems.
    plugin_src_dir = os.path.join(get_buildroot(), "testprojects/pants-plugins/src/python")
    plugin_pkg_name = "test_pants_plugin"
    return [
        "--no-pantsd",
        f"--pythonpath=+['{plugin_src_dir}']",
        f"--backend-packages=+['{plugin_pkg_name}']",
        # This task will always raise an exception.
        "lifecycle-stub-goal",
    ]
def get_log_file_paths(workdir: str, pid: int) -> Tuple[str, str]:
    """Return the (pid-specific, shared) exception log paths under `workdir`,
    asserting that both exist and are distinct files."""
    per_pid_log = ExceptionSink.exceptions_log_path(for_pid=pid, in_dir=workdir)
    assert os.path.isfile(per_pid_log)
    shared_log = ExceptionSink.exceptions_log_path(in_dir=workdir)
    assert os.path.isfile(shared_log)
    assert per_pid_log != shared_log
    return (per_pid_log, shared_log)
def assert_unhandled_exception_log_matches(pid: int, file_contents: str) -> None:
    """Assert that `file_contents` is an exception log entry whose header
    fields are present and whose pid line matches `pid`."""
    expected_entry = f"""\
timestamp: ([^\n]+)
process title: ([^\n]+)
sys\\.argv: ([^\n]+)
pid: {pid}
Exception caught: \\([^)]*\\)
(.|\n)*
Exception message:.*
"""
    assert re.match(expected_entry, file_contents) is not None
def assert_graceful_signal_log_matches(pid: int, signum, signame, contents: str) -> None:
    """Assert that `contents` records a graceful exit of process `pid`
    triggered by the given signal number/name."""
    expected_entry = """\
timestamp: ([^\n]+)
process title: ([^\n]+)
sys\\.argv: ([^\n]+)
pid: {pid}
Signal {signum} \\({signame}\\) was raised\\. Exiting with failure\\.
""".format(pid=pid, signum=signum, signame=signame)
    assert re.search(expected_entry, contents) is not None
def test_logs_unhandled_exception(tmp_path: Path) -> None:
    """End-to-end check: an exception raised at plugin import time must be
    reported on stderr and recorded in both exception log files."""
    workdir = tmp_path.as_posix()
    result = run_pants_with_workdir(
        # The backtrace should be omitted when --print-stacktrace=False.
        [*lifecycle_stub_cmdline(), "--no-print-stacktrace"],
        workdir=workdir,
        extra_env={"_RAISE_EXCEPTION_ON_IMPORT": "True"},
    )
    result.assert_failure()
    assert re.search("exception during import!", result.stderr)
    per_pid_log, shared_log = get_log_file_paths(workdir, result.pid)
    assert_unhandled_exception_log_matches(result.pid, read_file(per_pid_log))
    assert_unhandled_exception_log_matches(result.pid, read_file(shared_log))
class ExceptionSinkIntegrationTest(PantsDaemonIntegrationTestBase):
    """Integration checks that a running pantsd records signals/faults in
    its exception log files."""

    # Run against the real repo rather than a hermetic environment.
    hermetic = False

    def test_dumps_logs_on_signal(self):
        """Send signals which are handled, but don't get converted into a KeyboardInterrupt."""
        signal_names = {
            signal.SIGQUIT: "SIGQUIT",
            signal.SIGTERM: "SIGTERM",
        }
        for signum, signame in signal_names.items():
            with self.pantsd_successful_run_context() as ctx:
                ctx.runner(["help"])
                pid = ctx.checker.assert_started()
                os.kill(pid, signum)
                # NOTE(review): fixed wait; assumes the daemon flushes its
                # logs within 5 seconds of receiving the signal.
                time.sleep(5)
                # Check that the logs show a graceful exit by signal.
                pid_specific_log_file, shared_log_file = get_log_file_paths(ctx.workdir, pid)
                assert_graceful_signal_log_matches(
                    pid, signum, signame, read_file(pid_specific_log_file)
                )
                assert_graceful_signal_log_matches(pid, signum, signame, read_file(shared_log_file))

    def test_dumps_traceback_on_sigabrt(self):
        # SIGABRT sends a traceback to the log file for the current process thanks to
        # faulthandler.enable().
        with self.pantsd_successful_run_context() as ctx:
            ctx.runner(["help"])
            pid = ctx.checker.assert_started()
            os.kill(pid, signal.SIGABRT)
            # NOTE(review): fixed wait, same assumption as above.
            time.sleep(5)
            # Check that the logs show an abort signal and the beginning of a traceback.
            pid_specific_log_file, shared_log_file = get_log_file_paths(ctx.workdir, pid)
            regex_str = """\
Fatal Python error: Aborted
Thread [^\n]+ \\(most recent call first\\):
"""
            assert re.search(regex_str, read_file(pid_specific_log_file))
            # faulthandler.enable() only allows use of a single logging file at once for fatal tracebacks.
            assert "" == read_file(shared_log_file)
|
import heapq

# Demonstrate min-heap ordering: push 1, 3, 2, then pop everything back
# out in ascending order, printing the heap after the first two pops.
a = []
for value in (1, 3, 2):
    heapq.heappush(a, value)
print(a)
for index in range(3):
    k = heapq.heappop(a)
    print(k)
    if index < 2:
        print("a ", a)
def seqlist(first, c, l):
    """Return the first ``l`` terms of an arithmetic sequence.

    Args:
        first: the first term of the sequence.
        c: the common difference added between consecutive terms.
        l: the number of terms to return.

    Returns:
        A list of ``l`` terms: [first, first + c, first + 2*c, ...].
    """
    # Bug fix: the original always emitted at least one term, so l == 0
    # (or a negative l) incorrectly returned [first] instead of [].
    return [first + c * i for i in range(l)]
'''
In this kata, you will write an arithmetic list which is basically a list that
contains consecutive terms in the sequence.
You will be given three parameters :
first the first term in the sequence
c the constant that you are going to ADD ( since it is an arithmetic sequence...)
l the number of terms that should be returned
'''
|
from typing import Dict, List, Optional, Tuple, Union
import torch
import torchvision
from torch import nn, Tensor
from torchvision import ops
from torchvision.transforms import functional as F, InterpolationMode, transforms as T
def _flip_coco_person_keypoints(kps, width):
flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
flipped_data = kps[:, flip_inds]
flipped_data[..., 0] = width - flipped_data[..., 0]
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = flipped_data[..., 2] == 0
flipped_data[inds] = 0
return flipped_data
class Compose:
    """Chain several ``(image, target)`` transforms into a single callable."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        # Apply each transform in order, threading both values through.
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target
class RandomHorizontalFlip(T.RandomHorizontalFlip):
    """Horizontal flip that also mirrors detection targets (boxes, masks,
    keypoints).  Flip probability ``p`` is inherited from the parent class."""

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        if torch.rand(1) < self.p:
            image = F.hflip(image)
            if target is not None:
                _, _, width = F.get_dimensions(image)
                # Mirror x-coordinates: (x1, x2) -> (W - x2, W - x1).
                target["boxes"][:, [0, 2]] = width - target["boxes"][:, [2, 0]]
                if "masks" in target:
                    target["masks"] = target["masks"].flip(-1)
                if "keypoints" in target:
                    keypoints = target["keypoints"]
                    keypoints = _flip_coco_person_keypoints(keypoints, width)
                    target["keypoints"] = keypoints
        return image, target
class PILToTensor(nn.Module):
    """Convert a PIL image to a tensor without value rescaling; the target
    passes through unchanged."""

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        image = F.pil_to_tensor(image)
        return image, target
class ToDtype(nn.Module):
    """Cast the image to ``dtype``; optionally rescale its value range.

    Args:
        dtype: target dtype for the image tensor.
        scale: when True, convert via ``F.convert_image_dtype`` (which also
            rescales values, e.g. uint8 [0, 255] -> float [0, 1]); otherwise
            perform a plain cast that preserves the stored values.
    """

    def __init__(self, dtype: torch.dtype, scale: bool = False) -> None:
        super().__init__()
        self.dtype = dtype
        self.scale = scale

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        if self.scale:
            # Value-rescaling conversion via the torchvision helper.
            return F.convert_image_dtype(image, self.dtype), target
        # Plain cast: only the dtype changes, values are kept as-is.
        return image.to(dtype=self.dtype), target
class RandomIoUCrop(nn.Module):
    """SSD-style random IoU crop: repeatedly sample crop windows until one
    satisfies a randomly drawn minimum-IoU constraint against the ground-truth
    boxes, then crop the image and filter/shift the targets to match.

    Args:
        min_scale / max_scale: sampled crop side lengths, as fractions of the
            original width/height.
        min_aspect_ratio / max_aspect_ratio: accepted aspect-ratio range for a
            sampled crop.
        sampler_options: candidate minimum-IoU thresholds; a value >= 1.0
            encodes the "leave the image unchanged" option.
        trials: number of crop attempts per sampled threshold.
    """

    def __init__(
        self,
        min_scale: float = 0.3,
        max_scale: float = 1.0,
        min_aspect_ratio: float = 0.5,
        max_aspect_ratio: float = 2.0,
        sampler_options: Optional[List[float]] = None,
        trials: int = 40,
    ):
        super().__init__()
        # Configuration similar to https://github.com/weiliu89/caffe/blob/ssd/examples/ssd/ssd_coco.py#L89-L174
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.min_aspect_ratio = min_aspect_ratio
        self.max_aspect_ratio = max_aspect_ratio
        if sampler_options is None:
            sampler_options = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]
        self.options = sampler_options
        self.trials = trials

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        if target is None:
            raise ValueError("The targets can't be None for this transform.")

        if isinstance(image, torch.Tensor):
            if image.ndimension() not in {2, 3}:
                raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.")
            elif image.ndimension() == 2:
                image = image.unsqueeze(0)

        _, orig_h, orig_w = F.get_dimensions(image)

        # NOTE(review): loops until a trial succeeds or the "leave as-is"
        # option is drawn; with adversarial inputs this can iterate many times.
        while True:
            # sample an option
            idx = int(torch.randint(low=0, high=len(self.options), size=(1,)))
            min_jaccard_overlap = self.options[idx]
            if min_jaccard_overlap >= 1.0:  # a value larger than 1 encodes the leave as-is option
                return image, target

            for _ in range(self.trials):
                # check the aspect ratio limitations
                r = self.min_scale + (self.max_scale - self.min_scale) * torch.rand(2)
                new_w = int(orig_w * r[0])
                new_h = int(orig_h * r[1])
                aspect_ratio = new_w / new_h
                if not (self.min_aspect_ratio <= aspect_ratio <= self.max_aspect_ratio):
                    continue

                # check for 0 area crops
                r = torch.rand(2)
                left = int((orig_w - new_w) * r[0])
                top = int((orig_h - new_h) * r[1])
                right = left + new_w
                bottom = top + new_h
                if left == right or top == bottom:
                    continue

                # check for any valid boxes with centers within the crop area
                cx = 0.5 * (target["boxes"][:, 0] + target["boxes"][:, 2])
                cy = 0.5 * (target["boxes"][:, 1] + target["boxes"][:, 3])
                is_within_crop_area = (left < cx) & (cx < right) & (top < cy) & (cy < bottom)
                if not is_within_crop_area.any():
                    continue

                # check at least 1 box with jaccard limitations
                boxes = target["boxes"][is_within_crop_area]
                ious = torchvision.ops.boxes.box_iou(
                    boxes, torch.tensor([[left, top, right, bottom]], dtype=boxes.dtype, device=boxes.device)
                )
                if ious.max() < min_jaccard_overlap:
                    continue

                # keep only valid boxes and perform cropping
                target["boxes"] = boxes
                target["labels"] = target["labels"][is_within_crop_area]
                # Shift box coordinates into the crop frame and clamp to its bounds.
                target["boxes"][:, 0::2] -= left
                target["boxes"][:, 1::2] -= top
                target["boxes"][:, 0::2].clamp_(min=0, max=new_w)
                target["boxes"][:, 1::2].clamp_(min=0, max=new_h)
                image = F.crop(image, top, left, new_h, new_w)

                return image, target
class RandomZoomOut(nn.Module):
    """With probability ``p``, paste the image at a random position on a larger
    canvas ("zoom out") filled with ``fill``, shifting boxes accordingly.

    Args:
        fill: per-channel fill colour for the expanded border (default black).
        side_range: the canvas/image side ratio is sampled uniformly from here;
            must satisfy 1.0 <= side_range[0] <= side_range[1].
        p: probability of applying the transform.
    """

    def __init__(
        self, fill: Optional[List[float]] = None, side_range: Tuple[float, float] = (1.0, 4.0), p: float = 0.5
    ):
        super().__init__()
        if fill is None:
            fill = [0.0, 0.0, 0.0]
        self.fill = fill
        self.side_range = side_range
        if side_range[0] < 1.0 or side_range[0] > side_range[1]:
            raise ValueError(f"Invalid canvas side range provided {side_range}.")
        self.p = p

    @torch.jit.unused
    def _get_fill_value(self, is_pil):
        # type: (bool) -> int
        # We fake the type to make it work on JIT
        return tuple(int(x) for x in self.fill) if is_pil else 0

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        if isinstance(image, torch.Tensor):
            if image.ndimension() not in {2, 3}:
                raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.")
            elif image.ndimension() == 2:
                image = image.unsqueeze(0)

        if torch.rand(1) >= self.p:
            return image, target

        _, orig_h, orig_w = F.get_dimensions(image)

        # Sample the canvas scale and a random placement of the image on it.
        r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0])
        canvas_width = int(orig_w * r)
        canvas_height = int(orig_h * r)

        r = torch.rand(2)
        left = int((canvas_width - orig_w) * r[0])
        top = int((canvas_height - orig_h) * r[1])
        right = canvas_width - (left + orig_w)
        bottom = canvas_height - (top + orig_h)

        if torch.jit.is_scripting():
            fill = 0
        else:
            fill = self._get_fill_value(F._is_pil_image(image))

        image = F.pad(image, [left, top, right, bottom], fill=fill)
        if isinstance(image, torch.Tensor):
            # PyTorch's pad supports only integers on fill. So we need to overwrite the colour
            # Chained assignment paints all four border strips with the fill colour.
            v = torch.tensor(self.fill, device=image.device, dtype=image.dtype).view(-1, 1, 1)
            image[..., :top, :] = image[..., :, :left] = image[..., (top + orig_h) :, :] = image[
                ..., :, (left + orig_w) :
            ] = v

        if target is not None:
            # Boxes move with the image's new offset on the canvas.
            target["boxes"][:, 0::2] += left
            target["boxes"][:, 1::2] += top

        return image, target
class RandomPhotometricDistort(nn.Module):
    """SSD-style photometric distortion: independently jitter brightness,
    contrast, saturation and hue (each applied with probability ``p``) and,
    with probability ``p``, randomly permute the colour channels.  Whether
    contrast is applied before or after the saturation/hue pair is itself
    decided randomly."""

    def __init__(
        self,
        contrast: Tuple[float, float] = (0.5, 1.5),
        saturation: Tuple[float, float] = (0.5, 1.5),
        hue: Tuple[float, float] = (-0.05, 0.05),
        brightness: Tuple[float, float] = (0.875, 1.125),
        p: float = 0.5,
    ):
        super().__init__()
        self._brightness = T.ColorJitter(brightness=brightness)
        self._contrast = T.ColorJitter(contrast=contrast)
        self._hue = T.ColorJitter(hue=hue)
        self._saturation = T.ColorJitter(saturation=saturation)
        self.p = p

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        if isinstance(image, torch.Tensor):
            if image.ndimension() not in {2, 3}:
                raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.")
            elif image.ndimension() == 2:
                image = image.unsqueeze(0)

        # One random draw per potential operation (7 in total).
        r = torch.rand(7)

        if r[0] < self.p:
            image = self._brightness(image)

        # Randomly place contrast before or after the saturation/hue pair.
        contrast_before = r[1] < 0.5
        if contrast_before:
            if r[2] < self.p:
                image = self._contrast(image)

        if r[3] < self.p:
            image = self._saturation(image)

        if r[4] < self.p:
            image = self._hue(image)

        if not contrast_before:
            if r[5] < self.p:
                image = self._contrast(image)

        if r[6] < self.p:
            # Channel shuffle requires tensor data; PIL images round-trip.
            channels, _, _ = F.get_dimensions(image)
            permutation = torch.randperm(channels)

            is_pil = F._is_pil_image(image)
            if is_pil:
                image = F.pil_to_tensor(image)
                image = F.convert_image_dtype(image)
            image = image[..., permutation, :, :]
            if is_pil:
                image = F.to_pil_image(image)

        return image, target
class ScaleJitter(nn.Module):
    """Randomly resizes the image and its bounding boxes within the specified scale range.

    The class implements the Scale Jitter augmentation as described in the paper
    `"Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" <https://arxiv.org/abs/2012.07177>`_.

    Args:
        target_size (tuple of ints): The target size for the transform provided in (height, width) format.
        scale_range (tuple of floats): scaling factor interval, e.g (a, b), then scale is randomly sampled from the
            range a <= scale <= b.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
        antialias (bool): whether to apply antialiasing when resizing.
    """

    def __init__(
        self,
        target_size: Tuple[int, int],
        scale_range: Tuple[float, float] = (0.1, 2.0),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        antialias=True,
    ):
        super().__init__()
        self.target_size = target_size
        self.scale_range = scale_range
        self.interpolation = interpolation
        self.antialias = antialias

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        if isinstance(image, torch.Tensor):
            if image.ndimension() not in {2, 3}:
                raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.")
            elif image.ndimension() == 2:
                image = image.unsqueeze(0)

        _, orig_height, orig_width = F.get_dimensions(image)

        # Sample a scale, then fit the scaled image inside target_size.
        # NOTE(review): target_size[1] (width) is divided by height and
        # target_size[0] (height) by width here — confirm the intended pairing.
        scale = self.scale_range[0] + torch.rand(1) * (self.scale_range[1] - self.scale_range[0])
        r = min(self.target_size[1] / orig_height, self.target_size[0] / orig_width) * scale
        new_width = int(orig_width * r)
        new_height = int(orig_height * r)

        image = F.resize(image, [new_height, new_width], interpolation=self.interpolation, antialias=self.antialias)

        if target is not None:
            # Rescale boxes (and masks) by the same factors as the image.
            target["boxes"][:, 0::2] *= new_width / orig_width
            target["boxes"][:, 1::2] *= new_height / orig_height
            if "masks" in target:
                target["masks"] = F.resize(
                    target["masks"],
                    [new_height, new_width],
                    interpolation=InterpolationMode.NEAREST,
                    antialias=self.antialias,
                )

        return image, target
class FixedSizeCrop(nn.Module):
    """Crop to at most ``(crop_height, crop_width)``, then pad up to exactly
    that size, updating boxes/labels/masks to match."""

    def __init__(self, size, fill=0, padding_mode="constant"):
        super().__init__()
        size = tuple(T._setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
        self.crop_height = size[0]
        self.crop_width = size[1]
        self.fill = fill  # TODO: Fill is currently respected only on PIL. Apply tensor patch.
        self.padding_mode = padding_mode

    def _pad(self, img, target, padding):
        """Pad image (and masks) and shift boxes by the left/top padding."""
        # Taken from the functional_tensor.py pad
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        elif len(padding) == 1:
            pad_left = pad_right = pad_top = pad_bottom = padding[0]
        elif len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        else:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

        padding = [pad_left, pad_top, pad_right, pad_bottom]
        img = F.pad(img, padding, self.fill, self.padding_mode)
        if target is not None:
            target["boxes"][:, 0::2] += pad_left
            target["boxes"][:, 1::2] += pad_top
            if "masks" in target:
                target["masks"] = F.pad(target["masks"], padding, 0, "constant")

        return img, target

    def _crop(self, img, target, top, left, height, width):
        """Crop the image; clamp boxes to the crop and drop degenerate ones."""
        img = F.crop(img, top, left, height, width)
        if target is not None:
            boxes = target["boxes"]
            boxes[:, 0::2] -= left
            boxes[:, 1::2] -= top
            boxes[:, 0::2].clamp_(min=0, max=width)
            boxes[:, 1::2].clamp_(min=0, max=height)

            # Keep only boxes that still have positive area after clamping.
            is_valid = (boxes[:, 0] < boxes[:, 2]) & (boxes[:, 1] < boxes[:, 3])

            target["boxes"] = boxes[is_valid]
            target["labels"] = target["labels"][is_valid]
            if "masks" in target:
                target["masks"] = F.crop(target["masks"][is_valid], top, left, height, width)

        return img, target

    def forward(self, img, target=None):
        _, height, width = F.get_dimensions(img)
        new_height = min(height, self.crop_height)
        new_width = min(width, self.crop_width)

        if new_height != height or new_width != width:
            offset_height = max(height - self.crop_height, 0)
            offset_width = max(width - self.crop_width, 0)

            # NOTE(review): a single random draw positions both axes, so the
            # vertical and horizontal offsets are correlated — confirm intended.
            r = torch.rand(1)
            top = int(offset_height * r)
            left = int(offset_width * r)

            img, target = self._crop(img, target, top, left, new_height, new_width)

        pad_bottom = max(self.crop_height - new_height, 0)
        pad_right = max(self.crop_width - new_width, 0)
        if pad_bottom != 0 or pad_right != 0:
            img, target = self._pad(img, target, [0, 0, pad_right, pad_bottom])

        return img, target
class RandomShortestSize(nn.Module):
    """Resize so the shorter side equals a randomly chosen ``min_size`` while
    keeping the longer side within ``max_size``; boxes/masks scale to match."""

    def __init__(
        self,
        min_size: Union[List[int], Tuple[int], int],
        max_size: int,
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ):
        super().__init__()
        # Normalize min_size to a list of candidate shortest-side lengths.
        self.min_size = [min_size] if isinstance(min_size, int) else list(min_size)
        self.max_size = max_size
        self.interpolation = interpolation

    def forward(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        _, orig_height, orig_width = F.get_dimensions(image)

        # Pick one candidate shortest-side length at random.
        min_size = self.min_size[torch.randint(len(self.min_size), (1,)).item()]
        # Scale factor limited by both the shortest- and longest-side constraints.
        r = min(min_size / min(orig_height, orig_width), self.max_size / max(orig_height, orig_width))

        new_width = int(orig_width * r)
        new_height = int(orig_height * r)

        image = F.resize(image, [new_height, new_width], interpolation=self.interpolation)

        if target is not None:
            target["boxes"][:, 0::2] *= new_width / orig_width
            target["boxes"][:, 1::2] *= new_height / orig_height
            if "masks" in target:
                target["masks"] = F.resize(
                    target["masks"], [new_height, new_width], interpolation=InterpolationMode.NEAREST
                )

        return image, target
def _copy_paste(
    image: torch.Tensor,
    target: Dict[str, Tensor],
    paste_image: torch.Tensor,
    paste_target: Dict[str, Tensor],
    blending: bool = True,
    resize_interpolation: F.InterpolationMode = F.InterpolationMode.BILINEAR,
) -> Tuple[torch.Tensor, Dict[str, Tensor]]:
    """Paste a random subset of instances from ``(paste_image, paste_target)``
    onto ``(image, target)`` and return the composited image and merged target.

    Core operation of "Simple Copy-Paste" (https://arxiv.org/abs/2012.07177).
    """
    # Random paste targets selection:
    num_masks = len(paste_target["masks"])

    if num_masks < 1:
        # Such degenerate case with num_masks=0 can happen with LSJ
        # Let's just return (image, target)
        return image, target

    # We have to please torch script by explicitly specifying dtype as torch.long
    # Sampling with replacement + unique gives a random-sized subset of instances.
    random_selection = torch.randint(0, num_masks, (num_masks,), device=paste_image.device)
    random_selection = torch.unique(random_selection).to(torch.long)

    paste_masks = paste_target["masks"][random_selection]
    paste_boxes = paste_target["boxes"][random_selection]
    paste_labels = paste_target["labels"][random_selection]

    masks = target["masks"]

    # We resize source and paste data if they have different sizes
    # This is something we introduced here as originally the algorithm works
    # on equal-sized data (for example, coming from LSJ data augmentations)
    size1 = image.shape[-2:]
    size2 = paste_image.shape[-2:]
    if size1 != size2:
        paste_image = F.resize(paste_image, size1, interpolation=resize_interpolation)
        paste_masks = F.resize(paste_masks, size1, interpolation=F.InterpolationMode.NEAREST)
        # resize bboxes:
        ratios = torch.tensor((size1[1] / size2[1], size1[0] / size2[0]), device=paste_boxes.device)
        paste_boxes = paste_boxes.view(-1, 2, 2).mul(ratios).view(paste_boxes.shape)

    # Union of the selected instance masks; optionally soften its edges.
    paste_alpha_mask = paste_masks.sum(dim=0) > 0

    if blending:
        paste_alpha_mask = F.gaussian_blur(
            paste_alpha_mask.unsqueeze(0),
            kernel_size=(5, 5),
            sigma=[
                2.0,
            ],
        )

    # Copy-paste images:
    image = (image * (~paste_alpha_mask)) + (paste_image * paste_alpha_mask)

    # Copy-paste masks:
    # Source masks are occluded by the pasted region; fully covered ones drop out.
    masks = masks * (~paste_alpha_mask)
    non_all_zero_masks = masks.sum((-1, -2)) > 0
    masks = masks[non_all_zero_masks]

    # Do a shallow copy of the target dict
    out_target = {k: v for k, v in target.items()}

    out_target["masks"] = torch.cat([masks, paste_masks])

    # Copy-paste boxes and labels
    # Source boxes are recomputed from the occluded masks.
    boxes = ops.masks_to_boxes(masks)
    out_target["boxes"] = torch.cat([boxes, paste_boxes])

    labels = target["labels"][non_all_zero_masks]
    out_target["labels"] = torch.cat([labels, paste_labels])

    # Update additional optional keys: area and iscrowd if exist
    if "area" in target:
        out_target["area"] = out_target["masks"].sum((-1, -2)).to(torch.float32)

    if "iscrowd" in target and "iscrowd" in paste_target:
        # target['iscrowd'] size can be differ from mask size (non_all_zero_masks)
        # For example, if previous transforms geometrically modifies masks/boxes/labels but
        # does not update "iscrowd"
        if len(target["iscrowd"]) == len(non_all_zero_masks):
            iscrowd = target["iscrowd"][non_all_zero_masks]
            paste_iscrowd = paste_target["iscrowd"][random_selection]
            out_target["iscrowd"] = torch.cat([iscrowd, paste_iscrowd])

    # Check for degenerated boxes and remove them
    boxes = out_target["boxes"]
    degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
    if degenerate_boxes.any():
        valid_targets = ~degenerate_boxes.any(dim=1)

        out_target["boxes"] = boxes[valid_targets]
        out_target["masks"] = out_target["masks"][valid_targets]
        out_target["labels"] = out_target["labels"][valid_targets]

        if "area" in out_target:
            out_target["area"] = out_target["area"][valid_targets]
        if "iscrowd" in out_target and len(out_target["iscrowd"]) == len(valid_targets):
            out_target["iscrowd"] = out_target["iscrowd"][valid_targets]

    return image, out_target
class SimpleCopyPaste(torch.nn.Module):
    """Batch-level Simple Copy-Paste augmentation: each sample receives pasted
    instances from another sample in the (cyclically shifted) batch."""

    def __init__(self, blending=True, resize_interpolation=F.InterpolationMode.BILINEAR):
        super().__init__()
        self.resize_interpolation = resize_interpolation
        self.blending = blending

    def forward(
        self, images: List[torch.Tensor], targets: List[Dict[str, Tensor]]
    ) -> Tuple[List[torch.Tensor], List[Dict[str, Tensor]]]:
        torch._assert(
            isinstance(images, (list, tuple)) and all([isinstance(v, torch.Tensor) for v in images]),
            "images should be a list of tensors",
        )
        torch._assert(
            isinstance(targets, (list, tuple)) and len(images) == len(targets),
            "targets should be a list of the same size as images",
        )
        for target in targets:
            # Can not check for instance type dict with inside torch.jit.script
            # torch._assert(isinstance(target, dict), "targets item should be a dict")
            for k in ["masks", "boxes", "labels"]:
                torch._assert(k in target, f"Key {k} should be present in targets")
                torch._assert(isinstance(target[k], torch.Tensor), f"Value for the key {k} should be a tensor")

        # images = [t1, t2, ..., tN]
        # Let's define paste_images as shifted list of input images
        # NOTE(review): [-1:] + [:-1] rolls right, producing [tN, t1, ..., tN-1]
        # (the original comment claimed [t2, ..., tN, t1]).
        # FYI: in TF they mix data on the dataset level
        images_rolled = images[-1:] + images[:-1]
        targets_rolled = targets[-1:] + targets[:-1]

        output_images: List[torch.Tensor] = []
        output_targets: List[Dict[str, Tensor]] = []

        for image, target, paste_image, paste_target in zip(images, targets, images_rolled, targets_rolled):
            output_image, output_data = _copy_paste(
                image,
                target,
                paste_image,
                paste_target,
                blending=self.blending,
                resize_interpolation=self.resize_interpolation,
            )
            output_images.append(output_image)
            output_targets.append(output_data)

        return output_images, output_targets

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}(blending={self.blending}, resize_interpolation={self.resize_interpolation})"
        return s
|
from django.utils import timezone
# Bug fix: gettext_lazy lives in django.utils.translation, not
# django.utils.text — the previous import raised ImportError at module load.
from django.utils.translation import gettext_lazy as _
class Placeholder(object):
    """Dummy identity/contact values used to fill in template previews."""

    # Name fields.
    name = "John"
    first_name = "John"
    last_name = "Doe"
    middle_name = "Samantha"
    fullname = "Jane Doe"

    # Contact fields.
    phone = "+44 0000 00000"
    email = "mail@email.com"
    website = "https://example.com"

    # Evaluated once, at class-definition time.
    year = timezone.now().year

    def __init__(self, country="UK"):
        # `country` is accepted for API compatibility but currently unused.
        pass
# Supported upload formats, grouped by family.  Each entry pairs a
# human-readable label ("type") with a file extension and its MIME type.
HTML_MIME_TYPES = [
    {
        "type": "HTML Document",
        "extension": ".html",
        "mime": "text/html",
    },
    {
        "type": "HTML Document",
        "extension": ".htm",
        "mime": "text/html",
    },
]

# NOTE(review): "audio/mp3" is a non-standard alias of "audio/mpeg"; both are
# kept, presumably to match whatever clients send — confirm before removing.
MEDIA_MIME_TYPES = [
    {
        "type": "Audio File",
        "extension": ".mp3",
        "mime": "audio/mpeg",
    },
    {
        "type": "Audio File",
        "extension": ".mp3",
        "mime": "audio/mp3",
    },
]

PDF_MIME_TYPES = [
    {
        "type": "Adobe PDF",
        "extension": ".pdf",
        "mime": "application/pdf",
    },
]

# NOTE(review): "image/jpg" is non-standard ("image/jpeg" is the registered
# type) — likely kept for lenient clients; confirm before changing.
IMAGE_MIME_TYPES = [
    {
        "type": "Image (PNG)",
        "extension": ".png",
        "mime": "image/png",
    },
    {
        "type": "Image (Jpeg)",
        "extension": ".jpg",
        "mime": "image/jpg",
    },
    {
        "type": "Image (Jpeg)",
        "extension": ".jpeg",
        "mime": "image/jpeg",
    },
    {
        "type": "Image (GIF)",
        "extension": ".gif",
        "mime": "image/gif",
    },
    {
        "type": "Image (TIFF)",
        "extension": ".tiff",
        "mime": "image/tiff",
    },
]
# Microsoft Office formats (legacy binary, OOXML, and macro-enabled variants).
# Every entry now carries a human-readable "type" label so consumers can rely
# on the key being present (several entries previously omitted it).
MS_MIME_TYPES = [
    {
        "type": "Microsoft Word Document",
        "extension": ".doc",
        "mime": "application/msword",
    },
    {
        "type": "Microsoft Word Document (Template)",
        "extension": ".dot",
        "mime": "application/msword",
    },
    {
        "type": "Microsoft Word Document",
        "extension": ".docx",
        "mime": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    },
    {
        "type": "Microsoft Word Document (Template)",
        "extension": ".dotx",
        "mime": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
    },
    {
        "type": "Microsoft Word Document (Macro-Enabled)",
        "extension": ".docm",
        "mime": "application/vnd.ms-word.document.macroEnabled.12",
    },
    {
        "type": "Microsoft Word Document (Macro-Enabled Template)",
        "extension": ".dotm",
        "mime": "application/vnd.ms-word.template.macroEnabled.12",
    },
    {
        "type": "Microsoft Excel",
        "extension": ".xls",
        "mime": "application/vnd.ms-excel",
    },
    {
        "type": "Microsoft Excel",
        "extension": ".xlt",
        "mime": "application/vnd.ms-excel",
    },
    {
        "type": "Microsoft Excel",
        "extension": ".xla",
        "mime": "application/vnd.ms-excel",
    },
    {
        "type": "Microsoft Excel",
        "extension": ".xlsx",
        "mime": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    },
    {
        "type": "Microsoft Excel (Template)",
        "extension": ".xltx",
        "mime": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
    },
    {
        "type": "Microsoft Excel (Macro-Enabled)",
        "extension": ".xlsm",
        "mime": "application/vnd.ms-excel.sheet.macroEnabled.12",
    },
    {
        "type": "Microsoft Excel (Macro-Enabled Template)",
        "extension": ".xltm",
        "mime": "application/vnd.ms-excel.template.macroEnabled.12",
    },
    {
        "type": "Microsoft Excel (Macro-Enabled Add-In)",
        "extension": ".xlam",
        "mime": "application/vnd.ms-excel.addin.macroEnabled.12",
    },
    {
        "type": "Microsoft Excel (Binary, Macro-Enabled)",
        "extension": ".xlsb",
        "mime": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
    },
    {
        "type": "Microsoft Powerpoint",
        "extension": ".ppt",
        "mime": "application/vnd.ms-powerpoint",
    },
    {
        "type": "Microsoft Powerpoint",
        "extension": ".pot",
        "mime": "application/vnd.ms-powerpoint",
    },
    {
        "type": "Microsoft Powerpoint",
        "extension": ".pps",
        "mime": "application/vnd.ms-powerpoint",
    },
    {
        "type": "Microsoft Powerpoint",
        "extension": ".ppa",
        "mime": "application/vnd.ms-powerpoint",
    },
    {
        "type": "Microsoft Powerpoint",
        "extension": ".pptx",
        "mime": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
    },
    {
        "type": "Microsoft Powerpoint",
        "extension": ".potx",
        "mime": "application/vnd.openxmlformats-officedocument.presentationml.template",
    },
    {
        "type": "Microsoft Powerpoint",
        "extension": ".ppsx",
        "mime": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
    },
    {
        "type": "Microsoft Powerpoint (Macro-Enabled Add-In)",
        "extension": ".ppam",
        "mime": "application/vnd.ms-powerpoint.addin.macroEnabled.12",
    },
    {
        "type": "Microsoft Powerpoint (Macro-Enabled)",
        "extension": ".pptm",
        "mime": "application/vnd.ms-powerpoint.presentation.macroEnabled.12",
    },
    {
        "type": "Microsoft Powerpoint (Macro-Enabled Template)",
        "extension": ".potm",
        # Bug fix: .potm is a *template*; the MIME previously duplicated the
        # .pptm presentation type.
        "mime": "application/vnd.ms-powerpoint.template.macroEnabled.12",
    },
    {
        "type": "Microsoft Powerpoint (Macro-Enabled Slideshow)",
        "extension": ".ppsm",
        "mime": "application/vnd.ms-powerpoint.slideshow.macroEnabled.12",
    },
]
# Flat list of every supported MIME-type entry, concatenated by family.
MIME_TYPES = PDF_MIME_TYPES + IMAGE_MIME_TYPES + MS_MIME_TYPES + HTML_MIME_TYPES + MEDIA_MIME_TYPES
|
import string
# Search for "super5" numbers: values n for which 5 * n**5 contains the digit
# run "55555".  NOTE(review): `string` is imported but never used here.
i=input("Enter the range of upper bound : ")
print("\nsuper5 number which contains 5 5s together\n")
for n in range(int(i)):
    x=5*n**5
    # str.find returns -1 when the substring is absent.
    if (str.find(str(x),'55555')!=-1):
        print(n,x)
"""
deepak@deepak-Lenovo-ideapad-320-15IKB:~/mycglab$ python3 super5.py
Enter the range of upper bound : 10000
super5 number which contains 5 5s together
4602 10320555555665840160
5517 25555531873653736785
7539 121769555550158808495
deepak@deepak-Lenovo-ideapad-320-15IKB:~/mycglab$ python3 super5.py
Enter the range of upper bound : 1000
super5 number which contains 5 5s together
deepak@deepak-Lenovo-ideapad-320-15IKB:~/mycglab$
"""
|
#!/usr/bin/env python
# coding:utf-8
from __future__ import absolute_import, unicode_literals
from jspider.cli import cli
__author__ = "golden"
__date__ = '2018/6/9'
# Script entry point: delegate to the jspider command-line interface.
if __name__ == '__main__':
    cli()
|
from unittest import TestCase
from blog import manage |
from appconfig.tasks import *

# Register the shared appconfig fabric tasks for this app.
init()


@task_app_from_environment
def shutdown(app):
    """Stop the app (no maintenance window) and back up its DB to CDSTAR."""
    stop.execute_inner(app, maintenance_hours=None)
    upload_db_to_cdstar(app)


@task_app_from_environment
def backup_to_cdstar(app):
    """Upload the app's database dump to CDSTAR without stopping the app."""
    upload_db_to_cdstar(app)
|
'''
Created on Nov 5, 2011
@author: jason
'''
import hmac
import bson
from bson import BSON
import datetime
import MongoEncoder.MongoEncoder
import unicodedata
import simplejson
import json
import urllib
import tornado
import tornado.auth
from functools import wraps
from Map.BrowseTripHandler import BaseHandler
from Calendar.CalendarHandler import ExportCalendarHandler
def ajax_login_authentication(f):
    """Decorator for AJAX handler methods: short-circuit with a plain
    'not_authenticated' response when no user is logged in, otherwise call
    the wrapped handler method."""
    @wraps(f)
    def wrapper(self,*args, **kwds):
        if not self.current_user:
            if self.request.method in ("POST","GET", "HEAD"):
                # NOTE(review): this JSON payload is built but never sent (a
                # plain string is written below), and the local name shadows
                # the imported `json` module.
                json = simplejson.dumps({ 'not_authenticated': True })
                print 'not_authenticated'
                self.write('not_authenticated')
                return
                #raise urllib2.HTTPError(403)
        return f(self, *args, **kwds)
    return wrapper
class LoginHandler(BaseHandler):
    """Email/password login: verify the HMAC-hashed password and set the
    session cookie on success; bounce back to /login otherwise."""

    def get(self):
        self.render("signup.html")

    def post(self):
        email = self.get_argument("email")
        password = self.get_argument("password")
        user = self.syncdb.users.find_one({'email':email})
        # use MD5 hash algorithm
        # NOTE(review): hmac.new without an explicit digestmod defaults to MD5
        # (Python 2), keyed on the user's e-mail — weak for password storage.
        if user:
            #digest_marker = hmac.new(str(user["email"]))
            digest_marker = hmac.new(str(user['email']))
            digest_marker.update(password)
            real_password = digest_marker.hexdigest()
            print("check password")
            # if real_password == str(user["password"]):
            if real_password == str(user['password']):
                owner_id = user['user_id']
                # The "user" cookie holding the user id defines "logged in".
                self.set_secure_cookie("user", str(owner_id))
                self.redirect("/")
            else:
                self.redirect("/login")
        else:
            self.redirect("/login")
class AuthLogoutHandler(BaseHandler):
    """Log out by clearing the session cookie, then return to the home page."""

    def get(self):
        self.clear_cookie("user")
        self.redirect("/")
class CreateAccountHandler(BaseHandler):
    """Create a new local account: rejects duplicate e-mails, derives a unique
    slug, stores an HMAC-hashed password, and logs the new user in."""

    def get(self):
        self.render("signup.html")

    def post(self):
        print self.request.arguments
        name = self.get_argument("username")
        #name = "testjason"
        email = self.get_argument("email")
        check = self.syncdb.users.find_one( { 'email' : email })
        #check = self.db.get("SELECT user_id from users where email = %s", email)
        # NOTE(review): this ASCII-normalized slug is immediately overwritten
        # by `slug = name` below, so the normalization has no effect here.
        slug = unicodedata.normalize("NFKD", name).encode("ascii", "ignore")
        if check:
            raise tornado.web.HTTPError(500, "TripShare auth failed because of duplicated email address");
        else:
            slug = name
            # Append "-2" until the slug is unique among existing users.
            while True:
                e = self.syncdb.users.find_one({'slug':slug})
                if not e: break
                slug += "-2"
            password = self.get_argument("password")
            # use MD5 hash algorithm
            # NOTE(review): HMAC keyed on the e-mail; the default digest is MD5
            # (Python 2) — weak for password storage.
            digest_marker = hmac.new(str(email), password)
            #digest_marker.update(password)
            real_password = digest_marker.hexdigest()
            #if os.path.dirname(__file__).endswith('Users'):
            #    static_path = os.path.dirname(__file__)[:-5]
            #picture = os.path.join(static_path, "static")+"/images/large-group.png"
            picture = "/static/images/large-group.png"
            # NOTE(review): 'like_trip' appears twice in this literal — the
            # later (empty-list) value wins.  'lc_username' actually stores the
            # UPPER-cased name despite its "lc" prefix.
            user = { 'user_id' : bson.ObjectId(),
                     'username': name,
                     'lc_username': name.upper(),
                     'email': email,
                     'password': real_password,
                     'picture': picture,
                     'status': 'online',
                     'slug': slug,
                     'createdtime': datetime.datetime.utcnow(),
                     'facebook_friends':[],
                     'city': [],
                     'country': [],
                     'trips':[],
                     'like_trip':[],
                     'bio':'',
                     'link': '',
                     'trip_count':0,
                     'like_guide':[],
                     'save_guide':[],
                     'save_site':[],
                     'like_site':[],
                     'save_trip':[],
                     'like_trip':[],
                     'friends':[],
                     'current_location':'',
                     'current_position':[],
                     'new_notifications':[],
                     'notifications':[],
                     'search_type':'person'
                     }
            self.syncdb.users.insert(user)
            #===============================================================
            # Store basic information in cookie
            #===============================================================
            self.set_secure_cookie("user", str(user['user_id']))
            self.set_secure_cookie("username", str(user['username']))
            self.set_secure_cookie("email", str(user['email']))
            self.set_secure_cookie("picture", str(user['picture']))
            self.redirect("/")
class AuthLoginFBHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    """Facebook OAuth login: exchange the auth code for a token, fetch the
    extended profile via the legacy REST API, then create or update the local
    user record and set the session cookie."""

    # Class-level default; _on_auth assigns per-request via self.access_token.
    access_token = ''

    @tornado.web.asynchronous
    def get(self):
        my_url = (self.request.protocol + "://" + self.request.host +
                  "/auth/fblogin?next="+
                  tornado.escape.url_escape(self.get_argument("next", "/")))
        #print(my_url)
        if self.get_argument("code", False):
            # Second leg: Facebook redirected back with an authorization code.
            self.get_authenticated_user(
                redirect_uri=my_url,
                client_id=self.settings["facebook_api_key"],
                client_secret=self.settings["facebook_secret"],
                code=self.get_argument("code"),
                callback=self._on_auth
            )
            return
        # First leg: send the user to Facebook's authorization dialog.
        self.authorize_redirect(redirect_uri=my_url,
                                client_id=self.settings["facebook_api_key"],
                                extra_params={"scope": "user_about_me,email,user_website,publish_stream,read_friendlists,offline_access"})

    def handle_request(self, response):
        """Profile-fetch callback: upsert the local user and log them in."""
        #print('++++++++++++++++++++++++++++++'+response.body)
        user = simplejson.loads(response.body)
        slug = user[0]['name']
        checkExist = self.syncdb.users.find_one({'fb_user_id':str(user[0]['uid'])})
        if checkExist:
            # Known Facebook user: refresh the stored token and log in.
            checkExist['access_token'] = self.access_token
            # NOTE(review): `self.syncdb.user` (singular) — elsewhere this code
            # uses `self.syncdb.users`; likely a typo, confirm the collection.
            self.syncdb.user.save(checkExist)
            self.set_secure_cookie("user", str(checkExist['user_id']))
            self.redirect(self.get_argument("next", "/"))
            return
        # New Facebook login: find a free slug based on the display name.
        while True:
            e = self.syncdb.users.find_one({'slug':slug})
            if not e: break
            slug += "-2"
        # NOTE(review): several keys are duplicated in this literal
        # ('like_trip', 'current_location', 'current_position'); the later
        # occurrence wins, so e.g. current_location ends up ''.
        _user = { 'fb_user_id' : str(user[0]['uid']),
                  'username': user[0]['name'],
                  'lc_username': user[0]['name'].upper(),
                  'web_url': user[0]['website'],
                  'locale':user[0]['locale'],
                  'email': user[0]['email'],
                  'picture': user[0]['pic'],
                  'current_location': user[0]['current_location'],
                  'current_position':[],
                  'status': 'online',
                  'slug': slug,
                  'createdtime': datetime.datetime.utcnow(),
                  'access_token': self.access_token,
                  'facebook_friends': [],
                  'save_guide':[],
                  'like_guide':[],
                  'save_site':[],
                  'like_site':[],
                  'save_trip':[],
                  'like_trip':[],
                  'friends':[],
                  'city': [],
                  'country': [],
                  'trips':[],
                  'like_trip':[],
                  'bio':'',
                  'link': '',
                  'trip_count':0,
                  'current_location':'',
                  'current_position':[],
                  'new_notifications':[],
                  'notifications':[],
                  'search_type':'person'
                  }
        # If an account with the same e-mail already exists, merge into it.
        _user_db = self.syncdb.users.find_one({'email': user[0]['email']})
        slug = unicodedata.normalize("NFKD", unicode(user[0]['name'])).encode("ascii", "ignore")
        while True:
            e = self.syncdb.users.find_one({'slug':slug})
            if not e: break
            slug += "-2"
        user_id = ''
        if _user_db:
            user_id = _user_db['user_id']
            # NOTE(review): _user itself never receives a 'user_id' field in
            # this branch, so the replacing save below may drop it — verify.
            _user['_id'] = _user_db['_id']
        else:
            user_id = _user['user_id'] = bson.ObjectId()
        _user['createdtime']=datetime.datetime.utcnow()
        _user['slug'] = slug
        #self.db.users.save(_user, callback=self._on_action)
        self.syncdb.users.save(_user)
        self.set_secure_cookie("user", str(user_id))
        self.redirect(self.get_argument("next", "/"))

    def _on_auth(self, user):
        """Token-exchange callback: fetch the extended profile fields."""
        if not user:
            raise tornado.web.HTTPError(500, "Facebook auth failed")
        #print(tornado.escape.json_encode(user))
        self.access_token = user['access_token']
        #print(self.access_token)
        http_client = tornado.httpclient.AsyncHTTPClient()
        # Legacy REST API call for uid/name/website/locale/pic/location/email.
        http_client.fetch("https://api.facebook.com/method/users.getInfo?uids="+user['id']+"&fields=uid%2C%20name%2C%20website%2C%20locale%2C%20pic%2C%20current_location%2C%20email&access_token="+self.access_token+"&format=json", self.handle_request)
class AuthLogoutFBHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    """Clear the session cookie and bounce to ``next`` (defaults to home)."""

    def get(self):
        self.clear_cookie("user")
        self.redirect(self.get_argument("next", "/"))
class GoogleCalendarAuthHandler(BaseHandler):
    """Google Calendar OAuth2 callback: exchange the authorization code for an
    access token, store it on the user, then insert the pending event (stashed
    as 'temp_event') into their primary calendar."""

    @tornado.web.asynchronous
    def get(self):
        #code = self.get_argument('code')
        error = self.get_arguments('error')
        code = self.get_arguments('code')
        if code:
            print code[0]
            redirect_uri = (self.request.protocol + "://" + self.request.host + "/calendar_oauth2callback")
            # Exchange the authorization code for access/refresh tokens.
            post_args={
                "code": code[0],
                "redirect_uri": redirect_uri,
                "client_secret": self.settings["google_client_secret"],
                "client_id": self.settings["google_client_id"],
                "grant_type": "authorization_code",
            }
            http_client = tornado.httpclient.AsyncHTTPClient()
            http_client.fetch("https://accounts.google.com/o/oauth2/token",
                              method="POST",
                              body=urllib.urlencode(post_args),
                              callback=self.google_handle_calendar_request)
        else:
            # User denied access (or no code present): back to the trips page.
            print error[0]
            self.redirect('/mytrips')

    @tornado.web.asynchronous
    def google_handle_calendar_request(self, response):
        """Token-exchange callback: persist tokens, then insert the event."""
        res = simplejson.loads(response.body)
        if "access_token" in res:
            access_token = res['access_token']
            #print access_token
            user = self.syncdb.users.find_one({'user_id':bson.ObjectId(self.current_user['user_id'])})
            user['google_access_token'] = access_token
            if "refresh_token" in res:
                user['google_refresh_token'] = res["refresh_token"]
            self.syncdb.users.save(user)
            # The pending event JSON was stashed on the user as 'temp_event'.
            body = unicodedata.normalize('NFKD', self.current_user['temp_event']).encode('ascii','ignore')
            http_client = tornado.httpclient.AsyncHTTPClient()
            headers = {'Authorization':'Bearer '+access_token, 'X-JavaScript-User-Agent': 'Google APIs Explorer', 'Content-Type': 'application/json'}
            req = tornado.httpclient.HTTPRequest(url="https://www.googleapis.com/calendar/v3/calendars/primary/events?key="+self.settings["google_developer_key"],
                                                 method="POST",
                                                 body=body,
                                                 headers=headers)
            http_client.fetch(req, callback=self.insert_event_response)

    def insert_event_response(self, response):
        response = simplejson.loads(response.body)
        # NOTE(review): both branches redirect identically, so the status
        # check currently has no observable effect.
        if "status" in response and response['status'] == 'confirmed':
            self.redirect('/mytrips')
        else:
            self.redirect('/mytrips')
class GoogleHandler(BaseHandler, tornado.auth.GoogleMixin):
    """Google OpenID login via tornado's legacy GoogleMixin.

    NOTE(review): Google shut down OpenID 2.0 sign-in; this mixin was
    removed in later tornado releases -- confirm it is still used.
    """

    @tornado.web.asynchronous
    def get(self):
        # Second leg: Google redirected back with openid.mode set.
        if self.get_argument("openid.mode", None):
            self.get_authenticated_user(self.async_callback(self._on_auth))
            return
        # First leg: redirect the user to Google.
        self.authenticate_redirect()

    def _on_auth(self, user):
        """Auth callback; the success path is not implemented yet."""
        if not user:
            raise tornado.web.HTTPError(500, "Google auth failed")
        # Save the user with, e.g., set_secure_cookie()
class AuthLoginTWHandler(BaseHandler, tornado.auth.TwitterMixin):
    """Twitter OAuth login handler."""

    @tornado.web.asynchronous
    def get(self):
        # Second leg: Twitter redirected back with an oauth_token.
        if self.get_argument("oauth_token", None):
            self.get_authenticated_user(self.async_callback(self._on_auth))
            return
        # First leg: send the user to Twitter's authorization page.
        print 'authorize_redirect'
        self.authorize_redirect()

    def handle_request(self, user):
        """Log in an existing account matched by Twitter uid.

        NOTE(review): the statements after the ``return`` reference names
        (_user, slug, user_id) that are never defined in this method -- they
        look like a truncated copy of the Facebook handler's new-user path.
        For a brand-new user they would raise NameError; the signup path
        needs to be restored.
        """
        checkExist = self.syncdb.users.find_one({'tw_user_id':str(user['uid'])})
        if checkExist:
            # Existing account: refresh the token and set the session cookie.
            checkExist['tw_access_token'] = self.access_token
            # NOTE(review): 'self.syncdb.user' -- other handlers save to the
            # 'users' collection; probable typo, confirm.
            self.syncdb.user.save(checkExist)
            self.set_secure_cookie("user", str(checkExist['user_id']))
            self.redirect(self.get_argument("next", "/"))
            return
        _user['slug'] = slug
        #self.db.users.save(_user, callback=self._on_action)
        self.syncdb.users.save(_user)
        self.set_secure_cookie("user", str(user_id))
        self.redirect(self.get_argument("next", "/"))

    def _on_auth(self, user):
        """Twitter auth callback.

        NOTE(review): everything after the bare ``return`` is unreachable
        dead code (debug print plus a token save that never runs) -- confirm
        whether the token save was meant to execute.
        """
        if not user:
            raise tornado.web.HTTPError(500, "Twitter auth failed")
        return
        print user
        self.current_user['tw_access_token'] = user['access_token']
        self.syncdb.users.save(self.current_user)
        self.redirect(self.get_argument("next", "/"))
class AuthLogoutTWHandler(BaseHandler, tornado.auth.TwitterMixin):
    """Logs the user out by clearing the session cookie."""

    def get(self):
        self.clear_cookie("user")
        self.redirect(self.get_argument("next", "/"))
# Save the user using, e.g., set_secure_cookie() |
import tensorflow as tf
def create_model():
    """Build and compile the fully connected classifier.

    Architecture: Dense(4096) on a 2714-dimensional input, hidden Dense
    layers halving from 2048 down to 64 -- each followed by Dropout(0.5) --
    and a final 4-way softmax.  Compiled with Adam and sparse categorical
    cross-entropy.
    """
    hidden_widths = (2048, 1024, 512, 256, 128, 64)
    layers = [
        tf.keras.layers.Dense(4096, kernel_initializer='normal',
                              activation=tf.nn.relu, input_shape=(2714,)),
        tf.keras.layers.Dropout(0.5),
    ]
    for width in hidden_widths:
        layers.append(tf.keras.layers.Dense(width, kernel_initializer='normal',
                                            activation=tf.nn.relu))
        layers.append(tf.keras.layers.Dropout(0.5))
    layers.append(tf.keras.layers.Dense(4, kernel_initializer='normal',
                                        activation=tf.nn.softmax))
    model = tf.keras.models.Sequential(layers)
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss=tf.keras.losses.sparse_categorical_crossentropy,
                  metrics=['accuracy'])
    return model
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from typing import Iterable
from pants.base.specs import Specs
from pants.core.goals.fix import AbstractFixRequest, FixFilesRequest, FixResult, FixTargetsRequest
from pants.core.goals.fix import Partitions as Partitions # re-export
from pants.core.goals.fix import _do_fix
from pants.core.goals.multi_tool_goal_helper import BatchSizeOption, OnlyOption
from pants.engine.console import Console
from pants.engine.fs import Workspace
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.rules import Get, collect_rules, goal_rule
from pants.engine.unions import UnionMembership, UnionRule, union
logger = logging.getLogger(__name__)
FmtResult = FixResult
@union
class AbstractFmtRequest(AbstractFixRequest):
    """Base union for formatter requests.

    A formatter is modeled as a fixer whose changes are purely cosmetic,
    so the fmt goal reuses the fix machinery.
    """

    is_formatter = True
    is_fixer = False

    @classmethod
    def _get_rules(cls) -> Iterable[UnionRule]:
        yield from super()._get_rules()
        # Register the concrete subclass (and its Batch type) with the
        # fmt goal's unions in addition to the fix unions.
        yield UnionRule(AbstractFmtRequest, cls)
        yield UnionRule(AbstractFmtRequest.Batch, cls.Batch)
class FmtTargetsRequest(AbstractFmtRequest, FixTargetsRequest):
    """Formatter request that operates on targets."""

    @classmethod
    def _get_rules(cls) -> Iterable:
        yield from super()._get_rules()
        # Route partitioning requests for this tool through the fmt union.
        yield UnionRule(FmtTargetsRequest.PartitionRequest, cls.PartitionRequest)
class FmtFilesRequest(AbstractFmtRequest, FixFilesRequest):
    """Formatter request that operates on raw files (not targets)."""

    @classmethod
    def _get_rules(cls) -> Iterable:
        yield from super()._get_rules()
        # Route partitioning requests for this tool through the fmt union.
        yield UnionRule(FmtFilesRequest.PartitionRequest, cls.PartitionRequest)
class FmtSubsystem(GoalSubsystem):
    """Options for the `fmt` goal."""

    name = "fmt"
    help = "Autoformat source code."

    @classmethod
    def activated(cls, union_membership: UnionMembership) -> bool:
        # The goal is only exposed when at least one formatter is registered.
        return AbstractFmtRequest in union_membership

    # The second and third arguments are example tool names for help text.
    only = OnlyOption("formatter", "isort", "shfmt")
    batch_size = BatchSizeOption(uppercase="Formatter", lowercase="formatter")
class Fmt(Goal):
    """The `fmt` goal: rewrite sources in place with registered formatters."""

    subsystem_cls = FmtSubsystem
    environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
@goal_rule
async def fmt(
    console: Console,
    specs: Specs,
    fmt_subsystem: FmtSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    """Run every registered formatter by delegating to the shared fix logic."""
    return await _do_fix(
        union_membership.get(AbstractFmtRequest),
        union_membership.get(FmtTargetsRequest.PartitionRequest),
        union_membership.get(FmtFilesRequest.PartitionRequest),
        Fmt,
        fmt_subsystem,
        specs,
        workspace,
        console,
        # Factories mapping a tool's request type to its partitioning Get.
        lambda request_type: Get(Partitions, FmtTargetsRequest.PartitionRequest, request_type),
        lambda request_type: Get(Partitions, FmtFilesRequest.PartitionRequest, request_type),
    )
def rules():
    """Expose this module's rules to the Pants engine."""
    return collect_rules()
|
from django.urls import path
from .views import index, newpost, post_detail, like, favorite, tags
# Route table for the posts app; post ids are UUIDs, tags use slugs.
urlpatterns = [
    path('', index, name='index'),
    path('newpost/', newpost, name='newpost',),
    path('<uuid:post_id>/', post_detail, name='postdetails'),
    # NOTE(review): the action/tag routes below have no trailing slash,
    # unlike the ones above -- confirm APPEND_SLASH behavior is intended.
    path('<uuid:post_id>/like', like, name='postlikes'),
    path('<uuid:post_id>/favorite', favorite, name='postfavorites'),
    path('tag/<slug:tag_slug>', tags, name='tags'),
]
#!/user/bin/python2.7
import pandas as pd
import numpy as np
##################################################################################
# This class imports a data table, transform it
# and apply featue extractions according to costum
# periods.
##################################################################################
class Translator_data(object):
    """Loads a pinged-translators CSV and derives non-response-rate features.

    periods is a list of (min_hour, max_hour) half-open time buckets; the
    per-translator rate of NON-response is estimated overall and per bucket
    (with Laplace smoothing), then turned into a 4-feature row per ping.
    """

    def __init__(self, filename, periods):
        self.table = pd.read_csv(filename)
        self.periods = periods  # periods = time buckets, [(min_hour, max_hour), ...]
        self.distributions = {}  # translator_id -> (rates per period, overall rate)

    def DataPreprocessing(self):
        """Reduce the raw table to translator_id / hour(int) / weekday(bool) / response."""
        self.table = self.table[['translator_id', 'request_time', 'response']].copy()
        SATURDAY = 5
        timestamp = pd.to_datetime(self.table['request_time'])
        self.table['hour'] = timestamp.dt.hour
        # weekday numbering: Mon=0 .. Sun=6, so < SATURDAY means Mon-Fri.
        self.table['weekday'] = timestamp.dt.weekday < SATURDAY
        self.table = self.table[['translator_id', 'hour', 'weekday', 'response']]

    def create_distribution(self):
        """Compute Laplace-smoothed NON-response rates per translator.

        Fills self.distributions with translator_id -> (np.array of one rate
        per period, overall rate).
        """
        for translator in self.table['translator_id'].unique():
            rows = self.table[self.table['translator_id'] == translator]
            non_resp = rows['response'] != 'yes'
            # (1 + misses) / (2 + total): add-one smoothing on both outcomes.
            overall = (1 + len(rows[non_resp])) * 1. / (2 + len(rows))
            rates = []
            for minimum, maximum in self.periods:
                in_period = (rows['hour'] >= minimum) & (rows['hour'] < maximum)
                rates.append((1 + len(rows[in_period & non_resp])) * 1.
                             / (2 + len(rows[in_period])))
            self.distributions[translator] = (np.array(rates), overall)

    def FeatureExtraction(self, row):
        """Return (features, y) for one row: [bias, overall_rate, period_rate, weekday].

        y is 1.0 when the translator responded 'yes', else 0.0.
        """
        translator = row['translator_id']
        hour = row['hour']
        weekday = float(row['weekday'])
        # BUG FIX: use self.periods (instance configuration) -- the original
        # read a module-global `periods`, breaking any instance constructed
        # with different buckets.
        per_vec = np.array([float(minimum <= hour < maximum)
                            for (minimum, maximum) in self.periods])
        overall_rate = self.distributions[translator][1]
        period_rate = np.dot(per_vec, self.distributions[translator][0])
        features = np.array([1.0, overall_rate, period_rate, weekday])
        y = float(row['response'] == 'yes')
        return features, y
# Example: build the feature matrix X and label vector y from the CSV.
periods = [(0,6),(6,12),(12,18),(18,24)]
PT = Translator_data("PingedTranslators.csv", periods)
PT.DataPreprocessing()
PT.create_distribution()
# X is the feature matrix
# y is the class vector
X = np.zeros((len(PT.table), 4))
y = np.zeros(len(PT.table))
for index, row in PT.table.iterrows():
    # FIX: extract once per row -- the original called FeatureExtraction
    # twice per row, doubling the work for identical results.
    features, label = PT.FeatureExtraction(row)
    X[index] = features
    y[index] = label
|
import pygame
from bullet_alien import BulletAlienDos
class TriPattern:
    """A pattern class for shooting boolets in a straight line of 3.

    Fires bursts of ``bullets_per_burst`` volleys; each call to shoot_boolet
    spawns three bullets fanned out by ``angle_between_stream`` degrees.
    Two independent cooldowns gate firing: one between bursts and one
    between bullets within a burst (all times in pygame milliseconds).
    """

    def __init__(self, main_game, shooter):
        self.main_game = main_game
        self.screen = main_game.screen
        self.settings = main_game.settings
        self.shooter = shooter
        # Flags to use in tandem with cooldown
        self.burst_disabled = False  # This for delay between burst
        self.shoot_disabled = False  # This is for boolet's delay
        # Imported from settings.py
        self.burst_cooldown = self.settings.tri_burst_cooldown
        self.bullet_cooldown = self.settings.tri_bullet_cooldown
        self.bullets_per_burst = self.settings.tri_bullets_per_burst
        self.last_burst_fired = pygame.time.get_ticks()
        self.last_bullet_fired = pygame.time.get_ticks()
        self.angle = self.settings.angle_between_stream
        # Dynamic bullet_count and burst_count
        self.bullets_left = self.bullets_per_burst

    def shoot_burst(self):
        """Shoot the boolet in burst of straight line. Do it like the alien_movement cooldown."""
        self._check_burst_cooldown()
        # Only fire while the inter-burst cooldown has elapsed.
        if not self.burst_disabled:
            # Check if any bullets left; otherwise reset for the next burst.
            if self.bullets_left != 0:
                # Check to see whether the burst is finished
                self._check_bullet_cooldown()
                if not self.shoot_disabled:
                    # Shoot a bullet and then disable the shooting ability until cooldown
                    self.shoot_boolet()
                    self.last_bullet_fired = pygame.time.get_ticks()
                    self.bullets_left -= 1
                    self.shoot_disabled = True
            else:
                # If burst is finished reset burst and recorded last burst_time.
                self.bullets_left = self.bullets_per_burst
                self.last_burst_fired = pygame.time.get_ticks()
                self.burst_disabled = True

    def shoot_boolet(self):
        """Shoot each triplet of bullets."""
        angle = self.angle
        for i in range(3):
            bullet = BulletAlienDos(self.main_game, shooter=self.shooter)
            # Base direction is straight down; the three bullets are rotated
            # by +angle, 0, and -angle around it.
            bullet.vector[0] = 0
            bullet.vector[1] = 1
            bullet.normalized_vector = bullet.vector.normalize()
            bullet.normalized_vector = bullet.normalized_vector.rotate(angle)
            angle -= self.angle
            self.main_game.alien_bullets.add(bullet)

    def _check_burst_cooldown(self):
        """Re-enable bursting once the inter-burst cooldown has elapsed."""
        time_now = pygame.time.get_ticks()
        # I think I might have to put in the number of bullets_left in a burst.
        if time_now - self.last_burst_fired >= self.burst_cooldown:
            self.burst_disabled = False

    def _check_bullet_cooldown(self):
        """Yeah, I don't want it to turn into a lazer beam of ultimate lethality."""
        time_now = pygame.time.get_ticks()
        if time_now - self.last_bullet_fired >= self.bullet_cooldown:
            self.shoot_disabled = False

    def reset(self):
        """Restore the pattern to its initial ready state."""
        # Flags to use in tandem with cooldown
        self.burst_disabled = False  # This for delay between burst
        self.shoot_disabled = False  # This is for boolet's delay
        # Imported from settings.py
        self.last_burst_fired = pygame.time.get_ticks()
        self.last_bullet_fired = pygame.time.get_ticks()
        # Dynamic bullet_count and burst_count
        self.bullets_left = self.bullets_per_burst
|
import sqlite3,os
data=sqlite3.connect("kamu.db")
db="kamu.db"
im=data.cursor()
im.execute("""CREATE TABLE IF NOT EXISTS personel(
ad TEXT,
soyad TEXT,
maas INTEGER
)""")
print "(1)Veri Ekleme"
print "(2)Tablo Goruntulemek icin"
secim=raw_input("Lutfen bir secim yapin :")
if secim=="1":
ad=raw_input("Ad :")
soyad=raw_input("Soyad :")
maas=input("Maas :")
im.execute("""INSERT INTO personel(ad,soyad,maas) VALUES(?,?,?)""",(ad,soyad,maas))
data.commit()
elif secim=="2":
if not os.path.exists(db):
print "veritabani bulunamadi"
else:
im.execute("""SELECT * FROM personel""")
veriler=im.fetchall()
for veri in veriler:
print veri
|
import argparse
import uuid
import simplejson
import logging
from Setup_Manager import Setup
def get_queue_by_name(sqs, queue_name):
    """Return the first SQS queue whose name starts with *queue_name*.

    :param sqs: boto3 SQS service resource (anything exposing
        ``queues.filter(QueueNamePrefix=...)``).
    :param queue_name: prefix to match against queue names.
    :raises LookupError: if no queue matches the prefix.
        (LookupError subclasses Exception, so existing ``except Exception``
        callers keep working; the original raised a bare, message-less
        ``Exception``.)
    """
    queues = list(sqs.queues.filter(QueueNamePrefix=queue_name))
    if not queues:
        raise LookupError("no SQS queue found with prefix {!r}".format(queue_name))
    return queues[0]
def parse_arguments():
    """Parse the local application's command-line arguments.

    Positional: input file, output file, PDFs per worker (int), and an
    optional free-form 'terminate' word list.  Optional: --offline.
    """
    arg_parser = argparse.ArgumentParser()
    # Required positionals: (name, type, help text).
    for arg_name, arg_type, arg_help in (
            ("inputFileName", str, "The name of the input file"),
            ("outputFileName", str, "The name of the output file"),
            ("n", int, "Number of PDF files per worker"),
    ):
        arg_parser.add_argument(arg_name, action="store", type=arg_type, help=arg_help)
    arg_parser.add_argument("terminate", action="store", default=[], nargs="*",
                            help="Should we terminate at the end of the job?")
    arg_parser.add_argument("--offline", action="store_true", default=False,
                            help="Work offline without aws instances")
    return arg_parser.parse_args()
class LocalProgram:
    """Submits one PDF-processing task over SQS and waits for its result.

    Workflow: upload the input file to S3, enqueue a task message, poll the
    completed-tasks queue for the completion matching this task's uuid,
    render the worker output as an HTML page, and optionally ask the
    manager to terminate.
    """

    def __init__(self, input_file_name, output_file_name, n, terminate=False):
        # Unique id used to correlate our task with its completion message.
        self.task_uuid = str(uuid.uuid4())
        self.input_file_name = input_file_name
        self.input_file_path = "inputFiles/{}/{}".format(self.task_uuid, input_file_name)
        self.output_file_path = output_file_name
        self.n = n
        self.terminate = terminate
        self.answer_recieved = False  # (sic) attribute name kept for compatibility
        self.local_task_response = None
        # NOTE(review): S3 bucket names may not contain uppercase letters or
        # underscores -- confirm this bucket name is actually valid.
        self.bucket_name = "OranShuster_Assignment1"
        self.main()

    def upload_inputfile(self, input_file_name, s3_resource):
        """Upload the local input file to this task's S3 key (public-read)."""
        with open(input_file_name) as input_file:
            s3_resource.Object(self.bucket_name, self.input_file_path).put(Body=input_file.read(), ACL='public-read')

    def create_html_page(self, output_file_contents_str):
        """Render the worker output (one <p> per line) to the output HTML file."""
        logger.info("Creating HTML page")
        # FIX: S3 Body.read() returns bytes; decode so str operations work.
        if isinstance(output_file_contents_str, bytes):
            output_file_contents_str = output_file_contents_str.decode("utf-8")
        output_html = "<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<title>" + self.task_uuid + " Output file</title>\n\t" \
            + "</head>\n\t<body>\n"
        for line in output_file_contents_str.split("\n"):
            output_html += "\t\t<p>" + line + "</p>\n"
        output_html += "\t</body>\n</html>"
        with open(self.output_file_path, 'w') as html_file:
            html_file.write(output_html)

    def main(self):
        """Run the submit / poll / render workflow."""
        # NOTE(review): offline mode is still read from the module-global
        # ``args``; consider threading it through the constructor.
        with Setup(offline=args.offline) as setup_manager:
            # FIX: use instance state (self.input_file_name / self.terminate)
            # instead of reaching for the module-global ``args``.
            self.upload_inputfile(self.input_file_name, setup_manager.s3)
            new_task_queue = get_queue_by_name(sqs=setup_manager.sqs, queue_name=setup_manager.new_tasks_queue_name)
            completed_task_queue = get_queue_by_name(sqs=setup_manager.sqs,
                                                     queue_name=setup_manager.completed_tasks_queue_name)
            task_dict = {"inputFileName": self.input_file_name, "n": self.n, "task_uuid": self.task_uuid,
                         'bucket': self.bucket_name}
            new_task_queue.send_message(MessageBody=simplejson.dumps(task_dict))
            logger.info("Send new Task {}".format(task_dict))
            while not self.answer_recieved:
                # Long-poll for completion messages (10 s wait).
                for completed_task in completed_task_queue.receive_messages(WaitTimeSeconds=10):
                    completed_task_dict = simplejson.loads(completed_task.body)
                    logger.info('Processing completed task {}'.format(completed_task_dict))
                    if self.task_uuid == completed_task_dict['task_uuid']:
                        self.answer_recieved = True
                        completed_task.delete()
                        # FIX: in the original the ``break`` preceded this
                        # block, making the result handling unreachable.
                        if "reason" in completed_task_dict:
                            logger.warning("Task was terminated because manager is terminated")
                        else:
                            job_output_file_path = "outputFiles/{}/{}".format(self.task_uuid, self.input_file_name)
                            job_output_file_object = setup_manager.s3.Object(self.bucket_name, job_output_file_path).get()
                            self.create_html_page(job_output_file_object["Body"].read())
                        break
            if self.terminate:
                logger.info("Sending manager termination message")
                new_task_queue.send_message(MessageBody="TERMINATE")
def setup_root_logger():
    """Configure the 'Local' logger: INFO level, stream handler, timestamped format."""
    local_logger = logging.getLogger('Local')
    local_logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    local_logger.addHandler(stream_handler)
if __name__ == "__main__":
    setup_root_logger()
    logger = logging.getLogger('Local')
    logger.info("Starting local program")
    args = parse_arguments()
    # Normalize the free-form positional into a boolean: the manager is only
    # asked to terminate when exactly the single word "terminate" was passed.
    if len(args.terminate) == 1 and args.terminate[0] == "terminate":
        args.terminate = True
    else:
        args.terminate = False
    LocalProgram(args.inputFileName, args.outputFileName, args.n, terminate=args.terminate)
|
import tkinter as tk
import tkinter.font as tkFont
import GetImage as GM
from tkinter import messagebox
from tkinter.messagebox import askokcancel, showinfo, WARNING
def main(root,token):
    """Render the login screen inside *root*.

    :param root: the Tk root window to populate.
    :param token: 'S' for Student login, anything else for Owner login.

    NOTE(review): image paths are absolute Windows paths in non-raw strings;
    they only work because these particular backslash sequences are not
    recognized escapes -- prefer raw strings or pathlib.
    """
    root.title("User Login")
    width=750; height=500
    # Center the fixed-size window on the screen.
    screenwidth = root.winfo_screenwidth()
    screenheight = root.winfo_screenheight()
    alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
    root.geometry(alignstr)
    root.resizable(width=False, height=False)
    # Background image; keep a reference on the label so it isn't GC'd.
    BgImg = GM.getImage("D:\Programming\Python\Room_Rental\Images\BG.jpg", 744, 495)
    BGLabel=tk.Label(root,image=BgImg)
    BGLabel.image=BgImg
    BGLabel["justify"] = "center"
    BGLabel.place(x=3,y=1,width=744,height=494)
    # Title banner showing which role is logging in.
    Title=tk.Label(root)
    Title["bg"] = "#00ced1"
    ft = tkFont.Font(family='Times',size=28)
    Title["font"] = ft
    Title["fg"] = "#333333"
    Title["justify"] = "center"
    if token == 'S' : who = 'Student'
    else : who = 'Owner'
    Title["text"] = who+" Login"
    Title.place(x=150,y=10,width=590,height=75)
    # Thin horizontal divider under the title.
    Divider=tk.Label(root)
    Divider["bg"] = "#90ee90"
    ft = tkFont.Font(family='Times',size=10)
    Divider["font"] = ft
    Divider["fg"] = "#333333"
    Divider["justify"] = "center"
    Divider["text"] = ""
    Divider.place(x=0,y=100,width=744,height=3)
    # Username prompt label.
    EnterUsername=tk.Label(root)
    EnterUsername["bg"] = "#393d49"
    ft = tkFont.Font(family='Times',size=16)
    EnterUsername["font"] = ft
    EnterUsername["fg"] = "#ffffff"
    EnterUsername["justify"] = "center"
    EnterUsername["text"] = "Enter Username \n(Your Email is your username)"
    EnterUsername.place(x=60,y=160,width=270,height=54)
    # Password prompt label.
    PasswordLabel=tk.Label(root)
    PasswordLabel["bg"] = "#393d49"
    ft = tkFont.Font(family='Times',size=14)
    PasswordLabel["font"] = ft
    PasswordLabel["fg"] = "#ffffff"
    PasswordLabel["justify"] = "center"
    PasswordLabel["text"] = "Enter Password"
    PasswordLabel.place(x=120,y=270,width=160,height=31)
    # Username entry field.
    Username=tk.Entry(root)
    Username["bg"] = "#eeeeee"
    Username["borderwidth"] = "1px"
    ft = tkFont.Font(family='Times',size=16)
    Username["font"] = ft
    Username["fg"] = "#000000"
    Username["justify"] = "center"
    Username["text"] = ""
    Username.place(x=350,y=160,width=330,height=40)
    # Password entry field (masked with '*').
    Password=tk.Entry(root,show='*')
    Password["bg"] = "#eeeeee"
    Password["borderwidth"] = "1px"
    ft = tkFont.Font(family='Times',size=16)
    Password["font"] = ft
    Password["fg"] = "#000000"
    Password["justify"] = "center"
    Password["text"] = ""
    Password.place(x=350,y=270,width=330,height=40)
    # Login button; reads both entries at click time via the lambda.
    Login=tk.Button(root)
    Login["bg"] = "#1eff69"
    Login["borderwidth"] = "3px"
    ft = tkFont.Font(family='Times',size=16)
    Login["font"] = ft
    Login["fg"] = "#000000"
    Login["justify"] = "center"
    Login["text"] = "Login"
    Login.place(x=230,y=390,width=300,height=45)
    Login["command"] = lambda : Login_command(root,Username.get(),Password.get(),token)
    # Logo doubles as the "back to welcome screen" button.
    BackLogo = GM.getImage("D:\Programming\Python\Room_Rental\Images\Logo.png",131,77)
    Back=tk.Button(root,image=BackLogo)
    Back.image = BackLogo
    Back["justify"] = "center"
    Back.place(x=10,y=10,width=131,height=77)
    Back["command"] = lambda : Back_command(root)
def Login_command(root,username,password,token):
    """Validate credentials against the DB and open the matching home screen.

    :param token: 'O' checks the Owner table; anything else the Student table.

    NOTE(review) SECURITY: both branches interpolate raw user input into SQL
    (string concatenation / %-formatting) -- classic SQL injection; runQuery2
    should be given parameterized queries instead.  Passwords also appear to
    be compared in plain text -- confirm they are hashed at rest.
    """
    if username != '' and password != '' :
        import DatabaseConnection as DB
        if token == 'O' :
            cnt = DB.runQuery2("select count(o_id) from Owner where email = '"+username+"' AND password = '"+password+"'")
            if cnt[0] == 0 :
                messagebox.showerror("Failed","Wrong ! Incorrect username or password.")
            else :
                # Re-query for the display name and id, then open Owner home.
                username,id = DB.runQuery2("select name,o_id from Owner where email = '"+username+"' AND password = '"+password+"'")
                import OwnerHome as Home
                Home.main(root,username,id)
        else :
            cnt = DB.runQuery2("SELECT count(s_id) FROM Student WHERE email ='%s' AND password ='%s'" % (username, password))
            if cnt[0] == 0 :
                messagebox.showerror("Failed","Wrong ! Incorrect username or password.")
            else :
                TUP = DB.runQuery2("SELECT name, s_id FROM Student WHERE email ='%s' AND password ='%s'" % (username, password))
                import StudentHome as Home
                Home.main(root,TUP[0],TUP[1])
    else :
        messagebox.showerror("Error","Details are not valid. Please enter valid details.")
def Back_command(root):
    """Return to the welcome screen (import deferred to avoid a cycle)."""
    import Welcome as wc
    wc.App(root)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 18 03:12:08 2019
@author: tboydev
This program calculates the distance of the voyager from the sun from 9/25/2009
"""
# Unit-conversion constants.
MILES_PER_AU = 92955807.267433   # miles in one astronomical unit
KM_PER_MILE = 1.609344
METERS_PER_MILE = 1609.344
SPEED_OF_LIGHT_MPS = 299792458   # meters per second

voyager_speed = 32241  # miles per hour
current_distance = 16_637_000_000  # miles from the sun on 9/25/2009 (not used below)

days_travelled = int(input("Enter number of days travelled: "))
# Convert days to hours for the speed * time product.
hours_travelled = days_travelled * 24

# Distance covered since the reference date, in miles.
distance_in_miles = voyager_speed * hours_travelled
print("Distance travelled in miles is: ", distance_in_miles)

distance_in_km = distance_in_miles * KM_PER_MILE
print("Distance travelled in kilometers is: ", distance_in_km)

# FIX: convert to AU by DIVIDING miles by miles-per-AU; the original
# multiplied the kilometer figure by the miles-per-AU constant, which is
# wrong in both operation and units.
distance_in_au = distance_in_miles / MILES_PER_AU
print("Distance travelled in austronomical units is: ", distance_in_au)

# Radio signal travel time at the speed of light.
distance_in_meters = distance_in_miles * METERS_PER_MILE
# FIX: a round trip covers the distance twice; the original printed the
# one-way time while labeling it "round-trip".
radio_seconds = 2 * distance_in_meters / SPEED_OF_LIGHT_MPS
radio_hours = radio_seconds / 3600
print("Round-trip time for radio communication in hours is: ", radio_hours)
|
# 趁热打铁
class Solution:
    def reversePairs(self, nums: List[int]) -> int:
        """Count pairs (i, j) with i < j and nums[i] > nums[j].

        Coordinate-compresses the values, then sweeps right-to-left with a
        Fenwick (binary indexed) tree, counting how many strictly smaller
        values have already been seen.  O(n log n) time, O(n) extra space.

        Note: ``nums`` is overwritten with 1-based ranks, matching the
        original implementation's in-place discretization.
        """
        size = len(nums)
        sorted_vals = sorted(nums)
        # Discretize: replace each value by its 1-based rank.
        for idx in range(size):
            nums[idx] = bisect.bisect_left(sorted_vals, nums[idx]) + 1

        tree = [0] * (size + 1)

        def bump(pos):
            # Point update: add 1 at position pos.
            while pos <= size:
                tree[pos] += 1
                pos += pos & (-pos)

        def prefix(pos):
            # Prefix sum over positions 1..pos.
            total = 0
            while pos > 0:
                total += tree[pos]
                pos -= pos & (-pos)
            return total

        answer = 0
        for idx in range(size - 1, -1, -1):
            # Elements already inserted are to the right of idx.
            answer += prefix(nums[idx] - 1)
            bump(nums[idx])
        return answer
# 贴个树状数组类
class BIT:
    """Fenwick / binary indexed tree over positions 1..n for point-increment,
    prefix-sum queries (both O(log n))."""

    def __init__(self, n):
        self.n = n
        self.tree = [0] * (n + 1)

    @staticmethod
    def lowbit(x):
        """Lowest set bit of x -- the span covered by the tree node at x."""
        return x & (-x)

    def update(self, x):
        """Increment position x by one."""
        while x <= self.n:
            self.tree[x] += 1
            x += BIT.lowbit(x)

    def query(self, x):
        """Return the sum of counts over positions 1..x."""
        total = 0
        while x > 0:
            total += self.tree[x]
            x -= BIT.lowbit(x)
        return total
|
# Print the square of each element of the list.
a = [1, 2, 3, 4, 5, 6]
for squared in (value ** 2 for value in a):
    print(squared)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
# import os
import ujson
import datetime
import iso8601
from sanic import Blueprint
from sanic import response
from sanic.log import logger
from sanic.request import Request
from sanic_jwt import inject_user, protected, scoped
from web_backend.nvlserver.helper.request_wrapper import populate_response_format
from web_backend.nvlserver.helper.process_request_args import proc_arg_to_int
from .service import (
get_rent_list, get_rent_list_count, get_rent_element,
create_rent_element, update_rent_element, delete_rent_element
)
from web_backend.nvlserver.module.traceable_object.service import get_traceable_object_element
from web_backend.nvlserver.module.hw_action.service import get_hw_action_element
from web_backend.nvlserver.module.hw_module.service import get_hw_module_element_by_traceable_object_id
api_rent_blueprint = Blueprint('api_rent', url_prefix='/api/rent')
@api_rent_blueprint.route('/', methods=['GET'])
@inject_user()
@scoped(['rent:read'], require_all=True, require_all_actions=True)
async def api_rent_get(
        request: Request,
        user):
    """List rent entries for a user, paginated and optionally date-filtered.

    Query args: size, page, user_id (honored for admins only), date_from,
    date_to (ISO-8601; a '+' decoded to ' ' by the frontend is restored).

    :param request: incoming Sanic request.
    :param user: auth payload injected by sanic_jwt.
    :return: raw JSON {success, message, data} with an appropriate status.
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    size = proc_arg_to_int(request.args.get('size', '1'), 1)
    page = proc_arg_to_int(request.args.get('page', '1'), 1)
    user_id_param = proc_arg_to_int(request.args.get('user_id', '1'), 0)
    date_from_front = request.args.get('date_from', None)
    date_to_front = request.args.get('date_to', None)
    # print(date_from_front)
    # print(date_to_front)
    # TODO: REMOVE REPLACE ON CHANGE PARAM FROM FRONTEND
    if date_from_front is not None:
        # The '+' of the timezone offset arrives URL-decoded as a space.
        date_from = iso8601.parse_date(date_from_front.replace(' ', '+'))
    else:
        date_from = None
        # date_from = datetime.datetime.now() - datetime.timedelta(days=30)
    if date_to_front is not None:
        date_to = iso8601.parse_date(date_to_front.replace(' ', '+'))
    else:
        date_to = None
        # date_to = datetime.datetime.now()
    # print(date_from)
    # print(date_to)
    # state = request.args.get('state', None)
    offset = (page - 1) * size
    if request.method == 'GET':
        try:
            if user:
                if user.get('user_id', None):
                    # Admins may query any user; others are pinned to themselves.
                    if user.get('account_type_name') == 'admin':
                        user_id = user_id_param
                    else:
                        user_id = user.get('user_id')
                    rent_list = await get_rent_list(
                        request, user_id=user_id, date_from=date_from, date_to=date_to, limit=size, offset=offset)
                    rent_count = await get_rent_list_count(
                        request, user_id=user_id, date_from=date_from, date_to=date_to)
                    # NOTE(review): leftover debug prints.
                    print(rent_list)
                    print(rent_count)
                    if rent_list:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        res_data_formatted = await populate_response_format(
                            rent_list, rent_count, size=size, page=page)
                        ret_val['data'] = res_data_formatted
                        status = 200
                    else:
                        # Empty result is still a successful query.
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = []
                        status = 200
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as rt_err:
            # Any failure falls through to the default 500 payload.
            logger.error('Function api_rent_get -> GET erred with: {}'.format(rt_err))
    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_rent_blueprint.route('/', methods=['POST'])
@inject_user()
@protected()
@scoped(['rent:create'], require_all=True, require_all_actions=True)
async def api_rent_post(
        request: Request,
        user):
    """Create a rent entry for a traceable object.

    JSON body: user_id (admins only), traceable_object_id, date_from,
    date_to (ISO-8601), alaram_time.  Looks up the traceable object, a
    fixed hw action (id=7) and the object's hw module, then creates the
    rent record with ack_message/active set.

    :param request: incoming Sanic request.
    :param user: auth payload injected by sanic_jwt.
    :return: raw JSON {success, message, data}; 201 on creation.
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    user_id_param = request.json.get('user_id', None)
    traceable_object_id = request.json.get('traceable_object_id', None)
    date_from_front = request.json.get('date_from', None)
    date_to_front = request.json.get('date_to', None)
    # NOTE(review): 'alaram' (sic) is the wire key the frontend sends;
    # renaming it would break clients.
    alaram_time_front = request.json.get('alaram_time', None)
    # print(request.json)
    if request.method == 'POST':
        try:
            if user:
                if user.get('user_id', None):
                    # Admins may create on behalf of any user.
                    if user.get('account_type_name') == 'admin':
                        user_id = user_id_param
                    else:
                        user_id = user.get('user_id')
                    if user_id == user.get('user_id') or user.get('account_type_name') == 'admin':
                        if date_from_front:
                            date_from = iso8601.parse_date(date_from_front)
                        else:
                            date_from = None
                        if date_to_front:
                            date_to = iso8601.parse_date(date_to_front)
                        else:
                            date_to = None
                        if alaram_time_front:
                            alaram_time = alaram_time_front
                        else:
                            alaram_time = None
                        traceable_object = await get_traceable_object_element(
                            request, traceable_object_id=traceable_object_id)
                        # CHANGED HET OBJECT WITH NAME
                        # NOTE(review): hw_action_id=7 is a magic constant
                        # (presumably the "rent" action) -- confirm.
                        hw_action_object = await get_hw_action_element(
                            request, hw_action_id=7)
                        hw_module_object = await get_hw_module_element_by_traceable_object_id(
                            request, traceable_object_id=traceable_object_id)
                        # All three lookups must succeed before creating.
                        if None not in (traceable_object, hw_action_object, hw_module_object):
                            rent_obj = await create_rent_element(
                                request, user_id=user_id,
                                hw_action_id=hw_action_object.get('id'),
                                proto_field=hw_action_object.get('proto_field'),
                                field_type='bool',
                                value='true',
                                hw_module_id=hw_module_object.get('id'),
                                traceable_object_id=traceable_object.get('id'),
                                ack_message=True,
                                alarm_time=alaram_time,
                                date_from=date_from,
                                date_to=date_to, active=True)
                            if rent_obj:
                                print(rent_obj)  # NOTE(review): leftover debug output
                                ret_val['data'] = rent_obj
                                ret_val['success'] = True
                                status = 201
                                ret_val['message'] = 'server.object_created'
                            else:
                                status = 412
                                ret_val['message'] = 'server.query_condition_failed'
                        else:
                            status = 412
                            ret_val['message'] = 'server.query_condition_failed'
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            # Any failure falls through to the default 500 payload.
            logger.error('Function api_rent_post -> POST erred with: {}'.format(al_err))
    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_rent_blueprint.route('/<rent_id:int>', methods=['GET'])
@inject_user()
@scoped(['rent:read'], require_all=True, require_all_actions=True)
async def api_rent_element_get(
        request: Request,
        user,
        rent_id: int = 0):
    """Fetch a single rent entry by id.

    :param request: incoming Sanic request.
    :param user: auth payload injected by sanic_jwt.
    :param rent_id: primary key of the rent entry (0 is treated as missing).
    :return: raw JSON {success, message, data}.

    NOTE(review): no ownership check -- any authenticated user with the
    rent:read scope can read any rent id; confirm this is intended.
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    if request.method == 'GET':
        try:
            if user:
                if user.get('user_id', None) and rent_id:
                    rent_element = await get_rent_element(request, rent_id)
                    if rent_element:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = rent_element
                        status = 200
                    else:
                        # Missing element still reports success with data=None.
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        status = 200
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            logger.error('Function api_rent_element_get -> GET erred with: {}'.format(al_err))
    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_rent_blueprint.route('/<rent_id:int>', methods=['PUT'])
@inject_user()
@scoped(['rent:update'], require_all=True, require_all_actions=True)
async def api_rent_element_put(
        request: Request,
        user,
        rent_id: int = 0):
    """
    Update an existing rent record.

    Builds the update from the JSON body plus lookups of the traceable
    object, a fixed hw action (id=7) and the hw module attached to the
    traceable object.

    :param request: incoming Sanic request carrying a JSON body
    :param user: payload injected by the auth decorators
    :param rent_id: primary key of the rent row to update
    :return: raw JSON response with success flag, message and data
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    user_id_param = request.json.get('user_id', None)
    traceable_object_id = request.json.get('traceable_object_id', None)
    date_from_front = request.json.get('date_from', None)
    date_to_front = request.json.get('date_to', None)
    # NOTE(review): 'alaram_time' is misspelled but is the exact key the
    # frontend sends -- do not rename without coordinating with the client.
    alaram_time_front = request.json.get('alaram_time', None)
    if request.method == 'PUT':
        try:
            if user:
                # print(request.json)
                if user.get('user_id', None):
                    # Admins may update on behalf of another user; everyone
                    # else is pinned to their own user_id.
                    if user.get('account_type_name') == 'admin':
                        user_id = user_id_param
                    else:
                        user_id = user.get('user_id')
                    if user_id == user.get('user_id') or user.get('account_type_name') == 'admin':
                        # Parse incoming ISO-8601 date strings (or keep None).
                        if date_from_front:
                            date_from = iso8601.parse_date(date_from_front)
                        else:
                            date_from = None
                        if date_to_front:
                            date_to = iso8601.parse_date(date_to_front)
                        else:
                            date_to = None
                        if alaram_time_front:
                            alaram_time = alaram_time_front
                        else:
                            alaram_time = None
                        traceable_object = await get_traceable_object_element(
                            request, traceable_object_id=traceable_object_id)
                        # CHANGED HET OBJECT WITH NAME
                        # NOTE(review): hw_action_id=7 is hard-coded --
                        # presumably the "rent" action; verify against the DB.
                        hw_action_object = await get_hw_action_element(
                            request, hw_action_id=7)
                        hw_module_object = await get_hw_module_element_by_traceable_object_id(
                            request, traceable_object_id=traceable_object_id)
                        # All three lookups must succeed before updating.
                        if None not in (traceable_object, hw_action_object, hw_module_object):
                            rent_obj = await update_rent_element(
                                request, rent_id=rent_id, user_id=user_id, hw_action_id=hw_action_object.get('id'),
                                proto_field=hw_action_object.get('proto_field'),
                                field_type='bool',
                                value='true',
                                hw_module_id=hw_module_object.get('id'),
                                traceable_object_id=traceable_object.get('id'),
                                ack_message=True,
                                alarm_time=alaram_time,
                                date_from=date_from,
                                date_to=date_to, active=True)
                            if rent_obj:
                                ret_val['data'] = rent_obj
                                ret_val['success'] = True
                                status = 201
                                ret_val['message'] = 'server.object_created'
                            else:
                                status = 412
                                ret_val['message'] = 'server.query_condition_failed'
                        else:
                            status = 412
                            ret_val['message'] = 'server.query_condition_failed'
                    else:
                        status = 400
                        ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            logger.error('Function api_rent_element_put -> PUT erred with: {}'.format(al_err))
    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_rent_blueprint.route('/<rent_id:int>', methods=['DELETE'])
@inject_user()
@scoped(['rent:delete'], require_all=True, require_all_actions=True)
async def api_rent_element_delete(request: Request, user, rent_id: int = 0):
    """Delete a rent record by id.

    :param request: incoming Sanic request
    :param user: payload injected by the auth decorators
    :param rent_id: primary key of the rent row to delete
    :return: raw JSON response with success flag, message and data
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    if request.method == 'DELETE':
        try:
            if user:
                if user.get('user_id'):
                    # was `if True and rent_id` -- the constant `True` was
                    # redundant and has been dropped.
                    if rent_id:
                        rent = await delete_rent_element(request, rent_id)
                        if rent:
                            ret_val['success'] = True
                            ret_val['data'] = None
                            status = 202
                            # the earlier 'server.query_success' assignment was
                            # a dead store, immediately overwritten by this one
                            ret_val['message'] = 'server.accepted'
                        else:
                            status = 412
                            ret_val['message'] = 'server.query_condition_failed'
                    else:
                        status = 400
                        ret_val['message'] = 'server.bad_request'
                else:
                    status = 401
                    ret_val['message'] = 'server.unauthorized'
            # NOTE(review): a falsy `user` falls through with the generic
            # 500 'server.query_failed', matching the original structure.
        except Exception as al_err:
            logger.error('Function api_rent_element_delete -> DELETE erred with: {}'.format(al_err))
    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
|
from ._title import Title
from plotly.graph_objs.pie import title
from ._textfont import Textfont
from ._stream import Stream
from ._outsidetextfont import Outsidetextfont
from ._marker import Marker
from plotly.graph_objs.pie import marker
from ._insidetextfont import Insidetextfont
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.pie import hoverlabel
from ._domain import Domain
|
""" module to create an ensight compatible file
to visualize your data"""
import os
import h5py
import numpy as np
from lxml import etree
NSMAP = {"xi": "http://www.w3.org/2001/XInclude"}
# pylint: disable=c-extension-no-member
class NpArray2Xmf:
    """Dump numpy arrays as an XDMF pair: a .h5 data file plus a .xmf
    descriptor readable by visualization tools (ParaView, EnSight)."""

    def __init__(self,
                 filename,
                 domain_name=None,
                 mesh_name=None,
                 time=None,
                 xmf_only=False):
        """Set up output filenames and mesh metadata.

        :param filename: output path ending in .xmf or .h5 (the HDF5 name
            is derived from it either way)
        :param domain_name: XDMF Domain name, defaults to "Domain"
        :param mesh_name: XDMF Grid name, defaults to "Mesh"
        :param time: snapshot time, defaults to 0.0
        :param xmf_only: if True, dump() skips the HDF5 file and only
            (re)writes the .xmf descriptor
        :raises RuntimeError: on any other file extension
        """
        extension = os.path.splitext(filename)
        if extension[-1] == ".xmf":
            self.filename = extension[0] + ".h5"
        elif extension[-1] == ".h5":
            self.filename = filename
        else:
            raise RuntimeError("Only extensions .xmf or .h5 are allowed")
        self.geotype = None
        self.mesh = {}
        self.data = {}
        self.shape = None
        self.mesh["domain"] = domain_name
        self.mesh["mesh"] = mesh_name
        self.mesh["time"] = time
        if time is None:
            self.mesh["time"] = 0.0
        # Time is stored as a fixed-width scientific-notation string.
        self.mesh["time"] = "%14.8e" % self.mesh["time"]
        if self.mesh["mesh"] is None:
            self.mesh["mesh"] = "Mesh"
        if self.mesh["domain"] is None:
            self.mesh["domain"] = "Domain"
        self.xmf_only = xmf_only

    def create_grid(self, nparray_x, nparray_y, nparray_z):
        """Register the grid from numpy coordinate arrays x, y, z.

        The dimensionality of the arrays selects the topology:
        1D -> cloud of points, 2D -> structured quads,
        3D -> structured hexahedrons.

        :raises RuntimeError: for any other dimensionality
        """
        self.mesh["x"] = np.ravel(nparray_x)
        self.mesh["y"] = np.ravel(nparray_y)
        self.mesh["z"] = np.ravel(nparray_z)
        self.shape = list(nparray_x.shape)
        dim = len(self.shape)
        if dim == 1:
            self.geotype = "cloud"
        if dim == 2:
            self.geotype = "quads"
        if dim == 3:
            self.geotype = "hexas"
        if self.geotype is None:
            # BUGFIX: joining the raw int dims used to raise TypeError
            # (" ".join needs strings), masking this error message.
            raise RuntimeError("Unexpected shape of nparray :"
                               + " ".join(str(dim) for dim in self.shape))

    def add_field(self, nparray_field, variable_name):
        """Add a field, assuming the same shape as the coordinate arrays
        (or that shape plus a trailing component axis for vectors)."""
        self.data[variable_name] = nparray_field

    def _type(self, var):
        """Return the xmf (NumberType, AttributeType, shape string) triple
        describing the stored field *var*.

        :raises RuntimeError: for unsupported dtypes, or shapes matching
            neither a scalar nor a 3-component vector on the grid
        """
        var_shape = list(self.data[var].shape)
        dtype = self.data[var].dtype
        numbertype = None
        if dtype == "float64":
            numbertype = "Float"
        if dtype == "S4":
            numbertype = "Char"
        if numbertype is None:
            # BUGFIX: str(dtype) -- concatenating the numpy dtype object
            # itself raised TypeError instead of this message.
            raise RuntimeError("Array of type " + str(dtype)
                               + "(" + var + ")"
                               + "not recognized")
        attributetype = None
        if var_shape == self.shape:
            attributetype = "Scalar"
        if var_shape[:-1] == self.shape:
            if var_shape[-1] == 3:
                attributetype = "Vector"
        if attributetype is None:
            raise RuntimeError("Var " + var
                               + " of shape " + str(var_shape)
                               + " not consistent with grid of shape "
                               + str(self.shape) +
                               "\n (neither scalar nor 3D vector...)")
        shape_str = " ".join(str(dim) for dim in self.data[var].shape)
        return (numbertype,
                attributetype,
                shape_str)

    def xmf_dump(self):
        """Create the XDMF descriptor (.xmf) file next to the HDF5 file."""
        if self.geotype == "cloud":
            topology = "PolyVertex"
        if self.geotype == "quads":
            topology = "2DSMesh"
        if self.geotype == "hexas":
            topology = "3DSMesh"
        dims = " ".join(str(dim) for dim in self.shape)
        xmf_tree = dict()
        xmf_tree['root'] = etree.Element("Xdmf", Version="2.0", nsmap=NSMAP)
        xmf_tree['dom'] = etree.SubElement(xmf_tree['root'], "Domain",
                                           Name=self.mesh['domain'])
        xmf_tree['grd'] = etree.SubElement(xmf_tree['dom'], "Grid",
                                           Name=self.mesh['mesh'],
                                           Type='Uniform')
        etree.SubElement(xmf_tree['grd'], "Time",
                         Type='Single',
                         Value=self.mesh['time'])
        etree.SubElement(xmf_tree['grd'], "Topology",
                         Name="Topo",
                         TopologyType=topology,
                         NumberOfElements=dims)
        xmf_tree['geo'] = etree.SubElement(xmf_tree['grd'], "Geometry",
                                           GeometryType="X_Y_Z")
        # Coordinate arrays reference the /mesh group of the HDF5 file.
        for var in ['x', 'y', 'z']:
            field = etree.SubElement(xmf_tree['geo'], "DataItem",
                                     Dimensions=dims,
                                     Format="HDF",
                                     NumberType="Float",
                                     Precision="8")
            text = "%s:/mesh/%s" % (os.path.basename(self.filename), var)
            field.text = "\n%s%s\n%s" % (11 * " ", text, 8 * " ")
        # Field arrays reference the /variables group of the HDF5 file.
        for var in self.data:
            numbertype, attributetype, dims = self._type(var)
            attr = etree.SubElement(xmf_tree['grd'], "Attribute",
                                    Name=var,
                                    Center="Node",
                                    AttributeType=attributetype)
            field = etree.SubElement(attr, "DataItem",
                                     Dimensions=dims,
                                     Format="HDF",
                                     NumberType=numbertype,
                                     Precision="8")
            text = "%s:/variables/%s" % (os.path.basename(self.filename), var)
            field.text = "\n%s%s\n%s" % (11 * " ", text, 8 * " ")
        xmf_file = self.filename.replace(".h5", ".xmf")
        xmf_ct = etree.tostring(xmf_tree['root'],
                                pretty_print=True,
                                xml_declaration=True,
                                doctype='<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>')
        # Strip the encoding attribute lxml inserts in the declaration.
        xmf_ct = xmf_ct.decode().replace("encoding=\'ASCII\'", "")
        with open(xmf_file, "w") as fout:
            fout.write(xmf_ct)

    def dump(self):
        """Write the HDF5 data file (unless xmf_only) and the descriptor."""
        if not self.xmf_only:
            fout = h5py.File(self.filename, "w")
            mesh_gp = fout.create_group("mesh")
            for coord in ["x", "y", "z"]:
                mesh_gp.create_dataset(coord, data=self.mesh[coord])
            var_gp = fout.create_group("variables")
            for var in self.data:
                var_gp.create_dataset(var, data=self.data[var])
            fout.close()
        self.xmf_dump()
def create_time_collection_xmf(collection_filenames, xmf_filename):
    """Create an xmf file holding a temporal collection of xmf files.

    Parameters :
    ============
    collection_filenames: a list of single time xmf filenames to collect
    xmf_filename : the name of the output file

    Returns:
    ========
    None
    """
    root = etree.Element("Xdmf", Version="2.0", nsmap=NSMAP)
    domain = etree.SubElement(root, "Domain")
    print(os.path.split(xmf_filename)[-1])
    collection = etree.SubElement(domain, "Grid",
                                  Name=os.path.split(xmf_filename)[-1],
                                  GridType="Collection",
                                  CollectionType="Temporal")
    # One xi:include per time step, written with a placeholder tag because
    # the final namespaced tag is substituted textually below.
    for single_xmf in collection_filenames:
        etree.SubElement(collection, "XI_INCLUDE", href=single_xmf,
                         xpointer='xpointer(//Xdmf/Domain/Grid)')
    raw_xml = etree.tostring(root,
                             pretty_print=True,
                             xml_declaration=True,
                             doctype='<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>').decode()
    raw_xml = raw_xml.replace("encoding=\'ASCII\'", "")
    raw_xml = raw_xml.replace("XI_INCLUDE", "xi:include")
    with open(xmf_filename, "w") as fout:
        fout.write(raw_xml)
if __name__ == '__main__':
    # Demo / smoke test: write 1D, 2D and 3D sample files.
    DIM_X = 41
    DIM_Y = 21
    DIM_Z = 11
    SIZE_X = 4.
    SIZE_Y = 2.
    SIZE_Z = 1.
    # 1D : point cloud (all three coordinate arrays share DIM_X points)
    TEST_X = np.linspace(0, SIZE_X, DIM_X)
    TEST_Y = np.linspace(0, SIZE_Y, DIM_X)
    TEST_Z = np.linspace(0, SIZE_Z, DIM_X)
    # Separable sine product as a sample scalar field.
    TEST_U = (np.sin(TEST_X / SIZE_X * 1 * np.pi)
              * np.sin(TEST_Y / SIZE_Y * 1 * np.pi)
              * np.sin(TEST_Z / SIZE_Z * 1 * np.pi))
    TEST_F = NpArray2Xmf("./test1D.h5")
    TEST_F.create_grid(TEST_X, TEST_Y, TEST_Z)
    TEST_F.add_field(TEST_U, "foobar")
    # Stack the scalar three times to build a sample 3-component vector.
    TEST_V = np.stack((TEST_U,
                       TEST_U,
                       TEST_U),
                      axis=1)
    TEST_F.add_field(TEST_V, "foobar_vect")
    TEST_F.dump()
    # 2D : structured quad grid
    TEST_X = np.tile(np.linspace(0., SIZE_X, DIM_X), (DIM_Y, 1))
    TEST_Y = np.tile(np.linspace(0., SIZE_Y, DIM_Y), (DIM_X, 1)).transpose()
    TEST_Z = np.ones((DIM_Y, DIM_X))
    TEST_U = (np.sin(TEST_X / SIZE_X * 1 * np.pi)
              * np.sin(TEST_Y / SIZE_Y * 1 * np.pi)
              * np.sin(TEST_Z * 0.5 * np.pi))
    TEST_F = NpArray2Xmf("./test2D.h5")
    TEST_F.create_grid(TEST_X, TEST_Y, TEST_Z)
    TEST_F.add_field(TEST_U, "foobar")
    TEST_F.dump()
    # 3D : hexahedral grid obtained by extruding the 2D arrays along z
    TEST_X = TEST_X[:, :, None].repeat(DIM_Z, 2)
    TEST_Y = TEST_Y[:, :, None].repeat(DIM_Z, 2)
    TEST_Z = np.tile(np.linspace(0., SIZE_Z, DIM_Z), (DIM_X, 1)).transpose()
    TEST_Z = TEST_Z[:, :, None].repeat(DIM_Y, 2).transpose((2, 1, 0))
    TEST_U = (np.sin(TEST_X / SIZE_X * 1 * np.pi)
              * np.sin(TEST_Y / SIZE_Y * 1 * np.pi)
              * np.sin(TEST_Z / SIZE_Z * 1 * np.pi))
    TEST_F = NpArray2Xmf("./test3D.h5")
    TEST_F.create_grid(TEST_X, TEST_Y, TEST_Z)
    TEST_F.add_field(TEST_U, "foobar")
    TEST_F.dump()
|
import cv2
import dlib
def eyeRatio(landmarks):
    """Return the average eye aspect ratio (height / width) of both eyes.

    Indices follow the dlib 68-point landmark layout: points 36-41 and
    42-47 are the two eyes. A low ratio indicates a closed eye.

    :param landmarks: indexable sequence of 68 points with .x/.y attributes
    :return: mean of the two per-eye height/width ratios
    """
    # eye heights (vertical distance between lid landmarks)
    left_height = landmarks[40].y - landmarks[38].y
    right_height = landmarks[47].y - landmarks[43].y
    # eye widths (horizontal distance between corner landmarks)
    left_width = landmarks[39].x - landmarks[36].x
    right_width = landmarks[45].x - landmarks[42].x
    # Per-eye ratios, averaged. Debug prints removed: this runs once per
    # detected face per video frame and flooded stdout.
    left_ratio = left_height / left_width
    right_ratio = right_height / right_width
    return (left_ratio + right_ratio) / 2
# Drowsy-driver demo: watch the webcam and warn when eyes stay closed.
stream = cv2.VideoCapture(0)  # webcam
detector = dlib.get_frontal_face_detector()
# Requires the pre-trained 68-landmark model file next to the script.
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
closed = 0  # consecutive frames with the eye ratio below threshold
threshold = 0.35  # eye ratio below this counts as a closed eye
while(stream.isOpened()):
    ret, frame = stream.read()
    if ret:
        # Mirror the frame so the preview behaves like a mirror.
        img = cv2.flip(frame, 1)
        dets = detector(img, 1) # detect driver's face
        for face in dets:
            cv2.rectangle(img, (face.left(), face.top()), (face.right(), face.bottom()), (0, 0, 255), 3)
            print("Detection: Left: {} Top: {} Right: {} Bottom: {}".format(face.left(), face.top(), face.right(), face.bottom()))
            landmarks = predictor(img, face).parts()
            # Draw every landmark point on the preview.
            for p in landmarks:
                cv2.circle(img, (p.x, p.y), 2, (0, 255, 0), -1)
            if (eyeRatio(landmarks) < threshold):
                closed += 1
                # Warn only after more than 5 consecutive "closed" frames.
                if closed > 5:
                    cv2.putText(img, "WakeUp!!", (face.left(), face.bottom()+60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 2)
            else:
                closed = 0
        cv2.imshow("Drowsy Driver Detection", img)
        # Press 'q' to quit.
        if cv2.waitKey(1) == ord('q'):
            break
|
import pickle
from fastapi import FastAPI
from pydantic import BaseModel
class Person(BaseModel):
    """Request body schema for the /model prediction endpoint."""

    # Feature columns fed to the Titanic model, in this order.
    Sex: int
    Age: float
    Lifeboat: int
    Pclass: int
app = FastAPI()
@app.post("/model")
# Put your code in the function below
def titanic(person: Person):
    """Predict Titanic survival for a single passenger.

    :param person: feature payload (Sex, Age, Lifeboat, Pclass)
    :return: dict with the boolean prediction, an HTTP-style status code
        and a human-readable message
    """
    with open("model/Titanic.pkl", "rb") as fid:
        try:
            # NOTE(review): pickle.load on the bundled model file; never
            # point this at untrusted input.
            # Renamed local (was `titanic`) to stop shadowing this function.
            model = pickle.load(fid)
            y_pred = bool(
                model.predict(
                    [[person.Sex, person.Age, person.Lifeboat, person.Pclass]]
                )[0]
            )
            return {
                "survived": y_pred,
                "status": 200,
                "message": "Survived" if y_pred else "Did not survive",
            }
        except Exception as e:
            # BUGFIX: str(e) -- the raw exception object is not
            # JSON-serializable and broke the error response.
            return {"survived": None, "status": 500, "message": str(e)}
@app.get("/model")
def get():
    """Simple liveness probe for the model endpoint."""
    payload = {"hello": "test"}
    return payload
@app.get("/")
def root():
    """Greeting served at the API root."""
    greeting = {"message": "Hello, Titanic!"}
    return greeting
|
import maze
import dfs
import bfs
import a
import time
import json
# Largest solvable (height, width) per algorithm, filled in below.
max_dimensions = {
    "dfs" : None,
    "bfs" : None,
    "a*" : None,
}
# Display name -> search function to benchmark.
search_functions = {
    "dfs" : dfs.dfs,
    "bfs" : bfs.bfs,
    "a*" : a.a
}
tries = 10  # timed runs per maze size
density = .3  # wall density passed to generate_maze
size_increment = 100  # growth of each dimension per step
def get_largest_dim(name):
    """Find the largest maze the named search solves in under a minute.

    Grows the maze by `size_increment` per step, timing `tries` runs at
    each size, and returns the last size whose average stayed below 60 s.

    :param name: key into the module-level `search_functions` dict
    :return: (height, width) tuple of the largest acceptable size
    """
    current_size = [1000, 1000] # initial size to start with
    largest_size = None
    search_function = search_functions[name]
    average_time = 0
    # test different sized mazes until an average time above the limit is found
    while average_time < 60:
        largest_size = (current_size[0], current_size[1])
        current_size[0] += size_increment
        current_size[1] += size_increment
        # NOTE(review): the maze is constructed with a hard-coded .5 here
        # but regenerated with the module-level `density` (.3) each try --
        # confirm the constructor argument is intentional.
        m = maze.Maze(current_size[1], current_size[0], .5)
        i = 0
        total_time = 0
        while i < tries:
            i += 1
            m.generate_maze(density)
            start_time = time.time()
            # Search from just inside one corner to the opposite corner.
            search_function([], m, (1, 1), (current_size[0] - 2, current_size[1] - 2))
            end_time = time.time()
            total_time += end_time - start_time
        average_time = total_time / tries
        print(("\tAverage time for {name} in {width} by {height}: {average_time:.2f}").format(name = name, width = current_size[1], height = current_size[0], average_time = average_time))
    print()
    return largest_size
# Benchmark every search algorithm and persist the results.
for name in search_functions:
    max_dimensions[name] = get_largest_dim(name)
data = json.dumps(max_dimensions) # note, tuples will convert to lists
# Context manager guarantees the handle is closed even if the write
# fails (the original used bare open()/close()).
with open("./test_results/Max Size of Each Search.json", "w") as file:
    file.write(data)
print("With density, .3, within a minute:")
for name in max_dimensions:
    print(("\t{name} can search a {width} by {height} maze").format(name = name, width = max_dimensions[name][1], height = max_dimensions[name][0]))
"""Demo of tempfile.TemporaryFile and NamedTemporaryFile (Python 3)."""
# BUGFIX: converted Python 2 `print` statements to py3 print() calls --
# the original was a SyntaxError on Python 3.
import os
from tempfile import NamedTemporaryFile, TemporaryFile

# Anonymous temp file: removed automatically when the context exits.
with TemporaryFile('w+t') as f:
    f.write('Hello World\n')
    f.write('Testing\n')
    f.seek(0)  # rewind before reading back
    data = f.read()
print(data)

# Named temp file: has a filesystem path while open.
with NamedTemporaryFile('w+t') as f:
    print('filename is:', f.name)

# delete=False keeps the file after close; remove it explicitly so the
# demo no longer leaks a temp file.
with NamedTemporaryFile('w+t', delete=False) as f:
    print('filename is:', f.name)
os.remove(f.name)
# Generated by Django 2.2.13 on 2020-07-10 07:06
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema changes for the shop app (Django 2.2.13).

    - drops ContactMessage.name and adds ContactMessage.namec
      (NOTE(review): data in the old column is lost on migrate --
      confirm no data migration is needed)
    - switches About.aboutus and Contact.contactus to rich-text
      uploading fields
    """

    dependencies = [
        ('shop', '0046_auto_20200710_1230'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='contactmessage',
            name='name',
        ),
        migrations.AddField(
            model_name='contactmessage',
            name='namec',
            field=models.TextField(blank=True, max_length=255),
        ),
        migrations.AlterField(
            model_name='about',
            name='aboutus',
            field=ckeditor_uploader.fields.RichTextUploadingField(),
        ),
        migrations.AlterField(
            model_name='contact',
            name='contactus',
            field=ckeditor_uploader.fields.RichTextUploadingField(),
        ),
    ]
|
from isolation import Board
from sample_players import GreedyPlayer
from sample_players import RandomPlayer
from game_agent import CustomPlayer
from sample_players import null_score
# Pit a CustomPlayer (depth 3, null_score heuristic, iterative=True,
# minimax -- per the summary printed below) against the sample
# GreedyPlayer on an isolation board.
player1 = CustomPlayer(3, null_score, True, 'minimax')
player2 = GreedyPlayer()
game = Board(player1, player2)
# Fixed opening move for each player before automated play begins.
game.apply_move((2, 3))
game.apply_move((0, 5))
winner, history, outcome = game.play()
print('student agent with 3 depths, null_score, iterative and minimax VS GreedyPlayer')
print("\nWinner: {}\nOutcome: {}".format(winner, outcome))
print(game.to_string())
print("Move history:\n{!s}".format(history))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.