text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
# Push a FIR filter configuration to an FPGA block via the oscimp helper lib.
# Usage: <script> <device> <coefficient_file>
# NOTE(review): the trailing constant 25 presumably selects a coefficient
# count / decimation factor -- confirm against the liboscimp_fpga API.
import liboscimp_fpga
import sys
liboscimp_fpga.fir_send_conf(sys.argv[1], sys.argv[2], 25)
|
import sys
import shelve
import pickle
import os
import math
from utils import helper, textprocessing
from collections import Counter
# TF-IDF search script: ranks indexed documents against a command-line query
# and prints the 20 best-matching URLs with their similarity scores.
# Load data from files (produced by the crawler/indexer).
db_file = os.path.join(os.getcwd(), 'db', 'index.db')
urls_file = os.path.join(os.getcwd(), 'db', 'urls.db')
lengths_file = os.path.join(os.getcwd(), 'db', 'lengths.db')
stopwords_file = os.path.join(os.getcwd(), 'vietnamese-stopwords-dash.txt')
with open(urls_file, mode='rb') as f:
    urls = pickle.load(f)
with open(lengths_file, mode='rb') as f:
    lengths = pickle.load(f)
with open(stopwords_file, mode='r', encoding='utf-8') as f:
    stopwords_set = set(f.read().split())
# Get query from the command line.
query = sys.argv[1]
# Load inverted index
index_db = shelve.open(db_file)
# Construct vocabulary from inverted index
vocabulary = set(index_db.keys())
num_docs = len(urls)
# Preprocess query; keep only terms that actually occur in the index.
tokens = textprocessing.preprocess_text(query, stopwords_set)
tokens = [token for token in tokens if token in vocabulary]
# Calculate TF-IDF weights for the query terms.
query_bow = Counter(tokens)
query_weights = {}
for term, freq in query_bow.items():
    df = index_db[term]['df']
    query_weights[term] = helper.idf(df, num_docs) * helper.tf(freq)
# Normalize query weights to unit length (cosine-similarity style).
query_length = math.sqrt(sum((e ** 2 for e in query_weights.values())))
for term, value in query_weights.items():
    query_weights[term] = value / query_length
# Calculate scores: accumulate query-weight x doc-weight for each posting,
# normalized by the precomputed document length.
scores = [[i, 0] for i in range(num_docs)]
for term, query_weight in query_weights.items():
    df = index_db[term]['df']
    postings_list = index_db[term]['postings_list']
    for docId, freq in postings_list.items():
        doc_weight = helper.idf(df, num_docs) * helper.tf(freq)
        scores[docId][1] += query_weight * doc_weight / lengths[docId]
index_db.close()
# Sort scores and display the top 20 non-zero results.
scores.sort(key=lambda e: e[1], reverse=True)
for index, score in scores[:20]:
    if score == 0:
        break
    print('{} - {}'.format(urls[index], score))
|
#!/usr/bin/python
import re, os
def getpwd(X):
    """Return the password stored for account *X* in ~/.pwd/emails.gpg.

    The decrypted file is expected to contain lines of the form
    ``<account>: <password>``.

    Raises:
        ValueError: if no entry for *X* is found.
    """
    # re.escape() so regex metacharacters in the account name are matched
    # literally (the original interpolated X into the pattern unescaped).
    p = re.compile("%s: (.*)" % re.escape(X))
    authinfo = os.popen("gpg -q --batch -d ~/.pwd/emails.gpg").read()
    m = p.search(authinfo)
    if m is None:
        # Previously this crashed with AttributeError (None.group) on a
        # missing entry; fail with an explicit error instead.
        raise ValueError("no password entry found for %r" % X)
    return m.group(1)
from .database import *
def createDb():
"""Metodo de cracion de la base de datos"""
db.drop_all()
db.create_all()
def initDb():
    """Rebuild the database from scratch and seed it with a default admin user."""
    # Recreate the schema (drops all existing data first).
    createDb()
    # NOTE(review): credentials are hard-coded; acceptable for a local seed
    # script, but should not ship to production.
    admin = User(
        name="faby",
        lastname="star",
        username="faby",
        email="star._faby@hotmail.com",
        isAdmin=True,
        cellphone="0983856136",
    )
    # NOTE(review): "onSetPassord" looks like a typo for "onSetPassword" --
    # confirm the actual method name on the User model before renaming.
    admin.onSetPassord("faby123")
    db.session.add(admin)
    db.session.commit()
import logging
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from djangosaml2.backends import Saml2Backend
from djangosaml2.signals import pre_user_save
# Compatibility shim: on Django versions where SiteProfileNotAvailable no
# longer exists, define a stand-in so the except clauses below still work.
try:
    from django.contrib.auth.models import SiteProfileNotAvailable
except ImportError:
    class SiteProfileNotAvailable(Exception):
        pass
logger = logging.getLogger('djangosaml2')
class MultiEmailSaml2Backend(Saml2Backend):
    """Saml2Backend variant that resolves the local user either from a mapped
    SAML attribute or from the SAML nameid, globally or per-IdP
    (via SAML_IDPS_USE_NAME_ID_AS_USERNAME)."""

    def authenticate(self, request, session_info=None, attribute_mapping=None,
                     create_unknown_user=True, **kwargs):
        # Both the SAML session info and the attribute mapping are required
        # to resolve a user; bail out early if either is missing.
        if session_info is None or attribute_mapping is None:
            logger.error('Session info or attribute mapping are None')
            return None
        if not 'ava' in session_info:
            logger.error('"ava" key not found in session_info')
            return None
        attributes = session_info['ava']
        if not attributes:
            # Logged but not fatal: identification may still work via nameid.
            logger.error('The attributes dictionary is empty')
        use_name_id_as_username = getattr(
            settings, 'SAML_USE_NAME_ID_AS_USERNAME', False)
        django_user_main_attribute = settings.SAML_DJANGO_USER_MAIN_ATTRIBUTE
        django_user_main_attribute_lookup = settings.SAML_DJANGO_USER_MAIN_ATTRIBUTE_LOOKUP
        # Selectively use the nameid field for IdPs in this list.
        ids_use_name_id_as_username = getattr(
            settings, 'SAML_IDPS_USE_NAME_ID_AS_USERNAME', [])
        logger.debug('attributes: %s', attributes)
        saml_user = None
        if use_name_id_as_username or session_info['issuer'] in ids_use_name_id_as_username:
            if 'name_id' in session_info:
                logger.debug('name_id: %s', session_info['name_id'])
                saml_user = session_info['name_id'].text
            else:
                logger.error('The nameid is not available. Cannot find user without a nameid.')
        else:
            # Fall back to the configured main attribute (e.g. an email).
            saml_user = self.get_attribute_value(django_user_main_attribute, attributes, attribute_mapping)
        if saml_user is None:
            logger.error('Could not find saml_user value')
            return None
        if not self.is_authorized(attributes, attribute_mapping):
            return None
        main_attribute = self.clean_user_main_attribute(saml_user)
        # Note that this could be accomplished in one try-except clause, but
        # instead we use get_or_create when creating unknown users since it has
        # built-in safeguards for multiple threads.
        return self.get_saml2_user(
            create_unknown_user, main_attribute, attributes, attribute_mapping)

    def update_user(self, user, attributes, attribute_mapping,
                    force_save=False):
        """Update a user with a set of attributes and returns the updated user.

        By default it uses a mapping defined in the settings constant
        SAML_ATTRIBUTE_MAPPING. For each attribute, if the user object has
        that field defined it will be set, otherwise it will try to set
        it in the profile object.
        """
        if not attribute_mapping:
            return user
        # Decide where non-user fields go: a legacy profile object if one
        # exists, otherwise (no get_profile at all) the user model itself.
        try:
            profile = user.get_profile()
        except ObjectDoesNotExist:
            profile = None
        except SiteProfileNotAvailable:
            profile = None
        # Django 1.5 custom model assumed
        except AttributeError:
            profile = user
        user_modified = False
        profile_modified = False
        for saml_attr, django_attrs in attribute_mapping.items():
            try:
                for attr in django_attrs:
                    # do not overwrite the main attribute, e.g. email field,
                    # which is set on creation and does not need to be updated.
                    if attr == settings.SAML_DJANGO_USER_MAIN_ATTRIBUTE:
                        continue
                    if hasattr(user, attr):
                        user_attr = getattr(user, attr)
                        if callable(user_attr):
                            # Setter method on the user: receives the full
                            # value list for this SAML attribute.
                            modified = user_attr(
                                attributes[saml_attr])
                        else:
                            modified = self._set_attribute(
                                user, attr, attributes[saml_attr][0])
                        user_modified = user_modified or modified
                    elif profile is not None and hasattr(profile, attr):
                        modified = self._set_attribute(
                            profile, attr, attributes[saml_attr][0])
                        profile_modified = profile_modified or modified
            except KeyError:
                # the saml attribute is missing
                pass
        logger.debug('Sending the pre_save signal')
        # Give signal listeners a chance to mutate the user; any truthy
        # response forces a save below.
        signal_modified = any(
            [response for receiver, response
             in pre_user_save.send_robust(sender=user.__class__,
                                          instance=user,
                                          attributes=attributes,
                                          user_modified=user_modified)]
        )
        if user_modified or signal_modified or force_save:
            user.save()
        if (profile is not None
                and (profile_modified or signal_modified or force_save)):
            profile.save()
        return user
|
import hashlib
# Read one line from stdin and print its SHA-512 hex digest.
# The original used the Python 2-only raw_input() and hashed a str, which
# raises NameError / TypeError on Python 3; input() plus an explicit UTF-8
# encode works there (hashlib requires bytes).
print(hashlib.sha512(input().encode("utf-8")).hexdigest())
# -*- encoding: utf-8 -*-
from openerp.osv import fields, orm, osv
import openerp.addons.decimal_precision as dp
class account_invoice_line(osv.osv):
    # Recompute the stored price_subtotal via the tax engine so it reflects
    # the line's taxes (OpenERP 7 old-style osv API).
    _inherit = "account.invoice.line"

    def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        """Functional-field computation: line total from compute_all(),
        rounded to the invoice currency when an invoice is attached."""
        res = {}
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        for line in self.browse(cr, uid, ids):
            # Unit price after applying the line discount percentage.
            price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
            taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, partner=line.invoice_id.partner_id)
            res[line.id] = taxes['total']
            if line.invoice_id:
                # Round to the invoice currency's precision.
                cur = line.invoice_id.currency_id
                res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
        return res

    _columns = {
        'price_subtotal': fields.function(_amount_line, string='Amount', type="float",
                                          digits_compute=dp.get_precision('purchase'), store=True),
    }
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from .models import *
# Create your tests here.
class UserTests(APITestCase):
    """API tests covering user creation."""

    def test_create_user(self):
        """Ensure we can create a new user object."""
        payload = {
            "firstName": "John",
            "lastName": "Wick",
            "partnerCode": "01377224",
        }
        endpoint = reverse('user-list')
        resp = self.client.post(endpoint, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(User.objects.get().firstName, 'John')
class BankTests(APITestCase):
    """API tests covering bank creation."""

    def test_create_bank(self):
        """Ensure we can create a new bank object."""
        payload = {
            "name": "123 bank",
            "partnerCode": "123-bank-code",
            "loyaltyPrograms": "loyalty",
        }
        endpoint = reverse('bank-list')
        resp = self.client.post(endpoint, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Bank.objects.count(), 1)
        self.assertEqual(Bank.objects.get().partnerCode, '123-bank-code')
class LoyaltyProgramTests(APITestCase):
    """API tests covering loyalty program creation."""

    def test_create_loyaltyprogram(self):
        """
        Ensure we can create a new loyalty program object.
        And check that the corresponding bank objects are updated
        """
        payload = {
            "loyaltyProgramName": "Loyalty 1",
            "loyaltyProgramId": 1,
        }
        endpoint = reverse('loyaltyprogram-list')
        resp = self.client.post(endpoint, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(LoyaltyProgram.objects.count(), 1)
# class TransactionTest(APITestCase):
# def test_get_transaction_by_user(self):
# bank = Bank.objects.create(name="test", partnerCode="123-bank-code")
# user = User.objects.create(firstName="test", lastName="test", bankAccountId="123123", pointBalance="123123")
# loyaltyprog = LoyaltyProgram.objects.create(bankCode=bank, loyaltyProgramName="test", loyaltyCurrencyName="test", description="sdfsd", enrollmentLink="sdfsdf", termsAndConditionLink="stet")
# membership = Membership.objects.create(userId=user, loyaltyProgramId=loyaltyprog, membershipNumber="1")
# transaction= Transaction.objects.create(referenceNumber="123", membershipNumber=membership, partnerCode=bank, transactionDate="20201212", transactionAmount="123", additionalInfo="123")
# url = reverse('get-transaction-by-user')
# response = self.client.get(url+"1")
# self.assertEqual(r)
|
# Emit a two-element list to stdout.
values = [5, 6]
print(values)
#!/usr/bin/env python3
import boto3, json
from time import sleep
def create_athena_DB(database, regionName, DBbucket):
    """Create the Athena database if it does not exist yet.

    Returns the raw start_query_execution response for the DDL statement;
    results are written to *DBbucket*.
    """
    athena = boto3.client('athena', region_name=regionName)
    ddl = "CREATE DATABASE IF NOT EXISTS %s;" % (database)
    return athena.start_query_execution(
        QueryString=ddl,
        ResultConfiguration={'OutputLocation': DBbucket},
    )
#Function for executing athena queries
def run_query(query, database, s3_output, region_name='ap-southeast-2'):
    """Start an Athena query execution and return the API response.

    Parameters:
        query: SQL text to execute.
        database: Athena database to run against.
        s3_output: s3:// URI where Athena writes its result files.
        region_name: AWS region. Defaults to the previously hard-coded
            'ap-southeast-2' so existing callers behave identically, while
            new callers can now target other regions.
    """
    client = boto3.client('athena', region_name=region_name)
    response = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={
            'Database': database
        },
        ResultConfiguration={
            'OutputLocation': s3_output,
        }
    )
    print('Execution ID: ' + response['QueryExecutionId'])
    return response
# function to get the query results in a json format
# Be default, Athena stores the result as a csv file on s3 bucket
# function to get the query results in a json format
# By default, Athena stores the result as a csv file on an s3 bucket
def getResults(q, database, s3_ouput, s3bucketOutputPrefix, DBbucket, regionName):
    """Run query *q*, poll until it finishes, and return its rows as a JSON string.

    On any failure while fetching results, returns a JSON-encoded error message
    instead of raising. (Parameter name 's3_ouput' kept for compatibility.)
    """
    print("Executing query: %s" % (q))
    res = run_query(q, database, s3_ouput)
    client = boto3.client('athena', region_name = regionName)
    # check the query status, keep looping unless there is a success;
    # break out in case the query fails.  The previous bare ``except:``
    # clauses also swallowed KeyboardInterrupt/SystemExit -- narrowed to
    # Exception so the process stays interruptible.
    queryStatus = "RUNNING"
    while (queryStatus != "SUCCEEDED"):
        try:
            sleep(1)
            queryStatus = client.get_query_execution(QueryExecutionId = res['QueryExecutionId'])['QueryExecution']['Status']['State']
            if(queryStatus == "FAILED"):
                break
        except Exception:
            # Transient API error -- keep polling.
            pass
    # if the query succeeded, grab the result
    try:
        r = client.get_query_results(QueryExecutionId = res['QueryExecutionId'])
        j = json.dumps(r['ResultSet']['Rows'])
    except Exception:
        j = json.dumps([{"results" : "An error occurred. Please check your parameters, REGION and S3 resources on AWS."}])
    return j
|
from keras.models import load_model
from helpers import resize_to_fit
from imutils import paths
import numpy as np
import imutils
import cv2
import pickle
import os
# Model artifacts produced by the training scripts ("lenght" spelling kept to
# match the file names on disk).
LETTER_MODEL_FILENAME = "captcha_model.hdf5"
LETTER_MODEL_LABELS_FILENAME = "model_labels.dat"
LENGHT_MODEL_FILENAME = "captcha_model_len.hdf5"
LENGHT_MODEL_LABELS_FILENAME = "model_labels_len.dat"
CAPTCHA_IMAGE_FOLDER = "dataSet"
# Load up the model labels (so we can translate model predictions to actual letters)
with open(LETTER_MODEL_LABELS_FILENAME, "rb") as f:
    lb_letter = pickle.load(f)
with open(LENGHT_MODEL_LABELS_FILENAME, "rb") as f:
    lb_lenght = pickle.load(f)
# Load the trained neural networks: one classifies individual letters, the
# other predicts how many characters a captcha contains.
letter_model = load_model(LETTER_MODEL_FILENAME)
lenght_model = load_model(LENGHT_MODEL_FILENAME)
# Grab some random CAPTCHA images to test against.
# In the real world, you'd replace this section with code to grab a real
# CAPTCHA image from a live website.
captcha_image_files = list(paths.list_images(CAPTCHA_IMAGE_FOLDER))
#captcha_image_files = np.random.choice(captcha_image_files, size=(10,), replace=False)
def predictLenght(image):
    """Pad *image* to 400 px wide and ask the length model how many
    characters the captcha contains."""
    target_width = 400
    (height, width) = image.shape[:2]
    # Split the horizontal padding between both sides; any odd pixel goes
    # on the left.
    pad_left = (target_width - width) / 2
    pad_right = int(pad_left)
    if pad_left != int(pad_left):
        pad_left += 1
    padded = cv2.copyMakeBorder(image, top=0, bottom=0, left=int(pad_left),
                                right=pad_right, borderType=cv2.BORDER_REPLICATE)
    # Add channel and batch axes for Keras.
    batch = np.expand_dims(np.expand_dims(padded, axis=2), axis=0)
    scores = lenght_model.predict(batch)
    return lb_lenght.inverse_transform(scores)[0]
def predict(image, letter_image_regions, output):
    """Classify each letter region of *image* and return the joined string.

    Also draws a labelled bounding box for each letter onto *output*
    (a colour copy of the image) for debugging.
    """
    # loop over the letters
    predictions = []
    for letter_bounding_box in letter_image_regions:
        # Grab the coordinates of the letter in the image
        x, y, w, h = letter_bounding_box
        # Extract the letter from the original image with a 2-pixel margin around the edge
        letter_image = image[y - 2:y + h + 2, x - 2:x + w + 2]
        # Re-size the letter image to 20x20 pixels to match training data
        letter_image = resize_to_fit(letter_image, 20, 20)
        # Turn the single image into a 4d list of images to make Keras happy
        letter_image = np.expand_dims(letter_image, axis=2)
        letter_image = np.expand_dims(letter_image, axis=0)
        # Ask the neural network to make a prediction
        prediction = letter_model.predict(letter_image)
        # Convert the one-hot-encoded prediction back to a normal letter
        letter = lb_letter.inverse_transform(prediction)[0]
        predictions.append(letter)
        # Annotate the debug image with the box and the predicted letter.
        cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)
        cv2.putText(output, letter, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
    return "".join(predictions)
def conturesToRegions(contours, split=True, ratio=1.3):
    """Convert OpenCV contours into (x, y, w, h) letter regions.

    Tiny contours (both sides under 9 px) are treated as noise and dropped.
    When *split* is true, a region wider than *ratio* times its height is
    assumed to be two conjoined letters and is halved.
    """
    regions = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w < 9 and h < 9:
            # Noise speck, not a letter.
            continue
        if split and w / h > ratio:
            # Too wide for one letter: split the box into two halves.
            half = int(w / 2)
            regions.append((x, y, half, h))
            regions.append((x + half, y, half, h))
        else:
            regions.append((x, y, w, h))
    return regions
# Last threshold value used by solveCaptchas; module-global so the debug code
# at the bottom of the file can re-threshold the same image identically.
thresholdVal = 0
def solveCaptchas(image_file):
    """Predict the text of one captcha image file.

    Sweeps the binarisation threshold upward until the number of detected
    contours matches the expected character count (taken from the file
    name), then classifies each region with the letter model.
    """
    global thresholdVal
    # Load the image and convert it to grayscale
    filename = os.path.basename(image_file)
    image = cv2.imread(image_file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Expected character count from the file name (the length-model
    # alternative is disabled in the trailing comment).
    lenght = len(filename[:-4])#int(predictLenght(image))
    # Add some extra padding around the image
    image_b = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
    regions = 0
    thresholdVal = 5
    while regions != lenght:
        # Give up once the sweep leaves the useful threshold range.
        if thresholdVal > 127:
            break
        # threshold the image (convert it to pure black and white)
        thresh = cv2.threshold(image_b, thresholdVal, 255, cv2.THRESH_BINARY_INV)[1]# | cv2.THRESH_OTSU)[1]
        # find the contours (continuous blobs of pixels) in the image
        contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        # Discount tiny noise contours when counting candidate regions.
        dec = 0
        for i, contour in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            if w < 9 and h < 9:
                dec += 1
        thresholdVal += 2
        regions = len(contours) - dec
    letter_image_regions = conturesToRegions(contours, split=False)
    # Sort left-to-right so the predicted letters come out in reading order.
    letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])
    output = cv2.merge([image_b] * 3)
    captcha_text_split = predict(image_b, letter_image_regions, output)
    #letter_image_regions = conturesToRegions(contours, split=False)
    #letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])
    #captcha_text_nosplit = predict(image_b, letter_image_regions, output)
    return captcha_text_split
result = ""
# Commented-out single-image debugging snippet (kept for reference).
'''
image_file = "dataSet/00G6BGV583.png"
filename = os.path.basename(image_file)
image = cv2.imread(image_file)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_b = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
thresh = cv2.threshold(image_b, 10, 255, cv2.THRESH_BINARY_INV)[1]# | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
letter_image_regions = conturesToRegions(contours, split=False)
print(len(letter_image_regions))
for box in letter_image_regions:
x, y, w, h = box
cv2.rectangle(thresh, (x, y), (x + w, y + h), (255, 255, 255), 1)
cv2.imwrite("test.png", thresh)
'''
if not os.path.exists("debug"):
    os.makedirs("debug")
# n counts processed images, m counts mispredictions.
n = 0
m = 0
for image_file in captcha_image_files:
    # Load the image and convert it to grayscale
    answer = solveCaptchas(image_file)
    filename = os.path.basename(image_file)
    if answer != filename[:-4]:
        # Wrong prediction: log it and write an annotated debug image using
        # the threshold solveCaptchas settled on (module-global thresholdVal).
        print("CAPTCHA text is: {}".format(filename))
        #print("CAPTCHA predicted lenght is: {}".format(lenght))
        #print("CAPTCHA prediction split is: {}".format(captcha_text_split))
        #print("CAPTCHA prediction no split is: {}".format(captcha_text_nosplit))
        print("CAPTCHA final prediction is: {}".format(answer))
        filename = os.path.basename(image_file)
        image = cv2.imread(image_file)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image_b = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
        thresh = cv2.threshold(image_b, thresholdVal, 255, cv2.THRESH_BINARY_INV)[1]# | cv2.THRESH_OTSU)[1]
        contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        letter_image_regions = conturesToRegions(contours, split=False)
        for box in letter_image_regions:
            x, y, w, h = box
            cv2.rectangle(image_b, (x - 2, y - 2), (x + w + 2, y + h + 2), (0, 255, 0), 1)
        cv2.imwrite("debug/" + filename, image_b)
        #os._exit(1)
        m += 1
    n += 1
    # Periodic progress report.
    if n % 100 == 0:
        print(n)
        print(m)
# Final totals and accuracy.
print(n)
print(1 - (m/n))
#p = "test/" + filename
#if not os.path.exists("test"):
    #os.makedirs("test")
    #cv2.imwrite(p, output)
|
# def array_length(arr):
# return len(test)
# def insert_number(arr, index, number):
# arr.insert(index, number)
# return arr
def insertShiftArray(arr, num):
    """Insert *num* just past the midpoint of *arr* (in place) and return it.

    The insertion index is len(arr) // 2 for even lengths and
    len(arr) // 2 + 1 for odd lengths (i.e. ceil of the half).
    """
    size = len(arr)
    midpoint = size // 2 + (1 if size % 2 else 0)
    arr.insert(midpoint, num)
    return arr
# print(insert_number(test, index, insert_value))
# input('> ')
# Determine first if it's odd or even
# Insert the number to the left
|
from cookiecutter.main import cookiecutter as bake
from cutout.constants import STENCIL_PATH_PREFIX
class TestBuild:
    """End-to-end bakes of the 'examples' template.

    Each test bakes the template with one optional pattern enabled (or none)
    and asserts exactly which pattern files and README mentions appear.
    The five original tests repeated ~14 identical assertion lines each;
    the shared logic is factored into _bake/_assert_build helpers so each
    expectation is stated once.
    """

    def _bake(self, tmp_path, **context):
        """Bake the template into *tmp_path*; return the build directory."""
        bake("examples", output_dir=tmp_path, no_input=True,
             extra_context=dict(context) if context else None)
        return tmp_path / "cut-out-test"

    def _assert_build(self, build_path, *, foo=False, bar=False, baz=False, qux=False):
        """Assert the build contains exactly the artifacts for the selected patterns."""
        assert build_path.exists()
        # Pattern-gated files appear iff their pattern was included.
        assert (build_path / "foo.py").exists() == foo
        assert (build_path / "bar.py").exists() == bar
        assert (build_path / "boring.py").exists()
        boring = (build_path / "boring.py").read_text()
        assert "I am source code in boring.py" in boring
        qux_marker = "This should only be included if the 'qux' pattern is applied"
        assert (qux_marker in boring) == qux
        assert (build_path / "baz").exists() == baz
        assert not (build_path / "garply").exists()
        # No stencil scaffolding files may leak into the output.
        assert len(list(build_path.glob(f"**/{STENCIL_PATH_PREFIX}*"))) == 0
        readme = (build_path / "README.md").read_text()
        for pattern, included in (("foo", foo), ("bar", bar), ("baz", baz), ("qux", qux)):
            assert (pattern in readme) == included

    def test_build__with_defaults(self, tmp_path):
        self._assert_build(self._bake(tmp_path))

    def test_build__including_foo(self, tmp_path):
        self._assert_build(self._bake(tmp_path, include_foo=True), foo=True)

    def test_build__including_bar(self, tmp_path):
        self._assert_build(self._bake(tmp_path, include_bar=True), bar=True)

    def test_build__including_baz(self, tmp_path):
        self._assert_build(self._bake(tmp_path, include_baz=True), baz=True)

    def test_build__including_qux(self, tmp_path):
        self._assert_build(self._bake(tmp_path, include_qux=True), qux=True)
|
import matplotlib.pyplot as plt
import numpy as np
# NOTE(review): this script uses Python 2 print statements and will not run
# under Python 3 without conversion.
# Dose axis sampled finely for the clustering response curve.
IB=np.arange(0.001,40,0.005)
IBAR=[0.001,0.002,0.003,0.004,0.005,0.006,0.008,0.009,0.01,0.02,0.03,0.04,0.05,0.08,1,1.1,1.5,1.8,2,3,4,5,6,7,8,9,10,11,13,15,18,20,22,24,26,28,30,32,34,35,40]
curvature=[0.0,0.005,0.01,0.015,0.02,0.025,0.03,0.035,0.04,0.045,0.05,0.055,0.06,0.065,0.07,0.075,0.08,0.085,0.09,0.095,0.1]
curv_IR=[0,1,2,3,4,5,6,7,8,9]
clust=[]
clust_dose=[1.,2.,3.,4.,5.,6.,7.,8.,9.]
curv_IBAR=[]
curv_clust=[]
curv=[]
Rad=[]
respto_curv=[]
respto_clust=[]
respto_clust.append(0.0)
# Seed curvature/radius lists with the zero-cluster baseline values.
curv.append(1.0/np.sqrt(85/(2*(0.05))))
Rad.append(np.sqrt(85/(2*(0.05))))
# Saturating (Hill-type) clustering response over the dose axis, scaled to 10.
for i in IB:
    print i
    clust.append(((0.027*3*i**2)/(1+0.027*3*i**2))*10)
## curv_IR.append(((0.027*3*i**2)/(1+0.027*3*i**2))*np.exp(0.055*c-0.5*c*c))
# Curvature and radius implied by each clustering level.
for j in range(1,len(clust)):
    curv.append(1.0/np.sqrt(85/(2*(0.05-0.08*np.log(10.0*clust[j]/100.0)))))
    Rad.append(np.sqrt(85/(2*(0.05-0.08*np.log(clust[j]/100.0)))))
for k in np.sort(curv):
    curv_IBAR.append(np.exp(40*54*(0.055*0.055-0.5*0.055**2))-np.exp(40*54*(0.055*k-0.5*k**2)))
    curv_clust.append(np.exp(40*54*(0.055*k-0.5*k**2)))
# Recruitment response to each curvature value on the fixed grid.
for l in curvature:
    respto_curv.append(np.exp(40*54*(0.055*l-0.5*l**2))-1.0)
    ##respto_curv.append(np.exp(40*54*(0.055*l-0.5*l**2)))
    print l, np.exp(40*54*(0.055*l-0.5*l**2))
dos=np.sort(respto_curv)
for m in range(1,len(dos)):
    respto_clust.append(1.0/np.sqrt(((40+0.01*np.exp(max(dos)-dos[m]))*4)/(2*(0.05-0.08*np.log((dos[m])*0.0068)))))
    print 'rigidity, max of dos, dos',(15+np.exp(max(dos)-dos[m])), max(dos), dos[m]
# Plot both response directions on one figure.
plt.xlabel('number of curv_IRSp53')
plt.ylabel('Curvature')
plt.plot(dos,respto_clust,'*-',label='curvature in response to curv_IRSp53')
plt.plot(respto_curv,curvature,label='curv_IRSp53 recruited in response to curvature')
plt.legend()
plt.show()
##n=max(curv)
##m=max(clust)
##for k in range(len(clust)):
## clust[k]=clust[k]/m
## curv[k]=curv[k]/n
#!/usr/local/bin/python3
# Change the above shebang to your python3 binary path
import os, sys, subprocess
import getpass, hashlib, binascii
import atexit
import argparse
# Check for Python3: refuse to run under Python 2.
if sys.version_info[0] < 3:
    print("ShellLocker requires Python 3 or higher.")
    exit()
# Silence Python internal error messages: tracebacks on stderr would expose
# internals to the locked-out user, so stderr is redirected to a no-op sink.
class DevNull:
    def write(self, msg):
        pass
sys.stderr = DevNull()
# Declare Terminal ANSI color escape codes used for all user-facing messages.
class ansi_colors:
    RED = "\033[31m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
    BLUE = "\033[34m"
    MAGENTA = "\033[35m"
    CYAN = "\033[36m"
    RESET = "\033[0m"
# Set variable for current location of script
SCRIPT_PATH = os.path.realpath(__file__)
SCRIPT_PARENT_DIR = os.path.dirname(SCRIPT_PATH)
# CLI flags of this invocation; replayed by invalidate() when re-launching.
FLAGS = []
def invalidate():
    """Re-launch this script with the same CLI flags.

    Registered as an atexit hook so that quitting the locked shell
    immediately re-locks the terminal.
    """
    # Prefer direct execution when the script is executable; otherwise fall
    # back to invoking it through python3.
    if os.access(SCRIPT_PATH, os.X_OK):
        command = SCRIPT_PATH
    else:
        command = "python3 " + SCRIPT_PATH
    # Replay the flags of the current invocation.
    for flag in FLAGS:
        command += " " + flag
    os.system(command)
def verify(TESTMODE):
    """Prompt for the shell password and check it against shelllocker.conf.

    On success the atexit re-lock hook is removed and the process exits,
    returning the real shell; on failure the user is dropped back into the
    locked shell.
    """
    if not os.path.exists(SCRIPT_PARENT_DIR + "/shelllocker.conf"):
        print(ansi_colors.YELLOW + "shelllocker.conf not found" + ansi_colors.RESET)
        print(ansi_colors.YELLOW + "Run with '-s' flag to generate the file" + ansi_colors.RESET)
        atexit.unregister(invalidate)
        exit()
    f = open(os.path.dirname(SCRIPT_PATH) + "/shelllocker.conf" , 'r')
    # Config is space-separated hex bytes (written by setup()); strip spaces.
    DATA = f.read().replace(" " , "")
    f.close()
    # Check header: the hex encoding of "shelllockerheader".
    if DATA[0:34] != "7368656c6c6c6f636b6572686561646572":
        print(ansi_colors.RED + "Invalid configuration file!" + ansi_colors.RESET)
        print(ansi_colors.RED + "Fatal internal error occured" + ansi_colors.RESET)
        exit()
    DATA = DATA[34:]
    # First byte = length of username (decimal digit pair, counts hex bytes)
    USER_LEN = int(DATA[0:2])
    DATA = DATA[2:]
    USERNAME = "".join(DATA[i:i+1] for i in range(0, USER_LEN * 2))
    PASSWORD = DATA[USER_LEN * 2:]
    # Current OS user, hex-encoded to match the stored form.
    user = binascii.hexlify(getpass.getuser().encode('utf-8')).decode()
    if user != USERNAME:
        print(ansi_colors.RED + "User not recognized!" + ansi_colors.RESET)
        invalidate()
    response = str(getpass.getpass("shell_auth: "))
    # NOTE(review): MD5 is a weak, unsalted hash for password storage;
    # a KDF such as hashlib.pbkdf2_hmac would be safer.
    m = hashlib.md5()
    m.update(response.encode('utf-8'))
    if m.hexdigest() == PASSWORD:
        atexit.unregister(invalidate)
        exit()
    else:
        print("Incorrect.")
        blankshell(TESTMODE)
def blankshell(TESTMODE):
    """Minimal locked REPL shown instead of the real shell.

    Loops forever; the only way out is successful authentication via
    'start_t' (or 'exit' when TESTMODE is set).
    """
    print("Use the command <help> to list all the commands.")
    while True:
        user_query = input(ansi_colors.GREEN + "[limbo_shell]" + ansi_colors.RESET + "$ ")
        if user_query == 'start_t':
            verify(TESTMODE)
        elif user_query == 'clear':
            # ESC c -- full terminal reset escape sequence.
            print("\033\143", end="", flush=True)
        elif user_query == 'exit' and TESTMODE:
            atexit.unregister(invalidate)
            exit()
        elif user_query == 'reset':
            reset()
        elif user_query == 'help':
            print("start_t | Starts the auth engine for terminal session")
            print("clear | Clears the screen")
            if TESTMODE:
                print("exit | Exits ShellLocker (Only available in test mode)")
            print("reset | Reset ShellLocker configurations")
            print("help | Displays this message")
def setup():
    """Interactively create shelllocker.conf.

    Stores a header, the hex-encoded username, and an MD5 hash of the chosen
    password, all as space-separated hex byte pairs.
    """
    configured = os.path.exists(SCRIPT_PARENT_DIR + "/shelllocker.conf")
    if configured:
        print(ansi_colors.YELLOW + "ShellLocker is already configured." + ansi_colors.RESET)
        print(ansi_colors.YELLOW + "Use the '--reset' flag to remove configurations" + ansi_colors.RESET)
        exit()
    else:
        new_pass = str(getpass.getpass("Set your shell password: ")).encode('utf-8')
        new_pass_confirm = str(getpass.getpass("Confirm your password: ")).encode('utf-8')
        if new_pass != new_pass_confirm:
            print("Passwords did not match.")
            exit()
        else:
            # NOTE(review): MD5 is weak for password storage; consider a
            # salted KDF such as hashlib.pbkdf2_hmac.
            m = hashlib.md5()
            m.update(new_pass)
            HASHED_PASS = m.hexdigest()
            USER = binascii.hexlify(getpass.getuser().encode('utf-8')).decode()
            # Header: the hex encoding of "shelllockerheader".
            DATA = "73 68 65 6c 6c 6c 6f 63 6b 65 72 68 65 61 64 65 72 "
            if len(USER) % 2 == 1:
                USER = "0" + USER
            # Username length stored as a (zero-padded) decimal byte count.
            USER_LEN = str(int(len(USER) / 2))
            if int(USER_LEN) >= 100:
                print(ansi_colors.RED + "Fatal internal error occured" + ansi_colors.RESET)
                exit()
            if len(USER_LEN) % 2 == 1:
                USER_LEN = "0" + USER_LEN
            if len(HASHED_PASS) % 2 == 1:
                HASHED_PASS = "0" + HASHED_PASS
            # Emit every field as space-separated hex byte pairs.
            DATA += ' '.join(USER_LEN[i:i+2] for i in range(0,len(USER_LEN),2)) + " "
            DATA += ' '.join(USER[i:i+2] for i in range(0,len(USER),2)) + " "
            DATA += ' '.join(HASHED_PASS[i:i+2] for i in range(0,len(HASHED_PASS),2)) + " "
            f = open(SCRIPT_PARENT_DIR + "/shelllocker.conf" , 'w')
            f.write(DATA)
            f.close()
def reset():
    """Delete the existing configuration (after sudo verification) and re-run setup."""
    # sudo -k forces fresh authentication; -v only validates credentials.
    if subprocess.call(["sudo" , "-k" , "-v" , "-p" , "[Verify] Root password: "]) != 0:
        print(ansi_colors.RED + "Fatal internal error occured" + ansi_colors.RESET)
        print(ansi_colors.RED + "Incorrect root password!" + ansi_colors.RESET)
        exit()
    else:
        os.remove(os.path.dirname(SCRIPT_PATH) + "/shelllocker.conf")
        setup()
def diagnose():
    """Check install location, execute permission, and config presence,
    offering interactive fixes where implemented."""
    if SCRIPT_PATH != os.path.expanduser("~") + "/.shelllocker/main.py":
        print(ansi_colors.YELLOW + "ShellLocker program files are in " + SCRIPT_PATH + ansi_colors.RESET)
        print(ansi_colors.YELLOW + "ShellLocker program files should be in ~/.shelllocker" + ansi_colors.RESET)
        responded = False
        while not responded:
            conf = str(input("Fix? (Y/N): "))
            if conf in ['N' , 'n']:
                responded = True
            elif conf in ['Y' , 'y']:
                responded = True
                # Fix the issue here (relocation not implemented yet)
    if not os.access(SCRIPT_PATH, os.X_OK):
        print(ansi_colors.RED + "Script is not an executable!" + ansi_colors.RESET)
        responded = False
        while not responded:
            conf = str(input("Fix? (Y/N): "))
            if conf in ['N' , 'n']:
                responded = True
            elif conf in ['Y' , 'y']:
                responded = True
                os.system("chmod +x " + SCRIPT_PATH)
    if not os.path.exists(SCRIPT_PARENT_DIR + "/shelllocker.conf"):
        print(ansi_colors.YELLOW + "shelllocker.conf not found" + ansi_colors.RESET)
        print(ansi_colors.YELLOW + "Run with '-s' flag to generate the file" + ansi_colors.RESET)
def main():
    """Parse CLI flags and dispatch; the default action locks the shell."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--setup",
        help="Run the setup script",
        action="store_true"
    )
    parser.add_argument(
        "-d",
        "--diagnose",
        help="Check to see if dependencies for this program are met",
        action="store_true"
    )
    parser.add_argument(
        "--reset",
        help="Resets ShellLocker configurations",
        action="store_true"
    )
    parser.add_argument(
        "-t",
        "--test",
        help="Puts ShellLocker into a test environment.",
        action="store_true"
    )
    args = parser.parse_args()
    # Record the chosen flag in FLAGS so invalidate() can re-launch with
    # the same mode when the process exits.
    if args.setup:
        FLAGS.append("--setup")
        setup()
    elif args.diagnose:
        FLAGS.append("--diagnose")
        diagnose()
    elif args.reset:
        FLAGS.append("--reset")
        reset()
    elif args.test:
        FLAGS.append("--test")
        atexit.register(invalidate)
        blankshell(True)
    else:
        # Normal lock mode: re-lock on exit unless verify() unregisters it.
        atexit.register(invalidate)
        blankshell(False)
if __name__ == "__main__":
    main()
|
import aiohttp
import asyncio
from base import base as base
import time
import utils.make_exercise as make_exercise
import re
# Fetch auth tokens for the test users, for use by the concurrency tests.
def get_users_token():
    """Read user names from ../data/users.txt and log each one in.

    Returns a list of auth tokens, one per user name found in the file.
    """
    with open('../data/users.txt', 'r') as fp:
        # SECURITY NOTE(review): eval() executes arbitrary code from the
        # file; ast.literal_eval would be the safe choice if the file only
        # ever contains a plain list/tuple literal — confirm and replace.
        user_names = eval(fp.read())
    print(user_names)
    tokens = []
    for user_name in user_names:
        print(user_name)
        token = base.get_token(username=user_name)
        tokens.append(token)
    print(tokens)
    return tokens
# Wrap each user's token in a request-headers dict.
def get_users_headers():
    """Return a list of ``{"token": ...}`` header dicts, one per test user."""
    return [{"token": token} for token in get_users_token()]
# Build the request descriptions (method, url, payload) used by the tests.
def get_request_datas(exercise_id=None, question_id=None, start_time=None, end_time=None):
    """Return a dict describing each API request used by the load tests.

    Args:
        exercise_id: exercise identifier inserted into the relevant payloads.
        question_id: question identifier for the submit payload.
        start_time: timestamp when the exercise attempt started.
        end_time: timestamp when the exercise attempt ended.
    """
    request_datas = {
        "getExercise": {"method": "GET", "url": "https://test2.data4truth.com/student/practice/getExercise",
                        "data": None},
        "getQuestion": {"method": "GET", "url": "https://test2.data4truth.com/student/practice/getQuestion",
                        "data": {"exerciseID": exercise_id}},
        # Bug fix: host was misspelled "test2:data4truth.com" (colon for dot),
        # which made every submitExercise request fail to resolve.
        "submitExercise": {"method": "POST", "url": "https://test2.data4truth.com/student/practice/submitExercise",
                           "data": {
                               "answerList": [{"questionID": question_id, "answer": "test",
                                               "exerciseStart": start_time, "exerciseEnd": end_time}],
                               "exerciseID": exercise_id}},
        "logout": {"method": "POST", "url": "https://test2.data4truth.com/student/login/logout",
                   "data": None}
    }
    return request_datas
def get_exercise_id():
    # TODO(review): unimplemented stub — presumably meant to fetch an exercise id.
    pass
def get_question_id():
    # TODO(review): unimplemented stub — presumably meant to fetch a question id.
    pass
def do_exercise(exercise_id):
    # TODO(review): unimplemented stub — presumably meant to work through the exercise.
    pass
async def on_request_start(session, trace_config_ctx, params):
    """aiohttp trace hook: stamp the request start time onto the trace context."""
    trace_config_ctx.start = session.loop.time()
async def on_request_end(session, trace_config_ctx, params):
    """aiohttp trace hook: append the request params and elapsed time to a log file."""
    elapsed = session.loop.time() - trace_config_ctx.start
    local_date = base.local_date()
    # One line per completed request, aggregated offline by count_elapsed().
    log_path = "../data/request_data-%s.txt" % local_date
    with open(log_path, 'a+') as fp:
        fp.write(str(params) + ', elapsed:' + str(elapsed) + '\n')
async def fetch(method, url, data=None, headers=None):
    """Issue one traced HTTP request and return the raw response body.

    Args:
        method: 'GET' (data sent as query params) or anything else,
            treated as POST with ``data`` sent as a JSON body.
        url: the target URL.
        data: query params / JSON payload; may be None.
        headers: optional request headers (e.g. the auth token).

    Returns:
        bytes: the raw response body.
    """
    trace_config = aiohttp.TraceConfig()
    trace_config.on_request_start.append(on_request_start)
    trace_config.on_request_end.append(on_request_end)
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False),
                                     trace_configs=[trace_config]) as session:
        if method == 'GET':
            async with session.get(url, params=data, headers=headers) as resp:
                text = await resp.read()
                print("text:", text)
                # Bug fix: the GET branch previously fell through and
                # returned None, so callers never saw GET response bodies.
                return text
        else:
            async with session.post(url, json=data, headers=headers) as resp:
                text = await resp.read()
                print("text:", text)
                return text
# Log in.
async def login(usernames):
    """Log every user in concurrently.

    Returns the (done, pending) sets produced by ``asyncio.wait``.
    """
    url = 'https://test2.data4truth.com/student/login/login'
    print("url:", url)
    tasks = [
        asyncio.create_task(fetch('POST', url, data={"phoneNumber": name, "password": "123456"}))
        for name in usernames
    ]
    return await asyncio.wait(tasks)
# try:
# res_text = await fetch(method='POST', url=url, data=data)
# token = base.re_findall(token_compile, res_text)
# print(username, ':', token)
# if token == []:
# raise Exception("Get token failed!")
# else:
# print(token[0])
# return token[0]
# except Exception as e:
#
# raise e
# Fetch the exercise list for every user concurrently.
async def get_exercise(headers):
    """Fire one getExercise request per headers dict and await them all."""
    req = get_request_datas()["getExercise"]
    print("url:", req["url"])
    tasks = [
        asyncio.create_task(fetch(req["method"], req["url"], data=req["data"], headers=h))
        for h in headers
    ]
    await asyncio.wait(tasks)
# Fetch the exercise questions for every user concurrently.
async def get_question(headers):
    """Fire one getQuestion request per user, each with its own exercise id."""
    base_req = get_request_datas()["getQuestion"]
    method, url = base_req["method"], base_req["url"]
    print("url:", url)
    tasks = []
    for idx, header in enumerate(headers, start=1):
        # Exercise ids follow the fixed test-account numbering scheme.
        exercise_id = "155%08d-1" % idx
        data = get_request_datas(exercise_id=exercise_id)["getQuestion"]["data"]
        tasks.append(asyncio.create_task(fetch(method, url, data=data, headers=header)))
    await asyncio.wait(tasks)
# Do the exercise: submit the result for one question for every user.
async def submit_exercise(headers, question_id):
    """Submit an answer for *question_id* for every user concurrently.

    Bug fixes: look up the "submitExercise" request description (the code
    previously fetched "getExercise", so it hit the wrong endpoint with the
    wrong payload), and sleep asynchronously so the event loop is not
    blocked between the start and end timestamps.
    """
    start_time = base.local_time()
    await asyncio.sleep(1)  # was time.sleep(1), which blocks the event loop
    end_time = base.local_time()
    request_data = get_request_datas(question_id=question_id)["submitExercise"]
    method = request_data["method"]
    url = request_data["url"]
    print("url:", url)
    tasks = []
    for i in range(1, len(headers) + 1):
        exercise_id = "155%08d-1" % i
        request_data = get_request_datas(exercise_id=exercise_id, question_id=question_id,
                                         start_time=start_time, end_time=end_time)["submitExercise"]
        data = request_data["data"]
        tasks.append(asyncio.create_task(fetch(method, url, data=data, headers=headers[i - 1])))
    await asyncio.wait(tasks)
# Log every user out concurrently.  (The original comment mislabelled this
# as "fetch exercise results"; the function only calls the logout endpoint.)
async def log_out(headers):
    """Fire one logout request per user and await them all."""
    req = get_request_datas()["logout"]
    print("url:", req["url"])
    tasks = [
        asyncio.create_task(fetch(req["method"], req["url"], headers=h))
        for h in headers
    ]
    await asyncio.wait(tasks)
# Run the load-test script.
def run_test(user_num):
    """Run the concurrent getExercise load test.

    NOTE(review): ``user_num`` is currently unused — the number of concurrent
    users is determined by the contents of ../data/users.txt.
    """
    print("Performance testing ......")
    users_headers = get_users_headers()
    # run_submit_exercise(users_headers)
    asyncio.run(get_exercise(users_headers))
    # asyncio.run(log_out(users_headers))
    # loop = asyncio.get_event_loop()
    # loop.run_until_complete(get_exercise(users_headers))
    # loop.run_until_complete(get_question(users_headers))
    # question_id_list = ['0101010003001004', '0101010003001014', '0101010003001020',
    #                     '0101010003001021', '0101010003001025', '0101010003001059',
    #                     '0101010003001050', '0101010003001046',
    #                     '0101010005001009', '0101010005001011']
    # for question_id in question_id_list:
    #     loop.run_until_complete(submit_exercise(users_headers, question_id))
    # get_question(users_headers)
def count_elapsed(file_name):
    """Compute the average request elapsed time recorded in *file_name*.

    Reads the per-request log written by on_request_end, averages the
    "elapsed:" values, and appends the timestamped average to
    data/elapsed-<date>.txt.
    """
    print("Counting for average elapse ......")
    elapsed_pattern = "elapsed:(.*)"
    # Aggregate the overall average request time.
    with open(file_name) as fp:
        text = fp.read()
    elapsed_results = base.re_findall(elapsed_pattern, text)
    if not elapsed_results:
        # Guard against ZeroDivisionError when the log holds no entries.
        print("No elapsed entries found in", file_name)
        return
    avg_elapse = sum(float(e) for e in elapsed_results) / len(elapsed_results)
    print("The average elapse is: ", avg_elapse)
    # TODO: also aggregate the average elapsed time per URL.
    local_time = base.local_time()
    local_date = base.local_date()
    with open('data/elapsed-%s.txt' % local_date, 'a+') as fp:
        fp.write(local_time + ' ' + str(avg_elapse) + '\n')
if __name__ == '__main__':
    # usernames = ('15500000001', '15500000002')
    # res_texts = asyncio.run(login(usernames))
    # print(res_texts)
    # Entry point: run the load test (the argument is currently unused).
    run_test(2)
|
f = open("extract.txt","r")
lines = f.read().split("<div class=\"icon\" id=\"")
f.close()
ids = [i.split("\">")[0].strip() for i in lines][1:]
idsstr = '\n'.join(ids)
print idsstr
# #id1:hover,#id2:hover
hoverlist = ["#"+id+":hover" for id in ids]
hover = ', '.join(hoverlist)
print hover
# #id{
# background: url('../img/id.png') bottom;
# background-size: cover;
# }
urllist = ["#"+id+" {background: url('../img/"+id+".png') bottom; background-size: cover;}" for id in ids]
url = '\n'.join(urllist)
print url
fw = open("csscode.txt", "w")
fw.write(idsstr)
fw.write('\n\n')
fw.write(hover)
fw.write('\n\n')
fw.write(url)
fw.close() |
"""Run parallel shallow water domain.
run using command like:
mpirun -np m python run_parallel_sw_merimbula.py
where m is the number of processors to be used.
Will produce sww files with names domain_Pn_m.sww where m is number of processors and
n in [0, m-1] refers to specific processor that owned this part of the partitioned mesh.
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import os
import sys
import time
import numpy as num
#------------------------
# ANUGA Modules
#------------------------
from anuga import Domain
from anuga import Reflective_boundary
from anuga import Dirichlet_boundary
from anuga import Time_boundary
from anuga import Transmissive_boundary
from anuga import rectangular_cross
from anuga import create_domain_from_file
from anuga_parallel.sequential_distribute import sequential_distribute_dump
#--------------------------------------------------------------------------
# Setup parameters
#--------------------------------------------------------------------------
#mesh_filename = "merimbula_10785_1.tsh" ; x0 = 756000.0 ; x1 = 756500.0
mesh_filename = "merimbula_17156.tsh" ; x0 = 756000.0 ; x1 = 756500.0
#mesh_filename = "merimbula_43200_1.tsh" ; x0 = 756000.0 ; x1 = 756500.0
#mesh_filename = "test-100.tsh" ; x0 = 0.25 ; x1 = 0.5
#mesh_filename = "test-20.tsh" ; x0 = 250.0 ; x1 = 350.0
yieldstep = 50
finaltime = 1500
verbose = True
#--------------------------------------------------------------------------
# Setup procedures
#--------------------------------------------------------------------------
class Set_Stage:
    """Initial condition: water height ``h`` above the 1.0 base for x0 < x < x1."""

    def __init__(self, x0=0.25, x1=0.5, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h

    def __call__(self, x, y):
        # The boolean mask for the (x0, x1) band scales h; outside the band
        # the stage is the 1.0 base level.  Works on scalars and arrays.
        inside = (x > self.x0) & (x < self.x1)
        return 1.0 + self.h * inside
class Set_Elevation:
    """Set an elevation that rises linearly with x (elevation = x / h).

    NOTE(review): ``self.x0``/``self.x1`` are copied from the module-level
    globals ``x0``/``x1`` and are never used by ``__call__`` — presumably
    left over from Set_Stage; confirm before removing.
    """
    def __init__(self, h=1.0):
        self.x0 = x0
        self.x1 = x1
        self.h = h
    def __call__(self, x, y):
        return x/self.h
#--------------------------------------------------------------------------
# Setup Sequential Domain
#--------------------------------------------------------------------------
domain = create_domain_from_file(mesh_filename)
# Stage of 2.0 (above the 1.0 base) inside the x0..x1 band.
domain.set_quantity('stage', Set_Stage(x0, x1, 2.0))
#domain.set_datadir('.')
domain.set_name('merimbula_new')
domain.set_store(True)
#--------------------------------------------------------------------------
# Distribute sequential domain on processor 0 to other processors
#--------------------------------------------------------------------------
if verbose: print 'DISTRIBUTING DOMAIN'
# Partition the sequential domain into 20 pieces and dump them to disk (one
# file per would-be processor); a subsequent parallel run loads them.
#sequential_distribute_dump(domain, 4, verbose=True)
sequential_distribute_dump(domain, 20, verbose=True)
|
class Test:
    """Minimal demonstration of a read/write property wrapping a private field."""

    def __init__(self):
        self._color = 'red'

    @property
    def color(self):
        """The current color string."""
        return self._color

    @color.setter
    def color(self, value):
        self._color = value
if __name__ == '__main__':
    # Quick manual check of the property's getter and setter.
    t = Test()
    print(t.color)
    t.color = 'blue'
    print(t.color)
|
#PSU ARL 5 Capstone
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn import linear_model
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
#reading in csv from dataset file
df = pd.read_csv(r"~/Desktop/capstone/dataset_2020.csv")
df.set_index('filename', inplace=True)
#Adding material type for model per requested
# NOTE(review): df.loc[(mask, col)] works (tuple indexer) but the
# conventional spelling is df.loc[mask, col].
df.loc[(df['Process'] == '3D-Printing', 'Material_Type')] = 0 #ABS
df.loc[(df['Process'] == 'Machining', 'Material_Type')] = 1 #Aluminum
df.loc[(df['Process'] == 'Welding', 'Material_Type')] = 1
df.loc[(df['Process'] == 'Casting', 'Material_Type')] = 1
#changing from float to int
#temp_data['Process_Type'] = temp_data['Process_Type'].apply(np.int64)
df['Material_Type'] = df['Material_Type'].apply(np.int64)
#xgboost classifier
# Feature matrix: geometric measurements only.
X = df[[
    'length',
    'width',
    'height',
    'linear_properties',
    'surface_properties',
    'volume_properties',
    'Geom_Vertex_Unknown'
]]
Y = df.Process
#sizing
print(X.shape)
print(Y.shape)
X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size = 0.33, random_state = 42)
# NOTE(review): X_test was split from X, whose index is already 'filename';
# set_index('filename') here likely raises KeyError — verify this line runs.
X_test.set_index('filename', inplace=True)
#need to install xgboost privately
#conda install -c anaconda py-xgboost -- on python interactive
#like gradient boosting but optimizied
from xgboost import XGBClassifier
xgb = XGBClassifier(booster='gbtree', objective='multi:softprob', random_state=42, eval_metric="auc", num_class=4)
xgb.fit(X_train,y_train)
from sklearn.metrics import roc_auc_score
from sklearn import preprocessing
# Use trained model to predict output of test dataset
val = xgb.predict(X_test)
# Binarize labels so the multi-class ROC AUC can be computed.
lb = preprocessing.LabelBinarizer()
lb.fit(y_test)
y_test_lb = lb.transform(y_test)
val_lb = lb.transform(val)
roc_auc_score(y_test_lb, val_lb, average='macro')
# Join the predictions back onto the original rows by filename.
output = pd.DataFrame()
output['Predicted Output'] = val
output['filename'] = X_test.index
output.reset_index(level=0, inplace=True)
df.reset_index(level=0, inplace=True)
output = output.merge(df, how='left', on='filename')
output.head()
##writing model outputs to csv
output.to_csv(r"~/Desktop/capstone/pred_df.csv")
### CROSS VALIDATION
from numpy import loadtxt
import xgboost
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
model = xgboost.XGBClassifier()
# NOTE(review): KFold(random_state=...) without shuffle=True raises
# ValueError on modern scikit-learn — confirm the pinned version.
kfold = KFold(n_splits=10, random_state=42)
results = cross_val_score(model, X, Y, cv=kfold)
print("Accuracy: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
#cross validation is better with 10 splits than 5
# Map predicted process names to numeric codes for the plan generation below.
output.loc[(output['Predicted Output'] == '3D-Printing', 'Process_Type')] = 0
output.loc[(output['Predicted Output'] == 'Welding', 'Process_Type')] = 1
output.loc[(output['Predicted Output'] == 'Casting', 'Process_Type')] = 2
output.loc[(output['Predicted Output'] == 'Machining', 'Process_Type')] = 3
output['Process_Type'] = output['Process_Type'].apply(np.int64)
####OUTPUT ALL MANUFACTURING PLANS
# NOTE(review): three_d_plans / welding_plans / casting_plans /
# machining_plans are not defined anywhere in this file — presumably they
# come from a notebook session or another module; confirm before running.
for i in output['Process_Type']:
    if i == 0:
        three_d_plans.at[3,'Time (min)'] = list(filter(lambda num: num != 0.0, output['time_3D']))
        three_d_plans.at[7,'Time (min)'] = list(filter(lambda num: num != 0.0, output['total_3D_time']))
        three_d_plans.at[8,'Operation Description'] = list(filter(lambda num: num != 0.0, output['cost_3D']))
        three_d_plans.at[9,'Operation Description'] = list(filter(lambda num: num != 0.0, output['cost_3D']))
    elif i == 1:
        welding_plans.at[5,'Time (min)'] = list(filter(lambda num: num != 0.0, output['time_welding']))
        welding_plans.at[8,'Time (min)'] = list(filter(lambda num: num != 0.0, output['total_welding_time']))
        welding_plans.at[9,'Operation Description'] = list(filter(lambda num: num != 0.0, output['welding_cost']))
    elif i == 2:
        casting_plans.at[0,'Time (min)'] = list(filter(lambda num: num != 0.0, output['time_casting1']))
        casting_plans.at[19,'Time (min)'] = list(filter(lambda num: num != 0.0, output['time_casting20']))
        casting_plans.at[22,'Time (min)'] = list(filter(lambda num: num != 0.0, output['time_casting23']))
        casting_plans.at[23,'Time (min)'] = list(filter(lambda num: num != 0.0, output['time_casting24']))
        casting_plans.at[24,'Time (min)'] = list(filter(lambda num: num != 0.0, output['total_casting_time']))
        casting_plans.at[25,'Operation Description'] = list(filter(lambda num: num != 0.0, output['casting_cost']))
    elif i == 3:
        machining_plans.at[4,'Time (min)'] = list(filter(lambda num: num != 0.0, output['time_machining']))
        machining_plans.at[6,'Time (min)'] = list(filter(lambda num: num != 0.0, output['total_machining_time']))
        machining_plans.at[7,'Operation Description'] = list(filter(lambda num: num != 0.0, output['machining_cost']))
|
def aumentar(n, aum):
    """Return ``n`` increased by ``aum``."""
    total = n + aum
    return total
def diminuir(n, dim):
    """Return ``n`` decreased by ``dim``."""
    result = n - dim
    return result
def dobro(n):
    """Return twice ``n``."""
    return n + n
def metade(n):
    """Return half of ``n`` using true division (result may be a float)."""
    half = n / 2
    return half
def moeda(n):
    """Print ``n`` formatted as Brazilian currency (``R$`` prefix)."""
    print('R$' + str(n))
|
# -*- coding:utf-8 -*-
"""
@file: feature_process.py
@time: 2019/06/05
"""
import pandas as pd
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from sklearn.preprocessing import LabelBinarizer
from scipy.sparse import hstack
META_INFO = pd.DataFrame([{"col": "id", "unique_count": 40428967},
{"col": "click", "unique_count": 2},
# {"col": "hour", "unique_count": 240},
{"col": "C1", "unique_count": 7},
{"col": "banner_pos", "unique_count": 7},
{"col": "site_id", "unique_count": 4737},
{"col": "site_domain", "unique_count": 7745},
{"col": "site_category", "unique_count": 26},
{"col": "app_id", "unique_count": 8552},
{"col": "app_domain", "unique_count": 559},
{"col": "app_category", "unique_count": 36},
{"col": "device_id", "unique_count": 2686408},
{"col": "device_ip", "unique_count": 6729486},
{"col": "device_model", "unique_count": 8251},
{"col": "device_type", "unique_count": 5},
{"col": "device_conn_type", "unique_count": 4},
{"col": "C14", "unique_count": 2626},
{"col": "C15", "unique_count": 8},
{"col": "C16", "unique_count": 9},
{"col": "C17", "unique_count": 435},
{"col": "C18", "unique_count": 4},
{"col": "C19", "unique_count": 68},
{"col": "C20", "unique_count": 172},
{"col": "C21", "unique_count": 60}])
type_mapper = {'id': 'uint64',
'click': 'int64',
'C1': 'object',
'banner_pos': 'object',
'site_id': 'object',
'site_domain': 'object',
'site_category': 'object',
'app_id': 'object',
'app_domain': 'object',
'app_category': 'object',
'device_id': 'object',
'device_ip': 'object',
'device_model': 'object',
'device_type': 'object',
'device_conn_type': 'object',
'C14': 'object',
'C15': 'object',
'C16': 'object',
'C17': 'object',
'C18': 'object',
'C19': 'object',
'C20': 'object',
'C21': 'object',
'year': 'object',
'month': 'object',
'day': 'object',
'hour': 'object'}
SAMPLE_SIZE = 40428967
# SAMPLE_SIZE = 40428
HASH_THRESHOLD = 100
MAPPER = {}
def read_df(col, sample_size=-1):
    """Load a single column from ../data/train.csv.

    A ``sample_size`` of -1 selects a tiny 100-row debug sample; otherwise
    the first ``sample_size`` rows are read.
    """
    nrows = 100 if sample_size == -1 else sample_size
    return pd.read_csv("../data/train.csv", usecols=[col], nrows=nrows, dtype=type_mapper)
def extract_time(df, meta_info):
    """Split the ``hour`` column (YYMMDDHH strings) into year/month/day/hour.

    Args:
        df: frame whose ``hour`` column holds 8-character timestamp strings.
        meta_info: meta frame with one row per column (``col``, ``unique_count``).

    Returns:
        (df, all_meta_info): the frame with the four new string columns (the
        raw column is dropped), and meta_info extended with their counts.
    """
    df = df.rename(index=str, columns={"hour": "time_info"})
    # Bug fix: np.str was removed from NumPy; the builtin str is equivalent.
    df = df.astype(dtype={"time_info": str})
    all_meta_info = meta_info.copy()
    new_rows = []
    # Each two-character slice of YYMMDDHH becomes its own column.
    for name, sl in (("year", slice(0, 2)), ("month", slice(2, 4)),
                     ("day", slice(4, 6)), ("hour", slice(6, 8))):
        values = df.time_info.str[sl]
        df.loc[:, name] = values
        new_rows.append({"col": name, "unique_count": values.unique().shape[0]})
    # Bug fix: DataFrame.append was removed in pandas 2.0; use pd.concat.
    all_meta_info = pd.concat([all_meta_info, pd.DataFrame(new_rows)], ignore_index=True)
    df = df.drop(columns=["time_info"])
    if not df.empty:
        return df, all_meta_info
    else:
        return pd.DataFrame(), all_meta_info
def column_hash(df, col, all_meta_info):
    """Build a FeatureHasher for *col*, sized from its unique-value count.

    The hash space is half the column's cardinality, capped at 100 features.
    TODO: the cap should become a tunable hyper-parameter.
    """
    col_unique_count = all_meta_info.loc[all_meta_info["col"] == col, "unique_count"].tolist()[0]
    n_features = min(int(col_unique_count / 2), 100)
    return FeatureHasher(input_type="string", n_features=n_features)
def columns_binary(df, col):
    """Fit and return a LabelBinarizer (one-hot encoder) for the given column."""
    binarizer = LabelBinarizer()
    binarizer.fit(df[col])
    return binarizer
def feature_extraction(df, all_meta_info):
    """Register a transformer (hash or one-hot) for every column of *df* in MAPPER."""
    if df.empty:
        return
    for col in df.columns:
        unique_count = all_meta_info.loc[all_meta_info["col"] == col, "unique_count"].tolist()[0]
        # High-cardinality columns get hashed; the rest get one-hot encoded.
        if unique_count > HASH_THRESHOLD:
            print("col:{} will be processed by feature hash".format(col))
            MAPPER[col] = column_hash(df, col, all_meta_info)
        else:
            print("col:{} will be processed by feature binary".format(col))
            MAPPER[col] = columns_binary(df, col)
def train_mapper():
    """Fit a transformer for every training column and return the MAPPER dict."""
    all_columns = ('C1', 'banner_pos', 'site_id', 'site_domain', 'site_category',
                   'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip',
                   'device_model', 'device_type', 'device_conn_type', 'C14', 'C15',
                   'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'hour')
    all_meta_info = META_INFO
    for col in all_columns:
        df = read_df(col, SAMPLE_SIZE)
        if col == 'hour':
            # The raw hour column is expanded into year/month/day/hour first.
            df, all_meta_info = extract_time(df, META_INFO)
        feature_extraction(df, all_meta_info)
    return MAPPER
def feature_transform(df, mapper):
    """Transform each column of *df* with its fitted mapper and hstack the blocks."""
    blocks_data = []
    for col in df.columns:
        transformer = mapper.get(col)
        if transformer is None:
            # Column with no registered transformer: report and skip it.
            print('error!!!')
            continue
        blocks_data.append(transformer.transform(df[col]))
    return hstack(blocks_data)
if __name__ == '__main__':
    # Fit all column transformers (side effect: populates MAPPER).
    train_mapper()
|
import xml.dom.minidom
import re
from zipfile import ZipFile
import os
class ParserKM():
    """Parser that extracts placemark names and coordinates from KMZ/KML files."""
    def __init__(self,p):
        # KMZ is a zip archive whose first member is the KML document.
        filename, file_extension = os.path.splitext(p)
        if file_extension==".kmz":
            kmz = ZipFile(p, 'r')
            kml = kmz.open(kmz.namelist()[0], 'r')
            doc = xml.dom.minidom.parse(kml)
        elif file_extension==".kml":
            doc = xml.dom.minidom.parse(p)
        else:
            raise Exception("Error extension file")
        doc.normalize()
        Xml = doc.documentElement
        # All <Placemark> elements (points and lines) found in the document.
        self.Placemark = Xml.getElementsByTagName("Placemark")
    def Name(self,s):
        """Return [name, index] pairs for placemarks that contain an *s* child tag."""
        name = []
        for i in range(len(self.Placemark)):
            tags = self.Placemark[i].getElementsByTagName(s)
            if len(tags) != 0 :
                tags = self.Placemark[i].getElementsByTagName("name")[0]
                name.append([self.getText(tags.childNodes),i])
        return name
    def List(self,s,n=None):
        """Return parsed coordinates for placemarks that contain an *s* child tag.

        With ``n`` given, only placemark ``n`` is examined and its parsed
        coordinates are returned directly; otherwise a list with one entry
        per matching placemark is returned.
        """
        List = []
        if n==None:
            for i in range(len(self.Placemark)):
                tags = self.Placemark[i].getElementsByTagName(s)
                if len(tags) != 0 :
                    tags = self.Placemark[i].getElementsByTagName("coordinates")[0]
                    List.append(self.ParseCord(self.getText(tags.childNodes)))
            return List
        else:
            tags = self.Placemark[n].getElementsByTagName(s)
            if len(tags) != 0 :
                tags = self.Placemark[n].getElementsByTagName("coordinates")[0]
                return self.ParseCord(self.getText(tags.childNodes))
    def PointName(self):
        """Names of all <Point> placemarks."""
        return self.Name("Point")
    def PointList(self,n=None):
        """Coordinates of all (or the n-th) <Point> placemarks."""
        return self.List("Point",n)
    def LineName(self):
        """Names of all <LineString> placemarks."""
        return self.Name("LineString")
    def LineList(self,n=None):
        """Coordinates of all (or the n-th) <LineString> placemarks."""
        return self.List("LineString",n)
    def ParseCord(self,coordinates):
        """Parse a KML coordinates string into (lat, lon) tuples.

        KML stores longitude first; each parsed pair is swapped to
        (lat, lon).  Tokens that fail float() (altitude separators, blank
        fragments) reset the pair accumulator.  A single pair is returned
        unwrapped rather than inside a list.
        """
        Pointstr = re.split("\n\t\t\t\t|\n\t\t\t|,0|,",coordinates)
        Point = []
        k = 0
        for i in range(len(Pointstr)):
            try:
                if k==0: a=[]
                a.append(float(Pointstr[i]))
                k+=1
                if k==2:
                    Point.append(tuple([a[1],a[0]]))
                    k=0
            except Exception:
                k=0
        if len(Point)==1:
            return Point[0]
        return Point
    def getText(self, nodelist):
        """Concatenate the text nodes of *nodelist* into one string."""
        rc = ""
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc
#print(p.PointName())
#print(p.PointList(4))
#print(p.LineName())
#print(p.LineList(7))
#'ВЛ 330 кВ Лукомльская ГРЭС- ПС 330 кВ Борисов'
""" for i in p.LineName():
print(i) """ |
from math import ceil
ADJ = None
GROUPID = None
GROUPS = None
VISITED = None
HEIGHT = None
OPT_TREE_HEIGHTS = None
def flood(u, gid):
    """BFS from *u*, labelling every reachable node with component id *gid*.

    Fills GROUPS[gid] with the component's nodes and updates the module-level
    VISITED and GROUPID arrays as a side effect.
    """
    GROUPS[gid] = [u]
    queue = [u]
    qi = 0  # manual queue head index (avoids O(n) pop(0))
    VISITED[u] = True
    GROUPID[u] = gid
    while qi < len(queue):
        u = queue[qi]
        for v in ADJ[u]:
            if not VISITED[v]:
                VISITED[v] = True
                GROUPID[v] = gid
                queue.append(v)
                GROUPS[gid].append(v)
        qi += 1
def get_opt_height(gid):
    """Peel component *gid* from its leaves inward, filling HEIGHT per node.

    Starts from degree<=1 nodes and propagates inwards; HEIGHT[v] becomes the
    largest neighbour height + 1 once all but one of v's neighbours have been
    processed.  Returns the maximum height observed.  Mutates the
    module-level HEIGHT and VISIT_COUNT arrays.
    """
    nodes = GROUPS[gid]
    leaves = [u for u in nodes if len(ADJ[u]) <= 1]
    queue = leaves
    qi = 0
    max_height = -1
    while qi < len(queue):
        u = queue[qi]
        max_height = max(max_height, HEIGHT[u])
        for v in ADJ[u]:
            VISIT_COUNT[v] += 1
            # v is enqueued once all but one neighbour have been peeled away,
            # i.e. the classic "remove leaves layer by layer" progression.
            if VISIT_COUNT[v] == len(ADJ[v]) - 1:
                heights = [HEIGHT[w] + 1 for w in ADJ[v]]
                HEIGHT[v] = max(heights)
                queue.append(v)
        qi += 1
    return max_height
def get_opt_internal_dist(gid, max_height):
    """Longest path passing through a max-height node of component *gid*.

    For every non-isolated node whose HEIGHT equals *max_height*, sums its
    two largest neighbour heights (+1 each); the maximum such sum is the
    longest internal distance through that node.
    """
    nodes = [u for u in GROUPS[gid] if HEIGHT[u] == max_height and len(ADJ[u]) >= 1]
    max_two_height = 0
    for u in nodes:
        adj_heights = [HEIGHT[v] + 1 for v in ADJ[u]]
        adj_heights.sort(reverse=True)
        max_two_height = max(max_two_height, sum(adj_heights[:2]))
    return max_two_height
if __name__ == "__main__":
    # Read the graph: node count, edge count, then one undirected edge per line.
    computers, cables = map(int, input().split())
    ADJ = [[] for _ in range(computers)]
    for _ in range(cables):
        u, v = map(int, input().split())
        ADJ[u].append(v)
        ADJ[v].append(u)
    # Label connected components via BFS flood fill.
    gid = 0
    GROUPID = [-1 for _ in range(computers)]
    GROUPS = dict()
    VISITED = [False for _ in range(computers)]
    for u in range(computers):
        if GROUPID[u] == -1:
            flood(u, gid)
            gid += 1
    # Per-component height and internal-distance metrics.
    HEIGHT = [0 for _ in range(computers)]
    VISIT_COUNT = [0 for _ in range(computers)]
    OPT_TREE_HEIGHTS = [0 for _ in range(gid)]
    OPT_INTERNAL_DIST = [0 for _ in range(gid)]
    for i in range(gid):
        OPT_TREE_HEIGHTS[i] = get_opt_height(i)
        OPT_INTERNAL_DIST[i] = get_opt_internal_dist(i, OPT_TREE_HEIGHTS[i])
    OPT_TREE_HEIGHTS.sort(reverse=True)
    OPT_INTERNAL_DIST.sort(reverse=True)
    # Candidate answers when merging the components optimally: the largest
    # internal distance, the two largest halves joined by one new edge, and
    # the second/third largest halves joined through the first (+2 edges).
    options = []
    if gid >= 1:
        options.append(OPT_INTERNAL_DIST[0])
    if gid >= 2:
        h1, h2 = OPT_INTERNAL_DIST[:2]
        h1 = ceil(h1 / 2)
        h2 = ceil(h2 / 2)
        options.append(h1 + h2 + 1)
    if gid >= 3:
        h1, h2, h3 = OPT_INTERNAL_DIST[:3]
        h1 = ceil(h1 / 2)
        h2 = ceil(h2 / 2)
        h3 = ceil(h3 / 2)
        options.append(h2 + h3 + 2)
    print(max(options))
|
import yaml
from src import helpers,descriptors,svm,evaluate
from sklearn.externals import joblib
from ast import literal_eval
import numpy as np
# svm test data path to get the matrices like accuracy,recall and precision
svm_test_path='C:/Users/gvadakku/Desktop/final_software_cnn_play/images/svm_data/svm_evaluate/'
def load_descriptor(settings):
    """Load the descriptor named in the config file; only HOG is supported.

    Bug fix: the previous fallback returned the *string* ``'hog'`` instead of
    a descriptor object when the configured name was unknown, which would
    break every later call on the descriptor.
    """
    available = {
        'hog': descriptors.HogDescriptor.from_config_file(settings['hog']),
    }
    # Default to the HOG descriptor itself for invalid/unknown names.
    return available.get(settings['train']['descriptor'], available['hog'])
if __name__ == "__main__":
    """Open the config file to load the training configuration and the SVM
    train/test inputs, then train and evaluate on the given data.  All paths
    come from the config file except the SVM test data path defined above.
    Image inputs are .jpg only."""
    with open("config.yaml", "r") as stream:
        # SECURITY NOTE(review): yaml.load without an explicit Loader is
        # unsafe on untrusted input (and warns on PyYAML >= 5.1);
        # yaml.safe_load is the usual replacement for plain-data configs.
        settings = yaml.load(stream)
    descriptor = load_descriptor(settings)
    classifier = svm.SVM(descriptor, settings['svm']['C'])
    print("Descriptor Settings \n" + str(descriptor))
    print("Classifier Settings \n" + str(classifier))
    print("Reading in the images...")
    positive_images = helpers.read_directory_images(settings['train']['positive_image_directory'], extension='.jpg')
    negative_images = helpers.read_directory_images(settings['train']['negative_image_directory'], extension='.jpg')
    # window_size is stored as a string like "(64, 128)" in the config.
    training_size = literal_eval(settings['train']['window_size'])
    positive_images = helpers.resize_images(list(positive_images), training_size)
    negative_images = helpers.resize_images(list(negative_images), training_size)
    print("Total positive images: {}".format(len(positive_images)))
    print("Total negative images: {}".format(len(negative_images)))
    images = np.concatenate((positive_images, negative_images))
    # Set up the labels for binary classification
    labels = np.array([1] * len(positive_images) + [0] * len(negative_images))
    print(labels)
    print("Starting training...")
    classifier.train(images, labels)
    joblib.dump(classifier, settings['train']['outfile'])
    # Evaluate on the held-out folder defined at the top of this file.
    test_folder = helpers.read_directory_images(svm_test_path,extension='.jpg')
    test_images = helpers.resize_images(list(test_folder), training_size)
    classifier = joblib.load(settings['run']['classifier_location'])
    # NOTE(review): all 130 evaluation labels are hard-coded positive —
    # confirm the evaluation folder really contains positives only.
    labels = np.array([1] * 130)
    print(str(classifier))
    evaluate.evaluate_model(classifier, test_images, labels)
    print()
|
from db_models.models.exercise import Exercise
from db_models.models.profile import Profile
from django.db import models
class PersonalRecord(models.Model):
    """Links a profile to an exercise with a recorded weight value."""
    # Owning profile; records are deleted with the profile (CASCADE).
    profile = models.ForeignKey(
        Profile,
        on_delete=models.CASCADE,
        db_index=True,
    )
    # The exercise this record applies to; deleted with the exercise.
    exercise = models.ForeignKey(
        Exercise,
        on_delete=models.CASCADE,
        db_index=True,
    )
    # Recorded weight (0..32767). NOTE(review): units (kg/lb) are not
    # specified anywhere in this model — confirm with the callers.
    weight = models.PositiveSmallIntegerField()
|
from selenium import webdriver
import math
def calc(x):
    """Compute log(|12*sin(x)|) for the integer text *x*; return it as a string."""
    value = math.log(abs(12 * math.sin(int(x))))
    return str(value)
# Automate the stepik "execute_script" task: read x, submit log(|12*sin(x)|),
# tick the checkbox/radio, and press the submit button.
# NOTE(review): the find_element_by_* helpers were removed in Selenium 4;
# this script requires Selenium 3.x (or a port to find_element(By.ID, ...)).
browser = webdriver.Chrome()
link = "https://suninjuly.github.io/execute_script.html"
browser.get(link)
x = browser.find_element_by_id("input_value").text
answer = calc(x)
# Scroll so the button is in view before interacting with the form.
browser.execute_script("window.scrollBy(0, 100);")
browser.find_element_by_id("answer").send_keys(answer)
checkbox = browser.find_element_by_id("robotCheckbox")
checkbox.click()
radio = browser.find_element_by_id("robotsRule")
radio.click()
button = browser.find_element_by_tag_name("button")
button.click()
|
import sys,getopt,ConfigParser,os
import json
import subprocess
import math
ROOT_DIR = '/data/vision/billf/object-properties/sound/sound/'
class Obj:
    """A validation object: mesh file paths plus material parameters (Python 2).

    Reads material-<matId>.cfg for the physical constants and resolves the
    object's .obj and .tet file paths under ROOT/validation/<objId>/.
    """
    ROOT = ROOT_DIR
    def __init__(self,objId=0,matId=0):
        self.objId = objId
        self.matId = matId
        self.Load()
    def ReadMaterial(self,matId):
        """Load physical material constants from material-<matId>.cfg."""
        self.matId = matId
        matCfg = ConfigParser.ConfigParser()
        cfgPath = os.path.join(self.ROOT,'validation','materials','material-%d.cfg'%self.matId)
        matCfg.read(cfgPath)
        self.materialName = matCfg.get('DEFAULT','name')
        self.youngsModulus = matCfg.getfloat('DEFAULT','youngs')
        # NOTE(review): the config key is spelled 'poison' — keep it matching
        # the .cfg files; do not "fix" the spelling here alone.
        self.poissonRatio = matCfg.getfloat('DEFAULT','poison')
        self.density = matCfg.getfloat('DEFAULT','density')
        self.alpha = matCfg.getfloat('DEFAULT','alpha')
        self.beta = matCfg.getfloat('DEFAULT','beta')
        self.friction = matCfg.getfloat('DEFAULT','friction')
        self.restitution = matCfg.getfloat('DEFAULT','restitution')
        self.rollingFriction = matCfg.getfloat('DEFAULT','rollingFriction')
        self.spinningFriction = matCfg.getfloat('DEFAULT','spinningFriction')
    def ReadObj(self,objId):
        """Resolve the path of the original .obj mesh for this object."""
        self.objId = objId
        self.objPath = os.path.join(self.ROOT,'validation','%d'%self.objId,'%d.orig.obj'%self.objId)
    def ReadTet(self):
        """Resolve the path of the tetrahedral mesh for this object."""
        self.tetPath = os.path.join(self.ROOT,'validation','%d'%self.objId,'obj-%d.tet'%self.objId)
    def Load(self):
        """Populate material constants and mesh paths from the stored ids."""
        self.ReadMaterial(self.matId)
        self.ReadObj(self.objId)
        self.ReadTet()
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand ~ so callers can pass home-relative paths.
        self._target = os.path.expanduser(newPath)

    def __enter__(self):
        self._previous = os.getcwd()
        os.chdir(self._target)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self._previous)
def CreateDir(path):
    """Create *path* (including parents) if it does not already exist.

    Uses try/except rather than exists()+makedirs() so another process
    creating the directory between the check and the call cannot crash us
    (race-free, and Python 2 compatible — no exist_ok here).
    """
    try:
        os.makedirs(path)
    except OSError:
        # Re-raise genuine failures; swallow only "already exists".
        if not os.path.isdir(path):
            raise
if __name__=='__main__':
    # Pipeline driver (Python 2): for one (object, material) pair, run the
    # ModalSound tools — stiffness/mass extraction, eigenvalue computation,
    # vertex maps, and BEM input generation — logging progress to the www
    # status directory.  Steps are skipped if outputs exist and overwrite==0.
    argv=sys.argv
    SOURCECODE='/data/vision/billf/object-properties/sound'
    MODALSOUND= SOURCECODE + '/sound/code/ModalSound/build/bin'
    EXTMAT=MODALSOUND + '/extmat'
    GENMOMENTS=MODALSOUND+'/gen_moments'
    FILEGENERATORS=SOURCECODE+'/sound/code/file_generators'
    print('CALL!')
    # Positional args: object id, material id, overwrite flag (0/1), host name.
    obj_id = int(argv[1])
    mat_id = int(argv[2])
    overwrite = int(argv[3])
    host = argv[4]
    print overwrite
    obj = Obj(obj_id,mat_id)
    print obj.density,obj.youngsModulus,obj.poissonRatio
    objfilePath = os.path.join(ROOT_DIR,'validation','%d'%obj.objId)
    outPath = os.path.join(ROOT_DIR,'validation','%d'%obj.objId,'mat-%d'%obj.matId)
    CreateDir(outPath)
    renew = os.path.join(SOURCECODE,'ztzhang','renew','re_new.sh')
    CreateDir(os.path.join(SOURCECODE,'www','EV_status',host))
    logfile = os.path.join(SOURCECODE,'www','EV_status',host,'%d-%d.txt'%(obj.objId,obj.matId))
    subprocess.call('echo start > %s'%logfile,shell=True)
    #call extmat, save to root
    with cd(outPath):
        if not os.path.exists('obj-%d.tet'%obj.objId) or overwrite==1 :
            #print not os.path.exists('obj-%d.tet'%obj.objId)
            #print overwrite==1
            # Refresh the symlink to the shared .tet mesh one level up.
            subprocess.call('unlink obj-%d.tet'%(obj.objId),shell=True)
            subprocess.call('ln -s ../obj-%d.tet obj-%d.tet'%(obj.objId,obj.objId),shell=True)
        if not os.path.exists('obj-%d.stiff.spm'%obj.objId) or not os.path.exists('obj-%d.mass.spm'%obj.objId) or overwrite==1:
            # Extract stiffness/mass matrices with the configured material.
            cmd = EXTMAT + ' -f obj-%d -y %.4g -p %.5g -m -k -g -s -d 1 | tee -a %s'%(obj.objId,obj.youngsModulus,obj.poissonRatio,logfile);
            subprocess.call(cmd ,shell=True)
        #call ev calculation, save to mat-id
        print('EV!')
        if not os.path.exists('obj-%d.ev'%obj.objId) or overwrite==1:
            subprocess.call(renew ,shell=True)
            # MATLAB eigenvalue computation (first 60 modes).
            cmd = 'matlab -nodisplay -nodesktop -nosplash -r "try,addpath(\'%s\'); ev_generator60(\'%s\', 60);catch,exit;end,exit"| tee -a %s'%(FILEGENERATORS,'obj-%d'%obj.objId,logfile)
            subprocess.call(cmd ,shell=True)
        #Geo maps
        if not os.path.exists('obj-%d.vmap'%obj.objId) or overwrite==1:
            cmd = '%s/vmap_generator obj-%d.geo.txt obj-%d.vmap | tee -a %s'%(FILEGENERATORS,obj.objId,obj.objId,logfile)
            print cmd
            subprocess.call(cmd ,shell=True)
        CreateDir(os.path.join(outPath,'bem_input'))
        CreateDir(os.path.join(outPath,'bem_result'))
        CreateDir(os.path.join(outPath,'fastbem'))
        # NOTE(review): 'init_bem.mat' is checked twice here — one of the
        # three conditions was probably meant to test a different file.
        if not os.path.exists('./bem_input/init_bem.mat') or \
            not os.path.exists('./bem_input/mesh.mat') or\
            not os.path.exists('./bem_input/init_bem.mat') or overwrite==1:
            subprocess.call(renew ,shell=True)
            cmd = 'matlab -nodisplay -nodesktop -nosplash -r "addpath(\'%s\');BEMInputGenerator(\'%s\', \'obj-%d\', %.5g, %.5g, %.5g,%d); quit" | tee -a %s'\
            %(FILEGENERATORS,outPath,obj.objId,obj.density,obj.alpha,obj.beta,overwrite,logfile)
            subprocess.call(cmd ,shell=True)
    CreateDir(os.path.join(SOURCECODE,'www','EV_status','%d-%d'%(obj.objId,obj.matId)))
    #call FMM solver, save to mat-id
    #Calculate Moments
#Calculate Moments |
#Import dependencies
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
import datetime as dt
#########################################################################################################################
# Database Setup
#########################################################################################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station= Base.classes.station
#########################################################################################################################
# Flask Setup
#########################################################################################################################
#Create an app
app = Flask(__name__)
#########################################################################################################################
# Setting variables
#########################################################################################################################
# NOTE(review): these queries run once at import time; the computed date
# window and station ranking go stale if the DB changes while serving.
session = Session(engine)
#Parse through to turn the most recent date string into a datetime object
recent_date = session.query(func.max(Measurement.date)).all() [0][0]
recent_year = int(recent_date[0:4])
recent_month = int(recent_date[5:7])
recent_day = int(recent_date[8:])
recent_date = dt.date(recent_year,recent_month, recent_day)
"""Return a dictionary of dates and precipitation for a year"""
# Lower bound used by the /precipitation and /tobs routes below.
one_year_ago = recent_date - dt.timedelta(days = 365)
#Find most active station (descending by observation count)
total_stations_query = session.query(Measurement.station, func.count(Measurement.station))
most_active_stations = total_stations_query.group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
# Station= Base.classes.station
# most_active = session.query(Measurement.station, Measurement.tobs, Measurement.date).filter(Measurement.station == Station, Measurement.date >= one_year_ago).order_by(Measurement.date)[0][0]
# # Most_active_station = pd.DataFrame(most_active)
# # Most_active_station
session.close()
#########################################################################################################################
# FLASK ROUTES
#########################################################################################################################
@app.route("/")
def welcome():
    """Home route: list every available API endpoint as an HTML-ish string."""
    print("Server received request for 'Home' page...")
    # Assemble the exact same response text from individual lines.
    routes = (
        "Available api routes:<br/>",
        "/api/v1.0/precipitation : Precipitation percentages for the past year<br/>",
        "/api/v1.0/stations : Unique stations<br/>",
        "/api/v1.0/tobs : Temperatures for the most active station over the past year<br/>",
        "/api/v1.0/<start> : User inputs given start date (yyyymmdd) to search for minimum, maximum, and average temperature <br/>",
        "/api/v1.0/<start>/<end> : User inputs given start date (yyyymmdd) and end date (yyyymmdd) to search for minimum, maximum, and average temperature<br/>",
    )
    return "".join(routes)
#########################################################################################################################
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return last-year (date, prcp) values, flattened into one JSON list."""
    # Create our session (link) from Python to the DB
    session = Session(engine)
    # Retrieve dates and precipitation scores, sorted by date.
    precipitation_year = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= one_year_ago).order_by(Measurement.date).all()
    # Bug fix: session.close() previously sat AFTER the return statement and
    # never executed, leaking a DB session per request. Close before returning.
    session.close()
    prcp_list = list(np.ravel(precipitation_year))
    return jsonify(prcp_list)
#########################################################################################################################
@app.route("/api/v1.0/stations")
def stations():
    """Return a list of all station names"""
    # Create our session (link) from Python to the DB
    session = Session(engine)
    # Query all stations
    results = session.query(Station.station).all()
    # Bug fix: the old session.close() was placed after `return` and never
    # ran, leaking a DB session per request -- close before returning.
    session.close()
    # Convert list of row tuples into a normal flat list
    all_stations = list(np.ravel(results))
    return jsonify(all_stations)
#########################################################################################################################
@app.route("/api/v1.0/tobs")
def tobs():
    """Return last-year (date, tobs) pairs for the most active station."""
    # Create our session (link) from Python to the DB
    session = Session(engine)
    most_active_station = session.query(Measurement.date, Measurement.tobs).filter(Measurement.station == most_active_stations[0][0], Measurement.date >= one_year_ago).all()
    # Bug fix: session.close() previously sat after the return statement and
    # never executed, leaking a DB session per request.
    session.close()
    Most_active_station = list(np.ravel(most_active_station))
    return jsonify(Most_active_station)
#########################################################################################################################
@app.route("/api/v1.0/<start>")
def start_date_lookup(start):
    """Return min/avg/max temperature on or after a yyyymmdd start date."""
    # Create our session (link) from Python to the DB
    session = Session(engine)
    # Reformat the user's yyyymmdd input to the DB's YYYY-MM-DD strings.
    start_input = dt.date(int(start[0:4]), int(start[4:6]), int(start[6:])).strftime('%Y-%m-%d')
    # Query Min, Max & Avg temps -- note the row shape is (min, max, avg).
    results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start_input).all()
    session.close()
    start_date_tobs = []
    # Bug fix: the old loop unpacked rows as (min, avg, max), but the query
    # selects (min, max, avg) -- avg_temp and max_temp were swapped in the
    # JSON response. Also avoids shadowing the min/max builtins.
    for tmin, tmax, tavg in results:
        start_date_tobs.append({
            "min_temp": tmin,
            "avg_temp": tavg,
            "max_temp": tmax,
        })
    # (Removed a stray print of usage hints to the server console; it carried
    # no information for the API client.)
    return jsonify(start_date_tobs)
@app.route("/api/v1.0/<start>/<end>")
def start_end_lookup(start, end):
    """Return min/avg/max temperature between yyyymmdd start and end dates (inclusive)."""
    # Create our session (link) from Python to the DB
    session = Session(engine)
    # Reformat the user's yyyymmdd inputs to the DB's YYYY-MM-DD strings.
    start_input = dt.date(int(start[0:4]), int(start[4:6]), int(start[6:])).strftime('%Y-%m-%d')
    end_input = dt.date(int(end[0:4]), int(end[4:6]), int(end[6:])).strftime('%Y-%m-%d')
    # Query Min, Max & Avg temps -- note the row shape is (min, max, avg).
    results2 = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start_input).filter(Measurement.date <= end_input).all()
    session.close()
    end_date_tobs = []
    # Bug fix: the old loop unpacked rows as (min, avg, max), but the query
    # selects (min, max, avg) -- avg_temp and max_temp were swapped in the
    # JSON response. Also avoids shadowing the min/max builtins.
    for tmin, tmax, tavg in results2:
        end_date_tobs.append({
            "min_temp": tmin,
            "avg_temp": tavg,
            "max_temp": tmax,
        })
    return jsonify(end_date_tobs)
# Start Flask's development server when run directly.
# debug=True enables the reloader/debugger -- development only.
if __name__ == "__main__":
    app.run(debug=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# classify_captures.py
#
# Copyright 2018 Andres Aguilar <andresyoshimar@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import argparse
import warnings
import pandas as pd
from os import path
__author__ = "Andres Aguilar"
__date__ = "27/Dic/2017"
__version__ = "0.0.1"
__mail__ = "andresyoshimar@gmail.com"
warnings.filterwarnings("ignore")
# Canonical nine GFF column names, applied to headerless GFF files below.
GFF_COLUMNS = ["Chr", "Source", "Feature", "Start", "End", "Score", "Strand", "Frame", "Attribute"]


def read_gff(gff_file, comment='#'):
    """Read a GFF annotation file into a DataFrame with GFF_COLUMNS headers.

    Args:
        gff_file: path to a tab-separated, headerless GFF file.
        comment: lines starting with this character are skipped.

    Returns:
        A pandas DataFrame, or None when `gff_file` is not a file
        (callers must handle the None case).
    """
    if path.isfile(gff_file):
        # read_csv(sep="\t") is the maintained spelling of the deprecated
        # pd.read_table call used before; the result is identical.
        return pd.read_csv(gff_file, sep="\t", header=None, names=GFF_COLUMNS,
                           comment=comment)
    return None
def get_chromosome(x):
    """Map an Arabidopsis locus identifier to its chromosome label.

    Checks for the AT1..AT4 markers in order; anything else (including AT5
    identifiers) falls through to "Chr5", matching the original chain.
    """
    for marker, chromosome in (("AT1", "Chr1"), ("AT2", "Chr2"),
                               ("AT3", "Chr3"), ("AT4", "Chr4")):
        if marker in x:
            return chromosome
    return "Chr5"
def main(captures_file, gff_file, helitrons_file, classification_file):
    """Classify helitron/gene capture events and write a TSV report.

    NOTE(review): this function relies on pandas chained indexing
    (df["col"][idx] = ...), which mutates through a view and triggers
    SettingWithCopyWarning on modern pandas; df.loc would be the safe
    spelling. Left untouched here to preserve exact behavior.
    """
    captures = pd.read_table(captures_file)
    genes = captures["Gene"].unique().tolist()
    # "Helitrons" is a '-'-separated list; keep only the part before the dot.
    captures["Helitron_list"] = captures["Helitrons"].apply(lambda x: x.split("-"))
    captures["Helitron_list"] = captures["Helitron_list"].apply(lambda x: [a.split(".")[0] for a in x])
    hels = list()
    gn_list = list()
    cap_range = list()
    class_list = list()
    # Explode each gene's helitron list into parallel per-pair columns.
    for gene in genes:
        tmp = captures.get(captures["Gene"] == gene)
        for x in tmp.itertuples():
            for j in x.Helitron_list:
                gn_list.append(gene)
                hels.append(j)
                cap_range.append(tmp["CapRange"][tmp.first_valid_index()])
                class_list.append(tmp["Class"][tmp.first_valid_index()])
    df = pd.DataFrame()
    df["Gene"] = gn_list
    df["Helitron"] = hels
    df["CapRange"] = cap_range
    df["Subclass"] = class_list
    df["Chr_gene"] = df["Gene"].apply(get_chromosome)
    df["Chr_hel"] = df["Helitron"].apply(get_chromosome)
    df["Same_chr"] = df.apply(lambda x: x.Chr_gene == x.Chr_hel, axis=1)
    # Get genomic coordinates for helitrons and genes
    annotation = read_gff(gff_file)
    annotation = annotation[annotation["Feature"].str.contains(r"^gene$")]
    annotation["Id"] = annotation["Attribute"].apply(lambda x: x.split(";")[0])
    annotation["Id"] = annotation["Id"].apply(lambda x: x.split("=")[-1])
    df["Gene_start"] = 0
    df["Gene_end"] = 0
    for x in df.itertuples():
        g_tmp = annotation.get(annotation["Id"] == x.Gene)
        df["Gene_start"][x.Index] = g_tmp["Start"][g_tmp.first_valid_index()]
        df["Gene_end"][x.Index] = g_tmp["End"][g_tmp.first_valid_index()]
    helitrons = pd.read_table(helitrons_file)
    # Long_name layout appears to be name|?|start|end -- TODO confirm.
    helitrons["Name"] = helitrons["Long_name"].apply(lambda x: x.split("|")[0])
    helitrons["Start"] = helitrons["Long_name"].apply(lambda x: int(x.split("|")[2]))
    helitrons["End"] = helitrons["Long_name"].apply(lambda x: int(x.split("|")[3]))
    # Temporarily index both frames by helitron name so the coordinate
    # columns align by label during assignment.
    df.index = df["Helitron"].tolist()
    helitrons.index = helitrons["Name"].tolist()
    df["Hel_start"] = helitrons["Start"].astype(int)
    df["Hel_end"] = helitrons["End"].astype(int)
    df.index = list(range(len(df)))
    # ##############################################################################
    # Gene capture classification
    #
    # Class I - Gene capture
    # a) Gene inside a helitron.
    #
    # b) Gene fragment inside a helitron.
    #
    # Class II - Helitron inside gene
    #
    # Class III - Helitron - gene intersection
    #
    # #############################################################################
    df["Class"] = ""
    for x in df.itertuples():
        # NOTE(review): these are Python chained comparisons --
        # `a >= b <= c` means (a >= b) and (b <= c); it is NOT a test that
        # `a` lies within [b, c]. Verify this is the intended overlap test.
        starts = x.Gene_start >= x.Hel_start <= x.Hel_end
        ends = x.Gene_end >= x.Hel_start <= x.Hel_end
        if x.Gene_start >= x.Hel_start and x.Gene_end <= x.Hel_end:
            df["Class"][x.Index] = "Ia"
        elif x.Hel_start >= x.Gene_start and x.Hel_end <= x.Gene_end:
            df["Class"][x.Index] = "II"
        elif starts or ends:
            df["Class"][x.Index] = "III"
        else:
            df["Class"][x.Index] = "Ib"
    print(df["Class"].value_counts())
    df.to_csv(classification_file, index=False, sep='\t')
if __name__ == "__main__":
    # Command-line entry point: collect the four required file paths and
    # hand them to main().
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--captures_file", help="Captures file", required=True)
    parser.add_argument("-g", "--gff_file", help="Gff file", required=True)
    parser.add_argument("-f", "--helitrons_file", help="file with corrected headers", required=True)
    parser.add_argument("-c", "--classification_file", help="output file", required=True)
    options = parser.parse_args()
    main(options.captures_file, options.gff_file,
         options.helitrons_file, options.classification_file)
|
def main(config):
    """Build an SDML Solver from the parsed `config` and run training.

    Returns whatever solver.train() returns.
    """
    from SDML import Solver
    solver = Solver(config)
    # NOTE(review): `cudnn` is a module-level name imported by the __main__
    # block below (which sets cudnn.enabled = False before calling us);
    # benchmark mode is re-enabled here -- verify this combination is intended.
    cudnn.benchmark = True
    return solver.train()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # NOTE(review): argparse `type=bool` does not parse "False" -> False
    # (bool("False") is True; any non-empty string is truthy), and
    # `type=list` splits the argument string into characters. These options
    # effectively only work through their defaults.
    parser.add_argument('--compute_all', type=bool, default=False)
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--just_valid', type=bool, default=False) # wiki, pascal, nus-wide, xmedianet
    parser.add_argument('--multiprocessing', type=bool, default=True)
    parser.add_argument('--running_time', type=bool, default=False)
    parser.add_argument('--cuda_list', type=list, default=[0])
    parser.add_argument('--lr', type=list, default=[1e-4, 2e-4, 2e-4, 2e-4, 2e-4])
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.999)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--output_shape', type=int, default=512)
    parser.add_argument('--alpha', type=float, default=0.5)
    parser.add_argument('--datasets', type=str, default='wiki_doc2vec') # xmedia, wiki_doc2vec, MSCOCO_doc2vec, nus_wide_doc2vec
    parser.add_argument('--view_id', type=int, default=-1)
    parser.add_argument('--sample_interval', type=int, default=1)
    parser.add_argument('--epochs', type=int, default=200)
    config = parser.parse_args()
    # Seed every RNG source (hash, python, numpy, torch CPU + CUDA) for
    # reproducible runs.
    seed = 123
    print('seed: ' + str(seed))
    import numpy as np
    np.random.seed(seed)
    import random as rn
    rn.seed(seed)
    import os
    os.environ['PYTHONHASHSEED'] = str(seed)
    # os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    import torch
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    from torch.backends import cudnn
    # NOTE(review): cudnn disabled here (presumably for determinism), yet
    # main() sets cudnn.benchmark = True afterwards -- confirm intent.
    cudnn.enabled = False
    results = main(config)
    # print(config)
    # import scipy.io as sio
    # if config.running_time:
    #     runing_time = []
    #     for i in range(1):
    #         print('%d-th running time test', i)
    #         results = main(config)
    #         runing_time.append(results)
    #     print('average running time: %f', np.mean(runing_time))
    # else:
    #     results = main(config)
    # if config.just_valid:
    #     sio.savemat('para_results/params_' + config.datasets + '_' + str(config.batch_size) + '_' + str(config.output_shape) + '_' + str(config.alpha) + '_' + str(config.epochs) + '_' + str(config.lr) + '_loss.mat', {'val_d_loss': np.array(results[0]), 'tr_d_loss': np.array(results[1]), 'tr_ae_loss': np.array(results[2])})
    # else:
    #     sio.savemat('results/params_' + config.datasets + '_' + str(config.batch_size) + '_' + str(config.output_shape) + '_' + str(config.alpha) + '_' + str(config.epochs) + '_' + str(config.lr) + '_resutls.mat', {'results': np.array(results)})
|
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.utils.text import slugify
from django.conf import settings
from django.urls import reverse
def get_unique_slug(model_instance, slugable_field_name, slug_field_name):
    """
    Build a slug from `slugable_field_name` on `model_instance`; if a row of
    the same model already uses it, append "-1", "-2", ... until a free slug
    is found. Returns the unique slug as a string.
    """
    base_slug = slugify(getattr(model_instance, slugable_field_name))
    manager = model_instance.__class__._default_manager
    candidate = base_slug
    suffix = 1
    # Probe the table until the candidate slug is unused.
    while manager.filter(**{slug_field_name: candidate}).exists():
        candidate = '{}-{}'.format(base_slug, suffix)
        suffix += 1
    return candidate
# Create your models here.
class Publisher(models.Model):
    # Display name; required. All address/location fields are optional
    # (blank=True for forms, null=True in the DB).
    name = models.CharField(max_length=30)
    address = models.CharField(max_length=50,blank=True, null=True)
    city = models.CharField(max_length=60,blank=True, null=True)
    state_province = models.CharField(max_length=30,blank=True, null=True)
    country = models.CharField(max_length=50,blank=True, null=True)
    website = models.URLField(blank=True, null=True)
    def __str__(self):
        return self.name
class Book(models.Model):
    # Optional author; CASCADE removes the user's books with the account.
    author = models.ForeignKey(settings.AUTH_USER_MODEL,null=True,blank=True,on_delete=models.CASCADE, related_name="book_add")
    title = models.CharField(max_length=240)
    description = models.TextField()
    # Unique slug, auto-filled from the title on first save (see save()).
    slug = models.SlugField(blank=True,unique=True)
    created_at = models.DateTimeField(auto_now_add=True,auto_now=False)
    updated_at = models.DateTimeField(auto_now_add=False,auto_now=True)
    publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE,null=True,blank=True)
    def save(self,*args,**kwargs):
        # NOTE(review): debug prints left in -- drop before production.
        print("save")
        if not self.slug:
            print("not self slug")
            # Generate a collision-free slug from the title exactly once.
            self.slug = get_unique_slug(self, 'title', 'slug')
        super().save(*args, **kwargs)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Detail page keyed by slug (URL name "book_detail").
        return reverse("book_detail",kwargs={'slug':self.slug})
    class Meta:
        # Oldest first.
        ordering = ["created_at"]
def before_save(sender,instance,*args,**kwargs):
    # pre_save hook for Book -- currently only traces that the signal fired.
    print("before")
def after_save(sender,instance,created,*args,**kwargs):
    # post_save hook for Book; `created` is True on the first save.
    print("after")
# Wire both hooks to the Book model only.
pre_save.connect(before_save,sender=Book)
post_save.connect(after_save,sender=Book)
from django.urls import path, include
# Route all /api/tasks/ requests to the todo app's own URLconf.
urlpatterns = [
    path('api/tasks/', include('todo.urls')),
]
|
import onegov.core
import onegov.org
from tests.shared import utils
def test_view_permissions():
    # Every view registered on OrgApp must declare an explicit permission.
    utils.assert_explicit_permissions(onegov.org, onegov.org.OrgApp)
def test_notfound(client):
    # Unknown paths must render the (German) not-found page with status 404.
    notfound_page = client.get('/foobar', expect_errors=True)
    assert "Seite nicht gefunden" in notfound_page
    assert notfound_page.status_code == 404
def test_header_links(client):
    # Header links configured as JSON via /header-settings must appear on the
    # start page; an empty "values" list must not render the container.
    client.login_admin()
    page = client.get('/')
    assert 'id="header-links"' not in page
    # Empty values: no header-links container.
    settings = client.get('/header-settings')
    settings.form['header_links'] = '''
        {"labels":
            {"text": "Text",
            "link": "URL",
            "add": "Hinzuf\\u00fcgen",
            "remove": "Entfernen"},
        "values": []
        }
    '''
    page = settings.form.submit().follow()
    assert 'id="header-links"' not in page
    # Two configured links: both must render as anchors.
    settings = client.get('/header-settings')
    settings.form['header_links'] = '''
        {"labels":
            {"text": "Text",
            "link": "URL",
            "add": "Hinzuf\\u00fcgen",
            "remove": "Entfernen"},
        "values": [
            {"text": "Govikon School",
            "link": "https://www.govikon-school.ch", "error": ""},
            {"text": "Castle Govikon",
            "link": "https://www.govikon-castle.ch", "error": ""}
        ]
        }
    '''
    page = settings.form.submit().follow()
    assert '<a href="https://www.govikon-castle.ch">Castle Govikon</a>' in page
    assert '<a href="https://www.govikon-school.ch">Govikon School</a>' in page
|
# Translation table: each ASCII digit's code point mapped to the matching
# Unicode subscript digit (U+2080 .. U+2089). Consumed by str.translate.
SUBSCRIPT = {ord("0") + i: chr(0x2080 + i) for i in range(10)}


class SymbolParser:
    """String helpers for typesetting symbol names."""

    @staticmethod
    def subscript(string):
        """Return `string` with every decimal digit replaced by its subscript form."""
        return string.translate(SUBSCRIPT)
|
#import sys
#input = sys.stdin.readline
def main():
    """Print, for each i in 1..50, the j in [i, 50] with j % i == i ^ j,
    then render the same relation as a 50x50 o/x grid."""
    size = 50
    grid = [["x"] * size for _ in range(size)]
    matches = 0
    for left in range(1, size + 1):
        # Row header: decimal and binary forms of the current left operand.
        print(left, bin(left)[2:], end=" | ")
        for right in range(left, size + 1):
            if right % left == left ^ right:
                print(bin(right)[2:], end=" ")
                matches += 1
                grid[left - 1][right - 1] = "o"
        # Running total of matches found so far.
        print(" |", matches)
    # Column index header followed by the grid rows.
    print(" ".join(str(col + 1) for col in range(size)))
    for row in grid:
        print(" ".join(row))
# for n in range(1,N):
# cnt = 0
# for i in range(1,n+1):
# for j in range(i, n+1):
# if j%i == i^j:
# cnt += 1
# print(n, cnt)
# Script entry point.
if __name__ == '__main__':
    main()
|
import numpy as np
from scipy.stats import multivariate_normal
from matplotlib import pyplot as plt, lines as mlines, patches as mpatch
# Given in the problem: class priors and Gaussian parameters per class.
prior = [0.15, 0.35, 0.5]
mu = [np.array([-1, 0]), np.array([1, 0]), np.array([0, 1])]
sigma = [np.array([[1, -.4], [-.4, .5]]), np.array([[.5, 0], [0, .2]]), np.array([[.1, 0], [0, .1]])]
sampN = 10000
# Data generated from generateData_Exam1Question1.m
cTrue = np.loadtxt('LData.txt', delimiter=',')
locs = np.loadtxt('xData.txt', delimiter=',').transpose()
# Classify data: MAP rule -- pick the class maximizing likelihood * prior.
cGuess = []
for x in locs:
    p = []
    for i in range(3):
        pTemp = multivariate_normal(mean=mu[i], cov=sigma[i]).pdf(x) * prior[i]
        p.append(pTemp)
    c = np.argmax(p) + 1
    cGuess.append(c)
# Actual number of samples from each class
c1N = len([c for c in cTrue if c == 1])
c2N = len([c for c in cTrue if c == 2])
c3N = len([c for c in cTrue if c == 3])
print('Class 1: ' + str(c1N) + '\nClass 2: ' + str(c2N) + '\nClass 3: ' + str(c3N) + '\nTotal: ' + str(c1N+c2N+c3N))
# Confusion matrix: rows = decided class, columns = true class.
confused = np.zeros([3, 3])
for indx in range(len(cGuess)):
    i = int(cGuess[indx] - 1)
    j = int(cTrue[indx] - 1)
    confused[i, j] += 1
print(confused)
# Total number of samples misclassified (off-diagonal mass).
misClass = np.sum(confused) - np.diag(confused).sum()
print('Total number of misclassified samples: ' + str(int(misClass)))
# Estimated probability of error.
# NOTE(review): divides by the constant sampN (10000); assumes the data
# files hold exactly that many samples -- confirm against the .m generator.
pErr = misClass / sampN
print('Probability of error: ' + str(pErr))
# Visualization: marker encodes the TRUE class, color encodes whether the
# MAP decision was correct (green) or not (red).
fig = plt.figure()
marks = ['+', '.', 'x']
tColor = ['red', 'blue', 'yellow']
for indx in range(len(locs)):
    x = locs[indx][0]
    y = locs[indx][1]
    i = int(cGuess[indx] - 1)
    j = int(cTrue[indx] - 1)
    errColor = None
    if i == j:
        errColor = 'green'
    else:
        errColor = 'red'
    # plt.scatter(x, y, marker=marks[i-1], color=tColor[i-1])
    plt.scatter(x, y, marker=marks[j-1], color=errColor)
    # Progress indicator for the (slow) per-point scatter loop.
    if (indx % 50) == 0:
        print(indx)
plt.xlabel('x')
plt.ylabel('y')
# # For True Plot
# plt.title('True Class Labels')
# c1leg = mlines.Line2D([],[], color='red', marker='+', label='Class 1', linestyle='None')
# c2leg = mlines.Line2D([],[], color='blue', marker='.', label='Class 2', linestyle='None')
# c3leg = mlines.Line2D([],[], color='yellow', marker='x', label='Class 3', linestyle='None')
# plt.legend(handles=[c1leg, c2leg, c3leg])
# For Guess Plot
plt.title('Estimated Class Labels')
c1leg = mlines.Line2D([],[], marker='+', label='Class 1', linestyle='None')
c2leg = mlines.Line2D([],[], marker='.', label='Class 2', linestyle='None')
c3leg = mlines.Line2D([],[], marker='x', label='Class 3', linestyle='None')
gleg = mpatch.Patch(color='green', label='Correct')
rleg = mpatch.Patch(color='red', label='Incorrect')
plt.legend(handles=[c1leg, c2leg, c3leg, gleg, rleg])
fig.show()
# 312. Burst Balloons
'''
Given n balloons, indexed from 0 to n-1. Each balloon is painted with a number on it, represented by the array nums. You are asked to burst all the balloons. If you burst balloon i you will get nums[left] * nums[i] * nums[right] coins, where left and right are the indices adjacent to i. After the burst, left and right become adjacent.
Find the maximum coins you can collect by bursting the balloons wisely.
Note:
You may imagine nums[-1] = nums[n] = 1. They are not real therefore you can not burst them.
0 ≤ n ≤ 500, 0 ≤ nums[i] ≤ 100
Example:
Input: [3,1,5,8]
Output: 167
Explanation: nums = [3,1,5,8] --> [3,5,8] --> [3,8] --> [8] --> []
coins = 3*1*5 + 3*5*8 + 1*3*8 + 1*8*1 = 167
'''
Basic idea:
We append a 1 to both the start and the end of the array; these sentinels are
invisible and are never burst.
Think about the last step: there will be only one balloon ai left in the middle,
    1   ai   1
so every burst to the left of ai happens with the fixed, unburst boundary
(1, ai), and the same applies to the right side --
just like the boundary 1 ... 1 of the original problem.
So the problem decomposes into subproblems:
    1 a0 a1 .... ai-1 ai ai+1 .... 1
       ---------------------
                       ----------------
Score(1 -> 1) = 1*ai*1 + Score(1 -> ai) + Score(ai -> 1, right side)
Let T(left, right) be the maximum burst score with the boundaries left and right fixed;
then, choosing each ai as the last balloon burst between them,
    T(left, right) = max over i of  a_left*a_i*a_right + T(left, i) + T(i, right)
and we take the maximum over all candidate ai.
# Solution with Memoization
class Solution:
    """
    @param nums: A list of integer
    @return: An integer, maximum coins
    """
    def maxCoins(self, nums):
        """Interval DP with memoization: for each pair of fixed, unburst
        boundaries (lo, hi), try every balloon k between them as the LAST
        one burst and take the best total."""
        import functools

        # Sentinel balloons worth 1 coin at both ends; they are never burst.
        padded = [1] + nums + [1]

        @functools.lru_cache(None)
        def best(lo, hi):
            # Maximum score obtainable strictly between boundaries lo and hi.
            if hi - lo < 2:
                return 0
            score = 0
            for k in range(lo + 1, hi):
                candidate = (padded[lo] * padded[k] * padded[hi]
                             + best(lo, k) + best(k, hi))
                if candidate > score:
                    score = candidate
            return score

        return best(0, len(padded) - 1)
# Solution Iterative DP
todo
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os, sys, re
# from fabric.api import *
# from fabric.contrib.console import confirm
from fabric2 import Connection
# .dotenv
# from os.path import join, dirname
# from dotenv import load_dotenv
# load environment variables from .env file
# dotenv_path = join(dirname(__file__), 'ENV', 'jonaso.de')
# load_dotenv(dotenv_path)
#os.environ.get('ftp_username')
#os.environ.get('ftp_password')
# Remote target directory comes from the environment (expected: "public").
remotedir = os.environ.get("remotedir") # public
# Local build output directory and the archive it is packed into.
localdir = "public"
tarFilename = "dist/jonaso.tar.gz"
# credentials are stored in ENV/<site> file
# env.user = os.environ.get("ftp_username")
# env.hosts = [ os.environ.get("ftp_host") ]
# @task
def local_cleanup():
    """Delete the local tar archive; quiet if it does not exist.

    Fix: the old body called fabric 1.x's `local()`, which is no longer
    imported anywhere in this file (fabric.api import is commented out),
    so invoking it raised NameError. os.remove with a swallowed OSError
    reproduces the intended `rm -f` semantics.
    """
    try:
        os.remove(tarFilename)
    except OSError:
        # Missing file (or already cleaned) -- same as `rm -f`.
        pass
# @task
def deploy():
    """Deploy the generated site to the remote host.

    NOTE(review): the unconditional sys.exit() below makes the entire
    try/finally section dead code -- as written, this function only probes
    the connection, prints diagnostics, and exits.
    """
    print("========================================")
    print("deploying to server") # + os.environ.get("ftp_host"))
    print("========================================")
    # NOTE(review): port 21 is FTP, but fabric2's Connection speaks SSH --
    # presumably port 22 was intended; confirm before re-enabling the rest.
    c = Connection(host='www.jonaso.de', port=21)
    result = c.run('uname -s')
    print(result.stdout.strip())
    print(result.exited)
    print(result.ok)
    print(result.command)
    print(result.connection)
    print(result.connection.host)
    sys.exit()
    try:
        # cleanup
        # local_cleanup()
        # compress the folder
        # local("tar -zcvf %s %s" % (tarFilename, localdir))
        pass
        # upload the tar file to the remote host
        # put(tarFilename, join(remotedir, tarFilename), use_sudo=True, mirror_local_mode=True)
        # with cd(remotedir):
        # untar the folder
        # sudo("tar -xvf " + tarFilename)
        # modify perms # TODO: check if this is necessary
        # sudo("chmod 755 " + remotedir)
        # drop the database
        # sudo("mysqladmin -f -u%s -p\"%s\" drop %s" % (dbUsername, dbPassword, dbName))
        # sudo("cp -r wordpress/dist ./");
        # sudo("rm -rf ./wordpress/dist");
        # sudo("cp -r wordpress/static ./");
        # sudo("rm -rf ./wordpress/static");
        # sudo("cp -r wordpress/favicon.* ./");
        # # sudo("rm -f ./favicon.*");
        # sudo("cp -r wordpress/.htaccess ./.htaccess");
        # sudo("rm -f ./wordpress/.htaccess");
    finally:
        # cleanup
        # local_cleanup()
        # remote cleanup
        # remove the tar file and sql file
        # sudo("rm -f " + join(remotedir, localdir))
        pass
# Allow running this deployment task directly as a script.
if __name__ == "__main__":
    deploy()
|
# coding: utf-8
__author__ = 'bohan'
# Demonstrates how to use function decorators (函数修饰符).
|
from django.db import models
from tweets.models import Tweet
from users.models import UserAccount
# Create your models here.
class Comment(models.Model):
    # NOTE(review): Django appends "_id" to FK attribute names, so these
    # columns become user_id_id / tweet_id_id in the DB; the conventional
    # names would be `user` and `tweet` (renaming requires a migration).
    user_id = models.ForeignKey(UserAccount, on_delete=models.CASCADE, related_name='user_comments')
    tweet_id = models.ForeignKey(Tweet, on_delete=models.CASCADE, related_name='comments')
    content = models.CharField(max_length=5000)
    # Optional image reference stored as plain text (URL or path).
    image = models.CharField(max_length=1000, default='', blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return "%s %s %s" % (self.user_id.name, self.tweet_id.content, self.content)
from oscpy.client import OSCClient
# Loopback OSC target for the oscpy client.
address = "127.0.0.1"
port = 8000
osc = OSCClient(address, port)
# Fire ten integer messages (0..9) at the /send_i address.
for i in range(10):
    osc.send_message(b'/send_i', [i])
|
import httplib2
from datetime import datetime
import simplejson
import time
# Target endpoint and a static new-user fixture for the request payload.
URL = 'http://localhost:7777/api/newuser'
role = "driver"
uname = "hitesh"
fullname = "hitesh katre"
mob_num = "8085461683"
email_id = "hkkatre@gmail.com"
v_id = "jhgfdshddsd"
# NOTE(review): hard-coded bearer token (and personal data) checked into
# source -- rotate the token and load it from the environment instead.
token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjp7IjIiOnRydWV9LCJzdWIiOiI1YTU3YWY1NTE2Nzk2NzUwZDU4YTdlNGMiLCJleHAiOjE1MjEyMTI0NTB9.kWs0kh4RR18jG8do2ABdBGizHW48-YwsAdUReVLWfz8'
headers={'content-Type': 'application/json', 'Authorization':'Bearer '+token}
def user_movement():
    """POST the fixed new-user payload to URL every 30 seconds, forever.

    NOTE(review): despite the name, the lat/lng movement simulation is
    commented out -- only the static payload is sent. The loop has no exit
    condition; stop with Ctrl-C.
    """
    #lat= 13.09809
    #lng= 77.09809
    while True:
        # lat += 0.000050
        #lng+= 0.000056
        data = {"role": role, "username": uname,"full_name" : fullname,"mobile_number" : mob_num,
            "email_id" : email_id , "v_id": v_id}
        print(data)
        # New HTTP client per iteration; request body is JSON-encoded.
        client = httplib2.Http()
        resp, content = client.request(URL,
                               'POST',
                               simplejson.dumps(data),
                               headers=headers)
        print("Return ", resp)
        time.sleep(30)
# Kick off the loop immediately on import/run.
user_movement()
|
# Project Euler 6: difference between the square of the sum and the sum of
# the squares of the first 100 natural numbers.
sum_of_squares = sum(x * x for x in range(1, 101))
square_of_sum = sum(range(1, 101)) ** 2
# print() with a single argument is valid on both Python 2 and 3, unlike the
# bare py2 print statement used before; also fixes the "sqares" typo.
print(square_of_sum - sum_of_squares)
|
from application import Application
import pyglet
# Register asset search paths before any resource is loaded, then rebuild
# pyglet's resource index so the new paths take effect.
pyglet.resource.path = ['resources', 'resources/images/', 'resources/levels']
pyglet.resource.reindex()
# Presumably the Application constructor sets up (and may run) the game --
# TODO confirm against the application module.
application = Application()
|
list1 = [1, 2, 3]
try:
    # list.index raises ValueError when the value is absent.
    print(list1.index(5))
except ValueError:
    # Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit
    # and any unrelated bug; catch only the expected exception.
    print("ERROR")
|
import abc
class Embedding(abc.ABC):
    """Embedding interface.

    NOTE(review): methods raise NotImplementedError at call time instead of
    being @abc.abstractmethod, so incomplete subclasses still instantiate.
    """
    def embedding(self, inputs, length, params=None):
        """Do embedding.
        Args:
            inputs: A tensor, input sequence, shape is [B, T]
            length: A tensor, input's length, shape is [B]
            params: A python dict, optional, params
        Returns:
            A tensor, represent input's embedding, shape is [B, T, D]
        Raises:
            NotImplementedError: always, until overridden by a subclass.
        """
        raise NotImplementedError()
    def default_config(self):
        """Default params settings.
        Returns:
            A python dict, default params
        Raises:
            NotImplementedError: always, until overridden by a subclass.
        """
        raise NotImplementedError()
|
from selenium import webdriver
from time import sleep
driver = webdriver.Firefox()
driver.get("https://videojs.com/")
# NOTE(review): find_element_by_xpath was removed in Selenium 4; newer
# bindings require driver.find_element(By.XPATH, ...).
video = driver.find_element_by_xpath("//*[@id='preview-player_html5_api']")
# Fixed wait to let the player initialize before reading currentSrc.
sleep(5)
url = driver.execute_script("return arguments[0].currentSrc;",video)
print(url)
print("start")
# Drive the <video> element directly through the DOM API: play 10s, pause.
driver.execute_script("return arguments[0].play()",video)
sleep(10)
driver.execute_script("arguments[0].pause()",video)
driver.quit()
# Read the number of laps, the circuit length (in meters) and the elapsed
# time (in minutes); compute and print the average speed in km/h.
vt=int(input('digite o numero de voltas: '))
ext=int(input('digite a extensão do percurso(metros): '))
tempo=int(input('digite o tempo de duração(min): '))
# km = laps * meters / 1000 ; hours = minutes / 60
print(f'a velocidade média é {(vt*ext/1000)/(tempo/60)} kms/h')
from src.set_encoders import ContextFreeEncoder
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class Simple2DConvNet(nn.Module):
    """Small LeNet-style CNN: (B, 3, 32, 32) input -> (B, 10) logits."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # conv -> pool -> relu, twice (dropout sits before the second pool).
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        out = self.conv2_drop(self.conv2(out))
        out = F.relu(F.max_pool2d(out, 2))
        # Flatten: 20 channels x 5 x 5 spatial = 500 features.
        out = out.view(-1, 500)
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        return self.fc2(out)
def test_1d_inputs():
    # A shared Conv1d applied per set element must give identical encodings
    # for identical elements, regardless of which set/slot they occupy.
    # 2 sets
    # 3 elements per set
    # (1, 10) features per element per set
    x = torch.ones(2, 3, 1, 10)
    x[:, 1] = x[:, 1] + 1
    x[1, 2] = x[1, 2] + 1
    cfe = ContextFreeEncoder(nn.Conv1d(1, 2, 2), '1d')
    y = cfe(Variable(x))
    # Conv1d(1 -> 2, kernel 2) over length 10 yields length 9, 2 channels.
    assert tuple(y.size()) == (2, 3, 2, 9)
    y = y.view(2, 3, 2*9).data.numpy()
    # Elements that were equal in the input must encode equally.
    assert np.allclose(y[0, 1], y[1, 1])
    assert np.allclose(y[0, 0], y[0, 2])
    assert np.allclose(y[0, 1], y[1, 2])
def test_2d_inputs():
    # Same invariance check as the 1d case, but with a small CNN over images.
    # 2 sets
    # 3 images per set
    # 3 channels per image
    # 32x32 is image size
    pix = torch.ones(2, 3, 3, 32, 32)
    pix[0][0].add_(1)
    pix[0][2].add_(1)
    pix[1][2].add_(1)
    cfe = ContextFreeEncoder(Simple2DConvNet(), '2d')
    cfe.train(False) # stop stochastic dropouts
    y = cfe(Variable(pix))
    # since this is a flattened extractor we should get:
    assert tuple(y.size()) == (2, 3, 10)
    y = y.data.numpy()
    # Images that were equal in the input must encode equally.
    assert np.allclose(y[0][0], y[0][2])
    assert np.allclose(y[0][1], y[1][1])
    assert np.allclose(y[0][0], y[1][2])
|
class Stack():
    """Fixed-capacity LIFO stack backed by a pre-allocated Python list.

    `_top` is the index of the current top element (-1 when empty).
    push/pop print a message instead of raising on overflow/underflow,
    preserving the original interface.
    """
    def __init__(self, stackSize):
        self._top = -1
        self._stackSize = stackSize
        self._stackArray = [None] * stackSize

    def getStackSize(self):
        return self._stackSize

    def setStackSize(self, stackSize):
        """Change the capacity, resizing the backing list to match.

        Bug fix: previously only `_stackSize` changed while the backing
        list kept its old length, so growing the capacity made a later
        push() assign past the end of the list and raise IndexError.
        Shrinking now also truncates storage and clamps `_top`.
        """
        if stackSize > self._stackSize:
            self._stackArray.extend([None] * (stackSize - self._stackSize))
        else:
            del self._stackArray[stackSize:]
            if self._top > stackSize - 1:
                self._top = stackSize - 1
        self._stackSize = stackSize

    def isEmpty(self):
        return self._top == -1

    def isFull(self):
        return self._top == self.getStackSize() - 1

    def push(self, pushedElement):
        if self.isFull():
            print ('The Stack is already Full. Cannot push anymore elements')
        else:
            self._top += 1
            self._stackArray[self._top] = pushedElement

    def pop(self):
        """Remove and return the top element; prints (returns None) if empty."""
        if not(self.isEmpty()):
            poppedElement = self._stackArray[self._top]
            self._stackArray[self._top] = None
            self._top -= 1
            return poppedElement
        else:
            print ('The stack is already empty. Nothing to pop')

    def peek(self):
        # On an empty stack this reads index -1 (the last slot), which is
        # None after pops -- preserving the original (quirky) behavior.
        return self._stackArray[self._top]
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Topic(models.Model):
    """A named topic used to group webpages."""
    # Unique human-readable topic name.
    top_name = models.CharField(max_length=264, unique=True)
    def __str__(self):
        return self.top_name
class Webpage(models.Model):
    """A bookmarked webpage belonging to a Topic."""
    # DO_NOTHING: deleting a Topic intentionally leaves webpage rows untouched.
    topic = models.ForeignKey(Topic,on_delete=models.DO_NOTHING)
    name = models.CharField(max_length = 264 , unique=True)
    url = models.URLField(unique=True)
    def __str__(self):
        return self.name
class AccessRecord(models.Model):
    """Records the date on which a webpage was accessed."""
    name = models.ForeignKey(Webpage,on_delete=models.DO_NOTHING)
    date = models.DateField()
    def __str__(self):
        # Bug fix: this method was misspelled __self__, so Django fell back
        # to the default "AccessRecord object" representation.
        return str(self.date)
class UserProfileInfo(models.Model):
    """Extra profile data attached one-to-one to Django's built-in User."""
    user = models.OneToOneField(User,on_delete=models.DO_NOTHING)
    # Additional, optional profile fields.
    # NOTE(review): 'portofolio_site' is misspelled, but renaming the field
    # would require a schema migration, so it is left as-is here.
    portofolio_site = models.URLField(blank = True)
    profile_pic = models.ImageField(upload_to='profile_pics',blank = True)
    def __str__(self):
        return self.user.username
|
#!/usr/bin/env python
"""
wmcore-sensord
daemon for WMComponent, Service, Resource and Disk.
"""
import time
import os
import sys
import getopt
import subprocess
import string
import copy
import time
import smtplib
import signal
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
from datetime import datetime
from WMCore.Agent.Daemon.Details import Details
from WMCore.Configuration import loadConfigurationFile
########################
### Helper functions ###
########################
def findIOStat():
    """
    Finds active 'iostat' process running for target.

    Returns the pid as an int, or None when no matching process exists.
    """
    global target
    # 'ps -C iostat' lists all iostat processes; grep narrows them to the
    # one whose command line mentions the monitored target (disk name).
    tpids = os.popen("ps -C iostat wwho pid,cmd | grep '%s'" % target).read()
    if len(str(tpids)) != 0:
        pid = str(tpids).split()[0]
        logDebug("findIOStat: %s" % pid)
        return int(pid)
    else:
        logDebug("findIOStat: None")
        return None
def findSar():
    """
    Finds active 'sar' process running for target.

    Returns the pid as an int, or None when no matching 'sar' is running.
    """
    global WMCOMPONENT, SERVICE, RESOURCE
    global targetType, target, shell
    if targetType in (WMCOMPONENT, SERVICE):
        # Process-type targets are matched by the monitored pid on
        # sar's command line.
        pattern = getPid()
    if targetType == RESOURCE:
        if target not in shell['sensor']['RESOURCE']:
            logDebug("findSar: None")
            return None
        else:
            # Resources are matched by their sar flag (e.g. '-u'),
            # escaped so grep treats the leading dash literally.
            pattern = "\\" + shell['sensor']['RESOURCE'][target][1]
    tpids = os.popen("ps -C sar wwho pid,cmd | grep '%s'" % pattern).read()
    if len(str(tpids)) != 0:
        pid = str(tpids).split()[0]
        logDebug("findSar: %s" % pid)
        return int(pid)
    else:
        logDebug("findSar: None")
        return None
def wmComponentPid():
    """
    Finds running WMComponent pid.

    Reads the component's Daemon.xml and returns its ProcessID when the
    daemon reports itself alive; otherwise returns None.
    """
    global config, target
    daemonXml = os.path.abspath("%s/Components/%s/Daemon.xml" % (config.General.workDir, target))
    logDebug("daemonXml=%s" % daemonXml)
    if not os.path.isfile(daemonXml):
        logWarning("Daemon.XML file was not found, assumed that WMComponent is not running.")
        logDebug("wmComponentPid: None")
        return None
    daemon = Details(daemonXml)
    if daemon.isAlive():
        logDebug("wmComponentPid: %s" % daemon['ProcessID'])
        return daemon['ProcessID']
    else:
        logDebug("wmComponentPid: None")
        return None
def servicePid():
    """
    Finds running service pid.

    Returns the pid as an int, or None when the service is not running.
    Only the services in servicesList ('mySQL', 'GridFTP') are handled.
    """
    global target
    if target == "mySQL":
        tpids = os.popen("ps -C %s wwho pid" % servicesList['mySQL']).read()
        if len(str(tpids)) != 0:
            pid = str(tpids).split()[0]
            logDebug("servicePid: %s" % pid)
            return int(pid)
    if target == "GridFTP":
        tpids = os.popen("ps -C %s wwho pgid,pid" % servicesList['GridFTP']).readlines()
        for ipid in tpids:
            if len(str(tpids)) != 0:
                # The master GridFTP process is its own process-group
                # leader (pid == pgid); workers are skipped.
                pgid, pid = str(ipid).split()[0:2]
                if pid == pgid:
                    logDebug("servicePid: %s" % pid.rstrip())
                    return int(pid.rstrip())
    logDebug("servicePid: None")
    return None
def log(msg, code = "INFO"):
    """
    General log handler for target.

    Appends a line of the form '<unix-time>;<code>;<msg>' to the target's
    '<target>-stat.log' file under the sensors directory.
    """
    global target, sensorsPath
    logFileName = "%s-stat.log" % target
    path = "%s/%s" % (sensorsPath, logFileName)
    logFile = open(path, "a")
    # try/finally guarantees the handle is closed even if the write fails
    # (previously a failed write leaked the open file object).
    try:
        logFile.writelines("%s;%s;%s\n" % (int(time.time()), code, msg))
    finally:
        logFile.close()
def logInfo(msg):
    """
    Log handler for target (INFO type messages).
    """
    log(msg, "INFO")
def logError(msg):
    """
    Log handler for target (ERROR type messages).

    Prints to stdout, appends to the target log and terminates the
    daemon with exit status 1.
    """
    global TYPES
    print "ERROR: (%s)(%s)" % (TYPES[targetType], target), msg
    log(msg, "ERROR")
    sys.exit(1)
def logWarning(msg):
    """
    Log handler for target (WARNING type messages).

    Prints to stdout and appends to the target log; execution continues.
    """
    global TYPES
    print "WARNING: (%s)(%s)" % (TYPES[targetType], target), msg
    log(msg, "WARNING")
def logDebug(msg):
    """
    Log handler for target (DEBUG type messages).

    Written only when the daemon was started with '--debug'.
    """
    if debugMode == True:
        log(msg, "DEBUG")
def error(msg):
    """
    Handler for errors.

    Used before the target log exists; prints and exits with status 1.
    """
    print "ERROR:", msg
    sys.exit(1)
def warning(msg):
    """
    Handler for warnings.

    Used before the target log exists; prints and continues.
    """
    print "WARNING:", msg
def killSar(pattern):
    """
    Finds 'sar' process for specific pattern and terminates it.

    Sends SIGKILL to the matching sar process and all of its children
    (sar forks a 'sadc' data collector that would otherwise linger).
    """
    logDebug("killSar(pattern='%s')" % pattern)
    cmd = "pid_list=$(pid=`ps -C sar wwho pid,cmd | grep -i '%s' | awk '{ print $1 }'`; ps ho pid --ppid $pid; echo $pid); echo $pid_list | xargs kill -9" % pattern
    subprocess.Popen(cmd, shell=True, stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT, stdin=None)
def killIOStat(pattern):
    """
    Finds 'iostat' process for specific pattern and terminates it.

    iostat has no child collector, so a plain SIGKILL of the match suffices.
    """
    logDebug("killIOStat(patter='%s')" % pattern)
    cmd = "ps -C iostat wwho pid,cmd | grep -i '%s' | awk '{ print $1 }' | xargs kill -9" % pattern
    subprocess.Popen(cmd, shell=True, stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT, stdin=None)
def stopSar():
    """
    Terminates 'sar' process depending on target's type.
    """
    global WMCOMPONENT, SERVICE, RESOURCE, TYPES
    global target, targetType, shell
    logDebug("Stopping sensor for %s %s" % (TYPES[targetType], target))
    if targetType in (WMCOMPONENT, SERVICE):
        pid = getPid()
        if pid != None:
            killSar(pid)
    if targetType == RESOURCE:
        # Resources are matched by their sar flag (e.g. '-u'), escaped for grep.
        killSar("\\" + shell['sensor']['RESOURCE'][target][1])
def stopIOStat():
    """
    Terminates 'iostat' process depending on target's type.
    """
    global target
    # The iostat command line contains the disk name, which is the target.
    logDebug("Stopping sensor for %s %s" % (TYPES[targetType], target))
    killIOStat(target)
def startSar():
    """
    Launches 'sar' process depending on target's type.

    Builds a 'sar | [grep |] awk' pipeline whose awk stage appends parsed
    samples to '<target>-stat.dat'. Returns True when the pipeline is
    still alive after 3 seconds, False otherwise.
    """
    global WMCOMPONENT, SERVICE, RESOURCE, TYPES
    global target, targetType, sensorsPath, shell, config
    sar = os.popen("which sar 2>/dev/null").readline().strip()
    if len(sar) == 0:
        logError("'sar' utility is not installed on this machine. Please, install the SYSSTAT package.")
    logDebug("Starting sensor for %s %s" % (TYPES[targetType], target))
    sensorFile = "%s/%s-stat.dat" % (sensorsPath, target)
    logDebug("sensorFile=%s" % sensorFile)
    sarP = None
    awkP = None
    grepP = None
    try:
        if targetType in (WMCOMPONENT, SERVICE):
            pid = getPid()
            if pid == None:
                logDebug("startSar: False")
                return False
            sensorOUT = open(sensorFile, "a")
            # Copy the command template so the insert()s below do not
            # mutate the shared 'shell' configuration.
            tmpSensor = copy.deepcopy(shell['sensor'][TYPES[targetType]])
            if targetType == SERVICE:
                tmpSensor.insert(1, servicesSensorType[target])
            else:
                # '-X' also monitors child processes when the component's
                # config asks for it; default to '-x' (the process only).
                try:
                    tmpSensor.insert(1, ['-x', '-X'][config.component_(target).monitorChilds])
                except AttributeError:
                    tmpSensor.insert(1, '-x')
            tmpSensor.insert(2, str(pid))
            logDebug("Execute: %s" % string.join(tmpSensor))
            sarP = subprocess.Popen(tmpSensor, bufsize=1, stdout=subprocess.PIPE, stderr=open(os.devnull, "w"))
            del tmpSensor
            logDebug("Execute: %s" % string.join(shell['select'][TYPES[targetType]]))
            awkP = subprocess.Popen(shell['select'][TYPES[targetType]], bufsize=1, stdin=sarP.stdout, stdout=sensorOUT, stderr=open(os.devnull, "w"))
        if targetType == RESOURCE:
            # Resources need an extra grep stage to drop headers/blank lines.
            sensorOUT = open(sensorFile, "a")
            logDebug("Execute: %s" % string.join(shell['sensor']['RESOURCE'][target]))
            sarP = subprocess.Popen(shell['sensor']['RESOURCE'][target], bufsize=1, stdout=subprocess.PIPE, stderr=open(os.devnull, "w"))
            logDebug("Execute: %s" % string.join(shell['filter']['RESOURCE'][target]))
            grepP = subprocess.Popen(shell['filter']['RESOURCE'][target], bufsize=1, stdin=sarP.stdout, stdout=subprocess.PIPE, stderr=open(os.devnull, "w"))
            logDebug("Execute: %s" % string.join(shell['select']['RESOURCE'][target]))
            awkP = subprocess.Popen(shell['select']['RESOURCE'][target], bufsize=1, stdin=grepP.stdout, stdout=sensorOUT, stderr=open(os.devnull, "w"))
    except OSError, ex:
        logDebug("startSar: False")
        return False
    logDebug("Waiting for 3 seconds.")
    time.sleep(3)
    awkP.poll()
    # returncode stays None while the awk stage is still running => success.
    if awkP.returncode is None:
        logDebug("startSar: True")
        return True
    else:
        logDebug("startSar: False")
        return False
def startIOStat():
"""
Launches 'iostat' process depending on target's type.
"""
global TYPES
global target, targetType, sensorsPath, shell
iostat = os.popen("which sar 2>/dev/null").readline().strip()
if len(iostat) == 0:
logError("'iostat' utility is not installed on this machine. Please, install the SYSSTAT package.")
logDebug("Starting sensor for %s %s" % (TYPES[targetType], target))
tmpSensor = copy.deepcopy(shell['sensor'][TYPES[targetType]])
tmpSensor.insert(2, target)
sensorFile = "%s/%s-stat.dat" % (sensorsPath, target)
logDebug("sensorFile=%s" % sensorFile)
sensorOUT = open(sensorFile, "a")
try:
logDebug("Execute: %s" % string.join(tmpSensor))
ioStatP = subprocess.Popen(tmpSensor, bufsize=1, stdout=subprocess.PIPE, stderr=open(os.devnull, "w"))
logDebug("Execute: %s" % string.join(shell['select'][TYPES[targetType]]))
awkP = subprocess.Popen(shell['select'][TYPES[targetType]], bufsize=1, stdin=ioStatP.stdout, stdout=sensorOUT, stderr=open(os.devnull, "w"))
except OSError, ex:
logDebug("startIOStat: False")
return False
del tmpSensor
logDebug("Waiting for 3 seconds.")
time.sleep(3)
awkP.poll()
if awkP.returncode == None:
logDebug("startIOStat: True")
return True
else:
logDebug("startIOStat: False")
return False
def sendMail(to, subject, text, files=[]):
    """
    Sends e-mail (attachments allowed).

    NOTE(review): the 'to' parameter is unused -- the message always goes
    to config.HWMon.Mail.GetterMail; it is kept for interface
    compatibility. The mutable default 'files=[]' is only read, never
    mutated, so it is harmless here.
    """
    global config
    logDebug("Making letter (to='%s', from='%s', date='%s', subject='%s')"
        % (config.HWMon.Mail.GetterMail, config.HWMon.Mail.SenderMail, formatdate(localtime=True), subject))
    msg = MIMEMultipart()
    msg['From'] = config.HWMon.Mail.SenderMail
    msg['To'] = config.HWMon.Mail.GetterMail
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    for file in files:
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(open(file, "rb").read())
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"'
            % os.path.basename(file))
        logDebug("Adding file to letter: %s" % file)
        msg.attach(part)
    logDebug("Connecting to SMTP server (hostname='%s', port='%s')."
        % (config.HWMon.Mail.SMTPServer, config.HWMon.Mail.SMTPPort))
    try:
        smtp = smtplib.SMTP(config.HWMon.Mail.SMTPServer, config.HWMon.Mail.SMTPPort)
    except:
        logWarning("Exception: %s" % str(sys.exc_info()[1]))
        return
    if (len(config.HWMon.Mail.Username) != 0):
        try:
            # Bug fix: both references below used misspelled attributes
            # ('Mai.Password' and 'Mail.Passowrd'), so any authenticated
            # SMTP setup raised AttributeError here.
            # NOTE(review): logging the password, even at debug level,
            # is unsafe -- consider redacting it.
            logDebug("Trying to login to SMTP server (username='%s', password='%s')"
                % (config.HWMon.Mail.Username, config.HWMon.Mail.Password))
            smtp.login(config.HWMon.Mail.Username, config.HWMon.Mail.Password)
        except:
            logWarning("Exception: %s" % str(sys.exc_info()[1]))
            smtp.quit()
            return
    try:
        logDebug("Sending e-mail via SMTP server")
        smtp.sendmail(config.HWMon.Mail.SenderMail, config.HWMon.Mail.GetterMail, msg.as_string())
    except:
        logWarning("Exception: %s" % str(sys.exc_info()[1]))
    logDebug("Terminating SMTP server connection.")
    smtp.quit()
def getLastTimeBackUp():
    """
    Gets datetime object when the last backup was done for target.

    Falls back to the timestamp of the first log line when no backup
    archive exists yet.
    """
    global sensorsBackUpPath, target
    if not os.path.isdir(sensorsBackUpPath):
        logWarning("Missing sensors back up folder. Creating one.")
        os.system("mkdir -p %s" % sensorsBackUpPath)
    # Archives are named '<target>-<10-digit unix timestamp>.bz2';
    # reverse sort puts the newest first.
    lastBackUp = os.popen("ls -1 %s | grep '^%s-[0-9]\{10\}.bz2$' | sort -r" % (sensorsBackUpPath, target)).readline()
    logDebug("lastBackUp=%s" % lastBackUp[:-1])
    if len(lastBackUp) == 0:
        # No archive yet: use the timestamp of the very first log entry
        # ('<unix-time>;<code>;<msg>') as the reference point.
        sensorLog = open("%s/%s-stat.log" % (sensorsPath, target), "r").readline()
        logDebug("sensorLog=%s" % sensorLog[:-1])
        logDebug("getLastTimeBackUp: %s" % datetime.fromtimestamp(int(sensorLog.split(";")[0])))
        return datetime.fromtimestamp(int(sensorLog.split(";")[0]))
    else:
        # Archives are never modified after creation, so mtime == backup time.
        statinfo = os.stat("%s/%s" % (sensorsBackUpPath, lastBackUp[:-1]))
        logDebug("getLastTimeBackUp: %s" % statinfo.st_mtime)
        return datetime.fromtimestamp(statinfo.st_mtime)
def doBackUp():
    """
    Back ups all sensors raw data and logs for target.

    Stops the sensor, archives '<target>-stat.log' and '<target>-stat.dat'
    into a timestamped .bz2 under the backup directory, removes the
    originals and restarts the sensor. Returns True on success.
    """
    global lastBackUpTime, target, sensorsPath, sensorsBackUpPath
    logDebug("Making backup for %s %s" % (TYPES[targetType], target))
    logDebug("Stopping sensor for %s %s" % (TYPES[targetType], target))
    stopDaemon()
    time.sleep(3)
    # Only proceed once the sensor process has really terminated.
    if getSensor() == None:
        oldDir = os.getcwd()
        os.chdir(sensorsPath)
        targetBackUp = "%s-%s.bz2" % (target, str(int(time.time())))
        cmd = "tar -cjf %(targetBackUp)s %(target)s-stat.log %(target)s-stat.dat" % \
            {'targetBackUp': targetBackUp, 'target': target}
        proc = subprocess.Popen(cmd, shell=True, stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT, stdin=None)
        proc.wait()
        if not os.path.isfile("%s/%s" % (sensorsPath, targetBackUp)):
            # Bug fix: this branch printed the debug leftover "baba" and
            # returned without restoring the working directory.
            logWarning("Backup archive was not created: %s" % targetBackUp)
            os.chdir(oldDir)
            return False
        cmd = "mv %(sensorsPath)s/%(targetBackUp)s %(sensorsBackUpPath)s/%(targetBackUp)s" % \
            {'sensorsPath': sensorsPath, 'sensorsBackUpPath': sensorsBackUpPath, 'targetBackUp': targetBackUp}
        proc = subprocess.Popen(cmd, shell=True, stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT, stdin=None)
        proc.wait()
        cmd = "rm %(target)s-stat.log %(target)s-stat.dat" % {'target': target}
        proc = subprocess.Popen(cmd, shell=True, stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT, stdin=None)
        proc.wait()
        os.chdir(oldDir)
        logDebug("Starting sensor for %s %s" % (TYPES[targetType], target))
        startDaemon()
        lastBackUpTime = datetime.today()
        logDebug("doBackUp: True")
        return True
    else:
        logDebug("doBackUp: False")
        return False
def routineCnS():
    """
    Routine for WMComponents and Services.

    Endless supervision loop: once a minute it reaps orphaned sensors,
    checks that the monitored process and its sensor are both alive,
    and (re)starts the sensor whenever either has changed or died.
    """
    global downMinutes
    logDebug("Starting routine: routineCnS")
    targetPid = None      # pid the current sensor was attached to
    runningPid = None     # pid currently reported for the target
    tSensorPid = None     # sensor pid we started
    rSensorPid = None     # sensor pid actually found running
    firstTime = True
    while True:
        # ----
        # Inner loop: watch once a minute; break out when the sensor
        # needs to be (re)started.
        while True:
            if firstTime:
                firstTime = False
            else:
                logDebug("Sleeping for 60 seconds.")
                time.sleep(60)
            killDeadSensors()
            logDebug("Starting 60 seconds check.")
            runningPid = getPid()
            logDebug("runningPid=%s" % str(runningPid))
            if runningPid == None:
                # Target is gone: stop its sensor but keep watching.
                if targetPid != None:
                    logDebug("Killing sensor for targetPid (%s)." % str(targetPid))
                    killDaemon(targetPid)
                    targetPid = None
            elif runningPid != targetPid:
                # Target restarted under a new pid: re-attach the sensor.
                logDebug("runningPid (%s) does not match targetPid (%s)" % (str(runningPid), str(targetPid)))
                if targetPid != None:
                    killDaemon(targetPid)
                break
            rSensorPid = getSensor()
            if rSensorPid == None:
                break
            elif rSensorPid != tSensorPid:
                # A foreign sensor is running; stop it and restart our own.
                stopDaemon()
                break
            maintainance()
        # ----
        time.sleep(3)
        if startDaemon() == False:
            logWarning("Can not launch sensor!")
            downMinutes += 1
        else:
            tSensorPid = getSensor()
            targetPid = runningPid
            downMinutes = 0
        maintainance()
def routineRnD():
    """
    Routine for Resources and Disks.

    Like routineCnS(), but resources/disks have no monitored pid:
    only the sensor process itself is supervised.
    """
    global DISK, TYPES
    global downMinutes, target, targetType
    logDebug("Starting routine: routineRnD")
    tSensorPid = None     # sensor pid we started
    rSensorPid = None     # sensor pid actually found running
    firstTime = True
    while True:
        # ---
        # Inner loop: watch once a minute; break out when the sensor
        # needs to be (re)started.
        while True:
            if firstTime:
                firstTime = False
            else:
                logDebug("Sleeping for 60 seconds.")
                time.sleep(60)
            killDeadSensors()
            rSensorPid = getSensor()
            if rSensorPid == None:
                break
            elif rSensorPid != tSensorPid:
                # A foreign sensor is running; stop it and restart our own.
                stopDaemon()
                break
            maintainance()
        # ---
        time.sleep(3)
        if startDaemon() == False:
            logWarning("Can not launch sensor!")
            downMinutes += 1
        else:
            downMinutes = 0
            tSensorPid = getSensor()
        maintainance()
def maintainance():
    """
    Maintainance operations.

    Sends a failure e-mail once the sensor has been down for the
    configured number of minutes, and triggers a backup when the
    configured interval (Backup.Time months of 30 days) has elapsed.
    """
    global TYPES
    global config, targetType, target, lastBackUpTime, downMinutes
    if config.HWMon.Failures.Status == True and downMinutes == config.HWMon.Failures.Time:
        logWarning("Sensor is down for %s minutes. Sending e-mail." % config.HWMon.Failures.Time)
        msg = "Information for " + config.HWMon.Mail.GetterMail + "\n\n"
        msg += "Sensor for " + TYPES[targetType] + " " + target + " is down for " + str(config.HWMon.Failures.Time) + " min.\n"
        msg += "Required sys admin attention."
        sendMail(config.HWMon.Mail.GetterMail, "Sensor Failure (%s, %s, %s min.)" % (TYPES[targetType], target, str(config.HWMon.Failures.Time)), msg)
    # Bug fix: timedelta.seconds is only the sub-day remainder (< 86400),
    # so the old comparison against Backup.Time * 2592000 (months in
    # seconds) could never become true; use the full elapsed seconds.
    elapsed = datetime.today() - lastBackUpTime
    if config.HWMon.Backup.Status == True and elapsed.days * 86400 + elapsed.seconds >= config.HWMon.Backup.Time * 2592000:
        logInfo("Making backup, %s days passed since last backup (%s)"
            % (elapsed.days, lastBackUpTime))
        doBackUp()
def killDeadSensors():
    """
    Finds all 'iostat', 'sar', 'sadc' process with no parent process and sends them SIGKILL signal.

    Orphaned sensors (ppid == 1) are leftovers from a crashed run; for
    sar the children (sadc) are killed together with the parent.
    """
    logDebug("killDeadSensors:")
    cmd = "ps -C sar,sadc,iostat wwho pid,ppid,cmd | awk '$2 == 1 { print $1\" \"$3}' | " + \
        "awk '$2 == \"sar\" { system(\"ps wwho pid --ppid \"$1); print $1 } $2 == \"iostat\" { print $1 }' | xargs kill -9"
    subprocess.Popen(cmd, shell=True, stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT, stdin=None)
def handlerSIGTERM(signum, frame):
    """
    Handler for SIGTERM signal.

    Stops the sensor pipeline before the daemon itself exits cleanly.
    """
    stopDaemon()
    sys.exit(0)
##########################
### Global definitions ###
##########################
# Target type constants and their names (index into TYPES).
WMCOMPONENT = 0
SERVICE = 1
RESOURCE = 2
DISK = 3
TYPES = ['WMCOMPONENT', 'SERVICE', 'RESOURCE', 'DISK']
# Dispatch tables keyed by target type.
pid = {
    WMCOMPONENT : wmComponentPid,
    SERVICE : servicePid,
    RESOURCE : None, # not used
    DISK : None # not used
}
startSensor = {
    WMCOMPONENT : startSar,
    SERVICE : startSar,
    RESOURCE : startSar,
    DISK : startIOStat
}
stopSensor = {
    WMCOMPONENT : stopSar,
    SERVICE : stopSar,
    RESOURCE : stopSar,
    DISK : stopIOStat
}
findSensor = {
    WMCOMPONENT : findSar,
    SERVICE : findSar,
    RESOURCE : findSar,
    DISK : findIOStat
}
killSensor = {
    WMCOMPONENT : killSar,
    SERVICE : killSar,
    RESOURCE : killSar,
    DISK : killIOStat
}
# Shell pipeline pieces: sensor (sar/iostat) | filter (grep) | select (awk).
shell = {}
shell['sensor'] = {} # sar/iostat
shell['select'] = {} # awk
shell['filter'] = {} # grep
shell['sensor']['RESOURCE'] = {}
shell['select']['RESOURCE'] = {}
shell['filter']['RESOURCE'] = {}
shell['filter']['WMCOMPONENT'] = None
shell['filter']['SERVICE'] = None
shell['filter']['DISK'] = None
shell['filter']['RESOURCE']['CPU'] = ['grep', '--line-buffered', 'all']
# Bug fix: the last alternative used a literal '|' ('Linux|^$'); in grep
# basic regular expressions alternation must be escaped as '\|', so the
# 'Linux' header and empty lines were not actually filtered out.
shell['filter']['RESOURCE']['MEM'] = ['grep', '--line-buffered', '-v', 'kbmemfree\|Linux\|^$']
shell['filter']['RESOURCE']['SWAP'] = ['grep', '--line-buffered', '-v', 'pswpin\|Linux\|^$']
shell['filter']['RESOURCE']['LOAD'] = ['grep', '--line-buffered', '-v', 'ldavg\|Linux\|^$']
shell['select']['WMCOMPONENT'] = ['awk', 'NR>3 { print strftime("%s")";"$6";"$7; fflush() }']
shell['select']['SERVICE'] = ['awk', 'NR>3 { print strftime("%s")";"$6";"$7; fflush() }']
# Bug fix: a ';' was missing before fflush(), which concatenated fflush()'s
# return value onto the printed record instead of terminating the statement.
shell['select']['DISK'] = ['awk', '(NR - 1) % 3 == 0 && NR != 1 { print strftime("%s")";"$3";"$4";"$5";"$6; fflush() }']
shell['select']['RESOURCE']['CPU'] = ['awk', 'NR>3 { print strftime("%s")";"$4";"$5";"$6";"$7";"$8; fflush() }']
shell['select']['RESOURCE']['MEM'] = ['awk', 'NR>3 { print strftime("%s")";"$6";"$7";"($4-$6-$7)";"$10; fflush() }']
shell['select']['RESOURCE']['SWAP'] = ['awk', 'NR>3 { print strftime("%s")";"$3";"$4; fflush() }']
shell['select']['RESOURCE']['LOAD'] = ['awk', 'NR>3 { print strftime("%s")";"$5";"$6";"$7; fflush() }']
shell['sensor']['WMCOMPONENT'] = ['sar', str(60), str(0)] # 1st - Sensor type (-x/-X), 2nd - WMCOMPONENT pid (inserted then needed, does not effect this list)
shell['sensor']['SERVICE'] = ['sar', str(60), str(0)] # 1st - Sensor type (-x/-X), 2nd - SERVICE pid (inserted then needed, does not effect this list)
shell['sensor']['DISK'] = ['iostat', '-dk', str(60)] # 2nd - DISK name (inserted then needed, does not effect this list)
shell['sensor']['RESOURCE']['CPU'] = ['sar', '-u', str(60), str(0)]
shell['sensor']['RESOURCE']['MEM'] = ['sar', '-r', str(60), str(0)]
shell['sensor']['RESOURCE']['SWAP'] = ['sar', '-W', str(60), str(0)]
shell['sensor']['RESOURCE']['LOAD'] = ['sar', '-q', str(60), str(0)]
# Supported services: display name -> process name as seen by 'ps -C'.
servicesList = {
    'GridFTP' : 'globus-gridftp-server',
    'mySQL' : 'mysqld'
}
# sar flag per service: '-X' monitors child processes too.
servicesSensorType = {
    'GridFTP' : '-X',
    'mySQL' : '-x'
}
resourcesList = ['CPU', 'MEM', 'SWAP', 'LOAD']
#################
### Main part ###
#################
# Accepted long options; exactly one target option must be supplied.
valid = ['config=', 'wmcomponent=', 'service=',
'resource=', 'disk=', 'debug']
try:
    opts, args = getopt.getopt(sys.argv[1:], "", valid)
except getopt.GetoptError, ex:
    error(str(ex))
configFile = None # Configuration file name
target = None # Target name (MEM, CPU, hda, ErrorHandler, mySQL, etc.)
targetType = None # Target type (RESOURCE, SERVICE, etc.)
sensorsPath = None # Path to 'sensors' directory
config = None # Configuration object file
debugMode = False # Debug Mode On/Off
downMinutes = -1 # Minutes passed while sensor/WMComponent/service is down
lastBackUpTime = None # Datetime object of last backup
sensorsBackUpPath = None # Path to 'sensors_backup' directory
for opt, arg in opts:
    if opt == "--config":
        configFile = arg
    if opt == "--wmcomponent":
        target = arg
        targetType = WMCOMPONENT
    if opt == "--service":
        target = arg
        targetType = SERVICE
    if opt == "--resource":
        target = arg
        targetType = RESOURCE
    if opt == "--disk":
        target = arg
        targetType = DISK
    if opt == "--debug":
        debugMode = True
# Checking crucial variables
if configFile == None:
    error("No configuration file set! Configuration file must be passed via '--config=' option.")
if target == "" or targetType == None:
    error("No target was set! Must be passed target name, via '--wmcomponent=', '--service=', '--resource=' or '--disk' options.")
# Loading configuration file
try:
    config = loadConfigurationFile(configFile)
except ImportError, ex:
    error("Can no load configuration file! Please, check configuration file (" + os.path.abspath(configFile) + ")")
# Checking for 'HWMon' section in configuration file
if "HWMon" not in dir(config):
    error("No 'HWMon' section found in WMCore configuration file! Please, check your configuration file.")
# Checking for 'sensors' directory
sensorsPath = config.HTTPFrontEnd.ComponentDir + "/sensors"
if not os.path.isdir(sensorsPath):
    warning("Sensors folder is missing in HTTPFrontEnd ComponentDir! Created sensors folder.")
    os.system("mkdir -p %s" % sensorsPath)
# Validate the target against what this host/config actually provides.
if targetType == WMCOMPONENT:
    wmComponentsList = config.listComponents_()
    if target not in wmComponentsList:
        error("WMComponent '%s' is not defined in configuration file." % target)
elif targetType == SERVICE:
    if target not in servicesList:
        error("Service '%s' is not supported." % target)
elif targetType == RESOURCE:
    if target not in resourcesList:
        error("Resource '%s' is not supported." % target)
elif targetType == DISK:
    # Enumerate block devices like /dev/sda, /dev/hdb and keep the basename.
    disks = os.popen('ls -1 /dev/?d[a-z]').readlines()
    disks = map(lambda x: x.split('/')[2].rstrip(), disks)
    if target not in disks:
        error("No such '%s' hard-drive on this machine." % target)
# type was confirmed and we can use log* functions
logInfo("wmcore-sensord: Sensor daemon for %s %s" % (TYPES[targetType], target))
logDebug("target=%s" % target)
logDebug("targetType=%s" % TYPES[targetType])
sensorsBackUpPath = config.HTTPFrontEnd.ComponentDir + "/sensors_backup"
lastBackUpTime = getLastTimeBackUp()
# Bind type-specific helpers once so the routines can call them generically.
getSensor = findSensor[targetType]
killDaemon = killSensor[targetType]
startDaemon = startSensor[targetType]
stopDaemon = stopSensor[targetType]
getPid = pid[targetType]
# Take control of SIGTERM handling
signal.signal(signal.SIGTERM, handlerSIGTERM)
logInfo("wmcore-sensord is ready to start sensor.")
# Enter the supervision loop appropriate for the target type (never returns).
if targetType in (WMCOMPONENT, SERVICE):
    routineCnS()
elif targetType in (RESOURCE, DISK):
    routineRnD()
|
import sys
sys.stdin = open('input.txt', 'rt')

# Two dice with faces 1..n and 1..m; find the most frequent sum(s).
n, m = map(int, input().split())
# cnt[s] counts the number of (i, j) face pairs whose sum is s.
cnt = [0] * (n + m + 3)
for i in range(1, n + 1):
    for j in range(1, m + 1):
        cnt[i + j] += 1
# Highest frequency, using the builtin max() instead of shadowing it
# with a hand-rolled scan (the original rebound the name 'max').
best = max(cnt[:n + m + 1])
for s in range(n + m + 1):
    if cnt[s] == best:
        print(s, end=' ')
from django.shortcuts import render
from .models import *
from django.contrib.auth.models import User
from rest_framework import viewsets,permissions
from order.serializers import *
from rest_framework.views import APIView
from rest_framework.response import Response
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# Views for listing and managing products in a user's basket
class ProductInBasketViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for basket items (authenticated users only)."""
    permission_classes = [ permissions.IsAuthenticated, ]
    queryset = ProductInBasketModel.objects.all()
    serializer_class = ProductInBasketSerializer
# AllowAny
class ProductInBasket(APIView):
    """Add a product to the basket and return the basket's full contents."""
    permission_classes = [permissions.AllowAny, ]
    def post(self,request):
        print(request.data)
        token_key = request.data.get("token_key")
        qty = request.data.get("qty")
        size = request.data.get("size")
        product = request.data.get("product_name")
        price = request.data.get("price")
        image = request.data.get("image")
        total_price = request.data.get("total_price")
        # get_or_create matches on ALL fields below, so an existing row is
        # reused only when every field (including qty/total_price) matches.
        # NOTE(review): that makes the `not created` branch reachable only
        # for an exactly duplicated request -- confirm this is intended.
        new_product, created = ProductInBasketModel.objects.get_or_create(token_key=token_key,
                                            qty=qty,size=size,
                                            product=product,
                                            price=price,image=image,total_price=total_price)
        if not created:
            # Duplicate add: accumulate quantity and total instead.
            new_product.qty += int(qty)
            new_product.total_price += int(total_price)
            new_product.save(force_update=True)
        products_in_basket = ProductInBasketModel.objects.filter(token_key=token_key)
        serializer = ProductInBasketSerializer(products_in_basket,many=True)
        return Response({'data':serializer.data})
class DeleteProductInBasket(APIView):
    """Remove one item from the basket and return the remaining items."""
    def post(self,request):
        # Renamed local from `id` to avoid shadowing the builtin.
        product_id = request.data.get("id")
        token_key = request.data.get("token")
        products_in_basket = ProductInBasketModel.objects.filter(token_key=token_key,id=product_id)
        products_in_basket.delete()
        products_in_baskets = ProductInBasketModel.objects.filter(token_key=token_key)
        serializer = ProductInBasketSerializer(products_in_baskets,many=True)
        return Response({'data':serializer.data})
class DelProductInBasket(APIView):
    """Empty the basket for a token and return the (now empty) item list."""
    def post(self, request):
        token = request.data.get("token")
        # Delete every basket row belonging to this token.
        ProductInBasketModel.objects.filter(token_key=token).delete()
        remaining = ProductInBasketModel.objects.filter(token_key=token)
        payload = ProductInBasketSerializer(remaining, many=True).data
        return Response({'data': payload})
class UpdateProductInBasket(APIView):
    """Update the quantity (and derived total price) of a basket item."""
    def post(self,request):
        data = request.data
        print(data)
        # Request payloads arrive as strings; cast once up front.
        qty = int(data["qty"])
        products_in_basket = ProductInBasketModel.objects.filter(id=data["id"])
        for ob in products_in_basket:
            ob.qty = qty
            # Bug fix: qty was used uncast here, so a string payload would
            # repeat the price (str * int) or crash instead of multiplying.
            ob.total_price = qty * ob.price
            ob.save()
        return Response(status=201)
class Order(APIView):
    """Create an order from the basket contents, e-mail shop and customer, clear the basket."""
    def post(self,request):
        data = request.data
        # Bug fix: these assignments used to end with trailing commas, which
        # turned every value into a one-element tuple and leaked tuple reprs
        # such as ("Name",) into the rendered e-mail templates.
        customer_email = data["email"]
        own_email = ['percaleshop@gmail.com']
        customer_name = data["firstname"]
        customer_surname = data["lastname"]
        customer_tel = data["phone"]
        customer_address = data["address"]
        comments = data["comment"]
        products_in_basket = ProductInBasketModel.objects.filter(token_key=data["token_key"], is_active=True)
        prod = products_in_basket.values()
        # Grand total: per-item totals are stored as strings, hence float -> int.
        total = sum(int(float(item["total_price"])) for item in prod)
        delivery = data["address"]
        user = User.objects.get(auth_token = data["token_key"])
        order = OrderModel.objects.create(user = user,
                        customer_email = data["email"],
                        customer_name = data["firstname"],
                        customer_surname = data["lastname"],
                        customer_tel = data["phone"],
                        customer_address = data["address"],
                        comments = data["comment"],
                        status_id = 1,
                        token = data["token_key"])
        # Copy every basket row into an immutable order line. (The previous
        # version re-fetched each row and issued a no-op save() first.)
        for item in products_in_basket:
            ProductInOrderModel.objects.create(
                product = item.product,
                nmb = item.qty,
                size = item.size,
                price_per_item = item.price,
                image = item.image,
                total_price = item.total_price,
                order = order,
            )
        html_message = render_to_string('mail_template.html', {'context':prod,'order':order, 'total_price':total, 'delivery':delivery})
        plain_message = strip_tags(html_message)
        html_message_for_own = render_to_string('mail_for_own.html',{'context':prod,'order':order,
                                'total_price':total, 'delivery':delivery,
                                'customer_email' : customer_email,
                                'customer_name' : customer_name,
                                'customer_surname' : customer_surname,
                                'customer_tel' : customer_tel,
                                'customer_address' : customer_address,
                                'comments' : comments})
        plain_message_own = strip_tags(html_message_for_own)
        send_mail('Новый заказ',
                  plain_message_own,
                  'percaleshop@gmail.com',
                  own_email, html_message=html_message_for_own,
                  )
        # send_mail expects a list of recipients; customer_email is now a
        # plain string, so wrap it.
        send_mail('Percale - Интернет магазин домашнего текстиля',
                  plain_message,
                  'percaleshop@gmail.com',
                  [customer_email], html_message=html_message,
                  )
        products_in_basket.delete()
        return Response(status=201)
class OrderViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for orders, filterable by basket token."""
    permission_classes = [ permissions.IsAuthenticated, ]
    queryset = OrderModel.objects.all()
    serializer_class = OrderSerializer
    filter_fields = ('token',)
class ProductInOrderViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for order line items, filterable by parent order."""
    permission_classes = [ permissions.IsAuthenticated, ]
    queryset = ProductInOrderModel.objects.all()
    serializer_class = ProductInOrderSerializer
    filter_fields = ('order',)
|
import numpy as np
np.set_printoptions(precision=3, suppress=True)
np.core.arrayprint._line_width = 10
import os
from itertools import product
import glob
from adult import preprocess_adult_data, get_metrics
from train_clp_adult import get_consistency, predict_from_checkpoint
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
import tensorflow as tf
logging.getLogger('tensorflow').setLevel(logging.FATAL)
# One experiment run per random seed; results are summarized per seed.
seeds = list(range(10))
base_dir = './tensorboard_adult_seeds/'
save_dir = './seeds_summary/'
try:
    os.makedirs(save_dir)
except OSError:
    # Directory already exists. Narrowed from a bare `except`, which also
    # silently hid genuine failures such as permission errors.
    pass
# Selection: hyper-parameter grids chosen for this summary run.
eps_grid = [0.01]
fe_grid = [50]
slr_grid = [5., 10., 20.]
se_grid = [50]
lr_grid = [1e-5]
lambda_grid = [5., 20., 40.]
hypers = [eps_grid, fe_grid, slr_grid, se_grid, lr_grid, lambda_grid]
# hypers = [se_grid, slr_grid, fe_grid, eps_grid, lr_grid, lambda_grid]
# names = ['eps', 'fe', 'slr', 'se', 'lr', 'lamb']
# Template for each run's TensorBoard checkpoint directory name; the
# placeholders are filled from `values + [seed]` below.
model = 'clp_fair-dim:4_adv-epoch:%d_batch_size:1000_adv-step:%.1f_l2_attack:%s_adv_epoch_full:%d_ro:%s_balanced:True_lr:%s_clp:%.1f_start:0.0_c_init:False_arch:100_%d'
# Evaluate every hyper-parameter combination across all seeds.
for pack in product(*hypers):
    (
        eps,
        full_epoch,
        subspace_step,
        subspace_epoch,
        lr,
        lamb
    ) = pack
    full_step = eps/10
    names = ['se', 'slr', 'flr', 'fe', 'eps', 'lr', 'lamb']
    values = [subspace_epoch, subspace_step, str(full_step), full_epoch, str(eps), str(lr), lamb]
    # Human-readable id like 'se:50_slr:5.0_...' used as the save filename.
    exp_descriptor = []
    for n, v in zip(names, values):
        exp_descriptor.append(':'.join([n,str(v)]))
    exp_descriptor = '_'.join(exp_descriptor)
    n_metrics = 8
    result_exp = np.zeros((len(seeds), n_metrics))
    for seed_idx, seed in enumerate(seeds):
        X_train, X_test, y_train, y_test, X_gender_train, X_gender_test, y_gender_train, y_gender_test, dataset_orig_train, dataset_orig_test, names_income, names_gender = preprocess_adult_data(seed = seed)
        ## Metrics
        model_dir = base_dir + model % tuple(values + [seed])
        tf.reset_default_graph()
        # Load the latest checkpoint saved for this run.
        meta_file = glob.glob(model_dir + '/*.meta')[0]
        saver = tf.train.import_meta_graph(meta_file)
        cur_checkpoint = tf.train.latest_checkpoint(model_dir)
        with tf.Session() as sess:
            # Restore the checkpoint
            saver.restore(sess, cur_checkpoint)
            graph = tf.get_default_graph()
            # list_of_tuples = [op.values() for op in graph.get_operations()]
            # The logits op name differs between model families ('add_3:0'
            # for sensr/clp graphs, 'add_1:0' otherwise).
            if model.startswith('sensr') or model.startswith('clp'):
                post_idx = '3:0'
            else:
                post_idx = '1:0'
            logits = predict_from_checkpoint(sess, 'add_' + post_idx, X_test)
            preds = np.argmax(logits, axis = 1)
            gender_race_consistency, spouse_consistency = get_consistency(X_test, lambda x: predict_from_checkpoint(sess, 'add_' + post_idx, x))
            acc_temp, bal_acc_temp, race_gap_rms_temp, race_max_gap_temp, gender_gap_rms_temp, gender_max_gap_temp = get_metrics(dataset_orig_test, preds, verbose=False)
            seed_metrics = [acc_temp, bal_acc_temp, spouse_consistency, gender_race_consistency,
                            gender_gap_rms_temp, race_gap_rms_temp, gender_max_gap_temp, race_max_gap_temp]
            result_exp[seed_idx] = np.array(seed_metrics)
    # Persist the per-seed metric matrix, then print the per-metric means.
    np.save(save_dir + exp_descriptor, result_exp)
    print(50*'==')
    print(exp_descriptor)
    print('acc', 'bal_acc', 'spouse_consistency', 'gender_race_consistency',
          'gender_gap_rms', 'race_gap_rms', 'gender_max_gap', 'race_max_gap')
    print(result_exp.mean(axis=0))
    print(50*'==')
from .piece import Piece
from game_rules import can_move
import os
class Bishop(Piece):
    """Bishop piece: slides along the four diagonals until blocked."""

    def __init__(self, color, name):
        self.sprite_dir = color + "Bishop.png"
        self.name = name
        super(Bishop, self).__init__(color, name)

    def get_possible_moves(self, coord, matrix):
        """Return legal diagonal moves from `coord`, intersected with the
        moves permitted by game_rules.can_move (when it returns any)."""
        list_aux = can_move(self.color, matrix, coord)
        self.possible_moves = []
        self.mov_d(coord, matrix)
        if list_aux:
            return [move for move in list_aux if move in self.possible_moves]
        return self.possible_moves

    def mov_d(self, coord, matrix):
        # Scan all four diagonal rays.
        self.check_upper_left(coord, matrix)
        self.check_upper_right(coord, matrix)
        self.check_lower_left(coord, matrix)
        self.check_lower_right(coord, matrix)

    # The four public check_* methods are kept for compatibility; each
    # delegates to the shared ray scan below.
    def check_upper_left(self, coord, matrix):
        self._scan(coord, matrix, -1, -1)

    def check_upper_right(self, coord, matrix):
        self._scan(coord, matrix, -1, 1)

    def check_lower_right(self, coord, matrix):
        self._scan(coord, matrix, 1, 1)

    def check_lower_left(self, coord, matrix):
        self._scan(coord, matrix, 1, -1)

    def _scan(self, coord, matrix, dr, dc):
        """Walk one diagonal collecting empty squares plus the first enemy
        square; stop at the board edge or any occupied square.

        BUG FIX: the original loops kept scanning after hitting a piece of
        the bishop's own color (that branch fell through without a break),
        which let the bishop "jump over" friendly pieces.
        """
        for i in range(1, 8):
            r = coord[0] + dr * i
            c = coord[1] + dc * i
            if not (0 <= r <= 7 and 0 <= c <= 7):
                break  # off the board
            piece = matrix[(r, c)]['piece']
            if not piece:
                self.possible_moves.append((r, c, 'mov'))
            elif piece.color != self.color:
                self.possible_moves.append((r, c, 'mov'))  # capture square
                break
            else:
                break  # blocked by a friendly piece
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import re
import cgi
# Validation patterns for the signup form fields.
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASS_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
# Module-level error messages updated by the set_*_error functions and read
# back when re-rendering the form.
email_error=""
username_error=""
password_error=""
password_confirm_error=""
def validate_email(email):
    """Return a truthy value when the email is absent (it is optional) or
    matches EMAIL_RE; otherwise return the falsy match result."""
    if not email:
        return True
    return EMAIL_RE.match(email)
def validate_password(password, password_confirm):
    """Truthy when both entries are present, equal, and satisfy PASS_RE."""
    entries_match = password == password_confirm
    return password and password_confirm and entries_match and PASS_RE.match(password)
def vailidate_username(username):
    """Truthy when the username is present and matches USER_RE.

    (Function name spelling is kept as-is: callers use `vailidate_username`.)
    """
    if not username:
        return username
    return USER_RE.match(username)
def set_email_error(email):
    """Refresh the module-level email_error message for `email`.

    An empty email counts as "not provided" (the field is optional) and
    leaves the previous message untouched.
    """
    global email_error
    if not email:
        return
    if EMAIL_RE.match(email):
        email_error=""
    else:
        email_error="Error: That is not a valid email. (example: john@aol.com)"
def set_password_error(password):
    """Refresh the module-level password_error message for `password`."""
    global password_error
    if not password:
        password_error="Error: Please enter a password (3-20 characters)"
        return
    if PASS_RE.match(password):
        password_error=""
    else:
        password_error="Error: That is not a valid password. Password must be 3-20 characters."
def set_password_confirm_error(password,password_confirm):
    """Refresh the module-level password_confirm_error for the given pair."""
    global password_confirm_error
    if not password_confirm:
        password_confirm_error="Error: Please confirm your password"
    elif password_confirm != password:
        password_confirm_error="Error: Passwords do not match."
    else:
        password_confirm_error=""
def set_username_error(username):
    """Refresh the module-level username_error message for `username`."""
    global username_error
    if not username:
        username_error="Error: Please enter a username (3-20 alphanumeric characters)"
        return
    if USER_RE.match(username):
        username_error=""
    else:
        username_error="Error: That is not a valid username. Username must be 3-20 alphanumeric characters."
# Accessors for the module-level error messages set by set_*_error above.
def get_username_error():
    return username_error
def get_email_error():
    return email_error
def get_password_error():
    return password_error
def get_password_confirm_error():
    return password_confirm_error
def build_signup_page(email_error,password_error,password_confirm_error,username_error):
    """Render the signup form as an HTML string.

    NOTE(review): the parameters are never used — the error labels are read
    from the module-level messages via the get_*_error() accessors.
    """
    style="<style> label.error{ color: red } </style>"
    header="<h2>User Signup</h2>"
    username_input = "<label>Username:</label><input name='username' type='text' ></input><label class='error'><strong> "+get_username_error()+"</strong></label>"
    password_input="<label>Password:</label><input type='password' name='password'></input><label class='error'><strong> "+get_password_error()+"</strong></label>"
    password_confirm_input="<label>Confirm Password:</label><input type='password' name='password_confirm'></input><label class='error'><strong> "+get_password_confirm_error()+"</strong></label>"
    email_input="<label>Email Address (optional)</label><input type='text' name=email></input><label class='error'><strong> "+get_email_error()+"</strong></label>"
    submit="<input type='submit'>"
    # Joining with <br> is equivalent to the original chained concatenation.
    fields = "<br>".join([username_input, password_input, password_confirm_input, email_input, submit])
    form = "<form method=post>" + fields + "</form>"
    return style + header + form
def build_welcome_page(username,email):
    """Render the post-signup welcome page; mentions the email only when
    one was supplied."""
    parts = ["<h2>Welcome <em>" + username + "</em></h2>"]
    if email:
        parts.append("<p>Your email address: <em>" + email + "</em> has been registered.")
    return "".join(parts)
class MainHandler(webapp2.RequestHandler):
    """Serves the signup form (GET) and processes submissions (POST)."""

    def get(self):
        # Fresh form with no error messages.
        content=build_signup_page("","","","")
        self.response.write(content)

    def post(self):
        username=self.request.get("username")
        password=self.request.get("password")
        password_confirm=self.request.get("password_confirm")
        email=self.request.get("email")
        if vailidate_username(username) and validate_password(password,password_confirm) and validate_email(email):
            content=build_welcome_page(username,email)
            self.response.write(content)
        else:
            # Refresh the module-level error messages, then re-render.
            set_username_error(username)
            set_email_error(email)
            set_password_error(password)
            set_password_confirm_error(password,password_confirm)
            # BUG FIX: three getters were referenced without parentheses,
            # assigning the function objects instead of the error strings.
            username_error=get_username_error()
            password_error=get_password_error()
            password_confirm_error=get_password_confirm_error()
            email_error=get_email_error()
            content=build_signup_page(email_error,password_error,password_confirm_error,username_error)
            self.response.write(content)
# WSGI entry point: route "/" to the signup handler.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
|
import time
from NID import NID
# The output (path taken from the UI): (1)Two csv files ,(2) Heat-Map for pairwise interactions, (3)Log file
# 1st csv file for pairwise interaction
# 2nd csv file for higher order interaction
# every line contains interaction and strength
'''input params, init format:
use_main_effect_nets - whether use main effects or not (true / false)
use_cutoff - whether use cutoff or not (true / false)
is_index_col - is index column exists (1 true / 0 false)
is_header - is header exists (1 true / 0 false)
file_path - full path
out_path - full path
units_list - network architecture (list of numbers seperate by comma)
is_classification_col - is classification dataset (1 true / 0 false, false means regression)
k_fold_entry - number of folds (int, greater than 2)
num_epochs_entry - number of epochs (int, greater than 1)
batch_size_entry - number of batches (int, greater than 1)
'''
# --- Script configuration -------------------------------------------------
# NOTE(review): paths are machine-specific; adjust before running elsewhere.
file_path = r'C:\Users\Ilana\PycharmProjects\Neural_Interaction_Detection\datasets\higgs\higgs.csv'
output_path = r'C:\Users\Ilana\PycharmProjects\Neural_Interaction_Detection\datasets\higgs'
is_classification = 1  # 1 = classification dataset, 0 = regression

start_time = time.time()
# Build and run the Neural Interaction Detection pipeline (parameter
# meanings are documented in the module docstring above).
nid = NID(main_effects=1, cutoff=0, is_index=1, is_header=1, file_path=file_path, output_path=output_path,
          hidden_layers_structure=[140,100,60,20],is_classification_data=is_classification, k_fold_num = 5,
          num_of_epochs = 200, batch_size = 100)
assessment = nid.run()
running_time = time.time() - start_time

# Report the final score: RMSE for regression, (1-AUC) for classification.
# FIX: corrected the "Runing" typo and removed a stray "Info" first argument
# (leftover from a GUI messagebox call) so both branches print consistently.
if is_classification == 0:
    print("NID Process Completed successfully!\nFinal RMSE is: " + str(assessment) + '\nRunning time: ' + str(running_time))
else:
    print("NID Process Completed successfully!\nFinal (1-AUC) is: " + str(assessment) + '\nRunning time: ' + str(running_time))
print('\nend')
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import click
from qiita_db.util import (
purge_filepaths as qiita_purge_filepaths,
empty_trash_upload_folder as qiita_empty_trash_upload_folder,
purge_files_from_filesystem as qiita_purge_files_from_filesystem)
from qiita_db.meta_util import (
update_redis_stats as qiita_update_redis_stats,
generate_biom_and_metadata_release as
qiita_generate_biom_and_metadata_release,
generate_plugin_releases as qiita_generate_plugin_releases)
@click.group()
def commands():
    """Maintenance CLI: each subcommand wraps one qiita_db routine."""
    pass
@commands.command()
@click.option('--remove/--no-remove', default=True,
              help='remove any filepaths from the qiita.filepath table that '
                   'are not linked to any other table')
def purge_filepaths(remove):
    # Thin wrapper around qiita_db.util.purge_filepaths.
    qiita_purge_filepaths(remove)
@commands.command()
@click.option('--remove/--no-remove', default=False,
              help='check the filesystem mounts and remove files not used in '
                   'the database')
def purge_files_from_filesystem(remove):
    # Thin wrapper around qiita_db.util.purge_files_from_filesystem.
    qiita_purge_files_from_filesystem(remove)
@commands.command()
@click.option('--remove/--no-remove', default=True,
              help='remove files from the trash folder within the upload '
                   'folders')
def empty_trash_upload_folder(remove):
    # Thin wrapper around qiita_db.util.empty_trash_upload_folder.
    qiita_empty_trash_upload_folder(remove)
@commands.command()
def update_redis_stats():
    # Refresh the cached portal statistics in redis.
    qiita_update_redis_stats()
@commands.command()
def generate_biom_and_metadata_release():
    # Generate the public BIOM/metadata release.
    qiita_generate_biom_and_metadata_release('public')
@commands.command()
def generate_plugin_releases():
    # Generate the plugin release artifacts.
    qiita_generate_plugin_releases()
# Script entry point.
if __name__ == "__main__":
    commands()
|
#! usr/bin/python3
from treenode import TreeNode
def maxDepth(root: "TreeNode") -> int:
    """Return the number of nodes on the longest root-to-leaf path.

    An empty tree has depth 0. O(n) time, O(h) recursion depth.

    FIX: the annotation is now a string so the module imports even when
    TreeNode is unavailable, and the explicit leaf check was removed —
    max(0, 0) + 1 == 1 already covers a leaf node.
    """
    if not root:
        return 0
    return max(maxDepth(root.left), maxDepth(root.right)) + 1
# Quick manual check: a root with one right child has depth 2.
t = TreeNode(1)
t.right = TreeNode(2)
print(maxDepth(t))
|
# quick sort
def quickSort(arr, start, end):
    """Sort arr[start:end + 1] in place (first element of the slice is the
    pivot).

    :param arr: list to sort (mutated in place)
    :param start: index of the first element of the slice
    :param end: index of the last element of the slice

    FIX: removed a leftover debug print(arr) that polluted stdout on every
    swap.
    """
    if start >= end:  # zero/one-element slice, or indices crossed: done
        return
    pivot = start
    i = start + 1  # scans rightwards for an element greater than the pivot
    j = end        # scans leftwards for an element smaller than the pivot
    while i <= j:  # repeat until the two scans cross
        while i <= end and arr[pivot] >= arr[i]:
            i += 1
        while j > start and arr[pivot] <= arr[j]:
            j -= 1
        if i > j:
            # Scans crossed: place the pivot at its final position.
            arr[j], arr[pivot] = arr[pivot], arr[j]
        else:
            # Otherwise swap the out-of-place pair and keep scanning.
            arr[i], arr[j] = arr[j], arr[i]
    # Recurse on the two partitions either side of the pivot.
    quickSort(arr, start, j - 1)
    quickSort(arr, j + 1, end)
# Demo: sort a sample list and show it before and after.
data = [6, 1, 5, 7, 4, 8, 3, 9, 2, 10, 6]
print(f"{'before':=^40}\n{data}\n")
quickSort(data, 0, len(data) - 1)
print(f"{'after':=^40}\n{data}\n")
|
# 92. Backpack
'''
Given n items with size Ai, an integer m denotes the size of a backpack. How full you can fill this backpack?
'''
Basic idea: DP
T[size][j] is the maximum capacity the backpack can fill using capacity `size` and items 0...j
T[size][j] = T[size][j-1]                                      if A[j] > size
             max(T[size - A[j]][j-1] + A[j], T[size][j-1])     otherwise
Solution: 2d array
class Solution:
    def backPack(self, m, A):
        """Return the largest total item size <= m achievable with A (0/1
        knapsack, bottom-up 2-D reachability table)."""
        count = len(A)
        # reachable[i][j]: True if total size j is achievable using the
        # first i items.
        reachable = [[False] * (m + 1) for _ in range(count + 1)]
        for i in range(count + 1):
            reachable[i][0] = True  # the empty selection always works
        for i in range(1, count + 1):
            size = A[i - 1]
            for j in range(1, m + 1):
                without_item = reachable[i - 1][j]
                with_item = j >= size and reachable[i - 1][j - size]
                reachable[i][j] = without_item or with_item
        # The largest reachable total wins.
        for j in range(m, -1, -1):
            if reachable[count][j]:
                return j
        return 0
Solution: 1d array
class Solution:
    """
    @param m: An integer m denotes the size of a backpack
    @param A: Given n items with size A[i]
    @return: The maximum size
    """
    def backPack(self, m, A):
        """Bottom-up 1-D DP over achievable totals; O(n*m) time, O(m) space."""
        achievable = [False] * (m + 1)
        achievable[0] = True  # empty selection
        best = 0
        for size in A:
            # Walk capacities downwards so each item is used at most once.
            for total in range(m, 0, -1):
                if total >= size and achievable[total - size]:
                    achievable[total] = True
                    best = max(best, total)
        return best
Solution: Top-down
class Solution:
    def backPack(self, m, A):
        """Memoized top-down DP: best fill of capacity `size` using items
        with index <= i."""
        import functools

        @functools.lru_cache(None)
        def best_fill(size, i):
            # No capacity or no items left: nothing can be packed.
            if size <= 0 or i < 0:
                return 0
            if A[i] > size:
                return best_fill(size, i - 1)  # item i cannot fit
            # Take item i or skip it, whichever fills more.
            return max(best_fill(size - A[i], i - 1) + A[i],
                       best_fill(size, i - 1))

        return best_fill(m, len(A) - 1)
from datetime import date
from decimal import Decimal
from onegov.core.utils import Bunch
from onegov.swissvotes.collections import SwissVoteCollection
from onegov.swissvotes.external_resources import MfgPosters
from onegov.swissvotes.external_resources import SaPosters
from onegov.swissvotes.external_resources.posters import Posters
from pytest import raises
from unittest.mock import MagicMock
from unittest.mock import patch
xml = '''
<object>
<field name="primaryMedia">
<value>{}</value>
</field>
</object>
'''
def test_posters_fetch(swissvotes_app):
    """End-to-end test of MfgPosters/SaPosters.fetch.

    fetch() returns (added, updated, removed, failed_bfs_numbers).

    FIX: removed a duplicated `votes = SwissVoteCollection(...)` statement.
    """
    session = swissvotes_app.session()
    mfg_posters = MfgPosters('xxx')
    sa_posters = SaPosters()

    # No votes yet: nothing added/updated/removed.
    assert mfg_posters.fetch(session) == (0, 0, 0, set())
    assert sa_posters.fetch(session) == (0, 0, 0, set())

    votes = SwissVoteCollection(swissvotes_app)
    kwargs = {
        'date': date(1990, 6, 2),
        'title_de': "Vote DE",
        'title_fr': "Vote FR",
        'short_title_de': "V D",
        'short_title_fr': "V F",
        '_legal_form': 1,
    }
    vote_1 = votes.add(
        id=1,
        bfs_number=Decimal('1'),
        posters_mfg_yea='1.1 1.2 1.3 1.4',
        posters_mfg_nay='',
        posters_sa_yea='1.5 1.6 1.7 1.8',
        posters_sa_nay='',
        **kwargs
    )
    vote_2 = votes.add(
        id=2,
        bfs_number=Decimal('2'),
        posters_mfg_yea='2.1',
        posters_mfg_nay='2.2',
        posters_sa_yea='2.3',
        posters_sa_nay='2.4',
        **kwargs
    )
    vote_3 = votes.add(
        id=3,
        bfs_number=Decimal('3'),
        posters_mfg_yea='',
        posters_mfg_nay='',
        posters_sa_yea='',
        posters_sa_nay='3.1',
        **kwargs
    )

    # First fetch: every referenced poster resolves to the mocked URL.
    with patch(
        'onegov.swissvotes.external_resources.posters.get',
        return_value=MagicMock(content=xml.format('http://source/xxx'))
    ):
        assert mfg_posters.fetch(session) == (6, 0, 0, set())
        assert vote_1.posters_mfg_yea_imgs == {
            '1.1': 'https://source/xxx',
            '1.2': 'https://source/xxx',
            '1.3': 'https://source/xxx',
            '1.4': 'https://source/xxx'
        }
        assert vote_1.posters_mfg_nay_imgs == {}
        assert vote_2.posters_mfg_yea_imgs == {'2.1': 'https://source/xxx'}
        assert vote_2.posters_mfg_nay_imgs == {'2.2': 'https://source/xxx'}
        assert vote_3.posters_mfg_yea_imgs == {}
        assert vote_3.posters_mfg_nay_imgs == {}

        assert sa_posters.fetch(session) == (7, 0, 0, set())
        assert vote_1.posters_sa_yea_imgs == {
            '1.5': 'https://source/xxx',
            '1.6': 'https://source/xxx',
            '1.7': 'https://source/xxx',
            '1.8': 'https://source/xxx'
        }
        assert vote_1.posters_sa_nay_imgs == {}
        assert vote_2.posters_sa_yea_imgs == {'2.3': 'https://source/xxx'}
        assert vote_2.posters_sa_nay_imgs == {'2.4': 'https://source/xxx'}
        assert vote_3.posters_sa_yea_imgs == {}
        assert vote_3.posters_sa_nay_imgs == {'3.1': 'https://source/xxx'}

    # Change the referenced posters to exercise add/update/remove counting.
    vote_1.posters_mfg_yea = '1.1 1.2'  # -2
    vote_1.posters_mfg_nay = '1.9 1.10'  # +2
    vote_1.posters_sa_yea = '1.5 1.6'  # -2
    vote_1.posters_sa_nay = '1.11 1.12'  # +2
    vote_3.posters_sa_nay = ''  # -1
    with patch(
        'onegov.swissvotes.external_resources.posters.get',
        return_value=MagicMock(content=xml.format('http://source/yyy'))
    ):
        assert mfg_posters.fetch(session) == (2, 4, 2, set())
        assert vote_1.posters_mfg_yea_imgs == {
            '1.1': 'https://source/yyy',
            '1.2': 'https://source/yyy',
        }
        assert vote_1.posters_mfg_nay_imgs == {
            '1.9': 'https://source/yyy',
            '1.10': 'https://source/yyy',
        }
        assert vote_2.posters_mfg_yea_imgs == {'2.1': 'https://source/yyy'}
        assert vote_2.posters_mfg_nay_imgs == {'2.2': 'https://source/yyy'}
        assert vote_3.posters_mfg_yea_imgs == {}
        assert vote_3.posters_mfg_nay_imgs == {}

        assert sa_posters.fetch(session) == (2, 4, 3, set())
        assert vote_1.posters_sa_yea_imgs == {
            '1.5': 'https://source/yyy',
            '1.6': 'https://source/yyy',
        }
        assert vote_1.posters_sa_nay_imgs == {
            '1.11': 'https://source/yyy',
            '1.12': 'https://source/yyy',
        }
        assert vote_2.posters_sa_yea_imgs == {'2.3': 'https://source/yyy'}
        assert vote_2.posters_sa_nay_imgs == {'2.4': 'https://source/yyy'}
        assert vote_3.posters_sa_yea_imgs == {}
        assert vote_3.posters_sa_nay_imgs == {}

    # Network failure: nothing changes, failing bfs numbers are reported.
    with patch(
        'onegov.swissvotes.external_resources.posters.get',
        side_effect=Exception()
    ):
        assert mfg_posters.fetch(session) == (
            0, 0, 0, {vote_1.bfs_number, vote_2.bfs_number}
        )
        assert vote_1.posters_mfg_yea_imgs == {
            '1.1': 'https://source/yyy',
            '1.2': 'https://source/yyy',
        }
        assert vote_1.posters_mfg_nay_imgs == {
            '1.9': 'https://source/yyy',
            '1.10': 'https://source/yyy',
        }
        assert vote_2.posters_mfg_yea_imgs == {'2.1': 'https://source/yyy'}
        assert vote_2.posters_mfg_nay_imgs == {'2.2': 'https://source/yyy'}
        assert vote_3.posters_mfg_yea_imgs == {}
        assert vote_3.posters_mfg_nay_imgs == {}

        assert sa_posters.fetch(session) == (
            0, 0, 0, {vote_1.bfs_number, vote_2.bfs_number}
        )
        assert vote_1.posters_sa_yea_imgs == {
            '1.5': 'https://source/yyy',
            '1.6': 'https://source/yyy',
        }
        assert vote_1.posters_sa_nay_imgs == {
            '1.11': 'https://source/yyy',
            '1.12': 'https://source/yyy',
        }
        assert vote_2.posters_sa_yea_imgs == {'2.3': 'https://source/yyy'}
        assert vote_2.posters_sa_nay_imgs == {'2.4': 'https://source/yyy'}
        assert vote_3.posters_sa_yea_imgs == {}
        assert vote_3.posters_sa_nay_imgs == {}
def test_posters_meta_data_url():
    # Each provider derives its metadata URL from an object id.
    assert MfgPosters('xxx').meta_data_url('object') == (
        'https://www.emuseum.ch/objects/object/xml'
    )
    assert SaPosters().meta_data_url('object') == (
        'https://swissvotes.sozialarchiv.ch/object'
    )
def test_posters_parse_xml(session):
    # Minimal concrete subclass: Posters requires meta_data_url.
    class MyPosters(Posters):
        def meta_data_url(self, url):
            return url

    # parse xml
    posters = MyPosters()
    # Missing/empty content and payloads without a usable value all fail.
    with raises(Exception):
        posters.parse_xml(Bunch(content=None))
    with raises(Exception):
        posters.parse_xml(Bunch(content=''))
    with raises(ValueError):
        posters.parse_xml(Bunch(content='<object></object>'))
    with raises(ValueError):
        posters.parse_xml(Bunch(content=xml.format('')))
    # Happy path: the primaryMedia value is returned as-is.
    assert posters.parse_xml(Bunch(content=xml.format('url'))) == 'url'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# time: 2020-8-9 23:11:00
# version: 1.0
# __author__: zhilong
import pymysql
def init():
    """Create the `wangyi` article table in the local `spider` database.

    FIXES: uses CREATE TABLE IF NOT EXISTS so re-running the script no
    longer raises once the table exists, and closes the cursor/connection
    even when the DDL fails.
    """
    conn = pymysql.connect(host='localhost', user='root', password='rootpwd', charset='utf8mb4', db='spider')
    try:
        with conn.cursor() as cursor:
            sql = '''
            create table if not exists wangyi(id int primary key auto_increment,
            title varchar(256),
            content text)
            '''
            cursor.execute(sql)
        conn.commit()
    finally:
        # Always release the connection.
        conn.close()
def main():
    # Placeholder for the crawl/store logic.
    pass
# One-off setup: create the destination table.
if __name__ == '__main__':
    init()
|
import pytest
import boto3
from moto import mock_s3
import requests_mock
import os
# Configure the lambda's expected environment before it is imported.
os.environ['ATTACHMENT_BUCKET'] = 'attachments'
os.environ['BPM_CSRF_URL'] = 'https://localhost/bpm/dev/csrf'
os.environ['BPM_EMAIL_URL'] = 'https://localhost/bpm/dev/launch'
os.environ['BPM_PW'] = 'blah'
os.environ['BPM_USER'] = 'blah'
@pytest.fixture(scope='function')
def aws_credentials():
    """Mocked AWS Credentials for moto."""
    fake_credentials = {
        'AWS_ACCESS_KEY_ID': 'testing',
        'AWS_SECRET_ACCESS_KEY': 'testing',
        'AWS_SECURITY_TOKEN': 'testing',
        'AWS_SESSION_TOKEN': 'testing',
    }
    os.environ.update(fake_credentials)
@pytest.fixture(scope='function')
def s3(aws_credentials):
    # Mocked S3 client; depends on aws_credentials so moto never sees
    # real keys.
    with mock_s3():
        yield boto3.client('s3', region_name='us-east-1')
@mock_s3
@pytest.mark.skip(reason="Needs updating to new nifi method")
def test_load_good_manifest(s3):
    # Happy path: a manifest landing in S3 triggers the handler, which
    # posts to the (mocked) BPM endpoints and should report HTTP 201.
    from lambdas.lambda_function import lambda_handler
    s3.create_bucket(Bucket=os.environ['ATTACHMENT_BUCKET'])
    s3.create_bucket(Bucket='email')
    with open('tests/json/manifest_ok.json', 'r') as manifest_file:
        manifest_json = manifest_file.read()
    s3.put_object(Body=manifest_json,
                  Bucket='manifest',
                  Key='xyz')
    with requests_mock.Mocker() as mock:
        mock.post(os.getenv("BPM_CSRF_URL"), json={"csrf_token": "FAKETOKEN"}, status_code=201)
        mock.post(os.getenv("BPM_EMAIL_URL"), status_code=201)
        # Simulate the S3 event notification for the uploaded manifest.
        assert (lambda_handler({'Records': [
            {'s3': {
                'bucket': {'name': 'manifest'},
                'object': {'key': 'xyz'}
            }}
        ]}, None))['status'] == 201
|
## List Mutation ##
def map(fn, lst):
    """Maps fn onto lst using mutation.
    >>> original_list = [5, -1, 2, 0]
    >>> map(lambda x: x * x, original_list)
    >>> original_list
    [25, 1, 4, 0]
    """
    # Slice assignment mutates the caller's list in place.
    lst[:] = [fn(element) for element in lst]
# lst[:] = [fn(item) for item in lst]
def filter(pred, lst):
    """Filters lst with pred using mutation.
    >>> original_list = [5, -1, 2, 0]
    >>> filter(lambda x: x % 2 == 0, original_list)
    >>> original_list
    [2, 0]
    """
    kept = []
    for element in lst:
        if pred(element):
            kept.append(element)
    # Slice assignment mutates the caller's list in place.
    lst[:] = kept
# index (the position item) vs element (the thing, lst[item[]]) in a list
## Dictionaries ##
def replace_all(d, x, y):
    """Replace all occurrences of x as a value (not a key) in d with y.
    >>> d = {3: '3', 'foo': 2, 'bar': 3, 'garply': 3, 'xyzzy': 99}
    >>> replace_all(d, 3, 'poof')
    >>> d == {3: '3', 'foo': 2, 'bar': 'poof', 'garply': 'poof', 'xyzzy': 99}
    True
    """
    # Rewriting values (not keys) during iteration is safe: the key set
    # never changes.
    for key, value in d.items():
        if value == x:
            d[key] = y
|
import os
def main():
    # Run against the puzzle input stored next to this script.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    inp = open("day23_input.txt").read().splitlines()
    print(solve(inp))
class Duet:
    """Interpreter for the AoC 2017 day 23 'coprocessor' assembly
    (set/sub/mul/jnz over registers a-h)."""

    def __init__(self, instructions):
        self.instructions = instructions
        self.cursor = 0                  # index of the next instruction
        self.registers = {i: 0 for i in ['a','b','c','d','e','f','g','h']}
        self.finished = 0                # 1 once the cursor leaves the program
        self.mul_count = 0               # number of 'mul' instructions executed

    def grab(self, x):
        """Resolve an operand: register name -> its value, else int literal."""
        if x.isalpha():
            return self.registers[x]
        else:
            return int(x)

    def next(self):
        """Execute one instruction, or mark the program finished."""
        if self.cursor >= len(self.instructions):
            self.finished = 1
            return
        ins, *p = self.instructions[self.cursor].split()
        if ins == 'jnz':
            self.jnz(*p)  # jnz advances the cursor itself
        elif ins == 'set':
            self.set(*p)
            self.cursor += 1
        elif ins == 'mul':
            self.mul(*p)
            self.cursor += 1
        elif ins == 'sub':
            self.sub(*p)
            self.cursor += 1
        else:
            # FIX: this warning was unreachable in the original dispatch —
            # any unknown opcode fell into the "not jnz" branch and was
            # silently skipped. Unknown opcodes are now reported.
            print(f"Unknown instruction: {(ins, *p)}")
            self.cursor += 1

    def set(self, x, y):
        self.registers[x] = self.grab(y)

    def sub(self, x, y):
        self.registers[x] -= self.grab(y)

    def mul(self, x, y):
        self.mul_count += 1
        self.registers[x] *= self.grab(y)

    def jnz(self, x, y):
        # Jump by y when x is non-zero; otherwise fall through.
        if self.grab(x) != 0:
            self.cursor += self.grab(y)
        else:
            self.cursor += 1
def solve(inp):
    """Run the program to completion and return how many times 'mul' ran."""
    machine = Duet(inp)
    while not machine.finished:
        machine.next()
    return machine.mul_count
# Script entry point.
if __name__ == '__main__':
    main()
|
import config
from bs4 import BeautifulSoup as bs
import os
def createTests():
    # For each configured container, scrape the "example" snippet from every
    # function doc page and emit it as a compilable test source file.
    for c in config.containers :
        os.makedirs(config.testsDir + '/' + c + '/src', exist_ok=True)
        for file in os.scandir(config.pagesDir + '/' + c + '/functions/' ):
            with open(file, "r") as f :
                content = f.read()
                soup = bs(content, 'html.parser')
                example = soup.find_all(id="example")
                # Only pages with exactly one example section are usable.
                if len(example) == 1:
                    code = example[0].find(class_="source").find("code").text
                    with open(config.testsDir + '/' + c + '/src/' + file.name.replace(".html", ".cpp"), "w") as testfile:
                        # Substitute placeholders so the snippet builds
                        # against the project's own header and namespace.
                        code = code.replace("<" + c +">", "HEADER_FILE")
                        code = code.replace("std::" + c, "NAMESPACE::" + c)
                        testfile.write(code)
                        print("Generated code for " + testfile.name)
# Script entry point.
if __name__ == "__main__":
    createTests()
|
# Project Euler 28: sum of the numbers on both diagonals of an n x n spiral
# built by counting upwards from 1 in the centre.
# FIX: modernized Python-2-only syntax (print statement, true-division in
# range) so the script runs on Python 3 with identical output.
n = 1001
a = 1
sq_list = [1]  # 1 sits at the centre of the spiral
for i in range(1, 1 + (n - 1) // 2):
    # Ring i contributes four corner values, each 2*i apart.
    for j in range(4):
        a += 2 * i
        sq_list.append(a)
    a = sq_list[-1]
print(sum(sq_list))
|
import glob2
from datetime import datetime
# Input files to concatenate, and the fixed output name used by the first
# merge below (a timestamped output is used by the second merge).
szorsz = ["file1.txt", "file2.txt", "file3.txt"]
#fajlnev = str(date(now)) + ".txt"
fajlnev = "etwas.txt"
def writer(fajlok, fajlpath):
    """Append the contents of each file in `fajlok` to `fajlpath`,
    separating entries with a newline.

    FIXES: input files are now opened via a context manager so they are
    closed even if reading fails, and the redundant close() of the
    with-managed output handle was removed.
    """
    with open(fajlpath, "a") as myfile:
        for s in fajlok:
            with open(s) as readf:
                content = readf.read()
            myfile.write(content + "\n")
# Merge the hard-coded list first...
writer(szorsz, fajlnev)
# ...then merge every .txt in the cwd into a timestamped output file.
filenames = glob2.glob("*.txt")
with open(datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")+".txt", 'w') as file:
    for filename in filenames:
        with open(filename, "r") as f:
            file.write(f.read() + "\n")
|
import sys
# Redirect stdin so input() reads the local test file (competitive
# programming setup).
sys.stdin=open("input.txt", "r")
'''
# 사용해야 하는 알고리즘 = bfs (최단 거리)
: 어느 지점에서 목표 지점으로 가는 최단 거리는 bfs로 구한다.
# 문제 풀이 아이디어
: 현재 위치에서 bfs를 한다.
: 단, bfs를 하기 전에 불이 번진 것을 먼저 표시한다.
: 출구가 몇개인지 어디 있는지 알 수 없으므로
: x 혹은 y 좌표가 가장자리에 위치하면 탈출한 것으로 간주한다.
# 중간 개선 사항
1. 시간초과 : fire를 원래 이중 반복문으로 체크하면서 늘렸는데 좌표 압축으로
2. 메모리 초과 : visited 따로 없이
'''
from collections import deque
# 4-neighbour offsets (x = column, y = row).
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
T = int(input())  # number of test cases
def isValid(x, y):
    """True when (x, y) lies inside the w x h board (module globals)."""
    return 0 <= x < w and 0 <= y < h
def isEscaped(x, y):
    """True when (x, y) touches the border, i.e. the next step leaves the
    maze (w/h are module globals)."""
    if x == 0 or y == 0:
        return True
    return x == w - 1 or y == h - 1
# def moreFire():
# check = [[False for _ in range(w)] for _ in range(h)]
# for fx in range(w):
# for fy in range(h):
# if board[fy][fx] == "*" and not check[fy][fx]:
# check[fy][fx] = True
# for i in range(4):
# nfx = fx + dx[i]
# nfy = fy + dy[i]
# if isValid(nfx, nfy) and board[nfy][nfx] != "#" and not check[nfy][nfx]:
# board[nfy][nfx] = "*"
# check[nfy][nfx] = True
def morefire(fires):
    """Return the fire front advanced by one step: every existing fire cell
    plus each of its non-wall neighbours (duplicates are not filtered)."""
    spread = []
    for x, y in fires:
        for step in range(4):
            nx = x + dx[step]
            ny = y + dy[step]
            if isValid(nx, ny) and board[ny][nx] != "#":
                spread.append((nx, ny))
    return fires + spread
def bfs(x, y, fires):
    # Breadth-first escape search. The fire front advances once per time
    # step, before moves at that step are expanded.
    dq = deque()
    dq.append((x, y, 0))
    board[y][x] = 1  # mark visited in-place (saves a separate visited array)
    currentTime = 0
    fires = morefire(fires)  # fire spreads before the first move
    while dq:
        x, y, t = dq.popleft()
        if t > currentTime:
            # First node of a new time step: advance the fire once.
            fires = morefire(fires)
            currentTime = t
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            nextPosition = (nx, ny)
            # NOTE(review): `nextPosition not in fires` is a linear scan over
            # a list with duplicates — works, but is the hot spot here.
            if isValid(nx, ny) and nextPosition not in fires and board[ny][nx] == ".":
                if isEscaped(nx, ny):
                    # Border cell reached at time t+1; stepping out takes
                    # one more move, hence t + 2.
                    print(t + 2)
                    return
                else:
                    dq.append((nx, ny, t + 1))
                    board[ny][nx] = 1
    print("IMPOSSIBLE")
    return
for _ in range(T):
    w, h = map(int, input().split())
    board = [list(input()) for _ in range(h)]
    fires = []
    # Collect the initial fire cells...
    for i in range(w):
        for j in range(h):
            if board[j][i] == "*":
                fires.append((i, j))
    # ...then start the escape search from the '@' start cell.
    for i in range(w):
        for j in range(h):
            if board[j][i] == "@":
                bfs(i, j, fires)
'''
# 사용해야 하는 알고리즘 = bfs (최단 거리)
: 어느 지점에서 목표 지점으로 가는 최단 거리는 bfs로 구한다.
# 문제 풀이 아이디어
: 맨 S에서 출발해서 bfs로 최단거리를 기록하면서 간다.
: 매번 bfs를 할 때 마다 상하동서남북 6방향을 확인한다.
: E에 도달하면 도달한 시간을 출력하고
: 완전탐색 이후에도 E에 도달하지 못하면 Trapped를 출력한다.
# 의사코드
1. 입력을 받고 배열에 각 층별로 저장한다.
1-1. 배열 = 인접행렬 * (층수)
2. 맨 위층부터 bfs를 돌면서 최단 경로를 찾는다.
2-1. queue에 저장할 때 걸린 시간을 같이 저장하고
2-2. 매번 6방향 탐색을 할 때 마다 "E"에 도달했는지 확인한다.
3. E를 만나면 시간을 출력하고 완전탐색 이후에도 E를 만나지 못하면 Trapped를 출력한다.
# 직접 코드 짜본 후 소감
: 무조건 내려가는 방향으로 가는 것이 좋다고 생각했는데
: 아닌 반례도 충분히 있으니 (윗층, 아래층에 벽이 있어서 내려갔다, 올라갔다, 다시 내려가야하는 경우)
: 6방향 탐색을 해야함!
: bfs 내부의 queue는 무조건 비용 오름 차순으로 정렬 되어있다. (시간이 적게 걸리는 것이 앞으로 오도록)
: 즉 "E"를 만나면 더 이상 queue에 있는 것을 추가적으로 살펴보지 않아도 됨.
: 중간에 메모리 초과 났는데 코드 고치다 보니되었음 ㅡㅡ;;;
'''
# from collections import deque
# dr = [1, -1, 0, 0, 0, 0]
# dc = [0, 0, 1, -1, 0, 0]
# dl = [0, 0, 0, 0, 1, -1]
# # 🚫 이딴데서 오타나서 틀림!!!
# while True:
# L, R, C = map(int, input().split())
# if L == R == C == 0:
# break
# def isValid(l, r, c):
# if l >= 0 and l < L and r >= 0 and r < R and c >= 0 and c < C:
# return True
# else:
# return False
# building = []
# for _ in range(L):
# floor = [list(input()) for _ in range(R)]
# building.append(floor)
# input()
# # 💡 빈 줄 하나 띄고 입력 받기
# for i in range(L):
# for j in range(R):
# for k in range(C):
# if building[i][j][k] == "S":
# start = (i, j, k, 0)
# break
# dq = deque()
# dq.append(start)
# l, r, c, t = start
# building[l][r][c] = "#"
# escaped = False
# # 💡 이중반복문을 빠져나오기 위한 수단
# while dq and not escaped:
# l, r, c, t = dq.popleft()
# for i in range(6):
# nl = l + dl[i]
# nr = r + dr[i]
# nc = c + dc[i]
# if isValid(nl, nr, nc) and building[nl][nr][nc] != "#":
# if building[nl][nr][nc] == "E":
# escaped = True
# break
# # ⭐️ bfs에서의 queue는 무조건 t 순으로 정렬되어 있다.
# # 즉 앞으로 나올 것은 t가 더 많거나 적어도 같다
# else:
# dq.append((nl, nr, nc, t + 1))
# building[nl][nr][nc] = "#"
# if escaped:
# print(f"Escaped in {t + 1} minute(s).")
# else:
# print("Trapped!")
'''
# 사용해야 하는 알고리즘 = dfs 혹은 bfs (완전탐색)
: 일정 높이 이상의 안전지대를 구하려면
: 완전 탐색이 필요하다.
# 문제 풀이 아이디어
: 각 비의 양에 따라서 물에 잠기는 곳을
: 다른 지도문제의 벽이라고 생각하고
: 각 비의 양에 따라서 dfs를 돌면서 최대 값을 구하면 된다.
# 의사 코드
1. 입력을 받아서 인접 행렬로 만든다.
2. dfs 함수를 짤 때 비의 높이를 받아서 비의 높이 이상만 방문하도록 짠다.
3. 비의 높이를 0부터 시작해서 1씩 늘여가면서 반복문을 돈다.
3-1. 인접 행렬의 [0][0]부터 완전 탐색을 돌면서
3-2. 안전 지대의 갯수를 세고
3-3. 기존의 max 갯수와 비교해서 최댓값을 갱신한다.
4. 안전지대의 갯수가 0이면 반복문을 탈출한다. (다 잠김)
5. 최댓값을 출력한다.
# 주의할 점!
: 높이가 전부 같아서 한번도 안 잠기다가 한방에 다 잠기는 경우
: 높이가 전부 1이여서 비를 1부터 시작하면 다 잠겨 버리는 경우
# 시간복잡도
: 모든 좌표를 도는 반복문이 O(n**n)
: 그리고 다 잠길 때 까지 반복문을 도는데 높이가 최대 100이므로 안전
'''
# import sys
# sys.setrecursionlimit(10000)
# dx = [1, -1, 0, 0]
# dy = [0, 0, 1, -1]
# n = int(input())
# board = [list(map(int, input().split())) for _ in range(n)]
# def isValid(x, y):
# if x >= 0 and x < n and y >= 0 and y < n and check[y][x] == 0:
# return True
# else:
# return False
# def dfs(x, y, rain):
# global check
# check[y][x] = 1
# for i in range(4):
# nx = x + dx[i]
# ny = y + dy[i]
# if isValid(nx, ny) and board[ny][nx] > rain:
# dfs(nx, ny, rain)
# rain = 0
# maxResult = 0
# while True:
# check = [[0 for _ in range(n)] for _ in range(n)]
# currentResult = 0
# for i in range(n):
# for j in range(n):
# if board[j][i] > rain and check[j][i] == 0:
# dfs(i, j, rain)
# currentResult += 1
# maxResult = max(maxResult, currentResult)
# if currentResult < 1:
# break
# rain += 1
# print(maxResult)
|
# Copyright 2017 Covata Limited or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import requests
from . import signer, utils
from enum import Enum
class SecretLookupType(Enum):
    """
    Enumerates the applicable secret lookup types.
    """
    # NOTE(review): members appear to be referenced by name; the numeric
    # values are not used by the code visible here — confirm before changing.
    base = 1
    """
    Restricts lookup to base secrets.
    """
    derived = 2
    """
    Restricts lookup to derived secrets.
    """
    any = 3
    """
    Perform lookup on both base and derived secrets.
    """
class ApiClient:
"""
The Delta API Client is an abstraction over the Delta API for execution of
requests and responses.
"""
DELTA_URL = 'https://delta.covata.io/v1' # type: str
RESOURCE_IDENTITIES = '/identities' # type: str
RESOURCE_SECRETS = '/secrets' # type: str
RESOURCE_EVENTS = '/events' # type: str
    def __init__(self, key_store):
        """
        Constructs a new Delta API client with the given configuration.

        :param key_store: the DeltaKeyStore object
        :type key_store: :class:`~.DeltaKeyStore`
        """
        # Name-mangled attribute; exposed read-only via the key_store property.
        self.__key_store = key_store
    @property
    def key_store(self):
        """Read-only accessor for the backing DeltaKeyStore."""
        return self.__key_store
def register_identity(self, public_encryption_key, public_signing_key,
external_id=None, metadata=None):
"""
Creates a new identity in Delta with the provided metadata
and external id.
:param str public_encryption_key:
the public encryption key to associate with the identity
:param str public_signing_key:
the public signing key to associate with the identity
:param external_id: the external id to associate with the identity
:type external_id: str | None
:param metadata: the metadata to associate with the identity
:type metadata: dict[str, str] | None
:return: the id of the newly created identity
:rtype: str
"""
body = dict(
signingPublicKey=public_signing_key,
cryptoPublicKey=public_encryption_key,
externalId=external_id,
metadata=metadata)
response = requests.post(
url=self.DELTA_URL + self.RESOURCE_IDENTITIES,
json=dict((k, v) for k, v in body.items() if v is not None))
response.raise_for_status()
identity_id = response.json()['identityId']
return identity_id
@utils.check_id("requestor_id, identity_id")
def get_identity(self, requestor_id, identity_id):
"""
Gets the identity matching the given identity id.
:param str requestor_id: the authenticating identity id
:param str identity_id: the identity id to retrieve
:return: the retrieved identity
:rtype: dict[str, any]
"""
response = requests.get(
url="{base_url}{resource}/{identity_id}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_IDENTITIES,
identity_id=identity_id),
auth=self.signer(requestor_id))
response.raise_for_status()
identity = response.json()
return identity
@utils.check_id("requestor_id")
@utils.check_optional_pagination("page, page_size")
@utils.check_arguments(
"metadata",
lambda x: x is not None and dict(x),
"must be a non-empty dict[str, str]")
def get_identities_by_metadata(self, requestor_id, metadata,
page=None, page_size=None):
"""
Gets a list of identities matching the given metadata key and value
pairs, bound by the pagination parameters.
:param str requestor_id: the authenticating identity id
:param metadata: the metadata key and value pairs to filter
:type metadata: dict[str, str]
:param page: the page number
:type page: int | None
:param page_size: the page size
:type page_size: int | None
:return: a list of identities satisfying the request
:rtype: list[dict[str, any]]
"""
metadata_ = dict(("metadata." + k, v) for k, v in metadata.items())
response = requests.get(
url="{base_url}{resource}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_IDENTITIES),
params=dict(metadata_,
page=int(page) if page else None,
pageSize=int(page_size) if page_size else None),
auth=self.signer(requestor_id))
response.raise_for_status()
return response.json()
@utils.check_id("requestor_id")
def create_secret(self, requestor_id, content, encryption_details):
"""
Creates a new secret in Delta. The key used for encryption should
be encrypted with the key of the authenticating identity.
It is the responsibility of the caller to ensure that the contents
and key material in the encryption details are properly represented
in a suitable string encoding (such as base64).
:param str requestor_id: the authenticating identity id
:param str content: the contents of the secret
:param encryption_details: the encryption details
:type encryption_details: dict[str, str]
:return: the created base secret
:rtype: dict[str, str]
"""
response = requests.post(
url="{base_url}{resource}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS),
json=dict(
content=content,
encryptionDetails=encryption_details
),
auth=self.signer(requestor_id))
response.raise_for_status()
return response.json()
@utils.check_id("requestor_id, base_secret_id, rsa_key_owner_id")
def share_secret(self, requestor_id, content, encryption_details,
base_secret_id, rsa_key_owner_id):
"""
Shares the base secret with the specified target RSA key owner. The
contents must be encrypted with the public encryption key of the
RSA key owner, and the encrypted key and initialisation vector must
be provided. This call will result in a new derived secret being created
and returned as a response.
It is the responsibility of the caller to ensure that the contents
and key material in the encryption details are properly represented
in a suitable string encoding (such as base64).
:param str requestor_id: the authenticating identity id
:param str content: the contents of the secret
:param encryption_details: the encryption details
:type encryption_details: dict[str, str]
:param str base_secret_id: the id of the base secret
:param str rsa_key_owner_id: the id of the rsa key owner
:return: the created derived secret
:rtype: dict[str, str]
"""
response = requests.post(
url="{base_url}{resource}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS),
json=dict(
content=content,
encryptionDetails=encryption_details,
baseSecret=base_secret_id,
rsaKeyOwner=rsa_key_owner_id
),
auth=self.signer(requestor_id))
response.raise_for_status()
return response.json()
@utils.check_id("requestor_id, secret_id")
def delete_secret(self, requestor_id, secret_id):
"""
Deletes the secret with the given secret id.
:param str requestor_id: the authenticating identity id
:param str secret_id: the secret id to be deleted
"""
response = requests.delete(
url="{base_url}{resource}/{secret_id}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS,
secret_id=secret_id),
auth=self.signer(requestor_id))
response.raise_for_status()
@utils.check_id("requestor_id, secret_id")
def get_secret(self, requestor_id, secret_id):
"""
Gets the given secret. This does not include the metadata and contents,
they need to be made as separate requests,
:func:`~.ApiClient.get_secret_metadata`
and :func:`~.ApiClient.get_secret_content` respectively.
:param str requestor_id: the authenticating identity id
:param str secret_id: the secret id to be retrieved
:return: the retrieved secret
:rtype: dict[str, any]
"""
response = requests.get(
url="{base_url}{resource}/{secret_id}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS,
secret_id=secret_id),
auth=self.signer(requestor_id))
response.raise_for_status()
return response.json()
@utils.check_id("requestor_id, secret_id")
def get_secret_metadata(self, requestor_id, secret_id):
"""
Gets the metadata key and value pairs for the given secret.
:param str requestor_id: the authenticating identity id
:param str secret_id: the secret id to be retrieved
:return: the retrieved secret metadata dictionary and version tuple
:rtype: (dict[str, str], int)
"""
response = requests.get(
url="{base_url}{resource}/{secret_id}/metadata".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS,
secret_id=secret_id),
auth=self.signer(requestor_id))
response.raise_for_status()
metadata = dict(response.json())
version = int(response.headers["ETag"])
return metadata, version
@utils.check_id("requestor_id, secret_id")
def get_secret_content(self, requestor_id, secret_id):
"""
Gets the contents of the given secret.
:param str requestor_id: the authenticating identity id
:param str secret_id: the secret id to be retrieved
:return: the retrieved secret
:rtype: str
"""
response = requests.get(
url="{base_url}{resource}/{secret_id}/content".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS,
secret_id=secret_id),
auth=self.signer(requestor_id))
response.raise_for_status()
return response.text
@utils.check_id("requestor_id, secret_id")
@utils.check_metadata("metadata")
def update_secret_metadata(self,
requestor_id,
secret_id,
metadata,
version):
"""
Updates the metadata of the given secret given the version number.
The version of a secret's metadata can be obtained by calling
:func:`~.ApiClient.get_secret`.
A newly created base secret has a metadata version of 1.
:param str requestor_id: the authenticating identity id
:param str secret_id: the secret id to be updated
:param metadata: metadata dictionary
:type metadata: dict[str, str]
:param int version: metadata version, required for optimistic locking
"""
response = requests.put(
url="{base_url}{resource}/{secret_id}/metadata".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS,
secret_id=secret_id),
headers={
"if-match": str(version)
},
json=metadata,
auth=self.signer(requestor_id))
response.raise_for_status()
@utils.check_id("requestor_id, identity_id")
def update_identity_metadata(self,
requestor_id,
identity_id,
metadata,
version):
"""
Updates the metadata of the given identity given the version number.
The version of an identity's metadata can be obtained by calling
:func:`~.ApiClient.get_identity`.
An identity has an initial metadata version of 1.
:param str requestor_id: the authenticating identity id
:param str identity_id: the identity id to be updated
:param metadata: metadata dictionary
:type metadata: dict[str, str]
:param int version: metadata version, required for optimistic locking
"""
response = requests.put(
url="{base_url}{resource}/{identity_id}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_IDENTITIES,
identity_id=identity_id),
headers={
"if-match": str(version)
},
json=dict(metadata=metadata),
auth=self.signer(requestor_id))
response.raise_for_status()
@utils.check_id("requestor_id")
@utils.check_optional_id("secret_id, rsa_key_owner_id")
def get_events(self, requestor_id, secret_id=None, rsa_key_owner_id=None):
"""
Gets a list of events associated filtered by secret id or RSA key owner
or both secret id and RSA key owner.
:param str requestor_id: the authenticating identity id
:param secret_id: the secret id of interest
:type secret_id: str | None
:param rsa_key_owner_id: the rsa key owner id of interest
:type rsa_key_owner_id: str | None
:return: a list of audit events
:rtype: list[dict[str, any]]
"""
params = dict(purpose="AUDIT")
if secret_id is not None:
params["secretId"] = str(secret_id)
if rsa_key_owner_id is not None:
params["rsaKeyOwner"] = str(rsa_key_owner_id)
response = requests.get(
url="{base_url}{resource}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_EVENTS),
params=params,
auth=self.signer(requestor_id))
response.raise_for_status()
return response.json()
@utils.check_id("requestor_id")
@utils.check_optional_id("base_secret_id, created_by, rsa_key_owner_id")
@utils.check_optional_pagination("page, page_size")
@utils.check_arguments(
"metadata",
lambda x: x is None or dict(x),
"must be a non-empty dict[str, str]")
@utils.check_arguments(
"lookup_type",
lambda x: isinstance(x, SecretLookupType),
"must be an instance of SecretLookupType")
def get_secrets(self,
requestor_id,
base_secret_id=None,
created_by=None,
rsa_key_owner_id=None,
metadata=None,
lookup_type=SecretLookupType.any,
page=None,
page_size=None):
"""
Gets a list of secrets based on the query parameters, bound by the
pagination parameters.
:param str requestor_id: the authenticating identity id
:param base_secret_id: the id of the base secret
:type base_secret_id: str | None
:param created_by: the id of the secret creator
:type created_by: str | None
:param rsa_key_owner_id: the id of the RSA key owner
:type rsa_key_owner_id: str | None
:param metadata: the metadata associated with the secret
:type metadata: dict[str, str] | None
:param lookup_type: the type of the lookup query
:type lookup_type: :class:`~.SecretLookupType`
:param page: the page number
:type page: int | None
:param page_size: the page size
:type page_size: int | None
:return: a list of secrets satisfying the search criteria
:rtype: list[dict[str, any]]
"""
params = dict(
page=int(page) if page else None,
pageSize=int(page_size) if page_size else None,
baseSecret=None if base_secret_id is None else str(base_secret_id),
createdBy=None if created_by is None else str(created_by),
rsaKeyOwner=None if rsa_key_owner_id is None else str(
rsa_key_owner_id))
if metadata is not None:
metadata_ = dict(("metadata." + k, v) for k, v in metadata.items())
params.update(metadata_)
if lookup_type is SecretLookupType.base:
params["baseSecret"] = "false"
elif lookup_type is SecretLookupType.derived:
params["baseSecret"] = "true"
response = requests.get(
url="{base_url}{resource}".format(
base_url=self.DELTA_URL,
resource=self.RESOURCE_SECRETS),
params=params,
auth=self.signer(requestor_id))
response.raise_for_status()
return response.json()
@utils.check_id("identity_id")
def signer(self, identity_id):
"""
Generates a request signer function for the
the authorizing identity.
>>> signer = api_client.signer(authorizing_identity)
:param str identity_id: the authorizing identity id
:return: the request signer function
:rtype: (:class:`PreparedRequest`) -> :class:`PreparedRequest`
"""
def sign_request(r):
# type: (requests.PreparedRequest) -> requests.PreparedRequest
signing_key = self.key_store.get_private_signing_key(identity_id)
r.headers = signer.get_updated_headers(
identity_id=identity_id,
method=r.method,
url=r.url,
headers=r.headers,
payload=r.body,
private_signing_key=signing_key)
return r
return sign_request
|
import time
from actors import Actor, Core, TAG_USER_CLASSIFER
from actors import concurentmethod, WorkAssignerActor
from twitter import Api, Status
class UserClassifierActor(Actor):
    """Actor that determines the nationality of a Twitter user.

    Work items arrive from a :class:`WorkAssignerActor`; results are returned
    as the 4-tuple documented on :meth:`classify`.
    """
    def __init__(self, core: Core):
        super().__init__(core)
        # Advertise the classifier tag so the core can route work to us.
        self.tags = [TAG_USER_CLASSIFER]

    @concurentmethod
    def classify(self, sender: Actor, uid: int, api: Api) -> (int, [(Status, str)], str, bool):
        """
        :param uid: User id to classify nationality for
        :param api: Twitter api given to us
        :return: 4-tuple (uid, list of statuses zipped with guessed language, nationality of user, authorized?)
        """
        # Bug fix: the original asserted `type(sender) == type(WorkAssignerActor)`,
        # which compares the sender's class against the *metaclass* of
        # WorkAssignerActor (i.e. `type`) and can never hold for an instance.
        assert isinstance(sender, WorkAssignerActor)
        # Placeholder implementation: no statuses, hard-coded nationality.
        return uid, [], "mk", True

    @concurentmethod
    def noUidAvailable(self, sender: Actor):
        """Called when the assigner has no work; back off, then re-request."""
        assert isinstance(sender, WorkAssignerActor)  # bug fix, see classify()
        time.sleep(15)  # wait before polling for more work
        self.send(sender, WorkAssignerActor.requestUidForClassification)
|
"""empty message
Revision ID: bcce035c4d07
Revises: f1fabccfa03d
Create Date: 2018-03-10 12:18:08.413852
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bcce035c4d07'
down_revision = 'f1fabccfa03d'
branch_labels = None
depends_on = None
def upgrade():
    """Move bookmark comments into their own ``bookmark_comments`` table and
    drop the now-unused ``bookmark_id`` link from ``comments``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('bookmark_comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('bookmark_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['bookmark_id'], ['bookmarks.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # The FK constraint must be dropped before the column it references.
    op.drop_constraint('comments_bookmark_id_fkey', 'comments', type_='foreignkey')
    op.drop_column('comments', 'bookmark_id')
    # ### end Alembic commands ###
def downgrade():
    """Restore ``comments.bookmark_id`` (plus its FK) and remove the
    ``bookmark_comments`` table. Note: data moved by ``upgrade`` is not
    migrated back."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('comments', sa.Column('bookmark_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('comments_bookmark_id_fkey', 'comments', 'bookmarks', ['bookmark_id'], ['id'])
    op.drop_table('bookmark_comments')
    # ### end Alembic commands ###
|
"""
Scraper for allrecipes.com. Collectis all recipe links from website.
Author: Ankur Duggal & John Li
"""
from .scraper_base import ScraperBase
from selectolax.parser import HTMLParser
import json
import requests
import string
import re
class AllRecipes(ScraperBase):
    """ Class for scraping recipe links from allrecipes.com. """
    def __init__(self):
        """ Default constructor. Mainly sets up parent class. """
        # Reads the per-site start URLs from base_links.json (resolved
        # relative to the current working directory -- TODO confirm callers
        # always run from the project root).
        with open('base_links.json', 'r') as f:
            links = json.load(f)
        site = 'allrecipes'
        super().__init__(site, links[site])
    # NOTE(review): the block below is a draft of parse() that was disabled
    # by wrapping it in a bare string literal; it is evaluated and discarded
    # at class-creation time and never executed. Kept verbatim.
    '''
    def parse(self):
        """ Scrapes website for recipe links. """
        # how recipe links should look like in regex
        pattern = r'.*allrecipes\.com/recipes/.*\d{6}'
        # list or recipes are organized alphabetically on website,
        # so just append letters to base link.
        page_suffix = list(string.ascii_lowercase)
        page_suffix.append('123')
        page_suffix.append('xyz')
        for suffix in page_suffix:
            response = requests.get(self.base_link + suffix)
            parser = HTMLParser(response.text)
            anchors_nodes = parser.tags('a')
            for anchor_node in anchors_nodes:
                link = anchor_node.attributes['href'] if 'href' in anchor_node.attributes else ''
                if re.fullmatch(pattern, link):
                    self.links.add('http:' + link)
    '''
|
from django.db import models
from datetime import datetime
from django.utils.timezone import now
from django.contrib.sitemaps import ping_google
class Category(models.Model):
    """A post category, with a localized (``category``) and English name."""
    category = models.CharField(max_length=20)
    category_en = models.CharField(max_length=20)
    category_image = models.CharField(max_length=300)
    # Bug fix: pass the callable `now`, not `now()`. Calling it evaluates
    # once at import/migration time, freezing the same timestamp into every
    # new row instead of using the current time per save.
    pub_date = models.DateTimeField(default=now)

    def __str__(self):
        return self.category

    def get_absolute_url(self):
        return '/category/{}/'.format(self.id)

    def save(self, force_insert=False, force_update=False, **kwargs):
        """Save the category, then best-effort notify Google of the sitemap
        change (``using``/``update_fields`` are forwarded via **kwargs)."""
        super().save(force_insert, force_update, **kwargs)
        try:
            ping_google()
        except Exception:
            # The sitemap ping is best-effort; a network failure must never
            # block saving.
            pass
class Post(models.Model):
    """A blog post belonging to a :class:`Category`."""
    # SEO fields rendered into the page <head>.
    page_title = models.CharField(max_length=60)
    page_desc = models.CharField(max_length=160)
    page_keywords = models.CharField(max_length=100)
    title = models.CharField(max_length=300)
    desc = models.TextField(blank=True)
    img = models.CharField(max_length=300, blank=True)
    img_alt = models.CharField(max_length=100, blank=True)
    vid = models.CharField(max_length=300, blank=True)
    # Bug fix (consistent with Category): use the callable `now`, not
    # `now()`, so each new post gets the current timestamp rather than the
    # module-import time.
    pub_date = models.DateTimeField(default=now)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return '/{}/'.format(self.id)

    def save(self, force_insert=False, force_update=False, **kwargs):
        """Save the post, then best-effort notify Google of the sitemap
        change (``using``/``update_fields`` are forwarded via **kwargs)."""
        super().save(force_insert, force_update, **kwargs)
        try:
            ping_google()
        except Exception:
            # Best-effort only; never let a network failure block saving.
            pass
class Section(models.Model):
    """A content block (text/image/video) belonging to a single Post."""
    title = models.CharField(max_length=300, blank=True)
    desc = models.TextField(blank=True)
    img = models.CharField(max_length=300, blank=True)  # image URL/path
    img_alt = models.CharField(max_length=100, blank=True)  # alt text for img
    vid = models.CharField(max_length=300, blank=True)  # video URL/path
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
|
def solve(n, a, b, heights):
    """Return the minimum number of explosions needed to bring every
    monster's health to zero or below.

    Each explosion deals ``a`` damage to one chosen monster and ``b``
    damage (``b < a``) to every other monster. After ``t`` explosions every
    monster has taken at least ``t * b``; a monster with remaining health
    ``r`` additionally needs ``ceil(r / (a - b))`` explosions centered on it.
    ``t`` is feasible iff the total number of required centered explosions
    is at most ``t``; feasibility is monotone, so we binary search.

    :param n: number of monsters (== len(heights))
    :param a: damage to the explosion's center monster
    :param b: damage to every other monster
    :param heights: list of monster health values
    :return: minimum number of explosions
    """
    extra = a - b  # additional damage from centering an explosion
    lo = 0                        # invariant: lo is infeasible
    hi = max(heights) // b + 1    # invariant: hi is feasible (hi * b >= max)
    while hi - lo != 1:
        mid = (lo + hi) // 2
        needed = 0
        for h in heights:
            remainder = h - mid * b
            if remainder > 0:
                needed += (remainder - 1) // extra + 1  # ceil(remainder / extra)
        if needed <= mid:
            hi = mid
        else:
            # Without moving lo up, the search would loop forever.
            lo = mid
    return hi


if __name__ == "__main__":
    N, A, B = map(int, input().split())
    H = [int(input()) for _ in range(N)]
    print(solve(N, A, B, H))
|
import unittest
class Solution:
    def get_int(self, s):
        """Parse the leading run of ASCII digits of *s* as a non-negative int.

        Stops at the first non-digit character; returns 0 for no digits.
        """
        value = 0
        for ch in s:
            if ch not in "0123456789":
                break
            value = 10 * value + int(ch)
        return value

    def myAtoi(self, str):
        """
        :type str: str
        :rtype: int

        LeetCode 8: trim whitespace, honor an optional sign, read leading
        digits, and clamp the result to the signed 32-bit range.
        """
        text = str.strip()
        if not text or text[0] not in "-+0123456789":
            return 0
        if text[0] == "-":
            result = -self.get_int(text[1:])
        elif text[0] == "+":
            result = self.get_int(text[1:])
        else:
            result = self.get_int(text)
        # Clamp to [-2**31, 2**31 - 1].
        low, high = -2 ** 31, 2 ** 31 - 1
        if result < low:
            return low
        if result > high:
            return high
        return result
class CaseCheck(unittest.TestCase):
    """Unit tests for Solution.myAtoi over the classic LeetCode 8 examples."""

    def _check(self, text, expected):
        # Shared helper: run myAtoi on a fresh Solution and compare.
        self.assertEqual(Solution().myAtoi(text), expected)

    def testSimple0(self):
        self._check("42", 42)

    def testSimple1(self):
        self._check("   -42", -42)

    def testSimple2(self):
        self._check("4193with words", 4193)

    def testSimple3(self):
        self._check("words and 987", 0)

    def testSimple4(self):
        self._check("-91283472332", -2**31)

    def testSimple5(self):
        self._check("+1", 1)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
import bson
import datetime
import json
import os
import uuid
import fs.errors
import fs.path
from .web import base
from .web.errors import FileFormException
from . import config
from . import files
from . import placer as pl
from . import util
from .dao import hierarchy
# Maps each upload strategy name to the placer class that implements it;
# process_upload() looks up `strategy.value` to place the incoming files.
Strategy = util.Enum('Strategy', {
    'targeted'       : pl.TargetedPlacer,       # Upload 1 files to a container.
    'targeted_multi' : pl.TargetedMultiPlacer,  # Upload N files to a container.
    'engine'         : pl.EnginePlacer,         # Upload N files from the result of a successful job.
    'token'          : pl.TokenPlacer,          # Upload N files to a saved folder based on a token.
    'packfile'       : pl.PackfilePlacer,       # Upload N files as a new packfile to a container.
    'label'          : pl.LabelPlacer,
    'uid'            : pl.UIDPlacer,
    'uidmatch'       : pl.UIDMatchPlacer,
    'reaper'         : pl.UIDReaperPlacer,
    'analysis'       : pl.AnalysisPlacer,       # Upload N files to an analysis as input and output (no db updates - deprecated)
    'analysis_job'   : pl.AnalysisJobPlacer,    # Upload N files to an analysis as output from job results
    'gear'           : pl.GearPlacer
})
def process_upload(request, strategy, access_logger, container_type=None, id_=None, origin=None, context=None, response=None,
                   metadata=None, file_fields=None, tempdir=None):
    """
    Universal file upload entrypoint.
    Format:
        Multipart form upload with N file fields, each with their desired filename.
        For technical reasons, no form field names can be repeated. Instead, use (file1, file2) and so forth.
        Depending on the type of upload, a non-file form field called "metadata" may/must also be sent.
        If present, it is expected to be a JSON string matching the schema for the upload strategy.
        Currently, the JSON returned may vary by strategy.
        Some examples:
        curl -F file1=@science.png -F file2=@rules.png url
        curl -F metadata=<stuff.json -F file=@data.zip url
        http --form POST url metadata=@stuff.json file@data.zip
    Features:
                                                | targeted |  reaper   | engine | packfile
        Must specify a target container         |     X    |           |   X    |
        May create hierarchy on demand          |          |     X     |        |    X
        May send metadata about the files       |     X    |     X     |   X    |    X
        MUST send metadata about the files      |          |     X     |        |    X
        Creates a packfile from uploaded files  |          |           |        |    X
    """
    log = request.logger
    if not isinstance(strategy, Strategy):
        raise Exception('Unknown upload strategy')
    # Bug fix: compare against None with `is` (identity), not `==` (PEP 8).
    if id_ is not None and container_type is None:
        raise Exception('Unspecified container type')
    allowed_container_types = ('project', 'subject', 'session', 'acquisition',
                               'gear', 'analysis',
                               'collection')
    if container_type is not None and container_type not in allowed_container_types:
        raise Exception('Unknown container type')
    timestamp = datetime.datetime.utcnow()
    container = None
    if container_type and id_:
        container = hierarchy.get_container(container_type, id_)
    # Check if filename should be basename or full path
    use_filepath = request.GET.get('filename_path', '').lower() in ('1', 'true')
    if use_filepath:
        name_fn = util.sanitize_path
    else:
        name_fn = os.path.basename
    # The vast majority of this function's wall-clock time is spent here.
    # Tempdir is deleted off disk once out of scope, so let's hold onto this reference.
    file_processor = files.FileProcessor(config.fs, local_tmp_fs=(strategy == Strategy.token), tempdir_name=tempdir)
    if not file_fields:
        form = file_processor.process_form(request, use_filepath=use_filepath)
        # Non-file form fields may have an empty string as filename, check for 'falsy' values
        file_fields = extract_file_fields(form)
        if 'metadata' in form:
            try:
                metadata = json.loads(form['metadata'].value)
            except Exception:
                raise FileFormException('wrong format for field "metadata"')
    # Normalise file names inside the caller-provided metadata the same way
    # the uploaded file names themselves are normalised.
    if isinstance(metadata, dict):
        for f in metadata.get(container_type, {}).get('files', []):
            f['name'] = name_fn(f['name'])
    elif isinstance(metadata, list):
        for f in metadata:
            f['name'] = name_fn(f['name'])
    placer_class = strategy.value
    placer = placer_class(container_type, container, id_, metadata, timestamp, origin, context, file_processor, access_logger, logger=log)
    placer.check()
    # Browsers, when sending a multipart upload, will send files with field name "file" (if singular)
    # or "file1", "file2", etc (if multiple). Following this convention is probably a good idea.
    # Here, we accept any
    # TODO: Change schemas to enabled targeted uploads of more than one file.
    # Ref docs from placer.TargetedPlacer for details.
    if strategy == Strategy.targeted and len(file_fields) > 1:
        raise FileFormException("Targeted uploads can only send one file")
    for field in file_fields:
        if hasattr(field, 'file'):
            # The hash was accumulated while streaming; finalize it now.
            field.file.close()
            field.hash = util.format_hash(files.DEFAULT_HASH_ALG, field.hasher.hexdigest())
        if not hasattr(field, 'hash'):
            field.hash = ''
        # Augment the cgi.FieldStorage with a variety of custom fields.
        # Not the best practice. Open to improvements.
        # These are presumed to be required by every function later called with field as a parameter.
        field.path = field.filename
        if not file_processor.temp_fs.exists(field.path):
            # tempdir_exists = os.path.exists(tempdir.name)
            raise Exception("file {} does not exist, files in tmpdir: {}".format(
                field.path,
                file_processor.temp_fs.listdir('/'),
            ))
        field.size = int(file_processor.temp_fs.getsize(field.path))
        field.uuid = str(uuid.uuid4())
        field.mimetype = util.guess_mimetype(field.filename)  # TODO: does not honor metadata's mime type if any
        field.modified = timestamp
        # create a file-attribute map commonly used elsewhere in the codebase.
        # Stands in for a dedicated object... for now.
        file_attrs = make_file_attrs(field, origin)
        placer.process_file_field(field, file_attrs)
    # Respond either with Server-Sent Events or a standard json map
    if placer.sse and not response:
        raise Exception("Programmer error: response required")
    elif placer.sse:
        # Returning a callable will bypass webapp2 processing and allow
        # full control over the response.
        def sse_handler(environ, start_response):  # pylint: disable=unused-argument
            write = start_response('200 OK', [
                ('Content-Type', 'text/event-stream; charset=utf-8'),
                ('Connection', 'keep-alive')
            ])
            # Instead of handing the iterator off to response.app_iter, send it ourselves.
            # This prevents disconnections from leaving the API in a partially-complete state.
            #
            # Timing out between events or throwing an exception will result in undefinied behaviour.
            # Right now, in our environment:
            #   - Timeouts may result in nginx-created 500 Bad Gateway HTML being added to the response.
            #   - Exceptions add some error json to the response, which is not SSE-sanitized.
            for item in placer.finalize():
                try:
                    write(item)
                except Exception:  # pylint: disable=broad-except
                    log.info('SSE upload progress failed to send; continuing')
            return ''
        return sse_handler
    else:
        return placer.finalize()
class Upload(base.RequestHandler):
    """Web handlers for file uploads (user/reaper-driven and engine-driven)."""
    def _create_upload_ticket(self):
        # Issue one signed upload URL per filename plus a ticket document the
        # client presents later to finish the upload. Requires a storage
        # backend with signed-URL support.
        if not hasattr(config.fs, 'get_signed_url'):
            self.abort(405, 'Signed URLs are not supported with the current storage backend')
        payload = self.request.json_body
        metadata = payload.get('metadata', None)
        filenames = payload.get('filenames', None)
        if metadata is None or not filenames:
            self.abort(400, 'metadata and at least one filename are required')
        tempdir = str(uuid.uuid4())
        # Upload into a temp folder, so we will be able to cleanup
        signed_urls = {}
        for filename in filenames:
            signed_urls[filename] = files.get_signed_url(fs.path.join('tmp', tempdir, filename),
                                                         config.fs,
                                                         purpose='upload')
        ticket = util.upload_ticket(self.request.client_addr, self.origin, tempdir, filenames, metadata)
        return {'ticket': config.db.uploads.insert_one(ticket).inserted_id,
                'urls': signed_urls}
    def _check_upload_ticket(self, ticket_id):
        # Look up the ticket and verify it is redeemed from the same client
        # IP that created it.
        ticket = config.db.uploads.find_one({'_id': ticket_id})
        if not ticket:
            self.abort(404, 'no such ticket')
        if ticket['ip'] != self.request.client_addr:
            self.abort(403, 'ticket not for this resource or source IP')
        return ticket
    def upload(self, strategy):
        """Receive a sortable reaper upload."""
        if not self.superuser_request:
            user = self.uid
            if not user:
                self.abort(403, 'Uploading requires login')
        if strategy in ['label', 'uid', 'uid-match', 'reaper']:
            # Map the URL spelling (e.g. 'uid-match') onto the Strategy name.
            strategy = strategy.replace('-', '')
        strategy = getattr(Strategy, strategy)
        context = {'uid': self.uid if not self.superuser_request else None}
        # Request for upload ticket
        if self.get_param('ticket') == '':
            return self._create_upload_ticket()
        # Check ticket id and skip permissions check if it clears
        ticket_id = self.get_param('ticket')
        if ticket_id:
            ticket = self._check_upload_ticket(ticket_id)
            # Ticketed path: file bodies were PUT to signed URLs already, so
            # synthesize lightweight field objects from the ticket filenames.
            file_fields = []
            for filename in ticket['filenames']:
                file_fields.append(
                    util.dotdict({
                        'filename': filename
                    })
                )
            return process_upload(self.request, strategy, self.log_user_access, metadata=ticket['metadata'], origin=self.origin,
                                  context=context, file_fields=file_fields, tempdir=ticket['tempdir'])
        else:
            return process_upload(self.request, strategy, self.log_user_access, origin=self.origin, context=context)
    def engine(self):
        """Handles file uploads from the engine"""
        if not self.superuser_request:
            # NOTE(review): 402 (Payment Required) looks like a typo for 403
            # -- confirm before changing, clients may match on it.
            self.abort(402, 'uploads must be from an authorized drone')
        level = self.get_param('level')
        if level is None:
            self.abort(400, 'container level is required')
        if level not in ['analysis', 'acquisition', 'session', 'subject', 'project']:
            self.abort(400, 'container level must be analysis, acquisition, session, subject or project.')
        cid = self.get_param('id')
        if not cid:
            self.abort(400, 'container id is required')
        else:
            cid = bson.ObjectId(cid)
        context = {
            'job_id': self.get_param('job'),
            'job_ticket_id': self.get_param('job_ticket'),
        }
        # Request for upload ticket
        if self.get_param('upload_ticket') == '':
            return self._create_upload_ticket()
        # Check ticket id and skip permissions check if it clears
        ticket_id = self.get_param('upload_ticket')
        if ticket_id:
            ticket = self._check_upload_ticket(ticket_id)
            file_fields = []
            for filename in ticket['filenames']:
                file_fields.append(
                    util.dotdict({
                        'filename': filename
                    })
                )
            # Analysis output goes through its own placer; everything else
            # uses the engine placer.
            strategy = Strategy.analysis_job if level == 'analysis' else Strategy.engine
            return process_upload(self.request, strategy, self.log_user_access, metadata=ticket['metadata'], origin=self.origin,
                                  context=context, file_fields=file_fields, tempdir=ticket['tempdir'],
                                  container_type=level, id_=cid)
        else:
            strategy = Strategy.analysis_job if level == 'analysis' else Strategy.engine
            return process_upload(self.request, strategy, self.log_user_access, container_type=level, id_=cid,
                                  origin=self.origin, context=context)
    def clean_packfile_tokens(self):
        """Clean up expired upload tokens and invalid token directories.
        Ref placer.TokenPlacer and FileListHandler.packfile_start for context.
        """
        if not self.superuser_request:
            self.abort(402, 'uploads must be from an authorized drone')
        # Race condition: we could delete tokens & directories that are currently processing.
        # For this reason, the modified timeout is long.
        result = config.db['tokens'].delete_many({
            'type': 'packfile',
            'modified': {'$lt': datetime.datetime.utcnow() - datetime.timedelta(hours=1)},
        })
        removed = result.deleted_count
        if removed > 0:
            self.log.info('Removed %s expired packfile tokens', removed)
        # Next, find token directories and remove any that don't map to a token.
        # This logic is used by:
        #   TokenPlacer.check
        #   PackfilePlacer.check
        #   upload.clean_packfile_tokens
        #
        # It must be kept in sync between each instance.
        folder = fs.path.join('tokens', 'packfile')
        util.mkdir_p(folder, config.fs)
        try:
            paths = config.fs.listdir(folder)
        except fs.errors.ResourceNotFound:
            # Non-local storages are used without 0-blobs for "folders" (mkdir_p is a noop)
            paths = []
        cleaned = 0
        for token in paths:
            path = fs.path.join(folder, token)
            result = None
            try:
                result = config.db['tokens'].find_one({
                    '_id': token
                })
            except bson.errors.InvalidId:
                # Folders could be an invalid mongo ID, in which case they're definitely expired :)
                pass
            if result is None:
                self.log.info('Cleaning expired token directory %s', token)
                config.fs.removetree(path)
                cleaned += 1
        return {
            'removed': {
                'tokens': removed,
                'directories': cleaned,
            }
        }
def extract_file_fields(form):
    """Collect every populated file field from *form*.

    A field name may map to a single field or (for repeated names) a list of
    fields; fields whose ``filename`` is falsy (non-file form inputs) are
    skipped. Returns the matching field objects in form order.
    """
    collected = []
    for fieldname in form:
        entry = form[fieldname]
        candidates = entry if isinstance(entry, list) else [entry]
        for candidate in candidates:
            if candidate.filename:
                collected.append(candidate)
    return collected
def make_file_attrs(field, origin):
    """Build the file-attribute map commonly used elsewhere in the codebase.

    Stands in for a dedicated file object... for now. The file type is
    guessed from the filename; modality/classification/tags/info start empty.
    """
    return {
        '_id': field.uuid,
        'name': field.filename,
        'modified': field.modified,
        'size': field.size,
        'mimetype': field.mimetype,
        'hash': field.hash,
        'origin': origin,
        'type': files.guess_type_from_filename(field.filename),
        'modality': None,
        'classification': {},
        'tags': [],
        'info': {}
    }
|
import sys
from rosalind_utility import parse_fasta
def semiglobal_alignment(str1, str2):
    """Align str1 and str2 with free end gaps (semiglobal alignment).

    Scoring: +1 for a match, -1 for a mismatch, -1 per gap.  End gaps on
    either sequence are not penalized.  Returns a tuple of
    (max_score, aligned_str1, aligned_str2).
    """
    # Sentinel column/row so matrix index i/j maps to character i/j.
    str1 = "-" + str1
    str2 = "-" + str2
    rows, cols = len(str1), len(str2)
    scores = [[0] * cols for _ in range(rows)]
    moves = [[None] * cols for _ in range(rows)]

    # Fill the DP matrix; ties prefer diagonal, then up (same as max order).
    for i in range(1, rows):
        for j in range(1, cols):
            diag = scores[i - 1][j - 1] + (1 if str1[i] == str2[j] else -1)
            up = scores[i - 1][j] - 1
            left = scores[i][j - 1] - 1
            best = max(diag, up, left)
            scores[i][j] = best
            if best == diag:
                moves[i][j] = "d"
            elif best == up:
                moves[i][j] = "u"
            else:
                moves[i][j] = "l"

    # The optimum may end anywhere on the last row or last column.
    best_j = max(range(cols), key=lambda c: scores[rows - 1][c])
    best_i = max(range(rows), key=lambda r: scores[r][cols - 1])
    if scores[rows - 1][best_j] >= scores[best_i][cols - 1]:
        i, j = rows - 1, best_j
    else:
        i, j = best_i, cols - 1
    max_score = scores[i][j]

    def insert_gap(word, pos):
        # Splice a single gap character into word at index pos.
        return word[:pos] + '-' + word[pos:]

    # Start from the raw strings and pad the unaligned tails with gaps.
    aligned_1, aligned_2 = str1[1:], str2[1:]
    aligned_2 += '-' * (rows - 1 - i)
    aligned_1 += '-' * (cols - 1 - j)

    # Walk the backtrack matrix until one sequence is exhausted.
    while i and j:
        move = moves[i][j]
        if move == "u":
            i -= 1
            aligned_2 = insert_gap(aligned_2, j)
        elif move == "l":
            j -= 1
            aligned_1 = insert_gap(aligned_1, i)
        else:
            i -= 1
            j -= 1

    # Pad the unaligned heads with leading gaps.
    aligned_2 = '-' * i + aligned_2
    aligned_1 = '-' * j + aligned_1
    return max_score, aligned_1, aligned_2
if __name__ == "__main__":
    '''
    Given: Two protein strings s and t in FASTA format (each having length at most 1000 aa).
    Return: A maximum alignment score along with substrings r and u of s and t, respectively, which produce this maximum
    alignment score (multiple solutions may exist, in which case you may output any one). Use:
    The PAM250 scoring matrix.
    Linear gap penalty equal to 5.
    '''
    # NOTE(review): the problem statement above asks for PAM250 with gap
    # penalty 5, but semiglobal_alignment scores +1 match / -1 mismatch /
    # -1 gap — confirm which scoring scheme is actually intended.
    input_lines = sys.stdin.read().splitlines()
    # parse_fasta is run twice on the same input; the first and second
    # records are the two sequences to align.
    stringA = list(parse_fasta(input_lines).values())[0]
    stringB = list(parse_fasta(input_lines).values())[1]
    score, aligned_strA, aligned_strB = semiglobal_alignment(stringA, stringB)
    print(score)
    print(aligned_strA)
    print(aligned_strB)
|
import random as r, csv

# Fixed vocabularies for the synthetic mortgage-token records.
issuers = ('Adventure Lending', 'ABC Lending', 'Global Lending')
property_types = ('Multi-family', 'Single-family', 'Retail', 'Office')
age_ranges = ('20-25', '26-30', '30-40', '40-50', '50-60')
currency = 'USD'
asset_type = "Mortgage Token"

# Generate 254 random rows of mortgage data.
# newline='' stops the csv module from emitting blank lines on Windows.
with open('mortgage_data.csv', mode='w+', newline='') as mortgage_data:
    w = csv.writer(mortgage_data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for n in range(254):
        asset_id = "MT" + str(r.randint(1000000, 9999999))
        issuer = r.choice(issuers)  # r.choice instead of indexing by randint
        address = str(r.randint(1000, 9999)) + ' REposit St. San Francisco, CA 94401'
        tax_id = '1-45-83-0453-' + str(r.randint(1000, 9999)) + '-' + str(r.randint(1000, 9999))
        appraised_value = str(r.randint(2000000, 15000000))
        market_value = round(int(appraised_value) * 1.25)
        ltv = round(r.uniform(.4, .7), 2)  # loan-to-value ratio
        property_type = r.choice(property_types)
        age_range = r.choice(age_ranges)
        fico = str(r.randint(600, 800))
        dti = str(round(r.uniform(.1, .6), 2))  # debt-to-income ratio
        # The original computed (1 - ltv) * market_value and then immediately
        # overwrote it with this value; only this definition is ever used.
        loan_amount = round(ltv * market_value, 2)
        unpaid_amount = round(r.uniform(.8, .9) * loan_amount, 2)
        rate = str(round(r.uniform(.02, .06), 4))
        term = str(r.randint(20, 30) * 12)
        w.writerow([asset_id, asset_type, issuer, currency, address, tax_id, appraised_value, market_value, ltv, property_type, age_range, fico, dti, loan_amount, unpaid_amount, rate, term])
|
"""A number-guessing game."""
# Put your code here
#let's make a random number selection
import random
randomizer = random.randrange(1, 100)
#print(randomizer)
#let's greet the player
name = input('Hello, what is your name? ')
print(name + ", I'm thinking of a number between 1 and 100. \n Try to guess my number.")
count_tries = 0
# let's create a randomized congratulation message!
congrats = ['Awesome, ', "Well done, ", "Terrific, ", "Good job, "]
random_congrats = random.choice(congrats)
#let's loop through the game
guess = input("Your guess? ")
while randomizer != guess:
count_tries += 1
guess_as_int = int(guess)
#if guess is more than the value of randomizer, then
#we should print "too high", and ask to guess again
if guess_as_int > randomizer:
print("Your guess is too high, try again.")
guess = input("Your guess? ")
#elif, print "too low"
elif guess_as_int < randomizer:
print("Your guess is too low, try again.")
guess = input("Your guess? ")
#else, we congratulate the player and stop the loop
else:
print(f"{random_congrats}{name}! That took you {count_tries} tries.")
break
|
from functools import cached_property
from onegov.core.elements import Confirm
from onegov.core.elements import Intercooler
from onegov.core.elements import Link
from onegov.wtfs import _
from onegov.wtfs.layouts.default import DefaultLayout
from onegov.wtfs.security import AddModel
from onegov.wtfs.security import AddModelUnrestricted
from onegov.wtfs.security import DeleteModel
from onegov.wtfs.security import EditModel
from onegov.wtfs.security import EditModelUnrestricted
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from onegov.core.elements import Element
class UsersLayout(DefaultLayout):
    """Layout for the user collection view."""

    @cached_property
    def title(self) -> str:
        return _("Users")

    @cached_property
    def editbar_links(self) -> list['Element']:
        # At most one "Add" link is shown; the unrestricted variant wins.
        links: list['Element'] = []
        for permission, view_name in (
            (AddModelUnrestricted, 'add-unrestricted'),
            (AddModel, 'add'),
        ):
            if self.request.has_permission(self.model, permission):
                links.append(
                    Link(
                        text=_("Add"),
                        url=self.request.link(self.model, name=view_name),
                        attrs={'class': 'add-icon'}
                    )
                )
                break
        return links

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(self.title, self.users_url)
        ]
class UserLayout(DefaultLayout):
    """Layout for a single user's detail view."""

    @cached_property
    def title(self) -> str:
        return self.model.title

    @cached_property
    def editbar_links(self) -> list['Element']:
        links: list['Element'] = []

        # At most one "Edit" link is shown; the unrestricted variant wins.
        if self.request.has_permission(self.model, EditModelUnrestricted):
            edit_view = 'edit-unrestricted'
        elif self.request.has_permission(self.model, EditModel):
            edit_view = 'edit'
        else:
            edit_view = None
        if edit_view is not None:
            links.append(
                Link(
                    text=_("Edit"),
                    url=self.request.link(self.model, edit_view),
                    attrs={'class': 'edit-icon'}
                )
            )

        # Deleting asks for confirmation and issues a DELETE via Intercooler.
        if self.request.has_permission(self.model, DeleteModel):
            delete_traits = (
                Confirm(
                    _("Do you really want to delete this user?"),
                    _("This cannot be undone."),
                    _("Delete"),
                    _("Cancel")
                ),
                Intercooler(
                    request_method='DELETE',
                    redirect_after=self.users_url
                )
            )
            links.append(
                Link(
                    text=_("Delete"),
                    url=self.csrf_protected_url(self.request.link(self.model)),
                    attrs={'class': 'delete-icon'},
                    traits=delete_traits
                )
            )
        return links

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        return [
            Link(_("Homepage"), self.homepage_url),
            Link(_("Users"), self.users_url),
            Link(self.title, '#')
        ]
class AddUserLayout(DefaultLayout):
    """Layout for the user creation form."""

    @cached_property
    def title(self) -> str:
        return _("Add user")

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        trail: list['Element'] = [Link(_("Homepage"), self.homepage_url)]
        trail.append(Link(_("Users"), self.users_url))
        trail.append(Link(_("Add"), '#'))
        return trail

    @cached_property
    def cancel_url(self) -> str:
        # Cancelling returns to the user list.
        return self.users_url

    @cached_property
    def success_url(self) -> str:
        # After a successful add, go back to the user list.
        return self.users_url
class EditUserLayout(DefaultLayout):
    """Layout for the user edit form."""

    @cached_property
    def title(self) -> str:
        return _("Edit user")

    @cached_property
    def breadcrumbs(self) -> list['Element']:
        trail: list['Element'] = [Link(_("Homepage"), self.homepage_url)]
        trail.append(Link(_("Users"), self.users_url))
        trail.append(Link(self.model.title, self.request.link(self.model)))
        trail.append(Link(_("Edit"), '#'))
        return trail

    @cached_property
    def cancel_url(self) -> str:
        # Cancelling returns to the edited user's detail page.
        return self.request.link(self.model)

    @cached_property
    def success_url(self) -> str:
        # After a successful edit, go back to the user list.
        return self.users_url
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Neighborhood(models.Model):
    """A residential area administered by a single user."""

    name = models.CharField(max_length=50)
    location = models.ForeignKey('Location', on_delete=models.CASCADE, null=True)
    admin = models.ForeignKey(User, on_delete=models.CASCADE)
    # Nullable: a neighborhood may not have a known occupant count yet.
    occupants = models.IntegerField(null=True)

    def __str__(self):
        return self.name

    def create_neighborhood(self):
        # Thin convenience wrapper around save().
        self.save()

    def delete_neighborhood(self):
        self.delete()

    @classmethod
    def find_neighborhood(cls, neigborhood_id):
        """Return the neighborhood with the given primary key.

        The parameter keeps its historical misspelling ('neigborhood_id')
        for keyword-argument compatibility with existing callers.
        """
        neighborhood = cls.objects.get(id=neigborhood_id)
        return neighborhood

    def update_neighborhood(self):
        self.save()

    def update_occupants(self):
        # occupants is nullable; treat a missing count as zero so the first
        # increment does not raise TypeError on `None + 1`.
        self.occupants = (self.occupants or 0) + 1
        self.save()
class UserProfile(models.Model):
    """Extra profile information attached to a Django auth User."""

    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='user_profile')
    first_name = models.CharField(max_length=50, null=True)
    last_name = models.CharField(max_length=50, null=True)
    bio = models.CharField(max_length=100,null=True)
    # Deleting the neighborhood also deletes its member profiles (CASCADE).
    neighborhood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE)
    email = models.EmailField(max_length=60)

    def __str__(self):
        # Display as the owning auth user's username.
        return self.user.username
class Company(models.Model):
    """A business registered within a neighborhood."""

    name = models.CharField(max_length=60)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='company_user')
    description = models.CharField(max_length=150, null=True)
    neighborhood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE, related_name='company_neighbourhood')
    category = models.ForeignKey('Category', on_delete=models.CASCADE, null=True)
    email = models.EmailField(max_length=60)

    def __str__(self):
        return self.name

    def create_company(self):
        # Thin convenience wrapper around save().
        self.save()

    def delete_company(self):
        self.delete()

    @classmethod
    def find_company(cls, company_id):
        """Return the company with the given primary key."""
        # Use cls instead of the hard-coded class name so the classmethod
        # behaves correctly for subclasses and matches the convention of
        # Neighborhood.find_neighborhood.
        company = cls.objects.get(id=company_id)
        return company

    def update_company(self):
        self.save()
class Post(models.Model):
    """A post published by a user within a neighborhood."""

    title = models.CharField(max_length=50)
    content = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    neighborhood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE)
    # Free-text post kind; semantics are defined by the views/forms that
    # set it (not visible here).
    type = models.CharField(max_length=50, null=True)
    pub_date = models.DateTimeField(auto_now_add=True, null=True)

    def __str__(self):
        return self.title
class Comment(models.Model):
    """A user comment on a Post."""

    post = models.ForeignKey(Post, on_delete=models.CASCADE, null=True)
    comment = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    pub_date = models.DateTimeField(auto_now_add=True, null=True)

    def __str__(self):
        # Display the raw comment text.
        return self.comment
class Location(models.Model):
    """A named geographical location a neighborhood belongs to."""

    name = models.CharField(max_length=40)

    def __str__(self):
        return self.name
class Category(models.Model):
    """A named business category for companies."""

    name = models.CharField(max_length=40)

    def __str__(self):
        return self.name
|
from orun.db import models
from orun.utils.translation import gettext_lazy as _
class ActivityType(models.Model):
    """Template describing a kind of scheduled activity ('mail.activity.type').

    Holds scheduling defaults (delay, activity chaining) and presentation
    details (icon, decoration) applied when activities of this type are
    created.
    """

    name = models.CharField(_('Name'), null=False, translate=True)
    summary = models.CharField(_('Summary'), translate=True)
    # Lower sequence numbers sort first (see Meta.ordering).
    sequence = models.IntegerField(_('Sequence'), default=10)
    active = models.BooleanField(default=True, verbose_name=_('Active'))
    # Deadline scheduling: deadline = delay_count delay_unit after the
    # reference date chosen by delay_from.
    delay_count = models.IntegerField(
        verbose_name=_('After'), default=0,
        help_text='Number of days/week/month before executing the action. It allows to plan the action deadline.'
    )
    delay_unit = models.SelectionField(
        (
            ('days', 'days'),
            ('weeks', 'weeks'),
            ('months', 'months')
        ), verbose_name="Delay units", help_text="Unit of delay", null=False, default='days'
    )
    delay_from = models.SelectionField(
        (
            ('current_date', 'after validation date'),
            ('previous_activity', 'after previous activity deadline')
        ), verbose_name="Delay Type", help_text="Type of delay", null=False, default='previous_activity'
    )
    icon = models.CharField(verbose_name=_('Icon'), help_text="Font awesome icon e.g. fa-tasks")
    decoration_type = models.SelectionField(
        (
            ('warning', 'Alert'),
            ('danger', 'Error')
        ), verbose_name="Decoration Type",
        help_text="Change the background color of the related activities of this type."
    )
    res_model_id = models.ForeignKey(
        'content.type', 'Model', index=True,
        filter=['&', ('is_mail_thread', '=', True), ('transient', '=', False)],
        help_text='Specify a model if the activity should be specific to a model'
        ' and not available when managing activities for other models.'
    )
    default_next_type_id = models.ForeignKey(
        'mail.activity.type', verbose_name=_('Default Next Activity'),
        filter="['|', ('res_model_id', '=', False), ('res_model_id', '=', res_model_id)]"
    )
    force_next = models.BooleanField(verbose_name=_("Auto Schedule Next Activity"), default=False)
    # Self-referential m2m pairs sharing the relation table
    # 'mail_activity_rel', read in opposite directions.
    next_type = models.ManyToManyField(
        'mail.activity.type', 'mail_activity_rel', 'activity_id', 'recommended_id',
        filter="['|', ('res_model_id', '=', False), ('res_model_id', '=', res_model_id)]",
        verbose_name='Recommended Next Activities')
    previous_type = models.ManyToManyField(
        'mail.activity.type', 'mail_activity_rel', 'recommended_id', 'activity_id',
        filter="['|', ('res_model_id', '=', False), ('res_model_id', '=', res_model_id)]",
        verbose_name='Preceding Activities')
    # NOTE(review): (('default', 'Other')) is a single 2-tuple, not a tuple
    # of choice pairs — a trailing comma may be missing; confirm what
    # ChoiceField expects here.
    category = models.ChoiceField(
        (
            ('default', 'Other')
        ), default='default',
        verbose_name='Category',
        help_text='Categories may trigger specific behavior like opening calendar view'
    )
    # NOTE(review): Activity.mail_template_ids relates to
    # 'activity_type_id.mail_template_ids', but this field is named
    # 'mail_template' — verify the related-path spelling.
    mail_template = models.ManyToManyField('mail.template', verbose_name='Mails templates')
    # Fields for display purpose only
    initial_res_model_id = models.ForeignKey(
        'content.type', 'Initial model', compute="_compute_initial_res_model_id", store=False,
        help_text='Technical field to keep trace of the model at the beginning of the edition for UX related behaviour'
    )
    model_has_change = models.BooleanField(
        verbose_name="Model has change",
        help_text="Technical field for UX related behaviour", default=False,
        store=False
    )

    class Meta:
        name = 'mail.activity.type'
        verbose_name = 'Activity Type'
        ordering = ('sequence', 'pk')
class Activity(models.Model):
    """A scheduled activity attached to an arbitrary document ('mail.activity')."""

    # Generic reference to the related document: numeric id plus content type.
    object_id = models.IntegerField('Related Document ID', index=True, null=False)
    model = models.ForeignKey(
        'content.type', 'Content Type',
        index=True, ondelete=models.CASCADE, null=False)
    # NOTE(review): related='res_model_id.model', but the FK above is named
    # `model`, not `res_model_id` — verify the related path.
    model_name = models.CharField(
        'Related Document Model',
        index=True, related='res_model_id.model', store=True, readonly=True)
    object_name = models.CharField(
        'Document Name', compute='_compute_res_name', store=True,
        help_text="Display name of the related document.", readonly=True)
    # activity
    activity_type_id = models.ForeignKey(
        'mail.activity.type', verbose_name=_('Activity'),
    )
    # Read-only mirrors of the activity type's presentation fields.
    activity_category = models.SelectionField(related='activity_type_id.category', readonly=True)
    activity_decoration = models.SelectionField(related='activity_type_id.decoration_type', readonly=True)
    icon = models.CharField('Icon', related='activity_type_id.icon', readonly=True)
    summary = models.CharField('Summary')
    note = models.HtmlField('Note')
    feedback = models.HtmlField('Feedback')
    date_deadline = models.DateField('Due Date', index=True, null=False)
    automated = models.BooleanField(
        'Automated activity', readonly=True,
        help_text='Indicates this activity has been created automatically and not by any user.')
    # description
    user_id = models.ForeignKey(
        'res.users', 'Assigned to',
        default=lambda self: self.env.user,
        index=True, null=False)
    create_user_id = models.ForeignKey(
        'res.users', 'Creator',
        default=lambda self: self.env.user,
        index=True)
    # Computed scheduling state relative to today.
    state = models.SelectionField([
        ('overdue', 'Overdue'),
        ('today', 'Today'),
        ('planned', 'Planned')], 'State',
        compute='_compute_state')
    recommended_activity_type_id = models.ForeignKey('mail.activity.type', verbose_name="Recommended Activity Type")
    previous_activity_type_id = models.ForeignKey('mail.activity.type', verbose_name='Previous Activity Type',
                                                  readonly=True)
    has_recommended_activities = models.BooleanField(
        'Next activities available',
        compute='_compute_has_recommended_activities',
        help_text='Technical field for UX purpose')
    # NOTE(review): ActivityType declares `mail_template`, not
    # `mail_template_ids` — verify this related path.
    mail_template_ids = models.ManyToManyField(related='activity_type_id.mail_template_ids', readonly=False)
    force_next = models.BooleanField(related='activity_type_id.force_next', readonly=False)

    class Meta:
        title_field = 'summary'
        name = 'mail.activity'
        verbose_name = 'Activity'
        ordering = ('date_deadline',)
class MailActivityMixin(models.Model):
    """Abstract mixin adding activity tracking to any model.

    Exposes the document's activities plus convenience fields derived from
    them (state, responsible user, next deadline, summary).
    """

    activity_ids = models.OneToManyField(
        'mail.activity', 'object_id', verbose_name=_('Activities'),
        # auto_join=True,
        groups="base.group_user",
        # Restrict the inverse relation to activities whose model_name
        # matches the concrete model mixing this in.
        filter=lambda self: {'model_name': self._meta.name}
    )
    activity_state = models.SelectionField(
        (
            ('overdue', 'Overdue'),
            ('today', 'Today'),
            ('planned', 'Planned')
        ), verbose_name='Activity State',
        compute='_compute_activity_state',
        groups="base.group_user",
        help_text='Status based on activities\nOverdue: Due date is already passed\n'
                  'Today: Activity date is today\nPlanned: Future activities.'
    )
    activity_user_id = models.ForeignKey(
        'res.users', 'Responsible User',
        related='activity_ids.user_id', readonly=False,
        search='_search_activity_user_id',
        groups="base.group_user"
    )
    activity_type_id = models.ForeignKey(
        'mail.activity.type', 'Next Activity Type',
        related='activity_ids.activity_type_id', readonly=False,
        search='_search_activity_type_id',
        groups="base.group_user"
    )
    activity_date_deadline = models.DateField(
        'Next Activity Deadline',
        compute='_compute_activity_date_deadline', search='_search_activity_date_deadline',
        readonly=True, store=False,
        groups="base.group_user"
    )
    activity_summary = models.CharField(
        'Next Activity Summary',
        related='activity_ids.summary', readonly=False,
        search='_search_activity_summary',
        groups="base.group_user",
    )

    class Meta:
        # Abstract: contributes fields to concrete models, no table itself.
        abstract = True
|
import unittest
import mimec
from email.message import Message
from email.parser import FeedParser
def mail(data):
    """Parse raw RFC 2822 message text into an email.message.Message."""
    feed = FeedParser()
    feed.feed(data)
    return feed.close()
def envelope(message):
    """Snapshot the addressing headers of *message* as comparable strings.

    Each value is the str() of Message.get_all(), i.e. a stringified list
    of header values, or 'None' when the header is absent.
    """
    return {
        key: str(message.get_all(header))
        for key, header in (('from', 'From'), ('to', 'To'), ('subject', 'Subject'))
    }
class TestMimeCompiler(unittest.TestCase):
    """Unit tests for mimec.MimeCompiler."""

    def test_lift(self):
        # lift() should wrap a plain message into a multipart/mixed
        # container while preserving the envelope headers and payload.
        message = mail('''from: john doe <john@inter.net>
to: tiffany <breakfast@tiffany.com>
subject: Hello
this is plain message''')
        e = envelope(message)
        p = message.get_payload()
        m = mimec.MimeCompiler(message)
        m.lift()
        message = m.close()
        self.assertEqual(e, envelope(message))
        self.assertTrue(message.is_multipart())
        self.assertEqual('multipart/mixed', message.get_content_type())
        # walk() yields the container first, then the single wrapped part.
        root, wrapped = list(message.walk())
        self.assertIs(root, message)
        self.assertEqual('text/plain', wrapped.get_content_type())
        self.assertEqual(p, wrapped.get_payload())

    def test_add_to(self):
        # Adding recipients to an empty message creates one To header per entry.
        m = mimec.MimeCompiler(Message())
        to = ['spam <spam@greenmidget.com>', 'egg@greenmidget.com']
        m.add_to(to)
        message = m.close()
        self.assertEqual(to, message.get_all('To'))

    def test_add_to_duplicate(self):
        # Addresses already present in an existing To header must not be
        # added again; only genuinely new addresses get a new header.
        message = mail('''from: announcer@gc.gov
to: <harold@onetwo.com>, Bobby <bob@theotherdomain.net>
To: <someone@else.org>
subject: Announcement from genetic control
This is a announcement from genetic control. It is my sad duty to inform you
of a four foot restriction, on humanoid hight
''')
        m = mimec.MimeCompiler(message)
        m.add_to(['bob@theotherdomain.net', 'Bobby <notbob@theotherdomain.net>'])
        message = m.close()
        self.assertEqual([
            '<harold@onetwo.com>, Bobby <bob@theotherdomain.net>',
            '<someone@else.org>',
            'Bobby <notbob@theotherdomain.net>'
        ], message.get_all('To'))
|
# Generated by Django 2.0 on 2019-11-06 07:49
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Orders messages newest-first and rewrites the added_at default.

    NOTE: the datetime literal below is the fixed timestamp captured when
    makemigrations ran (2019-11-06 07:49, matching the generation header),
    not "now" at runtime — standard behavior for generated migrations.
    """

    dependencies = [
        ('chats', '0003_auto_20191030_1613'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='message',
            options={'ordering': ('-added_at',)},
        ),
        migrations.AlterField(
            model_name='message',
            name='added_at',
            field=models.DateTimeField(default=datetime.datetime(2019, 11, 6, 7, 49, 25, 639921, tzinfo=utc)),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# this script serves to use the k-means
# to cluster articles within a category
#
# @author Yuan JIN
# @contact chengdujin@gmail.com
# @since 2012.03.15
# @latest 2012.03.21
#
import redis

# Redis connection used for word co-occurrence lookups throughout this module.
REDIS_SERVER = 'localhost'
r = redis.StrictRedis(REDIS_SERVER)
# reload the script encoding
# (Python 2 only: reload() and sys.setdefaultencoding do not exist in Python 3)
import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
class Cluster(object):
    """A centroid plus the points currently assigned to it."""

    def __init__(self):
        # Chosen cluster center (a point object); None until assigned.
        self.centroid = None
        # Member points, excluding the centroid itself.
        self.points = []
        # Auxiliary per-word bookkeeping (usage not visible in this class).
        self.word_map = {}
class KMeans(object):
    'a simple implementation of k-means algorithm'
    # NOTE: this class is Python 2 code (`<>`, print statements,
    # OrderedDict.keys() indexing) and talks to the module-level redis
    # connection `r` for word-distance lookups.

    def __init__(self, docs, k=None):
        # docs: iterable of documents, each an iterable of points
        # (points are media.Segment instances per the comments below).
        # k: number of clusters to seed.
        self.k = k
        self.docs = docs
        self.points, self.centroids = self.init_centroids(docs, k)
        self.clusters = []
    def init_centroids(self, docs, limit):
        'pick the inital centroids among the data points'
        'the candidates should be stored in self.centroids'
        # merge all the points
        points = []
        for doc in docs:
            points.extend(doc)
        import random
        from ordereddict import OrderedDict
        centroids = []
        # Assign every point a random weight, then take the `limit`
        # highest-weighted points — i.e. a random sample of centroids.
        weighted_points = {}
        for point in points:
            weight = random.random() * 10000
            weighted_points[point] = weight
        # sort out limit number of candidate centroids
        weighted_points = OrderedDict(sorted(weighted_points.items(), key=lambda d: -d[1]))
        for i in range(limit):
            centroids.append(weighted_points.keys()[i])
        return points, centroids
    def distance(self, centroid, point):
        'calculate the distance between the two points'
        # Returns a similarity-like value: larger means closer.
        if not centroid or not point:
            return 0
        # a point (incl. centroid) is an instance of media.Segment
        if r.exists(u'@%s' % centroid.word) or r.exists(u'@%s' % point.word):
            if centroid.word <> point.word:
                # Look up the pair in both key orders.
                # NOTE(review): redis GET returns strings; the comparisons
                # and the division below mix strings and numbers under
                # Python 2 semantics — verify values are numeric.
                first = r.get(u'%s:%s' % (centroid.word, point.word))
                first = first if first else 0
                second = r.get(u'%s:%s' % (point.word, centroid.word))
                second = second if second else 0
                if first or second:
                    return first if first > second else second
                else: # if one word does not really exists, which is highly possible
                    # compute the average value
                    compounds = r.keys(u'%s:*' % (centroid.word if first else point.word))
                    total = 0
                    for v in compounds:
                        total += float(r.get(v))
                    return total / float(len(compounds))
            else: # if two words are the same, they should have the closest distance
                # 1e30000 overflows to float infinity.
                return 1e30000
        else: # no words actually exists
            return 1 / float(len(r.keys('@*')))
    def find_closest_centroid(self, point):
        'find the closest centroid by distance'
        # "Closest" is the centroid with the LARGEST distance() value,
        # since distance() is similarity-like.
        maximum = float(0)
        closest = None
        for centroid in self.centroids:
            dist = self.distance(centroid, point)
            if dist > maximum:
                maximum = dist
                closest = centroid
        return closest
    def find_centroid(self, cluster):
        'find the centroid within a cluster by finding out the smallest average value from a node to others'
        # (Per the similarity convention this actually picks the member
        # with the LARGEST average distance() to the other members.)
        all_points = []
        all_points.extend(cluster.points)
        all_points.append(cluster.centroid)
        new_centroid = None
        max_dist = 0
        for point in all_points:
            dists = [self.distance(point, p) if point <> p else 0 for p in all_points]
            total_dist = 0
            for dist in dists:
                total_dist += dist
            average_dist = float(total_dist) / float(len(all_points) - 1)
            if average_dist > max_dist:
                max_dist = average_dist
                new_centroid = point
        return new_centroid
    def cluster(self):
        ''
        # Iteratively assign points to their closest centroid and then
        # recompute centroids, until no centroid changes.
        # array of Cluster instances
        clusters = []
        centroids_changed = True
        counter = 1
        while centroids_changed:
            print counter, 'round'
            # NOTE(review): this break caps the loop at a single round,
            # making the while condition moot — confirm whether it is a
            # leftover debugging guard.
            if counter > 1:
                break
            counter += 1
            for point in self.points:
                # point is an instance of media.Segment
                # the closest centroid to point
                closest = self.find_closest_centroid(point)
                # gotch is an instance of Cluster
                gotcha = None
                for cluster in clusters:
                    # a cluster is an instance of Cluster
                    # a cluster contains a centroid and several points
                    if closest == cluster.centroid:
                        gotcha = cluster
                        break
                if not gotcha:
                    gotcha = Cluster()
                    gotcha.centroid = closest
                    clusters.append(gotcha)
                gotcha.points.append(point)
            # calculate new centroids from current ones
            # NOTE(review): this sets `centroid_changed` (singular) while the
            # loop tests `centroids_changed` (plural) — the flag is never
            # updated, which is likely a bug masked by the break above.
            centroid_changed = False
            old_clusters = clusters
            clusters = []
            for old_cluster in old_clusters:
                if len(old_cluster.points):
                    new_centroid = self.find_centroid(old_cluster)
                    if new_centroid <> old_cluster.centroid:
                        centroid_changed = True
                        new_cluster = Cluster()
                        new_cluster.centroid = new_centroid
                        clusters.append(new_cluster)
                    else:
                        clusters.append(old_cluster)
        self.clusters = clusters
    def publish(self):
        # Print each cluster (centroid first, then members) and return the
        # clusters as lists of terms.
        words = []
        for cluster in self.clusters:
            terms = []
            # first one is the center node of the cluster
            terms.append(cluster.centroid)
            terms.extend(cluster.points)
            if terms:
                print cluster.centroid.word;
                print ','.join([p.word for p in cluster.points])
                print
                words.append(terms)
        if words:
            return words
if __name__ == '__main__':
    'test ground'
    # NOTE(review): KMeans.__init__ has signature (docs, k=None); this call
    # passes 4 as `docs` and None as `k`, which will fail inside
    # init_centroids — the arguments look swapped. Confirm intended usage.
    kmeans = KMeans(4, None)
|
# coding: utf-8
# Game board. It needs a height and a width in order
# to be instantiated
class board(object):
BLACK = 1
WHITE = 0
NOTDONE = -1
def __init__(self, height, width, firstPlayer):
"""
Constructs a board, right now maxDepth is statically assigned
"""
# Set the height and width of the game board
self.width = width
self.height = height
# Create two lists which will contain the pieces each player posesses
self.blacklist = []
self.whitelist = []
# Set default piece positions
for i in range(width):
self.blacklist.append((i, (i+1)%2))
self.whitelist.append((i, height - (i%2) - 1))
# boardState contains the current state of the board for printing/eval
self.boardState = [[' '] * self.width for x in range(self.height)]
self.gameWon = self.NOTDONE
self.turn = firstPlayer
self.maxDepth = 10
# Generate an iterator for all of the moves
def iterWhiteMoves(self):
"""
Main generator for white moves
"""
for piece in self.whitelist:
for move in self.iterWhitePiece(piece):
yield move
def iterBlackMoves(self):
"""
Main Generator for black moves
"""
for piece in self.blacklist:
for move in self.iterBlackPiece(piece):
yield move
def iterWhitePiece(self, piece):
"""
Generates possible moves for a white piece
"""
return self.iterBoth(piece, ((-1,-1),(1,-1)))
def iterBlackPiece(self, piece):
"""
Generates possible moves for a black piece
"""
return self.iterBoth(piece, ((-1,1),(1,1)))
def iterBoth(self, piece, moves):
"""
Handles the actual generation of moves for either black or white pieces
"""
for move in moves:
# Regular Move
targetx = piece[0] + move[0]
targety = piece[1] + move[1]
# If the move is out of bounds don't move
if targetx < 0 or targetx >= self.width or targety < 0 or targety >= self.height:
continue
target = (targetx, targety)
# Check that there is nothing in the way of moving to the target
black = target in self.blacklist
white = target in self.whitelist
if not black and not white:
yield (piece, target, self.NOTDONE)
# There was something in the way, can we jump it?
else:
# It has to be of the opposing color to jump
if self.turn == self.BLACK and black:
continue
elif self.turn == self.WHITE and white:
continue
# Jump proceeds by adding the same movement in order to jump over the opposing
# piece on the checkerboard
jumpx = target[0] + move[0]
jumpy = target[1] + move[1]
# If the jump is going to be out of bounds don't do it.
if jumpx < 0 or jumpx >= self.width or jumpy < 0 or jumpy >= self.height:
continue
jump = (jumpx, jumpy)
# Check that there is nothing in the jumpzone
black = jump in self.blacklist
white = jump in self.whitelist
if not black and not white:
yield (piece, jump, self.turn)
def updateBoard(self):
"""
Updates the array containing the board to reflect the current state of the pieces on the
board
"""
for i in range(self.width):
for j in range(self.height):
self.boardState[i][j] = " "
for piece in self.blacklist:
self.boardState[piece[1]][piece[0]] = u'◆'
for piece in self.whitelist:
self.boardState[piece[1]][piece[0]] = u'◇'
# Movement of pieces
def moveSilentBlack(self, moveFrom, moveTo, winLoss):
"""
Move black piece without printing
"""
if moveTo[0] < 0 or moveTo[0] >= self.width or moveTo[1] < 0 or moveTo[1] >= self.height:
raise Exception("That would move black piece", moveFrom, "out of bounds")
black = moveTo in self.blacklist
white = moveTo in self.whitelist
if not (black or white):
self.blacklist[self.blacklist.index(moveFrom)] = moveTo
self.updateBoard()
self.turn = self.WHITE
self.gameWon = winLoss
else:
raise Exception
def moveSilentWhite(self, moveFrom, moveTo, winLoss):
"""
Move white piece without printing
"""
if moveTo[0] < 0 or moveTo[0] >= self.width or moveTo[1] < 0 or moveTo[1] >= self.height:
raise Exception("That would move white piece", moveFrom, "out of bounds")
black = moveTo in self.blacklist
white = moveTo in self.whitelist
if not (black or white):
self.whitelist[self.whitelist.index(moveFrom)] = moveTo
self.updateBoard()
self.turn = self.BLACK
self.gameWon = winLoss
else:
raise Exception
def moveBlack(self, moveFrom, moveTo, winLoss):
"""
Move a black piece from one spot to another. \n winLoss is passed as either 0(white)
or 1(black) if the move is a jump
"""
self.moveSilentBlack(moveFrom, MoveTo, winLoss)
self.printBoard()
def moveWhite(self, moveFrom, moveTo, winLoss):
"""
Move a white piece from one spot to another. \n winLoss is passed as either 0(white)
or 1(black) if the move is a jump
"""
self.moveSilentWhite(moveFrom, moveTo, winLoss)
self.printBoard()
def printBoard(self):
"""
Prints the game board to stdout
"""
print unicode(self)
def __unicode__(self):
"""
Contains the unicode and other BS for printing the board
"""
# Updates Game board
self.updateBoard()
lines = []
# This prints the numbers at the top of the Game Board
lines.append(' ' + ' '.join(map(str, range(self.width))))
# Prints the top of the gameboard in unicode
lines.append(u' ╭' + (u'───┬' * (self.width-1)) + u'───╮')
# Print the boards rows
for num, row in enumerate(self.boardState[:-1]):
lines.append(chr(num+65) + u' │ ' + u' │ '.join(row) + u' │')
lines.append(u' ├' + (u'───┼' * (self.width-1)) + u'───┤')
#Print the last row
lines.append(chr(self.height+64) + u' │ ' + u' │ '.join(self.boardState[-1]) + u' │')
# Prints the final line in the board
lines.append(u' ╰' + (u'───┴' * (self.width-1)) + u'───╯')
return '\n'.join(lines)
############## DEBUGGING
##############
# def getWin(self):
# return self.g
#
# def setWin(self, val):
## if val == 0:
## raise Exception("Game won by white")
# self.g = val
# gameWon=property(getWin, setWin)
##############
##############
|
# -*- coding: utf-8 -*-
"""
This script uses functions, imported from boltzmann_codes.py, that compute
cosmological quantities (matter power spectrum, angular power spectrum,
angular diameter distance, Hubble parameter, growth rate) for cosmological
parameters of choice, using either CAMB or CLASS.
"""
from astropy.convolution import convolve, Box1DKernel
import pytest
import sys
# Python 2 idiom: force UTF-8 as the default string encoding.
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
# NOTE(review): this inserts the .py FILE path into sys.path; sys.path
# entries are normally directories — confirm the import of boltzmann_codes
# actually resolves through this entry.
path = '/home/gerald/Documentos/proyecto_cmb/cmb-1/boltzmann_codes.py' #boltzmann_codes.py path
sys.path.insert(0, path)
import boltzmann_codes
import time
t0= time.clock()
#print(t0)
# Redshift grid: zz covers [0, 3) in steps of 0.05; z_plot is the subset
# of redshifts used for plotting.
z_plot = [0,1, 2, 3]
zmin = 0
zmax = 3
step = 0.05
zz = np.arange(zmin,zmax,step)
# Run both Boltzmann codes with H0=70, Omega_b=0.04, Omega_cdm=0.24.
camb_code = boltzmann_codes.run(zz, "camb", 70, 0.04, 0.24)
class_code = boltzmann_codes.run(zz, "class", 70, 0.04, 0.24)
#boltzmann_codes.params_eps(boltzmann_codes.save_data, [[70,False], [0.04,False], [0.24,False], [2.3e-9, False],
#                          [0.96,True], [0,False], [0.09,False]], 0.01, 'camb', zz, lmax=2500, perturbations=True, z_pk=zz)
#hubble_array = np.arange(65,75,7.5)
#om_b_array = np.arange(0.03,0.05,0.015)
#om_cdm_array = np.arange(0.23,0.25,0.015)
#save_camb_array = boltzmann_codes.save_data("camb", zz, 70, 0.04, 0.24, lmax=2500, perturbations=False)
#save_class_array = boltzmann_codes.save_data("class", zz, 70, 0.04, 0.24, lmax=2500, perturbations=False)
#save_class = boltzmann_codes.save_data_array("class", zz, hubble_array, om_b_array, om_cdm_array,lmax=2500)
t = time.clock() - t0
#print(t)
# Compute the same observables with each code so they can be compared.
p1 = boltzmann_codes.Matter_power_spectrum(zz, camb_code, "camb", l_max=2500, zplot=z_plot, plot='yes', units='(Mpc/h)^3')
p2 = boltzmann_codes.Matter_power_spectrum(zz, class_code, "class", l_max=2500, zplot=z_plot, plot='yes', units='(Mpc/h)^3')
a1 = boltzmann_codes.Angular_power_spectrum(zz, camb_code, "camb", l_max=2500, units=None)
a2 = boltzmann_codes.Angular_power_spectrum(zz, class_code, "class", l_max=2500, units=None)
huble = boltzmann_codes.HubbleParameter(zz, camb_code, "camb")
huble2 = boltzmann_codes.HubbleParameter(zz, class_code, "class")
d_a = boltzmann_codes.Angular_diameter_distance(zz, camb_code, "camb")
d_a2 = boltzmann_codes.Angular_diameter_distance(zz, class_code, "class")
f1 = boltzmann_codes.growth_rate(zz, camb_code, "camb")
f2 = boltzmann_codes.growth_rate(zz, class_code, "class")
# Line styles and reference error bands for the (commented-out) plots below.
ls = iter(['solid', 'dashed', 'dashdot'])
ls2 = iter(['solid', 'dashed', 'dashdot', 'dashdot'])
error = np.ones(len(zz))*0.005
error2 = np.ones(len(p1[0]))*0.05
'''
for i in range(len(z_plot)-1):
plt.plot(p1[0], (p1[2][i] - p2[1][i])/p1[2][i] , label = 'z = {}'.format(z_plot[i]), color='darkviolet')
print((p1[2][i] - p2[1][i])/p1[2][i])
plt.plot(p1[0], error2, color='k', ls='dashed')
plt.ylabel(r'$P(k)\:[(Mpc/h)³]$', fontsize=fonts)
plt.xlabel(r'$k\:[h/Mpc]$', fontsize=fonts)
plt.xlim((0.001,2))
plt.ylim((0,1.1))
#plt.show()
plt.clf()
plt.plot(a1[0][3:], (a1[1][3:] - a2[1][3:])/a1[1][3:])
plt.ylabel(r'$l(l+1)\:C^{TT}_l\: / 2\pi$', fontsize=fonts)
plt.xlabel('Multipole moment l', fontsize=fonts)
plt.legend()
plt.show()
plt.clf()
plt.plot(zz,np.abs((d_a-d_a2)/d_a), color='darkviolet')
#plt.plot(zz, error, color='k', ls='dashed')
plt.ylabel(r'$d_A(z)\: [Mpc]$', fontsize=fonts)
plt.xlabel(r'$z$', fontsize=fonts)
plt.legend()
#plt.ylim((-0.01,0.01))
#plt.show()
plt.clf()
plt.plot(zz, np.abs((huble-huble2)/huble), color='darkviolet')
#plt.plot(zz, error, color='k', ls='dashed')
plt.ylabel(r'$H(z)\:[Km\:s⁻¹\:Mpc⁻¹]$', fontsize=fonts)
plt.xlabel(r'$z$',fontsize=fonts)
plt.legend()
#plt.ylim((-0.01,0.01))
#plt.show()
plt.clf()
plt.plot(zz, np.abs((f1-f2)/f1), color='darkviolet')
#plt.plot(zz, error, color='k', ls='dashed')
plt.ylabel(r'Growth rate $f_g(z)$',fontsize=fonts)
plt.xlabel(r'$z$', fontsize=fonts)
plt.legend()
#plt.ylim((-0.01,0.01))
#plt.show()
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(zz, np.abs((huble-huble2)/huble), color='darkviolet')
axs[0, 0].set_xlabel(r'$z$', fontsize=11)
#axs[0, 0].set_ylabel(r'$H(z)$', fontsize=11)
axs[0, 0].ticklabel_format(axis='both', style='sci', scilimits=(0,0))
axs[0, 0].set_title(r'$H(z)$')
axs[1, 0].plot(zz, np.abs((d_a-d_a2)/d_a), color='darkviolet')
axs[1, 0].set_xlabel(r'$z$', fontsize=11)
#axs[1, 0].set_ylabel(r'$d_A(z)$', fontsize=11)
axs[1, 0].ticklabel_format(axis='both', style='sci', scilimits=(0,0))
axs[1, 0].set_title(r'$d_A(z)$')
axs[0, 1].plot(zz, np.abs((f1-f2)/f1), color='darkviolet')
axs[0, 1].set_xlabel(r'$z$', fontsize=11)
#axs[0, 1].set_ylabel(r'$f_g(z)$',fontsize=11)
axs[0, 1].ticklabel_format(axis='both', style='sci', scilimits=(0,0))
axs[0, 1].set_title(r'$f_g(z)$')
for i in range(len(z_plot)-1):
axs[1, 1].plot(p1[0], (p1[2][i] - p2[1][i])/p1[2][i] , label = 'z = {}'.format(z_plot[i]), color='darkviolet')
axs[1, 1].plot(p1[0], error2, color='k', ls='dashed')
axs[1, 1].set_xlabel(r'$k\:[h/Mpc]$', fontsize=11)
#axs[1, 1].set_ylabel(r'$P(k)$', fontsize=11)
axs[1, 1].set_xlim((0.01,2))
axs[1, 1].set_ylim((0,1.1))
axs[1, 1].ticklabel_format(axis='both', style='sci', scilimits=(0,0))
axs[1, 1].set_title(r'$P(k)$')
fig.tight_layout(w_pad=4)
fig.suptitle('Relative Errors', y=0.99, fontsize=fonts)
plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.9)
plt.savefig('new_plots/relative_errors')
plt.show()
'''
# Fresh line-style iterators for the (disabled) side-by-side plots below;
# the earlier iterators may already have been partially consumed.
ls = iter(['solid', 'dashed', 'dashdot'])
ls2 = iter(['solid', 'dashed', 'dashdot', 'dashdot'])
'''for i in range(len(z_plot)-1):
plt.loglog(p1[0], p1[2][i], label = 'z = {}'.format(z_plot[i]), color='darkblue', ls=next(ls))
#plt.legend(title='z')
plt.ylabel(r'$P(k)\:[(Mpc/h)³]$', fontsize=fonts)
plt.xlabel(r'$k\:[h/Mpc]$', fontsize=fonts)
#plt.show()
#plt.clf()
for i in range(len(z_plot)-1):
plt.loglog(p2[0], p2[1][i], label = 'z = {}'.format(z_plot[i]), color='red', ls=next(ls2))
plt.legend(title='CAMB CLASS', ncol=2)
plt.ylabel(r'$P(k)\:[(Mpc/h)³]$', fontsize=fonts)
plt.xlabel(r'$k\:[h/Mpc]$', fontsize=fonts)
#plt.savefig('new_plots/P(k)')
plt.show()
plt.clf()
plt.plot(a1[0][3:][:-50], a1[1][3:][:-50], color='darkblue', label='CAMB')
plt.plot(a2[0][3:], a2[1][3:], color='red', label='CLASS')
plt.ylabel(r'$l(l+1)\:C^{TT}_l\: / 2\pi$', fontsize=fonts)
plt.xlabel('Multipole moment l', fontsize=fonts)
plt.legend()
#plt.savefig('new_plots/C(l)')
plt.show()
plt.clf()
plt.plot(zz,d_a, color='darkblue', label='CAMB')
plt.plot(zz,d_a2, color='red', label='CLASS')
plt.ylabel(r'$d_A(z)\: [Mpc]$', fontsize=fonts)
plt.xlabel(r'$z$', fontsize=fonts)
plt.legend()
#plt.savefig('new_plots/d_A')
plt.show()
plt.clf()
plt.plot(zz, huble, color='darkblue', label='CAMB')
plt.plot(zz, huble2, color='red', label='CLASS')
plt.ylabel(r'$H(z)\:[Km\:s⁻¹\:Mpc⁻¹]$', fontsize=fonts)
plt.xlabel(r'$z$',fontsize=fonts)
plt.legend()
#plt.savefig('new_plots/H(z)')
plt.show()
plt.clf()
plt.plot(zz, f1, color='darkblue', label='CAMB')
plt.plot(zz, f2, color='red', label='CLASS')
plt.ylabel(r'Growth rate $f_g(z)$',fontsize=fonts)
plt.xlabel(r'$z$', fontsize=fonts)
plt.legend()
#plt.savefig('new_plots/f(z)')
plt.show()
plt.clf() '''
lw = 0.4
fonts = 16


def _comparison_figure(x, y_camb, y_class, ylabel, delta_label, xlabel,
                       x_delta=None, delta=None):
    """Draw one two-panel CAMB-vs-CLASS comparison figure.

    Top panel: both curves (CAMB dark blue, CLASS red). Bottom panel: the
    relative difference (y_camb - y_class) / y_camb, unless a precomputed
    (x_delta, delta) pair is supplied (used for the C_l panel, which is
    subsampled). The figure is built and then cleared, exactly like the
    four original copy-pasted blocks; savefig/show calls stay disabled.
    """
    fig, axs = plt.subplots(2, figsize=(7, 7),
                            gridspec_kw={'height_ratios': [6, 1.5]},
                            sharex=True, squeeze=True)
    plt.style.use('seaborn-ticks')
    fig.subplots_adjust(hspace=0)
    for ax in axs:
        ax.tick_params(axis='both', which='major', labelsize=fonts)
        ax.grid(color='gray', linestyle='--', linewidth=lw)
    axs[0].plot(x, y_camb, color='darkblue', label='CAMB')
    axs[0].plot(x, y_class, color='red', label='CLASS')
    axs[0].legend(frameon=True, prop={'size': fonts})
    axs[0].set_ylabel(ylabel, fontsize=fonts)
    if x_delta is None:
        x_delta = x
        delta = (y_camb - y_class) / y_camb
    axs[1].plot(x_delta, delta, color='k', ls='dashed')
    axs[1].set_xlabel(xlabel, fontsize=fonts)
    axs[1].set_ylabel(delta_label, fontsize=fonts)
    axs[1].yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
    fig.tight_layout()
    # plt.savefig('test_plots/...', dpi=300)  # disabled, as in the original
    # plt.show()                              # disabled, as in the original
    plt.clf()


# H(z), d_A(z) and f(z) use identical layouts, only data/labels differ.
_comparison_figure(zz, huble, huble2,
                   r'$H(z)\:[Km\:s⁻¹\:Mpc⁻¹]$', r'$\Delta H(z)$', r'$z$')
_comparison_figure(zz, d_a, d_a2,
                   r'$d_A(z)\: [Mpc]$', r'$\Delta d_A(z)$', r'$z$')
_comparison_figure(zz, f1, f2,
                   r'Growth rate $f(z)$', r'$\Delta f(z)$', r'$z$')
# C_l: the CAMB arrays are trimmed by 50 samples at the high-l end so both
# codes cover the same multipole range; the residual is subsampled every
# 30th multipole, matching the original block.
ell = a1[0][3:][:-50]
cl_camb = a1[1][3:][:-50]
cl_class = a2[1][3:]
_comparison_figure(ell, cl_camb, cl_class,
                   r'$l(l+1)\:C^{TT}_l\: / 2\pi$', r'$\Delta C^{TT}_l$',
                   'Multipole moment l',
                   x_delta=ell[::30],
                   delta=(cl_camb[::30] - cl_class[::30]) / cl_camb[::30])
# --- P(k) comparison -------------------------------------------------------
# CAMB and CLASS sample P(k) on different k grids, so pair up samples whose
# k values agree to within `step` (relative tolerance) before differencing.
step = 0.0008


def _match_power(tol, z_index):
    """Return (k, P_camb, P_class) arrays for samples with matching k.

    Brute-force O(n*m) scan over both k grids, as in the original code
    (was copy-pasted three times, once per redshift); p1/p2 are the
    module-level Matter_power_spectrum outputs.
    """
    ks, camb_vals, class_vals = [], [], []
    for i in range(len(p1[0])):
        for j in range(len(p2[0])):
            if p1[0][i] == pytest.approx(p2[0][j], tol):
                ks.append(p1[0][i])
                camb_vals.append(p1[2][z_index][i])
                class_vals.append(p2[1][z_index][j])
    return np.array(ks), np.array(camb_vals), np.array(class_vals)


k, pcamb, pclass = _match_power(step, 0)    # z = 0
k, pcamb1, pclass1 = _match_power(step, 1)  # z = 1
k, pcamb2, pclass2 = _match_power(step, 2)  # z = 2
# Relative CAMB-vs-CLASS errors per redshift.
perror = (pcamb - pclass) / pcamb
perror2 = (pcamb1 - pclass1) / pcamb1
perror3 = (pcamb2 - pclass2) / pcamb2
# Leftovers from an abandoned subsampling scheme; kept (they rebind p1/p2!)
# so the commented-out np.concatenate lines below still make sense.
p1 = np.array_split(perror, 4)
p2 = np.array_split(perror2, 4)
p3 = np.array_split(perror3, 4)
k1 = np.array_split(k, 4)
#perror = np.concatenate((p1[0][::15], p1[1][::40], p1[2][::40], p1[3][::40]))
#perror2 = np.concatenate((p2[0][::15], p2[1][::40], p2[2][::40], p2[3][::40]))
#perror3 = np.concatenate((p3[0][::15], p3[1][::40], p3[2][::40], p3[3][::40]))
#kerror = np.concatenate((k1[0][::15], k1[1][::40], k1[2][::40], k1[3][::40]))
kerror = k
# Smooth the residuals with a wide boxcar kernel before plotting.
perror = convolve(perror, Box1DKernel(400))
perror2 = convolve(perror2, Box1DKernel(400))
perror3 = convolve(perror3, Box1DKernel(400))
# One large P(k) panel plus three residual strips (one per redshift).
fig, axs = plt.subplots(4, figsize=(7, 9),
                        gridspec_kw={'height_ratios': [6, 1.5, 1.5, 1.5]},
                        sharex=True, squeeze=True)
plt.style.use('seaborn-ticks')
fig.subplots_adjust(hspace=0)
for ax in axs:
    ax.tick_params(axis='both', which='major', labelsize=fonts)
    ax.grid(color='gray', linestyle='--', linewidth=lw)
axs[0].loglog(k, pcamb, color='darkblue', label='z = 0')
axs[0].loglog(k, pcamb1, color='darkblue', label='z = 1', ls='dashed')
axs[0].loglog(k, pcamb2, color='darkblue', label='z = 2', ls='dashdot')
axs[0].loglog(k, pclass, color='red', label='z = 0')
axs[0].loglog(k, pclass1, color='red', label='z = 1', ls='dashed')
axs[0].loglog(k, pclass2, color='red', label='z = 2', ls='dashdot')
leg = axs[0].legend(ncol=2, frameon=True, prop={'size': fonts})
leg.set_title('CAMB CLASS', prop={'size': fonts})
axs[0].set_ylabel(r'$P(k)\:[(Mpc/h)³]$', fontsize=fonts)
axs[1].plot(kerror, perror, color='k', label='z = 0')
axs[2].plot(kerror, perror2, color='k', ls='dashed', label='z = 1')
axs[3].plot(kerror, perror3, color='k', ls='dashdot', label='z = 2')
axs[3].set_xlabel(r'$k\:[h/Mpc]$', fontsize=fonts)
axs[2].set_ylabel(r'$\Delta P(k)$', fontsize=fonts)
for ax in axs[1:]:
    ax.legend(frameon=True, prop={'size': fonts})
    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
    ax.set_ylim(-4e-2, -1e-2)
fig.tight_layout()
plt.savefig('test_plots/P(k)8', dpi=300)
plt.show()
plt.clf()
'''
paths = [["/home/gerald/Documentos/proyecto_cmb/cmb-1/data/camb/camb_P(k)/","/home/gerald/Documentos/proyecto_cmb/cmb-1/data/camb/camb_P(k)/",
"/home/gerald/Documentos/proyecto_cmb/cmb-1/data/camb/camb_C_l(l)/","/home/gerald/Documentos/proyecto_cmb/cmb-1/data/camb/camb_H(z)/",
"/home/gerald/Documentos/proyecto_cmb/cmb-1/data/camb/camb_d_A(z)/", "/home/gerald/Documentos/proyecto_cmb/cmb-1/data/camb/camb_f(z)/",
"/home/gerald/Documentos/proyecto_cmb/cmb-1/data/camb/camb_C_l(l)/"],
["/home/gerald/Documentos/proyecto_cmb/cmb-1/data/class/class_P(k)/", "/home/gerald/Documentos/proyecto_cmb/cmb-1/data/class/class_P(k)/",
"/home/gerald/Documentos/proyecto_cmb/cmb-1/data/class/class_C_l(l)/", "/home/gerald/Documentos/proyecto_cmb/cmb-1/data/class/class_H(z)/",
"/home/gerald/Documentos/proyecto_cmb/cmb-1/data/class/class_d_A(z)/", "/home/gerald/Documentos/proyecto_cmb/cmb-1/data/class/class_f(z)/",
"/home/gerald/Documentos/proyecto_cmb/cmb-1/data/class/class_C_l(l)/"]]
names = [["camb_P(k)", "camb_k", "camb_Cl(l)", "camb_H(z)", "camb_d_A(z)", "camb_f(z)", "camb_ls"],
["class_P(k)", "class_k", "class_Cl(l)", "class_H(z)", "class_d_A(z)", "class_f(z)", "class_ls"]]
boltzmann_codes.save_data_txt("camb", zz, hubble_array, om_b_array, om_cdm_array, paths, names, lmax=2500)
boltzmann_codes.save_data_txt("class", zz, hubble_array, om_b_array, om_cdm_array, paths, names,lmax=2500)
for i in range(len(z_plot)):
plt.loglog(p1[0], p1[2][i], label = '{}'.format(z_plot[i]))
plt.legend(title='z')
plt.ylabel(r'$P(k)\:[(Mpc/h)³]$')
plt.xlabel(r'$k\:[h/Mpc]$')
plt.show()
plt.clf()
for i in range(len(z_plot)):
plt.loglog(p2[0], p2[1][i], label = '{}'.format(z_plot[i]))
plt.legend(title='z')
plt.ylabel(r'$P(k)\:[(Mpc/h)³]$')
plt.xlabel(r'$k\:[h/Mpc]$')
plt.show()
plt.clf()
plt.plot(a1[0], a1[1])
plt.plot(a2[0], a2[1])
plt.ylabel(r'$l(l+1)\:C^{TT}_l\: / 2\pi$')
plt.xlabel('Multipole moment l')
plt.show()
plt.clf()
plt.plot(zz,d_a)
plt.plot(zz,d_a2)
plt.ylabel(r'$d_A(z)\: [Mpc]$')
plt.xlabel('z')
plt.show()
plt.clf()
plt.plot(zz, huble)
plt.plot(zz, huble2)
plt.ylabel(r'$H(z)\:[Km\:s⁻¹\:Mpc⁻¹]$')
plt.xlabel('z')
plt.show()
plt.clf()
plt.plot(zz, f1)
plt.plot(zz, f2)
plt.ylabel(r'Growth rate $f_g(z)$')
plt.xlabel(r'$z$')
plt.show()
plt.clf()
''' |
# # project euler problems
#
# # project euler problem 1 (multiples of 3 and 5)
#
# multiples = 0
# for x in range(3, 1000):
# if (x % 3) == 0 or (x % 5) == 0:
# multiples = multiples + x
#
# print(multiples)
#
# # project euler problem 2 (even fibonacci numbers)
#
# sum = 0
# a = 1
# b = 1
# for x in range(1, 500):
# c = a + b
# if (c % 2) == 0 and c <= 4000000:
# sum = sum + c
# a = b
# b = c
#
# print(sum)
# project euler problem 3
root = (float(600851475143 ** (1/2)))
for x in range(1, root):
|
import unittest
from dictpath import dictpath
class TestDictPath(unittest.TestCase):
    """Exercises dictpath's explore/path/query behaviour."""

    def test_explore(self):
        flat = {'level1a': 'string'}
        self.assertEqual(dictpath(flat).explore(), {'level1a': 'str'})

        mixed = {
            'level1a': 'string',
            'level1b': 1,
            'level1c': [1, 2, 3],
        }
        self.assertEqual(
            dictpath(mixed).explore(),
            {'level1a': 'str', 'level1b': 'int', 'level1c[]': ['int']})

        nested = {
            'level1a': 'string',
            'level1b': 1,
            'level1c': [1, 2, 3],
            'level1d': {'level2': 'string'},
            'level1e': {'level2': {'level3': 1}},
        }
        expected = {
            'level1a': 'str',
            'level1b': 'int',
            'level1c[]': ['int'],
            'level1d': {'level1d/level2': 'str'},
            'level1e': {'level1e/level2': {'level1e/level2/level3': 'int'}},
        }
        self.assertEqual(dictpath(nested).explore(), expected)

    def test_path(self):
        source = {
            'level1a': 'string',
            'level1b': 1,
            'level1c': [1, 2, 3],
            'level1d': {'level2': 'string'},
            'level1e': {'level2': {'level3': 1}},
        }
        # shallow lookup
        self.assertEqual(dictpath(source).path({'a': 'level1a'}),
                         {'a': 'string'})
        # deep lookup through nested dicts
        self.assertEqual(dictpath(source).path({'deep': 'level1e/level2/level3'}),
                         {'deep': 1})
        # unknown paths resolve to None
        self.assertEqual(dictpath(source).path({'a': 'not_exists_path'}),
                         {'a': None})

    def test_query(self):
        source = {
            'level1a': 'string',
            'level1b': 1,
            'level1c': [1, 2, 3],
            'level1d': [{'level2': 'value'}, {'level2': 'other value'}],
            'level1e': {'level2': 'anthony'},
            'level1f': {'level2': {'level3': 1}},
        }
        self.assertEqual(
            dictpath(source).query('level1d'),
            [{'level2': 'value'}, {'level2': 'other value'}])
        self.assertEqual(
            list(dictpath(source).query('level1d/level2')),
            ['value', 'other value'])
        self.assertEqual(dictpath(source).query('level1e/level2'), 'anthony')
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
from django.contrib.auth.models import User, Group
from django.core.management.base import BaseCommand
from materialcleaner.settings import ACCESS_POLICIES
class Command(BaseCommand):
    """Assign every active user to the group that matches their role."""

    help = 'Step 5 @ fill-in new DB. Assign users to user-groups to define her/his permissions acc. ACCESS_POLICIES'

    def handle(self, *args, **options):
        staff_group = Group.objects.get(name='staff')
        regular_group = Group.objects.get(name='regular')
        for account in User.objects.all():
            # Inactive accounts are left untouched.
            if not account.is_active:
                continue
            if account.is_staff:
                account.groups.add(staff_group)
            # Everyone who is neither superuser nor staff is a regular user.
            if not (account.is_superuser or account.is_staff):
                account.groups.add(regular_group)
            account.save()
        self.stdout.write("DONE! Users assigned to groups acc. ACCESS_POLICIES")
|
#!/usr/bin/python
import json, operator
from datetime import datetime, timedelta
from sqlalchemy import create_engine, or_, and_
from sqlalchemy.orm import sessionmaker
# http://docs.sqlalchemy.org/en/latest/faq/performance.html
import cProfile
from io import StringIO
import pstats
import contextlib
import logging
@contextlib.contextmanager
def profiled():
    """Profile the wrapped statements and log cumulative-time stats."""
    profiler = cProfile.Profile()
    profiler.enable()
    yield
    profiler.disable()
    buffer = StringIO()
    stats = pstats.Stats(profiler, stream=buffer)
    stats.sort_stats('cumulative')
    stats.print_stats()
    # uncomment this to see who's calling what
    # stats.print_callers()
    logging.info(buffer.getvalue())
from models import TechWord, Day, Advertisement, Match
import models
def get_techwords():
    """Return every TechWord row from the database."""
    session = models.Session()
    rows = session.query(TechWord).all()
    # Detach the objects so they stay usable after the session closes.
    session.expunge_all()
    session.close()
    return rows
def get_techword(word):
    """Return the TechWord rows matching *word* (a list, possibly empty)."""
    session = models.Session()
    rows = session.query(TechWord).filter_by(word=word).all()
    session.expunge_all()
    session.close()
    return rows
def get_advertisement(id):
    """Return the advertisement(s) with the given id (a list)."""
    session = models.Session()
    rows = session.query(Advertisement).filter(Advertisement.id==id).all()
    session.expunge_all()
    session.close()
    return rows
def get_advertisements(start='', end=''):
    """Fetch ads, optionally restricted to those overlapping [start, end]."""
    session = models.Session()
    # No range given -> return everything.
    if start == '' or end == '':
        everything = session.query(Advertisement).all()
        session.expunge_all()
        session.close()
        return everything
    # Widen to whole days, end day inclusive.
    start = start.replace(hour=0,minute=0,second=0)
    end = end.replace(hour=0,minute=0) + timedelta(days=1)
    # An ad overlaps the requested window when:
    #  - the window start falls inside the ad's lifetime, or
    #  - the window end falls inside the ad's lifetime, or
    #  - the ad's lifetime lies entirely inside the window.
    overlapping = session.query(Advertisement).filter(
        or_(
            and_(Advertisement.start_date <= start, models.Advertisement.end_date >= start),
            and_(Advertisement.start_date <= end, models.Advertisement.end_date >= end),
            and_(Advertisement.start_date >= start, models.Advertisement.end_date <= end)
        )).all()
    session.expunge_all()
    session.close()
    return overlapping
def match_result(ad, words):
    """Find which tech words occur in an ad's text, ordered by position.

    Returns a list of {'word', 'search_string', 'index'} dicts, sorted by
    the index where the match was found.
    """
    text = ad.text.lower()
    hits = []
    for techword in words:
        # Each TechWord carries a JSON-encoded list of alternative search
        # strings; the first one found in the text wins.
        for candidate in json.loads(techword.search_strings):
            position = text.find(candidate)
            if position >= 0:
                hits.append({'word': techword.word,
                             'search_string': candidate,
                             'index': position})
                break
    return sorted(hits, key=lambda hit: hit['index'])
def get_total_counts(start=datetime(2017,1,1), end=None):
    """Return per-day ad counts as {'YYYY-MM-DD': count} over [start, end].

    BUG FIX: the default for ``end`` used to be ``datetime.now()``, which is
    evaluated once at import time and silently freezes the upper bound for
    the lifetime of the process; it is now resolved at call time.
    """
    if end is None:
        end = datetime.now()
    # search full days including the end day
    # NOTE(review): start uses hour=1 while get_advertisements uses hour=0 —
    # looks deliberate, but confirm.
    start = start.replace(hour=1,minute=0)
    end = end.replace(hour=0,minute=0)
    end = end + timedelta(days=1)
    day = start
    s = models.Session()
    counts = {}
    while day <= end:
        toDate = day + timedelta(days=1)
        # One Day row per calendar day; take the first hit if present.
        d = s.query(Day).filter(Day.date >= day, models.Day.date <= toDate).all()
        if len(d) > 0:
            counts[day.strftime('%Y-%m-%d')] = d[0].count
        day = day + timedelta(days=1)
    s.expunge_all(); s.close()
    return counts
def _format(date):
return "%4d-%2d-%2d" % (date.year, date.month, date.day)
def get_techword_counts(start=datetime(2017,1,1), end=None):
    """Return {'YYYY-MM-DD': [{'word': ..., 'count': ...}, ...]} over [start, end].

    BUG FIX: ``end=datetime.now()`` as a default argument was evaluated once
    at import time, freezing the upper bound; it is now resolved per call.
    """
    if end is None:
        end = datetime.now()
    # search full days including the end day
    start = start.replace(hour=1,minute=0)
    end = end.replace(hour=0,minute=0)
    end = end + timedelta(days=1)
    s = models.Session()
    counts = {}
    # uncomment this to see db query timing results
    # with profiled():
    #     matches = s.query(Match).filter(Match.date >= start, Match.date < end).all()
    matches = s.query(Match).filter(Match.date >= start, Match.date < end).all()
    for m in matches:
        # Group matches under their day key (strftime hoisted out of the
        # repeated lookups in the original).
        key = m.date.strftime('%Y-%m-%d')
        if key not in counts:
            counts[key] = []
        counts[key].append({ 'word': m.techword, 'count': m.count })
    s.expunge_all(); s.close()
    return counts
if __name__ == '__main__':
    # Smoke-test the count queries against the live database when run directly.
    print ('get_counts')
    print (json.dumps(get_total_counts(datetime(2017,4,2)),indent=4,sort_keys=True))
    print ('get_techword_counts')
    print (json.dumps(get_techword_counts(datetime(2017,4,19),datetime(2017,4,19)),indent=4,sort_keys=True))
|
def get_raw_path(cropped_path):
    """Map a cropped-image path back to the raw source image path.

    The crop lives at <p0>/<p1>/<p2>/<p3>/<p4>/.../<name>_<crop-id>; the raw
    image path is the first five components plus <name>. Any intermediate
    directories beyond the fifth component (e.g. 'out' in '.../thumbs/out/')
    are intentionally dropped, matching the original index-by-index join.
    """
    parts = cropped_path.split('/')
    # Original image name: basename with the '_<crop-id>' suffix removed.
    orig_img_name = parts[-1].split('_')[0]
    return '/'.join(parts[:5] + [orig_img_name])
if __name__ == '__main__':
    # BUG FIX: the bare Python 2 print statement is a SyntaxError on
    # Python 3; the parenthesized single-argument form works on both.
    print(get_raw_path('/home/yjian/0b370069434c34d0b3fda9f19033d433/thumbs/out/123.png_123'))
|
# Log in to the HIT online judge, persist the session cookies to disk, and
# reuse the session to fetch a problem's solution page.
import urllib.request, urllib.parse, urllib.error
import http.cookiejar
LOGIN_URL = 'http://acm.hit.edu.cn/hoj/system/login'
values = {'user': '******', 'password': '******'} # , 'submit' : 'Login'
postdata = urllib.parse.urlencode(values).encode()
user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
headers = {'User-Agent': user_agent, 'Connection': 'keep-alive'}
cookie_filename = 'cookie.txt'
# Mozilla-format jar so the cookies survive in a plain text file on disk.
cookie = http.cookiejar.MozillaCookieJar(cookie_filename)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
request = urllib.request.Request(LOGIN_URL, postdata, headers)
try:
    response = opener.open(request)
    page = response.read().decode()
    # print(page)
except urllib.error.URLError as e:
    # NOTE(review): only HTTPError is guaranteed to carry .code; a plain
    # URLError (e.g. connection refused) would raise AttributeError here.
    print(e.code, ':', e.reason)
cookie.save(ignore_discard=True, ignore_expires=True)  # save the cookies to cookie.txt
print(cookie)
for item in cookie:
    print('Name = ' + item.name)
    print('Value = ' + item.value)
get_url = 'http://acm.hit.edu.cn/hoj/problem/solution/?problem=1'  # request another URL reusing the saved session cookie
get_request = urllib.request.Request(get_url, headers=headers)
get_response = opener.open(get_request)
print(get_response.read().decode())
# print('You have not solved this problem' in get_response.read().decode())
import itertools
import math
import numpy as np
import pickle
import torch
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
class Seq2SeqDataset(Dataset):
    """Parallel corpus of pickled token-id sequences for seq2seq training."""

    def __init__(self, src_file, tgt_file, src_dict, tgt_dict):
        # BUG FIX: the original assigned ``self.src_dict`` twice
        # (``self.src_dict, self.src_dict = src_dict, tgt_dict``), which lost
        # the source dictionary and never set ``self.tgt_dict``.
        self.src_dict, self.tgt_dict = src_dict, tgt_dict
        with open(src_file, 'rb') as f:
            self.src_dataset = pickle.load(f)
        self.src_sizes = np.array([len(tokens) for tokens in self.src_dataset])
        with open(tgt_file, 'rb') as f:
            self.tgt_dataset = pickle.load(f)
        self.tgt_sizes = np.array([len(tokens) for tokens in self.tgt_dataset])

    def __getitem__(self, index):
        return {
            'id': index,
            'source': torch.LongTensor(self.src_dataset[index]),
            'target': torch.LongTensor(self.tgt_dataset[index]),
        }

    def __len__(self):
        return len(self.src_dataset)

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Pads every sequence to the batch maximum and sorts the batch by
        descending source length. Returns {} for an empty sample list.
        """
        if len(samples) == 0:
            return {}

        def merge(values, move_eos_to_beginning=False):
            # Pad each row to the longest sequence in the batch.
            # NOTE(review): pad/eos indices come from the *source* dictionary
            # for both sides — assumes src and tgt dicts share special tokens.
            max_length = max(v.size(0) for v in values)
            result = values[0].new(len(values), max_length).fill_(self.src_dict.pad_idx)
            for i, v in enumerate(values):
                if move_eos_to_beginning:
                    # Shift right: <eos> w1 .. w(n-1), used as decoder input.
                    assert v[-1] == self.src_dict.eos_idx
                    result[i, 0] = self.src_dict.eos_idx
                    result[i, 1:len(v)] = v[:-1]
                else:
                    result[i, :len(v)].copy_(v)
            return result

        id = torch.LongTensor([s['id'] for s in samples])
        src_tokens = merge([s['source'] for s in samples])
        tgt_tokens = merge([s['target'] for s in samples])
        tgt_inputs = merge([s['target'] for s in samples], move_eos_to_beginning=True)
        # Sort by descending source length
        src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
        src_lengths, sort_order = src_lengths.sort(descending=True)
        id = id.index_select(0, sort_order)
        src_tokens = src_tokens.index_select(0, sort_order)
        tgt_tokens = tgt_tokens.index_select(0, sort_order)
        tgt_inputs = tgt_inputs.index_select(0, sort_order)
        return {
            'id': id,
            'src_tokens': src_tokens,
            'src_lengths': src_lengths,
            'tgt_tokens': tgt_tokens,
            'tgt_inputs': tgt_inputs,
            'num_tokens': sum(len(s['target']) for s in samples),
        }
class BatchSampler(Sampler):
    """Yields length-grouped batches of dataset indices, optionally sharded.

    NOTE(review): ``self.batches`` is materialized once in ``__init__`` and
    consumed through a single islice iterator, so the sampler can be fully
    iterated only once per construction.
    """
    def __init__(self, dataset, max_tokens=None, batch_size=None, num_shards=1, shard_id=0, shuffle=True, seed=42):
        self.dataset, self.shuffle, self.seed = dataset, shuffle, seed
        # Unset limits become +inf so both cap checks in _batch_generator
        # always apply.
        self.batch_size = batch_size if batch_size is not None else float('Inf')
        self.max_tokens = max_tokens if max_tokens is not None else float('Inf')
        self.batches = self._batch_generator()
        # Round up so every shard advances the same number of steps; short
        # shards are padded with [] by zip_longest below.
        self.shard_len = int(math.ceil(len(self.batches) / num_shards))
        self.itr = itertools.zip_longest(
            range(self.shard_len),
            itertools.islice(self.batches, shard_id, len(self.batches), num_shards),
            fillvalue=[])
    def __len__(self):
        return self.shard_len
    def __iter__(self):
        return self
    def __next__(self):
        return next(self.itr)[1]
    def _batch_generator(self):
        # Deterministic shuffle, then a stable two-key sort: primary key is
        # source length, secondary is target length (mergesort keeps ties
        # in shuffled order).
        np.random.seed(self.seed)
        indices = np.random.permutation(len(self.dataset)) if self.shuffle else np.arange(len(self.dataset))
        indices = indices[np.argsort(self.dataset.tgt_sizes[indices], kind='mergesort')]
        indices = indices[np.argsort(self.dataset.src_sizes[indices], kind='mergesort')]
        batches, batch, sample_len = [], [], 0
        for idx in indices:
            batch.append(idx)
            sample_len = max(sample_len, self.dataset.tgt_sizes[idx])
            # Token budget = batch size x longest target seen so far.
            num_tokens = len(batch) * sample_len
            # NOTE(review): the overflowing element stays in the flushed
            # batch, so a batch can slightly exceed max_tokens — confirm
            # this is intended.
            if len(batch) == self.batch_size or num_tokens > self.max_tokens:
                batches.append(batch)
                batch, sample_len = [], 0
        if len(batch) > 0:
            batches.append(batch)
        if self.shuffle:
            np.random.shuffle(batches)
        return batches
|
'''
Created on Dec 20, 2016
@author: bogdan
'''
from repo.repository import *
class FileDriverRepository(DriverRepository):
    """Driver repository persisted to a comma-separated text file."""

    def __init__(self,file="/home/bogdan/Documents/test2/src/drivers.in"):
        super().__init__()
        self.__file=file
        self.__loadData()

    def __loadData(self):
        """Populate the in-memory repository from the backing file.

        Raises RepoError if the file cannot be opened.
        """
        # BUG FIX: the bare ``except:`` swallowed *every* exception
        # (including KeyboardInterrupt); only file-opening errors should
        # map to RepoError.
        try:
            f=open(self.__file,'r')
        except OSError:
            raise RepoError("File Error!")
        # BUG FIX: ``with`` guarantees the handle is closed even if a line
        # fails to parse (the original leaked it on error).
        with f:
            for line in f:
                if(len(line)>2):
                    fields=line.split(',')
                    if(len(fields)<=1):
                        continue
                    ID=fields[0]
                    # BUG FIX: strip the trailing newline robustly; the
                    # original sliced off the last character, which corrupts
                    # a final line that lacks a newline.
                    name=fields[1].rstrip('\n')
                    super().add(ID,name)

    def __saveData(self):
        """Rewrite the backing file from the in-memory repository."""
        try:
            f=open(self.__file,'w')
        except OSError:
            raise RepoError("File Error!")
        with f:
            for it in super().getAll():
                f.write(str(it.getID())+','+str(it.getName())+'\n')

    def add(self,ID,name):
        """Add a driver and persist the repository immediately."""
        super().add(ID,name)
        self.__saveData()
class FileOrderRepository(OrderRepository):
    """Order repository persisted to a comma-separated text file."""

    def __init__(self,file="/home/bogdan/Documents/test2/src/orders.in"):
        super().__init__()
        self.__file=file
        self.__loadData()

    def __loadData(self):
        """Populate the in-memory repository from the backing file.

        Raises RepoError if the file cannot be opened.
        """
        # BUG FIX: narrowed the bare ``except:`` to OSError, and used
        # ``with`` so the handle is closed even if a line fails to parse.
        try:
            f=open(self.__file,'r')
        except OSError:
            raise RepoError("File Error!")
        with f:
            for line in f:
                if(len(line)>2):
                    fields=line.split(',')
                    if(len(fields)<=1):
                        continue
                    ID=fields[0]
                    # BUG FIX: rstrip('\n') instead of slicing off the last
                    # character, which corrupted a final line lacking a
                    # newline (int() tolerates surrounding whitespace anyway).
                    distance=int(fields[1].rstrip('\n'))
                    super().add(ID,distance)

    def __saveData(self):
        """Rewrite the backing file from the in-memory repository."""
        try:
            f=open(self.__file,'w')
        except OSError:
            raise RepoError("File Error!")
        with f:
            for it in super().getAll():
                f.write(str(it.getID())+','+str(it.getDistance())+'\n')

    def add(self,ID,dist):
        """Add an order and persist the repository immediately."""
        super().add(ID,dist)
        self.__saveData()
|
from onegov.core.orm import Base
from onegov.core.orm.abstract import associated
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.types import UUID, UTCDateTime
from onegov.file import File
from onegov.org.models import AccessExtension
from sedate import to_timezone
from sqlalchemy import (
Boolean, Column, ForeignKey, Integer, Numeric, Text, Enum
)
from sqlalchemy.orm import backref, relationship
from uuid import uuid4
MISSION_TYPES = ('single', 'multi')
class MissionReportFile(File):
    """A file (e.g. mission picture) attached to mission reports."""
    # Distinct polymorphic identity keeps these rows apart from other
    # File subtypes in the shared files table.
    __mapper_args__ = {'polymorphic_identity': 'mission-report-file'}
class MissionReport(Base, ContentMixin, AccessExtension):
    """A single mission report with personnel, vehicle and picture data."""

    __tablename__ = 'mission_reports'
    #: the public id of the mission_report
    id = Column(UUID, nullable=False, primary_key=True, default=uuid4)
    #: the date of the report
    date = Column(UTCDateTime, nullable=False)
    #: how long the mission lasted, in hours
    duration = Column(Numeric(precision=6, scale=2), nullable=False)
    #: the nature of the mission
    nature = Column(Text, nullable=False)
    #: the location of the mission
    location = Column(Text, nullable=False)
    #: actually active personnel
    personnel = Column(Integer, nullable=False)
    #: backup personnel
    backup = Column(Integer, nullable=False)
    #: the Zivilschutz was involved
    civil_defence = Column(Boolean, nullable=False, default=False)
    #: pictures of the mission
    pictures = associated(MissionReportFile, 'pictures', 'one-to-many')
    #: the number of missions on the same site during a day
    mission_count = Column(Integer, nullable=False, default=1)
    #: the mission type
    mission_type = Column(
        Enum(*MISSION_TYPES, name='mission_type'),
        nullable=False,
        default='single'
    )

    @property
    def title(self):
        return self.nature

    @property
    def readable_duration(self):
        """The duration as a compact string, e.g. '2.5h'.

        BUG FIX: ``str(x).rstrip('.0')`` strips a *set* of trailing
        characters, so durations such as 10.00 or 100 rendered as '1h'.
        Strip trailing zeros only after a decimal point, then any dangling
        point itself.
        """
        text = str(self.duration)
        if '.' in text:
            text = text.rstrip('0').rstrip('.')
        return text + 'h'

    @property
    def local_date(self):
        """The report date converted to the Europe/Zurich timezone."""
        return to_timezone(self.date, 'Europe/Zurich')
class MissionReportVehicle(Base, ContentMixin, AccessExtension):
    """A vehicle that can be referenced by mission reports."""
    __tablename__ = 'mission_report_vehicles'
    #: the public id of the vehicle
    id = Column(UUID, nullable=False, primary_key=True, default=uuid4)
    #: the short id of the vehicle
    name = Column(Text, nullable=False)
    #: the longer name of the vehicle
    description = Column(Text, nullable=False)
    #: symbol of the vehicle
    symbol = associated(MissionReportFile, 'symbol', 'one-to-one')
    #: a website describing the vehicle
    website = Column(Text, nullable=True)
    @property
    def title(self):
        # Combined display title: "<short id> - <long name>".
        return f'{self.name} - {self.description}'
    @property
    def readable_website(self):
        # Scheme-less website for display; implicitly returns None when
        # no website is set.
        if self.website:
            return self.website.replace('https://', '').replace('http://', '')
class MissionReportVehicleUse(Base):
    """ Many to many association between vehicles and reports. """
    # NOTE(review): the table name carries a typo ('usees'); kept as-is
    # since renaming would require a schema migration.
    __tablename__ = 'mission_report_vehicle_usees'
    #: one half of the composite primary key, pointing at the report
    mission_report_id = Column(
        UUID,
        ForeignKey('mission_reports.id'),
        primary_key=True)
    # Deleting a report removes its vehicle-use rows (delete-orphan cascade).
    mission_report = relationship(
        'MissionReport',
        backref=backref(
            'used_vehicles', cascade='all, delete-orphan'
        )
    )
    #: the other half of the composite primary key, pointing at the vehicle
    vehicle_id = Column(
        UUID,
        ForeignKey('mission_report_vehicles.id'),
        primary_key=True)
    vehicle = relationship(
        'MissionReportVehicle',
        backref='uses'
    )
    # vehicles may be used multiple times in a single mission_report
    count = Column(
        Integer,
        nullable=False,
        default=1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.