text stringlengths 8 6.05M |
|---|
# Purpose: use the closed-form cantilever solution to gauge the accuracy of the
#          CalculiX result.  NOTE(review): the header says T3D2 while the print
#          below says B31R -- confirm the element type against the FE deck.
# Date: July 9th, 2018
# Programmer: Garrett M. Kelley

# --- Problem definition ---
L = 100        # Beam length
h = 10         # Beam height
w = 5          # Beam width
P = 500        # Tip load
E = 29e6       # Young's Modulus
I = h**3 * w / 12   # Second moment of area of the rectangular section

# --- Closed-form tip deflection of an end-loaded cantilever: P*L^3 / (3*E*I) ---
delta = P * L**3 / (3 * E * I)
print("The resulting vertical displacement for the B31R element is: " + str(delta) + " in.")

# --- Compare against the finite-element prediction ---
delta_fem = 1.80966e-3
p_delta = (delta_fem - delta) / delta * 100
print("The percent error is: " + str(p_delta) + "%")
|
from keras.models import Model, Input
from keras.layers import Dense, Dropout, BatchNormalization
from keras.utils import to_categorical
from sklearn.datasets import load_breast_cancer
import numpy as np
# Binary-target breast-cancer data; the model below expects 30 features.
cancer = load_breast_cancer()
x = cancer.data
y = cancer.target
# y = to_categorical(y)
# print(y)
# print(x.shape)
# print(y.shape)
from sklearn.model_selection import train_test_split
# Hold out 20% for testing; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state=66)
print(x_train.shape)
print(y_train.shape)
def build_network(keep_prob=0.5, optimizer='adam'):
    """Build and compile a binary classifier for the 30-feature cancer data.

    Args:
        keep_prob: dropout rate applied after the hidden layer (tuned by the
            RandomizedSearchCV below).
        optimizer: name of the Keras optimizer to compile with.

    Returns:
        Compiled keras Model mapping (None, 30) inputs to one probability.
    """
    inputs = Input(shape=(30,), name='input')
    x = Dense(512, activation='relu', name='hidden1')(inputs)
    # FIX: keep_prob was accepted (and searched over) but never wired into
    # the graph -- apply it as dropout after the hidden layer.
    x = Dropout(keep_prob)(x)
    # FIX: softmax over a single unit always outputs 1.0, so the model could
    # never learn; a 1-unit binary head with binary_crossentropy needs sigmoid.
    prediction = Dense(1, activation='sigmoid', name='output')(x)
    model = Model(inputs=inputs, outputs=prediction)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return model
def create_hyperparameter():
    """Return the search space sampled by RandomizedSearchCV."""
    grid = {
        "batch_size": [10, 50, 100],
        "optimizer": ['rmsprop', 'adam', 'adadelta'],
        # five evenly spaced dropout rates between 0.1 and 0.25
        "keep_prob": np.linspace(0.1, 0.25, 5),
    }
    return grid
from keras.wrappers.scikit_learn import KerasClassifier
# Wrap the Keras builder so sklearn search utilities can drive it.
model = KerasClassifier(build_fn=build_network, verbose=1)
hyperparameters = create_hyperparameter()
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
# NOTE(review): MinMaxScaler is imported twice and StandardScaler is unused.
from sklearn.preprocessing import MinMaxScaler, StandardScaler
# Random search over the space defined by create_hyperparameter().
search = RandomizedSearchCV(model, hyperparameters)
# Scale features to [0, 1] before fitting inside the search.
model = Pipeline([("scaler", MinMaxScaler()), ('search', search)])
model.fit(x_train, y_train)
print(search.best_params_) |
# Import libraries
import numpy
import os, json, datetime, sys
from operator import attrgetter
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core.image import Image
from azureml.core.webservice import Webservice
from azureml.core.authentication import AzureCliAuthentication
# Smoke-test the deployed AKS web service with one sample payload.
# Get an Azure CLI authentication object
cli_auth = AzureCliAuthentication()
# Get workspace from authentication
ws = Workspace.from_config(auth=cli_auth)
# Get the AKS details written by the deployment step.
try:
    # Read file aml_config/aks_webservice.json
    with open("aml_config/aks_webservice.json") as f:
        config = json.load(f)
except (IOError, ValueError):
    # FIX: was a bare `except:` that also hid programming errors and
    # KeyboardInterrupt.  IOError covers a missing/unreadable file and
    # ValueError covers malformed JSON (json.JSONDecodeError subclasses it).
    print("No new model, thus no deployment on AKS")
    # Ending process, nothing else to do
    sys.exit(0)
# Get the service name from aks_webservice.json
service_name = config["aks_service_name"]
# Get the hosted web service from workspace
service = Webservice(workspace=ws, name=service_name)
# Build a test payload from the first record of data/data_test.json.
with open("data/data_test.json") as f:
    data_test = json.load(f)
input_j = data_test["data"][0]
print(input_j)
test_sample = json.dumps({"data": input_j})
test_sample = bytes(test_sample, encoding="utf8")
try:
    # Call the deployed service with the sample payload.
    prediction = service.run(input_data=test_sample)
    print(prediction)
except Exception as e:
    # Surface the service error, then fail this pipeline step.
    result = str(e)
    print(result)
    raise Exception("AKS service is not working as expected")
# Delete service after test
# Optional
# service.delete()
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
"""
import re
import luigi
from collections import OrderedDict
from pymongo import MongoClient
from ke2mongo import config
def mongo_client_db(database=None, host=None):
    """Return a pymongo Database handle.

    Args:
        database: database name; defaults to the ``[mongo] database`` config value.
        host: mongo host; defaults to the ``[mongo] host`` config value.

    FIX: the defaults were previously evaluated once at import time
    (``def f(database=config.get(...))``), so configuration changes after
    import were silently ignored.  Resolving them at call time keeps the
    signature backward-compatible while reading the current configuration.
    """
    if database is None:
        database = config.get('mongo', 'database')
    if host is None:
        host = config.get('mongo', 'host')
    return MongoClient(host)[database]
def mongo_get_marker_collection_name():
    """Collection holding luigi update markers (default: table_updates)."""
    luigi_cfg = luigi.configuration.get_config()
    return luigi_cfg.get('postgres', 'marker-table', 'table_updates')
def mongo_get_update_markers():
    """Group luigi update-marker task names by run date.

    Parses ``update_id`` values of the form ``TaskName(date=YYYYMMDD)`` and
    returns an OrderedDict mapping int date -> list of task class names,
    preserving the collection's iteration order.
    """
    mongo_db = mongo_client_db()
    marker_collection_name = mongo_get_marker_collection_name()
    cursor = mongo_db[marker_collection_name].find()
    # FIX: raw string so regex escapes like \( are not interpreted as
    # (invalid) Python string escapes -- a DeprecationWarning today and a
    # SyntaxError in future Python versions.
    re_update_id = re.compile(r'([a-zA-Z]+)\(date=([0-9]+)\)')
    # OrderedDict to store all of the update classes
    update_markers = OrderedDict()
    for record in cursor:
        result = re_update_id.match(record['update_id'])
        if result:
            update_cls = result.group(1)
            update_date = int(result.group(2))
            # setdefault replaces the try/except KeyError append dance
            update_markers.setdefault(update_date, []).append(update_cls)
    return update_markers
import unittest
class TestMethods(unittest.TestCase):
    """Sanity checks for built-in string conversions."""

    def test_string_to_list(self):
        # list() over a string yields its characters in order
        expected = ['a', 'b', 'c', 'd']
        self.assertEqual(list('abcd'), expected)

    def test_string_to_int(self):
        # numeric strings parse; non-numeric ones raise ValueError
        self.assertEqual(int('5'), 5)
        with self.assertRaises(ValueError):
            int('a')
if __name__ == '__main__':
unittest.main() |
# Mini-project #6 - Blackjack (CodeSkulptor / Python 2 -- uses simplegui)
import simplegui
import random
# load card sprite - 949x392 - source: jfitz.com
CARD_SIZE = (73, 98)        # sprite cell size (w, h) in pixels
CARD_CENTER = (36.5, 49)    # center of a cell, used as the draw anchor
card_images = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/cards.jfitz.png")
CARD_BACK_SIZE = (71, 96)
CARD_BACK_CENTER = (35.5, 48)
card_back = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/card_back.png")
# initialize some useful global variables
in_play = False   # True while a hand is being played out
outcome = ""      # status message (NOTE(review): declared but never set by the handlers below)
score = 500       # player bankroll
wager = 100       # fixed bet per hand
# define globals for cards
SUITS = ('C', 'S', 'H', 'D')
RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')
# NOTE(review): VALUES maps A->1, but Hand.get_value counts aces as 11 and
# never consults this dict -- confirm which is intended.
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
# define card class
class Card:
    """A playing card identified by suit ('C','S','H','D') and rank."""
    def __init__(self, suit, rank):
        if (suit in SUITS) and (rank in RANKS):
            self.suit = suit
            self.rank = rank
        else:
            # invalid input: keep None fields (str()/draw() will then fail)
            self.suit = None
            self.rank = None
            print "Invalid card: ", suit, rank
    def __str__(self):
        return self.suit + self.rank
    def get_suit(self):
        return self.suit
    def get_rank(self):
        return self.rank
    def draw(self, canvas, pos):
        """Draw the card face at pos, indexing the sprite sheet by rank/suit."""
        card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),
                    CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))
        canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)
    def draw_back(self, canvas, pos):
        """Draw the face-down card image at pos."""
        canvas.draw_image(card_back, CARD_BACK_CENTER, CARD_BACK_SIZE, [pos[0] + CARD_BACK_CENTER[0], pos[1] + CARD_BACK_CENTER[1]], CARD_BACK_SIZE)
# define hand class
class Hand:
    """A blackjack hand: an ordered list of [suit, rank] pairs as dealt by Deck."""
    def __init__(self):
        self.cards = []           # [suit, rank] pairs
        self.number_of_cards = 0  # running count, kept in step with cards
        self.hand_value = 0       # last total computed by get_value()
    def __str__(self):
        return str(self.cards)
    def add_card(self, card):
        """Append a [suit, rank] pair to the hand."""
        self.cards.append(card)
        self.number_of_cards += 1
    def get_value(self):
        """Recompute and return the hand total.

        Tens and face cards count 10; aces always count 11 (no soft/hard
        adjustment -- matches the original game logic); other ranks count
        their face value.
        """
        totals = {'A': 11, 'T': 10, 'J': 10, 'Q': 10, 'K': 10}
        self.hand_value = 0
        for card in self.cards:
            rank = card[1]
            if rank in totals:
                self.hand_value += totals[rank]
            else:
                self.hand_value += int(rank)
        return self.hand_value
    def reset_hand(self):
        """Empty the hand.

        FIX: also reset number_of_cards and hand_value, which previously
        went stale after a reset.
        """
        self.cards = []
        self.number_of_cards = 0
        self.hand_value = 0
# define deck class
class Deck:
    """A 52-card deck stored as [suit, rank] lists, dealt from near the top."""
    def __init__(self):
        self.cards = []
        self.top_card = 1   # count of deal operations (starts at 1)
        self.create_deck()
    def shuffle(self):
        random.shuffle(self.cards)
    def reshuffle(self):
        """Rebuild and reshuffle a fresh 52-card deck."""
        self.cards = []
        self.create_deck()
        self.top_card = 1
        self.shuffle()
    def create_deck(self):
        # build all suit/rank combinations, prepending each card
        for i in SUITS:
            for j in RANKS:
                self.cards.insert(0, [str(i), str(j)])
    def deal_card(self):
        """Deal one card, reshuffling after 40 deals so the deck never runs dry.

        NOTE(review): pops index 1 (the second card) rather than index 0 --
        confirm this is intentional.
        """
        self.top_card += 1
        if self.top_card > 40:
            print "deck is done"
            self.reshuffle()
        return self.cards.pop(1)
    def __str__(self):
        return str(self.cards) + str(self.top_card)
    def return_card(self, card_number):
        return self.cards[card_number]
    def return_suit(self, card_number):
        # linear scan; implicitly returns None if card_number is out of range
        i = 0
        for card in self.cards:
            if i == card_number:
                return card[0]
            else:
                i += 1
#define event handlers for buttons
def deal():
    """Start a new round: reset both hands, deal 2 cards to the player and 1
    to the dealer.  The dealer's second card arrives in stand().  Abandoning
    a round mid-play forfeits the wager.
    """
    global outcome, in_play, score, wager
    # check if player reset cards before end of turn
    if in_play:
        score -= wager
    player_hand.reset_hand()
    dealer_hand.reset_hand()
    # deal two cards to player
    next_card = d1.deal_card()
    player_hand.add_card(next_card)
    next_card = d1.deal_card()
    player_hand.add_card(next_card)
    # deal one card to the dealer (comment said "two", but only one is dealt;
    # the second comes when the player stands)
    next_card = d1.deal_card()
    dealer_hand.add_card(next_card)
    in_play = True
def hit():
    """Deal the player one card; on a bust, give the dealer a card, end the
    round and charge the wager."""
    global in_play, score, wager
    if not in_play:
        return
    player_hand.add_card(d1.deal_card())
    # busted: round ends, dealer takes one card, player loses the wager
    if player_hand.get_value() > 21:
        in_play = False
        dealer_hand.add_card(d1.deal_card())
        score -= wager
def stand():
    """Resolve the round: dealer draws while under 17 (up to three cards
    total here), then the wager is settled.  NOTE(review): indentation was
    reconstructed -- all resolution logic is assumed to run only while a
    hand is in play.
    """
    global in_play, score, wager
    if in_play:
        next_card = d1.deal_card()
        dealer_hand.add_card(next_card)
        # assign a message to outcome, update in_play and score
        # dealer hits below 17 (checked twice -> at most two extra cards)
        if dealer_hand.get_value() < 17:
            next_card = d1.deal_card()
            dealer_hand.add_card(next_card)
        if dealer_hand.get_value() < 17:
            next_card = d1.deal_card()
            dealer_hand.add_card(next_card)
        # settle: dealer bust -> win; higher dealer total -> loss;
        # lower dealer total -> win; equal totals -> push (no change)
        if dealer_hand.get_value() > 21:
            score += wager
        elif dealer_hand.get_value() > player_hand.get_value():
            score -= wager
        elif dealer_hand.get_value() < player_hand.get_value():
            score += wager
        in_play = False
# draw handler
def draw(canvas):
    """simplegui draw handler: render hands, titles, status line and bankroll."""
    global in_play, score
    # this draws dealer cards
    x = 100
    for card in dealer_hand.cards:
        card = Card(card[0], card[1])
        card.draw(canvas, [x, 100])
        x += 80
        # after the first dealer card, cover the second slot with card backs
        if x == 180:
            for card in dealer_hand.cards:
                card = Card(card[0], card[1])
                card.draw_back(canvas, [180, 100])
    # this draws player cards
    x = 100
    for card in player_hand.cards:
        card = Card(card[0], card[1])
        card.draw(canvas, [x, 400])
        x += 80
    # this draws the titles
    canvas.draw_text("Blackjack", [200, 60], 50, "White")
    # canvas.draw_text("Dealer Hand " + str(dealer_hand.get_value()), [150, 60], 50, "Black")
    canvas.draw_text("Player Count " + str(player_hand.get_value()), [150, 360], 50, "Black")
    # this determines the outcome of the game and presents a message
    if dealer_hand.get_value() == 0:
        canvas.draw_text("Press Deal to Begin", [100, 260], 30, "Black")
    elif in_play:
        canvas.draw_text("Press Hit or Stand", [100, 260], 30, "Black")
    elif player_hand.get_value() > 21:
        canvas.draw_text("Player Busted! Press Deal to Play Again", [100, 260], 30, "Black")
    elif dealer_hand.get_value() > 21:
        canvas.draw_text("Dealer Busted! Press Deal to Play Again", [100, 260], 30, "Black")
    elif player_hand.get_value() == dealer_hand.get_value():
        canvas.draw_text("Tie Hand - No Winner", [100, 260], 30, "Black")
    elif player_hand.get_value() > dealer_hand.get_value():
        canvas.draw_text("Player Wins! Press Deal to Play Again", [100, 260], 30, "Black")
    else:
        canvas.draw_text("Dealer Wins! Press Deal to Play Again", [100, 260], 30, "Black")
    # this prints the current score
    canvas.draw_text("Player Money = $" + str(score), [100, 560], 40, "Black")
# initialization frame
frame = simplegui.create_frame("Blackjack", 600, 600)
frame.set_canvas_background("Green")
#create buttons and canvas callback
frame.add_button("Deal", deal, 200)
frame.add_button("Hit", hit, 200)
frame.add_button("Stand", stand, 200)
frame.set_draw_handler(draw)
# get things rolling
frame.start()
# NOTE(review): hands and deck are created after frame.start(); verify the
# draw handler cannot fire before these globals exist.
player_hand = Hand()
dealer_hand = Hand()
d1 = Deck()
d1.shuffle()
|
#!/usr/bin/env python
from __future__ import print_function
import fastjet as fj
import fjcontrib
import fjext
import tqdm
import argparse
import os
from heppy.pythiautils import configuration as pyconf
import pythia8
import pythiafjext
import pythiaext
import ROOT as r
import array
import random
from pyjetty.mputils import MPBase
from pyjetty.mputils import RTreeWriter
def logbins(xmin, xmax, nbins):
    """Return nbins+1 logarithmically spaced bin edges spanning [xmin, xmax].

    Args:
        xmin, xmax: positive range limits (log10 is taken of both).
        nbins: number of bins; the returned array has nbins + 1 edges.

    Returns:
        array.array of 32-bit floats (suitable for ROOT histogram binning).
    """
    # FIX: numpy was referenced here but never imported anywhere in this
    # module (NameError on first call); import locally so the helper is
    # self-contained.
    import numpy as np
    lspace = np.logspace(np.log10(xmin), np.log10(xmax), nbins + 1)
    arr = array.array('f', lspace)
    return arr
class V0Detector(MPBase):
    """Toy V0 trigger detector: counts particles inside the V0A/V0C acceptances.

    Pass ``particles=<list of fj.PseudoJet>`` to analyze immediately, or call
    analyze_event() later.
    """
    def __init__(self, **kwargs):
        self.configure_from_args(particles = None)
        super(V0Detector, self).__init__(**kwargs)
        # ALICE-like acceptances: V0A 2.8 < eta < 5.1, V0C -3.7 < eta < -1.7
        self.V0A_selector = fj.SelectorEtaMin(2.8) & fj.SelectorEtaMax(5.1)
        self.V0C_selector = fj.SelectorEtaMin(-3.7) & fj.SelectorEtaMax(-1.7)
        self.V0_selector = self.V0A_selector | self.V0C_selector
        self.V0A_mult = 0
        self.V0C_mult = 0
        self.V0_mult = 0
        if self.particles:
            self.analyze_event(self.particles)
    def analyze_event(self, parts):
        """Compute V0A/V0C multiplicities and the AND/OR trigger decisions."""
        self.V0A_mult = len(self.V0A_selector(parts))
        self.V0C_mult = len(self.V0C_selector(parts))
        self.V0_mult = self.V0A_mult + self.V0C_mult
        self.V0_and = ((self.V0A_mult > 0) & (self.V0C_mult > 0))
        self.V0_or = ((self.V0A_mult > 0) | (self.V0C_mult > 0))
    def fill_branches(self, t, ev_id, weight=None):
        """Write this detector's multiplicities to tree writer ``t``.

        FIX: the multiplicity branches previously read from a module-level
        ``v0det`` name instead of ``self`` -- a NameError for any instance
        not bound to that exact global.  (``ev_id`` is kept for interface
        compatibility; it is not written here.)
        """
        t.fill_branch('V0w', weight)
        t.fill_branch('mV0A', self.V0A_mult)
        t.fill_branch('mV0C', self.V0C_mult)
        t.fill_branch('mV0', self.V0_mult)
class JetAnalysis(MPBase):
    """Jet finding with area-based background subtraction for one jet_R."""
    def __init__(self, **kwargs):
        self.configure_from_args(jet_R=0.4, jet_algorithm=fj.antikt_algorithm, particle_eta_max=0.9, particles=None)
        super(JetAnalysis, self).__init__(**kwargs)
        # kT-jet median background (rho) estimator within the acceptance
        self.bg_rho_range = fj.SelectorAbsEtaMax(self.particle_eta_max)
        self.bg_jet_def = fj.JetDefinition(fj.kt_algorithm, self.jet_R)
        self.bg_area_def = fj.AreaDefinition(fj.active_area_explicit_ghosts, fj.GhostedAreaSpec(self.particle_eta_max))
        self.bg_estimator = fj.JetMedianBackgroundEstimator(self.bg_rho_range, self.bg_jet_def, self.bg_area_def)
        self.rho = 0
        # keep jets fully contained: shrink jet acceptance by ~R
        self.jet_eta_max = self.particle_eta_max - self.jet_R * 1.05
        self.jet_def = fj.JetDefinition(self.jet_algorithm, self.jet_R)
        self.jet_selector = fj.SelectorPtMin(0.0) & fj.SelectorAbsEtaMax(self.jet_eta_max)
        self.jet_area_def = fj.AreaDefinition(fj.active_area_explicit_ghosts, fj.GhostedAreaSpec(self.particle_eta_max))
        if self.particles:
            self.analyze_event(self.particles)
    def analyze_event(self, parts):
        """Cluster jets and compute rho-subtracted pTs; tolerates empty input."""
        if len(parts) < 1:
            self.rho = 0.0
            self.cs = None
            self.jets = []
            self.corr_jet_pt = []
        else:
            self.bg_estimator.set_particles(parts)
            self.rho = self.bg_estimator.rho()
            self.cs = fj.ClusterSequenceArea(parts, self.jet_def, self.jet_area_def)
            self.jets = fj.sorted_by_pt(self.jet_selector(self.cs.inclusive_jets()))
            # area-based pedestal subtraction: pT - A * rho
            self.corr_jet_pt = [j.pt() - j.area() * self.rho for j in self.jets]
    def fill_branches(self, t, ev_id, weight):
        """Write rho, jets and corrected pTs to tree writer ``t``; ev_id and
        weight branches are written only when truthy."""
        if ev_id:
            t.fill_branch('ev_id', ev_id)
        if weight:
            t.fill_branch('jetw', weight)
        t.fill_branch('rho', self.rho)
        t.fill_branch('jet', self.jets)
        t.fill_branch('jet_ptcorr', self.corr_jet_pt)
class HJetTree(MPBase):
    """Hadron-trigger + jet correlation tree for one (jet_R, trigger window).

    Picks a random trigger hadron within ``trigger_range`` and records its
    kinematics plus delta-phi/delta-eta to every jet found by ``jet_ana``.
    """
    def __init__(self, **kwargs):
        self.configure_from_args( fout=None,
                                  output='hjet_tree.root',
                                  trigger_range = [6, 7],
                                  jet_ana = None)
        super(HJetTree, self).__init__(**kwargs)
        if self.fout is None:
            self.fout = r.TFile(self.output, 'RECREATE')
        self.fout.cd()
        # tree name encodes 10*R and the trigger pT window
        self.tree_name='tjet_0{}_{}_{}'.format(int(10*self.jet_ana.jet_R), int(self.trigger_range[0]), int(self.trigger_range[1]))
        self.tree_writer = RTreeWriter(tree_name=self.tree_name, fout=self.fout)
    def analyze_event(self, jet_parts):
        """Pick a random trigger particle in the pT window; True if one exists."""
        self.trigger_particle = None
        t_selector = fj.SelectorPtRange(self.trigger_range[0], self.trigger_range[1])
        t_candidates = t_selector(jet_parts)
        if len(t_candidates) < 1:
            return False
        self.trigger_particle = random.choice(t_candidates)
        return True
    def fill_branches(self, ev_id, weight):
        """Fill per-event trigger and correlation branches.

        Callers are expected to check ``self.trigger_particle`` first (as
        HJetAnalysis.analyze_event does); without a trigger, the t_*
        branches cannot be filled.
        """
        if self.jet_ana is None:
            print('[e] no jet ana in hjettree...')
            return False
        # FIX: the original nested a duplicated `if self.trigger_particle:`
        # test, so the else-branch that resets dphis/detas was unreachable
        # and the attributes were left unset when no trigger was found.
        if self.trigger_particle:
            self.dphis = [self.trigger_particle.delta_phi_to(j) for j in self.jet_ana.jets]
            self.detas = [self.trigger_particle.eta() - j.eta() for j in self.jet_ana.jets]
        else:
            self.dphis = []
            self.detas = []
        self.tree_writer.fill_branch('ev_id', ev_id)
        self.tree_writer.fill_branch('t_w', weight)
        self.tree_writer.fill_branch('t_pt', self.trigger_particle.pt())
        self.tree_writer.fill_branch('t_phi', self.trigger_particle.phi())
        self.tree_writer.fill_branch('t_eta', self.trigger_particle.eta())
        self.tree_writer.fill_branch('dphi', self.dphis)
        self.tree_writer.fill_branch('deta', self.detas)
        self.jet_ana.fill_branches(self.tree_writer, ev_id = None, weight = None)
    def fill_tree(self):
        """Commit the currently filled branches as one tree entry."""
        self.tree_writer.fill_tree()
    def write_and_close_file(self):
        self.fout.Write()
        self.fout.Close()
class HJetAnalysis(MPBase):
    """Top-level event analysis: V0 multiplicity, an event summary tree, and
    a grid of hadron-jet trees (every jet_R x trigger_range combination)."""
    def __init__(self, **kwargs):
        self.configure_from_args(jet_particle_eta_max = 0.9,
                                 output='hjet.root',
                                 trigger_ranges = [ [0, 1e3], [6, 7] , [12, 22], [20, 30] ],
                                 jet_Rs = [0.2, 0.4, 0.6])
        super(HJetAnalysis, self).__init__(**kwargs)
        self.fout = r.TFile(self.output, 'RECREATE')
        self.fout.cd()
        # per-event summary tree and V0 multiplicity histograms
        self.event_output = RTreeWriter(tree_name='tev', fout=self.fout)
        self.hmV0M = r.TH1F('hmV0M', 'hmV0M', 1000, 0, 1000)
        self.hmV0A = r.TH1F('hmV0A', 'hmV0A', 1000, 0, 1000)
        self.hmV0C = r.TH1F('hmV0C', 'hmV0C', 1000, 0, 1000)
        self.jet_particle_selector = fj.SelectorAbsEtaMax(self.jet_particle_eta_max)
        self.jet_ans = []
        self.hjet_ts = []
        # one JetAnalysis per radius; one HJetTree per (radius, trigger window)
        for jR in self.jet_Rs:
            j_ana = JetAnalysis(jet_R=jR, particle_eta_max=self.jet_particle_eta_max)
            self.jet_ans.append(j_ana)
            for tr in self.trigger_ranges:
                hjet_tree = HJetTree(fout=self.fout, trigger_range=tr, jet_ana=j_ana)
                self.hjet_ts.append(hjet_tree)
    def analyze_event(self, ev_id, parts, pythia=None):
        """Fill the event tree and, when any trigger fires, all h-jet trees."""
        # V0 multiplicity and event props
        v0det = V0Detector(particles=parts)
        self.hmV0M.Fill(v0det.V0_mult)
        self.hmV0A.Fill(v0det.V0A_mult)
        self.hmV0C.Fill(v0det.V0C_mult)
        self.event_output.fill_branch('ev_id', ev_id)
        self.event_output.fill_branch('mV0A', v0det.V0A_mult)
        self.event_output.fill_branch('mV0C', v0det.V0C_mult)
        self.event_output.fill_branch('mV0', v0det.V0_mult)
        # event weight / pT-hat from pythia when available, -1 otherwise
        if pythia:
            ev_w = pythia.info.sigmaGen()
            pthard = pythia.info.pTHat()
        else:
            ev_w = -1
            pthard = -1
        self.event_output.fill_branch('weight', ev_w)
        self.event_output.fill_branch('pthard', pthard)
        # restrict to central-barrel particles for jet finding
        jet_parts = self.jet_particle_selector(parts)
        mTot = len(parts)
        mCB = len(jet_parts)
        self.event_output.fill_branch('mTot', mTot)
        self.event_output.fill_branch('mCB', mCB)
        self.event_output.fill_tree()
        trigger_present = False
        for hj in self.hjet_ts:
            if hj.analyze_event(jet_parts):
                trigger_present = True
        # run (costly) jet finding only when at least one tree has a trigger
        if trigger_present:
            for ja in self.jet_ans:
                ja.analyze_event(jet_parts)
            for hj in self.hjet_ts:
                if hj.trigger_particle:
                    hj.fill_branches(ev_id, ev_w)
                    hj.tree_writer.fill_branch('ev_id', ev_id)
                    hj.tree_writer.fill_branch('mV0A', v0det.V0A_mult)
                    hj.tree_writer.fill_branch('mV0C', v0det.V0C_mult)
                    hj.tree_writer.fill_branch('mV0', v0det.V0_mult)
                    hj.tree_writer.fill_branch('mTot', mTot)
                    hj.tree_writer.fill_branch('mCB', mCB)
                    hj.tree_writer.fill_branch('weight', ev_w)
                    hj.tree_writer.fill_branch('pthard', pthard)
                    hj.fill_tree()
    def finalize(self):
        """Flush all trees/histograms and close the output file."""
        self.fout.Write()
        self.fout.Close()
        print('[i] written', self.fout.GetName())
def generate():
    """Run PYTHIA on the fly, select final charged particles, fill h-jet trees."""
    parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly', prog=os.path.basename(__file__))
    parser.add_argument('-o', '--output', help='output file name', default='hjet.root', type=str)
    parser.add_argument('-t', '--trigger', help='trigger pt', default=6., type=float)
    pyconf.add_standard_pythia_args(parser)
    parser.add_argument('--charged', default=False, action='store_true')
    args = parser.parse_args()
    # print the banner first
    fj.ClusterSequence.print_banner()
    print()
    # mycfg = ['PhaseSpace:pThatMin = 6']
    mycfg = []
    pythia = pyconf.create_and_init_pythia_from_args(args, mycfg)
    if args.nev < 1:
        args.nev = 1
    # jet_particle_eta_max = 0.9
    hjet = HJetAnalysis(jet_particle_eta_max=0.9, output=args.output)
    for iev in tqdm.tqdm(range(args.nev)):
        if not pythia.next():
            continue
        # NOTE(review): kFinal+kCharged are always requested; --charged and
        # --trigger are parsed but appear unused here -- confirm intent.
        parts = pythiafjext.vectorize_select(pythia, [pythiafjext.kFinal, pythiafjext.kCharged])
        if len(parts) < 1:
            continue
        hjet.analyze_event(iev, parts, pythia)
    hjet.finalize()
    pythia.stat()
# Script entry point
if __name__ == '__main__':
    generate()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 15 16:08:08 2019
@author: Dell
"""
import torch
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
import pylab
from tensorboardX import SummaryWriter
def imshow(img):
    """Un-normalize a CHW image tensor and display it."""
    unnorm = img / 2 + 0.5  # undo Normalize((0.5, ...), (0.5, ...))
    plt.imshow(np.transpose(unnorm.numpy(), (1, 2, 0)))  # CHW -> HWC
    pylab.show()
class residueBlock(nn.Module):
    """Two-conv residual block with an optional 1x1 projection shortcut.

    The projection is applied only when inplane != outplane.  Note this
    block applies ReLU both after each conv AND after the addition,
    mirroring the original implementation.
    """
    def __init__(self, inplane, outplane, kernel_size=3, stride=1, padding=1):
        super(residueBlock, self).__init__()
        self.inplane = inplane
        self.outplane = outplane
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # the shortcut needs a projection whenever the channel count changes
        self.dimension_inc = inplane != outplane
        conv_args = dict(kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        self.conv1 = nn.Conv2d(inplane, outplane, **conv_args)
        self.bn1 = nn.BatchNorm2d(outplane)
        self.conv2 = nn.Conv2d(outplane, outplane, **conv_args)
        self.bn2 = nn.BatchNorm2d(outplane)
        # created unconditionally (as in the original) so state_dict keys
        # and parameter/RNG ordering stay identical
        self.projection_shortcut = nn.Conv2d(inplane, outplane, 1, stride=stride, bias=False)
    def forward(self, x):
        identity = x
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        if self.dimension_inc:
            identity = self.projection_shortcut(identity)
        return F.relu(out + identity)
class Resnet(nn.Module):
    """ResNet-18-style CNN for 10-way classification (CIFAR-10 here).

    forward() returns raw class logits.  FIX: the original ended forward()
    with F.softmax even though training uses nn.CrossEntropyLoss, which
    applies log-softmax internally -- the double softmax crushes gradients
    and stalls learning.  Argmax-based accuracy code is unaffected because
    softmax is monotonic.
    """
    def __init__(self):
        super(Resnet, self).__init__()
        # stem: 7x7 stride-2 conv + 3x3 stride-2 maxpool
        self.conv1 = nn.Conv2d(3, 64, 7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, padding=1)
        # four stages of two residual blocks, pooled down between stages
        self.block1 = residueBlock(64, 64)
        self.block2 = residueBlock(64, 64)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, padding=1)
        self.block3 = residueBlock(64, 128)
        self.block4 = residueBlock(128, 128)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, padding=1)
        self.block5 = residueBlock(128, 256)
        self.block6 = residueBlock(256, 256)
        self.maxpool4 = nn.MaxPool2d(3, stride=2, padding=1)
        self.block7 = residueBlock(256, 512)
        self.block8 = residueBlock(512, 512)
        # global average pool + linear head
        self.avepool1 = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, 10)
    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.maxpool2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.maxpool3(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.maxpool4(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.avepool1(x)
        x = torch.flatten(x, 1)
        # return logits -- CrossEntropyLoss applies log-softmax itself
        x = self.fc(x)
        return x
def train(net,epochs,trainloader,testloader,classes,device_ids):
    """Train ``net`` on CIFAR-10, resuming from ./checkpoint/resnet_final.t7 if present.

    Logs loss/accuracy to tensorboardX, prints per-class accuracy every 100
    epochs, and checkpoints every 10 epochs.  Requires CUDA (tensors are
    moved with .cuda()); ``device_ids`` is currently unused inside this
    function.
    """
    writer = SummaryWriter(comment='resnet')
    # read model
    print('===> Try resume from checkpoint')
    if os.path.isdir('checkpoint'):
        try:
            checkpoint = torch.load('./checkpoint/resnet_final.t7')
            net.load_state_dict(checkpoint['state'])
            start_epoch = checkpoint['epoch']
            print('===> Load last checkpoint data')
        except FileNotFoundError:
            start_epoch = 0
            print('Can\'t found resnet_final.t7')
    else:
        start_epoch = 0
        print('===> Start from scratch')
    # start_epoch = 0
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters() , lr = 0.005,momentum=0.9)
    # optimizer = nn.DataParallel(optimizer, device_ids=device_ids)
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.3)
    #train the Network
    for epoch in range(epochs):
        running_loss = 0.0
        epoch_loss = 0.0
        # hard stop when the cumulative (resumed) epoch count passes 10000
        if epoch + start_epoch > 10000:
            break
        correct = 0
        total = 0
        for i , data in enumerate(trainloader , 0):
            inputs , labels = data
            inputs , labels = Variable(inputs.cuda()) , Variable(labels.cuda())
            # inputs = normalize1d(inputs,0.5,0.5)
            optimizer.zero_grad()
            #forward + backward + optimizer
            outputs = net(inputs)
            _ , predicted = torch.max(outputs.data , 1)
            correct += (predicted == labels.cuda()).sum()
            total += labels.size(0)
            loss = criterion(outputs , labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.data
            epoch_loss = running_loss /(i+1)
            print('[%d %d] loss: %.3f' % (epoch+start_epoch+ 1 , i+1 ,running_loss /(i+1)))
        train_acc = float(correct) / total
        # print(train_acc)
        # validation
        correct = 0
        total = 0
        for data in testloader:
            images , labels = data
            outputs = net(Variable(images.cuda()))
            _ , predicted = torch.max(outputs.data , 1)
            correct += (predicted == labels.cuda()).sum()
            total += labels.size(0)
        val_acc = float(correct) / total
        # print(val_acc)
        print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
        # every 100 epochs, accumulate per-class accuracy over the test set
        if epoch % 100 == 99:
            class_correct = torch.ones(10).cuda()
            class_total = torch.ones(10).cuda()
            for data in testloader:
                images , labels = data
                outputs = net(Variable(images.cuda()))
                _ , predicted = torch.max(outputs.data , 1)
                c = (predicted == labels.cuda()).squeeze()
                #print(predicted.data[0])
                # NOTE(review): only the first 4 samples of each batch are
                # counted here -- confirm this matches the intended batch size.
                for i in range(4):
                    label = labels[i]
                    class_correct[label] += c[i]
                    class_total[label] += 1
            for i in range(10):
                # writer.add_scalar('Accuracy of '+classes[i],100 * class_correct[i] / class_total[i],epoch+start_epoch)
                print('Accuracy of %5s : %2d %%' % (classes[i] , 100 * class_correct[i] / class_total[i]))
        # save training loss and accuracy
        writer.add_scalar('cross_entropy',epoch_loss,epoch+start_epoch)
        writer.add_scalar('train_accuracy',train_acc,epoch+start_epoch)
        writer.add_scalar('validation_accuracy',val_acc,epoch+start_epoch)
        print('epoch[%d] loss: %.3f' % (epoch+start_epoch+ 1 , running_loss))
        # checkpoint every 10 epochs
        if epoch % 10 == 9:
            print('===> Saving models...')
            state = {
                'state': net.state_dict(),
                'epoch': epoch+start_epoch }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state, './checkpoint/resnet_final.t7')
    # writer.add_graph(net,(inputs,))
    writer.close()
    print('Finished Training')
if __name__=='__main__':
    # CIFAR-10 pipeline; RandomResizedCrop(224) upsizes images for the
    # ResNet stem.
    transform = transforms.Compose([transforms.RandomResizedCrop(224),transforms.ToTensor() ,transforms.Normalize((0.5 , 0.5 , 0.5) , (0.5 , 0.5 , 0.5))])
    trainset = torchvision.datasets.CIFAR10(root = './data' , train = True , download = True , transform = transform)
    trainloader = torch.utils.data.DataLoader(trainset , batch_size = 512 , shuffle = True , num_workers =2)
    testset = torchvision.datasets.CIFAR10(root = './data' , train = False , download = True , transform = transform)
    testloader = torch.utils.data.DataLoader(testset , batch_size = 128 , shuffle = True , num_workers = 2)
    classes = ('plane' , 'car' , 'bird' , 'cat' , 'deer' , 'dog' , 'frog' , 'horse' , 'ship' , 'truck')
    # replicate the model over three GPUs via DataParallel
    os.environ['CUDA_VISIBLE_DEVICES'] = "0,1,2"
    resnet = Resnet()
    device_ids=[0,1,2]
    resnet = resnet.cuda(device_ids[0])
    net=torch.nn.DataParallel(resnet,device_ids=device_ids)
    train(net,200,trainloader,testloader,classes,device_ids)
|
from flask import Flask
# import json
from model import (
    session as db_session,
    User,
    Book,
    Location,
)
# FIX: Flask(__name__) was instantiated twice (once before and once after
# the model imports); keep a single application object.
app = Flask(__name__)
@app.route("/")
def hello():
    """Health-check / landing route."""
    return "Hello World!"
@app.route("/pins", methods=['POST'])
def create_pin():
    '''
    Add a pin to the map (stub, not implemented).
    If there's a logged-in user, the pin should be readily approved and added.
    If there's not a logged-in user, the pin should be sent to admin for approval.
    '''
    pass
@app.route("/pins", methods=['GET'])
def get_pins():
    """Return all pins (stub, not implemented)."""
    # TODO: split this function out to get historical and
    # historical fiction pins separately.
    pass
@app.route("/pins/id", methods=['PUT'])
def update_pin():
    """Update a pin (stub).  NOTE(review): the route likely needs an
    ``<int:id>`` converter rather than the literal path segment ``id``."""
    pass
@app.route("/pins/id", methods=['GET'])
def get_pin():
    """Fetch one pin (stub).  NOTE(review): literal ``id`` path segment --
    probably should be ``<int:id>``."""
    pass
@app.route("/pins/id", methods=['DELETE'])
def delete_pin():
    """Delete one pin (stub).  NOTE(review): literal ``id`` path segment --
    probably should be ``<int:id>``."""
    pass
# Run the development server when executed directly
if __name__ == "__main__":
    app.run(debug=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 16/4/23 2:50 PM
# @Author : ZHZ
# @Description : split the data set into windows of num_days days (the default used to be 14)
import pandas as pd
import numpy as np
import datetime
# NOTE(review): `global` at module level is a no-op; sum_flag is a local in
# TransferDataByDays_if.
global sum_flag
num_days = 7        # aggregation window length in days
sum_flag_temp = 0
days_20141009 = datetime.datetime(2014, 10, 9)  # reference date for day offsets
item_id_dict = {}   # item_id -> compact id; presumably populated elsewhere -- verify
all_item_sum = []
kid_item_sum = []
count1 = []         # diagnostic: windows reaching day >= 431
count2 = []         # diagnostic: final (day 444) windows
#filtered_outlier_if = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/FilteredData/filtered_outlier_if.csv");
filtered_outlier_if = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/2_if2.csv");
# derive the day offset from the yyyymmdd integer `date` column
filtered_outlier_if['days_20141009'] = filtered_outlier_if['date'].\
    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
#filtered_outlier_isf = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/FilteredData/filtered_outlier_isf.csv");
#filtered_outlier_isf = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/2_isf2.csv");
#filtered_outlier_isf['days_20141009'] = filtered_outlier_isf['date'].\
#    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
def countByDays_if(dataframe, start_day, end_day):
    """Aggregate one [start_day, end_day] window of an item's daily rows.

    Sums each metric column and rescales by per = num_days / window_length
    so that partial windows are normalized to a full num_days window.
    Returns (feature_dict, scaled qty_alipay_njhs sum), or (None, 0) when
    the window is empty or invalid.  (Python 2: note the integer division
    in the 'date' fields.)
    """
    if start_day > end_day:
        return None,0
    dataframe = dataframe[dataframe['days_20141009']>=start_day]
    dataframe = dataframe[dataframe['days_20141009']<=end_day]
    if len(dataframe)<=0:
        return None,0
    # normalization factor for windows shorter than num_days
    per = float(num_days)/float(end_day-start_day+1)
    #print per
    #print start_day,end_day
    temp = {}
    #print start_day,end_day,dataframe.date.sort_values().head(1),dataframe.days_20141009.max()
    #temp['date'] = str(dataframe.date.min())+"_"+str(dataframe.date.max())
    # temp['date'] = str(dataframe.days_20141009.min()+num_days)+"_"+str(dataframe.days_20141009.max()+num_days)
    temp['date'] = (end_day-1)/num_days
    temp['item_id'] = item_id_dict[int(dataframe.item_id.mean())]
    temp['cate_id'] = dataframe.cate_id.max()
    temp['cate_level_id'] = dataframe.cate_level_id.max()
    temp['brand_id'] = dataframe.brand_id.max()
    temp['supplier_id'] = dataframe.supplier_id.max()
    # traffic / cart / purchase metrics, all scaled by `per`
    temp['pv_ipv'] = dataframe.pv_ipv.sum()*per
    temp['pv_uv'] = dataframe.pv_uv.sum()*per
    temp['cart_ipv'] = dataframe.cart_ipv.sum()*per
    temp['cart_uv'] = dataframe.cart_uv.sum()*per
    temp['collect_uv'] = dataframe.collect_uv.sum()*per
    temp['num_gmv'] = dataframe.num_gmv.sum()*per
    temp['amt_gmv'] = dataframe.amt_gmv.sum()*per
    temp['qty_gmv'] = dataframe.qty_gmv.sum()*per
    temp['unum_gmv'] = dataframe.unum_gmv.sum()*per
    temp['amt_alipay'] = dataframe.amt_alipay.sum()*per
    temp['num_alipay'] = dataframe.num_alipay.sum()*per
    temp['qty_alipay'] = dataframe.qty_alipay.sum()*per
    temp['unum_alipay'] = dataframe.unum_alipay.sum()*per
    temp['ztc_pv_ipv'] = dataframe.ztc_pv_ipv.sum()*per
    temp['tbk_pv_ipv'] = dataframe.tbk_pv_ipv.sum()*per
    temp['ss_pv_ipv'] = dataframe.ss_pv_ipv.sum()*per
    temp['jhs_pv_ipv'] = dataframe.jhs_pv_ipv.sum()*per
    temp['ztc_pv_uv'] = dataframe.ztc_pv_uv.sum()*per
    temp['tbk_pv_uv'] = dataframe.tbk_pv_uv.sum()*per
    temp['ss_pv_uv'] = dataframe.ss_pv_uv.sum()*per
    temp['jhs_pv_uv'] = dataframe.jhs_pv_uv.sum()*per
    temp['num_alipay_njhs'] = dataframe.num_alipay_njhs.sum()*per
    temp['amt_alipay_njhs'] = dataframe.amt_alipay_njhs.sum()*per
    temp['qty_alipay_njhs'] = dataframe.qty_alipay_njhs.sum()*per
    temp['unum_alipay_njhs'] = dataframe.unum_alipay_njhs.sum()*per
    temp['is_final'] = False
    sum_flag_temp = dataframe.qty_alipay_njhs.sum()*per
    if end_day>=431:
        count1.append(0)
    # day 444 closes the series: mark this window as the prediction target
    if end_day==444:
        temp['date'] = 444/num_days
        count2.append(0)
        temp['is_final'] = True
    #print start_day,end_day
    print temp['item_id'],start_day,end_day,temp['date'],temp['qty_alipay_njhs']
    #print dataframe[['item_id','days_20141009','qty_alipay_njhs']]
    #print temp['item_id'],temp['date'],temp['qty_alipay_njhs']
    return temp,sum_flag_temp
def TransferDataByDays_if():
    """Slide a num_days window backwards over each item's history, writing a
    per-item training CSV plus combined all/predict CSVs.

    The qty_alipay_njhs of each window is shifted by one window (set to the
    previous window's scaled sum) so the label is the NEXT window's sales.
    """
    new_father_kid_item_x = []
    new_father_kid_item_all = []
    for i,father_kid_item in filtered_outlier_if.groupby([filtered_outlier_if['cate_level_id'],
                                                          filtered_outlier_if['cate_id'],
                                                          filtered_outlier_if['item_id']]):
        first_day = father_kid_item.days_20141009.min()
        last_day = father_kid_item.days_20141009.max()
        flag_day = last_day
        print first_day,last_day
        father_kid_item = father_kid_item.sort_values('days_20141009')
        is_first = True
        sum_flag = 0
        # walk backwards from the newest day in num_days steps
        while(flag_day>=first_day):
            flag_day = flag_day - num_days
            if (flag_day<=first_day):
                temp,sum_flag_temp = countByDays_if(father_kid_item, first_day, flag_day+num_days)
            else:
                temp,sum_flag_temp = countByDays_if(father_kid_item, flag_day+1, flag_day+num_days)
            if temp == None:
                print "这里有个None"
                continue
            # label shift: this window's target is the previously seen
            # (i.e. chronologically later) window's sales
            temp['qty_alipay_njhs'] = sum_flag
            sum_flag = sum_flag_temp
            new_father_kid_item_x.append(temp)
            new_father_kid_item_all.append(temp)
        new_father_kid_item_train = pd.DataFrame(new_father_kid_item_x,columns=[
            "date","item_id","cate_id","cate_level_id","brand_id","supplier_id","pv_ipv","pv_uv","cart_ipv","cart_uv",
            "collect_uv","num_gmv","amt_gmv","qty_gmv","unum_gmv","amt_alipay","num_alipay","qty_alipay","unum_alipay",
            "ztc_pv_ipv","tbk_pv_ipv","ss_pv_ipv","jhs_pv_ipv","ztc_pv_uv","tbk_pv_uv","ss_pv_uv","jhs_pv_uv","num_alipay_njhs",
            "amt_alipay_njhs","unum_alipay_njhs","is_final","qty_alipay_njhs"])
        # per-item training CSV (final/prediction rows excluded)
        new_father_kid_item_train[new_father_kid_item_train['is_final']==False].drop(["is_final"],axis = 1).\
            to_csv('/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/2016_04_24/train'+str(item_id_dict[i[2]])+'.csv',index = None,columns=None)
        new_father_kid_item_x = []
        #break
    print len(count1),len(count2)
    new_father_kid_item_all = pd.DataFrame(new_father_kid_item_all,columns=[
        "date","item_id","cate_id","cate_level_id","brand_id","supplier_id","pv_ipv","pv_uv","cart_ipv","cart_uv",
        "collect_uv","num_gmv","amt_gmv","qty_gmv","unum_gmv","amt_alipay","num_alipay","qty_alipay","unum_alipay",
        "ztc_pv_ipv","tbk_pv_ipv","ss_pv_ipv","jhs_pv_ipv","ztc_pv_uv","tbk_pv_uv","ss_pv_uv","jhs_pv_uv","num_alipay_njhs",
        "amt_alipay_njhs","unum_alipay_njhs","is_final","qty_alipay_njhs"])
    # combined outputs: everything, and the final/prediction windows only
    new_father_kid_item_all.to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
                                   "Data/if_all.csv",index = None,columns=None)
    if_all_predict = new_father_kid_item_all[new_father_kid_item_all['is_final']].drop(["is_final"],axis = 1)
    if_all_predict.to_csv('/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/if_all_predict.csv',index = None,columns=None)
    # new_father_kid_item[new_father_kid_item['is_final']==False].drop(["is_final"],axis = 1).\
    # to_csv('/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/if_all_train.csv',index = None,columns=None)
def TransferDataByDays_isf():
new_father_kid_item_data = []
for i,father_kid_item in filtered_outlier_isf.groupby([filtered_outlier_isf['cate_level_id'],
filtered_outlier_isf['cate_id'],
filtered_outlier_isf['item_id'],
filtered_outlier_isf['store_code']]):
first_day = father_kid_item.days_20141009.min()
last_day = father_kid_item.days_20141009.max()
flag_day = last_day
print first_day,last_day
father_kid_item = father_kid_item.sort_values('days_20141009')
father_kid_item_data = {}
#print father_kid_item[father_kid_item['days_20141009']==last_day]
is_first = True
while(flag_day>=first_day):
flag_day = flag_day - num_days
if (flag_day<first_day):
temp = countByDays_if(father_kid_item, first_day, flag_day+num_days)
else:
temp = countByDays_if(father_kid_item, flag_day+1, flag_day+num_days)
if is_first:
is_first = False
kid_item_sum.append({'item_id':str(i[2])+"_"+str(i[3]),'sum':temp['qty_alipay_njhs']})
if temp == None:
print "这里有个None"
else:
new_father_kid_item_data.append(temp)
#break
new_father_kid_item = pd.DataFrame(new_father_kid_item_data,columns=[
"date","item_id","cate_id","cate_level_id","brand_id","supplier_id","pv_ipv","pv_uv","cart_ipv","cart_uv",
"collect_uv","num_gmv","amt_gmv","qty_gmv","unum_gmv","amt_alipay","num_alipay","qty_alipay","unum_alipay",
"ztc_pv_ipv","tbk_pv_ipv","ss_pv_ipv","jhs_pv_ipv","ztc_pv_uv","tbk_pv_uv","ss_pv_uv","jhs_pv_uv","num_alipay_njhs",
"amt_alipay_njhs","qty_alipay_njhs","unum_alipay_njhs"]
).to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
"Data/isf_all.csv",index = None,columns=None)
def transItemID():
    """Assign each item a dense 0..N-1 id, ordered by ascending frequency.

    Populates the module-level item_id_dict mapping and persists the same
    mapping to Data/item_id.csv.
    """
    ordered_ids = filtered_outlier_if.item_id.value_counts().sort_values().index
    rows = []
    for new_id in range(0, len(ordered_ids)):
        original_id = ordered_ids[new_id]
        item_id_dict[original_id] = new_id
        #print new_id,original_id
        rows.append({'item_id': original_id, 'new_id': new_id})
    pd.DataFrame(rows,columns=['item_id','new_id']).to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
                                                           "Data/item_id.csv",index = None,columns=None)
# Build the dense item-id mapping first (populates item_id_dict)...
transItemID()
# ...then bucket the data and emit the per-item training CSVs.
TransferDataByDays_if()
|
import flask
from flask_restful import Resource
from flask import request, jsonify, redirect, Response
from models.models import *
class ChatbotAPI(Resource):
    """REST resource exposing trained chatbot models."""

    def get(self):
        """Return trained model(s) as JSON.

        Filters: `id` query arg -> single model; `file_id` -> all models for
        that file; no args -> every model.
        """
        if "id" in request.args:
            model = TrainedModel.query.filter_by(id=request.args["id"]).first()
            model_schema = TrainedModelSchema(many=False)
            return jsonify(model_schema.dump(model))
        elif "file_id" in request.args:
            model = TrainedModel.query.filter_by(file_id=request.args["file_id"]).all()
            # BUG FIX: the schema was assigned to `conversation_schema` but
            # dumped through the undefined name `model_schema`, raising
            # NameError on every file_id request.
            model_schema = TrainedModelSchema(many=True)
            return jsonify(model_schema.dump(model))
        else:
            model = TrainedModel.query.all()
            model_schema = TrainedModelSchema(many=True)
            return jsonify(model_schema.dump(model))

    def post(self):
        # TODO Prompt a bot with id
        return Response(status=200)

    def delete(self):
        # Delete chatbot with id
        return Response(status=200)
# Read n lines, count occurrences of the first token (a country name) on
# each line, and print "country count" pairs in alphabetical order.
n = int(input())
paises = {}
for _ in range(n):
    linha = input().split()
    pais = linha[0]
    # IDIOM FIX: was `paises.__contains__(...)` plus separate assignment;
    # dict.get with a default does the same in one step.
    paises[pais] = paises.get(pais, 0) + 1
for pais in sorted(paises):
    print(pais, paises[pais])
|
import numpy as np
from .._common import jitted
@jitted
def normc(ee, nmat):
    """Normalize Haskell or Dunkin vectors.

    Scales the first *nmat* entries of *ee* in place by their maximum
    absolute value and returns the vector together with the natural log of
    that scale factor (so the normalization can be undone later).
    """
    scale = 0.0
    for k in range(nmat):
        magnitude = np.abs(ee[k])
        if magnitude > scale:
            scale = magnitude
    # Guard against dividing by (effectively) zero.
    if scale < 1.0e-40:
        scale = 1.0
    for k in range(nmat):
        ee[k] /= scale
    return ee, np.log(scale)
|
# Demonstrates basic list operations.
# FIX: renamed the variable `list`, which shadowed the built-in type.
items = ['abcd', 786, 2.23, 'runoob', 70.2]
tinylist = [123, 'runoob']
print(items)             # print the whole list
print(items[0])          # first element
print(items[1:3])        # second through third element
print(items[2:])         # everything from the third element on
print(tinylist * 2)      # the small list repeated twice
print(items + tinylist)  # list concatenation
print("\n********************************\n")
# List elements are mutable
a = [1, 2, 3, 4, 5, 6]
a[0] = 9
print(a[0])
a[2:5] = [13, 14, 15]
print(a)
a[2:5] = []  # deleting a slice
print(a)
a = items.copy()
print(a)
print(items.count(786))
# FIX: list.reverse() reverses in place and returns None, so the original
# `print(a.reverse())` always printed None; reverse first, then print.
a.reverse()
print(a)
import serial
import time
import datetime
import MySQLdb

# Open the RFID reader: COM3, 9600 baud, 8 data bits, no parity, one stop
# bit, non-blocking reads (timeout=0).
ser = serial.Serial(
    port='COM3',
    baudrate=9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=0)
print("connected to: " + ser.portstr)
# Local MySQL database holding vehicles, charges, wallets and toll logs.
conn = MySQLdb.connect(host="localhost",
                       user="root",
                       passwd="",
                       db="rfid")
x = conn.cursor()
seq = []   # characters of the tag currently being read
count = 1  # running vehicle counter (log output only)
f = open("dataFile.txt", "a")  # append-only audit log of raw reads

# SECURITY FIX: these queries used to be built with %-string formatting from
# the tag id (SQL injection and broken quoting for string ids).  They are now
# parameterized and executed with bound values.
SQL_CHARGE = ("SELECT s2.amount from vehicles s1 join charges s2 "
              "on (s1.vehicle=s2.vehicle) where s1.rfidcode=%s")
SQL_BALANCE = "select balance from wallet where rfid=%s"
SQL_VEHICLE = "select username,registration from vehicles where rfidcode=%s"
SQL_LAST_ID = "SELECT `id` from data where `RowNo`=(select max(`RowNo`) from data)"

while True:
    for c in ser.read():
        seq.append(c)
        joined_seq = ''.join(str(v) for v in seq)  # tag characters read so far
        if c == '\n':
            # A complete tag line has arrived: log it, bill the vehicle, reset.
            print("Vehicle " + str(count) + ': ' + joined_seq)
            f.write(time.strftime('%Y-%m-%d'))
            f.write("\t")
            f.write(time.strftime("%X"))
            f.write("\t")
            f.write(joined_seq)
            f.flush()  # keep the audit log durable between reads
            id = joined_seq[:10]  # first 10 characters are the RFID code
            try:
                # Skip a duplicate consecutive read of the same tag.
                x.execute(SQL_LAST_ID)
                if x.rowcount > 0:
                    row0 = x.fetchone()
                    check = row0[0]
                    if check != id:
                        x.execute(SQL_CHARGE, (id,))
                        row = x.fetchone()
                        cost = row[0]
                        x.execute(SQL_BALANCE, (id,))
                        row1 = x.fetchone()
                        bal = row1[0]
                        x.execute(SQL_VEHICLE, (id,))
                        row2 = x.fetchone()
                        name = row2[0]
                        registration = row2[1]
                        print(name)
                        if bal >= cost:
                            # Sufficient balance: debit the wallet, log the toll.
                            k = "paid"
                            x.execute("""UPDATE `wallet` SET `balance`=`balance`-%s WHERE rfid=%s""", (cost, id))
                            fb = bal - cost
                            x.execute("""INSERT INTO `toll`(`rfid`,`regno`, `uname`, `initialbal`, `tollamount`, `finalbal`, `date`, `time`, `status`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""", (id, registration, name, bal, cost, fb, time.strftime('%Y-%m-%d'), time.strftime("%X"), k))
                        else:
                            # Insufficient balance: record the unpaid passage.
                            k = "not paid"
                            x.execute("""INSERT INTO `tollcorruption`(`rfid`, `uname`, `amount`) VALUES (%s,%s,%s)""", (id, name, cost))
                            x.execute("""INSERT INTO `toll`(`rfid`,`regno`, `uname`, `initialbal`, `tollamount`, `finalbal`, `date`, `time`, `status`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""", (id, registration, name, bal, cost, bal, time.strftime('%Y-%m-%d'), time.strftime("%X"), k))
                        x.execute("""INSERT INTO `data`(`date`, `time`, `id`,`cost`) VALUES (%s,%s,%s,%s)""", (time.strftime('%Y-%m-%d'), time.strftime("%X"), id, row[0]))
                        conn.commit()
            except Exception:
                # FIX: was a bare `except:`; any failure (missing vehicle row,
                # DB error) rolls the transaction back instead of crashing.
                conn.rollback()
            seq = []
            count += 1
            break
conn.close()
ser.close()
|
print('Display text')
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from logging import getLogger
__author__ = 'golden'
__create_date__ = '2018/5/26 22:23'
class Request(object):
    """A crawl request: a URL plus the callback that handles its response."""

    def __init__(self, url, callback):
        self.url = url
        self.callback = callback
        # The logger is attached lazily via set_logger().
        self.logger = None

    def set_logger(self):
        """Attach a logger named after the concrete class and log setup."""
        cls_name = self.__class__.__name__
        self.logger = getLogger(cls_name)
        self.logger.debug("{name} setup success.".format(name=cls_name))

    def __str__(self):
        return self.url
|
#!/usr/bin/env python3
import cgi
from mysql_utils import get_connection

try:
    # Gets ID from URL
    fields = cgi.FieldStorage()
    combatant_id = fields.getvalue("id")
    # SECURITY FIX: the id from the URL used to be concatenated straight into
    # the SQL string (SQL injection).  It is now passed as a bound parameter.
    # NOTE(review): assumes a MySQL-style driver (%s placeholders) -- confirm
    # against mysql_utils.get_connection.
    sql = "SELECT combatant.name, species.name," \
          "(combatant.plus_atk + species.base_atk)," \
          "(combatant.plus_dfn + species.base_dfn)," \
          "(combatant.plus_hp + species.base_hp) " \
          "FROM combatant, species " \
          "WHERE combatant.id=%s " \
          "AND combatant.species_id = species.id"
    # Connects to database and executes SQL statement.
    connection = get_connection()
    cursor = connection.cursor()
    cursor.execute(sql, (combatant_id,))
    data = cursor.fetchall()
    cursor.close()
    connection.close()
    # Head and body of HTML response.
    print("Content-type: text/html\n")
    print("""
    <head>
        <title>Combatant Detail</title>
    </head>
    <body>
        <h1>Combatant Detail:</h1>
        <ul>
            <b>Name: </b>""", data[0][0], """<br>
            <b>Species: </b>""", data[0][1], """<br>
            <b>Attack: </b>""", data[0][2], """<br>
            <b>Defense: </b>""", data[0][3], """<br>
            <b>HP: </b>""", data[0][4], """<br>
        </ul>
        <hr>
        <a href="/cgi-bin/combatant_list.py">Combatant List</a><br>
        <a href="/cgi-bin/battle_list.py">Battle List</a><br>
        <a href="/cgi-bin/ranking_page.py">Ranking Page</a><br>
        <a href="/index.html">Home Page</a>
    </body>""")
    print("</html>")
except Exception:
    # FIX: was a bare `except:`; still shows the error page for any failure
    # (bad id, empty result, DB error) without swallowing SystemExit.
    print("<html><h1>Error: Cannot retrieve Combatant Details!</h1>")
    print("<a href=\"/index.html\">Go back to Home Page</a></html>")
|
from flask import Flask, request
from twilio.rest import Client
from github import Github, GithubException
app = Flask(__name__)
# contents = repo.get_commits_traffic(per="week")
# SECURITY(review): Twilio account SID and auth token are hardcoded in source
# control -- rotate them and load from environment variables instead.
sid = "AC3de35d6b4b5246a899bade4f33c1fe8b"
token = "816d65fc8f364d4b1acdb0f4fec59765"
client = Client(sid, token)
# WhatsApp sender (Twilio sandbox number) and recipient.
fromWhatsApp = "whatsapp:+14155238886"
toWhatsApp = "whatsapp:+918050825266"
@app.route("/", methods = ['POST'])
def sampleCallMessage():
    """Log the raw POST body and reply with a fixed greeting."""
    payload = request.data
    print(payload)
    return "hello world"
@app.route("/sendViewTraffic", methods = ['GET'])
def sendViewTraffic():
    """Fetch the repo's weekly view-traffic stats and send them via WhatsApp."""
    # SECURITY(review): GitHub credentials are hardcoded, and username/password
    # auth is deprecated by GitHub -- switch to a token from the environment.
    g = Github("shreyas.shivajirao@gmail.com", "Muffin@98")
    repo = g.get_repo("Shreyasgujjar/agresseion")
    contents = repo.get_views_traffic(per = "week")
    # IDIOM FIX: build the message with a list + join instead of repeated
    # string concatenation.
    lines = []
    for key in contents:
        value = contents.get(key)
        if type(value) is not list:
            lines.append(key + " - " + str(value) + "\n")
        else:
            for element in value:
                lines.append(str(element) + "\n")
    client.messages.create(body="".join(lines), from_ = fromWhatsApp, to = toWhatsApp)
    return "message sent"
if __name__ == "__main__":
    # Run the Flask dev server (debug=True: never use in production).
    app.run(debug = True)
|
# NOTE: Python 2 source (print statements below).
print "Is it true that 3 + 2 < 5 - 7 ?"
print 3 + 2 < 5 - 7  # False: 5 < -2
print 1.0/3  # float operand forces true division: 0.333...
print 5%3.0  # modulo with a float operand yields a float: 2.0
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 13:08:19 2020
@author: vernika
"""
from __future__ import print_function
import cv2
import argparse
def show_img(img):
    """Display *img* in a window titled "canvas" and block until a key press."""
    cv2.imshow("canvas", img)
    cv2.waitKey(0)
# Command line: -i/--image (required) is the path of the image to load.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "path to image")
args = vars(ap.parse_args())
print(args)
load_image = cv2.imread(args["image"])  # BGR ndarray, or None if the path is bad
def flip_img(img, alignment = None):
    """Flip *img* according to *alignment*.

    alignment: 1 -> horizontal flip, 0 -> vertical flip,
    -1 -> both horizontal and vertical.

    BUG FIX: any other value (including the default None) used to raise
    UnboundLocalError because `flipped` was never assigned; the image is
    now returned unchanged in that case.
    """
    if alignment == 1:    # horizontal flip
        return cv2.flip(img, 1)
    if alignment == 0:    # Vertical flip
        return cv2.flip(img, 0)
    if alignment == -1:   # horizontal and vertical flip
        return cv2.flip(img, -1)
    return img
def crop_img(img, ystart, yend, xstart, xend):
    """Return the [ystart:yend, xstart:xend] sub-region of *img* (a view, not a copy)."""
    return img[ystart:yend, xstart:xend]
|
from selenium import webdriver
import time

driver = webdriver.Chrome()
driver.get('https://mail.163.com/')
time.sleep(3)

# Alternative approach kept for reference: click the parent element that
# reveals the login form, collect every iframe on the page, then switch
# into one of them by index:
#   driver.find_element_by_xpath("//*[@id='lbNormal']").click()
#   frames = driver.find_elements_by_tag_name("iframe")
#   driver.switch_to.frame(frames[0])

# Reveal the normal (account/password) login form.
driver.find_element_by_xpath("//*[@id='lbNormal']").click()
# The login form lives in an iframe whose id starts with "x-URS-iframe".
login_frame = driver.find_element_by_css_selector("iframe[id^='x-URS-iframe']")
# FIX: switch_to_frame()/switch_to_default_content() are deprecated;
# use the driver.switch_to API instead.
driver.switch_to.frame(login_frame)
driver.find_element_by_name("email").send_keys("123")
driver.find_element_by_name("password").send_keys("456")
driver.switch_to.default_content()
time.sleep(3)
driver.quit()
|
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _
# Product template
class product_template(models.Model):
    # Extends product.template so new products default to the stockable type.
    _inherit = 'product.template'
    @api.model
    def default_get(self, fields):
        # Force type='product' (stockable) on every newly created template.
        rec = super(product_template, self).default_get(fields)
        rec['type'] = 'product'
        return rec
# Stock picking
class stock_picking(models.Model):
    # Extends stock.picking: newest pickings first, plus a payment-term link.
    _inherit = 'stock.picking'
    _order = 'id desc'
    # "Plazo de pago" = payment term associated with this picking.
    pay_term_id = fields.Many2one('account.payment.term', string='Plazo de pago',
        index=True, ondelete='cascade')
# Stock move
class stock_move(models.Model):
    # Extends stock.move: sync unit of measure and description to the product.
    _inherit = 'stock.move'
    @api.onchange('product_id')
    def onchange_product_id(self):
        # When the product changes, copy its default UoM and name to the move.
        if self.product_id:
            self.product_uom = self.product_id.uom_id.id
            self.name = self.product_id.name
|
import pygame
import sys
from pygame.locals import *
import time
from lib.player import Player
from lib.fallingShit import FallingShit
# Playfield size in pixels.
WINDOW_WIDTH = 1280
WINDOW_HEIGHT = 736
# RGB color constants used for text rendering.
white = (255, 255, 255)
black = (0, 0, 0)
blue = (0, 0, 128)
class App:
    """Dodger game: a state machine cycling MENU -> GAME -> GAMEOVER.

    The player dodges falling enemies; survival time is the score.
    """
    def main(self):
        """Initialize pygame and run the main loop, dispatching on gameState."""
        pygame.init()
        self.DISPLAYSURF = pygame.display.set_mode(
            (WINDOW_WIDTH, WINDOW_HEIGHT))
        pygame.display.set_caption('Dodger v0.1')
        self.mainmenuImg = pygame.image.load("./pics/mainmenu.png")
        self.arrowImg = pygame.image.load("./pics/arrow.png")
        # gameState is one of "MENU", "GAME", "GAMEOVER".
        self.gameState = "MENU"
        self.timer = 0
        self.currentChoice = 0
        self.gameOverFlag = 0
        # Text
        self.font = pygame.font.Font('freesansbold.ttf', 16)
        self.gameOverFont = pygame.font.Font('freesansbold.ttf', 32)
        self.text = self.font.render(
            'Tid: ' + str(self.timer), True, white, black)
        self.textRect = self.text.get_rect()
        self.textRect.x = 1200
        self.textRect.y = 700
        # Arrow
        self.arrowRect = self.arrowImg.get_rect()
        self.arrowRect.x = 700
        self.arrowRect.y = 125
        while True:
            if self.gameState == "MENU":
                self.DISPLAYSURF.blit(self.mainmenuImg, (0, 0))
                self.DISPLAYSURF.blit(self.arrowImg, self.arrowRect)
                self.menuKeys()
            if self.gameState == "GAME":
                self.keyChecks()
                self.draw()
                self.update()
            if self.gameState == "GAMEOVER":
                self.gameOverMenu()
                self.menuKeys()
            pygame.display.update()
            # Cap the loop at roughly 200 iterations per second.
            time.sleep(5.0 / 1000.0)
    def menuKeys(self):
        """Handle key events in the MENU and GAMEOVER states (arrow navigation)."""
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RETURN and self.gameOverFlag == 0:
                    self.handleEnterInMenu()
                if event.key == pygame.K_RETURN and self.gameOverFlag == 1:
                    self.gameState = "MENU"
                    self.gameOverFlag = 0
                if event.key == pygame.K_ESCAPE:
                    self.die()
                # Menu entries are 125 px apart; 4 choices (0..3).
                if event.key == pygame.K_DOWN and self.currentChoice < 3:
                    self.arrowRect.y += 125
                    self.currentChoice += 1
                if event.key == pygame.K_UP and self.currentChoice > 0:
                    self.arrowRect.y -= 125
                    self.currentChoice -= 1
            if event.type == QUIT:
                self.die()
    def handleEnterInMenu(self):
        """Act on the currently selected menu entry (0 = play, 3 = quit)."""
        if self.currentChoice == 0:
            self.startGame()
        elif self.currentChoice == 3:
            self.die()
        else:
            print("Not implemented u piece of garbage")
    def startGame(self):
        """Reset all game state (player, enemies, timers) and enter GAME."""
        self.player = Player()
        self.enemies = []
        self.amountOfEnemies = 0
        self.spawnEnemy()
        self.clock = pygame.time.Clock()
        self.timer = 0
        self.enemyTimer = 0
        self.timeUntilEnemySpawns = 2000
        self.gameState = "GAME"
    def gameOverMenu(self):
        """Draw the game-over screen: final score and return-to-menu prompt."""
        self.DISPLAYSURF.fill((0, 0, 0))
        self.gameOverText = self.gameOverFont.render(
            'Score: ' + str(int(self.timer/1000)), True, white, black)
        self.gameOverTextRect = self.gameOverText.get_rect()
        self.gameOverTextRect.x = 600
        self.gameOverTextRect.y = 300
        self.gameOverReturn = self.gameOverFont.render(
            'Press Return to go back to the Main Menu!', True, white, black)
        self.gameOverReturnRect = self.gameOverText.get_rect()
        self.gameOverReturnRect.x = 320
        self.gameOverReturnRect.y = 360
        self.DISPLAYSURF.blit(self.gameOverText, self.gameOverTextRect)
        self.DISPLAYSURF.blit(self.gameOverReturn, self.gameOverReturnRect)
    # Creates new enemy and add it to the enemy array
    def spawnEnemy(self):
        enemy = FallingShit()
        self.enemies.append(enemy)
        self.amountOfEnemies += 1
    # Contains game logic
    def update(self):
        """Advance timers, spawn enemies, check collisions, cull dead enemies."""
        # Time keeper
        self.clock.tick()
        self.timer += self.clock.get_time()
        self.enemyTimer += self.clock.get_time()
        self.text = self.font.render(
            'Tid: ' + str(int(self.timer/1000)), True, white, black)
        if self.enemyTimer > self.timeUntilEnemySpawns:
            self.enemyTimer = 0
            self.spawnEnemy()
        # After 10 seconds the spawn interval drops from 2000 ms to 500 ms.
        if self.timer > 10000:
            self.timeUntilEnemySpawns = 500
        self.checkCollision()
        # At most one finished enemy is removed per frame (break after pop).
        for i in range(0, self.amountOfEnemies):
            self.enemies[i].update()
            if self.enemies[i].getRemovedStatus():
                enemyToRemove = self.enemies.pop(i)
                del enemyToRemove
                self.amountOfEnemies -= 1
                break
    # Contains logic for key presses
    def keyChecks(self):
        """Move the player with the arrow keys; handle window close."""
        keys = pygame.key.get_pressed()
        if keys[pygame.K_UP]:
            self.player.moveUp()
        if keys[pygame.K_DOWN]:
            self.player.moveDown()
        if keys[pygame.K_LEFT]:
            self.player.moveLeft()
        if keys[pygame.K_RIGHT]:
            self.player.moveRight()
        # Event Handler
        for event in pygame.event.get():
            if event.type == QUIT:
                self.die()
    # Draws everything onto the screen
    def draw(self):
        # The screen will be covered in black
        self.DISPLAYSURF.fill((0, 0, 0))
        # Draw player
        self.player.draw(self.DISPLAYSURF)
        # Draw Enemies
        for i in range(0, self.amountOfEnemies):
            self.enemies[i].draw(self.DISPLAYSURF)
        # Draw our text
        self.DISPLAYSURF.blit(self.text, self.textRect)
    # Death to the game :>
    def die(self):
        pygame.quit()
        sys.exit()
    # TODO: Add logic
    def gameOver(self):
        """Switch to GAMEOVER and reset menu selection state."""
        self.gameOverFlag = 1
        self.gameState = "GAMEOVER"
        self.currentChoice = 0
        self.arrowRect.x = 700
        self.arrowRect.y = 125
    # Contains collision logic
    def checkCollision(self):
        """End the game as soon as the player touches any enemy."""
        for i in range(0, self.amountOfEnemies):
            if self.player.isCollidingWith(self.enemies[i].getRect()):
                self.gameOver()
# Entry point: construct the app and enter its blocking main loop.
if __name__ == "__main__":
    theApp = App()
    theApp.main()
|
import requests

# POST a simple form payload to the restcountries ASEAN endpoint.
url = 'https://restcountries.eu/rest/v2/regionalbloc/asean'
myobj = {'name': 'Myanmar'}
x = requests.post(url, data=myobj)
# print the response text (the content of the requested file):
print(x.text)
class MarketState(object):
    """
    This class models the state of an instrument in the market

    Attributes:
        bid: bid price
        ask: ask price
    """

    def __init__(self, bid, ask):
        self.bid = bid
        self.ask = ask

    def update_bid_ask(self, new_bid, new_ask):
        """Replace the stored quote with a fresh bid/ask pair."""
        self.bid = new_bid
        self.ask = new_ask
|
import cv2

# Load the image as grayscale (flag 0) and show it until a key is pressed.
# FIX: renamed the variable from `input`, which shadowed the built-in function.
gray_img = cv2.imread("./Desktop/OpenCV/Basics/hand.jpg", 0)
cv2.imshow("Grayscale image", gray_img)
cv2.waitKey()
cv2.destroyAllWindows()
|
# Basic arithmetic operators demo.
# FIX: corrected the misspelling "reminder" -> "remainder" and the misleading
# comment that claimed floor division "should be 2.4".
a = 1
b = 2
c = 3
my_sum = a + b
another_sum = 5 + 10
# Operator precedence: * and / bind tighter than + and -.
maths_operators = 1 + 3 * 4 / 2 - 2  # 1 + 6.0 - 2 = 5.0
print(maths_operators)
float_division = 12 / 3  # true division always yields a float
print(float_division)
integer_division = 12 // 3  # drops anything after the decimal (no rounding!)
print(integer_division)
# True division would give 2.4; floor division drops the fraction.
division_with_remainder = 12 // 5
print(division_with_remainder)  # prints 2
remainder = 12 % 5
print(remainder)  # prints 2
x = 37
remainder = x % 2
print(remainder)  # prints 1, therefore x is odd
|
#-*- coding: utf-8 -*-
from django.contrib import admin
from models import Assinante
class AssinanteAdmin(admin.ModelAdmin):
    """Django admin configuration for Assinante (subscriber) records."""
    # Columns shown in the changelist and fields covered by the search box.
    list_display = ('email', 'data_assinatura', )
    search_fields = ('email', 'data_assinatura', )
admin.site.register(Assinante, AssinanteAdmin)
|
# -*- coding: utf-8 -*-
"""
smartwall.base
~~~~~~~~~~~~~~
This module contains building blocks for the REST api such as:
* Error classes
* Base class for views
* Decorators
:author: Felipe Blassioli <felipe.blassioli@vtxbrasil.com.br>
"""
from functools import wraps
from flask import request, abort, jsonify, Response, current_app
from flask.ext.classy import FlaskView
from .error import ApiError, RowDoesNotExist, DuplicateKeyError
def wrap_response(f):
    """Decorator that jsonifies view responses
    and also catches relevant errors.
    """
    @wraps(f)
    def inner(*args, **kwargs):
        result = f(*args, **kwargs)
        # Pass fully-formed Response objects straight through.
        if isinstance(result, Response):
            return result
        # Wrap bare lists in a {'data': [...]} envelope.
        if isinstance(result, list):
            return jsonify({'data': result})
        # Objects that know how to serialize themselves.
        if hasattr(result, 'to_json'):
            return jsonify(result.to_json())
        return jsonify(result)
    return inner
class RestView(FlaskView):
    """The RestView class that logs requests and responses
    and parses params from the request object according to self.rules variable.
    """
    decorators = [wrap_response]
    def __init__(self):
        # One ArgsParser per view method name, built from self.args_rules.
        self.parsers = {}
        if hasattr(self, "args_rules"):
            for name in self.args_rules:
                self.parsers[name] = ArgsParser(self.args_rules[name])
        else:
            self.args_rules = {}
    def before_request(self, name, *args, **kwargs):
        # Parse and stash the request params for the method about to run;
        # methods without declared rules get an empty dict.
        if name in self.parsers:
            self.args = self.parsers[name].parse(request)
        else:
            current_app.logger.warning("Parser not found! name=[%s]" % name)
            self.args = {}
    def after_request(self, name, response):
        # Log every response; HTML bodies are elided, others may be truncated
        # to LOG_MAX_RESP_SIZE characters.
        req = request.method + ' ' + request.url
        if response.mimetype == 'text/html':
            current_app.logger.debug("{:<45} response: text/html".format(req))
        else:
            resp = str(response.data)
            fmt = "{:<45} response: {}"
            if 'LOG_MAX_RESP_SIZE' in current_app.config:
                end = current_app.config['LOG_MAX_RESP_SIZE']
                msg = fmt.format(req,resp[:end]) + 'length=[{}]'.format(len(resp))
            else:
                msg = fmt.format(req,resp)
            current_app.logger.debug(msg)
        return response
from json import loads

class Argument(object):
    """Declarative description of a single request parameter.

    Used by ArgsParser: `required` controls validation, `type`/`coerce`
    control conversion (json.loads by default), `default` fills in
    missing optional parameters.
    """

    def __init__(self, name, default=None, required=True, type=None,
                 description=None, case_sensitive=True, coerce=loads):
        self.name = name
        self.default = default
        self.required = required
        self.type = type
        self.description = description
        self.case_sensitive = case_sensitive
        self.coerce = coerce

    def __repr__(self):
        return "Arg({},default={})".format(self.name, self.default)
class ArgsParser(object):
    """Validates and coerces request parameters against a list of Argument rules."""

    def __init__(self, args=None):
        self.args = args or []

    def add_argument(self, *args, **kwargs):
        # BUG FIX: was `self.args.current_append(...)`, which raised
        # AttributeError -- lists have no such method.
        self.args.append(Argument(*args, **kwargs))

    def add_arguments(self, params):
        self.args.extend(params)

    def parse(self, request):
        """
        Parses the request parameters according to rules(self.args).

        Raises ApiError (HTTP 400) on coercion failure or when a required
        parameter is missing; optional missing parameters get their default.
        """
        result = {}
        if request.method.lower() == 'post':
            params = request.get_json(
                cache=False) if request.mimetype == 'application/json' else request.form
        else:
            params = request.args
        for arg in self.args:
            if arg.name in params:
                # Coerce only when a target type is declared and not yet met.
                if arg.type is not None and type(params[arg.name]) != arg.type:
                    try:
                        result[arg.name] = arg.coerce(params[arg.name])
                    except Exception:
                        current_app.logger.warning('Coercion failed for param: {}'.format(arg.name))
                        # FIX: removed the unreachable abort(400) that followed
                        # this raise.
                        raise ApiError('Coercion failed for param: {}'.format(arg.name), 'ArgsParserException', 1, status_code=400)
                else:
                    result[arg.name] = params[arg.name]
            elif arg.required:
                current_app.logger.warning("Missing required param: {}".format(arg.name))
                raise ApiError('Missing required param: {}'.format(arg.name), 'ArgsParserException', 2, status_code=400)
            else:
                result[arg.name] = arg.default
        return result
from flask import url_for, jsonify, make_response, render_template
from json import dumps

class InfoView(RestView):
    """Introspection endpoints: URL map, log-file viewer and config dump."""

    def map(self):
        """Return one '<methods> <rule> -> <endpoint>' line per registered route."""
        links = []
        for rule in current_app.url_map.iter_rules():
            s = '{} {} -> {}'.format(list(rule.methods), str(rule), rule.endpoint)
            links.append(s)
        return links

    def log(self):
        """Render the application's log file as an HTML page."""
        with open(current_app.config['LOG_FILENAME']) as f:
            lines = f.readlines()
        params = dict(
            line_count = len(lines),
            log_content = ''.join(lines)
        )
        return make_response(render_template('view_log.html', **params))
        # FIX: removed an unreachable `return dict(result="error")` that
        # followed the return above.

    def config(self):
        """Return the app config; non-JSON-serializable values are stringified."""
        resp = dict()
        for k, v in current_app.config.items():
            try:
                # BUG FIX: `dumps` was used here but never imported (only
                # `loads` was) -- every call raised NameError.
                dumps(v)
                resp[k] = v
            except Exception:
                # FIX: modernized the Python 2-only `except Exception, err`.
                resp[k] = str(v)
        return resp
from flask.json import JSONEncoder
import calendar
from datetime import datetime
from decimal import Decimal
class DefaultJSONEncoder(JSONEncoder):
    """Flask JSON encoder with extra conversions for API responses."""
    def default(self, obj, *args, **kwargs):
        # Objects that define their own serialization win.
        if hasattr(obj,'to_json'):
            return obj.to_json()
        try:
            # Decimal -> float (loses exactness; acceptable for API output).
            if isinstance(obj, Decimal):
                return float(obj)
            # datetime -> integer milliseconds since the epoch, normalized
            # to UTC when the value is timezone-aware.
            if isinstance(obj, datetime):
                if obj.utcoffset() is not None:
                    obj = obj - obj.utcoffset()
                millis = int(
                    calendar.timegm(obj.timetuple()) * 1000 +
                    obj.microsecond / 1000
                )
                return millis
            # Anything iterable becomes a plain list (see else below).
            iterable = iter(obj)
        except TypeError:
            pass
        else:
            return list(iterable)
        return JSONEncoder.default(self, obj)
class RestAPI(object):
    """Small wrapper that wires RestView classes and JSON error handling into a Flask app."""
    def init_app(self, app, json_encoder=DefaultJSONEncoder):
        # Install the custom encoder, JSON error handlers and the built-in
        # introspection endpoints.
        self.app = app
        self.app.view_classes = {}
        self.json_encoder = json_encoder
        self.app.json_encoder = self.json_encoder
        self.register_error_handlers()
        InfoView.register(self.app)
    def register(self, view):
        # Track the view class by name and register its routes on the app.
        self.app.view_classes[view.__name__] = view
        view.register(self.app)
    def register_all(self, *args):
        for v in args:
            self.register(v)
    def register_error_handlers(self):
        # Make every HTTP error return JSON instead of Flask's HTML pages.
        from werkzeug.exceptions import default_exceptions
        from werkzeug.exceptions import HTTPException
        # http://flask.pocoo.org/snippets/83/
        def make_json_error(ex):
            if hasattr(ex, 'to_json'):
                response = jsonify(ex.to_json())
            else:
                response = jsonify(message=str(ex))
            if hasattr(ex, 'status_code'):
                response.status_code = ex.status_code
            else:
                response.status_code = (ex.code
                                        if isinstance(ex, HTTPException)
                                        else 500)
            return response
        # NOTE(review): Python 2 only (`iterkeys`), and writing to
        # error_handler_spec directly relies on a Flask internal that changed
        # in later versions -- confirm the pinned Flask release.
        for code in default_exceptions.iterkeys():
            self.app.error_handler_spec[None][code] = make_json_error
|
import analysis
import master
# Root folder containing the phase-one coded Excel workbooks to merge.
FOLDER_ROOT_LOCATION = "/Users/wxp/Downloads/PHASE ONE CODED"
if __name__ == '__main__':
    # Merge to a file
    output_path, records_count = master.merge_excel_sheet(FOLDER_ROOT_LOCATION)
    # Create analysis sheet
    analysis.create_analysis_sheet(output_path, records_count)
|
import json
from simple_slack_bot.slack_request import SlackRequest
def get_thread_ts(data: SlackRequest, target):
    """Return the value stored under *target* in the request's JSON body.

    The request is serialized via str() and parsed as JSON; a missing key
    yields None.
    """
    payload: dict = json.loads(str(data))
    return payload.get(target)
|
'''
Created on 28 jan. 2014
@author: Pieter
'''
from PIL import Image
class Card():
    '''Objectholder for the cards in Dunqeun Petz'''

    # Standard need for each card color.
    standards = {"red":"anger","green":"food","purple":"magic","yellow":"play"}

    def __init__(self, color, need):
        '''Card(string color [,string need="standard"]) -> Card

        :param color: Color of the card string
        :param need: type of need, standard for the standard for that color
        '''
        self.color = color
        self.need = need
        self.flipped = False
        # A card showing its color's standard need carries one need; any
        # other color/need combination carries two.
        self.numNeeds = 1 if (color, need) in self.standards.items() else 2

    def turnCard(self):
        '''C.turnCard() -- Turns over the card.'''
        self.flipped = not self.flipped

    def getNeeds(self):
        '''C.getNeeds -> list -- returns a list of (1 or 2) need(s)'''
        # The color-standard need only shows on the flipped side of a
        # two-need card.
        if self.flipped and self.numNeeds == 2:
            return [self.need, self.standards[self.color]]
        return [self.need]

    def getCard(self):
        '''C.getCard() -> Image -- returns an Image-object for the card. Rightly Turned'''
        path = "dungeonz\\artwork\\cards\\" + self.color + "_" + self.need + ".png"
        image = Image.open(path)
        if self.flipped:
            image = image.rotate(180)
        return image
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/4/8 19:07
# @Author :'liuyu'
# @Version:V 0.1
# @File :
# @desc :
import os
import tensorflow as tf
# tf2 --> tf1
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from .data_load import get_batch
from .model import Transformer
from .hparams import Hparams
from .utils import get_hypotheses, load_hparams
import logging
logging.basicConfig(level=logging.INFO)
# Evaluation script: restore a Transformer checkpoint, export a frozen
# graph, run inference over the test set and write the hypotheses to disk.
logging.info("# hparams")
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
load_hparams(hp, hp.ckpt)
logging.info("# Prepare test batches")
# NOTE(review): hp.test1 is passed for both the source and target file
# arguments -- confirm that is intended (target side unused at eval time?).
test_batches, num_test_batches, num_test_samples  = get_batch(hp.test1, hp.test1,
                                              hp.maxlen1, hp.maxlen2,
                                              hp.vocab, hp.test_batch_size,
                                              shuffle=False)
# NOTE: `iter` shadows the builtin; kept to avoid renaming a module global.
iter = tf.compat.v1.data.Iterator.from_structure(tf.compat.v1.data.get_output_types(test_batches),
                                                 tf.compat.v1.data.get_output_shapes(test_batches))
test_init_op = iter.make_initializer(test_batches)
xs, ys = iter.get_next()
logging.info("# Load model")
m = Transformer(hp)
y_hat, _ = m.eval(xs, ys)
logging.info("# Session")
with tf.compat.v1.Session() as sess:
    ckpt_ = tf.compat.v1.train.latest_checkpoint(hp.ckpt)
    ckpt = hp.ckpt if ckpt_ is None else ckpt_ # None: ckpt is a file. otherwise dir.
    saver = tf.compat.v1.train.Saver()
    saver.restore(sess, ckpt)
    # Export the (untrained-variable-free) graph definition next to the model.
    model_bp = hp.ckpt_pb
    if not os.path.exists(model_bp): os.makedirs(model_bp)
    tf.io.write_graph(sess.graph_def, model_bp, 'expert-graph.pb', as_text=False)
    sess.run(test_init_op)
    logging.info("# get hypotheses")
    hypotheses = get_hypotheses(num_test_batches, num_test_samples, sess, y_hat, m.idx2token)
    logging.info(type(hypotheses))
    logging.info("# write results")
    # Name the output file after the checkpoint it came from.
    model_output = ckpt.split("/")[-1]
    if not os.path.exists(hp.testdir): os.makedirs(hp.testdir)
    translation = os.path.join(hp.testdir, model_output)
    with open(translation, 'w') as fout:
        fout.write("\n".join(hypotheses))
    logging.info("# calc bleu score and append it to translation")
    #calc_bleu(hp.test2, translation)
from tg import tmpl_context
import moksha.api.widgets
from tw2.jqplugins.ui import set_ui_theme_name
import decorator
# NOTE: these are written as plain wrapper functions and then converted with
# decorator.decorator below, so each receives the wrapped callable as `f`.
def with_moksha_socket(f, *args, **kw):
    # Expose the global moksha websocket widget to templates via tmpl_context.
    tmpl_context.moksha_socket = moksha.api.widgets.moksha_socket
    return f(*args, **kw)
def with_ui_theme(f, *args, **kw):
    # Force the jQuery UI theme before the wrapped view renders.
    set_ui_theme_name('hot-sneaks') # hell yes
    return f(*args, **kw)
# Convert the helpers into signature-preserving decorators.
with_moksha_socket = decorator.decorator(with_moksha_socket)
with_ui_theme = decorator.decorator(with_ui_theme)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: Python 2 source (print statement below).
mi_cadena = "Hola Mundo"
def adios():
    # Prints "Chao" (goodbye) to stdout.
    print "Chao"
|
#!/usr/bin/env python
#! -*- coding:utf-8 -*-
#!@Author: faple
#!@Time: 2019/4/2 9:04
import configparser
import os
config = configparser.ConfigParser()
# Initialization: load conf/config.ini relative to the project root
# (two directory levels above this file).
def init():
    config.read(os.path.join(os.path.dirname((os.path.dirname(__file__))), 'conf/config.ini'))
# Fetch a value: re-reads the config file on every call via init().
# NOTE: the parameter `type` (the INI section name) shadows the builtin;
# kept as-is so keyword callers are not broken.
def getValue(type, key):
    init()
    return config.get(type, key)
# creating Employee class
# creating Employee class
class Employee:
    """An employee record that also tracks company-wide aggregates.

    Class attributes:
        count_emp: number of Employee instances created so far
        total_emp_salary: running sum of all employees' salaries
    """
    count_emp = 0
    total_emp_salary = 0

    def __init__(self, name, family, salary, department):
        self.name = name
        self.family = family
        self.salary = salary
        self.department = department
        # Update the class-level aggregates for every new employee.
        Employee.count_emp += 1
        Employee.total_emp_salary += salary

    def avg_salary(self):
        """Return the mean salary across all employees created so far."""
        return Employee.total_emp_salary / Employee.count_emp
# creating a Fulltime Employee class that inherits the properties of Employee class
# Full-time employee: everything a regular Employee has, plus an age.
class FullTime_Emp(Employee):
    def __init__(self, name, family, salary, department, age):
        """Initialize the Employee part, then record the employee's age."""
        super().__init__(name, family, salary, department)
        self.age = age
# creating the instances of Employee class
# creating the instances of Employee class
emp_1 = Employee("Rupa", "Doppalapudi", 80000, "ABAP")
emp_2 = Employee("Sri", "DK", 40000, "BI")
# accessing members(variables and functions) of class
print("\nEmployee count: ", Employee.count_emp)
print("Total Employees salary: ", Employee.total_emp_salary)
print("The average salary of employees: ", emp_2.avg_salary())
# creating the instances of Fulltime Employee class
ft_Emp_1 = FullTime_Emp("Kalyan", "Kilaru", 90000, "Basis", 25)
# accessing members(variables and functions) of class
# NOTE: class attributes are read through the instance here; they resolve to
# the same shared Employee.count_emp / total_emp_salary values.
print("\nEmployee count: ", ft_Emp_1.count_emp)
print("Total Employees salary: ", ft_Emp_1.total_emp_salary)
print("The average salary of employees: ", ft_Emp_1.avg_salary())
|
def next_avaible_drone_in(position, drones):
    """Return the drone that becomes available at *position* soonest."""
    def availability(drone):
        return drone.ends_in_target(position)
    return min(drones, key=availability)
def closest_warehouse_that_fulfills_needs(order, warehouses):
    """Return the nearest warehouse whose stock covers *order*, or None."""
    by_proximity = sorted(warehouses, key=lambda w: w.distance(order))
    for warehouse in by_proximity:
        if fullfills_needs(order, warehouse):
            return warehouse
    # TODO what happens when no warehouse can fulfill this order?
    return None
def fullfills_needs(order, warehouse):
    """True iff the warehouse stock covers every product quantity in the order."""
    for stock, need in zip(warehouse.stock, order.content):
        if stock < need:
            return False
    return True
|
# -*- test-case-name: mimic.test.test_util -*-
#
"""
Helper methods
:var fmt: strftime format for datetimes used in JSON.
"""
from __future__ import absolute_import, division, unicode_literals
import binascii
import os
import string
import calendar
from datetime import datetime, timedelta
import json
from random import choice, randint
import iso8601
from six import text_type
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
EMPTY_RESPONSE = object()
def json_from_request(request):
    """
    Load JSON input from the given Twisted Web Request object.

    The request body is read fully, decoded as UTF-8, then parsed.
    """
    body = request.content.read()
    return json.loads(body.decode("utf-8"))
def json_dump(o):
    """
    Serialize an object to JSON, unless it is :obj:`EMPTY_RESPONSE`, in which
    case the empty string will be returned.
    """
    # NOTE: the empty-response branch yields bytes while the JSON branch
    # yields text, matching the original behavior.
    return b'' if o is EMPTY_RESPONSE else json.dumps(o)
def random_string(length, selectable=None):
    """
    Create a random string of the specified length.
    :param int length: How long the string must be.
    :param str selectable: Pool of characters to draw from.  When falsy or
        unspecified, uppercase/lowercase letters, digits, and punctuation
        are used.
    :returns: A string of length `length`.
    """
    if not selectable:
        selectable = string.ascii_letters + string.digits + string.punctuation
    return ''.join(choice(selectable) for _ in range(length))
def random_hipsum(length):
    """
    Generates a random sentence using Hipsum ( http://hipsum.co/ ).
    :param length: The number of words in the desired sentence.
    :returns: A Unicode string containing `length` words.
    """
    corpus = (
        "Retro squid Portland raw denim Austin, normcore slow-carb Brooklyn. "
        "Deep v organic VHS drinking vinegar. Fingerstache locavore kogi Tumblr "
        "cred. Vice typewriter retro iPhone pour-over cred XOXO church-key, "
        "post-ironic kogi. Selvage polaroid retro, cold-pressed meh craft beer "
        "artisan pour-over taxidermy sartorial art party. Food truck church-key "
        "four loko wayfarers craft beer dreamcatcher normcore yr, jean shorts "
        "bespoke migas art party crucifix next level. Street art chia bitters, "
        "gastropub mixtape flexitarian Godard occupy lumbersexual.")
    words = corpus.split(' ')
    # Rotate the word list by a random offset, then take a prefix.
    shift = randint(1, len(words))
    rotated = words[shift:] + words[:shift]
    return ' '.join(rotated[:length])
def random_ipv4(*numbers):
    """
    Return a random IPv4 address - parts of the IP address can be provided.
    For example, ``random_ipv4(192, 168)`` will return a random 192.168.x.x
    address.
    """
    octets = [text_type(num) for num in numbers]
    # Pad with random octets, then keep exactly the first four.
    octets += [text_type(randint(0, 255)) for _ in range(4)]
    return ".".join(octets[:4])
def random_hex_generator(num):
    """
    Return `num` random bytes hex-encoded, i.e. a string of 2*`num`
    lowercase hex characters.
    """
    raw = os.urandom(num)
    return binascii.hexlify(raw).decode('utf-8')
def random_port():
    """
    Returns a random number in the range of registered non-system ports
    (the IANA "registered" range, 1024-49151).
    """
    low, high = 1024, 49151
    return randint(low, high)
def seconds_to_timestamp(seconds, format=fmt):
    """
    Return an ISO8601 Zulu timestamp given seconds since the epoch.
    The default ``format`` is the module-level ``fmt`` pattern.
    """
    moment = datetime.utcfromtimestamp(seconds)
    return moment.strftime(format)
def timestamp_to_seconds(timestamp):
    """
    Return epoch from an ISO8601 Zulu timestamp
    :param str timestamp: ISO8601 formatted time
    :return: EPOCH seconds
    :rtype: float
    """
    parsed = iso8601.parse_date(timestamp)
    whole_seconds = calendar.timegm(parsed.utctimetuple())
    return whole_seconds + parsed.microsecond / 1000000.
def not_found_response(resource='servers'):
    """
    Return a 404 response body, depending on the resource. Expects
    resource to be one of "images", "flavors", "loadbalancer", or "node".
    If the resource is unrecognized, defaults to
    "The resource could not be found."
    """
    messages = {
        'images': "Image not found.",
        'flavors': "The resource could not be found.",
        'loadbalancer': "Load balancer not found",
        'node': "Node not found"
    }
    body = {
        "itemNotFound": {
            "message": messages.get(resource, "The resource could not be found."),
            "code": 404
        }
    }
    # Load balancer and node 404s are historically returned without the
    # "itemNotFound" wrapper.
    if resource in ('loadbalancer', 'node'):
        return body["itemNotFound"]
    return body
def invalid_resource(message, response_code=400):
    """
    Build an error body carrying the given message and response code.
    The code defaults to 400 when not provided.
    """
    return dict(message=message, code=response_code)
def set_resource_status(updated_time, time_delta, status='ACTIVE',
                        current_timestamp=None):
    """
    Given the updated_time and time delta, if the updated_time + time_delta is
    greater than the current time in UTC, returns the given status; otherwise
    return None.
    :param str updated_time: The time that the server was last updated by a
        client.
    :param int time_delta: The delta, in seconds, from ``updated_time``.
    :param str status: The status to return if the time_delta has expired (i.e.
        the wall clock has advanced more than ``time_delta`` past
        ``updated_time``).
    :param float current_timestamp: The current time, in seconds from the POSIX
        epoch.  When None, the actual current UTC time is used.
    :return: ``status`` or ``None``.
    """
    # BUG FIX: the documented default current_timestamp=None previously
    # crashed in datetime.utcfromtimestamp(None); treat None as "now".
    if current_timestamp is None:
        current_datetime = datetime.utcnow()
    else:
        current_datetime = datetime.utcfromtimestamp(current_timestamp)
    last_updated_datetime = datetime.strptime(updated_time, fmt)
    expiration_interval = timedelta(seconds=int(time_delta))
    expiration_datetime = last_updated_datetime + expiration_interval
    if current_datetime >= expiration_datetime:
        return status
    return None
class Matcher(object):
    """
    Wraps a predicate so that ``==`` comparisons delegate to it, enabling
    custom matching in assertions.
    """
    def __init__(self, match_fn):
        """
        Remember *match_fn*, the predicate used to test candidate objects.
        """
        self._match_fn = match_fn

    def __eq__(self, other):
        """
        Delegate the ``==`` comparison to the wrapped predicate.
        """
        return self._match_fn(other)
def one_of_validator(*items):
    """
    Return an :mod:`attr` validator which raises a :class:`TypeError`
    if the value is not equivalent to one of the provided items.
    :param items: Items to compare against
    :return: a callable that returns with None or raises :class:`TypeError`
    """
    def validate(inst, attribute, value):
        if value in items:
            return
        raise TypeError("{0} must be one of {1}".format(
            attribute.name, items))
    return validate
|
import sys
import re
from bot_feature import *
if __name__ == '__main__':
    # Feed the first CLI argument to miaow() (imported star-style from
    # bot_feature) and print the reply.
    # NOTE(review): raises IndexError when run without arguments — confirm
    # whether a usage message is wanted.
    print(miaow(sys.argv[1]))
|
from rubicon_ml.viz.dashboard import Dashboard
from rubicon_ml.viz.dataframe_plot import DataframePlot
from rubicon_ml.viz.experiments_table import ExperimentsTable
from rubicon_ml.viz.metric_correlation_plot import MetricCorrelationPlot
from rubicon_ml.viz.metric_lists_comparison import MetricListsComparison
# Public names re-exported by `from rubicon_ml.viz import *`.
__all__ = [
    "Dashboard",
    "DataframePlot",
    "ExperimentsTable",
    "MetricListsComparison",
    "MetricCorrelationPlot",
]
|
#Aim :- To understanding the gradients and various operators to detect edges
#Reference - Udacity introduction to computer vision lesson 2A-L5
try:
    import cv2
    import numpy as np
    from matplotlib import pyplot as plt
except :
    # NOTE(review): this bare except swallows the real import error and the
    # script still crashes below at cv2.imread — consider exiting here.
    print ("please install the dependencies \n using command pip3 install requirements.txt")
image=cv2.imread("images/obama.jpg") #reading the image into opencv
# Single-channel grayscale copy used for the gradient computations below.
image_bw=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
def convolution(image, kernal):
    """
    Correlate a 2-D single-channel image with a square odd-sized kernel,
    zero-padding the borders so the output has the same shape as the input.

    :param image: 2-D numpy array (grayscale image).
    :param kernal: square 2-D numpy array with odd side length (e.g. 3x3).
    :returns: array shaped like ``image``; each value is int(sum(roi * kernal))
        cast into ``image``'s dtype.
    """
    padding = int(kernal.shape[0] - 1)
    pd = int(padding / 2)
    ## Padding the image so that we can convolve the corners and edges
    image_padded = np.zeros((image.shape[0] + padding, image.shape[1] + padding), dtype="float32")
    image_padded[pd:-pd, pd:-pd] = image
    image_out = np.zeros_like(image)  # to store the convolution result
    # BUG FIX: the original loops read the module-level `image_bw` instead of
    # the `image` parameter, and swapped row/column indexing — both broke the
    # function for non-square inputs or any image other than the global one.
    for row in range(image.shape[0]):
        for col in range(image.shape[1]):
            roi = image_padded[row:row + padding + 1, col:col + padding + 1]
            image_out[row, col] = int(np.sum(roi * kernal))
    return image_out
# Sobel gradient kernels, scaled by 1/8 to normalize the coefficient weights:
# kernal_x responds to intensity change along x, kernal_y along y.
kernal_x=(1/8)*np.array([[-1,0,1],[-2,0,2],[-1,0,1]],dtype="float32")
kernal_y=(1/8)*np.array([[1,2,1],[0,0,0],[-1,-2,-1]],dtype="float32")
sobel_x=convolution(image_bw,kernal_x)
sobel_y=convolution(image_bw,kernal_y)
# Gradient magnitude from the two directional responses.
temp=np.sqrt(np.square(sobel_x)+np.square(sobel_y))
temp=np.array(temp,dtype="float32")
cv2.imshow("sobel x ",sobel_x)
cv2.imshow("sobel y ",sobel_y)
# NOTE(review): imshow treats float32 data as [0, 1]; the magnitude may need
# scaling to display correctly — confirm.
cv2.imshow("Magnitude",temp)
cv2.imshow("original Image ",image_bw)
cv2.waitKey(0) #to keep the ouput window to open
cv2.destroyAllWindows() #pressing anykey to close the ouput windows
#Lists
'''
1. Stored collection of different data types
2. We can modify
3. Mutable
4. Addressing(Indexing) in order manner
5. Random access possible
6. Duplicate possible
---------------------
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList)
print("\nList access using possitive index")
print(MyList[0])
print(MyList[1])
print(MyList[2])
print(MyList[3])
print(MyList[4])
print(MyList[5])
output:
[45, 67.89, (4+9j), 'Data', True, None]
List access using possitive index
45
67.89
(4+9j)
Data
True
None
----------------------
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList)
print("\nList access using possitive index")
print(MyList[0])
print(MyList[1])
print(MyList[2])
print(MyList[3])
print(MyList[4])
print(MyList[5])
print(MyList[7])
output:
[45, 67.89, (4+9j), 'Data', True, None]
List access using possitive index
45
67.89
(4+9j)
Data
True
None
Traceback (most recent call last):
File "C:/Users/HP1/AppData/Local/Programs/Python/Python37/Lists.py", line 45, in <module>
print(MyList[7])
IndexError: list index out of range
------------------
#negative indexing
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList)
print("\nList access using possitive index")
print(MyList[-6])
print(MyList[-5])
print(MyList[-4])
print(MyList[-3])
print(MyList[-2])
print(MyList[-1])
print(MyList[-7])
output:
List access using possitive index
45
67.89
(4+9j)
Data
True
None
Traceback (most recent call last):
File "C:/Users/HP1/AppData/Local/Programs/Python/Python37/Lists.py", line 76, in <module>
print(MyList[-7])
IndexError: list index out of range
-----------------
#MyList Slicing
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList[0:6])
print(MyList[0:6:1])
print(MyList[:6:1])
print(MyList[0::1])
print(MyList[0::])
output:
[45, 67.89, (4+9j), 'Data', True, None]
[45, 67.89, (4+9j), 'Data', True, None]
[45, 67.89, (4+9j), 'Data', True, None]
[45, 67.89, (4+9j), 'Data', True, None]
[45, 67.89, (4+9j), 'Data', True, None]
--------------
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList[0:6:1])
print(MyList[0:6:2])
print(MyList[0:6:3])
print(MyList[0:6:4])
print(MyList[0:6:5])
print(MyList[0:6:6])
output:
[45, 67.89, (4+9j), 'Data', True, None]
[45, (4+9j), True]
[45, 'Data']
[45, True]
[45, None]
[45]
----------------------
#List reverse printing
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList[5::-1])
print(MyList[-6::1])
print(MyList[-1:-7:-1])
output:
[None, True, 'Data', (4+9j), 67.89, 45]
[45, 67.89, (4+9j), 'Data', True, None]
[None, True, 'Data', (4+9j), 67.89, 45]
--------------------
#List Length
MyList=[45,67.89, 4+9j,"Data",True,None]
print(len(MyList))
output:
6
------------------------
#Delete List
#List Length
MyList=[45,67.89, 4+9j,"Data",True,None]
del(MyList)
print(MyList)
output:
Traceback (most recent call last):
File "C:/Users/HP1/AppData/Local/Programs/Python/Python37/Lists.py", line 160, in <module>
print(MyList)
NameError: name 'MyList' is not defined
-------------------
#List Modification
MyList=[45,67.89, 4+9j,"Data",True,None]
MyList=MyList*2
print(MyList)
output:
[45, 67.89, (4+9j), 'Data', True, None, 45, 67.89, (4+9j), 'Data', True, None]
-------------------
#Using built-in functions
#list clear
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList)
MyList.clear()
print(MyList)
output
[45, 67.89, (4+9j), 'Data', True, None]
[]
-------------------
#Append
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList)
MyList.append("orange")
print(MyList)
output:
[45, 67.89, (4+9j), 'Data', True, None]
[45, 67.89, (4+9j), 'Data', True, None, 'orange']
-------------------
#Pop
MyList=[45,67.89, 4+9j,"Data",True,None]
print(MyList)
MyList.pop()
print(MyList)
MyList.pop(2)
print(MyList)
output:
[45, 67.89, (4+9j), 'Data', True, None]
[45, 67.89, (4+9j), 'Data', True]
[45, 67.89, 'Data', True]
-------------------
#Count
MyList=[45,67.89, 4+9j,"Data",True,None,"Data"]
print(MyList.count("Data"))
output:
2
-------------------
#Index
MyList=[45,67.89, 4+9j,"Data",True,None,"Data"]
print(MyList.index("Data"))
output:
3
#show only the first occurrence
-------------------
#Insert
MyList=[45,67.89, 4+9j,"Data",True,None,"Data"]
print(MyList)
MyList.insert(1,"jeeva")
print(MyList)
output:
[45, 67.89, (4+9j), 'Data', True, None, 'Data']
[45, 'jeeva', 67.89, (4+9j), 'Data', True, None, 'Data']
-------------------
#Remove
MyList=[45,67.89, 4+9j,"Data",True,None,"Data"]
MyList.remove("Data")
print(MyList)
output:
[45, 67.89, (4+9j), True, None, 'Data']
#show only the first occurrence
-------------------
#reverse
MyList=[45,67.89, 4+9j,"Data",True,None,"Data"]
MyList.reverse()
print(MyList)
output:
['Data', None, True, 'Data', (4+9j), 67.89, 45]
-------------------
#Copy
MyList=[45,67.89, 4+9j,"Data",True,None,"Data"]
print(MyList)
print(id(MyList))
y=MyList
x=MyList.copy()
print(x)
print(id(x))
print(y)
print(id(y))
output:
[45, 67.89, (4+9j), 'Data', True, None, 'Data']
68258333384
[45, 67.89, (4+9j), 'Data', True, None, 'Data']
68258333256
[45, 67.89, (4+9j), 'Data', True, None, 'Data']
68258333384
-------------------
#Sort
#data type should be same in the list
MyList=[45,67.89, 4, 788]
MyList.sort()
print(MyList)
MyList.sort(reverse=True)
print(MyList)
output:
[4, 45, 67.89, 788]
[788, 67.89, 45, 4]
-------------------
'''
|
import re
import logging
import atomacos
import pyautogui
import atomacos.errors
from utils.FileUtils import FileUtils
from AutoTradingService.AutoRefresh import AutoRefresh
class AutoOrder(FileUtils):
    """Drives a macOS trading app's order window through the accessibility
    (AX) API exposed by atomacos.

    Settings come from od_config.yaml under the 'order' key: the app bundle
    id, widget titles, a market-code -> combo-box-index map, and the names of
    the confirm-dialog buttons.
    """
    def __init__(self):
        super().__init__()
        logging.info('@ Start Auto Order Service ... ')
        self.config = self.load_yaml('od_config.yaml')['order']
        # Background service keeping the app's data refreshed while ordering.
        self.refresh_service = AutoRefresh()
        self.refresh_service.start()
    def activate(self):
        """Launch (or focus) the trading app and return its AX reference."""
        atomacos.launchAppByBundleId(self.config['appId'])
        app = atomacos.getAppRefByBundleId(self.config['appId'])
        return app
    def clickCheckBox(self, mainWindow, titleName=''):
        """Press the checkbox titled *titleName*, or the configured default
        when an empty title is given."""
        axTitle = self.config['defaultCheckBoxTitle'] if titleName == '' else titleName
        checkBox = mainWindow.findFirst(AXRole='AXCheckBox', AXTitle=axTitle)
        checkBox.Press()
    @staticmethod
    def clickComboBox(ComboBox, itemIndex=0):
        """Open a combo box and click its *itemIndex*-th dropdown entry."""
        CodeButton = ComboBox.AXChildren[0]
        CodeButton.Press()
        CodeText = ComboBox.AXChildren[1].AXChildren[0].AXChildren[itemIndex]
        CodeText.clickMouseButtonLeft(CodeText.AXPosition)
    def setMarketCode(self, allComboBox, marketCode='SZ'):
        """Select the market (e.g. 'SZ') in the first combo box; raise
        ValueError when the code is absent from the config map."""
        marketCodeComboBox = allComboBox[0]
        if marketCode in self.config['marketCode']:
            # Renamed from the misspelled local `marketCoedIndex`.
            marketCodeIndex = int(self.config['marketCode'][marketCode])
            AutoOrder.clickComboBox(marketCodeComboBox, marketCodeIndex)
        else:
            logging.error("Market Code is invalid: " + marketCode)
            raise ValueError
    @staticmethod
    def checkSocketCodeValid(socketCode):
        """Validate that *socketCode* is exactly six digits; raise ValueError
        otherwise, returning True on success."""
        if re.match('[0-9]+$', socketCode) is None:
            # BUG FIX: extra logging args require a %s placeholder; the old
            # logging.error('msg: ', arg) form made the logging module raise a
            # formatting error internally and drop the message.
            logging.error('SocketCode can contain number only: %s', socketCode)
            raise ValueError
        elif len(socketCode) != 6:
            logging.error('SocketCode should be length of 6: %s', socketCode)
            raise ValueError
        return True
    @staticmethod
    def setStockCode(allTextField, socketCode=""):
        """Type the validated stock code into the first text field."""
        AutoOrder.checkSocketCodeValid(socketCode)
        socketCodeText = allTextField[0]
        socketCodeText.AXSelectedText = socketCode
        socketCodeText.Confirm()
    @staticmethod
    def checkPriceWayValid(priceWay):
        """Ensure *priceWay* is in 1..5; raise ValueError otherwise."""
        if priceWay not in (1, 2, 3, 4, 5):
            # BUG FIX: placeholder added (see checkSocketCodeValid).
            logging.error('Price Way should be 1 to 5: %s', str(priceWay))
            raise ValueError
        else:
            return True
    @staticmethod
    def setPriceWay(allComboBox, priceWay):
        """Pick pricing mode *priceWay* (1-based) in the second combo box."""
        AutoOrder.checkPriceWayValid(priceWay)
        AutoOrder.clickComboBox(allComboBox[1], priceWay - 1)
    @staticmethod
    def setPrice(allTextField, price):
        """Fill in a limit price; a non-positive price means market price."""
        if price <= 0:
            print('@ Trading on original market price')
        else:
            allTextField[2].AXValue = str(price)
    @staticmethod
    def setQuantity(allTextField, allQuantityButton, quantity):
        """Fill in the quantity; a non-positive quantity clicks the
        configured "all quantity" button instead."""
        if quantity <= 0:
            allQuantityButton.clickMouseButtonLeft(allQuantityButton.AXPosition)
        else:
            allTextField[1].AXValue = str(quantity)
    def confirm(self, app, mainWindow, directionTab, isAction):
        """Click the buy/sell button, log the confirm dialog's summary, and
        press the confirm button selected by *isAction* from config."""
        try:
            actionButton = mainWindow.findFirst(AXRole='AXButton', AXTitle=directionTab + self.config['actionText'])
            actionButton.clickMouseButtonLeft(actionButton.AXPosition)
        except atomacos.errors.AXErrorCannotComplete:
            logging.info('Action not complete error, cannot find a way to handle this fail')
        dialog = app.windows()[0]
        confirmInfo = dialog.staticTexts()[1]
        logging.info('@' + '|'.join(confirmInfo.AXValue.split('\r')[2:7]))
        actionConfirmName = self.config['confirmAction'][isAction]
        ActionConfirmButton = dialog.buttons(actionConfirmName)[0]
        ActionConfirmButton.Press()
    def order(self, directionTab, marketCode, socketCode, priceWay=1, price=-1, quantity=-1, isAction=False):
        """Place one order end-to-end: activate the app, select direction,
        market, stock, price (or price-way), quantity, then confirm."""
        app = self.activate()
        mainWindow = app.windows()[0]
        self.clickCheckBox(mainWindow, directionTab)
        allComboBox = mainWindow.findAll(AXRole='AXComboBox')
        allTextField = mainWindow.textFields()
        allQuantityButton = mainWindow.findFirst(AXRole='AXButton', AXTitle=self.config['allQuantityTitle'])
        self.setMarketCode(allComboBox, marketCode)
        AutoOrder.setStockCode(allTextField, socketCode)
        if priceWay != 1:
            AutoOrder.setPriceWay(allComboBox, priceWay)
        else:
            AutoOrder.setPrice(allTextField, price)
        AutoOrder.setQuantity(allTextField, allQuantityButton, quantity)
        self.confirm(app, mainWindow, directionTab, isAction)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    ao = AutoOrder()
    # '卖出' is the sell tab title passed as directionTab.
    # NOTE(review): isAction=False picks config['confirmAction'][False] in
    # confirm() — presumably the cancel/dry-run button; verify od_config.yaml.
    ao.order('卖出', 'SZ', '002024', price=10, quantity=1, isAction=False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
# Hard delete: removes the language row outright and returns the deleted row.
# NOTE(review): "$1" is a positional parameter — looks like asyncpg/PostgreSQL
# placeholder syntax; confirm the database driver before reuse.
delete_language_element_permanent_query = "DELETE FROM public.language AS lng WHERE lng.id = $1 RETURNING *;"
# Soft delete: flags the row deleted and inactive instead of removing it,
# returning the updated row.
delete_language_element_query = """
UPDATE public.language AS lng SET deleted = TRUE,
active = FALSE WHERE lng.id = $1 RETURNING *;
"""
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load bigquery datasets data into Inventory."""
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import parser
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadBigqueryDatasetsPipeline(base_pipeline.BasePipeline):
    """Pipeline to load bigquery datasets data into Inventory."""
    # Inventory resource name handed to the base pipeline's _load() in run().
    RESOURCE_NAME = 'bigquery_datasets'
    # NOTE(review): not referenced anywhere in this class — possibly used by
    # callers elsewhere or dead; confirm before removing.
    MYSQL_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
    def _retrieve_bigquery_projectids(self):
        """Retrieve a list of bigquery projectids.
        Returns:
            list: Project ids.
        """
        return self.safe_api_call('get_bigquery_projectids')
    def _retrieve_dataset_access(self, project_id, dataset_id):
        """Retrieve the bigquery dataset resources from GCP.
        Args:
            project_id (str): A project id.
            dataset_id (str): A dataset id.
        Returns:
            list: Access lists for a given project_id and dataset_id.
            [{'role': 'WRITER', 'specialGroup': 'projectWriters'},
            {'role': 'OWNER', 'specialGroup': 'projectOwners'},
            {'role': 'OWNER', 'userByEmail': 'user@domain.com'},
            {'role': 'READER', 'specialGroup': 'projectReaders'}]
        """
        return self.safe_api_call('get_dataset_access', project_id, dataset_id)
    def _retrieve_dataset_project_map(self, project_ids):
        """Retrieve the bigquery datasets for all requested project ids.
        Args:
            project_ids (list): Project ids.
        Returns:
            list: A list of objects like:
            [[{'datasetId': 'test', 'projectId': 'bq-test'},
            {'datasetId': 'test', 'projectId': 'bq-test'}],
            [{'datasetId': 'test', 'projectId': 'bq-test'},
            {'datasetId': 'test', 'projectId': 'bq-test'}]]
        """
        dataset_project_map = []
        for project_id in project_ids:
            result = self.safe_api_call('get_datasets_for_projectid',
                                        project_id)
            # Skip projects for which the API call returned nothing.
            if result:
                dataset_project_map.append(result)
        return dataset_project_map
    def _retrieve_dataset_access_map(self, dataset_project_map):
        """Iterate through projects and their datasets to get ACLs.
        Args:
            dataset_project_map (list): A list of projects and their datasets.
                See the output of _retrieve_dataset_project_map().
        Returns:
            list: A list of tuples in the form of:
                [(project_id,
                  dataset_id,
                  {dataset_access_object}),
                 ...]
        """
        dataset_project_access_map = []
        for map_item in dataset_project_map:
            for item in map_item:
                project_id = item.get('projectId')
                dataset_id = item.get('datasetId')
                dataset_acl = self._retrieve_dataset_access(project_id,
                                                            dataset_id)
                # Datasets whose ACL could not be fetched are dropped.
                if dataset_acl:
                    dataset_project_access_map.append(
                        (project_id, dataset_id, dataset_acl)
                    )
        return dataset_project_access_map
    def _transform(self, resource_from_api):
        """Yield an iterator of loadable groups.
        Args:
            resource_from_api (list): A list of tuples in the form of:
                [(project_id, dataset_id, {dataset_access_object}),...]
        Yields:
            iterable: An iterable of project_id, dataset_id, and access detail.
        """
        # One output row per ACL entry; absent keys become None columns
        # because acl.get() defaults to None.
        for (project_id, dataset_id, access) in resource_from_api:
            for acl in access:
                yield {
                    'project_id': project_id,
                    'dataset_id': dataset_id,
                    'access_domain': acl.get('domain'),
                    'access_user_by_email': acl.get('userByEmail'),
                    'access_special_group': acl.get('specialGroup'),
                    'access_group_by_email': acl.get('groupByEmail'),
                    'role': acl.get('role'),
                    'access_view_project_id': acl.get(
                        'view', {}).get('projectId'),
                    'access_view_table_id': acl.get(
                        'view', {}).get('tableId'),
                    'access_view_dataset_id': acl.get(
                        'view', {}).get('datasetId'),
                    'raw_access_map': parser.json_stringify(acl)
                }
    def _retrieve(self):
        """Retrieve dataset access lists.
        Returns:
            bigquery_dataset: A bigquery dataset access map.
                See _retrieve_dataset_access_map() or
                None if there are no bigquery projects.
        """
        project_ids = self._retrieve_bigquery_projectids()
        if not project_ids:
            LOGGER.info('No bigquery project ids found.')
            return None
        dataset_project_map = self._retrieve_dataset_project_map(project_ids)
        return self._retrieve_dataset_access_map(dataset_project_map)
    def run(self):
        """Runs the actual data fetching pipeline."""
        dataset_project_access_map = self._retrieve()
        if dataset_project_access_map is not None:
            loadable_datasets = self._transform(dataset_project_access_map)
            self._load(self.RESOURCE_NAME, loadable_datasets)
        self._get_loaded_count()
|
import sys
import pytest
import shutil
# Probe for optional backends once at import time; the resulting booleans
# feed the pytest skip markers below.
try:
    import pycuda.driver as drv
    drv.init()
    cuda_present = True
except Exception:
    # PyCuda missing, or no usable CUDA device to initialize.
    cuda_present = False
try:
    import pyopencl
    opencl_present = True
    # Guard against a namespace-package shim masquerading as pyopencl.
    if 'namespace' in str(sys.modules['pyopencl']):
        opencl_present = False
    # No platforms means no usable OpenCL runtime.
    if len(pyopencl.get_platforms()) == 0:
        opencl_present = False
except Exception:
    opencl_present = False
# gfortran on PATH is required by the Fortran-based tests.
gfortran_present = shutil.which("gfortran") is not None
# Reusable pytest markers for tests that need a particular backend/toolchain.
skip_if_no_cuda = pytest.mark.skipif(not cuda_present, reason="PyCuda not installed or no CUDA device detected")
skip_if_no_opencl = pytest.mark.skipif(not opencl_present, reason="PyOpenCL not installed or no OpenCL device detected")
skip_if_no_gfortran = pytest.mark.skipif(not gfortran_present, reason="No gfortran on PATH")
|
import json
from datetime import datetime, timedelta
from django.db import transaction,DatabaseError
from quicklook.models import UserQuickLook
from leaderboard.models import Score
def _get_lst(lst,i,default = None):
""" get method for list similar to dictionary's get method """
try:
return lst[i];
except IndexError:
return default
except TypeError:
return default
def str_to_datetime(str_date):
    """Parse a 'YYYY-MM-DD' string into a datetime at midnight."""
    year, month, day = (int(part) for part in str_date.split('-'))
    return datetime(year, month, day)
def _hours_to_hours_min(hours):
if hours or hours is not None:
mins = hours * 60
hours,mins = divmod(mins,60)
hours = round(hours)
mins = round(mins)
if mins < 10:
mins = "{:02d}".format(mins)
return "{}:{}".format(hours,mins)
return None
def _str_to_hours_min_sec(str_duration,time_format='hour',time_pattern="hh:mm:ss"):
'''
Expect duration in this format - "hh:mm:ss"
convert in into hours, min or sec
Arguments
- str_duration : type String, time in format 'hh:mm:ss'
- time_format: type String, possible values are [hour, minute, seconds]
specified in what format time to be converted
- time_pattern: type String, possible values are subtring of "hh:mm:ss"
specify the position of hour, minute and second in the str_duration
'''
if str_duration:
hms = str_duration.split(":")
pattern_lst = time_pattern.split(":")
pattern_indexed = {
"hour":pattern_lst.index("hh") if "hh" in pattern_lst else None,
"minute":pattern_lst.index("mm") if "mm" in pattern_lst else None,
"second":pattern_lst.index("ss") if "ss" in pattern_lst else None
}
h = int(_get_lst(hms,pattern_indexed["hour"],0))\
if _get_lst(hms,pattern_indexed["hour"],0) else 0
m = int(_get_lst(hms,pattern_indexed["minute"],0))\
if _get_lst(hms,pattern_indexed["minute"],0) else 0
s = int(_get_lst(hms,pattern_indexed["second"],0))\
if _get_lst(hms,pattern_indexed["second"],0) else 0
t = 0
if time_format == 'hour':
t = h + (m/60) + (s/3600)
elif time_format == 'minute':
t = (h*60) + m + (s/60)
else:
t = (h * 3600) + (m * 60) + s
return round(t,3)
return 0
def _update_helper(instance, data_dict):
    '''
    Helper function to update the instance
    with provided key,value pairs and save it atomically.
    On database failure the instance's attributes are restored in memory.
    '''
    attr_original_val = {}
    for attr, value in data_dict.items():
        attr_original_val[attr] = getattr(instance, attr)
        setattr(instance, attr, value)
    try:
        with transaction.atomic():
            instance.save()
    except DatabaseError:
        # BUG FIX: the original only restored the last attribute touched by
        # the loop (the leaked loop variable); roll back every modified one.
        for attr, original in attr_original_val.items():
            setattr(instance, attr, original)
def _safe_get_mobj(obj,attr, default):
'''
Takes a model object and return the value
of attribute from the object. If value is None
then return the default provided
type obj: model objct
type attr: string
type default: any type
'''
if obj and attr:
val = obj.__dict__.get(attr,None)
return val if val else default
return None
def _get_model_related_fields_names(model):
''' Returns the list of all the related fields in the model'''
related_fields_names = [f.name for f in model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete]
return related_fields_names
def _get_queryset(model,user,from_dt, to_dt, cache=False):
''' Returns the queryset and cache all the related fields '''
if cache:
related_fields = _get_model_related_fields_names(model)
qs = model.objects.select_related(*related_fields).filter(
user = user,created_at__range = (from_dt.date(),to_dt.date()))
return qs
else:
return model.objects.filter(
user = user,created_at__range = (from_dt.date(),to_dt.date()))
def _update_score_instance(instance, old_score, current_score):
if instance and old_score and current_score and old_score != current_score:
_update_helper(instance,{"score":current_score})
def _update_scores(ql_data,score_data):
    '''
    Update existing Score rows for one day from that day's UserQuickLook.

    Arguments
    - ql_data: UserQuickLook instance with its related *_ql_cache objects
      populated (see _get_queryset(..., cache=True))
    - score_data: dict mapping score-category name -> existing Score instance

    For each category: read the current value off the cached related object,
    read the previously stored score, and persist only when they differ
    (delegated to _update_score_instance).
    '''
    cur_score_oh_gpa = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'overall_health_gpa',None)
    old_score_oh_gpa = _safe_get_mobj(score_data.get('oh_gpa',None),'score',None)
    _update_score_instance(score_data.get('oh_gpa',None),old_score_oh_gpa, cur_score_oh_gpa)
    cur_score_mne_gpa = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'movement_non_exercise_steps_gpa',None)
    old_score_mne_gpa = _safe_get_mobj(score_data.get('mne_gpa',None),'score',None)
    _update_score_instance(score_data.get('mne_gpa',None),old_score_mne_gpa,cur_score_mne_gpa)
    # Movement consistency is stored as JSON; its score is the
    # 'inactive_hours' field of that blob.
    mc = _safe_get_mobj(
        ql_data.__dict__.get('_steps_ql_cache'),'movement_consistency',None)
    if mc:
        mc = json.loads(mc)['inactive_hours']
    cur_score_mc = mc
    old_score_mc = _safe_get_mobj(score_data.get('mc', None),'score',None)
    _update_score_instance(score_data.get('mc', None),old_score_mc,cur_score_mc)
    cur_score_avg_sleep = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'avg_sleep_per_night_gpa',None)
    old_score_avg_sleep = _safe_get_mobj(score_data.get('avg_sleep',None),'score',None)
    _update_score_instance(score_data.get('avg_sleep',None),
        old_score_avg_sleep,cur_score_avg_sleep)
    cur_score_ec = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'exercise_consistency_score',None)
    old_score_ec = _safe_get_mobj(score_data.get('ec',None),'score',None)
    _update_score_instance(score_data.get('ec',None),old_score_ec, cur_score_ec)
    cur_score_prcnt_uf = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'prcnt_unprocessed_food_consumed_gpa',None)
    old_score_prcnt_uf = _safe_get_mobj(score_data.get('prcnt_uf',None),'score',None)
    _update_score_instance(score_data.get('prcnt_uf',None),
        old_score_prcnt_uf,cur_score_prcnt_uf)
    cur_score_alcohol_drink = _safe_get_mobj(
        ql_data.__dict__.get('_alcohol_ql_cache'),'alcohol_week',None)
    old_score_alcohol_drink = _safe_get_mobj(score_data.get('alcohol_drink'),'score',None)
    _update_score_instance(score_data.get('alcohol_drink'),
        old_score_alcohol_drink,cur_score_alcohol_drink)
    cur_score_total_steps = _safe_get_mobj(
        ql_data.__dict__.get('_steps_ql_cache'),'total_steps',None)
    old_score_total_steps = _safe_get_mobj(score_data.get('total_steps',None),'score',None)
    _update_score_instance(score_data.get('total_steps',None),
        old_score_total_steps,cur_score_total_steps)
    # NOTE(review): 'floor_climed' mirrors the (misspelled) model field name.
    cur_score_floor_climbed = _safe_get_mobj(
        ql_data.__dict__.get('_steps_ql_cache'),'floor_climed',None)
    old_score_floor_climbed = _safe_get_mobj(score_data.get('floor_climbed', None),'score',None)
    _update_score_instance(score_data.get('floor_climbed', None),
        old_score_floor_climbed,cur_score_floor_climbed)
    cur_score_resting_hr = _safe_get_mobj(
        ql_data.__dict__.get('_exercise_reporting_ql_cache'),'resting_hr_last_night',None)
    old_score_resting_hr = _safe_get_mobj(score_data.get('resting_hr',None),'score',None)
    _update_score_instance(score_data.get('resting_hr',None),
        old_score_resting_hr,cur_score_resting_hr)
    # Sleep durations arrive as "hh:mm" strings; convert to fractional hours.
    cur_score_deep_sleep = _safe_get_mobj(
        ql_data.__dict__.get('_sleep_ql_cache'),'deep_sleep',None)
    if cur_score_deep_sleep:
        cur_score_deep_sleep = _str_to_hours_min_sec(cur_score_deep_sleep,"hour","hh:mm")
    old_score_deep_sleep = _safe_get_mobj(score_data.get('deep_sleep',None),'score',None)
    _update_score_instance(score_data.get('deep_sleep',None),
        old_score_deep_sleep,cur_score_deep_sleep)
    cur_score_awake_time = _safe_get_mobj(
        ql_data.__dict__.get('_sleep_ql_cache'),'awake_time',None)
    if cur_score_awake_time:
        cur_score_awake_time = _str_to_hours_min_sec(cur_score_awake_time,"hour","hh:mm")
    old_score_awake_time = _safe_get_mobj(score_data.get('awake_time',None),'score',None)
    _update_score_instance(score_data.get('awake_time',None),
        old_score_awake_time,cur_score_awake_time)
def _create_score_instance(user, current_date, category, score):
    """Build (without saving) one Score row for the given user, date,
    category and value."""
    return Score(
        user=user,
        created_at=current_date,
        category=category,
        score=score,
    )
def _create_scores(user,current_date,ql_data):
    '''
    Create the score instances for one day from UserQuickLook data.

    Arguments
    - user: "User" instance the scores belong to
    - current_date: datetime for the day being scored
    - ql_data: UserQuickLook instance with its related *_ql_cache objects
      populated (see _get_queryset(..., cache=True))

    Only categories with a truthy current value get a Score row; the rows
    are saved with a single bulk_create at the end.
    '''
    score_instances = []
    cur_score_oh_gpa = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'overall_health_gpa',None)
    if cur_score_oh_gpa:
        score_instances.append(_create_score_instance(user,current_date,
            "oh_gpa",cur_score_oh_gpa))
    cur_score_mne_gpa = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'movement_non_exercise_steps_gpa',None)
    if cur_score_mne_gpa:
        score_instances.append(_create_score_instance(user,current_date,
            "mne_gpa",cur_score_mne_gpa))
    # Movement consistency is stored as JSON; its score is 'inactive_hours'.
    mc = _safe_get_mobj(
        ql_data.__dict__.get('_steps_ql_cache'),'movement_consistency',None)
    if mc:
        mc = json.loads(mc)['inactive_hours']
    cur_score_mc = mc
    if cur_score_mc:
        score_instances.append(_create_score_instance(user,current_date,"mc",cur_score_mc))
    cur_score_avg_sleep = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'avg_sleep_per_night_gpa',None)
    if cur_score_avg_sleep:
        score_instances.append(_create_score_instance(user,current_date,
            "avg_sleep",cur_score_avg_sleep))
    cur_score_ec = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'exercise_consistency_score',None)
    if cur_score_ec:
        score_instances.append(_create_score_instance(user,current_date,"ec",cur_score_ec))
    cur_score_prcnt_uf = _safe_get_mobj(
        ql_data.__dict__.get('_grades_ql_cache'),'prcnt_unprocessed_food_consumed_gpa',None)
    if cur_score_prcnt_uf:
        score_instances.append(_create_score_instance(user,current_date,
            "prcnt_uf",cur_score_prcnt_uf))
    cur_score_alcohol_drink = _safe_get_mobj(
        ql_data.__dict__.get('_alcohol_ql_cache'),'alcohol_week',None)
    if cur_score_alcohol_drink:
        score_instances.append(_create_score_instance(user,current_date,
            "alcohol_drink",cur_score_alcohol_drink))
    cur_score_total_steps = _safe_get_mobj(
        ql_data.__dict__.get('_steps_ql_cache'),'total_steps',None)
    if cur_score_total_steps:
        score_instances.append(_create_score_instance(user,current_date,
            "total_steps",cur_score_total_steps))
    # NOTE(review): 'floor_climed' mirrors the (misspelled) model field name.
    cur_score_floor_climbed = _safe_get_mobj(
        ql_data.__dict__.get('_steps_ql_cache'),'floor_climed',None)
    if cur_score_floor_climbed:
        score_instances.append(_create_score_instance(user,current_date,
            "floor_climbed",cur_score_floor_climbed))
    cur_score_resting_hr = _safe_get_mobj(
        ql_data.__dict__.get('_exercise_reporting_ql_cache'),'resting_hr_last_night',None)
    if cur_score_resting_hr:
        score_instances.append(_create_score_instance(user,current_date,
            "resting_hr",cur_score_resting_hr))
    # Sleep durations arrive as "hh:mm" strings; convert to fractional hours.
    cur_score_deep_sleep = _safe_get_mobj(
        ql_data.__dict__.get('_sleep_ql_cache'),'deep_sleep',None)
    if cur_score_deep_sleep:
        cur_score_deep_sleep = _str_to_hours_min_sec(cur_score_deep_sleep,"hour","hh:mm")
        score_instances.append(_create_score_instance(user,current_date,
            "deep_sleep",cur_score_deep_sleep))
    cur_score_awake_time = _safe_get_mobj(
        ql_data.__dict__.get('_sleep_ql_cache'),'awake_time',None)
    if cur_score_awake_time:
        cur_score_awake_time = _str_to_hours_min_sec(cur_score_awake_time,"hour","hh:mm")
        score_instances.append(_create_score_instance(user,current_date,
            "awake_time",cur_score_awake_time))
    # create score instance in bulk
    Score.objects.bulk_create(score_instances)
def create_update_score(user, from_date, to_date):
    """Create or update Score rows for every day in [from_date, to_date].

    Arguments
    - user: "User" instance representing the currently logged in user
    - from_date: date string in format YYYY-MM-DD
    - to_date: date string in format YYYY-MM-DD
    """
    start = str_to_datetime(from_date)
    end = str_to_datetime(to_date)

    # Quicklook rows keyed by their creation date string.
    ql_by_date = {
        ql.created_at.strftime("%Y-%m-%d"): ql
        for ql in _get_queryset(UserQuickLook, user, start, end, cache=True)
    }

    # Existing Score rows grouped as {date string: {category: Score}}.
    scores_by_date = {}
    for score in _get_queryset(Score, user, start, end):
        day_key = score.created_at.strftime("%Y-%m-%d")
        scores_by_date.setdefault(day_key, {})[score.category] = score

    day = start
    while day <= end:
        key = day.strftime("%Y-%m-%d")
        ql = ql_by_date.get(key)
        existing = scores_by_date.get(key)
        if ql and existing:
            # Scores already exist for this day -- refresh them in place.
            _update_scores(ql, existing)
        elif ql:
            # Quicklook exists but no scores yet -- create them.
            _create_scores(user, day, ql)
        day += timedelta(days=1)
class Pokemon(object):
    """Thin wrapper around a raw pokemon dict returned by the API."""

    def __init__(self, data):
        # Keep the full payload and promote the commonly used fields.
        self.data = data
        self.id = data["id"]
        self.cp = data["cp"]
        self.pokemon_id = data["pokemon_id"]

    def release(self, api):
        """Ask the API to release this pokemon; returns the API response."""
        return api.release_pokemon(pokemon_id=self.id)
|
# -*- coding: utf-8 -*-
from math import sqrt, acos, pi
import math
from decimal import Decimal, getcontext
import numpy
from decimal import Decimal
class Vector(object):
    """
    This class gives simple functionalities related to a Vector
    params:
        coordinates: iterable of vector component values
        dimension: number of dimensions of the vector,
            equal to the length of coordinates
    Example initialization:
        v = Vector([1, 2, 3])
        v.coordinates => (1, 2, 3)
        v.dimension => 3
    """
    EMPTY_COORDINATES_MSG = "The Coordinates must be nonempty"
    NON_ITERABLE_COORDINATES_MSG = "The Coordinates must be an iterable"
    ZERO_VECTOR_ERROR_MSG = "Cannot normalize zero vector"
    NO_UNIQUE_PARALLEL_COMPONENT_MSG = "No unique parallel component to zero vector"
    NO_UNIQUE_ORTHOGONAL_COMPONENT_MSG = "No unique othogonal component to zero vector"
    ANGLE_WITH_ZERO_VECTOR_MG = "Cannot compute an angle with a zero vector."
    DIMENSION_MORE_THAN_THREE_MSG = "Cross Product can be done on vectors of dimension 3 or less."

    def __init__(self, coordinates):
        """Store coordinates as a tuple and record the dimension.

        Raises ValueError for empty input, TypeError for non-iterables.
        """
        try:
            if not coordinates:
                raise ValueError(Vector.EMPTY_COORDINATES_MSG)
            self.coordinates = tuple(x for x in coordinates)
            # Measure the materialized tuple so generator inputs also work
            # (len() on a generator would raise TypeError).
            self.dimension = len(self.coordinates)
        except TypeError:
            raise TypeError(Vector.NON_ITERABLE_COORDINATES_MSG)

    def __str__(self):
        """Return the vector as a string: Vector (v1, v2, v3, ...)."""
        return "Vector {}".format(self.coordinates)

    def __eq__(self, v):
        """Compare 2 vectors for component-wise equality."""
        return self.coordinates == v.coordinates

    def __add__(self, v):
        """Add two vectors element wise using the + operator."""
        if self.dimension != v.dimension:
            raise TypeError('Cannot add Vector of dimensions {} and {}'.format(self.dimension, v.dimension))
        return Vector([x + y for x, y in zip(self.coordinates, v.coordinates)])

    def __sub__(self, v):
        """Subtract two vectors element wise using the - operator."""
        if self.dimension != v.dimension:
            raise TypeError('Cannot subtract Vector of dimensions {} and {}'.format(self.dimension, v.dimension))
        return Vector([x - y for x, y in zip(self.coordinates, v.coordinates)])

    def __mul__(self, n):
        """vector * vector -> dot product; vector * scalar -> scaled vector."""
        if isinstance(n, Vector):
            return self.dot(n)
        return Vector([x * n for x in self.coordinates])

    def __rmul__(self, n):
        """Right-hand multiplication: scalar * vector or vector * vector."""
        if isinstance(n, Vector):
            return self.dot(n)
        return self.__mul__(n)

    def magnitude(self):
        """Return the Euclidean magnitude sqrt(v1*v1 + v2*v2 + ...)."""
        return math.sqrt(sum([x * x for x in self.coordinates]))

    def normalize(self):
        """Return the unit vector in the direction of the current vector.

        Raises ZeroDivisionError for the zero vector.
        """
        magnitude = self.magnitude()
        if magnitude == 0:
            raise ZeroDivisionError(Vector.ZERO_VECTOR_ERROR_MSG)
        return Vector([x / magnitude for x in self.coordinates])

    def dot(self, v):
        """Return the dot product of the two vectors."""
        return sum([x * y for x, y in zip(self.coordinates, v.coordinates)])

    def angle_with(self, v, in_degrees=False):
        """Return the angle between two vectors, in radians by default.

        If in_degrees is True, the angle is returned in degrees.
        Raises for a zero vector (no defined angle).
        """
        try:
            # Round to guard acos against floating-point drift past +/-1.
            normal_product = round((self.normalize()).dot(v.normalize()), 5)
            angle_in_rad = math.acos(normal_product)
            if in_degrees:
                return angle_in_rad * 180 / math.pi
            return angle_in_rad
        except Exception as e:
            if str(e) == Vector.ZERO_VECTOR_ERROR_MSG:
                raise Exception(Vector.ANGLE_WITH_ZERO_VECTOR_MG)
            else:
                raise e

    def project(self, b):
        """Project the current vector onto the basis vector b.

        Projection = (self . unit(b)) * unit(b).
        """
        try:
            return self.dot(b.normalize()) * b.normalize()
        except Exception as e:
            if str(e) == Vector.ZERO_VECTOR_ERROR_MSG:
                raise Exception(Vector.NO_UNIQUE_PARALLEL_COMPONENT_MSG)
            else:
                raise e

    def project_orth(self, b):
        """Return the component of the current vector orthogonal to b.

        self = projection on b + projection on orthogonal of b, hence
        orthogonal component = self - projection on b.
        """
        try:
            parallel_project = self.project(b)
            return self - parallel_project
        except Exception as e:
            if str(e) == Vector.NO_UNIQUE_PARALLEL_COMPONENT_MSG:
                raise Exception(Vector.NO_UNIQUE_ORTHOGONAL_COMPONENT_MSG)
            else:
                raise e

    def __make_3d__(self):
        """Return a 3D vector from the first three components, zero-padded.

        dimension 1 -> (v1, 0, 0); dimension 2 -> (v1, v2, 0);
        dimension >= 3 -> (v1, v2, v3).
        """
        if self.dimension >= 3:
            return Vector(self.coordinates[:3])
        # BUG FIX: the original tested `i > self.dimension`, which read one
        # component past the end for 1D/2D vectors and raised IndexError.
        return Vector([self.coordinates[i] if i < self.dimension else 0
                       for i in range(3)])

    def cross(self, v):
        """Return the cross product of the (3D-padded) vectors self and v.

        Raises ValueError when either vector has more than 3 dimensions.
        """
        if self.dimension > 3 or v.dimension > 3:
            raise ValueError(Vector.DIMENSION_MORE_THAN_THREE_MSG)
        # NOTE: debug print(self)/print(v) calls removed from the original.
        x_1, y_1, z_1 = self.__make_3d__().coordinates
        x_2, y_2, z_2 = v.__make_3d__().coordinates
        return Vector([y_1 * z_2 - y_2 * z_1,
                       -(x_1 * z_2 - z_1 * x_2),
                       x_1 * y_2 - y_1 * x_2])

    def is_parallel_to(self, v):
        """Return True if the vectors are parallel (or either is zero)."""
        return (self.is_zero() or
                v.is_zero() or
                self.angle_with(v) == 0 or
                self.angle_with(v) == math.pi)

    def is_orthogonal_to(self, v, tolerance=1e-10):
        """Return True if the dot product is zero within tolerance."""
        return abs(self.dot(v)) < tolerance

    def is_zero(self, tolerance=1e-10):
        """Return True if the magnitude is zero within tolerance."""
        return self.magnitude() < tolerance
if __name__ == '__main__':
    # Smoke-test / demo of the Vector API; prints each operation's result.
    my_vector = Vector([1, 2, 3])
    print(my_vector)
    vector1 = Vector([1, 2, 3])
    vector2 = Vector([-1, 2, 3])
    # Equality is component-wise.
    print('Compare vectors: ')
    print(my_vector == vector1)
    print(my_vector == vector2)
    print()
    print(my_vector, '+', vector1, '=')
    print(my_vector + vector1)
    print()
    print(my_vector, '-', vector1, '=')
    print(my_vector - vector1)
    print('Magnitude of ', my_vector, '=', my_vector.magnitude())
    print('Unit vector of ', my_vector, '=', my_vector.normalize())
    print('Check for zero vector:', Vector([0, 0, 0]).is_zero())
|
from django.db import models
from analytics_project import settings
# РПД — рабочая программа дисциплины (RPD, course work programme)
class WorkProgramInFolder(models.Model):
    """Through model linking a WorkProgram to a Folder with a rating and a comment."""

    # Allowed importance ratings: integers 0..5.
    RATING_CHOICES = [
        (0, 0),
        (1, 1),
        (2, 2),
        (3, 3),
        (4, 4),
        (5, 5),
    ]
    folder = models.ForeignKey('Folder', verbose_name='Папка', on_delete=models.CASCADE,
                               related_name="work_program_in_folder")
    work_program = models.ForeignKey("WorkProgram", verbose_name='Рпд в папке', on_delete=models.CASCADE)
    work_program_rating = models.PositiveSmallIntegerField(choices=RATING_CHOICES, verbose_name="Важность рпд",
                                                           blank=True, null=True, default=0)
    comment = models.CharField(max_length=10240, verbose_name="Комментарий", blank=True, null=True)
# УП — учебный план (academic plan)
class AcademicPlanInFolder(models.Model):
    """Through model linking an AcademicPlan to a Folder with a rating and a comment."""

    # Allowed importance ratings: integers 0..5.
    RATING_CHOICES = [
        (0, 0),
        (1, 1),
        (2, 2),
        (3, 3),
        (4, 4),
        (5, 5),
    ]
    folder = models.ForeignKey('Folder', verbose_name='Папка', on_delete=models.CASCADE,
                               related_name="academic_plan_in_folder")
    academic_plan = models.ForeignKey("AcademicPlan", verbose_name='УП в папке', on_delete=models.CASCADE)
    academic_plan_rating = models.PositiveSmallIntegerField(choices=RATING_CHOICES, verbose_name="Важность уп",
                                                            blank=True, null=True, default=0)
    comment = models.CharField(max_length=10240, verbose_name="Комментарий", blank=True, null=True)
# МОДУЛИ — модули дисциплин (discipline block modules)
class DisciplineBlockModuleInFolder(models.Model):
    """Through model linking a DisciplineBlockModule to a Folder with a rating and a comment."""

    # Allowed importance ratings: integers 0..5.
    RATING_CHOICES = [
        (0, 0),
        (1, 1),
        (2, 2),
        (3, 3),
        (4, 4),
        (5, 5),
    ]
    folder = models.ForeignKey('Folder', verbose_name='Папка', on_delete=models.CASCADE,
                               related_name="block_module_in_folder")
    block_module = models.ForeignKey("DisciplineBlockModule", verbose_name='модуль в папке', on_delete=models.CASCADE)
    module_rating = models.PositiveSmallIntegerField(choices=RATING_CHOICES, verbose_name="Важность модуля",
                                                     blank=True, null=True, default=0)
    comment = models.CharField(max_length=10240, verbose_name="Комментарий", blank=True, null=True)
# ТРАЕКТОРИИ — индивидуальные траектории обучения (individual study routes)
class IndividualImplementationAcademicPlanInFolder(models.Model):
    """Through model linking an IndividualImplementationAcademicPlan to a Folder with a rating and a comment."""

    # Allowed importance ratings: integers 0..5.
    RATING_CHOICES = [
        (0, 0),
        (1, 1),
        (2, 2),
        (3, 3),
        (4, 4),
        (5, 5),
    ]
    folder = models.ForeignKey('Folder', verbose_name='Папка', on_delete=models.CASCADE,
                               related_name="individual_implementation_of_academic_plan_in_folder")
    individual_implementation_of_academic_plan = models.ForeignKey("IndividualImplementationAcademicPlan", verbose_name='траектория в папке', on_delete=models.CASCADE)
    route_rating = models.PositiveSmallIntegerField(choices=RATING_CHOICES, verbose_name="Важность траектории",
                                                    blank=True, null=True, default=0)
    comment = models.CharField(max_length=10240, verbose_name="Комментарий", blank=True, null=True)
class Folder(models.Model):
    """User-owned folder grouping work programs, academic plans, modules and routes.

    All links go through explicit 'through' models that attach a rating and a
    comment to each membership.
    """

    name = models.CharField(max_length=1024, verbose_name="Имя папки")
    description = models.CharField(max_length=1024, verbose_name="Описание папки", blank=True, null=True)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='Владелец папки', on_delete=models.CASCADE)
    # FIX: null=True removed from the ManyToManyField declarations below; it
    # has no effect on M2M relations and triggers Django warning fields.W340.
    work_program = models.ManyToManyField("WorkProgram", verbose_name='Рабочие программы',
                                          through=WorkProgramInFolder, related_name='works_program_folder',
                                          blank=True)
    academic_plan = models.ManyToManyField("AcademicPlan", verbose_name='Академические планы',
                                           through=AcademicPlanInFolder, related_name='academic_plans_folder',
                                           blank=True)
    block_module = models.ManyToManyField("DisciplineBlockModule", verbose_name='Модули дисциплины',
                                          through=DisciplineBlockModuleInFolder, related_name='block_modules_folder',
                                          blank=True)
    individual_implementation_of_academic_plan = models.ManyToManyField(
        "IndividualImplementationAcademicPlan",
        verbose_name='Индивидуальные траектории',
        through=IndividualImplementationAcademicPlanInFolder,
        related_name='individual_implementation_of_academic_plan_folder',
        blank=True)
|
from . import db
#db.create_all()
#from werkzeug.security import generate_password_hash
class Properties(db.Model):
    """SQLAlchemy model for a real-estate property listing.

    Also implements the four methods Flask-Login expects of a user-like
    object (is_authenticated/is_active/is_anonymous/get_id).
    """
    # You can use this to change the table name. The default convention is to use
    # the class name. In this case a class name of UserProfile would create a
    # user_profile (singular) table, but if we specify __tablename__ we can change it
    # to `user_profiles` (plural) or some other name.
    # __tablename__ = 'Properties'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(80))
    desc=db.Column(db.String(255))
    bedroom=db.Column(db.String(80))
    bathroom=db.Column(db.String(80))
    # NOTE(review): price is stored as a string — confirm numeric handling upstream.
    price=db.Column(db.String(80))
    location=db.Column(db.String(100))
    propertytype=db.Column(db.String(80))
    # Filename of the uploaded photo for this listing.
    photoname=db.Column(db.String(100))
    #def __init__(self, title,desc,bedroom,bathroom,price,location,propertytype,photoname):
    # self.title = title
    # self.desc=desc
    # self.bedroom=bedroom
    # self.bathroom=bathroom
    #self.price=price
    # self.location=location
    # self.propertytype=propertytype
    #self.photoname=photoname
    def is_authenticated(self):
        """Flask-Login hook: always treated as authenticated."""
        return True
    def is_active(self):
        """Flask-Login hook: always treated as active."""
        return True
    def is_anonymous(self):
        """Flask-Login hook: never anonymous."""
        return False
    def get_id(self):
        """Return the primary key as a string (unicode on Python 2)."""
        try:
            return unicode(self.id) # python 2 support
        except NameError:
            return str(self.id) # python 3 support
    def __repr__(self):
        return '<Title %r>' % (self.title)
|
from django.urls import path
from .views import *
# Route table mapping API endpoints to their class-based views.
urlpatterns = [
    #api
    path('synchronize/',Synchronize.as_view()),
    path('set_price/',SetPrice.as_view()),
    path('set_stock/',SetStock.as_view()),
    path('rest/',Rest.as_view()),
    path('start/',Start.as_view()),
]
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
def is_leap(year):
    """Return True if *year* is a leap (bissextile) year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century years,
    which must also be divisible by 400.
    """
    if year % 4 != 0:
        return False
    if year % 100 == 0:
        return year % 400 == 0
    return True


if __name__ == '__main__':
    # BUG FIX: the original called int(year) without assigning the result,
    # so `year % 4` raised TypeError on the input string.
    year = int(input("Which year you want to check ..."))
    if is_leap(year):
        print("Yes it is a bissextile year !")
    else:
        print("Nope is not a bissextile year !")
|
import os
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
import onnx
import pytest
from onnx_chainer import export_testcase
@pytest.fixture(scope='function')
def model():
    """Small conv + linear chainer network used by the export tests below."""
    return chainer.Sequential(
        L.Convolution2D(None, 16, 5, 1, 2),
        F.relu,
        L.Convolution2D(16, 8, 5, 1, 2),
        F.relu,
        L.Convolution2D(8, 5, 5, 1, 2),
        F.relu,
        L.Linear(None, 100),
        L.BatchNormalization(100),
        F.relu,
        L.Linear(100, 10)
    )
@pytest.fixture(scope='function')
def x():
    """Dummy input batch: 10 all-zero 3x28x28 float32 images."""
    return np.zeros((10, 3, 28, 28), dtype=np.float32)
@pytest.mark.parametrize('in_names,out_names',
                         [(None, None), (['x'], ['y'])])
def test_export_testcase(
        tmpdir, model, x, disable_experimental_warning, in_names, out_names):
    """export_testcase writes model.onnx plus input/output pb files, honoring custom tensor names."""
    # Just check the existence of pb files
    path = str(tmpdir)
    export_testcase(model, (x,), path,
                    input_names=in_names, output_names=out_names)
    assert os.path.isfile(os.path.join(path, 'model.onnx'))
    input_pb_path = os.path.join(path, 'test_data_set_0', 'input_0.pb')
    assert os.path.isfile(input_pb_path)
    input_tensor = onnx.load_tensor(input_pb_path)
    # Default input name is 'Input_0' when no names are supplied.
    assert input_tensor.name == (in_names[0] if in_names else 'Input_0')
    output_pb_path = os.path.join(path, 'test_data_set_0', 'output_0.pb')
    assert os.path.isfile(output_pb_path)
    output_tensor = onnx.load_tensor(output_pb_path)
    # Default output name comes from the final Linear op of the model fixture.
    assert output_tensor.name == (
        out_names[0] if out_names else 'LinearFunction_1')
@pytest.mark.parametrize('train', [True, False])
def test_output_grad(tmpdir, model, x, train, disable_experimental_warning):
    """output_grad=True dumps one gradient pb per model parameter, named after an initializer."""
    path = str(tmpdir)
    export_testcase(model, (x,), path, output_grad=True, train=train)
    model_filename = os.path.join(path, 'model.onnx')
    assert os.path.isfile(model_filename)
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'input_0.pb'))
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'output_0.pb'))
    onnx_model = onnx.load(model_filename)
    initializer_names = {i.name for i in onnx_model.graph.initializer}
    # Exactly 12 gradient files should be there (one per trainable parameter).
    for i in range(12):
        tensor_filename = os.path.join(
            path, 'test_data_set_0', 'gradient_{}.pb'.format(i))
        assert os.path.isfile(tensor_filename)
        tensor = onnx.load_tensor(tensor_filename)
        assert tensor.name.startswith('param_')
        assert tensor.name in initializer_names
    # No extra gradient file beyond the expected 12.
    assert not os.path.isfile(
        os.path.join(path, 'test_data_set_0', 'gradient_12.pb'))
|
import cv2
import numpy as np
# Show the default camera's feed until the user presses 'q'.
video = cv2.VideoCapture(0)
while True:
    ret, frame = video.read()
    if not ret:
        # Camera disconnected or no frame available -- stop cleanly.
        break
    # BUG FIX: cv2.imshow requires a window name as the first argument;
    # the original passed only the frame, which raises TypeError.
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
from __future__ import absolute_import
import json
from pyes.query import MatchAllQuery
def dump_docs(fp, conn, index_name, doc_type, scroll='5m', encoding='utf8'):
    """Write every document of (index_name, doc_type) to fp, one JSON object per line."""
    q = MatchAllQuery()
    # scan+scroll streams the whole index without deep pagination.
    for result in conn.search(q, indices=[index_name], doc_types=[doc_type],
                              scan=True, scroll=scroll):
        fp.write(json.dumps(result, encoding=encoding))
        fp.write('\n')
def restore_docs(fp, conn, index_name, doc_type, encoding='utf8'):
    """Re-index documents from a dump file produced by dump_docs (one JSON doc per line)."""
    for line in fp:
        # NOTE: json.loads(encoding=...) is Python-2-era; the kwarg is
        # ignored/removed on Python 3.
        doc = json.loads(line, encoding=encoding)
        # NOTE(review): this indexes the raw dump line (which includes search
        # metadata), not doc['_source'] -- confirm that is intended.
        conn.index(line.strip().decode(encoding), index_name, doc_type,
                   bulk=True, id=doc['_id'])
    conn.force_bulk()
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.util.strutil import camelcase, pluralize
# TODO(Eric Ayers): Backfill tests for other methods in strutil.py
class StrutilTest(unittest.TestCase):
    """Unit tests for strutil.camelcase and strutil.pluralize."""

    def test_camelcase(self):
        """Underscores and hyphens are treated as word separators and dropped."""
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual('Foo', camelcase('foo'))
        self.assertEqual('Foo', camelcase('_foo'))
        self.assertEqual('Foo', camelcase('foo_'))
        self.assertEqual('FooBar', camelcase('foo_bar'))
        self.assertEqual('FooBar', camelcase('foo_bar_'))
        self.assertEqual('FooBar', camelcase('_foo_bar'))
        self.assertEqual('FooBar', camelcase('foo__bar'))
        self.assertEqual('Foo', camelcase('-foo'))
        self.assertEqual('Foo', camelcase('foo-'))
        self.assertEqual('FooBar', camelcase('foo-bar'))
        self.assertEqual('FooBar', camelcase('foo-bar-'))
        self.assertEqual('FooBar', camelcase('-foo-bar'))
        self.assertEqual('FooBar', camelcase('foo--bar'))
        self.assertEqual('FooBar', camelcase('foo-_bar'))

    def test_pluralize(self):
        """Counts other than 1 pluralize the noun ('s', or 'es' after a sibilant)."""
        self.assertEqual('1 bat', pluralize(1, 'bat'))
        self.assertEqual('1 boss', pluralize(1, 'boss'))
        self.assertEqual('2 bats', pluralize(2, 'bat'))
        self.assertEqual('2 bosses', pluralize(2, 'boss'))
        self.assertEqual('0 bats', pluralize(0, 'bat'))
        self.assertEqual('0 bosses', pluralize(0, 'boss'))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# File: w2v_to_numpy.py
# Convert binary W2V file to
import sys, struct, os
import cPickle, gzip
import numpy as np
class HiddenLoader:
    """Load a binary weight matrix plus bias vector (Python 2 code).

    Expected file layout: a text header line "<rows> <cols>", followed by
    rows*cols float32 values row by row, then cols float32 bias values.
    """
    def __init__(self, fname):
        f = open(fname, 'rb')
        header = f.readline().decode('utf-8').strip()
        self.rows, self.cols = tuple([int(n) for n in header.split()])
        self.M = np.zeros((self.rows, self.cols), dtype='float32')
        print >> sys.stderr, "Row size = {:d}; Column size = {:d}".format(self.rows, self.cols)
        # Read line by line
        for r in xrange(self.rows):
            self.M[r] = struct.unpack('{:d}f'.format(self.cols), f.read(4 * self.cols))
        # The bias vector directly follows the matrix in the file.
        self.bias = np.zeros((self.cols,), dtype='float32')
        self.bias = struct.unpack('{:d}f'.format(self.cols), f.read(4 * self.cols))
        f.close()
if __name__ == '__main__':
    # Open file and read header information
    loader = HiddenLoader(sys.argv[1])
    outFile = sys.stdout
    # Dump the matrix and the bias as whitespace-separated plain text
    # (Python 2 print-to-file syntax).
    print >> outFile, 'Weights:'
    for r in xrange(loader.rows):
        print >> outFile, ' '.join([str(x) for x in loader.M[r]])
    print >> outFile, 'bias:'
    print >> outFile, ' '.join([str(x) for x in loader.bias])
|
import gobject
gobject.threads_init()  # must run before any GStreamer use so GLib is thread-aware
import pygst
pygst.require("0.10")   # pin the GStreamer 0.10 Python bindings (Python 2 era)
import gst
from httplib import HTTP        # Python 2 stdlib HTTP client
from urlparse import urlparse   # Python 2 stdlib URL parsing
class BlankAudioSrc (gst.Bin):
    """GStreamer bin producing a test tone; the default wave=4 is silence."""
    def __init__ (self, wave = 4):
        gst.Bin.__init__(self)
        audiotestsrc = gst.element_factory_make("audiotestsrc")
        audiotestsrc.set_property("wave", wave) #4 is silence
        audioconvert = gst.element_factory_make("audioconvert")
        self.add(audiotestsrc, audioconvert)
        audiotestsrc.link(audioconvert)
        # Expose the converter's src pad as this bin's output pad.
        pad = audioconvert.get_pad("src")
        ghost_pad = gst.GhostPad("src", pad)
        self.add_pad(ghost_pad)
class GnomeVFSMP3Src (gst.Bin):
    """GStreamer bin decoding an MP3 URI (with a fallback URI) to raw audio.

    params:
        p: the owning pipeline (only used by the commented-out bus watch)
        uri: primary stream location
        uri2: fallback location used when the primary is unreachable
        vol: initial output volume
    """
    def __init__ (self, p, uri, uri2, vol = 1.0):
        gst.Bin.__init__(self)
        self.uri2 = uri2
        # NOTE(review): the bus watch below is commented out, so
        # get_message() is currently never invoked.
        # bus = p.get_bus()
        # bus.add_signal_watch()
        # bus.connect("message", self.get_message)
        self.gnomevfssrc = gst.element_factory_make("gnomevfssrc")
        # Probe the primary URI first; fall back to uri2 if it is unreachable.
        if checkURL(uri):
            self.gnomevfssrc.set_property("location", uri)
        else:
            self.gnomevfssrc.set_property("location", uri2)
        mad = gst.element_factory_make("mad")
        audioconvert = gst.element_factory_make("audioconvert")
        audioresample = gst.element_factory_make("audioresample")
        self.current_vol = vol
        self.target_vol = vol
        self.volume = gst.element_factory_make("volume")
        self.volume.set_property("volume", self.current_vol)
        # Chain: src -> mad (MP3 decode) -> convert -> resample -> volume.
        self.add(self.gnomevfssrc, mad, audioconvert,
                 audioresample, self.volume)
        gst.element_link_many(self.gnomevfssrc, mad,
                              audioconvert, audioresample, self.volume)
        # Expose the volume element's src pad as this bin's output pad.
        pad = self.volume.get_pad("src")
        ghostpad = gst.GhostPad("src", pad)
        self.add_pad(ghostpad)
    def get_message (self, bus, message):
        """Bus handler: on a source error, switch to the fallback URI (Python 2)."""
        if message.src == self.gnomevfssrc \
            and message.type == gst.MESSAGE_ERROR:
            err, debug = message.parse_error()
            print err
            if err == "Resource not found.":
                self.set_state(gst.STATE_PAUSED)
                self.gnomevfssrc.set_property(
                    "location",
                    self.uri2)
                self.set_state(gst.STATE_PLAYING)
def checkURL(url):
    """Return 1 if an HTTP HEAD request to *url* answers 200, else 0 (Python 2 httplib)."""
    p = urlparse(url)
    h = HTTP(p[1])          # p[1] is the network location (host[:port])
    h.putrequest('HEAD', p[2])  # p[2] is the path
    h.endheaders()
    print h.getreply()
    if h.getreply()[0] == 200: return 1
    else: return 0
class Stream:
    """Builds a GStreamer pipeline mixing a network MP3 stream with silence."""
    def __init__ (self):
        self.main_loop = gobject.MainLoop()
        self.pipeline = gst.Pipeline()
        # Primary and fallback URIs happen to be identical here.
        src = GnomeVFSMP3Src(
            self.pipeline,
            "http://aevidence2.dyndns.org:8000/scapes1.mp3",
            "http://aevidence2.dyndns.org:8000/scapes1.mp3")
        # The silent source keeps the adder producing even if the stream stalls.
        blanksrc = BlankAudioSrc(4)
        self.adder = gst.element_factory_make("adder")
        alsasink = gst.element_factory_make("alsasink")
        self.pipeline.add(self.adder, alsasink)
        self.adder.link(alsasink)
        self.add_source_to_adder(blanksrc)
        self.add_source_to_adder(src)
    def play(self):
        """Start the pipeline and block in the GLib main loop."""
        self.pipeline.set_state(gst.STATE_PLAYING)
        self.main_loop.run()
    def add_source_to_adder (self, src_element):
        """Add src_element to the pipeline and wire its src pad into the adder."""
        self.pipeline.add(src_element)
        srcpad = src_element.get_pad('src')
        addersinkpad = self.adder.get_request_pad('sink%d')
        srcpad.link(addersinkpad)
# Build the pipeline and start playback (blocks in the GLib main loop).
s = Stream()
s.play()
|
# program to resize all train images
import os
import glob
from PIL import Image
from joblib import Parallel, delayed
in_dir = '../train_images/'   # source directory with the original JPEGs
out_dir = 'train512/'         # destination directory for the resized copies
IMAGE_SIZE = 512              # target width and height in pixels
JPG_FILES = glob.glob(in_dir + '*.jpg')
def convert(img_file):
    """Resize one image to IMAGE_SIZE x IMAGE_SIZE and save it under out_dir."""
    im = Image.open(img_file)
    im.resize((IMAGE_SIZE, IMAGE_SIZE)).save(out_dir + os.path.basename(img_file), 'JPEG')
# Fan the conversions out across all CPU cores.
Parallel(n_jobs=-1, verbose=10)(delayed(convert)(f) for f in JPG_FILES)
|
def iterative_fibonacci(position):
    """Find a position in the Fibonacci sequence iteratively.

    Time complexity: O(n)
    Space complexity: O(1)
    """
    if position <= 1:
        return position
    # Walk the sequence forward with a rolling pair of values.
    prev, curr = 0, 1
    for _ in range(position - 1):
        prev, curr = curr, prev + curr
    return curr
def concise_fibonacci(position):
    """Find a Fibonacci position by using previous values in a growing list.

    Time complexity: O(n)
    Space complexity: O(n)
    """
    if position <= 0:
        return position
    # Grow the sequence until it is long enough, then read off the answer.
    values = [1, 1]
    while len(values) < position:
        values.append(values[-1] + values[-2])
    return values[position - 1]
def memoized_fibonacci(position):
    """Find a position in the Fibonacci sequence using recursion & memoization.

    Time complexity: O(n)
    Space complexity: O(n)
    """
    cache = {}

    def fib(n):
        # Base cases: fib(0) == 0, fib(1) == 1.
        if n <= 1:
            return n
        if n not in cache:
            cache[n] = fib(n - 1) + fib(n - 2)
        return cache[n]

    return fib(position)
def recursive_fibonacci(position):
    """Find a position in the Fibonacci sequence using naive recursion.

    Time complexity: O(2^n)
    Space complexity: O(n) memory
    """
    return position if position <= 1 else (
        recursive_fibonacci(position - 1) + recursive_fibonacci(position - 2))
if __name__ == "__main__":
print("Finding 37th position in Fibonacci sequence...")
print("Concise: ", concise_fibonacci(37))
print("Iterative:", iterative_fibonacci(37))
print("Memoized: ", memoized_fibonacci(37))
print("Recursive:", recursive_fibonacci(37))
|
from flask import Flask , render_template , request
from flask import jsonify
import pafy
import vlc
app = Flask(__name__)
# Shared VLC objects: one headless instance and one media player reused by all routes.
Instance = vlc.Instance('--no-video')
player = Instance.media_player_new()
# NOTE(review): this module-level url is never read; the /song route uses a local.
url = ''
@app.route('/')
def index():
    """Serve the player UI page."""
    return render_template('index.html')
@app.route('/song', methods=['GET'])
def youtube():
    """Start playing the audio of the YouTube video given by ?vid=<video id>."""
    vid = request.args.get('vid')
    url = 'https://www.youtube.com/watch?v=' + vid
    video = pafy.new(url)
    streams = video.audiostreams
    # NOTE(review): index 3 is assumed to be a good-quality audio stream --
    # confirm it exists for every video (pafy also offers getbestaudio()).
    best = streams[3]
    playurl = best.url
    Media = Instance.media_new(playurl)
    Media.get_mrl()
    player.set_media(Media)
    player.play()
    display_message = {"song":"started"}
    resp = jsonify(display_message)
    resp.status_code = 200
    return resp
@app.route('/pause')
def pause():
    """Toggle pause on the current media and report the new state as JSON."""
    player.pause()
    display_message = {"song":"paused"}
    resp = jsonify(display_message)
    resp.status_code = 200
    return resp
@app.route('/stop')
def stop():
    """Stop playback entirely and report the state as JSON."""
    player.stop()
    display_message = {"song":"stopped"}
    resp = jsonify(display_message)
    resp.status_code = 200
    return resp
@app.route('/restart')
def restart():
    """Restart playback of the current media from the beginning."""
    player.stop()
    # BUG FIX: the original called player.player(), which does not exist on a
    # vlc MediaPlayer and raised AttributeError; play() restarts the media.
    player.play()
    display_message = {"song":"restarted"}
    resp = jsonify(display_message)
    resp.status_code = 200
    return resp
@app.route('/resume')
def play():
    """Resume (or start) playback of the current media and report the state."""
    player.play()
    display_message = {"song":"played"}
    resp = jsonify(display_message)
    resp.status_code = 200
    return resp
if __name__ == '__main__':
    # Listen on all interfaces so other devices on the LAN can control playback.
    app.run(debug=False, port=7070, host='0.0.0.0')
|
from django.conf.urls import url, include
from django.urls import path
from .import views
# Route table for the site's auth/profile/messaging views (mixes path() and
# legacy regex url() patterns).
urlpatterns = [
    path('', views.index, name='index'),
    path('inicioSesion', views.inicioSesion, name='inicioSesion'),
    path('perfil', views.perfil, name='perfil'),
    url(r'^signup', views.signup, name='signup'),
    path('sendMessage', views.enviarMensaje, name='perfilnMessge'),
    url('registro', views.registro, name='registro'),
    # url(r'^accounts/login$', 'django.contrib.auth.views.login'),
    url(r'^form$', views.DefaultFormView.as_view(), name='form_default'),
    url(r'^message', views.message, name='escribe_mensajet'),
    path('descifrar/<int:id>/', views.descifrar ,name="descifrar"),
    # path('descifrar', views.descifrar ,name="descifrar"),
]
import tkinter as tk
from tkinter import messagebox
def insertpoint():
    """Insert the entry widget's text at the text box's current cursor position."""
    var = e.get()
    t.insert('insert', var)
def insertend():
    """Append the entry widget's text to the end of the text box."""
    var = e.get()
    t.insert("end", var)
# Build a minimal Tk window: a masked entry, a 2-line text box and two buttons.
root = tk.Tk()
root.geometry("400x400+400+400")
root.title("无架构的GUI程序")  # title: "GUI program without architecture"
btn01 = tk.Button(text='insert point', command=insertpoint)
btnquit = tk.Button(root, text='insert end', command=insertend)
e = tk.Entry(root, show="*")  # show="*" masks the typed characters
t = tk.Text(root, height=2)
e.pack()
t.pack()
btn01.pack()
btnquit.pack()
root.mainloop()
|
# Print the first 10 terms of an arithmetic progression (PA), given the
# first term and the common difference (prompts in Portuguese).
n1 = int(input('Digite o primeiro termo da PA: '))
r = int(input('Digite a razão da PA: '))
n = n1
for c in range(1, 11):
    # nth term of an arithmetic progression: a_n = a_1 + r * (n - 1)
    n = n1 + r*(c-1)
    print(n, end=' -> ')
print('fim')
|
import json
import csv
import boto3
import json
import dateutil.parser
import datetime
import time
import os
import math
import random
import logging
import create_instance
# Root logger at DEBUG so intent payloads are visible in CloudWatch logs.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def close(session_attributes, fulfillment_state, message):
    """Build a Lex 'Close' dialog-action response.

    Arguments
    - session_attributes: dict of session attributes to echo back
    - fulfillment_state: e.g. 'Fulfilled' or 'Failed'
    - message: dict with 'contentType' and 'content' keys
    """
    return {
        'sessionAttributes': session_attributes,
        'dialogAction': {
            'type': 'Close',
            'fulfillmentState': fulfillment_state,
            'message': message,
        },
    }
""" --- Function that return employee name --- """
def select_instance(instance_type):
    """Create an instance for the requested app type, or prompt for one.

    NOTE(review): the branch logic looks inverted -- create_instance is
    called only when instance_type is EMPTY, and `create_instance` is the
    imported module, not a callable; confirm the intent (likely
    `if instance_type:` and a function inside the create_instance module).
    """
    logger.debug(instance_type)
    if not instance_type:
        # NOTE(review): result is unused and nothing is returned on this path.
        apptype = create_instance(instance_type)
    else:
        return ('Please select one application, which you are looking for health info app, health care app, health medical app ')
def return_instance(intent_request):
    """
    Performs dialog management and fulfillment for the 'testhealth' intent.

    Reads the 'instance' slot from the Lex request and closes the dialog
    with the text produced by select_instance().
    """
    instance = intent_request['currentIntent']['slots']['instance']
    source = intent_request['invocationSource']
    output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
    if source == 'DialogCodeHook':
        # Perform basic validation on the supplied input slots.
        slots = intent_request['currentIntent']['slots']
        # Immediately close the dialog as Fulfilled with the helper's text.
        return close(
            output_session_attributes,
            'Fulfilled',
            {
                'contentType': 'PlainText',
                'content': 'Hello! {}'.format(select_instance(instance))
            }
        )
    # NOTE(review): non-DialogCodeHook sources fall through and return None.
""" --- Intents --- """
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot.

    Routes the request to the matching handler; raises for unknown intents.
    """
    logger.debug('dispatch intentName={}'.format(intent_request['currentIntent']['name']))
    intent_name = intent_request['currentIntent']['name']
    # Dispatch to your bot's intent handlers
    if intent_name == 'testhealth':
        return return_instance(intent_request)
    raise Exception('Intent with name ' + intent_name + ' not supported')
""" --- Main handler --- """
def lambda_handler(event, context):
    """
    Route the incoming request based on intent.
    The JSON body of the request is provided in the event slot.
    """
    # By default, treat the user request as coming from the America/New_York time zone.
    os.environ['TZ'] = 'America/New_York'
    time.tzset()
    logger.debug('event.bot.name={}'.format(event['bot']['name']))
    return dispatch(event)
# Generated by Django 2.1.2 on 2018-11-26 04:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable integer column temp_jumlah_mahasiswa to the prediksi model."""

    dependencies = [
        ('prediksi', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='prediksi',
            name='temp_jumlah_mahasiswa',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
from selenium import webdriver
'''
URL = 'https://www.miraeassetdaewoo.com/hki/hki3028/r01.do'
driver = webdriver.Chrome(executable_path='chromedriver')
driver.get(url=URL)
driver.implicitly_wait(time_to_wait=5)
'''
# Scrape the Melon daily chart and save rank/title/artist to a CSV file.
driver = webdriver.Chrome('H:\chromedriver.exe')
driver.get('https://www.melon.com/chart/day/index.htm')
driver.implicitly_wait(3)
# FIX: the original selector '.service_list song table tbody tr' matched
# nothing ('song' is part of the class name, not a tag); use the row
# selector consistent with the commented-out '#frm > div > table > tbody'.
list_song = driver.find_elements_by_css_selector('#frm > div > table > tbody > tr')
data = []
rank = 1
for tr in list_song:
    title = tr.find_element_by_css_selector('.wrap_song_info .ellipsis.rank01').text
    singer = tr.find_element_by_css_selector('.wrap_song_info .rank02').text
    data.append([rank, title, singer])
    rank += 1
    print(title, singer)
with open('melon_rank.csv', 'w', encoding='utf-8') as file:
    file.write('순위,노래제목,가수\n')
    for row in data:
        # BUG FIX: the original wrote data[0..2] (whole rows, the same three
        # rows on every iteration) instead of the current row's fields, and
        # never wrote a newline between records.
        file.write(f"{row[0]},{row[1]},{row[2]}\n")
driver.close()
# -*- coding:utf-8 -*-
# http://www.math.pku.edu.cn/teachers/lidf/docs/textrick/index.htm
import urllib2
import re
import os
import csv
'''
csvfile = file('sample.csv', 'rb')
reader = csv.reader(csvfile)
code_list=[]
line_num=0
for line in reader:
line_num=line_num+1
if line_num>150 and line_num<301:
code_list.append(line)
print line[0]
csvfile.close()
'''
'''
url_list=[]
True_Or_False=1
while(True_Or_False):
url = raw_input("请输入url\n")
if url!='q':
url_list.append(url)
else:
True_Or_False=0
for url in url_list:
print url
'''
# open the url and read
def getHtml(url):
    """Fetch *url* and return the raw response body (Python 2 urllib2)."""
    page = urllib2.urlopen(url)
    html = page.read()
    page.close()
    return html
def readHtml(htmlName):
    """Read a locally saved HTML file and return its full contents."""
    file_object=open(htmlName)
    try:
        all_the_text = file_object.read()
    finally:
        # Close the handle even if read() raises.
        file_object.close()
    return all_the_text
    # print len(all_the_text)
    #f = open('/Users/joe/Documents/'+htmlName, 'wb')
    # f.write(all_the_text)
    # f.close()
def getContent(html):
    """Extract announcement rows from a cninfo listing page and print each field (Python 2).

    NOTE(review): the five captured groups are inferred from the t1/t2/t3
    class names (likely date, time, detail URL, title, size) -- confirm
    against an actual page.
    """
    reg=r'<li><div class="t1".*?><font>(.*?)</font></div><div class="t2".*?><font>(.*?)</font></div><div class="t3"><dd><span class="d1".*?><a href="(.*?)" target="_blank">(.*?)<img .*?>.*?</a></span><span class="d3">(.*?)</span></dd></div></li>'
    content_re=re.compile(reg)
    content_list=re.findall(content_re,html)
    for content in content_list:
        print content[0]
        print content[1]
        print content[2]
        print content[3]
        print content[4]
# compile the regular expressions and find
# all stuff we need
def getUrl(html):
    """Return every PDF link found in *html* (href/HREF attribute values)."""
    pdf_link_re = re.compile(r'(?:href|HREF)="?((?:http://)?.+?\.pdf)')
    return pdf_link_re.findall(html)
def getFile(url,stockcode):
    """Download *url* in 8 KB chunks to a local file (Python 2).

    The file name is built from the stock code plus the 5th and 6th
    '/'-separated segments of the URL.

    NOTE(review): assumes the URL has at least six path segments;
    shorter URLs raise IndexError.
    """
    file_name = stockcode+'_'+url.split('/')[4]+'_'+url.split('/')[5]
    u = urllib2.urlopen(url)
    f = open(file_name, 'wb')
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            # empty read => end of stream
            break
        f.write(buffer)
    f.close()
    print "Sucessful to download" + " " + file_name
# Driver: parse a locally saved listing page for stock 002099.
html=readHtml('002099.html')
getContent(html)
'''
root_url = 'http://www.math.pku.edu.cn/teachers/lidf/docs/textrick/'
raw_url = 'http://www.math.pku.edu.cn/teachers/lidf/docs/textrick/index.htm'
html = getHtml(raw_url)
url_lst = getUrl(html)
url_lst=['http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/1200024187?announceTime=2014-07-02',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/1200024188?announceTime=2014-07-02',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/61201538?announceTime=2012-07-02%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/61201539?announceTime=2012-07-02%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59618605?announceTime=2011-07-01%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59618606?announceTime=2011-07-01%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59618607?announceTime=2011-07-01%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59618608?announceTime=2011-07-01%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59618609?announceTime=2011-07-01%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59509955?announceTime=2011-06-03%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59479951?announceTime=2011-05-27%2006:30',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/59421429?announceTime=2011-05-12%2011:46',
'http://www.cninfo.com.cn/cninfo-new/disclosure/szse/bulletin_detail/true/58302589?announceTime=2010-08-14%2006:30']
root_url = 'http://www.cninfo.com.cn/finalpage/'
new_url_lst=[]
for url in url_lst:
result0 = re.findall(".*/(.*)?announceTime.*",url)
print result0[0].strip('?')
if('%' in url):
result1 = re.findall(".*=(.*)%.*",url)
print result1[0]
else:
result1 = re.findall(".*=(.*)",url)
print result1[0]
new_url_lst.append(root_url+result1[0]+'/'+result0[0].strip('?')+'.PDF')
for url in new_url_lst:
print url
''' |
import os
import sys
import unittest
import mockings
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../core'))
sys.path.insert(1, path)
from now import Now
import chords
class TestShouldRun(unittest.TestCase):
    """Behaviour of chords.shouldRun for dynamically created modules."""

    NOON = "2015-01-01 12:00:00"

    def _should_run(self, name, body):
        """Build a module from *body* and ask chords.shouldRun about it."""
        return chords.shouldRun(
            mockings.createModule(name, body),
            Now(tweak=self.NOON),
            mockings.getLogger()
        )

    def testModuleWithoutShouldRunMethod(self):
        # No shouldRun() defined at all -> must not run.
        self.assertFalse(self._should_run(
            "WithoutShouldRunMethod",
            "def main():\n\tpass"
        ))

    def testModuleWithFalseShouldRunMethod(self):
        # shouldRun() returns False -> must not run.
        self.assertFalse(self._should_run(
            "WithoutShouldRunMethod",
            "def shouldRun(now):\n\treturn False"
        ))

    def testModuleWithTrueShouldRunMethod(self):
        # shouldRun() returns True -> must run.
        self.assertTrue(self._should_run(
            "ModuleWithTrueShouldRunMethod",
            "def shouldRun(now):\n\treturn True"
        ))

    def testModuleWithExceptionThrowingShouldRun(self):
        # A raising shouldRun() is treated as "do not run".
        self.assertFalse(self._should_run(
            "ModuleWithExceptionThrowingShouldRun",
            "def shouldRun(now):\n\traise Exception('Something went wrong')"
        ))
def main():
    """Entry point: run the unittest CLI over this module's test cases."""
    unittest.main()
if __name__ == '__main__':
    main()
# Let S be a list of permutations of 0, 1, 2, ..., 9 which are sorted in
# ascending order.
# Find S[10^6]
# Define permu(n, D) = S[n] which is the n-th permutation in S where D is
# the set of possible digits.
#
# Let M = |D|
# permu(n, D) = str(d_k) + permu(n - k*(M-1)!, D \ {d_k})
# where k = floor((n - 1) / (M-1)!), i.e. d_k is the digit whose block of
# (M-1)! permutations contains the n-th permutation.
factorialArr = []
def precalculateFactorial():
    """Populate the module-level factorialArr so factorialArr[n] == n!
    for n in 0..9."""
    global factorialArr
    table = [1] * 10
    for k in range(2, 10):
        table[k] = k * table[k - 1]
    factorialArr = table
# Find the N-th permutation in S (N >= 1)
def findPermutation(N):
precalculateFactorial()
if N > 10 * factorialArr[9]:
return 'Invalid input'
D = [i for i in range(10)]
return recursiveFindPermu(N, D)
def recursiveFindPermu(N, D):
    """Return the N-th (1-based) lexicographic permutation of the digits
    in *D* as a string ('' for N == 0 or an empty/out-of-range input).

    Improvements over the original:
    - no longer mutates the caller's list D (works on a sliced copy);
    - computes (len(D)-1)! locally instead of reading the module-global
      factorialArr, so the function is self-contained.
    The selection logic is unchanged: with M = len(D) remaining digits,
    each choice of leading digit accounts for a block of (M-1)!
    permutations, so the leading digit is the one whose block contains N.
    """
    if N == 0 or not D:
        return ''
    from math import factorial
    block = factorial(len(D) - 1)      # permutations per leading digit
    index = (N - 1) // block           # 0-based position of the leading digit
    if index >= len(D):
        # out-of-range N: mirror the original's behaviour of returning ''
        return ''
    rest = D[:index] + D[index + 1:]   # remaining digits, caller's list untouched
    return str(D[index]) + recursiveFindPermu(N - index * block, rest)
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
    """Count the complete pairs of equally coloured socks in *ar*.

    Parameters
    ----------
    n : int
        Declared number of socks (kept for the original signature;
        the list length is what actually matters).
    ar : list[int]
        Sock colours.

    Returns
    -------
    int
        Number of complete pairs.

    The original scanned the whole list once per distinct colour via
    ar.count() plus a linear `in banco` check (O(n^2)); Counter produces
    the same tallies in a single O(n) pass.
    """
    from collections import Counter
    return sum(count // 2 for count in Counter(ar).values())
if __name__ == '__main__':
    # Sample case from the HackerRank problem statement; expected output: 3.
    n = 9
    ar = [10, 20, 20, 10, 10, 30, 50, 10, 20]
    result = sockMerchant(n, ar)
    print(result)
|
'''
Created on Sep 6, 2015
@author: hugosenari
'''
from circuits import Component
from circuits import task
def show_window_gtk():
    """Open a blocking 'Hello World' GTK window containing one button.

    Runs Gtk.main() until the window is closed, so this call does not
    return while the window is open.
    """
    # NOTE(review): Gtk is usually imported from gi.repository, not
    # gi.overrides.Gtk -- confirm this import works on the target setup.
    from gi.overrides.Gtk import Gtk
    class MyWindow(Gtk.Window):
        def __init__(self):
            super().__init__(title="Hello World Gtk")
            self.button = Gtk.Button(label="Hello World Gtk")
            self.add(self.button)
    win = MyWindow()
    def quit_gtk(*args, **kwd):
        # Stop the GTK main loop when the window is closed.
        Gtk.main_quit()
    win.connect("delete-event", quit_gtk)
    win.show_all()
    Gtk.main()
class HelloWorldGtk(Component):
    """circuits component that opens the GTK window on a hello_world event."""
    def hello_world(self, component):
        # Run the blocking GTK main loop in a circuits task worker so this
        # component's event loop is not blocked; yield waits for completion.
        yield self.call(task(show_window_gtk))
from model.database import *
from model.simulator import Simulator
from model.crops.storage import Storage
from model.crops.itemsprocessor import ItemsProcessorManager
import pandas as pd
import matplotlib.pylab as plt
if __name__ == '__main__':
    # Wire up the simulator with its database-backed storage and
    # item-processor manager.
    simulator = Simulator()
    database = Database()
    database.init_data()
    simulator.storage = Storage(database)
    simulator.manager = ItemsProcessorManager(database, simulator.storage)
    # Per-level series to plot: recipe totals, mill counts, and the
    # average crafting time per recipe.
    data = {}
    data["levels"] = []
    data["recipes"] = []
    data["mills"] = []
    data["av_time"] = []
    for level in range(1,100):
        # NOTE(review): assumes mills["CraftedProducts"] maps each mill to
        # a list of {element: time} recipe dicts -- confirm in Simulator.
        mills, total_recipes = simulator.get_all_products(level)
        #print(mills["CraftedProducts"])
        data["levels"].append(level)
        data["recipes"].append(total_recipes)
        data["mills"].append(len(mills["CraftedProducts"]))
        total_time = 0
        # total_recipes is deliberately reused below as a per-level counter
        total_recipes = 0
        for mill in mills["CraftedProducts"]:
            for recipe in mills["CraftedProducts"][mill]:
                for element in recipe:
                    time = recipe[element]
                    total_time += int(time)
                    total_recipes += 1
        # NOTE(review): raises ZeroDivisionError if a level has no recipes
        average_time = total_time / total_recipes
        data["av_time"].append(average_time)
    # Render the three series as stacked bar charts, ticking every 10th level.
    charts = ["recipes", "mills", "av_time"]
    xticks = [0, 9, 19, 29, 39, 49, 59, 69, 79, 89, 98]
    xticklabels = [ n+1 for n in xticks ]
    fig, axes = plt.subplots(nrows=3, ncols=1)
    for i, name in enumerate(charts):
        my_data = pd.Series(data[name], index=data["levels"])
        myplot = my_data.plot(ax=axes[i], figsize=(20, 16), kind="bar", grid=True, title=name)
        axes[i].xaxis.set_ticks(xticks)
        axes[i].xaxis.set_ticklabels(xticklabels, rotation=0)
    # Save next to this script, named after the file.
    # NOTE(review): rfind("/") assumes POSIX separators; breaks on Windows.
    point = __file__.rfind(".")
    bar = __file__.rfind("/")
    plt.savefig(__file__[bar+1:point] + ".png")
|
import re
from django import forms
from django.forms import ModelForm
from .models import Mascota, Busqueda, Persona, Adopcion
# Login form (plain Form: credentials are validated, not persisted).
class IniciarSesionForm(forms.Form):
    username = forms.CharField(
        widget=forms.TextInput(), label="Nombre de Usuario")
    password = forms.CharField(
        widget=forms.PasswordInput(), label="Contraseña")
# Form for the rescued-pets maintenance screen.
class nuevoRescatadoForm(ModelForm):
    class Meta:
        model = Mascota
        fields = ('nombre', 'raza', 'imagen', 'descripcion')
# Filter form for the dog gallery (filters by state only).
class filtrarGaleriaForm(ModelForm):
    class Meta:
        model = Busqueda
        fields = ['estado']
# Registration form for adoption applicants.
class RegistrarPersonaForm(ModelForm):
    class Meta:
        model = Persona
        fields = ('nombre', 'apellido', 'rut', 'nacimiento', 'email',
                  'telefono', 'region', 'ciudad', 'vivienda')
# Form for recording an adoption-experience write-up.
class registrarExperienciaForm(ModelForm):
    class Meta:
        model = Adopcion
        fields = ['descripcion']
|
def knapsack(value, weight, capacity):
#index list with size of number of values
ind = list(range(len(value)))
#ratio list containing ratio's of value & weight
ratio = [v/w for v, w in zip(value, weight)]
#sorting the index list based on the ratio's in non-decreasing order
ind.sort(key=lambda i: ratio[i], reverse=True)
#initializing maximum value to 0
maxi= 0
#looping over the index list
for i in ind:
#since the weight of the item should be less than the capacity.. check condition
if(weight[i]<=capacity):
maxi+=value[i];
#after adding the weight of item subtracting it from the capacity
capacity-=weight[i]
else:
#if the weight is greater then we add the fractional weight of item inorder to have maximum value in given capacity
maxi+=value[i]*capacity/weight[i]
break
return maxi
# --- CLI driver: read items from stdin and report the best value ---
#inputting the size i.e number of items
# NOTE(review): n is read but never used; the item count comes from the lists
n=int(input('Enter no:of items :'))
#reading values given by user in list "value"
value=[int(i) for i in input('Enter values :').split()]
#reading weights given by user in list "weight"
weight=[int(i) for i in input('Enter weights :').split()]
#inputting the capacity of the knapsack
capacity=int(input('Enter Capacity : '))
#storing the maximum value in "max" variable
maxi=knapsack(value,weight,capacity)
#printing the result
print('maximum value is ',maxi)
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
from cv2 import aruco
def aruco_detection():
    """Continuously capture webcam frames, detect the first 6x6-250 ArUco
    marker in view, and print its estimated offsets and distances from the
    camera until the preview window receives 'q'.

    Changes vs. the original:
    - fixed the dead, misspelled initialiser ``delyaY`` -> ``deltaY``;
    - hoisted the loop-invariant dictionary/detector-parameter setup out
      of the capture loop;
    - removed unused locals and commented-out debug code.
    Printed output and window handling are unchanged.
    """
    # start video capture for distance
    cap = cv2.VideoCapture(0)
    # marker dictionary and detector settings never change per frame
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    while(True):
        # Capture frame-by-frame; detection runs on the raw BGR frame
        ret, frame = cap.read()
        gray = frame
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
        gray = aruco.drawDetectedMarkers(gray, corners, ids)
        height, width, _ = gray.shape
        deltaX = 0
        deltaY = 0  # FIX: was the dead typo 'delyaY'
        absX = int(width/2)   # frame centre, pixels
        absY = int(height/2)
        if len(corners):  # at least one marker detected
            # first detected marker's four corner points
            markerOne = corners[0][0]
            cornerOne = markerOne[0]
            cornerTwo = markerOne[1]
            cornerThree = markerOne[2]
            cornerFour = markerOne[3]
            # edge lengths in pixels along x (top & bottom edges) ...
            deltaX1 = cornerTwo[0] - cornerOne[0]
            deltaX2 = cornerThree[0] - cornerFour[0]
            # ... and along y (left & right edges)
            deltaY1 = cornerFour[1] - cornerOne[1]
            deltaY2 = cornerThree[1] - cornerTwo[1]
            # marker centre: mean of the two diagonals' midpoints
            centerX1 = int((cornerTwo[0] + cornerFour[0]) / 2)
            centerY1 = int((cornerTwo[1] + cornerFour[1]) / 2)
            centerX2 = int((cornerOne[0] + cornerThree[0]) / 2)
            centerY2 = int((cornerOne[1] + cornerThree[1]) / 2)
            centerX = (centerX1+centerX2) / 2
            centerY = (centerY1+centerY2) / 2
            # pixel offset of the marker centre from the frame centre
            deltaX = abs(absX-centerX)
            deltaY = abs(absY-centerY)
            arucoWidth = (deltaX1+deltaX2) / 2
            arucoHeight = (deltaY1+deltaY2) / 2
            # real-world extent of the full frame, scaled from the marker's
            # apparent size; 0.492126 is presumably the physical marker
            # width/height -- TODO confirm the unit and value
            screenWidth = 0.492126 / (arucoWidth / width)
            screenHeight = 0.492126 / (arucoHeight / height)
            # real-world offset of the marker from the optical axis
            xDistance = (screenWidth / 2)*(deltaX / (width/2))
            yDistance = (screenHeight / 2)*(deltaY / (height/2))
            # pinhole model: f = focal length, x/y = sensor dimensions
            # NOTE(review): constants look camera-specific -- confirm
            f = 0.0036
            x = 0.0038
            y = 0.0029
            Z1 = f*(screenWidth/x)
            Z2 = f*(screenHeight/y)
            zDistance = abs((Z1+Z2)/2)
            XYplaneDistance = np.sqrt(xDistance*xDistance + zDistance*zDistance)
            totalDistance = np.sqrt(xDistance*xDistance + yDistance*yDistance + zDistance*zDistance)
            print("xOffset", xDistance)
            print("yOffset", yDistance)
            print("zOffset: ", zDistance)
            print("Plane Distance: ", XYplaneDistance)
            print("Total Distance: ", totalDistance)
        cv2.imshow('frame',gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
aruco_detection() |
#!/usr/bin/env python
# -*- encoding=utf-8 -*-
"""
Sync from server to local machine on both Win & Linux
Add config in sync.cfg to config the sync parameters
and make sure you add your pub key to authorized_keys on remote
server.
"""
import os
import sys
import ConfigParser
class Syncer():
    """Build and run an rsync-over-ssh pull from a remote server (Python 2).

    Defaults target a cwRsync installation on Windows; merge_config
    overrides them with values from an INI section.
    """
    def __init__(self):
        #rsync command path
        self.rsync_cmd = 'd:\\software\\cwRsync\\bin\\rsync.exe'
        self.ssh_cmd = 'd:\\software\\cwRsync\\bin\\ssh.exe'
        #private key path
        self.id_rsa_path = 'd:\\software\\pietty\\ssh\\id_rsa'
        #where to store the backup files
        self.target_path = 'd:\\work\\bk'
        self.user = 'liszt'
        self.host = ''
        #rsync path on server
        self.rsync_path = '/home/liszt/bin/rsync'
        #where to backup
        self.source = '/var/log/'
        self.exclude_from = 'd:\\work\\bk\\exclude.txt'
        self.exclude = ''
        self.include_from = ''
        self.include = ''
    def merge_config(self, conf):
        """Overwrite attributes from *conf*, a dict of option -> value."""
        for key in conf.keys():
            setattr(self, key, conf[key])
    @staticmethod
    def massage_win_path(path):
        """Convert 'd:\\x' to '/cygdrive/d/x' on Windows; pass through elsewhere."""
        if os.name == 'nt':
            return '/cygdrive/%s' % path.replace(':', '').replace('\\', '/')
        return path
    def get_target_dir(self):
        """Ensure the local target directory exists; return its rsync form."""
        #create target dir
        try:
            os.makedirs(self.target_path)
        except Exception, e:
            # directory may already exist; best-effort, just report
            print e
        return Syncer.massage_win_path(self.target_path)
    def get_ssh_cmd(self):
        """Return the -e option selecting ssh with our private key."""
        key_path = Syncer.massage_win_path(self.id_rsa_path)
        return '-e "%s -i %s"' % (self.ssh_cmd, key_path)
    def get_rsync_cmd(self):
        """Return the --rsync-path option pointing at the remote rsync binary."""
        return '--rsync-path="%s"' % self.rsync_path
    def get_exclude_cmd(self):
        """Return the exclude option; a file list takes precedence over a pattern."""
        if self.exclude_from:
            return '--exclude-from="%s"' % Syncer.massage_win_path(self.exclude_from)
        if self.exclude:
            return '--exclude="%s"' % self.exclude
        return ''
    def get_include_cmd(self):
        """Return the include option; a file list takes precedence over a pattern."""
        if self.include_from:
            return '--include-from="%s"' % Syncer.massage_win_path(self.include_from)
        if self.include:
            return '--include="%s"' % self.include
        return ''
    def get_cmd(self):
        """Assemble the full rsync command line, print it and execute it."""
        target = self.get_target_dir()
        ssh_cmd = self.get_ssh_cmd()
        rsync_cmd = self.get_rsync_cmd()
        exclude_cmd = self.get_exclude_cmd()
        include_cmd = self.get_include_cmd()
        source = '%s@%s:%s' % (self.user,
                               self.host,
                               self.source)
        final_cmd = '%s -avz -P %s %s %s %s "%s" "%s"' % (self.rsync_cmd,
                                                          rsync_cmd,
                                                          include_cmd,
                                                          exclude_cmd,
                                                          ssh_cmd,
                                                          source,
                                                          target)
        print final_cmd
        try:
            os.system(final_cmd)
        except Exception, e:
            print e
if __name__ == '__main__':
    # Usage: sync.py <cfg file> [section ...]; with no sections given,
    # every section in the config file is synced.
    if len(sys.argv) < 2:
        print 'input a cfg file path'
        sys.exit(0)
    if not os.path.isfile(sys.argv[1]):
        print 'use a file path'
        sys.exit(0)
    target_sections = sys.argv[2:]
    config = ConfigParser.ConfigParser()
    config.read(sys.argv[1])
    if not target_sections:
        target_sections = config.sections()
    for sec in target_sections:
        print sec
        try:
            sec_dict = dict(config.items(sec))
        except Exception, e:
            print '[Error] parse section %s met error: %s' % (sec, e)
            continue
        syncer = Syncer()
        syncer.merge_config(sec_dict)
        # get_cmd() both builds and executes the rsync command
        syncer.get_cmd()
|
import unittest
from katas.kyu_6.multi_tap_keypad_text_entry import presses
class PressesTestCase(unittest.TestCase):
    """Expected keypress counts for multi-tap keypad text entry."""
    def test_equals(self):
        self.assertEqual(presses('LOL'), 9)
    def test_equals_2(self):
        self.assertEqual(presses('HOW R U'), 13)
    def test_equals_3(self):
        self.assertEqual(presses('WHERE DO U WANT 2 MEET L8R'), 47)
|
from _typeshed import Incomplete
# Stub signatures for the Harary graph generators; the real
# implementations live in the runtime package, hence the elided bodies.
def hnm_harary_graph(n, m, create_using: Incomplete | None = None): ...
def hkn_harary_graph(k, n, create_using: Incomplete | None = None): ...
|
"""
corpkit: Interrogate a parsed corpus
"""
#!/usr/bin/python
from __future__ import print_function
from corpkit.constants import STRINGTYPE, PYTHON_VERSION, INPUTFUNC
def interrogator(corpus,
search,
query='any',
show='w',
exclude=False,
excludemode='any',
searchmode='all',
dep_type='collapsed-ccprocessed-dependencies',
case_sensitive=False,
save=False,
just_speakers=False,
preserve_case=False,
lemmatag=False,
files_as_subcorpora=False,
only_unique=False,
random=False,
only_format_match=False,
multiprocess=False,
spelling=False,
regex_nonword_filter=r'[A-Za-z0-9:_]',
gramsize=2,
split_contractions=False,
conc=False,
maxconc=9999,
window=4,
no_closed=False,
no_punct=True,
whitelist=False,
**kwargs
):
"""
Interrogate corpus, corpora, subcorpus and file objects.
See corpkit.interrogation.interrogate() for docstring
"""
# in case old kwarg is used
conc = kwargs.get('do_concordancing', conc)
# store kwargs and locs
locs = locals().copy()
locs.update(kwargs)
locs.pop('kwargs', None)
if isinstance(search, STRINGTYPE) and len(search) > 3:
raise ValueError('search argument not recognised.')
import codecs
import signal
import os
from time import localtime, strftime
from collections import Counter
import corenlp_xml
import pandas as pd
from pandas import DataFrame, Series
from corpkit.interrogation import Interrogation, Interrodict
from corpkit.corpus import Datalist, Corpora, Corpus, File, Subcorpus
from corpkit.process import (tregex_engine, get_deps, unsplitter, sanitise_dict,
get_speakername, animator, filtermaker)
from corpkit.other import as_regex
from corpkit.dictionaries.word_transforms import wordlist, taglemma
from corpkit.dictionaries.process_types import Wordlist
from corpkit.build import check_jdk
import re
if regex_nonword_filter:
is_a_word = re.compile(regex_nonword_filter)
else:
is_a_word = re.compile(r'.*')
have_java = check_jdk()
def signal_handler(signal, _):
"""pause on ctrl+c, rather than just stop loop"""
import signal
import sys
from time import localtime, strftime
signal.signal(signal.SIGINT, original_sigint)
thetime = strftime("%H:%M:%S", localtime())
INPUTFUNC('\n\n%s: Paused. Press any key to resume, or ctrl+c to quit.\n' % thetime)
time = strftime("%H:%M:%S", localtime())
print('%s: Interrogation resumed.\n' % time)
signal.signal(signal.SIGINT, signal_handler)
def fix_show(show):
"""lowercase anything in show and turn into list"""
if isinstance(show, list):
show = [i.lower() for i in show]
elif isinstance(show, STRINGTYPE):
show = show.lower()
show = [show]
# this little 'n' business is a hack: when ngramming,
# n shows have their n stripped, so nw should be nw
# so we know we're ngramming and so it's not empty.
for index, val in enumerate(show):
if val == 'n' or val == 'nw':
show[index] = 'nw'
elif val == 'b' or val == 'bw':
show[index] = 'bw'
elif val.endswith('pl'):
show[index] = val.replace('pl', 'x')
else:
if len(val) == 2 and val.endswith('w'):
show[index] = val[0]
return show
def fix_search(search):
"""if search has nested dicts, remove them"""
ends = ['w', 'l', 'i', 'n', 'f', 'p', 'x', 's']
if not search:
return
if isinstance(search, STRINGTYPE):
return search
if search.get('t'):
return search
newsearch = {}
for srch, pat in search.items():
if len(srch) == 1 and srch in ends:
srch = 'm%s' % srch
if isinstance(pat, dict):
for k, v in list(pat.items()):
if k != 'w':
newsearch[srch + k] = pat_format(v)
else:
newsearch[srch] = pat_format(v)
else:
newsearch[srch] = pat_format(pat)
return newsearch
def pat_format(pat):
from corpkit.dictionaries.process_types import Wordlist
import re
if pat == 'any':
return re.compile(r'.*')
if isinstance(pat, Wordlist):
pat = list(pat)
if isinstance(pat, list):
if all(isinstance(x, int) for x in pat):
pat = [str(x) for x in pat]
pat = filtermaker(pat, case_sensitive=case_sensitive, root=kwargs.get('root'))
else:
if isinstance(pat, int):
return pat
if isinstance(pat, re._pattern_type):
return pat
if case_sensitive:
pat = re.compile(pat)
else:
pat = re.compile(pat, re.IGNORECASE)
return pat
def is_multiquery(corpus, search, query, just_speakers):
"""determine if multiprocessing is needed
do some retyping if need be as well"""
is_mul = False
from collections import OrderedDict
#if hasattr(corpus, '__iter__'):
# is_mul = True
# so we can do search = 't', query = ['NP', 'VP']:
from corpkit.dictionaries.process_types import Wordlist
if isinstance(query, Wordlist):
query = list(query)
if isinstance(query, list):
if query != list(search.values())[0] or len(list(search.keys())) > 1:
query = {c.title(): c for c in query}
if isinstance(query, (dict, OrderedDict)):
is_mul = True
if just_speakers:
if just_speakers == 'each':
is_mul = True
just_speakers = ['each']
if just_speakers == ['each']:
is_mul = True
elif isinstance(just_speakers, STRINGTYPE):
is_mul = False
just_speakers = [just_speakers]
#import re
#if isinstance(just_speakers, re._pattern_type):
# is_mul = False
if isinstance(just_speakers, list):
if len(just_speakers) > 1:
is_mul = True
if isinstance(search, dict):
if all(isinstance(i, dict) for i in list(search.values())):
is_mul = True
return is_mul, corpus, search, query, just_speakers
def slow_tregex(sents, **dummy_args):
"""do the speaker-specific version of tregex queries"""
speakr = dummy_args.get('speaker', '')
import os
from corpkit.process import tregex_engine
# first, put the relevant trees into temp file
to_open = '\n'.join(sent.parse_string.strip() for sent in sents \
if sent.parse_string is not None)
q = list(search.values())[0]
ops = ['-o', '-%s' % translated_option]
concs = []
res = tregex_engine(query=q,
options=ops,
corpus=to_open,
root=root,
preserve_case=True
)
if not no_conc:
ops += ['-w', '-f']
whole_res = tregex_engine(query=q,
options=ops,
corpus=to_open,
root=root,
preserve_case=True
)
for line in whole_res:
line.insert(1, speakr)
res = format_tregex(res)
whole_res = format_tregex(whole_res, whole=True)
concs = make_conc_lines_from_whole_mid(whole_res, res)
if root:
root.update()
if countmode:
if isinstance(res, int):
return res, False
else:
return len(res), False
else:
return res, concs
def get_stats(sents, **dummy_args):
"""get a bunch of frequencies on interpersonal phenomena"""
from collections import Counter
statsmode_results = Counter()
# first, put the relevant trees into temp file
for sent in sents:
statsmode_results['Sentences'] += 1
deps = get_deps(sent, dep_type)
numpass = len([x for x in deps.links if x.type.endswith('pass')])
statsmode_results['Passives'] += numpass
statsmode_results['Tokens'] += len(sent.tokens)
words = [w.word for w in sent.tokens if w.word is not None and w.word.isalnum()]
statsmode_results['Words'] += len(words)
statsmode_results['Characters'] += len(''.join(words))
to_open = '\n'.join(s.parse_string.strip() for s in sents)
from corpkit.dictionaries.process_types import processes
from corpkit.other import as_regex
tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',
'Open interrogative': r'ROOT < SBARQ <<- (/\?/ !< __)',
'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\?/ !< __))',
'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',
'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',
'Open class': r'/^(NN|JJ|VB|RB)/ < __',
'Closed class': r'__ !< __ !> /^(NN|JJ|VB|RB)/',
'Clauses': r'/^S/ < __',
'Interrogative': r'ROOT << (/\?/ !< __)',
'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \
as_regex(processes.mental, boundaries='w'),
'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \
as_regex(processes.verbal, boundaries='w'),
'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \
as_regex(processes.relational, boundaries='w'),
'Verbless clause': r'/^S/ !<< /^VB.?/'}
for name, q in sorted(tregex_qs.items()):
res = tregex_engine(query=q,
options=['-o', '-C'],
corpus=to_open,
root=root
)
statsmode_results[name] += int(res)
if root:
root.update()
return statsmode_results, []
    def make_conc_lines_from_whole_mid(wholes,
                                       middle_column_result
                                      ):
        """
        Create concordance line output from tregex output

        *wholes* holds (filename, speaker, whole-sentence) triples and
        *middle_column_result* the matched substrings, in parallel. Each
        occurrence of a match inside its sentence yields one
        [basename, speaker, left, match, right] line; duplicate
        (sentence, match) pairs are collapsed first.
        """
        import re
        import os
        if not wholes and not middle_column_result:
            return []
        conc_lines = []
        # remove duplicates from results
        unique_wholes = []
        unique_middle_column_result = []
        duplicates = []
        for (f, sk, whole), mid in zip(wholes, middle_column_result):
            joined = '-join-'.join([f, sk, whole, mid])
            if joined not in duplicates:
                duplicates.append(joined)
                unique_wholes.append([f, sk, whole])
                unique_middle_column_result.append(mid)
        # split into start, middle and end, dealing with multiple occurrences
        for (f, sk, whole), mid in zip(unique_wholes, unique_middle_column_result):
            # the match must sit on a non-alphanumeric boundary (or string edge)
            reg = re.compile(r'([^a-zA-Z0-9-]|^)(' + re.escape(mid) + r')([^a-zA-Z0-9-]|$)', \
                re.IGNORECASE | re.UNICODE)
            offsets = [(m.start(), m.end()) for m in re.finditer(reg, whole)]
            for offstart, offend in offsets:
                start, middle, end = whole[0:offstart].strip(), whole[offstart:offend].strip(), \
                    whole[offend:].strip()
                conc_lines.append([os.path.basename(f), sk, start, middle, end])
        return conc_lines
    def uniquify(conc_lines):
        """get unique concordance lines

        Two lines are duplicates when speaker, left context, match and
        right context all coincide (the filename column is ignored).
        First-occurrence order is preserved.
        """
        from collections import OrderedDict  # NOTE(review): unused import
        unique_lines = []
        checking = []
        for index, (_, speakr, start, middle, end) in enumerate(conc_lines):
            # dedup key deliberately excludes the filename field
            joined = ' '.join([speakr, start, 'MIDDLEHERE:', middle, ':MIDDLEHERE', end])
            if joined not in checking:
                unique_lines.append(conc_lines[index])
                checking.append(joined)
        return unique_lines
    def lemmatiser(list_of_words, tag):
        """
        Take a list of unicode words and a tag and return a lemmatised list

        Relies on the enclosing scope's translated_option/preserve_case and
        on the module-level wordlist/taglemma mappings; falls back to the
        WordNet lemmatizer (lmtzr) for unknown words.
        """
        output = []
        for word in list_of_words:
            if translated_option.startswith('u'):
                # 'u' options map POS tags to their broad word class
                word = taglemma.get(word.lower(), 'Other')
            else:
                # hard-coded exceptions first, then WordNet
                word = wordlist.get(word, lmtzr.lemmatize(word, tag))
            if not preserve_case:
                word = word.lower()
            output.append(word)
        return output
def tgrep_searcher(sents, search, show, conc, **kwargs):
"""
Use tgrep for constituency grammar search
"""
f = kwargs.get('filename')
from corpkit.process import show_tree_as_per_option, tgrep
out = []
conc_output = []
conc_out = []
for sent in sents:
sk = get_speakername(sent)
results = tgrep(sent, search['t'])
for res in results:
out.append(show_tree_as_per_option(show, res, sent))
if conc:
lin = [f, sk, show_tree_as_per_option(show + ['whole'], res, sent)]
conc_out.append(lin)
if conc:
conc_output = make_conc_lines_from_whole_mid(conc_out, out)
return out, conc_output
    def gettag(query, lemmatag=False):
        """
        Find tag for WordNet lemmatisation

        An explicit *lemmatag* wins; otherwise the part of speech is
        guessed from the first alphabetic character of the (cleaned)
        query string, defaulting to noun ('n').
        """
        if lemmatag:
            return lemmatag
        tagdict = {'N': 'n',
                   'J': 'a',
                   'V': 'v',
                   'A': 'r',
                   'None': False,
                   '': False,
                   'Off': False}
        # in case someone compiles the tregex query
        try:
            query = query.pattern
        except AttributeError:
            query = query
        # strip common regex escapes so the first letter is meaningful
        qr = query.replace(r'\w', '').replace(r'\s', '').replace(r'\b', '')
        firstletter = next((c for c in qr if c.isalpha()), 'n')
        return tagdict.get(firstletter.upper(), 'n')
def format_tregex(results, whole=False):
"""format tregex by show list"""
import re
if countmode:
return results
if not results:
return
done = []
if whole:
fnames, snames, results = zip(*results)
if 'l' in show or 'x' in show:
lemmata = lemmatiser(results, gettag(search.get('t'), lemmatag))
else:
lemmata = [None for i in results]
for word, lemma in zip(results, lemmata):
bits = []
if exclude and exclude.get('w'):
if len(list(exclude.keys())) == 1 or excludemode == 'any':
if re.search(exclude.get('w'), word):
continue
if len(list(exclude.keys())) == 1 or excludemode == 'any':
if re.search(exclude.get('l'), lemma):
continue
if len(list(exclude.keys())) == 1 or excludemode == 'any':
if re.search(exclude.get('p'), word):
continue
if len(list(exclude.keys())) == 1 or excludemode == 'any':
if re.search(exclude.get('x'), lemma):
continue
if exclude and excludemode == 'all':
num_to_cause_exclude = len(list(exclude.keys()))
current_num = 0
if exclude.get('w'):
if re.search(exclude.get('w'), word):
current_num += 1
if exclude.get('l'):
if re.search(exclude.get('l'), lemma):
current_num += 1
if exclude.get('p'):
if re.search(exclude.get('p'), word):
current_num += 1
if exclude.get('x'):
if re.search(exclude.get('x'), lemma):
current_num += 1
if current_num == num_to_cause_exclude:
continue
for i in show:
if i == 't':
bits.append(word)
if i == 'l':
bits.append(lemma)
elif i == 'w':
bits.append(word)
elif i == 'p':
bits.append(word)
elif i == 'x':
bits.append(lemma)
joined = '/'.join(bits)
done.append(joined)
if whole:
done = zip(fnames, snames, done)
return done
def tok_by_list(pattern, list_of_toks, concordancing=False, **kwargs):
"""search for regex in plaintext corpora"""
import re
if isinstance(pattern, STRINGTYPE):
pattern = [pattern]
if not case_sensitive:
pattern = [p.lower() for p in pattern]
if not concordancing:
if case_sensitive:
matches = [m for m in list_of_toks if m in pattern]
else:
matches = [m for m in list_of_toks if m.lower() in pattern]
else:
matches = []
for index, token in enumerate(list_of_toks):
if token in pattern:
if not split_contractions:
match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]
else:
match = [' '.join(t for t in list_of_toks[:index])[-140:]]
match.append(token)
if not split_contractions:
match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])
else:
match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])
matches.append(match)
if countmode:
return len(matches)
else:
return matches
    def tok_ngrams(pattern, list_of_toks, concordancing=False, split_contractions=True):
        """Collect n-grams (size from the enclosing scope's gramsize) in
        which at least one token matches *pattern*.

        Tokens failing regex_nonword_filter are dropped first. Under
        countmode the count is returned; otherwise n-grams occurring only
        once are filtered out of the listed results.
        """
        import re
        result = []
        list_of_toks = [x for x in list_of_toks if re.search(regex_nonword_filter, x)]
        if not split_contractions:
            list_of_toks = unsplitter(list_of_toks)
        for i in range(len(list_of_toks)):
            try:
                the_gram = [list_of_toks[i+x] for x in range(gramsize)]
                if any(re.search(pattern, x) for x in the_gram):
                    result.append(' '.join(the_gram))
            except IndexError:
                # ran off the end of the token list: no more full n-grams
                pass
        if countmode:
            return len(result)
        else:
            # keep only n-grams that occur more than once
            result = [i for i in result if result.count(i) > 1]
            return result
    def compiler(pattern):
        """compile regex or fail gracefully

        Already-compiled patterns pass through unchanged. On a bad
        pattern the error is printed; under a GUI root the string
        'Bad query' is returned, otherwise ValueError is raised.
        """
        if hasattr(pattern, 'pattern'):
            return pattern
        import re
        try:
            if case_sensitive:
                comped = re.compile(pattern)
            else:
                comped = re.compile(pattern, re.IGNORECASE)
            return comped
        except:
            import traceback
            import sys
            from time import localtime, strftime
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lst = traceback.format_exception(exc_type, exc_value, exc_traceback)
            # last traceback line carries the regex error message
            error_message = lst[-1]
            thetime = strftime("%H:%M:%S", localtime())
            print('%s: Query %s' % (thetime, error_message))
            if root:
                return 'Bad query'
            else:
                raise ValueError('%s: Query %s' % (thetime, error_message))
    def tok_by_reg(pattern, list_of_toks, concordancing = False, **kwargs):
        """search for regex in plaintext corpora

        Returns the matching tokens; with *concordancing*, returns
        [left context, match, right context] triples (contexts truncated
        to 140 characters). Under the enclosing scope's countmode, just
        the match count is returned.
        """
        import re
        comped = compiler(pattern)
        if comped == 'Bad query':
            return 'Bad query'
        if not concordancing:
            matches = [m for m in list_of_toks if re.search(comped, m)]
        else:
            matches = []
            for index, token in enumerate(list_of_toks):
                if re.search(comped, token):
                    # left context: last 140 chars before the match
                    if not split_contractions:
                        match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]
                    else:
                        match = [' '.join(t for t in list_of_toks[:index])[-140:]]
                    match.append(re.search(comped, token).group(0))
                    # right context: first 140 chars after the match
                    if not split_contractions:
                        match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])
                    else:
                        match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])
                    matches.append(match)
        if countmode:
            return len(matches)
        else:
            return matches
def determine_search_func(show):
    """Figure out what search function we're using.

    Chooses among Tregex/tgrep (parse trees), plaintext, token and
    dependency searchers based on the closure's ``search`` dict,
    ``datatype``, ``have_java`` and ``kwargs``.  Returns a 5-tuple:
    (searcher, optiontext, simple_tregex_mode, statsmode, tree_to_text).
    """
    simple_tregex_mode = False
    statsmode = False
    tree_to_text = False
    # Tree search with no speaker filtering can use the fast external
    # Tregex binary when Java is available.
    if search.get('t') and not just_speakers and not kwargs.get('tgrep'):
        if have_java:
            simple_tregex_mode = True
            searcher = None
        else:
            searcher = tgrep_searcher
        optiontext = 'Searching parse trees'
    else:
        if datatype == 'plaintext':
            # search keys encode the target: trailing 'n' = n-grams,
            # trailing 'w' = words.
            if any(i.endswith('n') for i in search.keys()):
                optiontext = 'n-grams via plaintext'
                raise NotImplementedError('Use a tokenised or parsed corpus for n-gramming.')
                #searcher = plaintext_ngram
            elif any(i.endswith('w') for i in search.keys()):
                if kwargs.get('regex', True):
                    searcher = plaintext_regex_search
                else:
                    searcher = plaintext_simple_search
                optiontext = 'Searching plaintext'
            else:
                raise ValueError("Plaintext search must be 'w' or 'n'.")
        elif datatype == 'tokens':
            if any(i.endswith('n') for i in search.keys()):
                searcher = tok_ngrams
                optiontext = 'n-grams via tokens'
            elif any(i.endswith('w') for i in search.keys()):
                if kwargs.get('regex', True):
                    searcher = tok_by_reg
                else:
                    searcher = tok_by_list
                # a list/Wordlist query forces exact-match searching
                if isinstance(search.get('w'), (list, Wordlist)):
                    searcher = tok_by_list
                optiontext = 'Searching tokens'
        # dependency-style options require a parsed corpus
        only_parse = ['r', 'd', 'g', 'dl', 'gl', 'df', 'gf',
                      'dp', 'gp', 'f', 'd2', 'd2f', 'd2p', 'd2l']
        if datatype != 'parse' and any(i in only_parse for i in list(search.keys())):
            form = ', '.join(i for i in list(search.keys()) if i in only_parse)
            raise ValueError('Need parsed corpus to search with "%s" option(s).' % form)
        elif datatype == 'parse':
            # n-gram query over parsed data is rewritten as a word query
            # with n-gram display
            if any(i.endswith('n') for i in search.keys()):
                search['w'] = search.pop('n')
                if not show_ngram:
                    show = ['n']
            if any(i.endswith('t') for i in search.keys()):
                if have_java and not kwargs.get('tgrep'):
                    searcher = slow_tregex
                else:
                    searcher = tgrep_searcher
                optiontext = 'Searching parse trees'
            elif any(i.endswith('s') for i in search.keys()):
                searcher = get_stats
                statsmode = True
                optiontext = 'General statistics'
            elif any(i.endswith('r') for i in search.keys()):
                from corpkit.depsearch import dep_searcher
                searcher = dep_searcher
                optiontext = 'Distance from root'
            else:
                from corpkit.depsearch import dep_searcher
                searcher = dep_searcher
                optiontext = 'Dependency querying'
            # ngram mode for parsed data
            if show_ngram:
                optiontext = 'N-grams from parsed data'
                searcher = dep_searcher
    return searcher, optiontext, simple_tregex_mode, statsmode, tree_to_text
def get_tregex_values():
    """If using Tregex, set appropriate values

    - Check for valid query
    - Make 'any' query
    - Make list query

    Returns (query, translated_option); on a failed validity check the
    sentinel ('Bad query', None) is returned.
    """
    translated_option = 't'
    if isinstance(search['t'], Wordlist):
        search['t'] = list(search['t'])
    # validate the query with Tregex itself before doing any real work
    q = tregex_engine(corpus=False,
                      query=search.get('t'),
                      options=['-t'],
                      check_query=True,
                      root=root,
                      preserve_case=preserve_case
                     )
    if q is False:
        # FIX: the old code had ``if root: ... else: ...`` with the
        # exact same return in both branches; collapsed to one return.
        return 'Bad query', None
    if isinstance(search['t'], list):
        regex = as_regex(search['t'], boundaries='line', case_sensitive=case_sensitive)
    else:
        regex = ''
    # listquery, anyquery, translated_option -- keyed by the first
    # letter of the requested show value
    treg_dict = {'p': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],
                 'pl': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],
                 'x': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],
                 't': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'o'],
                 'w': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 't'],
                 'c': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 'C'],
                 'l': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 't']
                }
    listq, anyq, translated_option = treg_dict.get(show[0].lower())
    if isinstance(search['t'], list):
        search['t'] = listq
    elif search['t'] == 'any':
        search['t'] = anyq
    return search['t'], translated_option
def plaintext_regex_search(pattern, plaintext_data, concordancing=False, **kwargs):
    """search for regex in plaintext corpora
    it searches over lines, so the user needs to be careful.

    Returns a list of matched strings, or [left, match, right] triples
    when *concordancing*; the closure's ``countmode`` flag makes it
    return a bare count instead.
    """
    import re
    if concordancing:
        # wrap the user pattern with up-to-140-char context groups
        pattern = r'(.{,140})\b(' + pattern + r')\b(.{,140})'
    compiled_pattern = compiler(pattern)
    if compiled_pattern == 'Bad query':
        return 'Bad query'
    matches = re.findall(compiled_pattern, plaintext_data)
    if concordancing:
        matches = [list(m) for m in matches]
    if not concordancing:
        # if the user pattern itself had groups, findall returned
        # tuples; keep only the first group
        for index, i in enumerate(matches):
            if isinstance(i, tuple):
                matches[index] = i[0]
    if countmode:
        return len(matches)
    else:
        return matches
def correct_spelling(a_string):
    """correct spelling within a string

    Converts each '/'-separated segment between US and UK spelling
    using corpkit's transform table, preserving the original casing
    when the closure's ``preserve_case`` flag is set.  Returns the
    string unchanged when ``spelling`` is falsy.
    """
    if not spelling:
        return a_string
    from corpkit.dictionaries.word_transforms import usa_convert
    if spelling.lower() == 'uk':
        # table maps UK->US by default; invert it for US->UK
        usa_convert = {v: k for k, v in list(usa_convert.items())}
    # segments are word/lemma/POS-style slash-joined tokens
    bits = a_string.split('/')
    for index, i in enumerate(bits):
        converted = usa_convert.get(i.lower(), i)
        if i.islower() or preserve_case is False:
            converted = converted.lower()
        elif i.isupper() and preserve_case:
            converted = converted.upper()
        elif i.istitle() and preserve_case:
            converted = converted.title()
        bits[index] = converted
    r = '/'.join(bits)
    return r
def plaintext_simple_search(pattern, plaintext_data, concordancing=False, **kwargs):
    """search for tokens in plaintext corpora

    *pattern* may be a single string or a list of strings; each is
    matched as a whole word.  Returns the list of matches (the pattern
    string repeated once per hit), or [left, match, right] context
    triples when *concordancing*.
    """
    import re
    result = []
    if isinstance(pattern, STRINGTYPE):
        pattern = [pattern]
    for p in pattern:
        if concordancing:
            pat = r'(.{0,140})\b(' + re.escape(p) + r')\b(.{0,140})'
        else:
            # BUG FIX: previously ``pat``/``matches`` were assigned only
            # inside the concordancing branch, so concordancing=False
            # raised UnboundLocalError on ``matches`` below.  Match the
            # literal token with word boundaries instead.
            pat = r'\b' + re.escape(p) + r'\b'
        pat = compiler(pat)
        if pat == 'Bad query':
            return 'Bad query'
        matches = re.findall(pat, plaintext_data)
        if concordancing:
            matches = [list(m) for m in matches]
            for i in matches:
                result.append(i)
        else:
            # one entry per occurrence of the searched token
            for m in range(len(matches)):
                result.append(p)
    return result
def make_search_iterable(corpus):
    """determine how to structure the corpus for interrogation

    Returns a dict mapping (name, path) -> list of File objects (or
    False in simple Tregex mode, where the external binary walks the
    directory itself).  The shape depends on whether *corpus* is a
    Datalist, a single file, a flat directory, or has subcorpora, and
    on the closure's ``files_as_subcorpora`` flag.
    """
    # skip file definitions if they are not needed
    if simple_tregex_mode:
        if corpus.level in ['s', 'f']:
            return {(corpus.name, corpus.path): False}
        else:
            # one entry per subdirectory; Tregex handles the files
            return {(os.path.basename(i), os.path.join(corpus.path, i)): False
                    for i in os.listdir(corpus.path)
                    if os.path.isdir(os.path.join(corpus.path, i))}
    if isinstance(corpus, Datalist):
        to_iterate_over = {}
        # it could be files or subcorpus objects
        if corpus[0].level == 's':
            if files_as_subcorpora:
                for subc in corpus:
                    for f in subc.files:
                        to_iterate_over[(f.name, f.path)] = [f]
            else:
                for subc in corpus:
                    to_iterate_over[(subc.name, subc.path)] = subc.files
        elif corpus[0].level == 'f':
            for f in corpus:
                to_iterate_over[(f.name, f.path)] = [f]
    elif corpus.singlefile:
        to_iterate_over = {(corpus.name, corpus.path): [corpus]}
    elif not hasattr(corpus, 'subcorpora') or not corpus.subcorpora:
        # just files in a directory
        if files_as_subcorpora:
            to_iterate_over = {}
            for f in corpus.files:
                to_iterate_over[(f.name, f.path)] = [f]
        else:
            to_iterate_over = {(corpus.name, corpus.path): corpus.files}
    else:
        to_iterate_over = {}
        if files_as_subcorpora:
            # don't know if possible: has subcorpora but also .files
            if hasattr(corpus, 'files') and corpus.files is not None:
                for f in corpus.files:
                    to_iterate_over[(f.name, f.path)] = [f]
            # has subcorpora with files in those
            elif hasattr(corpus, 'files') and corpus.files is None:
                for subc in corpus.subcorpora:
                    for f in subc.files:
                        to_iterate_over[(f.name, f.path)] = [f]
        else:
            if corpus[0].level == 's':
                for subcorpus in corpus:
                    to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files
            elif corpus[0].level == 'f':
                for f in corpus:
                    to_iterate_over[(f.name, f.path)] = [f]
            else:
                for subcorpus in corpus.subcorpora:
                    to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files
    return to_iterate_over
def welcome_printer(return_it=False):
    """Print welcome message

    Builds a human-readable summary of the current query from the
    closure's ``search`` dict.  With *return_it* the message is
    returned instead of printed (used for notebook progress bars).
    """
    if no_conc:
        message = 'Interrogating'
    else:
        message = 'Interrogating and concordancing'
    if kwargs.get('printstatus', True):
        thetime = strftime("%H:%M:%S", localtime())
        from corpkit.constants import transshow, transobjs
        sformat = '\n'
        for k, v in search.items():
            # 't' (trees) has no attribute part to display
            if k == 't':
                dratt = ''
            else:
                dratt = transshow.get(k[-1], k[-1])
            drole = transobjs.get(k[0], k[0])
            if k == 't':
                drole = 'Trees'
            # show the raw regex pattern when v is compiled
            vform = getattr(v, 'pattern', v)
            sformat += ' %s %s: %s\n' % (drole, dratt.lower(), vform)
        if search.get('s'):
            sformat = 'Features'
        welcome = ('\n%s: %s %s ...\n %s\n ' \
                   'Query: %s\n %s corpus ... \n' % \
                   (thetime, message, cname, optiontext, sformat, message))
        if return_it:
            return welcome
        else:
            print(welcome)
def goodbye_printer(return_it=False, only_conc=False):
    """Say goodbye before exiting

    Summarises result counts from the closure (``conc_df``, ``tot``,
    ``numentries``, ``total_total``).  With *return_it* the message is
    returned instead of printed.
    """
    if not kwargs.get('printstatus', True):
        return
    thetime = strftime("%H:%M:%S", localtime())
    if only_conc:
        show_me = (thetime, len(conc_df))
        finalstring = '\n\n%s: Concordancing finished! %d results.' % show_me
    else:
        finalstring = '\n\n%s: Interrogation finished!' % thetime
        if countmode:
            finalstring += ' %d matches.' % tot
        else:
            dat = (numentries, total_total)
            finalstring += ' %d unique results, %d total occurrences.' % dat
    if return_it:
        return finalstring
    else:
        print(finalstring)
def make_conc_obj_from_conclines(conc_results):
    """
    Turn conclines into DataFrame

    *conc_results* maps subcorpus name -> list of
    (fname, speaker, left, match, right) tuples.  Returns a
    Concordance (DataFrame subclass) with columns c/f/s/l/m/r, or
    None when there is nothing to concatenate.
    """
    from corpkit.interrogation import Concordance
    all_conc_lines = []
    for sc_name, resu in sorted(conc_results.items()):
        if only_unique:
            unique_results = uniquify(resu)
        else:
            unique_results = resu
        #make into series
        if PYTHON_VERSION == 2:
            pindex = 'c f s l m r'.encode('utf-8').split()
        else:
            pindex = 'c f s l m r'.split()
        for fname, spkr, start, word, end in unique_results:
            #spkr = str(spkr, errors = 'ignore')
            fname = os.path.basename(fname)
            ser = [sc_name, fname, spkr, start, word, end]
            all_conc_lines.append(Series(ser, index=pindex))
    # closure flag: user asked for randomised concordance order
    if random:
        from random import shuffle
        shuffle(all_conc_lines)
    try:
        conc_df = pd.concat(all_conc_lines, axis=1).T
        # drop the speaker column when nothing used it
        if all(x == '' for x in list(conc_df['s'].values)):
            conc_df.drop('s', axis=1, inplace=True)
        if show_ngram or show_collocates:
            if not language_model:
                counted = Counter(conc_df['m'])
                # FIX: ``.ix`` was removed in pandas 1.0; ``.loc`` is the
                # label-based equivalent and behaves identically here
                # because the index is a fresh integer label index.
                indices = [l for l in list(conc_df.index) if counted[conc_df.loc[l, 'm']] > 1]
                conc_df = conc_df.loc[indices]
                conc_df = conc_df.reset_index(drop=True)
        locs['corpus'] = corpus.name
        conc_df = Concordance(conc_df)
        try:
            conc_df.query = locs
        except AttributeError:
            pass
        return conc_df
    except ValueError:
        # pd.concat raises ValueError on an empty list: no results
        return
def make_progress_bar():
    """generate a progress bar

    Computes the total number of files to process, wires up optional
    terminal/notebook niceties, and primes the animator.  Returns
    (progress_bar, outname_prefix, total_files, animator_kwargs).
    """
    if simple_tregex_mode:
        # Tregex walks whole subcorpora, so count those instead of files
        total_files = len(list(to_iterate_over.keys()))
    else:
        total_files = sum(len(x) for x in list(to_iterate_over.values()))
    par_args = {'printstatus': kwargs.get('printstatus', True),
                'root': root,
                'note': note,
                'length': total_files,
                'startnum': kwargs.get('startnum'),
                'denom': kwargs.get('denominator', 1)}
    term = None
    if kwargs.get('paralleling', None) is not None:
        # each parallel worker draws on its own terminal line
        from blessings import Terminal
        term = Terminal()
        par_args['terminal'] = term
        par_args['linenum'] = kwargs.get('paralleling')
    if in_notebook:
        par_args['welcome_message'] = welcome_message
    outn = kwargs.get('outname', '')
    if outn:
        outn = outn + ': '
    tstr = '%s%d/%d' % (outn, current_iter, total_files)
    p = animator(None, None, init=True, tot_string=tstr, **par_args)
    tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)
    animator(p, current_iter, tstr, **par_args)
    return p, outn, total_files, par_args
# find out if using gui
root = kwargs.get('root')
note = kwargs.get('note')
language_model = kwargs.get('language_model')
# set up pause method
original_sigint = signal.getsignal(signal.SIGINT)
if kwargs.get('paralleling', None) is None:
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal_handler)
# find out about concordancing
only_conc = False
no_conc = False
if conc is False:
no_conc = True
if isinstance(conc, str) and conc.lower() == 'only':
only_conc = True
no_conc = False
numconc = 0
# wipe non essential class attributes to not bloat query attrib
if isinstance(corpus, Corpus):
import copy
corpus = copy.copy(corpus)
for k, v in corpus.__dict__.items():
if isinstance(v, (Interrogation, Interrodict)):
corpus.__dict__.pop(k, None)
# convert path to corpus object
if not isinstance(corpus, (Corpus, Corpora, Subcorpus, File, Datalist)):
if not multiprocess and not kwargs.get('outname'):
corpus = Corpus(corpus, print_info=False)
# figure out how the user has entered the query and show, and normalise
from corpkit.process import searchfixer
search = searchfixer(search, query)
show = fix_show(show)
show_ngram = any(x.startswith('n') for x in show)
show_collocates = any(x.startswith('b') for x in show)
# instantiate lemmatiser if need be
if 'l' in show and isinstance(search, dict) and search.get('t'):
from nltk.stem.wordnet import WordNetLemmatizer
lmtzr = WordNetLemmatizer()
# do multiprocessing if need be
im, corpus, search, query, just_speakers = is_multiquery(corpus, search, query, just_speakers)
# figure out if we can multiprocess the corpus
if hasattr(corpus, '__iter__') and im:
corpus = Corpus(corpus)
if hasattr(corpus, '__iter__') and not im:
im = True
if isinstance(corpus, Corpora):
im = True
# split corpus if the user wants multiprocessing but no other iterable
if not im and multiprocess:
im = True
corpus = corpus[:]
search = fix_search(search)
exclude = fix_search(exclude)
# if it's already been through pmultiquery, don't do it again
locs['search'] = search
locs['query'] = query
locs['just_speakers'] = just_speakers
locs['corpus'] = corpus
locs['multiprocess'] = multiprocess
locs['print_info'] = kwargs.get('printstatus', True)
# send to multiprocess function
if im:
signal.signal(signal.SIGINT, original_sigint)
from corpkit.multiprocess import pmultiquery
return pmultiquery(**locs)
# get corpus metadata
cname = corpus.name
if isinstance(save, STRINGTYPE):
savename = corpus.name + '-' + save
if save is True:
raise ValueError('save must be str, not bool.')
datatype = getattr(corpus, 'datatype', 'parse')
singlefile = getattr(corpus, 'singlefile', False)
level = getattr(corpus, 'level', 'c')
# store all results in here
results = {}
count_results = {}
conc_results = {}
# check if just counting, turn off conc if so
countmode = 'c' in show
if countmode:
no_conc = True
only_conc = False
# where we are at in interrogation
current_iter = 0
# multiprocessing progress bar
denom = kwargs.get('denominator', 1)
startnum = kwargs.get('startnum', 0)
# Determine the search function to be used #
searcher, optiontext, simple_tregex_mode, statsmode, tree_to_text = determine_search_func(show)
# no conc for statsmode
if statsmode:
no_conc = True
only_conc = False
conc = False
# Set some Tregex-related values
if search.get('t'):
if show_ngram:
raise ValueError("Can't search trees for n-grams---use a dependency search.")
query, translated_option = get_tregex_values()
if query == 'Bad query' and translated_option is None:
if root:
return 'Bad query'
else:
return
# more tregex options
if tree_to_text:
treg_q = r'ROOT << __'
op = ['-o', '-t', '-w']
elif simple_tregex_mode:
treg_q = search['t']
op = ['-o', '-' + translated_option]
# make iterable object for corpus interrogation
to_iterate_over = make_search_iterable(corpus)
from traitlets import TraitError
try:
from ipywidgets import IntProgress
_ = IntProgress(min=0, max=10, value=1)
in_notebook = True
except TraitError:
in_notebook = False
except ImportError:
in_notebook = False
# print welcome message
welcome_message = welcome_printer(return_it=in_notebook)
# create a progress bar
p, outn, total_files, par_args = make_progress_bar()
# Iterate over data, doing interrogations
for (subcorpus_name, subcorpus_path), files in sorted(to_iterate_over.items()):
# results for subcorpus go here
conc_results[subcorpus_name] = []
count_results[subcorpus_name] = []
results[subcorpus_name] = Counter()
# get either everything (tree_to_text) or the search['t'] query
if tree_to_text or simple_tregex_mode:
result = tregex_engine(query=treg_q,
options=op,
corpus=subcorpus_path,
root=root,
preserve_case=preserve_case
)
# format search results with slashes etc
if not countmode and not tree_to_text:
result = format_tregex(result)
# if concordancing, do the query again with 'whole' sent and fname
if not no_conc:
ops = ['-w', '-f'] + op
whole_result = tregex_engine(query=search['t'],
options=ops,
corpus=subcorpus_path,
root=root,
preserve_case=preserve_case
)
for line in whole_result:
line.insert(1, '')
# format match too depending on option
if not only_format_match:
whole_result = format_tregex(whole_result, whole=True)
# make conc lines from conc results
conc_result = make_conc_lines_from_whole_mid(whole_result, result)
for lin in conc_result:
if numconc < maxconc or not maxconc:
conc_results[subcorpus_name].append(lin)
numconc += 1
# add matches to ongoing counts
if countmode:
count_results[subcorpus_name] += [result]
else:
result = Counter(result)
results[subcorpus_name] += result
# update progress bar
current_iter += 1
tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)
animator(p, current_iter, tstr, **par_args)
# dependencies, plaintext, tokens, slow_tregex and tree_to_text
if not simple_tregex_mode:
for f in files:
slow_treg_speaker_guess = kwargs.get('outname', False)
if datatype == 'parse' and not tree_to_text:
# right now, this is not using the File class's read() or document
# methods. the reason is that there seem to be memory leaks. these
# may have been fixed already though.
try:
from corenlp_xml import Document
except ImportError:
from corenlp_xml.document import Document
with codecs.open(f.path, 'rb') as fo:
data = fo.read()
corenlp_xml = Document(data)
#corenlp_xml = f.document
if just_speakers:
import re
if isinstance(just_speakers, re._pattern_type):
sents = [s for s in corenlp_xml.sentences if \
re.search(just_speakers, get_speakername(s))]
else:
sents = [s for s in corenlp_xml.sentences if get_speakername(s) in just_speakers]
if len(just_speakers) == 1:
slow_treg_speaker_guess = just_speakers[0]
else:
sents = corenlp_xml.sentences
# get coreferences
if kwargs.get('coref') or any(x.startswith('h') for x in show):
if just_speakers:
corefs = [i for i in corenlp_xml.coreferences if any(x == i.sentence for x in sents)]
else:
corefs = corenlp_xml.coreferences
else:
corefs = []
corenlp_xml = None
res, conc_res = searcher(sents, search=search, show=show,
dep_type=dep_type,
exclude=exclude,
excludemode=excludemode,
searchmode=searchmode,
case_sensitive=case_sensitive,
conc=conc,
only_format_match=only_format_match,
speaker=slow_treg_speaker_guess,
gramsize=gramsize,
no_punct=no_punct,
no_closed=no_closed,
whitelist=whitelist,
split_contractions=split_contractions,
window=window,
filename=f.name,
language_model=language_model,
corefs=corefs,
is_a_word=is_a_word,
**kwargs
)
if res == 'Bad query':
return 'Bad query'
if datatype == 'tokens':
import pickle
with codecs.open(f.path, "rb") as fo:
data = pickle.load(fo)
elif datatype == 'plaintext' or tree_to_text:
if tree_to_text:
data = '\n'.join(result)
if not split_contractions:
data = unsplitter(data)
else:
with codecs.open(f.path, 'rb', encoding='utf-8') as data:
data = data.read()
if datatype == 'tokens' or datatype == 'plaintext':
query = list(search.values())[0]
if not only_conc:
res = searcher(query,
data,
split_contractions=split_contractions,
concordancing=False
)
if res == 'Bad query':
if root:
return 'Bad query'
if not no_conc:
conc_res = searcher(query,
data,
split_contractions=split_contractions,
concordancing=True
)
if conc_res == 'Bad query':
if root:
return 'Bad query'
for line in conc_res:
line.insert(0, '')
if countmode:
count_results[subcorpus_name] += [res]
else:
# add filename and do lowercasing for conc
if not no_conc:
for line in conc_res:
if searcher != slow_tregex and searcher != tgrep_searcher:
line.insert(0, f.name)
else:
line[0] = f.name
if not preserve_case:
line[3:] = [x.lower() for x in line[3:]]
if spelling:
line = [correct_spelling(b) for b in line]
if numconc < maxconc or not maxconc:
conc_results[subcorpus_name].append(line)
numconc += 1
# do lowercasing and spelling
if not only_conc:
if not preserve_case:
if not statsmode:
res = [i.lower() for i in res]
if spelling:
if not statsmode:
res = [correct_spelling(r) for r in res]
#if not statsmode:
results[subcorpus_name] += Counter(res)
#else:
#results[subcorpus_name] += res
# update progress bar
current_iter += 1
tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)
animator(p, current_iter, tstr, **par_args)
# Get concordances into DataFrame, return if just conc
if not no_conc:
# fail on this line with typeerror if no results?
conc_df = make_conc_obj_from_conclines(conc_results)
if only_conc:
locs = sanitise_dict(locs)
try:
conc_df.query = locs
except AttributeError:
return conc_df
if save and not kwargs.get('outname'):
print('\n')
conc_df.save(savename)
goodbye_printer(only_conc=True)
signal.signal(signal.SIGINT, original_sigint)
return conc_df
else:
conc_df = None
# Get interrogation into DataFrame
if countmode:
df = Series({k: sum(v) for k, v in sorted(count_results.items())})
tot = df.sum()
else:
the_big_dict = {}
unique_results = set(item for sublist in list(results.values()) for item in sublist)
sortres = sorted(results.items(), key=lambda x: x[0])
for word in unique_results:
the_big_dict[word] = [subcorp_result[word] for _, subcorp_result in sortres]
# turn master dict into dataframe, sorted
df = DataFrame(the_big_dict, index=sorted(results.keys()))
# for ngrams, remove hapaxes
if show_ngram or show_collocates:
if not language_model:
df = df[[i for i in list(df.columns) if df[i].sum() > 1]]
numentries = len(df.columns)
tot = df.sum(axis=1)
total_total = df.sum().sum()
# turn df into series if all conditions met
if not countmode:
if level == 's' or singlefile:
if not files_as_subcorpora:
if not kwargs.get('df1_always_df'):
df = Series(df.ix[0])
df.sort_values(ascending=False, inplace=True)
tot = df.sum()
numentries = len(df.index)
total_total = tot
# turn data into DF for GUI if need be
if isinstance(df, Series) and kwargs.get('df1_always_df'):
total_total = df.sum()
df = DataFrame(df)
tot = Series(total_total, index=['Total'])
# if we're doing files as subcorpora, we can remove the .txt.xml etc
if isinstance(df, DataFrame) and files_as_subcorpora:
cname = corpus.name.replace('-stripped', '').replace('-parsed', '')
edits = [(r'(-[0-9][0-9][0-9])?\.txt\.xml', ''),
(r'-%s(-stripped)?(-parsed)?' % cname, '')]
from corpkit.editor import editor
df = editor(df, replace_subcorpus_names=edits).results
tot = df.sum(axis=1)
total_total = df.sum().sum()
# sort by total
if isinstance(df, DataFrame):
if not df.empty:
df = df[list(df.sum().sort_values(ascending=False).index)]
# make interrogation object
locs['corpus'] = corpus.path
locs = sanitise_dict(locs)
interro = Interrogation(results=df, totals=tot, query=locs, concordance=conc_df)
# save it
if save and not kwargs.get('outname'):
print('\n')
interro.save(savename)
goodbye = goodbye_printer(return_it=in_notebook)
if in_notebook:
try:
p.children[2].value = goodbye.replace('\n', '')
except AttributeError:
pass
signal.signal(signal.SIGINT, original_sigint)
return interro
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 14:42:34 2019
lsplane.py -- least-squares regression plane.
INPUT
    mat : array [X Y Z] where X = vector of x coordinates, Y = y coords, Z = z coords
OUTPUT
    x0 : centroid of the data = a point on the best-fit plane
         dim 3x1
    a  : direction cosines of the normal to the best-fit plane
         dim 3x1
Optional outputs (removed for this port):
    d     : residuals
            m x 1
    normd : norm of the residual errors
            dim 1x1
@author: matthewmorriss
"""
def lsplane(mat):
    """Fit a least-squares plane to 3-D points.

    Parameters
    ----------
    mat : (m, 3) array-like of [x, y, z] coordinates, m >= 3.

    Returns
    -------
    (x0, a) : x0 is the centroid (a point on the best-fit plane);
              a is the unit normal (direction cosines), both length-3.

    Raises
    ------
    ValueError : if the input is not m x 3 with at least 3 points.
                 (Previously the function printed 'Error' and called
                 sys.exit(), killing the interpreter.)
    """
    import numpy as np
    import scipy
    from scipy import linalg
    # accept lists as well as arrays (backward compatible)
    mat = np.asarray(mat, dtype=float)
    # FIX: the old check unpacked ``[_, m] = np.shape(mat)`` and tested
    # the number of *columns*, which is always 3 for [X Y Z] input; a
    # plane fit needs at least 3 *points* (rows).
    npts, ncols = mat.shape
    if npts < 3 or ncols != 3:
        raise ValueError('lsplane requires an m x 3 array with m >= 3 points')
    # calculate centroid
    x0 = np.transpose(np.mean(mat, axis=0))
    # form matrix A of points translated so the centroid is the origin
    A = np.column_stack([(mat[:, 0] - x0[0]),
                         (mat[:, 1] - x0[1]),
                         (mat[:, 2] - x0[2])])
    # the normal of the best-fit plane is the right singular vector
    # belonging to the smallest singular value of A
    _, S, Vh = scipy.linalg.svd(A, overwrite_a=True, check_finite=False)
    V = Vh.T.conj()
    # S is already a 1-D vector of singular values; no need for the old
    # np.diag round-trip before taking the argmin
    i = np.argmin(S)
    a = V[:, i]
    return (x0, a,)
import itertools
import pandas as pd
class FPNode(object):
    """A single node of an FP-tree.

    Carries an item label (``name``), an occurrence count along this
    path (``value``), child/parent links, and a ``next`` pointer that
    chains all nodes sharing the same item for the header table.
    """

    def __init__(self, name, value, children, parent):
        self.name = name
        self.value = value
        self.children = children
        self.parent = parent
        # end of the same-item chain until link_next() is called
        self.next = None

    def __repr__(self):
        return "{}: {}".format(self.name, self.value)

    def has_child(self, name):
        """Return True when a direct child carries *name*."""
        return any(child.name == name for child in self.children)

    def get_child(self, name):
        """Return the direct child named *name*, or None when absent."""
        return next((child for child in self.children if child.name == name), None)

    def add_child(self, node):
        """Attach *node* as a child and point its parent back here."""
        self.children.append(node)
        node.parent = self

    def link_next(self, node):
        """Append *node* at the tail of this node's same-item chain."""
        tail = self
        while tail.next is not None:
            tail = tail.next
        tail.next = node
class FPTree(object):
    """FP-growth tree: builds a compressed prefix tree of frequent
    items and mines frequent itemsets from it.

    ``mini_support`` is an absolute count threshold: items (and mined
    paths) occurring fewer times are discarded.
    """
    def __init__(self, mini_support):
        self.header_table = []   # one FPNode per frequent item, chained via .next
        self.rootNode = FPNode(name='root', value=1, children=[], parent=None)
        self.items_dic = {}      # item -> total occurrence count
        self.mini_support = mini_support
    def build_tree(self, input_data):
        """Count items, build the header table, then insert every
        (filtered, frequency-sorted) transaction into the tree."""
        self.__build_items_dic(input_data)
        self.__build_header_table()
        tree_data = self.filter_raw_input(input_data)
        # print(tree_data)
        # tree_data = [['f', 'a', 'b'], ['f', 'a', 'c'], ['a', 'b'], ['c']]
        self.create_tree_with_data(tree_data)
        # print(self.rootNode)
    '''
    private method
    build a header_table
    '''
    def __build_items_dic(self, input_data):
        # tally every item over all transactions
        for line in input_data:
            for word in line:
                if self.items_dic.get(word) is None:
                    self.items_dic[word] = 0
                self.items_dic[word] += 1
        # print(self.items_dic)
    '''
    private build header_table with items
    '''
    def __build_header_table(self):
        # keep only frequent items, most frequent first
        self.header_table = [FPNode(name=key, value=val, children=[], parent=None)
                             for key,val in self.items_dic.items()
                             if val >= self.mini_support
                             ]
        self.header_table.sort(key=lambda x: x.value, reverse=True)
        # print(self.header_table)
    def filter_raw_input(self, input_data):
        """Drop infrequent items from each transaction and sort the
        rest by descending global frequency (FP-tree insertion order)."""
        # print(self.items_dic)
        tree_data = [sorted([word for word in line if self.items_dic[word] >= self.mini_support]
                            , key=lambda x:self.items_dic[x], reverse=True)
                     for line in input_data
                     ]
        return tree_data
    def create_tree_with_data(self, tree_data):
        for line in tree_data:
            self.insert_tree(line)
    def insert_tree(self, insert_nodes):
        """Insert one transaction: walk the shared prefix, bumping
        counts, then hang new nodes for the remainder."""
        cur_node = self.rootNode
        i_of_insert_nodes = 0
        len_of_insert_nodes = len(insert_nodes)
        # follow the existing prefix as far as it matches
        while i_of_insert_nodes < len_of_insert_nodes and \
                cur_node.has_child(insert_nodes[i_of_insert_nodes]):
            cur_node = cur_node.get_child(insert_nodes[i_of_insert_nodes])
            cur_node.value += 1
            i_of_insert_nodes += 1
        # create fresh nodes for the unmatched suffix
        for i in range(i_of_insert_nodes, len_of_insert_nodes):
            new_node_name = insert_nodes[i]
            new_node = FPNode(name=new_node_name, value=1, children=[], parent=cur_node)
            cur_node.add_child(new_node)
            self.update_header_table(new_node)
            cur_node = new_node
    def update_header_table(self, node):
        # chain the new node behind its item's header entry
        for nd in self.header_table:
            if nd.name == node.name:
                nd.link_next(node)
    def generate_patterns(self, path, item):
        """From a conditional path (item -> count) for *item*, emit
        every combination extended with the item itself, supported by
        the minimum count along the combination."""
        patterns = {}
        keys = path.keys()
        for i in range(1, len(path)+1):
            p_list = list(itertools.combinations(keys, i))
            # print('list', list(p_list))
            ptn_lst = [sorted(list(x)+[item.name]) for x in p_list]
            # print(ptn_lst)
            # set((list(x)).append(item.name))
            for x in ptn_lst:
                patterns[tuple(x)] = min([path[key] for key in x if key != item.name])
        # the singleton pattern always carries the item's own support
        patterns[tuple([item.name])] = item.value
        return patterns
    def find_prefix_path(self, item):
        """Collect the ancestors of one tree occurrence of *item*,
        each weighted by that occurrence's count."""
        single_ptn = {}
        item_val = item.value
        item = item.parent
        while item.name != 'root':
            single_ptn[item.name] = item_val
            item = item.parent
        return single_ptn
    def mine_patterns(self):
        """Mine all frequent patterns: for each header item, merge the
        prefix paths of all its occurrences, prune by support, and
        generate the conditional patterns."""
        patterns = {}
        for item in self.header_table:
            cur = item.next
            single_path = {}
            while cur is not None:
                temp_dic = self.find_prefix_path(cur)
                # merge_two_dic (module level) sums counts per item
                single_path = merge_two_dic(single_path, temp_dic)
                cur = cur.next
            single_path = {k: v for k, v in single_path.items() if v >= self.mini_support}
            single_pattern = self.generate_patterns(single_path, item)
            patterns = merge_two_dic(patterns, single_pattern)
        return patterns
def merge_two_dic(dic1, dic2):
    """Merge two count dicts: the result contains the union of their
    keys, each mapped to the sum of its values (missing keys count as
    zero).  Neither input dict is modified.
    """
    combined = {}
    for key in set(dic1) | set(dic2):
        combined[key] = dic1.get(key, 0) + dic2.get(key, 0)
    return combined
'''
modify from https://github.com/waxnkw/fp-growth/blob/master/pyfpgrowth/pyfpgrowth.py
'''
def generate_association_rules(patterns, confidence_threshold):
    """Derive association rules from mined *patterns*.

    For every itemset, each proper subset is tried as an antecedent;
    a rule [antecedent, consequent] is kept when its confidence
    (itemset support / antecedent support) reaches the threshold AND
    the antecedent mentions 'republican0' or 'democrat0' (the labels
    of interest for this dataset).
    """
    rules = []
    for itemset, upper_support in patterns.items():
        for size in range(1, len(itemset)):
            for combo in itertools.combinations(itemset, size):
                antecedent = tuple(sorted(combo))
                consequent = tuple(sorted(set(itemset) - set(antecedent)))
                if antecedent not in patterns:
                    continue
                lower_support = patterns[antecedent]
                confidence = float(upper_support) / lower_support
                mentions_party = ('republican0' in antecedent
                                  or 'democrat0' in antecedent)
                if confidence >= confidence_threshold and mentions_party:
                    rules.append([list(antecedent), list(consequent)])
    return rules
if __name__ == '__main__':
    # Each row of A.csv is one transaction (an iterable of item labels).
    df = pd.read_csv('./A.csv')
    # Absolute minimum support: items must occur at least 150 times.
    tree = FPTree(150)
    tree.build_tree(df.values)
    # Mine frequent itemsets with their support counts.
    p = tree.mine_patterns()
    print(len(p))
    # Keep rules with >= 90% confidence whose antecedent names a party.
    x = generate_association_rules(p, 0.9)
    print(len(x))
|
import tensorflow as tf
from tensorflow.keras import Model
from constants import anchor_size, feature_size
class RoiPooling(Model):
    """Region-of-interest pooling: crops each box out of the feature
    map and resizes the crop to a fixed (anchor_size, anchor_size)
    spatial grid.
    """
    def call(self, features_map, boxes):
        # NOTE(review): assumes each box is (x1, y1, x2, y2) in
        # feature-map pixel coordinates with x2 > x1 and y2 > y1, and
        # that features_map is NHWC -- confirm with the caller.
        output = []
        for box in boxes:
            x1 = box[0]
            y1 = box[1]
            x2 = box[2]
            y2 = box[3]
            # crop the box region across all batch items and channels
            specific_features = tf.slice(features_map, [0, y1, x1, 0], [-1, y2-y1, x2-x1, -1])
            # resize to the fixed pooled size expected downstream
            specific_features = tf.image.resize(specific_features, (anchor_size, anchor_size))
            output.append(specific_features)
        return output
|
# NOTE: Python 2 script (``print`` statement syntax).  Under Python 2,
# ``c.seconds / 3600`` is integer (floor) division.
from datetime import timedelta
a = timedelta(days=2, hours=6)
b = timedelta(hours=4.5)
c = a + b
print c.days
# .seconds holds only the sub-day remainder (0..86399), not total seconds
print c.seconds
print c.seconds / 3600
# total_seconds() covers the whole span, including the days part
print c.total_seconds() / 3600
from datetime import datetime
a = datetime(2012, 9, 23)
print a + timedelta(days=10)
b = datetime(2012, 12, 21)
d = b - a
print d.days
now = datetime.today()
print now
print now + timedelta(minutes=10)
# datetime arithmetic is calendar-aware: 2012 is a leap year...
a = datetime(2012, 3, 1)
b = datetime(2012, 2, 28)
print a - b
print (a - b).days
# ...while 2013 is not, so the same pair of dates is one day closer
c = datetime(2013, 3, 1)
d = datetime(2013, 2, 28)
print (c - d).days
def testData():
    """Run the solver on the sample input ('test.txt') and compare the
    result with the known sample answer ('answer.txt').

    Returns True when the computed result matches the expected answer.
    """
    # FIX: the file handles were opened but never closed; ``with``
    # guarantees they are released even if reading fails.
    with open('test.txt', 'r') as otest:
        test = otest.readlines()
    with open('answer.txt', 'r') as oanswer:
        answer = oanswer.readline()
    status = False
    print("Runs test data")
    result = runCode(test)
    if result == int(answer): #not always int
        status = True
        print("Correct answer: " + answer + "My answer: " + str(result))
    return status
def runCode(data):
    """Advent-of-Code day-21 style solver: deduce which ingredient
    carries each allergen by elimination, remove those ingredients
    from every recipe, and return how many ingredient occurrences
    remain (the allergen-free count).

    *data* is a list of lines like
    ``"mxmxvkd kfcds sqjhc (contains dairy, fish)"``.
    """
    print("Runs code")
    foodlist = []
    # NOTE(review): foodlist is never used below -- candidate for removal.
    all_allergens = []
    all_ings = []
    all_recipes = []
    definite = {}   # allergen -> its identified ingredient
    #a list of numbers
    for line in data:
        print(line)
        recipe = []
        # split "ing1 ing2 (contains a, b)" into ingredients and allergens
        foods, allergens = line.strip().split(" (")
        ings = foods.split(" ")
        allergens = allergens.replace("contains ", '')
        allergens = allergens.replace(")", '')
        allergenlist = allergens.split(", ")
        recipe = [ings, allergenlist]
        all_recipes.append(recipe)
        for a in allergenlist:
            if a not in all_allergens:
                all_allergens.append(a)
        for i in ings:
            if i not in all_ings:
                all_ings.append(i)
    print("Allergens and ingredients:")
    print(all_allergens)
    print(all_ings)
    # eliminate until every allergen has a definite ingredient
    while len(definite) < len(all_allergens):
        print("Entering while loop with:")
        for alg in all_allergens:
            print(alg)
            existing_recipes_with_single_allergens = []
            existing_recipes_with_multiple_allergens = []
            for rec in all_recipes:
                if alg in rec[1] and len(rec[1]) == 1:
                    existing_recipes_with_single_allergens.append(rec)
                elif alg in rec[1] and len(rec[1]) > 1:
                    existing_recipes_with_multiple_allergens.append(rec)
            #print(existing_recipes_with_multiple_allergens)
            #print(existing_recipes_with_single_allergens)
            if len(existing_recipes_with_single_allergens) > 1:
                print("Several recipes with single allergens")
                #concatenate all lists into one. Find the word that exists exactly the same number as the number of lists.
                togetherlist = []
                for rec in existing_recipes_with_single_allergens:
                    togetherlist += rec[0]
                for word in existing_recipes_with_single_allergens[0][0]: #loop over any of the recipes:
                    if togetherlist.count(word) == len(existing_recipes_with_single_allergens):
                        definite[alg] = word #add it as definite for this allergen
                        # once identified, strip it from every recipe
                        for r in all_recipes:
                            if word in r[0]:
                                r[0].remove(word)
            elif len(existing_recipes_with_single_allergens) == 1:
                print("Exactly one recipe with single allergens")
                if len(existing_recipes_with_single_allergens[0][0]) == 1: #if there is only one ingredient
                    print("only one ingredient left")
                    last_ing = existing_recipes_with_single_allergens[0][0][0]
                    definite[alg] = last_ing #add it as definite for this allergen
                    for r in all_recipes:
                        if last_ing in r[0]:
                            r[0].remove(last_ing)
                else:
                    print("Several ingredients left")
                    for food in existing_recipes_with_single_allergens[0]: #loop over words in recipe ingredients
                        for recipe in existing_recipes_with_multiple_allergens:
                            # exactly one shared ingredient pins the allergen
                            counter, found_ing = match(food, existing_recipes_with_multiple_allergens[0][0])
                            #print(counter, found_ing)
                            if counter == 1:
                                definite[alg] = found_ing
                                for r in all_recipes:
                                    if found_ing in r[0]:
                                        r[0].remove(found_ing)
        print(definite)
        #print(all_recipes)
    # everything still listed in a recipe is allergen-free; count it
    occurrences = 0
    for left in all_recipes:
        occurrences += len(left[0])
    print(definite)
    '''
    Goal: Find the ingredient that is the only one for a specific allergen to be found in another row where the same allergen is one of several. Then we can eliminate this ingredient to belong to this allergen. If theere are rows where it's only a single ingredient, it's the one.
    Save this find in a dict with the allergen as key. Remove this ingredient from all rows, even the own one.
    In the end, collect the ingredients that are still on the rows and count how many times they appear and sum it.
    '''
    print(occurrences)
    return occurrences
def match(recipe1, recipe2):
    """Count how many ingredients of recipe1 also appear in recipe2.

    Returns a (count, ingredient) pair where ingredient is the LAST
    matching ingredient encountered, or '' when there is no overlap.
    """
    hits = [ing for ing in recipe1 if ing in recipe2]
    if hits:
        return len(hits), hits[-1]
    return 0, ''
# Smoke-test the solution against the sample data first; the real puzzle
# input is only processed when the test passes.
testResult = testData()
if testResult == True:
    print("Test data parsed. Tries to run puzzle.")
    # NOTE(review): the file handle is never closed; a 'with open(...)'
    # block would be safer.
    opuzzle = open('input.txt', 'r')
    puzzle = opuzzle.readlines()
    finalResult = runCode(puzzle)
    print(finalResult)
else:
    print("Test data failed. Code is not correct. Try again.")
|
# Generated by Django 2.2.13 on 2020-07-09 17:37
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration for the 'shop' app: drops the
    # obsolete About1 and Contact1 models (and their database tables).
    dependencies = [
        ('shop', '0027_auto_20200709_2246'),
    ]
    operations = [
        migrations.DeleteModel(
            name='About1',
        ),
        migrations.DeleteModel(
            name='Contact1',
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
**************************************************************************
* IMAGE PROCESSING (e-Yantra 2016)
* ================================
* This software is intended to teach image processing concepts
*
* MODULE: Task1C
* Filename: task1-main.py
* Version: 1.5.0
* Date: November 10, 2016
*
* Author: Jayant Solanki, e-Yantra Project, Department of Computer Science
* and Engineering, Indian Institute of Technology Bombay.
*
* Software released under Creative Commons CC BY-NC-SA
*
* For legal information refer to:
* http://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
*
*
* This software is made available on an “AS IS WHERE IS BASIS”.
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
* e-Yantra - An MHRD project under National Mission on Education using
* ICT(NMEICT)
*
**************************************************************************
"""
##################You are not allowed to add any external library here##################
import sys
import cv2
import numpy as np
import pickle
from getCellVal import *
########################################################################################
# This file will test your getCellVal.py with different test cases
# To compile the file, on the console type
# python task1-main.py N
# where N is the total number of images to be read, 7 in your case
# At the end, you will see the results of the test cases verified.
#=============================================================
# Task1C begins
#User providing the number of images files to be tested
N_images=int(sys.argv[1])  # first CLI argument: how many task1 images to test
# The board is a 6x6 grid on a 600x600 image, delimited by 7 grid lines
# in each direction.
grid_line_x = 7
grid_line_y = 7
# Cell size in pixels; under Python 2 (this file uses print statements)
# '/' is integer division, so m = n = 100.
m=600/(grid_line_x-1)
n=600/(grid_line_y-1)
###Stores the numbers detected for all the tested images, maximum images 7 only
grid_map_result = [ [ [0 for i in range(grid_line_y-1)] for j in range(grid_line_x-1) ] for k in range(7) ]
######################Test case verification######################
############Do not edit this part of the code####################
def testCases(grid_map_result):
    # Compare the detected grids against the pickled reference solution,
    # reporting correctness row by row for each tested image.
    # NOTE(review): 'error' accumulates wrong rows across ALL images, so a
    # single wrong row anywhere marks the whole run as failed.
    grid_map_solution = pickle.load( open( "grid_map_solution.p", "rb" ) )
    error=0
    for l in range(0, N_images):
        print 'Testing task1_img_',l+1,'.jpg'
        for i in range(0, grid_line_y-1):
            if(grid_map_solution[l][i]==grid_map_result[l][i]):
                print "Row ",i+1,"is correct"
            else:
                print "Row ",i+1,"is wrong"
                error=error+1
    if(error>0):
        print "Test Cases verification completed with ",error,"errors"
    else:
        print "Test Cases verification completed successfully. \n You can upload your submissions now"
######################end of method###############################
#########################Test images are passed here#########################
# For each requested image: run the student's detector (detectCellVal from
# getCellVal.py), cache the resulting grid, and display/save the annotated
# image before verifying all results at the end.
for k in range(1,N_images+1):
    grid_map = [ [ 0 for i in range(grid_line_y-1) ] for j in range(grid_line_x-1) ]
    imgpath='task1sets/task1_img_'+str(k)+'.jpg'
    img_rgb = cv2.imread(imgpath)
    grid_map=detectCellVal(img_rgb,grid_map)
    #print the grid_map
    print grid_map
    grid_map_result[k-1]=grid_map## store it in the 3 dimensional array
    ######################your code here###################################
    #print the output of the each expression on the input image, similar to what shown in the output.jpg image
    cv2.imshow('task1_img_'+str(k),img_rgb)
    cv2.imwrite('output/task1_img_'+str(k)+'.jpg',img_rgb)
    cv2.waitKey()  # block until a key is pressed before showing the next image
########################Test Cases are verified here, do not edit this code#########################
print "<--------------Starting Test Cases verification-------------->"
testCases(grid_map_result)
#=============================================================
# Your task1C ends here |
# python2 kmlwriter.py --agency_id 'BART' --style_csv_path './route_styles/Current_15.csv' sample_feeds/BA_gtfs.zip move_bay_area_bus_maps/bart_current_15_map.kml
# python2 kmlwriter.py --agency_id 'Muni' --style_csv_path './route_styles/Current_15.csv' sample_feeds/SF_gtfs.zip move_bay_area_bus_maps/muni_current_15_map.kml
# Requirements: pandas and togeojson (npm install -g @mapbox/togeojson)
# Note: this overwrites all KML and GeoJsons! Add shape IDs to the Hidden_Shapes column in styling csv
# to permanently remove certain route sections (like branches)
# Usage: python2 generate_move_bay_area_bus_maps.py
import subprocess
# 511.org GTFS agency codes mapped to the short names used in the output
# KML/GeoJSON filenames; commented-out entries are agencies deliberately
# excluded from the generated maps.
agencies = [
    { 'id': '3D', 'name': 'TriDelta' },
    { 'id': 'AC', 'name': 'AC' },
    # { 'id': 'AM', 'name': 'Capitol Corridor Joint Powers Authority' },
    # { 'id': 'AY', 'name': 'American Canyon Transit' },
    { 'id': 'BA', 'name': 'BART' },
    { 'id': 'CC', 'name': 'CC' }, # County Connection
    # { 'id': 'CE', 'name': 'Altamont Corridor Express' },
    # { 'id': 'CM', 'name': 'Commute.org Shuttle' },
    { 'id': 'CT', 'name': 'Caltrain' },
    # { 'id': 'DE', 'name': 'Dumbarton Express Consortium' },
    # { 'id': 'EM', 'name': 'Emery Go-Round' },
    # { 'id': 'FS', 'name': 'Fairfield and Suisun Transit' },
    { 'id': 'GF', 'name': 'GGFerry' },
    { 'id': 'GG', 'name': 'GGT' }, # Golden Gate Transit
    { 'id': 'MA', 'name': 'MarinT' },
    # { 'id': 'MS', 'name': 'Stanford Marguerite Shuttle' },
    # { 'id': 'PE', 'name': 'Petaluma Transit' },
    # { 'id': 'RV', 'name': 'Rio Vista Delta Breeze' },
    { 'id': 'SA', 'name': 'SMART' },
    { 'id': 'SB', 'name': 'SFBayFerry' },
    { 'id': 'SC', 'name': 'VTA' },
    { 'id': 'SF', 'name': 'Muni' },
    { 'id': 'SM', 'name': 'SamTrans' },
    # { 'id': 'SO', 'name': 'Sonoma County Transit' },
    # { 'id': 'SR', 'name': 'SantaRosa' }, skipping because gtfs is bad
    { 'id': 'ST', 'name': 'SolTrans' },
    # { 'id': 'TD', 'name': 'Tideline Marine Group Inc' },
    # { 'id': 'UC', 'name': 'Union City Transit' },
    # { 'id': 'VC', 'name': 'Vacaville City Coach' },
    { 'id': 'VN', 'name': 'Napa' },
    { 'id': 'WC', 'name': 'WestCat' },
    { 'id': 'WH', 'name': 'Wheels' }, # Livermore Amador Valley Transit Authority
]
# Optional resume support: set to an agency name to skip everything before
# it (useful when re-running after a crash). None disables skipping.
skip_ahead_to_agency_name = None
keep_skipping = True

# One styling CSV per map variant; each produces a separate KML/GeoJSON.
csv_filenames = [
    'Current_15',
    'Current_20',
    'Future_15',
    'Future_20',
    'Longer_Hours',
]

processes = []
for agency in agencies:
    # Skip agencies until the resume point is reached. The explicit
    # 'is not None' check (instead of '!= None') makes the no-resume
    # case unambiguous and is the idiomatic None comparison.
    if (keep_skipping and skip_ahead_to_agency_name is not None
            and agency['name'] != skip_ahead_to_agency_name):
        continue
    keep_skipping = False
    for csv_filename in csv_filenames:
        # Launch one kmlwriter + togeojson pipeline per (agency, style)
        # pair; all pipelines run concurrently and are awaited below.
        processes.append(
            subprocess.Popen(
                (
                    "python2 kmlwriter.py --agency_id {agency_name} --style_csv_path './route_styles/{csv_filename}.csv' "
                    "'sample_feeds/{agency_id}_gtfs' 'move_bay_area_bus_maps/{agency_name}_{csv_filename}_map.kml'"
                    " && togeojson 'move_bay_area_bus_maps/{agency_name}_{csv_filename}_map.kml' > 'move_bay_area_bus_maps/{agency_name}_{csv_filename}_map.json'"
                ).format(agency_name=agency['name'], agency_id=agency['id'], csv_filename=csv_filename),
                shell=True,
            ))

# Collect statuses: wait for every spawned pipeline; 'output' holds the
# shell exit codes in launch order.
output = [p.wait() for p in processes]
|
'''
Introduction to Secant Methods in Python -- Regula Falsi
Name: Kevin Trinh
Goal: Find the root of log(3x/2)
'''
import math
import scipy as sp
def func(x):
    """The function whose root we seek: f(x) = ln(1.5 * x), root at x = 2/3."""
    # math.log replaces sp.log: scipy.log was merely a deprecated alias of
    # numpy.log and has been removed from modern SciPy releases, so the
    # original call breaks on current installations.
    return math.log(1.5 * x)
def regulaFalsi(a, b, tol=1e-15, maxiter=1000):
    '''Perform Regula Falsi root-finding algorithm. Uses f(x) in the tolerance criterion.'''
    # Returns (x3, f3, i): the final root estimate, its function value and
    # the number of iterations performed (counting starts at 1).
    # check that the root is bracketed
    assert func(a) * func(b) < 0
    # perform linear interpolations to find the root
    x1 = a
    x2 = b
    x3 = 10 # arbitrary high number
    i = 1
    while abs(func(x3)) > tol and i <= maxiter:
        # interpolate
        f1 = func(x1)
        f2 = func(x2)
        # Secant/false-position step: x-intercept of the line through
        # (x1, f1) and (x2, f2).
        x3 = (f1*x2 - f2*x1) / (f1 - f2)
        f3 = func(x3)
        # prepare for next interpolation
        # NOTE(review): discarding x1 unconditionally is the *secant*
        # update, not classic regula falsi (which keeps the endpoint that
        # still brackets the root) -- confirm which method is intended.
        x1 = x2
        x2 = x3
        i += 1
    if i > maxiter:
        print('Maximum number of iterations has been reached.')
    return x3, f3, i
# Bracket [0.5, 1.0] contains the root of ln(1.5*x) at x = 2/3.
root, yval, i = regulaFalsi(0.5, 1.0)
print('The root is located at x = ' + str(root) + ' after ' + str(i) + ' number of iterations.')
print('This value should be close to zero: ' + str(yval))
|
from epidemioptim.environments.models.prague_ode_seirah_model import PragueOdeSeirahModel
# Registry of available epidemiological model identifiers.
list_models = ['prague_seirah']

def get_model(model_id, params=None):
    """
    Get the epidemiological model.

    Parameters
    ----------
    model_id: str
        Model identifier; must be one of `list_models`.
    params: dict, optional
        Dictionary of experiment parameters, forwarded to the model
        constructor. Defaults to an empty dict. (A None default replaces
        the original mutable default `params={}`, which is shared across
        calls and a classic Python pitfall.)

    Raises
    ------
    AssertionError
        If `model_id` is not a registered model.
    """
    if params is None:
        params = {}
    assert model_id in list_models, "Model id should be in " + str(list_models)
    if model_id == 'prague_seirah':
        return PragueOdeSeirahModel(**params)
    # Registered but not constructible: keep the original failure mode.
    raise NotImplementedError

#TODO: add tests for model registration
|
import sqlite3
from sqlite3 import Error
import numpy as np
import pandas as pd
from datetime import datetime, time
def create_connection(db_file):
    """Open a SQLite connection to db_file.

    Returns the connection object, or None when the connect attempt
    raised a sqlite3 Error (which is printed).
    """
    connection = None
    try:
        connection = sqlite3.connect(db_file)
    except Error as exc:
        print(exc)
    return connection
def fetch_slots(conn):
    """Return every row of the `slots` table as a list of tuples.

    Parameters
    ----------
    conn: sqlite3.Connection
        An open database connection.
    """
    cur = conn.cursor()
    # Plain string literal: the query has no placeholders, so the f-string
    # prefix the original used was unnecessary.
    cur.execute('SELECT * FROM slots;')
    # fetchall() already returns a list of row tuples; the original copied
    # it element-by-element into a second list for no benefit.
    return cur.fetchall()
def create_calender(conn, calender):
    """Insert one booking row into the calender table.

    `calender` is a (user_id, slot_id) pair; the new row's id is returned.
    """
    sql = ''' INSERT INTO calender(user_id,slot_id)
              VALUES(?,?) '''
    cursor = conn.cursor()
    cursor.execute(sql, calender)
    conn.commit()
    return cursor.lastrowid
def main():
    # Book every existing slot for user_id 10 by inserting one calender
    # row per slot.
    # Enter Path where you want your database to be:
    database = r"database.db"
    # create a database connection => Db will be created if there does not exists one.
    conn = create_connection(database)
    # NOTE(review): 'with conn' commits (or rolls back) the transaction on
    # exit but does NOT close the connection.
    with conn:
        slots = fetch_slots(conn)
        for slot in slots:
            # calender = (7, slot[0])
            # create_calender(conn, calender)
            # calender = (8, slot[0])
            # create_calender(conn, calender)
            calender = (10, slot[0])  # (user_id, slot_id)
            create_calender(conn, calender)
if __name__ == '__main__':
    main()
|
from sublime import Region
from .str_utils import get_quote, get_prefix
from .settings_utils import get_root_prefix, get_scope_prefix
from .paths import get_cur_proj, get_scopes, is_valid_root, is_valid_scope
import re
def get_module_specifier(strs):
    """Return the last module keyword ('import', 'export' or 'require')
    occurring in *strs*, or None when none is present."""
    matches = re.findall(r'(import|export|require)', strs)
    # Idiomatic emptiness check ('if not matches' rather than
    # 'if not len(matches)').
    if not matches:
        return
    return matches[-1]
def get_likely_source_at_sel(view):
    """Return the probable module-source string under the first selection.

    Only fires when the caret sits inside a quoted string of a .js file
    and the text before the opening quote contains an import/export/
    require keyword; returns None otherwise.
    """
    caret = view.sel()[0].a
    scope_name = view.scope_name(caret)
    if '.js' not in scope_name or 'string.quoted' not in scope_name:
        return
    line_region = view.line(caret)
    text_up_to_caret = view.substr(Region(line_region.a, caret))
    quote = get_quote(text_up_to_caret)
    if not quote:
        return
    quote_pos = text_up_to_caret.rfind(quote)
    if not get_module_specifier(text_up_to_caret[:quote_pos]):
        return
    return text_up_to_caret[quote_pos + 1:]
def get_source_at_sel(view):
    """Return the module source under the selection, but only when it
    carries a recognised path prefix; None otherwise."""
    candidate = get_likely_source_at_sel(view)
    if candidate and get_prefix(candidate):
        return candidate
    return
class Scope_cache():
    # Module-level holder for the scope matched by the most recent
    # get_cur_path() call (None when nothing matched).
    scope = None
    @staticmethod
    def get_scope():
        # Return the cached scope entry (or None).
        return Scope_cache.scope
    @staticmethod
    def set_scope(scope):
        # Replace the cached scope entry.
        Scope_cache.scope = scope
def get_cur_path(view, add_path=''):
    # Resolve the module source string at the current selection to a
    # filesystem path, expanding either the configured root prefix or the
    # first matching scope prefix. Returns None when resolution fails.
    source = get_source_at_sel(view)
    if not source:
        return
    source += add_path
    filename = view.file_name()
    cur_proj = get_cur_proj(filename)
    if not cur_proj:
        return
    project_root = cur_proj['project_root']
    prefix = get_prefix(source)
    root_prefix = get_root_prefix()
    if is_valid_root(source, root_prefix):
        return source.replace(root_prefix, project_root)
    # NOTE(review): 'prefix'/'scope_prefix' are computed but never used
    # below -- possibly leftovers from an earlier implementation.
    scope_prefix = prefix
    Scope_cache.set_scope(None)
    scopes = get_scopes(filename)
    if not scopes:
        return
    for scope in scopes:
        # Reset before each candidate so a failed match leaves no stale scope.
        Scope_cache.set_scope(None)
        scope_name = scope['name']
        scope_dir = scope['dir'].replace(root_prefix, project_root)
        if is_valid_scope(source, scope_name):
            # First matching scope wins; remember it for later queries.
            source = source.replace(scope_name, scope_dir)
            Scope_cache.set_scope(scope)
            return source
    return
|
# Generated by Django 2.2.7 on 2020-01-14 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration for the 'statics' app: loosens three fields
    # on the review model (content, expression, rate) to allow blank/null
    # values; 'expression' also gains a fixed choice set.
    dependencies = [
        ('statics', '0012_auto_20191231_1330'),
    ]
    operations = [
        migrations.AlterField(
            model_name='review',
            name='content',
            field=models.TextField(blank=True, null=True, verbose_name='내용'),
        ),
        migrations.AlterField(
            model_name='review',
            name='expression',
            field=models.CharField(blank=True, choices=[('good', 'Good'), ('soso', 'Soso'), ('sad', 'Sad'), ('surprise', 'Surprise'), ('thumb', 'Thumb')], max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='review',
            name='rate',
            field=models.FloatField(blank=True, null=True, verbose_name='평점'),
        ),
    ]
|
from utils.data import *
from config import *
def main():
    # Run the preprocessing step over the configured foreground dataset;
    # both names come from the star imports above (utils.data / config) --
    # presumably 'preprogress' means 'preprocess', TODO confirm.
    preprogress(foreground_data)
if __name__ == '__main__':
    main()
|
__author__ = "Narwhale"
# def get_formatted_name(first,last):
# '''合并姓名'''
# full_name = first + ' ' + last
# return full_name.title()
#
#------------------------------------------------------
def get_formatted_name(first, last, moddle=''):
    """Join the name parts with spaces and return them title-cased.

    The optional third argument is inserted between first and last when
    it is non-empty. (The parameter name 'moddle' looks like a typo for
    'middle' but is kept so keyword callers keep working.)
    """
    parts = [first, moddle, last] if moddle else [first, last]
    return ' '.join(parts).title()
#Li Xin
#Student number: 014696390
#xin.li@helsinki.fi
import sys
import socket
import random
import threading
import os
import listy
def send_mouse(mouse_port, mouse_node):
    """Launch mouse.py on the given ukko node over ssh; blocks until it exits."""
    remote_cmd = ('ssh xgli@{0} python3 /cs/home/xgli/'
                  'Distributed_System_Exercise_2016/big_exercise_2/mouse.py '
                  '{1}').format(mouse_node, mouse_port)
    os.system(remote_cmd)
def start_cordy():
    """Run cordy.py on the local node; blocks until it exits."""
    os.system('python3 /cs/home/xgli/Distributed_System_Exercise_2016/big_exercise_2/cordy.py')
if __name__ == "__main__":
#read the usable nodes from the config file
ukkonodes = open('ukkonodes', 'r').read().split('\n')
#read the port of listy and mouse
f = open('port_number', 'r')
lines = f.read().split('\n')
temp = lines[0].split()
listy_port = temp[0]
temp = lines[1].split()
mouse_port = temp[0]
f.close()
#randomly choose a node to run mouse.py on a new thread
mouse_node = random.choice(ukkonodes)
mouse_thread = threading.Thread(target = send_mouse, args = (mouse_port, mouse_node))
mouse_thread.start()
#start a new thread and run listy.py on the same ukko node as main.py and cordy.py
host = socket.gethostname()
listy_thread = threading.Thread(target = listy.listy, args = (host, listy_port))
listy_thread.start()
#start a new thread and run cordy.py on the same ukko node as main.py and listy.py
cordy_thread = threading.Thread(target = start_cordy)
cordy_thread.start()
print('program ends!')
|
# Reads 10 lines of "(exited, entered)" passenger counts from stdin and
# prints the maximum number of passengers ever on board at once
# (appears to be the classic Codeforces "Tram" problem -- TODO confirm).
rt = 0  # running maximum of passengers on board
pt = 0  # current passengers on board
for _ in range(10):
    cinout = list(map(int, input().split()))
    pt += cinout[1] - cinout[0]
    if pt > rt:
        rt = pt
print(rt)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.