text stringlengths 8 6.05M |
|---|
import shutil
import datetime
from zrong.base import slog, write_by_templ
from wpcmd.base import Action,BlogError
from wordpress_xmlrpc import (WordPressTerm)
from wordpress_xmlrpc.methods.taxonomies import (NewTerm,GetTerm)
class NewAction(Action):
    """Handle the "new" sub-command: create a local draft file or a new
    WordPress taxonomy term, depending on ``args.type``."""

    def _new_draft(self):
        """Create a draft markdown file from the article template."""
        name = self.args.query[0] if self.args.query else None
        try:
            dfile, dname = self.conf.get_new_draft(name)
        except BlogError as e:
            slog.critical(e)
            return
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Substitution values for the article.md template.
        substitutions = {
            'TITLE': '',
            'DATE': now,
            'MODIFIED': now,
            'AUTHOR': self.conf.site.user,
            'NICENAME': '',
            'CATEGORY': 'technology',
            'TAG': '',
            'POSTTYPE': self.args.type,
            'POSTSTATUS': 'draft',
        }
        write_by_templ(
            self.conf.get_path('templates', 'article.md'),
            dfile,
            substitutions,
            True,
        )
        slog.info('The draft file "%s" has created.' % dfile)

    def _new_term(self):
        """Create a taxonomy term (category or tag) on the remote site and
        record it in the local configuration."""
        if not self.args.query or len(self.args.query) < 1:
            slog.error('Provide 1 arguments at least please.')
            return
        query = self.get_term_query()
        print('query:', query)
        existing = self.get_terms_from_wp(query, force=True)
        print(existing)
        if existing:
            slog.error('The term "%s" has been in wordpress.' % self.args.query[0])
            return
        taxname = query[0]
        slug = self.args.query[0]
        # Second positional argument is the display name; fall back to slug.
        name = self.args.query[1] if len(self.args.query) > 1 else slug
        new_term = WordPressTerm()
        new_term.slug = slug
        new_term.name = name
        new_term.taxonomy = taxname
        if len(self.args.query) > 2:
            new_term.description = self.args.query[2]
        termid = self.wpcall(NewTerm(new_term))
        if not termid:
            return
        # Re-fetch so the saved term carries server-assigned fields.
        fetched = self.wpcall(GetTerm(taxname, termid))
        if not fetched:
            return
        slog.info('The term %s(%s) has created.' % (name, termid))
        self.conf.save_term(fetched, taxname)
        self.conf.save_to_file()
        slog.info('The term %s has saved.' % name)

    def go(self):
        """Dispatch by args.type: drafts for post/page, terms for category/tag."""
        print(self.args)
        if self.args.type in ('post', 'page'):
            self._new_draft()
        elif self.args.type in ('category', 'tag'):
            self._new_term()
def build(gconf, gargs, parser=None):
    """Module entry point: construct a NewAction and run its build step."""
    NewAction(gconf, gargs, parser).build()
|
from collections import defaultdict
import os.path
import re
from math import log
import codecs
import sys
from standardiser import standardise
from nltk.tokenize import sent_tokenize as sent_tokenise
from nltk.tokenize import word_tokenize as word_tokenise
import urllib2
from BeautifulSoup import BeautifulSoup
def utf8open(loc, mode='r'):
    """Open *loc* as a UTF-8 text stream (read mode by default)."""
    return codecs.open(loc, mode, encoding='utf8')
def localPath(filename):
    """Return the absolute path of *filename* relative to this module's
    directory."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, filename))
class Document(list):
    # Python 2 code (urllib2, BeautifulSoup 3, `unicode`): a list of
    # Sentence objects extracted from a web page, plus a document-level
    # bag of words (hashed term -> count) and the page title.
    def __init__(self, url):
        """
        Build a list of sentences and a bag of words
        """
        list.__init__(self)
        title, text = self._urlToText(url)
        bow = defaultdict(int)
        # Sentence-split the page text; fold each sentence's bag of words
        # into the document-level counts.
        for i, sentenceStr in enumerate(sent_tokenise(text)):
            sentence = Sentence(sentenceStr, i)
            self.append(sentence)
            for k, v in sentence.bagOfWords.items():
                bow[k] += v
        self.bagOfWords = bow
        self.title = title

    # Collapses any run of whitespace to a single space.
    whiteRE = re.compile(r'\s+')
    # Keeps a paragraph if it does NOT start with a bullet (U+2022) or if
    # it ends in sentence-final punctuation.
    constraintsRE = re.compile(u'^[^\u2022]|[.!?]$')

    def _urlToText(self, url):
        """
        Terrible text extraction using that ugly swamp beautifulsoup
        """
        page = urllib2.urlopen(url)
        soup = BeautifulSoup(page)
        body = soup.findAll('body')
        body = body[0] if body else soup
        # Attribute-free <p> tags are assumed to be content paragraphs.
        parElements = [p for p in body.findAll('p') if not p.attrs]
        # Also collect the direct text of nodes that use <br> as a
        # paragraph separator.
        for node in [p for p in body(True) if p.find('br', recursive=False)]:
            for par in node.fetchText(True, recursive=False):
                parElements.append(par)
        paragraphs = []
        for paragraph in parElements:
            if isinstance(paragraph, unicode):
                text = paragraph
            else:
                text = ' '.join(paragraph.fetchText(True))
            text = self.whiteRE.sub(' ', text.strip())
            if self.constraintsRE.search(text):
                paragraphs.append(text)
        # NOTE(review): assumes the page has a <title> tag — raises
        # AttributeError otherwise; confirm acceptable for the crawl set.
        title = soup.find('title').string
        return title, '\n'.join(paragraphs)
class IDFWeightedDocument(Document):
    # Document whose bag-of-words counts are scaled by IDF weights derived
    # from a Wikipedia document-frequency table.

    # Class-level cache: hashed term -> integer IDF weight; loaded lazily
    # on first construction and shared by all instances.
    weights = None

    @staticmethod
    def loadWeights(n):
        # n: total number of documents in the frequency corpus.
        dfLoc = localPath('wiki_doc_freqs_trim.dat')
        # Read in the document freqs.
        # Have to do this first because we collapse some freqs through
        # standardisation.
        weights = defaultdict(int)
        for line in utf8open(dfLoc):
            term, freq = line.split('\t')
            term = standardise(term)
            if term:
                weights[hash(term)] += int(freq)
        # Turn the frequencies into IDF weights.
        # NOTE(review): int() truncates the base-10 IDF to an integer,
        # which is very coarse — presumably intentional; confirm.
        for term, freq in weights.items():
            idf = log(n/freq, 10)
            weights[term] = int(idf)
        IDFWeightedDocument.weights = weights

    def __init__(self, url):
        class_ = self.__class__
        # Corpus size used for both the IDF table and the default weight.
        n = 818741.0
        if class_.weights is None:
            class_.loadWeights(n)
        weights = class_.weights
        Document.__init__(self, url)
        # Weight terms in bag of words
        bow = self.bagOfWords
        # Default weight for unseen terms assumes a document frequency of
        # 30. NOTE(review): this log is natural-log (base e), unlike the
        # base-10 logs in loadWeights — TODO confirm intended.
        default = log(n/30)
        for term, freq in bow.items():
            bow[term] = freq*weights.get(term, default)
class Sentence(object):
    """One sentence: its original string, a bag of hashed standardised
    terms, and its position within the source document."""

    def __init__(self, sentenceStr, position):
        self.string = sentenceStr
        # Lowercase the leading character before tokenising so the first
        # word matches its mid-sentence form.
        if sentenceStr[0].isupper():
            sentenceStr = sentenceStr[0].lower() + sentenceStr[1:]
        counts = defaultdict(int)
        for token in word_tokenise(sentenceStr):
            term = standardise(token)
            if term:
                counts[hash(term)] += 1
        self.bagOfWords = counts
        self.position = position

    def __unicode__(self):
        return self.string

    def __str__(self):
        # Python 2: self.string is expected to be a byte string here.
        return self.string.decode('utf8', 'replace')
def test():
    """
    Disabled doctests.

    FIX: the previous docstring contained doctest examples that could
    never pass: ``Document.__init__`` expects a URL (it fetches the page
    over the network), but the examples passed raw text, and they read
    ``doc.bow`` although the attribute is named ``bagOfWords``. The
    examples are kept below WITHOUT doctest prompts so that
    ``doctest.testmod()`` no longer reports spurious failures:

        doc = Document(u"http://example.com/")
        len(doc)                                            # sentence count
        max(doc.bagOfWords.items(), key=lambda kv: kv[1])   # most frequent term
    """
    pass
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
#!/usr/bin/env python3
from ev3dev2.motor import MoveSteering, MoveTank, MediumMotor, LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D
from ev3dev2.sensor.lego import TouchSensor, ColorSensor, GyroSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
import xml.etree.ElementTree as ET
import threading
import time
from sys import stderr
# Sensor and motor bindings for the EV3 brick (hard-wired port layout:
# gyro on 1, right colour on 2, left colour on 3; large motors on B/C,
# medium motor on D).
colourLeft = ColorSensor(INPUT_3)
colourRight = ColorSensor(INPUT_2)
gyro = GyroSensor(INPUT_1)
largeMotor_Left= LargeMotor(OUTPUT_B)
largeMotor_Right= LargeMotor(OUTPUT_C)
mediumMotor = MediumMotor(OUTPUT_D)
# Paired-drive helpers over the two large motors.
steering_drive = MoveSteering(OUTPUT_B, OUTPUT_C)
tank_block = MoveTank(OUTPUT_B, OUTPUT_C)
#_________________________________________________________________________________________________________________________________
def squareOnLine(stop, speed, target):
    """Drive forward until both colour sensors read the line, squaring the
    robot up to it.

    stop   -- zero-argument callable; returning True aborts the loop
    speed  -- motor speed used both for driving and pivoting
    target -- reflected-light-intensity threshold counted as "on the line"
    """
    print("In squareOnLine", file=stderr)
    # setting up program
    colourLeft_RLI = 0
    colourRight_RLI = 0
    lineFound = False
    # Drive straight ahead until a sensor picks up the line.
    steering_drive.on(steering=0,speed=speed)
    while True:
        # reading in the colour sensor values (reflected light intensity)
        colourLeft_RLI = colourLeft.reflected_light_intensity
        colourRight_RLI = colourRight.reflected_light_intensity
        # If the left RLI is at/under the target, pivot right (left motor
        # backwards, right motor forwards).
        if colourLeft_RLI <= target:
            largeMotor_Left.on(-speed)
            largeMotor_Right.on(speed)
            lineFound = True  # remember the line was touched at least once
            print('{} left found it'.format(colourLeft_RLI), file = stderr)
        # If the right RLI is at/under the target, pivot left.
        if colourRight_RLI <=target:
            largeMotor_Left.on(speed)
            largeMotor_Right.on(-speed)
            lineFound = True  # remember the line was touched at least once
            print('{} right found it'.format(colourRight_RLI), file = stderr)
        print('{} left, {} right'.format(colourLeft_RLI, colourRight_RLI), file = stderr)
        # NOTE(review): exact equality of two raw sensor readings is a
        # crude "squared up" test — confirm on hardware that it triggers.
        if colourLeft_RLI == colourRight_RLI and lineFound:
            break
        if stop():
            break
    steering_drive.off()
    print('Leaving squareOnLine', file=stderr)
#stopProcessing=False
#squareOnLine(lambda:stopProcessing, speed=30, target=100) |
# Author:ambiguoustexture
# Date: 2020-03-08
import codecs
import numpy as np
# Input locations and the encoding of the feature list.
file_features = './features.txt'
file_theta = './file_theta.npy'
file_encoding = 'cp1252'

# Load the feature names (one per line) and the learned weight vector.
with codecs.open(file_features, 'r', file_encoding) as features:
    features = list(features)
theta = np.load(file_theta)
index_sorted = theta.argsort()

print('Top 10 features with high weight:')
# Largest weights: take the last ten sorted indices, highest first.
for index in reversed(index_sorted[-10:]):
    print('\t', theta[index], '\t', features[index - 1].strip())

print('Top 10 features with low weight:')
# Smallest weights: the first ten sorted indices.
for index in index_sorted[:10]:
    print('\t', theta[index], '\t', features[index - 1].strip())
|
import pygame
import time
import os
import random
WINDOW_WIDTH = 270
WINDOW_HEIGHT = 400
# For Reusability, function to handle all image loads
def load_img(img_name):
    """Load a sprite from the 'assets' directory by file name."""
    asset_path = os.path.join('assets', img_name)
    return pygame.image.load(asset_path)
# Since The Bird Changes Through 3 Different States
# Init A List To Hold All Those 3 States
# Since The Bird Changes Through 3 Different States
# Init A List To Hold All Those 3 States (wings up / level / down)
BIRD_IMGS = [
    load_img('bird1.png'),
    load_img('bird2.png'),
    load_img('bird3.png')
]
PIPE_IMG = load_img('pipe.png') # Pipe Image (bottom-facing; flipped for top)
BACKGROUND_IMG = load_img('background.png') # Background Image
BASE_IMG = load_img('base.png') # Base Image (scrolling ground strip)
class Bird :
    # The player-controlled bird: jump physics, tilt, and the wing-flap
    # animation cycle.
    # Class Variables
    IMGS = BIRD_IMGS
    MAX_ROTATION = 25   # maximum upward tilt, in degrees
    ROT_VEL = 20        # degrees rotated downward per frame while falling
    ANIME_TIME = 5      # frames each wing-flap image stays on screen
    def __init__(self, x, y) :
        # Instance Variables
        self.x = x
        self.y = y
        self.tilt = 0             # current rotation in degrees
        self.tick_count = 0       # frames since the last jump
        self.velocity = 0
        self.height = self.y      # y at the moment of the last jump
        self.img_count = 0        # frames into the flap animation
        self.img = self.IMGS[0]
    def jump(self) :
        # Negative velocity moves the bird up (pygame y grows downward).
        self.velocity = -5.5
        self.tick_count = 0
        self.height = self.y
    def move(self) :
        # Game Time
        self.tick_count = self.tick_count + 1
        # Displacement this frame from the kinematic equation v*t + 1.5*t^2.
        d = self.velocity*self.tick_count + 1.5*self.tick_count**2 # Get Displacement Of The Bird, Itll Be In Pixels
        # Clamp downward speed (terminal velocity for this window size).
        if d >= 5 :
            d = 4
        # Slightly boost upward movement for a snappier jump feel.
        if d < 0 :
            d -= 2
        self.y += d # This Is Going To Confuse Me In The Future
        # Tilt up while rising (or just after the jump apex), otherwise
        # rotate progressively toward a nose-dive.
        if d < 0 or self.y < self.height + 30 :
            if self.tilt < self.MAX_ROTATION :
                self.tilt = self.MAX_ROTATION
        else :
            if self.tilt > -90 :
                self.tilt -= self.ROT_VEL
    def draw(self, win) :
        self.img_count += 1
        # Change The Image States After Some Time
        # To Animate The Bird To Symbolise Thats Its Flapping Its Wings
        # (cycle: up -> level -> down -> level -> restart)
        if self.img_count < self.ANIME_TIME :
            self.img = self.IMGS[0]
        elif self.img_count < self.ANIME_TIME * 2 :
            self.img = self.IMGS[1]
        elif self.img_count < self.ANIME_TIME * 3 :
            self.img = self.IMGS[2]
        elif self.img_count < self.ANIME_TIME * 4 :
            self.img = self.IMGS[1]
        elif self.img_count == self.ANIME_TIME * 4 + 1 :
            self.img = self.IMGS[0]
            self.img_count = 0
        # When nose-diving, freeze the wings level (no flapping).
        if self.tilt <= -80 :
            self.img = self.IMGS[1]
            self.img_count = self.ANIME_TIME * 2
        # Rotate around the sprite's center rather than its top-left corner.
        rotated_image = pygame.transform.rotate(self.img, self.tilt)
        new_rect = rotated_image.get_rect(center=self.img.get_rect(topleft = (self.x, self.y)).center)
        win.blit(rotated_image, new_rect.topleft)
    def get_mask(self) :
        # Pixel-perfect collision mask for the current frame.
        return pygame.mask.from_surface(self.img)
class Pipe :
    # A pair of pipes (top + bottom) separated by GAP, scrolling left.
    GAP = 90    # vertical gap between the pipe pair, in pixels
    VEL = 2     # horizontal scroll speed per frame
    def __init__(self, x) :
        self.x = x
        self.height = 0
        # NOTE(review): self.gap is assigned but never read — GAP (class
        # attribute) is what set_height() actually uses.
        self.gap = 30
        self.top = 0     # y of the top pipe's top-left corner (negative)
        self.bottom = 0  # y where the bottom pipe starts
        self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True)
        self.PIPE_BOTTOM = PIPE_IMG
        self.passed = False  # set once the bird has passed this pipe (scoring)
        self.set_height()
    def set_height(self) :
        # Randomise where the gap sits; top pipe is drawn upward from it.
        self.height = random.randrange(50, 200)
        self.top = self.height - self.PIPE_TOP.get_height()
        self.bottom = self.height + self.GAP
    def move(self) :
        self.x -= self.VEL
    def draw(self, window) :
        window.blit(self.PIPE_TOP, (self.x, self.top))
        window.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
    def collide(self, bird) :
        # Pixel-perfect collision using masks with offsets from the bird.
        bird_mask = bird.get_mask()
        top_mask = pygame.mask.from_surface(self.PIPE_TOP)
        bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
        top_offset = (self.x - bird.x, self.top - round(bird.y))
        bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
        top_point = bird_mask.overlap(top_mask, top_offset)
        bottom_point = bird_mask.overlap(bottom_mask, bottom_offset)
        if top_point or bottom_point :
            return True
        return False
class Base :
    # The scrolling ground: two copies of the base image leapfrog each
    # other to create an endless strip.
    VEL = 5  # scroll speed; matches the visual speed of the pipes' world
    WIDTH = BASE_IMG.get_width()
    IMG = BASE_IMG
    def __init__(self, y) :
        self.y = y
        self.x1 = 0           # first copy starts on screen
        self.x2 = self.WIDTH  # second copy starts just off the right edge
    def move(self) :
        self.x1 -= self.VEL
        self.x2 -= self.VEL
        # When a copy scrolls fully off the left edge, move it behind the
        # other copy.
        if self.x1 + self.WIDTH < 0 :
            self.x1 = self.x2 + self.WIDTH
        if self.x2 + self.WIDTH < 0 :
            self.x2 = self.x1 + self.WIDTH
    def draw(self, window) :
        window.blit(self.IMG, (self.x1, self.y))
        window.blit(self.IMG, (self.x2, self.y))
def draw_window(window, bird, pipes, base) :
    # Redraw one frame. Blit order matters: background first, then pipes,
    # then base, bird on top, and finally flip the display.
    window.blit(BACKGROUND_IMG, (0, 0))
    for pipe in pipes :
        pipe.draw(window)
    base.draw(window)
    bird.draw(window)
    pygame.display.update()
def main():
    """Run the game loop: move the bird, scroll pipes and base, track the
    score, and end on collision or window close."""
    bird = Bird(50, 130)
    pipes = [Pipe(300)]
    base = Base(380)
    window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
    clock = pygame.time.Clock()
    score = 0
    run = True
    pygame.joystick.init()
    while run:
        clock.tick(30)  # cap the loop at 30 frames per second
        rem = []        # pipes that have scrolled off-screen this frame
        add_pipe = False
        for event in pygame.event.get():
            # FIX: the original compared event.type against the magic
            # number 3 and fetched the key via event.dict.get('key'),
            # which only matched KEYUP under old SDL1-based pygame
            # builds; use the named constants so the jump works on
            # current pygame versions.
            if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                bird.jump()
            if event.type == pygame.QUIT:
                run = False
        for pipe in pipes:
            if pipe.collide(bird):
                run = False
            if pipe.x + pipe.PIPE_TOP.get_width() < 0:
                # Fully off the left edge: schedule for removal.
                rem.append(pipe)
            if not pipe.passed and pipe.x < bird.x:
                pipe.passed = True
                add_pipe = True
            pipe.move()
        if add_pipe:
            score += 1
            pipes.append(Pipe(300))
        for r in rem:
            pipes.remove(r)
        bird.move()
        base.move()
        draw_window(window, bird, pipes, base)
    if not run:
        print('--- Collision Detected --- \n=====================\n= YOU\'RE A LOSER =\n= SCORE : {} = \n====================='.format(score))


# FIX: guard the entry point so importing this module no longer launches
# the game immediately.
if __name__ == '__main__':
    main()
|
import json
import datetime
from ..config import BaseProposalCreatorConfig
from grant.rfp.models import RFP, RFPStatus, db, Category
class TestRfpApi(BaseProposalCreatorConfig):
    """Exercises the RFP like/unlike endpoint end to end."""

    def test_rfp_like(self):
        rfp = RFP(
            title="title",
            brief="brief",
            content="content",
            date_closes=datetime.datetime(2030, 1, 1),
            bounty="10",
            status=RFPStatus.DRAFT,
        )
        db.session.add(rfp)
        db.session.commit()
        # FIX: read the primary key *after* the commit — before the row is
        # flushed, rfp.id is still None, so every URL below previously hit
        # /api/v1/rfps/None instead of the real record.
        rfp_id = rfp.id
        # not logged in
        resp = self.app.put(
            f"/api/v1/rfps/{rfp_id}/like",
            data=json.dumps({"isLiked": True}),
            content_type="application/json",
        )
        self.assert401(resp)
        # logged in, but rfp does not exist
        self.login_default_user()
        resp = self.app.put(
            "/api/v1/rfps/123456789/like",
            data=json.dumps({"isLiked": True}),
            content_type="application/json",
        )
        self.assert404(resp)
        # RFP is not live
        resp = self.app.put(
            f"/api/v1/rfps/{rfp_id}/like",
            data=json.dumps({"isLiked": True}),
            content_type="application/json",
        )
        self.assert404(resp)
        self.assertEqual(resp.json["message"], "RFP is not live")
        # set RFP live, test like
        rfp = RFP.query.get(rfp_id)
        rfp.status = RFPStatus.LIVE
        db.session.add(rfp)
        db.session.commit()
        resp = self.app.put(
            f"/api/v1/rfps/{rfp_id}/like",
            data=json.dumps({"isLiked": True}),
            content_type="application/json",
        )
        self.assert200(resp)
        rfp = RFP.query.get(rfp_id)
        self.assertTrue(self.user in rfp.likes)
        resp = self.app.get(
            f"/api/v1/rfps/{rfp_id}"
        )
        self.assert200(resp)
        self.assertEqual(resp.json["authedLiked"], True)
        self.assertEqual(resp.json["likesCount"], 1)
        # test unliking
        resp = self.app.put(
            f"/api/v1/rfps/{rfp_id}/like",
            data=json.dumps({"isLiked": False}),
            content_type="application/json",
        )
        self.assert200(resp)
        rfp = RFP.query.get(rfp_id)
        self.assertTrue(self.user not in rfp.likes)
        resp = self.app.get(
            f"/api/v1/rfps/{rfp_id}"
        )
        self.assert200(resp)
        self.assertEqual(resp.json["authedLiked"], False)
        self.assertEqual(resp.json["likesCount"], 0)
|
#!/usr/bin/python
"""
PyRosetta4, Python 2.7
Joseph Lubin, 2017
"""
import argparse
from math import sqrt
from os import makedirs
from os.path import basename, isdir, join
from pyrosetta import *
from pyrosetta.rosetta import *
from pyrosetta.rosetta.core.pack.task import parse_resfile
from pyrosetta.rosetta.numeric import xyzVector_double_t
from pyrosetta.rosetta.protocols.enzdes import ADD_NEW, AddOrRemoveMatchCsts
from pyrosetta.rosetta.protocols.flexpep_docking import FlexPepDockingProtocol
from pyrosetta.rosetta.protocols.relax import FastRelax
from pyrosetta.teaching import MinMover, PackRotamersMover, SimpleThreadingMover
# [55, 56, 57, 58, 70, 73, 150, 151, 152, 153, 155, 170, 171, 172, 173, 174, 175]
def parse_args():
    # Command-line interface for the protease design run. Returns the
    # parsed argparse Namespace.
    info = "Design a protease around a peptide sequence"
    parser = argparse.ArgumentParser(description=info)
    # Starting structure (required).
    parser.add_argument("-s", "--start_struct", required=True,
        help="Pick starting PDB")
    # Cleaved/uncleaved sequences: repeatable flags; each value may be a
    # literal sequence or a file listing sequences (see get_seq_list).
    parser.add_argument("-cseq", "--cut_peptide_sequence", type=str,
        action='append', help="List cleaved peptide sequences or provide \
        list file")
    parser.add_argument("-useq", "--uncut_peptide_sequence", type=str,
        action='append', help="List uncleaved peptide sequences or provide \
        list file")
    parser.add_argument("-cr", "--cat_res", type=str, nargs='+',
        default=[72, 154], help="The catalytic residues of the protease, \
        excluded from design (defaults are 72 and 154, for HCV)")
    parser.add_argument("-cons", "--constraints", type=str,
        help="Pick constraints file")
    parser.add_argument("-rad", "--radius_design", type=int, default=8,
        help="Cutoff for designable residues (Angstroms from peptide, \
        default is 8A)")
    parser.add_argument("-th", "--thread", action = "store_true",
        help="Option to create threaded models. Use the first time running.")
    parser.add_argument("-res", "--resfile", type=str,
        help="Pick resfile for design")
    args = parser.parse_args()
    return args
def init_opts(cst_file='ly104.cst'):
    """ Produces a list of init options for PyRosetta, including cst file """
    parts = [
        '-mute core -mute protocols -mute basic',
        '-enzdes::cstfile ' + cst_file,
        '-cst_fa_weight 1.0 -run:preserve_header -out:pdb_gz',
    ]
    return ' '.join(parts)
def res_ca_cords(pose, res):
    """ Returns the x,y,z coordinates of the A-carbon of a given residue"""
    alpha_carbon = pose.residue(res).xyz('CA')
    return [alpha_carbon[i] for i in range(3)]
def point_dist(point_1, point_2):
    """ Given two sets of coordinates, determines distance between them """
    # Only the first three components are considered (x, y, z).
    squared = sum((point_2[i] - point_1[i]) ** 2 for i in range(3))
    return sqrt(squared)
def res_to_design(pdb, radius, cat_res=[72, 154]):
    """
    Determines the chain in the PDB that is the peptide, assuming that the
    peptide is smallest. Determines the coordinates of all a-carbons in the
    system, then checks the distances to each atom in the peptide. If the
    atom is within the cutoff radius, it is added to the list of designable
    residues. Returns a list of mutable residues.
    """
    pose = pose_from_pdb(pdb)
    chains = pose.split_by_chain()
    # Determining peptide chain (the one with fewest residues).
    pep_chain_no = 1
    for i in range(2, len(chains) + 1):
        if len(chains[i]) < len(chains[pep_chain_no]):
            pep_chain_no = i
    pep_chain = chains[pep_chain_no]
    chains.pop(pep_chain_no) # removes peptide chain from chain list
    # Getting residue number of peptide start (pose numbering).
    pep_start = 1
    for i in range(1,pep_chain_no):
        pep_start += chains[i].total_residue()
    # Getting peptide residue CA coordinates:
    pep_coords = []
    for i in range(1, pep_chain.total_residue() + 1):
        pep_coords.append(res_ca_cords(pep_chain, i))
    # Populating the list of designable residues
    mutable_residues = []
    for chain in chains:
        # Note that chains now excludes the peptide chain
        # NOTE(review): `res` is a chain-local index but is looked up in
        # the full `pose` and compared to pose-numbered cat_res — this is
        # only consistent when the protease is the first (single) chain;
        # confirm for multi-chain inputs.
        for res in range(1, chain.total_residue() + 1):
            if res in cat_res:
                # Exclude the catalytic residues from the designable list
                continue
            a_cords = res_ca_cords(pose, res)
            for i in pep_coords:
                # If any res is within radius of any pep res, add to the list
                if point_dist(a_cords, i) <= radius:
                    mutable_residues.append(res)
                    break
    return mutable_residues
def get_seq_list(seq_arg):
    """
    Expand a mixed argument list of literal peptide sequences and files of
    sequences into a flat list of sequences. An entry containing a dot (.)
    is treated as a file name; anything else is taken as a sequence.
    """
    pep_sequences = []
    for entry in seq_arg:
        if '.' not in entry:
            # Sequence typed directly into the argument.
            pep_sequences.append(entry.strip())
            continue
        # Otherwise the entry names a file of sequences.
        with open(entry, 'r') as handle:
            lines = handle.readlines()
        if len(lines) == 1:
            # All sequences listed horizontally on one line rather than
            # one per line: split on whitespace instead.
            lines = lines[0].split()
        pep_sequences.extend(item.strip() for item in lines)
    return pep_sequences
def thread_seq(pose, pep_start, pep_length, seq):
    """ Thread a new sequence in for the peptide. """
    # NOTE: pep_length is accepted for interface compatibility but is not
    # used — SimpleThreadingMover infers the span from the sequence.
    mover = SimpleThreadingMover(seq, pep_start)
    mover.apply(pose)
    return pose
def quick_thread(destination, pdb, sequences, cleaved=False, make=False):
    """
    Threads a set of sequences onto the peptide portion of the given PDB
    file, outputting a threaded PDB file for each sequence.
    Function is presently hard-coded for this application.
    """
    pose = pose_from_pdb(pdb)
    # File-name prefix encodes whether the sequence is cleaved.
    prefix = 'cleaved_ly104_wt_' if cleaved else 'uncleaved_ly104_wt_'
    thread_files = []
    for seq in sequences:
        out_name = join(destination, prefix + seq + '.pdb.gz')
        thread_files.append(out_name)
        if make:
            # Copy the template pose and thread this sequence onto the
            # peptide span starting at residue 197.
            threaded_pose = Pose()
            threaded_pose.assign(pose)
            threaded_pose = thread_seq(threaded_pose, 197, 11, seq)
            threaded_pose.dump_pdb(out_name)
    return thread_files
def apply_constraints(pose):
    """ Applies the constraints from the input CST file to a pose """
    cst_mover = AddOrRemoveMatchCsts()
    cst_mover.set_cst_action(ADD_NEW)
    cst_mover.apply(pose)
    return pose
def make_move_map(near_res, pep_start=197, pep_end=208):
    """
    Makes a movemap for a protease-peptide system, with all non-peptide
    residue backbones fixed, and side chains mobile for the peptide and all
    residues in an input list, which is intended to be the nearby residues
    (8A by default), excluding the catalytic ones.
    """
    movemap = MoveMap()
    # Peptide span: backbone and side chains both mobile.
    movemap.set_bb_true_range(pep_start, pep_end)
    movemap.set_chi_true_range(pep_start, pep_end)
    # Nearby protease residues: side chains only.
    for res_no in near_res:
        movemap.set_chi(res_no, True)
    return movemap
def make_pack_task(pose, resfile=None, pack_res=None):
    """
    Makes a packer task for a given pose using an input resfile or list of
    packable (not designable) residues.

    FIX: pack_res previously defaulted to a mutable list ([]), a classic
    shared-default hazard; default to None and normalize inside.
    """
    if pack_res is None:
        pack_res = []
    # Packer for protease + peptide
    task = standard_packer_task(pose)
    if resfile:
        # Resfile dictates designable/repackable residues.
        parse_resfile(pose, task, resfile)
    else:
        # No design: repack only the explicitly listed residues.
        task.restrict_to_repacking()
        task.temporarily_fix_everything()
        for i in pack_res:
            task.temporarily_set_pack_residue(i, True)
    return task
def fastrelax(pose, score_function, movemap):
    """
    Runs the FastRelax protocol on a pose, using given score function and
    movemap
    """
    protocol = FastRelax()
    protocol.set_scorefxn(score_function)
    protocol.set_movemap(movemap)
    protocol.apply(pose)
    return pose
def minmover(pose, score_function, movemap):
    """
    Runs a gradient-based minimization on a pose, using given score
    function and movemap
    """
    minimizer = MinMover()
    minimizer.score_function(score_function)
    minimizer.movemap(movemap)
    minimizer.apply(pose)
    return pose
def design_pack(pose, score_function, task):
    """ Runs packing mover on a pose, using given score function and task """
    packer = PackRotamersMover(score_function, task)
    packer.apply(pose)
    return pose
def res_scores(pose, residues, score_function):
    """ Gets the residue scores for a given set of residues in a pose """
    # Scoring populates pose.energies(); its string form is a table of
    # per-residue score components with one header line, so row index i
    # corresponds to residue i.
    score_function(pose)
    table = str(pose.energies()).split('\n')
    total = 0
    for res_no in residues:
        columns = table[res_no].split()[1:]
        total += sum(float(value) for value in columns)
    return total
def move_apart(pose, peptide_start, peptide_end):
    """ Moves the peptide a long distance away from the protease """
    # Making displacement vector: +100 along each axis, far outside any
    # interaction cutoff.
    xyz = xyzVector_double_t()
    xyz.x, xyz.y, xyz.z = [100 for i in range(3)]
    # Moving peptide, atom by atom
    # NOTE(review): assumes pose.residue(res) returns a live reference so
    # set_xyz mutates the pose in place — confirm against the PyRosetta
    # version in use.
    for res in range(peptide_start, peptide_end + 1):
        for atom in range(1, pose.residue(res).natoms() + 1):
            pose.residue(res).set_xyz(atom, pose.residue(res).xyz(atom) + xyz)
    return pose
def score_ddg(pose, near_res):
    """
    Gets a score for the ddG of peptide binding to the protease. This is
    achieved by taking the peptide and moving each atom a set arbitrary length
    that is large enough to be far from the protease, then repacking the side
    chains of both the peptide and the mutable protease residues. This
    function does not take a scorefunction as an input, scoring instead with
    the default function to ignore the effects of constraints.
    """
    # Making a new pose to avoid messing up the input
    ddg_pose = Pose()
    ddg_pose.assign(pose)
    sf = get_fa_scorefxn()
    # Score when docked
    dock_score = sf(ddg_pose)
    # Score when separate
    # NOTE: `near_res + range(197, 208)` is Python 2 list concatenation
    # (the file targets Python 2.7); under Python 3 range is not a list.
    pt = make_pack_task(ddg_pose, pack_res=near_res+range(197,208))
    ddg_pose = move_apart(ddg_pose, 197, 207)
    ddg_pose = design_pack(ddg_pose, sf, pt)
    split_score = sf(ddg_pose)
    # Negative ddg means binding is favorable (docked below split).
    ddg = dock_score - split_score
    return [round(i,3) for i in [dock_score, split_score, ddg]]
def identify_mutations(start_pose, end_pose, mutable_residues):
    """
    Compares the sequences of a starting pose and ending pose at specified
    residues and identifies differences. Returns a comma-joined string of
    changes in the format ANB (A = starting residue, N = residue number,
    B = ending residue), or "NONE" when nothing changed.
    """
    changes = []
    for res_no in mutable_residues:
        before = start_pose.residue(res_no).name1()
        after = end_pose.residue(res_no).name1()
        if before != after:
            changes.append(before + str(res_no) + after)
    return ','.join(changes) if changes else "NONE"
def set_design(pdb, score_function, des_res, num_decoys, resfile):
    """
    Uses the job distributor to output a set of proteases designed for
    compatability with a threaded peptide sequence. Outputs a provided number
    of decoys into a directory that is also a required input. Will relax the
    pdb, then run 20 rounds of design/repacking plus minimization. For all
    movers, only the residues in the peptide and those within the input list
    are repackable, and only those in the input list are designable. For the
    relax, the peptide backbone is flexible, and constraints are applied.
    """
    # (Python 2 file: note the print statements below.)
    pose = apply_constraints(pose_from_pdb(pdb))
    sf = score_function
    mm = make_move_map(des_res) # for relax and minimization
    dec_name = pdb.replace('.pdb.gz', '_designed')
    jd = PyJobDistributor(dec_name, num_decoys, sf)
    while not jd.job_complete:
        # Relaxing a fresh copy of the constrained start pose per decoy
        pp = Pose()
        pp.assign(pose)
        pp = fastrelax(pp, sf, mm)
        # Doing design: alternate packing/design with minimization
        for i in range(20):
            pt = make_pack_task(pp, resfile=resfile)
            pp = design_pack(pp, sf, pt)
            pp = minmover(pp, sf, mm)
        # Getting residue scores, ddG, and mutations list
        prot_res_e = res_scores(pp, des_res, sf)
        pep_res_e = res_scores(pp, range(197, 208), sf)
        dock_split_ddg = score_ddg(pp, des_res)
        mutations = identify_mutations(pose, pp, des_res)
        # Making line to add to fasc file
        scores = [prot_res_e, pep_res_e] + dock_split_ddg + [mutations]
        temp = "protease_res_scores: {}\tpeptide_res_scores: {}\t"
        temp += "docked_score: {}\tsplit_score: {}\tddG: {}\tmutations: {}"
        score_text = temp.format(*[str(i) for i in scores])
        print score_text
        print '\n'
        jd.additional_decoy_info = score_text
        jd.output_decoy(pp)
def main():
    # Top-level driver: parse args, initialize PyRosetta with the
    # constraint file, thread peptide sequences, and design each model.
    # Getting user inputs
    args = parse_args()
    # Initializing PyRosetta
    ros_opts = init_opts(cst_file=args.constraints)
    init(options=ros_opts)
    # Score function
    sf = create_score_function('ref2015_cst')
    # Destination folder for PDB files
    pdb = args.start_struct
    dir_nam = 'ly104_design_decoys'
    if not isdir(dir_nam):
        makedirs(dir_nam)
    # Reading inputs for peptide sequences
    cut_seq = get_seq_list(args.cut_peptide_sequence)
    uncut_seq = get_seq_list(args.uncut_peptide_sequence)
    # Determining peptide part of PDB file, residues near peptide
    des_res = res_to_design(pdb, args.radius_design, cat_res=args.cat_res)
    # Creating threaded structures
    # NOTE(review): with --thread absent, quick_thread still returns the
    # expected file names but does not create them — design then assumes
    # the files exist from a previous run.
    make = False
    if args.thread:
        make = True
    t_structs = quick_thread(dir_nam, pdb, cut_seq, cleaved=True, make=make)
    t_structs += quick_thread(dir_nam, pdb, uncut_seq, make=make)
    # Doing design on threaded models
    for struc in t_structs:
        set_design(struc, sf, des_res, 11, args.resfile)


if __name__ == '__main__':
    main()
from wsgiref.simple_server import make_server
from time import ctime
def simple_application(environ, start_response):
    """Minimal WSGI app: always responds 200 OK with a fixed HTML body."""
    headers = [('Content-Type', 'text/html')]
    start_response('200 OK', headers)
    return [b'<h1>Hello, web!</h1>']
class middleware:
    """WSGI middleware that prefixes every response chunk with a timestamp.

    FIX: the original formatted the raw bytes chunk straight into a str,
    which on Python 3 embeds the repr (``b'...'``) in the output; decode
    the chunk first, then re-encode the stamped line.
    """

    def __init__(self, app):
        # The wrapped WSGI application.
        self.app = app

    def __call__(self, *stuff):
        # Lazily stamp each chunk produced by the wrapped app.
        return (
            bytes('[{}] {}'.format(ctime(), chunk.decode('utf-8')), 'utf-8')
            for chunk in self.app(*stuff)
        )
# Serve the middleware-wrapped app on all interfaces, port 8000, forever.
if __name__ == '__main__':
    httpd = make_server('', 8000, middleware(simple_application))
    print("Serving HTTP on port 8000...")
    httpd.serve_forever()
|
"""
Compare io_throughput for COPY TO vs regular driver
"""
import config
import numpy as np
import traceback
from uuid import uuid4, UUID
from lib.Database import Database
from time import time
async def main(args):
    # Entry point: args[0] is the number of rows to insert per method.
    await test(int(args[0]))
def particleFilter(p):
    """Return True when the particle should be filtered out because its
    "area" value lies outside the accepted range [100, 2000].

    FIX: the original returned None implicitly for in-range particles;
    now always returns an explicit bool (truthiness is preserved for
    existing callers).
    """
    area = p["area"]
    return area < 100 or area > 2000
async def test(N):
    # Benchmark three insertion strategies against the io_test table:
    # 1) plain executemany, 2) executemany inside a transaction,
    # 3) server-side COPY FROM a CSV file. The table is emptied and
    # vacuumed before each method so timings are comparable.
    db = Database()
    # N rows x 5 random integer columns.
    inserts = np.random.randint(0, 30000, (N, 5))
    await db.execute(
        """
        DELETE FROM io_test
        """
    )
    await db.vacuum("io_test")
    try:
        print("Method 1: vanilla inserts")
        start = time()
        await db.executemany(
            """
            INSERT INTO io_test (col1, col2, col3, col4, col5)
            VALUES ($1, $2, $3, $4, $5)
            """,
            list(inserts),
        )
        print("Inserting " + str(N) + " records took " + str(time() - start))
    except Exception as e:
        print(e)
        traceback.print_exc()
    # -------------------------------------------------------------------------
    await db.execute(
        """
        DELETE FROM io_test
        """
    )
    await db.vacuum("io_test")
    try:
        print("Method 2: vanilla transaction inserts")
        start = time()
        tx, transaction = await db.transaction()
        await tx.executemany(
            """
            INSERT INTO io_test (col1, col2, col3, col4, col5)
            VALUES ($1, $2, $3, $4, $5)
            """,
            list(inserts),
        )
        await transaction.commit()
        print("Inserting " + str(N) + " records took " + str(time() - start))
    except Exception as e:
        print(e)
        traceback.print_exc()
        # NOTE(review): if db.transaction() itself raised, `transaction`
        # is unbound here and this rollback raises NameError.
        await transaction.rollback()
    # -------------------------------------------------------------------------
    await db.execute(
        """
        DELETE FROM io_test
        """
    )
    await db.vacuum("io_test")
    try:
        print("Method 3: copy-to")
        start = time()
        # np.savetxt('/home/mot/tmp/io_test.csv', inserts, delimiter=',', fmt='%d')
        # NOTE(review): only 4 of the 5 generated columns are written, and
        # the header contains spaces after the commas — confirm intended.
        with open("/home/mot/tmp/io_test.csv", "w") as f:
            f.write("col1, col2, col3, col4\n")
            for row in inserts:
                f.write("{},{},{},{}\n".format(row[0], row[1], row[2], row[3]))
        tx, transaction = await db.transaction()
        # Server-side COPY: the file path is read by the database server.
        await tx.execute(
            """
            COPY io_test (col1, col2, col3, col4) FROM '/home/mot/tmp/io_test.csv' DELIMITER ',' CSV HEADER;
            """
        )
        await transaction.commit()
        print("Inserting " + str(N) + " records took " + str(time() - start))
    except Exception as e:
        print(e)
        traceback.print_exc()
        await transaction.rollback()
|
import sys
def main():
    """Print the running interpreter's version as major.minor.micro."""
    info = sys.version_info
    print('Python version {}.{}.{}'.format(info.major, info.minor, info.micro))


if __name__ == "__main__":
    main()
|
from gurobipy import GRB
import gurobipy as gp
from project_thesis.model.BaseModel import BaseModel
from project_thesis.model.BaseModelInput import BaseModelInput
class StandardModelInput(BaseModelInput):
    # Model input that rewards visiting low-battery scooters and all
    # delivery locations.
    def compute_reward_matrix(self):
        # Index 0 (depot) gets no reward; scooter locations get
        # 1 - battery_level (more reward the emptier the battery);
        # delivery locations get a flat reward of 1.0.
        return (
            [0.0] + [1 - x for x in self.battery_level[1:]] + [1.0] * len(self.delivery)
        )
class StandardModel(BaseModel):
    # Baseline optimization model: maximize visit rewards minus the
    # battery level of swapped scooters.
    def setup(self):
        # No extra constraints beyond the base model.
        super().setup()

    @staticmethod
    def get_input_class():
        # Pairs this model with its input/reward definition.
        return StandardModelInput

    def to_string(self, short_name=True):
        return "S" if short_name else "Standard"

    def set_objective(self):
        # Maximize: sum of rewards over visited non-depot locations,
        # minus the battery level of scooters picked up (self._ holds the
        # model input; y/p are the visit/pickup decision variables).
        self.m.setObjective(
            gp.quicksum(
                self._.reward[i] * self.y[(i, v)] for i, v in self.cart_loc_v_not_depot
            )
            - gp.quicksum(
                self._.battery_level[i] * self.p[(i, v)]
                for i, v in self.cart_loc_v_scooters
            ),
            GRB.MAXIMIZE,
        )
|
# Write a program that computes the net amount of a bank account based on a transaction log from console input. The transaction log format is shown as follows:
# D 100
# W 200
# D means deposit while W means withdrawal.
# Suppose the following input is supplied to the program:
# D 300
# D 300
# W 200
# D 100
# Then, the output should be:
# 500
# Read "D <amount>" (deposit) / "W <amount>" (withdrawal) transactions
# from stdin until a blank line, then print the net balance.
amount = 0
while True:
    raw = input("Make a transaction: ")
    if not raw:
        break
    args = raw.split(' ')
    operation = args[0]
    value = float(args[1])
    # FIX: the original used `operation is 'D'`, comparing object identity
    # with a string literal — that only matched via CPython interning luck.
    # Value equality (==) is the correct test; unknown codes are ignored.
    if operation == 'D':
        amount += value
    elif operation == 'W':
        amount -= value
print('Total amount:', amount)
|
# Copyright 2019 vmware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
# Predefined RBAC actions a policy may grant.
ACTIONS = [
    {
        'name': 'access_as_shared',
        'value': _('Access as Shared')
    },
    {
        'name': 'access_as_external',
        'value': _('Access as External')
    }
]
# Predefined provider object types.
OBJECT_TYPES = [
    {
        'name': 'network',
        'value': _('Network')
    }
]
# Appended to OBJECT_TYPES at runtime when the neutron 'qos' extension
# is available.
QOS_POLICY_TYPE = {
    'name': 'qos_policy',
    'value': _('QoS Policy')
}
class CreatePolicyForm(forms.SelfHandlingForm):
    """Form for creating a neutron RBAC policy.

    Lets an admin share a network (or, when the neutron 'qos' extension is
    enabled, a QoS policy) with a single project, with every project ("*"),
    or mark it as externally accessible.
    """

    target_tenant = forms.ThemableChoiceField(label=_("Target Project"))
    object_type = forms.ThemableChoiceField(
        label=_("Object Type"),
        widget=forms.ThemableSelectWidget(
            attrs={
                'class': 'switchable',
                'data-slug': 'object_type'
            }))
    network_id = forms.ThemableChoiceField(
        label=_("Network"),
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switched',
            'data-switch-on': 'object_type',
        }),
        required=False)
    qos_policy_id = forms.ThemableChoiceField(
        label=_("QoS Policy"),
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switched',
            'data-switch-on': 'object_type',
        }),
        required=False)
    action = forms.ThemableChoiceField(label=_("Action"))

    def __init__(self, request, *args, **kwargs):
        """Populate all choice fields from keystone/neutron for this request."""
        super(CreatePolicyForm, self).__init__(request, *args, **kwargs)
        tenant_choices = [('', _("Select a project"))]
        tenants, has_more = api.keystone.tenant_list(request)
        # "*" targets every project at once.
        tenant_choices.append(("*", "*"))
        for tenant in tenants:
            tenant_choices.append((tenant.id, tenant.name))
        self.fields['target_tenant'].choices = tenant_choices
        action_choices = [('', _("Select an action"))]
        for action in ACTIONS:
            action_choices.append((action['name'],
                                   action['value']))
        self.fields['action'].choices = action_choices
        network_choices = []
        networks = api.neutron.network_list(request)
        for network in networks:
            network_choices.append((network.id, network.name))
        self.fields['network_id'].choices = network_choices
        # If enable QoS Policy
        # NOTE(review): this mutates the module-level OBJECT_TYPES list, so
        # once the extension is seen, 'qos_policy' stays registered for all
        # subsequent requests in this process.
        if api.neutron.is_extension_supported(request, extension_alias='qos'):
            qos_policies = api.neutron.policy_list(request)
            qos_choices = [(qos_policy['id'], qos_policy['name'])
                           for qos_policy in qos_policies]
            self.fields['qos_policy_id'].choices = qos_choices
            if QOS_POLICY_TYPE not in OBJECT_TYPES:
                OBJECT_TYPES.append(QOS_POLICY_TYPE)
        object_type_choices = [('', _("Select an object type"))]
        for object_type in OBJECT_TYPES:
            object_type_choices.append((object_type['name'],
                                        object_type['value']))
        self.fields['object_type'].choices = object_type_choices
        # Register object types which required
        self.fields['network_id'].widget.attrs.update(
            {'data-object_type-network': _('Network')})
        self.fields['qos_policy_id'].widget.attrs.update(
            {'data-object_type-qos_policy': _('QoS Policy')})

    def handle(self, request, data):
        """Create the RBAC policy; on failure, redirect with an error message."""
        try:
            params = {
                'target_tenant': data['target_tenant'],
                'action': data['action'],
                'object_type': data['object_type'],
            }
            # Which id field carries the object depends on the chosen type.
            if data['object_type'] == 'network':
                params['object_id'] = data['network_id']
            elif data['object_type'] == 'qos_policy':
                params['object_id'] = data['qos_policy_id']
            rbac_policy = api.neutron.rbac_policy_create(request, **params)
            msg = _('RBAC Policy was successfully created.')
            messages.success(request, msg)
            return rbac_policy
        except Exception:
            redirect = reverse('horizon:admin:rbac_policies:index')
            msg = _('Failed to create a rbac policy.')
            exceptions.handle(request, msg, redirect=redirect)
            return False
class UpdatePolicyForm(forms.SelfHandlingForm):
    """Form that retargets an existing RBAC policy to another project."""

    target_tenant = forms.ThemableChoiceField(label=_("Target Project"))
    failure_url = 'horizon:admin:rbac_policies:index'

    def __init__(self, request, *args, **kwargs):
        super(UpdatePolicyForm, self).__init__(request, *args, **kwargs)
        # Placeholder entry first, then every project visible to the admin.
        tenants, has_more = api.keystone.tenant_list(request)
        choices = [('', _("Select a project"))]
        choices.extend((tenant.id, tenant.name) for tenant in tenants)
        self.fields['target_tenant'].choices = choices

    def handle(self, request, data):
        """Apply the new target project; redirect with an error on failure."""
        policy_id = self.initial['rbac_policy_id']
        try:
            rbac_policy = api.neutron.rbac_policy_update(
                request, policy_id, target_tenant=data['target_tenant'])
            msg = _('RBAC Policy %s was successfully updated.') % policy_id
            messages.success(request, msg)
            return rbac_policy
        except Exception as e:
            LOG.info('Failed to update rbac policy %(id)s: %(exc)s',
                     {'id': policy_id, 'exc': e})
            msg = _('Failed to update rbac policy %s') % policy_id
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
|
from simulator import Simulator, Map, Agent
from devices import Device
import numpy as np
from time import time
from utils import dist
# an example of simulator configuration
class config:
    """Example simulator configuration consumed by Simulator()."""
    # Rendering and physics settings.
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 10,
        'world_width': 300,
        'world_height': 250,
        'screen_width': 600,  # size of window
        'screen_height': 500,  # size of window
        'dt': 1.0 / 10,  # update interval in the system
        'eps': 1.0  # minimal distance for collision detection
    }
    rebounce = 0.5  # rebounce factor. 0 for no rebounce. 1 for rebounce with same speed at the opposite direction.
# Example of subclassing the Agent class to define custom motion.
class Robot(Agent):
    def __init__(self, env):
        """Square agent carrying a range sensor.

        Every Agent subclass must implement update() and set the velocity
        or acceleration there to enable motion.
        """
        corners = np.array([[-2, -2], [2, -2], [2, 2], [-2, 2]])
        Agent.__init__(self, env,
                       kp=corners,
                       color=(1, 0, 0, 0.5),
                       v_max=2.0)
        self.sensor = Sensor(env, self)

    def update(self, t):
        """Read the sensor, then follow a noisy circular trajectory."""
        self.sensor.read()
        noise_x = np.random.rand()
        noise_y = np.random.rand()
        # the renderer
        self.v = np.array([self.v_max * (np.cos(t) + noise_x),
                           self.v_max * (np.sin(t) + noise_y)])
        self.va = np.pi
# Example of a range sensor that changes color when it detects another agent or an obstacle.
class Sensor(Device):
    def __init__(self, env, parent, radius=10):
        """Line-shaped range sensor attached to a parent agent."""
        self.radius = radius
        endpoints = np.array([[-radius, 0], [radius, 0]])
        Device.__init__(self, env, parent, kp=endpoints,
                        color=(0, 1, 0, 0.5), filled=True)

    def read(self):
        """Return True when something is within range.

        Turns the sensor red if another agent or the map is closer than
        the sensor radius, green otherwise.
        """
        red = np.array([1, 0, 0, 0.5])
        for other in self.env.agents:
            if other is self.parent:
                continue
            if dist(self.parent, other) < self.radius:
                self.geom.set_color_rgba(red)
                return True
        if dist(self.parent, self.env.map) < self.radius:
            self.geom.set_color_rgba(red)
            return True
        self.geom.set_color_rgba(np.array([0, 1, 0, 0.5]))
        return False
# Build the simulator, a straight-line obstacle map, and five robots,
# then run the render loop forever.
env = Simulator(config=config)
my_map = Map()
my_map.get_map_from_geom2d(env, kp=np.array([[-100, 0], [100, 0]]))
robots = [Robot(env) for i in range(5)]
while True:
    t = time()
    [r.update(t) for r in robots]  # drive each robot with the wall-clock time
    env._render()
|
#!/usr/bin/env python
vowels = ['a','e','i','o','u', 'A', 'E', 'I', 'O', 'U']
candidate = str(raw_input('Gimme a string: '))
vowel_count = reduce(lambda accum, char:
accum + (1 if char in vowels else 0), candidate, 0)
print 'There are ' + str(vowel_count) + ' in ' + candidate
|
import matplotlib.pyplot as plt
from numpy import *
import sys
import glob
import os
# Python 2 script: aggregate every '*<MODE>_<ENERGY_LEVEL>.frag' file under
# DATA_DIR and plot a histogram of fragment-value frequencies.
# Usage: script.py DATA_DIR MODE ENERGY_LEVEL
DATA_DIR = sys.argv[1]
MODE = sys.argv[2]
ENERGY_LEVEL = sys.argv[3]
os.chdir('/h/mattioli/thesis/misc/frag_dist')
barWidth = 0.001
data = []
for f in glob.glob(DATA_DIR + '/*' + MODE + '_' + ENERGY_LEVEL + '.frag') :
    print f
    tmp = genfromtxt(f)
    data = hstack((data, tmp))  # concatenate each file's values into one flat array
print data.shape
v, c = unique(data, return_counts=True)  # distinct values and their frequencies
print v.shape
plt.bar(v, c, barWidth)
plt.title(MODE + '_' + ENERGY_LEVEL)
plt.ylim([0,3000])
plt.show()
|
# coding=utf-8
import os
import sys
import unittest
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.initData import init
from webTest_pro.common.model.baseActionAdd import add_schools, user_login
from webTest_pro.common.model.baseActionDel import del_school
from webTest_pro.common.model.baseActionModify import update_School
from webTest_pro.common.logger import logger, T_INFO
# Python 2 only: re-expose setdefaultencoding so UTF-8 fixture strings
# round-trip without UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding("utf-8")
# Fixture data: schools to create, the rename to apply, and which ones to delete.
schools = [{'schoolName': u'二中', 'schoolType': u'高中', 'schoolArea': u'郑州市'},
           {'schoolName': u'三中', 'schoolType': u'中学', 'schoolArea': u'郑州市'},
           {'schoolName': u'四中', 'schoolType': u'中学', 'schoolArea': u'开封市'},
           {'schoolName': u'五中', 'schoolType': u'小学', 'schoolArea': u'开封市'},
           {'schoolName': u'六中', 'schoolType': u'小学', 'schoolArea': u'开封市'},
           {'schoolName': u'一中', 'schoolType': u'高中', 'schoolArea': u'郑州市'}]
schoolData = [{'schoolName': u'河南一中', 'searchName': u'二中'}]
schoolDel = [{'schoolName': u'三中'},
             {'schoolName': u'四中'},
             {'schoolName': u'五中'},
             {'schoolName': u'六中'},
             {'schoolName': u'一中'}]
class schoolmanager(unittest.TestCase):
''''学校管理'''
def setUp(self):
if init.execEnv['execType'] == 'local':
T_INFO(logger,"\nlocal exec testcase")
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(8)
self.verificationErrors = []
self.accept_next_alert = True
T_INFO(logger,"start tenantmanger...")
else:
T_INFO(logger,"\nremote exec testcase")
browser = webdriver.DesiredCapabilities.CHROME
self.driver = webdriver.Remote(command_executor=init.execEnv['remoteUrl'], desired_capabilities=browser)
self.driver.implicitly_wait(8)
self.verificationErrors = []
self.accept_next_alert = True
T_INFO(logger,"start tenantmanger...")
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
T_INFO(logger,"tenantmanger end!")
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
print "schoolmanager end!"
print "=" * 60
def test_addschool(self):
'''添加学校'''
print "执行:添加学校测试"
driver = self.driver
driver.refresh()
sleep(2)
user_login(driver, **init.loginInfo)
for school in schools:
add_schools(driver, **school)
self.assertEqual(u"添加成功!", driver.find_element_by_css_selector(".layui-layer-content").text)
sleep(0.5)
def test_bsearch_school(self):
'''学校查询'''
print "执行:学校查询"
try:
driver = self.driver
user_login(driver, **init.loginInfo)
driver.find_element_by_link_text(u"系统管理").click()
driver.find_element_by_link_text(u"学校管理").click()
sleep(1)
driver.find_element_by_xpath("//input[@id='condition']").clear()
driver.find_element_by_xpath("//input[@id='condition']").send_keys(u"教育局")
driver.find_element_by_xpath("//button[@id='searchbtn']").click()
sleep(1)
self.assertEqual(u"河南省教育局", driver.find_element_by_xpath(".//*[@id='schoolname']").text)
print "查询学校成功。"
except Exception as e:
print "查询学校失败。"
print e
sleep(1)
def test_bupdate_school(self):
'''修改单条学校数据'''
print "执行:修改单条学校数据"
# try:
driver = self.driver
user_login(driver, **init.loginInfo)
for itms in schoolData:
update_School(driver, **itms)
sleep(1)
# resultContent = driver.find_element_by_css_selector("div.layui-layer-content.firepath-matching-node").text
# self.assertEqual(u"河南一中", resultContent)
print "修改单条学校数据:成功。"
# except Exception as e:
print "修改单条学校数据失败。"
sleep(1)
def test_del_school_ok(self):
'''删除学校_确定'''
print "执行:删除学校_确定"
driver = self.driver
user_login(driver, **init.loginInfo)
for flag in schoolDel:
del_school(driver, **flag)
# self.assertEqual(u"删除成功!", driver.find_element_by_css_selector(".layui-layer-content").text)
sleep(0.5)
def test_del_school_cancel(self):
'''删除学校_取消'''
print "执行:删除学校_取消"
try:
driver = self.driver
user_login(driver, **init.loginInfo)
# add_school(driver, u'一中', u'高中', u'郑州市')
sleep(0.5)
driver.find_element_by_link_text(u"系统管理").click()
driver.find_element_by_link_text(u"学校管理").click()
sleep(0.5)
driver.find_element_by_xpath("//button[@id='delsc']").click()
driver.find_element_by_css_selector("a.layui-layer-btn1").click()
self.assertEqual(u"一中", driver.find_element_by_xpath("//*[@id='schoolname']").text)
# print driver.find_element_by_xpath("//*[@id='schoolname']").text
print "删除学校_取消:成功"
except Exception as e:
print e
print "删除学校_取消:失败"
sleep(1)
def is_element_present(self, how, what):
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException as e:
print e
return False
return True
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException as e:
print e
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
# Run the whole suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
import logging
import time
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv
from datetime import datetime
_logger = logging.getLogger(__name__)
# Status codes for flow-cash lines: realised ('ok'), forecast ('pv'),
# overdue ('at').
TYPE_STATUS = {
    'ok': 'OK',
    'pv': 'Prev',
    'at': 'Atra'
}
# Reserved for filter options; currently unused.
TYPE_FILTRO = {
}
class account_flow_cash(osv.osv_memory):
    """
    Transient (osv_memory) model holding one generated cash-flow report:
    a header record plus its account.flow_cash.line rows.
    """
    _name = "account.flow_cash"
    _description = "Account flow cash"
    _columns = {
        'date_from': fields.date('Data Inicial', readonly=True),
        'date_to': fields.date('Data Final', readonly=True),
        'date': fields.date('Data Emissão', readonly=True),
        'linhas_ids' : fields.one2many('account.flow_cash.line','flowcash_id','Movimento',readonly=True),
        'target_move': fields.selection([('all', 'Realizado + Previsto'),
                                         ('real', 'Realizado'),
                                         ('prev', 'Previsto')
                                         ], 'Filtro', required=True,readonly=True),
    }

    def create_flow(self, cr, uid, DataIn, DataOut, context=None, **kwargs):
        """Generate the cash-flow report between DataIn and DataOut.

        Optional kwargs: 'tipo' ('all'/'real'/'prev'), 'transf' (include
        liquidity-to-liquidity transfers), 'sintetico', 'account_analitic_id',
        and 'journal_id' (restricts to that journal's debit account and
        forces tipo='real').  Returns the id of the created report record.
        """
        _logger.info('Gerando Fluxo de Caixa de '+str(DataIn)+' até '+str(DataOut)+' '+str(kwargs))
        if context == None:
            context = {}
        # Defaults for the optional filters.
        sintetico = False
        account_analitic_id = False
        journal_id = False
        account_id = False
        account_cd = False
        tipo = 'all'
        transf = False
        SaldoAnterior = 0
        if kwargs:
            if 'transf' in kwargs:
                transf = kwargs['transf']
            if 'tipo' in kwargs:
                tipo = kwargs['tipo']
            if 'sintetico' in kwargs:
                sintetico = kwargs['sintetico']
            if 'account_analitic_id' in kwargs:
                account_analitic_id = kwargs['account_analitic_id']
            if 'journal_id' in kwargs:
                # A journal filter narrows the report to that journal's
                # default debit account and only realised movements.
                journal_id = kwargs['journal_id']
                journal = self.pool.get('account.journal').browse(cr,uid,journal_id,context=None)
                account_id = journal.default_debit_account_id.id
                account_cd = journal.default_debit_account_id.code
                tipo = 'real'
        _logger.info('DataIn = '+str(DataIn))
        _logger.info('DataOut = '+str(DataOut))
        hoje = datetime.today()
        # Header record for this report run.
        dFlow = {
            'date_from': DataIn,
            'date_to': DataOut,
            'date': hoje,
            'target_move': tipo,
        }
        _logger.info('SQL = '+str(dFlow))
        FlowCash = self.pool.get('account.flow_cash')
        FlowCashLine = self.pool.get('account.flow_cash.line')
        idFlowCash = FlowCash.create(cr,uid,dFlow,context)
        AccMoveLine = self.pool.get('account.move.line')
        # Opening balance: totals per liquidity account before DataIn.
        # NOTE(review): the SQL is built with %-interpolation and
        # datetime.strftime(DataIn, ...) — this assumes DataIn is a
        # datetime object, not a string; confirm against callers.
        sql = "SELECT a.account_id as id, sum(a.credit) as vlcred, sum(a.debit) as vldebit " \
              "FROM account_move_line a " \
              "JOIN account_account b ON a.account_id = b.id " \
              "WHERE CAST(date AS DATE) < '%s'" % datetime.strftime(DataIn,'%Y-%m-%d')+" " \
              "AND b.type = 'liquidity' "\
              "GROUP BY a.account_id"
        #"AND b.code similar to '(1.01.01.)%'"
        _logger.info('SQL = {'+sql+'}')
        cr.execute(sql)
        # res = cr.fetchone()
        #
        # vlCred = float(res[0] or 0)
        # vlDeb = float(res[1] or 0)
        vlCred = float(0)
        vlDeb = float(0)
        vlAcumCr = float(0)
        vlAcumDb = float(0)
        # Sum the per-account totals, honouring the journal account filter.
        for r in cr.fetchall():
            if account_id:
                if int(account_id) == int(r[0]):
                    vlCred += float(r[1] or 0)
                    vlDeb += float(r[2] or 0)
                    _logger.info("Somado no filtro vlCred += "+str(r[1] or 0)+" & vlDeb += "+str(r[2] or 0))
            else:
                vlCred += float(r[1] or 0)
                vlDeb += float(r[2] or 0)
                _logger.info("Somado no total vlCred += "+str(r[1] or 0)+" & vlDeb += "+str(r[2] or 0))
        _logger.info('Creditos/Debitos = '+str(vlCred)+' / '+str(vlDeb))
        # Running balance starts at the opening balance (debits - credits).
        vlAcum = vlDeb - vlCred
        # if journal_id:
        #     if account_cd == '1.01.01.02.003':
        #         vlAcum = vlDeb - (vlCred + 5116.06)
        #     elif account_cd == '1.01.01.02.004':
        #         vlAcum = vlDeb - (vlCred + (-11431.33))
        #     elif account_cd == '1.01.01.02.005':
        #         vlAcum = vlDeb - (vlCred + 688,24)
        #     elif account_cd == '1.01.01.02.006':
        #         vlAcum = vlDeb - (vlCred + 805.95)
        #     elif account_cd == '1.01.01.02.007':
        #         vlAcum = vlDeb - (vlCred + 192.81)
        #     else:
        #         vlAcum = vlDeb - (vlCred + 4628.27)
        # First line of the report: the opening balance.
        dLineFlow = {
            'name': 'Saldo Anterior',
            'flowcash_id': idFlowCash,
            'seq': 0,
            'date': DataIn,
            'val_sum': vlAcum,
        }
        LineFlowId = FlowCashLine.create(cr,uid,dLineFlow,context)
        Seq = 1
        vlSaldo = 0
        dtFinal = False
        # Realised movements: liquidity-account move lines in the period.
        if tipo=='all' or tipo=='real':
            _logger.info('realizado: '+tipo) # if journal_id:sql = sql + " AND a.journal_id = %d" % (journal_id,)
            #MoveLineIds = AccMoveLine.search(cr, uid, [('date', '>=', DataIn), ('date', '<=', DataOut),('account_id.name', '=like', '%s%%' % '1.01.01.'),], order='date,id')
            if account_id:
                MoveLineIds = AccMoveLine.search(cr, uid, [('date', '>=', DataIn), ('date', '<=', DataOut),('account_id','=',account_id)], order='date,id')
            else:
                MoveLineIds = AccMoveLine.search(cr, uid, [('date', '>=', DataIn), ('date', '<=', DataOut),('account_id.type','=','liquidity'),], order='date,id')
            for MoveLine in AccMoveLine.browse(cr, uid, MoveLineIds, context):
                computa = True
                if transf == False:
                    # Skip internal transfers: a mirror line in the same move
                    # on another liquidity account with swapped debit/credit.
                    cre = MoveLine.credit
                    deb = MoveLine.debit
                    CPartidaIds = AccMoveLine.search(cr, uid,[('move_id', '=', MoveLine.move_id.id),
                                                              ('id', '<>', MoveLine.id),
                                                              ('account_id.type','=','liquidity'),
                                                              ('credit','=',deb),
                                                              ('debit','=',cre)],order='date')
                    if CPartidaIds:
                        computa = False
                if computa:
                    if MoveLine.credit:
                        vlCredito = MoveLine.credit
                    else:
                        vlCredito = 0
                    if MoveLine.debit:
                        vlDebito = MoveLine.debit
                    else:
                        vlDebito = 0
                    vlAcumCr = vlAcumCr + vlCredito
                    vlAcumDb = vlAcumDb + vlDebito
                    vlSaldo = vlDebito - vlCredito
                    vlAcum = vlAcum + vlSaldo
                    # Line label: reference/name, plus the partner if any.
                    name = MoveLine.ref or MoveLine.name or ''
                    if MoveLine.partner_id:
                        if name: name = name + ", "
                        name = name + MoveLine.partner_id.name
                    dLineFlow = {
                        'name': name,
                        'flowcash_id': idFlowCash,
                        'seq': Seq,
                        'date': MoveLine.date,
                        'val_in': vlDebito,
                        'val_out': vlCredito,
                        'val_add': vlSaldo,
                        'val_sum': vlAcum,
                        'state': 'ok'
                    }
                    LineFlowId = FlowCashLine.create(cr,uid,dLineFlow,context)
                    Seq = Seq + 1
                    dtFinal = MoveLine.date
        # Forecast movements: open receivable/payable lines due up to DataOut.
        if tipo=='all' or tipo=='prev':
            _logger.info('previsto')
            #MoveLineIds = AccMoveLine.search(cr, uid, [('date_maturity','<>',False),('date_maturity', '<=', DataOut),('account_id.type', 'in', ['receivable', 'payable']),('reconcile_id', '=', False),], order='date_maturity,id')
            MoveLineIds = AccMoveLine.search(cr, uid, [('date_maturity','<>',False),('date_maturity', '<=', DataOut),('account_id.type', 'in', ['receivable', 'payable']),('reconcile_id', '=', False),], order='date_maturity,id')
            for MoveLine in AccMoveLine.browse(cr, uid, MoveLineIds, context):
                # Overdue lines are flagged 'at', future ones 'pv'.
                if datetime.strptime(MoveLine.date_maturity,'%Y-%m-%d') < datetime.today():
                    #DateDue = datetime.today()
                    DateDue = datetime.strptime(MoveLine.date_maturity,'%Y-%m-%d')
                    _logger.info(str(DateDue))
                    Status = 'at'
                else:
                    DateDue = datetime.strptime(MoveLine.date_maturity,'%Y-%m-%d')
                    Status = 'pv'
                if MoveLine.credit > 0:
                    vlCredito = MoveLine.amount_to_pay
                else:
                    vlCredito = 0
                if MoveLine.debit > 0:
                    vlDebito = MoveLine.amount_to_pay * (-1)
                else:
                    vlDebito = 0
                vlAcumCr = vlAcumCr + vlCredito
                vlAcumDb = vlAcumDb + vlDebito
                vlSaldo = vlDebito - vlCredito
                vlAcum = vlAcum + vlSaldo
                name = MoveLine.ref or MoveLine.name or ''
                if MoveLine.partner_id:
                    if name: name = name + ", "
                    name = name + MoveLine.partner_id.name
                dLineFlow = {
                    'name': name,
                    'flowcash_id': idFlowCash,
                    'seq': Seq,
                    'date': DateDue,
                    'val_in': vlDebito,
                    'val_out': vlCredito,
                    'val_add': vlSaldo,
                    'val_sum': vlAcum,
                    'state': Status,
                }
                LineFlowId = FlowCashLine.create(cr,uid,dLineFlow,context)
                Seq = Seq + 1
                dtFinal = DateDue
        # Closing line with the accumulated totals (only when any movement
        # was written).
        if dtFinal:
            dLineFlow = {
                'name': 'Saldo Final',
                'flowcash_id': idFlowCash,
                'seq': Seq,
                #'date': dtFinal,
                'val_in': vlAcumDb,
                'val_out': vlAcumCr,
                'val_sum': vlAcum,
            }
            LineFlowId = FlowCashLine.create(cr,uid,dLineFlow,context)
        return idFlowCash
# Legacy OpenERP registration call.
account_flow_cash()
class account_flow_cash_line(osv.osv_memory):
    """
    One movement line of a generated cash-flow report, ordered by sequence.
    """
    _name = "account.flow_cash.line"
    _description = "Account flow cash line"
    _order = "seq asc, id asc"
    _columns = {
        'name': fields.char(u'Descrição', size=64),
        'flowcash_id': fields.many2one('account.flow_cash',u'Fluxo de Caixa'),
        'seq': fields.integer(u'Sq'),
        'date': fields.date(u'Data'),
        'val_in': fields.float(u'Entradas', digits_compute=dp.get_precision('Account')),
        'val_out': fields.float(u'Saídas', digits_compute=dp.get_precision('Account')),
        'val_add': fields.float(u'Saldo', digits_compute=dp.get_precision('Account')),
        'val_sum': fields.float(u'Acumulado', digits_compute=dp.get_precision('Account')),
        # Line status: realised, forecast or overdue (see TYPE_STATUS).
        'state': fields.selection([
            ('ok','OK'),
            ('pv','Prev'),
            ('at','Atra'),
            ],'St', select=True,),
        'journal_id': fields.many2one('account.journal', u'Diário',domain=['|',('type', '=', 'cash'),('type', '=', 'bank')]),
        'analytic_account_id': fields.many2one('account.analytic.account', u'Conta Analítica',),
        'sintetico': fields.boolean(u'Sintético'),
    }
    _defaults = {
        #'date': lambda *a: time.strftime('%Y-%m-%d'),
        #'val_in': lambda *a: 0,
        #'val_out': lambda *a: 0,
        #'val_add': lambda *a: 0,
        #'val_sum': lambda *a: 0,
    }
# Legacy OpenERP registration call.
account_flow_cash_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
from resourse import db
class Take(db.Model):
    """Association row linking a student to a course they take."""

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    student_id = db.Column(db.Integer)
    course_id = db.Column(db.Integer)

    def as_dict(self):
        """Return the row as a plain dict keyed by column name."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

    def __repr__(self):
        # BUG FIX: this model has no `name` column, so the original
        # '<Take {}>'.format(self.name) always raised AttributeError.
        return '<Take {}>'.format(self.id)
|
from django.conf.urls import patterns, url
from apps.missions import views
# URL routes for the missions app (legacy patterns() API, Django < 1.10).
urlpatterns = patterns('',
    #base character
    url(r'^bounties/$', views.bounties, name='bounties'),
)
|
import models
from django.contrib import admin
# Expose the trade models in the Django admin site.
admin.site.register(models.TradeRequest)
admin.site.register(models.TradeSession)
import re
# Canonical, alphabetically sorted list of recognised profession labels
# (lowercase), used as the key set for profession lookups.
all_professions_list = ['accountant',
                        'activist',
                        'actor',
                        'alchemist',
                        'american football player',
                        'animator',
                        'announcer',
                        'anthropologist',
                        'architect',
                        'art critic',
                        'art director',
                        'artist',
                        'astrologer',
                        'astronaut',
                        'athlete',
                        'attorneys in the united states',
                        'audio engineer',
                        'author',
                        'bandleader',
                        'barrister',
                        'baseball manager',
                        'baseball player',
                        'basketball player',
                        'bassist',
                        'biologist',
                        'bishop',
                        'bodybuilder',
                        'book editor',
                        'botanist',
                        'broadcaster',
                        'bureaucrat',
                        'businessperson',
                        'cantor',
                        'carpenter',
                        'cartoonist',
                        'choreographer',
                        'choreography',
                        'cinematographer',
                        'civil servant',
                        'coach',
                        'comedian',
                        'commentator',
                        'composer',
                        'conductor',
                        'conservationist',
                        'consultant',
                        'costume designer',
                        'critic',
                        'curator',
                        'dancer',
                        'diplomat',
                        'disc jockey',
                        'drummer',
                        'economist',
                        'editor',
                        'educator',
                        'electrical engineer',
                        'electronic musician',
                        'engineer',
                        'entertainer',
                        'entrepreneur',
                        'essayist',
                        'evangelist',
                        'explorer',
                        'farmer',
                        'fashion designer',
                        'fashion model',
                        'fighter pilot',
                        'film art director',
                        'film critic',
                        'film director',
                        'film editor',
                        'film producer',
                        'film score composer',
                        'financial adviser',
                        'fisherman',
                        'football player',
                        'footballer',
                        'friar',
                        'game show host',
                        'guitarist',
                        'harpsichordist',
                        'historian',
                        'humorist',
                        'ice hockey player',
                        'illustrator',
                        'impresario',
                        'insurance broker',
                        'inventor',
                        'investor',
                        'jazz composer',
                        'jazz pianist',
                        'journalist',
                        'judge',
                        'keyboard player',
                        'law professor',
                        'lawyer',
                        'legislator',
                        'librarian',
                        'librettist',
                        'lifeguard',
                        'lyricist',
                        'manager',
                        'mathematician',
                        'media proprietor',
                        'merchant',
                        'meteorologist',
                        'military aviator',
                        'military officer',
                        'missionary',
                        'model',
                        'multi-instrumentalist',
                        'music arranger',
                        'music artist',
                        'music director',
                        'musician',
                        'music producer',
                        'neurologist',
                        'novelist',
                        'orator',
                        'orchestrator',
                        'organist',
                        'pastor',
                        'peace activist',
                        'performance artist',
                        'philanthropist',
                        'philosopher',
                        'photographer',
                        'physician',
                        'physicist',
                        'pianist',
                        'pilot',
                        'pin-up girl',
                        'playback singer',
                        'playwright',
                        'poet',
                        'police officer',
                        'political activist',
                        'politician',
                        'polymath',
                        'pornographic actor',
                        'preacher',
                        'presenter',
                        'priest',
                        'production designer',
                        'professor',
                        'prophet',
                        'psychiatrist',
                        'psychoanalyst',
                        'psychologist',
                        'public speaker',
                        'publisher',
                        'rabbi',
                        'racing driver',
                        'radio personality',
                        'radio producer',
                        'rapper',
                        'record producer',
                        'revolutionary',
                        'rodeo clown',
                        'rodeo performer',
                        'roman emperor',
                        'sailor',
                        'scenic designer',
                        'science writer',
                        'scientist',
                        'screenwriter',
                        'showgirl',
                        'singer',
                        'singer-songwriter',
                        'soccer player',
                        'social activist',
                        'soldier',
                        'songwriter',
                        'sound sculptor',
                        'speechwriter',
                        'spokesperson',
                        'statesman',
                        'surveyor',
                        'swimmer',
                        'talk show host',
                        'teacher',
                        'television director',
                        'television presenter',
                        'television producer',
                        'television show host',
                        'tennis player',
                        'tentmaker',
                        'theatre director',
                        'theatrical producer',
                        'theologian',
                        'theoretical physicist',
                        'tutor',
                        'tv editor',
                        'tv personality',
                        'urban planner',
                        'violinist',
                        'violist',
                        'voice actor',
                        'warrior',
                        'writer']
profession_synonyms_map = {'accountant':'analyst;clerk;auditor;bookkeeper;comptroller;examiner;calculator;cashier;actuary;cpa;reckoner;teller',
'activist':'revolutionary;advocate;opponent',
'actor':'star;artist;character;clown;player;villain;comedian;entertainer;thespian;impersonator;amateur;ham;lead;extra;understudy;mimic;foil;mime;idol;stooge;bit player;ingenue;performer;trouper;stand-in;walk-on;play-actor;pantomimist;ventriloquist;headliner;barnstormer;soubrette;hambone;ingénue;straight person;thesp',
'alchemist':'warlock;diviner;seer;enchanter;charmer;shaman;conjurer;medium;witch;soothsayer;sorceress;clairvoyant;magician;fortune-teller;occultist;necromancer;thaumaturge;augurer',
'american football player':'',
'animator':'illustrator;artisan;craftsman;landscapist;miniaturist;dauber',
'announcer':'disc jockey;reporter;newscaster;talker;broadcaster;anchorperson;communicator;dj;telecaster;vj;deejay;leader of ceremonies',
'anthropologist':'',
'architect':'designer;artist;builder;creator;planner;engineer;inventor;originator;maker;prime mover;draftsperson;master builder',
'art critic':'craft;profession;dexterity;artistry;knowledge;adroitness;ingenuity;mastery;facility;trade;imagination;know-how;aptitude;expertise;inventiveness;knack;method;virtuosity;craftsmanship',
'art director':'craft;profession;dexterity;artistry;knowledge;adroitness;ingenuity;mastery;facility;trade;imagination;know-how;aptitude;expertise;inventiveness;knack;method;virtuosity;craftsmanship',
'artist':'inventor;artisan;composer;painter;expert;virtuoso;whiz;authority;creator;craftsperson;artiste;handicrafter',
'astrologer':'soothsayer;fortuneteller;prophet;stargazer',
'astronaut':'pilot;cosmonaut',
'athlete':'competitor;player;animal;professional;contestant;sport;jock;jockey;amateur;contender;challenger;shoulders;gorilla;games player;iron person',
'attorneys in the united states':'counsel;mouthpiece;barrister;advocate;lip;proxy;front;counselor;ambulance chaser;pleader;fixer;da',
'audio engineer':'hearing;auditory;audile;aural',
'author':'producer;creator;writer;columnist;journalist;composer;poet;reporter;ghost;originator;playwright;scribe;biographer;scribbler;ghostwriter;wordsmith;essayist;ink slinger',
'bandleader':'composer;conductor;master;teacher',
'barrister':'advocate;counsel;counselor;lawyer;solicitor',
'baseball manager':'',
'baseball player':'',
'basketball player':'ball;hoops',
'bassist':'',
'biologist':'environmentalist;conservationist;zoologist;ecologist;botanist;preservationist',
'bishop':'administrator;pontiff;pope;cleric;patriarch;director;priest;prelate;angel;metropolitan;primate;cap;coadjutor;overseer;diocesan;suffragan;archer;mitre;miter;berretta;abba',
'bodybuilder':'',
'book editor':'essay;album;novel;publication;dictionary;pamphlet;text;work;manual;textbook;fiction;volume;edition;magazine;booklet;brochure;writing;copy;tome;lexicon;periodical;portfolio;primer;dissertation;opus;handbook;reader;roll;thesaurus;tract;compendium;bible;treatise;omnibus;leaflet;encyclopedia;scroll;monograph;paperback;hardcover;nonfiction;speller;folio;codex;atlas;quarto;reprint;octavo;offprint;bestseller;opuscule;preprint;softcover;vade mecum',
'botanist':'environmentalist;conservationist;zoologist;ecologist;biologist;preservationist',
'broadcaster':'disc jockey;reporter;newscaster;talker;anchorperson;communicator;dj;telecaster',
'bureaucrat':'administrator;civil servant;politician;public servant;functionary;desk-jockey',
'businessperson':'merchant;merchandiser;tycoon;capitalist;trafficker;storekeeper;executive;employer;financier;manager;industrialist;suit;operator;entrepreneur;dealer;baron;big wheel;wheeler-dealer;small potatoes;businesswoman;businessman;big-time operator;career person;franchiser',
'cantor':'singer;leader;precentor;hazan;soloist;chanter;vocalist',
'carpenter':'laborer;builder;worker;artisan;chips;craftsperson;woodworker;cabinetmaker;carps;joiner;mason',
'cartoonist':'illustrator;artist;caricaturist;gagster',
'choreographer':'',
'choreography':'dance',
'cinematographer':'',
'civil servant':'local;governmental;domestic;national;civilian;home',
'coach':'trainer;teacher;mentor;tutor;skipper;educator',
'comedian':'entertainer;comic;humorist;clown;actor;stooge;wag;jester;jokester;joker;wit;zany;laugh;card;droll;top banana;farceur;quipster;banana;cutup;merry-andrew;million laughs;stand-up comic;wisecracker',
'commentator':'correspondent;observer;pundit;announcer;writer;critic;reviewer;analyst;interpreter;sportscaster;expositor;annotator',
'composer':'writer;songster',
'conductor':'director;maestro;manager;supervisor;master;marshal;guide',
'conservationist':'environmentalist;guardian;green;tree hugger;preservationist',
'consultant':'specialist;veteran;expert;advisor;counsel;pro;authority;maven;master;mentor;guide;freelancer',
'costume designer':'style;fashion;attire;garb;uniform;apparel;suit;wardrobe;dress;ensemble;getup;mode;guise;rig;duds;outfit;clothing;robes;livery',
'critic':'pundit;expert;cartoonist;authority;judge;reviewer;commentator;connoisseur;sharpshooter;arbiter;diagnostic;expositor;caricaturist;annotator;analyzer;evaluator',
'curator':'director;custodian;administrator;manager;conservator;keeper;guardian;steward',
'dancer':'ballet dancer;danseuse;danseur;hoofer;chorus girl;prima ballerina',
'diplomat':'expert;mediator;envoy;agent;negotiator;representative;minister;ambassador;go-between;emissary;plenipotentiary;moderator;legate;conciliator;tactician',
'disc jockey':'sphere;ring;perimeter;revolution;globe;circus;crown;amphitheater;disk;enclosure;belt;tire;record;bowl;periphery;stadium;hoop;wheel;wreath;horizon;circlet;band;aureole;compass;coil;lap;halo;cycle;circumference;ringlet;bracelet;orbit;corona;meridian;turn;vortex;circuit;round;equator;cirque;cordon;ecliptic;zodiac;colure;full turn;parallel of latitude',
'drummer': '',
'economist': '',
'editor':'proofreader;copyreader;newspaperwoman;copyholder;newspaperman;rewriter;reviser;deskman;blue-penciler',
'educator':'lecturer;professor;instructor;trainer;coach;dean;monitor;tutor;mentor;schoolteacher;educationist;department head',
'electrical engineer': '',
'electronic musician':'computerized;anodic',
'engineer':'designer;builder;director;surveyor;manager;planner;architect;inventor;originator;schemer;manipulator;techie;sights;contriver;deviser;technie',
'entertainer':'artist;player',
'entrepreneur':'manager;executive;administrator;producer;contractor;undertaker;founder;backer;businessperson;promoter;industrialist;organizer',
'essayist':'producer;creator;writer;columnist;journalist;composer;poet;reporter;ghost;originator;playwright;scribe;biographer;scribbler;ghostwriter;wordsmith;ink slinger;prose writer',
'evangelist':'pastor;missionary;minister;televangelist;revivalist;tv evangelist',
'explorer':'pilgrim;adventurer;pioneer;seeker;traveler;searcher;pathfinder;experimenter;inquisitive person',
'farmer':'rancher;peasant;laborer;producer;grower;tender;reaper;gardener;agriculturist;clodhopper;hired hand;horticulturist;villein;homesteader;gleaner;feeder;planter;agriculturalist;cropper;sower;sharecropper;tiller;agronomist;harvester;cultivator;plower;cob;breeder;country person;grazer',
'fashion designer':'tone;look;form;trend;pattern;thing;shape;mode;model;fad;chic;cut;figure;rage;make;cry;appearance;custom;convention;usage;furor;line;configuration;mold;latest;craze;vogue;dernier cri;last word;latest thing;bandwagon;cultism;cultus;faddism;in thing;newest wrinkle',
'fashion model':'tone;look;form;trend;pattern;thing;shape;mode;model;fad;chic;cut;figure;rage;make;cry;appearance;custom;convention;usage;furor;line;configuration;mold;latest;craze;vogue;dernier cri;last word;latest thing;bandwagon;cultism;cultus;faddism;in thing;newest wrinkle',
'fighter pilot':'warrior;champion;mercenary;assailant;militant;soldier;boxer;combatant;opponent;heavy;competitor;bully;rival;belligerent;contender;pugilist;aggressor;antagonist;contestant;gladiator;wildcat;tanker;warlord;gi;bruiser;brawler;scrapper;battler;disputant;slugger;duelist;punching bag;jouster;person-at-arms;serviceperson',
'film art director':'layer;fold;skin;integument;cloud;web;sheet;fabric;dusting;haze;membrane;opacity;blur;foil;leaf;veil;covering;coat;partition;nebula;transparency;scum;brume;gauze;mistiness;pellicle;haziness;obscuration',
'film critic':'layer;fold;skin;integument;cloud;web;sheet;fabric;dusting;haze;membrane;opacity;blur;foil;leaf;veil;covering;coat;partition;nebula;transparency;scum;brume;gauze;mistiness;pellicle;haziness;obscuration',
'film director':'layer;fold;skin;integument;cloud;web;sheet;fabric;dusting;haze;membrane;opacity;blur;foil;leaf;veil;covering;coat;partition;nebula;transparency;scum;brume;gauze;mistiness;pellicle;haziness;obscuration',
'film editor':'layer;fold;skin;integument;cloud;web;sheet;fabric;dusting;haze;membrane;opacity;blur;foil;leaf;veil;covering;coat;partition;nebula;transparency;scum;brume;gauze;mistiness;pellicle;haziness;obscuration',
'film producer':'layer;fold;skin;integument;cloud;web;sheet;fabric;dusting;haze;membrane;opacity;blur;foil;leaf;veil;covering;coat;partition;nebula;transparency;scum;brume;gauze;mistiness;pellicle;haziness;obscuration',
'film score composer':'layer;fold;skin;integument;cloud;web;sheet;fabric;dusting;haze;membrane;opacity;blur;foil;leaf;veil;covering;coat;partition;nebula;transparency;scum;brume;gauze;mistiness;pellicle;haziness;obscuration',
'financial adviser':'fiscal;economic;commercial;monetary;banking;business',
'fisherman':'trawler;fisher;lobsterman;troller',
'football player':'soccer;rugby',
'footballer':'',
'friar':'priest;abbot;solitary;cenobite;recluse;brother;ascetic;religious;monastic;hermit;eremite;anchorite',
'game show host':'gallant;dogged;bold;hardy;inclined;disposed;prepared;spirited;eager;interested;ready;persevering;heroic;courageous',
'guitarist':'',
'harpsichordist':'',
'historian':'professor;writer;teacher;chronicler;historiographer;annalist',
'humorist':'entertainer;clown;jester;joker;wit;comic;jokester;card;satirist;comedienne;cutup;jokesmith',
'ice hockey player':'chunk;glaze;hail;crystal;iceberg;icicle;glacier;sleet;diamonds;hailstone;permafrost;floe',
'illustrator':'cartoonist',
'impresario':'director;sponsor',
'insurance broker':'allowance;coverage;guarantee;warranty;backing;warrant;support;indemnification;cover;indemnity;safeguard;provision;assurance',
'inventor':'designer;founder;author;builder;creator;architect;maker;innovator;originator;pioneer;father;experimenter;coiner',
'investor':'lender;shareholder;banker;backer;capitalist;stockholder;venture capitalist',
'jazz composer':'bop;swing;blues;boogie;boogie-woogie;ragtime;dixieland;jive;bebop',
'jazz pianist':'bop;swing;blues;boogie;boogie-woogie;ragtime;dixieland;jive;bebop',
'journalist':'correspondent;reporter;writer;columnist;press;commentator;announcer;publicist;cub;hack;contributor;scribe;pencil pusher;editor;broadcaster;scrivener;newsperson;stringer',
'judge':'referee;court;expert;justice;authority;critic;inspector;umpire;moderator;negotiator;peacemaker;interpreter;bench;intermediary;intercessor;judiciary;arbiter;warden;magistrate;honor;marshal;adjudicator;assessor;conciliator;chancellor;evaluator;reconciler;appraiser;ombudsman;justice of peace;legal official;magister',
'keyboard player':'manual;piano;console;clavier;ivories;blacks and whites',
'law professor':'case;statute;requirement;code;constitution;charter;mandate;decision;act;legislation;decree;precedent;regulation;ruling;charge;measure;order;covenant;enactment;injunction;canon;summons;precept;behest;dictate;ordinance;prescript;commandment;equity;command;warrant;edict;notice;garnishment;bidding;demand;prescription;institute;instruction;assize;divestiture;writ;subpoena;caveat;jurisprudence;bylaw;decretum;due process;reg',
'lawyer':'counselor;advocate;proctor;attorney;practitioner;barrister;mouthpiece;counsel;solicitor;defender;jurist;jurisprudent;procurator;counsellor;pleader',
'legislator':'administrator;deputy;lawmaker;member;representative;senator;leader;parliamentarian;council member;lawgiver;aldermember;assemblymember',
'librarian':'curator;cataloger',
'librettist':'author;writer;dramatist;scenarist;tragedian;dramaturge',
'lifeguard':'',
'lyricist':'poet;musician;composer;songwriter;lyrist;music writer',
'manager':'administrator;official;supervisor;producer;executive;boss;director;officer;superintendent;organizer;head;comptroller;conductor;exec;governor;proprietor;overseer;straw boss;controller;handler;head person;slavedriver;zookeeper',
'mathematician':'',
'media proprietor':'news;television;radio;correspondence;disclosure;cable;announcement;intelligence;communications;expression;publishing;announcing',
'merchant':'shopkeeper;trafficker;vendor;trader;broker;seller;operator;shipper;dealer;tycoon;businessperson;storekeeper;salesperson;exporter;retailer;wholesaler;sender;handler;jobber;consigner;marketer;tradesperson',
'meteorologist':'witch;seer;prognosticator;prophesier;diviner;medium;sibyl;astrologer;forecaster;reader;wizard;bard;fortuneteller;soothsayer;oracle;augur;auspex;magus;clairvoyant;sorcerer;ovate;palmist;druid;predictor',
'military aviator':'army;fighting;militant;martial;combatant;aggressive;armed;combative;warlike;militaristic;soldierly;warmongering',
'military officer':'army;fighting;militant;martial;combatant;aggressive;armed;combative;warlike;militaristic;soldierly;warmongering',
'missionary':'evangelist;pastor;preacher;messenger;clergy;teacher;apostle;missioner;promoter;herald;propagandist;minister;clergyperson;converter;revivalist;proselytizer',
'model':'miniature;exemplary;perfect;dummy;classical;classic;standard;facsimile;imitation;copy;representative;archetypal;commendable;flawless;illustrative',
'multi-instrumentalist':'',
'music arranger':'soul;rap;rock;melody;piece;singing;tune;hymn;modern;classical;song;bop;chamber;jazz;measure;fusion;refrain;air;strain;harmony;swing;acoustic;folk;hard rock;instrumental;popular;opera;ragtime;plainsong;bebop;a cappella;heavy metal;rock and roll',
'music artist':'soul;rap;rock;melody;piece;singing;tune;hymn;modern;classical;song;bop;chamber;jazz;measure;fusion;refrain;air;strain;harmony;swing;acoustic;folk;hard rock;instrumental;popular;opera;ragtime;plainsong;bebop;a cappella;heavy metal;rock and roll',
'music director':'soul;rap;rock;melody;piece;singing;tune;hymn;modern;classical;song;bop;chamber;jazz;measure;fusion;refrain;air;strain;harmony;swing;acoustic;folk;hard rock;instrumental;popular;opera;ragtime;plainsong;bebop;a cappella;heavy metal;rock and roll',
'musician':'conductor;artist;virtuoso;player;composer;entertainer;performer;soloist;instrumentalist;vocalist;artiste;diva',
'music producer':'soul;rap;rock;melody;piece;singing;tune;hymn;modern;classical;song;bop;chamber;jazz;measure;fusion;refrain;air;strain;harmony;swing;acoustic;folk;hard rock;instrumental;popular;opera;ragtime;plainsong;bebop;a cappella;heavy metal;rock and roll',
'neurologist':'',
'novelist':'writer;storyteller;author;fictionist',
'orator':'preacher;lecturer;rhetorician;declaimer;public speaker;sermonizer;lector;reciter;pontificator',
'orchestrator':'instigator;troublemaker;mastermind;chieftain;head;captain;spokesperson;ruler;general;inciter;chief;agitator;commander;skipper;president;boss;brains;head honcho',
'organist':'',
'pastor':'cleric;vicar;preacher;priest;minister;rector;divine;parson;ecclesiastic;shepherd;reverend',
'peace activist':'reconciliation;accord;love;unity;truce;friendship;conciliation;concord;union;amity;treaty;neutrality;cessation;unanimity;order;armistice;pacification;pacifism',
'performance artist':'work;act;achievement;conduct;completion;enforcement;fruition;consummation;administration;attainment;exploit;fulfillment;realization;discharge;execution;feat;doing;pursuance;carrying out',
'philanthropist':'contributor;donor;benefactor;patron;helper;do-gooder;good samaritan;bleeding heart;altruist',
'philosopher':'logician;sage;savant;theorist;sophist;wise person',
'photographer':'paparazzo;photojournalist;shutterbug',
'physician':'specialist;doctor;quack;medic;intern;healer;general practitioner;surgeon;md;doc;bones;sawbones',
'physicist':'analyst;expert;examiner;prober;chemist;tester',
'pianist':'',
'pilot':'flier;navigator;captain;aviator;leader;scout;jockey;dean;conductor;lead;bellwether;director;ace;flyer;guide;eagle;aerialist;coxswain;steerer;aeronaut;doyen/doyenne',
'pin-up girl':'babe;angel;doll;broad;honey;chick;bathing beauty;beauty queen;cover girl;sex kitten;tomato;centerfold;peach;fox;cupcake;bunny;cutie;cutie-pie;dollface;dream girl;dreamboat',
'playback singer':'',
'playwright':'author;writer;dramatist;scenarist;librettist;tragedian',
'poet':'lyricist;writer;dramatist;author;artist;dilettante;rhymer;bard;versifier;maker;poetaster;parodist;lyrist;librettist;odist',
'police officer':'detective;force;law enforcement;man;corps;bluecoat;pig;blue;law;badge;patrolman;bear;heat;bull;cop;bobby;constable;fuzz;gumshoe;copper;constabulary;fed;oink;officers;narc;flatfoot;gendarme;boy scout;county mounty',
'political activist': '',
'politician':'lawmaker;senator;leader;legislator;partisan;boss;speaker;orator;president;chieftain;public servant;officeholder;grandstander;democrat;republican;baby-kisser;congressperson;handshaker',
'polymath':'educated;scientific;studied;scholarly;accomplished;sound;experienced;versed;cultured;lettered;cultivated;grave;studious;well-grounded;well-read;well-rounded;well-educated;grounded;posted;in the know;pansophic;philosophic;professorial',
'pornographic actor':'immoral;lewd;salacious;indecent;sexy;actress',
'preacher':'cleric;evangelist;missionary;clergy;parson;divine;ecclesiastic;minister;clerical;reverend;sermonizer;revivalist',
'presenter':'contributor;benefactor;patron;backer;angel;subscriber;philanthropist;savior;benefactress;santa claus;donator;altruist;almsgiver;bestower;conferrer;grantor;heavy hitter',
'priest':'cleric;father;monk;preacher;elder;rector;vicar;curate;divine;ecclesiastic;pontiff;clergyperson;father confessor;man of god;lama;friar;padre;holy man;man of the cloth',
'production designer':'manufacture;management;construction;manufacturing;bearing;generation;creation;rendering;assembly;giving;presentation;direction;return;protraction;origination;provision;fabrication;prolongation;blossoming;elongation;yielding;preparation;reproduction;making;producing;formulation;staging;lengthening;fructification;authoring;engendering;extention',
'professor':'lecturer;assistant;fellow;tutor;educator;instructor;teacher;principal;pundit;sage;egghead;savant;brain;pedagogue;faculty member;prof;rocket scientist;quant',
'prophet':'witch;seer;prognosticator;prophesier;diviner;medium;sibyl;astrologer;forecaster;reader;wizard;bard;fortuneteller;soothsayer;oracle;augur;auspex;magus;clairvoyant;sorcerer;ovate;palmist;druid;meteorologist;predictor;evocator;haruspex;horoscopist;seeress;tea-leaf reader',
'psychiatrist':'doctor;therapist;psychoanalyst;shrink;psychotherapist;psychologist;clinician;disorders analyst',
'psychoanalyst':'therapist;analyst;shrink;psychotherapist',
'psychologist':'doctor;therapist;psychoanalyst;shrink;psychotherapist;clinician',
'public speaker':'urban;mutual;civic;civil;national;communal;governmental;social;popular;universal;city;metropolitan;government;country;free;state;common;open;accessible;municipal;unrestricted;widespread;conjoint;open-door;federal;conjunct;free to all;intermutual;not private;without charge',
'publisher': '',
'rabbi':'priest;teacher;master;rabbin',
'racing driver':'hurrying;flying;running;fast;dashing;swift;rushing;speedy;darting;sailing;zooming;whisking;hastening;whizzing;tearing;galloping',
'radio personality':'transmission;receiver;wireless;walkman;telegraphy;radiotelegraphy;telephony;radiotelegraph;marconi;radiotelephone;am-fm;cb',
'radio producer':'transmission;receiver;wireless;walkman;telegraphy;radiotelegraphy;telephony;radiotelegraph;marconi;radiotelephone;am-fm;cb',
'rapper':'',
'record producer':'story;evidence;note;file;testimony;report;document;transcript;history;inscription;manuscript;memorandum;memo;remembrance;archive;witness;memorial;log;script;writing;annals;archives;memoir;trace;journal;diary;legend;chronicle;register;jacket;almanac;monument;directory;entry;minutes;scroll;track record;documentation;transcription;registry;comic book;paper trail;swindle sheet;written material',
'revolutionary':'subversive;radical;insurgent;rebel;rioting;anarchistic',
'rodeo clown':'festival;roundup;exhibition;competition;enclosure',
'rodeo performer':'festival;roundup;exhibition;competition;enclosure',
'roman emperor':'classic;academic;attic;bookish;canonical;humanistic;latin;hellenic;doric;greek;scholastic;ionic;grecian;augustan;homeric',
'sailor':'cadet;marine;pilot;swab;tarpaulin;mate;jack;seafarer;navigator;mariner;salt;pirate;hearty;sea dog;boater;diver;bluejacket;shipmate;tar;windjammer;middy;lascar;able-bodied sailor;circumnavigator;deck hand;midshipman/woman;old salt',
'scenic designer':'panoramic;breathtaking;dramatic;spectacular;grand;impressive',
'science writer':'discipline;information;art;technique;system;learning;skill;education;lore;scholarship;erudition;branch;wisdom;body of knowledge',
'scientist':'analyst;expert;examiner;prober;physicist;chemist;tester;lab technician',
'screenwriter':'correspondent;author;reporter;critic;columnist;poet;novelist;dramatist;journalist;contributor;scribe;editor;biographer;essayist;wordsmith;scribbler;stenographer;ghostwriter;freelancer;stringer;newspaper person;person of letters;scripter',
'showgirl': '',
'singer':'artist;musician;voice;troubadour;songbird;songster;minstrel;diva;soloist;crooner;vocalist;artiste;warbler;chorister;accompanist;chanter;nightingale;chanteuse',
'singer-songwriter':'writer;songster',
'soccer player':'rugby;american football',
'social activist':'civil;communal;cordial;group;familiar;collective;community;general;common;societal;sociable;nice;amusing;communicative;companionable;convivial',
'soldier':'fighter;mercenary;guerrilla;veteran;guard;officer;volunteer;marine;pilot;warrior;cadet;infantry;recruit;private;gunner;scout;rank;warmonger;paratrooper;trooper;commando;draftee;musketeer;conscript;gi;green beret;airforce member',
'songwriter':'poet;musician;composer;lyrist',
'sound sculptor':'vigorous;sturdy;solid;vibrant;safe;thorough;flawless;sane;stable;robust;intact;firm;entire;right;fit;well;hale;perfect;total;whole',
'speechwriter':'',
'spokesperson':'deputy;delegate;champion;mediator;prophet;mouthpiece;agent;protagonist;mouth;representative;speaker;substitute;talker;stand-in;prolocutor',
'statesman':'lawmaker;politician;legislator',
'surveyor':'mapmaker;cartographer;assessor;measurer',
'swimmer':'',
'talk show host':'lecture;dissertation;homily;exhortation;harangue;descant;recitation;declamation;spiel;discourse;allocution;oration;monologue;epilogue;sermon;disquisition;prelection;screed;peroration;expatiation;chalk talk',
'teacher':'lecturer;supervisor;scholar;assistant;professor;tutor;coach;educator;instructor;trainer;adviser;preceptor;pundit;mentor;pedagogue;teach;disciplinarian;guide;schoolteacher;faculty member;abecedary',
'television director':'tv set;box;station;video;eye;tv;tube;receiver;baby-sitter;boob tube;idiot box;small screen;audio;telly;vid',
'television presenter':'tv set;box;station;video;eye;tv;tube;receiver;baby-sitter;boob tube;idiot box;small screen;audio;telly;vid',
'television producer':'tv set;box;station;video;eye;tv;tube;receiver;baby-sitter;boob tube;idiot box;small screen;audio;telly;vid',
'television show host':'tv set;box;station;video;eye;tv;tube;receiver;baby-sitter;boob tube;idiot box;small screen;audio;telly;vid',
'tennis player':'',
'tentmaker':'',
'theatre director':'troupe;theater;drama;histrionics;dramatics;theatrics;dramaturgy;footlights;histrionism',
'theatrical producer':'exaggerated;melodramatic;showy;comic;thespian;vaudeville;amateur;ham;show;affected;artificial;ceremonious;histrionic;legitimate;mannered;meretricious;schmaltzy;theatric;campy;operatic;hammy;staged',
'theologian':'cleric;clergy;philosopher;scholar;divine;curate;ecclesiastic',
'theoretical physicist':'imaginative;abstract;metaphysical;intellectual;vague;academic;philosophical;logical;analytical;speculative;assumed',
'tutor':'lecturer;educator;instructor;teacher;mentor;coach;preceptor;guardian;governor;grind;teach;guide;prof;private teacher',
'tv editor':'box;baby-sitter;eye;tube;station;video;receiver;tv set;telly;audio;vid',
'tv personality':'box;baby-sitter;eye;tube;station;video;receiver;tv set;telly;audio;vid',
'urban planner':'downtown;civil;metropolitan;civic',
'violinist':'',
'violist':'',
'voice actor':'sound;speech;cry;tone;statement;exclamation;vent;words;song;modulation;articulation;inflection;roar;mutter;delivery;vocalization;shout;call;murmur;intonation;yell;vociferation;utterance;tongue',
'warrior':'soldier;fighter;hero;champion;combatant;conscript;trooper;gi;battler',
'writer':'correspondent;author;reporter;critic;columnist;poet;novelist;dramatist;journalist;contributor;scribe;editor;biographer;essayist;screenwriter;wordsmith;scribbler;stenographer;ghostwriter;freelancer;stringer;newspaper person;person of letters;scripter',
}
def get_similarity_words(profession):
    """Tokenize *profession* into lowercase word tokens.

    Splits on any non-word character, e.g. "Disc Jockey" -> ["disc", "jockey"].
    """
    lowered = profession.lower()
    return re.findall(r"[\w]+", lowered)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from recordclass import recordclass
from ortools.algorithms import pywrapknapsack_solver
from collections import deque
# Mutable namedtuple-like records from the third-party `recordclass` package.
# Item: one knapsack item (original index, value, weight).
# Node: one branch-and-bound search-tree node (tree depth, accumulated value
# and weight, and the list of original item indices chosen so far).
Item = recordclass('Item', 'index value weight')
Node = recordclass('Node', 'level value weight items')
def solve_it(input_data):
    """Dispatch to the currently active knapsack strategy.

    Alternatives available in this module: solve_it_dynamic_programming,
    solve_using_solver and solve_it_branch_bound_best_first (unfinished).

    :param input_data: Knapsack Problem Dataset
    :return: formatted output string from the chosen solver
    """
    return solve_it_branch_bound_breadth_first(input_data)
def solve_it_branch_bound_breadth_first(input_data):
    """Solve the 0/1 knapsack instance with breadth-first branch and bound.

    :param input_data: text whose first line is "<item_count> <capacity>" and
        whose following lines are "<value> <weight>" pairs, one per item
    :return: output string "<best value> 0" followed by a 0/1 take-vector line
    """
    lines = input_data.split('\n')
    first_line = lines[0].split()
    item_count = int(first_line[0])
    capacity = int(first_line[1])
    items = []
    for i in range(1, item_count + 1):
        line = lines[i]
        parts = line.split()
        items.append(Item(i - 1, int(parts[0]), int(parts[1])))
    # The input is assumed sorted, but may not be, so sort here: ascending
    # weight/value ratio == descending value density, which bound() relies on.
    items = sorted(items, key=lambda Item: Item.weight / Item.value)
    # Root node with everything zeroed (level -1 means no item decided yet).
    v = Node(level=-1, value=0, weight=0, items=[])
    Q = deque([])
    Q.append(v)
    max_value = 0
    best_items = []
    while len(Q):
        v = Q[0]
        # print(Q)
        Q.popleft()
        # Child 1: include items[u.level] in the knapsack.
        u = Node(level=None, weight=None, value=None, items=[])
        u.level = v.level + 1
        u.weight = v.weight + items[u.level].weight
        u.value = v.value + items[u.level].value
        u.items = list(v.items)
        u.items.append(items[u.level].index)
        if u.weight <= capacity and u.value > max_value:
            # New feasible incumbent solution.
            max_value = u.value
            best_items = u.items
        bound_u = bound(u, capacity, item_count, items)
        if bound_u > max_value:
            # Only explore subtrees whose optimistic bound can beat the incumbent.
            Q.append(u)
        # Child 2: exclude items[u.level] (same level, weight/value unchanged).
        u = Node(level=None, weight=None, value=None, items=[])
        u.level = v.level + 1
        u.weight = v.weight
        u.value = v.value
        u.items = list(v.items)
        bound_u = bound(u, capacity, item_count, items)
        if bound_u > max_value:
            Q.append(u)
    # Convert the list of chosen original item indices to a 0/1 take-vector.
    taken = [0] * len(items)
    for i in range(len(best_items)):
        taken[best_items[i]] = 1
    # Trailing 0 = "optimality not proven" flag in the course output format.
    output_data = str(max_value) + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, taken))
    return output_data
def bound(node, capacity, item_count, items):
    """Upper bound (fractional-knapsack relaxation) on the best value
    reachable from *node*.

    :param node: search-tree node with .level, .value and .weight
    :param capacity: knapsack capacity
    :param item_count: total number of items
    :param items: items sorted by ascending weight/value ratio
    :return: 0 when the node is already over capacity; otherwise the node's
        value plus a greedy whole-item fill and a fractional share of the
        first item that no longer fits
    """
    if node.weight >= capacity:
        return 0
    estimate = node.value
    total_weight = node.weight
    next_idx = node.level + 1
    # Greedily take whole items while they still fit.
    while next_idx < item_count and total_weight + items[next_idx].weight <= capacity:
        total_weight = total_weight + items[next_idx].weight
        estimate = estimate + items[next_idx].value
        next_idx = next_idx + 1
    # Fill leftover space with a fraction of the next item, if one remains.
    if next_idx <= item_count - 1:
        estimate = estimate + (capacity - total_weight) * items[next_idx].value / items[next_idx].weight
    return estimate
def solve_it_branch_bound_best_first(input_data):
    """Placeholder for a best-first branch-and-bound solver (not implemented).

    :param input_data: Knapsack Problem Dataset (currently unused)
    :return: None
    """
    return None
def solve_using_solver(input_data):
    """Solve the instance with Google OR-Tools' knapsack solver.

    Very easy to use and finishes almost instantly, but using it for this
    assignment feels like a cheat code -- kept mostly for reference.

    :param input_data: Knapsack Problem Dataset
    :return: output string "<value> 1" followed by a 0/1 take-vector line
    """
    header, *rest = input_data.split('\n')
    item_count, capacity = (int(tok) for tok in header.split())
    value_list = []
    weight_list = []
    for row in rest[:item_count]:
        value, weight = map(int, row.split())
        value_list.append(value)
        weight_list.append(weight)
    solver = pywrapknapsack_solver.KnapsackSolver(
        pywrapknapsack_solver.KnapsackSolver.
        KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER, 'KnapsackExample')
    # The OR-Tools API is multidimensional: weights/capacities are lists of
    # dimensions; this problem has exactly one dimension.
    values = value_list
    weights = [weight_list]
    capacities = [capacity]
    solver.Init(values, weights, capacities)
    computed_value = solver.Solve()
    packed_items = []
    packed_weights = []
    total_weight = 0
    print('Total value =', computed_value)
    for idx in range(len(values)):
        if solver.BestSolutionContains(idx):
            packed_items.append(idx)
            packed_weights.append(weights[0][idx])
            total_weight += weights[0][idx]
    print('Total weight:', total_weight)
    print('Packed items:', packed_items)
    print('Packed_weights:', packed_weights)
    taken = [0] * item_count
    for idx in packed_items:
        taken[idx] = 1
    # Trailing 1 = "proven optimal" flag in the course output format.
    output_data = str(computed_value) + ' ' + str(1) + '\n'
    output_data += ' '.join(map(str, taken))
    return output_data
def solve_it_dynamic_programming(input_data):
    """Solve the 0/1 knapsack instance with bottom-up dynamic programming.

    Exact, but slow on large instances because the entire
    (item_count + 1) x (capacity + 1) table is materialized; a node-based
    search scales better.

    :param input_data: text whose first line is "<item_count> <capacity>" and
        whose following lines are "<value> <weight>" pairs, one per item
    :return: output string "<optimal value> 1" followed by a 0/1 take-vector
    """
    lines = input_data.split('\n')
    first_line = lines[0].split()
    item_count = int(first_line[0])
    capacity = int(first_line[1])
    weight_list = []
    value_list = []
    for i in range(item_count):
        # was lines[1:][i], which re-sliced the whole list every iteration
        value, weight = map(int, lines[i + 1].split())
        weight_list.append(weight)
        value_list.append(value)

    def knapsack(capacity, weight_list, value_list, item_count):
        """Fill the DP table and return (optimal value, table).

        matrix[i][w] = best value achievable using the first i items
        with remaining capacity w.
        """
        # Row 0 / column 0 stay at their initialized 0 (nothing can be taken),
        # so the loops start at 1 and the old zero-assign branch is unnecessary.
        matrix = [[0 for _ in range(capacity + 1)] for _ in range(item_count + 1)]
        for i in range(1, item_count + 1):
            for w in range(1, capacity + 1):
                if weight_list[i - 1] <= w:
                    # Either take item i-1 (and solve the reduced capacity) or skip it.
                    matrix[i][w] = max(value_list[i - 1] + matrix[i - 1][w - weight_list[i - 1]],
                                       matrix[i - 1][w])
                else:
                    matrix[i][w] = matrix[i - 1][w]
        return matrix[item_count][capacity], matrix

    result, matrix = knapsack(capacity, weight_list, value_list, item_count)
    print('result', result)
    # Backtrack through the table to recover which items were selected.
    taken = [0] * item_count  # was [0] * len([0 for _ in range(item_count)])
    curr_item = item_count - 1
    while curr_item >= 0:
        if matrix[curr_item][capacity] != matrix[curr_item + 1][capacity]:
            # A value change from the row above means item curr_item was taken.
            taken[curr_item] = 1
            capacity = capacity - weight_list[curr_item]
        curr_item = curr_item - 1
    # prepare the solution in the specified output format
    output_data = str(result) + ' ' + str(1) + '\n'
    output_data += ' '.join(map(str, taken))
    return output_data
def solve_it_default(input_data):
    """Baseline example solver: greedy fill in input order.

    Items are taken one by one until the next one no longer fits.

    :param input_data: Knapsack Problem Dataset
    :return: output string "<value> 0" followed by a 0/1 take-vector line
    """
    # parse the input
    lines = input_data.split('\n')
    header = lines[0].split()
    item_count = int(header[0])
    capacity = int(header[1])

    items = []
    for row_idx in range(1, item_count + 1):
        fields = lines[row_idx].split()
        items.append(Item(row_idx - 1, int(fields[0]), int(fields[1])))

    # a trivial greedy algorithm: take items in-order while they still fit
    value = 0
    weight = 0
    taken = [0] * len(items)
    for item in items:
        if weight + item.weight <= capacity:
            taken[item.index] = 1
            value += item.value
            weight += item.weight

    # prepare the solution in the specified output format
    output_data = str(value) + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, taken))
    return output_data
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        # Read the instance file named on the command line and solve it.
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            print(solve_it(input_data_file.read()))
    else:
        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)')

# TODO: reference implementation, https://github.com/Newmu/Discrete-Optimization-Coursera/blob/master/knapsack/solver.py
# TODO: genetic algorithm approach, https://aitch25.tistory.com/13
# TODO: branch and bound, https://www.geeksforgeeks.org/implementation-of-0-1-knapsack-using-branch-and-bound/
|
import pygame
from block import Block
from ball import Ball
import constants

# Two-player pong: arrow keys drive the right paddle, W/S the left one.
# The first player to reach end_score points ends the game.

# setup screen
pygame.init()
screen_size = [750, 750]
screen = pygame.display.set_mode(screen_size)
# Vertical center line dividing the two halves of the court.
line_pos = [(screen_size[0]/2, 0), (screen_size[0]/2, screen_size[1])]

# setup blocks (the paddles)
block_width = 15
block_height = screen_size[0] / 5
block_speed = 10
h_gap = 35  # horizontal gap between each paddle and its screen edge
left_rect_init = [h_gap, screen_size[1] / 2 - block_height / 2, block_width, block_height]
right_rect_init = [screen_size[0]-block_width-h_gap, screen_size[1] / 2 - block_height / 2, block_width, block_height]
left_block = Block(screen_size, left_rect_init, block_speed)
right_block = Block(screen_size, right_rect_init, block_speed)

# setup pong ball
ball_pos_init = [screen_size[0] / 2, screen_size[0] / 2]
# diameter should be able to fit inside the h_gap
ball_radius = 10
ball_speed_bounds = (10, 15)
ball = Ball(screen_size, ball_radius, speed_bounds=ball_speed_bounds)

# scoreboard
left_score = 0
right_score = 0
end_score = 20

running = True
clock = pygame.time.Clock()
while running:
    # Window close button ends the game loop.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    # Continuous key state: holding a key keeps the paddle moving each frame.
    keys = pygame.key.get_pressed()
    if keys[pygame.K_UP]:
        right_block.move(upwards=True)
    if keys[pygame.K_DOWN]:
        right_block.move(upwards=False)
    if keys[pygame.K_w]:
        left_block.move(upwards=True)
    if keys[pygame.K_s]:
        left_block.move(upwards=False)

    # ball.move() returns a score delta; its sign decides who gets the point
    # (negative -> left player scores here) -- assumption, confirm in Ball.move.
    delta_score = ball.move()
    ball.check_collide(left_block)
    ball.check_collide(right_block)
    if delta_score < 0:
        left_score += 1
    elif delta_score > 0:
        right_score += 1
    if left_score == end_score or right_score == end_score:
        running = False

    # Redraw the whole frame: court, center line, paddles, ball.
    screen.fill(constants.BLACK)
    pygame.draw.line(screen, constants.WHITE, *line_pos)
    pygame.draw.rect(screen, constants.WHITE, left_block.rect)
    pygame.draw.rect(screen, constants.WHITE, right_block.rect)
    ball.draw(screen)
    pygame.display.update()
    clock.tick(50)  # cap the loop at 50 frames per second

print(f"Score: {left_score} - {right_score}")
pygame.quit()
|
import matplotlib.pyplot as plt
import numpy as np
# Stacked-area demo: random yearly amounts for three regions, 2000-2009.
idx = np.arange(10)
dat = np.random.randint(0, 500, size=(3, idx.size))
yrs = 2000 + idx

fig, axes = plt.subplots(figsize=(5, 3))
axes.stackplot(yrs, dat, labels=["luzon", "visayas", "mindanao"])
axes.set_title("blebleble")
axes.set_ylabel("blelbe amount")
axes.legend(loc="upper left")
axes.set_xlim(xmin=yrs[0], xmax=yrs[-1])
fig.tight_layout()
plt.show()
|
def args_count(*args, **kwargs):
    """Return the total count of arguments passed (positional + keyword).

    >>> args_count(1, 2, 3)
    3
    >>> args_count(1, 2, 3, 10)
    4
    """
    # def instead of a lambda assigned to a name (PEP 8 E731); the spec text
    # that used to sit in a stray module-level string now lives in the docstring.
    return len(args) + len(kwargs)
|
from .ami import get_by_tag
import boto3
import six
import os.path
from botocore.configloader import raw_config_parse
class Config:
    """Layered spotr configuration.

    Values come from the ``[config]`` section of an ini-style file (default
    ``~/.spotr/config``) and are overridden by any truthy attribute of the
    parsed command-line *args*.
    """

    def __init__(self, client, args, config_file_path = "~/.spotr/config"):
        """
        :param client: EC2 client, used lazily to resolve the AMI by tag
        :param args: argparse.Namespace (or similar); truthy attributes
            override file-based settings
        :param config_file_path: path of the ini-style config file
        """
        self.client = client
        config_file_path = os.path.expanduser(config_file_path)
        if os.path.isfile(config_file_path):
            self._config = raw_config_parse(config_file_path)['config']
        else:
            self._config = {}
        # Command-line values win, but only when truthy (unset flags are None).
        # Plain .items() replaces six.iteritems -- six is a py2 relic here.
        self._config.update({k: v for k, v in vars(args).items() if v})

    def set_az(self, az):
        # The availability zone is discovered at runtime, not configured.
        self._config['az'] = az

    @property
    def ami_tag(self):
        return self._config.get('ami_tag', 'spotr')

    @property
    def instance_tag(self):
        return self._config.get('instance_tag', 'spotr')

    @property
    def type(self):
        # Instance type is mandatory -- there is no sensible default.
        return self._get_required('type')

    @property
    def max_bid(self):
        return self._get_required('max_bid')

    @property
    def ami(self):
        # Resolved lazily (one EC2 lookup) and cached for later accesses.
        if 'ami' not in self._config:
            self._config['ami'] = get_by_tag(self.client, self.ami_tag)
        return self._config['ami']

    @property
    def key_name(self):
        return self._config.get('key_name', 'spotr')

    @property
    def az(self):
        return self._config['az']

    @property
    def security_group_id(self):
        return self._config.get('security_group_id')

    @property
    def subnet_id(self):
        return self._config.get('subnet_id')

    def _get_required(self, key):
        """Return the value for *key*, raising RuntimeError when unset/falsy."""
        if not self._config.get(key):
            raise RuntimeError("Missing required parameter: {0}".format(key))
        return self._config.get(key)
|
import numpy as np
from numpy import linalg as LA
import math
def f(n, x):
    """Normalized sine eigenfunction: sqrt(2) * sin(pi * n * x)."""
    amplitude = math.sqrt(2)
    return amplitude * math.sin(np.pi * n * x)
def f2(n, x):
    """Second derivative of f via a central finite difference, step h = 0.01."""
    h = 0.01
    return (f(n, x + h) - 2 * f(n, x) + f(n, x - h)) / (h * h)
# Spatial grid: the 99 interior points of (0, 1) with spacing 0.01.
x = np.arange(0.01, 1, 0.01)
def ex2_3():
    """Compare the finite-difference Laplacian of f(1, x) with the analytic
    second derivative -pi^2 * f(1, x) on the global grid ``x``.

    :return: (My, k2y, bias, h2M) -- matrix-applied derivative, analytic
        derivative, per-point relative deviation, and the 99x99 matrix.
    """
    y = [f(1, xi) for xi in x]
    k2y = [-(np.pi * np.pi) * f(1, xi) for xi in x]
    # Tridiagonal central-difference matrix: -2/h^2 on the diagonal and
    # 1/h^2 on both off-diagonals (h = 0.01), built as a flat list first.
    entries = []
    for row in range(1, 100):
        for col in range(1, 100):
            if row == col:
                entries.append(-2 / (0.01 * 0.01))
            elif abs(row - col) == 1:
                entries.append(1 / (0.01 * 0.01))
            else:
                entries.append(0)
    h2M = np.array([entries])
    h2M = h2M.reshape(99, 99)
    My = h2M @ y
    # Relative deviation: |difference| over the mean magnitude of both values.
    bias = [abs(My[i] - k2y[i]) / (abs(My[i] + k2y[i]) / 2) for i in range(0, 99)]
    return My, k2y, bias, h2M
def do2_3():
    """Run ex2_3, publish My/h2M as module globals, and print the bias."""
    global My, h2M
    My, _analytic, deviation, h2M = ex2_3()
    print("bias\n", deviation, '\n')
def do2_4():
    """Eigen-decompose the global difference matrix h2M and print the result."""
    matrix = np.array(h2M)
    eigenvalues, eigenvectors = LA.eig(matrix)
    print("w\n", eigenvalues)
    print("v\n", eigenvectors, '\n')
|
import turtle  # imports the turtle library
import random  # imports the random library

scr = turtle.Screen()  # goes into turtle library and calls screen function
trt = turtle.Turtle()  # creates turtle

def little_draw():
    """Draw one filled circle of random size at a random on-screen position."""
    # Batch drawing updates so the circle appears quickly instead of animating.
    scr.tracer(10, 0)
    myx = random.randrange(-360, 360)
    myy = random.randrange(-360, 360)
    randsize = random.randrange(50, 100)
    trt.goto(myx, myy)  # sends turtle to random x and y
    trt.begin_fill()
    trt.circle(randsize)
    trt.end_fill()

scr.listen()  # readies screen events
scr.update()  # refreshes the screen
scr.onkey(little_draw, "a")  # pressing "a" draws another random circle
scr.mainloop()  # keeps screen looping
|
import zipfile
from bs4 import BeautifulSoup
import requests
import re
import webbrowser
import datetime
import time
from extra import *
# Scrape the newest row of whoisdownload's newly-registered-domains table and,
# if that file was created today, download and unpack it via the helpers
# imported from `extra` (download_file, Unzip).
web = requests.get("https://www.whoisdownload.com/newly-registered-domains")
soup = BeautifulSoup(web.text, "html.parser")
table = soup.find('table', class_="cart_table table table-striped table-bordered")
tbody = table.find('tbody').find('tr')
file_name = tbody.find('td').text
# Third <td> of the row holds the creation date; skip its 5-char label prefix.
creation_date = tbody.find('td').findNext('td').findNext('td').text[5::]
# NOTE(review): "%x" is locale-dependent (MM/DD/YY under the C locale); the
# [0:5] slice assumes that format -- confirm before running in another locale.
current_date = datetime.datetime.now().strftime("%x").replace("/", "-")[0:5]
if creation_date == current_date:
    downloaded_file = tbody.find('div', class_="add_cart").find('a').get('href')
    download_file(downloaded_file)
    time.sleep(5)
    # Fixed typo in the user-facing message ("downloded").
    print("File downloaded!")
    time.sleep(1)
    Unzip()
    print("Done.")
else:
    # Fixed grammar in the user-facing message ("no any update file").
    print("There is no update file.")
|
import os
import sys
import nltk
import curses
import json
import urllib3
import lxml
import re
import ast
import time
import operator
import string
from multiprocessing import Process, Manager, Pool
from requests import get
from pyphen import Pyphen
from nltk.corpus import cmudict
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.wsd import lesk
from curses.ascii import isdigit
from wiktionaryparser import WiktionaryParser
from bs4 import BeautifulSoup
from pywsd.lesk import simple_lesk
from tensefinder import changeTense
# Decorator
# Decorator
def time_it(func):
    """Decorator that prints how long each call to *func* took.

    functools.wraps preserves the wrapped function's __name__/__doc__,
    which the original decorator clobbered.
    """
    from functools import wraps  # local import: the file header lacks functools

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        r = func(*args, **kwargs)
        end_time = time.time() - start_time
        print(func.__name__ + " took " + str(end_time) + " seconds ")
        return r
    return wrapper
# Definitions
def Definition(s_word, measures, sent):
    """Store the WordNet definition of *s_word* (disambiguated against
    *sent*) under measures["concept_definition"].

    Fix: the original appended exactly one element to a list and then
    tested that list for emptiness, so the "Not Found" fallback was
    unreachable and a None sense crashed with AttributeError.
    """
    sense = removeAmbiguity(sent, s_word)
    if sense is not None:
        measures["concept_definition"] = sense.definition()
    else:
        measures["concept_definition"] = "Not Found"
# Length of the word
def Length_of_Word(s_word, measures):
    """Record the character count of *s_word* in the shared measures dict."""
    word_length = len(s_word)
    measures["length_of_word"] = word_length
# Number of syllables
def Syllable_Count(s_word, lang, measures):
    """Estimate the syllable count of *s_word* via Pyphen hyphenation and
    store it under measures["no_of_syllables"] (0 for empty input)."""
    punctuation = set(string.punctuation)
    cleaned = "".join(ch for ch in s_word.lower() if ch not in punctuation)
    if not cleaned:
        # Empty or punctuation-only input has no syllables.
        measures["no_of_syllables"] = 0
        return
    dic = Pyphen(lang=lang)
    total = 0
    for token in cleaned.split(' '):
        hyphenated = dic.inserted(token)
        # Each hyphen marks a syllable boundary; minimum one syllable.
        total += max(1, hyphenated.count("-") + 1)
    measures["no_of_syllables"] = total
# Etymology
def Etymology(s_word, measures):
    """Scrape dictionary.com for the languages of origin of *s_word* and
    store them (or "Not Found") under measures["etymology"]."""
    urllib3.disable_warnings()
    http = urllib3.PoolManager()
    url = "http://www.dictionary.com/browse/" + s_word
    response = http.request('GET', url)
    soup = BeautifulSoup(response.data, "lxml")
    # Origin languages are rendered as <a class="language-name"> links.
    links = soup.findAll("a", {"class": "language-name"})
    if links:
        measures["etymology"] = [link.text for link in links]
    else:
        measures["etymology"] = "Not Found"
# Context
def Context(word_tokens_nl, s_word, measures, idx, main_pos):
    """Query the phrasefinder n-gram service with the sentence prefix that
    ends at *s_word* and record match/volume counts under measures["context"].

    Fix: removed a leftover debug print() of the prefix words that polluted
    stdout on every call.
    """
    pred = findPrecedingWords(word_tokens_nl, s_word, idx, main_pos)
    con_d = {}
    con_l = []
    sent = ''
    for i in range(0, len(pred)):
        sent += pred[i] + " "
    url = "http://phrasefinder.io/search?corpus=eng-us&query=" + sent
    response = get(url)
    data = response.json()
    if len(data["phrases"]) != 0:
        mc = data["phrases"][0]["mc"]  # matched count
        vc = data["phrases"][0]["vc"]  # volume count
        con_d["matched_count"] = mc
        con_d["volume_count"] = vc
        con_l.append(con_d)
        measures["context"] = con_l
    else:
        measures["context"] = "Not Found"
# Familiarity
def Familiarity(s_word, measures):
    """Look up how often *s_word* appears in the local complex-word count
    table and store it under measures["familiarity"] (0 when unknown).

    Fixes: the file is opened with a context manager so the handle is
    released even if parsing raises, and read-only "r" replaces the
    unnecessary "r+" mode.
    """
    with open("./data/complex_count.txt", "r") as word_file:
        # The file holds a Python dict literal, e.g. {'word': 3, ...}.
        word_count = ast.literal_eval(word_file.read())
    measures["familiarity"] = word_count.get(s_word, 0)
# Number of morphemes
def Morphemes(s_word, lang, measures):
    """Record Pyphen hyphenation splits of *s_word* (used as a proxy for
    its morphemes) plus a count in the shared measures dict."""
    hyphenator = Pyphen(lang=lang)
    splits = list(hyphenator.iterate(s_word))
    if splits:
        measures["morphemes"] = splits
        # NOTE(review): this counts the elements of the first split pair
        # (a 2-tuple), not the number of splits — confirm intent.
        measures["morphemes_count"] = len(splits[0])
    else:
        measures["morphemes"] = "Not Found"
        measures["morphemes_count"] = 0
# Tense Finder
def correctTense(s_word, tense):
    """Return *s_word* inflected for the Penn Treebank tag *tense*.

    Irregular past-simple forms come from a table stored as three lines
    per verb (base, past simple, past participle); adjectives are returned
    unchanged; everything else is delegated to tensefinder.changeTense.

    Fixes: the table file is read with a context manager, and a truncated
    trailing record no longer raises IndexError.
    """
    with open('./data/irregular_verbs_form.txt', 'r') as text_file:
        words = text_file.read().split("\n")
    past_simple = {}
    past_participle = {}
    # One pass over the complete 3-line records only.
    for i in range(0, len(words) - 2, 3):
        past_simple[words[i]] = words[i + 1]
        past_participle[words[i]] = words[i + 2]
    if tense == "VBD":
        if s_word in past_simple:
            return past_simple[s_word]
        return changeTense(s_word, tense)
    if tense in ("JJ", "JJR", "JJS"):
        return s_word
    return changeTense(s_word, tense)
# Find Preceding Part of the sentence
def findPrecedingWords(word_tokens_nl, s_word, idx, main_pos):
    """Return the sentence tokens up to and including *s_word*.

    When *s_word* is absent (it is a synonym candidate), take the tokens
    before position *idx* and append the synonym after adjusting its tense
    to match *main_pos*.
    """
    for position, token in enumerate(word_tokens_nl):
        if token == s_word:
            return word_tokens_nl[:position + 1]
    prefix = word_tokens_nl[0:idx]
    prefix.append(correctTense(s_word, main_pos))
    return prefix
# Attach Preceding Words to Dict
def AttachPredWords(word_tokens_nl, s_word, measures):
    """Store the tokens up to and including *s_word* (when present) under
    measures["preceding_words"]; leave the dict untouched otherwise."""
    for position, token in enumerate(word_tokens_nl):
        if token == s_word:
            measures["preceding_words"] = word_tokens_nl[:position + 1]
# Ambiguity
def removeAmbiguity(sent, s_word):
    """Pick a single WordNet sense for *s_word* in the context of *sent*.

    Tries Lesk word-sense disambiguation (verb POS) first; falls back to
    the first WordNet synset when Lesk returns nothing.
    """
    sense = lesk(sent, s_word, 'v')
    if sense:
        return sense
    return wn.synsets(s_word)[0]
# Find Synonyms of the Words
def findSynonyms(s_word, sent, measures, main_pos):
    """Collect single-word synonyms for *s_word* under measures["synonyms"].

    Primary source is the Oxford Dictionaries synonym endpoint, filtered
    to the lexical category matching *main_pos*; on any non-200 response
    the function falls back to WordNet lemmas of the disambiguated sense.
    """
    # SECURITY(review): API credentials are hard-coded below — move them
    # to environment variables or configuration.
    syn = removeAmbiguity(sent, s_word)
    synonyms = []
    temp = syn.name()
    # Synset names look like "run.v.01"; the first component is the lemma.
    root_name = temp.split(".")
    app_id = '75bf3b86'
    app_key = '2cc787678601b689054268d194d3064b'
    language = 'en'
    word_id = root_name[0]
    url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word_id.lower() + '/synonyms'
    r = get(url, headers = {'app_id': app_id, 'app_key': app_key})
    if r.status_code == 200:
        data = r.json()
        # Map Penn Treebank POS tags onto Oxford lexical categories.
        mapper = {'VBD': 'Verb', 'VBG': 'Verb', 'JJ': 'Adjective', 'JJR': 'Adjective', 'JJS': 'Adjective'}
        checkEnt = ''
        for key, value in mapper.items():
            if key == main_pos:
                checkEnt = value
                break
        l = []
        # Take the synonyms of the first entry whose category matches.
        for i in range(0, len(data["results"][0]["lexicalEntries"])):
            if data["results"][0]["lexicalEntries"][i]["lexicalCategory"] == checkEnt:
                l = data["results"][0]["lexicalEntries"][i]["entries"][0]["senses"][0]["synonyms"]
                break
        # Keep only single-word synonyms (multi-word phrases are skipped).
        for i in l:
            if ' ' not in i["text"]:
                synonyms.append(i["text"])
    else:
        # Fallback: WordNet lemma names of the chosen sense.
        for l in syn.lemmas():
            synonyms.append(l.name())
    measures["synonyms"] = synonyms
measures["synonyms"] = synonyms
# Ranking of the synonyms
def RankEvaluationModule(measures, synonyms_measures, main_pos):
    """Score every synonym candidate against the root word and return the
    best-scoring synonym, tense-adjusted to *main_pos*.

    Scoring: word length and syllable/morpheme counts are worth 1 point,
    familiarity 2, n-gram context 3; a Latin/Greek etymology penalises
    whichever word has it.

    Fix: the original mixed up two flag names (``f1`` initialised, ``fl``
    read), raising NameError whenever the synonym had a non-classical
    etymology and the root word's etymology had to be inspected; a single
    flag is used now.
    """
    pointsForRootWord_dict = {}
    pointsForSubWord_dict = {}
    for i in range(0, len(synonyms_measures)):
        candidate = synonyms_measures[i]
        pointsForRootWord = 0
        pointsForSubWord = 0
        # Shorter words are considered simpler.
        if measures["length_of_word"] >= candidate["length_of_word"]:
            pointsForSubWord += 1
        else:
            pointsForRootWord += 1
        # Fewer syllables are considered simpler.
        if measures["no_of_syllables"] >= candidate["no_of_syllables"]:
            pointsForSubWord += 1
        else:
            pointsForRootWord += 1
        # More familiar (more frequent) words win 2 points.
        if measures["familiarity"] <= candidate["familiarity"]:
            pointsForSubWord += 2
        else:
            pointsForRootWord += 2
        # Higher n-gram volume count wins 3 points.
        if candidate["context"] != "Not Found":
            if measures["context"] != "Not Found":
                if measures["context"][0]["volume_count"] <= candidate["context"][0]["volume_count"]:
                    pointsForSubWord += 3
                else:
                    pointsForRootWord += 3
            else:
                pointsForSubWord += 3
        else:
            pointsForRootWord += 3
        # Fewer morphemes are considered simpler.
        if measures["morphemes_count"] >= candidate["morphemes_count"]:
            pointsForSubWord += 1
        else:
            pointsForRootWord += 1
        # Latin/Greek roots count against a word (perceived as harder).
        if candidate["etymology"] != "Not Found":
            synonym_is_classical = False
            for origin in candidate["etymology"]:
                if 'latin' in origin.lower() or 'greek' in origin.lower():
                    pointsForRootWord += 2
                    synonym_is_classical = True
                    break
            if not synonym_is_classical:
                root_is_classical = False
                if measures["etymology"] != "Not Found":
                    for origin in measures["etymology"]:
                        if 'latin' in origin.lower() or 'greek' in origin.lower():
                            pointsForSubWord += 2
                            root_is_classical = True
                            break
                    if not root_is_classical:
                        # Neither word is classical: split the points.
                        pointsForSubWord += 1
                        pointsForRootWord += 1
                else:
                    pointsForSubWord += 1
        else:
            pointsForRootWord += 2
        pointsForSubWord_dict[candidate["word"]] = pointsForSubWord
        mixer = measures["word"] + '_' + candidate["word"]
        pointsForRootWord_dict[mixer] = pointsForRootWord
    print("\n")
    print("Synonyms Points\n")
    print(pointsForSubWord_dict)
    print("\n")
    print("Root Word Points\n")
    print(pointsForRootWord_dict)
    print("\n")
    ranked_word = max(pointsForSubWord_dict.items(), key=operator.itemgetter(1))[0]
    gr_ranked_word = correctTense(ranked_word, main_pos)
    return gr_ranked_word
def findMeasures(s_word, word_tokens_nl, lang, sent, idx, main_pos):
    """Compute every lexical measure for the root word *s_word* in parallel.

    Each measure function runs in its own process and writes a distinct
    key into a multiprocessing Manager dict, which is finally converted
    into a plain dict via a JSON round-trip.

    NOTE(review): reads the module-level ``pos_data`` built in __main__ —
    this works under the fork start method where children inherit globals;
    confirm behaviour on spawn-based platforms.
    """
    manager = Manager()
    measures = manager.dict()
    # One worker process per measure; all write distinct keys.
    p1 = Process(target = Definition, args = (s_word, measures, sent))
    p2 = Process(target = Length_of_Word, args = (s_word, measures))
    p3 = Process(target = Syllable_Count, args = (s_word, lang, measures))
    p4 = Process(target = Etymology, args = (s_word, measures))
    p5 = Process(target = Context, args = (word_tokens_nl, s_word, measures, idx, main_pos))
    p6 = Process(target = Familiarity, args = (s_word, measures))
    p7 = Process(target = Morphemes, args = (s_word, lang, measures))
    p8 = Process(target = findSynonyms, args = (s_word, sent, measures, main_pos))
    p9 = Process(target = AttachPredWords, args = (word_tokens_nl, s_word, measures))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    p5.start()
    p6.start()
    p7.start()
    p8.start()
    p9.start()
    p1.join()
    p2.join()
    p3.join()
    p4.join()
    p5.join()
    p6.join()
    p7.join()
    p8.join()
    p9.join()
    # Given Word
    measures["word"] = s_word
    # Attach the word's POS tag from the globally prepared pos_data.
    for i in range(0, len(pos_data)):
        if pos_data[i][0] == s_word:
            measures["part_of_speech"] = pos_data[i][1]
    f = json.dumps(measures.copy())
    # JSON round-trip turns the Manager proxy into a plain dict.
    final_measures = {}
    final_measures = ast.literal_eval(f)
    return final_measures
def findMeasuresForSynonyms(s_word, word_tokens_nl, lang, idx, main_pos):
    """Like findMeasures, but for a synonym candidate: skips the
    definition, synonym-lookup and preceding-word measures."""
    manager = Manager()
    measures = manager.dict()
    # One worker process per measure; all write distinct keys.
    p2 = Process(target = Length_of_Word, args = (s_word, measures))
    p3 = Process(target = Syllable_Count, args = (s_word, lang, measures))
    p4 = Process(target = Etymology, args = (s_word, measures))
    p5 = Process(target = Context, args = (word_tokens_nl, s_word, measures, idx, main_pos))
    p6 = Process(target = Familiarity, args = (s_word, measures))
    p7 = Process(target = Morphemes, args = (s_word, lang, measures))
    p2.start()
    p3.start()
    p4.start()
    p5.start()
    p6.start()
    p7.start()
    p2.join()
    p3.join()
    p4.join()
    p5.join()
    p6.join()
    p7.join()
    # Given Word
    measures["word"] = s_word
    # POS tag comes from the globally prepared pos_data (see findMeasures).
    for i in range(0, len(pos_data)):
        if pos_data[i][0] == s_word:
            measures["part_of_speech"] = pos_data[i][1]
    f = json.dumps(measures.copy())
    # JSON round-trip turns the Manager proxy into a plain dict.
    final_measures = {}
    final_measures = ast.literal_eval(f)
    return final_measures
# Entry point: simplify the first hard verb/adjective of the sentence
# given as argv[1] (argv[2] is the Pyphen language code).
if __name__ == '__main__':
    start_time = time.time()
    sent = str(sys.argv[1])
    lang = str(sys.argv[2])
    stop_words = set(stopwords.words('english'))
    word_tokens = word_tokenize(sent.lower())
    word_tokens_nl = word_tokenize(sent)
    pos_tags = nltk.pos_tag(word_tokens)
    filtered_sentence = [ w for w in word_tokens if not w in stop_words ]
    pos_data = []
    words_to_be_analyzed = []
    # Only verbs and adjectives are candidates for simplification.
    verbs = ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "JJ", "JJS", "JJR"]
    for i in range(0, len(filtered_sentence)):
        for j in range(0, len(pos_tags)):
            if filtered_sentence[i] == pos_tags[j][0] and pos_tags[j][1] in verbs:
                words_to_be_analyzed.append(filtered_sentence[i])
                pos_data.append((filtered_sentence[i], pos_tags[j][1]))
    if len(words_to_be_analyzed) != 0:
        # Only the first candidate word is simplified.
        s_word = words_to_be_analyzed[0]
        idx = 0
        main_pos = ''
        # Initial POS lookup for the chosen word.
        for i in range(0, len(pos_data)):
            if pos_data[i][0] == s_word:
                main_pos = pos_data[i][1]
        measures = findMeasures(s_word, word_tokens_nl, lang, sent, idx, main_pos)
        if len(measures["synonyms"]) != 0:
            # Locate the word's position in the original-case tokens.
            for i in range(0, len(word_tokens_nl)):
                if s_word == word_tokens_nl[i]:
                    idx = i
            print(measures)
            synonyms = measures["synonyms"]
            main_pos = measures["part_of_speech"]
            synonyms_measures = []
            for s in range(0, len(synonyms)):
                synonyms_measures.append(findMeasuresForSynonyms(synonyms[s], word_tokens_nl, lang, idx, main_pos))
            fin_ranked_word = RankEvaluationModule(measures, synonyms_measures, main_pos)
            # Rebuild the sentence with the winning synonym substituted.
            simplified_sentence = []
            for i in range(0, len(word_tokens_nl)):
                if i == idx:
                    simplified_sentence.append(fin_ranked_word)
                else:
                    simplified_sentence.append(word_tokens_nl[i])
            print(simplified_sentence)
        else:
            print(word_tokens_nl)
    else:
        print("Sentence cannot be simplified")
    print("Execution Time: %s seconds" %(time.time() - start_time))
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from datetime import datetime
import time
class CosmeticSpiderPipeline(object):
    """Scrapy pipeline that upserts scraped items into MongoDB.

    On startup every stored item's stock count is reset to zero (awaiting
    refresh by the incoming crawl) and items not updated for too long are
    deleted.
    """

    # Items older than this many seconds (~2.3 days) are purged on startup.
    OUTDATED_SECONDS = 200000.0

    def __init__(self):
        host = settings['MONGODB_HOST']
        port = settings['MONGODB_PORT']
        dbname = settings['MONGODB_DBNAME']
        sheetname = settings['MONGODB_SHEETNAME']
        # Create the database connection.
        client = pymongo.MongoClient(host=host, port=port)
        # Select the configured database.
        db = client[dbname]
        # Collection that stores the scraped items.
        self.sheet = db[sheetname]
        # Zero every stock count; the new crawl will refresh them.
        for item in self.sheet.find():
            updateFilter = {'item_name': item['item_name']}
            self.sheet.update_one(filter=updateFilter, update={'$set': {'item_count': 0}})
            if self.is_outdated(item['date']):
                self.sheet.delete_one(item)
        print('库存清零')

    def is_outdated(self, item_date):
        """Return True when *item_date* ("%Y-%m-%d %H:%M:%S") is older than
        OUTDATED_SECONDS relative to now.

        Fix: the original if/elif pair returning True/False is a plain
        boolean comparison; the magic constant is now a named class attr.
        """
        now_date = time.mktime(time.localtime())
        item_ts = time.mktime(time.strptime(item_date, "%Y-%m-%d %H:%M:%S"))
        return (now_date - item_ts) >= self.OUTDATED_SECONDS

    def process_item(self, item, spider):
        """Upsert *item* keyed by item_name; return the Mongo update result."""
        data = dict(item)
        updateFilter = {'item_name': data['item_name']}
        updateRes = self.sheet.update_one(
            filter=updateFilter,
            update={'$set': data},
            upsert=True)
        return updateRes
|
__author__ = 'dingxinhui'
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# function/function_01.py
# 业务功能脚本(用例脚本可调用此处的功能脚本)
from encapsulation.encapsulation import UIHandle
from constant.constant_1 import LOGIN_URL
import config.login_config
from config.config_01 import browser_config
from picture.picture1 import *
import time
def login(username,password):
    """Log into the site through the configured Chrome driver and return
    [page_source, title, screenshot] captured after the login attempt."""
    # Open the browser (driver comes from the shared browser config).
    driver = browser_config['chrome']
    print(driver.get_window_size())
    driver.set_window_size(1200,800)
    print(driver.get_window_size())
    driver.implicitly_wait(30)
    # Errors raised while the script runs are collected in this list.
    driver.verificationErrors = []
    # Whether to auto-accept the next browser alert.
    driver.accept_next_alert = True
    # Wrap the driver in the project's UI helper.
    uihandle = UIHandle(driver)
    # Navigate to the login URL.
    uihandle.get(LOGIN_URL)
    uihandle.Click("老白首页", "首页登录按钮")
    time.sleep(3)
    uihandle.Clear('老白首页', '用户名')
    uihandle.Input('老白首页', '用户名', username)
    uihandle.Input('老白首页', '密码', password)
    uihandle.Click('老白首页', '登录页面登录按钮')
    time.sleep(3)
    # Capture evidence of the post-login state for the caller to assert on.
    res = driver.page_source
    title = driver.title
    img = get_screenshot(driver)
    a = [res, title, img]
    return a
|
# https://www.youtube.com/watch?v=YrNpkuVIFdg&t=814s
# PyMunk library - 2D physics engine.
# PyGame library - graphical interface creator.
# sys library - way of managing python runtime environment
import pygame, sys, pymunk
def create_apple(space):
    """Add a dynamic circular body to *space* and return its shape."""
    apple_body = pymunk.Body(1, 100, body_type=pymunk.Body.DYNAMIC)
    apple_body.position = (400, 0)
    apple_shape = pymunk.Circle(apple_body, 80)
    space.add(apple_body, apple_shape)
    return apple_shape
def draw_apples(apples):
    """Render every apple shape as a black circle on the global screen.

    Fix: the integer coordinates computed from the physics body were dead
    locals in the original; they are now actually used for drawing.
    """
    for apple in apples:
        pos_x = int(apple.body.position.x)
        pos_y = int(apple.body.position.y)
        pygame.draw.circle(screen, (0, 0, 0), (pos_x, pos_y), 80)
pygame.init()  # initialise all pygame modules
screen = pygame.display.set_mode((800, 800))  # 800x800 window
clock = pygame.time.Clock()  # game clock for frame-rate limiting
space = pymunk.Space()  # the physics world
space.gravity = (0, 500)  # +y is down in pygame, so this pulls downwards
apples = []
apples.append(create_apple(space))  # start with a single falling apple
while True:  # game loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    screen.fill((200, 220, 220))  # background color
    draw_apples(apples)  # draw every apple at its current physics position
    space.step(1/50)  # advance the physics simulation by a fixed step
    pygame.display.update()  # rendering frame
    clock.tick(120)  # 120 fps limit
|
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
#%%
# TensorFlow 1.x basics (notebook cells exported to a script).
y_hat = tf.constant(36, name='y_hat')  # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y')  # Define y. Set to 39
loss = tf.Variable((y - y_hat)**2, name='loss')  # Create a variable for the loss
init = tf.global_variables_initializer()  # Running init later initializes the loss variable
with tf.Session() as session:  # Create a session and print the output
    session.run(init)  # Initializes the variables
    print(session.run(loss))  # Prints the loss
#%%
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
init = tf.global_variables_initializer()
sess = tf.Session()
print(sess.run(c))
#%%
# Placeholders receive their value at run time through feed_dict.
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
#%%
# NOTE(review): the lines below reference undefined names (W, X, z, y,
# labels, C, Z1, logits, n_x) — they are API reference fragments copied
# from course material, not runnable code.
Y = tf.add(tf.matmul(W, X), b)
X = tf.placeholder(tf.float32, [n_x, None], name="X")
cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
one_hot_matrix = tf.one_hot(indices=labels, depth=C, axis=0)
W1 = tf.get_variable("W1", [25, 12288], initializer = tf.contrib.layers.xavier_initializer(seed=1))
b1 = tf.get_variable("b1", [25, 1], initializer = tf.zeros_initializer())
A1 = tf.nn.relu(Z1)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
#%%
|
import yaml
def load_config(file_config='config.yml'):
    """Parse *file_config* as YAML and return the resulting object."""
    with open(file_config, 'r') as config_handle:
        parsed = yaml.safe_load(config_handle)
    return parsed
|
# -*- coding: utf-8 -*-
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, x):
        # Children start empty; callers wire them up explicitly.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Check whether a binary tree is a mirror of itself (LeetCode 101)."""

    def isSymmetric(self, root):
        """Return True when the tree rooted at *root* is symmetric."""
        if root is None:
            return True
        return self._isSymmetric(root.left, root.right)

    def _isSymmetric(self, left, right):
        # Mirror pairs: both empty, or equal values with crossed subtrees.
        if left is None and right is None:
            return True
        if left is None or right is None:
            return False
        if left.val != right.val:
            return False
        return (self._isSymmetric(left.left, right.right)
                and self._isSymmetric(left.right, right.left))
if __name__ == "__main__":
    solution = Solution()
    # Symmetric tree:    1
    #                  /   \
    #                 2     2
    #                / \   / \
    #               3   4 4   3
    t0_0 = TreeNode(1)
    t0_1 = TreeNode(2)
    t0_2 = TreeNode(2)
    t0_3 = TreeNode(3)
    t0_4 = TreeNode(4)
    t0_5 = TreeNode(4)
    t0_6 = TreeNode(3)
    t0_2.left = t0_5
    t0_2.right = t0_6
    t0_1.left = t0_3
    t0_1.right = t0_4
    t0_0.left = t0_1
    t0_0.right = t0_2
    assert solution.isSymmetric(t0_0)
    # Asymmetric tree: both level-2 nodes have only a right child.
    t1_0 = TreeNode(1)
    t1_1 = TreeNode(2)
    t1_2 = TreeNode(2)
    t1_3 = TreeNode(3)
    t1_4 = TreeNode(3)
    t1_2.right = t1_4
    t1_1.right = t1_3
    t1_0.left = t1_1
    t1_0.right = t1_2
    assert not solution.isSymmetric(t1_0)
|
import numpy as np
import matplotlib.pyplot as plt
# Takes in a position in the board and outputs if it is possible or not to put the number n inside
def possible(board, x, y, n):
    """Return True when digit *n* may be placed at board[x][y] without
    violating Sudoku row, column, or 3x3-box constraints (0 = empty)."""
    size = len(board[0])
    # Row check: n must not already be in row x.
    if n in (board[x][col] for col in range(size)):
        return False
    # Column check: n must not already be in column y.
    if n in (board[row][y] for row in range(size)):
        return False
    # Top-left corner of the 3x3 box containing (x, y); only the box
    # membership matters, not where inside it (x, y) falls.
    box_x = (x // 3) * 3
    box_y = (y // 3) * 3
    for dx in range(3):
        for dy in range(3):
            if board[box_x + dx][box_y + dy] == n:
                return False
    # All constraints satisfied.
    return True
def solve(board):
    """Backtracking Sudoku solver; prints every completed solution.

    NOTE(review): after trying all digits for the first empty cell the
    function returns with that cell reset to 0, so the caller's board is
    left unsolved — solutions are only visible through the print below.
    """
    for x in range(9):
        for y in range(9):
            if board[x][y] == 0:
                for n in range(1,10):
                    res = possible(board,x,y,n)
                    if res ==True:
                        board[x][y] = n
                        # Recurse to fill the remaining empty cells.
                        solve(board)
                        # Undo the move: deeper branches may have failed,
                        # and other digits must still be tried here.
                        board[x][y] = 0 #NEEDED
                return board
    # No empty cell left: the board is complete.
    print("Solved")
    print(np.matrix(board))
    #input("More")
# Initialise the sudoku board as a list of 9 rows (0 = empty cell).
board = [[5,3,0,0,7,0,0,0,0],
        [6,0,0,1,9,5,0,0,0],
        [0,9,8,0,0,0,0,6,0],
        [8,0,0,0,6,0,0,0,3],
        [4,0,0,8,0,3,0,0,1],
        [7,0,0,0,2,0,0,0,6],
        [0,6,0,0,0,0,2,8,0],
        [0,0,0,4,1,9,0,0,5],
        [0,0,0,0,8,0,0,7,9]]
print(board)
# Just for viewing purposes — the solver itself works on plain lists.
board_np = np.matrix(board)
print(board_np)
# Run the backtracking solver (prints each complete solution it finds).
solve(board)
|
from PACK import *
## supervised graphsage, classification loss
class SupervisedGraphSAGE(nn.Module):
    """Supervised GraphSAGE classifier: node embeddings from the second
    encoder are scored against a learned class matrix and trained with
    cross-entropy."""

    def __init__(self, class_num, encoder_1, encoder_2):
        super(SupervisedGraphSAGE, self).__init__()
        self.class_num = class_num
        self.encoder_1 = encoder_1
        self.encoder = encoder_2
        self.criterion = nn.CrossEntropyLoss()
        # Class-scoring matrix of shape (num_classes, embedding_dim).
        self.weight = nn.Parameter(torch.FloatTensor(class_num, self.encoder.embed_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        """Return (embeddings, scores); scores has shape (num_nodes, classes)."""
        embeddings = self.encoder(nodes)
        class_scores = self.weight.mm(embeddings).t()
        return embeddings, class_scores

    def loss(self, nodes, labels):
        """Cross-entropy loss of the predicted class scores."""
        class_scores = self.forward(nodes)[1]
        return self.criterion(class_scores, labels.squeeze())
class SupervisedGraphSAGE_Single(nn.Module):
    """Single-encoder supervised GraphSAGE: ReLU over the node embeddings
    followed by one fully-connected classification head."""

    def __init__(self, class_num, encoder):
        super(SupervisedGraphSAGE_Single, self).__init__()
        self.class_num = class_num
        self.encoder = encoder
        self.criterion = nn.CrossEntropyLoss()
        # Linear head mapping embeddings to per-class logits.
        self.fc = nn.Sequential(
            nn.Linear(encoder.embed_dim, class_num, bias=True)
        )

    def forward(self, nodes):
        """Return (embeddings, logits); logits has shape (num_nodes, classes)."""
        embeddings = self.encoder(nodes)
        logits = self.fc(F.relu(embeddings).t())
        return embeddings, logits

    def loss(self, nodes, labels):
        """Cross-entropy loss over the class logits."""
        logits = self.forward(nodes)[1]
        return self.criterion(logits, labels.squeeze())
## unsupervised graphsage, n-pair loss
class UnsupervisedGraphSAGE_Single(nn.Module):
    """GraphSAGE trained with a combined triplet + classification loss."""
    def __init__(self, class_num, encoder):
        super(UnsupervisedGraphSAGE_Single, self).__init__()
        self.class_num = class_num
        self.encoder = encoder
        self.criterion = nn.CrossEntropyLoss()
        # Class-scoring matrix of shape (num_classes, embedding_dim).
        self.weight = nn.Parameter(torch.FloatTensor(class_num, encoder.embed_dim))
        init.xavier_uniform_(self.weight)
    def forward(self, nodes):
        # Embeddings only; scoring happens inside loss().
        return self.encoder(nodes)
    def loss(self, nodes_anchor, nodes_positive, label):
        ## triplet loss + classification loss
        anchor_feature = self.forward(nodes_anchor)
        positive_feature = self.forward(nodes_positive)
        anchor_activation = F.relu(anchor_feature)
        positive_activation = F.relu(positive_feature)
        anchor_score = self.weight.mm(anchor_activation)
        positive_score = self.weight.mm(positive_activation)
        # target[i][j] == 1 where labels differ (negative pairs).
        target = label.view(label.size(0), -1)
        target = (target != target.t()).float()
        # NOTE(review): 2 - 2*<a, p> equals squared Euclidean distance only
        # for unit-norm embeddings — confirm the encoder normalises output.
        sqdist = 2 - 2 * torch.matmul(anchor_feature.t(), positive_feature)
        pos_dist = torch.diagonal(sqdist)
        diff_dist = pos_dist.view(-1, 1).repeat(1, sqdist.size(0)) - sqdist
        # Margin-0.3 triplet hinge over negative pairs, averaged per anchor.
        loss_feature = torch.mean(torch.sum(F.relu(diff_dist + 0.3) * target, dim=1))
        loss_class = self.criterion(anchor_score.t(), label.squeeze()) + self.criterion(positive_score.t(), label.squeeze())
        return loss_feature + loss_class * 0.5
        ## Alternative kept for reference: n-pair loss + classification loss
        # anchor_feature = self.forward(nodes_anchor)
        # positive_feature = self.forward(nodes_positive)
        # anchor_score = self.weight.mm(anchor_feature)
        # positive_score = self.weight.mm(positive_feature)
        #
        # anchor_feature = F.normalize(anchor_feature, p=2, dim=0)
        # positive_feature = F.normalize(positive_feature, p=2, dim=0)
        # target = label.view(label.size(0), -1)
        # target = (target == target.t()).float()
        #
        # target = target / torch.sum(target, dim=1, keepdim=True).float()
        # logit = torch.matmul(anchor_feature.t(), positive_feature)
        #
        # loss_feature = - torch.mean(torch.sum(target * F.log_softmax(logit, dim=1), dim=1))
        # loss_class = self.criterion(anchor_score.t(), label.squeeze()) + self.criterion(positive_score.t(), label.squeeze())
        #
        # return loss_class + 0.05 * loss_feature
|
# RSA implementation (based on an antichat forum post)
#!/usr/bin/python
import random
def gcd(v1, v2):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while v2:
        v1, v2 = v2, v1 % v2
    return v1
def euclid_ext(a, b):
    """Extended Euclid: return the Bezout coefficient y with
    a*x + b*y == gcd(a, b) (used to derive the RSA private exponent).

    Fix: quotients now use integer floor division (//); under Python 3 the
    original "/" produced floats and corrupted every later iteration.
    """
    x2, x1 = 1, 0
    y2, y1 = 0, 1
    if b == 0:
        return 0
    while b > 0:
        q = a // b
        r = a - q * b
        x = x2 - q * x1
        y = y2 - q * y1
        a, b = b, r
        x2, x1 = x1, x
        y2, y1 = y1, y
    return y2
def binpow(a, x, mod):
    """Modular exponentiation: (a ** x) % mod by binary squaring.

    Equivalent to the built-in pow(a, x, mod).

    Fix: the exponent is halved with integer //= instead of /= — under
    Python 3 the float division lost precision for large exponents.
    """
    res = 1
    a %= mod
    x = int(x)
    while x:
        if x & 1:
            res = (res * a) % mod
        a = (a * a) % mod
        x //= 2
    return res
def is_prime(num, rounds=None):
    """Miller-Rabin probabilistic primality test.

    *rounds* is the number of random witness rounds; when omitted it
    falls back to the module-level ``bit`` setting (backward compatible
    with the original one-argument signature).

    Fixes relative to the original: a single passing witness no longer
    proves primality (all rounds must pass), the witness-squaring loop no
    longer rejects on the first non-(num-1) square, the odd factor q is
    computed with integer //=, and the hand-rolled modular power is
    replaced by the built-in three-argument pow().
    """
    if rounds is None:
        rounds = bit
    if num == 2:
        return True
    if num < 2 or num % 2 == 0:
        return False
    # Write num - 1 as q * 2**p with q odd.
    q = num - 1
    p = 0
    while q % 2 == 0:
        q //= 2
        p += 1
    for _ in range(rounds):
        x = random.randint(2, num - 1)  # random witness
        x = pow(x, q, num)
        if x == 1 or x == num - 1:
            continue  # this witness is consistent with primality
        for _ in range(p - 1):
            x = (x * x) % num
            if x == num - 1:
                break
        else:
            return False  # witness proves num composite
    return True
def generate(p1, p2):
    """Return a (probable) prime drawn uniformly from [p1, p2]."""
    while True:
        candidate = random.randint(p1, p2)
        if is_prime(candidate):
            return candidate
def enc(input):
    """RSA-encrypt *input* with the module-level public key (e, n)."""
    return binpow(input, e, n)
def dec(output):
    """RSA-decrypt *output* with the module-level private key (d, n)."""
    return binpow(output, d, n)
####################
# Demo parameters: prime bit-length, public exponent, and test message.
bit = 10
e = 3
msg = 6666
####################
d = 0
n = 0
# Candidate primes are drawn from [2**(bit-1), 2**bit - 1].
start = 2**(bit-1)
end = 2**bit - 1
# Keep generating key pairs until e is valid for phi(n).
while (1):
    p = generate(start,end)
    q = generate(start,end)
    n = p*q
    phi_n = (p-1)*(q-1)
    if ((p != q) and (phi_n > e) and (phi_n % e != 0)):
        break
# Private exponent: Bezout coefficient of e mod phi(n), normalised positive.
d = euclid_ext(phi_n, e)
d = ((d % phi_n) + phi_n) % phi_n
####################
print ("RSA public key:\n{e, n} = {"+str(e)+", "+str(n)+"}\n")
print ("RSA private key:\n{d, n} = {"+str(d)+", "+str(n)+"}\n")
print ("Encoded message: "+str(enc(msg)))
print ("Decoded message: "+str(dec(enc(msg))))
|
def isPrime(num):
    """Trial-division primality test.

    Fixes: numbers below 2 are no longer reported as prime (the original
    returned True for 0, 1, and negatives), and divisors are checked only
    up to sqrt(num) instead of num - 1 (O(sqrt n) vs O(n)).
    """
    if num < 2:
        return False
    divisor = 2
    while divisor * divisor <= num:
        if num % divisor == 0:
            return False
        divisor += 1
    return True
# Write every prime in [600, 800) to ./10.txt, one per line.
with open('./10.txt', 'w') as f:
    for candidate in range(600, 800):
        if isPrime(candidate):
            f.write(str(candidate) + "\n")
# Fix: removed the redundant f.close() after the with-block — the context
# manager already closes the file, so the extra call was dead code.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-05-18 19:25
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django migration (Django 1.11.5, 2018-05-18).

    Drops three obsolete fields from MediaProjeto and re-declares datetime
    defaults on Board and MediaProjeto.

    NOTE(review): the datetime defaults below were frozen at generation
    time, so every migrated row receives this fixed timestamp rather than
    the migration's run time — a known quirk of auto-generated defaults.
    """

    dependencies = [
        ('board', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='mediaprojeto',
            name='data_ultima_modificacao',
        ),
        migrations.RemoveField(
            model_name='mediaprojeto',
            name='pub_date',
        ),
        migrations.RemoveField(
            model_name='mediaprojeto',
            name='publicado',
        ),
        migrations.AlterField(
            model_name='board',
            name='data_criacao',
            field=models.DateTimeField(default=datetime.datetime(2018, 5, 18, 19, 25, 45, 429306, tzinfo=utc), verbose_name='Submetido'),
        ),
        migrations.AlterField(
            model_name='board',
            name='data_ultima_modificacao',
            field=models.DateTimeField(default=datetime.datetime(2018, 5, 18, 19, 25, 45, 429306, tzinfo=utc), verbose_name='Modificado'),
        ),
        migrations.AlterField(
            model_name='board',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2018, 5, 18, 19, 25, 45, 429306, tzinfo=utc), verbose_name='Publicado'),
        ),
        migrations.AlterField(
            model_name='mediaprojeto',
            name='data_criacao',
            field=models.DateTimeField(default=datetime.datetime(2018, 5, 18, 19, 25, 45, 430306, tzinfo=utc), verbose_name='Submetido'),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
DATADIR = "F:\Programming Projects\CNN cats and dogs classification\PetImages"
CATEGORIES = ["Dog", "Cat"]
IMG_SIZE = 96
train_data = []
def create_train_data():
    """Populate the global train_data list with [image_array, label] pairs.

    Images are read as grayscale and resized to IMG_SIZE x IMG_SIZE; the
    label is the category's index in CATEGORIES.

    Fix: the handler no longer binds an unused exception name, and the
    deliberate best-effort skip of unreadable files is documented.
    """
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                # Read as grayscale and normalise the spatial size.
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                train_data.append([new_array, class_num])
            except Exception:
                # Deliberately skip unreadable or corrupt image files.
                pass
create_train_data()
print(len(train_data))
import random
# Shuffle so the two categories are interleaved for training.
random.shuffle(train_data)
X = []
y= []
for features,label in train_data:
    X.append(features)
    y.append(label)
# Shape: (num_samples, IMG_SIZE, IMG_SIZE, 1 grayscale channel).
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
import pickle
# Persist features and labels for the training script.
pickle_out = open("X.pickle","wb")
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open("y.pickle","wb")
pickle.dump(y, pickle_out)
pickle_out.close()
# Sanity check: reload the pickled features.
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
|
import random
def pick(options, amount):
    """Return *amount* distinct elements chosen at random from *options*."""
    chosen = random.sample(options, amount)
    return chosen
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
# Copyright (C) 2017-2020 Osimis S.A., Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import sys
import RestToolbox
def PrintHelp():
    """Print usage information and terminate the process.

    Fix: uses sys.exit() instead of the site-injected exit() builtin,
    which is not guaranteed to exist outside interactive sessions.
    """
    print('Download one ZIP archive for all the patients stored in Orthanc\n')
    print('Usage: %s <URL> <Target>\n' % sys.argv[0])
    print('Example: %s http://127.0.0.1:8042/ /tmp/Archive.zip\n' % sys.argv[0])
    sys.exit(-1)
if len(sys.argv) != 3:
    PrintHelp()
URL = sys.argv[1]
TARGET = sys.argv[2]
# Ask Orthanc for the identifiers of all stored patients.
patients = RestToolbox.DoGet('%s/patients' % URL)
print('Downloading ZIP...')
# Server-side archive creation covering the whole patient list.
zipContent = RestToolbox.DoPost('%s/tools/create-archive' % URL, patients)
# Write the ZIP archive at the proper location
with open(TARGET, 'wb') as f:
    f.write(zipContent)
|
"""
Creates metadata for audio files.
"""
__author__ = 'David Flury'
__email__ = "david@flury.email"
import os
import re
import glob
import json
import argparse
import requests
import audioread
import multiprocessing
from joblib import Parallel, delayed
from functools import reduce
audio_extensions = ['.wav', '.mp3']
file_prefix = 'vocals_'
replace_tokens = ['mogg_fixed', 'mogg_extract', 'mogg']
spotify_token = ''
def generate_metadata(file):
    """Build a JSON metadata file next to *file* describing the audio track.

    Combines filesystem-derived fields, audio properties read with
    audioread, and Spotify track/feature data.

    Fix: the bare "except:" (which would even swallow KeyboardInterrupt)
    is narrowed to "except Exception"; unreadable audio files still fall
    back to filesystem-only metadata on purpose.
    """
    extension = os.path.splitext(file)[1].lower()
    file_name = os.path.basename(file).replace(file_prefix, '')
    file_name = file_name.replace(extension, '')
    metadata = {}
    metadata['name'] = file_name
    metadata['file'] = file
    metadata['path'] = os.path.dirname(file)
    metadata['extension'] = extension
    metadata['folder'] = os.path.basename(os.path.dirname(file))
    metadata['collection'] = os.path.basename(os.path.dirname(os.path.dirname(file)))
    # Strip the replace_tokens and all non-alphanumerics to get a key
    # suitable for matching against other data sources.
    normalized_name = reduce((lambda x, y: x.replace(y, '')), [file_name] + replace_tokens)
    normalized_name = re.sub(r'[^a-zA-Z0-9]+', '', normalized_name).lower()
    metadata['normalized_name'] = normalized_name
    try:
        with audioread.audio_open(file) as f:
            metadata['channels'] = f.channels
            metadata['sample_rate'] = f.samplerate
            metadata['duration'] = f.duration
        metadata = spotify_metadata(file_name, metadata)
    except Exception:
        # Best-effort: keep whatever metadata was collected so far.
        pass
    metadata_file = os.path.join(os.path.dirname(file), '%s_metadata.json' % file_name)
    with open(metadata_file, 'w') as fp:
        json.dump(metadata, fp)
    print('Generated metafile: %s' % metadata_file)
def spotify_metadata(song_name, metadata, retry=False):
    """Enrich *metadata* with Spotify track, audio-feature and genre data.

    On an API error the bearer token is refreshed once and the call is
    retried; *retry* guards against infinite recursion.

    Fix: 'energy', 'loudness' and 'speechiness' were all copied from
    features['danceability'] (copy-paste bug); each now reads its own key.
    """
    response_track = requests.get('https://api.spotify.com/v1/search?q=%s&type=track&limit=1' % song_name, \
        headers={'Authorization': 'Bearer %s' % spotify_token})
    data = json.loads(response_track.text)
    if 'error' in data:
        if retry:
            return metadata
        set_spotify_token()
        return spotify_metadata(song_name, metadata, True)
    track = data['tracks']['items'][0]
    metadata['title'] = track['name']
    metadata['artists'] = []
    for artist in track['artists']:
        metadata['artists'].append(artist['name'])
    metadata['album'] = track['album']['name']
    metadata['explicit_content'] = track['explicit']
    metadata['spotify_id'] = track['id']
    metadata['spotify_popularity'] = track['popularity']
    response_features = requests.get('https://api.spotify.com/v1/audio-features/%s' % track['id'], \
        headers={'Authorization': 'Bearer %s' % spotify_token})
    features = json.loads(response_features.text)
    if 'error' in features:
        if retry:
            return metadata
        set_spotify_token()
        return spotify_metadata(song_name, metadata, True)
    metadata['features'] = {}
    metadata['features']['danceability'] = features['danceability']
    metadata['features']['energy'] = features['energy']
    metadata['features']['loudness'] = features['loudness']
    metadata['features']['speechiness'] = features['speechiness']
    metadata['features']['acousticness'] = features['acousticness']
    metadata['features']['instrumentalness'] = features['instrumentalness']
    metadata['features']['liveness'] = features['liveness']
    metadata['features']['valence'] = features['valence']
    metadata['features']['tempo'] = features['tempo']
    response_artist = requests.get('https://api.spotify.com/v1/artists/%s' % track['artists'][0]['id'], \
        headers={'Authorization': 'Bearer %s' % spotify_token})
    artist = json.loads(response_artist.text)
    if 'error' in artist:
        if retry:
            return metadata
        set_spotify_token()
        return spotify_metadata(song_name, metadata, True)
    metadata['genres'] = artist['genres']
    return metadata
def set_spotify_token():
    """Obtain a fresh client-credentials bearer token and store it in the
    module-global ``spotify_token``.

    Requires the base64-encoded client id/secret pair in the
    ``SPOTIFY_SECRET`` environment variable.
    """
    global spotify_token
    auth_headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic %s' % os.environ['SPOTIFY_SECRET'],
    }
    response = requests.post(
        'https://accounts.spotify.com/api/token',
        data={'grant_type': 'client_credentials'},
        headers=auth_headers,
    )
    token_payload = json.loads(response.text)
    spotify_token = token_payload['access_token']
if __name__ == '__main__':
    # CLI: directory to scan plus the parallel job count (defaults to
    # half of the available CPU cores).
    parser = argparse.ArgumentParser(description='Generate and store meta data for audio files.')
    parser.add_argument('--path', default='\\\\192.168.1.29\\unmix-server\\3_filter\\', type=str, help='Working path')
    parser.add_argument('--job_count', default=int(multiprocessing.cpu_count() / 2), type=int, help='Maximum number of concurrently running jobs')
    args = parser.parse_args()
    print('Arguments:', str(args))
    files = [] # Load all files into list
    print('Load all music files...')
    # Recursively collect files whose extension and name prefix match the
    # module-level audio_extensions / file_prefix constants.
    for file in glob.iglob(args.path + '**/*', recursive=True):
        extension = os.path.splitext(file)[1].lower()
        file_name = os.path.basename(file)
        if extension in audio_extensions and file_name.startswith(file_prefix):
            files.append(file)
    print('Found %d music files' % len(files))
    # Acquire the Spotify API token once before fanning out workers.
    set_spotify_token()
    print('Generate spectrograms with maximum %d jobs...' % args.job_count)
    # joblib fan-out: run generate_metadata over every collected file.
    Parallel(n_jobs=args.job_count)(delayed(generate_metadata)(file) for file in files)
    print('Finished processing')
|
from django.utils.translation import gettext_lazy as _
from django.db.models import (
SET_NULL,
CASCADE,
Model,
DateTimeField,
CharField,
PositiveSmallIntegerField,
DecimalField,
ForeignKey
)
from django.core.validators import (
MinValueValidator,
MaxValueValidator
)
from beverages.models import Beverage
from users.models import User
class Tasting(Model):
    """A single tasting note for a beverage, authored by a user.

    The descriptive fields (color, appearance, aroma, finish) are optional
    free text; rating is constrained to the range 1-10.
    """
    # Bumped automatically on every save.
    modified_at = DateTimeField(
        _('Modified at'),
        auto_now=True
    )
    name = CharField(
        _('Name'),
        max_length=200
    )
    # The tasting survives deletion of its beverage (FK nulled out)...
    beverage = ForeignKey(
        Beverage,
        null=True,
        blank=True,
        on_delete=SET_NULL,
        related_name='tastings'
    )
    # ...but is deleted together with its author.
    user = ForeignKey(
        User,
        null=True,
        on_delete=CASCADE,
        related_name='tastings'
    )
    color = CharField(
        _('Color'),
        null=True,
        blank=True,
        max_length=200
    )
    appearance = CharField(
        _('Appearance'),
        null=True,
        blank=True,
        max_length=200
    )
    aroma = CharField(
        _('Aroma'),
        null=True,
        blank=True,
        max_length=200
    )
    finish = CharField(
        _('Finish'),
        null=True,
        blank=True,
        max_length=200
    )
    # 1 (worst) .. 10 (best), enforced at validation time.
    rating = PositiveSmallIntegerField(
        _('Rating'),
        validators=[MinValueValidator(1), MaxValueValidator(10)]
    )
    def __str__(self):
        return self.name
'''
color_choices = (
('ruby', 'Pinot Noir - Ruby'),
('garnet', 'Tempranillo - Garnet'),
('violet', 'Shiraz - Violet/Deep Purple'),
('deep_ruby', 'Cabernet Sauvignon - Deep Ruby'),
('pale_yellow', 'Pinot Grigio - Pale Yellow'),
('pale_gold', 'Sauvignon Blanc - Pale Gold/Light Yellow'),
('gold', 'Chardonnay - Gold'),
('deep_gold', 'Semillon - Deep Gold'),
('pale_blush', 'Merlot - Pale Blush'),
('blush', 'Shiraz - Blush'),
('salmon', 'Tempranillo - Salmon'),
('deep_salmon', 'Petite Verdot - Deep Salmon')
)
'''
|
from zope.interface import Interface, Attribute
class IFeaturelet(Interface):
    """
    Marks a featurelet that can be plugged in to any IFeatureletSupporter.
    """
    id = Attribute("id", "Featurelet id.")
    title = Attribute("title", "Featurelet title.")
    # Typo fixes in the description strings below ("that is contains",
    # "when then featurelet"); wording otherwise unchanged.
    config_view = Attribute("config_view", """
        The name of the view that contains the configuration for this
        featurelet. It should be publishable on objects that provide the
        interface specified by installed_marker.
        """)
    installed_marker = Attribute("installed_marker", """
        A marker interface that should be provided by the featurelet
        supporter context object when the featurelet is installed, and
        removed when the featurelet is uninstalled.
        """)
    def getRequiredInterfaces():
        """
        Returns a tuple of any interfaces (additional to
        IFeatureletSupporter) that this featurelet requires of its
        supporters.  Featurelet supporters may either implement or
        singly adapt to each of the required interfaces.
        """
    def deliverPackage(obj):
        """
        Performs the actual installation of the functionality
        'package' of the featurelet into an object.  Raises an
        exception if the object does not implement or adapt to
        IFeatureletSupporter of any of the required interfaces.

        If the featurelet is already installed, the featurelet will
        only install any differences between the supporter's stored
        featurelet description and the description that the featurelet
        has currently.

        Returns the featurelet description as it was installed.
        """
    def removePackage(obj):
        """
        Removes the featurelet's functionality from the obj.  This is
        potentially destructive (it will remove any objects that the
        featurelet created, for instance) so it should be used with
        care.
        """
class IFeatureletSupporter(Interface):
    """
    Marks a featurelet supporter that can accommodate IFeaturelets.
    """
    def getInstalledFeatureletIds():
        """
        Returns the ids of all installed featurelets; retrieved from
        the information the featurelets place in the featurelet
        annotation.
        """
    def getFeatureletDescriptor(id):
        """
        Returns the featurelet descriptor for the provided featurelet
        id, or None if the featurelet isn't installed.
        """
    def installFeaturelet(featurelet):
        """
        Installs the provided featurelet into the supporter.
        """
    def removeFeaturelet(featurelet):
        """
        Uninstalls the provided featurelet by id, if it is installed.
        Can also take a full featurelet object, if you aren't into
        brevity.
        """
class IMenuSupporter(Interface):
    """
    Marks an object as being able to receive menu item mappings as
    provided by a featurelet.
    """
    def addMenuItem(menu_id, menu_item):
        """
        Adds a menu item to the specified menu.  Does nothing if a
        menu item with the same title already exists.

        o menu_id - A string representing the id of the menu w/ which
          the item should be registered.

        o menu_item - A mapping containing the menu item info.
        """
    def removeMenuItem(menu_id, menu_item_title):
        """
        Removes the menu item w/ the specified title from the
        specified menu.  Does nothing if a menu item w/ the specified
        title does not exist.

        o menu_id - A string representing the id of the menu from
          which the item should be removed.

        o menu_item_title - A string representing the title of the
          menu item that should be removed.
        """
    def getMenuItems(menu_id):
        """
        Returns a mapping of menu items for the specified menu id.
        Keys are the titles, values are menu item info mappings.
        """
class IFeatureletRegistry(Interface):
    """
    Defines a featurelet registration utility that featurelet
    supporters can use to discover which featurelets are available.
    """
    def registerFeaturelet(featurelet):
        """
        Registers a featurelet with the registry.  Raises an exception
        if a featurelet with the same id already exists.
        """
    def unregisterFeaturelet(featurelet_id):
        """
        Removes a featurelet with the specified id from the registry.
        Does nothing if no matching featurelet is registered.
        """
    def getFeaturelet(featurelet_id):
        """
        Retrieves the featurelet w/ the specified id.  Returns None if
        no matching featurelet is registered.
        """
    def getFeaturelets(supporter=None):
        """
        Returns all registered featurelets.  If 'supporter' is
        provided, then supporter is checked for adaptation to each of
        the featurelets' required interfaces, and only the featurelets
        which can actually be installed into the supporter are
        returned.
        """
|
#!/usr/bin/env python
# Problem constants: outstanding credit balance and annual interest rate.
balance = 999999
annualInterestRate = 0.18
#monthlyPaymentRate = 0.04
def years_payment(Mpay):
    '''
    Simulate 12 months of fixed payments against the global credit balance.

    Each month the fixed payment is subtracted first, then the monthly
    interest (annualInterestRate / 12) accrues on what remains.

    Mpay - fixed monthly payment
    return - remaining balance after 12 months (negative when overpaid)
    '''
    local_balance = balance
    for _ in range(1, 13):
        # Removed the unused total_pay/pay accumulators from the original.
        local_balance -= Mpay
        interest = annualInterestRate / 12.0 * local_balance
        local_balance += interest
    return local_balance
# Bisection search for the lowest fixed monthly payment that clears the
# balance within 12 months, to the nearest cent.
# lower bound: no interest at all; upper bound: full compounded balance / 12.
lower = balance / 12.0
upper = balance * pow((1+annualInterestRate/12.0),12) / 12.0
Mpay = (upper+lower)/2.0
rest = years_payment(Mpay)
while abs(rest) >= 0.01:
    # print 'rest = ','%.2f'%rest
    if rest > 0:
        # balance remains: payment too small, raise the lower bound
        lower = Mpay
    else:
        # overpaid: lower the upper bound
        upper = Mpay
    Mpay = (upper+lower)/2.0
    rest = years_payment(Mpay)
print 'Lowest Payment: ','%.2f'%Mpay
|
import numpy as np
from . IonSpeciesFluidQuantity import IonSpeciesFluidQuantity
from . OtherFluidQuantity import OtherFluidQuantity
class OtherIonSpeciesFluidQuantity(IonSpeciesFluidQuantity):
    """Per-ion-species 'other' fluid quantity.

    Indexing the object by ion name yields a plain OtherFluidQuantity
    holding only that species' data.
    """
    def __init__(self, name, data, description, grid, output):
        """
        Constructor.
        """
        attr = {'description': description}
        super().__init__(name=name, data=data, grid=grid, attr=attr, output=output)
        # Time vector skips the first grid time point -- presumably because
        # "other" quantities are only stored from the second saved step
        # onwards; TODO confirm against the output writer.
        self.time = grid.t[1:]
    def __repr__(self):
        return self.__str__()
    def __getitem__(self, name):
        """
        Direct access to data.
        """
        # Slice axis 1 (the species axis) at the requested ion's index.
        idx = self.ions.getIndex(name)
        return OtherFluidQuantity(name='{}_{}'.format(self.name, name), data=self.data[:,idx,:], description=self.description, grid=self.grid, output=self.output)
|
"""
Scoped CSS Styles for the graphs page
"""
PAGE_STYLE = {
"paddingBottom": "5em"
}
TITLE_STYLE = {
"margin": "0.5em"
}
CHART_ITEM_STYLE = {
"margin": "2em",
}
|
# -*- coding: utf-8 -*-
# @Author: Safer
# @Date: 2016-08-19 00:55:40
# @Last Modified by: Safer
# @Last Modified time: 2016-08-22 23:52:24
import sys
from PyQt5.QtCore import Qt
# from PyQt5.QtWidgets import QApplication, QMessageBox
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtSql import QSqlDatabase, QSqlQuery, QSqlQueryModel, QSqlRecord
class DB(QSqlDatabase):
    """Small chainable SQLite helper built on QSqlDatabase/QSqlQuery.

    Usage: db.from_('table').where_('id > 0').columns_('*').find_(), plus
    create_/update_/delete_ for writes.

    NOTE(review): all statements are assembled by plain string
    interpolation, so every value passed to where_/create_/update_ must be
    trusted -- this API is open to SQL injection otherwise.
    """

    def __init__(self, db_name='client.db'):
        """Open (creating if necessary) the SQLite file *db_name*."""
        super(DB, self).__init__()
        self.db = self.addDatabase('QSQLITE')
        # self.db.setDatabaseName(':memory:')
        self.db_name = db_name
        self.db.setDatabaseName(self.db_name)
        if not self.db.open():
            QMessageBox.critical(None, "Cannot open database",
                "Unable to establish a database connection.\n"
                "Click Cancel to exit.",
                QMessageBox.Cancel)
            # BUG FIX: __init__ must return None; the previous
            # 'return False' raised "TypeError: __init__() should return
            # None" instead of failing gracefully.
            return
        self.from_str = ''
        self.columns_str = ''
        self.where_str = ''
        self.query_str = ''

    def from_(self, from_str=''):
        """Set the target table name; returns self for chaining."""
        self.from_str = from_str
        return self

    def columns_(self, columns_str='*'):
        """Set the selected columns; '*' expands to all table columns."""
        self.columns_str = self._getTableColumns() if columns_str == '*' else columns_str
        return self

    def where_(self, where_str=''):
        """Set the WHERE clause; '' defaults to '<primary key> > 0'."""
        self.where_str = self._getTablePrimaryKey(
        ) + ' > 0' if where_str == '' else where_str
        return self

    def find_(self):
        """Run the SELECT built from from_/columns_/where_.

        :return: list of rows, each a list of column values.
        """
        columns = self.columns_str.split(',')
        select_str = ','.join(columns)
        sql = 'SELECT %s FROM %s WHERE %s' % (
            select_str, self.from_str, self.where_str)
        self._executeSql(sql)
        results = []
        while self._q.isValid():
            record = [self._q.value(i) for i in range(len(columns))]
            results.append(record)
            self._q.next()
        return results

    def create_(self, data):
        """INSERT *data* (column -> string value mapping) into the table."""
        keys = ','.join(["`" + k + "`" for k in data.keys()])
        values = ','.join(["'" + v + "'" for v in data.values()])
        sql = 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.from_str, keys, values)
        self._executeSql(sql)
        return True

    def update_(self, data):
        """UPDATE rows matching the current WHERE clause with *data*."""
        lists = data.items()
        length = len(lists)
        sql = ''
        string = ''
        num = 1
        for i in lists:
            # Renders each (column, value) pair as column='value'.
            string += "='".join(list(i)) + "'"
            if num < length:
                string += ", "
            num += 1
        sql = 'UPDATE %s SET %s WHERE %s' % (
            self.from_str, string, self.where_str)
        self._executeSql(sql)
        return True

    def delete_(self):
        """DELETE rows matching the current WHERE clause."""
        sql = 'DELETE FROM %s WHERE %s' % (self.from_str, self.where_str)
        self._executeSql(sql)
        return True

    # Execute a raw SQL statement directly.
    def executeSql_(self, sql=''):
        self._executeSql(sql)
        return True

    def _getTablePrimaryKey(self):
        """Return the table's first column name (used as the PK).

        NOTE(review): this reads the *first* row of PRAGMA table_info, i.e.
        the first column, not the column flagged as primary key -- correct
        only when the PK is the first column.  TODO confirm.
        """
        sql = "PRAGMA table_info([%s])" % (self.from_str)
        self._executeSql(sql)
        primary_key = ''
        if self._q.isValid():
            primary_key = self._q.value(1)
        print(primary_key)
        return primary_key

    def _getTableColumns(self):
        """Return the table's column names as a comma-separated string."""
        sql = "PRAGMA table_info([%s])" % (self.from_str)
        self._executeSql(sql)
        columns = []
        while self._q.isValid():
            # Column 1 of each PRAGMA table_info row is the column name.
            columns.append(self._q.value(1))
            self._q.next()
        columns = ','.join(columns)
        return columns

    def _getAllTablesName(self):
        """Return all user table names as a comma-separated string."""
        sql = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
        self._executeSql(sql)
        tables = []
        while self._q.isValid():
            tables.append(self._q.value(0))
            self._q.next()
        return ','.join(tables)

    def _executeSql(self, sql=''):
        """Prepare + execute *sql*, leaving the cursor on the first row."""
        self._q = QSqlQuery()
        self._q.prepare(sql)
        self._q.exec_()
        self._q.first()
# if __name__ == '__main__':
# app = QApplication(sys.argv)
# db = DB()
# db.from_('test')
# ######## select ########
# db.where_('id > 0')
# db.columns_('*')
# results = db.find_()
# print(results)
# ######## create ########
# # db.create_({'id': '3', 'name': 'safer'})
# ######## update ########
# # db.where_('id = 2')
# # db.update_({'name': 'saferssssssss'})
# ######## delete ########
# # db.where_('id = 2')
# # db.delete_()
|
from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import feedbackform
class formuser(UserCreationForm):
    """User registration form: Django's UserCreationForm with
    Bootstrap-styled widgets and a required first name / email."""
    username=forms.CharField(min_length=5,widget=forms.TextInput(attrs={'class':'form-control my-2','placeholder':'username'}))
    first_name=forms.CharField(required=True , widget=forms.TextInput(attrs={'class':'form-control my-2 col-6','placeholder':'First name'}))
    password1=forms.CharField(label="Password", widget=forms.PasswordInput(attrs={'class':'form-control my-2','placeholder':'Password'}))
    password2=forms.CharField(label="Confirm Password", widget=forms.PasswordInput(attrs={'class':'form-control my-2','placeholder':'Re-type password'}))
    email=forms.CharField(required=True, widget=forms.EmailInput(attrs={'class':'form-control my-2','placeholder':'email'}))
    class Meta:
        model=User
        # password1/password2 are handled by UserCreationForm itself.
        fields=['username','first_name','last_name','email']
        widgets={
            'last_name':forms.TextInput(attrs={'class':'form-control my-2 ','placeholder':'last name'})
        }
class feedform(forms.ModelForm):
    """Feedback form bound to the feedbackform model (email + 15-500
    character message)."""
    feedtxt=forms.CharField(max_length=500, required=True, min_length=15,widget=forms.Textarea(attrs={'class':'form-control my-2','placeholder':'Write your feedback here..',"rows":"5"}))
    email=forms.CharField(required=True, widget=forms.EmailInput(attrs={'class':'form-control my-2','placeholder':'email'}))
    class Meta:
        model=feedbackform
        fields=['email','feedtxt']
|
from aiogram.dispatcher import FSMContext
from aiogram.types import Message
from app.data.keyboards import EnglishWordMenu, EnglishWordMenuEnterRussianWord, EnglishWordMenuEnterEnglishWord
from app.data.states import EnglishForm
from app.data.texts import ENTER_RUSSIAN_WORD, ENTER_MORE_RUSSIAN_WORD
from app.keyboards.inline import MenuInlineButtons
from bot import telegram_bot
from loguru import logger
async def write_english_word_message_handler(message: Message, state: FSMContext):
    """Store the received English word in FSM state and prompt the user,
    with an inline menu, for its Russian translation."""
    # set_data replaces any previous state payload with just this word.
    await state.set_data({
        "english_word": message.text
    })
    menu = EnglishWordMenuEnterEnglishWord()
    menu_inline = MenuInlineButtons(
        row_width=3,
        data=menu.data,
        prefix=menu.prefix,
        callback_data=menu.callback_data
    )
    await message.reply(ENTER_RUSSIAN_WORD, reply_markup=menu_inline.buttons)
    # Advance the conversation to the 'write_russian' step.
    await EnglishForm.write_russian.set()
async def write_russian_word_message_handler(message: Message, state: FSMContext):
    """Collect one Russian translation for the pending English word and
    ask whether the user wants to add more.

    Appends message.text to the FSM 'russian_words' list (creating it on
    first use), then replies with the ENTER_MORE_RUSSIAN_WORD menu.
    """
    logger.debug(f"message2 {message.text}")
    data = await state.get_data()
    logger.debug(f"data {data}")
    # Single code path instead of the previous duplicated if/else:
    # start from the existing list (or a fresh one) and append.
    russian_words = data.get("russian_words", [])
    russian_words.append(message.text)
    await state.update_data({
        "russian_words": russian_words
    })
    menu = EnglishWordMenuEnterRussianWord()
    menu_reply = MenuInlineButtons(
        row_width=3,
        data=menu.data,
        prefix=menu.prefix,
        callback_data=menu.callback_data
    )
    await message.reply(
        text=ENTER_MORE_RUSSIAN_WORD,
        reply_markup=menu_reply.buttons
    )
#! /usr/bin/python
# coding=utf-8
import commands
import os
import sys
import time
import re
# CLI arguments: the action to perform and the leapsql server host
# ('no' means create the principal locally instead of fetching a keytab).
action=sys.argv[1]
leapsql_server=sys.argv[2]
# For Hive the test case must set the JDBC URL to
# "jdbc:hive2://"localhost":10000/;principal=hive/demo90.test.com@LEAP.COM;hive.server2.proxy.user=hive"
# With Kerberos disabled the URL is "jdbc:hive2://"localhost":10000/"
# Matches a dotted-quad IPv4 address at the start of a line.
# BUG FIX: the last-octet alternative was '255[0-5]' (i.e. 2550-2555),
# which both missed 250-255 and accepted bogus four-digit values; it is
# now '25[0-5]', mirroring the first three octets.
ipRegexForFile=re.compile("^((?:(2[0-4]\d)|(25[0-5])|([01]?\d\d?))\.){3}(?:(2[0-4]\d)|(25[0-5])|([01]?\d\d?))")
# Components whose principals this script knows how to kinit.
check_component=['impala','hive','hbase','kafka','sqoop','spark','flume','leapsql']
# Directory this script lives in; keytabs are stored next to it.
path=os.path.split(os.path.realpath(__file__))[0]
def getLegalIpsAndHostNames():
    """Parse /etc/hosts and return (set_of_ips, set_of_hostnames),
    skipping comments, localhost, repo and ntpd entries."""
    with open('/etc/hosts', 'r') as hostsFile:
        lines=hostsFile.readlines()
    ips=[]
    hostnames=[]
    for line in lines:
        line=line.strip()
        # Ignore comments, blank lines and service aliases we don't manage.
        if not line.startswith("#") and line.find("localhost") == -1 and "" != line and line.find("repo") == -1 and line.find("ntpd") == -1:
            matchObj=re.search(ipRegexForFile,line)
            if matchObj is not None:
                ip=matchObj.group()
                ips.append(ip)
                # Whatever remains after stripping the IP is the hostname.
                hostnames.append(line.replace(ip,"").strip())
    return set(ips),set(hostnames)
# Host inventory parsed once at import time.
ips,hostname=getLegalIpsAndHostNames()
def add_princs(user):
    """Create the Kerberos principal <user>@LEAP.COM and export its keytab
    next to this script.  NOTE(review): *user* is interpolated into a shell
    command -- only pass trusted component names."""
    os.system("echo -e 'admin\nadmin'|kadmin -p admin/admin -w admin -q 'addprinc "+user+"@LEAP.COM'")
    os.system("kadmin -p admin/admin -w admin -q 'xst -k "+path+"/"+user+".keytab "+user+"@LEAP.COM'")
def kinit_user(user):
    """Obtain a ticket for <user>@LEAP.COM from the local keytab."""
    os.system("kinit -kt "+path+"/"+user+".keytab "+user+"@LEAP.COM")
def kdestroy():
    """Destroy all cached Kerberos tickets."""
    os.system("kdestroy -A")
def check_kerberos():
    """Best-effort check whether Kerberos is provisioned on this host.

    Destroys any cached tickets, then counts entries under
    /etc/security/keytabs: 3 or more are taken to mean a working setup.

    return - 'running' or 'stop'
    """
    os.system('kdestroy -A')
    keytab_dir='/etc/security/keytabs'
    if os.path.exists(keytab_dir):
        # BUG FIX: 'll' is an interactive-shell alias unavailable in the
        # shell used by getoutput -- use 'ls'; and the output is a string,
        # so the old (string >= int) comparison was always true on
        # Python 2.  Convert to int before comparing.
        key_tabs=int(commands.getoutput("ls /etc/security/keytabs/|wc -l"))
        if key_tabs>=3:
            return 'running'
        return 'stop'
    # No keytab directory at all: report stopped instead of implicitly
    # returning None.
    return 'stop'
# Dispatch on the requested action.
if action=='check_kerberos':
    # Just report the Kerberos status.
    result = check_kerberos()
    if result=='running':
        print '\033[1;32;40mkerberos is running!\033[0m'
    else:
        print '\033[1;31;40mkerberos is stopped!\033[0m'
elif action=='zookeeper':
    # Zookeeper tests run without a ticket; clear the cache.
    kdestroy()
elif action=='leapsql':
    result=check_kerberos()
    if result=='running':
        print "--------------kinit "+action+",please waiting!---------------"
        keytab_path=path+'/leapsql.keytab'
        if os.path.exists(keytab_path):
            os.system("kinit -kt "+path+"/leapsql.keytab leapsql@LEAP.COM")
        else:
            if leapsql_server=='no':
                # No remote server configured: create the principal locally.
                add_princs(action)
                kinit_user(action)
            else:
                # Fetch the keytab from the leapsql server, then kinit.
                os.system("scp "+leapsql_server+":/etc/security/keytabs/leapsql.keytab "+path+"/")
                os.system("kinit -kt "+path+"/leapsql.keytab leapsql@LEAP.COM")
    else:
        print '------------please check kerberos to see if it is on!------------'
elif action in check_component:
    # Generic component path: reuse a cached keytab or create the principal.
    result=check_kerberos()
    if result=='running':
        print "--------------kinit "+action+",please waiting!---------------"
        keytab_path=path+'/'+action+'.keytab'
        if os.path.exists(keytab_path):
            kinit_user(action)
        else:
            add_princs(action)
            kinit_user(action)
    else:
        print '------------please check kerberos to see if it is on!------------'
elif action=='kdestroy':
    kdestroy()
else:
    print '################################'
    print '##\033[1;33;40mplease input correct action!\033[0m##'
    print '################################'
|
import pathlib
from setuptools import setup, find_packages
# Package metadata; the long description is the repository README.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()

setup(
    name="tuneup",
    version="0.0.8",
    description="Global optimizer comparison and combination",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/microprediction/tuneup",
    author="microprediction",
    author_email="pcotton@intechinvestments.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    packages=["tuneup"],
    test_suite='pytest',
    tests_require=['pytest'],
    include_package_data=True,
    # Deduplicated: "statsmodels" and "wheel" were each listed twice.
    # NOTE(review): "sklearn" is the deprecated PyPI alias for
    # "scikit-learn"; left unchanged to avoid altering the dependency set.
    install_requires=["pandas", "numpy", "pytest", "python-dateutil", "statsmodels", "microfilter",
                      "optuna", "sklearn", "scipy", "microconventions", "deap", "wheel", "hyperopt",
                      "ax-platform", "pysot", "poap", "microprediction", "Platypus-Opt", "sigopt",
                      "pymoo"],
    entry_points={
        "console_scripts": [
            "tuneup=tuneup.__main__:main",
        ]
    },
)
|
"""3-1. Names: Store the names of a few of your friends in a list called names. Print
each person’s name by accessing each element in the list, one at a time."""
names = ['Tamer', 'Roberta', 'Mike', 'Dalia', 'Vivian', 'Brody', 'Tori']
print(names[0])
print(names[1])
print(names[2])
print(names[3])
print(names[4])
print(names[5])
print(names[6]) |
# Demo of file "w" (write) mode.
# The file is created if it does not exist; every run truncates
# (overwrites) the previous contents.
file2 = open("file2", "w")
# Write-only mode cannot read: io.UnsupportedOperation: not readable
# data = file2.read()
# 1. Write into the internal buffer.
file2.write("xianqian\n")
file2.write("i miss you dear!")
# 2. Flush the buffer.
# Pushes buffered data to the file immediately instead of waiting until
# the file is closed (f.close()); a full buffer flushes automatically,
# and "\n" flushes line-buffered streams too.
file2.flush()
# file2.truncate() # deletes nothing
# file2.truncate(8) # keeps the first 8 characters "xianqian", drops the rest
file2.truncate(0) # argument 0: delete everything
# close() flushes the buffer as well.
file2.close()
# Writing a file with a "with" block:
# with open("file2", mode="w") as f:
# data = f.write("*********")
|
# Changed news to research in this file
import graphene
from graphene_django.types import DjangoObjectType
# from bootcamp.news.models import News
from bootcamp.research.models import Research
from bootcamp.helpers import paginate_data
class ResearchType(DjangoObjectType):  # Changed news to research
    """DjangoObjectType to access the Research model."""  # Changed news to research
    count_thread = graphene.Int()
    count_likers = graphene.Int()
    class Meta:
        # model = News
        model = Research
    def resolve_count_thread(self, info, **kwargs):
        # Number of replies in this item's thread.
        return self.get_thread().count()
    def resolve_count_likers(self, info, **kwargs):
        return self.get_likers().count()
    def resolve_count_attendees(self, info, **kwargs):
        # NOTE(review): assumes the Research model exposes get_attendees();
        # no count_attendees field is declared on this type -- confirm this
        # resolver is actually reachable.
        return self.get_attendees().count()
    def resolve_get_thread(self, info, **kwargs):
        return self.get_thread()
    def resolve_get_likers(self, info, **kwargs):
        return self.get_likers()
    def resolve_get_attendees(self, info, **kwargs):
        return self.get_attendees()
class ResearchPaginatedType(graphene.ObjectType):  # Changed news to research
    """A paginated type generic object to provide pagination to the research
    graph."""  # Changed news to research
    page = graphene.Int()        # current page number
    pages = graphene.Int()       # total number of pages
    has_next = graphene.Boolean()
    has_prev = graphene.Boolean()
    # objects = graphene.List(NewsType)
    objects = graphene.List(ResearchType)
class ResearchQuery(object):  # Changed news to research
    """Root query mixin exposing Research objects over GraphQL.

    Ported from the old News queries; commented lines kept for history.
    """
    # all_news = graphene.List(NewsType)
    all_research = graphene.List(ResearchType)
    # paginated_news = graphene.Field(NewsPaginatedType, page=graphene.Int())
    paginated_research = graphene.Field(ResearchPaginatedType, page=graphene.Int())
    # news = graphene.Field(NewsType, uuid_id=graphene.String())
    research = graphene.Field(ResearchType, uuid_id=graphene.String())

    def resolve_all_research(self, info, **kwargs):
        """Return every top-level (non-reply) research object."""
        # return News.objects.filter(reply=False)
        return Research.objects.filter(reply=False)

    def resolve_paginated_research(self, info, page):  # Change news to research
        """Resolver functions to query the objects and turn the queryset into
        the PaginatedType using the helper function"""
        page_size = 30
        # qs = News.objects.filter(reply=False)
        qs = Research.objects.filter(reply=False)
        # return paginate_data(qs, page_size, page, NewsPaginatedType)
        return paginate_data(qs, page_size, page, ResearchPaginatedType)

    def resolve_research(self, info, **kwargs):  # Changed news to research
        """Look a research object up by uuid_id; returns None when absent.

        BUG FIX: the debug print ran before the None check, so a missing
        uuid_id crashed with TypeError ("uuid_id" + None) instead of
        returning None; the duplicate print was dropped as well.
        """
        uuid_id = kwargs.get("uuid_id")
        if uuid_id is not None:
            print("uuid_id" + uuid_id)
            return Research.objects.get(uuid_id=uuid_id)
        return None
class ResearchMutation(graphene.Mutation):  # Changed news to research
    """Mutation to create research objects in an effective way.

    NOTE(review): content/user/parent are declared twice (inside Arguments
    and again at class level), and mutate() neither takes the usual
    (root, info) parameters nor returns a payload -- this looks
    unfinished; confirm before wiring it into the schema.
    """  # Changed news to research
    class Arguments:
        content = graphene.String()
        user = graphene.ID()
        parent = graphene.ID()
    content = graphene.String()
    user = graphene.ID()
    parent = graphene.ID()
    # news = graphene.Field(lambda: News)
    research = graphene.Field(lambda: Research)
    def mutate(self, **kwargs):
        # TODO(review): placeholder body -- only logs its arguments.
        print(kwargs)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
from arm_frame_solution import ArmFrameSolution
class ArmFrameMain():
    """
    Handles one connection instance: receiving, unpacking, parsing and
    dispatching frames.
    """
    def __init__(self, handler):
        # handler: the connection/socket object frames are read from.
        self.handler = handler
    def main_receivor(self,):
        """
        Main entry point for a connection instance: receive data, unpack
        it, parse it and dispatch it.

        :rtype: 1 when a non-empty frame was processed, -1 when
            unpacking or parsing fails; NOTE(review): the empty-frame
            branch falls through and implicitly returns None, not the 0
            the original docstring claimed.
        """
        servant = ArmFrameSolution()
        # Receive raw bytes from the connection.
        origin_frame = servant.receive(self.handler)
        if len(origin_frame) > 0:
            print "UP MAIN THREAD STARTED !"
            # Unpack the raw frame into a protobuf message dict.
            protobuf_msg_dic = servant.unpack(origin_frame)
            if protobuf_msg_dic == '':
                return -1
            # Deserialize into a protobuf instance.
            protobuf_inst = servant.parse(protobuf_msg_dic)
            if protobuf_inst == '':
                return -1
            # Dispatch the parsed message.
            result = servant.dispatch(protobuf_inst, self.handler)
            return 1
        else:
            pass
            # print 'get on empyt pack'
# -*- coding: utf-8 -*-
'''
Test for kubernetes management
.. versionadded:: 2016.3.0
'''
# Import pytohn libs
from __future__ import absolute_import
import json
import yaml
import hashlib
import base64
import time
from subprocess import Popen, PIPE
import salt.modules.k8s as k8s
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath, skip_if_binaries_missing
ensure_in_syspath('../../')
# from unittest import TestCase
TestCase.maxDiff = None
@skip_if_binaries_missing(['kubectl'])
class TestK8SNamespace(TestCase):
    """Integration tests: compare salt.modules.k8s namespace calls against
    the local kubectl binary (apiserver expected on 127.0.0.1:8080)."""
    def test_get_namespaces(self):
        # Module and kubectl must report the same number of namespaces.
        res = k8s.get("namespaces", apiserver_url="http://127.0.0.1:8080")
        a = len(res.get("items"))
        proc = Popen(["kubectl", "get", "namespaces", "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = len(kubectl_out.get("items"))
        self.assertEqual(a, b)
    def test_get_one_namespace(self):
        res = k8s.get("namespaces", "default", apiserver_url="http://127.0.0.1:8080")
        a = res.get("metadata", {}).get("name", "a")
        proc = Popen(["kubectl", "get", "namespaces", "default", "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = kubectl_out.get("metadata", {}).get("name", "b")
        self.assertEqual(a, b)
    def test_create_namespace(self):
        # Random 16-char namespace name derived from the current time.
        hash = hashlib.sha1()
        hash.update(str(time.time()))
        nsname = hash.hexdigest()[:16]
        res = k8s.create_namespace(nsname, apiserver_url="http://127.0.0.1:8080")
        proc = Popen(["kubectl", "get", "namespaces", nsname, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        # if creation is failed, kubernetes return non json error message
        self.assertTrue(isinstance(kubectl_out, dict))
@skip_if_binaries_missing(['kubectl'])
class TestK8SServices(TestCase):
    """Integration tests for service get/create/update against a live
    apiserver, cross-checked with kubectl.

    NOTE(review): the tests rely on short time.sleep() calls for etcd to
    settle and never wait on the kubectl Popen handles -- inherently racy.
    """
    def setUp(self):
        # Random DNS-safe name; seeded with 'a' because a DNS label must
        # start with a letter.
        hash = hashlib.sha1()
        hash.update(str(time.time()))
        ## first char for DNS label must be [a-z]
        self.name = "a"+hash.hexdigest()[:15]
        self.default_service = {
            'apiVersion': 'v1',
            'kind': 'Service',
            'metadata': {
                'labels': {
                    'name': self.name
                },
                'name': self.name,
                'namespace': "default"},
            'spec': {
                'ports': [
                    {'name': 'dns', 'port': 53, 'protocol': 'UDP'},
                    {'name': 'dns-tcp', 'port': 53, 'protocol': 'TCP'}
                ],
                'selector': {'name': self.name}
            }
        }
    def test_get_services(self):
        # Create via kubectl, then compare list lengths.
        name = self.name
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(yaml.dump(self.default_service))
        proc = Popen(["kubectl", "create", "-f", filename], stdout=PIPE)
        time.sleep(0.1)
        res = k8s.get("services", "default", apiserver_url="http://127.0.0.1:8080")
        a = len(res.get("items"))
        proc = Popen(["kubectl", "get", "services", "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = len(kubectl_out.get("items"))
        self.assertEqual(a, b)
    def test_get_one_service(self):
        name = self.name
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(yaml.dump(self.default_service))
        proc = Popen(["kubectl", "create", "-f", filename], stdout=PIPE)
        time.sleep(0.2)
        res = k8s.get("services", "default", name=name, apiserver_url="http://127.0.0.1:8080")
        a = res.get("metadata", {}).get("name", "a")
        proc = Popen(["kubectl", "get", "services", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = kubectl_out.get("metadata", {}).get("name", "b")
        self.assertEqual(a, b)
    def test_create_service(self):
        # Create via the module, verify via kubectl.
        name = self.name
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(yaml.dump(self.default_service))
        res = k8s.create_service("default", filename, name, apiserver_url="http://127.0.0.1:8080")
        proc = Popen(["kubectl", "get", "services", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        # if creation is failed, kubernetes return non json error message
        self.assertTrue(isinstance(kubectl_out, dict))
    def test_update_service(self):
        # An update must keep the uid and clusterIP but change the port.
        name = self.name
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(yaml.dump(self.default_service))
        proc = Popen(["kubectl", "create", "-f", filename], stdout=PIPE)
        time.sleep(0.2)
        p = Popen(["kubectl", "get", "services", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(p.communicate()[0])
        uid = kubectl_out.get("metadata", {}).get("uid")
        old_ip = kubectl_out.get("spec", {}).get("clusterIP", "old")
        updated_service = {
            'apiVersion': 'v1',
            'kind': 'Service',
            'metadata': {
                'labels': {
                    'name': name
                },
                'name': name,
                'namespace': "default"},
            'spec': {
                'ports': [
                    {'name': 'dns', 'port': 54, 'protocol': 'UDP'},
                    {'name': 'dns-tcp', 'port': 54, 'protocol': 'TCP'}
                ],
                'selector': {'name': name}
            }
        }
        with open(filename, 'w') as f:
            f.write(yaml.dump(updated_service))
        res = k8s.create_service("default", filename, name,
                                 apiserver_url="http://127.0.0.1:8080",
                                 update=True)
        proc = Popen(["kubectl", "get", "services", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        # if creation is failed, kubernetes return non json error message
        port = kubectl_out.get("spec", {}).get("ports", [])[0].get("port", "0")
        updated_uid = kubectl_out.get("metadata", {}).get("uid")
        new_ip = kubectl_out.get("spec", {}).get("clusterIP", "new")
        self.assertEqual(uid, updated_uid)
        self.assertEqual(port, 54)
        self.assertEqual(old_ip, new_ip)
# def test_recreate_service(self):
# name = self.name
# filename = "/tmp/{0}.yaml".format(name)
# with open(filename, 'w') as f:
# f.write(yaml.dump(self.default_service))
# proc = Popen(["kubectl", "create", "-f", filename], stdout=PIPE)
# time.sleep(0.2)
# p = Popen(["kubectl", "get", "services", name, "-o", "json"], stdout=PIPE)
# kubectl_out = json.loads(p.communicate()[0])
# uid = kubectl_out.get("metadata", {}).get("uid")
# old_ip = kubectl_out.get("spec", {}).get("clusterIP", "old")
# updated_service = {
# 'apiVersion': 'v1',
# 'kind': 'Service',
# 'metadata': {
# 'labels': {
# 'name': name
# },
# 'name': name,
# 'namespace': "default"},
# 'spec': {
# 'clusterIP': '10.116.0.3',
# 'ports': [
# {'name': 'dns', 'port': 54, 'protocol': 'UDP'},
# {'name': 'dns-tcp', 'port': 54, 'protocol': 'TCP'}
# ],
# 'selector': {'name': name}
# }
# }
# with open(filename, 'w') as f:
# f.write(yaml.dump(updated_service))
# res = k8s.create_service("default", name, filename,
# apiserver_url="http://127.0.0.1:8080",
# update=True)
# proc = Popen(["kubectl", "get", "services", name, "-o", "json"], stdout=PIPE)
# kubectl_out = json.loads(proc.communicate()[0])
# # if creation is failed, kubernetes return non json error message
# port = kubectl_out.get("spec", {}).get("ports", [])[0].get("port", "0")
# updated_uid = kubectl_out.get("metadata", {}).get("uid")
# new_ip = kubectl_out.get("spec", {}).get("clusterIP", "new")
# self.assertNotEqual(uid, updated_uid)
# self.assertEqual(port, 54)
@skip_if_binaries_missing(['kubectl'])
class TestK8SSecrets(TestCase):
    def setUp(self):
        """Build a random secret name and a Secret manifest holding one
        base64-encoded test value."""
        hash = hashlib.sha1()
        hash.update(str(time.time()))
        self.name = hash.hexdigest()[:16]
        data = {"testsecret": base64.encodestring("teststring")}
        self.request = {
            "apiVersion": "v1",
            "kind": "Secret",
            "metadata": {
                "name": self.name,
                "namespace": "default",
            },
            "data": data,
        }
    def test_get_secrets(self):
        # Module and kubectl must agree on the number of secrets.
        res = k8s.get("secrets", "default", apiserver_url="http://127.0.0.1:8080")
        a = len(res.get("items", []))
        proc = Popen(["kubectl", "--namespace=default", "get", "secrets", "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = len(kubectl_out.get("items", []))
        self.assertEqual(a, b)
    def test_get_one_secret(self):
        # Create the secret via kubectl, then fetch it through the module.
        name = self.name
        filename = "/tmp/{0}.json".format(name)
        with open(filename, 'w') as f:
            json.dump(self.request, f)
        create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
        # wee need to give kubernetes time save data in etcd
        time.sleep(0.1)
        res = k8s.get("secrets", "default", name, apiserver_url="http://127.0.0.1:8080")
        a = res.get("metadata", {}).get("name", "a")
        proc = Popen(["kubectl", "--namespace=default", "get", "secrets", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = kubectl_out.get("metadata", {}).get("name", "b")
        self.assertEqual(a, b)
def test_get_decoded_secret(self):
name = self.name
filename = "/tmp/{0}.json".format(name)
with open(filename, 'w') as f:
json.dump(self.request, f)
create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
# wee need to give etcd to populate data on all nodes
time.sleep(0.1)
res = k8s.decode_secrets("default", name, apiserver_url="http://127.0.0.1:8080")
a = res.get("data", {}).get("testsecret", )
self.assertEqual(a, "teststring")
def test_create_secret(self):
name = self.name
names = []
expected_data = {}
for i in range(2):
names.append("/tmp/{0}-{1}".format(name, i))
with open("/tmp/{0}-{1}".format(name, i), 'w') as f:
expected_data["{0}-{1}".format(name, i)] = base64.b64encode("{0}{1}".format(name, i))
f.write("{0}{1}".format(name, i))
res = k8s.create_secret("default", name, names, apiserver_url="http://127.0.0.1:8080")
proc = Popen(["kubectl", "--namespace=default", "get", "secrets", name, "-o", "json"], stdout=PIPE)
kubectl_out = json.loads(proc.communicate()[0])
# if creation is failed, kubernetes return non json error message
b = kubectl_out.get("data", {})
self.assertTrue(isinstance(kubectl_out, dict))
self.assertEqual(expected_data, b)
def test_update_secret(self):
name = self.name
filename = "/tmp/{0}.json".format(name)
with open(filename, 'w') as f:
json.dump(self.request, f)
create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
# wee need to give kubernetes time save data in etcd
time.sleep(0.1)
expected_data = {}
names = []
for i in range(3):
names.append("/tmp/{0}-{1}-updated".format(name, i))
with open("/tmp/{0}-{1}-updated".format(name, i), 'w') as f:
expected_data["{0}-{1}-updated".format(name, i)] = base64.b64encode("{0}{1}-updated".format(name, i))
f.write("{0}{1}-updated".format(name, i))
res = k8s.update_secret("default", name, names, apiserver_url="http://127.0.0.1:8080")
# if creation is failed, kubernetes return non json error message
proc = Popen(["kubectl", "--namespace=default", "get", "secrets", name, "-o", "json"], stdout=PIPE)
kubectl_out = json.loads(proc.communicate()[0])
# if creation is failed, kubernetes return non json error message
b = kubectl_out.get("data", {})
self.assertTrue(isinstance(kubectl_out, dict))
self.assertEqual(expected_data, b)
def test_delete_secret(self):
name = self.name
filename = "/tmp/{0}.json".format(name)
with open(filename, 'w') as f:
json.dump(self.request, f)
create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
# wee need to give kubernetes time save data in etcd
time.sleep(0.1)
res = k8s.delete("secret", "default", name, apiserver_url="http://127.0.0.1:8080")
time.sleep(0.1)
proc = Popen(["kubectl", "--namespace=default", "get", "secrets", name, "-o", "json"], stdout=PIPE, stderr=PIPE)
kubectl_out, err = proc.communicate()
# stdout is empty, stderr is showing something like "not found"
self.assertEqual('', kubectl_out)
self.assertEqual('Error from server: secrets "{0}" not found\n'.format(name), err)
@skip_if_binaries_missing(['kubectl'])
class TestK8SResourceQuotas(TestCase):
    """Integration tests for resource-quota operations, cross-checked
    against ``kubectl`` on a local apiserver (http://127.0.0.1:8080).

    Each test uses a fresh namespace named after the random test name.
    """
    def setUp(self):
        # Unique per-run name reused for both the quota and its namespace.
        hash = hashlib.sha1()
        hash.update(str(time.time()))
        self.name = hash.hexdigest()[:16]
    def test_get_resource_quotas(self):
        """Listing quotas in a namespace matches kubectl's item count."""
        name = self.name
        namespace = self.name
        create_namespace = Popen(["kubectl", "create", "namespace", namespace], stdout=PIPE)
        request = """
apiVersion: v1
kind: ResourceQuota
metadata:
  name: {0}
spec:
  hard:
    cpu: "20"
    memory: 1Gi
    persistentvolumeclaims: "10"
    pods: "10"
    replicationcontrollers: "20"
    resourcequotas: "1"
    secrets: "10"
    services: "5"
""".format(name)
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(request)
        create = Popen(["kubectl", "--namespace={0}".format(namespace), "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save the data in etcd
        time.sleep(0.2)
        res = k8s.get("quota", namespace, apiserver_url="http://127.0.0.1:8080")
        a = len(res.get("items", []))
        proc = Popen(["kubectl", "--namespace={0}".format(namespace), "get", "quota", "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = len(kubectl_out.get("items", []))
        self.assertEqual(a, b)
    def test_get_one_resource_quota(self):
        """Fetching a single quota by name matches kubectl's view."""
        name = self.name
        namespace = self.name
        create_namespace = Popen(["kubectl", "create", "namespace", namespace], stdout=PIPE)
        request = """
apiVersion: v1
kind: ResourceQuota
metadata:
  name: {0}
spec:
  hard:
    cpu: "20"
    memory: 1Gi
    persistentvolumeclaims: "10"
    pods: "10"
    replicationcontrollers: "20"
    resourcequotas: "1"
    secrets: "10"
    services: "5"
""".format(name)
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(request)
        create = Popen(["kubectl", "--namespace={0}".format(namespace), "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save the data in etcd
        time.sleep(0.2)
        res = k8s.get("quotas", namespace, name, apiserver_url="http://127.0.0.1:8080")
        a = res.get("metadata", {}).get("name", "a")
        proc = Popen(["kubectl", "--namespace={0}".format(namespace), "get", "quota", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = kubectl_out.get("metadata", {}).get("name", "b")
        self.assertEqual(a, b)
    def test_create_resource_quota(self):
        """create_resource_quota produces a quota kubectl can read back."""
        name = self.name
        namespace = self.name
        create_namespace = Popen(["kubectl", "create", "namespace", namespace], stdout=PIPE)
        quota = {
            "cpu": "20",
            "memory": "1Gi"
        }
        res = k8s.create_resource_quota(namespace, quota, name=name, apiserver_url="http://127.0.0.1:8080")
        proc = Popen(["kubectl", "--namespace={0}".format(namespace), "get", "quota", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        self.assertTrue(isinstance(kubectl_out, dict))
    def test_update_resource_quota(self):
        """With update=True an existing quota's hard limits are replaced."""
        name = self.name
        namespace = self.name
        create_namespace = Popen(["kubectl", "create", "namespace", namespace], stdout=PIPE)
        request = """
apiVersion: v1
kind: ResourceQuota
metadata:
  name: {0}
spec:
  hard:
    cpu: "20"
    memory: 1Gi
    persistentvolumeclaims: "10"
    pods: "10"
    replicationcontrollers: "20"
    resourcequotas: "1"
    secrets: "10"
    services: "5"
""".format(name)
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(request)
        create = Popen(["kubectl", "--namespace={0}".format(namespace), "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save the data in etcd
        time.sleep(0.2)
        quota = {
            "cpu": "10",
            "memory": "2Gi"
        }
        res = k8s.create_resource_quota(namespace, quota, name=name, apiserver_url="http://127.0.0.1:8080", update=True)
        proc = Popen(["kubectl", "--namespace={0}".format(namespace), "get", "quota", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        limit = kubectl_out.get("spec").get("hard").get("memory")
        self.assertEqual("2Gi", limit)
    def test_delete_resource_quota(self):
        """After delete, kubectl reports the quota as not found."""
        name = self.name
        namespace = self.name
        create_namespace = Popen(["kubectl", "create", "namespace", namespace], stdout=PIPE)
        request = """
apiVersion: v1
kind: ResourceQuota
metadata:
  name: {0}
spec:
  hard:
    cpu: "20"
    memory: 1Gi
    persistentvolumeclaims: "10"
    pods: "10"
    replicationcontrollers: "20"
    resourcequotas: "1"
    secrets: "10"
    services: "5"
""".format(name)
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(request)
        create = Popen(["kubectl", "--namespace={0}".format(namespace), "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save the data in etcd
        time.sleep(0.5)
        res = k8s.delete("quota", namespace, name, apiserver_url="http://127.0.0.1:8080")
        time.sleep(0.1)
        proc = Popen(["kubectl", "--namespace={0}".format(namespace), "get", "resourcequotas", name, "-o", "json"], stdout=PIPE, stderr=PIPE)
        kubectl_out, err = proc.communicate()
        # stdout is empty, stderr shows something like "not found"
        self.assertEqual('', kubectl_out)
        self.assertEqual('Error from server: resourcequotas "{0}" not found\n'.format(name), err)
@skip_if_binaries_missing(['kubectl'])
class TestK8SAnnotate(TestCase):
    """Integration test for k8s.annotate, cross-checked with kubectl."""
    def setUp(self):
        # Service names must start with a letter, hence the "a" prefix.
        hash = hashlib.sha1()
        hash.update(str(time.time()))
        self.name = "a"+hash.hexdigest()[:15]
        # Minimal service manifest used as the annotation target.
        self.default_service = {
            'apiVersion': 'v1',
            'kind': 'Service',
            'metadata': {
                'labels': {
                    'name': self.name
                },
                'name': self.name,
                'namespace': "default"},
            'spec': {
                'ports': [
                    {'name': 'dns', 'port': 53, 'protocol': 'UDP'},
                    {'name': 'dns-tcp', 'port': 53, 'protocol': 'TCP'}
                ],
                'selector': {'name': self.name}
            }
        }
    def test_add_annotation_value(self):
        """annotate() adds a metadata annotation visible via kubectl."""
        name = self.name
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(yaml.dump(self.default_service))
        proc = Popen(["kubectl", "create", "-f", filename], stdout=PIPE)
        time.sleep(0.2)
        res = k8s.annotate("services", "default", name, "test", "1234", apiserver_url="http://127.0.0.1:8080")
        proc = Popen(["kubectl", "get", "services", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = kubectl_out.get("metadata", {}).get("annotations", {}).get("test")
        self.assertEqual("1234", b)
@skip_if_binaries_missing(['kubectl'])
class TestK8SLimitRange(TestCase):
    """Integration tests for limit-range operations, cross-checked
    against ``kubectl`` on a local apiserver (http://127.0.0.1:8080)."""
    def setUp(self):
        # Unique per-run limit-range name derived from the timestamp.
        hash = hashlib.sha1()
        hash.update(str(time.time()))
        self.name = hash.hexdigest()[:16]
    def test_create_limit_range(self):
        """create_limit_range produces an object kubectl can read back."""
        name = self.name
        limits = [{
            "type": "Container",
            "defaultRequest": {
                "cpu": "100m"
            }
        }]
        res = k8s.create_limit_range("default", limits, name=name, apiserver_url="http://127.0.0.1:8080")
        proc = Popen(["kubectl", "--namespace=default", "get", "limits", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        self.assertTrue(isinstance(kubectl_out, dict))
    def test_update_limit_range(self):
        """With update=True the existing limit range is replaced.

        NOTE(review): the module takes a {type: spec} dict here while
        create uses a list of specs -- confirm both shapes are intended.
        """
        name = self.name
        request = """
apiVersion: v1
kind: LimitRange
metadata:
  name: {0}
spec:
  limits:
  - default:
      cpu: 200m
      memory: 512Mi
    defaultRequest:
      cpu: 100m
      memory: 256Mi
    type: Container
""".format(name)
        limits = {
            "Container": {
                "defaultRequest": {
                    "cpu": "100m"
                }
            }
        }
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(request)
        create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save the data in etcd
        time.sleep(0.1)
        res = k8s.create_limit_range("default", limits, name=name, apiserver_url="http://127.0.0.1:8080", update=True)
        proc = Popen(["kubectl", "--namespace=default", "get", "limits", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        limit = kubectl_out.get("spec").get("limits")[0].get("defaultRequest").get("cpu")
        self.assertEqual("100m", limit)
    def test_get_limit_ranges(self):
        """Listing limit ranges matches kubectl's item count."""
        res = k8s.get("limits", "default", apiserver_url="http://127.0.0.1:8080")
        a = len(res.get("items", []))
        proc = Popen(["kubectl", "--namespace=default", "get", "limits", "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = len(kubectl_out.get("items", []))
        self.assertEqual(a, b)
    def test_get_one_limit_range(self):
        """Fetching a single limit range by name matches kubectl's view."""
        name = self.name
        request = """
apiVersion: v1
kind: LimitRange
metadata:
  name: {0}
spec:
  limits:
  - default:
      cpu: 200m
      memory: 512Mi
    defaultRequest:
      cpu: 100m
      memory: 256Mi
    type: Container
""".format(name)
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(request)
        create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save the data in etcd
        time.sleep(0.1)
        res = k8s.get("limit", "default", name, apiserver_url="http://127.0.0.1:8080")
        a = res.get("metadata", {}).get("name", "a")
        proc = Popen(["kubectl", "--namespace=default", "get", "limits", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = kubectl_out.get("metadata", {}).get("name", "b")
        self.assertEqual(a, b)
    def test_delete_limit_range(self):
        """After delete, kubectl reports the limit range as not found."""
        name = self.name
        request = """
apiVersion: v1
kind: LimitRange
metadata:
  name: {0}
spec:
  limits:
  - default:
      cpu: 200m
      memory: 512Mi
    defaultRequest:
      cpu: 100m
      memory: 256Mi
    type: Container
""".format(name)
        filename = "/tmp/{0}.yaml".format(name)
        with open(filename, 'w') as f:
            f.write(request)
        create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save the data in etcd
        time.sleep(0.1)
        res = k8s.delete("limit", "default", name, apiserver_url="http://127.0.0.1:8080")
        a = res.get("metadata", {}).get("name", "a")
        proc = Popen(["kubectl", "--namespace=default", "get", "limits", name, "-o", "json"], stdout=PIPE, stderr=PIPE)
        kubectl_out, err = proc.communicate()
        # stdout is empty, stderr shows something like "not found"
        self.assertEqual('', kubectl_out)
        self.assertEqual('Error from server: limitranges "{0}" not found\n'.format(name), err)
@skip_if_binaries_missing(['kubectl'])
class TestK8SReplicationController(TestCase):
    """Tests around replication-controller helpers.

    NOTE(review): Python 2 syntax (`print` statement) -- this class will
    not parse under Python 3 as written.
    """
    def setUp(self):
        # Unique per-run name derived from the timestamp.
        hash = hashlib.sha1()
        hash.update(str(time.time()))
        self.name = hash.hexdigest()[:16]
    def test_check_timeout_calculation(self):
        """Exercises the private timeout calculation on a probe spec.

        NOTE(review): only prints the result -- no assertion is made.
        """
        calc = k8s._calculate_safe_timeout
        containers = [{
            "name": "test1",
            "livenessProbe": {
                "initialDelaySeconds": 10,
                "timeoutSeconds": 5
            }
        }]
        print calc(containers)
if __name__ == '__main__':
    # Run the full integration suite; TestK8SNamespace and TestK8SServices
    # are defined earlier in this file.
    from integration import run_tests
    run_tests(TestK8SNamespace,
              TestK8SSecrets,
              TestK8SResourceQuotas,
              TestK8SLimitRange,
              TestK8SServices,
              TestK8SAnnotate,
              TestK8SReplicationController,
              needs_daemon=False)
|
# -*- encoding: ms949 -*-
# Demo: plot a ROC curve for an SVC on an imbalanced blobs dataset
# (4000 vs 500 samples) and mark the point where the decision threshold is 0.
from mglearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import roc_curve
import matplotlib.pylab as plt
import numpy as np
X, y = make_blobs(n_samples=(4000, 500), centers=2, cluster_std=[7.0, 2],
                  random_state=22)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
svc = SVC(gamma=.05).fit(X_train, y_train)
# roc_curve uses the continuous decision-function scores, not labels.
fpr, tpr, thresholds = roc_curve(y_test, svc.decision_function(X_test))
plt.plot(fpr, tpr, label="ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR (recall)")
# Threshold closest to zero (the SVC's default decision boundary).
close_zero = np.argmin(np.abs(thresholds))
plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10,
         label="threshold 0", fillstyle="none", c='k', mew=2)
plt.legend(loc=4)
plt.show()
def countZeroes(arr, l, r):
    """Return the index of the first 0 in arr[l..r] using binary search.

    Assumes arr is sorted so that every 1 precedes every 0.
    Returns -1 when the range contains no 0.

    :param arr: list of 0s and 1s, all 1s before all 0s
    :param l: left index of the search range (inclusive)
    :param r: right index of the search range (inclusive)
    """
    # Single-element range: the original returned -1 here even when
    # arr[l] == 0 (e.g. countZeroes([0], 0, 0)); handle it explicitly.
    if l == r:
        return l if arr[l] == 0 else -1
    if (r - l) == 1:
        return l if arr[l] == 0 else (r if arr[r] == 0 else -1)
    if l < r:
        mid = (l + r) // 2
        if arr[mid] == 0:
            # First zero is at mid or earlier.
            return countZeroes(arr, l, mid)
        if arr[mid] == 1:
            # All of arr[l..mid] are 1s; search the right half.
            return countZeroes(arr, mid + 1, r)
    return -1


arr = [1, 1, 1, 0, 0, 0, 0]
print(countZeroes(arr, 0, len(arr) - 1))
# -*- coding: utf-8 -*-
"""
操作文件辅助类
"""
import os
import codecs
import json
__author__ = 'JohnnyB0Y'
class FileOperation:
    """Helper for writing generated files and reading template files.

    (Original Chinese comments translated to English.)
    """

    def __init__(self):
        # Current working directory of the script.
        self.current_path = os.getcwd()
        # Directory where generated files are saved.
        self.save_path = os.getcwd() + '/generating_object_files'
        # Directory that holds the template files.
        self.file_templates_path = 'file_templates'

    def write_to_file(self, file_name, content, suffixes):
        """Write *content* to <save_path>/<file_name>.<suffixes>.

        Creates the output directory if it does not exist yet.
        """
        dir_path = self.save_path
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        file_path = dir_path + '/' + file_name + '.' + suffixes
        # Context manager closes the handle even if write() raises
        # (the original leaked the handle on error).
        with codecs.open(file_path, 'w', encoding='utf-8') as file:
            file.write(content)
        # Report success.
        print('-------------- ok --------------')

    def dict_from_json_file(self, file_name, file_path=None):
        """Load a JSON config file and return it as a dict.

        :param file_name: file name; ".json" is appended when missing.
        :param file_path: explicit path; defaults to the template directory
            when omitted or falsy (made optional -- positional callers are
            unaffected).
        """
        if '.json' not in file_name:
            file_name += '.json'
        if not file_path:
            file_path = self.current_path + '/' + self.file_templates_path + '/' + file_name
        with codecs.open(file_path, 'r+', encoding='utf-8') as file:
            return json.loads(file.read())

    def text_from_txt_file(self, file_name, file_path=None):
        """Read a text file and return its contents as a string.

        :param file_name: file name; ".txt" is appended when missing.
        :param file_path: explicit path; defaults to the template directory
            when omitted or falsy.
        """
        if '.txt' not in file_name:
            file_name += '.txt'
        if not file_path:
            file_path = self.current_path + '/' + self.file_templates_path + '/' + file_name
        with codecs.open(file_path, 'r+', encoding='utf-8') as file:
            return file.read()
|
# This is Arjun Koshal's Final CS Project for CS 110
# I pledge my honor that I have abided by the Stevens Honor System
# For this project, I wish to demonstrate the functionality of machine learning
# in Python. I am going to allow the user to input a CSV file and have the
# program examine the file and create the line of best fit. It will also
# be able to determine the accuracy of the line, identify the coefficients and
# intercepts, and predict the future statistics.
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as mat
# Read file (append ".csv" if the user omitted the extension)
file_input = str(input("Please enter a CSV file: "))
if ".csv" not in file_input:
    file_input += ".csv"
data = pd.read_csv(file_input)
data.head()
# Input for x and y axis -- must match column names in the CSV
x_axis = str(input("\nPlease enter the x-axis: "))
y_axis = str(input("Please enter the y-axis: "))
data = data[[x_axis, y_axis]]
# Scatter plot of x axis vs y axis:
mat.scatter(data[x_axis], data[y_axis], color="purple")
mat.xlabel(x_axis)
mat.ylabel(y_axis)
# 1. Linear Regression
# Train/test split of 80:20 (first 80% of rows as training data):
train = data[:(int((len(data)*0.8)))]
test = data[(int((len(data)*0.8))):]
# Model the data:
from sklearn import linear_model
regression = linear_model.LinearRegression()
train_x = np.array(train[[x_axis]])
train_y = np.array(train[[y_axis]])
regression.fit(train_x, train_y)
# The fitted coefficient (slope) and intercept:
print("\nCoefficient: %.2f" % regression.coef_)
print("Intercept: %.2f" % regression.intercept_)
# Plot the training data with the fitted regression line:
mat.scatter(train_x, train_y, color="purple")
mat.plot(train_x, regression.coef_*train_x + regression.intercept_, "-r")
mat.xlabel(x_axis)
mat.ylabel(y_axis)
mat.show()
def get_predictions(m, b, x):
    """Evaluate the line y = m*x + b and return y."""
    return m * x + b
# Predicting dependent value for the future:
future_pred = float(input("\nPlease enter the future prediction value you want to calculate: "))
# NOTE(review): the arguments land as m=future_pred, b=intercept, x=slope in
# get_predictions(m, b, x); the product m*x makes the result numerically
# correct (slope*future_pred + intercept), but the parameter naming is
# confusing and worth untangling.
estimate = get_predictions(future_pred, regression.intercept_[0], regression.coef_[0][0])
print("\nWhen", x_axis, "is", future_pred, "then", y_axis, "is %.2f" % estimate)
# Checking various accuracy metrics on the held-out 20%:
from sklearn.metrics import r2_score
future_x = np.array(test[[x_axis]])
future_y = np.array(test[[y_axis]])
predict = regression.predict(future_x)
print("\nMean absolute error: %.2f" % np.mean(np.absolute(predict - future_y)))
print("Mean sum of squares (MSE): %.2f" % np.mean((predict - future_y) ** 2))
# Fix: r2_score expects (y_true, y_pred); the original passed the
# prediction first, which computes a different (wrong) score.
print("R^2 score: %.2f" % r2_score(future_y, predict))
|
from utils.vk_models import Base
class OwlCreator:
    """Accumulates OWL ontology content from class definitions and entities."""

    def __init__(self, ontology_name, classes):
        """Remember the ontology name and the classes to render.

        :param ontology_name: name of the ontology in the owl file
        """
        self.ontology_name = ontology_name
        self.content = "\n"
        self.data_properties = set()
        self.references = set()
        self.classes = classes

    def create_owl_content_in_parser(self, entity_sets):
        """Render classes, entities and their properties into OWL text."""
        # Ontology class declarations come first.
        for owl_class in self.classes:
            self.content += owl_class.create_owl_class(
                self.ontology_name, owl_class.get_parents(), owl_class.get_class_name())
        # Walk every entity, recording its attribute and reference names
        # while appending its rendered content.
        for entity_set in entity_sets:
            for entity in entity_set:
                self.add_data_properties(entity.get_attributes().keys())
                self.add_references(entity.get_references().keys())
                self.content += entity.create_owl_content(self.ontology_name)
        # Finally emit the accumulated data and object properties.
        self.content += Base.create_data_properties(self.ontology_name, self.data_properties)
        self.content += Base.create_object_properties(self.ontology_name, self.references)
        return self.content

    def add_data_properties(self, list_properties):
        """Record data-property names (duplicates collapse in the set)."""
        self.data_properties.update(list_properties)

    def add_references(self, list_references):
        """Record object-property (reference) names."""
        self.references.update(list_references)
|
# Read a three-digit number, then print the sum and the product of its digits.
# Hundreds digit: n//100, tens digit: (n%100)//10, units digit: n%10.
n=int(input("Введите трёхзначное число - "))
print("Сумма цифр числа = ", (n//100)+((n%100)//10)+(n%10))
print("Произведение цифр числа = ", (n//100)*((n%100)//10)*(n%10))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 13 14:10:53 2016
@author: Srinivas
"""
#import numpy
# Python 2 code (`print` statement).  In Python 2, range() returns a list,
# so `a in b` asks whether the LIST a is an ELEMENT of list b -- always
# False here.  It does NOT test subset containment.
a = range(10)
b = range(15)
c = a in b
print c
import spacy
# Load the spacy model that you have installed
nlp = spacy.load('en_core_web_md')
# process a sentence using the model
doc = nlp("orange apple banana fruit")
# NOTE(review): this second load is redundant -- it rebinds `nlp` to the
# same model name and `doc` above is never used afterwards.
nlp = spacy.load('en_core_web_md')  # make sure to use larger model!
tokens = nlp(u'dog cat banana')
# Print pairwise vector similarity for every token combination.
for token1 in tokens:
    for token2 in tokens:
        print(token1.text, token2.text, token1.similarity(token2))
|
import spacy
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
import spacy
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
class LogisticRegressionIntentClassifier(BaseEstimator, ClassifierMixin):
    """ Logistic regression classifier, which determines the user's intent.

    Texts are preprocessed with spaCy, converted to semantic-hash character
    n-grams, then classified by a CountVectorizer/TF-IDF/LogisticRegression
    pipeline.  Inputs of length <= 5 are dropped in fit() and labelled
    '_empty__' in predict().
    """
    def __init__(self, spacy_load_path='xx_ent_wiki_sm'):
        """ Create a new object.

        :param spacy_load_path: name of the spaCy model used for
            tokenization/lemmatization.
        """
        self.nlp = spacy.load(spacy_load_path)
        self.text_clf = Pipeline([
            ('vect', CountVectorizer()),
            ('tfidf', TfidfTransformer()),
            ('clf', LogisticRegression(penalty='l1', random_state=42, class_weight='balanced')),
        ])
    def fit(self, X, y, **kwargs):
        """ Fit the logistic regression model to convert sequence to intent.
        :param X: input texts for training.
        :param y: target intents for training.
        :return self
        """
        # Order matters: y must be filtered against the ORIGINAL X before
        # X itself is filtered on the same length criterion.
        y = [y for x, y in zip(X, y) if len(x) > 5]
        X = [x for x in X if len(x) > 5]
        X = self.__semhash_corpus(X)
        self.text_clf.fit(X, y)
        return self
    def predict(self, X):
        """ Predict resulting intents by source sequences with a trained logistic regression model.
        :param X: source sequences.
        :return: resulting intents, predicted for source sequences.
        """
        # Z masks the inputs long enough to classify; everything else keeps
        # the '_empty__' placeholder label.
        Z = [len(x) > 5 for x in X]
        y = np.array(['_empty__'] * len(X))
        if np.sum(Z) > 0:
            X = [x for x in X if len(x) > 5]
            X = self.__semhash_corpus(X)
            y[Z] = self.text_clf.predict(X)
        return y
    def __preprocess(self, sentence):
        """ Preprocess sentence by changing all letters to lower case, replacing pronouns
        by ’-PRON-’, and removing all special characters except stop characters.
        :param sentence: origin sentence as list of sentense of String type
        :return clear sentence as list of sentense of String type
        """
        # Keep the lemma of every non-stopword token, joined with spaces.
        clean_tokens = []
        sentence = self.nlp(sentence)
        for token in sentence:
            if not token.is_stop:
                clean_tokens.append(token.lemma_)
        return " ".join(clean_tokens)
    def __semhash_tokenizer(self, sentence, n=3):
        """ Convert sentence into semantic hash tokens.
        :param sentence: origin sentence after preprocessing as 1D list of sentense of string type
        :return list of semantic hash tokens as np.array, ndim = 2
        """
        tokens = sentence.split(" ")
        final_tokens = []
        for unhashed_token in tokens:
            # Wrap each word in '§' sentinels, then emit all character
            # n-grams (default trigrams) of the wrapped word.
            hashed_token = "§{}§".format(unhashed_token)
            final_tokens += [''.join(gram)
                             for gram in list(zip(*[list(hashed_token)[i:] for i in range(n)]))]
        return final_tokens
    def __semhash_corpus(self, corpus):
        """ Convert corpus into semantic hash corpus.
        :param corpus: list of unicode strings.
        :return list of semantic hash tokens.
        """
        new_corpus = []
        for sentence in corpus:
            sentence = self.__preprocess(sentence)
            tokens = self.__semhash_tokenizer(sentence)
            new_corpus.append(" ".join(map(str, tokens)))
        return new_corpus
    @staticmethod
    def load(path="model.joblib"):
        """Load a pickled classifier from *path*.

        NOTE(review): uses joblib imported from sklearn.externals, which is
        deprecated/removed in modern scikit-learn -- switch to `import joblib`.
        """
        with open(path, 'rb') as model:
            return joblib.load(model)
if __name__ == '__main__':
    # Smoke test: load a previously trained model and classify sample texts.
    with open("model.joblib", 'rb') as model:
        classifier = joblib.load(model)
    print(classifier.predict(["привет, как жизнь", "for i in range(5)", "годовая отчетность"]))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Deposit.deposvalue and changes Deposit.deposkey's default.

    Auto-generated Django migration; do not edit applied migrations.
    """
    dependencies = [
        ('trade', '0006_delivme_deposit'),
    ]
    operations = [
        migrations.AddField(
            model_name='deposit',
            name='deposvalue',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='deposit',
            name='deposkey',
            # Default is a bytestring on purpose here (py2-era migration).
            field=models.CharField(default=b'zg', max_length=20),
        ),
    ]
|
'''flickr module
May 2014: Mendez
'''
class Flickr(object):
    """Skeleton Flickr client; the API handle is not wired up yet."""

    def __init__(self):
        # No API connection until one is actually implemented.
        self.api = None

    def display(self):
        """Placeholder -- no display behavior yet."""
        pass

    def update(self):
        """Placeholder -- no update behavior yet."""
        pass
if __name__ == '__main__':
    # Smoke test: just construct the client.
    f = Flickr()
'''
Printing is simple.
Printing allows you to display text onto the shell / output screen.
To do so, type the command print() and inside the brackets enter what you want to print...
'''
print("Hello, World.")
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorators for adding parametrization support."""
from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from itertools import chain
from typing import Any, TypeVar, cast
from pulser.parametrized import Parametrized, ParamObj
F = TypeVar("F", bound=Callable)
def parametrize(func: F) -> F:
    """Makes a function support parametrized arguments.

    Note:
        Designed for use in class methods. Usage in instance or static methods
        is not supported, and in regular functions is not tested.
    """

    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # Defer execution whenever any positional or keyword argument is
        # parametrized; otherwise call straight through.
        deferred = any(
            isinstance(arg, Parametrized)
            for arg in chain(args, kwargs.values())
        )
        if deferred:
            return ParamObj(func, *args, **kwargs)
        return func(*args, **kwargs)

    return cast(F, wrapper)
|
def producto(n, m):
    """Return n * m computed by repeated addition (recursively).

    Generalized to negative m: the original silently returned None for
    m < 0; here n*(-m) is negated instead.

    :param n: first factor (any number)
    :param m: second factor (any integer)
    """
    if m < 0:
        # n * m == -(n * -m) for negative m.
        return -producto(n, -m)
    if m == 0:
        return 0
    if m == 1:
        return n
    # n * m == n + n * (m - 1)
    return n + producto(n, m - 1)


print(producto(30, 90))
|
class AQModel:
    """Plain data holder for a single air-quality reading.

    NOTE(review): 'batery' looks like a typo for 'battery', but renaming
    the attribute would break existing callers, so it is kept as-is.
    """

    def __init__(self, temp, humidity, sensor, timestamp, batery):
        # Store each reading field verbatim on the instance.
        self.temp = temp
        self.humidity = humidity
        self.sensor = sensor
        self.timestamp = timestamp
        self.batery = batery
|
#!/usr/bin/python
# Project Euler 64: count N < 10001 whose sqrt(N) continued-fraction
# expansion has an odd period length.
# https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Continued_fraction_expansion
total = 0
for N in range(2, 10001):
    root = int(N ** 0.5)
    # Perfect squares have no periodic expansion; skip them.
    if root * root == N:
        continue
    # Standard recurrence: m' = d*a - m, d' = (N - m'^2)/d,
    # a' = (root + m') // d'.  The period closes when a == 2*root.
    m, d, a = 0, 1, root
    period = 0
    while a != 2 * root:
        m = d * a - m
        d = (N - m * m) // d
        a = (root + m) // d
        period += 1
    total += period % 2
print(total)
|
# Streams tweets matching a user-supplied keyword into a MongoDB collection.
# SECURITY NOTE(review): real-looking Twitter API keys/tokens and a MongoDB
# connection string are hard-coded below.  They must be revoked and moved to
# environment variables or a secrets store before this code is shared.
import tweepy
from textblob import TextBlob
from tweepy.streaming import StreamListener
from pymongo import MongoClient
import json
client = MongoClient(
    "mongodb+srv://rithvikkolanu:<password>@twitterminingdata-twph5.mongodb.net/test?retryWrites=true"
)
db = client.input
# Clears the whole collection on every run.
db.input.remove({})
consumer_key = "VBi8xSyHQyVTWQvsIvKhvuXs5"
consumer_secret = "klDMzRBOeDKCetaH7yWIlNRk7lN1MvZfXANXTpjvxkNHBvLMGH"
access_token = "1112897842312445952-GMVFJP6TnYxBM2PF9n2PMXXSebZ2xk"
access_secret = "JWaIfMVUzO82HzA68MhM6perPk3BmaaFS5r0tKlkLqHsP"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
class MyStreamListener(StreamListener):
    """Stream listener that inserts each raw tweet document into MongoDB."""
    def on_connect(self):
        print("you are now connected")
    def on_error(self, status_code):
        # Just log the status code; the stream keeps running.
        print(repr(status_code))
    def on_data(self, raw_data):
        # Parse the raw JSON payload, store it, and echo its timestamp.
        try:
            global datajson
            datajson = json.loads(raw_data)
            db.input.insert(datajson)
            created_at = datajson['created_at']
            print(created_at)
        except Exception as e:
            print(e)
# NOTE(review): rebinding the class name to an instance here makes the class
# unreachable afterwards -- consider a differently named variable.
MyStreamListener = MyStreamListener()
MyStream = tweepy.Stream(auth = api.auth, listener = MyStreamListener)
# NOTE(review): shadows the builtin input() after this line.
input = input("KeyWord: ")
Tracked = MyStream.filter(track=[input])
|
#Sean Kim
#Unit 1 Company Invoice
# Interactive script: reads two items (name, quantity, unit price) and
# prints an invoice line plus total cost for each.
print ("This program will calculate your invoice.")
#Gets Information about First Item from User
a = input("What is the first item?")
b = int(input("How many did you buy?"))
c = float(input("What was the price of each?"))
m = b * c
#Gets Information about Second Item from User
x = input("What is your second item?")
y = int(input("How many did you buy?"))
z = float(input("What was the price of each?"))
n = y * z
#Prints Invoice Line and Cost of First Item
print ("Item 1: \n\t" + str(b) , a + ", $" + str(c) + " each")
print ("\tTotal Cost: $" + str(m))
#Prints Invoice Line and Cost of Second Item
print ("Item 2: \n\t" + str(y) , x + ", $" + str(z) + " each")
# Fix: the second total was printed without the "$" prefix, inconsistent
# with the first item's total.
print ("\tTotal Cost: $" + str(n))
# Simple interactive TCP chat server (comments translated from Chinese).
import socket

# Create a TCP socket.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the listening address.
s.bind(('localhost', 8080))
# Start listening for connections.
s.listen()
# Accept one incoming connection.
print('数据传输中......')
conn, addres = s.accept()
while True:
    res = conn.recv(1024)  # receive a request from the client
    if res.decode() == 'exit':
        # Client asked to quit.  (Fix: the original had an unreachable
        # s.close() after this break; removed.)
        break
    else:
        print('客户端:', res.decode())  # show what the client sent
        data = input('服务器--->:')  # read the operator's reply
        conn.send(data.encode())  # send the encoded reply back
# Fix: also close the accepted connection, not just the listening socket.
conn.close()
s.close()
|
import re
from amath.algebra.Function import Function
from amath.algebra.Logarithm import Logarithm
from amath.algebra.Polynomial import Polynomial
def _check_integral(rat):
    """Match *rat* against known closed-form antiderivative patterns.

    Returns a Logarithm/Function/Rational antiderivative when the rational
    function has one of the recognised shapes below, otherwise falls
    through and implicitly returns None.  NOTE(review): the formulas are
    the standard results for c/(ax+b), cx/(ax+b), cx/(ax+b)^n and
    (mx+n)/(ax^2+bx+c) with zero discriminant -- verify against a table
    of integrals before extending.
    """
    # Maps superscript digits (as produced by factor_str) back to ASCII so
    # the regex below can read exponents.
    sub = str.maketrans("⁰¹²³⁴⁵⁶⁷⁸⁹", "0123456789")
    # c / (ax + b)  ->  (c/a) ln(ax + b)
    if rat.bot.degree == 1 and rat.top.degree == 0:
        a = rat.bot.coe[1]
        b = rat.bot.coe[0]
        c = rat.top
        return Logarithm(Polynomial([a, b]), a=c / a)
    # cx / (ax + b)  ->  (c/a)x + (cb/a^2) ln(ax + b)
    if rat.bot.degree == 1 and rat.top.degree == 1 and rat.top.coe[0] == 0:
        a = rat.bot.coe[1]
        b = rat.bot.coe[0]
        c = rat.top.coe[1]
        return Function([Polynomial([c / a, 0]), Logarithm(Polynomial([a, b]), a=c * b / a ** 2)])
    # cx / (ax + b)^n for n >= 3 -- detected from the factored string form.
    match = re.search(r"\((-?\d+(\.\d+)?)x \+ (-?\d+(\.\d+)?)\)(\d+)", rat.bot.factor_str().translate(sub))
    if match and rat.top.degree == 1 and rat.top.coe[0] == 0:
        a = float(match.group(1))
        b = float(match.group(3))
        c = rat.top.coe[1]
        n = float(match.group(5))
        fac = Polynomial.from_factor_str(f"({a}x + {b}){n - 1}")
        return Rational(Polynomial([c * a * (1 - n), -b * c]), a ** 2 * (n - 1) * (n - 2) * fac)
    # (mx + n) / (ax^2 + bx + c) when the discriminant is zero
    # (denominator is a perfect square).
    if rat.bot.degree == 2 and rat.top.degree == 1:
        m = rat.top.coe[1]
        n = rat.top.coe[0]
        a = rat.bot.coe[2]
        b = rat.bot.coe[1]
        c = rat.bot.coe[0]
        if 4 * a * c - b ** 2 == 0:
            return Logarithm(Polynomial([a, b, c]), a=m / (2 * a)) - Rational(Polynomial([2 * a * n - b * m]),
                                                                              Polynomial([2 * a ** 2, b * a]))
Polynomial([2 * a ** 2, b * a]))
class Rational(Function):
def __init__(self, num, den):
"""Initializes Rational Function with 2 Polynomials
:type num: Polynomial
:type den: Polynomial
:param num: Numerator
:param den: Denominator
>>> Rational(Polynomial([1, 3]), Polynomial([1, 2, -3]))
(1x + 3)/(1x² + 2x + -3)
>>> Rational(Polynomial([1, 2, 5]), Polynomial([-2, 3]))
(-1x² + -2x + -5)/(2x + -3)
"""
if den.coe[-1] < 0:
num, den = -num, -den # to keep top as negative
self.top, self.bot = num, den
def __repr__(self):
return f"({self.top})/({self.bot})"
def __call__(self, x):
return self.top(x) / self.bot(x)
def derivative(self, value=None):
"""Finds the derivative of the Rational Function
Given value, it will return an exact value of the derivative at that point
:param value: x-value
:return: Rational or slope at x-value
"""
der = Rational(self.bot * self.top.derivative() - self.top * self.bot.derivative(), self.bot * self.bot)
if value is not None:
return der(value)
return der
def integrate(self, a=None, b=None):
test = _check_integral(self)
if test:
return test
top = self.top
bot = self.bot
fracs = []
if self.top.degree > self.bot.degree:
fracs.append(top / bot)
top = top % bot
factors = bot.factor()
for i in range(len(factors)):
root = factors[i].find_root()
coe = 1
for j in range(len(factors)):
if i == j:
continue
coe *= factors[j](root)
fracs.append(Rational(Polynomial([top(root) / coe]), factors[i]))
for i in range(len(fracs)):
fracs[i] = fracs[i].integrate()
return Function(fracs)
def num_roots(self):
return self.top.num_roots()
def get_root_bounds(self):
return self.top.get_root_bounds()
def find_roots(self):
return self.top.find_roots()
def find_root(self, *, guess=None):
return self.top.find_root(guess=guess)
def vertical_asymptotes(self):
return self.bot.find_roots()
def horizontal_asymptote(self):
if self.top.degree > self.bot.degree:
return None
if self.top.degree < self.bot.degree:
return 0
return self.top.coe[-1] / self.bot.coe[-1]
def oblique_asymptote(self):
if self.top.degree - 1 == self.bot.degree:
return self.top / self.bot
return None
def simplify(self):
    """Return a new Rational reduced by the GCD of numerator and denominator.

    (Removed a leftover debug ``print(gcd)`` from the original.)
    """
    gcd = Polynomial.gcd(self.bot, self.top)
    return Rational(self.top / gcd, self.bot / gcd)
def __neg__(self):
    """Unary minus: negate the numerator, keep the denominator."""
    negated_top = -self.top
    return Rational(negated_top, self.bot)
def __add__(self, other):
    """Add a Rational, or a Polynomial/scalar, to this function.

    For Rational + Rational each numerator is scaled by lcm/denominator.
    The division ``lcm / bot`` is performed BEFORE multiplying by the
    numerator so the polynomial division is exact (the lcm is divisible by
    each denominator).  The original multiplied first, dividing a product
    instead, and carried a "Not working as intended" note.
    """
    if isinstance(other, Rational):
        lcm = Polynomial.lcm(self.bot, other.bot)
        left = self.top * (lcm / self.bot)
        right = other.top * (lcm / other.bot)
        return Rational(left + right, lcm)
    return Rational(self.top + self.bot * other, self.bot)
def __sub__(self, other):
    """Subtract a Rational, or a Polynomial/scalar, from this function.

    As in __add__, ``lcm / denominator`` is divided out first so the
    polynomial division is exact before multiplying by the numerator
    (the original order was flagged "Not working as intended").
    """
    if isinstance(other, Rational):
        lcm = Polynomial.lcm(self.bot, other.bot)
        left = self.top * (lcm / self.bot)
        right = other.top * (lcm / other.bot)
        return Rational(left - right, lcm)
    return Rational(self.top - self.bot * other, self.bot)
def __mul__(self, other):
    """Multiply by another Rational, or by a Polynomial/scalar."""
    if not isinstance(other, Rational):
        return Rational(self.top * other, self.bot)
    return Rational(self.top * other.top, self.bot * other.bot)
|
import cv2

# Haar cascade for frontal faces; the XML file must be in the working directory.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Capture video from the default webcam.
vid = cv2.VideoCapture(0)
# Number of frames actually processed.
frame_count = 0
while True:
    check, frame = vid.read()
    if not check:
        # Camera delivered no frame (disconnected / stream ended) — the
        # original ignored `check` and would crash in cvtColor on None.
        break
    frame_count += 1
    # Convert the BGR frame to grayscale for the cascade detector.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Look for face coordinates in the image.
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.03, minNeighbors=5)
    # Draw a rectangle around each detected face.
    for x, y, w, h in faces:
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    # Show the frame even when no face was detected: the original showed
    # `live`, which is undefined until the first detection (NameError).
    cv2.imshow("Window", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
print(frame_count)  # print number of frames (original over-counted by one)
vid.release()
cv2.destroyAllWindows()
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
import joint_transforms
from config import sbu_training_root
from dataset import ImageFolder
from misc import AvgMeter, check_mkdir
from model import BDRAR
# Let cuDNN pick kernels for the fixed input size, and pin GPU 0.
cudnn.benchmark = True
torch.cuda.set_device(0)
ckpt_path = './ckpt'
exp_name = 'BDRAR'
# batch size of 8 with resolution of 416*416 is exactly OK for the GTX 1080Ti GPU
args = {
    'iter_num': 3000,         # total training iterations
    'train_batch_size': 8,
    'last_iter': 0,           # starting iteration (non-zero when resuming)
    'lr': 5e-3,               # base learning rate (bias params get 2x in main())
    'lr_decay': 0.9,          # exponent of the polynomial lr decay
    'weight_decay': 5e-4,
    'momentum': 0.9,
    'snapshot': '',           # checkpoint name to resume from ('' = fresh run)
    'scale': 416              # square training resolution
}
# Joint transforms are applied to image and target together.
joint_transform = joint_transforms.Compose([
    joint_transforms.RandomHorizontallyFlip(),
    joint_transforms.Resize((args['scale'], args['scale']))
])
val_joint_transform = joint_transforms.Compose([
    joint_transforms.Resize((args['scale'], args['scale']))
])
# Standard ImageNet mean/std normalization for the input images.
img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
train_set = ImageFolder(sbu_training_root, joint_transform, img_transform, target_transform)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=8, shuffle=True)
bce_logit = nn.BCEWithLogitsLoss().cuda()
# One log file per run, named by the start timestamp.
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
def main():
    """Build the network and optimizer, optionally resume, then train.

    Bias parameters get twice the base learning rate and no weight decay;
    all other parameters use the base lr with weight decay.
    """
    net = BDRAR().cuda().train()
    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
         'lr': 2 * args['lr']},
        {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
         'lr': args['lr'], 'weight_decay': args['weight_decay']}
    ], momentum=args['momentum'])
    if len(args['snapshot']) > 0:
        # Resume both model and optimizer state, then reset the lrs
        # (the loaded optimizer state carries decayed values).
        print('training resumes from \'%s\'' % args['snapshot'])
        net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
        optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth')))
        optimizer.param_groups[0]['lr'] = 2 * args['lr']
        optimizer.param_groups[1]['lr'] = args['lr']
    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    # Context manager closes the file deterministically; the original
    # leaked the handle from open(...).write(...).
    with open(log_path, 'w') as log_file:
        log_file.write(str(args) + '\n\n')
    train(net, optimizer)
def train(net, optimizer):
    """Run the optimization loop until args['iter_num'] iterations, then
    save a final checkpoint and return.

    Learning rates decay polynomially per iteration; per-iteration losses
    are printed and appended to the run's log file.
    """
    curr_iter = args['last_iter']
    while True:
        # Fresh running averages each epoch: fused output + 8 side outputs.
        train_loss_record, loss_fuse_record, loss1_h2l_record = AvgMeter(), AvgMeter(), AvgMeter()
        loss2_h2l_record, loss3_h2l_record, loss4_h2l_record = AvgMeter(), AvgMeter(), AvgMeter()
        loss1_l2h_record, loss2_l2h_record, loss3_l2h_record = AvgMeter(), AvgMeter(), AvgMeter()
        loss4_l2h_record = AvgMeter()
        for i, data in enumerate(train_loader):
            # Polynomial decay; the bias group (index 0) keeps 2x the base lr.
            optimizer.param_groups[0]['lr'] = 2 * args['lr'] * (1 - float(curr_iter) / args['iter_num']
                                                                ) ** args['lr_decay']
            optimizer.param_groups[1]['lr'] = args['lr'] * (1 - float(curr_iter) / args['iter_num']
                                                            ) ** args['lr_decay']
            inputs, labels = data
            batch_size = inputs.size(0)
            inputs = Variable(inputs).cuda()
            labels = Variable(labels).cuda()
            optimizer.zero_grad()
            # The network returns the fused map plus 4 high-to-low and
            # 4 low-to-high side predictions.
            fuse_predict, predict1_h2l, predict2_h2l, predict3_h2l, predict4_h2l, \
                predict1_l2h, predict2_l2h, predict3_l2h, predict4_l2h = net(inputs)
            loss_fuse = bce_logit(fuse_predict, labels)
            loss1_h2l = bce_logit(predict1_h2l, labels)
            loss2_h2l = bce_logit(predict2_h2l, labels)
            loss3_h2l = bce_logit(predict3_h2l, labels)
            loss4_h2l = bce_logit(predict4_h2l, labels)
            loss1_l2h = bce_logit(predict1_l2h, labels)
            loss2_l2h = bce_logit(predict2_l2h, labels)
            loss3_l2h = bce_logit(predict3_l2h, labels)
            loss4_l2h = bce_logit(predict4_l2h, labels)
            loss = loss_fuse + loss1_h2l + loss2_h2l + loss3_h2l + loss4_h2l + loss1_l2h + \
                loss2_l2h + loss3_l2h + loss4_l2h
            loss.backward()
            optimizer.step()
            train_loss_record.update(loss.data, batch_size)
            loss_fuse_record.update(loss_fuse.data, batch_size)
            loss1_h2l_record.update(loss1_h2l.data, batch_size)
            loss2_h2l_record.update(loss2_h2l.data, batch_size)
            loss3_h2l_record.update(loss3_h2l.data, batch_size)
            loss4_h2l_record.update(loss4_h2l.data, batch_size)
            loss1_l2h_record.update(loss1_l2h.data, batch_size)
            loss2_l2h_record.update(loss2_l2h.data, batch_size)
            loss3_l2h_record.update(loss3_l2h.data, batch_size)
            loss4_l2h_record.update(loss4_l2h.data, batch_size)
            curr_iter += 1
            log = '[iter %d], [train loss %.5f], [loss_fuse %.5f], [loss1_h2l %.5f], [loss2_h2l %.5f], ' \
                  '[loss3_h2l %.5f], [loss4_h2l %.5f], [loss1_l2h %.5f], [loss2_l2h %.5f], [loss3_l2h %.5f], ' \
                  '[loss4_l2h %.5f], [lr %.13f]' % \
                  (curr_iter, train_loss_record.avg, loss_fuse_record.avg, loss1_h2l_record.avg, loss2_h2l_record.avg,
                   loss3_h2l_record.avg, loss4_h2l_record.avg, loss1_l2h_record.avg, loss2_l2h_record.avg,
                   loss3_l2h_record.avg, loss4_l2h_record.avg, optimizer.param_groups[1]['lr'])
            # `print log` was Python 2 syntax — a SyntaxError under Python 3.
            print(log)
            # Context manager closes the handle; the original leaked it.
            with open(log_path, 'a') as log_file:
                log_file.write(log + '\n')
            if curr_iter > args['iter_num']:
                torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % curr_iter))
                return
# Standard script entry point.
if __name__ == '__main__':
    main()
|
class WotApiException(Exception):
    """Base class for all WoT API errors."""
    pass


class SystemException(WotApiException):
    """System-level error category."""
    pass


class HTTPRequestException(SystemException):
    """Error for a failed HTTP request."""
    pass


# Maps numeric API error codes to exception classes.
_ERROR_MAP = {
    0: SystemException,
}


def exception_for_code(code):
    """Return the exception class registered for ``code``.

    Raises KeyError for unknown codes (matching the original plain dict
    lookup) but with a descriptive message instead of a bare key.
    """
    try:
        return _ERROR_MAP[code]
    except KeyError:
        raise KeyError("unknown WoT API error code: {!r}".format(code)) from None
|
from django.shortcuts import render
from django.contrib.auth import login, authenticate, logout
from django.views.generic import *
from .forms import *
from django.shortcuts import redirect,reverse
from django.urls import reverse_lazy
from django.http import JsonResponse, HttpResponseRedirect
# from django.core.urlresolvers import reverse, reverse_lazy
import json
from django.template.loader import render_to_string
from weasyprint import HTML
import tempfile
from django.http import HttpResponse
from django.contrib import messages
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# Create your views here.
#login form
class LoginView(FormView):
    """Log in a superuser; any other credential combination is rejected."""
    template_name = 'login/login.html'
    form_class = LoginForm
    success_url = '/home/'

    def form_valid(self, form):
        uname = form.cleaned_data['username']
        pword = form.cleaned_data['password']
        user = authenticate(username=uname, password=pword)
        self.thisuser = user
        if user is not None and user.is_superuser:
            login(self.request, user)
        else:
            # This branch covers unknown usernames, wrong passwords AND
            # non-superusers, so the old message ("Username you enter
            # doesnot exist") was both wrong and misleading.
            return render(self.request, self.template_name, {
                'error': 'Invalid username or password',
                'form': form
            })
        return super().form_valid(form)
#home view
class HomeView(TemplateView):
    """Admin landing page."""
    template_name = 'admintemplates/adminhome.html'
#logout
class LogoutView(View):
    """End the session and send the user back to the login page."""
    def get(self, request):
        logout(request)
        return redirect('/')
#supplier list
class SupplierListView(TemplateView):
    """Render the supplier list page with every Supplier."""
    template_name = 'supplier/supplierlist.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(suppliers=Supplier.objects.all())
        return ctx
#supplier add
class SupplierCreateView(CreateView):
    """Create a Supplier via SupplierForm, then return to the list."""
    template_name = 'supplier/supplieradd.html'
    form_class = SupplierForm
    success_url = reverse_lazy('posapp:supplierlist')
#supplier update
class SupplierUpdateView(UpdateView):
    """Edit an existing Supplier, then return to the list."""
    template_name = 'supplier/supplierupdate.html'
    model = Supplier
    form_class = SupplierForm
    success_url = reverse_lazy("posapp:supplierlist")
#supplier delete
class SupplierdeleteView(DeleteView):
    """Confirm and delete a Supplier, then return to the list."""
    template_name = 'supplier/supplierdelete.html'
    model = Supplier
    success_url = reverse_lazy("posapp:supplierlist")
#customer list
class CustomerListView(TemplateView):
    """Render the customer list page with every Customer."""
    template_name = 'customer/customerlist.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(customers=Customer.objects.all())
        return ctx
#customer add
class CustomerCreateView(CreateView):
    """Create a Customer via CustomerForm, then return to the list."""
    template_name = 'customer/customeradd.html'
    form_class = CustomerForm
    success_url = reverse_lazy('posapp:customerlist')
#customer update
class CustomerUpdateView(UpdateView):
    """Edit an existing Customer, then return to the list."""
    template_name = 'customer/customerupdate.html'
    model = Customer
    form_class = CustomerForm
    success_url = reverse_lazy("posapp:customerlist")
#supplier delete
class CustomerdeleteView(DeleteView):
    """Confirm and delete a Customer, then return to the list."""
    template_name = 'customer/customerdelete.html'
    model = Customer
    success_url = reverse_lazy("posapp:customerlist")
#customer list
class ProductListView(TemplateView):
    """Render the product list page with every Product."""
    template_name = 'product/productlist.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(products=Product.objects.all())
        return ctx
#product create
class ProductCreateView(CreateView):
    """Create a Product via ProductForm, then return to the list."""
    template_name = 'product/productadd.html'
    form_class = ProductForm
    success_url = reverse_lazy('posapp:productslist')
#product update
class ProductUpdateView(UpdateView):
    """Edit an existing Product, then return to the list."""
    template_name = 'product/productupdate.html'
    model = Product
    form_class = ProductForm
    success_url = reverse_lazy("posapp:productslist")
class ProductdeleteView(DeleteView):
    """Confirm and delete a Product, then return to the list."""
    template_name = 'product/productdelete.html'
    model = Product
    success_url = reverse_lazy("posapp:productslist")
class SalesListView(TemplateView):
    """List all sales plus order items, newest order items first."""
    template_name = 'sales/salescreate.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['sales'] = Sales.objects.all()
        ctx['order'] = OrderItem.objects.order_by('-timestamp')
        return ctx
#sales create
class SalesCreateView(FormView):
    """Sales-entry page: shows sales history and the product catalogue."""
    template_name = 'sales/test.html'
    form_class = SalesCreateForm

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['sales'] = SalesHistory.objects.all()
        context['products'] = Product.objects.all()
        # context['customers'] = Customer.objects.all()
        return context
#create invoice
def CreateInvoiceView(request):
    """GET: show the customer-selection form. POST: open the invoice page
    for the chosen customer."""
    if request.method == 'GET':
        customerform = CustomerForm
        salesform = SalesCreateForm
        customer = Customer.objects.all()
        return render(request, 'sales/customerform.html',
                      {'customerform': customerform, 'salesform': salesform, 'customer': customer})
    cid = request.POST.get('dropdown', None)
    # The original used .get(), which raises DoesNotExist rather than
    # returning None — so its `is not None` check was dead code.
    customer = Customer.objects.filter(customer=cid).first()
    products = list(Product.objects.all())
    if customer is not None:
        return render(request, 'sales/test.html', {'customer': customer, 'products': products})
    # A view must return an HttpResponse; `return messages.error(...)`
    # returned None (500 error). Flash the message, then redisplay the form.
    messages.error(request, "Error!")
    return HttpResponseRedirect(request.path)
#order bill
def orderBill(request):
    """Create a Sales order from the posted cart, prepare (but do not yet
    send) the bill email, and render the bill page."""
    # Local import keeps the file-level import block untouched.
    from django.conf import settings

    if request.method == 'POST':
        data = json.loads(request.POST.get('data', None))
        if data is None:
            raise AttributeError
        customer = Customer.objects.get(pk=data['customer_id'])
        order = Sales.objects.create(customer=customer,
                                     total_price=data['total_price'],
                                     )
        for product_id in data['product_ids']:
            OrderItem(product=Product.objects.get(pk=product_id), order=order).save()
        if data['total_price']:
            customer.save()
            order.save()
        # The original passed the LITERAL string 'settings.EMAIL_HOST_USER'
        # as the sender address; use the actual setting instead.
        subject, from_email, to = 'Greetings Messages', settings.EMAIL_HOST_USER, customer.email
        html_content = render_to_string('bill/emailmessage.html', {'customer': customer})
        # Strip the html tags so recipients without HTML see plain text.
        text_content = strip_tags(html_content)
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, "text/html")
        # msg.send()  # TODO: sending is still disabled, as in the original
        return render(request, 'sales/orderbill.html', context={'id': order.id})
#bill generate
def BillGeneration(request, pk):
    """Render the bill for Sales ``pk`` as an inline PDF response."""
    sales = Sales.objects.get(id=pk)
    order = OrderItem.objects.filter(order=sales)
    # Rendered
    html_string = render_to_string('bill/report.html', {'sales': sales, 'order': order})
    result = HTML(string=html_string).write_pdf()
    # Creating http response
    response = HttpResponse(content_type='application/pdf;')
    response['Content-Disposition'] = 'inline; filename=report.pdf'
    response['Content-Transfer-Encoding'] = 'binary'
    # write_pdf() already returns the PDF bytes: the original round-tripped
    # them through a NamedTemporaryFile and leaked a second handle opened
    # on output.name; writing the bytes directly is equivalent.
    response.write(result)
    return response
#chart
class ChartView(TemplateView):
    """Report page: feeds every order item and sale to the chart template."""
    template_name = 'reports/chart.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(orders=OrderItem.objects.all(), sales=Sales.objects.all())
        return ctx
|
# Read the chest items and how many to loot from the end.
treasure_chest = input().split("|")
count = int(input())
if count > len(treasure_chest):
    # Asked for more than exists: empty the chest and print a blank line.
    treasure_chest.clear()
    print(" ".join(treasure_chest))
else:
    # Slice from the front: the original `[-count:]` returns the WHOLE
    # list when count == 0 ([-0:] == [0:]), instead of nothing.
    print(" ".join(treasure_chest[len(treasure_chest) - count:]))
import json, sys
from pathlib import Path
# I have to use `# type: ignore` so I don't get errors that these imports don't work.
from helpers import error_message, getDataFolder, check_if_use_saved_directory, check_for_duplicates, set_default_directory, default_directory_exists, str2bool, dir_exists # type: ignore
from parser import argc_allowed, create_parser, usage # type: ignore
__doc__ = "Usage:\n" + usage
argc_allowed()
args = create_parser().parse_args()

if args.command == 'add':
    # A missing directory argument is only acceptable once a default is set.
    if args.directory is None and not default_directory_exists():
        error_message('Cannot use default directory if default directory not set', 3)
    target = args.target
    directory = check_if_use_saved_directory(args.directory)
    check_for_duplicates(target)
    if not Path(directory).is_dir():  # Checks if directory not found
        error_message("Directory not found (may be a file)", 1)
        # Offer to create the missing backup directory.
        create_new = "not bool"
        while create_new == "not bool":
            create_new = str2bool(input(f"Create new directory {directory}: "))
        if create_new:
            Path(directory).mkdir(parents=True)
        else:
            sys.exit()
    # If the target does exist
    if target.exists():
        path = getDataFolder() / 'db.json'
        if not path.exists():
            path.touch()
            path.write_text("[]")
        # (removed leftover debug `print(type(args.interval))`)
        new_entry = {
            'target': str(target),
            'interval': args.interval[0],
            'directory': str(directory)
        }
        queries = json.loads(path.read_text())
        queries.append(new_entry)
        path.write_text(json.dumps(queries))
        print(f"Successfully added {target}")
    else:
        error_message(f"Target {target} not found")
elif args.command == 'remove':
    target = str(args.target)
    path = getDataFolder() / 'db.json'
    if not path.exists():
        error_message(f'Target "{target}" not found. Use `backup.py add <target> <time> <backup_directory>` to add "{target}"', 3)
    queries = json.loads(path.read_text())
    # Materialize once: the original kept a filter ITERATOR, which the
    # equality check below consumed, so json.dumps(list(new_queries))
    # always wrote an empty list back to db.json.
    new_queries = [query for query in queries if query['target'] != target]
    if queries == new_queries:
        error_message(f"Target {target} has no queries. Use `backup.py add` to add a new query", 3)
    path.write_text(json.dumps(new_queries))
    print(f"Successfully removed {target}")
elif args.command == 'config':
    # extract option & value
    option = args.option
    value = args.value
    if option == 'defaultDir':
        if dir_exists(value):
            set_default_directory(value)
        else:
            error_message("Directory doesn't exist.", 3)
    else:
        error_message(f'Option "{option}" is not valid.', 3)
|
import os
from celery import Celery
from django.conf import settings
# Make sure Django knows which settings module to use before Celery reads it.
DJANGO_SETTINGS_MODULE = os.environ.get('DJANGO_SETTINGS_MODULE')
if not DJANGO_SETTINGS_MODULE:
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'coconut_server.settings')
# Celery application for the "api" project: configuration comes from
# django.conf.settings, tasks are auto-discovered from INSTALLED_APPS.
app = Celery('api')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
key_string = "aaaaabbbbbabbbaabbababbaaababaab"


def generate_dict():
    """Map each 5-character window of key_string to a lowercase letter.

    The window starting at index i maps to chr(97 + i), i.e. 'a'..'z';
    duplicate windows keep the letter of the LAST index, exactly as the
    original insertion loop did.
    """
    return {key_string[i:i + 5]: chr(97 + i) for i in range(26)}
if __name__ == '__main__':
    msg = "Hot sUn BEATIng dOWN bURNINg mY FEet JuSt WalKIng arOUnD HOt suN mAkiNG me SWeat"
    msg = msg.replace(" ", "")
    # Split into 5-letter groups, dropping any short trailing group.
    msg = [msg[i:i + 5] for i in range(0, len(msg), 5)]
    msg = list(filter(lambda x: len(x) == 5, msg))
    print(msg)
    # Bacon cipher: lowercase letters encode 'a', uppercase encode 'b'.
    ciphered = ["".join("a" if c.islower() else "b" for c in w) for w in msg]
    # Renamed from `dict`, which shadowed the builtin of the same name.
    bacon_map = generate_dict()
    result = "".join(bacon_map[code] for code in ciphered)
    print(result)
#!/usr/bin/env python3
import argparse
import logging
from collections import namedtuple
from dataclasses import dataclass, field
from os import path
# Timestamped, leveled log format shared by the whole script; the logger is
# named after this file.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d [%H:%M:%S]')
log = logging.getLogger(path.basename(__file__))
# TODO: make it possible to use scientific names in the same way as tax_id
@dataclass
class Node:
    """One taxon in the tree, linked to parent and children by tax_id."""
    name: str = None                  # scientific name (None until known)
    genbank_common_name: str = None   # genbank common name, may stay None
    rank: str = None                  # e.g. 'species'; None until nodes.dmp row seen
    parent: int = None                # parent tax_id; None for the root
    children: list = field(default_factory=list)  # child tax_ids
# Record describing a taxon's rank: name (e.g. 'species'), one-letter code
# (e.g. 'S'), and depth below the nearest coded ancestor.
Rank = namedtuple('Rank', ['rank_name', 'rank_code', 'rank_depth'])
# Using the same rank codes as Kraken 2 (https://github.com/DerrickWood/kraken2/blob/master/src/reports.cc)
translate_rank2code = {
    'superkingdom': 'D',
    'kingdom': 'K',
    'phylum': 'P',
    'class': 'C',
    'order': 'O',
    'family': 'F',
    'genus': 'G',
    'species': 'S'
}
class TaxonomyTreeException(Exception):
    """Raised for inconsistent dump data or misuse of TaxonomyTree."""
    pass
class TaxonomyTree:
"""
Creates a representation of the taxonomy in the files names.dmp and
nodes.dmp of a kraken2 database.
Inspired by https://github.com/frallain/NCBI_taxonomy_tree.
"""
def __init__(self, nodes_filename, names_filename):
    """Store the dump file paths and build the tree immediately.

    :param nodes_filename: path to an NCBI-style nodes.dmp file
    :param names_filename: path to an NCBI-style names.dmp file
    """
    self.nodes_filename = nodes_filename
    self.names_filename = names_filename
    # Only these name classes are read from names.dmp.
    self.wanted_name_types = set(
        ['scientific name', 'genbank common name'])
    # Main data structure
    self.taxonomy = {}   # tax_id -> Node
    self.byranks = {}    # rank name -> set of tax_ids at that rank
    self.leaves = set()  # tax_ids currently without children
    # "Memory" data structure to be populated at function calls
    # For faster response in case of same query is asked again
    self.lineages = {}      # tax_id -> [root, ..., tax_id]
    self.distances = {}     # smaller tax_id -> {larger tax_id: distance}
    self.lca_mappings = {}  # smaller tax_id -> {larger tax_id: lca}
    # Add nodes to self.taxonomy
    self.construct_tree()
def construct_tree(self):
    """
    Reads a names.dmp and nodes.dmp file, and constructs a taxonomy tree
    representation:
    {tax_id#1: Node('name', 'genbank_common_name', 'rank', 'parent', 'children'),
     tax_id#2: Node('name', 'genbank_common_name', 'rank', 'parent', 'children'),
     ...,
     tax_id#N: ...}

    Pass 1 maps tax_id -> names from names.dmp; pass 2 builds Node objects
    from nodes.dmp, tracking leaves and a per-rank index as it goes.
    """
    log.info("Constructing taxonomy tree...")
    taxid2name = {}
    try:
        log.info('Mapping taxonomic ID to scientific and genbank common names from "{names_file}"...'.format(names_file=self.names_filename))
        # TODO: check so that names.dmp conforms to expected format
        with open(self.names_filename, 'r') as f:
            for name_line in f:
                # names.dmp columns are '|'-separated: id | name | unique | class
                name_info = name_line.split('|')
                name_type = name_info[3].strip()
                if name_type not in self.wanted_name_types:
                    continue
                tax_id = int(name_info[0].strip())
                if tax_id not in taxid2name:
                    taxid2name[tax_id] = {
                        'scientific_name': None,
                        'genbank_common_name': None}
                tax_name = name_info[1].strip()
                if name_type == 'scientific name':
                    if taxid2name[tax_id]['scientific_name'] is not None:
                        # Some logical checking, should only be one scientific name for a tax_id
                        raise TaxonomyTreeException("Found more than one scientific name for a unique tax_id. The tax_id was '{}'".format(tax_id))
                    taxid2name[tax_id]['scientific_name'] = tax_name
                elif name_type == 'genbank common name':
                    if taxid2name[tax_id]['genbank_common_name'] is not None:
                        # Some logical checking, should only be one genbank common name for a tax_id
                        raise TaxonomyTreeException("Found more than one genbank common name for a unique tax_id. The tax_id was '{}'".format(tax_id))
                    taxid2name[tax_id]['genbank_common_name'] = tax_name
                else:
                    raise TaxonomyTreeException("Logical error. Should not end up here. name_type was '{}'".format(tax_name))
    except FileNotFoundError:
        log.exception('Could not find the file "{names_file}".'.format(names_file=self.names_filename))
        raise
    try:
        log.info('Reading taxonomy from "{nodes_file}"...'.format(nodes_file=self.nodes_filename))
        # TODO: check so that nodes.dmp conforms to expected format
        with open(self.nodes_filename, 'r') as f:
            for tax_line in f:
                # nodes.dmp columns: tax_id | parent tax_id | rank | ...
                tax_info = tax_line.split('|')[0:3]
                tax_id = int(tax_info[0].strip())
                tax_parent = int(tax_info[1].strip())
                tax_rank = tax_info[2].strip()
                tax_scientific_name = taxid2name[tax_id]['scientific_name']
                tax_common_name = taxid2name[tax_id]['genbank_common_name']
                if tax_id in self.taxonomy:
                    # We already inserted the current tax_id as a parent of another
                    self.taxonomy[tax_id].rank = tax_rank
                    self.taxonomy[tax_id].parent = tax_parent
                else:
                    node = Node(
                        name=tax_scientific_name,
                        genbank_common_name=tax_common_name,
                        rank=tax_rank,
                        parent=tax_parent,
                        children=[])
                    self.taxonomy[tax_id] = node
                    # Assume leaf until we see it used as a parent.
                    self.leaves.add(tax_id)
                if tax_parent in self.taxonomy:
                    self.taxonomy[tax_parent].children.append(tax_id)
                    # The parent has a child now, so it cannot be a leaf.
                    if tax_parent in self.leaves:
                        self.leaves.remove(tax_parent)
                else:
                    # Parent row not seen yet: create a stub Node; its rank
                    # and parent are filled in when its own row arrives.
                    parent_node = Node(
                        name=taxid2name[tax_parent]['scientific_name'],
                        genbank_common_name=taxid2name[tax_parent]['genbank_common_name'],
                        rank=None,
                        parent=None,
                        children=[tax_id])
                    self.taxonomy[tax_parent] = parent_node
                # Save the tax_id to it's corresponding rank set
                if tax_rank in self.byranks:
                    self.byranks[tax_rank].add(tax_id)
                else:
                    self.byranks[tax_rank] = set([tax_id])
    except FileNotFoundError:
        log.exception('Could not find the nodes file "{nodes_file}".'.format(nodes_file=self.nodes_filename))
        raise
    # Adjust the root (the root is tax_id=1, and its parent is also tax_id=1)
    root_children = self.taxonomy[1].children
    root_children.remove(1)
    self.taxonomy[1].parent = None
    self.taxonomy[1].children = root_children
    log.info("Taxonomy tree built.")
def translate2taxid(self, scientific_names_list):
    """
    Will return the tax_ids for the scientific names listed in the input
    list. If no name can be found the value will be an empty list. More
    than one tax_id may be found for any given scientific name - they will
    all be added to that name's list.

    Returns:
        {<scientific_name>: [tax_id_1, tax_id_2]}
    """
    self._verify_list(scientific_names_list)
    tax_id_dict = {name: list() for name in scientific_names_list}
    if len(tax_id_dict) != len(scientific_names_list):
        log.warning('You entered duplicated names in the input list for translate2taxid.')
    # Full scan of the tree: collect every tax_id whose scientific name
    # matches one of the requested names.
    for tax_id, node in self.taxonomy.items():
        if node.name in tax_id_dict:
            tax_id_dict[node.name].append(tax_id)
    return tax_id_dict
def _get_property(self, tax_id, property):
    """
    Internal helper returning one attribute of the node stored for tax_id.

    Raises TaxonomyTreeException if the tree is not built, KeyError if the
    tax_id is unknown, and AttributeError for an unknown attribute name.
    """
    if not self.taxonomy:
        log.exception('You have not built the taxonomy tree yet.')
        raise TaxonomyTreeException('You have not built the taxonomy tree yet.')
    try:
        return getattr(self.taxonomy[tax_id], property)
    except KeyError:
        log.exception('Could not find tax_id={tax_id} in the taxonomy tree.'.format(tax_id=tax_id))
        raise
    except AttributeError:
        log.exception('There is no such field ("{field}") in the namedtuple.'.format(field=property))
        raise
def _verify_list(self, putative_list):
"""
Internal helper function to check that input lists are indeed lists.
"""
try:
assert isinstance(putative_list, list)
except AssertionError:
log.exception('Input must be a list. You input "{input}", of type {input_type}'.format(
input=putative_list, input_type=type(putative_list)))
raise
def get_name(self, tax_id_list):
    """Return {tax_id: scientific name} for the given tax_ids."""
    self._verify_list(tax_id_list)
    return {tax_id: self._get_property(tax_id, 'name') for tax_id in tax_id_list}
def get_common_name(self, tax_id_list):
    """Return {tax_id: genbank common name} for the given tax_ids."""
    self._verify_list(tax_id_list)
    return {tax_id: self._get_property(tax_id, 'genbank_common_name')
            for tax_id in tax_id_list}
def get_children(self, tax_id_list):
    """Return {tax_id: [direct child tax_ids]} for the given tax_ids."""
    self._verify_list(tax_id_list)
    return {tax_id: self._get_property(tax_id, 'children') for tax_id in tax_id_list}
def get_parent(self, tax_id_list):
    """Return {tax_id: parent tax_id} for the given tax_ids."""
    self._verify_list(tax_id_list)
    return {tax_id: self._get_property(tax_id, 'parent') for tax_id in tax_id_list}
def get_distance(self, tax_id_1, tax_id_2):
    """
    Return the distance between two tax_ids: the number of edges traversed
    to get from one to the other (parent<->child is 1, two genera under
    the same direct family parent are 2, and so on).

    Results are cached in self.distances keyed by (smaller, larger) id.
    """
    def hops_from_ancestor(ancestor_id, descendant_id):
        # Number of steps between an ancestor and a taxon, measured as the
        # index gap inside the descendant's root-first lineage.
        lineage = self.get_lineage([descendant_id])[descendant_id]
        return lineage.index(descendant_id) - lineage.index(ancestor_id)

    # Cache is ordered: the smaller tax_id is always the outer key.
    low, high = min(tax_id_1, tax_id_2), max(tax_id_1, tax_id_2)
    cache = self.distances.setdefault(low, {})
    if high not in cache:
        # Total distance = hops from each tax_id up to their LCA.
        lca = self.get_lca(tax_id_1, tax_id_2)
        cache[high] = (hops_from_ancestor(lca, tax_id_1)
                       + hops_from_ancestor(lca, tax_id_2))
    return cache[high]
def get_rank(self, tax_id_list):
    """Return {tax_id: rank name} for the given tax_ids."""
    self._verify_list(tax_id_list)
    return {tax_id: self._get_property(tax_id, 'rank') for tax_id in tax_id_list}
def get_rank_code(self, tax_id_list):
    """
    Returns the rank, rank code, and rank offset for each tax_id.
    For example:
    tax_id 314295 is rank 'superfamily'. That rank has no rank code in the
    original Kraken 2 reports (see translate_rank2code dict above). Same
    goes for all of the 'no rank' tax_ids. Instead, 314295 is considered to
    be an 'order' but at the depth of 4, i.e. 4 steps down from the tax_id
    of rank 'order' that is closes above it in the lineage. The rank code
    is therefore O, and the depth is 4. So the full rank code is O4.

    Returns a dict of namedtupes, one for each tax_id in the supplied list.
    """
    rank_dict = self.get_rank(tax_id_list)
    rank_code_dict = {}
    for tax_id in rank_dict:
        rank = rank_dict[tax_id]
        rank_code = ''
        current_node = tax_id
        # Walk up the lineage until a rank with a Kraken 2 code (or the
        # root) is found.
        while not rank_code:
            if rank in translate_rank2code:
                rank_code = translate_rank2code[rank]
            elif current_node == 1:
                # Special case for root, as it has rank 'no rank'
                rank_code = 'R'
            else:
                current_node = self.get_parent([current_node])[current_node]
                rank = self.get_rank([current_node])[current_node]
        # Depth = edges between the coded ancestor and the queried tax_id
        # (0 when the tax_id itself carries a coded rank).
        rank_depth = self.get_distance(current_node, tax_id)
        rank_name = self.get_rank([tax_id])[tax_id]
        rank_tuple = Rank(
            rank_name=rank_name,
            rank_code=rank_code,
            rank_depth=rank_depth)
        rank_code_dict[tax_id] = rank_tuple
    return rank_code_dict
def get_node(self, tax_id_list):
    """Return {tax_id: Node} for the supplied tax_ids.

    Raises TaxonomyTreeException when the tree is not built, KeyError for
    unknown tax_ids.
    """
    # TODO: Use this fnc in other fncs when getting nodes from self.taxonomy
    self._verify_list(tax_id_list)
    if not self.taxonomy:
        log.exception('You have not built the taxonomy tree yet.')
        raise TaxonomyTreeException('You have not built the taxonomy tree yet.')
    node_dict = {}
    for tax_id in tax_id_list:
        try:
            node_dict[tax_id] = self.taxonomy[tax_id]
        except KeyError:
            log.exception('Could not find tax_id={tax_id} in the taxonomy tree.'.format(tax_id=tax_id))
            raise
    return node_dict
def get_lineage(self, tax_id_list):
    """
    For each tax_id, returns the input tax_id and the tax_ids of its
    ancestors.

    Lineages are ordered root-first and end with the queried tax_id;
    results are memoized in self.lineages.
    """
    self._verify_list(tax_id_list)
    lineage_dict = {}
    for tax_id in tax_id_list:
        if tax_id in self.lineages:
            # Lineage has already been calculated, retrieve it
            lineage_dict[tax_id] = self.lineages[tax_id]
            continue
        lineage = [tax_id]
        node = self.get_node([tax_id])[tax_id]
        # Climb to the root; the root's parent is None, ending the loop.
        while node.parent:
            lineage.append(node.parent)
            node = self.get_node([node.parent])[node.parent]
        lineage.reverse()  # root first
        lineage_dict[tax_id] = lineage
        # Save lineage for faster response next time
        self.lineages[tax_id] = lineage
    return lineage_dict
def get_clade(self, tax_id_list):
    """
    For each tax_id, returns all of the tax_ids of the clade rooted at the
    tax_id.

    returns: {tax_id#1: set(all tax_ids in clade),
              tax_id#2: set(all tax_ids in clade)}
    """
    self._verify_list(tax_id_list)
    clade_dict = {}
    for tax_id in tax_id_list:
        # Breadth-agnostic frontier expansion: keep popping taxa and
        # folding their children in until no unexplored taxa remain.
        frontier = set(self.get_node([tax_id])[tax_id].children)
        clade = {tax_id} | frontier
        while frontier:
            taxon = frontier.pop()  # set is non-empty here, pop cannot fail
            descendants = self.get_node([taxon])[taxon].children
            clade.update(descendants)
            frontier.update(descendants)
        clade_dict[tax_id] = clade
    return clade_dict
def get_leaves(self, tax_ids=None):
    """
    Returns a {tax_id: set(leaf_taxids)} mapping of leaf node tax_ids for
    the clades rooted at the tax_ids.

    Defaults to the root ([1]). The default is created per call: the
    original `tax_ids=[1]` was a shared mutable default argument.
    """
    if tax_ids is None:
        tax_ids = [1]
    self._verify_list(tax_ids)
    clade_dict = {}

    def get_leaves_dfs(tax_id, clade_leaves, visited_nodes=None):
        # Depth-first walk; a node without children is a leaf.
        if visited_nodes is None:
            visited_nodes = set()
        if tax_id not in visited_nodes:
            visited_nodes.add(tax_id)
            children = self.get_children([tax_id])[tax_id]
            if children:
                for child in children:
                    get_leaves_dfs(child, clade_leaves, visited_nodes)
            else:
                clade_leaves.add(tax_id)
        return clade_leaves

    for tax_id in tax_ids:
        clade_dict[tax_id] = get_leaves_dfs(tax_id, set())
    return clade_dict
def get_lca(self, tax_id_1, tax_id_2):
    """
    Return the tax_id of the lowest common ancestor (LCA) of two tax_ids.

    Results are cached in ``self.lca_mappings``, keyed with the smaller
    tax_id first.
    """
    low, high = min(tax_id_1, tax_id_2), max(tax_id_1, tax_id_2)
    if low in self.lca_mappings:
        cached = self.lca_mappings[low].get(high)
        if cached is not None:
            # Served from cache.
            return cached
    else:
        self.lca_mappings[low] = {}
    # Compute lineages and intersect them; the shared ancestors form a
    # prefix of either lineage, so the LCA is the element at index
    # len(common) - 1 of the lineage.
    lineages = self.get_lineage([tax_id_1, tax_id_2])
    common = set(lineages[tax_id_1]).intersection(lineages[tax_id_2])
    lca = lineages[tax_id_1][len(common) - 1]
    # Memoize for faster response next time.
    self.lca_mappings[low][high] = lca
    return lca
def get_clade_rank_taxids(self, tax_ids, rank=None):
    """
    For each clade rooted at the input tax_ids, return all tax_ids that
    represent taxa at the supplied rank, or at all canonical ranks.
    Examples:
    # get_clade_rank_taxids([1], 'phylum') -- all phyla in the whole tree
    # get_clade_rank_taxids([2, 9443], 'genus') -- all genera in the clades
    #   rooted at 'Bacteria' and 'Primates'
    # get_clade_rank_taxids([1]) -- all canonical ranks in the whole tree
    """
    self._verify_list(tax_ids)
    canonical_ranks = translate_rank2code.values()
    # Root rank 'R' gets weight 0; deeper canonical ranks get larger weights.
    canonical_rank_weights = {r: weight for weight, r in enumerate(['R'] + list(canonical_ranks))}
    clade_tax_rank_dict = {tax_id: dict() for tax_id in tax_ids}
    if rank:
        rank = translate_rank2code[rank]
    else:
        rank = canonical_ranks

    def dfs(tax_id, visited_nodes=None, tax_lvl_dict=None, wanted_ranks=None):
        """
        Recursively search the taxonomy tree depth-first, saving every
        tax_id whose canonical rank code (S/G/F etc.) is in wanted_ranks.
        """
        if visited_nodes is None:
            visited_nodes = set()
        if wanted_ranks is None:
            # BUGFIX: this used to be {rank in canonical_ranks}, which
            # builds a set containing a single boolean rather than the set
            # of wanted rank codes. Unreachable from the call below (which
            # always passes wanted_ranks), but wrong for any other caller.
            wanted_ranks = set(rank)
        if tax_lvl_dict is None:
            tax_lvl_dict = {tax_lvl: set() for tax_lvl in wanted_ranks}
        if tax_id not in visited_nodes:
            visited_nodes.add(tax_id)
            taxonomy_rank = self.get_rank_code([tax_id])[tax_id]
            rank_code = taxonomy_rank.rank_code
            if taxonomy_rank.rank_depth == 0:
                if rank_code in wanted_ranks:
                    tax_lvl_dict[rank_code].add(tax_id)
            rank_code_weight = canonical_rank_weights[rank_code]
            # Keep descending only while a wanted rank can still occur
            # below the current rank.
            if any(rank_code_weight < canonical_rank_weights[r] for r in wanted_ranks):
                children = self.get_children([tax_id])[tax_id]
                for child in children:
                    dfs(child, visited_nodes, tax_lvl_dict, wanted_ranks)
        return tax_lvl_dict

    for tax_id in tax_ids:
        clade_tax_rank_dict[tax_id] = dfs(tax_id, wanted_ranks=set(rank))
    return clade_tax_rank_dict
def get_siblings(self, tax_id):
    """
    NB! This fnc hasn't been extensively tested, use at own risk.
    This fnc is similar to get_clade_rank_taxids, but I think it should
    be faster.
    For a given tax_id X with any rank in ['S', 'G', 'F', 'O', 'C', 'P'],
    return all taxa with the same rank in the clade rooted at the parent
    of X. The parent is defined as the most recent ancestor of X that has
    a rank also in ['S', 'G', 'F', 'O', 'C', 'P'].
    For example, if the tax_id 3352 (Pinus taeda, a species) is submitted
    to the function, it will return all other species in the genus Pinus
    (3337). Conversely, if the genus Pinus (3337) is submitted, the
    function will return all genera in the family Pinaceae (3318).
    """
    # TODO: Test this more.
    # TODO: In line with other exposed functions in this class, it should take a list of taxids instead of a single one.
    tax_id_rank = self.get_rank_code([tax_id])[tax_id]
    rank = tax_id_rank.rank_code
    rank_codes = ['S', 'G', 'F', 'O', 'C', 'P']
    # Only exact canonical ranks are accepted (rank_depth 0 appears to mean
    # "not a sub-/super-rank" -- TODO confirm against get_rank_code).
    if tax_id_rank.rank_depth != 0:
        raise TaxonomyTreeException("Can only work with ranks of level {}.".format(rank_codes))
    def get_parent(tax_id):
        # Climb toward the root until an ancestor with an exact canonical
        # rank is found, or the root (tax_id 1) is reached.
        parent_rank_ok = False
        current_tax_id = tax_id
        while not parent_rank_ok:
            parent = self.get_parent([current_tax_id])[current_tax_id]
            taxonomy_rank = self.get_rank_code([parent])[parent]
            if taxonomy_rank.rank_code in rank_codes and taxonomy_rank.rank_depth == 0:
                parent_rank_ok = True
            elif parent == 1:
                # Hit the root without finding a canonical ancestor.
                parent_rank_ok = True
            else:
                current_tax_id = parent
        return parent
    parent = get_parent(tax_id)
    visited_nodes = set()
    siblings = set()
    def dfs(tax_id, wanted_rank):
        # Depth-first search from the parent: stop descending once a node
        # of the wanted rank is reached and record it as a sibling. Note
        # the queried tax_id itself ends up in the result set too.
        if tax_id not in visited_nodes:
            visited_nodes.add(tax_id)
            taxonomy_rank = self.get_rank_code([tax_id])[tax_id]
            if taxonomy_rank.rank_code != wanted_rank:
                children = self.get_children([tax_id])[tax_id]
                for child in children:
                    dfs(child, wanted_rank)
            else:
                siblings.add(tax_id)
    dfs(parent, rank)
    return siblings
if __name__ == '__main__':
    # Minimal CLI entry point: build a TaxonomyTree from taxonomy dump files.
    parser = argparse.ArgumentParser()
    parser.add_argument('--nodes')  # path to the nodes dump file -- TODO confirm format
    parser.add_argument('--names')  # path to the names dump file -- TODO confirm format
    args = parser.parse_args()
    taxonomy_tree = TaxonomyTree(args.nodes, args.names)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Foreman smart proxy resources.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation. BUGFIX: the `locations` and `organizations`
# options had their description text inlined on the option key itself,
# producing malformed YAML; each now has a proper `description:` entry.
DOCUMENTATION = '''
---
module: foreman_smart_proxy
short_description: Manage Foreman smart proxy resources using Foreman API v2
description:
- Create and delete Foreman smart proxy resources using Foreman API v2
options:
  name:
    description: Smart proxy name
    required: true
  state:
    description: Smart proxy state
    required: false
    default: present
    choices: ["present", "absent"]
  url:
    description: Smart proxy URL
    required: false
    default: None
  locations:
    description: List of locations the smart_proxy should be assigned to
    required: false
    default: None
  organizations:
    description: List of organizations the smart_proxy should be assigned to
    required: false
    default: None
  foreman_host:
    description: Hostname or IP address of Foreman system
    required: false
    default: 127.0.0.1
  foreman_port:
    description: Port of Foreman API
    required: false
    default: 443
  foreman_user:
    description: Username to be used to authenticate on Foreman
    required: true
  foreman_pass:
    description: Password to be used to authenticate user on Foreman
    required: true
  foreman_ssl:
    description: Enable SSL when connecting to Foreman API
    required: false
    default: true
notes:
- Requires the python-foreman package to be installed. See https://github.com/Nosmoht/python-foreman.
version_added: "2.0"
author: "Thomas Krahn (@nosmoht)"
'''
try:
from foreman.foreman import *
foremanclient_found = True
except ImportError:
foremanclient_found = False
try:
from ansible.module_utils.foreman_utils import *
has_import_error = False
except ImportError as e:
has_import_error = True
import_error_msg = str(e)
def smart_proxies_equal(data, smart_proxy):
    """
    Return True when the desired state in *data* matches the existing
    *smart_proxy*: the url plus the organization and location assignments.
    """
    # Compare only keys that are both comparable and present in data.
    for key in set(data).intersection({'url'}):
        if data.get(key) != smart_proxy.get(key):
            return False
    # Delegate the association checks to the shared helpers.
    return (organizations_equal(data, smart_proxy)
            and locations_equal(data, smart_proxy))
def ensure(module):
    """
    Ensure the smart proxy described by the module parameters exists, is
    up to date, or is absent, depending on ``state``.

    Returns a (changed, smart_proxy) tuple; calls module.fail_json on any
    Foreman API error.
    """
    name = module.params['name']
    url = module.params['url']
    state = module.params['state']
    organizations = module.params['organizations']
    locations = module.params['locations']
    theforeman = init_foreman_client(module)

    data = {'name': name}
    try:
        smart_proxy = theforeman.search_smart_proxy(data=data)
        if smart_proxy:
            # The search result is a slim record; fetch the full resource.
            smart_proxy = theforeman.get_smart_proxy(id=smart_proxy.get('id'))
    except ForemanError as e:
        # BUGFIX: e.message does not exist on Python 3 exceptions; str(e)
        # works on both Python 2 and 3.
        module.fail_json(msg='Could not get smart proxy: {0}'.format(str(e)))

    if organizations:
        data['organization_ids'] = get_organization_ids(module, theforeman, organizations)
    if locations:
        data['location_ids'] = get_location_ids(module, theforeman, locations)
    data['url'] = url

    if not smart_proxy and state == 'present':
        try:
            smart_proxy = theforeman.create_smart_proxy(data=data)
            return True, smart_proxy
        except ForemanError as e:
            module.fail_json(msg='Could not create smart proxy: {0}'.format(str(e)))

    if smart_proxy:
        if state == 'absent':
            try:
                smart_proxy = theforeman.delete_smart_proxy(id=smart_proxy.get('id'))
                return True, smart_proxy
            except ForemanError as e:
                module.fail_json(msg='Could not delete smart proxy: {0}'.format(str(e)))
        if not smart_proxies_equal(data, smart_proxy):
            try:
                smart_proxy = theforeman.update_smart_proxy(id=smart_proxy.get('id'), data=data)
                return True, smart_proxy
            except ForemanError as e:
                module.fail_json(msg='Could not update smart proxy: {0}'.format(str(e)))

    return False, smart_proxy
def main():
    """Ansible entry point: declare arguments and apply the desired state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            url=dict(type='str', required=False),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            organizations=dict(type='list', required=False),
            locations=dict(type='list', required=False),
            foreman_host=dict(type='str', default='127.0.0.1'),
            foreman_port=dict(type='str', default='443'),
            foreman_user=dict(type='str', required=True),
            foreman_pass=dict(type='str', required=True, no_log=True),
            foreman_ssl=dict(type='bool', default=True)
        ),
    )
    if not foremanclient_found:
        module.fail_json(msg='python-foreman module is required. See https://github.com/Nosmoht/python-foreman.')
    # BUGFIX: a failed foreman_utils import was recorded in has_import_error
    # but never reported, which would surface later as a confusing NameError;
    # fail explicitly with the original import error instead.
    if has_import_error:
        module.fail_json(msg=import_error_msg)
    changed, smart_proxy = ensure(module)
    module.exit_json(changed=changed, smart_proxy=smart_proxy)
# Ansible module boilerplate: the star import of module_utils.basic is
# conventionally placed after the module code.
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
|
# Modifying/adding to the books database (Deitel books.db example).
import sqlite3
import pandas as pd

connection = sqlite3.connect('books.db')
pd.options.display.max_columns = 10

pd.read_sql('SELECT * FROM authors', connection, index_col=['id'])
pd.read_sql('SELECT * FROM titles', connection)
df = pd.read_sql('SELECT * FROM author_ISBN', connection)
df.head()

# A: authors ordered by last name, descending.
pd.read_sql("""SELECT * FROM authors ORDER BY last DESC""", connection, index_col=['id'])

# B: titles ordered alphabetically.
# BUGFIX: the original query ended with a stray "FROM" (invalid SQL), and
# indexed on an 'id' column the titles table does not have.
pd.read_sql("""SELECT * FROM titles ORDER BY title ASC""", connection)

# C: each author joined with the books they wrote.
# BUGFIX: the original joined a non-existent author_titles table and had no
# ON clause; join through the author_ISBN link table to titles instead.
pd.read_sql("""SELECT first, last, title, copyright, titles.isbn
               FROM authors
               INNER JOIN author_ISBN ON authors.id = author_ISBN.id
               INNER JOIN titles ON author_ISBN.isbn = titles.isbn
               ORDER BY title""", connection)

# D: insert a new author.
cursor = connection.cursor()
cursor = cursor.execute("""INSERT INTO authors (first, last) VALUES ('Chris','Hasty')""")

# E: insert a new title.
# BUGFIX: the original also inserted a 'title' value into author_ISBN, a
# link table that has no title column; that statement was removed.
cursor = cursor.execute("""INSERT INTO titles (title) VALUES ('Moby Dick')""")
# BUGFIX: without a commit the inserts above are lost when the connection
# closes.
connection.commit()
# -*- encoding:utf-8 -*-
'''Security Test
1. Check permission in AndroidManifest.xml
2. check protectLevel
3. check allowBack (the Value must be set False)
4. Check android:exported (the Value must be set False, otherwise, print Activity to analysis)
'''
from CoreFun.Log.LogUtil import MyLog
import xml.dom.minidom as xmldom
class ManifestCheck:
    """Static security checks on an AndroidManifest.xml file:

    1. declared permissions and their protectionLevel
    2. requested (uses-permission) permissions
    3. android:allowBackup (should be "false")
    4. android:exported on activity/service/provider (should be "false";
       otherwise the component name is printed for analysis)
    """

    def __init__(self, manifestFile):
        self.manifestFile = manifestFile          # path to AndroidManifest.xml
        self.Mylog = MyLog.getLog()
        self.log = self.Mylog.logger

    def parseFile(self):
        """Parse the manifest and return element lists in a fixed order:
        [permission, uses-permission, application, activity, service, provider].
        """
        dom = xmldom.parse(self.manifestFile)
        checklist = [
            dom.getElementsByTagName("permission"),
            dom.getElementsByTagName("uses-permission"),
            dom.getElementsByTagName("application"),
            dom.getElementsByTagName("activity"),
            dom.getElementsByTagName("service"),
            dom.getElementsByTagName("provider"),
        ]
        return checklist

    def allowBackUpCheck(self):
        """Report whether android:allowBackup is enabled on <application>."""
        xmllist = self.parseFile()
        for m in xmllist[2]:
            checkBackup = m.getAttribute("android:allowBackup")
            if checkBackup == "true":
                self.log.info("android:allowBackup=\"{}\"".format(checkBackup))
                print("<<<<<<<赶紧提单,别磨蹭了>>>>>>")
            else:
                print("\n<<<<<<<<<<<***allowBackup***安全****>>>>>>>>")

    def permissionCheck(self):
        """Print declared permissions whose protectionLevel is weaker than
        signature/signatureOrSystem, then list all requested permissions."""
        permissionList = self.parseFile()
        print("(((**(((**(((** 权限级别申请 **))**))**))**))")
        for j in permissionList[0]:
            defindPermission_name = j.getAttribute("android:name")
            defindPermission = j.getAttribute("android:protectionLevel")
            # BUGFIX: the original condition was
            #   defindPermission != "signature" or "signatureOrSystem"
            # which is always true, because the non-empty string literal on
            # the right of `or` is truthy. Use a membership test instead.
            if defindPermission not in ("signature", "signatureOrSystem"):
                print(defindPermission_name)
                print("android:protectionLevel={}".format(defindPermission))
                print("\n")
        print("(((**(((**(((** 权限列表 **))**))**))**))")
        for i in permissionList[1]:
            permission = i.getAttribute("android:name")
            print(permission)

    def exprotedCheck(self):
        '''Check android:exported (the Value must be set False, otherwise, print Activity to analysis)'''
        a = self.parseFile()
        # Slice out activity, service and provider element lists.
        b = a[3:6:1]
        for i in range(len(b)):
            for n in b[i]:
                if n.getAttribute("android:exported"):
                    getExported = n.getAttribute("android:exported")
                    if getExported == "true":
                        getActivity = n.getAttribute("android:name")
                        print(getActivity)
                else:
                    # No explicit android:exported: also report, since the
                    # default can be true for components with intent filters.
                    getActivity = n.getAttribute("android:name")
                    print(getActivity)

    def manifestSecurityCheck(self):
        """Run all manifest checks and print a sectioned report."""
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        print("|||||")
        print("*********************备份检查(allowBackup)******************")
        print("检查程序是否允许备份:android:allowBackup=\"false\"")
        print("|||||")
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        self.allowBackUpCheck()
        print("\n\n")
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        print("|||||")
        print("**************权*************限************列***********表**")
        print("|||||")
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        self.permissionCheck()
        print("\n\n")
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        print("|||||")
        print("Activity, Service, Provider组件未设置Android:exported=false")
        print("|||||")
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        self.exprotedCheck()
#
# if __name__ == "__main__":
# path = "D:\\Code\\Android\\app\\src\\main\\AndroidManifest.xml"
#
# ManifestCheck(path).manifestSecurityCheck()
|
from keg_storage_ta.app import KegStorageTestApp
def pytest_configure(config):
    # Pytest hook: prepare the Keg storage test application once, before
    # the test session starts.
    KegStorageTestApp.testing_prep()
|
# django_file_system_searcher/urls.py
from django.urls import path
from django.views.generic import TemplateView
from .views import LightroomImageFileInfoViewSet, LightroomCatalogViewSet, ImageToFileInfoViewSet
# Route table: each ViewSet is wired up manually -- list/create on the
# collection URL, retrieve/update/partial_update/destroy on the /<pk>/ URL.
urlpatterns = [
    # Landing page rendered from a static template.
    path('', TemplateView.as_view(template_name="LightroomCatalogFileInfo-index.html"), name='index'),
    path('lightroom_catalog_file_info/', LightroomImageFileInfoViewSet.as_view({'get': 'list', 'post': 'create'})),
    path('lightroom_catalog_file_info/<int:pk>/', LightroomImageFileInfoViewSet.as_view({
        'get': 'retrieve',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy',
        })
    ),
    path('lightroom_catalog/', LightroomCatalogViewSet.as_view({'get': 'list', 'post': 'create'})),
    path('lightroom_catalog/<int:pk>/', LightroomCatalogViewSet.as_view({
        'get': 'retrieve',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy',
        })
    ),
    path('image_to_file_info/', ImageToFileInfoViewSet.as_view({'get': 'list', 'post': 'create'})),
    path('image_to_file_info/<int:pk>/', ImageToFileInfoViewSet.as_view({
        'get': 'retrieve',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy',
        })
    ),
]
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import urllib2
import os
from google.appengine.ext.webapp import template
from google.appengine.ext import db
class StudentDetail(db.Model):
    """One dated progress record for a student (looked up by id + date)."""
    id = db.StringProperty()                            # matches Student.id
    date = db.StringProperty()                          # record date string, e.g. '2013/01/01'
    exams = db.StringProperty(multiline=True)
    daily_notes = db.StringProperty(multiline=True)
    contact_matters = db.StringProperty(multiline=True)
class Student(db.Model):
    """Datastore model for a student; stored with key_name = student id."""
    id = db.StringProperty()            # external student id (also used as key_name)
    birthday = db.StringProperty()
    name = db.StringProperty()
    address = db.StringProperty()
    parent_name = db.StringProperty()
    parent_tel = db.StringProperty()
    emergency_tel = db.StringProperty()
    school = db.StringProperty()
    classno = db.StringProperty()
    teacher = db.StringProperty()
    teacher_tel = db.StringProperty()
    district = db.StringProperty()
    progress = db.StringProperty(multiline=True)
    # The three fields below mirror the latest StudentDetail values
    # (see StudentDetailHandler.post).
    exams = db.StringProperty(multiline=True)
    daily_notes = db.StringProperty(multiline=True)
    contact_matters = db.StringProperty(multiline=True)
    date = db.DateTimeProperty(auto_now_add=True)       # entity creation time
class StudentDetailHandler(webapp2.RequestHandler):
    """Saves a dated StudentDetail record and mirrors the latest values
    onto the matching Student entity."""

    def post(self):
        q = self.request
        # Look up the detail record for this student id and date.
        sd = StudentDetail.gql('where id=:1 and date=:2',
                               q.get('id', '0'), q.get('date', '2013/01/01')).get()
        if sd is None:
            # BUGFIX: previously this crashed with AttributeError when no
            # matching record existed; create a new one instead. (A dead
            # `s = Student(key_name=...)` that was immediately overwritten
            # below has also been removed.)
            sd = StudentDetail()
        sd.id = q.get('id', '0')
        sd.date = q.get('date', '2013/01/01')
        sd.exams = q.get('exams', '')
        sd.daily_notes = q.get('daily_notes', '')
        sd.contact_matters = q.get('contact_matters', '')
        sd.put()
        # Mirror the most recent values onto the Student entity.
        s = Student.gql('where id =:1', q.get('id', '0')).get()
        if s is not None:  # BUGFIX: guard against an unknown student id
            s.exams = q.get('exams', '')
            s.daily_notes = q.get('daily_notes', '')
            s.contact_matters = q.get('contact_matters', '')
            s.put()
        self.redirect('/students')
class StudentsHandler(webapp2.RequestHandler):
    """Renders the student list page (students.html)."""

    def get(self):
        students = Student.all().fetch(100)
        # Give unnamed students a placeholder so the template has text to show.
        for s in students:
            if s is not None and not s.name:  # was `!= None` / `== None or len()==0`
                s.name = '[Name]'
        template_values = {'students': students}
        # (The unused urllib2.unquote(...) local from the original was removed.)
        path = os.path.join(os.path.dirname(__file__), 'students.html')
        self.response.out.write(template.render(path, template_values))
class StudentHandler(webapp2.RequestHandler):
    """Displays (GET) and upserts (POST) a single student."""

    # Request fields copied verbatim into the Student entity on POST.
    _FIELDS = ('name', 'birthday', 'address', 'parent_name', 'parent_tel',
               'emergency_tel', 'school', 'classno', 'teacher', 'teacher_tel',
               'district', 'progress', 'exams', 'daily_notes', 'contact_matters')

    def get(self):
        q = self.request
        s = Student.gql('where id =:1', q.get('id', '0')).get()
        student_details = StudentDetail.gql(
            'where id =:1 order by date desc', q.get('id', '0')).fetch(100)
        template_values = {'s': s, 'student_details': student_details}
        path = os.path.join(os.path.dirname(__file__), 'student.html')
        self.response.out.write(template.render(path, template_values))

    def post(self):
        q = self.request
        # key_name makes this write an upsert keyed on the student id.
        s = Student(key_name=q.get('id', '0'))
        s.id = q.get('id', '0')
        # Replace the original 15 copy-pasted assignments with a field loop.
        for field in self._FIELDS:
            setattr(s, field, q.get(field, ''))
        s.put()
        self.redirect('/students')
class LoginHandler(webapp2.RequestHandler):
    # Renders the login page (GET) and checks the submitted password (POST).

    def get(self):
        r = self.response
        template_values = { }
        request = urllib2.unquote(self.request.path)
        path = os.path.join(os.path.dirname(__file__), 'login.html')
        r.out.write(template.render(path, template_values))

    def post(self):
        q = self.request
        r = self.response
        password = q.get('password')
        # SECURITY: hard-coded plaintext password compared in the clear, with
        # no session/auth state -- /students is also reachable directly
        # without logging in. Needs a real authentication scheme.
        if password=='1111':
            self.redirect('/students')
        else:
            r.write('login failed')
class MainHandler(webapp2.RequestHandler):
    # Root URL: redirect straight to the login page.

    def get(self):
        self.redirect('/login')
class CommandHandler(webapp2.RequestHandler):
    # Catch-all handler: echoes the (URL-decoded) request path back.

    def get(self):
        r = self.response
        command = urllib2.unquote(self.request.path)
        r.write(command)
        if command == '/info':
            r.write('ssss')  # placeholder output -- TODO confirm intent
# WSGI routing table; the '/.*' catch-all must stay last so it does not
# shadow the more specific routes above it.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
    , ('/login', LoginHandler)
    , ('/students', StudentsHandler)
    , ('/student', StudentHandler)
    , ('/student_detail', StudentDetailHandler)
    , ('/.*', CommandHandler)
], debug=True)
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SeasonsAppConfig(AppConfig):
    """Django application configuration for the "seasons" app."""
    name = 'seasons'
    verbose_name = _("seasons")
|
import tweets_downloader
import tweets_cleaner
import os
import sys
from configparser import ConfigParser
# Resolve config.ini relative to this script's directory.
pathname = os.path.dirname(sys.argv[0])
config = ConfigParser()
# BUGFIX: build the path with os.path.join instead of string concatenation;
# `pathname + '/../config.ini'` yields '/../config.ini' when the script is
# invoked from its own directory (empty dirname) and is not portable.
config.read(os.path.join(pathname, '..', 'config.ini'))

# Result folder that receives the downloaded raw tweets.
result_folder = config['twitter']['twitter_raw_tweets_folder']

# Twitter profiles to download data from.
profiles = ["realDonaldTrump", "rogerfederer", "MercedesAMG", "Forbes"]
tweets_downloader.downloadTweets(profiles, result_folder)

# Additional local corpora that only go through the cleaning step.
profiles.extend(["movie_lines", "days", "test"])
for profile in profiles:
    tweets_cleaner.preprocessing(profile)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Targeted Impala HBase Tests
import pytest
from tests.common.impala_test_suite import ImpalaTestSuite
class TestHBaseQueries(ImpalaTestSuite):
    """Targeted Impala query tests against HBase-backed tables."""

    @classmethod
    def get_workload(cls):
        # Fix: this is a @classmethod, so its first parameter is the class;
        # it was misleadingly named `self` in the original.
        return 'functional-query'

    @classmethod
    def add_test_dimensions(cls):
        super(TestHBaseQueries, cls).add_test_dimensions()
        # Restrict the test matrix to the hbase table format.
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: v.get_value('table_format').file_format == 'hbase')

    def test_hbase_scan_node(self, vector):
        self.run_test_case('QueryTest/hbase-scan-node', vector)

    def test_hbase_row_key(self, vector):
        self.run_test_case('QueryTest/hbase-rowkeys', vector)

    def test_hbase_filters(self, vector):
        self.run_test_case('QueryTest/hbase-filters', vector)

    def test_hbase_subquery(self, vector):
        self.run_test_case('QueryTest/hbase-subquery', vector)

    def test_hbase_inline_views(self, vector):
        self.run_test_case('QueryTest/hbase-inline-view', vector)

    def test_hbase_top_n(self, vector):
        self.run_test_case('QueryTest/hbase-top-n', vector)

    def test_hbase_limits(self, vector):
        self.run_test_case('QueryTest/hbase-limit', vector)

    @pytest.mark.execute_serially
    def test_hbase_inserts(self, vector):
        # Runs serially: inserts mutate shared table state.
        self.run_test_case('QueryTest/hbase-inserts', vector)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from appium_auto.three.page.base_page import BasePage
class ContactAddPage(BasePage):
    """Page object for the 'add contact' screen.

    All methods are currently unimplemented stubs that return self so
    calls can be chained.
    """

    def input_name(self):
        # TODO: implement typing into the name field.
        return self

    def set_gender(self):
        # TODO: implement selecting the gender option.
        return self

    def input_phone(self):
        # TODO: implement typing into the phone field.
        return self
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.