import random
lower = "abcdefghjiklmnopqrstuvwxyz"
upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
numbers = "0123456789"
symbols = "!”#$%&'()*+,-./:;<=>?@[]^_`{|}~"
characters = lower + upper + numbers + symbols
password = "".join(random.sample(characters, random.randint(8, 25)))
print(password)
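# Note: random.sample above draws without replacement, so no character repeats
# and the length is capped at len(characters). A minimal sketch with the
# standard-library secrets module, which permits repeats and is the usual
# choice for security-sensitive strings:
import secrets
password = "".join(secrets.choice(characters) for _ in range(random.randint(8, 25)))
print(password)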
|
#! /usr/bin/env python
def f(x):
return x**3 - 1
def nwtn_meth(f,x_0,i):
"""nwtn_meth(f,x_0,i) Finds one of the roots of formula f, using newton's method
starting at point x_0 and iterating over i iterations"""
import num_diff
grad = num_diff.num_diff(f, x_0, 0.0001)
x_j = x_0
for j in range(i):
x_j_new = x_j - f(x_j)/grad
grad = num_diff.num_diff(f, x_j_new, 0.0001)
x_j = x_j_new
return x_j_new
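# num_diff is an external helper module, not shown here; a minimal
# central-difference sketch of the interface assumed above,
# num_diff.num_diff(f, x, h) ~ f'(x), would be:
# def num_diff(f, x, h):
#     return (f(x + h) - f(x - h)) / (2 * h)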
import numpy
import matplotlib.pyplot as plt
import matplotlib.image as img
import sys
numberOfPoints = int(sys.argv[1])
print(sys.argv[1])
#numberOfPoints = 100
result_array = numpy.zeros((2*numberOfPoints,2*numberOfPoints,3), dtype="uint8")
numberOfRoots = 0
resultList = []
for i in range(-numberOfPoints,numberOfPoints,1):
print(i)
for k in range(-numberOfPoints,numberOfPoints,1):
result = (nwtn_meth(f, i+(k*1j), 1000))
# if the result is a root to the function
fResult = f(result)
if fResult.real**2 + fResult.imag**2 < 0.01:
# insert thing to find distinct roots here
if len(resultList) == 0:
resultList.append(result)
distinctResult=True
for l in range(len(resultList)):
# for each previously found result, if the currently found result is very similar then the current result is not a distinct root
if abs(result.real - resultList[l].real) < 0.01 and abs(result.imag - resultList[l].imag) < 0.01:
distinctResult=False
rootNumber=l
if distinctResult:
resultList.append(result)
rootNumber = len(resultList) - 1
# if (result.real >= 0.99) & (result.real <= 1.01) & (result.imag <=0.01) & (result.imag >= -0.01):
if(rootNumber == 0):
result_array[k+numberOfPoints][i+numberOfPoints] = (255,0,0)
# elif (result.real >= -0.51) & (result.real <= -0.49) & (result.imag >= 0.865) & (result.imag <= 0.867):
if(rootNumber == 1):
result_array[k+numberOfPoints][i+numberOfPoints] = (0,255,0)
# elif (result.real >= -0.51) & (result.real <= -0.49) & (result.imag <= -0.865) & (result.imag >= -0.867):
if(rootNumber == 2):
result_array[k+numberOfPoints][i+numberOfPoints] = (0,0,255)
if(rootNumber == 3):
result_array[k+numberOfPoints][i+numberOfPoints] = (0,125,125)
image = plt.imshow(result_array)
numberOfRoots = len(resultList)
print "There are %d roots: " % (numberOfRoots)
for l in range(len(resultList)):
print "%f + %fj" % (resultList[l].real, resultList[l].imag)
plt.show()
|
# Given a string, you need to reverse the order of characters in each
# word within a sentence while still preserving whitespace
# and initial word order.
class Solution:
def reverseWords(self, s):
return " ".join([word[::-1] for word in s.split()])
if __name__ == "__main__":
testinput = "Let's take LeetCode contest"
print(Solution().reverseWords(testinput))
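# Expected output: s'teL ekat edoCteeL tsetnoc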
|
from squirrel.config import register_config
@register_config('valid')
def build_valid_config(parser):
parser.add_argument(
'--valid_batch_size',
type=int,
default=2048,
help='# of tokens processed per batch')
parser.add_argument(
'--valid_maxlen',
type=int,
default=10000,
help='limit the validation set sentences to this many tokens')
parser.add_argument(
'--length_ratio',
type=int,
default=3,
help='maximum decoding length, as a ratio of the source length')
parser.add_argument(
'--beam_size',
type=int,
default=1,
help='beam size used in beam search; the default of 1 is greedy decoding')
parser.add_argument(
'--alpha', type=float, default=1, help='length normalization weights')
parser.add_argument(
'--original',
action='store_true',
help='output the original output files, not the tokenized ones.')
parser.add_argument(
'--decode_test',
action='store_true',
help='evaluate scores on test set instead of using dev set.')
parser.add_argument(
'--output_decoding_files',
action='store_true',
help='output separate files in testing.')
parser.add_argument(
'--output_on_the_fly',
action='store_true',
help='write decoding output to the files on the fly')
parser.add_argument(
'--decoding_path',
type=str,
default=None,
help='manually provide the decoding path for the models to decode')
parser.add_argument(
'--output_confounding',
action='store_true',
help='language confounding.')
parser.add_argument(
'--metrics',
nargs='*',
type=str,
help='metrics used in this task',
default=['BLEU'],
choices=[
'BLEU', 'GLEU', 'RIBES', 'CODE_MATCH', 'TER', 'METEOR', 'CIDER'
])
parser.add_argument(
'--real_time', action='store_true', help='real-time translation.')
return parser
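# Hedged usage sketch: the builder only needs an argparse parser, so it can be
# exercised directly (how squirrel's register_config consumes the registered
# builder is not shown here and is an assumption):
# import argparse
# args = build_valid_config(argparse.ArgumentParser()).parse_args(
#     ['--beam_size', '5', '--alpha', '0.6'])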
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################
# #
# mta_common_functions.py: collection of functions used by mta #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 16, 2021 #
# #
#############################################################################
import os
import sys
import re
import string
import random
import time
import math
import numpy
#import astropy.io.fits as pyfits
from datetime import datetime
import Chandra.Time
from io import BytesIO
import codecs
import unittest
#
#--- from ska
#
from Ska.Shell import getenv, bash
ascdsenv = getenv('source /home/ascds/.ascrc -r release; source /home/mta/bin/reset_param ', shell='tcsh')
tail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(tail)
house_keeping = '/data/mta/Script/Python3.8/MTA/'
#--------------------------------------------------------------------------
#-- read_data_file: read a data file and create a data list --
#--------------------------------------------------------------------------
def read_data_file(ifile, remove=0, ctype='r'):
"""
read a data file and create a data list
input: ifile --- input file name
remove --- if > 0, remove the file after reading it
ctype --- reading type such as 'r' or 'b'
output: data --- a list of data
"""
#
#--- if a file specified does not exist, return an empty list
#
if not os.path.isfile(ifile):
return []
try:
with open(ifile, ctype) as f:
data = [line.strip() for line in f.readlines()]
except:
with codecs.open(ifile, ctype, encoding='utf-8', errors='ignore') as f:
data = [line.strip() for line in f.readlines()]
#
#--- if asked, remove the file after reading it
#
if remove > 0:
rm_files(ifile)
return data
#--------------------------------------------------------------------------
#-- rm_files: remove a file or files named in a list --
#--------------------------------------------------------------------------
def rm_files(ifile):
"""
remove a file or files named in a list
input: ifile --- a file name or a list of file names to be removed
output: none
"""
if isinstance(ifile, str) and re.search(r'\*', ifile) is not None:
cmd = 'rm -fr ' + ifile
os.system(cmd)
else:
if isinstance(ifile, (list, tuple)):
ilist = ifile
else:
ilist = [ifile]
for ent in ilist:
if os.path.isfile(ent):
cmd = 'rm -fr ' + ent
os.system(cmd)
def rm_file(ifile):
rm_files(ifile)
#--------------------------------------------------------------------------
#-- sort_list_with_other: order a list with the order of another sorted list
#--------------------------------------------------------------------------
def sort_list_with_other(list1, list2, schoice=1):
"""
order a list with the order of another sorted list
input: list1 --- a list
list2 --- a list
schoice --- which list to be used to order; default:fist
output: list1, list2 --- sorted/reordered lists
"""
if len(list1) != len(list2):
return False
if schoice == 1:
list1, list2 = (list(t) for t in zip(*sorted(zip(list1, list2))))
else:
list2, list1 = (list(t) for t in zip(*sorted(zip(list2, list1))))
return [list1, list2]
#--------------------------------------------------------------------------
#-- sort_multi_list_with_one: order all lists in a list by nth list order -
#--------------------------------------------------------------------------
def sort_multi_list_with_one(clists, col=0):
"""
order all lists in a list by nth list sorted order
input: clist --- a list of lists
col --- position of a list to be use for sorting
output: save --- a list of lists, sorted
"""
array1 = numpy.array(clists[col])
index = numpy.argsort(array1)
save = []
for ent in clists:
save.append(list(numpy.array(ent)[index]))
return save
#--------------------------------------------------------------------------
#-- is_leapyear: check whether the year is a leap year --
#--------------------------------------------------------------------------
def is_leapyear(year):
"""
check whether the year is a leap year
input: year --- year
output: True/False
"""
year = int(float(year))
chk = year % 4 #--- every 4 years: leap year
chk2 = year % 100 #--- but every 100 years: not leap year
chk3 = year % 400 #--- except every 400 year: leap year
val = False
if chk == 0:
val = True
if chk2 == 0:
val = False
if chk3 == 0:
val = True
return val
def isLeapYear(year):
return is_leapyear(year)
#--------------------------------------------------------------------------
#-- is_neumeric: check whether the input is a numeric value --
#--------------------------------------------------------------------------
def is_neumeric(val):
"""
check whether the input is a numeric value
input: val --- input value
output: True/False
"""
try:
var = float(val)
return True
except:
return False
def chkNumeric(val):
return is_neumeric(val)
#--------------------------------------------------------------------------
#-- convert_date_format: convert date format --
#--------------------------------------------------------------------------
def convert_date_format(date, ifmt="%Y:%j:%H:%M:%S", ofmt="%Y-%m-%dT%H:%M:%S"):
"""
convert date format
input: date --- the original date
ifmt --- input date format. default: %Y:%j:%H:%M:%S
if input is chandra time, it will ignore the input format
ofmt --- output date format. default: %Y-%m-%dT%H:%M:%S
output: date --- converted date
"""
#
#--- if it is chandra time, convert the date into '%Y:%j:%H:%M:%S'
#
if is_neumeric(date) and (ifmt in ['%Y:%j:%H:%M:%S', 'chandra']):
date = Chandra.Time.DateTime(date).date
#
#--- chandra time gives a decimal part in the seconds; get rid of it
#
atmp = re.split('\.', date)
date = atmp[0]
ifmt = '%Y:%j:%H:%M:%S'
#
#--- convert it to time struct
#
out = time.strptime(str(date), ifmt)
#
#--- if output format is chandra time
#
if ofmt.lower() == 'chandra':
ofmt = '%Y:%j:%H:%M:%S'
ochk = 1
else:
ochk = 0
date = time.strftime(ofmt, out)
if ochk == 1:
date = Chandra.Time.DateTime(date).secs
return date
#--------------------------------------------------------------------------
#-- ydate_to_dom: find dom for a given year and ydate ---
#--------------------------------------------------------------------------
def ydate_to_dom(year, ydate):
"""
find dom for a given year and ydate
input: year --- year
ydate --- ydate
output: dom
"""
year = int(float(year))
ydate = int(float(ydate))
dom = ydate
if year == 1999:
dom -= 202
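# DOM 0 corresponds to 1999:202 (see test_ydate_to_dom in the unit tests below)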
elif year >= 2000:
#
#--- add leap-year days accumulated since 1997. 2100 is not a leap year,
#--- so from 2101 onward we need to correct for that
#
add = int((year - 1997) / 4.0)
if year == 2101:
add -= 1
dom = dom + 163 + (year - 2000) * 365 + add
else:
dom = 0
dom = int(dom)
return dom
#--------------------------------------------------------------------------
#-- dom_to_ydate: find year and ydate from dom --
#--------------------------------------------------------------------------
def dom_to_ydate(dom):
"""
find year and ydate from dom
input: dom --- day of mission
output: year --- year
ydate --- ydate
"""
dom += 202
year = 1999
chk = 0
while chk == 0:
if is_leapyear(year):
base = 366
else:
base = 365
dom -= base
if dom < 0:
ydate = dom + base
chk = 1
break
else:
year += 1
return (year, ydate)
#--------------------------------------------------------------------------
#-- chandratime_to_fraq_year: convert chandra time into a fractional year date format
#--------------------------------------------------------------------------
def chandratime_to_fraq_year(ctime):
"""
convert chandra time into a fractional year date format
input: ctime --- time in seconds from 1998.1.1
output: ytime --- time in fractional year format
"""
atime = convert_date_format(ctime, ofmt='%Y:%j:%H:%M:%S')
btemp = re.split(':', atime)
year = float(btemp[0])
ydate = float(btemp[1])
hour = float(btemp[2])
mins = float(btemp[3])
sec = float(btemp[4])
if is_leapyear(year):
base = 366.0
else:
base = 365.0
ydate = ydate + (hour/24.0 + mins/1440.0 + sec/86400.0)
frac = ydate/base
ytime = year + frac
return ytime
#--------------------------------------------------------------------------
#-- chandratime_to_yday: convert chandra time into a day of year --
#--------------------------------------------------------------------------
def chandratime_to_yday(ctime):
"""
convert chandra time into a day of year
input: ctime --- time in seconds from 1998.1.1
output: ydate --- a day of year (fractional)
"""
atime = convert_date_format(ctime, ofmt='%Y:%j:%H:%M:%S')
btemp = re.split(':', atime)
year = float(btemp[0])
ydate = float(btemp[1])
hour = float(btemp[2])
mins = float(btemp[3])
sec = float(btemp[4])
ydate = ydate + (hour/24.0 + mins/1440.0 + sec/86400.0)
return ydate
#--------------------------------------------------------------------------
#-- mk_empty_dir: empty or create a named directory --
#--------------------------------------------------------------------------
def mk_empty_dir(name):
"""
empty the existing directory. if it does not exist, create an empty directory
Input: name --- the name of the directory
Output: <chk> --- 1 if it was created/emptied, otherwise 0
"""
try:
if os.path.isdir(name):
cmd = 'rm -rf ' + name
os.system(cmd)
cmd = 'mkdir ' + name
os.system(cmd)
return 1
except:
return 0
#--------------------------------------------------------------------------
#-- add_leading_zero: add leading 0 to digit --
#--------------------------------------------------------------------------
def add_leading_zero(val, dlen=2):
"""
add leading 0 to digit
input: val --- numeric value or a string holding a numeric value
dlen --- length of digit
output: val --- adjusted value in string
"""
try:
val = int(val)
except:
return val
val = str(val)
vlen = len(val)
for k in range(vlen, dlen):
val = '0' + val
return val
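# Note: for non-negative integers this is equivalent to str(val).zfill(dlen).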
#--------------------------------------------------------------------------
#-- add_tailing_zero: add '0' to the end to fill the length after a decimal point
#--------------------------------------------------------------------------
def add_tailing_zero(val, digit):
"""
add '0' to the end to fill the length after a decimal point
input: val --- value
digit --- the number of decimal position
output: val --- adjust value (str)
"""
val = str(val)
atemp = re.split(r'\.', val)
if len(atemp) == 1:
atemp.append('')
vlen = len(atemp[1])
diff = digit - vlen
if diff > 0:
for k in range(0, diff):
atemp[1] = atemp[1] + '0'
val = atemp[0] + '.' + atemp[1]
return val
#--------------------------------------------------------------------------
#-- check_file_with_name: check files with the name with a part 'part' exist
#--------------------------------------------------------------------------
def check_file_with_name(tdir, part=''):
"""
check files with the name with a part 'part' exist in dir
input: tdir --- a directory path or a full path with the full file name
part --- a part of the name of files which we want to check
output: True/False
"""
if part == '':
if os.path.isfile(tdir):
return True
else:
return False
else:
if tdir == './':
if os.path.isfile(tdir + part):
return True
else:
return False
try:
part = part.rstrip('\/')
part = part.rstrip('*')
part = part.rstrip('\\')
if os.path.isdir(tdir):
cmd = 'ls ' + tdir + '> ' + zspace
os.system(cmd)
f = open(zspace, 'r')
out = f.read()
f.close()
rm_files(zspace)
mc = re.search(part, out)
if mc is not None:
return True
else:
return False
else:
return False
except:
return False
#--------------------------------------------------------------------------
#-- remove_duplicated_lines: remove duplicated lines from a file or a list
#--------------------------------------------------------------------------
def remove_duplicated_lines(iname, chk=1, srt=1):
"""
remove duplicated lines from a file or a list
input: iname --- input file or a input list name
chk --- input is a list if 0, otherwise a file
srt --- if 1, sort requested
output: if chk == 0: return a list
chk > 0: updated file
"""
if (chk == 1) and (not os.path.isfile(iname)):
return []
else:
new = []
if chk == 1:
data = read_data_file(iname)
else:
data = iname
if len(data) > 1:
if srt > 0:
data = sorted(data)
first = data[0]
new = [first]
for i in range(1, len(data)):
ichk = 0
for k in range(len(new)-1, -1, -1):
if data[i] == new[k]:
ichk = 1
break
if ichk == 0:
new.append(data[i])
if chk == 1:
with open(iname, 'w') as fo:
for ent in new:
fo.write(ent + '\n')
else:
return new
else:
if chk == 0:
return data
def removeDuplicate(iname, chk = 1, srt=1):
return remove_duplicated_lines(iname, chk=chk, srt=srt)
#--------------------------------------------------------------------------
#-- change_month_format: convert month format between digit and three-letter month
#--------------------------------------------------------------------------
def change_month_format(month):
"""
convert month format between digit and three-letter month
input: month --- either digit month or letter month
output: either digit month or letter month
"""
m_list = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',\
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
#
#--- check whether the input is digit
#
try:
var = int(float(month))
if (var < 1) or (var > 12):
return 'NA'
else:
return m_list[var-1]
#
#--- if not, assume a letter month and return the month number
#
except:
mon = 'NA'
var = month.lower()
for k in range(0, 12):
if var == m_list[k].lower():
return k+1
return mon
#--------------------------------------------------------------------------
#-- today_date_display: get today's date in <mmm>-<dd>-<yyyy> ---
#--------------------------------------------------------------------------
def today_date_display():
"""
get today's date in <mmm>-<dd>-<yyyy> (e.g., Jan-01-2018)
input: none
output:<mmm>-<dd>-<yyyy>
"""
out= time.strftime('%Y:%m:%d', time.gmtime())
atemp = re.split(':', out)
lmon = change_month_format(atemp[1])
current = lmon + '-' + atemp[2] + '-' + atemp[0]
return current
#--------------------------------------------------------------------------
#-- today_date_display2 : get today's date in <mmm> <dd>, <yyyy> ---
#--------------------------------------------------------------------------
def today_date_display2():
"""
get today's date in <mmm> <dd>, <yyyy> (e.g., Jan 01, 2018)
input: none
output:<mmm> <dd>, <yyyy>
"""
out= time.strftime('%Y:%m:%d', time.gmtime())
atemp = re.split(':', out)
lmon = change_month_format(atemp[1])
current = lmon + ' ' + atemp[2] + ', ' + atemp[0]
return current
#--------------------------------------------------------------------------
#-- today_date: return today's year, mon, and day --
#--------------------------------------------------------------------------
def today_date():
"""
return today's year, mon, and day
input: none
output: [year, mon, day]
"""
out = time.strftime('%Y:%m:%d', time.gmtime())
atemp = re.split(':', out)
year = int(atemp[0])
mon = int(atemp[1])
day = int(atemp[2])
return [year, mon, day]
#--------------------------------------------------------------------------
#-- separate_data_to_arrays: separate a table data into arrays of data --
#--------------------------------------------------------------------------
def separate_data_to_arrays(data, separator='\s+', com_out=''):
"""
separate a table data into arrays of data
input: data --- a data table
separator --- the delimiter character. default: '\s+'
com_out --- if this is provided, the beginning of the line
marked with that won't be read in (e.g. by '#')
output: coldata --- a list of lists of each column
"""
atemp = re.split(separator, data[0])
alen = len(atemp)
coldata = [[] for x in range(0, alen)]
for ent in data:
if ent == '':
continue
if (com_out != '') and (ent[0] == com_out):
continue
atemp = re.split(separator, ent)
for j in range(0, alen):
try:
val = float(atemp[j])
except:
val = atemp[j]
coldata[j].append(val)
return coldata
#--------------------------------------------------------------------------
#-- run_arc5gl_process: run arc5gl process --
#--------------------------------------------------------------------------
def run_arc5gl_process(cline):
"""
run arc5gl process
input: cline --- command lines
output: f_list --- a list of fits (either extracted or browsed)
*fits --- if the command asked to extract; resulted fits files
"""
with open(zspace, 'w') as fo:
fo.write(cline)
try:
cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace + ' > ./zout'
os.system(cmd)
except:
try:
cmd = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + ' > ./zout'
os.system(cmd)
except:
cmd1 = "/usr/bin/env PERL5LIB= "
cmd2 = ' /proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace + ' > ./zout'
cmd = cmd1 + cmd2
bash(cmd, env=ascdsenv)
rm_files(zspace)
out = read_data_file('./zout', remove=1)
save = []
for ent in out:
if ent == "":
continue
mc = re.search('Filename', ent)
if mc is not None:
continue
mc = re.search('Retrieved', ent)
if mc is not None:
continue
mc = re.search('---------------', ent)
if mc is not None:
continue
atemp = re.split('\s+', ent)
save.append(atemp[0])
return save
#--------------------------------------------------------------------------
#-- run_arc5gl_process_user: run arc5gl process with a user option --
#--------------------------------------------------------------------------
def run_arc5gl_process_user(cline, user='isobe'):
"""
run arc5gl process with a user option
input: cline --- command lines
user --- user option
output: f_list --- a list of fits (either extracted or browsed)
*fits --- if the command asked to extract; resulted fits files
"""
with open(zspace, 'w') as fo:
fo.write(cline)
try:
cmd = ' /proj/sot/ska/bin/arc5gl -user ' + user + ' -script ' + zspace + ' > ./zout'
os.system(cmd)
except:
try:
cmd = ' /proj/axaf/simul/bin/arc5gl -user ' + user + ' -script ' + zspace + ' > ./zout'
os.system(cmd)
except:
cmd1 = "/usr/bin/env PERL5LIB= "
cmd2 = ' /proj/axaf/simul/bin/arc5gl -user ' + user + ' -script ' + zspace + ' > ./zout'
cmd = cmd1 + cmd2
bash(cmd, env=ascdsenv)
rm_files(zspace)
out = read_data_file('./zout', remove=1)
save = []
for ent in out:
if ent == "":
continue
mc = re.search('Filename', ent)
if mc is not None:
continue
mc = re.search('Retrieved', ent)
if mc is not None:
continue
mc = re.search('---------------', ent)
if mc is not None:
continue
atemp = re.split('\s+', ent)
save.append(atemp[0])
return save
#--------------------------------------------------------------------------
#-- separate_data_into_col_data: separate a list of data lines into a list of lists
#--------------------------------------------------------------------------
def separate_data_into_col_data(data, spliter = '\s+'):
"""
separate a list of data lines into a list of lists of column data
input: data --- data list
spliter --- spliter of the line. default: \s+
output: save --- a list of lists of data
"""
atemp = re.split(spliter, data[0])
alen = len(atemp)
save = [[] for x in range(0, alen)]
for ent in data:
atemp = re.split(spliter, ent)
for k in range(0, alen):
try:
val = float(atemp[k])
except:
val = atemp[k].strip()
save[k].append(val)
return save
#--------------------------------------------------------------------------
#-- remove_non_neumeric_values: remove rows whose pos-th entry is non-numeric
#--------------------------------------------------------------------------
def remove_non_neumeric_values(alist, pos):
"""
remove all rows of lists in a list which correspond to non-numeric
entries in the pos-th list.
input: alist --- a list of lists
pos --- position of the list which contains non-numeric values
output: slist --- a list of lists removed non-neumeric entries
"""
#
#--- get a list of which we want to find non-numeric entries
#
tlist = alist[pos]
tlist = genfromtxt3(tlist)
tarray = numpy.array(tlist)
#
#--- create index to remove non-neumeric values
#
oindex = ~numpy.isnan(tarray)
#
#--- apply the index to all lists
#
slist = []
for ent in alist:
tarray = numpy.array(ent)
#
#--- make sure that all entries are numeric not string
#
nlist = list(tarray[oindex])
if isinstance(nlist[0], str):
nlist = list(genfromtxt3(nlist))
slist.append(nlist)
return slist
#--------------------------------------------------------------------------
#-- genfromtxt3: genfromtxt python3 version --- correcting python 3 bug ---
#--------------------------------------------------------------------------
def genfromtxt3(alist):
"""
genfromtxt python3 version --- correcting python 3 bug
input: alist --- a list of string entries
output: out --- a list of numeric entries
"""
out = numpy.array(alist)
out = numpy.genfromtxt(map(lambda s: s.encode('utf8'), out))
return out
#--------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST ---
#--------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
def test_is_leapyear(self):
year = 2000
self.assertTrue(is_leapyear(year))
year = 2100
self.assertFalse(is_leapyear(year))
#--------------------------------------------------------------------------
def test_sort_list_with_other(self):
list1 = ['z', 'v', 't', 'k']
list2 = ['a', 'b', 'c', 'd']
list1, list2 = sort_list_with_other(list1, list2)
self.assertEqual(list2, ['d', 'c', 'b', 'a'])
#--------------------------------------------------------------------------
def test_sort_multi_list_with_one(self):
list1 = [4,3,2,1]
list2 = [10, 9, 8, 7]
list3 = ['z', 'v', 't', 'k']
list4 = ['a', 'b', 'c', 'd']
clists = [list1, list2, list3, list4]
out = sort_multi_list_with_one(clists, 0)
self.assertEqual(out[3], ['d', 'c', 'b', 'a'])
#--------------------------------------------------------------------------
def test_convert_date_format(self):
date = '2019:184:00:43:32'
cdate = convert_date_format(date)
self.assertEqual(cdate, '2019-07-03T00:43:32')
cdate = convert_date_format(date, ofmt='%Y:%m:%d:%H:%M:%S')
self.assertEqual(cdate, '2019:07:03:00:43:32')
cdate = convert_date_format(date, ofmt="chandra")
self.assertEqual(cdate, 678501881.184)
cdate = convert_date_format(678501881.184)
self.assertEqual(cdate, '2019-07-03T00:43:32')
cdate = convert_date_format('20190626223528', ifmt='%Y%m%d%H%M%S', ofmt='%Y:%j:%H:%M:%S')
print("I AM HERE Cdate: " + str(cdate))
#--------------------------------------------------------------------------
def test_ydate_to_dom(self):
year = 1999
ydate = 202
out = ydate_to_dom(year, ydate)
self.assertEqual(out, 0)
year = 2012
ydate = 1
out = ydate_to_dom(year, ydate)
self.assertEqual(out, 4547)
year = 2019
ydate = 202
out = ydate_to_dom(year, ydate)
self.assertEqual(out, 7305)
#--------------------------------------------------------------------------
def test_dom_to_ydate(self):
dom = 7175
[year, ydate] = dom_to_ydate(dom)
line = str(year) + ':' + str(ydate)
self.assertEqual(line, '2019:72')
#--------------------------------------------------------------------------
def test_chandratime_to_fraq_year(self):
ctime = 584150395
fyear = chandratime_to_fraq_year(ctime)
self.assertEqual(fyear, 2016.5136588620724)
#--------------------------------------------------------------------------
def test_add_leading_zero(self):
val = 2
val = add_leading_zero(val)
self.assertEqual(val, '02')
val = 33
val = add_leading_zero(val, dlen=3)
self.assertEqual(val, '033')
val = 33
val = add_leading_zero(val)
self.assertEqual(val, '33')
val = '33'
val = add_leading_zero(val, dlen=3)
self.assertEqual(val, '033')
#--------------------------------------------------------------------------
def test_remove_duplicated_lines(self):
test = ['5,3,4', '1,2,3', '2,3,4', '1,2,3']
out = remove_duplicated_lines(test, chk=0)
chk = ['1,2,3', '2,3,4','5,3,4']
self.assertEqual(out, chk)
#--------------------------------------------------------------------------
def test_run_arc5gl_process(self):
line = 'operation=browse\n'
line = line + 'dataset=flight\n'
line = line + 'detector=acis\n'
line = line + 'level=1\n'
line = line + 'filetype=evt1\n'
line = line + 'tstart=2019-01-01T00:00:00\n'
line = line + 'tstop=2019-01-05T00:00:00\n'
line = line + 'go\n'
fits = 'acisf22032_000N001_evt1.fits'
out = run_arc5gl_process(line)
if fits in out:
self.assertEqual(fits, fits)
#--------------------------------------------------------------------------
def test_separate_data_into_col_data(self):
data = ['1 2 3 4', '5 6 7 8']
out = separate_data_into_col_data(data, spliter='\s+')
print("I AM HERE: " + str(out))
#--------------------------------------------------------------------------
def test_remove_non_neumeric_values(self):
in_list= [[1,2,3,4], [2,'test',4,5], [3,4,5,6]]
out_list = [[1, 3, 4], [2.0, 4.0, 5.0], [3, 5, 6]]
out = remove_non_neumeric_values(in_list, 1)
self.assertEqual(out, out_list)
#--------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf8 -*-
# [Student ID] [Name]
# https://wikidocs.net/13 <- refer to this page and predict the output of each line below
# Compare the actual results with your predictions
print("1234567890" * 4)
# print(math.pi)
# Expected : 3.14159265359
# Result : 3.14159265359
# print("%f" % math.pi)
# Expected :
# Result :
# print("%d" % math.pi)
# Expected :
# Result :
# print("%5d" % math.pi)
# Expected :
# Result :
# print("%5f" % math.pi)
# Expected :
# Result :
# print("%8f" % math.pi)
# Expected :
# Result :
# print("%+8f" % math.pi)
# Expected :
# Result :
# print("%16.8f" % math.pi)
# Expected :
# Result :
print("1234567890" * 4)
# Print math.pi with total width 8 and 3 decimal places
# Print math.pi with total width 10 and 5 decimal places
# Print math.pi with its sign, total width 7, and 4 decimal places
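# A sketch of the three tasks above (math is not imported in this file, so the
# sketch imports it):
import math
print("%8.3f" % math.pi)    # width 8, 3 decimals       ->    3.142
print("%10.5f" % math.pi)   # width 10, 5 decimals      ->    3.14159
print("%+7.4f" % math.pi)   # sign, width 7, 4 decimals -> +3.1416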
|
f_path=open("ticket.txt","r")
lines=f_path.readlines()
lines="".join(lines).split(";")
newlines=[]
for x in lines:
newlines.append(x.replace("\n",""))
print(newlines)
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
__author__ = 'jdroot'
from collection import MongoCollection
class MetaInfo(object):
collection = None
external = None
mapping = None
def __init__(self, meta):
self.__dict__.update(meta.__dict__)
class MetaObject(type):
def __new__(mcs, name, bases, attrs):
new_class = super(MetaObject, mcs).__new__(mcs, name, bases, attrs)
try:
meta = getattr(new_class, 'Meta')
delattr(new_class, 'Meta')
except AttributeError:
meta = None
# Will be none for Model
if meta is not None:
info = MetaInfo(meta)
info.collection = info.collection or name.lower()
# Create the collection and add it to the new class
import pycloud.pycloud.cloudlet as cloudlet
coll = MongoCollection(cloudlet.get_cloudlet_instance().db, info.collection, obj_class=new_class)
new_class._collection = coll
# Create the external attributes list and add it to the new class
if isinstance(info.external, list):
#print 'Mapping _external attributes for "%s"' % str(new_class)
#print info.external
new_class._external = info.external
else:
new_class._external = None
if isinstance(info.mapping, dict):
new_class.variable_mapping = info.mapping
else:
new_class.variable_mapping = None
# Setup find and find one static methods
new_class.find = new_class._collection.find
new_class.find_one = new_class._collection.find_one
new_class.find_and_modify = new_class._collection.find_and_modify
new_class.external = external
return new_class
def external(obj):
ret = obj
if hasattr(ret, '_external'):
if isinstance(ret._external, list):
ret = {}
for key in obj._external:
tmp = obj[key]
if hasattr(tmp, 'external'):
if hasattr(tmp.external, '__call__'):
tmp = tmp.external()
ret[key] = tmp
return ret
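# Hypothetical usage sketch (illustrative names only; the real models live
# elsewhere in pycloud). Under Python 2 a model would opt in via:
# class ServiceVM(object):
#     __metaclass__ = MetaObject
#     class Meta:
#         collection = 'service_vms'
#         external = ['name']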
|
import torch
import cv2
from .modules.darknet import Darknet
from ..utils.image_preprocess import to_tensor, prepare_raw_imgs
from ..utils.utils import load_classes, get_correct_path
from ..utils.bbox import non_max_suppression, rescale_boxes_with_pad, diff_cls_nms
class CarLocator():
def __init__(self, cfg):
# Yolov3 stuff
class_path = get_correct_path(cfg['class_path'])
weights_path = get_correct_path(cfg['weights_path'])
model_cfg_path = get_correct_path(cfg['model_cfg'])
self.img_size = cfg['img_size']
self.n_cpu = cfg['n_cpu']
self.conf_thres = cfg['conf_thres']
self.nms_thres = cfg['nms_thres']
self.classes = load_classes(class_path)
self.pred_mode = cfg['pred_mode']
self.device = "cuda" if torch.cuda.is_available() else "cpu"
# Set up model
self.model = Darknet(model_cfg_path, img_size=cfg['img_size']).to(self.device)
if cfg['weights_path'].endswith(".weights"):
# Load darknet weights
self.model.load_darknet_weights(weights_path)
else:
# Load checkpoint weights
self.model.load_state_dict(torch.load(weights_path, map_location=self.device))
self.model.eval() # Set in evaluation mode
# Define car detection classes
self.target_classes = ['car', 'bus', 'truck']
self.idx2targetcls = {idx:cls_name for idx, cls_name in enumerate(self.classes) if cls_name in self.target_classes}
def predict(self, img_lst, sort_by='conf'):
'''
Inputs
img_lst: list of np.array(h,w,c)
Can be empty
Cannot have any None elements
output:
[
# For each frame (empty list if no car in the frame)
[
# For each detected car
{
'coords': (x1,y1,x2,y2),
'confidence': 0.99
}
]
]
'''
if not img_lst: # Empty imgs list
return []
# Prepare input
input_imgs, imgs_shapes = prepare_raw_imgs(img_lst, self.pred_mode, self.img_size)
input_imgs = input_imgs.to(self.device)
# Yolo prediction
with torch.no_grad():
imgs_detections = self.model(input_imgs)
imgs_detections = non_max_suppression(imgs_detections, self.conf_thres, self.nms_thres)
# if no car in the frame, output empty list
output = [[] for _ in range(len(imgs_detections))]
for i, (img_detection, img_shape) in enumerate(zip(imgs_detections, imgs_shapes)): # for each image
if img_detection is not None:
# Rescale boxes to original image
img_detection = rescale_boxes_with_pad(img_detection, self.img_size, img_shape).numpy()
# Filter out wanted classes and perform diff class NMS
img_detection = [det for det in img_detection if int(det[-1]) in self.idx2targetcls]
img_detection = diff_cls_nms(img_detection, self.nms_thres, sort_by=sort_by)
'''
img_detection:
[
np.array([x1,y1,x2,y2,conf,cls_conf,cls]),
...
]
now make dict for output
'''
img_detection = [{
'coords': tuple(det[:4]),
'confidence': det[4]
} for det in img_detection]
output[i] = img_detection
return output
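# Minimal usage sketch (cfg keys are exactly those read in __init__ above;
# the values shown are placeholders, not verified defaults):
# cfg = {'class_path': 'coco.names', 'weights_path': 'yolov3.weights',
#        'model_cfg': 'yolov3.cfg', 'img_size': 416, 'n_cpu': 4,
#        'conf_thres': 0.5, 'nms_thres': 0.4, 'pred_mode': 'resize'}
# detections = CarLocator(cfg).predict([frame])  # frame: np.array(h, w, c)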
|
import numpy as np
import cv2, math, sys
def get_val(img, x, y):
if x < 0 or x > img.shape[0]-1 or y < 0 or y > img.shape[1]-1:
return 0
return img[x, y]
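# get_neighbors below zero-pads at the image border, since get_val returns 0
# for out-of-range coordinates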
def get_neighbors(img, origin, sizes):
neighbors = np.zeros((sizes[0], sizes[1]), int)
half = sizes[0] // 2
x = origin[0]
y = origin[1]
for row in xrange(-half, half + 1):
for col in xrange(-half, half + 1):
neighbors[half + row, half + col] = get_val(img, x + row, y + col)
return neighbors
def magnitude(neighbors, mask):
magnitude = 0
size_x = len(mask)
size_y = len(mask[0])
for row in xrange(size_x):
for col in xrange(size_y):
magnitude += neighbors[row][col] * mask[row][col]
return magnitude
def laplace(img, mask, mask_ratio, threshold):
ret_img = np.zeros(img.shape, int)
for row in xrange(img.shape[0]):
for col in xrange(img.shape[1]):
neighbors = []
neighbors = get_neighbors(img, (row, col), [3, 3])
if((magnitude(neighbors, mask) / mask_ratio) > threshold):
ret_img[row, col] = 0
else:
ret_img[row, col] = 255
return ret_img
def MVL(img, threshold):
ret_img = np.zeros(img.shape, int)
mask = [[2, -1, 2], [-1, -4, -1], [2, -1, 2]]
mask_div_ratio = 3
neighbors = []
for row in xrange(img.shape[0]):
for col in xrange(img.shape[1]):
neighbors = get_neighbors(img, (row, col), [3, 3])
if((magnitude(neighbors, mask) / mask_div_ratio) > threshold):
ret_img[row, col] = 0
else:
ret_img[row, col] = 255
return ret_img
def LOG(img, threshold):
ret_img = np.zeros(img.shape, int)
mask = [[0, 0, 0, -1, -1, -2, -1, -1, 0, 0, 0],
[0, 0, -2, -4, -8, -9, -8, -4, -2, 0, 0],
[0, -2, -7, -15, -22, -23, -22, -15, -7, -2, 0],
[-1, -4, -15, -24, -14, -1, -14, -24, -15, -4, -1],
[-1, -8, -22, -14, 52, 103, 52, -14, -22, -8, -1],
[-2, -9, -23, -1, 103, 178, 103, -1, -23, -9, -2],
[-1, -8, -22, -14, 52, 103, 52, -14, -22, -8, -1],
[-1, -4, -15, -24, -14, -1, -14, -24, -15, -4, -1],
[0, -2, -7, -15, -22, -23, -22, -15, -7, -2, 0],
[0, 0, -2, -4, -8, -9, -8, -4, -2, 0, 0],
[0, 0, 0, -1, -1, -2, -1, -1, 0, 0, 0]]
neighbors = []
for row in xrange(img.shape[0]):
for col in xrange(img.shape[1]):
neighbors = get_neighbors(img, (row, col), [11, 11])
if(magnitude(neighbors, mask) > threshold):
ret_img[row, col] = 0
else:
ret_img[row, col] = 255
return ret_img
def DOG(img, threshold):
ret_img = np.zeros(img.shape, int)
mask = [[-1, -3, -4, -6, -7, -8, -7, -6, -4, -3, -1],
[-3, -5, -8, -11, -13, -13, -13, -11, -8, -5, -3],
[-4, -8, -12, -16, -17, -17, -17, -16, -12, -8, -4],
[-6, -11, -16, -16, 0, 15, 0, -16, -16, -11, -6],
[-7, -13, -17, 0, 85, 160, 85, 0, -17, -13, -7],
[-8, -13, -17, 15, 160, 283, 160, 15, -17, -13, -8],
[-7, -13, -17, 0, 85, 160, 85, 0, -17, -13, -7],
[-6, -11, -16, -16, 0, 15, 0, -16, -16, -11, -6],
[-4, -8, -12, -16, -17, -17, -17, -16, -12, -8, -4],
[-3, -5, -8, -11, -13, -13, -13, -11, -8, -5, -3],
[-1, -3, -4, -6, -7, -8, -7, -6, -4, -3, -1]]
neighbors = []
for row in xrange(img.shape[0]):
for col in xrange(img.shape[1]):
neighbors = get_neighbors(img, (row, col), [11, 11])
if(magnitude(neighbors, mask) > threshold):
ret_img[row, col] = 255
else:
ret_img[row, col] = 0
return ret_img
def main():
# usage: python ./hw10.py [operator] [threshold]
# default threshold = 12
threshold = '12'
if(len(sys.argv) == 2 and sys.argv[1] == '-h'):
print "usage: python ./hw10.py [-h] [operator] [threshold]"
print "Options and argmuments:"
print "-h: print this help message and exit"
print "operator: laplace1, laplace2, MVL, LOG, DOG"
print "threshold: an integer for the operator"
else:
assert(len(sys.argv) == 3)
operator = sys.argv[1]
threshold = sys.argv[2]
assert operator == 'laplace1' or \
operator == 'laplace2' or \
operator == 'MVL' or \
operator == 'LOG' or \
operator == 'DOG'
# img is a 512*512 array
img = cv2.imread('lena.bmp', 0)
# Do Laplace mask [[0, 1, 0], [1, -4, 1], [0, 1, 0]]
if operator == 'laplace1':
laplace_mask1 = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]
mask_div_ratio1 = 1
img_laplace1 = laplace(img, laplace_mask1, mask_div_ratio1, int(threshold))
cv2.imwrite('laplace1_' + threshold + '.bmp', img_laplace1)
# Do Laplace mask [[1, 1, 1], [1, -8, 1], [1, 1, 1]] / 3
elif operator == 'laplace2':
laplace_mask2 = [[1, 1, 1], [1, -8, 1], [1, 1, 1]]
mask_div_ratio2 = 3
img_laplace2 = laplace(img, laplace_mask2, mask_div_ratio2, int(threshold))
cv2.imwrite('laplace2_' + threshold + '.bmp', img_laplace2)
# Do Minimum variance Laplacian
elif operator == 'MVL':
img_mvl = MVL(img, int(threshold))
cv2.imwrite('MVL' + threshold + '.bmp', img_mvl)
# Do Laplace of Gaussian
elif operator == 'LOG':
img_log = LOG(img, int(threshold))
cv2.imwrite('LOG' + threshold + '.bmp', img_log)
# Do Difference of Gaussian
elif operator == 'DOG':
img_dog = DOG(img, int(threshold))
cv2.imwrite('DOG' + threshold + '.bmp', img_dog)
print "The operator finished."
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from time import sleep
import math, random
import sqlite3
from threading import Thread
from Battery import Battery
from drone_state import DroneState
from Log import Log as l
from threading import Thread, Lock
from utils import *
from parametersModel import oneSecond
#from Simulator import main
l.flags = l.LOG_ALL_ENABLE
class Drone:
MOVE_UPDATE_FREQ = 0.05
CONSUME_BATTERY_FREQ = 1
global django_status;
django_status = [
"0",
"UNINIT",
"BOOT",
"CALIBRATING",
"STANDBY",
"ACTIVE",
"CRITICAL",
"EMERGENCY",
"POWEROFF",
]
TAG = "DRONE"
"""
ctor
"""
def __init__(self, droneId = -1, homeLocation = (0,0), position = (0,0), altitude= 0, failureFrequency = 0, velocity = 1, battery = None):
self.id = droneId
self.homeLocation = homeLocation
self.position = position
self.state = DroneState.ON_LAND | DroneState.OFF
self.failureFrequency = failureFrequency
self.battery = battery
self.altitude = altitude
self.velocity = velocity
self.packet = None
self.deliveryList = []
self.currentDelivery = None
thread = Thread(target=self.consumeBattery, args=())
thread.daemon = True # Daemonize thread
thread.start()
#battery consumption
def consumeBattery(self):
while True:
if self.state & DroneState.RUNNING:
l.info(Drone.TAG, "Battery consumption lvl: " + str(self.battery.chargePercentage))
self.battery.use()
# battery charge at 0 percent
if self.battery.chargePercentage <= 0:
if self.state & DroneState.IN_AIR:
l.error(Drone.TAG, "WARNING CRASH !!!")
self.state = DroneState.OUT_OF_ORDER | DroneState.ON_LAND
else:
l.info(Drone.TAG, "WARNING NO BATTERY")
self.state = DroneState.ON_LAND | DroneState.OFF
break
sleep(Drone.CONSUME_BATTERY_FREQ * oneSecond)
def start(self):
if self.battery != None and self.state & DroneState.OFF and self.state & DroneState.OUT_OF_ORDER == 0:
self.state = DroneState.IDLE | DroneState.RUNNING | DroneState.ON_LAND
return 0
elif self.battery != None and self.state & DroneState.OUT_OF_ORDER == 0:
l.info(Drone.TAG, "The drone is already started")
elif self.battery != None and self.state & DroneState.OFF:
l.info(Drone.TAG, "Drone out of service please repair it")
else:
l.info(Drone.TAG, "Can't start drone please plug a battery")
return -1
def stop(self):
if self.battery != None and self.state & DroneState.RUNNING:
if self.state & DroneState.IN_AIR:
l.debug(Drone.TAG, "Stopping drone in air. Bad idea. CRASHING !")
self.state = DroneState.OUT_OF_ORDER | DroneState.ON_LAND | DroneState.OFF
else:
l.debug(Drone.TAG, "Shut off the drone")
self.state = DroneState.OFF | DroneState.ON_LAND
return 0
elif self.battery != None:
l.info(Drone.TAG, "Can't stop drone not started")
return -1
else:
l.info(Drone.TAG, "Irrelevant to stop drone. No battery found plugged into the drones")
return -1
def land(self, speed):
if self.altitude <= 0:
l.error(Drone.TAG, "The drone is already on the land")
return -1
if self.state & DroneState.IN_AIR and self.state & DroneState.RUNNING:
self.state = DroneState.LANDING | DroneState.RUNNING | DroneState.IN_AIR
while self.altitude > 0 and self.state & DroneState.RUNNING:
self.altitude = self.altitude - speed
l.info(Drone.TAG, "Drone ID " + str(self.id) + " landing altitude = " + str(self.altitude))
sleep(Drone.MOVE_UPDATE_FREQ * oneSecond)
if self.altitude > 0:
l.error(Drone.TAG, "NO ENERGY CRASHING")
return -1
else:
self.altitude = 0
self.state = DroneState.ON_LAND | DroneState.RUNNING | DroneState.IDLE
return 0
elif self.state & DroneState.RUNNING:
l.error(Drone.TAG, "The drone is not in the air impossile to land")
return -1
else:
l.error(Drone.TAG, "The drone is not running. Please call start() method before")
return -1
def takeoff(self, altitude, speed):
if self.state & DroneState.ON_LAND and self.state & DroneState.RUNNING:
if self.altitude > altitude:
l.error(Drone.TAG, "Impossible to take off to this altitude")
return -1
self.state = DroneState.TAKE_OFF | DroneState.RUNNING | DroneState.IN_AIR
while self.altitude < altitude and self.state & DroneState.RUNNING:
self.altitude = self.altitude + speed
l.info(Drone.TAG, "Drone ID " + str(self.id) + " taking off altitude = " + str(self.altitude))
sleep(Drone.MOVE_UPDATE_FREQ * oneSecond)
if self.altitude < altitude:
l.error(Drone.TAG, "NO ENERGY CRASHING")
return -1
else:
self.altitude = altitude
self.state = DroneState.IN_AIR | DroneState.IDLE | DroneState.RUNNING
return 0
elif self.state & DroneState.RUNNING:
l.error(Drone.TAG, "The drone is not on the land impossile to takeoff")
return -1
else:
l.error(Drone.TAG, "The drone is not running. Please call start() method before")
return -1
#TODO: implement the goto method
def goto(self, destPoint):
if self.state & DroneState.RUNNING and self.state & DroneState.IN_AIR:
self.state = DroneState.FLYING | DroneState.RUNNING | DroneState.IN_AIR | (DroneState.DELYVERING if self.currentDelivery is not None else 0)
startPos = self.position
distance = dist(self.position, destPoint)
l.debug(Drone.TAG, "Distance: " + str(distance))
vecDir = vec2d_normalize(vec2d_sub(destPoint, self.position))
vecDir = vec2d_multiply_scalar(vecDir, self.velocity)
#vecDir = vec2d_multiply_scalar(vecDir, elapsed)
while(dist(startPos, self.position) < distance and self.state & DroneState.RUNNING):
#l.info(Drone.TAG, " direction vect : " + str(vecDir))
self.position = vec2d_add(vecDir, self.position)
sleep(Drone.MOVE_UPDATE_FREQ*oneSecond)
#l.info(Drone.TAG, " drone position : " + str(self.position))
if dist(startPos, self.position) < distance:
l.error(Drone.TAG, "NO ENERGY CRASHING")
return -1
else:
self.position = destPoint
self.state = DroneState.IN_AIR | DroneState.IDLE | DroneState.RUNNING
else:
l.error(Drone.TAG, "Please takeoff after go to a point")
def removePacketFromWarehouse(self, landingSpeed, takeoffSpeed, packet):
# land to retrieve the packet, remembering the cruising altitude
cruiseAltitude = self.altitude
if self.land(landingSpeed) != 0:
l.error(Drone.TAG, "Impossible to land to retrieve the packet")
return -1
self.packet = packet
# take off again to the previous altitude
if self.takeoff(cruiseAltitude, takeoffSpeed) != 0:
return -1
return 0
def executeDelivery(self, delivery):
l.info(Drone.TAG, "Delivering packet : " + delivery.packet.name)
self.currentDelivery = delivery
self.followPoints(self.currentDelivery.path)
if self.currentDelivery.path[len(self.currentDelivery.path) - 1] == self.position:
l.info(Drone.TAG, "Packet delivered : " + delivery.packet.name)
self.state = DroneState.IN_AIR | DroneState.IDLE | DroneState.RUNNING
else:
l.info(Drone.TAG, "Delivery failed for packet " + delivery.packet.name)
self.currentDelivery = None
##
# Follow the points provided by an optional flight-path calculator
def followPoints(self, path):
for point in path:
self.goto(point)
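# Hypothetical usage sketch (the Battery constructor signature is assumed):
# drone = Drone(droneId=1, battery=Battery())
# drone.start()
# drone.takeoff(altitude=10, speed=1)
# drone.goto((5, 5))
# drone.land(speed=1)
# drone.stop()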
|
class Persegi:
    def __init__(self, panjang, lebar):
        self.panjang = panjang
        self.lebar = lebar
        self.luas = panjang * lebar
    def itung(self):
        print(f'The area of your rectangle is {self.luas}')
panjang = int(input("Enter the length of your rectangle: "))
lebar = int(input("Enter the width of your rectangle: "))
xoxo = Persegi(panjang, lebar)
xoxo.itung()
|
from collections import namedtuple, deque
import numpy as np
import copy
import random
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new single experience to memory.
Params
======
state : the state array
action: the action array
reward: the reward array
next_state: the array with the next states
done: the array with the dones
"""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
#print("len experiences:{}".format(len(experiences)))
#print("shape experiences state:{}".format(experiences[0].state[0].shape))
#print("shape experiences action:{}".format(experiences[0].action.shape))
#print("shape experiences reward:{}".format(experiences[0].reward))
states = torch.from_numpy(np.vstack([np.dstack(e.state.T) for e in experiences if e is not None])).float().to(device)
#print("State shape after stack: {}".format(states.shape))
actions = torch.from_numpy(np.vstack([np.dstack(e.action.T) for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([np.dstack(e.next_state.T) for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([np.dstack(e.done) for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.3):
"""Initialize parameters and noise process.
PARAMS
=====
size: The dimension of the noise process
mu: the drift of the stochastic process
theta: The multiplier of the deterministic part of the stochastic process
sigma: the volatility (scale) of the random part of the process
"""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(len(x))
self.state = x + dx
return self.state
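# Minimal usage sketch:
# noise = OUNoise(size=4, seed=0)
# noise.sample()  # returns one temporally correlated noise vector per call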
|
from django.db import models
from django.db.models.query import QuerySet
from django.utils.translation import ugettext_lazy as _
class TeamMixin(object):
pass
class TeamQuerySet(QuerySet, TeamMixin):
pass
class TeamManager(models.Manager, TeamMixin):
def get_queryset(self):
return TeamQuerySet(self.model, using=self._db).filter(delete=False)
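# soft delete: the default manager hides rows flagged delete=True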
class Team(models.Model):
"""
model to store team records
"""
name = models.CharField(_("Team Name"), max_length=64)
logouri = models.URLField(_("Team Logo URI"))
club_state = models.CharField(_("Club State"), max_length=32)
delete = models.BooleanField(default=False)
objects = TeamManager()
class Meta:
verbose_name = "Team"
verbose_name_plural = "Teams"
app_label = "teams"
ordering = ("-name", )
def __unicode__(self):
return "%s" % (self.name)
|
h,w = map(int,input().split())
field = []
xx = [-1, -1, -1, 0, 0, 1, 1, 1]
yy = [-1, 0, 1, -1, 1, -1, 0, 1]
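# xx/yy list the row/column offsets of the 8 neighboring cells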
for i in range(h):
field.append(input())
for i in range(h):
line = ''
for j in range(w):
if field[i][j] == '#':
line += '#'
else:
temp = 0
for x,y in zip(xx,yy):
if i+x < 0 or i+x > h-1:
continue
elif j+y < 0 or j+y > w-1:
continue
elif field[i+x][j+y] == '#':
temp += 1
line += str(temp)
print(line)
|
#!/usr/local/bin/python
import os
import sys
import json
import pickle
#Sagemaker directory structure
prefix = '/opt/ml/'
input_path = prefix + 'input/data'
output_path = os.path.join(prefix, 'output')
model_path = os.path.join(prefix, 'model')
param_path = os.path.join(prefix, 'input/config/hyperparameters.json')
inputdata_path = os.path.join(prefix,'input/config/inputdataconfig.json')
with open(param_path, 'r') as tc:
trainingParams = json.load(tc)
print ("Training Params")
print (trainingParams)
with open(inputdata_path, 'r') as ip:
inputParams = json.load(ip)
print ("Input Params")
print (inputParams)
#Set-up environment variables for later bash processes
for key in inputParams.keys():
var_name="SM_CHANNEL_{}".format(key.upper())
path_name="/opt/ml/input/data/{}".format(key)
os.environ[var_name]=path_name
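# e.g. a channel named "train" becomes SM_CHANNEL_TRAIN=/opt/ml/input/data/train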
os.environ['SM_EPOCH']=trainingParams['epoch']
print ("Start Training")
os.system('/bin/bash sagemaker_train_tesseract.sh')
print ("Training Complete")
|
from itertools import combinations
x = "5 1 4 2 3".split()
perms = []
y = []
for i in range(2, len(x)+1):
for c in combinations(x, i):
perms.append("".join(c))
for i in range(0, len(perms)):
x = list(perms[i])
if x == sorted(x):
y.append("".join(x))
print(max((y), key=len))
y = []
for i in range(0, len(perms)):
x = list(perms[i])
if x == sorted(x)[::-1]:
y.append("".join(x))
print(max((y), key=len))
|
from django.test import TestCase
from django.db.utils import IntegrityError
from products.models import Category, Product, ProductType
def model_setup():
prod_type_female = ProductType.objects.create(name='Footwear', sex='Female')
prod_type_male = ProductType.objects.create(name='Footwear', sex='Male')
prod_type_junior = ProductType.objects.create(name='Footwear', sex='Junior')
for cat in ['Air Max', 'Jordan', 'Runners']:
Category.objects.create(name=cat)
cat1 = Category.objects.all()[0]
cat2 = Category.objects.all()[1]
cat3 = Category.objects.all()[2]
cat4 = Category.objects.all()[:2]
cat5 = Category.objects.all()[1:]
cat6 = Category.objects.all()[::2]
cat7 = Category.objects.all()
prod_type_female.category.set(cat4)
prod_type_male.category.set(cat5)
prod_type_junior.category.add(cat3)
product_1 = Product(
title='Air Jordan 11 Retro "Space Jam"',
price = 2000,
description='The Air Jordan 11 originally released in 1996\
and is famous for its patent leather upper. The shoes were\
designed by Tinker Hatfield and Michael Jordan himself calls\
it his favorite Air Jordan sneaker',
)
product_1.save()
prod_type_female.product_class.add(product_1)
product_2 = Product(
title='Nike Air Max 270',
price = 200,
description='Delivery takes place from 4 days after an order has been confirmed.',
)
product_2.save()
prod_type_female.product_class.add(product_2)
product_3 = Product(
title='adidas NMD R2',
price = 149,
description='The adidas NMD R2 is a new low-top sneaker from adidas Originals.\
It is the second version of the adidas NMD R1 and features a mix of Primeknit,\
suede, and Boost cushioning. ',
)
product_3.save()
prod_type_female.product_class.add(product_3)
product_4 = Product(
title='Reebok Classic Leather',
price = 49.01,
description='Delivery takes place from 4 days after an order has been confirmed.',
)
product_4.save()
prod_type_male.product_class.add(product_4)
product_5 = Product(
title='Vans x Rains UA Old Skool Lite',
price = 149.98,
description='Delivery takes place from 4 days after an order has been confirmed.',
)
product_5.save()
prod_type_male.product_class.add(product_5)
product_6 = Product(
title='Asics x asphaldgold GEL-DS Trainer OG',
price = 309,
description='Delivery takes place from 4 days after an order has been confirmed.',
)
product_6.save()
prod_type_male.product_class.add(product_6)
product_7 = Product(
title='Nike Air Max 98',
price = 200,
description='Delivery takes place from 4 days after an order has been confirmed.',
)
product_7.save()
prod_type_junior.product_class.add(product_7)
product_8 = Product(
title='adidas ZX750',
price = 38,
description='Delivery takes place from 4 days after an order has been confirmed.',
)
product_8.save()
prod_type_junior.product_class.add(product_8)
product_1.category.add(cat1)
product_2.category.add(cat2)
product_3.category.add(cat3)
product_4.category.set(cat4)
product_5.category.set(cat5)
product_6.category.set(cat6)
product_7.category.set(cat7)
product_8.category.add(cat1)
class ProductModelTest(TestCase):
def setUp(self):
model_setup()
def test_get_all_products_from_footwear_product_class(self):
prod = Product.objects.filter(product_class__name='Footwear')
self.assertQuerysetEqual(prod,
['<Product: Reebok Classic Leather>',
'<Product: Air Jordan 11 Retro "Space Jam">',
'<Product: Vans x Rains UA Old Skool Lite>',
'<Product: Asics x asphaldgold GEL-DS Trainer OG>',
'<Product: Nike Air Max 270>',
'<Product: adidas NMD R2>',
'<Product: Nike Air Max 98>',
'<Product: adidas ZX750>'], ordered=False
)
class ProductManagerModelTest(TestCase):
def setUp(self):
model_setup()
def test_custom_function_search(self):
qs = Product.objects.search('air')
self.assertQuerysetEqual(qs,
[# Result of product attributes
'<Product: Air Jordan 11 Retro "Space Jam">',
'<Product: Nike Air Max 270>',
'<Product: Nike Air Max 98>',
# Result of category attributes
'<Product: Reebok Classic Leather>',
'<Product: Asics x asphaldgold GEL-DS Trainer OG>',
'<Product: adidas ZX750>',], ordered=False
)
def test_get_product_by_slug(self):
obj = Product.objects.get_product_by_slug('reebok-classic-leather')
exp = Product.objects.get(title='Reebok Classic Leather')
        self.assertEqual(obj.id, exp.id)
    def test_sort_product_by_timestamp(self):
        # slicing both querysets forces evaluation to plain lists for comparison
        reversed_prod_list = Product.objects.all()[::-1]
        sorted_prod_list = Product.objects.sort_product_by_timestamp()[::1]
        self.assertEqual(sorted_prod_list, reversed_prod_list)
class CategoryModelTest(TestCase):
@classmethod
def setUpTestData(cls):
Category.objects.create(name='Air Max')
Category.objects.create(name='Jordan')
def test_name_getting(self):
category = Category.objects.first().name
        self.assertEqual(category, 'Air Max')
class ProductTypeModelTest(TestCase):
def setUp(self):
model_setup()
def test_display_of___str___function(self):
prod_type = ProductType.objects.filter(name='Footwear', sex='Female')
self.assertQuerysetEqual(prod_type,
['<ProductType: Footwear -> Female>'], ordered=False)
def test_unique_together_for_name_and_sex(self):
with self.assertRaises(IntegrityError):
ProductType.objects.create(name='Footwear', sex='Female')
def test_get_all_female_categories(self):
prod_type = ProductType.objects.get(name='Footwear', sex='Female')
categories = prod_type.category.all()
self.assertQuerysetEqual(categories,
['<Category: Air Max>', '<Category: Jordan>'], ordered=False)
def test_get_all_male_products(self):
prod_type = ProductType.objects.get(name='Footwear', sex='Male')
products = prod_type.product_class.all()
self.assertQuerysetEqual(products,
['<Product: Reebok Classic Leather>',
'<Product: Vans x Rains UA Old Skool Lite>',
'<Product: Asics x asphaldgold GEL-DS Trainer OG>'], ordered=False
)
    def test_get_all_male_products_with_respect_to_categories(self):
        prod_type = ProductType.objects.get(name='Footwear', sex='Male')
        products = prod_type.product_class.filter(category__in=prod_type.category.all())
        # product_5 matches two of the type's categories, so the M2M join
        # returns it twice (no .distinct() is applied)
        self.assertQuerysetEqual(products,
            ['<Product: Reebok Classic Leather>',
             '<Product: Vans x Rains UA Old Skool Lite>',
             '<Product: Vans x Rains UA Old Skool Lite>',
             '<Product: Asics x asphaldgold GEL-DS Trainer OG>'], ordered=False
        )
def test_get_product_when_categories_do_not_match(self):
'''
Try to get product when ProductType and Product categories do not match.
We should get empty QuerySet as the output.
'''
prod_type = ProductType.objects.get(name='Footwear', sex='Junior')
products = prod_type.product_class.filter(title='adidas ZX750')\
.filter(category__in=prod_type.category.all())
self.assertQuerysetEqual(products,
[], ordered=False
)
|
#!/usr/bin/env python
"""
pyjld.phidgets.erl_manager.erl_server
"""
__author__ = "Jean-Lou Dupont"
__email__ = "python (at) jldupont.com"
__fileid__ = "$Id: erl_server.py 77 2009-05-04 18:10:24Z jeanlou.dupont $"
__all__ = ['ErlServer']
class ErlServer(object):
"""
"""
def __init__(self):
pass
def link(self):
"""
Links to dll/so
"""
|
import requests
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import re
import os
import gzip
import json
'''
Test page: httpbin.org
Attention:
1. Non-ASCII characters in URL query parameters raise errors; they must be
   percent-encoded, e.g. name=周杰伦 ---> name=%E5%91%A8%E6%9D%B0%E4%BC%A6
'''
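# A minimal sketch of the percent-encoding mentioned above, using only the
# standard library (the encoded URL below is illustrative):
#   from urllib.parse import quote
#   quote('周杰伦')  # -> '%E5%91%A8%E6%9D%B0%E4%BC%A6'
#   encoded = 'http://tool.liumingye.cn/music/?page=audioPage&type=migu&name=' + quote('周杰伦')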
url = 'http://tool.liumingye.cn/music/?page=audioPage&type=migu&name=%E5%91%A8%E6%9D%B0%E4%BC%A6'
#url = 'https://www.douban.com/'
# # Make a GET request
# response = urlopen(url, timeout=10)
# # (response.read().decode('utf-8'))
# print(response.status)
# Make a POST request
# import urllib.parse
#
# data = bytes(urllib.parse.urlencode({'hello': 'world'}), encoding='utf-8')
# response = urlopen(url, data=data)
# print(response.read().decode('utf-8'))
# Add header information to mimic a real browser
headers = {"Accept": "text/html,application/xhtml+xml,"
"application/xml;q=0.9,image/webp,image/apng,"
"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding":" gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "max-age=0",
"Cookie": "UM_distinctid=173b301acfc308-04faef6924753b-3972095d-1fa400-173b301ad004d3; "
"myfreemp3_lang=zh-cn; CNZZDATA1277593802=1255473617-1596435498-%7C1596605456",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/84.0.4147.105 Safari/537.36"}
req = Request(url=url, headers=headers)
response = urlopen(req)
response_decode = gzip.decompress(response.read()).decode('utf-8')
print(response_decode)
if __name__ == '__main__':
os.chdir('/home/jzy/Desktop/Scrapy_project')
|
"""
我的哈希的理解:
1. 哈希是一种一一映射方法
2. 它需要保证对不同的对象生成的哈希值不可以相同
3. 哈希过程很快
4. 也叫散列技术
"""
print(hash('sdg'))
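# A small demonstration of the points above (CPython semantics assumed):
assert hash('sdg') == hash('sdg')  # equal objects -> equal hash values
assert hash(1) == hash(1.0)        # 1 == 1.0, so their hashes must agree
# Collisions between distinct objects do exist; for example, on 64-bit CPython
# integer hashes are taken modulo 2**61 - 1:
print(hash(2**61 - 1), hash(0))    # both print 0 on 64-bit builds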
|
# -*- coding: utf-8 -*-
import math
import time
import random
from heap import *
h = 0 # Global counter for Vertex IDs
d = 2
# Dijkstra's algorithm
# G is the graph
# s is the starting node
# e is the ending node
def djikstra(G, s, e):
unvisited = [] # Here we can do variations of d
visited = []
    for i in G: # Runs in |V| time as we need to initialise the algorithm
i.dist = float("inf")
i.prev = None
unvisited.append(i)
visited.append(False)
s.dist = 0
visited[s.n] = True # We assume constant time for python lists
unvisited.remove(s)
curr = s
global d
adjacent = Heap(d) # We change this if we want another kind of heap than a binary one. The d in this d-ary heap is the d in log_d
while len(unvisited) > 0: # Runs a (worst case) total of |V| times, as we may need to visit every vertex
# Debug stuff. Breakpoints are for brainiacs
#if curr == e:
# print "Found ending!", curr.dist
#print "Iteratirng on", curr.n
#print "Unvisited:", unvisited
#print
#print "Visited:", visited
#print
#print
# This will run |E| times
        for ed in curr.edges: # Runs at most |V| - 1 times, if the graph is well connected (worst-case scenario: EVERY vertex is well connected)
if not visited[ed.dest]: # We only consider the unvisited vertices.
if (curr.dist + ed.weight < G[ed.dest].dist): # We compare the tentative distance
# print "Replacing tentative distances...", G[ed.dest].dist, "->", curr.dist + ed.weight, "for", G[ed.dest].n
G[ed.dest].dist = curr.dist + ed.weight # Update it
G[ed.dest].prev = ed.src
adjacent.insert(G[ed.dest].dist, G[ed.dest]) # Insert it into the heap for future use. Takes log |V| time.
# Greatest cost of entire for-loop: |E| log |V|, regardless of the amount of times the overlying while-loop is executed
if not visited[curr.n]:
visited[curr.n] = True
#else:
# print "Found", curr.n, "in visited set."
        try:
            unvisited.remove(curr)
        except ValueError:  # curr was already removed from the unvisited list
            pass
if adjacent.isEmpty():
return e.dist
curr = get_min_unvisited(G, adjacent, visited)
if (curr == None): # Now we have vertices that cannot be reached from our starting position..
break
# Greatest cost of entire while-loop: |V| log |V|
#print
path = [] # Get the optimal path to the end point. This takes a magnitude of |V| time.
if e.dist < float("inf"):
path = get_prev(G, s.n, e.n)
    return (e.dist, path) # Everything will take about O(|V| log_d |V| + |E| log_d |V| + 2|V|) in the worst case
def get_min_unvisited(G, heap, visited):
    try:
        (key, value) = heap.getMin()
    except Exception:  # the heap is empty
        return None
if not heap.isEmpty():
heap.deleteMin()
if visited[value.n]:
return get_min_unvisited(G, heap, visited)
return value
# G is the graph
# start_num is the _position_ of the starting node
# end_num is the _position_ of the goal node
def get_prev(G, start_num, end_num): # Recursively determine the path from the end to the start.
if end_num == start_num:
return [G[end_num].n]
else:
return get_prev(G, start_num, G[end_num].prev) + [G[end_num].n] # We do it in this order because python can't do tail recursion AFAWK
# Links the Vertex v1 with v2. This will link them at both ends, since we want an undirected graph.
def link(v1, v2, w):
v1.edges.append(Edge(v1.n, v2.n, w))
v2.edges.append(Edge(v2.n, v1.n, w))
class Vertex: # Container for our vertex data
def __init__(self, edges):
        global h # Global variable that keeps track of the IDs we should give the vertices
self.n = h
h += 1
self.edges = edges
def addEdge(self, edge): # We keep a list of edges, an _adjacency list_ as our data structure.
self.edges.append(edge)
def __repr__(self):
edge_desc = ""
for e in self.edges:
edge_desc += str(e) + "; "
return str(self.n) + " has edges:: " + edge_desc + "$"
class Edge:
def __init__(self, src, dest, weight):
self.weight = weight
self.src = src
self.dest = dest
def __repr__(self):
return "from: " + str(self.src) + ", to: " + str(self.dest) + ", weight: " + str(self.weight)
# This generates a graph with n*n vertices and 2n(n-1) edges
# Runs in a magnitude of 2n^2 time. Should be used sparingly.
def generate_grid_graph(m, n):
v = []
global h # We want to start indexing at 0
oh = h
nsq = n * m
    rint_max = 20 # The maximum weight we want on our edges
h = 0
    for j in range(0, m): # Sloppy implementation: we create everything before we create the connections.
for i in range(0, n): # Generate the vertices
v.append(Vertex([]))
for j in range(0, n):
for i in range(0, m):
next_vertical = (j+1)*m+i
next_horisontal = j*m+1+i
current = j*m+i
if (next_vertical < nsq):
link(v[current], v[next_vertical], random.randint(1,rint_max))
if (next_horisontal < nsq and next_horisontal < m*(j+1)): # We don't want the edges to wrap
link(v[current], v[next_horisontal], random.randint(1,rint_max))
h = oh # We restore the global variable.
return v
# vertices = []
# for i in range(0,15):
# vertices.append(Vertex([]))
# v = vertices
# link(v[0], v[1], 12)
# link(v[0], v[2], 60)
# link(v[0], v[3], 13)
# link(v[1], v[2], 20)
# link(v[1], v[7], 24)
# link(v[2], v[4], 13)
# link(v[3], v[8], 12)
# link(v[3], v[9], 13)
# link(v[4], v[5], 8)
# link(v[4], v[11], 24)
# link(v[5], v[6], 2)
# link(v[6], v[10], 3)
# link(v[8], v[9], 1)
# link(v[9], v[11], 32)
# link(v[10], v[11], 30)
# link(v[11], v[12], 10)
# link(v[11], v[13], 5)
# link(v[12], v[13], 4)
# link(v[12], v[14], 7)
# link(v[13], v[14], 80)
# print djikstra(v, v[0], v[14])
## Test bench for variations of n
sizes = range(2, 48)
f = open("lab3_n.csv", "w")
f.write("n;m;vertices;edges;time\n")
for i in sizes:
avg = 0
for j in range (0,3):
v = generate_grid_graph(i, i)
v.append(Vertex([]))
startt = time.time()
(result, path) = djikstra(v, v[0], v[i*i-1])
endt = time.time()
avg += (endt-startt)
avg /= 3
print "It takes", avg, "for n =", i
print result
print path
f.write("%s;%s;%s;%s;%s\n" % (str(i), str(i), str(i*i), str(i*(i-1) + i*(i-1)), str(avg)))
f.close()
f = open("lab3_d.csv", "w")
f.write("d;time\n")
v = generate_grid_graph(8, 8)
for i in range(2, 100):
d = i
startt = time.time()
(result, path) = djikstra(v, v[0], v[63])
print result
print path
endt = time.time()
print "It takes", endt-startt, "for d =", i
f.write("%s;%s\n" % (str(d), str(endt-startt)))
f.close()
|
from telegram import InlineKeyboardButton
from core.models import UserPreference
from intent import Intent, IntentType
from chatgpt_model import model_names, get_next_model
def ask_button():
return InlineKeyboardButton(
        text='Ask a question',
switch_inline_query_current_chat='',
)
def eco_mode_button(preference: UserPreference):
return InlineKeyboardButton(
text=f'🌱 Eco mode: {"On" if preference.eco_mode else "Off"}',
callback_data=Intent(
type=IntentType.SwitchEcoMode,
enable=not preference.eco_mode
).encode()
)
def model_button(preference: UserPreference):
return InlineKeyboardButton(
text=f'🤖 Model: {model_names[preference.model_name]}',
callback_data=Intent(
type=IntentType.SwitchModel,
model_name=get_next_model(preference.model_name)
).encode()
)
|
import ast
import sys
import difflib
from textwrap import indent
import numpy as np
import numpydoc.docscrape
def w(orig):
    """Make whitespace visible on changed diff lines (spaces -> '·')."""
    ll = []
for l in orig:
if l[0] in "+-":
# ll.append(l.replace(' ', '⎵'))
ll.append(l.replace(" ", "·"))
else:
ll.append(l)
lll = []
for l in ll:
if l.endswith("\n"):
lll.append(l[:-1])
else:
lll.append(l[:])
return lll
class DocstringFormatter:
@classmethod
def format_Signature(self, s):
return s
@classmethod
def format_Summary(self, s):
if len(s) == 1 and not s[0].strip():
return ""
return "\n".join(s) + "\n"
@classmethod
def format_Extended_Summary(self, es):
return "\n".join(es) + "\n"
@classmethod
def _format_ps(cls, name, ps):
res = cls._format_ps_pref(name, ps, compact=True)
if res is not None:
return res
return cls._format_ps_pref(name, ps, compact=False)
@classmethod
def _format_ps_pref(cls, name, ps, *, compact):
out = name + "\n"
out += "-" * len(name) + "\n"
for i, p in enumerate(ps):
if (not compact) and i:
out += "\n"
if p.type:
out += f"""{p.name} : {p.type}\n"""
else:
out += f"""{p.name}\n"""
if p.desc:
if any([l.strip() == "" for l in p.desc]) and compact:
return None
out += indent("\n".join(p.desc), " ")
out += "\n"
return out
@classmethod
def format_Parameters(cls, ps):
return cls._format_ps("Parameters", ps)
@classmethod
def format_Other_Parameters(cls, ps):
return cls._format_ps("Other Parameters", ps)
@classmethod
def format_See_Also(cls, sas):
out = "See Also\n"
out += "--------\n"
for a, b in sas:
if b:
desc = b[0]
else:
desc = None
if len(b) > 1:
rest_desc = b[1:]
else:
rest_desc = []
_first = True
for ref, type_ in a:
if not _first:
out += ", "
if type_ is not None:
out += f":{type_}:`{ref}`"
else:
out += f"{ref}"
_first = False
if desc:
if len(a) > 1:
out += f"\n {desc}"
else:
out += f" : {desc}"
for rd in rest_desc:
out += "\n " + rd
out += "\n"
return out
@classmethod
def format_References(cls, lines):
out = "References\n"
out += "----------\n"
out += "\n".join(lines)
out += "\n"
return out
@classmethod
def format_Notes(cls, lines):
out = "Notes\n"
out += "-----\n"
out += "\n".join(lines)
out += "\n"
return out
@classmethod
def format_Examples(cls, lines):
out = "Examples\n"
out += "--------\n"
out += "\n".join(lines)
out += "\n"
return out
@classmethod
def format_Warns(cls, ps):
return cls.format_RRY("Warns", ps)
@classmethod
def format_Raises(cls, ps):
return cls.format_RRY("Raises", ps)
@classmethod
def format_Yields(cls, ps):
return cls.format_RRY("Yields", ps)
@classmethod
def format_Returns(cls, ps):
return cls.format_RRY("Returns", ps)
@classmethod
def format_RRY(cls, name, ps):
out = name + "\n"
out += "-" * len(name) + "\n"
if name == "Returns":
if len(ps) > 1:
# do heuristic to check we actually have a description list and not a paragraph
pass
for i, p in enumerate(ps):
#if i:
# out += "\n"
if p.name:
out += f"""{p.name} : {p.type}\n"""
else:
out += f"""{p.type}\n"""
if p.desc:
out += indent("\n".join(p.desc), " ")
out += "\n"
return out
def test(docstr, fname, *, indempotenty_check):
    fmt = compute_new_doc(docstr, fname, indempotenty_check=indempotenty_check)
dold = docstr.splitlines()
dnew = fmt.splitlines()
diffs = list(difflib.unified_diff(dold, dnew, n=100, fromfile=fname, tofile=fname),)
if diffs:
from pygments import highlight
from pygments.lexers import DiffLexer
from pygments.formatters import TerminalFormatter
code = "\n".join(w(diffs))
hldiff = highlight(code, DiffLexer(), TerminalFormatter())
print(indent(hldiff, " | ", predicate=lambda x: True))
def compute_new_doc(docstr, fname, *, indempotenty_check):
original_docstr = docstr
if len(docstr.splitlines()) <= 1:
return ""
shortdoc = False
short_with_space = False
if not docstr.startswith("\n "):
docstr = "\n " + docstr
shortdoc = True
if original_docstr[0] == " ":
short_with_space = True
long_end = True
long_with_space = True
if original_docstr.splitlines()[-1].strip():
long_end = False
if original_docstr.splitlines()[-2].strip():
long_with_space = False
doc = numpydoc.docscrape.NumpyDocString(docstr)
fmt = ""
start = True
    # ordered_sections is a local patch that records the original section order of the docstring.
for s in getattr(doc, "ordered_sections", doc.sections):
if doc[s]:
f = getattr(DocstringFormatter, "format_" + s.replace(" ", "_"))
if not start:
fmt += "\n"
start = False
fmt += f(doc[s])
fmt = indent(fmt, " ") + " "
if "----" in fmt:
if long_with_space:
fmt = fmt.rstrip(" ") + "\n "
else:
fmt = fmt.rstrip(" ") + " "
if shortdoc:
fmt = fmt.lstrip()
if short_with_space:
fmt = " " + fmt
else:
fmt = "\n" + fmt
if not long_end:
fmt = fmt.rstrip()
if indempotenty_check:
ff = fmt
if not ff.startswith("\n "):
ff = "\n " + ff
d2 = numpydoc.docscrape.NumpyDocString(ff)
if not d2._parsed_data == doc._parsed_data:
raise ValueError(
"Numpydoc parsing seem to differ after reformatting, this may be a reformatting bug. Rerun with --unsafe",
fname,
d2._parsed_data,
doc._parsed_data,
)
assert fmt
return fmt
def main():
import argparse
parser = argparse.ArgumentParser(description="reformat the docstrigns of some file")
parser.add_argument("files", metavar="files", type=str, nargs="+", help="TODO")
parser.add_argument("--context", metavar="context", type=int, default=3)
parser.add_argument("--unsafe", action="store_true")
    parser.add_argument(
        "--write", dest="write", action="store_true",
        help="write changes back to the files instead of printing the diff",
    )
args = parser.parse_args()
to_format = []
from pathlib import Path
for f in args.files:
p = Path(f)
if p.is_dir():
for sf in p.glob("**/*.py"):
to_format.append(sf)
else:
to_format.append(p)
for file in to_format:
# print(file)
with open(file, "r") as f:
data = f.read()
tree = ast.parse(data)
new = data
funcs = [t for t in tree.body if isinstance(t, ast.FunctionDef)]
for i, func in enumerate(funcs[:]):
# print(i, "==", func.name, "==")
try:
e0 = func.body[0]
if not isinstance(e0, ast.Expr):
continue
# e0.value is _likely_ a Constant node.
docstring = e0.value.s
except AttributeError:
continue
if not isinstance(docstring, str):
continue
start, nindent, stop = (
func.body[0].lineno,
func.body[0].col_offset,
func.body[0].end_lineno,
)
            if docstring not in data:
                print(f"skip {file}: {func.name}, can't do replacement yet")
                continue
new_doc = compute_new_doc(
docstring, file, indempotenty_check=(not args.unsafe)
)
# test(docstring, file)
if new_doc:
if ('"""' in new_doc) or ("'''" in new_doc):
print(
"SKIPPING", file, func.name, "triple quote not handled", new_doc
)
else:
if docstring not in new:
print("ESCAPE issue:", docstring)
new = new.replace(docstring, new_doc)
# test(docstring, file)
if new != data:
dold = data.splitlines()
dnew = new.splitlines()
diffs = list(
difflib.unified_diff(
dold, dnew, n=args.context, fromfile=str(file), tofile=str(file)
),
)
from pygments import highlight
from pygments.lexers import DiffLexer
from pygments.formatters import TerminalFormatter
if not args.write:
code = "\n".join(diffs)
hldiff = highlight(code, DiffLexer(), TerminalFormatter())
print(hldiff)
else:
with open(file, "w") as f:
f.write(new)
|
# On a N * N grid, we place some 1 * 1 * 1 cubes that are axis-aligned
# with the x, y, and z axes.
#
# Each value v = grid[i][j] represents a tower of v cubes placed on top
# of grid cell (i, j).
#
# Now we view the projection of these cubes onto the xy, yz, and zx planes.
#
# A projection is like a shadow, that maps our 3 dimensional figure
# to a 2 dimensional plane.
#
# Here, we are viewing the "shadow" when looking at the cubes from
# the top, the front, and the side.
#
# Return the total area of all three projections.
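# Worked example for grid = [[1, 2], [3, 4]] (the test input below):
#   top view       : 2*2 cells, none of them 0      -> 4
#   one side view  : sum of row maxima    = 2 + 4   -> 6
#   other side view: sum of column maxima = 3 + 4   -> 7
#   total projection area = 4 + 6 + 7 = 17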
class Solution:
def projectionArea(self, grid):
tot = len(grid[0]) ** 2
tot -= sum([elem for elem in grid], []).count(0)
for elem in grid:
tot += max(elem)
for elem in zip(*grid):
tot += max(elem)
return tot
if __name__ == "__main__":
testinput = [[1, 2], [3, 4]]
print(Solution.projectionArea(Solution, testinput))
|
def main():
    for i in range(0, 101, 2):
        print(i)
    for i in range(0, 101, 2):
        print(100 - i)
main()
|
# coding:utf-8
# --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pprint
import pdb
import time
import _init_paths
import torch
from torch.autograd import Variable
import torch.nn as nn
from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.utils.net_utils import (
adjust_learning_rate,
save_checkpoint,
get_dataloader,
setup_seed,
)
from model.ema.optim_weight_ema import WeightEMA
from model.utils.parser_func import parse_args, set_dataset_args
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from prettytimer import PrettyTimer
def get_cfg():
args = parse_args()
print("Called with args:")
print(args)
args = set_dataset_args(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print("Using config:")
pprint.pprint(cfg)
# np.random.seed(cfg.RNG_SEED)
setup_seed(cfg.RNG_SEED)
return args
if __name__ == "__main__":
args = get_cfg()
output_dir = f"{args.save_dir}/{args.net}/{args.dataset}"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.dataset_t == "water":
args.aug = False
if args.dataset_t == "foggy_cityscape":
        # initialize the network here.
from model.umt_faster_rcnn_truncate.umt_vgg16 import vgg16
from model.umt_faster_rcnn_truncate.umt_resnet import resnet
else:
from model.umt_faster_rcnn.umt_vgg16 import vgg16
from model.umt_faster_rcnn.umt_resnet import resnet
student_save_name = os.path.join(
output_dir,
"conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_thresh_{}"
"_lambda_{}_student_target_{}".format(
args.conf,
args.conf_gamma,
args.source_like,
args.aug,
args.target_like,
args.pretrained_epoch,
args.pl,
args.threshold,
args.lam,
args.dataset_t,
),
)
print("Model will be saved to: ")
print(student_save_name)
# torch.backends.cudnn.benchmark = True
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# train set
# -- Note: Use validation set and disable the flipped to enable faster loading.
cfg.TRAIN.USE_FLIPPED = True
cfg.USE_GPU_NMS = args.cuda
# source train set
s_imdb, s_train_size, s_dataloader = get_dataloader(args.imdb_name, args)
# source-like/fake-source train set data loader
if args.source_like:
s_fake_imdb, s_fake_train_size, s_fake_dataloader = get_dataloader(
args.imdb_name_fake_source, args, sequential=True, augment=args.aug
)
else:
s_fake_imdb, s_fake_train_size, s_fake_dataloader = get_dataloader(
args.imdb_name_target, args, sequential=True, augment=args.aug
)
# target train set
t_imdb, t_train_size, t_dataloader = get_dataloader(
args.imdb_name_target, args, sequential=True, augment=args.aug
)
# target-like/fake-target train set
t_fake_imdb, t_fake_train_size, t_fake_dataloader = get_dataloader(
args.imdb_name_fake_target, args
)
print("{:d} source roidb entries".format(s_train_size))
print("{:d} source like roidb entries".format(s_fake_train_size))
print("{:d} target roidb entries".format(t_train_size))
print("{:d} target like roidb entries".format(t_fake_train_size))
    # initialize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
imdb = s_imdb
if args.net == "vgg16":
student_fasterRCNN = vgg16(
imdb.classes,
pretrained=True,
class_agnostic=args.class_agnostic,
conf=args.conf,
)
teacher_fasterRCNN = vgg16(
imdb.classes,
pretrained=True,
class_agnostic=args.class_agnostic,
conf=args.conf,
)
elif args.net == "res101":
student_fasterRCNN = resnet(
imdb.classes,
101,
pretrained=True,
class_agnostic=args.class_agnostic,
conf=args.conf,
)
teacher_fasterRCNN = resnet(
imdb.classes,
101,
pretrained=True,
class_agnostic=args.class_agnostic,
conf=args.conf,
)
elif args.net == "res50":
student_fasterRCNN = resnet(
imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic
)
teacher_fasterRCNN = resnet(
imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic
)
else:
print("network is not defined")
pdb.set_trace()
student_fasterRCNN.create_architecture()
teacher_fasterRCNN.create_architecture()
lr = cfg.TRAIN.LEARNING_RATE
lr = args.lr
student_detection_params = []
params = []
for key, value in dict(student_fasterRCNN.named_parameters()).items():
if value.requires_grad:
if "bias" in key:
params += [
{
"params": [value],
"lr": lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
"weight_decay": cfg.TRAIN.BIAS_DECAY
and cfg.TRAIN.WEIGHT_DECAY
or 0,
}
]
else:
params += [
{
"params": [value],
"lr": lr,
"weight_decay": cfg.TRAIN.WEIGHT_DECAY,
}
]
student_detection_params += [value]
teacher_detection_params = []
for key, value in dict(teacher_fasterRCNN.named_parameters()).items():
if value.requires_grad:
teacher_detection_params += [value]
value.requires_grad = False
if args.optimizer == "adam":
lr = lr * 0.1
student_optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
student_optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
teacher_optimizer = WeightEMA(
teacher_detection_params, student_detection_params, alpha=args.teacher_alpha
)
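    # Assumption: WeightEMA applies the usual mean-teacher exponential moving
    # average after each student step, roughly
    #   teacher_param <- alpha * teacher_param + (1 - alpha) * student_param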
if args.cuda:
student_fasterRCNN.cuda()
teacher_fasterRCNN.cuda()
if args.resume:
student_checkpoint = torch.load(args.student_load_name)
args.session = student_checkpoint["session"]
args.start_epoch = student_checkpoint["epoch"]
student_fasterRCNN.load_state_dict(student_checkpoint["model"])
student_optimizer.load_state_dict(student_checkpoint["optimizer"])
lr = student_optimizer.param_groups[0]["lr"]
if "pooling_mode" in student_checkpoint.keys():
cfg.POOLING_MODE = student_checkpoint["pooling_mode"]
print("loaded checkpoint %s" % (args.student_load_name))
teacher_checkpoint = torch.load(args.teacher_load_name)
teacher_fasterRCNN.load_state_dict(teacher_checkpoint["model"])
if "pooling_mode" in teacher_checkpoint.keys():
cfg.POOLING_MODE = teacher_checkpoint["pooling_mode"]
print("loaded checkpoint %s" % (args.teacher_load_name))
if args.mGPUs:
student_fasterRCNN = nn.DataParallel(student_fasterRCNN)
teacher_fasterRCNN = nn.DataParallel(teacher_fasterRCNN)
iters_per_epoch = int(10000 / args.batch_size)
if args.use_tfboard:
from tensorboardX import SummaryWriter
logger = SummaryWriter("logs")
count_iter = 0
conf_gamma = args.conf_gamma
pretrained_epoch = args.pretrained_epoch
timer = PrettyTimer()
for epoch in range(args.start_epoch, args.max_epochs + 1):
# setting to train mode
student_fasterRCNN.train()
teacher_fasterRCNN.train()
loss_temp = 0
start = time.time()
epoch_start = time.time()
if epoch % (args.lr_decay_step + 1) == 0:
adjust_learning_rate(student_optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
data_iter_s = iter(s_dataloader)
data_iter_t = iter(t_dataloader)
data_iter_s_fake = iter(s_fake_dataloader)
data_iter_t_fake = iter(t_fake_dataloader)
for step in range(1, iters_per_epoch + 1):
timer.start("iter")
try:
data_s = next(data_iter_s)
            except StopIteration:
data_iter_s = iter(s_dataloader)
data_s = next(data_iter_s)
try:
data_s_fake = next(data_iter_s_fake)
            except StopIteration:
data_iter_s_fake = iter(s_fake_dataloader)
data_s_fake = next(data_iter_s_fake)
try:
data_t = next(data_iter_t)
            except StopIteration:
data_iter_t = iter(t_dataloader)
data_t = next(data_iter_t)
assert (
data_s_fake[0].size() == data_t[0].size()
), "The size should be same between source fake and target"
assert (
data_s_fake[1] == data_t[1]
).all(), "The image info should be same between source fake and target"
try:
data_t_fake = next(data_iter_t_fake)
            except StopIteration:
data_iter_t_fake = iter(t_fake_dataloader)
data_t_fake = next(data_iter_t_fake)
# eta = 1.0
count_iter += 1
# put source data into variable
im_data.data.resize_(data_s[0].size()).copy_(data_s[0])
im_info.data.resize_(data_s[1].size()).copy_(data_s[1])
gt_boxes.data.resize_(data_s[2].size()).copy_(data_s[2])
num_boxes.data.resize_(data_s[3].size()).copy_(data_s[3])
student_fasterRCNN.zero_grad()
(
rois,
cls_prob,
bbox_pred,
rpn_loss_cls,
rpn_loss_box,
RCNN_loss_cls,
RCNN_loss_bbox,
rois_label,
out_d_pixel,
out_d,
confidence_loss,
_,
) = student_fasterRCNN(im_data, im_info, gt_boxes, num_boxes, hints=True)
loss = (
rpn_loss_cls.mean()
+ rpn_loss_box.mean()
+ RCNN_loss_cls.mean()
+ RCNN_loss_bbox.mean()
)
if args.conf:
conf_loss = confidence_loss.mean()
if args.target_like:
# put fake target data into variable
im_data.data.resize_(data_t_fake[0].size()).copy_(data_t_fake[0])
im_info.data.resize_(data_t_fake[1].size()).copy_(data_t_fake[1])
# gt is empty
gt_boxes.data.resize_(data_t_fake[2].size()).copy_(data_t_fake[2])
num_boxes.data.resize_(data_t_fake[3].size()).copy_(data_t_fake[3])
(
rois,
cls_prob,
bbox_pred,
rpn_loss_cls_t_fake,
rpn_loss_box_t_fake,
RCNN_loss_cls_t_fake,
RCNN_loss_bbox_t_fake,
rois_label_t_fake,
out_d_pixel,
out_d,
_,
_,
) = student_fasterRCNN(
im_data, im_info, gt_boxes, num_boxes, hints=False
) # --------------------------------
loss += (
rpn_loss_cls_t_fake.mean()
+ rpn_loss_box_t_fake.mean()
+ RCNN_loss_cls_t_fake.mean()
+ RCNN_loss_bbox_t_fake.mean()
)
if epoch > pretrained_epoch and args.pl:
teacher_fasterRCNN.eval()
im_data.data.resize_(data_s_fake[0].size()).copy_(data_s_fake[0])
im_info.data.resize_(data_s_fake[1].size()).copy_(data_s_fake[1])
                # gt is empty
gt_boxes.data.resize_(1, 1, 5).zero_()
num_boxes.data.resize_(1).zero_()
(
rois,
cls_prob,
bbox_pred,
rpn_loss_cls_,
rpn_loss_box_,
RCNN_loss_cls_,
RCNN_loss_bbox_,
rois_label_,
d_pred_,
_,
_,
confidence_s_fake,
) = teacher_fasterRCNN(im_data, im_info, gt_boxes, num_boxes, test=True)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = (
box_deltas.view(-1, 4)
* torch.FloatTensor(
cfg.TRAIN.BBOX_NORMALIZE_STDS
).cuda()
+ torch.FloatTensor(
cfg.TRAIN.BBOX_NORMALIZE_MEANS
).cuda()
)
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = (
box_deltas.view(-1, 4)
* torch.FloatTensor(
cfg.TRAIN.BBOX_NORMALIZE_STDS
).cuda()
+ torch.FloatTensor(
cfg.TRAIN.BBOX_NORMALIZE_MEANS
).cuda()
)
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
scores = scores.squeeze()
if args.conf:
scores = torch.sqrt(
scores * confidence_s_fake
) # using confidence score to adjust scores
pred_boxes = pred_boxes.squeeze()
gt_boxes_target = []
pre_thresh = 0.0
thresh = args.threshold
empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))
for j in range(1, len(imdb.classes)):
inds = torch.nonzero(scores[:, j] > pre_thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = scores[:, j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4 : (j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
# all_boxes[j][i] = cls_dets.cpu().numpy()
cls_dets_numpy = cls_dets.cpu().numpy()
for i in range(np.minimum(10, cls_dets_numpy.shape[0])):
bbox = tuple(
int(np.round(x)) for x in cls_dets_numpy[i, :4]
)
score = cls_dets_numpy[i, -1]
if score > thresh:
gt_boxes_target.append(list(bbox[0:4]) + [j])
gt_boxes_padding = torch.FloatTensor(cfg.MAX_NUM_GT_BOXES, 5).zero_()
if len(gt_boxes_target) != 0:
gt_boxes_numpy = torch.FloatTensor(gt_boxes_target)
num_boxes_cpu = torch.LongTensor(
[min(gt_boxes_numpy.size(0), cfg.MAX_NUM_GT_BOXES)]
)
gt_boxes_padding[:num_boxes_cpu, :] = gt_boxes_numpy[:num_boxes_cpu]
else:
num_boxes_cpu = torch.LongTensor([0])
# teacher_fasterRCNN.train()
                # put target data into variable
im_data.data.resize_(data_t[0].size()).copy_(data_t[0])
im_info.data.resize_(data_t[1].size()).copy_(data_t[1])
gt_boxes_padding = torch.unsqueeze(gt_boxes_padding, 0)
gt_boxes.data.resize_(gt_boxes_padding.size()).copy_(gt_boxes_padding)
num_boxes.data.resize_(num_boxes_cpu.size()).copy_(num_boxes_cpu)
(
rois,
cls_prob,
bbox_pred,
rpn_loss_cls_s_fake,
rpn_loss_box_s_fake,
RCNN_loss_cls_s_fake,
RCNN_loss_bbox_s_fake,
rois_label_s_fake,
out_d_pixel,
out_d,
_,
_,
) = student_fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
loss += args.lam * (
rpn_loss_cls_s_fake.mean()
+ rpn_loss_box_s_fake.mean()
+ RCNN_loss_cls_s_fake.mean()
+ RCNN_loss_bbox_s_fake.mean()
)
if args.conf:
loss += conf_gamma * conf_loss
loss_temp += loss.item()
student_optimizer.zero_grad()
loss.backward()
student_optimizer.step()
teacher_fasterRCNN.zero_grad()
teacher_optimizer.step()
timer.end("iter")
estimate_time = timer.eta(
"iter", count_iter, args.max_epochs * iters_per_epoch
)
if step % args.disp_interval == 0:
end = time.time()
if step > 0:
loss_temp /= args.disp_interval
if args.mGPUs:
loss_rpn_cls = rpn_loss_cls.mean().item()
loss_rpn_box = rpn_loss_box.mean().item()
loss_rcnn_cls = RCNN_loss_cls.mean().item()
loss_rcnn_box = RCNN_loss_bbox.mean().item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
if args.pl and epoch > pretrained_epoch:
loss_rpn_cls_s_fake = rpn_loss_cls_s_fake.mean().item()
loss_rpn_box_s_fake = rpn_loss_box_s_fake.mean().item()
loss_rcnn_cls_s_fake = RCNN_loss_cls_s_fake.mean().item()
loss_rcnn_box_s_fake = RCNN_loss_bbox_s_fake.mean().item()
fg_cnt_s_fake = torch.sum(rois_label_s_fake.data.ne(0))
bg_cnt_s_fake = rois_label_s_fake.data.numel() - fg_cnt_s_fake
if args.target_like:
loss_rpn_cls_t_fake = rpn_loss_cls_t_fake.mean().item()
loss_rpn_box_t_fake = rpn_loss_box_t_fake.mean().item()
loss_rcnn_cls_t_fake = RCNN_loss_cls_t_fake.mean().item()
loss_rcnn_box_t_fake = RCNN_loss_bbox_t_fake.mean().item()
fg_cnt_t_fake = torch.sum(rois_label_t_fake.data.ne(0))
bg_cnt_t_fake = rois_label_t_fake.data.numel() - fg_cnt_t_fake
# dloss_s_fake = dloss_s_fake.mean().item()
# dloss_t_fake = dloss_t_fake.mean().item()
# dloss_s_p_fake = dloss_s_p_fake.mean().item()
# dloss_t_p_fake = dloss_t_p_fake.mean().item()
else:
loss_rpn_cls = rpn_loss_cls.item()
loss_rpn_box = rpn_loss_box.item()
loss_rcnn_cls = RCNN_loss_cls.item()
loss_rcnn_box = RCNN_loss_bbox.item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
if args.conf:
loss_conf = conf_loss.item()
if args.pl and epoch > pretrained_epoch:
loss_rpn_cls_s_fake = rpn_loss_cls_s_fake.item()
loss_rpn_box_s_fake = rpn_loss_box_s_fake.item()
loss_rcnn_cls_s_fake = RCNN_loss_cls_s_fake.item()
loss_rcnn_box_s_fake = RCNN_loss_bbox_s_fake.item()
fg_cnt_s_fake = torch.sum(rois_label_s_fake.data.ne(0))
                            bg_cnt_s_fake = rois_label_s_fake.data.numel() - fg_cnt_s_fake
if args.target_like:
loss_rpn_cls_t_fake = rpn_loss_cls_t_fake.item()
loss_rpn_box_t_fake = rpn_loss_box_t_fake.item()
loss_rcnn_cls_t_fake = RCNN_loss_cls_t_fake.item()
loss_rcnn_box_t_fake = RCNN_loss_bbox_t_fake.item()
fg_cnt_t_fake = torch.sum(rois_label_t_fake.data.ne(0))
bg_cnt_t_fake = rois_label_t_fake.data.numel() - fg_cnt_t_fake
print(
"[session %d][epoch %2d][iter %4d/%4d] lr: %.2e, loss: %.4f, eta: %s"
% (
args.session,
epoch,
step,
iters_per_epoch,
lr,
loss_temp,
estimate_time,
)
)
print(
"\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start)
)
print(
"\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f"
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box)
)
if args.pl and epoch > pretrained_epoch:
print("\t\t\tfg/bg=(%d/%d)" % (fg_cnt_s_fake, bg_cnt_s_fake))
print(
"\t\t\trpn_cls_s_fake: %.4f, rpn_box_s_fake: %.4f, rcnn_cls_s_fake: %.4f, rcnn_box_s_fake %.4f"
% (
loss_rpn_cls_s_fake,
loss_rpn_box_s_fake,
loss_rcnn_cls_s_fake,
loss_rcnn_box_s_fake,
)
)
if args.target_like:
print("\t\t\tfg/bg=(%d/%d)" % (fg_cnt_t_fake, bg_cnt_t_fake))
print(
"\t\t\trpn_cls_t_fake: %.4f, rpn_box_t_fake: %.4f, rcnn_cls_t_fake: %.4f, rcnn_box_t_fake %.4f"
% (
loss_rpn_cls_t_fake,
loss_rpn_box_t_fake,
loss_rcnn_cls_t_fake,
loss_rcnn_box_t_fake,
)
)
if args.conf is True:
print(f"\t\t\tconf loss: {loss_conf:.4}")
if args.use_tfboard:
info = {
"loss": loss_temp,
"loss_rpn_cls": loss_rpn_cls,
"loss_rpn_box": loss_rpn_box,
"loss_rcnn_cls": loss_rcnn_cls,
"loss_rcnn_box": loss_rcnn_box,
"loss_rpn_cls_s_fake": loss_rpn_cls_s_fake,
"loss_rpn_box_s_fake": loss_rpn_box_s_fake,
"loss_rcnn_cls_s_fake": loss_rcnn_cls_s_fake,
"loss_rcnn_box_s_fake": loss_rcnn_box_s_fake,
"loss_rpn_cls_t_fake": loss_rpn_cls_t_fake
if args.target_like is True
else 0,
"loss_rpn_box_t_fake": loss_rpn_box_t_fake
if args.target_like is True
else 0,
"loss_rcnn_cls_t_fake": loss_rcnn_cls_t_fake
if args.target_like is True
else 0,
"loss_rcnn_box_t_fake": loss_rcnn_box_t_fake
if args.target_like is True
else 0,
"loss_conf": loss_conf if args.conf is True else 0,
"conf_gamma": conf_gamma,
}
logger.add_scalars(
"logs_s_{}/losses".format(args.session),
info,
(epoch - 1) * iters_per_epoch + step,
)
loss_temp = 0
start = time.time()
student_save_name = os.path.join(
output_dir,
"conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_"
"thresh_{}_lambda_{}_lam2_{}_student_target_{}_session_{}_epoch_{}_step_{}.pth".format(
args.conf,
args.conf_gamma,
args.source_like,
args.aug,
args.target_like,
args.pretrained_epoch,
args.pl,
args.threshold,
args.lam,
args.lam2,
args.dataset_t,
args.session,
epoch,
step,
),
)
save_checkpoint(
{
"session": args.session,
"epoch": epoch + 1,
"model": student_fasterRCNN.mumt_train.pyodule.state_dict()
if args.mGPUs
else student_fasterRCNN.state_dict(),
"optimizer": student_optimizer.state_dict(),
"pooling_mode": cfg.POOLING_MODE,
"class_agnostic": args.class_agnostic,
},
student_save_name,
)
print("save student model: {}".format(student_save_name))
teacher_save_name = os.path.join(
output_dir,
"conf_{}_conf_gamma_{}_source_like_{}_aug_{}_target_like_{}_pe_{}_pl_{}_"
"thresh_{}_lambda_{}_lam2_{}_teacher_target_{}_session_{}_epoch_{}_step_{}.pth".format(
args.conf,
args.conf_gamma,
args.source_like,
args.aug,
args.target_like,
args.pretrained_epoch,
args.pl,
args.threshold,
args.lam,
args.lam2,
args.dataset_t,
args.session,
epoch,
step,
),
)
save_checkpoint(
{
"session": args.session,
"epoch": epoch + 1,
"model": teacher_fasterRCNN.mumt_train.pyodule.state_dict()
if args.mGPUs
else teacher_fasterRCNN.state_dict(),
"pooling_mode": cfg.POOLING_MODE,
"class_agnostic": args.class_agnostic,
},
teacher_save_name,
)
print("save teacher model: {}".format(teacher_save_name))
epoch_end = time.time()
print("epoch cost time: {} min".format((epoch_end - epoch_start) / 60.0))
# cmd = (
# f"python test_net_global_local.py --dataset {args.dataset_t} --net {args.net}"
# f" --load_name {student_save_name}"
# )
# print("cmd: ", cmd)
# cmd = [i.strip() for i in cmd.split(" ") if len(i.strip()) > 0]
# try:
# proc = subprocess.Popen(cmd)
# proc.wait()
# except (KeyboardInterrupt, SystemExit):
# pass
# cmd = (
# f"python test_net_global_local.py --dataset {args.dataset_t} --net {args.net}"
# f" --load_name {teacher_save_name}"
# )
# print("cmd: ", cmd)
# cmd = [i.strip() for i in cmd.split(" ") if len(i.strip()) > 0]
# try:
# proc = subprocess.Popen(cmd)
# proc.wait()
# except (KeyboardInterrupt, SystemExit):
# pass
if args.use_tfboard:
logger.close()
|
# NOTE: operator.div existed only in Python 2; truediv works on both 2 and 3.
from operator import add, mul as multiply, truediv as divide,\
    mod, pow as exponent, sub as subt
# add = lambda a, b: a + b
# multiply = lambda a, b: a * b
# divide = lambda a, b: a / b
# mod = lambda a, b: a % b
# exponent = lambda a, b: a ** b
# subt = lambda a, b: a - b
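# Example usage of the aliased operator functions (illustrative):
#   add(2, 3)        # 5
#   divide(7, 2)     # 3.5
#   exponent(2, 10)  # 1024
#   subt(5, 8)       # -3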
|
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/documents', "https://www.googleapis.com/auth/drive"]
# The ID of a sample document.
DOCUMENT_ID = '1qOQmwivV1PaSI7wqx3zH_sRiUx4oHGuLO2gugoAvIfQ'
def main():
"""Shows basic usage of the Docs API.
Prints the title of a sample document.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('docs', 'v1', credentials=creds)
vols = [
# 1,
# 2
# 3
# 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
# 22, 23, 24, 25, 26, 27, 28, 29, 30,
# 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
# 41,
42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54
]
results = {}
for vol in vols:
print(vol)
title = str(vol).zfill(2)
body = {
'title': title
}
doc = service.documents() \
.create(body=body).execute()
print('Created document with title: {0}'.format(
doc.get('title')))
results[str(vol).zfill(2)] = doc.get("documentId")
print(results)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 16:01:17 2019
@author: Administrator
"""
import numpy as np
import matplotlib.pyplot as plt
my_bid = 215
my_bid_cnt = 3
op_bid_start = 180
op_bid_end = 220
op_bid_cnt = 1
bid_step = 0.1
my_offer = 215
def calc_base(mb,mbc,opbs,opbe,opbc,bid_step):
x = np.arange(opbs,opbe,bid_step)
y = (x*opbc +mb*mbc)/(mbc+opbc)
y = y*0.6 + 88
return x,y
def calc_score(my_offer,base):
temp = my_offer - base
for i in range(len(temp)):
if temp[i]<0:
temp[i] *= -0.8
return temp
#def get_base(bids):
# return np.dot(bid.T[0],bid.T[1])/np.sum(bid.T[1])
x,y = calc_base(my_bid,my_bid_cnt,op_bid_start,op_bid_end,op_bid_cnt,bid_step)
z = calc_score(my_offer,y)
fig = plt.figure()
ax = plt.subplot(211)
ax.plot(x,z,label='diff')
ax.legend()
ax.set_title("d")
bx = plt.subplot(212)
bx.plot(x,y,label='base')
bx.legend()
#print(calc_base(my_bid,my_bid_cnt,op_bid_start,op_bid_end,op_bid_cnt,bid_step))
|
from flask import Flask, request, Response
from rdflib import Graph, URIRef
app = Flask(__name__)
import pickle
import nif_system
import json
#fullText = URIRef("http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#isString")
#entityMention = URIRef("http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#anchorOf")
mimetype='application/x-turtle'
def normalizeWeights(fw):
total=sum(fw.values())
for k in fw:
fw[k]/=total
return fw
month='200712'
timePickle=pickle.load(open(month + '_agg.p', 'rb'))
@app.route("/", methods = ['POST'])
def run():
global num
num+=1
g=Graph()
inputRDF=request.stream.read()
#w.write(str(inputRDF) + '\n')
g.parse(data=inputRDF, format="n3")
#### DEFAULTS ####
memory=1 # By default, cross-document knowledge is ON
iterations=2 # By default, rereading is ON (2 iterations)
factorWeights={'wc':0.525,'wss': 0.325, 'wa': 0.05, 'wr':0.05, 'wt': 0.05}
limits={'l1':0.375, 'l2': 0.54}
lcoref=True
order=True
tp=True
N=10
#### end DEFAULTS ####
#### PARAMETERS ####
args=request.args
if args.get('crossdoc'): # Check if the cross-document bool has been supplied
memory=int(args.get('crossdoc'))
if args.get('iter'): # Check if iterations number has been supplied
iterations=int(args.get('iter'))
if args.get('lcoref') and int(args.get('lcoref'))==0:
lcoref=False
factorWeights['wr']=0.0
if args.get('order') and int(args.get('order'))==0:
order=False
if args.get('tp') and int(args.get('tp'))==0:
tp=False
factorWeights['wt']=0.0
    if args.get('month'):
        month = args.get('month')  # NOTE: local only; the pickle above is loaded once at import time
for k in factorWeights.keys(): # Check if any weight is explicitly modified
if args.get(k):
factorWeights[k]=float(args.get(k))
factorWeights=normalizeWeights(factorWeights)
    for limit in ['l1', 'l2']:
        if args.get(limit):
            limits[limit] = float(args.get(limit))
    if args.get('N'):
        N = int(args.get('N'))
#### end PARAMETERS ####
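    # Example request (illustrative):
    #   POST /?crossdoc=0&iter=1&lcoref=0&wc=0.6&l1=0.4&N=5
    # disables cross-document memory and local coreference, runs one iteration,
    # and re-normalizes the factor weights around wc=0.6.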
print("Request %d came! %d iterations, Memory: %r, Local coreference: %r, Order: %r, Time popularity: %r, scoring weights: %s, limits: %s, N: %d" % (num, iterations, memory>0, lcoref, order, tp, json.dumps(factorWeights), json.dumps(limits), N))
#print("Normalized weights: " + factorWeights)
global timePickle
if not tp:
timePickle={}
global lastN
if memory==0:
lastN=[]
g,lastN=nif_system.run(g, factorWeights, timePickle, iterations, lcoref, order, lastN, limits, N)
if args.get('debug'):
g.serialize(destination='res' + str(num) + '.rdf', format='turtle')
#w=open('res' + str(num) + '.rdf', 'w')
#w.write(outputRDF)
outputRDF=g.serialize(format='turtle')
return Response(outputRDF, mimetype=mimetype)
if __name__ == "__main__":
global num
num=-1
global lastN
lastN=[]
app.run()
|
class FunctionalError(Exception):
"""Base class for all 'normal' errors of the API"""
pass
# ---
class ExistenceError(FunctionalError):
pass
class KeyDoesNotExist(ExistenceError):
pass
class KeyAlreadyExists(ExistenceError):
pass
class KeystoreDoesNotExist(ExistenceError):
pass
class KeystoreAlreadyExists(ExistenceError):
pass
class KeystoreMetadataDoesNotExist(KeystoreDoesNotExist):
pass
# No KeystoreMetadataAlreadyExists needed for now
# ---
class AuthenticationError(FunctionalError):
pass # E.g. the "secret" provided with an API request doesn't match that stored
class AuthorizationError(FunctionalError):
pass # E.g. no authorization has been pre-obtained before a trustee.decrypt_with_private_key()
class OperationNotSupported(FunctionalError):
pass # E.g. listing keypairs from a big SQL database
# ---
class CryptographyError(FunctionalError):
pass
class EncryptionError(CryptographyError):
pass
class DecryptionError(CryptographyError):
pass
class DecryptionIntegrityError(DecryptionError):
pass # E.g. MAC tags check failed
class SignatureCreationError(CryptographyError):
pass
class SignatureVerificationError(CryptographyError):
pass
class KeyLoadingError(CryptographyError):
pass # Used e.g. when decrypting a private key with a passphrase fails
# ---
class ValidationError(FunctionalError):
pass # Base for all errors related to corrupted data and invalid config tree
class SchemaValidationError(ValidationError):
pass # When data doesn't respect json format, or an additional python-schema, or some additional security constraints
|
from django.conf import settings
MERCHANT_ID = getattr(settings, 'DJANGO_W1_MERCHANT_ID', '')
SIGN_METHOD = getattr(settings, 'DJANGO_W1_SIGN_METHOD', None)
SECRET_KEY = getattr(settings, 'DJANGO_W1_SECRET_KEY', '')
SUCCESS_URL = getattr(settings, 'DJANGO_W1_SUCCESS_URL', '')
FAIL_URL = getattr(settings, 'DJANGO_W1_FAIL_URL', '')
CURRENCY_DEFAULT = getattr(settings, 'DJANGO_W1_CURRENCY_DEFAULT', '643')
FORM_ACTION_URL = getattr(
settings, 'DJANGO_W1_FORM_ACTION_URL',
'https://wl.walletone.com/checkout/checkout/Index')
|
"""
CCT 建模优化代码
二维曲线段
作者:赵润晓
日期:2021年4月27日
"""
import multiprocessing  # since v0.1.1 multi-threaded computation
import time  # since v0.1.1 timing statistics
from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union
import matplotlib.pyplot as plt
import math
import random  # since v0.1.1 random numbers
import sys
import os  # since v0.1.1 CPU core count
import numpy
from scipy.integrate import solve_ivp  # since v0.1.1 ODE45
import warnings  # since v0.1.1 deprecation warnings
from packages.point import *
from packages.constants import *
from packages.base_utils import BaseUtils
from packages.local_coordinate_system import LocalCoordinateSystem
class Line2:
"""
二维 xy 平面的一条有方向的连续曲线段,可以是直线、圆弧
本类包含 3 个抽象方法,需要实现:
get_length 获得曲线长度
point_at 从曲线起点出发,s 位置处的点
direct_at 从曲线起点出发,s 位置处曲线方向
说明:这个类主要用于构建 “理想轨道”,理想轨道的用处很多:
1. 获取理想轨道上的理想粒子;
2. 研究理想轨道上的磁场分布
"""
def get_length(self) -> float:
"""
获得曲线的长度
Returns 曲线的长度
-------
"""
raise NotImplementedError
def point_at(self, s: float) -> P2:
"""
获得曲线 s 位置处的点 (x,y)
即从曲线起点出发,运动 s 长度后的位置
Parameters
----------
s 长度量,曲线上 s 位置
Returns 曲线 s 位置处的点 (x,y)
-------
"""
raise NotImplementedError
def direct_at(self, s: float) -> P2:
"""
获得 s 位置处,曲线的方向
Parameters
----------
s 长度量,曲线上 s 位置
Returns s 位置处,曲线的方向
refactor v0.1.3 添加粗略实现
-------
"""
delta = 1e-7
p1 = self.point_at(s)
p2 = self.point_at(s+delta)
return (p2-p1).normalize()
def right_hand_side_point(self, s: float, d: float) -> P2:
"""
位于 s 处的点,它右手边 d 处的点
1 5 10 15
-----------------@------>
|2
|4 *
如上图,一条直线,s=15,d=4 ,即点 @ 右手边 4 距离处的点 *
说明:这个方法,主要用于四极场、六极场的计算,因为需要涉及轨道横向位置的磁场
Parameters
----------
s 长度量,曲线上 s 位置
d 长度量,d 距离远处
Returns 位于 s 处的点,它右手边 d 处的点
-------
"""
ps = self.point_at(s)
        # direction at s
ds = self.direct_at(s)
return ps + ds.copy().rotate(-math.pi / 2).change_length(d)
def left_hand_side_point(self, s: float, d: float) -> P2:
"""
位于 s 处的点,它左手边 d 处的点
说明见 right_hand_side_point 方法
Parameters
----------
s 长度量,曲线上 s 位置
d 长度量,d 距离远处
Returns 位于 s 处的点,它左手边 d 处的点
-------
"""
return self.right_hand_side_point(s, -d)
    # ------------------------------ endpoint properties -------------------- #
def point_at_start(self) -> P2:
"""
获得曲线 line 起点位置
"""
return self.point_at(0.0)
def point_at_end(self) -> P2:
"""
获得曲线 line 终点位置
"""
return self.point_at(self.get_length())
def point_at_middle(self) -> P2:
"""
获得曲线 line 中点位置
"""
return self.point_at(self.get_length()/2)
def direct_at_start(self) -> P2:
"""
获得曲线 line 起点方向
"""
return self.direct_at(0.0)
def direct_at_end(self) -> P2:
"""
获得曲线 line 终点方向
"""
return self.direct_at(self.get_length())
def direct_at_middle(self) -> P2:
"""
获得曲线 line 中点方向
"""
return self.direct_at(self.get_length()/2)
    # ------------------------------ translation -------------------- #
def __add__(self, v2: P2) -> "Line2":
"""
Line2 的平移, v2 表示移动的方向和距离
Parameters
----------
v2 二维向量
Returns 平移后的 Line2
-------
"""
class MovedLine2(Line2):
def __init__(self, hold):
self.hold = hold
def get_length(self) -> float:
return self.hold.get_length()
def point_at(self, s: float) -> P2:
return self.hold.point_at(s) + v2
def direct_at(self, s: float) -> P2:
return self.hold.direct_at(s)
return MovedLine2(self)
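    # Example (illustrative): shifting a line by 1 unit along +x:
    #   moved = line + P2(1.0, 0.0)
    #   moved.point_at_start() == line.point_at_start() + P2(1.0, 0.0)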
    # ------------------------------ discretization ------------------------#
def disperse2d(self, step: float = 1.0 * MM) -> List[P2]:
"""
二维离散轨迹点
Parameters
----------
step 步长
Returns 二维离散轨迹点
-------
"""
number: int = int(math.ceil(self.get_length() / step)
) + 1 # 这里要加 1,调整于 2021年4月28日
return [
self.point_at(s) for s in BaseUtils.linspace(0, self.get_length(), number)
]
def disperse2d_with_distance(
self, step: float = 1.0 * MM
) -> List[ValueWithDistance[P2]]:
"""
同方法 disperse2d
每个离散点带有距离,返回值是 ValueWithDistance[P2] 的数组
"""
number: int = int(math.ceil(self.get_length() / step)
) + 1 # 这里要加 1,调整于 2021年4月28日
return [
ValueWithDistance(self.point_at(s), s)
for s in BaseUtils.linspace(0, self.get_length(), number)
]
def disperse3d(
self,
p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),
step: float = 1.0 * MM,
) -> List[P3]:
"""
三维离散轨迹点,其中第三维 z == 0.0
Parameters
----------
step 步长
p2_t0_p3:二维点 P2 到三维点 P3 转换函数,默认 z=0
Returns 三维离散轨迹点
-------
"""
return [p.to_p3(p2_t0_p3) for p in self.disperse2d(step=step)]
def disperse3d_with_distance(
self,
p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),
step: float = 1.0 * MM,
) -> List[ValueWithDistance[P3]]:
"""
同 disperse3d
每个离散点带有距离,返回值是 ValueWithDistance[P3] 的数组
"""
return [
ValueWithDistance(vp2.value.to_p3(p2_t0_p3), vp2.distance)
for vp2 in self.disperse2d_with_distance(step=step)
]
    def __str__(self) -> str:
        return f"Line2[start {self.point_at_start()}, length {self.get_length()}]"
class StraightLine2(Line2):
"""
二维有向直线段,包含三个参数:长度、方向、起点
"""
def __init__(self, length: float, direct: P2, start_point: P2):
self.length = float(length)
self.direct = direct.normalize()
self.start_point = start_point
def get_length(self) -> float:
"""
二维有向直线段的长度
"""
return self.length
def point_at(self, s: float) -> P2:
"""
二维有向直线段 s 位置点
"""
return self.start_point + self.direct.copy().change_length(s)
def direct_at(self, s: float) -> P2:
"""
二维有向直线段 s 位置方向
"""
return self.direct
    def __str__(self) -> str:
        return f"StraightLine2[start {self.start_point}, direction {self.direct}, length {self.length}]"
def __repr__(self) -> str:
return self.__str__()
def position_of(self, p: P2) -> int:
"""
求点 p 相对于直线段的方位
返回值:
1 在右侧
-1 在左侧
0 在直线段所在直线上
因为直线段 self 是有方向的,所以可以确定左侧还是右侧
这个函数用于确定 trajectory 当前是左偏还是右偏 / 逆时针偏转还是顺时针偏转
#
--------------&---->
$
如上图,对于点 # ,在直线左侧,返回 -1
对于点 & 在直线上,返回 0
对于点 $,在直线右侧,返回 1
"""
        p0 = self.start_point  # start point of the segment
        d = self.direct  # direction of the line
        p0_t0_p: P2 = p - p0  # vector from p0 to p
        k: float = d * p0_t0_p  # scalar projection
        project: P2 = k * d  # projection point
        vertical_line: P2 = p0_t0_p - project  # perpendicular component from p to the line
        # zero perpendicular length means p lies on the line
        if vertical_line == P2.zeros():
            return 0
        # normalize
        vertical_line = vertical_line.normalize()
        right_hand: P2 = d.rotate(
            BaseUtils.angle_to_radian(-90)).normalize()  # right-hand side
if vertical_line == right_hand:
return 1
else:
return -1
def straight_line_equation(self) -> Tuple[float, float, float]:
"""
返回直线的一般式方程 A B C
Ax + By + C = 0
注意结果不唯一,不能用于比较
具体计算方法如下 from https://www.zybang.com/question/7699174d2637a60b3db85a4bc2e82c95.html
当x1=x2时,直线方程为x-x1=0
当y1=y2时,直线方程为y-y1=0
当x1≠x2,y1≠y2时,
直线的斜率k=(y2-y1)/(x2-x1)
故直线方程为y-y1=(y2-y1)/(x2-x1)×(x-x1)
即x2y-x1y-x2y1+x1y1=(y2-y1)x-x1(y2-y1)
即为(y2-y1)x-(x2-x1)y-x1(y2-y1)+(x2-x1)y1=0
即为(y2-y1)x-(x2-x1)y-x1y2+x2y1=0
A = Y2 - Y1
B = X1 - X2
C = X2*Y1 - X1*Y2
since v0.1.3
"""
if BaseUtils.equal(self.direct.length(), 0, err=1e-10):
            raise ValueError(
                "straight_line_equation: the direction vector has zero length; cannot compute the general-form equation")
x1 = self.start_point.x
y1 = self.start_point.y
x2 = (self.direct+self.start_point).x
y2 = (self.direct+self.start_point).y
if BaseUtils.equal(x1, x2, err=1e-10):
return 1.0, 0.0, -x1
if BaseUtils.equal(y1, y2, err=1e-10):
return 0.0, 1.0, -y1
return (y2-y1), (x1-x2), (x2*y1-x1*y2)
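    # Example (illustrative): for the line through (0, 0) and (1, 2),
    # straight_line_equation() returns coefficients proportional to (2, -1, 0),
    # i.e. the line 2x - y = 0 (the triple A, B, C is not unique):
    #   line = StraightLine2.create_by(P2(0, 0), P2(1, 2))
    #   A, B, C = line.straight_line_equation()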
@staticmethod
def intersecting_point(pa: P2, va: P2, pb: P2, vb: P2) -> Tuple[P2, float, float]:
"""
求两条直线 a 和 b 的交点
pa 直线 a 上的一点
va 直线 a 方向
pb 直线 b 上的一点
vb 直线 b 方向
返回值为交点 cp,交点在直线 a 和 b 上的坐标 ka kb
即 cp = pa + va * ka = pb + vb * kb
since v0.1.3
"""
        # the direction vectors must not be zero
        if BaseUtils.equal(va.length(), 0.0, err=1e-10):
            raise ValueError(
                f"intersecting_point: direction vector va has zero length. pa={pa},pb={pb},va={va},vb={vb}")
        if BaseUtils.equal(vb.length(), 0.0, err=1e-10):
            raise ValueError(
                f"intersecting_point: direction vector vb has zero length. pa={pa},pb={pb},va={va},vb={vb}")
        # check whether the lines are parallel
        if va.normalize() == vb.normalize() or (va.normalize()+vb.normalize()) == P2.origin():
            print(
                f"intersecting_point: the two lines are parallel; the result may be meaningless. pa={pa},pb={pb},va={va},vb={vb}")
        # pa and pb may themselves be the intersection; no short-circuit, go through the full computation
        # if pa==pb:
        #     return pa,0.0,0.0
        # compute the intersection point
        # to avoid division by zero, the lines are converted to general form
line_a = StraightLine2(length=1.0, direct=va, start_point=pa)
line_b = StraightLine2(length=1.0, direct=vb, start_point=pb)
A1, B1, C1 = line_a.straight_line_equation()
A2, B2, C2 = line_b.straight_line_equation()
cpy = (A1*C2-A2*C1)/(A2*B1-A1*B2)
cpx = -(B1*cpy+C1)/A1 if A1 != 0 else -(B2*cpy+C2)/A2
cp = P2(cpx, cpy)
ka = (cp.x-pa.x)/va.x if va.x != 0 else (cp.y-pa.y)/va.y
kb = (cp.x-pb.x)/vb.x if vb.x != 0 else (cp.y-pb.y)/vb.y
return cp, ka, kb
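    # Example (illustrative): the lines x = 0 and y = 0 meet at the origin:
    #   cp, ka, kb = StraightLine2.intersecting_point(
    #       pa=P2(0, 1), va=P2(0, -1),   # the vertical line x = 0
    #       pb=P2(1, 0), vb=P2(-1, 0),   # the horizontal line y = 0
    #   )
    #   # cp == P2(0, 0), ka == 1.0, kb == 1.0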
@staticmethod
def is_on_right(view_point: P2, view_direct: P2, viewed_point: P2) -> int:
"""
查看点 viewed_point 是不是在右边
观察点为 view_point 观测方向为 view_direct
返回值
1 在右侧
0 在正前方或者正后方
-1 在左侧
"""
right_direct = view_direct.copy().rotate(BaseUtils.angle_to_radian(-90))
relative_position = viewed_point-view_point
k = right_direct*relative_position
if k > 0:
return 1
elif k < 0:
return -1
else:
return 0
@staticmethod
def calculate_k_b(p1: P2, p2: P2) -> Tuple[float]:
"""
求过两点的直线方程
y = kx + d
k 和 d 的值
"""
k = (p2.y-p1.y)/(p2.x-p1.x)
b = p2.y - k * p2.x
return (k, b)
@staticmethod
def create_by(start_point:P2,end_point:P2)->'StraightLine2':
"""
两点创造直线
"""
return StraightLine2(
length=(end_point-start_point).length(),
direct=(end_point-start_point),
start_point=start_point
)
class ArcLine2(Line2):
"""
二维有向圆弧段
借助极坐标的思想来描述圆弧
基础属性: 圆弧的半径 radius、圆弧的圆心 center
起点描述:极坐标 phi 值
弧长:len = radius * totalPhi
起点start_point、圆心center、半径radius、旋转方向clockwise、角度totalPhi 五个自由度
起点弧度值 starting_phi、起点处方向、半径radius、旋转方向clockwise、角度totalPhi 五个自由度
如图: *1 表示起点方向,@ 是圆心,上箭头 ↑ 是起点处方向,旋转方向是顺时针,*5 是终点,因此角度大约是 80 deg
*5
*4
*3
*2
*1 ↑ @
"""
def __init__(
self,
starting_phi: float,
center: P2,
radius: float,
total_phi: float,
clockwise: bool,
):
self.starting_phi = starting_phi
self.center = center
self.radius = radius
self.total_phi = total_phi
self.clockwise = clockwise
self.length = radius * total_phi
def get_length(self) -> float:
"""
二维有向圆弧段的长度
"""
return self.length
def point_at(self, s: float) -> P2:
"""
二维有向圆弧段的 s 位置点
"""
phi = s / self.radius
current_phi = (
self.starting_phi - phi if self.clockwise else self.starting_phi + phi
)
uc = ArcLine2.unit_circle(current_phi)
return uc.change_length(self.radius) + self.center
def direct_at(self, s: float) -> P2:
"""
二维有向圆弧段的 s 位置方向
"""
phi = s / self.radius
current_phi = (
self.starting_phi - phi if self.clockwise else self.starting_phi + phi
)
uc = ArcLine2.unit_circle(current_phi)
return uc.rotate(-math.pi / 2 if self.clockwise else math.pi / 2)
@staticmethod
def create(
start_point: P2,
start_direct: P2,
radius: float,
clockwise: bool,
total_deg: float,
) -> "ArcLine2":
"""
Create a 2D directed arc segment from the start point, start direction, radius, and turn angle
"""
center: P2 = start_point + start_direct.copy().rotate(
-math.pi / 2 if clockwise else math.pi / 2
).change_length(radius)
starting_phi = (start_point - center).angle_to_x_axis()
total_phi = BaseUtils.angle_to_radian(total_deg)
return ArcLine2(starting_phi, center, radius, total_phi, clockwise)
@staticmethod
def unit_circle(phi: float) -> P2:
"""
Unit circle (polar coordinates).
Returns the Cartesian coordinates (x, y) of the point with polar coordinates (r=1.0, phi=phi).
Parameters
----------
phi : the polar angle
Returns a point on the unit circle
-------
"""
x = math.cos(phi)
y = math.sin(phi)
return P2(x, y)
def __str__(self) -> str:
clock_wise_str = "clockwise" if self.clockwise else "counterclockwise"
return (
f"Arc[start {self.point_at_start()},"
+ f"direction {self.direct_at_start()},{clock_wise_str},radius {self.radius},angle {self.total_phi}]"
)
)
def __repr__(self) -> str:
return self.__str__()
@staticmethod
def as_arc_line2(anything) -> "ArcLine2":
"""
Looks like a type cast but actually does nothing;
it just lets the IDE infer the return type for code completion.
since v0.1.3
"""
return anything
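# Usage sketch (hypothetical values): a quarter turn of radius 1 starting at the
# origin heading along +x, turning counterclockwise, ends near (1, 1):
# arc = ArcLine2.create(P2(0, 0), P2(1, 0), radius=1.0, clockwise=False, total_deg=90)
# arc.get_length() is pi/2 and arc.point_at(arc.get_length()) is approximately P2(1, 1)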
|
# -*- coding: utf-8 -*-
"""dfVFS helpers."""
from dfvfs.helpers import command_line as dfvfs_command_line
from dfvfs.helpers import volume_scanner as dfvfs_volume_scanner
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as dfvfs_resolver
from dtformats import file_system
class DFVFSFileSystemHelper(
file_system.FileSystemHelper, dfvfs_volume_scanner.VolumeScanner):
"""dfVFS file system helper."""
def __init__(self, mediator):
"""dfVFS file system helper.
Args:
mediator (dfvfs.VolumeScannerMediator): mediator.
"""
super(DFVFSFileSystemHelper, self).__init__()
self._file_system = None
self._parent_path_spec = None
self._mediator = mediator
def BasenamePath(self, path):
"""Determines the basename of the path.
Args:
path (str): path.
Returns:
str: basename of the path.
"""
return self._file_system.BasenamePath(path)
def CheckFileExistsByPath(self, path):
"""Checks if a specific file exists.
Args:
path (str): path of the file.
Returns:
bool: True if the file exists, False otherwise.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
self._file_system.type_indicator, location=path,
parent=self._parent_path_spec)
return self._file_system.FileEntryExistsByPathSpec(path_spec)
def DirnamePath(self, path):
"""Determines the directory name of the path.
Args:
path (str): path.
Returns:
str: directory name of the path or None.
"""
return self._file_system.DirnamePath(path)
def GetFileSizeByPath(self, path):
"""Retrieves the size of a specific file.
Args:
path (str): path of the file.
Returns:
int: size of the file in bytes or None if not available.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
self._file_system.type_indicator, location=path,
parent=self._parent_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
return file_entry.size
def JoinPath(self, path_segments):
"""Joins the path segments into a path.
Args:
path_segments (list[str]): path segments.
Returns:
str: joined path segments prefixed with the path separator.
"""
return self._file_system.JoinPath(path_segments)
def ListDirectory(self, path):
"""Lists the entries in a directory.
Args:
path (str): path of the directory.
Yields:
str: name of a directory entry.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
self._file_system.type_indicator, location=path,
parent=self._parent_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
if file_entry:
for sub_file_entry in file_entry.sub_file_entries:
yield sub_file_entry.name
def OpenFileByPath(self, path):
"""Opens a specific file.
Args:
path (str): path of the file.
Returns:
file: file-like object of the file.
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
self._file_system.type_indicator, location=path,
parent=self._parent_path_spec)
return self._file_system.GetFileObjectByPathSpec(path_spec)
def OpenFileSystem(self, path_spec):
"""Opens a file system.
Args:
path_spec (dfvfs.PathSpec): file system path specification.
"""
self._file_system = dfvfs_resolver.Resolver.OpenFileSystem(path_spec)
self._parent_path_spec = path_spec.parent
def SplitPath(self, path):
"""Splits the path into path segments.
Args:
path (str): path.
Returns:
list[str]: path segments without the root path segment, which is
an empty string.
"""
return self._file_system.SplitPath(path)
def SetDFVFSBackEnd(back_end):
"""Sets the dfVFS back-end.
Args:
back_end (str): dfVFS back-end.
"""
if back_end == 'APM':
dfvfs_definitions.PREFERRED_APM_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_APM)
elif back_end == 'EXT':
dfvfs_definitions.PREFERRED_EXT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_EXT)
elif back_end == 'FAT':
dfvfs_definitions.PREFERRED_FAT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_FAT)
elif back_end == 'GPT':
dfvfs_definitions.PREFERRED_GPT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_GPT)
elif back_end == 'HFS':
dfvfs_definitions.PREFERRED_HFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_HFS)
elif back_end == 'NTFS':
dfvfs_definitions.PREFERRED_NTFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_NTFS)
elif back_end == 'TSK':
dfvfs_definitions.PREFERRED_APM_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_EXT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_FAT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_GPT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION)
dfvfs_definitions.PREFERRED_HFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_NTFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
def AddDFVFSCLIArguments(argument_parser):
"""Adds dfVFS command line arguments.
Args:
argument_parser (argparse.ArgumentParser): argument parser.
"""
argument_parser.add_argument(
'--back_end', '--back-end', dest='back_end', action='store',
metavar='NTFS', default=None, help='preferred dfVFS back-end.')
argument_parser.add_argument(
'--image', dest='image', action='store', type=str, default=None,
help='path of the storage media image.')
argument_parser.add_argument(
'--partitions', '--partition', dest='partitions', action='store',
type=str, default=None, help=(
'Define partitions to be processed. A range of partitions can be '
'defined as: "3..5". Multiple partitions can be defined as: "1,3,5" '
'(a list of comma separated values). Ranges and lists can also be '
'combined as: "1,3..5". The first partition is 1. All partitions '
'can be specified with: "all".'))
argument_parser.add_argument(
'--snapshots', '--snapshot', dest='snapshots', action='store', type=str,
default=None, help=(
'Define snapshots to be processed. A range of snapshots can be '
'defined as: "3..5". Multiple snapshots can be defined as: "1,3,5" '
'(a list of comma separated values). Ranges and lists can also be '
'combined as: "1,3..5". The first snapshot is 1. All snapshots can '
'be specified with: "all".'))
argument_parser.add_argument(
'--volumes', '--volume', dest='volumes', action='store', type=str,
default=None, help=(
'Define volumes to be processed. A range of volumes can be defined '
'as: "3..5". Multiple volumes can be defined as: "1,3,5" (a list '
'of comma separated values). Ranges and lists can also be combined '
'as: "1,3..5". The first volume is 1. All volumes can be specified '
'with: "all".'))
# TODO: add image path
def ParseDFVFSCLIArguments(options):
"""Parses dfVFS command line arguments.
Args:
options (argparse.Namespace): command line arguments.
Returns:
DFVFSFileSystemHelper: dfVFS file system helper or None if no file system
could be found.
"""
SetDFVFSBackEnd(options.back_end)
mediator = dfvfs_command_line.CLIVolumeScannerMediator()
volume_scanner_options = dfvfs_volume_scanner.VolumeScannerOptions()
volume_scanner_options.partitions = mediator.ParseVolumeIdentifiersString(
options.partitions)
if options.snapshots == 'none':
volume_scanner_options.snapshots = ['none']
else:
volume_scanner_options.snapshots = mediator.ParseVolumeIdentifiersString(
options.snapshots)
volume_scanner_options.volumes = mediator.ParseVolumeIdentifiersString(
options.volumes)
file_system_helper = DFVFSFileSystemHelper(mediator)
base_path_specs = file_system_helper.GetBasePathSpecs(
options.image, options=volume_scanner_options)
if len(base_path_specs) != 1:
return None
file_system_helper.OpenFileSystem(base_path_specs[0])
return file_system_helper
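# Example wiring (a minimal sketch using only the functions defined above):
# import argparse
# argument_parser = argparse.ArgumentParser()
# AddDFVFSCLIArguments(argument_parser)
# options = argument_parser.parse_args()
# file_system_helper = ParseDFVFSCLIArguments(options)
# if file_system_helper:
#   for name in file_system_helper.ListDirectory('/'):
#     print(name)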
|
def int_list(values):
# Replace every non-int element with 0 (note: bool is a subclass of int).
return [i if isinstance(i, int) else 0 for i in values]
print(int_list(["a", 1, "b", 2, "c", 3]))
|
from math import pi
import numpy as np
def Critical_Stress(height, width, t, sigma_y = 240*10**6, E = 69*10**9, v = 0.33, n = 0.6, alpha = 0.8):
def sigma_cc_over_sigma_y(b, C = 4, t = t, sigma_y = sigma_y, E = E, v = v, n = n, alpha = alpha):
sigma_cc_over_sigma_y = alpha*((C*(pi**2)*E*t**2)/(sigma_y*12*(1-v**2)*b**2))**(1-n)
return sigma_cc_over_sigma_y
area = height*width-(height-2*t)*(width-2*t)
area_no_corners = area-(t**2)*4
side_lengths = np.array([height-2*t, width-2*t])
side_sigma_cc_over_sigma_y = sigma_cc_over_sigma_y(side_lengths)
for i in range(len(side_sigma_cc_over_sigma_y)):
if side_sigma_cc_over_sigma_y[i] >= 1:
side_sigma_cc_over_sigma_y[i] = 1
side_sigma_cc = side_sigma_cc_over_sigma_y*sigma_y
sigma_cc = (2*t*side_lengths[0]*side_sigma_cc[0]+2*t*side_lengths[1]*side_sigma_cc[1])/area_no_corners
return sigma_cc
def Euler_Stress(height, width, t, L, E = 69*10**9, C = 0.25):
area = height*width-(height-2*t)*(width-2*t)
I = (height**3*t)/6+(width**3*t)/6+(width-2*t)*t*((height/2)-t/2)**2
P = C*(pi**2*E*I)/L**2  # Euler buckling load: P = C*pi^2*E*I/L^2
sigma = P/area
return sigma
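# Usage sketch (hypothetical dimensions, SI units): a 50 mm x 50 mm box section
# with 2 mm walls and a 1 m column length:
# print(Critical_Stress(0.05, 0.05, 0.002))
# print(Euler_Stress(0.05, 0.05, 0.002, 1.0))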
|
def odd_one(arr):
for i,x in enumerate(arr):
if x%2!=0:
return i
return -1
'''
Create a method that takes an array/list as an input,
and outputs the index at which the sole odd number is located.
This method should work with arrays with negative numbers. If there are
no odd numbers in the array, then the method should output -1.
Examples:
odd_one([2,4,6,7,10]) # => 3
odd_one([2,16,98,10,13,78]) # => 4
odd_one([4,-8,98,-12,-7,90,100]) # => 4
odd_one([2,4,6,8]) # => -1
'''
|
ft = open("/Volumes/PUBLIC/Everyone/corpus/rakuten-2016-/matome/201002-04.tsv","w")
count = 0
for i in range(2, 5):
f = open("/Volumes/PUBLIC/Everyone/corpus/rakuten-2016-/review/ichiba04_review20100"+str(i)+"_20140221.tsv")
for line in f:
ft.write(line)
f.close()
ft.close()
|
import logging
from model.sileg.designation.designation import Designation
from model.sileg.place.place import Place
from model.sileg.position.position import Position
from model.users.users import User
class SilegModel:
@classmethod
def getEconoPageDataUser(cls, con, userId):
designationIds = Designation.findByUserId(con, [userId])
designations = Designation.findById(con, designationIds)
designationsActive = [d for d in designations if d.out is None and d.description == 'original' and Place.findById(con, [d.placeId])[0].type == 'Catedra' ]
data = {}
for designation in designationsActive:
position = Position.findById(con, [designation.positionId])[0]
place = Place.findById(con, [designation.placeId])[0]
designation.place = place
if position.description not in data:
data[position.description] = {"position":position, "designations":[]}
data[position.description]["designations"].append(designation)
return data
@classmethod
def getEconoPageDataPlace(cls, con, placeId):
designationIds = Designation.findByPlaceId(con, [placeId])
designations = Designation.findById(con, designationIds)
designationsActive = [d for d in designations if d.out is None and d.description == 'original' and Place.findById(con, [d.placeId])[0].type == 'Catedra' ]
data = {}
for designation in designationsActive:
position = Position.findById(con, [designation.positionId])[0]
user = User.findById(con, [designation.userId])[0]
designation.user = user
if position.description not in data:
data[position.description] = {"position":position, "designations":[]}
data[position.description]["designations"].append(designation)
return data
@classmethod
def getUsers(cls, con):
designationIds = Designation.findAll(con)
designations = Designation.findById(con, designationIds)
designationsActive = [d for d in designations if d.out is None and d.description == 'original' and Place.findById(con, [d.placeId])[0].type == 'Catedra' ]
userIds = [d.userId for d in designationsActive]
return User.findById(con, userIds)
@classmethod
def getCathedras(cls, con):
designationIds = Designation.findAll(con)
designations = Designation.findById(con, designationIds)
designationsActive = [d for d in designations if d.out is None and d.description == 'original' and Place.findById(con, [d.placeId])[0].type == 'Catedra' ]
placesIds = [d.placeId for d in designationsActive]
return Place.findById(con, placesIds)
@classmethod
def findPlacesByIds(cls, con, ids):
return Place.findById(con, ids)
@classmethod
def findPositionsByIds(cls, con, ids):
return Position.findById(con, ids)
@classmethod
def findDesignationsByIds(cls, con, ids):
designations = Designation.findById(con, ids)
for i in range(len(designations)):
position = Position.findById(con, [designations[i].positionId])[0]
designations[i].position = position
user = User.findById(con, [designations[i].userId])[0]
designations[i].user = user
place = Place.findById(con, [designations[i].placeId])[0]
designations[i].place = place
return designations
@classmethod
def findUsersByIds(cls, con, ids):
return User.findById(con, ids)
@classmethod
def findPositionsAll(cls, con):
positionIds = Position.findAll(con)
positions = Position.findById(con, positionIds)
return positions
@classmethod
def findPlacesAll(cls, con):
placesIds = Place.findAll(con)
places = Place.findById(con, placesIds)
return places
@classmethod
def findDesignationsBySearch(cls, con, search):
if search is None:
return
designations = Designation.findBySearch(con, search)
for i in range(len(designations)):
position = Position.findById(con, [designations[i].positionId])[0]
designations[i].position = position
user = User.findById(con, [designations[i].userId])[0]
designations[i].user = user
place = Place.findById(con, [designations[i].placeId])[0]
designations[i].place = place
return designations
@classmethod
def persistDesignation(cls, con, designation):
designation.persist(con)
|
from fabric.api import env as _env
TARGET_LIVE = _env.get("live", None)
VIRTUALENV_ROOT = "~/virtualenv"
PROJECT_NAME = "pikapika" if TARGET_LIVE else "pikapika-saber"
REMOTE_PYTHON_EXEC = "python2.7"
MAIN_PACKAGE = "pikapika"
print ("Target: " + PROJECT_NAME)
|
# Generated by Django 2.2.3 on 2019-07-10 09:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('DBStorage', '0011_auto_20190710_0902'),
]
operations = [
migrations.RemoveField(
model_name='giftanswers',
name='giftID',
),
migrations.AddField(
model_name='giftanswers',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AlterField(
model_name='giftanswers',
name='giftArou',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='giftanswers',
name='giftFree',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='giftanswers',
name='giftVael',
field=models.FloatField(blank=True),
),
migrations.AlterField(
model_name='giftanswers',
name='giftWord',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='giftanswers',
name='modID',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='giftanswers',
name='parID',
field=models.CharField(max_length=100),
),
]
|
class Solution:
# @param A : tuple of integers
# @return an integer
def maxSubArray(self, A):
# Kadane's algorithm: at each element, either extend the running subarray or restart
curr_max = A[0]
max_so_far = A[0]
for i in range(1, len(A)):
curr_max = max(curr_max + A[i], A[i])
max_so_far = max(max_so_far, curr_max)
return max_so_far
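# Example: maxSubArray((-2, 1, -3, 4, -1, 2, 1, -5, 4)) == 6 (subarray 4, -1, 2, 1)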
|
import pygame
import time
tank1_image = 'global image1'
tank2_image = 'global image2'
bullet_image = 'global bullet'
class Player():
def __init__(self, x, y, tank_image_file, place, dir_lurd, hp):
self.x = x
self.y = y
self.tank_velocity = 10
self.bullet_velocity = 20
self.dir_lurd = dir_lurd
self.bullet_image_file = 'bullet.png'
self.bulletX = 1000
self.bulletY = 1000
self.tank_image_file = tank_image_file
self.place = place
self.bullet_lurd = [0,0,0,0]
self.bullet_state = "ready"
self.angle = 0
self.width = 500
self.height = 500
self.bullet_length = 0
self.opponents_hp = hp
def get_place(self):
return self.place
def minus_hp(self):
self.opponents_hp = self.opponents_hp - 1
def end_game(self, win, victory, end_font):
pygame.font.init()
if victory:
text = end_font.render('YOU WIN!', True, (255, 255, 255))
else:
text = end_font.render('YOU LOSE!', True, (255, 255, 255))
win.blit(text, (110, 200))
pygame.display.update()
time.sleep(4)
def get_tank_image_file(self):
return self.tank_image_file
def draw(self, win):
global tank1_image, tank2_image
if self.place == 'top':
tank_image = tank1_image
else:
tank_image = tank2_image
win.blit(tank_image, (self.x, self.y))
def rotate_and_draw(self, win):
global tank1_image, tank2_image
if self.place == 'top':
tank1_image = pygame.transform.rotate(tank1_image, self.angle)
win.blit(tank1_image, (self.x, self.y))
else:
tank2_image = pygame.transform.rotate(tank2_image, self.angle)
win.blit(tank2_image, (self.x, self.y))
self.angle = 0
def draw_bullet(self, win):
global bullet_image
if self.bullet_lurd == [1, 0, 0, 0]:
bull_rot_im = pygame.transform.rotate(bullet_image, 90)
win.blit(bull_rot_im, (self.bulletX, self.bulletY + 5))
if self.bullet_lurd == [0, 1, 0, 0]:
bull_rot_im = bullet_image
win.blit(bull_rot_im, (self.bulletX + 5, self.bulletY))
if self.bullet_lurd == [0, 0, 1, 0]:
bull_rot_im = pygame.transform.rotate(bullet_image, -90)
win.blit(bull_rot_im, (self.bulletX + 45, self.bulletY + 5))
if self.bullet_lurd == [0, 0, 0, 1]:
bull_rot_im = pygame.transform.rotate(bullet_image, 180)
win.blit(bull_rot_im, (self.bulletX + 6, self.bulletY + 50))
def fire_bullet(self, bulletX, bulletY, lurd, win, x_aim, y_aim, lurd_aim):
global bullet_image
self.bulletX = bulletX
self.bulletY = bulletY
self.bullet_lurd = lurd
self.bullet_state = "fire"
self.bullet_length += self.bullet_velocity
if (lurd == [0,1,0,0] or lurd == [0,0,0,1]) and (lurd_aim == [0,1,0,0] or lurd_aim == [0,0,0,1]) and\
(x_aim-17<self.bulletX<x_aim+17) and (y_aim-55<self.bulletY<y_aim+65):
self.bullet_state = 'ready'
return 'hit'
if (lurd == [1,0,0,0] or lurd == [0,0,1,0]) and (lurd_aim == [0,1,0,0] or lurd_aim == [0,0,0,1]) and\
(x_aim-50<self.bulletX<x_aim+20) and (y_aim-5<self.bulletY<y_aim+50):
self.bullet_state = 'ready'
return 'hit'
if (lurd == [0,1,0,0] or lurd == [0,0,0,1]) and (lurd_aim == [1,0,0,0] or lurd_aim == [0,0,1,0]) and\
(x_aim -13<self.bulletX<x_aim+45) and (y_aim-65<self.bulletY<y_aim+30):
self.bullet_state = 'ready'
return 'hit'
if (lurd == [1,0,0,0] or lurd == [0,0,1,0]) and (lurd_aim == [1,0,0,0] or lurd_aim == [0,0,1,0]) and\
(x_aim-65<self.bulletX<x_aim+65) and (y_aim-17<self.bulletY<y_aim+17):
self.bullet_state = 'ready'
return 'hit'
if self.bullet_lurd == [1, 0, 0, 0]:
bull_rot_im = pygame.transform.rotate(bullet_image, 90)
win.blit(bull_rot_im, (self.bulletX-20, self.bulletY+5))
self.bulletX -= self.bullet_velocity
if self.bullet_lurd == [0, 1, 0, 0]:
bull_rot_im = bullet_image
win.blit(bull_rot_im, (self.bulletX+5, self.bulletY-20))
self.bulletY -= self.bullet_velocity
if self.bullet_lurd == [0, 0, 1, 0]:
bull_rot_im = pygame.transform.rotate(bullet_image, -90)
win.blit(bull_rot_im, (self.bulletX+65, self.bulletY+5))
self.bulletX += self.bullet_velocity
if self.bullet_lurd == [0, 0, 0, 1]:
bull_rot_im = pygame.transform.rotate(bullet_image, 180)
win.blit(bull_rot_im, (self.bulletX+6, self.bulletY+70))
self.bulletY += self.bullet_velocity
if self.bullet_length >= 420:
self.bullet_state = "ready"
return 'not_hit'
def rotate(self, old_lurd, new_lurd, win):
global tank1_image, tank2_image
dif = old_lurd.index(1) - new_lurd.index(1)
self.angle = 90 * dif
if self.place == 'top':
tank1_image = pygame.transform.rotate(tank1_image, self.angle)
win.blit(tank1_image, (self.x, self.y))
else:
tank2_image = pygame.transform.rotate(tank2_image, self.angle)
win.blit(tank2_image, (self.x, self.y))
pygame.display.flip()
def move(self, win, second_x, second_y, second_lurd):
self.angle = 0
keys = pygame.key.get_pressed()
if (keys[pygame.K_LEFT] and (keys[pygame.K_RIGHT] or keys[pygame.K_UP] or keys[pygame.K_DOWN]))\
or (keys[pygame.K_UP] and keys[pygame.K_DOWN]) or (keys[pygame.K_RIGHT] and (keys[pygame.K_DOWN] or keys[pygame.K_UP])):
return
if keys[pygame.K_SPACE]:
if self.bullet_state == "ready":
self.bullet_length = 0
# self.fire_bullet(self.x, self.y, self.dir_lurd, win)
self.bulletX = self.x
self.bulletY = self.y
self.bullet_lurd = self.dir_lurd
self.bullet_state = "fire"
if keys[pygame.K_LEFT]:
new_lurd = [1,0,0,0]
self.rotate(self.dir_lurd,new_lurd, win)
self.dir_lurd = new_lurd
if self.x <= 0:
self.x = 0
else:
new_x = self.x - self.tank_velocity
if second_lurd == [0,1,0,0]:
if ((second_x -5 < new_x < second_x + 14) and (second_y - 25 < self.y < second_y + 45)):
return
if second_lurd == [0,0,0,1]:
if ((second_x - 5 < new_x < second_x + 14) and (second_y - 30 < self.y < second_y + 50)):
return
if second_lurd == [0,0,1,0]:
if ((second_x +20 < new_x < second_x + 35) and (second_y - 25 < self.y < second_y + 32)):
return
if second_lurd == [1,0,0,0]:
if ((second_x +35 < new_x < second_x + 50) and (second_y - 15 < self.y < second_y + 50)):
return
self.x = new_x
return
if keys[pygame.K_RIGHT]:
new_lurd = [0, 0, 1, 0]
self.rotate(self.dir_lurd, new_lurd, win)
self.dir_lurd = new_lurd
if self.x >= 430:
self.x = 430
else:
new_x = self.x + self.tank_velocity
if second_lurd == [0,1,0,0]:
if ((second_x - 52 < new_x < second_x-32) and (second_y - 20 < self.y < second_y + 70)):
return
if second_lurd == [0,0,0,1]:
if ((second_x - 55 < new_x < second_x-35) and (second_y - 30 < self.y < second_y + 50)):
return
if second_lurd == [0,0,1,0]:
if ((second_x - 55 < new_x < second_x-40) and (second_y - 30 < self.y < second_y + 24)):
return
if second_lurd == [1,0,0,0]:
if ((second_x - 25 < new_x < second_x-10) and (second_y - 30 < self.y < second_y + 30)):
return
self.x = new_x
return
if keys[pygame.K_UP]:
new_lurd = [0, 1, 0, 0]
self.rotate(self.dir_lurd, new_lurd, win)
self.dir_lurd = new_lurd
if self.y <= 0:
self.y = 0
else:
new_y = self.y - self.tank_velocity
if second_lurd == [0,1,0,0]:
if ((second_x - 30 < self.x < second_x + 30) and (second_y + 30 < new_y < second_y + 50)):
return
if second_lurd == [0,0,0,1]:
if ((second_x - 30 < self.x < second_x + 30) and (second_y + 10 < new_y < second_y + 30)):
return
if second_lurd == [0,0,1,0]:
if ((second_x - 30 < self.x < second_x + 50) and (second_y - 10 < new_y < second_y + 5)):
return
if second_lurd == [1,0,0,0]:
if ((second_x - 20 < self.x < second_x + 70) and (second_y - 10 < new_y < second_y + 5)):
return
self.y = new_y
return
if keys[pygame.K_DOWN]:
new_lurd = [0, 0, 0, 1]
self.rotate(self.dir_lurd, new_lurd, win)
self.dir_lurd = new_lurd
if self.y >= 430:
self.y = 430
else:
new_y = self.y + self.tank_velocity
if second_lurd == [0,1,0,0]:
if ((second_x - 30 < self.x < second_x + 30) and (second_y - 35 < new_y < second_y -15)):
return
if second_lurd == [0,0,0,1]:
if ((second_x - 30 < self.x < second_x + 30) and (second_y - 45 < new_y < second_y-30 )):
return
if second_lurd == [0,0,1,0]:
if ((second_x - 30 < self.x < second_x + 55) and (second_y - 45 < new_y < second_y-30 )):
return
if second_lurd == [1,0,0,0]:
if ((second_x - 15 < self.x < second_x + 70) and (second_y - 45 < new_y < second_y-30 )):
return
self.y = new_y
return
|
def read_from_file(file_name:str) -> str:
with open(file_name, 'r') as file:
return file.read()
|
counts = dict()
names = ['jane', 'tom', 'jhon', 'tom']
for name in names:
counts[name] = counts.get(name, 0) + 1
print(counts)
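# Expected output: {'jane': 1, 'tom': 2, 'jhon': 1}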
|
"""
Processes Finngen's manifest to extract all studies and their metadata in the OTG format.
"""
# coding: utf-8
import argparse
from collections import OrderedDict
import logging
import numpy as np
import pandas as pd
def main(input_path: str, output_path: str) -> None:
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
# Read manifest
manifest = (
pd.read_json(input_path, orient='records')
.filter(items=['phenocode', 'phenostring', 'category', 'num_cases', 'num_controls'])
# When phenostring is not provided, the phenotype is extracted from the phenocode
.assign(phenostring=lambda df: df.apply(
lambda row: row['phenostring'] if row['phenostring'] and row['phenostring'] != '' else row['phenocode'],
axis=1)
)
# Renaming columns to accommodate the OTG schema:
.rename(columns={
'phenocode': 'study_id',
'phenostring': 'trait',
'category': 'trait_category',
'num_cases': 'n_cases',
'num_controls': 'n_controls',
})
)
logging.info(f"{input_path} has been loaded. Formatting...")
# Format table:
manifest['study_id'] = 'FINNGEN_R6_' + manifest['study_id']
manifest['n_total'] = manifest['n_cases'] + manifest['n_controls']
manifest['pmid'] = ''
manifest['pub_date'] = '2022-01-24'
manifest['pub_author'] = 'FINNGEN_R6'
manifest['ancestry_initial'] = 'European=' + manifest['n_total'].astype(str)
manifest['n_replication'] = 0
manifest['ancestry_replication'] = ''
manifest['pub_journal'] = ''
manifest['pub_title'] = ''
manifest['trait_efos'] = np.nan
cols = OrderedDict(
[
('study_id', 'study_id'),
('pmid', 'pmid'),
('pub_date', 'pub_date'),
('pub_journal', 'pub_journal'),
('pub_title', 'pub_title'),
('pub_author', 'pub_author'),
('trait', 'trait_reported'),
('trait_efos', 'trait_efos'),
('ancestry_initial', 'ancestry_initial'),
('ancestry_replication', 'ancestry_replication'),
('n_total', 'n_initial'),
('n_cases', 'n_cases'),
('n_replication', 'n_replication'),
]
)
manifest = manifest.loc[:, list(cols.keys())].rename(columns=cols)
manifest.to_json(output_path, orient='records', lines=True)
logging.info(f"{len(manifest)} studies have been saved in {output_path}. Exiting.")
def parse_args():
"""
Load command line args.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--input', metavar="<str>", type=str, required=True)
parser.add_argument('--output', metavar="<str>", help=("Output"), type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Parse args
args = parse_args()
main(input_path=args.input, output_path=args.output)
|
import torch
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
# Read the raw data and split it into training and test sets
raw_data = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32)
X = raw_data[:, :-1]
y = raw_data[:, [-1]]
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, y, test_size=0.3)
Xtest = torch.from_numpy(Xtest)
Ytest = torch.from_numpy(Ytest)
# Batch the training dataset
# prepare dataset
class DiabetesDataset(Dataset):
def __init__(self, data, label):
self.len = data.shape[0]  # shape is (rows, columns)
self.x_data = torch.from_numpy(data)
self.y_data = torch.from_numpy(label)
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.len
train_dataset = DiabetesDataset(Xtrain, Ytrain)
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True, num_workers=0)  # num_workers: number of data-loading subprocesses
# design model using class
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear1 = torch.nn.Linear(8, 6)
self.linear2 = torch.nn.Linear(6, 4)
self.linear3 = torch.nn.Linear(4, 2)
self.linear4 = torch.nn.Linear(2, 1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
x = self.sigmoid(self.linear1(x))
x = self.sigmoid(self.linear2(x))
x = self.sigmoid(self.linear3(x))
x = self.sigmoid(self.linear4(x))
return x
model = Model()
# construct loss and optimizer
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# training cycle forward, backward, update
def train(epoch):
train_loss = 0.0
count = 0
for i, data in enumerate(train_loader, 0):
inputs, labels = data
y_pred = model(inputs)
loss = criterion(y_pred, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
count = i + 1  # number of batches seen, for the average loss
if epoch % 2000 == 1999:
print("train loss:", train_loss / count, end=',')
def test():
with torch.no_grad():
y_pred = model(Xtest)
y_pred_label = torch.where(y_pred >= 0.5, torch.tensor([1.0]), torch.tensor([0.0]))
acc = torch.eq(y_pred_label, Ytest).sum().item() / Ytest.size(0)
print("test acc:", acc)
if __name__ == '__main__':
for epoch in range(50000):
train(epoch)
if epoch % 2000 == 1999:
test()
|
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('injury_data.tsv', sep='\t')
df['2020/21 proj'] = df['2020/21'] * 3
COLS = ["2016/17", "2017/18", "2018/19", "2019/20", "2020/21"]
PRIOR_YEARS = ["2016/17", "2017/18", "2018/19", "2019/20"]
# Injuries this year
res = df[['Country', 'Club', '2020/21']].loc[(df['Type'] == 'Injuries')].sort_values(by='2020/21')
fig = res.set_index('Club')['2020/21'].plot(kind='bar', figsize=(12,8), ylabel='Injuries 2020/21').get_figure()
fig.savefig('injuries_2020_21.pdf', bbox_inches='tight', pad_inches=2)
# Injuries vs time
def vs_time_plot(df, inj_type, clubs=None, countries=None):
clubs = clubs if clubs else []
countries = countries if countries else []
if clubs:
fltr = df['Club'].isin(clubs)
elif countries:
fltr = df['Country'].isin(countries)
else:
fltr = ~df.index.isna()
res = df[
['Club', "2016/17", "2017/18", "2018/19", "2019/20", "2020/21"]
].loc[
(df['Type'] == inj_type) & fltr
].set_index('Club')
return res.T.plot(figsize=(12,8), ylabel=f'{inj_type} vs Time')
fig = vs_time_plot(df, 'Injuries', clubs=['FC Bayern', 'Juventus FC', 'Real Madrid']).get_figure()
fig.savefig('injuries_vs_time_clubs.pdf', bbox_inches='tight', pad_inches=2)
fig = vs_time_plot(df, 'Repeat', countries=['England']).get_figure()
fig.savefig('england_repeat_injuries_vs_time.pdf', bbox_inches='tight', pad_inches=2)
# Most and Least of
def who_has_the_most_of(df, inj_type, year=None):
if year:
res = df[['Club', year]].loc[(df['Type'] == inj_type)]
return res.iloc[res[year].argmax()]
else:
res = df[
['Club'] + COLS
].loc[(df['Type'] == inj_type)]
rr = pd.concat([res.set_index('Club').idxmax(), res.set_index('Club').max()], axis=1)
return rr[0].loc[rr[1].idxmax()], rr[1].max(), rr[1].idxmax()
def who_has_the_least_of(df, inj_type, year=None):
if year:
res = df[['Club', year]].loc[(df['Type'] == inj_type)]
return res.iloc[res[year].argmin()]
else:
res = df[
['Club'] + COLS
].loc[(df['Type'] == inj_type)]
rr = pd.concat([res.set_index('Club').idxmin(), res.set_index('Club').min()], axis=1)
return rr[0].loc[rr[1].idxmin()], rr[1].min(), rr[1].idxmin()
print(who_has_the_most_of(df, 'Concurrent', year='2019/20'))
print(who_has_the_most_of(df, 'Concurrent', year=None))
print(who_has_the_least_of(df, 'Injuries', year='2019/20'))
def compare_countries(df, inj_type):
res = df.loc[(df['Type'] == inj_type)].groupby('Country')[COLS + ['2020/21 proj']].mean()
return res.plot(kind='bar', ylabel=f'Average {inj_type}', figsize=(12,8))
fig = compare_countries(df, 'Injuries').get_figure()
fig.savefig('country_comparison.pdf', bbox_inches='tight', pad_inches=2)
|
import logging , requests , lxml.html , os
from urllib.parse import urljoin
import pickle
def save_cookies(session,filename):
with open(filename, 'wb') as f:
pickle.dump(session.cookies, f)
def load_cookies(session,filename):
with open(filename,'rb') as f:
session.cookies.update(pickle.load(f))
def login(username, password, session=None):
# avoid a mutable default argument: create the session per call
if session is None:
session = requests.Session()
home_url = 'https://www.linkedin.com/'
if os.path.exists(filename):
logging.info("[*] Login step 0 - Loading Cookies...")
load_cookies(session,filename)
res = session.get(home_url, headers={'User-Agent': 'Mozilla/5.0'})
if '>Sign in</' not in res.text:
logging.info('[*] Login success')
return True
logging.info("[*] Login step 1 - Getting CSRF token...")
resp = session.get(urljoin(home_url,'/login/'),headers={'user-agent': 'Mozilla/5.0'})
body = resp.text
# Looking for CSRF Token
html = lxml.html.fromstring(body)
csrf = html.xpath("//input[@name='loginCsrfParam']/@value").pop()
sIdString = html.xpath("//input[@name='sIdString']/@value").pop()
parentPageKey = html.xpath("//input[@name='parentPageKey']/@value").pop()
pageInstance = html.xpath("//input[@name='pageInstance']/@value").pop()
loginCsrfParam = html.xpath("//input[@name='loginCsrfParam']/@value").pop()
fp_data = html.xpath("//input[@name='fp_data']/@value").pop()
d = html.xpath("//input[@name='_d']/@value").pop()
controlId = html.xpath("//input[@name='controlId']/@value").pop()
logging.debug(f"[*] CSRF: {csrf}")
data = {
"session_key": username,
"session_password": password,
"csrfToken": csrf,
'ac': 0,
'sIdString': sIdString,
'parentPageKey':parentPageKey,
'pageInstance': pageInstance,
'trk': '',
'authUUID': '',
'session_redirect': '',
'loginCsrfParam': loginCsrfParam,
'fp_data': fp_data,
'_d': d,
'controlId': controlId
}
logging.info("[*] Login step 1 - Done")
logging.info("[*] Login step 2 - Logging In...")
URL = urljoin('https://www.linkedin.com', 'checkpoint/lg/login-submit')
session.post(URL, data=data,headers={'user-agent': 'Mozilla/5.0'})
if not session.cookies._cookies['.www.linkedin.com']['/'].get('li_at',''):
logging.info("[!] Could not login. Please check your credentials")
return False
logging.info("[*] Login step 2 - Done")
logging.info("[*] Login step 3 - Saving Cookies...")
save_cookies(session,filename)
return True
filename = 'cookie_linkedin.txt'
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
|
class YearResult:
def __init__(self):
self.modules = []
def total_credits(self):
total_credits = 0
for m in self.modules:
total_credits += m.credits
return total_credits
def get_result(self):
total_credits = self.total_credits()
total_result = 0
for m in self.modules:
total_result += m.grade() * ((m.credits / total_credits) / m.module_completion())
return total_result
def needed_grades(self, t):
completed = self.completion()
remaining = 1 - completed
return (t - (self.get_result() * completed)) / remaining
# t = curr * completion + need * remaining
# => need = (t - curr * completion) / remaining
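# Worked example (hypothetical numbers): a target t = 70 with a current weighted
# grade of 60 at 40% completion needs (70 - 60*0.4) / 0.6, roughly 76.7, on the remaining work.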
def completion(self):
completion = 0
for m in self.modules:
completion += m.module_completion() * m.credits
return completion / self.total_credits()
def enter_marks(self):
for m in self.modules:
response = m.enter_mark()
if str(response).lower() == "q":
return "q"
print("Grades added.")
def display_results(self):
print(f"Generating results...\n")
for m in self.modules:
print(m)
print(f"Your overall weighted grade so far is: {round(self.get_result(),2)} ({self.completion()*100}% of year complete)\n")
def __str__(self):
return " ".join(self.modules)
class Module:
def __init__(self, name, credits):
self.name = name
self.credits = credits
self.assignments = []
def grade(self):
total = 0
for a in self.assignments:
total += a.weight * a.mark
# print(f"Grade is: {total}")
return total
def module_completion(self):
return sum([a.weight for a in self.assignments])
def add_assignment(self, title, weight, mark):
self.assignments.append(Assignment(title, weight, mark))
def enter_mark(self):
for a in self.assignments:
user_input = input(f"Enter the mark you got for {a.title} in {self.name}: \n")
if str(user_input).lower() == "q":
return "q"
else:
a.mark = int(user_input)
# return 1
def __str__(self):
a_list = ""
print(self.name)
for a in self.assignments:
a_list += " " + str(a) + "\n"
return a_list
class Assignment:
def __init__(self, title, weight, mark):
self.title = title
self.weight = weight
self.mark = mark
def __str__(self):
return f"{self.title}: {self.mark} / 100 ({round(self.weight*100)}% of module)"
DT265C = YearResult()
OOP = Module("OOP", 10)
DT265C.modules.append(OOP)
OOP.add_assignment("Exam1", 0.2, 0)
OOP.add_assignment("Exam2", 0.2, 0)
OOP.add_assignment("CA1", 0.3, 0)
AOSN = Module("Architecture, Operating Systems, and Networks", 5)
DT265C.modules.append(AOSN)
AOSN.add_assignment("CA1", 0.25, 0)
AOSN.add_assignment("CA2", 0.25, 0)
SA = Module("Systems Analysis", 5)
DT265C.modules.append(SA)
SA.add_assignment("CA1", 0.25, 0)
Web = Module("Web & UI", 5)
DT265C.modules.append(Web)
Web.add_assignment("Final result", 1, 0)
IS = Module("Information Systems", 5)
DT265C.modules.append(IS)
IS.add_assignment("Final result", 1, 0)
user_input = ""
while str(user_input).lower() != "q":
print("Press 1 to enter/edit results \nPress 2 to view marks\nPress 3 to calculate needed grades\n~ Press Q to quit")
user_input = input("")
if str(user_input).lower() == "q":
break
elif user_input == "1":
user_input = DT265C.enter_marks()
if str(user_input).lower() == "q":
break
elif user_input == "2":
DT265C.display_results()
elif user_input == "3":
target = input("What overall grade do you want to achieve? \n")
needed = round(DT265C.needed_grades(int(target)), 2)
grade = round(DT265C.get_result(),2)
completion = DT265C.completion()*100
print(f"\nYour grade so far is: {grade} / 100 with {completion}% complete...\n")
if needed > 100:
print(f"You need {needed} / 100 to get this amount unfortunately.")
else:
print(f"Target grade: {target}\n\nNeeded marks: {needed} / 100 averaged across remaining assignments ({100-completion}% of course left)\n")
print("Exiting...")
|
# coding:utf-8
import os, sys
# import pandas as pd
from glob import glob
from os import path
wildchar_dict = {'mapl8': ['/L*B[4-6].TIF', '/L*BQA.TIF', '/L*MTL.txt'],
'mapl57': ['/L*B[3-5].TIF', '/L*BQA.TIF', '/L*MTL.txt'],
'raw': ['']}
def parse_url(url, craft='LANDSAT'):
if craft == 'LANDSAT':
basedir,product_id=path.split(url)
basedir,orbit_row=path.split(basedir)
basedir,orbit_path=path.split(basedir)
return orbit_path,orbit_row
if craft == 'SENTINEL2':
basedir, product_id = path.split(url)
basedir, t2 = path.split(basedir)
basedir, t1 = path.split(basedir)
basedir, zone = path.split(basedir)
return zone, t1, t2
def downloadone(file, downloaddir=None, craft='LANDSAT', mode='raw'):
urls = open(file).readlines()
if not path.exists(downloaddir):
os.makedirs(downloaddir)
for i in range(len(urls)):
url = urls.pop()
# os.system('set http_proxy=127.0.0.1:1080')
# print 'gsutil cp -r {0} {1}'.format(url.strip('\n')+'/',dstdir)
if craft=='LANDSAT':
orbit_path, orbit_row = parse_url(url)
scenedir = path.join(downloaddir, orbit_path, orbit_row)
if craft=='SENTINEL2':
zone, t1, t2 = parse_url(url, craft)
scenedir = path.join(downloaddir, zone, t1, t2)
if not path.exists(scenedir):
os.makedirs(scenedir)
for wildchar in wildchar_dict[mode]:
while (os.system('gsutil -m cp -r -n {0} {1}'.format(url.strip('\n') + wildchar, scenedir)) != 0):
pass
open(file, 'w').writelines(urls)
if __name__ == '__main__':
tododown = sys.argv[1]
downloaddir = sys.argv[2]
craft = sys.argv[3]
if len(sys.argv) > 4:
wildchar = sys.argv[4]
else:
# default to the 'raw' mode (copies the whole scene directory)
wildchar = 'raw'
if os.path.isdir(tododown):
filelist = glob(os.path.join(tododown, r'*.txt'))
for file in filelist:
downloadone(file, downloaddir, craft, wildchar)
else:
downloadone(tododown, downloaddir, craft, wildchar)
|
import random
element_1 = 0
element_2 = 0
player_1=0
player_2=0
for i in range(5):
element_1=random.randint(1,6)
element_2=random.randint(1,6)
player_1+=element_1
player_2+=element_2
print ("player 1 =", element_1)
print ("player 2 =", element_2)
print("player 1 has", player_1, "points while player 2 has", player_2, "points" )
if player_1 > player_2:
print("The winner of the competiton is player 1 with", player_1 , "points")
elif player_1 < player_2:
print("The winner of the competiton is player 2 with", player_2 , "points")
else:
print("There is no winner between player 1 and player 2")
|
from math import sqrt, exp, floor, pi
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../lab1')
import distributions as dst
def kernel_function(x: float):
return exp(-x*x/2)/sqrt(2*pi)
def kernel_approximation(xs: np.ndarray, sel: np.ndarray, k):
n = len(sel)
s = sqrt((sel*sel).sum()/n - (sel.sum()/n)**2)
# Silverman's rule-of-thumb bandwidth, scaled by the factor k
h = 1.06*s*(n**(-0.2))*k
res = np.zeros_like(xs)
for i in range(len(xs)):
for v in sel:
res[i] += kernel_function((xs[i] - v)/h)
res[i] /= n*h
return res
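# Usage sketch (hypothetical data): estimate a standard normal density
# sel = np.random.normal(size=100)
# xs = np.linspace(-4, 4, 100)
# density = kernel_approximation(xs, sel, k=1)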
ds = dst.get_distributions()
ns = [20, 60, 100]
ks = [0.5, 1, 2]
for d in ds:
fig, ax = plt.subplots(len(ks), len(ns))
rng = (-4, 4) if not d.discrete() else (6, 14)
for i in range(len(ns)):
sel = np.array(dst.selection(d, ns[i]))
x = np.linspace(*rng, 100)
y1 = list(map(d.f, x))
for j in range(len(ks)):
y2 = kernel_approximation(x, sel, ks[j])
ax1 = ax[i, j]
ax1.plot(x, y1, label='theoretical')
ax1.plot(x, y2, label='estimate')
ax1.legend()
ax1.set_xlabel('x' if i == len(ns) - 1 else '')
ax1.set_ylabel(f'$f(x), n = {ns[i]}$')
if i == 0:
ax1.set_title((d.name if j == 1 else '') + f"\n\n$h = h_n * {ks[j]}$")
plt.show()
for d in ds:
fig, ax = plt.subplots(1, len(ns))
rng = (-4, 4) if not d.discrete() else (6, 14)
for i in range(len(ns)):
data = [d.x() for j in range(ns[i])]
x = np.linspace(min(data), max(data), 200)
yF = [d.F(k) for k in x]
plt.subplot(1, 3, i+1)
plt.title(d.name + ", n = " + str(ns[i]))
plt.xlabel('x')
plt.ylabel('Distribution function')
plt.plot(x, yF, label='expected')
plt.hist(data, density=True, label='observed', bins=floor(len(data)), histtype='step', cumulative=True)
plt.legend()
plt.show()
|
from src.character.player import Player
from src.character.class_.standart import Warrior, Wizard
from .notification import ShowPlayerStats
from . import BaseScene, Quit
class Menu(BaseScene):
def exit(self, game):
pass
def execute(self, game):
text = [ 'Hi. This is my console game.',
'1. Start',
'2. Load',
'3. Quit', ]
valid_answers = {
1: ['1', 'start', 'new'],
2: ['2', 'load'],
3: ['3', 'exit', 'quit']
}
question = self.interface.create_readable_text(valid_answers)
self.interface.print(text, delay=0.2)
answer = self.interface.ask(question, valid_answers)
if answer == 1:
game.scene = PlayerClassChoice()
elif answer == 2:
game.load()
elif answer == 3:
game.scene = Quit()
class PlayerClassChoice(BaseScene):
def execute(self, game):
text = [
'Choose a class:',
'1. Warrior',
'2. Mage',
]
self.interface.print(text, delay=0.2)
valid_answers = {
1: ['1', 'warrior'],
2: ['2', 'mage', 'wizard'],
}
question = self.interface.create_readable_text(valid_answers)
answer = self.interface.ask(question, valid_answers)
if answer == 1:
game.player = Player(Warrior())
elif answer == 2:
game.player = Player(Wizard())
game.scene = ShowPlayerStats()
|
import os
import tempfile
#################################################################################################
# #
# Wrapper Function #
# #
#################################################################################################
from . import register_corpus_metric
TER_PATH = '/private/home/jgu/software/tercom.7.25.jar'
@register_corpus_metric('TER')
def get_ter_score(targets, decodes):
return corpus_ter([" ".join(t) for t in targets],
[" ".join(o) for o in decodes])
#################################################################################################
def corpus_ter(ref, hyp):
if not os.path.exists(TER_PATH):
raise FileNotFoundError(
"Please download tercom.7.25.jar and set TER_PATH")
fr, pathr = tempfile.mkstemp()
fh, pathh = tempfile.mkstemp()
try:
with os.fdopen(fr, 'w') as tmp:
tmp.write(
'\n'.join([r + ' ({})'.format(i)
for i, r in enumerate(ref)]) + '\n')
with os.fdopen(fh, 'w') as tmp:
tmp.write(
'\n'.join([h + ' ({})'.format(i)
for i, h in enumerate(hyp)]) + '\n')
R = os.popen('java -jar {ter_path} -r {ref_file} -h {hyp_file}'.format(
ter_path=TER_PATH, hyp_file=pathh, ref_file=pathr)).read()
R = [l for l in R.split('\n') if 'Total TER' in l]
R = float(R[0].split()[2])
finally:
os.remove(pathr)
os.remove(pathh)
return R
|
import pygame
import sys
from pygame.locals import *
import time
from lib.apple import Apple
from lib.player import Player
WINDOW_WIDTH = 1280
WINDOW_HEIGHT = 736
white = (255, 255, 255)
black = (0, 0, 0)
blue = (0, 0, 128)
class App:
def main(self):
pygame.init()
self.DISPLAYSURF = pygame.display.set_mode(
(WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption('Hello World!')
self.playerImg = pygame.image.load('./pics/player.png')
self.appleImg = pygame.image.load('./pics/apple.png')
self.player = Player()
self.apple = Apple()
self.score = 0
self.clock = pygame.time.Clock()
# Text
self.font = pygame.font.Font('freesansbold.ttf', 16)
self.text = self.font.render(
'Score: ' + str(self.score), True, white, black)
self.textRect = self.text.get_rect()
self.textRect.x = 1200
self.textRect.y = 700
while True:
# Event Handler
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.player.moveLeft()
if event.key == pygame.K_UP:
self.player.moveUp()
if event.key == pygame.K_DOWN:
self.player.moveDown()
if event.key == pygame.K_RIGHT:
self.player.moveRight()
if event.key == pygame.K_ESCAPE:
self.die()
if event.type == QUIT:
self.die()
self.player.update()
self.checkCollision()
self.draw()
pygame.display.update()
time.sleep(50.0 / 1000.0)
def draw(self):
# The screen will be covered in black
self.DISPLAYSURF.fill((0, 0, 0))
# Draw our player
self.player.draw(self.DISPLAYSURF)
# Draw our apple
self.apple.draw(self.DISPLAYSURF)
# Draw our text
self.DISPLAYSURF.blit(self.text, self.textRect)
# Death to the game :>
def die(self):
pygame.quit()
sys.exit()
# TODO: Add logic
def gameOver(self):
self.die()
def checkCollision(self):
if self.player.isCollidingWithApple(self.apple.getRect()):
self.score += 1
self.text = self.font.render(
'Score: ' + str(self.score), True, white, black)
self.apple.respawn()
if self.player.isCollidingWithMyself():
self.gameOver()
if __name__ == "__main__":
theApp = App()
theApp.main()
|
#!/usr/bin/python
import sys
import boto.ec2
instance_id = sys.argv[1]
print(instance_id + ' will be terminated')
conn = boto.ec2.connect_to_region("us-east-2",
aws_access_key_id="",
aws_secret_access_key=""
)
conn.terminate_instances(instance_ids=[instance_id])
|
import multiprocessing
import time
import contextlib
import requests
import selenium.webdriver as webdriver
import apl
from constants import driver_path, hapag_url, backup_msc_url
import cosco
from database import get_containers_by_steamship, update_container_eta, update_container_tracing
import hpl
import msc
import msc_selenium
import one_line
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)
session = requests.Session()
def get_backup_msc_tracing(driver, container):
msc = msc_selenium.MSC(driver)
events_dict = msc.get_all_events_dict(backup_msc_url, container)
tracing_results = msc.get_formatted_tracing_results(events_dict)
vessel_eta = events_dict['vessel_eta']
update_container_eta(container, vessel_eta)
update_container_tracing(container, tracing_results, 'ssl')
'''
APL TRACKING
'''
apl = apl.APL(session)
tracing_results_list = []
apl_containers_list = get_containers_by_steamship()['APLU'] + get_containers_by_steamship()['CMDU']
pool = multiprocessing.Pool(processes=len(apl_containers_list))
with pool as p:
tracing_results = p.map(apl.get_tracing_results_dict, apl_containers_list)
for result in tracing_results:
tracing_results_list.append(result)
for result in tracing_results_list:
formatted_tracing_results = apl.format_tracing_results(result)
update_container_eta(result['container'], result['vessel_eta'])
update_container_tracing(result['container'], formatted_tracing_results, 'ssl')
'''
MSC TRACKING
'''
msc = msc.MSC(session)
msc_containers_list = get_containers_by_steamship()['MEDU']
for container in msc_containers_list:
try:
html = msc.get_tracing_results_html(container)
update_container_tracing(container, msc.get_all_events(html), 'ssl')
update_container_eta(container, msc.get_vessel_eta(html))
except AttributeError:
with contextlib.closing(driver) as driver:
get_backup_msc_tracing(driver, container)
'''
HAPAG TRACKING
'''
with contextlib.closing(driver) as driver:
hpl = hpl.HPL(driver)
hpl_containers_list = get_containers_by_steamship()['HLCU']
for container in hpl_containers_list:
events_dict = hpl.get_all_events_dict(hapag_url, container)
tracing_results = hpl.get_formatted_tracing_results(events_dict)
vessel_eta = hpl.get_vessel_eta(events_dict['scheduled_events'])
update_container_tracing(container, tracing_results, 'ssl')
update_container_eta(container, vessel_eta)
'''
COSCO TRACKING
'''
timestamp = int(time.time())
cosco_containers_list = get_containers_by_steamship()['COSU']
cosco = cosco.COSCO(containers_list=cosco_containers_list, session=session, timestamp=timestamp)
cosco_containers_list = get_containers_by_steamship()['COSU']
most_recent_events_list = cosco.get_most_recent_event_list()
for event in most_recent_events_list:
container_number = event['container_number']
vessel_eta = cosco.get_vessel_eta(container_number)
update_container_eta(container_number, vessel_eta)
update_container_tracing(container_number, cosco.format_most_recent_event(event), 'ssl')
'''
ONE LINE TRACKING
'''
one = one_line.ONE(session)
one_containers_list = get_containers_by_steamship()['ONEY']
for container in one_containers_list:
events_dict = one.get_events_dict(container)
tracing_results = one.get_formatted_events(events_dict)
vessel_eta = one.get_vessel_eta(container)
update_container_eta(container, vessel_eta)
update_container_tracing(container, tracing_results, 'ssl')
|
import unittest
from neo.rawio.plexonrawio import PlexonRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestPlexonRawIO(BaseTestRawIO, unittest.TestCase, ):
rawioclass = PlexonRawIO
files_to_download = [
'File_plexon_1.plx',
'File_plexon_2.plx',
'File_plexon_3.plx',
]
entities_to_test = files_to_download
if __name__ == "__main__":
unittest.main()
|
import sys
from collections import OrderedDict
from lib.intcode import Machine
from time import sleep
import subprocess
if len(sys.argv) == 1 or sys.argv[1] == '-v':
print('Input filename:')
f=str(sys.stdin.readline()).strip()
else: f = sys.argv[1]
verbose = sys.argv[-1] == '-v'
for l in open(f):
mreset = [int(x) for x in l.strip().split(',')]
class Droid:
WALL = 0
OK = 1
OXYGEN = 2
UNEXPLORED = 3
MOV_NORTH = 1
MOV_SOUTH = 2
MOV_WEST = 3
MOV_EAST = 4
def __init__(self, m):
self.__m = m
self.__cpu = Machine(self.__m[:])
self.__history = []
self.__bounds = (0, 0, 0, 0)
self.__pos = (0, 0)
self.__tank_pos = None
self.__move_count = 0
self.__map = OrderedDict({(0, 0): Droid.OK})
self.__tiles = {
0: '⬜',
1: '.',
2: '⭐'
}
if verbose: self.__cpu.toggle_verbose()
def map(self, display = True):
self.reset()
cpu = self.__cpu
history = self.__history
p = self.__pos = (0, 0)
nswe = set(range(1,5))
move = self.MOV_NORTH
tries = {p: {self.reverse(self.MOV_NORTH)}}
while True:
'''
Here we use a depth-first search (DFS) to traverse the entire map.
We also capture the tank's position and step count when it is found.
The goal is to traverse every single path (even after the tank is found) by
going all the way to the end of each path and backtracking until we find a new one.
Loop:
------------------------------------------------------------------
- each position is a new node/vertex
- each vertex potentially has 4 directions to try, except the one
it came from. [ example: if we took a step NORTH, then SOUTH is excluded
from future tries. ]
- once a direction is tried, we add it to the tried set for the current position/node/vertex (x, y).
- if the node's tries are exhausted (N, W, E ...), backtrack 1 step
- hmmm... we've been here before, innit? oui! mon Dieu!
- in the node we backtracked to, check whether any untried directions remain.
- if [ not ]: backtrack again. if [ yes ]: pop a move from the remaining tries and go on our merry way.
REPEAT
'''
if p not in tries:
tries[p] = {self.reverse(move)}
if len(tries[p]) < 4:
backtrack = False
move = nswe.difference(tries[p]).pop()
tries[p].add(move)
else:
backtrack = True
if not history:
'''
nowhere to backtrack to. this happens once we've explored
every other path and are forced to backtrack all the way back to the beginning.
END PROGRAM
'''
break
move = self.reverse(history.pop())
cpu.run(move)
o = cpu.output()
if o in {self.OK, self.OXYGEN}:
p = self.xy(move, p)
self.__pos = p
self.__bounds = self.update_bounds()
if not backtrack:
history.append(move)
self.__map[p] = o
if o == self.OXYGEN:
'''
+ capture the oxygen tank [position: x, y] and the [steps] it took to reach it.
+ there is only 1 path here, but if there were more, we'd keep the shortest by
replacing with the lowest count each time we hit the oxygen tank.
'''
self.__move_count = len(self.__history) if self.__move_count == 0 else min(self.__move_count, len(self.__history))
self.__tank_pos = p
if display:
self.display()
return self.__tank_pos, self.__move_count, self.__map
def reverse(self, direction = None):
if direction == None: direction = self.__history[-1]
return {
self.MOV_NORTH : self.MOV_SOUTH,
self.MOV_SOUTH : self.MOV_NORTH,
self.MOV_WEST : self.MOV_EAST,
self.MOV_EAST : self.MOV_WEST
}[direction]
def xy(self, move, from_xy: tuple = None):
x, y = from_xy if from_xy else self.__pos
dnswe = {
self.MOV_NORTH: (0, -1),
self.MOV_SOUTH: (0, 1),
self.MOV_WEST: (-1, 0),
self.MOV_EAST: (1, 0)
}
return tuple(a + b for a, b in zip((x, y), dnswe[move]))
def update_bounds(self):
mxy, xy = list(self.__bounds), self.__pos
return tuple([min(p1, p2) for p1, p2 in zip(mxy[:2], xy)] + [max(p1, p2) for p1, p2 in zip(mxy[2:], xy)])
def display(self):
minx, miny, maxx, maxy = self.__bounds
maxx, maxy = abs(minx) + maxx, abs(miny) + maxy
px, py = self.__pos
subprocess.call("clear")
dash = 'Steps: {}, Max (x): {}, Max (y): {}'.format(len(self.__history), maxx, maxy)
print(dash)
print('.'*(maxx+1))
grid = [[' ']*(maxx+1) for _ in range(maxy+1)]
for (x, y), v in self.__map.items():
x += abs(minx)
y += abs(miny)
if x < 0 or x > maxx or y < 0 or y > maxy: break
try:
grid[y][x] = self.__tiles[v]
except:
print('DEBUG', x, y, maxx, maxy)
exit()
break
prev = grid[py+abs(miny)][px+abs(minx)]
rx, ry = px+abs(minx), py+abs(miny)
if prev != self.__tiles[self.OXYGEN]:
grid[ry][rx] = '🛸'
else:
grid[ry][rx-1] = '🛸'
for l in grid: print(''.join(l))
print('\n')
print('.'*(maxx+1))
print(dash)
# sleep(0.1)
def reset(self):
if not self.__map: return
self.__pos = (0, 0)
self.__move_count = 0
self.__tank_pos = None
self.__map = OrderedDict()
self.__cpu = Machine(self.__m[:])
if verbose: self.__cpu.toggle_verbose()
return self
'''
Solution 1
'''
r = Droid(mreset[:])
tank_pos, steps, rmap = r.map(True)
print('Solution 1:', steps)
'''
Solution 2
'''
def neighbours(m: dict, pos: tuple, l: callable = lambda p, v: True):
'''
N (x, y), S (x, y), W (x, y), E (x, y)
'''
(x, y), dxy = pos, [(0, -1), (0, 1), (-1, 0), (1, 0)]
n = [(x + dx, y + dy) for dx, dy in dxy]
return [nn for nn in n if nn in m and l(nn, m.get(nn)) ]
def spread(m, p, t = 0):
# Flood-fill from the tank: mark each visited cell as WALL and return the number
# of minutes until the last open cell fills with oxygen.
if m.get(p, Droid.WALL) == Droid.WALL:
return t - 1
m[p] = Droid.WALL
n = neighbours(m, p)
return max([ spread(m, nn, t + 1) for nn in n ])
print('Solution 2:', spread(rmap, tank_pos))
|
import nltk
from sklearn.model_selection import train_test_split
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
import json
import pickle
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout,LSTM
from keras.optimizers import SGD
import random
import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
import matplotlib.pyplot as plt
words=[]
classes = []
documents = []
ignore_words = ['?', '!']
intents = json.loads(open('intents.json').read())
for intent in intents['intents']:
for pattern in intent['patterns']:
#Tokenize each word
w = nltk.word_tokenize(pattern)
words.extend(w)
#Add documents in the corpus
documents.append((w, intent['tag']))
#Add to our classes list
if intent['tag'] not in classes:
classes.append(intent['tag'])
lemmatizer = WordNetLemmatizer()
#Lemmatize and lowercase each word and remove duplicates
words = [lemmatizer.lemmatize(w.lower())
for w in words
if w not in ignore_words]
words = sorted(list(set(words)))
#Sort classes
classes = sorted(list(set(classes)))
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(classes,open('classes.pkl','wb'))
#Create training data
training = []
#Create an empty array for our output
output_empty = [0] * len(classes)
#Training set, bag of words for each sentence
for doc in documents:
#Initialize our bag of words
bag = []
#List of tokenized words for the pattern
pattern_words = doc[0]
#Lemmatize each word - create base word, in attempt to represent related words
pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]
#Create our bag of words array with 1, if word match found in current pattern
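    # e.g. if words were ['hi', 'how', 'you'] and the pattern "hi you", the bag would be [1, 0, 1]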
for w in words:
bag.append(1) if w in pattern_words else bag.append(0)
    #Output is a '0' for each tag and a '1' for the current pattern's tag
output_row = list(output_empty)
output_row[classes.index(doc[1])] = 1
training.append([bag, output_row])
#Shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training, dtype=object)  # rows hold Python lists, so force object dtype to avoid a ragged-array error
#Create train lists. X - patterns, Y - intents
train_x = list(training[:,0])
train_y = list(training[:,1])
x_train, x_test, y_train, y_test = train_test_split(train_x,train_y,test_size=0.1,random_state=5)
print("Training data created")
print(np.array(x_train).shape,np.array(y_train).shape,np.array(x_test).shape,np.array(y_test).shape)
#Create model - 3 layers. First layer 80 neurons, second layer 40 neurons and 3rd output layer contains as many neurons
#as there are intents, to predict the output intent with softmax
model = Sequential()
model.add(Dense(80, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(40, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
#Compile model with stochastic gradient descent with Nesterov momentum
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#Fitting and saving the model
hist = model.fit(x_train, y_train, epochs=200, batch_size=5, verbose=1,validation_data = (x_test, y_test))
model.save('model.h5')  # save only the model; the second positional argument of save() is 'overwrite', not the history
print("Model created")
test_results = model.evaluate(x_test,y_test, verbose=False)
print(f'Test results - Loss: {test_results[0]} - Accuracy: {100*test_results[1]}%')
plt.subplot(1,2,1)
plt.plot(hist.history['loss'],label="Loss",c='g')
plt.plot(hist.history['val_loss'],label="Validation Loss",c='c')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(loc='upper left')
plt.subplot(1,2,2)
plt.plot(hist.history['accuracy'],label="Accuracy",c='r')
plt.plot(hist.history['val_accuracy'],label="Validation Accuracy",c='b')
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend(loc='upper left')
plt.show()
|
"""Module with functions for management of installed APK lists."""
import glob
import re
import subprocess
import apkutils # needed for AndroidManifest.xml dump
import utils # needed for sudo
# Creates an APK/path dictionary to avoid the sluggish "pm path"
def create_pkgdict():
"""Creates a dict for fast path lookup from /data/system/packages.xml; returns dict."""
(out, err) = utils.sudo("cat /data/system/packages.xml")
if err: return False
xml_dump = [i for i in out.decode("utf-8").split("\n") if "<package name=" in i]
pkgdict = {}
for i in xml_dump:
pkgname = re.findall("<package name=\"(.*?)\"", i)[0]
pkgpath = re.findall("codePath=\"(.*?)\"", i)[0]
# Normalizes each entry
if not pkgpath.endswith(".apk"):
try:
pkgpath = glob.glob(pkgpath + "/*.apk")[0]
            except IndexError:  # no APK found under the code path
                continue
pkgdict[pkgname] = pkgpath
return pkgdict
def list_installed_pkgs(args):
"""Lists the members of a given category of packages; returns list."""
prefix = "pm list packages"
if args.user:
suffix = "-3"
elif args.system:
suffix = "-s"
elif args.disabled:
suffix = "-d"
else:
suffix = ""
pkgs = [i[8:] for i in subprocess.Popen("{0} {1}".format(prefix, suffix), stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True).communicate()[0].decode("utf-8").split("\n") if i]
return pkgs
def list_installed_pkgs_nougat(args):
"""Uses Nougat's cmd command to query the package service (faster); returns list."""
prefix = "cmd package list packages"
if args.user:
suffix = "-3"
elif args.system:
suffix = "-s"
elif args.disabled:
suffix = "-d"
else:
suffix = ""
pkgs = [i[8:] for i in utils.sudo("{0} {1}".format(prefix, suffix))[0].decode("utf-8").split("\n") if i]
return pkgs
def check_substratum(nougat):
"""Checks if the Substratum engine is installed; returns bool."""
if nougat:
user_pkgs = [i[8:] for i in utils.sudo("cmd package list packages -3")[0].decode("utf-8").split("\n") if i]
else:
user_pkgs = [i[8:] for i in subprocess.Popen("pm list packages -3", stdout = subprocess.PIPE, shell = True).communicate()[0].decode("utf-8").split("\n") if i]
substratum_installed = True if "projekt.substratum" in user_pkgs else False
return substratum_installed
def exclude_overlays(aapt, pkgdict, pkgs):
"""Excludes Substratum overlays from the packages to extract; returns nothing."""
    for i in pkgs[:]:  # iterate over a copy: removing from a list while iterating it skips elements
pkgpath = pkgdict.get(i)
out = apkutils.get_pkgxml(aapt, pkgpath)[0].decode("utf-8")
if "Substratum_Parent" in out: pkgs.remove(i)
def exclude_arcus_variants(pkgs):
"""Excludes Arcus theme variants from the packages to extract; returns nothing."""
    for i in pkgs[:]:  # iterate over a copy for safe in-place removal
if "pixkart.arcus.user" in i: pkgs.remove(i)
def check_already_extracted(pkgpath, md5sums):
"""Checks if an APK has already been extracted; returns bool, str."""
pkgsum = utils.compute_md5sum(pkgpath)
already_extracted = True if pkgsum in md5sums else False
return already_extracted, pkgsum
|
from sklearn.externals import joblib
question = raw_input(":> Ingresa la pregunta: ")
clf = joblib.load('clf.pkl')
vectorizer = joblib.load('vectorizer.pkl')
selector = joblib.load('selector.pkl')
question = vectorizer.transform([question])
question = selector.transform(question).toarray()
print clf.predict(question)
|
from collections import defaultdict
import numpy as np
def solution1(input):
m = defaultdict(lambda: defaultdict(int))
for i, ol, ot, w, h in input:
for x in range(ol, ol+w):
for y in range(ot, ot+h):
m[x][y] += 1
c = 0
for x in m:
        for y in m[x]:
if m[x][y] > 1:
c += 1
return c
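# Vectorized alternative: accumulate every claim onto a 1000x1000 grid with numpy slicing,
# then count the cells covered by more than one claim.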
def solution2(input):
m = np.zeros((1000, 1000), dtype=int)
for _, ol, ot, w, h in input:
m[ol:ol+w,ot:ot+h] += 1
return len(m[m > 1])
def parse_input(input):
for i in input.split('\n'):
id, _, offset, size = i.split()
o_l, o_t = offset[:-1].split(',')
w, h = size.split('x')
id = id[1:]
yield id, int(o_l), int(o_t), int(w), int(h)
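# e.g. the claim line "#1 @ 257,829: 10x23" parses to ('1', 257, 829, 10, 23)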
if __name__ == '__main__':
#with open('input.txt') as fh:
# print(solution1(list(parse_input(fh.read().strip()))))
with open('input.txt') as fh:
print(solution2(list(parse_input(fh.read().strip()))))
|
#B
zub=int(input())
x2,y3=map(int,input().split())
if(zub<y3 and zub>x2):
print("yes")
else:
print("no")
|
from arcgis_terrain import meters2lat_lon
from arcgis_terrain import lat_lon2meters
import csv
import numpy as np
import matplotlib.pyplot as plt
# params = [37.67752, -79.33887, 'punchbowl']
# # params = [38.29288, -78.65848, 'brownmountain']
# # params = [38.44706, -78.46993, 'devilsditch']
# # params = [37.99092, -78.52798, 'biscuitrun']
# # params = [37.82520, -79.081910, 'priest']
# # params = [34.12751, -116.93247, 'sanbernardino']
#
# with open("C:\\Users\\Larkin\\ags_grabber\\track_temp.csv") as f:
# reader = csv.reader(f, delimiter=',')
# data = []
# for row in reader:
# if any(x.strip() for x in row):
# data.append(row)
# track = np.array(data).astype(np.float)
#
# ap_meters = lat_lon2meters(params[0], params[1])
extent = 20e3
scale_factor = 3/20 # factor to get 6.66667m mapping from 1m mapping (1/6.6667)
#
# xy = lat_lon2meters(track[:,1], track[:,0])
#
# x_pts = (np.array(xy[0]) - (ap_meters[0] - (extent/2)))*scale_factor # reduces number of interpolants
# y_pts = (np.array(xy[1]) - (ap_meters[1] - (extent/2)))*scale_factor
#
# np.savetxt(params[2] + '_track_meters.csv',np.array([x_pts, y_pts]),delimiter=",", fmt='%f')
# plt.plot(x_pts, y_pts)
# plt.show()
points = [[37.67752, -79.33887, 'punchbowl'],
[38.29288, -78.65848, 'brownmountain'],
[38.44706, -78.46993, 'devilsditch'],
[37.99092, -78.52798, 'biscuitrun'],
[37.82520, -79.081910, 'priest'],
[34.12751, -116.93247, 'sanbernardino']]
for pt in points:
pt_meters = lat_lon2meters(pt[0], pt[1])
print(pt[2])
print(meters2lat_lon(pt_meters[0] - extent/2, pt_meters[1] - extent/2))
print(meters2lat_lon(pt_meters[0] + extent/2, pt_meters[1] - extent/2))
print(meters2lat_lon(pt_meters[0] + extent/2, pt_meters[1] + extent/2))
print(meters2lat_lon(pt_meters[0] - extent/2, pt_meters[1] + extent/2))
print("-----------------")
|
"""
Contains business logic tasks for this order of the task factory.
Each task should be wrapped inside a task closure that accepts a **kwargs parameter
used for task initialization.
"""
def make_task_dict():
"""
Returns a task dictionary containing all tasks in this module.
"""
task_dict = {}
task_dict["split_list"] = split_list_closure
return task_dict
def get_task(task_name, init_args):
"""
Accesses the task dictionary, returning the task corresponding to a given key,
wrapped in a closure containing the task and its arguments.
"""
tasks = make_task_dict()
return tasks[task_name](init_args)
def split_list_closure(init_args):
"""
    A closure around the split_list function, which is an endpoint in the task factory.
"""
list_to_split = init_args["list_key"]
key_to_add = init_args["key_name"]
async def split_list(list_map):
"""
        Splits the input list into one dict per item, copying the remaining keys into each dict.
"""
return_list = []
for list_item in list_map[list_to_split]:
return_dict = {}
return_dict[key_to_add] = list_item
for dict_key in list_map:
if dict_key != list_to_split:
return_dict[dict_key] = list_map[dict_key]
return_list.append(return_dict)
return return_list
return split_list
|
#!/usr/bin/python3
import time
import re
from datetime import datetime
gpio='/gpio/pin26/edge'
def setup_gpio(gpio):
with open(gpio, "w") as f:
f.write("rising")
#1Hz – 0.98 LPM
val=0
last_val=0
last_time=''
total_litres=0
sleep=10
Interrupts='/proc/interrupts'
# 41: 299 gpio-mxc 14 Edge gpiolib
pattern = re.compile(r"^\s*41:\s*(\d+).*$");
setup_gpio(gpio)
while True:
now = datetime.now()
with open(Interrupts) as interrupts:
lines = interrupts.readlines()
#print ("lines %d" % len(lines))
for line in lines:
match = pattern.search(line)
if match:
val = int(match[1])
#print('irqs %d' % val);
if last_val == 0:
print("Initialise last_val date %s %d" % (now, val));
last_val = val
last_time = now
continue
if val != last_val:
edges = val - last_val
diff_time = now - last_time
hertz = (edges / diff_time.total_seconds())
lpm = hertz / 0.98
litres = lpm * diff_time.total_seconds() / 60
total_litres = total_litres + litres
#print('val %d edges %d seconds %d hertz %f' % (val, edges, diff_time.total_seconds(), hertz))
print('total_litres %.1f liters %.1f lpm %.1f hertz %.1f edges %d' % (total_litres, litres, lpm, hertz, edges))
last_val = val
last_time = now
continue
    time.sleep(sleep)
|
class sport():
activity = 'physical'
games = 'competitive'
def __init__(self, name, numberOfPlayers, ballShape):
self.name = name
self.numberOfPlayers = numberOfPlayers
self.ballShape = ballShape
def printAll(self):
print('name: %s\nnumber of players: %d\nball shape: %s' % (self.name, self.numberOfPlayers, self.ballShape))
def __str__(self):
return 'activity: %s\ngames: %s' % (self.activity, self.games)
footy = sport('footy', 12, 'spherical')
rugby = sport('rugby', 20, 'oblong')
print('footy: ')
footy.printAll()
print('\nrugby: ')
rugby.printAll()
cricket = sport('cricket', 12, 'round')
print('\ncricket: ')
print(cricket)
cricket.printAll()
|
from base.models import Group
from rest_framework.permissions import BasePermission, IsAuthenticated
class GroupAdminPermission(BasePermission):
"""
Checks that the request user is a group admin
"""
def has_permission(self, request, view):
group = Group.objects.get(id=view.kwargs.get('group_id'))
if group.checkUserPermission(request.user, 'admin') == False and request.user.is_staff == False:
return False
return True
class GroupPermission(BasePermission):
"""
    Checks that the request user is a group admin or has group edit permission
"""
def has_permission(self, request, view):
group = Group.objects.get(id=view.kwargs.get('group_id'))
if group.checkUserPermission(request.user, 'admin') == False \
and group.checkUserPermission(request.user, 'edit') == False \
and request.user.is_staff == False:
return False
return True
|
import math
import itertools
from collections import defaultdict
import pprint
# part 1 stuff
with open("input1.txt","r") as f:
data = f.readlines()
points = []
for y in range(len(data)):
for x in range(len(data[0])-1):
if data[y][x] == '#':
points.append([x,y])
maxSighted = 0
keyPoint = [0,0,-1]
for i in range(len(points)):
point = points[i]
visible = []
for j in range(len(points)):
if i==j:
continue
newPoint = points[j]
angle = math.atan2(point[1]-newPoint[1],point[0]-newPoint[0])+math.pi
angle = math.degrees(angle)
if angle not in visible:
visible.append(angle)
if len(visible)>maxSighted:
maxSighted=len(visible)
keyPoint=[point[0], point[1], i]
print("Base at {}, with sight of {} asteroids".format(keyPoint, maxSighted))
# part 2
roids = defaultdict(list)
# We have to reorganise the data to consider distances too
def getDistToPoint(px, py, px2, py2):
return math.sqrt((px-px2)**2 + (py-py2)**2)
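# Bucket the remaining asteroids by their angle from the base; the laser sweeps the sorted
# angles, vaporizing only the closest asteroid at each angle on every rotation.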
for j in range(len(points)):
if keyPoint[2]==j:
continue
newPoint = points[j]
angle = math.atan2(keyPoint[1]-newPoint[1],keyPoint[0]-newPoint[0])+(math.pi*3/2)
angle = math.degrees(angle)
if angle>= 360:
angle-=360
roids[angle].append([newPoint, getDistToPoint(keyPoint[0], keyPoint[1], newPoint[0], newPoint[1])])
def generateKeysToIterateThrough(roids):
keys = []
for key in roids.keys():
keys.append(key)
keys.sort()
return keys
destroyCount = 0
while destroyCount<202:
keys = generateKeysToIterateThrough(roids)
for key in keys:
        asteroidSelection = roids[key]
        if not asteroidSelection:  # every asteroid at this angle is already destroyed
            continue
        minIndex = 0
        minDist = 99999
for asteroid in asteroidSelection:
if minDist > asteroid[1]:
minIndex = asteroidSelection.index(asteroid)
minDist = asteroid[1]
destroyCount+=1
if destroyCount == 200:
print(roids[key][minIndex][0][0]*100+roids[key][minIndex][0][1])
del roids[key][minIndex]
|
x1,y11,z11=map(int,input().split())
d13=(x1*y11)//z11
print(d13)
|
import random
#NOTES FROM DO NOW:
#1. Work with integers
#2. Random number generator
#3. A way to give commands(controls)
#FEATURES TO ADD:
#1. How to keep the program running until i quit
#2. Use numberOfRolls variable to show multiple die rolls
#3. Add roll totaling features (sum/highest/lowest)
#4. More human interface
myRolls= []
def diceEngine():
#a while loop will run forever until it's deliberately exited
while True:
dieType = input("How many sides should the die have? ")
numberOfRolls = input("How many times do you want to roll? ")
print("*Roll die*")
#use a loop to iterate through the number of rolls
for x in range(0, int(numberOfRolls)):
myRolls.append(random.randint(1, int(dieType)))
print("Here are your rolls: ()".format(myRolls))
print("Your roll total was: ()".format(sum(myRolls)))
print("Your highest roll was: ()".format(max(myRolls)))
print("Your lowest roll was: ()".format(min(myRolls)))
#add a way to exit the loop
rollAgain = input("Would you like to roll again? y / n? ")
if rollAgain == "n":
break
#add a way to clear the list for the next roll
clearList = input("Do you want me to clear your list of rolls? y / n ")
if clearList == "y":
            myRolls.clear()
if __name__== "__main__":
diceEngine()
|
import asyncio
import json
import wave
import websockets
import app
from playground.noise_reduction.denoiser import Denoiser
class VoskAudioRecognizer(app.AudioRecognizer):
def __init__(self, host):
self.host = host
def parse_recognizer_result(self, recognizer_result):
return app.RecognizedWord(
word=app.Word(recognizer_result['word']),
begin_timestamp=recognizer_result['start'],
end_timestamp=recognizer_result['end'],
probability=recognizer_result['conf'],
)
def recognize_wav(self, audio):
recognizer_results = asyncio.get_event_loop().run_until_complete(
self.send_audio_to_recognizer(audio.name)
)
recognized_words = list(map(self.parse_recognizer_result, recognizer_results))
return app.RecognizedAudio(recognized_words)
def recognize(self, audio):
temp_wav_file = app.convert_from_mp3_to_wav(audio)
Denoiser.process_wav_to_wav(temp_wav_file, temp_wav_file, noise_length=3)
return self.recognize_wav(temp_wav_file)
|
import unittest
from pyfiles.model import characterClass
EXPECTED_VALUES = ['Fighter', 'Spellcaster', 'Rogue']
class TestCharacterClass(unittest.TestCase):
def test_get_values(self):
values = characterClass.CharacterClass.get_values()
self.assertEqual(values, EXPECTED_VALUES)
def test_get_json_options(self):
json_options = characterClass.CharacterClass.get_json_options()
self.assertEqual(json_options, {'options':EXPECTED_VALUES})
|
from django.contrib import admin
from .models import Aparcamiento, AparcaSeleccionado, Comentario, Css
admin.site.register(Aparcamiento)
admin.site.register(AparcaSeleccionado)
admin.site.register(Comentario)
admin.site.register(Css)
|
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
import lightgbm as lgb
# Global constants
MAX_LAG = 57
def downcast(df, verbose=False):
"""
Downcast the data to reduce memory usage.
Adapted from: https://www.kaggle.com/ragnar123/very-fst-model
Args:
df = [pd.DataFrame] pandas dataframe
verbose = [boolean] if True, print memory reduction
Returns [pd.DataFrame]:
Downcasted data.
"""
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose:
print(f'Mem. usage decreased to {end_mem:5.2f} Mb ({(start_mem - end_mem) / start_mem:.1%} reduction)')
return df
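# Example: a column of small ints stored as int64 would be recast to int8 above,
# cutting that column's memory footprint roughly 8x.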
def obj_as_cat_int(df, ignore=[]):
"""
Convert object columns to categorical integers.
Args:
df = [pd.DataFrame] pandas dataframe
ignore = [list] list of columns to ignore in conversion
Returns [pd.DataFrame]:
Data where object columns are encoded as categorical integers.
"""
obj_cols = df.select_dtypes(include='object').columns
for col in obj_cols:
if col not in ignore:
df[col] = df[col].astype('category')
df[col] = df[col].cat.codes.astype("int16")
df[col] -= df[col].min()
return df
def optimize_df(calendar, prices, sales, days=None, val_days=0, verbose=False):
"""
Optimize dataframe.
Args:
calendar = [pd.DataFrame] dates of product sales
prices = [pd.DataFrame] price of the products sold per store and date
sales = [pd.DataFrame] historical daily unit sales data per product and store
days = [int] number of days to keep
val_days = [int] number of validation days
verbose = [boolean] if True, print memory reduction
Returns [[pd.DataFrame] * 3]
Optimized dataframes.
"""
    assert days is None or days >= MAX_LAG, f"Minimum days is {MAX_LAG}."
assert val_days >= 0, "Invalid number of validation days."
calendar['date'] = pd.to_datetime(calendar['date'], format='%Y-%m-%d')
if val_days:
sales = sales.drop(sales.columns[-val_days:], axis=1)
if days:
sales = sales.drop(sales.columns[6:-days], axis=1)
calendar = downcast( obj_as_cat_int(calendar, ignore=['d']), verbose )
prices = downcast( obj_as_cat_int(prices), verbose )
sales = downcast( obj_as_cat_int(sales, ignore=['id']), verbose )
return calendar, prices, sales
def melt_and_merge(calendar, prices, sales, submission=False):
"""
Convert sales from wide to long format, and merge sales with
calendar and prices to create one dataframe.
Args:
calendar = [pd.DataFrame] dates of product sales
prices = [pd.DataFrame] price of the products sold per store and date
sales = [pd.DataFrame] historical daily unit sales data per product and store
submission = [boolean] if True, add day columns required for submission
Returns [pd.DataFrame]:
Merged long format dataframe.
"""
id_cols = ['id', 'item_id', 'dept_id','store_id', 'cat_id', 'state_id']
if submission:
last_day = int(sales.columns[-1].replace('d_', ''))
sales.drop(sales.columns[6:-MAX_LAG], axis=1, inplace=True)
for day in range(last_day + 1, last_day + 28 + 1):
sales[f'd_{day}'] = np.nan
df = pd.melt(sales,
id_vars=id_cols,
var_name='d',
value_name='sales')
df = df.merge(calendar, on='d', copy = False)
df = df.merge(prices, on=['store_id', 'item_id', 'wm_yr_wk'], copy=False)
return df
def features(df, submission=False):
"""
Create lag and rolling mean features.
Adapted from: https://www.kaggle.com/kneroma/m5-first-public-notebook-under-0-50
Args:
df = [pd.DataFrame] long format dataframe
submission = [boolean] if True, do not drop NaN rows
Returns [pd.DataFrame]:
Dataframe with created features.
"""
lags = [7, 28]
lag_cols = [f"lag_{lag}" for lag in lags]
for lag, lag_col in zip(lags, lag_cols):
df[lag_col] = df[["id", "sales"]].groupby("id")["sales"].shift(lag)
windows = [7, 28]
for window in windows :
for lag, lag_col in zip(lags, lag_cols):
lag_by_id = df[["id", lag_col]].groupby("id")[lag_col]
df[f"rmean_{lag}_{window}"] = lag_by_id.transform(lambda x: x.rolling(window).mean())
date_features = {
"wday": "weekday",
"week": "weekofyear",
"month": "month",
"quarter": "quarter",
"year": "year",
"mday": "day"
}
for name, attribute in date_features.items():
if name in df.columns:
df[name] = df[name].astype("int16")
else:
df[name] = getattr(df["date"].dt, attribute).astype("int16")
if not submission:
df.dropna(inplace=True)
return df
def training_data(df):
"""
Split data into features and labels for training.
Args:
df = [pd.DataFrame] pandas dataframe
Returns [[pd.DataFrame] * 2]:
X = training features
y = training labels
"""
drop_cols = ["id", "date", "sales", "d", "wm_yr_wk", "weekday"]
keep_cols = df.columns[~df.columns.isin(drop_cols)]
X = df[keep_cols]
y = df["sales"]
return X, y
def lgb_dataset(calendar, prices, sales):
"""
Make LightGBM training and validation datasets from preprocessed dataframes.
NOTE: preprocessed means that categorical features have been converted to integers.
Args:
calendar = [pd.DataFrame] dates of product sales
prices = [pd.DataFrame] price of the products sold per store and date
sales = [pd.DataFrame] historical daily unit sales data per product and store
Returns [[lgb.Dataset] * 2]:
train_set = LightGBM training dataset
val_set = LightGBM validation dataset
"""
df = melt_and_merge(calendar, prices, sales)
df = features(df)
X, y = training_data(df)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)
cat_features = ['item_id', 'dept_id', 'store_id', 'cat_id', 'state_id'] + \
['event_name_1', 'event_name_2', 'event_type_1', 'event_type_2']
train_set = lgb.Dataset(X_train,
label=y_train,
categorical_feature=cat_features)
val_set = lgb.Dataset(X_test,
label=y_test,
categorical_feature=cat_features)
return train_set, val_set
def data_frames(path):
"""
Load the data from storage into pd.DataFrame objects.
Args:
path = [str] path to folder with competition data
Returns [[pd.DataFrame] * 3]:
calendar = [pd.DataFrame] dates of product sales
prices = [pd.DataFrame] price of the products sold per store and date
sales = [pd.DataFrame] historical daily unit sales data per product and store
"""
path = Path(path)
calendar = pd.read_csv(path / 'calendar.csv')
prices = pd.read_csv(path / 'sell_prices.csv')
sales = pd.read_csv(path / 'sales_train_validation.csv')
return calendar, prices, sales
if __name__ == "__main__":
pass
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy  # database layer; requires MySQL-python
import os
from flask_script import Manager
from flask import session, redirect, url_for  # redirects and user sessions
from flask import flash  # flash messages
# form handling
from flask_wtf import Form
from wtforms import StringField, SubmitField  # text field and submit button
from wtforms.validators import Required  # validators
# from wtforms import PasswordField  # password input field
from flask_moment import Moment  # localized timestamps
from datetime import datetime  # datetime module
app = Flask(__name__)
manager = Manager(app)
moment = Moment(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
# model classes
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64), unique = True)
users = db.relationship('User', backref = 'role',lazy = 'dynamic')
    # the backref parameter adds a 'role' attribute to the User model, defining the reverse relationship
def __repr__(self):
return '<Role %r >' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(64), unique = True, index = True )
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) # foreign key
def __repr__(self):
return '<User %r >' % self.username
|
import zipfile
import wget
import glob
import os
import torch
import argparse
import pandas as pd
from tqdm import tqdm
from utils import overwrite_base, Logger
from configs.config import GlobalConfig
import mmcv
from mmcv import Config
from mmdet.apis import set_random_seed
from mmdet.datasets import build_dataset, build_dataloader
from mmdet.models import build_detector
from mmdet.apis import train_detector
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
def detector_train(cfg):
model = build_detector(cfg.model)
datasets = [build_dataset(cfg.data.train)]
train_detector(model, datasets, cfg, distributed=False, validate=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='vinbigdata')
parser.add_argument('--image-size', type=int, default=1024,
help='image size for training')
parser.add_argument('--num-epochs', type=int, required=True,
help='number of training epoch')
parser.add_argument('--fold-num', type=int, required=True,
help='fold number for training')
args = parser.parse_args()
os.chdir('mmdetection')
#Overwrite
config = GlobalConfig
config.num_epochs = args.num_epochs
config.image_size = args.image_size
config.fold_num = args.fold_num
if not os.path.exists(config.log_path):
os.makedirs(config.log_path)
#Init logger
logger = Logger(config)
logger.write('Using GPU {} \n'.format(torch.cuda.get_device_name(0)))
#Read base config file
logger.write("Reading config from: {}".format(config.config_file))
base_cfg = Config.fromfile(config.config_file)
#Download pretrained model
config.model_path = os.path.join(config.pretrain_store_path, config.pretrain_url.split('/')[-1])
if not os.path.exists(config.pretrain_store_path):
os.makedirs(config.pretrain_store_path)
logger.write("Downloading pretrained weights: {}\n".format(config.pretrain_url))
wget.download(config.pretrain_url, config.model_path)
else:
logger.write("Pretrained model already in cache \n")
# Edit configuration settings
final_config = overwrite_base(base_cfg, config, is_train=True)
with open(os.path.join(config.config_path, config.model_name+'.py'), 'w') as f:
f.write(final_config.pretty_text)
#Train
logger.write(f'Begin training Fold {config.fold_num}... \n')
detector_train(final_config)
logger.write(f'Finished training Fold {config.fold_num}! \n')
logger.close()
|
'''
Main.py Starting File
'''
import os
import numpy as np
import tensorflow as tf
from model import Model
from plot import Plot
from game import Game
#import matplotlib.pyplot as plt
class Main:
def __init__(self):
self.feature_length = 6
self.label_length = 4
self.cost_plot = Plot([], 'Step', 'Cost')
self.accuracy_plot = Plot([], 'Step', 'Accuracy')
self.checkpoint = 'data/checkpoints/turn_based_ai.ckpt'
self.X = tf.placeholder(tf.float32, [None, self.feature_length])
self.Y = tf.placeholder(tf.float32, [None, self.label_length])
self.model = Model(self.X, self.Y)
self.global_step = 0
self.training_data_x = np.empty((0, self.feature_length))
self.training_data_y = np.empty((0, self.label_length))
self.test_training_data_x = np.empty((0, self.feature_length))
self.test_training_data_y = np.empty((0, self.label_length))
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# def add_training_data(self, features, labels):
# self.training_data_x = np.concatenate((self.training_data_x, features), axis=0)
# self.training_data_y = np.concatenate((self.training_data_y, labels), axis=0)
def add_training_data(self, features, labels, add_to_test_data):
self.training_data_x = np.concatenate((self.training_data_x, features), axis=0)
self.training_data_y = np.concatenate((self.training_data_y, labels), axis=0)
if add_to_test_data:
self.test_training_data_x = np.concatenate((self.test_training_data_x, features), axis=0)
self.test_training_data_y = np.concatenate((self.test_training_data_y, labels), axis=0)
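    # Build the 6-feature input: attack/defence/health for both combatants, each
    # normalized by its maximum.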
def get_data_for_prediction(self, user, opponent):
#data = np.array([1, 0.4, 1, 1, 0.4, 1])#Default starting data (not great)
#if user != None:
data = np.array([user.attack / user.max_attack,
user.defence / user.max_defence,
user.health / user.max_health,
opponent.attack / opponent.max_attack,
opponent.defence / opponent.max_defence,
opponent.health / opponent.max_health
])
return np.reshape(data, (-1, self.feature_length))
def start(self, restore):
train = True
#players_turn = True
player_goes_first = True
saver = tf.train.Saver()
with tf.Session() as sess:
if restore:
saver.restore(sess, self.checkpoint)
else:
sess.run(tf.global_variables_initializer())
while train:
game = Game(player_goes_first, self.feature_length, self.label_length)
# if player_goes_first:
# players_turn = True
# else:
# players_turn = False
player_goes_first = not player_goes_first
# game_over = False
# user = None
# opponent = None
while not game.game_over:
predicted_action = 0
if game.players_turn is False:
#Predict opponent's action
data = self.get_data_for_prediction(game.user, game.opponent)
#print('opponents\'s view: {}'.format(data))
predicted_actions = sess.run(self.model.prediction, { self.X: data })[0]
#predicted_actions = sess.run(tf.nn.sigmoid(predicted_actions))
predicted_action = np.argmax(predicted_actions) + 1
#Play Game
did_player_win = game.run(predicted_action)
#game_over, players_turn, user, opponent, training_data = game.run(predicted_action)
# if game.game_over and training_data == None:
# train = False
# elif game_over:
# #record winning data
# self.add_training_data(training_data.feature, training_data.label)
if game.game_over and did_player_win == None:
train = False
elif game.game_over:
#record winning data
if did_player_win:
self.add_training_data(game.player_training_data.feature, game.player_training_data.label, False)
else:
self.add_training_data(game.opponent_training_data.feature, game.opponent_training_data.label, False)
#Train
if train:
for _ in range(50):
training_data_size = np.size(self.training_data_x, 0)
random_range = np.arange(training_data_size)
np.random.shuffle(random_range)
for i in range(training_data_size):
random_index = random_range[i]
_, loss = sess.run(self.model.optimize, { self.X: np.reshape(self.training_data_x[random_index], (-1, self.feature_length)), self.Y: np.reshape(self.training_data_y[random_index],(-1, 4))})
#_, loss = sess.run(model.optimize, { X: self.training_data_x, Y: self.training_data_y })
self.global_step += 1
current_accuracy = sess.run(self.model.error, { self.X: self.training_data_x, self.Y: self.training_data_y })
self.cost_plot.data.append(loss)
self.accuracy_plot.data.append(current_accuracy)
print('Saving...')
saver.save(sess, self.checkpoint)
print('Epoch {} - Loss {} - Accuracy {}'.format(self.global_step, loss, current_accuracy))
#weights = sess.run(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='layer_1/weights:0'))[0]
#Move out into class
# plt.close('all')
# plt.figure()
# plt.imshow(weights, cmap='Greys_r', interpolation='none')
# plt.xlabel('Nodes')
# plt.ylabel('Inputs')
# plt.show()
# plt.close()
self.cost_plot.save_sub_plot(self.accuracy_plot,
"data/charts/{} and {}.png".format(self.cost_plot.y_label, self.accuracy_plot.y_label))
#using tensorboard
#E:
#tensorboard --logdir=Logs
#http://localhost:6006/
Main().start(False)
|
# Read a natural number N from a command-line argument and print only the first N lines of the input.
# Verify against the head command.
# head -n 5 hightemp.txt
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
assert len(sys.argv) == 2, "usage: python nock_14.py [N]"
N = int(sys.argv[1])
with open('hightemp.txt') as f:
print(''.join(f.readlines()[:N]),end="")
# count=0
# for line in f.readlines() :
# if count is N:
# break
# else:
# print(line,end="")
# count+=1
|
from datetime import datetime
from tkinter import *
win = Tk()
win.geometry("600x100")
win.title("What time??")
win.option_add("*Font","맑은고딕 8")
def what_time():
dnow = datetime.now()
btn.config(text=dnow)
btn = Button(win)
btn.config(text="현재 시각")
btn.config(width=30)
btn.config(command=what_time)
btn.pack()
win.mainloop()
|
# Generated by Django 3.0.1 on 2019-12-24 17:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paroll', '0005_auto_20191225_0104'),
]
operations = [
migrations.AddField(
model_name='account',
name='address',
field=models.CharField(default=0, max_length=200000),
preserve_default=False,
),
migrations.AlterField(
model_name='account',
name='bank_account',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='paycheck',
name='deduction',
field=models.CharField(max_length=200000),
),
migrations.AlterField(
model_name='paycheck',
name='salary',
field=models.CharField(max_length=2000000),
),
]
|
#!/usr/bin/env python
import array
import math
import sys
import time
numb = array.array('l',[0]*1000000)
numbsize = 0
echostep=10
#tcount = 0
targ = {}
chash = {}
def DeDupes():
global numbsize
dupes=0
uniqid=0
for i in range(1,numbsize):
if numb[i] == numb[uniqid]:
dupes+=1
else:
uniqid+=1
numb[uniqid] = numb[i]
numbsize = uniqid + 1
return dupes
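# Hoare-style partition around the pivot numb[lo]; returns the pivot's final index.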
def rearrange2Dir(lo,hi):
i = lo+1
j = hi
while (1):
while (numb[i]<numb[lo])and(i<hi):
i+=1
while (numb[j]>numb[lo])and(j>lo):
j-=1
if i>=j:
break
numb[i],numb[j] = numb[j],numb[i]
i+=1
j-=1
numb[lo],numb[j] = numb[j],numb[lo]
return j
def QuickSort2Dir(lo,hi):
pivot = rearrange2Dir(lo,hi)
if (pivot-lo>1): # left part of numb is bigger than 1
QuickSort2Dir(lo,pivot-1)
if (hi-pivot>1):
QuickSort2Dir(pivot+1,hi)
def Load(FileName):
global numbsize
print "loading from",FileName,"...",
inFile = open (FileName, 'r', 0)
numbsize=0
for line in inFile:
numb[numbsize]=long(line)
numbsize+=1
print "loaded",numbsize,'numbers'
def Rank(lo,hi,v): # return max id | numb[id] <= v
if lo >= hi:
return lo
med = lo+int(math.ceil((hi-lo)/2.))
if v > numb[med]:
return Rank(med,hi,v)
elif v < numb[med]:
return Rank(lo,med-1,v)
else: # v == nubm[med]
return med
def CalcTargNaive():
# global tcount
print 'Naive method'
print 'sorting...',
QuickSort2Dir( 0, numbsize-1 )
print 'done ',
print DeDupes(), 'dupes deleted'
split=time.time()
for i in range(numbsize-1):
if (i)%int(math.ceil(numbsize/1./echostep)) == 0: print str(i*100/numbsize)+'%'
for j in range(i+1,numbsize):
sum = numb[i]+numb[j]
if abs(sum) <= 10000:
targ[sum]=1
# tcount+=1
print 'len(targ.keys()) =',len(targ.keys()), ' time elapsed:',time.time()-split,'sec'
def CalcTargBSearch():
# global tcount
print 'Binary Search method'
print 'sorting...',
QuickSort2Dir( 0, numbsize-1 )
print 'done ',
print DeDupes(), 'dupes deleted'
split=time.time()
for i in range(numbsize-1):
j = Rank(i+1,numbsize-1,-10000-numb[i])
if numb[j] < -10000-numb[i]:
j+=1
k = Rank(j-1,numbsize-1,10000-numb[i])
for l in range(j,k+1):
sum = numb[i] + numb[l]
if abs(sum) <= 10000:
targ[sum]=1
# tcount += 1
else:
exit('ERROR abs(sum) of ['+str(i)+'] and ['+str(l)+'] is bigger than 10000')
print 'len(targ.keys()) =',len(targ.keys()), ' time elapsed:',time.time()-split,'sec'
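# Hash-bucket method: group numbers by floor(value/10000). For any x in bucket k, a y with
# abs(x + y) <= 10000 can only live in buckets -k-2 .. -k+1, so just four buckets are scanned.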
def fill_chash():
for i in range(numbsize):
key = numb[i]/10000
if key in chash:
if numb[i] not in chash[key]:
chash[key].append(numb[i])
else:
chash[key]=[numb[i]]
def count_dupes_in_chash():
c=0
for key in chash:
for i in range(len(chash[key])-1):
for j in range(i+1,len(chash[key])):
if chash[key][i] == chash[key][j]:
print chash[key]
c+=1
return c
def CalcTarg_chash():
print 'chash method'
fill_chash()
split=time.time()
for xkey in chash:
for x in chash[xkey]:
ykeylist = [-xkey-2, -xkey-1, -xkey, -xkey+1]
for ykey in ykeylist:
if ykey in chash:
for y in chash[ykey]:
sum = x+y
                        if abs(sum) <= 10000 and x != y:  # match the bound used by the other methods
targ[sum]=1
print 'len(targ.keys()) =',len(targ.keys()), ' time elapsed:',time.time()-split,'sec'
if len(sys.argv)==2:
FileName = sys.argv[1]
else:
FileName = 'algo1_programming_prob_2sum.txt'
Load( FileName )
CalcTarg_chash()
CalcTargBSearch()
#CalcTargNaive()
#print 'tcount =',tcount,
|
import sys
import requests
from requests.api import head
from bs4 import BeautifulSoup
import pandas as pd
import xlsxwriter
from datetime import datetime
companies = []
base_url = "https://www.finanzen.net/bilanz_guv/"
user_agent = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"}
writer = pd.ExcelWriter("Bilanzen_" + str(datetime.date(datetime.now())) + ".xlsx",engine='xlsxwriter', options={'strings_to_numbers': True})
workbook=writer.book
def get_args():
if (len(sys.argv)) == 1:
print("Usage: guvscraper.py company1 company2 company1337")
sys.exit()
else:
for arg in range(1, len(sys.argv)):
companies.append(str(sys.argv[arg]))
def get_guv(companies):
for company in companies:
URL = base_url + company + ""
print("\n" + URL)
page = requests.get(URL, headers=user_agent)
soup = BeautifulSoup(page.content, "html.parser")
name = ""
try:
name = soup.find("h2", {"class":"font-resize"} ).get_text()
boxTableList = soup.findAll('div', attrs={"class" : "box table-quotes"})
headlineList = soup.findAll('h2', attrs={"class" : "box-headline"})
except AttributeError as err:
print("Share could not be retrieved! Wrong name?")
continue
#Export to HTML
#with open(company + ".html", "w", encoding='utf-8') as file:
# file.write(str(boxTableList))
print(name+"\n")
#print(boxTableList)
dflist = pd.read_html(str(boxTableList), decimal=',', thousands='.')
print("Writing to .xlsx.....")
write_to_xlsx(dflist, company, headlineList, name)
writer.save()
def write_to_xlsx(dataframelist, company, headlines, name):
headlinerow = 3
row = 3
spacing = 3
#Setup excel file and formatting
worksheet_name = company + " Bilanzen"
print(worksheet_name)
worksheet=workbook.add_worksheet(worksheet_name)
writer.sheets[worksheet_name] = worksheet
bold = workbook.add_format({'bold': True})
#write company headline
worksheet.write(1, 0, name, bold)
for x in range(len(dataframelist)):
#write GUV data
        dataframelist[x] = dataframelist[x].drop(dataframelist[x].columns[0], axis=1)
dataframelist[x].to_excel(writer,sheet_name=worksheet_name,startrow=row , startcol=0, index = False )
row += (len(dataframelist[x]) + spacing)
#write headlines
worksheet.write(headlinerow, 0, str(headlines[x].get_text()), bold)
headlinerow += (len(dataframelist[x]) + spacing)
#Set A column size to 60px
worksheet.set_column(0, 0, 60)
get_args()
get_guv(companies)
print("\nDone!")
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Base Class for Protocols
from .protocol import Protocol
# Utility Classes for Protocols
from .header import Header
from .frame import Frame
from .link import *
from .internet import *
from .transport import *
from .application import *
# Protocol Chain
from .utilities import ProtoChain
# Info Classes
from .utilities import Info
from .header import VersionInfo
|
# coding=utf-8
"""
preInit.py
Desc:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
import ConfigParser
import argparse
import os
import sys
try:
from statusdocke import checkRunner
from configTest import TestRunner
from logger import logger
except ImportError:
sys.path.append(os.getenv('PY_DEV_HOME'))
from webTest_pro.common.statusdocke import checkRunner
from webTest_pro.configTest import TestRunner
from webTest_pro.common.logger import logger
def _getcfgpath(initconf):
"""
Function: _getcfgpath()
Desc:
Args:
-
Return: None
Usage:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
home_path = os.getenv("PY_DEV_HOME")
if initconf == 'dev':
cfg_path = home_path + '\webTest_pro\cfg\init_dev.conf'
elif initconf == 'debug':
cfg_path = home_path + '\webTest_pro\cfg\init_debug.conf'
elif initconf == 'test':
cfg_path = home_path + '\webTest_pro\cfg\init_test.conf'
elif initconf == 'wf':
cfg_path = home_path + '\webTest_pro\cfg\init_wf.conf'
else:
cfg_path = home_path + '\webTest_pro\cfg\init_default.conf'
return cfg_path
def setcfghost(platformhost, mediahost, initconf, active_code):
"""
Function: setcfghost()
Desc:
Args:
-
Return: None
Usage:
Maintainer: wangfm
CreateDate: 2016/12/6
"""
cfg_path = _getcfgpath(initconf)
cf = ConfigParser.ConfigParser()
cf.read(cfg_path)
original_platform_add = cf.get('basedata', 'addr')
original_media_add = cf.get('streaming_media', 'serverIps')
original_active_code = cf.get('basedata', 'active_code')
logger.info("###################configure info###################")
if platformhost is not None:
cf.set('basedata', 'addr', platformhost)
cf.set('db_conf', 'host', platformhost)
cf.set('db_conf', 'hostadd', platformhost)
logger.info("original platform add:{0} modfied to {1}".format(original_platform_add, platformhost))
else:
logger.info("waring: platform host address not configured.")
if mediahost is not None:
cf.set('streaming_media', 'serverIps', mediahost)
logger.info("original media add:{0} modfied to {1}".format(original_media_add, mediahost))
else:
logger.warning("waring: media host address not configured.")
if active_code is not None:
cf.set('basedata', 'active_code', active_code)
logger.info("original media add:{0} modfied to {1}".format(original_active_code, mediahost))
else:
logger.warning("waring: active_code not configured.")
cf.write(open(cfg_path, "w"))
logger.info("Configuration success: {}".format(cfg_path.split('\\')[-1]))
def parseargs():
"""
Function: parse_args()
Desc: CLI
Args:
-
Return: args
Usage:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
description = 'example: python Runner.py -f dev x.x.x.x x.x.x.x -r case'
parser = argparse.ArgumentParser(description=description)
helph = 'The host IP address of the platform.'
parser.add_argument('host', help=helph)
helpm = 'The host IP address of the media service.'
parser.add_argument('media', help=helpm, nargs='?')
helpa = 'The active code of the platform.'
parser.add_argument('-a', '--active', help=helpa)
    parser.add_argument('-r', '--runner', help='Run mode: run test cases or only check service status.', choices=['case', 'check'])
    parser.add_argument('-f', '--file', help='Init config profile to use.', choices=['dev', 'test', 'default', 'debug', 'wf'])
_args = parser.parse_args()
return _args
def preinit():
"""
Function: preinit()
    Desc: take the IP addresses, update the config file, and return the platform IP
Args:
-
Return: platfrom ip address
Usage:
Maintainer: wangfm
CreateDate: 2016/12/7
"""
_args = parseargs()
host = _args.host
media = _args.media
init_conf = _args.file
runner = _args.runner
active_code = _args.active
if runner is None:
# config conf file
setcfghost(host, media, init_conf, active_code)
else:
if runner == 'check':
# check docker status
setcfghost(host, media, init_conf, active_code)
checkRunner(host)
else:
# run testcase
setcfghost(host, media, init_conf, active_code)
if checkRunner(host) is True:
logger.info("...")
_runner = TestRunner(init_conf)
_runner.run()
else:
logger.info("Service error")
if __name__ == '__main__':
preinit()
|
#The upper() String Method
fruit = 'Apple'
print(fruit.upper())
|
import numpy as np
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
N = 100
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
#output_file("legend.html", title="legend.py example")
TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"
p2 = figure(title="Another Legend Example", tools=TOOLS)
p2.circle(x, y, legend="sin(x)")
p2.line(x, y, legend="sin(x)")
p2.line(x, 2*y, legend="2*sin(x)",
line_dash=[4, 4], line_color="orange", line_width=2)
p2.square(x, 3*y, legend="3*sin(x)", fill_color=None, line_color="green")
p2.line(x, 3*y, legend="3*sin(x)", line_color="green")
output_notebook()
show(p2)# open a browser
|
from flask import request, redirect, url_for
from flask_restful import Resource, marshal_with
from ..fields import Fields
from app.models.models import Sale, Product, SaleGroup, User, db
from app.forms import CreateSaleForm
from .sale_group import SaleGroupListAPI
sale_fields = Fields().sale_fields()
class SaleListAPI(Resource):
@marshal_with(sale_fields)
def get(self):
sales = Sale.query.all()
return sales
@marshal_with(sale_fields)
def post(self):
return {}
class SaleAPI(Resource):
@marshal_with(sale_fields)
def get(self, id):
sale = Sale.query.get(id)
return sale
@marshal_with(sale_fields)
def delete(self, id):
sale = Sale.query.get(id)
db.session.delete(sale)
db.session.commit()
sales = Sale.query.all()
return sales
@marshal_with(sale_fields)
def put(self, id):
return {}
class SaleCheckoutAPI(Resource):
# @marshal_with(sale_fields)
def post(self):
data = request.json
amount = data.get("amount")
paid = data.get("paid")
sales = data.get("cart")
sale_models = []
for product_id, sale in sales.items():
product = Product.query.filter_by(id=product_id).first()
create_sale_form = CreateSaleForm(data={"quantity":sale.get("quantity")}, meta={'csrf': False})
if create_sale_form.validate_on_submit():
data = create_sale_form.data
quantity = data.get("quantity")
sale = Sale(quantity=quantity, buying_price=product.buying_price, selling_price=product.selling_price, product_id=product_id)
sale_models.append(sale)
product.quantity -= quantity
current_user = User.query.filter_by(token=request.args.get("token")).first()
sale_group = SaleGroup(amount=amount, paid=paid, user_id=current_user.id)
sale_group.sales = sale_models
db.session.add_all(sale_models)
db.session.add(sale_group)
db.session.commit()
db.session.close()
return SaleGroupListAPI.get(SaleGroupListAPI)
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^createGenesisNode/', views.createGenesisNode),
url(r'^createChildNode/', views.createChildNode),
    url(r'^editNode/', views.editNode),
url(r'^findLongestChain/', views.findLongestChain),
]
'''
API ENDPOINTS
1. localhost:8000/createGenesisNode
2. localhost:8000/createChildNode
3. localhost:8000/editNode
4. localhost:8000/findLongestChain
'''
|
#2019.07.20-KimSeokMin
# Required libraries: selenium, bs4 (BeautifulSoup)
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from bs4 import BeautifulSoup as BS
from multiprocessing.pool import Pool, ThreadPool
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
#from selenium.common.exceptions import TimeoutException
import threading
import re
import csv
casenum = 1
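# Parse the case-id anchors (id="py_<contId>") out of a results page; returns [index, case id] pairs.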
def getCaseNum(html):
global casenum
bs = BS(html, "lxml")
csnum = bs.find_all("a",{"class":"layer_pop_open"})
arr = []
for i in csnum:
cs = i.get('id')
cs = cs.replace("py_","")
arr += [[casenum,int(cs)]]
print(casenum, cs)
casenum+=1
return arr
def getCase(case):
f = open("case.csv", mode = "a", encoding = 'utf-8', newline = '')
wr = csv.writer(f)
driver = get_driver()
link = url2+str(case[1])
driver.get(link)
WebDriverWait(driver,5).until(EC.presence_of_element_located((By.CLASS_NAME, 'page')))
time.sleep(2.5)
html = driver.page_source
bs = BS(html, 'lxml')
prescripts = bs.find("div",{"class":"page"})
if prescripts == None:
return
else:
scripts = prescripts.find_all("p")
result = ''
for script in scripts:
strong_elements = script.find_all("strong")
for strong in strong_elements:
strong.extract()
for script in scripts:
result += script.get_text()
wr.writerow([case[0], case[1], result])
print(case[0])
f.close()
threadLocal = threading.local()
def get_driver():
driver = getattr(threadLocal, 'driver', None)
if driver is None:
options = webdriver.ChromeOptions()
prefs = {'profile.default_content_setting_values': {'cookies': 2, 'images': 2, 'javascript': 2,
'plugins': 2, 'popups': 2, 'geolocation': 2,
'notifications': 2, 'auto_select_certificate': 2, 'fullscreen': 2,
'mouselock': 2, 'mixed_script': 2, 'media_stream': 2,
'media_stream_mic': 2, 'media_stream_camera': 2, 'protocol_handlers': 2,
'ppapi_broker': 2, 'automatic_downloads': 2, 'midi_sysex': 2,
'push_messaging': 2, 'ssl_cert_decisions': 2, 'metro_switch_to_desktop': 2,
'protected_media_identifier': 2, 'app_banner': 2, 'site_engagement': 2,
'durable_storage': 2}}
options.add_argument('headless')
options.add_experimental_option('prefs', prefs)
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
options.add_argument('window-size=1920x1080')
options.add_argument("disable-gpu")
options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
driver = webdriver.Chrome(path, chrome_options=options)
setattr(threadLocal, 'driver', driver)
return driver
path = "D:\chromedriver\chromedriver"
url = "https://glaw.scourt.go.kr/wsjo/panre/sjo050.do"
url2 = "https://glaw.scourt.go.kr/wsjo/panre/sjo100.do?contId="
checknum = 1
def CaseNum():
global checknum
    #1. Open the Supreme Court of Korea case-law search site
case = list()
driver = get_driver()
driver.get(url)
time.sleep(2)
search_box = driver.find_element_by_name("srchw")
search_box.send_keys("손해배상")
driver.find_element_by_xpath('//*[@id="search"]/div[2]/fieldset/a[1]').click()
driver.find_element_by_xpath('//*[@id="search"]/div[2]/fieldset/div/p/a').click()
    #2. Crawl the case numbers from each results page
for i in range(10):
if i==0:
html = driver.page_source
case += getCaseNum(html)
time.sleep(0.3)
driver.find_element_by_xpath('//*[@id="tabwrap"]/div/div/div[1]/div[3]/div/fieldset/p/a[1]').click()
checknum +=1
elif(i>=1 and i<=9):
html = driver.page_source
case += getCaseNum(html)
time.sleep(0.3)
driver.find_element_by_xpath('//*[@id="tabwrap"]/div/div/div[1]/div[3]/div/fieldset/p/a[2]').click()
checknum +=1
elif(i>=10 and i<622):
html = driver.page_source
case += getCaseNum(html)
time.sleep(0.5)
button = WebDriverWait(driver,5).until(EC.presence_of_element_located((By.XPATH,'//*[@id="tabwrap"]/div/div/div[1]/div[3]/div/fieldset/p/a[3]')))
button.click()
checknum += 1
if i==622:
html = driver.page_source
case += getCaseNum(html)
time.sleep(0.3)
driver.find_element_by_xpath('//*[@id="tabwrap"]/div/div/div[1]/div[3]/div/fieldset/p/a[3]').click()
html = driver.page_source
case += getCaseNum(html)
time.sleep(0.3)
f = open("casenum.csv", mode="w", encoding = 'utf-8', newline = '')
wr = csv.writer(f)
for cs in case:
wr.writerow([cs[0], cs[1]])
f.close()
#1. First, generate casenum.csv
CaseNum()  # only needed on the first run; remove afterwards
print(checknum)
f = open('casenum.csv', mode='r', encoding='utf-8')
rd = csv.reader(f)
arr = list(rd)  # list of [index, case number] rows
f.close()
case = arr[:1000]  # take arr[0] through arr[999]; slice to however many you need
#2. Generate case.csv. Important: use mode='w' on the first run and mode='a' afterwards
f2 = open("case.csv", mode = "w", encoding = 'utf-8', newline = '')
wr2 = csv.writer(f2)
wr2.writerow(['caseindex','casenum', 'script'])
f2.close()
ThreadPool(6).map(getCase, case)
get_driver().quit()  # quit this thread's driver; worker drivers are thread-local
|
# Creating/writing files in Python
file = open('aula57/abcd.txt', 'w+')
file.write('Linha 1\n')
file.write('\t Linha 2\n')
file.write('\t\t Linha 3\n')
file.seek(0, 0)
print('Reading... ')
print(file.read())
#
print(f'----------------------')
file.seek(0, 0)
print(file.readline(), end='')
print(file.readline(), end='')
#
print(f'----------------------')
file.seek(0, 0)
print(file.readlines())
#
print(f'----------------------')
file.seek(0, 0)
for linha in file.readlines():
print(linha, end='Hahahahaha')
#
file.close()
|
def main():
iList = [None]
with open('test.txt') as f:
firstLine = f.readline().split(' ')
kSize, n = int(firstLine[0]), int(firstLine[1])
for line in f:
tempList = line.split(' ')
iList.append((int(tempList[0]), int(tempList[1])))
print(knapsack(n, kSize, iList))
def knapsack(n, kSize, iList):
"""
knapsack(n, kSize, iList):
Function to find max value of knapsack, with space O(m), where
m = max weight allowed
Use for huge inputs, where reconstructing final set doesn't matter
"""
solutions = [[None for i in range(kSize + 1)] for i in range(2)]
for x in range(kSize + 1):
solutions[0][x] = 0
for i in range(1, n + 1):
value, weight = iList[i]
for x in range(kSize + 1):
if weight > x:
solutions[1][x] = solutions[0][x]
else:
solutions[1][x] = max(solutions[0][x], solutions[0][x - weight] + value)
solutions[0] = solutions[1].copy()
return solutions[1][kSize]
main()
|
#!/usr/bin/env python
from comet_ml import Experiment
import argparse
import os
import sys
from datetime import datetime
import warnings
warnings.simplefilter("ignore")
import keras
import tensorflow as tf
from keras_retinanet import models
from keras_retinanet.utils.keras_version import check_keras_version
#Custom Generators and callbacks
#Path hack
sys.path.insert(0, os.path.abspath('..'))
from DeepForest.onthefly_generator import OnTheFlyGenerator
from DeepForest.evaluation import neonRecall
from DeepForest.evalmAP import evaluate
from DeepForest import preprocess
from DeepForest.utils.generators import create_NEON_generator
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.')
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--model', help='Path to RetinaNet model.')
parser.add_argument('--convert-model', help='Convert the model to an inference model (ie. the input is a training model).', action='store_true')
parser.add_argument('--backbone', help='The backbone of the model.', default='resnet50')
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.05, type=float)
parser.add_argument('--iou-threshold', help='IoU Threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float)
parser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=200, type=int)
parser.add_argument('--suppression-threshold', help='Permitted overlap among predictions', default=0.2, type=float)
parser.add_argument('--save-path', help='Path for saving images with detections (doesn\'t work for COCO).')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=400)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
return parser.parse_args(args)
def main(DeepForest_config, args=None, experiment=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
    #Add separate dir
#save time for logging
dirname=datetime.now().strftime("%Y%m%d_%H%M%S")
# make save path if it doesn't exist
if args.save_path is not None and not os.path.exists(args.save_path + dirname):
os.makedirs(args.save_path + dirname)
#Evaluation metrics
site=DeepForest_config["evaluation_site"]
#create the NEON mAP generator
NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)
# load the model
print('Loading model, this may take a second...')
model = models.load_model(args.model, backbone_name=args.backbone, convert=args.convert_model, nms_threshold=DeepForest_config["nms_threshold"])
#print(model.summary())
#NEON plot mAP
average_precisions = evaluate(
NEON_generator,
model,
iou_threshold=args.iou_threshold,
score_threshold=args.score_threshold,
max_detections=args.max_detections,
        save_path=(args.save_path + dirname) if args.save_path else None,
experiment=experiment
)
return average_precisions
if __name__ == '__main__':
import sys, os
sys.path.insert(0, os.path.abspath('..'))
#Model list
trained_models = {"SJER":"/orange/ewhite/b.weinstein/retinanet/20190715_133239/resnet50_30.h5",
"TEAK":"/orange/ewhite/b.weinstein/retinanet/20190713_230957/resnet50_40.h5",
"NIWO":"/orange/ewhite/b.weinstein/retinanet/20190712_055958/resnet50_40.h5",
"MLBS":"/orange/ewhite/b.weinstein/retinanet/20190712_035528/resnet50_40.h5",
"All":"/orange/ewhite/b.weinstein/retinanet/20190715_123358/resnet50_40.h5"}
import pandas as pd
import numpy as np
from DeepForest.config import load_config
DeepForest_config = load_config("..")
results = []
for training_site in trained_models:
        # Evaluation sites are passed as single-element lists.
sites = [["TEAK"],["SJER"],["NIWO"],["MLBS"]]
for eval_site in sites:
model = trained_models[training_site]
#pass an args object instead of using command line
args = [
"--batch-size", str(DeepForest_config['batch_size']),
'--score-threshold', str(DeepForest_config['score_threshold']),
'--suppression-threshold','0.15',
'--save-path', '../snapshots/images/',
'--model', model,
'--convert-model'
]
#Run eval
DeepForest_config["training_site"] = training_site
DeepForest_config["evaluation_site"] = eval_site
#Comet
experiment = Experiment(api_key="ypQZhYfs3nSyKzOfz13iuJpj2", project_name='deeplidar', log_code=False)
experiment.log_parameter("mode","cross_site")
experiment.log_parameters(DeepForest_config)
average_precisions = main(DeepForest_config, args, experiment=experiment)
            # print evaluation
present_classes = 0
precision = 0
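            # Mean AP over classes that actually have ground-truth annotations.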
for label, (average_precision, num_annotations) in average_precisions.items():
if num_annotations > 0:
present_classes += 1
precision += average_precision
            NEON_map = round(precision / present_classes, 3)
results.append({"Training Site":training_site, "Evaluation Site": eval_site, "mAP": NEON_map})
results = pd.DataFrame(results)
#model name
model_name = os.path.splitext(os.path.basename(model))[0]
print(results)
results.to_csv("cross_site.csv")
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import Rectangle
import seaborn as sns
from astropy.table import Table
from dust_blorentz_ode import streamline
try:
thB_degrees = float(sys.argv[1])
LFAC = float(sys.argv[2])
MACH_ALFVEN = float(sys.argv[3])
YPLANE = float(sys.argv[4])
except (IndexError, ValueError):
sys.exit(f"Usage: {sys.argv[0]} FIELD_ANGLE LORENTZ_FORCE MACH_ALFVEN YPLANE")
suffix = f"b{int(thB_degrees):02d}-L{int(100*LFAC):04d}-Ma{int(10*MACH_ALFVEN):04d}-Y{int(100*YPLANE):+04d}"
figfile = sys.argv[0].replace('.py', f'-{suffix}.jpg')
sns.set_style('white')
sns.set_color_codes()
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(6, 3.3))
ny0, nz0 = 31, 1
y0grid = np.linspace(-2.0, 2.0, int(1.5*ny0))  # np.linspace needs an integer sample count
z0grid = [0.0]
xmin, xmax = [-7, 3]
zmin, zmax = [-2, 2]
ymin, ymax = [-4, 4]
# show the y = YPLANE plane
p = Rectangle((xmin, zmin), xmax - xmin, zmax - zmin,
edgecolor='none', facecolor='g', alpha=0.15)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=YPLANE, zdir="y")
cthB = np.cos(np.radians(thB_degrees))
sthB = np.sin(np.radians(thB_degrees))
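# Draw the in-plane magnetic field lines, rotated by thB, along the bottom of the box (z = zmin).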
for xx in np.linspace(-15, 15, 31):
yy1, yy2 = -15, 15
x1 = -xx*sthB + yy1*cthB
x2 = -xx*sthB + yy2*cthB
y1 = xx*cthB + yy1*sthB
y2 = xx*cthB + yy2*sthB
x = np.linspace(x1, x2, 200)
y = np.linspace(y1, y2, 200)
m = (x >= xmin) & (y >= ymin) & (x <= xmax) & (y <= ymax)
x[~m] = np.nan
ax.plot(x, y, zs=zmin, zdir='z', lw=2, alpha=0.5, color='c')
nt = 3001
tstop = 60
X0 = 10
# Time index corresponding to stellar passage, approx
it0 = int(nt*X0/tstop)
zcolors = ['k']
# Random velocity of grains - assume at Alfven speed, at high pitch angle to field
v_turb_0 = 1.0/MACH_ALFVEN
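# Cap on the cosine of the pitch angle: small values keep grains nearly perpendicular to B.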
mu_p_max = 0.1
# Find all the trajectories
trajectories = []
for y0 in y0grid:
# Pitch angle to field
c_p = np.random.uniform(0.0, mu_p_max)
s_p = np.sqrt(1.0 - c_p**2)
# Azimuth around field
phi_B = np.random.uniform(0.0, 2*np.pi)
spB, cpB = np.sin(phi_B), np.cos(phi_B)
# Magnitude and sign
v_turb = np.random.normal(loc=0.0, scale=v_turb_0)
# Components in frame of B-field
vBx = v_turb*c_p
vBy = v_turb*s_p*cpB
vBz = v_turb*s_p*spB
# Now transform to flow frame
cthB, sthB = np.cos(np.radians(thB_degrees)), np.sin(np.radians(thB_degrees))
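    # Rotate from the field frame into the flow frame; the -1.0 term is the bulk inflow along -x.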
u0 = vBx*cthB - vBy*sthB - 1.0
v0 = vBx*sthB + vBy*cthB
w0 = vBz
s = streamline(X0=X0, Y0=YPLANE, Z0=y0, U0=u0, V0=v0, W0=w0,
thB=np.radians(thB_degrees),
tstop=tstop, n=nt, LFAC=LFAC)
if ymin <= s['y'][it0] <= ymax:
trajectories.append(s)
# Sort according to z coordinate near the star
trajectories.sort(key=lambda s: s['z'][it0], reverse=False)
ycolors = sns.color_palette('magma_r', n_colors=len(trajectories))
star_done = False
for iy0, s in enumerate(trajectories):
x, y, z = s['x'], s['y'], s['z']
# implement clipping by hand
m = (x >= xmin) & (x <= xmax)
m = m & (y >= ymin) & (y <= ymax)
m = m & (z >= zmin) & (z <= zmax)
x[~m] = np.nan
# Draw the star before we get to the first positive z value
if s['z'][it0] > 0.0 and not star_done:
ax.plot([0, 0], [0, 0], [zmin, zmax], '--', color='k', lw=0.3)
ax.plot([xmin, xmax], [0, 0], [0, 0], '--', color='k', lw=0.3)
ax.plot([0, 0], [ymin, ymax], [0, 0], '--', color='k', lw=0.3)
star = ax.plot([0], [0], [0], 'o', color='r')
star_done = True
ax.plot(x, y, z, '-', color='w', lw=1.0)
ax.plot(x, y, z, '-', color=ycolors[iy0], lw=0.7)
ax.auto_scale_xyz([xmin, xmax], [ymin, ymax], [zmin, zmax])
ax.set(
xlabel='$x/R_{0}$',
ylabel='$y/R_{0}$',
zlabel='$z/R_{0}$',
xticks=range(xmin, xmax+1),
yticks=range(ymin, ymax+1),
zticks=[-2, -1, 0, 1, 2],
xlim=[xmin, xmax],
ylim=[ymin, ymax],
zlim=[zmin, zmax],
)
ax.azim = +15
ax.elev = 30
text = "$" + r" \quad\quad ".join([
fr"\theta_B = {thB_degrees:.0f}^\circ",
fr"F_B / F_\mathrm{{rad}} = {LFAC:.1f}",
fr"\mathcal{{M}}_A = {MACH_ALFVEN:.1f}",
fr"y_0 = {YPLANE:.4f}"]) + "$"
fig.text(0.1, 0.9, text)
fig.tight_layout(rect=[0, 0, 1.05, 1.0])
fig.savefig(figfile, dpi=600)
print(figfile, end='')
|
from myStock_handler.stock_day_handler import regist_day, get_stock_day
import datetime
from myAI.stock_litm import LSTM_model
from myAI.stock_DNN01 import DNN01_model
# from myAI.stock_DNN02 import DNN02_model
# from myAI.stock_DNN03 import DNN03_model
from myAI.stock_DNN02_multidatum import DNN02_multi_model
from myAI.stock_DNN04 import DNN04_model
from my_util.util import loadHist, saveHist, saveModel
import numpy as np
if __name__ == '__main__':
modName="DNN02"
num=6502
interval = 5
date_begin=datetime.date(2018, 1, 1)
date_end=datetime.date(2020, 12, 30)
path="/Users/ryo/Documents/StockData/"+str(num)+"_"+str(date_begin)+"to"+str(date_end)+".csv"
path_minute="/Users/ryo/Documents/StockData_min"+ str(interval) +"/"+str(num)+"_"+str(date_begin)+"to"+str(date_end)+".csv"
path_hist = "/Users/ryo/Work/python_work/myAI_data/"+ modName+ "_" + datetime.datetime.now().strftime("%Y-%m-%d")+".json"
path_model = "/Users/ryo/Work/python_work/myAI_data/"+ modName+ "_" + datetime.datetime.now().strftime("%Y-%m-%d")
path_predict=""
# is_success, data = get_stock_day(num, date_begin, date_end)
is_success = True
jj=100
    ## daily data
data = np.loadtxt(path, delimiter=",", dtype="int64, U20, float, float, float, float,int64")
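    # Columns are presumably: code, date, open, high, low, close, volume.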
    ## Load moving-average data (200/75/50/25-day)
path_ma200="/Users/ryo/Documents/StockData/"+str(num)+"_"+str(date_begin)+"to"+str(date_end)+"_ma200.csv"
path_ma75="/Users/ryo/Documents/StockData/"+str(num)+"_"+str(date_begin)+"to"+str(date_end)+"_ma75.csv"
path_ma50="/Users/ryo/Documents/StockData/"+str(num)+"_"+str(date_begin)+"to"+str(date_end)+"_ma50.csv"
path_ma25="/Users/ryo/Documents/StockData/"+str(num)+"_"+str(date_begin)+"to"+str(date_end)+"_ma25.csv"
data_ma200 = np.loadtxt(path_ma200, delimiter=",", dtype="int64, U20, float, float, float, float,int64, U20")
data_ma75 = np.loadtxt(path_ma75, delimiter=",", dtype="int64, U20, float, float, float, float,int64, U20")
data_ma50 = np.loadtxt(path_ma50, delimiter=",", dtype="int64, U20, float, float, float, float,int64, U20")
data_ma25 = np.loadtxt(path_ma25, delimiter=",", dtype="int64, U20, float, float, float, float,int64, U20")
    ## minute data (currently unused)
# data_minute = np.loadtxt(path_minute, delimiter=",", dtype="int64, U30, float, float, float, float, int64, int64")
# data = np.loadtxt(path, delimiter=",", dtype=["int64", "str", "float", "float", "float", "float", "int64"])
print('File loading is ' + str(is_success))
if is_success:
        print('Date range is ' + str(data[0][1]) + " to " + str(data[-1][1]))
        print('Data size is ' + str(len(data)))
# data_np=np.zeros([len(data_minute),len(data_minute[0])-3])
data_np=np.zeros([len(data),len(data[0])-2])
data_np_ma200=np.zeros([len(data_ma200),len(data_ma200[0])-2])
data_np_ma75=np.zeros([len(data_ma75),len(data_ma75[0])-2])
data_np_ma50=np.zeros([len(data_ma50),len(data_ma50[0])-2])
data_np_ma25=np.zeros([len(data_ma25),len(data_ma25[0])-2])
# print('Date range is ' + str(data_minute[1][0]) + " to " + str(data_minute[1][-1]))
# print('Data size is ' + str(len(data_minute[:][0])))
# # data_np=np.zeros([len(data_minute),len(data_minute[0])-3])
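    # Copy the numeric fields (dropping the code and date columns) into plain float arrays.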
for i in range(len(data)):
for j in range(len(data[1])-2):
data_np[i,j] = data[i][j+2]
data_np_ma200[i,j] = data_ma200[i][j+2]
data_np_ma75[i,j] = data_ma75[i][j+2]
data_np_ma50[i,j] = data_ma50[i][j+2]
data_np_ma25[i,j] = data_ma25[i][j+2]
print(data_np[:int(len(data_np)/2),0])
# model = LSTM_model()
# model = DNN01_model()
# model = DNN02_model()
# model = DNN03_model()
model = DNN04_model()
# model = DNN02_multi_model()
boo, history = model.train_model(data_np[:-150])
saveHist(path_hist, model.history)
saveModel(path_model, model.model)
success_ai, predicted = model.predict(data_np[-150:], path_predict)
print("predicted", predicted)
|
import pandas as pd
import unittest
import os
import pyterrier as pt
from .base import BaseTestCase
class TestFeaturesBatchRetrieve(BaseTestCase):
def test_fbr_ltr(self):
JIR = pt.autoclass('org.terrier.querying.IndexRef')
indexref = JIR.of(self.here + "/fixtures/index/data.properties")
retr = pt.FeaturesBatchRetrieve(indexref, ["WMODEL:PL2"])
topics = pt.Utils.parse_trec_topics_file(self.here + "/fixtures/vaswani_npl/query-text.trec").head(3)
qrels = pt.Utils.parse_qrels(self.here + "/fixtures/vaswani_npl/qrels")
res = retr.transform(topics)
res = res.merge(qrels, on=['qid', 'docno'], how='left').fillna(0)
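        # Join relevance labels onto the retrieved results; unjudged documents get label 0.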
from sklearn.ensemble import RandomForestClassifier
import numpy as np
print(res.dtypes)
RandomForestClassifier(n_estimators=10).fit(np.stack(res["features"]), res["label"])
def test_fbr(self):
JIR = pt.autoclass('org.terrier.querying.IndexRef')
indexref = JIR.of(self.here + "/fixtures/index/data.properties")
retr = pt.FeaturesBatchRetrieve(indexref, ["WMODEL:PL2"])
        queries = pd.DataFrame([["1", "Stability"]], columns=['qid', 'query'])
        result = retr.transform(queries)
self.assertTrue("qid" in result.columns)
self.assertTrue("docno" in result.columns)
self.assertTrue("score" in result.columns)
self.assertTrue("features" in result.columns)
self.assertTrue(len(result) > 0)
self.assertEqual(result.iloc[0]["features"].size, 1)
retrBasic = pt.BatchRetrieve(indexref)
if "matching" in retrBasic.controls:
self.assertNotEqual(retrBasic.controls["matching"], "FatFeaturedScoringMatching,org.terrier.matching.daat.FatFull")
if __name__ == "__main__":
unittest.main()
|
__version__ = "0.9.94.dev3"
|