text stringlengths 8 6.05M |
|---|
from __future__ import print_function
from cms.sitemaps import CMSSitemap
from django.conf.urls import *  # NOQA
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from django.conf import settings
from django.http import HttpResponse
from django.views.generic.base import RedirectView

admin.autodiscover()

# NOTE(review): patterns() and string view references are Django < 1.10
# style — this URLconf targets an old Django release; do not modernize
# individual entries piecemeal.
urlpatterns = patterns('',
    url(r'^subscriptions/', include('subscriptions.urls', namespace='subscriptions')),
    url(r'^favicon\.ico/$', RedirectView.as_view(url='/static/favicon.ico')),
    url(r'^admin/', include(admin.site.urls)),  # NOQA
    url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',
        {'sitemaps': {'cmspages': CMSSitemap}}),
    url(r'^select2/', include('django_select2.urls')),
    # Catch-all: everything else is resolved by django-cms pages.
    url(r'^', include('cms.urls')),
)
# This is only needed when using runserver.
if settings.DEBUG:
    # In DEBUG, serve media files and a deny-all robots.txt locally,
    # and prepend the staticfiles routes to the production patterns.
    urlpatterns = patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',  # NOQA
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
        url(r'^robots.txt$', lambda r: HttpResponse("User-agent: *\nDisallow: /", content_type="text/plain")),
    ) + staticfiles_urlpatterns() + urlpatterns  # NOQA
|
# A function calling itself from inside its own body (recursion)
# Compute the factorial of n
# Plain (loop-based) implementation
# def test(n):
# sum = 1
# if n >= 1:
# for i in range(1, n+1):
# # print(i)
# sum *= i
# return sum
# else:
# return n
# Recursive implementation
def jiecheng(n):
    """Return n! (factorial), computed recursively.

    Treats any n <= 1 (including 0 and negatives) as the base case with
    value 1.  The original returned ``n`` in the base case, which made
    ``jiecheng(0)`` wrongly return 0 instead of 1 (0! == 1).
    """
    if n <= 1:
        return 1
    return n * jiecheng(n - 1)

print(jiecheng(7))
'''
Properties of recursion:
1. The function calls itself.
2. There must be a termination condition.
3. Anything recursion can solve, a loop can solve as well.
4. Recursion is often much less efficient.
'''
# Fibonacci sequence, starting values 0 and 1
# 1,2,3,5,8,13,21,34,55
def fei(n):
    """Iterative Fibonacci variant.

    Starts the pair at (0, 1) and performs n + 1 update steps, so
    fei(0) == 1, fei(1) == 1, fei(8) == 34.
    """
    prev, curr = 0, 1
    for _ in range(n + 1):
        prev, curr = curr, prev + curr
    return prev

print(fei(8))
def feibo(n):
    """Recursive Fibonacci variant: feibo(1) == 1, feibo(2) == 2,
    feibo(n) == feibo(n-1) + feibo(n-2) for n > 2."""
    return n if n <= 2 else feibo(n - 1) + feibo(n - 2)

print(feibo(1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Resize images and copy them to the static folder
"""
import os, sys
import json
import shutil
from PIL import Image
import numpy as np
import urllib.parse
# for getting dominany color
# from colorthief import ColorThief
# Maximum number of (newest) images processed per category; a falsy value
# disables the cut-off.
IMAGECUTOFF = 100
try:
    os.makedirs('artworks-json')
except FileExistsError:
    pass
# PARSE IMAGES
# Walk the 'artworks' tree and index every image by its category (the first
# directory level below the root) together with its modification time.
root = 'artworks'
filetypes = ['png', 'jpg', 'jpeg', 'webp']
images = {}
imagecounter = {}  # NOTE(review): never read in this section — presumably leftover
for path, subdirs, files in os.walk(root):
    for name in files:
        ending = name.split(".")[-1]
        if not ending in filetypes:
            continue
        # Category = first path component below the root: 'artworks/<cat>/...'
        # (assumes '/' separators, i.e. a POSIX filesystem).
        cat = path.split("/")[1]
        img_path = os.path.join(path, name)
        mod_date = int(os.path.getmtime(img_path))
        try:
            # EAFP: append when the category list already exists ...
            images[cat].append( (img_path, mod_date) )
        except:
            # ... otherwise create it (bare except also hides unrelated errors)
            images[cat] = [ (img_path, mod_date) ]
for cat in images.keys():
    # sort by modification date, newest first
    images[cat].sort(key=lambda x: int(x[1]), reverse=True)
    # only process the latest x = IMAGECUTOFF images for each category
    if IMAGECUTOFF:
        images[cat] = images[cat][0:IMAGECUTOFF]
    outfile = f'artworks-json/{cat}.json'
    with open(outfile, 'w') as f:
        json.dump(images[cat], f)
    print(f"({len(images[cat])})\t{cat}: {outfile}")
# special processing for the latest images (because there is no folder for them)
# get latest 50 images
latest = []
for cat in images.keys():
    for img in images[cat]:
        latest.append(img)
latest.sort(key=lambda x: int(x[1]), reverse=True)
latest = latest[0:49]  # NOTE(review): keeps 49 items although the comment says 50
with open(f'artworks-json/latest.json', 'w') as f:
    json.dump(latest, f)
def rgb_to_hex(r, g, b):
    """Return the RRGGBB hex string for an (r, g, b) triple of 0-255 ints.

    Uses '{:02X}' so each component is zero-padded to two digits; the
    original '{:X}' produced invalid colors for components < 16
    (e.g. (0, 0, 0) -> '000' instead of '000000').
    """
    return '{:02X}{:02X}{:02X}'.format(r, g, b)
# PROCESS IMAGES
# Read the per-category JSON indexes written above, resize each image to a
# 400px-wide webp thumbnail under 'public/', compute its average color, and
# write chunked JSON manifests for the website.
root = 'artworks-json'
filetypes = ['json']
outroot = 'public'
chunkspath = 'public/artworks/json'
try:
    os.makedirs(chunkspath)
except FileExistsError:
    pass
for path, subdirs, files in os.walk(root):
    imagecounter = 0
    for name in files:
        jsonfile = os.path.join(path, name)
        imgs = json.load(open(jsonfile, 'r'))
        convertedimgs = []
        for info in imgs:
            # info is (img_path, mod_date) as dumped by the indexing pass.
            imgpath = info[0]
            if name == 'latest.json':
                cat = 'latest'
            else:
                cat = imgpath.split('/')[1]
            outpath = os.path.join(outroot, imgpath)
            # Swap the file extension for '.webp'.
            webpoutpath = outpath.split('.')
            webpoutpath[-1] = "webp"
            webpoutpath = ".".join(webpoutpath)
            im = Image.open(imgpath)
            # resize with aspect ratio preserved, fixed 400px width
            w, h = im.size
            ar = w/h
            width = 400
            height = int(400/ar)
            try:
                # Average over rows, then over columns -> mean RGB.
                average_color_row = np.average(im, axis=0)
                average_color = np.average(average_color_row, axis=0)
                average_color = [ int(x) for x in average_color ]
                average_color = "#" + rgb_to_hex(average_color[0], average_color[1], average_color[2])
            except:
                # Fallback light grey (e.g. for grayscale/paletted images).
                average_color = "#" + rgb_to_hex(222, 222, 222)
            # cache check - skip conversion if the webp file already exists
            if not os.path.isfile(webpoutpath):
                im400 = im.resize((width,height))
                try:
                    os.makedirs(os.path.dirname(outpath))
                except FileExistsError:
                    pass
                im400.save(webpoutpath, "webp")
            # encode e.g. spaces and hashtags in filename
            webpoutpath = urllib.parse.quote(webpoutpath)
            print(webpoutpath)
            # remove 'public/' from output path
            webpoutpath = webpoutpath.split('/')[1:]
            # build the absolute URL served by the site
            webpoutpath = "https://www.jonaso.de/" + "/".join(webpoutpath)
            convertedimgs.append((webpoutpath, width, height, average_color))
        # write json with webp
        # NOTE(review): 'cat' holds the value from the last loop iteration;
        # relies on every entry of one file sharing a category — confirm.
        json.dump(convertedimgs, open(os.path.join(outroot, 'artworks', 'json', f'webp-{cat}.json'), 'w'))
        chunksize = 100
        chunks = [convertedimgs[x:x+100] for x in range(0, len(convertedimgs), 100)]
        total = len(convertedimgs)
        k = 0
        for batch in chunks:
            # detect last iteration: 'next' is False on the final chunk,
            # otherwise the index of the following chunk.
            outbatch = {
                'total': total,
                'items': batch,
            }
            if k == len(chunks) - 1:
                outbatch['next'] = False
            else:
                outbatch['next'] = k+1
            json.dump(outbatch, open(os.path.join(chunkspath, f'webp-{cat}-{k}.json'), 'w'))
            k += 1
# Remove the intermediate index directory.
shutil.rmtree('artworks-json', ignore_errors=True)
|
from __future__ import print_function
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import time
import keyboard
from threading import Thread
import linecache
class Plot_flow():
    """Incrementally redraws a 3-D wireframe of an rx_num x tx_num frame.

    The figure is created lazily on the first plot_function() call and the
    previous wireframe is removed before each redraw.
    """

    def __init__(self, rx_num, tx_num):
        print("Plot 3D")
        self.fig = None
        self.ax = None
        self.rx_num = rx_num
        self.tx_num = tx_num
        # Make the X, Y meshgrid once — every frame shares the same grid.
        xs = np.linspace(0, tx_num - 1, tx_num)
        ys = np.linspace(0, rx_num - 1, rx_num)
        self.X, self.Y = np.meshgrid(xs, ys)
        self.wframe = None

    def plot_function(self, frame_array):
        """Draw one frame; frames with a mismatched shape are skipped."""
        if (int(frame_array.shape[0]) != self.rx_num or int(frame_array.shape[1]) != self.tx_num):
            print("Size error ", "rawdata size = ", frame_array.shape[0], frame_array.shape[1])
            print("Size error ", "RX/TX size = ", self.rx_num, self.tx_num)
            return
        if self.fig is None:  # fix: identity check instead of '== None'
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(111, projection='3d')
        # If a line collection already exists, remove it before drawing.
        # fix: Artist.remove() is the supported API; mutating
        # ax.collections directly fails on Matplotlib >= 3.7.
        if self.wframe:
            self.wframe.remove()
        self.wframe = self.ax.plot_wireframe(self.X, self.Y, frame_array, rstride=1, cstride=1, cmap=cm.Reds)  # rstride & cstride -> downsampling
        # self.wframe = self.ax.plot_surface(self.X, self.Y, frame_array, rstride=1, cstride=1, cmap=cm.Reds)  # https://matplotlib.org/users/colormaps.html
        plt.pause(0.001)

    def close(self):
        """Close the figure and reset lazily-created state."""
        plt.close()
        self.fig = None
        self.ax = None
class Rawdata_plot_mechanism():
    """Parses a raw-data capture file and plots its frames as 3-D wireframes.

    The capture file contains whitespace-separated integer matrices
    (frames) whose rows/columns carry '[NN]' labels, with blank lines
    between frames.  Frames can be replayed automatically
    (auto_animation_drawing) or stepped through with the keyboard
    ('4' = previous frame, '6' = next frame, 'esc' = quit).
    """

    def __init__(self, filename):
        self.tx_num = 0          # columns per frame (from the trailing '[NN]' label)
        self.rx_num = 0          # rows per frame (highest leading '[NN]' label)
        self.line_cursor = 0     # current 1-based line position for linecache
        self.filename = filename
        self.file_lines = 0      # total number of lines in the capture file
        self.rawdata_array = []  # last frame shown (np.ndarray once loaded)
        self.frame_list = []
        # Check the file is readable and count its lines.
        try:
            with open(self.filename, 'r') as f:
                for l in f.readlines():
                    self.file_lines += 1
        except:
            print("File open error")
            return
        self.get_rx_tx_num()
        self.plt_obj = Plot_flow(self.rx_num, self.tx_num)
        self.thread_plot_thread = Thread(target=self.get_keyboard_event, args=([self.plt_obj, self.search_rawdata]))

    def auto_animation_drawing(self):
        """Replay every frame in the file as an animation, then close."""
        frame_array = None
        frame_list = []
        # read raw data
        with open(self.filename, 'r') as f:
            for line in f.readlines():
                # Split into single tokens
                data_row = line.split()
                assign_list = []
                # Keep integer tokens only ('[NN]' labels fail int() and are dropped)
                for data in data_row:
                    try:
                        assign_list.append(int(data))
                    except:
                        pass
                if len(assign_list) != 0:
                    # Drop the extra trailing column so each row has tx_num values.
                    if len(assign_list) > self.tx_num:
                        assign_list.pop()
                    frame_list.append(assign_list)
                # A line without integers terminates the current frame: plot it.
                if len(assign_list) == 0 and frame_list != []:
                    if len(frame_list) > self.rx_num:
                        frame_list.pop()
                    frame_array = np.array(frame_list)
                    self.plt_obj.plot_function(frame_array)
                    frame_list = []
        self.plt_obj.close()

    def parse_raw_data(self):
        """Parse one frame starting at self.line_cursor.

        Returns (frame_array, raw_data): the frame as an np.ndarray (or
        None when nothing was parsed) and the raw text consumed.
        """
        frame_array = None
        frame_list = []
        raw_data = ""
        cur_line = self.line_cursor
        line = linecache.getline(self.filename, cur_line)
        data_row = line.split()
        while len(data_row) > 0:
            line = linecache.getline(self.filename, cur_line)
            raw_data += line
            data_row = line.split()
            assign_list = []
            # Keep integer tokens only.
            for data in data_row:
                try:
                    assign_list.append(int(data))
                except:
                    pass
            if len(assign_list) != 0:
                if len(assign_list) > self.tx_num:
                    assign_list.pop()
                frame_list.append(assign_list)
            # A line without integers ends the frame.
            if len(assign_list) == 0 and frame_list != []:
                if len(frame_list) > self.rx_num:
                    frame_list.pop()
                frame_array = np.array(frame_list)
                break
            cur_line += 1
        return frame_array, raw_data

    def search_rawdata(self, direction):
        """Move the cursor to the previous/next frame header and parse it.

        :param direction: 'previous' or 'forward'.
        :return: (frame_array, raw_text) — empty list/str when no further
                 frame exists in that direction.
        """
        frame_array = []
        frame_list = []
        if direction == 'previous':
            self.line_cursor -= 1
            # Scan backwards for a line starting with the '[00]' row label.
            while self.line_cursor >= 0:
                line = linecache.getline(self.filename, self.line_cursor)
                data_row = line.split()
                if len(data_row) > 0 and data_row[0].find('[00]') >= 0:
                    frame_array, frame_list = self.parse_raw_data()
                    break
                self.line_cursor -= 1
            # avoid less than 0
            if self.line_cursor < 0:
                self.line_cursor = 0
        if direction == 'forward':
            self.line_cursor += 1
            # Scan forwards for the next '[00]' frame header.
            while self.line_cursor <= self.file_lines:
                line = linecache.getline(self.filename, self.line_cursor)
                data_row = line.split()
                if len(data_row) > 0 and data_row[0].find('[00]') >= 0:
                    frame_array, frame_list = self.parse_raw_data()
                    break
                self.line_cursor += 1
            # avoid over max line
            # BUGFIX: the original assigned the other way around
            # (self.file_lines = self.line_cursor), which grew the recorded
            # file length on every overrun instead of clamping the cursor.
            if self.line_cursor > self.file_lines:
                self.line_cursor = self.file_lines
        return frame_array, frame_list

    def get_rx_tx_num(self):
        """Derive rx_num/tx_num from the '[NN]' row/column labels in the file."""
        status = 0
        with open(self.filename, 'r') as f:
            for line in f.readlines():
                data_row = line.split()
                if len(data_row) > 0:
                    # get TX NUM from a trailing '[NN]' label
                    if data_row[-1].find('[') >= 0 and data_row[-1].find(']') > 0:
                        tx = (int(data_row[-1][1:-1]))
                        self.tx_num = tx
                        status += 1
                    # get RX NUM: highest leading '[NN]' label seen so far
                    if data_row[0].find('[') >= 0 and data_row[0].find(']') > 0:
                        rx = (int(data_row[0][1:-1]))
                        if rx > self.rx_num:
                            self.rx_num = rx
                if status == 2:
                    break

    def dynamic_plot(self):
        """Start keyboard-driven stepping through the file on a worker thread."""
        self.line_cursor = 0
        linecache.clearcache()
        self.thread_plot_thread.start()

    def get_keyboard_event(self, plt_obj, search_rawdata):
        """Poll the keyboard: '4' previous frame, '6' next frame, 'esc' quit."""
        while True:
            try:
                if keyboard.is_pressed('4'):
                    self.rawdata_array, rawdata = search_rawdata('previous')
                    print(rawdata)
                    time.sleep(0.1)  # crude debounce
                elif keyboard.is_pressed('6'):
                    self.rawdata_array, rawdata = search_rawdata('forward')
                    print(rawdata)
                    time.sleep(0.1)
                elif keyboard.is_pressed('esc'):
                    print("Exit")
                    break
                else:
                    # NOTE(review): comparing an ndarray with [] relies on
                    # NumPy's list-comparison fallback — confirm intent.
                    if self.rawdata_array != []:
                        plt_obj.plot_function(self.rawdata_array)  # keep drawing
            except:
                print("break")
                break

    def __del__(self):
        # BUGFIX: plt_obj does not exist when __init__ bailed out on an
        # unreadable file; guard so garbage collection cannot raise.
        plt_obj = getattr(self, 'plt_obj', None)
        if plt_obj is not None:
            plt_obj.close()
def main():
    """Entry point: step through a hard-coded capture file interactively."""
    plotter = Rawdata_plot_mechanism("20190625_11-02-26.txt")
    # plotter.auto_animation_drawing()
    plotter.dynamic_plot()


if __name__ == "__main__":
    main()
|
import socket
import time

# Minimal interactive TCP client: reads a line from stdin, sends it to a
# local server on port 8800, and prints the (UTF-8) reply.
sk = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sk.connect(('127.0.0.1', 8800))
while True:
    inp = input('>>>>>:').strip()
    # NOTE(review): an empty input sends zero bytes, after which recv()
    # may block forever waiting for a reply — confirm server behavior.
    sk.send(inp.encode('utf-8'))
    data = sk.recv(1024)
    print(str(data,'utf-8'))
|
class Solution(object):
    def rotate(self, matrix):
        """Rotate *matrix* 90 degrees clockwise, in place.

        :type matrix: List[List[int]]
        :rtype: List[List[int]]

        The stated contract (LeetCode 48) asks for an in-place rotation,
        but the original built and returned a brand-new list, leaving
        *matrix* untouched.  This version rewrites *matrix* in place —
        clockwise rotation = transpose, then reverse each row — and still
        returns it, so callers that used the old return value keep
        receiving the same rotated rows.
        """
        matrix[:] = [list(row)[::-1] for row in zip(*matrix)]
        return matrix
def imageDriver():
    """Drive Solution.rotate with a sample matrix and print the result."""
    solver = Solution()
    sample = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
    print(solver.rotate(sample))


imageDriver()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 14:08:35 2017
@author: breteau
"""
from fractions import Fraction
from math import cos, sin, pi
from libGeometry import Pt
import numpy as np
from multiprocessing import Pool, Array, Lock
import ctypes
from functools import partial
# Golden-ratio constants for the Penrose deflation functions below.
alpha = Fraction(pi / 5.0)  # 36 degrees in radians, held as the exact Fraction of the float
phi = Fraction(2.0 * cos(alpha))  # 2*cos(pi/5) = golden ratio, about 1.618
coeff_reduc = phi / (1 + phi)  # deflation ratio used by the cut_* functions
def soleil_start(rotation):
    """Build the 'sun' (soleil) start configuration of the tiling.

    :param rotation: global rotation of the pattern, in radians.
    :return: (CV, fl) — five 'cv' quadrilaterals fanning around the
             origin, and an empty 'fl' list.  (Python 2 module: xrange.)
    """
    # if (depart == "soleil"):
    #     n_cv = 5
    #     n_f = 0
    #     dirs = np.arange(0, n_cv)
    #     S_cv = np.zeros((2, n_cv))
    #     C_cv = np.zeros((2, n_cv))
    #     C_cv[0] = np.cos(angle * (1 + 2 * dirs))
    #     C_cv[1] = np.sin(angle * (1 + 2 * dirs))
    #     S_f = C_f = np.zeros((2, 0))
    P0 = Pt((Fraction(0), Fraction(0)))
    # Ten points on a circle of radius phi, one every 36 degrees.
    Points = [Pt((Fraction(phi * sin(i * alpha + rotation)), Fraction(phi * cos(i * alpha + rotation)))) for i in xrange(10)]
    CV = []
    fl = []
    for i in xrange(5):
        # Each piece: origin plus three consecutive circle points
        # (Points[2*i-1] wraps around to Points[-1] when i == 0).
        CV.append( [P0, Points[2*i+1], Points[2*i], Points[2*i-1]] )
    return CV, fl
def etoile_start(rotation):
    """Build the 'star' (etoile) start configuration of the tiling.

    :param rotation: global rotation of the pattern, in radians.
    :return: (CV, fl) — empty 'cv' list, and five 'fl' quadrilaterals
             mixing radius-phi and radius-1 circle points.
    """
    # elif (depart == "etoile"):
    #     n_cv = 0
    #     n_f = 5
    #     dirs = np.arange(0, n_f)
    #     S_f = C_f = np.zeros((2, n_cv))
    #     C_f[0] = np.cos(angle * (1 + 2 * dirs))
    #     C_f[1] = np.sin(angle * (1 + 2 * dirs))
    #     S_cv = C_cv = np.zeros((2, 0))
    P0 = Pt((Fraction(0), Fraction(0)))
    # Outer circle of radius phi and inner circle of radius 1, every 36 degrees.
    Points = [Pt((Fraction(phi * sin(i * alpha + rotation)), Fraction(phi * cos(i * alpha + rotation)))) for i in xrange(10)]
    Points2 = [Pt((Fraction(sin(i * alpha + rotation)), Fraction(cos(i * alpha + rotation)))) for i in xrange(10)]
    CV = []
    fl = []
    for i in xrange(5):
        fl.append( [P0, Points[2*i+1], Points2[2*i], Points[2*i-1]] )
    return CV, fl
def soleil_start_demis(rotation):
    """'Sun' start split into half-pieces: each 'cv' quadrilateral becomes
    two triangles sharing the diagonal (item[0], item[2])."""
    CV, _ = soleil_start(rotation)
    dcv = np.vstack([[[item[0], item[1], item[2]], [item[0], item[3], item[2]]] for item in CV])
    # dfl = np.vstack([[[item[0], item[1], item[2]], [item[0], item[3], item[2]]] for item in fl])
    dfl = []
    return dcv, dfl
def etoile_start_demis(rotation):
    """'Star' start split into half-pieces: each 'fl' quadrilateral becomes
    two triangles sharing the diagonal (item[0], item[2])."""
    _, fl = etoile_start(rotation)
    dcv = []  # np.vstack([[[item[0], item[1], item[2]], [item[0], item[3], item[2]]] for item in CV])
    dfl = np.vstack([[[item[0], item[1], item[2]], [item[0], item[3], item[2]]] for item in fl])
    return dcv, dfl
def cut_fl(Points):
    """Deflate one 'fl' quadrilateral into one 'cv' piece and two
    half-'fl' triangles, using the global ratio coeff_reduc."""
    assert len(Points) == 4
    apex, b, c, d = Points
    m1 = coeff_reduc * (b - apex) + apex
    m3 = coeff_reduc * (d - apex) + apex
    return {
        "cv": [[apex, m1, c, m3]],
        "dfl": [[b, c, m1],
                [d, c, m3]],
    }
def cut_dfl(Points):
    """Deflate one half-'fl' triangle into a half-'cv' and a half-'fl'."""
    assert len(Points) == 3
    apex, b, c = Points
    m1 = coeff_reduc * (b - apex) + apex
    return {
        "dcv": [[apex, m1, c]],
        "dfl": [[b, c, m1]],
    }
def cut_cv(Points):
    """Deflate one 'cv' quadrilateral into two 'cv' pieces and two
    half-'fl' triangles."""
    assert len(Points) == 4
    a, b, c, d = Points
    m1 = coeff_reduc * (a - b) + b
    m2 = coeff_reduc * (c - a) + a
    m3 = coeff_reduc * (a - d) + d
    return {
        "cv": [[b, c, m2, m1],
               [d, m3, m2, c]],
        "dfl": [[a, m2, m1],
                [a, m2, m3]],
    }
def cut_dcv(Points):
    """Deflate one half-'cv' triangle into two half-'cv' pieces and a
    half-'fl'."""
    assert len(Points) == 3
    a, b, c = Points
    m1 = coeff_reduc * (a - b) + b
    m2 = coeff_reduc * (c - a) + a
    # m3 = coeff_reduc * (a - d) + d  (no fourth point in the half-piece)
    return {
        "dcv": [[b, c, m2],
                [b, m1, m2]],
        "dfl": [[a, m2, m1]],
    }
def test_match(dl1, dl2):
    """Two half-pieces match when they share both their first and their
    third vertex (the shared diagonal endpoints)."""
    same_start = dl1[0] == dl2[0]
    same_end = dl1[2] == dl2[2]
    return same_start and same_end
def concatenate_demiP(dl1, dl2):
    """Merge two matched half-pieces into one full piece: keep the three
    vertices of dl1 and append the middle vertex of dl2."""
    merged = [dl1[0], dl1[1], dl1[2]]
    merged.append(dl2[1])
    return merged
def _appariement_process(llist, N, i):
    """Pool worker: find a partner j > i for half-piece i.

    Uses module globals installed by _initProcess():
      shared_arr -- int array of match codes: -2 untreated, -1 matched as
                    slave, x >= 0 master matched with slave x
      lock       -- guards the two shared_arr writes below
    """
    try:
        if shared_arr[i] > -2:  # shared_matched: piece already handled
            return
        for j in xrange(i+1, N):
            if shared_arr[j] > -2:
                continue
            if test_match(llist[i], llist[j]):
                lock.acquire()
                shared_arr[i] = j
                shared_arr[j] = -1
                lock.release()
                return
    except KeyboardInterrupt:
        # Let the parent handle Ctrl-C; just stop this worker quietly.
        return
def _initProcess(shared, lck):
    """Pool initializer: expose the shared match array and lock as globals
    inside each worker process (they cannot be passed per-task because
    multiprocessing cannot pickle them as map() arguments)."""
    global shared_arr
    global lock
    shared_arr = shared
    lock = lck
def match_demis(llist, p_type):
    """Pair up matching half-pieces of *llist* and merge each pair.

    :param llist: array of half-pieces (rows of 3 points).
    :param p_type: NOTE(review): never used in this function — confirm.
    :return: list of full pieces, one per (master, slave) pair.

    Python 2 only: uses xrange and `except KeyboardInterrupt, e` syntax.
    """
    # Author's timing notes:
    # it 5 : 0.364182949066
    # it 10 : 42.1131920815
    # it 12 : 1261.248471
    # with 2 processes it 5 : 0.341376066208
    # it 10 : 43.2017059326
    # it 12 : 1306.8952601
    # Match codes stored in the shared array:
    # -2 : untreated / unmatched
    # -1 : matched (slave)
    # x >= 0 : master matched with slave x
    N = llist.shape[0]
    shared_matched = Array(ctypes.c_int, [-2] * N)
    shared_matched = np.ctypeslib.as_array(shared_matched.get_obj())
    lock = Lock()  # todo: remove the lock (or pass lock=False)?
    p = Pool(7, initializer=_initProcess, initargs=(shared_matched, lock))
    try:
        p.map(partial(_appariement_process, llist, N), xrange(N))
    except KeyboardInterrupt, e:
        p.terminate()
        import sys
        print e
        sys.exit(0)
    # Merge each master with its slave.
    return [concatenate_demiP(llist[i], llist[j]) for i, j in enumerate(shared_matched) if j >= 0]
|
"""
Django settings for CAS project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
Before deployment please see
https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
"""
import os
import re
import sys
from email.utils import getaddresses
from urllib.parse import urlparse
import environ
import ldap
from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
env = environ.Env()
env.read_env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Settings module lives at <project>.settings, so drop the last component.
PROJECT_NAME = ".".join(__name__.split(".")[:-1])
try:
    from .secret_key import SECRET_KEY
except ImportError:
    # First run: generate a key and persist it next to this settings file.
    from django.core.management.utils import get_random_secret_key

    with open(os.path.join(BASE_DIR, PROJECT_NAME, "secret_key.py"), "w+") as f:
        SECRET_KEY = get_random_secret_key()
        f.write("SECRET_KEY = '%s'\n" % SECRET_KEY)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Detect if executed under test
TESTING = any(
    test in sys.argv
    for test in (
        "test",
        "csslint",
        "jenkins",
        "jslint",
        "jtest",
        "lettuce",
        "pep8",
        "pyflakes",
        "pylint",
        "sloccount",
    )
)
DOCKER = env.bool("DOCKER", default=True)
SITE_URL = env.str("SITE_URL")
FORCE_SCRIPT_NAME = env.str("FORCE_SCRIPT_NAME", default="/cas")
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=[urlparse(SITE_URL).hostname])
BEHIND_PROXY = env.bool("BEHIND_PROXY", default=True)
# "Name <addr>, Name <addr>" string parsed with email.utils.getaddresses.
DJANGO_ADMINS = env("DJANGO_ADMINS", default=None)
if DJANGO_ADMINS:
    ADMINS = getaddresses([DJANGO_ADMINS])
    MANAGERS = ADMINS
SUPERUSERS = env.tuple("DJANGO_SUPERUSERS", default=())
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # Third-party apps
    "mama_cas",
    "axes",
    "captcha",
    "corsheaders",
    # Project apps
    "core",
    "general",
]
AUTHENTICATION_BACKENDS = [
    # AxesBackend must come first so axes sees every authentication attempt.
    "axes.backends.AxesBackend",
    "django.contrib.auth.backends.ModelBackend",
]
# LDAP
# The whole block is optional: if the required AUTH_LDAP_* environment
# variables are missing, environ.ImproperlyConfigured propagates to the
# outer handler and LDAP authentication is simply not enabled.
try:
    AUTH_LDAP_CONNECTION_OPTIONS = {
        ldap.OPT_X_TLS_CACERTFILE: "/etc/ssl/certs/ca-certificates.crt",
        ldap.OPT_X_TLS_NEWCTX: 0,
    }
    AUTH_LDAP_SERVER_URI = env.str("AUTH_LDAP_SERVER_URI")
    AUTH_LDAP_BIND_DN = env.str("AUTH_LDAP_BIND_DN")
    AUTH_LDAP_BIND_PASSWORD = env.str("AUTH_LDAP_BIND_PASSWORD")
    try:
        # Preferred: direct DN template (no search needed).
        AUTH_LDAP_USER_DN_TEMPLATE = env.str("AUTH_LDAP_USER_DN_TEMPLATE")
    except environ.ImproperlyConfigured:
        # Fallback: search for the user, in one base or a union of bases.
        AUTH_LDAP_USER_SEARCH_USER_TEMPLATE = env.str(
            "AUTH_LDAP_USER_SEARCH_USER_TEMPLATE"
        )
        try:
            AUTH_LDAP_USER_SEARCH_BASE = env.str("AUTH_LDAP_USER_SEARCH_BASE")
            AUTH_LDAP_USER_SEARCH = LDAPSearch(
                AUTH_LDAP_USER_SEARCH_BASE,
                ldap.SCOPE_SUBTREE,
                AUTH_LDAP_USER_SEARCH_USER_TEMPLATE,
            )
        except environ.ImproperlyConfigured:
            AUTH_LDAP_USER_SEARCH_BASE_LIST = env.list(
                "AUTH_LDAP_USER_SEARCH_BASE_LIST"
            )
            searches = [
                LDAPSearch(x, ldap.SCOPE_SUBTREE, AUTH_LDAP_USER_SEARCH_USER_TEMPLATE)
                for x in AUTH_LDAP_USER_SEARCH_BASE_LIST
            ]
            AUTH_LDAP_USER_SEARCH = LDAPSearchUnion(*searches)
    AUTH_LDAP_USER_ATTR_MAP = env.dict(
        "AUTH_LDAP_USER_ATTR_MAP",
        default={"first_name": "givenName", "last_name": "sn", "email": "mail"},
    )
    AUTH_LDAP_ALWAYS_UPDATE_USER = True
    AUTH_LDAP_CACHE_TIMEOUT = 0
    # Insert LDAP just before ModelBackend so LDAP is tried first.
    AUTHENTICATION_BACKENDS.insert(
        AUTHENTICATION_BACKENDS.index("django.contrib.auth.backends.ModelBackend"),
        "django_auth_ldap.backend.LDAPBackend",
    )
except environ.ImproperlyConfigured:
    pass
# CAS
# Only services on this site's own hostname may use the CAS endpoints.
MAMA_CAS_SERVICES = [
    {
        "SERVICE": r"^http[s]?://{}".format(re.escape(urlparse(SITE_URL).hostname)),
        "CALLBACKS": ["core.utils.get_attributes"],
        "LOGOUT_ALLOW": True,
        # 'LOGOUT_URL': '',
    }
]
MAMA_CAS_ENABLE_SINGLE_SIGN_OUT = True
""" Email settings """
SERVER_EMAIL = "error@%s" % urlparse(SITE_URL).hostname
EMAIL_HOST_USER = env.str("EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = env.str("EMAIL_HOST_PASSWORD", default="")
EMAIL_HOST = env.str("EMAIL_HOST", default="localhost")
EMAIL_PORT = env.int("EMAIL_PORT", default=25)
EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", default=False)
EMAIL_USE_LOCALTIME = env.bool("EMAIL_USE_LOCALTIME", default=True)
EMAIL_SUBJECT_PREFIX = "{} ".format(
    env.str("EMAIL_SUBJECT_PREFIX", default="[CAS]").strip()
)
if DEBUG:
    # In development, dump mails to files instead of sending them.
    EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
    EMAIL_FILE_PATH = os.path.join(BASE_DIR, "..", "tmp", "emails")
    if not os.path.exists(EMAIL_FILE_PATH):
        os.makedirs(EMAIL_FILE_PATH)
""" Https settings """
if SITE_URL.startswith("https"):
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 31536000
X_FRAME_OPTIONS = "DENY"
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"htmlmin.middleware.HtmlMinifyMiddleware",
"htmlmin.middleware.MarkRequestMiddleware",
"axes.middleware.AxesMiddleware",
]
AXES_LOCKOUT_URL = reverse_lazy("locked_out")
AXES_VERBOSE = DEBUG
if BEHIND_PROXY:
MIDDLEWARE += ["general.middleware.SetRemoteAddrFromForwardedFor"]
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
ROOT_URLCONF = "{}.urls".format(PROJECT_NAME)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASE_DIR, "templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
            "debug": DEBUG,
            # Make silently-undefined template variables visible in DEBUG.
            "string_if_invalid": "[invalid variable '%s'!]" if DEBUG else "",
        },
    }
]
WSGI_APPLICATION = "{}.wsgi.application".format(PROJECT_NAME)
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": os.environ.get("POSTGRES_DB", "django_{}".format(PROJECT_NAME)),
        "USER": os.environ.get("POSTGRES_USER", "django_{}".format(PROJECT_NAME)),
        "PASSWORD": os.environ.get(
            "POSTGRES_PASSWORD", "password_{}".format(PROJECT_NAME)
        ),
        # Inside docker-compose the DB host is '<project>-postgres'.
        "HOST": "{}-postgres".format(PROJECT_NAME) if DOCKER else "localhost",
        "PORT": "5432",
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": (
            "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
        )
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en"
TIME_ZONE = "Europe/Vienna"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = (("de", _("German")), ("en", _("English")))
LANGUAGES_DICT = dict(LANGUAGES)
LOCALE_PATHS = [
    os.path.join(BASE_DIR, "locale"),
    os.path.join(BASE_DIR, "locale_mama_cas"),
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# Paths get a trailing os.sep appended deliberately (directory semantics).
STATICFILES_DIRS = (
    "{}{}".format(os.path.normpath(os.path.join(BASE_DIR, "static")), os.sep),
)
STATIC_URL = "{}/s/".format(FORCE_SCRIPT_NAME if FORCE_SCRIPT_NAME else "")
STATIC_ROOT = "{}{}".format(
    os.path.normpath(os.path.join(BASE_DIR, "assets", "static")), os.sep
)
MEDIA_URL = "{}/m/".format(FORCE_SCRIPT_NAME if FORCE_SCRIPT_NAME else "")
MEDIA_ROOT = "{}{}".format(
    os.path.normpath(os.path.join(BASE_DIR, "assets", "media")), os.sep
)
FILE_UPLOAD_PERMISSIONS = 0o644
if FORCE_SCRIPT_NAME:
    # Whitenoise must strip the script prefix before matching static paths.
    WHITENOISE_STATIC_PREFIX = "/s/"
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
""" Logging """
LOG_DIR = os.path.join(BASE_DIR, "..", "logs")
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": (
"%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d "
"%(message)s"
)
},
"simple": {"format": "%(levelname)s %(message)s"},
"simple_with_time": {"format": "%(levelname)s %(asctime)s %(message)s"},
},
"handlers": {
"null": {"level": "DEBUG", "class": "logging.NullHandler"},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple",
},
"file": {
"level": "DEBUG",
"class": "concurrent_log_handler.ConcurrentRotatingFileHandler",
"filename": os.path.join(LOG_DIR, "application.log"),
"maxBytes": 1024 * 1024 * 5, # 5 MB
"backupCount": 1000,
"use_gzip": True,
"delay": True,
"formatter": "verbose",
},
"mail_admins": {
"level": "ERROR",
"class": "django.utils.log.AdminEmailHandler",
},
"stream_to_console": {"level": "DEBUG", "class": "logging.StreamHandler"},
"rq_console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple_with_time",
},
},
"loggers": {
"": {
"handlers": ["console", "file", "mail_admins"],
"propagate": True,
"level": "INFO",
},
"django": {
"handlers": ["console", "file", "mail_admins"],
"propagate": True,
"level": "INFO",
},
"django.request": {
"handlers": ["console", "file", "mail_admins"],
"level": "ERROR",
"propagate": True,
},
"rq.worker": {
"handlers": ["rq_console", "mail_admins"],
"level": "DEBUG",
"propagate": False,
},
},
}
""" Cache settings """
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://{}:6379/0".format(
"{}-redis".format(PROJECT_NAME) if DOCKER else "localhost"
),
"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
}
}
""" Session settings """
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
SESSION_COOKIE_NAME = "sessionid_{}".format(PROJECT_NAME)
SESSION_COOKIE_DOMAIN = env.str("SESSION_COOKIE_DOMAIN", default=None)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
CSRF_COOKIE_NAME = "csrftoken_{}".format(PROJECT_NAME)
CSRF_COOKIE_DOMAIN = env.str("CSRF_COOKIE_DOMAIN", default=None)
CSRF_TRUSTED_ORIGINS = env.list("CSRF_TRUSTED_ORIGINS", default=[])
CORS_ALLOW_CREDENTIALS = env.bool("CORS_ALLOW_CREDENTIALS", default=False)
CORS_ORIGIN_ALLOW_ALL = env.bool("CORS_ORIGIN_ALLOW_ALL", default=False)
CORS_ORIGIN_WHITELIST = env.list("CORS_ORIGIN_WHITELIST", default=[])
# CORS_URLS_REGEX = r'^/()/.*$'
HOME_REDIRECT = reverse_lazy("cas_login")
# Axes settings
AXES_FAILURE_LIMIT = 3
AXES_COOLOFF_TIME = 1 # number in hours
CAPTCHA_FLITE_PATH = "/usr/bin/flite"
if DEBUG:
INSTALLED_APPS += ["debug_toolbar"]
MIDDLEWARE.insert(
MIDDLEWARE.index("django.contrib.sessions.middleware.SessionMiddleware"),
"debug_toolbar.middleware.DebugToolbarMiddleware",
)
INTERNAL_IPS = ("127.0.0.1",)
|
import os
import sys
# Print the first and the last line of every file named on the command line.
for path in sys.argv[1:]:  # renamed from 'file' to avoid shadowing a builtin
    with open(path, "r") as fp:
        lines = fp.readlines()
    # Guard: an empty file has no lines — the original raised IndexError here.
    if lines:
        print(lines[0])
        print(lines[-1])
|
# I collaborated with other students for this homework : Huilan You
# ========================================================================
# Copyright 2018 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import codecs
import bibtexparser
import re
import os
import glob
from collections import Counter
from types import SimpleNamespace
import tldextract
# Task 1
def load_map(map_file):
    """
    :param map_file: bib_map.tsv
    :return: a dictionary where the key is the conference/journal ID and the
             value is a namespace of (weight, series).

    Lines without exactly three tab-separated columns are skipped.
    """
    d = {}
    # 'with' closes the handle (the original leaked it), and iterating the
    # file directly drops the unused enumerate() index.
    with open(map_file) as fin:
        for line in fin:
            cols = line.strip().split('\t')
            if len(cols) == 3:
                d[cols[0]] = SimpleNamespace(weight=float(cols[1]), series=cols[2])
    return d
def get_entry_dict(bib_map, bib_dir):
    """
    :param bib_map: the output of load_map().
    :param bib_dir: the input directory where the bib files are stored.
    :return: a dictionary where the key is the publication ID (e.g., 'P17-1000') and the value is its bib entry.
    """
    # Matches page ranges such as '123-130' or '123--130'.
    re_pages = re.compile('(\d+)-{1,2}(\d+)')
    def parse_name(name):
        # 'Last, First' -> 'First Last'; anything else passes through unchanged.
        if ',' in name:
            n = name.split(',')
            if len(n) == 2: return n[1].strip() + ' ' + n[0].strip()
        return name
    def get(entry, weight, series):
        # Normalize the author field to a list and attach weight/series.
        entry['author'] = [parse_name(name) for name in entry['author'].split(' and ')]
        entry['weight'] = weight
        entry['series'] = series
        return entry['ID'], entry
    def valid(entry, weight):
        # Full-weight venues: keep only papers longer than 4 pages
        # (heuristic for full papers); other venues: any entry with authors.
        if weight == 1.0:
            if 'pages' in entry:
                m = re_pages.search(entry['pages'])
                return m and int(m.group(2)) - int(m.group(1)) > 4
            return False
        return 'author' in entry
    bibs = {}
    for k, v in bib_map.items():
        # NOTE(review): file handle is never closed.
        fin = open(os.path.join(bib_dir, k+'.bib'), encoding='utf-8')
        bib = bibtexparser.loads(fin.read())
        bibs.update([get(entry, v.weight, v.series) for entry in bib.entries if valid(entry, v.weight)])
    return bibs
def get_email_dict(txt_dir):
    """
    :param txt_dir: the input directory containing all text files.
    :return: a dictionary where the key is the publication ID and the value is the list of authors' email addresses.

    NOTE(review): reads the module-level global ``entry_dict`` (set in the
    __main__ block) to expand placeholder address patterns such as
    'lastname.firstname@domain' — this function is not self-contained.
    """
    def chunk(text_file, page_limit=2000):
        # Return roughly the first page_limit characters of the paper,
        # lower-cased and joined into a single line.
        fin = codecs.open(text_file, encoding='utf-8')
        doc = []
        n = 0
        for line in fin:
            line = line.strip().lower()
            if line:
                doc.append(line)
                n += len(line)
            if n > page_limit: break
        return ' '.join(doc)
    # Matches '(a, b, c) @ domain.tld'-style grouped address lists as well
    # as plain addresses; group 1 = local part(s), group 2 = domain.
    re_email = re.compile(
        '[({\[]?\s*([a-z0-9\.\-_]+(?:\s*[,;|]\s*[a-z0-9\.\-_]+)*)\s*[\]})]?\s*@\s*([a-z0-9\.\-_]+\.[a-z]{2,})')
    email_dict_new = {}
    for txt_file in glob.glob(os.path.join(txt_dir, '*.txt')):
        try:
            doc = chunk(txt_file)
        except UnicodeDecodeError:
            continue
        emails = []
        # Placeholder local parts that papers sometimes use instead of
        # spelling out each author's address.
        pattern1 = ['lastname.firstname']
        pattern2 = ['initial.last', 'initial.surname']
        pattern3 = ['surname','lastname']
        for m in re_email.findall(doc):
            # Normalize all separators to commas.
            ids = m[0].replace(';', ',').replace('|', ',')
            domain = m[1]
            ## Consider different patterns of emails
            if ids in pattern1:
                # Expand 'lastname.firstname' from the bib author list.
                name1 = []
                key = os.path.basename(txt_file)[:-4]
                if key in entry_dict:
                    for k in entry_dict[key]['author']:
                        first = k.split(' ')[0].lower()
                        last = k.split(' ')[1].lower()
                        name1.append(last + '.' + first)
                ids = ','.join(name1)
                if ',' in ids:
                    for ID in ids.split(','):
                        if ID.strip() in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                            continue
                        emails.append(ID.strip() + '@' + domain)
                        # NOTE(review): breaks after the first accepted ID,
                        # so only one address is recorded — confirm intent.
                        break
            elif ids in pattern2:
                # Expand 'initial.last' from the bib author list.
                name2 = []
                key = os.path.basename(txt_file)[:-4]
                if key in entry_dict:
                    for k in entry_dict[key]['author']:
                        initial = k.split(' ')[0][1].lower()
                        last = k.split(' ')[1].lower()
                        name2.append(initial + '.' + last)
                ids = ','.join(name2)
                if ',' in ids:
                    for ID in ids.split(','):
                        if ID.strip() in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                            continue
                        emails.append(ID.strip() + '@' + domain)
                        break
            elif ids in pattern3:
                # Expand bare 'surname' from the bib author list.
                name3 = []
                key = os.path.basename(txt_file)[:-4]
                if key in entry_dict:
                    for k in entry_dict[key]['author']:
                        last = k.split(' ')[1].lower()
                        name3.append(last)
                ids = ','.join(name3)
                if ',' in ids:
                    for ID in ids.split(','):
                        if ID.strip() in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                            continue
                        emails.append(ID.strip() + '@' + domain)
                        break
            else:
                # Literal address(es): split grouped locals, skip digit-only ones.
                if ',' in ids:
                    for ID in ids.split(','):
                        if ID.strip() in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                            continue
                        emails.append(ID.strip() + '@' + domain)
                else:
                    emails.append(ids + '@' + domain)
        if emails:
            # Publication ID = text file name without the '.txt' extension.
            key = os.path.basename(txt_file)[:-4]
            email_dict_new[key] = emails
    return email_dict_new
def print_emails(entry_dict, email_dict_new, email_file, weights_dict):
    """
    Write one TSV row per publication: ID, author count, and — when both are
    known — the collected e-mail addresses and institution weights.

    :param entry_dict: the output of get_entry_dict().
    :param email_dict_new: the output of get_email_dict().
    :param email_file: the output file in the TSV format, where each column contains
       (publication ID, the total number of authors, list of email addresses) for each paper.
    :param weights_dict: the output of get_weights(), keyed by publication ID.
    """
    # 'with' guarantees the handle is closed even if a row fails to format
    # (the original left the file open on any exception)
    with open(email_file, 'w') as fout:
        for k, v in sorted(entry_dict.items()):
            n = len(v['author'])
            l = [k, str(n)]
            if k in email_dict_new:
                # keep at most n addresses and join them into a single field
                emails = [';'.join(email_dict_new[k][:n])]
                if k in weights_dict:
                    l = [k, str(n), str(emails), str(weights_dict[k])]
            fout.write('\t'.join(l) + '\n')
def print_mismatch(entry_dict, email_dict_new, email_mismatch):
    """
    Write a TSV of papers whose number of collected e-mail addresses differs
    from their author count.

    Columns: publication ID, author count, e-mail count, list of e-mails.

    :param entry_dict: the output of get_entry_dict().
    :param email_dict_new: the output of get_email_dict().
    :param email_mismatch: path of the TSV report to write.
    """
    # 'with' replaces the manual open/close pair so the file is always closed
    with open(email_mismatch, 'w') as fout:
        for k, v in sorted(entry_dict.items()):
            n = len(v['author'])
            if k in email_dict_new:
                e_count = len(email_dict_new[k])
                if e_count != n:
                    l = [k, str(n), str(e_count), str(email_dict_new[k])]
                    fout.write('\t'.join(l) + '\n')
# Task 2
def get_domains(email_dict_new):
    """Map each publication ID to the list of domains of its e-mail addresses."""
    domains_by_pub = {}
    for pub_id, addresses in email_dict_new.items():
        domains_by_pub[pub_id] = [str(address.split('@')[1]) for address in addresses]
    return domains_by_pub
def get_weights(domain):
    """
    Weight each institution by its share of a paper's e-mail addresses.

    :param domain: output of get_domains(): {publication ID: [e-mail domains]}.
    :return: {publication ID: 'domain:weight;domain:weight;...'} — weights over
        a paper's registered domains sum to 1 (up to rounding to 2 decimals).
    """
    weights = {}
    for k, v in domain.items():
        num_dom = len(v)
        dom = []
        # frequency of each raw domain string for this paper
        w = Counter([t for t in v])
        y = len(Counter([t for t in v]))
        doms = {}
        if y >= 1:
            for k1, v1 in w.items():
                # collapse subdomains to the registered domain,
                # e.g. cs.emory.edu -> emory.edu
                ext = tldextract.extract(k1)
                main = '.'.join(ext[1:3])
                doms[main] = doms.get(main, 0) + v1
            for k2, v2 in doms.items():
                # print(k2)
                # share of this paper's addresses belonging to the domain
                weigh = int(v2) / int(num_dom)
                weigh = round(weigh, 2)
                dom.append((' ' + str(k2) + ":" + str(weigh)).strip())
        weights[k] = ';'.join(dom)
    return weights
if __name__ == '__main__':
    # Paths are machine-specific; adjust before running on another machine.
    BIB_DIR ='/Users/huilanyou/PycharmProjects/qtm385/hw3/nlp-ranking-master/bib/'
    MAP_FILE = '/Users/huilanyou/PycharmProjects/qtm385/hw3/nlp-ranking-master/dat/bib_map.tsv'
    bib_map = load_map(MAP_FILE)
    entry_dict = get_entry_dict(bib_map, BIB_DIR)
    # NOTE(review): TXT_DIR looks truncated ('nlp-ranking-m') — confirm path.
    TXT_DIR = '/Users/huilanyou/PycharmProjects/qtm385/hw3/nlp-ranking-m'
    email_dict_new = get_email_dict(TXT_DIR)
    print(email_dict_new)
    EMAIL_MISMATCH = 'you_mismatch.tsv'
    print_mismatch(entry_dict, email_dict_new, EMAIL_MISMATCH)
    # Task 2: Institution Weighting
    domains_dict = get_domains(email_dict_new)
    weights_dict = get_weights(domains_dict)
    EMAIL_FILE = '/Users/huilanyou/PycharmProjects/qtm385/hw3/nlp-ranking-master/dat/email_map.tsv'
    print_emails(entry_dict, email_dict_new, EMAIL_FILE, weights_dict)
from postproc.field import read_field, write_field
from postproc.settings import *
from postproc.research import Task
import os
class TaskBuilder(object):
    """Collects a command, a program and input files, then builds a Task
    configured for either local or remote execution."""
    def __init__(self, execution_host):  # so we allow polymorphism by passing Host or RemoteHost
        self._execution_host = execution_host
        self._command = None
        self._is_global_command = False
        self._program = None
        self._inputs = []
    # Setters are needed because it will allow to generalize build_task method in the future
    # (i.e to create a base class and create several builder like TaskWithOutputBuilder, GraphTaskBuilder and so on where there will be another setters)
    def set_command(self, command, is_global_command=False):
        """Remember the command line and whether it runs outside the task dir."""
        self._command = command
        self._is_global_command = is_global_command
    def set_program(self, program):
        """Path of the executable to be copied alongside the task."""
        self._program = program
    def set_inputs(self, inputs):
        """Append input files (extends, so repeated calls accumulate)."""
        self._inputs += list(inputs)
    def build_task(self):
        """Create the Task; copy modes depend on local vs remote execution host."""
        remote_execution = isinstance(self._execution_host, RemoteHost)
        if remote_execution:
            program_move_mode = 'all_remote'
            inputs_move_mode = 'from_local'
        else:
            program_move_mode = 'all_local'
            inputs_move_mode = 'all_local'
        copies_list = []
        if self._program is not None:
            copies_list.append({
                'path' : self._program,
                'mode' : program_move_mode
            })
        # (removed a leftover debug print of self._inputs)
        for input_ in self._inputs:
            copies_list.append({'path' : input_, 'mode' : inputs_move_mode})
        return Task(self._execution_host, self._command, self._is_global_command, copies_list)
class SgeTaskBuilder(TaskBuilder):
    """Builds a task that submits its commands through SGE's qsub."""
    def __init__(self, execution_host):
        self._sge_script_lines = []
        self._script_name = 'sge_script.sh'
        if not isinstance(execution_host, RemoteHost):
            raise Exception('SGE can be used only on the remote machine')
        super(SgeTaskBuilder, self).__init__(execution_host)
    def set_command(self, command):
        """Queue a command line; it is written into the SGE script later."""
        # assume that commands regard programs in the same dir
        self._sge_script_lines.append('./' + command)
    def set_sge_properties(self, cores, time='48:00:00'):
        """Prepend the SGE directives (#$ ...) for wall time and core count."""
        self._sge_script_lines = ['#$ -cwd -V\n', '#$ -l h_rt=%s\n' % time, '#$ -pe smp %i\n' % cores] \
            + self._sge_script_lines
    def build_task(self):
        """Write the SGE script under tmp/ and return a Task that qsubs it."""
        if len(self._sge_script_lines) != 0:
            super(SgeTaskBuilder, self).set_command('qsub ' + self._script_name, is_global_command=True)
        task = super(SgeTaskBuilder, self).build_task()
        # NAME SHOULD BE CONSISTENT WITH TASK!
        tmp_dir = 'tmp'
        tmp_sge_script_path = tmp_dir + '/' + self._script_name
        if not os.path.isdir(tmp_dir):
            os.mkdir(tmp_dir)
        # bug fix: the script file was opened but never closed, so content
        # could still be buffered when the task copied it (and the handle leaked)
        with open(tmp_sge_script_path, 'w') as f:
            f.writelines(self._sge_script_lines)
        task.copies_list.append({
            'path' : tmp_sge_script_path,
            'mode' : 'from_local'
        })
        return task
class PythonSgeGeneratorTaskBuilder(TaskBuilder):
    """Builds a task that uploads a generated Python script which drip-feeds
    SGE jobs: instead of qsub-ing everything at once, the script watches
    qstat and submits more job scripts whenever queue slots free up."""
    def __init__(self, execution_host):
        self._sge_script_default_lines = []
        self._sge_script_command_lines = []
        self._sge_script_names = []
        self._py_script_name = 'sge_generator.py'
        if not isinstance(execution_host, RemoteHost):
            raise Exception('SGE can be used only on the remote machine')
        super(PythonSgeGeneratorTaskBuilder, self).__init__(execution_host)
    def set_command(self, command, sid):
        """Queue one command and the per-job script name derived from sid."""
        # assume that commands regard programs in the same dir
        self._sge_script_command_lines.append('./' + command)
        self._sge_script_names.append('gensge_{}.sh'.format(sid))
    def set_sge_properties(self, cores, time='48:00:00'):
        # '\\n' (not '\n'): these lines are embedded into generated source code,
        # so the newline must survive as an escape inside a string literal
        self._sge_script_default_lines = ['#$ -cwd -V\\n', '#$ -l h_rt=%s\\n' % time, '#$ -pe smp %i\\n' % cores]
    def build_task(self):
        """Generate tmp/sge_generator.py and return a Task that runs it
        detached (nohup + background) on the remote host."""
        if len(self._sge_script_names) != 0:
            # nohup is necessary to prevent killing process when logging out
            # & is necessary to put the process in background
            super(PythonSgeGeneratorTaskBuilder, self).set_command('nohup python ' + self._py_script_name + ' >/dev/null 2>&1 &', is_global_command=True)
        task = super(PythonSgeGeneratorTaskBuilder, self).build_task()
        # NAME SHOULD BE CONSISTENT WITH TASK!
        tmp_dir = 'tmp'
        tmp_py_script_path = tmp_dir + '/' + self._py_script_name
        if not os.path.isdir(tmp_dir):
            os.mkdir(tmp_dir)
        f = open(tmp_py_script_path, 'w')
        f.write('import subprocess\n')
        f.write('import time\n\n')
        # Build Python-source literals for the generated script: a list of SGE
        # file names and a parallel list of their contents.
        filenames_list_as_str = ''
        contents_list_as_str = ''
        sge_script_default_content = ''.join(self._sge_script_default_lines)
        for sge_script_name, sge_script_command_line in zip(self._sge_script_names, self._sge_script_command_lines):
            if filenames_list_as_str == '':
                filenames_list_as_str = '['
                contents_list_as_str = '['
            else:
                filenames_list_as_str += ', '
                contents_list_as_str += ', '
            filenames_list_as_str += "'" + sge_script_name + "'"
            sge_script_content = sge_script_default_content + sge_script_command_line + '\\n'
            contents_list_as_str += "'" + sge_script_content + "'"
        filenames_list_as_str += ']\n'
        contents_list_as_str += ']\n'
        f.write('sge_filenames = ' + filenames_list_as_str)
        f.write('sge_contents = ' + contents_list_as_str)
        # The generated loop: while scripts remain, use qstat's output line
        # count as a crude measure of queue occupancy and submit what fits,
        # then sleep 10 minutes before checking again.
        f.write("log = open('sge_gen.log', 'w')\n")
        f.write('while len(sge_filenames) != 0:\n')
        f.write("\tstdout = subprocess.check_output(['qstat'])\n")
        f.write("\tlines = stdout.split('\\n')\n")
        f.write("\tlog.write('have ' + str(len(lines)) + ' lines\\n')\n")
        f.write('\tmax_lines_num = 12\n')
        f.write('\tif len(lines) < max_lines_num:\n')
        f.write('\t\tavail_sge_tasks = max_lines_num - len(lines)\n')
        f.write('\t\tif avail_sge_tasks > len(sge_filenames):\n')
        f.write('\t\t\tavail_sge_tasks = len(sge_filenames)\n')
        f.write("\t\tlog.write('can launch ' + str(avail_sge_tasks) + ' tasks\\n')\n")
        f.write('\t\tfor i in range(avail_sge_tasks):\n')
        f.write("\t\t\tsge_file = open(sge_filenames[i], 'w')\n")
        f.write("\t\t\tsge_file.write(sge_contents[i])\n")
        f.write("\t\t\tsge_file.close()\n")
        f.write("\t\t\tsubprocess.call(['qsub', sge_filenames[i]])\n")
        f.write("\t\tlog.write('delete tasks ' + str(sge_filenames[:avail_sge_tasks]) + '\\n')\n")
        f.write('\t\tdel sge_filenames[:avail_sge_tasks]\n')
        f.write('\t\tdel sge_contents[:avail_sge_tasks]\n')
        f.write('\ttime.sleep(600)\n')
        f.write('log.close()\n')
        f.close()
        task.copies_list.append({
            'path' : tmp_py_script_path,
            'mode' : 'from_local'
        })
        return task
class GraphTaskBuilder:
    """Stub for a future graph-based task builder; no method is implemented yet."""

    def __init__(self, execution_host):
        """Not implemented; the execution host argument is currently ignored."""

    def set_task(self, task):
        """Not implemented."""

    def build_task(self):
        """Not implemented; returns None."""
# -*- coding: utf-8 -*-
"""
Created on Fri Mar  1 12:50:19 2019

@author: saib
"""
a = [1, 2, 3, 4, 5, 6, 7]
# bug fix: Python lists have no .length attribute (that raised
# AttributeError); the built-in len() returns the element count
print(len(a))
#!/opt/anaconda2/bin/python
import numpy as np
import argparse
import mrcfile
def rand_vol(n, output):
    """
    random a volume of size n x n x n and saves it in output as mrcfile

    Voxel values are drawn uniformly from [-pi, pi).
    :param n: size of volume
    :param output: the path the volume is going to be saved to
    :return: None
    """
    vol = np.random.uniform(-np.pi, np.pi, n * n * n).reshape([n, n, n])
    # cast to float32 before writing — the standard MRC data type
    with mrcfile.new(output, overwrite=True) as mrc:
        mrc.set_data(vol.astype(np.float32))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="generate a random volume mrc file")
    # bug fix: the positional was declared as "volume size" (with a space),
    # so argparse stored the value under a dest the code never read and
    # args.n raised AttributeError; name the argument "n" directly.
    parser.add_argument("n", metavar="n", type=int,
                        help="size of volume")
    parser.add_argument("output", metavar="output", type=str,
                        help="output path of the volume")
    args = parser.parse_args()
    rand_vol(args.n, args.output)
"""
Starting in the top left corner of a 2×2 grid, and only being able to move to
the right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
"""
# NOTE: we're moving along the *edges* of the grid, not from square to square.
# Essentially we're being asked to find the number of "lattice paths" from the
# point (0, 0) to (20, 20); which is the binomial coefficient
# 20 + 20 Choose 20 == 40 C 20 == 40!/20!20! == 40*39*...*21/20!
#
import math
class MathException(Exception):
    """Raised when a mathematical invariant fails to hold."""
def lattice_paths(x, y):
    """
    Number of monotonic lattice paths from (0, 0) to (x, y), i.e. the
    binomial coefficient (x + y) choose x.

    >>> lattice_paths(20, 20)
    137846528820
    """
    n = x + y
    k = x or y  # if one dimension is 0, there is exactly one path
    # Integer arithmetic keeps the result exact: the old float division
    # can round incorrectly once the factorials exceed 2**53.
    numerator = math.factorial(n)
    denominator = math.factorial(n - k) * math.factorial(k)
    n_choose_k, remainder = divmod(numerator, denominator)
    if remainder:
        raise MathException('Expected Binomial Coefficent to be an Integer')
    return n_choose_k
|
#Stack Linked List
class StackList:
    """LIFO stack backed by a singly linked list of Node objects."""

    def __init__(self):
        self.top = None
        self.size = 0

    def isEmpty(self):
        """True when the stack holds no items."""
        return self.top is None

    def __len__(self):
        """Number of items currently on the stack."""
        return self.size

    def peek(self):
        """Return the top item without removing it."""
        assert not self.isEmpty() , "Cannot peek empty stack"
        return self.top.data

    def pop(self):
        """Remove and return the top item."""
        assert not self.isEmpty() , "Connot pop empy stack"
        popped = self.top
        self.top = popped.next
        self.size -= 1
        return popped.data

    def push(self, val):
        """Place val on top of the stack."""
        self.top = Node(val, self.top)
        self.size += 1

    def traverse(self):
        """Print each item from top to bottom, one per line."""
        node = self.top
        while node is not None:
            print(node.data)
            node = node.next
class Node:
    """Singly-linked node: a value plus a reference to the next node."""

    def __init__(self , data , link):
        self.data = data
        self.next = link
# Demo: push three items, print the stack top-down, then peek and pop.
s = StackList()
s.push(1)
s.push(2)
s.push(3)
s.traverse()
print('peek: ' ,s.peek())
print('popped: ' ,s.pop())
import torch
import torch.nn as nn
def safe_detach(x):
    """
    Detach x from the autograd graph while preserving its requires_grad flag
    (a plain .detach() always returns requires_grad=False).
    ---
    https://github.com/rtqichen/residual-flows/blob/master/lib/layers/iresblock.py
    """
    detached = x.detach()
    return detached.requires_grad_(x.requires_grad)
def weights_init_as_nearly_identity(m):
    """
    Initialize a Linear/Conv module so the layer is nearly an identity
    mapping: weights are zeroed and biases drawn from N(0, 0.01), so
    residual-style blocks start close to identity.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1 or classname.find('Conv') != -1:
        # check if module is wrapped by spectral norm (raw parameter renamed
        # to 'weight_bar' in that case)
        if hasattr(m, 'weight'):
            nn.init.constant_(m.weight, 0)
        else:
            nn.init.constant_(m.weight_bar, 0)
        # bug fix: modules built with bias=False have m.bias = None, which
        # made nn.init.normal_ raise; skip the bias init in that case
        if getattr(m, 'bias', None) is not None:
            nn.init.normal_(m.bias, 0, 0.01)
def anomaly_hook(self, inputs, outputs):
    """
    Module forward-hook that detects NaN and infinity in the outputs.

    Prints the offending indices/values and raises RuntimeError as soon as
    any output tensor contains INF or NAN.
    """
    if not isinstance(outputs, tuple):
        outputs = [outputs]
    else:
        outputs = list(outputs)
    for i, out in enumerate(outputs):
        inf_mask = torch.isinf(out)
        nan_mask = torch.isnan(out)
        if inf_mask.any():
            print('In module:', self.__class__.__name__)
            # bug fix: this branch previously reported "NAN" for infinities
            print(f'Found INF in output {i} at indices: ', inf_mask.nonzero(), 'where:',
                  out[inf_mask.nonzero(as_tuple=False)[:, 0].unique(sorted=True)])
        if nan_mask.any():
            print("In", self.__class__.__name__)
            print(f'Found NAN in output {i} at indices: ', nan_mask.nonzero(), 'where:',
                  out[nan_mask.nonzero(as_tuple=False)[:, 0].unique(sorted=True)])
        if inf_mask.any() or nan_mask.any():
            # bug fix: was self.___class__ (three underscores), which raised
            # AttributeError while building this very error; also "Foud" typo
            raise RuntimeError('Found INF or NAN in output of', self.__class__.__name__,
                               'from input tensor', inputs)
|
#!/usr/bin/env python
from distutils.core import setup
# Long description shown on the package index page.
LONG_DESCRIPTION = \
'''The program reads one or more input FASTA files.
For each file it computes a variety of statistics, and then
prints a summary of the statistics as output.
The goal is to provide a solid foundation for new bioinformatics command line tools,
and is an ideal starting place for new projects.'''
# NOTE(review): this uses distutils (deprecated, removed in Python 3.12),
# which does not support entry_points/install_requires — setuptools is
# presumably intended; confirm before packaging.
setup(
    name='biodemo',
    version='0.1.0.0',
    author='Bernie Pope',
    author_email='bjpope@unimelb.edu.au',
    packages=['biodemo'],
    package_dir={'biodemo': 'biodemo'},
    # installs a `biodemo` console script pointing at biodemo.biodemo:main
    entry_points={
        'console_scripts': ['biodemo = biodemo.biodemo:main']
    },
    url='https://github.com/GITHUB_USERNAME/biodemo',
    license='LICENSE',
    description=('A prototypical bioinformatics command line tool'),
    long_description=(LONG_DESCRIPTION),
    install_requires=["biopython"],
)
|
from collections import deque
from math import factorial, isqrt
# Search: starting from the seed 3, apply n -> isqrt^i(factorial(m)) chains
# and record every value below `bound` reachable this way, then report which
# small integers are NOT reachable.
bound = 20000
queue = deque()
queue.append(3)
found = {3}
paths = {3 : 3}  # value -> the m whose factorial/sqrt chain produced it
sqrts = {3 : 0}  # value -> number of isqrt steps applied after factorial(m)
while (len(queue)> 0):
    m = queue.pop()
    n = factorial(m)
    i = 0
    # repeatedly take integer square roots until the value collapses to <= 3
    while (n>3):
        if (n<bound) and not(n in found):
            found.add(n)
            queue.appendleft(n)
            paths[n] = m
            sqrts[n] = i
        n = isqrt(n)
        i += 1
# Print every integer in [3, 1000) that was never reached.
for i in range(3,1000):
    if not(i in found):
        print("{} ".format(i),end="")
print()
print()
# Show one derivation chain for 9, walking predecessors back to the seed 3.
v = 9
while not(v==3):
    w = paths[v]
    print("{} <- {}! + {} sqrt".format(v,w,sqrts[v]))
    v = w
|
#coding:utf-8
"""UserRepository
"""
class UserRepository(object):
    """Data-access stub for user records; every operation is unimplemented."""

    def __init__(self, db):
        """Keep a handle to the database connection."""
        self.db = db

    def find_by_id(self, user_id):
        """Look a user up by primary key (not implemented)."""

    def find_by_name(self, user):
        """Look a user up by name (not implemented)."""

    def create(self, user):
        """Persist a new user (not implemented)."""

    def update(self, user):
        """Update an existing user (not implemented)."""

    def delete(self, user):
        """Remove a user (not implemented)."""
|
# Read an integer from the user and report its predecessor and successor.
numero = int(input('Digite um numero: '))
antecessor, sucessor = numero - 1, numero + 1
print('O numero {} tem como antecessor {} e sucessor {}'.format(numero, antecessor, sucessor))
from haystack import indexes
from .models import ExtUser
class ExtUsereIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over ExtUser profiles.

    NOTE(review): the class name has a typo ("Usere"); renaming would touch
    any code importing it, so it is left as-is.
    """
    # primary document field, rendered from a search template
    text = indexes.CharField(document=True, use_template=True)
    location = indexes.CharField(model_attr='location')
    first_name = indexes.CharField(model_attr='first_name')
    last_name = indexes.CharField(model_attr='last_name')
    # default=None lets records missing these values still be indexed
    age = indexes.IntegerField(model_attr='age', default=None)
    role = indexes.IntegerField(model_attr='role')
    desired_salary = indexes.IntegerField(model_attr='desired_salary', default=None)
    register_date = indexes.DateField(model_attr='register_date')
    email = indexes.CharField(model_attr='email')
    def get_model(self):
        """Return the model class this index covers."""
        return ExtUser
    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        return self.get_model().objects.all()
|
"""
define client
"""
# Copyright 2018-2019 CNRS
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
try:
from urllib.parse import urlparse, quote_plus
except ImportError: # Python 2
from urlparse import urlparse
from urllib import quote_plus
from requests.exceptions import HTTPError
from openid_http_client.auth_client.access_token_client import AccessTokenClient
from openid_http_client.auth_client.simple_refresh_token_client import SimpleRefreshTokenClient
from openid_http_client.http_client import HttpClient
from pyxus.client import NexusClient
from pyxus.resources.entity import Instance
try:
from jupyter_collab_storage import oauth_token_handler
except ImportError:
oauth_token_handler = None
from .errors import AuthenticationError
# raise the "curl" logger threshold to WARNING to quiet per-request output
CURL_LOGGER = logging.getLogger("curl")
CURL_LOGGER.setLevel(logging.WARNING)
logger = logging.getLogger("fairgraph")
# maps public scope names to the KG Query API's databaseScope values
SCOPE_MAP = {
    "released": "RELEASED",
    "latest": "INFERRED",
}
class KGClient(object):
    """Client for the Human Brain Project Knowledge Graph.

    Wraps several HTTP backends behind one object: the Nexus API (CRUD and
    filter queries), the KG Query API (stored queries), the release API and
    the IDM user-info endpoint. Retrieved instances are cached by URI.
    """
    def __init__(self, token=None,
                 nexus_endpoint="https://nexus.humanbrainproject.org/v0",
                 kg_query_endpoint="https://kg.humanbrainproject.eu/query",
                 release_endpoint="https://kg.humanbrainproject.eu/api/releases",
                 idm_endpoint="https://services.humanbrainproject.eu/idm/v1/api",
                 oidc_host="https://services.humanbrainproject.eu/oidc",
                 client_id=None,
                 client_secret=None,
                 refresh_token=None):
        # Prefer a refresh-token client when full OIDC credentials are given;
        # otherwise fall back to a plain access token taken from the argument,
        # the collab token handler, or HBP_AUTH_TOKEN — in that order.
        if client_id and client_secret and refresh_token:
            auth_client = SimpleRefreshTokenClient(oidc_host, client_secret,
                                                   client_id, refresh_token)
        else:
            if token is None:
                if oauth_token_handler:
                    token = oauth_token_handler.get_token()
                else:
                    try:
                        token = os.environ["HBP_AUTH_TOKEN"]
                    except KeyError:
                        raise AuthenticationError("No token provided.")
            auth_client = AccessTokenClient(token)
        ep = urlparse(nexus_endpoint)
        self.nexus_endpoint = nexus_endpoint
        self._nexus_client = NexusClient(scheme=ep.scheme, host=ep.netloc, prefix=ep.path[1:],
                                         alternative_namespace=nexus_endpoint,
                                         auth_client=auth_client)
        self._kg_query_client = HttpClient(kg_query_endpoint, "", auth_client=auth_client)
        self._release_client = HttpClient(release_endpoint, "", auth_client=auth_client, raw=True)
        self._idm_client = HttpClient(idm_endpoint, "", auth_client=auth_client)
        self._instance_repo = self._nexus_client.instances
        self.cache = {}  # todo: use combined uri and rev as cache keys
    def list(self, cls, from_index=0, size=100, deprecated=False, api="query", scope="released",
             resolved=False, filter=None, context=None):
        """Return up to `size` instances of `cls` starting at `from_index`.

        `api` selects the backend: "nexus" (supports `filter`/`context`
        filter queries) or "query" (stored KG query, requires cls.query_id).
        """
        if api == "nexus":
            organization, domain, schema, version = cls.path.split("/")
            subpath = "/{}/{}/{}/{}".format(organization, domain, schema, version)
            instances = self.query_nexus(subpath, filter, context, from_index, size, deprecated)
        elif api == "query":
            if hasattr(cls, "query_id") and cls.query_id is not None:
                if resolved:
                    query_id = cls.query_id_resolved
                else:
                    query_id = cls.query_id
                instances = self.query_kgquery(cls.path, query_id, filter, from_index, size, scope)
                # returns here; the final return below only serves the nexus branch
                return [cls.from_kg_instance(instance, self, resolved=resolved)
                        for instance in instances]
            else:
                raise NotImplementedError("Coming soon. For now, please use api='nexus'")
        else:
            raise ValueError("'api' must be either 'nexus' or 'query'")
        return [cls.from_kg_instance(instance, self, resolved=resolved)
                for instance in instances]
    def count(self, cls, api="query", scope="released"):
        """Return the total number of instances of `cls` reported by the API."""
        if api == "nexus":
            # size=1 keeps the payload minimal; only the "total" field is used
            url = "{}/data/{}/?size=1&deprecated=False".format(self.nexus_endpoint, cls.path)
            response = self._nexus_client._http_client.get(url)
        elif api == "query":
            if scope not in SCOPE_MAP:
                raise ValueError("'scope' must be either '{}'".format("' or '".join(list(SCOPE_MAP))))
            url = "{}/{}/instances?size=1&databaseScope={}".format(cls.path, cls.query_id, SCOPE_MAP[scope])
            response = self._kg_query_client.get(url)
        else:
            raise ValueError("'api' must be either 'nexus' or 'query'")
        return response["total"]
    def query_nexus(self, path, filter, context, from_index=0, size=100, deprecated=False):
        """Run a Nexus filter query, following pagination links, and return
        the raw Instance objects (also populating the cache)."""
        # Nexus API
        logger.debug("Making Nexus query {} with filter {}".format(path, filter))
        if filter:
            filter = quote_plus(json.dumps(filter))
        if context:
            context = quote_plus(json.dumps(context))
        instances = []
        query = self._nexus_client.instances.list(
            subpath=path,
            filter_query=filter,
            context=context,
            from_index=from_index,
            size=size,
            deprecated=deprecated,
            resolved=True)
        # todo: add support for "sort" field
        instances.extend(query.results)
        next = query.get_next_link()
        # follow "next" links until we have `size` instances or run out
        while len(instances) < size and next:
            query = self._nexus_client.instances.list_by_full_path(next)
            instances.extend(query.results)
            next = query.get_next_link()
        for instance in instances:
            self.cache[instance.data["@id"]] = instance
            # tag each instance with the API it came from
            instance.data["fg:api"] = "nexus"
        return instances
    def query_kgquery(self, path, query_id, filter, from_index=0, size=100, scope="released"):
        """Run a stored KG query, paging with start/size, and return the raw
        Instance objects (also populating the cache).

        A 403 response is treated as "no results" rather than an error.
        """
        template = "{}/{}/instances?start={{}}&size={}&databaseScope={}".format(
            path, query_id, size, SCOPE_MAP[scope])
        if filter:
            # filter values may be KG objects; use their IRI as the query value
            for key, value in filter.items():
                if hasattr(value, "iri"):
                    filter[key] = value.iri
            template += "&" + "&".join("{}={}".format(k, quote_plus(v.encode("utf-8"))) for k, v in filter.items())
        if scope not in SCOPE_MAP:
            raise ValueError("'scope' must be either '{}'".format("' or '".join(list(SCOPE_MAP))))
        start = from_index
        #url = quote_plus(template.format(start).encode("utf-8"))
        url = template.format(start)
        try:
            response = self._kg_query_client.get(url)
        except HTTPError as err:
            if err.response.status_code == 403:
                response = None
            else:
                raise
        if response and "results" in response:
            instances = [
                Instance(path, data, Instance.path)
                for data in response["results"]
            ]
            start += response["size"]
            # keep paging until we reach `size` results or exhaust the total
            while start < min(response["total"], size):
                #url = quote_plus(template.format(start).encode("utf-8"))
                url = template.format(start)
                response = self._kg_query_client.get(url)
                instances.extend([
                    Instance(path, data, Instance.path)
                    for data in response["results"]
                ])
                start += response["size"]
        else:
            instances = []
        for instance in instances:
            self.cache[instance.data["@id"]] = instance
            instance.data["fg:api"] = "query"
        return instances
    def instance_from_full_uri(self, uri, cls=None, use_cache=True, deprecated=False, api="query",
                               scope="released", resolved=False):
        """Retrieve a single instance by full URI, via cache, Nexus or KG
        Query; returns None when not found (or deprecated and excluded)."""
        # 'deprecated=True' means 'returns an instance even if that instance is deprecated'
        # should perhaps be called 'show_deprecated' or 'include_deprecated'
        logger.debug("Retrieving instance from {}, api='{}' use_cache={}".format(uri, api, use_cache))
        if use_cache and uri in self.cache:
            logger.debug("Retrieving instance {} from cache".format(uri))
            instance = self.cache[uri]
        elif api == "nexus":
            instance = Instance(Instance.extract_id_from_url(uri, self._instance_repo.path),
                                data=self._instance_repo._http_client.get(uri),
                                root_path=Instance.path)
            if instance and instance.data and "@id" in instance.data:
                if deprecated is False and instance.data["nxv:deprecated"]:
                    instance = None
                    logger.debug("Not returning deprecated instance")
                else:
                    self.cache[instance.data["@id"]] = instance
                    logger.debug("Retrieved instance from KG Nexus" + str(instance.data))
            else:
                instance = None
        elif api == "query":
            if cls and hasattr(cls, "query_id") and cls.query_id is not None:
                if resolved:
                    query_id = cls.query_id_resolved
                else:
                    query_id = cls.query_id
                response = self._kg_query_client.get(
                    "{}/{}/instances?databaseScope={}&id={}".format(cls.path,
                                                                    query_id,
                                                                    SCOPE_MAP[scope],
                                                                    uri))
                if response and len(response["results"]) > 0:
                    instance = Instance(cls.path, response["results"][0], Instance.path)
                    self.cache[instance.data["@id"]] = instance
                    logger.debug("Retrieved instance from KG Query" + str(instance.data))
                else:
                    logger.warning("Instance not found at {} using KG Query API".format(uri))
                    instance = None
            else:
                raise NotImplementedError("No query id available: cls={}".format(str(cls)))
        else:
            raise ValueError("'api' must be either 'nexus' or 'query'")
        return instance
    def create_new_instance(self, path, data):
        """Create an instance under `path` in Nexus and return the entity."""
        instance = Instance(path, data, Instance.path)
        entity = self._nexus_client.instances.create(instance)
        # merge the submitted data into what the server returned
        entity.data.update(data)
        return entity
    def update_instance(self, instance):
        """Push an updated instance to Nexus after stripping server-managed keys."""
        instance.data.pop("links", None)
        instance.data.pop("nxv:rev", None)
        instance.data.pop("nxv:deprecated", None)
        instance.data.pop("fg:api", None)
        instance = self._nexus_client.instances.update(instance)
        return instance
    def delete_instance(self, instance):
        """Delete an instance from Nexus and evict it from the cache."""
        self._nexus_client.instances.delete(instance)
        logger.debug("Deleting instance {}".format(instance.id))
        if instance.data["@id"] in self.cache:
            logger.debug("Removing {} from cache".format(instance.data["@id"]))
            self.cache.pop(instance.data["@id"])
    def by_name(self, cls, name, match="equals", all=False,
                api="query", scope="released", resolved=False):
        """Retrieve an object based on the value of schema:name"""
        # todo: allow non-exact searches
        if api not in ("query", "nexus"):
            raise ValueError("'api' must be either 'nexus' or 'query'")
        valid_match_methods = {
            #"query": ("starts_with", "ends_with", "contains", "equals", "regex"),
            "query": ("contains", "equals"),
            "nexus": ("equals")
        }
        if match not in valid_match_methods[api]:
            raise ValueError("'match' must be one of {}".format(valid_match_methods[api]))
        if api == "nexus":
            op = {"equals": "eq", "contains": "in"}[match]
            context = {"schema": "http://schema.org/"}
            query_filter = {
                "path": "schema:name",
                "op": op,
                "value": name
            }
            instances = self.query_nexus(cls.path, query_filter, context)
        else:
            assert api == "query"
            if hasattr(cls, "query_id") and cls.query_id is not None:
                if resolved:
                    query_id = cls.query_id_resolved
                else:
                    query_id = cls.query_id
                response = self._kg_query_client.get(
                    "{}/{}{}/instances?databaseScope={}&name={}".format(
                        cls.path,
                        query_id,
                        match == "contains" and "_name_contains" or "",  # workaround
                        SCOPE_MAP[scope],
                        name))
                instances = [Instance(cls.path, result, Instance.path)
                             for result in response["results"]]
            else:
                raise NotImplementedError("Coming soon. For now, please use api='nexus'")
        if instances:
            if all:
                return [cls.from_kg_instance(inst, self, resolved=resolved)
                        for inst in instances]
            else:  # return only the first result
                return cls.from_kg_instance(instances[0], self, resolved=resolved)
        else:
            return None
    def store_query(self, path, query_definition):
        """Upload a stored query definition to the KG Query service.

        NOTE(review): `raw` is not restored if put() raises — consider try/finally.
        """
        self._kg_query_client.raw = True  # endpoint returns plain text, not JSON
        response = self._kg_query_client.put(path, data=query_definition)
        self._kg_query_client.raw = False
    def retrieve_query(self, path):
        """Fetch a stored query definition from the KG Query service."""
        return self._kg_query_client.get(path)
    def is_released(self, uri):
        """Release status of the node"""
        path = Instance.extract_id_from_url(uri, self._instance_repo.path)
        response = self._release_client.get(path)
        return response.json()["status"] == "RELEASED"
    def release(self, uri):
        """Release the node with the given uri"""
        path = Instance.extract_id_from_url(uri, self._instance_repo.path)
        response = self._release_client.put(path)
        if response.status_code not in (200, 201):
            raise Exception("Can't release node with id {}".format(uri))
    def unrelease(self, uri):
        """Unrelease the node with the given uri"""
        path = Instance.extract_id_from_url(uri, self._instance_repo.path)
        response = self._release_client.delete(path)
        if response.status_code not in (200, 204):
            raise Exception("Can't unrelease node with id {}".format(uri))
    def user_info(self):
        """Return the authenticated user's IDM profile."""
        return self._idm_client.get("user/me")
|
class BaseComponent(object):
    """Root class for components.

    Subclasses object explicitly mainly to get access to super and
    staticmethod (new-style class semantics under Python 2).
    """
def factor(n):
    """Yield the divisors of n in increasing order."""
    return (i for i in range(1, n + 1) if n % i == 0)


def is_prime(x):
    """True when x's only divisors are 1 and x itself."""
    return list(factor(x)) == [1, x]


def primes(x):
    """Yield the primes up to and including x."""
    return (i for i in range(2, x + 1) if is_prime(i))


# PEP 8 (E731): lambdas assigned to names replaced with def statements;
# the callables behave exactly as before.
print(list(primes(100)))
|
#runs the cython simulation (jury is out on whether this need be its own file...)
import Cython
import pyximport
pyximport.install()
from . import cythonSim
#this allows line profiler (via ipython magic %lprun) to profile cython functions
from Cython.Compiler.Options import get_directive_defaults
get_directive_defaults()['linetrace'] = True
get_directive_defaults()['binding'] = True
def runSim(nShells,nPhase,nEcc,T,dt,rMin,rMax,name,nOutput,dmMass,baryonInit,baryonMass,findEcc,G=4.96e-15):
    """Configure the compiled cythonSim module and run one simulation.

    NOTE(review): parameter semantics live in cythonSim; G's default is
    presumably the gravitational constant in simulation units — confirm.
    """
    cythonSim.updateGlobal(G) #updates global variables
    cythonSim.setFunctions(dmMass,baryonInit,baryonMass,findEcc) #sets user-defined functions for the initial state and final baryon mass
    cythonSim.runSim(nShells,nPhase,nEcc,T,dt,rMin,rMax,nOutput,name) #runs the simulation
#
# @lc app=leetcode.cn id=221 lang=python3
#
# [221] 最大正方形
#
# @lc code=start
class Solution:
    def maximalSquare(self, matrix: list[list[str]]) -> int:
        """Return the area of the largest all-'1' square in a binary matrix.

        Dynamic programming with one rolling row: dp[j] is the side length of
        the largest square whose bottom-right corner is at the current cell.
        O(m*n) time, O(n) space — the original brute force re-scanned each
        candidate border, O(m*n*min(m,n)^2).
        (Annotation uses builtin `list`; `List` was never imported here.)
        """
        if not matrix:
            return 0
        cols = len(matrix[0])
        dp = [0] * (cols + 1)
        best = 0
        for row in matrix:
            prev = 0  # dp value diagonally up-left of the current cell
            for j in range(1, cols + 1):
                saved = dp[j]
                if row[j - 1] == '1':
                    # side limited by the squares above, left and up-left
                    dp[j] = min(dp[j], dp[j - 1], prev) + 1
                    best = max(best, dp[j])
                else:
                    dp[j] = 0
                prev = saved
        return best * best
# @lc code=end
|
from vid_utils import Video, concatenate_videos

# Ask the user for a playback-speed multiplier (prompt is in Chinese:
# "enter speed"), then re-encode output.mp4 at that speed.
speed_str = input("輸入速度:")
speed = float(speed_str)
videos = [
    Video(speed=speed, path="output.mp4")
]
# NOTE(review): the f-string has no placeholders — output name is fixed.
concatenate_videos(videos=videos, output_file=f"output_sped_up.mp4")
|
import tkinter as tk #GUI library
from tkinter import filedialog, Text, StringVar #GUI options
import tkinter.ttk as ttk #GUI table module
from tkinter.ttk import Treeview #import table module
import os #OS access to open file
import interface
import sqlite3
def rfidkeytext():
    """Scan a card via the interface module and show its key in the GUI.

    Updates the module-level `card_key` StringVar in place; note it returns
    the StringVar itself, not the key string.
    """
    global card_key
    card_key.set(interface.rfidkey_get())
    return card_key
#get item from textbox where 1.0 means line 1 char 0 and 'end -1c' means read to end of document minus the last character (\n or \r)
def NameSave(argz):
    """Copy the Text widget's contents into the FullName StringVar and return it.

    :param argz: a tkinter Text widget to read the name from.
    """
    FullName.set(argz.get('1.0', 'end -1c'))
    return FullName.get()
def addToDb(name, rfid_id):
    """Insert an (RFID key, uppercased name) pair into rfid_acess unless
    either value already exists; report the outcome via Add_Status."""
    name = name.upper()
    try:
        db_conn = sqlite3.connect('test.db')
        db_currsor = db_conn.cursor()
        print('SQL CONNECTION PASSED!')
    except sqlite3.Error as error:
        print(f'SQL CONNECTION FAILED! ERROR : {error}')
        # bug fix: db_conn is unbound here; falling through raised NameError
        return
    try:
        Check = db_conn.execute(
            "SELECT id FROM rfid_acess WHERE b_key IS ? OR full_name IS ?;", (rfid_id, name))
        Check = Check.fetchone()
        if Check is None:
            # parameterized query — never interpolate user input into SQL
            db_currsor.execute("INSERT INTO rfid_acess (b_key, full_name) VALUES (?,?);", (rfid_id, name))
            db_conn.commit()
            Add_Status.set('Sucess')
        else:
            Add_Status.set(f'Name or ID exist in database with id = {Check[0]}')
    finally:
        db_conn.close()
def rmFromDb(name, rfid_id):
    """Delete the row matching either the RFID key or the uppercased name;
    report the outcome via Add_Status."""
    name = name.upper()
    try:
        db_conn = sqlite3.connect('test.db')
        db_currsor = db_conn.cursor()
        print('SQL CONNECTION PASSED!')
    except sqlite3.Error as error:
        print(f'SQL CONNECTION FAILED! ERROR : {error}')
        # bug fix: db_conn is unbound here; falling through raised NameError
        return
    try:
        Check = db_conn.execute(
            "SELECT id FROM rfid_acess WHERE b_key IS ? OR full_name IS ?;", (rfid_id, name))
        Check = Check.fetchone()
        if Check is not None:
            # parameterized query — never interpolate user input into SQL
            db_currsor.execute(
                "DELETE FROM rfid_acess WHERE b_key IS ? OR full_name IS ?;", (rfid_id, name))
            db_conn.commit()
            Add_Status.set('Sucess')
        else:
            Add_Status.set('Name or ID does not exist in database')
    finally:
        db_conn.close()
def list_acess():
    """Open a window listing every row of the rfid_acess table."""
    l_acess_window = tk.Tk()
    l_acess_window.title('RFID Acess Table')
    l_acess_window.geometry('400x800')
    # NOTE(review): tk.Tk() creates a second root window; tk.Toplevel(root)
    # is usually intended here — confirm before changing.
    try:
        db_conn = sqlite3.connect('test.db')
        print('SQL CONNECTION PASSED!')
    except sqlite3.Error as error:
        print(f'SQL CONNECTION FAILED! ERROR : {error}')
        # bug fix: db_conn is unbound here; falling through raised NameError
        return
    acess = db_conn.execute('SELECT * FROM rfid_acess;')
    acess = acess.fetchall()
    db_conn.close()  # bug fix: connection was never released
    list_table = ttk.Treeview(l_acess_window)
    list_table['columns'] = ('RFID Key', 'Name')
    list_table.column('#0', width = 50, minwidth = 25)
    list_table.column('RFID Key', width = 200, minwidth = 175)
    list_table.column('Name', width=200, minwidth=175)
    list_table.heading('#0', text='ID')
    list_table.heading('RFID Key', text = 'RFID Key')
    list_table.heading('Name', text='Name')
    # first column shows the row id; remaining columns the key and name
    for index, row in enumerate(acess):
        list_table.insert('',index,text =row[0], values = row[1:])
    list_table.pack(fill = 'both', expand = True)
def list_history():
    """Open a window listing every row of the rfid_history table."""
    l_history_window = tk.Tk()
    l_history_window.title('RFID Acess Table')
    l_history_window.geometry('800x600')
    # NOTE(review): tk.Tk() creates a second root window; tk.Toplevel(root)
    # is usually intended here — confirm before changing.
    try:
        db_conn = sqlite3.connect('test.db')
        print('SQL CONNECTION PASSED!')
    except sqlite3.Error as error:
        print(f'SQL CONNECTION FAILED! ERROR : {error}')
        # bug fix: db_conn is unbound here; falling through raised NameError
        return
    acess = db_conn.execute('SELECT * FROM rfid_history;')
    acess = acess.fetchall()
    db_conn.close()  # bug fix: connection was never released
    list_table = ttk.Treeview(l_history_window)
    list_table['columns'] = ('RFID Key', 'Name')
    list_table.column('#0', width = 200, minwidth = 175)
    list_table.column('RFID Key', width = 200, minwidth = 175)
    list_table.column('Name', width=200, minwidth=175)
    list_table.heading('#0', text='Date and Time')
    list_table.heading('RFID Key', text = 'RFID Key')
    list_table.heading('Name', text='Name')
    # first column shows the timestamp; remaining columns the key and name
    for index, row in enumerate(acess):
        list_table.insert('',index,text =row[0], values = row[1:])
    list_table.pack(fill = 'both', expand = True)
# --- module-level GUI construction for the RFID Manager main window ---
root = tk.Tk()
root.title('RFID Manager')
root['background'] = '#83C3C8'
# Tk StringVars updated by the callbacks (rfidkeytext / addToDb / rmFromDb,
# defined earlier in this file)
card_key = StringVar()    # last scanned RFID key
FullName = StringVar()
Add_Status = StringVar()  # status line for add/delete operations
# prevent window resizing in both directions
root.resizable(height = False, width = False)
# fixed-size canvas gives the window its dimensions
canvas = tk.Canvas(root, height=240, width=600, bg='#83C3C8')
canvas.pack()
# place and configure the white content frame (80% of the window)
frame = tk.Frame(root, bg ='white') #create frame
frame.place(relwidth = 0.8, relheight = 0.8 , relx = 0.1, rely = 0.1)
Title_label = tk.Label(frame, text='RFID Manager', font=200, bg='white')
Title_label.grid(row = 0, column = 1, sticky = 'E')
# row 1: name entry
Name_label = tk.Label(frame, text='Full Name : ', font=50, bg = 'white')
Name_label.grid(row = 1, column = 0 )
# single-line text widget; its content is read via NameSave (defined elsewhere)
edit_name = tk.Text(frame, height = 1, width = 20, relief = 'solid')
edit_name.grid(row=1, column=1)
# row 2: scanned RFID key display + scan button
key_label = tk.Label(frame, text='RFID_key : ', font=50, bg='white')
key_label.grid(row=2, column=0)
key_label = tk.Label(frame, textvariable=card_key, font=20, bg='white')
key_label.grid(row=2, column=1)
rfid_key_button = tk.Button(frame, text = 'Scan Card', padx = 10, pady = 5, bg = 'White', command = rfidkeytext)
rfid_key_button.grid(row = 2, column = 2)
# row 4: add/delete buttons; NameSave(edit_name) reads the typed name and
# card_key holds the last scanned key
AddUserButton = tk.Button(frame, text='Add User', padx=10, pady=5,
                          bg='White', command=lambda: addToDb(NameSave(edit_name), card_key.get()))
AddUserButton.grid(row = 4, column = 2, sticky='W')
DeleteUserButton = tk.Button(frame, text='Delete User', padx=10, pady=5,
                             bg='White', command=lambda: rmFromDb(NameSave(edit_name), card_key.get()))
DeleteUserButton.grid(row=4, column=3, sticky = 'E' )
# row 3: status line fed by Add_Status
Status_adduser_txt = tk.Label(frame, text = 'Status: ', font=50, bg='white')
Status_adduser = tk.Label(
    frame, textvariable=Add_Status, font=50, bg='white', wraplength=200)
Status_adduser_txt.grid(row=3, column=0 )
Status_adduser.grid(row=3, column=1)
# row 5: table viewers (open secondary windows)
List_RFID_Acess = tk.Button(frame, text='Show Users',
                            padx=10, pady=5, bg='White', command= list_acess)
List_RFID_Acess.grid(row = 5, column = 0, sticky = 'E')
List_RFID_History = tk.Button(frame, text='Show History',
                              padx=10, pady=5, bg='White', command=list_history)
List_RFID_History.grid(row=5, column=1)
# enter the Tk event loop (blocks until the window is closed)
root.mainloop()
|
from django.contrib.messages.constants import (
    DEBUG, INFO, SUCCESS, WARNING, ERROR, DEFAULT_TAGS
)

# "Stored" (persisted) message levels: each sits exactly one level above
# its base Django message level.
STORED_DEBUG = DEBUG + 1
STORED_INFO = INFO + 1
STORED_SUCCESS = SUCCESS + 1
STORED_WARNING = WARNING + 1
STORED_ERROR = ERROR + 1

# Register display tags for the persisted levels alongside Django's defaults.
_PERSISTED_TAGS = {
    STORED_DEBUG: 'persisted debug',
    STORED_INFO: 'persisted info',
    STORED_SUCCESS: 'persisted success',
    STORED_WARNING: 'persisted warning',
    STORED_ERROR: 'persisted error',
}
DEFAULT_TAGS.update(_PERSISTED_TAGS)
|
from __future__ import absolute_import
from .optimizer import OptimizerPass, register_pass, get_optimizer, optimize_model
from .passes.nop import EliminateLinearActivation
from .passes.bn_quant import MergeBatchNormAndQuantizedTanh, QuantizeDenseOutput
from .passes.dense_bn_fuse import FuseDenseAndBatchNormalization
from .passes.fuse_biasadd import FuseBiasAdd
# Register every built-in optimizer pass under its canonical name.
for _pass_name, _pass_cls in (
    ('eliminate_linear_activation', EliminateLinearActivation),
    ('merge_batch_norm_quantized_tanh', MergeBatchNormAndQuantizedTanh),
    ('quantize_dense_output', QuantizeDenseOutput),
    ('fuse_dense_batch_norm', FuseDenseAndBatchNormalization),
    ('fuse_biasadd', FuseBiasAdd),
):
    register_pass(_pass_name, _pass_cls)
|
# Interactive miles -> kilometres converter (1 mile = 1.609 km).
distance_miles = float(input("Enter the distance in miles: "))
distance_km = distance_miles * 1.609
print("No. Of Kilometers: ", distance_km)
# Print all primes in the half-open range [a, b), space-separated, from two
# whitespace-separated integers on stdin.
first, second = input().split()
lo = int(first)
hi = int(second)
found = []
for cand in range(lo, hi):
    if cand <= 1:
        continue  # 0, 1 and negatives are not prime
    is_prime = True
    for divisor in range(2, cand):  # trial division (range(2, 2) is empty, so 2 passes)
        if cand % divisor == 0:
            is_prime = False
            break
    if is_prime:
        found.append(str(cand))
print(' '.join(found))
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
s=raw_input()
chk='AEIOUY'
ans=tmp=1
for i in s:
if i in chk:
tmp=1
else:
tmp+=1
ans=max(ans,tmp)
print ans
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 12:02:19 2020

Build node2vec embeddings for a graph stored in graph.npy and save the
per-node embedding vectors, the word2vec-format embeddings and the model.

@author: mha
"""
import networkx as nx
from node2vec import Node2Vec
import numpy as np
# Output file paths
EMBEDDING_FILENAME = './embeddings.emb'
EMBEDDING_MODEL_FILENAME = './embeddings.model'
# nodes.npy: array of node identifiers; only its length is used below
nodes = np.load("nodes.npy")
#nodes = { i : nodes[i] for i in range(len(nodes))}
# graph.npy: structured array with 'Node', 'Destination_Node', 'Weight' fields
# -- TODO confirm this layout against the file producer
g = np.load("graph.npy")
graph = nx.Graph()
for i in range(len(g)):
    graph.add_edge(int(g['Node'][i]), int(g['Destination_Node'][i]), weight=g['Weight'][i] )
# Precompute probabilities and generate walks
#node2vec = Node2Vec(graph, dimensions=64, walk_length=30, num_walks=200, workers=4)
## if d_graph is big enough to fit in the memory, pass temp_folder which has enough disk space
# Note: It will trigger "sharedmem" in Parallel, which will be slow on smaller graphs
node2vec = Node2Vec(graph, dimensions=64, walk_length=30, num_walks=200, workers=4, temp_folder="./tmp")
# Embed
model = node2vec.fit(window=10, min_count=1, batch_words=4)  # Any keywords acceptable by gensim.Word2Vec can be passed; `dimensions` and `workers` are automatically passed (from the Node2Vec constructor)
# Look for most similar nodes
#model.wv.most_similar('2') # Output node names are always strings
# Save embeddings for later use
model.wv.save_word2vec_format(EMBEDDING_FILENAME)
# Save model for later use
model.save(EMBEDDING_MODEL_FILENAME)
# Collect the embedding of every node (node2vec keys nodes by string index)
vectors = []
for i in range(len(nodes)):
    vectors.append(model.wv.get_vector(str(i)))
vectors = np.asarray(vectors)
np.save("vectors", vectors)
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Created on 16 Aug 2017
Based on Nipype Configuration file
logging options : INFO, DEBUG
@author: Gilles de Hollander
Edited by SM
'''
try:
    import configparser
except ImportError:  # bug fix: bare `except:` narrowed; Python 2 named the module ConfigParser
    import ConfigParser as configparser
import os
import exptools
import json

# Options whose raw config value is a JSON-encoded list.
list_vars = [('screen', 'physical_screen_size'),
             ('screen', 'gamma_scale'),
             ('screen', 'background_color'),
             ('screen', 'size'),
             ('screen', 'max_lums')]
# stimulus set_1 .. set_10, generated instead of hand-written (same entries,
# same order as before).
list_vars += [('stimulus', 'set_{}'.format(i)) for i in range(1, 11)]
list_vars += [('stimulus', 'x_pos'),
              ('fixation_cross', 'bg'),
              ('text', 'feedback_y_pos')]
# Options parsed with ConfigParser.getboolean().
boolean_vars = [('screen', 'wait_blanking'),
                ('screen', 'full_screen'),
                ('screen', 'mouse_visible'),
                ('mri', 'simulate_mri_trigger')]
# Options returned verbatim as strings; everything else is coerced to float.
str_vars = [('mri', 'mri_trigger_key'),
            ('stimulus', 'line_color'),
            ('stimulus', 'units'),
            ('stimulus', 'type'),
            ('screen', 'monitor_name'),
            ('input', 'response_button_left'),
            ('input', 'response_button_right'),
            ('text', 'units')]
class ExpToolsConfig(object):
    """Thin wrapper around ConfigParser that reads ``exp_config.cfg`` from
    the current working directory and type-coerces options according to the
    module-level tables (list_vars / boolean_vars / str_vars)."""

    def __init__(self):
        self._config = configparser.ConfigParser()
        exp_config_file = os.path.join(os.path.abspath(os.getcwd()), 'exp_config.cfg')
        print(exp_config_file)
        self._config.read(exp_config_file)

    def get(self, section, option):
        """Return the option, coerced per the module-level type tables.

        JSON-decoded for list options, boolean for boolean options, raw
        string for string options, and float for everything else."""
        key = (section, option)
        raw = self._config.get(section, option)
        if key in list_vars:
            return json.loads(raw)
        if key in boolean_vars:
            return self._config.getboolean(section, option)
        if key in str_vars:
            return raw
        return float(raw)

    def set(self, section, option, value):
        """Store the option; bools and lists are stringified first so they
        round-trip through get()."""
        if isinstance(value, (bool, list)):
            value = str(value)
        return self._config.set(section, option, value)

    def has_option(self, section, option):
        """True if the underlying config defines the option."""
        return self._config.has_option(section, option)
def test_exptools_config():
    """Smoke test: the loaded config must expose a 'screen' section."""
    cfg = ExpToolsConfig()
    assert 'screen' in cfg._config.sections()
|
from imutils import face_utils
import numpy as np
import imutils
import dlib
import os
import cv2
from random import shuffle
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
# Dataset locations (Windows-specific absolute paths)
image_directory = r'C:\Users\Dell\Downloads\jaffedbase\jaffe'
label_directory = r'C:\Users\Dell\Downloads\ck+\Emotion'
count1, count2 = 0, 0
# dlib face detector + 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("C:/Users/Dell/Downloads/facial-landmarks/facial-landmarks/shape_predictor_68_face_landmarks.dat")
# Fully-connected network over the flattened 68 (x, y) landmark pairs
# (136 values), 8 output emotion classes.
convnet = input_data(shape=[None, 136, 1, 1], name='input')
convnet = fully_connected(convnet, 2048, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 512, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 8, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.00001, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_verbose=3, tensorboard_dir='log')
# print("Model training")
# model.fit({'input': train_x}, {'targets': train_y}, n_epoch=1200,
#           validation_set=({'input': test_x}, {'targets': test_y}),
#           snapshot_epoch=True, show_metric=True, run_id='cohn_kanade')
# print("Model trained")
#
# # model.save('tflearn_expression_detection_new_jaffe.model')
# Load previously trained weights instead of retraining
model.load('tflearn_expression_detection_new_jaffe.model')
# Webcam capture setup; ramp frames let the camera auto-adjust exposure
camera_port = 0
ramp_frames = 30
camera = cv2.VideoCapture(camera_port)
def get_image():
    """Read one frame from the module-level `camera`; the success flag is
    deliberately ignored, matching the original behavior."""
    _ok, frame = camera.read()
    return frame
# Throw away warm-up frames so the camera can auto-adjust.
for i in range(ramp_frames):
    temp = get_image()
print("Taking image...")
camera_capture = get_image()
file = r'C:/Users/Dell/Downloads/testimg.png'
cv2.imwrite(file, camera_capture)
# drop our reference to the capture device
del camera
# Prediction is run on this stored JAFFE image, not on the webcam capture.
file = r'C:/Users/Dell/Downloads/YM.SU1.58.tiff'
img = cv2.resize(cv2.imread(file, cv2.IMREAD_GRAYSCALE), (500, 500))
print(img)
# NOTE(review): `get_coordinates` and `test_y` are not defined anywhere in
# this file, so the next two lines raise NameError as written — presumably a
# landmark-extraction helper and the test labels were removed. Also,
# `.reshape(None, 136, 1, 1)` is not a valid numpy shape (an inferred
# dimension is spelled -1) — confirm the intended shape.
print(model.predict([np.array(get_coordinates(img)).reshape(None, 136, 1, 1)]))
print(test_y[5])
|
from Spider import BbcSpider
from scrapy.crawler import CrawlerProcess
# Run the BBC spider in-process; start() blocks until the crawl finishes.
crawler_process = CrawlerProcess()
crawler_process.crawl(BbcSpider)
crawler_process.start()
|
# Precomputed tables modulo p (a large 63-bit modulus; the inverse
# recurrence below assumes p is prime) for indices 0..100004:
#   inv[i]       = i^-1 mod p
#   invorial[i]  = (i!)^-1 mod p
#   factorial[i] = i! mod p
p = 9223372036854775837
inv = [0, 1]
invorial = [0, 1]
factorial = [1, 1]
for n in range(2, 100005):
    # standard prime-modulus inverse recurrence: inv[n] = -(p // n) * inv[p % n]
    inv.append((p - p // n) * inv[p % n] % p)
    invorial.append(invorial[-1] * inv[n] % p)
    factorial.append(factorial[-1] * n % p)
def comb(n, r):
    """Binomial coefficient C(n, r) mod p, via the precomputed tables."""
    smaller = min(r, n - r)
    if smaller == 0:
        return 1
    denominator = invorial[smaller] * invorial[n - smaller] % p
    return factorial[n] * denominator % p
def ipow(x, k):
    """Binary exponentiation: x**k mod p (returns 1 for k <= 0)."""
    result = 1
    base = x
    while k > 0:
        if k & 1:
            result = result * base % p
        base = base * base % p
        k >>= 1
    return result
# One query per line: print t followed by x^(r-c) * C(r, c) mod p.
for _ in range(int(input())):
    t, x, r, c = map(int, input().split())
    print(t, ipow(x, r - c) * comb(r, c) % p)
|
import matplotlib
# Select the Tk backend before any pyplot-dependent import happens.
matplotlib.use("TkAgg")
import gym
import gridworld
from gym import wrappers, logger
import numpy as np
import copy
def ValueIteration(env, gamma=0.99, eps=1e-6, diff=1e-8):
    """Tabular value iteration over the env's explicit MDP.

    :param env: environment exposing getMDP() -> (states, P), where
        `states` maps a state key to its index and P[state][action] is an
        iterable of (proba, next_state, reward, done) tuples.
    :param gamma: discount factor.
    :param eps: convergence threshold on ||V - V_prev||.
    :param diff: initial convergence gap (must be > eps so the loop runs).
    :return: greedy policy as an array of action indices, one per state.
    """
    states,P = env.getMDP()
    random_key = list(P.keys())[0]
    # assumes every state offers the same number of actions -- TODO confirm
    nb_actions = len(P[random_key])
    V = np.zeros((len(states)))
    # --- iterate the Bellman optimality backup to a fixed point ---
    while(diff>eps):
        tmp_V = copy.deepcopy(V)
        for state in P.keys():
            tmp_values = np.zeros((nb_actions))
            state_nb = states[state]
            for action in range(nb_actions):
                for new_state_nb in range(len(P[state][action])):
                    proba,obs,reward,done = P[state][action][new_state_nb]
                    obs_nb = states[obs]
                    # expected immediate reward plus discounted next value
                    tmp_values[action] += proba*(reward + gamma*tmp_V[obs_nb])
            V[state_nb] = np.max(tmp_values)
        diff = np.linalg.norm((V-tmp_V))
    # NOTE(review): dead assignment — `diff` is not read again in this
    # function (it mirrors the reset used in PolicyIteration).
    diff = eps*2
    # --- extract the greedy policy from the converged V ---
    policy = np.zeros_like(V)
    for state in P.keys():
        tmp_values = np.zeros((nb_actions))
        state_nb = states[state]
        for action in range(nb_actions):
            for new_state_nb in range(len(P[state][action])):
                proba,obs,reward,done = P[state][action][new_state_nb]
                obs_nb = states[obs]
                tmp_values[action] += proba*(reward + gamma*V[obs_nb])
        policy[state_nb] = np.argmax(tmp_values)
    return policy
def PolicyIteration(env, gamma=0.99, eps=1e-6, diff=1e-8):
    """Tabular policy iteration (evaluation + greedy improvement).

    :param env: environment exposing getMDP() -> (states, P); see
        ValueIteration for the expected structure.
    :param gamma: discount factor.
    :param eps: convergence threshold of the inner evaluation loop.
    :param diff: initial convergence gap (must be > eps).
    :return: array of action indices, one per state.
    """
    states,P = env.getMDP()
    random_key = list(P.keys())[0]
    nb_actions = len(P[random_key])
    policy = np.zeros((len(states)))
    tmp_policy = np.zeros_like(policy)
    while(True):
        # --- policy evaluation: compute V under the current policy ---
        V = np.zeros((len(states)))
        while(diff>eps):
            tmp_V = copy.deepcopy(V)
            V = np.zeros((len(states)))
            for state in P.keys():
                state_nb = states[state]
                # NOTE(review): policy holds float64 values (np.zeros); this
                # is then used to index P[state] — verify P[state] accepts it.
                action = policy[state_nb]
                for index_choice in range(len(P[state][action])):
                    proba,obs,reward,done = P[state][action][index_choice]
                    obs_nb = states[obs]
                    V[state_nb] += proba*(reward + gamma*tmp_V[obs_nb])
            diff = max(np.abs(V-tmp_V))
        # re-arm the inner evaluation loop for the next improvement round
        diff = eps*2
        # --- policy improvement: act greedily with respect to V ---
        tmp_policy = copy.deepcopy(policy)
        for state in P.keys():
            tmp_values = np.zeros((nb_actions))
            state_nb = states[state]
            for action in range(nb_actions):
                for index_choice in range(len(P[state][action])):
                    proba,obs,reward,done = P[state][action][index_choice]
                    obs_nb = states[obs]
                    tmp_values[action] += proba*(reward + gamma*V[obs_nb])
            policy[state_nb] = np.argmax(tmp_values)
        # stop once the policy is stable
        if(np.all(policy==tmp_policy)):
            print("breaking with {}".format(V))
            break
    return policy
class Agent(object):
    """Deterministic agent that follows a precomputed tabular policy."""

    def __init__(self, policy, states):
        self.policy = policy  # array: state index -> action
        self.states = states  # mapping: state string -> state index

    def act(self, observation):
        """Return the policy's action for the observed grid state."""
        state_key = gridworld.GridworldEnv.state2str(observation)
        return self.policy[self.states[state_key]]
if __name__ == '__main__':
    env = gym.make('gridworld-v0')
    env.seed(0)
    env.render(mode="human")
    print("Learning ...")
    # NOTE: the ValueIteration result is immediately overwritten — only the
    # PolicyIteration policy (gamma=0.5) is actually used below.
    policy = ValueIteration(env)
    policy = PolicyIteration(env,gamma = 0.5)
    states,P = env.getMDP()
    agent = Agent(policy,states)
    # outdir = 'gridworld-v0/agent-results'
    # env = wrappers.Monitor(env, directory=outdir, force=True, video_callable=False)
    # Reward map: step cost -0.001; cells 3 and 4 pay +1, cells 5 and 6 pay -1.
    env.setPlan("gridworldPlans/plan0.txt", {0: -0.001, 3: 1, 4: 1, 5: -1, 6: -1})
    episode_count = 5
    reward = 0
    done = False
    rsum = 0
    pause = 0.00001  # render delay between frames
    for i in range(episode_count):
        obs = env.reset()
        env.verbose = (i%1 == 0)  # render every episode
        if env.verbose:
            env.render(pause)
        j = 0       # step counter for this episode
        rsum = 0    # cumulative reward for this episode
        while True:
            action = agent.act(obs)
            obs, reward, done, _ = env.step(action)
            rsum += reward
            j += 1
            if env.verbose:
                env.render(pause)
            if done:
                print("Episode : " + str(i) + " rsum=" + str(rsum) + ", " + str(j) + " actions")
                break
    print("end")
    env.close()
#!/usr/bin/env python
# coding: utf-8
import re
from abc import ABC, abstractmethod
from collections import namedtuple
from io import StringIO
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
from requests.exceptions import HTTPError
from config import URL_BASE, PROXIES, COLUNAS
def request(uri, params):
    """Issue a GET against URL_BASE + uri through the configured proxies.

    :param uri: endpoint path (string, or iterable of fragments — joined)
    :param params: dict of query-string parameters
    :return: the requests response object, or False on HTTPError
    """
    endereco = URL_BASE + ''.join(uri)
    try:
        resposta = requests.get(endereco, params, proxies=PROXIES,
                                allow_redirects=True)
    except HTTPError:
        return False
    return resposta
def csv2df(uri, params):
    """Download a CSV endpoint and return its content as a pandas DataFrame.

    :param uri: endpoint path, forwarded to :func:`request`
    :param params: dict with the query-string parameters
    :return: DataFrame with the CSV content; empty DataFrame when the
        request failed, returned a non-success response, or the CSV is empty.
    """
    resp = request(uri, params)
    if not resp:
        # bug fix: request() returns False on HTTPError (and a non-2xx
        # Response is falsy too); previously this fell through and crashed
        # on resp.content with AttributeError.
        return pd.DataFrame()
    resp_str = resp.content.decode('utf8')  # CSV payload, bytes -> text
    csv = StringIO(resp_str)                # file-like wrapper for pandas
    try:
        df = pd.read_csv(csv)
    except pd.errors.EmptyDataError:  # empty CSV -> empty DataFrame
        # (pd.errors is the supported home of this exception; the old
        # pd.io.common alias was removed in modern pandas)
        df = pd.DataFrame()
    return df
def limpa_cpf_cnpj(cpfcnpj):
    """Strip punctuation (dots, slashes, dashes) from a CPF/CNPJ string.

    Non-string input (e.g. np.nan) yields np.nan, matching the original
    TypeError handling.
    """
    try:
        return re.sub(r'[./-]', '', cpfcnpj)
    except TypeError:  # covers cpfcnpj = np.nan (a float)
        return np.nan
def limpa_cifra(cifra):
    """Convert a currency string to float.

    Handles both Brazilian (``1.234,56``) and English (``2,500.00``)
    formats by treating the *last* separator found as the decimal mark.
    Caveat (as in the original): a value with no decimal part such as
    ``3.000`` parses as 3.0.

    :param cifra: string (any other type is str()-ified first)
    :return: float, or np.nan when unparseable
    """
    texto = re.sub(r'R\$\s*', '', str(cifra))
    separadores = [ch for ch in texto if ch in ',.']
    decimal = separadores[-1] if separadores else '.'
    if decimal == ',':
        # Brazilian format: drop thousand dots, comma becomes the point
        texto = texto.replace('.', '').replace(',', '.')
    else:
        # English format: drop thousand commas
        texto = texto.replace(',', '')
    try:
        return float(texto)
    except ValueError:
        return np.nan
def prepara_latin1(entrada):
    """Make a string safe for latin-1 (SAS import).

    Replaces common utf-8 punctuation that latin-1 cannot encode with ASCII
    look-alikes, then forces a latin-1 round-trip (anything else becomes
    '?'). Non-string input is returned untouched.
    """
    if not isinstance(entrada, str):
        return entrada
    saida = re.sub('[\u201d\u201c]', '"', entrada)   # curly double quotes
    saida = re.sub('[\u2019\u2018]', '\'', saida)    # curly single quotes
    saida = re.sub('[\u2013\u2014]', '--', saida)    # en/em dashes
    saida = re.sub('[\u2122]', 'TM', saida)          # trademark sign
    saida = re.sub('[\u2022]', '*', saida)           # bullet
    saida = re.sub('[\u02c6]', '^', saida)           # modifier circumflex
    return saida.encode('latin1', errors='replace').decode('latin1')
def sanitiza_df(df):
    """Sanitize a DataFrame for SAS import.

    'Data*' columns are parsed as datetimes, 'Valor*' columns coerced to
    float, '*cnpj*' columns stripped of punctuation, and finally every cell
    is made latin-1 safe. The input frame is not modified.
    """
    saida = df.copy()
    # Three independent passes, same order as before (a column can match
    # more than one rule).
    for col in [c for c in saida.columns if 'Data' in c]:
        saida[col] = pd.to_datetime(saida[col])
    for col in [c for c in saida.columns if 'Valor' in c]:
        saida[col] = saida[col].apply(limpa_cifra)
    for col in [c for c in saida.columns if 'cnpj' in c.lower()]:
        saida[col] = saida[col].apply(limpa_cpf_cnpj)
    return saida.applymap(prepara_latin1)
class Componente(ABC):
    """Abstract base for the ComprasNet hierarchy (Uasg, Pregao, Item)."""

    @property
    def parte_de(self):
        """Code of the component this one belongs to ('GDF' at the root)."""
        try:
            return self.dados[-1]
        except TypeError:  # dados is None for the top-level component
            return 'GDF'

    @abstractmethod
    def partes(self):
        """DataFrame (built from CSV) listing this component's sub-parts."""

    def __getitem__(self, index):
        """Row `index` of partes(); a message string when there are none."""
        try:
            return self.partes().iloc[index]
        except AttributeError:
            return 'Instância da classe {} não possui partes.'.format(
                self.__class__.__name__)

    def __len__(self):
        """Number of sub-parts; 0 when the backing CSV was empty."""
        try:
            return len(self.partes())
        except pd.io.common.EmptyDataError:
            return 0
class Uasg(Componente):
    """Represents a UASG (purchasing unit) on ComprasNet; its "parts" are
    the auctions (pregões) it ran."""
    # Root of the hierarchy: parte_de falls back to 'GDF' because dados is None
    dados = None
    uri = '/pregoes/v1/pregoes'
    colunas = COLUNAS.pregoes.keys()
    def __init__(self, id):
        # `id` shadows the builtin; kept for interface compatibility
        self._id = str(id)
        self._params = {'co_uasg': str(self._id)}
    @property
    def id(self):
        return self._id
    @property
    def num_partes(self):
        """Number of auctions reported by the site (0 when the request fails)."""
        resp = request(self.uri, self._params)
        if resp:
            soup = BeautifulSoup(resp.text, 'html.parser')
            # last word of the "num-resultados" element is the count
            return int(
                soup.find_all(class_='num-resultados')[0].text.split(' ')[-1])
        return 0
    def _offsets(self):
        """Offsets used to page through the CSV download (500 rows per page)."""
        return [i * 500 for i in range(self.num_partes // 500 + 1)]
    def partes(self):
        """DataFrame of this UASG's auctions, built from the paged CSVs."""
        output = pd.DataFrame(columns=self.colunas)
        if self.num_partes:
            for offset in self._offsets():
                self._params['offset'] = offset
                df = csv2df(self.uri + '.csv', self._params)
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                # migrate to pd.concat when upgrading.
                output = output.append(df)
            output['id_uasg'] = self.id
            # auction id is the digit run before "/itens" in the items URI
            pattern_id_pregao = re.compile(r'/(\d+)/itens$')
            extrai_id_pregao = lambda x: pattern_id_pregao.findall(x)[0]
            output['id_pregao'] = output['Itens do pregão > uri'].apply(
                extrai_id_pregao)
        return sanitiza_df(output)
    def __repr__(self):
        return f'UASG {self._id}'
class Pregao(Componente):
    """Represents an auction (pregão) on ComprasNet; its "parts" are the
    items."""
    uri = '/pregoes/doc/pregao/'
    colunas = COLUNAS.itens.keys()
    def __init__(self, dados_pregao):
        """Instantiated from one row of auction data produced by a Uasg
        object."""
        self.dados = dados_pregao
        self._params = {}
    @property
    def id(self):
        # first digit run in the items URI is the auction id
        pattern = re.compile(r'\d+')
        return pattern.findall(self.dados['Itens do pregão > uri'])[0]
    @property
    def num_partes(self):
        """Number of items in the auction (0 when the request fails)."""
        end = self.uri + self.id + '/itens'
        resp = request(end, self._params)
        if resp:
            soup = BeautifulSoup(resp.text, 'html.parser')
            return int(
                soup.find_all(class_='num-resultados')[0].text.split(' ')[-1])
        return 0
    def _offsets(self):
        """Offsets used to page through the CSV download (500 rows per page)."""
        return [i * 500 for i in range(self.num_partes // 500 + 1)]
    def partes(self):
        """DataFrame of the auction's items, with award status resolved."""
        output = pd.DataFrame(columns=self.colunas)
        end = self.uri + self.id + '/itens.csv'
        for offset in self._offsets():
            self._params['offset'] = offset
            df = csv2df(end, self._params)
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # migrate to pd.concat when upgrading.
            output = output.append(df, sort=False)
        output['id_pregao'] = self.id
        # item id is the "item=<digits>" parameter of the events URI
        pattern_id_item = re.compile(r'item=(\d+)')
        extrai_id = lambda x: pattern_id_item.findall(x)[0]
        output['id_item'] = output['Eventos do Item da licitação > uri'].apply(
            extrai_id
        )
        # Each item must be instantiated to check whether it was awarded
        # (one extra HTTP round-trip per item).
        output['adjudicado'] = [Item(x).adjudicado() for i, x in
                                output.iterrows()]
        return sanitiza_df(output)
    def __repr__(self):
        return f'Pregão {self.id}'
class Item(Componente):
    """Represents one item of an auction; its component "parts" are the
    bids (propostas) submitted for it."""
    uri = '/pregoes/v1/proposta_item_pregao'
    colunas = COLUNAS.propostas.keys()
    def __init__(self, dados_item):
        """Instantiated from one row of item data produced by a Pregao
        object."""
        self.dados = dados_item
    @property
    def id(self):
        # item id embedded in the bids URI as "item=<digits>"
        pattern = re.compile(r'item=(\d+)')
        return pattern.findall(
            self.dados['Propostas do Item da licitação > uri']
        )[0]
    def co_uasg(self):
        pattern = re.compile(r'co_uasg=(\d+)')
        return pattern.findall(self.dados['Termos do pregão > uri'])[0]
    def co_pregao(self):
        pattern = re.compile(r'co_pregao=(\d+)')
        return pattern.findall(
            self.dados['Propostas do Item da licitação > uri']
        )[0]
    def nu_pregao(self):
        # assumes dados[-3] holds a URI containing "nu_pregao=<digits>" —
        # TODO confirm against the column layout produced by Pregao.partes
        pattern = re.compile(r'nu_pregao=(\d+)')
        return pattern.findall(self.dados[-3])[0]
    @property
    def num_partes(self):
        """Number of bids submitted for this item (0 when the request fails)."""
        params = {'item': self.id, 'co_pregao': self.co_pregao()}
        resp = request(self.uri + '.html', params)
        if resp:
            soup = BeautifulSoup(resp.text, 'html.parser')
            return int(
                soup.find_all(class_='num-resultados')[0].text.split(' ')[-1])
        return 0
    def _offsets(self):
        """Offsets used to page through the CSV download (500 rows per page)."""
        return [i * 500 for i in range(self.num_partes // 500 + 1)]
    def eventos(self):
        """DataFrame (from CSV) with this item's event log."""
        colunas = COLUNAS.eventos.keys()
        output = pd.DataFrame(columns=colunas)
        uri = '/pregoes/v1/evento_item_pregao'
        params = {'item': self.id}
        df_eventos = csv2df(uri + '.csv', params)
        if len(df_eventos):
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # migrate to pd.concat when upgrading.
            output = output.append(df_eventos)
        output = sanitiza_df(output)
        return output
    def adjudicado(self):
        # True when the event log contains an "Adjudicado" (awarded) entry
        eventos = self.eventos()['Descrição do evento'].values
        return 'Adjudicado' in tuple(eventos)
    def partes(self):
        """DataFrame of the bids submitted for this item."""
        output = pd.DataFrame(columns=self.colunas)
        if self.num_partes:
            params = {'item': self.id, 'co_pregao': self.co_pregao()}
            for offset in self._offsets():
                params['offset'] = offset
                df = csv2df(self.uri + '.csv', params)
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0.
                output = output.append(df)
            output['id_item'] = self.id
        return sanitiza_df(output)
    def adjudicacao(self):
        """Winning-bid data when the item was awarded, else None.

        Parsed out of the free-text "Observação" field of the award event,
        which is not always filled in correctly — every extracted field may
        come back as NaN."""
        Vencedor = namedtuple('Vencedor', COLUNAS.adjudicacao)
        if self.adjudicado():
            df = self.eventos()
            obs = df.loc[df['Descrição do evento'] == 'Adjudicado']
            obs = obs.iloc[0]['Observação']
            nome_pattern = re.compile(r'Fornecedor:\s*(.*?),')
            try:
                nome = nome_pattern.findall(obs)[0]
            except IndexError:
                nome = np.nan
            cnpj_pattern = re.compile(r'CNPJ/CPF:\s*(.*?),')
            try:
                cnpj = cnpj_pattern.findall(obs)[0]
            except IndexError:
                cnpj = np.nan
            cnpj = limpa_cpf_cnpj(cnpj)
            # This regex grabs the FIRST currency amount appearing in
            # "Observação"; if several amounts are present the value
            # returned may not be the award value.
            valor_pattern = re.compile(r'R\$\s*([\d\.,]+\d)')
            try:
                valor = valor_pattern.findall(obs)[0]
            except IndexError:
                valor = np.nan
            valor = limpa_cifra(valor)
            data_adj = df.loc[df['Descrição do evento'] == 'Adjudicado']
            data_adj = data_adj.iloc[0]['Data e hora do evento']
            data_adj = data_adj.to_pydatetime()
            return Vencedor(self.id, obs, nome, cnpj, valor, data_adj)
        return None
    def __repr__(self):
        return f'Item {self.id} (Pregão {self.parte_de})'
# class Proposta(Componente):
# """Representa uma proposta apresentada no pregão."""
#
# def __init__(self, dados):
# """A classe é instanciada a partir da proposta, retornados por um objeto
# Item."""
# self.dados = dados
#
# @property
# def id(self):
# pattern = re.compile(r'co_proposta=(\d+)')
# return pattern.findall(self.dados[-2])[0]
#
# def partes(self):
# return ()
#
# def __repr__(self):
# return f'Proposta de {self.dados["Número cpf/cnpj fornecedor"]}' |
from __future__ import absolute_import
# import apis into api package
from .about_api import AboutApi
from .access_api import AccessApi
from .bucket_bundles_api import BucketBundlesApi
from .bucket_flows_api import BucketFlowsApi
from .buckets_api import BucketsApi
from .bundles_api import BundlesApi
from .config_api import ConfigApi
from .extension_repository_api import ExtensionRepositoryApi
from .extensions_api import ExtensionsApi
from .flows_api import FlowsApi
from .items_api import ItemsApi
from .policies_api import PoliciesApi
from .tenants_api import TenantsApi
|
#!/bin/env python2.7
# encoding: utf-8
'''
backup -- Backup script for AWS
backup is a script which creates backup images (AMIs) from AWS EC2 and VPC instances.
To backup an instance it needs to have a tag 'Backup' (see FILTER_TAG),
its value defines the number of images to keep.
If there is a tag 'NoReboot' (see NO_REBOOT_TAG), the instance will not be rebooted,
unless a 'RebootRRule' (see REBOOT_RRULE_TAG) tag is defined, and contains an
iCalendar (RFC2445) formatted RRULE string. In that case the instance will be rebooted
on the days defined by this rule.
@author: Endre Czirbesz
@copyright: 2013 Ultrasis. All rights reserved.
@license: Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, dis-
tribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the fol-
lowing conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
@contact: eczirbesz@ultrasis.com
@deffield updated: Updated
'''
# Standard Modules
import os
import sys
import pytz
import ConfigParser
from datetime import datetime
from dateutil.rrule import rrulestr
import dateutil.parser as parser
# Third-Party Modules
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from boto import ec2, utils, exception
__all__ = []
__version__ = '0.5.3'
__date__ = '2013-05-22'
__updated__ = '2013-06-12'
# Settings
FILTER_TAG = 'Backup'             # instances with this tag are backed up; tag value = number of images to keep
NO_REBOOT_TAG = 'NoReboot'        # presence means: do not reboot when imaging
REBOOT_RRULE_TAG = 'RebootRRule'  # iCalendar (RFC2445) RRULE controlling scheduled reboots
CONSISTENT_TAG = 'Consistent'     # set on AMIs created with a reboot (filesystem-consistent)
DEFAULT_KEEP = 7                  # images to keep when the Backup tag value is not a number
STAMP_TAG = 'AutoBackupTimestamp' # creation timestamp tag on each backup AMI
REBOOT_STAMP_TAG = 'LastRebootTime'
SOURCE_TAG = 'SourceInstanceId'   # links an AMI back to its source instance
MAX_TRIES = 3                     # attempts to fetch a freshly created AMI before tagging
DEBUG = 0
TESTRUN = 0
PROFILE = 0
# Globals, populated from the CLI/config file in main()
verbose = 0
silent = False
self_id = None        # id of the instance running this script (never rebooted)
aws_access_key = None
aws_secret_key = None
class CLIError(Exception):
    '''Generic exception to raise and log different fatal errors.'''
    def __init__(self, msg):
        # bug fix: the old `super(CLIError).__init__(type(self))` call used
        # an unbound super and never initialised Exception with the message;
        # the two-argument form initialises the base class properly.
        super(CLIError, self).__init__(msg)
        self.message = "ERROR: %s" % msg
    def __str__(self):
        return self.message
    def __unicode__(self):
        # Python 2 compatibility; harmless under Python 3
        return self.message
def get_self_instance_id():
    # Ask the EC2 metadata service which instance this script is running on,
    # so that we never reboot ourselves while imaging. Returns None when the
    # metadata has no 'instance-id' entry (e.g. not running on EC2).
    if not silent and verbose > 0:
        print "Enquiring self instance id"
    metadata = utils.get_instance_metadata()
    instance_id = metadata['instance-id'] if metadata.has_key('instance-id') else None  # Python 2 dict.has_key
    if not silent and verbose > 0:
        print "Instance Id: %s" % (instance_id)
    return instance_id
def get_instances_in_regions(regions, filters=None):
    # Collect every EC2 instance (optionally filtered) from each of the
    # named regions, using the module-level AWS credentials.
    if not silent and verbose > 0:
        print "Retrieving instances"
    if not silent and verbose > 1:
        print "Regions: %s\nFilters: %s" % (regions, filters)
    instances_in_regions = []
    # iterate all known regions, connecting only to the requested ones
    for region in ec2.regions():
        if region.name in regions:
            if not silent and verbose > 1:
                print "Connecting %s region" % (region.name)
            conn = ec2.connect_to_region(region.name, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
            reservations = conn.get_all_instances(filters=filters)
            i = 0  # per-region instance counter, for the verbose report
            for r in reservations:
                for instance in r.instances:
                    instances_in_regions.append(instance)
                    i += 1
            if not silent and verbose > 0:
                print "Found %d instances in %s region" % (i, region.name)
    if not silent:
        print "Got %s instances" % (len(instances_in_regions))
    return instances_in_regions
def create_ami(instance):
    # Create a backup AMI for `instance` and tag it with the timestamp and
    # source instance id. The instance is rebooted (for a consistent image)
    # unless NoReboot/RebootRRule tags say otherwise or it is this machine.
    if not silent and verbose > 0:
        print "Creating AMI"
    create_time = datetime.now(pytz.utc)
    create_time_ISO = create_time.isoformat()
    # AMI name/description derived from the instance's Name tag (or its id)
    name = '%s_Backup_%s' % ((instance.tags['Name'].replace(' ', '_') if instance.tags.has_key('Name') else instance.id), create_time.strftime('%Y%m%dT%H%M%SZ'))
    desc = '%s Backup on %s (%s)' % ((instance.tags['Name'] if instance.tags.has_key('Name') else instance.id), create_time.ctime(), str(create_time.tzinfo))
    reboot_rule_str = instance.tags[REBOOT_RRULE_TAG] if instance.tags.has_key(REBOOT_RRULE_TAG) else None
    force_reboot = False
    if reboot_rule_str:
        # last reboot defaults to the instance launch time when untagged
        last_reboot = parser.parse(instance.tags[REBOOT_STAMP_TAG]) if instance.tags.has_key(REBOOT_STAMP_TAG) else parser.parse(instance.launch_time)
        try:
            # force a reboot when the RRULE (normalised to midnight) has an
            # occurrence between the last reboot and now; rrulestr(...).before()
            # returns that occurrence (a datetime) or None
            force_reboot = True if rrulestr(reboot_rule_str+";byhour=0;byminute=0;bysecond=0", dtstart=last_reboot).before(datetime.now(pytz.utc)) else False
        except ValueError as e:
            if not silent:
                print e.message
    # never reboot this machine itself; NoReboot/RebootRRule suppress reboots
    # unless the schedule forced one above
    no_reboot = ((not force_reboot) and (instance.tags.has_key(NO_REBOOT_TAG) or instance.tags.has_key(REBOOT_RRULE_TAG))) or (instance.id == self_id)
    if not no_reboot:
        if not silent and verbose > 0:
            print "Tagging instance %s: %s" % (REBOOT_STAMP_TAG, create_time_ISO)
        instance.add_tag(REBOOT_STAMP_TAG, create_time_ISO)
    if not silent and verbose > 1:
        print '''Image parameters:
    Name:         %s
    Description:  %s
    Source:       %s
    No-Reboot:    %s
''' % (name, desc, instance.id, no_reboot)
    ami_id = instance.create_image(name, description=desc, no_reboot=no_reboot)
    if not silent:
        print "Created AMI: %s" % (ami_id)
    # Wait for the image to appear (newly created AMIs are not immediately
    # visible to get_all_images)
    if not silent and verbose > 0:
        print "Tagging image"
    tries_left = MAX_TRIES
    image = None
    while not image and tries_left:
        try:
            image = instance.connection.get_all_images(image_ids=[ami_id])[0]
        except exception.EC2ResponseError as e:
            if not silent:
                print e.message
            tries_left -= 1
    # NOTE(review): if all MAX_TRIES attempts fail, `image` is still None and
    # the next line raises AttributeError — confirm whether that is intended.
    image.add_tag(STAMP_TAG, create_time_ISO)
    image.add_tag(SOURCE_TAG, instance.id)
    if not no_reboot:
        # images taken with a reboot are filesystem-consistent
        image.add_tag(CONSISTENT_TAG, "Yes")
    if not silent and verbose > 1:
        print "Created AMI tags: %s" % (image.tags)
    return ami_id
def image_date_compare(ami1, ami2):
    """cmp-style comparator ordering AMIs by their backup timestamp tag
    (ISO strings, so lexicographic order is chronological order)."""
    stamp1 = ami1.tags[STAMP_TAG]
    stamp2 = ami2.tags[STAMP_TAG]
    if stamp1 < stamp2:
        return -1
    if stamp1 > stamp2:
        return 1
    return 0
def get_images_for_instance(instance, filters=None):
    # Return every AMI whose SourceInstanceId tag points at `instance`,
    # sorted oldest-to-newest by backup timestamp. Extra filters may be
    # supplied; the source-tag filter is always enforced.
    if not filters:
        filters = {'tag:' + SOURCE_TAG: instance.id}
    elif not filters.has_key('tag:' + SOURCE_TAG):  # Python 2 dict.has_key
        filters['tag:' + SOURCE_TAG] = instance.id
    images = [image for image in instance.connection.get_all_images(filters=filters)]
    images.sort(image_date_compare)  # Python 2 list.sort(cmp-function)
    if not silent and verbose > 0:
        print "Got %d images" % (len(images))
    return images
def get_latest_consistent_image_id_for_instance(instance):
    """Return the id of the newest consistent AMI for this instance, or None."""
    consistent = get_images_for_instance(
        instance, filters={'tag:' + CONSISTENT_TAG: 'Yes'})
    if not consistent:
        return None
    return consistent[-1].id
def remove_old_amis(instance):
    """Deregister backup AMIs beyond the retention count.

    Retention comes from the instance's FILTER_TAG value when it is a
    number, otherwise DEFAULT_KEEP.  The newest consistent image is never
    removed, even when it falls outside the retention window.
    """
    keep = int(instance.tags[FILTER_TAG]) if (instance.tags.has_key(FILTER_TAG) and instance.tags[FILTER_TAG].isdigit()) else DEFAULT_KEEP
    latest_consistent_image_id = get_latest_consistent_image_id_for_instance(instance)
    if not silent and verbose > 0:
        print "Removing old images for %s, keeping %d" % (instance.id, keep)
        print "Retrieving images"
    # NOTE(review): keep == 0 makes [:-0] an empty slice, so a tag value of
    # '0' removes nothing instead of everything -- confirm that is intended.
    for image in get_images_for_instance(instance)[:-keep]:
        if not image.id == latest_consistent_image_id:
            instance.connection.deregister_image(image.id, delete_snapshot=True)
            if not silent:
                print "Image %s deregistered" % (image.id)
def main(argv=None):  # IGNORE:C0111
    '''Process command line options and the config file, then run the backup pass.'''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
The script creates backup images (AMIs) from AWS EC2 and VPC instances.
To backup an instance it needs to have a tag 'Backup' (see FILTER_TAG),
its value defines the number of images to keep.
Created by Endre Czirbesz on %s.
Copyright 2013 Ultrasis. All rights reserved.
Licensed under the MIT License (MIT)
http://opensource.org/licenses/MIT
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument("--cron", dest="silent", action="store_true", help="suppress all output for cron run [default: %(default)s]")
        parser.add_argument("-C", "--credential-file", dest="credential_file_name", metavar="FILE",
                            help="config file with AWS credentials [default: ccredentials.ini], overrides environment settings")
        parser.add_argument("-O", "--aws-access-key", dest="aws_access_key", metavar="KEY",
                            help="AWS Access Key ID. Defaults to the value of the AWS_ACCESS_KEY environment variable (if set).")
        parser.add_argument("-W", "--aws-secret-key", dest="aws_secret_key", metavar="KEY",
                            help="AWS Secret Access Key. Defaults to the value of the AWS_SECRET_KEY environment variable (if set).")
        parser.add_argument("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version', version=program_version_message)
        region_name_list = [region.name for region in ec2.regions()]
        parser.add_argument(dest="regions", help="region(s) to backup [default: %s]" % (region_name_list), metavar="region", nargs='*', default=region_name_list)

        # Process arguments
        args = parser.parse_args()

        global verbose, silent, aws_access_key, aws_secret_key
        regions = args.regions
        verbose = args.verbose
        silent = args.silent
        if not silent and verbose > 0:
            print "Verbose mode on, level %d" % (verbose)
        # Credential resolution order: CLI arguments > config file > environment.
        if (args.aws_access_key == None or args.aws_secret_key == None):
            aws_access_key = os.getenv("AWS_ACCESS_KEY")
            aws_secret_key = os.getenv("AWS_SECRET_KEY")
            if not silent and verbose > 2:
                print "Access key from env: %s\nSecret key from env: %s" % (aws_access_key, aws_secret_key)
            config_file_path = os.path.abspath(args.credential_file_name if args.credential_file_name else "credentials.ini")
            if not silent and verbose > 0:
                print "Reading config file: %s" % (config_file_path)
            try:
                config = ConfigParser.ConfigParser()
                config.read(config_file_path)
                if not silent and verbose > 0:
                    print "Got sections: %s" % (config.sections())
                # An absent/empty *default* config file is fine when the
                # environment already supplied both keys.
                if (not config.sections()) and (not args.credential_file_name) and aws_access_key and aws_secret_key:
                    if not silent and verbose > 0:
                        print "Missing or empty default config file, falling back to env"
                else:
                    aws_access_key = config.get('AWS', 'AWSAccessKeyId')
                    aws_secret_key = config.get('AWS', 'AWSSecretKey')
                    if not silent and verbose > 2:
                        print "Access key from file: %s\nSecret key from file: %s" % (aws_access_key, aws_secret_key)
            except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
                raise CLIError("AWS credentials must be specified.")
        else:
            if args.credential_file_name:
                raise CLIError("You can not specify both credentials and a config file.")
            aws_access_key = args.aws_access_key
            aws_secret_key = args.aws_secret_key
            if not silent and verbose > 2:
                print "Access key from args: %s\nSecret key from args: %s" % (aws_access_key, aws_secret_key)
        if not silent and verbose > 2:
            print "Access key: %s\nSecret key: %s" % (aws_access_key, aws_secret_key)
        if (aws_access_key == None or aws_secret_key == None):
            raise CLIError("AWS credentials must be specified.")

        global self_id
        self_id = get_self_instance_id()
        # Back up every tagged instance in the requested regions, then prune
        # images beyond the per-instance retention count.
        for instance in get_instances_in_regions(regions, {'tag:' + FILTER_TAG: '*'}):
            create_ami(instance)
            remove_old_amis(instance)
        if not silent:
            print "Done."
        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
    except Exception as e:
        if not silent and verbose > 0:
            import traceback
            traceback.print_exc()
        if DEBUG or TESTRUN:
            raise(e)
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + e.message + "\n")
        sys.stderr.write(indent + " for help use --help\n")
        return 2
if __name__ == "__main__":
    if DEBUG:
        sys.argv.append("-vvvv")  # force maximum verbosity while debugging
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        # Profile a full run, dump readable stats to a file, then exit.
        import cProfile
        import pstats
        profile_filename = 'backup_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
|
"""
Represents a pair of congress members. Contains the IDs of both and the data
points relevant to calculating the metric between them. Each pair is hashable
(the hash is based on the id's of the two members).
"""
# Metric inputs tracked for every pair, in serialization order.
keys = ['votes_same', 'votes_total', 'mutual_sponsorships']


class Pair:
    """A pair of congress members plus the data needed to score them.

    Pairs hash and compare on the two member ids, in either order.
    """

    def __init__(self, id1, id2, name_1=None, name_2=None, tup=None):
        """Build a Pair from explicit ids/names, or from a stored tuple
        of the form (id_a, id_b, name_a, name_b, k_0, ..., k_n)."""
        if tup:
            self.id_a, self.id_b, self.name_a, self.name_b = tup[:4]
            self.data = {key: int(tup[pos + 4]) for pos, key in enumerate(keys)}
        else:
            self.id_a = id1
            self.id_b = id2
            self.name_a = name_1
            self.name_b = name_2
            self.data = {key: 0 for key in keys}

    def __eq__(self, other):
        """Pairs are equal when they hold the same two ids, order-free."""
        same = self.id_a == other.id_a and self.id_b == other.id_b
        swapped = self.id_a == other.id_b and self.id_b == other.id_a
        return same or swapped

    def __hash__(self):
        # XOR is symmetric, so (a, b) and (b, a) hash identically.
        return hash(self.id_a) ^ hash(self.id_b)

    def getVal(self, key):
        """Look up one of the metric inputs listed in `keys`."""
        return self.data.get(key)

    def calc(self, f):
        """Apply `f` to the raw data dict and return its result.

        The value need not be the metric itself; it may be an intermediate
        used when the metric requires comparison across many pairs.
        """
        return f(self.data)

    def toTuple(self):
        """Flatten to (id_a, id_b, name_a, name_b, k_0, ..., k_n)."""
        values = [self.data[key] for key in self.data]
        return tuple([self.id_a, self.id_b, self.name_a, self.name_b] + values)
def get_pair_hash(id_a, id_b):
    """Order-independent hash for a prospective member pair (matches Pair.__hash__)."""
    return hash(id_b) ^ hash(id_a)
|
import tarfile
import glob
import os
import io
import string
import sys
# Command-line flags: argv[1] enables the train-set conversion, argv[2] the
# test-set conversion.  The original read sys.argv[0] (the script name
# itself) and compared a string against True, so neither branch could ever
# run.
def _flag(index):
    """Return True when argv[index] exists and looks affirmative."""
    return len(sys.argv) > index and sys.argv[index].lower() in ('1', 'true', 'yes')


def make_tsv(out_path, pos_dir, neg_dir):
    """Convert aclImdb review files into one TSV file.

    Each review becomes a line '<text>\t<label>\t\n'; reviews under
    pos_dir are labelled '1', those under neg_dir '0'.  Tabs inside a
    review are replaced by spaces so the TSV stays well-formed.
    """
    with io.open(out_path, 'w', encoding='utf-8') as out:
        for directory, label in ((pos_dir, '1'), (neg_dir, '0')):
            for fname in glob.glob(os.path.join(directory, '*.txt')):
                with io.open(fname, 'r', encoding='utf-8') as fh:
                    # Reviews are single-line; strip tabs so columns survive.
                    text = fh.readline().replace('\t', ' ')
                out.write(text + '\t' + label + '\t\n')


if _flag(1):
    print('-' * 20 + 'train tsv start' + '-' * 20)
    make_tsv('./data/IMDb_train.tsv',
             './data/aclImdb/train/pos/', './data/aclImdb/train/neg/')
    print('-' * 20 + 'train tsv finish' + '-' * 20)
else:
    print("passed train")

if _flag(2):
    print('-' * 20 + 'test tsv start' + '-' * 20)
    make_tsv('./data/IMDb_test.tsv',
             './data/aclImdb/test/pos/', './data/aclImdb/test/neg/')
    # The original printed 'train tsv finish' here; fixed to say 'test'.
    print('-' * 20 + 'test tsv finish' + '-' * 20)
else:
    print("passed test")
from django.urls import path, include
from . import views
# App routes: 'add' renders and handles the new-record form.
urlpatterns = [
    path('add/', views.add_formulario, name='add'),
]
|
"""telegraph URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
app_name = 'posts'  # URL namespace: reverse as 'posts:<name>'

urlpatterns = [
    path('', views.home, name='home'),
    # Section landing pages.
    path('world/', views.world, name='world'),
    path('nigeria/', views.nigeria, name='nigeria'),
    path('politics/', views.politics, name='politics'),
    path('opinion/', views.opinion, name='opinion'),
    path('sport/', views.sport, name='sport'),
    path('business/', views.business, name='business'),
    path('entertainment/', views.entertainment, name='entertainment'),
    # Post CRUD.  The two-segment detail route only matches
    # '<cat_slug>/<slug>/' exactly, so the single-segment 'create/' and the
    # three-segment edit/delete routes below still resolve.
    path('<cat_slug>/<slug>/', views.detail, name='detail'),
    path('create/', views.post_create, name='create'),
    path('<cat_slug>/<slug>/edit/', views.post_update, name='update'),
    path('<cat_slug>/<slug>/delete/', views.post_delete, name='delete'),
    # url(r'^create/$', post_create, name='create'),
    # url(r'^(?P<cat_slug>[\w-]+)/(?P<slug>[\w-]+)/edit/$', post_update, name='update'),
    # url(r'^(?P<cat_slug>[\w-]+)/(?P<slug>[\w-]+)/delete/$', post_delete),
    # url(r'^(?P<cat_slug>[\w-]+)/(?P<slug>[\w-]+)/$', post_detail, name='detail'),
]
|
#!/usr/bin/env python3
#
# Format a result
#
import jsontemplate
import pscheduler
import sys
from validate import result_is_valid
# Requested output MIME type; default to plain text when no argument given.
try:
    format = sys.argv[1]
except IndexError:
    format = 'text/plain'

# The full test result arrives as JSON on stdin.
input = pscheduler.json_load(exit_on_error=True, max_schema=1)

valid, message = result_is_valid(input["result"])

if not valid:
    pscheduler.fail(message)

json = input["result"]       # NOTE(review): misleading name; shadows the usual json module name
i = input['spec']['record']  # DNS record type that was queried

if format == 'text/plain':
    print('Time ......... %s' % json['time'])
    # 'a' and 'aaaa' are plain `if`s; the remaining types chain off the
    # 'aaaa' test via elif, so at most one of them prints.
    if i == 'a':
        for r in input['result']['record']:
            print('A ............ %s' % r)
    if i == 'aaaa':
        for r in input['result']['record']:
            print('AAAA ......... %s' % r)
    elif i == 'cname':
        print('CNAME ........ %s' % input['result']['record'])
    elif i == 'mx':
        for r in input['result']['record']:
            print('Pref ......... %3d %s' % (r['pref'], r['mx']))
    elif i == 'ns':
        for r in input['result']['record']:
            print('NS ........... %s' % r)
    elif i == 'ptr':
        print('PTR .......... %s' % input['result']['record'])
    elif i == 'txt':
        for r in input['result']['record']:
            print('TXT .......... %s' % r)
    elif i == 'soa':
        print('Retry ........ %d' % input['result']['record']['retry'])
        print('Refresh ...... %d' % input['result']['record']['refresh'])
        print('Minimum ...... %d' % input['result']['record']['minimum'])
        print('Expire ....... %d' % input['result']['record']['expire'])
        print('Owner ........ %s' % input['result']['record']['owner'])
        print('Nameserver ... %s' % input['result']['record']['nameserver'])

elif format == 'text/html':
    # Same data rendered as a bare HTML table.
    print('<table>')
    print(' <tr>')
    print(' <th>Time</th>')
    print(' </tr>')
    print(' <tr>')
    print(' <td>%s</td>' % json['time'])
    print(' </tr>')
    if i == 'a':
        print(' <tr>')
        print(' <th>A Records</th>')
        print(' </tr>')
        for r in input['result']['record']:
            print(' <tr>')
            print(' <td>%s</td>' % r)
            print(' </tr>')
    if i == 'aaaa':
        print(' <tr>')
        print(' <th>AAAA Records</th>')
        print(' </tr>')
        for r in input['result']['record']:
            print(' <tr>')
            print(' <td>%s</td>' % r)
            print(' </tr>')
    elif i == 'cname':
        print(' <tr>')
        print(' <th>CNAME</th>')
        print(' </tr>')
        print(' <tr>')
        print(' <td>%s</td>' % input['result']['record'])
        print(' </tr>')
    elif i == 'mx':
        print(' <tr>')
        print(' <th>Pref</th>')
        print(' <th>MX</th>')
        print(' </tr>')
        for r in input['result']['record']:
            print(' <tr>')
            print(' <td>%s</td>' % r['pref'])
            print(' <td>%s</td>' % r['mx'])
            print(' </tr>')
    elif i == 'ns':
        print(' <tr>')
        print(' <th>NS Records</th>')
        print(' </tr>')
        for r in input['result']['record']:
            print(' <tr>')
            print(' <td>%s</td>' % r)
            print(' </tr>')
    elif i == 'ptr':
        print(' <tr>')
        print(' <th>PTR</th>')
        print(' </tr>')
        print(' <tr>')
        print(' <td>%s</td>' % input['result']['record'])
        print(' </tr>')
    elif i == 'txt':
        print(' <tr>')
        print(' <th>TXT Records</th>')
        print(' </tr>')
        for r in input['result']['record']:
            print(' <tr>')
            print(' <td>%s</td>' % r)
            print(' </tr>')
    elif i == 'soa':
        print(' <tr>')
        print(' <th>Retry</th>')
        print(' <th>Refresh</th>')
        print(' <th>Minimum</th>')
        print(' <th>Expire</th>')
        print(' <th>Owner</th>')
        print(' <th>Nameserver</th>')
        print(' </tr>')
        print(' <tr>')
        print(' <td>%d</td>' % input['result']['record']['retry'])
        print(' <td>%d</td>' % input['result']['record']['refresh'])
        print(' <td>%d</td>' % input['result']['record']['minimum'])
        print(' <td>%d</td>' % input['result']['record']['expire'])
        print(' <td>%s</td>' % input['result']['record']['owner'])
        print(' <td>%s</td>' % input['result']['record']['nameserver'])
        print(' </tr>')
    print('</table>')

else:
    pscheduler.fail("Unsupported format '%s'" % format)
|
from stack import Stack
def dec_to_bin(dec):
    """Convert a non-negative integer to its binary-digit string.

    The original returned the int 0 for input 0 but a str for everything
    else; it now consistently returns a string.  The explicit Stack was
    replaced by a plain list used LIFO, which behaves identically.
    """
    if dec == 0:
        return '0'
    bits = []
    while dec > 0:
        bits.append(str(dec % 2))
        dec = dec // 2
    bits.reverse()  # remainders are produced least-significant first
    return ''.join(bits)


print(dec_to_bin(42))   # prints 101010
print(dec_to_bin(100))  # prints 1100100
|
# Generated by Django 3.0.3 on 2020-03-28 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the `meta` SEO model and adjusts three field defaults."""

    dependencies = [
        ('app_websites', '0027_auto_20200318_2353'),
    ]

    operations = [
        # New model holding per-site SEO metadata.
        migrations.CreateModel(
            name='meta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meta_title', models.CharField(max_length=200)),
                ('meta_description', models.CharField(max_length=2000)),
                ('meta_keywords', models.CharField(max_length=200)),
            ],
        ),
        # Placeholder images / blank-able fields for existing models.
        migrations.AlterField(
            model_name='board_members',
            name='profile_pic',
            field=models.ImageField(blank=True, default='img/board/no-image-board.png', null=True, upload_to='img/board'),
        ),
        migrations.AlterField(
            model_name='resources',
            name='website',
            field=models.CharField(blank=True, default='', max_length=200),
        ),
        migrations.AlterField(
            model_name='sponsors',
            name='sponsor_logo',
            field=models.ImageField(blank=True, default='img/sponsors-links/no-image-sponsors-links.png', null=True, upload_to='img/sponsors-links'),
        ),
    ]
|
import numpy as np
from numpy.testing import dec, assert_, assert_raises,\
assert_almost_equal, assert_allclose
import matplotlib.pyplot as plt
import pdb, os
import scipy.sparse as sps
from .blocks import get_demo_circuit
from .structure import nearest_neighbor
from .dataset import gaussian_pdf, barstripe_pdf
from .contexts import ProjectQContext, ScipyContext
from .mmd import RBFMMD2
from .train import train
from .testsuit import load_gaussian, load_barstripe
from .qclibs import rot, CNOT, ry, I2
def test_dataset():
    """The 3x3 bar-and-stripe distribution has exactly 14 nonzero entries."""
    shape = (3, 3)
    pdf = barstripe_pdf(shape)
    assert_(np.count_nonzero(pdf > 1e-5) == 14)
def test_bm():
    """Born-machine analytic gradient must match the numerical gradient."""
    depth = 2
    np.random.seed(2)  # fixed seed: the draws below depend on call order
    #bm = load_gaussian(6, depth)
    bm = load_barstripe((3,3), depth)
    theta_list = np.random.random(bm.circuit.num_param)*2*np.pi
    assert_(bm.depth == depth)
    print('loss = %s'%bm.mmd_loss(theta_list))
    # Cross-check the analytic gradient against finite differences.
    g1 = bm.gradient(theta_list)
    g2 = bm.gradient_numerical(theta_list)
    assert_allclose(g1, g2, atol=1e-5)
def test_wf():
    """ProjectQ and SciPy backends must produce the same wavefunction."""
    depth = 0
    geometry = (6,)
    num_bit = np.prod(geometry)
    pairs = nearest_neighbor(geometry)
    circuit = get_demo_circuit(num_bit, depth, pairs)
    # cross check: run the identical parametrised circuit on both backends
    theta_list = np.random.random(circuit.num_param)
    with ScipyContext(np.prod(geometry)) as cc2:
        circuit(cc2.qureg, theta_list)
    with ProjectQContext(np.prod(geometry), 'simulate') as cc:
        circuit(cc.qureg, theta_list)
    assert_allclose(cc.wf, cc2.wf)
def test_qclib():
    """Sanity checks for the CNOT matrix and the generic rotation gate."""
    gate = CNOT(1, 0, 2)
    assert_(gate.nnz == 4)
    expected = sps.coo_matrix(([1, 1, 1, 1], ([0, 1, 2, 3], [0, 1, 3, 2]))).toarray()
    assert_allclose(gate.toarray(), expected)
    # rot(-pi/2, theta, pi/2) must reduce to a Y rotation by theta.
    theta = np.pi / 4.
    assert_allclose(rot(-np.pi / 2., theta, np.pi / 2.).toarray(),
                    ry(theta).toarray())
if __name__ == '__main__':
    # Run the whole suite when executed directly.
    test_dataset()
    test_wf()
    test_bm()
    test_qclib()
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
# FIXME: daemonize this the proper way
# Activate the virtualenv in-process before importing trytond (Python 2 execfile).
activate_this = "/home/fer/.virtualenvs/trytonenv/bin/activate_this.py"
execfile(activate_this, dict(__file__=activate_this))

import trytond

# Server options; empty 'init'/'update' mean no database bootstrap on start.
options = {
    'init': {},
    'update': {},
    'configfile': '/home/fer/tryton/trytond.conf',
    'db_name': [],
    'logfile': '/home/fer/tryton/runtime/tryton.log',
    'pidfile': '/home/fer/tryton/runtime/pid'
}

trytond.server.TrytonServer(options).run()
|
"""
Label subscription
"""
from dataclasses import dataclass
from typing import Any, Callable
from typeguard import typechecked
from .subscriptions import GQL_LABEL_CREATED_OR_UPDATED
from ...graphql_client import SubscriptionGraphQLClient
@dataclass
class SubscriptionsLabel:
    """
    Set of Label subscriptions
    """
    # NOTE(review): @dataclass adds nothing here -- __init__ is defined
    # explicitly and there are no fields -- confirm it can be dropped.

    # pylint: disable=too-many-arguments,too-many-locals
    def __init__(self, auth):
        """
        Initializes the subclass

        Parameters
        ----------
        auth : KiliAuth object
        """
        self.auth = auth

    @typechecked
    def label_created_or_updated(self, project_id: str, callback: Callable[[str, str], None]):
        # pylint: disable=line-too-long
        """
        Subscribe a callback to a project, which is executed when a label is created or updated.
        See [the related recipe](https://github.com/kili-technology/kili-playground/blob/master/recipes/webhooks.ipynb) for more explanation on how to use it.

        Parameters
        ----------
        project_id :
            Identifier of the project
        callback :
            This function takes as input the id of the asset and its content.

        Returns
        -------
        return
            subscription client
        """
        # GraphQL subscriptions ride over a websocket variant of the endpoint.
        ws_endpoint = self.auth.client.endpoint.replace('http', 'ws')
        websocket = SubscriptionGraphQLClient(ws_endpoint)
        headers = {'Accept': 'application/json',
                   'Content-Type': 'application/json'}
        authorization = f'{self.auth.client.token}'
        headers['Authorization'] = authorization
        variables = {'projectID': project_id}
        websocket.subscribe(
            GQL_LABEL_CREATED_OR_UPDATED,
            variables=variables,
            callback=callback,
            headers=headers,
            authorization=authorization)
        return websocket
|
# coding: utf-8
class Solution(object):
    def maximalSquare(self, matrix):
        """Return the area of the largest all-'1' square in a binary matrix.

        :type matrix: List[List[str]] (a list of '0'/'1' strings also works)
        :rtype: int

        Standard DP: side[i][j] is the side length of the largest square
        whose bottom-right corner is (i, j).  The original tracked widths
        and heights separately and indexed st[i-1][j-1] even when i or j
        was 0, silently wrapping to the last row/column via Python's
        negative indexing; it also printed debug output on every call.
        """
        if not matrix:
            return 0
        m, n = len(matrix), len(matrix[0])
        side = [[0] * n for _ in range(m)]
        best = 0
        for i in range(m):
            for j in range(n):
                if matrix[i][j] == '1':
                    if i == 0 or j == 0:
                        # First row/column: a lone cell is at best a 1x1 square.
                        side[i][j] = 1
                    else:
                        side[i][j] = min(side[i - 1][j],
                                         side[i][j - 1],
                                         side[i - 1][j - 1]) + 1
                    best = max(best, side[i][j])
        return best * best
# Quick manual check on a 10x9 binary grid.
obj = Solution()
print(obj.maximalSquare(["101001110","111000001","001100011","011001001","110110010","011111101","101110010","111010001","011110010","100111000"]))
# print(obj.maximalSquare(['0']))
|
import datetime
import redis
from flask import Flask, render_template, jsonify
import json
#
#
#
#
#
# Crawl-site name -> internal numeric source id (used as the Redis key prefix).
SITE_SOURCE_MAP = {
    "lagou": 201,
    "dajie": 208,
    "yinguo": 211,
    "juzi": 209,
    "baidu": 303,
    "liepin": 202
}
#
#
#
#
#
#
class NoAsRedis:
    """Thin wrapper around StrictRedis with forgiving accessors."""

    def __init__(self, host, port, db):
        self.init_redis(host, port, db)

    def init_redis(self, host, port, db):
        # NOTE(review): decode_responses is passed to StrictRedis while the
        # connections come from the pool, which is likely why get() below
        # still decodes manually -- confirm against redis-py docs whether it
        # belongs on ConnectionPool instead.
        self.redis_client = redis.StrictRedis(
            connection_pool=redis.ConnectionPool(host=host, port=port, db=db),
            decode_responses=True,  # automatic decoding (see note above)
        )

    def len(self, key):
        """Length of the list at `key`; 0 when the key is missing."""
        res = self.redis_client.llen(key)
        if not res:
            return 0
        return res

    def get(self, key):
        """Value at `key` decoded to str, or None when missing."""
        res = self.redis_client.get(key)
        if res is None:
            return None
        return res.decode()
def gene_query_maps(source):
    """Build the Redis counter keys for one source: '<source>_<date>' for
    each of the last 7 days (today first) plus the running '<source>_total'."""
    names = [
        '{}_{}'.format(
            source,
            (datetime.datetime.now() - datetime.timedelta(days=back)).strftime('%Y-%m-%d'))
        for back in range(7)
    ]
    names.append('{}_total'.format(source))
    return names
def gene_task_maps(source):
    """Redis task-queue keys '<source>_1' through '<source>_5'."""
    return ['{}_{}'.format(source, slot) for slot in range(1, 6)]
app = Flask(__name__)

# Redis handles: db 2 holds daily counters, db 1 holds task queues.
# r = NoAsRedis('s19.natfrp.org', 30694, 2)
# t = NoAsRedis('s19.natfrp.org', 30694, 1)
r = NoAsRedis('10.0.0.48', 6379, 2)
t = NoAsRedis('10.0.0.48', 6379, 1)
@app.route('/')
def index():
    # Landing page: short usage hint.
    return 'WELCOME TO WORLD! <br />Usage: /status /task'
@app.route('/status', methods=['get'])
def query_info():
    """Render the crawl-status dashboard from the daily Redis counters."""
    all_res = {}
    for k, v in SITE_SOURCE_MAP.items():
        query_list = gene_query_maps(v)
        # Strip the '<source>_' prefix so keys become bare dates / 'total'.
        query_res = {i.replace('{}_'.format(v), ''): r.get(i) for i in query_list}
        all_res[k] = query_res
    data = all_res
    site_list = [k for k,v in data.items()] # site ["lagou", "dajie"]
    # All sites share the same date keys; take them from the first site.
    datatime = all_res.get(site_list[0])
    datatime_list = [k for k,v in datatime.items()]
    datatime_list.pop() # drop the trailing 'total' key; ["2019-06-05"......]
    datas = {}
    for site in site_list:
        datas[site] = []
        for d in datatime_list:
            if data[site][d]:
                datas[site].append(data[site][d])
            else:
                datas[site].append(0)
        # append the running total as the last column
        datas[site].append(data[site]["total"])
    tasks = query_task()
    res = {"data": datas, # {"lagou": [1,2,3,4,5,6,7], "dajie": [1,2,3,4,5,6,7]}
           "datatime_list": datatime_list,
           "tasks": tasks}
    return render_template('show.html', res=res)
def query_task():
    """Collect task-queue lengths (slots 1-5) for every site, ordered by slot."""
    all_res = {}
    # NOTE(review): `desc` is unused; kept as documentation of slot meanings.
    desc = {
        1: 'import task',       # import
        2: 'parse to type2',    # detail pages
        3: 'parse to type1',    # list pagination
        4: 'parse type1 error', # list errors
        5: 'parse type2 error'  # resume errors
    }
    for k, v in SITE_SOURCE_MAP.items():
        query_list = gene_task_maps(k)
        # Strip the '<site>_' prefix so keys become bare slot numbers.
        query_res = {i.replace('{}_'.format(k), ''): t.len(i) for i in query_list}
        all_res[k] = query_res
    new_res = {}
    for site, v in all_res.items():
        new_res[site] = []
        for i in ["1","2","3","4","5"]:
            new_res[site].append(v[i])
    return new_res
def run():
    """Start the dashboard on all interfaces, port 2333."""
    app.config['JSON_AS_ASCII'] = False  # allow non-ASCII in JSON responses
    app.run(debug=True, host='0.0.0.0', port=2333)


if __name__ == '__main__':
    run()
|
'''
Author: James Kasakyan
CCNY: CSC 47300 Web Development, Professor Grossberg
HW 3 Problem 2. Enlgish-Tutnese decoder/encoder
'''
# Imported Libraries
import re
# Global variables
# One Tutnese syllable per consonant; vowels pass through unchanged.
encode_dictionary = {
    "b":"bub",
    "c":"coch",
    "d":"dud",
    "f":"fuf",
    "g":"gug",
    "h":"hash",
    "j":'jug',
    'k':'kuck',
    'l':'lul',
    'm':'mum',
    'n':'nun',
    'p':'pup',
    'q':'quack',
    'r':'rur',
    's':'sus',
    't':'tut',
    'v': 'vuv',
    'w': 'wack',
    'x': 'xux',
    'y': 'yub',
    'z': 'zug'
}
# Reverse mapping: Tutnese syllable -> consonant.
decode_dictionary = {value: key for key, value in encode_dictionary.items()}
# -----------------------------------------
# encode()
# Takes English text and converts it to Tutnese (the result is lower-cased).
# THROWS: ValueError exception when "|" character is present in input text.
def encode (english_string):
    if len(english_string) == 0: # Empty string
        return english_string
    if '|' in english_string:
        # '|' is reserved as the internal doubled-letter marker.
        raise ValueError("Found invalid character: '|' ")
    lower_english_string = english_string.lower()
    # First collapse doubled consonants 'XX' into '|X'.
    lower_duplicate_encoded_english_string = encode_duplicates(lower_english_string)
    # Replace all chars in {bcdfghjklmnpqrstvwxyz} that ARE NOT preceded by "|" with their tutnese translation
    translated_duplicate_encoded_english_string = re.sub(r'((?<!\|)([bcdfghjklmnpqrstvwxyz]))\1*', lambda m: encode_dictionary[m.group(1)], lower_duplicate_encoded_english_string)
    # Prefix each marked doubled letter ("|X") with "squa" instead of translating it.
    translated_full_english_string = re.sub(r'((?<=\|)(.))\1*', r'squa\1', translated_duplicate_encoded_english_string)
    return (translated_full_english_string.replace('|',""))
def encode_duplicates(english_string):
    """Collapse each doubled consonant 'XX' into the internal marker '|X'.

    Assumes lower-case input containing no '|' characters.
    """
    doubled_consonant = r'([bcdfghjklmnpqrstvwxyz])\1{1}'
    return re.sub(doubled_consonant, r'|\1', english_string)
# ------------------------------------------
# decode()
# Takes Tutnese text and translates it back to English (the result is lower-cased).
# THROWS: ValueError exception when "|" character is present in input text.
def decode(tutnese_string):
    if len(tutnese_string) == 0:
        return tutnese_string
    if '|' in tutnese_string:
        raise ValueError("Found invalid character: '|' ")
    lower_tutnese_string = tutnese_string.lower()
    # Replace all tutnese characters not preceded by 'squa' with their english translation
    lower_tutnese_string_decode_non_duplicates = re.sub(r'((?<!squa)(bub|coch|dud|fuf|gug|hash|jug|kuck|lul|mum|nun|pup|quack|rur|sus|tut|vuv|wack|xux|yub|zug))\1*', lambda m: decode_dictionary[m.group(0)], lower_tutnese_string)
    # Replace all characters preceded by 'squa' with their duplicate
    translated_full_tutnese_string = re.sub(r'((?<=squa)(.))\1*', lambda x: (x.group(1) * 2), lower_tutnese_string_decode_non_duplicates)
    return (translated_full_tutnese_string.replace("squa",""))
|
# -*- coding:UTF-8 -*-
import math, datetime, random
import requests
from django.conf import settings
from django.core.cache import cache
from weixin.pay import WeixinPayError
from rest_framework.generics import get_object_or_404
# Gaode (AMap) web-service API key, read from Django settings.
gd_key = getattr(settings, 'GDKEY')
def get_deliver_pay(origin, destination):
    """Compute courier pricing for a ride between two 'lng,lat' points.

    Returns (kilometers, platform_fee, meters), using the shortest AMap
    bicycling route; results are cached for 7 days keyed on the pair.
    """
    ret = cache.get('%s:%s' % (origin, destination))
    if not ret:
        url = 'https://restapi.amap.com/v4/direction/bicycling?origin={}&destination={}&key={}'.format(origin,
                                                                                                       destination,
                                                                                                       gd_key)
        res = requests.get(url)
        # AMap returns the route distance in meters.
        paths = res.json()['data']['paths']
        meters = min(paths, key=lambda x: x['distance'])['distance']
        # NOTE(review): assumes 'distance' is numeric; if the API returns a
        # string the division below fails on Python 3 -- confirm.
        kilometers = math.ceil(meters / 1000)
        if kilometers <= 1:
            ret = (1, 1.5, meters)
        else:
            plat_to_pay = (2 + kilometers) / 2
            ret = (kilometers, plat_to_pay, meters)
        cache.set('%s:%s' % (origin, destination), ret, timeout=24 * 3600 * 7)
    return ret
def store_order_refund(store_order, refund_fee):
    """Refund a paid store order through WeChat Pay.

    `refund_fee` is in fen (cents).  Returns an (error_code, message)
    tuple; code 1000 means success, 43xx are failures.
    """
    from order.models import OrderRefundResult, OrderTrade
    now = datetime.datetime.now()
    total_fee = int(store_order.account_paid * 100)  # yuan -> fen
    # Locate the paid trade record, either on the store order itself or on
    # its unified parent order.
    if OrderTrade.objects.filter(store_order=store_order).exists():
        order_trade = OrderTrade.objects.get(store_order=store_order, paid_money__isnull=False)
    elif OrderTrade.objects.filter(unify_order=store_order.unify_order).exists():
        order_trade = OrderTrade.objects.get(unify_order=store_order.unify_order, paid_money__isnull=False)
    else:
        return (4301, '无此支付单号')
    if refund_fee > total_fee:
        return (4304, '退款金额不能大于总金额')
    refund_data = {
        "total_fee": total_fee,
        # Unique refund number: TK + timestamp + random 2-digit suffix.
        "out_refund_no": datetime.datetime.strftime(now, 'TK%Y%m%d%H%M%S%f{}'.format(random.randint(10, 100))),
        "out_trade_no": order_trade.trade_no,
        "refund_fee": refund_fee
    }
    from wxpay.views import weixinpay
    try:
        ret = weixinpay.refund(**refund_data)
    except WeixinPayError as e:
        return (4305, e.args[0])
    if ret.get("return_code", '') == "SUCCESS":
        # Verify the response signature before trusting and persisting it.
        receive_sign = ret.pop('sign')
        mysign = weixinpay.sign(ret)
        if receive_sign == mysign:
            ret.pop('serializer', None)
            OrderRefundResult.objects.create(**ret)
            return (1000, "退款成功")
        else:
            return (4302, "退款异常")
    else:
        return (4303, ret)
def look_up_adocode(location):
    """Resolve a 'lng,lat' location to its prefecture-level adcode (XXXX00).

    Returns None implicitly when the AMap lookup fails.
    """
    url = 'https://restapi.amap.com/v3/geocode/regeo?key=%s&location=%s' % (gd_key, location)
    r = requests.get(url).json()
    if r['status'] == '1':
        return r['regeocode']['addressComponent']['adcode'][:4] + '00'
def look_up_towncode(location):
    """Resolve a 'lng,lat' location to its towncode, or None when absent/failed."""
    url = 'https://restapi.amap.com/v3/geocode/regeo?key=%s&location=%s' % (gd_key, location)
    r = requests.get(url).json()
    if r['status'] == '1':
        return r['regeocode']['addressComponent'].get('towncode', None)
def prepare_dwd_order(store_order, user, op=None):
    """Build an (unsaved) InitDwdOrder for courier dispatch.

    op == 'backend': the store is the pickup side and the customer address
    the drop-off; otherwise the direction is reversed (customer -> store).
    """
    from delivery.models import InitDwdOrder
    dwdorder = InitDwdOrder()
    receive_address = store_order.unify_order.address if store_order.unify_order else store_order.user_address
    store = store_order.store
    temp_dict = {
        'order_original_id': dwdorder.trade_number,
        'order_create_time': int(store_order.paid_time.timestamp() * 1000),  # epoch millis
        'order_remark': '',
        'order_price': int(store_order.account_paid * 100),  # fen
        'cargo_weight': 0,
        'cargo_num': 1,
        'city_code': store.adcode,
        'seller_id': user.userinfo.id,
        'money_rider_needpaid': 0,
        'money_rider_prepaid': 0,
        'money_rider_charge': 0,
        'time_waiting_at_seller': 300,
        'delivery_fee_from_seller': 0
    }
    if op == 'backend' and hasattr(user, 'stores') and store_order.store == user.stores:
        # Store-initiated delivery: store -> customer.
        temp_dict.update({
            'seller_name': store.info.contract_name,
            'seller_mobile': store.info.contract_mobile,
            'seller_address': store.receive_address,
            'seller_lat': round(store.latitude, 6),
            'seller_lng': round(store.longitude, 6),
            'consignee_name': receive_address.contact,
            'consignee_mobile': receive_address.phone,
            'consignee_address': receive_address.address + receive_address.room_no,
            'consignee_lat': round(receive_address.latitude, 6),
            'consignee_lng': round(receive_address.longitude, 6)
        })
    elif op != 'backend' and store_order.user == user:
        # Customer-initiated (e.g. return shipment): customer -> store.
        temp_dict.update({
            'seller_name': receive_address.contact,
            'seller_mobile': receive_address.phone,
            'seller_address': receive_address.address + receive_address.room_no,
            'seller_lat': round(receive_address.latitude, 6),
            'seller_lng': round(receive_address.longitude, 6),
            'consignee_name': store.info.contract_name,
            'consignee_mobile': store.info.contract_mobile,
            'consignee_address': store.receive_address,
            'consignee_lat': round(store.latitude, 6),
            'consignee_lng': round(store.longitude, 6)
        })
    # NOTE(review): when neither branch matches, the order carries no
    # addresses at all -- confirm callers guard against that.
    dwdorder.__dict__.update(temp_dict)
    return dwdorder
def customer_get_object(self):
    """DRF `get_object` clone without the object-level permission check."""
    queryset = self.queryset
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    assert lookup_url_kwarg in self.kwargs, (
        'Expected view %s to be called with a URL keyword argument '
        'named "%s". Fix your URL conf, or set the `.lookup_field` '
        'attribute on the view correctly.' %
        (self.__class__.__name__, lookup_url_kwarg)
    )
    filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
    obj = get_object_or_404(queryset, **filter_kwargs)
    return obj
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
"""
Final exam: Log file analyzer
"""
import sys
import os
import psutil
def analyzer(path):
    """
    Log files analyzer
    :type path: root directory to analyze logs
    """
    result = {}
    comp_stat = {}
    # Every file under `path` (recursively) is treated as a log file.
    for root, dirs, files in os.walk(path):
        for f in files:
            file_path = os.path.join(root, f)
            with open(file_path, 'r') as fd:
                result[file_path], comp_stat[file_path] = parse_file(fd)
    print_by_files(result)
    print_summary(result)
    print_by_components(comp_stat)
def parse_file(fd):
    """Parse one log stream and collect message-level and component stats.

    Each counted line is expected to look like:
        LEVEL <date> - <time> - <component> - <pid> - <message>

    Returns a tuple ``(msg_stat, comp_stat)`` where ``msg_stat`` maps a
    level name to its count, and ``comp_stat`` maps a component name to
    ``({level: count}, set_of_pids)``.
    """
    msg_stat = {'CRITICAL': 0, 'ERROR': 0, 'WARNING': 0, 'INFO': 0, 'DEBUG': 0}
    # {'component_name': ({'MESSAGE_LEVEL': 0}, set([pid_1, pid_2, ...]))}
    comp_stat = {}
    for line in fd:
        msg = line.split(' ', 1)[0]  # message level value
        if msg not in msg_stat:
            continue
        parts = line.split(' - ', 4)
        if len(parts) < 4:
            # FIX: the original indexed split() results before any check and
            # raised IndexError on malformed lines (e.g. traceback
            # continuations); skip them instead.
            continue
        component = parts[2]  # component name
        pid = parts[3]        # component pid
        msg_stat[msg] += 1
        if component not in comp_stat:
            comp_stat[component] = ({'CRITICAL': 0, 'ERROR': 0, 'WARNING': 0,
                                     'INFO': 0, 'DEBUG': 0}, set())
        # Direct lookup replaces the original substring scan over all level
        # names -- the level names are never substrings of each other, so the
        # result is identical.
        comp_stat[component][0][msg] += 1  # message level value
        comp_stat[component][1].add(pid)   # add pid info
    return msg_stat, comp_stat
def print_by_files(result):
    """Print per-file message-level counts.

    FIX: the original used Python 2 print statements, which do not compile
    on Python 3; single-argument print() behaves identically on both.
    """
    for file_path, level_counts in result.items():
        print("File: {}".format(file_path))
        for level, count in level_counts.items():
            print(" {:<8} - {}".format(level, count))
def print_summary(result, priority_order=('CRITICAL',
                                          'ERROR',
                                          'WARNING',
                                          'INFO',
                                          'DEBUG')):
    """Print totals per message level, summed across all files.

    FIX: the per-level line used a Python 2 print statement; single-argument
    print() is equivalent and also valid on Python 3.
    """
    print("================")
    print("Summary results:")
    for level in priority_order:
        print(" {:<8} - {}".format(level, sum(item[level] for item in result.values())))
def is_active_pid(component_name, pids):
    """Return True when any pid in *pids* belongs to a currently running
    process whose name equals *component_name*."""
    return any(
        psutil.pid_exists(int(pid))
        and psutil.Process(int(pid)).name() == component_name
        for pid in pids
    )
def print_by_components(result):
    """Print per-file component statistics, most severe components first.

    A component line is marked '+' when one of its logged pids is still a
    live process with the matching name, '-' otherwise.

    FIX: the original used Python 2 print statements; single-argument
    print() is equivalent and valid on Python 3 as well.
    """
    for file_path, comp_stat in result.items():
        print("File: {}".format(file_path))
        # sort_by_priority is ascending, so reverse for severest-first.
        for comp_name, (level_counts, pids) in reversed(sort_by_priority(comp_stat)):
            active_marker = '+' if is_active_pid(comp_name, pids) else '-'
            print(" {:<25} {:<4} {}".format(comp_name, active_marker, level_counts))
def sort_by_priority(stat, priority_order=('CRITICAL',
                                           'ERROR',
                                           'WARNING',
                                           'INFO',
                                           'DEBUG')):
    """Return stat's items sorted ascending by message counts.

    Components compare lexicographically on their per-level counts, most
    severe level first; ties keep their original insertion order. This is
    exactly what the original hand-rolled O(n^2) insertion sort computed
    ("insert before the first strictly greater element"), so the stable
    ``sorted`` call below is equivalent and simpler.
    """
    def level_counts(item):
        _name, (counts, _pids) = item
        return tuple(counts[level] for level in priority_order)
    return sorted(stat.items(), key=level_counts)
def test_analyzer():
    # Smoke test: run the full analyzer against the bundled sample logs.
    analyzer("tests/logs")
def main():
    """Entry point: analyze the directory named on the command line, or
    fall back to the bundled test logs when none is given. Returns 0."""
    if len(sys.argv) == 2:
        # FIX: the original passed the whole argv list to analyzer()
        # instead of the directory argument.
        analyzer(sys.argv[1])
    else:
        # Toggle comments in real application
        # print >>sys.stderr, "Specify ROOT directory for analyzing"
        test_analyzer()
    return 0
if __name__ == "__main__":
    exit(main())
|
from typing import Set
from wingedsheep.carcassonne.carcassonne_game_state import CarcassonneGameState
from wingedsheep.carcassonne.objects.connection import Connection
from wingedsheep.carcassonne.objects.coordinate import Coordinate
from wingedsheep.carcassonne.objects.coordinate_with_side import CoordinateWithSide
from wingedsheep.carcassonne.objects.meeple_position import MeeplePosition
from wingedsheep.carcassonne.objects.road import Road
from wingedsheep.carcassonne.objects.side import Side
from wingedsheep.carcassonne.objects.terrain_type import TerrainType
from wingedsheep.carcassonne.objects.tile import Tile
class RoadUtil:
    """Static helpers for discovering and scoring roads on the board."""

    @classmethod
    def opposite_edge(cls, road_position: CoordinateWithSide):
        """Return the matching edge on the neighbouring tile.

        E.g. the TOP edge of a tile pairs with the BOTTOM edge of the tile
        above it. NOTE(review): returns None for Side.CENTER -- presumably
        callers never pass it; confirm.
        """
        if road_position.side == Side.TOP:
            return CoordinateWithSide(Coordinate(road_position.coordinate.row - 1, road_position.coordinate.column),
                                      Side.BOTTOM)
        elif road_position.side == Side.RIGHT:
            return CoordinateWithSide(Coordinate(road_position.coordinate.row, road_position.coordinate.column + 1),
                                      Side.LEFT)
        elif road_position.side == Side.BOTTOM:
            return CoordinateWithSide(Coordinate(road_position.coordinate.row + 1, road_position.coordinate.column),
                                      Side.TOP)
        elif road_position.side == Side.LEFT:
            return CoordinateWithSide(Coordinate(road_position.coordinate.row, road_position.coordinate.column - 1),
                                      Side.RIGHT)

    @classmethod
    def find_road(cls, game_state: CarcassonneGameState, road_position: CoordinateWithSide) -> Road:
        """Flood-fill the complete road containing *road_position*.

        Breadth-first exploration: follow every road edge to its opposite
        edge on the neighbouring tile until no open connections remain.
        """
        roads: Set[CoordinateWithSide] = set(cls.outgoing_roads_for_position(game_state, road_position))
        open_connections: Set[CoordinateWithSide] = set(map(lambda x: cls.opposite_edge(x), roads))
        explored: Set[CoordinateWithSide] = roads.union(open_connections)
        while len(open_connections) > 0:
            open_connection: CoordinateWithSide = open_connections.pop()
            new_roads = cls.outgoing_roads_for_position(game_state, open_connection)
            roads = roads.union(new_roads)
            new_open_connections = set(map(lambda x: cls.opposite_edge(x), new_roads))
            explored = explored.union(new_roads)
            new_open_connection: CoordinateWithSide
            for new_open_connection in new_open_connections:
                if new_open_connection not in explored:
                    open_connections.add(new_open_connection)
                    explored.add(new_open_connection)
        # The road is finished when every explored edge is itself a road
        # edge, i.e. no connection points at an empty tile.
        finished: bool = len(explored) == len(roads)
        return Road(road_positions=roads, finished=finished)

    @classmethod
    def outgoing_roads_for_position(cls, game_state: CarcassonneGameState, road_position: CoordinateWithSide) -> [CoordinateWithSide]:
        """Return the road edges of the tile at *road_position* that are
        connected (through the tile) to the given side.

        An empty board position yields an empty list.
        """
        tile: Tile = game_state.get_tile(road_position.coordinate.row, road_position.coordinate.column)
        if tile is None:
            return []
        roads: [CoordinateWithSide] = []
        connection: Connection
        for connection in tile.road:
            if connection.a == road_position.side or connection.b == road_position.side:
                # CENTER endpoints terminate on the tile and are not edges.
                if connection.a != Side.CENTER:
                    roads.append(CoordinateWithSide(coordinate=road_position.coordinate, side=connection.a))
                if connection.b != Side.CENTER:
                    roads.append(CoordinateWithSide(coordinate=road_position.coordinate, side=connection.b))
        return roads

    @classmethod
    def road_contains_meeples(cls, game_state: CarcassonneGameState, road: Road):
        """True when any player has a meeple placed on any edge of *road*."""
        for road_position in road.road_positions:
            for i in range(game_state.players):
                if road_position in list(map(lambda x: x.coordinate_with_side, game_state.placed_meeples[i])):
                    return True
        return False

    @classmethod
    def find_meeples(cls, game_state: CarcassonneGameState, road: Road) -> [[MeeplePosition]]:
        """Return, per player index, the meeples placed on *road*."""
        meeples: [[MeeplePosition]] = []
        for i in range(game_state.players):
            meeples.append([])
        for road_position in road.road_positions:
            for i in range(game_state.players):
                meeple_position: MeeplePosition
                for meeple_position in game_state.placed_meeples[i]:
                    if road_position == meeple_position.coordinate_with_side:
                        meeples[i].append(meeple_position)
        return meeples

    @classmethod
    def find_roads(cls, game_state: CarcassonneGameState, coordinate: Coordinate):
        """Return the distinct roads touching the tile at *coordinate*.

        Collected into a set first so that two sides belonging to the same
        road yield a single entry (relies on Road's equality semantics).
        """
        roads: Set[Road] = set()
        tile: Tile = game_state.board[coordinate.row][coordinate.column]
        if tile is None:
            return roads
        side: Side
        for side in [Side.TOP, Side.RIGHT, Side.BOTTOM, Side.LEFT]:
            if tile.get_type(side) == TerrainType.ROAD:
                road: Road = cls.find_road(game_state=game_state,
                                           road_position=CoordinateWithSide(coordinate=coordinate, side=side))
                roads.add(road)
        return list(roads)
|
from .analyzer import Semantics
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 10:24:14 2018
@author: fuwen
"""
import pymongo,requests,json,time
# Endpoint that echoes the caller's IP geolocation; used to verify a proxy.
BilibiliIpUrl = 'https://api.live.bilibili.com/ip_service/v1/ip_service/get_ip_addr'
# NOTE(review): database credentials are hard-coded in the connection URI.
myclient = pymongo.MongoClient('mongodb://fuwenyue:pass4Top@ds029638.mlab.com:29638/socks_proxies')
mydb = myclient['socks_proxies']
ProxiesCol = mydb['Proxies']
# Phase 1: remove duplicate proxy documents (same 'https' value).
# delete_one removes only one copy per pass, so loop until a pass finds
# no duplicates.
while True:
    ProxiesList = ProxiesCol.find({},{ "_id": 0, "https": 1}).sort('update',-1)
    ProxiesList = [Proxies for Proxies in ProxiesList]
    PROXIESLIST = []  # unique proxy documents seen so far
    SAMELIST = []     # documents occurring more than once
    for Proxies in ProxiesList :
        if Proxies not in PROXIESLIST :
            PROXIESLIST.append(Proxies)
    for Proxies in PROXIESLIST :
        if ProxiesList.count(Proxies) > 1 :
            SAMELIST.append(Proxies)
    for Proxies in SAMELIST :
        y = ProxiesCol.delete_one(Proxies)
        print( y.deleted_count,"个重复文档已删除")
        # Throttle deletions to be gentle with the hosted database.
        time.sleep(2)
        print( "————————————")
    if SAMELIST==[]:
        break
# Phase 2: validate every stored proxy against the geolocation API,
# refreshing its location/update fields; any failure deletes the proxy.
ProxiesList = ProxiesCol.find().sort('update',1)
for Proxies in ProxiesList :
    LastUpdate = Proxies['update']
    Proxies = {'https':Proxies['https']}
    print('上次更新时间 : ',LastUpdate)
    try :
        Response = requests.get(BilibiliIpUrl ,proxies = Proxies, timeout=10)
        IpJson = json.loads(Response.text)
        country = IpJson['data']['country']
        province = IpJson['data']['province']
        city = IpJson['data']['city']
        location = country + province + city
        print(location)
        d = {'https':Proxies['https']}
        t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        x = ProxiesCol.update_one(d, { '$set': {'location': location,'update':t}})
        print(x.modified_count, "文档已更新")
    except Exception as e:
        # Timeout, bad JSON or missing fields all mean the proxy is dead.
        d = {'https':Proxies['https']}
        y = ProxiesCol.delete_one(d)
        print('无效代理 :', y.deleted_count,"个文档已删除")
        print('------')
import numpy as np
import matplotlib.pyplot as plt
from grassopt import minimize
# Problem size: optimize a 20x4 matrix on the Grassmann manifold.
n, p = 20, 4
# Random symmetric matrix (symmetrized average of a uniform draw).
A = np.random.rand(n, n)
A = (A + A.T)/2
def f1(y):
    """Objective f(Y) = trace(Y^T A Y) / 2 for the module-level matrix A."""
    # trace == sum of the diagonal; equivalent to the original
    # np.sum(np.diag(...)) without materializing the diagonal copy.
    return np.trace(np.dot(np.dot(y.T, A), y)) / 2.0
def f1y(y):
    """Euclidean gradient of f1: (A + A^T) Y / 2."""
    symmetrized = A + A.T
    return np.dot(symmetrized, y) / 2.0
def f1yy(y):
    """Hessian of f1: an (n*p, n*p) block-diagonal matrix with A repeated
    on each of the p diagonal blocks (independent of y)."""
    # kron(I_p, A) builds exactly the block-diagonal matrix the original
    # assembled with an explicit loop, in one vectorized call.
    return np.kron(np.eye(p), A)
# Start from the first p canonical basis vectors.
y0 = np.vstack([np.eye(p), np.zeros((n-p, p))])
opt_res = minimize(y0, f1, f1y, f1yy)
# Known optimum: half the sum of the p smallest eigenvalues of A.
# eigvalsh is the correct routine for a symmetric matrix (real, sorted
# ascending); eigvals could return complex values that break np.sort.
optval = np.sum(np.linalg.eigvalsh(A)[:p])/2
fig, axs = plt.subplots(1, 2, figsize=(12, 4))
fy = [f1(yy) for yy in opt_res['y']]
axs[0].plot(range(len(fy)), fy)
axs[0].hlines(optval, 0, len(fy)-1)
axs[0].set_ylabel('f(Y)')
axs[0].set_xlabel('Iteration')
# FIX: `abs(fy - optval)` subtracted a float from a Python list, which
# raises TypeError; convert to an ndarray first.
err = np.abs(np.asarray(fy) - optval)
axs[1].semilogy(range(len(fy)), err)
axs[1].set_ylabel('Absolute error')
axs[1].set_xlabel('Iteration')
plt.show()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from urlparse import urlparse, parse_qs
class mytime:
    """Helpers around 'HH:MM' time strings, the current date/time and URL
    query parameters."""
    def __init__(self):
        pass
    def str2num(self, time):
        """the format is (xx:xx) -- returns minutes since midnight"""
        hour, minute = time.split(':')  # renamed local: don't shadow builtin min
        return int(hour)*60 + int(minute)
    def get_time(self, list, pos):
        """get the time of the pos th element in list with minutes. the list`s format must be [('xx:xx','content'),......]"""
        from ast import literal_eval
        # SECURITY FIX: the original called eval() on the stored string,
        # which executes arbitrary code; literal_eval only accepts Python
        # literals and is a drop-in replacement for tuple strings.
        return self.str2num(literal_eval(list[pos])[0])
    def get_now_in_day(self):
        """return year-month-day"""
        now = datetime.now().timetuple()
        now_str = '-'.join([ str(now.tm_year), str(now.tm_mon), str(now.tm_mday)])
        return now_str
    def get_now_in_minute(self):
        """return hour:minute"""
        now = datetime.now().timetuple()
        now_hour_min = ':'.join([str(now.tm_hour), str(now.tm_min)])
        return now_hour_min
    def get_date_from_url(self, url):
        """get the date parameter in url"""
        return self.get_param_from_url(url,'date')
    def get_param_from_url(self, url, param_name):
        # NOTE(review): raises TypeError when param_name is absent
        # (parse_qs.get returns None); kept for interface compatibility.
        url_param = parse_qs(urlparse(url).query)
        return url_param.get(param_name)[0]
if __name__ == '__main__':
    # Demo: extract the 'date' query parameter from a sample EPG URL.
    # FIX: print() function form (the original Python 2 print statement
    # does not compile on Python 3); also avoid rebinding the class name.
    mt = mytime()
    print(mt.get_date_from_url('http://localhost/epg/list?date=2012-9-11&channel=cctv1'))
|
#importing the library
import numpy as np
import pandas as pd
#reading the dataset and creating the dataframe
dataset = pd.read_csv("data.csv")
#converting all string values to nan
# FIX: DataFrame.convert_objects was removed from pandas; applying
# pd.to_numeric with errors='coerce' coerces non-numeric strings to NaN.
dataset = dataset.apply(pd.to_numeric, errors='coerce')
#dividing coloumns between dependent and independent variables
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 13].values
#fitting NaN value with the average values
# FIX: sklearn.preprocessing.Imputer was removed; SimpleImputer is the
# replacement (column-wise mean imputation is its default behaviour).
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
x[:, 0:13] = imputer.fit_transform(x[:, 0:13])
#scalng the data on the same scale
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
x = sc_X.fit_transform(x)
#dividing data between test set and training set
# FIX: sklearn.cross_validation was removed; model_selection hosts
# train_test_split now.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=8)
#Fitting logistics regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(x_train, y_train)
# Predicting the Test set results
y_pred_lr = classifier.predict(x_test)
from sklearn.metrics import confusion_matrix
cm_lr = confusion_matrix(y_test, y_pred_lr)
# Fitting K-NN to the Training set
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(x_train, y_train)
# Predicting the Test set results
y_pred_knn = classifier.predict(x_test)
# Making the Confusion Matrix
cm_knn = confusion_matrix(y_test, y_pred_knn)
#Fitting SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(x_train, y_train)
# Predicting the Test set results
y_pred_svm = classifier.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(x_train, y_train)
# Predicting the Test set results
y_pred_nb = classifier.predict(x_test)
# Making the Confusion Matrix
cm_nb = confusion_matrix(y_test, y_pred_nb)
#Merging the predictions: majority vote across the four classifiers
y_predm = pd.DataFrame({'Logistic Regression': y_pred_lr, 'SVM': y_pred_svm,
                        'KNN': y_pred_knn, 'Naive Bayes': y_pred_nb})
# Row-wise mode implements the vote (ties resolved by the first mode,
# as in the original). The original also built a throwaway zero-filled
# DataFrame here that was immediately overwritten; removed.
y_pred = y_predm.mode(axis=1)
y_predict = y_pred.iloc[:, 0].values
# Making the Confusion Matrix
cm = confusion_matrix(y_test, y_predict)
'''
Created on Sep 9, 2015
@author: Jonathan Yu
'''
def score(word):
    """Score a word as the square of its length."""
    length = len(word)
    return length * length

if __name__ == '__main__':
    pass
# -*- coding: utf-8 -*-
# @Date : 2018-03-19 10:27:48
# @Author : jym
# @Description:
# @Version : v0.0
import pymongo

# Seed the QiChaCha user collection with test accounts.
# NOTE(review): credentials are stored in plain text.
con = pymongo.MongoClient('localhost', 28019)
db = con['QiChaCha']
coll = db['users']
# FIX: Collection.ensure_index was removed from pymongo; create_index is
# the supported equivalent and is a no-op if the index already exists.
coll.create_index('user', unique=True)
users = [{'user':'18680325804','pwd':'123456789qwe'},
         {'user':'18814118010','pwd':'hbbhbb'},
         {'user':'13823562294','pwd':'871124'},
         {'user':'13250222195','pwd':'lxj123'},
         {'user':'13250201823','pwd':'lxj123'},
         {'user':'13148942850','pwd':'lxj123'},
         {'user':'13242740941','pwd':'lxj123'},
         {'user':'13250502578','pwd':'lxj123'},
         {'user':'13250285066','pwd':'lxj123'},
         {'user':'13148935862','pwd':'lxj123'}]
coll.insert_many(users)
def group_by(iterable, key):
    """Group the items of *iterable* into a dict keyed by key(item);
    each group keeps the items themselves, in encounter order."""
    identity = lambda item: item
    return assoc(iterable, key=key, value=identity)
def assoc(iterable, key, value):
    """Build a dict mapping key(item) -> [value(item), ...].

    Values within each group keep the order in which their items were
    encountered.
    """
    result = {}
    for item in iterable:
        # setdefault replaces the original membership-test-then-append.
        result.setdefault(key(item), []).append(value(item))
    return result
|
import spotipy
import spotipy.oauth2 as oauth2
import numpy as np
import json
import sys
import yaml
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
# Spotify's audio-features endpoint accepts at most 50 track ids per call.
limitOfTracksApiCanProcess = 50
# NOTE(review): yaml.load without an explicit Loader is unsafe on
# untrusted input, and the file handle is never closed.
conf = yaml.load(open('conf/application.yml'))
CLIENT_ID = conf['client']['id']
CLIENT_SECRET = conf['client']['secret']
#INSERT YOUR CREDENTIALS HERE
credentials = oauth2.SpotifyClientCredentials(CLIENT_ID, CLIENT_SECRET)
token = credentials.get_access_token()
# Module-level client shared by the functions below (network side effect
# at import time).
sp = spotipy.Spotify(auth=token)
def prepareProperFormatOfUrl(listOfUrls):
    """Convert open.spotify.com track URLs into 'spotify:track:<id>' URIs.

    Accepts raw lines read from a file; a trailing newline is stripped.
    """
    constantPartOfString = 'spotify:track:'
    formattedUrlArr = []
    for url in listOfUrls:
        # FIX: strip the newline from the URL itself rather than blindly
        # chopping the last character of the 5th path segment, which was
        # only correct when that segment happened to be last.
        track_id = url.rstrip('\n').split('/')[4]
        formattedUrlArr.append(constantPartOfString + track_id)
    return formattedUrlArr
def readUrlFromFileReturnTrackUrl(filename):
    """Read one URL per line from *filename* and return the formatted
    'spotify:track:<id>' URIs."""
    # readlines() is equivalent to the original manual readline loop
    # (both stop at EOF and keep trailing newlines for the formatter).
    with open(filename) as f:
        lines = f.readlines()
    return prepareProperFormatOfUrl(lines)
def getTracksFeatures(trackUrlArr):
    """Fetch audio features for each track URI via the module-level
    Spotify client.

    Returns one 9-tuple of floats per processed track, in input order;
    tracks whose features cannot be read contribute a zero tuple.
    NOTE(review): the range stop of len-1 looks like it can skip the last
    track for some input lengths -- confirm against the API batching.
    """
    vectorOfFeatures = []
    #API can process only 50 tracks at once
    for i in range(0, (len(trackUrlArr)-1), limitOfTracksApiCanProcess):
        try:
            if ((i+limitOfTracksApiCanProcess-1) > (len(trackUrlArr) - 1)):
                #if there is less tracks left than 50
                features = sp.audio_features(trackUrlArr[i:(len(trackUrlArr))])
            else:
                features = sp.audio_features(trackUrlArr[i:(i+limitOfTracksApiCanProcess)])
        except:
            # NOTE(review): bare except silently drops the whole batch.
            print("Spotipy bug")
            continue
        # Round-trip through JSON to normalize the client's response type.
        features = json.loads(json.dumps(features, indent=4))
        for singleTrack in features:
            try:
                energy = float(singleTrack['energy'])
                liveness = float(singleTrack['liveness'])
                tempo = float(singleTrack['tempo'])
                speechiness = float(singleTrack['speechiness'])
                acousticness = float(singleTrack['acousticness'])
                instrumentalness = float(singleTrack['instrumentalness'])
                danceability = float(singleTrack['danceability'])
                loudness = float(singleTrack['loudness'])
                valence = float(singleTrack['valence'])
                vectorOfFeatures.append((energy,liveness, tempo, speechiness, acousticness, instrumentalness, danceability, loudness, valence))
            except:
                # Missing/None feature payload: keep positions aligned with
                # the input by appending a zero vector.
                vectorOfFeatures.append((0,0,0,0,0,0,0,0,0))
                print("Unknown error")
                continue
        print(len(vectorOfFeatures), i)
    return vectorOfFeatures
def predictFromFeatures(featuresArr):
    """Run the pickled classifier over the feature tuples.

    Returns a plain list with one predicted label per input row.
    """
    predictArr = []
    #import kernel from file
    clf = joblib.load('trainedClf.pkl')
    for element in featuresArr:
        sample = np.array(element).reshape(1, -1)
        # FIX: np.asscalar was removed from NumPy; ndarray.item() is the
        # documented replacement for extracting the single prediction.
        predictArr.append(clf.predict(sample).item())
    #returns simple array of predicted labels
    return predictArr
def writePredictionsToFile(predictedTracks):
    """Write str(predictedTracks) to the 'predictedTracks' file.

    Uses a context manager so the handle is closed even if the write
    fails (the original leaked it between open() and close()).
    """
    with open("predictedTracks", "w") as out:
        out.write(str(predictedTracks))
def predict(arrOfTrackUrls):
    """Fetch features for each track URI and pair every URI with its
    predicted label; returns a list of (uri, label) tuples."""
    features = getTracksFeatures(arrOfTrackUrls)
    labels = predictFromFeatures(features)
    # listOfUrlAndPrediction.append(("https://open.spotify.com/track/" + url.split(':')[2], prediction))
    return [(url, label) for url, label in zip(arrOfTrackUrls, labels)]
if __name__ == "__main__":
    # FIX: prepareProperFormatOfUrl expects a list of URLs; the original
    # passed the bare argv string, which made it iterate characters and
    # fail on the split-index lookup.
    predict(prepareProperFormatOfUrl([sys.argv[1]]))
|
import pickle
import numpy as np
from itertools import chain, product
from scipy.stats import multivariate_normal as mvn
import scipy.sparse.csgraph as csg
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.exceptions import NotFittedError
from electromorpho.core.gaussian import gn_params, conditional_mvn_params, to_mvn
from electromorpho.core.misc import get_rng
from electromorpho.structure.graphs import MBCGraph
# noinspection PyAttributeOutsideInit
class MGNR(BaseEstimator, RegressorMixin):
    """Multi-output Gaussian network regressor over an MBC structure.

    The joint (features, targets) distribution is a multivariate normal
    fitted on a fixed MBCGraph; prediction conditions each connected
    target component on its feature parents.
    """
    def __init__(self, fit_params=None, verbose=False):
        # Resolve the parameter-fitting strategy to a callable
        # (structure, data) -> (mean, covariance).
        if fit_params is None or fit_params == 'mle':
            fit_params = lambda structure, data: to_mvn(*gn_params(structure, data, sparse=True))
        elif fit_params == 'ridge':
            fit_params = lambda structure, data: to_mvn(*gn_params(structure, data, sparse=True, l2_reg=0.1))
        elif callable(fit_params):
            fit_params = fit_params
        else:
            raise NotImplementedError('Only mle estimation is currently available')
        self.fit_params = fit_params
        self.verbose = verbose

    @property
    def n_vars(self):
        """Total number of variables (features + targets)."""
        try:
            return self.mean_.shape[0]
        # FIX: a missing fitted attribute raises AttributeError, not
        # KeyError, so the original never produced NotFittedError.
        except (AttributeError, KeyError):
            raise NotFittedError('Cannot access n_vars property until model is fitted')

    @property
    def n_targets(self):
        """Number of target variables."""
        try:
            return self.n_targets_
        except (AttributeError, KeyError):
            raise NotFittedError('Cannot access n_targets property until model is fitted')

    @property
    def n_features(self):
        """Number of feature variables."""
        try:
            return self.mean_.shape[0] - self.n_targets
        except (AttributeError, KeyError):
            raise NotFittedError('Cannot access n_features property until model is fitted')

    @property
    def is_fitted(self):
        """True once fit() (or from_params) has populated the model."""
        try:
            # noinspection PyStatementEffect
            self.n_vars
            return True
        except NotFittedError:
            return False

    def fit(self, X, y, structure: MBCGraph=None):
        """Fit the joint MVN parameters on the given fixed structure and
        split the variables into independent (features, targets) groups."""
        if structure is None:
            raise NotImplementedError()
        self.n_targets_ = y.shape[1]
        data = np.hstack((X, y))
        self.mean_, self.sigma_ = self.fit_params(structure, data)
        features, targets = np.arange(X.shape[1]), np.arange(y.shape[1]) + X.shape[1]
        # Drop arcs leaving feature nodes, then group variables by the
        # connected components of the remaining (target-side) graph.
        regression_structure = structure.copy()
        regression_structure[list(features)] = False
        n_comp, labels = csg.connected_components(regression_structure, directed=False)
        target_groups = [[] for _ in range(n_comp)]
        feature_groups = [[] for _ in range(n_comp)]
        for n, l in enumerate(labels):
            if n in targets:
                target_groups[l].append(n)
            else:
                feature_groups[l].append(n)
        self.components_ = [(sorted(f), sorted(t))
                            for f, t in zip(feature_groups, target_groups)]
        return self

    def log_prob(self, y, X=None):
        """Log density of targets *y*, optionally conditioned on X."""
        if X is None:
            # Marginal over the target variables only, matching y's width.
            # FIX: the original chained the (features, targets) pairs
            # themselves (yielding lists, not indices) and indexed sigma
            # with [idx, idx], which takes only the diagonal.
            idx = sorted(chain.from_iterable(t for _, t in self.components_))
            mean, cov = self.mean_[idx], self.sigma_[np.ix_(idx, idx)]
        else:
            # NOTE(review): predict() returns per-sample means/covariances
            # while mvn.logpdf expects a single mean/cov -- confirm usage.
            mean, cov = self.predict(X, return_cov=True)
        return mvn.logpdf(y, mean, cov)

    def prob(self, y, X=None):
        """Density of targets *y*, optionally conditioned on X."""
        return np.exp(self.log_prob(y, X))

    def predict(self, X, return_cov=False):
        """Predict targets for each row of X.

        Returns an (n_samples, n_targets) array, plus an
        (n_samples, n_targets, n_targets) covariance array when
        return_cov is True.
        """
        if not self.is_fitted:
            raise NotFittedError('predict called before fit')
        predictions = []
        covariances = []
        for X_i in X:
            # dtype=float: np.float was removed from NumPy.
            pred = np.zeros(self.n_targets_, dtype=float)
            predicted_cov = np.zeros((self.n_targets_, self.n_targets_), dtype=float)
            for comp_f, comp_t in self.components_:
                if not len(comp_t):
                    continue
                x = X_i[comp_f]
                comp_vars = list(chain(comp_f, comp_t))
                mean_ = self.mean_[comp_vars]
                cov_ = self.sigma_[np.ix_(comp_vars, comp_vars)]
                cond_params = conditional_mvn_params(mean_, cov_, x, return_cov)
                # Target indices relative to the prediction vector.
                t_idx = np.asarray(comp_t, dtype=int) - self.n_features
                if return_cov:
                    # FIX: the original `zip(product(...))` lacked the `*`
                    # (so unpacking failed) and used absolute variable
                    # indices instead of target-relative ones.
                    rows, cols = zip(*product(t_idx, repeat=2))
                    predicted_cov[rows, cols] = np.asarray(cond_params[1]).ravel()
                    cond_params = cond_params[0]
                pred[t_idx] = cond_params
            predictions.append(pred)
            if return_cov:
                covariances.append(predicted_cov)
        predictions = np.asarray(predictions)
        if not return_cov:
            return predictions
        # FIX: the original returned only the LAST row's covariance.
        return predictions, np.asarray(covariances)

    @staticmethod
    def from_params(mean, sigma, components, n_targets):
        """Alternate constructor from already-fitted parameters."""
        model = MGNR()
        model.components_ = components
        model.mean_ = mean
        model.sigma_ = sigma
        model.n_targets_ = n_targets
        return model
class MGNREnsemble(BaseEstimator, RegressorMixin):
    """Ensemble of MGNR models over MCMC-sampled structures; predictions
    average the members' outputs."""
    # noinspection PyUnusedLocal
    def __init__(self, k=1, parameter_estimator=None, structure_optimization=None, rng=None, verbose=False):
        """
        Initializes the models.
        Parameters
        ----------
        k: int
            The number of sample networks used for prediction. k must be smaller or equal than the number of samples
            returned by the struct_opt.
        parameter_estimator: callable
            The algorithm used to determine the values of the regression coefficients.
        structure_optimization: MHStructureOptimizer
            The algorithm used to learn the structure of the model
        rng: RandomState, int or None (default)
            A random state for the class and al its members.
        """
        if structure_optimization is None:
            raise NotImplementedError()
        if k is None:
            k = structure_optimization.returned_samples
        if k > structure_optimization.returned_samples:
            raise ValueError('The structure_optimization is set to return less samples than expected: {0} > {1}'.format(
                k, structure_optimization.returned_samples))
        self.rng = get_rng(rng)
        self.param_estimator = parameter_estimator
        self.struct_opt = structure_optimization
        self.k = k
        self.verbose = verbose

    @property
    def n_vars(self):
        """Variable count of the fitted members."""
        try:
            return self.models_[0].n_vars
        # FIX: a missing models_ attribute raises AttributeError, not
        # KeyError, so the original never converted it to NotFittedError.
        except (AttributeError, KeyError, IndexError):
            raise NotFittedError

    @property
    def n_targets(self):
        """Target count of the fitted members."""
        try:
            return self.models_[0].n_targets
        except (AttributeError, KeyError, IndexError):
            raise NotFittedError

    @property
    def n_features(self):
        """Feature count of the fitted members."""
        try:
            return self.models_[0].n_features
        except (AttributeError, KeyError, IndexError):
            raise NotFittedError

    @property
    def is_fit(self):
        """Truthy once the ensemble holds fitted members."""
        try:
            return len(self.models_)
        except (AttributeError, KeyError):
            raise NotFittedError

    def get_params(self, deep=True):
        # FIX: `raise NotImplemented()` raised a TypeError because
        # NotImplemented is a value, not an exception class.
        raise NotImplementedError()

    def set_params(self, **params):
        raise NotImplementedError()

    # noinspection PyAttributeOutsideInit
    def fit(self, X, y, samples=None):
        """
        Fits the model using MCMC sampling of the structure space and then uses a point estimation procedure for the
        parameters of each of the nodes. Right now only MLE estimation is available but Ridge, LASSO and Elastic Net
        could be added.
        Parameters
        ----------
        X: array like
            2-D array of feature variables of the data.
        y: array like
            2-D array of target variables
        samples: list of tuples
            Network structures. If None will use the struct_opt to find a set of structures.
        Returns
        -------
        out: MGNREnsemble
            The trained model consisting of one or more trained conditional MVN with different base structures to some
            models.
        """
        # Find structure
        if self.verbose:
            print('learning structure...')
        if samples is None:
            samples = self.struct_opt.generate_samples((X, y), return_scores=True)
        # Keep the k best-scoring structures.
        samples = sorted(zip(*samples), key=lambda s: s[1])
        samples = list(zip(*samples))[0]
        networks = samples[-self.k:]
        if self.verbose:
            print('fiting parameters...')
        self.models_ = [MGNR(self.param_estimator).fit(X, y, net) for net in networks]
        if self.verbose:
            print('done')
        return self

    def predict(self, X):
        """Average the members' point predictions."""
        return np.mean([m.predict(X, return_cov=False) for m in self.models_], axis=0)

    def log_prob(self, Y, X=None):
        """Log of the ensemble-averaged density."""
        return np.log(self.prob(Y, X))

    def prob(self, Y, X=None):
        """Ensemble-averaged density of targets Y (optionally given X)."""
        # FIX: MGNR.prob's signature is prob(y, X); the original swapped
        # the arguments.
        return np.mean([m.prob(Y, X) for m in self.models_])
def save_model(model: MGNREnsemble, path):
    """Pickle the fitted parameters of *model* (not the object) to *path*."""
    parameters = {
        'n_targets': model.n_targets,
        'mvns': [(m.mean_, m.sigma_, m.components_) for m in model.models_],
    }
    with open(path, mode='wb') as f:
        pickle.dump(parameters, f)
def load_model(path):
    """Rebuild an MGNREnsemble from parameters written by save_model."""
    with open(path, mode='rb') as f:
        params = pickle.load(f)
    n_targets = params['n_targets']
    models = [MGNR.from_params(mean, sigma, components, n_targets)
              for mean, sigma, components in params['mvns']]
    # FIX: MGNREnsemble() always raises NotImplementedError in __init__
    # (structure_optimization defaults to None), so deserialization must
    # bypass __init__ entirely.
    model = MGNREnsemble.__new__(MGNREnsemble)
    model.k = len(models)
    model.models_ = models
    return model
|
import streamlit as st
import pandas as pd
import speedtest
from datetime import datetime
import pytz
st.write("# Internet Connection Speed Test")
# Run the bandwidth measurements (network-bound; blocks for several seconds).
spd = speedtest.Speedtest()
# Scale to Mbps -- assumes the library reports bits per second; confirm.
down = spd.download()/1000000
up = spd.upload()/1000000
st.write(f"### Download Speed = {round(down, 2)} mbps")
st.write(f"### Upload Speed = {round(up, 2)} mbps")
st.write(f"### Other Information :\n")
# Render the client block of the speedtest config (ip, isp, ...) as a table.
ip = pd.DataFrame.from_dict(spd.get_config()['client'], orient='index')
# ip = spd.get_config()['client']['ip']
st.table(ip)
# Timestamp in Indian Standard Time.
IST = pytz.timezone('Asia/Kolkata')
datetime_ist = datetime.now(IST)
st.write(f"Test finished on - {datetime_ist.strftime('%d/%m/%Y %H:%M:%S %Z')}")
st.write(""" * github: https://github.com/ineelhere/webapps/blob/main/app.py
* docker: https://hub.docker.com/r/ineelhere/netspeed/ """)
st.write("*Indraneel*")
#=========================================================================
# pisa_srl_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
    """Single srl with generous nop padding: 0x00008000 >> 3 == 0x00001000."""
    return """
    mfc0 r1, mngr2proc < 0x00008000
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    srl r3, r1, 0x03
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 0x00001000
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
  """
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
    """Destination-bypass tests for srl at decreasing nop distances."""
    cases = [
        (5, 0x08000000, 1, 0x04000000),
        (4, 0x40000000, 1, 0x20000000),
        (3, 0x20000000, 1, 0x10000000),
        (2, 0x10000000, 1, 0x08000000),
        (1, 0x08000000, 1, 0x04000000),
        (0, 0x04000000, 1, 0x02000000),
    ]
    return [gen_rimm_dest_byp_test(nops, "srl", src, imm, result)
            for nops, src, imm, result in cases]
#-------------------------------------------------------------------------
# gen_src_byp_test
#-------------------------------------------------------------------------
def gen_src_byp_test():
    """Source-bypass tests for srl at decreasing nop distances."""
    cases = [
        (5, 0x02000000, 1, 0x01000000),
        (4, 0x01000000, 1, 0x00800000),
        (3, 0x00800000, 1, 0x00400000),
        (2, 0x00400000, 1, 0x00200000),
        (1, 0x00200000, 1, 0x00100000),
        (0, 0x00100000, 1, 0x00080000),
    ]
    return [gen_rimm_src_byp_test(nops, "srl", src, imm, result)
            for nops, src, imm, result in cases]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
    """Test where the source register doubles as the destination."""
    return [gen_rimm_src_eq_dest_test("srl", 0x00800000, 1, 0x00400000)]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
    """Directed srl value tests: shift amounts 0/1/7/14/31 over a
    one-bit, all-ones and mixed bit pattern (logical shift: zero fill)."""
    return [
        gen_rimm_value_test( "srl", 0x80000000,  0, 0x80000000 ),
        gen_rimm_value_test( "srl", 0x80000000,  1, 0x40000000 ),
        gen_rimm_value_test( "srl", 0x80000000,  7, 0x01000000 ),
        gen_rimm_value_test( "srl", 0x80000000, 14, 0x00020000 ),
        gen_rimm_value_test( "srl", 0x80000001, 31, 0x00000001 ),
        gen_rimm_value_test( "srl", 0xffffffff,  0, 0xffffffff ),
        gen_rimm_value_test( "srl", 0xffffffff,  1, 0x7fffffff ),
        gen_rimm_value_test( "srl", 0xffffffff,  7, 0x01ffffff ),
        gen_rimm_value_test( "srl", 0xffffffff, 14, 0x0003ffff ),
        gen_rimm_value_test( "srl", 0xffffffff, 31, 0x00000001 ),
        gen_rimm_value_test( "srl", 0x21212121,  0, 0x21212121 ),
        gen_rimm_value_test( "srl", 0x21212121,  1, 0x10909090 ),
        gen_rimm_value_test( "srl", 0x21212121,  7, 0x00424242 ),
        gen_rimm_value_test( "srl", 0x21212121, 14, 0x00008484 ),
        gen_rimm_value_test( "srl", 0x21212121, 31, 0x00000000 ),
    ]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
    """100 random srl cases; the expected value is computed with Bits >>
    (logical shift on an unsigned 32-bit value)."""
    asm_code = []
    # FIX: range instead of the Python 2-only xrange; equivalent here and
    # keeps the generator runnable on Python 3.
    for _ in range(100):
        src  = Bits( 32, random.randint(0,0xffffffff) )
        imm  = Bits(  5, random.randint(0,31) )
        dest = src >> imm
        asm_code.append( gen_rimm_value_test( "srl", src.uint(), imm.uint(), dest.uint() ) )
    return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
# Run every generator above as one parametrized assembly test case on the
# ISA simulator.
@pytest.mark.parametrize( "name,test", [
  asm_test( gen_basic_test ),
  asm_test( gen_dest_byp_test ),
  asm_test( gen_src_byp_test ),
  asm_test( gen_srcs_dest_test ),
  asm_test( gen_value_test ),
  asm_test( gen_random_test ),
])
def test( name, test ):
    # Assemble the generated program and execute it with line tracing on;
    # the mtc0 '>' directives inside the assembly check the results.
    sim = PisaSim( trace_en=True )
    sim.load( pisa_encoding.assemble( test() ) )
    sim.run()
|
/home/miaojian/miniconda3/lib/python3.7/keyword.py |
import torch
from engine import Engine
from utils import use_cuda
class GMF(torch.nn.Module):
    """Generalized Matrix Factorization: sigmoid(w^T (p_u * q_i) + b)."""
    def __init__(self, config):
        super(GMF, self).__init__()
        self.num_users = config['num_users']
        self.num_items = config['num_items']
        self.latent_dim = config['latent_dim']
        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)
        self.affine_output = torch.nn.Linear(in_features=self.latent_dim, out_features=1)
        self.logistic = torch.nn.Sigmoid()

    def forward(self, user_indices, item_indices):
        """Return a predicted rating in (0, 1) per (user, item) pair."""
        # Element-wise product of the latent vectors, then a learned
        # affine map squashed through a sigmoid.
        interaction = self.embedding_user(user_indices) * self.embedding_item(item_indices)
        return self.logistic(self.affine_output(interaction))

    def init_weight(self):
        # Kept for interface compatibility; embeddings use default init.
        pass
class GMFEngine(Engine):
    """Engine for training & evaluating GMF model"""
    def __init__(self, config):
        # Build the model first so it can be moved to the GPU before the
        # base Engine (optimizer etc.) is initialized with it.
        self.model = GMF(config)
        if config['use_cuda'] is True:
            use_cuda(True, config['device_id'])
            self.model.cuda()
        super(GMFEngine, self).__init__(config)
import cv2
import numpy as np
img = cv2.imread('image.jpg') #read image from system
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #Convert to grayscale image
edged = cv2.Canny(gray, 170, 255) #Determine edges of objects in an image
ret,thresh = cv2.threshold(gray,240,255,cv2.THRESH_BINARY)
(contours,_) = cv2.findContours(edged,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) #Find contours in an image
def detectShape(c): #Function to determine type of polygon on basis of number of sides
    """Approximate contour *c* to a polygon and return its vertex array.

    Bug fix: the body previously read the global loop variable ``cnt``
    instead of the parameter ``c`` — it only worked by accident because the
    caller's loop variable happened to be named ``cnt``. The unused local
    ``shape`` was removed as well.
    """
    peri = cv2.arcLength(c, True)
    # 2% of the perimeter is the tolerance for the polygon approximation.
    vertices = cv2.approxPolyDP(c, 0.02 * peri, True)
    return vertices
print("""
$x
$h
G92 X0Y0Z0
G01 X50Y-200F10000
G92 X0Y0Z0
G90
""")
for cnt in contours:
shape=detectShape(cnt)
for i in range(len(shape)):
print("G01 X%dY%dF1000"%(shape[i][0][0]/4,shape[i][0][1]/4))
if i ==0:
print("G01 Z-22F1000")
print("G01 X%dY%dF1000"%(shape[0][0][0]/4,shape[0][0][1]/4))
print("G01 Z0F1000")
print("\n")
print("""
G01 X0Y0F1000
G01 Z0F1000
""")
|
'''
author: juzicode
address: www.juzicode.com
公众号: juzicode/桔子code
date: 2020.6.11

Flow-control demo: the three signatures of range() driving for-loops.
'''
print('\n')
print('-----欢迎来到www.juzicode.com')
print('-----公众号: juzicode/桔子code\n')
print('流程控制实验:循环条件')

# Exercise range(stop), range(start, stop) and range(start, stop, step)
# with identical reporting for each form.
for spec in [(5,), (5, 10), (5, 10, 2)]:
    seq = range(*spec)
    print('类型:', type(seq))
    print('%s' % seq)
    for r in seq:
        print('r:', r)
import psutil
from operator import itemgetter
import collections
from collections import Counter
# NOTE: Python 2 syntax throughout (print statements, dict.iteritems and a
# tuple-unpacking lambda); this script will not run under Python 3 as-is.
# Collect established TCP sockets and group them by owning process id.
socket_connections = psutil.net_connections(kind='tcp')
final_dic={}
for a in socket_connections:
    # Skip wildcard-bound listeners and sockets without a remote endpoint.
    if a.laddr != ('0.0.0.0', 0) and a.raddr != ():
        laddr_split=list(a.laddr)
        raddr_split=list(a.raddr)
        # Render each endpoint as "ip@port".
        laddr_final = laddr_split[0]+"@"+str(laddr_split[1])
        raddr_final=raddr_split[0]+"@"+str(raddr_split[1])
        final_list=[laddr_final,raddr_final,a.status]
        if not a.pid in final_dic:
            final_dic[a.pid] = [final_list]
        else:
            final_dic[a.pid].append(final_list)
# Connection count per pid, used only for sort order below.
new_dic = {}
for keys in final_dic:
    new_dic[keys] = len(final_dic[keys])
print '"Pid","laddr","raddr","Status"'
# Emit CSV rows, busiest processes first (sorted by count, then pid, desc).
for key, value in sorted(new_dic.iteritems(), key=lambda (k,v): (v,k), reverse=True):
    for a in final_dic[key]:
        print '"%s", "%s", "%s", "%s"' % (key,a[0],a[1],a[2])
|
class Solution(object):
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack,
        or -1 if absent. The empty needle matches at index 0.

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        if needle == '' or haystack == needle:
            return 0
        width = len(needle)
        last_start = len(haystack) - width
        start = 0
        # Slide a window of len(needle) across the haystack.
        while start <= last_start:
            if haystack[start:start + width] == needle:
                return start
            start += 1
        return -1
|
from django.db import DataError, IntegrityError
from django.test import TestCase
from cars.models import Manufacturer, Car
class CarTestWithoutDBConnection(TestCase):
    """Field-level checks on unsaved Car instances (no queries issued)."""
    def setUp(self):
        # A bare, unsaved instance is enough for attribute-level tests.
        self.car = Car()
    def test_car_instance(self):
        self.assertTrue(isinstance(self.car, Car))
    def test_car_proper_fields(self):
        # The instance __dict__ should expose exactly these attributes.
        self.assertEqual(
            [*self.car.__dict__],
            ["_state", "id", "manufacturer_id", "model"]
        )
    def test_car_id_field(self):
        # Unsaved instances have no primary key yet.
        self.assertEqual(self.car.id, None)
    def test_car_empty_manufacturer_id_field(self):
        self.assertEqual(self.car.manufacturer_id, None)
    def test_car_empty_model_field(self):
        self.assertEqual(self.car.model, None)
    def test_car_manufacturer_id_field(self):
        car = Car(manufacturer_id=1)
        self.assertEqual(car.manufacturer_id, 1)
    def test_car_model_field(self):
        car = Car(model="Mustang")
        self.assertEqual(car.model, "Mustang")
    def test_car_str(self):
        # __str__ is expected to render "<make> <model>".
        manufacturer = Manufacturer(id=1, make="Ford")
        car = Car(manufacturer=manufacturer, model="Mustang")
        self.assertEqual(car.__str__(), "Ford Mustang")
class CarTestWithDBConnection(TestCase):
    """Car model tests that exercise real database constraints."""
    def setUp(self):
        self.manufacturer = Manufacturer.objects.create(make="Ford")
        self.car = Car.objects.create(manufacturer=self.manufacturer, model="Mustang")
    def test_car_create_with_proper_fields(self):
        self.assertNotEqual(self.car.id, None)
        self.assertEqual(self.car.manufacturer_id, self.manufacturer.id)
        self.assertEqual(self.car.model, "Mustang")
    def test_car_empty_manufacturer_id_field(self):
        # Creating without the manufacturer FK must violate a DB constraint.
        with self.assertRaises(IntegrityError):
            Car.objects.create(model="Ford")
    def test_car_empty_model_field(self):
        # Creating without a model must violate a DB constraint.
        with self.assertRaises(IntegrityError):
            Car.objects.create(manufacturer=self.manufacturer)
    def test_car_too_long_model_field(self):
        # 151 chars presumably exceeds the column's max_length (150 per
        # cars.models — confirm), so the backend raises DataError.
        with self.assertRaises(DataError):
            Car.objects.create(manufacturer=self.manufacturer, model="t" * 151)
    def test_car_manufacturer_cascade_on_del(self):
        # Deleting the manufacturer should cascade-delete its cars.
        self.manufacturer.delete()
        with self.assertRaises(Car.DoesNotExist):
            Car.objects.get(pk=self.car.id)
|
import logging
import os.path
import Configs
# Ensure the log directory exists before basicConfig opens the log file.
# (exist_ok avoids the check-then-create race of the original code.)
os.makedirs("logs/", exist_ok=True)
# Bug fix: logging.basicConfig() returns None, so the original
# `logger = logging.basicConfig(...)` left `logger` bound to None.
# Configure the root logger, then fetch a real Logger object.
logging.basicConfig(format='%(asctime)s %(message)s', filename='logs/trials.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def log(message):
    """Record *message* in the trial log when debug logging is enabled."""
    try:
        # Logging is opt-in via the DEBUG/log setting ('1' enables it).
        enabled = Configs.get_setting('DEBUG', 'log') == '1'
        if enabled:
            logging.info(message)
    except Exception as exc:
        # Best effort: configuration/logging errors must not break callers.
        print(exc)
|
import environ
from RuasLimpas.config.base import *
env = environ.Env()
# Core deployment flags are read from the environment (django-environ).
DEBUG = env.bool("DEBUG", False)
SECRET_KEY = env("SECRET_KEY")
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS")
# NOTE(review): NAME is filled from DATABASE_URL. If that variable holds a
# full connection URL (its conventional meaning), this should instead be
# `DATABASES = {"default": env.db("DATABASE_URL")}` — confirm what the
# deployment actually exports before changing.
DATABASES = {
    "default": {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': env('DATABASE_URL'),
    }
}
# Bounded knapsack: capacity W, N items, at most K items may be chosen.
W = int(input())
N, K = map(int, input().split())
A = []  # item weights
B = []  # item values
for _ in range(N):
    a, b = map(int, input().split())
    A.append(a)
    B.append(b)
# dp[i][j][k] = best value with capacity i, at most j items, first k items.
# Bug fixes vs the original: j and k now start at 1, so A[k-1] and
# dp[i-a][j-1][...] no longer wrap around to index -1 when j or k is 0,
# and the leftover debug `print(dp)` is removed.
dp = [[[0 for _ in range(N + 1)] for _ in range(K + 1)] for _ in range(W + 1)]
for i in range(1, W + 1):
    for j in range(1, K + 1):
        for k in range(1, N + 1):
            a = A[k - 1]
            if a <= i:
                # Either skip item k, or take it (consumes one slot + weight).
                dp[i][j][k] = max(dp[i][j][k - 1], dp[i - a][j - 1][k - 1] + B[k - 1])
            else:
                dp[i][j][k] = dp[i][j][k - 1]
print(dp[W][K][N])
|
import graphene
from models import db, User as UserModel
from .validation_error import ValidationError
def validate_user_creation(shortname):
    """Return True when *shortname* is not yet taken, False otherwise.

    Bug fix: the original returned True or (implicitly) None; an explicit
    bool keeps truthiness identical while making the contract clear.
    """
    return UserModel.query.filter(UserModel.shortname == shortname).first() is None
class User(graphene.ObjectType):
    """GraphQL user type exposing only the short name."""
    shortname = graphene.String()
class CreateUser(graphene.Mutation):
    """Mutation that registers a new user identified by a short name."""

    class Arguments:
        shortname = graphene.String()

    ok = graphene.Boolean()
    validation_error = graphene.Field(ValidationError)

    def mutate(self, info, shortname):
        """Create the user unless the short name is already taken."""
        if validate_user_creation(shortname):
            new_user = UserModel(shortname=shortname)
            db.session.add(new_user)
            db.session.commit()
            return CreateUser(ok=True, validation_error=None)
        # Duplicate short name: report a validation error instead of creating.
        error = ValidationError(id=1, message='The username is already in use')
        return CreateUser(ok=False, validation_error=error)
|
DEBUG = True  # This is debug state flags
# SQLALCHEMY_DATABASE_URI = 'mysql+cymysql://root:qingfing@localhost:3306/fisher'
# SECURITY: database credentials and SECRET_KEY are hard-coded in source;
# move them to environment variables before deploying.
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:qingfing@localhost:3306/fisher'
SECRET_KEY = 'FJASDKLJFKADSJKLFJADSKLJKL8979345491327%^&%^&$%^$'
# Email configure
MAIL_SERVER = 'smtp.qq.com'
MAIL_PORT = 465  # implicit-SSL SMTP port
MAIL_USE_SSL = True
# Bug fix: Flask-Mail reads MAIL_USE_TLS; the misspelled MAIL_USE_TSL below
# was silently ignored. The correct key is added; the old misspelled name is
# kept for backward compatibility with any code that reads it.
MAIL_USE_TLS = False
MAIL_USE_TSL = False
MAIL_USERNAME = '56237395@qq.com'
MAIL_PASSWORD = 'nonjollpgvckbjbg'  # SECURITY: mail password committed to source
MAIL_SUBJECT_PREFIX = '<Fisher>'
MAIL_SENDER = 'Fisher <abc@qq.com>'
|
from pyramid.config import Configurator
from .models.node import root_factory
def main(global_config, **settings):
    """Build and return the Pyramid WSGI application."""
    config = Configurator(settings=settings, root_factory=root_factory)
    # Transaction management, SQLAlchemy integration and Jinja2 templating.
    for extension in ('pyramid_tm', 'pyramid_sqlalchemy', 'pyramid_jinja2'):
        config.include(extension)
    # Application assets, plus deform's widget assets under a separate path.
    config.add_static_view('static', 'moonbase:static')
    config.add_static_view('deform_static', 'deform:static')
    config.scan('.views')
    return config.make_wsgi_app()
|
class Solution:
    def sortColors(self, num):
        """Sort a list of 0/1/2 values in place (Dutch national flag).

        Single pass, O(1) extra space; returns None like list.sort().
        """
        if not num:
            return
        low = 0
        mid = 0
        high = len(num) - 1
        while mid <= high:
            current = num[mid]
            if current == 0:
                num[mid], num[low] = num[low], num[mid]
                low += 1
                mid += 1
            elif current == 2:
                # The value swapped in from the back is unexamined, so
                # `mid` stays put for the next iteration.
                num[mid], num[high] = num[high], num[mid]
                high -= 1
            else:
                mid += 1
|
class Solution:
    def flipAndInvertImage(self, A):
        """Reverse each row of binary matrix A and invert its bits, in place.

        Returns the same list object A, mutated.
        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        for row in A:
            # Slice-assignment keeps the mutation in place, matching the
            # original reverse()-then-invert loop.
            row[:] = [1 - bit for bit in reversed(row)]
        return A
if __name__ == '__main__':
    # Bug fix: the original bound `ans` to the class object itself and then
    # passed the class in as `self`; instantiate properly instead.
    solver = Solution()
    print(solver.flipAndInvertImage([[1, 1, 0, 0], [1, 0, 0, 1], [0, 1, 1, 1], [1, 0, 1, 0]]))
from django.db import models
from django.contrib.auth.models import User
import datetime
class Pengarang(models.Model):
    """Author of a library item (Pengarang = author)."""
    nama = models.CharField(max_length=75)
    def __str__(self):
        return self.nama
    class Meta:
        verbose_name = "Pengarang"
        verbose_name_plural = "Data Pengarang"
class Anggota(models.Model):
    """Library member tied to a Django auth user (Anggota = member)."""
    # NOTE(review): ForeignKey without on_delete is Django <2.0 syntax;
    # porting to Django 2+ requires adding on_delete explicitly.
    username = models.ForeignKey(User)
    nama = models.CharField(max_length=75)
    alamat = models.CharField(max_length=255)
    def __str__(self):
        return self.nama
    class Meta:
        verbose_name = "Anggota"
        verbose_name_plural = "Data Anggota"
class Telepon(models.Model):
    """Phone number belonging to a member (one member, many numbers)."""
    nomer = models.CharField(max_length=15)
    # NOTE(review): field named *_id but holds the related object via
    # ForeignKey (Django appends its own _id column); Django <2.0 syntax.
    anggota_id = models.ForeignKey(Anggota)
    def __str__(self):
        return self.nomer
    class Meta:
        verbose_name = "Telepon"
        verbose_name_plural = "Data Telepon"
class Pustaka(models.Model):
    """Library item (Pustaka): title, kind, publisher, year and authors."""
    judul = models.CharField(max_length=75)
    jenis = models.CharField(max_length=75)
    penerbit = models.CharField(max_length=75)
    tahun = models.CharField(max_length=4)
    # Many-to-many: an item can have several authors and vice versa.
    data_pengarang = models.ManyToManyField(Pengarang)
    def __str__(self):
        return self.judul
    class Meta:
        verbose_name = "Pustaka"
        verbose_name_plural = "Data Pustaka"
class Pinjam(models.Model):
    """Loan record (Pinjam) linking a member to borrowed library items."""
    STATUS = (
        (0, "Belum Kembali"),
        (1, "Sudah Kembali"),
    )
    anggota = models.ForeignKey(Anggota, verbose_name="Peminjam")
    data_pustaka = models.ManyToManyField(Pustaka)
    tanggal_pinjam = models.DateField()
    tanggal_kembali = models.DateField()
    status = models.IntegerField(choices=STATUS, default=0)

    def __str__(self):
        return self.anggota.nama

    def status_peminjaman(self):
        """Human-readable loan status, including days overdue when late."""
        if self.status != 0:
            return "Sudah Kembali (%s)"%(str(self.tanggal_kembali))
        overdue, delta = self.telat()
        if overdue:
            return "Belum kembali (%d hari)"%(delta.days)
        return "Belum kembali"

    def telat(self):
        """Return (is_late, delta) vs a 7-day deadline from the loan date."""
        deadline = self.tanggal_pinjam + datetime.timedelta(7)
        delta = self.tanggal_kembali - deadline
        return self.tanggal_kembali > deadline, delta

    class Meta:
        verbose_name = "Pinjam"
        verbose_name_plural = "Data Pinjam"
# 3. Longest Substring Without Repeating Characters
#
# Given a string, find the length of the longest substring without repeating characters.
#
# Examples:
#
# Given "abcabcbb", the answer is "abc", which the length is 3.
#
# Given "bbbbb", the answer is "b", with the length of 1.
#
# Given "pwwkew", the answer is "wke", with the length of 3.
# Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
import collections
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """Sliding-window solution: O(n) time, O(k) space for k distinct chars.

        :type s: str
        :rtype: int
        """
        counts = collections.Counter()
        dup = left = best = 0
        for right, ch in enumerate(s):
            counts[ch] += 1
            if counts[ch] > 1:
                dup += 1
            # Shrink from the left until the window holds no duplicates.
            while dup > 0:
                left_ch = s[left]
                counts[left_ch] -= 1
                if counts[left_ch] > 0:
                    dup -= 1
                left += 1
            best = max(best, right + 1 - left)
        return best

    def lengthOfLongestSubstringDP(self, s):
        """DP variant: dp[i] is the answer for the prefix ending at i.

        :type s: str
        :rtype: int
        """
        if not s:
            return 0
        last_seen = {s[0]: 0}     # most recent index of each character
        window_start = 0          # left edge of the current unique window
        dp = [0] * len(s)
        dp[0] = 1
        for i in range(1, len(s)):
            ch = s[i]
            if ch in last_seen and window_start <= last_seen[ch]:
                # Duplicate inside the window: jump past its previous spot.
                window_start = last_seen[ch] + 1
                dp[i] = dp[i - 1]
            else:
                dp[i] = max(dp[i - 1], i - window_start + 1)
            last_seen[ch] = i
        return dp[-1]
if __name__ == "__main__":
sol = Solution()
assert sol.lengthOfLongestSubstring('abcabcbb') == 3
assert sol.lengthOfLongestSubstring('bbbbb') == 1
assert sol.lengthOfLongestSubstring('dvdf') == 3
assert sol.lengthOfLongestSubstring('pwwkew') == 3
assert sol.lengthOfLongestSubstring('tmmzuxt') == 5
assert sol.lengthOfLongestSubstringDP('abcabcbb') == 3
assert sol.lengthOfLongestSubstringDP('bbbbb') == 1
assert sol.lengthOfLongestSubstringDP('dvdf') == 3
assert sol.lengthOfLongestSubstringDP('pwwkew') == 3
assert sol.lengthOfLongestSubstringDP('tmmzuxt') == 5 |
class GameStates:
    """Integer constants naming the game's top-level states/screens."""
    MAIN_MENU = 0
    PLAYING = 1
    PAUSED = 2
    GAME_OVER = 3
    TOP_SCORES = 4
    SELECT_LEVEL = 5
|
import uuid
from dataclasses import dataclass
from datetime import datetime
from devices.schemas import Serializable
from marshmallow import Schema, fields, post_load, validate
class DeviceAttributeSchema(Schema): # pylint: disable=too-few-public-methods
    """(De)serializes one DeviceAttribute: its value and last-update time.

    NOTE(review): `default=` on fields is the marshmallow 2 spelling;
    marshmallow 3 renamed it dump_default/load_default — confirm the pinned
    version before upgrading.
    """
    value = fields.Str(required=False, default=None)
    last_update = fields.DateTime(required=False, default=None)
    @post_load
    def create_device_attribute(self, data, **_): # pylint: disable=no-self-use
        # Materialize the validated dict as a DeviceAttribute dataclass.
        return DeviceAttribute(**data)
class DeviceAttributesSchema(Schema): # pylint: disable=too-few-public-methods
    """(De)serializes the full optional attribute set of a device.

    Every nested field mirrors an optional DeviceAttribute slot on the
    DeviceAttributes dataclass defined below.
    """
    bitlocker = fields.Nested(DeviceAttributeSchema, required=False)
    device_name = fields.Nested(DeviceAttributeSchema, required=False)
    filevault = fields.Nested(DeviceAttributeSchema, required=False)
    firewall = fields.Nested(DeviceAttributeSchema, required=False)
    gatekeeper = fields.Nested(DeviceAttributeSchema, required=False)
    hardware_model = fields.Nested(DeviceAttributeSchema, required=False)
    hardware_vendor = fields.Nested(DeviceAttributeSchema, required=False)
    hardware_description = fields.Nested(DeviceAttributeSchema, required=False)
    host_identifier = fields.Nested(DeviceAttributeSchema, required=False)
    host_uuid = fields.Nested(DeviceAttributeSchema, required=False)
    hostname = fields.Nested(DeviceAttributeSchema, required=False)
    last_active = fields.Nested(DeviceAttributeSchema, required=False)
    os_auto_update = fields.Nested(DeviceAttributeSchema, required=False)
    os_type = fields.Nested(DeviceAttributeSchema, required=False)
    os_version = fields.Nested(DeviceAttributeSchema, required=False)
    os_name = fields.Nested(DeviceAttributeSchema, required=False)
    serial_number = fields.Nested(DeviceAttributeSchema, required=False)
    username = fields.Nested(DeviceAttributeSchema, required=False)
    total_ram = fields.Nested(DeviceAttributeSchema, required=False)
    total_hard_drive_space = fields.Nested(DeviceAttributeSchema, required=False)
    free_hard_drive_space = fields.Nested(DeviceAttributeSchema, required=False)
    bitlocker_encryption_percent = fields.Nested(DeviceAttributeSchema, required=False)
    filevault_encryption_percent = fields.Nested(DeviceAttributeSchema, required=False)
    screen_timeout = fields.Nested(DeviceAttributeSchema, required=False)
    # Plain (non-nested) metadata carried alongside the attributes.
    source_last_check_in = fields.DateTime(required=False)
    serial = fields.Str(required=False)
    @post_load
    def _create_device_attributes(self, data, **_): # pylint: disable=no-self-use
        # Materialize the validated dict as a DeviceAttributes dataclass.
        return DeviceAttributes(**data)
class DeviceStatusSchema(Schema): # pylint: disable=too-few-public-methods
    """(De)serializes one device's status record."""
    customer_id = fields.Str(required=True)
    serial_number_hash = fields.Str(required=True)
    serial = fields.Str(allow_none=True)
    enrolled = fields.Bool(allow_none=True)
    source = fields.Str(allow_none=True)
    last_check_in = fields.DateTime(allow_none=True)
    healthy = fields.Bool(allow_none=True)
    attributes = fields.Nested(DeviceAttributesSchema, required=False)
    @post_load
    def create__device_status(self, data, **_): # pylint: disable=no-self-use
        # Materialize the validated dict as a DeviceStatus dataclass.
        return DeviceStatus(**data)
class CustomerDeviceStatusSchema(Schema): # pylint: disable=too-few-public-methods
    """(De)serializes a paged list of device statuses for one customer."""
    # Pagination cursor: exactly 32 characters when present, else None.
    after = fields.Str(allow_none=True, validate=validate.Length(equal=32))
    count = fields.Int(required=True)
    total = fields.Int(required=True)
    devices = fields.Nested(DeviceStatusSchema, required=True, many=True)
    @post_load
    def create_customer_device_status(self, data, **_): # pylint: disable=no-self-use
        # Materialize the validated dict as a CustomerDeviceStatus dataclass.
        return CustomerDeviceStatus(**data)
@dataclass
class CustomerDeviceStatus(Serializable):
    """One page of device statuses for a customer query."""
    serializer = CustomerDeviceStatusSchema()  # shared schema instance
    after: str     # pagination cursor (32-char string per schema) or None
    count: int
    total: int
    devices: list  # list of DeviceStatus (see DeviceStatusSchema many=True)
@dataclass
class DeviceAttribute(Serializable):
    """A single reported attribute: its value and last-update timestamp."""
    value: str = None
    last_update: datetime = None
@dataclass
class DeviceAttributes(Serializable):
    """All known attributes of a device; every slot is optional (None when
    the source has not reported it). Mirrors DeviceAttributesSchema."""
    serial: str = None
    source_last_check_in: datetime = None
    filevault: DeviceAttribute = None
    firewall: DeviceAttribute = None
    gatekeeper: DeviceAttribute = None
    hardware_model: DeviceAttribute = None
    hardware_vendor: DeviceAttribute = None
    hardware_description: DeviceAttribute = None
    host_identifier: DeviceAttribute = None
    host_uuid: DeviceAttribute = None
    hostname: DeviceAttribute = None
    last_active: DeviceAttribute = None
    os_auto_update: DeviceAttribute = None
    os_type: DeviceAttribute = None
    os_version: DeviceAttribute = None
    os_name: DeviceAttribute = None
    serial_number: DeviceAttribute = None
    username: DeviceAttribute = None
    total_ram: DeviceAttribute = None
    total_hard_drive_space: DeviceAttribute = None
    free_hard_drive_space: DeviceAttribute = None
    bitlocker_encryption_percent: DeviceAttribute = None
    filevault_encryption_percent: DeviceAttribute = None
    screen_timeout: DeviceAttribute = None
    bitlocker: DeviceAttribute = None
    device_name: DeviceAttribute = None
@dataclass
class DeviceStatus(Serializable):
    """Status snapshot of a single device. Mirrors DeviceStatusSchema."""
    serializer = DeviceStatusSchema()  # shared schema instance
    customer_id: uuid.UUID
    serial_number_hash: str
    serial: str
    enrolled: bool
    source: str
    last_check_in: datetime
    healthy: bool
    attributes: DeviceAttributes = None
|
from django.shortcuts import render, render_to_response,redirect
from django.template import RequestContext
from django.conf import settings
import requests
import json
from helper import JsonResponse
# Create your views here.
# View that renders the login page.
def show_login_button(request):
    return render_to_response('login.html')
# View that redirects the user to Foursquare's OAuth2 authorization page.
def authenticate_in_foursquare(request):
    """Send the browser to Foursquare so the user can approve this app."""
    auth_url = (
        'https://foursquare.com/oauth2/authenticate?'
        + 'client_id={}&'.format(settings.CLIENT_ID)
        + 'response_type=code&'
        + 'redirect_uri={}'.format('http://localhost:8000/redirect/')
    )
    return redirect(auth_url)
# Map view: where the user lands after approving the application.
# Also exchanges the OAuth "code" for an access token to the Foursquare API.
def login_valid(request):
    # Authorization code appended by Foursquare to the redirect URL.
    code = request.GET.get('code',None)
    if code:
        url = 'https://foursquare.com/oauth2/access_token'
        param = {'client_id':settings.CLIENT_ID,
                 'client_secret':settings.CLIENT_SECRET,
                 'grant_type':'authorization_code',
                 'redirect_uri':'http://localhost:8000/redirect/',
                 'code':code}
        ans = requests.get(url,params=param)
        if ans.status_code == requests.codes.ok:
            # NOTE(review): storing the token on the settings module is
            # process-global — every visitor shares the last token. Confirm
            # whether per-session storage was intended.
            settings.ACCES_TOKEN = ans.json()['access_token']
            context = {'maps_api_key': settings.GOOGLE_MAPS_APIKEY}
            return render_to_response('mapconsult.html',context)
    # NOTE(review): when `code` is missing or the token request fails this
    # view returns None (HTTP 500 in Django) — add an explicit failure
    # response.
# View that queries the Foursquare venues API.
# Uses lat/lng received via AJAX from the Google Maps frontend and returns a
# JSON of venues in the "Sushi Restaurant" category within a 1600 m radius.
def make_request(request):
    lat = request.GET.get('lat')
    lng = request.GET.get('lng')
    # Foursquare expects "lat,lng" as a single parameter.
    ll = lat+','+lng
    url = 'https://api.foursquare.com/v2/venues/search'
    param = {'oauth_token':settings.ACCES_TOKEN,
            'v':'20140403',
            'll':ll,
            'categoryId':'4bf58dd8d48988d1d2941735',
            'radius':'1600'
            }
    ans = requests.get(url,params=param)
    if ans.status_code == requests.codes.ok:
        ajson = ans.json()['response']
        return JsonResponse(ajson)
    # NOTE(review): a failed API call falls through and returns None
    # (HTTP 500 in Django) — add an explicit error response.
import os
from numpy import true_divide
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
import time
import pandas as pd
from bs4 import BeautifulSoup
import datetime as dt
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
pd.set_option('display.unicode.east_asian_width', True)
# Factory for the Selenium browser driver (Chrome or Firefox).
def set_driver(driver_path, headless_flg):
    """Create a WebDriver for *driver_path*, optionally headless.

    Chrome is used when the path contains "chrome", Firefox otherwise.
    """
    use_chrome = "chrome" in driver_path
    options = ChromeOptions() if use_chrome else Options()
    # Headless (no visible window) mode.
    if headless_flg == True:
        options.add_argument('--headless')
    # Launch options: fixed UA string, relaxed TLS errors, incognito mode.
    options.add_argument(
        '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36')
    # options.add_argument('log-level=3')
    options.add_argument('--ignore-certificate-errors')
    options.add_argument('--ignore-ssl-errors')
    options.add_argument('--incognito')
    # Build the WebDriver from the binary sitting next to the script.
    executable = os.getcwd() + "/" + driver_path
    if use_chrome:
        return Chrome(executable_path=executable, options=options)
    return Firefox(executable_path=executable, options=options)
# ♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪
# Per-results-page scraper.
# Returns (DataFrame of this page's listings, "next page" link tag or None).
# ♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪
def page_process(page_html):
    # Parse the raw page HTML.
    parse_html = BeautifulSoup(page_html, 'html.parser')
    ### Company names -> co_name_list
    # All <h3> recruit-name headings. NOTE: the comprehensions below shadow
    # the builtin `list` as their loop variable.
    h3_lists = parse_html.find_all('h3', class_='cassetteRecruit__name')
    # Heading text content.
    h3_text_lists=[ list.string for list in h3_lists ]
    # Keep only the company name (text before ' |').
    co_name_list = [list.split(' |')[0] for list in h3_text_lists]
    ### Listing update dates -> update_list
    # <p> elements carrying the update date.
    update_p_lists = parse_html.find_all('p', class_='cassetteRecruit__updateDate')
    # The date string lives in the first <span> of each <p>.
    update_list=[list.select('span')[0].string for list in update_p_lists]
    # Salary info -> salary_list
    # Grab the condition tables and load them as DataFrames, first column
    # used as the index.
    tables = parse_html.find_all('table', class_='tableCondition')
    df = pd.read_html(str(tables), header=None, index_col=0)
    # Pick the row labelled 給与 (salary) from each table.
    salary_list = [ table.loc["給与",1] for table in df ]
    ### Assemble this page's DataFrame from the three lists.
    df2 = pd.DataFrame({'会社名':co_name_list, '給与':salary_list, '情報更新日':update_list})
    # Next-page arrow element; None on the last page.
    is_next = parse_html.select_one('.pager__next > .iconFont--arrowLeft')
    return df2, is_next
# Main process.
def main():
    """Scrape mynavi job listings for a keyword and save them to CSV.

    Fixes over the original:
    - ``data_count`` is initialised before the try block; previously an
      early exception caused an UnboundLocalError while writing the log.
    - the throwaway ``webdriver.Chrome(ChromeDriverManager().install())``
      instance (a leaked, unused browser) is no longer created.
    - the driver is always quit in a ``finally`` block.
    - an unsupported OS now fails fast instead of using an undefined driver.
    """
    url = "https://tenshoku.mynavi.jp/"
    search_keyword = input("検索キーワードを入力して、エンターキーを押して下さい。>>")
    # Log bookkeeping.
    i = 0  # page counter
    data_count = "0"  # result count; stays "0" if scraping fails early
    log_list = [f"{dt.datetime.now()}:作業開始"]  # progress log lines
    # Launch the driver; the chromedriver binary name differs per OS.
    if os.name == 'nt':  # Windows
        driver = set_driver("chromedriver.exe", False)
    elif os.name == 'posix':  # Mac
        driver = set_driver("chromedriver", False)
    else:
        raise OSError("unsupported OS: " + os.name)
    try:
        # Open the site.
        driver.get(url)
        time.sleep(5)
        # Close the promotional pop-ups (shown twice).
        driver.execute_script('document.querySelector(".karte-close").click()')
        time.sleep(5)
        driver.execute_script('document.querySelector(".karte-close").click()')
        # Type the keyword into the search box and run the search.
        driver.find_element_by_class_name(
            "topSearch__text").send_keys(search_keyword)
        driver.find_element_by_class_name("topSearch__button").click()
        time.sleep(3)
        # Accumulate one DataFrame over all result pages.
        df_list = pd.DataFrame()
        page_html = driver.page_source
        while True:
            result = page_process(page_html)
            df_list = pd.concat([df_list, result[0]])
            i += 1
            log_list.append(f"{dt.datetime.now()}:{i:02}ページ目完了")
            if result[1] is None:
                log_list.append(f"{dt.datetime.now()}:情報取得完了")
                break
            next_url = url + result[1].get('href')
            driver.get(next_url)
            time.sleep(5)
            page_html = driver.page_source
        # 1-based index for the exported CSV.
        df_list = df_list.reset_index(drop=True)
        df_list.index = df_list.index + 1
        df_list.to_csv('test.csv', encoding='shift jis')
        data_count = str(len(df_list))
    except Exception as e:
        log_list.append(f"{dt.datetime.now()}:エラー発生\n\n※※エラー内容:{e} ")
        print("エラーが発生しました。処理を終了します。")
        print("エラー内容:", e)
    finally:
        # Release the browser even on failure (the original leaked it).
        driver.quit()
    # Write the progress log with the keyword and result count up front.
    log_list.insert(0, "★★検索キーワード:" + search_keyword + " 検索結果:" + data_count + "件\n")
    now_time = dt.datetime.now()
    with open(f"{now_time:%Y%m%d-%H%M%S}.txt", mode='w', encoding='utf-8') as f:
        f.write("\n".join(log_list))
# ♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪
# ♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪♪
# Run main() only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding=utf8 -*-
import httplib
import urllib
import urllib2
from datetime import *
import time
def sendhttp():
    # POST form-encoded data to the local Savelog endpoint using httplib.
    # NOTE: Python 2 only (httplib/urllib and print statements).
    data = urllib.urlencode({'u_team_id':'125', 'author':'args1', 'body_pic':'[pic]jfjjfj[/pic]','body_text':'nihaoma daye'})
    headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5","Accept": "text/plain"}
    print "1111"
    conn = httplib.HTTPConnection('127.0.0.1:9099', timeout=10)
    print "22222"
    conn.request('POST', '/autodeploy/Savelog/', data, headers)
    # NOTE(review): set_debuglevel after request() only affects later calls.
    conn.set_debuglevel(1)
    httpres = conn.getresponse()
    print httpres.status
    print httpres.reason
    #print httpres.read()
def sendurl():
    # POST a salt_api command via urllib2 (Python 2 only).
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    url = 'http://127.0.0.1:9099/autodeploy/Savelog/'
    # NOTE(review): the next line overwrites the URL above, so only the
    # salt_api endpoint is ever called — confirm which one is intended.
    url = 'http://127.0.0.1:9099/autodeploy/salt_api/'
    #url = 'http://www.baidu.com'
    data = {
        'fun':'os_util.useradd',
        'tgt':'172.19.152.40',
        'arg':'zhulh'
    }
    data1 = urllib.urlencode(data)
    # data = urllib.urlencode({'u_team_id':'125', 'author':'args1', 'body_pic':'[pic]jfjjfj[/pic]','body_text':'nihaoma daye'})
    #headers = { 'User-Agent' : user_agent }
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    req = urllib2.Request(url, data1, headers)
    response = urllib2.urlopen(req)
    the_page = response.read()
    print the_page
def gethtml():
    # Fetch and print a page from the internal vdapp service (Python 2).
    request = urllib2.Request('http://192.168.10.37:7080/vdapp/')
    #request.add_header('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6')
    opener = urllib2.build_opener()
    f= opener.open(request)
    print f.read().decode('utf-8')
# Only sendurl() runs when executed as a script.
if __name__ == '__main__':
    sendurl()
from django.conf.urls import patterns, include, url
from django.conf import settings
import views
from grades.views import grade_peer_eval, email_user_feedback
# NOTE(review): patterns() with string view references is the Django <=1.7
# URLconf style (removed in 1.10); porting requires a plain list of url()
# entries with callable views.
urlpatterns = patterns('',
    # NOTE: all these URLs are preceded by "_admin/"
    #url(r'load-class-list/$', views.load_class_list, name='admin-load-class-list'),
    url(r'load-class-list/$', views.load_class_list, name='admin-load-class-list'),
    url(r'generate-questions/(?P<course_code_slug>.+)/(?P<question_set_slug>.+)/$',
        views.generate_questions, name='admin-generate-questions'),
    url(r'load-from-template/$', #(?P<course_code_slug>.+)/(?P<question_set_slug>.+)/',
        views.load_from_template, name='admin-load-from-template'),
    url(r'report-responses/',
        views.report_responses, name='admin-report-responses'),
    url(r'report-responses-short-answer/',
        views.report_responses_short_answer,
        name='admin-report-responses-short-answer'),
    #url(r'fix-questions/', views.fix_questions),
    url('preview-question/', views.preview_question,
        name='admin-preview-question'),
    # Grading views are imported from the grades app.
    url('grade-peer-eval/', grade_peer_eval, name='admin-grade-peer-eval'),
    url('email-user-feedback', email_user_feedback, name="admin-email-user-feedback")
)
class NumMatrix(object):
    """2-D Fenwick (binary indexed) tree over a mutable matrix.

    Supports point updates and rectangle-sum queries, each in
    O(log m * log n). Tree coordinates are 1-based; row/column 0 of
    sumArray is the usual Fenwick sentinel.
    """

    def __init__(self, matrix):
        """
        initialize your data structure here.
        :type matrix: List[List[int]]
        """
        self.matrix = matrix
        rows = len(matrix) + 1
        cols = len(matrix[0]) + 1
        self.sumArray = [[0] * cols for _ in range(rows)]
        for r in range(1, rows):
            for c in range(1, cols):
                self.add(r, c, matrix[r - 1][c - 1])

    def add(self, i, j, value):
        """Add *value* at 1-based tree position (i, j)."""
        row = i
        while row < len(self.sumArray):
            col = j
            while col < len(self.sumArray[0]):
                self.sumArray[row][col] += value
                col += col & -col
            row += row & -row

    def update(self, i, j, value):
        """Set matrix[i][j] (0-based) to *value*."""
        self.add(i + 1, j + 1, value - self.matrix[i][j])
        self.matrix[i][j] = value

    def total(self, i, j):
        """Prefix sum of the i-by-j top-left rectangle (1-based, exclusive)."""
        acc = 0
        row = i
        while row > 0:
            col = j
            while col > 0:
                acc += self.sumArray[row][col]
                col -= col & -col
            row -= row & -row
        return acc

    def sumRegion(self, row1, col1, row2, col2):
        """Sum of matrix[row1..row2][col1..col2], inclusive, 0-based."""
        return (self.total(row2 + 1, col2 + 1) - self.total(row2 + 1, col1)
                - self.total(row1, col2 + 1) + self.total(row1, col1))
if __name__ == '__main__':
    # Bug fix: the original named this variable `input`, shadowing the
    # builtin of the same name.
    grid = [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]
    ]
    matrix = NumMatrix(grid)
    res = matrix.sumRegion(1, 1, 2, 2)
    assert res == 28
    matrix.update(1, 1, 6)
    res = matrix.sumRegion(1, 1, 2, 2)
    assert res == 29
|
../../ml/line_fit.py |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
## data load
data = pd.read_csv('C:/Users/user/Desktop/creditcard.csv')

## check class frequency (the data set is heavily imbalanced)
pd.value_counts(data['Class']).plot.bar()
# plt.title('Fraud class histogram')
# plt.xlabel('Class')
# plt.ylabel('Frequncy')
# plt.show()

## standard-scale the Amount column, then drop the unused raw columns
sdscaler = StandardScaler()
data['normAmount'] = sdscaler.fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Time', 'Amount'], axis=1)

# Bug fix: DataFrame.ix was deprecated and removed in pandas 1.0; .loc with
# a boolean column mask is the supported equivalent.
x = np.array(data.loc[:, data.columns != 'Class'])
y = np.array(data.loc[:, data.columns == 'Class'])

## split train/test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)

## oversample the minority class with SMOTE to fix the imbalance
sm = SMOTE(random_state=2)
# NOTE(review): newer imbalanced-learn renames fit_sample -> fit_resample;
# confirm the pinned version before upgrading.
x_train_res, y_train_res = sm.fit_sample(x_train, y_train.ravel())
"""PG-60: Recursion
Problems that are built off of subproblems.
1. How many subproblems does `f(n)` depend on?
- binary tree: two, linked list: one, regex: number of possible special characters, etc
2. Solve for a "base case". First compute for `f(0) + f(1)` which are hard coded values.
3. Solve for `f(2)`
4. Understand how to solve for f(3) using f(2) or previous solution.
5. Generalize for `f(n)`
"""
################################################################################
# 8.1: Write a method to generate the nth Fibonacci number
"""
Notes:
    Fibonacci number is:
1 1 2 3 ...
f(n) = fn(n-2) + f(n-1)
"""
def fib_bad(n):
    """Naive recursive Fibonacci with f(0) = f(1) = 1.

    Bug fix: the recursive calls referenced `fib`, which does not exist in
    this module; they must call fib_bad itself.

    This version is exponential in time and O(n) in stack depth — it
    recomputes subproblems needlessly:

        fib(10)
        (fib(8) + fib(9))
        (fib(6)+fib(7)) (fib(7)+fib(8))

    Each call fans out into two more for all of n, unlike a binary tree
    whose depth is log2(n). See fib_mem for the memoized O(n) version.
    """
    if n == 0 or n == 1:
        return 1
    return fib_bad(n - 2) + fib_bad(n - 1)
def fib_mem(mem, n):
    """Compute the nth Fibonacci number (f(0) = f(1) = 1) with memoization.

    Results are cached in *mem*, cutting both runtime and recursion to O(n).
    """
    if n == 0 or n == 1:
        return 1
    # Reuse a previously computed value when available.
    if n in mem:
        return mem[n]
    mem[n] = fib_mem(mem, n - 2) + fib_mem(mem, n - 1)
    return mem[n]
################################################################################
# 8.2:
# Imagine a robot sitting on the upper left hand corner of an NxN grid. The
# robot can only move in two directions: right and down
#
# How many possible paths are there for the robot?
# FOLLOW UP
# Imagine certain squares are “off limits”, such that the robot can not step on
# them.
#
# Design an algorithm to get all possible paths for the robot
"""
Notes:
Let's do some simple cases:
2x2: 2 paths
____
|_|_|
|_|_|
1 + 1 + 2
3x3: 6 paths
_____
|_|_|_|
|_|_|_|
|_|_|_|
(2 + 2) & 4x2
The robot has the following choices available at each square:
- zero paths for the last square
- 0 paths for the right-most and bottom-most squares.
- 2 paths for all other squares
"""
def count_paths(n, squares, row, col):
    """Count right/down paths from (row, col) to the bottom-right of an
    NxN grid.

    Bug fixes: the recursive calls dropped the `n` argument (a TypeError on
    any recursion), and there was no success base case, so the count could
    never exceed 0. `squares` (the "off limits" follow-up) is kept for
    interface compatibility but not yet consulted.

    This is O(2^n); memoize to reduce.
    """
    if row >= n or col >= n:
        return 0
    if row == n - 1 and col == n - 1:
        # Reached the destination square: exactly one (empty) path.
        return 1
    return count_paths(n, squares, row + 1, col) + count_paths(n, squares, row, col + 1)
################################################################################
# 8.3: Write a method that returns all subsets of a set.
"""
Notes:
This is actually easier with a list. It should be a generalized form of:
```
subsets = []
for a in A[:]:
for b in A[1:]
for c in C[2:]
subsets.append([a,b,c])
subsets.append([a, b])
subsets.append([a])
```
"""
def subsets(existing, prev, input_values, li):
    """Append every non-empty subset of input_values[li:] extended from
    *prev* onto *existing* (mutated in place).

    Bug fixes: the original called an undefined `copy` and used
    `list.insert` with a single argument (insert requires an index);
    list concatenation + append do what was intended.
    """
    for i in range(li, len(input_values)):
        # Extend the running prefix with the i-th value; `prev + [...]`
        # copies so sibling iterations stay independent.
        local_prev = prev + [input_values[i]]
        existing.append(local_prev)
        subsets(existing, local_prev, input_values, i + 1)
|
import main.DAO.redisDAO as redisDAO

# Scan item ids 1..1682: for each, fetch its similarity list from Redis.
# Ids with an empty list are printed; otherwise the top neighbour is kept
# as (item_id, neighbour_id, similarity_score).
# Fix vs. original: Python-2-only `print x` statements replaced with the
# print() function so the script runs under Python 3.
dao = redisDAO.redisDAO()
count = []
for item_id in range(1, 1683):
    sims = dao.get_item_sim_list(item_id, 0)
    if not sims:
        print(item_id)
    else:
        count.append((item_id, sims[0][0], sims[0][1]))
# Sort ascending by similarity score so the weakest matches come first.
count.sort(key=lambda entry: entry[2])
print(count)
|
"""empty message
Revision ID: b7afce71bc6f
Revises: 28c51d215b39
Create Date: 2018-04-14 22:47:36.817118
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b7afce71bc6f'
down_revision = '28c51d215b39'
branch_labels = None
depends_on = None
def upgrade():
    """Recreate child-table foreign keys with ON DELETE CASCADE.

    For each table, the original named FK constraint is dropped and an
    auto-named replacement is created on the same columns with
    ondelete='CASCADE', so deleting a parent row removes its dependents
    (bookmark_links, bookmarks, chapters, comments, messages,
    notifications, tags).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('bookmark_links_bookmark_id_fkey', 'bookmark_links', type_='foreignkey')
    op.create_foreign_key(None, 'bookmark_links', 'bookmarks', ['bookmark_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('bookmarks_user_id_fkey', 'bookmarks', type_='foreignkey')
    op.drop_constraint('bookmarks_work_id_fkey', 'bookmarks', type_='foreignkey')
    op.create_foreign_key(None, 'bookmarks', 'works', ['work_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'bookmarks', 'users', ['user_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('chapters_work_id_fkey', 'chapters', type_='foreignkey')
    op.create_foreign_key(None, 'chapters', 'works', ['work_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('comments_chapter_id_fkey', 'comments', type_='foreignkey')
    op.drop_constraint('comments_bookmark_id_fkey', 'comments', type_='foreignkey')
    op.drop_constraint('comments_user_id_fkey', 'comments', type_='foreignkey')
    op.create_foreign_key(None, 'comments', 'users', ['user_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'comments', 'bookmarks', ['bookmark_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'comments', 'chapters', ['chapter_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('messages_from_user_id_fkey', 'messages', type_='foreignkey')
    op.drop_constraint('messages_to_user_id_fkey', 'messages', type_='foreignkey')
    op.create_foreign_key(None, 'messages', 'users', ['to_user_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'messages', 'users', ['from_user_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('notifications_notification_type_id_fkey', 'notifications', type_='foreignkey')
    op.create_foreign_key(None, 'notifications', 'notification_types', ['notification_type_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('tags_tag_type_id_fkey', 'tags', type_='foreignkey')
    op.create_foreign_key(None, 'tags', 'tag_types', ['tag_type_id'], ['id'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original named foreign keys (without CASCADE).

    NOTE(review): op.drop_constraint(None, ...) cannot work at runtime —
    Alembic needs the actual (auto-generated) constraint names created by
    upgrade(); fill them in before relying on this downgrade.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'tags', type_='foreignkey')
    op.create_foreign_key('tags_tag_type_id_fkey', 'tags', 'tag_types', ['tag_type_id'], ['id'])
    op.drop_constraint(None, 'notifications', type_='foreignkey')
    op.create_foreign_key('notifications_notification_type_id_fkey', 'notifications', 'notification_types', ['notification_type_id'], ['id'])
    op.drop_constraint(None, 'messages', type_='foreignkey')
    op.drop_constraint(None, 'messages', type_='foreignkey')
    op.create_foreign_key('messages_to_user_id_fkey', 'messages', 'users', ['to_user_id'], ['id'])
    op.create_foreign_key('messages_from_user_id_fkey', 'messages', 'users', ['from_user_id'], ['id'])
    op.drop_constraint(None, 'comments', type_='foreignkey')
    op.drop_constraint(None, 'comments', type_='foreignkey')
    op.drop_constraint(None, 'comments', type_='foreignkey')
    op.create_foreign_key('comments_user_id_fkey', 'comments', 'users', ['user_id'], ['id'])
    op.create_foreign_key('comments_bookmark_id_fkey', 'comments', 'bookmarks', ['bookmark_id'], ['id'])
    op.create_foreign_key('comments_chapter_id_fkey', 'comments', 'chapters', ['chapter_id'], ['id'])
    op.drop_constraint(None, 'chapters', type_='foreignkey')
    # NOTE(review): every other FK is restored with no ondelete, but this one
    # uses ondelete='SET NULL' — asymmetric with upgrade(); confirm intended.
    op.create_foreign_key('chapters_work_id_fkey', 'chapters', 'works', ['work_id'], ['id'], ondelete='SET NULL')
    op.drop_constraint(None, 'bookmarks', type_='foreignkey')
    op.drop_constraint(None, 'bookmarks', type_='foreignkey')
    op.create_foreign_key('bookmarks_work_id_fkey', 'bookmarks', 'works', ['work_id'], ['id'])
    op.create_foreign_key('bookmarks_user_id_fkey', 'bookmarks', 'users', ['user_id'], ['id'])
    op.drop_constraint(None, 'bookmark_links', type_='foreignkey')
    op.create_foreign_key('bookmark_links_bookmark_id_fkey', 'bookmark_links', 'bookmarks', ['bookmark_id'], ['id'])
    # ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
import codecs
import operator
class ngram:
    """Extract frequent Chinese-character n-grams from lines of text.

    Typical use: call process() once with an iterable of lines. It counts
    every han substring of each line (lengths 2 .. len(line)-1), sorts the
    counts descending, removes shorter substrings dominated by a longer
    superstring (regress), and returns a list of (word, count) tuples.

    Fixes vs. the original: dict.iteritems() (removed in Python 3) is
    replaced by dict.items(); the unused `min_count` computation in
    process() is dropped (it also raised ValueError on empty input).
    """

    def __init__(self):
        # word -> occurrence count while counting; process() replaces it
        # with a sorted list of (word, count) tuples.
        self.words_freq = {}
        # Highest count observed during regress(); read via getMax().
        self.max_count = 0

    def ishan(self, text):
        """Return True if every character is a CJK unified ideograph."""
        return all(u'\u4e00' <= char <= u'\u9fff' for char in text)

    def executeNgram(self, text, n):
        """Count every length-n han substring of *text* into words_freq.

        NOTE(review): range(len(text) - n) skips the final window at
        offset len(text) - n (an off-by-one); preserved as-is so the
        output matches the original implementation.
        """
        words = []
        for start in range(len(text) - n):
            extract = text[start:start + n].strip()
            # strip() can shorten a window containing whitespace; only
            # full-length windows are counted.
            if len(extract) == n:
                words.append(extract)
        for word in words:
            if self.ishan(word):
                self.words_freq[word] = self.words_freq.get(word, 0) + 1

    def regress(self):
        """Drop shorter words dominated by a longer superstring.

        Runs after process() has turned words_freq into a list of
        (word, count) tuples sorted by count descending. An entry at
        index k is removed when the entry at index j contains it, has at
        least its count, and is strictly longer.
        """
        j = 0
        while j < len(self.words_freq):
            # Track the largest count seen for getMax().
            self.max_count = max(self.words_freq[j][1], self.max_count)
            k = 0
            while k < len(self.words_freq):
                if j == k:
                    k = k + 1
                    continue
                if j >= len(self.words_freq) or k >= len(self.words_freq):
                    break
                # Re-read every pass: the del below shifts indices, so j
                # may point at a different entry than on the last pass.
                word = self.words_freq[j]
                compare_word = self.words_freq[k]
                if compare_word[0] in word[0] and word[1] >= compare_word[1] and len(word[0]) > len(compare_word[0]):
                    # compare_word is a dominated substring: remove it.
                    del self.words_freq[k]
                else:
                    k = k + 1
            j = j + 1

    def process(self, text):
        """Count n-grams for every line, sort by frequency, de-duplicate.

        Returns the final list of (word, count) tuples, most frequent
        first (ties keep insertion order; sort is stable).
        """
        text_new = [line for line in text]
        for line in text_new:
            # Window sizes 2 .. len(line)-1, matching the original range.
            for size in range(2, len(line)):
                self.executeNgram(line, size)
        # Py3 fix: iteritems() no longer exists; items() is equivalent here.
        self.words_freq = sorted(self.words_freq.items(),
                                 key=operator.itemgetter(1), reverse=True)
        self.regress()
        return self.words_freq

    def getMax(self):
        """Return the highest frequency observed during regress()."""
        return self.max_count
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.